diff --git a/.github/workflows/build-binary.yaml b/.github/workflows/build-binary.yaml index 64ac8c4a..16a92eff 100644 --- a/.github/workflows/build-binary.yaml +++ b/.github/workflows/build-binary.yaml @@ -8,7 +8,7 @@ on: tags: env: - go_version: 1.21.8 + go_version: 1.22.12 jobs: build: @@ -24,7 +24,6 @@ jobs: with: go-version: ${{ env.go_version }} - - run: echo "IMAGE_TAG=dev" >> $GITHUB_ENV if: ${{ github.ref_name }} == 'main' - run: echo "IMAGE_TAG=${GITHUB_REF##*/}" >> $GITHUB_ENV @@ -33,6 +32,8 @@ jobs: - run: sudo apt-get update -y && sudo apt-get install -y rsync - name: build id: build + env: + GOPATH: ${{ github.workspace }}/go run: | cd avalanchego ./scripts/build.sh diff --git a/.github/workflows/build-container.yml b/.github/workflows/build-container.yml index 4a01bc55..572fb9a8 100644 --- a/.github/workflows/build-container.yml +++ b/.github/workflows/build-container.yml @@ -1,47 +1,132 @@ name: container-images - on: push: - branches: [ "main" ] + branches: [ "main", "feature/**" ] tags: [ "v*" ] jobs: - build-and-push: - runs-on: ubuntu-latest + build-and-push-matrix: + name: Build & Push Matrix + runs-on: ${{ matrix.runner }} permissions: contents: read packages: write + id-token: write + env: + USE_DOCKER_HUB: true + strategy: + fail-fast: false + matrix: + include: + - platform: linux/amd64 + runner: ubuntu-24.04 + - platform: linux/arm64 + runner: ubuntu-24.04-arm + steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: fetch-depth: 0 - - uses: docker/setup-qemu-action@v1 - - uses: docker/setup-buildx-action@v1 + - uses: docker/setup-qemu-action@v3 + - uses: docker/setup-buildx-action@v3 - run: echo "IMAGE_TAG=dev" >> $GITHUB_ENV - if: github.ref_name == 'main' + if: github.ref_name == 'main' || startsWith(github.ref_name, 'feature/') - run: echo "IMAGE_TAG=${GITHUB_REF##*/}" >> $GITHUB_ENV if: startsWith(github.ref, 'refs/tags/v') - name: Login to ghcr.io - uses: docker/login-action@v2 + uses: docker/login-action@v3 with: 
registry: ghcr.io username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - name: Login to docker.io - uses: docker/login-action@v2 + if: ${{ env.USE_DOCKER_HUB == 'true' }} + uses: docker/login-action@v3 with: username: ${{ secrets.DOCKER_HUB_UID }} password: ${{ secrets.DOCKER_HUB_PAT }} - - name: Build image - run: | + - name: Build standard image + run: | + TAGS="--tag ghcr.io/${{ github.repository }}:${{ env.IMAGE_TAG }}" + + if [ "${USE_DOCKER_HUB}" = "true" ]; then + TAGS="$TAGS --tag ${{ secrets.DOCKER_HUB_REPO }}:${{ env.IMAGE_TAG }}" + fi + docker buildx build \ - --platform linux/amd64,linux/arm64 \ - --tag ghcr.io/${{ github.repository }}:${{ env.IMAGE_TAG }} \ - --tag ${{ secrets.DOCKER_HUB_REPO }}:${{ env.IMAGE_TAG }} \ + --platform ${{ matrix.platform }} \ + $TAGS \ --file ./Dockerfile \ - --output type=image,push=true . + --output type=image,push=true \ + . + + - name: Build distroless image + run: | + TAGS="--tag ghcr.io/${{ github.repository }}:${{ env.IMAGE_TAG }}-dless" + + if [ "${USE_DOCKER_HUB}" = "true" ]; then + TAGS="$TAGS --tag ${{ secrets.DOCKER_HUB_REPO }}:${{ env.IMAGE_TAG }}-dless" + fi + + docker buildx build \ + --platform ${{ matrix.platform }} \ + $TAGS \ + --file ./Dockerfile.dless \ + --output type=image,push=true \ + . 
+ + - name: Install Cosign + uses: sigstore/cosign-installer@v3.8.1 + + - name: Sign ghcr images + shell: bash + env: + COSIGN_EXPERIMENTAL: 1 + run: | + cosign sign --yes "ghcr.io/${{ github.repository }}:${{ env.IMAGE_TAG }}" + cosign sign --yes "ghcr.io/${{ github.repository }}:${{ env.IMAGE_TAG }}-dless" + + - name: Sign docker hub images + if: ${{ env.USE_DOCKER_HUB == 'true' }} + shell: bash + env: + COSIGN_EXPERIMENTAL: 1 + run: | + cosign sign --yes "${{ secrets.DOCKER_HUB_REPO }}:${{ env.IMAGE_TAG }}" + cosign sign --yes "${{ secrets.DOCKER_HUB_REPO }}:${{ env.IMAGE_TAG }}-dless" + + - name: Verify ghcr image signatures + shell: bash + env: + COSIGN_EXPERIMENTAL: 1 + run: | + cosign verify \ + --certificate-identity=https://github.com/${{ github.repository }}/.github/workflows/build-container.yml@${{ github.ref }} \ + --certificate-oidc-issuer=https://token.actions.githubusercontent.com \ + "ghcr.io/${{ github.repository }}:${{ env.IMAGE_TAG }}" + + cosign verify \ + --certificate-identity=https://github.com/${{ github.repository }}/.github/workflows/build-container.yml@${{ github.ref }} \ + --certificate-oidc-issuer=https://token.actions.githubusercontent.com \ + "ghcr.io/${{ github.repository }}:${{ env.IMAGE_TAG }}-dless" + + - name: Verify docker hub image signatures + if: ${{ env.USE_DOCKER_HUB == 'true' }} + shell: bash + env: + COSIGN_EXPERIMENTAL: 1 + run: | + cosign verify \ + --certificate-identity=https://github.com/${{ github.repository }}/.github/workflows/build-container.yml@${{ github.ref }} \ + --certificate-oidc-issuer=https://token.actions.githubusercontent.com \ + "${{ secrets.DOCKER_HUB_REPO }}:${{ env.IMAGE_TAG }}" + + cosign verify \ + --certificate-identity=https://github.com/${{ github.repository }}/.github/workflows/build-container.yml@${{ github.ref }} \ + --certificate-oidc-issuer=https://token.actions.githubusercontent.com \ + "${{ secrets.DOCKER_HUB_REPO }}:${{ env.IMAGE_TAG }}-dless" diff --git a/Dockerfile b/Dockerfile index 
3d58d949..48363c14 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.21 AS build +FROM golang:1.22 AS build RUN apt-get update -y && \ apt-get install -y rsync @@ -14,7 +14,7 @@ WORKDIR /app/avalanchego/ RUN /app/avalanchego/scripts/build.sh -FROM ubuntu:24.10 +FROM ubuntu:24.04 WORKDIR /app @@ -32,9 +32,10 @@ ENV HTTP_HOST=0.0.0.0 \ NETWORK_ID=costwo \ AUTOCONFIGURE_PUBLIC_IP=1 \ AUTOCONFIGURE_BOOTSTRAP=1 \ - AUTOCONFIGURE_BOOTSTRAP_ENDPOINT=https://coston2.flare.network/ext/info \ + AUTOCONFIGURE_BOOTSTRAP_ENDPOINT=https://coston2-bootstrap.flare.network/ext/info \ EXTRA_ARGUMENTS="" \ - BOOTSTRAP_BEACON_CONNECTION_TIMEOUT="1m" + BOOTSTRAP_BEACON_CONNECTION_TIMEOUT="1m" \ + HTTP_ALLOWED_HOSTS="*" RUN apt-get update -y && \ apt-get install -y curl jq @@ -54,4 +55,4 @@ VOLUME [ "${CHAIN_CONFIG_DIR}" ] HEALTHCHECK CMD curl --fail http://localhost:${HTTP_PORT}/ext/health || exit 1 ENTRYPOINT [ "/usr/bin/bash" ] -CMD [ "/app/entrypoint.sh" ] +CMD [ "/app/entrypoint.sh" ] \ No newline at end of file diff --git a/Dockerfile.dless b/Dockerfile.dless new file mode 100644 index 00000000..29db48e3 --- /dev/null +++ b/Dockerfile.dless @@ -0,0 +1,62 @@ +FROM golang:1.22 AS build + +RUN apt-get update -y && \ + apt-get install -y rsync + +WORKDIR /app/ + +COPY ./.git /app/.git +COPY ./avalanchego /app/avalanchego +COPY ./config /app/config +COPY ./coreth /app/coreth + +WORKDIR /app/avalanchego/ + +RUN /app/avalanchego/scripts/build.sh + +RUN mkdir -p /app/conf/coston /app/conf/C /app/logs /app/db + +WORKDIR /entrypoint +COPY entrypoint/main.go . 
+RUN go build -ldflags="-s -w" -o /out/entrypoint main.go + +FROM gcr.io/distroless/base:nonroot AS final + +USER nonroot + +WORKDIR /app + +ENV HTTP_HOST=0.0.0.0 \ + HTTP_PORT=9650 \ + STAKING_PORT=9651 \ + PUBLIC_IP= \ + DB_DIR=/app/db \ + DB_TYPE=leveldb \ + BOOTSTRAP_IPS= \ + BOOTSTRAP_IDS= \ + CHAIN_CONFIG_DIR=/app/conf \ + LOG_DIR=/app/logs \ + LOG_LEVEL=info \ + NETWORK_ID=costwo \ + AUTOCONFIGURE_PUBLIC_IP=1 \ + AUTOCONFIGURE_BOOTSTRAP=1 \ + AUTOCONFIGURE_BOOTSTRAP_ENDPOINT=https://coston2-bootstrap.flare.network/ext/info \ + EXTRA_ARGUMENTS="" \ + BOOTSTRAP_BEACON_CONNECTION_TIMEOUT="1m" \ + HTTP_ALLOWED_HOSTS="*" + +COPY --from=build --chown=nonroot:nonroot /app/conf /app/conf +COPY --from=build --chown=nonroot:nonroot /app/logs /app/logs +COPY --from=build --chown=nonroot:nonroot /app/db /app/db + +COPY --from=build --chown=nonroot:nonroot /app/avalanchego/build /app/build +COPY --from=build --chown=nonroot:nonroot /out/entrypoint /app/entrypoint + +EXPOSE ${STAKING_PORT} +EXPOSE ${HTTP_PORT} + +VOLUME [ "${DB_DIR}" ] +VOLUME [ "${LOG_DIR}" ] +VOLUME [ "${CHAIN_CONFIG_DIR}" ] + +ENTRYPOINT [ "/app/entrypoint" ] diff --git a/README-docker.md b/README-docker.md index f785bb88..5612c9fe 100644 --- a/README-docker.md +++ b/README-docker.md @@ -1,13 +1,17 @@ -# Flare & Coston2 +# go-flare -Docker image for the Flare & Coston2 node implementation found on [github](https://github.com/flare-foundation/go-flare). +Docker images for the go-flare node implementation found at [github](https://github.com/flare-foundation/go-flare). + +## Variants +Images with the `-dless` postfix are built using a distroless base and are rootless. +Process runs under user `nonroot` with UID `65532`. You should chown your volume mounts to match this UID using `chown -R 65532:65532 /mnt/my/db`. 
## Quickstart ```sh docker run -d \ -p 9650-9651:9650-9651 \ - flarefoundation/flare:latest + flarefoundation/go-flare: ``` Currently the default network is `costwo` but you can change that by providing a `NETWORK_ID` environment variable (i.e. `NETWORK_ID=flare`). @@ -53,9 +57,10 @@ These are the environment variables you can edit and their default values: | `NETWORK_ID` | `costwo` | The network id. The common ids are `flare \| costwo` | | `AUTOCONFIGURE_PUBLIC_IP` | `0` | Set to `1` to autoconfigure `PUBLIC_IP`, skipped if PUBLIC_IP is set | | `AUTOCONFIGURE_BOOTSTRAP` | `0` | Set to `1` to autoconfigure `BOOTSTRAP_IPS` and `BOOTSTRAP_IDS` | -| `AUTOCONFIGURE_BOOTSTRAP_ENDPOINT` | `https://coston2.flare.network/ext/info` | Endpoint used for [bootstrapping](https://docs.avax.network/nodes/maintain/avalanchego-config-flags#bootstrapping) when `AUTOCONFIGURE_BOOTSTRAP` is enabled. Possible values are `https://coston2.flare.network/ext/info` or `https://flare.flare.network/ext/info`. | +| `AUTOCONFIGURE_BOOTSTRAP_ENDPOINT` | `https://coston2-bootstrap.flare.network/ext/info` | Endpoint used for [bootstrapping](https://docs.avax.network/nodes/maintain/avalanchego-config-flags#bootstrapping) when `AUTOCONFIGURE_BOOTSTRAP` is enabled. Possible values are `https://coston2-bootstrap.flare.network/ext/info`, `https://flare-bootstrap.flare.network/ext/info`, `https://coston-bootstrap.flare.network/ext/info` or `https://songbird-bootstrap.flare.network/ext/info`. | | `AUTOCONFIGURE_FALLBACK_ENDPOINTS` | _(empty)_ | Comma-divided fallback bootstrap endpoints, used if `AUTOCONFIGURE_BOOTSTRAP_ENDPOINT` is not valid (not whitelisted / unreachable / etc), tested from first-to-last until one is valid | | `BOOTSTRAP_BEACON_CONNECTION_TIMEOUT` | `1m` | Set the duration value (eg. `45s` / `5m` / `1h`) for [--bootstrap-beacon-connection-timeout](https://docs.avax.network/nodes/maintain/avalanchego-config-flags#--bootstrap-beacon-connection-timeout-duration) AvalancheGo flag. 
| +| `HTTP_ALLOWED_HOSTS` | `*` | Blocks RPC calls unless they originate from these hostnames. | | `EXTRA_ARGUMENTS` | | Extra arguments passed to flare binary | @@ -73,13 +78,13 @@ The external API configuration is set to only respond to API calls so it offload "coreth-admin-api-enabled": false, "coreth-admin-api-dir": "", "eth-apis": [ - "public-eth", - "public-eth-filter", + "eth", + "eth-filter", "net", "web3", - "internal-public-eth", - "internal-public-blockchain", - "internal-public-transaction-pool" + "internal-eth", + "internal-blockchain", + "internal-transaction" ], } ``` @@ -94,22 +99,20 @@ Similarly to the external API configuration, this one also responds to API calls "coreth-admin-api-enabled": false, "coreth-admin-api-dir": "", "eth-apis": [ - "public-eth", - "public-eth-filter", - "private-admin", - "public-debug", - "private-debug", + "eth", + "eth-filter", + "admin", + "debug", "net", "debug-tracer", "web3", - "internal-public-eth", - "internal-public-blockchain", - "internal-public-transaction-pool", - "internal-public-tx-pool", - "internal-public-debug", - "internal-private-debug", - "internal-public-account", - "internal-private-personal" + "internal-eth", + "internal-blockchain", + "internal-transaction", + "internal-tx-pool", + "internal-debug", + "internal-account", + "internal-personal" ], } ``` diff --git a/README.md b/README.md index 7126a102..8dbe3f74 100644 --- a/README.md +++ b/README.md @@ -1,23 +1,22 @@ # go-flare -go-flare is a modified version of [avalanchego@v1.10.0](https://github.com/ava-labs/avalanchego/releases/tag/v1.10.0) and [coreth@v0.12.0](https://github.com/ava-labs/coreth/releases/tag/v0.12.0), incorporating specific features for Flare and Songbird networks. These features include prioritized contract handling and the invocation of the daemon contract. 
+go-flare is a modified version of [avalanchego@v1.11.0](https://github.com/ava-labs/avalanchego/releases/tag/v1.11.0) and [coreth@v0.13.0](https://github.com/ava-labs/coreth/releases/tag/v0.13.0), incorporating specific features for Flare and Songbird networks. These features include prioritized contract handling and the invocation of the daemon contract. -**Networks should be updated to this version before** +**Test Networks should be updated to this version before** -- **Songbird: May 6, 2025 at 12 UTC** -- **Flare: May 13, 2025 at 12 UTC** +- **Coston2: June 24, 2025 at 12 UTC** +- **Coston: July 1, 2025 at 12 UTC** See [release notes](./RELEASES-flare.md) for more info. - ## System Requirements -- go version 1.21.8 +- go version 1.22 - gcc, g++ and jq - CPU: Equivalent of 8 AWS vCPU - RAM: 16 GiB - Storage: 1TB Flare / 3.5TB Songbird -- OS: Ubuntu 20.04/22.04 +- OS: Ubuntu 22.04/24.04 ## Compilation @@ -35,13 +34,13 @@ These servers fulfill a critical role in securing the network: - They run a consensus algorithm so that all validators in the network agree on the transactions to add to the blockchain. - Finally, they add the agreed-upon transactions to their copy of the ledger. -This guide explains how to deploy your own validator node so you can participate in the consensus and collect the rewards that the network provides to those who help secure it: https://docs.flare.network/infra/validation/deploying/ +This guide explains how to deploy your own validator node so you can participate in the consensus and collect the rewards that the network provides to those who help secure it: ## Deploy an Observation Node Observation nodes enable anyone to observe the network and submit transactions. Unlike validator nodes, which provide state consensus and add blocks, observation nodes remain outside the network and have no effect on consensus or blocks. 
-This guide explains how to deploy your own observation node: https://docs.flare.network/infra/observation/deploying/ +This guide explains how to deploy your own observation node: ## Tests @@ -50,16 +49,29 @@ See `tests/README.md` for testing details ## Container image Public container images are hosted on [Docker HUB](https://hub.docker.com/r/flarefoundation/go-flare) and [Github Packages](https://github.com/orgs/flare-foundation/packages?repo_name=go-flare); + ``` docker.io/flarefoundation/go-flare -hgcr.io/flare-foundation/go-flare +ghcr.io/flare-foundation/go-flare +``` + +Images are signed using [Cosign](https://github.com/sigstore/cosign) with the GitHub OIDC provider. To verify the image, run this command: + +```bash +cosign verify \ + --certificate-identity-regexp="^https://github\.com/flare-foundation/go-flare/\.github/workflows/build-container\.yml@" \ + --certificate-oidc-issuer=https://token.actions.githubusercontent.com \ + ghcr.io/flare-foundation/go-flare: + +cosign verify \ + --certificate-identity-regexp="^https://github\.com/flare-foundation/go-flare/\.github/workflows/build-container\.yml@" \ + --certificate-oidc-issuer=https://token.actions.githubusercontent.com \ + docker.io/flarefoundation/go-flare: ``` ### Container builds in CI CI builds on each: + - push on `main` branch, pushes image tagged as "dev" - creation of a tag, pushes images tagged as the tag itself - -Builds: \ -two images, `go-flare:` one with `leveldb` diff --git a/RELEASES-flare.md b/RELEASES-flare.md index 6eb50b72..a67f5eda 100644 --- a/RELEASES-flare.md +++ b/RELEASES-flare.md @@ -2,6 +2,18 @@ Here are listed specific changes to the code for the Flare and Songbird networks. For a comprehensive list of general changes, see [here](./avalanchego/RELEASES.md) for the AvalancheGo project and [here](./coreth/RELEASES.md) for the Coreth project. 
+## v1.11.0 + +The changes go into effect + * June 24, 2025 at 12 PM UTC for the Coston2 network, + * July 1, 2025 at 12 PM UTC for the Coston network, + * July 22, 2025 at 12 PM UTC for the Songbird network, + * August 5, 2025 at 12 PM UTC for the Flare network. + +### Note: + +- Avalanche added in v1.10.3 a new config `--http-allowed-hosts` with a default value of `localhost`. Set `--http-allowed-hosts="*"` to allow RPC calls for all hosts. + ## v1.10.0 The changes go into effect on May 6, 2025 at 12 PM UTC for the Songbird network and on May 13, 2025 at 12 PM UTC for the Flare network. diff --git a/avalanchego/.golangci.yml b/avalanchego/.golangci.yml index 78f38391..8475a75c 100644 --- a/avalanchego/.golangci.yml +++ b/avalanchego/.golangci.yml @@ -1,59 +1,136 @@ # https://golangci-lint.run/usage/configuration/ run: timeout: 10m - # skip auto-generated files. - skip-files: - - ".*\\.pb\\.go$" - - ".*mock.*" + + # Enables skipping of directories: + # - vendor$, third_party$, testdata$, examples$, Godeps$, builtin$ + # Default: true + skip-dirs-use-default: false + + # If set we pass it to "go list -mod={option}". From "go help modules": + # If invoked with -mod=readonly, the go command is disallowed from the implicit + # automatic updating of go.mod described above. Instead, it fails when any changes + # to go.mod are needed. This setting is most useful to check that go.mod does + # not need updates, such as in a continuous integration and testing system. + # If invoked with -mod=vendor, the go command assumes that the vendor + # directory holds the correct copies of dependencies and ignores + # the dependency descriptions in go.mod. + # + # Allowed values: readonly|vendor|mod + # By default, it isn't set. + modules-download-mode: readonly + +output: + # Make issues output unique by line. + # Default: true + uniq-by-line: false issues: - # Maximum count of issues with the same text. Set to 0 to disable. Default is 3. + # Maximum issues count per one linter. 
+ # Set to 0 to disable. + # Default: 50 + max-issues-per-linter: 0 + + # Maximum count of issues with the same text. + # Set to 0 to disable. + # Default: 3 max-same-issues: 0 linters: - # please, do not use `enable-all`: it's deprecated and will be removed soon. - # inverted configuration with `enable-all` and `disable` is not scalable during updates of golangci-lint disable-all: true enable: - asciicheck - bodyclose - depguard + - dupword - errcheck + - errorlint - exportloopref + - forbidigo + - gci - goconst - gocritic + # - goerr113 - gofmt - gofumpt - - goimports + # - gomnd - goprintffuncname - gosec - gosimple - govet + - importas - ineffassign + # - lll - misspell - nakedret - noctx - nolintlint + - perfsprint - prealloc + - predeclared - revive + - spancheck - staticcheck - stylecheck + - tagalign + - testifylint - typecheck - unconvert - unparam - unused + - usestdlibvars - whitespace - # - errorlint (TODO: re-enable in go1.20 migration) - # - goerr113 - # - gomnd - # - lll linters-settings: + depguard: + rules: + packages: + deny: + - pkg: "io/ioutil" + desc: io/ioutil is deprecated. Use package io or os instead. + - pkg: "github.com/stretchr/testify/assert" + desc: github.com/stretchr/testify/require should be used instead. + - pkg: "github.com/golang/mock/gomock" + desc: go.uber.org/mock/gomock should be used instead. errorlint: # Check for plain type assertions and type switches. asserts: false # Check for plain error comparisons. comparison: false + forbidigo: + # Forbid the following identifiers (list of regexp). + forbid: + - 'require\.Error$(# ErrorIs should be used instead)?' + - 'require\.ErrorContains$(# ErrorIs should be used instead)?' + - 'require\.EqualValues$(# Equal should be used instead)?' + - 'require\.NotEqualValues$(# NotEqual should be used instead)?' + - '^(t|b|tb|f)\.(Fatal|Fatalf|Error|Errorf)$(# the require library should be used instead)?' + # Exclude godoc examples from forbidigo checks. 
+ exclude_godoc_examples: false + gci: + sections: + - standard + - default + - blank + - dot + - prefix(github.com/ava-labs/avalanchego) + - alias + skip-generated: true + custom-order: true + gosec: + excludes: + - G107 # Url provided to HTTP request as taint input https://securego.io/docs/rules/g107 + importas: + # Do not allow unaliased imports of aliased packages. + no-unaliased: false + # Do not allow non-required aliases. + no-extra-aliases: false + # List of aliases + alias: + - pkg: github.com/ava-labs/avalanchego/utils/math + alias: safemath + - pkg: github.com/ava-labs/avalanchego/utils/json + alias: avajson revive: rules: # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#bool-literal-in-expr @@ -96,20 +173,32 @@ linters-settings: # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#useless-break - name: useless-break disabled: false + spancheck: + # https://github.com/jjti/go-spancheck#checks + checks: + - end + # - record-error # check that `span.RecordError(err)` is called when an error is returned + # - set-status # check that `span.SetStatus(codes.Error, msg)` is called when an error is returned staticcheck: - go: "1.19" # https://staticcheck.io/docs/options#checks checks: - "all" - - "-SA6002" # argument should be pointer-like to avoid allocation, for sync.Pool - - "-SA1019" # deprecated packages e.g., golang.org/x/crypto/ripemd160 - # https://golangci-lint.run/usage/linters#gosec - gosec: - excludes: - - G107 # https://securego.io/docs/rules/g107.html - depguard: - list-type: blacklist - packages-with-error-message: - - io/ioutil: 'io/ioutil is deprecated. Use package io or os instead.' - - github.com/stretchr/testify/assert: 'github.com/stretchr/testify/require should be used instead.' 
- include-go-root: true + - "-SA6002" # Storing non-pointer values in sync.Pool allocates memory + - "-SA1019" # Using a deprecated function, variable, constant or field + tagalign: + align: true + sort: true + strict: true + order: + - serialize + testifylint: + # Enable all checkers (https://github.com/Antonboom/testifylint#checkers). + # Default: false + enable-all: true + # Disable checkers by name + # (in addition to default + # suite-thelper + # ). + disable: + - go-require + - float-compare diff --git a/avalanchego/.goreleaser.yml b/avalanchego/.goreleaser.yml deleted file mode 100644 index d67f5b92..00000000 --- a/avalanchego/.goreleaser.yml +++ /dev/null @@ -1,28 +0,0 @@ -# ref. https://goreleaser.com/customization/build/ -builds: - - main: ./main - binary: avalanchego - flags: - - -v - ignore: - - goos: linux - goarch: arm64 - - goos: darwin - goarch: amd64 - - goos: darwin - goarch: arm64 - - goos: darwin - goarch: 386 - - goos: linux - goarch: 386 - - goos: windows - goarch: 386 - - goos: freebsd - goarch: 386 - -release: - # Repo in which the release will be created. - # Default is extracted from the origin remote URL or empty if its private hosted. - github: - owner: ava-labs - name: avalanchego diff --git a/avalanchego/.kurtosis/kurtosis.sh b/avalanchego/.kurtosis/kurtosis.sh deleted file mode 100755 index a3d1cd85..00000000 --- a/avalanchego/.kurtosis/kurtosis.sh +++ /dev/null @@ -1,226 +0,0 @@ -# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! WARNING !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -# -# Do not modify this file! It will get overwritten when you upgrade Kurtosis! -# -# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! WARNING !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 
- -set -euo pipefail - - - -# ============================================================================================ -# Constants -# ============================================================================================ -# The directory where Kurtosis will store files it uses in between executions, e.g. access tokens -# Can make this configurable if needed -KURTOSIS_DIRPATH="${HOME}/.kurtosis" - -KURTOSIS_CORE_TAG="1.8" -KURTOSIS_DOCKERHUB_ORG="kurtosistech" -INITIALIZER_IMAGE="${KURTOSIS_DOCKERHUB_ORG}/kurtosis-core_initializer:${KURTOSIS_CORE_TAG}" -API_IMAGE="${KURTOSIS_DOCKERHUB_ORG}/kurtosis-core_api:${KURTOSIS_CORE_TAG}" - -POSITIONAL_ARG_DEFINITION_FRAGMENTS=2 - - - -# ============================================================================================ -# Arg Parsing -# ============================================================================================ -function print_help_and_exit() { - echo "" - echo "$(basename "${0}") [--custom-params custom_params_json] [--client-id client_id] [--client-secret client_secret] [--help] [--kurtosis-log-level kurtosis_log_level] [--list] [--parallelism parallelism] [--tests test_names] [--test-suite-log-level test_suite_log_level] test_suite_image" - echo "" - echo " --custom-params custom_params_json JSON string containing arbitrary data that will be passed as-is to your testsuite, so it can modify its behaviour based on input (default: {})" - echo " --client-id client_id An OAuth client ID which is needed for running Kurtosis in CI, and should be left empty when running Kurtosis on a local machine" - echo " --client-secret client_secret An OAuth client secret which is needed for running Kurtosis in CI, and should be left empty when running Kurtosis on a local machine" - echo " --help Display this message" - echo " --kurtosis-log-level kurtosis_log_level The log level that all output generated by the Kurtosis framework itself should log at (panic|fatal|error|warning|info|debug|trace) (default: 
info)" - echo " --list Rather than running the tests, lists the tests available to run" - echo " --parallelism parallelism The number of texts to execute in parallel (default: 4)" - echo " --tests test_names List of test names to run, separated by ',' (default or empty: run all tests)" - echo " --test-suite-log-level test_suite_log_level A string that will be passed as-is to the test suite container to indicate what log level the test suite container should output at; this string should be meaningful to the test suite container because Kurtosis won't know what logging framework the testsuite uses (default: info)" - echo " test_suite_image The Docker image containing the testsuite to execute" - - echo "" - exit 1 # Exit with an error code, so that if it gets accidentally called in parent scripts/CI it fails loudly -} - - - -# ============================================================================================ -# Arg Parsing -# ============================================================================================ -client_id="" -client_secret="" -custom_params_json="{}" -do_list="false" -kurtosis_log_level="info" -parallelism="4" -show_help="false" -test_names="" -test_suite_image="" -test_suite_log_level="info" - - - -POSITIONAL=() -while [ ${#} -gt 0 ]; do - key="${1}" - case "${key}" in - - --custom-params) - - custom_params_json="${2}" - shift # Shift to clear out the flag - shift # Shift again to clear out the value - ;; - - --client-id) - - client_id="${2}" - shift # Shift to clear out the flag - shift # Shift again to clear out the value - ;; - - --client-secret) - - client_secret="${2}" - shift # Shift to clear out the flag - shift # Shift again to clear out the value - ;; - - --help) - show_help="true" - shift # Shift to clear out the flag - - ;; - - --kurtosis-log-level) - - kurtosis_log_level="${2}" - shift # Shift to clear out the flag - shift # Shift again to clear out the value - ;; - - --list) - do_list="true" - shift # Shift to clear out 
the flag - - ;; - - --parallelism) - - parallelism="${2}" - shift # Shift to clear out the flag - shift # Shift again to clear out the value - ;; - - --tests) - - test_names="${2}" - shift # Shift to clear out the flag - shift # Shift again to clear out the value - ;; - - --test-suite-log-level) - - test_suite_log_level="${2}" - shift # Shift to clear out the flag - shift # Shift again to clear out the value - ;; - - -*) - echo "ERROR: Unrecognized flag '${key}'" >&2 - exit 1 - ;; - *) - POSITIONAL+=("${1}") - shift - ;; - esac -done - -if "${show_help}"; then - print_help_and_exit -fi - -# Restore positional parameters and assign them to variables -set -- "${POSITIONAL[@]}" -test_suite_image="${1:-}" - - - - - -# ============================================================================================ -# Arg Validation -# ============================================================================================ -if [ "${#}" -ne 1 ]; then - echo "ERROR: Expected 1 positional variables but got ${#}" >&2 - print_help_and_exit -fi - -if [ -z "$test_suite_image" ]; then - echo "ERROR: Variable 'test_suite_image' cannot be empty" >&2 - exit 1 -fi - - - -# ============================================================================================ -# Main Logic -# ============================================================================================# Because Kurtosis X.Y.Z tags are normalized to X.Y so that minor patch updates are transparently -# used, we need to pull the latest API & initializer images -echo "Pulling latest versions of API & initializer image..." -if ! docker pull "${INITIALIZER_IMAGE}"; then - echo "WARN: An error occurred pulling the latest version of the initializer image (${INITIALIZER_IMAGE}); you may be running an out-of-date version" >&2 -else - echo "Successfully pulled latest version of initializer image" -fi -if ! 
docker pull "${API_IMAGE}"; then - echo "WARN: An error occurred pulling the latest version of the API image (${API_IMAGE}); you may be running an out-of-date version" >&2 -else - echo "Successfully pulled latest version of API image" -fi - -# Kurtosis needs a Docker volume to store its execution data in -# To learn more about volumes, see: https://docs.docker.com/storage/volumes/ -sanitized_image="$(echo "${test_suite_image}" | sed 's/[^a-zA-Z0-9_.-]/_/g')" -suite_execution_volume="$(date +%Y-%m-%dT%H.%M.%S)_${sanitized_image}" -if ! docker volume create "${suite_execution_volume}" > /dev/null; then - echo "ERROR: Failed to create a Docker volume to store the execution files in" >&2 - exit 1 -fi - -if ! mkdir -p "${KURTOSIS_DIRPATH}"; then - echo "ERROR: Failed to create the Kurtosis directory at '${KURTOSIS_DIRPATH}'" >&2 - exit 1 -fi - -docker run \ - `# The Kurtosis initializer runs inside a Docker container, but needs to access to the Docker engine; this is how to do it` \ - `# For more info, see the bottom of: http://jpetazzo.github.io/2015/09/03/do-not-use-docker-in-docker-for-ci/` \ - --mount "type=bind,source=/var/run/docker.sock,target=/var/run/docker.sock" \ - \ - `# Because the Kurtosis initializer runs inside Docker but needs to persist & read files on the host filesystem between execution,` \ - `# the container expects the Kurtosis directory to be bind-mounted at the special "/kurtosis" path` \ - --mount "type=bind,source=${KURTOSIS_DIRPATH},target=/kurtosis" \ - \ - `# The Kurtosis initializer image requires the volume for storing suite execution data to be mounted at the special "/suite-execution" path` \ - --mount "type=volume,source=${suite_execution_volume},target=/suite-execution" \ - \ - `# Keep these sorted alphabetically` \ - --env CLIENT_ID="${client_id}" \ - --env CLIENT_SECRET="${client_secret}" \ - --env CUSTOM_PARAMS_JSON="${custom_params_json}" \ - --env DO_LIST="${do_list}" \ - --env KURTOSIS_API_IMAGE="${API_IMAGE}" \ - --env 
KURTOSIS_LOG_LEVEL="${kurtosis_log_level}" \ - --env PARALLELISM="${parallelism}" \ - --env SUITE_EXECUTION_VOLUME="${suite_execution_volume}" \ - --env TEST_NAMES="${test_names}" \ - --env TEST_SUITE_IMAGE="${test_suite_image}" \ - --env TEST_SUITE_LOG_LEVEL="${test_suite_log_level}" \ - \ - "${INITIALIZER_IMAGE}" diff --git a/avalanchego/Dockerfile b/avalanchego/Dockerfile index 20c06bf4..62594f64 100644 --- a/avalanchego/Dockerfile +++ b/avalanchego/Dockerfile @@ -1,12 +1,10 @@ # Changes to the minimum golang version must also be replicated in # scripts/build_avalanche.sh -# scripts/local.Dockerfile # Dockerfile (here) # README.md # go.mod # ============= Compilation Stage ================ -FROM golang:1.19.6-buster AS builder -RUN apt-get update && apt-get install -y --no-install-recommends bash=5.0-4 make=4.2.1-1.2 gcc=4:8.3.0-1 musl-dev=1.1.21-2 ca-certificates=20200601~deb10u2 linux-headers-amd64 +FROM golang:1.21.7-bullseye AS builder WORKDIR /build # Copy and download avalanche dependencies using go mod @@ -18,7 +16,8 @@ RUN go mod download COPY . . # Build avalanchego -RUN ./scripts/build.sh +ARG RACE_FLAG="" +RUN ./scripts/build.sh ${RACE_FLAG} # ============= Cleanup Stage ================ FROM debian:11-slim AS execution diff --git a/avalanchego/LICENSE b/avalanchego/LICENSE index c9be72c5..6178f77a 100644 --- a/avalanchego/LICENSE +++ b/avalanchego/LICENSE @@ -1,6 +1,6 @@ BSD 3-Clause License -Copyright (C) 2019-2023, Ava Labs, Inc. +Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. Redistribution and use in source and binary forms, with or without diff --git a/avalanchego/LICENSE.header b/avalanchego/LICENSE.header deleted file mode 100644 index 1be34461..00000000 --- a/avalanchego/LICENSE.header +++ /dev/null @@ -1,2 +0,0 @@ -Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -See the file LICENSE for licensing terms. 
\ No newline at end of file diff --git a/avalanchego/README.md b/avalanchego/README.md index 38b18a2b..7eec5b92 100644 --- a/avalanchego/README.md +++ b/avalanchego/README.md @@ -17,12 +17,13 @@ The minimum recommended hardware specification for nodes connected to Mainnet is - CPU: Equivalent of 8 AWS vCPU - RAM: 16 GiB - Storage: 1 TiB + - Nodes running for very long periods of time or nodes with custom configurations may observe higher storage requirements. - OS: Ubuntu 20.04/22.04 or macOS >= 12 - Network: Reliable IPv4 or IPv6 network connection, with an open public port. If you plan to build AvalancheGo from source, you will also need the following software: -- [Go](https://golang.org/doc/install) version >= 1.19.6 +- [Go](https://golang.org/doc/install) version >= 1.21.7 - [gcc](https://gcc.gnu.org/) - g++ @@ -131,7 +132,12 @@ To connect to the Fuji Testnet, run: ### Creating a Local Testnet -See [this tutorial.](https://docs.avax.network/build/tutorials/platform/create-a-local-test-network/) +The [avalanche-cli](https://github.com/ava-labs/avalanche-cli) is the easiest way to start a local network. + +```sh +avalanche network start +avalanche network status +``` ## Bootstrapping @@ -153,13 +159,13 @@ To regenerate the protobuf go code, run `scripts/protobuf_codegen.sh` from the r This should only be necessary when upgrading protobuf versions or modifying .proto definition files. -To use this script, you must have [buf](https://docs.buf.build/installation) (v1.11.0), protoc-gen-go (v1.28.0) and protoc-gen-go-grpc (v1.2.0) installed. +To use this script, you must have [buf](https://docs.buf.build/installation) (v1.29.0), protoc-gen-go (v1.30.0) and protoc-gen-go-grpc (v1.3.0) installed. 
To install the buf dependencies: ```sh -go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.28.0 -go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@v1.2.0 +go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.30.0 +go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@v1.3.0 ``` If you have not already, you may need to add `$GOPATH/bin` to your `$PATH`: @@ -188,7 +194,7 @@ docker run -t -i -v $(pwd):/opt/avalanche -w/opt/avalanche avalanche:protobuf_co ### Running mock codegen -To regenerate the [gomock](https://github.com/golang/mock) code, run `scripts/mock.gen.sh` from the root of the repo. +To regenerate the [gomock](https://github.com/uber-go/mock) code, run `scripts/mock.gen.sh` from the root of the repo. This should only be necessary when modifying exported interfaces or after modifying `scripts/mock.mockgen.txt`. diff --git a/avalanchego/RELEASES.md b/avalanchego/RELEASES.md index 1a3d929e..a660d846 100644 --- a/avalanchego/RELEASES.md +++ b/avalanchego/RELEASES.md @@ -1,5 +1,1541 @@ # Release Notes +## [v1.11.1](https://github.com/ava-labs/avalanchego/releases/tag/v1.11.1) + +This version is backwards compatible to [v1.10.0](https://github.com/ava-labs/avalanchego/releases/tag/v1.10.0). It is optional, but encouraged. + +The plugin version is unchanged at `33` and is compatible with version `v1.11.0`. 
+ +### Fixes + +- Suspended transaction re-push gossip in the p2p SDK + +**Full Changelog**: https://github.com/ava-labs/avalanchego/compare/v1.11.0...v1.11.1 + +## [v1.11.0](https://github.com/ava-labs/avalanchego/releases/tag/v1.11.0) + +This upgrade consists of the following Avalanche Community Proposals (ACPs): + +- [ACP-23](https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/23-p-chain-native-transfers.md) P-Chain Native Transfers +- [ACP-24](https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/24-shanghai-eips.md) Activate Shanghai EIPs on C-Chain +- [ACP-25](https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/25-vm-application-errors.md) Virtual Machine Application Errors +- [ACP-30](https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/30-avalanche-warp-x-evm.md) Integrate Avalanche Warp Messaging into the EVM +- [ACP-31](https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/31-enable-subnet-ownership-transfer.md) Enable Subnet Ownership Transfer +- [ACP-41](https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/41-remove-pending-stakers.md) Remove Pending Stakers +- [ACP-62](https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/62-disable-addvalidatortx-and-adddelegatortx.md) Disable AddValidatorTx and AddDelegatorTx + +The changes in the upgrade go into effect at 11 AM ET (4 PM UTC) on Wednesday, March 6th, 2024 on Mainnet. + +**All Durango supporting Mainnet nodes should upgrade before 11 AM ET, March 6th 2024.** + +The plugin version is updated to `33` all plugins must update to be compatible. 
+ +### APIs + +- Added `platform.getSubnet` API + +### Configs + +- Deprecated: + - `api-auth-required` + - `api-auth-password` + - `api-auth-password-file` + +### Fixes + +- Fixed potential deadlock during P-chain shutdown +- Updated the consensus engine to recover from previously misconfigured subnets without requiring a restart + +### What's Changed + +- `ci`: Upgrade all workflow actions to versions using Node 20 by @marun in https://github.com/ava-labs/avalanchego/pull/2677 +- `tmpnet`: Ensure restart after chain creation by @marun in https://github.com/ava-labs/avalanchego/pull/2675 +- Publish docker images with race detection by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2680 +- `vms/platformvm`: Remove `NewRewardValidatorTx` from `Builder` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2676 +- `ci`: Updated shellcheck script to support autofix by @marun in https://github.com/ava-labs/avalanchego/pull/2678 +- Unblock misconfigured subnets by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2679 +- Add transfer subnet ownership functionality to wallet by @felipemadero in https://github.com/ava-labs/avalanchego/pull/2659 +- Add ACP-62 by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2681 +- `vms/platformvm`: Add missing txs to `txs.Builder` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2663 +- `vms/platformvm`: Disable `AddValidatorTx` and `AddDelegatorTx` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2662 +- Remove chain router from node.Config by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2683 +- Deprecate the auth API by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2684 +- Fix P-chain Shutdown deadlock by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2686 +- Cleanup ID initialization by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2690 +- Remove unused chains#beacons field by 
@joshua-kim in https://github.com/ava-labs/avalanchego/pull/2692 +- x/sync: Remove duplicated call to TrackBandwidth by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2694 +- Move VMAliaser into node from config by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2689 +- Fix minor errors in x/sync tests by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2709 +- Update minimum golang version to v1.21.7 by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2710 +- Check for github action updates in dependabot by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2715 +- Update `golangci-lint` to `v1.56.1` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2714 +- Add stringer to warp types by @aaronbuchwald in https://github.com/ava-labs/avalanchego/pull/2712 +- Refactor `p2p.PeerTracker` by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2701 +- Bump actions/stale from 8 to 9 by @dependabot in https://github.com/ava-labs/avalanchego/pull/2719 +- Bump github/codeql-action from 2 to 3 by @dependabot in https://github.com/ava-labs/avalanchego/pull/2720 +- Bump bufbuild/buf-setup-action from 1.26.1 to 1.29.0 by @dependabot in https://github.com/ava-labs/avalanchego/pull/2721 +- Bump aws-actions/configure-aws-credentials from 1 to 4 by @dependabot in https://github.com/ava-labs/avalanchego/pull/2722 +- Manually setup golang in codeql action by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2725 +- Provide pgo file during compilation by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2724 +- P-chain - Tx builder cleanup by @abi87 in https://github.com/ava-labs/avalanchego/pull/2718 +- Refactor chain manager subnets by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/2711 +- Replace snowball/snowflake interface with single shared snow interface by @aaronbuchwald in https://github.com/ava-labs/avalanchego/pull/2717 +- Remove 
duplicate IP length constant by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2733 +- Add `platform.getSubnet` API by @felipemadero in https://github.com/ava-labs/avalanchego/pull/2704 +- Provide BLS signature in Handshake message by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2730 +- Verify BLS signature provided in Handshake messages by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2735 +- Move UTXOs definition from primary to primary/common by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2741 +- Minimize Signer interface and document Sign by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2740 +- Revert setup-go during unit tests by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2744 +- P-chain wallet fees UTs by @abi87 in https://github.com/ava-labs/avalanchego/pull/2734 +- `merkledb` -- generalize error case to check state that should never occur by @danlaine in https://github.com/ava-labs/avalanchego/pull/2743 +- Revert setup-go to v3 on all arm actions by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2749 +- Add AppError to Sender interface by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/2737 +- P-chain - Cleaned up fork switch in UTs by @abi87 in https://github.com/ava-labs/avalanchego/pull/2746 +- X-chain wallet fees UTs by @abi87 in https://github.com/ava-labs/avalanchego/pull/2747 +- Add keys values to bimap by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2754 +- fix test sender by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/2755 + +**Full Changelog**: https://github.com/ava-labs/avalanchego/compare/v1.10.19...v1.11.0 + +## [v1.10.19](https://github.com/ava-labs/avalanchego/releases/tag/v1.10.19) + +This version is backwards compatible to [v1.10.0](https://github.com/ava-labs/avalanchego/releases/tag/v1.10.0). It is optional, but encouraged. 
+ +The plugin version is unchanged at `31` and is compatible with version `v1.10.18`. + +### APIs + +- Added `admin.dbGet` call to the `admin` API +- Added bloom filter metrics: + - `bloom_filter_count` + - `bloom_filter_entries` + - `bloom_filter_hashes` + - `bloom_filter_max_count` + - `bloom_filter_reset_count` + to the following namespaces: + - `avalanche_X_vm_mempool` + - `avalanche_P_vm_mempool` + - `avalanche_C_vm_sdk_atomic_mempool` + - `avalanche_C_vm_sdk_eth_mempool` + +### Fixes + +- Fixed race condition during validator set creation +- Fixed C-chain mempool bloom filter recalculation + +### What's Changed + +- `vms/platformvm`: Change `AdvanceTimeTo` to modify passed-in `parentState` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2489 +- `vms/platformvm`: Remove `MempoolTxVerifier` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2362 +- Verify `SignedIP.Timestamp` from `PeerList` messages by @danlaine in https://github.com/ava-labs/avalanchego/pull/2587 +- Fix metrics namespace by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2632 +- Add bloom filter metrics to the p2p sdk by @ceyonur in https://github.com/ava-labs/avalanchego/pull/2612 +- Replace `shutdownEnvironment` with `t.Cleanup()` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2491 +- P-chain - Memo field zeroed post Durango by @abi87 in https://github.com/ava-labs/avalanchego/pull/2607 +- Refactor feature extensions out of VMManager by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/2578 +- Remove getter for router on chain manager by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/2641 +- Fix `require.ErrorIs` argument order by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2645 +- `api/admin`: Cleanup `SuccessResponseTests` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2644 +- Allow calls to `Options` before `Verify` by @StephenButtolph in 
https://github.com/ava-labs/avalanchego/pull/2363 +- Improve logging of unexpected proposer errors by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2646 +- Disable non-security related dependabot PRs by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2647 +- Add historical fork times by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2649 +- Cleanup warp signer tests by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2651 +- Reintroduce the upgrade test against v1.10.18 by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2652 +- Cleanup database benchmarks by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2653 +- Cleanup database tests by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2654 +- `ci`: Add shellcheck step to lint job by @marun in https://github.com/ava-labs/avalanchego/pull/2650 +- Replace `closeFn` with `t.Cleanup` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2638 +- Fix TestExpiredBuildBlock by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2655 +- Add admin.dbGet API by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2667 +- `ci`: Update shellcheck.sh to pass all args to shellcheck by @marun in https://github.com/ava-labs/avalanchego/pull/2657 +- `vms/platformvm`: Remove `NewAdvanceTimeTx` from `Builder` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2668 +- Log error if database returns unsorted heights by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2670 +- `vms/platformvm`: Move `vm.Shutdown` call in tests to `t.Cleanup` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2669 +- `e2e`: Add test of `platform.getValidatorsAt` across nodes by @marun in https://github.com/ava-labs/avalanchego/pull/2664 +- Fix P-chain validator set lookup race condition by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2672 + +**Full Changelog**: 
https://github.com/ava-labs/avalanchego/compare/v1.10.18...v1.10.19 + +## [v1.10.18](https://github.com/ava-labs/avalanchego/releases/tag/v1.10.18) + +This version is backwards compatible to [v1.10.0](https://github.com/ava-labs/avalanchego/releases/tag/v1.10.0). It is optional, but encouraged. + +The plugin version is updated to `31` all plugins must update to be compatible. + +### APIs + +- Added `info.acps` API +- Added `supportedACPs` and `objectedACPs` for each peer returned by `info.peers` +- Added `txs` field to `BanffProposalBlock`'s json format +- Added metrics: + - `avalanche_network_validator_ips` + - `avalanche_network_gossipable_ips` + - `avalanche_network_ip_bloom_count` + - `avalanche_network_ip_bloom_entries` + - `avalanche_network_ip_bloom_hashes` + - `avalanche_network_ip_bloom_max_count` + - `avalanche_network_ip_bloom_reset_count` +- Added metrics related to `get_peer_list` message handling +- Added p2p SDK metrics to the P-chain and X-chain +- Renamed metrics related to message handling: + - `version` -> `handshake` + - `appRequestFailed` -> `appError` + - `crossChainAppRequestFailed` -> `crossChainAppError` +- Removed `gzip` compression time metrics +- Converted p2p SDK metrics to use vectors rather than independent metrics +- Converted client name reported over the p2p network from `avalanche` to `avalanchego` + +### Configs + +- Added: + - `--acp-support` + - `--acp-object` + - `snow-commit-threshold` + - `network-peer-list-pull-gossip-frequency` + - `network-peer-list-bloom-reset-frequency` + - `network` to the X-chain and P-chain configs including: + - `max-validator-set-staleness` + - `target-gossip-size` + - `pull-gossip-poll-size` + - `pull-gossip-frequency` + - `pull-gossip-throttling-period` + - `pull-gossip-throttling-limit` + - `expected-bloom-filter-elements` + - `expected-bloom-filter-false-positive-probability` + - `max-bloom-filter-false-positive-probability` + - `legacy-push-gossip-cache-size` +- Deprecated: + - 
`snow-virtuous-commit-threshold` + - `snow-rogue-commit-threshold` + - `network-peer-list-validator-gossip-size` + - `network-peer-list-non-validator-gossip-size` + - `network-peer-list-peers-gossip-size` + - `network-peer-list-gossip-frequency` +- Removed: + - `gzip` as an option for `network-compression-type` + +### Fixes + +- Fixed `platformvm.SetPreference` to correctly reset the block building timer +- Fixed early bootstrapping termination +- Fixed duplicated transaction initialization in the X-chain and P-chain +- Fixed IP gossip when using dynamically allocated staking ports +- Updated `golang.org/x/exp` dependency to fix downstream compilation errors +- Updated `golang.org/x/crypto` dependency to address `CVE-2023-48795` +- Updated minimum golang version to address `CVE-2023-39326` +- Restricted `GOPROXY` during compilation to avoid `direct` version control fallbacks +- Fixed `merkledb` deletion of the empty key +- Fixed `merkledb` race condition when interacting with invalidated or closed trie views +- Fixed `json.Marshal` for `wallet` transactions +- Fixed duplicate outbound dialer for manually tracked nodes in the p2p network + +### What's Changed + +- testing: Update to latest version of ginkgo by @marun in https://github.com/ava-labs/avalanchego/pull/2390 +- `vms/platformvm`: Cleanup block builder tests by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2406 +- Drop Pending Stakers 0 - De-duplicate staking tx verification by @abi87 in https://github.com/ava-labs/avalanchego/pull/2335 +- `vms/platformvm`: Initialize txs in `Transactions` field for `BanffProposalBlock` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2419 +- `vms/platformvm`: Move `VerifyUniqueInputs` from `verifier` to `backend` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2410 +- Fix duplicated bootstrapper engine termination by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2334 +- allow user of `build_fuzz.sh` to specify a 
directory to fuzz in by @danlaine in https://github.com/ava-labs/avalanchego/pull/2414 +- Update slices dependency to use Compare by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2424 +- `vms/platformvm`: Cleanup some block tests by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2422 +- ProposerVM Extend windows 0 - Cleanup by @abi87 in https://github.com/ava-labs/avalanchego/pull/2404 +- `vms/platformvm`: Add `decisionTxs` parameter to `NewBanffProposalBlock` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2411 +- Update minimum golang version to v1.20.12 by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2427 +- Fix platformvm.SetPreference by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2429 +- Restrict GOPROXY by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2434 +- Drop Pending Stakers 1 - introduced ScheduledStaker txs by @abi87 in https://github.com/ava-labs/avalanchego/pull/2323 +- Run merkledb fuzz tests every 6 hours by @danlaine in https://github.com/ava-labs/avalanchego/pull/2415 +- Remove unused error by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/2426 +- Make `messageQueue.msgAndCtxs` a circular buffer by @danlaine in https://github.com/ava-labs/avalanchego/pull/2433 +- ProposerVM Extend windows 1 - UTs Cleanup by @abi87 in https://github.com/ava-labs/avalanchego/pull/2412 +- Change seed from int64 to uint64 by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2438 +- Remove usage of timer.Timer in node by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2441 +- Remove staged timer again by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2440 +- `merkledb` / `sync` -- Disambiguate no end root from no start root by @danlaine in https://github.com/ava-labs/avalanchego/pull/2437 +- Drop Pending Stakers 2 - Replace txs.ScheduledStaker with txs.Staker by @abi87 in 
https://github.com/ava-labs/avalanchego/pull/2305 +- `vms/platformvm`: Remove double block building logic by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2380 +- Remove usage of timer.Timer in benchlist by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2446 +- `vms/avm`: Simplify `Peek` function in mempool by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2449 +- `vms/platformvm`: Remove `standardBlockState` struct by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2450 +- Refactor sampler seeding by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2456 +- Update tmpnet fixture to include Proof-of-Possession for initial stakers by @marun in https://github.com/ava-labs/avalanchego/pull/2391 +- `vms/platformvm`: Remove `EnableAdding` and `DisableAdding` from `Mempool` interface by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2463 +- `vms/avm`: Add `exists` bool to mempool `Peek` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2465 +- `vms/platformvm`: Remove `PeekTxs` from `Mempool` interface by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2378 +- `vms/platformvm`: Add `processStandardTxs` helper by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2461 +- `vms/platformvm`: Process `atomicRequests` and `onAcceptFunc` in option blocks by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2459 +- `e2e`: Rename 'funded key' to 'pre-funded key' for consistency by @marun in https://github.com/ava-labs/avalanchego/pull/2455 +- `vms/platformvm`: Surface `VerifyUniqueInputs` in the `Manager` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2467 +- `vms/platformvm`: Add `TestBuildBlockShouldReward` test by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2466 +- Switch client version to a proto type from a string by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/2188 +- Remove stale TODO by @danlaine in 
https://github.com/ava-labs/avalanchego/pull/2468 +- `vms/platformvm`: Add `TestBuildBlockDoesNotBuildWithEmptyMempool` test by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2469 +- `vms/platformvm`: Add `TestBuildBlockShouldAdvanceTime` test by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2471 +- `vms/platformvm`: Permit usage of the `Transactions` field in `BanffProposalBlock` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2451 +- `vms/platformvm`: Add `TestBuildBlockForceAdvanceTime` test by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2472 +- P2P AppError handling by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/2248 +- `vms/platformvm`: Verify txs before building a block by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2359 +- Refactor p2p unit tests by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/2475 +- Add ACP signaling by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2476 +- Refactor SDK by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/2452 +- Cleanup CI by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2480 +- Ensure upgrade test uses the correct binary on restart by @marun in https://github.com/ava-labs/avalanchego/pull/2478 +- Prefetch Improvement by @dboehm-avalabs in https://github.com/ava-labs/avalanchego/pull/2435 +- ci: run each fuzz test for 10 seconds by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2483 +- Remove nullable options by @nytzuga in https://github.com/ava-labs/avalanchego/pull/2481 +- `merkledb` -- dynamic root by @danlaine in https://github.com/ava-labs/avalanchego/pull/2177 +- fix onEvictCache by @danlaine in https://github.com/ava-labs/avalanchego/pull/2484 +- Remove cached node bytes from merkle nodes by @dboehm-avalabs in https://github.com/ava-labs/avalanchego/pull/2393 +- Fix race in view iteration by @dboehm-avalabs in 
https://github.com/ava-labs/avalanchego/pull/2486 +- MerkleDB -- update readme by @danlaine in https://github.com/ava-labs/avalanchego/pull/2423 +- Drop Pending Stakers 3 - persist stakers' StartTime by @abi87 in https://github.com/ava-labs/avalanchego/pull/2306 +- SDK Push Gossiper implementation by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/2428 +- `tmpnet`: Move tmpnet/local to tmpnet package by @marun in https://github.com/ava-labs/avalanchego/pull/2457 +- `merkledb` -- make tests use time as randomness seed by @danlaine in https://github.com/ava-labs/avalanchego/pull/2470 +- `tmpnet`: Break config.go up into coherent parts by @marun in https://github.com/ava-labs/avalanchego/pull/2462 +- Drop Pending Stakers 4 - minimal UT infra cleanup by @abi87 in https://github.com/ava-labs/avalanchego/pull/2332 +- ProposerVM Extend windows 2- extend windowing by @abi87 in https://github.com/ava-labs/avalanchego/pull/2401 +- Support json marshalling txs returned from the wallet by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2494 +- Avoid escaping to improve readability by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2496 +- Allow OutputOwners to be json marshalled without InitCtx by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2495 +- Drop Pending Stakers 5 - validated PostDurango StakerTxs by @abi87 in https://github.com/ava-labs/avalanchego/pull/2314 +- Bump golang.org/x/crypto from 0.14.0 to 0.17.0 by @dependabot in https://github.com/ava-labs/avalanchego/pull/2502 +- Remove unused `BuildGenesisTest` function by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2503 +- Remove unused `AcceptorTracker` struct by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2508 +- Dedupe secp256k1 key usage in tests by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2511 +- Merkledb readme updates by @danlaine in https://github.com/ava-labs/avalanchego/pull/2510 +- Gossip Test 
structs by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/2514 +- `tmpnet`: Separate node into orchestration, config and process by @marun in https://github.com/ava-labs/avalanchego/pull/2460 +- Move `snow.DefaultConsensusContextTest` to `snowtest.ConsensusContext` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2507 +- Add gossip Marshaller interface by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/2509 +- Include chain creation error in health check by @marun in https://github.com/ava-labs/avalanchego/pull/2519 +- Make X-chain mempool safe for concurrent use by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2520 +- Initialize transactions once by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2521 +- `vms/avm`: Remove usage of `require.Contains` from service tests by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2517 +- Move context lock into issueTx by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2524 +- Rework X-chain locking in tests by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2526 +- `vms/avm`: Simplify `mempool.Remove` signature by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2527 +- Remove unused mocks by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2528 +- Move `avm.newContext` to `snowtest.Context` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2513 +- Do not fail-fast Tests / Unit by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2530 +- Make P-Chain Mempool thread-safe by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/2523 +- `vms/platformvm`: Use `snowtest.Context` helper by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2515 +- Export mempool errors by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2531 +- Move locking into issueTx by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2532 +- Fix 
merge in wallet service by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2534 +- Introduce TxVerifier interface to network by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2533 +- Export P-Chain Mempool Errors by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/2535 +- Rename `Version` message to `Handshake` by @danlaine in https://github.com/ava-labs/avalanchego/pull/2479 +- Rename myVersionTime to ipSigningTime by @danlaine in https://github.com/ava-labs/avalanchego/pull/2537 +- Remove resolved TODO by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2540 +- Only initialize Txs once by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/2538 +- JSON marshal the `Transactions` field in `BanffProposalBlocks` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2541 +- Enable `predeclared` linter by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2539 +- Move context lock into `network.issueTx` by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/2525 +- Remove comment on treating failed sends as FATAL by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/2544 +- Add TxVerifier interface to network by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/2542 +- X-chain SDK gossip by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/2490 +- Remove network context by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/2543 +- Remove `snow.DefaultContextTest` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2518 +- Fix windowing when no validator is available by @abi87 in https://github.com/ava-labs/avalanchego/pull/2529 +- Unexport fields from gossip.BloomFilter by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2547 +- P-Chain SDK Gossip by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/2487 +- Documentation Fixes: Grammatical Corrections and Typo Fixes Across Multiple Files by @joaolago1113 
in https://github.com/ava-labs/avalanchego/pull/2550 +- Notify block builder of txs after reject by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2549 +- Set dependabot target branch to `dev` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2553 +- Remove `MockLogger` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2554 +- Clean up merkleDB interface and duplicate code by @dboehm-avalabs in https://github.com/ava-labs/avalanchego/pull/2445 +- Do not mark txs as dropped when mempool is full by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2557 +- Update bug bounty program to immunefi by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2558 +- Fix p2p sdk metric labels by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2561 +- Suppress gossip warnings due to no sampled peers by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2562 +- Remove dead code and unnecessary lock from reflect codec by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2560 +- Remove unused index interface by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2564 +- Implement SetMap and use it in XP-chain mempools by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2555 +- `vms/platformvm`: Add `TestIterate` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2565 +- Cleanup codec usage by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2563 +- Remove `len` tag parsing from the reflect codec by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2559 +- Use more specific type by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2567 +- Standardize `onShutdownCtx` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2568 +- Verify avm mempool txs against the last accepted state by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2569 +- Update `CODEOWNERS` by 
@dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2570 +- Remove license from mocks by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2574 +- Add missing import by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2573 +- `vms/platformvm`: Prune mempool periodically by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2566 +- Update license header to 2024 by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2572 +- [MerkleDB] Make intermediate node cache two layered by @dboehm-avalabs in https://github.com/ava-labs/avalanchego/pull/2576 +- Fix merkledb rebuild iterator by @dboehm-avalabs in https://github.com/ava-labs/avalanchego/pull/2581 +- Fix intermediate node caching by @dboehm-avalabs in https://github.com/ava-labs/avalanchego/pull/2585 +- Remove codec length check after Durango by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2586 +- `tmpnet`: Use AvalancheLocalChainConfig for cchain genesis by @marun in https://github.com/ava-labs/avalanchego/pull/2583 +- `testing`: Ensure CheckBootstrapIsPossible is safe for teardown by @marun in https://github.com/ava-labs/avalanchego/pull/2582 +- `tmpnet`: Separate network into orchestration and configuration by @marun in https://github.com/ava-labs/avalanchego/pull/2464 +- Update uintsize implementation by @danlaine in https://github.com/ava-labs/avalanchego/pull/2590 +- Optimize bloom filter by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2588 +- Remove TLS key gen from networking tests by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2596 +- [utils/bloom] Optionally Update Bloom Filter Size on Reset by @patrick-ogrady in https://github.com/ava-labs/avalanchego/pull/2591 +- [ci] Increase Fuzz Time in Periodic Runs by @patrick-ogrady in https://github.com/ava-labs/avalanchego/pull/2599 +- `tmpnet`: Save metrics snapshot to disk before node shutdown by @marun in https://github.com/ava-labs/avalanchego/pull/2601 
+- chore: Fix typo s/useage/usage by @hugo-syn in https://github.com/ava-labs/avalanchego/pull/2602 +- Deprecate `SnowRogueCommitThresholdKey` and `SnowVirtuousCommitThresholdKey` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2600 +- Fix networking invalid field log by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2604 +- chore: Fix typo s/seperate/separate/ by @hugo-syn in https://github.com/ava-labs/avalanchego/pull/2605 +- Support dynamic port peerlist gossip by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2603 +- Replace `PeerListAck` with `GetPeerList` by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2580 +- Log critical consensus values during health checks by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2609 +- Update contributions branch to master by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2610 +- Add ip bloom metrics by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2614 +- `x/sync`: Auto-generate `MockNetworkClient` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2617 +- Remove CreateStaticHandlers from VM interface by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/2589 +- `tmpnet`: Add support for subnets by @marun in https://github.com/ava-labs/avalanchego/pull/2492 +- Update `go.uber.org/mock/gomock` to `v0.4.0` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2618 +- Add `mockgen` source mode for generics + bls imports by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2615 +- Verify all MockGen generated files are re-generated in CI by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2616 +- Move division by 0 check out of the bloom loops by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2622 +- P-chain Add UTs around stakers persistence in platformvm state by @abi87 in https://github.com/ava-labs/avalanchego/pull/2505 +- Revert "Set 
dependabot target branch to `dev` (#2553)" by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2623 +- Remove remaining 2023 remnants by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2624 +- Deprecate push-based peerlist gossip flags by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2625 +- Remove support for compressing gzip messages by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2627 +- Always attempt to install mockgen `v0.4.0` before execution by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2628 +- Modify TLS parsing rules for Durango by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2458 + +### New Contributors + +- @joaolago1113 made their first contribution in https://github.com/ava-labs/avalanchego/pull/2550 +- @hugo-syn made their first contribution in https://github.com/ava-labs/avalanchego/pull/2602 + +**Full Changelog**: https://github.com/ava-labs/avalanchego/compare/v1.10.17...v1.10.18 + +## [v1.10.17](https://github.com/ava-labs/avalanchego/releases/tag/v1.10.17) + +This version is backwards compatible to [v1.10.0](https://github.com/ava-labs/avalanchego/releases/tag/v1.10.0). It is optional, but encouraged. + +The plugin version is unchanged at `30` and is compatible with versions `v1.10.15-v1.10.16`. 
+ +### APIs + +- Added `avalanche_{chainID}_blks_build_accept_latency` metric +- Added `avalanche_{chainID}_blks_issued{source}` metric with sources: + - `pull_gossip` + - `push_gossip` + - `put_gossip` which is deprecated + - `built` + - `unknown` +- Added `avalanche_{chainID}_issuer_stake_sum` metric +- Added `avalanche_{chainID}_issuer_stake_count` metric + +### Configs + +- Added: + - `--consensus-frontier-poll-frequency` +- Removed: + - `--consensus-accepted-frontier-gossip-frequency` +- Deprecated: + - `--consensus-accepted-frontier-gossip-validator-size` + - `--consensus-accepted-frontier-gossip-non-validator-size` + - `--consensus-accepted-frontier-gossip-peer-size` + - Updated the default value to 1 to align with the change in default gossip frequency + - `--consensus-on-accept-gossip-validator-size` + - `--consensus-on-accept-gossip-non-validator-size` + - `--consensus-on-accept-gossip-peer-size` + +### Fixes + +- Fixed `duplicated operation on provided value` error when executing atomic operations after state syncing the C-chain +- Removed usage of atomic trie after commitment +- Fixed atomic trie root overwrite during state sync +- Prevented closure of `stdout` and `stderr` when shutting down the logger + +### What's Changed + +- Remove Banff check from mempool verifier by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2360 +- Document storage growth in readme by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2364 +- Add metric for duration between block timestamp and acceptance time by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2366 +- `vms/platformvm`: Remove unused `withMetrics` txheap by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2373 +- Move peerTracker from x/sync to network/p2p by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/2356 +- Logging avoid closing standard outputs by @felipemadero in https://github.com/ava-labs/avalanchego/pull/2372 +- `vms/platformvm`: 
Adjust `Diff.Apply` signature by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2368 +- Add bls validator info to genesis by @felipemadero in https://github.com/ava-labs/avalanchego/pull/2371 +- Remove `engine.GetVM` by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2374 +- `vms/platformvm`: Consolidate `state` pkg mocks by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2370 +- Remove common bootstrapper by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2297 +- `vms/platformvm`: Move `toEngine` channel to mempool by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2333 +- `vms/avm`: Rename `states` pkg to `state` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2381 +- Implement generic bimap by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2383 +- Unexport RequestID from snowman engine by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2384 +- Add metric to track the stake weight of block providers by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2376 +- Add block source metrics to monitor gossip by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2386 +- Rename `D` to `Durango` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2389 +- Replace periodic push accepted gossip with pull preference gossip for block discovery by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2367 +- MerkleDB Remove ID from Node to reduce size and removal channel creation. 
by @dboehm-avalabs in https://github.com/ava-labs/avalanchego/pull/2324 +- Remove method `CappedList` from `set.Set` by @danlaine in https://github.com/ava-labs/avalanchego/pull/2395 +- Periodically PullGossip only from connected validators by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2399 +- Update bootstrap IPs by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2396 +- Rename `testnet` fixture to `tmpnet` by @marun in https://github.com/ava-labs/avalanchego/pull/2307 +- Add `p2p.Network` component by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/2283 +- `vms/platformvm`: Move `GetRewardUTXOs`, `GetSubnets`, and `GetChains` to `State` interface by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2402 +- Add more descriptive formatted error by @aaronbuchwald in https://github.com/ava-labs/avalanchego/pull/2403 + +**Full Changelog**: https://github.com/ava-labs/avalanchego/compare/v1.10.16...v1.10.17 + +## [v1.10.16](https://github.com/ava-labs/avalanchego/releases/tag/v1.10.16) + +This version is backwards compatible to [v1.10.0](https://github.com/ava-labs/avalanchego/releases/tag/v1.10.0). It is optional, but encouraged. + +The plugin version is unchanged at `30` and compatible with version `v1.10.15`. + +### APIs + +- Added log level information to the result of `admin.setLoggerLevel` +- Updated `info.peers` to return chain aliases for `benched` chains +- Added support to sample validators of non-tracked subnets with `platform.sampleValidators` +- Added `avalanche_{chainID}_max_verified_height` metric to track the highest verified block + +### Configs + +- Added `--db-read-only` to run the node without writing to disk. 
+ - This flag is only expected to be used during testing as it will cause memory use to increase over time +- Removed `--bootstrap-retry-enabled` +- Removed `--bootstrap-retry-warn-frequency` + +### Fixes + +- Fixed packing of large block requests during C-chain state sync +- Fixed order of updating acceptor tip and sending chain events to C-chain event subscribers + +### What's Changed + +- Return log levels from admin.SetLoggerLevel by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2250 +- feat(api) : Peers function to return the PrimaryAlias of the chainID by @DoTheBestToGetTheBest in https://github.com/ava-labs/avalanchego/pull/2251 +- Switch to using require.TestingT interface in SenderTest struct by @marun in https://github.com/ava-labs/avalanchego/pull/2258 +- Cleanup `ipcs` `Socket` test by @danlaine in https://github.com/ava-labs/avalanchego/pull/2257 +- Require poll metrics to be registered by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2260 +- Track all subnet validator sets in the validator manager by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2253 +- e2e: Make NewWallet and NewEthclient regular functions by @marun in https://github.com/ava-labs/avalanchego/pull/2262 +- Fix typos in docs by @vuittont60 in https://github.com/ava-labs/avalanchego/pull/2261 +- Remove Token constants information from keys by @dboehm-avalabs in https://github.com/ava-labs/avalanchego/pull/2197 +- Remove unused `UnsortedEquals` function by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2264 +- Document p2p package by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/2254 +- Use extended public key to derive ledger addresses by @felipemadero in https://github.com/ava-labs/avalanchego/pull/2246 +- `merkledb` -- rename nit by @danlaine in https://github.com/ava-labs/avalanchego/pull/2267 +- `merkledb` -- fix nil check in test by @danlaine in https://github.com/ava-labs/avalanchego/pull/2268 +- 
Add read-only database flag (`--db-read-only`) by @danlaine in https://github.com/ava-labs/avalanchego/pull/2266 +- `merkledb` -- remove unneeded var declarations by @danlaine in https://github.com/ava-labs/avalanchego/pull/2269 +- Add fuzz test for `NewIteratorWithStartAndPrefix` by @danlaine in https://github.com/ava-labs/avalanchego/pull/1992 +- Return if element was deleted from `Hashmap` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2271 +- `mempool.NewMempool` -> `mempool.New` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2276 +- e2e: Refactor suite setup and helpers to tests/fixture/e2e for reuse by coreth by @marun in https://github.com/ava-labs/avalanchego/pull/2265 +- Cleanup platformvm mempool errs by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2278 +- MerkleDB:Naming and comments cleanup by @dboehm-avalabs in https://github.com/ava-labs/avalanchego/pull/2274 +- Move `DropExpiredStakerTxs` to platformvm mempool by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2279 +- Cleanup `ids.NodeID` usage by @abi87 in https://github.com/ava-labs/avalanchego/pull/2280 +- Genesis validators cleanup by @abi87 in https://github.com/ava-labs/avalanchego/pull/2282 +- Remove Lazy Initialize on Node by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/1384 +- Remove sentinel node from MerkleDB proofs by @dboehm-avalabs in https://github.com/ava-labs/avalanchego/pull/2106 +- Embed `noop` handler for all unhandled messages by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2288 +- `merkledb` -- Add `Clearer` interface by @danlaine in https://github.com/ava-labs/avalanchego/pull/2277 +- Simplify get server creation by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2285 +- Move management of platformvm preferred block to `executor.Manager` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2292 +- Add `recentTxsLock` to platform `network` struct by @dhrubabasu in 
https://github.com/ava-labs/avalanchego/pull/2294 +- e2e: More fixture refinement in support of coreth integration testing by @marun in https://github.com/ava-labs/avalanchego/pull/2275 +- Add `VerifyTx` to `executor.Manager` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2293 +- Simplify avalanche bootstrapping by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2286 +- Replace unique slices with sets in the engine interface by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2317 +- Use zap.Stringer rather than zap.Any by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2320 +- Move `AddUnverifiedTx` logic to `network.IssueTx` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2310 +- Remove `AddUnverifiedTx` from `Builder` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2311 +- Remove error from SDK AppGossip handler by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/2252 +- Rename AppRequestFailed to AppError by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/2321 +- Remove `Network` interface from `Builder` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2312 +- Update `error_code` to be sint32 instead of uint32. 
by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/2322 +- Refactor bootstrapper implementation into consensus by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2300 +- Pchain - Cleanup NodeID generation in UTs by @abi87 in https://github.com/ava-labs/avalanchego/pull/2291 +- nit: loop --> variadic by @danlaine in https://github.com/ava-labs/avalanchego/pull/2316 +- Update zap dependency to v1.26.0 by @danlaine in https://github.com/ava-labs/avalanchego/pull/2325 +- Remove useless anon functions by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2326 +- Move `network` implementation to separate package by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2296 +- Unexport avalanche constant from common package by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2327 +- Remove `common.Config` functions by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2328 +- Move engine startup into helper function by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2329 +- Remove bootstrapping retry config by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2301 +- Export snowman bootstrapper by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2331 +- Remove common.Config from syncer.Config by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2330 +- `platformvm.VM` -- replace `Config` field with `validators.Manager` by @danlaine in https://github.com/ava-labs/avalanchego/pull/2319 +- Improve height monitoring by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2347 +- Cleanup snowman consensus metrics by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2349 +- Expand consensus health check by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2354 +- Reduce the size of the OracleBlock interface by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2355 +- 
[vms/proposervm] Update Build Heuristic by @patrick-ogrady in https://github.com/ava-labs/avalanchego/pull/2348 +- Use linkedhashmap for P-Chain mempool by @gyuho in https://github.com/ava-labs/avalanchego/pull/1536 +- Increase txs in pool metric when adding tx by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2361 + +### New Contributors + +- @DoTheBestToGetTheBest made their first contribution in https://github.com/ava-labs/avalanchego/pull/2251 +- @vuittont60 made their first contribution in https://github.com/ava-labs/avalanchego/pull/2261 + +**Full Changelog**: https://github.com/ava-labs/avalanchego/compare/v1.10.15...v1.10.16 + +## [v1.10.15](https://github.com/ava-labs/avalanchego/releases/tag/v1.10.15) + +This version is backwards compatible to [v1.10.0](https://github.com/ava-labs/avalanchego/releases/tag/v1.10.0). It is optional, but encouraged. + +The plugin version is updated to `30`; all plugins must update to be compatible. + +### Configs + +- Added `pebble` as an allowed option to `--db-type` + +### Fixes + +- Fixed C-chain tracer API panic + +### What's Changed + +- Reduce allocations on insert and remove by @dboehm-avalabs in https://github.com/ava-labs/avalanchego/pull/2201 +- `merkledb` -- shift nit by @danlaine in https://github.com/ava-labs/avalanchego/pull/2218 +- Update `golangci-lint` to `v1.55.1` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2228 +- Add json marshal tests to existing serialization tests in `platformvm/txs` pkg by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2227 +- Move all blst function usage to `bls` pkg by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2222 +- `merkledb` -- don't pass `BranchFactor` to `encodeDBNode` by @danlaine in https://github.com/ava-labs/avalanchego/pull/2217 +- Add `utils.Err` helper by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2212 +- Enable `perfsprint` linter by @dhrubabasu in 
https://github.com/ava-labs/avalanchego/pull/2229 +- Trim down size of secp256k1 `Factory` struct by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2223 +- Fix test typos by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2233 +- P2P AppRequestFailed protobuf definition by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/2111 +- Remove error from Router AppGossip by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/2238 +- Document host and port behavior in help text by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2236 +- Remove `database.Manager` by @danlaine in https://github.com/ava-labs/avalanchego/pull/2239 +- Add `BaseTx` support to platformvm by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2232 +- Add `pebble` as valid value for `--db-type`. by @danlaine in https://github.com/ava-labs/avalanchego/pull/2244 +- Add nullable option to codec by @nytzuga in https://github.com/ava-labs/avalanchego/pull/2171 + +**Full Changelog**: https://github.com/ava-labs/avalanchego/compare/v1.10.14...v1.10.15 + +## [v1.10.14](https://github.com/ava-labs/avalanchego/releases/tag/v1.10.14) + +This version is backwards compatible to [v1.10.0](https://github.com/ava-labs/avalanchego/releases/tag/v1.10.0). It is optional, but encouraged. + +The plugin version is unchanged at `29` and compatible with version `v1.10.13`. 
+ +### Configs + +- Deprecated `--api-ipcs-enabled` +- Deprecated `--ipcs-chain-ids` +- Deprecated `--ipcs-path` +- Deprecated `--api-keystore-enabled` + +### Fixes + +- Fixed shutdown of timeout manager +- Fixed racy access of the shutdown time + +### What's Changed + +- Remove build check from unit tests by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2189 +- Update cgo usage by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2184 +- Deprecate IPC configs by @danlaine in https://github.com/ava-labs/avalanchego/pull/2168 +- Update P2P proto docs by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/2181 +- Merkle db Make Paths only refer to lists of nodes by @dboehm-avalabs in https://github.com/ava-labs/avalanchego/pull/2143 +- Deprecate keystore config by @danlaine in https://github.com/ava-labs/avalanchego/pull/2195 +- Add tests for BanffBlock serialization by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2194 +- Move Shutdown lock from Handler into Engines by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2179 +- Move HealthCheck lock from Handler into Engines by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2173 +- Implement Heap Map by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/2137 +- Move selectStartGear lock from Handler into Engines by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2182 +- Add Heap Set by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/2136 +- Shutdown TimeoutManager during node Shutdown by @abi87 in https://github.com/ava-labs/avalanchego/pull/1707 +- Redesign validator set management to enable tracking all subnets by @ceyonur in https://github.com/ava-labs/avalanchego/pull/1857 +- Update local network readme by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2203 +- Use custom codec for validator metadata by @abi87 in https://github.com/ava-labs/avalanchego/pull/1510 +- Add RSA 
max key length test by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2205 +- Remove duplicate networking check by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2204 +- Update TestDialContext to use ManuallyTrack by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/2209 +- Remove contains from validator manager interface by @ceyonur in https://github.com/ava-labs/avalanchego/pull/2198 +- Move the overridden manager into the node by @ceyonur in https://github.com/ava-labs/avalanchego/pull/2199 +- Remove `aggregate` struct by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2213 +- Add log for ungraceful shutdown on startup by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/2215 +- Add pebble database implementation by @danlaine in https://github.com/ava-labs/avalanchego/pull/1999 +- Add `TransferSubnetOwnershipTx` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2178 +- Revert networking AllowConnection change by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2219 +- Fix unexpected unlock by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2221 +- Improve logging for block verification failure by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2224 + +**Full Changelog**: https://github.com/ava-labs/avalanchego/compare/v1.10.13...v1.10.14 + +## [v1.10.13](https://github.com/ava-labs/avalanchego/releases/tag/v1.10.13) + +This version is backwards compatible to [v1.10.0](https://github.com/ava-labs/avalanchego/releases/tag/v1.10.0). It is optional, but encouraged. + +The plugin version is updated to `29`; all plugins must update to be compatible. 
+ +### Fixes + +- Added `Prefetcher` to the `merkledb` interface +- Fixed json marshalling of `TrackedSubnets` and `AllowedNodes` + +### What's Changed + +- Fix typo in block formation logic documentation by @kyoshisuki in https://github.com/ava-labs/avalanchego/pull/2158 +- Marshal blocks and transactions inside API calls by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2153 +- Remove lock options from the info api by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2149 +- Remove write lock option from the avm static API by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2154 +- Remove write lock option from the avm wallet API by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2155 +- Fix json marshalling of Sets by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2161 +- Rename `removeSubnetValidatorValidation` to `verifyRemoveSubnetValidatorTx` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2162 +- Remove lock options from the IPCs api by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2151 +- Remove write lock option from the xsvm API by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2152 +- Remove lock options from the admin API by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2150 +- Remove aliasing of `math` standard lib by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2163 +- Remove write lock option from the platformvm API by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2157 +- Remove write lock option from the avm rpc API by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2156 +- Remove context lock from API VM interface by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2165 +- Use set.Of rather than set.Add by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2164 +- Bump google.golang.org/grpc from 1.55.0 to 1.58.3 by 
@dependabot in https://github.com/ava-labs/avalanchego/pull/2159 +- [x/merkledb] `Prefetcher` interface by @patrick-ogrady in https://github.com/ava-labs/avalanchego/pull/2167 +- Validator Diffs: docs and UTs cleanup by @abi87 in https://github.com/ava-labs/avalanchego/pull/2037 +- MerkleDB Reduce buffer creation/memcopy on path construction by @dboehm-avalabs in https://github.com/ava-labs/avalanchego/pull/2124 +- Fix some P-chain UTs by @abi87 in https://github.com/ava-labs/avalanchego/pull/2117 + +### New Contributors + +- @kyoshisuki made their first contribution in https://github.com/ava-labs/avalanchego/pull/2158 + +**Full Changelog**: https://github.com/ava-labs/avalanchego/compare/v1.10.12...v1.10.13 + +## [v1.10.12](https://github.com/ava-labs/avalanchego/releases/tag/v1.10.12) + +This version is backwards compatible to [v1.10.0](https://github.com/ava-labs/avalanchego/releases/tag/v1.10.0). It is optional, but encouraged. + +The plugin version is unchanged at `28` and compatible with versions `v1.10.9 - v1.10.11`. 
+ +### APIs + +- Added `avalanche_{chainID}_total_weight` metric +- Added `avalanche_{chainID}_num_validators` metric +- Added `avalanche_{chainID}_num_processing_ancestor_fetches_failed` metric +- Added `avalanche_{chainID}_num_processing_ancestor_fetches_dropped` metric +- Added `avalanche_{chainID}_num_processing_ancestor_fetches_succeeded` metric +- Added `avalanche_{chainID}_num_processing_ancestor_fetches_unneeded` metric +- Added `avalanche_{chainID}_num_missing_accepted_blocks` metric +- Added `avalanche_{chainID}_selected_vote_index_count` metric +- Added `avalanche_{chainID}_selected_vote_index_sum` metric + +### Configs + +- Added `--snow-preference-quorum-size` flag +- Added `--snow-confidence-quorum-size` flag +- Added `"fx-owner-cache-size"` to the P-chain config + +### Fixes + +- Fixed concurrent node shutdown and chain creation race +- Updated http2 implementation to patch CVE-2023-39325 +- Exited `network.dial` early to avoid goroutine leak when shutting down +- Reduced log level of `"failed to send peer list for handshake"` messages from `ERROR` to `DEBUG` +- Reduced log level of `"state pruning failed"` messages from `ERROR` to `WARN` + +### What's Changed + +- Add last accepted height to the snowman interface by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2091 +- Delete kurtosis CI jobs by @marun in https://github.com/ava-labs/avalanchego/pull/2068 +- e2e: Ensure all Issue* calls use the default context by @marun in https://github.com/ava-labs/avalanchego/pull/2069 +- Remove Finalized from the consensus interface by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2093 +- Remove embedding of `verify.Verifiable` in `FxCredential` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2089 +- Clarify decidable interface simple default parameter tests by @gyuho in https://github.com/ava-labs/avalanchego/pull/2094 +- snow/consensus/snowman/poll: remove "unused" no early term poller by @gyuho in 
https://github.com/ava-labs/avalanchego/pull/2095 +- Cleanup `.golangci.yml` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2097 +- Refactor `ancestor.Tree` by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2099 +- Update AMI runner image and instance type by @charlie-ava in https://github.com/ava-labs/avalanchego/pull/1939 +- Add `tagalign` linter by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2084 +- Fix flaky BuildBlockIsIdempotent test by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2101 +- Make `network.dial` honor context cancellation. by @danlaine in https://github.com/ava-labs/avalanchego/pull/2061 +- Add preference lookups by height to the consensus interface by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2092 +- Remove duplicate pullQuery method by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2103 +- Add additional validator set metrics by @aaronbuchwald in https://github.com/ava-labs/avalanchego/pull/2051 +- Remove `snowball.Initialize` and `snowball.Factory` by @danlaine in https://github.com/ava-labs/avalanchego/pull/2104 +- Remove initialize functions from the snowball package by @danlaine in https://github.com/ava-labs/avalanchego/pull/2105 +- Remove `genesis.State` by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/2112 +- add `SetSubnetOwner` to `Chain` interface by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2031 +- Move vote bubbling before poll termination by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2100 +- testing: Switch upgrade test to testnet fixture by @marun in https://github.com/ava-labs/avalanchego/pull/1887 +- Reduce archivedb key lengths by 1 byte by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2113 +- Cleanup uptime manager constructor by @abi87 in https://github.com/ava-labs/avalanchego/pull/2118 +- MerkleDB Compact Path Bytes by @dboehm-avalabs in 
https://github.com/ava-labs/avalanchego/pull/2010 +- MerkleDB Path changes cleanup by @dboehm-avalabs in https://github.com/ava-labs/avalanchego/pull/2120 +- Fix consensus engine interface comments by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2115 +- Standardize consensus variable names in tests by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2129 +- Prevent bytesNeeded overflow by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2130 +- Migrate xsvm from github.com/ava-labs/xsvm by @marun in https://github.com/ava-labs/avalanchego/pull/2045 +- Fix handling of wg in the networking dial test by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2132 +- Update go.mod and add update check by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2133 +- Reduce log level of failing to send a peerList message by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2134 +- RPCChainVM fail-fast health RPCs by @hexfusion in https://github.com/ava-labs/avalanchego/pull/2123 +- MerkleDB allow warming node cache by @dboehm-avalabs in https://github.com/ava-labs/avalanchego/pull/2128 +- Add vote bubbling metrics by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2138 +- Reduce log level of an error during Prune by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2141 +- Exit chain creation routine before shutting down chain router by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2140 +- Merkle db fix type cast bug by @dboehm-avalabs in https://github.com/ava-labs/avalanchego/pull/2142 +- Add Warp Payload Types by @nytzuga in https://github.com/ava-labs/avalanchego/pull/2116 +- Add height voting for chits by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2102 +- Add Heap Queue by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/2135 +- Add additional payload.Hash examples by @StephenButtolph in 
https://github.com/ava-labs/avalanchego/pull/2145 +- Split Alpha into AlphaPreference and AlphaConfidence by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2125 + +**Full Changelog**: https://github.com/ava-labs/avalanchego/compare/v1.10.11...v1.10.12 + +## [v1.10.11](https://github.com/ava-labs/avalanchego/releases/tag/v1.10.11) + +This version is backwards compatible to [v1.10.0](https://github.com/ava-labs/avalanchego/releases/tag/v1.10.0). It is optional, but encouraged. + +The plugin version is unchanged at `28` and compatible with versions `v1.10.9 - v1.10.10`. + +### Fixes + +- Prevented overzealous benching due to dropped AppRequests +- Populated the process file atomically to avoid racy reads + +### What's Changed + +- Rename platformvm/blocks to platformvm/block by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/1980 +- RewardValidatorTx cleanup by @abi87 in https://github.com/ava-labs/avalanchego/pull/1891 +- Cancel stale SH actions by @danlaine in https://github.com/ava-labs/avalanchego/pull/2003 +- e2e: Switch assertion library from gomega to testify by @marun in https://github.com/ava-labs/avalanchego/pull/1909 +- e2e: Add bootstrap checks to migrated kurtosis tests by @marun in https://github.com/ava-labs/avalanchego/pull/1935 +- Add `GetTransformSubnetTx` helper by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2047 +- Add readme for the staking/local folder by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2046 +- use `IsCortinaActivated` helper by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2048 +- add `D` upgrade boilerplate by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2049 +- e2e: Ensure interchain workflow coverage for the P-Chain by @marun in https://github.com/ava-labs/avalanchego/pull/1882 +- e2e: Switch to using default timed context everywhere by @marun in https://github.com/ava-labs/avalanchego/pull/1910 +- Remove indentation + confusing comment 
by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2053 +- Delete ErrDelegatorSubset by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/2055 +- Fix default validator start time by @marun in https://github.com/ava-labs/avalanchego/pull/2058 +- Enable workflows to be triggered by merge queue by @marun in https://github.com/ava-labs/avalanchego/pull/2057 +- e2e: Migrate staking rewards test from kurtosis by @marun in https://github.com/ava-labs/avalanchego/pull/1767 +- Fix LRU documentation comment by @anusha-ctrl in https://github.com/ava-labs/avalanchego/pull/2036 +- Ignore AppResponse timeouts for benching by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2066 +- trace: provide appName and version from Config by @najeal in https://github.com/ava-labs/avalanchego/pull/1893 +- Update perms.WriteFile to write atomically by @marun in https://github.com/ava-labs/avalanchego/pull/2063 +- ArchiveDB by @nytzuga in https://github.com/ava-labs/avalanchego/pull/1911 + +**Full Changelog**: https://github.com/ava-labs/avalanchego/compare/v1.10.10...v1.10.11 + +## [v1.10.10](https://github.com/ava-labs/avalanchego/releases/tag/v1.10.10) + +This version is backwards compatible to [v1.10.0](https://github.com/ava-labs/avalanchego/releases/tag/v1.10.0). It is optional, but encouraged. + +The plugin version is unchanged at `28` and compatible with version `v1.10.9`. 
+ +### APIs + +- Added `height` to the output of `platform.getCurrentSupply` + +### Configs + +- Added `proposerNumHistoricalBlocks` to subnet configs + +### Fixes + +- Fixed handling of `SIGTERM` signals in plugin processes prior to receiving a `Shutdown` message +- Fixed range proof commitment of empty proofs + +### What's Changed + +- e2e: Save network data for each test run as an uploaded artifact by @marun in https://github.com/ava-labs/avalanchego/pull/1856 +- e2e: Ensure interchain workflow coverage for X-Chain and C-Chain by @marun in https://github.com/ava-labs/avalanchego/pull/1871 +- MerkleDB Adjust New View function(s) by @dboehm-avalabs in https://github.com/ava-labs/avalanchego/pull/1927 +- e2e: Migrate duplicate node id test from kurtosis by @marun in https://github.com/ava-labs/avalanchego/pull/1573 +- Add tracing levels to merkledb by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1933 +- [x/merkledb] Add Configuration for `RootGenConcurrency` by @patrick-ogrady in https://github.com/ava-labs/avalanchego/pull/1936 +- e2e: Ensure testnet network dir is archived on failed test run by @marun in https://github.com/ava-labs/avalanchego/pull/1930 +- Merkle db cleanup view creation by @dboehm-avalabs in https://github.com/ava-labs/avalanchego/pull/1934 +- Add async DB deletion helper by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1931 +- Implement SDK handler to drop messages from non-validators by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/1917 +- Support proposervm historical block deletion by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1929 +- Remove thread pool by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1940 +- Merkledb split node storage into value and intermediate by @dboehm-avalabs in https://github.com/ava-labs/avalanchego/pull/1918 +- `merkledb` -- remove unneeded codec test helper by @danlaine in https://github.com/ava-labs/avalanchego/pull/1943 
+- `merkledb` -- add codec test and move helper by @danlaine in https://github.com/ava-labs/avalanchego/pull/1944 +- Add throttler implementation to SDK by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/1905 +- Add Throttled Handler implementation to SDK by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/1906 +- Change merkledb caches to be size based by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1947 +- Rename `node.marshal` to `node.bytes` by @danlaine in https://github.com/ava-labs/avalanchego/pull/1951 +- e2e: Switch to a default network node count of 2 by @marun in https://github.com/ava-labs/avalanchego/pull/1928 +- MerkleDB Improve Node Size Calculation by @dboehm-avalabs in https://github.com/ava-labs/avalanchego/pull/1950 +- `merkledb` -- remove unneeded return values by @danlaine in https://github.com/ava-labs/avalanchego/pull/1959 +- `sync` -- reduce test sizes by @danlaine in https://github.com/ava-labs/avalanchego/pull/1962 +- `merkledb` -- limit number of goroutines calculating node IDs by @danlaine in https://github.com/ava-labs/avalanchego/pull/1960 +- Add gossip package to p2p SDK by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/1958 +- Improve state sync logging by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1955 +- Update golang to 1.20.8 by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1826 +- Use odd-numbered request ids for SDK by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/1975 +- update iterator invariant by @danlaine in https://github.com/ava-labs/avalanchego/pull/1978 +- Document common usage of requestIDs for snow senders by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1981 +- e2e: Diagnose and fix flakes by @marun in https://github.com/ava-labs/avalanchego/pull/1941 +- `merkledb` -- `db_test.go` cleanup by @danlaine in https://github.com/ava-labs/avalanchego/pull/1954 +- `merkledb` -- make config fields 
uints by @danlaine in https://github.com/ava-labs/avalanchego/pull/1963 +- Only gracefully exit rpcchainvm server after Shutdown by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1988 +- Add contexts to SDK callbacks by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/1977 +- Change max response size to target response size by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/1995 +- Add sdk gossip handler metrics by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/1997 +- Add p2p SDK Router metrics by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/2000 +- Merkledb Attempt to reduce test runtime by @dboehm-avalabs in https://github.com/ava-labs/avalanchego/pull/1990 +- longer timeout on windows UT by @danlaine in https://github.com/ava-labs/avalanchego/pull/2001 +- `sync` -- log tweaks by @danlaine in https://github.com/ava-labs/avalanchego/pull/2008 +- Add Validator Gossiper by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/2015 +- database: comment that Get returns ErrNotFound if key is not present by @aaronbuchwald in https://github.com/ava-labs/avalanchego/pull/2018 +- Return `height` from `GetCurrentSupply` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2022 +- simplify platformvm `GetHeight` function by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2023 +- Merkle db fix range proof commit bug by @dboehm-avalabs in https://github.com/ava-labs/avalanchego/pull/2019 +- Add `bag.Of` helper by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2027 +- Cleanup early poll termination logic by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2029 +- fix typo by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2030 +- Merkle db intermediate node key compression by @dboehm-avalabs in https://github.com/ava-labs/avalanchego/pull/1987 +- Improve RPC Chain version mismatch error message by @martineckardt in 
https://github.com/ava-labs/avalanchego/pull/2021 +- Move subnet owner lookup to platformvm state by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2024 +- Fix fuzz tests; add iterator fuzz test by @danlaine in https://github.com/ava-labs/avalanchego/pull/1991 +- Refactor subnet validator primary network requirements by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2014 +- Rename events to event by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/1973 +- Add function to initialize SampleableSet by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/2017 +- add `IsCortinaActivated` helper by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2013 +- Fix P-chain Import by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2035 +- Rename avm/blocks package to avm/block by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/1970 +- Merkledb Update rangeproof proto to be consistent with changeproof proto by @dboehm-avalabs in https://github.com/ava-labs/avalanchego/pull/2040 +- `merkledb` -- encode lengths as uvarints by @danlaine in https://github.com/ava-labs/avalanchego/pull/2039 +- MerkleDB Remove GetNodeFromParent by @dboehm-avalabs in https://github.com/ava-labs/avalanchego/pull/2041 + +### New Contributors + +- @martineckardt made their first contribution in https://github.com/ava-labs/avalanchego/pull/2021 + +**Full Changelog**: https://github.com/ava-labs/avalanchego/compare/v1.10.9...v1.10.10 + +## [v1.10.9](https://github.com/ava-labs/avalanchego/releases/tag/v1.10.9) + +This version is backwards compatible to [v1.10.0](https://github.com/ava-labs/avalanchego/releases/tag/v1.10.0). It is optional, but encouraged. + +The plugin version is updated to `28` all plugins must update to be compatible. 
+ +### Configs + +- Changed the default value of `--network-compression-type` from `gzip` to `zstd` + +### Fixes + +- Marked corruptabledb as corrupted after encountering an error during iteration +- Fixed proposervm error handling during startup + +### What's Changed + +- `merkledb` -- verify range proof in fuzz test; fix bound error by @danlaine in https://github.com/ava-labs/avalanchego/pull/1789 +- Update default compression type to zstd by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1839 +- Migrate to `uber-go/mock` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/1840 +- `corruptabledb` -- corrupt on iterator error by @danlaine in https://github.com/ava-labs/avalanchego/pull/1829 +- Add support for Maps to the reflect_codec by @nytzuga in https://github.com/ava-labs/avalanchego/pull/1790 +- Make linter fail if `github.com/golang/mock/gomock` is used by @danlaine in https://github.com/ava-labs/avalanchego/pull/1843 +- Firewoodize merkle db Part 1: Make Views ReadOnly by @dboehm-avalabs in https://github.com/ava-labs/avalanchego/pull/1816 +- E2E tests -- use appropriate timeouts by @danlaine in https://github.com/ava-labs/avalanchego/pull/1851 +- e2e: Switch to testnet fixture by @marun in https://github.com/ava-labs/avalanchego/pull/1709 +- `secp256k1` -- add fuzz tests by @danlaine in https://github.com/ava-labs/avalanchego/pull/1809 +- Add fuzz test for complex codec unmarshalling by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1846 +- Simplify exported interface of the primary wallet by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1849 +- Regenerate mocks by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/1860 +- Remove history btree by @danlaine in https://github.com/ava-labs/avalanchego/pull/1861 +- `merkledb` -- Remove `CommitToParent` by @danlaine in https://github.com/ava-labs/avalanchego/pull/1854 +- `merkledb` -- remove other history btree by @danlaine in 
https://github.com/ava-labs/avalanchego/pull/1862 +- `merkledb` -- add path fuzz test by @danlaine in https://github.com/ava-labs/avalanchego/pull/1852 +- fix range proof verification case by @danlaine in https://github.com/ava-labs/avalanchego/pull/1834 +- `merkledb` -- add change proof fuzz test; fix change proof verification by @danlaine in https://github.com/ava-labs/avalanchego/pull/1802 +- Warp readme by @aaronbuchwald in https://github.com/ava-labs/avalanchego/pull/1780 +- CODEOWNERS: add marun to tests by @hexfusion in https://github.com/ava-labs/avalanchego/pull/1863 +- Add CI check that auto-generated code is up to date by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/1828 +- `sync` -- change proof request can return range proof by @danlaine in https://github.com/ava-labs/avalanchego/pull/1772 +- Ensure consistent use of best-practice `set -o` in all scripts by @marun in https://github.com/ava-labs/avalanchego/pull/1864 +- GetCanonicalValidatorSet minimal ValidatorState iface by @darioush in https://github.com/ava-labs/avalanchego/pull/1875 +- `sync` -- handle fatal error by @danlaine in https://github.com/ava-labs/avalanchego/pull/1874 +- `merkledb` -- use `Maybe` for start bounds by @danlaine in https://github.com/ava-labs/avalanchego/pull/1872 +- Add C-chain wallet to the primary network by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1850 +- e2e: Refactor keychain and wallet creation to test helpers by @marun in https://github.com/ava-labs/avalanchego/pull/1870 +- Update account nonce on exportTx accept by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1881 +- `sync` -- add workheap test by @danlaine in https://github.com/ava-labs/avalanchego/pull/1879 +- `merkledb` -- commit to db only by @danlaine in https://github.com/ava-labs/avalanchego/pull/1885 +- Remove node/value lock from trieview by @dboehm-avalabs in https://github.com/ava-labs/avalanchego/pull/1865 +- remove old todo by @danlaine in 
https://github.com/ava-labs/avalanchego/pull/1892 +- Fix race in TestHandlerDispatchInternal by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/1895 +- Remove duplicate code from proposervm block acceptance by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1894 +- e2e: Bump permissionless subnets timeouts by @marun in https://github.com/ava-labs/avalanchego/pull/1897 +- `merkledb` -- codec remove err checks by @danlaine in https://github.com/ava-labs/avalanchego/pull/1899 +- Merkle db fix new return type by @dboehm-avalabs in https://github.com/ava-labs/avalanchego/pull/1898 +- Add SDK Sampling interface by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/1877 +- Add NoOpHandler implementation to SDK by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/1903 +- Remove unused scripts by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1908 +- `merkledb` -- codec nits/cleanup by @danlaine in https://github.com/ava-labs/avalanchego/pull/1904 +- `merkledb` -- preallocate `bytes.Buffer` in codec by @danlaine in https://github.com/ava-labs/avalanchego/pull/1900 +- Proposervm height index repair fix by @abi87 in https://github.com/ava-labs/avalanchego/pull/1915 +- `merkledb` -- move and rename methods by @danlaine in https://github.com/ava-labs/avalanchego/pull/1919 +- Remove optional height indexing interface by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1896 +- `merkledb` -- nits by @danlaine in https://github.com/ava-labs/avalanchego/pull/1916 +- Fix code owners file by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1922 +- Drop invalid TLS certs during initial handshake by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1923 +- Restricted tls metrics by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1924 + +### New Contributors + +- @nytzuga made their first contribution in https://github.com/ava-labs/avalanchego/pull/1790 + 
+**Full Changelog**: https://github.com/ava-labs/avalanchego/compare/v1.10.8...v1.10.9 + +## [v1.10.8](https://github.com/ava-labs/avalanchego/releases/tag/v1.10.8) + +This version is backwards compatible to [v1.10.0](https://github.com/ava-labs/avalanchego/releases/tag/v1.10.0). It is optional, but encouraged. + +The plugin version is unchanged at `27` and compatible with versions `v1.10.5 - v1.10.7`. + +**This update changes the local network genesis. This version will not be able to join local networks with prior versions.** + +**The first startup of the P-Chain will perform indexing operations. This indexing runs in the background and does not impact restart time. During this indexing the node will report increased CPU, memory, and disk usage.** + +### APIs + +- Added `platform.getBlockByHeight` + +### Configs + +- Added `--partial-sync-primary-network` flag to enable non-validators to optionally sync only the P-chain on the primary network +- Added P-chain cache size configuration `block-id-cache-size` + +### Fixes + +- Fixed P-chain GetValidatorSet regression for subnets +- Changed `x/sync` range/change proof bounds from `[]byte` to `Maybe[[]byte]` +- Fixed `x/sync` error handling from failure to send app messages + +### What's Changed + +- Removes calls to ctrl.Finish by @darioush in https://github.com/ava-labs/avalanchego/pull/1803 +- e2e: Remove unnecessary transaction status checking by @marun in https://github.com/ava-labs/avalanchego/pull/1786 +- fix p2p mockgen location by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/1806 +- fix end proof verification by @danlaine in https://github.com/ava-labs/avalanchego/pull/1801 +- `merkledb` -- add proof fuzz test by @danlaine in https://github.com/ava-labs/avalanchego/pull/1804 +- `sync` -- re-add network client metrics by @danlaine in https://github.com/ava-labs/avalanchego/pull/1787 +- Add function to initialize set from elements by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/1808 
+- Add Maybe to the end bound of proofs (Part 1) by @dboehm-avalabs in https://github.com/ava-labs/avalanchego/pull/1793 +- add go version to --version by @amirhasanzadehpy in https://github.com/ava-labs/avalanchego/pull/1819 +- e2e: Add local network fixture by @marun in https://github.com/ava-labs/avalanchego/pull/1700 +- Fix test flake in TestProposalTxsInMempool by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1822 +- `sync` -- remove todo by @danlaine in https://github.com/ava-labs/avalanchego/pull/1788 +- Add Maybe to the end bound of proofs (Part 2) by @dboehm-avalabs in https://github.com/ava-labs/avalanchego/pull/1813 +- Move Maybe to its own package by @danlaine in https://github.com/ava-labs/avalanchego/pull/1817 +- `merkledb` -- clarify/improve change proof invariants by @danlaine in https://github.com/ava-labs/avalanchego/pull/1810 +- P-chain state prune + height index by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/1719 +- Update maintainer of the debian packages by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1825 +- Make platformvm implement `block.HeightIndexedChainVM` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/1746 +- Add P-chain `GetBlockByHeight` API method by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/1747 +- Update local genesis startTime by @ceyonur in https://github.com/ava-labs/avalanchego/pull/1811 +- `sync` -- add handling for fatal error by @danlaine in https://github.com/ava-labs/avalanchego/pull/1690 +- Add error logs for unexpected proposervm BuildBlock failures by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1832 +- Fix subnet validator set public key initialization by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1833 +- Document PendingTxs + BuildBlock consensus engine requirement by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1835 +- Bump github.com/supranational/blst from 
0.3.11-0.20230406105308-e9dfc5ee724b to 0.3.11 by @dependabot in https://github.com/ava-labs/avalanchego/pull/1831 +- Add Primary Network Lite Sync Option by @abi87 in https://github.com/ava-labs/avalanchego/pull/1769 +- Check P-chain ShouldPrune during Initialize by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1836 + +### New Contributors + +- @amirhasanzadehpy made their first contribution in https://github.com/ava-labs/avalanchego/pull/1819 +- @dependabot made their first contribution in https://github.com/ava-labs/avalanchego/pull/1831 + +**Full Changelog**: https://github.com/ava-labs/avalanchego/compare/v1.10.7...v1.10.8 + +## [v1.10.7](https://github.com/ava-labs/avalanchego/releases/tag/v1.10.7) + +This version is backwards compatible to [v1.10.0](https://github.com/ava-labs/avalanchego/releases/tag/v1.10.0). This release contains meaningful performance improvements and we recommend updating as soon as possible. + +The plugin version is unchanged at `27` and compatible with versions `v1.10.5 - v1.10.6`. 
+ +### APIs + +- Modified `platform.getValidatorsAt` to also return BLS public keys + +### Configs + +- Changed the default value of `--network-allow-private-ips` to `false` when the `--network-id` is either `fuji` or `mainnet` +- Added P-chain cache size configurations + - `block-cache-size` + - `tx-cache-size` + - `transformed-subnet-tx-cache-size` + - `reward-utxos-cache-size` + - `chain-cache-size` + - `chain-db-cache-size` +- Removed various long deprecated flags + - `--genesis` use `--genesis-file` instead + - `--genesis-content` use `--genesis-file-content` instead + - `--inbound-connection-throttling-cooldown` use `--network-inbound-connection-throttling-cooldown` instead + - `--inbound-connection-throttling-max-conns-per-sec` use `--network-inbound-connection-throttling-max-conns-per-sec` instead + - `--outbound-connection-throttling-rps` use `network-outbound-connection-throttling-rps` instead + - `--outbound-connection-timeout` use `network-outbound-connection-timeout` instead + - `--staking-enabled` use `sybil-protection-enabled` instead + - `--staking-disabled-weight` use `sybil-protection-disabled-weight` instead + - `--network-compression-enabled` use `--network-compression-type` instead + - `--consensus-gossip-frequency` use `--consensus-accepted-frontier-gossip-frequency` instead + +### Fixes + +- Fixed C-chain tx tracer crashes +- Fixed merkledb panic during state sync +- Fixed merkledb state sync stale target tracking + +### What's Changed + +- Remove deprecated configs by @ceyonur in https://github.com/ava-labs/avalanchego/pull/1712 +- upgrade: Increase all ANR timeouts to 2m to ensure CI reliability by @marun in https://github.com/ava-labs/avalanchego/pull/1737 +- fix sync panic by @danlaine in https://github.com/ava-labs/avalanchego/pull/1736 +- remove `vm.state` re-assignment in tests by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/1739 +- Expose BLS public keys from platform.getValidatorsAt by @StephenButtolph in 
https://github.com/ava-labs/avalanchego/pull/1740 +- Fix validator set diff tests by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1744 +- Replace List() with Map() on validators.Set by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1745 +- vms/platformvm: configure state cache sizes #1522 by @najeal in https://github.com/ava-labs/avalanchego/pull/1677 +- Support both `stateBlk`s and `Block`s in `blockDB` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/1748 +- Add `DefaultExecutionConfig` var to `platformvm` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/1749 +- Remove hanging TODO from prior change by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1758 +- Write process context on node start to simplify test orchestration by @marun in https://github.com/ava-labs/avalanchego/pull/1729 +- x/sync: add locks for peerTracker by @darioush in https://github.com/ava-labs/avalanchego/pull/1756 +- Add ids length constants by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1759 +- [x/sync] Update target locking by @patrick-ogrady in https://github.com/ava-labs/avalanchego/pull/1763 +- Export warp errors for external use by @aaronbuchwald in https://github.com/ava-labs/avalanchego/pull/1771 +- Remove unused networking constant by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1774 +- Change the default value of `--network-allow-private-ips` to `false` for `mainnet` and `fuji` by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1773 +- Remove context.TODO from tests by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1778 +- Replace linkeddb iterator with native DB range queries by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1752 +- Add support for measuring key size in caches by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1781 +- Bump coreth to v0.12.5-rc.0 by @aaronbuchwald in 
https://github.com/ava-labs/avalanchego/pull/1775 +- Add metric for the number of elements in a cache by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1782 +- Evict blocks based on size by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1766 +- Add proposervm state metrics by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1785 +- Register metercacher `len` metric by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1791 +- Reduce block cache sizes to 64 MiB by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1794 +- Add p2p sdk by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/1799 + +**Full Changelog**: https://github.com/ava-labs/avalanchego/compare/v1.10.5...v1.10.7 + +## [v1.10.5](https://github.com/ava-labs/avalanchego/releases/tag/v1.10.5) + +This version is backwards compatible to [v1.10.0](https://github.com/ava-labs/avalanchego/releases/tag/v1.10.0). It is optional, but encouraged. + +The plugin version is updated to `27` all plugins must update to be compatible. + +**The first startup of the X-Chain will perform an indexing operation. 
This indexing runs in the background and does not impact restart time.** + +### APIs + +- Added `avalanche_network_clock_skew_sum` metric +- Added `avalanche_network_clock_skew_count` metric + +### Configs + +- Added `--tracing-headers` to allow specifying headers to the tracing indexer + +### Fixes + +- Fixed API handler crash for `lookupState` in `prestate` tracer +- Fixed API handler crash for LOG edge cases in the `callTracer` + +### What's Changed + +- stop persisting rejected blocks on P-chain by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/1696 +- Ensure scripts/lint.sh failure when used with incompatible grep by @marun in https://github.com/ava-labs/avalanchego/pull/1711 +- sum peers clock skew into metric by @najeal in https://github.com/ava-labs/avalanchego/pull/1695 +- Make AVM implement `block.HeightIndexedChainVM` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/1699 +- ProposerVM nits by @abi87 in https://github.com/ava-labs/avalanchego/pull/1688 +- Sorting -- Remove old `IsSortedAndUnique`, rename `IsSortedAndUniqueSortable` to `IsSortedAndUnique` by @danlaine in https://github.com/ava-labs/avalanchego/pull/1666 +- Update snow consensus doc post X-chain linearization by @exdx in https://github.com/ava-labs/avalanchego/pull/1703 +- `merkledb` / `sync` -- remove TODOs by @danlaine in https://github.com/ava-labs/avalanchego/pull/1718 +- remove cache TODOs by @danlaine in https://github.com/ava-labs/avalanchego/pull/1721 +- Adjust `NewSizedCache` to take in a size function by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/1725 +- Wallet issuance to return tx instead of tx id by @felipemadero in https://github.com/ava-labs/avalanchego/pull/1704 +- Add support for providing tracing headers by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1727 +- Only return accepted blocks in `GetStatelessBlock` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/1724 +- Proposermv fix goroutine leaks 
by @abi87 in https://github.com/ava-labs/avalanchego/pull/1713 +- Update warp msg format by @aaronbuchwald in https://github.com/ava-labs/avalanchego/pull/1686 +- Cleanup anr scripts by @ceyonur in https://github.com/ava-labs/avalanchego/pull/1714 +- remove TrackBandwidth from NetworkClient by @danlaine in https://github.com/ava-labs/avalanchego/pull/1716 +- Bump network start timeout by @marun in https://github.com/ava-labs/avalanchego/pull/1730 +- e2e: Ensure e2e.test is built with portable BLST by @marun in https://github.com/ava-labs/avalanchego/pull/1734 +- e2e: Increase all ANR timeouts to 2m to ensure CI reliability. by @marun in https://github.com/ava-labs/avalanchego/pull/1733 + +### New Contributors + +- @exdx made their first contribution in https://github.com/ava-labs/avalanchego/pull/1703 + +**Full Changelog**: https://github.com/ava-labs/avalanchego/compare/v1.10.4...v1.10.5 + +## [v1.10.4](https://github.com/ava-labs/avalanchego/releases/tag/v1.10.4) + +This version is backwards compatible to [v1.10.0](https://github.com/ava-labs/avalanchego/releases/tag/v1.10.0). It is optional, but encouraged. + +The plugin version is unchanged at `26` and compatible with versions `v1.10.1 - v1.10.3`. + +**The first startup of the X-Chain will perform a pruning operation. 
This pruning runs in the background and does not impact restart time.** + +### APIs + +- Removed `avalanche_X_vm_avalanche_metervm_pending_txs_count` metric +- Removed `avalanche_X_vm_avalanche_metervm_pending_txs_sum` metric +- Removed `avalanche_X_vm_avalanche_metervm_get_tx_count` metric +- Removed `avalanche_X_vm_avalanche_metervm_get_tx_sum` metric +- Removed `avalanche_X_vm_avalanche_metervm_get_tx_err_count` metric +- Removed `avalanche_X_vm_avalanche_metervm_get_tx_err_sum` metric + +### Configs + +- Added `--staking-host` to allow binding only on a specific address for staking +- Added `checksums-enabled` to the X-chain and P-chain configs + +### Fixes + +- Fixed `proposervm` `preForkBlock.Status()` response after the fork has occurred +- Fixed C-chain logs collection error when no receipts occur in a block +- Fixed merkledb's `findNextKey` when an empty end proof is provided +- Fixed 0 length key issues with proof generation and verification +- Fixed Docker execution on non-amd64 architectures + +### What's Changed + +- e2e: Support testing on MacOS without requiring firewall exceptions by @marun in https://github.com/ava-labs/avalanchego/pull/1613 +- Reduce resource log level by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1622 +- Improve `snow/` tests with `require` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/1503 +- Improve `x/` tests with `require` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/1454 +- `sync` -- fix `TestFindNextKeyRandom` by @danlaine in https://github.com/ava-labs/avalanchego/pull/1624 +- Improve `vms/` tests with `require` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/1505 +- Improve `database/` tests with `require` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/1506 +- Ban usage of `t.Fatal` and `t.Error` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/1453 +- chore: fix typo in binary_snowflake.go by @eltociear in 
https://github.com/ava-labs/avalanchego/pull/1630 +- Discriminate window fit err msg from overdelegated error msg by @felipemadero in https://github.com/ava-labs/avalanchego/pull/1606 +- Remove MaxConnectionAge gRPC StreamID overflow mitigation by @hexfusion in https://github.com/ava-labs/avalanchego/pull/1388 +- add fuzzing action by @danlaine in https://github.com/ava-labs/avalanchego/pull/1635 +- Remove dagState and GetUTXOFromID by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1632 +- Update all AVM tests for post-linearization by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1631 +- Remove PendingTxs from the DAGVM interface by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1641 +- Remove GetTx from the DAGVM interface by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1642 +- Bump coreth v0.12.4 by @aaronbuchwald in https://github.com/ava-labs/avalanchego/pull/1646 +- [x/merkledb] Remove useless `err` check by @patrick-ogrady in https://github.com/ava-labs/avalanchego/pull/1650 +- [x/merkledb] Trailing whitespace removal on README by @patrick-ogrady in https://github.com/ava-labs/avalanchego/pull/1649 +- Remove unneeded functions from UniqueTx by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1643 +- Simplify tx verification by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1654 +- `merkledb` -- fix `findNextKey` by @danlaine in https://github.com/ava-labs/avalanchego/pull/1653 +- Cleanup X-chain UniqueTx Dependencies by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1656 +- Prune X-chain State by @coffeeavax in https://github.com/ava-labs/avalanchego/pull/1427 +- Support building docker image on ARM64 by @dshiell in https://github.com/ava-labs/avalanchego/pull/1103 +- remove goreleaser by @danlaine in https://github.com/ava-labs/avalanchego/pull/1660 +- Fix Dockerfile on non amd64 platforms by @joshua-kim in 
https://github.com/ava-labs/avalanchego/pull/1661 +- Improve metrics error message by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1663 +- Remove X-chain UniqueTx by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1662 +- Add state checksums by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1658 +- Modify proposervm window by @najeal in https://github.com/ava-labs/avalanchego/pull/1638 +- sorting nit by @danlaine in https://github.com/ava-labs/avalanchego/pull/1665 +- `merkledb` -- rewrite and test range proof invariants; fix proof generation/verification bugs by @danlaine in https://github.com/ava-labs/avalanchego/pull/1629 +- Add minimum proposer window length by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1667 +- CI -- only run fuzz tests on ubuntu by @danlaine in https://github.com/ava-labs/avalanchego/pull/1636 +- `MerkleDB` -- remove codec version by @danlaine in https://github.com/ava-labs/avalanchego/pull/1671 +- `MerkleDB` -- use default config in all tests by @danlaine in https://github.com/ava-labs/avalanchego/pull/1590 +- `sync` -- reduce stuttering by @danlaine in https://github.com/ava-labs/avalanchego/pull/1672 +- `Sync` -- unexport field by @danlaine in https://github.com/ava-labs/avalanchego/pull/1673 +- `sync` -- nits and cleanup by @danlaine in https://github.com/ava-labs/avalanchego/pull/1674 +- `sync` -- remove unused code by @danlaine in https://github.com/ava-labs/avalanchego/pull/1676 +- Mark preForkBlocks after the fork as Rejected by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1683 +- `merkledb` -- fix comment by @danlaine in https://github.com/ava-labs/avalanchego/pull/1675 +- `MerkleDB` -- document codec by @danlaine in https://github.com/ava-labs/avalanchego/pull/1670 +- `sync` -- client cleanup by @danlaine in https://github.com/ava-labs/avalanchego/pull/1680 +- Update buf version to v1.23.1 by @aaronbuchwald in 
https://github.com/ava-labs/avalanchego/pull/1685 + +### New Contributors + +- @eltociear made their first contribution in https://github.com/ava-labs/avalanchego/pull/1630 +- @felipemadero made their first contribution in https://github.com/ava-labs/avalanchego/pull/1606 +- @dshiell made their first contribution in https://github.com/ava-labs/avalanchego/pull/1103 +- @najeal made their first contribution in https://github.com/ava-labs/avalanchego/pull/1638 + +**Full Changelog**: https://github.com/ava-labs/avalanchego/compare/v1.10.3...v1.10.4 + +## [v1.10.3](https://github.com/ava-labs/avalanchego/releases/tag/v1.10.3) + +This version is backwards compatible to [v1.10.0](https://github.com/ava-labs/avalanchego/releases/tag/v1.10.0). It is optional, but encouraged. The supported plugin version is `26`. + +**Users must specify the `--http-allowed-hosts` flag to receive inbound API traffic from non-local hosts.** + +### APIs + +- Added health metrics based on tags + - `avalanche_health_checks_failing{tag="TAG"}` + - `avalanche_liveness_checks_failing{tag="TAG"}` + - `avalanche_readiness_checks_failing{tag="TAG"}` +- Removed P-chain VM percent connected metrics + - `avalanche_P_vm_percent_connected` + - `avalanche_P_vm_percent_connected_subnet{subnetID="SUBNETID"}` +- Added percent connected metrics by chain + - `avalanche_{ChainID}_percent_connected` +- Removed `avalanche_network_send_queue_portion_full` metric + +### Configs + +- Added `--http-allowed-hosts` with a default value of `localhost` +- Removed `--snow-mixed-query-num-push-vdr` +- Removed `--snow-mixed-query-num-push-non-vdr` +- Removed `minPercentConnectedStakeHealthy` from the subnet config + +### Fixes + +- Fixed `platformvm.GetValidatorSet` returning incorrect BLS public keys +- Fixed IPv6 literal binding with `--http-host` +- Fixed P2P message log format + +### What's Changed + +- `x/sync` -- Add proto for P2P messages by @dboehm-avalabs in https://github.com/ava-labs/avalanchego/pull/1472 +- Bump 
Protobuf and tooling and add section to proto docs outlining buf publishing by @hexfusion in https://github.com/ava-labs/avalanchego/pull/1552 +- Minor pchain UTs cleanup by @abi87 in https://github.com/ava-labs/avalanchego/pull/1554 +- Add ping uptimes test by @ceyonur in https://github.com/ava-labs/avalanchego/pull/1550 +- Add workflow to mark stale issues and PRs by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/1443 +- Enforce inlining functions with a single error return in `require.NoError` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/1500 +- `x/sync` / `x/merkledb` -- add `SyncableDB` interface by @danlaine in https://github.com/ava-labs/avalanchego/pull/1555 +- Rename beacon to boostrapper, define bootstrappers in JSON file for cross-language compatibility by @gyuho in https://github.com/ava-labs/avalanchego/pull/1439 +- add P-chain height indexing by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/1447 +- Add P-chain `GetBlockByHeight` API method by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/1448 +- `x/sync` -- use for sending Range Proofs by @danlaine in https://github.com/ava-labs/avalanchego/pull/1537 +- Add test to ensure that database packing produces sorted values by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1560 +- Randomize unit test execution order to identify unwanted dependency by @marun in https://github.com/ava-labs/avalanchego/pull/1565 +- use `http.Error` instead of separately writing error code and message by @danlaine in https://github.com/ava-labs/avalanchego/pull/1564 +- Adding allowed http hosts flag by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/1566 +- `x/sync` -- Use proto for sending Change Proofs by @danlaine in https://github.com/ava-labs/avalanchego/pull/1541 +- Only send `PushQuery` messages after building the block by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/1428 +- Rename APIAllowedOrigins to HTTPAllowedOrigins 
by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/1567 +- Add GetBalance examples for the P-chain and X-chain wallets by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1569 +- Reduce number of test iterations by @danlaine in https://github.com/ava-labs/avalanchego/pull/1568 +- Re-add upgrade tests by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1410 +- Remove lists from Chits messages by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1412 +- Add more X-chain tests by @coffeeavax in https://github.com/ava-labs/avalanchego/pull/1487 +- fix typo by @meaghanfitzgerald in https://github.com/ava-labs/avalanchego/pull/1570 +- Reduce the number of test health checks by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1571 +- Fix proposervm.GetAncestors test flake by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1572 +- Remove list from AcceptedFrontier message by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1578 +- Remove version db from merkle db by @dboehm-avalabs in https://github.com/ava-labs/avalanchego/pull/1534 +- `MerkleDB` -- add eviction batch size config by @danlaine in https://github.com/ava-labs/avalanchego/pull/1586 +- `MerkleDB` -- fix `onEvictCache.Flush` by @danlaine in https://github.com/ava-labs/avalanchego/pull/1589 +- Revert P-Chain height index by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1591 +- `x/sync` -- Add `SyncableDB` proto by @danlaine in https://github.com/ava-labs/avalanchego/pull/1559 +- Clarify break on error during ancestors lookup by @hexfusion in https://github.com/ava-labs/avalanchego/pull/1580 +- Add buf-push github workflow by @hexfusion in https://github.com/ava-labs/avalanchego/pull/1556 +- Pchain bls key diff fix by @abi87 in https://github.com/ava-labs/avalanchego/pull/1584 +- Cleanup fx interface compliance by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1599 +- 
Improve metrics error msging by @anusha-ctrl in https://github.com/ava-labs/avalanchego/pull/1598 +- Separate health checks by tags by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1579 +- Separate subnet stake connected health and metrics from P-chain by @ceyonur in https://github.com/ava-labs/avalanchego/pull/1358 +- Merkle db iterator by @dboehm-avalabs in https://github.com/ava-labs/avalanchego/pull/1533 +- Fix unreadable message errors by @morrisettjohn in https://github.com/ava-labs/avalanchego/pull/1585 +- Log unexpected errors during GetValidatorSet by @hexfusion in https://github.com/ava-labs/avalanchego/pull/1592 +- `merkleDB` -- add inner heap type to syncWorkHeap by @danlaine in https://github.com/ava-labs/avalanchego/pull/1582 +- `sync` -- explain algorithm in readme by @danlaine in https://github.com/ava-labs/avalanchego/pull/1600 +- Rename license header file to avoid unintended license indexing by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1608 +- `merkledb` and `sync` -- use time based rand seed by @danlaine in https://github.com/ava-labs/avalanchego/pull/1607 +- add `local-prefixes` setting for `goimports` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/1612 +- snow/engine/snowman: instantiate voter after issuer by @gyuho in https://github.com/ava-labs/avalanchego/pull/1610 +- Update CodeQL to v2 by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1616 +- Remove old networking metric by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1619 +- Fix --http-host flag to support IPv6 by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1620 + +### New Contributors + +- @marun made their first contribution in https://github.com/ava-labs/avalanchego/pull/1565 +- @meaghanfitzgerald made their first contribution in https://github.com/ava-labs/avalanchego/pull/1570 +- @anusha-ctrl made their first contribution in 
https://github.com/ava-labs/avalanchego/pull/1598 +- @morrisettjohn made their first contribution in https://github.com/ava-labs/avalanchego/pull/1585 + +**Full Changelog**: https://github.com/ava-labs/avalanchego/compare/v1.10.2...v1.10.3 + +## [v1.10.2](https://github.com/ava-labs/avalanchego/releases/tag/v1.10.2) + +This version is backwards compatible to [v1.10.0](https://github.com/ava-labs/avalanchego/releases/tag/v1.10.0). It is optional, but encouraged. The supported plugin version is `26`. + +### APIs + +- Significantly improved the performance of `platform.getStake` +- Added `portion_filled` metric for all metered caches +- Added resource metrics by process + - `avalanche_system_resources_num_cpu_cycles` + - `avalanche_system_resources_num_disk_read_bytes` + - `avalanche_system_resources_num_disk_reads` + - `avalanche_system_resources_num_disk_write_bytes` + - `avalanche_system_resources_num_disk_writes` + +### Configs + +- Deprecated `--genesis` in favor of `--genesis-file` +- Deprecated `--genesis-content` in favor of `--genesis-file-content` +- Deprecated `--inbound-connection-throttling-cooldown` in favor of `--network-inbound-connection-throttling-cooldown` +- Deprecated `--inbound-connection-throttling-max-conns-per-sec` in favor of `--network-inbound-connection-throttling-max-conns-per-sec` +- Deprecated `--outbound-connection-throttling-rps` in favor of `--network-outbound-connection-throttling-rps` +- Deprecated `--outbound-connection-timeout` in favor of `--network-outbound-connection-timeout` +- Deprecated `--staking-enabled` in favor of `--sybil-protection-enabled` +- Deprecated `--staking-disabled-weight` in favor of `--sybil-protection-disabled-weight` +- Deprecated `--consensus-gossip-frequency` in favor of `--consensus-accepted-frontier-gossip-frequency` + +### Fixes + +- Fixed `--network-compression-type` to correctly honor the requested compression type, rather than always using gzip +- Fixed CPU metrics on macos + +### What's Changed + 
+- use `require` library functions in tests by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/1451 +- style nits in vm clients by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/1449 +- utils/logging: add "Enabled" method to remove redundant verbo logs by @gyuho in https://github.com/ava-labs/avalanchego/pull/1461 +- ban `require.EqualValues` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/1457 +- chains: do not hold write subnetsLock in health checks by @gyuho in https://github.com/ava-labs/avalanchego/pull/1460 +- remove zstd check by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/1459 +- use `require.IsType` for type assertions in tests by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/1458 +- vms/platformvm/service: nits (preallocate address slice, error msg) by @gyuho in https://github.com/ava-labs/avalanchego/pull/1477 +- ban `require.NotEqualValues` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/1470 +- use `require` in `api` and `utils/password` packages by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/1471 +- use "golang.org/x/term" as "golang.org/x/crypto/ssh/terminal" is deprecated by @gyuho in https://github.com/ava-labs/avalanchego/pull/1464 +- chains: move "msgChan" closer to the first use (readability) by @gyuho in https://github.com/ava-labs/avalanchego/pull/1484 +- ban function params for `require.ErrorIs` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/1486 +- standardize imports by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/1466 +- fix license header test by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/1492 +- use blank identifier for interface compliance by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/1493 +- codec: remove "SetMaxSize" from "Manager", remove unnecessary lock by @gyuho in https://github.com/ava-labs/avalanchego/pull/1481 +- config: disallow "ThrottlerConfig.MaxRecheckDelay" 
< 1 ms by @gyuho in https://github.com/ava-labs/avalanchego/pull/1435 +- ban `require.Equal` when testing for `0` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/1495 +- Clean up MerkleDB Sync Close lock by @dboehm-avalabs in https://github.com/ava-labs/avalanchego/pull/1469 +- MerkleDB Cleanup by @dboehm-avalabs in https://github.com/ava-labs/avalanchego/pull/1465 +- Remove comment referencing old IP based tracking by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1509 +- ban usage of `require.Len` when testing for length `0` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/1496 +- ban usage of `require.Equal` when testing for length by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/1497 +- ban usage of `nil` in require functions by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/1498 +- Sized LRU cache by @abi87 in https://github.com/ava-labs/avalanchego/pull/1517 +- engine/snowman: clean up some comments in "bubbleVotes" unit tests by @gyuho in https://github.com/ava-labs/avalanchego/pull/1444 +- snow/networking/sender: add missing verbo check by @gyuho in https://github.com/ava-labs/avalanchego/pull/1504 +- Delete duplicate test var definitions by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1518 +- utils/bag: print generic type for bag elements by @gyuho in https://github.com/ava-labs/avalanchego/pull/1507 +- Fix incorrect test refactor by @abi87 in https://github.com/ava-labs/avalanchego/pull/1526 +- Pchain validators repackaging by @abi87 in https://github.com/ava-labs/avalanchego/pull/1284 +- Config overhaul by @ceyonur in https://github.com/ava-labs/avalanchego/pull/1370 +- rename enabled staking to sybil protection enabled by @ceyonur in https://github.com/ava-labs/avalanchego/pull/1441 +- Fix network compression type flag usage by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1532 +- Deprecate uptimes in pong message by @ceyonur in 
https://github.com/ava-labs/avalanchego/pull/1362 +- Add CPU cycles and number of disk read/write metrics by pid by @coffeeavax in https://github.com/ava-labs/avalanchego/pull/1334 +- Fetch process resource stats as best-effort by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1543 +- Add serialization tests for transactions added in Banff by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1513 +- Log chain shutdown duration by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1545 +- add interface for MerkleDB by @danlaine in https://github.com/ava-labs/avalanchego/pull/1519 + +### New Contributors + +- @gyuho made their first contribution in https://github.com/ava-labs/avalanchego/pull/1461 +- @coffeeavax made their first contribution in https://github.com/ava-labs/avalanchego/pull/1334 + +**Full Changelog**: https://github.com/ava-labs/avalanchego/compare/v1.10.1...v1.10.2 + +## [v1.10.1](https://github.com/ava-labs/avalanchego/releases/tag/v1.10.1) + +This version is backwards compatible to [v1.10.0](https://github.com/ava-labs/avalanchego/releases/tag/v1.10.0). It is optional, but encouraged. The supported plugin version is `26`. 
+ +### APIs + +- Enabled `avm.getBlockByHeight` to take in `height` as a string +- Added IDs to json formats + - `platform.getTx` now includes `id` in the `tx` response + - `platform.getBlock` now includes `id` in the `block` response and in the internal `tx` fields + - `avm.getTx` now includes `id` in the `tx` response + - `avm.getBlock` now includes `id` in the `block` response and in the internal `tx` fields + - `avm.getBlockByHeight` now includes `id` in the `block` response and in the internal `tx` fields +- Removed `avm.issueStopVertex` +- Fixed `wallet` methods to correctly allow issuance of dependent transactions after the X-chain linearization +- Added `validatorOnly` flag in `platform.getStake` +- Removed all avalanche consensus metrics +- Fixed `msgHandlingTime` metrics + +### Configs + +- Removed `--snow-avalanche-num-parents` +- Removed `--snow-avalanche-batch-size` + +### Fixes + +- Fixed panic when restarting partially completed X-chain snowman bootstrapping +- Fixed `--network-allow-private-ips` handling to correctly prevent outbound connections to private IP ranges +- Fixed UniformSampler to support sampling numbers between MaxInt64 and MaxUint64 +- Fixed data race in txID access during transaction gossip in the AVM + +### What's Changed + +- Add benchmark for gRPC GetValidatorSet by @hexfusion in https://github.com/ava-labs/avalanchego/pull/1326 +- Add checks for database being closed in merkledb; other nits by @danlaine in https://github.com/ava-labs/avalanchego/pull/1333 +- Update linkedhashmap to only Rlock when possible by @dboehm-avalabs in https://github.com/ava-labs/avalanchego/pull/1329 +- Remove no-op changes from history results by @dboehm-avalabs in https://github.com/ava-labs/avalanchego/pull/1335 +- Cleanup type assertions in the linkedHashmap by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1341 +- Fix racy avm tx access by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1349 +- Update Fuji beacon 
ips by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1354 +- Remove duplicate TLS verification by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1364 +- Adjust Merkledb Trie invalidation locking by @dboehm-avalabs in https://github.com/ava-labs/avalanchego/pull/1355 +- Use require in Avalanche bootstrapping tests by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1344 +- Add Proof size limit to sync client by @dboehm-avalabs in https://github.com/ava-labs/avalanchego/pull/1269 +- Add stake priority helpers by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1375 +- add contribution file by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/1373 +- Remove max sample value by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1374 +- Prefetch rpcdb iterator batches by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1323 +- Temp fix for flaky Sync Test by @dboehm-avalabs in https://github.com/ava-labs/avalanchego/pull/1378 +- Update merkle cache to be FIFO instead of LRU by @dboehm-avalabs in https://github.com/ava-labs/avalanchego/pull/1353 +- Improve cost of BLS key serialization for gRPC by @hexfusion in https://github.com/ava-labs/avalanchego/pull/1343 +- [Issue-1368]: Panic in serializedPath.HasPrefix by @dboehm-avalabs in https://github.com/ava-labs/avalanchego/pull/1371 +- Add ValidatorsOnly flag to GetStake by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1377 +- Use proto in `x/sync` by @danlaine in https://github.com/ava-labs/avalanchego/pull/1336 +- Update incorrect fuji beacon IPs by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1392 +- Update `api/` error handling by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1393 +- refactor concurrent work limiting in sync in `x/sync` by @danlaine in https://github.com/ava-labs/avalanchego/pull/1347 +- Remove check for impossible condition in `x/sync` 
by @danlaine in https://github.com/ava-labs/avalanchego/pull/1348 +- Improve `codec/` error handling by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1396 +- Improve `config/` error handling by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1397 +- Improve `genesis/` error handling by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1398 +- Improve various error handling locations by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1399 +- Improve `utils/` error handling by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1400 +- Improve consensus error handling by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1401 +- Improve secp256k1fx + merkledb error handling by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1402 +- Ban usage of require.Error by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1346 +- Remove slice capacity hint in `x/sync` by @danlaine in https://github.com/ava-labs/avalanchego/pull/1350 +- Simplify `syncWorkHeap` less function in `x/sync` by @danlaine in https://github.com/ava-labs/avalanchego/pull/1351 +- Replace `switch` with `txs.Visitor` in X chain signer by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/1404 +- Include IDs in json marshalling by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1408 +- Adjust find next key logic in x/Sync by @dboehm-avalabs in https://github.com/ava-labs/avalanchego/pull/1331 +- Remove bitmask from writeMsgLen by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1342 +- Require `txID`s in PeerList messages by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1411 +- Allow dependent tx issuance over the wallet API by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1413 +- Add support for proto `message.Tx` decoding by @danlaine in https://github.com/ava-labs/avalanchego/pull/1332 +- Remove 
avalanche bootstrapping -> avalanche consensus transition by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1345 +- Benchmark get canonical validator set by @aaronbuchwald in https://github.com/ava-labs/avalanchego/pull/1417 +- Simplify IP status calculation by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1421 +- Honor AllowPrivateIPs config by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1422 +- Update BLS signature ordering to avoid public key compression by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1416 +- Remove DAG based consensus by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1359 +- Remove IssueStopVertex message by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1419 +- Fix msgHandlingTime by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1432 +- Change ChangeProofs to only have one list of key/value change instead of key/values and deleted by @dboehm-avalabs in https://github.com/ava-labs/avalanchego/pull/1385 +- Update AMI generation workflow by @charlie-ava in https://github.com/ava-labs/avalanchego/pull/1289 +- Support `height` as a string in `avm.getBlockByHeight` by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1437 +- Defer Snowman Bootstrapper parser initialization to Start by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1442 +- Cleanup proposervm ancestors packing @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1446 + +### New Contributors + +- @hexfusion made their first contribution in https://github.com/ava-labs/avalanchego/pull/1326 + +**Full Changelog**: https://github.com/ava-labs/avalanchego/compare/v1.10.0...v1.10.1 + ## [v1.10.0](https://github.com/ava-labs/avalanchego/releases/tag/v1.10.0) [This upgrade](https://medium.com/avalancheavax/cortina-x-chain-linearization-a1d9305553f6) linearizes the X-chain, introduces delegation batching to the P-chain, 
and increases the maximum block size on the C-chain. diff --git a/avalanchego/SECURITY.md b/avalanchego/SECURITY.md index 26e938c8..9a04ba25 100644 --- a/avalanchego/SECURITY.md +++ b/avalanchego/SECURITY.md @@ -4,13 +4,13 @@ Avalanche takes the security of the platform and of its users very seriously. We ## Reporting a Vulnerability -**Please do not file a public ticket** mentioning the vulnerability. To disclose a vulnerability submit it through our [Bug Bounty Program](https://hackenproof.com/avalanche). +**Please do not file a public ticket** mentioning the vulnerability. To disclose a vulnerability submit it through our [Bug Bounty Program](https://immunefi.com/bounty/avalanche/). Vulnerabilities must be disclosed to us privately with reasonable time to respond, and avoid compromise of other users and accounts, or loss of funds that are not your own. We do not reward spam or social engineering vulnerabilities. Do not test for or validate any security issues in the live Avalanche networks (Mainnet and Fuji testnet), confirm all exploits in a local private testnet. -Please refer to the [Bug Bounty Page](https://hackenproof.com/avalanche) for the most up-to-date program rules and scope. +Please refer to the [Bug Bounty Page](https://immunefi.com/bounty/avalanche/) for the most up-to-date program rules and scope. ## Supported Versions diff --git a/avalanchego/api/admin/client.go b/avalanchego/api/admin/client.go index 51cb0754..d5a81a81 100644 --- a/avalanchego/api/admin/client.go +++ b/avalanchego/api/admin/client.go @@ -1,14 +1,15 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package admin import ( "context" - "fmt" "github.com/ava-labs/avalanchego/api" + "github.com/ava-labs/avalanchego/database/rpcdb" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/formatting" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/rpc" ) @@ -26,9 +27,10 @@ type Client interface { GetChainAliases(ctx context.Context, chainID string, options ...rpc.Option) ([]string, error) Stacktrace(context.Context, ...rpc.Option) error LoadVMs(context.Context, ...rpc.Option) (map[ids.ID][]string, map[ids.ID]string, error) - SetLoggerLevel(ctx context.Context, loggerName, logLevel, displayLevel string, options ...rpc.Option) error + SetLoggerLevel(ctx context.Context, loggerName, logLevel, displayLevel string, options ...rpc.Option) (map[string]LogAndDisplayLevels, error) GetLoggerLevel(ctx context.Context, loggerName string, options ...rpc.Option) (map[string]LogAndDisplayLevels, error) GetConfig(ctx context.Context, options ...rpc.Option) (interface{}, error) + DBGet(ctx context.Context, key []byte, options ...rpc.Option) ([]byte, error) } // Client implementation for the Avalanche Platform Info API Endpoint @@ -97,7 +99,7 @@ func (c *client) SetLoggerLevel( logLevel, displayLevel string, options ...rpc.Option, -) error { +) (map[string]LogAndDisplayLevels, error) { var ( logLevelArg logging.Level displayLevelArg logging.Level @@ -106,20 +108,22 @@ func (c *client) SetLoggerLevel( if len(logLevel) > 0 { logLevelArg, err = logging.ToLevel(logLevel) if err != nil { - return fmt.Errorf("couldn't parse %q to log level", logLevel) + return nil, err } } if len(displayLevel) > 0 { displayLevelArg, err = logging.ToLevel(displayLevel) if err != nil { - return fmt.Errorf("couldn't parse %q to log level", displayLevel) + return nil, err } } - return c.requester.SendRequest(ctx, "admin.setLoggerLevel", &SetLoggerLevelArgs{ + res := &LoggerLevelReply{} + err = c.requester.SendRequest(ctx, "admin.setLoggerLevel", 
&SetLoggerLevelArgs{ LoggerName: loggerName, LogLevel: &logLevelArg, DisplayLevel: &displayLevelArg, - }, &api.EmptyReply{}, options...) + }, res, options...) + return res.LoggerLevels, err } func (c *client) GetLoggerLevel( @@ -127,7 +131,7 @@ func (c *client) GetLoggerLevel( loggerName string, options ...rpc.Option, ) (map[string]LogAndDisplayLevels, error) { - res := &GetLoggerLevelReply{} + res := &LoggerLevelReply{} err := c.requester.SendRequest(ctx, "admin.getLoggerLevel", &GetLoggerLevelArgs{ LoggerName: loggerName, }, res, options...) @@ -139,3 +143,23 @@ func (c *client) GetConfig(ctx context.Context, options ...rpc.Option) (interfac err := c.requester.SendRequest(ctx, "admin.getConfig", struct{}{}, &res, options...) return res, err } + +func (c *client) DBGet(ctx context.Context, key []byte, options ...rpc.Option) ([]byte, error) { + keyStr, err := formatting.Encode(formatting.HexNC, key) + if err != nil { + return nil, err + } + + res := &DBGetReply{} + err = c.requester.SendRequest(ctx, "admin.dbGet", &DBGetArgs{ + Key: keyStr, + }, res, options...) + if err != nil { + return nil, err + } + + if err := rpcdb.ErrEnumToError[res.ErrorCode]; err != nil { + return nil, err + } + return formatting.Decode(formatting.HexNC, res.Value) +} diff --git a/avalanchego/api/admin/client_test.go b/avalanchego/api/admin/client_test.go index e034e398..ed352e1b 100644 --- a/avalanchego/api/admin/client_test.go +++ b/avalanchego/api/admin/client_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package admin @@ -16,24 +16,23 @@ import ( "github.com/ava-labs/avalanchego/utils/rpc" ) -var errTest = errors.New("non-nil error") +var ( + errTest = errors.New("non-nil error") -// SuccessResponseTest defines the expected result of an API call that returns SuccessResponse -type SuccessResponseTest struct { - Err error -} - -// GetSuccessResponseTests returns a list of possible SuccessResponseTests -func GetSuccessResponseTests() []SuccessResponseTest { - return []SuccessResponseTest{ + SuccessResponseTests = []struct { + name string + expectedErr error + }{ { - Err: nil, + name: "no error", + expectedErr: nil, }, { - Err: errTest, + name: "error", + expectedErr: errTest, }, } -} +) type mockClient struct { response interface{} @@ -63,8 +62,8 @@ func (mc *mockClient) SendRequest(_ context.Context, _ string, _ interface{}, re case *LoadVMsReply: response := mc.response.(*LoadVMsReply) *p = *response - case *GetLoggerLevelReply: - response := mc.response.(*GetLoggerLevelReply) + case *LoggerLevelReply: + response := mc.response.(*LoggerLevelReply) *p = *response case *interface{}: response := mc.response.(*interface{}) @@ -76,140 +75,100 @@ func (mc *mockClient) SendRequest(_ context.Context, _ string, _ interface{}, re } func TestStartCPUProfiler(t *testing.T) { - tests := GetSuccessResponseTests() - - for _, test := range tests { - mockClient := client{requester: NewMockClient(&api.EmptyReply{}, test.Err)} - err := mockClient.StartCPUProfiler(context.Background()) - // if there is error as expected, the test passes - if err != nil && test.Err != nil { - continue - } - if err != nil { - t.Fatalf("Unexpected error: %s", err) - } + for _, test := range SuccessResponseTests { + t.Run(test.name, func(t *testing.T) { + mockClient := client{requester: NewMockClient(&api.EmptyReply{}, test.expectedErr)} + err := mockClient.StartCPUProfiler(context.Background()) + require.ErrorIs(t, err, test.expectedErr) + }) } } func TestStopCPUProfiler(t *testing.T) { - tests := 
GetSuccessResponseTests() - - for _, test := range tests { - mockClient := client{requester: NewMockClient(&api.EmptyReply{}, test.Err)} - err := mockClient.StopCPUProfiler(context.Background()) - // if there is error as expected, the test passes - if err != nil && test.Err != nil { - continue - } - if err != nil { - t.Fatalf("Unexpected error: %s", err) - } + for _, test := range SuccessResponseTests { + t.Run(test.name, func(t *testing.T) { + mockClient := client{requester: NewMockClient(&api.EmptyReply{}, test.expectedErr)} + err := mockClient.StopCPUProfiler(context.Background()) + require.ErrorIs(t, err, test.expectedErr) + }) } } func TestMemoryProfile(t *testing.T) { - tests := GetSuccessResponseTests() - - for _, test := range tests { - mockClient := client{requester: NewMockClient(&api.EmptyReply{}, test.Err)} - err := mockClient.MemoryProfile(context.Background()) - // if there is error as expected, the test passes - if err != nil && test.Err != nil { - continue - } - if err != nil { - t.Fatalf("Unexpected error: %s", err) - } + for _, test := range SuccessResponseTests { + t.Run(test.name, func(t *testing.T) { + mockClient := client{requester: NewMockClient(&api.EmptyReply{}, test.expectedErr)} + err := mockClient.MemoryProfile(context.Background()) + require.ErrorIs(t, err, test.expectedErr) + }) } } func TestLockProfile(t *testing.T) { - tests := GetSuccessResponseTests() - - for _, test := range tests { - mockClient := client{requester: NewMockClient(&api.EmptyReply{}, test.Err)} - err := mockClient.LockProfile(context.Background()) - // if there is error as expected, the test passes - if err != nil && test.Err != nil { - continue - } - if err != nil { - t.Fatalf("Unexpected error: %s", err) - } + for _, test := range SuccessResponseTests { + t.Run(test.name, func(t *testing.T) { + mockClient := client{requester: NewMockClient(&api.EmptyReply{}, test.expectedErr)} + err := mockClient.LockProfile(context.Background()) + require.ErrorIs(t, err, 
test.expectedErr) + }) } } func TestAlias(t *testing.T) { - tests := GetSuccessResponseTests() - - for _, test := range tests { - mockClient := client{requester: NewMockClient(&api.EmptyReply{}, test.Err)} - err := mockClient.Alias(context.Background(), "alias", "alias2") - // if there is error as expected, the test passes - if err != nil && test.Err != nil { - continue - } - if err != nil { - t.Fatalf("Unexpected error: %s", err) - } + for _, test := range SuccessResponseTests { + t.Run(test.name, func(t *testing.T) { + mockClient := client{requester: NewMockClient(&api.EmptyReply{}, test.expectedErr)} + err := mockClient.Alias(context.Background(), "alias", "alias2") + require.ErrorIs(t, err, test.expectedErr) + }) } } func TestAliasChain(t *testing.T) { - tests := GetSuccessResponseTests() - - for _, test := range tests { - mockClient := client{requester: NewMockClient(&api.EmptyReply{}, test.Err)} - err := mockClient.AliasChain(context.Background(), "chain", "chain-alias") - // if there is error as expected, the test passes - if err != nil && test.Err != nil { - continue - } - if err != nil { - t.Fatalf("Unexpected error: %s", err) - } + for _, test := range SuccessResponseTests { + t.Run(test.name, func(t *testing.T) { + mockClient := client{requester: NewMockClient(&api.EmptyReply{}, test.expectedErr)} + err := mockClient.AliasChain(context.Background(), "chain", "chain-alias") + require.ErrorIs(t, err, test.expectedErr) + }) } } func TestGetChainAliases(t *testing.T) { t.Run("successful", func(t *testing.T) { + require := require.New(t) + expectedReply := []string{"alias1", "alias2"} mockClient := client{requester: NewMockClient(&GetChainAliasesReply{ Aliases: expectedReply, }, nil)} reply, err := mockClient.GetChainAliases(context.Background(), "chain") - require.NoError(t, err) - require.ElementsMatch(t, expectedReply, reply) + require.NoError(err) + require.Equal(expectedReply, reply) }) t.Run("failure", func(t *testing.T) { mockClient := 
client{requester: NewMockClient(&GetChainAliasesReply{}, errTest)} - _, err := mockClient.GetChainAliases(context.Background(), "chain") - require.ErrorIs(t, err, errTest) }) } func TestStacktrace(t *testing.T) { - tests := GetSuccessResponseTests() - - for _, test := range tests { - mockClient := client{requester: NewMockClient(&api.EmptyReply{}, test.Err)} - err := mockClient.Stacktrace(context.Background()) - // if there is error as expected, the test passes - if err != nil && test.Err != nil { - continue - } - if err != nil { - t.Fatalf("Unexpected error: %s", err) - } + for _, test := range SuccessResponseTests { + t.Run(test.name, func(t *testing.T) { + mockClient := client{requester: NewMockClient(&api.EmptyReply{}, test.expectedErr)} + err := mockClient.Stacktrace(context.Background()) + require.ErrorIs(t, err, test.expectedErr) + }) } } func TestReloadInstalledVMs(t *testing.T) { t.Run("successful", func(t *testing.T) { + require := require.New(t) + expectedNewVMs := map[ids.ID][]string{ ids.GenerateTestID(): {"foo"}, ids.GenerateTestID(): {"bar"}, @@ -224,16 +183,14 @@ func TestReloadInstalledVMs(t *testing.T) { }, nil)} loadedVMs, failedVMs, err := mockClient.LoadVMs(context.Background()) - require.NoError(t, err) - require.Equal(t, expectedNewVMs, loadedVMs) - require.Equal(t, expectedFailedVMs, failedVMs) + require.NoError(err) + require.Equal(expectedNewVMs, loadedVMs) + require.Equal(expectedFailedVMs, failedVMs) }) t.Run("failure", func(t *testing.T) { mockClient := client{requester: NewMockClient(&LoadVMsReply{}, errTest)} - _, _, err := mockClient.LoadVMs(context.Background()) - require.ErrorIs(t, err, errTest) }) } @@ -243,58 +200,69 @@ func TestSetLoggerLevel(t *testing.T) { name string logLevel string displayLevel string - serviceErr bool - clientShouldErr bool + serviceResponse map[string]LogAndDisplayLevels + serviceErr error + clientErr error } tests := []test{ { - name: "Happy path", - logLevel: "INFO", - displayLevel: "INFO", - serviceErr: 
false, - clientShouldErr: false, + name: "Happy path", + logLevel: "INFO", + displayLevel: "INFO", + serviceResponse: map[string]LogAndDisplayLevels{ + "Happy path": {LogLevel: logging.Info, DisplayLevel: logging.Info}, + }, + serviceErr: nil, + clientErr: nil, }, { name: "Service errors", logLevel: "INFO", displayLevel: "INFO", - serviceErr: true, - clientShouldErr: true, + serviceResponse: nil, + serviceErr: errTest, + clientErr: errTest, }, { name: "Invalid log level", logLevel: "invalid", displayLevel: "INFO", - serviceErr: false, - clientShouldErr: true, + serviceResponse: nil, + serviceErr: nil, + clientErr: logging.ErrUnknownLevel, }, { name: "Invalid display level", logLevel: "INFO", displayLevel: "invalid", - serviceErr: false, - clientShouldErr: true, + serviceResponse: nil, + serviceErr: nil, + clientErr: logging.ErrUnknownLevel, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { require := require.New(t) - var err error - if tt.serviceErr { - err = errTest + + c := client{ + requester: NewMockClient( + &LoggerLevelReply{ + LoggerLevels: tt.serviceResponse, + }, + tt.serviceErr, + ), } - mockClient := client{requester: NewMockClient(&api.EmptyReply{}, err)} - err = mockClient.SetLoggerLevel( + res, err := c.SetLoggerLevel( context.Background(), "", tt.logLevel, tt.displayLevel, ) - if tt.clientShouldErr { - require.Error(err) - } else { - require.NoError(err) + require.ErrorIs(err, tt.clientErr) + if tt.clientErr != nil { + return } + require.Equal(tt.serviceResponse, res) }) } } @@ -304,8 +272,8 @@ func TestGetLoggerLevel(t *testing.T) { name string loggerName string serviceResponse map[string]LogAndDisplayLevels - serviceErr bool - clientShouldErr bool + serviceErr error + clientErr error } tests := []test{ { @@ -314,35 +282,38 @@ func TestGetLoggerLevel(t *testing.T) { serviceResponse: map[string]LogAndDisplayLevels{ "foo": {LogLevel: logging.Info, DisplayLevel: logging.Info}, }, - serviceErr: false, - clientShouldErr: false, + 
serviceErr: nil, + clientErr: nil, }, { name: "service errors", loggerName: "foo", serviceResponse: nil, - serviceErr: true, - clientShouldErr: true, + serviceErr: errTest, + clientErr: errTest, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { require := require.New(t) - var err error - if tt.serviceErr { - err = errTest + + c := client{ + requester: NewMockClient( + &LoggerLevelReply{ + LoggerLevels: tt.serviceResponse, + }, + tt.serviceErr, + ), } - mockClient := client{requester: NewMockClient(&GetLoggerLevelReply{LoggerLevels: tt.serviceResponse}, err)} - res, err := mockClient.GetLoggerLevel( + res, err := c.GetLoggerLevel( context.Background(), tt.loggerName, ) - if tt.clientShouldErr { - require.Error(err) + require.ErrorIs(err, tt.clientErr) + if tt.clientErr != nil { return } - require.NoError(err) - require.EqualValues(tt.serviceResponse, res) + require.Equal(tt.serviceResponse, res) }) } } @@ -350,40 +321,38 @@ func TestGetLoggerLevel(t *testing.T) { func TestGetConfig(t *testing.T) { type test struct { name string - serviceErr bool - clientShouldErr bool + serviceErr error + clientErr error expectedResponse interface{} } var resp interface{} = "response" tests := []test{ { name: "Happy path", - serviceErr: false, - clientShouldErr: false, + serviceErr: nil, + clientErr: nil, expectedResponse: &resp, }, { name: "service errors", - serviceErr: true, - clientShouldErr: true, + serviceErr: errTest, + clientErr: errTest, expectedResponse: nil, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { require := require.New(t) - var err error - if tt.serviceErr { - err = errTest + + c := client{ + requester: NewMockClient(tt.expectedResponse, tt.serviceErr), } - mockClient := client{requester: NewMockClient(tt.expectedResponse, err)} - res, err := mockClient.GetConfig(context.Background()) - if tt.clientShouldErr { - require.Error(err) + res, err := c.GetConfig(context.Background()) + require.ErrorIs(err, tt.clientErr) + if 
tt.clientErr != nil { return } - require.NoError(err) - require.EqualValues("response", res) + require.Equal(resp, res) }) } } diff --git a/avalanchego/api/admin/key_value_reader.go b/avalanchego/api/admin/key_value_reader.go new file mode 100644 index 00000000..bfc7b2cc --- /dev/null +++ b/avalanchego/api/admin/key_value_reader.go @@ -0,0 +1,34 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package admin + +import ( + "context" + + "github.com/ava-labs/avalanchego/database" +) + +var _ database.KeyValueReader = (*KeyValueReader)(nil) + +type KeyValueReader struct { + client Client +} + +func NewKeyValueReader(client Client) *KeyValueReader { + return &KeyValueReader{ + client: client, + } +} + +func (r *KeyValueReader) Has(key []byte) (bool, error) { + _, err := r.client.DBGet(context.Background(), key) + if err == database.ErrNotFound { + return false, nil + } + return err == nil, err +} + +func (r *KeyValueReader) Get(key []byte) ([]byte, error) { + return r.client.DBGet(context.Background(), key) +} diff --git a/avalanchego/api/admin/service.go b/avalanchego/api/admin/service.go index 57ee8bfa..a7936b2a 100644 --- a/avalanchego/api/admin/service.go +++ b/avalanchego/api/admin/service.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package admin @@ -7,24 +7,28 @@ import ( "errors" "net/http" "path" + "sync" "github.com/gorilla/rpc/v2" - "go.uber.org/zap" "github.com/ava-labs/avalanchego/api" "github.com/ava-labs/avalanchego/api/server" "github.com/ava-labs/avalanchego/chains" + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/database/rpcdb" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/formatting" "github.com/ava-labs/avalanchego/utils/json" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/perms" "github.com/ava-labs/avalanchego/utils/profiler" "github.com/ava-labs/avalanchego/vms" "github.com/ava-labs/avalanchego/vms/registry" + + rpcdbpb "github.com/ava-labs/avalanchego/proto/pb/rpcdb" ) const ( @@ -44,6 +48,7 @@ type Config struct { ProfileDir string LogFactory logging.Factory NodeConfig interface{} + DB database.Database ChainManager chains.Manager HTTPServer server.PathAdderWithReadLock VMRegistry registry.VMRegistry @@ -53,23 +58,24 @@ type Config struct { // Admin is the API service for node admin management type Admin struct { Config + lock sync.RWMutex profiler profiler.Profiler } // NewService returns a new admin API service. // All of the fields in [config] must be set. 
-func NewService(config Config) (*common.HTTPHandler, error) { - newServer := rpc.NewServer() +func NewService(config Config) (http.Handler, error) { + server := rpc.NewServer() codec := json.NewCodec() - newServer.RegisterCodec(codec, "application/json") - newServer.RegisterCodec(codec, "application/json;charset=UTF-8") - if err := newServer.RegisterService(&Admin{ - Config: config, - profiler: profiler.New(config.ProfileDir), - }, "admin"); err != nil { - return nil, err - } - return &common.HTTPHandler{Handler: newServer}, nil + server.RegisterCodec(codec, "application/json") + server.RegisterCodec(codec, "application/json;charset=UTF-8") + return server, server.RegisterService( + &Admin{ + Config: config, + profiler: profiler.New(config.ProfileDir), + }, + "admin", + ) } // StartCPUProfiler starts a cpu profile writing to the specified file @@ -79,6 +85,9 @@ func (a *Admin) StartCPUProfiler(_ *http.Request, _ *struct{}, _ *api.EmptyReply zap.String("method", "startCPUProfiler"), ) + a.lock.Lock() + defer a.lock.Unlock() + return a.profiler.StartCPUProfiler() } @@ -89,6 +98,9 @@ func (a *Admin) StopCPUProfiler(_ *http.Request, _ *struct{}, _ *api.EmptyReply) zap.String("method", "stopCPUProfiler"), ) + a.lock.Lock() + defer a.lock.Unlock() + return a.profiler.StopCPUProfiler() } @@ -99,6 +111,9 @@ func (a *Admin) MemoryProfile(_ *http.Request, _ *struct{}, _ *api.EmptyReply) e zap.String("method", "memoryProfile"), ) + a.lock.Lock() + defer a.lock.Unlock() + return a.profiler.MemoryProfile() } @@ -109,6 +124,9 @@ func (a *Admin) LockProfile(_ *http.Request, _ *struct{}, _ *api.EmptyReply) err zap.String("method", "lockProfile"), ) + a.lock.Lock() + defer a.lock.Unlock() + return a.profiler.LockProfile() } @@ -157,6 +175,9 @@ func (a *Admin) AliasChain(_ *http.Request, args *AliasChainArgs, _ *api.EmptyRe return err } + a.lock.Lock() + defer a.lock.Unlock() + if err := a.ChainManager.Alias(chainID, args.Alias); err != nil { return err } @@ -201,16 +222,28 @@ func 
(a *Admin) Stacktrace(_ *http.Request, _ *struct{}, _ *api.EmptyReply) erro ) stacktrace := []byte(utils.GetStacktrace(true)) + + a.lock.Lock() + defer a.lock.Unlock() + return perms.WriteFile(stacktraceFile, stacktrace, perms.ReadWrite) } -// See SetLoggerLevel type SetLoggerLevelArgs struct { LoggerName string `json:"loggerName"` LogLevel *logging.Level `json:"logLevel"` DisplayLevel *logging.Level `json:"displayLevel"` } +type LogAndDisplayLevels struct { + LogLevel logging.Level `json:"logLevel"` + DisplayLevel logging.Level `json:"displayLevel"` +} + +type LoggerLevelReply struct { + LoggerLevels map[string]LogAndDisplayLevels `json:"loggerLevels"` +} + // SetLoggerLevel sets the log level and/or display level for loggers. // If len([args.LoggerName]) == 0, sets the log/display level of all loggers. // Otherwise, sets the log/display level of the loggers named in that argument. @@ -220,7 +253,7 @@ type SetLoggerLevelArgs struct { // Sets the display level of these loggers to args.LogLevel. // If args.DisplayLevel == nil, doesn't set the display level of these loggers. // If args.DisplayLevel != nil, must be a valid string representation of a log level. 
-func (a *Admin) SetLoggerLevel(_ *http.Request, args *SetLoggerLevelArgs, _ *api.EmptyReply) error { +func (a *Admin) SetLoggerLevel(_ *http.Request, args *SetLoggerLevelArgs, reply *LoggerLevelReply) error { a.Log.Debug("API called", zap.String("service", "admin"), zap.String("method", "setLoggerLevel"), @@ -233,14 +266,10 @@ func (a *Admin) SetLoggerLevel(_ *http.Request, args *SetLoggerLevelArgs, _ *api return errNoLogLevel } - var loggerNames []string - if len(args.LoggerName) > 0 { - loggerNames = []string{args.LoggerName} - } else { - // Empty name means all loggers - loggerNames = a.LogFactory.GetLoggerNames() - } + a.lock.Lock() + defer a.lock.Unlock() + loggerNames := a.getLoggerNames(args.LoggerName) for _, name := range loggerNames { if args.LogLevel != nil { if err := a.LogFactory.SetLogLevel(name, *args.LogLevel); err != nil { @@ -253,55 +282,32 @@ func (a *Admin) SetLoggerLevel(_ *http.Request, args *SetLoggerLevelArgs, _ *api } } } - return nil -} -type LogAndDisplayLevels struct { - LogLevel logging.Level `json:"logLevel"` - DisplayLevel logging.Level `json:"displayLevel"` + var err error + reply.LoggerLevels, err = a.getLogLevels(loggerNames) + return err } -// See GetLoggerLevel type GetLoggerLevelArgs struct { LoggerName string `json:"loggerName"` } -// See GetLoggerLevel -type GetLoggerLevelReply struct { - LoggerLevels map[string]LogAndDisplayLevels `json:"loggerLevels"` -} - // GetLogLevel returns the log level and display level of all loggers. 
-func (a *Admin) GetLoggerLevel(_ *http.Request, args *GetLoggerLevelArgs, reply *GetLoggerLevelReply) error { +func (a *Admin) GetLoggerLevel(_ *http.Request, args *GetLoggerLevelArgs, reply *LoggerLevelReply) error { a.Log.Debug("API called", zap.String("service", "admin"), zap.String("method", "getLoggerLevels"), logging.UserString("loggerName", args.LoggerName), ) - reply.LoggerLevels = make(map[string]LogAndDisplayLevels) - var loggerNames []string - // Empty name means all loggers - if len(args.LoggerName) > 0 { - loggerNames = []string{args.LoggerName} - } else { - loggerNames = a.LogFactory.GetLoggerNames() - } - for _, name := range loggerNames { - logLevel, err := a.LogFactory.GetLogLevel(name) - if err != nil { - return err - } - displayLevel, err := a.LogFactory.GetDisplayLevel(name) - if err != nil { - return err - } - reply.LoggerLevels[name] = LogAndDisplayLevels{ - LogLevel: logLevel, - DisplayLevel: displayLevel, - } - } - return nil + a.lock.RLock() + defer a.lock.RUnlock() + + loggerNames := a.getLoggerNames(args.LoggerName) + + var err error + reply.LoggerLevels, err = a.getLogLevels(loggerNames) + return err } // GetConfig returns the config that the node was started with. 
@@ -329,8 +335,11 @@ func (a *Admin) LoadVMs(r *http.Request, _ *struct{}, reply *LoadVMsReply) error zap.String("method", "loadVMs"), ) + a.lock.Lock() + defer a.lock.Unlock() + ctx := r.Context() - loadedVMs, failedVMs, err := a.VMRegistry.ReloadWithReadLock(ctx) + loadedVMs, failedVMs, err := a.VMRegistry.Reload(ctx) if err != nil { return err } @@ -345,3 +354,62 @@ func (a *Admin) LoadVMs(r *http.Request, _ *struct{}, reply *LoadVMsReply) error reply.NewVMs, err = ids.GetRelevantAliases(a.VMManager, loadedVMs) return err } + +func (a *Admin) getLoggerNames(loggerName string) []string { + if len(loggerName) == 0 { + // Empty name means all loggers + return a.LogFactory.GetLoggerNames() + } + return []string{loggerName} +} + +func (a *Admin) getLogLevels(loggerNames []string) (map[string]LogAndDisplayLevels, error) { + loggerLevels := make(map[string]LogAndDisplayLevels) + for _, name := range loggerNames { + logLevel, err := a.LogFactory.GetLogLevel(name) + if err != nil { + return nil, err + } + displayLevel, err := a.LogFactory.GetDisplayLevel(name) + if err != nil { + return nil, err + } + loggerLevels[name] = LogAndDisplayLevels{ + LogLevel: logLevel, + DisplayLevel: displayLevel, + } + } + return loggerLevels, nil +} + +type DBGetArgs struct { + Key string `json:"key"` +} + +type DBGetReply struct { + Value string `json:"value"` + ErrorCode rpcdbpb.Error `json:"errorCode"` +} + +//nolint:stylecheck // renaming this method to DBGet would change the API method from "dbGet" to "dBGet" +func (a *Admin) DbGet(_ *http.Request, args *DBGetArgs, reply *DBGetReply) error { + a.Log.Debug("API called", + zap.String("service", "admin"), + zap.String("method", "dbGet"), + logging.UserString("key", args.Key), + ) + + key, err := formatting.Decode(formatting.HexNC, args.Key) + if err != nil { + return err + } + + value, err := a.DB.Get(key) + if err != nil { + reply.ErrorCode = rpcdb.ErrorToErrEnum[err] + return rpcdb.ErrorToRPCError(err) + } + + reply.Value, err = 
formatting.Encode(formatting.HexNC, value) + return err +} diff --git a/avalanchego/api/admin/service_test.go b/avalanchego/api/admin/service_test.go index 0cebe8d6..936e7107 100644 --- a/avalanchego/api/admin/service_test.go +++ b/avalanchego/api/admin/service_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package admin @@ -7,20 +7,22 @@ import ( "net/http" "testing" - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" + "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/formatting" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/vms" "github.com/ava-labs/avalanchego/vms/registry" + + rpcdbpb "github.com/ava-labs/avalanchego/proto/pb/rpcdb" ) type loadVMsTest struct { admin *Admin ctrl *gomock.Controller - mockLog *logging.MockLogger mockVMManager *vms.MockManager mockVMRegistry *registry.MockVMRegistry } @@ -28,18 +30,16 @@ type loadVMsTest struct { func initLoadVMsTest(t *testing.T) *loadVMsTest { ctrl := gomock.NewController(t) - mockLog := logging.NewMockLogger(ctrl) mockVMRegistry := registry.NewMockVMRegistry(ctrl) mockVMManager := vms.NewMockManager(ctrl) return &loadVMsTest{ admin: &Admin{Config: Config{ - Log: mockLog, + Log: logging.NoLog{}, VMRegistry: mockVMRegistry, VMManager: mockVMManager, }}, ctrl: ctrl, - mockLog: mockLog, mockVMManager: mockVMManager, mockVMRegistry: mockVMRegistry, } @@ -47,8 +47,9 @@ func initLoadVMsTest(t *testing.T) *loadVMsTest { // Tests behavior for LoadVMs if everything succeeds. 
func TestLoadVMsSuccess(t *testing.T) { + require := require.New(t) + resources := initLoadVMsTest(t) - defer resources.ctrl.Finish() id1 := ids.GenerateTestID() id2 := ids.GenerateTestID() @@ -66,38 +67,35 @@ func TestLoadVMsSuccess(t *testing.T) { id2: alias2[1:], } - resources.mockLog.EXPECT().Debug(gomock.Any(), gomock.Any()).Times(1) - resources.mockVMRegistry.EXPECT().ReloadWithReadLock(gomock.Any()).Times(1).Return(newVMs, failedVMs, nil) + resources.mockVMRegistry.EXPECT().Reload(gomock.Any()).Times(1).Return(newVMs, failedVMs, nil) resources.mockVMManager.EXPECT().Aliases(id1).Times(1).Return(alias1, nil) resources.mockVMManager.EXPECT().Aliases(id2).Times(1).Return(alias2, nil) // execute test reply := LoadVMsReply{} - err := resources.admin.LoadVMs(&http.Request{}, nil, &reply) - - require.Equal(t, expectedVMRegistry, reply.NewVMs) - require.Equal(t, err, nil) + require.NoError(resources.admin.LoadVMs(&http.Request{}, nil, &reply)) + require.Equal(expectedVMRegistry, reply.NewVMs) } // Tests behavior for LoadVMs if we fail to reload vms. 
func TestLoadVMsReloadFails(t *testing.T) { + require := require.New(t) + resources := initLoadVMsTest(t) - defer resources.ctrl.Finish() - resources.mockLog.EXPECT().Debug(gomock.Any(), gomock.Any()).Times(1) // Reload fails - resources.mockVMRegistry.EXPECT().ReloadWithReadLock(gomock.Any()).Times(1).Return(nil, nil, errTest) + resources.mockVMRegistry.EXPECT().Reload(gomock.Any()).Times(1).Return(nil, nil, errTest) reply := LoadVMsReply{} err := resources.admin.LoadVMs(&http.Request{}, nil, &reply) - - require.Equal(t, err, errTest) + require.ErrorIs(err, errTest) } // Tests behavior for LoadVMs if we fail to fetch our aliases func TestLoadVMsGetAliasesFails(t *testing.T) { + require := require.New(t) + resources := initLoadVMsTest(t) - defer resources.ctrl.Finish() id1 := ids.GenerateTestID() id2 := ids.GenerateTestID() @@ -108,13 +106,64 @@ func TestLoadVMsGetAliasesFails(t *testing.T) { // every vm is at least aliased to itself. alias1 := []string{id1.String(), "vm1-alias-1", "vm1-alias-2"} - resources.mockLog.EXPECT().Debug(gomock.Any(), gomock.Any()).Times(1) - resources.mockVMRegistry.EXPECT().ReloadWithReadLock(gomock.Any()).Times(1).Return(newVMs, failedVMs, nil) + resources.mockVMRegistry.EXPECT().Reload(gomock.Any()).Times(1).Return(newVMs, failedVMs, nil) resources.mockVMManager.EXPECT().Aliases(id1).Times(1).Return(alias1, nil) resources.mockVMManager.EXPECT().Aliases(id2).Times(1).Return(nil, errTest) reply := LoadVMsReply{} err := resources.admin.LoadVMs(&http.Request{}, nil, &reply) + require.ErrorIs(err, errTest) +} - require.Equal(t, err, errTest) +func TestServiceDBGet(t *testing.T) { + a := &Admin{Config: Config{ + Log: logging.NoLog{}, + DB: memdb.New(), + }} + + helloBytes := []byte("hello") + helloHex, err := formatting.Encode(formatting.HexNC, helloBytes) + require.NoError(t, err) + + worldBytes := []byte("world") + worldHex, err := formatting.Encode(formatting.HexNC, worldBytes) + require.NoError(t, err) + + require.NoError(t, 
a.DB.Put(helloBytes, worldBytes)) + + tests := []struct { + name string + key string + expectedValue string + expectedErrorCode rpcdbpb.Error + }{ + { + name: "key exists", + key: helloHex, + expectedValue: worldHex, + expectedErrorCode: rpcdbpb.Error_ERROR_UNSPECIFIED, + }, + { + name: "key doesn't exist", + key: "", + expectedValue: "", + expectedErrorCode: rpcdbpb.Error_ERROR_NOT_FOUND, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + reply := &DBGetReply{} + require.NoError(a.DbGet( + nil, + &DBGetArgs{ + Key: test.key, + }, + reply, + )) + require.Equal(test.expectedValue, reply.Value) + require.Equal(test.expectedErrorCode, reply.ErrorCode) + }) + } } diff --git a/avalanchego/api/auth/auth.go b/avalanchego/api/auth/auth.go index 733f276e..a8e4fa9c 100644 --- a/avalanchego/api/auth/auth.go +++ b/avalanchego/api/auth/auth.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package auth @@ -14,8 +14,6 @@ import ( "sync" "time" - jwt "github.com/golang-jwt/jwt/v4" - "github.com/gorilla/rpc/v2" "github.com/ava-labs/avalanchego/utils/json" @@ -23,6 +21,8 @@ import ( "github.com/ava-labs/avalanchego/utils/password" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/timer/mockable" + + jwt "github.com/golang-jwt/jwt/v4" ) const ( @@ -41,7 +41,7 @@ const ( var ( errNoToken = errors.New("auth token not provided") errAuthHeaderNotParsable = fmt.Errorf( - "couldn't parse auth token. Header \"%s\" should be \"%sTOKEN.GOES.HERE\"", + `couldn't parse auth token. 
Header "%s" should be "%sTOKEN.GOES.HERE"`, headerKey, headerValStart, ) @@ -50,7 +50,6 @@ var ( errTokenInsufficientPermission = errors.New("the provided auth token does not allow access to this endpoint") errWrongPassword = errors.New("incorrect password") errSamePassword = errors.New("new password can't be same as old password") - errNoPassword = errors.New("no password") errNoEndpoints = errors.New("must name at least one endpoint") errTooManyEndpoints = fmt.Errorf("can only name at most %d endpoints", maxEndpoints) @@ -121,7 +120,7 @@ func NewFromHash(log logging.Logger, endpoint string, pw password.Hash) Auth { func (a *auth) NewToken(pw string, duration time.Duration, endpoints []string) (string, error) { if pw == "" { - return "", errNoPassword + return "", password.ErrEmptyPassword } if l := len(endpoints); l == 0 { return "", errNoEndpoints @@ -170,7 +169,7 @@ func (a *auth) RevokeToken(tokenStr, pw string) error { return errNoToken } if pw == "" { - return errNoPassword + return password.ErrEmptyPassword } a.lock.Lock() diff --git a/avalanchego/api/auth/auth_test.go b/avalanchego/api/auth/auth_test.go index c86bcc22..fce886d6 100644 --- a/avalanchego/api/auth/auth_test.go +++ b/avalanchego/api/auth/auth_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package auth @@ -14,19 +14,20 @@ import ( "testing" "time" - jwt "github.com/golang-jwt/jwt/v4" - "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/password" + + jwt "github.com/golang-jwt/jwt/v4" ) var ( testPassword = "password!@#$%$#@!" 
hashedPassword = password.Hash{} - unAuthorizedResponseRegex = "^{\"jsonrpc\":\"2.0\",\"error\":{\"code\":-32600,\"message\":\"(.*)\"},\"id\":1}" + unAuthorizedResponseRegex = `^{"jsonrpc":"2.0","error":{"code":-32600,"message":"(.*)"},"id":1}` errTest = errors.New("non-nil error") + hostName = "http://127.0.0.1:9650" ) func init() { @@ -36,19 +37,23 @@ func init() { } // Always returns 200 (http.StatusOK) -var dummyHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {}) +var dummyHandler = http.HandlerFunc(func(http.ResponseWriter, *http.Request) {}) func TestNewTokenWrongPassword(t *testing.T) { + require := require.New(t) + auth := NewFromHash(logging.NoLog{}, "auth", hashedPassword) _, err := auth.NewToken("", defaultTokenLifespan, []string{"endpoint1, endpoint2"}) - require.Error(t, err, "should have failed because password is wrong") + require.ErrorIs(err, password.ErrEmptyPassword) _, err = auth.NewToken("notThePassword", defaultTokenLifespan, []string{"endpoint1, endpoint2"}) - require.Error(t, err, "should have failed because password is wrong") + require.ErrorIs(err, errWrongPassword) } func TestNewTokenHappyPath(t *testing.T) { + require := require.New(t) + auth := NewFromHash(logging.NoLog{}, "auth", hashedPassword).(*auth) now := time.Now() @@ -57,7 +62,7 @@ func TestNewTokenHappyPath(t *testing.T) { // Make a token endpoints := []string{"endpoint1", "endpoint2", "endpoint3"} tokenStr, err := auth.NewToken(testPassword, defaultTokenLifespan, endpoints) - require.NoError(t, err) + require.NoError(err) // Parse the token token, err := jwt.ParseWithClaims(tokenStr, &endpointClaims{}, func(*jwt.Token) (interface{}, error) { @@ -65,23 +70,25 @@ func TestNewTokenHappyPath(t *testing.T) { defer auth.lock.RUnlock() return auth.password.Password[:], nil }) - require.NoError(t, err, "couldn't parse new token") + require.NoError(err) - claims, ok := token.Claims.(*endpointClaims) - require.True(t, ok, "expected auth token's claims to be type 
endpointClaims but is different type") - require.ElementsMatch(t, endpoints, claims.Endpoints, "token has wrong endpoint claims") + require.IsType(&endpointClaims{}, token.Claims) + claims := token.Claims.(*endpointClaims) + require.Equal(endpoints, claims.Endpoints) shouldExpireAt := jwt.NewNumericDate(now.Add(defaultTokenLifespan)) - require.Equal(t, shouldExpireAt, claims.ExpiresAt, "token expiration time is wrong") + require.Equal(shouldExpireAt, claims.ExpiresAt) } func TestTokenHasWrongSig(t *testing.T) { + require := require.New(t) + auth := NewFromHash(logging.NoLog{}, "auth", hashedPassword).(*auth) // Make a token endpoints := []string{"endpoint1", "endpoint2", "endpoint3"} tokenStr, err := auth.NewToken(testPassword, defaultTokenLifespan, endpoints) - require.NoError(t, err) + require.NoError(err) // Try to parse the token using the wrong password _, err = jwt.ParseWithClaims(tokenStr, &endpointClaims{}, func(*jwt.Token) (interface{}, error) { @@ -89,7 +96,7 @@ func TestTokenHasWrongSig(t *testing.T) { defer auth.lock.RUnlock() return []byte(""), nil }) - require.Error(t, err, "should have failed because password is wrong") + require.ErrorIs(err, jwt.ErrSignatureInvalid) // Try to parse the token using the wrong password _, err = jwt.ParseWithClaims(tokenStr, &endpointClaims{}, func(*jwt.Token) (interface{}, error) { @@ -97,94 +104,100 @@ func TestTokenHasWrongSig(t *testing.T) { defer auth.lock.RUnlock() return []byte("notThePassword"), nil }) - require.Error(t, err, "should have failed because password is wrong") + require.ErrorIs(err, jwt.ErrSignatureInvalid) } func TestChangePassword(t *testing.T) { + require := require.New(t) + auth := NewFromHash(logging.NoLog{}, "auth", hashedPassword).(*auth) password2 := "fejhkefjhefjhefhje" // #nosec G101 var err error err = auth.ChangePassword("", password2) - require.Error(t, err, "should have failed because old password is wrong") + require.ErrorIs(err, errWrongPassword) err = 
auth.ChangePassword("notThePassword", password2) - require.Error(t, err, "should have failed because old password is wrong") + require.ErrorIs(err, errWrongPassword) err = auth.ChangePassword(testPassword, "") - require.Error(t, err, "should have failed because new password is empty") + require.ErrorIs(err, password.ErrEmptyPassword) - err = auth.ChangePassword(testPassword, password2) - require.NoError(t, err, "should have succeeded") - require.True(t, auth.password.Check(password2), "password should have been changed") + require.NoError(auth.ChangePassword(testPassword, password2)) + require.True(auth.password.Check(password2)) password3 := "ufwhwohwfohawfhwdwd" // #nosec G101 err = auth.ChangePassword(testPassword, password3) - require.Error(t, err, "should have failed because old password is wrong") + require.ErrorIs(err, errWrongPassword) - err = auth.ChangePassword(password2, password3) - require.NoError(t, err, "should have succeeded") + require.NoError(auth.ChangePassword(password2, password3)) } func TestRevokeToken(t *testing.T) { + require := require.New(t) + auth := NewFromHash(logging.NoLog{}, "auth", hashedPassword).(*auth) // Make a token endpoints := []string{"/ext/info", "/ext/bc/X", "/ext/metrics"} tokenStr, err := auth.NewToken(testPassword, defaultTokenLifespan, endpoints) - require.NoError(t, err) + require.NoError(err) - err = auth.RevokeToken(tokenStr, testPassword) - require.NoError(t, err, "should have succeeded") - require.Len(t, auth.revoked, 1, "revoked token list is incorrect") + require.NoError(auth.RevokeToken(tokenStr, testPassword)) + require.Len(auth.revoked, 1) } func TestWrapHandlerHappyPath(t *testing.T) { + require := require.New(t) + auth := NewFromHash(logging.NoLog{}, "auth", hashedPassword) // Make a token endpoints := []string{"/ext/info", "/ext/bc/X", "/ext/metrics"} tokenStr, err := auth.NewToken(testPassword, defaultTokenLifespan, endpoints) - require.NoError(t, err) + require.NoError(err) wrappedHandler := 
auth.WrapHandler(dummyHandler) for _, endpoint := range endpoints { - req := httptest.NewRequest(http.MethodPost, "http://127.0.0.1:9650"+endpoint, strings.NewReader("")) - req.Header.Add("Authorization", "Bearer "+tokenStr) + req := httptest.NewRequest(http.MethodPost, hostName+endpoint, strings.NewReader("")) + req.Header.Add("Authorization", headerValStart+tokenStr) rr := httptest.NewRecorder() wrappedHandler.ServeHTTP(rr, req) - require.Equal(t, http.StatusOK, rr.Code) + require.Equal(http.StatusOK, rr.Code) } } func TestWrapHandlerRevokedToken(t *testing.T) { + require := require.New(t) + auth := NewFromHash(logging.NoLog{}, "auth", hashedPassword) // Make a token endpoints := []string{"/ext/info", "/ext/bc/X", "/ext/metrics"} tokenStr, err := auth.NewToken(testPassword, defaultTokenLifespan, endpoints) - require.NoError(t, err) + require.NoError(err) - err = auth.RevokeToken(tokenStr, testPassword) - require.NoError(t, err) + require.NoError(auth.RevokeToken(tokenStr, testPassword)) wrappedHandler := auth.WrapHandler(dummyHandler) for _, endpoint := range endpoints { - req := httptest.NewRequest(http.MethodPost, "http://127.0.0.1:9650"+endpoint, strings.NewReader("")) - req.Header.Add("Authorization", "Bearer "+tokenStr) + req := httptest.NewRequest(http.MethodPost, hostName+endpoint, strings.NewReader("")) + req.Header.Add("Authorization", headerValStart+tokenStr) rr := httptest.NewRecorder() wrappedHandler.ServeHTTP(rr, req) - require.Equal(t, http.StatusUnauthorized, rr.Code) - require.Contains(t, rr.Body.String(), errTokenRevoked.Error()) - require.Regexp(t, unAuthorizedResponseRegex, rr.Body.String()) + require.Equal(http.StatusUnauthorized, rr.Code) + require.Contains(rr.Body.String(), errTokenRevoked.Error()) + require.Regexp(unAuthorizedResponseRegex, rr.Body.String()) } } func TestWrapHandlerExpiredToken(t *testing.T) { + require := require.New(t) + auth := NewFromHash(logging.NoLog{}, "auth", hashedPassword).(*auth) auth.clock.Set(time.Now().Add(-2 
* defaultTokenLifespan)) @@ -192,130 +205,142 @@ func TestWrapHandlerExpiredToken(t *testing.T) { // Make a token that expired well in the past endpoints := []string{"/ext/info", "/ext/bc/X", "/ext/metrics"} tokenStr, err := auth.NewToken(testPassword, defaultTokenLifespan, endpoints) - require.NoError(t, err) + require.NoError(err) wrappedHandler := auth.WrapHandler(dummyHandler) for _, endpoint := range endpoints { - req := httptest.NewRequest(http.MethodPost, "http://127.0.0.1:9650"+endpoint, strings.NewReader("")) - req.Header.Add("Authorization", "Bearer "+tokenStr) + req := httptest.NewRequest(http.MethodPost, hostName+endpoint, strings.NewReader("")) + req.Header.Add("Authorization", headerValStart+tokenStr) rr := httptest.NewRecorder() wrappedHandler.ServeHTTP(rr, req) - require.Equal(t, http.StatusUnauthorized, rr.Code) - require.Contains(t, rr.Body.String(), "expired") - require.Regexp(t, unAuthorizedResponseRegex, rr.Body.String()) + require.Equal(http.StatusUnauthorized, rr.Code) + require.Contains(rr.Body.String(), "expired") + require.Regexp(unAuthorizedResponseRegex, rr.Body.String()) } } func TestWrapHandlerNoAuthToken(t *testing.T) { + require := require.New(t) + auth := NewFromHash(logging.NoLog{}, "auth", hashedPassword) endpoints := []string{"/ext/info", "/ext/bc/X", "/ext/metrics"} wrappedHandler := auth.WrapHandler(dummyHandler) for _, endpoint := range endpoints { - req := httptest.NewRequest(http.MethodPost, fmt.Sprintf("http://127.0.0.1:9650%s", endpoint), strings.NewReader("")) + req := httptest.NewRequest(http.MethodPost, "http://127.0.0.1:9650"+endpoint, strings.NewReader("")) rr := httptest.NewRecorder() wrappedHandler.ServeHTTP(rr, req) - require.Equal(t, http.StatusUnauthorized, rr.Code) - require.Contains(t, rr.Body.String(), errNoToken.Error()) - require.Regexp(t, unAuthorizedResponseRegex, rr.Body.String()) + require.Equal(http.StatusUnauthorized, rr.Code) + require.Contains(rr.Body.String(), errNoToken.Error()) + 
require.Regexp(unAuthorizedResponseRegex, rr.Body.String()) } } func TestWrapHandlerUnauthorizedEndpoint(t *testing.T) { + require := require.New(t) + auth := NewFromHash(logging.NoLog{}, "auth", hashedPassword) // Make a token endpoints := []string{"/ext/info"} tokenStr, err := auth.NewToken(testPassword, defaultTokenLifespan, endpoints) - require.NoError(t, err) + require.NoError(err) unauthorizedEndpoints := []string{"/ext/bc/X", "/ext/metrics", "", "/foo", "/ext/info/foo"} wrappedHandler := auth.WrapHandler(dummyHandler) for _, endpoint := range unauthorizedEndpoints { - req := httptest.NewRequest(http.MethodPost, "http://127.0.0.1:9650"+endpoint, strings.NewReader("")) - req.Header.Add("Authorization", "Bearer "+tokenStr) + req := httptest.NewRequest(http.MethodPost, hostName+endpoint, strings.NewReader("")) + req.Header.Add("Authorization", headerValStart+tokenStr) rr := httptest.NewRecorder() wrappedHandler.ServeHTTP(rr, req) - require.Equal(t, http.StatusUnauthorized, rr.Code) - require.Contains(t, rr.Body.String(), errTokenInsufficientPermission.Error()) - require.Regexp(t, unAuthorizedResponseRegex, rr.Body.String()) + require.Equal(http.StatusUnauthorized, rr.Code) + require.Contains(rr.Body.String(), errTokenInsufficientPermission.Error()) + require.Regexp(unAuthorizedResponseRegex, rr.Body.String()) } } func TestWrapHandlerAuthEndpoint(t *testing.T) { + require := require.New(t) + auth := NewFromHash(logging.NoLog{}, "auth", hashedPassword) // Make a token endpoints := []string{"/ext/info", "/ext/bc/X", "/ext/metrics", "", "/foo", "/ext/info/foo"} tokenStr, err := auth.NewToken(testPassword, defaultTokenLifespan, endpoints) - require.NoError(t, err) + require.NoError(err) wrappedHandler := auth.WrapHandler(dummyHandler) req := httptest.NewRequest(http.MethodPost, "http://127.0.0.1:9650/ext/auth", strings.NewReader("")) - req.Header.Add("Authorization", "Bearer "+tokenStr) + req.Header.Add("Authorization", headerValStart+tokenStr) rr := 
httptest.NewRecorder() wrappedHandler.ServeHTTP(rr, req) - require.Equal(t, http.StatusOK, rr.Code) + require.Equal(http.StatusOK, rr.Code) } func TestWrapHandlerAccessAll(t *testing.T) { + require := require.New(t) + auth := NewFromHash(logging.NoLog{}, "auth", hashedPassword) // Make a token that allows access to all endpoints endpoints := []string{"/ext/info", "/ext/bc/X", "/ext/metrics", "", "/foo", "/ext/foo/info"} tokenStr, err := auth.NewToken(testPassword, defaultTokenLifespan, []string{"*"}) - require.NoError(t, err) + require.NoError(err) wrappedHandler := auth.WrapHandler(dummyHandler) for _, endpoint := range endpoints { - req := httptest.NewRequest(http.MethodPost, "http://127.0.0.1:9650"+endpoint, strings.NewReader("")) - req.Header.Add("Authorization", "Bearer "+tokenStr) + req := httptest.NewRequest(http.MethodPost, hostName+endpoint, strings.NewReader("")) + req.Header.Add("Authorization", headerValStart+tokenStr) rr := httptest.NewRecorder() wrappedHandler.ServeHTTP(rr, req) - require.Equal(t, http.StatusOK, rr.Code) + require.Equal(http.StatusOK, rr.Code) } } func TestWriteUnauthorizedResponse(t *testing.T) { + require := require.New(t) + rr := httptest.NewRecorder() writeUnauthorizedResponse(rr, errTest) - require.Equal(t, http.StatusUnauthorized, rr.Code) - require.Equal(t, "{\"jsonrpc\":\"2.0\",\"error\":{\"code\":-32600,\"message\":\"non-nil error\"},\"id\":1}\n", rr.Body.String()) + require.Equal(http.StatusUnauthorized, rr.Code) + require.Equal(`{"jsonrpc":"2.0","error":{"code":-32600,"message":"non-nil error"},"id":1}`+"\n", rr.Body.String()) } func TestWrapHandlerMutatedRevokedToken(t *testing.T) { + require := require.New(t) + auth := NewFromHash(logging.NoLog{}, "auth", hashedPassword) // Make a token endpoints := []string{"/ext/info", "/ext/bc/X", "/ext/metrics"} tokenStr, err := auth.NewToken(testPassword, defaultTokenLifespan, endpoints) - require.NoError(t, err) + require.NoError(err) - err = auth.RevokeToken(tokenStr, testPassword) 
- require.NoError(t, err) + require.NoError(auth.RevokeToken(tokenStr, testPassword)) wrappedHandler := auth.WrapHandler(dummyHandler) for _, endpoint := range endpoints { - req := httptest.NewRequest(http.MethodPost, "http://127.0.0.1:9650"+endpoint, strings.NewReader("")) + req := httptest.NewRequest(http.MethodPost, hostName+endpoint, strings.NewReader("")) req.Header.Add("Authorization", fmt.Sprintf("Bearer %s=", tokenStr)) // The appended = at the end looks like padding rr := httptest.NewRecorder() wrappedHandler.ServeHTTP(rr, req) - require.Equal(t, http.StatusUnauthorized, rr.Code) + require.Equal(http.StatusUnauthorized, rr.Code) } } func TestWrapHandlerInvalidSigningMethod(t *testing.T) { + require := require.New(t) + auth := NewFromHash(logging.NoLog{}, "auth", hashedPassword).(*auth) // Make a token endpoints := []string{"/ext/info", "/ext/bc/X", "/ext/metrics"} idBytes := [tokenIDByteLen]byte{} - if _, err := rand.Read(idBytes[:]); err != nil { - t.Fatal(err) - } + _, err := rand.Read(idBytes[:]) + require.NoError(err) id := base64.RawURLEncoding.EncodeToString(idBytes[:]) claims := endpointClaims{ @@ -327,19 +352,17 @@ func TestWrapHandlerInvalidSigningMethod(t *testing.T) { } token := jwt.NewWithClaims(jwt.SigningMethodHS512, &claims) tokenStr, err := token.SignedString(auth.password.Password[:]) - if err != nil { - t.Fatal(err) - } + require.NoError(err) wrappedHandler := auth.WrapHandler(dummyHandler) for _, endpoint := range endpoints { - req := httptest.NewRequest(http.MethodPost, "http://127.0.0.1:9650"+endpoint, strings.NewReader("")) - req.Header.Add("Authorization", "Bearer "+tokenStr) + req := httptest.NewRequest(http.MethodPost, hostName+endpoint, strings.NewReader("")) + req.Header.Add("Authorization", headerValStart+tokenStr) rr := httptest.NewRecorder() wrappedHandler.ServeHTTP(rr, req) - require.Equal(t, http.StatusUnauthorized, rr.Code) - require.Contains(t, rr.Body.String(), errInvalidSigningMethod.Error()) - require.Regexp(t, 
unAuthorizedResponseRegex, rr.Body.String()) + require.Equal(http.StatusUnauthorized, rr.Code) + require.Contains(rr.Body.String(), errInvalidSigningMethod.Error()) + require.Regexp(unAuthorizedResponseRegex, rr.Body.String()) } } diff --git a/avalanchego/api/auth/claims.go b/avalanchego/api/auth/claims.go index e2bf55d3..1cdda3d4 100644 --- a/avalanchego/api/auth/claims.go +++ b/avalanchego/api/auth/claims.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package auth diff --git a/avalanchego/api/auth/response.go b/avalanchego/api/auth/response.go index e87065c7..eca4b39d 100644 --- a/avalanchego/api/auth/response.go +++ b/avalanchego/api/auth/response.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package auth diff --git a/avalanchego/api/auth/service.go b/avalanchego/api/auth/service.go index 77517c17..badb544c 100644 --- a/avalanchego/api/auth/service.go +++ b/avalanchego/api/auth/service.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package auth diff --git a/avalanchego/api/common_args_responses.go b/avalanchego/api/common_args_responses.go index 458fdbf3..3517e058 100644 --- a/avalanchego/api/common_args_responses.go +++ b/avalanchego/api/common_args_responses.go @@ -1,12 +1,15 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package api import ( + "encoding/json" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/formatting" - "github.com/ava-labs/avalanchego/utils/json" + + avajson "github.com/ava-labs/avalanchego/utils/json" ) // This file contains structs used in arguments and responses in services @@ -69,13 +72,13 @@ type GetBlockArgs struct { // GetBlockByHeightArgs is the parameters supplied to the GetBlockByHeight API type GetBlockByHeightArgs struct { - Height uint64 `json:"height"` + Height avajson.Uint64 `json:"height"` Encoding formatting.Encoding `json:"encoding"` } // GetBlockResponse is the response object for the GetBlock API. type GetBlockResponse struct { - Block interface{} `json:"block"` + Block json.RawMessage `json:"block"` // If GetBlockResponse.Encoding is formatting.Hex, GetBlockResponse.Block is // the string representation of the block under hex encoding. // If GetBlockResponse.Encoding is formatting.JSON, GetBlockResponse.Block @@ -84,7 +87,7 @@ type GetBlockResponse struct { } type GetHeightResponse struct { - Height json.Uint64 `json:"height"` + Height avajson.Uint64 `json:"height"` } // FormattedBlock defines a JSON formatted struct containing a block in Hex @@ -105,7 +108,7 @@ type GetTxReply struct { // the tx under hex encoding. // If [GetTxArgs.Encoding] is [JSON], [Tx] is the actual tx, which will be // returned as JSON to the caller. 
- Tx interface{} `json:"tx"` + Tx json.RawMessage `json:"tx"` Encoding formatting.Encoding `json:"encoding"` } @@ -137,7 +140,7 @@ type Index struct { type GetUTXOsArgs struct { Addresses []string `json:"addresses"` SourceChain string `json:"sourceChain"` - Limit json.Uint32 `json:"limit"` + Limit avajson.Uint32 `json:"limit"` StartIndex Index `json:"startIndex"` Encoding formatting.Encoding `json:"encoding"` } @@ -145,7 +148,7 @@ type GetUTXOsArgs struct { // GetUTXOsReply defines the GetUTXOs replies returned from the API type GetUTXOsReply struct { // Number of UTXOs returned - NumFetched json.Uint64 `json:"numFetched"` + NumFetched avajson.Uint64 `json:"numFetched"` // The UTXOs UTXOs []string `json:"utxos"` // The last UTXO that was returned, and the address it corresponds to. diff --git a/avalanchego/api/health/README.md b/avalanchego/api/health/README.md new file mode 100644 index 00000000..ddefd4de --- /dev/null +++ b/avalanchego/api/health/README.md @@ -0,0 +1,38 @@ +# Health Checking + +## Health Check Types + +### Readiness + +Readiness is a special type of health check. Readiness checks will only run until they pass for the first time. After a readiness check passes, it will never be run again. These checks are typically used to indicate that the startup of a component has finished. + +### Health + +Health checks typically indicate that a component is operating as expected. The health of a component may flip due to any arbitrary heuristic the component exposes. + +### Liveness + +Liveness checks are intended to indicate that a component has become unhealthy and has no way to recover. + +## Naming and Tags + +All registered checks must have a unique name which will be included in the health check results. + +Additionally, checks can optionally specify an arbitrary number of tags which can be used to group health checks together. + +### Special Tags + +- "All" is a tag that is automatically added for every check that is registered. 
+- "Application" checks are checks that are globally applicable. This means that it is not possible to filter application-wide health checks from a response. + +## Health Check Worker + +Readiness, Health, and Liveness checks are all implemented by using their own health check worker. + +A health check worker starts a goroutine that updates the health of all registered checks every `freq`. By default `freq` is set to `30s`. + +When a health check is added it will always initially report as unhealthy. + +Every health check runs in its own goroutine to maximize concurrency. It is guaranteed that no locks from the health checker are held during the execution of the health check. + +When the health check worker is stopped, it will finish executing any currently running health checks and then terminate its primary goroutine. After the health check worker is stopped, the health checks will never run again. diff --git a/avalanchego/api/health/checker.go b/avalanchego/api/health/checker.go index efc89517..b30e4506 100644 --- a/avalanchego/api/health/checker.go +++ b/avalanchego/api/health/checker.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package health diff --git a/avalanchego/api/health/client.go b/avalanchego/api/health/client.go index 7c615757..59daa555 100644 --- a/avalanchego/api/health/client.go +++ b/avalanchego/api/health/client.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package health diff --git a/avalanchego/api/health/client_test.go b/avalanchego/api/health/client_test.go index 88d0696e..e019829e 100644 --- a/avalanchego/api/health/client_test.go +++ b/avalanchego/api/health/client_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. 
All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package health @@ -87,7 +87,7 @@ func TestClient(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 20*time.Microsecond) healthy, err := AwaitHealthy(ctx, c, time.Microsecond, nil) cancel() - require.Error(err) + require.ErrorIs(err, context.DeadlineExceeded) require.False(healthy) } @@ -95,7 +95,7 @@ func TestClient(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 20*time.Microsecond) healthy, err := AwaitReady(ctx, c, time.Microsecond, nil) cancel() - require.Error(err) + require.ErrorIs(err, context.DeadlineExceeded) require.False(healthy) } diff --git a/avalanchego/api/health/handler.go b/avalanchego/api/health/handler.go index a8bd8269..b2973ddc 100644 --- a/avalanchego/api/health/handler.go +++ b/avalanchego/api/health/handler.go @@ -1,24 +1,24 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package health import ( + "encoding/json" "net/http" - stdjson "encoding/json" - "github.com/gorilla/rpc/v2" - "github.com/ava-labs/avalanchego/utils/json" "github.com/ava-labs/avalanchego/utils/logging" + + avajson "github.com/ava-labs/avalanchego/utils/json" ) // NewGetAndPostHandler returns a health handler that supports GET and jsonrpc // POST requests. func NewGetAndPostHandler(log logging.Logger, reporter Reporter) (http.Handler, error) { newServer := rpc.NewServer() - codec := json.NewCodec() + codec := avajson.NewCodec() newServer.RegisterCodec(codec, "application/json") newServer.RegisterCodec(codec, "application/json;charset=UTF-8") @@ -60,7 +60,7 @@ func NewGetHandler(reporter func(tags ...string) (map[string]Result, bool)) http } // The encoder will call write on the writer, which will write the // header with a 200. 
- _ = stdjson.NewEncoder(w).Encode(APIReply{ + _ = json.NewEncoder(w).Encode(APIReply{ Checks: checks, Healthy: healthy, }) diff --git a/avalanchego/api/health/health.go b/avalanchego/api/health/health.go index 874944e7..9997d665 100644 --- a/avalanchego/api/health/health.go +++ b/avalanchego/api/health/health.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package health @@ -8,17 +8,20 @@ import ( "time" "github.com/prometheus/client_golang/prometheus" - "go.uber.org/zap" "github.com/ava-labs/avalanchego/utils/logging" ) -// GlobalTag is the tag that is returned for all health check results, -// regardless of the tags passed to the Reporter. -// Registering a health check with this tag will ensure that it is always -// included in the results. -const GlobalTag = "global" +const ( + // AllTag is automatically added to every registered check. + AllTag = "all" + // ApplicationTag checks will act as if they specified every tag that has + // been registered. + // Registering a health check with this tag will ensure that it is always + // included in all health query results. 
+ ApplicationTag = "application" +) var _ Health = (*health)(nil) @@ -59,17 +62,17 @@ type health struct { } func New(log logging.Logger, registerer prometheus.Registerer) (Health, error) { - readinessWorker, err := newWorker("readiness", registerer) + readinessWorker, err := newWorker(log, "readiness", registerer) if err != nil { return nil, err } - healthWorker, err := newWorker("health", registerer) + healthWorker, err := newWorker(log, "health", registerer) if err != nil { return nil, err } - livenessWorker, err := newWorker("liveness", registerer) + livenessWorker, err := newWorker(log, "liveness", registerer) return &health{ log: log, readiness: readinessWorker, @@ -93,7 +96,8 @@ func (h *health) RegisterLivenessCheck(name string, checker Checker, tags ...str func (h *health) Readiness(tags ...string) (map[string]Result, bool) { results, healthy := h.readiness.Results(tags...) if !healthy { - h.log.Warn("failing readiness check", + h.log.Warn("failing check", + zap.String("namespace", "readiness"), zap.Reflect("reason", results), ) } @@ -103,7 +107,8 @@ func (h *health) Readiness(tags ...string) (map[string]Result, bool) { func (h *health) Health(tags ...string) (map[string]Result, bool) { results, healthy := h.health.Results(tags...) if !healthy { - h.log.Warn("failing health check", + h.log.Warn("failing check", + zap.String("namespace", "health"), zap.Reflect("reason", results), ) } @@ -113,7 +118,8 @@ func (h *health) Health(tags ...string) (map[string]Result, bool) { func (h *health) Liveness(tags ...string) (map[string]Result, bool) { results, healthy := h.liveness.Results(tags...) 
if !healthy { - h.log.Warn("failing liveness check", + h.log.Warn("failing check", + zap.String("namespace", "liveness"), zap.Reflect("reason", results), ) } diff --git a/avalanchego/api/health/health_test.go b/avalanchego/api/health/health_test.go index d8d13331..d4970273 100644 --- a/avalanchego/api/health/health_test.go +++ b/avalanchego/api/health/health_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package health @@ -12,7 +12,6 @@ import ( "time" "github.com/prometheus/client_golang/prometheus" - "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/utils" @@ -58,18 +57,15 @@ func TestDuplicatedRegistations(t *testing.T) { h, err := New(logging.NoLog{}, prometheus.NewRegistry()) require.NoError(err) - err = h.RegisterReadinessCheck("check", check) - require.NoError(err) + require.NoError(h.RegisterReadinessCheck("check", check)) err = h.RegisterReadinessCheck("check", check) require.ErrorIs(err, errDuplicateCheck) - err = h.RegisterHealthCheck("check", check) - require.NoError(err) + require.NoError(h.RegisterHealthCheck("check", check)) err = h.RegisterHealthCheck("check", check) require.ErrorIs(err, errDuplicateCheck) - err = h.RegisterLivenessCheck("check", check) - require.NoError(err) + require.NoError(h.RegisterLivenessCheck("check", check)) err = h.RegisterLivenessCheck("check", check) require.ErrorIs(err, errDuplicateCheck) } @@ -85,8 +81,7 @@ func TestDefaultFailing(t *testing.T) { require.NoError(err) { - err = h.RegisterReadinessCheck("check", check) - require.NoError(err) + require.NoError(h.RegisterReadinessCheck("check", check)) readinessResult, readiness := h.Readiness() require.Len(readinessResult, 1) @@ -96,8 +91,7 @@ func TestDefaultFailing(t *testing.T) { } { - err = h.RegisterHealthCheck("check", check) - require.NoError(err) + require.NoError(h.RegisterHealthCheck("check", 
check)) healthResult, health := h.Health() require.Len(healthResult, 1) @@ -107,8 +101,7 @@ func TestDefaultFailing(t *testing.T) { } { - err = h.RegisterLivenessCheck("check", check) - require.NoError(err) + require.NoError(h.RegisterLivenessCheck("check", check)) livenessResult, liveness := h.Liveness() require.Len(livenessResult, 1) @@ -128,12 +121,9 @@ func TestPassingChecks(t *testing.T) { h, err := New(logging.NoLog{}, prometheus.NewRegistry()) require.NoError(err) - err = h.RegisterReadinessCheck("check", check) - require.NoError(err) - err = h.RegisterHealthCheck("check", check) - require.NoError(err) - err = h.RegisterLivenessCheck("check", check) - require.NoError(err) + require.NoError(h.RegisterReadinessCheck("check", check)) + require.NoError(h.RegisterHealthCheck("check", check)) + require.NoError(h.RegisterLivenessCheck("check", check)) h.Start(context.Background(), checkFreq) defer h.Stop() @@ -195,12 +185,9 @@ func TestPassingThenFailingChecks(t *testing.T) { h, err := New(logging.NoLog{}, prometheus.NewRegistry()) require.NoError(err) - err = h.RegisterReadinessCheck("check", check) - require.NoError(err) - err = h.RegisterHealthCheck("check", check) - require.NoError(err) - err = h.RegisterLivenessCheck("check", check) - require.NoError(err) + require.NoError(h.RegisterReadinessCheck("check", check)) + require.NoError(h.RegisterHealthCheck("check", check)) + require.NoError(h.RegisterLivenessCheck("check", check)) h.Start(context.Background(), checkFreq) defer h.Stop() @@ -256,11 +243,10 @@ func TestDeadlockRegression(t *testing.T) { h.Start(context.Background(), time.Nanosecond) defer h.Stop() - for i := 0; i < 1000; i++ { + for i := 0; i < 100; i++ { lock.Lock() - err = h.RegisterHealthCheck(fmt.Sprintf("check-%d", i), check) + require.NoError(h.RegisterHealthCheck(fmt.Sprintf("check-%d", i), check)) lock.Unlock() - require.NoError(err) } awaitHealthy(t, h, true) @@ -275,16 +261,11 @@ func TestTags(t *testing.T) { h, err := New(logging.NoLog{}, 
prometheus.NewRegistry()) require.NoError(err) - err = h.RegisterHealthCheck("check1", check) - require.NoError(err) - err = h.RegisterHealthCheck("check2", check, "tag1") - require.NoError(err) - err = h.RegisterHealthCheck("check3", check, "tag2") - require.NoError(err) - err = h.RegisterHealthCheck("check4", check, "tag1", "tag2") - require.NoError(err) - err = h.RegisterHealthCheck("check5", check, GlobalTag) - require.NoError(err) + require.NoError(h.RegisterHealthCheck("check1", check)) + require.NoError(h.RegisterHealthCheck("check2", check, "tag1")) + require.NoError(h.RegisterHealthCheck("check3", check, "tag2")) + require.NoError(h.RegisterHealthCheck("check4", check, "tag1", "tag2")) + require.NoError(h.RegisterHealthCheck("check5", check, ApplicationTag)) // default checks { @@ -374,8 +355,7 @@ func TestTags(t *testing.T) { { // now we'll add a new check which is unhealthy by default (notYetRunResult) - err = h.RegisterHealthCheck("check6", check, "tag1") - require.NoError(err) + require.NoError(h.RegisterHealthCheck("check6", check, "tag1")) awaitHealthy(t, h, false) @@ -395,9 +375,8 @@ func TestTags(t *testing.T) { require.Contains(healthResult, "check5") require.True(health) - // add global tag - err = h.RegisterHealthCheck("check7", check, GlobalTag) - require.NoError(err) + // add application tag + require.NoError(h.RegisterHealthCheck("check7", check, ApplicationTag)) awaitHealthy(t, h, false) diff --git a/avalanchego/api/health/metrics.go b/avalanchego/api/health/metrics.go index f735ec14..fdb7b2ed 100644 --- a/avalanchego/api/health/metrics.go +++ b/avalanchego/api/health/metrics.go @@ -1,24 +1,27 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package health -import ( - "github.com/prometheus/client_golang/prometheus" -) +import "github.com/prometheus/client_golang/prometheus" type metrics struct { // failingChecks keeps track of the number of check failing - failingChecks prometheus.Gauge + failingChecks *prometheus.GaugeVec } func newMetrics(namespace string, registerer prometheus.Registerer) (*metrics, error) { metrics := &metrics{ - failingChecks: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "checks_failing", - Help: "number of currently failing health checks", - }), + failingChecks: prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: namespace, + Name: "checks_failing", + Help: "number of currently failing health checks", + }, + []string{"tag"}, + ), } + metrics.failingChecks.WithLabelValues(AllTag).Set(0) + metrics.failingChecks.WithLabelValues(ApplicationTag).Set(0) return metrics, registerer.Register(metrics.failingChecks) } diff --git a/avalanchego/api/health/result.go b/avalanchego/api/health/result.go index f5253c66..e243cba1 100644 --- a/avalanchego/api/health/result.go +++ b/avalanchego/api/health/result.go @@ -1,11 +1,9 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package health -import ( - "time" -) +import "time" // notYetRunResult is the result that is returned when a HealthCheck hasn't been // run yet. diff --git a/avalanchego/api/health/service.go b/avalanchego/api/health/service.go index 368d986c..7b485070 100644 --- a/avalanchego/api/health/service.go +++ b/avalanchego/api/health/service.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package health diff --git a/avalanchego/api/health/service_test.go b/avalanchego/api/health/service_test.go index c12bce1f..af7c463d 100644 --- a/avalanchego/api/health/service_test.go +++ b/avalanchego/api/health/service_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package health @@ -9,7 +9,6 @@ import ( "testing" "github.com/prometheus/client_golang/prometheus" - "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" @@ -31,17 +30,13 @@ func TestServiceResponses(t *testing.T) { health: h, } - err = h.RegisterReadinessCheck("check", check) - require.NoError(err) - err = h.RegisterHealthCheck("check", check) - require.NoError(err) - err = h.RegisterLivenessCheck("check", check) - require.NoError(err) + require.NoError(h.RegisterReadinessCheck("check", check)) + require.NoError(h.RegisterHealthCheck("check", check)) + require.NoError(h.RegisterLivenessCheck("check", check)) { reply := APIReply{} - err = s.Readiness(nil, &APIArgs{}, &reply) - require.NoError(err) + require.NoError(s.Readiness(nil, &APIArgs{}, &reply)) require.Len(reply.Checks, 1) require.Contains(reply.Checks, "check") @@ -51,8 +46,7 @@ func TestServiceResponses(t *testing.T) { { reply := APIReply{} - err = s.Health(nil, &APIArgs{}, &reply) - require.NoError(err) + require.NoError(s.Health(nil, &APIArgs{}, &reply)) require.Len(reply.Checks, 1) require.Contains(reply.Checks, "check") @@ -62,8 +56,7 @@ func TestServiceResponses(t *testing.T) { { reply := APIReply{} - err = s.Liveness(nil, &APIArgs{}, &reply) - require.NoError(err) + require.NoError(s.Liveness(nil, &APIArgs{}, &reply)) require.Len(reply.Checks, 1) require.Contains(reply.Checks, "check") @@ -80,8 +73,7 @@ func TestServiceResponses(t *testing.T) { { reply := APIReply{} - err = s.Readiness(nil, &APIArgs{}, &reply) - require.NoError(err) + 
require.NoError(s.Readiness(nil, &APIArgs{}, &reply)) result := reply.Checks["check"] require.Equal("", result.Details) @@ -92,8 +84,7 @@ func TestServiceResponses(t *testing.T) { { reply := APIReply{} - err = s.Health(nil, &APIArgs{}, &reply) - require.NoError(err) + require.NoError(s.Health(nil, &APIArgs{}, &reply)) result := reply.Checks["check"] require.Equal("", result.Details) @@ -104,8 +95,7 @@ func TestServiceResponses(t *testing.T) { { reply := APIReply{} - err = s.Liveness(nil, &APIArgs{}, &reply) - require.NoError(err) + require.NoError(s.Liveness(nil, &APIArgs{}, &reply)) result := reply.Checks["check"] require.Equal("", result.Details) @@ -170,14 +160,10 @@ func TestServiceTagResponse(t *testing.T) { h, err := New(logging.NoLog{}, prometheus.NewRegistry()) require.NoError(err) - err = test.register(h, "check1", check) - require.NoError(err) - err = test.register(h, "check2", check, subnetID1.String()) - require.NoError(err) - err = test.register(h, "check3", check, subnetID2.String()) - require.NoError(err) - err = test.register(h, "check4", check, subnetID1.String(), subnetID2.String()) - require.NoError(err) + require.NoError(test.register(h, "check1", check)) + require.NoError(test.register(h, "check2", check, subnetID1.String())) + require.NoError(test.register(h, "check3", check, subnetID2.String())) + require.NoError(test.register(h, "check4", check, subnetID1.String(), subnetID2.String())) s := &Service{ log: logging.NoLog{}, @@ -187,8 +173,7 @@ func TestServiceTagResponse(t *testing.T) { // default checks { reply := APIReply{} - err = test.check(s, nil, &APIArgs{}, &reply) - require.NoError(err) + require.NoError(test.check(s, nil, &APIArgs{}, &reply)) require.Len(reply.Checks, 4) require.Contains(reply.Checks, "check1") require.Contains(reply.Checks, "check2") @@ -197,8 +182,7 @@ func TestServiceTagResponse(t *testing.T) { require.Equal(notYetRunResult, reply.Checks["check1"]) require.False(reply.Healthy) - err = test.check(s, nil, 
&APIArgs{Tags: []string{subnetID1.String()}}, &reply) - require.NoError(err) + require.NoError(test.check(s, nil, &APIArgs{Tags: []string{subnetID1.String()}}, &reply)) require.Len(reply.Checks, 2) require.Contains(reply.Checks, "check2") require.Contains(reply.Checks, "check4") @@ -212,8 +196,7 @@ func TestServiceTagResponse(t *testing.T) { { reply := APIReply{} - err = test.check(s, nil, &APIArgs{Tags: []string{subnetID1.String()}}, &reply) - require.NoError(err) + require.NoError(test.check(s, nil, &APIArgs{Tags: []string{subnetID1.String()}}, &reply)) require.Len(reply.Checks, 2) require.Contains(reply.Checks, "check2") require.Contains(reply.Checks, "check4") @@ -225,12 +208,10 @@ func TestServiceTagResponse(t *testing.T) { { // now we'll add a new check which is unhealthy by default (notYetRunResult) - err = test.register(h, "check5", check, subnetID1.String()) - require.NoError(err) + require.NoError(test.register(h, "check5", check, subnetID1.String())) reply := APIReply{} - err = test.check(s, nil, &APIArgs{Tags: []string{subnetID1.String()}}, &reply) - require.NoError(err) + require.NoError(test.check(s, nil, &APIArgs{Tags: []string{subnetID1.String()}}, &reply)) require.Len(reply.Checks, 3) require.Contains(reply.Checks, "check2") require.Contains(reply.Checks, "check4") diff --git a/avalanchego/api/health/worker.go b/avalanchego/api/health/worker.go index 9db01b82..91fad853 100644 --- a/avalanchego/api/health/worker.go +++ b/avalanchego/api/health/worker.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package health @@ -7,27 +7,37 @@ import ( "context" "errors" "fmt" + "maps" + "slices" "sync" "time" "github.com/prometheus/client_golang/prometheus" - - "golang.org/x/exp/maps" + "go.uber.org/zap" "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/set" ) -var errDuplicateCheck = errors.New("duplicated check") +var ( + allTags = []string{AllTag} + + errRestrictedTag = errors.New("restricted tag") + errDuplicateCheck = errors.New("duplicated check") +) type worker struct { + log logging.Logger + namespace string metrics *metrics checksLock sync.RWMutex - checks map[string]Checker + checks map[string]*taggedChecker - resultsLock sync.RWMutex - results map[string]Result - tags map[string]set.Set[string] // tag -> set of check names + resultsLock sync.RWMutex + results map[string]Result + numFailingApplicationChecks int + tags map[string]set.Set[string] // tag -> set of check names startOnce sync.Once closeOnce sync.Once @@ -35,18 +45,36 @@ type worker struct { closer chan struct{} } -func newWorker(namespace string, registerer prometheus.Registerer) (*worker, error) { +type taggedChecker struct { + checker Checker + isApplicationCheck bool + tags []string +} + +func newWorker( + log logging.Logger, + namespace string, + registerer prometheus.Registerer, +) (*worker, error) { metrics, err := newMetrics(namespace, registerer) return &worker{ - metrics: metrics, - checks: make(map[string]Checker), - results: make(map[string]Result), - closer: make(chan struct{}), - tags: make(map[string]set.Set[string]), + log: log, + namespace: namespace, + metrics: metrics, + checks: make(map[string]*taggedChecker), + results: make(map[string]Result), + closer: make(chan struct{}), + tags: make(map[string]set.Set[string]), }, err } -func (w *worker) RegisterCheck(name string, checker Checker, tags ...string) error { +func (w *worker) RegisterCheck(name string, check Checker, tags ...string) error { + // We 
ensure [AllTag] isn't contained in [tags] to prevent metrics from + // double counting. + if slices.Contains(tags, AllTag) { + return fmt.Errorf("%w: %q", errRestrictedTag, AllTag) + } + w.checksLock.Lock() defer w.checksLock.Unlock() @@ -57,18 +85,36 @@ func (w *worker) RegisterCheck(name string, checker Checker, tags ...string) err w.resultsLock.Lock() defer w.resultsLock.Unlock() - w.checks[name] = checker - w.results[name] = notYetRunResult - - // Add the check to the tag + // Add the check to each tag for _, tag := range tags { names := w.tags[tag] names.Add(name) w.tags[tag] = names } + // Add the special AllTag descriptor + names := w.tags[AllTag] + names.Add(name) + w.tags[AllTag] = names + + applicationChecks := w.tags[ApplicationTag] + tc := &taggedChecker{ + checker: check, + isApplicationCheck: applicationChecks.Contains(name), + tags: tags, + } + w.checks[name] = tc + w.results[name] = notYetRunResult // Whenever a new check is added - it is failing - w.metrics.failingChecks.Inc() + w.log.Info("registered new check and initialized its state to failing", + zap.String("namespace", w.namespace), + zap.String("name", name), + zap.Strings("tags", tags), + ) + + // If this is a new application-wide check, then all of the registered tags + // now have one additional failing check. + w.updateMetrics(tc, false /*=healthy*/, true /*=register*/) return nil } @@ -92,41 +138,34 @@ func (w *worker) Results(tags ...string) (map[string]Result, bool) { w.resultsLock.RLock() defer w.resultsLock.RUnlock() - results := make(map[string]Result, len(w.results)) - healthy := true + // if no tags are specified, return all checks + if len(tags) == 0 { + tags = allTags + } - // if tags are specified, iterate through registered check names in the tag - if len(tags) > 0 { - names := set.Set[string]{} - // prepare tagSet for global tag - tagSet := set.NewSet[string](len(tags) + 1) - tagSet.Add(tags...) 
- // we always want to include the global tag - tagSet.Add(GlobalTag) - for tag := range tagSet { - if set, ok := w.tags[tag]; ok { - names.Union(set) - } - } - for name := range names { - if result, ok := w.results[name]; ok { - results[name] = result - healthy = healthy && result.Error == nil - } + names := set.Set[string]{} + tagSet := set.Of(tags...) + tagSet.Add(ApplicationTag) // we always want to include the application tag + for tag := range tagSet { + if set, ok := w.tags[tag]; ok { + names.Union(set) } - } else { // if tags are not specified, iterate through all registered check names - for name, result := range w.results { + } + + results := make(map[string]Result, names.Len()) + healthy := true + for name := range names { + if result, ok := w.results[name]; ok { results[name] = result healthy = healthy && result.Error == nil } } - return results, healthy } func (w *worker) Start(ctx context.Context, freq time.Duration) { w.startOnce.Do(func() { - detachedCtx := utils.Detach(ctx) + detachedCtx := context.WithoutCancel(ctx) w.wg.Add(1) go func() { ticker := time.NewTicker(freq) @@ -172,7 +211,7 @@ func (w *worker) runChecks(ctx context.Context) { wg.Wait() } -func (w *worker) runCheck(ctx context.Context, wg *sync.WaitGroup, name string, check Checker) { +func (w *worker) runCheck(ctx context.Context, wg *sync.WaitGroup, name string, check *taggedChecker) { defer wg.Done() start := time.Now() @@ -180,7 +219,7 @@ func (w *worker) runCheck(ctx context.Context, wg *sync.WaitGroup, name string, // To avoid any deadlocks when [RegisterCheck] is called with a lock // that is grabbed by [check.HealthCheck], we ensure that no locks // are held when [check.HealthCheck] is called. 
- details, err := check.HealthCheck(ctx) + details, err := check.checker.HealthCheck(ctx) end := time.Now() result := Result{ @@ -204,10 +243,65 @@ func (w *worker) runCheck(ctx context.Context, wg *sync.WaitGroup, name string, } if prevResult.Error == nil { - w.metrics.failingChecks.Inc() + w.log.Warn("check started failing", + zap.String("namespace", w.namespace), + zap.String("name", name), + zap.Strings("tags", check.tags), + zap.Error(err), + ) + w.updateMetrics(check, false /*=healthy*/, false /*=register*/) } } else if prevResult.Error != nil { - w.metrics.failingChecks.Dec() + w.log.Info("check started passing", + zap.String("namespace", w.namespace), + zap.String("name", name), + zap.Strings("tags", check.tags), + ) + w.updateMetrics(check, true /*=healthy*/, false /*=register*/) } w.results[name] = result } + +// updateMetrics updates the metrics for the given check. If [healthy] is true, +// then the check is considered healthy and the metrics are decremented. +// Otherwise, the check is considered unhealthy and the metrics are incremented. +// [register] must be true only if this is the first time the check is being +// registered. +func (w *worker) updateMetrics(tc *taggedChecker, healthy bool, register bool) { + if tc.isApplicationCheck { + // Note: [w.tags] will include AllTag. + for tag := range w.tags { + gauge := w.metrics.failingChecks.WithLabelValues(tag) + if healthy { + gauge.Dec() + } else { + gauge.Inc() + } + } + if healthy { + w.numFailingApplicationChecks-- + } else { + w.numFailingApplicationChecks++ + } + } else { + for _, tag := range tc.tags { + gauge := w.metrics.failingChecks.WithLabelValues(tag) + if healthy { + gauge.Dec() + } else { + gauge.Inc() + // If this is the first time this tag was registered, we also need to + // account for the currently failing application-wide checks. 
+ if register && w.tags[tag].Len() == 1 { + gauge.Add(float64(w.numFailingApplicationChecks)) + } + } + } + gauge := w.metrics.failingChecks.WithLabelValues(AllTag) + if healthy { + gauge.Dec() + } else { + gauge.Inc() + } + } +} diff --git a/avalanchego/api/info/client.go b/avalanchego/api/info/client.go index f952f62a..6caafd42 100644 --- a/avalanchego/api/info/client.go +++ b/avalanchego/api/info/client.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package info diff --git a/avalanchego/api/info/client_test.go b/avalanchego/api/info/client_test.go index 292a1841..7923ff94 100644 --- a/avalanchego/api/info/client_test.go +++ b/avalanchego/api/info/client_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package info diff --git a/avalanchego/api/info/service.go b/avalanchego/api/info/service.go index b9b12062..929251d2 100644 --- a/avalanchego/api/info/service.go +++ b/avalanchego/api/info/service.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package info @@ -9,23 +9,25 @@ import ( "net/http" "github.com/gorilla/rpc/v2" - "go.uber.org/zap" "github.com/ava-labs/avalanchego/chains" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/network" "github.com/ava-labs/avalanchego/network/peer" - "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/networking/benchlist" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/ips" "github.com/ava-labs/avalanchego/utils/json" "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/version" "github.com/ava-labs/avalanchego/vms" + "github.com/ava-labs/avalanchego/vms/nftfx" "github.com/ava-labs/avalanchego/vms/platformvm/signer" + "github.com/ava-labs/avalanchego/vms/propertyfx" + "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) var errNoChainProvided = errors.New("argument 'chain' not given") @@ -34,11 +36,11 @@ var errNoChainProvided = errors.New("argument 'chain' not given") type Info struct { Parameters log logging.Logger + validators validators.Manager myIP ips.DynamicIPPort networking network.Network chainManager chains.Manager vmManager vms.Manager - validators validators.Set benchlist benchlist.Manager } @@ -59,37 +61,33 @@ type Parameters struct { VMManager vms.Manager } -// NewService returns a new admin API service func NewService( parameters Parameters, log logging.Logger, + validators validators.Manager, chainManager chains.Manager, vmManager vms.Manager, myIP ips.DynamicIPPort, network network.Network, - validators validators.Set, benchlist benchlist.Manager, -) (*common.HTTPHandler, error) { - newServer := rpc.NewServer() +) (http.Handler, error) { + server := rpc.NewServer() codec := json.NewCodec() - newServer.RegisterCodec(codec, "application/json") - newServer.RegisterCodec(codec, "application/json;charset=UTF-8") - if err := 
newServer.RegisterService(&Info{ - Parameters: parameters, - log: log, - chainManager: chainManager, - vmManager: vmManager, - myIP: myIP, - networking: network, - validators: validators, - benchlist: benchlist, - }, "info"); err != nil { - return nil, err - } - return &common.HTTPHandler{ - LockOptions: common.NoLock, - Handler: newServer, - }, nil + server.RegisterCodec(codec, "application/json") + server.RegisterCodec(codec, "application/json;charset=UTF-8") + return server, server.RegisterService( + &Info{ + Parameters: parameters, + log: log, + validators: validators, + chainManager: chainManager, + vmManager: vmManager, + myIP: myIP, + networking: network, + benchlist: benchlist, + }, + "info", + ) } // GetNodeVersionReply are the results from calling GetNodeVersion @@ -217,7 +215,7 @@ type PeersArgs struct { type Peer struct { peer.Info - Benched []ids.ID `json:"benched"` + Benched []string `json:"benched"` } // PeersReply are the results from calling Peers @@ -238,9 +236,18 @@ func (i *Info) Peers(_ *http.Request, args *PeersArgs, reply *PeersReply) error peers := i.networking.PeerInfo(args.NodeIDs) peerInfo := make([]Peer, len(peers)) for index, peer := range peers { + benchedIDs := i.benchlist.GetBenched(peer.ID) + benchedAliases := make([]string, len(benchedIDs)) + for idx, id := range benchedIDs { + alias, err := i.chainManager.PrimaryAlias(id) + if err != nil { + return fmt.Errorf("failed to get primary alias for chain ID %s: %w", id, err) + } + benchedAliases[idx] = alias + } peerInfo[index] = Peer{ Info: peer, - Benched: i.benchlist.GetBenched(peer.ID), + Benched: benchedAliases, } } @@ -319,6 +326,64 @@ func (i *Info) Uptime(_ *http.Request, args *UptimeRequest, reply *UptimeRespons return nil } +type ACP struct { + SupportWeight json.Uint64 `json:"supportWeight"` + Supporters set.Set[ids.NodeID] `json:"supporters"` + ObjectWeight json.Uint64 `json:"objectWeight"` + Objectors set.Set[ids.NodeID] `json:"objectors"` + AbstainWeight json.Uint64 
`json:"abstainWeight"` +} + +type ACPsReply struct { + ACPs map[uint32]*ACP `json:"acps"` +} + +func (a *ACPsReply) getACP(acpNum uint32) *ACP { + acp, ok := a.ACPs[acpNum] + if !ok { + acp = &ACP{} + a.ACPs[acpNum] = acp + } + return acp +} + +func (i *Info) Acps(_ *http.Request, _ *struct{}, reply *ACPsReply) error { + i.log.Debug("API called", + zap.String("service", "info"), + zap.String("method", "acps"), + ) + + reply.ACPs = make(map[uint32]*ACP, constants.CurrentACPs.Len()) + peers := i.networking.PeerInfo(nil) + for _, peer := range peers { + weight := json.Uint64(i.validators.GetWeight(constants.PrimaryNetworkID, peer.ID)) + if weight == 0 { + continue + } + + for acpNum := range peer.SupportedACPs { + acp := reply.getACP(acpNum) + acp.Supporters.Add(peer.ID) + acp.SupportWeight += weight + } + for acpNum := range peer.ObjectedACPs { + acp := reply.getACP(acpNum) + acp.Objectors.Add(peer.ID) + acp.ObjectWeight += weight + } + } + + totalWeight, err := i.validators.TotalWeight(constants.PrimaryNetworkID) + if err != nil { + return err + } + for acpNum := range constants.CurrentACPs { + acp := reply.getACP(acpNum) + acp.AbstainWeight = json.Uint64(totalWeight) - acp.SupportWeight - acp.ObjectWeight + } + return nil +} + type GetTxFeeResponse struct { TxFee json.Uint64 `json:"txFee"` CreateAssetTxFee json.Uint64 `json:"createAssetTxFee"` @@ -353,6 +418,7 @@ func (i *Info) GetTxFee(_ *http.Request, _ *struct{}, reply *GetTxFeeResponse) e // GetVMsReply contains the response metadata for GetVMs type GetVMsReply struct { VMs map[ids.ID][]string `json:"vms"` + Fxs map[ids.ID]string `json:"fxs"` } // GetVMs lists the virtual machines installed on the node @@ -369,5 +435,10 @@ func (i *Info) GetVMs(_ *http.Request, _ *struct{}, reply *GetVMsReply) error { } reply.VMs, err = ids.GetRelevantAliases(i.VMManager, vmIDs) + reply.Fxs = map[ids.ID]string{ + secp256k1fx.ID: secp256k1fx.Name, + nftfx.ID: nftfx.Name, + propertyfx.ID: propertyfx.Name, + } return err } diff 
--git a/avalanchego/api/info/service_test.go b/avalanchego/api/info/service_test.go index 3492b641..95b7767a 100644 --- a/avalanchego/api/info/service_test.go +++ b/avalanchego/api/info/service_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package info @@ -7,9 +7,8 @@ import ( "errors" "testing" - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/logging" @@ -21,32 +20,29 @@ var errTest = errors.New("non-nil error") type getVMsTest struct { info *Info ctrl *gomock.Controller - mockLog *logging.MockLogger mockVMManager *vms.MockManager } func initGetVMsTest(t *testing.T) *getVMsTest { ctrl := gomock.NewController(t) - - service := Info{} - mockLog := logging.NewMockLogger(ctrl) mockVMManager := vms.NewMockManager(ctrl) - - service.log = mockLog - service.VMManager = mockVMManager - return &getVMsTest{ - info: &service, + info: &Info{ + Parameters: Parameters{ + VMManager: mockVMManager, + }, + log: logging.NoLog{}, + }, ctrl: ctrl, - mockLog: mockLog, mockVMManager: mockVMManager, } } // Tests GetVMs in the happy-case func TestGetVMsSuccess(t *testing.T) { + require := require.New(t) + resources := initGetVMsTest(t) - defer resources.ctrl.Finish() id1 := ids.GenerateTestID() id2 := ids.GenerateTestID() @@ -61,49 +57,40 @@ func TestGetVMsSuccess(t *testing.T) { id2: alias2[1:], } - resources.mockLog.EXPECT().Debug(gomock.Any(), gomock.Any()).Times(1) resources.mockVMManager.EXPECT().ListFactories().Times(1).Return(vmIDs, nil) resources.mockVMManager.EXPECT().Aliases(id1).Times(1).Return(alias1, nil) resources.mockVMManager.EXPECT().Aliases(id2).Times(1).Return(alias2, nil) reply := GetVMsReply{} - err := resources.info.GetVMs(nil, nil, &reply) - - require.Equal(t, expectedVMRegistry, 
reply.VMs) - require.Equal(t, err, nil) + require.NoError(resources.info.GetVMs(nil, nil, &reply)) + require.Equal(expectedVMRegistry, reply.VMs) } // Tests GetVMs if we fail to list our vms. func TestGetVMsVMsListFactoriesFails(t *testing.T) { resources := initGetVMsTest(t) - defer resources.ctrl.Finish() - resources.mockLog.EXPECT().Debug(gomock.Any(), gomock.Any()).Times(1) resources.mockVMManager.EXPECT().ListFactories().Times(1).Return(nil, errTest) reply := GetVMsReply{} err := resources.info.GetVMs(nil, nil, &reply) - - require.Equal(t, errTest, err) + require.ErrorIs(t, err, errTest) } // Tests GetVMs if we can't get our vm aliases. func TestGetVMsGetAliasesFails(t *testing.T) { resources := initGetVMsTest(t) - defer resources.ctrl.Finish() id1 := ids.GenerateTestID() id2 := ids.GenerateTestID() vmIDs := []ids.ID{id1, id2} alias1 := []string{id1.String(), "vm1-alias-1", "vm1-alias-2"} - resources.mockLog.EXPECT().Debug(gomock.Any(), gomock.Any()).Times(1) resources.mockVMManager.EXPECT().ListFactories().Times(1).Return(vmIDs, nil) resources.mockVMManager.EXPECT().Aliases(id1).Times(1).Return(alias1, nil) resources.mockVMManager.EXPECT().Aliases(id2).Times(1).Return(nil, errTest) reply := GetVMsReply{} err := resources.info.GetVMs(nil, nil, &reply) - - require.Equal(t, err, errTest) + require.ErrorIs(t, err, errTest) } diff --git a/avalanchego/api/ipcs/client.go b/avalanchego/api/ipcs/client.go index 95391f0f..121c1855 100644 --- a/avalanchego/api/ipcs/client.go +++ b/avalanchego/api/ipcs/client.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package ipcs diff --git a/avalanchego/api/ipcs/service.go b/avalanchego/api/ipcs/service.go index 65b09a18..2d1fe775 100644 --- a/avalanchego/api/ipcs/service.go +++ b/avalanchego/api/ipcs/service.go @@ -1,82 +1,78 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. 
All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package ipcs import ( "net/http" + "sync" "github.com/gorilla/rpc/v2" - "go.uber.org/zap" "github.com/ava-labs/avalanchego/api" - "github.com/ava-labs/avalanchego/api/server" "github.com/ava-labs/avalanchego/chains" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/ipcs" - "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/utils/json" "github.com/ava-labs/avalanchego/utils/logging" ) -// IPCServer maintains the IPCs -type IPCServer struct { - httpServer server.Server - chainManager chains.Manager +type Service struct { log logging.Logger + chainManager chains.Manager + lock sync.RWMutex ipcs *ipcs.ChainIPCs } -// NewService returns a new IPCs API service -func NewService(log logging.Logger, chainManager chains.Manager, httpServer server.Server, ipcs *ipcs.ChainIPCs) (*common.HTTPHandler, error) { - ipcServer := &IPCServer{ - log: log, - chainManager: chainManager, - httpServer: httpServer, - - ipcs: ipcs, - } - - newServer := rpc.NewServer() +func NewService(log logging.Logger, chainManager chains.Manager, ipcs *ipcs.ChainIPCs) (http.Handler, error) { + server := rpc.NewServer() codec := json.NewCodec() - newServer.RegisterCodec(codec, "application/json") - newServer.RegisterCodec(codec, "application/json;charset=UTF-8") - - return &common.HTTPHandler{Handler: newServer}, newServer.RegisterService(ipcServer, "ipcs") + server.RegisterCodec(codec, "application/json") + server.RegisterCodec(codec, "application/json;charset=UTF-8") + return server, server.RegisterService( + &Service{ + log: log, + chainManager: chainManager, + ipcs: ipcs, + }, + "ipcs", + ) } -// PublishBlockchainArgs are the arguments for calling PublishBlockchain type PublishBlockchainArgs struct { BlockchainID string `json:"blockchainID"` } -// PublishBlockchainReply are the results from calling PublishBlockchain 
type PublishBlockchainReply struct { ConsensusURL string `json:"consensusURL"` DecisionsURL string `json:"decisionsURL"` } -// PublishBlockchain publishes the finalized accepted transactions from the blockchainID over the IPC -func (ipc *IPCServer) PublishBlockchain(_ *http.Request, args *PublishBlockchainArgs, reply *PublishBlockchainReply) error { - ipc.log.Warn("deprecated API called", +// PublishBlockchain publishes the finalized accepted transactions from the +// blockchainID over the IPC +func (s *Service) PublishBlockchain(_ *http.Request, args *PublishBlockchainArgs, reply *PublishBlockchainReply) error { + s.log.Warn("deprecated API called", zap.String("service", "ipcs"), zap.String("method", "publishBlockchain"), logging.UserString("blockchainID", args.BlockchainID), ) - chainID, err := ipc.chainManager.Lookup(args.BlockchainID) + chainID, err := s.chainManager.Lookup(args.BlockchainID) if err != nil { - ipc.log.Error("chain lookup failed", + s.log.Error("chain lookup failed", logging.UserString("blockchainID", args.BlockchainID), zap.Error(err), ) return err } - ipcs, err := ipc.ipcs.Publish(chainID) + s.lock.Lock() + defer s.lock.Unlock() + + ipcs, err := s.ipcs.Publish(chainID) if err != nil { - ipc.log.Error("couldn't publish chain", + s.log.Error("couldn't publish chain", logging.UserString("blockchainID", args.BlockchainID), zap.Error(err), ) @@ -89,31 +85,33 @@ func (ipc *IPCServer) PublishBlockchain(_ *http.Request, args *PublishBlockchain return nil } -// UnpublishBlockchainArgs are the arguments for calling UnpublishBlockchain type UnpublishBlockchainArgs struct { BlockchainID string `json:"blockchainID"` } // UnpublishBlockchain closes publishing of a blockchainID -func (ipc *IPCServer) UnpublishBlockchain(_ *http.Request, args *UnpublishBlockchainArgs, _ *api.EmptyReply) error { - ipc.log.Warn("deprecated API called", +func (s *Service) UnpublishBlockchain(_ *http.Request, args *UnpublishBlockchainArgs, _ *api.EmptyReply) error { + 
s.log.Warn("deprecated API called", zap.String("service", "ipcs"), zap.String("method", "unpublishBlockchain"), logging.UserString("blockchainID", args.BlockchainID), ) - chainID, err := ipc.chainManager.Lookup(args.BlockchainID) + chainID, err := s.chainManager.Lookup(args.BlockchainID) if err != nil { - ipc.log.Error("chain lookup failed", + s.log.Error("chain lookup failed", logging.UserString("blockchainID", args.BlockchainID), zap.Error(err), ) return err } - ok, err := ipc.ipcs.Unpublish(chainID) + s.lock.Lock() + defer s.lock.Unlock() + + ok, err := s.ipcs.Unpublish(chainID) if !ok { - ipc.log.Error("couldn't publish chain", + s.log.Error("couldn't publish chain", logging.UserString("blockchainID", args.BlockchainID), zap.Error(err), ) @@ -122,17 +120,20 @@ func (ipc *IPCServer) UnpublishBlockchain(_ *http.Request, args *UnpublishBlockc return err } -// GetPublishedBlockchainsReply is the result from calling GetPublishedBlockchains type GetPublishedBlockchainsReply struct { Chains []ids.ID `json:"chains"` } // GetPublishedBlockchains returns blockchains being published -func (ipc *IPCServer) GetPublishedBlockchains(_ *http.Request, _ *struct{}, reply *GetPublishedBlockchainsReply) error { - ipc.log.Warn("deprecated API called", +func (s *Service) GetPublishedBlockchains(_ *http.Request, _ *struct{}, reply *GetPublishedBlockchainsReply) error { + s.log.Warn("deprecated API called", zap.String("service", "ipcs"), zap.String("method", "getPublishedBlockchains"), ) - reply.Chains = ipc.ipcs.GetPublishedBlockchains() + + s.lock.RLock() + defer s.lock.RUnlock() + + reply.Chains = s.ipcs.GetPublishedBlockchains() return nil } diff --git a/avalanchego/api/keystore/blockchain_keystore.go b/avalanchego/api/keystore/blockchain_keystore.go index 4c163b96..31a3bdc5 100644 --- a/avalanchego/api/keystore/blockchain_keystore.go +++ b/avalanchego/api/keystore/blockchain_keystore.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
+// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package keystore diff --git a/avalanchego/api/keystore/client.go b/avalanchego/api/keystore/client.go index 43442ace..9d12ea0d 100644 --- a/avalanchego/api/keystore/client.go +++ b/avalanchego/api/keystore/client.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package keystore diff --git a/avalanchego/api/keystore/codec.go b/avalanchego/api/keystore/codec.go index df6c18ae..b925747c 100644 --- a/avalanchego/api/keystore/codec.go +++ b/avalanchego/api/keystore/codec.go @@ -1,27 +1,28 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package keystore import ( + "time" + "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" "github.com/ava-labs/avalanchego/utils/units" ) const ( - maxPackerSize = 1 * units.GiB // max size, in bytes, of something being marshalled by Marshal() - maxSliceLength = 256 * 1024 + CodecVersion = 0 - codecVersion = 0 + maxPackerSize = 1 * units.GiB // max size, in bytes, of something being marshalled by Marshal() ) -var c codec.Manager +var Codec codec.Manager func init() { - lc := linearcodec.NewCustomMaxLength(maxSliceLength) - c = codec.NewManager(maxPackerSize) - if err := c.RegisterCodec(codecVersion, lc); err != nil { + lc := linearcodec.NewDefault(time.Time{}) + Codec = codec.NewManager(maxPackerSize) + if err := Codec.RegisterCodec(CodecVersion, lc); err != nil { panic(err) } } diff --git a/avalanchego/api/keystore/gkeystore/keystore_client.go b/avalanchego/api/keystore/gkeystore/keystore_client.go index 6bbfc6f9..87527a64 100644 --- a/avalanchego/api/keystore/gkeystore/keystore_client.go +++ 
b/avalanchego/api/keystore/gkeystore/keystore_client.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package gkeystore diff --git a/avalanchego/api/keystore/gkeystore/keystore_server.go b/avalanchego/api/keystore/gkeystore/keystore_server.go index 9244939d..65e6e90e 100644 --- a/avalanchego/api/keystore/gkeystore/keystore_server.go +++ b/avalanchego/api/keystore/gkeystore/keystore_server.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package gkeystore diff --git a/avalanchego/api/keystore/keystore.go b/avalanchego/api/keystore/keystore.go index fecd63b1..ed3c9d21 100644 --- a/avalanchego/api/keystore/keystore.go +++ b/avalanchego/api/keystore/keystore.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package keystore @@ -14,7 +14,6 @@ import ( "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/encdb" - "github.com/ava-labs/avalanchego/database/manager" "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/json" @@ -28,8 +27,11 @@ const ( ) var ( - errEmptyUsername = errors.New("empty username") - errUserMaxLength = fmt.Errorf("username exceeds maximum length of %d chars", maxUserLen) + errEmptyUsername = errors.New("empty username") + errUserMaxLength = fmt.Errorf("username exceeds maximum length of %d chars", maxUserLen) + errUserAlreadyExists = errors.New("user already exists") + errIncorrectPassword = errors.New("incorrect password") + errNonexistentUser = errors.New("user doesn't exist") usersPrefix = []byte("users") bcsPrefix = []byte("bcs") @@ -100,22 +102,14 @@ type keystore struct { // Used to persist users and their data userDB database.Database bcDB database.Database - // BaseDB - // / \ - // UserDB BlockchainDB - // / | \ - // Usr Usr Usr - // / | \ - // BID BID BID } -func New(log logging.Logger, dbManager manager.Manager) Keystore { - currentDB := dbManager.Current() +func New(log logging.Logger, db database.Database) Keystore { return &keystore{ log: log, usernameToPassword: make(map[string]*password.Hash), - userDB: prefixdb.New(usersPrefix, currentDB.Database), - bcDB: prefixdb.New(bcsPrefix, currentDB.Database), + userDB: prefixdb.New(usersPrefix, db), + bcDB: prefixdb.New(bcsPrefix, db), } } @@ -158,7 +152,7 @@ func (ks *keystore) GetRawDatabase(bID ids.ID, username, pw string) (database.Da return nil, err } if passwordHash == nil || !passwordHash.Check(pw) { - return nil, fmt.Errorf("incorrect password for user %q", username) + return nil, fmt.Errorf("%w: user %q", errIncorrectPassword, username) } userDB := prefixdb.New([]byte(username), ks.bcDB) @@ -182,7 +176,7 @@ func (ks *keystore) 
CreateUser(username, pw string) error { return err } if passwordHash != nil { - return fmt.Errorf("user already exists: %s", username) + return fmt.Errorf("%w: %s", errUserAlreadyExists, username) } if err := password.IsValid(pw, password.OK); err != nil { @@ -194,7 +188,7 @@ func (ks *keystore) CreateUser(username, pw string) error { return err } - passwordBytes, err := c.Marshal(codecVersion, passwordHash) + passwordBytes, err := Codec.Marshal(CodecVersion, passwordHash) if err != nil { return err } @@ -224,9 +218,9 @@ func (ks *keystore) DeleteUser(username, pw string) error { case err != nil: return err case passwordHash == nil: - return fmt.Errorf("user doesn't exist: %s", username) + return fmt.Errorf("%w: %s", errNonexistentUser, username) case !passwordHash.Check(pw): - return fmt.Errorf("incorrect password for user %q", username) + return fmt.Errorf("%w: user %q", errIncorrectPassword, username) } userNameBytes := []byte(username) @@ -290,18 +284,18 @@ func (ks *keystore) ImportUser(username, pw string, userBytes []byte) error { return err } if passwordHash != nil { - return fmt.Errorf("user already exists: %s", username) + return fmt.Errorf("%w: %s", errUserAlreadyExists, username) } userData := user{} - if _, err := c.Unmarshal(userBytes, &userData); err != nil { + if _, err := Codec.Unmarshal(userBytes, &userData); err != nil { return err } if !userData.Hash.Check(pw) { - return fmt.Errorf("incorrect password for user %q", username) + return fmt.Errorf("%w: user %q", errIncorrectPassword, username) } - usrBytes, err := c.Marshal(codecVersion, &userData.Hash) + usrBytes, err := Codec.Marshal(CodecVersion, &userData.Hash) if err != nil { return err } @@ -342,7 +336,7 @@ func (ks *keystore) ExportUser(username, pw string) ([]byte, error) { return nil, err } if passwordHash == nil || !passwordHash.Check(pw) { - return nil, fmt.Errorf("incorrect password for user %q", username) + return nil, fmt.Errorf("%w: user %q", errIncorrectPassword, username) } userDB 
:= prefixdb.New([]byte(username), ks.bcDB) @@ -361,7 +355,7 @@ func (ks *keystore) ExportUser(username, pw string) ([]byte, error) { } // Return the byte representation of the user - return c.Marshal(codecVersion, &userData) + return Codec.Marshal(CodecVersion, &userData) } func (ks *keystore) getPassword(username string) (*password.Hash, error) { @@ -383,6 +377,6 @@ func (ks *keystore) getPassword(username string) (*password.Hash, error) { } passwordHash = &password.Hash{} - _, err = c.Unmarshal(userBytes, passwordHash) + _, err = Codec.Unmarshal(userBytes, passwordHash) return passwordHash, err } diff --git a/avalanchego/api/keystore/service.go b/avalanchego/api/keystore/service.go index c0e823c2..aa56433e 100644 --- a/avalanchego/api/keystore/service.go +++ b/avalanchego/api/keystore/service.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package keystore @@ -10,11 +10,8 @@ import ( "go.uber.org/zap" "github.com/ava-labs/avalanchego/api" - "github.com/ava-labs/avalanchego/database/manager" - "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/utils/formatting" "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/version" ) type service struct { @@ -115,17 +112,3 @@ func (s *service) ExportUser(_ *http.Request, args *ExportUserArgs, reply *Expor reply.Encoding = args.Encoding return nil } - -// CreateTestKeystore returns a new keystore that can be utilized for testing -func CreateTestKeystore() (Keystore, error) { - dbManager, err := manager.NewManagerFromDBs([]*manager.VersionedDatabase{ - { - Database: memdb.New(), - Version: version.Semantic1_0_0, - }, - }) - if err != nil { - return nil, err - } - return New(logging.NoLog{}, dbManager), nil -} diff --git a/avalanchego/api/keystore/service_test.go b/avalanchego/api/keystore/service_test.go index 
84c8980e..c011c92e 100644 --- a/avalanchego/api/keystore/service_test.go +++ b/avalanchego/api/keystore/service_test.go @@ -1,18 +1,21 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package keystore import ( - "bytes" - "fmt" + "encoding/hex" "math/rand" - "reflect" "testing" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/api" + "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/formatting" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/password" ) // strongPassword defines a password used for the following tests that @@ -20,49 +23,34 @@ import ( var strongPassword = "N_+=_jJ;^(<;{4,:*m6CET}'&N;83FYK.wtNpwp-Jt" // #nosec G101 func TestServiceListNoUsers(t *testing.T) { - ks, err := CreateTestKeystore() - if err != nil { - t.Fatal(err) - } + require := require.New(t) + + ks := New(logging.NoLog{}, memdb.New()) s := service{ks: ks.(*keystore)} reply := ListUsersReply{} - if err := s.ListUsers(nil, nil, &reply); err != nil { - t.Fatal(err) - } - if len(reply.Users) != 0 { - t.Fatalf("No users should have been created yet") - } + require.NoError(s.ListUsers(nil, nil, &reply)) + require.Empty(reply.Users) } func TestServiceCreateUser(t *testing.T) { - ks, err := CreateTestKeystore() - if err != nil { - t.Fatal(err) - } + require := require.New(t) + + ks := New(logging.NoLog{}, memdb.New()) s := service{ks: ks.(*keystore)} { - err := s.CreateUser(nil, &api.UserPass{ + require.NoError(s.CreateUser(nil, &api.UserPass{ Username: "bob", Password: strongPassword, - }, &api.EmptyReply{}) - if err != nil { - t.Fatal(err) - } + }, &api.EmptyReply{})) } { reply := ListUsersReply{} - if err := s.ListUsers(nil, nil, &reply); err != nil { - t.Fatal(err) - } - if len(reply.Users) != 1 { - t.Fatalf("One user 
should have been created") - } - if user := reply.Users[0]; user != "bob" { - t.Fatalf("'bob' should have been a user that was created") - } + require.NoError(s.ListUsers(nil, nil, &reply)) + require.Len(reply.Users, 1) + require.Equal("bob", reply.Users[0]) } } @@ -70,16 +58,15 @@ func TestServiceCreateUser(t *testing.T) { func genStr(n int) string { b := make([]byte, n) rand.Read(b) // #nosec G404 - return fmt.Sprintf("%x", b)[:n] + return hex.EncodeToString(b)[:n] } // TestServiceCreateUserArgsCheck generates excessively long usernames or // passwords to assure the sanity checks on string length are not exceeded func TestServiceCreateUserArgsCheck(t *testing.T) { - ks, err := CreateTestKeystore() - if err != nil { - t.Fatal(err) - } + require := require.New(t) + + ks := New(logging.NoLog{}, memdb.New()) s := service{ks: ks.(*keystore)} { @@ -88,10 +75,7 @@ func TestServiceCreateUserArgsCheck(t *testing.T) { Username: genStr(maxUserLen + 1), Password: strongPassword, }, &reply) - - if err != errUserMaxLength { - t.Fatal("User was created when it should have been rejected due to too long a Username, err =", err) - } + require.ErrorIs(err, errUserMaxLength) } { @@ -100,31 +84,22 @@ func TestServiceCreateUserArgsCheck(t *testing.T) { Username: "shortuser", Password: genStr(maxUserLen + 1), }, &reply) - - if err == nil { - t.Fatal("User was created when it should have been rejected due to too long a Password, err =", err) - } + require.ErrorIs(err, password.ErrPassMaxLength) } { reply := ListUsersReply{} - if err := s.ListUsers(nil, nil, &reply); err != nil { - t.Fatal(err) - } - - if len(reply.Users) > 0 { - t.Fatalf("A user exists when there should be none") - } + require.NoError(s.ListUsers(nil, nil, &reply)) + require.Empty(reply.Users) } } // TestServiceCreateUserWeakPassword tests creating a new user with a weak // password to ensure the password strength check is working func TestServiceCreateUserWeakPassword(t *testing.T) { - ks, err := CreateTestKeystore() - 
if err != nil { - t.Fatal(err) - } + require := require.New(t) + + ks := New(logging.NoLog{}, memdb.New()) s := service{ks: ks.(*keystore)} { @@ -133,28 +108,21 @@ func TestServiceCreateUserWeakPassword(t *testing.T) { Username: "bob", Password: "weak", }, &reply) - - if err == nil { - t.Error("Expected error when testing weak password") - } + require.ErrorIs(err, password.ErrWeakPassword) } } func TestServiceCreateDuplicate(t *testing.T) { - ks, err := CreateTestKeystore() - if err != nil { - t.Fatal(err) - } + require := require.New(t) + + ks := New(logging.NoLog{}, memdb.New()) s := service{ks: ks.(*keystore)} { - err := s.CreateUser(nil, &api.UserPass{ + require.NoError(s.CreateUser(nil, &api.UserPass{ Username: "bob", Password: strongPassword, - }, &api.EmptyReply{}) - if err != nil { - t.Fatal(err) - } + }, &api.EmptyReply{})) } { @@ -162,94 +130,70 @@ func TestServiceCreateDuplicate(t *testing.T) { Username: "bob", Password: strongPassword, }, &api.EmptyReply{}) - if err == nil { - t.Fatalf("Should have errored due to the username already existing") - } + require.ErrorIs(err, errUserAlreadyExists) } } func TestServiceCreateUserNoName(t *testing.T) { - ks, err := CreateTestKeystore() - if err != nil { - t.Fatal(err) - } + require := require.New(t) + + ks := New(logging.NoLog{}, memdb.New()) s := service{ks: ks.(*keystore)} reply := api.EmptyReply{} - if err := s.CreateUser(nil, &api.UserPass{ + err := s.CreateUser(nil, &api.UserPass{ Password: strongPassword, - }, &reply); err == nil { - t.Fatalf("Shouldn't have allowed empty username") - } + }, &reply) + require.ErrorIs(err, errEmptyUsername) } func TestServiceUseBlockchainDB(t *testing.T) { - ks, err := CreateTestKeystore() - if err != nil { - t.Fatal(err) - } + require := require.New(t) + + ks := New(logging.NoLog{}, memdb.New()) s := service{ks: ks.(*keystore)} { - err := s.CreateUser(nil, &api.UserPass{ + require.NoError(s.CreateUser(nil, &api.UserPass{ Username: "bob", Password: strongPassword, - }, 
&api.EmptyReply{}) - if err != nil { - t.Fatal(err) - } + }, &api.EmptyReply{})) } { db, err := ks.GetDatabase(ids.Empty, "bob", strongPassword) - if err != nil { - t.Fatal(err) - } - if err := db.Put([]byte("hello"), []byte("world")); err != nil { - t.Fatal(err) - } + require.NoError(err) + require.NoError(db.Put([]byte("hello"), []byte("world"))) } { db, err := ks.GetDatabase(ids.Empty, "bob", strongPassword) - if err != nil { - t.Fatal(err) - } - if val, err := db.Get([]byte("hello")); err != nil { - t.Fatal(err) - } else if !bytes.Equal(val, []byte("world")) { - t.Fatalf("Should have read '%s' from the db", "world") - } + require.NoError(err) + val, err := db.Get([]byte("hello")) + require.NoError(err) + require.Equal([]byte("world"), val) } } func TestServiceExportImport(t *testing.T) { + require := require.New(t) + encodings := []formatting.Encoding{formatting.Hex} for _, encoding := range encodings { - ks, err := CreateTestKeystore() - if err != nil { - t.Fatal(err) - } + ks := New(logging.NoLog{}, memdb.New()) s := service{ks: ks.(*keystore)} { - err := s.CreateUser(nil, &api.UserPass{ + require.NoError(s.CreateUser(nil, &api.UserPass{ Username: "bob", Password: strongPassword, - }, &api.EmptyReply{}) - if err != nil { - t.Fatal(err) - } + }, &api.EmptyReply{})) } { db, err := ks.GetDatabase(ids.Empty, "bob", strongPassword) - if err != nil { - t.Fatal(err) - } - if err := db.Put([]byte("hello"), []byte("world")); err != nil { - t.Fatal(err) - } + require.NoError(err) + require.NoError(db.Put([]byte("hello"), []byte("world"))) } exportArgs := ExportUserArgs{ @@ -260,14 +204,9 @@ func TestServiceExportImport(t *testing.T) { Encoding: encoding, } exportReply := ExportUserReply{} - if err := s.ExportUser(nil, &exportArgs, &exportReply); err != nil { - t.Fatal(err) - } + require.NoError(s.ExportUser(nil, &exportArgs, &exportReply)) - newKS, err := CreateTestKeystore() - if err != nil { - t.Fatal(err) - } + newKS := New(logging.NoLog{}, memdb.New()) newS := 
service{ks: newKS.(*keystore)} { @@ -278,9 +217,7 @@ func TestServiceExportImport(t *testing.T) { }, User: exportReply.User, }, &api.EmptyReply{}) - if err == nil { - t.Fatal("Should have errored due to incorrect password") - } + require.ErrorIs(err, errIncorrectPassword) } { @@ -291,35 +228,26 @@ func TestServiceExportImport(t *testing.T) { }, User: exportReply.User, }, &api.EmptyReply{}) - if err == nil { - t.Fatal("Should have errored due to empty username") - } + require.ErrorIs(err, errEmptyUsername) } { - err := newS.ImportUser(nil, &ImportUserArgs{ + require.NoError(newS.ImportUser(nil, &ImportUserArgs{ UserPass: api.UserPass{ Username: "bob", Password: strongPassword, }, User: exportReply.User, Encoding: encoding, - }, &api.EmptyReply{}) - if err != nil { - t.Fatal(err) - } + }, &api.EmptyReply{})) } { db, err := newKS.GetDatabase(ids.Empty, "bob", strongPassword) - if err != nil { - t.Fatal(err) - } - if val, err := db.Get([]byte("hello")); err != nil { - t.Fatal(err) - } else if !bytes.Equal(val, []byte("world")) { - t.Fatalf("Should have read '%s' from the db", "world") - } + require.NoError(err) + val, err := db.Get([]byte("hello")) + require.NoError(err) + require.Equal([]byte("world"), val) } } } @@ -328,88 +256,85 @@ func TestServiceDeleteUser(t *testing.T) { testUser := "testUser" password := "passwTest@fake01ord" tests := []struct { - desc string - setup func(ks *keystore) error - request *api.UserPass - want *api.EmptyReply - wantError bool - }{{ - desc: "empty user name case", - request: &api.UserPass{}, - wantError: true, - }, { - desc: "user not exists case", - request: &api.UserPass{Username: "dummy"}, - wantError: true, - }, { - desc: "user exists and invalid password case", - request: &api.UserPass{Username: testUser, Password: "password"}, - wantError: true, - }, { - desc: "user exists and valid password case", - setup: func(ks *keystore) error { - s := service{ks: ks} - return s.CreateUser(nil, &api.UserPass{Username: testUser, Password: 
password}, &api.EmptyReply{}) + desc string + setup func(ks *keystore) error + request *api.UserPass + want *api.EmptyReply + expectedErr error + }{ + { + desc: "empty user name case", + request: &api.UserPass{}, + expectedErr: errEmptyUsername, }, - request: &api.UserPass{Username: testUser, Password: password}, - want: &api.EmptyReply{}, - }, { - desc: "delete a user, imported from import api case", - setup: func(ks *keystore) error { - s := service{ks: ks} + { + desc: "user not exists case", + request: &api.UserPass{Username: "dummy"}, + expectedErr: errNonexistentUser, + }, + { + desc: "user exists and invalid password case", + setup: func(ks *keystore) error { + s := service{ks: ks} + return s.CreateUser(nil, &api.UserPass{Username: testUser, Password: password}, &api.EmptyReply{}) + }, + request: &api.UserPass{Username: testUser, Password: "password"}, + expectedErr: errIncorrectPassword, + }, + { + desc: "user exists and valid password case", + setup: func(ks *keystore) error { + s := service{ks: ks} + return s.CreateUser(nil, &api.UserPass{Username: testUser, Password: password}, &api.EmptyReply{}) + }, + request: &api.UserPass{Username: testUser, Password: password}, + want: &api.EmptyReply{}, + }, + { + desc: "delete a user, imported from import api case", + setup: func(ks *keystore) error { + s := service{ks: ks} - reply := api.EmptyReply{} - if err := s.CreateUser(nil, &api.UserPass{Username: testUser, Password: password}, &reply); err != nil { - return err - } + reply := api.EmptyReply{} + if err := s.CreateUser(nil, &api.UserPass{Username: testUser, Password: password}, &reply); err != nil { + return err + } - // created data in bob db - db, err := ks.GetDatabase(ids.Empty, testUser, password) - if err != nil { - return err - } + // created data in bob db + db, err := ks.GetDatabase(ids.Empty, testUser, password) + if err != nil { + return err + } - return db.Put([]byte("hello"), []byte("world")) + return db.Put([]byte("hello"), []byte("world")) + }, 
+ request: &api.UserPass{Username: testUser, Password: password}, + want: &api.EmptyReply{}, }, - request: &api.UserPass{Username: testUser, Password: password}, - want: &api.EmptyReply{}, - }} + } for _, tt := range tests { t.Run(tt.desc, func(t *testing.T) { - ksIntf, err := CreateTestKeystore() - if err != nil { - t.Fatal(err) - } + require := require.New(t) + + ksIntf := New(logging.NoLog{}, memdb.New()) ks := ksIntf.(*keystore) s := service{ks: ks} if tt.setup != nil { - if err := tt.setup(ks); err != nil { - t.Fatalf("failed to create user setup in keystore: %v", err) - } + require.NoError(tt.setup(ks)) } got := &api.EmptyReply{} - err = s.DeleteUser(nil, tt.request, got) - if (err != nil) != tt.wantError { - t.Fatalf("DeleteUser() failed: error %v, wantError %v", err, tt.wantError) - } - - if !tt.wantError && !reflect.DeepEqual(tt.want, got) { - t.Fatalf("DeleteUser() failed: got %v, want %v", got, tt.want) + err := s.DeleteUser(nil, tt.request, got) + require.ErrorIs(err, tt.expectedErr) + if tt.expectedErr != nil { + return } + require.Equal(tt.want, got) + require.NotContains(ks.usernameToPassword, testUser) // delete is successful - if err == nil { // delete is successful - if _, ok := ks.usernameToPassword[testUser]; ok { - t.Fatalf("DeleteUser() failed: expected the user %s should be delete from users map", testUser) - } - - // deleted user details should be available to create user again. - err := s.CreateUser(nil, &api.UserPass{Username: testUser, Password: password}, &api.EmptyReply{}) - if err != nil { - t.Fatalf("failed to create user: %v", err) - } - } + // deleted user details should be available to create user again. 
+ require.NoError(s.CreateUser(nil, &api.UserPass{Username: testUser, Password: password}, &api.EmptyReply{})) }) } } diff --git a/avalanchego/api/metrics/gatherer_test.go b/avalanchego/api/metrics/gatherer_test.go index 2059c1ab..334c361e 100644 --- a/avalanchego/api/metrics/gatherer_test.go +++ b/avalanchego/api/metrics/gatherer_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package metrics diff --git a/avalanchego/api/metrics/multi_gatherer.go b/avalanchego/api/metrics/multi_gatherer.go index eb357668..4bd0900a 100644 --- a/avalanchego/api/metrics/multi_gatherer.go +++ b/avalanchego/api/metrics/multi_gatherer.go @@ -1,24 +1,26 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package metrics import ( + "cmp" "errors" "fmt" + "slices" "sync" "github.com/prometheus/client_golang/prometheus" - dto "github.com/prometheus/client_model/go" + "github.com/ava-labs/avalanchego/utils/metric" - "golang.org/x/exp/slices" + dto "github.com/prometheus/client_model/go" ) var ( - errDuplicatedPrefix = errors.New("duplicated prefix") - _ MultiGatherer = (*multiGatherer)(nil) + + errReregisterGatherer = errors.New("attempt to register existing gatherer") ) // MultiGatherer extends the Gatherer interface by allowing additional gatherers @@ -48,23 +50,19 @@ func (g *multiGatherer) Gather() ([]*dto.MetricFamily, error) { var results []*dto.MetricFamily for namespace, gatherer := range g.gatherers { - metrics, err := gatherer.Gather() + gatheredMetrics, err := gatherer.Gather() if err != nil { return nil, err } - for _, metric := range metrics { + for _, gatheredMetric := range gatheredMetrics { var name string - if metric.Name != nil { - if len(namespace) > 0 { - name = fmt.Sprintf("%s_%s", namespace, 
*metric.Name) - } else { - name = *metric.Name - } + if gatheredMetric.Name != nil { + name = metric.AppendNamespace(namespace, *gatheredMetric.Name) } else { name = namespace } - metric.Name = &name - results = append(results, metric) + gatheredMetric.Name = &name + results = append(results, gatheredMetric) } } // Because we overwrite every metric's name, we are guaranteed that there @@ -77,8 +75,13 @@ func (g *multiGatherer) Register(namespace string, gatherer prometheus.Gatherer) g.lock.Lock() defer g.lock.Unlock() - if _, exists := g.gatherers[namespace]; exists { - return errDuplicatedPrefix + if existingGatherer, exists := g.gatherers[namespace]; exists { + return fmt.Errorf("%w for namespace %q; existing: %#v; new: %#v", + errReregisterGatherer, + namespace, + existingGatherer, + gatherer, + ) } g.gatherers[namespace] = gatherer @@ -86,7 +89,7 @@ func (g *multiGatherer) Register(namespace string, gatherer prometheus.Gatherer) } func sortMetrics(m []*dto.MetricFamily) { - slices.SortFunc(m, func(i, j *dto.MetricFamily) bool { - return *i.Name < *j.Name + slices.SortFunc(m, func(i, j *dto.MetricFamily) int { + return cmp.Compare(*i.Name, *j.Name) }) } diff --git a/avalanchego/api/metrics/multi_gatherer_test.go b/avalanchego/api/metrics/multi_gatherer_test.go index aead517d..033e3e88 100644 --- a/avalanchego/api/metrics/multi_gatherer_test.go +++ b/avalanchego/api/metrics/multi_gatherer_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package metrics @@ -27,14 +27,12 @@ func TestMultiGathererDuplicatedPrefix(t *testing.T) { g := NewMultiGatherer() og := NewOptionalGatherer() - err := g.Register("", og) - require.NoError(err) + require.NoError(g.Register("", og)) - err = g.Register("", og) - require.Equal(errDuplicatedPrefix, err) + err := g.Register("", og) + require.ErrorIs(err, errReregisterGatherer) - err = g.Register("lol", og) - require.NoError(err) + require.NoError(g.Register("lol", og)) } func TestMultiGathererAddedError(t *testing.T) { @@ -46,8 +44,7 @@ func TestMultiGathererAddedError(t *testing.T) { err: errTest, } - err := g.Register("", tg) - require.NoError(err) + require.NoError(g.Register("", tg)) mfs, err := g.Gather() require.ErrorIs(err, errTest) @@ -65,8 +62,7 @@ func TestMultiGathererNoAddedPrefix(t *testing.T) { }}, } - err := g.Register("", tg) - require.NoError(err) + require.NoError(g.Register("", tg)) mfs, err := g.Gather() require.NoError(err) @@ -85,8 +81,7 @@ func TestMultiGathererAddedPrefix(t *testing.T) { }}, } - err := g.Register(hello, tg) - require.NoError(err) + require.NoError(g.Register(hello, tg)) mfs, err := g.Gather() require.NoError(err) @@ -103,8 +98,7 @@ func TestMultiGathererJustPrefix(t *testing.T) { mfs: []*dto.MetricFamily{{}}, } - err := g.Register(hello, tg) - require.NoError(err) + require.NoError(g.Register(hello, tg)) mfs, err := g.Gather() require.NoError(err) @@ -130,8 +124,7 @@ func TestMultiGathererSorted(t *testing.T) { }, } - err := g.Register("", tg) - require.NoError(err) + require.NoError(g.Register("", tg)) mfs, err := g.Gather() require.NoError(err) diff --git a/avalanchego/api/metrics/optional_gatherer.go b/avalanchego/api/metrics/optional_gatherer.go index 4d917dfb..686856ef 100644 --- a/avalanchego/api/metrics/optional_gatherer.go +++ b/avalanchego/api/metrics/optional_gatherer.go @@ -1,10 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. package metrics import ( - "errors" + "fmt" "sync" "github.com/prometheus/client_golang/prometheus" @@ -12,11 +12,7 @@ import ( dto "github.com/prometheus/client_model/go" ) -var ( - errDuplicatedRegister = errors.New("duplicated register") - - _ OptionalGatherer = (*optionalGatherer)(nil) -) +var _ OptionalGatherer = (*optionalGatherer)(nil) // OptionalGatherer extends the Gatherer interface by allowing the optional // registration of a single gatherer. If no gatherer is registered, Gather will @@ -54,7 +50,11 @@ func (g *optionalGatherer) Register(gatherer prometheus.Gatherer) error { defer g.lock.Unlock() if g.gatherer != nil { - return errDuplicatedRegister + return fmt.Errorf("%w; existing: %#v; new: %#v", + errReregisterGatherer, + g.gatherer, + gatherer, + ) } g.gatherer = gatherer return nil diff --git a/avalanchego/api/metrics/optional_gatherer_test.go b/avalanchego/api/metrics/optional_gatherer_test.go index 6b528886..20175070 100644 --- a/avalanchego/api/metrics/optional_gatherer_test.go +++ b/avalanchego/api/metrics/optional_gatherer_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package metrics @@ -30,11 +30,9 @@ func TestOptionalGathererDuplicated(t *testing.T) { g := NewOptionalGatherer() og := NewOptionalGatherer() + require.NoError(g.Register(og)) err := g.Register(og) - require.NoError(err) - - err = g.Register(og) - require.Equal(errDuplicatedRegister, err) + require.ErrorIs(err, errReregisterGatherer) } func TestOptionalGathererAddedError(t *testing.T) { @@ -46,8 +44,7 @@ func TestOptionalGathererAddedError(t *testing.T) { err: errTest, } - err := g.Register(tg) - require.NoError(err) + require.NoError(g.Register(tg)) mfs, err := g.Gather() require.ErrorIs(err, errTest) @@ -65,8 +62,7 @@ func TestMultiGathererAdded(t *testing.T) { }}, } - err := g.Register(tg) - require.NoError(err) + require.NoError(g.Register(tg)) mfs, err := g.Gather() require.NoError(err) diff --git a/avalanchego/api/server/allowed_hosts.go b/avalanchego/api/server/allowed_hosts.go new file mode 100644 index 00000000..7d2812b2 --- /dev/null +++ b/avalanchego/api/server/allowed_hosts.go @@ -0,0 +1,76 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package server + +import ( + "net" + "net/http" + "strings" + + "github.com/ava-labs/avalanchego/utils/set" +) + +const wildcard = "*" + +var _ http.Handler = (*allowedHostsHandler)(nil) + +func filterInvalidHosts( + handler http.Handler, + allowed []string, +) http.Handler { + s := set.Set[string]{} + + for _, host := range allowed { + if host == wildcard { + // wildcards match all hostnames, so just return the base handler + return handler + } + s.Add(strings.ToLower(host)) + } + + return &allowedHostsHandler{ + handler: handler, + hosts: s, + } +} + +// allowedHostsHandler is an implementation of http.Handler that validates the +// http host header of incoming requests. This can prevent DNS rebinding attacks +// which do not utilize CORS-headers. 
Http request host headers are validated +// against a whitelist to determine whether the request should be dropped or +// not. +type allowedHostsHandler struct { + handler http.Handler + hosts set.Set[string] +} + +func (a *allowedHostsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + // if the host header is missing we can serve this request because dns + // rebinding attacks rely on this header + if r.Host == "" { + a.handler.ServeHTTP(w, r) + return + } + + host, _, err := net.SplitHostPort(r.Host) + if err != nil { + // either invalid (too many colons) or no port specified + host = r.Host + } + + if ipAddr := net.ParseIP(host); ipAddr != nil { + // accept requests from ips + a.handler.ServeHTTP(w, r) + return + } + + // a specific hostname - we need to check the whitelist to see if we should + // accept this r + if a.hosts.Contains(strings.ToLower(host)) { + a.handler.ServeHTTP(w, r) + return + } + + http.Error(w, "invalid host specified", http.StatusForbidden) +} diff --git a/avalanchego/api/server/allowed_hosts_test.go b/avalanchego/api/server/allowed_hosts_test.go new file mode 100644 index 00000000..47b1a53d --- /dev/null +++ b/avalanchego/api/server/allowed_hosts_test.go @@ -0,0 +1,77 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package server + +import ( + "net/http" + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestAllowedHostsHandler_ServeHTTP(t *testing.T) { + tests := []struct { + name string + allowed []string + host string + serve bool + }{ + { + name: "no host header", + allowed: []string{"www.foobar.com"}, + host: "", + serve: true, + }, + { + name: "ip", + allowed: []string{"www.foobar.com"}, + host: "192.168.1.1", + serve: true, + }, + { + name: "hostname not allowed", + allowed: []string{"www.foobar.com"}, + host: "www.evil.com", + }, + { + name: "hostname allowed", + allowed: []string{"www.foobar.com"}, + host: "www.foobar.com", + serve: true, + }, + { + name: "wildcard", + allowed: []string{"*"}, + host: "www.foobar.com", + serve: true, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + baseHandler := &testHandler{} + + httpAllowedHostsHandler := filterInvalidHosts( + baseHandler, + test.allowed, + ) + + w := &httptest.ResponseRecorder{} + r := httptest.NewRequest("", "/", nil) + r.Host = test.host + + httpAllowedHostsHandler.ServeHTTP(w, r) + + if test.serve { + require.True(baseHandler.called) + return + } + + require.Equal(http.StatusForbidden, w.Code) + }) + } +} diff --git a/avalanchego/api/server/metrics.go b/avalanchego/api/server/metrics.go index 6556c3a0..e3b2d76c 100644 --- a/avalanchego/api/server/metrics.go +++ b/avalanchego/api/server/metrics.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package server @@ -9,7 +9,7 @@ import ( "github.com/prometheus/client_golang/prometheus" - "github.com/ava-labs/avalanchego/utils/wrappers" + "github.com/ava-labs/avalanchego/utils" ) type metrics struct { @@ -46,13 +46,12 @@ func newMetrics(namespace string, registerer prometheus.Registerer) (*metrics, e ), } - errs := wrappers.Errs{} - errs.Add( + err := utils.Err( registerer.Register(m.numProcessing), registerer.Register(m.numCalls), registerer.Register(m.totalDuration), ) - return m, errs.Err + return m, err } func (m *metrics) wrapHandler(chainName string, handler http.Handler) http.Handler { diff --git a/avalanchego/api/server/middleware_handler.go b/avalanchego/api/server/middleware_handler.go deleted file mode 100644 index 1e5b7192..00000000 --- a/avalanchego/api/server/middleware_handler.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package server - -import ( - "net/http" -) - -type middlewareHandler struct { - before, after func() - handler http.Handler -} - -func (mh middlewareHandler) ServeHTTP(writer http.ResponseWriter, request *http.Request) { - if mh.before != nil { - mh.before() - } - if mh.after != nil { - defer mh.after() - } - mh.handler.ServeHTTP(writer, request) -} diff --git a/avalanchego/api/server/mock_server.go b/avalanchego/api/server/mock_server.go index 315e3d88..769df9ba 100644 --- a/avalanchego/api/server/mock_server.go +++ b/avalanchego/api/server/mock_server.go @@ -1,19 +1,21 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/api/server (interfaces: Server) +// +// Generated by this command: +// +// mockgen -package=server -destination=api/server/mock_server.go github.com/ava-labs/avalanchego/api/server Server +// // Package server is a generated GoMock package. 
package server import ( + http "net/http" reflect "reflect" - sync "sync" snow "github.com/ava-labs/avalanchego/snow" common "github.com/ava-labs/avalanchego/snow/engine/common" - gomock "github.com/golang/mock/gomock" + gomock "go.uber.org/mock/gomock" ) // MockServer is a mock of Server interface. @@ -42,7 +44,7 @@ func (m *MockServer) EXPECT() *MockServerMockRecorder { // AddAliases mocks base method. func (m *MockServer) AddAliases(arg0 string, arg1 ...string) error { m.ctrl.T.Helper() - varargs := []interface{}{arg0} + varargs := []any{arg0} for _, a := range arg1 { varargs = append(varargs, a) } @@ -52,16 +54,16 @@ func (m *MockServer) AddAliases(arg0 string, arg1 ...string) error { } // AddAliases indicates an expected call of AddAliases. -func (mr *MockServerMockRecorder) AddAliases(arg0 interface{}, arg1 ...interface{}) *gomock.Call { +func (mr *MockServerMockRecorder) AddAliases(arg0 any, arg1 ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0}, arg1...) + varargs := append([]any{arg0}, arg1...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddAliases", reflect.TypeOf((*MockServer)(nil).AddAliases), varargs...) } // AddAliasesWithReadLock mocks base method. func (m *MockServer) AddAliasesWithReadLock(arg0 string, arg1 ...string) error { m.ctrl.T.Helper() - varargs := []interface{}{arg0} + varargs := []any{arg0} for _, a := range arg1 { varargs = append(varargs, a) } @@ -71,38 +73,38 @@ func (m *MockServer) AddAliasesWithReadLock(arg0 string, arg1 ...string) error { } // AddAliasesWithReadLock indicates an expected call of AddAliasesWithReadLock. -func (mr *MockServerMockRecorder) AddAliasesWithReadLock(arg0 interface{}, arg1 ...interface{}) *gomock.Call { +func (mr *MockServerMockRecorder) AddAliasesWithReadLock(arg0 any, arg1 ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0}, arg1...) + varargs := append([]any{arg0}, arg1...) 
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddAliasesWithReadLock", reflect.TypeOf((*MockServer)(nil).AddAliasesWithReadLock), varargs...) } // AddRoute mocks base method. -func (m *MockServer) AddRoute(arg0 *common.HTTPHandler, arg1 *sync.RWMutex, arg2, arg3 string) error { +func (m *MockServer) AddRoute(arg0 http.Handler, arg1, arg2 string) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AddRoute", arg0, arg1, arg2, arg3) + ret := m.ctrl.Call(m, "AddRoute", arg0, arg1, arg2) ret0, _ := ret[0].(error) return ret0 } // AddRoute indicates an expected call of AddRoute. -func (mr *MockServerMockRecorder) AddRoute(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +func (mr *MockServerMockRecorder) AddRoute(arg0, arg1, arg2 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddRoute", reflect.TypeOf((*MockServer)(nil).AddRoute), arg0, arg1, arg2, arg3) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddRoute", reflect.TypeOf((*MockServer)(nil).AddRoute), arg0, arg1, arg2) } // AddRouteWithReadLock mocks base method. -func (m *MockServer) AddRouteWithReadLock(arg0 *common.HTTPHandler, arg1 *sync.RWMutex, arg2, arg3 string) error { +func (m *MockServer) AddRouteWithReadLock(arg0 http.Handler, arg1, arg2 string) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AddRouteWithReadLock", arg0, arg1, arg2, arg3) + ret := m.ctrl.Call(m, "AddRouteWithReadLock", arg0, arg1, arg2) ret0, _ := ret[0].(error) return ret0 } // AddRouteWithReadLock indicates an expected call of AddRouteWithReadLock. 
-func (mr *MockServerMockRecorder) AddRouteWithReadLock(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +func (mr *MockServerMockRecorder) AddRouteWithReadLock(arg0, arg1, arg2 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddRouteWithReadLock", reflect.TypeOf((*MockServer)(nil).AddRouteWithReadLock), arg0, arg1, arg2, arg3) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddRouteWithReadLock", reflect.TypeOf((*MockServer)(nil).AddRouteWithReadLock), arg0, arg1, arg2) } // Dispatch mocks base method. @@ -119,20 +121,6 @@ func (mr *MockServerMockRecorder) Dispatch() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Dispatch", reflect.TypeOf((*MockServer)(nil).Dispatch)) } -// DispatchTLS mocks base method. -func (m *MockServer) DispatchTLS(arg0, arg1 []byte) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DispatchTLS", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// DispatchTLS indicates an expected call of DispatchTLS. -func (mr *MockServerMockRecorder) DispatchTLS(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DispatchTLS", reflect.TypeOf((*MockServer)(nil).DispatchTLS), arg0, arg1) -} - // RegisterChain mocks base method. func (m *MockServer) RegisterChain(arg0 string, arg1 *snow.ConsensusContext, arg2 common.VM) { m.ctrl.T.Helper() @@ -140,7 +128,7 @@ func (m *MockServer) RegisterChain(arg0 string, arg1 *snow.ConsensusContext, arg } // RegisterChain indicates an expected call of RegisterChain. 
-func (mr *MockServerMockRecorder) RegisterChain(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockServerMockRecorder) RegisterChain(arg0, arg1, arg2 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterChain", reflect.TypeOf((*MockServer)(nil).RegisterChain), arg0, arg1, arg2) } diff --git a/avalanchego/api/server/router.go b/avalanchego/api/server/router.go index 9732b8e6..6adadf60 100644 --- a/avalanchego/api/server/router.go +++ b/avalanchego/api/server/router.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package server @@ -17,6 +17,7 @@ import ( var ( errUnknownBaseURL = errors.New("unknown base url") errUnknownEndpoint = errors.New("unknown endpoint") + errAlreadyReserved = errors.New("route is either already aliased or already maps to a handle") ) type router struct { @@ -71,7 +72,7 @@ func (r *router) AddRouter(base, endpoint string, handler http.Handler) error { func (r *router) addRouter(base, endpoint string, handler http.Handler) error { if r.reservedRoutes.Contains(base) { - return fmt.Errorf("couldn't route to %s as that route is either aliased or already maps to a handler", base) + return fmt.Errorf("%w: %s", errAlreadyReserved, base) } return r.forceAddRouter(base, endpoint, handler) @@ -116,7 +117,7 @@ func (r *router) AddAlias(base string, aliases ...string) error { for _, alias := range aliases { if r.reservedRoutes.Contains(alias) { - return fmt.Errorf("couldn't alias to %s as that route is either already aliased or already maps to a handler", alias) + return fmt.Errorf("%w: %s", errAlreadyReserved, alias) } } diff --git a/avalanchego/api/server/router_test.go b/avalanchego/api/server/router_test.go index dc53ccd2..f6676a37 100644 --- a/avalanchego/api/server/router_test.go +++ b/avalanchego/api/server/router_test.go @@ -1,4 +1,4 @@ -// 
Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package server @@ -6,6 +6,8 @@ package server import ( "net/http" "testing" + + "github.com/stretchr/testify/require" ) type testHandler struct{ called bool } @@ -15,63 +17,44 @@ func (t *testHandler) ServeHTTP(_ http.ResponseWriter, _ *http.Request) { } func TestAliasing(t *testing.T) { + require := require.New(t) + r := newRouter() - if err := r.AddAlias("1", "2", "3"); err != nil { - t.Fatal(err) - } - if err := r.AddAlias("1", "4"); err != nil { - t.Fatal(err) - } - if err := r.AddAlias("5", "1"); err != nil { - t.Fatal(err) - } - if err := r.AddAlias("3", "6"); err != nil { - t.Fatal(err) - } - if err := r.AddAlias("7", "4"); err == nil { - t.Fatalf("Already reserved %s", "4") - } + require.NoError(r.AddAlias("1", "2", "3")) + require.NoError(r.AddAlias("1", "4")) + require.NoError(r.AddAlias("5", "1")) + require.NoError(r.AddAlias("3", "6")) + err := r.AddAlias("7", "4") + require.ErrorIs(err, errAlreadyReserved) handler1 := &testHandler{} - if err := r.AddRouter("2", "", handler1); err == nil { - t.Fatalf("Already reserved %s", "2") - } - if err := r.AddRouter("5", "", handler1); err != nil { - t.Fatal(err) - } - if handler, exists := r.routes["5"][""]; !exists { - t.Fatalf("Should have added %s", "5") - } else if handler != handler1 { - t.Fatalf("Registered unknown handler") - } - - if err := r.AddAlias("5", "7"); err != nil { - t.Fatal(err) - } - - if handler, exists := r.routes["7"][""]; !exists { - t.Fatalf("Should have added %s", "7") - } else if handler != handler1 { - t.Fatalf("Registered unknown handler") - } - - if handler, err := r.GetHandler("7", ""); err != nil { - t.Fatalf("Should have added %s", "7") - } else if handler != handler1 { - t.Fatalf("Registered unknown handler") - } + err = r.AddRouter("2", "", handler1) + require.ErrorIs(err, errAlreadyReserved) + 
require.NoError(r.AddRouter("5", "", handler1)) + + handler, exists := r.routes["5"][""] + require.True(exists) + require.Equal(handler1, handler) + + require.NoError(r.AddAlias("5", "7")) + + handler, exists = r.routes["7"][""] + require.True(exists) + require.Equal(handler1, handler) + + handler, err = r.GetHandler("7", "") + require.NoError(err) + require.Equal(handler1, handler) } func TestBlock(t *testing.T) { + require := require.New(t) r := newRouter() - if err := r.AddAlias("1", "1"); err != nil { - t.Fatal(err) - } + require.NoError(r.AddAlias("1", "1")) handler1 := &testHandler{} - if err := r.AddRouter("1", "", handler1); err == nil { - t.Fatalf("Permanently locked %s", "1") - } + err := r.AddRouter("1", "", handler1) + require.ErrorIs(err, errAlreadyReserved) } diff --git a/avalanchego/api/server/server.go b/avalanchego/api/server/server.go index e6e24b34..a761468c 100644 --- a/avalanchego/api/server/server.go +++ b/avalanchego/api/server/server.go @@ -1,27 +1,22 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package server import ( "context" - "crypto/tls" - "errors" "fmt" "net" "net/http" "net/url" "path" - "sync" "time" "github.com/NYTimes/gziphandler" - "github.com/prometheus/client_golang/prometheus" - "github.com/rs/cors" - "go.uber.org/zap" + "golang.org/x/net/http2" "github.com/ava-labs/avalanchego/api" "github.com/ava-labs/avalanchego/ids" @@ -29,22 +24,22 @@ import ( "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/trace" "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/ips" "github.com/ava-labs/avalanchego/utils/logging" ) -const baseURL = "/ext" +const ( + baseURL = "/ext" + maxConcurrentStreams = 64 +) var ( - errUnknownLockOption = errors.New("invalid lock options") - _ PathAdder = readPathAdder{} _ Server = (*server)(nil) ) type PathAdder interface { // AddRoute registers a route to a handler. - AddRoute(handler *common.HTTPHandler, lock *sync.RWMutex, base, endpoint string) error + AddRoute(handler http.Handler, base, endpoint string) error // AddAliases registers aliases to the server AddAliases(endpoint string, aliases ...string) error @@ -53,7 +48,7 @@ type PathAdder interface { type PathAdderWithReadLock interface { // AddRouteWithReadLock registers a route to a handler assuming the http // read lock is currently held. - AddRouteWithReadLock(handler *common.HTTPHandler, lock *sync.RWMutex, base, endpoint string) error + AddRouteWithReadLock(handler http.Handler, base, endpoint string) error // AddAliasesWithReadLock registers aliases to the server assuming the http read // lock is currently held. @@ -66,8 +61,6 @@ type Server interface { PathAdderWithReadLock // Dispatch starts the API server Dispatch() error - // DispatchTLS starts the API server with the provided TLS certificate - DispatchTLS(certBytes, keyBytes []byte) error // RegisterChain registers the API endpoints associated with this chain. 
// That is, add pairs to server so that API calls can be // made to the VM. @@ -88,9 +81,6 @@ type server struct { log logging.Logger // generates new logs for chains to write to factory logging.Factory - // Listens for HTTP traffic on this address - listenHost string - listenPort uint16 shutdownTimeout time.Duration @@ -103,14 +93,16 @@ type server struct { router *router srv *http.Server + + // Listener used to serve traffic + listener net.Listener } // New returns an instance of a Server. func New( log logging.Logger, factory logging.Factory, - host string, - port uint16, + listener net.Listener, allowedOrigins []string, shutdownTimeout time.Duration, nodeID ids.NodeID, @@ -119,6 +111,7 @@ func New( namespace string, registerer prometheus.Registerer, httpConfig HTTPConfig, + allowedHosts []string, wrappers ...Wrapper, ) (Server, error) { m, err := newMetrics(namespace, registerer) @@ -127,10 +120,11 @@ func New( } router := newRouter() + allowedHostsHandler := filterInvalidHosts(router, allowedHosts) corsHandler := cors.New(cors.Options{ AllowedOrigins: allowedOrigins, AllowCredentials: true, - }).Handler(router) + }).Handler(allowedHostsHandler) gzipHandler := gziphandler.GzipHandler(corsHandler) var handler http.Handler = http.HandlerFunc( func(w http.ResponseWriter, r *http.Request) { @@ -144,6 +138,20 @@ func New( handler = wrapper.WrapHandler(handler) } + httpServer := &http.Server{ + Handler: handler, + ReadTimeout: httpConfig.ReadTimeout, + ReadHeaderTimeout: httpConfig.ReadHeaderTimeout, + WriteTimeout: httpConfig.WriteTimeout, + IdleTimeout: httpConfig.IdleTimeout, + } + err = http2.ConfigureServer(httpServer, &http2.Server{ + MaxConcurrentStreams: maxConcurrentStreams, + }) + if err != nil { + return nil, err + } + log.Info("API created", zap.Strings("allowedOrigins", allowedOrigins), ) @@ -151,84 +159,23 @@ func New( return &server{ log: log, factory: factory, - listenHost: host, - listenPort: port, shutdownTimeout: shutdownTimeout, tracingEnabled: 
tracingEnabled, tracer: tracer, metrics: m, router: router, - srv: &http.Server{ - Handler: handler, - ReadTimeout: httpConfig.ReadTimeout, - ReadHeaderTimeout: httpConfig.ReadHeaderTimeout, - WriteTimeout: httpConfig.WriteTimeout, - IdleTimeout: httpConfig.IdleTimeout, - }, + srv: httpServer, + listener: listener, }, nil } func (s *server) Dispatch() error { - listenAddress := fmt.Sprintf("%s:%d", s.listenHost, s.listenPort) - listener, err := net.Listen("tcp", listenAddress) - if err != nil { - return err - } - - ipPort, err := ips.ToIPPort(listener.Addr().String()) - if err != nil { - s.log.Info("HTTP API server listening", - zap.String("address", listenAddress), - ) - } else { - s.log.Info("HTTP API server listening", - zap.String("host", s.listenHost), - zap.Uint16("port", ipPort.Port), - ) - } - - return s.srv.Serve(listener) -} - -func (s *server) DispatchTLS(certBytes, keyBytes []byte) error { - listenAddress := fmt.Sprintf("%s:%d", s.listenHost, s.listenPort) - cert, err := tls.X509KeyPair(certBytes, keyBytes) - if err != nil { - return err - } - config := &tls.Config{ - MinVersion: tls.VersionTLS12, - Certificates: []tls.Certificate{cert}, - } - - listener, err := tls.Listen("tcp", listenAddress, config) - if err != nil { - return err - } - - ipPort, err := ips.ToIPPort(listener.Addr().String()) - if err != nil { - s.log.Info("HTTPS API server listening", - zap.String("address", listenAddress), - ) - } else { - s.log.Info("HTTPS API server listening", - zap.String("host", s.listenHost), - zap.Uint16("port", ipPort.Port), - ) - } - - return s.srv.Serve(listener) + return s.srv.Serve(s.listener) } func (s *server) RegisterChain(chainName string, ctx *snow.ConsensusContext, vm common.VM) { - var ( - handlers map[string]*common.HTTPHandler - err error - ) - ctx.Lock.Lock() - handlers, err = vm.CreateHandlers(context.TODO()) + handlers, err := vm.CreateHandlers(context.TODO()) ctx.Lock.Unlock() if err != nil { s.log.Error("failed to create handlers", @@ 
-264,46 +211,32 @@ func (s *server) RegisterChain(chainName string, ctx *snow.ConsensusContext, vm } } -func (s *server) addChainRoute(chainName string, handler *common.HTTPHandler, ctx *snow.ConsensusContext, base, endpoint string) error { +func (s *server) addChainRoute(chainName string, handler http.Handler, ctx *snow.ConsensusContext, base, endpoint string) error { url := fmt.Sprintf("%s/%s", baseURL, base) s.log.Info("adding route", zap.String("url", url), zap.String("endpoint", endpoint), ) if s.tracingEnabled { - handler = &common.HTTPHandler{ - LockOptions: handler.LockOptions, - Handler: api.TraceHandler(handler.Handler, chainName, s.tracer), - } - } - // Apply middleware to grab/release chain's lock before/after calling API method - h, err := lockMiddleware( - handler.Handler, - handler.LockOptions, - s.tracingEnabled, - s.tracer, - &ctx.Lock, - ) - if err != nil { - return err + handler = api.TraceHandler(handler, chainName, s.tracer) } // Apply middleware to reject calls to the handler before the chain finishes bootstrapping - h = rejectMiddleware(h, ctx) - h = s.metrics.wrapHandler(chainName, h) - return s.router.AddRouter(url, endpoint, h) + handler = rejectMiddleware(handler, ctx) + handler = s.metrics.wrapHandler(chainName, handler) + return s.router.AddRouter(url, endpoint, handler) } -func (s *server) AddRoute(handler *common.HTTPHandler, lock *sync.RWMutex, base, endpoint string) error { - return s.addRoute(handler, lock, base, endpoint) +func (s *server) AddRoute(handler http.Handler, base, endpoint string) error { + return s.addRoute(handler, base, endpoint) } -func (s *server) AddRouteWithReadLock(handler *common.HTTPHandler, lock *sync.RWMutex, base, endpoint string) error { +func (s *server) AddRouteWithReadLock(handler http.Handler, base, endpoint string) error { s.router.lock.RUnlock() defer s.router.lock.RLock() - return s.addRoute(handler, lock, base, endpoint) + return s.addRoute(handler, base, endpoint) } -func (s *server) 
addRoute(handler *common.HTTPHandler, lock *sync.RWMutex, base, endpoint string) error { +func (s *server) addRoute(handler http.Handler, base, endpoint string) error { url := fmt.Sprintf("%s/%s", baseURL, base) s.log.Info("adding route", zap.String("url", url), @@ -311,65 +244,11 @@ func (s *server) addRoute(handler *common.HTTPHandler, lock *sync.RWMutex, base, ) if s.tracingEnabled { - handler = &common.HTTPHandler{ - LockOptions: handler.LockOptions, - Handler: api.TraceHandler(handler.Handler, url, s.tracer), - } - } - - // Apply middleware to grab/release chain's lock before/after calling API method - h, err := lockMiddleware( - handler.Handler, - handler.LockOptions, - s.tracingEnabled, - s.tracer, - lock, - ) - if err != nil { - return err - } - h = s.metrics.wrapHandler(base, h) - return s.router.AddRouter(url, endpoint, h) -} - -// Wraps a handler by grabbing and releasing a lock before calling the handler. -func lockMiddleware( - handler http.Handler, - lockOption common.LockOption, - tracingEnabled bool, - tracer trace.Tracer, - lock *sync.RWMutex, -) (http.Handler, error) { - var ( - name string - lockedHandler http.Handler - ) - switch lockOption { - case common.WriteLock: - name = "writeLock" - lockedHandler = middlewareHandler{ - before: lock.Lock, - after: lock.Unlock, - handler: handler, - } - case common.ReadLock: - name = "readLock" - lockedHandler = middlewareHandler{ - before: lock.RLock, - after: lock.RUnlock, - handler: handler, - } - case common.NoLock: - return handler, nil - default: - return nil, errUnknownLockOption - } - - if !tracingEnabled { - return lockedHandler, nil + handler = api.TraceHandler(handler, url, s.tracer) } - return api.TraceHandler(lockedHandler, name, tracer), nil + handler = s.metrics.wrapHandler(base, handler) + return s.router.AddRouter(url, endpoint, handler) } // Reject middleware wraps a handler. 
If the chain that the context describes is @@ -377,9 +256,7 @@ func lockMiddleware( func rejectMiddleware(handler http.Handler, ctx *snow.ConsensusContext) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { // If chain isn't done bootstrapping, ignore API calls if ctx.State.Get().State != snow.NormalOp { - w.WriteHeader(http.StatusServiceUnavailable) - // Doesn't matter if there's an error while writing. They'll get the StatusServiceUnavailable code. - _, _ = w.Write([]byte("API call rejected because chain is not done bootstrapping")) + http.Error(w, "API call rejected because chain is not done bootstrapping", http.StatusServiceUnavailable) } else { handler.ServeHTTP(w, r) } @@ -425,8 +302,8 @@ func PathWriterFromWithReadLock(pather PathAdderWithReadLock) PathAdder { } } -func (a readPathAdder) AddRoute(handler *common.HTTPHandler, lock *sync.RWMutex, base, endpoint string) error { - return a.pather.AddRouteWithReadLock(handler, lock, base, endpoint) +func (a readPathAdder) AddRoute(handler http.Handler, base, endpoint string) error { + return a.pather.AddRouteWithReadLock(handler, base, endpoint) } func (a readPathAdder) AddAliases(endpoint string, aliases ...string) error { diff --git a/avalanchego/api/server/server_test.go b/avalanchego/api/server/server_test.go new file mode 100644 index 00000000..584ad24a --- /dev/null +++ b/avalanchego/api/server/server_test.go @@ -0,0 +1,74 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package server + +import ( + "net/http" + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/snow/snowtest" +) + +func TestRejectMiddleware(t *testing.T) { + type test struct { + name string + handlerFunc func(*require.Assertions) http.Handler + state snow.State + expectedStatusCode int + } + + tests := []test{ + { + name: "chain is state syncing", + handlerFunc: func(require *require.Assertions) http.Handler { + return http.HandlerFunc(func(http.ResponseWriter, *http.Request) { + require.Fail("shouldn't have called handler") + }) + }, + state: snow.StateSyncing, + expectedStatusCode: http.StatusServiceUnavailable, + }, + { + name: "chain is bootstrapping", + handlerFunc: func(require *require.Assertions) http.Handler { + return http.HandlerFunc(func(http.ResponseWriter, *http.Request) { + require.Fail("shouldn't have called handler") + }) + }, + state: snow.Bootstrapping, + expectedStatusCode: http.StatusServiceUnavailable, + }, + { + name: "chain is done bootstrapping", + handlerFunc: func(*require.Assertions) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusTeapot) + }) + }, + state: snow.NormalOp, + expectedStatusCode: http.StatusTeapot, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) + ctx.State.Set(snow.EngineState{ + State: tt.state, + }) + + middleware := rejectMiddleware(tt.handlerFunc(require), ctx) + w := httptest.NewRecorder() + middleware.ServeHTTP(w, nil) + require.Equal(tt.expectedStatusCode, w.Code) + }) + } +} diff --git a/avalanchego/api/server/wrapper.go b/avalanchego/api/server/wrapper.go index 2a2a2763..b6cca85c 100644 --- a/avalanchego/api/server/wrapper.go +++ b/avalanchego/api/server/wrapper.go @@ -1,11 +1,9 @@ -// 
Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package server -import ( - "net/http" -) +import "net/http" type Wrapper interface { // WrapHandler wraps an http.Handler. diff --git a/avalanchego/api/traced_handler.go b/avalanchego/api/traced_handler.go index 149be820..54bdd85f 100644 --- a/avalanchego/api/traced_handler.go +++ b/avalanchego/api/traced_handler.go @@ -1,17 +1,16 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package api import ( - "fmt" "net/http" "go.opentelemetry.io/otel/attribute" - oteltrace "go.opentelemetry.io/otel/trace" - "github.com/ava-labs/avalanchego/trace" + + oteltrace "go.opentelemetry.io/otel/trace" ) var _ http.Handler = (*tracedHandler)(nil) @@ -25,7 +24,7 @@ type tracedHandler struct { func TraceHandler(h http.Handler, name string, tracer trace.Tracer) http.Handler { return &tracedHandler{ h: h, - serveHTTPTag: fmt.Sprintf("%s.ServeHTTP", name), + serveHTTPTag: name + ".ServeHTTP", tracer: tracer, } } diff --git a/avalanchego/app/app.go b/avalanchego/app/app.go index d091e60d..f84a60c9 100644 --- a/avalanchego/app/app.go +++ b/avalanchego/app/app.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package app @@ -11,33 +11,22 @@ import ( "syscall" "go.uber.org/zap" - "golang.org/x/sync/errgroup" - "github.com/ava-labs/avalanchego/nat" "github.com/ava-labs/avalanchego/node" - "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/ips" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/perms" "github.com/ava-labs/avalanchego/utils/ulimit" ) -const ( - Header = ` _____ .__ .__ +const Header = ` _____ .__ .__ / _ \___ _______ | | _____ ____ ____ | |__ ____ ,_ o / /_\ \ \/ /\__ \ | | \__ \ / \_/ ___\| | \_/ __ \ / //\, / | \ / / __ \| |__/ __ \| | \ \___| Y \ ___/ \>> | \____|__ /\_/ (____ /____(____ /___| /\___ >___| /\___ > \\ \/ \/ \/ \/ \/ \/ \/` -) -var ( - stakingPortName = fmt.Sprintf("%s-staking", constants.AppName) - httpPortName = fmt.Sprintf("%s-http", constants.AppName) - - _ App = (*app)(nil) -) +var _ App = (*app)(nil) type App interface { // Start kicks off the application and returns immediately. @@ -54,11 +43,44 @@ type App interface { ExitCode() (int, error) } -func New(config node.Config) App { - return &app{ - config: config, - node: &node.Node{}, +func New(config node.Config) (App, error) { + // Set the data directory permissions to be read write. 
+ if err := perms.ChmodR(config.DatabaseConfig.Path, true, perms.ReadWriteExecute); err != nil { + return nil, fmt.Errorf("failed to restrict the permissions of the database directory with: %w", err) + } + if err := perms.ChmodR(config.LoggingConfig.Directory, true, perms.ReadWriteExecute); err != nil { + return nil, fmt.Errorf("failed to restrict the permissions of the log directory with: %w", err) } + + logFactory := logging.NewFactory(config.LoggingConfig) + log, err := logFactory.Make("main") + if err != nil { + logFactory.Close() + return nil, fmt.Errorf("failed to initialize log: %w", err) + } + + // update fd limit + fdLimit := config.FdLimit + if err := ulimit.Set(fdLimit, log); err != nil { + log.Fatal("failed to set fd-limit", + zap.Error(err), + ) + logFactory.Close() + return nil, err + } + + n, err := node.New(&config, logFactory, log) + if err != nil { + log.Stop() + logFactory.Close() + return nil, fmt.Errorf("failed to initialize node: %w", err) + } + + return &app{ + node: n, + log: log, + logFactory: logFactory, + }, nil } func Run(app App) int { @@ -99,135 +121,16 @@ func Run(app App) int { // app is a wrapper around a node that runs in this process type app struct { - config node.Config - node *node.Node - exitWG sync.WaitGroup + node *node.Node + log logging.Logger + logFactory logging.Factory + exitWG sync.WaitGroup } // Start the business logic of the node (as opposed to config reading, etc). // Does not block until the node is done. Errors returned from this method // are not logged. func (a *app) Start() error { - // Set the data directory permissions to be read write. 
- if err := perms.ChmodR(a.config.DatabaseConfig.Path, true, perms.ReadWriteExecute); err != nil { - return fmt.Errorf("failed to restrict the permissions of the database directory with: %w", err) - } - if err := perms.ChmodR(a.config.LoggingConfig.Directory, true, perms.ReadWriteExecute); err != nil { - return fmt.Errorf("failed to restrict the permissions of the log directory with: %w", err) - } - - // we want to create the logger after the plugin has started the app - logFactory := logging.NewFactory(a.config.LoggingConfig) - log, err := logFactory.Make("main") - if err != nil { - logFactory.Close() - return err - } - - // update fd limit - fdLimit := a.config.FdLimit - if err := ulimit.Set(fdLimit, log); err != nil { - log.Fatal("failed to set fd-limit", - zap.Error(err), - ) - logFactory.Close() - return err - } - - // Track if sybil control is enforced - if !a.config.EnableStaking { - log.Warn("sybil control is not enforced", - zap.String("reason", "staking is disabled"), - ) - } - - // TODO move this to config - // SupportsNAT() for NoRouter is false. - // Which means we tried to perform a NAT activity but we were not successful. - if a.config.AttemptedNATTraversal && !a.config.Nat.SupportsNAT() { - log.Warn("UPnP and NAT-PMP router attach failed, you may not be listening publicly. " + - "Please confirm the settings in your router") - } - - if ip := a.config.IPPort.IPPort().IP; ip.IsLoopback() || ip.IsPrivate() { - log.Warn("P2P IP is private, you will not be publicly discoverable", - zap.Stringer("ip", ip), - ) - } - - // An empty host is treated as a wildcard to match all addresses, so it is - // considered public. 
- hostIsPublic := a.config.HTTPHost == "" - if !hostIsPublic { - ip, err := ips.Lookup(a.config.HTTPHost) - if err != nil { - log.Fatal("failed to lookup HTTP host", - zap.String("host", a.config.HTTPHost), - zap.Error(err), - ) - logFactory.Close() - return err - } - hostIsPublic = !ip.IsLoopback() && !ip.IsPrivate() - - log.Debug("finished HTTP host lookup", - zap.String("host", a.config.HTTPHost), - zap.Stringer("ip", ip), - zap.Bool("isPublic", hostIsPublic), - ) - } - - mapper := nat.NewPortMapper(log, a.config.Nat) - - // Open staking port we want for NAT traversal to have the external port - // (config.IP.Port) to connect to our internal listening port - // (config.InternalStakingPort) which should be the same in most cases. - if port := a.config.IPPort.IPPort().Port; port != 0 { - mapper.Map( - port, - port, - stakingPortName, - a.config.IPPort, - a.config.IPResolutionFreq, - ) - } - - // Don't open the HTTP port if the HTTP server is private - if hostIsPublic { - log.Warn("HTTP server is binding to a potentially public host. "+ - "You may be vulnerable to a DoS attack if your HTTP port is publicly accessible", - zap.String("host", a.config.HTTPHost), - ) - - // For NAT traversal we want to route from the external port - // (config.ExternalHTTPPort) to our internal port (config.HTTPPort). - if a.config.HTTPPort != 0 { - mapper.Map( - a.config.HTTPPort, - a.config.HTTPPort, - httpPortName, - nil, - a.config.IPResolutionFreq, - ) - } - } - - // Regularly update our public IP. - // Note that if the node config said to not dynamically resolve and - // update our public IP, [p.config.IPUdater] is a no-op implementation. 
- go a.config.IPUpdater.Dispatch(log) - - if err := a.node.Initialize(&a.config, log, logFactory); err != nil { - log.Fatal("error initializing node", - zap.Error(err), - ) - mapper.UnmapAllPorts() - a.config.IPUpdater.Stop() - log.Stop() - logFactory.Close() - return err - } - // [p.ExitCode] will block until [p.exitWG.Done] is called a.exitWG.Add(1) go func() { @@ -235,22 +138,19 @@ func (a *app) Start() error { if r := recover(); r != nil { fmt.Println("caught panic", r) } - log.Stop() - logFactory.Close() + a.log.Stop() + a.logFactory.Close() a.exitWG.Done() }() defer func() { - mapper.UnmapAllPorts() - a.config.IPUpdater.Stop() - // If [p.node.Dispatch()] panics, then we should log the panic and // then re-raise the panic. This is why the above defer is broken // into two parts. - log.StopOnPanic() + a.log.StopOnPanic() }() err := a.node.Dispatch() - log.Debug("dispatch returned", + a.log.Debug("dispatch returned", zap.Error(err), ) }() diff --git a/avalanchego/cache/cache.go b/avalanchego/cache/cache.go index ea6c4168..10ecad2c 100644 --- a/avalanchego/cache/cache.go +++ b/avalanchego/cache/cache.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package cache @@ -18,6 +18,12 @@ type Cacher[K comparable, V any] interface { // Flush removes all entries from the cache Flush() + + // Returns the number of elements currently in the cache + Len() int + + // Returns fraction of cache currently filled (0 --> 1) + PortionFilled() float64 } // Evictable allows the object to be notified when it is evicted diff --git a/avalanchego/cache/empty_cache.go b/avalanchego/cache/empty_cache.go new file mode 100644 index 00000000..3a70ea91 --- /dev/null +++ b/avalanchego/cache/empty_cache.go @@ -0,0 +1,28 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package cache + +import "github.com/ava-labs/avalanchego/utils" + +var _ Cacher[struct{}, struct{}] = (*Empty[struct{}, struct{}])(nil) + +type Empty[K any, V any] struct{} + +func (*Empty[K, V]) Put(K, V) {} + +func (*Empty[K, V]) Get(K) (V, bool) { + return utils.Zero[V](), false +} + +func (*Empty[K, _]) Evict(K) {} + +func (*Empty[_, _]) Flush() {} + +func (*Empty[_, _]) Len() int { + return 0 +} + +func (*Empty[_, _]) PortionFilled() float64 { + return 0 +} diff --git a/avalanchego/cache/lru_cache.go b/avalanchego/cache/lru_cache.go index 07c0d9b7..2a8a7ebe 100644 --- a/avalanchego/cache/lru_cache.go +++ b/avalanchego/cache/lru_cache.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package cache @@ -18,7 +18,7 @@ var _ Cacher[struct{}, struct{}] = (*LRU[struct{}, struct{}])(nil) type LRU[K comparable, V any] struct { lock sync.Mutex elements linkedhashmap.LinkedHashmap[K, V] - // If set to < 0, will be set internally to 1. + // If set to <= 0, will be set internally to 1. Size int } @@ -50,6 +50,20 @@ func (c *LRU[_, _]) Flush() { c.flush() } +func (c *LRU[_, _]) Len() int { + c.lock.Lock() + defer c.lock.Unlock() + + return c.len() +} + +func (c *LRU[_, _]) PortionFilled() float64 { + c.lock.Lock() + defer c.lock.Unlock() + + return c.portionFilled() +} + func (c *LRU[K, V]) put(key K, value V) { c.resize() @@ -81,6 +95,17 @@ func (c *LRU[K, V]) flush() { c.elements = linkedhashmap.New[K, V]() } +func (c *LRU[_, _]) len() int { + if c.elements == nil { + return 0 + } + return c.elements.Len() +} + +func (c *LRU[_, _]) portionFilled() float64 { + return float64(c.len()) / float64(c.Size) +} + // Initializes [c.elements] if it's nil. // Sets [c.size] to 1 if it's <= 0. 
// Removes oldest elements to make number of elements diff --git a/avalanchego/cache/lru_cache_benchmark_test.go b/avalanchego/cache/lru_cache_benchmark_test.go index 73acf90b..3ddf03cb 100644 --- a/avalanchego/cache/lru_cache_benchmark_test.go +++ b/avalanchego/cache/lru_cache_benchmark_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package cache @@ -7,6 +7,8 @@ import ( "crypto/rand" "testing" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/ids" ) @@ -16,9 +18,8 @@ func BenchmarkLRUCachePutSmall(b *testing.B) { for n := 0; n < b.N; n++ { for i := 0; i < smallLen; i++ { var id ids.ID - if _, err := rand.Read(id[:]); err != nil { - b.Fatal(err) - } + _, err := rand.Read(id[:]) + require.NoError(b, err) cache.Put(id, n) } b.StopTimer() @@ -33,9 +34,8 @@ func BenchmarkLRUCachePutMedium(b *testing.B) { for n := 0; n < b.N; n++ { for i := 0; i < mediumLen; i++ { var id ids.ID - if _, err := rand.Read(id[:]); err != nil { - b.Fatal(err) - } + _, err := rand.Read(id[:]) + require.NoError(b, err) cache.Put(id, n) } b.StopTimer() @@ -50,9 +50,8 @@ func BenchmarkLRUCachePutLarge(b *testing.B) { for n := 0; n < b.N; n++ { for i := 0; i < largeLen; i++ { var id ids.ID - if _, err := rand.Read(id[:]); err != nil { - b.Fatal(err) - } + _, err := rand.Read(id[:]) + require.NoError(b, err) cache.Put(id, n) } b.StopTimer() diff --git a/avalanchego/cache/lru_cache_test.go b/avalanchego/cache/lru_cache_test.go index aaf4eb34..e8f0b288 100644 --- a/avalanchego/cache/lru_cache_test.go +++ b/avalanchego/cache/lru_cache_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package cache @@ -6,59 +6,60 @@ package cache import ( "testing" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/ids" ) func TestLRU(t *testing.T) { - cache := &LRU[ids.ID, int]{Size: 1} + cache := &LRU[ids.ID, int64]{Size: 1} TestBasic(t, cache) } func TestLRUEviction(t *testing.T) { - cache := &LRU[ids.ID, int]{Size: 2} + cache := &LRU[ids.ID, int64]{Size: 2} TestEviction(t, cache) } func TestLRUResize(t *testing.T) { - cache := LRU[ids.ID, int]{Size: 2} + require := require.New(t) + cache := LRU[ids.ID, int64]{Size: 2} id1 := ids.ID{1} id2 := ids.ID{2} - cache.Put(id1, 1) - cache.Put(id2, 2) + expectedVal1 := int64(1) + expectedVal2 := int64(2) + cache.Put(id1, expectedVal1) + cache.Put(id2, expectedVal2) + + val, found := cache.Get(id1) + require.True(found) + require.Equal(expectedVal1, val) - if val, found := cache.Get(id1); !found { - t.Fatalf("Failed to retrieve value when one exists") - } else if val != 1 { - t.Fatalf("Retrieved wrong value") - } else if val, found := cache.Get(id2); !found { - t.Fatalf("Failed to retrieve value when one exists") - } else if val != 2 { - t.Fatalf("Retrieved wrong value") - } + val, found = cache.Get(id2) + require.True(found) + require.Equal(expectedVal2, val) cache.Size = 1 // id1 evicted - if _, found := cache.Get(id1); found { - t.Fatalf("Retrieve value when none exists") - } else if val, found := cache.Get(id2); !found { - t.Fatalf("Failed to retrieve value when one exists") - } else if val != 2 { - t.Fatalf("Retrieved wrong value") - } + _, found = cache.Get(id1) + require.False(found) + + val, found = cache.Get(id2) + require.True(found) + require.Equal(expectedVal2, val) cache.Size = 0 // We reset the size to 1 in resize - if _, found := cache.Get(id1); found { - t.Fatalf("Retrieve value when none exists") - } else if val, found := cache.Get(id2); !found { - t.Fatalf("Failed to retrieve value when one exists") - } else if val != 2 { - t.Fatalf("Retrieved wrong value") - } + _, found = 
cache.Get(id1) + require.False(found) + + val, found = cache.Get(id2) + require.True(found) + require.Equal(expectedVal2, val) } diff --git a/avalanchego/cache/lru_sized_cache.go b/avalanchego/cache/lru_sized_cache.go new file mode 100644 index 00000000..5dc9b5fd --- /dev/null +++ b/avalanchego/cache/lru_sized_cache.go @@ -0,0 +1,126 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package cache + +import ( + "sync" + + "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/linkedhashmap" +) + +var _ Cacher[struct{}, any] = (*sizedLRU[struct{}, any])(nil) + +// sizedLRU is a key value store with bounded size. If the size is attempted to +// be exceeded, then elements are removed from the cache until the bound is +// honored, based on evicting the least recently used value. +type sizedLRU[K comparable, V any] struct { + lock sync.Mutex + elements linkedhashmap.LinkedHashmap[K, V] + maxSize int + currentSize int + size func(K, V) int +} + +func NewSizedLRU[K comparable, V any](maxSize int, size func(K, V) int) Cacher[K, V] { + return &sizedLRU[K, V]{ + elements: linkedhashmap.New[K, V](), + maxSize: maxSize, + size: size, + } +} + +func (c *sizedLRU[K, V]) Put(key K, value V) { + c.lock.Lock() + defer c.lock.Unlock() + + c.put(key, value) +} + +func (c *sizedLRU[K, V]) Get(key K) (V, bool) { + c.lock.Lock() + defer c.lock.Unlock() + + return c.get(key) +} + +func (c *sizedLRU[K, V]) Evict(key K) { + c.lock.Lock() + defer c.lock.Unlock() + + c.evict(key) +} + +func (c *sizedLRU[K, V]) Flush() { + c.lock.Lock() + defer c.lock.Unlock() + + c.flush() +} + +func (c *sizedLRU[_, _]) Len() int { + c.lock.Lock() + defer c.lock.Unlock() + + return c.len() +} + +func (c *sizedLRU[_, _]) PortionFilled() float64 { + c.lock.Lock() + defer c.lock.Unlock() + + return c.portionFilled() +} + +func (c *sizedLRU[K, V]) put(key K, value V) { + newEntrySize := c.size(key, value) + if 
newEntrySize > c.maxSize { + c.flush() + return + } + + if oldValue, ok := c.elements.Get(key); ok { + c.currentSize -= c.size(key, oldValue) + } + + // Remove elements until the size of elements in the cache <= [c.maxSize]. + for c.currentSize > c.maxSize-newEntrySize { + oldestKey, oldestValue, _ := c.elements.Oldest() + c.elements.Delete(oldestKey) + c.currentSize -= c.size(oldestKey, oldestValue) + } + + c.elements.Put(key, value) + c.currentSize += newEntrySize +} + +func (c *sizedLRU[K, V]) get(key K) (V, bool) { + value, ok := c.elements.Get(key) + if !ok { + return utils.Zero[V](), false + } + + c.elements.Put(key, value) // Mark [k] as MRU. + return value, true +} + +func (c *sizedLRU[K, _]) evict(key K) { + if value, ok := c.elements.Get(key); ok { + c.elements.Delete(key) + c.currentSize -= c.size(key, value) + } +} + +func (c *sizedLRU[K, V]) flush() { + c.elements = linkedhashmap.New[K, V]() + c.currentSize = 0 +} + +func (c *sizedLRU[_, _]) len() int { + return c.elements.Len() +} + +func (c *sizedLRU[_, _]) portionFilled() float64 { + return float64(c.currentSize) / float64(c.maxSize) +} diff --git a/avalanchego/cache/lru_sized_cache_test.go b/avalanchego/cache/lru_sized_cache_test.go new file mode 100644 index 00000000..ad1c8b40 --- /dev/null +++ b/avalanchego/cache/lru_sized_cache_test.go @@ -0,0 +1,52 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package cache + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" +) + +func TestSizedLRU(t *testing.T) { + cache := NewSizedLRU[ids.ID, int64](TestIntSize, TestIntSizeFunc) + + TestBasic(t, cache) +} + +func TestSizedLRUEviction(t *testing.T) { + cache := NewSizedLRU[ids.ID, int64](2*TestIntSize, TestIntSizeFunc) + + TestEviction(t, cache) +} + +func TestSizedLRUWrongKeyEvictionRegression(t *testing.T) { + require := require.New(t) + + cache := NewSizedLRU[string, struct{}]( + 3, + func(key string, _ struct{}) int { + return len(key) + }, + ) + + cache.Put("a", struct{}{}) + cache.Put("b", struct{}{}) + cache.Put("c", struct{}{}) + cache.Put("dd", struct{}{}) + + _, ok := cache.Get("a") + require.False(ok) + + _, ok = cache.Get("b") + require.False(ok) + + _, ok = cache.Get("c") + require.True(ok) + + _, ok = cache.Get("dd") + require.True(ok) +} diff --git a/avalanchego/cache/metercacher/cache.go b/avalanchego/cache/metercacher/cache.go index 56719312..c2ff666f 100644 --- a/avalanchego/cache/metercacher/cache.go +++ b/avalanchego/cache/metercacher/cache.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package metercacher @@ -33,6 +33,8 @@ func (c *Cache[K, V]) Put(key K, value V) { c.Cacher.Put(key, value) end := c.clock.Time() c.put.Observe(float64(end.Sub(start))) + c.len.Set(float64(c.Cacher.Len())) + c.portionFilled.Set(c.Cacher.PortionFilled()) } func (c *Cache[K, V]) Get(key K) (V, bool) { @@ -48,3 +50,15 @@ func (c *Cache[K, V]) Get(key K) (V, bool) { return value, has } + +func (c *Cache[K, _]) Evict(key K) { + c.Cacher.Evict(key) + c.len.Set(float64(c.Cacher.Len())) + c.portionFilled.Set(c.Cacher.PortionFilled()) +} + +func (c *Cache[_, _]) Flush() { + c.Cacher.Flush() + c.len.Set(float64(c.Cacher.Len())) + c.portionFilled.Set(c.Cacher.PortionFilled()) +} diff --git a/avalanchego/cache/metercacher/cache_test.go b/avalanchego/cache/metercacher/cache_test.go index bb40fec0..81f4eb72 100644 --- a/avalanchego/cache/metercacher/cache_test.go +++ b/avalanchego/cache/metercacher/cache_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package metercacher @@ -7,19 +7,39 @@ import ( "testing" "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/cache" "github.com/ava-labs/avalanchego/ids" ) func TestInterface(t *testing.T) { - for _, test := range cache.CacherTests { - cache := &cache.LRU[ids.ID, int]{Size: test.Size} - c, err := New[ids.ID, int]("", prometheus.NewRegistry(), cache) - if err != nil { - t.Fatal(err) - } + type scenario struct { + description string + setup func(size int) cache.Cacher[ids.ID, int64] + } - test.Func(t, c) + scenarios := []scenario{ + { + description: "cache LRU", + setup: func(size int) cache.Cacher[ids.ID, int64] { + return &cache.LRU[ids.ID, int64]{Size: size} + }, + }, + { + description: "sized cache LRU", + setup: func(size int) cache.Cacher[ids.ID, int64] { + return cache.NewSizedLRU[ids.ID, int64](size*cache.TestIntSize, cache.TestIntSizeFunc) + }, + }, + } + + for _, scenario := range scenarios { + for _, test := range cache.CacherTests { + baseCache := scenario.setup(test.Size) + c, err := New("", prometheus.NewRegistry(), baseCache) + require.NoError(t, err) + test.Func(t, c) + } } } diff --git a/avalanchego/cache/metercacher/metrics.go b/avalanchego/cache/metercacher/metrics.go index c9ae019f..39e0d806 100644 --- a/avalanchego/cache/metercacher/metrics.go +++ b/avalanchego/cache/metercacher/metrics.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package metercacher @@ -16,7 +16,7 @@ func newAveragerMetric(namespace, name string, reg prometheus.Registerer, errs * return metric.NewAveragerWithErrs( namespace, name, - fmt.Sprintf("time (in ns) of a %s", name), + "time (in ns) of a "+name, reg, errs, ) @@ -33,11 +33,12 @@ func newCounterMetric(namespace, name string, reg prometheus.Registerer, errs *w } type metrics struct { - get, - put metric.Averager - - hit, - miss prometheus.Counter + get metric.Averager + put metric.Averager + len prometheus.Gauge + portionFilled prometheus.Gauge + hit prometheus.Counter + miss prometheus.Counter } func (m *metrics) Initialize( @@ -47,6 +48,22 @@ func (m *metrics) Initialize( errs := wrappers.Errs{} m.get = newAveragerMetric(namespace, "get", reg, &errs) m.put = newAveragerMetric(namespace, "put", reg, &errs) + m.len = prometheus.NewGauge( + prometheus.GaugeOpts{ + Namespace: namespace, + Name: "len", + Help: "number of entries", + }, + ) + errs.Add(reg.Register(m.len)) + m.portionFilled = prometheus.NewGauge( + prometheus.GaugeOpts{ + Namespace: namespace, + Name: "portion_filled", + Help: "fraction of cache filled", + }, + ) + errs.Add(reg.Register(m.portionFilled)) m.hit = newCounterMetric(namespace, "hit", reg, &errs) m.miss = newCounterMetric(namespace, "miss", reg, &errs) return errs.Err diff --git a/avalanchego/cache/test_cacher.go b/avalanchego/cache/test_cacher.go index 0fa81668..2e85502e 100644 --- a/avalanchego/cache/test_cacher.go +++ b/avalanchego/cache/test_cacher.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package cache @@ -6,131 +6,141 @@ package cache import ( "testing" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/ids" ) +const TestIntSize = ids.IDLen + 8 + +func TestIntSizeFunc(ids.ID, int64) int { + return TestIntSize +} + // CacherTests is a list of all Cacher tests var CacherTests = []struct { Size int - Func func(t *testing.T, c Cacher[ids.ID, int]) + Func func(t *testing.T, c Cacher[ids.ID, int64]) }{ {Size: 1, Func: TestBasic}, {Size: 2, Func: TestEviction}, } -func TestBasic(t *testing.T, cache Cacher[ids.ID, int]) { +func TestBasic(t *testing.T, cache Cacher[ids.ID, int64]) { + require := require.New(t) + id1 := ids.ID{1} - if _, found := cache.Get(id1); found { - t.Fatalf("Retrieved value when none exists") - } + _, found := cache.Get(id1) + require.False(found) - expectedValue1 := 1 + expectedValue1 := int64(1) cache.Put(id1, expectedValue1) - if value, found := cache.Get(id1); !found { - t.Fatalf("Failed to retrieve value when one exists") - } else if value != expectedValue1 { - t.Fatalf("Failed to retrieve correct value when one exists") - } + value, found := cache.Get(id1) + require.True(found) + require.Equal(expectedValue1, value) cache.Put(id1, expectedValue1) - if value, found := cache.Get(id1); !found { - t.Fatalf("Failed to retrieve value when one exists") - } else if value != expectedValue1 { - t.Fatalf("Failed to retrieve correct value when one exists") - } + value, found = cache.Get(id1) + require.True(found) + require.Equal(expectedValue1, value) cache.Put(id1, expectedValue1) - if value, found := cache.Get(id1); !found { - t.Fatalf("Failed to retrieve value when one exists") - } else if value != expectedValue1 { - t.Fatalf("Failed to retrieve correct value when one exists") - } + value, found = cache.Get(id1) + require.True(found) + require.Equal(expectedValue1, value) id2 := ids.ID{2} - expectedValue2 := 2 + expectedValue2 := int64(2) cache.Put(id2, expectedValue2) - if _, found := cache.Get(id1); found { - 
t.Fatalf("Retrieved value when none exists") - } - if value, found := cache.Get(id2); !found { - t.Fatalf("Failed to retrieve value when one exists") - } else if value != expectedValue2 { - t.Fatalf("Failed to retrieve correct value when one exists") - } + _, found = cache.Get(id1) + require.False(found) + + value, found = cache.Get(id2) + require.True(found) + require.Equal(expectedValue2, value) } -func TestEviction(t *testing.T, cache Cacher[ids.ID, int]) { +func TestEviction(t *testing.T, cache Cacher[ids.ID, int64]) { + require := require.New(t) + id1 := ids.ID{1} id2 := ids.ID{2} id3 := ids.ID{3} - cache.Put(id1, 1) - cache.Put(id2, 2) - - if val, found := cache.Get(id1); !found { - t.Fatalf("Failed to retrieve value when one exists") - } else if val != 1 { - t.Fatalf("Retrieved wrong value") - } else if val, found := cache.Get(id2); !found { - t.Fatalf("Failed to retrieve value when one exists") - } else if val != 2 { - t.Fatalf("Retrieved wrong value") - } else if _, found := cache.Get(id3); found { - t.Fatalf("Retrieve value when none exists") - } - - cache.Put(id3, 3) - - if _, found := cache.Get(id1); found { - t.Fatalf("Retrieve value when none exists") - } else if val, found := cache.Get(id2); !found { - t.Fatalf("Failed to retrieve value when one exists") - } else if val != 2 { - t.Fatalf("Retrieved wrong value") - } else if val, found := cache.Get(id3); !found { - t.Fatalf("Failed to retrieve value when one exists") - } else if val != 3 { - t.Fatalf("Retrieved wrong value") - } + expectedValue1 := int64(1) + expectedValue2 := int64(2) + expectedValue3 := int64(3) + + require.Zero(cache.Len()) + + cache.Put(id1, expectedValue1) + + require.Equal(1, cache.Len()) + + cache.Put(id2, expectedValue2) + + require.Equal(2, cache.Len()) + + val, found := cache.Get(id1) + require.True(found) + require.Equal(expectedValue1, val) + + val, found = cache.Get(id2) + require.True(found) + require.Equal(expectedValue2, val) + + _, found = cache.Get(id3) + 
require.False(found) + + cache.Put(id3, expectedValue3) + require.Equal(2, cache.Len()) + + _, found = cache.Get(id1) + require.False(found) + + val, found = cache.Get(id2) + require.True(found) + require.Equal(expectedValue2, val) + + val, found = cache.Get(id3) + require.True(found) + require.Equal(expectedValue3, val) cache.Get(id2) - cache.Put(id1, 1) - - if val, found := cache.Get(id1); !found { - t.Fatalf("Failed to retrieve value when one exists") - } else if val != 1 { - t.Fatalf("Retrieved wrong value") - } else if val, found := cache.Get(id2); !found { - t.Fatalf("Failed to retrieve value when one exists") - } else if val != 2 { - t.Fatalf("Retrieved wrong value") - } else if _, found := cache.Get(id3); found { - t.Fatalf("Retrieved value when none exists") - } + cache.Put(id1, expectedValue1) + + val, found = cache.Get(id1) + require.True(found) + require.Equal(expectedValue1, val) + + val, found = cache.Get(id2) + require.True(found) + require.Equal(expectedValue2, val) + + _, found = cache.Get(id3) + require.False(found) cache.Evict(id2) - cache.Put(id3, 3) - - if val, found := cache.Get(id1); !found { - t.Fatalf("Failed to retrieve value when one exists") - } else if val != 1 { - t.Fatalf("Retrieved wrong value") - } else if _, found := cache.Get(id2); found { - t.Fatalf("Retrieved value when none exists") - } else if val, found := cache.Get(id3); !found { - t.Fatalf("Failed to retrieve value when one exists") - } else if val != 3 { - t.Fatalf("Retrieved wrong value") - } + cache.Put(id3, expectedValue3) + + val, found = cache.Get(id1) + require.True(found) + require.Equal(expectedValue1, val) + + _, found = cache.Get(id2) + require.False(found) + + val, found = cache.Get(id3) + require.True(found) + require.Equal(expectedValue3, val) cache.Flush() - if _, found := cache.Get(id1); found { - t.Fatalf("Retrieved value when none exists") - } else if _, found := cache.Get(id2); found { - t.Fatalf("Retrieved value when none exists") - } else if _, found := 
cache.Get(id3); found { - t.Fatalf("Retrieved value when none exists") - } + _, found = cache.Get(id1) + require.False(found) + _, found = cache.Get(id2) + require.False(found) + _, found = cache.Get(id3) + require.False(found) } diff --git a/avalanchego/cache/unique_cache.go b/avalanchego/cache/unique_cache.go index 24052d79..b958b1f3 100644 --- a/avalanchego/cache/unique_cache.go +++ b/avalanchego/cache/unique_cache.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package cache diff --git a/avalanchego/cache/unique_cache_test.go b/avalanchego/cache/unique_cache_test.go index 0094a47a..199bdc87 100644 --- a/avalanchego/cache/unique_cache_test.go +++ b/avalanchego/cache/unique_cache_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package cache @@ -6,6 +6,8 @@ package cache import ( "testing" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/ids" ) @@ -23,50 +25,32 @@ func (e *evictable[_]) Evict() { } func TestEvictableLRU(t *testing.T) { + require := require.New(t) + cache := EvictableLRU[ids.ID, *evictable[ids.ID]]{} expectedValue1 := &evictable[ids.ID]{id: ids.ID{1}} - if returnedValue := cache.Deduplicate(expectedValue1); returnedValue != expectedValue1 { - t.Fatalf("Returned unknown value") - } else if expectedValue1.evicted != 0 { - t.Fatalf("Value was evicted unexpectedly") - } else if returnedValue := cache.Deduplicate(expectedValue1); returnedValue != expectedValue1 { - t.Fatalf("Returned unknown value") - } else if expectedValue1.evicted != 0 { - t.Fatalf("Value was evicted unexpectedly") - } + require.Equal(expectedValue1, cache.Deduplicate(expectedValue1)) + require.Zero(expectedValue1.evicted) + require.Equal(expectedValue1, cache.Deduplicate(expectedValue1)) + require.Zero(expectedValue1.evicted) expectedValue2 := &evictable[ids.ID]{id: ids.ID{2}} returnedValue := cache.Deduplicate(expectedValue2) - switch { - case returnedValue != expectedValue2: - t.Fatalf("Returned unknown value") - case expectedValue1.evicted != 1: - t.Fatalf("Value should have been evicted") - case expectedValue2.evicted != 0: - t.Fatalf("Value was evicted unexpectedly") - } + require.Equal(expectedValue2, returnedValue) + require.Equal(1, expectedValue1.evicted) + require.Zero(expectedValue2.evicted) cache.Size = 2 expectedValue3 := &evictable[ids.ID]{id: ids.ID{2}} returnedValue = cache.Deduplicate(expectedValue3) - switch { - case returnedValue != expectedValue2: - t.Fatalf("Returned unknown value") - case expectedValue1.evicted != 1: - t.Fatalf("Value should have been evicted") - case expectedValue2.evicted != 0: - t.Fatalf("Value was evicted unexpectedly") - } + require.Equal(expectedValue2, returnedValue) + require.Equal(1, expectedValue1.evicted) + 
require.Zero(expectedValue2.evicted) cache.Flush() - switch { - case expectedValue1.evicted != 1: - t.Fatalf("Value should have been evicted") - case expectedValue2.evicted != 1: - t.Fatalf("Value should have been evicted") - case expectedValue3.evicted != 0: - t.Fatalf("Value was evicted unexpectedly") - } + require.Equal(1, expectedValue1.evicted) + require.Equal(1, expectedValue2.evicted) + require.Zero(expectedValue3.evicted) } diff --git a/avalanchego/chains/atomic/codec.go b/avalanchego/chains/atomic/codec.go index bc2e93c2..290713b3 100644 --- a/avalanchego/chains/atomic/codec.go +++ b/avalanchego/chains/atomic/codec.go @@ -1,22 +1,25 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package atomic import ( + "math" + "time" + "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" ) -const codecVersion = 0 +const CodecVersion = 0 -// codecManager is used to marshal and unmarshal dbElements and chain IDs. -var codecManager codec.Manager +// Codec is used to marshal and unmarshal dbElements and chain IDs. +var Codec codec.Manager func init() { - linearCodec := linearcodec.NewDefault() - codecManager = codec.NewDefaultManager() - if err := codecManager.RegisterCodec(codecVersion, linearCodec); err != nil { + lc := linearcodec.NewDefault(time.Time{}) + Codec = codec.NewManager(math.MaxInt) + if err := Codec.RegisterCodec(CodecVersion, lc); err != nil { panic(err) } } diff --git a/avalanchego/chains/atomic/gsharedmemory/filtered_batch.go b/avalanchego/chains/atomic/gsharedmemory/filtered_batch.go index df63e8df..a6ba8125 100644 --- a/avalanchego/chains/atomic/gsharedmemory/filtered_batch.go +++ b/avalanchego/chains/atomic/gsharedmemory/filtered_batch.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. package gsharedmemory diff --git a/avalanchego/chains/atomic/gsharedmemory/shared_memory_client.go b/avalanchego/chains/atomic/gsharedmemory/shared_memory_client.go index 649503a0..096a8117 100644 --- a/avalanchego/chains/atomic/gsharedmemory/shared_memory_client.go +++ b/avalanchego/chains/atomic/gsharedmemory/shared_memory_client.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package gsharedmemory diff --git a/avalanchego/chains/atomic/gsharedmemory/shared_memory_server.go b/avalanchego/chains/atomic/gsharedmemory/shared_memory_server.go index 3e2d0d38..0aaa71c0 100644 --- a/avalanchego/chains/atomic/gsharedmemory/shared_memory_server.go +++ b/avalanchego/chains/atomic/gsharedmemory/shared_memory_server.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package gsharedmemory diff --git a/avalanchego/chains/atomic/gsharedmemory/shared_memory_test.go b/avalanchego/chains/atomic/gsharedmemory/shared_memory_test.go index 715e0e43..02dfb732 100644 --- a/avalanchego/chains/atomic/gsharedmemory/shared_memory_test.go +++ b/avalanchego/chains/atomic/gsharedmemory/shared_memory_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package gsharedmemory @@ -37,19 +37,16 @@ func TestInterface(t *testing.T) { test(t, chainID0, chainID1, sm0, sm1, testDB) - err := conn0.Close() - require.NoError(err) - - err = conn1.Close() - require.NoError(err) + require.NoError(conn0.Close()) + require.NoError(conn1.Close()) } } func wrapSharedMemory(t *testing.T, sm atomic.SharedMemory, db database.Database) (atomic.SharedMemory, io.Closer) { + require := require.New(t) + listener, err := grpcutils.NewListener() - if err != nil { - t.Fatalf("Failed to create listener: %s", err) - } + require.NoError(err) serverCloser := grpcutils.ServerCloser{} server := grpcutils.NewServer() @@ -59,9 +56,7 @@ func wrapSharedMemory(t *testing.T, sm atomic.SharedMemory, db database.Database go grpcutils.Serve(listener, server) conn, err := grpcutils.Dial(listener.Addr().String()) - if err != nil { - t.Fatalf("Failed to dial: %s", err) - } + require.NoError(err) rpcsm := NewClient(sharedmemorypb.NewSharedMemoryClient(conn)) return rpcsm, conn diff --git a/avalanchego/chains/atomic/memory.go b/avalanchego/chains/atomic/memory.go index a8aa703f..76f5b645 100644 --- a/avalanchego/chains/atomic/memory.go +++ b/avalanchego/chains/atomic/memory.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package atomic @@ -107,7 +107,7 @@ func sharedID(id1, id2 ids.ID) ids.ID { id1, id2 = id2, id1 } - combinedBytes, err := codecManager.Marshal(codecVersion, [2]ids.ID{id1, id2}) + combinedBytes, err := Codec.Marshal(CodecVersion, [2]ids.ID{id1, id2}) if err != nil { panic(err) } diff --git a/avalanchego/chains/atomic/memory_test.go b/avalanchego/chains/atomic/memory_test.go index faf461f8..7ca02e6d 100644 --- a/avalanchego/chains/atomic/memory_test.go +++ b/avalanchego/chains/atomic/memory_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
+// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package atomic @@ -6,6 +6,8 @@ package atomic import ( "testing" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" ) @@ -19,32 +21,26 @@ func TestSharedID(t *testing.T) { sharedID0 := sharedID(blockchainID0, blockchainID1) sharedID1 := sharedID(blockchainID1, blockchainID0) - if sharedID0 != sharedID1 { - t.Fatalf("SharedMemory.sharedID should be communitive") - } + require.Equal(t, sharedID0, sharedID1) } func TestMemoryMakeReleaseLock(t *testing.T) { + require := require.New(t) + m := NewMemory(memdb.New()) sharedID := sharedID(blockchainID0, blockchainID1) lock0 := m.makeLock(sharedID) - if lock1 := m.makeLock(sharedID); lock0 != lock1 { - t.Fatalf("Memory.makeLock should have returned the same lock") - } + require.Equal(lock0, m.makeLock(sharedID)) m.releaseLock(sharedID) - if lock2 := m.makeLock(sharedID); lock0 != lock2 { - t.Fatalf("Memory.makeLock should have returned the same lock") - } + require.Equal(lock0, m.makeLock(sharedID)) m.releaseLock(sharedID) m.releaseLock(sharedID) - if lock3 := m.makeLock(sharedID); lock0 == lock3 { - t.Fatalf("Memory.releaseLock should have returned freed the lock") - } + require.Equal(lock0, m.makeLock(sharedID)) m.releaseLock(sharedID) } @@ -54,9 +50,7 @@ func TestMemoryUnknownFree(t *testing.T) { sharedID := sharedID(blockchainID0, blockchainID1) defer func() { - if recover() == nil { - t.Fatalf("Should have panicked due to an unknown free") - } + require.NotNil(t, recover()) }() m.releaseLock(sharedID) diff --git a/avalanchego/chains/atomic/mock_shared_memory.go b/avalanchego/chains/atomic/mock_shared_memory.go index 7f5d4b5a..0e631793 100644 --- a/avalanchego/chains/atomic/mock_shared_memory.go +++ b/avalanchego/chains/atomic/mock_shared_memory.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
-// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/chains/atomic (interfaces: SharedMemory) +// +// Generated by this command: +// +// mockgen -package=atomic -destination=chains/atomic/mock_shared_memory.go github.com/ava-labs/avalanchego/chains/atomic SharedMemory +// // Package atomic is a generated GoMock package. package atomic @@ -12,7 +14,7 @@ import ( database "github.com/ava-labs/avalanchego/database" ids "github.com/ava-labs/avalanchego/ids" - gomock "github.com/golang/mock/gomock" + gomock "go.uber.org/mock/gomock" ) // MockSharedMemory is a mock of SharedMemory interface. @@ -41,7 +43,7 @@ func (m *MockSharedMemory) EXPECT() *MockSharedMemoryMockRecorder { // Apply mocks base method. func (m *MockSharedMemory) Apply(arg0 map[ids.ID]*Requests, arg1 ...database.Batch) error { m.ctrl.T.Helper() - varargs := []interface{}{arg0} + varargs := []any{arg0} for _, a := range arg1 { varargs = append(varargs, a) } @@ -51,9 +53,9 @@ func (m *MockSharedMemory) Apply(arg0 map[ids.ID]*Requests, arg1 ...database.Bat } // Apply indicates an expected call of Apply. -func (mr *MockSharedMemoryMockRecorder) Apply(arg0 interface{}, arg1 ...interface{}) *gomock.Call { +func (mr *MockSharedMemoryMockRecorder) Apply(arg0 any, arg1 ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0}, arg1...) + varargs := append([]any{arg0}, arg1...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Apply", reflect.TypeOf((*MockSharedMemory)(nil).Apply), varargs...) } @@ -67,7 +69,7 @@ func (m *MockSharedMemory) Get(arg0 ids.ID, arg1 [][]byte) ([][]byte, error) { } // Get indicates an expected call of Get. 
-func (mr *MockSharedMemoryMockRecorder) Get(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockSharedMemoryMockRecorder) Get(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockSharedMemory)(nil).Get), arg0, arg1) } @@ -84,7 +86,7 @@ func (m *MockSharedMemory) Indexed(arg0 ids.ID, arg1 [][]byte, arg2, arg3 []byte } // Indexed indicates an expected call of Indexed. -func (mr *MockSharedMemoryMockRecorder) Indexed(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { +func (mr *MockSharedMemoryMockRecorder) Indexed(arg0, arg1, arg2, arg3, arg4 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Indexed", reflect.TypeOf((*MockSharedMemory)(nil).Indexed), arg0, arg1, arg2, arg3, arg4) } diff --git a/avalanchego/chains/atomic/prefixes.go b/avalanchego/chains/atomic/prefixes.go index 08927384..adc21c36 100644 --- a/avalanchego/chains/atomic/prefixes.go +++ b/avalanchego/chains/atomic/prefixes.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package atomic diff --git a/avalanchego/chains/atomic/shared_memory.go b/avalanchego/chains/atomic/shared_memory.go index 7b2f8a56..d90c5685 100644 --- a/avalanchego/chains/atomic/shared_memory.go +++ b/avalanchego/chains/atomic/shared_memory.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package atomic diff --git a/avalanchego/chains/atomic/shared_memory_test.go b/avalanchego/chains/atomic/shared_memory_test.go index bb3266d8..1597d662 100644 --- a/avalanchego/chains/atomic/shared_memory_test.go +++ b/avalanchego/chains/atomic/shared_memory_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. 
All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package atomic diff --git a/avalanchego/chains/atomic/state.go b/avalanchego/chains/atomic/state.go index c16a0a2e..1eed2380 100644 --- a/avalanchego/chains/atomic/state.go +++ b/avalanchego/chains/atomic/state.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package atomic @@ -6,17 +6,21 @@ package atomic import ( "bytes" "errors" + "fmt" + "slices" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/linkeddb" "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/hashing" "github.com/ava-labs/avalanchego/utils/set" ) -var errDuplicatedOperation = errors.New("duplicated operation on provided value") +var ( + errDuplicatePut = errors.New("duplicate put") + errDuplicateRemove = errors.New("duplicate remove") +) type dbElement struct { // Present indicates the value was removed before existing. 
@@ -86,7 +90,7 @@ func (s *state) SetValue(e *Element) error { } // This key was written twice, which is invalid - return errDuplicatedOperation + return fmt.Errorf("%w: Key=0x%x Value=0x%x", errDuplicatePut, e.Key, e.Value) } if err != database.ErrNotFound { // An unexpected error occurred, so we should propagate that error @@ -107,7 +111,7 @@ func (s *state) SetValue(e *Element) error { Traits: e.Traits, } - valueBytes, err := codecManager.Marshal(codecVersion, &dbElem) + valueBytes, err := Codec.Marshal(CodecVersion, &dbElem) if err != nil { return err } @@ -137,7 +141,7 @@ func (s *state) SetValue(e *Element) error { // // This implies that chains interacting with shared memory must be able to // generate their chain state without actually performing the read of shared -// memory. Shared memory should only be used to verify that the the transition +// memory. Shared memory should only be used to verify that the transition // being performed is valid. That ensures that such verification can be skipped // during bootstrapping. It is up to the chain to ensure this based on the // current engine state. @@ -151,7 +155,7 @@ func (s *state) RemoveValue(key []byte) error { // The value doesn't exist, so we should optimistically delete it dbElem := dbElement{Present: false} - valueBytes, err := codecManager.Marshal(codecVersion, &dbElem) + valueBytes, err := Codec.Marshal(CodecVersion, &dbElem) if err != nil { return err } @@ -160,7 +164,7 @@ func (s *state) RemoveValue(key []byte) error { // Don't allow the removal of something that was already removed. if !value.Present { - return errDuplicatedOperation + return fmt.Errorf("%w: Key=0x%x", errDuplicateRemove, key) } // Remove [key] from the indexDB for each trait that has indexed this key. 
@@ -184,7 +188,7 @@ func (s *state) loadValue(key []byte) (*dbElement, error) { // The key was in the database value := &dbElement{} - _, err = codecManager.Unmarshal(valueBytes, value) + _, err = Codec.Unmarshal(valueBytes, value) return value, err } @@ -203,7 +207,7 @@ func (s *state) getKeys(traits [][]byte, startTrait, startKey []byte, limit int) lastKey := startKey // Iterate over the traits in order appending all of the keys that possess // the given [traits]. - utils.SortBytes(traits) + slices.SortFunc(traits, bytes.Compare) for _, trait := range traits { switch bytes.Compare(trait, startTrait) { case -1: diff --git a/avalanchego/chains/atomic/test_shared_memory.go b/avalanchego/chains/atomic/test_shared_memory.go index 0e7d918a..82b1cbef 100644 --- a/avalanchego/chains/atomic/test_shared_memory.go +++ b/avalanchego/chains/atomic/test_shared_memory.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package atomic @@ -31,12 +31,10 @@ var SharedMemoryTests = []func(t *testing.T, chainID0, chainID1 ids.ID, sm0, sm1 func TestSharedMemoryPutAndGet(t *testing.T, chainID0, chainID1 ids.ID, sm0, sm1 SharedMemory, _ database.Database) { require := require.New(t) - err := sm0.Apply(map[ids.ID]*Requests{chainID1: {PutRequests: []*Element{{ + require.NoError(sm0.Apply(map[ids.ID]*Requests{chainID1: {PutRequests: []*Element{{ Key: []byte{0}, Value: []byte{1}, - }}}}) - - require.NoError(err) + }}}})) values, err := sm1.Get(chainID0, [][]byte{{0}}) require.NoError(err) @@ -73,12 +71,11 @@ func TestSharedMemoryLargePutGetAndRemove(t *testing.T, chainID0, chainID1 ids.I keys = append(keys, key) } - err = sm0.Apply(map[ids.ID]*Requests{ + require.NoError(sm0.Apply(map[ids.ID]*Requests{ chainID1: { PutRequests: elems, }, - }) - require.NoError(err) + })) values, err := sm1.Get( chainID0, @@ -89,37 +86,33 @@ func TestSharedMemoryLargePutGetAndRemove(t *testing.T, chainID0, chainID1 ids.I require.Equal(elems[i].Value, value) } - err = sm1.Apply(map[ids.ID]*Requests{ + require.NoError(sm1.Apply(map[ids.ID]*Requests{ chainID0: { RemoveRequests: keys, }, - }) - - require.NoError(err) + })) } func TestSharedMemoryIndexed(t *testing.T, chainID0, chainID1 ids.ID, sm0, sm1 SharedMemory, _ database.Database) { require := require.New(t) - err := sm0.Apply(map[ids.ID]*Requests{chainID1: {PutRequests: []*Element{{ + require.NoError(sm0.Apply(map[ids.ID]*Requests{chainID1: {PutRequests: []*Element{{ Key: []byte{0}, Value: []byte{1}, Traits: [][]byte{ {2}, {3}, }, - }}}}) - require.NoError(err) + }}}})) - err = sm0.Apply(map[ids.ID]*Requests{chainID1: {PutRequests: []*Element{{ + require.NoError(sm0.Apply(map[ids.ID]*Requests{chainID1: {PutRequests: []*Element{{ Key: []byte{4}, Value: []byte{5}, Traits: [][]byte{ {2}, {3}, }, - }}}}) - require.NoError(err) + }}}})) values, _, _, err := sm0.Indexed(chainID1, [][]byte{{2}}, nil, nil, 1) require.NoError(err) @@ -183,8 +176,7 @@ func 
TestSharedMemoryLargeIndexed(t *testing.T, chainID0, chainID1 ids.ID, sm0, }) } - err = sm0.Apply(map[ids.ID]*Requests{chainID1: {PutRequests: elems}}) - require.NoError(err) + require.NoError(sm0.Apply(map[ids.ID]*Requests{chainID1: {PutRequests: elems}})) values, _, _, err := sm1.Indexed(chainID0, allTraits, nil, nil, len(elems)+1) require.NoError(err) @@ -193,6 +185,7 @@ func TestSharedMemoryLargeIndexed(t *testing.T, chainID0, chainID1 ids.ID, sm0, func TestSharedMemoryCantDuplicatePut(t *testing.T, _, chainID1 ids.ID, sm0, _ SharedMemory, _ database.Database) { require := require.New(t) + err := sm0.Apply(map[ids.ID]*Requests{chainID1: {PutRequests: []*Element{ { Key: []byte{0}, @@ -203,50 +196,50 @@ func TestSharedMemoryCantDuplicatePut(t *testing.T, _, chainID1 ids.ID, sm0, _ S Value: []byte{2}, }, }}}) - require.Error(err, "shouldn't be able to write duplicated keys") - err = sm0.Apply(map[ids.ID]*Requests{chainID1: {PutRequests: []*Element{{ + // TODO: require error to be errDuplicatedOperation + require.Error(err) //nolint:forbidigo // currently returns grpc errors too + + require.NoError(sm0.Apply(map[ids.ID]*Requests{chainID1: {PutRequests: []*Element{{ Key: []byte{0}, Value: []byte{1}, - }}}}) - require.NoError(err) + }}}})) + err = sm0.Apply(map[ids.ID]*Requests{chainID1: {PutRequests: []*Element{{ Key: []byte{0}, Value: []byte{1}, }}}}) - require.Error(err, "shouldn't be able to write duplicated keys") + // TODO: require error to be errDuplicatedOperation + require.Error(err) //nolint:forbidigo // currently returns grpc errors too } func TestSharedMemoryCantDuplicateRemove(t *testing.T, _, chainID1 ids.ID, sm0, _ SharedMemory, _ database.Database) { require := require.New(t) - err := sm0.Apply(map[ids.ID]*Requests{chainID1: {RemoveRequests: [][]byte{{0}}}}) - require.NoError(err) - err = sm0.Apply(map[ids.ID]*Requests{chainID1: {RemoveRequests: [][]byte{{0}}}}) - require.Error(err, "shouldn't be able to remove duplicated keys") + 
require.NoError(sm0.Apply(map[ids.ID]*Requests{chainID1: {RemoveRequests: [][]byte{{0}}}})) + + err := sm0.Apply(map[ids.ID]*Requests{chainID1: {RemoveRequests: [][]byte{{0}}}}) + // TODO: require error to be errDuplicatedOperation + require.Error(err) //nolint:forbidigo // currently returns grpc errors too } func TestSharedMemoryCommitOnPut(t *testing.T, _, chainID1 ids.ID, sm0, _ SharedMemory, db database.Database) { require := require.New(t) - err := db.Put([]byte{1}, []byte{2}) - require.NoError(err) + require.NoError(db.Put([]byte{1}, []byte{2})) batch := db.NewBatch() - err = batch.Put([]byte{0}, []byte{1}) - require.NoError(err) + require.NoError(batch.Put([]byte{0}, []byte{1})) - err = batch.Delete([]byte{1}) - require.NoError(err) + require.NoError(batch.Delete([]byte{1})) - err = sm0.Apply( + require.NoError(sm0.Apply( map[ids.ID]*Requests{chainID1: {PutRequests: []*Element{{ Key: []byte{0}, Value: []byte{1}, }}}}, batch, - ) - require.NoError(err) + )) val, err := db.Get([]byte{0}) require.NoError(err) @@ -260,22 +253,18 @@ func TestSharedMemoryCommitOnPut(t *testing.T, _, chainID1 ids.ID, sm0, _ Shared func TestSharedMemoryCommitOnRemove(t *testing.T, _, chainID1 ids.ID, sm0, _ SharedMemory, db database.Database) { require := require.New(t) - err := db.Put([]byte{1}, []byte{2}) - require.NoError(err) + require.NoError(db.Put([]byte{1}, []byte{2})) batch := db.NewBatch() - err = batch.Put([]byte{0}, []byte{1}) - require.NoError(err) + require.NoError(batch.Put([]byte{0}, []byte{1})) - err = batch.Delete([]byte{1}) - require.NoError(err) + require.NoError(batch.Delete([]byte{1})) - err = sm0.Apply( + require.NoError(sm0.Apply( map[ids.ID]*Requests{chainID1: {RemoveRequests: [][]byte{{0}}}}, batch, - ) - require.NoError(err) + )) val, err := db.Get([]byte{0}) require.NoError(err) @@ -292,8 +281,7 @@ func TestPutAndRemoveBatch(t *testing.T, chainID0, _ ids.ID, _, sm1 SharedMemory batch := db.NewBatch() - err := batch.Put([]byte{0}, []byte{1}) - 
require.NoError(err) + require.NoError(batch.Put([]byte{0}, []byte{1})) batchChainsAndInputs := make(map[ids.ID]*Requests) @@ -307,9 +295,7 @@ func TestPutAndRemoveBatch(t *testing.T, chainID0, _ ids.ID, _, sm1 SharedMemory RemoveRequests: byteArr, } - err = sm1.Apply(batchChainsAndInputs, batch) - - require.NoError(err) + require.NoError(sm1.Apply(batchChainsAndInputs, batch)) val, err := db.Get([]byte{0}) require.NoError(err) @@ -341,24 +327,19 @@ func TestSharedMemoryLargeBatchSize(t *testing.T, _, chainID1 ids.ID, sm0, _ Sha value := bytes[:elementSize] bytes = bytes[elementSize:] - err := batch.Put(key, value) - require.NoError(err) + require.NoError(batch.Put(key, value)) } - err = db.Put([]byte{1}, []byte{2}) - require.NoError(err) + require.NoError(db.Put([]byte{1}, []byte{2})) - err = batch.Put([]byte{0}, []byte{1}) - require.NoError(err) + require.NoError(batch.Put([]byte{0}, []byte{1})) - err = batch.Delete([]byte{1}) - require.NoError(err) + require.NoError(batch.Delete([]byte{1})) - err = sm0.Apply( + require.NoError(sm0.Apply( map[ids.ID]*Requests{chainID1: {RemoveRequests: [][]byte{{0}}}}, batch, - ) - require.NoError(err) + )) val, err := db.Get([]byte{0}) require.NoError(err) @@ -375,16 +356,13 @@ func TestSharedMemoryLargeBatchSize(t *testing.T, _, chainID1 ids.ID, sm0, _ Sha key := bytes[:elementSize] bytes = bytes[pairSize:] - err := batch.Delete(key) - require.NoError(err) + require.NoError(batch.Delete(key)) } - err = sm0.Apply( + require.NoError(sm0.Apply( map[ids.ID]*Requests{chainID1: {RemoveRequests: [][]byte{{1}}}}, batch, - ) - - require.NoError(err) + )) batch.Reset() @@ -393,8 +371,7 @@ func TestSharedMemoryLargeBatchSize(t *testing.T, _, chainID1 ids.ID, sm0, _ Sha key := bytes[:elementSize] bytes = bytes[pairSize:] - err := batch.Delete(key) - require.NoError(err) + require.NoError(batch.Delete(key)) } batchChainsAndInputs := make(map[ids.ID]*Requests) @@ -409,9 +386,8 @@ func TestSharedMemoryLargeBatchSize(t *testing.T, _, chainID1 
ids.ID, sm0, _ Sha RemoveRequests: byteArr, } - err = sm0.Apply( + require.NoError(sm0.Apply( batchChainsAndInputs, batch, - ) - require.NoError(err) + )) } diff --git a/avalanchego/chains/atomic/writer.go b/avalanchego/chains/atomic/writer.go index 9f3876a5..6bcdd86b 100644 --- a/avalanchego/chains/atomic/writer.go +++ b/avalanchego/chains/atomic/writer.go @@ -1,11 +1,9 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package atomic -import ( - "github.com/ava-labs/avalanchego/database" -) +import "github.com/ava-labs/avalanchego/database" // WriteAll writes all of the batches to the underlying database of baseBatch. // Assumes all batches have the same underlying database. diff --git a/avalanchego/chains/linearizable_vm.go b/avalanchego/chains/linearizable_vm.go index abaf20ca..97fe9eb4 100644 --- a/avalanchego/chains/linearizable_vm.go +++ b/avalanchego/chains/linearizable_vm.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package chains @@ -7,13 +7,12 @@ import ( "context" "github.com/ava-labs/avalanchego/api/metrics" + "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/engine/avalanche/vertex" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" - - dbManager "github.com/ava-labs/avalanchego/database/manager" ) var ( @@ -32,7 +31,7 @@ type initializeOnLinearizeVM struct { registerer metrics.OptionalGatherer ctx *snow.Context - dbManager dbManager.Manager + db database.Database genesisBytes []byte upgradeBytes []byte configBytes []byte @@ -47,7 +46,7 @@ func (vm *initializeOnLinearizeVM) Linearize(ctx context.Context, stopVertexID i return vm.vmToInitialize.Initialize( ctx, vm.ctx, - vm.dbManager, + vm.db, vm.genesisBytes, vm.upgradeBytes, vm.configBytes, @@ -65,10 +64,16 @@ type linearizeOnInitializeVM struct { stopVertexID ids.ID } +func NewLinearizeOnInitializeVM(vm vertex.LinearizableVMWithEngine) *linearizeOnInitializeVM { + return &linearizeOnInitializeVM{ + LinearizableVMWithEngine: vm, + } +} + func (vm *linearizeOnInitializeVM) Initialize( ctx context.Context, _ *snow.Context, - _ dbManager.Manager, + _ database.Database, _ []byte, _ []byte, _ []byte, diff --git a/avalanchego/chains/manager.go b/avalanchego/chains/manager.go index b3614139..8d8ce2a8 100644 --- a/avalanchego/chains/manager.go +++ b/avalanchego/chains/manager.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package chains @@ -15,7 +15,6 @@ import ( "time" "github.com/prometheus/client_golang/prometheus" - "go.uber.org/zap" "github.com/ava-labs/avalanchego/api/health" @@ -23,6 +22,8 @@ import ( "github.com/ava-labs/avalanchego/api/metrics" "github.com/ava-labs/avalanchego/api/server" "github.com/ava-labs/avalanchego/chains/atomic" + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/database/meterdb" "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/message" @@ -41,33 +42,35 @@ import ( "github.com/ava-labs/avalanchego/snow/networking/sender" "github.com/ava-labs/avalanchego/snow/networking/timeout" "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/staking" "github.com/ava-labs/avalanchego/subnets" "github.com/ava-labs/avalanchego/trace" "github.com/ava-labs/avalanchego/utils/buffer" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/metric" "github.com/ava-labs/avalanchego/utils/perms" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/version" "github.com/ava-labs/avalanchego/vms" + "github.com/ava-labs/avalanchego/vms/fx" "github.com/ava-labs/avalanchego/vms/metervm" + "github.com/ava-labs/avalanchego/vms/nftfx" "github.com/ava-labs/avalanchego/vms/platformvm/warp" + "github.com/ava-labs/avalanchego/vms/propertyfx" "github.com/ava-labs/avalanchego/vms/proposervm" + "github.com/ava-labs/avalanchego/vms/secp256k1fx" "github.com/ava-labs/avalanchego/vms/tracedvm" - dbManager "github.com/ava-labs/avalanchego/database/manager" - timetracker "github.com/ava-labs/avalanchego/snow/networking/tracker" - - avcon "github.com/ava-labs/avalanchego/snow/consensus/avalanche" + smcon "github.com/ava-labs/avalanchego/snow/consensus/snowman" aveng 
"github.com/ava-labs/avalanchego/snow/engine/avalanche" avbootstrap "github.com/ava-labs/avalanchego/snow/engine/avalanche/bootstrap" avagetter "github.com/ava-labs/avalanchego/snow/engine/avalanche/getter" - - smcon "github.com/ava-labs/avalanchego/snow/consensus/snowman" smeng "github.com/ava-labs/avalanchego/snow/engine/snowman" smbootstrap "github.com/ava-labs/avalanchego/snow/engine/snowman/bootstrap" snowgetter "github.com/ava-labs/avalanchego/snow/engine/snowman/getter" + timetracker "github.com/ava-labs/avalanchego/snow/networking/tracker" ) const ( @@ -77,21 +80,27 @@ const ( var ( // Commonly shared VM DB prefix - vmDBPrefix = []byte("vm") + VMDBPrefix = []byte("vm") // Bootstrapping prefixes for LinearizableVMs - vertexDBPrefix = []byte("vertex") - vertexBootstrappingDBPrefix = []byte("vertex_bs") - txBootstrappingDBPrefix = []byte("tx_bs") - blockBootstrappingDBPrefix = []byte("block_bs") + VertexDBPrefix = []byte("vertex") + VertexBootstrappingDBPrefix = []byte("vertex_bs") + TxBootstrappingDBPrefix = []byte("tx_bs") + BlockBootstrappingDBPrefix = []byte("block_bs") // Bootstrapping prefixes for ChainVMs - bootstrappingDB = []byte("bs") + ChainBootstrappingDBPrefix = []byte("bs") + + errUnknownVMType = errors.New("the vm should have type avalanche.DAGVM or snowman.ChainVM") + errCreatePlatformVM = errors.New("attempted to create a chain running the PlatformVM") + errNotBootstrapped = errors.New("subnets not bootstrapped") + errPartialSyncAsAValidator = errors.New("partial sync should not be configured for a validator") - errUnknownVMType = errors.New("the vm should have type avalanche.DAGVM or snowman.ChainVM") - errCreatePlatformVM = errors.New("attempted to create a chain running the PlatformVM") - errNotBootstrapped = errors.New("subnets not bootstrapped") - errNoPlatformSubnetConfig = errors.New("subnet config for platform chain not found") + fxs = map[ids.ID]fx.Factory{ + secp256k1fx.ID: &secp256k1fx.Factory{}, + nftfx.ID: &nftfx.Factory{}, + 
propertyfx.ID: &propertyfx.Factory{}, + } _ Manager = (*manager)(nil) ) @@ -105,9 +114,6 @@ var ( type Manager interface { ids.Aliaser - // Return the router this Manager is using to route consensus messages to chains - Router() router.Router - // Queues a chain to be created in the future after chain creator is unblocked. // This is only called from the P-chain thread to create other chains // Queued chains are created only after P-chain is bootstrapped. @@ -146,8 +152,8 @@ type ChainParameters struct { VMID ids.ID // The IDs of the feature extensions this chain is running. FxIDs []ids.ID - // Should only be set if the default beacons can't be used. - CustomBeacons validators.Set + // Invariant: Only used when [ID] is the P-chain ID. + CustomBeacons validators.Manager } type chain struct { @@ -155,7 +161,6 @@ type chain struct { Context *snow.ConsensusContext VM common.VM Handler handler.Handler - Beacons validators.Set } // ChainConfig is configuration settings for the current execution. @@ -167,45 +172,44 @@ type ChainConfig struct { } type ManagerConfig struct { - StakingEnabled bool // True iff the network has staking enabled - StakingCert tls.Certificate // needed to sign snowman++ blocks - StakingBLSKey *bls.SecretKey - TracingEnabled bool + SybilProtectionEnabled bool + StakingTLSCert tls.Certificate // needed to sign snowman++ blocks + StakingBLSKey *bls.SecretKey + TracingEnabled bool // Must not be used unless [TracingEnabled] is true as this may be nil. 
- Tracer trace.Tracer - Log logging.Logger - LogFactory logging.Factory - VMManager vms.Manager // Manage mappings from vm ID --> vm - BlockAcceptorGroup snow.AcceptorGroup - TxAcceptorGroup snow.AcceptorGroup - VertexAcceptorGroup snow.AcceptorGroup - DBManager dbManager.Manager - MsgCreator message.OutboundMsgBuilder // message creator, shared with network - Router router.Router // Routes incoming messages to the appropriate chain - Net network.Network // Sends consensus messages to other validators - Validators validators.Manager // Validators validating on this chain - NodeID ids.NodeID // The ID of this node - NetworkID uint32 // ID of the network this node is connected to - Server server.Server // Handles HTTP API calls - Keystore keystore.Keystore - AtomicMemory *atomic.Memory - AVAXAssetID ids.ID - XChainID ids.ID // ID of the X-Chain, - CChainID ids.ID // ID of the C-Chain, - CriticalChains set.Set[ids.ID] // Chains that can't exit gracefully - TimeoutManager timeout.Manager // Manages request timeouts when sending messages to other validators - Health health.Registerer - RetryBootstrap bool // Should Bootstrap be retried - RetryBootstrapWarnFrequency int // Max number of times to retry bootstrap before warning the node operator - SubnetConfigs map[ids.ID]subnets.Config // ID -> SubnetConfig - ChainConfigs map[string]ChainConfig // alias -> ChainConfig + Tracer trace.Tracer + Log logging.Logger + LogFactory logging.Factory + VMManager vms.Manager // Manage mappings from vm ID --> vm + BlockAcceptorGroup snow.AcceptorGroup + TxAcceptorGroup snow.AcceptorGroup + VertexAcceptorGroup snow.AcceptorGroup + DB database.Database + MsgCreator message.OutboundMsgBuilder // message creator, shared with network + Router router.Router // Routes incoming messages to the appropriate chain + Net network.Network // Sends consensus messages to other validators + Validators validators.Manager // Validators validating on this chain + NodeID ids.NodeID // The ID of this node + 
NetworkID uint32 // ID of the network this node is connected to + PartialSyncPrimaryNetwork bool + Server server.Server // Handles HTTP API calls + Keystore keystore.Keystore + AtomicMemory *atomic.Memory + AVAXAssetID ids.ID + XChainID ids.ID // ID of the X-Chain, + CChainID ids.ID // ID of the C-Chain, + CriticalChains set.Set[ids.ID] // Chains that can't exit gracefully + TimeoutManager timeout.Manager // Manages request timeouts when sending messages to other validators + Health health.Registerer + SubnetConfigs map[ids.ID]subnets.Config // ID -> SubnetConfig + ChainConfigs map[string]ChainConfig // alias -> ChainConfig // ShutdownNodeFunc allows the chain manager to issue a request to shutdown the node ShutdownNodeFunc func(exitCode int) MeterVMEnabled bool // Should each VM be wrapped with a MeterVM Metrics metrics.MultiGatherer - ConsensusGossipFrequency time.Duration - ConsensusAppConcurrency int + FrontierPollFrequency time.Duration + ConsensusAppConcurrency int // Max Time to spend fetching a container and its // ancestors when responding to a GetAncestors @@ -225,6 +229,8 @@ type ManagerConfig struct { StateSyncBeacons []ids.NodeID ChainDataDir string + + Subnets *Subnets } type manager struct { @@ -233,19 +239,20 @@ type manager struct { ids.Aliaser ManagerConfig + stakingSigner crypto.Signer + stakingCert *staking.Certificate + // Those notified when a chain is created registrants []Registrant // queue that holds chain create requests chainsQueue buffer.BlockingDeque[ChainParameters] // unblocks chain creator to start processing the queue - unblockChainCreatorCh chan struct{} + unblockChainCreatorCh chan struct{} + // shutdown the chain creator goroutine if the queue hasn't started to be + // processed. 
chainCreatorShutdownCh chan struct{} - - subnetsLock sync.Mutex - // Key: Subnet's ID - // Value: Subnet description - subnets map[ids.ID]subnets.Subnet + chainCreatorExited sync.WaitGroup chainsLock sync.Mutex // Key: Chain's ID @@ -261,7 +268,8 @@ func New(config *ManagerConfig) Manager { return &manager{ Aliaser: ids.NewAliaser(), ManagerConfig: *config, - subnets: make(map[ids.ID]subnets.Subnet), + stakingSigner: config.StakingTLSCert.PrivateKey.(crypto.Signer), + stakingCert: staking.CertificateFromX509(config.StakingTLSCert.Leaf), chains: make(map[ids.ID]handler.Handler), chainsQueue: buffer.NewUnboundedBlockingDeque[ChainParameters](initialQueueSize), unblockChainCreatorCh: make(chan struct{}), @@ -269,33 +277,13 @@ func New(config *ManagerConfig) Manager { } } -// Router that this chain manager is using to route consensus messages to chains -func (m *manager) Router() router.Router { - return m.ManagerConfig.Router -} - // QueueChainCreation queues a chain creation request // Invariant: Tracked Subnet must be checked before calling this function func (m *manager) QueueChainCreation(chainParams ChainParameters) { - m.subnetsLock.Lock() - subnetID := chainParams.SubnetID - sb, exists := m.subnets[subnetID] - if !exists { - sbConfig, ok := m.SubnetConfigs[subnetID] - if !ok { - // default to primary subnet config - sbConfig = m.SubnetConfigs[constants.PrimaryNetworkID] - } - sb = subnets.New(m.NodeID, sbConfig) - m.subnets[chainParams.SubnetID] = sb - } - addedChain := sb.AddChain(chainParams.ID) - m.subnetsLock.Unlock() - - if !addedChain { + if sb, _ := m.Subnets.GetOrCreate(chainParams.SubnetID); !sb.AddChain(chainParams.ID) { m.Log.Debug("skipping chain creation", zap.String("reason", "chain already staged"), - zap.Stringer("subnetID", subnetID), + zap.Stringer("subnetID", chainParams.SubnetID), zap.Stringer("chainID", chainParams.ID), zap.Stringer("vmID", chainParams.VMID), ) @@ -305,7 +293,7 @@ func (m *manager) QueueChainCreation(chainParams 
ChainParameters) { if ok := m.chainsQueue.PushRight(chainParams); !ok { m.Log.Warn("skipping chain creation", zap.String("reason", "couldn't enqueue chain"), - zap.Stringer("subnetID", subnetID), + zap.Stringer("subnetID", chainParams.SubnetID), zap.Stringer("chainID", chainParams.ID), zap.Stringer("vmID", chainParams.VMID), ) @@ -323,9 +311,7 @@ func (m *manager) createChain(chainParams ChainParameters) { zap.Stringer("vmID", chainParams.VMID), ) - m.subnetsLock.Lock() - sb := m.subnets[chainParams.SubnetID] - m.subnetsLock.Unlock() + sb, _ := m.Subnets.GetOrCreate(chainParams.SubnetID) // Note: buildChain builds all chain's relevant objects (notably engine and handler) // but does not start their operations. Starting of the handler (which could potentially @@ -359,7 +345,7 @@ func (m *manager) createChain(chainParams ChainParameters) { // created or not. This attempts to notify the node operator that their // node may not be properly validating the subnet they expect to be // validating. 
- healthCheckErr := fmt.Errorf("failed to create chain on subnet: %s", chainParams.SubnetID) + healthCheckErr := fmt.Errorf("failed to create chain on subnet %s: %w", chainParams.SubnetID, err) err := m.Health.RegisterHealthCheck( chainAlias, health.CheckerFunc(func(context.Context) (interface{}, error) { @@ -438,7 +424,7 @@ func (m *manager) buildChain(chainParams ChainParameters, sb subnets.Subnet) (*c } consensusMetrics := prometheus.NewRegistry() - chainNamespace := fmt.Sprintf("%s_%s", constants.PlatformName, primaryAlias) + chainNamespace := metric.AppendNamespace(constants.PlatformName, primaryAlias) if err := m.Metrics.Register(chainNamespace, consensusMetrics); err != nil { return nil, fmt.Errorf("error while registering chain's metrics %w", err) } @@ -447,13 +433,13 @@ func (m *manager) buildChain(chainParams ChainParameters, sb subnets.Subnet) (*c // `avalanche_{chainID}_` into `avalanche_{chainID}_avalanche_` so that // there are no conflicts when registering the Snowman consensus metrics. 
avalancheConsensusMetrics := prometheus.NewRegistry() - avalancheDAGNamespace := fmt.Sprintf("%s_avalanche", chainNamespace) + avalancheDAGNamespace := metric.AppendNamespace(chainNamespace, "avalanche") if err := m.Metrics.Register(avalancheDAGNamespace, avalancheConsensusMetrics); err != nil { return nil, fmt.Errorf("error while registering DAG metrics %w", err) } vmMetrics := metrics.NewOptionalGatherer() - vmNamespace := fmt.Sprintf("%s_vm", chainNamespace) + vmNamespace := metric.AppendNamespace(chainNamespace, "vm") if err := m.Metrics.Register(vmNamespace, vmMetrics); err != nil { return nil, fmt.Errorf("error while registering vm's metrics %w", err) } @@ -476,7 +462,7 @@ func (m *manager) buildChain(chainParams ChainParameters, sb subnets.Subnet) (*c BCLookup: m, Metrics: vmMetrics, - WarpSigner: warp.NewSigner(m.StakingBLSKey, chainParams.ID), + WarpSigner: warp.NewSigner(m.StakingBLSKey, m.NetworkID, chainParams.ID), ValidatorState: m.validatorState, ChainDataDir: chainDataDir, @@ -501,69 +487,46 @@ func (m *manager) buildChain(chainParams ChainParameters, sb subnets.Subnet) (*c } // TODO: Shutdown VM if an error occurs - fxs := make([]*common.Fx, len(chainParams.FxIDs)) + chainFxs := make([]*common.Fx, len(chainParams.FxIDs)) for i, fxID := range chainParams.FxIDs { - // Get a factory for the fx we want to use on our chain - fxFactory, err := m.VMManager.GetFactory(fxID) - if err != nil { - return nil, fmt.Errorf("error while getting fxFactory: %w", err) - } - - fx, err := fxFactory.New(chainLog) - if err != nil { - return nil, fmt.Errorf("error while creating fx: %w", err) + fxFactory, ok := fxs[fxID] + if !ok { + return nil, fmt.Errorf("fx %s not found", fxID) } - // Create the fx - fxs[i] = &common.Fx{ + chainFxs[i] = &common.Fx{ ID: fxID, - Fx: fx, + Fx: fxFactory.New(), } } - var vdrs validators.Set // Validators validating this blockchain - var ok bool - if m.StakingEnabled { - vdrs, ok = m.Validators.Get(chainParams.SubnetID) - } else { // Staking 
is disabled. Every peer validates every subnet. - vdrs, ok = m.Validators.Get(constants.PrimaryNetworkID) - } - if !ok { - return nil, fmt.Errorf("couldn't get validator set of subnet with ID %s. The subnet may not exist", chainParams.SubnetID) - } - - beacons := vdrs - if chainParams.CustomBeacons != nil { - beacons = chainParams.CustomBeacons - } - - bootstrapWeight := beacons.Weight() - var chain *chain switch vm := vm.(type) { case vertex.LinearizableVMWithEngine: chain, err = m.createAvalancheChain( ctx, chainParams.GenesisData, - vdrs, - beacons, + m.Validators, vm, - fxs, - bootstrapWeight, + chainFxs, sb, ) if err != nil { return nil, fmt.Errorf("error while creating new avalanche vm %w", err) } case block.ChainVM: + beacons := m.Validators + if chainParams.ID == constants.PlatformChainID { + beacons = chainParams.CustomBeacons + } + chain, err = m.createSnowmanChain( ctx, chainParams.GenesisData, - vdrs, + m.Validators, beacons, vm, - fxs, - bootstrapWeight, + chainFxs, sb, ) if err != nil { @@ -589,11 +552,9 @@ func (m *manager) AddRegistrant(r Registrant) { func (m *manager) createAvalancheChain( ctx *snow.ConsensusContext, genesisData []byte, - vdrs, - beacons validators.Set, + vdrs validators.Manager, vm vertex.LinearizableVMWithEngine, fxs []*common.Fx, - bootstrapWeight uint64, sb subnets.Subnet, ) (*chain, error) { ctx.Lock.Lock() @@ -604,18 +565,16 @@ func (m *manager) createAvalancheChain( State: snow.Initializing, }) - meterDBManager, err := m.DBManager.NewMeterDBManager("db", ctx.Registerer) + meterDB, err := meterdb.New("db", ctx.Registerer, m.DB) if err != nil { return nil, err } - prefixDBManager := meterDBManager.NewPrefixDBManager(ctx.ChainID[:]) - vmDBManager := prefixDBManager.NewPrefixDBManager(vmDBPrefix) - - db := prefixDBManager.Current() - vertexDB := prefixdb.New(vertexDBPrefix, db.Database) - vertexBootstrappingDB := prefixdb.New(vertexBootstrappingDBPrefix, db.Database) - txBootstrappingDB := prefixdb.New(txBootstrappingDBPrefix, 
db.Database) - blockBootstrappingDB := prefixdb.New(blockBootstrappingDBPrefix, db.Database) + prefixDB := prefixdb.New(ctx.ChainID[:], meterDB) + vmDB := prefixdb.New(VMDBPrefix, prefixDB) + vertexDB := prefixdb.New(VertexDBPrefix, prefixDB) + vertexBootstrappingDB := prefixdb.New(VertexBootstrappingDBPrefix, prefixDB) + txBootstrappingDB := prefixdb.New(TxBootstrappingDBPrefix, prefixDB) + blockBootstrappingDB := prefixdb.New(BlockBootstrappingDBPrefix, prefixDB) vtxBlocker, err := queue.NewWithMissing(vertexBootstrappingDB, "vtx", ctx.AvalancheRegisterer) if err != nil { @@ -630,10 +589,6 @@ func (m *manager) createAvalancheChain( return nil, err } - // The channel through which a VM may send messages to the consensus engine - // VM uses this channel to notify engine that a block is ready to be made - msgChan := make(chan common.Message, defaultChannelSize) - // Passes messages from the avalanche engines to the network avalancheMessageSender, err := sender.New( ctx, @@ -695,11 +650,12 @@ func (m *manager) createAvalancheChain( return nil, fmt.Errorf("error while fetching chain config: %w", err) } + dagVM := vm if m.MeterVMEnabled { - vm = metervm.NewVertexVM(vm) + dagVM = metervm.NewVertexVM(dagVM) } if m.TracingEnabled { - vm = tracedvm.NewVertexVM(vm, m.Tracer) + dagVM = tracedvm.NewVertexVM(dagVM, m.Tracer) } // Handles serialization/deserialization of vertices and also the @@ -707,7 +663,7 @@ func (m *manager) createAvalancheChain( vtxManager := state.NewSerializer( state.SerializerConfig{ ChainID: ctx.ChainID, - VM: vm, + VM: dagVM, DB: vertexDB, Log: ctx.Log, CortinaTime: version.GetCortinaTime(ctx.NetworkID), @@ -730,14 +686,18 @@ func (m *manager) createAvalancheChain( ctx.Context.Metrics = avalancheRegisterer + // The channel through which a VM may send messages to the consensus engine + // VM uses this channel to notify engine that a block is ready to be made + msgChan := make(chan common.Message, defaultChannelSize) + // The only difference between 
using avalancheMessageSender and // snowmanMessageSender here is where the metrics will be placed. Because we // end up using this sender after the linearization, we pass in // snowmanMessageSender here. - err = vm.Initialize( + err = dagVM.Initialize( context.TODO(), ctx.Context, - vmDBManager, + vmDB, genesisData, chainConfig.Upgrade, chainConfig.Config, @@ -750,21 +710,25 @@ func (m *manager) createAvalancheChain( } // Initialize the ProposerVM and the vm wrapped inside it - minBlockDelay := proposervm.DefaultMinBlockDelay + var ( + minBlockDelay = proposervm.DefaultMinBlockDelay + numHistoricalBlocks = proposervm.DefaultNumHistoricalBlocks + ) if subnetCfg, ok := m.SubnetConfigs[ctx.SubnetID]; ok { minBlockDelay = subnetCfg.ProposerMinBlockDelay + numHistoricalBlocks = subnetCfg.ProposerNumHistoricalBlocks } m.Log.Info("creating proposervm wrapper", zap.Time("activationTime", m.ApricotPhase4Time), zap.Uint64("minPChainHeight", m.ApricotPhase4MinPChainHeight), zap.Duration("minBlockDelay", minBlockDelay), + zap.Uint64("numHistoricalBlocks", numHistoricalBlocks), ) chainAlias := m.PrimaryAliasOrDefault(ctx.ChainID) - untracedVMWrappedInsideProposerVM := &linearizeOnInitializeVM{ - LinearizableVMWithEngine: vm, - } + // Note: this does not use [dagVM] to ensure we use the [vm]'s height index. + untracedVMWrappedInsideProposerVM := NewLinearizeOnInitializeVM(vm) var vmWrappedInsideProposerVM block.ChainVM = untracedVMWrappedInsideProposerVM if m.TracingEnabled { @@ -775,11 +739,15 @@ func (m *manager) createAvalancheChain( // using. 
var vmWrappingProposerVM block.ChainVM = proposervm.New( vmWrappedInsideProposerVM, - m.ApricotPhase4Time, - m.ApricotPhase4MinPChainHeight, - minBlockDelay, - m.StakingCert.PrivateKey.(crypto.Signer), - m.StakingCert.Leaf, + proposervm.Config{ + ActivationTime: m.ApricotPhase4Time, + DurangoTime: version.GetDurangoTime(m.NetworkID), + MinimumPChainHeight: m.ApricotPhase4MinPChainHeight, + MinBlkDelay: minBlockDelay, + NumHistoricalBlocks: numHistoricalBlocks, + StakingLeafSigner: m.stakingSigner, + StakingCertLeaf: m.stakingCert, + }, ) if m.MeterVMEnabled { @@ -792,13 +760,13 @@ func (m *manager) createAvalancheChain( // Note: linearizableVM is the VM that the Avalanche engines should be // using. linearizableVM := &initializeOnLinearizeVM{ - DAGVM: vm, + DAGVM: dagVM, vmToInitialize: vmWrappingProposerVM, vmToLinearize: untracedVMWrappedInsideProposerVM, registerer: snowmanRegisterer, ctx: ctx.Context, - dbManager: vmDBManager, + db: vmDB, genesisBytes: genesisData, upgradeBytes: chainConfig.Upgrade, configBytes: chainConfig.Config, @@ -807,48 +775,51 @@ func (m *manager) createAvalancheChain( appSender: snowmanMessageSender, } + bootstrapWeight, err := vdrs.TotalWeight(ctx.SubnetID) + if err != nil { + return nil, fmt.Errorf("error while fetching weight for subnet %s: %w", ctx.SubnetID, err) + } + consensusParams := sb.Config().ConsensusParameters sampleK := consensusParams.K if uint64(sampleK) > bootstrapWeight { sampleK = int(bootstrapWeight) } + connectedValidators, err := tracker.NewMeteredPeers("", ctx.Registerer) + if err != nil { + return nil, fmt.Errorf("error creating peer tracker: %w", err) + } + vdrs.RegisterCallbackListener(ctx.SubnetID, connectedValidators) + // Asynchronously passes messages from the network to the consensus engine h, err := handler.New( ctx, vdrs, msgChan, - m.ConsensusGossipFrequency, + m.FrontierPollFrequency, m.ConsensusAppConcurrency, m.ResourceTracker, validators.UnhandledSubnetConnector, // avalanche chains don't use subnet 
connector sb, + connectedValidators, ) if err != nil { return nil, fmt.Errorf("error initializing network handler: %w", err) } - connectedPeers := tracker.NewPeers() - startupTracker := tracker.NewStartup(connectedPeers, (3*bootstrapWeight+3)/4) - beacons.RegisterCallbackListener(startupTracker) + connectedBeacons := tracker.NewPeers() + startupTracker := tracker.NewStartup(connectedBeacons, (3*bootstrapWeight+3)/4) + vdrs.RegisterCallbackListener(ctx.SubnetID, startupTracker) - snowmanCommonCfg := common.Config{ - Ctx: ctx, - Beacons: beacons, - SampleK: sampleK, - Alpha: bootstrapWeight/2 + 1, // must be > 50% - StartupTracker: startupTracker, - Sender: snowmanMessageSender, - BootstrapTracker: sb, - Timer: h, - RetryBootstrap: m.RetryBootstrap, - RetryBootstrapWarnFrequency: m.RetryBootstrapWarnFrequency, - MaxTimeGetAncestors: m.BootstrapMaxTimeGetAncestors, - AncestorsMaxContainersSent: m.BootstrapAncestorsMaxContainersSent, - AncestorsMaxContainersReceived: m.BootstrapAncestorsMaxContainersReceived, - SharedCfg: &common.SharedConfig{}, - } - snowGetHandler, err := snowgetter.New(vmWrappingProposerVM, snowmanCommonCfg) + snowGetHandler, err := snowgetter.New( + vmWrappingProposerVM, + snowmanMessageSender, + ctx.Log, + m.BootstrapMaxTimeGetAncestors, + m.BootstrapAncestorsMaxContainersSent, + ctx.Registerer, + ) if err != nil { return nil, fmt.Errorf("couldn't initialize snow base message handler: %w", err) } @@ -861,13 +832,14 @@ func (m *manager) createAvalancheChain( // Create engine, bootstrapper and state-syncer in this order, // to make sure start callbacks are duly initialized snowmanEngineConfig := smeng.Config{ - Ctx: snowmanCommonCfg.Ctx, - AllGetsServer: snowGetHandler, - VM: vmWrappingProposerVM, - Sender: snowmanCommonCfg.Sender, - Validators: vdrs, - Params: consensusParams.Parameters, - Consensus: snowmanConsensus, + Ctx: ctx, + AllGetsServer: snowGetHandler, + VM: vmWrappingProposerVM, + Sender: snowmanMessageSender, + Validators: vdrs, + 
ConnectedValidators: connectedValidators, + Params: consensusParams, + Consensus: snowmanConsensus, } snowmanEngine, err := smeng.New(snowmanEngineConfig) if err != nil { @@ -880,13 +852,20 @@ func (m *manager) createAvalancheChain( // create bootstrap gear bootstrapCfg := smbootstrap.Config{ - Config: snowmanCommonCfg, - AllGetsServer: snowGetHandler, - Blocked: blockBlocker, - VM: vmWrappingProposerVM, + AllGetsServer: snowGetHandler, + Ctx: ctx, + Beacons: vdrs, + SampleK: sampleK, + StartupTracker: startupTracker, + Sender: snowmanMessageSender, + BootstrapTracker: sb, + Timer: h, + AncestorsMaxContainersReceived: m.BootstrapAncestorsMaxContainersReceived, + Blocked: blockBlocker, + VM: vmWrappingProposerVM, } - snowmanBootstrapper, err := smbootstrap.New( - context.TODO(), + var snowmanBootstrapper common.BootstrapableEngine + snowmanBootstrapper, err = smbootstrap.New( bootstrapCfg, snowmanEngine.Start, ) @@ -898,70 +877,43 @@ func (m *manager) createAvalancheChain( snowmanBootstrapper = common.TraceBootstrapableEngine(snowmanBootstrapper, m.Tracer) } - avalancheCommonCfg := common.Config{ - Ctx: ctx, - Beacons: beacons, - SampleK: sampleK, - StartupTracker: startupTracker, - Alpha: bootstrapWeight/2 + 1, // must be > 50% - Sender: avalancheMessageSender, - BootstrapTracker: sb, - Timer: h, - RetryBootstrap: m.RetryBootstrap, - RetryBootstrapWarnFrequency: m.RetryBootstrapWarnFrequency, - MaxTimeGetAncestors: m.BootstrapMaxTimeGetAncestors, - AncestorsMaxContainersSent: m.BootstrapAncestorsMaxContainersSent, - AncestorsMaxContainersReceived: m.BootstrapAncestorsMaxContainersReceived, - SharedCfg: &common.SharedConfig{}, - } - - avaGetHandler, err := avagetter.New(vtxManager, avalancheCommonCfg) + avaGetHandler, err := avagetter.New( + vtxManager, + avalancheMessageSender, + ctx.Log, + m.BootstrapMaxTimeGetAncestors, + m.BootstrapAncestorsMaxContainersSent, + ctx.AvalancheRegisterer, + ) if err != nil { return nil, fmt.Errorf("couldn't initialize avalanche 
base message handler: %w", err) } - // create bootstrap gear - avalancheBootstrapperConfig := avbootstrap.Config{ - Config: avalancheCommonCfg, - AllGetsServer: avaGetHandler, - VtxBlocked: vtxBlocker, - TxBlocked: txBlocker, - Manager: vtxManager, - VM: linearizableVM, - } - - var avalancheConsensus avcon.Consensus = &avcon.Topological{} + // create engine gear + avalancheEngine := aveng.New(ctx, avaGetHandler, linearizableVM) if m.TracingEnabled { - avalancheConsensus = avcon.Trace(avalancheConsensus, m.Tracer) + avalancheEngine = common.TraceEngine(avalancheEngine, m.Tracer) } - // create engine gear - avalancheEngineConfig := aveng.Config{ - Ctx: ctx, - AllGetsServer: avaGetHandler, - VM: linearizableVM, - Manager: vtxManager, - Sender: avalancheMessageSender, - Validators: vdrs, - Params: consensusParams, - Consensus: avalancheConsensus, - } - avalancheEngine, err := aveng.New( - avalancheEngineConfig, - snowmanEngine.Start, - ) - if err != nil { - return nil, fmt.Errorf("error initializing avalanche engine: %w", err) + // create bootstrap gear + avalancheBootstrapperConfig := avbootstrap.Config{ + AllGetsServer: avaGetHandler, + Ctx: ctx, + Beacons: vdrs, + StartupTracker: startupTracker, + Sender: avalancheMessageSender, + AncestorsMaxContainersReceived: m.BootstrapAncestorsMaxContainersReceived, + VtxBlocked: vtxBlocker, + TxBlocked: txBlocker, + Manager: vtxManager, + VM: linearizableVM, } - - if m.TracingEnabled { - avalancheEngine = aveng.TraceEngine(avalancheEngine, m.Tracer) + if ctx.ChainID == m.XChainID { + avalancheBootstrapperConfig.StopVertexID = version.CortinaXChainStopVertexID[ctx.NetworkID] } avalancheBootstrapper, err := avbootstrap.New( - context.TODO(), avalancheBootstrapperConfig, - avalancheEngine.Start, snowmanBootstrapper.Start, ) if err != nil { @@ -993,7 +945,7 @@ func (m *manager) createAvalancheChain( return &chain{ Name: chainAlias, Context: ctx, - VM: vm, + VM: dagVM, Handler: h, }, nil } @@ -1002,11 +954,10 @@ func (m *manager) 
createAvalancheChain( func (m *manager) createSnowmanChain( ctx *snow.ConsensusContext, genesisData []byte, - vdrs, - beacons validators.Set, + vdrs validators.Manager, + beacons validators.Manager, vm block.ChainVM, fxs []*common.Fx, - bootstrapWeight uint64, sb subnets.Subnet, ) (*chain, error) { ctx.Lock.Lock() @@ -1017,25 +968,19 @@ func (m *manager) createSnowmanChain( State: snow.Initializing, }) - meterDBManager, err := m.DBManager.NewMeterDBManager("db", ctx.Registerer) + meterDB, err := meterdb.New("db", ctx.Registerer, m.DB) if err != nil { return nil, err } - prefixDBManager := meterDBManager.NewPrefixDBManager(ctx.ChainID[:]) - vmDBManager := prefixDBManager.NewPrefixDBManager(vmDBPrefix) - - db := prefixDBManager.Current() - bootstrappingDB := prefixdb.New(bootstrappingDB, db.Database) + prefixDB := prefixdb.New(ctx.ChainID[:], meterDB) + vmDB := prefixdb.New(VMDBPrefix, prefixDB) + bootstrappingDB := prefixdb.New(ChainBootstrappingDBPrefix, prefixDB) blocked, err := queue.NewWithMissing(bootstrappingDB, "block", ctx.Registerer) if err != nil { return nil, err } - // The channel through which a VM may send messages to the consensus engine - // VM uses this channel to notify engine that a block is ready to be made - msgChan := make(chan common.Message, defaultChannelSize) - // Passes messages from the consensus engine to the network messageSender, err := sender.New( ctx, @@ -1092,7 +1037,7 @@ func (m *manager) createSnowmanChain( m.validatorState = validators.Trace(m.validatorState, "lockedState", m.Tracer) } - if !m.ManagerConfig.StakingEnabled { + if !m.ManagerConfig.SybilProtectionEnabled { m.validatorState = validators.NewNoValidatorsState(m.validatorState) ctx.ValidatorState = validators.NewNoValidatorsState(ctx.ValidatorState) } @@ -1118,14 +1063,19 @@ func (m *manager) createSnowmanChain( return nil, fmt.Errorf("error while fetching chain config: %w", err) } - minBlockDelay := proposervm.DefaultMinBlockDelay + var ( + minBlockDelay = 
proposervm.DefaultMinBlockDelay + numHistoricalBlocks = proposervm.DefaultNumHistoricalBlocks + ) if subnetCfg, ok := m.SubnetConfigs[ctx.SubnetID]; ok { minBlockDelay = subnetCfg.ProposerMinBlockDelay + numHistoricalBlocks = subnetCfg.ProposerNumHistoricalBlocks } m.Log.Info("creating proposervm wrapper", zap.Time("activationTime", m.ApricotPhase4Time), zap.Uint64("minPChainHeight", m.ApricotPhase4MinPChainHeight), zap.Duration("minBlockDelay", minBlockDelay), + zap.Uint64("numHistoricalBlocks", numHistoricalBlocks), ) chainAlias := m.PrimaryAliasOrDefault(ctx.ChainID) @@ -1135,11 +1085,15 @@ func (m *manager) createSnowmanChain( vm = proposervm.New( vm, - m.ApricotPhase4Time, - m.ApricotPhase4MinPChainHeight, - minBlockDelay, - m.StakingCert.PrivateKey.(crypto.Signer), - m.StakingCert.Leaf, + proposervm.Config{ + ActivationTime: m.ApricotPhase4Time, + DurangoTime: version.GetDurangoTime(m.NetworkID), + MinimumPChainHeight: m.ApricotPhase4MinPChainHeight, + MinBlkDelay: minBlockDelay, + NumHistoricalBlocks: numHistoricalBlocks, + StakingLeafSigner: m.stakingSigner, + StakingCertLeaf: m.stakingCert, + }, ) if m.MeterVMEnabled { @@ -1149,10 +1103,14 @@ func (m *manager) createSnowmanChain( vm = tracedvm.NewBlockVM(vm, "proposervm", m.Tracer) } + // The channel through which a VM may send messages to the consensus engine + // VM uses this channel to notify engine that a block is ready to be made + msgChan := make(chan common.Message, defaultChannelSize) + if err := vm.Initialize( context.TODO(), ctx.Context, - vmDBManager, + vmDB, genesisData, chainConfig.Upgrade, chainConfig.Config, @@ -1163,49 +1121,51 @@ func (m *manager) createSnowmanChain( return nil, err } + bootstrapWeight, err := beacons.TotalWeight(ctx.SubnetID) + if err != nil { + return nil, fmt.Errorf("error while fetching weight for subnet %s: %w", ctx.SubnetID, err) + } + consensusParams := sb.Config().ConsensusParameters sampleK := consensusParams.K if uint64(sampleK) > bootstrapWeight { sampleK = 
int(bootstrapWeight) } + connectedValidators, err := tracker.NewMeteredPeers("", ctx.Registerer) + if err != nil { + return nil, fmt.Errorf("error creating peer tracker: %w", err) + } + vdrs.RegisterCallbackListener(ctx.SubnetID, connectedValidators) + // Asynchronously passes messages from the network to the consensus engine h, err := handler.New( ctx, vdrs, msgChan, - m.ConsensusGossipFrequency, + m.FrontierPollFrequency, m.ConsensusAppConcurrency, m.ResourceTracker, subnetConnector, sb, + connectedValidators, ) if err != nil { return nil, fmt.Errorf("couldn't initialize message handler: %w", err) } - connectedPeers := tracker.NewPeers() - startupTracker := tracker.NewStartup(connectedPeers, (3*bootstrapWeight+3)/4) - beacons.RegisterCallbackListener(startupTracker) + connectedBeacons := tracker.NewPeers() + startupTracker := tracker.NewStartup(connectedBeacons, (3*bootstrapWeight+3)/4) + beacons.RegisterCallbackListener(ctx.SubnetID, startupTracker) - commonCfg := common.Config{ - Ctx: ctx, - Beacons: beacons, - SampleK: sampleK, - StartupTracker: startupTracker, - Alpha: bootstrapWeight/2 + 1, // must be > 50% - Sender: messageSender, - BootstrapTracker: sb, - Timer: h, - RetryBootstrap: m.RetryBootstrap, - RetryBootstrapWarnFrequency: m.RetryBootstrapWarnFrequency, - MaxTimeGetAncestors: m.BootstrapMaxTimeGetAncestors, - AncestorsMaxContainersSent: m.BootstrapAncestorsMaxContainersSent, - AncestorsMaxContainersReceived: m.BootstrapAncestorsMaxContainersReceived, - SharedCfg: &common.SharedConfig{}, - } - - snowGetHandler, err := snowgetter.New(vm, commonCfg) + snowGetHandler, err := snowgetter.New( + vm, + messageSender, + ctx.Log, + m.BootstrapMaxTimeGetAncestors, + m.BootstrapAncestorsMaxContainersSent, + ctx.Registerer, + ) if err != nil { return nil, fmt.Errorf("couldn't initialize snow base message handler: %w", err) } @@ -1218,13 +1178,15 @@ func (m *manager) createSnowmanChain( // Create engine, bootstrapper and state-syncer in this order, // to make 
sure start callbacks are duly initialized engineConfig := smeng.Config{ - Ctx: commonCfg.Ctx, - AllGetsServer: snowGetHandler, - VM: vm, - Sender: commonCfg.Sender, - Validators: vdrs, - Params: consensusParams.Parameters, - Consensus: consensus, + Ctx: ctx, + AllGetsServer: snowGetHandler, + VM: vm, + Sender: messageSender, + Validators: vdrs, + ConnectedValidators: connectedValidators, + Params: consensusParams, + Consensus: consensus, + PartialSync: m.PartialSyncPrimaryNetwork && ctx.ChainID == constants.PlatformChainID, } engine, err := smeng.New(engineConfig) if err != nil { @@ -1237,14 +1199,21 @@ func (m *manager) createSnowmanChain( // create bootstrap gear bootstrapCfg := smbootstrap.Config{ - Config: commonCfg, - AllGetsServer: snowGetHandler, - Blocked: blocked, - VM: vm, - Bootstrapped: bootstrapFunc, + AllGetsServer: snowGetHandler, + Ctx: ctx, + Beacons: beacons, + SampleK: sampleK, + StartupTracker: startupTracker, + Sender: messageSender, + BootstrapTracker: sb, + Timer: h, + AncestorsMaxContainersReceived: m.BootstrapAncestorsMaxContainersReceived, + Blocked: blocked, + VM: vm, + Bootstrapped: bootstrapFunc, } - bootstrapper, err := smbootstrap.New( - context.TODO(), + var bootstrapper common.BootstrapableEngine + bootstrapper, err = smbootstrap.New( bootstrapCfg, engine.Start, ) @@ -1258,9 +1227,14 @@ func (m *manager) createSnowmanChain( // create state sync gear stateSyncCfg, err := syncer.NewConfig( - commonCfg, - m.StateSyncBeacons, snowGetHandler, + ctx, + startupTracker, + messageSender, + beacons, + sampleK, + bootstrapWeight/2+1, // must be > 50% + m.StateSyncBeacons, vm, ) if err != nil { @@ -1308,50 +1282,53 @@ func (m *manager) IsBootstrapped(id ids.ID) bool { return chain.Context().State.Get().State == snow.NormalOp } -func (m *manager) subnetsNotBootstrapped() []ids.ID { - m.subnetsLock.Lock() - defer m.subnetsLock.Unlock() - - subnetsBootstrapping := make([]ids.ID, 0, len(m.subnets)) - for subnetID, subnet := range m.subnets { - if 
!subnet.IsBootstrapped() { - subnetsBootstrapping = append(subnetsBootstrapping, subnetID) - } - } - return subnetsBootstrapping -} - func (m *manager) registerBootstrappedHealthChecks() error { bootstrappedCheck := health.CheckerFunc(func(context.Context) (interface{}, error) { - subnetIDs := m.subnetsNotBootstrapped() - if len(subnetIDs) != 0 { + if subnetIDs := m.Subnets.Bootstrapping(); len(subnetIDs) != 0 { return subnetIDs, errNotBootstrapped } return []ids.ID{}, nil }) - if err := m.Health.RegisterReadinessCheck("bootstrapped", bootstrappedCheck); err != nil { + if err := m.Health.RegisterReadinessCheck("bootstrapped", bootstrappedCheck, health.ApplicationTag); err != nil { return fmt.Errorf("couldn't register bootstrapped readiness check: %w", err) } - if err := m.Health.RegisterHealthCheck("bootstrapped", bootstrappedCheck); err != nil { + if err := m.Health.RegisterHealthCheck("bootstrapped", bootstrappedCheck, health.ApplicationTag); err != nil { return fmt.Errorf("couldn't register bootstrapped health check: %w", err) } + + // We should only report unhealthy if the node is partially syncing the + // primary network and is a validator. + if !m.PartialSyncPrimaryNetwork { + return nil + } + + partialSyncCheck := health.CheckerFunc(func(context.Context) (interface{}, error) { + // Note: The health check is skipped during bootstrapping to allow a + // node to sync the network even if it was previously a validator. 
+ if !m.IsBootstrapped(constants.PlatformChainID) { + return "node is currently bootstrapping", nil + } + if _, ok := m.Validators.GetValidator(constants.PrimaryNetworkID, m.NodeID); !ok { + return "node is not a primary network validator", nil + } + + m.Log.Warn("node is a primary network validator", + zap.Error(errPartialSyncAsAValidator), + ) + return "node is a primary network validator", errPartialSyncAsAValidator + }) + + if err := m.Health.RegisterHealthCheck("validation", partialSyncCheck, health.ApplicationTag); err != nil { + return fmt.Errorf("couldn't register validation health check: %w", err) + } return nil } // Starts chain creation loop to process queued chains func (m *manager) StartChainCreator(platformParams ChainParameters) error { - // Get the Primary Network's subnet config. If it wasn't registered, then we - // throw a fatal error. - sbConfig, ok := m.SubnetConfigs[constants.PrimaryNetworkID] - if !ok { - return errNoPlatformSubnetConfig - } - - m.subnetsLock.Lock() - sb := subnets.New(m.NodeID, sbConfig) - m.subnets[platformParams.SubnetID] = sb + // Add the P-Chain to the Primary Network + sb, _ := m.Subnets.GetOrCreate(constants.PrimaryNetworkID) sb.AddChain(platformParams.ID) - m.subnetsLock.Unlock() // The P-chain is created synchronously to ensure that `VM.Initialize` has // finished before returning from this function. This is required because @@ -1360,11 +1337,14 @@ func (m *manager) StartChainCreator(platformParams ChainParameters) error { m.createChain(platformParams) m.Log.Info("starting chain creator") + m.chainCreatorExited.Add(1) go m.dispatchChainCreator() return nil } func (m *manager) dispatchChainCreator() { + defer m.chainCreatorExited.Done() + select { // This channel will be closed when Shutdown is called on the manager. 
case <-m.chainCreatorShutdownCh: @@ -1385,17 +1365,12 @@ func (m *manager) dispatchChainCreator() { } } -// Shutdown stops all the chains -func (m *manager) closeChainCreator() { - m.Log.Info("stopping chain creator") - m.chainsQueue.Close() - close(m.chainCreatorShutdownCh) -} - // Shutdown stops all the chains func (m *manager) Shutdown() { m.Log.Info("shutting down chain manager") - m.closeChainCreator() + m.chainsQueue.Close() + close(m.chainCreatorShutdownCh) + m.chainCreatorExited.Wait() m.ManagerConfig.Router.Shutdown(context.TODO()) } diff --git a/avalanchego/chains/registrant.go b/avalanchego/chains/registrant.go index 3a213704..cd3aa6e9 100644 --- a/avalanchego/chains/registrant.go +++ b/avalanchego/chains/registrant.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package chains diff --git a/avalanchego/chains/subnets.go b/avalanchego/chains/subnets.go new file mode 100644 index 00000000..fda66a7e --- /dev/null +++ b/avalanchego/chains/subnets.go @@ -0,0 +1,82 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package chains + +import ( + "errors" + "sync" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/subnets" + "github.com/ava-labs/avalanchego/utils/constants" +) + +var ErrNoPrimaryNetworkConfig = errors.New("no subnet config for primary network found") + +// Subnets holds the currently running subnets on this node +type Subnets struct { + nodeID ids.NodeID + configs map[ids.ID]subnets.Config + + lock sync.RWMutex + subnets map[ids.ID]subnets.Subnet +} + +// GetOrCreate returns a subnet running on this node, or creates one if it was +// not running before. Returns the subnet and if the subnet was created. 
+func (s *Subnets) GetOrCreate(subnetID ids.ID) (subnets.Subnet, bool) { + s.lock.Lock() + defer s.lock.Unlock() + + if subnet, ok := s.subnets[subnetID]; ok { + return subnet, false + } + + // Default to the primary network config if a subnet config was not + // specified + config, ok := s.configs[subnetID] + if !ok { + config = s.configs[constants.PrimaryNetworkID] + } + + subnet := subnets.New(s.nodeID, config) + s.subnets[subnetID] = subnet + + return subnet, true +} + +// Bootstrapping returns the subnetIDs of any chains that are still +// bootstrapping. +func (s *Subnets) Bootstrapping() []ids.ID { + s.lock.RLock() + defer s.lock.RUnlock() + + subnetsBootstrapping := make([]ids.ID, 0, len(s.subnets)) + for subnetID, subnet := range s.subnets { + if !subnet.IsBootstrapped() { + subnetsBootstrapping = append(subnetsBootstrapping, subnetID) + } + } + + return subnetsBootstrapping +} + +// NewSubnets returns an instance of Subnets +func NewSubnets( + nodeID ids.NodeID, + configs map[ids.ID]subnets.Config, +) (*Subnets, error) { + if _, ok := configs[constants.PrimaryNetworkID]; !ok { + return nil, ErrNoPrimaryNetworkConfig + } + + s := &Subnets{ + nodeID: nodeID, + configs: configs, + subnets: make(map[ids.ID]subnets.Subnet), + } + + _, _ = s.GetOrCreate(constants.PrimaryNetworkID) + return s, nil +} diff --git a/avalanchego/chains/subnets_test.go b/avalanchego/chains/subnets_test.go new file mode 100644 index 00000000..231a8f97 --- /dev/null +++ b/avalanchego/chains/subnets_test.go @@ -0,0 +1,173 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package chains + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/subnets" + "github.com/ava-labs/avalanchego/utils/constants" +) + +func TestNewSubnets(t *testing.T) { + require := require.New(t) + config := map[ids.ID]subnets.Config{ + constants.PrimaryNetworkID: {}, + } + + subnets, err := NewSubnets(ids.EmptyNodeID, config) + require.NoError(err) + + subnet, ok := subnets.GetOrCreate(constants.PrimaryNetworkID) + require.False(ok) + require.Equal(config[constants.PrimaryNetworkID], subnet.Config()) +} + +func TestNewSubnetsNoPrimaryNetworkConfig(t *testing.T) { + require := require.New(t) + config := map[ids.ID]subnets.Config{} + + _, err := NewSubnets(ids.EmptyNodeID, config) + require.ErrorIs(err, ErrNoPrimaryNetworkConfig) +} + +func TestSubnetsGetOrCreate(t *testing.T) { + testSubnetID := ids.GenerateTestID() + + type args struct { + subnetID ids.ID + want bool + } + + tests := []struct { + name string + args []args + }{ + { + name: "adding duplicate subnet is a noop", + args: []args{ + { + subnetID: testSubnetID, + want: true, + }, + { + subnetID: testSubnetID, + }, + }, + }, + { + name: "adding unique subnets succeeds", + args: []args{ + { + subnetID: ids.GenerateTestID(), + want: true, + }, + { + subnetID: ids.GenerateTestID(), + want: true, + }, + { + subnetID: ids.GenerateTestID(), + want: true, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + config := map[ids.ID]subnets.Config{ + constants.PrimaryNetworkID: {}, + } + subnets, err := NewSubnets(ids.EmptyNodeID, config) + require.NoError(err) + + for _, arg := range tt.args { + _, got := subnets.GetOrCreate(arg.subnetID) + require.Equal(arg.want, got) + } + }) + } +} + +func TestSubnetConfigs(t *testing.T) { + testSubnetID := ids.GenerateTestID() + + tests := []struct { + name string + config map[ids.ID]subnets.Config + subnetID ids.ID + 
want subnets.Config + }{ + { + name: "default to primary network config", + config: map[ids.ID]subnets.Config{ + constants.PrimaryNetworkID: {}, + }, + subnetID: testSubnetID, + want: subnets.Config{}, + }, + { + name: "use subnet config", + config: map[ids.ID]subnets.Config{ + constants.PrimaryNetworkID: {}, + testSubnetID: { + GossipConfig: subnets.GossipConfig{ + AcceptedFrontierValidatorSize: 123456789, + }, + }, + }, + subnetID: testSubnetID, + want: subnets.Config{ + GossipConfig: subnets.GossipConfig{ + AcceptedFrontierValidatorSize: 123456789, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + + subnets, err := NewSubnets(ids.EmptyNodeID, tt.config) + require.NoError(err) + + subnet, ok := subnets.GetOrCreate(tt.subnetID) + require.True(ok) + + require.Equal(tt.want, subnet.Config()) + }) + } +} + +func TestSubnetsBootstrapping(t *testing.T) { + require := require.New(t) + + config := map[ids.ID]subnets.Config{ + constants.PrimaryNetworkID: {}, + } + + subnets, err := NewSubnets(ids.EmptyNodeID, config) + require.NoError(err) + + subnetID := ids.GenerateTestID() + chainID := ids.GenerateTestID() + + subnet, ok := subnets.GetOrCreate(subnetID) + require.True(ok) + + // Start bootstrapping + subnet.AddChain(chainID) + bootstrapping := subnets.Bootstrapping() + require.Contains(bootstrapping, subnetID) + + // Finish bootstrapping + subnet.Bootstrapped(chainID) + require.Empty(subnets.Bootstrapping()) +} diff --git a/avalanchego/chains/test_manager.go b/avalanchego/chains/test_manager.go index e4dabea4..f7b98b29 100644 --- a/avalanchego/chains/test_manager.go +++ b/avalanchego/chains/test_manager.go @@ -1,12 +1,9 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package chains -import ( - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/networking/router" -) +import "github.com/ava-labs/avalanchego/ids" // TestManager implements Manager but does nothing. Always returns nil error. // To be used only in tests @@ -14,10 +11,6 @@ var TestManager Manager = testManager{} type testManager struct{} -func (testManager) Router() router.Router { - return nil -} - func (testManager) QueueChainCreation(ChainParameters) {} func (testManager) ForceCreateChain(ChainParameters) {} diff --git a/avalanchego/codec/codec.go b/avalanchego/codec/codec.go index 004b2553..7aacb908 100644 --- a/avalanchego/codec/codec.go +++ b/avalanchego/codec/codec.go @@ -1,9 +1,21 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package codec -import "github.com/ava-labs/avalanchego/utils/wrappers" +import ( + "errors" + + "github.com/ava-labs/avalanchego/utils/wrappers" +) + +var ( + ErrUnsupportedType = errors.New("unsupported type") + ErrMaxSliceLenExceeded = errors.New("max slice length exceeded") + ErrDoesNotImplementInterface = errors.New("does not implement interface") + ErrUnexportedField = errors.New("unexported field") + ErrExtraSpace = errors.New("trailing buffer space") +) // Codec marshals and unmarshals type Codec interface { diff --git a/avalanchego/codec/general_codec.go b/avalanchego/codec/general_codec.go index ac32b84e..3688065a 100644 --- a/avalanchego/codec/general_codec.go +++ b/avalanchego/codec/general_codec.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package codec diff --git a/avalanchego/codec/hierarchycodec/codec.go b/avalanchego/codec/hierarchycodec/codec.go index af66bf24..db2ffed0 100644 --- a/avalanchego/codec/hierarchycodec/codec.go +++ b/avalanchego/codec/hierarchycodec/codec.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package hierarchycodec @@ -7,9 +7,11 @@ import ( "fmt" "reflect" "sync" + "time" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/reflectcodec" + "github.com/ava-labs/avalanchego/utils/bimap" "github.com/ava-labs/avalanchego/utils/wrappers" ) @@ -42,28 +44,26 @@ type typeID struct { type hierarchyCodec struct { codec.Codec - lock sync.RWMutex - currentGroupID uint16 - nextTypeID uint16 - typeIDToType map[typeID]reflect.Type - typeToTypeID map[reflect.Type]typeID + lock sync.RWMutex + currentGroupID uint16 + nextTypeID uint16 + registeredTypes *bimap.BiMap[typeID, reflect.Type] } // New returns a new, concurrency-safe codec -func New(tagNames []string, maxSliceLen uint32) Codec { +func New(durangoTime time.Time, tagNames []string, maxSliceLen uint32) Codec { hCodec := &hierarchyCodec{ - currentGroupID: 0, - nextTypeID: 0, - typeIDToType: map[typeID]reflect.Type{}, - typeToTypeID: map[reflect.Type]typeID{}, + currentGroupID: 0, + nextTypeID: 0, + registeredTypes: bimap.New[typeID, reflect.Type](), } - hCodec.Codec = reflectcodec.New(hCodec, tagNames, maxSliceLen) + hCodec.Codec = reflectcodec.New(hCodec, tagNames, durangoTime, maxSliceLen) return hCodec } // NewDefault returns a new codec with reasonable default values -func NewDefault() Codec { - return New([]string{reflectcodec.DefaultTagName}, defaultMaxSliceLength) +func NewDefault(durangoTime time.Time) Codec { + return New(durangoTime, []string{reflectcodec.DefaultTagName}, defaultMaxSliceLength) } // SkipRegistrations some number of type IDs @@ -88,8 +88,8 
@@ func (c *hierarchyCodec) RegisterType(val interface{}) error { defer c.lock.Unlock() valType := reflect.TypeOf(val) - if _, exists := c.typeToTypeID[valType]; exists { - return fmt.Errorf("type %v has already been registered", valType) + if c.registeredTypes.HasValue(valType) { + return fmt.Errorf("%w: %v", codec.ErrDuplicateType, valType) } valTypeID := typeID{ @@ -98,8 +98,7 @@ func (c *hierarchyCodec) RegisterType(val interface{}) error { } c.nextTypeID++ - c.typeIDToType[valTypeID] = valType - c.typeToTypeID[valType] = valTypeID + c.registeredTypes.Put(valTypeID, valType) return nil } @@ -112,7 +111,7 @@ func (c *hierarchyCodec) PackPrefix(p *wrappers.Packer, valueType reflect.Type) c.lock.RLock() defer c.lock.RUnlock() - typeID, ok := c.typeToTypeID[valueType] // Get the type ID of the value being marshaled + typeID, ok := c.registeredTypes.GetKey(valueType) // Get the type ID of the value being marshaled if !ok { return fmt.Errorf("can't marshal unregistered type %q", valueType) } @@ -136,13 +135,17 @@ func (c *hierarchyCodec) UnpackPrefix(p *wrappers.Packer, valueType reflect.Type typeID: typeIDShort, } // Get a type that implements the interface - implementingType, ok := c.typeIDToType[t] + implementingType, ok := c.registeredTypes.GetValue(t) if !ok { return reflect.Value{}, fmt.Errorf("couldn't unmarshal interface: unknown type ID %+v", t) } // Ensure type actually does implement the interface if !implementingType.Implements(valueType) { - return reflect.Value{}, fmt.Errorf("couldn't unmarshal interface: %s does not implement interface %s", implementingType, valueType) + return reflect.Value{}, fmt.Errorf("couldn't unmarshal interface: %s %w %s", + implementingType, + codec.ErrDoesNotImplementInterface, + valueType, + ) } return reflect.New(implementingType).Elem(), nil // instance of the proper type } diff --git a/avalanchego/codec/hierarchycodec/codec_test.go b/avalanchego/codec/hierarchycodec/codec_test.go index 64a3cf8b..8149cdcc 100644 --- 
a/avalanchego/codec/hierarchycodec/codec_test.go +++ b/avalanchego/codec/hierarchycodec/codec_test.go @@ -1,24 +1,45 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package hierarchycodec import ( "testing" + "time" "github.com/ava-labs/avalanchego/codec" + "github.com/ava-labs/avalanchego/utils/timer/mockable" ) func TestVectors(t *testing.T) { for _, test := range codec.Tests { - c := NewDefault() + c := NewDefault(mockable.MaxTime) test(c, t) } } func TestMultipleTags(t *testing.T) { for _, test := range codec.MultipleTagsTests { - c := New([]string{"tag1", "tag2"}, defaultMaxSliceLength) + c := New(mockable.MaxTime, []string{"tag1", "tag2"}, defaultMaxSliceLength) test(c, t) } } + +func TestEnforceSliceLen(t *testing.T) { + for _, test := range codec.EnforceSliceLenTests { + c := NewDefault(mockable.MaxTime) + test(c, t) + } +} + +func TestIgnoreSliceLen(t *testing.T) { + for _, test := range codec.IgnoreSliceLenTests { + c := NewDefault(time.Time{}) + test(c, t) + } +} + +func FuzzStructUnmarshalHierarchyCodec(f *testing.F) { + c := NewDefault(mockable.MaxTime) + codec.FuzzStructUnmarshal(c, f) +} diff --git a/avalanchego/codec/linearcodec/codec.go b/avalanchego/codec/linearcodec/codec.go index 30b317c8..6ad36b8a 100644 --- a/avalanchego/codec/linearcodec/codec.go +++ b/avalanchego/codec/linearcodec/codec.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package linearcodec @@ -7,15 +7,17 @@ import ( "fmt" "reflect" "sync" + "time" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/reflectcodec" + "github.com/ava-labs/avalanchego/utils/bimap" "github.com/ava-labs/avalanchego/utils/wrappers" ) const ( // default max length of a slice being marshalled by Marshal(). 
Should be <= math.MaxUint32. - defaultMaxSliceLength = 256 * 1024 + DefaultMaxSliceLength = 256 * 1024 ) var ( @@ -36,32 +38,30 @@ type Codec interface { type linearCodec struct { codec.Codec - lock sync.RWMutex - nextTypeID uint32 - typeIDToType map[uint32]reflect.Type - typeToTypeID map[reflect.Type]uint32 + lock sync.RWMutex + nextTypeID uint32 + registeredTypes *bimap.BiMap[uint32, reflect.Type] } // New returns a new, concurrency-safe codec; it allow to specify // both tagNames and maxSlicelenght -func New(tagNames []string, maxSliceLen uint32) Codec { +func New(durangoTime time.Time, tagNames []string, maxSliceLen uint32) Codec { hCodec := &linearCodec{ - nextTypeID: 0, - typeIDToType: map[uint32]reflect.Type{}, - typeToTypeID: map[reflect.Type]uint32{}, + nextTypeID: 0, + registeredTypes: bimap.New[uint32, reflect.Type](), } - hCodec.Codec = reflectcodec.New(hCodec, tagNames, maxSliceLen) + hCodec.Codec = reflectcodec.New(hCodec, tagNames, durangoTime, maxSliceLen) return hCodec } // NewDefault is a convenience constructor; it returns a new codec with reasonable default values -func NewDefault() Codec { - return New([]string{reflectcodec.DefaultTagName}, defaultMaxSliceLength) +func NewDefault(durangoTime time.Time) Codec { + return New(durangoTime, []string{reflectcodec.DefaultTagName}, DefaultMaxSliceLength) } // NewCustomMaxLength is a convenience constructor; it returns a new codec with custom max length and default tags -func NewCustomMaxLength(maxSliceLen uint32) Codec { - return New([]string{reflectcodec.DefaultTagName}, maxSliceLen) +func NewCustomMaxLength(durangoTime time.Time, maxSliceLen uint32) Codec { + return New(durangoTime, []string{reflectcodec.DefaultTagName}, maxSliceLen) } // Skip some number of type IDs @@ -78,12 +78,11 @@ func (c *linearCodec) RegisterType(val interface{}) error { defer c.lock.Unlock() valType := reflect.TypeOf(val) - if _, exists := c.typeToTypeID[valType]; exists { - return fmt.Errorf("type %v has already been 
registered", valType) + if c.registeredTypes.HasValue(valType) { + return fmt.Errorf("%w: %v", codec.ErrDuplicateType, valType) } - c.typeIDToType[c.nextTypeID] = valType - c.typeToTypeID[valType] = c.nextTypeID + c.registeredTypes.Put(c.nextTypeID, valType) c.nextTypeID++ return nil } @@ -97,7 +96,7 @@ func (c *linearCodec) PackPrefix(p *wrappers.Packer, valueType reflect.Type) err c.lock.RLock() defer c.lock.RUnlock() - typeID, ok := c.typeToTypeID[valueType] // Get the type ID of the value being marshaled + typeID, ok := c.registeredTypes.GetKey(valueType) // Get the type ID of the value being marshaled if !ok { return fmt.Errorf("can't marshal unregistered type %q", valueType) } @@ -114,13 +113,17 @@ func (c *linearCodec) UnpackPrefix(p *wrappers.Packer, valueType reflect.Type) ( return reflect.Value{}, fmt.Errorf("couldn't unmarshal interface: %w", p.Err) } // Get a type that implements the interface - implementingType, ok := c.typeIDToType[typeID] + implementingType, ok := c.registeredTypes.GetValue(typeID) if !ok { return reflect.Value{}, fmt.Errorf("couldn't unmarshal interface: unknown type ID %d", typeID) } // Ensure type actually does implement the interface if !implementingType.Implements(valueType) { - return reflect.Value{}, fmt.Errorf("couldn't unmarshal interface: %s does not implement interface %s", implementingType, valueType) + return reflect.Value{}, fmt.Errorf("couldn't unmarshal interface: %s %w %s", + implementingType, + codec.ErrDoesNotImplementInterface, + valueType, + ) } return reflect.New(implementingType).Elem(), nil // instance of the proper type } diff --git a/avalanchego/codec/linearcodec/codec_test.go b/avalanchego/codec/linearcodec/codec_test.go index 1e6b836a..3d2f3eff 100644 --- a/avalanchego/codec/linearcodec/codec_test.go +++ b/avalanchego/codec/linearcodec/codec_test.go @@ -1,24 +1,45 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. package linearcodec import ( "testing" + "time" "github.com/ava-labs/avalanchego/codec" + "github.com/ava-labs/avalanchego/utils/timer/mockable" ) func TestVectors(t *testing.T) { for _, test := range codec.Tests { - c := NewDefault() + c := NewDefault(mockable.MaxTime) test(c, t) } } func TestMultipleTags(t *testing.T) { for _, test := range codec.MultipleTagsTests { - c := New([]string{"tag1", "tag2"}, defaultMaxSliceLength) + c := New(mockable.MaxTime, []string{"tag1", "tag2"}, DefaultMaxSliceLength) test(c, t) } } + +func TestEnforceSliceLen(t *testing.T) { + for _, test := range codec.EnforceSliceLenTests { + c := NewDefault(mockable.MaxTime) + test(c, t) + } +} + +func TestIgnoreSliceLen(t *testing.T) { + for _, test := range codec.IgnoreSliceLenTests { + c := NewDefault(time.Time{}) + test(c, t) + } +} + +func FuzzStructUnmarshalLinearCodec(f *testing.F) { + c := NewDefault(mockable.MaxTime) + codec.FuzzStructUnmarshal(c, f) +} diff --git a/avalanchego/codec/manager.go b/avalanchego/codec/manager.go index 8cfefc6b..6fb48aaa 100644 --- a/avalanchego/codec/manager.go +++ b/avalanchego/codec/manager.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package codec @@ -23,12 +23,13 @@ const ( ) var ( - errMarshalNil = errors.New("can't marshal nil pointer or interface") - errUnmarshalNil = errors.New("can't unmarshal nil") - errCantPackVersion = errors.New("couldn't pack codec version") - errCantUnpackVersion = errors.New("couldn't unpack codec version") - errUnknownVersion = errors.New("unknown codec version") - errDuplicatedVersion = errors.New("duplicated codec version") + ErrUnknownVersion = errors.New("unknown codec version") + ErrMarshalNil = errors.New("can't marshal nil pointer or interface") + ErrUnmarshalNil = errors.New("can't unmarshal nil") + ErrUnmarshalTooBig = errors.New("byte array exceeds maximum length") + ErrCantPackVersion = errors.New("couldn't pack codec version") + ErrCantUnpackVersion = errors.New("couldn't unpack codec version") + ErrDuplicatedVersion = errors.New("duplicated codec version") ) var _ Manager = (*manager)(nil) @@ -38,14 +39,10 @@ type Manager interface { // Associate the given codec with the given version ID RegisterCodec(version uint16, codec Codec) error - // Define the maximum size, in bytes, of something serialized/deserialized - // by this codec manager - SetMaxSize(int) - // Size returns the size, in bytes, of [value] when it's marshaled // using the codec with the given version. // RegisterCodec must have been called with that version. - // If [value] is nil, returns [errMarshalNil] + // If [value] is nil, returns [ErrMarshalNil] Size(version uint16, value interface{}) (int, error) // Marshal the given value using the codec with the given version. 
@@ -84,22 +81,15 @@ func (m *manager) RegisterCodec(version uint16, codec Codec) error { defer m.lock.Unlock() if _, exists := m.codecs[version]; exists { - return errDuplicatedVersion + return ErrDuplicatedVersion } m.codecs[version] = codec return nil } -// SetMaxSize of bytes allowed -func (m *manager) SetMaxSize(size int) { - m.lock.Lock() - m.maxSize = size - m.lock.Unlock() -} - func (m *manager) Size(version uint16, value interface{}) (int, error) { if value == nil { - return 0, errMarshalNil // can't marshal nil + return 0, ErrMarshalNil // can't marshal nil } m.lock.RLock() @@ -107,7 +97,7 @@ func (m *manager) Size(version uint16, value interface{}) (int, error) { m.lock.RUnlock() if !exists { - return 0, errUnknownVersion + return 0, ErrUnknownVersion } res, err := c.Size(value) @@ -119,15 +109,14 @@ func (m *manager) Size(version uint16, value interface{}) (int, error) { // To marshal an interface, [value] must be a pointer to the interface. func (m *manager) Marshal(version uint16, value interface{}) ([]byte, error) { if value == nil { - return nil, errMarshalNil // can't marshal nil + return nil, ErrMarshalNil // can't marshal nil } m.lock.RLock() c, exists := m.codecs[version] m.lock.RUnlock() - if !exists { - return nil, errUnknownVersion + return nil, ErrUnknownVersion } p := wrappers.Packer{ @@ -136,7 +125,7 @@ func (m *manager) Marshal(version uint16, value interface{}) ([]byte, error) { } p.PackShort(version) if p.Errored() { - return nil, errCantPackVersion // Should never happen + return nil, ErrCantPackVersion // Should never happen } return p.Bytes, c.MarshalInto(value, &p) } @@ -145,29 +134,26 @@ func (m *manager) Marshal(version uint16, value interface{}) ([]byte, error) { // interface. 
func (m *manager) Unmarshal(bytes []byte, dest interface{}) (uint16, error) { if dest == nil { - return 0, errUnmarshalNil + return 0, ErrUnmarshalNil } - m.lock.RLock() - if len(bytes) > m.maxSize { - m.lock.RUnlock() - return 0, fmt.Errorf("byte array exceeds maximum length, %d", m.maxSize) + if byteLen := len(bytes); byteLen > m.maxSize { + return 0, fmt.Errorf("%w: %d > %d", ErrUnmarshalTooBig, byteLen, m.maxSize) } p := wrappers.Packer{ Bytes: bytes, } - version := p.UnpackShort() if p.Errored() { // Make sure the codec version is correct - m.lock.RUnlock() - return 0, errCantUnpackVersion + return 0, ErrCantUnpackVersion } + m.lock.RLock() c, exists := m.codecs[version] m.lock.RUnlock() if !exists { - return version, errUnknownVersion + return version, ErrUnknownVersion } return version, c.Unmarshal(p.Bytes[p.Offset:], dest) } diff --git a/avalanchego/codec/mock_manager.go b/avalanchego/codec/mock_manager.go index 0dc2c439..36bbae57 100644 --- a/avalanchego/codec/mock_manager.go +++ b/avalanchego/codec/mock_manager.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/codec (interfaces: Manager) +// +// Generated by this command: +// +// mockgen -package=codec -destination=codec/mock_manager.go github.com/ava-labs/avalanchego/codec Manager +// // Package codec is a generated GoMock package. package codec @@ -10,7 +12,7 @@ package codec import ( reflect "reflect" - gomock "github.com/golang/mock/gomock" + gomock "go.uber.org/mock/gomock" ) // MockManager is a mock of Manager interface. @@ -37,7 +39,7 @@ func (m *MockManager) EXPECT() *MockManagerMockRecorder { } // Marshal mocks base method. 
-func (m *MockManager) Marshal(arg0 uint16, arg1 interface{}) ([]byte, error) { +func (m *MockManager) Marshal(arg0 uint16, arg1 any) ([]byte, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Marshal", arg0, arg1) ret0, _ := ret[0].([]byte) @@ -46,7 +48,7 @@ func (m *MockManager) Marshal(arg0 uint16, arg1 interface{}) ([]byte, error) { } // Marshal indicates an expected call of Marshal. -func (mr *MockManagerMockRecorder) Marshal(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) Marshal(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Marshal", reflect.TypeOf((*MockManager)(nil).Marshal), arg0, arg1) } @@ -60,25 +62,13 @@ func (m *MockManager) RegisterCodec(arg0 uint16, arg1 Codec) error { } // RegisterCodec indicates an expected call of RegisterCodec. -func (mr *MockManagerMockRecorder) RegisterCodec(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) RegisterCodec(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterCodec", reflect.TypeOf((*MockManager)(nil).RegisterCodec), arg0, arg1) } -// SetMaxSize mocks base method. -func (m *MockManager) SetMaxSize(arg0 int) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetMaxSize", arg0) -} - -// SetMaxSize indicates an expected call of SetMaxSize. -func (mr *MockManagerMockRecorder) SetMaxSize(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetMaxSize", reflect.TypeOf((*MockManager)(nil).SetMaxSize), arg0) -} - // Size mocks base method. 
-func (m *MockManager) Size(arg0 uint16, arg1 interface{}) (int, error) { +func (m *MockManager) Size(arg0 uint16, arg1 any) (int, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Size", arg0, arg1) ret0, _ := ret[0].(int) @@ -87,13 +77,13 @@ func (m *MockManager) Size(arg0 uint16, arg1 interface{}) (int, error) { } // Size indicates an expected call of Size. -func (mr *MockManagerMockRecorder) Size(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) Size(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Size", reflect.TypeOf((*MockManager)(nil).Size), arg0, arg1) } // Unmarshal mocks base method. -func (m *MockManager) Unmarshal(arg0 []byte, arg1 interface{}) (uint16, error) { +func (m *MockManager) Unmarshal(arg0 []byte, arg1 any) (uint16, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Unmarshal", arg0, arg1) ret0, _ := ret[0].(uint16) @@ -102,7 +92,7 @@ func (m *MockManager) Unmarshal(arg0 []byte, arg1 interface{}) (uint16, error) { } // Unmarshal indicates an expected call of Unmarshal. -func (mr *MockManagerMockRecorder) Unmarshal(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) Unmarshal(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Unmarshal", reflect.TypeOf((*MockManager)(nil).Unmarshal), arg0, arg1) } diff --git a/avalanchego/codec/reflectcodec/struct_fielder.go b/avalanchego/codec/reflectcodec/struct_fielder.go index 7a54182c..efac391e 100644 --- a/avalanchego/codec/reflectcodec/struct_fielder.go +++ b/avalanchego/codec/reflectcodec/struct_fielder.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package reflectcodec @@ -6,81 +6,66 @@ package reflectcodec import ( "fmt" "reflect" - "strconv" "sync" -) - -const ( - // SliceLenTagName that specifies the length of a slice. - SliceLenTagName = "len" - // TagValue is the value the tag must have to be serialized. - TagValue = "true" + "github.com/ava-labs/avalanchego/codec" ) -var _ StructFielder = (*structFielder)(nil) +// TagValue is the value the tag must have to be serialized. +const TagValue = "true" -type FieldDesc struct { - Index int - MaxSliceLen uint32 -} +var _ StructFielder = (*structFielder)(nil) // StructFielder handles discovery of serializable fields in a struct. type StructFielder interface { // Returns the fields that have been marked as serializable in [t], which is - // a struct type. Additionally, returns the custom maximum length slice that - // may be serialized into the field, if any. + // a struct type. // Returns an error if a field has tag "[tagName]: [TagValue]" but the field // is un-exported. // GetSerializedField(Foo) --> [1,5,8] means Foo.Field(1), Foo.Field(5), // Foo.Field(8) are to be serialized/deserialized. - GetSerializedFields(t reflect.Type) ([]FieldDesc, error) + GetSerializedFields(t reflect.Type) ([]int, error) } -func NewStructFielder(tagNames []string, maxSliceLen uint32) StructFielder { +func NewStructFielder(tagNames []string) StructFielder { return &structFielder{ tags: tagNames, - maxSliceLen: maxSliceLen, - serializedFieldIndices: make(map[reflect.Type][]FieldDesc), + serializedFieldIndices: make(map[reflect.Type][]int), } } type structFielder struct { - lock sync.Mutex + lock sync.RWMutex // multiple tags per field can be specified. A field is serialized/deserialized // if it has at least one of the specified tags. tags []string - maxSliceLen uint32 - // Key: a struct type // Value: Slice where each element is index in the struct type of a field // that is serialized/deserialized e.g. Foo --> [1,5,8] means Foo.Field(1), // etc. are to be serialized/deserialized. 
We assume this cache is pretty // small (a few hundred keys at most) and doesn't take up much memory. - serializedFieldIndices map[reflect.Type][]FieldDesc + serializedFieldIndices map[reflect.Type][]int } -func (s *structFielder) GetSerializedFields(t reflect.Type) ([]FieldDesc, error) { +func (s *structFielder) GetSerializedFields(t reflect.Type) ([]int, error) { + if serializedFields, ok := s.getCachedSerializedFields(t); ok { // use pre-computed result + return serializedFields, nil + } + s.lock.Lock() defer s.lock.Unlock() - if s.serializedFieldIndices == nil { - s.serializedFieldIndices = make(map[reflect.Type][]FieldDesc) - } - if serializedFields, ok := s.serializedFieldIndices[t]; ok { // use pre-computed result - return serializedFields, nil - } numFields := t.NumField() - serializedFields := make([]FieldDesc, 0, numFields) + serializedFields := make([]int, 0, numFields) for i := 0; i < numFields; i++ { // Go through all fields of this struct field := t.Field(i) // Multiple tags per fields can be specified. 
// Serialize/Deserialize field if it has // any tag with the right value - captureField := false + var captureField bool for _, tag := range s.tags { if field.Tag.Get(tag) == TagValue { captureField = true @@ -91,19 +76,21 @@ func (s *structFielder) GetSerializedFields(t reflect.Type) ([]FieldDesc, error) continue } if !field.IsExported() { // Can only marshal exported fields - return nil, fmt.Errorf("can't marshal un-exported field %s", field.Name) - } - sliceLenField := field.Tag.Get(SliceLenTagName) - maxSliceLen := s.maxSliceLen - - if newLen, err := strconv.ParseUint(sliceLenField, 10, 31); err == nil { - maxSliceLen = uint32(newLen) + return nil, fmt.Errorf("can not marshal %w: %s", + codec.ErrUnexportedField, + field.Name, + ) } - serializedFields = append(serializedFields, FieldDesc{ - Index: i, - MaxSliceLen: maxSliceLen, - }) + serializedFields = append(serializedFields, i) } s.serializedFieldIndices[t] = serializedFields // cache result return serializedFields, nil } + +func (s *structFielder) getCachedSerializedFields(t reflect.Type) ([]int, bool) { + s.lock.RLock() + defer s.lock.RUnlock() + + cachedFields, ok := s.serializedFieldIndices[t] + return cachedFields, ok +} diff --git a/avalanchego/codec/reflectcodec/type_codec.go b/avalanchego/codec/reflectcodec/type_codec.go index a92ade18..7f567d0b 100644 --- a/avalanchego/codec/reflectcodec/type_codec.go +++ b/avalanchego/codec/reflectcodec/type_codec.go @@ -1,34 +1,37 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package reflectcodec import ( + "bytes" "errors" "fmt" "math" "reflect" + "slices" + "time" "github.com/ava-labs/avalanchego/codec" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/wrappers" ) const ( // DefaultTagName that enables serialization. 
- DefaultTagName = "serialize" + DefaultTagName = "serialize" + initialSliceLen = 16 ) var ( - ErrMaxMarshalSliceLimitExceeded = errors.New("maximum marshal slice limit exceeded") + _ codec.Codec = (*genericCodec)(nil) - errMarshalNil = errors.New("can't marshal nil pointer or interface") - errUnmarshalNil = errors.New("can't unmarshal nil") - errNeedPointer = errors.New("argument to unmarshal must be a pointer") - errExtraSpace = errors.New("trailing buffer space") + errMarshalNil = errors.New("can't marshal nil pointer or interface") + errUnmarshalNil = errors.New("can't unmarshal nil") + errNeedPointer = errors.New("argument to unmarshal must be a pointer") + errRecursiveInterfaceTypes = errors.New("recursive interface types") ) -var _ codec.Codec = (*genericCodec)(nil) - type TypeCodec interface { // UnpackPrefix unpacks the prefix of an interface from the given packer. // The prefix specifies the concrete type that the interface should be @@ -59,8 +62,8 @@ type TypeCodec interface { // `{tagName}:"true"` to it. `{tagName}` defaults to `serialize`. // 3. These typed members of a struct may be serialized: // bool, string, uint[8,16,32,64], int[8,16,32,64], -// structs, slices, arrays, interface. -// structs, slices and arrays can only be serialized if their constituent +// structs, slices, arrays, maps, interface. +// structs, slices, maps and arrays can only be serialized if their constituent // values can be. // 4. To marshal an interface, you must pass a pointer to the value // 5. To unmarshal an interface, you must call @@ -69,16 +72,18 @@ type TypeCodec interface { // 7. 
nil slices are marshaled as empty slices type genericCodec struct { typer TypeCodec + durangoTime time.Time // Time after which [maxSliceLen] will be ignored maxSliceLen uint32 fielder StructFielder } // New returns a new, concurrency-safe codec -func New(typer TypeCodec, tagNames []string, maxSliceLen uint32) codec.Codec { +func New(typer TypeCodec, tagNames []string, durangoTime time.Time, maxSliceLen uint32) codec.Codec { return &genericCodec{ typer: typer, + durangoTime: durangoTime, maxSliceLen: maxSliceLen, - fielder: NewStructFielder(tagNames, maxSliceLen), + fielder: NewStructFielder(tagNames), } } @@ -87,12 +92,16 @@ func (c *genericCodec) Size(value interface{}) (int, error) { return 0, errMarshalNil // can't marshal nil } - size, _, err := c.size(reflect.ValueOf(value)) + size, _, err := c.size(reflect.ValueOf(value), nil /*=typeStack*/) return size, err } -// size returns the size of the value along with whether the value is constant sized. -func (c *genericCodec) size(value reflect.Value) (int, bool, error) { +// size returns the size of the value along with whether the value is constant +// sized. 
+func (c *genericCodec) size( + value reflect.Value, + typeStack set.Set[reflect.Type], +) (int, bool, error) { switch valueKind := value.Kind(); valueKind { case reflect.Uint8: return wrappers.ByteLen, true, nil @@ -116,24 +125,28 @@ func (c *genericCodec) size(value reflect.Value) (int, bool, error) { return wrappers.StringLen(value.String()), false, nil case reflect.Ptr: if value.IsNil() { - // Can't marshal nil pointers (but nil slices are fine) return 0, false, errMarshalNil } - return c.size(value.Elem()) + + return c.size(value.Elem(), typeStack) case reflect.Interface: if value.IsNil() { - // Can't marshal nil interfaces (but nil slices are fine) return 0, false, errMarshalNil } + underlyingValue := value.Interface() underlyingType := reflect.TypeOf(underlyingValue) - prefixSize := c.typer.PrefixSize(underlyingType) - valueSize, _, err := c.size(value.Elem()) - if err != nil { - return 0, false, err + if typeStack.Contains(underlyingType) { + return 0, false, fmt.Errorf("%w: %s", errRecursiveInterfaceTypes, underlyingType) } - return prefixSize + valueSize, false, nil + typeStack.Add(underlyingType) + + prefixSize := c.typer.PrefixSize(underlyingType) + valueSize, _, err := c.size(value.Elem(), typeStack) + + typeStack.Remove(underlyingType) + return prefixSize + valueSize, false, err case reflect.Slice: numElts := value.Len() @@ -141,7 +154,7 @@ func (c *genericCodec) size(value reflect.Value) (int, bool, error) { return wrappers.IntLen, false, nil } - size, constSize, err := c.size(value.Index(0)) + size, constSize, err := c.size(value.Index(0), typeStack) if err != nil { return 0, false, err } @@ -153,7 +166,7 @@ func (c *genericCodec) size(value reflect.Value) (int, bool, error) { } for i := 1; i < numElts; i++ { - innerSize, _, err := c.size(value.Index(i)) + innerSize, _, err := c.size(value.Index(i), typeStack) if err != nil { return 0, false, err } @@ -167,7 +180,7 @@ func (c *genericCodec) size(value reflect.Value) (int, bool, error) { return 0, 
true, nil } - size, constSize, err := c.size(value.Index(0)) + size, constSize, err := c.size(value.Index(0), typeStack) if err != nil { return 0, false, err } @@ -179,7 +192,7 @@ func (c *genericCodec) size(value reflect.Value) (int, bool, error) { } for i := 1; i < numElts; i++ { - innerSize, _, err := c.size(value.Index(i)) + innerSize, _, err := c.size(value.Index(i), typeStack) if err != nil { return 0, false, err } @@ -197,8 +210,8 @@ func (c *genericCodec) size(value reflect.Value) (int, bool, error) { size int constSize = true ) - for _, fieldDesc := range serializedFields { - innerSize, innerConstSize, err := c.size(value.Field(fieldDesc.Index)) + for _, fieldIndex := range serializedFields { + innerSize, innerConstSize, err := c.size(value.Field(fieldIndex), typeStack) if err != nil { return 0, false, err } @@ -207,6 +220,69 @@ func (c *genericCodec) size(value reflect.Value) (int, bool, error) { } return size, constSize, nil + case reflect.Map: + iter := value.MapRange() + if !iter.Next() { + return wrappers.IntLen, false, nil + } + + keySize, keyConstSize, err := c.size(iter.Key(), typeStack) + if err != nil { + return 0, false, err + } + valueSize, valueConstSize, err := c.size(iter.Value(), typeStack) + if err != nil { + return 0, false, err + } + + switch { + case keyConstSize && valueConstSize: + numElts := value.Len() + return wrappers.IntLen + numElts*(keySize+valueSize), false, nil + case keyConstSize: + var ( + numElts = 1 + totalValueSize = valueSize + ) + for iter.Next() { + valueSize, _, err := c.size(iter.Value(), typeStack) + if err != nil { + return 0, false, err + } + totalValueSize += valueSize + numElts++ + } + return wrappers.IntLen + numElts*keySize + totalValueSize, false, nil + case valueConstSize: + var ( + numElts = 1 + totalKeySize = keySize + ) + for iter.Next() { + keySize, _, err := c.size(iter.Key(), typeStack) + if err != nil { + return 0, false, err + } + totalKeySize += keySize + numElts++ + } + return wrappers.IntLen + 
totalKeySize + numElts*valueSize, false, nil + default: + totalSize := wrappers.IntLen + keySize + valueSize + for iter.Next() { + keySize, _, err := c.size(iter.Key(), typeStack) + if err != nil { + return 0, false, err + } + valueSize, _, err := c.size(iter.Value(), typeStack) + if err != nil { + return 0, false, err + } + totalSize += keySize + valueSize + } + return totalSize, false, nil + } + default: return 0, false, fmt.Errorf("can't evaluate marshal length of unknown kind %s", valueKind) } @@ -218,13 +294,17 @@ func (c *genericCodec) MarshalInto(value interface{}, p *wrappers.Packer) error return errMarshalNil // can't marshal nil } - return c.marshal(reflect.ValueOf(value), p, c.maxSliceLen) + return c.marshal(reflect.ValueOf(value), p, nil /*=typeStack*/) } // marshal writes the byte representation of [value] to [p] -// [value]'s underlying value must not be a nil pointer or interface +// // c.lock should be held for the duration of this function -func (c *genericCodec) marshal(value reflect.Value, p *wrappers.Packer, maxSliceLen uint32) error { +func (c *genericCodec) marshal( + value reflect.Value, + p *wrappers.Packer, + typeStack set.Set[reflect.Type], +) error { switch valueKind := value.Kind(); valueKind { case reflect.Uint8: p.PackByte(uint8(value.Uint())) @@ -257,30 +337,45 @@ func (c *genericCodec) marshal(value reflect.Value, p *wrappers.Packer, maxSlice p.PackBool(value.Bool()) return p.Err case reflect.Ptr: - if value.IsNil() { // Can't marshal nil (except nil slices) + if value.IsNil() { return errMarshalNil } - return c.marshal(value.Elem(), p, c.maxSliceLen) + + return c.marshal(value.Elem(), p, typeStack) case reflect.Interface: - if value.IsNil() { // Can't marshal nil (except nil slices) + if value.IsNil() { return errMarshalNil } + underlyingValue := value.Interface() underlyingType := reflect.TypeOf(underlyingValue) + if typeStack.Contains(underlyingType) { + return fmt.Errorf("%w: %s", errRecursiveInterfaceTypes, underlyingType) + } + 
typeStack.Add(underlyingType) if err := c.typer.PackPrefix(p, underlyingType); err != nil { return err } - if err := c.marshal(value.Elem(), p, c.maxSliceLen); err != nil { + if err := c.marshal(value.Elem(), p, typeStack); err != nil { return err } + typeStack.Remove(underlyingType) return p.Err case reflect.Slice: numElts := value.Len() // # elements in the slice/array. 0 if this slice is nil. - if uint32(numElts) > maxSliceLen { + if numElts > math.MaxInt32 { + return fmt.Errorf("%w; slice length, %d, exceeds maximum length, %d", + codec.ErrMaxSliceLenExceeded, + numElts, + math.MaxInt32, + ) + } + if time.Now().Before(c.durangoTime) && uint32(numElts) > c.maxSliceLen { return fmt.Errorf("%w; slice length, %d, exceeds maximum length, %d", - ErrMaxMarshalSliceLimitExceeded, + codec.ErrMaxSliceLenExceeded, numElts, - maxSliceLen) + c.maxSliceLen, + ) } p.PackInt(uint32(numElts)) // pack # elements if p.Err != nil { @@ -299,23 +394,20 @@ func (c *genericCodec) marshal(value reflect.Value, p *wrappers.Packer, maxSlice return p.Err } for i := 0; i < numElts; i++ { // Process each element in the slice - if err := c.marshal(value.Index(i), p, c.maxSliceLen); err != nil { + if err := c.marshal(value.Index(i), p, typeStack); err != nil { return err } } return nil case reflect.Array: - numElts := value.Len() if elemKind := value.Type().Kind(); elemKind == reflect.Uint8 { sliceVal := value.Convert(reflect.TypeOf([]byte{})) p.PackFixedBytes(sliceVal.Bytes()) return p.Err } - if uint32(numElts) > c.maxSliceLen { - return fmt.Errorf("%w; array length, %d, exceeds maximum length, %d", ErrMaxMarshalSliceLimitExceeded, numElts, c.maxSliceLen) - } + numElts := value.Len() for i := 0; i < numElts; i++ { // Process each element in the array - if err := c.marshal(value.Index(i), p, c.maxSliceLen); err != nil { + if err := c.marshal(value.Index(i), p, typeStack); err != nil { return err } } @@ -325,19 +417,91 @@ func (c *genericCodec) marshal(value reflect.Value, p *wrappers.Packer, 
maxSlice if err != nil { return err } - for _, fieldDesc := range serializedFields { // Go through all fields of this struct that are serialized - if err := c.marshal(value.Field(fieldDesc.Index), p, fieldDesc.MaxSliceLen); err != nil { // Serialize the field and write to byte array + for _, fieldIndex := range serializedFields { // Go through all fields of this struct that are serialized + if err := c.marshal(value.Field(fieldIndex), p, typeStack); err != nil { // Serialize the field and write to byte array return err } } + return nil + case reflect.Map: + keys := value.MapKeys() + numElts := len(keys) + if numElts > math.MaxInt32 { + return fmt.Errorf("%w; slice length, %d, exceeds maximum length, %d", + codec.ErrMaxSliceLenExceeded, + numElts, + math.MaxInt32, + ) + } + if time.Now().Before(c.durangoTime) && uint32(numElts) > c.maxSliceLen { + return fmt.Errorf("%w; map length, %d, exceeds maximum length, %d", + codec.ErrMaxSliceLenExceeded, + numElts, + c.maxSliceLen, + ) + } + p.PackInt(uint32(numElts)) // pack # elements + if p.Err != nil { + return p.Err + } + + // pack key-value pairs sorted by increasing key + type keyTuple struct { + key reflect.Value + startIndex int + endIndex int + } + + sortedKeys := make([]keyTuple, len(keys)) + startOffset := p.Offset + endOffset := p.Offset + for i, key := range keys { + if err := c.marshal(key, p, typeStack); err != nil { + return err + } + if p.Err != nil { + return fmt.Errorf("couldn't marshal map key %+v: %w ", key, p.Err) + } + sortedKeys[i] = keyTuple{ + key: key, + startIndex: endOffset, + endIndex: p.Offset, + } + endOffset = p.Offset + } + + slices.SortFunc(sortedKeys, func(a, b keyTuple) int { + aBytes := p.Bytes[a.startIndex:a.endIndex] + bBytes := p.Bytes[b.startIndex:b.endIndex] + return bytes.Compare(aBytes, bBytes) + }) + + allKeyBytes := slices.Clone(p.Bytes[startOffset:p.Offset]) + p.Offset = startOffset + for _, key := range sortedKeys { + // pack key + startIndex := key.startIndex - startOffset + 
endIndex := key.endIndex - startOffset + keyBytes := allKeyBytes[startIndex:endIndex] + p.PackFixedBytes(keyBytes) + if p.Err != nil { + return p.Err + } + + // serialize and pack value + if err := c.marshal(value.MapIndex(key.key), p, typeStack); err != nil { + return err + } + } + return nil default: - return fmt.Errorf("can't marshal unknown kind %s", valueKind) + return fmt.Errorf("%w: %s", codec.ErrUnsupportedType, valueKind) } } -// Unmarshal unmarshals [bytes] into [dest], where -// [dest] must be a pointer or interface +// Unmarshal unmarshals [bytes] into [dest], where [dest] must be a pointer or +// interface func (c *genericCodec) Unmarshal(bytes []byte, dest interface{}) error { if dest == nil { return errUnmarshalNil @@ -350,18 +514,27 @@ func (c *genericCodec) Unmarshal(bytes []byte, dest interface{}) error { if destPtr.Kind() != reflect.Ptr { return errNeedPointer } - if err := c.unmarshal(&p, destPtr.Elem(), c.maxSliceLen); err != nil { + if err := c.unmarshal(&p, destPtr.Elem(), nil /*=typeStack*/); err != nil { return err } if p.Offset != len(bytes) { - return errExtraSpace + return fmt.Errorf("%w: read %d provided %d", + codec.ErrExtraSpace, + p.Offset, + len(bytes), + ) } return nil } // Unmarshal from p.Bytes into [value]. [value] must be addressable. 
+// // c.lock should be held for the duration of this function -func (c *genericCodec) unmarshal(p *wrappers.Packer, value reflect.Value, maxSliceLen uint32) error { +func (c *genericCodec) unmarshal( + p *wrappers.Packer, + value reflect.Value, + typeStack set.Set[reflect.Type], +) error { switch value.Kind() { case reflect.Uint8: value.SetUint(uint64(p.UnpackByte())) @@ -422,32 +595,38 @@ func (c *genericCodec) unmarshal(p *wrappers.Packer, value reflect.Value, maxSli if p.Err != nil { return fmt.Errorf("couldn't unmarshal slice: %w", p.Err) } - if numElts32 > maxSliceLen { + if numElts32 > math.MaxInt32 { return fmt.Errorf("%w; array length, %d, exceeds maximum length, %d", - ErrMaxMarshalSliceLimitExceeded, + codec.ErrMaxSliceLenExceeded, numElts32, - maxSliceLen) + math.MaxInt32, + ) } - if numElts32 > math.MaxInt32 { + if time.Now().Before(c.durangoTime) && numElts32 > c.maxSliceLen { return fmt.Errorf("%w; array length, %d, exceeds maximum length, %d", - ErrMaxMarshalSliceLimitExceeded, + codec.ErrMaxSliceLenExceeded, numElts32, - math.MaxInt32) + c.maxSliceLen, + ) } numElts := int(numElts32) + sliceType := value.Type() + innerType := sliceType.Elem() + // If this is a slice of bytes, manually unpack the bytes rather // than calling unmarshal on each byte. This improves performance. - if elemKind := value.Type().Elem().Kind(); elemKind == reflect.Uint8 { + if elemKind := innerType.Kind(); elemKind == reflect.Uint8 { value.SetBytes(p.UnpackFixedBytes(numElts)) return p.Err } - // set [value] to be a slice of the appropriate type/capacity (right now it is nil) - value.Set(reflect.MakeSlice(value.Type(), numElts, numElts)) - // Unmarshal each element into the appropriate index of the slice + // Unmarshal each element and append it into the slice. 
+ value.Set(reflect.MakeSlice(sliceType, 0, initialSliceLen)) + zeroValue := reflect.Zero(innerType) for i := 0; i < numElts; i++ { - if err := c.unmarshal(p, value.Index(i), c.maxSliceLen); err != nil { - return fmt.Errorf("couldn't unmarshal slice element: %w", err) + value.Set(reflect.Append(value, zeroValue)) + if err := c.unmarshal(p, value.Index(i), typeStack); err != nil { + return err } } return nil @@ -464,8 +643,8 @@ func (c *genericCodec) unmarshal(p *wrappers.Packer, value reflect.Value, maxSli return nil } for i := 0; i < numElts; i++ { - if err := c.unmarshal(p, value.Index(i), c.maxSliceLen); err != nil { - return fmt.Errorf("couldn't unmarshal array element: %w", err) + if err := c.unmarshal(p, value.Index(i), typeStack); err != nil { + return err } } return nil @@ -480,11 +659,18 @@ func (c *genericCodec) unmarshal(p *wrappers.Packer, value reflect.Value, maxSli if err != nil { return err } + intfImplementorType := intfImplementor.Type() + if typeStack.Contains(intfImplementorType) { + return fmt.Errorf("%w: %s", errRecursiveInterfaceTypes, intfImplementorType) + } + typeStack.Add(intfImplementorType) + // Unmarshal into the struct - if err := c.unmarshal(p, intfImplementor, c.maxSliceLen); err != nil { - return fmt.Errorf("couldn't unmarshal interface: %w", err) + if err := c.unmarshal(p, intfImplementor, typeStack); err != nil { + return err } - // And assign the filled struct to the value + + typeStack.Remove(intfImplementorType) value.Set(intfImplementor) return nil case reflect.Struct: @@ -494,9 +680,9 @@ func (c *genericCodec) unmarshal(p *wrappers.Packer, value reflect.Value, maxSli return fmt.Errorf("couldn't unmarshal struct: %w", err) } // Go through the fields and umarshal into them - for _, fieldDesc := range serializedFieldIndices { - if err := c.unmarshal(p, value.Field(fieldDesc.Index), fieldDesc.MaxSliceLen); err != nil { - return fmt.Errorf("couldn't unmarshal struct: %w", err) + for _, fieldIndex := range serializedFieldIndices { 
+ if err := c.unmarshal(p, value.Field(fieldIndex), typeStack); err != nil { + return err } } return nil @@ -506,11 +692,74 @@ func (c *genericCodec) unmarshal(p *wrappers.Packer, value reflect.Value, maxSli // Create a new pointer to a new value of the underlying type v := reflect.New(t) // Fill the value - if err := c.unmarshal(p, v.Elem(), c.maxSliceLen); err != nil { - return fmt.Errorf("couldn't unmarshal pointer: %w", err) + if err := c.unmarshal(p, v.Elem(), typeStack); err != nil { + return err } // Assign to the top-level struct's member value.Set(v) + return nil + case reflect.Map: + numElts32 := p.UnpackInt() + if p.Err != nil { + return fmt.Errorf("couldn't unmarshal map: %w", p.Err) + } + if numElts32 > math.MaxInt32 { + return fmt.Errorf("%w; map length, %d, exceeds maximum length, %d", + codec.ErrMaxSliceLenExceeded, + numElts32, + math.MaxInt32, + ) + } + if time.Now().Before(c.durangoTime) && numElts32 > c.maxSliceLen { + return fmt.Errorf("%w; map length, %d, exceeds maximum length, %d", + codec.ErrMaxSliceLenExceeded, + numElts32, + c.maxSliceLen, + ) + } + + var ( + numElts = int(numElts32) + mapType = value.Type() + mapKeyType = mapType.Key() + mapValueType = mapType.Elem() + prevKey []byte + ) + + // Set [value] to be a new map of the appropriate type. + value.Set(reflect.MakeMap(mapType)) + + for i := 0; i < numElts; i++ { + mapKey := reflect.New(mapKeyType).Elem() + + keyStartOffset := p.Offset + + if err := c.unmarshal(p, mapKey, typeStack); err != nil { + return err + } + + // Get the key's byte representation and check that the new key is + // actually bigger (according to bytes.Compare) than the previous + // key. + // + // We do this to enforce that key-value pairs are sorted by + // increasing key. 
+ keyBytes := p.Bytes[keyStartOffset:p.Offset] + if i != 0 && bytes.Compare(keyBytes, prevKey) <= 0 { + return fmt.Errorf("keys aren't sorted: (%s, %s)", prevKey, mapKey) + } + prevKey = keyBytes + + // Get the value + mapValue := reflect.New(mapValueType).Elem() + if err := c.unmarshal(p, mapValue, typeStack); err != nil { + return err + } + + // Assign the key-value pair in the map + value.SetMapIndex(mapKey, mapValue) + } + return nil default: return fmt.Errorf("can't unmarshal unknown type %s", value.Kind().String()) diff --git a/avalanchego/codec/registry.go b/avalanchego/codec/registry.go index 9031c94f..de87e1a9 100644 --- a/avalanchego/codec/registry.go +++ b/avalanchego/codec/registry.go @@ -1,8 +1,12 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package codec +import "errors" + +var ErrDuplicateType = errors.New("duplicate type registration") + // Registry registers new types that can be marshaled into type Registry interface { RegisterType(interface{}) error diff --git a/avalanchego/codec/test_codec.go b/avalanchego/codec/test_codec.go index 8da7a1be..d58e2d81 100644 --- a/avalanchego/codec/test_codec.go +++ b/avalanchego/codec/test_codec.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package codec @@ -8,44 +8,57 @@ import ( "testing" "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/utils/wrappers" ) -var Tests = []func(c GeneralCodec, t testing.TB){ - TestStruct, - TestRegisterStructTwice, - TestUInt32, - TestUIntPtr, - TestSlice, - TestMaxSizeSlice, - TestBool, - TestArray, - TestBigArray, - TestPointerToStruct, - TestSliceOfStruct, - TestInterface, - TestSliceOfInterface, - TestArrayOfInterface, - TestPointerToInterface, - TestString, - TestNilSlice, - TestSerializeUnexportedField, - TestSerializeOfNoSerializeField, - TestNilSliceSerialization, - TestEmptySliceSerialization, - TestSliceWithEmptySerialization, - TestSliceWithEmptySerializationOutOfMemory, - TestSliceTooLarge, - TestNegativeNumbers, - TestTooLargeUnmarshal, - TestUnmarshalInvalidInterface, - TestRestrictedSlice, - TestExtraSpace, - TestSliceLengthOverflow, -} - -var MultipleTagsTests = []func(c GeneralCodec, t testing.TB){ - TestMultipleTags, -} +var ( + Tests = []func(c GeneralCodec, t testing.TB){ + TestStruct, + TestRegisterStructTwice, + TestUInt32, + TestUIntPtr, + TestSlice, + TestMaxSizeSlice, + TestBool, + TestArray, + TestBigArray, + TestPointerToStruct, + TestSliceOfStruct, + TestInterface, + TestSliceOfInterface, + TestArrayOfInterface, + TestPointerToInterface, + TestString, + TestNilSlice, + TestSerializeUnexportedField, + TestSerializeOfNoSerializeField, + TestNilSliceSerialization, + TestEmptySliceSerialization, + TestSliceWithEmptySerialization, + TestSliceWithEmptySerializationOutOfMemory, + TestSliceTooLarge, + TestNegativeNumbers, + TestTooLargeUnmarshal, + TestUnmarshalInvalidInterface, + TestExtraSpace, + TestSliceLengthOverflow, + TestMap, + } + + MultipleTagsTests = []func(c GeneralCodec, t testing.TB){ + TestMultipleTags, + } + + EnforceSliceLenTests = []func(c GeneralCodec, t testing.TB){ + TestCanNotMarshalLargeSlices, + TestCanNotUnmarshalLargeSlices, + } + + IgnoreSliceLenTests = []func(c GeneralCodec, t testing.TB){ + 
TestCanMarshalLargeSlices, + } +) // The below structs and interfaces exist // for the sake of testing @@ -84,22 +97,39 @@ type MyInnerStruct3 struct { } type myStruct struct { - InnerStruct MyInnerStruct `serialize:"true"` - InnerStruct2 *MyInnerStruct `serialize:"true"` - Member1 int64 `serialize:"true"` - Member2 uint16 `serialize:"true"` - MyArray2 [5]string `serialize:"true"` - MyArray3 [3]MyInnerStruct `serialize:"true"` - MyArray4 [2]*MyInnerStruct2 `serialize:"true"` - MySlice []byte `serialize:"true"` - MySlice2 []string `serialize:"true"` - MySlice3 []MyInnerStruct `serialize:"true"` - MySlice4 []*MyInnerStruct2 `serialize:"true"` - MyArray [4]byte `serialize:"true"` - MyInterface Foo `serialize:"true"` - MySlice5 []Foo `serialize:"true"` - InnerStruct3 MyInnerStruct3 `serialize:"true"` - MyPointer *Foo `serialize:"true"` + InnerStruct MyInnerStruct `serialize:"true"` + InnerStruct2 *MyInnerStruct `serialize:"true"` + Member1 int64 `serialize:"true"` + Member2 uint16 `serialize:"true"` + MyArray2 [5]string `serialize:"true"` + MyArray3 [3]MyInnerStruct `serialize:"true"` + MyArray4 [2]*MyInnerStruct2 `serialize:"true"` + MySlice []byte `serialize:"true"` + MySlice2 []string `serialize:"true"` + MySlice3 []MyInnerStruct `serialize:"true"` + MySlice4 []*MyInnerStruct2 `serialize:"true"` + MyArray [4]byte `serialize:"true"` + MyInterface Foo `serialize:"true"` + MySlice5 []Foo `serialize:"true"` + InnerStruct3 MyInnerStruct3 `serialize:"true"` + MyPointer *Foo `serialize:"true"` + MyMap1 map[string]string `serialize:"true"` + MyMap2 map[int32][]MyInnerStruct3 `serialize:"true"` + MyMap3 map[MyInnerStruct2][]int32 `serialize:"true"` + MyMap4 map[int32]*int32 `serialize:"true"` + MyMap5 map[int32]int32 `serialize:"true"` + MyMap6 map[[5]int32]int32 `serialize:"true"` + MyMap7 map[interface{}]interface{} `serialize:"true"` + Uint8 uint8 `serialize:"true"` + Int8 int8 `serialize:"true"` + Uint16 uint16 `serialize:"true"` + Int16 int16 `serialize:"true"` + Uint32 
uint32 `serialize:"true"` + Int32 int32 `serialize:"true"` + Uint64 uint64 `serialize:"true"` + Int64 int64 `serialize:"true"` + Bool bool `serialize:"true"` + String string `serialize:"true"` } // Test marshaling/unmarshaling a complicated struct @@ -107,6 +137,24 @@ func TestStruct(codec GeneralCodec, t testing.TB) { require := require.New(t) temp := Foo(&MyInnerStruct{}) + myMap3 := make(map[MyInnerStruct2][]int32) + myMap3[MyInnerStruct2{false}] = []int32{991, 12} + myMap3[MyInnerStruct2{true}] = []int32{1911, 1921} + + myMap4 := make(map[int32]*int32) + zero := int32(0) + one := int32(1) + myMap4[0] = &zero + myMap4[1] = &one + + myMap6 := make(map[[5]int32]int32) + myMap6[[5]int32{0, 1, 2, 3, 4}] = 1 + myMap6[[5]int32{1, 2, 3, 4, 5}] = 2 + + myMap7 := make(map[interface{}]interface{}) + myMap7["key"] = "value" + myMap7[int32(1)] = int32(2) + myStructInstance := myStruct{ InnerStruct: MyInnerStruct{"hello"}, InnerStruct2: &MyInnerStruct{"yello"}, @@ -130,12 +178,55 @@ func TestStruct(codec GeneralCodec, t testing.TB) { F: &MyInnerStruct2{}, }, MyPointer: &temp, + MyMap1: map[string]string{ + "test": "test", + }, + MyMap2: map[int32][]MyInnerStruct3{ + 199921: { + { + Str: "str-1", + M1: MyInnerStruct{ + Str: "other str", + }, + F: &MyInnerStruct2{}, + }, + { + Str: "str-2", + M1: MyInnerStruct{ + Str: "other str", + }, + F: &MyInnerStruct2{}, + }, + }, + 1921: { + { + Str: "str0", + M1: MyInnerStruct{ + Str: "other str", + }, + F: &MyInnerStruct2{}, + }, + { + Str: "str1", + M1: MyInnerStruct{ + Str: "other str", + }, + F: &MyInnerStruct2{}, + }, + }, + }, + MyMap3: myMap3, + MyMap4: myMap4, + MyMap6: myMap6, + MyMap7: myMap7, } manager := NewDefaultManager() // Register the types that may be unmarshaled into interfaces require.NoError(codec.RegisterType(&MyInnerStruct{})) require.NoError(codec.RegisterType(&MyInnerStruct2{})) + require.NoError(codec.RegisterType("")) + require.NoError(codec.RegisterType(int32(0))) require.NoError(manager.RegisterCodec(0, 
codec)) myStructBytes, err := manager.Marshal(0, myStructInstance) @@ -143,13 +234,18 @@ func TestStruct(codec GeneralCodec, t testing.TB) { bytesLen, err := manager.Size(0, myStructInstance) require.NoError(err) - require.Equal(len(myStructBytes), bytesLen) + require.Len(myStructBytes, bytesLen) myStructUnmarshaled := &myStruct{} version, err := manager.Unmarshal(myStructBytes, myStructUnmarshaled) require.NoError(err) - require.Equal(uint16(0), version) + // In myStructInstance MyMap4 is nil and in myStructUnmarshaled MyMap4 is an + // empty map + require.Empty(myStructUnmarshaled.MyMap5) + myStructUnmarshaled.MyMap5 = nil + + require.Zero(version) require.Equal(myStructInstance, *myStructUnmarshaled) } @@ -157,7 +253,8 @@ func TestRegisterStructTwice(codec GeneralCodec, t testing.TB) { require := require.New(t) require.NoError(codec.RegisterType(&MyInnerStruct{})) - require.Error(codec.RegisterType(&MyInnerStruct{})) + err := codec.RegisterType(&MyInnerStruct{}) + require.ErrorIs(err, ErrDuplicateType) } func TestUInt32(codec GeneralCodec, t testing.TB) { @@ -166,20 +263,19 @@ func TestUInt32(codec GeneralCodec, t testing.TB) { number := uint32(500) manager := NewDefaultManager() - err := manager.RegisterCodec(0, codec) - require.NoError(err) + require.NoError(manager.RegisterCodec(0, codec)) bytes, err := manager.Marshal(0, number) require.NoError(err) bytesLen, err := manager.Size(0, number) require.NoError(err) - require.Equal(len(bytes), bytesLen) + require.Len(bytes, bytesLen) var numberUnmarshaled uint32 version, err := manager.Unmarshal(bytes, &numberUnmarshaled) require.NoError(err) - require.Equal(uint16(0), version) + require.Zero(version) require.Equal(number, numberUnmarshaled) } @@ -188,12 +284,11 @@ func TestUIntPtr(codec GeneralCodec, t testing.TB) { manager := NewDefaultManager() - err := manager.RegisterCodec(0, codec) - require.NoError(err) + require.NoError(manager.RegisterCodec(0, codec)) number := uintptr(500) - _, err = manager.Marshal(0, 
number) - require.Error(err) + _, err := manager.Marshal(0, number) + require.ErrorIs(err, ErrUnsupportedType) } func TestSlice(codec GeneralCodec, t testing.TB) { @@ -201,20 +296,19 @@ func TestSlice(codec GeneralCodec, t testing.TB) { mySlice := []bool{true, false, true, true} manager := NewDefaultManager() - err := manager.RegisterCodec(0, codec) - require.NoError(err) + require.NoError(manager.RegisterCodec(0, codec)) bytes, err := manager.Marshal(0, mySlice) require.NoError(err) bytesLen, err := manager.Size(0, mySlice) require.NoError(err) - require.Equal(len(bytes), bytesLen) + require.Len(bytes, bytesLen) var sliceUnmarshaled []bool version, err := manager.Unmarshal(bytes, &sliceUnmarshaled) require.NoError(err) - require.Equal(uint16(0), version) + require.Zero(version) require.Equal(mySlice, sliceUnmarshaled) } @@ -226,20 +320,19 @@ func TestMaxSizeSlice(codec GeneralCodec, t testing.TB) { mySlice[0] = "first!" mySlice[math.MaxUint16-1] = "last!" manager := NewDefaultManager() - err := manager.RegisterCodec(0, codec) - require.NoError(err) + require.NoError(manager.RegisterCodec(0, codec)) bytes, err := manager.Marshal(0, mySlice) require.NoError(err) bytesLen, err := manager.Size(0, mySlice) require.NoError(err) - require.Equal(len(bytes), bytesLen) + require.Len(bytes, bytesLen) var sliceUnmarshaled []string version, err := manager.Unmarshal(bytes, &sliceUnmarshaled) require.NoError(err) - require.Equal(uint16(0), version) + require.Zero(version) require.Equal(mySlice, sliceUnmarshaled) } @@ -249,20 +342,19 @@ func TestBool(codec GeneralCodec, t testing.TB) { myBool := true manager := NewDefaultManager() - err := manager.RegisterCodec(0, codec) - require.NoError(err) + require.NoError(manager.RegisterCodec(0, codec)) bytes, err := manager.Marshal(0, myBool) require.NoError(err) bytesLen, err := manager.Size(0, myBool) require.NoError(err) - require.Equal(len(bytes), bytesLen) + require.Len(bytes, bytesLen) var boolUnmarshaled bool version, err := 
manager.Unmarshal(bytes, &boolUnmarshaled) require.NoError(err) - require.Equal(uint16(0), version) + require.Zero(version) require.Equal(myBool, boolUnmarshaled) } @@ -272,20 +364,19 @@ func TestArray(codec GeneralCodec, t testing.TB) { myArr := [5]uint64{5, 6, 7, 8, 9} manager := NewDefaultManager() - err := manager.RegisterCodec(0, codec) - require.NoError(err) + require.NoError(manager.RegisterCodec(0, codec)) bytes, err := manager.Marshal(0, myArr) require.NoError(err) bytesLen, err := manager.Size(0, myArr) require.NoError(err) - require.Equal(len(bytes), bytesLen) + require.Len(bytes, bytesLen) var myArrUnmarshaled [5]uint64 version, err := manager.Unmarshal(bytes, &myArrUnmarshaled) require.NoError(err) - require.Equal(uint16(0), version) + require.Zero(version) require.Equal(myArr, myArrUnmarshaled) } @@ -295,20 +386,19 @@ func TestBigArray(codec GeneralCodec, t testing.TB) { myArr := [30000]uint64{5, 6, 7, 8, 9} manager := NewDefaultManager() - err := manager.RegisterCodec(0, codec) - require.NoError(err) + require.NoError(manager.RegisterCodec(0, codec)) bytes, err := manager.Marshal(0, myArr) require.NoError(err) bytesLen, err := manager.Size(0, myArr) require.NoError(err) - require.Equal(len(bytes), bytesLen) + require.Len(bytes, bytesLen) var myArrUnmarshaled [30000]uint64 version, err := manager.Unmarshal(bytes, &myArrUnmarshaled) require.NoError(err) - require.Equal(uint16(0), version) + require.Zero(version) require.Equal(myArr, myArrUnmarshaled) } @@ -318,27 +408,25 @@ func TestPointerToStruct(codec GeneralCodec, t testing.TB) { myPtr := &MyInnerStruct{Str: "Hello!"} manager := NewDefaultManager() - err := manager.RegisterCodec(0, codec) - require.NoError(err) + require.NoError(manager.RegisterCodec(0, codec)) bytes, err := manager.Marshal(0, myPtr) require.NoError(err) bytesLen, err := manager.Size(0, myPtr) require.NoError(err) - require.Equal(len(bytes), bytesLen) + require.Len(bytes, bytesLen) var myPtrUnmarshaled *MyInnerStruct version, err 
:= manager.Unmarshal(bytes, &myPtrUnmarshaled) require.NoError(err) - require.Equal(uint16(0), version) + require.Zero(version) require.Equal(myPtr, myPtrUnmarshaled) } // Test marshalling a slice of structs func TestSliceOfStruct(codec GeneralCodec, t testing.TB) { require := require.New(t) - mySlice := []MyInnerStruct3{ { Str: "One", @@ -351,24 +439,22 @@ func TestSliceOfStruct(codec GeneralCodec, t testing.TB) { F: &MyInnerStruct{"Six"}, }, } - err := codec.RegisterType(&MyInnerStruct{}) - require.NoError(err) + require.NoError(codec.RegisterType(&MyInnerStruct{})) manager := NewDefaultManager() - err = manager.RegisterCodec(0, codec) - require.NoError(err) + require.NoError(manager.RegisterCodec(0, codec)) bytes, err := manager.Marshal(0, mySlice) require.NoError(err) bytesLen, err := manager.Size(0, mySlice) require.NoError(err) - require.Equal(len(bytes), bytesLen) + require.Len(bytes, bytesLen) var mySliceUnmarshaled []MyInnerStruct3 version, err := manager.Unmarshal(bytes, &mySliceUnmarshaled) require.NoError(err) - require.Equal(uint16(0), version) + require.Zero(version) require.Equal(mySlice, mySliceUnmarshaled) } @@ -376,12 +462,10 @@ func TestSliceOfStruct(codec GeneralCodec, t testing.TB) { func TestInterface(codec GeneralCodec, t testing.TB) { require := require.New(t) - err := codec.RegisterType(&MyInnerStruct2{}) - require.NoError(err) + require.NoError(codec.RegisterType(&MyInnerStruct2{})) manager := NewDefaultManager() - err = manager.RegisterCodec(0, codec) - require.NoError(err) + require.NoError(manager.RegisterCodec(0, codec)) var f Foo = &MyInnerStruct2{true} bytes, err := manager.Marshal(0, &f) @@ -389,12 +473,12 @@ func TestInterface(codec GeneralCodec, t testing.TB) { bytesLen, err := manager.Size(0, &f) require.NoError(err) - require.Equal(len(bytes), bytesLen) + require.Len(bytes, bytesLen) var unmarshaledFoo Foo version, err := manager.Unmarshal(bytes, &unmarshaledFoo) require.NoError(err) - require.Equal(uint16(0), version) + 
require.Zero(version) require.Equal(f, unmarshaledFoo) } @@ -410,24 +494,22 @@ func TestSliceOfInterface(codec GeneralCodec, t testing.TB) { Str: ", World!", }, } - err := codec.RegisterType(&MyInnerStruct{}) - require.NoError(err) + require.NoError(codec.RegisterType(&MyInnerStruct{})) manager := NewDefaultManager() - err = manager.RegisterCodec(0, codec) - require.NoError(err) + require.NoError(manager.RegisterCodec(0, codec)) bytes, err := manager.Marshal(0, mySlice) require.NoError(err) bytesLen, err := manager.Size(0, mySlice) require.NoError(err) - require.Equal(len(bytes), bytesLen) + require.Len(bytes, bytesLen) var mySliceUnmarshaled []Foo version, err := manager.Unmarshal(bytes, &mySliceUnmarshaled) require.NoError(err) - require.Equal(uint16(0), version) + require.Zero(version) require.Equal(mySlice, mySliceUnmarshaled) } @@ -443,24 +525,22 @@ func TestArrayOfInterface(codec GeneralCodec, t testing.TB) { Str: ", World!", }, } - err := codec.RegisterType(&MyInnerStruct{}) - require.NoError(err) + require.NoError(codec.RegisterType(&MyInnerStruct{})) manager := NewDefaultManager() - err = manager.RegisterCodec(0, codec) - require.NoError(err) + require.NoError(manager.RegisterCodec(0, codec)) bytes, err := manager.Marshal(0, myArray) require.NoError(err) bytesLen, err := manager.Size(0, myArray) require.NoError(err) - require.Equal(len(bytes), bytesLen) + require.Len(bytes, bytesLen) var myArrayUnmarshaled [2]Foo version, err := manager.Unmarshal(bytes, &myArrayUnmarshaled) require.NoError(err) - require.Equal(uint16(0), version) + require.Zero(version) require.Equal(myArray, myArrayUnmarshaled) } @@ -471,24 +551,22 @@ func TestPointerToInterface(codec GeneralCodec, t testing.TB) { var myinnerStruct Foo = &MyInnerStruct{Str: "Hello!"} myPtr := &myinnerStruct - err := codec.RegisterType(&MyInnerStruct{}) - require.NoError(err) + require.NoError(codec.RegisterType(&MyInnerStruct{})) manager := NewDefaultManager() - err = manager.RegisterCodec(0, codec) - 
require.NoError(err) + require.NoError(manager.RegisterCodec(0, codec)) bytes, err := manager.Marshal(0, &myPtr) require.NoError(err) bytesLen, err := manager.Size(0, &myPtr) require.NoError(err) - require.Equal(len(bytes), bytesLen) + require.Len(bytes, bytesLen) var myPtrUnmarshaled *Foo version, err := manager.Unmarshal(bytes, &myPtrUnmarshaled) require.NoError(err) - require.Equal(uint16(0), version) + require.Zero(version) require.Equal(myPtr, myPtrUnmarshaled) } @@ -498,20 +576,19 @@ func TestString(codec GeneralCodec, t testing.TB) { myString := "Ayy" manager := NewDefaultManager() - err := manager.RegisterCodec(0, codec) - require.NoError(err) + require.NoError(manager.RegisterCodec(0, codec)) bytes, err := manager.Marshal(0, myString) require.NoError(err) bytesLen, err := manager.Size(0, myString) require.NoError(err) - require.Equal(len(bytes), bytesLen) + require.Len(bytes, bytesLen) var stringUnmarshaled string version, err := manager.Unmarshal(bytes, &stringUnmarshaled) require.NoError(err) - require.Equal(uint16(0), version) + require.Zero(version) require.Equal(myString, stringUnmarshaled) } @@ -525,21 +602,20 @@ func TestNilSlice(codec GeneralCodec, t testing.TB) { myStruct := structWithSlice{Slice: nil} manager := NewDefaultManager() - err := manager.RegisterCodec(0, codec) - require.NoError(err) + require.NoError(manager.RegisterCodec(0, codec)) bytes, err := manager.Marshal(0, myStruct) require.NoError(err) bytesLen, err := manager.Size(0, myStruct) require.NoError(err) - require.Equal(len(bytes), bytesLen) + require.Len(bytes, bytesLen) var structUnmarshaled structWithSlice version, err := manager.Unmarshal(bytes, &structUnmarshaled) require.NoError(err) - require.Equal(uint16(0), version) - require.Equal(0, len(structUnmarshaled.Slice)) + require.Zero(version) + require.Empty(structUnmarshaled.Slice) } // Ensure that trying to serialize a struct with an unexported member @@ -558,14 +634,13 @@ func TestSerializeUnexportedField(codec 
GeneralCodec, t testing.TB) { } manager := NewDefaultManager() - err := manager.RegisterCodec(0, codec) - require.NoError(err) + require.NoError(manager.RegisterCodec(0, codec)) - _, err = manager.Marshal(0, myS) - require.Error(err) + _, err := manager.Marshal(0, myS) + require.ErrorIs(err, ErrUnexportedField) _, err = manager.Size(0, myS) - require.Error(err) + require.ErrorIs(err, ErrUnexportedField) } func TestSerializeOfNoSerializeField(codec GeneralCodec, t testing.TB) { @@ -582,20 +657,19 @@ func TestSerializeOfNoSerializeField(codec GeneralCodec, t testing.TB) { UnmarkedField: "No declared serialize", } manager := NewDefaultManager() - err := manager.RegisterCodec(0, codec) - require.NoError(err) + require.NoError(manager.RegisterCodec(0, codec)) marshalled, err := manager.Marshal(0, myS) require.NoError(err) bytesLen, err := manager.Size(0, myS) require.NoError(err) - require.Equal(len(marshalled), bytesLen) + require.Len(marshalled, bytesLen) unmarshalled := s{} version, err := manager.Unmarshal(marshalled, &unmarshalled) require.NoError(err) - require.Equal(uint16(0), version) + require.Zero(version) expectedUnmarshalled := s{SerializedField: "Serialize me"} require.Equal(expectedUnmarshalled, unmarshalled) @@ -610,8 +684,7 @@ func TestNilSliceSerialization(codec GeneralCodec, t testing.TB) { } manager := NewDefaultManager() - err := manager.RegisterCodec(0, codec) - require.NoError(err) + require.NoError(manager.RegisterCodec(0, codec)) val := &simpleSliceStruct{} expected := []byte{0, 0, 0, 0, 0, 0} // 0 for codec version, then nil slice marshaled as 0 length slice @@ -621,13 +694,13 @@ func TestNilSliceSerialization(codec GeneralCodec, t testing.TB) { bytesLen, err := manager.Size(0, val) require.NoError(err) - require.Equal(len(result), bytesLen) + require.Len(result, bytesLen) valUnmarshaled := &simpleSliceStruct{} version, err := manager.Unmarshal(result, &valUnmarshaled) require.NoError(err) - require.Equal(uint16(0), version) - require.Equal(0, 
len(valUnmarshaled.Arr)) + require.Zero(version) + require.Empty(valUnmarshaled.Arr) } // Test marshaling a slice that has 0 elements (but isn't nil) @@ -639,8 +712,7 @@ func TestEmptySliceSerialization(codec GeneralCodec, t testing.TB) { } manager := NewDefaultManager() - err := manager.RegisterCodec(0, codec) - require.NoError(err) + require.NoError(manager.RegisterCodec(0, codec)) val := &simpleSliceStruct{Arr: make([]uint32, 0, 1)} expected := []byte{0, 0, 0, 0, 0, 0} // 0 for codec version (uint16) and 0 for size (uint32) @@ -650,12 +722,12 @@ func TestEmptySliceSerialization(codec GeneralCodec, t testing.TB) { bytesLen, err := manager.Size(0, val) require.NoError(err) - require.Equal(len(result), bytesLen) + require.Len(result, bytesLen) valUnmarshaled := &simpleSliceStruct{} version, err := manager.Unmarshal(result, &valUnmarshaled) require.NoError(err) - require.Equal(uint16(0), version) + require.Zero(version) require.Equal(val, valUnmarshaled) } @@ -670,8 +742,7 @@ func TestSliceWithEmptySerialization(codec GeneralCodec, t testing.TB) { } manager := NewDefaultManager() - err := manager.RegisterCodec(0, codec) - require.NoError(err) + require.NoError(manager.RegisterCodec(0, codec)) val := &nestedSliceStruct{ Arr: make([]emptyStruct, 1000), @@ -683,13 +754,13 @@ func TestSliceWithEmptySerialization(codec GeneralCodec, t testing.TB) { bytesLen, err := manager.Size(0, val) require.NoError(err) - require.Equal(len(result), bytesLen) + require.Len(result, bytesLen) unmarshaled := nestedSliceStruct{} version, err := manager.Unmarshal(expected, &unmarshaled) require.NoError(err) - require.Equal(uint16(0), version) - require.Equal(1000, len(unmarshaled.Arr)) + require.Zero(version) + require.Len(unmarshaled.Arr, 1000) } func TestSliceWithEmptySerializationOutOfMemory(codec GeneralCodec, t testing.TB) { @@ -702,14 +773,13 @@ func TestSliceWithEmptySerializationOutOfMemory(codec GeneralCodec, t testing.TB } manager := NewDefaultManager() - err := 
manager.RegisterCodec(0, codec) - require.NoError(err) + require.NoError(manager.RegisterCodec(0, codec)) val := &nestedSliceStruct{ Arr: make([]emptyStruct, math.MaxInt32), } - _, err = manager.Marshal(0, val) - require.Error(err) + _, err := manager.Marshal(0, val) + require.ErrorIs(err, ErrMaxSliceLenExceeded) bytesLen, err := manager.Size(0, val) require.NoError(err) @@ -720,13 +790,12 @@ func TestSliceTooLarge(codec GeneralCodec, t testing.TB) { require := require.New(t) manager := NewDefaultManager() - err := manager.RegisterCodec(0, codec) - require.NoError(err) + require.NoError(manager.RegisterCodec(0, codec)) val := []struct{}{} b := []byte{0x00, 0x00, 0xff, 0xff, 0xff, 0xff} - _, err = manager.Unmarshal(b, &val) - require.Error(err) + _, err := manager.Unmarshal(b, &val) + require.ErrorIs(err, ErrMaxSliceLenExceeded) } // Ensure serializing structs with negative number members works @@ -741,8 +810,7 @@ func TestNegativeNumbers(codec GeneralCodec, t testing.TB) { } manager := NewDefaultManager() - err := manager.RegisterCodec(0, codec) - require.NoError(err) + require.NoError(manager.RegisterCodec(0, codec)) myS := s{-1, -2, -3, -4} bytes, err := manager.Marshal(0, myS) @@ -750,12 +818,12 @@ func TestNegativeNumbers(codec GeneralCodec, t testing.TB) { bytesLen, err := manager.Size(0, myS) require.NoError(err) - require.Equal(len(bytes), bytesLen) + require.Len(bytes, bytesLen) mySUnmarshaled := s{} version, err := manager.Unmarshal(bytes, &mySUnmarshaled) require.NoError(err) - require.Equal(uint16(0), version) + require.Zero(version) require.Equal(myS, mySUnmarshaled) } @@ -769,12 +837,11 @@ func TestTooLargeUnmarshal(codec GeneralCodec, t testing.TB) { bytes := []byte{0, 0, 0, 0} manager := NewManager(3) - err := manager.RegisterCodec(0, codec) - require.NoError(err) + require.NoError(manager.RegisterCodec(0, codec)) s := inner{} - _, err = manager.Unmarshal(bytes, &s) - require.Error(err) + _, err := manager.Unmarshal(bytes, &s) + require.ErrorIs(err, 
ErrUnmarshalTooBig) } type outerInterface interface { @@ -807,36 +874,14 @@ func TestUnmarshalInvalidInterface(codec GeneralCodec, t testing.TB) { s := outer{} version, err := manager.Unmarshal(bytes, &s) require.NoError(err) - require.Equal(uint16(0), version) + require.Zero(version) } { bytes := []byte{0, 0, 0, 0, 0, 1} s := outer{} _, err := manager.Unmarshal(bytes, &s) - require.Error(err) - } -} - -// Ensure deserializing slices that have been length restricted errors correctly -func TestRestrictedSlice(codec GeneralCodec, t testing.TB) { - require := require.New(t) - - type inner struct { - Bytes []byte `serialize:"true" len:"2"` + require.ErrorIs(err, ErrDoesNotImplementInterface) } - bytes := []byte{0, 0, 0, 0, 0, 3, 0, 1, 2} - - manager := NewDefaultManager() - err := manager.RegisterCodec(0, codec) - require.NoError(err) - - s := inner{} - _, err = manager.Unmarshal(bytes, &s) - require.Error(err) - - s.Bytes = []byte{0, 1, 2} - _, err = manager.Marshal(0, s) - require.Error(err) } // Test unmarshaling something with extra data @@ -844,22 +889,21 @@ func TestExtraSpace(codec GeneralCodec, t testing.TB) { require := require.New(t) manager := NewDefaultManager() - err := manager.RegisterCodec(0, codec) - require.NoError(err) + require.NoError(manager.RegisterCodec(0, codec)) // codec version 0x0000 then 0x01 for b then 0x02 as extra data. 
byteSlice := []byte{0x00, 0x00, 0x01, 0x02} var b byte - _, err = manager.Unmarshal(byteSlice, &b) - require.Error(err) + _, err := manager.Unmarshal(byteSlice, &b) + require.ErrorIs(err, ErrExtraSpace) } -// Ensure deserializing slices that have been length restricted errors correctly +// Ensure deserializing slices whose lengths exceed MaxInt32 error correctly func TestSliceLengthOverflow(codec GeneralCodec, t testing.TB) { require := require.New(t) type inner struct { - Vals []uint32 `serialize:"true" len:"2"` + Vals []uint32 `serialize:"true"` } bytes := []byte{ // Codec Version: @@ -869,20 +913,19 @@ func TestSliceLengthOverflow(codec GeneralCodec, t testing.TB) { } manager := NewDefaultManager() - err := manager.RegisterCodec(0, codec) - require.NoError(err) + require.NoError(manager.RegisterCodec(0, codec)) s := inner{} - _, err = manager.Unmarshal(bytes, &s) - require.Error(err) + _, err := manager.Unmarshal(bytes, &s) + require.ErrorIs(err, ErrMaxSliceLenExceeded) } type MultipleVersionsStruct struct { - BothTags string `tag1:"true" tag2:"true"` + BothTags string `tag1:"true" tag2:"true"` SingleTag1 string `tag1:"true"` - SingleTag2 string `tag2:"true"` + SingleTag2 string ` tag2:"true"` EitherTags1 string `tag1:"false" tag2:"true"` - EitherTags2 string `tag1:"true" tag2:"false"` + EitherTags2 string `tag1:"true" tag2:"false"` NoTags string `tag1:"false" tag2:"false"` } @@ -901,8 +944,7 @@ func TestMultipleTags(codec GeneralCodec, t testing.TB) { for _, codecVersion := range []uint16{0, 1, 2022} { require := require.New(t) - err := manager.RegisterCodec(codecVersion, codec) - require.NoError(err) + require.NoError(manager.RegisterCodec(codecVersion, codec)) bytes, err := manager.Marshal(codecVersion, inputs) require.NoError(err) @@ -919,3 +961,147 @@ func TestMultipleTags(codec GeneralCodec, t testing.TB) { require.Empty(output.NoTags) } } + +func TestMap(codec GeneralCodec, t testing.TB) { + require := require.New(t) + + data1 := 
map[string]MyInnerStruct2{ + "test": {true}, + "bar": {false}, + } + + data2 := map[string]MyInnerStruct2{ + "bar": {false}, + "test": {true}, + } + + data3 := map[string]MyInnerStruct2{ + "bar": {false}, + } + + outerMap := make(map[int32]map[string]MyInnerStruct2) + outerMap[3] = data1 + outerMap[19] = data2 + + outerArray := [3]map[string]MyInnerStruct2{ + data1, + data2, + data3, + } + + manager := NewDefaultManager() + require.NoError(manager.RegisterCodec(0, codec)) + + data1Bytes, err := manager.Marshal(0, data1) + require.NoError(err) + + // data1 and data2 should have the same byte representation even though + // their key-value pairs were defined in a different order. + data2Bytes, err := manager.Marshal(0, data2) + require.NoError(err) + require.Equal(data1Bytes, data2Bytes) + + // Make sure Size returns the correct size for the marshalled data + data1Size, err := manager.Size(0, data1) + require.NoError(err) + require.Len(data1Bytes, data1Size) + + var unmarshalledData1 map[string]MyInnerStruct2 + _, err = manager.Unmarshal(data1Bytes, &unmarshalledData1) + require.NoError(err) + require.Equal(data1, unmarshalledData1) + + outerMapBytes, err := manager.Marshal(0, outerMap) + require.NoError(err) + + outerMapSize, err := manager.Size(0, outerMap) + require.NoError(err) + require.Len(outerMapBytes, outerMapSize) + + var unmarshalledOuterMap map[int32]map[string]MyInnerStruct2 + _, err = manager.Unmarshal(outerMapBytes, &unmarshalledOuterMap) + require.NoError(err) + require.Equal(outerMap, unmarshalledOuterMap) + + outerArrayBytes, err := manager.Marshal(0, outerArray) + require.NoError(err) + + outerArraySize, err := manager.Size(0, outerArray) + require.NoError(err) + require.Len(outerArrayBytes, outerArraySize) +} + +func TestCanNotMarshalLargeSlices(codec GeneralCodec, t testing.TB) { + require := require.New(t) + + data := make([]uint16, 1_000_000) + + manager := NewManager(math.MaxInt) + require.NoError(manager.RegisterCodec(0, codec)) + + _, err := 
manager.Marshal(0, data) + require.ErrorIs(err, ErrMaxSliceLenExceeded) +} + +func TestCanNotUnmarshalLargeSlices(codec GeneralCodec, t testing.TB) { + require := require.New(t) + + writer := wrappers.Packer{ + Bytes: make([]byte, 2+4+2_000_000), + } + writer.PackShort(0) + writer.PackInt(1_000_000) + + manager := NewManager(math.MaxInt) + require.NoError(manager.RegisterCodec(0, codec)) + + var data []uint16 + _, err := manager.Unmarshal(writer.Bytes, &data) + require.ErrorIs(err, ErrMaxSliceLenExceeded) +} + +func TestCanMarshalLargeSlices(codec GeneralCodec, t testing.TB) { + require := require.New(t) + + data := make([]uint16, 1_000_000) + + manager := NewManager(math.MaxInt) + require.NoError(manager.RegisterCodec(0, codec)) + + bytes, err := manager.Marshal(0, data) + require.NoError(err) + + var unmarshalledData []uint16 + _, err = manager.Unmarshal(bytes, &unmarshalledData) + require.NoError(err) + require.Equal(data, unmarshalledData) +} + +func FuzzStructUnmarshal(codec GeneralCodec, f *testing.F) { + manager := NewDefaultManager() + // Register the types that may be unmarshaled into interfaces + require.NoError(f, codec.RegisterType(&MyInnerStruct{})) + require.NoError(f, codec.RegisterType(&MyInnerStruct2{})) + require.NoError(f, codec.RegisterType("")) + require.NoError(f, codec.RegisterType(int32(0))) + require.NoError(f, manager.RegisterCodec(0, codec)) + + f.Fuzz(func(t *testing.T, bytes []byte) { + require := require.New(t) + + myParsedStruct := &myStruct{} + version, err := manager.Unmarshal(bytes, myParsedStruct) + if err != nil { + return + } + require.Zero(version) + + marshalled, err := manager.Marshal(version, myParsedStruct) + require.NoError(err) + require.Equal(bytes, marshalled) + + size, err := manager.Size(version, myParsedStruct) + require.NoError(err) + require.Len(bytes, size) + }) +} diff --git a/avalanchego/config/config.go b/avalanchego/config/config.go index ea71bf6e..5371c0a2 100644 --- a/avalanchego/config/config.go +++ 
b/avalanchego/config/config.go @@ -1,10 +1,9 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package config import ( - "context" "crypto/tls" "encoding/base64" "encoding/json" @@ -12,7 +11,6 @@ import ( "fmt" "io/fs" "math" - "net" "os" "path/filepath" "strings" @@ -25,12 +23,10 @@ import ( "github.com/ava-labs/avalanchego/genesis" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/ipcs" - "github.com/ava-labs/avalanchego/nat" "github.com/ava-labs/avalanchego/network" "github.com/ava-labs/avalanchego/network/dialer" "github.com/ava-labs/avalanchego/network/throttling" "github.com/ava-labs/avalanchego/node" - "github.com/ava-labs/avalanchego/snow/consensus/avalanche" "github.com/ava-labs/avalanchego/snow/consensus/snowball" "github.com/ava-labs/avalanchego/snow/networking/benchlist" "github.com/ava-labs/avalanchego/snow/networking/router" @@ -41,7 +37,6 @@ import ( "github.com/ava-labs/avalanchego/utils/compression" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/bls" - "github.com/ava-labs/avalanchego/utils/dynamicip" "github.com/ava-labs/avalanchego/utils/ips" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/password" @@ -59,58 +54,91 @@ const ( chainConfigFileName = "config" chainUpgradeFileName = "upgrade" subnetConfigFileExt = ".json" - ipResolutionTimeout = 30 * time.Second + + authDeprecationMsg = "Auth API is deprecated" + ipcDeprecationMsg = "IPC API is deprecated" + keystoreDeprecationMsg = "keystore API is deprecated" + acceptedFrontierGossipDeprecationMsg = "push-based accepted frontier gossip is deprecated" + peerListPushGossipDeprecationMsg = "push-based peer list gossip is deprecated" ) var ( // Deprecated key --> deprecation message (i.e. 
which key replaces it) - deprecatedKeys = map[string]string{ - NetworkCompressionEnabledKey: fmt.Sprintf("use --%s instead", NetworkCompressionTypeKey), - } - - errInvalidStakerWeights = errors.New("staking weights must be positive") - errStakingDisableOnPublicNetwork = errors.New("staking disabled on public network") - errAuthPasswordTooWeak = errors.New("API auth password is not strong enough") - errInvalidUptimeRequirement = errors.New("uptime requirement must be in the range [0, 1]") - errMinValidatorStakeAboveMax = errors.New("minimum validator stake can't be greater than maximum validator stake") - errInvalidDelegationFee = errors.New("delegation fee must be in the range [0, 1,000,000]") - errInvalidMinStakeDuration = errors.New("min stake duration must be > 0") - errMinStakeDurationAboveMax = errors.New("max stake duration can't be less than min stake duration") - errStakeMaxConsumptionTooLarge = fmt.Errorf("max stake consumption must be less than or equal to %d", reward.PercentDenominator) - errStakeMaxConsumptionBelowMin = errors.New("stake max consumption can't be less than min stake consumption") - errStakeMintingPeriodBelowMin = errors.New("stake minting period can't be less than max stake duration") - errCannotTrackPrimaryNetwork = errors.New("cannot track primary network") - errStakingKeyContentUnset = fmt.Errorf("%s key not set but %s set", StakingTLSKeyContentKey, StakingCertContentKey) - errStakingCertContentUnset = fmt.Errorf("%s key set but %s not set", StakingTLSKeyContentKey, StakingCertContentKey) - errMissingStakingSigningKeyFile = errors.New("missing staking signing key file") - errTracingEndpointEmpty = fmt.Errorf("%s cannot be empty", TracingEndpointKey) - errPluginDirNotADirectory = errors.New("plugin dir is not a directory") - errZstdNotSupported = errors.New("zstd compression not supported until v1.10") + // TODO: deprecate "BootstrapIDsKey" and "BootstrapIPsKey" + commitThresholdDeprecationMsg = fmt.Sprintf("use --%s instead", 
SnowCommitThresholdKey) + deprecatedKeys = map[string]string{ + APIAuthRequiredKey: authDeprecationMsg, + APIAuthPasswordKey: authDeprecationMsg, + APIAuthPasswordFileKey: authDeprecationMsg, + + IpcAPIEnabledKey: ipcDeprecationMsg, + IpcsChainIDsKey: ipcDeprecationMsg, + IpcsPathKey: ipcDeprecationMsg, + + KeystoreAPIEnabledKey: keystoreDeprecationMsg, + + ConsensusGossipAcceptedFrontierValidatorSizeKey: acceptedFrontierGossipDeprecationMsg, + ConsensusGossipAcceptedFrontierNonValidatorSizeKey: acceptedFrontierGossipDeprecationMsg, + ConsensusGossipAcceptedFrontierPeerSizeKey: acceptedFrontierGossipDeprecationMsg, + ConsensusGossipOnAcceptValidatorSizeKey: acceptedFrontierGossipDeprecationMsg, + ConsensusGossipOnAcceptNonValidatorSizeKey: acceptedFrontierGossipDeprecationMsg, + ConsensusGossipOnAcceptPeerSizeKey: acceptedFrontierGossipDeprecationMsg, + + NetworkPeerListValidatorGossipSizeKey: peerListPushGossipDeprecationMsg, + NetworkPeerListNonValidatorGossipSizeKey: peerListPushGossipDeprecationMsg, + NetworkPeerListPeersGossipSizeKey: peerListPushGossipDeprecationMsg, + NetworkPeerListGossipFreqKey: peerListPushGossipDeprecationMsg, + + SnowRogueCommitThresholdKey: commitThresholdDeprecationMsg, + SnowVirtuousCommitThresholdKey: commitThresholdDeprecationMsg, + } + + errConflictingACPOpinion = errors.New("supporting and objecting to the same ACP") + errConflictingImplicitACPOpinion = errors.New("objecting to enabled ACP") + errSybilProtectionDisabledStakerWeights = errors.New("sybil protection disabled weights must be positive") + errSybilProtectionDisabledOnPublicNetwork = errors.New("sybil protection disabled on public network") + errAuthPasswordTooWeak = errors.New("API auth password is not strong enough") + errInvalidUptimeRequirement = errors.New("uptime requirement must be in the range [0, 1]") + errMinValidatorStakeAboveMax = errors.New("minimum validator stake can't be greater than maximum validator stake") + errInvalidDelegationFee = 
errors.New("delegation fee must be in the range [0, 1,000,000]") + errInvalidMinStakeDuration = errors.New("min stake duration must be > 0") + errMinStakeDurationAboveMax = errors.New("max stake duration can't be less than min stake duration") + errStakeMaxConsumptionTooLarge = fmt.Errorf("max stake consumption must be less than or equal to %d", reward.PercentDenominator) + errStakeMaxConsumptionBelowMin = errors.New("stake max consumption can't be less than min stake consumption") + errStakeMintingPeriodBelowMin = errors.New("stake minting period can't be less than max stake duration") + errCannotTrackPrimaryNetwork = errors.New("cannot track primary network") + errStakingKeyContentUnset = fmt.Errorf("%s key not set but %s set", StakingTLSKeyContentKey, StakingCertContentKey) + errStakingCertContentUnset = fmt.Errorf("%s key set but %s not set", StakingTLSKeyContentKey, StakingCertContentKey) + errMissingStakingSigningKeyFile = errors.New("missing staking signing key file") + errTracingEndpointEmpty = fmt.Errorf("%s cannot be empty", TracingEndpointKey) + errPluginDirNotADirectory = errors.New("plugin dir is not a directory") + errCannotReadDirectory = errors.New("cannot read directory") + errUnmarshalling = errors.New("unmarshalling failed") + errFileDoesNotExist = errors.New("file does not exist") + errGzipDeprecatedMsg = errors.New("gzip compression is not supported, use zstd or no compression") ) -func getConsensusConfig(v *viper.Viper) avalanche.Parameters { - return avalanche.Parameters{ - Parameters: snowball.Parameters{ - K: v.GetInt(SnowSampleSizeKey), - Alpha: v.GetInt(SnowQuorumSizeKey), - // During the X-chain linearization we require BetaVirtuous and - // BetaRogue to be equal. Therefore we use the more conservative - // BetaRogue value for both BetaVirtuous and BetaRogue. - // - // TODO: After the X-chain linearization use the - // SnowVirtuousCommitThresholdKey as before. 
- BetaVirtuous: v.GetInt(SnowRogueCommitThresholdKey), - BetaRogue: v.GetInt(SnowRogueCommitThresholdKey), - ConcurrentRepolls: v.GetInt(SnowConcurrentRepollsKey), - OptimalProcessing: v.GetInt(SnowOptimalProcessingKey), - MaxOutstandingItems: v.GetInt(SnowMaxProcessingKey), - MaxItemProcessingTime: v.GetDuration(SnowMaxTimeProcessingKey), - MixedQueryNumPushVdr: int(v.GetUint(SnowMixedQueryNumPushVdrKey)), - MixedQueryNumPushNonVdr: int(v.GetUint(SnowMixedQueryNumPushNonVdrKey)), - }, - BatchSize: v.GetInt(SnowAvalancheBatchSizeKey), - Parents: v.GetInt(SnowAvalancheNumParentsKey), - } +func getConsensusConfig(v *viper.Viper) snowball.Parameters { + p := snowball.Parameters{ + K: v.GetInt(SnowSampleSizeKey), + AlphaPreference: v.GetInt(SnowPreferenceQuorumSizeKey), + AlphaConfidence: v.GetInt(SnowConfidenceQuorumSizeKey), + BetaVirtuous: v.GetInt(SnowCommitThresholdKey), + BetaRogue: v.GetInt(SnowCommitThresholdKey), + ConcurrentRepolls: v.GetInt(SnowConcurrentRepollsKey), + OptimalProcessing: v.GetInt(SnowOptimalProcessingKey), + MaxOutstandingItems: v.GetInt(SnowMaxProcessingKey), + MaxItemProcessingTime: v.GetDuration(SnowMaxTimeProcessingKey), + } + if v.IsSet(SnowQuorumSizeKey) { + p.AlphaPreference = v.GetInt(SnowQuorumSizeKey) + p.AlphaConfidence = p.AlphaPreference + } + if v.IsSet(SnowRogueCommitThresholdKey) { + p.BetaVirtuous = v.GetInt(SnowRogueCommitThresholdKey) + p.BetaRogue = v.GetInt(SnowRogueCommitThresholdKey) + } + return p } func getLoggingConfig(v *viper.Viper) (logging.Config, error) { @@ -232,14 +260,15 @@ func getHTTPConfig(v *viper.Viper) (node.HTTPConfig, error) { MetricsAPIEnabled: v.GetBool(MetricsAPIEnabledKey), HealthAPIEnabled: v.GetBool(HealthAPIEnabledKey), }, - HTTPHost: v.GetString(HTTPHostKey), - HTTPPort: uint16(v.GetUint(HTTPPortKey)), - HTTPSEnabled: v.GetBool(HTTPSEnabledKey), - HTTPSKey: httpsKey, - HTTPSCert: httpsCert, - APIAllowedOrigins: v.GetStringSlice(HTTPAllowedOrigins), - ShutdownTimeout: 
v.GetDuration(HTTPShutdownTimeoutKey), - ShutdownWait: v.GetDuration(HTTPShutdownWaitKey), + HTTPHost: v.GetString(HTTPHostKey), + HTTPPort: uint16(v.GetUint(HTTPPortKey)), + HTTPSEnabled: v.GetBool(HTTPSEnabledKey), + HTTPSKey: httpsKey, + HTTPSCert: httpsCert, + HTTPAllowedOrigins: v.GetStringSlice(HTTPAllowedOrigins), + HTTPAllowedHosts: v.GetStringSlice(HTTPAllowedHostsKey), + ShutdownTimeout: v.GetDuration(HTTPShutdownTimeoutKey), + ShutdownWait: v.GetDuration(HTTPShutdownWaitKey), } config.APIAuthConfig, err = getAPIAuthConfig(v) @@ -309,45 +338,57 @@ func getGossipConfig(v *viper.Viper) subnets.GossipConfig { func getNetworkConfig( v *viper.Viper, - stakingEnabled bool, + networkID uint32, + sybilProtectionEnabled bool, halflife time.Duration, - networkID uint32, // TODO remove after cortina upgrade ) (network.Config, error) { // Set the max number of recent inbound connections upgraded to be // equal to the max number of inbound connections per second. - maxInboundConnsPerSec := v.GetFloat64(InboundThrottlerMaxConnsPerSecKey) - upgradeCooldown := v.GetDuration(InboundConnUpgradeThrottlerCooldownKey) + maxInboundConnsPerSec := v.GetFloat64(NetworkInboundThrottlerMaxConnsPerSecKey) + upgradeCooldown := v.GetDuration(NetworkInboundConnUpgradeThrottlerCooldownKey) upgradeCooldownInSeconds := upgradeCooldown.Seconds() maxRecentConnsUpgraded := int(math.Ceil(maxInboundConnsPerSec * upgradeCooldownInSeconds)) - var ( - compressionType compression.Type - err error - ) - if v.IsSet(NetworkCompressionTypeKey) { - if v.IsSet(NetworkCompressionEnabledKey) { - return network.Config{}, fmt.Errorf("cannot set both %q and %q", NetworkCompressionTypeKey, NetworkCompressionEnabledKey) - } + compressionType, err := compression.TypeFromString(v.GetString(NetworkCompressionTypeKey)) + if err != nil { + return network.Config{}, err + } + if compressionType == compression.TypeGzip { + return network.Config{}, errGzipDeprecatedMsg + } - compressionType, err = 
compression.TypeFromString(v.GetString(NetworkCompressionTypeKey)) - if err != nil { - return network.Config{}, err - } - } else { - if v.GetBool(NetworkCompressionEnabledKey) { - compressionType = constants.DefaultNetworkCompressionType - } else { - compressionType = compression.TypeNone + allowPrivateIPs := !constants.ProductionNetworkIDs.Contains(networkID) + if v.IsSet(NetworkAllowPrivateIPsKey) { + allowPrivateIPs = v.GetBool(NetworkAllowPrivateIPsKey) + } + + var supportedACPs set.Set[uint32] + for _, acp := range v.GetIntSlice(ACPSupportKey) { + if acp < 0 || acp > math.MaxInt32 { + return network.Config{}, fmt.Errorf("invalid ACP: %d", acp) } + supportedACPs.Add(uint32(acp)) } - cortinaTime := version.GetCortinaTime(networkID) - if compressionType == compression.TypeZstd && !time.Now().After(cortinaTime) { - // TODO remove after cortina upgrade - return network.Config{}, errZstdNotSupported + var objectedACPs set.Set[uint32] + for _, acp := range v.GetIntSlice(ACPObjectKey) { + if acp < 0 || acp > math.MaxInt32 { + return network.Config{}, fmt.Errorf("invalid ACP: %d", acp) + } + objectedACPs.Add(uint32(acp)) + } + if supportedACPs.Overlaps(objectedACPs) { + return network.Config{}, errConflictingACPOpinion } + if constants.ScheduledACPs.Overlaps(objectedACPs) { + return network.Config{}, errConflictingImplicitACPOpinion + } + + // Because this node version has scheduled these ACPs, we should notify + // peers that we support these upgrades. 
+ supportedACPs.Union(constants.ScheduledACPs) + config := network.Config{ - // Throttling ThrottlerConfig: network.ThrottlerConfig{ MaxInboundConnsPerSec: maxInboundConnsPerSec, InboundConnUpgradeThrottlerConfig: throttling.InboundConnUpgradeThrottlerConfig{ @@ -382,7 +423,7 @@ func getNetworkConfig( }, HealthConfig: network.HealthConfig{ - Enabled: stakingEnabled, + Enabled: sybilProtectionEnabled, MaxTimeSinceMsgSent: v.GetDuration(NetworkHealthMaxTimeSinceMsgSentKey), MaxTimeSinceMsgReceived: v.GetDuration(NetworkHealthMaxTimeSinceMsgReceivedKey), MaxPortionSendQueueBytesFull: v.GetFloat64(NetworkHealthMaxPortionSendQueueFillKey), @@ -395,8 +436,8 @@ func getNetworkConfig( ProxyReadHeaderTimeout: v.GetDuration(NetworkTCPProxyReadTimeoutKey), DialerConfig: dialer.Config{ - ThrottleRps: v.GetUint32(OutboundConnectionThrottlingRpsKey), - ConnectionTimeout: v.GetDuration(OutboundConnectionTimeoutKey), + ThrottleRps: v.GetUint32(NetworkOutboundConnectionThrottlingRpsKey), + ConnectionTimeout: v.GetDuration(NetworkOutboundConnectionTimeoutKey), }, TLSKeyLogFile: v.GetString(NetworkTLSKeyLogFileKey), @@ -412,6 +453,8 @@ func getNetworkConfig( PeerListNonValidatorGossipSize: v.GetUint32(NetworkPeerListNonValidatorGossipSizeKey), PeerListPeersGossipSize: v.GetUint32(NetworkPeerListPeersGossipSizeKey), PeerListGossipFreq: v.GetDuration(NetworkPeerListGossipFreqKey), + PeerListPullGossipFreq: v.GetDuration(NetworkPeerListPullGossipFreqKey), + PeerListBloomResetFreq: v.GetDuration(NetworkPeerListBloomResetFreqKey), }, DelayConfig: network.DelayConfig{ @@ -422,10 +465,13 @@ func getNetworkConfig( MaxClockDifference: v.GetDuration(NetworkMaxClockDifferenceKey), CompressionType: compressionType, PingFrequency: v.GetDuration(NetworkPingFrequencyKey), - AllowPrivateIPs: v.GetBool(NetworkAllowPrivateIPsKey), + AllowPrivateIPs: allowPrivateIPs, UptimeMetricFreq: v.GetDuration(UptimeMetricFreqKey), MaximumInboundMessageTimeout: v.GetDuration(NetworkMaximumInboundTimeoutKey), + 
SupportedACPs: supportedACPs, + ObjectedACPs: objectedACPs, + RequireValidatorToConnect: v.GetBool(NetworkRequireValidatorToConnectKey), PeerReadBufferSize: int(v.GetUint(NetworkPeerReadBufferSizeKey)), PeerWriteBufferSize: int(v.GetUint(NetworkPeerWriteBufferSizeKey)), @@ -441,9 +487,17 @@ func getNetworkConfig( case config.HealthConfig.MaxPortionSendQueueBytesFull < 0 || config.HealthConfig.MaxPortionSendQueueBytesFull > 1: return network.Config{}, fmt.Errorf("%s must be in [0,1]", NetworkHealthMaxPortionSendQueueFillKey) case config.DialerConfig.ConnectionTimeout < 0: - return network.Config{}, fmt.Errorf("%q must be >= 0", OutboundConnectionTimeoutKey) + return network.Config{}, fmt.Errorf("%q must be >= 0", NetworkOutboundConnectionTimeoutKey) case config.PeerListGossipFreq < 0: return network.Config{}, fmt.Errorf("%s must be >= 0", NetworkPeerListGossipFreqKey) + case config.PeerListPullGossipFreq < 0: + return network.Config{}, fmt.Errorf("%s must be >= 0", NetworkPeerListPullGossipFreqKey) + case config.PeerListBloomResetFreq < 0: + return network.Config{}, fmt.Errorf("%s must be >= 0", NetworkPeerListBloomResetFreqKey) + case config.ThrottlerConfig.InboundMsgThrottlerConfig.CPUThrottlerConfig.MaxRecheckDelay < constants.MinInboundThrottlerMaxRecheckDelay: + return network.Config{}, fmt.Errorf("%s must be >= %d", InboundThrottlerCPUMaxRecheckDelayKey, constants.MinInboundThrottlerMaxRecheckDelay) + case config.ThrottlerConfig.InboundMsgThrottlerConfig.DiskThrottlerConfig.MaxRecheckDelay < constants.MinInboundThrottlerMaxRecheckDelay: + return network.Config{}, fmt.Errorf("%s must be >= %d", InboundThrottlerDiskMaxRecheckDelayKey, constants.MinInboundThrottlerMaxRecheckDelay) case config.MaxReconnectDelay < 0: return network.Config{}, fmt.Errorf("%s must be >= 0", NetworkMaxReconnectDelayKey) case config.InitialReconnectDelay < 0: @@ -464,8 +518,11 @@ func getNetworkConfig( return config, nil } -func getBenchlistConfig(v *viper.Viper, consensusParameters 
avalanche.Parameters) (benchlist.Config, error) { - alpha := consensusParameters.Alpha +func getBenchlistConfig(v *viper.Viper, consensusParameters snowball.Parameters) (benchlist.Config, error) { + // AlphaConfidence is used here to ensure that benching can't cause a + // liveness failure. If AlphaPreference were used, the benchlist may grow to + // a point that committing would be extremely unlikely to happen. + alpha := consensusParameters.AlphaConfidence k := consensusParameters.K config := benchlist.Config{ Threshold: v.GetInt(BenchlistFailThresholdKey), @@ -522,14 +579,14 @@ func getStateSyncConfig(v *viper.Viper) (node.StateSyncConfig, error) { func getBootstrapConfig(v *viper.Viper, networkID uint32) (node.BootstrapConfig, error) { config := node.BootstrapConfig{ - RetryBootstrap: v.GetBool(RetryBootstrapKey), - RetryBootstrapWarnFrequency: v.GetInt(RetryBootstrapWarnFrequencyKey), BootstrapBeaconConnectionTimeout: v.GetDuration(BootstrapBeaconConnectionTimeoutKey), BootstrapMaxTimeGetAncestors: v.GetDuration(BootstrapMaxTimeGetAncestorsKey), BootstrapAncestorsMaxContainersSent: int(v.GetUint(BootstrapAncestorsMaxContainersSentKey)), BootstrapAncestorsMaxContainersReceived: int(v.GetUint(BootstrapAncestorsMaxContainersReceivedKey)), } + // TODO: Add a "BootstrappersKey" flag to more clearly enforce ID and IP + // length equality. 
ipsSet := v.IsSet(BootstrapIPsKey) idsSet := v.IsSet(BootstrapIDsKey) if ipsSet && !idsSet { @@ -539,112 +596,64 @@ func getBootstrapConfig(v *viper.Viper, networkID uint32) (node.BootstrapConfig, return node.BootstrapConfig{}, fmt.Errorf("set %q but didn't set %q", BootstrapIDsKey, BootstrapIPsKey) } - bootstrapIPs, bootstrapIDs := genesis.SampleBeacons(networkID, 5) - if ipsSet { - bootstrapIPs = strings.Split(v.GetString(BootstrapIPsKey), ",") - } - for _, ip := range bootstrapIPs { + bootstrapIPs := strings.Split(v.GetString(BootstrapIPsKey), ",") + config.Bootstrappers = make([]genesis.Bootstrapper, 0, len(bootstrapIPs)) + for _, bootstrapIP := range bootstrapIPs { + ip := strings.TrimSpace(bootstrapIP) if ip == "" { continue } + addr, err := ips.ToIPPort(ip) if err != nil { return node.BootstrapConfig{}, fmt.Errorf("couldn't parse bootstrap ip %s: %w", ip, err) } - config.BootstrapIPs = append(config.BootstrapIPs, addr) + config.Bootstrappers = append(config.Bootstrappers, genesis.Bootstrapper{ + // ID is populated below + IP: ips.IPDesc(addr), + }) } - if idsSet { - bootstrapIDs = strings.Split(v.GetString(BootstrapIDsKey), ",") - } - for _, id := range bootstrapIDs { + bootstrapIDs := strings.Split(v.GetString(BootstrapIDsKey), ",") + bootstrapNodeIDs := make([]ids.NodeID, 0, len(bootstrapIDs)) + for _, bootstrapID := range bootstrapIDs { + id := strings.TrimSpace(bootstrapID) if id == "" { continue } + nodeID, err := ids.NodeIDFromString(id) if err != nil { return node.BootstrapConfig{}, fmt.Errorf("couldn't parse bootstrap peer id %s: %w", id, err) } - config.BootstrapIDs = append(config.BootstrapIDs, nodeID) + bootstrapNodeIDs = append(bootstrapNodeIDs, nodeID) } - lenIPs := len(config.BootstrapIPs) - lenIDs := len(config.BootstrapIDs) - if lenIPs != lenIDs { - return node.BootstrapConfig{}, fmt.Errorf("expected the number of bootstrapIPs (%d) to match the number of bootstrapIDs (%d)", lenIPs, lenIDs) + if len(config.Bootstrappers) != 
len(bootstrapNodeIDs) { + return node.BootstrapConfig{}, fmt.Errorf("expected the number of bootstrapIPs (%d) to match the number of bootstrapIDs (%d)", len(config.Bootstrappers), len(bootstrapNodeIDs)) + } + for i, nodeID := range bootstrapNodeIDs { + config.Bootstrappers[i].ID = nodeID } return config, nil } func getIPConfig(v *viper.Viper) (node.IPConfig, error) { - ipResolutionService := v.GetString(PublicIPResolutionServiceKey) - ipResolutionFreq := v.GetDuration(PublicIPResolutionFreqKey) - if ipResolutionFreq <= 0 { + ipConfig := node.IPConfig{ + PublicIP: v.GetString(PublicIPKey), + PublicIPResolutionService: v.GetString(PublicIPResolutionServiceKey), + PublicIPResolutionFreq: v.GetDuration(PublicIPResolutionFreqKey), + ListenHost: v.GetString(StakingHostKey), + ListenPort: uint16(v.GetUint(StakingPortKey)), + } + if ipConfig.PublicIPResolutionFreq <= 0 { return node.IPConfig{}, fmt.Errorf("%q must be > 0", PublicIPResolutionFreqKey) } - - stakingPort := uint16(v.GetUint(StakingPortKey)) - publicIP := v.GetString(PublicIPKey) - if publicIP != "" && ipResolutionService != "" { + if ipConfig.PublicIP != "" && ipConfig.PublicIPResolutionService != "" { return node.IPConfig{}, fmt.Errorf("only one of --%s and --%s can be given", PublicIPKey, PublicIPResolutionServiceKey) } - - if publicIP != "" { - // User specified a specific public IP to use. - ip := net.ParseIP(publicIP) - if ip == nil { - return node.IPConfig{}, fmt.Errorf("invalid IP Address %s", publicIP) - } - return node.IPConfig{ - IPPort: ips.NewDynamicIPPort(ip, stakingPort), - IPUpdater: dynamicip.NewNoUpdater(), - IPResolutionFreq: ipResolutionFreq, - Nat: nat.NewNoRouter(), - }, nil - } - if ipResolutionService != "" { - // User specified to use dynamic IP resolution. - resolver, err := dynamicip.NewResolver(ipResolutionService) - if err != nil { - return node.IPConfig{}, fmt.Errorf("couldn't create IP resolver: %w", err) - } - - // Use that to resolve our public IP. 
- ctx, cancel := context.WithTimeout(context.Background(), ipResolutionTimeout) - defer cancel() - ip, err := resolver.Resolve(ctx) - if err != nil { - return node.IPConfig{}, fmt.Errorf("couldn't resolve public IP: %w", err) - } - ipPort := ips.NewDynamicIPPort(ip, stakingPort) - - return node.IPConfig{ - IPPort: ipPort, - IPUpdater: dynamicip.NewUpdater( - ipPort, - resolver, - ipResolutionFreq, - ), - IPResolutionFreq: ipResolutionFreq, - Nat: nat.NewNoRouter(), - }, nil - } - - // User didn't specify a public IP to use, and they didn't specify a public IP resolution - // service to use. Try to resolve public IP with NAT traversal. - nat := nat.GetRouter() - ip, err := nat.ExternalIP() - if err != nil { - return node.IPConfig{}, fmt.Errorf("public IP / IP resolution service not given and failed to resolve IP with NAT: %w", err) - } - return node.IPConfig{ - Nat: nat, - AttemptedNATTraversal: true, - IPPort: ips.NewDynamicIPPort(ip, stakingPort), - IPUpdater: dynamicip.NewNoUpdater(), - IPResolutionFreq: ipResolutionFreq, - }, nil + return ipConfig, nil } func getProfilerConfig(v *viper.Viper) (profiler.Config, error) { @@ -791,18 +800,19 @@ func getStakingSigner(v *viper.Viper) (*bls.SecretKey, error) { func getStakingConfig(v *viper.Viper, networkID uint32) (node.StakingConfig, error) { config := node.StakingConfig{ - EnableStaking: v.GetBool(StakingEnabledKey), - DisabledStakingWeight: v.GetUint64(StakingDisabledWeightKey), - StakingKeyPath: GetExpandedArg(v, StakingTLSKeyPathKey), - StakingCertPath: GetExpandedArg(v, StakingCertPathKey), - StakingSignerPath: GetExpandedArg(v, StakingSignerKeyPathKey), + SybilProtectionEnabled: v.GetBool(SybilProtectionEnabledKey), + SybilProtectionDisabledWeight: v.GetUint64(SybilProtectionDisabledWeightKey), + PartialSyncPrimaryNetwork: v.GetBool(PartialSyncPrimaryNetworkKey), + StakingKeyPath: GetExpandedArg(v, StakingTLSKeyPathKey), + StakingCertPath: GetExpandedArg(v, StakingCertPathKey), + StakingSignerPath: 
GetExpandedArg(v, StakingSignerKeyPathKey), } - if !config.EnableStaking && config.DisabledStakingWeight == 0 { - return node.StakingConfig{}, errInvalidStakerWeights + if !config.SybilProtectionEnabled && config.SybilProtectionDisabledWeight == 0 { + return node.StakingConfig{}, errSybilProtectionDisabledStakerWeights } - if !config.EnableStaking && networkID == constants.MainnetID { - return node.StakingConfig{}, errStakingDisableOnPublicNetwork + if !config.SybilProtectionEnabled && networkID == constants.MainnetID { + return node.StakingConfig{}, errSybilProtectionDisabledOnPublicNetwork } var err error @@ -869,14 +879,14 @@ func getTxFeeConfig(v *viper.Viper, networkID uint32) genesis.TxFeeConfig { func getGenesisData(v *viper.Viper, networkID uint32, stakingCfg *genesis.StakingConfig) ([]byte, ids.ID, error) { // try first loading genesis content directly from flag/env-var - if v.IsSet(GenesisConfigContentKey) { - genesisData := v.GetString(GenesisConfigContentKey) + if v.IsSet(GenesisFileContentKey) { + genesisData := v.GetString(GenesisFileContentKey) return genesis.FromFlag(networkID, genesisData, stakingCfg) } // if content is not specified go for the file - if v.IsSet(GenesisConfigFileKey) { - genesisFileName := GetExpandedArg(v, GenesisConfigFileKey) + if v.IsSet(GenesisFileKey) { + genesisFileName := GetExpandedArg(v, GenesisFileKey) return genesis.FromFile(networkID, genesisFileName, stakingCfg) } @@ -925,7 +935,8 @@ func getDatabaseConfig(v *viper.Viper, networkID uint32) (node.DatabaseConfig, e } return node.DatabaseConfig{ - Name: v.GetString(DBTypeKey), + Name: v.GetString(DBTypeKey), + ReadOnly: v.GetBool(DBReadOnlyKey), Path: filepath.Join( GetExpandedArg(v, DBPathKey), constants.NetworkName(networkID), @@ -952,7 +963,7 @@ func getAliases(v *viper.Viper, name string, contentKey string, fileKey string) if !exists { if v.IsSet(fileKey) { - return nil, fmt.Errorf("%s file does not exist in %v", name, aliasFilePath) + return nil, fmt.Errorf("%w: 
%s", errFileDoesNotExist, aliasFilePath) } return nil, nil } @@ -965,7 +976,7 @@ func getAliases(v *viper.Viper, name string, contentKey string, fileKey string) aliasMap := make(map[ids.ID][]string) if err := json.Unmarshal(fileBytes, &aliasMap); err != nil { - return nil, fmt.Errorf("problem unmarshaling %s: %w", name, err) + return nil, fmt.Errorf("%w on %s: %w", errUnmarshalling, name, err) } return aliasMap, nil } @@ -978,23 +989,6 @@ func getChainAliases(v *viper.Viper) (map[ids.ID][]string, error) { return getAliases(v, "chain aliases", ChainAliasesContentKey, ChainAliasesFileKey) } -func getVMAliaser(v *viper.Viper) (ids.Aliaser, error) { - vmAliases, err := getVMAliases(v) - if err != nil { - return nil, err - } - - aliser := ids.NewAliaser() - for vmID, aliases := range vmAliases { - for _, alias := range aliases { - if err := aliser.Alias(vmID, alias); err != nil { - return nil, err - } - } - } - return aliser, nil -} - // getPathFromDirKey reads flag value from viper instance and then checks the folder existence func getPathFromDirKey(v *viper.Viper, configKey string) (string, error) { configDir := GetExpandedArg(v, configKey) @@ -1008,7 +1002,7 @@ func getPathFromDirKey(v *viper.Viper, configKey string) (string, error) { } if v.IsSet(configKey) { // user specified a config dir explicitly, but dir does not exist. 
- return "", fmt.Errorf("cannot read directory: %v", cleanPath) + return "", fmt.Errorf("%w: %s", errCannotReadDirectory, cleanPath) } return "", nil } @@ -1116,6 +1110,11 @@ func getSubnetConfigsFromFlags(v *viper.Viper, subnetIDs []ids.ID) (map[ids.ID]s return nil, err } + if config.ConsensusParameters.Alpha != nil { + config.ConsensusParameters.AlphaPreference = *config.ConsensusParameters.Alpha + config.ConsensusParameters.AlphaConfidence = config.ConsensusParameters.AlphaPreference + } + if err := config.Valid(); err != nil { return nil, err } @@ -1161,7 +1160,12 @@ func getSubnetConfigsFromDir(v *viper.Viper, subnetIDs []ids.ID) (map[ids.ID]sub config := getDefaultSubnetConfig(v) if err := json.Unmarshal(file, &config); err != nil { - return nil, err + return nil, fmt.Errorf("%w: %w", errUnmarshalling, err) + } + + if config.ConsensusParameters.Alpha != nil { + config.ConsensusParameters.AlphaPreference = *config.ConsensusParameters.Alpha + config.ConsensusParameters.AlphaConfidence = config.ConsensusParameters.AlphaPreference } if err := config.Valid(); err != nil { @@ -1176,10 +1180,11 @@ func getSubnetConfigsFromDir(v *viper.Viper, subnetIDs []ids.ID) (map[ids.ID]sub func getDefaultSubnetConfig(v *viper.Viper) subnets.Config { return subnets.Config{ - ConsensusParameters: getConsensusConfig(v), - ValidatorOnly: false, - GossipConfig: getGossipConfig(v), - ProposerMinBlockDelay: proposervm.DefaultMinBlockDelay, + ConsensusParameters: getConsensusConfig(v), + ValidatorOnly: false, + GossipConfig: getGossipConfig(v), + ProposerMinBlockDelay: proposervm.DefaultMinBlockDelay, + ProposerNumHistoricalBlocks: proposervm.DefaultNumHistoricalBlocks, } } @@ -1258,10 +1263,12 @@ func getTraceConfig(v *viper.Viper) (trace.Config, error) { Type: exporterType, Endpoint: endpoint, Insecure: v.GetBool(TracingInsecureKey), - // TODO add support for headers + Headers: v.GetStringMapString(TracingHeadersKey), }, Enabled: true, TraceSampleRate: 
v.GetFloat64(TracingSampleRateKey), + AppName: constants.AppName, + Version: version.Current.String(), }, nil } @@ -1305,9 +1312,9 @@ func GetNodeConfig(v *viper.Viper) (node.Config, error) { } // Gossiping - nodeConfig.ConsensusGossipFrequency = v.GetDuration(ConsensusGossipFrequencyKey) - if nodeConfig.ConsensusGossipFrequency < 0 { - return node.Config{}, fmt.Errorf("%s must be >= 0", ConsensusGossipFrequencyKey) + nodeConfig.FrontierPollFrequency = v.GetDuration(ConsensusFrontierPollFrequencyKey) + if nodeConfig.FrontierPollFrequency < 0 { + return node.Config{}, fmt.Errorf("%s must be >= 0", ConsensusFrontierPollFrequencyKey) } // App handling @@ -1372,7 +1379,6 @@ func GetNodeConfig(v *viper.Viper) (node.Config, error) { } // Router - nodeConfig.ConsensusRouter = &router.ChainRouter{} nodeConfig.RouterHealthConfig, err = getRouterHealthConfig(v, healthCheckAveragerHalflife) if err != nil { return node.Config{}, err @@ -1388,7 +1394,12 @@ func GetNodeConfig(v *viper.Viper) (node.Config, error) { } // Network Config - nodeConfig.NetworkConfig, err = getNetworkConfig(v, nodeConfig.EnableStaking, healthCheckAveragerHalflife, nodeConfig.NetworkID) + nodeConfig.NetworkConfig, err = getNetworkConfig( + v, + nodeConfig.NetworkID, + nodeConfig.SybilProtectionEnabled, + healthCheckAveragerHalflife, + ) if err != nil { return node.Config{}, err } @@ -1438,15 +1449,6 @@ func GetNodeConfig(v *viper.Viper) (node.Config, error) { return node.Config{}, err } - // Node health - nodeConfig.MinPercentConnectedStakeHealthy = map[ids.ID]float64{ - constants.PrimaryNetworkID: calcMinConnectedStake(primaryNetworkConfig.ConsensusParameters.Parameters), - } - - for subnetID, config := range subnetConfigs { - nodeConfig.MinPercentConnectedStakeHealthy[subnetID] = calcMinConnectedStake(config.ConsensusParameters.Parameters) - } - // Chain Configs nodeConfig.ChainConfigs, err = getChainConfigs(v) if err != nil { @@ -1460,7 +1462,7 @@ func GetNodeConfig(v *viper.Viper) (node.Config, 
error) { } // VM Aliases - nodeConfig.VMAliaser, err = getVMAliaser(v) + nodeConfig.VMAliases, err = getVMAliases(v) if err != nil { return node.Config{}, err } @@ -1497,19 +1499,12 @@ func GetNodeConfig(v *viper.Viper) (node.Config, error) { nodeConfig.ChainDataDir = GetExpandedArg(v, ChainDataDirKey) + nodeConfig.ProcessContextFilePath = GetExpandedArg(v, ProcessContextFileKey) + nodeConfig.ProvidedFlags = providedFlags(v) return nodeConfig, nil } -// calcMinConnectedStake takes [consensusParams] as input and calculates the -// expected min connected stake percentage according to alpha and k. -func calcMinConnectedStake(consensusParams snowball.Parameters) float64 { - alpha := consensusParams.Alpha - k := consensusParams.K - r := float64(alpha) / float64(k) - return r*(1-constants.MinConnectedStakeBuffer) + constants.MinConnectedStakeBuffer -} - func providedFlags(v *viper.Viper) map[string]interface{} { settings := v.AllSettings() customSettings := make(map[string]interface{}, len(settings)) diff --git a/avalanchego/config/config_test.go b/avalanchego/config/config_test.go index de15d8fe..2fab7457 100644 --- a/avalanchego/config/config_test.go +++ b/avalanchego/config/config_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package config @@ -14,20 +14,19 @@ import ( "github.com/spf13/pflag" "github.com/spf13/viper" - "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/chains" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/consensus/snowball" "github.com/ava-labs/avalanchego/subnets" ) func TestGetChainConfigsFromFiles(t *testing.T) { tests := map[string]struct { - configs map[string]string - upgrades map[string]string - errMessage string - expected map[string]chains.ChainConfig + configs map[string]string + upgrades map[string]string + expected map[string]chains.ChainConfig }{ "no chain configs": { configs: map[string]string{}, @@ -73,7 +72,7 @@ func TestGetChainConfigsFromFiles(t *testing.T) { // Create custom configs for key, value := range test.configs { chainDir := filepath.Join(chainsDir, key) - setupFile(t, chainDir, chainConfigFileName+".ex", value) + setupFile(t, chainDir, chainConfigFileName+".ex", value) //nolint:goconst } for key, value := range test.upgrades { chainDir := filepath.Join(chainsDir, key) @@ -85,14 +84,7 @@ func TestGetChainConfigsFromFiles(t *testing.T) { // Parse config require.Equal(root, v.GetString(ChainConfigDirKey)) chainConfigs, err := getChainConfigs(v) - if len(test.errMessage) > 0 { - require.Error(err) - if err != nil { - require.Contains(err.Error(), test.errMessage) - } - } else { - require.NoError(err) - } + require.NoError(err) require.Equal(test.expected, chainConfigs) }) } @@ -100,32 +92,34 @@ func TestGetChainConfigsFromFiles(t *testing.T) { func TestGetChainConfigsDirNotExist(t *testing.T) { tests := map[string]struct { - structure string - file map[string]string - errMessage string - expected map[string]chains.ChainConfig + structure string + file map[string]string + expectedErr error + expected map[string]chains.ChainConfig }{ "cdir not exist": { - structure: "/", - file: map[string]string{"config.ex": "noeffect"}, - errMessage: "cannot read directory", - expected: nil, + structure: "/", 
+ file: map[string]string{"config.ex": "noeffect"}, + expectedErr: errCannotReadDirectory, + expected: nil, }, "cdir is file ": { - structure: "/", - file: map[string]string{"cdir": "noeffect"}, - errMessage: "cannot read directory", - expected: nil, + structure: "/", + file: map[string]string{"cdir": "noeffect"}, + expectedErr: errCannotReadDirectory, + expected: nil, }, "chain subdir not exist": { - structure: "/cdir/", - file: map[string]string{"config.ex": "noeffect"}, - expected: map[string]chains.ChainConfig{}, + structure: "/cdir/", + file: map[string]string{"config.ex": "noeffect"}, + expectedErr: nil, + expected: map[string]chains.ChainConfig{}, }, "full structure": { - structure: "/cdir/C/", - file: map[string]string{"config.ex": "hello"}, - expected: map[string]chains.ChainConfig{"C": {Config: []byte("hello"), Upgrade: []byte(nil)}}, + structure: "/cdir/C/", + file: map[string]string{"config.ex": "hello"}, + expectedErr: nil, + expected: map[string]chains.ChainConfig{"C": {Config: []byte("hello"), Upgrade: []byte(nil)}}, }, } @@ -150,14 +144,8 @@ func TestGetChainConfigsDirNotExist(t *testing.T) { // don't read with getConfigFromViper since it's very slow. 
chainConfigs, err := getChainConfigs(v) - switch { - case len(test.errMessage) > 0: - require.Error(err) - require.Contains(err.Error(), test.errMessage) - default: - require.NoError(err) - require.Equal(test.expected, chainConfigs) - } + require.ErrorIs(err, test.expectedErr) + require.Equal(test.expected, chainConfigs) }) } } @@ -183,7 +171,6 @@ func TestSetChainConfigDefaultDir(t *testing.T) { func TestGetChainConfigsFromFlags(t *testing.T) { tests := map[string]struct { fullConfigs map[string]chains.ChainConfig - errMessage string expected map[string]chains.ChainConfig }{ "no chain configs": { @@ -244,14 +231,7 @@ func TestGetChainConfigsFromFlags(t *testing.T) { // Parse config chainConfigs, err := getChainConfigs(v) - if len(test.errMessage) > 0 { - require.Error(err) - if err != nil { - require.Contains(err.Error(), test.errMessage) - } - } else { - require.NoError(err) - } + require.NoError(err) require.Equal(test.expected, chainConfigs) }) } @@ -259,14 +239,14 @@ func TestGetChainConfigsFromFlags(t *testing.T) { func TestGetVMAliasesFromFile(t *testing.T) { tests := map[string]struct { - givenJSON string - expected map[ids.ID][]string - errMessage string + givenJSON string + expected map[ids.ID][]string + expectedErr error }{ "wrong vm id": { - givenJSON: `{"wrongVmId": ["vm1","vm2"]}`, - expected: nil, - errMessage: "problem unmarshaling vm aliases", + givenJSON: `{"wrongVmId": ["vm1","vm2"]}`, + expected: nil, + expectedErr: errUnmarshalling, }, "vm id": { givenJSON: `{"2Ctt6eGAeo4MLqTmGa7AdRecuVMPGWEX9wSsCLBYrLhX4a394i": ["vm1","vm2"], @@ -279,7 +259,7 @@ func TestGetVMAliasesFromFile(t *testing.T) { m[id2] = []string{"vm3", "vm4"} return m }(), - errMessage: "", + expectedErr: nil, }, } @@ -293,27 +273,22 @@ func TestGetVMAliasesFromFile(t *testing.T) { setupFile(t, root, "aliases.json", test.givenJSON) v := setupViper(configFilePath) vmAliases, err := getVMAliases(v) - if len(test.errMessage) > 0 { - require.Error(err) - require.Contains(err.Error(), 
test.errMessage) - } else { - require.NoError(err) - require.Equal(test.expected, vmAliases) - } + require.ErrorIs(err, test.expectedErr) + require.Equal(test.expected, vmAliases) }) } } func TestGetVMAliasesFromFlag(t *testing.T) { tests := map[string]struct { - givenJSON string - expected map[ids.ID][]string - errMessage string + givenJSON string + expected map[ids.ID][]string + expectedErr error }{ "wrong vm id": { - givenJSON: `{"wrongVmId": ["vm1","vm2"]}`, - expected: nil, - errMessage: "problem unmarshaling vm aliases", + givenJSON: `{"wrongVmId": ["vm1","vm2"]}`, + expected: nil, + expectedErr: errUnmarshalling, }, "vm id": { givenJSON: `{"2Ctt6eGAeo4MLqTmGa7AdRecuVMPGWEX9wSsCLBYrLhX4a394i": ["vm1","vm2"], @@ -326,7 +301,7 @@ func TestGetVMAliasesFromFlag(t *testing.T) { m[id2] = []string{"vm3", "vm4"} return m }(), - errMessage: "", + expectedErr: nil, }, } @@ -340,13 +315,8 @@ func TestGetVMAliasesFromFlag(t *testing.T) { v.Set(VMAliasesContentKey, encodedFileContent) vmAliases, err := getVMAliases(v) - if len(test.errMessage) > 0 { - require.Error(err) - require.Contains(err.Error(), test.errMessage) - } else { - require.NoError(err) - require.Equal(test.expected, vmAliases) - } + require.ErrorIs(err, test.expectedErr) + require.Equal(test.expected, vmAliases) }) } } @@ -380,9 +350,8 @@ func TestGetVMAliasesDirNotExists(t *testing.T) { configFilePath := setupConfigJSON(t, root, configJSON) v := setupViper(configFilePath) vmAliases, err := getVMAliases(v) + require.ErrorIs(err, errFileDoesNotExist) require.Nil(vmAliases) - require.Error(err) - require.Contains(err.Error(), "vm aliases file does not exist") // do not set it explicitly configJSON = "{}" @@ -394,11 +363,14 @@ func TestGetVMAliasesDirNotExists(t *testing.T) { } func TestGetSubnetConfigsFromFile(t *testing.T) { + subnetID, err := ids.FromString("2Ctt6eGAeo4MLqTmGa7AdRecuVMPGWEX9wSsCLBYrLhX4a394i") + require.NoError(t, err) + tests := map[string]struct { - givenJSON string - testF 
func(*require.Assertions, map[ids.ID]subnets.Config) - errMessage string - fileName string + fileName string + givenJSON string + testF func(*require.Assertions, map[ids.ID]subnets.Config) + expectedErr error }{ "wrong config": { fileName: "2Ctt6eGAeo4MLqTmGa7AdRecuVMPGWEX9wSsCLBYrLhX4a394i.json", @@ -406,7 +378,7 @@ func TestGetSubnetConfigsFromFile(t *testing.T) { testF: func(require *require.Assertions, given map[ids.ID]subnets.Config) { require.Nil(given) }, - errMessage: "invalid character", + expectedErr: errUnmarshalling, }, "subnet is not tracked": { fileName: "Gmt4fuNsGJAd2PX86LBvycGaBpgCYKbuULdCLZs3SEs1Jx1LU.json", @@ -414,6 +386,7 @@ func TestGetSubnetConfigsFromFile(t *testing.T) { testF: func(require *require.Assertions, given map[ids.ID]subnets.Config) { require.Empty(given) }, + expectedErr: nil, }, "wrong extension": { fileName: "2Ctt6eGAeo4MLqTmGa7AdRecuVMPGWEX9wSsCLBYrLhX4a394i.yaml", @@ -421,30 +394,30 @@ func TestGetSubnetConfigsFromFile(t *testing.T) { testF: func(require *require.Assertions, given map[ids.ID]subnets.Config) { require.Empty(given) }, + expectedErr: nil, }, "invalid consensus parameters": { fileName: "2Ctt6eGAeo4MLqTmGa7AdRecuVMPGWEX9wSsCLBYrLhX4a394i.json", - givenJSON: `{"consensusParameters":{"k": 111, "alpha":1234} }`, + givenJSON: `{"consensusParameters":{"k": 111, "alphaPreference":1234} }`, testF: func(require *require.Assertions, given map[ids.ID]subnets.Config) { require.Nil(given) }, - errMessage: "fails the condition that: alpha <= k", + expectedErr: snowball.ErrParametersInvalid, }, "correct config": { fileName: "2Ctt6eGAeo4MLqTmGa7AdRecuVMPGWEX9wSsCLBYrLhX4a394i.json", - givenJSON: `{"validatorOnly": true, "consensusParameters":{"parents": 111, "alpha":16} }`, + givenJSON: `{"validatorOnly": true, "consensusParameters":{"alphaConfidence":16} }`, testF: func(require *require.Assertions, given map[ids.ID]subnets.Config) { id, _ := ids.FromString("2Ctt6eGAeo4MLqTmGa7AdRecuVMPGWEX9wSsCLBYrLhX4a394i") config, ok := 
given[id] require.True(ok) - require.Equal(true, config.ValidatorOnly) - require.Equal(111, config.ConsensusParameters.Parents) - require.Equal(16, config.ConsensusParameters.Alpha) + require.True(config.ValidatorOnly) + require.Equal(16, config.ConsensusParameters.AlphaConfidence) // must still respect defaults require.Equal(20, config.ConsensusParameters.K) }, - errMessage: "", + expectedErr: nil, }, "gossip config": { fileName: "2Ctt6eGAeo4MLqTmGa7AdRecuVMPGWEX9wSsCLBYrLhX4a394i.json", @@ -458,84 +431,89 @@ func TestGetSubnetConfigsFromFile(t *testing.T) { require.Equal(20, config.ConsensusParameters.K) require.Equal(uint(10), config.GossipConfig.AppGossipValidatorSize) }, - errMessage: "", + expectedErr: nil, }, } for name, test := range tests { t.Run(name, func(t *testing.T) { require := require.New(t) + root := t.TempDir() subnetPath := filepath.Join(root, "subnets") + configJSON := fmt.Sprintf(`{%q: %q}`, SubnetConfigDirKey, subnetPath) configFilePath := setupConfigJSON(t, root, configJSON) - subnetID, err := ids.FromString("2Ctt6eGAeo4MLqTmGa7AdRecuVMPGWEX9wSsCLBYrLhX4a394i") - require.NoError(err) + setupFile(t, subnetPath, test.fileName, test.givenJSON) + v := setupViper(configFilePath) subnetConfigs, err := getSubnetConfigs(v, []ids.ID{subnetID}) - if len(test.errMessage) > 0 { - require.Error(err) - require.Contains(err.Error(), test.errMessage) - } else { - require.NoError(err) - test.testF(require, subnetConfigs) + require.ErrorIs(err, test.expectedErr) + if test.expectedErr != nil { + return } + test.testF(require, subnetConfigs) }) } } func TestGetSubnetConfigsFromFlags(t *testing.T) { + subnetID, err := ids.FromString("2Ctt6eGAeo4MLqTmGa7AdRecuVMPGWEX9wSsCLBYrLhX4a394i") + require.NoError(t, err) + tests := map[string]struct { - givenJSON string - testF func(*require.Assertions, map[ids.ID]subnets.Config) - errMessage string + givenJSON string + testF func(*require.Assertions, map[ids.ID]subnets.Config) + expectedErr error }{ "no configs": { 
givenJSON: `{}`, testF: func(require *require.Assertions, given map[ids.ID]subnets.Config) { require.Empty(given) }, - errMessage: "", + expectedErr: nil, }, "entry with no config": { givenJSON: `{"2Ctt6eGAeo4MLqTmGa7AdRecuVMPGWEX9wSsCLBYrLhX4a394i":{}}`, testF: func(require *require.Assertions, given map[ids.ID]subnets.Config) { - require.True(len(given) == 1) + require.Len(given, 1) id, _ := ids.FromString("2Ctt6eGAeo4MLqTmGa7AdRecuVMPGWEX9wSsCLBYrLhX4a394i") config, ok := given[id] require.True(ok) // should respect defaults require.Equal(20, config.ConsensusParameters.K) }, + expectedErr: nil, }, "subnet is not tracked": { givenJSON: `{"Gmt4fuNsGJAd2PX86LBvycGaBpgCYKbuULdCLZs3SEs1Jx1LU":{"validatorOnly":true}}`, testF: func(require *require.Assertions, given map[ids.ID]subnets.Config) { require.Empty(given) }, + expectedErr: nil, }, "invalid consensus parameters": { givenJSON: `{ "2Ctt6eGAeo4MLqTmGa7AdRecuVMPGWEX9wSsCLBYrLhX4a394i": { "consensusParameters": { "k": 111, - "alpha": 1234 + "alphaPreference": 1234 } } }`, testF: func(require *require.Assertions, given map[ids.ID]subnets.Config) { require.Empty(given) }, - errMessage: "fails the condition that: alpha <= k", + expectedErr: snowball.ErrParametersInvalid, }, "correct config": { givenJSON: `{ "2Ctt6eGAeo4MLqTmGa7AdRecuVMPGWEX9wSsCLBYrLhX4a394i": { "consensusParameters": { "k": 30, - "alpha": 20, - "parents": 111 + "alphaPreference": 16, + "alphaConfidence": 20 }, "validatorOnly": true } @@ -544,23 +522,22 @@ func TestGetSubnetConfigsFromFlags(t *testing.T) { id, _ := ids.FromString("2Ctt6eGAeo4MLqTmGa7AdRecuVMPGWEX9wSsCLBYrLhX4a394i") config, ok := given[id] require.True(ok) - require.Equal(true, config.ValidatorOnly) - require.Equal(111, config.ConsensusParameters.Parents) - require.Equal(20, config.ConsensusParameters.Alpha) + require.True(config.ValidatorOnly) + require.Equal(16, config.ConsensusParameters.AlphaPreference) + require.Equal(20, config.ConsensusParameters.AlphaConfidence) 
require.Equal(30, config.ConsensusParameters.K) // must still respect defaults require.Equal(uint(10), config.GossipConfig.AppGossipValidatorSize) require.Equal(256, config.ConsensusParameters.MaxOutstandingItems) }, - errMessage: "", + expectedErr: nil, }, } for name, test := range tests { t.Run(name, func(t *testing.T) { require := require.New(t) - subnetID, err := ids.FromString("2Ctt6eGAeo4MLqTmGa7AdRecuVMPGWEX9wSsCLBYrLhX4a394i") - require.NoError(err) + encodedFileContent := base64.StdEncoding.EncodeToString([]byte(test.givenJSON)) // build viper config @@ -568,25 +545,15 @@ func TestGetSubnetConfigsFromFlags(t *testing.T) { v.Set(SubnetConfigContentKey, encodedFileContent) subnetConfigs, err := getSubnetConfigs(v, []ids.ID{subnetID}) - if len(test.errMessage) > 0 { - require.Error(err) - require.Contains(err.Error(), test.errMessage) - } else { - require.NoError(err) - test.testF(require, subnetConfigs) + require.ErrorIs(err, test.expectedErr) + if test.expectedErr != nil { + return } + test.testF(require, subnetConfigs) }) } } -func TestCalcMinConnectedStake(t *testing.T) { - v := setupViperFlags() - defaultParams := getConsensusConfig(v) - defaultExpectedMinStake := 0.8 - minStake := calcMinConnectedStake(defaultParams.Parameters) - require.Equal(t, defaultExpectedMinStake, minStake) -} - // setups config json file and writes content func setupConfigJSON(t *testing.T, rootPath string, value string) string { configFilePath := filepath.Join(rootPath, "config.json") @@ -596,9 +563,11 @@ func setupConfigJSON(t *testing.T, rootPath string, value string) string { // setups file creates necessary path and writes value to it. 
func setupFile(t *testing.T, path string, fileName string, value string) { - require.NoError(t, os.MkdirAll(path, 0o700)) + require := require.New(t) + + require.NoError(os.MkdirAll(path, 0o700)) filePath := filepath.Join(path, fileName) - require.NoError(t, os.WriteFile(filePath, []byte(value), 0o600)) + require.NoError(os.WriteFile(filePath, []byte(value), 0o600)) } func setupViperFlags() *viper.Viper { diff --git a/avalanchego/config/flags.go b/avalanchego/config/flags.go index c30f6d27..767d449d 100644 --- a/avalanchego/config/flags.go +++ b/avalanchego/config/flags.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package config @@ -15,10 +15,13 @@ import ( "github.com/ava-labs/avalanchego/database/leveldb" "github.com/ava-labs/avalanchego/database/memdb" + "github.com/ava-labs/avalanchego/database/pebble" "github.com/ava-labs/avalanchego/genesis" + "github.com/ava-labs/avalanchego/snow/consensus/snowball" "github.com/ava-labs/avalanchego/trace" "github.com/ava-labs/avalanchego/utils/compression" "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/dynamicip" "github.com/ava-labs/avalanchego/utils/ulimit" "github.com/ava-labs/avalanchego/utils/units" ) @@ -29,6 +32,8 @@ const ( AvalancheGoDataDirVar = "AVALANCHEGO_DATA_DIR" defaultUnexpandedDataDir = "$" + AvalancheGoDataDirVar + + DefaultProcessContextFilename = "process.json" ) var ( @@ -49,6 +54,7 @@ var ( defaultSubnetConfigDir = filepath.Join(defaultConfigDir, "subnets") defaultPluginDir = filepath.Join(defaultUnexpandedDataDir, "plugins") defaultChainDataDir = filepath.Join(defaultUnexpandedDataDir, "chainData") + defaultProcessContextPath = filepath.Join(defaultUnexpandedDataDir, DefaultProcessContextFilename) ) func deprecateFlags(fs *pflag.FlagSet) error { @@ -80,13 +86,17 @@ func addNodeFlags(fs *pflag.FlagSet) { 
fs.String(ConfigContentTypeKey, "json", "Specifies the format of the base64 encoded config content. Available values: 'json', 'yaml', 'toml'") // Genesis - fs.String(GenesisConfigFileKey, "", fmt.Sprintf("Specifies a genesis config file (ignored when running standard networks or if %s is specified)", - GenesisConfigContentKey)) - fs.String(GenesisConfigContentKey, "", "Specifies base64 encoded genesis content") + fs.String(GenesisFileKey, "", fmt.Sprintf("Specifies a genesis config file path. Ignored when running standard networks or if %s is specified", + GenesisFileContentKey)) + fs.String(GenesisFileContentKey, "", "Specifies base64 encoded genesis content") // Network ID fs.String(NetworkNameKey, constants.MainnetName, "Network ID this node will connect to") + // ACP flagging + fs.IntSlice(ACPSupportKey, nil, "ACPs to support adoption") + fs.IntSlice(ACPObjectKey, nil, "ACPs to object adoption") + // AVAX fees fs.Uint64(TxFeeKey, genesis.LocalParams.TxFee, "Transaction fee, in nAVAX") fs.Uint64(CreateAssetTxFeeKey, genesis.LocalParams.CreateAssetTxFee, "Transaction fee, in nAVAX, for transactions that create new assets") @@ -99,7 +109,8 @@ func addNodeFlags(fs *pflag.FlagSet) { fs.Uint64(AddSubnetDelegatorFeeKey, genesis.LocalParams.AddSubnetDelegatorFee, "Transaction fee, in nAVAX, for transactions that add new subnet delegators") // Database - fs.String(DBTypeKey, leveldb.Name, fmt.Sprintf("Database type to use. Should be one of {%s, %s}", leveldb.Name, memdb.Name)) + fs.String(DBTypeKey, leveldb.Name, fmt.Sprintf("Database type to use. Must be one of {%s, %s, %s}", leveldb.Name, memdb.Name, pebble.Name)) + fs.Bool(DBReadOnlyKey, false, "If true, database writes are to memory and never persisted. May still initialize database directory/files on disk if they don't exist") fs.String(DBPathKey, defaultDBDir, "Path to database directory") fs.String(DBConfigFileKey, "", fmt.Sprintf("Path to database config file. 
Ignored if %s is specified", DBConfigContentKey)) fs.String(DBConfigContentKey, "", "Specifies base64 encoded database config content") @@ -116,31 +127,25 @@ func addNodeFlags(fs *pflag.FlagSet) { fs.Bool(LogDisableDisplayPluginLogsKey, false, "Disables displaying plugin logs in stdout.") // Peer List Gossip - gossipHelpMsg := fmt.Sprintf( - "Gossip [%s] validator IPs to [%s] validators, [%s] non-validators, and [%s] validating or non-validating peers every [%s]", - NetworkPeerListNumValidatorIPsKey, - NetworkPeerListValidatorGossipSizeKey, - NetworkPeerListNonValidatorGossipSizeKey, - NetworkPeerListPeersGossipSizeKey, - NetworkPeerListGossipFreqKey, - ) - fs.Uint(NetworkPeerListNumValidatorIPsKey, constants.DefaultNetworkPeerListNumValidatorIPs, gossipHelpMsg) - fs.Uint(NetworkPeerListValidatorGossipSizeKey, constants.DefaultNetworkPeerListValidatorGossipSize, gossipHelpMsg) - fs.Uint(NetworkPeerListNonValidatorGossipSizeKey, constants.DefaultNetworkPeerListNonValidatorGossipSize, gossipHelpMsg) - fs.Uint(NetworkPeerListPeersGossipSizeKey, constants.DefaultNetworkPeerListPeersGossipSize, gossipHelpMsg) - fs.Duration(NetworkPeerListGossipFreqKey, constants.DefaultNetworkPeerListGossipFreq, gossipHelpMsg) + fs.Uint(NetworkPeerListNumValidatorIPsKey, constants.DefaultNetworkPeerListNumValidatorIPs, "Number of validator IPs to gossip to other nodes") + fs.Uint(NetworkPeerListValidatorGossipSizeKey, constants.DefaultNetworkPeerListValidatorGossipSize, "Number of validators that the node will gossip peer list to") + fs.Uint(NetworkPeerListNonValidatorGossipSizeKey, constants.DefaultNetworkPeerListNonValidatorGossipSize, "Number of non-validators that the node will gossip peer list to") + fs.Uint(NetworkPeerListPeersGossipSizeKey, constants.DefaultNetworkPeerListPeersGossipSize, "Number of total peers (including non-validators and validators) that the node will gossip peer list to") + fs.Duration(NetworkPeerListGossipFreqKey, constants.DefaultNetworkPeerListGossipFreq, 
"Frequency to gossip peers to other nodes") + fs.Duration(NetworkPeerListPullGossipFreqKey, constants.DefaultNetworkPeerListPullGossipFreq, "Frequency to request peers from other nodes") + fs.Duration(NetworkPeerListBloomResetFreqKey, constants.DefaultNetworkPeerListBloomResetFreq, "Frequency to recalculate the bloom filter used to request new peers from other nodes") // Public IP Resolution - fs.String(PublicIPKey, "", "Public IP of this node for P2P communication. If empty, try to discover with NAT") + fs.String(PublicIPKey, "", "Public IP of this node for P2P communication") fs.Duration(PublicIPResolutionFreqKey, 5*time.Minute, "Frequency at which this node resolves/updates its public IP and renew NAT mappings, if applicable") - fs.String(PublicIPResolutionServiceKey, "", fmt.Sprintf("Only acceptable values are 'ifconfigco', 'opendns' or 'ifconfigme'. When provided, the node will use that service to periodically resolve/update its public IP. Ignored if %s is set", PublicIPKey)) + fs.String(PublicIPResolutionServiceKey, "", fmt.Sprintf("Only acceptable values are %q, %q or %q. When provided, the node will use that service to periodically resolve/update its public IP", dynamicip.OpenDNSName, dynamicip.IFConfigCoName, dynamicip.IFConfigMeName)) // Inbound Connection Throttling - fs.Duration(InboundConnUpgradeThrottlerCooldownKey, constants.DefaultInboundConnUpgradeThrottlerCooldown, "Upgrade an inbound connection from a given IP at most once per this duration. If 0, don't rate-limit inbound connection upgrades") - fs.Float64(InboundThrottlerMaxConnsPerSecKey, constants.DefaultInboundThrottlerMaxConnsPerSec, "Max number of inbound connections to accept (from all peers) per second") + fs.Duration(NetworkInboundConnUpgradeThrottlerCooldownKey, constants.DefaultInboundConnUpgradeThrottlerCooldown, "Upgrade an inbound connection from a given IP at most once per this duration. 
If 0, don't rate-limit inbound connection upgrades") + fs.Float64(NetworkInboundThrottlerMaxConnsPerSecKey, constants.DefaultInboundThrottlerMaxConnsPerSec, "Max number of inbound connections to accept (from all peers) per second") // Outbound Connection Throttling - fs.Uint(OutboundConnectionThrottlingRpsKey, constants.DefaultOutboundConnectionThrottlingRps, "Make at most this number of outgoing peer connection attempts per second") - fs.Duration(OutboundConnectionTimeoutKey, constants.DefaultOutboundConnectionTimeout, "Timeout when dialing a peer") + fs.Uint(NetworkOutboundConnectionThrottlingRpsKey, constants.DefaultOutboundConnectionThrottlingRps, "Make at most this number of outgoing peer connection attempts per second") + fs.Duration(NetworkOutboundConnectionTimeoutKey, constants.DefaultOutboundConnectionTimeout, "Timeout when dialing a peer") // Timeouts fs.Duration(NetworkInitialTimeoutKey, constants.DefaultNetworkInitialTimeout, "Initial timeout value of the adaptive timeout manager") fs.Duration(NetworkMinimumTimeoutKey, constants.DefaultNetworkMinimumTimeout, "Minimum timeout value of the adaptive timeout manager") @@ -152,11 +157,13 @@ func addNodeFlags(fs *pflag.FlagSet) { fs.Duration(NetworkPingTimeoutKey, constants.DefaultPingPongTimeout, "Timeout value for Ping-Pong with a peer") fs.Duration(NetworkPingFrequencyKey, constants.DefaultPingFrequency, "Frequency of pinging other peers") - fs.Bool(NetworkCompressionEnabledKey, constants.DefaultNetworkCompressionEnabled, "If true, compress certain outbound messages. This node will be able to parse compressed inbound messages regardless of this flag's value") - fs.String(NetworkCompressionTypeKey, constants.DefaultNetworkCompressionType.String(), fmt.Sprintf("Compression type for outbound messages. 
Must be one of [%s, %s, %s]", compression.TypeGzip, compression.TypeZstd, compression.TypeNone)) + fs.String(NetworkCompressionTypeKey, constants.DefaultNetworkCompressionType.String(), fmt.Sprintf("Compression type for outbound messages. Must be one of [%s, %s]", compression.TypeZstd, compression.TypeNone)) fs.Duration(NetworkMaxClockDifferenceKey, constants.DefaultNetworkMaxClockDifference, "Max allowed clock difference value between this node and peers") - fs.Bool(NetworkAllowPrivateIPsKey, constants.DefaultNetworkAllowPrivateIPs, "Allows the node to initiate outbound connection attempts to peers with private IPs") + // Note: The default value is set to false here because the default + // networkID is mainnet. The real default value of NetworkAllowPrivateIPs is + // based on the networkID. + fs.Bool(NetworkAllowPrivateIPsKey, false, fmt.Sprintf("Allows the node to initiate outbound connection attempts to peers with private IPs. If the provided --%s is one of [%s, %s, %s, %s] the default is false. 
Otherwise, the default is true", NetworkNameKey, constants.FlareName, constants.SongbirdName, constants.CostwoName, constants.CostonName)) fs.Bool(NetworkRequireValidatorToConnectKey, constants.DefaultNetworkRequireValidatorToConnect, "If true, this node will only maintain a connection with another node if this node is a validator, the other node is a validator, or the other node is a beacon") fs.Uint(NetworkPeerReadBufferSizeKey, constants.DefaultNetworkPeerReadBufferSize, "Size, in bytes, of the buffer that we read peer messages into (there is one buffer per peer)") fs.Uint(NetworkPeerWriteBufferSizeKey, constants.DefaultNetworkPeerWriteBufferSize, "Size, in bytes, of the buffer that we write peer messages into (there is one buffer per peer)") @@ -177,9 +184,9 @@ func addNodeFlags(fs *pflag.FlagSet) { fs.Duration(BenchlistMinFailingDurationKey, constants.DefaultBenchlistMinFailingDuration, "Minimum amount of time messages to a peer must be failing before the peer is benched") // Router - fs.Duration(ConsensusGossipFrequencyKey, constants.DefaultConsensusGossipFrequency, "Frequency of gossiping accepted frontiers") fs.Uint(ConsensusAppConcurrencyKey, constants.DefaultConsensusAppConcurrency, "Maximum number of goroutines to use when handling App messages on a chain") fs.Duration(ConsensusShutdownTimeoutKey, constants.DefaultConsensusShutdownTimeout, "Timeout before killing an unresponsive chain") + fs.Duration(ConsensusFrontierPollFrequencyKey, constants.DefaultFrontierPollFrequency, "Frequency of polling for new consensus frontiers") fs.Uint(ConsensusGossipAcceptedFrontierValidatorSizeKey, constants.DefaultConsensusGossipAcceptedFrontierValidatorSize, "Number of validators to gossip to when gossiping accepted frontier") fs.Uint(ConsensusGossipAcceptedFrontierNonValidatorSizeKey, constants.DefaultConsensusGossipAcceptedFrontierNonValidatorSize, "Number of non-validators to gossip to when gossiping accepted frontier") 
fs.Uint(ConsensusGossipAcceptedFrontierPeerSizeKey, constants.DefaultConsensusGossipAcceptedFrontierPeerSize, "Number of peers to gossip to when gossiping accepted frontier") @@ -206,14 +213,15 @@ func addNodeFlags(fs *pflag.FlagSet) { fs.Uint64(OutboundThrottlerNodeMaxAtLargeBytesKey, constants.DefaultOutboundThrottlerNodeMaxAtLargeBytes, "Max number of bytes a node can take from the outbound message throttler's at-large allocation. Must be at least the max message size") // HTTP APIs - fs.String(HTTPHostKey, "127.0.0.1", "Address of the HTTP server") - fs.Uint(HTTPPortKey, DefaultHTTPPort, "Port of the HTTP server") + fs.String(HTTPHostKey, "127.0.0.1", "Address of the HTTP server. If the address is empty or a literal unspecified IP address, the server will bind on all available unicast and anycast IP addresses of the local system") + fs.Uint(HTTPPortKey, DefaultHTTPPort, "Port of the HTTP server. If the port is 0 a port number is automatically chosen") fs.Bool(HTTPSEnabledKey, false, "Upgrade the HTTP server to HTTPs") fs.String(HTTPSKeyFileKey, "", fmt.Sprintf("TLS private key file for the HTTPs server. Ignored if %s is specified", HTTPSKeyContentKey)) fs.String(HTTPSKeyContentKey, "", "Specifies base64 encoded TLS private key for the HTTPs server") fs.String(HTTPSCertFileKey, "", fmt.Sprintf("TLS certificate file for the HTTPs server. Ignored if %s is specified", HTTPSCertContentKey)) fs.String(HTTPSCertContentKey, "", "Specifies base64 encoded TLS certificate for the HTTPs server") fs.String(HTTPAllowedOrigins, "*", "Origins to allow on the HTTP port. Defaults to * which allows all origins. Example: https://*.avax.network https://*.avax-test.network") + fs.StringSlice(HTTPAllowedHostsKey, []string{"localhost"}, "List of acceptable host names in API requests. Provide the wildcard ('*') to accept requests from all hosts. API requests where the Host field is empty or an IP address will always be accepted. 
An API call whose HTTP Host field isn't acceptable will receive a 403 error code") fs.Duration(HTTPShutdownWaitKey, 0, "Duration to wait after receiving SIGTERM or SIGINT before initiating shutdown. The /health endpoint will return unhealthy during this duration") fs.Duration(HTTPShutdownTimeoutKey, 10*time.Second, "Maximum duration to wait for existing connections to complete during node shutdown") fs.Duration(HTTPReadTimeoutKey, 30*time.Second, "Maximum duration for reading the entire request, including the body. A zero or negative value means there will be no timeout") @@ -249,8 +257,8 @@ func addNodeFlags(fs *pflag.FlagSet) { fs.Duration(NetworkHealthMaxOutstandingDurationKey, 5*time.Minute, "Node reports unhealthy if there has been a request outstanding for this duration") // Staking - fs.Uint(StakingPortKey, DefaultStakingPort, "Port of the consensus server") - fs.Bool(StakingEnabledKey, true, "Enable staking. If enabled, Network TLS is required") + fs.String(StakingHostKey, "", "Address of the consensus server. If the address is empty or a literal unspecified IP address, the server will bind on all available unicast and anycast IP addresses of the local system") // Bind to all interfaces by default. + fs.Uint(StakingPortKey, DefaultStakingPort, "Port of the consensus server. If the port is 0 a port number is automatically chosen") fs.Bool(StakingEphemeralCertEnabledKey, false, "If true, the node uses an ephemeral staking TLS key and certificate, and has an ephemeral node ID") fs.String(StakingTLSKeyPathKey, defaultStakingTLSKeyPath, fmt.Sprintf("Path to the TLS private key for staking. 
Ignored if %s is specified", StakingTLSKeyContentKey)) fs.String(StakingTLSKeyContentKey, "", "Specifies base64 encoded TLS private key for staking") @@ -259,8 +267,9 @@ func addNodeFlags(fs *pflag.FlagSet) { fs.Bool(StakingEphemeralSignerEnabledKey, false, "If true, the node uses an ephemeral staking signer key") fs.String(StakingSignerKeyPathKey, defaultStakingSignerKeyPath, fmt.Sprintf("Path to the signer private key for staking. Ignored if %s is specified", StakingSignerKeyContentKey)) fs.String(StakingSignerKeyContentKey, "", "Specifies base64 encoded signer private key for staking") - - fs.Uint64(StakingDisabledWeightKey, 100, "Weight to provide to each peer when staking is disabled") + fs.Bool(SybilProtectionEnabledKey, true, "Enables sybil protection. If enabled, Network TLS is required") + fs.Uint64(SybilProtectionDisabledWeightKey, 100, "Weight to provide to each peer when sybil protection is disabled") + fs.Bool(PartialSyncPrimaryNetworkKey, false, "Only sync the P-chain on the Primary Network. If the node is a Primary Network validator, it will report unhealthy") // Uptime Requirement fs.Float64(UptimeRequirementKey, genesis.LocalParams.UptimeRequirement, "Fraction of time a validator must be online to receive rewards") // Minimum Stake required to validate the Primary Network @@ -287,30 +296,29 @@ func addNodeFlags(fs *pflag.FlagSet) { fs.String(StateSyncIDsKey, "", "Comma separated list of state sync peer ids to connect to. Example: NodeID-JR4dVmy6ffUGAKCBDkyCbeZbyHQBeDsET,NodeID-8CrVPQZ4VSqgL8zTdvL14G8HqAfrBr4z") // Bootstrapping + // TODO: combine "BootstrapIPsKey" and "BootstrapIDsKey" into one flag fs.String(BootstrapIPsKey, "", "Comma separated list of bootstrap peer ips to connect to. Example: 127.0.0.1:9630,127.0.0.1:9631") fs.String(BootstrapIDsKey, "", "Comma separated list of bootstrap peer ids to connect to. 
Example: NodeID-JR4dVmy6ffUGAKCBDkyCbeZbyHQBeDsET,NodeID-8CrVPQZ4VSqgL8zTdvL14G8HqAfrBr4z") - fs.Bool(RetryBootstrapKey, true, "Specifies whether bootstrap should be retried") - fs.Int(RetryBootstrapWarnFrequencyKey, 50, "Specifies how many times bootstrap should be retried before warning the operator") fs.Duration(BootstrapBeaconConnectionTimeoutKey, time.Minute, "Timeout before emitting a warn log when connecting to bootstrapping beacons") fs.Duration(BootstrapMaxTimeGetAncestorsKey, 50*time.Millisecond, "Max Time to spend fetching a container and its ancestors when responding to a GetAncestors") fs.Uint(BootstrapAncestorsMaxContainersSentKey, 2000, "Max number of containers in an Ancestors message sent by this node") fs.Uint(BootstrapAncestorsMaxContainersReceivedKey, 2000, "This node reads at most this many containers from an incoming Ancestors message") // Consensus - fs.Int(SnowSampleSizeKey, 20, "Number of nodes to query for each network poll") - fs.Int(SnowQuorumSizeKey, 15, "Alpha value to use for required number positive results") - // TODO: Replace this temporary flag description after the X-chain - // linearization with "Beta value to use for virtuous transactions" - fs.Int(SnowVirtuousCommitThresholdKey, 15, "This flag is temporarily ignored due to the X-chain linearization") - fs.Int(SnowRogueCommitThresholdKey, 20, "Beta value to use for rogue transactions") - fs.Int(SnowAvalancheNumParentsKey, 5, "Number of vertexes for reference from each new vertex") - fs.Int(SnowAvalancheBatchSizeKey, 30, "Number of operations to batch in each new vertex") - fs.Int(SnowConcurrentRepollsKey, 4, "Minimum number of concurrent polls for finalizing consensus") - fs.Int(SnowOptimalProcessingKey, 10, "Optimal number of processing containers in consensus") - fs.Int(SnowMaxProcessingKey, 256, "Maximum number of processing items to be considered healthy") - fs.Duration(SnowMaxTimeProcessingKey, 30*time.Second, "Maximum amount of time an item should be processing and still 
be healthy") - fs.Uint(SnowMixedQueryNumPushVdrKey, 10, fmt.Sprintf("If this node is a validator, when a container is inserted into consensus, send a Push Query to %s validators and a Pull Query to the others. Must be <= k.", SnowMixedQueryNumPushVdrKey)) - fs.Uint(SnowMixedQueryNumPushNonVdrKey, 0, fmt.Sprintf("If this node is not a validator, when a container is inserted into consensus, send a Push Query to %s validators and a Pull Query to the others. Must be <= k.", SnowMixedQueryNumPushNonVdrKey)) + fs.Int(SnowSampleSizeKey, snowball.DefaultParameters.K, "Number of nodes to query for each network poll") + fs.Int(SnowQuorumSizeKey, snowball.DefaultParameters.AlphaConfidence, "Threshold of nodes required to update this node's preference and increase its confidence in a network poll") + fs.Int(SnowPreferenceQuorumSizeKey, snowball.DefaultParameters.AlphaPreference, fmt.Sprintf("Threshold of nodes required to update this node's preference in a network poll. Ignored if %s is provided", SnowQuorumSizeKey)) + fs.Int(SnowConfidenceQuorumSizeKey, snowball.DefaultParameters.AlphaConfidence, fmt.Sprintf("Threshold of nodes required to increase this node's confidence in a network poll. 
Ignored if %s is provided", SnowQuorumSizeKey)) + + fs.Int(SnowCommitThresholdKey, snowball.DefaultParameters.BetaRogue, "Beta value to use for transactions") + // TODO: Remove these once enough time has passed with SnowCommitThresholdKey + fs.Int(SnowVirtuousCommitThresholdKey, snowball.DefaultParameters.BetaVirtuous, "This flag is temporarily ignored due to the X-chain linearization") + fs.Int(SnowRogueCommitThresholdKey, snowball.DefaultParameters.BetaRogue, "Beta value to use for rogue transactions") + + fs.Int(SnowConcurrentRepollsKey, snowball.DefaultParameters.ConcurrentRepolls, "Minimum number of concurrent polls for finalizing consensus") + fs.Int(SnowOptimalProcessingKey, snowball.DefaultParameters.OptimalProcessing, "Optimal number of processing containers in consensus") + fs.Int(SnowMaxProcessingKey, snowball.DefaultParameters.MaxOutstandingItems, "Maximum number of processing items to be considered healthy") + fs.Duration(SnowMaxTimeProcessingKey, snowball.DefaultParameters.MaxItemProcessingTime, "Maximum amount of time an item should be processing and still be healthy") // ProposerVM fs.Bool(ProposerVMUseCurrentHeightKey, false, "Have the ProposerVM always report the last accepted P-chain block height") @@ -376,7 +384,9 @@ func addNodeFlags(fs *pflag.FlagSet) { fs.String(TracingEndpointKey, "localhost:4317", "The endpoint to send trace data to") fs.Bool(TracingInsecureKey, true, "If true, don't use TLS when sending trace data") fs.Float64(TracingSampleRateKey, 0.1, "The fraction of traces to sample. If >= 1, always sample. 
If <= 0, never sample") - // TODO add flag to take in headers to send from exporter + fs.StringToString(TracingHeadersKey, map[string]string{}, "The headers to provide the trace indexer") + + fs.String(ProcessContextFileKey, defaultProcessContextPath, "The path to write process context to (including PID, API URI, and staking address).") } // BuildFlagSet returns a complete set of flags for avalanchego diff --git a/avalanchego/config/keys.go b/avalanchego/config/keys.go index 01c33643..b2ccc16f 100644 --- a/avalanchego/config/keys.go +++ b/avalanchego/config/keys.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package config @@ -10,9 +10,11 @@ const ( ConfigContentKey = "config-file-content" ConfigContentTypeKey = "config-file-content-type" VersionKey = "version" - GenesisConfigFileKey = "genesis" - GenesisConfigContentKey = "genesis-content" + GenesisFileKey = "genesis-file" + GenesisFileContentKey = "genesis-file-content" NetworkNameKey = "network-id" + ACPSupportKey = "acp-support" + ACPObjectKey = "acp-object" TxFeeKey = "tx-fee" CreateAssetTxFeeKey = "create-asset-tx-fee" CreateSubnetTxFeeKey = "create-subnet-tx-fee" @@ -34,16 +36,13 @@ const ( StakeMintingPeriodKey = "stake-minting-period" StakeSupplyCapKey = "stake-supply-cap" DBTypeKey = "db-type" + DBReadOnlyKey = "db-read-only" DBPathKey = "db-dir" DBConfigFileKey = "db-config-file" DBConfigContentKey = "db-config-file-content" PublicIPKey = "public-ip" PublicIPResolutionFreqKey = "public-ip-resolution-frequency" PublicIPResolutionServiceKey = "public-ip-resolution-service" - InboundConnUpgradeThrottlerCooldownKey = "inbound-connection-throttling-cooldown" - InboundThrottlerMaxConnsPerSecKey = "inbound-connection-throttling-max-conns-per-sec" - OutboundConnectionThrottlingRpsKey = "outbound-connection-throttling-rps" - OutboundConnectionTimeoutKey = 
"outbound-connection-timeout" HTTPHostKey = "http-host" HTTPPortKey = "http-port" HTTPSEnabledKey = "http-tls-enabled" @@ -52,6 +51,7 @@ const ( HTTPSCertFileKey = "http-tls-cert-file" HTTPSCertContentKey = "http-tls-cert-file-content" HTTPAllowedOrigins = "http-allowed-origins" + HTTPAllowedHostsKey = "http-allowed-hosts" HTTPShutdownTimeoutKey = "http-shutdown-timeout" HTTPShutdownWaitKey = "http-shutdown-wait" HTTPReadTimeoutKey = "http-read-timeout" @@ -65,8 +65,8 @@ const ( StateSyncIDsKey = "state-sync-ids" BootstrapIPsKey = "bootstrap-ips" BootstrapIDsKey = "bootstrap-ids" + StakingHostKey = "staking-host" StakingPortKey = "staking-port" - StakingEnabledKey = "staking-enabled" StakingEphemeralCertEnabledKey = "staking-ephemeral-cert-enabled" StakingTLSKeyPathKey = "staking-tls-key-file" StakingTLSKeyContentKey = "staking-tls-key-file-content" @@ -75,7 +75,8 @@ const ( StakingEphemeralSignerEnabledKey = "staking-ephemeral-signer-enabled" StakingSignerKeyPathKey = "staking-signer-key-file" StakingSignerKeyContentKey = "staking-signer-key-file-content" - StakingDisabledWeightKey = "staking-disabled-weight" + SybilProtectionEnabledKey = "sybil-protection-enabled" + SybilProtectionDisabledWeightKey = "sybil-protection-disabled-weight" NetworkInitialTimeoutKey = "network-initial-timeout" NetworkMinimumTimeoutKey = "network-minimum-timeout" NetworkMaximumTimeoutKey = "network-maximum-timeout" @@ -93,12 +94,13 @@ const ( NetworkPeerListNonValidatorGossipSizeKey = "network-peer-list-non-validator-gossip-size" NetworkPeerListPeersGossipSizeKey = "network-peer-list-peers-gossip-size" NetworkPeerListGossipFreqKey = "network-peer-list-gossip-frequency" + NetworkPeerListPullGossipFreqKey = "network-peer-list-pull-gossip-frequency" + NetworkPeerListBloomResetFreqKey = "network-peer-list-bloom-reset-frequency" NetworkInitialReconnectDelayKey = "network-initial-reconnect-delay" NetworkReadHandshakeTimeoutKey = "network-read-handshake-timeout" NetworkPingTimeoutKey = 
"network-ping-timeout" NetworkPingFrequencyKey = "network-ping-frequency" NetworkMaxReconnectDelayKey = "network-max-reconnect-delay" - NetworkCompressionEnabledKey = "network-compression-enabled" // TODO this is deprecated. Eventually remove it and constants.DefaultNetworkCompressionEnabled NetworkCompressionTypeKey = "network-compression-type" NetworkMaxClockDifferenceKey = "network-max-clock-difference" NetworkAllowPrivateIPsKey = "network-allow-private-ips" @@ -108,6 +110,10 @@ const ( NetworkTCPProxyEnabledKey = "network-tcp-proxy-enabled" NetworkTCPProxyReadTimeoutKey = "network-tcp-proxy-read-timeout" NetworkTLSKeyLogFileKey = "network-tls-key-log-file-unsafe" + NetworkInboundConnUpgradeThrottlerCooldownKey = "network-inbound-connection-throttling-cooldown" + NetworkInboundThrottlerMaxConnsPerSecKey = "network-inbound-connection-throttling-max-conns-per-sec" + NetworkOutboundConnectionThrottlingRpsKey = "network-outbound-connection-throttling-rps" + NetworkOutboundConnectionTimeoutKey = "network-outbound-connection-timeout" BenchlistFailThresholdKey = "benchlist-fail-threshold" BenchlistDurationKey = "benchlist-duration" BenchlistMinFailingDurationKey = "benchlist-min-failing-duration" @@ -122,16 +128,16 @@ const ( LogDisableDisplayPluginLogsKey = "log-disable-display-plugin-logs" SnowSampleSizeKey = "snow-sample-size" SnowQuorumSizeKey = "snow-quorum-size" + SnowPreferenceQuorumSizeKey = "snow-preference-quorum-size" + SnowConfidenceQuorumSizeKey = "snow-confidence-quorum-size" SnowVirtuousCommitThresholdKey = "snow-virtuous-commit-threshold" SnowRogueCommitThresholdKey = "snow-rogue-commit-threshold" - SnowAvalancheNumParentsKey = "snow-avalanche-num-parents" - SnowAvalancheBatchSizeKey = "snow-avalanche-batch-size" + SnowCommitThresholdKey = "snow-commit-threshold" SnowConcurrentRepollsKey = "snow-concurrent-repolls" SnowOptimalProcessingKey = "snow-optimal-processing" SnowMaxProcessingKey = "snow-max-processing" SnowMaxTimeProcessingKey = 
"snow-max-time-processing" - SnowMixedQueryNumPushVdrKey = "snow-mixed-query-num-push-vdr" - SnowMixedQueryNumPushNonVdrKey = "snow-mixed-query-num-push-non-vdr" + PartialSyncPrimaryNetworkKey = "partial-sync-primary-network" TrackSubnetsKey = "track-subnets" AdminAPIEnabledKey = "api-admin-enabled" InfoAPIEnabledKey = "api-info-enabled" @@ -142,8 +148,9 @@ const ( IpcsChainIDsKey = "ipcs-chain-ids" IpcsPathKey = "ipcs-path" MeterVMsEnabledKey = "meter-vms-enabled" - ConsensusGossipFrequencyKey = "consensus-gossip-frequency" ConsensusAppConcurrencyKey = "consensus-app-concurrency" + ConsensusShutdownTimeoutKey = "consensus-shutdown-timeout" + ConsensusFrontierPollFrequencyKey = "consensus-frontier-poll-frequency" ConsensusGossipAcceptedFrontierValidatorSizeKey = "consensus-accepted-frontier-gossip-validator-size" ConsensusGossipAcceptedFrontierNonValidatorSizeKey = "consensus-accepted-frontier-gossip-non-validator-size" ConsensusGossipAcceptedFrontierPeerSizeKey = "consensus-accepted-frontier-gossip-peer-size" @@ -153,7 +160,6 @@ const ( AppGossipValidatorSizeKey = "consensus-app-gossip-validator-size" AppGossipNonValidatorSizeKey = "consensus-app-gossip-non-validator-size" AppGossipPeerSizeKey = "consensus-app-gossip-peer-size" - ConsensusShutdownTimeoutKey = "consensus-shutdown-timeout" ProposerVMUseCurrentHeightKey = "proposervm-use-current-height" FdLimitKey = "fd-limit" IndexEnabledKey = "index-enabled" @@ -162,8 +168,6 @@ const ( RouterHealthMaxOutstandingRequestsKey = "router-health-max-outstanding-requests" HealthCheckFreqKey = "health-check-frequency" HealthCheckAveragerHalflifeKey = "health-check-averager-halflife" - RetryBootstrapKey = "bootstrap-retry-enabled" - RetryBootstrapWarnFrequencyKey = "bootstrap-retry-warn-frequency" PluginDirKey = "plugin-dir" BootstrapBeaconConnectionTimeoutKey = "bootstrap-beacon-connection-timeout" BootstrapMaxTimeGetAncestorsKey = "bootstrap-max-time-get-ancestors" @@ -211,4 +215,6 @@ const ( TracingInsecureKey = 
"tracing-insecure" TracingSampleRateKey = "tracing-sample-rate" TracingExporterTypeKey = "tracing-exporter-type" + TracingHeadersKey = "tracing-headers" + ProcessContextFileKey = "process-context-file" ) diff --git a/avalanchego/config/viper.go b/avalanchego/config/viper.go index 1e236ea3..59ecf194 100644 --- a/avalanchego/config/viper.go +++ b/avalanchego/config/viper.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package config diff --git a/avalanchego/database/batch.go b/avalanchego/database/batch.go index b097dc60..f3187a1f 100644 --- a/avalanchego/database/batch.go +++ b/avalanchego/database/batch.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. // For ease of implementation, our database's interface matches Ethereum's @@ -7,7 +7,7 @@ package database -import "golang.org/x/exp/slices" +import "slices" // Batch is a write-only database that commits changes to its host database // when Write is called. A batch cannot be used concurrently. diff --git a/avalanchego/database/benchmark_database.go b/avalanchego/database/benchmark_database.go index 9f4ae21c..43af10db 100644 --- a/avalanchego/database/benchmark_database.go +++ b/avalanchego/database/benchmark_database.go @@ -1,28 +1,29 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package database import ( - "fmt" "math/rand" "testing" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/utils/units" ) var ( // Benchmarks is a list of all database benchmarks - Benchmarks = []func(b *testing.B, db Database, name string, keys, values [][]byte){ - BenchmarkGet, - BenchmarkPut, - BenchmarkDelete, - BenchmarkBatchPut, - BenchmarkBatchDelete, - BenchmarkBatchWrite, - BenchmarkParallelGet, - BenchmarkParallelPut, - BenchmarkParallelDelete, + Benchmarks = map[string]func(b *testing.B, db Database, keys, values [][]byte){ + "Get": BenchmarkGet, + "Put": BenchmarkPut, + "Delete": BenchmarkDelete, + "BatchPut": BenchmarkBatchPut, + "BatchDelete": BenchmarkBatchDelete, + "BatchWrite": BenchmarkBatchWrite, + "ParallelGet": BenchmarkParallelGet, + "ParallelPut": BenchmarkParallelPut, + "ParallelDelete": BenchmarkParallelDelete, } // BenchmarkSizes to use with each benchmark BenchmarkSizes = [][]int{ @@ -35,6 +36,8 @@ var ( // Writes size data into the db in order to setup reads in subsequent tests. func SetupBenchmark(b *testing.B, count int, keySize, valueSize int) ([][]byte, [][]byte) { + require := require.New(b) + b.Helper() keys := make([][]byte, count) @@ -43,218 +46,159 @@ func SetupBenchmark(b *testing.B, count int, keySize, valueSize int) ([][]byte, keyBytes := make([]byte, keySize) valueBytes := make([]byte, valueSize) _, err := rand.Read(keyBytes) // #nosec G404 - if err != nil { - b.Fatal(err) - } + require.NoError(err) _, err = rand.Read(valueBytes) // #nosec G404 - if err != nil { - b.Fatal(err) - } + require.NoError(err) keys[i], values[i] = keyBytes, valueBytes } return keys, values } // BenchmarkGet measures the time it takes to get an operation from a database. 
-func BenchmarkGet(b *testing.B, db Database, name string, keys, values [][]byte) { +func BenchmarkGet(b *testing.B, db Database, keys, values [][]byte) { + require.NotEmpty(b, keys) count := len(keys) - if count == 0 { - b.Fatal("no keys") - } - b.Run(fmt.Sprintf("%s_%d_pairs_%d_keys_%d_values_db.get", name, count, len(keys[0]), len(values[0])), func(b *testing.B) { - for i, key := range keys { - value := values[i] - if err := db.Put(key, value); err != nil { - b.Fatalf("Unexpected error in Put %s", err) - } - } + require := require.New(b) - b.ResetTimer() + for i, key := range keys { + value := values[i] + require.NoError(db.Put(key, value)) + } - // Reads b.N values from the db - for i := 0; i < b.N; i++ { - if _, err := db.Get(keys[i%count]); err != nil { - b.Fatalf("Unexpected error in Get %s", err) - } - } - }) + b.ResetTimer() + + // Reads b.N values from the db + for i := 0; i < b.N; i++ { + _, err := db.Get(keys[i%count]) + require.NoError(err) + } } // BenchmarkPut measures the time it takes to write an operation to a database. -func BenchmarkPut(b *testing.B, db Database, name string, keys, values [][]byte) { +func BenchmarkPut(b *testing.B, db Database, keys, values [][]byte) { + require.NotEmpty(b, keys) count := len(keys) - if count == 0 { - b.Fatal("no keys") - } - b.Run(fmt.Sprintf("%s_%d_pairs_%d_keys_%d_values_db.put", name, count, len(keys[0]), len(values[0])), func(b *testing.B) { - // Writes b.N values to the db - for i := 0; i < b.N; i++ { - if err := db.Put(keys[i%count], values[i%count]); err != nil { - b.Fatalf("Unexpected error in Put %s", err) - } - } - }) + // Writes b.N values to the db + for i := 0; i < b.N; i++ { + require.NoError(b, db.Put(keys[i%count], values[i%count])) + } } // BenchmarkDelete measures the time it takes to delete a (k, v) from a database. 
-func BenchmarkDelete(b *testing.B, db Database, name string, keys, values [][]byte) { +func BenchmarkDelete(b *testing.B, db Database, keys, values [][]byte) { + require.NotEmpty(b, keys) count := len(keys) - if count == 0 { - b.Fatal("no keys") - } - b.Run(fmt.Sprintf("%s_%d_pairs_%d_keys_%d_values_db.delete", name, count, len(keys[0]), len(values[0])), func(b *testing.B) { - // Writes random values of size _size_ to the database - for i, key := range keys { - value := values[i] - if err := db.Put(key, value); err != nil { - b.Fatalf("Unexpected error in Put %s", err) - } - } + require := require.New(b) + + // Writes random values of size _size_ to the database + for i, key := range keys { + value := values[i] + require.NoError(db.Put(key, value)) + } - b.ResetTimer() + b.ResetTimer() - // Deletes b.N values from the db - for i := 0; i < b.N; i++ { - if err := db.Delete(keys[i%count]); err != nil { - b.Fatalf("Unexpected error in Delete %s", err) - } - } - }) + // Deletes b.N values from the db + for i := 0; i < b.N; i++ { + require.NoError(db.Delete(keys[i%count])) + } } // BenchmarkBatchPut measures the time it takes to batch put. -func BenchmarkBatchPut(b *testing.B, db Database, name string, keys, values [][]byte) { +func BenchmarkBatchPut(b *testing.B, db Database, keys, values [][]byte) { + require.NotEmpty(b, keys) count := len(keys) - if count == 0 { - b.Fatal("no keys") - } - b.Run(fmt.Sprintf("%s_%d_pairs_%d_keys_%d_values_batch.put", name, count, len(keys[0]), len(values[0])), func(b *testing.B) { - batch := db.NewBatch() - for i := 0; i < b.N; i++ { - if err := batch.Put(keys[i%count], values[i%count]); err != nil { - b.Fatalf("Unexpected error in batch.Put: %s", err) - } - } - }) + batch := db.NewBatch() + for i := 0; i < b.N; i++ { + require.NoError(b, batch.Put(keys[i%count], values[i%count])) + } } // BenchmarkBatchDelete measures the time it takes to batch delete. 
-func BenchmarkBatchDelete(b *testing.B, db Database, name string, keys, values [][]byte) { +func BenchmarkBatchDelete(b *testing.B, db Database, keys, _ [][]byte) { + require.NotEmpty(b, keys) count := len(keys) - if count == 0 { - b.Fatal("no keys") - } - b.Run(fmt.Sprintf("%s_%d_pairs_%d_keys_%d_values_batch.delete", name, count, len(keys[0]), len(values[0])), func(b *testing.B) { - batch := db.NewBatch() - for i := 0; i < b.N; i++ { - if err := batch.Delete(keys[i%count]); err != nil { - b.Fatalf("Unexpected error in batch.Delete: %s", err) - } - } - }) + batch := db.NewBatch() + for i := 0; i < b.N; i++ { + require.NoError(b, batch.Delete(keys[i%count])) + } } // BenchmarkBatchWrite measures the time it takes to batch write. -func BenchmarkBatchWrite(b *testing.B, db Database, name string, keys, values [][]byte) { - count := len(keys) - if count == 0 { - b.Fatal("no keys") - } +func BenchmarkBatchWrite(b *testing.B, db Database, keys, values [][]byte) { + require.NotEmpty(b, keys) - b.Run(fmt.Sprintf("%s_%d_pairs_%d_keys_%d_values_batch.write", name, count, len(keys[0]), len(values[0])), func(b *testing.B) { - batch := db.NewBatch() - for i, key := range keys { - value := values[i] + require := require.New(b) - if err := batch.Put(key, value); err != nil { - b.Fatalf("Unexpected error in batch.Put: %s", err) - } - } + batch := db.NewBatch() + for i, key := range keys { + value := values[i] + require.NoError(batch.Put(key, value)) + } - b.ResetTimer() + b.ResetTimer() - for i := 0; i < b.N; i++ { - if err := batch.Write(); err != nil { - b.Fatalf("Unexpected error in batch.Write: %s", err) - } - } - }) + for i := 0; i < b.N; i++ { + require.NoError(batch.Write()) + } } // BenchmarkParallelGet measures the time it takes to read in parallel. 
-func BenchmarkParallelGet(b *testing.B, db Database, name string, keys, values [][]byte) { +func BenchmarkParallelGet(b *testing.B, db Database, keys, values [][]byte) { + require.NotEmpty(b, keys) count := len(keys) - if count == 0 { - b.Fatal("no keys") - } - b.Run(fmt.Sprintf("%s_%d_pairs_%d_keys_%d_values_db.get_parallel", name, count, len(keys[0]), len(values[0])), func(b *testing.B) { - for i, key := range keys { - value := values[i] - if err := db.Put(key, value); err != nil { - b.Fatalf("Unexpected error in Put %s", err) - } - } + require := require.New(b) + + for i, key := range keys { + value := values[i] + require.NoError(db.Put(key, value)) + } - b.ResetTimer() + b.ResetTimer() - b.RunParallel(func(pb *testing.PB) { - for i := 0; pb.Next(); i++ { - if _, err := db.Get(keys[i%count]); err != nil { - b.Fatalf("Unexpected error in Get %s", err) - } - } - }) + b.RunParallel(func(pb *testing.PB) { + for i := 0; pb.Next(); i++ { + _, err := db.Get(keys[i%count]) + require.NoError(err) + } }) } // BenchmarkParallelPut measures the time it takes to write to the db in parallel. -func BenchmarkParallelPut(b *testing.B, db Database, name string, keys, values [][]byte) { +func BenchmarkParallelPut(b *testing.B, db Database, keys, values [][]byte) { + require.NotEmpty(b, keys) count := len(keys) - if count == 0 { - b.Fatal("no keys") - } - b.Run(fmt.Sprintf("%s_%d_pairs_%d_keys_%d_values_db.put_parallel", name, count, len(keys[0]), len(values[0])), func(b *testing.B) { - b.RunParallel(func(pb *testing.PB) { - // Write N values to the db - for i := 0; pb.Next(); i++ { - if err := db.Put(keys[i%count], values[i%count]); err != nil { - b.Fatalf("Unexpected error in Put %s", err) - } - } - }) + b.RunParallel(func(pb *testing.PB) { + // Write N values to the db + for i := 0; pb.Next(); i++ { + require.NoError(b, db.Put(keys[i%count], values[i%count])) + } }) } // BenchmarkParallelDelete measures the time it takes to delete a (k, v) from the db. 
-func BenchmarkParallelDelete(b *testing.B, db Database, name string, keys, values [][]byte) { +func BenchmarkParallelDelete(b *testing.B, db Database, keys, values [][]byte) { + require.NotEmpty(b, keys) count := len(keys) - if count == 0 { - b.Fatal("no keys") + + require := require.New(b) + for i, key := range keys { + value := values[i] + require.NoError(db.Put(key, value)) } + b.ResetTimer() - b.Run(fmt.Sprintf("%s_%d_pairs_%d_keys_%d_values_db.delete_parallel", name, count, len(keys[0]), len(values[0])), func(b *testing.B) { - for i, key := range keys { - value := values[i] - if err := db.Put(key, value); err != nil { - b.Fatalf("Unexpected error in Put %s", err) - } + b.RunParallel(func(pb *testing.PB) { + // Deletes b.N values from the db + for i := 0; pb.Next(); i++ { + require.NoError(db.Delete(keys[i%count])) } - b.ResetTimer() - - b.RunParallel(func(pb *testing.PB) { - // Deletes b.N values from the db - for i := 0; pb.Next(); i++ { - if err := db.Delete(keys[i%count]); err != nil { - b.Fatalf("Unexpected error in Delete %s", err) - } - } - }) }) } diff --git a/avalanchego/database/common.go b/avalanchego/database/common.go index a27b0d27..651b8fe5 100644 --- a/avalanchego/database/common.go +++ b/avalanchego/database/common.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package database diff --git a/avalanchego/database/corruptabledb/db.go b/avalanchego/database/corruptabledb/db.go index a9a945ff..d5bd6a71 100644 --- a/avalanchego/database/corruptabledb/db.go +++ b/avalanchego/database/corruptabledb/db.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package corruptabledb @@ -89,6 +89,34 @@ func (db *Database) NewBatch() database.Batch { } } +func (db *Database) NewIterator() database.Iterator { + return &iterator{ + Iterator: db.Database.NewIterator(), + db: db, + } +} + +func (db *Database) NewIteratorWithStart(start []byte) database.Iterator { + return &iterator{ + Iterator: db.Database.NewIteratorWithStart(start), + db: db, + } +} + +func (db *Database) NewIteratorWithPrefix(prefix []byte) database.Iterator { + return &iterator{ + Iterator: db.Database.NewIteratorWithPrefix(prefix), + db: db, + } +} + +func (db *Database) NewIteratorWithStartAndPrefix(start, prefix []byte) database.Iterator { + return &iterator{ + Iterator: db.Database.NewIteratorWithStartAndPrefix(start, prefix), + db: db, + } +} + func (db *Database) corrupted() error { db.errorLock.RLock() defer db.errorLock.RUnlock() @@ -127,3 +155,24 @@ func (b *batch) Write() error { } return b.db.handleError(b.Batch.Write()) } + +type iterator struct { + database.Iterator + db *Database +} + +func (it *iterator) Next() bool { + if err := it.db.corrupted(); err != nil { + return false + } + val := it.Iterator.Next() + _ = it.db.handleError(it.Iterator.Error()) + return val +} + +func (it *iterator) Error() error { + if err := it.db.corrupted(); err != nil { + return err + } + return it.db.handleError(it.Iterator.Error()) +} diff --git a/avalanchego/database/corruptabledb/db_test.go b/avalanchego/database/corruptabledb/db_test.go index 6c05036e..2de4d67a 100644 --- a/avalanchego/database/corruptabledb/db_test.go +++ b/avalanchego/database/corruptabledb/db_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package corruptabledb @@ -9,6 +9,7 @@ import ( "testing" "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/memdb" @@ -16,20 +17,29 @@ import ( var errTest = errors.New("non-nil error") +func newDB() *Database { + baseDB := memdb.New() + return New(baseDB) +} + func TestInterface(t *testing.T) { - for _, test := range database.Tests { - baseDB := memdb.New() - db := New(baseDB) - test(t, db) + for name, test := range database.Tests { + t.Run(name, func(t *testing.T) { + test(t, newDB()) + }) } } -func FuzzInterface(f *testing.F) { - for _, test := range database.FuzzTests { - baseDB := memdb.New() - db := New(baseDB) - test(f, db) - } +func FuzzKeyValue(f *testing.F) { + database.FuzzKeyValue(f, newDB()) +} + +func FuzzNewIteratorWithPrefix(f *testing.F) { + database.FuzzNewIteratorWithPrefix(f, newDB()) +} + +func FuzzNewIteratorWithStartAndPrefix(f *testing.F) { + database.FuzzNewIteratorWithStartAndPrefix(f, newDB()) } // TestCorruption tests to make sure corruptabledb wrapper works as expected. 
@@ -55,8 +65,7 @@ func TestCorruption(t *testing.T) { corruptableBatch := db.NewBatch() require.NotNil(t, corruptableBatch) - err := corruptableBatch.Put(key, value) - require.NoError(t, err) + require.NoError(t, corruptableBatch.Put(key, value)) return corruptableBatch.Write() }, @@ -65,14 +74,131 @@ func TestCorruption(t *testing.T) { return err }, } - baseDB := memdb.New() - // wrap this db - corruptableDB := New(baseDB) + corruptableDB := newDB() _ = corruptableDB.handleError(errTest) for name, testFn := range tests { t.Run(name, func(tt *testing.T) { err := testFn(corruptableDB) - require.ErrorIsf(tt, err, errTest, "not received the corruption error") + require.ErrorIs(tt, err, errTest) + }) + } +} + +func TestIterator(t *testing.T) { + errIter := errors.New("iterator error") + + type test struct { + name string + databaseErrBefore error + modifyIter func(*gomock.Controller, *iterator) + op func(*require.Assertions, *iterator) + expectedErr error + } + + tests := []test{ + { + name: "corrupted database; Next", + databaseErrBefore: errTest, + expectedErr: errTest, + modifyIter: func(*gomock.Controller, *iterator) {}, + op: func(require *require.Assertions, iter *iterator) { + require.False(iter.Next()) + }, + }, + { + name: "Next corrupts database", + databaseErrBefore: nil, + expectedErr: errIter, + modifyIter: func(ctrl *gomock.Controller, iter *iterator) { + mockInnerIter := database.NewMockIterator(ctrl) + mockInnerIter.EXPECT().Next().Return(false) + mockInnerIter.EXPECT().Error().Return(errIter) + iter.Iterator = mockInnerIter + }, + op: func(require *require.Assertions, iter *iterator) { + require.False(iter.Next()) + }, + }, + { + name: "corrupted database; Error", + databaseErrBefore: errTest, + expectedErr: errTest, + modifyIter: func(*gomock.Controller, *iterator) {}, + op: func(require *require.Assertions, iter *iterator) { + err := iter.Error() + require.ErrorIs(err, errTest) + }, + }, + { + name: "Error corrupts database", + databaseErrBefore: 
nil, + expectedErr: errIter, + modifyIter: func(ctrl *gomock.Controller, iter *iterator) { + mockInnerIter := database.NewMockIterator(ctrl) + mockInnerIter.EXPECT().Error().Return(errIter) + iter.Iterator = mockInnerIter + }, + op: func(require *require.Assertions, iter *iterator) { + err := iter.Error() + require.ErrorIs(err, errIter) + }, + }, + { + name: "corrupted database; Key", + databaseErrBefore: errTest, + expectedErr: errTest, + modifyIter: func(*gomock.Controller, *iterator) {}, + op: func(_ *require.Assertions, iter *iterator) { + _ = iter.Key() + }, + }, + { + name: "corrupted database; Value", + databaseErrBefore: errTest, + expectedErr: errTest, + modifyIter: func(*gomock.Controller, *iterator) {}, + op: func(_ *require.Assertions, iter *iterator) { + _ = iter.Value() + }, + }, + { + name: "corrupted database; Release", + databaseErrBefore: errTest, + expectedErr: errTest, + modifyIter: func(*gomock.Controller, *iterator) {}, + op: func(_ *require.Assertions, iter *iterator) { + iter.Release() + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) + + // Make a database + corruptableDB := newDB() + // Put a key-value pair in the database. + require.NoError(corruptableDB.Put([]byte{0}, []byte{1})) + + // Mark database as corupted, if applicable + _ = corruptableDB.handleError(tt.databaseErrBefore) + + // Make an iterator + iter := &iterator{ + Iterator: corruptableDB.NewIterator(), + db: corruptableDB, + } + + // Modify the iterator (optional) + tt.modifyIter(ctrl, iter) + + // Do an iterator operation + tt.op(require, iter) + + err := corruptableDB.corrupted() + require.ErrorIs(err, tt.expectedErr) }) } } diff --git a/avalanchego/database/database.go b/avalanchego/database/database.go index 89993a81..938c7f63 100644 --- a/avalanchego/database/database.go +++ b/avalanchego/database/database.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. 
All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. // For ease of implementation, our database's interface matches Ethereum's @@ -21,6 +21,7 @@ type KeyValueReader interface { Has(key []byte) (bool, error) // Get retrieves the given key if it's present in the key-value data store. + // Returns ErrNotFound if the key is not present in the key-value data store. // // Note: [key] is safe to modify and read after calling Get. // The returned byte slice is safe to read, but cannot be modified. diff --git a/avalanchego/database/encdb/codec.go b/avalanchego/database/encdb/codec.go new file mode 100644 index 00000000..62223b4f --- /dev/null +++ b/avalanchego/database/encdb/codec.go @@ -0,0 +1,24 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package encdb + +import ( + "time" + + "github.com/ava-labs/avalanchego/codec" + "github.com/ava-labs/avalanchego/codec/linearcodec" +) + +const CodecVersion = 0 + +var Codec codec.Manager + +func init() { + lc := linearcodec.NewDefault(time.Time{}) + Codec = codec.NewDefaultManager() + + if err := Codec.RegisterCodec(CodecVersion, lc); err != nil { + panic(err) + } +} diff --git a/avalanchego/database/encdb/db.go b/avalanchego/database/encdb/db.go index 42518bef..2bdacb46 100644 --- a/avalanchego/database/encdb/db.go +++ b/avalanchego/database/encdb/db.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package encdb @@ -7,22 +7,15 @@ import ( "context" "crypto/cipher" "crypto/rand" + "slices" "sync" "golang.org/x/crypto/chacha20poly1305" - "golang.org/x/exp/slices" - - "github.com/ava-labs/avalanchego/codec" - "github.com/ava-labs/avalanchego/codec/linearcodec" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/utils/hashing" ) -const ( - codecVersion = 0 -) - var ( _ database.Database = (*Database)(nil) _ database.Batch = (*batch)(nil) @@ -32,7 +25,6 @@ var ( // Database encrypts all values that are provided type Database struct { lock sync.RWMutex - codec codec.Manager cipher cipher.AEAD db database.Database closed bool @@ -42,16 +34,10 @@ type Database struct { func New(password []byte, db database.Database) (*Database, error) { h := hashing.ComputeHash256(password) aead, err := chacha20poly1305.NewX(h) - if err != nil { - return nil, err - } - c := linearcodec.NewDefault() - manager := codec.NewDefaultManager() return &Database{ - codec: manager, cipher: aead, db: db, - }, manager.RegisterCodec(codecVersion, c) + }, err } func (db *Database) Has(key []byte) (bool, error) { @@ -297,7 +283,7 @@ func (db *Database) encrypt(plaintext []byte) ([]byte, error) { return nil, err } ciphertext := db.cipher.Seal(nil, nonce, plaintext, nil) - return db.codec.Marshal(codecVersion, &encryptedValue{ + return Codec.Marshal(CodecVersion, &encryptedValue{ Ciphertext: ciphertext, Nonce: nonce, }) @@ -305,7 +291,7 @@ func (db *Database) encrypt(plaintext []byte) ([]byte, error) { func (db *Database) decrypt(ciphertext []byte) ([]byte, error) { val := encryptedValue{} - if _, err := db.codec.Unmarshal(ciphertext, &val); err != nil { + if _, err := Codec.Unmarshal(ciphertext, &val); err != nil { return nil, err } return db.cipher.Open(nil, val.Nonce, val.Ciphertext, nil) diff --git a/avalanchego/database/encdb/db_test.go b/avalanchego/database/encdb/db_test.go index fe64ecc1..b3dfdfed 100644 --- a/avalanchego/database/encdb/db_test.go +++ 
b/avalanchego/database/encdb/db_test.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package encdb import ( + "fmt" "testing" "github.com/stretchr/testify/require" @@ -15,38 +16,43 @@ import ( const testPassword = "lol totally a secure password" //nolint:gosec func TestInterface(t *testing.T) { - for _, test := range database.Tests { - unencryptedDB := memdb.New() - db, err := New([]byte(testPassword), unencryptedDB) - if err != nil { - t.Fatal(err) - } + for name, test := range database.Tests { + t.Run(name, func(t *testing.T) { + unencryptedDB := memdb.New() + db, err := New([]byte(testPassword), unencryptedDB) + require.NoError(t, err) - test(t, db) + test(t, db) + }) } } -func FuzzInterface(f *testing.F) { - for _, test := range database.FuzzTests { - unencryptedDB := memdb.New() - db, err := New([]byte(testPassword), unencryptedDB) - if err != nil { - require.NoError(f, err) - } - test(f, db) - } +func newDB(t testing.TB) database.Database { + unencryptedDB := memdb.New() + db, err := New([]byte(testPassword), unencryptedDB) + require.NoError(t, err) + return db +} + +func FuzzKeyValue(f *testing.F) { + database.FuzzKeyValue(f, newDB(f)) +} + +func FuzzNewIteratorWithPrefix(f *testing.F) { + database.FuzzNewIteratorWithPrefix(f, newDB(f)) +} + +func FuzzNewIteratorWithStartAndPrefix(f *testing.F) { + database.FuzzNewIteratorWithStartAndPrefix(f, newDB(f)) } func BenchmarkInterface(b *testing.B) { for _, size := range database.BenchmarkSizes { keys, values := database.SetupBenchmark(b, size[0], size[1], size[2]) - for _, bench := range database.Benchmarks { - unencryptedDB := memdb.New() - db, err := New([]byte(testPassword), unencryptedDB) - if err != nil { - b.Fatal(err) - } - bench(b, db, "encdb", keys, values) + for name, bench := range database.Benchmarks { + 
b.Run(fmt.Sprintf("encdb_%d_pairs_%d_keys_%d_values_%s", size[0], size[1], size[2], name), func(b *testing.B) { + bench(b, newDB(b), keys, values) + }) } } } diff --git a/avalanchego/database/errors.go b/avalanchego/database/errors.go index ee46521b..24f93aa8 100644 --- a/avalanchego/database/errors.go +++ b/avalanchego/database/errors.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package database diff --git a/avalanchego/database/helpers.go b/avalanchego/database/helpers.go index e56245b1..7e66c58f 100644 --- a/avalanchego/database/helpers.go +++ b/avalanchego/database/helpers.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package database @@ -14,11 +14,20 @@ import ( const ( Uint64Size = 8 // bytes + BoolSize = 1 // bytes + BoolFalse = 0x00 + BoolTrue = 0x01 + // kvPairOverhead is an estimated overhead for a kv pair in a database. 
kvPairOverhead = 8 // bytes ) -var errWrongSize = errors.New("value has unexpected size") +var ( + boolFalseKey = []byte{BoolFalse} + boolTrueKey = []byte{BoolTrue} + + errWrongSize = errors.New("value has unexpected size") +) func PutID(db KeyValueWriter, key []byte, val ids.ID) error { return db.Put(key, val[:]) @@ -114,9 +123,9 @@ func ParseTimestamp(b []byte) (time.Time, error) { func PutBool(db KeyValueWriter, key []byte, b bool) error { if b { - return db.Put(key, []byte{1}) + return db.Put(key, boolTrueKey) } - return db.Put(key, []byte{0}) + return db.Put(key, boolFalseKey) } func GetBool(db KeyValueReader, key []byte) (bool, error) { @@ -124,12 +133,12 @@ func GetBool(db KeyValueReader, key []byte) (bool, error) { switch { case err != nil: return false, err - case len(b) != 1: - return false, fmt.Errorf("length should be 1 but is %d", len(b)) - case b[0] != 0 && b[0] != 1: - return false, fmt.Errorf("should be 0 or 1 but is %v", b[0]) + case len(b) != BoolSize: + return false, fmt.Errorf("length should be %d but is %d", BoolSize, len(b)) + case b[0] != BoolFalse && b[0] != BoolTrue: + return false, fmt.Errorf("should be %d or %d but is %d", BoolFalse, BoolTrue, b[0]) } - return b[0] == 1, nil + return b[0] == BoolTrue, nil } func Count(db Iteratee) (int, error) { @@ -161,11 +170,12 @@ func IsEmpty(db Iteratee) (bool, error) { return !iterator.Next(), iterator.Error() } -func Clear(readerDB Iteratee, deleterDB KeyValueDeleter) error { - return ClearPrefix(readerDB, deleterDB, nil) +func AtomicClear(readerDB Iteratee, deleterDB KeyValueDeleter) error { + return AtomicClearPrefix(readerDB, deleterDB, nil) } -func ClearPrefix(readerDB Iteratee, deleterDB KeyValueDeleter, prefix []byte) error { +// AtomicClearPrefix deletes from [deleterDB] all keys in [readerDB] that have the given [prefix]. 
+func AtomicClearPrefix(readerDB Iteratee, deleterDB KeyValueDeleter, prefix []byte) error { iterator := readerDB.NewIteratorWithPrefix(prefix) defer iterator.Release() @@ -177,3 +187,51 @@ func ClearPrefix(readerDB Iteratee, deleterDB KeyValueDeleter, prefix []byte) er } return iterator.Error() } + +// Remove all key-value pairs from [db]. +// Writes each batch when it reaches [writeSize]. +func Clear(db Database, writeSize int) error { + return ClearPrefix(db, nil, writeSize) +} + +// Removes all keys with the given [prefix] from [db]. +// Writes each batch when it reaches [writeSize]. +func ClearPrefix(db Database, prefix []byte, writeSize int) error { + b := db.NewBatch() + it := db.NewIteratorWithPrefix(prefix) + // Defer the release of the iterator inside a closure to guarantee that the + // latest, not the first, iterator is released on return. + defer func() { + it.Release() + }() + + for it.Next() { + key := it.Key() + if err := b.Delete(key); err != nil { + return err + } + + // Avoid too much memory pressure by periodically writing to the + // database. + if b.Size() < writeSize { + continue + } + + if err := b.Write(); err != nil { + return err + } + b.Reset() + + // Reset the iterator to release references to now deleted keys. + if err := it.Error(); err != nil { + return err + } + it.Release() + it = db.NewIteratorWithPrefix(prefix) + } + + if err := b.Write(); err != nil { + return err + } + return it.Error() +} diff --git a/avalanchego/database/helpers_test.go b/avalanchego/database/helpers_test.go new file mode 100644 index 00000000..1cce64c8 --- /dev/null +++ b/avalanchego/database/helpers_test.go @@ -0,0 +1,51 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package database + +import ( + "math/rand" + "slices" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/utils" +) + +func TestSortednessUint64(t *testing.T) { + seed := time.Now().UnixNano() + t.Log("Seed: ", seed) + rand := rand.New(rand.NewSource(seed)) //#nosec G404 + + ints := make([]uint64, 1024) + for i := range ints { + ints[i] = rand.Uint64() + } + slices.Sort(ints) + + intBytes := make([][]byte, 1024) + for i, val := range ints { + intBytes[i] = PackUInt64(val) + } + require.True(t, utils.IsSortedBytes(intBytes)) +} + +func TestSortednessUint32(t *testing.T) { + seed := time.Now().UnixNano() + t.Log("Seed: ", seed) + rand := rand.New(rand.NewSource(seed)) //#nosec G404 + + ints := make([]uint32, 1024) + for i := range ints { + ints[i] = rand.Uint32() + } + slices.Sort(ints) + + intBytes := make([][]byte, 1024) + for i, val := range ints { + intBytes[i] = PackUInt32(val) + } + require.True(t, utils.IsSortedBytes(intBytes)) +} diff --git a/avalanchego/database/iterator.go b/avalanchego/database/iterator.go index dab02b4f..75126006 100644 --- a/avalanchego/database/iterator.go +++ b/avalanchego/database/iterator.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. // For ease of implementation, our database's interface matches Ethereum's @@ -33,9 +33,13 @@ type Iterator interface { Error() error // Key returns the key of the current key/value pair, or nil if done. + // If the database is closed, must still report the current contents. + // Behavior is undefined after Release is called. Key() []byte // Value returns the value of the current key/value pair, or nil if done. + // If the database is closed, must still report the current contents. + // Behavior is undefined after Release is called. Value() []byte // Release releases associated resources. 
Release should always succeed and diff --git a/avalanchego/database/leveldb/db.go b/avalanchego/database/leveldb/db.go index 4a8b6518..6c096061 100644 --- a/avalanchego/database/leveldb/db.go +++ b/avalanchego/database/leveldb/db.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package leveldb @@ -9,22 +9,19 @@ import ( "encoding/json" "fmt" "math" + "slices" "sync" "time" "github.com/prometheus/client_golang/prometheus" - "github.com/syndtr/goleveldb/leveldb" "github.com/syndtr/goleveldb/leveldb/errors" "github.com/syndtr/goleveldb/leveldb/filter" "github.com/syndtr/goleveldb/leveldb/iterator" "github.com/syndtr/goleveldb/leveldb/opt" "github.com/syndtr/goleveldb/leveldb/util" - "go.uber.org/zap" - "golang.org/x/exp/slices" - "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/logging" @@ -69,6 +66,9 @@ var ( _ database.Database = (*Database)(nil) _ database.Batch = (*batch)(nil) _ database.Iterator = (*iter)(nil) + + ErrInvalidConfig = errors.New("invalid config") + ErrCouldNotOpen = errors.New("could not open") ) // Database is a persistent key-value store. 
Apart from basic data storage @@ -198,7 +198,7 @@ func New(file string, configBytes []byte, log logging.Logger, namespace string, } if len(configBytes) > 0 { if err := json.Unmarshal(configBytes, &parsedConfig); err != nil { - return nil, fmt.Errorf("failed to parse db config: %w", err) + return nil, fmt.Errorf("%w: %w", ErrInvalidConfig, err) } } @@ -228,7 +228,7 @@ func New(file string, configBytes []byte, log logging.Logger, namespace string, db, err = leveldb.RecoverFile(file, nil) } if err != nil { - return nil, err + return nil, fmt.Errorf("%w: %w", ErrCouldNotOpen, err) } wrappedDB := &Database{ diff --git a/avalanchego/database/leveldb/db_test.go b/avalanchego/database/leveldb/db_test.go index 703bc412..8352e53b 100644 --- a/avalanchego/database/leveldb/db_test.go +++ b/avalanchego/database/leveldb/db_test.go @@ -1,13 +1,13 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package leveldb import ( + "fmt" "testing" "github.com/prometheus/client_golang/prometheus" - "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/database" @@ -15,57 +15,60 @@ import ( ) func TestInterface(t *testing.T) { - for _, test := range database.Tests { - folder := t.TempDir() - db, err := New(folder, nil, logging.NoLog{}, "", prometheus.NewRegistry()) - if err != nil { - t.Fatalf("leveldb.New(%q, logging.NoLog{}) errored with %s", folder, err) - } - - defer db.Close() + for name, test := range database.Tests { + t.Run(name, func(t *testing.T) { + folder := t.TempDir() + db, err := New(folder, nil, logging.NoLog{}, "", prometheus.NewRegistry()) + require.NoError(t, err) - test(t, db) + test(t, db) - // The database may have been closed by the test, so we don't care if it - // errors here. 
- _ = db.Close() + _ = db.Close() + }) } } -func FuzzInterface(f *testing.F) { - for _, test := range database.FuzzTests { - folder := f.TempDir() - db, err := New(folder, nil, logging.NoLog{}, "", prometheus.NewRegistry()) - if err != nil { - require.NoError(f, err) - } +func newDB(t testing.TB) database.Database { + folder := t.TempDir() + db, err := New(folder, nil, logging.NoLog{}, "", prometheus.NewRegistry()) + require.NoError(t, err) + return db +} + +func FuzzKeyValue(f *testing.F) { + db := newDB(f) + defer db.Close() - defer db.Close() + database.FuzzKeyValue(f, db) +} - test(f, db) +func FuzzNewIteratorWithPrefix(f *testing.F) { + db := newDB(f) + defer db.Close() - // The database may have been closed by the test, so we don't care if it - // errors here. - _ = db.Close() - } + database.FuzzNewIteratorWithPrefix(f, db) +} + +func FuzzNewIteratorWithStartAndPrefix(f *testing.F) { + db := newDB(f) + defer db.Close() + + database.FuzzNewIteratorWithStartAndPrefix(f, db) } func BenchmarkInterface(b *testing.B) { for _, size := range database.BenchmarkSizes { keys, values := database.SetupBenchmark(b, size[0], size[1], size[2]) - for _, bench := range database.Benchmarks { - folder := b.TempDir() - - db, err := New(folder, nil, logging.NoLog{}, "", prometheus.NewRegistry()) - if err != nil { - b.Fatal(err) - } + for name, bench := range database.Benchmarks { + b.Run(fmt.Sprintf("leveldb_%d_pairs_%d_keys_%d_values_%s", size[0], size[1], size[2], name), func(b *testing.B) { + db := newDB(b) - bench(b, db, "leveldb", keys, values) + bench(b, db, keys, values) - // The database may have been closed by the test, so we don't care if it - // errors here. - _ = db.Close() + // The database may have been closed by the test, so we don't care if it + // errors here. 
+ _ = db.Close() + }) } } } diff --git a/avalanchego/database/leveldb/metrics.go b/avalanchego/database/leveldb/metrics.go index 8b2971a3..004e1774 100644 --- a/avalanchego/database/leveldb/metrics.go +++ b/avalanchego/database/leveldb/metrics.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package leveldb @@ -7,10 +7,9 @@ import ( "strconv" "github.com/prometheus/client_golang/prometheus" - "github.com/syndtr/goleveldb/leveldb" - "github.com/ava-labs/avalanchego/utils/wrappers" + "github.com/ava-labs/avalanchego/utils" ) var levelLabels = []string{"level"} @@ -180,8 +179,7 @@ func newMetrics(namespace string, reg prometheus.Registerer) (metrics, error) { currentStats: &leveldb.DBStats{}, } - errs := wrappers.Errs{} - errs.Add( + err := utils.Err( reg.Register(m.writesDelayedCount), reg.Register(m.writesDelayedDuration), reg.Register(m.writeIsDelayed), @@ -206,7 +204,7 @@ func newMetrics(namespace string, reg prometheus.Registerer) (metrics, error) { reg.Register(m.nonLevel0Compactions), reg.Register(m.seekCompactions), ) - return m, errs.Err + return m, err } func (db *Database) updateMetrics() error { diff --git a/avalanchego/database/linkeddb/codec.go b/avalanchego/database/linkeddb/codec.go index 7780690b..f1982e1c 100644 --- a/avalanchego/database/linkeddb/codec.go +++ b/avalanchego/database/linkeddb/codec.go @@ -1,29 +1,25 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package linkeddb import ( "math" + "time" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" ) -const ( - codecVersion = 0 -) +const CodecVersion = 0 -// c does serialization and deserialization -var ( - c codec.Manager -) +var Codec codec.Manager func init() { - lc := linearcodec.NewCustomMaxLength(math.MaxUint32) - c = codec.NewManager(math.MaxInt32) + lc := linearcodec.NewDefault(time.Time{}) + Codec = codec.NewManager(math.MaxInt32) - if err := c.RegisterCodec(codecVersion, lc); err != nil { + if err := Codec.RegisterCodec(CodecVersion, lc); err != nil { panic(err) } } diff --git a/avalanchego/database/linkeddb/linkeddb.go b/avalanchego/database/linkeddb/linkeddb.go index 597e1258..b7bc6867 100644 --- a/avalanchego/database/linkeddb/linkeddb.go +++ b/avalanchego/database/linkeddb/linkeddb.go @@ -1,14 +1,12 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package linkeddb import ( + "slices" "sync" - "golang.org/x/exp/maps" - "golang.org/x/exp/slices" - "github.com/ava-labs/avalanchego/cache" "github.com/ava-labs/avalanchego/database" ) @@ -316,7 +314,7 @@ func (ldb *linkedDB) getNode(key []byte) (node, error) { return node{}, err } n := node{} - _, err = c.Unmarshal(nodeBytes, &n) + _, err = Codec.Unmarshal(nodeBytes, &n) if err == nil { ldb.nodeCache.Put(keyStr, &n) } @@ -325,7 +323,7 @@ func (ldb *linkedDB) getNode(key []byte) (node, error) { func (ldb *linkedDB) putNode(key []byte, n node) error { ldb.updatedNodes[string(key)] = &n - nodeBytes, err := c.Marshal(codecVersion, n) + nodeBytes, err := Codec.Marshal(CodecVersion, n) if err != nil { return err } @@ -339,7 +337,7 @@ func (ldb *linkedDB) deleteNode(key []byte) error { func (ldb *linkedDB) resetBatch() { ldb.headKeyIsUpdated = false - maps.Clear(ldb.updatedNodes) + clear(ldb.updatedNodes) ldb.batch.Reset() } diff --git a/avalanchego/database/linkeddb/linkeddb_test.go b/avalanchego/database/linkeddb/linkeddb_test.go index 9ee698dc..815cac73 100644 --- a/avalanchego/database/linkeddb/linkeddb_test.go +++ b/avalanchego/database/linkeddb/linkeddb_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package linkeddb @@ -23,38 +23,34 @@ func TestLinkedDB(t *testing.T) { has, err := ldb.Has(key) require.NoError(err) - require.False(has, "db unexpectedly had key %s", key) + require.False(has) _, err = ldb.Get(key) - require.Equal(database.ErrNotFound, err, "Expected db.Get to return a Not Found error.") + require.Equal(database.ErrNotFound, err) - err = ldb.Delete(key) - require.NoError(err) + require.NoError(ldb.Delete(key)) - err = ldb.Put(key, value) - require.NoError(err) + require.NoError(ldb.Put(key, value)) has, err = ldb.Has(key) require.NoError(err) - require.True(has, "db should have had key %s", key) + require.True(has) v, err := ldb.Get(key) require.NoError(err) require.Equal(value, v) - err = ldb.Delete(key) - require.NoError(err) + require.NoError(ldb.Delete(key)) has, err = ldb.Has(key) require.NoError(err) - require.False(has, "db unexpectedly had key %s", key) + require.False(has) _, err = ldb.Get(key) - require.Equal(database.ErrNotFound, err, "Expected db.Get to return a Not Found error.") + require.Equal(database.ErrNotFound, err) iterator := db.NewIterator() - next := iterator.Next() - require.False(next, "database should be empty") + require.False(iterator.Next()) iterator.Release() } @@ -68,22 +64,18 @@ func TestLinkedDBDuplicatedPut(t *testing.T) { value1 := []byte("world1") value2 := []byte("world2") - err := ldb.Put(key, value1) - require.NoError(err) + require.NoError(ldb.Put(key, value1)) - err = ldb.Put(key, value2) - require.NoError(err) + require.NoError(ldb.Put(key, value2)) v, err := ldb.Get(key) require.NoError(err) require.Equal(value2, v) - err = ldb.Delete(key) - require.NoError(err) + require.NoError(ldb.Delete(key)) iterator := db.NewIterator() - next := iterator.Next() - require.False(next, "database should be empty") + require.False(iterator.Next()) iterator.Release() } @@ -100,11 +92,9 @@ func TestLinkedDBMultiplePuts(t *testing.T) { value2 := []byte("world2") value3 := []byte("world3") - err := ldb.Put(key1, value1) - 
require.NoError(err) + require.NoError(ldb.Put(key1, value1)) - err = ldb.Put(key2, value2) - require.NoError(err) + require.NoError(ldb.Put(key2, value2)) v, err := ldb.Get(key1) require.NoError(err) @@ -114,23 +104,17 @@ func TestLinkedDBMultiplePuts(t *testing.T) { require.NoError(err) require.Equal(value2, v) - err = ldb.Delete(key2) - require.NoError(err) + require.NoError(ldb.Delete(key2)) - err = ldb.Put(key2, value2) - require.NoError(err) + require.NoError(ldb.Put(key2, value2)) - err = ldb.Put(key3, value3) - require.NoError(err) + require.NoError(ldb.Put(key3, value3)) - err = ldb.Delete(key2) - require.NoError(err) + require.NoError(ldb.Delete(key2)) - err = ldb.Delete(key1) - require.NoError(err) + require.NoError(ldb.Delete(key1)) - err = ldb.Delete(key3) - require.NoError(err) + require.NoError(ldb.Delete(key3)) iterator := db.NewIterator() next := iterator.Next() @@ -154,8 +138,7 @@ func TestEmptyLinkedDBIterator(t *testing.T) { v := iterator.Value() require.Nil(v, "The iterator returned the wrong value") - err := iterator.Error() - require.NoError(err) + require.NoError(iterator.Error()) iterator.Release() } @@ -169,8 +152,7 @@ func TestLinkedDBLoadHeadKey(t *testing.T) { key := []byte("hello") value := []byte("world") - err := ldb.Put(key, value) - require.NoError(err) + require.NoError(ldb.Put(key, value)) ldb = NewDefault(db) @@ -193,8 +175,7 @@ func TestLinkedDBLoadHeadKey(t *testing.T) { v = iterator.Value() require.Nil(v, "The iterator returned the wrong value") - err = iterator.Error() - require.NoError(err) + require.NoError(iterator.Error()) iterator.Release() } @@ -208,8 +189,7 @@ func TestSingleLinkedDBIterator(t *testing.T) { key := []byte("hello") value := []byte("world") - err := ldb.Put(key, value) - require.NoError(err) + require.NoError(ldb.Put(key, value)) iterator := ldb.NewIterator() next := iterator.Next() @@ -230,8 +210,7 @@ func TestSingleLinkedDBIterator(t *testing.T) { v = iterator.Value() require.Nil(v, "The iterator 
returned the wrong value") - err = iterator.Error() - require.NoError(err) + require.NoError(iterator.Error()) iterator.Release() } @@ -247,11 +226,9 @@ func TestMultipleLinkedDBIterator(t *testing.T) { value0 := []byte("world0") value1 := []byte("world1") - err := ldb.Put(key0, value0) - require.NoError(err) + require.NoError(ldb.Put(key0, value0)) - err = ldb.Put(key1, value1) - require.NoError(err) + require.NoError(ldb.Put(key1, value1)) iterator := ldb.NewIterator() next := iterator.Next() @@ -275,8 +252,7 @@ func TestMultipleLinkedDBIterator(t *testing.T) { next = iterator.Next() require.False(next, "The iterator should now be exhausted") - err = iterator.Error() - require.NoError(err) + require.NoError(iterator.Error()) iterator.Release() } @@ -292,11 +268,9 @@ func TestMultipleLinkedDBIteratorStart(t *testing.T) { value0 := []byte("world0") value1 := []byte("world1") - err := ldb.Put(key0, value0) - require.NoError(err) + require.NoError(ldb.Put(key0, value0)) - err = ldb.Put(key1, value1) - require.NoError(err) + require.NoError(ldb.Put(key1, value1)) iterator := ldb.NewIteratorWithStart(key1) next := iterator.Next() @@ -320,8 +294,7 @@ func TestMultipleLinkedDBIteratorStart(t *testing.T) { next = iterator.Next() require.False(next, "The iterator should now be exhausted") - err = iterator.Error() - require.NoError(err) + require.NoError(iterator.Error()) iterator.Release() } @@ -337,11 +310,9 @@ func TestSingleLinkedDBIteratorStart(t *testing.T) { value0 := []byte("world0") value1 := []byte("world1") - err := ldb.Put(key0, value0) - require.NoError(err) + require.NoError(ldb.Put(key0, value0)) - err = ldb.Put(key1, value1) - require.NoError(err) + require.NoError(ldb.Put(key1, value1)) iterator := ldb.NewIteratorWithStart(key0) @@ -357,8 +328,7 @@ func TestSingleLinkedDBIteratorStart(t *testing.T) { next = iterator.Next() require.False(next, "The iterator should now be exhausted") - err = iterator.Error() - require.NoError(err) + 
require.NoError(iterator.Error()) iterator.Release() } @@ -377,11 +347,9 @@ func TestEmptyLinkedDBIteratorStart(t *testing.T) { value0 := []byte("world0") value1 := []byte("world1") - err := ldb.Put(key0, value0) - require.NoError(err) + require.NoError(ldb.Put(key0, value0)) - err = ldb.Put(key1, value1) - require.NoError(err) + require.NoError(ldb.Put(key1, value1)) iter := ldb.NewIteratorWithStart(key2) @@ -393,8 +361,7 @@ func TestEmptyLinkedDBIteratorStart(t *testing.T) { } require.Equal(2, i) - err = iter.Error() - require.NoError(err) + require.NoError(iter.Error()) iter.Release() } @@ -412,15 +379,13 @@ func TestLinkedDBIsEmpty(t *testing.T) { key := []byte("hello") value := []byte("world") - err = ldb.Put(key, value) - require.NoError(err) + require.NoError(ldb.Put(key, value)) isEmpty, err = ldb.IsEmpty() require.NoError(err) require.False(isEmpty) - err = ldb.Delete(key) - require.NoError(err) + require.NoError(ldb.Delete(key)) isEmpty, err = ldb.IsEmpty() require.NoError(err) @@ -441,22 +406,19 @@ func TestLinkedDBHeadKey(t *testing.T) { key1 := []byte("hello1") value1 := []byte("world1") - err = ldb.Put(key0, value0) - require.NoError(err) + require.NoError(ldb.Put(key0, value0)) headKey, err := ldb.HeadKey() require.NoError(err) require.Equal(key0, headKey) - err = ldb.Put(key1, value1) - require.NoError(err) + require.NoError(ldb.Put(key1, value1)) headKey, err = ldb.HeadKey() require.NoError(err) require.Equal(key1, headKey) - err = ldb.Delete(key1) - require.NoError(err) + require.NoError(ldb.Delete(key1)) headKey, err = ldb.HeadKey() require.NoError(err) @@ -477,24 +439,21 @@ func TestLinkedDBHead(t *testing.T) { key1 := []byte("hello1") value1 := []byte("world1") - err = ldb.Put(key0, value0) - require.NoError(err) + require.NoError(ldb.Put(key0, value0)) headKey, headVal, err := ldb.Head() require.NoError(err) require.Equal(key0, headKey) require.Equal(value0, headVal) - err = ldb.Put(key1, value1) - require.NoError(err) + 
require.NoError(ldb.Put(key1, value1)) headKey, headVal, err = ldb.Head() require.NoError(err) require.Equal(key1, headKey) require.Equal(value1, headVal) - err = ldb.Delete(key1) - require.NoError(err) + require.NoError(ldb.Delete(key1)) headKey, headVal, err = ldb.Head() require.NoError(err) diff --git a/avalanchego/database/manager/manager.go b/avalanchego/database/manager/manager.go deleted file mode 100644 index 45d3cb08..00000000 --- a/avalanchego/database/manager/manager.go +++ /dev/null @@ -1,325 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package manager - -import ( - "errors" - "fmt" - "os" - "path/filepath" - "strings" - - "github.com/prometheus/client_golang/prometheus" - - "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/database/corruptabledb" - "github.com/ava-labs/avalanchego/database/leveldb" - "github.com/ava-labs/avalanchego/database/memdb" - "github.com/ava-labs/avalanchego/database/meterdb" - "github.com/ava-labs/avalanchego/database/prefixdb" - "github.com/ava-labs/avalanchego/utils" - "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/utils/wrappers" - "github.com/ava-labs/avalanchego/version" -) - -var ( - errNonSortedAndUniqueDBs = errors.New("managed databases were not sorted and unique") - errNoDBs = errors.New("no dbs given") -) - -var _ Manager = (*manager)(nil) - -type Manager interface { - // Current returns the database with the current database version. - Current() *VersionedDatabase - - // Previous returns the database prior to the current database and true if a - // previous database exists. - Previous() (*VersionedDatabase, bool) - - // GetDatabases returns all the managed databases in order from current to - // the oldest version. - GetDatabases() []*VersionedDatabase - - // Close all of the databases controlled by the manager. 
- Close() error - - // NewPrefixDBManager returns a new database manager with each of its - // databases prefixed with [prefix]. - NewPrefixDBManager(prefix []byte) Manager - - // NewNestedPrefixDBManager returns a new database manager where each of its - // databases has the nested prefix [prefix] applied to it. - NewNestedPrefixDBManager(prefix []byte) Manager - - // NewMeterDBManager returns a new database manager with each of its - // databases wrapped with a meterdb instance to support metrics on database - // performance. - NewMeterDBManager(namespace string, registerer prometheus.Registerer) (Manager, error) - - // NewCompleteMeterDBManager wraps each database instance with a meterdb - // instance. The namespace is concatenated with the version of the database. - // Note: calling this more than once with the same [namespace] will cause a - // conflict error for the [registerer]. - NewCompleteMeterDBManager(namespace string, registerer prometheus.Registerer) (Manager, error) -} - -type manager struct { - // databases with the current version at index 0 and prior versions in - // descending order - // invariant: len(databases) > 0 - databases []*VersionedDatabase -} - -// NewLevelDB creates a database manager of levelDBs at [filePath] by creating a -// database instance from each directory with a version <= [currentVersion]. If -// [includePreviousVersions], opens previous database versions and includes them -// in the returned Manager. -func NewLevelDB( - dbDirPath string, - dbConfig []byte, - log logging.Logger, - currentVersion *version.Semantic, - namespace string, - reg prometheus.Registerer, -) (Manager, error) { - return new( - leveldb.New, - dbDirPath, - dbConfig, - log, - currentVersion, - namespace, - reg, - ) -} - -// new creates a database manager at [filePath] by creating a database instance -// from each directory with a version <= [currentVersion]. 
If -// [includePreviousVersions], opens previous database versions and includes them -// in the returned Manager. -func new( - newDB func(string, []byte, logging.Logger, string, prometheus.Registerer) (database.Database, error), - dbDirPath string, - dbConfig []byte, - log logging.Logger, - currentVersion *version.Semantic, - namespace string, - reg prometheus.Registerer, -) (Manager, error) { - currentDBPath := filepath.Join(dbDirPath, currentVersion.String()) - - currentDB, err := newDB(currentDBPath, dbConfig, log, namespace, reg) - if err != nil { - return nil, fmt.Errorf("couldn't create db at %s: %w", currentDBPath, err) - } - - wrappedDB := corruptabledb.New(currentDB) - - manager := &manager{ - databases: []*VersionedDatabase{ - { - Database: wrappedDB, - Version: currentVersion, - }, - }, - } - - // Open old database versions and add them to [manager] - err = filepath.Walk(dbDirPath, func(path string, info os.FileInfo, err error) error { - // the walkFn is called with a non-nil error argument if an os.Lstat - // or Readdirnames call returns an error. Both cases are considered - // fatal in the traversal. - // Reference: https://golang.org/pkg/path/filepath/#WalkFunc - if err != nil { - return err - } - // Skip the root directory - if path == dbDirPath { - return nil - } - - // If the database directory contains any files, ignore them. - if !info.IsDir() { - return nil - } - _, dbName := filepath.Split(path) - dbVersion, err := version.Parse(dbName) - if err != nil { - // If the database directory contains any directories that don't - // match the expected version format, ignore them. - return filepath.SkipDir - } - - // If [dbVersion] is greater than or equal to the specified version - // skip over creating the new database to avoid creating the same db - // twice or creating a database with a version ahead of the desired one. 
- if cmp := dbVersion.Compare(currentVersion); cmp >= 0 { - return filepath.SkipDir - } - - versionStr := strings.ReplaceAll(dbName, ".", "_") - var dbNamespace string - if len(namespace) > 0 { - dbNamespace = fmt.Sprintf("%s_%s", namespace, versionStr) - } else { - dbNamespace = versionStr - } - - db, err := newDB(path, dbConfig, log, dbNamespace, reg) - if err != nil { - return fmt.Errorf("couldn't create db at %s: %w", path, err) - } - - manager.databases = append(manager.databases, &VersionedDatabase{ - Database: corruptabledb.New(db), - Version: dbVersion, - }) - - return filepath.SkipDir - }) - utils.Sort(manager.databases) - - // If an error occurred walking [dbDirPath] close the - // database manager and return the original error here. - if err != nil { - _ = manager.Close() - return nil, err - } - - return manager, nil -} - -// NewMemDB returns a database manager with a single memdb instance with -// [currentVersion]. -func NewMemDB(currentVersion *version.Semantic) Manager { - return &manager{ - databases: []*VersionedDatabase{ - { - Database: memdb.New(), - Version: currentVersion, - }, - }, - } -} - -// NewManagerFromDBs -func NewManagerFromDBs(dbs []*VersionedDatabase) (Manager, error) { - if len(dbs) == 0 { - return nil, errNoDBs - } - utils.Sort(dbs) - sortedAndUnique := utils.IsSortedAndUniqueSortable(dbs) - if !sortedAndUnique { - return nil, errNonSortedAndUniqueDBs - } - return &manager{ - databases: dbs, - }, nil -} - -func (m *manager) Current() *VersionedDatabase { - return m.databases[0] -} - -func (m *manager) Previous() (*VersionedDatabase, bool) { - if len(m.databases) < 2 { - return nil, false - } - return m.databases[1], true -} - -func (m *manager) GetDatabases() []*VersionedDatabase { - return m.databases -} - -func (m *manager) Close() error { - errs := wrappers.Errs{} - for _, db := range m.databases { - errs.Add(db.Close()) - } - return errs.Err -} - -// NewPrefixDBManager creates a new manager with each database instance prefixed 
-// by [prefix] -func (m *manager) NewPrefixDBManager(prefix []byte) Manager { - m, _ = m.wrapManager(func(vdb *VersionedDatabase) (*VersionedDatabase, error) { - return &VersionedDatabase{ - Database: prefixdb.New(prefix, vdb.Database), - Version: vdb.Version, - }, nil - }) - return m -} - -// NewNestedPrefixDBManager creates a new manager with each database instance -// wrapped with a nested prfix of [prefix] -func (m *manager) NewNestedPrefixDBManager(prefix []byte) Manager { - m, _ = m.wrapManager(func(vdb *VersionedDatabase) (*VersionedDatabase, error) { - return &VersionedDatabase{ - Database: prefixdb.NewNested(prefix, vdb.Database), - Version: vdb.Version, - }, nil - }) - return m -} - -// NewMeterDBManager wraps the current database instance with a meterdb instance. -// Note: calling this more than once with the same [namespace] will cause a conflict error for the [registerer] -func (m *manager) NewMeterDBManager(namespace string, registerer prometheus.Registerer) (Manager, error) { - currentDB := m.Current() - currentMeterDB, err := meterdb.New(namespace, registerer, currentDB.Database) - if err != nil { - return nil, err - } - newManager := &manager{ - databases: make([]*VersionedDatabase, len(m.databases)), - } - copy(newManager.databases[1:], m.databases[1:]) - // Overwrite the current database with the meter DB - newManager.databases[0] = &VersionedDatabase{ - Database: currentMeterDB, - Version: currentDB.Version, - } - return newManager, nil -} - -// NewCompleteMeterDBManager wraps each database instance with a meterdb instance. The namespace -// is concatenated with the version of the database. 
Note: calling this more than once -// with the same [namespace] will cause a conflict error for the [registerer] -func (m *manager) NewCompleteMeterDBManager(namespace string, registerer prometheus.Registerer) (Manager, error) { - return m.wrapManager(func(vdb *VersionedDatabase) (*VersionedDatabase, error) { - mdb, err := meterdb.New(fmt.Sprintf("%s_%s", namespace, strings.ReplaceAll(vdb.Version.String(), ".", "_")), registerer, vdb.Database) - if err != nil { - return nil, err - } - return &VersionedDatabase{ - Database: mdb, - Version: vdb.Version, - }, nil - }) -} - -// wrapManager returns a new database manager with each managed database wrapped -// by the [wrap] function. If an error is returned by wrap, the error is -// returned immediately. If [wrap] never returns an error, then wrapManager is -// guaranteed to never return an error. The function wrap must return a database -// that can be closed without closing the underlying database. -func (m *manager) wrapManager(wrap func(db *VersionedDatabase) (*VersionedDatabase, error)) (*manager, error) { - newManager := &manager{ - databases: make([]*VersionedDatabase, 0, len(m.databases)), - } - for _, db := range m.databases { - wrappedDB, err := wrap(db) - if err != nil { - // ignore additional errors in favor of returning the original error - _ = newManager.Close() - return nil, err - } - newManager.databases = append(newManager.databases, wrappedDB) - } - return newManager, nil -} diff --git a/avalanchego/database/manager/manager_test.go b/avalanchego/database/manager/manager_test.go deleted file mode 100644 index ffa82bb9..00000000 --- a/avalanchego/database/manager/manager_test.go +++ /dev/null @@ -1,451 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package manager - -import ( - "os" - "path/filepath" - "testing" - - "github.com/prometheus/client_golang/prometheus" - - "github.com/stretchr/testify/require" - - "github.com/ava-labs/avalanchego/database/leveldb" - "github.com/ava-labs/avalanchego/database/memdb" - "github.com/ava-labs/avalanchego/database/meterdb" - "github.com/ava-labs/avalanchego/database/prefixdb" - "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/version" -) - -func TestNewSingleLevelDB(t *testing.T) { - require := require.New(t) - dir := t.TempDir() - - v1 := version.Semantic1_0_0 - - dbPath := filepath.Join(dir, v1.String()) - db, err := leveldb.New(dbPath, nil, logging.NoLog{}, "", prometheus.NewRegistry()) - require.NoError(err) - - err = db.Close() - require.NoError(err) - - manager, err := NewLevelDB(dir, nil, logging.NoLog{}, v1, "", prometheus.NewRegistry()) - require.NoError(err) - - semDB := manager.Current() - cmp := semDB.Version.Compare(v1) - require.Equal(0, cmp, "incorrect version on current database") - - _, exists := manager.Previous() - require.False(exists, "there should be no previous database") - - dbs := manager.GetDatabases() - require.Len(dbs, 1) - - err = manager.Close() - require.NoError(err) -} - -func TestNewCreatesSingleDB(t *testing.T) { - require := require.New(t) - - dir := t.TempDir() - - v1 := version.Semantic1_0_0 - - manager, err := NewLevelDB(dir, nil, logging.NoLog{}, v1, "", prometheus.NewRegistry()) - require.NoError(err) - - semDB := manager.Current() - cmp := semDB.Version.Compare(v1) - require.Equal(0, cmp, "incorrect version on current database") - - _, exists := manager.Previous() - require.False(exists, "there should be no previous database") - - dbs := manager.GetDatabases() - require.Len(dbs, 1) - - err = manager.Close() - require.NoError(err) -} - -func TestNewInvalidMemberPresent(t *testing.T) { - require := require.New(t) - - dir := t.TempDir() - - v1 := &version.Semantic{ - Major: 1, - Minor: 1, - Patch: 
0, - } - v2 := &version.Semantic{ - Major: 1, - Minor: 2, - Patch: 0, - } - - dbPath1 := filepath.Join(dir, v1.String()) - db1, err := leveldb.New(dbPath1, nil, logging.NoLog{}, "", prometheus.NewRegistry()) - require.NoError(err) - - dbPath2 := filepath.Join(dir, v2.String()) - db2, err := leveldb.New(dbPath2, nil, logging.NoLog{}, "", prometheus.NewRegistry()) - require.NoError(err) - - err = db2.Close() - require.NoError(err) - - _, err = NewLevelDB(dir, nil, logging.NoLog{}, v2, "", prometheus.NewRegistry()) - require.Error(err, "expected to error creating the manager due to an open db") - - err = db1.Close() - require.NoError(err) - - f, err := os.Create(filepath.Join(dir, "dummy")) - require.NoError(err) - - err = f.Close() - require.NoError(err) - - db, err := NewLevelDB(dir, nil, logging.NoLog{}, v1, "", prometheus.NewRegistry()) - require.NoError(err, "expected not to error with a non-directory file being present") - - err = db.Close() - require.NoError(err) -} - -func TestNewSortsDatabases(t *testing.T) { - require := require.New(t) - - dir := t.TempDir() - - vers := []*version.Semantic{ - { - Major: 2, - Minor: 1, - Patch: 2, - }, - { - Major: 2, - Minor: 0, - Patch: 2, - }, - { - Major: 1, - Minor: 3, - Patch: 2, - }, - { - Major: 1, - Minor: 0, - Patch: 2, - }, - { - Major: 1, - Minor: 0, - Patch: 1, - }, - } - - for _, version := range vers { - dbPath := filepath.Join(dir, version.String()) - db, err := leveldb.New(dbPath, nil, logging.NoLog{}, "", prometheus.NewRegistry()) - require.NoError(err) - - err = db.Close() - require.NoError(err) - } - - manager, err := NewLevelDB(dir, nil, logging.NoLog{}, vers[0], "", prometheus.NewRegistry()) - require.NoError(err) - - defer func() { - err = manager.Close() - require.NoError(err, "problem closing database manager") - }() - - semDB := manager.Current() - cmp := semDB.Version.Compare(vers[0]) - require.Equal(0, cmp, "incorrect version on current database") - - prev, exists := manager.Previous() - 
require.True(exists, "expected to find a previous database") - cmp = prev.Version.Compare(vers[1]) - require.Equal(0, cmp, "incorrect version on previous database") - - dbs := manager.GetDatabases() - require.Equal(len(vers), len(dbs)) - - for i, db := range dbs { - cmp = db.Version.Compare(vers[i]) - require.Equal(0, cmp, "expected to find database version %s, but found %s", vers[i], db.Version.String()) - } -} - -func TestPrefixDBManager(t *testing.T) { - require := require.New(t) - - db := memdb.New() - - prefix0 := []byte{0} - db0 := prefixdb.New(prefix0, db) - - prefix1 := []byte{1} - db1 := prefixdb.New(prefix1, db0) - - k0 := []byte{'s', 'c', 'h', 'n', 'i'} - v0 := []byte{'t', 'z', 'e', 'l'} - k1 := []byte{'c', 'u', 'r', 'r', 'y'} - v1 := []byte{'w', 'u', 'r', 's', 't'} - - require.NoError(db0.Put(k0, v0)) - require.NoError(db1.Put(k1, v1)) - require.NoError(db0.Close()) - require.NoError(db1.Close()) - - m := &manager{databases: []*VersionedDatabase{ - { - Database: db, - Version: version.Semantic1_0_0, - }, - }} - - m0 := m.NewPrefixDBManager(prefix0) - m1 := m0.NewPrefixDBManager(prefix1) - - val, err := m0.Current().Database.Get(k0) - require.NoError(err) - require.Equal(v0, val) - - val, err = m1.Current().Database.Get(k1) - require.NoError(err) - require.Equal(v1, val) -} - -func TestNestedPrefixDBManager(t *testing.T) { - require := require.New(t) - - db := memdb.New() - - prefix0 := []byte{0} - db0 := prefixdb.NewNested(prefix0, db) - - prefix1 := []byte{1} - db1 := prefixdb.NewNested(prefix1, db0) - - k0 := []byte{'s', 'c', 'h', 'n', 'i'} - v0 := []byte{'t', 'z', 'e', 'l'} - k1 := []byte{'c', 'u', 'r', 'r', 'y'} - v1 := []byte{'w', 'u', 'r', 's', 't'} - - require.NoError(db0.Put(k0, v0)) - require.NoError(db1.Put(k1, v1)) - require.NoError(db0.Close()) - require.NoError(db1.Close()) - - m := &manager{databases: []*VersionedDatabase{ - { - Database: db, - Version: version.Semantic1_0_0, - }, - }} - - m0 := m.NewNestedPrefixDBManager(prefix0) - m1 := 
m0.NewNestedPrefixDBManager(prefix1) - - val, err := m0.Current().Database.Get(k0) - require.NoError(err) - require.Equal(v0, val) - - val, err = m1.Current().Database.Get(k1) - require.NoError(err) - require.Equal(v1, val) -} - -func TestMeterDBManager(t *testing.T) { - require := require.New(t) - - registry := prometheus.NewRegistry() - - m := &manager{databases: []*VersionedDatabase{ - { - Database: memdb.New(), - Version: &version.Semantic{ - Major: 2, - Minor: 0, - Patch: 0, - }, - }, - { - Database: memdb.New(), - Version: &version.Semantic{ - Major: 1, - Minor: 5, - Patch: 0, - }, - }, - { - Database: memdb.New(), - Version: version.Semantic1_0_0, - }, - }} - - // Create meterdb manager with fresh registry and confirm - // that there are no errors registering metrics for multiple - // versioned databases. - manager, err := m.NewMeterDBManager("", registry) - require.NoError(err) - - dbs := manager.GetDatabases() - require.Len(dbs, 3) - - _, ok := dbs[0].Database.(*meterdb.Database) - require.True(ok) - _, ok = dbs[1].Database.(*meterdb.Database) - require.False(ok) - _, ok = dbs[2].Database.(*meterdb.Database) - require.False(ok) - - // Confirm that the error from a name conflict is handled correctly - _, err = m.NewMeterDBManager("", registry) - require.Error(err) -} - -func TestCompleteMeterDBManager(t *testing.T) { - require := require.New(t) - - registry := prometheus.NewRegistry() - - m := &manager{databases: []*VersionedDatabase{ - { - Database: memdb.New(), - Version: &version.Semantic{ - Major: 2, - Minor: 0, - Patch: 0, - }, - }, - { - Database: memdb.New(), - Version: &version.Semantic{ - Major: 1, - Minor: 5, - Patch: 0, - }, - }, - { - Database: memdb.New(), - Version: version.Semantic1_0_0, - }, - }} - - // Create complete meterdb manager with fresh registry and confirm - // that there are no errors registering metrics for multiple - // versioned databases. 
- manager, err := m.NewCompleteMeterDBManager("", registry) - require.NoError(err) - - dbs := manager.GetDatabases() - require.Len(dbs, 3) - - _, ok := dbs[0].Database.(*meterdb.Database) - require.True(ok) - _, ok = dbs[1].Database.(*meterdb.Database) - require.True(ok) - _, ok = dbs[2].Database.(*meterdb.Database) - require.True(ok) - - // Confirm that the error from a name conflict is handled correctly - _, err = m.NewCompleteMeterDBManager("", registry) - require.Error(err) -} - -func TestNewManagerFromDBs(t *testing.T) { - require := require.New(t) - - versions := []*version.Semantic{ - { - Major: 3, - Minor: 2, - Patch: 0, - }, - { - Major: 1, - Minor: 2, - Patch: 0, - }, - { - Major: 1, - Minor: 1, - Patch: 1, - }, - } - m, err := NewManagerFromDBs( - []*VersionedDatabase{ - { - Database: memdb.New(), - Version: versions[2], - }, - { - Database: memdb.New(), - Version: versions[1], - }, - { - Database: memdb.New(), - Version: versions[0], - }, - }) - require.NoError(err) - - dbs := m.GetDatabases() - require.Len(dbs, len(versions)) - for i, db := range dbs { - require.Equal(0, db.Version.Compare(versions[i])) - } -} - -func TestNewManagerFromNoDBs(t *testing.T) { - require := require.New(t) - // Should error if no dbs are given - _, err := NewManagerFromDBs(nil) - require.Error(err) -} - -func TestNewManagerFromNonUniqueDBs(t *testing.T) { - require := require.New(t) - - _, err := NewManagerFromDBs( - []*VersionedDatabase{ - { - Database: memdb.New(), - Version: &version.Semantic{ - Major: 1, - Minor: 1, - Patch: 0, - }, - }, - { - Database: memdb.New(), - Version: &version.Semantic{ - Major: 1, - Minor: 1, - Patch: 0, - }, // Duplicate - }, - { - Database: memdb.New(), - Version: &version.Semantic{ - Major: 1, - Minor: 2, - Patch: 0, - }, - }, - }) - require.Error(err) -} diff --git a/avalanchego/database/manager/versioned_database.go b/avalanchego/database/manager/versioned_database.go deleted file mode 100644 index 6ff983a9..00000000 --- 
a/avalanchego/database/manager/versioned_database.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package manager - -import ( - "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/utils" - "github.com/ava-labs/avalanchego/version" -) - -var _ utils.Sortable[*VersionedDatabase] = (*VersionedDatabase)(nil) - -type VersionedDatabase struct { - Database database.Database - Version *version.Semantic -} - -// Close the underlying database -func (db *VersionedDatabase) Close() error { - return db.Database.Close() -} - -// Note this sorts in descending order (newest version --> oldest version) -func (db *VersionedDatabase) Less(other *VersionedDatabase) bool { - return db.Version.Compare(other.Version) > 0 -} diff --git a/avalanchego/database/memdb/db.go b/avalanchego/database/memdb/db.go index 92b687af..603fae11 100644 --- a/avalanchego/database/memdb/db.go +++ b/avalanchego/database/memdb/db.go @@ -1,15 +1,14 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package memdb import ( "context" + "slices" "strings" "sync" - "golang.org/x/exp/slices" - "github.com/ava-labs/avalanchego/database" ) diff --git a/avalanchego/database/memdb/db_test.go b/avalanchego/database/memdb/db_test.go index c7518978..90dc459f 100644 --- a/avalanchego/database/memdb/db_test.go +++ b/avalanchego/database/memdb/db_test.go @@ -1,32 +1,43 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package memdb import ( + "fmt" "testing" "github.com/ava-labs/avalanchego/database" ) func TestInterface(t *testing.T) { - for _, test := range database.Tests { - test(t, New()) + for name, test := range database.Tests { + t.Run(name, func(t *testing.T) { + test(t, New()) + }) } } -func FuzzInterface(f *testing.F) { - for _, test := range database.FuzzTests { - test(f, New()) - } +func FuzzKeyValue(f *testing.F) { + database.FuzzKeyValue(f, New()) +} + +func FuzzNewIteratorWithPrefix(f *testing.F) { + database.FuzzNewIteratorWithPrefix(f, New()) +} + +func FuzzNewIteratorWithStartAndPrefix(f *testing.F) { + database.FuzzNewIteratorWithStartAndPrefix(f, New()) } func BenchmarkInterface(b *testing.B) { for _, size := range database.BenchmarkSizes { keys, values := database.SetupBenchmark(b, size[0], size[1], size[2]) - for _, bench := range database.Benchmarks { - db := New() - bench(b, db, "memdb", keys, values) + for name, bench := range database.Benchmarks { + b.Run(fmt.Sprintf("memdb_%d_pairs_%d_keys_%d_values_%s", size[0], size[1], size[2], name), func(b *testing.B) { + db := New() + bench(b, db, keys, values) + }) } } } diff --git a/avalanchego/database/meterdb/db.go b/avalanchego/database/meterdb/db.go index a2640ca2..fd3b3b77 100644 --- a/avalanchego/database/meterdb/db.go +++ b/avalanchego/database/meterdb/db.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package meterdb diff --git a/avalanchego/database/meterdb/db_test.go b/avalanchego/database/meterdb/db_test.go index 7cc60257..48a8966b 100644 --- a/avalanchego/database/meterdb/db_test.go +++ b/avalanchego/database/meterdb/db_test.go @@ -1,13 +1,13 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package meterdb import ( + "fmt" "testing" "github.com/prometheus/client_golang/prometheus" - "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/database" @@ -15,38 +15,43 @@ import ( ) func TestInterface(t *testing.T) { - for _, test := range database.Tests { - baseDB := memdb.New() - db, err := New("", prometheus.NewRegistry(), baseDB) - if err != nil { - t.Fatal(err) - } + for name, test := range database.Tests { + t.Run(name, func(t *testing.T) { + baseDB := memdb.New() + db, err := New("", prometheus.NewRegistry(), baseDB) + require.NoError(t, err) - test(t, db) + test(t, db) + }) } } -func FuzzInterface(f *testing.F) { - for _, test := range database.FuzzTests { - baseDB := memdb.New() - db, err := New("", prometheus.NewRegistry(), baseDB) - if err != nil { - require.NoError(f, err) - } - test(f, db) - } +func newDB(t testing.TB) database.Database { + baseDB := memdb.New() + db, err := New("", prometheus.NewRegistry(), baseDB) + require.NoError(t, err) + return db +} + +func FuzzKeyValue(f *testing.F) { + database.FuzzKeyValue(f, newDB(f)) +} + +func FuzzNewIteratorWithPrefix(f *testing.F) { + database.FuzzNewIteratorWithPrefix(f, newDB(f)) +} + +func FuzzNewIteratorWithStartAndPrefix(f *testing.F) { + database.FuzzNewIteratorWithStartAndPrefix(f, newDB(f)) } func BenchmarkInterface(b *testing.B) { for _, size := range database.BenchmarkSizes { keys, values := database.SetupBenchmark(b, size[0], size[1], size[2]) - for _, bench := range database.Benchmarks { - baseDB := memdb.New() - db, err := New("", prometheus.NewRegistry(), baseDB) - if err != nil { - b.Fatal(err) - } - bench(b, db, "meterdb", keys, values) + for name, bench := range database.Benchmarks { + b.Run(fmt.Sprintf("meterdb_%d_pairs_%d_keys_%d_values_%s", size[0], size[1], size[2], name), func(b *testing.B) { + bench(b, newDB(b), keys, values) + }) } } } diff --git a/avalanchego/database/meterdb/metrics.go b/avalanchego/database/meterdb/metrics.go index a0a20e9d..f311607c 
100644 --- a/avalanchego/database/meterdb/metrics.go +++ b/avalanchego/database/meterdb/metrics.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package meterdb @@ -15,7 +15,7 @@ import ( func newSizeMetric(namespace, name string, reg prometheus.Registerer, errs *wrappers.Errs) metric.Averager { return metric.NewAveragerWithErrs( namespace, - fmt.Sprintf("%s_size", name), + name+"_size", fmt.Sprintf("bytes passed in a %s call", name), reg, errs, @@ -26,7 +26,7 @@ func newTimeMetric(namespace, name string, reg prometheus.Registerer, errs *wrap return metric.NewAveragerWithErrs( namespace, name, - fmt.Sprintf("time (in ns) of a %s", name), + "time (in ns) of a "+name, reg, errs, ) diff --git a/avalanchego/database/mock_batch.go b/avalanchego/database/mock_batch.go index 778bee90..e3762514 100644 --- a/avalanchego/database/mock_batch.go +++ b/avalanchego/database/mock_batch.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/database (interfaces: Batch) +// +// Generated by this command: +// +// mockgen -package=database -destination=database/mock_batch.go github.com/ava-labs/avalanchego/database Batch +// // Package database is a generated GoMock package. package database @@ -10,7 +12,7 @@ package database import ( reflect "reflect" - gomock "github.com/golang/mock/gomock" + gomock "go.uber.org/mock/gomock" ) // MockBatch is a mock of Batch interface. @@ -45,7 +47,7 @@ func (m *MockBatch) Delete(arg0 []byte) error { } // Delete indicates an expected call of Delete. 
-func (mr *MockBatchMockRecorder) Delete(arg0 interface{}) *gomock.Call { +func (mr *MockBatchMockRecorder) Delete(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockBatch)(nil).Delete), arg0) } @@ -73,7 +75,7 @@ func (m *MockBatch) Put(arg0, arg1 []byte) error { } // Put indicates an expected call of Put. -func (mr *MockBatchMockRecorder) Put(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockBatchMockRecorder) Put(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Put", reflect.TypeOf((*MockBatch)(nil).Put), arg0, arg1) } @@ -87,7 +89,7 @@ func (m *MockBatch) Replay(arg0 KeyValueWriterDeleter) error { } // Replay indicates an expected call of Replay. -func (mr *MockBatchMockRecorder) Replay(arg0 interface{}) *gomock.Call { +func (mr *MockBatchMockRecorder) Replay(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Replay", reflect.TypeOf((*MockBatch)(nil).Replay), arg0) } diff --git a/avalanchego/database/mock_iterator.go b/avalanchego/database/mock_iterator.go new file mode 100644 index 00000000..77856c92 --- /dev/null +++ b/avalanchego/database/mock_iterator.go @@ -0,0 +1,107 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/ava-labs/avalanchego/database (interfaces: Iterator) +// +// Generated by this command: +// +// mockgen -package=database -destination=database/mock_iterator.go github.com/ava-labs/avalanchego/database Iterator +// + +// Package database is a generated GoMock package. +package database + +import ( + reflect "reflect" + + gomock "go.uber.org/mock/gomock" +) + +// MockIterator is a mock of Iterator interface. +type MockIterator struct { + ctrl *gomock.Controller + recorder *MockIteratorMockRecorder +} + +// MockIteratorMockRecorder is the mock recorder for MockIterator. 
+type MockIteratorMockRecorder struct { + mock *MockIterator +} + +// NewMockIterator creates a new mock instance. +func NewMockIterator(ctrl *gomock.Controller) *MockIterator { + mock := &MockIterator{ctrl: ctrl} + mock.recorder = &MockIteratorMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockIterator) EXPECT() *MockIteratorMockRecorder { + return m.recorder +} + +// Error mocks base method. +func (m *MockIterator) Error() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Error") + ret0, _ := ret[0].(error) + return ret0 +} + +// Error indicates an expected call of Error. +func (mr *MockIteratorMockRecorder) Error() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Error", reflect.TypeOf((*MockIterator)(nil).Error)) +} + +// Key mocks base method. +func (m *MockIterator) Key() []byte { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Key") + ret0, _ := ret[0].([]byte) + return ret0 +} + +// Key indicates an expected call of Key. +func (mr *MockIteratorMockRecorder) Key() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Key", reflect.TypeOf((*MockIterator)(nil).Key)) +} + +// Next mocks base method. +func (m *MockIterator) Next() bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Next") + ret0, _ := ret[0].(bool) + return ret0 +} + +// Next indicates an expected call of Next. +func (mr *MockIteratorMockRecorder) Next() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Next", reflect.TypeOf((*MockIterator)(nil).Next)) +} + +// Release mocks base method. +func (m *MockIterator) Release() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Release") +} + +// Release indicates an expected call of Release. 
+func (mr *MockIteratorMockRecorder) Release() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Release", reflect.TypeOf((*MockIterator)(nil).Release)) +} + +// Value mocks base method. +func (m *MockIterator) Value() []byte { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Value") + ret0, _ := ret[0].([]byte) + return ret0 +} + +// Value indicates an expected call of Value. +func (mr *MockIteratorMockRecorder) Value() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Value", reflect.TypeOf((*MockIterator)(nil).Value)) +} diff --git a/avalanchego/database/pebble/batch.go b/avalanchego/database/pebble/batch.go new file mode 100644 index 00000000..a53b962d --- /dev/null +++ b/avalanchego/database/pebble/batch.go @@ -0,0 +1,111 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package pebble + +import ( + "fmt" + + "github.com/cockroachdb/pebble" + + "github.com/ava-labs/avalanchego/database" +) + +var _ database.Batch = (*batch)(nil) + +// Not safe for concurrent use. +type batch struct { + batch *pebble.Batch + db *Database + size int + + // True iff [batch] has been written to the database + // since the last time [Reset] was called. + written bool +} + +func (db *Database) NewBatch() database.Batch { + return &batch{ + db: db, + batch: db.pebbleDB.NewBatch(), + } +} + +func (b *batch) Put(key, value []byte) error { + b.size += len(key) + len(value) + pebbleByteOverHead + return b.batch.Set(key, value, pebble.Sync) +} + +func (b *batch) Delete(key []byte) error { + b.size += len(key) + pebbleByteOverHead + return b.batch.Delete(key, pebble.Sync) +} + +func (b *batch) Size() int { + return b.size +} + +// Assumes [b.db.lock] is not held. 
+func (b *batch) Write() error { + b.db.lock.RLock() + defer b.db.lock.RUnlock() + + // Committing to a closed database makes pebble panic + // so make sure [b.db] isn't closed. + if b.db.closed { + return database.ErrClosed + } + + if !b.written { + // This batch has not been written to the database yet. + if err := updateError(b.batch.Commit(pebble.Sync)); err != nil { + return err + } + b.written = true + return nil + } + + // pebble doesn't support writing a batch twice so we have to clone + // [b] and commit the clone. + batchClone := b.db.pebbleDB.NewBatch() + + // Copy the batch. + if err := batchClone.Apply(b.batch, nil); err != nil { + return err + } + + // Commit the new batch. + return updateError(batchClone.Commit(pebble.Sync)) +} + +func (b *batch) Reset() { + b.batch.Reset() + b.written = false + b.size = 0 +} + +func (b *batch) Replay(w database.KeyValueWriterDeleter) error { + reader := b.batch.Reader() + for { + kind, k, v, ok := reader.Next() + if !ok { + return nil + } + switch kind { + case pebble.InternalKeyKindSet: + if err := w.Put(k, v); err != nil { + return err + } + case pebble.InternalKeyKindDelete: + if err := w.Delete(k); err != nil { + return err + } + default: + return fmt.Errorf("%w: %v", errInvalidOperation, kind) + } + } +} + +func (b *batch) Inner() database.Batch { + return b +} diff --git a/avalanchego/database/pebble/batch_test.go b/avalanchego/database/pebble/batch_test.go new file mode 100644 index 00000000..3d657a87 --- /dev/null +++ b/avalanchego/database/pebble/batch_test.go @@ -0,0 +1,46 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package pebble + +import ( + "testing" + + "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/utils/logging" +) + +// Note: TestInterface tests other batch functionality. 
+func TestBatch(t *testing.T) { + require := require.New(t) + dirName := t.TempDir() + + db, err := New(dirName, DefaultConfigBytes, logging.NoLog{}, "", prometheus.NewRegistry()) + require.NoError(err) + + batchIntf := db.NewBatch() + batch, ok := batchIntf.(*batch) + require.True(ok) + + require.False(batch.written) + + key1, value1 := []byte("key1"), []byte("value1") + require.NoError(batch.Put(key1, value1)) + require.Equal(len(key1)+len(value1)+pebbleByteOverHead, batch.Size()) + + require.NoError(batch.Write()) + + require.True(batch.written) + + got, err := db.Get(key1) + require.NoError(err) + require.Equal(value1, got) + + batch.Reset() + require.False(batch.written) + require.Zero(batch.Size()) + + require.NoError(db.Close()) +} diff --git a/avalanchego/database/pebble/db.go b/avalanchego/database/pebble/db.go new file mode 100644 index 00000000..77259a21 --- /dev/null +++ b/avalanchego/database/pebble/db.go @@ -0,0 +1,295 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package pebble + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "slices" + "sync" + + "github.com/cockroachdb/pebble" + "github.com/prometheus/client_golang/prometheus" + "go.uber.org/zap" + + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/utils/units" +) + +const ( + Name = "pebble" + + // pebbleByteOverHead is the number of bytes of constant overhead that + // should be added to a batch size per operation. + pebbleByteOverHead = 8 +) + +var ( + _ database.Database = (*Database)(nil) + + errInvalidOperation = errors.New("invalid operation") + + defaultCacheSize = 512 * units.MiB + DefaultConfig = Config{ + CacheSize: defaultCacheSize, + BytesPerSync: 512 * units.KiB, + WALBytesPerSync: 0, // Default to no background syncing. 
+ MemTableStopWritesThreshold: 8, + MemTableSize: defaultCacheSize / 4, + MaxOpenFiles: 4096, + MaxConcurrentCompactions: 1, + } + + DefaultConfigBytes []byte +) + +func init() { + var err error + DefaultConfigBytes, err = json.Marshal(DefaultConfig) + if err != nil { + panic(err) + } +} + +type Database struct { + lock sync.RWMutex + pebbleDB *pebble.DB + closed bool + openIterators set.Set[*iter] +} + +type Config struct { + CacheSize int `json:"cacheSize"` + BytesPerSync int `json:"bytesPerSync"` + WALBytesPerSync int `json:"walBytesPerSync"` // 0 means no background syncing + MemTableStopWritesThreshold int `json:"memTableStopWritesThreshold"` + MemTableSize int `json:"memTableSize"` + MaxOpenFiles int `json:"maxOpenFiles"` + MaxConcurrentCompactions int `json:"maxConcurrentCompactions"` +} + +// TODO: Add metrics +func New(file string, configBytes []byte, log logging.Logger, _ string, _ prometheus.Registerer) (database.Database, error) { + cfg := DefaultConfig + if len(configBytes) > 0 { + if err := json.Unmarshal(configBytes, &cfg); err != nil { + return nil, err + } + } + + opts := &pebble.Options{ + Cache: pebble.NewCache(int64(cfg.CacheSize)), + BytesPerSync: cfg.BytesPerSync, + Comparer: pebble.DefaultComparer, + WALBytesPerSync: cfg.WALBytesPerSync, + MemTableStopWritesThreshold: cfg.MemTableStopWritesThreshold, + MemTableSize: cfg.MemTableSize, + MaxOpenFiles: cfg.MaxOpenFiles, + MaxConcurrentCompactions: func() int { return cfg.MaxConcurrentCompactions }, + } + opts.Experimental.ReadSamplingMultiplier = -1 // Disable seek compaction + + log.Info( + "opening pebble", + zap.Reflect("config", cfg), + ) + + db, err := pebble.Open(file, opts) + return &Database{ + pebbleDB: db, + openIterators: set.Set[*iter]{}, + }, err +} + +func (db *Database) Close() error { + db.lock.Lock() + defer db.lock.Unlock() + + if db.closed { + return database.ErrClosed + } + + db.closed = true + + for iter := range db.openIterators { + iter.lock.Lock() + iter.release() + 
iter.lock.Unlock() + } + db.openIterators.Clear() + + return updateError(db.pebbleDB.Close()) +} + +func (db *Database) HealthCheck(_ context.Context) (interface{}, error) { + db.lock.RLock() + defer db.lock.RUnlock() + + if db.closed { + return nil, database.ErrClosed + } + return nil, nil +} + +func (db *Database) Has(key []byte) (bool, error) { + db.lock.RLock() + defer db.lock.RUnlock() + + if db.closed { + return false, database.ErrClosed + } + + _, closer, err := db.pebbleDB.Get(key) + if err == pebble.ErrNotFound { + return false, nil + } + if err != nil { + return false, updateError(err) + } + return true, closer.Close() +} + +func (db *Database) Get(key []byte) ([]byte, error) { + db.lock.RLock() + defer db.lock.RUnlock() + + if db.closed { + return nil, database.ErrClosed + } + + data, closer, err := db.pebbleDB.Get(key) + if err != nil { + return nil, updateError(err) + } + return slices.Clone(data), closer.Close() +} + +func (db *Database) Put(key []byte, value []byte) error { + db.lock.RLock() + defer db.lock.RUnlock() + + if db.closed { + return database.ErrClosed + } + + return updateError(db.pebbleDB.Set(key, value, pebble.Sync)) +} + +func (db *Database) Delete(key []byte) error { + db.lock.RLock() + defer db.lock.RUnlock() + + if db.closed { + return database.ErrClosed + } + + return updateError(db.pebbleDB.Delete(key, pebble.Sync)) +} + +func (db *Database) Compact(start []byte, end []byte) error { + db.lock.RLock() + defer db.lock.RUnlock() + + if db.closed { + return database.ErrClosed + } + + if end == nil { + // The database.Database spec treats a nil [limit] as a key after all keys + // but pebble treats a nil [limit] as a key before all keys in Compact. + // Use the greatest key in the database as the [limit] to get the desired behavior. + it := db.pebbleDB.NewIter(&pebble.IterOptions{}) + + if !it.Last() { + // The database is empty. 
+ return it.Close() + } + + end = it.Key() + if err := it.Close(); err != nil { + return err + } + } + + if pebble.DefaultComparer.Compare(start, end) >= 1 { + // pebble requires [start] < [end] + return nil + } + + return updateError(db.pebbleDB.Compact(start, end, true /* parallelize */)) +} + +func (db *Database) NewIterator() database.Iterator { + return db.NewIteratorWithStartAndPrefix(nil, nil) +} + +func (db *Database) NewIteratorWithStart(start []byte) database.Iterator { + return db.NewIteratorWithStartAndPrefix(start, nil) +} + +func (db *Database) NewIteratorWithPrefix(prefix []byte) database.Iterator { + return db.NewIteratorWithStartAndPrefix(nil, prefix) +} + +func (db *Database) NewIteratorWithStartAndPrefix(start, prefix []byte) database.Iterator { + db.lock.Lock() + defer db.lock.Unlock() + + if db.closed { + return &iter{ + db: db, + closed: true, + err: database.ErrClosed, + } + } + + iter := &iter{ + db: db, + iter: db.pebbleDB.NewIter(keyRange(start, prefix)), + } + db.openIterators.Add(iter) + return iter +} + +// Converts a pebble-specific error to its Avalanche equivalent, if applicable. +func updateError(err error) error { + switch err { + case pebble.ErrClosed: + return database.ErrClosed + case pebble.ErrNotFound: + return database.ErrNotFound + default: + return err + } +} + +func keyRange(start, prefix []byte) *pebble.IterOptions { + opt := &pebble.IterOptions{ + LowerBound: prefix, + UpperBound: prefixToUpperBound(prefix), + } + if bytes.Compare(start, prefix) == 1 { + opt.LowerBound = start + } + return opt +} + +// Returns an upper bound that stops after all keys with the given [prefix]. +// Assumes the Database uses bytes.Compare for key comparison and not a custom +// comparer. 
+func prefixToUpperBound(prefix []byte) []byte { + for i := len(prefix) - 1; i >= 0; i-- { + if prefix[i] != 0xFF { + upperBound := make([]byte, i+1) + copy(upperBound, prefix) + upperBound[i]++ + return upperBound + } + } + return nil +} diff --git a/avalanchego/database/pebble/db_test.go b/avalanchego/database/pebble/db_test.go new file mode 100644 index 00000000..ec6dd3e0 --- /dev/null +++ b/avalanchego/database/pebble/db_test.go @@ -0,0 +1,156 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package pebble + +import ( + "fmt" + "testing" + + "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/utils/logging" +) + +func newDB(t testing.TB) *Database { + folder := t.TempDir() + db, err := New(folder, DefaultConfigBytes, logging.NoLog{}, "pebble", prometheus.NewRegistry()) + require.NoError(t, err) + return db.(*Database) +} + +func TestInterface(t *testing.T) { + for name, test := range database.Tests { + t.Run(name, func(t *testing.T) { + db := newDB(t) + test(t, db) + _ = db.Close() + }) + } +} + +func FuzzKeyValue(f *testing.F) { + db := newDB(f) + database.FuzzKeyValue(f, db) + _ = db.Close() +} + +func FuzzNewIteratorWithPrefix(f *testing.F) { + db := newDB(f) + database.FuzzNewIteratorWithPrefix(f, db) + _ = db.Close() +} + +func FuzzNewIteratorWithStartAndPrefix(f *testing.F) { + db := newDB(f) + database.FuzzNewIteratorWithStartAndPrefix(f, db) + _ = db.Close() +} + +func BenchmarkInterface(b *testing.B) { + for _, size := range database.BenchmarkSizes { + keys, values := database.SetupBenchmark(b, size[0], size[1], size[2]) + for name, bench := range database.Benchmarks { + b.Run(fmt.Sprintf("pebble_%d_pairs_%d_keys_%d_values_%s", size[0], size[1], size[2], name), func(b *testing.B) { + db := newDB(b) + bench(b, db, keys, values) + _ = db.Close() + }) + } + } +} + +func 
TestKeyRange(t *testing.T) { + type test struct { + start []byte + prefix []byte + expectedLower []byte + expectedUpper []byte + } + + tests := []test{ + { + start: nil, + prefix: nil, + expectedLower: nil, + expectedUpper: nil, + }, + { + start: nil, + prefix: []byte{}, + expectedLower: []byte{}, + expectedUpper: nil, + }, + { + start: nil, + prefix: []byte{0x00}, + expectedLower: []byte{0x00}, + expectedUpper: []byte{0x01}, + }, + { + start: []byte{0x00, 0x02}, + prefix: []byte{0x00}, + expectedLower: []byte{0x00, 0x02}, + expectedUpper: []byte{0x01}, + }, + { + start: []byte{0x01}, + prefix: []byte{0x00}, + expectedLower: []byte{0x01}, + expectedUpper: []byte{0x01}, + }, + { + start: nil, + prefix: []byte{0x01}, + expectedLower: []byte{0x01}, + expectedUpper: []byte{0x02}, + }, + { + start: nil, + prefix: []byte{0xFF}, + expectedLower: []byte{0xFF}, + expectedUpper: nil, + }, + { + start: []byte{0x00}, + prefix: []byte{0xFF}, + expectedLower: []byte{0xFF}, + expectedUpper: nil, + }, + { + start: nil, + prefix: []byte{0x01, 0x02}, + expectedLower: []byte{0x01, 0x02}, + expectedUpper: []byte{0x01, 0x03}, + }, + { + start: []byte{0x01, 0x02}, + prefix: []byte{0x01, 0x02}, + expectedLower: []byte{0x01, 0x02}, + expectedUpper: []byte{0x01, 0x03}, + }, + { + start: []byte{0x01, 0x02, 0x05}, + prefix: []byte{0x01, 0x02}, + expectedLower: []byte{0x01, 0x02, 0x05}, + expectedUpper: []byte{0x01, 0x03}, + }, + { + start: nil, + prefix: []byte{0x01, 0x02, 0xFF}, + expectedLower: []byte{0x01, 0x02, 0xFF}, + expectedUpper: []byte{0x01, 0x03}, + }, + } + + for _, tt := range tests { + t.Run(string(tt.start)+" "+string(tt.prefix), func(t *testing.T) { + require := require.New(t) + bounds := keyRange(tt.start, tt.prefix) + require.Equal(tt.expectedLower, bounds.LowerBound) + require.Equal(tt.expectedUpper, bounds.UpperBound) + }) + } +} diff --git a/avalanchego/database/pebble/iterator.go b/avalanchego/database/pebble/iterator.go new file mode 100644 index 00000000..ab7d8aad --- 
/dev/null +++ b/avalanchego/database/pebble/iterator.go @@ -0,0 +1,132 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package pebble + +import ( + "errors" + "fmt" + "slices" + "sync" + + "github.com/cockroachdb/pebble" + + "github.com/ava-labs/avalanchego/database" +) + +var ( + _ database.Iterator = (*iter)(nil) + + errCouldntGetValue = errors.New("couldnt get iterator value") +) + +type iter struct { + // [lock] ensures that only one goroutine can access [iter] at a time. + // Note that [Database.Close] calls [iter.Release] so we need [lock] to ensure + // that the user and [Database.Close] don't execute [iter.Release] concurrently. + // Invariant: [Database.lock] is never grabbed while holding [lock]. + lock sync.Mutex + + db *Database + iter *pebble.Iterator + + initialized bool + closed bool + err error + + hasNext bool + nextKey []byte + nextVal []byte +} + +// Must not be called with [db.lock] held. +func (it *iter) Next() bool { + it.lock.Lock() + defer it.lock.Unlock() + + switch { + case it.err != nil: + it.hasNext = false + return false + case it.closed: + it.hasNext = false + it.err = database.ErrClosed + return false + case !it.initialized: + it.hasNext = it.iter.First() + it.initialized = true + default: + it.hasNext = it.iter.Next() + } + + if !it.hasNext { + return false + } + + it.nextKey = it.iter.Key() + + var err error + it.nextVal, err = it.iter.ValueAndErr() + if err != nil { + it.hasNext = false + it.err = fmt.Errorf("%w: %w", errCouldntGetValue, err) + return false + } + + return true +} + +func (it *iter) Error() error { + it.lock.Lock() + defer it.lock.Unlock() + + if it.err != nil || it.closed { + return it.err + } + return updateError(it.iter.Error()) +} + +func (it *iter) Key() []byte { + it.lock.Lock() + defer it.lock.Unlock() + + if !it.hasNext { + return nil + } + return slices.Clone(it.nextKey) +} + +func (it *iter) Value() []byte { + it.lock.Lock() + defer 
it.lock.Unlock() + + if !it.hasNext { + return nil + } + return slices.Clone(it.nextVal) +} + +func (it *iter) Release() { + it.db.lock.Lock() + defer it.db.lock.Unlock() + + it.lock.Lock() + defer it.lock.Unlock() + + it.release() +} + +// Assumes [it.lock] and [it.db.lock] are held. +func (it *iter) release() { + if it.closed { + return + } + + // Remove the iterator from the list of open iterators. + it.db.openIterators.Remove(it) + + it.closed = true + if err := it.iter.Close(); err != nil { + it.err = updateError(err) + } +} diff --git a/avalanchego/database/prefixdb/db.go b/avalanchego/database/prefixdb/db.go index f4ba04e3..86984536 100644 --- a/avalanchego/database/prefixdb/db.go +++ b/avalanchego/database/prefixdb/db.go @@ -1,14 +1,13 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package prefixdb import ( "context" + "slices" "sync" - "golang.org/x/exp/slices" - "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/utils/hashing" ) @@ -39,29 +38,57 @@ type Database struct { closed bool } +func newDB(prefix []byte, db database.Database) *Database { + return &Database{ + dbPrefix: prefix, + db: db, + bufferPool: sync.Pool{ + New: func() interface{} { + return make([]byte, 0, defaultBufCap) + }, + }, + } +} + // New returns a new prefixed database func New(prefix []byte, db database.Database) *Database { if prefixDB, ok := db.(*Database); ok { - simplePrefix := make([]byte, len(prefixDB.dbPrefix)+len(prefix)) - copy(simplePrefix, prefixDB.dbPrefix) - copy(simplePrefix[len(prefixDB.dbPrefix):], prefix) - return NewNested(simplePrefix, prefixDB.db) + return newDB( + JoinPrefixes(prefixDB.dbPrefix, prefix), + prefixDB.db, + ) } - return NewNested(prefix, db) + return newDB( + MakePrefix(prefix), + db, + ) } // NewNested returns a new prefixed database without attempting to compress // prefixes. 
func NewNested(prefix []byte, db database.Database) *Database { - return &Database{ - dbPrefix: hashing.ComputeHash256(prefix), - db: db, - bufferPool: sync.Pool{ - New: func() interface{} { - return make([]byte, 0, defaultBufCap) - }, - }, - } + return newDB( + MakePrefix(prefix), + db, + ) +} + +func MakePrefix(prefix []byte) []byte { + return hashing.ComputeHash256(prefix) +} + +func JoinPrefixes(firstPrefix, secondPrefix []byte) []byte { + simplePrefix := make([]byte, len(firstPrefix)+len(secondPrefix)) + copy(simplePrefix, firstPrefix) + copy(simplePrefix[len(firstPrefix):], secondPrefix) + return MakePrefix(simplePrefix) +} + +func PrefixKey(prefix, key []byte) []byte { + prefixedKey := make([]byte, len(prefix)+len(key)) + copy(prefixedKey, prefix) + copy(prefixedKey[len(prefix):], key) + return prefixedKey } // Assumes that it is OK for the argument to db.db.Has diff --git a/avalanchego/database/prefixdb/db_test.go b/avalanchego/database/prefixdb/db_test.go index 4ea308a3..f928d2f6 100644 --- a/avalanchego/database/prefixdb/db_test.go +++ b/avalanchego/database/prefixdb/db_test.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package prefixdb import ( + "fmt" "testing" "github.com/ava-labs/avalanchego/database" @@ -11,29 +12,39 @@ import ( ) func TestInterface(t *testing.T) { - for _, test := range database.Tests { - db := memdb.New() - test(t, New([]byte("hello"), db)) - test(t, New([]byte("world"), db)) - test(t, New([]byte("wor"), New([]byte("ld"), db))) - test(t, New([]byte("ld"), New([]byte("wor"), db))) - test(t, NewNested([]byte("wor"), New([]byte("ld"), db))) - test(t, NewNested([]byte("ld"), New([]byte("wor"), db))) + for name, test := range database.Tests { + t.Run(name, func(t *testing.T) { + db := memdb.New() + test(t, New([]byte("hello"), db)) + test(t, New([]byte("world"), db)) + test(t, New([]byte("wor"), New([]byte("ld"), db))) + test(t, New([]byte("ld"), New([]byte("wor"), db))) + test(t, NewNested([]byte("wor"), New([]byte("ld"), db))) + test(t, NewNested([]byte("ld"), New([]byte("wor"), db))) + }) } } -func FuzzInterface(f *testing.F) { - for _, test := range database.FuzzTests { - test(f, New([]byte(""), memdb.New())) - } +func FuzzKeyValue(f *testing.F) { + database.FuzzKeyValue(f, New([]byte(""), memdb.New())) +} + +func FuzzNewIteratorWithPrefix(f *testing.F) { + database.FuzzNewIteratorWithPrefix(f, New([]byte(""), memdb.New())) +} + +func FuzzNewIteratorWithStartAndPrefix(f *testing.F) { + database.FuzzNewIteratorWithStartAndPrefix(f, New([]byte(""), memdb.New())) } func BenchmarkInterface(b *testing.B) { for _, size := range database.BenchmarkSizes { keys, values := database.SetupBenchmark(b, size[0], size[1], size[2]) - for _, bench := range database.Benchmarks { - db := New([]byte("hello"), memdb.New()) - bench(b, db, "prefixdb", keys, values) + for name, bench := range database.Benchmarks { + b.Run(fmt.Sprintf("prefixdb_%d_pairs_%d_keys_%d_values_%s", size[0], size[1], size[2], name), func(b *testing.B) { + db := New([]byte("hello"), memdb.New()) + bench(b, db, keys, values) + }) } } } diff --git a/avalanchego/database/rpcdb/db_client.go 
b/avalanchego/database/rpcdb/db_client.go index 8a6f004c..c71ccd06 100644 --- a/avalanchego/database/rpcdb/db_client.go +++ b/avalanchego/database/rpcdb/db_client.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package rpcdb @@ -6,13 +6,13 @@ package rpcdb import ( "context" "encoding/json" + "sync" "google.golang.org/protobuf/types/known/emptypb" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/set" - "github.com/ava-labs/avalanchego/utils/wrappers" rpcdbpb "github.com/ava-labs/avalanchego/proto/pb/rpcdb" ) @@ -43,7 +43,7 @@ func (db *DatabaseClient) Has(key []byte) (bool, error) { if err != nil { return false, err } - return resp.Has, errEnumToError[resp.Err] + return resp.Has, ErrEnumToError[resp.Err] } // Get attempts to return the value that was mapped to the key that was provided @@ -54,7 +54,7 @@ func (db *DatabaseClient) Get(key []byte) ([]byte, error) { if err != nil { return nil, err } - return resp.Value, errEnumToError[resp.Err] + return resp.Value, ErrEnumToError[resp.Err] } // Put attempts to set the value this key maps to @@ -66,7 +66,7 @@ func (db *DatabaseClient) Put(key, value []byte) error { if err != nil { return err } - return errEnumToError[resp.Err] + return ErrEnumToError[resp.Err] } // Delete attempts to remove any mapping from the key @@ -77,7 +77,7 @@ func (db *DatabaseClient) Delete(key []byte) error { if err != nil { return err } - return errEnumToError[resp.Err] + return ErrEnumToError[resp.Err] } // NewBatch returns a new batch @@ -108,10 +108,7 @@ func (db *DatabaseClient) NewIteratorWithStartAndPrefix(start, prefix []byte) da Err: err, } } - return &iterator{ - db: db, - id: resp.Id, - } + return newIterator(db, resp.Id) } // Compact attempts to optimize the space utilization in the provided range @@ -123,7 +120,7 @@ 
func (db *DatabaseClient) Compact(start, limit []byte) error { if err != nil { return err } - return errEnumToError[resp.Err] + return ErrEnumToError[resp.Err] } // Close attempts to close the database @@ -133,7 +130,7 @@ func (db *DatabaseClient) Close() error { if err != nil { return err } - return errEnumToError[resp.Err] + return ErrEnumToError[resp.Err] } func (db *DatabaseClient) HealthCheck(ctx context.Context) (interface{}, error) { @@ -178,7 +175,7 @@ func (b *batch) Write() error { if err != nil { return err } - return errEnumToError[resp.Err] + return ErrEnumToError[resp.Err] } func (b *batch) Inner() database.Batch { @@ -189,8 +186,77 @@ type iterator struct { db *DatabaseClient id uint64 - data []*rpcdbpb.PutRequest - errs wrappers.Errs + data []*rpcdbpb.PutRequest + fetchedData chan []*rpcdbpb.PutRequest + + errLock sync.RWMutex + err error + + reqUpdateError chan chan struct{} + + once sync.Once + onClose chan struct{} + onClosed chan struct{} +} + +func newIterator(db *DatabaseClient, id uint64) *iterator { + it := &iterator{ + db: db, + id: id, + fetchedData: make(chan []*rpcdbpb.PutRequest), + reqUpdateError: make(chan chan struct{}), + onClose: make(chan struct{}), + onClosed: make(chan struct{}), + } + go it.fetch() + return it +} + +// Invariant: fetch is the only thread with access to send requests to the +// server's iterator. This is needed because iterators are not thread safe and +// the server expects the client (us) to only ever issue one request at a time +// for a given iterator id. 
+func (it *iterator) fetch() { + defer func() { + resp, err := it.db.client.IteratorRelease(context.Background(), &rpcdbpb.IteratorReleaseRequest{ + Id: it.id, + }) + if err != nil { + it.setError(err) + } else { + it.setError(ErrEnumToError[resp.Err]) + } + + close(it.fetchedData) + close(it.onClosed) + }() + + for { + resp, err := it.db.client.IteratorNext(context.Background(), &rpcdbpb.IteratorNextRequest{ + Id: it.id, + }) + if err != nil { + it.setError(err) + return + } + + if len(resp.Data) == 0 { + return + } + + for { + select { + case it.fetchedData <- resp.Data: + case onUpdated := <-it.reqUpdateError: + it.updateError() + close(onUpdated) + continue + case <-it.onClose: + return + } + break + } + } } // Next attempts to move the iterator to the next element and returns if this @@ -198,7 +264,7 @@ type iterator struct { func (it *iterator) Next() bool { if it.db.closed.Get() { it.data = nil - it.errs.Add(database.ErrClosed) + it.setError(database.ErrClosed) return false } if len(it.data) > 1 { @@ -207,32 +273,24 @@ func (it *iterator) Next() bool { return true } - resp, err := it.db.client.IteratorNext(context.Background(), &rpcdbpb.IteratorNextRequest{ - Id: it.id, - }) - if err != nil { - it.errs.Add(err) - return false - } - it.data = resp.Data + it.data = <-it.fetchedData return len(it.data) > 0 } // Error returns any that occurred while iterating func (it *iterator) Error() error { - if it.errs.Errored() { - return it.errs.Err + if err := it.getError(); err != nil { + return err } - resp, err := it.db.client.IteratorError(context.Background(), &rpcdbpb.IteratorErrorRequest{ - Id: it.id, - }) - if err != nil { - it.errs.Add(err) - } else { - it.errs.Add(errEnumToError[resp.Err]) + onUpdated := make(chan struct{}) + select { + case it.reqUpdateError <- onUpdated: + <-onUpdated + case <-it.onClosed: } - return it.errs.Err + + return it.getError() } // Key returns the key of the current element @@ -253,12 +311,39 @@ func (it *iterator) Value() []byte { 
// Release frees any resources held by the iterator func (it *iterator) Release() { - resp, err := it.db.client.IteratorRelease(context.Background(), &rpcdbpb.IteratorReleaseRequest{ + it.once.Do(func() { + close(it.onClose) + <-it.onClosed + }) +} + +func (it *iterator) updateError() { + resp, err := it.db.client.IteratorError(context.Background(), &rpcdbpb.IteratorErrorRequest{ Id: it.id, }) if err != nil { - it.errs.Add(err) + it.setError(err) } else { - it.errs.Add(errEnumToError[resp.Err]) + it.setError(ErrEnumToError[resp.Err]) + } +} + +func (it *iterator) setError(err error) { + if err == nil { + return + } + + it.errLock.Lock() + defer it.errLock.Unlock() + + if it.err == nil { + it.err = err } } + +func (it *iterator) getError() error { + it.errLock.RLock() + defer it.errLock.RUnlock() + + return it.err +} diff --git a/avalanchego/database/rpcdb/db_server.go b/avalanchego/database/rpcdb/db_server.go index e9e13573..8a07a672 100644 --- a/avalanchego/database/rpcdb/db_server.go +++ b/avalanchego/database/rpcdb/db_server.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package rpcdb @@ -50,8 +50,8 @@ func (db *DatabaseServer) Has(_ context.Context, req *rpcdbpb.HasRequest) (*rpcd has, err := db.db.Has(req.Key) return &rpcdbpb.HasResponse{ Has: has, - Err: errorToErrEnum[err], - }, errorToRPCError(err) + Err: ErrorToErrEnum[err], + }, ErrorToRPCError(err) } // Get delegates the Get call to the managed database and returns the result @@ -59,34 +59,34 @@ func (db *DatabaseServer) Get(_ context.Context, req *rpcdbpb.GetRequest) (*rpcd value, err := db.db.Get(req.Key) return &rpcdbpb.GetResponse{ Value: value, - Err: errorToErrEnum[err], - }, errorToRPCError(err) + Err: ErrorToErrEnum[err], + }, ErrorToRPCError(err) } // Put delegates the Put call to the managed database and returns the result func (db *DatabaseServer) Put(_ context.Context, req *rpcdbpb.PutRequest) (*rpcdbpb.PutResponse, error) { err := db.db.Put(req.Key, req.Value) - return &rpcdbpb.PutResponse{Err: errorToErrEnum[err]}, errorToRPCError(err) + return &rpcdbpb.PutResponse{Err: ErrorToErrEnum[err]}, ErrorToRPCError(err) } // Delete delegates the Delete call to the managed database and returns the // result func (db *DatabaseServer) Delete(_ context.Context, req *rpcdbpb.DeleteRequest) (*rpcdbpb.DeleteResponse, error) { err := db.db.Delete(req.Key) - return &rpcdbpb.DeleteResponse{Err: errorToErrEnum[err]}, errorToRPCError(err) + return &rpcdbpb.DeleteResponse{Err: ErrorToErrEnum[err]}, ErrorToRPCError(err) } // Compact delegates the Compact call to the managed database and returns the // result func (db *DatabaseServer) Compact(_ context.Context, req *rpcdbpb.CompactRequest) (*rpcdbpb.CompactResponse, error) { err := db.db.Compact(req.Start, req.Limit) - return &rpcdbpb.CompactResponse{Err: errorToErrEnum[err]}, errorToRPCError(err) + return &rpcdbpb.CompactResponse{Err: ErrorToErrEnum[err]}, ErrorToRPCError(err) } // Close delegates the Close call to the managed database and returns the result func (db *DatabaseServer) Close(context.Context, *rpcdbpb.CloseRequest) 
(*rpcdbpb.CloseResponse, error) { err := db.db.Close() - return &rpcdbpb.CloseResponse{Err: errorToErrEnum[err]}, errorToRPCError(err) + return &rpcdbpb.CloseResponse{Err: ErrorToErrEnum[err]}, ErrorToRPCError(err) } // HealthCheck performs a heath check against the underlying database. @@ -109,22 +109,22 @@ func (db *DatabaseServer) WriteBatch(_ context.Context, req *rpcdbpb.WriteBatchR for _, put := range req.Puts { if err := batch.Put(put.Key, put.Value); err != nil { return &rpcdbpb.WriteBatchResponse{ - Err: errorToErrEnum[err], - }, errorToRPCError(err) + Err: ErrorToErrEnum[err], + }, ErrorToRPCError(err) } } for _, del := range req.Deletes { if err := batch.Delete(del.Key); err != nil { return &rpcdbpb.WriteBatchResponse{ - Err: errorToErrEnum[err], - }, errorToRPCError(err) + Err: ErrorToErrEnum[err], + }, ErrorToRPCError(err) } } err := batch.Write() return &rpcdbpb.WriteBatchResponse{ - Err: errorToErrEnum[err], - }, errorToRPCError(err) + Err: ErrorToErrEnum[err], + }, ErrorToRPCError(err) } // NewIteratorWithStartAndPrefix allocates an iterator and returns the iterator @@ -177,7 +177,7 @@ func (db *DatabaseServer) IteratorError(_ context.Context, req *rpcdbpb.Iterator return nil, errUnknownIterator } err := it.Error() - return &rpcdbpb.IteratorErrorResponse{Err: errorToErrEnum[err]}, errorToRPCError(err) + return &rpcdbpb.IteratorErrorResponse{Err: ErrorToErrEnum[err]}, ErrorToRPCError(err) } // IteratorRelease attempts to release the resources allocated to an iterator @@ -193,5 +193,5 @@ func (db *DatabaseServer) IteratorRelease(_ context.Context, req *rpcdbpb.Iterat err := it.Error() it.Release() - return &rpcdbpb.IteratorReleaseResponse{Err: errorToErrEnum[err]}, errorToRPCError(err) + return &rpcdbpb.IteratorReleaseResponse{Err: ErrorToErrEnum[err]}, ErrorToRPCError(err) } diff --git a/avalanchego/database/rpcdb/db_test.go b/avalanchego/database/rpcdb/db_test.go index eec557ed..cc0cca36 100644 --- a/avalanchego/database/rpcdb/db_test.go +++ 
b/avalanchego/database/rpcdb/db_test.go @@ -1,10 +1,11 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package rpcdb import ( "context" + "fmt" "testing" "github.com/stretchr/testify/require" @@ -18,20 +19,19 @@ import ( ) type testDatabase struct { - client *DatabaseClient - server *memdb.Database - closeFn func() + client *DatabaseClient + server *memdb.Database } func setupDB(t testing.TB) *testDatabase { + require := require.New(t) + db := &testDatabase{ server: memdb.New(), } listener, err := grpcutils.NewListener() - if err != nil { - t.Fatalf("Failed to create listener: %s", err) - } + require.NoError(err) serverCloser := grpcutils.ServerCloser{} server := grpcutils.NewServer() @@ -41,51 +41,56 @@ func setupDB(t testing.TB) *testDatabase { go grpcutils.Serve(listener, server) conn, err := grpcutils.Dial(listener.Addr().String()) - if err != nil { - t.Fatalf("Failed to dial: %s", err) - } + require.NoError(err) db.client = NewClient(rpcdbpb.NewDatabaseClient(conn)) - db.closeFn = func() { + + t.Cleanup(func() { serverCloser.Stop() _ = conn.Close() _ = listener.Close() - } + }) + return db } func TestInterface(t *testing.T) { - for _, test := range database.Tests { - db := setupDB(t) - test(t, db.client) - - db.closeFn() + for name, test := range database.Tests { + t.Run(name, func(t *testing.T) { + db := setupDB(t) + test(t, db.client) + }) } } -func FuzzInterface(f *testing.F) { - for _, test := range database.FuzzTests { - db := setupDB(f) - test(f, db.client) +func FuzzKeyValue(f *testing.F) { + db := setupDB(f) + database.FuzzKeyValue(f, db.client) +} - db.closeFn() - } +func FuzzNewIteratorWithPrefix(f *testing.F) { + db := setupDB(f) + database.FuzzNewIteratorWithPrefix(f, db.client) +} + +func FuzzNewIteratorWithStartAndPrefix(f *testing.F) { + db := setupDB(f) + database.FuzzNewIteratorWithStartAndPrefix(f, db.client) } 
func BenchmarkInterface(b *testing.B) { for _, size := range database.BenchmarkSizes { keys, values := database.SetupBenchmark(b, size[0], size[1], size[2]) - for _, bench := range database.Benchmarks { - db := setupDB(b) - bench(b, db.client, "rpcdb", keys, values) - db.closeFn() + for name, bench := range database.Benchmarks { + b.Run(fmt.Sprintf("rpcdb_%d_pairs_%d_keys_%d_values_%s", size[0], size[1], size[2], name), func(b *testing.B) { + db := setupDB(b) + bench(b, db.client, keys, values) + }) } } } func TestHealthCheck(t *testing.T) { - require := require.New(t) - scenarios := []struct { name string testDatabase *testDatabase @@ -112,6 +117,8 @@ func TestHealthCheck(t *testing.T) { } for _, scenario := range scenarios { t.Run(scenario.name, func(t *testing.T) { + require := require.New(t) + baseDB := setupDB(t) db := corruptabledb.New(baseDB.server) defer db.Close() @@ -119,26 +126,15 @@ func TestHealthCheck(t *testing.T) { // check db HealthCheck _, err := db.HealthCheck(context.Background()) - if err == nil && scenario.wantErr { - t.Fatalf("wanted error got nil") - return - } if scenario.wantErr { - require.Containsf(err.Error(), scenario.wantErrMsg, "expected error containing %q, got %s", scenario.wantErrMsg, err) + require.Error(err) //nolint:forbidigo + require.Contains(err.Error(), scenario.wantErrMsg) return } require.NoError(err) // check rpc HealthCheck _, err = baseDB.client.HealthCheck(context.Background()) - if err == nil && scenario.wantErr { - t.Fatalf("wanted error got nil") - return - } - if scenario.wantErr { - require.Containsf(err.Error(), scenario.wantErrMsg, "expected error containing %q, got %s", scenario.wantErrMsg, err) - return - } require.NoError(err) }) } diff --git a/avalanchego/database/rpcdb/errors.go b/avalanchego/database/rpcdb/errors.go index 8a1fae2f..52788cc0 100644 --- a/avalanchego/database/rpcdb/errors.go +++ b/avalanchego/database/rpcdb/errors.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. 
All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package rpcdb @@ -10,18 +10,18 @@ import ( ) var ( - errEnumToError = map[rpcdbpb.Error]error{ + ErrEnumToError = map[rpcdbpb.Error]error{ rpcdbpb.Error_ERROR_CLOSED: database.ErrClosed, rpcdbpb.Error_ERROR_NOT_FOUND: database.ErrNotFound, } - errorToErrEnum = map[error]rpcdbpb.Error{ + ErrorToErrEnum = map[error]rpcdbpb.Error{ database.ErrClosed: rpcdbpb.Error_ERROR_CLOSED, database.ErrNotFound: rpcdbpb.Error_ERROR_NOT_FOUND, } ) -func errorToRPCError(err error) error { - if _, ok := errorToErrEnum[err]; ok { +func ErrorToRPCError(err error) error { + if _, ok := ErrorToErrEnum[err]; ok { return nil } return err diff --git a/avalanchego/database/test_database.go b/avalanchego/database/test_database.go index 51039171..99654282 100644 --- a/avalanchego/database/test_database.go +++ b/avalanchego/database/test_database.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package database @@ -6,14 +6,14 @@ package database import ( "bytes" "io" + "math" + "math/rand" + "slices" "testing" - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/require" - - "golang.org/x/exp/slices" - + "go.uber.org/mock/gomock" + "golang.org/x/exp/maps" "golang.org/x/sync/errgroup" "github.com/ava-labs/avalanchego/utils" @@ -21,45 +21,44 @@ import ( ) // Tests is a list of all database tests -var Tests = []func(t *testing.T, db Database){ - TestSimpleKeyValue, - TestEmptyKey, - TestKeyEmptyValue, - TestSimpleKeyValueClosed, - TestNewBatchClosed, - TestBatchPut, - TestBatchDelete, - TestBatchReset, - TestBatchReuse, - TestBatchRewrite, - TestBatchReplay, - TestBatchReplayPropagateError, - TestBatchInner, - TestBatchLargeSize, - TestIteratorSnapshot, - TestIterator, - TestIteratorStart, - TestIteratorPrefix, - TestIteratorStartPrefix, - TestIteratorMemorySafety, - TestIteratorClosed, - TestIteratorError, - TestIteratorErrorAfterRelease, - TestCompactNoPanic, - TestMemorySafetyDatabase, - TestMemorySafetyBatch, - TestClear, - TestClearPrefix, - TestModifyValueAfterPut, - TestModifyValueAfterBatchPut, - TestModifyValueAfterBatchPutReplay, - TestConcurrentBatches, - TestManySmallConcurrentKVPairBatches, - TestPutGetEmpty, -} - -var FuzzTests = []func(*testing.F, Database){ - FuzzKeyValue, +var Tests = map[string]func(t *testing.T, db Database){ + "SimpleKeyValue": TestSimpleKeyValue, + "OverwriteKeyValue": TestOverwriteKeyValue, + "EmptyKey": TestEmptyKey, + "KeyEmptyValue": TestKeyEmptyValue, + "SimpleKeyValueClosed": TestSimpleKeyValueClosed, + "NewBatchClosed": TestNewBatchClosed, + "BatchPut": TestBatchPut, + "BatchDelete": TestBatchDelete, + "BatchReset": TestBatchReset, + "BatchReuse": TestBatchReuse, + "BatchRewrite": TestBatchRewrite, + "BatchReplay": TestBatchReplay, + "BatchReplayPropagateError": TestBatchReplayPropagateError, + "BatchInner": TestBatchInner, + "BatchLargeSize": TestBatchLargeSize, + "IteratorSnapshot": 
TestIteratorSnapshot, + "Iterator": TestIterator, + "IteratorStart": TestIteratorStart, + "IteratorPrefix": TestIteratorPrefix, + "IteratorStartPrefix": TestIteratorStartPrefix, + "IteratorMemorySafety": TestIteratorMemorySafety, + "IteratorClosed": TestIteratorClosed, + "IteratorError": TestIteratorError, + "IteratorErrorAfterRelease": TestIteratorErrorAfterRelease, + "CompactNoPanic": TestCompactNoPanic, + "MemorySafetyDatabase": TestMemorySafetyDatabase, + "MemorySafetyBatch": TestMemorySafetyBatch, + "AtomicClear": TestAtomicClear, + "Clear": TestClear, + "AtomicClearPrefix": TestAtomicClearPrefix, + "ClearPrefix": TestClearPrefix, + "ModifyValueAfterPut": TestModifyValueAfterPut, + "ModifyValueAfterBatchPut": TestModifyValueAfterBatchPut, + "ModifyValueAfterBatchPutReplay": TestModifyValueAfterBatchPutReplay, + "ConcurrentBatches": TestConcurrentBatches, + "ManySmallConcurrentKVPairBatches": TestManySmallConcurrentKVPairBatches, + "PutGetEmpty": TestPutGetEmpty, } // TestSimpleKeyValue tests to make sure that simple Put + Get + Delete + Has @@ -100,6 +99,22 @@ func TestSimpleKeyValue(t *testing.T, db Database) { require.NoError(db.Delete(key)) } +func TestOverwriteKeyValue(t *testing.T, db Database) { + require := require.New(t) + + key := []byte("hello") + value1 := []byte("world1") + value2 := []byte("world2") + + require.NoError(db.Put(key, value1)) + + require.NoError(db.Put(key, value2)) + + gotValue, err := db.Get(key) + require.NoError(err) + require.Equal(value2, gotValue) +} + func TestKeyEmptyValue(t *testing.T, db Database) { require := require.New(t) @@ -432,8 +447,6 @@ func TestBatchRewrite(t *testing.T, db Database) { // contents. func TestBatchReplay(t *testing.T, db Database) { ctrl := gomock.NewController(t) - defer ctrl.Finish() - require := require.New(t) key1 := []byte("hello1") @@ -469,8 +482,6 @@ func TestBatchReplay(t *testing.T, db Database) { // propagate any returned error during Replay. 
func TestBatchReplayPropagateError(t *testing.T, db Database) { ctrl := gomock.NewController(t) - defer ctrl.Finish() - require := require.New(t) key1 := []byte("hello1") @@ -634,7 +645,7 @@ func TestIterator(t *testing.T, db Database) { require.NoError(iterator.Error()) } -// TestIteratorStart tests to make sure the the iterator can be configured to +// TestIteratorStart tests to make sure the iterator can be configured to // start mid way through the database. func TestIteratorStart(t *testing.T, db Database) { require := require.New(t) @@ -919,13 +930,34 @@ func TestCompactNoPanic(t *testing.T, db Database) { require.NoError(db.Put(key2, value2)) require.NoError(db.Put(key3, value3)) + // Test compacting with nil bounds require.NoError(db.Compact(nil, nil)) + + // Test compacting when start > end + require.NoError(db.Compact([]byte{2}, []byte{1})) + + // Test compacting when start > largest key + require.NoError(db.Compact([]byte{255}, nil)) + require.NoError(db.Close()) - require.Equal(ErrClosed, db.Compact(nil, nil)) + err := db.Compact(nil, nil) + require.ErrorIs(err, ErrClosed) +} + +func TestAtomicClear(t *testing.T, db Database) { + testClear(t, db, func(db Database) error { + return AtomicClear(db, db) + }) } -// TestClear tests to make sure the deletion helper works as expected. func TestClear(t *testing.T, db Database) { + testClear(t, db, func(db Database) error { + return Clear(db, math.MaxInt) + }) +} + +// testClear tests to make sure the deletion helper works as expected. 
+func testClear(t *testing.T, db Database, clearF func(Database) error) { require := require.New(t) key1 := []byte("hello1") @@ -945,17 +977,29 @@ func TestClear(t *testing.T, db Database) { require.NoError(err) require.Equal(3, count) - require.NoError(Clear(db, db)) + require.NoError(clearF(db)) count, err = Count(db) require.NoError(err) - require.Equal(0, count) + require.Zero(count) require.NoError(db.Close()) } -// TestClearPrefix tests to make sure prefix deletion works as expected. +func TestAtomicClearPrefix(t *testing.T, db Database) { + testClearPrefix(t, db, func(db Database, prefix []byte) error { + return AtomicClearPrefix(db, db, prefix) + }) +} + func TestClearPrefix(t *testing.T, db Database) { + testClearPrefix(t, db, func(db Database, prefix []byte) error { + return ClearPrefix(db, prefix, math.MaxInt) + }) +} + +// testClearPrefix tests to make sure prefix deletion works as expected. +func testClearPrefix(t *testing.T, db Database, clearF func(Database, []byte) error) { require := require.New(t) key1 := []byte("hello1") @@ -975,7 +1019,7 @@ func TestClearPrefix(t *testing.T, db Database) { require.NoError(err) require.Equal(3, count) - require.NoError(ClearPrefix(db, db, []byte("hello"))) + require.NoError(clearF(db, []byte("hello"))) count, err = Count(db) require.NoError(err) @@ -1162,3 +1206,133 @@ func FuzzKeyValue(f *testing.F, db Database) { require.Equal(ErrNotFound, err) }) } + +func FuzzNewIteratorWithPrefix(f *testing.F, db Database) { + const ( + maxKeyLen = 32 + maxValueLen = 32 + ) + + f.Fuzz(func( + t *testing.T, + randSeed int64, + prefix []byte, + numKeyValues uint, + ) { + require := require.New(t) + r := rand.New(rand.NewSource(randSeed)) // #nosec G404 + + // Put a bunch of key-values + expected := map[string][]byte{} + for i := 0; i < int(numKeyValues); i++ { + key := make([]byte, r.Intn(maxKeyLen)) + _, _ = r.Read(key) // #nosec G404 + + value := make([]byte, r.Intn(maxValueLen)) + _, _ = r.Read(value) // #nosec G404 + + if 
len(value) == 0 { + // Consistently treat zero length values as nil + // so that we can compare [expected] and [got] with + // require.Equal, which treats nil and empty byte + // as being unequal, whereas the database treats + // them as being equal. + value = nil + } + + if bytes.HasPrefix(key, prefix) { + expected[string(key)] = value + } + + require.NoError(db.Put(key, value)) + } + expectedList := maps.Keys(expected) + slices.Sort(expectedList) + + iter := db.NewIteratorWithPrefix(prefix) + defer iter.Release() + + // Assert the iterator returns the expected key-values. + numIterElts := 0 + for iter.Next() { + val := iter.Value() + if len(val) == 0 { + val = nil + } + require.Equal(expectedList[numIterElts], string(iter.Key())) + require.Equal(expected[string(iter.Key())], val) + numIterElts++ + } + require.Len(expectedList, numIterElts) + + // Clear the database for the next fuzz iteration. + require.NoError(AtomicClear(db, db)) + }) +} + +func FuzzNewIteratorWithStartAndPrefix(f *testing.F, db Database) { + const ( + maxKeyLen = 32 + maxValueLen = 32 + ) + + f.Fuzz(func( + t *testing.T, + randSeed int64, + start []byte, + prefix []byte, + numKeyValues uint, + ) { + require := require.New(t) + r := rand.New(rand.NewSource(randSeed)) // #nosec G404 + + expected := map[string][]byte{} + + // Put a bunch of key-values + for i := 0; i < int(numKeyValues); i++ { + key := make([]byte, r.Intn(maxKeyLen)) + _, _ = r.Read(key) // #nosec G404 + + value := make([]byte, r.Intn(maxValueLen)) + _, _ = r.Read(value) // #nosec G404 + + if len(value) == 0 { + // Consistently treat zero length values as nil + // so that we can compare [expected] and [got] with + // require.Equal, which treats nil and empty byte + // as being unequal, whereas the database treats + // them as being equal. 
+ value = nil + } + + if bytes.HasPrefix(key, prefix) && bytes.Compare(key, start) >= 0 { + expected[string(key)] = value + } + + require.NoError(db.Put(key, value)) + } + + expectedList := maps.Keys(expected) + slices.Sort(expectedList) + + iter := db.NewIteratorWithStartAndPrefix(start, prefix) + defer iter.Release() + + // Assert the iterator returns the expected key-values. + numIterElts := 0 + for iter.Next() { + val := iter.Value() + if len(val) == 0 { + val = nil + } + keyStr := string(iter.Key()) + require.Equal(expectedList[numIterElts], keyStr) + require.Equal(expected[keyStr], val) + numIterElts++ + } + require.Len(expectedList, numIterElts) + + // Clear the database for the next fuzz iteration. + require.NoError(AtomicClear(db, db)) + }) +} diff --git a/avalanchego/database/versiondb/db.go b/avalanchego/database/versiondb/db.go index d65dca94..16876a43 100644 --- a/avalanchego/database/versiondb/db.go +++ b/avalanchego/database/versiondb/db.go @@ -1,16 +1,14 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package versiondb import ( "context" + "slices" "strings" "sync" - "golang.org/x/exp/maps" - "golang.org/x/exp/slices" - "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/memdb" ) @@ -210,7 +208,7 @@ func (db *Database) Abort() { } func (db *Database) abort() { - maps.Clear(db.mem) + clear(db.mem) } // CommitBatch returns a batch that contains all uncommitted puts/deletes. diff --git a/avalanchego/database/versiondb/db_test.go b/avalanchego/database/versiondb/db_test.go index 417fc1ec..0ff801df 100644 --- a/avalanchego/database/versiondb/db_test.go +++ b/avalanchego/database/versiondb/db_test.go @@ -1,31 +1,42 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package versiondb import ( - "bytes" + "fmt" "testing" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/memdb" ) func TestInterface(t *testing.T) { - for _, test := range database.Tests { - baseDB := memdb.New() - test(t, New(baseDB)) + for name, test := range database.Tests { + t.Run(name, func(t *testing.T) { + baseDB := memdb.New() + test(t, New(baseDB)) + }) } } -func FuzzInterface(f *testing.F) { - for _, test := range database.FuzzTests { - baseDB := memdb.New() - test(f, New(baseDB)) - } +func FuzzKeyValue(f *testing.F) { + database.FuzzKeyValue(f, New(memdb.New())) +} + +func FuzzNewIteratorWithPrefix(f *testing.F) { + database.FuzzNewIteratorWithPrefix(f, New(memdb.New())) +} + +func FuzzNewIteratorWithStartAndPrefix(f *testing.F) { + database.FuzzNewIteratorWithStartAndPrefix(f, New(memdb.New())) } func TestIterate(t *testing.T) { + require := require.New(t) + baseDB := memdb.New() db := New(baseDB) @@ -35,204 +46,137 @@ func TestIterate(t *testing.T) { key2 := []byte("z") value2 := []byte("world2") - if err := db.Put(key1, value1); err != nil { - t.Fatalf("Unexpected error on db.Put: %s", err) - } - - if err := db.Commit(); err != nil { - t.Fatalf("Unexpected error on db.Commit: %s", err) - } + require.NoError(db.Put(key1, value1)) + require.NoError(db.Commit()) iterator := db.NewIterator() - if iterator == nil { - t.Fatalf("db.NewIterator returned nil") - } + require.NotNil(iterator) defer iterator.Release() - if !iterator.Next() { - t.Fatalf("iterator.Next Returned: %v ; Expected: %v", false, true) - } else if key := iterator.Key(); !bytes.Equal(key, key1) { - t.Fatalf("iterator.Key Returned: 0x%x ; Expected: 0x%x", key, key1) - } else if value := iterator.Value(); !bytes.Equal(value, value1) { - t.Fatalf("iterator.Value Returned: 0x%x ; Expected: 0x%x", value, value1) - } else if iterator.Next() { - t.Fatalf("iterator.Next Returned: %v ; Expected: %v", true, false) - } else 
if key := iterator.Key(); key != nil { - t.Fatalf("iterator.Key Returned: 0x%x ; Expected: nil", key) - } else if value := iterator.Value(); value != nil { - t.Fatalf("iterator.Value Returned: 0x%x ; Expected: nil", value) - } else if err := iterator.Error(); err != nil { - t.Fatalf("iterator.Error Returned: %s ; Expected: nil", err) - } + require.True(iterator.Next()) + require.Equal(key1, iterator.Key()) + require.Equal(value1, iterator.Value()) - if err := db.Put(key2, value2); err != nil { - t.Fatalf("Unexpected error on database.Put: %s", err) - } + require.False(iterator.Next()) + require.Nil(iterator.Key()) + require.Nil(iterator.Value()) + + require.NoError(iterator.Error()) + + require.NoError(db.Put(key2, value2)) iterator = db.NewIterator() - if iterator == nil { - t.Fatalf("db.NewIterator returned nil") - } + require.NotNil(iterator) defer iterator.Release() - if !iterator.Next() { - t.Fatalf("iterator.Next Returned: %v ; Expected: %v", false, true) - } else if key := iterator.Key(); !bytes.Equal(key, key1) { - t.Fatalf("iterator.Key Returned: 0x%x ; Expected: 0x%x", key, key1) - } else if value := iterator.Value(); !bytes.Equal(value, value1) { - t.Fatalf("iterator.Value Returned: 0x%x ; Expected: 0x%x", value, value1) - } else if !iterator.Next() { - t.Fatalf("iterator.Next Returned: %v ; Expected: %v", false, true) - } else if key := iterator.Key(); !bytes.Equal(key, key2) { - t.Fatalf("iterator.Key Returned: 0x%x ; Expected: 0x%x", key, key2) - } else if value := iterator.Value(); !bytes.Equal(value, value2) { - t.Fatalf("iterator.Value Returned: 0x%x ; Expected: 0x%x", value, value2) - } else if iterator.Next() { - t.Fatalf("iterator.Next Returned: %v ; Expected: %v", true, false) - } else if key := iterator.Key(); key != nil { - t.Fatalf("iterator.Key Returned: 0x%x ; Expected: nil", key) - } else if value := iterator.Value(); value != nil { - t.Fatalf("iterator.Value Returned: 0x%x ; Expected: nil", value) - } else if err := iterator.Error(); err 
!= nil { - t.Fatalf("iterator.Error Returned: %s ; Expected: nil", err) - } + require.True(iterator.Next()) + require.Equal(key1, iterator.Key()) + require.Equal(value1, iterator.Value()) - if err := db.Delete(key1); err != nil { - t.Fatalf("Unexpected error on database.Delete: %s", err) - } + require.True(iterator.Next()) + require.Equal(key2, iterator.Key()) + require.Equal(value2, iterator.Value()) + + require.False(iterator.Next()) + require.Nil(iterator.Key()) + require.Nil(iterator.Value()) + + require.NoError(iterator.Error()) + require.NoError(db.Delete(key1)) iterator = db.NewIterator() - if iterator == nil { - t.Fatalf("db.NewIterator returned nil") - } + require.NotNil(iterator) defer iterator.Release() - if !iterator.Next() { - t.Fatalf("iterator.Next Returned: %v ; Expected: %v", false, true) - } else if key := iterator.Key(); !bytes.Equal(key, key2) { - t.Fatalf("iterator.Key Returned: 0x%x ; Expected: 0x%x", key, key2) - } else if value := iterator.Value(); !bytes.Equal(value, value2) { - t.Fatalf("iterator.Value Returned: 0x%x ; Expected: 0x%x", value, value2) - } else if iterator.Next() { - t.Fatalf("iterator.Next Returned: %v ; Expected: %v", true, false) - } else if key := iterator.Key(); key != nil { - t.Fatalf("iterator.Key Returned: 0x%x ; Expected: nil", key) - } else if value := iterator.Value(); value != nil { - t.Fatalf("iterator.Value Returned: 0x%x ; Expected: nil", value) - } else if err := iterator.Error(); err != nil { - t.Fatalf("iterator.Error Returned: %s ; Expected: nil", err) - } + require.True(iterator.Next()) + require.Equal(key2, iterator.Key()) + require.Equal(value2, iterator.Value()) - if err := db.Commit(); err != nil { - t.Fatalf("Unexpected error on database.Commit: %s", err) - } else if err := db.Put(key2, value1); err != nil { - t.Fatalf("Unexpected error on database.Put: %s", err) - } + require.False(iterator.Next()) + require.Nil(iterator.Key()) + require.Nil(iterator.Value()) + + require.NoError(iterator.Error()) + 
+ require.NoError(db.Commit()) + require.NoError(db.Put(key2, value1)) iterator = db.NewIterator() - if iterator == nil { - t.Fatalf("db.NewIterator returned nil") - } + require.NotNil(iterator) defer iterator.Release() - if !iterator.Next() { - t.Fatalf("iterator.Next Returned: %v ; Expected: %v", false, true) - } else if key := iterator.Key(); !bytes.Equal(key, key2) { - t.Fatalf("iterator.Key Returned: 0x%x ; Expected: 0x%x", key, key2) - } else if value := iterator.Value(); !bytes.Equal(value, value1) { - t.Fatalf("iterator.Value Returned: 0x%x ; Expected: 0x%x", value, value1) - } else if iterator.Next() { - t.Fatalf("iterator.Next Returned: %v ; Expected: %v", true, false) - } else if key := iterator.Key(); key != nil { - t.Fatalf("iterator.Key Returned: 0x%x ; Expected: nil", key) - } else if value := iterator.Value(); value != nil { - t.Fatalf("iterator.Value Returned: 0x%x ; Expected: nil", value) - } else if err := iterator.Error(); err != nil { - t.Fatalf("iterator.Error Returned: %s ; Expected: nil", err) - } + require.True(iterator.Next()) + require.Equal(key2, iterator.Key()) + require.Equal(value1, iterator.Value()) - if err := db.Commit(); err != nil { - t.Fatalf("Unexpected error on database.Commit: %s", err) - } else if err := db.Put(key1, value2); err != nil { - t.Fatalf("Unexpected error on database.Put: %s", err) - } + require.False(iterator.Next()) + require.Nil(iterator.Key()) + require.Nil(iterator.Value()) + + require.NoError(iterator.Error()) + + require.NoError(db.Commit()) + require.NoError(db.Put(key1, value2)) iterator = db.NewIterator() - if iterator == nil { - t.Fatalf("db.NewIterator returned nil") - } + require.NotNil(iterator) defer iterator.Release() - if !iterator.Next() { - t.Fatalf("iterator.Next Returned: %v ; Expected: %v", false, true) - } else if key := iterator.Key(); !bytes.Equal(key, key1) { - t.Fatalf("iterator.Key Returned: 0x%x ; Expected: 0x%x", key, key1) - } else if value := iterator.Value(); !bytes.Equal(value, 
value2) { - t.Fatalf("iterator.Value Returned: 0x%x ; Expected: 0x%x", value, value2) - } else if !iterator.Next() { - t.Fatalf("iterator.Next Returned: %v ; Expected: %v", false, true) - } else if key := iterator.Key(); !bytes.Equal(key, key2) { - t.Fatalf("iterator.Key Returned: 0x%x ; Expected: 0x%x", key, key2) - } else if value := iterator.Value(); !bytes.Equal(value, value1) { - t.Fatalf("iterator.Value Returned: 0x%x ; Expected: 0x%x", value, value1) - } else if iterator.Next() { - t.Fatalf("iterator.Next Returned: %v ; Expected: %v", true, false) - } else if key := iterator.Key(); key != nil { - t.Fatalf("iterator.Key Returned: 0x%x ; Expected: nil", key) - } else if value := iterator.Value(); value != nil { - t.Fatalf("iterator.Value Returned: 0x%x ; Expected: nil", value) - } else if err := iterator.Error(); err != nil { - t.Fatalf("iterator.Error Returned: %s ; Expected: nil", err) - } + require.True(iterator.Next()) + require.Equal(key1, iterator.Key()) + require.Equal(value2, iterator.Value()) + + require.True(iterator.Next()) + require.Equal(key2, iterator.Key()) + require.Equal(value1, iterator.Value()) + + require.False(iterator.Next()) + require.Nil(iterator.Key()) + require.Nil(iterator.Value()) + + require.NoError(iterator.Error()) } func TestCommit(t *testing.T) { + require := require.New(t) + baseDB := memdb.New() db := New(baseDB) - if err := db.Commit(); err != nil { - t.Fatalf("Unexpected error on db.Commit: %s", err) - } + require.NoError(db.Commit()) key1 := []byte("hello1") value1 := []byte("world1") - if err := db.Put(key1, value1); err != nil { - t.Fatalf("Unexpected error on db.Put: %s", err) - } + require.NoError(db.Put(key1, value1)) - if err := db.Commit(); err != nil { - t.Fatalf("Unexpected error on db.Commit: %s", err) - } + require.NoError(db.Commit()) - if value, err := db.Get(key1); err != nil { - t.Fatalf("Unexpected error on db.Get: %s", err) - } else if !bytes.Equal(value, value1) { - t.Fatalf("db.Get Returned: 0x%x ; 
Expected: 0x%x", value, value1) - } else if value, err := baseDB.Get(key1); err != nil { - t.Fatalf("Unexpected error on db.Get: %s", err) - } else if !bytes.Equal(value, value1) { - t.Fatalf("db.Get Returned: 0x%x ; Expected: 0x%x", value, value1) - } + value, err := db.Get(key1) + require.NoError(err) + require.Equal(value1, value) + value, err = baseDB.Get(key1) + require.NoError(err) + require.Equal(value1, value) } func TestCommitClosed(t *testing.T) { + require := require.New(t) + baseDB := memdb.New() db := New(baseDB) key1 := []byte("hello1") value1 := []byte("world1") - if err := db.Put(key1, value1); err != nil { - t.Fatalf("Unexpected error on db.Put: %s", err) - } else if err := db.Close(); err != nil { - t.Fatalf("Unexpected error on db.Close: %s", err) - } else if err := db.Commit(); err != database.ErrClosed { - t.Fatalf("Expected %s on db.Commit", database.ErrClosed) - } + require.NoError(db.Put(key1, value1)) + require.NoError(db.Close()) + require.Equal(database.ErrClosed, db.Commit()) } func TestCommitClosedWrite(t *testing.T) { + require := require.New(t) + baseDB := memdb.New() db := New(baseDB) @@ -241,14 +185,13 @@ func TestCommitClosedWrite(t *testing.T) { baseDB.Close() - if err := db.Put(key1, value1); err != nil { - t.Fatalf("Unexpected error on db.Put: %s", err) - } else if err := db.Commit(); err != database.ErrClosed { - t.Fatalf("Expected %s on db.Commit", database.ErrClosed) - } + require.NoError(db.Put(key1, value1)) + require.Equal(database.ErrClosed, db.Commit()) } func TestCommitClosedDelete(t *testing.T) { + require := require.New(t) + baseDB := memdb.New() db := New(baseDB) @@ -256,88 +199,72 @@ func TestCommitClosedDelete(t *testing.T) { baseDB.Close() - if err := db.Delete(key1); err != nil { - t.Fatalf("Unexpected error on db.Delete: %s", err) - } else if err := db.Commit(); err != database.ErrClosed { - t.Fatalf("Expected %s on db.Commit", database.ErrClosed) - } + require.NoError(db.Delete(key1)) + 
require.Equal(database.ErrClosed, db.Commit()) } func TestAbort(t *testing.T) { + require := require.New(t) + baseDB := memdb.New() db := New(baseDB) key1 := []byte("hello1") value1 := []byte("world1") - if err := db.Put(key1, value1); err != nil { - t.Fatalf("Unexpected error on db.Put: %s", err) - } + require.NoError(db.Put(key1, value1)) - if value, err := db.Get(key1); err != nil { - t.Fatalf("Unexpected error on db.Get: %s", err) - } else if !bytes.Equal(value, value1) { - t.Fatalf("db.Get Returned: 0x%x ; Expected: 0x%x", value, value1) - } else if has, err := baseDB.Has(key1); err != nil { - t.Fatalf("Unexpected error on db.Has: %s", err) - } else if has { - t.Fatalf("db.Has Returned: %v ; Expected: %v", has, false) - } + value, err := db.Get(key1) + require.NoError(err) + require.Equal(value1, value) + has, err := baseDB.Has(key1) + require.NoError(err) + require.False(has) db.Abort() - if has, err := db.Has(key1); err != nil { - t.Fatalf("Unexpected error on db.Has: %s", err) - } else if has { - t.Fatalf("db.Has Returned: %v ; Expected: %v", has, false) - } else if has, err := baseDB.Has(key1); err != nil { - t.Fatalf("Unexpected error on db.Has: %s", err) - } else if has { - t.Fatalf("db.Has Returned: %v ; Expected: %v", has, false) - } + has, err = db.Has(key1) + require.NoError(err) + require.False(has) + has, err = baseDB.Has(key1) + require.NoError(err) + require.False(has) } func TestCommitBatch(t *testing.T) { + require := require.New(t) + baseDB := memdb.New() db := New(baseDB) key1 := []byte("hello1") value1 := []byte("world1") - if err := db.Put(key1, value1); err != nil { - t.Fatalf("Unexpected error on db.Put: %s", err) - } else if has, err := baseDB.Has(key1); err != nil { - t.Fatalf("Unexpected error on db.Has: %s", err) - } else if has { - t.Fatalf("Unexpected result of db.Has: %v", has) - } + require.NoError(db.Put(key1, value1)) + has, err := baseDB.Has(key1) + require.NoError(err) + require.False(has) batch, err := db.CommitBatch() - if 
err != nil { - t.Fatalf("Unexpected error on db.CommitBatch: %s", err) - } + require.NoError(err) db.Abort() - if has, err := db.Has(key1); err != nil { - t.Fatalf("Unexpected error on db.Has: %s", err) - } else if has { - t.Fatalf("Unexpected result of db.Has: %v", has) - } else if err := batch.Write(); err != nil { - t.Fatalf("Unexpected error on batch.Write: %s", err) - } - - if value, err := db.Get(key1); err != nil { - t.Fatalf("Unexpected error on db.Get: %s", err) - } else if !bytes.Equal(value, value1) { - t.Fatalf("db.Get Returned: 0x%x ; Expected: 0x%x", value, value1) - } else if value, err := baseDB.Get(key1); err != nil { - t.Fatalf("Unexpected error on db.Get: %s", err) - } else if !bytes.Equal(value, value1) { - t.Fatalf("db.Get Returned: 0x%x ; Expected: 0x%x", value, value1) - } + has, err = db.Has(key1) + require.NoError(err) + require.False(has) + require.NoError(batch.Write()) + + value, err := db.Get(key1) + require.NoError(err) + require.Equal(value1, value) + value, err = baseDB.Get(key1) + require.NoError(err) + require.Equal(value1, value) } func TestSetDatabase(t *testing.T) { + require := require.New(t) + baseDB := memdb.New() newDB := memdb.New() db := New(baseDB) @@ -345,48 +272,43 @@ func TestSetDatabase(t *testing.T) { key1 := []byte("hello1") value1 := []byte("world1") - if err := db.SetDatabase(newDB); err != nil { - t.Fatalf("Unexpected error on db.SetDatabase: %s", err) - } + require.NoError(db.SetDatabase(newDB)) - if db.GetDatabase() != newDB { - t.Fatalf("Unexpected database from db.GetDatabase") - } else if err := db.Put(key1, value1); err != nil { - t.Fatalf("Unexpected error on db.Put: %s", err) - } else if err := db.Commit(); err != nil { - t.Fatalf("Unexpected error on db.Commit: %s", err) - } else if has, err := baseDB.Has(key1); err != nil { - t.Fatalf("Unexpected error on db.Has: %s", err) - } else if has { - t.Fatalf("db.Has Returned: %v ; Expected: %v", has, false) - } else if has, err := newDB.Has(key1); err != nil { 
- t.Fatalf("Unexpected error on db.Has: %s", err) - } else if !has { - t.Fatalf("db.Has Returned: %v ; Expected: %v", has, true) - } + require.Equal(newDB, db.GetDatabase()) + + require.NoError(db.Put(key1, value1)) + require.NoError(db.Commit()) + + has, err := baseDB.Has(key1) + require.NoError(err) + require.False(has) + + has, err = newDB.Has(key1) + require.NoError(err) + require.True(has) } func TestSetDatabaseClosed(t *testing.T) { + require := require.New(t) + baseDB := memdb.New() db := New(baseDB) - if err := db.Close(); err != nil { - t.Fatalf("Unexpected error on db.Close: %s", err) - } else if err := db.SetDatabase(memdb.New()); err != database.ErrClosed { - t.Fatalf("Expected %s on db.SetDatabase", database.ErrClosed) - } else if db.GetDatabase() != nil { - t.Fatalf("Unexpected database from db.GetDatabase") - } + require.NoError(db.Close()) + require.Equal(database.ErrClosed, db.SetDatabase(memdb.New())) + require.Nil(db.GetDatabase()) } func BenchmarkInterface(b *testing.B) { for _, size := range database.BenchmarkSizes { keys, values := database.SetupBenchmark(b, size[0], size[1], size[2]) - for _, bench := range database.Benchmarks { - baseDB := memdb.New() - db := New(baseDB) - bench(b, db, "versiondb", keys, values) - _ = db.Close() + for name, bench := range database.Benchmarks { + b.Run(fmt.Sprintf("versiondb_%d_pairs_%d_keys_%d_values_%s", size[0], size[1], size[2], name), func(b *testing.B) { + baseDB := memdb.New() + db := New(baseDB) + bench(b, db, keys, values) + _ = db.Close() + }) } } } diff --git a/avalanchego/genesis/aliases.go b/avalanchego/genesis/aliases.go index b12e50d6..2c0407d1 100644 --- a/avalanchego/genesis/aliases.go +++ b/avalanchego/genesis/aliases.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package genesis @@ -15,6 +15,20 @@ import ( "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) +var ( + PChainAliases = []string{"P", "platform"} + XChainAliases = []string{"X", "avm"} + CChainAliases = []string{"C", "evm"} + VMAliases = map[ids.ID][]string{ + constants.PlatformVMID: {"platform"}, + constants.AVMID: {"avm"}, + constants.EVMID: {"evm"}, + secp256k1fx.ID: {"secp256k1fx"}, + nftfx.ID: {"nftfx"}, + propertyfx.ID: {"propertyfx"}, + } +) + // Aliases returns the default aliases based on the network ID func Aliases(genesisBytes []byte) (map[string][]string, map[ids.ID][]string, error) { apiAliases := map[string][]string{ @@ -26,7 +40,7 @@ func Aliases(genesisBytes []byte) (map[string][]string, map[ids.ID][]string, err }, } chainAliases := map[ids.ID][]string{ - constants.PlatformChainID: {"P", "platform"}, + constants.PlatformChainID: PChainAliases, } genesis, err := genesis.Parse(genesisBytes) // TODO let's not re-create genesis to do aliasing @@ -45,7 +59,7 @@ func Aliases(genesisBytes []byte) (map[string][]string, map[ids.ID][]string, err path.Join(constants.ChainAliasPrefix, "X"), path.Join(constants.ChainAliasPrefix, "avm"), } - chainAliases[chainID] = GetXChainAliases() + chainAliases[chainID] = XChainAliases case constants.EVMID: apiAliases[endpoint] = []string{ "C", @@ -53,27 +67,8 @@ func Aliases(genesisBytes []byte) (map[string][]string, map[ids.ID][]string, err path.Join(constants.ChainAliasPrefix, "C"), path.Join(constants.ChainAliasPrefix, "evm"), } - chainAliases[chainID] = GetCChainAliases() + chainAliases[chainID] = CChainAliases } } return apiAliases, chainAliases, nil } - -func GetCChainAliases() []string { - return []string{"C", "evm"} -} - -func GetXChainAliases() []string { - return []string{"X", "avm"} -} - -func GetVMAliases() map[ids.ID][]string { - return map[ids.ID][]string{ - constants.PlatformVMID: {"platform"}, - constants.AVMID: {"avm"}, - constants.EVMID: {"evm"}, - secp256k1fx.ID: {"secp256k1fx"}, - nftfx.ID: {"nftfx"}, - 
propertyfx.ID: {"propertyfx"}, - } -} diff --git a/avalanchego/genesis/beacons.go b/avalanchego/genesis/beacons.go deleted file mode 100644 index 191dc067..00000000 --- a/avalanchego/genesis/beacons.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package genesis - -import ( - "github.com/ava-labs/avalanchego/utils/sampler" -) - -// getIPs returns the beacon IPs for each network -func getIPs(_ uint32) []string { - return nil -} - -// getNodeIDs returns the beacon node IDs for each network -func getNodeIDs(_ uint32) []string { - return nil -} - -// SampleBeacons returns the some beacons this node should connect to -func SampleBeacons(networkID uint32, count int) ([]string, []string) { - ips := getIPs(networkID) - ids := getNodeIDs(networkID) - - if numIPs := len(ips); numIPs < count { - count = numIPs - } - - sampledIPs := make([]string, 0, count) - sampledIDs := make([]string, 0, count) - - s := sampler.NewUniform() - _ = s.Initialize(uint64(len(ips))) - indices, _ := s.Sample(count) - for _, index := range indices { - sampledIPs = append(sampledIPs, ips[int(index)]) - sampledIDs = append(sampledIDs, ids[int(index)]) - } - - return sampledIPs, sampledIDs -} diff --git a/avalanchego/genesis/bootstrappers.go b/avalanchego/genesis/bootstrappers.go new file mode 100644 index 00000000..4f39279e --- /dev/null +++ b/avalanchego/genesis/bootstrappers.go @@ -0,0 +1,58 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package genesis + +import ( + "encoding/json" + "fmt" + + _ "embed" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/ips" + "github.com/ava-labs/avalanchego/utils/sampler" +) + +var ( + //go:embed bootstrappers.json + bootstrappersPerNetworkJSON []byte + + bootstrappersPerNetwork map[string][]Bootstrapper +) + +func init() { + if err := json.Unmarshal(bootstrappersPerNetworkJSON, &bootstrappersPerNetwork); err != nil { + panic(fmt.Sprintf("failed to decode bootstrappers.json %v", err)) + } +} + +// Represents the relationship between the nodeID and the nodeIP. +// The bootstrapper is sometimes called "anchor" or "beacon" node. +type Bootstrapper struct { + ID ids.NodeID `json:"id"` + IP ips.IPDesc `json:"ip"` +} + +// GetBootstrappers returns all default bootstrappers for the provided network. +func GetBootstrappers(networkID uint32) []Bootstrapper { + networkName := constants.NetworkIDToNetworkName[networkID] + return bootstrappersPerNetwork[networkName] +} + +// SampleBootstrappers returns the some beacons this node should connect to +func SampleBootstrappers(networkID uint32, count int) []Bootstrapper { + bootstrappers := GetBootstrappers(networkID) + count = min(count, len(bootstrappers)) + + s := sampler.NewUniform() + s.Initialize(uint64(len(bootstrappers))) + indices, _ := s.Sample(count) + + sampled := make([]Bootstrapper, 0, len(indices)) + for _, index := range indices { + sampled = append(sampled, bootstrappers[int(index)]) + } + return sampled +} diff --git a/avalanchego/genesis/bootstrappers.json b/avalanchego/genesis/bootstrappers.json new file mode 100644 index 00000000..6faacac9 --- /dev/null +++ b/avalanchego/genesis/bootstrappers.json @@ -0,0 +1,6 @@ +{ + "mainnet": [ + ], + "fuji": [ + ] +} diff --git a/avalanchego/genesis/config.go b/avalanchego/genesis/config.go index 5dfe8984..79822c4b 100644 --- a/avalanchego/genesis/config.go +++ 
b/avalanchego/genesis/config.go @@ -1,12 +1,14 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package genesis import ( + "cmp" "encoding/base64" "encoding/hex" "encoding/json" + "errors" "fmt" "os" "path/filepath" @@ -16,10 +18,14 @@ import ( "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/formatting/address" "github.com/ava-labs/avalanchego/utils/math" - "github.com/ava-labs/avalanchego/utils/wrappers" + "github.com/ava-labs/avalanchego/vms/platformvm/signer" ) -var _ utils.Sortable[Allocation] = Allocation{} +var ( + _ utils.Sortable[Allocation] = Allocation{} + + errInvalidGenesisJSON = errors.New("could not unmarshal genesis JSON") +) type LockedAmount struct { Amount uint64 `json:"amount"` @@ -48,15 +54,18 @@ func (a Allocation) Unparse(networkID uint32) (UnparsedAllocation, error) { return ua, err } -func (a Allocation) Less(other Allocation) bool { - return a.InitialAmount < other.InitialAmount || - (a.InitialAmount == other.InitialAmount && a.AVAXAddr.Less(other.AVAXAddr)) +func (a Allocation) Compare(other Allocation) int { + if amountCmp := cmp.Compare(a.InitialAmount, other.InitialAmount); amountCmp != 0 { + return amountCmp + } + return a.AVAXAddr.Compare(other.AVAXAddr) } type Staker struct { - NodeID ids.NodeID `json:"nodeID"` - RewardAddress ids.ShortID `json:"rewardAddress"` - DelegationFee uint32 `json:"delegationFee"` + NodeID ids.NodeID `json:"nodeID"` + RewardAddress ids.ShortID `json:"rewardAddress"` + DelegationFee uint32 `json:"delegationFee"` + Signer *signer.ProofOfPossession `json:"signer,omitempty"` } func (s Staker) Unparse(networkID uint32) (UnparsedStaker, error) { @@ -69,6 +78,7 @@ func (s Staker) Unparse(networkID uint32) (UnparsedStaker, error) { NodeID: s.NodeID, RewardAddress: avaxAddr, DelegationFee: s.DelegationFee, + Signer: s.Signer, }, err } @@ -170,10 
+180,6 @@ var ( // genesis. CostwoConfig Config - // StagingConfig is the config that should be used to generate a flare - // staging genesis. - StagingConfig Config - // LocalFlareConfig is the config that should be used to generate a localFlare // genesis. LocalFlareConfig Config @@ -192,64 +198,60 @@ func init() { unparsedLocalConfig := UnparsedConfig{} unparsedFlareConfig := UnparsedConfig{} unparsedCostwoConfig := UnparsedConfig{} - unparsedStagingConfig := UnparsedConfig{} unparsedLocalFlareConfig := UnparsedConfig{} unparsedSongbirdConfig := UnparsedConfig{} unparsedCostonConfig := UnparsedConfig{} - errs := wrappers.Errs{} - errs.Add( + err := utils.Err( json.Unmarshal(mainnetGenesisConfigJSON, &unparsedMainnetConfig), json.Unmarshal([]byte(localGenesisConfigJSON), &unparsedLocalConfig), json.Unmarshal(flareGenesisConfigJSON, &unparsedFlareConfig), json.Unmarshal(costwoGenesisConfigJSON, &unparsedCostwoConfig), - json.Unmarshal(stagingGenesisConfigJSON, &unparsedStagingConfig), json.Unmarshal(localFlareGenesisConfigJSON, &unparsedLocalFlareConfig), json.Unmarshal([]byte(songbirdGenesisConfigJSON), &unparsedSongbirdConfig), json.Unmarshal([]byte(costonGenesisConfigJSON), &unparsedCostonConfig), ) - if errs.Errored() { - panic(errs.Err) + if err != nil { + panic(err) } - mainnetConfig, err := unparsedMainnetConfig.Parse() - errs.Add(err) - MainnetConfig = mainnetConfig - - localConfig, err := unparsedLocalConfig.Parse() - localConfig.CChainGenesis = localCChainGenesis - errs.Add(err) - LocalConfig = localConfig - - flareConfig, err := unparsedFlareConfig.Parse() - errs.Add(err) - FlareConfig = flareConfig + MainnetConfig, err = unparsedMainnetConfig.Parse() + if err != nil { + panic(err) + } - costwoConfig, err := unparsedCostwoConfig.Parse() - errs.Add(err) - CostwoConfig = costwoConfig + LocalConfig, err = unparsedLocalConfig.Parse() + if err != nil { + panic(err) + } + LocalConfig.CChainGenesis = localCChainGenesis - stagingConfig, err := 
unparsedStagingConfig.Parse() - errs.Add(err) - StagingConfig = stagingConfig + FlareConfig, err = unparsedFlareConfig.Parse() + if err != nil { + panic(err) + } - localFlareConfig, err := unparsedLocalFlareConfig.Parse() - errs.Add(err) - LocalFlareConfig = localFlareConfig + CostwoConfig, err = unparsedCostwoConfig.Parse() + if err != nil { + panic(err) + } - songbirdConfig, err := unparsedSongbirdConfig.Parse() - songbirdConfig.CChainGenesis = songbirdCChainGenesis - errs.Add(err) - SongbirdConfig = songbirdConfig + LocalFlareConfig, err = unparsedLocalFlareConfig.Parse() + if err != nil { + panic(err) + } - costonConfig, err := unparsedCostonConfig.Parse() - costonConfig.CChainGenesis = costonCChainGenesis - errs.Add(err) - CostonConfig = costonConfig + SongbirdConfig, err = unparsedSongbirdConfig.Parse() + if err != nil { + panic(err) + } + SongbirdConfig.CChainGenesis = songbirdCChainGenesis - if errs.Errored() { - panic(errs.Err) + CostonConfig, err = unparsedCostonConfig.Parse() + if err != nil { + panic(err) } + CostonConfig.CChainGenesis = costonCChainGenesis } func GetConfig(networkID uint32) *Config { @@ -262,8 +264,6 @@ func GetConfig(networkID uint32) *Config { return &FlareConfig case constants.CostwoID: return &CostwoConfig - case constants.StagingID: - return &StagingConfig case constants.LocalFlareID: return &LocalFlareConfig case constants.SongbirdID: @@ -298,7 +298,7 @@ func GetConfigContent(genesisContent string) (*Config, error) { func parseGenesisJSONBytesToConfig(bytes []byte) (*Config, error) { var unparsedConfig UnparsedConfig if err := json.Unmarshal(bytes, &unparsedConfig); err != nil { - return nil, fmt.Errorf("could not unmarshal JSON: %w", err) + return nil, fmt.Errorf("%w: %w", errInvalidGenesisJSON, err) } config, err := unparsedConfig.Parse() diff --git a/avalanchego/genesis/config_test.go b/avalanchego/genesis/config_test.go index c7fea58c..8a9bc96a 100644 --- a/avalanchego/genesis/config_test.go +++ 
b/avalanchego/genesis/config_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package genesis @@ -11,57 +11,43 @@ import ( "github.com/ava-labs/avalanchego/ids" ) -func TestAllocationLess(t *testing.T) { +func TestAllocationCompare(t *testing.T) { type test struct { name string alloc1 Allocation alloc2 Allocation - expected bool + expected int } tests := []test{ { name: "equal", alloc1: Allocation{}, alloc2: Allocation{}, - expected: false, + expected: 0, }, { - name: "first initial amount smaller", + name: "initial amount smaller", alloc1: Allocation{}, alloc2: Allocation{ InitialAmount: 1, }, - expected: true, + expected: -1, }, { - name: "first initial amount larger", - alloc1: Allocation{ - InitialAmount: 1, - }, - alloc2: Allocation{}, - expected: false, - }, - { - name: "first bytes smaller", + name: "bytes smaller", alloc1: Allocation{}, alloc2: Allocation{ AVAXAddr: ids.ShortID{1}, }, - expected: true, - }, - { - name: "first bytes larger", - alloc1: Allocation{ - AVAXAddr: ids.ShortID{1}, - }, - alloc2: Allocation{}, - expected: false, + expected: -1, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { require := require.New(t) - require.Equal(tt.expected, tt.alloc1.Less(tt.alloc2)) + + require.Equal(tt.expected, tt.alloc1.Compare(tt.alloc2)) + require.Equal(-tt.expected, tt.alloc2.Compare(tt.alloc1)) }) } } diff --git a/avalanchego/genesis/genesis.go b/avalanchego/genesis/genesis.go index b94d0d7b..136e2060 100644 --- a/avalanchego/genesis/genesis.go +++ b/avalanchego/genesis/genesis.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package genesis @@ -33,13 +33,19 @@ const ( ) var ( - errStakeDurationTooHigh = errors.New("initial stake duration larger than maximum configured") - errNoInitiallyStakedFunds = errors.New("initial staked funds cannot be empty") - errNoSupply = errors.New("initial supply must be > 0") - errNoStakeDuration = errors.New("initial stake duration must be > 0") - errNoStakers = errors.New("initial stakers must be > 0") - errNoCChainGenesis = errors.New("C-Chain genesis cannot be empty") - errNoTxs = errors.New("genesis creates no transactions") + errStakeDurationTooHigh = errors.New("initial stake duration larger than maximum configured") + errNoInitiallyStakedFunds = errors.New("initial staked funds cannot be empty") + errNoSupply = errors.New("initial supply must be > 0") + errNoStakeDuration = errors.New("initial stake duration must be > 0") + errNoStakers = errors.New("initial stakers must be > 0") + errNoCChainGenesis = errors.New("C-Chain genesis cannot be empty") + errNoTxs = errors.New("genesis creates no transactions") + errNoAllocationToStake = errors.New("no allocation to stake") + errDuplicateInitiallyStakedAddress = errors.New("duplicate initially staked address") + errConflictingNetworkIDs = errors.New("conflicting networkIDs") + errFutureStartTime = errors.New("startTime cannot be in the future") + errInitialStakeDurationTooLow = errors.New("initial stake duration is too low") + errOverridesStandardNetworkConfig = errors.New("overrides standard network genesis config") ) // validateInitialStakedFunds ensures all staked @@ -76,7 +82,8 @@ func validateInitialStakedFunds(config *Config) error { } return fmt.Errorf( - "address %s is duplicated in initial staked funds", + "%w: %s", + errDuplicateInitiallyStakedAddress, avaxAddr, ) } @@ -96,7 +103,8 @@ func validateInitialStakedFunds(config *Config) error { } return fmt.Errorf( - "address %s does not have an allocation to stake", + "%w in address %s", + errNoAllocationToStake, avaxAddr, ) } @@ -110,7 +118,8 @@ 
func validateInitialStakedFunds(config *Config) error { func validateConfig(networkID uint32, config *Config, stakingCfg *StakingConfig) error { if networkID != config.NetworkID { return fmt.Errorf( - "networkID %d specified but genesis config contains networkID %d", + "%w: expected %d but config contains %d", + errConflictingNetworkIDs, networkID, config.NetworkID, ) @@ -127,7 +136,8 @@ func validateConfig(networkID uint32, config *Config, stakingCfg *StakingConfig) startTime := time.Unix(int64(config.StartTime), 0) if time.Since(startTime) < 0 { return fmt.Errorf( - "start time cannot be in the future: %s", + "%w: %s", + errFutureStartTime, startTime, ) } @@ -153,10 +163,9 @@ func validateConfig(networkID uint32, config *Config, stakingCfg *StakingConfig) offsetTimeRequired := config.InitialStakeDurationOffset * uint64(len(config.InitialStakers)-1) if offsetTimeRequired > config.InitialStakeDuration { return fmt.Errorf( - "initial stake duration is %d but need at least %d with offset of %d", - config.InitialStakeDuration, + "%w must be at least %d", + errInitialStakeDurationTooLow, offsetTimeRequired, - config.InitialStakeDurationOffset, ) } @@ -195,9 +204,9 @@ func FromFile(networkID uint32, filepath string, stakingCfg *StakingConfig) ([]b switch networkID { case constants.FlareID, constants.SongbirdID, constants.CostwoID, constants.CostonID, constants.LocalFlareID, constants.LocalID: return nil, ids.ID{}, fmt.Errorf( - "cannot override genesis config for standard network %s (%d)", + "%w: %s", + errOverridesStandardNetworkConfig, constants.NetworkName(networkID), - networkID, ) } @@ -237,9 +246,9 @@ func FromFlag(networkID uint32, genesisContent string, stakingCfg *StakingConfig switch networkID { case constants.FlareID, constants.SongbirdID, constants.CostwoID, constants.CostonID, constants.LocalFlareID, constants.LocalID: return nil, ids.ID{}, fmt.Errorf( - "cannot override genesis config for standard network %s (%d)", + "%w: %s", + 
errOverridesStandardNetworkConfig, constants.NetworkName(networkID), - networkID, ) } @@ -332,8 +341,7 @@ func FromConfig(config *Config) ([]byte, ids.ID, error) { return nil, ids.ID{}, fmt.Errorf("couldn't calculate the initial supply: %w", err) } - initiallyStaked := set.Set[ids.ShortID]{} - initiallyStaked.Add(config.InitialStakedFunds...) + initiallyStaked := set.Of(config.InitialStakedFunds...) skippedAllocations := []Allocation(nil) // Specify the initial state of the Platform Chain @@ -410,8 +418,8 @@ func FromConfig(config *Config) ([]byte, ids.ID, error) { delegationFee := json.Uint32(staker.DelegationFee) platformvmArgs.Validators = append(platformvmArgs.Validators, - api.PermissionlessValidator{ - Staker: api.Staker{ + api.GenesisPermissionlessValidator{ + GenesisValidator: api.GenesisValidator{ StartTime: json.Uint64(genesisTime.Unix()), EndTime: json.Uint64(endStakingTime.Unix()), NodeID: staker.NodeID, @@ -422,6 +430,7 @@ func FromConfig(config *Config) ([]byte, ids.ID, error) { }, Staked: utxos, ExactDelegationFee: &delegationFee, + Signer: staker.Signer, }, ) } @@ -548,6 +557,7 @@ func VMGenesis(genesisBytes []byte, vmID ids.ID) (*pchaintxs.Tx, error) { func AVAXAssetID(avmGenesisBytes []byte) (ids.ID, error) { parser, err := xchaintxs.NewParser( + time.Time{}, time.Time{}, []fxs.Fx{ &secp256k1fx.Fx{}, @@ -569,7 +579,7 @@ func AVAXAssetID(avmGenesisBytes []byte) (ids.ID, error) { genesisTx := genesis.Txs[0] tx := xchaintxs.Tx{Unsigned: &genesisTx.CreateAssetTx} - if err := parser.InitializeGenesisTx(&tx); err != nil { + if err := tx.Initialize(genesisCodec); err != nil { return ids.Empty, err } return tx.ID(), nil diff --git a/avalanchego/genesis/genesis_local.go b/avalanchego/genesis/genesis_local.go index 03578e6a..0ef0a622 100644 --- a/avalanchego/genesis/genesis_local.go +++ b/avalanchego/genesis/genesis_local.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. 
All rights reserved. // See the file LICENSE for licensing terms. package genesis @@ -6,20 +6,29 @@ package genesis import ( "time" + "github.com/ava-labs/avalanchego/utils/cb58" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/units" + "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/vms/platformvm/reward" ) -var ( - EWOQKey *secp256k1.PrivateKey +const ( + VMRQKeyStr = "vmRQiZeXEXYMyJhEiqdC2z5JhuDbxL8ix9UVvjgMu2Er1NepE" + VMRQKeyFormattedStr = secp256k1.PrivateKeyPrefix + VMRQKeyStr + + EWOQKeyStr = "ewoqjP7PxY4yr3iLTpLisriqt94hdyDFNgchSxGGztUrTXtNN" + EWOQKeyFormattedStr = secp256k1.PrivateKeyPrefix + EWOQKeyStr ) var ( + VMRQKey *secp256k1.PrivateKey + EWOQKey *secp256k1.PrivateKey + localGenesisConfigJSON = `{ "networkID": 12345, "allocations": [], - "startTime": 1630987200, + "startTime": 1743984000, "initialStakeDuration": 31536000, "initialStakeDurationOffset": 5400, "initialStakedFunds": [], @@ -112,3 +121,20 @@ var ( }, } ) + +func init() { + errs := wrappers.Errs{} + vmrqBytes, err := cb58.Decode(VMRQKeyStr) + errs.Add(err) + ewoqBytes, err := cb58.Decode(EWOQKeyStr) + errs.Add(err) + + VMRQKey, err = secp256k1.ToPrivateKey(vmrqBytes) + errs.Add(err) + EWOQKey, err = secp256k1.ToPrivateKey(ewoqBytes) + errs.Add(err) + + if errs.Err != nil { + panic(errs.Err) + } +} diff --git a/avalanchego/genesis/genesis_local.json b/avalanchego/genesis/genesis_local.json index 3fe5a4bb..cee32310 100644 --- a/avalanchego/genesis/genesis_local.json +++ b/avalanchego/genesis/genesis_local.json @@ -38,7 +38,7 @@ ] } ], - "startTime": 1660536000, + "startTime": 1743984000, "initialStakeDuration": 31536000, "initialStakeDurationOffset": 5400, "initialStakedFunds": [ diff --git a/avalanchego/genesis/genesis_mainnet.go b/avalanchego/genesis/genesis_mainnet.go index 4e55c891..3808174e 100644 --- a/avalanchego/genesis/genesis_mainnet.go +++ b/avalanchego/genesis/genesis_mainnet.go @@ -1,4 +1,4 @@ -// 
Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package genesis diff --git a/avalanchego/genesis/genesis_staging.go b/avalanchego/genesis/genesis_staging.go deleted file mode 100644 index 26f48e58..00000000 --- a/avalanchego/genesis/genesis_staging.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright (C) 2019-2021, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package genesis - -import ( - "time" - - _ "embed" - - "github.com/ava-labs/avalanchego/utils/units" - "github.com/ava-labs/avalanchego/vms/platformvm/reward" -) - -var ( - //go:embed genesis_staging.json - stagingGenesisConfigJSON []byte - - // StagingParams are the params used for the flare staging network - StagingParams = Params{ - TxFeeConfig: TxFeeConfig{ - TxFee: units.MilliAvax, - CreateAssetTxFee: units.MilliAvax, - CreateSubnetTxFee: 100 * units.MegaAvax, - CreateBlockchainTxFee: 100 * units.MegaAvax, - }, - StakingConfig: StakingConfig{ - UptimeRequirement: .8, // 80% - MinValidatorStake: 1 * units.Avax, - MaxValidatorStake: 10000 * units.Avax, - MinDelegatorStake: 0, - MinDelegationFee: 0, - MinStakeDuration: 24 * time.Hour, - MaxStakeDuration: 365 * 24 * time.Hour, - RewardConfig: reward.Config{ - MaxConsumptionRate: .12 * reward.PercentDenominator, - MinConsumptionRate: .10 * reward.PercentDenominator, - MintingPeriod: 365 * 24 * time.Hour, - SupplyCap: 0 * units.MegaAvax, - }, - }, - } -) diff --git a/avalanchego/genesis/genesis_staging.json b/avalanchego/genesis/genesis_staging.json deleted file mode 100644 index c9dc8358..00000000 --- a/avalanchego/genesis/genesis_staging.json +++ /dev/null @@ -1,126 +0,0 @@ -{ - "networkID": 161, - "allocations": [ - { - "ethAddr": "0xb3d82b1367d362de99ab59a658165aff520cbd4d", - "avaxAddr": "X-staging1g65uqn6t77p656w64023nh8nd9updzmx8ffr3e", - "initialAmount": 0, - "unlockSchedule": [ - { - "amount": 
50000000000000, - "locktime": 1654056000 - } - ] - } - ], - "startTime": 1685592000, - "initialStakeDuration": 31536000, - "initialStakeDurationOffset": 5400, - "initialStakedFunds": [ - "X-staging1g65uqn6t77p656w64023nh8nd9updzmx8ffr3e" - ], - "initialStakers": [ - { - "nodeID": "NodeID-F2qXdeHH7ikN2isCU59JjQkDp2rsVz56N", - "rewardAddress": "X-staging18jma8ppw3nhx5r4ap8clazz0dps7rv5uvzsl80", - "delegationFee": 0 - }, - { - "nodeID": "NodeID-CkGefuKuxWw3v1vzGkjPfkrLPdP5GMPWQ", - "rewardAddress": "X-staging18jma8ppw3nhx5r4ap8clazz0dps7rv5uvzsl80", - "delegationFee": 0 - }, - { - "nodeID": "NodeID-yHDJnnZANij1wQYnZ7e8zvG47zvRz74R", - "rewardAddress": "X-staging18jma8ppw3nhx5r4ap8clazz0dps7rv5uvzsl80", - "delegationFee": 0 - }, - { - "nodeID": "NodeID-KcMcpTRa9iqa1TaeZ66TFGWQSQgwGT5Yg", - "rewardAddress": "X-staging18jma8ppw3nhx5r4ap8clazz0dps7rv5uvzsl80", - "delegationFee": 0 - }, - { - "nodeID": "NodeID-3ukjHcvtrXUvNXj556DyfGYCEtWNbhq34", - "rewardAddress": "X-staging18jma8ppw3nhx5r4ap8clazz0dps7rv5uvzsl80", - "delegationFee": 0 - }, - { - "nodeID": "NodeID-EGEVMoGBBPnDo1PSTEJ8D2U7riFr596NF", - "rewardAddress": "X-staging18jma8ppw3nhx5r4ap8clazz0dps7rv5uvzsl80", - "delegationFee": 0 - }, - { - "nodeID": "NodeID-Q3BmUmUUBVayRN4TNVvoqa1p2oUbs4LWZ", - "rewardAddress": "X-staging18jma8ppw3nhx5r4ap8clazz0dps7rv5uvzsl80", - "delegationFee": 0 - }, - { - "nodeID": "NodeID-DyCqArfcYYomNYk1A72yqJnu5K6GokE8a", - "rewardAddress": "X-staging18jma8ppw3nhx5r4ap8clazz0dps7rv5uvzsl80", - "delegationFee": 0 - }, - { - "nodeID": "NodeID-Lk58WoYrrAakt7wR9WbHsa9teqiLWTW35", - "rewardAddress": "X-staging18jma8ppw3nhx5r4ap8clazz0dps7rv5uvzsl80", - "delegationFee": 0 - }, - { - "nodeID": "NodeID-Mg5GYBvRYXPJ8eebBAbnAoL7psUirtTRQ", - "rewardAddress": "X-staging18jma8ppw3nhx5r4ap8clazz0dps7rv5uvzsl80", - "delegationFee": 0 - }, - { - "nodeID": "NodeID-PBv8vzEcFrfapbbnKVJvWLq5HsSKUgEev", - "rewardAddress": "X-staging18jma8ppw3nhx5r4ap8clazz0dps7rv5uvzsl80", - "delegationFee": 0 - }, - { - 
"nodeID": "NodeID-NWPEfFujWBfihfF8uA6tAeG3rWQRqisCS", - "rewardAddress": "X-staging18jma8ppw3nhx5r4ap8clazz0dps7rv5uvzsl80", - "delegationFee": 0 - }, - { - "nodeID": "NodeID-CvB1avUcDiCfdgVDnMcLuiR2nCp4M8dKY", - "rewardAddress": "X-staging18jma8ppw3nhx5r4ap8clazz0dps7rv5uvzsl80", - "delegationFee": 0 - }, - { - "nodeID": "NodeID-DqyFn4gGgMM6MTBNyoqWGu6UDgyu9bJNg", - "rewardAddress": "X-staging18jma8ppw3nhx5r4ap8clazz0dps7rv5uvzsl80", - "delegationFee": 0 - }, - { - "nodeID": "NodeID-MpYXNZWGEoR3Jgv95FAiATXdFhNKfkpid", - "rewardAddress": "X-staging18jma8ppw3nhx5r4ap8clazz0dps7rv5uvzsl80", - "delegationFee": 0 - }, - { - "nodeID": "NodeID-F1rLi9bU5oyLMNCb39BwuvHRdFUrag5F3", - "rewardAddress": "X-staging18jma8ppw3nhx5r4ap8clazz0dps7rv5uvzsl80", - "delegationFee": 0 - }, - { - "nodeID": "NodeID-Li3wCFFJc4WjtDfgiV8d9D8tHcrXGkYTW", - "rewardAddress": "X-staging18jma8ppw3nhx5r4ap8clazz0dps7rv5uvzsl80", - "delegationFee": 0 - }, - { - "nodeID": "NodeID-HPErAiAi3scehK6hFN7S2mU2u8fyJaQ4v", - "rewardAddress": "X-staging18jma8ppw3nhx5r4ap8clazz0dps7rv5uvzsl80", - "delegationFee": 0 - }, - { - "nodeID": "NodeID-6CrNPDrF4MURmxnCNX7vAgKoTL1q5mUPx", - "rewardAddress": "X-staging18jma8ppw3nhx5r4ap8clazz0dps7rv5uvzsl80", - "delegationFee": 0 - }, - { - "nodeID": "NodeID-Gx8AV9cWSyQ3mArMspSLJBTW31nuoUUmw", - "rewardAddress": "X-staging18jma8ppw3nhx5r4ap8clazz0dps7rv5uvzsl80", - "delegationFee": 0 - } - ], - "cChainGenesis": 
"{\"config\":{\"chainId\":161,\"homesteadBlock\":0,\"daoForkBlock\":0,\"daoForkSupport\":true,\"eip150Block\":0,\"eip150Hash\":\"0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0\",\"eip155Block\":0,\"eip158Block\":0,\"byzantiumBlock\":0,\"constantinopleBlock\":0,\"petersburgBlock\":0,\"istanbulBlock\":0,\"muirGlacierBlock\":0,\"apricotPhase1BlockTimestamp\":0,\"apricotPhase2BlockTimestamp\":0,\"apricotPhase3BlockTimestamp\":0,\"apricotPhase4BlockTimestamp\":0,\"apricotPhase5BlockTimestamp\":0},\"nonce\":\"0x0\",\"timestamp\":\"0x0\",\"extraData\":\"0x00\",\"gasLimit\":\"0x5f5e100\",\"difficulty\":\"0x0\",\"mixHash\":\"0x0000000000000000000000000000000000000000000000000000000000000000\",\"coinbase\":\"0x0100000000000000000000000000000000000000\",\"alloc\":{\"0x1000000000000000000000000000000000000001\":{\"balance\":\"0x0\",\"code\":\"0x608060405234801561001057600080fd5b50600436106101005760003560e01c8063c2f56d4c11610097578063ec7424a011610066578063ec7424a014610284578063f417c9d81461028c578063f5f59a4a14610294578063f64b6fda1461029c57610100565b8063c2f56d4c146101f0578063cfd1fdad14610216578063dd86215714610259578063eaebf6d31461026157610100565b80635f8c940d116100d35780635f8c940d1461018157806371c5ecb11461018957806371e24574146101a65780637ff6faa6146101cc57610100565b8063273c463b1461010557806329be4db21461012d5780633c70b3571461015c5780634b8a125f14610179575b600080fd5b61012b6004803603602081101561011b57600080fd5b50356001600160a01b031661030c565b005b61014a6004803603602081101561014357600080fd5b503561033b565b60408051918252519081900360200190f35b61014a6004803603602081101561017257600080fd5b503561049a565b61014a61059c565b61014a6105a4565b61014a6004803603602081101561019f57600080fd5b50356105a9565b61014a600480360360208110156101bc57600080fd5b50356001600160a01b03166105c1565b6101d46105d6565b604080516001600160a01b039092168252519081900360200190f35b6101d46004803603602081101561020657600080fd5b50356001600160a01b03166105dd565b6102456004803603608081101561022c57600080fd5b508035906020810135906
04081013590606001356105f8565b604080519115158252519081900360200190f35b61014a6106d4565b61012b6004803603604081101561027757600080fd5b508035906020013561072c565b61014a6107d0565b61014a6107d6565b61014a6107dc565b61012b600480360360208110156102b257600080fd5b8101906020810181356401000000008111156102cd57600080fd5b8201836020820111156102df57600080fd5b8035906020019184600183028401116401000000008311171561030157600080fd5b5090925090506107e1565b33600090815260106020526040902080546001600160a01b0319166001600160a01b0392909216919091179055565b336000908152601060205260408120546001600160a01b03168061035c5750335b6001831161036957600080fd5b6001600160a01b03811660009081526011602052604090206009015460001984019081111561039757600080fd5b6001600160a01b03821660009081526011602052604081206003600019840106600381106103c157fe5b600390810291909101546001600160a01b0385166000908152601160205260409020909250908306600381106103f357fe5b60030201600101549350600060116000856001600160a01b03166001600160a01b031681526020019081526020016000206000016003848161043157fe5b066003811061043c57fe5b6003020160020154905084818560405160200180848152602001838152602001826001600160a01b03168152602001935050505060405160208183030381529060405280519060200120821461049157600080fd5b50505050919050565b6000600360125410156104e7576040805162461bcd60e51b815260206004820152601060248201526f746f74616c42756666657273203c203360801b604482015290519081900360640190fd5b600360125403821115610531576040805162461bcd60e51b815260206004820152600d60248201526c1b9bdd08199a5b985b1a5e9959609a1b604482015290519081900360640190fd5b611a408210806105475750601254611a42190182115b610582576040805162461bcd60e51b8152602060048201526007602482015266195e1c1a5c995960ca1b604482015290519081900360640190fd5b6013611a408306611a40811061059457fe5b015492915050565b636184740081565b600381565b601381611a4081106105ba57600080fd5b0154905081565b60116020526000908152604090206009015481565b620dead181565b6010602052600090815260409020546001600160a01b031681565b6000605a63618473ff19420104851461064d576040805162461bcd60e51b81526020600482015
260126024820152713bb937b73390313ab33332b9273ab6b132b960711b604482015290519081900360640190fd5b336000818152601160208181526040808420600981018b905581516060810183528a81528084018a9052918201889052949093525290600387066003811061069157fe5b600302016000820151816000015560208201518160010155604082015181600201559050506012548511156106c8575060016106cc565b5060005b949350505050565b600060036012541015610721576040805162461bcd60e51b815260206004820152601060248201526f746f74616c42756666657273203c203360801b604482015290519081900360640190fd5b506012546002190190565b6003821161073957600080fd5b605a63618473ff19420104821461074f57600080fd5b601254821161075d57600080fd5b334114801561076e575041620dead1145b156107cc576012829055806013611a40600219850106611a40811061078f57fe5b01556040805182815290516002198401917f8ffd19aa79a62d0764e560d21b1245698310783be781d7d80b38233d4d7d288c919081900360200190a25b5050565b60125481565b611a4081565b605a81565b7f7cbc6812801238dea8eb58356bb62b95dbce8dc28498aa30e7d2c6873ed36cc73342848460405180856001600160a01b03168152602001848152602001806020018281038252848482818152602001925080828437600083820152604051601f909101601f191690920182900397509095505050505050a1505056fea26469706673582212201835668a4aedf7b8dd6cb953219e7b126f41acff165aaa41023acd8f3dd8edf064736f6c63430007060033\"},\"0x1000000000000000000000000000000000000002\":{\"balance\":\"0x0\",\"code\":\"0x608060405234801561001057600080fd5b50600436106101f05760003560e01c8063870196b81161010f578063d48a38df116100a2578063e9de7d6011610071578063e9de7d6014610397578063ecdda0dd146103aa578063ed21b6e4146103bd578063f5a98383146103d4576101f0565b8063d48a38df1461035c578063dded1b4714610364578063e17f212e1461036c578063e371aef014610381576101f0565b8063aea36b53116100de578063aea36b5314610326578063b00c0b7614610339578063be0522e01461034c578063c9f960eb14610354576101f0565b8063870196b8146102f05780638be2fb86146103035780639d6a890f1461030b578063a6817ace1461031e576101f0565b806362da19a511610187578063689c499911610156578063689c4999146102ac57806372993615146102bf57806374e6310e146102c7578063
7fec8d38146102e8576101f0565b806362da19a514610270578063639031431461027857806363d4a53a1461028057806367fc402914610299576101f0565b80635267a15d116101c35780635267a15d146102385780635aa6e6751461024d5780635ff270791461025557806362354e0314610268576101f0565b806310663750146101f55780631d76dea1146102135780634f6a77b51461021b5780635042916c14610223575b600080fd5b6101fd6103dc565b60405161020a9190613397565b60405180910390f35b6101fd6103e2565b6101fd6103e8565b6102366102313660046130a6565b6103ee565b005b610240610435565b60405161020a919061317d565b61024061045a565b610236610263366004612ffe565b6104f0565b610240610845565b6101fd610850565b6101fd610856565b610288610860565b60405161020a959493929190613283565b6102366102a7366004612ffe565b61089b565b6102366102ba366004612f0e565b610983565b6101fd6109c0565b6102da6102d5366004612ffe565b6109c6565b60405161020a9291906133a0565b6101fd610a6c565b6102366102fe3660046130a6565b610acc565b6101fd610c08565b610236610319366004612e3c565b610c0e565b6101fd610c10565b610236610334366004612e3c565b610c16565b610236610347366004612e58565b610ca5565b610240610d50565b610240610d5f565b6101fd610d9b565b6101fd610da1565b610374610da7565b60405161020a9190613322565b610389610db7565b60405161020a929190613375565b6102366103a53660046130a6565b610dd8565b6102886103b83660046130be565b610f10565b6103c561126d565b60405161020a9392919061320e565b610236611432565b60055481565b60095481565b60085481565b600054600160b01b900460ff16806104105750600054600160a81b900460ff16155b156104275761041d6114ec565b600c819055610432565b610432600036611523565b50565b7f714f205b2abd25bef1d06a1af944e38c113fe6160375c4e1d6d5cf28848e77195490565b60008054600160a81b900460ff1661047d576000546001600160a01b03166104ea565b60076001609c1b016001600160a01b031663732524946040518163ffffffff1660e01b815260040160206040518083038186803b1580156104bd57600080fd5b505afa1580156104d1573d6000803e3d6000fd5b505050506040513d60208110156104e757600080fd5b50515b90505b90565b60408051630debfda360e41b8152336004820152905160076001609c1b019163debfda30916024808301926020929190829003018186803b1580156105345760
0080fd5b505afa158015610548573d6000803e3d6000fd5b505050506040513d602081101561055e57600080fd5b50516105a1576040805162461bcd60e51b815260206004820152600d60248201526c37b7363c9032bc32b1baba37b960991b604482015290519081900360640190fd5b6001600160e01b031981166000908152600160205260409020805461060d576040805162461bcd60e51b815260206004820152601a60248201527f74696d656c6f636b3a20696e76616c69642073656c6563746f72000000000000604482015290519081900360640190fd5b8054421015610663576040805162461bcd60e51b815260206004820152601960248201527f74696d656c6f636b3a206e6f7420616c6c6f7765642079657400000000000000604482015290519081900360640190fd5b6000816001018054600181600116156101000203166002900480601f0160208091040260200160405190810160405280929190818152602001828054600181600116156101000203166002900480156106fd5780601f106106d2576101008083540402835291602001916106fd565b820191906000526020600020905b8154815290600101906020018083116106e057829003601f168201915b5050506001600160e01b03198616600090815260016020819052604082208281559495509092506107319150830182612ccb565b50506000805460ff60b01b1916600160b01b178155604051825130918491819060208401908083835b602083106107795780518252601f19909201916020918201910161075a565b6001836020036101000a0380198251168184511680821785525050505050509050019150506000604051808303816000865af19150503d80600081146107db576040519150601f19603f3d011682016040523d82523d6000602084013e6107e0565b606091505b50506000805460ff60b01b19169055604080516001600160e01b03198716815242602082015281519293507fa7326b57fc9cfe267aaea5e7f0b01757154d265620a0585819416ee9ddd2c438929081900390910190a161083f816116a6565b50505050565b60076001609c1b0181565b60045481565b60006104ea6116c3565b600b5460609081908190819060009061088a90600160c01b90046001600160401b03166001610f10565b945094509450945094509091929394565b6108a36116ce565b6001600160e01b0319811660009081526001602052604090205461090e576040805162461bcd60e51b815260206004820152601a60248201527f74696d656c6f636b3a20696e76616c69642073656c6563746f72000000000000604482015290519081900360640190fd5b604080516001600160e0
1b03198316815242602082015281517f7735b2391c38a81419c513e30ca578db7158eadd7101511b23e221c654d19cf8929181900390910190a16001600160e01b0319811660009081526001602081905260408220828155919061097e90830182612ccb565b505050565b600054600160b01b900460ff16806109a55750600054600160a81b900460ff16155b15610427576109b26114ec565b6109bb8161172d565b610432565b60075481565b600160208181526000928352604092839020805481840180548651600296821615610100026000190190911695909504601f81018590048502860185019096528585529094919392909190830182828015610a625780601f10610a3757610100808354040283529160200191610a62565b820191906000526020600020905b815481529060010190602001808311610a4557829003601f168201915b5050505050905082565b6002546000906001600160a01b0316610ab157610ab1306040518060400160405280600e81526020016d696e666c6174696f6e207a65726f60901b81525060006119da565b60026001609c1b013314610ac457600080fd5b6104ea611b9e565b600054600160b01b900460ff1680610aee5750600054600160a81b900460ff16155b1561042757610afb6114ec565b600854610b0b90606e606461244f565b811115604051806040016040528060118152602001700dac2f040dad2dce840e8dede40d0d2ced607b1b81525090610b5f5760405162461bcd60e51b8152600401610b569190613362565b60405180910390fd5b5060408051808201909152601081526f6d6178206d696e74206973207a65726f60801b602082015281610ba55760405162461bcd60e51b8152600401610b569190613362565b5062015180600a54014211604051806040016040528060128152602001711d1a5b594819d85c081d1bdbc81cda1bdc9d60721b81525090610bf95760405162461bcd60e51b8152600401610b569190613362565b50600881905542600a55610432565b60035481565bfe5b600a5481565b600054600160b01b900460ff1680610c385750600054600160a81b900460ff16155b1561042757610c456114ec565b6000610c4f610435565b6001600160a01b0316146040518060400160405280600b81526020016a185b1c9958591e481cd95d60aa1b81525090610c9b5760405162461bcd60e51b8152600401610b569190613362565b506109bb8161255d565b610cad610435565b6001600160a01b0316336001600160a01b031614610d09576040805162461bcd60e51b815260206004820152601460248201527337b7363c9030b2323932b9b9903ab83230ba32b960611b6044820152905190
81900360640190fd5b610d42610d3d83836040518060400160405280600e81526020016d20b2323932b9b9aab83230ba32b960911b815250612581565b61255d565b610d4c82826126ae565b5050565b6002546001600160a01b031681565b600f5460009060ff16610d8c57600f805460ff191660011790556000610d836128d6565b91506104ed9050565b610d9461045a565b90506104ed565b600c5481565b60065481565b600054600160a81b900460ff1681565b600b546001600160c01b03811690600160c01b90046001600160401b031682565b60025460408051808201909152600d81526c3737ba1034b7333630ba34b7b760991b602082015233916001600160a01b03168214610e295760405162461bcd60e51b8152600401610b569190613362565b5060085482111560405180604001604052806007815260200166746f6f2062696760c81b81525090610e6e5760405162461bcd60e51b8152600401610b569190613362565b5042610e786116c3565b10604051806040016040528060098152602001683a37b79037b33a32b760b91b81525090610eb95760405162461bcd60e51b8152600401610b569190613362565b508115610d4c5742600955600454610ed190836128f6565b6004556040517f4c4f1efc376f31abeb51b72c5f9ed81cf4016591312bb02337e58149dcfaaab490610f04908490613397565b60405180910390a15050565b606080606080600060148054905087106040518060400160405280601081526020016f0e6e8c2e4e840d2dcc8caf040d0d2ced60831b81525090610f675760405162461bcd60e51b8152600401610b569190613362565b506014546000908888011115610f8257601454889003610f84565b865b9050806001600160401b0381118015610f9c57600080fd5b50604051908082528060200260200182016040528015610fc6578160200160208202803683370190505b509550806001600160401b0381118015610fdf57600080fd5b50604051908082528060200260200182016040528015611009578160200160208202803683370190505b509450806001600160401b038111801561102257600080fd5b5060405190808252806020026020018201604052801561105657816020015b60608152602001906001900390816110415790505b509350806001600160401b038111801561106f57600080fd5b50604051908082528060200260200182016040528015611099578160200160208202803683370190505b50925060005b818110156112515760006014828b01815481106110b857fe5b6000918252602080832090910154808352601390915260409091205489519192506001600160c01b0316908990849081
106110ef57fe5b6020026020010181815250506013600082815260200190815260200160002060000160189054906101000a90046001600160401b03166001600160401b031687838151811061113a57fe5b6020908102919091018101919091526000828152601382526040908190206002908101805483516001821615610100026000190190911692909204601f810185900485028301850190935282825290929091908301828280156111de5780601f106111b3576101008083540402835291602001916111de565b820191906000526020600020905b8154815290600101906020018083116111c157829003601f168201915b50505050508683815181106111ef57fe5b60200260200101819052506013600082815260200190815260200160002060010160009054906101000a90046001600160a01b031685838151811061123057fe5b6001600160a01b03909216602092830291909101909101525060010161109f565b5050600b549497939650919450926001600160c01b0316919050565b60105460609081908190806001600160401b038111801561128d57600080fd5b506040519080825280602002602001820160405280156112b7578160200160208202803683370190505b509350806001600160401b03811180156112d057600080fd5b506040519080825280602002602001820160405280156112fa578160200160208202803683370190505b509250806001600160401b038111801561131357600080fd5b5060405190808252806020026020018201604052801561133d578160200160208202803683370190505b50915060005b8181101561142b5760006010828154811061135a57fe5b9060005260206000200160009054906101000a90046001600160a01b031690508086838151811061138757fe5b60200260200101906001600160a01b031690816001600160a01b03168152505060116000826001600160a01b03166001600160a01b03168152602001908152602001600020548583815181106113d957fe5b60200260200101818152505060126000826001600160a01b03166001600160a01b031681526020019081526020016000205484838151811061141757fe5b602090810291909101015250600101611343565b5050909192565b61143a6116ce565b600054600160a81b900460ff1615611499576040805162461bcd60e51b815260206004820152601a60248201527f616c726561647920696e2070726f64756374696f6e206d6f6465000000000000604482015290519081900360640190fd5b60008054600161ff0160a01b031916600160a81b1790556040805160076001609c1b01815290517f83af113638b5422f9e977cebc0aaf0
eaf2188eb9a8baae7f9d46c42b33a1560c9181900360200190a1565b600054600160b01b900460ff16156115195733301461150757fe5b6000805460ff60b01b19169055611521565b6115216116ce565b565b61152b6116ce565b600082359050600060076001609c1b016001600160a01b0316636221a54b6040518163ffffffff1660e01b815260040160206040518083038186803b15801561157357600080fd5b505afa158015611587573d6000803e3d6000fd5b505050506040513d602081101561159d57600080fd5b505160408051808201825242830180825282516020601f89018190048102820181019094528781529394509290918281019190889088908190840183828082843760009201829052509390945250506001600160e01b0319861681526001602081815260409092208451815584830151805191945061162193928501920190612d0f565b509050507fed948300a3694aa01d4a6b258bfd664350193d770c0b51f8387277f6d83ea3b68382878760405180856001600160e01b0319168152602001848152602001806020018281038252848482818152602001925080828437600083820152604051601f909101601f191690920182900397509095505050505050a15050505050565b3d604051818101604052816000823e82156116bf578181f35b8181fd5b600954620143700190565b6116d661045a565b6001600160a01b0316336001600160a01b031614611521576040805162461bcd60e51b815260206004820152600f60248201526e6f6e6c7920676f7665726e616e636560881b604482015290519081900360640190fd5b8051604080518082019091526008815267746f6f206d616e7960c01b6020820152600a8211156117705760405162461bcd60e51b8152600401610b569190613362565b50611779612959565b60005b8181101561097e5760006001600160a01b031683828151811061179b57fe5b6020026020010151600001516001600160a01b031614156040518060400160405280600c81526020016b61646472657373207a65726f60a01b815250906117f55760405162461bcd60e51b8152600401610b569190613362565b5060105460005b81811015611899576010818154811061181157fe5b60009182526020909120015485516001600160a01b039091169086908590811061183757fe5b6020026020010151600001516001600160a01b031614156040518060400160405280600b81526020016a647570206164647265737360a81b815250906118905760405162461bcd60e51b8152600401610b569190613362565b506001016117fc565b5060108483815181106118a857fe5b60209081029190910181015151825460
01810184556000938452919092200180546001600160a01b0319166001600160a01b0390921691909117905583518490839081106118f257fe5b6020026020010151602001516011600086858151811061190e57fe5b6020026020010151600001516001600160a01b03166001600160a01b031681526020019081526020016000208190555060006012600086858151811061195057fe5b6020026020010151600001516001600160a01b03166001600160a01b03168152602001908152602001600020819055507f86d03f430c7616021073d7a71766f632f1ce19f289aa989534d9f4732253eb598483815181106119ad57fe5b60200260200101516000015160016040516119c992919061332d565b60405180910390a15060010161177c565b600083836040516020016119ef929190613191565b60408051808303601f190181529181528151602092830120600081815260139093529120805491925090600160c01b90046001600160401b0316611ad65760148054600180820183556000929092527fce6d7b5282bd9a3661ae061feed1dbda4e52ab073b1f9285be6e155d9c38d4ec01839055810180546001600160a01b0319166001600160a01b038716179055611a89846040612a09565b8051611a9f916002840191602090910190612d0f565b5060145460018201805467ffffffffffffffff60a01b1916600160a01b6000199093016001600160401b0316929092029190911790555b8054436001600160c01b038181166001600160401b03600160c01b80860482166001019091160291909316176001600160c01b0319169190911782556040517f7a459ed083a9b267865360013a5ad6dbc07e5befe6e4f71671c940fdd4206bee91611b4791889190889088906131d6565b60405180910390a1600b80546001600160c01b0319811660016001600160c01b0392831681018316919091178084559301549216600160a01b9092046001600160401b0316600160c01b0291909117905550505050565b6000600354431415611bb2575060006104ed565b43600355600d544790811115611f88576000611bdb600e54600d546128f690919063ffffffff16565b905080821415611d5057600e54600554611bf590826128f6565b6005556040517fa42d823c276ad1990284418c303209194a75fa95a901f19752a9f65a407ffa8c90611c28908390613397565b60405180910390a1600260009054906101000a90046001600160a01b03166001600160a01b031663c611c2c5826040518263ffffffff1660e01b81526004016000604051808303818588803b158015611c8057600080fd5b505af193505050508015611c92575060015b611d0157611c9e61342b565b8061
1ca95750611cbb565b611cb5308260006119da565b50611cfc565b611cfc306040518060400160405280601d81526020017f756e6b6e6f776e206572726f722e20726563656976654d696e74696e6700000081525060006119da565b611d4a565b600654611d0e90826128f6565b6006556040517f12773bf711e11ec0b058c3856d441d726d2dc89113706c4f4175571f1e830c5a90611d41908390613397565b60405180910390a15b50611f86565b80821015611db2576000611d6f600d5484612abd90919063ffffffff16565b600754909150611d7f90826128f6565b6007556040517f3fe36bcb00188390b2b40f1ab66c58f660aea67fe98b9f80667f692e1a9ab36890611d41908390613397565b600e54600554611dc1916128f6565b600555600e54600d54600091611de291611ddc908690612abd565b90612abd565b600754909150611df290826128f6565b600755600e546040517fa42d823c276ad1990284418c303209194a75fa95a901f19752a9f65a407ffa8c91611e2691613397565b60405180910390a17f3fe36bcb00188390b2b40f1ab66c58f660aea67fe98b9f80667f692e1a9ab36881604051611e5d9190613397565b60405180910390a1600260009054906101000a90046001600160a01b03166001600160a01b031663c611c2c5600e546040518263ffffffff1660e01b81526004016000604051808303818588803b158015611eb757600080fd5b505af193505050508015611ec9575060015b611f3857611ed561342b565b80611ee05750611ef2565b611eec308260006119da565b50611f33565b611f33306040518060400160405280601d81526020017f756e6b6e6f776e206572726f722e20726563656976654d696e74696e6700000081525060006119da565b611f84565b600e54600654611f47916128f6565b600655600e546040517f12773bf711e11ec0b058c3856d441d726d2dc89113706c4f4175571f1e830c5a91611f7b91613397565b60405180910390a15b505b505b60105460005b818110156123a757600060108281548110611fa557fe5b60009182526020808320909101546001600160a01b031680835260129091526040909120549091508015612031576001600160a01b0382166000908152601260205260409081902060001983019055517f9895eddb1e8569b1dae526135aa5cab97f982fdc3b0ff7e17920c95e3b9bda629061202490849084906131bd565b60405180910390a161239d565b6001600160a01b038216600090815260116020526040812054905a90506204a76881101561209b577f9b5c4be38598cb8d8b6e07727d2303d1d9fc2dfc31ad323170f5ea4dcc1f914a85870360405161208a9190613397
565b60405180910390a1505050506123a7565b620493df19810182158015906120b057508083105b156120b85750815b846001600160a01b0316636d0e8c34826040518263ffffffff1660e01b8152600401602060405180830381600088803b1580156120f457600080fd5b5087f193505050508015612125575060408051601f3d908101601f1916820190925261212291810190612fde565b60015b61235c5761213161342b565b8061213c57506121c3565b61214986825a86036119da565b856001600160a01b031663e22fdece6040518163ffffffff1660e01b8152600401602060405180830381600087803b15801561218457600080fd5b505af1158015612198573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906121bc9190612fde565b5050612357565b60005a90506000841180156121e15750836121de8483612abd565b10155b156122b457612215866040518060400160405280600a8152602001696f7574206f662067617360b01b8152508386036119da565b6000866001600160a01b031663e22fdece6040518163ffffffff1660e01b8152600401602060405180830381600087803b15801561225257600080fd5b505af1158015612266573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061228a9190612fde565b9050806122ae57600c546001600160a01b0388166000908152601260205260409020555b50612355565b6122e086604051806040016040528060078152602001663ab735b737bbb760c91b8152508386036119da565b856001600160a01b031663e22fdece6040518163ffffffff1660e01b8152600401602060405180830381600087803b15801561231b57600080fd5b505af115801561232f573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906123539190612fde565b505b505b612399565b507fe7aa66356adbd5e839ef210626f6d8f6f72109c17fadf4c4f9ca82b315ae79b4855a84036040516123909291906131bd565b60405180910390a15b5050505b5050600101611f8e565b506123b0612b1a565b925082156123fa57600e8390556040517f34f843cef0df42035141347873da1758a6643358831b5ba5b1580be947644f92906123ed908590613397565b60405180910390a1612400565b6000600e555b47600d55600061240e612b33565b905047811461244957612449306040518060400160405280600e81526020016d6f7574206f662062616c616e636560901b81525060006119da565b50505090565b6000808211612498576040805162461bcd60e51b81526020600482015260106024
8201526f4469766973696f6e206279207a65726f60801b604482015290519081900360640190fd5b836124a557506000612556565b838302838582816124b257fe5b0414156124cb578281816124c257fe5b04915050612556565b60008386816124d657fe5b04905060008487816124e457fe5b06905060008587816124f257fe5b049050600086888161250057fe5b06905061254e61251a886125148685612b52565b90612bab565b6125486125278686612b52565b6125486125348987612b52565b6125488d6125428c8b612b52565b90612b52565b906128f6565b955050505050505b9392505050565b7f714f205b2abd25bef1d06a1af944e38c113fe6160375c4e1d6d5cf28848e771955565b600080826040516020018080602001828103825283818151815260200191508051906020019080838360005b838110156125c55781810151838201526020016125ad565b50505050905090810190601f1680156125f25780820380516001836020036101000a031916815260200191505b50925050506040516020818303038152906040528051906020012090506000805b865181101561265a5786818151811061262857fe5b60200260200101518314156126525785818151811061264357fe5b6020026020010151915061265a565b600101612613565b506001600160a01b0381166126a5576040805162461bcd60e51b815260206004820152600c60248201526b61646472657373207a65726f60a01b604482015290519081900360640190fd5b95945050505050565b60006126dc83836040518060400160405280600981526020016824b7333630ba34b7b760b91b815250612581565b6002546040519192507f4bdd1012a7d55ed9afad8675a125e1b68c7c15f712c0f3d5cddac69c3b9798059161271e9184916001600160a01b0390911690613348565b60405180910390a1600280546001600160a01b0319166001600160a01b038316179055600854612758576a31a17e847807b1bc0000006008555b60105480612767575050610d4c565b6000816001600160401b038111801561277f57600080fd5b506040519080825280602002602001820160405280156127b957816020015b6127a6612d9b565b81526020019060019003908161279e5790505b50905060005b828110156128c5576000601082815481106127d657fe5b600091825260208220015460408051637afadd3960e11b815290516001600160a01b039092169350612869928a928a92869263f5f5ba7292600480840193829003018186803b15801561282857600080fd5b505afa15801561283c573d6000803e3d6000fd5b505050506040513d6000823e601f3d908101601f1916820160405261
28649190810190613026565b612581565b83838151811061287557fe5b6020908102919091018101516001600160a01b03928316905290821660009081526011909152604090205483518490849081106128ae57fe5b6020908102919091018101510152506001016127bf565b506128cf8161172d565b5050505050565b600073fffec6c83c8bf5c3f4ae0ccf8c45ce20e4560bd76104ea81612c12565b600082820183811015612950576040805162461bcd60e51b815260206004820152601b60248201527f536166654d6174683a206164646974696f6e206f766572666c6f770000000000604482015290519081900360640190fd5b90505b92915050565b60105460005b81811015610d4c576010805460009190600019810190811061297d57fe5b600091825260209091200154601080546001600160a01b03909216925090806129a257fe5b600082815260208120820160001990810180546001600160a01b03191690559091019091556040517f86d03f430c7616021073d7a71766f632f1ce19f289aa989534d9f4732253eb59916129f89184919061332d565b60405180910390a15060010161295f565b6060600083905082815111612a215783915050612953565b6000836001600160401b0381118015612a3957600080fd5b506040519080825280601f01601f191660200182016040528015612a64576020820181803683370190505b50905060005b84811015612ab457828181518110612a7e57fe5b602001015160f81c60f81b828281518110612a9557fe5b60200101906001600160f81b031916908160001a905350600101612a6a565b50949350505050565b600082821115612b14576040805162461bcd60e51b815260206004820152601e60248201527f536166654d6174683a207375627472616374696f6e206f766572666c6f770000604482015290519081900360640190fd5b50900390565b60006104ea600554600454612abd90919063ffffffff16565b60006104ea600754612548600654600554612abd90919063ffffffff16565b600082612b6157506000612953565b82820282848281612b6e57fe5b04146129505760405162461bcd60e51b81526004018080602001828103825260218152602001806134e56021913960400191505060405180910390fd5b6000808211612c01576040805162461bcd60e51b815260206004820152601a60248201527f536166654d6174683a206469766973696f6e206279207a65726f000000000000604482015290519081900360640190fd5b818381612c0a57fe5b049392505050565b600054600160a01b900460ff1615612c68576040805162461bcd60e51b8152602060048201526014602482015273
696e697469616c6973656420213d2066616c736560601b604482015290519081900360640190fd5b60008054600160a01b60ff60a01b19909116176001600160a01b0319166001600160a01b03831690811790915560408051918252517f9789733827840833afc031fb2ef9ab6894271f77bad2085687cf4ae5c7bee4db916020908290030190a150565b50805460018160011615610100020316600290046000825580601f10612cf15750610432565b601f0160209004906000526020600020908101906104329190612db2565b828054600181600116156101000203166002900490600052602060002090601f016020900481019282612d455760008555612d8b565b82601f10612d5e57805160ff1916838001178555612d8b565b82800160010185558215612d8b579182015b82811115612d8b578251825591602001919060010190612d70565b50612d97929150612db2565b5090565b604080518082019091526000808252602082015290565b5b80821115612d975760008155600101612db3565b600082601f830112612dd7578081fd5b81356020612dec612de7836133dc565b6133b9565b8281528181019085830183850287018401881015612e08578586fd5b855b85811015612e2f578135612e1d816134cf565b84529284019290840190600101612e0a565b5090979650505050505050565b600060208284031215612e4d578081fd5b8135612950816134cf565b60008060408385031215612e6a578081fd5b82356001600160401b0380821115612e80578283fd5b818501915085601f830112612e93578283fd5b81356020612ea3612de7836133dc565b82815281810190858301838502870184018b1015612ebf578788fd5b8796505b84871015612ee1578035835260019690960195918301918301612ec3565b5096505086013592505080821115612ef7578283fd5b50612f0485828601612dc7565b9150509250929050565b60006020808385031215612f20578182fd5b82356001600160401b0380821115612f36578384fd5b818501915085601f830112612f49578384fd5b8135612f57612de7826133dc565b818152848101908486016040808502870188018b1015612f75578889fd5b8896505b84871015612fcf5780828c031215612f8f578889fd5b80518181018181108882111715612fa257fe5b82528235612faf816134cf565b815282890135898201528452600196909601959287019290810190612f79565b50909998505050505050505050565b600060208284031215612fef578081fd5b81518015158114612950578182fd5b60006020828403121561300f578081fd5b81356001600160e01b031981168114612950578182fd5b6000
60208284031215613037578081fd5b81516001600160401b038082111561304d578283fd5b818401915084601f830112613060578283fd5b81518181111561306c57fe5b61307f601f8201601f19166020016133b9565b9150808252856020828501011115613095578384fd5b612ab48160208401602086016133f9565b6000602082840312156130b7578081fd5b5035919050565b600080604083850312156130d0578182fd5b50508035926020909101359150565b6000815180845260208085019450808401835b838110156131175781516001600160a01b0316875295820195908201906001016130f2565b509495945050505050565b6000815180845260208085019450808401835b8381101561311757815187529582019590820190600101613135565b600081518084526131698160208601602086016133f9565b601f01601f19169290920160200192915050565b6001600160a01b0391909116815260200190565b6001600160a01b03831681526040602082018190526000906131b590830184613151565b949350505050565b6001600160a01b03929092168252602082015260400190565b600060018060a01b0386168252846020830152608060408301526131fd6080830185613151565b905082606083015295945050505050565b606080825284519082018190526000906020906080840190828801845b828110156132505781516001600160a01b03168452928401929084019060010161322b565b505050838103828501526132648187613122565b91505082810360408401526132798185613122565b9695505050505050565b600060a0825261329660a0830188613122565b6020838203818501526132a98289613122565b848103604086015287518082529092508183019082810284018301838a01865b838110156132f757601f198784030185526132e5838351613151565b948601949250908501906001016132c9565b5050868103606088015261330b818a6130df565b955050505050508260808301529695505050505050565b901515815260200190565b6001600160a01b039290921682521515602082015260400190565b6001600160a01b0392831681529116602082015260400190565b6000602082526125566020830184613151565b6001600160c01b039290921682526001600160401b0316602082015260400190565b90815260200190565b6000838252604060208301526131b56040830184613151565b6040518181016001600160401b03811182821017156133d457fe5b604052919050565b60006001600160401b038211156133ef57fe5b5060209081020190565b60005b8381101561341457818101518382015260200161
33fc565b8381111561083f5750506000910152565b60e01c90565b600060443d101561343b576104ed565b600481823e6308c379a061344f8251613425565b14613459576104ed565b6040513d600319016004823e80513d6001600160401b03816024840111818411171561348857505050506104ed565b828401925082519150808211156134a257505050506104ed565b503d830160208284010111156134ba575050506104ed565b601f01601f1916810160200160405291505090565b6001600160a01b038116811461043257600080fdfe536166654d6174683a206d756c7469706c69636174696f6e206f766572666c6f77a264697066735822122030eddfa4dd80d24bdbf9a6285452baf4ff9e4206a23d9b24930e0c5c4994211864736f6c63430007060033\"},\"0x1000000000000000000000000000000000000003\":{\"balance\":\"0x0\",\"code\":\"0x608060405234801561001057600080fd5b506004361061018e5760003560e01c80639d986f91116100de578063c9f960eb11610097578063e17f212e11610071578063e17f212e1461064c578063e2db5a5214610668578063f5a9838314610794578063ffacb84e1461079c5761018e565b8063c9f960eb1461061f578063cd4b691414610627578063d89601fd146106445761018e565b80639d986f91146103f95780639ec2b58114610425578063aea36b53146104c6578063b00c0b76146104ec578063b39c68581461060f578063c0156bcc146106175761018e565b806371e1fad91161014b5780637ac420ad116101255780637ac420ad146103825780638c9d28b6146103a85780638fc6f667146103b05780639d6a890f146103d35761018e565b806371e1fad91461023157806374e6310e1461023957806376794efb146102df5761018e565b80633b56f098146101935780635267a15d146101ad5780635aa6e675146101d15780635ff27079146101d957806362354e031461020257806367fc40291461020a575b600080fd5b61019b6107f4565b60408051918252519081900360200190f35b6101b56107fc565b604080516001600160a01b039092168252519081900360200190f35b6101b5610821565b610200600480360360208110156101ef57600080fd5b50356001600160e01b0319166108b7565b005b6101b5610c0c565b6102006004803603602081101561022057600080fd5b50356001600160e01b031916610c17565b6101b5610cff565b6102606004803603602081101561024f57600080fd5b50356001600160e01b031916610d0e565b6040518083815260200180602001828103825283818151815260200191508051906020019080838360005b838110156102a35
7818101518382015260200161028b565b50505050905090810190601f1680156102d05780820380516001836020036101000a031916815260200191505b50935050505060405180910390f35b610200600480360360408110156102f557600080fd5b810190602081018135600160201b81111561030f57600080fd5b82018360208201111561032157600080fd5b803590602001918460208302840111600160201b8311171561034257600080fd5b9190808060200260200160405190810160405280939291908181526020018383602002808284376000920191909152509295505091359250610db4915050565b61019b6004803603602081101561039857600080fd5b50356001600160a01b0316610ecd565b6101b5610ee8565b610200600480360360408110156103c657600080fd5b5080359060200135610ef7565b610200600480360360208110156103e957600080fd5b50356001600160a01b0316611180565b6102006004803603604081101561040f57600080fd5b506001600160a01b038135169060200135611185565b6102006004803603602081101561043b57600080fd5b810190602081018135600160201b81111561045557600080fd5b82018360208201111561046757600080fd5b803590602001918460208302840111600160201b8311171561048857600080fd5b919080806020026020016040519081016040528093929190818152602001838360200280828437600092019190915250929550611235945050505050565b610200600480360360208110156104dc57600080fd5b50356001600160a01b0316611384565b6102006004803603604081101561050257600080fd5b810190602081018135600160201b81111561051c57600080fd5b82018360208201111561052e57600080fd5b803590602001918460208302840111600160201b8311171561054f57600080fd5b9190808060200260200160405190810160405280939291908181526020018383602002808284376000920191909152509295949360208101935035915050600160201b81111561059e57600080fd5b8201836020820111156105b057600080fd5b803590602001918460208302840111600160201b831117156105d157600080fd5b919080806020026020016040519081016040528093929190818152602001838360200280828437600092019190915250929550611451945050505050565b6101b56114fc565b61019b61150b565b6101b5611510565b61019b6004803603602081101561063d57600080fd5b5035611530565b61019b61154a565b6106546115f4565b604080519115158252519081900360200190f35b6102006004803603608081101561067e57600
080fd5b81359190810190604081016020820135600160201b81111561069f57600080fd5b8201836020820111156106b157600080fd5b803590602001918460208302840111600160201b831117156106d257600080fd5b9190808060200260200160405190810160405280939291908181526020018383602002808284376000920191909152509295949360208101935035915050600160201b81111561072157600080fd5b82018360208201111561073357600080fd5b803590602001918460208302840111600160201b8311171561075457600080fd5b9190808060200260200160405190810160405280939291908181526020018383602002808284376000920191909152509295505091359250611604915050565b610200611e1f565b6107a4611ed9565b60408051602080825283518183015283519192839290830191858101910280838360005b838110156107e05781810151838201526020016107c8565b505050509050019250505060405180910390f35b600160801b81565b7f714f205b2abd25bef1d06a1af944e38c113fe6160375c4e1d6d5cf28848e77195490565b60008054600160a81b900460ff16610844576000546001600160a01b03166108b1565b60076001609c1b016001600160a01b031663732524946040518163ffffffff1660e01b815260040160206040518083038186803b15801561088457600080fd5b505afa158015610898573d6000803e3d6000fd5b505050506040513d60208110156108ae57600080fd5b50515b90505b90565b60408051630debfda360e41b8152336004820152905160076001609c1b019163debfda30916024808301926020929190829003018186803b1580156108fb57600080fd5b505afa15801561090f573d6000803e3d6000fd5b505050506040513d602081101561092557600080fd5b5051610968576040805162461bcd60e51b815260206004820152600d60248201526c37b7363c9032bc32b1baba37b960991b604482015290519081900360640190fd5b6001600160e01b03198116600090815260016020526040902080546109d4576040805162461bcd60e51b815260206004820152601a60248201527f74696d656c6f636b3a20696e76616c69642073656c6563746f72000000000000604482015290519081900360640190fd5b8054421015610a2a576040805162461bcd60e51b815260206004820152601960248201527f74696d656c6f636b3a206e6f7420616c6c6f7765642079657400000000000000604482015290519081900360640190fd5b6000816001018054600181600116156101000203166002900480601f016020809104026020016040519081016040528092919081815260200
182805460018160011615610100020316600290048015610ac45780601f10610a9957610100808354040283529160200191610ac4565b820191906000526020600020905b815481529060010190602001808311610aa757829003601f168201915b5050506001600160e01b0319861660009081526001602081905260408220828155949550909250610af8915083018261247b565b50506000805460ff60b01b1916600160b01b178155604051825130918491819060208401908083835b60208310610b405780518252601f199092019160209182019101610b21565b6001836020036101000a0380198251168184511680821785525050505050509050019150506000604051808303816000865af19150503d8060008114610ba2576040519150601f19603f3d011682016040523d82523d6000602084013e610ba7565b606091505b50506000805460ff60b01b19169055604080516001600160e01b03198716815242602082015281519293507fa7326b57fc9cfe267aaea5e7f0b01757154d265620a0585819416ee9ddd2c438929081900390910190a1610c0681611f3b565b50505050565b60076001609c1b0181565b610c1f611f58565b6001600160e01b03198116600090815260016020526040902054610c8a576040805162461bcd60e51b815260206004820152601a60248201527f74696d656c6f636b3a20696e76616c69642073656c6563746f72000000000000604482015290519081900360640190fd5b604080516001600160e01b03198316815242602082015281517f7735b2391c38a81419c513e30ca578db7158eadd7101511b23e221c654d19cf8929181900390910190a16001600160e01b03198116600090815260016020819052604082208281559190610cfa9083018261247b565b505050565b6004546001600160a01b031690565b600160208181526000928352604092839020805481840180548651600296821615610100026000190190911695909504601f81018590048502860185019096528585529094919392909190830182828015610daa5780601f10610d7f57610100808354040283529160200191610daa565b820191906000526020600020905b815481529060010190602001808311610d8d57829003601f168201915b5050505050905082565b600454604080518082019091526016815275566f7465722077686974656c6973746572206f6e6c7960501b6020820152906001600160a01b03163314610e785760405162461bcd60e51b81526004018080602001828103825283818151815260200191508051906020019080838360005b83811015610e3d578181015183820152602001610e25565b50505050905090810190601f168
015610e6a5780820380516001836020036101000a031916815260200191505b509250505060405180910390fd5b5060005b8251811015610cfa57816001901b1960056000858481518110610e9b57fe5b6020908102919091018101516001600160a01b0316825281019190915260400160002080549091169055600101610e7c565b6001600160a01b031660009081526005602052604090205490565b6002546001600160a01b031690565b600360009054906101000a90046001600160a01b03166001600160a01b03166308a7f4026040518163ffffffff1660e01b815260040160206040518083038186803b158015610f4557600080fd5b505afa158015610f59573d6000803e3d6000fd5b505050506040513d6020811015610f6f57600080fd5b505160408051808201909152600e81526d15dc9bdb99c8195c1bd8da081a5960921b6020820152908314610fe45760405162461bcd60e51b8152602060048201818152835160248401528351909283926044909101919085019080838360008315610e3d578181015183820152602001610e25565b506000828152600860209081526040808320338452825291829020548251808401909352601983527f4475706c6963617465207375626d697420696e2065706f63680000000000000091830191909152156110805760405162461bcd60e51b8152602060048201818152835160248401528351909283926044909101919085019080838360008315610e3d578181015183820152602001610e25565b50336000908152600560205260409020541515806110ad57503360009081526007602052604090205460ff165b6040518060400160405280600f81526020016e139bdd081dda1a5d195b1a5cdd1959608a1b815250906111215760405162461bcd60e51b8152602060048201818152835160248401528351909283926044909101919085019080838360008315610e3d578181015183820152602001610e25565b50600082815260086020908152604080832033808552908352928190208490558051848152429281019290925280518593927f5e2f64e70eafef31c2f48c8ef140b36406531c36ab0faaede30843202c16f6a892908290030190a35050565bfe5b50565b600454604080518082019091526016815275566f7465722077686974656c6973746572206f6e6c7960501b6020820152906001600160a01b0316331461120c5760405162461bcd60e51b8152602060048201818152835160248401528351909283926044909101919085019080838360008315610e3d578181015183820152602001610e25565b506001600160a01b0390911660009081526005602052604090208054600190921b91909117905
5565b6003546040805180820190915260118152704654534f206d616e61676572206f6e6c7960781b6020820152906001600160a01b031633146112b75760405162461bcd60e51b8152602060048201818152835160248401528351909283926044909101919085019080838360008315610e3d578181015183820152602001610e25565b5060065460005b8181101561131557600060076000600684815481106112d957fe5b6000918252602080832091909101546001600160a01b031683528201929092526040019020805460ff19169115159190911790556001016112be565b5050805160005b818110156113705760016007600085848151811061133657fe5b6020908102919091018101516001600160a01b03168252810191909152604001600020805460ff191691151591909117905560010161131c565b508151610cfa9060069060208501906124bf565b600054600160b01b900460ff16806113a65750600054600160a81b900460ff16155b15611446576113b3611fb9565b60006113bd6107fc565b6001600160a01b0316146040518060400160405280600b81526020016a105b1c9958591e481cd95d60aa1b815250906114375760405162461bcd60e51b8152602060048201818152835160248401528351909283926044909101919085019080838360008315610e3d578181015183820152602001610e25565b5061144181611fee565b611182565b611182600036612012565b6114596107fc565b6001600160a01b0316336001600160a01b0316146114b5576040805162461bcd60e51b815260206004820152601460248201527337b7363c9030b2323932b9b9903ab83230ba32b960611b604482015290519081900360640190fd5b6114ee6114e983836040518060400160405280600e81526020016d20b2323932b9b9aab83230ba32b960911b815250612195565b611fee565b6114f882826122c2565b5050565b6003546001600160a01b031690565b603281565b600073fffec6c83c8bf5c3f4ae0ccf8c45ce20e4560bd76108b1816123c2565b60006009603283066032811061154257fe5b015492915050565b600080600360009054906101000a90046001600160a01b03166001600160a01b03166308a7f4026040518163ffffffff1660e01b815260040160206040518083038186803b15801561159b57600080fd5b505afa1580156115af573d6000803e3d6000fd5b505050506040513d60208110156115c557600080fd5b50519050806115d85760009150506108b4565b60096032600019830106603281106115ec57fe5b015491505090565b600054600160a81b900460ff1681565b8251825160408051808201909152601a81527f4172726
179206c656e6774687320646f206e6f74206d61746368000000000000602082015290821461168a5760405162461bcd60e51b8152602060048201818152835160248401528351909283926044909101919085019080838360008315610e3d578181015183820152602001610e25565b5060408051808201909152601781527f546f6f20736d616c6c2072616e646f6d206e756d6265720000000000000000006020820152600160801b8310156117125760405162461bcd60e51b8152602060048201818152835160248401528351909283926044909101919085019080838360008315610e3d578181015183820152602001610e25565b5083838333604051602001808060200180602001858152602001846001600160a01b03168152602001838103835287818151815260200191508051906020019060200280838360005b8381101561177357818101518382015260200161175b565b50505050905001838103825286818151815260200191508051906020019060200280838360005b838110156117b257818101518382015260200161179a565b50505050905001965050505050505060405160208183030381529060405280519060200120600860008781526020019081526020016000206000336001600160a01b03166001600160a01b0316815260200190815260200160002054146040518060600160405280602381526020016125b660239139906118745760405162461bcd60e51b8152602060048201818152835160248401528351909283926044909101919085019080838360008315610e3d578181015183820152602001610e25565b506002546040516313968ea760e31b81526020600482018181528751602484015287516000946001600160a01b031693639cb47538938a939283926044019180860191028083838b5b838110156118d55781810151838201526020016118bd565b505050509050019250505060006040518083038186803b1580156118f857600080fd5b505afa15801561190c573d6000803e3d6000fd5b505050506040513d6000823e601f3d908101601f19168201604052602081101561193557600080fd5b8101908080516040519392919084600160201b82111561195457600080fd5b90830190602082018581111561196957600080fd5b82518660208202830111600160201b8211171561198557600080fd5b82525081516020918201928201910280838360005b838110156119b257818101518382015260200161199a565b5050505091909101604090815233600090815260056020529081205495965093508392505050846119e4576000611a7c565b836000815181106119f157fe5b60200260200101516001600160a01b03166
3f72cab28338b6040518363ffffffff1660e01b815260040180836001600160a01b0316815260200182815260200192505050602060405180830381600087803b158015611a4f57600080fd5b505af1158015611a63573d6000803e3d6000fd5b505050506040513d6020811015611a7957600080fd5b50515b90506000805b86811015611c8f578015801590611aac5750898181518110611aa057fe5b60200260200101518210155b15611b2957604080518082018252601b81527f4654534f20696e6469636573206e6f7420696e6372656173696e6700000000006020808301918252925162461bcd60e51b81526004810193845282516024820152825192939283926044909201919080838360008315610e3d578181015183820152602001610e25565b898181518110611b3557fe5b60200260200101519150816001901b851660001415611bde5783611bde573360009081526007602052604090205460ff1615611b745760019350611bde565b604080518082018252600f81526e139bdd081dda1a5d195b1a5cdd1959608a1b6020808301918252925162461bcd60e51b81526004810193845282516024820152825192939283926044909201919080838360008315610e3d578181015183820152602001610e25565b858181518110611bea57fe5b60200260200101516001600160a01b031663c1f6c36e338d8c8581518110611c0e57fe5b6020026020010151876040518563ffffffff1660e01b815260040180856001600160a01b03168152602001848152602001838152602001828152602001945050505050600060405180830381600087803b158015611c6b57600080fd5b505af1158015611c7f573d6000803e3d6000fd5b505060019092019150611a829050565b5060008a8152600860209081526040808320338452825280832083905580518083018b81528183019283528c5160608301528c518c958e95929493608001928681019202908190849084905b83811015611cf3578181015183820152602001611cdb565b5050505090500193505050506040516020818303038152906040528051906020012060001c600960328c81611d2457fe5b0660328110611d2f57fe5b016000828254019250508190555089336001600160a01b03167fafffa539ac1cad89751c875d871abadc6deb7bd51bf6baea004fc71ca0a48fa5878b8b42604051808060200180602001858152602001848152602001838103835287818151815260200191508051906020019060200280838360005b83811015611dbd578181015183820152602001611da5565b50505050905001838103825286818151815260200191508051906020019060200280838360005b83811015611df
c578181015183820152602001611de4565b50505050905001965050505050505060405180910390a350505050505050505050565b611e27611f58565b600054600160a81b900460ff1615611e86576040805162461bcd60e51b815260206004820152601a60248201527f616c726561647920696e2070726f64756374696f6e206d6f6465000000000000604482015290519081900360640190fd5b60008054600161ff0160a01b031916600160a81b1790556040805160076001609c1b01815290517f83af113638b5422f9e977cebc0aaf0eaf2188eb9a8baae7f9d46c42b33a1560c9181900360200190a1565b60606006805480602002602001604051908101604052809291908181526020018280548015611f3157602002820191906000526020600020905b81546001600160a01b03168152600190910190602001808311611f13575b5050505050905090565b3d604051818101604052816000823e8215611f54578181f35b8181fd5b611f60610821565b6001600160a01b0316336001600160a01b031614611fb7576040805162461bcd60e51b815260206004820152600f60248201526e6f6e6c7920676f7665726e616e636560881b604482015290519081900360640190fd5b565b600054600160b01b900460ff1615611fe657333014611fd457fe5b6000805460ff60b01b19169055611fb7565b611fb7611f58565b7f714f205b2abd25bef1d06a1af944e38c113fe6160375c4e1d6d5cf28848e771955565b61201a611f58565b600082359050600060076001609c1b016001600160a01b0316636221a54b6040518163ffffffff1660e01b815260040160206040518083038186803b15801561206257600080fd5b505afa158015612076573d6000803e3d6000fd5b505050506040513d602081101561208c57600080fd5b505160408051808201825242830180825282516020601f89018190048102820181019094528781529394509290918281019190889088908190840183828082843760009201829052509390945250506001600160e01b0319861681526001602081815260409092208451815584830151805191945061211093928501920190612524565b509050507fed948300a3694aa01d4a6b258bfd664350193d770c0b51f8387277f6d83ea3b68382878760405180856001600160e01b0319168152602001848152602001806020018281038252848482818152602001925080828437600083820152604051601f909101601f191690920182900397509095505050505050a15050505050565b600080826040516020018080602001828103825283818151815260200191508051906020019080838360005b838110156121d9578181015183820152602
0016121c1565b50505050905090810190601f1680156122065780820380516001836020036101000a031916815260200191505b50925050506040516020818303038152906040528051906020012090506000805b865181101561226e5786818151811061223c57fe5b60200260200101518314156122665785818151811061225757fe5b6020026020010151915061226e565b600101612227565b506001600160a01b0381166122b9576040805162461bcd60e51b815260206004820152600c60248201526b61646472657373207a65726f60a01b604482015290519081900360640190fd5b95945050505050565b6122f182826040518060400160405280600c81526020016b4674736f526567697374727960a01b815250612195565b600260006101000a8154816001600160a01b0302191690836001600160a01b0316021790555061234582826040518060400160405280600b81526020016a233a39b7a6b0b730b3b2b960a91b815250612195565b600360006101000a8154816001600160a01b0302191690836001600160a01b0316021790555061239e82826040518060400160405280601081526020016f2b37ba32b92bb434ba32b634b9ba32b960811b815250612195565b600480546001600160a01b0319166001600160a01b03929092169190911790555050565b600054600160a01b900460ff1615612418576040805162461bcd60e51b8152602060048201526014602482015273696e697469616c6973656420213d2066616c736560601b604482015290519081900360640190fd5b60008054600160a01b60ff60a01b19909116176001600160a01b0319166001600160a01b03831690811790915560408051918252517f9789733827840833afc031fb2ef9ab6894271f77bad2085687cf4ae5c7bee4db916020908290030190a150565b50805460018160011615610100020316600290046000825580601f106124a15750611182565b601f01602090049060005260206000209081019061118291906125a0565b828054828255906000526020600020908101928215612514579160200282015b8281111561251457825182546001600160a01b0319166001600160a01b039091161782556020909201916001909101906124df565b506125209291506125a0565b5090565b828054600181600116156101000203166002900490600052602060002090601f01602090048101928261255a5760008555612514565b82601f1061257357805160ff1916838001178555612514565b82800160010185558215612514579182015b82811115612514578251825591602001919060010190612585565b5b8082111561252057600081556001016125a156fe50726963652
0616c72656164792072657665616c6564206f72206e6f742076616c6964a2646970667358221220104881e87eece1bfc725f4ae6495e2bc314c075c27d3a82f608948e8e4d8953f64736f6c63430007060033\"},\"0x1000000000000000000000000000000000000004\":{\"balance\":\"0x476a3d4ef278dc3746d6bac0\",\"code\":\"0x608060405234801561001057600080fd5b506004361061010b5760003560e01c806375a0fef9116100a2578063d8952a4911610071578063d8952a491461029a578063e17f212e146102c8578063e1a1a5dc146102e4578063ec8d87771461030a578063f5a98383146103275761010b565b806375a0fef91461025c57806395645e34146102645780639d6a890f1461026c578063c9f960eb146102925761010b565b8063616d7c9f116100de578063616d7c9f1461017f57806362354e031461018757806367fc40291461018f57806374e6310e146101b65761010b565b8063100223bb1461011057806329d71f6d1461012a5780635aa6e675146101325780635ff2707914610156575b600080fd5b61011861032f565b60408051918252519081900360200190f35b610118610335565b61013a610345565b604080516001600160a01b039092168252519081900360200190f35b61017d6004803603602081101561016c57600080fd5b50356001600160e01b0319166103da565b005b61013a61072f565b61013a61073e565b61017d600480360360208110156101a557600080fd5b50356001600160e01b031916610749565b6101dd600480360360208110156101cc57600080fd5b50356001600160e01b031916610831565b6040518083815260200180602001828103825283818151815260200191508051906020019080838360005b83811015610220578181015183820152602001610208565b50505050905090810190601f16801561024d5780820380516001836020036101000a031916815260200191505b50935050505060405180910390f35b61013a6108d7565b61013a6108e6565b61017d6004803603602081101561028257600080fd5b50356001600160a01b03166108f5565b61013a6108fa565b61017d600480360360408110156102b057600080fd5b506001600160a01b038135811691602001351661091a565b6102d0610aef565b604080519115158252519081900360200190f35b61017d600480360360208110156102fa57600080fd5b50356001600160a01b0316610aff565b61017d6004803603602081101561032057600080fd5b5035610c8b565b61017d610e29565b60055481565b6b0257b4b8c0aa5cf8f500000081565b60008054600160a81b900460ff1661036857600054600160016
0a01b03166103d5565b60076001609c1b016001600160a01b031663732524946040518163ffffffff1660e01b815260040160206040518083038186803b1580156103a857600080fd5b505afa1580156103bc573d6000803e3d6000fd5b505050506040513d60208110156103d257600080fd5b50515b905090565b60408051630debfda360e41b8152336004820152905160076001609c1b019163debfda30916024808301926020929190829003018186803b15801561041e57600080fd5b505afa158015610432573d6000803e3d6000fd5b505050506040513d602081101561044857600080fd5b505161048b576040805162461bcd60e51b815260206004820152600d60248201526c37b7363c9032bc32b1baba37b960991b604482015290519081900360640190fd5b6001600160e01b03198116600090815260016020526040902080546104f7576040805162461bcd60e51b815260206004820152601a60248201527f74696d656c6f636b3a20696e76616c69642073656c6563746f72000000000000604482015290519081900360640190fd5b805442101561054d576040805162461bcd60e51b815260206004820152601960248201527f74696d656c6f636b3a206e6f7420616c6c6f7765642079657400000000000000604482015290519081900360640190fd5b6000816001018054600181600116156101000203166002900480601f0160208091040260200160405190810160405280929190818152602001828054600181600116156101000203166002900480156105e75780601f106105bc576101008083540402835291602001916105e7565b820191906000526020600020905b8154815290600101906020018083116105ca57829003601f168201915b5050506001600160e01b031986166000908152600160208190526040822082815594955090925061061b915083018261129d565b50506000805460ff60b01b1916600160b01b178155604051825130918491819060208401908083835b602083106106635780518252601f199092019160209182019101610644565b6001836020036101000a0380198251168184511680821785525050505050509050019150506000604051808303816000865af19150503d80600081146106c5576040519150601f19603f3d011682016040523d82523d6000602084013e6106ca565b606091505b50506000805460ff60b01b19169055604080516001600160e01b03198716815242602082015281519293507fa7326b57fc9cfe267aaea5e7f0b01757154d265620a0585819416ee9ddd2c438929081900390910190a161072981610ee3565b50505050565b6004546001600160a01b031681565b60076001609c1b018
1565b610751610f00565b6001600160e01b031981166000908152600160205260409020546107bc576040805162461bcd60e51b815260206004820152601a60248201527f74696d656c6f636b3a20696e76616c69642073656c6563746f72000000000000604482015290519081900360640190fd5b604080516001600160e01b03198316815242602082015281517f7735b2391c38a81419c513e30ca578db7158eadd7101511b23e221c654d19cf8929181900390910190a16001600160e01b0319811660009081526001602081905260408220828155919061082c9083018261129d565b505050565b600160208181526000928352604092839020805481840180548651600296821615610100026000190190911695909504601f810185900485028601850190965285855290949193929091908301828280156108cd5780601f106108a2576101008083540402835291602001916108cd565b820191906000526020600020905b8154815290600101906020018083116108b057829003601f168201915b5050505050905082565b6002546001600160a01b031681565b6003546001600160a01b031681565bfe5b50565b600073fffec6c83c8bf5c3f4ae0ccf8c45ce20e4560bd76103d581610f61565b600054600160b01b900460ff168061093c5750600054600160a81b900460ff16155b15610ae05761094961101a565b6003546001600160a01b031615801561096b57506004546001600160a01b0316155b6040518060400160405280600b81526020016a185b1c9958591e481cd95d60aa1b81525090610a185760405162461bcd60e51b81526004018080602001828103825283818151815260200191508051906020019080838360005b838110156109dd5781810151838201526020016109c5565b50505050905090810190601f168015610a0a5780820380516001836020036101000a031916815260200191505b509250505060405180910390fd5b506001600160a01b03821615801590610a3957506001600160a01b03811615155b6040518060400160405280600c81526020016b61646472657373207a65726f60a01b81525090610aaa5760405162461bcd60e51b81526020600482018181528351602484015283519092839260449091019190850190808383600083156109dd5781810151838201526020016109c5565b50600380546001600160a01b038085166001600160a01b0319928316179092556004805492841692909116919091179055610aeb565b610aeb60003661104f565b5050565b600054600160a81b900460ff1681565b600054600160b01b900460ff1680610b215750600054600160a81b900460ff16155b15610c8057610b2e61101a565b6
0025460408051808201909152600b81526a185b1c9958591e481cd95d60aa1b6020820152906001600160a01b031615610ba95760405162461bcd60e51b81526020600482018181528351602484015283519092839260449091019190850190808383600083156109dd5781810151838201526020016109c5565b506003546001600160a01b0382811691161480610bd357506004546001600160a01b038281169116145b6040518060400160405280600d81526020016c77726f6e67206164647265737360981b81525090610c455760405162461bcd60e51b81526020600482018181528351602484015283519092839260449091019190850190808383600083156109dd5781810151838201526020016109c5565b50600280546001600160a01b0319166001600160a01b038381169182179092556003549091161415610c7b57610c7b81476111d2565b6108f7565b6108f760003661104f565b6002546001600160a01b031633148015610caf57506004546001600160a01b031633145b60405180604001604052806011815260200170646973747269627574696f6e206f6e6c7960781b81525090610d255760405162461bcd60e51b81526020600482018181528351602484015283519092839260449091019190850190808383600083156109dd5781810151838201526020016109c5565b504262263b80600554011115604051806040016040528060098152602001683a37b79037b33a32b760b91b81525090610d9f5760405162461bcd60e51b81526020600482018181528351602484015283519092839260449091019190850190808383600083156109dd5781810151838201526020016109c5565b506040805180820190915260088152670e8dede40daeac6d60c31b60208201526b0257b4b8c0aa5cf8f5000000821115610e1a5760405162461bcd60e51b81526020600482018181528351602484015283519092839260449091019190850190808383600083156109dd5781810151838201526020016109c5565b50426005556108f733826111d2565b610e31610f00565b600054600160a81b900460ff1615610e90576040805162461bcd60e51b815260206004820152601a60248201527f616c726561647920696e2070726f64756374696f6e206d6f6465000000000000604482015290519081900360640190fd5b60008054600161ff0160a01b031916600160a81b1790556040805160076001609c1b01815290517f83af113638b5422f9e977cebc0aaf0eaf2188eb9a8baae7f9d46c42b33a1560c9181900360200190a1565b3d604051818101604052816000823e8215610efc578181f35b8181fd5b610f08610345565b6001600160a01b031633600160016
0a01b031614610f5f576040805162461bcd60e51b815260206004820152600f60248201526e6f6e6c7920676f7665726e616e636560881b604482015290519081900360640190fd5b565b600054600160a01b900460ff1615610fb7576040805162461bcd60e51b8152602060048201526014602482015273696e697469616c6973656420213d2066616c736560601b604482015290519081900360640190fd5b60008054600160a01b60ff60a01b19909116176001600160a01b0319166001600160a01b03831690811790915560408051918252517f9789733827840833afc031fb2ef9ab6894271f77bad2085687cf4ae5c7bee4db916020908290030190a150565b600054600160b01b900460ff16156110475733301461103557fe5b6000805460ff60b01b19169055610f5f565b610f5f610f00565b611057610f00565b600082359050600060076001609c1b016001600160a01b0316636221a54b6040518163ffffffff1660e01b815260040160206040518083038186803b15801561109f57600080fd5b505afa1580156110b3573d6000803e3d6000fd5b505050506040513d60208110156110c957600080fd5b505160408051808201825242830180825282516020601f89018190048102820181019094528781529394509290918281019190889088908190840183828082843760009201829052509390945250506001600160e01b0319861681526001602081815260409092208451815584830151805191945061114d939285019201906112e1565b509050507fed948300a3694aa01d4a6b258bfd664350193d770c0b51f8387277f6d83ea3b68382878760405180856001600160e01b0319168152602001848152602001806020018281038252848482818152602001925080828437600083820152604051601f909101601f191690920182900397509095505050505050a15050505050565b6040516000906001600160a01b0384169083908381818185875af1925050503d806000811461121d576040519150601f19603f3d011682016040523d82523d6000602084013e611222565b606091505b5050905080604051806040016040528060118152602001701cd95b9908199d5b991cc819985a5b1959607a1b815250906107295760405162461bcd60e51b81526020600482018181528351602484015283519092839260449091019190850190808383600083156109dd5781810151838201526020016109c5565b50805460018160011615610100020316600290046000825580601f106112c357506108f7565b601f0160209004906000526020600020908101906108f7919061136d565b82805460018160011615610100020316600290049060005260206000209
0601f016020900481019282611317576000855561135d565b82601f1061133057805160ff191683800117855561135d565b8280016001018555821561135d579182015b8281111561135d578251825591602001919060010190611342565b5061136992915061136d565b5090565b5b80821115611369576000815560010161136e56fea2646970667358221220d6eda04eadcca443b6024fa323652bc9e6c4c31659587a37fbd1b4fb2008a0c264736f6c63430007060033\"},\"0x1000000000000000000000000000000000000005\":{\"balance\":\"0x409f9cbc7c4a04c220000000\",\"code\":\"0x608060405234801561001057600080fd5b50600436106100ea5760003560e01c806397249db71161008c578063d4456c9711610066578063d4456c9714610269578063e17f212e1461028f578063ec8d8777146102ab578063f5a98383146102c8576100ea565b806397249db7146102335780639d6a890f1461023b578063c9f960eb14610261576100ea565b80635ff27079116100c85780635ff270791461013557806362354e031461015e57806367fc40291461016657806374e6310e1461018d576100ea565b8063100223bb146100ef5780634841422e146101095780635aa6e6751461012d575b600080fd5b6100f76102d0565b60408051918252519081900360200190f35b6101116102d6565b604080516001600160a01b039092168252519081900360200190f35b6101116102e5565b61015c6004803603602081101561014b57600080fd5b50356001600160e01b03191661037a565b005b6101116106cf565b61015c6004803603602081101561017c57600080fd5b50356001600160e01b0319166106da565b6101b4600480360360208110156101a357600080fd5b50356001600160e01b0319166107c2565b6040518083815260200180602001828103825283818151815260200191508051906020019080838360005b838110156101f75781810151838201526020016101df565b50505050905090810190601f1680156102245780820380516001836020036101000a031916815260200191505b50935050505060405180910390f35b6100f7610868565b61015c6004803603602081101561025157600080fd5b50356001600160a01b0316610877565b61011161087c565b61015c6004803603602081101561027f57600080fd5b50356001600160a01b031661089c565b6102976109af565b604080519115158252519081900360200190f35b61015c600480360360208110156102c157600080fd5b50356109bf565b61015c610bf8565b60035481565b6002546001600160a01b031681565b60008054600160a81b900460ff1661030857600
0546001600160a01b0316610375565b60076001609c1b016001600160a01b031663732524946040518163ffffffff1660e01b815260040160206040518083038186803b15801561034857600080fd5b505afa15801561035c573d6000803e3d6000fd5b505050506040513d602081101561037257600080fd5b50515b905090565b60408051630debfda360e41b8152336004820152905160076001609c1b019163debfda30916024808301926020929190829003018186803b1580156103be57600080fd5b505afa1580156103d2573d6000803e3d6000fd5b505050506040513d60208110156103e857600080fd5b505161042b576040805162461bcd60e51b815260206004820152600d60248201526c37b7363c9032bc32b1baba37b960991b604482015290519081900360640190fd5b6001600160e01b0319811660009081526001602052604090208054610497576040805162461bcd60e51b815260206004820152601a60248201527f74696d656c6f636b3a20696e76616c69642073656c6563746f72000000000000604482015290519081900360640190fd5b80544210156104ed576040805162461bcd60e51b815260206004820152601960248201527f74696d656c6f636b3a206e6f7420616c6c6f7765642079657400000000000000604482015290519081900360640190fd5b6000816001018054600181600116156101000203166002900480601f0160208091040260200160405190810160405280929190818152602001828054600181600116156101000203166002900480156105875780601f1061055c57610100808354040283529160200191610587565b820191906000526020600020905b81548152906001019060200180831161056a57829003601f168201915b5050506001600160e01b03198616600090815260016020819052604082208281559495509092506105bb9150830182610fa1565b50506000805460ff60b01b1916600160b01b178155604051825130918491819060208401908083835b602083106106035780518252601f1990920191602091820191016105e4565b6001836020036101000a0380198251168184511680821785525050505050509050019150506000604051808303816000865af19150503d8060008114610665576040519150601f19603f3d011682016040523d82523d6000602084013e61066a565b606091505b50506000805460ff60b01b19169055604080516001600160e01b03198716815242602082015281519293507fa7326b57fc9cfe267aaea5e7f0b01757154d265620a0585819416ee9ddd2c438929081900390910190a16106c981610cb2565b50505050565b60076001609c1b0181565b6106e2610ccf5
65b6001600160e01b0319811660009081526001602052604090205461074d576040805162461bcd60e51b815260206004820152601a60248201527f74696d656c6f636b3a20696e76616c69642073656c6563746f72000000000000604482015290519081900360640190fd5b604080516001600160e01b03198316815242602082015281517f7735b2391c38a81419c513e30ca578db7158eadd7101511b23e221c654d19cf8929181900390910190a16001600160e01b031981166000908152600160208190526040822082815591906107bd90830182610fa1565b505050565b600160208181526000928352604092839020805481840180548651600296821615610100026000190190911695909504601f8101859004850286018501909652858552909491939290919083018282801561085e5780601f106108335761010080835404028352916020019161085e565b820191906000526020600020905b81548152906001019060200180831161084157829003601f168201915b5050505050905082565b6a14adf4b7320334b900000081565bfe5b50565b600073fffec6c83c8bf5c3f4ae0ccf8c45ce20e4560bd761037581610d30565b600054600160b01b900460ff16806108be5750600054600160a81b900460ff16155b156109a4576108cb610de9565b60025460408051808201909152600b81526a185b1c9958591e481cd95d60aa1b6020820152906001600160a01b0316156109835760405162461bcd60e51b81526004018080602001828103825283818151815260200191508051906020019080838360005b83811015610948578181015183820152602001610930565b50505050905090810190601f1680156109755780820380516001836020036101000a031916815260200191505b509250505060405180910390fd5b50600280546001600160a01b0319166001600160a01b038316179055610879565b610879600036610e1e565b600054600160a81b900460ff1681565b600254604080518082019091526013815272696e63656e7469766520706f6f6c206f6e6c7960681b6020820152906001600160a01b03163314610a435760405162461bcd60e51b8152602060048201818152835160248401528351909283926044909101919085019080838360008315610948578181015183820152602001610930565b504262014370600354011115604051806040016040528060098152602001683a37b79037b33a32b760b91b81525090610abd5760405162461bcd60e51b8152602060048201818152835160248401528351909283926044909101919085019080838360008315610948578181015183820152602001610930565b50604080518082019091526
0088152670e8dede40daeac6d60c31b60208201526a14adf4b7320334b9000000821115610b375760405162461bcd60e51b8152602060048201818152835160248401528351909283926044909101919085019080838360008315610948578181015183820152602001610930565b5042600355604051600090339083908381818185875af1925050503d8060008114610b7e576040519150601f19603f3d011682016040523d82523d6000602084013e610b83565b606091505b50509050806040518060400160405280600b81526020016a1c1d5b1b0819985a5b195960aa1b815250906107bd5760405162461bcd60e51b8152602060048201818152835160248401528351909283926044909101919085019080838360008315610948578181015183820152602001610930565b610c00610ccf565b600054600160a81b900460ff1615610c5f576040805162461bcd60e51b815260206004820152601a60248201527f616c726561647920696e2070726f64756374696f6e206d6f6465000000000000604482015290519081900360640190fd5b60008054600161ff0160a01b031916600160a81b1790556040805160076001609c1b01815290517f83af113638b5422f9e977cebc0aaf0eaf2188eb9a8baae7f9d46c42b33a1560c9181900360200190a1565b3d604051818101604052816000823e8215610ccb578181f35b8181fd5b610cd76102e5565b6001600160a01b0316336001600160a01b031614610d2e576040805162461bcd60e51b815260206004820152600f60248201526e6f6e6c7920676f7665726e616e636560881b604482015290519081900360640190fd5b565b600054600160a01b900460ff1615610d86576040805162461bcd60e51b8152602060048201526014602482015273696e697469616c6973656420213d2066616c736560601b604482015290519081900360640190fd5b60008054600160a01b60ff60a01b19909116176001600160a01b0319166001600160a01b03831690811790915560408051918252517f9789733827840833afc031fb2ef9ab6894271f77bad2085687cf4ae5c7bee4db916020908290030190a150565b600054600160b01b900460ff1615610e1657333014610e0457fe5b6000805460ff60b01b19169055610d2e565b610d2e610ccf565b610e26610ccf565b600082359050600060076001609c1b016001600160a01b0316636221a54b6040518163ffffffff1660e01b815260040160206040518083038186803b158015610e6e57600080fd5b505afa158015610e82573d6000803e3d6000fd5b505050506040513d6020811015610e9857600080fd5b505160408051808201825242830180825282516020601f890
18190048102820181019094528781529394509290918281019190889088908190840183828082843760009201829052509390945250506001600160e01b03198616815260016020818152604090922084518155848301518051919450610f1c93928501920190610fe5565b509050507fed948300a3694aa01d4a6b258bfd664350193d770c0b51f8387277f6d83ea3b68382878760405180856001600160e01b0319168152602001848152602001806020018281038252848482818152602001925080828437600083820152604051601f909101601f191690920182900397509095505050505050a15050505050565b50805460018160011615610100020316600290046000825580601f10610fc75750610879565b601f0160209004906000526020600020908101906108799190611071565b828054600181600116156101000203166002900490600052602060002090601f01602090048101928261101b5760008555611061565b82601f1061103457805160ff1916838001178555611061565b82800160010185558215611061579182015b82811115611061578251825591602001919060010190611046565b5061106d929150611071565b5090565b5b8082111561106d576000815560010161107256fea26469706673582212208f1eab37d024ae46e4f77c977acc16b01ba88ed6b653ca64761a6a129030a0be64736f6c63430007060033\"},\"0x1000000000000000000000000000000000000006\":{\"balance\":\"0x0c9a470dee8dcc8239ad6c40\",\"code\":\"0x608060405234801561001057600080fd5b50600436106101375760003560e01c806374e6310e116100b8578063c9f960eb1161007c578063c9f960eb146103d7578063ce592dfb146103df578063d5261971146103e7578063e17f212e146103ef578063ea1e63211461040b578063f5a983831461043157610137565b806374e6310e146102de5780638dc7b1a91461038457806390568f81146103a15780639d6a890f146103a9578063c602a77e146103cf57610137565b80635ff27079116100ff5780635ff27079146101a757806361235585146101d057806362354e031461029257806367fc40291461029a5780636de80541146102c157610137565b80633f630c3a1461013c57806346a549b5146101565780634a4b698a1461015e5780635aa6e675146101975780635dcb01701461019f575b600080fd5b610144610439565b60408051918252519081900360200190f35b61014461043f565b61017b6004803603602081101561017457600080fd5b5035610445565b604080516001600160a01b039092168252519081900360200190f35b61017b61046f565b610144610504565
b6101ce600480360360208110156101bd57600080fd5b50356001600160e01b03191661050a565b005b6101ce600480360360408110156101e657600080fd5b81019060208101813564010000000081111561020157600080fd5b82018360208201111561021357600080fd5b8035906020019184602083028401116401000000008311171561023557600080fd5b91939092909160208101903564010000000081111561025357600080fd5b82018360208201111561026557600080fd5b8035906020019184602083028401116401000000008311171561028757600080fd5b50909250905061085f565b61017b610bd5565b6101ce600480360360208110156102b057600080fd5b50356001600160e01b031916610be0565b6101ce600480360360208110156102d757600080fd5b5035610cc8565b610305600480360360208110156102f457600080fd5b50356001600160e01b031916610ed5565b6040518083815260200180602001828103825283818151815260200191508051906020019080838360005b83811015610348578181015183820152602001610330565b50505050905090810190601f1680156103755780820380516001836020036101000a031916815260200191505b50935050505060405180910390f35b6101ce6004803603602081101561039a57600080fd5b5035610f7b565b610144611027565b6101ce600480360360208110156103bf57600080fd5b50356001600160a01b031661102d565b61014461102f565b61017b611035565b610144611055565b6101ce61105b565b6103f7611303565b604080519115158252519081900360200190f35b6101446004803603602081101561042157600080fd5b50356001600160a01b0316611313565b6101ce611325565b60055481565b60075481565b6003818154811061045557600080fd5b6000918252602090912001546001600160a01b0316905081565b60008054600160a81b900460ff16610492576000546001600160a01b03166104ff565b60076001609c1b016001600160a01b031663732524946040518163ffffffff1660e01b815260040160206040518083038186803b1580156104d257600080fd5b505afa1580156104e6573d6000803e3d6000fd5b505050506040513d60208110156104fc57600080fd5b50515b905090565b60065481565b60408051630debfda360e41b8152336004820152905160076001609c1b019163debfda30916024808301926020929190829003018186803b15801561054e57600080fd5b505afa158015610562573d6000803e3d6000fd5b505050506040513d602081101561057857600080fd5b50516105bb576040805162461bcd60e51b81526020600
4820152600d60248201526c37b7363c9032bc32b1baba37b960991b604482015290519081900360640190fd5b6001600160e01b0319811660009081526001602052604090208054610627576040805162461bcd60e51b815260206004820152601a60248201527f74696d656c6f636b3a20696e76616c69642073656c6563746f72000000000000604482015290519081900360640190fd5b805442101561067d576040805162461bcd60e51b815260206004820152601960248201527f74696d656c6f636b3a206e6f7420616c6c6f7765642079657400000000000000604482015290519081900360640190fd5b6000816001018054600181600116156101000203166002900480601f0160208091040260200160405190810160405280929190818152602001828054600181600116156101000203166002900480156107175780601f106106ec57610100808354040283529160200191610717565b820191906000526020600020905b8154815290600101906020018083116106fa57829003601f168201915b5050506001600160e01b031986166000908152600160208190526040822082815594955090925061074b915083018261198b565b50506000805460ff60b01b1916600160b01b178155604051825130918491819060208401908083835b602083106107935780518252601f199092019160209182019101610774565b6001836020036101000a0380198251168184511680821785525050505050509050019150506000604051808303816000865af19150503d80600081146107f5576040519150601f19603f3d011682016040523d82523d6000602084013e6107fa565b606091505b50506000805460ff60b01b19169055604080516001600160e01b03198716815242602082015281519293507fa7326b57fc9cfe267aaea5e7f0b01757154d265620a0585819416ee9ddd2c438929081900390910190a1610859816113df565b50505050565b600054600160b01b900460ff16806108815750600054600160a81b900460ff16155b15610bca5761088e6113fc565b604080518082019091526008815267746f6f206d616e7960c01b60208201526103e884111561093b5760405162461bcd60e51b81526004018080602001828103825283818151815260200191508051906020019080838360005b838110156109005781810151838201526020016108e8565b50505050905090810190601f16801561092d5780820380516001836020036101000a031916815260200191505b509250505060405180910390fd5b5060408051808201909152601781527f617272617973206c656e67746873206d69736d6174636800000000000000000060208201528382146109be5
760405162461bcd60e51b81526020600482018181528351602484015283519092839260449091019190850190808383600083156109005781810151838201526020016108e8565b5060065460408051808201909152600f81526e185b1c9958591e481cdd185c9d1959608a1b60208201529015610a355760405162461bcd60e51b81526020600482018181528351602484015283519092839260449091019190850190808383600083156109005781810151838201526020016108e8565b5060006004600086866000818110610a4957fe5b905060200201356001600160a01b03166001600160a01b03166001600160a01b03168152602001908152602001600020541115610a8557610bc5565b60005b61ffff8116841115610b6057600085858361ffff16818110610aa657fe5b905060200201356001600160a01b031690506000610ae96105dc61271087878761ffff16818110610ad357fe5b905060200201356114339092919063ffffffff16565b60038054600181019091557fc2575a0e9e593c00f959f8c92f12db2869c3395a3b0502d05e2516446f71f85b0180546001600160a01b0319166001600160a01b0385169081179091556000908152600460205260409020819055600554909150610b539082611541565b6005555050600101610a88565b507fc21490756c6f0185a8ad2363084fd0a45b06707979f77786b5e681bddc1d2fa1848460405180806020018281038252848482818152602001925060200280828437600083820152604051601f909101601f19169092018290039550909350505050a15b610859565b6108596000366115a4565b60076001609c1b0181565b610be8611727565b6001600160e01b03198116600090815260016020526040902054610c53576040805162461bcd60e51b815260206004820152601a60248201527f74696d656c6f636b3a20696e76616c69642073656c6563746f72000000000000604482015290519081900360640190fd5b604080516001600160e01b03198316815242602082015281517f7735b2391c38a81419c513e30ca578db7158eadd7101511b23e221c654d19cf8929181900390910190a16001600160e01b03198116600090815260016020819052604082208281559190610cc39083018261198b565b505050565b600054600160b01b900460ff1680610cea5750600054600160a81b900460ff16155b15610ec757610cf76113fc565b6006541580610d07575042600654115b6040518060400160405280600f81526020016e185b1c9958591e481cdd185c9d1959608a1b81525090610d7b5760405162461bcd60e51b81526020600482018181528351602484015283519092839260449091019190850
190808383600083156109005781810151838201526020016108e8565b5080600654108015610d8f57506007548111155b60405180604001604052806015815260200174077726f6e672073746172742074696d657374616d7605c1b81525090610e095760405162461bcd60e51b81526020600482018181528351602484015283519092839260449091019190850190808383600083156109005781810151838201526020016108e8565b5060068190556040805182815290517fd17095b0cb6319c5430ce8553f9ce78001789e1f94f47f377653a16198f2a90b9181900360200190a147610e4b611786565b11156040518060400160405280600f81526020016e62616c616e636520746f6f206c6f7760881b81525090610ec15760405162461bcd60e51b81526020600482018181528351602484015283519092839260449091019190850190808383600083156109005781810151838201526020016108e8565b50610ed2565b610ed26000366115a4565b50565b600160208181526000928352604092839020805481840180548651600296821615610100026000190190911695909504601f81018590048502860185019096528585529094919392909190830182828015610f715780601f10610f4657610100808354040283529160200191610f71565b820191906000526020600020905b815481529060010190602001808311610f5457829003601f168201915b5050505050905082565b600054600160b01b900460ff1680610f9d5750600054600160a81b900460ff16155b15610ec757610faa6113fc565b60075460408051808201909152600b81526a185b1c9958591e481cd95d60aa1b6020820152901561101c5760405162461bcd60e51b81526020600482018181528351602484015283519092839260449091019190850190808383600083156109005781810151838201526020016108e8565b506007819055610ed2565b60085481565bfe5b60095481565b600073fffec6c83c8bf5c3f4ae0ccf8c45ce20e4560bd76104ff8161179f565b60035490565b6006541580159061106d575042600654105b6040518060400160405280600b81526020016a1b9bdd081cdd185c9d195960aa1b815250906110dd5760405162461bcd60e51b81526020600482018181528351602484015283519092839260449091019190850190808383600083156109005781810151838201526020016108e8565b50600280541415611135576040805162461bcd60e51b815260206004820152601f60248201527f5265656e7472616e637947756172643a207265656e7472616e742063616c6c00604482015290519081900360640190fd5b60028055600954600354600091611151916
03290910190611858565b6009549091505b8181101561127b5760006003828154811061116f57fe5b60009182526020808320909101546001600160a01b031680835260049091526040822080549290556003805491935090849081106111a957fe5b600091825260209091200180546001600160a01b03191690556008546111cf9082611541565b6008556040516000906001600160a01b0384169061520890849084818181858888f193505050503d8060008114611222576040519150601f19603f3d011682016040523d82523d6000602084013e611227565b606091505b5050905080611270576040805183815290516001600160a01b038516917fa63e265bae965e6aae4283d17bf7ba298580ba5924f711fe7da1a7ee4ae19ef5919081900360200190a25b505050600101611158565b5060095560016002554761128d611786565b11156040518060400160405280600f81526020016e62616c616e636520746f6f206c6f7760881b81525090610ed25760405162461bcd60e51b81526020600482018181528351602484015283519092839260449091019190850190808383600083156109005781810151838201526020016108e8565b600054600160a81b900460ff1681565b60046020526000908152604090205481565b61132d611727565b600054600160a81b900460ff161561138c576040805162461bcd60e51b815260206004820152601a60248201527f616c726561647920696e2070726f64756374696f6e206d6f6465000000000000604482015290519081900360640190fd5b60008054600161ff0160a01b031916600160a81b1790556040805160076001609c1b01815290517f83af113638b5422f9e977cebc0aaf0eaf2188eb9a8baae7f9d46c42b33a1560c9181900360200190a1565b3d604051818101604052816000823e82156113f8578181f35b8181fd5b600054600160b01b900460ff16156114295733301461141757fe5b6000805460ff60b01b19169055611431565b611431611727565b565b600080821161147c576040805162461bcd60e51b815260206004820152601060248201526f4469766973696f6e206279207a65726f60801b604482015290519081900360640190fd5b836114895750600061153a565b8383028385828161149657fe5b0414156114af578281816114a657fe5b0491505061153a565b60008386816114ba57fe5b04905060008487816114c857fe5b06905060008587816114d657fe5b04905060008688816114e457fe5b0690506115326114fe886114f8868561186e565b906118c7565b61152c61150b868661186e565b61152c611518898761186e565b61152c8d6115268c8b61186e565b9061186e565b90611
541565b955050505050505b9392505050565b60008282018381101561159b576040805162461bcd60e51b815260206004820152601b60248201527f536166654d6174683a206164646974696f6e206f766572666c6f770000000000604482015290519081900360640190fd5b90505b92915050565b6115ac611727565b600082359050600060076001609c1b016001600160a01b0316636221a54b6040518163ffffffff1660e01b815260040160206040518083038186803b1580156115f457600080fd5b505afa158015611608573d6000803e3d6000fd5b505050506040513d602081101561161e57600080fd5b505160408051808201825242830180825282516020601f89018190048102820181019094528781529394509290918281019190889088908190840183828082843760009201829052509390945250506001600160e01b031986168152600160208181526040909220845181558483015180519194506116a2939285019201906119cf565b509050507fed948300a3694aa01d4a6b258bfd664350193d770c0b51f8387277f6d83ea3b68382878760405180856001600160e01b0319168152602001848152602001806020018281038252848482818152602001925080828437600083820152604051601f909101601f191690920182900397509095505050505050a15050505050565b61172f61046f565b6001600160a01b0316336001600160a01b031614611431576040805162461bcd60e51b815260206004820152600f60248201526e6f6e6c7920676f7665726e616e636560881b604482015290519081900360640190fd5b60006104ff60085460055461192e90919063ffffffff16565b600054600160a01b900460ff16156117f5576040805162461bcd60e51b8152602060048201526014602482015273696e697469616c6973656420213d2066616c736560601b604482015290519081900360640190fd5b60008054600160a01b60ff60a01b19909116176001600160a01b0319166001600160a01b03831690811790915560408051918252517f9789733827840833afc031fb2ef9ab6894271f77bad2085687cf4ae5c7bee4db916020908290030190a150565b6000818310611867578161159b565b5090919050565b60008261187d5750600061159e565b8282028284828161188a57fe5b041461159b5760405162461bcd60e51b8152600401808060200182810382526021815260200180611a716021913960400191505060405180910390fd5b600080821161191d576040805162461bcd60e51b815260206004820152601a60248201527f536166654d6174683a206469766973696f6e206279207a65726f000000000000604482015290519081900
360640190fd5b81838161192657fe5b049392505050565b600082821115611985576040805162461bcd60e51b815260206004820152601e60248201527f536166654d6174683a207375627472616374696f6e206f766572666c6f770000604482015290519081900360640190fd5b50900390565b50805460018160011615610100020316600290046000825580601f106119b15750610ed2565b601f016020900490600052602060002090810190610ed29190611a5b565b828054600181600116156101000203166002900490600052602060002090601f016020900481019282611a055760008555611a4b565b82601f10611a1e57805160ff1916838001178555611a4b565b82800160010185558215611a4b579182015b82811115611a4b578251825591602001919060010190611a30565b50611a57929150611a5b565b5090565b5b80821115611a575760008155600101611a5c56fe536166654d6174683a206d756c7469706c69636174696f6e206f766572666c6f77a26469706673582212200a099b8a92ff304466ca5f5faaa0b60520e845df624f343fef130857a6d6701364736f6c63430007060033\"},\"0x1000000000000000000000000000000000000007\":{\"balance\":\"0x0\",\"code\":\"0x608060405234801561001057600080fd5b50600436106100935760003560e01c80637ff6faa6116100665780637ff6faa614610198578063cf0ea268146101a0578063cfc1625414610258578063debfda301461027e578063ef09e78f146102b857610093565b80631d452e46146100985780631e891c0a1461013d5780636221a54b1461015a5780637325249414610174575b600080fd5b61013b600480360360208110156100ae57600080fd5b8101906020810181356401000000008111156100c957600080fd5b8201836020820111156100db57600080fd5b803590602001918460208302840111640100000000831117156100fd57600080fd5b919080806020026020016040519081016040528093929190818152602001838360200280828437600092019190915250929550610310945050505050565b005b61013b6004803603602081101561015357600080fd5b503561036d565b6101626104b6565b60408051918252519081900360200190f35b61017c6104cd565b604080516001600160a01b039092168252519081900360200190f35b61017c6104dc565b61013b600480360360608110156101b657600080fd5b6001600160a01b03823516916020810135918101906060810160408201356401000000008111156101e657600080fd5b8201836020820111156101f857600080fd5b803590602001918460208302840111640100000000
8311171561021a57600080fd5b9190808060200260200160405190810160405280939291908181526020018383602002808284376000920191909152509295506104e3945050505050565b61013b6004803603602081101561026e57600080fd5b50356001600160a01b0316610645565b6102a46004803603602081101561029457600080fd5b50356001600160a01b0316610714565b604080519115158252519081900360200190f35b6102c0610732565b60408051602080825283518183015283519192839290830191858101910280838360005b838110156102fc5781810151838201526020016102e4565b505050509050019250505060405180910390f35b6000546001600160a01b03163314610361576040805162461bcd60e51b815260206004820152600f60248201526e6f6e6c7920676f7665726e616e636560881b604482015290519081900360640190fd5b61036a81610794565b50565b600054600160a01b900467ffffffffffffffff168114156103d5576040805162461bcd60e51b815260206004820152601860248201527f74696d656c6f636b203d3d205f6e657754696d656c6f636b0000000000000000604482015290519081900360640190fd5b6301e133808110610422576040805162461bcd60e51b815260206004820152601260248201527174696d656c6f636b20746f6f206c6172676560701b604482015290519081900360640190fd5b3341148015610433575041620dead0145b1561036a5760005460408051428152600160a01b90920467ffffffffffffffff166020830152818101839052517feb86fa0729fdcf66bda3d834e93bf513d3740be7f7a4a6cab0dd318f1df8514f916060908290030190a16000805467ffffffffffffffff8316600160a01b0267ffffffffffffffff60a01b1990911617905550565b600054600160a01b900467ffffffffffffffff1690565b6000546001600160a01b031690565b620dead081565b3373fffec6c83c8bf5c3f4ae0ccf8c45ce20e4560bd71461054b576040805162461bcd60e51b815260206004820152601760248201527f6f6e6c792067656e6573697320676f7665726e616e6365000000000000000000604482015290519081900360640190fd5b600054600160e01b900460ff16156105a0576040805162461bcd60e51b8152602060048201526013602482015272185b1c9958591e481a5b9a5d1a585b1a5cd959606a1b604482015290519081900360640190fd5b6301e1338082106105ed576040805162461bcd60e51b815260206004820152601260248201527174696d656c6f636b20746f6f206c6172676560701b604482015290519081900360640190fd5b60008054600160e0
1b60ff60e01b19909116176001600160a01b0319166001600160a01b0385161767ffffffffffffffff60a01b1916600160a01b67ffffffffffffffff85160217905561064081610794565b505050565b6000546001600160a01b03828116911614156106925760405162461bcd60e51b815260040180806020018281038252602381526020018061098d6023913960400191505060405180910390fd5b33411480156106a3575041620dead0145b1561036a57600054604080514281526001600160a01b03928316602082015291831682820152517f7e1a30031de5a45b59b70d6a9f61956645cf3cf9468588f31f4217f7c770d7cc9181900360600190a1600080546001600160a01b0383166001600160a01b031990911617905550565b6001600160a01b031660009081526002602052604090205460ff1690565b6060600180548060200260200160405190810160405280929190818152602001828054801561078a57602002820191906000526020600020905b81546001600160a01b0316815260019091019060200180831161076c575b5050505050905090565b7fa2c44af5dca41c60e42e3fc93e9fc4dd6e5d2c14ededf08259d3372874ac085442600183604051808481526020018060200180602001838103835285818154815260200191508054801561081257602002820191906000526020600020905b81546001600160a01b031681526001909101906020018083116107f4575b50508381038252845181528451602091820191808701910280838360005b83811015610848578181015183820152602001610830565b505050509050019550505050505060405180910390a15b600154156108e6576000600260006001808080549050038154811061088057fe5b6000918252602080832091909101546001600160a01b031683528201929092526040019020805460ff191691151591909117905560018054806108bf57fe5b600082815260209020810160001990810180546001600160a01b031916905501905561085f565b60005b815181101561098857600182828151811061090057fe5b60209081029190910181015182546001808201855560009485529284200180546001600160a01b0319166001600160a01b039092169190911790558351909160029185908590811061094e57fe5b6020908102919091018101516001600160a01b03168252810191909152604001600020805460ff19169115159190911790556001016108e9565b505056fe676f7665726e616e636541646472657373203d3d205f6e6577476f7665726e616e6365a2646970667358221220f132b17253afa0a9fb201c52c06d6530e245825243ae357d043f3f80b654bd9964736f6c
63430007060033\"},\"0x628B0E1A5215fb2610347eEDbf9ceE68043D7c92\":{\"balance\":\"0x0826beb130b933d81ba3b600\"},\"0x81D2A8b4BbF71F9e9d3284BA0Da90c636f3803f7\":{\"balance\":\"0x05df4e8823e66100df0c0380\"},\"0x8c862EE155a14cb15A18e96684Bbd45ed3E21c23\":{\"balance\":\"0x05df4e8823e66100df0c0380\"},\"0x25B2E9CAf1993439faEDE57E2eBC1321a27fb31d\":{\"balance\":\"0x05df4e8823e66100df0c0380\"},\"0xf3Ce3535Cd6c71A6290Cf2134AE936ac6C369861\":{\"balance\":\"0x05df4e8823e66100df0c0380\"},\"0x0f253Ea4aA19b319038ce6669f62E74ef34Bb35B\":{\"balance\":\"0x05df4e8823e66100df0c0380\"},\"0xcc4620bb63F22a439779181406eE1256192ABc5A\":{\"balance\":\"0x05df4e8823e66100df0c0380\"},\"0xdA02ee21d047A1cfecDb09866Ed37dB93f310dB7\":{\"balance\":\"0x05df4e8823e66100df0c0380\"},\"0xfF964a10041E0bD830dbC9b310014096355E7F68\":{\"balance\":\"0x05df4e8823e66100df0c0380\"},\"0x09Da958D3104890F77d97D1436A1E3567ea86644\":{\"balance\":\"0x05df4e8823e66100df0c0380\"},\"0xD935f3B878Cac8d0E549c6CCe5aFdf7BFA63E6cE\":{\"balance\":\"0x05df4e8823e66100df0c0380\"},\"0x9298Bb6E42d6950dcdcccB1309CB6ab5fD29ad63\":{\"balance\":\"0x019d971e4fe8401e74000000\"},\"0x4bd02eF1D3A241c9Bf4A5129c5350D0449F5D3d6\":{\"balance\":\"0x019d971e4fe8401e74000000\"},\"0x51583EAE1e9f77b30226190077bd5c236A3A55E1\":{\"balance\":\"0x019d971e4fe8401e74000000\"},\"0x91b689EE73793B247C7F98b9CC7d9bF03ea20b12\":{\"balance\":\"0x019d971e4fe8401e74000000\"},\"0xC552c18A9793c39Ff1DB48b425B5757120095897\":{\"balance\":\"0x019d971e4fe8401e74000000\"},\"0x7825fF4d745F090bc009CE46ea366eB40439b9B8\":{\"balance\":\"0x019d971e4fe8401e74000000\"},\"0xc0e1F5a7290c7ae02197B47eBEc8151fF4BDe724\":{\"balance\":\"0x019d971e4fe8401e74000000\"},\"0xD956e1ed4C0C090f6AC7B2450863dE3acF3F8eF2\":{\"balance\":\"0x019d971e4fe8401e74000000\"},\"0xd929789Cb189223b7744eC81DB010D10079D4b3F\":{\"balance\":\"0x019d971e4fe8401e74000000\"},\"0xa0FE681BdB9fdEAD4609297Cb18a47AB26257B28\":{\"balance\":\"0x019d971e4fe8401e74000000\"},\"0xf00cfeC4132EDb8f73b6f9A4e9CdE9930D5EEEDC\":{\"
balance\":\"0x019d971e4fe8401e74000000\"},\"0xC789e17A7fb06e32A0018a0C2670E3763b121916\":{\"balance\":\"0x019d971e4fe8401e74000000\"},\"0xdCA14Af311bd966aa493Cc47FDaB88144eF3c7Fa\":{\"balance\":\"0x019d971e4fe8401e74000000\"},\"0x1F655DB637fAeAc5442f1C4854eA03d766f087f5\":{\"balance\":\"0x019d971e4fe8401e74000000\"},\"0x7FDBD31918177bf3726268cCB17A6ae2b411b902\":{\"balance\":\"0x019d971e4fe8401e74000000\"},\"0x2B01FDC03Ad99B52742Fed5F8D607770C1fe8d4a\":{\"balance\":\"0x019d971e4fe8401e74000000\"},\"0x949a27d37EBb454D36E4894243ac987b3A7628b4\":{\"balance\":\"0x019d971e4fe8401e74000000\"},\"0xFe8f28231E306FBE363846D2f37363AF372972aB\":{\"balance\":\"0x019d971e4fe8401e74000000\"},\"0x7efC34Bd86EA18A795E452D7cCd6f05dae76b49b\":{\"balance\":\"0x019d971e4fe8401e74000000\"},\"0x9cA8744DDC3c9d245A68B6c7039eD15086E47791\":{\"balance\":\"0x019d971e4fe8401e74000000\"},\"0xa63509E754FF14C1849cB252E8c95cbee57c8e25\":{\"balance\":\"0x019d971e4fe8401e74000000\"},\"0x0eBeCf7D66571012E49501CD198C94a250f0876E\":{\"balance\":\"0x019d971e4fe8401e74000000\"},\"0x6aBa5C8f8870Ec63A30Ff321F742149a19113fcf\":{\"balance\":\"0x019d971e4fe8401e74000000\"},\"0xec73ec40425219a601fAA28c408973A4151a990f\":{\"balance\":\"0x019d971e4fe8401e74000000\"},\"0x2a53aFC8b991aBBC445EC46c07E80b1a957C9662\":{\"balance\":\"0x019d971e4fe8401e74000000\"},\"0xC3E98AF398F23C7aC3BDda1Ef6A1FEDAF4f1135B\":{\"balance\":\"0x019d971e4fe8401e74000000\"},\"0x0BA52f5166f15Fb95042C33D75e4063188a39c7E\":{\"balance\":\"0x019d971e4fe8401e74000000\"},\"0xbCbf18F795D228a7907affcea3b9d70906D9EF47\":{\"balance\":\"0x019d971e4fe8401e74000000\"},\"0x954dE0fAe7228Ad71a180FaB83E9B958dbF0fC02\":{\"balance\":\"0x019d971e4fe8401e74000000\"},\"0x3eB8c610a9F15760f44CCEf8C17A72b3e86d2cdb\":{\"balance\":\"0x019d971e4fe8401e74000000\"},\"0x16413235C65e99335951565347909FD7F23cd342\":{\"balance\":\"0x019d971e4fe8401e74000000\"},\"0xE59c44447EE92aAEe6AF9F5dA52767C445D17272\":{\"balance\":\"0x019d971e4fe8401e74000000\"},\"0xB770bBB4f818751082976C
AA17bEa6A05Ce1Dd4e\":{\"balance\":\"0x019d971e4fe8401e74000000\"},\"0x56359EFfDF4c648A3E696bd36a1dBCA6dd2ec54c\":{\"balance\":\"0x019d971e4fe8401e74000000\"},\"0x789c29183452a587f1a5268908ad2A7479425C1B\":{\"balance\":\"0x019d971e4fe8401e74000000\"},\"0xcb61A3800bFD975Df2Fd0c3D2238F9eA9736e088\":{\"balance\":\"0x019d971e4fe8401e74000000\"},\"0xBE32DACDaDDE5AE87Bc41DB388283147CED10706\":{\"balance\":\"0x019d971e4fe8401e74000000\"},\"0xdfa74Ee0A47743B544D6450918Fab56143cA50E0\":{\"balance\":\"0x019d971e4fe8401e74000000\"},\"0x96bF57a27C56a5C1098E9a1bD77FB9A765286Ae9\":{\"balance\":\"0x019d971e4fe8401e74000000\"},\"0x5fF689f0c90925730206Fb085085599c402B415E\":{\"balance\":\"0x019d971e4fe8401e74000000\"},\"0xd25b927b94F09D03D53b5Bb80E26f6220654202b\":{\"balance\":\"0x019d971e4fe8401e74000000\"},\"0xeaf6a1477f5fcc1D5076B75d4de29d6A83fd48A6\":{\"balance\":\"0x019d971e4fe8401e74000000\"},\"0x3C945afe9B975afC773CE092b493A8AAAcbd3320\":{\"balance\":\"0x019d971e4fe8401e74000000\"},\"0xa80Da52630406A720C34fB68308487EBf088C478\":{\"balance\":\"0x019d971e4fe8401e74000000\"},\"0x236F6959fEBae430b8Caf3c8eDd208A18276d4e5\":{\"balance\":\"0x019d971e4fe8401e74000000\"},\"0x95D074024Ca18B432ED55b1149Bf8C9ff4b9eCAA\":{\"balance\":\"0x019d971e4fe8401e74000000\"},\"0x6cab54cd753C463Bad34febE341e31CF3e854F5a\":{\"balance\":\"0x019d971e4fe8401e74000000\"},\"0xb706eFA1F4f42a7010CCc5C7597D757575d973Db\":{\"balance\":\"0x019d971e4fe8401e74000000\"},\"0x049b73e9683F9A52A03666d69a3F7e9378b0DA95\":{\"balance\":\"0x019d971e4fe8401e74000000\"},\"0xca67Fa752E2707C4656c3E99a15fE3cf2FB1FFC4\":{\"balance\":\"0x019d971e4fe8401e74000000\"},\"0xf2423b39664A742608543A7DC5fae4a80A3eCb67\":{\"balance\":\"0x033b2e3c9fd0803ce8000000\"},\"0x3Cca008b4C57a5C2A79Ee8fE11Cf9D67dB0A3f79\":{\"balance\":\"0x033b2e3c9fd0803ce8000000\"},\"0x2258e7Ad1D8AC70FAB053CF59c027960e94DB7d1\":{\"balance\":\"0x033b2e3c9fd0803ce8000000\"},\"0xCFbB73026f415d0b4168232846caed3CD8e305CE\":{\"balance\":\"0x033b2e3c9fd0803ce8000000\"},\"
0xBA293E5B8caFADe5ccEEa191Cbd3e0E869aA4931\":{\"balance\":\"0x0295be96e640669720000000\"},\"0x8165A514054101643a5170e60bcB5d6E79FF34B9\":{\"balance\":\"0x033b2e3c9fd0803ce8000000\"},\"0x3AA8EefC6f3a42AB2cbA98FEA39397cca297B18b\":{\"balance\":\"0x033b2e3c9fd0803ce8000000\"},\"0x6c7f7757E8587E4068B8Cb9f713852eF2Ae3abaf\":{\"balance\":\"0xba1d9a70c21cda81000000\"},\"0xc506a2E354BC10649907a2086dAE2BEED3E760fE\":{\"balance\":\"0xba1d9a70c21cda81000000\"},\"0xff71960A8a2597fbD18F81A79D5171CBf27C5665\":{\"balance\":\"0x0121836204bc2ce21e000000\"},\"0xc9F314887750169424bd78770ccfd5AAC87A4b5F\":{\"balance\":\"0x0121836204bc2ce21e000000\"},\"0x67f467CdbEe74631F516607BEBD145789B2C2220\":{\"balance\":\"0x0121836204bc2ce21e000000\"},\"0x4598A6c05910ab914F0CbAAca1911Cd337d10D29\":{\"balance\":\"0x032d26d12e980b600000\"},\"0x785a3983B5FDEa45e1cc49a41Cd38b5b00687e97\":{\"balance\":\"0xd3c21bcecceda1000000\"},\"0xc783df8a850f42e7F7e57013759C285caa701eB6\":{\"balance\":\"100000000000000000000000000000000\"},\"0xeAD9C93b79Ae7C1591b1FB5323BD777E86e150d4\":{\"balance\":\"100000000000000000000000000000000\"},\"0xE5904695748fe4A84b40b3fc79De2277660BD1D3\":{\"balance\":\"100000000000000000000000000000000\"},\"0x92561F28Ec438Ee9831D00D1D59fbDC981b762b2\":{\"balance\":\"100000000000000000000000000000000\"},\"0x2fFd013AaA7B5a7DA93336C2251075202b33FB2B\":{\"balance\":\"100000000000000000000000000000000\"},\"0x9FC9C2DfBA3b6cF204C37a5F690619772b926e39\":{\"balance\":\"100000000000000000000000000000000\"},\"0xFbC51a9582D031f2ceaaD3959256596C5D3a5468\":{\"balance\":\"100000000000000000000000000000000\"},\"0x84Fae3d3Cba24A97817b2a18c2421d462dbBCe9f\":{\"balance\":\"100000000000000000000000000000000\"},\"0xfa3BdC8709226Da0dA13A4d904c8b66f16c3c8BA\":{\"balance\":\"100000000000000000000000000000000\"},\"0x6c365935CA8710200C7595F0a72EB6023A7706Cd\":{\"balance\":\"100000000000000000000000000000000\"},\"0xD7de703D9BBC4602242D0f3149E5fFCD30Eb3ADF\":{\"balance\":\"100000000000000000000000000000000\"},\"0x5
32792B73C0C6E7565912E7039C59986f7E1dD1f\":{\"balance\":\"100000000000000000000000000000000\"},\"0xEa960515F8b4C237730F028cBAcF0a28E7F45dE0\":{\"balance\":\"100000000000000000000000000000000\"},\"0x3d91185a02774C70287F6c74Dd26d13DFB58ff16\":{\"balance\":\"100000000000000000000000000000000\"},\"0x5585738127d12542a8fd6C71c19d2E4CECDaB08a\":{\"balance\":\"100000000000000000000000000000000\"},\"0x0e0b5a3F244686Cf9E7811754379B9114D42f78B\":{\"balance\":\"100000000000000000000000000000000\"},\"0x704cF59B16Fd50Efd575342B46Ce9C5e07076A4a\":{\"balance\":\"100000000000000000000000000000000\"},\"0x0a057a7172d0466AEF80976D7E8c80647DfD35e3\":{\"balance\":\"100000000000000000000000000000000\"},\"0x68dfc526037E9030c8F813D014919CC89E7d4d74\":{\"balance\":\"100000000000000000000000000000000\"},\"0x26C43a1D431A4e5eE86cD55Ed7Ef9Edf3641e901\":{\"balance\":\"100000000000000000000000000000000\"},\"0x23FFE739ebb7Cd7f736C1e428B93181429AFc395\":{\"balance\":\"100000000000000000000000000000000\"},\"0x650240A1F1024Fe55e6F2ed56679aB430E338581\":{\"balance\":\"100000000000000000000000000000000\"},\"0xb6ee2A471FF933a9a8B16352386665F2D0121dd3\":{\"balance\":\"100000000000000000000000000000000\"},\"0x856571E47e17a5b6A6353F8A4483EB49946E5697\":{\"balance\":\"100000000000000000000000000000000\"},\"0x26a90CC1E44fFc533f49401C1f2269a6080E4e21\":{\"balance\":\"100000000000000000000000000000000\"},\"0x2E3bfF5d8F20FDb941adC794F9BF3deA0416988f\":{\"balance\":\"100000000000000000000000000000000\"},\"0x90F97F8367aa1Ce1c3314Dbf4Ff1c9419b113E0A\":{\"balance\":\"100000000000000000000000000000000\"},\"0x19321ca3A6784186dDba167d4F8d167F46dbe0dB\":{\"balance\":\"100000000000000000000000000000000\"},\"0xf7aB7b05822D1038f30080cc4A4f27267D8a852C\":{\"balance\":\"100000000000000000000000000000000\"},\"0xc30F8ECb6d246aF5fD385F00e2004215d78e29E6\":{\"balance\":\"100000000000000000000000000000000\"},\"0x447DCbCab6b56F80Da53b93050e7d0c9CB3D2a63\":{\"balance\":\"100000000000000000000000000000000\"},\"0x37f900870430a08098701
5F3a36C6C65ed2c4d55\":{\"balance\":\"100000000000000000000000000000000\"},\"0x42D889d00B9b853dBB9b77e074fCb65c1b46d35F\":{\"balance\":\"100000000000000000000000000000000\"},\"0x3769119B83C2f53c182a4F725917065179795542\":{\"balance\":\"100000000000000000000000000000000\"},\"0x2CAE9302f38b62425eebfe83fe78d49D6f2f8707\":{\"balance\":\"100000000000000000000000000000000\"},\"0xD5147E4F21385355C2c4735830F716cd5A50f778\":{\"balance\":\"100000000000000000000000000000000\"},\"0xba2ac417f2D878C48028400B26fEC3ecc0aDf8A3\":{\"balance\":\"100000000000000000000000000000000\"},\"0x4f6E59dd6186aCA90Bb15ae6E4993BC3A570C013\":{\"balance\":\"100000000000000000000000000000000\"},\"0x9E32c568279a54874Ef0DCad4F0E34f04C34eA45\":{\"balance\":\"100000000000000000000000000000000\"},\"0xcBb8F758007062AD35073D306A89216917dC370b\":{\"balance\":\"100000000000000000000000000000000\"},\"0x20a236b2ad0F6FFF7702Cf4Ceec77b17ae140d2f\":{\"balance\":\"100000000000000000000000000000000\"},\"0xfce184e61f4951516B07A060D480976D4b43f20a\":{\"balance\":\"100000000000000000000000000000000\"},\"0x0F390D58fa98Ee9e127750cd7e3b76a3CAfA47DA\":{\"balance\":\"100000000000000000000000000000000\"},\"0x13D31df6059080eD4E67150b1825d882A7CAFB7c\":{\"balance\":\"100000000000000000000000000000000\"},\"0x228d7f657B958C361F348D74213750C4cE62E19c\":{\"balance\":\"100000000000000000000000000000000\"},\"0x2513e65a65feB610AdF8eFc0A30179AdAc17E33F\":{\"balance\":\"100000000000000000000000000000000\"},\"0xFa1d546f59258d144cF4d53B33EacEa7dc368edb\":{\"balance\":\"100000000000000000000000000000000\"},\"0xDC5E866F6Dd7e1A69EB807E1aDF445Dae895b3B0\":{\"balance\":\"100000000000000000000000000000000\"},\"0x17962bbB6DBABF7368b23C719d2dC96Ca8Cd1269\":{\"balance\":\"100000000000000000000000000000000\"},\"0xd6389341Db0009B23B565e54Fbb985bDf0C5DE71\":{\"balance\":\"100000000000000000000000000000000\"},\"0x4e022A3735Ac9d3191cC9221Aa2df2E1c796EFbA\":{\"balance\":\"100000000000000000000000000000000\"},\"0x7d9d5E84670e44460C9aB8CbC2FF7b7610932382\
":{\"balance\":\"100000000000000000000000000000000\"},\"0x7F912dE764E68e9205aDFbc309498153c3336376\":{\"balance\":\"100000000000000000000000000000000\"},\"0x520a92cc51a29ADD12Bd4C4AF72F651a11398F85\":{\"balance\":\"100000000000000000000000000000000\"},\"0x5BD95b0FfAf0C5C255B92C7492b709CD98EF2bB0\":{\"balance\":\"100000000000000000000000000000000\"},\"0x5b0e445C3EeAeA758D37950B7f99eB1fE3E6277d\":{\"balance\":\"100000000000000000000000000000000\"},\"0x99184c49aFe527BF483074A9E598f561805B513f\":{\"balance\":\"100000000000000000000000000000000\"},\"0x09940Fb83D71e70F30E39EFEC83053D7d9346Df8\":{\"balance\":\"100000000000000000000000000000000\"},\"0x42E3Ad34e59Bc7F1B8f020bE535F7A4A76C2A7b4\":{\"balance\":\"100000000000000000000000000000000\"},\"0x289A2D8699C558aF5BB4Cd50575e018e3cF251aE\":{\"balance\":\"100000000000000000000000000000000\"},\"0xe55110392D061B40219F8BF79F3273A95291a172\":{\"balance\":\"100000000000000000000000000000000\"},\"0x2eD99292B9592DF81d4f06Bc238d6218f5c9836B\":{\"balance\":\"100000000000000000000000000000000\"},\"0xe7385441f3785Ca377baCacF908CD0b9b7D8bfc4\":{\"balance\":\"100000000000000000000000000000000\"},\"0x1689a35D11Dc26079605776C8EB343242bA2c28D\":{\"balance\":\"100000000000000000000000000000000\"},\"0xf594c05787A0d23B1135b23Bc590292c6FCA4081\":{\"balance\":\"100000000000000000000000000000000\"},\"0x6299348ff3B9E7cdd5A32ef81A2F87cc68C794Df\":{\"balance\":\"100000000000000000000000000000000\"},\"0xE5D33C4b3b8E516C83f27151BeAd5787E6A1f80d\":{\"balance\":\"100000000000000000000000000000000\"},\"0x087313eEC041a19D94d1EF241434d594A462e2e4\":{\"balance\":\"100000000000000000000000000000000\"},\"0xB99E9D73d5d0abb703874A60d844C3CbBe22E098\":{\"balance\":\"100000000000000000000000000000000\"},\"0x9DE259527C97f7c156e19d3dd0180601FAEbeBda\":{\"balance\":\"100000000000000000000000000000000\"},\"0x561b3214777C7Ef68934917AAf223Fb1119c745a\":{\"balance\":\"100000000000000000000000000000000\"},\"0x157393968B7c2d01202ACa1f35cCF46049F9d3E8\":{\"balance\":\"100
000000000000000000000000000000\"},\"0x20E31bF696b7349C3EC5C0E732aD4920aaCF1D3e\":{\"balance\":\"100000000000000000000000000000000\"},\"0x44362BCF6Ea3dB06e5F3116E1eEf0e2Afd63e14D\":{\"balance\":\"100000000000000000000000000000000\"},\"0xd228cDD43cba10E9c98F380FA0273973DEC2C955\":{\"balance\":\"100000000000000000000000000000000\"},\"0xa89ca00dB07d314Ee792d0e1F241595eA111292a\":{\"balance\":\"100000000000000000000000000000000\"},\"0x8F38dbbc3E3e3a8B003eE505B0D58Da17C3Ec617\":{\"balance\":\"100000000000000000000000000000000\"},\"0x92661785B3D8C41b59e7a9bef38996Cb10C27053\":{\"balance\":\"100000000000000000000000000000000\"},\"0x45179FDE9F7049B6d8A942fA5Fb2C40003809733\":{\"balance\":\"100000000000000000000000000000000\"},\"0x5F4679176faF6edb547439c3e3F767aCd916E6dC\":{\"balance\":\"100000000000000000000000000000000\"},\"0x395B7Eea1F71A052C549d5A2b9f5890c773A0E7C\":{\"balance\":\"100000000000000000000000000000000\"},\"0xedCdC766aA7DbB84004428ee0d35075375270E9B\":{\"balance\":\"100000000000000000000000000000000\"},\"0xd5ac30b45b404dCbce34c713F1005522760b91A1\":{\"balance\":\"100000000000000000000000000000000\"},\"0x999f9880ACE3752EB49cAEea01F247075eE60d6b\":{\"balance\":\"100000000000000000000000000000000\"},\"0xDcA1Dc39cE877567D49cb907e37C7CD19116F8ef\":{\"balance\":\"100000000000000000000000000000000\"},\"0x85E6a34e900D901a40820843d3FF11B44E5374AE\":{\"balance\":\"100000000000000000000000000000000\"},\"0x9577e69ca21b8B5eAB153FFca33E04E064f2745f\":{\"balance\":\"100000000000000000000000000000000\"},\"0xf96CeFF99BeCC79DD9645F3b7f60E7bc8c34ed19\":{\"balance\":\"100000000000000000000000000000000\"},\"0xAb8FdFF7Ce6E33D4ab6959e3130AA46CacA2c128\":{\"balance\":\"100000000000000000000000000000000\"},\"0xd028D88B4F26227Fdc550c387C01Bf89F8C83cbE\":{\"balance\":\"100000000000000000000000000000000\"},\"0x172e45210d8aAC16d4196529640bc702EcEaF5d1\":{\"balance\":\"100000000000000000000000000000000\"},\"0xB77C202Ea30E6b8E3bBed342d45348f629Ca1dBA\":{\"balance\":\"10000000000000000000000
0000000000\"},\"0x24906b3Dc5e4d3b120FB7C01A2771d8cBA3732B2\":{\"balance\":\"100000000000000000000000000000000\"},\"0xee0411B166B4AC0F9e995fb1b8A5e21adD6B6752\":{\"balance\":\"100000000000000000000000000000000\"},\"0x74204Da71eD7537FC8f8150CFce9f6a5664819B4\":{\"balance\":\"100000000000000000000000000000000\"},\"0xb452Ed1Fabb6502adE6AC464F17B74553a47946f\":{\"balance\":\"100000000000000000000000000000000\"},\"0x74923cc7164bdCDFF7A4147EBF8d847a119c5610\":{\"balance\":\"100000000000000000000000000000000\"},\"0xf84d636b208AAcE4AB706469Bc40a31Ae61fF2Ef\":{\"balance\":\"100000000000000000000000000000000\"},\"0xC50702300CCb1AA80EF8353d9135c5e47b45453C\":{\"balance\":\"100000000000000000000000000000000\"},\"0x1267805E1A92F46901D0d7a60630482b8A59240b\":{\"balance\":\"100000000000000000000000000000000\"},\"0x8ec9813CdfC80Fe7Fc24325cbba7C8258756d2ca\":{\"balance\":\"100000000000000000000000000000000\"},\"0xEe935da47bB2BF1c8be75f37F1A9512D09Dc9459\":{\"balance\":\"100000000000000000000000000000000\"},\"0x52ea8ef624dEDAD1181ecEd8913d95745C1293C2\":{\"balance\":\"100000000000000000000000000000000\"},\"0x6a0970d7beB061889bBd4B39884E09000391d248\":{\"balance\":\"100000000000000000000000000000000\"},\"0x8CcD9F923AC2ddf03b024CeF2df8830cf9d4FcA3\":{\"balance\":\"100000000000000000000000000000000\"},\"0x8F7F18B96e2F310098789C25bE303bc0B24e3002\":{\"balance\":\"100000000000000000000000000000000\"},\"0x8b4983E6621c48A45DDF9630Aa732819f27C86bc\":{\"balance\":\"100000000000000000000000000000000\"},\"0x8A3019f2C85398609bA83FB90cb7eeA1416512e4\":{\"balance\":\"100000000000000000000000000000000\"},\"0x3201F55A5580c8C6E3d8D166Be80415d0BB3801D\":{\"balance\":\"100000000000000000000000000000000\"},\"0xFc98e3cB5803a99247Fe54A77D2181d8D14FA901\":{\"balance\":\"100000000000000000000000000000000\"},\"0xaEf9bb7375496d40DF484223a51A330400434705\":{\"balance\":\"100000000000000000000000000000000\"},\"0xBfe5618099dDE3080da6EE0df4D8D07CbbAe0037\":{\"balance\":\"100000000000000000000000000000000\"},\"0xaF
1903f7d42aF0E99FFf4332af76824D98F7E63A\":{\"balance\":\"100000000000000000000000000000000\"},\"0xd9A5437a8110995cEDC79f3A4f2c05844D4EAa3d\":{\"balance\":\"100000000000000000000000000000000\"},\"0xCCDbDED25EE3dc4eC97fBbA976E8f3E5dF5C72B3\":{\"balance\":\"100000000000000000000000000000000\"},\"0xFFeBFEDdE0C90db9Ea739C40c0B509591AFD662b\":{\"balance\":\"100000000000000000000000000000000\"},\"0x1425983077c306fce4291e09D089656Ccf844995\":{\"balance\":\"100000000000000000000000000000000\"},\"0x6E4294B48a3c4762cd3B7ab69716965E75C273Af\":{\"balance\":\"100000000000000000000000000000000\"},\"0x44ae55919480f37A87f7af00f990e0990F305219\":{\"balance\":\"100000000000000000000000000000000\"},\"0xEe23FdbD4AB43B19E1816545cdCE0dE2AdeF71Aa\":{\"balance\":\"100000000000000000000000000000000\"},\"0xa1072B8464aeb4B9Bc8967250d95C8c622F28598\":{\"balance\":\"100000000000000000000000000000000\"},\"0x33F31D2A00431Fc634e7439AEFE8071AEb4125D9\":{\"balance\":\"100000000000000000000000000000000\"},\"0xf71C03dbb50F54dBb487526268a94a3E6A4647d7\":{\"balance\":\"100000000000000000000000000000000\"},\"0xC4fAB9Daac9d1045F6e401EFD10d4be2bBe4dF03\":{\"balance\":\"100000000000000000000000000000000\"},\"0xDaf1f85C5180b24209D2ED9761A1Aba708bD0cF8\":{\"balance\":\"100000000000000000000000000000000\"},\"0xd7504E72fa848E576A28603A6e016985b40253ae\":{\"balance\":\"100000000000000000000000000000000\"},\"0x8636fe2DB78859fE9Ba8872829aeB9D49Da57328\":{\"balance\":\"100000000000000000000000000000000\"},\"0x76Ae032f4E84E535B76825d48B2244C80d6AdD3d\":{\"balance\":\"100000000000000000000000000000000\"},\"0xc14bcD373C2D17B264c32a5C3d5E2b2A4C3725B2\":{\"balance\":\"100000000000000000000000000000000\"},\"0x2393B14Cd3B76a6729Bfa11071d6d477A8543c56\":{\"balance\":\"100000000000000000000000000000000\"},\"0x3e6d22A63F40DBe2a97E2FF21746f1f5ac10e975\":{\"balance\":\"100000000000000000000000000000000\"},\"0x05a3b4382A28cfC4dafF2B8B79Eddc62bb6d3124\":{\"balance\":\"100000000000000000000000000000000\"},\"0x6EdE5C082dfbF511331d1C
984709b5c2476c8f5c\":{\"balance\":\"100000000000000000000000000000000\"},\"0x5b6618f8729E74AcB74D2Fe2e65755f5312E95e5\":{\"balance\":\"100000000000000000000000000000000\"},\"0x4D430E4bf3603696890799af3588fe8A8c57E4Ed\":{\"balance\":\"100000000000000000000000000000000\"},\"0x5d7B288e0E5140d480a505f412b3337a62faa126\":{\"balance\":\"100000000000000000000000000000000\"},\"0x09Ac4613c261a678e436a009617A07eE86edF26F\":{\"balance\":\"100000000000000000000000000000000\"},\"0x4C424e8FFb8f562bF7d5f3b3F64b523354A6eC81\":{\"balance\":\"100000000000000000000000000000000\"},\"0xf40a227706A848a09Ba013382DB7662aC3Adf723\":{\"balance\":\"100000000000000000000000000000000\"},\"0xa95FC6feA88e2Bbf7070462A9b3F85445aefA0dF\":{\"balance\":\"100000000000000000000000000000000\"},\"0xAA643da998a40Da88BC7BBEde01182f3e651c13C\":{\"balance\":\"100000000000000000000000000000000\"},\"0xfB6ADAA2915843CA3A8E80304469Cc1286Ea3a1c\":{\"balance\":\"100000000000000000000000000000000\"},\"0x1a17D722A71d75f4E6D600041cCBA90D29c8b1f3\":{\"balance\":\"100000000000000000000000000000000\"},\"0xFa2183af46B7A8317f0c2f568CD131B912ae9E93\":{\"balance\":\"100000000000000000000000000000000\"},\"0xD0131729628315ceeb65a6BBCFEC4dd2a4bB2810\":{\"balance\":\"100000000000000000000000000000000\"},\"0xDA405bf4Ee57f95Db80901c7F45b0172909494ba\":{\"balance\":\"100000000000000000000000000000000\"},\"0x9d80A2d9417a52732B3543EF0C895c36C38A80ff\":{\"balance\":\"100000000000000000000000000000000\"},\"0x95bBAb624D7Fb54e8b0d417CCea0DfF4B52639fe\":{\"balance\":\"100000000000000000000000000000000\"},\"0x17498781497cb4eD7f94698c6F7A8DD4Ed908e12\":{\"balance\":\"100000000000000000000000000000000\"},\"0x6c5aCff73BCe48F4CaCf5565D8f18392B0fFf3C1\":{\"balance\":\"100000000000000000000000000000000\"},\"0x9a34F81afcaFCEa2dE06e9229F4Ab81B68dD15ca\":{\"balance\":\"100000000000000000000000000000000\"},\"0xC67164f40A75b1aa802Ad634C7eB58538F00Ac3e\":{\"balance\":\"100000000000000000000000000000000\"},\"0xeF8DD0Eb060Af5DFa793bc4Fb1Ba1D6bA1106EeA\"
:{\"balance\":\"100000000000000000000000000000000\"},\"0x60948c1dA9D62187e7e72f89b1e5BFb788F084Ce\":{\"balance\":\"100000000000000000000000000000000\"},\"0x5bbC3d63D5f1180bfB2a4286D3211Bf4CF175057\":{\"balance\":\"100000000000000000000000000000000\"},\"0x8d8534af5CA90Cd0930d1E560939709BA87D5919\":{\"balance\":\"100000000000000000000000000000000\"},\"0x2C21A2F6f0f2F1Dd48403f4Fe41Fd3D7D711FE8E\":{\"balance\":\"100000000000000000000000000000000\"},\"0xecd4B1bcA600029D2e742D5098d5878555C6c560\":{\"balance\":\"100000000000000000000000000000000\"},\"0xB1284B2C389D8BF6348C7A02EF38E588D4e67EF8\":{\"balance\":\"100000000000000000000000000000000\"},\"0x5eD3a020F43dF27f3E59B4c743aFEBD92bc46202\":{\"balance\":\"100000000000000000000000000000000\"},\"0x837B8103FF0541ea8eBDd21ef6A5F2F7a0dF5Ae6\":{\"balance\":\"100000000000000000000000000000000\"},\"0xD7Fba4a5aA18713A6aFB5B6f9aD4c317e6456A2E\":{\"balance\":\"100000000000000000000000000000000\"},\"0x04940DC90057C056559633E41c35012b26FdDD8A\":{\"balance\":\"100000000000000000000000000000000\"},\"0x9EEEE4FD7574c56bFd589E8Dc08c470F3B431633\":{\"balance\":\"100000000000000000000000000000000\"},\"0x3778d4516D12389194E8685A265Fa7F5E21ECd42\":{\"balance\":\"100000000000000000000000000000000\"},\"0x6013e8aAca9a5C37Ba52610D4bB5a3bda16a55D3\":{\"balance\":\"100000000000000000000000000000000\"},\"0x2f9b13237038afA70Fac4D5922f77b3E514f9F19\":{\"balance\":\"100000000000000000000000000000000\"},\"0x9a39Cbb2871146fFD5d05a848DE94c59bB659e9e\":{\"balance\":\"100000000000000000000000000000000\"},\"0x9a1E43c547f29bb5eac09b98fb43E89f23F57e8B\":{\"balance\":\"100000000000000000000000000000000\"},\"0x6cF14909d42EFD1DE11bE9763bDeA9268c89C668\":{\"balance\":\"100000000000000000000000000000000\"},\"0x31fB5e464ca8c9C6c3EE5b6A839E86A35D56384E\":{\"balance\":\"100000000000000000000000000000000\"},\"0x4d3460DB855D5c93052e7916810D63d15E56b7F2\":{\"balance\":\"100000000000000000000000000000000\"},\"0x3524a66A2Bd2b06C94E568f8dd0D887E00C57629\":{\"balance\":\"1000
00000000000000000000000000000\"},\"0x4458926F52C0322917D04406892AfAbe758dB3B2\":{\"balance\":\"100000000000000000000000000000000\"},\"0x149Ec481f0aD4363D32ee66f2b232F4e259F394d\":{\"balance\":\"100000000000000000000000000000000\"},\"0xABc9EFD85Fc0d189cDF88E17C6D5AFb9e6BCD232\":{\"balance\":\"100000000000000000000000000000000\"},\"0x7dC755Ebe8b8AC95B08b9B0b1F40C683503E78Bb\":{\"balance\":\"100000000000000000000000000000000\"},\"0xe2ab648B877b4196D1916061EC82Ea5935577F12\":{\"balance\":\"100000000000000000000000000000000\"},\"0xD58Cd1CE482bfF11F58135edF971138Ac82EDFb6\":{\"balance\":\"100000000000000000000000000000000\"},\"0x2388773c4974e051fc85144afC66883b61C242AE\":{\"balance\":\"100000000000000000000000000000000\"},\"0x31EA79bc8D4Fa55f8528704464c2286aDb07A06e\":{\"balance\":\"100000000000000000000000000000000\"},\"0x103ff70897D2a966BD2F69783FECEbfBCECc9486\":{\"balance\":\"100000000000000000000000000000000\"},\"0xa3095E418101D10A5e63ebf47166F3cE28CF7959\":{\"balance\":\"100000000000000000000000000000000\"},\"0xB3045658685e7789a9A1a81988187d47b42b2335\":{\"balance\":\"100000000000000000000000000000000\"},\"0xc6656d75039a8b1B4E6846454c61C0FA4CeCa3f3\":{\"balance\":\"100000000000000000000000000000000\"},\"0xA8be0fEa56c43bfc4d95e5a6e96DD4a5969EFFf2\":{\"balance\":\"100000000000000000000000000000000\"},\"0x29fbb69677FcB4c7AAd675159647a07D2DdA1A1a\":{\"balance\":\"100000000000000000000000000000000\"},\"0x825070F7f6b1099C8E4238950b727ae290db31c4\":{\"balance\":\"100000000000000000000000000000000\"},\"0x818f9278F77451F9701f09707f345C6751B5AB66\":{\"balance\":\"100000000000000000000000000000000\"},\"0x494f53218404526A8feB5713FDDF491f1222C0De\":{\"balance\":\"100000000000000000000000000000000\"},\"0x29e8CBCccc737C36a03edF4D2e14ba336e0E2c3F\":{\"balance\":\"100000000000000000000000000000000\"},\"0x06D9C2eA1EaCda50e806dBe66C46dc61d3A6E3D3\":{\"balance\":\"100000000000000000000000000000000\"},\"0x0A336792CBFEE34A85653eD6edB0bB97bf998d9d\":{\"balance\":\"100000000000000000000000
000000000\"},\"0x844719e597b9d0fB038905f262626f44ea925862\":{\"balance\":\"100000000000000000000000000000000\"},\"0x4F9aCa238f29097666c2F50eF19eF15e1Ab1f45c\":{\"balance\":\"100000000000000000000000000000000\"},\"0xb7b56063B28fA014446121Da7c54Ab642Ce17e7A\":{\"balance\":\"100000000000000000000000000000000\"},\"0x531007Ef24FfDd3F6759175288DC19E76536016B\":{\"balance\":\"100000000000000000000000000000000\"},\"0xEBFd40fe4411606120B89b6D1E4b3879573C6235\":{\"balance\":\"100000000000000000000000000000000\"},\"0x8ae391fe43f833CD476f69A225eF15CAd261C8a6\":{\"balance\":\"100000000000000000000000000000000\"},\"0x7318A8cc3589c99642933c2E737fF97301B91383\":{\"balance\":\"100000000000000000000000000000000\"},\"0x48AE239526F49A3Db40b0e98c08b6c5971da51Cf\":{\"balance\":\"100000000000000000000000000000000\"},\"0xc1116C985757e65b5aAdfc4B596B4feeF00962F4\":{\"balance\":\"100000000000000000000000000000000\"},\"0x09D141472DFCe60a54515E43307074f8aC931B88\":{\"balance\":\"100000000000000000000000000000000\"},\"0x9837828BDE2b8960012C190D3F28a64b07E58dcF\":{\"balance\":\"100000000000000000000000000000000\"},\"0xe65c0ec865c9062bfcD8F68698AEe5af5ee71c43\":{\"balance\":\"100000000000000000000000000000000\"},\"0xA75Ec064b9E2939F7DA1FeB4BAbb390B9677FacB\":{\"balance\":\"100000000000000000000000000000000\"},\"0xf3A94b5453a910C60Af2e9f38673e833270106Ab\":{\"balance\":\"100000000000000000000000000000000\"},\"0x896602A6F91136A8Ebc921cbB2C6943Ab6961002\":{\"balance\":\"100000000000000000000000000000000\"},\"0x622C94affE2C3a30da80998434ab75C33877F4bF\":{\"balance\":\"100000000000000000000000000000000\"},\"0x681009434cd22Ec4c4658235eDb6f4A7421f1B53\":{\"balance\":\"100000000000000000000000000000000\"},\"0xea110becAECa3898921CC118F923F30094b183a4\":{\"balance\":\"100000000000000000000000000000000\"},\"0xD1771FF76A0CC182DA1ddB999aD3ab79F4A374AF\":{\"balance\":\"100000000000000000000000000000000\"},\"0xB55c0e5Da385A64BE1F44eE360da122541E2E6f6\":{\"balance\":\"100000000000000000000000000000000\"},\"0xc26
7540bcac9fE4f745c2daD5cC361F6cc5d5d6B\":{\"balance\":\"100000000000000000000000000000000\"},\"0x636FA2D6ecF4769d57028421E14181D16ca18729\":{\"balance\":\"100000000000000000000000000000000\"},\"0x531D5f8Aed6581Bdf430067164D750Bc534cB308\":{\"balance\":\"100000000000000000000000000000000\"},\"0x55affaeCcc2F6C92eFeCBdb89FB4A314300e7Fdd\":{\"balance\":\"100000000000000000000000000000000\"},\"0xA2F0460B16Da4216AC763bd599d489D29A9A5936\":{\"balance\":\"100000000000000000000000000000000\"},\"0x1B40f5C11beAD10D257198E9E32f4596C531AF25\":{\"balance\":\"100000000000000000000000000000000\"},\"0xd75cc0aB2A9522c1959Cac75AE6FE8B5FBaC36d5\":{\"balance\":\"100000000000000000000000000000000\"},\"0x21dECFc9B16284f7Cd584B3AbdDD2ddc40Cc3E1b\":{\"balance\":\"100000000000000000000000000000000\"},\"0x8CAb06b83D3eF05e1328e2235E54428e9731c1dE\":{\"balance\":\"100000000000000000000000000000000\"},\"0x817a85D19af38DF4903B327882CCfE2637F2F7f9\":{\"balance\":\"100000000000000000000000000000000\"},\"0xC28c7358C05049cD63ce5855148a993A4C262c8d\":{\"balance\":\"100000000000000000000000000000000\"},\"0x5a1ed9293CBB829B6e7c479863221469473504D6\":{\"balance\":\"100000000000000000000000000000000\"},\"0x9c47B4f74A3C0C8ABe44a75Fab233d74f400C185\":{\"balance\":\"100000000000000000000000000000000\"},\"0x3Da3a23c4BB36118AA98FEb03Dd357d5A0081149\":{\"balance\":\"100000000000000000000000000000000\"},\"0xb504C5dce116fFC75e279438b26ed1CBc7b3bc0A\":{\"balance\":\"100000000000000000000000000000000\"},\"0x960F9E968e3d5c085B103EC353cA8f33066172ED\":{\"balance\":\"100000000000000000000000000000000\"},\"0xF29e6fB8cF722C55Bf01930F6AABe68C4017e532\":{\"balance\":\"100000000000000000000000000000000\"},\"0xeF41A6917fB9c5364c239B99F013345DD0a6219a\":{\"balance\":\"100000000000000000000000000000000\"},\"0x30caDc8b9D78BDf07aFB3740E9E430d290c1B3DC\":{\"balance\":\"100000000000000000000000000000000\"},\"0xD30d45A10A783AD5dEf435F38cab246a2fcc84c1\":{\"balance\":\"100000000000000000000000000000000\"},\"0xa5D0FF7f34b1eb12303C2eF
31fa90A87fcb64bE6\":{\"balance\":\"100000000000000000000000000000000\"},\"0x563f90c4e220B34912B38952E781b9D424634Adb\":{\"balance\":\"100000000000000000000000000000000\"},\"0xC4082a2b6D1Ac2e945021a711C593247d85f1Ab1\":{\"balance\":\"100000000000000000000000000000000\"},\"0xb87831BEe032AfFe884158feDd25DF858b048D97\":{\"balance\":\"100000000000000000000000000000000\"},\"0x7ED972faacc55171885c3F9382Bb4C9BD47607eC\":{\"balance\":\"100000000000000000000000000000000\"},\"0x4457B0d1cE4175d13ec2E1eD6E9C03015b66072B\":{\"balance\":\"100000000000000000000000000000000\"},\"0x7B1C26Bb767Ef6B8cBA4139a8f8eec045b087001\":{\"balance\":\"100000000000000000000000000000000\"},\"0xbF46093A85B1221855B7d757e1F153db1d71CFbE\":{\"balance\":\"100000000000000000000000000000000\"},\"0x6c605eeD5c057c5950426C969e8d0026D455edFb\":{\"balance\":\"100000000000000000000000000000000\"},\"0xd291eD07FdB95df18C267FeFa8D8C30719bdb996\":{\"balance\":\"100000000000000000000000000000000\"},\"0xaB95EE035b194fe2020C6B53c94Fa62a6e120ab7\":{\"balance\":\"100000000000000000000000000000000\"},\"0xdd2366Fd99aAEE2D933A0c158A052DC924026a3c\":{\"balance\":\"100000000000000000000000000000000\"},\"0x3aB2b7F5333969b4803b97638E7263d08A58479f\":{\"balance\":\"100000000000000000000000000000000\"},\"0x19377255a8f49e1c5Af37f9eE81e12127428aeca\":{\"balance\":\"100000000000000000000000000000000\"},\"0x2b70cC7d7652675D7223D475bA7AA93E8794F8e3\":{\"balance\":\"100000000000000000000000000000000\"},\"0xc1ad1De12D77dc7A9E22B9a4bDa0eb6eb77dDA1e\":{\"balance\":\"100000000000000000000000000000000\"},\"0x705013b60C59C0e4cbaEed8e90192109C499DAA9\":{\"balance\":\"100000000000000000000000000000000\"},\"0x1BC1A923a7A326542d507013B2ea6e6277e2ACa7\":{\"balance\":\"100000000000000000000000000000000\"},\"0x2B5EA993766FF41BEDDc8A5310F8E5445663037e\":{\"balance\":\"100000000000000000000000000000000\"},\"0x398eAB9D416D647038501654D29CDff8A585E532\":{\"balance\":\"100000000000000000000000000000000\"},\"0xc623c3c2458e4217fA212c76a0aC82165ae6b696\":
{\"balance\":\"100000000000000000000000000000000\"},\"0x1649e1b2337087c0626f3f3fe7BefB404b82F234\":{\"balance\":\"100000000000000000000000000000000\"},\"0x8A8DafF227D30aCce694e293ed58819154ecB3A3\":{\"balance\":\"100000000000000000000000000000000\"},\"0x878D1313789f787014efe5c751b3b1F7A1BB454f\":{\"balance\":\"100000000000000000000000000000000\"},\"0x147d72BF17D179B024A9e54bc7dc721720D1a78c\":{\"balance\":\"100000000000000000000000000000000\"},\"0x1c0917222642F72ff0885c72eB01362B29e3d764\":{\"balance\":\"100000000000000000000000000000000\"},\"0x47Ca1e20D8E33cCa95C3fd91Ae0244E5Fc6cd829\":{\"balance\":\"100000000000000000000000000000000\"},\"0xa62ca2207A5068f1cf714127737227b10e149ea4\":{\"balance\":\"100000000000000000000000000000000\"},\"0xdD072CDaf124d06E41bB0cDa4cb1d2B88bbCc71b\":{\"balance\":\"100000000000000000000000000000000\"},\"0xB241A20DAe31D2e84725ccDa6b4d9B1bFA346Ba6\":{\"balance\":\"100000000000000000000000000000000\"},\"0xe78b5EC75a2371cd6EF1e5625D5a1C93A55C5de6\":{\"balance\":\"100000000000000000000000000000000\"},\"0xaCDB46238D915B6bBa26651eC88F5b41ec76E282\":{\"balance\":\"100000000000000000000000000000000\"},\"0xcF6f0b6Ec7352174b7FdF3A87BaA66D827F033Aa\":{\"balance\":\"100000000000000000000000000000000\"},\"0xA49d8F0134AC7b8dE656C6F76cdC525786970C62\":{\"balance\":\"100000000000000000000000000000000\"},\"0xE0cD75931d4E5947f25E645d3917A6bE2D9293cb\":{\"balance\":\"100000000000000000000000000000000\"},\"0x819379fD297DF0ED76A274A6F44B77885D13FB13\":{\"balance\":\"100000000000000000000000000000000\"},\"0x057cf47f73D3646f048bcd80788D0A8F468Bd518\":{\"balance\":\"100000000000000000000000000000000\"},\"0x969B00A6a83D512c5288AA2f75c4fe7e7cC693b4\":{\"balance\":\"100000000000000000000000000000000\"},\"0x2a5BDaa4A2ee52A66aDbe0Fe23e2ccF7D529814f\":{\"balance\":\"100000000000000000000000000000000\"},\"0xd39DED7D3A5b2D3F2608358f339fFb2f274217A8\":{\"balance\":\"100000000000000000000000000000000\"},\"0x5cfEFa692321e8e78b3c131133139E7702756B9a\":{\"balance\":\"10000
0000000000000000000000000000\"},\"0x967dEc44bFF01a319cc877e466f03cb7a4af1606\":{\"balance\":\"100000000000000000000000000000000\"},\"0xeA8601DD6537420934a3B49E97fFA45BE0C641a1\":{\"balance\":\"100000000000000000000000000000000\"},\"0xd300023cE15203A494e2292E48b4829140023c52\":{\"balance\":\"100000000000000000000000000000000\"},\"0xBA960169AcB1F0C932Bb1019EF51cb12FD6f7d6a\":{\"balance\":\"100000000000000000000000000000000\"},\"0xd6c2D7553583802edb6Cc0e97e0991AE410B69Ae\":{\"balance\":\"100000000000000000000000000000000\"},\"0xCF46f63b2AC90a37801D4a59b4aCd5c0D382821e\":{\"balance\":\"100000000000000000000000000000000\"},\"0xeb093af59Da26d6432a8acd5E496D27d4EdB44A7\":{\"balance\":\"100000000000000000000000000000000\"},\"0x6c278c8FFbbdf505e4f9AFbC59b9F33d4dA3844a\":{\"balance\":\"100000000000000000000000000000000\"},\"0xb0201CBBd8e74A13a0465C34989A222180B8d194\":{\"balance\":\"100000000000000000000000000000000\"},\"0x56F174Ecad22195e25bCb9e95e6451cc19a5C25B\":{\"balance\":\"100000000000000000000000000000000\"},\"0x8f23A8Ad84187d587982585259875aB008a8704d\":{\"balance\":\"100000000000000000000000000000000\"},\"0xad9d4567852EBEeFAf2f7FEEA6640f44CdeFBd9F\":{\"balance\":\"100000000000000000000000000000000\"},\"0x694c471A06585d3e6F11B3837968C890676F6518\":{\"balance\":\"100000000000000000000000000000000\"},\"0x5549E1db9deD2B74a4bB0073F496c00D606aBDF3\":{\"balance\":\"100000000000000000000000000000000\"},\"0x0b1eF77c2060Df2B2CA9818F22304D19e1Cb0391\":{\"balance\":\"100000000000000000000000000000000\"},\"0x154eEe19CC5f99EeE951D6A8f0E47d46981b7b13\":{\"balance\":\"100000000000000000000000000000000\"},\"0x58677ad589D7ed0465401a20F6F21951CacA43D2\":{\"balance\":\"100000000000000000000000000000000\"},\"0x2dD1fB20AA87Db551F67b6201141957089043851\":{\"balance\":\"100000000000000000000000000000000\"},\"0x252EA5735a4B117eB143B0be1550996a3798631D\":{\"balance\":\"100000000000000000000000000000000\"},\"0x5cFEcdbFaf34E82698548308C3E0Ec7EEdfef181\":{\"balance\":\"1000000000000000000000000
00000000\"},\"0xFb8471690d0d4ee898e6fBf8E0CD2fCa39B4793A\":{\"balance\":\"100000000000000000000000000000000\"},\"0x4A258E545254b339CA654C11aB79B22f0a7a84Ea\":{\"balance\":\"100000000000000000000000000000000\"},\"0x1Ba5998ED713Bf298e76EAf17F98F13F79b76C1a\":{\"balance\":\"100000000000000000000000000000000\"},\"0xAC7f6Bbb17fC8dF8be088333002A2598175BdC5F\":{\"balance\":\"100000000000000000000000000000000\"},\"0xEd1A4f6Fa392843697AEfe0b68C1d709A2e0E35D\":{\"balance\":\"100000000000000000000000000000000\"},\"0x3bBC7916c6DC16e1fE9aBA9f63f83B1531e3D696\":{\"balance\":\"100000000000000000000000000000000\"},\"0xe1B0148a43D977761c2c9255F1249aed4C0D0E7b\":{\"balance\":\"100000000000000000000000000000000\"},\"0xbE4188324694E7EC1946D0F7699AFBBc31A2A628\":{\"balance\":\"100000000000000000000000000000000\"},\"0x6c559a8D3afF9051F5f57Ab1407070Aa61086532\":{\"balance\":\"100000000000000000000000000000000\"},\"0x6620459ae16647d714513A044558ABcDE12bEf78\":{\"balance\":\"100000000000000000000000000000000\"},\"0xe31e9557fF002Cd8D57e915571410DA20A0BCBD2\":{\"balance\":\"100000000000000000000000000000000\"},\"0xB6dc5C9B738DE0FE5e94543bA0C9311d2A5c599a\":{\"balance\":\"100000000000000000000000000000000\"},\"0xF78633EfA2ED9a983866FF1EEe3eab56835867b7\":{\"balance\":\"100000000000000000000000000000000\"},\"0xD0E115400175E1C2cf00172478AA790F8FD7Ce1E\":{\"balance\":\"100000000000000000000000000000000\"},\"0x356DaAb68218CbD3aeCDBD32a5068451e152d6E4\":{\"balance\":\"100000000000000000000000000000000\"},\"0x43c748E8AD85D47b4aAa3cC6Ea1F349444fAB587\":{\"balance\":\"100000000000000000000000000000000\"},\"0x88A054838aA1AdDC9E77A18b476eC956f8e84FDb\":{\"balance\":\"100000000000000000000000000000000\"},\"0x35Eb42497Fc314DaCc69527f7AA7ec9D71CD56b1\":{\"balance\":\"100000000000000000000000000000000\"},\"0xb6c1947d4526b0c9E5691379EA866948F8096a11\":{\"balance\":\"100000000000000000000000000000000\"},\"0xf1e0cBe283f3a95D3AEE30D4A850a7eA11398565\":{\"balance\":\"100000000000000000000000000000000\"},\"0x2ba8
eBFbfaBEE0453BECab43aa56FCAD2C200153\":{\"balance\":\"100000000000000000000000000000000\"},\"0x7F83d4Cd33d3CF6415de8FD46790c5D4B4372bfb\":{\"balance\":\"100000000000000000000000000000000\"},\"0x4a4cf61DF7bad1660DfAbf632506e68E26F48b80\":{\"balance\":\"100000000000000000000000000000000\"},\"0x25BB179b17902cCD43034a6544b9aA5580BE255E\":{\"balance\":\"100000000000000000000000000000000\"},\"0x1ffcC5A9B0fa320172616D02467a0f68BE5A4724\":{\"balance\":\"100000000000000000000000000000000\"},\"0xbD51b8840b59F0C130A68eDAfC3745e41625c03d\":{\"balance\":\"100000000000000000000000000000000\"},\"0xf7157477902fA49F648C497Ce5AaEEF8CF287738\":{\"balance\":\"100000000000000000000000000000000\"},\"0xF1F649F8978840e5DF380b34D67676cff319F6CB\":{\"balance\":\"100000000000000000000000000000000\"},\"0xc97816df28e58c478A08038610317eC35bFF51dd\":{\"balance\":\"100000000000000000000000000000000\"},\"0x8825cF11b25D0642a7CbF0192Ac495BaD40B52E9\":{\"balance\":\"100000000000000000000000000000000\"},\"0x28ecA76dFDaE4151F1BF7fc0E5203B0d84547693\":{\"balance\":\"100000000000000000000000000000000\"},\"0xc6Ed21b23053035F616F2c2C8a8C3fE02aC536f7\":{\"balance\":\"100000000000000000000000000000000\"},\"0x7Bd792e4A682bCa2B874C2c2D7D669f506ff8BF6\":{\"balance\":\"100000000000000000000000000000000\"},\"0x8Cf4a329531d95Cf3039e1Bd27d70B2988165831\":{\"balance\":\"100000000000000000000000000000000\"},\"0x09a5Fd2b840CAf1A2c8006e90858968749a08605\":{\"balance\":\"100000000000000000000000000000000\"},\"0x463cB9f954f155273f9CCf08dEd5B47bF749f2fa\":{\"balance\":\"100000000000000000000000000000000\"},\"0x6bEFD88d6B8754C09F3e9720a77C438F8bB8E14e\":{\"balance\":\"100000000000000000000000000000000\"},\"0x4832601C0f1dA25F67bCdBb1168727a31A67611C\":{\"balance\":\"100000000000000000000000000000000\"},\"0x847bCE8d6b81589032147FFd37BFCa555894DE24\":{\"balance\":\"100000000000000000000000000000000\"},\"0x235Ea581aA51136B090CBE7f59c7C480Ff4067B9\":{\"balance\":\"100000000000000000000000000000000\"},\"0xF9313393A8E010f4C7d8C7a4
54fe83BA1e106f68\":{\"balance\":\"100000000000000000000000000000000\"},\"0x44CE46beb99F105C3d935243262b76Aecc937392\":{\"balance\":\"100000000000000000000000000000000\"},\"0xd82B0b673a285DEF5FA1F1011E6Fb716ccf43244\":{\"balance\":\"100000000000000000000000000000000\"},\"0x9D9B1eD165F01d27d0c5bb1534177a1FC5003ce7\":{\"balance\":\"100000000000000000000000000000000\"},\"0x3e8489dC02916c4B3EE39E808251062b40ec20F8\":{\"balance\":\"100000000000000000000000000000000\"},\"0x05332Acf904f86C040e2076065c42e9dE2ec4867\":{\"balance\":\"100000000000000000000000000000000\"},\"0x0Ea1aC52e8D271995893e2940ad3546242bF0AfE\":{\"balance\":\"100000000000000000000000000000000\"},\"0xBbc14CBD386Ca3E6928e4a1EC4B5040cab7b422b\":{\"balance\":\"100000000000000000000000000000000\"},\"0x08f8B4c35f9Bf9D4F704715d869bCFa605728C37\":{\"balance\":\"100000000000000000000000000000000\"},\"0x40Ff1280E20bEd8d7BF0f9c7253d40eC9f4ca5D9\":{\"balance\":\"100000000000000000000000000000000\"},\"0xBec7dfe29E0B7A663d137daa38824347d3c7e7f6\":{\"balance\":\"100000000000000000000000000000000\"},\"0x6ABE3DEdfb0E4746CE26d0B2FE126588ee6b37be\":{\"balance\":\"100000000000000000000000000000000\"},\"0x8225da7f0B3e719D213acD260992B21b076cEB63\":{\"balance\":\"100000000000000000000000000000000\"},\"0x9A3721112320b03d087e61d36A3100EEf954DB5F\":{\"balance\":\"100000000000000000000000000000000\"},\"0x9986AEA413Ac15c1A9Db714B6A74024731Ce1132\":{\"balance\":\"100000000000000000000000000000000\"},\"0xfde1529F5Dea42859AC5C458d46EA67bF0958980\":{\"balance\":\"100000000000000000000000000000000\"},\"0x2cD129F2af1e0Eb7E8Ae0AD06EFdad449bd04030\":{\"balance\":\"100000000000000000000000000000000\"},\"0xd0b3940C889F9C6c8De81E85b9bA701cb24c632E\":{\"balance\":\"100000000000000000000000000000000\"},\"0xb03A55F0581712085DBBD29584E891e5b53D94a3\":{\"balance\":\"100000000000000000000000000000000\"},\"0x47fDcD05315E8cd03dBF736fa42dae83375f3d4D\":{\"balance\":\"100000000000000000000000000000000\"},\"0x16F27075A76bAE892BfDfa1490e20C9ca3734430\":{
\"balance\":\"100000000000000000000000000000000\"},\"0xd71A3E7D6E207418E5194c27C224Cd2CC37934e6\":{\"balance\":\"100000000000000000000000000000000\"},\"0x1F92e89a280472BE72d72e690328844aAdFEbaBe\":{\"balance\":\"100000000000000000000000000000000\"},\"0x0e33b5400Eb83f5490769922F9c67B7ac74FC1a7\":{\"balance\":\"100000000000000000000000000000000\"},\"0xcD742d9F463ea741F2C295ecdabE720D2C28379a\":{\"balance\":\"100000000000000000000000000000000\"},\"0x696EA71C93d53FFd630d26026c2e72252081eC88\":{\"balance\":\"100000000000000000000000000000000\"},\"0x22D36C42C9A7Fe1200fCcD59d9bFecA3e36e9283\":{\"balance\":\"100000000000000000000000000000000\"},\"0xa21383AaA2779d95acEbB51D7c9B874E2046929E\":{\"balance\":\"100000000000000000000000000000000\"},\"0x665c0E202667aa73A379d9a85f86f523A8De2a9A\":{\"balance\":\"100000000000000000000000000000000\"},\"0x65Ad1B4C2529436df045BF4405EFb4657820D79A\":{\"balance\":\"100000000000000000000000000000000\"},\"0x02DD448Bbd3AAD0224a87BEAf5848501F7116De9\":{\"balance\":\"100000000000000000000000000000000\"},\"0x72B87F033c16B597B00B47Df01ad59408fEa3aFB\":{\"balance\":\"100000000000000000000000000000000\"},\"0x862F9fE64C5caf03a13cEA9b7313baF98C5418b4\":{\"balance\":\"100000000000000000000000000000000\"},\"0xF34612dcfED6F0a47119d8068098FcD83C9f5229\":{\"balance\":\"100000000000000000000000000000000\"},\"0xCb6a29Ca66828Ebfd21114e5e3e7E224b26d17F8\":{\"balance\":\"100000000000000000000000000000000\"},\"0xF25D17a3A7CCAE8864Ce2bAd7FF53b5e5eB2d3a5\":{\"balance\":\"100000000000000000000000000000000\"},\"0x5f2833C83D48dCb51faE40A13BbaFcD96B874255\":{\"balance\":\"100000000000000000000000000000000\"},\"0x74E9259a6aCe209f7862c5DefA2E541a7506ec20\":{\"balance\":\"100000000000000000000000000000000\"},\"0x8BCd984F86EB5f4F7FE23e2B275A52A7e0A550Df\":{\"balance\":\"100000000000000000000000000000000\"},\"0xDeC65d5Df11752c2507068c3a05F58F9c30D36Ce\":{\"balance\":\"100000000000000000000000000000000\"},\"0xCdbb689dBAEB012CC5693Cc12ca394CD93DC1434\":{\"balance\":\"100000
000000000000000000000000000\"},\"0x443711C422f80030B7Ea9f1b42f0127f4AB3245e\":{\"balance\":\"100000000000000000000000000000000\"},\"0x6019bf8fb4EB27699616B7dbD5c824cEB08Dd37E\":{\"balance\":\"100000000000000000000000000000000\"},\"0x284ED985f94071865377902C57Ed15839E4D47c5\":{\"balance\":\"100000000000000000000000000000000\"},\"0x468F89E7B93194A4eF76F9C518C5c99bf703adC3\":{\"balance\":\"100000000000000000000000000000000\"},\"0x3B67EFc7030510c218005f0e6cEeDBd145447FB5\":{\"balance\":\"100000000000000000000000000000000\"},\"0x0269581b33Eb83dF5a33F0aCa374D990257c13a5\":{\"balance\":\"100000000000000000000000000000000\"},\"0x1B499d79c55a44F0082110cb0E1FDAf15d94e939\":{\"balance\":\"100000000000000000000000000000000\"},\"0x4b3194860112a840E35599A6246A5Fd1E117D7C6\":{\"balance\":\"100000000000000000000000000000000\"},\"0xEA1A04b55B1Ba87Dac18EC1F8a76ba95B0378d9f\":{\"balance\":\"100000000000000000000000000000000\"},\"0x73E80B736a19b10A2ea16e1a3Ff54F9dE01Cf06E\":{\"balance\":\"100000000000000000000000000000000\"},\"0xa075b958Ad232480f081e490A10ae6fd781F1f54\":{\"balance\":\"100000000000000000000000000000000\"},\"0x44f2F94Dbdfb67bd1f9f7Ab5B5fC3524A9A37596\":{\"balance\":\"100000000000000000000000000000000\"},\"0x866F69FB7D36124D2A22d2E09451e79B9A8c1922\":{\"balance\":\"100000000000000000000000000000000\"},\"0x0C9AbB93682a0baaFcD16dC20F5617DBc499CdEc\":{\"balance\":\"100000000000000000000000000000000\"},\"0x767aC3110c612B332D0524Ecb9AB24839f3a71E2\":{\"balance\":\"100000000000000000000000000000000\"},\"0x6e42D5C1dF423941e4Da984D3B44EFd2498A86a6\":{\"balance\":\"100000000000000000000000000000000\"},\"0x2705b4966F4D52b77b4aBfBc969d4089f34A47dB\":{\"balance\":\"100000000000000000000000000000000\"},\"0xebB2e83Cf6eA0fFc18cEeC7cd3910edDE2e83e11\":{\"balance\":\"100000000000000000000000000000000\"},\"0xec0eaBAdFd4b473224b3ea4eA1396e27c3dEF725\":{\"balance\":\"100000000000000000000000000000000\"},\"0xA810e8FfF39e7270A4C93058b75788055BAB84f6\":{\"balance\":\"10000000000000000000000000
0000000\"},\"0x63220c8d229f1763Ba97b711D1fed7cDbE78acbC\":{\"balance\":\"100000000000000000000000000000000\"},\"0x2723eb21DbD539dD3741CFB82250679241Edb120\":{\"balance\":\"100000000000000000000000000000000\"},\"0x8c0c9030a91c1Db91B90C30b85E529D285AF3AE5\":{\"balance\":\"100000000000000000000000000000000\"},\"0x6EbBf3bE6f31fC0437Bd14aA0c14C357CD2Fc864\":{\"balance\":\"100000000000000000000000000000000\"},\"0x890A5ED079c0Ef6DEe70f43aB7829466f81F93CB\":{\"balance\":\"100000000000000000000000000000000\"},\"0xeFA752bC4100400D2E0C0a13BBDB0BE06b7cC287\":{\"balance\":\"100000000000000000000000000000000\"},\"0x38980757E0BEbCd9A371DE025A18B8DB517F6b8a\":{\"balance\":\"100000000000000000000000000000000\"},\"0x32D5b114575167fD8222D4B331c54E647BEC20Cc\":{\"balance\":\"100000000000000000000000000000000\"},\"0x70067edF7EBbF612834278988F390a6A070BBe97\":{\"balance\":\"100000000000000000000000000000000\"},\"0x7bF69F53e0B8e884e279E14b2B40d3EF26d9246A\":{\"balance\":\"100000000000000000000000000000000\"},\"0x1f52aaB99696244103146D783C62419D8dB52bC2\":{\"balance\":\"100000000000000000000000000000000\"},\"0x86A010f6EC977d603D71e55B6bada74BBB7B9672\":{\"balance\":\"100000000000000000000000000000000\"},\"0x2c0289cF407cB48a2BB2837095Ad53F956EEb334\":{\"balance\":\"100000000000000000000000000000000\"},\"0x6350192329A4A3cD50DB2E378a07bcD59beB8234\":{\"balance\":\"100000000000000000000000000000000\"},\"0x7743B07B17a218cda814Ac698B5770623902B3Dc\":{\"balance\":\"100000000000000000000000000000000\"},\"0xfd18e536fD8DAa35c21DfF0ff0213702fb385bE2\":{\"balance\":\"100000000000000000000000000000000\"},\"0x5658E20e19d3faE07f80c1aF55e1a488e24E0A3c\":{\"balance\":\"100000000000000000000000000000000\"},\"0x987e8a64EF3ed853dF0fd9aeB61cB2313f5F2E4b\":{\"balance\":\"100000000000000000000000000000000\"},\"0x52f9fC7eF6C8108f565b1b7853b666F645B48C42\":{\"balance\":\"100000000000000000000000000000000\"},\"0x2F1A18143b1Dc39c3Ae7182fD6F85dDc2e69356A\":{\"balance\":\"100000000000000000000000000000000\"},\"0x360FC
70e64aaf7F230133E42801ee7A08A1e9329\":{\"balance\":\"100000000000000000000000000000000\"},\"0x78C8027D4cf1d0b9F80c5C60e75C887a68d95689\":{\"balance\":\"100000000000000000000000000000000\"},\"0x92665B827d19722a9cdBF6b62a054728A01A8ad4\":{\"balance\":\"100000000000000000000000000000000\"},\"0x24432107F1ca9c3E2CAcace13D7839e3BcdbeA24\":{\"balance\":\"100000000000000000000000000000000\"},\"0x5f7E6c66Bf9Ef2493Ae244F315434A23aB02958a\":{\"balance\":\"100000000000000000000000000000000\"},\"0x66D89c9c8f7520663ce878A1330Fc0CB92061d1a\":{\"balance\":\"100000000000000000000000000000000\"},\"0xC63190813E47CA081F529FA0b2E8C99C8639fB5F\":{\"balance\":\"100000000000000000000000000000000\"},\"0x98cC860034b2E84873c1F848162715ccA3024700\":{\"balance\":\"100000000000000000000000000000000\"},\"0x025C5E44261A4c1Dd25409aDf2368572DD594f0f\":{\"balance\":\"100000000000000000000000000000000\"},\"0x0Be35e76c165082aCDEAb7Fc5cDb0BcDcc8dB167\":{\"balance\":\"100000000000000000000000000000000\"},\"0x9c8F619ffcc2D7dae1a337a532c64023c80F2633\":{\"balance\":\"100000000000000000000000000000000\"},\"0xA8e0CF2178f1398Dc7BB4c29A11b1Bbd0aeB6393\":{\"balance\":\"100000000000000000000000000000000\"},\"0x93e95d0913963a433CA6730F035D81E5a6ADbFfd\":{\"balance\":\"100000000000000000000000000000000\"},\"0x98ddD07ebFca0ff85aDd3134A7f24aeFeAb60A0e\":{\"balance\":\"100000000000000000000000000000000\"},\"0xad966D350f18e8C561B99fC5336A9e8A395C1Bbf\":{\"balance\":\"100000000000000000000000000000000\"},\"0xc153AD3b419e3622Fc16F57FA92399d85e119937\":{\"balance\":\"100000000000000000000000000000000\"},\"0x553956A741c97B624D2E9DF182d82607BaEE9BC9\":{\"balance\":\"100000000000000000000000000000000\"},\"0x5fdD8d87D18cAc06c4531F8f47bb55D21E7c0F02\":{\"balance\":\"100000000000000000000000000000000\"},\"0x929aBaD38AE562858A6754060d63BFCB2e3d51fE\":{\"balance\":\"100000000000000000000000000000000\"},\"0xE97A06B3952C8C515b21C5C260190b017b5b4fc5\":{\"balance\":\"100000000000000000000000000000000\"},\"0x45CAF7332098B7DbefdB4435D
646CAC0c12EdFca\":{\"balance\":\"100000000000000000000000000000000\"},\"0x967E811271578443b52469Bf3F76072136Cd6CA4\":{\"balance\":\"100000000000000000000000000000000\"},\"0x9e19BEd4d1C213a84eE0e5e6B1B1f9A001008188\":{\"balance\":\"100000000000000000000000000000000\"},\"0x517bd751e077e7B0cA04F89cFb85D2CCC3A14281\":{\"balance\":\"100000000000000000000000000000000\"},\"0x8541d2cd95925F44f39c818a31564ACA6ebc71de\":{\"balance\":\"100000000000000000000000000000000\"},\"0xAf4E214B0D636d22239e4Ba4C4d19b21F366D5e6\":{\"balance\":\"100000000000000000000000000000000\"},\"0xb795855048b34A4FEf4C3A589021Eb4953fD8B27\":{\"balance\":\"100000000000000000000000000000000\"},\"0x2a7564119Ff4E3b4dDe4545490b9CF096B6179EE\":{\"balance\":\"100000000000000000000000000000000\"},\"0xF1124518eB0297e5194880ad6Ba3fa10EB6064d2\":{\"balance\":\"100000000000000000000000000000000\"},\"0xFad5bda11A42BAbA893CDF7E4E2D104828f6122C\":{\"balance\":\"100000000000000000000000000000000\"},\"0x660e80f1020aae7281008eDc64831753a0043734\":{\"balance\":\"100000000000000000000000000000000\"},\"0xf04B1B41Cc92e25a17dBC5C587979b32Cb14Ad0E\":{\"balance\":\"100000000000000000000000000000000\"},\"0x6Ec2dC3C714cB122791a9145c18e5e7E90eaD89D\":{\"balance\":\"100000000000000000000000000000000\"},\"0x3a2EAaD7b568B82117047c90C1FD78B3Bf6FD806\":{\"balance\":\"100000000000000000000000000000000\"},\"0xF079cB18614A26Df1d2A929DB53339e101319329\":{\"balance\":\"100000000000000000000000000000000\"},\"0xeEd9842433eed39cdBA35725a2e29E44BB053399\":{\"balance\":\"100000000000000000000000000000000\"},\"0xB32f130c7ee18E1FE166b00ca11164Eae2051a8B\":{\"balance\":\"100000000000000000000000000000000\"},\"0x06078a9ffeEA8CAEcD45631D67aA489A71B1f24c\":{\"balance\":\"100000000000000000000000000000000\"},\"0xEF25Ddeef2483Ce4B2273d38D656aEcbD5a77485\":{\"balance\":\"100000000000000000000000000000000\"},\"0x340a5951bdC42712fc1eca90890539b5529B9E07\":{\"balance\":\"100000000000000000000000000000000\"},\"0xc262cA9892D6275056474F07fDf692D30221c8d1\":{\
"balance\":\"100000000000000000000000000000000\"},\"0x8FaC8c9b163E21b4294A5bEAb01C9817D8bfda76\":{\"balance\":\"100000000000000000000000000000000\"},\"0xB8d4b22c829B630a9Ea506a581ff7b6e607F5260\":{\"balance\":\"100000000000000000000000000000000\"},\"0x03218129eB0F8eE840138Cdc1D350283B61d6eC3\":{\"balance\":\"100000000000000000000000000000000\"},\"0xa8Cb09d4385636C9a49fCc1663550Fa61C938a18\":{\"balance\":\"100000000000000000000000000000000\"},\"0x33383f6a7d83A6307b35EdfDfa816b4D57Ee726d\":{\"balance\":\"100000000000000000000000000000000\"},\"0x6c431c4e05d3C914219370bF94375384E16B03ba\":{\"balance\":\"100000000000000000000000000000000\"},\"0x4EaDc983623f7d686388539c3296D283BBb5B2f1\":{\"balance\":\"100000000000000000000000000000000\"},\"0x1fE570f5AeEcd3b2683984000d84b1E59868979d\":{\"balance\":\"100000000000000000000000000000000\"},\"0x2Ed589Afd0E744f96AE0b74A72A9004992f5BC71\":{\"balance\":\"100000000000000000000000000000000\"},\"0x3D4923ceC22FCa7cc2545218d3EAEE17297D9068\":{\"balance\":\"100000000000000000000000000000000\"},\"0x46E3ED71f57fd0e99803E5FB78ccb0d06604935E\":{\"balance\":\"100000000000000000000000000000000\"},\"0x734bEBe1d26894Ec495B445c85D61B07093027A9\":{\"balance\":\"100000000000000000000000000000000\"},\"0x7B3513df8BCdE1e1f10322d623426De7f7ECF18a\":{\"balance\":\"100000000000000000000000000000000\"},\"0x9a881bf67979Cd01A9f4d9143233b868760aF094\":{\"balance\":\"100000000000000000000000000000000\"},\"0xE0973E6be85Eee8e37c6df823330AF91521C39eD\":{\"balance\":\"100000000000000000000000000000000\"},\"0x5013A8349EB02fd299732e7C417A975B203d5b56\":{\"balance\":\"100000000000000000000000000000000\"},\"0x2d34ec9EBf98362b94b1867037B9eE3A9903272b\":{\"balance\":\"100000000000000000000000000000000\"},\"0xB9a465A44DA2B8815Fe3f6b736d43307Bdd96c37\":{\"balance\":\"100000000000000000000000000000000\"},\"0xd26bb343d60Ca0a817AF4b46E044d50c384A73a2\":{\"balance\":\"100000000000000000000000000000000\"},\"0xB4fa78B0321369a87c361799a1Cf4eaC30bCf04B\":{\"balance\":\"1000000
00000000000000000000000000\"},\"0x84DC41C44b925604330dD02ADb62371824063D67\":{\"balance\":\"100000000000000000000000000000000\"},\"0x9D5870e0b1D0e8194a99d0d7CCEbdb6A1eb7563d\":{\"balance\":\"100000000000000000000000000000000\"},\"0x5C3f6074C1B17d96b98884B3758D3553873262A4\":{\"balance\":\"100000000000000000000000000000000\"},\"0xBc065fB9Ea26A300FcAb623F7410792dF3505EAA\":{\"balance\":\"100000000000000000000000000000000\"},\"0x688581E157b5Cc3309F409a676DFEB8dC22DA719\":{\"balance\":\"100000000000000000000000000000000\"},\"0xD8490B905D579951BCa37926C3F4b60b3B60177C\":{\"balance\":\"100000000000000000000000000000000\"},\"0xC674808DF1025555E38bFA2F7AE7CbCF57A5d7D4\":{\"balance\":\"100000000000000000000000000000000\"},\"0x63c552dab9f499b44ceA15169626e946A6141e02\":{\"balance\":\"100000000000000000000000000000000\"},\"0x86D814297a6B649B6e6DBD265c92Cb7A038E9F13\":{\"balance\":\"100000000000000000000000000000000\"},\"0x5F3B96880D666988C2c39b7eF10dB3a2c6965cB5\":{\"balance\":\"100000000000000000000000000000000\"},\"0x52111f88E3fDD827fFBDcf8F6e077e58B424069e\":{\"balance\":\"100000000000000000000000000000000\"},\"0x9FDeC9F825C41a7692FF31c66770cF95d3143e2a\":{\"balance\":\"100000000000000000000000000000000\"},\"0x600Cee7CCa9e47641934D10E3a6eEC10e4c276bc\":{\"balance\":\"100000000000000000000000000000000\"},\"0xc86327ad45daF8c7646e149505D8f203C5606638\":{\"balance\":\"100000000000000000000000000000000\"},\"0x53CBBaa3b85aaf3cEB6bA880cD3206d57Ca00EA6\":{\"balance\":\"100000000000000000000000000000000\"},\"0x6F140e3Adb9468E721Aa940cCF5c0379b717b8f5\":{\"balance\":\"100000000000000000000000000000000\"},\"0x9512b0360dDf41Ae65993742f2171cC81597dc31\":{\"balance\":\"100000000000000000000000000000000\"},\"0x8ef26f157fC9D1dDc15A78697E3B111BA8842811\":{\"balance\":\"100000000000000000000000000000000\"},\"0x8Bf08e3FacB66301934c6EEe45Bf5e99Ab7F317A\":{\"balance\":\"100000000000000000000000000000000\"},\"0xCf99327cE80Ad3987cb2AeAF8639F78B57f09802\":{\"balance\":\"100000000000000000000000000
000000\"},\"0x716F5EcB51AA679eBdC99d16e99850C995655270\":{\"balance\":\"100000000000000000000000000000000\"},\"0x4F071A959c8A44EdD4e12b7Eb6398FBF9E5edAd1\":{\"balance\":\"100000000000000000000000000000000\"},\"0xC2CDf7B09D3AC7853fAfCB9b854b7a760e25A303\":{\"balance\":\"100000000000000000000000000000000\"},\"0x3A516F79Df69376E7F2b9aBc675e7EB246d547Ae\":{\"balance\":\"100000000000000000000000000000000\"},\"0xf408BE6815AA7d7BA98008214432ECf229390170\":{\"balance\":\"100000000000000000000000000000000\"},\"0x89c35D911D3Ed110d37005999E6d8Be68b30Cd9F\":{\"balance\":\"100000000000000000000000000000000\"},\"0x11D7cD7AbBdA4c1cEDcAe3dEf37458979FA8Dc0a\":{\"balance\":\"100000000000000000000000000000000\"},\"0x60b4F46dfAb7BCD1b6bEA33A446bA392d6A9f321\":{\"balance\":\"100000000000000000000000000000000\"},\"0x2f7016cd226417c2d6B2430dE40CD9e15F0D119d\":{\"balance\":\"100000000000000000000000000000000\"},\"0x09ba1ea23E1c965D6d44Bc2AD7BE72CB7960459b\":{\"balance\":\"100000000000000000000000000000000\"},\"0xAE5825862ebcF9D1aA24926b72d8B5fe6B9F072d\":{\"balance\":\"100000000000000000000000000000000\"},\"0x1bf3dC06490f034383b3BF5181552b36E1BC4AA1\":{\"balance\":\"100000000000000000000000000000000\"},\"0xC8BfD3F4675066e0Ec4231791Ca4987dc9A5039f\":{\"balance\":\"100000000000000000000000000000000\"},\"0xFc3f5bFc9f334a7A0fC9d01C080506dF206F77f1\":{\"balance\":\"100000000000000000000000000000000\"},\"0x30Cac314Da1b401340CF5A7d2227c3c8d188cBF1\":{\"balance\":\"100000000000000000000000000000000\"},\"0xeaF3c95D8Fc013C4a5ea6678233390625291c20E\":{\"balance\":\"100000000000000000000000000000000\"},\"0xf347D4AC6a967AbFaE283d21f5BB41375E15fF5C\":{\"balance\":\"100000000000000000000000000000000\"},\"0x372372778Da16D37f7290a4BdFB9379375969e7e\":{\"balance\":\"100000000000000000000000000000000\"},\"0x11F542ef4a3a27d2F76c0BA1A69f9C5d305067AC\":{\"balance\":\"100000000000000000000000000000000\"},\"0xE04b86200cb31af05F3e741241c0a850f68978f9\":{\"balance\":\"100000000000000000000000000000000\"},\"0xeC19D4
ed074090e2759B39f7F809B3A0a9832cFc\":{\"balance\":\"100000000000000000000000000000000\"},\"0xF18BA00325e3d17DcBaED349C2c91e1D4aa15646\":{\"balance\":\"100000000000000000000000000000000\"},\"0xC9a53b7369Eed499dc1E98a80013E22674193C5B\":{\"balance\":\"100000000000000000000000000000000\"},\"0xf935a0D068cE24de132F1509dB11E55223Fb1ABE\":{\"balance\":\"100000000000000000000000000000000\"},\"0x490c4C39F1A0ab55084805274F2C128740aff9D5\":{\"balance\":\"100000000000000000000000000000000\"},\"0xC3913a084B4DbACf7A1e55D81c202C829A820C4D\":{\"balance\":\"100000000000000000000000000000000\"},\"0x1FA8CEc228dca5A4243A4d940f3D1E95D3D65618\":{\"balance\":\"100000000000000000000000000000000\"},\"0xA49E1B80C2Be870c8D74C996EFA58F03F559b948\":{\"balance\":\"100000000000000000000000000000000\"},\"0xFB99b878eD4456C3a6eeC12135dD21613060EFfC\":{\"balance\":\"100000000000000000000000000000000\"},\"0x44178c3D9858E86979DEbf4f122d4d1D910cb792\":{\"balance\":\"100000000000000000000000000000000\"},\"0x3a88ce653a41402600718955aEB1DF71Af573e8D\":{\"balance\":\"100000000000000000000000000000000\"},\"0x1D11930Fa2635356257661877389bFc2Dd022284\":{\"balance\":\"100000000000000000000000000000000\"},\"0xb874753324674c83E88889783a2880159C3FdbB0\":{\"balance\":\"100000000000000000000000000000000\"},\"0xf06eEaaC63F37a93130dC700fF983FFB04503fe9\":{\"balance\":\"100000000000000000000000000000000\"},\"0x20948E33258654d81df4bCAbDFed385e82a02522\":{\"balance\":\"100000000000000000000000000000000\"},\"0xE39794CeEe24F58a186C145b43404608a678A4fd\":{\"balance\":\"100000000000000000000000000000000\"},\"0x6E8907243a546Da5b70752848cB194e319822D44\":{\"balance\":\"100000000000000000000000000000000\"},\"0xd90344aC3437E8e2d255cd506b6DD1C54af2AEC3\":{\"balance\":\"100000000000000000000000000000000\"},\"0xe393502F1bb5baA6a46641F1B7a76433A6a0624B\":{\"balance\":\"100000000000000000000000000000000\"},\"0xb4b1C841E854B2817381C4BD990E5c49b3a785F3\":{\"balance\":\"100000000000000000000000000000000\"},\"0x7Cce9C769426d3315B07e72071
A61d596B7be06E\":{\"balance\":\"100000000000000000000000000000000\"},\"0x6A3b2720FAc06E966816f9CbFDb0e111Ba372D17\":{\"balance\":\"100000000000000000000000000000000\"},\"0x830e5D225686E4600138FA5Ae3b362a0D8A4BAa1\":{\"balance\":\"100000000000000000000000000000000\"},\"0x0117fdd46130b2ba02cF837C1E8F3ABa2eF8A98d\":{\"balance\":\"100000000000000000000000000000000\"},\"0x45510c134FcBa994965b1e7381Be8d91da10b239\":{\"balance\":\"100000000000000000000000000000000\"},\"0xF66b44Cf1b0C1Fbf69ADf5FB4cb519aA9d30F173\":{\"balance\":\"100000000000000000000000000000000\"},\"0xfd64B1A3917d326c5184fC91FF3A49DdD1255985\":{\"balance\":\"100000000000000000000000000000000\"},\"0xC1b0efC5f03844D02D0DC8F81CBD1ED3493882c8\":{\"balance\":\"100000000000000000000000000000000\"},\"0x9D2B9e128a42DfCf5c8A54d5d6140EA5ba96E5eD\":{\"balance\":\"100000000000000000000000000000000\"},\"0x8ae6f6829F89f9e6A64AE5d4d31DB2CAC8f0b0AA\":{\"balance\":\"100000000000000000000000000000000\"},\"0x0EE7d032D120E7bB28581FA345cFB03c38160a08\":{\"balance\":\"100000000000000000000000000000000\"},\"0xcf4c6D7DF747a0947a04b1033133Cf8C569e8270\":{\"balance\":\"100000000000000000000000000000000\"},\"0x570245D5d27a141C1D3a11C62C60F04BdE28aac1\":{\"balance\":\"100000000000000000000000000000000\"},\"0x8e8B612e2488e572d9af0c5c04A9E5E841fcb408\":{\"balance\":\"100000000000000000000000000000000\"},\"0x7495a7476eF5B15D4b07459eE90783Ec6c0aEF74\":{\"balance\":\"100000000000000000000000000000000\"},\"0xb7f4Cd5475311bE3F130cdf9a93A89e21f3Fe09C\":{\"balance\":\"100000000000000000000000000000000\"},\"0x56f433FCe086875322CdCea2Be877494F9E0Ff87\":{\"balance\":\"100000000000000000000000000000000\"},\"0x6DA209b31763b161fE5CFc2831b50AC3be256008\":{\"balance\":\"100000000000000000000000000000000\"},\"0xd78Bf520b46D8b8eb0351F9B97487a770fd63089\":{\"balance\":\"100000000000000000000000000000000\"},\"0x7B72457349678bceC2941457F9fD6d825649c517\":{\"balance\":\"100000000000000000000000000000000\"},\"0x3d736583FbB72bCA80f863fc1caf043cb2981026\":{\"
balance\":\"100000000000000000000000000000000\"},\"0xA2F4148F94d0BAF33b1ADE5529F31c75ab50C3Ed\":{\"balance\":\"100000000000000000000000000000000\"},\"0x40A858f233b5408D0803003DdAB77F1549E2aeF0\":{\"balance\":\"100000000000000000000000000000000\"},\"0xb509A9958D452D95F8c1E7aE386EFAeb4bD13f8d\":{\"balance\":\"100000000000000000000000000000000\"},\"0x85c18317B8d1026d3F0CC3A950226deEBabBd897\":{\"balance\":\"100000000000000000000000000000000\"},\"0x378A820aEa05aD5fd16818192F04b556A21e2cC9\":{\"balance\":\"100000000000000000000000000000000\"},\"0xEe9e6630f800369a1eeF08E4b460A6d5Ea586266\":{\"balance\":\"100000000000000000000000000000000\"},\"0x54269ca47236c0b5C4ef413248cE797F80C02b39\":{\"balance\":\"100000000000000000000000000000000\"},\"0xD8af3ed219A16cFf15b874A98E8C45C5b730DC10\":{\"balance\":\"100000000000000000000000000000000\"},\"0x27433aBBdE014a29fd7CF0A09c6DDC0e68E7f1b4\":{\"balance\":\"100000000000000000000000000000000\"},\"0x6c9590ab84f9A4B61A15CB220126EC31995ac738\":{\"balance\":\"100000000000000000000000000000000\"},\"0x39F763B14996481909AB447983fa361338d049B7\":{\"balance\":\"100000000000000000000000000000000\"},\"0xa2b7BF6E168781e1df22311DC49B929Bc3E5852a\":{\"balance\":\"100000000000000000000000000000000\"},\"0xE8384583539075458C09eA4c906B64E33023ab80\":{\"balance\":\"100000000000000000000000000000000\"},\"0xe9cfA3ce223FE4d9de0219f4f8c3B4EbB029C074\":{\"balance\":\"100000000000000000000000000000000\"},\"0x850b5A9B511B124137301293D48f1d62001EDCaa\":{\"balance\":\"100000000000000000000000000000000\"},\"0x58e174191E06786DEc77b5Cc2F4E2506197D3789\":{\"balance\":\"100000000000000000000000000000000\"},\"0x914A0D45C30ffBF7bD4bA852E3EcdfC83723D7dC\":{\"balance\":\"100000000000000000000000000000000\"},\"0x22D10d7A637Fe4Cdaa0856293202E1D12F7BB43E\":{\"balance\":\"100000000000000000000000000000000\"},\"0x08C01b9822d900CEf92e510C3f5F2dB63d102b4E\":{\"balance\":\"100000000000000000000000000000000\"},\"0x13ACfb6025Be068d00009Cbb0eFDDF3Cd8dC2fc9\":{\"balance\":\"10000000
0000000000000000000000000\"},\"0x9F376358861E1ed815cF2db61ea047143Ad27F6B\":{\"balance\":\"100000000000000000000000000000000\"},\"0x1f4Ab9148Bfec95DF02025DF27cc270202A25663\":{\"balance\":\"100000000000000000000000000000000\"},\"0xED273259634F89DB660272e860E182666598516d\":{\"balance\":\"100000000000000000000000000000000\"},\"0x010c95e7a7C3731406f4e225c75D4Cd313183713\":{\"balance\":\"100000000000000000000000000000000\"},\"0x43fd94DCa92C4F13BeCE17a82B915a28400A78A4\":{\"balance\":\"100000000000000000000000000000000\"},\"0x2F9C174C53c5e01340F76b5d9B78efC269A4Cf35\":{\"balance\":\"100000000000000000000000000000000\"},\"0xb4941A74f2aE10a39258272d3edEbFceb8b3F449\":{\"balance\":\"100000000000000000000000000000000\"},\"0x885f84fE563B0828502191382c86642B92789DfB\":{\"balance\":\"100000000000000000000000000000000\"},\"0xCd83d0b7f35052EE2d7fFdB31B5c7A6D3e926F8C\":{\"balance\":\"100000000000000000000000000000000\"},\"0x809c87913EA65FD5aA15cCE10069b9F9f5Dc3565\":{\"balance\":\"100000000000000000000000000000000\"},\"0x0E78f9cAeD0A31De53CedAa5B99b7614F01ae893\":{\"balance\":\"100000000000000000000000000000000\"},\"0x6133A9246bc1383b26BBE45Ff0E2520A3bF02751\":{\"balance\":\"100000000000000000000000000000000\"},\"0xfc5bcceB87ba6C9f3Bd8711bf5C14fA7ef523fD7\":{\"balance\":\"100000000000000000000000000000000\"},\"0xdC80fE1FE917448178eF30411bcF8c638C197994\":{\"balance\":\"100000000000000000000000000000000\"},\"0xe9648a4E4Dd0b7d15E8f0cf6529177A8eB8853B5\":{\"balance\":\"100000000000000000000000000000000\"},\"0x06Cdb994802d95298bd2582f96C65e409036F1bA\":{\"balance\":\"100000000000000000000000000000000\"},\"0x9f8104221E4406216522ED423ddecBB419441E65\":{\"balance\":\"100000000000000000000000000000000\"},\"0x7BdE4995C371B3988388e51c9c65d69E22278dC7\":{\"balance\":\"100000000000000000000000000000000\"},\"0x80458480c1dc7f2d20607fbE82f6c36acD012f71\":{\"balance\":\"100000000000000000000000000000000\"},\"0x42581d4393f6244D9859df14d7f7E8E96eA3542b\":{\"balance\":\"1000000000000000000000000000
00000\"},\"0xC3e5F5CE5776D86b85eA756133d57aAfDB809255\":{\"balance\":\"100000000000000000000000000000000\"},\"0xaaA28CABAdd672316c73443629202B6006F1ca76\":{\"balance\":\"100000000000000000000000000000000\"},\"0x477f80c62dcF70CbAFf887AF387a5E510683bCD6\":{\"balance\":\"100000000000000000000000000000000\"},\"0xa875E6c541F092B585B9F68f5d11e6edae102F7A\":{\"balance\":\"100000000000000000000000000000000\"},\"0x31f3170d541a339A9e0b304F519F1318c78654E0\":{\"balance\":\"100000000000000000000000000000000\"},\"0x3f4B0Cb789a7BcBd566829Fb764c155C078d8bB8\":{\"balance\":\"100000000000000000000000000000000\"},\"0xe62C6a15347650650729ef03372f9D5bcdE86061\":{\"balance\":\"100000000000000000000000000000000\"},\"0x75597d2EeF2E7bDE52305EBA44E5A8EC196acc8B\":{\"balance\":\"100000000000000000000000000000000\"},\"0x61Ec7D8BE9c2c3D3aD30Ba5cC5D676Df30CE07FC\":{\"balance\":\"100000000000000000000000000000000\"},\"0x8032c883d23214AECdE234Aa902faf9306dC63A2\":{\"balance\":\"100000000000000000000000000000000\"},\"0x26Ad76C3eBfa66Bfe3F2b3b8c37f47684bdA63B7\":{\"balance\":\"100000000000000000000000000000000\"},\"0xb1511427E08d06Be611970Ae803C1087f8Fd6335\":{\"balance\":\"100000000000000000000000000000000\"},\"0x0B67421525aBfFBe4a15C0E6478c653Aa56Acbc3\":{\"balance\":\"100000000000000000000000000000000\"},\"0xe5330DbE8ffd44a5cf0579466Ef8882869471d60\":{\"balance\":\"100000000000000000000000000000000\"},\"0x4273BAe93Cf9940eF56A56ce2606910CCD35aB38\":{\"balance\":\"100000000000000000000000000000000\"},\"0xc3c477CBE8563942538134C186703B9A5f7D9Baa\":{\"balance\":\"100000000000000000000000000000000\"},\"0xa9cc61f5E6e992Ac8ebD67F0D3cE08d4724bbb65\":{\"balance\":\"100000000000000000000000000000000\"},\"0x8c17934C279C258fEadCc7E27A448EdDD03cBA8a\":{\"balance\":\"100000000000000000000000000000000\"},\"0x2084ff44dA7c3142DFD6E79bD2DD0846975c45C7\":{\"balance\":\"100000000000000000000000000000000\"},\"0x4fbC4F91EF5a13F0Ac78a6815ec8C17B8db748Cd\":{\"balance\":\"100000000000000000000000000000000\"},\"0x1d8Cf4F
474B33619594143284b2FF42866011e13\":{\"balance\":\"100000000000000000000000000000000\"},\"0x660E9a1470803bAffBB2BEAcf13D70EbaAE8d9B9\":{\"balance\":\"100000000000000000000000000000000\"},\"0xFa19aA516C1Df06A081b43dB5E2Bd4D33b05dF77\":{\"balance\":\"100000000000000000000000000000000\"},\"0xaE2B394CA5F49C0eb453FAC6544374a7E44eAf7F\":{\"balance\":\"100000000000000000000000000000000\"},\"0x18c38799A23D994713624abF25613d8F5b96478C\":{\"balance\":\"100000000000000000000000000000000\"},\"0x430f16C7DC938E731aDAB7b32127aFD8300cff6e\":{\"balance\":\"100000000000000000000000000000000\"},\"0x7A552CC3911f630D4566B899A58f053D1BCa14E4\":{\"balance\":\"100000000000000000000000000000000\"},\"0x7B4E919548E7cF48532927c057b844c70d87366A\":{\"balance\":\"100000000000000000000000000000000\"},\"0x48dE8c40AB749Ef4aF1Aa96676dc408bf03C0f2B\":{\"balance\":\"100000000000000000000000000000000\"},\"0x1993aE40035FF12e06dd5641799f9Eb8498A968B\":{\"balance\":\"100000000000000000000000000000000\"},\"0x06Bc47Eb1e827b142C4edCe424f41C641B6068Af\":{\"balance\":\"100000000000000000000000000000000\"},\"0x0DeAeEe016672931231037612212209cAD4E8A70\":{\"balance\":\"100000000000000000000000000000000\"},\"0xDACb2Fb431eDC402B510Fd9b13b702deDBd8c536\":{\"balance\":\"100000000000000000000000000000000\"},\"0x86174EAFC32D25dF83bcab01F465b9EABec96AAd\":{\"balance\":\"100000000000000000000000000000000\"},\"0xC47b8A6b8B3Cd97Db5880571e86de852EbAfa12e\":{\"balance\":\"100000000000000000000000000000000\"},\"0x2EfB1F3471B14822f1dA94C508f13C3ec71aF97c\":{\"balance\":\"100000000000000000000000000000000\"},\"0x87b86BfFaB44E27b4A2239af7fd5b7a6B1Ed90D6\":{\"balance\":\"100000000000000000000000000000000\"},\"0xF28671c14F71c452676b03261eDAce11325e5798\":{\"balance\":\"100000000000000000000000000000000\"},\"0xdD47107cA66158AA3ff1d7D494feba1cDaA8F93D\":{\"balance\":\"100000000000000000000000000000000\"},\"0x0cDa464e19BC0e59Be183d21fE1DF7cbC367DC8f\":{\"balance\":\"100000000000000000000000000000000\"},\"0x0b093dF5cDc9ba398dcb3bE4530
eEDd65b27A2a9\":{\"balance\":\"100000000000000000000000000000000\"},\"0xe37A61B23D5056aD005c719f08ea98EEAA53d885\":{\"balance\":\"100000000000000000000000000000000\"},\"0x72b852bd99b7e473279f39F2C8dB41da574b1E32\":{\"balance\":\"100000000000000000000000000000000\"},\"0x6B31Bf6Cfcd672951f0af5a024d1ab90D73625EA\":{\"balance\":\"100000000000000000000000000000000\"},\"0xB483aF466bA7a5861D986b6CbE7A8452252Ac29B\":{\"balance\":\"100000000000000000000000000000000\"},\"0x627895457CcDd90aC8D7C811E7e78a83CF68e4F2\":{\"balance\":\"100000000000000000000000000000000\"},\"0x65293A45f865666cc0E6D0a59f52E315bA63b308\":{\"balance\":\"100000000000000000000000000000000\"},\"0x434d19541dB0731D22D6376b868FD8F072d1ec0F\":{\"balance\":\"100000000000000000000000000000000\"},\"0x2a795fe8fdD3E5d8044709b45D8aFb78632068Dd\":{\"balance\":\"100000000000000000000000000000000\"},\"0x4E714432916212d2940adEE42966A8093e858c3B\":{\"balance\":\"100000000000000000000000000000000\"},\"0xf483159D88130a81b796Dbf4eC65bEa5CF5b0823\":{\"balance\":\"100000000000000000000000000000000\"},\"0x1F79994E78f8330F24B6183Ab263263F9F30c278\":{\"balance\":\"100000000000000000000000000000000\"},\"0xC23b992c278a573253CDFcC6554EF19bCbD478D1\":{\"balance\":\"100000000000000000000000000000000\"},\"0x53A6c629374C2592AB1604F266B5AeEC31BdEcd5\":{\"balance\":\"100000000000000000000000000000000\"},\"0x750F2547b79f2d469300E3f7ca8e90ec3ea347f4\":{\"balance\":\"100000000000000000000000000000000\"},\"0x3ED58cDBFe8629ab4CF1a34839BA9B48FCCB047B\":{\"balance\":\"100000000000000000000000000000000\"},\"0x65eB5235Be582fE1b261C45961fa41980f4D1165\":{\"balance\":\"100000000000000000000000000000000\"},\"0xC24e3760A94FA13A3817E36Ab78c6ba5D699CF3B\":{\"balance\":\"100000000000000000000000000000000\"},\"0xd988bEf29A246D60D763EFE4b1e27bf117bD98c4\":{\"balance\":\"100000000000000000000000000000000\"},\"0x8CD72e845249F49e291d10F897EA543806836db6\":{\"balance\":\"100000000000000000000000000000000\"},\"0x79cEf81410d0611d2beb94ed06c5c163ADE67E8E\":{\"b
alance\":\"100000000000000000000000000000000\"},\"0xA79e417Ff1a926556cC351c3f1Ea3c7B33e9CF87\":{\"balance\":\"100000000000000000000000000000000\"},\"0xbE59261FE356f4f18A1eF26855754aCFBAd3cf24\":{\"balance\":\"100000000000000000000000000000000\"},\"0xfCB49DDB9330aBc55Bf8184BC1127757e981782d\":{\"balance\":\"100000000000000000000000000000000\"},\"0xCcDF6ec7F36A01168a72a34C74F6Aab16e9b2CdD\":{\"balance\":\"100000000000000000000000000000000\"},\"0x69CC01204949a92a69886Cbe78b5546D58e21746\":{\"balance\":\"100000000000000000000000000000000\"},\"0xAe033550Df7adEaE221a3f9f444Aa3810e34C6a6\":{\"balance\":\"100000000000000000000000000000000\"},\"0xc424d0A9b92EB4f08dA191760b69946A6CA10085\":{\"balance\":\"100000000000000000000000000000000\"},\"0xa418466864e054c4C26d9ea948420Fb5bA0Fc990\":{\"balance\":\"100000000000000000000000000000000\"},\"0x88889f08C7187880cD6Bf06fAFD72C7974d72908\":{\"balance\":\"100000000000000000000000000000000\"},\"0x01E84542f70208255526374B008C0dc6F982d461\":{\"balance\":\"100000000000000000000000000000000\"},\"0xCD86cFCf0e8021BfBD8cE452A6d9814B29383265\":{\"balance\":\"100000000000000000000000000000000\"},\"0x89715612091C41C3dfBd7E418f4Fb5DB3e732348\":{\"balance\":\"100000000000000000000000000000000\"},\"0x5F973720Df3d628Fe6C4F97a18e078e0C4A478C2\":{\"balance\":\"100000000000000000000000000000000\"},\"0xfa79b99230FecC72F57624f42F4783b07d7ab4A5\":{\"balance\":\"100000000000000000000000000000000\"},\"0xB6882De9B6B0640BE1052CA8bEd5798505b893F3\":{\"balance\":\"100000000000000000000000000000000\"},\"0x82114de9E819164204175b10d5e8ebD169327d3B\":{\"balance\":\"100000000000000000000000000000000\"},\"0x2f2b30C3CbA31a94b130b2D18729FF39CEd22810\":{\"balance\":\"100000000000000000000000000000000\"},\"0x86CADBeE39576dda2c2F52458Ad7cEA997b2298A\":{\"balance\":\"100000000000000000000000000000000\"},\"0x4c926459E11874C1416a7d1d008543f5FD1f475B\":{\"balance\":\"100000000000000000000000000000000\"},\"0x3679303Ffb359c89FcC1a580f643B7d649463E81\":{\"balance\":\"100000000
000000000000000000000000\"},\"0x0E85A905B870D836aE43c4978ce5b14E0001362a\":{\"balance\":\"100000000000000000000000000000000\"},\"0xa27a134e55197c6EC7c46CB5EC0C44A0CB1aF548\":{\"balance\":\"100000000000000000000000000000000\"},\"0x46c9A3Ab004fb5cb710c579460e0A11C77AA0e74\":{\"balance\":\"100000000000000000000000000000000\"},\"0xecbC4011D149A86407ea76E71bB1850ee705a14f\":{\"balance\":\"100000000000000000000000000000000\"},\"0x97f84e2965E3eefC2a784144294693872d3E1454\":{\"balance\":\"100000000000000000000000000000000\"},\"0xE0F4AAA3Dd3249e5Dbef56C0bc2f63b5b12a0C1c\":{\"balance\":\"100000000000000000000000000000000\"},\"0x0A087301639af48274AF98348DD1C9bB7d6cf901\":{\"balance\":\"100000000000000000000000000000000\"},\"0x89eFa8cb1B959FA332f1F10CE94d2aEdd3B15958\":{\"balance\":\"100000000000000000000000000000000\"},\"0x42cFA37c308f6434C276f3bDF118dBa23123f45f\":{\"balance\":\"100000000000000000000000000000000\"},\"0x561D7081BBBC1bA0045CeCEf503C19fb82ebe074\":{\"balance\":\"100000000000000000000000000000000\"},\"0x57269331a3Ca4cEb5b08a9c897e01FC2075Fd62e\":{\"balance\":\"100000000000000000000000000000000\"},\"0xC2A42EccA74E0aCDFF66Ba23Dc2289c5395029f7\":{\"balance\":\"100000000000000000000000000000000\"},\"0x64585AAA58CCF4c43a3a8973253D035F42F6f770\":{\"balance\":\"100000000000000000000000000000000\"},\"0xe5ce5A19C862e154d404dda97E7c1e6d20Fe37a0\":{\"balance\":\"100000000000000000000000000000000\"},\"0x3c9Df888499c7a5ebaf456B589bC39f752337C09\":{\"balance\":\"100000000000000000000000000000000\"},\"0x668e67199E5005F9e22f830DA7d15C27036E6BdC\":{\"balance\":\"100000000000000000000000000000000\"},\"0xd0607835EBC3C6329b3b8F6228662E57bdaDcda3\":{\"balance\":\"100000000000000000000000000000000\"},\"0x8ABB18B5bebEDBf94F0F942CE4042F33a6c23333\":{\"balance\":\"100000000000000000000000000000000\"},\"0xf56B0A52C53E63333B97b2E370eD4523CA2B48a4\":{\"balance\":\"100000000000000000000000000000000\"},\"0x137E076D1A3C2f2a4d026Bc7A3585cA6Cf82A645\":{\"balance\":\"10000000000000000000000000000
0000\"},\"0xE6bcbd13f214e7222B97B407a1bd58fAd9972A7e\":{\"balance\":\"100000000000000000000000000000000\"},\"0x7741CCa7b222801DE2e02e5e1e8C3303337D14f0\":{\"balance\":\"100000000000000000000000000000000\"},\"0x9b697Bb3bA5B7AA6b0B6644c2D1DCf470cbF2931\":{\"balance\":\"100000000000000000000000000000000\"},\"0x0CB02dFe351E030435Baf14359CcDfFE0E78Af9d\":{\"balance\":\"100000000000000000000000000000000\"},\"0x5A5e9C7f03918D4725A09d6498FFF3c7263642Fa\":{\"balance\":\"100000000000000000000000000000000\"},\"0xA4eCF59018b19f5673F96501f5C3B874F9e3844B\":{\"balance\":\"100000000000000000000000000000000\"},\"0x628c24E4412721cF8AE2b81De7e540709BA61519\":{\"balance\":\"100000000000000000000000000000000\"},\"0xe6808bd1e6D2F15Eef15CDE4E8a88F2b150B06d2\":{\"balance\":\"100000000000000000000000000000000\"},\"0xD873Add826b261d8CC215D16e521D7E196EB3765\":{\"balance\":\"100000000000000000000000000000000\"},\"0xd92e7E084b3887f6A3CEBE22e69922c2Bf804bd5\":{\"balance\":\"100000000000000000000000000000000\"},\"0xDEDAFD5B8108736a1F271228FBa38492fcFd51Ee\":{\"balance\":\"100000000000000000000000000000000\"},\"0xF4ADdadc96A5e5Cb98eF95eF9d995EA5cb3b3749\":{\"balance\":\"100000000000000000000000000000000\"},\"0x2e195f59F1cd6d3eecf97351947abe66bF246198\":{\"balance\":\"100000000000000000000000000000000\"},\"0x7408926b9bFF8EbDD17Cc0A174f3DE0336e6323F\":{\"balance\":\"100000000000000000000000000000000\"},\"0x785c7935621cEEB727C7CAa046ca517Fb19AeA85\":{\"balance\":\"100000000000000000000000000000000\"},\"0x049348803A6D9593f0BA679d9f05193510ED9338\":{\"balance\":\"100000000000000000000000000000000\"},\"0xb0167E91D9327A43b2DB28b1a675380C05eF139E\":{\"balance\":\"100000000000000000000000000000000\"},\"0x5441C24a509f7043b5F0BfF8515c10842739447A\":{\"balance\":\"100000000000000000000000000000000\"},\"0xA57eA89602204d3C7CE92b8070165E9879316BFc\":{\"balance\":\"100000000000000000000000000000000\"},\"0x5F6c598b68F4917ec4F54f03aba489fbD65BE9Ae\":{\"balance\":\"100000000000000000000000000000000\"},\"0x6BC0881F
2bBae93f0b4C2E2cbE6c8A342fFc0e0f\":{\"balance\":\"100000000000000000000000000000000\"},\"0xea39f792191b0780f88CAabdfd0f539803850944\":{\"balance\":\"100000000000000000000000000000000\"},\"0x49Eb8558508FeeED913C9DE7168F616AD5925B32\":{\"balance\":\"100000000000000000000000000000000\"},\"0xce06B2dC5B2Ea30dF5a7A2252979A26A2123153E\":{\"balance\":\"100000000000000000000000000000000\"},\"0x070aC35cCa5989865fb6BE7083A5907f9D636eca\":{\"balance\":\"100000000000000000000000000000000\"},\"0xfEF1dEe249829FBf4a7f78be610F8C73eB04855f\":{\"balance\":\"100000000000000000000000000000000\"},\"0x0cE93e3201c3AC455E3a934Cb049F90ED481697b\":{\"balance\":\"100000000000000000000000000000000\"},\"0x9008c39bfd01596357008a5e7f34E595b066c139\":{\"balance\":\"100000000000000000000000000000000\"},\"0x10C08B856615136c103F1b5E24e2b3136809DF94\":{\"balance\":\"100000000000000000000000000000000\"},\"0xdeC49F825BF611B8271CD4F419969230De6614ed\":{\"balance\":\"100000000000000000000000000000000\"},\"0xfCca3ADE76c5a04CF70BD0A3f4FfF979eaC141ca\":{\"balance\":\"100000000000000000000000000000000\"},\"0xa3CD797Ab21f7CB670d37363Bbd4a74f2A2DcBBB\":{\"balance\":\"100000000000000000000000000000000\"},\"0xcB81156d606389E8fB1DB80A93209B9E2c575e72\":{\"balance\":\"100000000000000000000000000000000\"},\"0x0Dc75b41f57Fe00E47CFba69Ba61Cf3c271863Cb\":{\"balance\":\"100000000000000000000000000000000\"},\"0xc2D9Dfa376d197C1c6FcE02Adb9ACeE8fd85803E\":{\"balance\":\"100000000000000000000000000000000\"},\"0x40f0f512fB0A2F090246B01F6E9edd2B2558422e\":{\"balance\":\"100000000000000000000000000000000\"},\"0x364C95F8F2C7A1656165e939533DB3278635Af76\":{\"balance\":\"100000000000000000000000000000000\"},\"0x0e0e937329F464fbFae556AA277FE2F4AfD9a45E\":{\"balance\":\"100000000000000000000000000000000\"},\"0x484994f1b8604F600c4F3ec1E65D5afa3576aCc4\":{\"balance\":\"100000000000000000000000000000000\"},\"0xED8752b8a78111eA55fcbe48bB58adD6cab6A7BE\":{\"balance\":\"100000000000000000000000000000000\"},\"0xFaB935df5cbB0C4BF485C2Ccb557
503fea07A949\":{\"balance\":\"100000000000000000000000000000000\"},\"0xE10bE9DcDdb5588Ac83857B489AC8775292a196a\":{\"balance\":\"100000000000000000000000000000000\"},\"0x597E8e4835816a621e0E635Cce6304cD9b6d1B45\":{\"balance\":\"100000000000000000000000000000000\"},\"0x3366663D826C830E108361439606Ab82AF3b9dc8\":{\"balance\":\"100000000000000000000000000000000\"},\"0x1a93Aa4e4d00700A56AF1265CC080627cDE56522\":{\"balance\":\"100000000000000000000000000000000\"},\"0x1133e938080622e323cA9522040FaFcdcb40B926\":{\"balance\":\"100000000000000000000000000000000\"},\"0xeE079715EdcD6bC5ffaF8949DC1dDC468Ae1f82b\":{\"balance\":\"100000000000000000000000000000000\"},\"0xDa3a9FD4bf83df35e588b7Ae2fADB42e0dbDc70F\":{\"balance\":\"100000000000000000000000000000000\"},\"0x8Ff0377764aC3663E72314f0bF2ef96eaA97E282\":{\"balance\":\"100000000000000000000000000000000\"},\"0x34Bd64794e5411bA62854caec04f12aa7459F975\":{\"balance\":\"100000000000000000000000000000000\"},\"0xD7b8580C16ffa01230Ce7335e0eEafEc9118354F\":{\"balance\":\"100000000000000000000000000000000\"},\"0xE19D5f3e3df4dFfe88a420CA7355177677C18066\":{\"balance\":\"100000000000000000000000000000000\"},\"0xC828e09C3272Ed2AB2F2E179c4eeD331908e6384\":{\"balance\":\"100000000000000000000000000000000\"},\"0x3C0C651F74DD2162Ded4AB611280dD3969B60a84\":{\"balance\":\"100000000000000000000000000000000\"},\"0x057b9a5bD82a3bBb9D51FD45C919eA585BebE67a\":{\"balance\":\"100000000000000000000000000000000\"},\"0x8aBDb10A1EA697E0471AC1D7fFF88c7aC92bd5d2\":{\"balance\":\"100000000000000000000000000000000\"},\"0x04Bf364F32F5Aaf4144935dFbDAf39489e4C291E\":{\"balance\":\"100000000000000000000000000000000\"},\"0x907482f779C352ee590DDD446E26d148251Ef417\":{\"balance\":\"100000000000000000000000000000000\"},\"0xe75AaAA24Edb53b6e7faCDEda96C9CDd3AF93C53\":{\"balance\":\"100000000000000000000000000000000\"},\"0xC0405f4a4A10a8D624bB94e193D5445Caca28e81\":{\"balance\":\"100000000000000000000000000000000\"},\"0xbA42574bACB0487343C7D3C1765AC798C1002C76\":{\"ba
lance\":\"100000000000000000000000000000000\"},\"0x254dE1b17aD22FE34C7c36fB12EB601BD39e0fd1\":{\"balance\":\"100000000000000000000000000000000\"},\"0x0472cEB9F340FAD1b15E2B06eCDD34b6f989DF95\":{\"balance\":\"100000000000000000000000000000000\"},\"0xBb884F173EF7e55363adcf93Da3A8ec4DF1376B1\":{\"balance\":\"100000000000000000000000000000000\"},\"0xDa493AE071Ce294a8335e918A4720E757e7D1817\":{\"balance\":\"100000000000000000000000000000000\"},\"0x0f2a7DeE298bE712dD35f5607d87CDCa6dc8Aa20\":{\"balance\":\"100000000000000000000000000000000\"},\"0xe8E02a97A77A5D95e1275A31c21f6f9D8Bcd7910\":{\"balance\":\"100000000000000000000000000000000\"},\"0x0A1A546b21a090EAD8785a88F50878e671900780\":{\"balance\":\"100000000000000000000000000000000\"},\"0x615FB4Df1C6EDC0Bd900cb3025fD8bB318c47E77\":{\"balance\":\"100000000000000000000000000000000\"},\"0x00D0DC642D2A5F053807A73F8ebCAaB2eE57DAF9\":{\"balance\":\"100000000000000000000000000000000\"},\"0x22A4E556EA6683111C8D90966924B36B89530aAE\":{\"balance\":\"100000000000000000000000000000000\"},\"0xD02cDEc33Bdf2CFaA4b0Fa84b7BE2Ae3F37a100E\":{\"balance\":\"100000000000000000000000000000000\"},\"0x513AE86cC6f17457C67836c59D0DB9431bAE292A\":{\"balance\":\"100000000000000000000000000000000\"},\"0x3fFE89cFb94E2c640Ab665Aa43Ff7aF00B79e43c\":{\"balance\":\"100000000000000000000000000000000\"},\"0x3Cfc4040f0556D7B89CF54ce39702AE82E91272b\":{\"balance\":\"100000000000000000000000000000000\"},\"0xCa0a654bd79995c1268Ae6C3d431008df987ace9\":{\"balance\":\"100000000000000000000000000000000\"},\"0x96c8945eb534F729a3326D13A02d2C6258595486\":{\"balance\":\"100000000000000000000000000000000\"},\"0x14E9190974B749cf22dD78abE932c1f354224DF9\":{\"balance\":\"100000000000000000000000000000000\"},\"0x3b8885163203732A78c291466F25EaDbDA8CE635\":{\"balance\":\"100000000000000000000000000000000\"},\"0x1Ee4Cd4a47D1B656c5ba3A0ca9AB7D34042Bb759\":{\"balance\":\"100000000000000000000000000000000\"},\"0x6eE778c7779A6eF0d22a710fb36838fFe76E6Bf1\":{\"balance\":\"1000000000
00000000000000000000000\"},\"0x7fcF4425B2510a63e30E0f47D387242702181641\":{\"balance\":\"100000000000000000000000000000000\"},\"0x8156566F8730d3a379F1703408767e317cf679DE\":{\"balance\":\"100000000000000000000000000000000\"},\"0x5F08153037ee56973029F711710D65956feC1Ea2\":{\"balance\":\"100000000000000000000000000000000\"},\"0x6502Ae96E8B2eD310bACD897F3BaE0d7BeD93dd1\":{\"balance\":\"100000000000000000000000000000000\"},\"0xAFd320d0BF1bd7F3a6a7B5B0930d7A619F8957f8\":{\"balance\":\"100000000000000000000000000000000\"},\"0x2F75c8485D3e885CA3e47B93D31196Ae022e2Cf5\":{\"balance\":\"100000000000000000000000000000000\"},\"0x6AC000b4EDbf4559D55A64979a192D05bF113E14\":{\"balance\":\"100000000000000000000000000000000\"},\"0x612A0B178a40B7D628463B928ae6Fc904ADaa882\":{\"balance\":\"100000000000000000000000000000000\"},\"0x31AbDd2B134c92200Efc731C2Ee7893CC8871a7F\":{\"balance\":\"100000000000000000000000000000000\"},\"0xFdfC0e8faEA7c3d0bdC5BF6ec6E1c681Ef4e8242\":{\"balance\":\"100000000000000000000000000000000\"},\"0x5F4B557E487007f6F1145FCe9E4a42665DFE7F38\":{\"balance\":\"100000000000000000000000000000000\"},\"0xA8d59C349bfC154a81e67ac174603a7458d5F157\":{\"balance\":\"100000000000000000000000000000000\"},\"0x7dbBC365a8F739a18762815f34ED29a99e64F5f2\":{\"balance\":\"100000000000000000000000000000000\"},\"0x2ab3f465cA3D3112eb584b65B863Fa0D6e27aD82\":{\"balance\":\"100000000000000000000000000000000\"},\"0x623aEdC09abb2900624545E07e6469585510Ce8b\":{\"balance\":\"100000000000000000000000000000000\"},\"0xaF4f4cc95F9DFff1E2e049F32e3385dc42C5A9A4\":{\"balance\":\"100000000000000000000000000000000\"},\"0xb1Feb102FB9Eea85349E1FF4F6A02F2D8FCe862d\":{\"balance\":\"100000000000000000000000000000000\"},\"0xb25c4729E1F2CA6E90a225bC9D3C228b64349e0d\":{\"balance\":\"100000000000000000000000000000000\"},\"0x8D3EE1A8137642aC7f2c6930c4239554F22097d6\":{\"balance\":\"100000000000000000000000000000000\"},\"0x61ef0953395CB847BE80CE69E78f1Ed58ec0C806\":{\"balance\":\"100000000000000000000000000000
000\"},\"0x7aaD5942A4b741912432a24cE5202e3ebBb63F4F\":{\"balance\":\"100000000000000000000000000000000\"},\"0x5E95aE43d0024A851C0E07f6308feC3006091770\":{\"balance\":\"100000000000000000000000000000000\"},\"0xBaF1DDC5CF4ee09CF9C3F70f62f22562D375d7d0\":{\"balance\":\"100000000000000000000000000000000\"},\"0x83569f2ed7Ebd48A638f70c256d35bd1838b8A7c\":{\"balance\":\"100000000000000000000000000000000\"},\"0x0076a1D0088Ee73EF4E5f7D0783530DdFFf90B4A\":{\"balance\":\"100000000000000000000000000000000\"},\"0xAc3E26b3DBb751101bdD6b16d09aA2AeD0265030\":{\"balance\":\"100000000000000000000000000000000\"},\"0xAE5EC73AE62d3F3E1F9020149b89F8F0bF9D5E05\":{\"balance\":\"100000000000000000000000000000000\"},\"0x6c4A8D8A2D6DA7857024500903C9FB90F6989A68\":{\"balance\":\"100000000000000000000000000000000\"},\"0x07958969CeB4f51E731f6be1a9F2e1D28B13435d\":{\"balance\":\"100000000000000000000000000000000\"},\"0x9B060289e677d87DaCb6933dC9d67624644A03e4\":{\"balance\":\"100000000000000000000000000000000\"},\"0x2648c940Db02627F9ddCcEaFe3878C3b838B0362\":{\"balance\":\"100000000000000000000000000000000\"},\"0x241efa98F8110b1AC88a3B918668Ad60203EAF1B\":{\"balance\":\"100000000000000000000000000000000\"},\"0x95B373eb12cE4840F86D591e2a25A7C5136c0F22\":{\"balance\":\"100000000000000000000000000000000\"},\"0xDA6254963e5C0dB5eEd3FeF227021593d738E4c7\":{\"balance\":\"100000000000000000000000000000000\"},\"0xE7be70653095D17e7E0f0db5B36c25dfa7378fF7\":{\"balance\":\"100000000000000000000000000000000\"},\"0x6ea872a70F99E7dd0d802de0d74dEb32A42DF28f\":{\"balance\":\"100000000000000000000000000000000\"},\"0x0A219a7Ce6eA411F373d59431965a23Cd4c65a42\":{\"balance\":\"100000000000000000000000000000000\"},\"0xc3C8C273926A39F612A2E93c8A9c57f46C2F60af\":{\"balance\":\"100000000000000000000000000000000\"},\"0x82047e445D6ff4Df1d780294bfeb0Bd64F2b29CE\":{\"balance\":\"100000000000000000000000000000000\"},\"0x23F0371be5a57E1625997ED60050a80b1413AF2c\":{\"balance\":\"100000000000000000000000000000000\"},\"0xDCf80f6c6
01340056CFC11BA051C0FE69fEDc65d\":{\"balance\":\"100000000000000000000000000000000\"},\"0x3d261601C0433381870d16AaE2bFD0F890411bc1\":{\"balance\":\"100000000000000000000000000000000\"},\"0x4402ee5eA0AEd550810E61AC59C50B05F5BA9a52\":{\"balance\":\"100000000000000000000000000000000\"},\"0xf1f8cbbB5bF20852d087737295d9487b2C8f9fB1\":{\"balance\":\"100000000000000000000000000000000\"},\"0x5b978C61d3Ebba76b519Bf8d0712D18e5F86D2c6\":{\"balance\":\"100000000000000000000000000000000\"},\"0xEe539A28275E9d63C2AD612D2048d6f10abEea82\":{\"balance\":\"100000000000000000000000000000000\"},\"0xe5aB8fc1720cdc84980785C4Ac0bFdeFC263d86B\":{\"balance\":\"100000000000000000000000000000000\"},\"0x9f05d585284Baa3cecFf0e47446Fa9C1750fc4b7\":{\"balance\":\"100000000000000000000000000000000\"},\"0xC5A3C42E4779F2E6EFF32eb548D7c6fC1aa22393\":{\"balance\":\"100000000000000000000000000000000\"},\"0x1feBf5d1824B9159F815b6073E69F9877b494e46\":{\"balance\":\"100000000000000000000000000000000\"},\"0xC5162D0F7C49cab9e4329fC081BC96cD3F4151c1\":{\"balance\":\"100000000000000000000000000000000\"},\"0x28Ec4b9Bb554ebAc2602954e1FdCCa4a9d67C6D9\":{\"balance\":\"100000000000000000000000000000000\"},\"0x46CcD72c8e4E1fcE3398eDB9a25b66756B72E889\":{\"balance\":\"100000000000000000000000000000000\"},\"0xb142609F372d57c2Dc5E0fEdE218dFf4Be6b0877\":{\"balance\":\"100000000000000000000000000000000\"},\"0x830F9C51662F52Ab1deF7067779FCDCdc491335a\":{\"balance\":\"100000000000000000000000000000000\"},\"0x88F8c0dCdEB68F435BEbFbDAf984A1031ce4a355\":{\"balance\":\"100000000000000000000000000000000\"},\"0x03237296a99bEd8BD3B860A6e5362b2779b2D00D\":{\"balance\":\"100000000000000000000000000000000\"},\"0x46e1ED298C22680b7e1821125D5805DdDC1A5972\":{\"balance\":\"100000000000000000000000000000000\"},\"0x6B05aa0e8e72811F7778eB44AF7aD02D913771ED\":{\"balance\":\"100000000000000000000000000000000\"},\"0x8e8473474b75E36891D506C4dF928036A68C8666\":{\"balance\":\"100000000000000000000000000000000\"},\"0x11E28eBAF5F0254ef84712073DAe0
137D229560B\":{\"balance\":\"100000000000000000000000000000000\"},\"0xfbA308d7c61385E0f3eE0fd38e310dAC48a18B65\":{\"balance\":\"100000000000000000000000000000000\"},\"0xF4E021377420Afe90c1A7D2b8968904946633a64\":{\"balance\":\"100000000000000000000000000000000\"},\"0xA27d897e9947475140d6441F6B281Db9E1fBF278\":{\"balance\":\"100000000000000000000000000000000\"},\"0xfab12A48f0CA675530BE5Ba7f6A67Cf6F91D60BE\":{\"balance\":\"100000000000000000000000000000000\"},\"0x2a6597c7bD868e68cC0EdF321735c58851785A40\":{\"balance\":\"100000000000000000000000000000000\"},\"0xB3F9C7FdA446a6D974707eBA889C72CFe795f167\":{\"balance\":\"100000000000000000000000000000000\"},\"0x3b2d2C549014C975405BF2F37d016eDfAB853257\":{\"balance\":\"100000000000000000000000000000000\"},\"0xaF246C52bF09CC1652337ccd9e3a9f0F0e6D7c8a\":{\"balance\":\"100000000000000000000000000000000\"},\"0x8EDd91ac504f578eB76eC50b816d7A81F2a2b996\":{\"balance\":\"100000000000000000000000000000000\"},\"0xe9c47f38E9DAFbfFedDAe8393bF225b3f02164D2\":{\"balance\":\"100000000000000000000000000000000\"},\"0xAE9088735Fd4767b08fb2a426FfBf885b45e7a6f\":{\"balance\":\"100000000000000000000000000000000\"},\"0xE4aB204227c3442FF2e5489f5c22fdF60cC7E20F\":{\"balance\":\"100000000000000000000000000000000\"},\"0xb64315F63C434f3954fdeF0BD834c9E61aCE4e11\":{\"balance\":\"100000000000000000000000000000000\"},\"0xeec572e47016f76530f0AE6432A8c2ffd12AECA3\":{\"balance\":\"100000000000000000000000000000000\"},\"0x60714c5d1d75A0C6D2c8C75ABb6d2a2cB76c4F46\":{\"balance\":\"100000000000000000000000000000000\"},\"0x00c2B1388A83F05Dd6ce89e4D277E541468bF4E3\":{\"balance\":\"100000000000000000000000000000000\"},\"0x01121CEa027c43D90B2Cf7DBDB7E8E26e717e753\":{\"balance\":\"100000000000000000000000000000000\"},\"0xB89Ac1264a304C0Bf6C9C2422973000Cd45DE21A\":{\"balance\":\"100000000000000000000000000000000\"},\"0x85784dB4adeDc910269B22081991393912c85692\":{\"balance\":\"100000000000000000000000000000000\"},\"0x1767a912D4622558E120c98582AfDFcaB0D583a2\":{\"bal
ance\":\"100000000000000000000000000000000\"},\"0x1BB6e40563aF3fB2Bc71128815bC97cD09613983\":{\"balance\":\"100000000000000000000000000000000\"},\"0xE2aD2ffd025FbEAD5dAd5A429a51B7a2061baf6c\":{\"balance\":\"100000000000000000000000000000000\"},\"0x64854DF1016b44AcE87a4E24F6487Ca7766740c3\":{\"balance\":\"100000000000000000000000000000000\"},\"0x8AF3113986a2D82BC86d2f98EB21858A5f83Ce82\":{\"balance\":\"100000000000000000000000000000000\"},\"0x319BbB7d0dC546E800323606371A46a6366d3787\":{\"balance\":\"100000000000000000000000000000000\"},\"0xdFA2F0Fac6F45b021e54360ef59745e800F3d3a6\":{\"balance\":\"100000000000000000000000000000000\"},\"0x33a2b0d026A220765a8a240Cf6EC786ACaB480F7\":{\"balance\":\"100000000000000000000000000000000\"},\"0x3cB27E47F199fE662Bee9a8FD1E12C8D5793eF1C\":{\"balance\":\"100000000000000000000000000000000\"},\"0x4D32d341A43da2B22f706F12245592869EE5a801\":{\"balance\":\"100000000000000000000000000000000\"},\"0xaC93382128b110264f5fE8cae52694E22c635Bd0\":{\"balance\":\"100000000000000000000000000000000\"},\"0x6cAf4b1c487Ded67EDc1415b9E3A49Ed15e71436\":{\"balance\":\"100000000000000000000000000000000\"},\"0x633c4E4796Cc8832A1a94B382AB41F6CB4F97ed3\":{\"balance\":\"100000000000000000000000000000000\"},\"0x207be6207696f99dE529ce10282713D79Ca1DA52\":{\"balance\":\"100000000000000000000000000000000\"},\"0x492dC60aC4d2B5AcA25905e156D6C9700E8713E5\":{\"balance\":\"100000000000000000000000000000000\"},\"0x2bC1eac485fb46462990cAb84E8E4Ea6B0983C5c\":{\"balance\":\"100000000000000000000000000000000\"},\"0x24b925b98397a7475c841D0B59CE1D74630b944f\":{\"balance\":\"100000000000000000000000000000000\"},\"0xa4080272331813407C706D87a8C140e564808e1F\":{\"balance\":\"100000000000000000000000000000000\"},\"0xB4BeFd85837BB472D4ba318DE567841CbDf45A48\":{\"balance\":\"100000000000000000000000000000000\"},\"0xa362C304cDf8547021BE4C126ac2244938986cf9\":{\"balance\":\"100000000000000000000000000000000\"},\"0xe2Bc6342263ebd3872BfA56c9614A12aa3f0a7A0\":{\"balance\":\"10000000000
0000000000000000000000\"},\"0xBECF7d6FF3071bC53a1750541376211af2B425c8\":{\"balance\":\"100000000000000000000000000000000\"},\"0x16e7802149775754Bf737280f657576eE044DDfC\":{\"balance\":\"100000000000000000000000000000000\"},\"0x7463A676E3320Fe8DdFe53Ffb45e1e76741b14fA\":{\"balance\":\"100000000000000000000000000000000\"},\"0xF70266a4A70609E886572A9B75cEFDf05Ee7622F\":{\"balance\":\"100000000000000000000000000000000\"},\"0x4783C9ff91875c5958f99d85120C3d7dB7E45494\":{\"balance\":\"100000000000000000000000000000000\"},\"0x9A894eFc3B46e28fE6f62B38C75324cE0E81FCCb\":{\"balance\":\"100000000000000000000000000000000\"},\"0xF62e26Ee7940322a68591F1B8F999B3De84C2490\":{\"balance\":\"100000000000000000000000000000000\"},\"0xB3FcfA6387B43e402082e17fc8A8652aA27ca143\":{\"balance\":\"100000000000000000000000000000000\"},\"0x0e1b990bA0D3910514640fdd4Bd0f902E2092c5F\":{\"balance\":\"100000000000000000000000000000000\"},\"0x03ab12fC8ceE66c4E2baAA6AFF85673da037b156\":{\"balance\":\"100000000000000000000000000000000\"},\"0xa69a01Cc98229390030C0A239b17464b9aC92548\":{\"balance\":\"100000000000000000000000000000000\"},\"0xdddd49FaCa5FA2737eAcd409074670D746c098EE\":{\"balance\":\"100000000000000000000000000000000\"},\"0x34F3Add7DaAEb696C578C28f3D330782Da7d1AFe\":{\"balance\":\"100000000000000000000000000000000\"},\"0x6340Dad3fed0186c6D959c4974E5954939057546\":{\"balance\":\"100000000000000000000000000000000\"},\"0x531Ed6b1E385d199F92f96c43Ce4E443c7cd8A56\":{\"balance\":\"100000000000000000000000000000000\"},\"0x5BB7a165170Bb207F00D97f595BB5C5B539bfbbD\":{\"balance\":\"100000000000000000000000000000000\"},\"0x71405914f8eBC453014Bb533966f0F0C0ff1AAD4\":{\"balance\":\"100000000000000000000000000000000\"},\"0xba13c9f1A1c973e6Ab6eBE0FcFc687dE06be88f8\":{\"balance\":\"100000000000000000000000000000000\"},\"0x290a761e12Db29FF168f76598311d60348bE4c73\":{\"balance\":\"100000000000000000000000000000000\"},\"0x7fcd8448C87B3849AC37ccD1f7ACBE2b8F97c273\":{\"balance\":\"1000000000000000000000000000000
00\"},\"0x2cf69da1c505041Ea5F50050504a67608Cea3E57\":{\"balance\":\"100000000000000000000000000000000\"},\"0x6229a6F9bEd1b0683BAEB4DfD8848CF1Cf0Ce44e\":{\"balance\":\"100000000000000000000000000000000\"},\"0x753f14AE7220021Bde75A084b76F420294509245\":{\"balance\":\"100000000000000000000000000000000\"},\"0x8C894824fC26D042867B1A258cDE001dF819a47C\":{\"balance\":\"100000000000000000000000000000000\"},\"0x3906F7ba8C19d95aDb22294a775e4b994390Aeac\":{\"balance\":\"100000000000000000000000000000000\"},\"0x595Ee42544DA97C531DeBddd6ecD15247B178e82\":{\"balance\":\"100000000000000000000000000000000\"},\"0x14D6e32d3a4AA970d2379f4C3396415727397463\":{\"balance\":\"100000000000000000000000000000000\"},\"0x901f257e6FbcCDd111d505FC759D3bD4084F9E53\":{\"balance\":\"100000000000000000000000000000000\"},\"0x6639eB5A73FBdfd5c385c2cedcB908e330a1900B\":{\"balance\":\"100000000000000000000000000000000\"},\"0x7C9A0FCCaa3e0730181D6D20Af4af2fe597ac3B3\":{\"balance\":\"100000000000000000000000000000000\"},\"0x45c32aADf48486e8b90C2C1b94C27a937305E56C\":{\"balance\":\"100000000000000000000000000000000\"},\"0xfCDA509Bb5977e04cC68EfeA5d8e0deA5a29adAE\":{\"balance\":\"100000000000000000000000000000000\"},\"0xFFB1685Dc6e85981996d5D4e6Dae08DA3fC08CFF\":{\"balance\":\"100000000000000000000000000000000\"},\"0xa7562C241680Ab7c01f783F2B13DA4F18880c72b\":{\"balance\":\"100000000000000000000000000000000\"},\"0x2c424Bb892C724539F4eecceDcAC075d314529BF\":{\"balance\":\"100000000000000000000000000000000\"},\"0xe009D9f8EAD34b61c7E18949944524F563b414d5\":{\"balance\":\"100000000000000000000000000000000\"},\"0x6B43eC5633488D44f655B676F4FAb711770BF082\":{\"balance\":\"100000000000000000000000000000000\"},\"0x348455DE3028e266A560Fd4e14190563aaCf7c0B\":{\"balance\":\"100000000000000000000000000000000\"},\"0x731EAFe9F3f1c72fDA5f93aEDa51a2d9A3eD0b6E\":{\"balance\":\"100000000000000000000000000000000\"},\"0xF10a8CF9bDCE3bFfC4C2BEea6Ca48767098C2F8d\":{\"balance\":\"100000000000000000000000000000000\"},\"0x07fE8D4286
4f3695032B5Ef7eD95F468Df5cfC00\":{\"balance\":\"100000000000000000000000000000000\"},\"0x1aB044dF729A177D81b1b8DCefD4fEC92DF99f14\":{\"balance\":\"100000000000000000000000000000000\"},\"0x835f485eaC0Aa97940bbFF5c746c718d451109c5\":{\"balance\":\"100000000000000000000000000000000\"},\"0xE4664AA2c384E103Ed3AA30902Fd1F53Ab79F21e\":{\"balance\":\"100000000000000000000000000000000\"},\"0xdEb6A8eeC36d9301efF3fc06f5143ef507Aaa4E3\":{\"balance\":\"100000000000000000000000000000000\"},\"0x5FaeDD84418Df7Ba461F5e3edB4234FCAEab062A\":{\"balance\":\"100000000000000000000000000000000\"},\"0x8F857E8b38AD8682D134009E1DAF3f1816905EE3\":{\"balance\":\"100000000000000000000000000000000\"},\"0x96B14eC8e473be8987497cE686f2aF085e8718d3\":{\"balance\":\"100000000000000000000000000000000\"},\"0x4cAE514Ffab8ca5992Fef92dF3A6Ed470bAB4e8f\":{\"balance\":\"100000000000000000000000000000000\"},\"0x88aC10c092Fd728b17f4f63f844c8059096087b9\":{\"balance\":\"100000000000000000000000000000000\"},\"0x30BeC77539e68Dd2432c66F55566bC54d90Ef2a8\":{\"balance\":\"100000000000000000000000000000000\"},\"0x426aA98d552E1afcC3f78c2b0D53e82E1d557041\":{\"balance\":\"100000000000000000000000000000000\"},\"0xF3350833F4029A2500d7d022A39174Bb05c75220\":{\"balance\":\"100000000000000000000000000000000\"},\"0x4890C9AfAED68B78af0c5a4156c15E8428f0e926\":{\"balance\":\"100000000000000000000000000000000\"},\"0x4501D21b533f236f9048A24C5943B693d728FBc1\":{\"balance\":\"100000000000000000000000000000000\"},\"0x9BAe5599f30E5bb0f14B49cd19F422433a3f1476\":{\"balance\":\"100000000000000000000000000000000\"},\"0xF165e51D8DA2431B0E22E53d21B6a4aB6c1AbAA9\":{\"balance\":\"100000000000000000000000000000000\"},\"0x68b172ea70D03Caa42F16EB12387494895D56572\":{\"balance\":\"100000000000000000000000000000000\"},\"0x2d2AE65B7882Ca29815e3bC36517db08ce7ffccd\":{\"balance\":\"100000000000000000000000000000000\"},\"0x21Bd0167C66f1292780bB46791F3b2A61533fEad\":{\"balance\":\"100000000000000000000000000000000\"},\"0x424c41081727c0d0b3f5154cA93CbF
bB93b7E24e\":{\"balance\":\"100000000000000000000000000000000\"},\"0x64A834073be7E7b3bE00Ad267a46fcb7805BAA15\":{\"balance\":\"100000000000000000000000000000000\"},\"0xc9b72Ad2084d56f97A314462eB4E46BcF08E929e\":{\"balance\":\"100000000000000000000000000000000\"},\"0x5BCAADEFa9f4218EdB73AcE31035835759ea4d41\":{\"balance\":\"100000000000000000000000000000000\"},\"0x35729dd7c295818caEdBC119C85529c91c788e6C\":{\"balance\":\"100000000000000000000000000000000\"},\"0x4da27ED18c78D83c9d1908898C13cA36A9Cd80F1\":{\"balance\":\"100000000000000000000000000000000\"},\"0xdb6E611b56dcD7E4164366dee4D8C20bef6ca38d\":{\"balance\":\"100000000000000000000000000000000\"},\"0x9eF7e51dBe375181A8a966837de4D3980239c91C\":{\"balance\":\"100000000000000000000000000000000\"},\"0xA38C9b5104Ec030561595F90024051eaaa5b2f2b\":{\"balance\":\"100000000000000000000000000000000\"},\"0xf31a106E793E10DCa6A64C2145A9921017AAAB2A\":{\"balance\":\"100000000000000000000000000000000\"},\"0x1cBEe73952d62ddBFED557548b024c1dE641221B\":{\"balance\":\"100000000000000000000000000000000\"},\"0x081eA257a31b413C9aaF1De9dFE6A3eA902cEff7\":{\"balance\":\"100000000000000000000000000000000\"},\"0xF6c3BD74C137566a1220940e77962640eB05B602\":{\"balance\":\"100000000000000000000000000000000\"},\"0x41469a3451cF514858eeFA40Ea045E841ADFA563\":{\"balance\":\"100000000000000000000000000000000\"},\"0xc9C2A6Fd96966F2e2ec4B5E6185164511b79296d\":{\"balance\":\"100000000000000000000000000000000\"},\"0x3fFf997a1cE27C241CdC83e5F4bD1D525B061987\":{\"balance\":\"100000000000000000000000000000000\"},\"0x0Bb5c025631FDdE4D6D6b8BA485Ae3a3D4d3A3d2\":{\"balance\":\"100000000000000000000000000000000\"},\"0xC2335e6bF4f2dc4C8986dDB5dDCAF096b665F029\":{\"balance\":\"100000000000000000000000000000000\"},\"0xD2F9cc847b858CCE8DfF299e73dBb2c00388D91A\":{\"balance\":\"100000000000000000000000000000000\"},\"0xeb98E5682c5fdbB151827b38218096469ED4d4e5\":{\"balance\":\"100000000000000000000000000000000\"},\"0x094314E2d69197701E3b65692B9527bCCD3813C8\":{\"bala
nce\":\"100000000000000000000000000000000\"},\"0xbE06ff8673a6fdA5750fFfA082598a4F1F23Fd17\":{\"balance\":\"100000000000000000000000000000000\"},\"0x724F716C7ef32033ea017B9b6aF4E287db523686\":{\"balance\":\"100000000000000000000000000000000\"},\"0x8aDaD2A23807838A709eBb71A0D0F7B57f97D7c3\":{\"balance\":\"100000000000000000000000000000000\"},\"0xB0A119492E086E7290a3ECff6f5546659B2C9C40\":{\"balance\":\"100000000000000000000000000000000\"},\"0x481eE22BAA3FB86643B8148e5DC293ba38A16D39\":{\"balance\":\"100000000000000000000000000000000\"},\"0x9fc34899387eddCe47214Cf2CEfB61f95c551a00\":{\"balance\":\"100000000000000000000000000000000\"},\"0x03958e1129Ea46AE9C2d799E809874465cB77bc3\":{\"balance\":\"100000000000000000000000000000000\"},\"0x7Ccf738a4A051e9FA10c85E6fceA2e5a11a6Ad88\":{\"balance\":\"100000000000000000000000000000000\"},\"0x083C768Ac444151A12Eb92683026deFAeBF0916B\":{\"balance\":\"100000000000000000000000000000000\"},\"0x39436D09A8ba455eF4b99388a79071960403855c\":{\"balance\":\"100000000000000000000000000000000\"},\"0xF80E3ADac67a8ad78c4176f9DE9F23ee268a0B8d\":{\"balance\":\"100000000000000000000000000000000\"},\"0x7F1d27a16D434366f3b0797e5aFd286F21D1643b\":{\"balance\":\"100000000000000000000000000000000\"},\"0xBCb058F1a542473208f47Cd59D4320ec797a6670\":{\"balance\":\"100000000000000000000000000000000\"},\"0xe51605047a50fc70143d98CB0b090Bb1B157B6ae\":{\"balance\":\"100000000000000000000000000000000\"},\"0xE5c53fB8544778bb17bA4522723166CEe54AF872\":{\"balance\":\"100000000000000000000000000000000\"},\"0xCdE1959E46d18efeb0afa7BFD7C8835727683780\":{\"balance\":\"100000000000000000000000000000000\"},\"0x4bbc6Cecd418BEaed98DA3736FE6612188131ed7\":{\"balance\":\"100000000000000000000000000000000\"},\"0x0Cc292621Af3f604eB8e5Ce504E302536713D23d\":{\"balance\":\"100000000000000000000000000000000\"},\"0x48fA2fda14f2C7cdEBd97fcF2D32D72462d9De83\":{\"balance\":\"100000000000000000000000000000000\"},\"0x118ABb7d96b2a5e9c11c0EEfEb518152810588f5\":{\"balance\":\"100000000000
000000000000000000000\"},\"0x718D43607325D146d966890456e09f6657c167Ef\":{\"balance\":\"100000000000000000000000000000000\"},\"0x896903F9B1F98FcDaECbF6632eBc44455a24385C\":{\"balance\":\"100000000000000000000000000000000\"},\"0x8729D32E5e896890E0Fbf5E6F168aFCF01E487f0\":{\"balance\":\"100000000000000000000000000000000\"},\"0xa08F4E24fb72fb095048906Bd664441d1644042F\":{\"balance\":\"100000000000000000000000000000000\"},\"0xBA310A4A43cBEda67cf55e0f91C4449739c50442\":{\"balance\":\"100000000000000000000000000000000\"},\"0x08B522c1BBC0e3e5094eB853aF8432a958Eda970\":{\"balance\":\"100000000000000000000000000000000\"},\"0x90E87E067435eb3884D5ecA757Afe7DF7F4cb609\":{\"balance\":\"100000000000000000000000000000000\"},\"0xa6a8d5B1611595188B931d1Cd8688eA4150c08f9\":{\"balance\":\"100000000000000000000000000000000\"},\"0xc3FeeB734Cee7267326D807C777Ff750E882BdED\":{\"balance\":\"100000000000000000000000000000000\"},\"0x40D3c73818a699a9Cc726Eb8EFBD3d023F073514\":{\"balance\":\"100000000000000000000000000000000\"},\"0x18ebaEEaf41cE1C71Adc3A3E3f0bDBA5E494a3E1\":{\"balance\":\"100000000000000000000000000000000\"},\"0x72f52078CA687262AaF2b4ae52bfF69Bc8c391fE\":{\"balance\":\"100000000000000000000000000000000\"},\"0xb3B2EcAf38C51E6Fa6C7264E2073272ED673b0AE\":{\"balance\":\"100000000000000000000000000000000\"},\"0x8C58EE9218bCC66f392505aa2a37d625aFFcA39E\":{\"balance\":\"100000000000000000000000000000000\"},\"0xE5C97f51B2cc1e91cf9Df5Db795b7d81E408aA7C\":{\"balance\":\"100000000000000000000000000000000\"},\"0x06Ed033d777ecC6e9Dc0ca2082cb08a6Ed1e9295\":{\"balance\":\"100000000000000000000000000000000\"},\"0x4bD0a47162D36DF4b2E9D3341205EDcd8C02cf56\":{\"balance\":\"100000000000000000000000000000000\"},\"0xf83C55F2fBF7DAd3D0d3c702d15173A4a8a77FF0\":{\"balance\":\"100000000000000000000000000000000\"},\"0xBFf7E233CDF4906903c62cdE36936b4BBc3c790a\":{\"balance\":\"100000000000000000000000000000000\"},\"0x8D7FBd58cD17De6bAa9B65caB2aBd3B749bf2372\":{\"balance\":\"10000000000000000000000000000000
0\"},\"0x260E7Ca8b6eb5CF033cC7522151085D554712d6B\":{\"balance\":\"100000000000000000000000000000000\"},\"0x864F6d1379D39632c89CD51cA25223cBCc6A7C3F\":{\"balance\":\"100000000000000000000000000000000\"},\"0x86f3765d32E5110b5a49585be724c571A07a2767\":{\"balance\":\"100000000000000000000000000000000\"},\"0x17A253AaF300d4a20970675Df3CcABe938716746\":{\"balance\":\"100000000000000000000000000000000\"},\"0xc52d2e106635B9E1B5C05d641532D8B22587a30C\":{\"balance\":\"100000000000000000000000000000000\"},\"0x07E17CDbb0Ae57E89583076b467c0de20934e1f8\":{\"balance\":\"100000000000000000000000000000000\"},\"0x784892ACD0d6B858Ff405372c25E9629fc304E1F\":{\"balance\":\"100000000000000000000000000000000\"},\"0x0460866E91F11CF5c906d6b2812E7d2c626618fA\":{\"balance\":\"100000000000000000000000000000000\"},\"0xd5F793372e9dfEb7CecEF1c35ad9595fbFa12e88\":{\"balance\":\"100000000000000000000000000000000\"},\"0x0B63d67989Fa94e702Bd976fc33d83308b7ca1b7\":{\"balance\":\"100000000000000000000000000000000\"},\"0xc655cd1e5aA4B2c64297250A7D3be1389F1BF075\":{\"balance\":\"100000000000000000000000000000000\"},\"0x5763245ebf105C3E85c1872Fa7A3F737B03C8AAB\":{\"balance\":\"100000000000000000000000000000000\"},\"0x89111Cde256e71FAF3E0B8849642041ae6dD17de\":{\"balance\":\"100000000000000000000000000000000\"},\"0x4234B2AFA2788755793F022D1f74559D20Ae5143\":{\"balance\":\"100000000000000000000000000000000\"},\"0x82f319D1B0eAa2F245E91596fB37D6EC51e65914\":{\"balance\":\"100000000000000000000000000000000\"},\"0xD4ccD6DfB1f844FACa190282b90DF126bF6d6AD8\":{\"balance\":\"100000000000000000000000000000000\"},\"0x89E11E5CCbd2C3D93abbF335c3ce5f7F76e7eF15\":{\"balance\":\"100000000000000000000000000000000\"},\"0x67734C2A9805E0bEB8F2144bac61B6a8A1dC7443\":{\"balance\":\"100000000000000000000000000000000\"},\"0x3EC71a4c026a315D7D2415e901A1Cc923AEE1474\":{\"balance\":\"100000000000000000000000000000000\"},\"0xfffEc6C83c8BF5c3F4AE0cCF8c45CE20E4560BD7\":{\"balance\":\"100000000000000000000000000000000\"}},\"number\":\"0
x0\",\"gasUsed\":\"0x0\",\"parentHash\":\"0x0000000000000000000000000000000000000000000000000000000000000000\"}", - "message": "staging" -} \ No newline at end of file diff --git a/avalanchego/genesis/genesis_test.go b/avalanchego/genesis/genesis_test.go index d11e2436..7ab119c9 100644 --- a/avalanchego/genesis/genesis_test.go +++ b/avalanchego/genesis/genesis_test.go @@ -1,20 +1,22 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package genesis import ( "encoding/base64" + "encoding/hex" "encoding/json" "fmt" + "os" "path/filepath" "testing" "time" - _ "embed" - "github.com/stretchr/testify/require" + _ "embed" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/hashing" @@ -36,22 +38,23 @@ var ( func TestValidateConfig(t *testing.T) { tests := map[string]struct { - networkID uint32 - config *Config - err string + networkID uint32 + config *Config + expectedErr error }{ "mainnet": { - networkID: 1, - config: &MainnetConfig, + networkID: 1, + config: &MainnetConfig, + expectedErr: nil, }, "local": { networkID: 162, config: &LocalFlareConfig, }, "mainnet (networkID mismatch)": { - networkID: 2, - config: &MainnetConfig, - err: "networkID 2 specified but genesis config contains networkID 1", + networkID: 2, + config: &MainnetConfig, + expectedErr: errConflictingNetworkIDs, }, "invalid start time": { networkID: 162, @@ -60,7 +63,7 @@ func TestValidateConfig(t *testing.T) { thisConfig.StartTime = 999999999999999 return &thisConfig }(), - err: "start time cannot be in the future", + expectedErr: errFutureStartTime, }, "no initial supply": { networkID: 162, @@ -69,7 +72,7 @@ func TestValidateConfig(t *testing.T) { thisConfig.Allocations = []Allocation{} return &thisConfig }(), - err: errNoSupply.Error(), + expectedErr: errNoSupply, }, "no initial stakers": { 
networkID: 162, @@ -78,7 +81,7 @@ func TestValidateConfig(t *testing.T) { thisConfig.InitialStakers = []Staker{} return &thisConfig }(), - err: errNoStakers.Error(), + expectedErr: errNoStakers, }, "invalid initial stake duration": { networkID: 162, @@ -87,7 +90,7 @@ func TestValidateConfig(t *testing.T) { thisConfig.InitialStakeDuration = 0 return &thisConfig }(), - err: errNoStakeDuration.Error(), + expectedErr: errNoStakeDuration, }, "too large initial stake duration": { networkID: 12345, @@ -96,7 +99,7 @@ func TestValidateConfig(t *testing.T) { thisConfig.InitialStakeDuration = uint64(genesisStakingCfg.MaxStakeDuration+time.Second) / uint64(time.Second) return &thisConfig }(), - err: errStakeDurationTooHigh.Error(), + expectedErr: errStakeDurationTooHigh, }, "invalid stake offset": { networkID: 14, @@ -105,7 +108,7 @@ func TestValidateConfig(t *testing.T) { thisConfig.InitialStakeDurationOffset = 100000000 return &thisConfig }(), - err: "initial stake duration is 31536000 but need at least 1900000000 with offset of 100000000", + expectedErr: errInitialStakeDurationTooLow, }, "empty initial staked funds": { networkID: 162, @@ -114,7 +117,7 @@ func TestValidateConfig(t *testing.T) { thisConfig.InitialStakedFunds = []ids.ShortID(nil) return &thisConfig }(), - err: errNoInitiallyStakedFunds.Error(), + expectedErr: errNoInitiallyStakedFunds, }, "duplicate initial staked funds": { networkID: 162, @@ -123,7 +126,7 @@ func TestValidateConfig(t *testing.T) { thisConfig.InitialStakedFunds = append(thisConfig.InitialStakedFunds, thisConfig.InitialStakedFunds[0]) return &thisConfig }(), - err: "duplicated in initial staked funds", + expectedErr: errDuplicateInitiallyStakedAddress, }, "empty C-Chain genesis": { networkID: 162, @@ -132,7 +135,7 @@ func TestValidateConfig(t *testing.T) { thisConfig.CChainGenesis = "" return &thisConfig }(), - err: errNoCChainGenesis.Error(), + expectedErr: errNoCChainGenesis, }, "empty message": { networkID: 162, @@ -141,20 +144,14 @@ func 
TestValidateConfig(t *testing.T) { thisConfig.Message = "" return &thisConfig }(), + expectedErr: nil, }, } for name, test := range tests { t.Run(name, func(t *testing.T) { - require := require.New(t) - err := validateConfig(test.networkID, test.config, genesisStakingCfg) - if len(test.err) > 0 { - require.Error(err) - require.Contains(err.Error(), test.err) - return - } - require.NoError(err) + require.ErrorIs(t, err, test.expectedErr) }) } } @@ -164,61 +161,62 @@ func TestGenesisFromFile(t *testing.T) { networkID uint32 customConfig []byte missingFilepath string - err string - expected string + expectedErr error + expectedHash string }{ "flare": { networkID: constants.FlareID, customConfig: customGenesisConfigJSON, - err: "cannot override genesis config for standard network flare (14)", + expectedErr: errOverridesStandardNetworkConfig, }, "songbird": { networkID: constants.SongbirdID, customConfig: customGenesisConfigJSON, - err: "cannot override genesis config for standard network songbird (5)", + expectedErr: errOverridesStandardNetworkConfig, }, "songbird (with custom specified)": { networkID: constants.SongbirdID, customConfig: []byte(localGenesisConfigJSON), // won't load - err: "cannot override genesis config for standard network songbird (5)", + expectedErr: errOverridesStandardNetworkConfig, }, "local": { networkID: constants.LocalID, customConfig: customGenesisConfigJSON, - err: "cannot override genesis config for standard network local (12345)", + expectedErr: errOverridesStandardNetworkConfig, }, "local (with custom specified)": { networkID: constants.LocalID, customConfig: customGenesisConfigJSON, - err: "cannot override genesis config for standard network local (12345)", + expectedErr: errOverridesStandardNetworkConfig, }, "custom": { networkID: 9999, customConfig: customGenesisConfigJSON, - expected: "a1d1838586db85fe94ab1143560c3356df9ba2445794b796bba050be89f4fcb4", + expectedErr: nil, + expectedHash: 
"a1d1838586db85fe94ab1143560c3356df9ba2445794b796bba050be89f4fcb4", }, "custom (networkID mismatch)": { networkID: 9999, customConfig: []byte(localGenesisConfigJSON), - err: "networkID 9999 specified but genesis config contains networkID 12345", + expectedErr: errConflictingNetworkIDs, }, "custom (invalid format)": { networkID: 9999, customConfig: invalidGenesisConfigJSON, - err: "unable to load provided genesis config", + expectedErr: errInvalidGenesisJSON, }, "custom (missing filepath)": { networkID: 9999, missingFilepath: "missing.json", - err: "unable to load provided genesis config", + expectedErr: os.ErrNotExist, }, } for name, test := range tests { t.Run(name, func(t *testing.T) { - // test loading of genesis from file - require := require.New(t) + + // test loading of genesis from file var customFile string if len(test.customConfig) > 0 { customFile = filepath.Join(t.TempDir(), "config.json") @@ -230,18 +228,14 @@ func TestGenesisFromFile(t *testing.T) { } genesisBytes, _, err := FromFile(test.networkID, customFile, genesisStakingCfg) - if len(test.err) > 0 { - require.Error(err) - require.Contains(err.Error(), test.err) - return - } - require.NoError(err) + require.ErrorIs(err, test.expectedErr) + if test.expectedErr == nil { + genesisHash := hex.EncodeToString(hashing.ComputeHash256(genesisBytes)) + require.Equal(test.expectedHash, genesisHash, "genesis hash mismatch") - genesisHash := fmt.Sprintf("%x", hashing.ComputeHash256(genesisBytes)) - require.Equal(test.expected, genesisHash, "genesis hash mismatch") - - _, err = genesis.Parse(genesisBytes) - require.NoError(err) + _, err = genesis.Parse(genesisBytes) + require.NoError(err) + } }) } } @@ -250,52 +244,53 @@ func TestGenesisFromFlag(t *testing.T) { tests := map[string]struct { networkID uint32 customConfig []byte - err string - expected string + expectedErr error + expectedHash string }{ "flare": { - networkID: constants.FlareID, - err: "cannot override genesis config for standard network flare 
(14)", + networkID: constants.FlareID, + expectedErr: errOverridesStandardNetworkConfig, }, "songbird": { - networkID: constants.SongbirdID, - err: "cannot override genesis config for standard network songbird (5)", + networkID: constants.SongbirdID, + expectedErr: errOverridesStandardNetworkConfig, }, "local": { - networkID: constants.LocalID, - err: "cannot override genesis config for standard network local (12345)", + networkID: constants.LocalID, + expectedErr: errOverridesStandardNetworkConfig, }, "local (with custom specified)": { networkID: constants.LocalID, customConfig: customGenesisConfigJSON, - err: "cannot override genesis config for standard network local (12345)", + expectedErr: errOverridesStandardNetworkConfig, }, "custom": { networkID: 9999, customConfig: customGenesisConfigJSON, - expected: "a1d1838586db85fe94ab1143560c3356df9ba2445794b796bba050be89f4fcb4", + expectedErr: nil, + expectedHash: "a1d1838586db85fe94ab1143560c3356df9ba2445794b796bba050be89f4fcb4", }, "custom (networkID mismatch)": { networkID: 9999, customConfig: []byte(localGenesisConfigJSON), - err: "networkID 9999 specified but genesis config contains networkID 12345", + expectedErr: errConflictingNetworkIDs, }, "custom (invalid format)": { networkID: 9999, customConfig: invalidGenesisConfigJSON, - err: "unable to load genesis content from flag", + expectedErr: errInvalidGenesisJSON, }, "custom (missing content)": { - networkID: 9999, - err: "unable to load genesis content from flag", + networkID: 9999, + expectedErr: errInvalidGenesisJSON, }, } for name, test := range tests { t.Run(name, func(t *testing.T) { - // test loading of genesis content from flag/env-var - require := require.New(t) + + // test loading of genesis content from flag/env-var var genBytes []byte if len(test.customConfig) == 0 { // try loading a default config @@ -319,18 +314,14 @@ func TestGenesisFromFlag(t *testing.T) { content := base64.StdEncoding.EncodeToString(genBytes) genesisBytes, _, err := 
FromFlag(test.networkID, content, genesisStakingCfg) - if len(test.err) > 0 { - require.Error(err) - require.Contains(err.Error(), test.err) - return - } - require.NoError(err) - - genesisHash := fmt.Sprintf("%x", hashing.ComputeHash256(genesisBytes)) - require.Equal(test.expected, genesisHash, "genesis hash mismatch") + require.ErrorIs(err, test.expectedErr) + if test.expectedErr == nil { + genesisHash := hex.EncodeToString(hashing.ComputeHash256(genesisBytes)) + require.Equal(test.expectedHash, genesisHash, "genesis hash mismatch") - _, err = genesis.Parse(genesisBytes) - require.NoError(err) + _, err = genesis.Parse(genesisBytes) + require.NoError(err) + } }) } } @@ -348,10 +339,6 @@ func TestGenesis(t *testing.T) { networkID: constants.SongbirdID, expectedID: "2ACyRqRc8H5VT7DDGn4qadKfct4iTPe9buQKhAjiDyotSVkeoi", }, - { - networkID: constants.LocalID, - expectedID: "pA6uxpovoxuKFwNxGndoX9YTYDUWCFnqwDodVWvS43UWT6Zde", - }, } for _, test := range tests { t.Run(constants.NetworkIDToNetworkName[test.networkID], func(t *testing.T) { @@ -377,15 +364,28 @@ func TestVMGenesis(t *testing.T) { vmTest []vmTest }{ { - networkID: constants.MainnetID, + networkID: constants.FlareID, vmTest: []vmTest{ { vmID: constants.AVMID, - expectedID: "2oYMBNV4eNHyqk2fjjV5nVQLDbtmNJzq5s3qs3Lo6ftnC6FByM", + expectedID: "fK5e6T3EniMqagBkxXjAug9EbhFDZbEzPPr4f22uwMoP5i2cJ", }, { vmID: constants.EVMID, - expectedID: "2q9e4r6Mu3U68nU1fYjgbR6JvwrRx36CohpAX5UQxse55x1Q5", + expectedID: "umkbhSrjVw5nUvy1eo25AdrjRkPBdtzAMewuxA2rqEx4YMo4c", + }, + }, + }, + { + networkID: constants.CostwoID, + vmTest: []vmTest{ + { + vmID: constants.AVMID, + expectedID: "FJuSwZuP85eyBpuBrKECnpPedGyXoDy2hP9q4JD8qBTZGxYbJ", + }, + { + vmID: constants.EVMID, + expectedID: "vE8M98mEQH6wk56sStD1ML8HApTgSqfJZLk9gQ3Fsd4i6m3Bi", }, }, }, @@ -451,8 +451,8 @@ func TestAVAXAssetID(t *testing.T) { expectedID string }{ { - networkID: constants.MainnetID, - expectedID: "FvwEAhmxKfeiG8SnEvq42hc6whRyY3EFYAvebMqDNDGCgxN5Z", + 
networkID: constants.FlareID, + expectedID: "2MxKSeEWXViLdYyDhW1SQ46AECZEbE2bnVRZptv42JrxqyUX5k", }, { networkID: constants.SongbirdID, diff --git a/avalanchego/genesis/params.go b/avalanchego/genesis/params.go index 7336c0b3..3ef65b0a 100644 --- a/avalanchego/genesis/params.go +++ b/avalanchego/genesis/params.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package genesis @@ -69,8 +69,6 @@ func GetTxFeeConfig(networkID uint32) TxFeeConfig { return FlareParams.TxFeeConfig case constants.CostwoID: return CostwoParams.TxFeeConfig - case constants.StagingID: - return StagingParams.TxFeeConfig case constants.LocalFlareID: return LocalFlareParams.TxFeeConfig case constants.SongbirdID: @@ -92,8 +90,6 @@ func GetStakingConfig(networkID uint32) StakingConfig { return FlareParams.StakingConfig case constants.CostwoID: return CostwoParams.StakingConfig - case constants.StagingID: - return StagingParams.StakingConfig case constants.LocalFlareID: return LocalFlareParams.StakingConfig case constants.SongbirdID: diff --git a/avalanchego/genesis/unparsed_config.go b/avalanchego/genesis/unparsed_config.go index 9831d835..2ace7647 100644 --- a/avalanchego/genesis/unparsed_config.go +++ b/avalanchego/genesis/unparsed_config.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package genesis @@ -9,6 +9,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/formatting/address" + "github.com/ava-labs/avalanchego/vms/platformvm/signer" ) var errInvalidETHAddress = errors.New("invalid eth address") @@ -54,15 +55,17 @@ func (ua UnparsedAllocation) Parse() (Allocation, error) { } type UnparsedStaker struct { - NodeID ids.NodeID `json:"nodeID"` - RewardAddress string `json:"rewardAddress"` - DelegationFee uint32 `json:"delegationFee"` + NodeID ids.NodeID `json:"nodeID"` + RewardAddress string `json:"rewardAddress"` + DelegationFee uint32 `json:"delegationFee"` + Signer *signer.ProofOfPossession `json:"signer,omitempty"` } func (us UnparsedStaker) Parse() (Staker, error) { s := Staker{ NodeID: us.NodeID, DelegationFee: us.DelegationFee, + Signer: us.Signer, } _, _, avaxAddrBytes, err := address.Parse(us.RewardAddress) diff --git a/avalanchego/go.mod b/avalanchego/go.mod index 6276845a..3598b9ab 100644 --- a/avalanchego/go.mod +++ b/avalanchego/go.mod @@ -2,7 +2,6 @@ module github.com/ava-labs/avalanchego // Changes to the minimum golang version must also be replicated in // scripts/build_avalanche.sh -// scripts/local.Dockerfile // Dockerfile // README.md // go.mod (here, only major.minor can be specified) @@ -12,121 +11,145 @@ require ( github.com/DataDog/zstd v1.5.2 github.com/Microsoft/go-winio v0.5.2 github.com/NYTimes/gziphandler v1.1.1 - github.com/ava-labs/avalanche-network-runner-sdk v0.3.0 - github.com/ava-labs/coreth v0.12.0-rc.2 - github.com/ava-labs/ledger-avalanche/go v0.0.0-20230105152938-00a24d05a8c7 + github.com/ava-labs/coreth v0.13.0-rc.0 + github.com/ava-labs/ledger-avalanche/go v0.0.0-20231102202641-ae2ebdaeac34 github.com/btcsuite/btcd/btcutil v1.1.3 + github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811 github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 + github.com/ethereum/go-ethereum v1.12.0 github.com/golang-jwt/jwt/v4 v4.3.0 - github.com/golang/mock v1.6.0 
github.com/google/btree v1.1.2 + github.com/google/renameio/v2 v2.0.0 github.com/gorilla/mux v1.8.0 github.com/gorilla/rpc v1.2.0 github.com/gorilla/websocket v1.4.2 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 - github.com/holiman/bloomfilter/v2 v2.0.3 github.com/huin/goupnp v1.0.3 github.com/jackpal/gateway v1.0.6 github.com/jackpal/go-nat-pmp v1.0.2 + github.com/leanovate/gopter v0.2.9 + github.com/mitchellh/mapstructure v1.5.0 github.com/mr-tron/base58 v1.2.0 github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d - github.com/onsi/ginkgo/v2 v2.4.0 - github.com/onsi/gomega v1.24.0 + github.com/onsi/ginkgo/v2 v2.13.1 + github.com/onsi/gomega v1.29.0 github.com/pires/go-proxyproto v0.6.2 - github.com/prometheus/client_golang v1.13.0 - github.com/prometheus/client_model v0.2.0 + github.com/prometheus/client_golang v1.14.0 + github.com/prometheus/client_model v0.3.0 github.com/rs/cors v1.7.0 github.com/shirou/gopsutil v3.21.11+incompatible - github.com/spaolacci/murmur3 v1.1.0 + github.com/spf13/cast v1.5.0 + github.com/spf13/cobra v1.0.0 github.com/spf13/pflag v1.0.5 github.com/spf13/viper v1.12.0 - github.com/stretchr/testify v1.8.1 - github.com/supranational/blst v0.3.11-0.20220920110316-f72618070295 + github.com/stretchr/testify v1.8.4 + github.com/supranational/blst v0.3.11 github.com/syndtr/goleveldb v1.0.1-0.20220614013038-64ee5596c38a + github.com/thepudds/fzgen v0.4.2 + github.com/tyler-smith/go-bip32 v1.0.0 go.opentelemetry.io/otel v1.11.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.11.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.11.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.11.0 go.opentelemetry.io/otel/sdk v1.11.0 go.opentelemetry.io/otel/trace v1.11.0 - go.uber.org/zap v1.24.0 - golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d - golang.org/x/exp v0.0.0-20220426173459-3bcf042a4bf5 - golang.org/x/sync v0.1.0 - golang.org/x/term v0.5.0 - golang.org/x/time 
v0.0.0-20210723032227-1f47c861a9ac + go.uber.org/goleak v1.2.1 + go.uber.org/mock v0.4.0 + go.uber.org/zap v1.26.0 + golang.org/x/crypto v0.17.0 + golang.org/x/exp v0.0.0-20231127185646-65229373498e + golang.org/x/net v0.19.0 + golang.org/x/sync v0.5.0 + golang.org/x/term v0.15.0 + golang.org/x/time v0.0.0-20220922220347-f3bd1da661af gonum.org/v1/gonum v0.11.0 - google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c - google.golang.org/grpc v1.50.1 - google.golang.org/protobuf v1.28.1 + google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 + google.golang.org/grpc v1.58.3 + google.golang.org/protobuf v1.31.0 gopkg.in/natefinch/lumberjack.v2 v2.0.0 ) require ( + github.com/BurntSushi/toml v1.2.1 // indirect + github.com/FactomProject/basen v0.0.0-20150613233007-fe3947df716e // indirect + github.com/FactomProject/btcutilecc v0.0.0-20130527213604-d3a63a5752ec // indirect github.com/VictoriaMetrics/fastcache v1.10.0 // indirect - github.com/benbjohnson/clock v1.3.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect github.com/cenkalti/backoff/v4 v4.1.3 // indirect - github.com/cespare/xxhash/v2 v2.1.2 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/cockroachdb/errors v1.9.1 // indirect + github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect + github.com/cockroachdb/redact v1.1.3 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect github.com/davecgh/go-spew v1.1.1 // indirect - github.com/deckarep/golang-set v1.8.0 // indirect - github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91 // indirect - github.com/dop251/goja v0.0.0-20220405120441-9037c2b61cbf // indirect - github.com/ethereum/go-ethereum v1.10.26 // indirect + github.com/deckarep/golang-set/v2 v2.1.0 // indirect + github.com/dlclark/regexp2 v1.7.0 // indirect + github.com/dop251/goja v0.0.0-20230605162241-28ee0ee714f3 // indirect 
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 // indirect + github.com/frankban/quicktest v1.14.4 // indirect github.com/fsnotify/fsnotify v1.6.0 // indirect github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 // indirect - github.com/go-logr/logr v1.2.3 // indirect + github.com/getsentry/sentry-go v0.18.0 // indirect + github.com/go-logr/logr v1.3.0 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.2.6 // indirect github.com/go-sourcemap/sourcemap v2.1.3+incompatible // indirect - github.com/go-stack/stack v1.8.0 // indirect - github.com/golang/protobuf v1.5.2 // indirect - github.com/golang/snappy v0.0.4 // indirect - github.com/google/go-cmp v0.5.9 // indirect - github.com/google/uuid v1.2.0 // indirect + github.com/go-stack/stack v1.8.1 // indirect + github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/protobuf v1.5.3 // indirect + github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect + github.com/google/go-cmp v0.6.0 // indirect + github.com/google/pprof v0.0.0-20230207041349-798e818bf904 // indirect + github.com/google/uuid v1.3.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.12.0 // indirect github.com/hashicorp/go-bexpr v0.1.10 // indirect github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect github.com/hashicorp/hcl v1.0.0 // indirect - github.com/holiman/big v0.0.0-20221017200358-a027dc42d04e // indirect - github.com/holiman/uint256 v1.2.0 // indirect + github.com/holiman/bloomfilter/v2 v2.0.3 // indirect + github.com/holiman/uint256 v1.2.2-0.20230321075855-87b91420868c // indirect + github.com/inconshreveable/mousetrap v1.0.0 // indirect + github.com/klauspost/compress v1.15.15 // indirect + github.com/kr/pretty v0.3.1 // indirect + github.com/kr/text v0.2.0 // indirect github.com/magiconair/properties v1.8.6 // indirect - 
github.com/mattn/go-colorable v0.1.12 // indirect - github.com/mattn/go-isatty v0.0.14 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.16 // indirect github.com/mattn/go-runewidth v0.0.9 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect - github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/mitchellh/pointerstructure v1.2.0 // indirect github.com/olekukonko/tablewriter v0.0.5 // indirect github.com/pelletier/go-toml v1.9.5 // indirect - github.com/pelletier/go-toml/v2 v2.0.1 // indirect + github.com/pelletier/go-toml/v2 v2.0.5 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/common v0.37.0 // indirect - github.com/prometheus/procfs v0.8.0 // indirect - github.com/rjeczalik/notify v0.9.3 // indirect + github.com/prometheus/common v0.39.0 // indirect + github.com/prometheus/procfs v0.9.0 // indirect + github.com/rogpeppe/go-internal v1.10.0 // indirect + github.com/russross/blackfriday/v2 v2.1.0 // indirect + github.com/sanity-io/litter v1.5.1 // indirect github.com/spf13/afero v1.8.2 // indirect - github.com/spf13/cast v1.5.0 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect - github.com/status-im/keycard-go v0.0.0-20200402102358-957c09536969 // indirect + github.com/status-im/keycard-go v0.2.0 // indirect github.com/subosito/gotenv v1.3.0 // indirect github.com/tklauser/go-sysconf v0.3.5 // indirect github.com/tklauser/numcpus v0.2.2 // indirect - github.com/tyler-smith/go-bip39 v1.0.2 // indirect + github.com/tyler-smith/go-bip39 v1.1.0 // indirect + github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa // indirect + github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect github.com/yusufpapurcu/wmi v1.2.2 // indirect - github.com/zondax/hid v0.9.1 // 
indirect - github.com/zondax/ledger-go v0.14.1 // indirect + github.com/zondax/hid v0.9.2 // indirect + github.com/zondax/ledger-go v0.14.3 // indirect go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.11.0 // indirect go.opentelemetry.io/proto/otlp v0.19.0 // indirect - go.uber.org/atomic v1.10.0 // indirect - go.uber.org/multierr v1.8.0 // indirect - golang.org/x/net v0.7.0 // indirect - golang.org/x/sys v0.5.0 // indirect - golang.org/x/text v0.7.0 // indirect - gopkg.in/ini.v1 v1.66.4 // indirect - gopkg.in/urfave/cli.v1 v1.20.0 // indirect + go.uber.org/multierr v1.10.0 // indirect + golang.org/x/sys v0.15.0 // indirect + golang.org/x/text v0.14.0 // indirect + golang.org/x/tools v0.16.0 // indirect + google.golang.org/genproto v0.0.0-20230711160842-782d3b101e98 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98 // indirect + gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/avalanchego/go.sum b/avalanchego/go.sum index 71de57d5..f6588c9e 100644 --- a/avalanchego/go.sum +++ b/avalanchego/go.sum @@ -36,35 +36,41 @@ cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RX cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak= +github.com/BurntSushi/toml v1.2.1/go.mod 
h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/CloudyKit/fastprinter v0.0.0-20200109182630-33d98a066a53/go.mod h1:+3IMCy2vIlbG1XG/0ggNQv0SvxCAIpPM5b1nCz56Xno= +github.com/CloudyKit/jet/v3 v3.0.0/go.mod h1:HKQPgSJmdK8hdoAbKUUWajkHyHo4RaU5rMdUywE7VMo= github.com/DataDog/zstd v1.5.2 h1:vUG4lAyuPCXO0TLbXvPv7EB7cNK1QV/luu55UHLrrn8= github.com/DataDog/zstd v1.5.2/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= +github.com/FactomProject/basen v0.0.0-20150613233007-fe3947df716e h1:ahyvB3q25YnZWly5Gq1ekg6jcmWaGj/vG/MhF4aisoc= +github.com/FactomProject/basen v0.0.0-20150613233007-fe3947df716e/go.mod h1:kGUqhHd//musdITWjFvNTHn90WG9bMLBEPQZ17Cmlpw= +github.com/FactomProject/btcutilecc v0.0.0-20130527213604-d3a63a5752ec h1:1Qb69mGp/UtRPn422BH4/Y4Q3SLUrD9KHuDkm8iodFc= +github.com/FactomProject/btcutilecc v0.0.0-20130527213604-d3a63a5752ec/go.mod h1:CD8UlnlLDiqb36L110uqiP2iSflVjx9g/3U9hCI4q2U= +github.com/Joker/hpp v1.0.0/go.mod h1:8x5n+M1Hp5hC0g8okX3sR3vFQwynaX/UgSOM9MeBKzY= github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA= github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0= github.com/VictoriaMetrics/fastcache v1.10.0 h1:5hDJnLsKLpnUEToub7ETuRu8RCkb40woBZAUiKonXzY= github.com/VictoriaMetrics/fastcache v1.10.0/go.mod h1:tjiYeEfYXCqacuvYw/7UoDIeJaNxq6132xHICNP77w8= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= +github.com/ajg/form v1.5.1/go.mod 
h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8= github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/ava-labs/avalanche-network-runner-sdk v0.3.0 h1:TVi9JEdKNU/RevYZ9PyW4pULbEdS+KQDA9Ki2DUvuAs= -github.com/ava-labs/avalanche-network-runner-sdk v0.3.0/go.mod h1:SgKJvtqvgo/Bl/c8fxEHCLaSxEbzimYfBopcfrajxQk= -github.com/ava-labs/coreth v0.12.0-rc.2 h1:UNyGhuC2HxZ8eCLZiZON8xRiJkNHVZ75zknu/xqkKBA= -github.com/ava-labs/coreth v0.12.0-rc.2/go.mod h1:ZGhoIZTWbIaTmzEbprXu0hLtLdoE2PSTEFnCTYr0BRk= -github.com/ava-labs/ledger-avalanche/go v0.0.0-20230105152938-00a24d05a8c7 h1:EdxD90j5sClfL5Ngpz2TlnbnkNYdFPDXa0jDOjam65c= -github.com/ava-labs/ledger-avalanche/go v0.0.0-20230105152938-00a24d05a8c7/go.mod h1:XhiXSrh90sHUbkERzaxEftCmUz53eCijshDLZ4fByVM= -github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= -github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/ava-labs/coreth v0.13.0-rc.0 
h1:V2l3qj2ek3geKDJAnF2M94mYJK8kg2kePixujfJ0bmk= +github.com/ava-labs/coreth v0.13.0-rc.0/go.mod h1:eUMbBLDhlZASJjcbf0gIcD2GMn2rRRCUxC8MXLt5QQk= +github.com/ava-labs/ledger-avalanche/go v0.0.0-20231102202641-ae2ebdaeac34 h1:mg9Uw6oZFJKytJxgxnl3uxZOs/SB8CVHg6Io4Tf99Zc= +github.com/ava-labs/ledger-avalanche/go v0.0.0-20231102202641-ae2ebdaeac34/go.mod h1:pJxaT9bUgeRNVmNRgtCHb7sFDIRKy7CzTQVi8gGNT6g= +github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -100,12 +106,18 @@ github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk= github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/logex v1.2.0/go.mod h1:9+9sk7u7pGNWYMkh0hdiL++6OeibzJccyQU4p4MedaY= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/readline v1.5.0/go.mod h1:x22KAscuvRqlLoK9CsoYsmxoXZMMFVyOl86cAH8qUic= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod 
h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/chzyer/test v0.0.0-20210722231415-061457976a23/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cmars/basen v0.0.0-20150613233007-fe3947df716e h1:0XBUw73chJ1VYSsfvcPvVT7auykAJce9FpRr10L6Qhw= +github.com/cmars/basen v0.0.0-20150613233007-fe3947df716e/go.mod h1:P13beTBKr5Q18lJe1rIoLUqjM+CB1zYrRg44ZqGuQSA= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= @@ -114,75 +126,133 @@ github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWH github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cockroachdb/datadriven v1.0.2 h1:H9MtNqVoVhvd9nCBwOyDjUEdZCREqbIdCJD93PBm/jA= +github.com/cockroachdb/datadriven v1.0.2/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= +github.com/cockroachdb/errors v1.9.1 h1:yFVvsI0VxmRShfawbt/laCIDy/mtTqqnvoNgiy5bEV8= +github.com/cockroachdb/errors v1.9.1/go.mod h1:2sxOtL2WIc096WSZqZ5h8fa17rdDq9HZOZLBCor4mBk= +github.com/cockroachdb/logtags v0.0.0-20211118104740-dabe8e521a4f/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= +github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE= +github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= 
+github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811 h1:ytcWPaNPhNoGMWEhDvS3zToKcDpRsLuRolQJBVGdozk= +github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811/go.mod h1:Nb5lgvnQ2+oGlE/EyZy4+2/CxRh9KfvCXnag1vtpxVM= +github.com/cockroachdb/redact v1.1.3 h1:AKZds10rFSIj7qADf0g46UixK8NNLwWTNdCIGS5wfSQ= +github.com/cockroachdb/redact v1.1.3/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= +github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= +github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 
h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/deckarep/golang-set v1.8.0 h1:sk9/l/KqpunDwP7pSjUg0keiOOLEnOBHzykLrsPppp4= -github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS383rP6+o6qqo= +github.com/deckarep/golang-set/v2 v2.1.0 h1:g47V4Or+DUdzbs8FxCCmgb6VYd+ptPAngjM6dtGktsI= +github.com/deckarep/golang-set/v2 v2.1.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4= github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0= github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 h1:HbphB4TFFXpv7MNrT52FGrrgVXF1owhMVTHFZIlnvd4= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0/go.mod h1:DZGJHZMqrU4JJqFAWUS2UO1+lbSKsdiOoYi9Zzey7Fc= github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218= -github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91 h1:Izz0+t1Z5nI16/II7vuEo/nHjodOg0p7+OiDpjX5t1E= +github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc= -github.com/dop251/goja v0.0.0-20220405120441-9037c2b61cbf h1:Yt+4K30SdjOkRoRRm3vYNQgR+/ZIy0RmeUDZo7Y8zeQ= -github.com/dop251/goja v0.0.0-20220405120441-9037c2b61cbf/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk= 
+github.com/dlclark/regexp2 v1.7.0 h1:7lJfhqlPssTb1WQx4yvTHN0uElPEv52sbaECrAQxjAo= +github.com/dlclark/regexp2 v1.7.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= +github.com/dop251/goja v0.0.0-20211022113120-dc8c55024d06/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk= +github.com/dop251/goja v0.0.0-20230605162241-28ee0ee714f3 h1:+3HCtB74++ClLy8GgjUQYeC8R4ILzVcIe8+5edAJJnE= +github.com/dop251/goja v0.0.0-20230605162241-28ee0ee714f3/go.mod h1:QMWlm50DNe14hD7t24KEqZuUdC9sOTy8W6XbCU1mlw4= github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7/go.mod h1:hn7BA7c8pLvoGndExHudxTDKZ84Pyvv+90pbBjbTz0Y= +github.com/dop251/goja_nodejs v0.0.0-20211022123610-8dd9abb0616d/go.mod h1:DngW8aVqWbuLRMHItjPUyqdj+HWPvnQe8V8y1nDpIbM= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZifjYj7uP3BG/gKcuzL9xWVV/Y+cK33KM= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod 
h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/ethereum/go-ethereum v1.10.26 h1:i/7d9RBBwiXCEuyduBQzJw/mKmnvzsN14jqBmytw72s= -github.com/ethereum/go-ethereum v1.10.26/go.mod h1:EYFyF19u3ezGLD4RqOkLq+ZCXzYbLoNDdZlMt7kyKFg= +github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw= +github.com/ethereum/go-ethereum v1.12.0 h1:bdnhLPtqETd4m3mS8BGMNvBTf36bO5bx/hxE2zljOa0= +github.com/ethereum/go-ethereum v1.12.0/go.mod h1:/oo2X/dZLJjf2mJ6YT9wcWxa4nNJDBKDBU6sFIpx1Gs= +github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8= +github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c= github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= -github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= -github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= +github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= +github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc= github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 h1:f6D9Hr8xV8uYKlyuj8XIruxlh9WjVjdh1gIicAS7ays= 
github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww= +github.com/getsentry/sentry-go v0.12.0/go.mod h1:NSap0JBYWzHND8oMbyi0+XZhUalc1TBdRL1M71JZW2c= +github.com/getsentry/sentry-go v0.18.0 h1:MtBW5H9QgdcJabtZcuJG80BMOwaBpkRDZkxRkNC1sN0= +github.com/getsentry/sentry-go v0.18.0/go.mod h1:Kgon4Mby+FJ7ZWHFUAZgVaIa8sxHtnRJRLTXZr51aKQ= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s= +github.com/gin-gonic/gin v1.4.0/go.mod h1:OW2EZn3DO8Ln9oIKOvM++LBO+5UPHJJDH72/q/3rZdM= +github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= +github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= +github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= +github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= -github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.0/go.mod 
h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= -github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= +github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8= github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-sourcemap/sourcemap v2.1.3+incompatible h1:W1iEw64niKVGogNgBN3ePyLFfuisuzeidWPMPWmECqU= github.com/go-sourcemap/sourcemap v2.1.3+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg= -github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw= +github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod 
h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= +github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= +github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= +github.com/gogo/googleapis v0.0.0-20180223154316-0cd9801be74a/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= +github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/gogo/status v1.1.0/go.mod h1:BFv9nrluPLmrS0EmGVvLaPNmRosr9KapBYd5/hpY1WM= +github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= github.com/golang-jwt/jwt/v4 v4.3.0 h1:kHL1vqdqWNfATmA0FNMdmZNMyZI1U6O31X4rlIPoBog= github.com/golang-jwt/jwt/v4 v4.3.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= +github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod 
h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -193,8 +263,6 @@ github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= -github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= -github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -210,10 +278,13 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk= +github.com/golang/snappy 
v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/gomodule/redigo v1.7.1-0.20190724094224-574c33c3df38/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= @@ -229,8 +300,10 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= @@ -246,86 +319,135 @@ github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod 
h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20230207041349-798e818bf904 h1:4/hN5RUoecvl+RmJRE2YxKWtnnQls6rQjjW5oV7qg2U= +github.com/google/pprof v0.0.0-20230207041349-798e818bf904/go.mod h1:uglQLonpP8qtYCYyzA+8c/9qtqgA3qsXGYqCPKARAFg= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/renameio/v2 v2.0.0 h1:UifI23ZTGY8Tt29JbYFiuyIU3eX+RNFtUwefq9qAhxg= +github.com/google/renameio/v2 v2.0.0/go.mod h1:BtmJXm5YlszgC+TD4HOEEUFgkJP3nLxehU6hfe7jRt4= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs= -github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/rpc v1.2.0 h1:WvvdC2lNeT1SP32zrIce5l0ECBfbAlmrmSBsuc57wfk= github.com/gorilla/rpc v1.2.0/go.mod h1:V4h9r+4sF5HnzqbwIez0fKSpANP0zlYd3qR7p36jkTQ= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= 
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= github.com/grpc-ecosystem/grpc-gateway/v2 v2.12.0 h1:kr3j8iIMR4ywO/O0rvksXaJvauGGCMg2zAZIiNZ9uIQ= github.com/grpc-ecosystem/grpc-gateway/v2 v2.12.0/go.mod h1:ummNFgdgLhhX7aIiy35vVmQNS0rWXknfPE0qe6fmFXg= github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpxn4uE= github.com/hashicorp/go-bexpr v0.1.10/go.mod h1:oxlubA2vC/gFVfX1A6JGp7ls7uCDlfJn732ehYYg+g0= +github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuWpEqDnvIw251EVy4zlP8gWbsGj4BsUKCRpYs= github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/holiman/big v0.0.0-20221017200358-a027dc42d04e h1:pIYdhNkDh+YENVNi3gto8n9hAmRxKxoar0iE6BLucjw= -github.com/holiman/big v0.0.0-20221017200358-a027dc42d04e/go.mod 
h1:j9cQbcqHQujT0oKJ38PylVfqohClLr3CvDC+Qcg+lhU= github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao= github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA= -github.com/holiman/uint256 v1.2.0 h1:gpSYcPLWGv4sG43I2mVLiDZCNDh/EpGjSk8tmtxitHM= -github.com/holiman/uint256 v1.2.0/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw= +github.com/holiman/uint256 v1.2.2-0.20230321075855-87b91420868c h1:DZfsyhDK1hnSS5lH8l+JggqzEleHteTYfutAiVlSUM8= +github.com/holiman/uint256 v1.2.2-0.20230321075855-87b91420868c/go.mod h1:SC8Ryt4n+UBbPbIBKaG9zbbDlp4jOru9xFZmPzLUTxw= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huin/goupnp v1.0.3 h1:N8No57ls+MnjlB+JPiCVSOyy/ot7MJTqlo7rn+NYSqQ= github.com/huin/goupnp v1.0.3/go.mod h1:ZxNlw5WqJj6wSsRK5+YfflQGXYfccj5VgQsMNixHM7Y= github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o= +github.com/hydrogen18/memlistener v0.0.0-20200120041712-dcc25e7acd91/go.mod h1:qEIFzExnS6016fRpRfxrExeVn2gbClQA99gQhnIcdhE= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20220319035150-800ac71e25c2/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= +github.com/imkira/go-interpol v1.1.0/go.mod h1:z0h2/2T3XF8kyEPpRgJ3kmNv+C43p+I/CoI+jC3w2iA= +github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/iris-contrib/blackfriday v2.0.0+incompatible/go.mod h1:UzZ2bDEoaSGPbkg6SAB4att1aAwTmVIx/5gCVqeyUdI= +github.com/iris-contrib/go.uuid v2.0.0+incompatible/go.mod 
h1:iz2lgM/1UnEf1kP0L/+fafWORmlnuysV2EMP8MW+qe0= +github.com/iris-contrib/jade v1.1.3/go.mod h1:H/geBymxJhShH5kecoiOCSssPX7QWYH7UaeZTSWddIk= +github.com/iris-contrib/pongo2 v0.0.1/go.mod h1:Ssh+00+3GAZqSQb30AvBRNxBx7rf0GqwkjqxNd0u65g= +github.com/iris-contrib/schema v0.0.1/go.mod h1:urYA3uvUNG1TIIjOSCzHr9/LmbQo8LrOcOqfqxa4hXw= github.com/jackpal/gateway v1.0.6 h1:/MJORKvJEwNVldtGVJC2p2cwCnsSoLn3hl3zxmZT7tk= github.com/jackpal/gateway v1.0.6/go.mod h1:lTpwd4ACLXmpyiCTRtfiNyVnUmqT9RivzCDQetPfnjA= github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod 
h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k= +github.com/kataras/golog v0.0.10/go.mod h1:yJ8YKCmyL+nWjERB90Qwn+bdyBZsaQwU3bTVFgkFIp8= +github.com/kataras/iris/v12 v12.1.8/go.mod h1:LMYy4VlP67TQ3Zgriz8RE2h2kMZV2SgMYbq3UhfoFmE= +github.com/kataras/neffos v0.0.14/go.mod h1:8lqADm8PnbeFfL7CLXh1WHw53dG27MC3pgi2R1rmoTE= +github.com/kataras/pio v0.0.2/go.mod h1:hAoW0t9UmXi4R5Oyq5Z4irTbaTsOemSrDGUtaTl7Dro= +github.com/kataras/sitemap v0.0.5/go.mod h1:KY2eugMKiPwsJgx7+U103YZehfvNGOXURubcGyk0Bz8= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= +github.com/klauspost/compress v1.8.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.9.7/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.15.15 h1:EF27CXIuDsYJ6mmvtBRlEuB2UVOqHG1tAXgZ7yIO+lw= +github.com/klauspost/compress v1.15.15/go.mod h1:ZcK2JAFqKOpnBlxcLsJzYfrS9X1akm9fHZNnD9+Vo/4= +github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty 
v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/labstack/echo/v4 v4.5.0/go.mod h1:czIriw4a0C1dFun+ObrXp7ok03xON0N1awStJ6ArI7Y= +github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k= +github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= +github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo= github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= -github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= -github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= -github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= +github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.8/go.mod 
h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.11/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= -github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= +github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/mediocregopher/radix/v3 v3.4.2/go.mod h1:8FL3F6UQRXHXIBSPUs5h0RybMF8i4n7wVopoX3x7Bv8= +github.com/microcosm-cc/bluemonday v1.0.2/go.mod 
h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= @@ -335,141 +457,196 @@ github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/moul/http2curl v1.0.0/go.mod h1:8UbvGypXm98wA/IqH45anm5Y2Z6ep6O31QGOAZ3H0fQ= github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= +github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= +github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d 
h1:AREM5mwr4u1ORQBMvzfzBgpsctsbQikCVpvC+tX285E= github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= -github.com/onsi/ginkgo/v2 v2.4.0 h1:+Ig9nvqgS5OBSACXNk15PLdp0U9XPYROt9CFzVdFGIs= -github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo= +github.com/onsi/ginkgo/v2 v2.13.1 h1:LNGfMbR2OVGBfXjvRZIZ2YCTQdGKtPLvuI1rMCCj3OU= +github.com/onsi/ginkgo/v2 v2.13.1/go.mod h1:XStQ8QcGwLyF4HdfcZB8SFOS/MWCgDuXMSBe6zrvLgM= github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega 
v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= -github.com/onsi/gomega v1.24.0 h1:+0glovB9Jd6z3VR+ScSwQqXVTIfJcGA9UBM8yzQxhqg= -github.com/onsi/gomega v1.24.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg= +github.com/onsi/gomega v1.29.0 h1:KIA/t2t5UBzoirT4H9tsML45GEbo3ouUnBHsCfD2tVg= +github.com/onsi/gomega v1.29.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= -github.com/pelletier/go-toml/v2 v2.0.1 h1:8e3L2cCQzLFi2CR4g7vGFuFxX7Jl1kKX8gW+iV0GUKU= -github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo= +github.com/pelletier/go-toml/v2 v2.0.5 h1:ipoSadvV8oGUjnUbMub59IDPPwfxF694nG/jwbMiyQg= +github.com/pelletier/go-toml/v2 v2.0.5/go.mod h1:OMHamSCAODeSsVrwwvcJOaoN0LIUIaFVNZzmWyNfXas= +github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= +github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= github.com/pires/go-proxyproto v0.6.2 h1:KAZ7UteSOt6urjme6ZldyFm4wDe/z0ZUP0Yv0Dos0d8= github.com/pires/go-proxyproto v0.6.2/go.mod h1:Odh9VFOZJCf9G8cLW5o435Xf1J95Jw9Gw5rnCjcwzAY= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= 
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= +github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= -github.com/prometheus/client_golang v1.13.0 h1:b71QUfeo5M8gq2+evJdTPfZhYMAU0uKPkyPJ7TPsloU= -github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= +github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= +github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= -github.com/prometheus/client_model v0.2.0/go.mod 
h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE= -github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= +github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= +github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.39.0 h1:oOyhkDq05hPZKItWVBkJ6g6AtGxi+fy7F4JvUV8uhsI= +github.com/prometheus/common v0.39.0/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= -github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= -github.com/rjeczalik/notify v0.9.3 h1:6rJAzHTGKXGj76sbRgDiDcYj/HniypXmSJo1SWakZeY= -github.com/rjeczalik/notify v0.9.3/go.mod 
h1:gF3zSOrafR9DQEWSE8TjfI9NkooDxbyT4UgRGKZA0lc= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI= +github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/sanity-io/litter v1.5.1 
h1:dwnrSypP6q56o3lFxTU+t2fwQ9A+U5qrXVO4Qg9KwVU= +github.com/sanity-io/litter v1.5.1/go.mod h1:5Z71SvaYy5kcGtyglXOC9rrUi3c1E8CamFWjQsazTh0= +github.com/schollz/closestmatch v2.1.0+incompatible/go.mod h1:RtP1ddjLong6gTkbtmuhtR2uUrrJOpYzYRvbcPAid+g= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI= github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= -github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.8.2 h1:xehSyVa0YnHWsJ49JFljMpg1HX19V6NDZ1fkm1Xznbo= github.com/spf13/afero v1.8.2/go.mod h1:CtAatgMJh6bJEIs48Ay/FOnkljP3WeGUG0MC1RfAqwo= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.5.0 
h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/cobra v1.0.0 h1:6m/oheQuQ13N9ks4hubMG6BnvwOeaJrqSPLahSnczz8= +github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/spf13/viper v1.12.0 h1:CZ7eSOd3kZoaYDLbXnmzgQI5RlciuXBMA+18HwHRfZQ= github.com/spf13/viper v1.12.0/go.mod h1:b6COn30jlNxbm/V2IqWiNWkJ+vZNiMNksliPCiuKtSI= -github.com/status-im/keycard-go v0.0.0-20200402102358-957c09536969 h1:Oo2KZNP70KE0+IUJSidPj/BFS/RXNHmKIJOdckzml2E= -github.com/status-im/keycard-go v0.0.0-20200402102358-957c09536969/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q= +github.com/status-im/keycard-go v0.2.0 h1:QDLFswOQu1r5jsycloeQh3bVU8n/NatHHaZobtDnDzA= +github.com/status-im/keycard-go v0.2.0/go.mod h1:wlp8ZLbsmrF6g6WjugPAx+IzoLrkdf9+mHxBEeo3Hbg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod 
h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v0.0.0-20161117074351-18a02ba4a312/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.1.5-0.20170601210322-f6abca593680/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/subosito/gotenv v1.3.0 h1:mjC+YW8QpAdXibNi+vNWgzmgBH4+5l5dCXv8cNysBLI= github.com/subosito/gotenv v1.3.0/go.mod h1:YzJjq/33h7nrwdY+iHMhEOEEbW0ovIz0tB6t6PwAXzs= -github.com/supranational/blst v0.3.11-0.20220920110316-f72618070295 h1:rVKS9JjtqE4/PscoIsP46sRnJhfq8YFbjlk0fUJTRnY= -github.com/supranational/blst v0.3.11-0.20220920110316-f72618070295/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= +github.com/supranational/blst v0.3.11 h1:LyU6FolezeWAhvQk0k6O/d49jqgO52MSDDfYgbeoEm4= +github.com/supranational/blst v0.3.11/go.mod 
h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= github.com/syndtr/goleveldb v1.0.1-0.20220614013038-64ee5596c38a h1:1ur3QoCqvE5fl+nylMaIr9PVV1w343YRDtsy+Rwu7XI= github.com/syndtr/goleveldb v1.0.1-0.20220614013038-64ee5596c38a/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48= +github.com/thepudds/fzgen v0.4.2 h1:HlEHl5hk2/cqEomf2uK5SA/FeJc12s/vIHmOG+FbACw= +github.com/thepudds/fzgen v0.4.2/go.mod h1:kHCWdsv5tdnt32NIHYDdgq083m6bMtaY0M+ipiO9xWE= github.com/tklauser/go-sysconf v0.3.5 h1:uu3Xl4nkLzQfXNsWn15rPc/HQCJKObbt1dKJeWp3vU4= github.com/tklauser/go-sysconf v0.3.5/go.mod h1:MkWzOF4RMCshBAMXuhXJs64Rte09mITnppBXY/rYEFI= github.com/tklauser/numcpus v0.2.2 h1:oyhllyrScuYI6g+h/zUvNXNp1wy7x8qQy3t/piefldA= github.com/tklauser/numcpus v0.2.2/go.mod h1:x3qojaO3uyYt0i56EW/VUYs7uBvdl2fkfZFu0T9wgjM= -github.com/tyler-smith/go-bip39 v1.0.2 h1:+t3w+KwLXO6154GNJY+qUtIxLTmFjfUmpguQT1OlOT8= -github.com/tyler-smith/go-bip39 v1.0.2/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tyler-smith/go-bip32 v1.0.0 h1:sDR9juArbUgX+bO/iblgZnMPeWY1KZMUC2AFUJdv5KE= +github.com/tyler-smith/go-bip32 v1.0.0/go.mod h1:onot+eHknzV4BVPwrzqY5OoVpyCvnwD7lMawL5aQupE= +github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8= +github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= 
+github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa h1:5SqCsI/2Qya2bCzK15ozrqo2sZxkh0FHynJZOTVoV6Q= +github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa/go.mod h1:1CNUng3PtjQMtRzJO4FMXBQvkGtuYRxxiR9xMa7jMwI= +github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= +github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasthttp v1.6.0/go.mod h1:FstJa9V+Pj9vQ7OJie2qMHdwemEDaDiSdBnvPM1Su9w= +github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= +github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= +github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU= +github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8= +github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0/go.mod h1:/LWChgwKmvncFJFHJ7Gvn9wZArjbV5/FppcK2fKk/tI= +github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg= +github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM= +github.com/yudai/pp 
v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg= github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= -github.com/zondax/hid v0.9.1 h1:gQe66rtmyZ8VeGFcOpbuH3r7erYtNEAezCAYu8LdkJo= -github.com/zondax/hid v0.9.1/go.mod h1:l5wttcP0jwtdLjqjMMWFVEE7d1zO0jvSPA9OPZxWpEM= -github.com/zondax/ledger-go v0.14.1 h1:Pip65OOl4iJ84WTpA4BKChvOufMhhbxED3BaihoZN4c= -github.com/zondax/ledger-go v0.14.1/go.mod h1:fZ3Dqg6qcdXWSOJFKMG8GCTnD7slO/RL2feOQv8K320= +github.com/zondax/hid v0.9.2 h1:WCJFnEDMiqGF64nlZz28E9qLVZ0KSJ7xpc5DLEyma2U= +github.com/zondax/hid v0.9.2/go.mod h1:l5wttcP0jwtdLjqjMMWFVEE7d1zO0jvSPA9OPZxWpEM= +github.com/zondax/ledger-go v0.14.3 h1:wEpJt2CEcBJ428md/5MgSLsXLBos98sBOyxNmCjfUCw= +github.com/zondax/ledger-go v0.14.3/go.mod h1:IKKaoxupuB43g4NxeQmbLXv7T9AlQyie1UpHb342ycI= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -493,26 +670,34 @@ go.opentelemetry.io/otel/trace v1.11.0/go.mod h1:nyYjis9jy0gytE9LXGU+/m1sHTKbRY0 go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= 
go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw= go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= -go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= -go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= -go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= -go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8= -go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= -go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= -go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= +go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= +go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU= +go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.10.0 h1:S0h4aNzvfcFsC3dRF1jLoaov7oRaKqRGC/pUEJ2yvPQ= +go.uber.org/multierr v1.10.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= +go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= +golang.org/x/crypto v0.0.0-20170613210332-850760c427c5/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= 
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191227163750-53104e6ec876/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d h1:sK3txAijHtOK88l68nt020reeT1ZdKLIYetKl95FzVY= -golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= +golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod 
h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -523,8 +708,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20220426173459-3bcf042a4bf5 h1:rxKZ2gOnYxjfmakvUUqh9Gyb6KXfrj7JWTxORTYqb0E= -golang.org/x/exp v0.0.0-20220426173459-3bcf042a4bf5/go.mod h1:lgLbSvA5ygNOMpwM/9anMpWVlVJ7Z+cHWq/eFuinpGE= +golang.org/x/exp v0.0.0-20231127185646-65229373498e h1:Gvh4YaCaXNs6dKTlfgismwWZKyjVZXwOPfIyUaqU3No= +golang.org/x/exp v0.0.0-20231127185646-65229373498e/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -538,6 +723,7 @@ golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRu golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile 
v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= @@ -549,22 +735,28 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= +golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190327091125-710a502c58a2/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net 
v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -588,12 +780,12 @@ golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= -golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20211008194852-3b03d305991f/go.mod 
h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= -golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= +golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -603,9 +795,7 @@ golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -617,22 +807,26 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= +golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180926160741-c2ed4eda69e7/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -641,8 +835,8 @@ golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -655,8 +849,6 @@ golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -670,25 +862,29 @@ golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210316164454-77fc1eacc6aa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220405052023-b1e9470b6e64/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= -golang.org/x/sys v0.5.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.5.0 h1:n2a8QNdAb0sZNpU9R1ALUXBbY+w51fCQDN+7EdxNBsY= -golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4= +golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -698,19 +894,25 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time 
v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs= -golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20220922220347-f3bd1da661af h1:Yx9k8YCG3dvF87UAn2tu2HQLf2dt/eR1bXxpLMWeH+Y= +golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181221001348-537d06c36207/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190327201419-c70d86f8b7cf/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod 
h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= @@ -742,6 +944,7 @@ golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= @@ -751,9 +954,13 @@ golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.16.0 h1:GO788SKMRunPIBCXiQyo2AaexLstOrVhuAL5YwsckQM= +golang.org/x/tools v0.16.0/go.mod 
h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -787,6 +994,7 @@ google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180518175338-11a468237815/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -824,11 +1032,18 @@ google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c 
h1:QgY/XxIAIeccR+Ca/rDdKubLIU9rcJ3xfy1DC/Wd2Oo= -google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= +google.golang.org/genproto v0.0.0-20230711160842-782d3b101e98 h1:Z0hjGZePRE0ZBWotvtrwxFNrNE9CUAGtplaDK5NNI/g= +google.golang.org/genproto v0.0.0-20230711160842-782d3b101e98/go.mod h1:S7mY02OqCJTD0E1OiQy1F72PWFB4bZJ87cAtLPYgDR0= +google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98 h1:FmF5cCW94Ij59cfpoLiwTgodWmm60eEV0CjlsVg2fuw= +google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 h1:bVf09lpb+OJbByTj913DRJioFFAjf/ZGxEz7MajTp2U= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM= +google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= @@ -845,10 +1060,11 @@ google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc 
v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.50.1 h1:DS/BukOZWp8s6p4Dt/tOaJaTQyPyOoCcrjroHuCeLzY= -google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.58.3 h1:BjnpXut1btbtgN/6sp+brB2Kbm2LjNXnidYujAVbSoQ= +google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -862,8 +1078,8 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= -google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -872,22 +1088,26 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntN gopkg.in/check.v1 
v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/ini.v1 v1.66.4 h1:SsAcf+mM7mRZo2nJNGt8mZCjG8ZRaNGMURJw7BsIST4= -gopkg.in/ini.v1 v1.66.4/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= +gopkg.in/go-playground/validator.v8 v8.18.2/go.mod h1:RX2a/7Ha8BgOhfk7j780h4/u/RRjR0eouCJSH80/M2Y= +gopkg.in/ini.v1 v1.51.1/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/urfave/cli.v1 v1.20.0 h1:NdAVW6RYxDif9DhDHaAortIu956m2c0v+09AZBPTbE0= -gopkg.in/urfave/cli.v1 v1.20.0/go.mod h1:vuBzUtMdQeixQj8LVd+/98pzhxNGQoyuPBlsXHOQNO0= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20191120175047-4206685974f2/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= @@ -899,6 +1119,8 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +launchpad.net/gocheck v0.0.0-20140225173054-000000000087 h1:Izowp2XBH6Ya6rv+hqbceQyw/gSGoXfH/UPoTGduL54= +launchpad.net/gocheck v0.0.0-20140225173054-000000000087/go.mod h1:hj7XX3B/0A+80Vse0e+BUHsHMTEhd0O4cpUHr/e/BUM= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/avalanchego/ids/aliases.go b/avalanchego/ids/aliases.go index f0f10139..484c6f8a 100644 --- a/avalanchego/ids/aliases.go +++ b/avalanchego/ids/aliases.go @@ -1,24 +1,39 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package ids import ( + "errors" "fmt" "sync" ) +var ( + ErrNoIDWithAlias = errors.New("there is no ID with alias") + errNoAliasForID = errors.New("there is no alias for ID") + errAliasAlreadyMapped = errors.New("alias already mapped to an ID") +) + // AliaserReader allows one to lookup the aliases given to an ID. type AliaserReader interface { + // Lookup returns the ID associated with alias Lookup(alias string) (ID, error) + + // PrimaryAlias returns the first alias of [id] PrimaryAlias(id ID) (string, error) + + // Aliases returns the aliases of an ID Aliases(id ID) ([]string, error) } -// Aliaser allows one to give an ID aliases. An ID can have arbitrarily many -// aliases; two IDs may not have the same alias. +// AliaserWriter allows one to give an ID aliases. An ID can have arbitrarily +// many aliases; two IDs may not have the same alias. type AliaserWriter interface { + // Alias gives [id] the alias [alias] Alias(id ID, alias string) error + + // RemoveAliases of the provided ID RemoveAliases(id ID) } @@ -27,6 +42,9 @@ type AliaserWriter interface { type Aliaser interface { AliaserReader AliaserWriter + + // PrimaryAliasOrDefault returns the first alias of [id], or ID string as a + // default if no alias exists PrimaryAliasOrDefault(id ID) string } @@ -43,7 +61,6 @@ func NewAliaser() Aliaser { } } -// Lookup returns the ID associated with alias func (a *aliaser) Lookup(alias string) (ID, error) { a.lock.RLock() defer a.lock.RUnlock() @@ -51,22 +68,20 @@ func (a *aliaser) Lookup(alias string) (ID, error) { if id, ok := a.dealias[alias]; ok { return id, nil } - return ID{}, fmt.Errorf("there is no ID with alias %s", alias) + return ID{}, fmt.Errorf("%w: %s", ErrNoIDWithAlias, alias) } -// PrimaryAlias returns the first alias of [id] func (a *aliaser) PrimaryAlias(id ID) (string, error) { a.lock.RLock() defer a.lock.RUnlock() aliases := a.aliases[id] if len(aliases) == 0 { - return "", fmt.Errorf("there is no alias for ID %s", id) + return "", fmt.Errorf("%w: 
%s", errNoAliasForID, id) } return aliases[0], nil } -// PrimaryAliasOrDefault returns the first alias of [id], or ID string as default func (a *aliaser) PrimaryAliasOrDefault(id ID) string { alias, err := a.PrimaryAlias(id) if err != nil { @@ -75,7 +90,6 @@ func (a *aliaser) PrimaryAliasOrDefault(id ID) string { return alias } -// Aliases returns the aliases of an ID func (a *aliaser) Aliases(id ID) ([]string, error) { a.lock.RLock() defer a.lock.RUnlock() @@ -83,13 +97,12 @@ func (a *aliaser) Aliases(id ID) ([]string, error) { return a.aliases[id], nil } -// Alias gives [id] the alias [alias] func (a *aliaser) Alias(id ID, alias string) error { a.lock.Lock() defer a.lock.Unlock() if _, exists := a.dealias[alias]; exists { - return fmt.Errorf("%s is already used as an alias for an ID", alias) + return fmt.Errorf("%w: %s", errAliasAlreadyMapped, alias) } a.dealias[alias] = id @@ -97,7 +110,6 @@ func (a *aliaser) Alias(id ID, alias string) error { return nil } -// RemoveAliases of the provided ID func (a *aliaser) RemoveAliases(id ID) { a.lock.Lock() defer a.lock.Unlock() diff --git a/avalanchego/ids/aliases_test.go b/avalanchego/ids/aliases_test.go index 624d40eb..6c77d744 100644 --- a/avalanchego/ids/aliases_test.go +++ b/avalanchego/ids/aliases_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package ids @@ -22,17 +22,13 @@ func TestPrimaryAliasOrDefaultTest(t *testing.T) { aliaser := NewAliaser() id1 := ID{'J', 'a', 'm', 'e', 's', ' ', 'G', 'o', 'r', 'd', 'o', 'n'} id2 := ID{'B', 'r', 'u', 'c', 'e', ' ', 'W', 'a', 'y', 'n', 'e'} - err := aliaser.Alias(id2, "Batman") - require.NoError(err) + require.NoError(aliaser.Alias(id2, "Batman")) - err = aliaser.Alias(id2, "Dark Knight") - require.NoError(err) + require.NoError(aliaser.Alias(id2, "Dark Knight")) res := aliaser.PrimaryAliasOrDefault(id1) require.Equal(res, id1.String()) expected := "Batman" - res = aliaser.PrimaryAliasOrDefault(id2) - require.NoError(err) - require.Equal(expected, res) + require.Equal(expected, aliaser.PrimaryAliasOrDefault(id2)) } diff --git a/avalanchego/ids/bits.go b/avalanchego/ids/bits.go index a884578f..bb358670 100644 --- a/avalanchego/ids/bits.go +++ b/avalanchego/ids/bits.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package ids diff --git a/avalanchego/ids/bits_test.go b/avalanchego/ids/bits_test.go index 429da100..feb38190 100644 --- a/avalanchego/ids/bits_test.go +++ b/avalanchego/ids/bits_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package ids @@ -10,6 +10,8 @@ import ( "strings" "testing" "time" + + "github.com/stretchr/testify/require" ) func flip(b uint8) uint8 { @@ -38,46 +40,40 @@ func Check(start, stop int, id1, id2 ID) bool { } func TestEqualSubsetEarlyStop(t *testing.T) { + require := require.New(t) + id1 := ID{0xf0, 0x0f} id2 := ID{0xf0, 0x1f} - if !EqualSubset(0, 12, id1, id2) { - t.Fatalf("Should have passed: %08b %08b == %08b %08b", id1[0], id1[1], id2[0], id2[1]) - } else if EqualSubset(0, 13, id1, id2) { - t.Fatalf("Should not have passed: %08b %08b == %08b %08b", id1[0], id1[1], id2[0], id2[1]) - } + require.True(EqualSubset(0, 12, id1, id2)) + require.False(EqualSubset(0, 13, id1, id2)) } func TestEqualSubsetLateStart(t *testing.T) { id1 := ID{0x1f, 0xf8} id2 := ID{0x10, 0x08} - if !EqualSubset(4, 12, id1, id2) { - t.Fatalf("Should have passed: %08b %08b == %08b %08b", id1[0], id1[1], id2[0], id2[1]) - } + require.True(t, EqualSubset(4, 12, id1, id2)) } func TestEqualSubsetSameByte(t *testing.T) { id1 := ID{0x18} id2 := ID{0xfc} - if !EqualSubset(3, 5, id1, id2) { - t.Fatalf("Should have passed: %08b == %08b", id1[0], id2[0]) - } + require.True(t, EqualSubset(3, 5, id1, id2)) } func TestEqualSubsetBadMiddle(t *testing.T) { id1 := ID{0x18, 0xe8, 0x55} id2 := ID{0x18, 0x8e, 0x55} - if EqualSubset(0, 8*3, id1, id2) { - t.Fatalf("Should not have passed: %08b == %08b", id1[1], id2[1]) - } + require.False(t, EqualSubset(0, 8*3, id1, id2)) } func TestEqualSubsetAll3Bytes(t *testing.T) { rand.Seed(time.Now().UnixNano()) seed := uint64(rand.Int63()) // #nosec G404 + t.Logf("seed: %d", seed) id1 := ID{}.Prefix(seed) for i := 0; i < BitsPerByte; i++ { @@ -87,12 +83,7 @@ func TestEqualSubsetAll3Bytes(t *testing.T) { for start := 0; start < BitsPerByte*3; start++ { for end := start; end <= BitsPerByte*3; end++ { - if EqualSubset(start, end, id1, id2) != Check(start, end, id1, id2) { - t.Fatalf("Subset failed on seed %d:\ns = %d\ne = %d\n%08b %08b %08b == %08b %08b %08b", - seed, start, 
end, - id1[0], id1[1], id1[2], - id2[0], id2[1], id2[2]) - } + require.Equal(t, Check(start, end, id1, id2), EqualSubset(start, end, id1, id2)) } } } @@ -104,77 +95,77 @@ func TestEqualSubsetOutOfBounds(t *testing.T) { id1 := ID{0x18, 0xe8, 0x55} id2 := ID{0x18, 0x8e, 0x55} - if EqualSubset(0, math.MaxInt32, id1, id2) { - t.Fatalf("Should not have passed") - } + require.False(t, EqualSubset(0, math.MaxInt32, id1, id2)) } func TestFirstDifferenceSubsetEarlyStop(t *testing.T) { + require := require.New(t) + id1 := ID{0xf0, 0x0f} id2 := ID{0xf0, 0x1f} - if _, found := FirstDifferenceSubset(0, 12, id1, id2); found { - t.Fatalf("Shouldn't have found a difference: %08b %08b == %08b %08b", id1[0], id1[1], id2[0], id2[1]) - } else if index, found := FirstDifferenceSubset(0, 13, id1, id2); !found { - t.Fatalf("Should have found a difference: %08b %08b == %08b %08b", id1[0], id1[1], id2[0], id2[1]) - } else if index != 12 { - t.Fatalf("Found a difference at index %d expected %d: %08b %08b == %08b %08b", index, 12, id1[0], id1[1], id2[0], id2[1]) - } + _, found := FirstDifferenceSubset(0, 12, id1, id2) + require.False(found) + + index, found := FirstDifferenceSubset(0, 13, id1, id2) + require.True(found) + require.Equal(12, index) } func TestFirstDifferenceEqualByte4(t *testing.T) { + require := require.New(t) + id1 := ID{0x10} id2 := ID{0x00} - if _, found := FirstDifferenceSubset(0, 4, id1, id2); found { - t.Fatalf("Shouldn't have found a difference: %08b == %08b", id1[0], id2[0]) - } else if index, found := FirstDifferenceSubset(0, 5, id1, id2); !found { - t.Fatalf("Should have found a difference: %08b == %08b", id1[0], id2[0]) - } else if index != 4 { - t.Fatalf("Found a difference at index %d expected %d: %08b == %08b", index, 4, id1[0], id2[0]) - } + _, found := FirstDifferenceSubset(0, 4, id1, id2) + require.False(found) + + index, found := FirstDifferenceSubset(0, 5, id1, id2) + require.True(found) + require.Equal(4, index) } func TestFirstDifferenceEqualByte5(t 
*testing.T) { + require := require.New(t) + id1 := ID{0x20} id2 := ID{0x00} - if _, found := FirstDifferenceSubset(0, 5, id1, id2); found { - t.Fatalf("Shouldn't have found a difference: %08b == %08b", id1[0], id2[0]) - } else if index, found := FirstDifferenceSubset(0, 6, id1, id2); !found { - t.Fatalf("Should have found a difference: %08b == %08b", id1[0], id2[0]) - } else if index != 5 { - t.Fatalf("Found a difference at index %d expected %d: %08b == %08b", index, 5, id1[0], id2[0]) - } + _, found := FirstDifferenceSubset(0, 5, id1, id2) + require.False(found) + + index, found := FirstDifferenceSubset(0, 6, id1, id2) + require.True(found) + require.Equal(5, index) } func TestFirstDifferenceSubsetMiddle(t *testing.T) { + require := require.New(t) + id1 := ID{0xf0, 0x0f, 0x11} id2 := ID{0xf0, 0x1f, 0xff} - if index, found := FirstDifferenceSubset(0, 24, id1, id2); !found { - t.Fatalf("Should have found a difference: %08b %08b %08b == %08b %08b %08b", id1[0], id1[1], id1[2], id2[0], id2[1], id2[2]) - } else if index != 12 { - t.Fatalf("Found a difference at index %d expected %d: %08b %08b %08b == %08b %08b %08b", index, 12, id1[0], id1[1], id1[2], id2[0], id2[1], id2[2]) - } + index, found := FirstDifferenceSubset(0, 24, id1, id2) + require.True(found) + require.Equal(12, index) } func TestFirstDifferenceStartMiddle(t *testing.T) { + require := require.New(t) + id1 := ID{0x1f, 0x0f, 0x11} id2 := ID{0x0f, 0x1f, 0xff} - if index, found := FirstDifferenceSubset(0, 24, id1, id2); !found { - t.Fatalf("Should have found a difference: %08b %08b %08b == %08b %08b %08b", id1[0], id1[1], id1[2], id2[0], id2[1], id2[2]) - } else if index != 4 { - t.Fatalf("Found a difference at index %d expected %d: %08b %08b %08b == %08b %08b %08b", index, 4, id1[0], id1[1], id1[2], id2[0], id2[1], id2[2]) - } + index, found := FirstDifferenceSubset(0, 24, id1, id2) + require.True(found) + require.Equal(4, index) } func TestFirstDifferenceVacuous(t *testing.T) { id1 := ID{0xf0, 0x0f, 0x11} 
id2 := ID{0xf0, 0x1f, 0xff} - if _, found := FirstDifferenceSubset(0, 0, id1, id2); found { - t.Fatalf("Shouldn't have found a difference") - } + _, found := FirstDifferenceSubset(0, 0, id1, id2) + require.False(t, found) } diff --git a/avalanchego/ids/galiasreader/alias_reader_client.go b/avalanchego/ids/galiasreader/alias_reader_client.go index aa77f9ec..319d7508 100644 --- a/avalanchego/ids/galiasreader/alias_reader_client.go +++ b/avalanchego/ids/galiasreader/alias_reader_client.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package galiasreader diff --git a/avalanchego/ids/galiasreader/alias_reader_server.go b/avalanchego/ids/galiasreader/alias_reader_server.go index 48f31bf7..eeb9083c 100644 --- a/avalanchego/ids/galiasreader/alias_reader_server.go +++ b/avalanchego/ids/galiasreader/alias_reader_server.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package galiasreader diff --git a/avalanchego/ids/galiasreader/alias_reader_test.go b/avalanchego/ids/galiasreader/alias_reader_test.go index 87a462f4..899c13a2 100644 --- a/avalanchego/ids/galiasreader/alias_reader_test.go +++ b/avalanchego/ids/galiasreader/alias_reader_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package galiasreader @@ -19,9 +19,7 @@ func TestInterface(t *testing.T) { for _, test := range ids.AliasTests { listener, err := grpcutils.NewListener() - if err != nil { - t.Fatalf("Failed to create listener: %s", err) - } + require.NoError(err) serverCloser := grpcutils.ServerCloser{} w := ids.NewAliaser() diff --git a/avalanchego/ids/id.go b/avalanchego/ids/id.go index 264f1ca1..91eacfdb 100644 --- a/avalanchego/ids/id.go +++ b/avalanchego/ids/id.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package ids @@ -15,7 +15,10 @@ import ( "github.com/ava-labs/avalanchego/utils/wrappers" ) -const nullStr = "null" +const ( + IDLen = 32 + nullStr = "null" +) var ( // Empty is a useful all zero value @@ -27,7 +30,7 @@ var ( ) // ID wraps a 32 byte hash used as an identifier -type ID [32]byte +type ID [IDLen]byte // ToID attempt to convert a byte slice into an id func ToID(bytes []byte) (ID, error) { @@ -43,12 +46,21 @@ func FromString(idStr string) (ID, error) { return ToID(bytes) } +// FromStringOrPanic is the same as FromString, but will panic on error +func FromStringOrPanic(idStr string) ID { + id, err := FromString(idStr) + if err != nil { + panic(err) + } + return id +} + func (id ID) MarshalJSON() ([]byte, error) { str, err := cb58.Encode(id[:]) if err != nil { return nil, err } - return []byte("\"" + str + "\""), nil + return []byte(`"` + str + `"`), nil } func (id *ID) UnmarshalJSON(b []byte) error { @@ -84,7 +96,7 @@ func (id *ID) UnmarshalText(text []byte) error { // This will return a new id and not modify the original id. 
func (id ID) Prefix(prefixes ...uint64) ID { packer := wrappers.Packer{ - Bytes: make([]byte, len(prefixes)*wrappers.LongLen+hashing.HashLen), + Bytes: make([]byte, len(prefixes)*wrappers.LongLen+IDLen), } for _, prefix := range prefixes { @@ -95,6 +107,16 @@ func (id ID) Prefix(prefixes ...uint64) ID { return hashing.ComputeHash256Array(packer.Bytes) } +// XOR this id and the provided id and return the resulting id. +// +// Note: this id is not modified. +func (id ID) XOR(other ID) ID { + for i, b := range other { + id[i] ^= b + } + return id +} + // Bit returns the bit value at the ith index of the byte array. Returns 0 or 1 func (id ID) Bit(i uint) int { byteIndex := i / BitsPerByte @@ -132,6 +154,6 @@ func (id ID) MarshalText() ([]byte, error) { return []byte(id.String()), nil } -func (id ID) Less(other ID) bool { - return bytes.Compare(id[:], other[:]) < 0 +func (id ID) Compare(other ID) int { + return bytes.Compare(id[:], other[:]) } diff --git a/avalanchego/ids/id_test.go b/avalanchego/ids/id_test.go index 60aeefd1..930a323e 100644 --- a/avalanchego/ids/id_test.go +++ b/avalanchego/ids/id_test.go @@ -1,33 +1,43 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package ids import ( - "bytes" "encoding/json" - "reflect" + "fmt" "testing" "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/cb58" ) func TestID(t *testing.T) { + require := require.New(t) + id := ID{24} idCopy := ID{24} prefixed := id.Prefix(0) - if id != idCopy { - t.Fatalf("ID.Prefix mutated the ID") - } - if nextPrefix := id.Prefix(0); prefixed != nextPrefix { - t.Fatalf("ID.Prefix not consistent") - } + require.Equal(idCopy, id) + require.Equal(prefixed, id.Prefix(0)) +} + +func TestIDXOR(t *testing.T) { + require := require.New(t) + + id1 := ID{1} + id3 := ID{3} + + require.Equal(ID{2}, id1.XOR(id3)) + require.Equal(ID{1}, id1) } func TestIDBit(t *testing.T) { + require := require.New(t) + id0 := ID{1 << 0} id1 := ID{1 << 1} id2 := ID{1 << 2} @@ -38,54 +48,49 @@ func TestIDBit(t *testing.T) { id7 := ID{1 << 7} id8 := ID{0, 1 << 0} - switch { - case id0.Bit(0) != 1: - t.Fatalf("Wrong bit") - case id1.Bit(1) != 1: - t.Fatalf("Wrong bit") - case id2.Bit(2) != 1: - t.Fatalf("Wrong bit") - case id3.Bit(3) != 1: - t.Fatalf("Wrong bit") - case id4.Bit(4) != 1: - t.Fatalf("Wrong bit") - case id5.Bit(5) != 1: - t.Fatalf("Wrong bit") - case id6.Bit(6) != 1: - t.Fatalf("Wrong bit") - case id7.Bit(7) != 1: - t.Fatalf("Wrong bit") - case id8.Bit(8) != 1: - t.Fatalf("Wrong bit") - } + require.Equal(1, id0.Bit(0)) + require.Equal(1, id1.Bit(1)) + require.Equal(1, id2.Bit(2)) + require.Equal(1, id3.Bit(3)) + require.Equal(1, id4.Bit(4)) + require.Equal(1, id5.Bit(5)) + require.Equal(1, id6.Bit(6)) + require.Equal(1, id7.Bit(7)) + require.Equal(1, id8.Bit(8)) } func TestFromString(t *testing.T) { + require := require.New(t) + id := ID{'a', 'v', 'a', ' ', 'l', 'a', 'b', 's'} idStr := id.String() id2, err := FromString(idStr) - if err != nil { - t.Fatal(err) - } - if id != id2 { - t.Fatal("Expected FromString to be inverse of String but it wasn't") - } + require.NoError(err) + require.Equal(id, id2) } func 
TestIDFromStringError(t *testing.T) { tests := []struct { - in string + in string + expectedErr error }{ - {""}, - {"foo"}, - {"foobar"}, + { + in: "", + expectedErr: cb58.ErrBase58Decoding, + }, + { + in: "foo", + expectedErr: cb58.ErrMissingChecksum, + }, + { + in: "foobar", + expectedErr: cb58.ErrBadChecksum, + }, } for _, tt := range tests { t.Run(tt.in, func(t *testing.T) { _, err := FromString(tt.in) - if err == nil { - t.Error("Unexpected success") - } + require.ErrorIs(t, err, tt.expectedErr) }) } } @@ -97,22 +102,26 @@ func TestIDMarshalJSON(t *testing.T) { out []byte err error }{ - {"ID{}", ID{}, []byte("\"11111111111111111111111111111111LpoYY\""), nil}, { - "ID(\"ava labs\")", + "ID{}", + ID{}, + []byte(`"11111111111111111111111111111111LpoYY"`), + nil, + }, + { + `ID("ava labs")`, ID{'a', 'v', 'a', ' ', 'l', 'a', 'b', 's'}, - []byte("\"jvYi6Tn9idMi7BaymUVi9zWjg5tpmW7trfKG1AYJLKZJ2fsU7\""), + []byte(`"jvYi6Tn9idMi7BaymUVi9zWjg5tpmW7trfKG1AYJLKZJ2fsU7"`), nil, }, } for _, tt := range tests { t.Run(tt.label, func(t *testing.T) { + require := require.New(t) + out, err := tt.in.MarshalJSON() - if err != tt.err { - t.Errorf("Expected err %s, got error %v", tt.err, err) - } else if !bytes.Equal(out, tt.out) { - t.Errorf("got %q, expected %q", out, tt.out) - } + require.ErrorIs(err, tt.err) + require.Equal(tt.out, out) }) } } @@ -124,23 +133,27 @@ func TestIDUnmarshalJSON(t *testing.T) { out ID err error }{ - {"ID{}", []byte("null"), ID{}, nil}, { - "ID(\"ava labs\")", - []byte("\"jvYi6Tn9idMi7BaymUVi9zWjg5tpmW7trfKG1AYJLKZJ2fsU7\""), + "ID{}", + []byte("null"), + ID{}, + nil, + }, + { + `ID("ava labs")`, + []byte(`"jvYi6Tn9idMi7BaymUVi9zWjg5tpmW7trfKG1AYJLKZJ2fsU7"`), ID{'a', 'v', 'a', ' ', 'l', 'a', 'b', 's'}, nil, }, } for _, tt := range tests { t.Run(tt.label, func(t *testing.T) { + require := require.New(t) + foo := ID{} err := foo.UnmarshalJSON(tt.in) - if err != tt.err { - t.Errorf("Expected err %s, got error %v", tt.err, err) - } else if foo != tt.out { 
- t.Errorf("got %q, expected %q", foo, tt.out) - } + require.ErrorIs(err, tt.err) + require.Equal(tt.out, foo) }) } } @@ -148,10 +161,7 @@ func TestIDUnmarshalJSON(t *testing.T) { func TestIDHex(t *testing.T) { id := ID{'a', 'v', 'a', ' ', 'l', 'a', 'b', 's'} expected := "617661206c616273000000000000000000000000000000000000000000000000" - actual := id.Hex() - if actual != expected { - t.Fatalf("got %s, expected %s", actual, expected) - } + require.Equal(t, expected, id.Hex()) } func TestIDString(t *testing.T) { @@ -165,10 +175,7 @@ func TestIDString(t *testing.T) { } for _, tt := range tests { t.Run(tt.label, func(t *testing.T) { - result := tt.id.String() - if result != tt.expected { - t.Errorf("got %q, expected %q", result, tt.expected) - } + require.Equal(t, tt.expected, tt.id.String()) }) } } @@ -185,57 +192,53 @@ func TestSortIDs(t *testing.T) { {'a', 'v', 'a', ' ', 'l', 'a', 'b', 's'}, {'e', 'v', 'a', ' ', 'l', 'a', 'b', 's'}, } - if !reflect.DeepEqual(ids, expected) { - t.Fatal("[]ID was not sorted lexographically") - } + require.Equal(t, expected, ids) } func TestIDMapMarshalling(t *testing.T) { + require := require.New(t) + originalMap := map[ID]int{ {'e', 'v', 'a', ' ', 'l', 'a', 'b', 's'}: 1, {'a', 'v', 'a', ' ', 'l', 'a', 'b', 's'}: 2, } mapJSON, err := json.Marshal(originalMap) - if err != nil { - t.Fatal(err) - } + require.NoError(err) var unmarshalledMap map[ID]int - err = json.Unmarshal(mapJSON, &unmarshalledMap) - if err != nil { - t.Fatal(err) - } + require.NoError(json.Unmarshal(mapJSON, &unmarshalledMap)) - if len(originalMap) != len(unmarshalledMap) { - t.Fatalf("wrong map lengths") - } - for originalID, num := range originalMap { - if unmarshalledMap[originalID] != num { - t.Fatalf("map was incorrectly Unmarshalled") - } - } + require.Equal(originalMap, unmarshalledMap) } -func TestIDLess(t *testing.T) { - require := require.New(t) +func TestIDCompare(t *testing.T) { + tests := []struct { + a ID + b ID + expected int + }{ + { + a: ID{1}, + b: 
ID{0}, + expected: 1, + }, + { + a: ID{1}, + b: ID{1}, + expected: 0, + }, + { + a: ID{1, 0}, + b: ID{1, 2}, + expected: -1, + }, + } + for _, test := range tests { + t.Run(fmt.Sprintf("%s_%s_%d", test.a, test.b, test.expected), func(t *testing.T) { + require := require.New(t) - id1 := ID{} - id2 := ID{} - require.False(id1.Less(id2)) - require.False(id2.Less(id1)) - - id1 = ID{1} - id2 = ID{0} - require.False(id1.Less(id2)) - require.True(id2.Less(id1)) - - id1 = ID{1} - id2 = ID{1} - require.False(id1.Less(id2)) - require.False(id2.Less(id1)) - - id1 = ID{1, 0} - id2 = ID{1, 2} - require.True(id1.Less(id2)) - require.False(id2.Less(id1)) + require.Equal(test.expected, test.a.Compare(test.b)) + require.Equal(-test.expected, test.b.Compare(test.a)) + }) + } } diff --git a/avalanchego/ids/node_id.go b/avalanchego/ids/node_id.go index 83301577..7e9e94a1 100644 --- a/avalanchego/ids/node_id.go +++ b/avalanchego/ids/node_id.go @@ -1,22 +1,28 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package ids import ( "bytes" - "crypto/x509" + "errors" "fmt" + "github.com/ava-labs/avalanchego/staking" "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/hashing" ) -const NodeIDPrefix = "NodeID-" +const ( + NodeIDPrefix = "NodeID-" + NodeIDLen = ShortIDLen +) var ( EmptyNodeID = NodeID{} + errShortNodeID = errors.New("insufficient NodeID length") + _ utils.Sortable[NodeID] = NodeID{} ) @@ -31,7 +37,7 @@ func (id NodeID) Bytes() []byte { } func (id NodeID) MarshalJSON() ([]byte, error) { - return []byte("\"" + id.String() + "\""), nil + return []byte(`"` + id.String() + `"`), nil } func (id NodeID) MarshalText() ([]byte, error) { @@ -43,7 +49,7 @@ func (id *NodeID) UnmarshalJSON(b []byte) error { if str == nullStr { // If "null", do nothing return nil } else if len(str) <= 2+len(NodeIDPrefix) { - return fmt.Errorf("expected NodeID length to be > %d", 2+len(NodeIDPrefix)) + return fmt.Errorf("%w: expected to be > %d", errShortNodeID, 2+len(NodeIDPrefix)) } lastIndex := len(str) - 1 @@ -60,8 +66,8 @@ func (id *NodeID) UnmarshalText(text []byte) error { return id.UnmarshalJSON(text) } -func (id NodeID) Less(other NodeID) bool { - return bytes.Compare(id[:], other[:]) == -1 +func (id NodeID) Compare(other NodeID) int { + return bytes.Compare(id[:], other[:]) } // ToNodeID attempt to convert a byte slice into a node id @@ -70,7 +76,7 @@ func ToNodeID(bytes []byte) (NodeID, error) { return NodeID(nodeID), err } -func NodeIDFromCert(cert *x509.Certificate) NodeID { +func NodeIDFromCert(cert *staking.Certificate) NodeID { return hashing.ComputeHash160Array( hashing.ComputeHash256(cert.Raw), ) diff --git a/avalanchego/ids/node_id_test.go b/avalanchego/ids/node_id_test.go index 52c90c8e..2c94450f 100644 --- a/avalanchego/ids/node_id_test.go +++ b/avalanchego/ids/node_id_test.go @@ -1,58 +1,62 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. package ids import ( - "bytes" "encoding/json" + "fmt" "testing" "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/utils/cb58" ) func TestNodeIDEquality(t *testing.T) { + require := require.New(t) + id := NodeID{24} idCopy := NodeID{24} - if id != idCopy { - t.Fatalf("ID.Prefix mutated the ID") - } + require.Equal(id, idCopy) id2 := NodeID{} - if id == id2 { - t.Fatal("expected Node IDs to be unequal") - } + require.NotEqual(id, id2) } func TestNodeIDFromString(t *testing.T) { + require := require.New(t) + id := NodeID{'a', 'v', 'a', ' ', 'l', 'a', 'b', 's'} idStr := id.String() id2, err := NodeIDFromString(idStr) - if err != nil { - t.Fatal(err) - } - if id != id2 { - t.Fatal("Expected FromString to be inverse of String but it wasn't") - } + require.NoError(err) + require.Equal(id, id2) expected := "NodeID-9tLMkeWFhWXd8QZc4rSiS5meuVXF5kRsz" - if idStr != expected { - t.Fatalf("expected %s but got %s", expected, idStr) - } + require.Equal(expected, idStr) } func TestNodeIDFromStringError(t *testing.T) { tests := []struct { - in string + in string + expectedErr error }{ - {""}, - {"foo"}, - {"foobar"}, + { + in: "", + expectedErr: cb58.ErrBase58Decoding, + }, + { + in: "foo", + expectedErr: cb58.ErrMissingChecksum, + }, + { + in: "foobar", + expectedErr: cb58.ErrBadChecksum, + }, } for _, tt := range tests { t.Run(tt.in, func(t *testing.T) { _, err := FromString(tt.in) - if err == nil { - t.Error("Unexpected success") - } + require.ErrorIs(t, err, tt.expectedErr) }) } } @@ -64,83 +68,88 @@ func TestNodeIDMarshalJSON(t *testing.T) { out []byte err error }{ - {"NodeID{}", NodeID{}, []byte("\"NodeID-111111111111111111116DBWJs\""), nil}, { - "ID(\"ava labs\")", + "NodeID{}", + NodeID{}, + []byte(`"NodeID-111111111111111111116DBWJs"`), + nil, + }, + { + `ID("ava labs")`, NodeID{'a', 'v', 'a', ' ', 'l', 'a', 'b', 's'}, - []byte("\"NodeID-9tLMkeWFhWXd8QZc4rSiS5meuVXF5kRsz\""), + 
[]byte(`"NodeID-9tLMkeWFhWXd8QZc4rSiS5meuVXF5kRsz"`), nil, }, } for _, tt := range tests { t.Run(tt.label, func(t *testing.T) { + require := require.New(t) + out, err := tt.in.MarshalJSON() - if err != tt.err { - t.Errorf("Expected err %s, got error %v", tt.err, err) - } else if !bytes.Equal(out, tt.out) { - t.Errorf("got %q, expected %q", out, tt.out) - } + require.ErrorIs(err, tt.err) + require.Equal(tt.out, out) }) } } func TestNodeIDUnmarshalJSON(t *testing.T) { tests := []struct { - label string - in []byte - out NodeID - shouldErr bool + label string + in []byte + out NodeID + expectedErr error }{ - {"NodeID{}", []byte("null"), NodeID{}, false}, { - "NodeID(\"ava labs\")", - []byte("\"NodeID-9tLMkeWFhWXd8QZc4rSiS5meuVXF5kRsz\""), + "NodeID{}", + []byte("null"), + NodeID{}, + nil, + }, + { + `NodeID("ava labs")`, + []byte(`"NodeID-9tLMkeWFhWXd8QZc4rSiS5meuVXF5kRsz"`), NodeID{'a', 'v', 'a', ' ', 'l', 'a', 'b', 's'}, - false, + nil, }, { "missing start quote", - []byte("NodeID-9tLMkeWFhWXd8QZc4rSiS5meuVXF5kRsz\""), + []byte(`NodeID-9tLMkeWFhWXd8QZc4rSiS5meuVXF5kRsz"`), NodeID{}, - true, + errMissingQuotes, }, { "missing end quote", - []byte("\"NodeID-9tLMkeWFhWXd8QZc4rSiS5meuVXF5kRsz"), + []byte(`"NodeID-9tLMkeWFhWXd8QZc4rSiS5meuVXF5kRsz`), NodeID{}, - true, + errMissingQuotes, }, { "NodeID-", - []byte("\"NodeID-\""), + []byte(`"NodeID-"`), NodeID{}, - true, + errShortNodeID, }, { "NodeID-1", - []byte("\"NodeID-1\""), + []byte(`"NodeID-1"`), NodeID{}, - true, + cb58.ErrMissingChecksum, }, { "NodeID-9tLMkeWFhWXd8QZc4rSiS5meuVXF5kRsz1", - []byte("\"NodeID-1\""), + []byte(`"NodeID-1"`), NodeID{}, - true, + cb58.ErrMissingChecksum, }, } for _, tt := range tests { t.Run(tt.label, func(t *testing.T) { + require := require.New(t) + foo := NodeID{} err := foo.UnmarshalJSON(tt.in) - switch { - case err == nil && tt.shouldErr: - t.Errorf("Expected no error but got error %v", err) - case err != nil && !tt.shouldErr: - t.Errorf("unxpected error: %v", err) - case foo != 
tt.out: - t.Errorf("got %q, expected %q", foo, tt.out) - } + require.ErrorIs(err, tt.expectedErr) + require.Equal(tt.out, foo) }) } } @@ -156,60 +165,54 @@ func TestNodeIDString(t *testing.T) { } for _, tt := range tests { t.Run(tt.label, func(t *testing.T) { - result := tt.id.String() - if result != tt.expected { - t.Errorf("got %q, expected %q", result, tt.expected) - } + require.Equal(t, tt.expected, tt.id.String()) }) } } func TestNodeIDMapMarshalling(t *testing.T) { + require := require.New(t) + originalMap := map[NodeID]int{ {'e', 'v', 'a', ' ', 'l', 'a', 'b', 's'}: 1, {'a', 'v', 'a', ' ', 'l', 'a', 'b', 's'}: 2, } mapJSON, err := json.Marshal(originalMap) - if err != nil { - t.Fatal(err) - } + require.NoError(err) var unmarshalledMap map[NodeID]int - err = json.Unmarshal(mapJSON, &unmarshalledMap) - if err != nil { - t.Fatal(err) - } - - if len(originalMap) != len(unmarshalledMap) { - t.Fatalf("wrong map lengths") - } - for originalID, num := range originalMap { - if unmarshalledMap[originalID] != num { - t.Fatalf("map was incorrectly Unmarshalled") - } - } + require.NoError(json.Unmarshal(mapJSON, &unmarshalledMap)) + require.Equal(originalMap, unmarshalledMap) } -func TestNodeIDLess(t *testing.T) { - require := require.New(t) +func TestNodeIDCompare(t *testing.T) { + tests := []struct { + a NodeID + b NodeID + expected int + }{ + { + a: NodeID{1}, + b: NodeID{0}, + expected: 1, + }, + { + a: NodeID{1}, + b: NodeID{1}, + expected: 0, + }, + { + a: NodeID{1, 0}, + b: NodeID{1, 2}, + expected: -1, + }, + } + for _, test := range tests { + t.Run(fmt.Sprintf("%s_%s_%d", test.a, test.b, test.expected), func(t *testing.T) { + require := require.New(t) - id1 := NodeID{} - id2 := NodeID{} - require.False(id1.Less(id2)) - require.False(id2.Less(id1)) - - id1 = NodeID{1} - id2 = NodeID{} - require.False(id1.Less(id2)) - require.True(id2.Less(id1)) - - id1 = NodeID{1} - id2 = NodeID{1} - require.False(id1.Less(id2)) - require.False(id2.Less(id1)) - - id1 = NodeID{1} - 
id2 = NodeID{1, 2} - require.True(id1.Less(id2)) - require.False(id2.Less(id1)) + require.Equal(test.expected, test.a.Compare(test.b)) + require.Equal(-test.expected, test.b.Compare(test.a)) + }) + } } diff --git a/avalanchego/ids/request_id.go b/avalanchego/ids/request_id.go index 779f819d..e1d94598 100644 --- a/avalanchego/ids/request_id.go +++ b/avalanchego/ids/request_id.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package ids diff --git a/avalanchego/ids/short.go b/avalanchego/ids/short.go index 1ae86348..7c01dca4 100644 --- a/avalanchego/ids/short.go +++ b/avalanchego/ids/short.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package ids @@ -14,6 +14,8 @@ import ( "github.com/ava-labs/avalanchego/utils/hashing" ) +const ShortIDLen = 20 + // ShortEmpty is a useful all zero value var ( ShortEmpty = ShortID{} @@ -22,7 +24,7 @@ var ( ) // ShortID wraps a 20 byte hash as an identifier -type ShortID [20]byte +type ShortID [ShortIDLen]byte // ToShortID attempt to convert a byte slice into an id func ToShortID(bytes []byte) (ShortID, error) { @@ -52,7 +54,7 @@ func (id ShortID) MarshalJSON() ([]byte, error) { if err != nil { return nil, err } - return []byte("\"" + str + "\""), nil + return []byte(`"` + str + `"`), nil } func (id *ShortID) UnmarshalJSON(b []byte) error { @@ -108,8 +110,8 @@ func (id ShortID) MarshalText() ([]byte, error) { return []byte(id.String()), nil } -func (id ShortID) Less(other ShortID) bool { - return bytes.Compare(id[:], other[:]) == -1 +func (id ShortID) Compare(other ShortID) int { + return bytes.Compare(id[:], other[:]) } // ShortIDsToStrings converts an array of shortIDs to an array of their string diff --git a/avalanchego/ids/test_aliases.go 
b/avalanchego/ids/test_aliases.go index 06a7fe3a..ce9991f5 100644 --- a/avalanchego/ids/test_aliases.go +++ b/avalanchego/ids/test_aliases.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package ids @@ -17,13 +17,13 @@ var AliasTests = []func(require *require.Assertions, r AliaserReader, w AliaserW func AliaserLookupErrorTest(require *require.Assertions, r AliaserReader, _ AliaserWriter) { _, err := r.Lookup("Batman") - require.Error(err, "expected an error due to missing alias") + // TODO: require error to be errNoIDWithAlias + require.Error(err) //nolint:forbidigo // currently returns grpc errors too } func AliaserLookupTest(require *require.Assertions, r AliaserReader, w AliaserWriter) { id := ID{'K', 'a', 't', 'e', ' ', 'K', 'a', 'n', 'e'} - err := w.Alias(id, "Batwoman") - require.NoError(err) + require.NoError(w.Alias(id, "Batwoman")) res, err := r.Lookup("Batwoman") require.NoError(err) @@ -40,11 +40,9 @@ func AliaserAliasesEmptyTest(require *require.Assertions, r AliaserReader, _ Ali func AliaserAliasesTest(require *require.Assertions, r AliaserReader, w AliaserWriter) { id := ID{'B', 'r', 'u', 'c', 'e', ' ', 'W', 'a', 'y', 'n', 'e'} - err := w.Alias(id, "Batman") - require.NoError(err) - err = w.Alias(id, "Dark Knight") - require.NoError(err) + require.NoError(w.Alias(id, "Batman")) + require.NoError(w.Alias(id, "Dark Knight")) aliases, err := r.Aliases(id) require.NoError(err) @@ -56,14 +54,13 @@ func AliaserAliasesTest(require *require.Assertions, r AliaserReader, w AliaserW func AliaserPrimaryAliasTest(require *require.Assertions, r AliaserReader, w AliaserWriter) { id1 := ID{'J', 'a', 'm', 'e', 's', ' ', 'G', 'o', 'r', 'd', 'o', 'n'} id2 := ID{'B', 'r', 'u', 'c', 'e', ' ', 'W', 'a', 'y', 'n', 'e'} - err := w.Alias(id2, "Batman") - require.NoError(err) - err = w.Alias(id2, "Dark Knight") - require.NoError(err) 
+ require.NoError(w.Alias(id2, "Batman")) + require.NoError(w.Alias(id2, "Dark Knight")) - _, err = r.PrimaryAlias(id1) - require.Error(err) + _, err := r.PrimaryAlias(id1) + // TODO: require error to be errNoAliasForID + require.Error(err) //nolint:forbidigo // currently returns grpc errors too expected := "Batman" res, err := r.PrimaryAlias(id2) @@ -74,33 +71,28 @@ func AliaserPrimaryAliasTest(require *require.Assertions, r AliaserReader, w Ali func AliaserAliasClashTest(require *require.Assertions, _ AliaserReader, w AliaserWriter) { id1 := ID{'B', 'r', 'u', 'c', 'e', ' ', 'W', 'a', 'y', 'n', 'e'} id2 := ID{'D', 'i', 'c', 'k', ' ', 'G', 'r', 'a', 'y', 's', 'o', 'n'} - err := w.Alias(id1, "Batman") - require.NoError(err) - err = w.Alias(id2, "Batman") - require.Error(err) + require.NoError(w.Alias(id1, "Batman")) + + err := w.Alias(id2, "Batman") + // TODO: require error to be errAliasAlreadyMapped + require.Error(err) //nolint:forbidigo // currently returns grpc errors too } func AliaserRemoveAliasTest(require *require.Assertions, r AliaserReader, w AliaserWriter) { id1 := ID{'B', 'r', 'u', 'c', 'e', ' ', 'W', 'a', 'y', 'n', 'e'} id2 := ID{'J', 'a', 'm', 'e', 's', ' ', 'G', 'o', 'r', 'd', 'o', 'n'} - err := w.Alias(id1, "Batman") - require.NoError(err) - err = w.Alias(id1, "Dark Knight") - require.NoError(err) + require.NoError(w.Alias(id1, "Batman")) + require.NoError(w.Alias(id1, "Dark Knight")) w.RemoveAliases(id1) - _, err = r.PrimaryAlias(id1) - require.Error(err) - - err = w.Alias(id2, "Batman") - require.NoError(err) - - err = w.Alias(id2, "Dark Knight") - require.NoError(err) + _, err := r.PrimaryAlias(id1) + // TODO: require error to be errNoAliasForID + require.Error(err) //nolint:forbidigo // currently returns grpc errors too - err = w.Alias(id1, "Dark Night Rises") - require.NoError(err) + require.NoError(w.Alias(id2, "Batman")) + require.NoError(w.Alias(id2, "Dark Knight")) + require.NoError(w.Alias(id1, "Dark Night Rises")) } diff --git 
a/avalanchego/ids/test_generator.go b/avalanchego/ids/test_generator.go index 2c1344af..2df95ec0 100644 --- a/avalanchego/ids/test_generator.go +++ b/avalanchego/ids/test_generator.go @@ -1,11 +1,9 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package ids -import ( - "sync/atomic" -) +import "sync/atomic" var offset = uint64(0) @@ -25,3 +23,12 @@ func GenerateTestShortID() ShortID { func GenerateTestNodeID() NodeID { return NodeID(GenerateTestShortID()) } + +// BuildTestNodeID is an utility to build NodeID from bytes in UTs +// It must not be used in production code. In production code we should +// use ToNodeID, which performs proper length checking. +func BuildTestNodeID(src []byte) NodeID { + res := NodeID{} + copy(res[:], src) + return res +} diff --git a/avalanchego/indexer/client.go b/avalanchego/indexer/client.go index 785018e3..821059a1 100644 --- a/avalanchego/indexer/client.go +++ b/avalanchego/indexer/client.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package indexer diff --git a/avalanchego/indexer/client_test.go b/avalanchego/indexer/client_test.go index 4fb34cf9..95124a21 100644 --- a/avalanchego/indexer/client_test.go +++ b/avalanchego/indexer/client_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package indexer @@ -42,7 +42,7 @@ func TestIndexClient(t *testing.T) { } index, err := client.GetIndex(context.Background(), ids.Empty) require.NoError(err) - require.EqualValues(5, index) + require.Equal(uint64(5), index) } { // Test GetLastAccepted @@ -64,9 +64,9 @@ func TestIndexClient(t *testing.T) { } container, index, err := client.GetLastAccepted(context.Background()) require.NoError(err) - require.EqualValues(id, container.ID) - require.EqualValues(bytes, container.Bytes) - require.EqualValues(index, 10) + require.Equal(id, container.ID) + require.Equal(bytes, container.Bytes) + require.Equal(uint64(10), index) } { // Test GetContainerRange @@ -88,8 +88,8 @@ func TestIndexClient(t *testing.T) { containers, err := client.GetContainerRange(context.Background(), 1, 10) require.NoError(err) require.Len(containers, 1) - require.EqualValues(id, containers[0].ID) - require.EqualValues(bytes, containers[0].Bytes) + require.Equal(id, containers[0].ID) + require.Equal(bytes, containers[0].Bytes) } { // Test IsAccepted @@ -125,8 +125,8 @@ func TestIndexClient(t *testing.T) { } container, index, err := client.GetContainerByID(context.Background(), id) require.NoError(err) - require.EqualValues(id, container.ID) - require.EqualValues(bytes, container.Bytes) - require.EqualValues(index, 10) + require.Equal(id, container.ID) + require.Equal(bytes, container.Bytes) + require.Equal(uint64(10), index) } } diff --git a/avalanchego/indexer/codec.go b/avalanchego/indexer/codec.go new file mode 100644 index 00000000..afde4750 --- /dev/null +++ b/avalanchego/indexer/codec.go @@ -0,0 +1,25 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package indexer + +import ( + "math" + "time" + + "github.com/ava-labs/avalanchego/codec" + "github.com/ava-labs/avalanchego/codec/linearcodec" +) + +const CodecVersion = 0 + +var Codec codec.Manager + +func init() { + lc := linearcodec.NewDefault(time.Time{}) + Codec = codec.NewManager(math.MaxInt) + + if err := Codec.RegisterCodec(CodecVersion, lc); err != nil { + panic(err) + } +} diff --git a/avalanchego/indexer/container.go b/avalanchego/indexer/container.go index c640fdd9..2bbb68e5 100644 --- a/avalanchego/indexer/container.go +++ b/avalanchego/indexer/container.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package indexer diff --git a/avalanchego/indexer/examples/p-chain/main.go b/avalanchego/indexer/examples/p-chain/main.go index 257591f4..b690ebf0 100644 --- a/avalanchego/indexer/examples/p-chain/main.go +++ b/avalanchego/indexer/examples/p-chain/main.go @@ -1,25 +1,26 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package main import ( "context" - "fmt" "log" "time" "github.com/ava-labs/avalanchego/indexer" - "github.com/ava-labs/avalanchego/vms/platformvm/blocks" - "github.com/ava-labs/avalanchego/vms/proposervm/block" + "github.com/ava-labs/avalanchego/version" "github.com/ava-labs/avalanchego/wallet/subnet/primary" + + platformvmblock "github.com/ava-labs/avalanchego/vms/platformvm/block" + proposervmblock "github.com/ava-labs/avalanchego/vms/proposervm/block" ) // This example program continuously polls for the next P-Chain block // and prints the ID of the block and its transactions. 
func main() { var ( - uri = fmt.Sprintf("%s/ext/index/P/block", primary.LocalAPIURI) + uri = primary.LocalAPIURI + "/ext/index/P/block" client = indexer.NewClient(uri) ctx = context.Background() nextIndex uint64 @@ -33,12 +34,12 @@ func main() { } platformvmBlockBytes := container.Bytes - proposerVMBlock, err := block.Parse(container.Bytes) + proposerVMBlock, err := proposervmblock.Parse(container.Bytes, version.DefaultUpgradeTime) if err == nil { platformvmBlockBytes = proposerVMBlock.Block() } - platformvmBlock, err := blocks.Parse(blocks.Codec, platformvmBlockBytes) + platformvmBlock, err := platformvmblock.Parse(platformvmblock.Codec, platformvmBlockBytes) if err != nil { log.Fatalf("failed to parse platformvm block: %s\n", err) } diff --git a/avalanchego/indexer/examples/x-chain-blocks/main.go b/avalanchego/indexer/examples/x-chain-blocks/main.go index a995f961..2687e5a0 100644 --- a/avalanchego/indexer/examples/x-chain-blocks/main.go +++ b/avalanchego/indexer/examples/x-chain-blocks/main.go @@ -1,15 +1,15 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package main import ( "context" - "fmt" "log" "time" "github.com/ava-labs/avalanchego/indexer" + "github.com/ava-labs/avalanchego/version" "github.com/ava-labs/avalanchego/vms/proposervm/block" "github.com/ava-labs/avalanchego/wallet/chain/x" "github.com/ava-labs/avalanchego/wallet/subnet/primary" @@ -19,7 +19,7 @@ import ( // and prints the ID of the block and its transactions. 
func main() { var ( - uri = fmt.Sprintf("%s/ext/index/X/block", primary.LocalAPIURI) + uri = primary.LocalAPIURI + "/ext/index/X/block" client = indexer.NewClient(uri) ctx = context.Background() nextIndex uint64 @@ -32,7 +32,7 @@ func main() { continue } - proposerVMBlock, err := block.Parse(container.Bytes) + proposerVMBlock, err := block.Parse(container.Bytes, version.DefaultUpgradeTime) if err != nil { log.Fatalf("failed to parse proposervm block: %s\n", err) } diff --git a/avalanchego/indexer/index.go b/avalanchego/indexer/index.go index 07de46ee..16a127c9 100644 --- a/avalanchego/indexer/index.go +++ b/avalanchego/indexer/index.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package indexer @@ -6,28 +6,23 @@ package indexer import ( "errors" "fmt" - "io" "sync" "go.uber.org/zap" - "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/utils/math" "github.com/ava-labs/avalanchego/utils/timer/mockable" - "github.com/ava-labs/avalanchego/utils/wrappers" ) -const ( - // Maximum number of containers IDs that can be fetched at a time - // in a call to GetContainerRange - MaxFetchedByRange = 1024 -) +// Maximum number of containers IDs that can be fetched at a time in a call to +// GetContainerRange +const MaxFetchedByRange = 1024 var ( // Maps to the byte representation of the next accepted index @@ -35,28 +30,18 @@ var ( indexToContainerPrefix = []byte{0x01} containerToIDPrefix = []byte{0x02} errNoneAccepted = errors.New("no containers have been accepted") - errNumToFetchZero = 
fmt.Errorf("numToFetch must be in [1,%d]", MaxFetchedByRange) + errNumToFetchInvalid = fmt.Errorf("numToFetch must be in [1,%d]", MaxFetchedByRange) + errNoContainerAtIndex = errors.New("no container at index") - _ Index = (*index)(nil) + _ snow.Acceptor = (*index)(nil) ) -// Index indexes containers in their order of acceptance -// Index is thread-safe. -// Index assumes that Accept is called before the container is committed to the -// database of the VM that the container exists in. -type Index interface { - snow.Acceptor - GetContainerByIndex(index uint64) (Container, error) - GetContainerRange(startIndex uint64, numToFetch uint64) ([]Container, error) - GetLastAccepted() (Container, error) - GetIndex(id ids.ID) (uint64, error) - GetContainerByID(id ids.ID) (Container, error) - io.Closer -} - -// indexer indexes all accepted transactions by the order in which they were accepted +// index indexes containers in their order of acceptance +// +// Invariant: index is thread-safe. +// Invariant: index assumes that Accept is called, before the container is +// committed to the database of the VM, in the order they were accepted. type index struct { - codec codec.Manager clock mockable.Clock lock sync.RWMutex // The index of the next accepted transaction @@ -72,21 +57,20 @@ type index struct { log logging.Logger } -// Returns a new, thread-safe Index. -// Closes [baseDB] on close. +// Create a new thread-safe index. +// +// Invariant: Closes [baseDB] on close. 
func newIndex( baseDB database.Database, log logging.Logger, - codec codec.Manager, clock mockable.Clock, -) (Index, error) { +) (*index, error) { vDB := versiondb.New(baseDB) indexToContainer := prefixdb.New(indexToContainerPrefix, vDB) containerToIndex := prefixdb.New(containerToIDPrefix, vDB) i := &index{ clock: clock, - codec: codec, baseDB: baseDB, vDB: vDB, indexToContainer: indexToContainer, @@ -115,14 +99,12 @@ func newIndex( // Close this index func (i *index) Close() error { - errs := wrappers.Errs{} - errs.Add( + return utils.Err( i.indexToContainer.Close(), i.containerToIndex.Close(), i.vDB.Close(), i.baseDB.Close(), ) - return errs.Err } // Index that the given transaction is accepted @@ -153,7 +135,7 @@ func (i *index) Accept(ctx *snow.ConsensusContext, containerID ids.ID, container ) // Persist index --> Container nextAcceptedIndexBytes := database.PackUInt64(i.nextAcceptedIndex) - bytes, err := i.codec.Marshal(codecVersion, Container{ + bytes, err := Codec.Marshal(CodecVersion, Container{ ID: containerID, Bytes: containerBytes, Timestamp: i.clock.Time().UnixNano(), @@ -195,7 +177,7 @@ func (i *index) GetContainerByIndex(index uint64) (Container, error) { func (i *index) getContainerByIndex(index uint64) (Container, error) { lastAcceptedIndex, ok := i.lastAcceptedIndex() if !ok || index > lastAcceptedIndex { - return Container{}, fmt.Errorf("no container at index %d", index) + return Container{}, fmt.Errorf("%w %d", errNoContainerAtIndex, index) } indexBytes := database.PackUInt64(index) return i.getContainerByIndexBytes(indexBytes) @@ -212,7 +194,7 @@ func (i *index) getContainerByIndexBytes(indexBytes []byte) (Container, error) { return Container{}, fmt.Errorf("couldn't read from database: %w", err) } var container Container - if _, err := i.codec.Unmarshal(containerBytes, &container); err != nil { + if _, err := Codec.Unmarshal(containerBytes, &container); err != nil { return Container{}, fmt.Errorf("couldn't unmarshal container: %w", err) } 
return container, nil @@ -224,10 +206,8 @@ func (i *index) getContainerByIndexBytes(indexBytes []byte) (Container, error) { // [numToFetch] should be in [0, MaxFetchedByRange] func (i *index) GetContainerRange(startIndex, numToFetch uint64) ([]Container, error) { // Check arguments for validity - if numToFetch == 0 { - return nil, errNumToFetchZero - } else if numToFetch > MaxFetchedByRange { - return nil, fmt.Errorf("requested %d but maximum page size is %d", numToFetch, MaxFetchedByRange) + if numToFetch == 0 || numToFetch > MaxFetchedByRange { + return nil, fmt.Errorf("%w but is %d", errNumToFetchInvalid, numToFetch) } i.lock.RLock() @@ -241,7 +221,7 @@ func (i *index) GetContainerRange(startIndex, numToFetch uint64) ([]Container, e } // Calculate the last index we will fetch - lastIndex := math.Min(startIndex+numToFetch-1, lastAcceptedIndex) + lastIndex := min(startIndex+numToFetch-1, lastAcceptedIndex) // [lastIndex] is always >= [startIndex] so this is safe. // [numToFetch] is limited to [MaxFetchedByRange] so [containers] is bounded in size. containers := make([]Container, int(lastIndex)-int(startIndex)+1) diff --git a/avalanchego/indexer/index_test.go b/avalanchego/indexer/index_test.go index 79ef4a5b..127aa64b 100644 --- a/avalanchego/indexer/index_test.go +++ b/avalanchego/indexer/index_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package indexer @@ -8,12 +8,10 @@ import ( "github.com/stretchr/testify/require" - "github.com/ava-labs/avalanchego/codec" - "github.com/ava-labs/avalanchego/codec/linearcodec" "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/set" @@ -24,16 +22,13 @@ func TestIndex(t *testing.T) { // Setup pageSize := uint64(64) require := require.New(t) - codec := codec.NewDefaultManager() - err := codec.RegisterCodec(codecVersion, linearcodec.NewDefault()) - require.NoError(err) baseDB := memdb.New() db := versiondb.New(baseDB) - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) - indexIntf, err := newIndex(db, logging.NoLog{}, codec, mockable.Clock{}) + idx, err := newIndex(db, logging.NoLog{}, mockable.Clock{}) require.NoError(err) - idx := indexIntf.(*index) // Populate "containers" with random IDs/bytes containers := map[ids.ID][]byte{} @@ -44,13 +39,12 @@ func TestIndex(t *testing.T) { // Accept each container and after each, make assertions i := uint64(0) for containerID, containerBytes := range containers { - err = idx.Accept(ctx, containerID, containerBytes) - require.NoError(err) + require.NoError(idx.Accept(ctx, containerID, containerBytes)) lastAcceptedIndex, ok := idx.lastAcceptedIndex() require.True(ok) - require.EqualValues(i, lastAcceptedIndex) - require.EqualValues(i+1, idx.nextAcceptedIndex) + require.Equal(i, lastAcceptedIndex) + require.Equal(i+1, idx.nextAcceptedIndex) gotContainer, err := idx.GetContainerByID(containerID) require.NoError(err) @@ -58,7 +52,7 @@ func TestIndex(t *testing.T) { gotIndex, err := idx.GetIndex(containerID) require.NoError(err) - 
require.EqualValues(i, gotIndex) + require.Equal(i, gotIndex) gotContainer, err = idx.GetContainerByIndex(i) require.NoError(err) @@ -85,9 +79,8 @@ func TestIndex(t *testing.T) { require.NoError(db.Commit()) require.NoError(idx.Close()) db = versiondb.New(baseDB) - indexIntf, err = newIndex(db, logging.NoLog{}, codec, mockable.Clock{}) + idx, err = newIndex(db, logging.NoLog{}, mockable.Clock{}) require.NoError(err) - idx = indexIntf.(*index) // Get all of the containers containersList, err := idx.GetContainerRange(0, pageSize) @@ -104,9 +97,9 @@ func TestIndex(t *testing.T) { for _, container := range containersList { require.False(sawContainers.Contains(container.ID)) // Should only see this container once require.Contains(containers, container.ID) - require.EqualValues(containers[container.ID], container.Bytes) + require.Equal(containers[container.ID], container.Bytes) // Timestamps should be non-decreasing - require.True(container.Timestamp >= lastTimestamp) + require.GreaterOrEqual(container.Timestamp, lastTimestamp) lastTimestamp = container.Timestamp sawContainers.Add(container.ID) } @@ -115,24 +108,20 @@ func TestIndex(t *testing.T) { func TestIndexGetContainerByRangeMaxPageSize(t *testing.T) { // Setup require := require.New(t) - codec := codec.NewDefaultManager() - err := codec.RegisterCodec(codecVersion, linearcodec.NewDefault()) - require.NoError(err) db := memdb.New() - ctx := snow.DefaultConsensusContextTest() - indexIntf, err := newIndex(db, logging.NoLog{}, codec, mockable.Clock{}) + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) + idx, err := newIndex(db, logging.NoLog{}, mockable.Clock{}) require.NoError(err) - idx := indexIntf.(*index) // Insert [MaxFetchedByRange] + 1 containers for i := uint64(0); i < MaxFetchedByRange+1; i++ { - err = idx.Accept(ctx, ids.GenerateTestID(), utils.RandomBytes(32)) - require.NoError(err) + require.NoError(idx.Accept(ctx, ids.GenerateTestID(), utils.RandomBytes(32))) } 
// Page size too large _, err = idx.GetContainerRange(0, MaxFetchedByRange+1) - require.Error(err) + require.ErrorIs(err, errNumToFetchInvalid) // Make sure data is right containers, err := idx.GetContainerRange(0, MaxFetchedByRange) @@ -150,19 +139,17 @@ func TestIndexGetContainerByRangeMaxPageSize(t *testing.T) { containers, err = idx.GetContainerRange(MaxFetchedByRange-1, MaxFetchedByRange) require.NoError(err) require.Len(containers, 2) - require.EqualValues(containers[1], containers2[MaxFetchedByRange-1]) - require.EqualValues(containers[0], containers2[MaxFetchedByRange-2]) + require.Equal(containers[1], containers2[MaxFetchedByRange-1]) + require.Equal(containers[0], containers2[MaxFetchedByRange-2]) } func TestDontIndexSameContainerTwice(t *testing.T) { // Setup require := require.New(t) - codec := codec.NewDefaultManager() - err := codec.RegisterCodec(codecVersion, linearcodec.NewDefault()) - require.NoError(err) db := memdb.New() - ctx := snow.DefaultConsensusContextTest() - idx, err := newIndex(db, logging.NoLog{}, codec, mockable.Clock{}) + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) + idx, err := newIndex(db, logging.NoLog{}, mockable.Clock{}) require.NoError(err) // Accept the same container twice @@ -170,8 +157,8 @@ func TestDontIndexSameContainerTwice(t *testing.T) { require.NoError(idx.Accept(ctx, containerID, []byte{1, 2, 3})) require.NoError(idx.Accept(ctx, containerID, []byte{4, 5, 6})) _, err = idx.GetContainerByIndex(1) - require.Error(err, "should not have accepted same container twice") + require.ErrorIs(err, errNoContainerAtIndex) gotContainer, err := idx.GetContainerByID(containerID) require.NoError(err) - require.EqualValues(gotContainer.Bytes, []byte{1, 2, 3}, "should not have accepted same container twice") + require.Equal([]byte{1, 2, 3}, gotContainer.Bytes) } diff --git a/avalanchego/indexer/indexer.go b/avalanchego/indexer/indexer.go index 8936e57f..c20f13f6 100644 --- 
a/avalanchego/indexer/indexer.go +++ b/avalanchego/indexer/indexer.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package indexer @@ -6,17 +6,13 @@ package indexer import ( "fmt" "io" - "math" "sync" "github.com/gorilla/rpc/v2" - "go.uber.org/zap" "github.com/ava-labs/avalanchego/api/server" "github.com/ava-labs/avalanchego/chains" - "github.com/ava-labs/avalanchego/codec" - "github.com/ava-labs/avalanchego/codec/linearcodec" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/ids" @@ -25,7 +21,6 @@ import ( "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/hashing" "github.com/ava-labs/avalanchego/utils/json" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/timer/mockable" @@ -33,26 +28,18 @@ import ( ) const ( - indexNamePrefix = "index-" - codecVersion = uint16(0) - // Max size, in bytes, of something serialized by this indexer - // Assumes no containers are larger than math.MaxUint32 - // wrappers.IntLen accounts for the size of the container bytes - // wrappers.LongLen accounts for the timestamp of the container - // hashing.HashLen accounts for the container ID - // wrappers.ShortLen accounts for the codec version - codecMaxSize = int(constants.DefaultMaxMessageSize) + wrappers.IntLen + wrappers.LongLen + hashing.HashLen + wrappers.ShortLen + indexNamePrefix = "index-" + txPrefix = 0x01 + vtxPrefix = 0x02 + blockPrefix = 0x03 + isIncompletePrefix = 0x04 + previouslyIndexedPrefix = 0x05 ) var ( - txPrefix = byte(0x01) - vtxPrefix = byte(0x02) - blockPrefix = byte(0x03) - isIncompletePrefix = byte(0x04) - previouslyIndexedPrefix = byte(0x05) - hasRunKey 
= []byte{0x07} - _ Indexer = (*indexer)(nil) + + hasRunKey = []byte{0x07} ) // Config for an indexer @@ -81,7 +68,6 @@ type Indexer interface { // NewIndexer returns a new Indexer and registers a new endpoint on the given API server. func NewIndexer(config Config) (Indexer, error) { indexer := &indexer{ - codec: codec.NewManager(codecMaxSize), log: config.Log, db: config.DB, allowIncompleteIndex: config.AllowIncompleteIndex, @@ -89,19 +75,13 @@ func NewIndexer(config Config) (Indexer, error) { blockAcceptorGroup: config.BlockAcceptorGroup, txAcceptorGroup: config.TxAcceptorGroup, vertexAcceptorGroup: config.VertexAcceptorGroup, - txIndices: map[ids.ID]Index{}, - vtxIndices: map[ids.ID]Index{}, - blockIndices: map[ids.ID]Index{}, + txIndices: map[ids.ID]*index{}, + vtxIndices: map[ids.ID]*index{}, + blockIndices: map[ids.ID]*index{}, pathAdder: config.APIServer, shutdownF: config.ShutdownF, } - if err := indexer.codec.RegisterCodec( - codecVersion, - linearcodec.NewCustomMaxLength(math.MaxUint32), - ); err != nil { - return nil, fmt.Errorf("couldn't register codec: %w", err) - } hasRun, err := indexer.hasRun() if err != nil { return nil, err @@ -111,7 +91,6 @@ func NewIndexer(config Config) (Indexer, error) { } type indexer struct { - codec codec.Manager clock mockable.Clock lock sync.RWMutex log logging.Logger @@ -135,11 +114,11 @@ type indexer struct { indexingEnabled bool // Chain ID --> index of blocks of that chain (if applicable) - blockIndices map[ids.ID]Index + blockIndices map[ids.ID]*index // Chain ID --> index of vertices of that chain (if applicable) - vtxIndices map[ids.ID]Index + vtxIndices map[ids.ID]*index // Chain ID --> index of txs of that chain (if applicable) - txIndices map[ids.ID]Index + txIndices map[ids.ID]*index // Notifies of newly accepted blocks blockAcceptorGroup snow.AcceptorGroup @@ -332,12 +311,12 @@ func (i *indexer) registerChainHelper( prefixEnd byte, name, endpoint string, acceptorGroup snow.AcceptorGroup, -) (Index, error) { - 
prefix := make([]byte, hashing.HashLen+wrappers.ByteLen) +) (*index, error) { + prefix := make([]byte, ids.IDLen+wrappers.ByteLen) copy(prefix, chainID[:]) - prefix[hashing.HashLen] = prefixEnd + prefix[ids.IDLen] = prefixEnd indexDB := prefixdb.New(prefix, i.db) - index, err := newIndex(indexDB, i.log, i.codec, i.clock) + index, err := newIndex(indexDB, i.log, i.clock) if err != nil { _ = indexDB.Close() return nil, err @@ -354,12 +333,11 @@ func (i *indexer) registerChainHelper( codec := json.NewCodec() apiServer.RegisterCodec(codec, "application/json") apiServer.RegisterCodec(codec, "application/json;charset=UTF-8") - if err := apiServer.RegisterService(&service{Index: index}, "index"); err != nil { + if err := apiServer.RegisterService(&service{index: index}, "index"); err != nil { _ = index.Close() return nil, err } - handler := &common.HTTPHandler{LockOptions: common.NoLock, Handler: apiServer} - if err := i.pathAdder.AddRoute(handler, &sync.RWMutex{}, "index/"+name, "/"+endpoint); err != nil { + if err := i.pathAdder.AddRoute(apiServer, "index/"+name, "/"+endpoint); err != nil { _ = index.Close() return nil, err } @@ -409,32 +387,32 @@ func (i *indexer) close() error { } func (i *indexer) markIncomplete(chainID ids.ID) error { - key := make([]byte, hashing.HashLen+wrappers.ByteLen) + key := make([]byte, ids.IDLen+wrappers.ByteLen) copy(key, chainID[:]) - key[hashing.HashLen] = isIncompletePrefix + key[ids.IDLen] = isIncompletePrefix return i.db.Put(key, nil) } // Returns true if this chain is incomplete func (i *indexer) isIncomplete(chainID ids.ID) (bool, error) { - key := make([]byte, hashing.HashLen+wrappers.ByteLen) + key := make([]byte, ids.IDLen+wrappers.ByteLen) copy(key, chainID[:]) - key[hashing.HashLen] = isIncompletePrefix + key[ids.IDLen] = isIncompletePrefix return i.db.Has(key) } func (i *indexer) markPreviouslyIndexed(chainID ids.ID) error { - key := make([]byte, hashing.HashLen+wrappers.ByteLen) + key := make([]byte, 
ids.IDLen+wrappers.ByteLen) copy(key, chainID[:]) - key[hashing.HashLen] = previouslyIndexedPrefix + key[ids.IDLen] = previouslyIndexedPrefix return i.db.Put(key, nil) } // Returns true if this chain is incomplete func (i *indexer) previouslyIndexed(chainID ids.ID) (bool, error) { - key := make([]byte, hashing.HashLen+wrappers.ByteLen) + key := make([]byte, ids.IDLen+wrappers.ByteLen) copy(key, chainID[:]) - key[hashing.HashLen] = previouslyIndexedPrefix + key[ids.IDLen] = previouslyIndexedPrefix return i.db.Has(key) } diff --git a/avalanchego/indexer/indexer_test.go b/avalanchego/indexer/indexer_test.go index 79af29fb..c8eb6c8e 100644 --- a/avalanchego/indexer/indexer_test.go +++ b/avalanchego/indexer/indexer_test.go @@ -1,28 +1,26 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package indexer import ( "errors" + "net/http" "sync" "testing" "time" - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" "github.com/ava-labs/avalanchego/api/server" "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/snow/choices" - "github.com/ava-labs/avalanchego/snow/consensus/snowstorm" "github.com/ava-labs/avalanchego/snow/engine/avalanche/vertex" - "github.com/ava-labs/avalanchego/snow/engine/common" - "github.com/ava-labs/avalanchego/snow/engine/snowman/block/mocks" + "github.com/ava-labs/avalanchego/snow/engine/snowman/block" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/logging" ) @@ -39,7 +37,7 @@ type apiServerMock struct { endpoints []string } -func (a *apiServerMock) AddRoute(_ *common.HTTPHandler, _ *sync.RWMutex, base, endpoint string) error { +func (a 
*apiServerMock) AddRoute(_ http.Handler, base, endpoint string) error { a.timesCalled++ a.bases = append(a.bases, base) a.endpoints = append(a.endpoints, endpoint) @@ -67,9 +65,8 @@ func TestNewIndexer(t *testing.T) { idxrIntf, err := NewIndexer(config) require.NoError(err) - idxr, ok := idxrIntf.(*indexer) - require.True(ok) - require.NotNil(idxr.codec) + require.IsType(&indexer{}, idxrIntf) + idxr := idxrIntf.(*indexer) require.NotNil(idxr.log) require.NotNil(idxr.db) require.False(idxr.closed) @@ -77,11 +74,11 @@ func TestNewIndexer(t *testing.T) { require.True(idxr.indexingEnabled) require.True(idxr.allowIncompleteIndex) require.NotNil(idxr.blockIndices) - require.Len(idxr.blockIndices, 0) + require.Empty(idxr.blockIndices) require.NotNil(idxr.txIndices) - require.Len(idxr.txIndices, 0) + require.Empty(idxr.txIndices) require.NotNil(idxr.vtxIndices) - require.Len(idxr.vtxIndices, 0) + require.Empty(idxr.vtxIndices) require.NotNil(idxr.blockAcceptorGroup) require.NotNil(idxr.txAcceptorGroup) require.NotNil(idxr.vertexAcceptorGroup) @@ -118,8 +115,8 @@ func TestMarkHasRunAndShutdown(t *testing.T) { config.DB = versiondb.New(baseDB) idxrIntf, err = NewIndexer(config) require.NoError(err) - idxr, ok := idxrIntf.(*indexer) - require.True(ok) + require.IsType(&indexer{}, idxrIntf) + idxr := idxrIntf.(*indexer) require.True(idxr.hasRunBefore) require.NoError(idxr.Close()) shutdown.Wait() @@ -130,7 +127,6 @@ func TestMarkHasRunAndShutdown(t *testing.T) { func TestIndexer(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() baseDB := memdb.New() db := versiondb.New(baseDB) @@ -150,14 +146,14 @@ func TestIndexer(t *testing.T) { // Create indexer idxrIntf, err := NewIndexer(config) require.NoError(err) - idxr, ok := idxrIntf.(*indexer) - require.True(ok) + require.IsType(&indexer{}, idxrIntf) + idxr := idxrIntf.(*indexer) now := time.Now() idxr.clock.Set(now) // Assert state is right - chain1Ctx := 
snow.DefaultConsensusContextTest() - chain1Ctx.ChainID = ids.GenerateTestID() + snow1Ctx := snowtest.Context(t, snowtest.CChainID) + chain1Ctx := snowtest.ConsensusContext(snow1Ctx) isIncomplete, err := idxr.isIncomplete(chain1Ctx.ChainID) require.NoError(err) require.False(isIncomplete) @@ -166,7 +162,7 @@ func TestIndexer(t *testing.T) { require.False(previouslyIndexed) // Register this chain, creating a new index - chainVM := mocks.NewMockChainVM(ctrl) + chainVM := block.NewMockChainVM(ctrl) idxr.RegisterChain("chain1", chain1Ctx, chainVM) isIncomplete, err = idxr.isIncomplete(chain1Ctx.ChainID) require.NoError(err) @@ -174,12 +170,12 @@ func TestIndexer(t *testing.T) { previouslyIndexed, err = idxr.previouslyIndexed(chain1Ctx.ChainID) require.NoError(err) require.True(previouslyIndexed) - require.EqualValues(1, server.timesCalled) - require.EqualValues("index/chain1", server.bases[0]) - require.EqualValues("/block", server.endpoints[0]) + require.Equal(1, server.timesCalled) + require.Equal("index/chain1", server.bases[0]) + require.Equal("/block", server.endpoints[0]) require.Len(idxr.blockIndices, 1) - require.Len(idxr.txIndices, 0) - require.Len(idxr.vtxIndices, 0) + require.Empty(idxr.txIndices) + require.Empty(idxr.vtxIndices) // Accept a container blkID, blkBytes := ids.GenerateTestID(), utils.RandomBytes(32) @@ -207,7 +203,7 @@ func TestIndexer(t *testing.T) { // Verify GetIndex is right index, err := blkIdx.GetIndex(blkID) require.NoError(err) - require.EqualValues(0, index) + require.Zero(index) // Verify GetContainerByIndex is right container, err = blkIdx.GetContainerByIndex(0) @@ -232,13 +228,13 @@ func TestIndexer(t *testing.T) { config.DB = versiondb.New(baseDB) idxrIntf, err = NewIndexer(config) require.NoError(err) - idxr, ok = idxrIntf.(*indexer) + require.IsType(&indexer{}, idxrIntf) + idxr = idxrIntf.(*indexer) now = time.Now() idxr.clock.Set(now) - require.True(ok) - require.Len(idxr.blockIndices, 0) - require.Len(idxr.txIndices, 0) - 
require.Len(idxr.vtxIndices, 0) + require.Empty(idxr.blockIndices) + require.Empty(idxr.txIndices) + require.Empty(idxr.vtxIndices) require.True(idxr.hasRunBefore) previouslyIndexed, err = idxr.previouslyIndexed(chain1Ctx.ChainID) require.NoError(err) @@ -257,12 +253,12 @@ func TestIndexer(t *testing.T) { container, err = blkIdx.GetLastAccepted() require.NoError(err) require.Equal(blkID, container.ID) - require.EqualValues(1, server.timesCalled) // block index for chain + require.Equal(1, server.timesCalled) // block index for chain require.Contains(server.endpoints, "/block") // Register a DAG chain - chain2Ctx := snow.DefaultConsensusContextTest() - chain2Ctx.ChainID = ids.GenerateTestID() + snow2Ctx := snowtest.Context(t, snowtest.XChainID) + chain2Ctx := snowtest.ConsensusContext(snow2Ctx) isIncomplete, err = idxr.isIncomplete(chain2Ctx.ChainID) require.NoError(err) require.False(isIncomplete) @@ -272,7 +268,7 @@ func TestIndexer(t *testing.T) { dagVM := vertex.NewMockLinearizableVM(ctrl) idxr.RegisterChain("chain2", chain2Ctx, dagVM) require.NoError(err) - require.EqualValues(4, server.timesCalled) // block index for chain, block index for dag, vtx index, tx index + require.Equal(4, server.timesCalled) // block index for chain, block index for dag, vtx index, tx index require.Contains(server.bases, "index/chain2") require.Contains(server.endpoints, "/block") require.Contains(server.endpoints, "/vtx") @@ -307,7 +303,7 @@ func TestIndexer(t *testing.T) { // Verify GetIndex is right index, err = vtxIdx.GetIndex(vtxID) require.NoError(err) - require.EqualValues(0, index) + require.Zero(index) // Verify GetContainerByIndex is right vtx, err = vtxIdx.GetContainerByIndex(0) @@ -324,21 +320,11 @@ func TestIndexer(t *testing.T) { txID, txBytes := ids.GenerateTestID(), utils.RandomBytes(32) expectedTx := Container{ ID: txID, - Bytes: blkBytes, + Bytes: txBytes, Timestamp: now.UnixNano(), } - // Mocked VM knows about this tx now - dagVM.EXPECT().GetTx(gomock.Any(), 
txID).Return( - &snowstorm.TestTx{ - TestDecidable: choices.TestDecidable{ - IDV: txID, - StatusV: choices.Accepted, - }, - BytesV: txBytes, - }, nil, - ).AnyTimes() - - require.NoError(config.TxAcceptorGroup.Accept(chain2Ctx, txID, blkBytes)) + + require.NoError(config.TxAcceptorGroup.Accept(chain2Ctx, txID, txBytes)) txIdx := idxr.txIndices[chain2Ctx.ChainID] require.NotNil(txIdx) @@ -356,7 +342,7 @@ func TestIndexer(t *testing.T) { // Verify GetIndex is right index, err = txIdx.GetIndex(txID) require.NoError(err) - require.EqualValues(0, index) + require.Zero(index) // Verify GetContainerByIndex is right tx, err = txIdx.GetContainerByIndex(0) @@ -373,13 +359,13 @@ func TestIndexer(t *testing.T) { // happen on the block/tx index. Similar for tx. lastAcceptedTx, err := txIdx.GetLastAccepted() require.NoError(err) - require.EqualValues(txID, lastAcceptedTx.ID) + require.Equal(txID, lastAcceptedTx.ID) lastAcceptedVtx, err := vtxIdx.GetLastAccepted() require.NoError(err) - require.EqualValues(vtxID, lastAcceptedVtx.ID) + require.Equal(vtxID, lastAcceptedVtx.ID) lastAcceptedBlk, err := blkIdx.GetLastAccepted() require.NoError(err) - require.EqualValues(blkID, lastAcceptedBlk.ID) + require.Equal(blkID, lastAcceptedBlk.ID) // Close the indexer again require.NoError(config.DB.(*versiondb.Database).Commit()) @@ -389,21 +375,21 @@ func TestIndexer(t *testing.T) { config.DB = versiondb.New(baseDB) idxrIntf, err = NewIndexer(config) require.NoError(err) - idxr, ok = idxrIntf.(*indexer) - require.True(ok) + require.IsType(&indexer{}, idxrIntf) + idxr = idxrIntf.(*indexer) idxr.RegisterChain("chain1", chain1Ctx, chainVM) idxr.RegisterChain("chain2", chain2Ctx, dagVM) // Verify state lastAcceptedTx, err = idxr.txIndices[chain2Ctx.ChainID].GetLastAccepted() require.NoError(err) - require.EqualValues(txID, lastAcceptedTx.ID) + require.Equal(txID, lastAcceptedTx.ID) lastAcceptedVtx, err = idxr.vtxIndices[chain2Ctx.ChainID].GetLastAccepted() require.NoError(err) - 
require.EqualValues(vtxID, lastAcceptedVtx.ID) + require.Equal(vtxID, lastAcceptedVtx.ID) lastAcceptedBlk, err = idxr.blockIndices[chain1Ctx.ChainID].GetLastAccepted() require.NoError(err) - require.EqualValues(blkID, lastAcceptedBlk.ID) + require.Equal(blkID, lastAcceptedBlk.ID) } // Make sure the indexer doesn't allow incomplete indices unless explicitly allowed @@ -411,7 +397,6 @@ func TestIncompleteIndex(t *testing.T) { // Create an indexer with indexing disabled require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() baseDB := memdb.New() config := Config{ @@ -427,25 +412,25 @@ func TestIncompleteIndex(t *testing.T) { } idxrIntf, err := NewIndexer(config) require.NoError(err) - idxr, ok := idxrIntf.(*indexer) - require.True(ok) + require.IsType(&indexer{}, idxrIntf) + idxr := idxrIntf.(*indexer) require.False(idxr.indexingEnabled) // Register a chain - chain1Ctx := snow.DefaultConsensusContextTest() - chain1Ctx.ChainID = ids.GenerateTestID() + snow1Ctx := snowtest.Context(t, snowtest.CChainID) + chain1Ctx := snowtest.ConsensusContext(snow1Ctx) isIncomplete, err := idxr.isIncomplete(chain1Ctx.ChainID) require.NoError(err) require.False(isIncomplete) previouslyIndexed, err := idxr.previouslyIndexed(chain1Ctx.ChainID) require.NoError(err) require.False(previouslyIndexed) - chainVM := mocks.NewMockChainVM(ctrl) + chainVM := block.NewMockChainVM(ctrl) idxr.RegisterChain("chain1", chain1Ctx, chainVM) isIncomplete, err = idxr.isIncomplete(chain1Ctx.ChainID) require.NoError(err) require.True(isIncomplete) - require.Len(idxr.blockIndices, 0) + require.Empty(idxr.blockIndices) // Close and re-open the indexer, this time with indexing enabled require.NoError(config.DB.(*versiondb.Database).Commit()) @@ -454,8 +439,8 @@ func TestIncompleteIndex(t *testing.T) { config.DB = versiondb.New(baseDB) idxrIntf, err = NewIndexer(config) require.NoError(err) - idxr, ok = idxrIntf.(*indexer) - require.True(ok) + require.IsType(&indexer{}, idxrIntf) + idxr = 
idxrIntf.(*indexer) require.True(idxr.indexingEnabled) // Register the chain again. Should die due to incomplete index. @@ -470,8 +455,8 @@ func TestIncompleteIndex(t *testing.T) { config.DB = versiondb.New(baseDB) idxrIntf, err = NewIndexer(config) require.NoError(err) - idxr, ok = idxrIntf.(*indexer) - require.True(ok) + require.IsType(&indexer{}, idxrIntf) + idxr = idxrIntf.(*indexer) require.True(idxr.allowIncompleteIndex) // Register the chain again. Should be OK @@ -486,15 +471,13 @@ func TestIncompleteIndex(t *testing.T) { config.DB = versiondb.New(baseDB) idxrIntf, err = NewIndexer(config) require.NoError(err) - _, ok = idxrIntf.(*indexer) - require.True(ok) + require.IsType(&indexer{}, idxrIntf) } // Ensure we only index chains in the primary network func TestIgnoreNonDefaultChains(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() baseDB := memdb.New() db := versiondb.New(baseDB) @@ -513,16 +496,17 @@ func TestIgnoreNonDefaultChains(t *testing.T) { // Create indexer idxrIntf, err := NewIndexer(config) require.NoError(err) - idxr, ok := idxrIntf.(*indexer) - require.True(ok) + require.IsType(&indexer{}, idxrIntf) + idxr := idxrIntf.(*indexer) - // Assert state is right - chain1Ctx := snow.DefaultConsensusContextTest() - chain1Ctx.ChainID = ids.GenerateTestID() - chain1Ctx.SubnetID = ids.GenerateTestID() + // Create chain1Ctx for a random subnet + chain. 
+ chain1Ctx := snowtest.ConsensusContext(&snow.Context{ + ChainID: ids.GenerateTestID(), + SubnetID: ids.GenerateTestID(), + }) // RegisterChain should return without adding an index for this chain - chainVM := mocks.NewMockChainVM(ctrl) + chainVM := block.NewMockChainVM(ctrl) idxr.RegisterChain("chain1", chain1Ctx, chainVM) - require.Len(idxr.blockIndices, 0) + require.Empty(idxr.blockIndices) } diff --git a/avalanchego/indexer/service.go b/avalanchego/indexer/service.go index 98bc91e9..83f9912f 100644 --- a/avalanchego/indexer/service.go +++ b/avalanchego/indexer/service.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package indexer @@ -15,7 +15,7 @@ import ( ) type service struct { - Index + index *index } type FormattedContainer struct { @@ -46,11 +46,11 @@ type GetLastAcceptedArgs struct { } func (s *service) GetLastAccepted(_ *http.Request, args *GetLastAcceptedArgs, reply *FormattedContainer) error { - container, err := s.Index.GetLastAccepted() + container, err := s.index.GetLastAccepted() if err != nil { return err } - index, err := s.Index.GetIndex(container.ID) + index, err := s.index.GetIndex(container.ID) if err != nil { return fmt.Errorf("couldn't get index: %w", err) } @@ -64,11 +64,11 @@ type GetContainerByIndexArgs struct { } func (s *service) GetContainerByIndex(_ *http.Request, args *GetContainerByIndexArgs, reply *FormattedContainer) error { - container, err := s.Index.GetContainerByIndex(uint64(args.Index)) + container, err := s.index.GetContainerByIndex(uint64(args.Index)) if err != nil { return err } - index, err := s.Index.GetIndex(container.ID) + index, err := s.index.GetIndex(container.ID) if err != nil { return fmt.Errorf("couldn't get index: %w", err) } @@ -92,14 +92,14 @@ type GetContainerRangeResponse struct { // If [n] > [MaxFetchedByRange], returns an error. 
// If we run out of transactions, returns the ones fetched before running out. func (s *service) GetContainerRange(_ *http.Request, args *GetContainerRangeArgs, reply *GetContainerRangeResponse) error { - containers, err := s.Index.GetContainerRange(uint64(args.StartIndex), uint64(args.NumToFetch)) + containers, err := s.index.GetContainerRange(uint64(args.StartIndex), uint64(args.NumToFetch)) if err != nil { return err } reply.Containers = make([]FormattedContainer, len(containers)) for i, container := range containers { - index, err := s.Index.GetIndex(container.ID) + index, err := s.index.GetIndex(container.ID) if err != nil { return fmt.Errorf("couldn't get index: %w", err) } @@ -120,7 +120,7 @@ type GetIndexResponse struct { } func (s *service) GetIndex(_ *http.Request, args *GetIndexArgs, reply *GetIndexResponse) error { - index, err := s.Index.GetIndex(args.ID) + index, err := s.index.GetIndex(args.ID) reply.Index = json.Uint64(index) return err } @@ -134,7 +134,7 @@ type IsAcceptedResponse struct { } func (s *service) IsAccepted(_ *http.Request, args *IsAcceptedArgs, reply *IsAcceptedResponse) error { - _, err := s.Index.GetIndex(args.ID) + _, err := s.index.GetIndex(args.ID) if err == nil { reply.IsAccepted = true return nil @@ -152,11 +152,11 @@ type GetContainerByIDArgs struct { } func (s *service) GetContainerByID(_ *http.Request, args *GetContainerByIDArgs, reply *FormattedContainer) error { - container, err := s.Index.GetContainerByID(args.ID) + container, err := s.index.GetContainerByID(args.ID) if err != nil { return err } - index, err := s.Index.GetIndex(container.ID) + index, err := s.index.GetIndex(container.ID) if err != nil { return fmt.Errorf("couldn't get index: %w", err) } diff --git a/avalanchego/ipcs/chainipc.go b/avalanchego/ipcs/chainipc.go index 56d43933..fc8e230e 100644 --- a/avalanchego/ipcs/chainipc.go +++ b/avalanchego/ipcs/chainipc.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
+// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package ipcs @@ -8,7 +8,6 @@ import ( "path/filepath" "go.uber.org/zap" - "golang.org/x/exp/maps" "github.com/ava-labs/avalanchego/ids" diff --git a/avalanchego/ipcs/eventsocket.go b/avalanchego/ipcs/eventsocket.go index 37b370c3..0dbbe1c9 100644 --- a/avalanchego/ipcs/eventsocket.go +++ b/avalanchego/ipcs/eventsocket.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package ipcs @@ -11,6 +11,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/ipcs/socket" "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/wrappers" ) @@ -133,12 +134,10 @@ func newEventIPCSocket( url: url, socket: socket.NewSocket(url, ctx.log), unregisterFn: func() error { - errs := wrappers.Errs{} - errs.Add( + return utils.Err( snowmanAcceptorGroup.DeregisterAcceptor(chainID, ipcName), avalancheAcceptorGroup.DeregisterAcceptor(chainID, ipcName), ) - return errs.Err }, } @@ -175,9 +174,10 @@ func (eis *eventSocket) Accept(_ *snow.ConsensusContext, _ ids.ID, container []b // stop unregisters the event handler and closes the eventSocket func (eis *eventSocket) stop() error { eis.log.Info("closing Chain IPC") - errs := wrappers.Errs{} - errs.Add(eis.unregisterFn(), eis.socket.Close()) - return errs.Err + return utils.Err( + eis.unregisterFn(), + eis.socket.Close(), + ) } // URL returns the URL of the socket diff --git a/avalanchego/ipcs/socket/socket.go b/avalanchego/ipcs/socket/socket.go index d3ca391d..77f2d6fd 100644 --- a/avalanchego/ipcs/socket/socket.go +++ b/avalanchego/ipcs/socket/socket.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
+// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package socket diff --git a/avalanchego/ipcs/socket/socket_test.go b/avalanchego/ipcs/socket/socket_test.go index 3489fef2..a2c1ec63 100644 --- a/avalanchego/ipcs/socket/socket_test.go +++ b/avalanchego/ipcs/socket/socket_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package socket @@ -6,9 +6,15 @@ package socket import ( "net" "testing" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/utils/logging" ) func TestSocketSendAndReceive(t *testing.T) { + require := require.New(t) + var ( connCh chan net.Conn socketName = "/tmp/pipe-test.sock" @@ -17,16 +23,12 @@ func TestSocketSendAndReceive(t *testing.T) { ) // Create socket and client; wait for client to connect - socket := NewSocket(socketName, nil) + socket := NewSocket(socketName, logging.NoLog{}) socket.accept, connCh = newTestAcceptFn(t) - if err := socket.Listen(); err != nil { - t.Fatal("Failed to listen on socket:", err.Error()) - } + require.NoError(socket.Listen()) client, err := Dial(socketName) - if err != nil { - t.Fatal("Failed to dial socket:", err.Error()) - } + require.NoError(err) <-connCh // Start sending in the background @@ -38,22 +40,17 @@ func TestSocketSendAndReceive(t *testing.T) { // Receive message and compare it to what was sent receivedMsg, err := client.Recv() - if err != nil { - t.Fatal("Failed to receive from socket:", err.Error()) - } - if string(receivedMsg) != string(msg) { - t.Fatal("Received incorrect message:", string(msg)) - } + require.NoError(err) + require.Equal(msg, receivedMsg) // Test max message size client.SetMaxMessageSize(msgLen) - if _, err := client.Recv(); err != nil { - t.Fatal("Failed to receive from socket:", err.Error()) - } + _, err = client.Recv() + require.NoError(err) + 
client.SetMaxMessageSize(msgLen - 1) - if _, err := client.Recv(); err != ErrMessageTooLarge { - t.Fatal("Should have received message too large error, got:", err) - } + _, err = client.Recv() + require.ErrorIs(err, ErrMessageTooLarge) } // newTestAcceptFn creates a new acceptFn and a channel that receives all new @@ -63,9 +60,7 @@ func newTestAcceptFn(t *testing.T) (acceptFn, chan net.Conn) { return func(s *Socket, l net.Listener) { conn, err := l.Accept() - if err != nil { - t.Error(err) - } + require.NoError(t, err) s.connLock.Lock() s.conns[conn] = struct{}{} diff --git a/avalanchego/ipcs/socket/socket_unix.go b/avalanchego/ipcs/socket/socket_unix.go index cf5d7c1a..14d5aabd 100644 --- a/avalanchego/ipcs/socket/socket_unix.go +++ b/avalanchego/ipcs/socket/socket_unix.go @@ -1,9 +1,9 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + //go:build !windows && !plan9 && !js // +build !windows,!plan9,!js -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - package socket import ( diff --git a/avalanchego/ipcs/socket/socket_windows.go b/avalanchego/ipcs/socket/socket_windows.go index ea61157f..99590cb6 100644 --- a/avalanchego/ipcs/socket/socket_windows.go +++ b/avalanchego/ipcs/socket/socket_windows.go @@ -1,9 +1,9 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + //go:build windows // +build windows -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- package socket import ( diff --git a/avalanchego/main/default.pgo b/avalanchego/main/default.pgo new file mode 100644 index 00000000..ad91ea81 Binary files /dev/null and b/avalanchego/main/default.pgo differ diff --git a/avalanchego/main/main.go b/avalanchego/main/main.go index 93bfa54f..88a9f9d1 100644 --- a/avalanchego/main/main.go +++ b/avalanchego/main/main.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package main @@ -9,7 +9,6 @@ import ( "os" "github.com/spf13/pflag" - "golang.org/x/term" "github.com/ava-labs/avalanchego/app" @@ -45,11 +44,16 @@ func main() { // Flare specific: set the application prefix (flare for songbird and avalanche for flare) version.InitApplicationPrefix(nodeConfig.NetworkID) - nodeApp := app.New(nodeConfig) // Create node wrapper if term.IsTerminal(int(os.Stdout.Fd())) { fmt.Println(app.Header) } + nodeApp, err := app.New(nodeConfig) + if err != nil { + fmt.Printf("couldn't start node: %s\n", err) + os.Exit(1) + } + exitCode := app.Run(nodeApp) os.Exit(exitCode) } diff --git a/avalanchego/message/creator.go b/avalanchego/message/creator.go index f1a6def2..8040bccb 100644 --- a/avalanchego/message/creator.go +++ b/avalanchego/message/creator.go @@ -1,16 +1,16 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package message import ( - "fmt" "time" "github.com/prometheus/client_golang/prometheus" "github.com/ava-labs/avalanchego/utils/compression" "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/metric" ) var _ Creator = (*creator)(nil) @@ -32,7 +32,7 @@ func NewCreator( compressionType compression.Type, maxMessageTimeout time.Duration, ) (Creator, error) { - namespace := fmt.Sprintf("%s_codec", parentNamespace) + namespace := metric.AppendNamespace(parentNamespace, "codec") builder, err := newMsgBuilder( log, namespace, diff --git a/avalanchego/message/fields.go b/avalanchego/message/fields.go index 87bffe51..08e744fa 100644 --- a/avalanchego/message/fields.go +++ b/avalanchego/message/fields.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package message diff --git a/avalanchego/message/inbound_msg_builder.go b/avalanchego/message/inbound_msg_builder.go index 1cc1edcd..b32dbc5d 100644 --- a/avalanchego/message/inbound_msg_builder.go +++ b/avalanchego/message/inbound_msg_builder.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package message @@ -135,18 +135,16 @@ func InboundGetAcceptedFrontier( func InboundAcceptedFrontier( chainID ids.ID, requestID uint32, - containerIDs []ids.ID, + containerID ids.ID, nodeID ids.NodeID, ) InboundMessage { - containerIDBytes := make([][]byte, len(containerIDs)) - encodeIDs(containerIDs, containerIDBytes) return &inboundMessage{ nodeID: nodeID, op: AcceptedFrontierOp, message: &p2p.AcceptedFrontier{ - ChainId: chainID[:], - RequestId: requestID, - ContainerIds: containerIDBytes, + ChainId: chainID[:], + RequestId: requestID, + ContainerId: containerID[:], }, expiration: mockable.MaxTime, } @@ -201,6 +199,7 @@ func InboundPushQuery( requestID uint32, deadline time.Duration, container []byte, + requestedHeight uint64, nodeID ids.NodeID, engineType p2p.EngineType, ) InboundMessage { @@ -208,11 +207,12 @@ func InboundPushQuery( nodeID: nodeID, op: PushQueryOp, message: &p2p.PushQuery{ - ChainId: chainID[:], - RequestId: requestID, - Deadline: uint64(deadline), - Container: container, - EngineType: engineType, + ChainId: chainID[:], + RequestId: requestID, + Deadline: uint64(deadline), + Container: container, + RequestedHeight: requestedHeight, + EngineType: engineType, }, expiration: time.Now().Add(deadline), } @@ -223,6 +223,7 @@ func InboundPullQuery( requestID uint32, deadline time.Duration, containerID ids.ID, + requestedHeight uint64, nodeID ids.NodeID, engineType p2p.EngineType, ) InboundMessage { @@ -230,11 +231,12 @@ func InboundPullQuery( nodeID: nodeID, op: PullQueryOp, message: &p2p.PullQuery{ - ChainId: chainID[:], - RequestId: requestID, - Deadline: uint64(deadline), - ContainerId: containerID[:], - EngineType: engineType, + ChainId: chainID[:], + RequestId: requestID, + Deadline: uint64(deadline), + ContainerId: containerID[:], + RequestedHeight: requestedHeight, + EngineType: engineType, }, expiration: time.Now().Add(deadline), } @@ -243,22 +245,20 @@ func InboundPullQuery( func InboundChits( chainID ids.ID, requestID uint32, - 
preferredContainerIDs []ids.ID, - acceptedContainerIDs []ids.ID, + preferredID ids.ID, + preferredIDAtHeight ids.ID, + acceptedID ids.ID, nodeID ids.NodeID, ) InboundMessage { - preferredContainerIDBytes := make([][]byte, len(preferredContainerIDs)) - encodeIDs(preferredContainerIDs, preferredContainerIDBytes) - acceptedContainerIDBytes := make([][]byte, len(acceptedContainerIDs)) - encodeIDs(acceptedContainerIDs, acceptedContainerIDBytes) return &inboundMessage{ nodeID: nodeID, op: ChitsOp, message: &p2p.Chits{ - ChainId: chainID[:], - RequestId: requestID, - PreferredContainerIds: preferredContainerIDBytes, - AcceptedContainerIds: acceptedContainerIDBytes, + ChainId: chainID[:], + RequestId: requestID, + PreferredId: preferredID[:], + PreferredIdAtHeight: preferredIDAtHeight[:], + AcceptedId: acceptedID[:], }, expiration: mockable.MaxTime, } @@ -284,6 +284,26 @@ func InboundAppRequest( } } +func InboundAppError( + nodeID ids.NodeID, + chainID ids.ID, + requestID uint32, + errorCode int32, + errorMessage string, +) InboundMessage { + return &inboundMessage{ + nodeID: nodeID, + op: AppErrorOp, + message: &p2p.AppError{ + ChainId: chainID[:], + RequestId: requestID, + ErrorCode: errorCode, + ErrorMessage: errorMessage, + }, + expiration: mockable.MaxTime, + } +} + func InboundAppResponse( chainID ids.ID, requestID uint32, @@ -304,7 +324,7 @@ func InboundAppResponse( func encodeIDs(ids []ids.ID, result [][]byte) { for i, id := range ids { - copy := id - result[i] = copy[:] + id := id + result[i] = id[:] } } diff --git a/avalanchego/message/inbound_msg_builder_test.go b/avalanchego/message/inbound_msg_builder_test.go index 667a205d..37f713a3 100644 --- a/avalanchego/message/inbound_msg_builder_test.go +++ b/avalanchego/message/inbound_msg_builder_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package message @@ -8,11 +8,11 @@ import ( "time" "github.com/prometheus/client_golang/prometheus" - "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/proto/pb/p2p" + "github.com/ava-labs/avalanchego/utils/compression" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/timer/mockable" ) @@ -33,18 +33,19 @@ func Test_newMsgBuilder(t *testing.T) { func TestInboundMsgBuilder(t *testing.T) { var ( - chainID = ids.GenerateTestID() - requestID uint32 = 12345 - deadline = time.Hour - nodeID = ids.GenerateTestNodeID() - summary = []byte{9, 8, 7} - appBytes = []byte{1, 3, 3, 7} - container = []byte{1, 2, 3, 4, 5, 6, 7, 8, 9} - containerIDs = []ids.ID{ids.GenerateTestID(), ids.GenerateTestID()} - acceptedContainerIDs = []ids.ID{ids.GenerateTestID(), ids.GenerateTestID()} - summaryIDs = []ids.ID{ids.GenerateTestID(), ids.GenerateTestID()} - heights = []uint64{1000, 2000} - engineType = p2p.EngineType_ENGINE_TYPE_SNOWMAN + chainID = ids.GenerateTestID() + requestID uint32 = 12345 + deadline = time.Hour + nodeID = ids.GenerateTestNodeID() + summary = []byte{9, 8, 7} + appBytes = []byte{1, 3, 3, 7} + container = []byte{1, 2, 3, 4, 5, 6, 7, 8, 9} + containerIDs = []ids.ID{ids.GenerateTestID(), ids.GenerateTestID()} + requestedHeight uint64 = 999 + acceptedContainerID = ids.GenerateTestID() + summaryIDs = []ids.ID{ids.GenerateTestID(), ids.GenerateTestID()} + heights = []uint64{1000, 2000} + engineType = p2p.EngineType_ENGINE_TYPE_SNOWMAN ) t.Run( @@ -65,8 +66,8 @@ func TestInboundMsgBuilder(t *testing.T) { require.Equal(nodeID, msg.NodeID()) require.False(msg.Expiration().Before(start.Add(deadline))) require.False(end.Add(deadline).Before(msg.Expiration())) - innerMsg, ok := msg.Message().(*p2p.GetStateSummaryFrontier) - require.True(ok) + require.IsType(&p2p.GetStateSummaryFrontier{}, msg.Message()) + innerMsg := msg.Message().(*p2p.GetStateSummaryFrontier) require.Equal(chainID[:], 
innerMsg.ChainId) require.Equal(requestID, innerMsg.RequestId) }, @@ -87,8 +88,8 @@ func TestInboundMsgBuilder(t *testing.T) { require.Equal(StateSummaryFrontierOp, msg.Op()) require.Equal(nodeID, msg.NodeID()) require.Equal(mockable.MaxTime, msg.Expiration()) - innerMsg, ok := msg.Message().(*p2p.StateSummaryFrontier) - require.True(ok) + require.IsType(&p2p.StateSummaryFrontier{}, msg.Message()) + innerMsg := msg.Message().(*p2p.StateSummaryFrontier) require.Equal(chainID[:], innerMsg.ChainId) require.Equal(requestID, innerMsg.RequestId) require.Equal(summary, innerMsg.Summary) @@ -114,8 +115,8 @@ func TestInboundMsgBuilder(t *testing.T) { require.Equal(nodeID, msg.NodeID()) require.False(msg.Expiration().Before(start.Add(deadline))) require.False(end.Add(deadline).Before(msg.Expiration())) - innerMsg, ok := msg.Message().(*p2p.GetAcceptedStateSummary) - require.True(ok) + require.IsType(&p2p.GetAcceptedStateSummary{}, msg.Message()) + innerMsg := msg.Message().(*p2p.GetAcceptedStateSummary) require.Equal(chainID[:], innerMsg.ChainId) require.Equal(requestID, innerMsg.RequestId) require.Equal(heights, innerMsg.Heights) @@ -137,8 +138,8 @@ func TestInboundMsgBuilder(t *testing.T) { require.Equal(AcceptedStateSummaryOp, msg.Op()) require.Equal(nodeID, msg.NodeID()) require.Equal(mockable.MaxTime, msg.Expiration()) - innerMsg, ok := msg.Message().(*p2p.AcceptedStateSummary) - require.True(ok) + require.IsType(&p2p.AcceptedStateSummary{}, msg.Message()) + innerMsg := msg.Message().(*p2p.AcceptedStateSummary) require.Equal(chainID[:], innerMsg.ChainId) require.Equal(requestID, innerMsg.RequestId) summaryIDsBytes := make([][]byte, len(summaryIDs)) @@ -169,8 +170,8 @@ func TestInboundMsgBuilder(t *testing.T) { require.Equal(nodeID, msg.NodeID()) require.False(msg.Expiration().Before(start.Add(deadline))) require.False(end.Add(deadline).Before(msg.Expiration())) - innerMsg, ok := msg.Message().(*p2p.GetAcceptedFrontier) - require.True(ok) + 
require.IsType(&p2p.GetAcceptedFrontier{}, msg.Message()) + innerMsg := msg.Message().(*p2p.GetAcceptedFrontier) require.Equal(chainID[:], innerMsg.ChainId) require.Equal(requestID, innerMsg.RequestId) require.Equal(engineType, innerMsg.EngineType) @@ -185,23 +186,18 @@ func TestInboundMsgBuilder(t *testing.T) { msg := InboundAcceptedFrontier( chainID, requestID, - containerIDs, + containerIDs[0], nodeID, ) require.Equal(AcceptedFrontierOp, msg.Op()) require.Equal(nodeID, msg.NodeID()) require.Equal(mockable.MaxTime, msg.Expiration()) - innerMsg, ok := msg.Message().(*p2p.AcceptedFrontier) - require.True(ok) + require.IsType(&p2p.AcceptedFrontier{}, msg.Message()) + innerMsg := msg.Message().(*p2p.AcceptedFrontier) require.Equal(chainID[:], innerMsg.ChainId) require.Equal(requestID, innerMsg.RequestId) - containerIDsBytes := make([][]byte, len(containerIDs)) - for i, id := range containerIDs { - id := id - containerIDsBytes[i] = id[:] - } - require.Equal(containerIDsBytes, innerMsg.ContainerIds) + require.Equal(containerIDs[0][:], innerMsg.ContainerId) }, ) @@ -225,8 +221,8 @@ func TestInboundMsgBuilder(t *testing.T) { require.Equal(nodeID, msg.NodeID()) require.False(msg.Expiration().Before(start.Add(deadline))) require.False(end.Add(deadline).Before(msg.Expiration())) - innerMsg, ok := msg.Message().(*p2p.GetAccepted) - require.True(ok) + require.IsType(&p2p.GetAccepted{}, msg.Message()) + innerMsg := msg.Message().(*p2p.GetAccepted) require.Equal(chainID[:], innerMsg.ChainId) require.Equal(requestID, innerMsg.RequestId) require.Equal(engineType, innerMsg.EngineType) @@ -248,8 +244,8 @@ func TestInboundMsgBuilder(t *testing.T) { require.Equal(AcceptedOp, msg.Op()) require.Equal(nodeID, msg.NodeID()) require.Equal(mockable.MaxTime, msg.Expiration()) - innerMsg, ok := msg.Message().(*p2p.Accepted) - require.True(ok) + require.IsType(&p2p.Accepted{}, msg.Message()) + innerMsg := msg.Message().(*p2p.Accepted) require.Equal(chainID[:], innerMsg.ChainId) 
require.Equal(requestID, innerMsg.RequestId) containerIDsBytes := make([][]byte, len(containerIDs)) @@ -272,6 +268,7 @@ func TestInboundMsgBuilder(t *testing.T) { requestID, deadline, container, + requestedHeight, nodeID, engineType, ) @@ -281,11 +278,12 @@ func TestInboundMsgBuilder(t *testing.T) { require.Equal(nodeID, msg.NodeID()) require.False(msg.Expiration().Before(start.Add(deadline))) require.False(end.Add(deadline).Before(msg.Expiration())) - innerMsg, ok := msg.Message().(*p2p.PushQuery) - require.True(ok) + require.IsType(&p2p.PushQuery{}, msg.Message()) + innerMsg := msg.Message().(*p2p.PushQuery) require.Equal(chainID[:], innerMsg.ChainId) require.Equal(requestID, innerMsg.RequestId) require.Equal(container, innerMsg.Container) + require.Equal(requestedHeight, innerMsg.RequestedHeight) require.Equal(engineType, innerMsg.EngineType) }, ) @@ -301,6 +299,7 @@ func TestInboundMsgBuilder(t *testing.T) { requestID, deadline, containerIDs[0], + requestedHeight, nodeID, engineType, ) @@ -310,11 +309,12 @@ func TestInboundMsgBuilder(t *testing.T) { require.Equal(nodeID, msg.NodeID()) require.False(msg.Expiration().Before(start.Add(deadline))) require.False(end.Add(deadline).Before(msg.Expiration())) - innerMsg, ok := msg.Message().(*p2p.PullQuery) - require.True(ok) + require.IsType(&p2p.PullQuery{}, msg.Message()) + innerMsg := msg.Message().(*p2p.PullQuery) require.Equal(chainID[:], innerMsg.ChainId) require.Equal(requestID, innerMsg.RequestId) require.Equal(containerIDs[0][:], innerMsg.ContainerId) + require.Equal(requestedHeight, innerMsg.RequestedHeight) require.Equal(engineType, innerMsg.EngineType) }, ) @@ -327,30 +327,22 @@ func TestInboundMsgBuilder(t *testing.T) { msg := InboundChits( chainID, requestID, - containerIDs, - acceptedContainerIDs, + containerIDs[0], + containerIDs[1], + acceptedContainerID, nodeID, ) require.Equal(ChitsOp, msg.Op()) require.Equal(nodeID, msg.NodeID()) require.Equal(mockable.MaxTime, msg.Expiration()) - innerMsg, ok := 
msg.Message().(*p2p.Chits) - require.True(ok) + require.IsType(&p2p.Chits{}, msg.Message()) + innerMsg := msg.Message().(*p2p.Chits) require.Equal(chainID[:], innerMsg.ChainId) require.Equal(requestID, innerMsg.RequestId) - containerIDsBytes := make([][]byte, len(containerIDs)) - for i, id := range containerIDs { - id := id - containerIDsBytes[i] = id[:] - } - require.Equal(containerIDsBytes, innerMsg.PreferredContainerIds) - acceptedContainerIDsBytes := make([][]byte, len(acceptedContainerIDs)) - for i, id := range acceptedContainerIDs { - id := id - acceptedContainerIDsBytes[i] = id[:] - } - require.Equal(acceptedContainerIDsBytes, innerMsg.AcceptedContainerIds) + require.Equal(containerIDs[0][:], innerMsg.PreferredId) + require.Equal(containerIDs[1][:], innerMsg.PreferredIdAtHeight) + require.Equal(acceptedContainerID[:], innerMsg.AcceptedId) }, ) @@ -373,8 +365,8 @@ func TestInboundMsgBuilder(t *testing.T) { require.Equal(nodeID, msg.NodeID()) require.False(msg.Expiration().Before(start.Add(deadline))) require.False(end.Add(deadline).Before(msg.Expiration())) - innerMsg, ok := msg.Message().(*p2p.AppRequest) - require.True(ok) + require.IsType(&p2p.AppRequest{}, msg.Message()) + innerMsg := msg.Message().(*p2p.AppRequest) require.Equal(chainID[:], innerMsg.ChainId) require.Equal(requestID, innerMsg.RequestId) require.Equal(appBytes, innerMsg.AppBytes) @@ -396,11 +388,54 @@ func TestInboundMsgBuilder(t *testing.T) { require.Equal(AppResponseOp, msg.Op()) require.Equal(nodeID, msg.NodeID()) require.Equal(mockable.MaxTime, msg.Expiration()) - innerMsg, ok := msg.Message().(*p2p.AppResponse) - require.True(ok) + require.IsType(&p2p.AppResponse{}, msg.Message()) + innerMsg := msg.Message().(*p2p.AppResponse) require.Equal(chainID[:], innerMsg.ChainId) require.Equal(requestID, innerMsg.RequestId) require.Equal(appBytes, innerMsg.AppBytes) }, ) } + +func TestAppError(t *testing.T) { + require := require.New(t) + + mb, err := newMsgBuilder( + logging.NoLog{}, + "", + 
prometheus.NewRegistry(), + time.Second, + ) + require.NoError(err) + + nodeID := ids.GenerateTestNodeID() + chainID := ids.GenerateTestID() + requestID := uint32(1) + errorCode := int32(2) + errorMessage := "hello world" + + want := &p2p.Message{ + Message: &p2p.Message_AppError{ + AppError: &p2p.AppError{ + ChainId: chainID[:], + RequestId: requestID, + ErrorCode: errorCode, + ErrorMessage: errorMessage, + }, + }, + } + + outMsg, err := mb.createOutbound(want, compression.TypeNone, false) + require.NoError(err) + + got, err := mb.parseInbound(outMsg.Bytes(), nodeID, func() {}) + require.NoError(err) + + require.Equal(nodeID, got.NodeID()) + require.Equal(AppErrorOp, got.Op()) + + msg, ok := got.Message().(*p2p.AppError) + require.True(ok) + require.Equal(errorCode, msg.ErrorCode) + require.Equal(errorMessage, msg.ErrorMessage) +} diff --git a/avalanchego/message/internal_msg_builder.go b/avalanchego/message/internal_msg_builder.go index 9f27d256..38a95cb7 100644 --- a/avalanchego/message/internal_msg_builder.go +++ b/avalanchego/message/internal_msg_builder.go @@ -1,10 +1,11 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
//nolint:stylecheck // proto generates interfaces that fail linting package message import ( + "fmt" "time" "github.com/ava-labs/avalanchego/ids" @@ -15,49 +16,62 @@ import ( var ( disconnected = &Disconnected{} - timeout = &Timeout{} gossipRequest = &GossipRequest{} + timeout = &Timeout{} + _ fmt.Stringer = (*GetStateSummaryFrontierFailed)(nil) _ chainIDGetter = (*GetStateSummaryFrontierFailed)(nil) _ requestIDGetter = (*GetStateSummaryFrontierFailed)(nil) + _ fmt.Stringer = (*GetAcceptedStateSummaryFailed)(nil) _ chainIDGetter = (*GetAcceptedStateSummaryFailed)(nil) _ requestIDGetter = (*GetAcceptedStateSummaryFailed)(nil) + _ fmt.Stringer = (*GetAcceptedFrontierFailed)(nil) _ chainIDGetter = (*GetAcceptedFrontierFailed)(nil) _ requestIDGetter = (*GetAcceptedFrontierFailed)(nil) _ engineTypeGetter = (*GetAcceptedFrontierFailed)(nil) + _ fmt.Stringer = (*GetAcceptedFailed)(nil) _ chainIDGetter = (*GetAcceptedFailed)(nil) _ requestIDGetter = (*GetAcceptedFailed)(nil) _ engineTypeGetter = (*GetAcceptedFailed)(nil) + _ fmt.Stringer = (*GetAncestorsFailed)(nil) _ chainIDGetter = (*GetAncestorsFailed)(nil) _ requestIDGetter = (*GetAncestorsFailed)(nil) _ engineTypeGetter = (*GetAncestorsFailed)(nil) + _ fmt.Stringer = (*GetFailed)(nil) _ chainIDGetter = (*GetFailed)(nil) _ requestIDGetter = (*GetFailed)(nil) _ engineTypeGetter = (*GetFailed)(nil) + _ fmt.Stringer = (*QueryFailed)(nil) _ chainIDGetter = (*QueryFailed)(nil) _ requestIDGetter = (*QueryFailed)(nil) _ engineTypeGetter = (*QueryFailed)(nil) - _ chainIDGetter = (*AppRequestFailed)(nil) - _ requestIDGetter = (*AppRequestFailed)(nil) - + _ fmt.Stringer = (*CrossChainAppRequest)(nil) _ sourceChainIDGetter = (*CrossChainAppRequest)(nil) _ chainIDGetter = (*CrossChainAppRequest)(nil) _ requestIDGetter = (*CrossChainAppRequest)(nil) + _ fmt.Stringer = (*CrossChainAppRequestFailed)(nil) _ sourceChainIDGetter = (*CrossChainAppRequestFailed)(nil) _ chainIDGetter = (*CrossChainAppRequestFailed)(nil) _ requestIDGetter = 
(*CrossChainAppRequestFailed)(nil) + _ fmt.Stringer = (*CrossChainAppResponse)(nil) _ sourceChainIDGetter = (*CrossChainAppResponse)(nil) _ chainIDGetter = (*CrossChainAppResponse)(nil) _ requestIDGetter = (*CrossChainAppResponse)(nil) + + _ fmt.Stringer = (*Disconnected)(nil) + + _ fmt.Stringer = (*GossipRequest)(nil) + + _ fmt.Stringer = (*Timeout)(nil) ) type GetStateSummaryFrontierFailed struct { @@ -65,6 +79,13 @@ type GetStateSummaryFrontierFailed struct { RequestID uint32 `json:"request_id,omitempty"` } +func (m *GetStateSummaryFrontierFailed) String() string { + return fmt.Sprintf( + "ChainID: %s RequestID: %d", + m.ChainID, m.RequestID, + ) +} + func (m *GetStateSummaryFrontierFailed) GetChainId() []byte { return m.ChainID[:] } @@ -94,6 +115,13 @@ type GetAcceptedStateSummaryFailed struct { RequestID uint32 `json:"request_id,omitempty"` } +func (m *GetAcceptedStateSummaryFailed) String() string { + return fmt.Sprintf( + "ChainID: %s RequestID: %d", + m.ChainID, m.RequestID, + ) +} + func (m *GetAcceptedStateSummaryFailed) GetChainId() []byte { return m.ChainID[:] } @@ -124,6 +152,13 @@ type GetAcceptedFrontierFailed struct { EngineType p2p.EngineType `json:"engine_type,omitempty"` } +func (m *GetAcceptedFrontierFailed) String() string { + return fmt.Sprintf( + "ChainID: %s RequestID: %d EngineType: %s", + m.ChainID, m.RequestID, m.EngineType, + ) +} + func (m *GetAcceptedFrontierFailed) GetChainId() []byte { return m.ChainID[:] } @@ -160,6 +195,13 @@ type GetAcceptedFailed struct { EngineType p2p.EngineType `json:"engine_type,omitempty"` } +func (m *GetAcceptedFailed) String() string { + return fmt.Sprintf( + "ChainID: %s RequestID: %d EngineType: %s", + m.ChainID, m.RequestID, m.EngineType, + ) +} + func (m *GetAcceptedFailed) GetChainId() []byte { return m.ChainID[:] } @@ -196,6 +238,13 @@ type GetAncestorsFailed struct { EngineType p2p.EngineType `json:"engine_type,omitempty"` } +func (m *GetAncestorsFailed) String() string { + return fmt.Sprintf( + 
"ChainID: %s RequestID: %d EngineType: %s", + m.ChainID, m.RequestID, m.EngineType, + ) +} + func (m *GetAncestorsFailed) GetChainId() []byte { return m.ChainID[:] } @@ -232,6 +281,13 @@ type GetFailed struct { EngineType p2p.EngineType `json:"engine_type,omitempty"` } +func (m *GetFailed) String() string { + return fmt.Sprintf( + "ChainID: %s RequestID: %d EngineType: %s", + m.ChainID, m.RequestID, m.EngineType, + ) +} + func (m *GetFailed) GetChainId() []byte { return m.ChainID[:] } @@ -268,6 +324,13 @@ type QueryFailed struct { EngineType p2p.EngineType `json:"engine_type,omitempty"` } +func (m *QueryFailed) String() string { + return fmt.Sprintf( + "ChainID: %s RequestID: %d EngineType: %s", + m.ChainID, m.RequestID, m.EngineType, + ) +} + func (m *QueryFailed) GetChainId() []byte { return m.ChainID[:] } @@ -298,35 +361,6 @@ func InternalQueryFailed( } } -type AppRequestFailed struct { - ChainID ids.ID `json:"chain_id,omitempty"` - RequestID uint32 `json:"request_id,omitempty"` -} - -func (m *AppRequestFailed) GetChainId() []byte { - return m.ChainID[:] -} - -func (m *AppRequestFailed) GetRequestId() uint32 { - return m.RequestID -} - -func InternalAppRequestFailed( - nodeID ids.NodeID, - chainID ids.ID, - requestID uint32, -) InboundMessage { - return &inboundMessage{ - nodeID: nodeID, - op: AppRequestFailedOp, - message: &AppRequestFailed{ - ChainID: chainID, - RequestID: requestID, - }, - expiration: mockable.MaxTime, - } -} - type CrossChainAppRequest struct { SourceChainID ids.ID `json:"source_chain_id,omitempty"` DestinationChainID ids.ID `json:"destination_chain_id,omitempty"` @@ -334,6 +368,13 @@ type CrossChainAppRequest struct { Message []byte `json:"message,omitempty"` } +func (m *CrossChainAppRequest) String() string { + return fmt.Sprintf( + "SourceChainID: %s DestinationChainID: %s RequestID: %d Message: 0x%x", + m.SourceChainID, m.DestinationChainID, m.RequestID, m.Message, + ) +} + func (m *CrossChainAppRequest) GetSourceChainID() ids.ID { 
return m.SourceChainID } @@ -371,6 +412,15 @@ type CrossChainAppRequestFailed struct { SourceChainID ids.ID `json:"source_chain_id,omitempty"` DestinationChainID ids.ID `json:"destination_chain_id,omitempty"` RequestID uint32 `json:"request_id,omitempty"` + ErrorCode int32 `json:"error_code,omitempty"` + ErrorMessage string `json:"error_message,omitempty"` +} + +func (m *CrossChainAppRequestFailed) String() string { + return fmt.Sprintf( + "SourceChainID: %s DestinationChainID: %s RequestID: %d", + m.SourceChainID, m.DestinationChainID, m.RequestID, + ) } func (m *CrossChainAppRequestFailed) GetSourceChainID() ids.ID { @@ -385,19 +435,23 @@ func (m *CrossChainAppRequestFailed) GetRequestId() uint32 { return m.RequestID } -func InternalCrossChainAppRequestFailed( +func InternalCrossChainAppError( nodeID ids.NodeID, sourceChainID ids.ID, destinationChainID ids.ID, requestID uint32, + errorCode int32, + errorMessage string, ) InboundMessage { return &inboundMessage{ nodeID: nodeID, - op: CrossChainAppRequestFailedOp, + op: CrossChainAppErrorOp, message: &CrossChainAppRequestFailed{ SourceChainID: sourceChainID, DestinationChainID: destinationChainID, RequestID: requestID, + ErrorCode: errorCode, + ErrorMessage: errorMessage, }, expiration: mockable.MaxTime, } @@ -410,6 +464,13 @@ type CrossChainAppResponse struct { Message []byte `json:"message,omitempty"` } +func (m *CrossChainAppResponse) String() string { + return fmt.Sprintf( + "SourceChainID: %s DestinationChainID: %s RequestID: %d Message: 0x%x", + m.SourceChainID, m.DestinationChainID, m.RequestID, m.Message, + ) +} + func (m *CrossChainAppResponse) GetSourceChainID() ids.ID { return m.SourceChainID } @@ -446,6 +507,13 @@ type Connected struct { NodeVersion *version.Application `json:"node_version,omitempty"` } +func (m *Connected) String() string { + return fmt.Sprintf( + "NodeVersion: %s", + m.NodeVersion, + ) +} + func InternalConnected(nodeID ids.NodeID, nodeVersion *version.Application) InboundMessage { 
return &inboundMessage{ nodeID: nodeID, @@ -463,6 +531,13 @@ type ConnectedSubnet struct { SubnetID ids.ID `json:"subnet_id,omitempty"` } +func (m *ConnectedSubnet) String() string { + return fmt.Sprintf( + "SubnetID: %s", + m.SubnetID, + ) +} + // InternalConnectedSubnet returns a message that indicates the node with [nodeID] is // connected to the subnet with the given [subnetID]. func InternalConnectedSubnet(nodeID ids.NodeID, subnetID ids.ID) InboundMessage { @@ -478,6 +553,10 @@ func InternalConnectedSubnet(nodeID ids.NodeID, subnetID ids.ID) InboundMessage type Disconnected struct{} +func (Disconnected) String() string { + return "" +} + func InternalDisconnected(nodeID ids.NodeID) InboundMessage { return &inboundMessage{ nodeID: nodeID, @@ -491,6 +570,13 @@ type VMMessage struct { Notification uint32 `json:"notification,omitempty"` } +func (m *VMMessage) String() string { + return fmt.Sprintf( + "Notification: %d", + m.Notification, + ) +} + func InternalVMMessage( nodeID ids.NodeID, notification uint32, @@ -507,6 +593,10 @@ func InternalVMMessage( type GossipRequest struct{} +func (GossipRequest) String() string { + return "" +} + func InternalGossipRequest( nodeID ids.NodeID, ) InboundMessage { @@ -520,6 +610,10 @@ func InternalGossipRequest( type Timeout struct{} +func (Timeout) String() string { + return "" +} + func InternalTimeout(nodeID ids.NodeID) InboundMessage { return &inboundMessage{ nodeID: nodeID, diff --git a/avalanchego/message/messages.go b/avalanchego/message/messages.go index 99ac9dc6..aef0577b 100644 --- a/avalanchego/message/messages.go +++ b/avalanchego/message/messages.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package message @@ -9,9 +9,7 @@ import ( "time" "github.com/prometheus/client_golang/prometheus" - "go.uber.org/zap" - "google.golang.org/protobuf/proto" "github.com/ava-labs/avalanchego/ids" @@ -19,7 +17,6 @@ import ( "github.com/ava-labs/avalanchego/utils/compression" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/utils/math" "github.com/ava-labs/avalanchego/utils/metric" "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/utils/wrappers" @@ -34,12 +31,13 @@ var ( // InboundMessage represents a set of fields for an inbound message type InboundMessage interface { + fmt.Stringer // NodeID returns the ID of the node that sent this message NodeID() ids.NodeID // Op returns the op that describes this message type Op() Op // Message returns the message that was sent - Message() any + Message() fmt.Stringer // Expiration returns the time that the sender will have already timed out // this request Expiration() time.Time @@ -54,7 +52,7 @@ type InboundMessage interface { type inboundMessage struct { nodeID ids.NodeID op Op - message any + message fmt.Stringer expiration time.Time onFinishedHandling func() bytesSavedCompression int @@ -68,7 +66,7 @@ func (m *inboundMessage) Op() Op { return m.op } -func (m *inboundMessage) Message() any { +func (m *inboundMessage) Message() fmt.Stringer { return m.message } @@ -86,6 +84,11 @@ func (m *inboundMessage) BytesSavedCompression() int { return m.bytesSavedCompression } +func (m *inboundMessage) String() string { + return fmt.Sprintf("%s Op: %s Message: %s", + m.nodeID, m.op, m.message) +} + // OutboundMessage represents a set of fields for an outbound message that can // be serialized into a byte stream type OutboundMessage interface { @@ -128,8 +131,8 @@ func (m *outboundMessage) BytesSavedCompression() int { type msgBuilder struct { log logging.Logger + // TODO: Remove gzip once v1.11.x is out. 
gzipCompressor compression.Compressor - gzipCompressTimeMetrics map[Op]metric.Averager gzipDecompressTimeMetrics map[Op]metric.Averager zstdCompressor compression.Compressor @@ -158,7 +161,6 @@ func newMsgBuilder( log: log, gzipCompressor: gzipCompressor, - gzipCompressTimeMetrics: make(map[Op]metric.Averager, len(ExternalOps)), gzipDecompressTimeMetrics: make(map[Op]metric.Averager, len(ExternalOps)), zstdCompressor: zstdCompressor, @@ -170,13 +172,6 @@ func newMsgBuilder( errs := wrappers.Errs{} for _, op := range ExternalOps { - mb.gzipCompressTimeMetrics[op] = metric.NewAveragerWithErrs( - namespace, - fmt.Sprintf("gzip_%s_compress_time", op), - fmt.Sprintf("time (in ns) to compress %s messages with gzip", op), - metrics, - &errs, - ) mb.gzipDecompressTimeMetrics[op] = metric.NewAveragerWithErrs( namespace, fmt.Sprintf("gzip_%s_decompress_time", op), @@ -230,17 +225,6 @@ func (mb *msgBuilder) marshal( switch compressionType { case compression.TypeNone: return uncompressedMsgBytes, 0, op, nil - case compression.TypeGzip: - compressedBytes, err := mb.gzipCompressor.Compress(uncompressedMsgBytes) - if err != nil { - return nil, 0, 0, err - } - compressedMsg = p2p.Message{ - Message: &p2p.Message_CompressedGzip{ - CompressedGzip: compressedBytes, - }, - } - opToCompressTimeMetrics = mb.gzipCompressTimeMetrics case compression.TypeZstd: compressedBytes, err := mb.zstdCompressor.Compress(uncompressedMsgBytes) if err != nil { @@ -366,7 +350,7 @@ func (mb *msgBuilder) parseInbound( expiration := mockable.MaxTime if deadline, ok := GetDeadline(msg); ok { - deadline = math.Min(deadline, mb.maxMessageTimeout) + deadline = min(deadline, mb.maxMessageTimeout) expiration = time.Now().Add(deadline) } diff --git a/avalanchego/message/messages_benchmark_test.go b/avalanchego/message/messages_benchmark_test.go index f87493fc..595ba1b1 100644 --- a/avalanchego/message/messages_benchmark_test.go +++ b/avalanchego/message/messages_benchmark_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 
2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package message @@ -10,9 +10,7 @@ import ( "time" "github.com/prometheus/client_golang/prometheus" - "github.com/stretchr/testify/require" - "google.golang.org/protobuf/proto" "github.com/ava-labs/avalanchego/ids" @@ -26,37 +24,38 @@ var ( dummyOnFinishedHandling = func() {} ) -// Benchmarks marshal-ing "Version" message. +// Benchmarks marshal-ing "Handshake" message. // // e.g., // // $ go install -v golang.org/x/tools/cmd/benchcmp@latest // $ go install -v golang.org/x/perf/cmd/benchstat@latest // -// $ go test -run=NONE -bench=BenchmarkMarshalVersion > /tmp/cpu.before.txt -// $ USE_BUILDER=true go test -run=NONE -bench=BenchmarkMarshalVersion > /tmp/cpu.after.txt +// $ go test -run=NONE -bench=BenchmarkMarshalHandshake > /tmp/cpu.before.txt +// $ USE_BUILDER=true go test -run=NONE -bench=BenchmarkMarshalHandshake > /tmp/cpu.after.txt // $ benchcmp /tmp/cpu.before.txt /tmp/cpu.after.txt // $ benchstat -alpha 0.03 -geomean /tmp/cpu.before.txt /tmp/cpu.after.txt // -// $ go test -run=NONE -bench=BenchmarkMarshalVersion -benchmem > /tmp/mem.before.txt -// $ USE_BUILDER=true go test -run=NONE -bench=BenchmarkMarshalVersion -benchmem > /tmp/mem.after.txt +// $ go test -run=NONE -bench=BenchmarkMarshalHandshake -benchmem > /tmp/mem.before.txt +// $ USE_BUILDER=true go test -run=NONE -bench=BenchmarkMarshalHandshake -benchmem > /tmp/mem.after.txt // $ benchcmp /tmp/mem.before.txt /tmp/mem.after.txt // $ benchstat -alpha 0.03 -geomean /tmp/mem.before.txt /tmp/mem.after.txt -func BenchmarkMarshalVersion(b *testing.B) { +func BenchmarkMarshalHandshake(b *testing.B) { require := require.New(b) id := ids.GenerateTestID() msg := p2p.Message{ - Message: &p2p.Message_Version{ - Version: &p2p.Version{ + Message: &p2p.Message_Handshake{ + Handshake: &p2p.Handshake{ NetworkId: uint32(1337), MyTime: uint64(time.Now().Unix()), 
IpAddr: []byte(net.IPv4(1, 2, 3, 4).To16()), IpPort: 0, MyVersion: "v1.2.3", - MyVersionTime: uint64(time.Now().Unix()), - Sig: []byte{'y', 'e', 'e', 't'}, + IpSigningTime: uint64(time.Now().Unix()), + IpNodeIdSig: []byte{'y', 'e', 'e', 't'}, TrackedSubnets: [][]byte{id[:]}, + IpBlsSig: []byte{'y', 'e', 'e', 't', '2'}, }, }, } @@ -87,32 +86,33 @@ func BenchmarkMarshalVersion(b *testing.B) { // $ go install -v golang.org/x/tools/cmd/benchcmp@latest // $ go install -v golang.org/x/perf/cmd/benchstat@latest // -// $ go test -run=NONE -bench=BenchmarkUnmarshalVersion > /tmp/cpu.before.txt -// $ USE_BUILDER=true go test -run=NONE -bench=BenchmarkUnmarshalVersion > /tmp/cpu.after.txt +// $ go test -run=NONE -bench=BenchmarkUnmarshalHandshake > /tmp/cpu.before.txt +// $ USE_BUILDER=true go test -run=NONE -bench=BenchmarkUnmarshalHandshake > /tmp/cpu.after.txt // $ benchcmp /tmp/cpu.before.txt /tmp/cpu.after.txt // $ benchstat -alpha 0.03 -geomean /tmp/cpu.before.txt /tmp/cpu.after.txt // -// $ go test -run=NONE -bench=BenchmarkUnmarshalVersion -benchmem > /tmp/mem.before.txt -// $ USE_BUILDER=true go test -run=NONE -bench=BenchmarkUnmarshalVersion -benchmem > /tmp/mem.after.txt +// $ go test -run=NONE -bench=BenchmarkUnmarshalHandshake -benchmem > /tmp/mem.before.txt +// $ USE_BUILDER=true go test -run=NONE -bench=BenchmarkUnmarshalHandshake -benchmem > /tmp/mem.after.txt // $ benchcmp /tmp/mem.before.txt /tmp/mem.after.txt // $ benchstat -alpha 0.03 -geomean /tmp/mem.before.txt /tmp/mem.after.txt -func BenchmarkUnmarshalVersion(b *testing.B) { +func BenchmarkUnmarshalHandshake(b *testing.B) { require := require.New(b) b.StopTimer() id := ids.GenerateTestID() msg := p2p.Message{ - Message: &p2p.Message_Version{ - Version: &p2p.Version{ + Message: &p2p.Message_Handshake{ + Handshake: &p2p.Handshake{ NetworkId: uint32(1337), MyTime: uint64(time.Now().Unix()), IpAddr: []byte(net.IPv4(1, 2, 3, 4).To16()), IpPort: 0, MyVersion: "v1.2.3", - MyVersionTime: 
uint64(time.Now().Unix()), - Sig: []byte{'y', 'e', 'e', 't'}, + IpSigningTime: uint64(time.Now().Unix()), + IpNodeIdSig: []byte{'y', 'e', 'e', 't'}, TrackedSubnets: [][]byte{id[:]}, + IpBlsSig: []byte{'y', 'e', 'e', 't', '2'}, }, }, } @@ -128,10 +128,10 @@ func BenchmarkUnmarshalVersion(b *testing.B) { for i := 0; i < b.N; i++ { if useBuilder { _, err = codec.parseInbound(rawMsg, dummyNodeID, dummyOnFinishedHandling) + require.NoError(err) } else { var msg p2p.Message - err = proto.Unmarshal(rawMsg, &msg) + require.NoError(proto.Unmarshal(rawMsg, &msg)) } - require.NoError(err) } } diff --git a/avalanchego/message/messages_test.go b/avalanchego/message/messages_test.go index c04e3ea4..6e4978dd 100644 --- a/avalanchego/message/messages_test.go +++ b/avalanchego/message/messages_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package message @@ -10,9 +10,7 @@ import ( "time" "github.com/prometheus/client_golang/prometheus" - "github.com/stretchr/testify/require" - "google.golang.org/protobuf/proto" "github.com/ava-labs/avalanchego/ids" @@ -25,15 +23,13 @@ import ( func TestMessage(t *testing.T) { t.Parallel() - require := require.New(t) - mb, err := newMsgBuilder( logging.NoLog{}, "test", prometheus.NewRegistry(), 5*time.Second, ) - require.NoError(err) + require.NoError(t, err) testID := ids.GenerateTestID() compressibleContainers := [][]byte{ @@ -43,10 +39,10 @@ func TestMessage(t *testing.T) { } testCertRaw, testKeyRaw, err := staking.NewCertAndKeyBytes() - require.NoError(err) + require.NoError(t, err) testTLSCert, err := staking.LoadTLSCertFromBytes(testKeyRaw, testCertRaw) - require.NoError(err) + require.NoError(t, err) nowUnix := time.Now().Unix() @@ -59,7 +55,7 @@ func TestMessage(t *testing.T) { bytesSaved bool // if true, outbound message saved bytes must be non-zero }{ { - desc: "ping message with no 
compression", + desc: "ping message with no compression no subnet uptimes", op: PingOp, msg: &p2p.Message{ Message: &p2p.Message_Ping{ @@ -84,6 +80,25 @@ func TestMessage(t *testing.T) { bypassThrottling: true, bytesSaved: false, }, + { + desc: "ping message with no compression and subnet uptimes", + op: PingOp, + msg: &p2p.Message{ + Message: &p2p.Message_Ping{ + Ping: &p2p.Ping{ + SubnetUptimes: []*p2p.SubnetUptime{ + { + SubnetId: testID[:], + Uptime: 100, + }, + }, + }, + }, + }, + compressionType: compression.TypeNone, + bypassThrottling: true, + bytesSaved: false, + }, { desc: "pong message with no compression and subnet uptimes", op: PongOp, @@ -105,19 +120,20 @@ func TestMessage(t *testing.T) { bytesSaved: false, }, { - desc: "version message with no compression", - op: VersionOp, + desc: "Handshake message with no compression", + op: HandshakeOp, msg: &p2p.Message{ - Message: &p2p.Message_Version{ - Version: &p2p.Version{ + Message: &p2p.Message_Handshake{ + Handshake: &p2p.Handshake{ NetworkId: uint32(1337), MyTime: uint64(nowUnix), IpAddr: []byte(net.IPv6zero), IpPort: 9651, MyVersion: "v1.2.3", - MyVersionTime: uint64(nowUnix), - Sig: []byte{'y', 'e', 'e', 't'}, + IpSigningTime: uint64(nowUnix), + IpNodeIdSig: []byte{'y', 'e', 'e', 't'}, TrackedSubnets: [][]byte{testID[:]}, + IpBlsSig: []byte{'y', 'e', 'e', 't', '2'}, }, }, }, @@ -126,55 +142,67 @@ func TestMessage(t *testing.T) { bytesSaved: false, }, { - desc: "peer_list message with no compression", - op: PeerListOp, + desc: "get_peer_list message with no compression", + op: GetPeerListOp, msg: &p2p.Message{ - Message: &p2p.Message_PeerList{ - PeerList: &p2p.PeerList{ - ClaimedIpPorts: []*p2p.ClaimedIpPort{ - { - X509Certificate: testTLSCert.Certificate[0], - IpAddr: []byte(net.IPv4zero), - IpPort: 10, - Timestamp: 1, - Signature: []byte{0}, - }, + Message: &p2p.Message_GetPeerList{ + GetPeerList: &p2p.GetPeerList{ + KnownPeers: &p2p.BloomFilter{ + Filter: make([]byte, 2048), + Salt: make([]byte, 
32), }, }, }, }, compressionType: compression.TypeNone, - bypassThrottling: true, + bypassThrottling: false, bytesSaved: false, }, { - desc: "peer_list message with gzip compression", + desc: "get_peer_list message with zstd compression", + op: GetPeerListOp, + msg: &p2p.Message{ + Message: &p2p.Message_GetPeerList{ + GetPeerList: &p2p.GetPeerList{ + KnownPeers: &p2p.BloomFilter{ + Filter: make([]byte, 2048), + Salt: make([]byte, 32), + }, + }, + }, + }, + compressionType: compression.TypeZstd, + bypassThrottling: false, + bytesSaved: true, + }, + { + desc: "peer_list message with no compression", op: PeerListOp, msg: &p2p.Message{ - Message: &p2p.Message_PeerList{ - PeerList: &p2p.PeerList{ + Message: &p2p.Message_PeerList_{ + PeerList_: &p2p.PeerList{ ClaimedIpPorts: []*p2p.ClaimedIpPort{ { X509Certificate: testTLSCert.Certificate[0], - IpAddr: []byte(net.IPv6zero), - IpPort: 9651, - Timestamp: uint64(nowUnix), - Signature: compressibleContainers[0], + IpAddr: []byte(net.IPv4zero), + IpPort: 10, + Timestamp: 1, + Signature: []byte{0}, }, }, }, }, }, - compressionType: compression.TypeGzip, + compressionType: compression.TypeNone, bypassThrottling: true, - bytesSaved: true, + bytesSaved: false, }, { desc: "peer_list message with zstd compression", op: PeerListOp, msg: &p2p.Message{ - Message: &p2p.Message_PeerList{ - PeerList: &p2p.PeerList{ + Message: &p2p.Message_PeerList_{ + PeerList_: &p2p.PeerList{ ClaimedIpPorts: []*p2p.ClaimedIpPort{ { X509Certificate: testTLSCert.Certificate[0], @@ -191,25 +219,6 @@ func TestMessage(t *testing.T) { bypassThrottling: true, bytesSaved: true, }, - { - desc: "peer_list_ack message with no compression", - op: PeerListAckOp, - msg: &p2p.Message{ - Message: &p2p.Message_PeerListAck{ - PeerListAck: &p2p.PeerListAck{ - PeerAcks: []*p2p.PeerAck{ - { - TxId: testID[:], - Timestamp: 1, - }, - }, - }, - }, - }, - compressionType: compression.TypeNone, - bypassThrottling: false, - bytesSaved: false, - }, { desc: 
"get_state_summary_frontier message with no compression", op: GetStateSummaryFrontierOp, @@ -242,22 +251,6 @@ func TestMessage(t *testing.T) { bypassThrottling: true, bytesSaved: false, }, - { - desc: "state_summary_frontier message with gzip compression", - op: StateSummaryFrontierOp, - msg: &p2p.Message{ - Message: &p2p.Message_StateSummaryFrontier_{ - StateSummaryFrontier_: &p2p.StateSummaryFrontier{ - ChainId: testID[:], - RequestId: 1, - Summary: compressibleContainers[0], - }, - }, - }, - compressionType: compression.TypeGzip, - bypassThrottling: true, - bytesSaved: true, - }, { desc: "state_summary_frontier message with zstd compression", op: StateSummaryFrontierOp, @@ -291,23 +284,6 @@ func TestMessage(t *testing.T) { bypassThrottling: true, bytesSaved: false, }, - { - desc: "get_accepted_state_summary message with gzip compression", - op: GetAcceptedStateSummaryOp, - msg: &p2p.Message{ - Message: &p2p.Message_GetAcceptedStateSummary{ - GetAcceptedStateSummary: &p2p.GetAcceptedStateSummary{ - ChainId: testID[:], - RequestId: 1, - Deadline: 1, - Heights: []uint64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, - }, - }, - }, - compressionType: compression.TypeGzip, - bypassThrottling: true, - bytesSaved: false, - }, { desc: "get_accepted_state_summary message with zstd compression", op: GetAcceptedStateSummaryOp, @@ -341,22 +317,6 @@ func TestMessage(t *testing.T) { bypassThrottling: true, bytesSaved: false, }, - { - desc: "accepted_state_summary message with gzip compression", - op: AcceptedStateSummaryOp, - msg: &p2p.Message{ - Message: &p2p.Message_AcceptedStateSummary_{ - AcceptedStateSummary_: &p2p.AcceptedStateSummary{ - ChainId: testID[:], - RequestId: 1, - SummaryIds: [][]byte{testID[:], testID[:], testID[:], testID[:], testID[:], testID[:], testID[:], testID[:], testID[:]}, - }, - }, - }, - compressionType: compression.TypeGzip, - bypassThrottling: true, - bytesSaved: true, - }, { desc: "accepted_state_summary message with zstd 
compression", op: AcceptedStateSummaryOp, @@ -396,9 +356,9 @@ func TestMessage(t *testing.T) { msg: &p2p.Message{ Message: &p2p.Message_AcceptedFrontier_{ AcceptedFrontier_: &p2p.AcceptedFrontier{ - ChainId: testID[:], - RequestId: 1, - ContainerIds: [][]byte{testID[:], testID[:]}, + ChainId: testID[:], + RequestId: 1, + ContainerId: testID[:], }, }, }, @@ -474,22 +434,6 @@ func TestMessage(t *testing.T) { bypassThrottling: true, bytesSaved: false, }, - { - desc: "ancestors message with gzip compression", - op: AncestorsOp, - msg: &p2p.Message{ - Message: &p2p.Message_Ancestors_{ - Ancestors_: &p2p.Ancestors{ - ChainId: testID[:], - RequestId: 12345, - Containers: compressibleContainers, - }, - }, - }, - compressionType: compression.TypeGzip, - bypassThrottling: true, - bytesSaved: true, - }, { desc: "ancestors message with zstd compression", op: AncestorsOp, @@ -541,23 +485,6 @@ func TestMessage(t *testing.T) { bypassThrottling: true, bytesSaved: false, }, - { - desc: "put message with gzip compression", - op: PutOp, - msg: &p2p.Message{ - Message: &p2p.Message_Put{ - Put: &p2p.Put{ - ChainId: testID[:], - RequestId: 1, - Container: compressibleContainers[0], - EngineType: p2p.EngineType_ENGINE_TYPE_AVALANCHE, - }, - }, - }, - compressionType: compression.TypeGzip, - bypassThrottling: true, - bytesSaved: true, - }, { desc: "put message with zstd compression", op: PutOp, @@ -593,24 +520,6 @@ func TestMessage(t *testing.T) { bypassThrottling: true, bytesSaved: false, }, - { - desc: "push_query message with gzip compression", - op: PushQueryOp, - msg: &p2p.Message{ - Message: &p2p.Message_PushQuery{ - PushQuery: &p2p.PushQuery{ - ChainId: testID[:], - RequestId: 1, - Deadline: 1, - Container: compressibleContainers[0], - EngineType: p2p.EngineType_ENGINE_TYPE_AVALANCHE, - }, - }, - }, - compressionType: compression.TypeGzip, - bypassThrottling: true, - bytesSaved: true, - }, { desc: "push_query message with zstd compression", op: PushQueryOp, @@ -653,9 +562,9 @@ func 
TestMessage(t *testing.T) { msg: &p2p.Message{ Message: &p2p.Message_Chits{ Chits: &p2p.Chits{ - ChainId: testID[:], - RequestId: 1, - PreferredContainerIds: [][]byte{testID[:], testID[:]}, + ChainId: testID[:], + RequestId: 1, + PreferredId: testID[:], }, }, }, @@ -680,23 +589,6 @@ func TestMessage(t *testing.T) { bypassThrottling: true, bytesSaved: false, }, - { - desc: "app_request message with gzip compression", - op: AppRequestOp, - msg: &p2p.Message{ - Message: &p2p.Message_AppRequest{ - AppRequest: &p2p.AppRequest{ - ChainId: testID[:], - RequestId: 1, - Deadline: 1, - AppBytes: compressibleContainers[0], - }, - }, - }, - compressionType: compression.TypeGzip, - bypassThrottling: true, - bytesSaved: true, - }, { desc: "app_request message with zstd compression", op: AppRequestOp, @@ -730,22 +622,6 @@ func TestMessage(t *testing.T) { bypassThrottling: true, bytesSaved: false, }, - { - desc: "app_response message with gzip compression", - op: AppResponseOp, - msg: &p2p.Message{ - Message: &p2p.Message_AppResponse{ - AppResponse: &p2p.AppResponse{ - ChainId: testID[:], - RequestId: 1, - AppBytes: compressibleContainers[0], - }, - }, - }, - compressionType: compression.TypeGzip, - bypassThrottling: true, - bytesSaved: true, - }, { desc: "app_response message with zstd compression", op: AppResponseOp, @@ -777,21 +653,6 @@ func TestMessage(t *testing.T) { bypassThrottling: true, bytesSaved: false, }, - { - desc: "app_gossip message with gzip compression", - op: AppGossipOp, - msg: &p2p.Message{ - Message: &p2p.Message_AppGossip{ - AppGossip: &p2p.AppGossip{ - ChainId: testID[:], - AppBytes: compressibleContainers[0], - }, - }, - }, - compressionType: compression.TypeGzip, - bypassThrottling: true, - bytesSaved: true, - }, { desc: "app_gossip message with zstd compression", op: AppGossipOp, @@ -810,23 +671,60 @@ func TestMessage(t *testing.T) { } for _, tv := range tests { - require.True(t.Run(tv.desc, func(t2 *testing.T) { + t.Run(tv.desc, func(t *testing.T) { + 
require := require.New(t) + encodedMsg, err := mb.createOutbound(tv.msg, tv.compressionType, tv.bypassThrottling) require.NoError(err) require.Equal(tv.bypassThrottling, encodedMsg.BypassThrottling()) require.Equal(tv.op, encodedMsg.Op()) - bytesSaved := encodedMsg.BytesSavedCompression() - require.Equal(tv.bytesSaved, bytesSaved > 0) + if bytesSaved := encodedMsg.BytesSavedCompression(); tv.bytesSaved { + require.Greater(bytesSaved, 0) + } parsedMsg, err := mb.parseInbound(encodedMsg.Bytes(), ids.EmptyNodeID, func() {}) require.NoError(err) require.Equal(tv.op, parsedMsg.Op()) - })) + }) } } +// Tests the Stringer interface on inbound messages +func TestInboundMessageToString(t *testing.T) { + t.Parallel() + + require := require.New(t) + + mb, err := newMsgBuilder( + logging.NoLog{}, + "test", + prometheus.NewRegistry(), + 5*time.Second, + ) + require.NoError(err) + + // msg that will become the tested InboundMessage + msg := &p2p.Message{ + Message: &p2p.Message_Pong{ + Pong: &p2p.Pong{ + Uptime: 100, + }, + }, + } + msgBytes, err := proto.Marshal(msg) + require.NoError(err) + + inboundMsg, err := mb.parseInbound(msgBytes, ids.EmptyNodeID, func() {}) + require.NoError(err) + + require.Equal("NodeID-111111111111111111116DBWJs Op: pong Message: uptime:100", inboundMsg.String()) + + internalMsg := InternalGetStateSummaryFrontierFailed(ids.EmptyNodeID, ids.Empty, 1) + require.Equal("NodeID-111111111111111111116DBWJs Op: get_state_summary_frontier_failed Message: ChainID: 11111111111111111111111111111111LpoYY RequestID: 1", internalMsg.String()) +} + func TestEmptyInboundMessage(t *testing.T) { t.Parallel() @@ -872,7 +770,7 @@ func TestNilInboundMessage(t *testing.T) { parsedMsg, err := mb.parseInbound(msgBytes, ids.EmptyNodeID, func() {}) require.NoError(err) - pingMsg, ok := parsedMsg.message.(*p2p.Ping) - require.True(ok) + require.IsType(&p2p.Ping{}, parsedMsg.message) + pingMsg := parsedMsg.message.(*p2p.Ping) require.NotNil(pingMsg) } diff --git 
a/avalanchego/message/mock_message.go b/avalanchego/message/mock_message.go index a32b3366..ea6b9a67 100644 --- a/avalanchego/message/mock_message.go +++ b/avalanchego/message/mock_message.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/message (interfaces: OutboundMessage) +// +// Generated by this command: +// +// mockgen -package=message -destination=message/mock_message.go github.com/ava-labs/avalanchego/message OutboundMessage +// // Package message is a generated GoMock package. package message @@ -10,7 +12,7 @@ package message import ( reflect "reflect" - gomock "github.com/golang/mock/gomock" + gomock "go.uber.org/mock/gomock" ) // MockOutboundMessage is a mock of OutboundMessage interface. diff --git a/avalanchego/message/mock_outbound_message_builder.go b/avalanchego/message/mock_outbound_message_builder.go index c11c43d2..d3ec69a0 100644 --- a/avalanchego/message/mock_outbound_message_builder.go +++ b/avalanchego/message/mock_outbound_message_builder.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/message (interfaces: OutboundMsgBuilder) +// +// Generated by this command: +// +// mockgen -package=message -destination=message/mock_outbound_message_builder.go github.com/ava-labs/avalanchego/message OutboundMsgBuilder +// // Package message is a generated GoMock package. package message @@ -14,7 +16,7 @@ import ( ids "github.com/ava-labs/avalanchego/ids" p2p "github.com/ava-labs/avalanchego/proto/pb/p2p" ips "github.com/ava-labs/avalanchego/utils/ips" - gomock "github.com/golang/mock/gomock" + gomock "go.uber.org/mock/gomock" ) // MockOutboundMsgBuilder is a mock of OutboundMsgBuilder interface. 
@@ -50,13 +52,13 @@ func (m *MockOutboundMsgBuilder) Accepted(arg0 ids.ID, arg1 uint32, arg2 []ids.I } // Accepted indicates an expected call of Accepted. -func (mr *MockOutboundMsgBuilderMockRecorder) Accepted(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockOutboundMsgBuilderMockRecorder) Accepted(arg0, arg1, arg2 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Accepted", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).Accepted), arg0, arg1, arg2) } // AcceptedFrontier mocks base method. -func (m *MockOutboundMsgBuilder) AcceptedFrontier(arg0 ids.ID, arg1 uint32, arg2 []ids.ID) (OutboundMessage, error) { +func (m *MockOutboundMsgBuilder) AcceptedFrontier(arg0 ids.ID, arg1 uint32, arg2 ids.ID) (OutboundMessage, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "AcceptedFrontier", arg0, arg1, arg2) ret0, _ := ret[0].(OutboundMessage) @@ -65,7 +67,7 @@ func (m *MockOutboundMsgBuilder) AcceptedFrontier(arg0 ids.ID, arg1 uint32, arg2 } // AcceptedFrontier indicates an expected call of AcceptedFrontier. -func (mr *MockOutboundMsgBuilderMockRecorder) AcceptedFrontier(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockOutboundMsgBuilderMockRecorder) AcceptedFrontier(arg0, arg1, arg2 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AcceptedFrontier", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).AcceptedFrontier), arg0, arg1, arg2) } @@ -80,7 +82,7 @@ func (m *MockOutboundMsgBuilder) AcceptedStateSummary(arg0 ids.ID, arg1 uint32, } // AcceptedStateSummary indicates an expected call of AcceptedStateSummary. 
-func (mr *MockOutboundMsgBuilderMockRecorder) AcceptedStateSummary(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockOutboundMsgBuilderMockRecorder) AcceptedStateSummary(arg0, arg1, arg2 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AcceptedStateSummary", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).AcceptedStateSummary), arg0, arg1, arg2) } @@ -95,11 +97,26 @@ func (m *MockOutboundMsgBuilder) Ancestors(arg0 ids.ID, arg1 uint32, arg2 [][]by } // Ancestors indicates an expected call of Ancestors. -func (mr *MockOutboundMsgBuilderMockRecorder) Ancestors(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockOutboundMsgBuilderMockRecorder) Ancestors(arg0, arg1, arg2 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Ancestors", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).Ancestors), arg0, arg1, arg2) } +// AppError mocks base method. +func (m *MockOutboundMsgBuilder) AppError(arg0 ids.ID, arg1 uint32, arg2 int32, arg3 string) (OutboundMessage, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AppError", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(OutboundMessage) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// AppError indicates an expected call of AppError. +func (mr *MockOutboundMsgBuilderMockRecorder) AppError(arg0, arg1, arg2, arg3 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AppError", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).AppError), arg0, arg1, arg2, arg3) +} + // AppGossip mocks base method. func (m *MockOutboundMsgBuilder) AppGossip(arg0 ids.ID, arg1 []byte) (OutboundMessage, error) { m.ctrl.T.Helper() @@ -110,7 +127,7 @@ func (m *MockOutboundMsgBuilder) AppGossip(arg0 ids.ID, arg1 []byte) (OutboundMe } // AppGossip indicates an expected call of AppGossip. 
-func (mr *MockOutboundMsgBuilderMockRecorder) AppGossip(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockOutboundMsgBuilderMockRecorder) AppGossip(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AppGossip", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).AppGossip), arg0, arg1) } @@ -125,7 +142,7 @@ func (m *MockOutboundMsgBuilder) AppRequest(arg0 ids.ID, arg1 uint32, arg2 time. } // AppRequest indicates an expected call of AppRequest. -func (mr *MockOutboundMsgBuilderMockRecorder) AppRequest(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +func (mr *MockOutboundMsgBuilderMockRecorder) AppRequest(arg0, arg1, arg2, arg3 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AppRequest", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).AppRequest), arg0, arg1, arg2, arg3) } @@ -140,24 +157,24 @@ func (m *MockOutboundMsgBuilder) AppResponse(arg0 ids.ID, arg1 uint32, arg2 []by } // AppResponse indicates an expected call of AppResponse. -func (mr *MockOutboundMsgBuilderMockRecorder) AppResponse(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockOutboundMsgBuilderMockRecorder) AppResponse(arg0, arg1, arg2 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AppResponse", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).AppResponse), arg0, arg1, arg2) } // Chits mocks base method. -func (m *MockOutboundMsgBuilder) Chits(arg0 ids.ID, arg1 uint32, arg2, arg3 []ids.ID) (OutboundMessage, error) { +func (m *MockOutboundMsgBuilder) Chits(arg0 ids.ID, arg1 uint32, arg2, arg3, arg4 ids.ID) (OutboundMessage, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Chits", arg0, arg1, arg2, arg3) + ret := m.ctrl.Call(m, "Chits", arg0, arg1, arg2, arg3, arg4) ret0, _ := ret[0].(OutboundMessage) ret1, _ := ret[1].(error) return ret0, ret1 } // Chits indicates an expected call of Chits. 
-func (mr *MockOutboundMsgBuilderMockRecorder) Chits(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +func (mr *MockOutboundMsgBuilderMockRecorder) Chits(arg0, arg1, arg2, arg3, arg4 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Chits", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).Chits), arg0, arg1, arg2, arg3) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Chits", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).Chits), arg0, arg1, arg2, arg3, arg4) } // Get mocks base method. @@ -170,7 +187,7 @@ func (m *MockOutboundMsgBuilder) Get(arg0 ids.ID, arg1 uint32, arg2 time.Duratio } // Get indicates an expected call of Get. -func (mr *MockOutboundMsgBuilderMockRecorder) Get(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { +func (mr *MockOutboundMsgBuilderMockRecorder) Get(arg0, arg1, arg2, arg3, arg4 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).Get), arg0, arg1, arg2, arg3, arg4) } @@ -185,7 +202,7 @@ func (m *MockOutboundMsgBuilder) GetAccepted(arg0 ids.ID, arg1 uint32, arg2 time } // GetAccepted indicates an expected call of GetAccepted. -func (mr *MockOutboundMsgBuilderMockRecorder) GetAccepted(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { +func (mr *MockOutboundMsgBuilderMockRecorder) GetAccepted(arg0, arg1, arg2, arg3, arg4 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccepted", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).GetAccepted), arg0, arg1, arg2, arg3, arg4) } @@ -200,7 +217,7 @@ func (m *MockOutboundMsgBuilder) GetAcceptedFrontier(arg0 ids.ID, arg1 uint32, a } // GetAcceptedFrontier indicates an expected call of GetAcceptedFrontier. 
-func (mr *MockOutboundMsgBuilderMockRecorder) GetAcceptedFrontier(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +func (mr *MockOutboundMsgBuilderMockRecorder) GetAcceptedFrontier(arg0, arg1, arg2, arg3 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAcceptedFrontier", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).GetAcceptedFrontier), arg0, arg1, arg2, arg3) } @@ -215,7 +232,7 @@ func (m *MockOutboundMsgBuilder) GetAcceptedStateSummary(arg0 ids.ID, arg1 uint3 } // GetAcceptedStateSummary indicates an expected call of GetAcceptedStateSummary. -func (mr *MockOutboundMsgBuilderMockRecorder) GetAcceptedStateSummary(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +func (mr *MockOutboundMsgBuilderMockRecorder) GetAcceptedStateSummary(arg0, arg1, arg2, arg3 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAcceptedStateSummary", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).GetAcceptedStateSummary), arg0, arg1, arg2, arg3) } @@ -230,11 +247,26 @@ func (m *MockOutboundMsgBuilder) GetAncestors(arg0 ids.ID, arg1 uint32, arg2 tim } // GetAncestors indicates an expected call of GetAncestors. -func (mr *MockOutboundMsgBuilderMockRecorder) GetAncestors(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { +func (mr *MockOutboundMsgBuilderMockRecorder) GetAncestors(arg0, arg1, arg2, arg3, arg4 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAncestors", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).GetAncestors), arg0, arg1, arg2, arg3, arg4) } +// GetPeerList mocks base method. +func (m *MockOutboundMsgBuilder) GetPeerList(arg0, arg1 []byte) (OutboundMessage, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPeerList", arg0, arg1) + ret0, _ := ret[0].(OutboundMessage) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPeerList indicates an expected call of GetPeerList. 
+func (mr *MockOutboundMsgBuilderMockRecorder) GetPeerList(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPeerList", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).GetPeerList), arg0, arg1) +} + // GetStateSummaryFrontier mocks base method. func (m *MockOutboundMsgBuilder) GetStateSummaryFrontier(arg0 ids.ID, arg1 uint32, arg2 time.Duration) (OutboundMessage, error) { m.ctrl.T.Helper() @@ -245,54 +277,54 @@ func (m *MockOutboundMsgBuilder) GetStateSummaryFrontier(arg0 ids.ID, arg1 uint3 } // GetStateSummaryFrontier indicates an expected call of GetStateSummaryFrontier. -func (mr *MockOutboundMsgBuilderMockRecorder) GetStateSummaryFrontier(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockOutboundMsgBuilderMockRecorder) GetStateSummaryFrontier(arg0, arg1, arg2 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetStateSummaryFrontier", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).GetStateSummaryFrontier), arg0, arg1, arg2) } -// PeerList mocks base method. -func (m *MockOutboundMsgBuilder) PeerList(arg0 []ips.ClaimedIPPort, arg1 bool) (OutboundMessage, error) { +// Handshake mocks base method. +func (m *MockOutboundMsgBuilder) Handshake(arg0 uint32, arg1 uint64, arg2 ips.IPPort, arg3, arg4 string, arg5, arg6, arg7 uint32, arg8 uint64, arg9, arg10 []byte, arg11 []ids.ID, arg12, arg13 []uint32, arg14, arg15 []byte) (OutboundMessage, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PeerList", arg0, arg1) + ret := m.ctrl.Call(m, "Handshake", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11, arg12, arg13, arg14, arg15) ret0, _ := ret[0].(OutboundMessage) ret1, _ := ret[1].(error) return ret0, ret1 } -// PeerList indicates an expected call of PeerList. -func (mr *MockOutboundMsgBuilderMockRecorder) PeerList(arg0, arg1 interface{}) *gomock.Call { +// Handshake indicates an expected call of Handshake. 
+func (mr *MockOutboundMsgBuilderMockRecorder) Handshake(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11, arg12, arg13, arg14, arg15 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PeerList", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).PeerList), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Handshake", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).Handshake), arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11, arg12, arg13, arg14, arg15) } -// PeerListAck mocks base method. -func (m *MockOutboundMsgBuilder) PeerListAck(arg0 []*p2p.PeerAck) (OutboundMessage, error) { +// PeerList mocks base method. +func (m *MockOutboundMsgBuilder) PeerList(arg0 []*ips.ClaimedIPPort, arg1 bool) (OutboundMessage, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PeerListAck", arg0) + ret := m.ctrl.Call(m, "PeerList", arg0, arg1) ret0, _ := ret[0].(OutboundMessage) ret1, _ := ret[1].(error) return ret0, ret1 } -// PeerListAck indicates an expected call of PeerListAck. -func (mr *MockOutboundMsgBuilderMockRecorder) PeerListAck(arg0 interface{}) *gomock.Call { +// PeerList indicates an expected call of PeerList. +func (mr *MockOutboundMsgBuilderMockRecorder) PeerList(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PeerListAck", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).PeerListAck), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PeerList", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).PeerList), arg0, arg1) } // Ping mocks base method. 
-func (m *MockOutboundMsgBuilder) Ping() (OutboundMessage, error) { +func (m *MockOutboundMsgBuilder) Ping(arg0 uint32, arg1 []*p2p.SubnetUptime) (OutboundMessage, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Ping") + ret := m.ctrl.Call(m, "Ping", arg0, arg1) ret0, _ := ret[0].(OutboundMessage) ret1, _ := ret[1].(error) return ret0, ret1 } // Ping indicates an expected call of Ping. -func (mr *MockOutboundMsgBuilderMockRecorder) Ping() *gomock.Call { +func (mr *MockOutboundMsgBuilderMockRecorder) Ping(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Ping", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).Ping)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Ping", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).Ping), arg0, arg1) } // Pong mocks base method. @@ -305,39 +337,39 @@ func (m *MockOutboundMsgBuilder) Pong(arg0 uint32, arg1 []*p2p.SubnetUptime) (Ou } // Pong indicates an expected call of Pong. -func (mr *MockOutboundMsgBuilderMockRecorder) Pong(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockOutboundMsgBuilderMockRecorder) Pong(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Pong", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).Pong), arg0, arg1) } // PullQuery mocks base method. -func (m *MockOutboundMsgBuilder) PullQuery(arg0 ids.ID, arg1 uint32, arg2 time.Duration, arg3 ids.ID, arg4 p2p.EngineType) (OutboundMessage, error) { +func (m *MockOutboundMsgBuilder) PullQuery(arg0 ids.ID, arg1 uint32, arg2 time.Duration, arg3 ids.ID, arg4 uint64, arg5 p2p.EngineType) (OutboundMessage, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PullQuery", arg0, arg1, arg2, arg3, arg4) + ret := m.ctrl.Call(m, "PullQuery", arg0, arg1, arg2, arg3, arg4, arg5) ret0, _ := ret[0].(OutboundMessage) ret1, _ := ret[1].(error) return ret0, ret1 } // PullQuery indicates an expected call of PullQuery. 
-func (mr *MockOutboundMsgBuilderMockRecorder) PullQuery(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { +func (mr *MockOutboundMsgBuilderMockRecorder) PullQuery(arg0, arg1, arg2, arg3, arg4, arg5 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PullQuery", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).PullQuery), arg0, arg1, arg2, arg3, arg4) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PullQuery", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).PullQuery), arg0, arg1, arg2, arg3, arg4, arg5) } // PushQuery mocks base method. -func (m *MockOutboundMsgBuilder) PushQuery(arg0 ids.ID, arg1 uint32, arg2 time.Duration, arg3 []byte, arg4 p2p.EngineType) (OutboundMessage, error) { +func (m *MockOutboundMsgBuilder) PushQuery(arg0 ids.ID, arg1 uint32, arg2 time.Duration, arg3 []byte, arg4 uint64, arg5 p2p.EngineType) (OutboundMessage, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PushQuery", arg0, arg1, arg2, arg3, arg4) + ret := m.ctrl.Call(m, "PushQuery", arg0, arg1, arg2, arg3, arg4, arg5) ret0, _ := ret[0].(OutboundMessage) ret1, _ := ret[1].(error) return ret0, ret1 } // PushQuery indicates an expected call of PushQuery. -func (mr *MockOutboundMsgBuilderMockRecorder) PushQuery(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { +func (mr *MockOutboundMsgBuilderMockRecorder) PushQuery(arg0, arg1, arg2, arg3, arg4, arg5 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PushQuery", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).PushQuery), arg0, arg1, arg2, arg3, arg4) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PushQuery", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).PushQuery), arg0, arg1, arg2, arg3, arg4, arg5) } // Put mocks base method. @@ -350,7 +382,7 @@ func (m *MockOutboundMsgBuilder) Put(arg0 ids.ID, arg1 uint32, arg2 []byte, arg3 } // Put indicates an expected call of Put. 
-func (mr *MockOutboundMsgBuilderMockRecorder) Put(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +func (mr *MockOutboundMsgBuilderMockRecorder) Put(arg0, arg1, arg2, arg3 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Put", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).Put), arg0, arg1, arg2, arg3) } @@ -365,22 +397,7 @@ func (m *MockOutboundMsgBuilder) StateSummaryFrontier(arg0 ids.ID, arg1 uint32, } // StateSummaryFrontier indicates an expected call of StateSummaryFrontier. -func (mr *MockOutboundMsgBuilderMockRecorder) StateSummaryFrontier(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockOutboundMsgBuilderMockRecorder) StateSummaryFrontier(arg0, arg1, arg2 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateSummaryFrontier", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).StateSummaryFrontier), arg0, arg1, arg2) } - -// Version mocks base method. -func (m *MockOutboundMsgBuilder) Version(arg0 uint32, arg1 uint64, arg2 ips.IPPort, arg3 string, arg4 uint64, arg5 []byte, arg6 []ids.ID) (OutboundMessage, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Version", arg0, arg1, arg2, arg3, arg4, arg5, arg6) - ret0, _ := ret[0].(OutboundMessage) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Version indicates an expected call of Version. -func (mr *MockOutboundMsgBuilderMockRecorder) Version(arg0, arg1, arg2, arg3, arg4, arg5, arg6 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Version", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).Version), arg0, arg1, arg2, arg3, arg4, arg5, arg6) -} diff --git a/avalanchego/message/ops.go b/avalanchego/message/ops.go index e3ee5cf2..11c69087 100644 --- a/avalanchego/message/ops.go +++ b/avalanchego/message/ops.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. 
All rights reserved. // See the file LICENSE for licensing terms. package message @@ -21,9 +21,9 @@ const ( // Handshake: PingOp Op = iota PongOp - VersionOp + HandshakeOp + GetPeerListOp PeerListOp - PeerListAckOp // State sync: GetStateSummaryFrontierOp GetStateSummaryFrontierFailedOp @@ -51,12 +51,12 @@ const ( ChitsOp // Application: AppRequestOp - AppRequestFailedOp + AppErrorOp AppResponseOp AppGossipOp // Cross chain: CrossChainAppRequestOp - CrossChainAppRequestFailedOp + CrossChainAppErrorOp CrossChainAppResponseOp // Internal: ConnectedOp @@ -71,9 +71,9 @@ var ( HandshakeOps = []Op{ PingOp, PongOp, - VersionOp, + HandshakeOp, + GetPeerListOp, PeerListOp, - PeerListAckOp, } // List of all consensus request message types @@ -97,6 +97,7 @@ var ( PutOp, ChitsOp, AppResponseOp, + AppErrorOp, } // AppGossip is the only message that is sent unrequested without the // expectation of a response @@ -115,9 +116,8 @@ var ( GetAncestorsFailedOp, GetFailedOp, QueryFailedOp, - AppRequestFailedOp, CrossChainAppRequestOp, - CrossChainAppRequestFailedOp, + CrossChainAppErrorOp, CrossChainAppResponseOp, ConnectedOp, ConnectedSubnetOp, @@ -165,12 +165,12 @@ var ( AsynchronousOps = []Op{ // Application AppRequestOp, - AppRequestFailedOp, + AppErrorOp, AppGossipOp, AppResponseOp, // Cross chain CrossChainAppRequestOp, - CrossChainAppRequestFailedOp, + CrossChainAppErrorOp, CrossChainAppResponseOp, } @@ -182,22 +182,22 @@ var ( GetAncestorsFailedOp: AncestorsOp, GetFailedOp: PutOp, QueryFailedOp: ChitsOp, - AppRequestFailedOp: AppResponseOp, - CrossChainAppRequestFailedOp: CrossChainAppResponseOp, - } - UnrequestedOps = set.Set[Op]{ - GetAcceptedFrontierOp: {}, - GetAcceptedOp: {}, - GetAncestorsOp: {}, - GetOp: {}, - PushQueryOp: {}, - PullQueryOp: {}, - AppRequestOp: {}, - AppGossipOp: {}, - CrossChainAppRequestOp: {}, - GetStateSummaryFrontierOp: {}, - GetAcceptedStateSummaryOp: {}, + AppErrorOp: AppResponseOp, + CrossChainAppErrorOp: CrossChainAppResponseOp, } + 
UnrequestedOps = set.Of( + GetAcceptedFrontierOp, + GetAcceptedOp, + GetAncestorsOp, + GetOp, + PushQueryOp, + PullQueryOp, + AppRequestOp, + AppGossipOp, + CrossChainAppRequestOp, + GetStateSummaryFrontierOp, + GetAcceptedStateSummaryOp, + ) errUnknownMessageType = errors.New("unknown message type") ) @@ -209,12 +209,12 @@ func (op Op) String() string { return "ping" case PongOp: return "pong" - case VersionOp: - return "version" + case HandshakeOp: + return "handshake" + case GetPeerListOp: + return "get_peerlist" case PeerListOp: return "peerlist" - case PeerListAckOp: - return "peerlist_ack" // State sync case GetStateSummaryFrontierOp: return "get_state_summary_frontier" @@ -265,8 +265,8 @@ func (op Op) String() string { // Application case AppRequestOp: return "app_request" - case AppRequestFailedOp: - return "app_request_failed" + case AppErrorOp: + return "app_error" case AppResponseOp: return "app_response" case AppGossipOp: @@ -274,8 +274,8 @@ func (op Op) String() string { // Cross chain case CrossChainAppRequestOp: return "cross_chain_app_request" - case CrossChainAppRequestFailedOp: - return "cross_chain_app_request_failed" + case CrossChainAppErrorOp: + return "cross_chain_app_error" case CrossChainAppResponseOp: return "cross_chain_app_response" // Internal @@ -296,19 +296,19 @@ func (op Op) String() string { } } -func Unwrap(m *p2p.Message) (interface{}, error) { +func Unwrap(m *p2p.Message) (fmt.Stringer, error) { switch msg := m.GetMessage().(type) { // Handshake: case *p2p.Message_Ping: return msg.Ping, nil case *p2p.Message_Pong: return msg.Pong, nil - case *p2p.Message_Version: - return msg.Version, nil - case *p2p.Message_PeerList: - return msg.PeerList, nil - case *p2p.Message_PeerListAck: - return msg.PeerListAck, nil + case *p2p.Message_Handshake: + return msg.Handshake, nil + case *p2p.Message_GetPeerList: + return msg.GetPeerList, nil + case *p2p.Message_PeerList_: + return msg.PeerList_, nil // State sync: case 
*p2p.Message_GetStateSummaryFrontier: return msg.GetStateSummaryFrontier, nil @@ -347,6 +347,8 @@ func Unwrap(m *p2p.Message) (interface{}, error) { return msg.AppRequest, nil case *p2p.Message_AppResponse: return msg.AppResponse, nil + case *p2p.Message_AppError: + return msg.AppError, nil case *p2p.Message_AppGossip: return msg.AppGossip, nil default: @@ -360,12 +362,12 @@ func ToOp(m *p2p.Message) (Op, error) { return PingOp, nil case *p2p.Message_Pong: return PongOp, nil - case *p2p.Message_Version: - return VersionOp, nil - case *p2p.Message_PeerList: + case *p2p.Message_Handshake: + return HandshakeOp, nil + case *p2p.Message_GetPeerList: + return GetPeerListOp, nil + case *p2p.Message_PeerList_: return PeerListOp, nil - case *p2p.Message_PeerListAck: - return PeerListAckOp, nil case *p2p.Message_GetStateSummaryFrontier: return GetStateSummaryFrontierOp, nil case *p2p.Message_StateSummaryFrontier_: @@ -400,6 +402,8 @@ func ToOp(m *p2p.Message) (Op, error) { return AppRequestOp, nil case *p2p.Message_AppResponse: return AppResponseOp, nil + case *p2p.Message_AppError: + return AppErrorOp, nil case *p2p.Message_AppGossip: return AppGossipOp, nil default: diff --git a/avalanchego/message/outbound_msg_builder.go b/avalanchego/message/outbound_msg_builder.go index 693120b1..f90a5f50 100644 --- a/avalanchego/message/outbound_msg_builder.go +++ b/avalanchego/message/outbound_msg_builder.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package message @@ -18,27 +18,40 @@ var _ OutboundMsgBuilder = (*outMsgBuilder)(nil) // with a reference count of 1. Once the reference count hits 0, the message // bytes should no longer be accessed. 
type OutboundMsgBuilder interface { - Version( + Handshake( networkID uint32, myTime uint64, ip ips.IPPort, myVersion string, - myVersionTime uint64, - sig []byte, + client string, + major uint32, + minor uint32, + patch uint32, + ipSigningTime uint64, + ipNodeIDSig []byte, + ipBLSSig []byte, trackedSubnets []ids.ID, + supportedACPs []uint32, + objectedACPs []uint32, + knownPeersFilter []byte, + knownPeersSalt []byte, + ) (OutboundMessage, error) + + GetPeerList( + knownPeersFilter []byte, + knownPeersSalt []byte, ) (OutboundMessage, error) PeerList( - peers []ips.ClaimedIPPort, + peers []*ips.ClaimedIPPort, bypassThrottling bool, ) (OutboundMessage, error) - PeerListAck( - peerAcks []*p2p.PeerAck, + Ping( + primaryUptime uint32, + subnetUptimes []*p2p.SubnetUptime, ) (OutboundMessage, error) - Ping() (OutboundMessage, error) - Pong( primaryUptime uint32, subnetUptimes []*p2p.SubnetUptime, @@ -79,7 +92,7 @@ type OutboundMsgBuilder interface { AcceptedFrontier( chainID ids.ID, requestID uint32, - containerIDs []ids.ID, + containerID ids.ID, ) (OutboundMessage, error) GetAccepted( @@ -130,6 +143,7 @@ type OutboundMsgBuilder interface { requestID uint32, deadline time.Duration, container []byte, + requestedHeight uint64, engineType p2p.EngineType, ) (OutboundMessage, error) @@ -138,14 +152,16 @@ type OutboundMsgBuilder interface { requestID uint32, deadline time.Duration, containerID ids.ID, + requestedHeight uint64, engineType p2p.EngineType, ) (OutboundMessage, error) Chits( chainID ids.ID, requestID uint32, - preferredContainerIDs []ids.ID, - acceptedContainerIDs []ids.ID, + preferredID ids.ID, + preferredIDAtHeight ids.ID, + acceptedID ids.ID, ) (OutboundMessage, error) AppRequest( @@ -161,6 +177,13 @@ type OutboundMsgBuilder interface { msg []byte, ) (OutboundMessage, error) + AppError( + chainID ids.ID, + requestID uint32, + errorCode int32, + errorMessage string, + ) (OutboundMessage, error) + AppGossip( chainID ids.ID, msg []byte, @@ -182,11 +205,17 @@ func 
newOutboundBuilder(compressionType compression.Type, builder *msgBuilder) O } } -func (b *outMsgBuilder) Ping() (OutboundMessage, error) { +func (b *outMsgBuilder) Ping( + primaryUptime uint32, + subnetUptimes []*p2p.SubnetUptime, +) (OutboundMessage, error) { return b.builder.createOutbound( &p2p.Message{ Message: &p2p.Message_Ping{ - Ping: &p2p.Ping{}, + Ping: &p2p.Ping{ + Uptime: primaryUptime, + SubnetUptimes: subnetUptimes, + }, }, }, compression.TypeNone, @@ -212,29 +241,51 @@ func (b *outMsgBuilder) Pong( ) } -func (b *outMsgBuilder) Version( +func (b *outMsgBuilder) Handshake( networkID uint32, myTime uint64, ip ips.IPPort, myVersion string, - myVersionTime uint64, - sig []byte, + client string, + major uint32, + minor uint32, + patch uint32, + ipSigningTime uint64, + ipNodeIDSig []byte, + ipBLSSig []byte, trackedSubnets []ids.ID, + supportedACPs []uint32, + objectedACPs []uint32, + knownPeersFilter []byte, + knownPeersSalt []byte, ) (OutboundMessage, error) { subnetIDBytes := make([][]byte, len(trackedSubnets)) encodeIDs(trackedSubnets, subnetIDBytes) return b.builder.createOutbound( &p2p.Message{ - Message: &p2p.Message_Version{ - Version: &p2p.Version{ + Message: &p2p.Message_Handshake{ + Handshake: &p2p.Handshake{ NetworkId: networkID, MyTime: myTime, IpAddr: ip.IP.To16(), IpPort: uint32(ip.Port), MyVersion: myVersion, - MyVersionTime: myVersionTime, - Sig: sig, + IpSigningTime: ipSigningTime, + IpNodeIdSig: ipNodeIDSig, TrackedSubnets: subnetIDBytes, + Client: &p2p.Client{ + Name: client, + Major: major, + Minor: minor, + Patch: patch, + }, + SupportedAcps: supportedACPs, + ObjectedAcps: objectedACPs, + KnownPeers: &p2p.BloomFilter{ + Filter: knownPeersFilter, + Salt: knownPeersSalt, + }, + IpBlsSig: ipBLSSig, }, }, }, @@ -243,7 +294,27 @@ func (b *outMsgBuilder) Version( ) } -func (b *outMsgBuilder) PeerList(peers []ips.ClaimedIPPort, bypassThrottling bool) (OutboundMessage, error) { +func (b *outMsgBuilder) GetPeerList( + knownPeersFilter []byte, + 
knownPeersSalt []byte, +) (OutboundMessage, error) { + return b.builder.createOutbound( + &p2p.Message{ + Message: &p2p.Message_GetPeerList{ + GetPeerList: &p2p.GetPeerList{ + KnownPeers: &p2p.BloomFilter{ + Filter: knownPeersFilter, + Salt: knownPeersSalt, + }, + }, + }, + }, + b.compressionType, + false, + ) +} + +func (b *outMsgBuilder) PeerList(peers []*ips.ClaimedIPPort, bypassThrottling bool) (OutboundMessage, error) { claimIPPorts := make([]*p2p.ClaimedIpPort, len(peers)) for i, p := range peers { claimIPPorts[i] = &p2p.ClaimedIpPort{ @@ -252,13 +323,13 @@ func (b *outMsgBuilder) PeerList(peers []ips.ClaimedIPPort, bypassThrottling boo IpPort: uint32(p.IPPort.Port), Timestamp: p.Timestamp, Signature: p.Signature, - TxId: p.TxID[:], + TxId: ids.Empty[:], } } return b.builder.createOutbound( &p2p.Message{ - Message: &p2p.Message_PeerList{ - PeerList: &p2p.PeerList{ + Message: &p2p.Message_PeerList_{ + PeerList_: &p2p.PeerList{ ClaimedIpPorts: claimIPPorts, }, }, @@ -268,20 +339,6 @@ func (b *outMsgBuilder) PeerList(peers []ips.ClaimedIPPort, bypassThrottling boo ) } -func (b *outMsgBuilder) PeerListAck(peerAcks []*p2p.PeerAck) (OutboundMessage, error) { - return b.builder.createOutbound( - &p2p.Message{ - Message: &p2p.Message_PeerListAck{ - PeerListAck: &p2p.PeerListAck{ - PeerAcks: peerAcks, - }, - }, - }, - compression.TypeNone, - false, - ) -} - func (b *outMsgBuilder) GetStateSummaryFrontier( chainID ids.ID, requestID uint32, @@ -391,17 +448,15 @@ func (b *outMsgBuilder) GetAcceptedFrontier( func (b *outMsgBuilder) AcceptedFrontier( chainID ids.ID, requestID uint32, - containerIDs []ids.ID, + containerID ids.ID, ) (OutboundMessage, error) { - containerIDBytes := make([][]byte, len(containerIDs)) - encodeIDs(containerIDs, containerIDBytes) return b.builder.createOutbound( &p2p.Message{ Message: &p2p.Message_AcceptedFrontier_{ AcceptedFrontier_: &p2p.AcceptedFrontier{ - ChainId: chainID[:], - RequestId: requestID, - ContainerIds: containerIDBytes, + 
ChainId: chainID[:], + RequestId: requestID, + ContainerId: containerID[:], }, }, }, @@ -553,17 +608,19 @@ func (b *outMsgBuilder) PushQuery( requestID uint32, deadline time.Duration, container []byte, + requestedHeight uint64, engineType p2p.EngineType, ) (OutboundMessage, error) { return b.builder.createOutbound( &p2p.Message{ Message: &p2p.Message_PushQuery{ PushQuery: &p2p.PushQuery{ - ChainId: chainID[:], - RequestId: requestID, - Deadline: uint64(deadline), - Container: container, - EngineType: engineType, + ChainId: chainID[:], + RequestId: requestID, + Deadline: uint64(deadline), + Container: container, + RequestedHeight: requestedHeight, + EngineType: engineType, }, }, }, @@ -577,17 +634,19 @@ func (b *outMsgBuilder) PullQuery( requestID uint32, deadline time.Duration, containerID ids.ID, + requestedHeight uint64, engineType p2p.EngineType, ) (OutboundMessage, error) { return b.builder.createOutbound( &p2p.Message{ Message: &p2p.Message_PullQuery{ PullQuery: &p2p.PullQuery{ - ChainId: chainID[:], - RequestId: requestID, - Deadline: uint64(deadline), - ContainerId: containerID[:], - EngineType: engineType, + ChainId: chainID[:], + RequestId: requestID, + Deadline: uint64(deadline), + ContainerId: containerID[:], + RequestedHeight: requestedHeight, + EngineType: engineType, }, }, }, @@ -599,21 +658,19 @@ func (b *outMsgBuilder) PullQuery( func (b *outMsgBuilder) Chits( chainID ids.ID, requestID uint32, - preferredContainerIDs []ids.ID, - acceptedContainerIDs []ids.ID, + preferredID ids.ID, + preferredIDAtHeight ids.ID, + acceptedID ids.ID, ) (OutboundMessage, error) { - preferredContainerIDBytes := make([][]byte, len(preferredContainerIDs)) - encodeIDs(preferredContainerIDs, preferredContainerIDBytes) - acceptedContainerIDBytes := make([][]byte, len(acceptedContainerIDs)) - encodeIDs(acceptedContainerIDs, acceptedContainerIDBytes) return b.builder.createOutbound( &p2p.Message{ Message: &p2p.Message_Chits{ Chits: &p2p.Chits{ - ChainId: chainID[:], - 
RequestId: requestID, - PreferredContainerIds: preferredContainerIDBytes, - AcceptedContainerIds: acceptedContainerIDBytes, + ChainId: chainID[:], + RequestId: requestID, + PreferredId: preferredID[:], + PreferredIdAtHeight: preferredIDAtHeight[:], + AcceptedId: acceptedID[:], }, }, }, @@ -660,6 +717,23 @@ func (b *outMsgBuilder) AppResponse(chainID ids.ID, requestID uint32, msg []byte ) } +func (b *outMsgBuilder) AppError(chainID ids.ID, requestID uint32, errorCode int32, errorMessage string) (OutboundMessage, error) { + return b.builder.createOutbound( + &p2p.Message{ + Message: &p2p.Message_AppError{ + AppError: &p2p.AppError{ + ChainId: chainID[:], + RequestId: requestID, + ErrorCode: errorCode, + ErrorMessage: errorMessage, + }, + }, + }, + b.compressionType, + false, + ) +} + func (b *outMsgBuilder) AppGossip(chainID ids.ID, msg []byte) (OutboundMessage, error) { return b.builder.createOutbound( &p2p.Message{ diff --git a/avalanchego/message/outbound_msg_builder_test.go b/avalanchego/message/outbound_msg_builder_test.go index 50f273bf..02e46ef1 100644 --- a/avalanchego/message/outbound_msg_builder_test.go +++ b/avalanchego/message/outbound_msg_builder_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package message @@ -8,7 +8,6 @@ import ( "time" "github.com/prometheus/client_golang/prometheus" - "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" @@ -29,7 +28,6 @@ func Test_newOutboundBuilder(t *testing.T) { for _, compressionType := range []compression.Type{ compression.TypeNone, - compression.TypeGzip, compression.TypeZstd, } { t.Run(compressionType.String(), func(t *testing.T) { diff --git a/avalanchego/nat/nat.go b/avalanchego/nat/nat.go index 33749ca0..a6e37078 100644 --- a/avalanchego/nat/nat.go +++ b/avalanchego/nat/nat.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package nat @@ -53,8 +53,8 @@ type Mapper struct { } // NewPortMapper returns an initialized mapper -func NewPortMapper(log logging.Logger, r Router) Mapper { - return Mapper{ +func NewPortMapper(log logging.Logger, r Router) *Mapper { + return &Mapper{ log: log, r: r, closer: make(chan struct{}), diff --git a/avalanchego/nat/no_router.go b/avalanchego/nat/no_router.go index 5c894c8c..19c68dac 100644 --- a/avalanchego/nat/no_router.go +++ b/avalanchego/nat/no_router.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package nat diff --git a/avalanchego/nat/pmp.go b/avalanchego/nat/pmp.go index ad2032ec..ecee9793 100644 --- a/avalanchego/nat/pmp.go +++ b/avalanchego/nat/pmp.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package nat diff --git a/avalanchego/nat/upnp.go b/avalanchego/nat/upnp.go index 2571048e..aa26d6d8 100644 --- a/avalanchego/nat/upnp.go +++ b/avalanchego/nat/upnp.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package nat diff --git a/avalanchego/network/README.md b/avalanchego/network/README.md index 5364d9db..303d1f56 100644 --- a/avalanchego/network/README.md +++ b/avalanchego/network/README.md @@ -46,7 +46,7 @@ When starting an Avalanche node, a node needs to be able to initiate some proces In Avalanche, nodes connect to an initial set of bootstrapper nodes known as **beacons** (this is user-configurable). Once connected to a set of beacons, a node is able to discover other nodes in the network. Over time, a node eventually discovers other peers in the network through `PeerList` messages it receives through: - The handshake initiated between two peers when attempting to connect to a peer (see [Connecting](#connecting)). -- Periodic `PeerList` gossip messages that every peer sends to the peers it's connected to (see [Connected](#connected)). +- Responses to periodically sent `GetPeerList` messages requesting a `PeerList` of unknown peers (see [Connected](#connected)). #### Connecting @@ -54,32 +54,31 @@ In Avalanche, nodes connect to an initial set of bootstrapper nodes known as **b Upon connection to any peer, a handshake is performed between the node attempting to establish the outbound connection to the peer and the peer receiving the inbound connection. -When attempting to establish the connection, the first message that the node attempting to connect to the peer in the network is a `Version` message describing compatibility of the candidate node with the peer. 
As an example, nodes that are attempting to connect with an incompatible version of AvalancheGo or a significantly skewed local clock are rejected by the peer. +When attempting to establish the connection, the first message that the node attempting to connect sends to the peer in the network is a `Handshake` message describing compatibility of the candidate node with the peer. As an example, nodes that are attempting to connect with an incompatible version of AvalancheGo or a significantly skewed local clock are rejected by the peer. ```mermaid sequenceDiagram Note over Node,Peer: Initiate Handshake Note left of Node: I want to connect to you! -Note over Node,Peer: Version message +Note over Node,Peer: Handshake message Node->>Peer: AvalancheGo v1.0.0 Note right of Peer: My version v1.9.4 is incompatible with your version v1.0.0. Peer-xNode: Connection dropped Note over Node,Peer: Handshake Failed ``` -If the `Version` message is successfully received and the peer decides that it wants a connection with this node, it replies with a `PeerList` message that contains metadata about other peers that allows a node to connect to them. Upon reception of a `PeerList` message, a node will attempt to connect to any peers that the node is not already connected to to allow the node to discover more peers in the network. +If the `Handshake` message is successfully received and the peer decides that it wants a connection with this node, it replies with a `PeerList` message that contains metadata about other peers that allows a node to connect to them. Upon reception of a `PeerList` message, a node will attempt to connect to any peers that the node is not already connected to to allow the node to discover more peers in the network. ```mermaid sequenceDiagram Note over Node,Peer: Initiate Handshake Note left of Node: I want to connect to you! -Note over Node,Peer: Version message +Note over Node,Peer: Handshake message Node->>Peer: AvalancheGo v1.9.4 Note right of Peer: LGTM!
Note over Node,Peer: PeerList message Peer->>Node: Peer-X, Peer-Y, Peer-Z Note over Node,Peer: Handshake Complete -Node->>Peer: ACK Peer-X, Peer-Y, Peer-Z ``` Once the node attempting to join the network receives this `PeerList` message, the handshake is complete and the node is now connected to the peer. The node attempts to connect to the new peers discovered in the `PeerList` message. Each connection results in another peer handshake, which results in the node incrementally discovering more and more peers in the network as more and more `PeerList` messages are exchanged. @@ -90,73 +89,55 @@ Some peers aren't discovered through the `PeerList` messages exchanged through p ```mermaid sequenceDiagram -Node ->> Peer-1: Version - v1.9.5 +Node ->> Peer-1: Handshake - v1.9.5 Peer-1 ->> Node: PeerList - Peer-2 -Node ->> Peer-1: ACK - Peer-2 Note left of Node: Node is connected to Peer-1 and now tries to connect to Peer-2. -Node ->> Peer-2: Version - v1.9.5 +Node ->> Peer-2: Handshake - v1.9.5 Peer-2 ->> Node: PeerList - Peer-1 -Node ->> Peer-2: ACK - Peer-1 Note left of Node: Peer-3 was never sampled, so we haven't connected yet! Node --> Peer-3: No connection ``` -To guarantee that a node can discover all peers, each node periodically gossips a sample of the peers it knows about to other peers. +To guarantee that a node can discover all peers, each node periodically sends a `GetPeerList` message to a random peer. ##### PeerList Gossip ###### Messages -A `PeerList` is the message that is used to communicate the presence of peers in the network. Each `PeerList` message contains networking-level metadata about the peer that provides the necessary information to connect to it, alongside the corresponding transaction id that added that peer to the validator set. Transaction ids are unique hashes that only add a single validator, so it is guaranteed that there is a 1:1 mapping between a validator and its associated transaction id. 
+A `GetPeerList` message requests that the peer sends a `PeerList` message. `GetPeerList` messages contain a bloom filter of already known peers to reduce useless bandwidth on `PeerList` messages. The bloom filter reduces bandwidth by enabling the `PeerList` message to only include peers that aren't already known. -`PeerListAck` messages are sent in response to `PeerList` messages to allow a peer to confirm which peers it will actually attempt to connect to. Because nodes only gossip peers they believe another peer doesn't already know about to optimize bandwidth, `PeerListAck` messages are important to confirm that a peer will attempt to connect to someone. Without this, a node might gossip a peer to another peer and assume a connection between the two is being established, and not re-gossip the peer in future gossip cycles. If the connection was never actually wanted by the peer being gossiped to due to a transient reason, that peer would never be able to re-discover the gossiped peer and could be isolated from a subset of the network. +A `PeerList` is the message that is used to communicate the presence of peers in the network. Each `PeerList` message contains signed networking-level metadata about a peer that provides the necessary information to connect to it. -Once a `PeerListAck` message is received from a peer, the node that sent the original `PeerList` message marks the corresponding acknowledged validators as already having been transmitted to the peer, so that it's excluded from subsequent iterations of `PeerList` gossip. +Once peer metadata is received, the node will add that data to its bloom filter to prevent learning about it again. ###### Gossip Handshake messages provide a node with some knowledge of peers in the network, but offers no guarantee that learning about a subset of peers from each peer the node connects with will result in the node learning about every peer in the network. 
-In order to provide a probabilistic guarantee that all peers in the network will eventually learn of one another, each node periodically gossips a sample of the peers that they're aware of to a sample of the peers that they're connected to. Over time, this probabilistically guarantees that every peer will eventually learn of every other peer. +To provide an eventual guarantee that all peers learn of one another, each node periodically requests peers from a random peer. -To optimize bandwidth usage, each node tracks which peers are guaranteed to know of which peers. A node learns this information by tracking both inbound and outbound `PeerList` gossip. +To optimize bandwidth, each node tracks the most recent IPs of validators. The validator's nodeID and timestamp are inserted into a bloom filter which is used to select only necessary IPs to gossip. -- Inbound - - If a node ever receives `PeerList` from a peer, that peer _must_ have known about the peers in that `PeerList` message in order to have gossiped them. -- Outbound - - If a node sends a `PeerList` to a peer and the peer replies with an `PeerListAck` message, then all peers in the `PeerListAck` must be known by the peer. +As the number of entries increases in the bloom filter, the probability of a false positive increases. False positives can cause recent IPs not to be gossiped when they otherwise should be, slowing down the rate of `PeerList` gossip. To prevent the bloom filter from having too many false positives, a new bloom filter is periodically generated and the number of entries a validator is allowed to have in the bloom filter is capped. Generating the new bloom filter both removes stale entries and modifies the hash functions to avoid persistent hash collisions. -To efficiently track which peers know of which peers, the peers that each peer is aware of is represented in a [bit set](https://en.wikipedia.org/wiki/Bit_array). 
A peer is represented by either a `0` if it isn't known by the peer yet, or a `1` if it is known by the peer. - -An node follows the following steps for every cycle of `PeerList` gossip: - -1. Get a sample of peers in the network that the node is connected to -2. For each peer: - 1. Figure out which peers the node hasn't gossiped to them yet. - 2. Take a random sample of these unknown peers. - 3. Send a message describing these peers to the peer. +A node follows the following steps for `PeerList` gossip: ```mermaid sequenceDiagram -Note left of Node: Initialize gossip bit set for Peer-123 -Note left of Node: Peer-123: [0, 0, 0] -Node->>Peer-123: PeerList - Peer-1 -Peer-123->>Node: PeerListAck - Peer-1 -Note left of Node: Peer-123: [1, 0, 0] -Node->>Peer-123: PeerList - Peer-3 -Peer-123->>Node: PeerListAck - Peer-3 -Note left of Node: Peer-123: [1, 0, 1] -Node->>Peer-123: PeerList - Peer-2 -Peer-123->>Node: PeerListAck - Peer-2 -Note left of Node: Peer-123: [1, 1, 1] -Note left of Node: No more gossip left to send to Peer-123! +Note left of Node: Initialize bloom filter +Note left of Node: Bloom: [0, 0, 0] +Node->>Peer-123: GetPeerList [0, 0, 0] +Note right of Peer-123: Any peers can be sent. +Peer-123->>Node: PeerList - Peer-1 +Note left of Node: Bloom: [1, 0, 0] +Node->>Peer-123: GetPeerList [1, 0, 0] +Note right of Peer-123: Either Peer-2 or Peer-3 can be sent. +Peer-123->>Node: PeerList - Peer-3 +Note left of Node: Bloom: [1, 0, 1] +Node->>Peer-123: GetPeerList [1, 0, 1] +Note right of Peer-123: Only Peer-2 can be sent. +Peer-123->>Node: PeerList - Peer-2 +Note left of Node: Bloom: [1, 1, 1] +Node->>Peer-123: GetPeerList [1, 1, 1] +Note right of Peer-123: There are no more peers left to send!
``` - -Because network state is generally expected to be stable (i.e nodes are not continuously flickering online/offline), as more and more gossip messages are exchanged nodes eventually realize that the peers that they are connected to have learned about every other peer. - -A node eventually stops gossiping peers when there's no more new peers to gossip about. `PeerList` gossip only resumes once: - -1. a new peer joins -2. a peer disconnects and reconnects -3. a new validator joins the network -4. a validator's IP is updated diff --git a/avalanchego/network/certs_test.go b/avalanchego/network/certs_test.go index 8405107d..a4b1642b 100644 --- a/avalanchego/network/certs_test.go +++ b/avalanchego/network/certs_test.go @@ -1,39 +1,100 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package network import ( "crypto/tls" + "net" "sync" "testing" + "github.com/stretchr/testify/require" + + _ "embed" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/network/peer" "github.com/ava-labs/avalanchego/staking" + "github.com/ava-labs/avalanchego/utils/ips" ) var ( + //go:embed test_cert_1.crt + testCertBytes1 []byte + //go:embed test_key_1.key + testKeyBytes1 []byte + //go:embed test_cert_2.crt + testCertBytes2 []byte + //go:embed test_key_2.key + testKeyBytes2 []byte + //go:embed test_cert_3.crt + testCertBytes3 []byte + //go:embed test_key_3.key + testKeyBytes3 []byte + + ip *ips.ClaimedIPPort + otherIP *ips.ClaimedIPPort + certLock sync.Mutex tlsCerts []*tls.Certificate tlsConfigs []*tls.Config ) +func init() { + cert1, err := staking.LoadTLSCertFromBytes(testKeyBytes1, testCertBytes1) + if err != nil { + panic(err) + } + cert2, err := staking.LoadTLSCertFromBytes(testKeyBytes2, testCertBytes2) + if err != nil { + panic(err) + } + cert3, err := staking.LoadTLSCertFromBytes(testKeyBytes3, testCertBytes3) + if err != nil { + 
panic(err) + } + tlsCerts = []*tls.Certificate{ + cert1, cert2, cert3, + } + + ip = ips.NewClaimedIPPort( + staking.CertificateFromX509(cert1.Leaf), + ips.IPPort{ + IP: net.IPv4(127, 0, 0, 1), + Port: 9651, + }, + 1, // timestamp + nil, // signature + ) + otherIP = ips.NewClaimedIPPort( + staking.CertificateFromX509(cert2.Leaf), + ips.IPPort{ + IP: net.IPv4(127, 0, 0, 1), + Port: 9651, + }, + 1, // timestamp + nil, // signature + ) +} + func getTLS(t *testing.T, index int) (ids.NodeID, *tls.Certificate, *tls.Config) { certLock.Lock() defer certLock.Unlock() for len(tlsCerts) <= index { cert, err := staking.NewTLSCert() - if err != nil { - t.Fatal(err) - } - tlsConfig := peer.TLSConfig(*cert, nil) - + require.NoError(t, err) tlsCerts = append(tlsCerts, cert) + } + for len(tlsConfigs) <= index { + cert := tlsCerts[len(tlsConfigs)] + tlsConfig := peer.TLSConfig(*cert, nil) tlsConfigs = append(tlsConfigs, tlsConfig) } - cert := tlsCerts[index] - return ids.NodeIDFromCert(cert.Leaf), cert, tlsConfigs[index] + tlsCert := tlsCerts[index] + cert := staking.CertificateFromX509(tlsCert.Leaf) + nodeID := ids.NodeIDFromCert(cert) + return nodeID, tlsCert, tlsConfigs[index] } diff --git a/avalanchego/network/config.go b/avalanchego/network/config.go index 11191b83..64c57d12 100644 --- a/avalanchego/network/config.go +++ b/avalanchego/network/config.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package network @@ -10,12 +10,12 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/network/dialer" - "github.com/ava-labs/avalanchego/network/peer" "github.com/ava-labs/avalanchego/network/throttling" "github.com/ava-labs/avalanchego/snow/networking/tracker" "github.com/ava-labs/avalanchego/snow/uptime" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils/compression" + "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/ips" "github.com/ava-labs/avalanchego/utils/set" ) @@ -26,7 +26,7 @@ type HealthConfig struct { Enabled bool `json:"-"` // MinConnectedPeers is the minimum number of peers that the network should - // be connected to to be considered healthy. + // be connected to be considered healthy. MinConnectedPeers uint `json:"minConnectedPeers"` // MaxTimeSinceMsgReceived is the maximum amount of time since the network @@ -73,6 +73,14 @@ type PeerListGossipConfig struct { // PeerListGossipFreq is the frequency that this node will attempt to gossip // signed IPs to its peers. PeerListGossipFreq time.Duration `json:"peerListGossipFreq"` + + // PeerListPullGossipFreq is the frequency that this node will attempt to + // request signed IPs from its peers. + PeerListPullGossipFreq time.Duration `json:"peerListPullGossipFreq"` + + // PeerListBloomResetFreq is how frequently this node will recalculate the + // IP tracker's bloom filter. + PeerListBloomResetFreq time.Duration `json:"peerListBloomResetFreq"` } type TimeoutConfig struct { @@ -126,16 +134,21 @@ type Config struct { PingFrequency time.Duration `json:"pingFrequency"` AllowPrivateIPs bool `json:"allowPrivateIPs"` + SupportedACPs set.Set[uint32] `json:"supportedACPs"` + ObjectedACPs set.Set[uint32] `json:"objectedACPs"` + // The compression type to use when compressing outbound messages. // Assumes all peers support this compression type. 
CompressionType compression.Type `json:"compressionType"` // TLSKey is this node's TLS key that is used to sign IPs. TLSKey crypto.Signer `json:"-"` + // BLSKey is this node's BLS key that is used to sign IPs. + BLSKey *bls.SecretKey `json:"-"` // TrackedSubnets of the node. - TrackedSubnets set.Set[ids.ID] `json:"-"` - Beacons validators.Set `json:"-"` + TrackedSubnets set.Set[ids.ID] `json:"-"` + Beacons validators.Manager `json:"-"` // Validators are the current validators in the Avalanche network Validators validators.Manager `json:"-"` @@ -179,7 +192,4 @@ type Config struct { // Specifies how much disk usage each peer can cause before // we rate-limit them. DiskTargeter tracker.Targeter `json:"-"` - - // Tracks which validators have been sent to which peers - GossipTracker peer.GossipTracker `json:"-"` } diff --git a/avalanchego/network/conn_test.go b/avalanchego/network/conn_test.go index 8a48e5ac..6a44c615 100644 --- a/avalanchego/network/conn_test.go +++ b/avalanchego/network/conn_test.go @@ -1,11 +1,9 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package network -import ( - "net" -) +import "net" var _ net.Conn = (*testConn)(nil) diff --git a/avalanchego/network/dialer/dialer.go b/avalanchego/network/dialer/dialer.go index 22e8c3ba..109b63cc 100644 --- a/avalanchego/network/dialer/dialer.go +++ b/avalanchego/network/dialer/dialer.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package dialer diff --git a/avalanchego/network/dialer/dialer_test.go b/avalanchego/network/dialer/dialer_test.go index 8fd516cd..a824b8b0 100644 --- a/avalanchego/network/dialer/dialer_test.go +++ b/avalanchego/network/dialer/dialer_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. 
All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package dialer @@ -64,7 +64,7 @@ func TestDialerCancelDial(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) cancel() _, err = dialer.Dial(ctx, myIP) - require.Error(err) + require.ErrorIs(err, context.Canceled) // Make an outgoing connection with a non-cancelled context conn, err := dialer.Dial(context.Background(), myIP) diff --git a/avalanchego/network/dialer_test.go b/avalanchego/network/dialer_test.go index 9009985a..7a60d056 100644 --- a/avalanchego/network/dialer_test.go +++ b/avalanchego/network/dialer_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package network @@ -30,9 +30,10 @@ func newTestDialer() *testDialer { } func (d *testDialer) NewListener() (ips.DynamicIPPort, *testListener) { + // Uses a private IP to easily enable testing AllowPrivateIPs ip := ips.NewDynamicIPPort( - net.IPv6loopback, - uint16(len(d.listeners)), + net.IPv4(10, 0, 0, 0), + uint16(len(d.listeners)+1), ) staticIP := ip.IPPort() listener := newTestListener(staticIP) @@ -54,22 +55,22 @@ func (d *testDialer) Dial(ctx context.Context, ip ips.IPPort) (net.Conn, error) Conn: serverConn, localAddr: &net.TCPAddr{ IP: net.IPv6loopback, - Port: 0, + Port: 1, }, remoteAddr: &net.TCPAddr{ IP: net.IPv6loopback, - Port: 1, + Port: 2, }, } client := &testConn{ Conn: clientConn, localAddr: &net.TCPAddr{ IP: net.IPv6loopback, - Port: 2, + Port: 3, }, remoteAddr: &net.TCPAddr{ IP: net.IPv6loopback, - Port: 3, + Port: 4, }, } select { diff --git a/avalanchego/network/example_test.go b/avalanchego/network/example_test.go deleted file mode 100644 index 77bedc2e..00000000 --- a/avalanchego/network/example_test.go +++ /dev/null @@ -1,154 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. 
All rights reserved. -// See the file LICENSE for licensing terms. - -package network - -import ( - "context" - "os" - "time" - - "go.uber.org/zap" - - "github.com/ava-labs/avalanchego/genesis" - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/message" - "github.com/ava-labs/avalanchego/snow/networking/router" - "github.com/ava-labs/avalanchego/snow/validators" - "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/ips" - "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/utils/set" - "github.com/ava-labs/avalanchego/version" -) - -var _ router.ExternalHandler = (*testExternalHandler)(nil) - -// Note: all of the external handler's methods are called on peer goroutines. It -// is possible for multiple concurrent calls to happen with different NodeIDs. -// However, a given NodeID will only be performing one call at a time. -type testExternalHandler struct { - log logging.Logger -} - -// Note: HandleInbound will be called with raw P2P messages, the networking -// implementation does not implicitly register timeouts, so this handler is only -// called by messages explicitly sent by the peer. If timeouts are required, -// that must be handled by the user of this utility. 
-func (t *testExternalHandler) HandleInbound(_ context.Context, message message.InboundMessage) { - t.log.Info( - "receiving message", - zap.Stringer("op", message.Op()), - ) -} - -func (t *testExternalHandler) Connected(nodeID ids.NodeID, version *version.Application, subnetID ids.ID) { - t.log.Info( - "connected", - zap.Stringer("nodeID", nodeID), - zap.Stringer("version", version), - zap.Stringer("subnetID", subnetID), - ) -} - -func (t *testExternalHandler) Disconnected(nodeID ids.NodeID) { - t.log.Info( - "disconnected", - zap.Stringer("nodeID", nodeID), - ) -} - -type testAggressiveValidatorSet struct { - validators.Set -} - -func (*testAggressiveValidatorSet) Contains(ids.NodeID) bool { - return true -} - -func ExampleNewTestNetwork() { - log := logging.NewLogger( - "networking", - logging.NewWrappedCore( - logging.Info, - os.Stdout, - logging.Colors.ConsoleEncoder(), - ), - ) - - // Needs to be periodically updated by the caller to have the latest - // validator set - validators := &testAggressiveValidatorSet{ - Set: validators.NewSet(), - } - - // If we want to be able to communicate with non-primary network subnets, we - // should register them here. - trackedSubnets := set.Set[ids.ID]{} - - // Messages and connections are handled by the external handler. - handler := &testExternalHandler{ - log: log, - } - - network, err := NewTestNetwork( - log, - constants.CostwoID, - validators, - trackedSubnets, - handler, - ) - if err != nil { - log.Fatal( - "failed to create test network", - zap.Error(err), - ) - return - } - - // We need to initially connect to some nodes in the network before peer - // gossip will enable connecting to all the remaining nodes in the network. 
- beaconIPs, beaconIDs := genesis.SampleBeacons(constants.CostwoID, 5) - for i, beaconIDStr := range beaconIDs { - beaconID, err := ids.NodeIDFromString(beaconIDStr) - if err != nil { - log.Fatal( - "failed to parse beaconID", - zap.String("beaconID", beaconIDStr), - zap.Error(err), - ) - return - } - - beaconIPStr := beaconIPs[i] - ipPort, err := ips.ToIPPort(beaconIPStr) - if err != nil { - log.Fatal( - "failed to parse beaconIP", - zap.String("beaconIP", beaconIPStr), - zap.Error(err), - ) - return - } - - network.ManuallyTrack(beaconID, ipPort) - } - - // Typically network.StartClose() should be called based on receiving a - // SIGINT or SIGTERM. For the example, we close the network after 15s. - go log.RecoverAndPanic(func() { - time.Sleep(15 * time.Second) - network.StartClose() - }) - - // network.Send(...) and network.Gossip(...) can be used here to send - // messages to peers. - - // Calling network.Dispatch() will block until a fatal error occurs or - // network.StartClose() is called. - err = network.Dispatch() - log.Info( - "network exited", - zap.Error(err), - ) -} diff --git a/avalanchego/network/handler_test.go b/avalanchego/network/handler_test.go index 64350b3b..08c99a0d 100644 --- a/avalanchego/network/handler_test.go +++ b/avalanchego/network/handler_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package network diff --git a/avalanchego/network/ip_tracker.go b/avalanchego/network/ip_tracker.go new file mode 100644 index 00000000..ed3935d4 --- /dev/null +++ b/avalanchego/network/ip_tracker.go @@ -0,0 +1,402 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package network + +import ( + "crypto/rand" + "sync" + + "github.com/prometheus/client_golang/prometheus" + "go.uber.org/zap" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/bloom" + "github.com/ava-labs/avalanchego/utils/crypto/bls" + "github.com/ava-labs/avalanchego/utils/ips" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/metric" + "github.com/ava-labs/avalanchego/utils/sampler" + "github.com/ava-labs/avalanchego/utils/set" +) + +const ( + saltSize = 32 + minCountEstimate = 128 + targetFalsePositiveProbability = .001 + maxFalsePositiveProbability = .01 + // By setting maxIPEntriesPerValidator > 1, we allow validators to update + // their IP at least once per bloom filter reset. + maxIPEntriesPerValidator = 2 +) + +var _ validators.SetCallbackListener = (*ipTracker)(nil) + +func newIPTracker( + log logging.Logger, + namespace string, + registerer prometheus.Registerer, +) (*ipTracker, error) { + bloomNamespace := metric.AppendNamespace(namespace, "ip_bloom") + bloomMetrics, err := bloom.NewMetrics(bloomNamespace, registerer) + if err != nil { + return nil, err + } + tracker := &ipTracker{ + log: log, + numValidatorIPs: prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Name: "validator_ips", + Help: "Number of known validator IPs", + }), + numGossipable: prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Name: "gossipable_ips", + Help: "Number of IPs this node is willing to gossip", + }), + bloomMetrics: bloomMetrics, + connected: make(map[ids.NodeID]*ips.ClaimedIPPort), + mostRecentValidatorIPs: make(map[ids.NodeID]*ips.ClaimedIPPort), + gossipableIndicies: make(map[ids.NodeID]int), + bloomAdditions: make(map[ids.NodeID]int), + } + err = utils.Err( + registerer.Register(tracker.numValidatorIPs), + registerer.Register(tracker.numGossipable), + ) + if err != 
nil { + return nil, err + } + return tracker, tracker.resetBloom() +} + +type ipTracker struct { + log logging.Logger + numValidatorIPs prometheus.Gauge + numGossipable prometheus.Gauge + bloomMetrics *bloom.Metrics + + lock sync.RWMutex + // Manually tracked nodes are always treated like validators + manuallyTracked set.Set[ids.NodeID] + // Connected tracks the currently connected peers, including validators and + // non-validators. The IP is not necessarily the same IP as in + // mostRecentIPs. + connected map[ids.NodeID]*ips.ClaimedIPPort + mostRecentValidatorIPs map[ids.NodeID]*ips.ClaimedIPPort + validators set.Set[ids.NodeID] + + // An IP is marked as gossipable if: + // - The node is a validator + // - The node is connected + // - The IP the node connected with is its latest IP + gossipableIndicies map[ids.NodeID]int + gossipableIPs []*ips.ClaimedIPPort + + // The bloom filter contains the most recent validator IPs to avoid + // unnecessary IP gossip. + bloom *bloom.Filter + // To prevent validators from causing the bloom filter to have too many + // false positives, we limit each validator to maxIPEntriesPerValidator in + // the bloom filter. + bloomAdditions map[ids.NodeID]int // Number of IPs added to the bloom + bloomSalt []byte + maxBloomCount int +} + +func (i *ipTracker) ManuallyTrack(nodeID ids.NodeID) { + i.lock.Lock() + defer i.lock.Unlock() + + // We treat manually tracked nodes as if they were validators. + if !i.validators.Contains(nodeID) { + i.onValidatorAdded(nodeID) + } + // Now that the node is marked as a validator, freeze it's validation + // status. Future calls to OnValidatorAdded or OnValidatorRemoved will be + // treated as noops. 
+ i.manuallyTracked.Add(nodeID) +} + +func (i *ipTracker) WantsConnection(nodeID ids.NodeID) bool { + i.lock.RLock() + defer i.lock.RUnlock() + + return i.validators.Contains(nodeID) +} + +func (i *ipTracker) ShouldVerifyIP(ip *ips.ClaimedIPPort) bool { + i.lock.RLock() + defer i.lock.RUnlock() + + if !i.validators.Contains(ip.NodeID) { + return false + } + + prevIP, ok := i.mostRecentValidatorIPs[ip.NodeID] + return !ok || // This would be the first IP + prevIP.Timestamp < ip.Timestamp // This would be a newer IP +} + +// AddIP returns true if the addition of the provided IP updated the most +// recently known IP of a validator. +func (i *ipTracker) AddIP(ip *ips.ClaimedIPPort) bool { + i.lock.Lock() + defer i.lock.Unlock() + + if !i.validators.Contains(ip.NodeID) { + return false + } + + prevIP, ok := i.mostRecentValidatorIPs[ip.NodeID] + if !ok { + // This is the first IP we've heard from the validator, so it is the + // most recent. + i.updateMostRecentValidatorIP(ip) + // Because we didn't previously have an IP, we know we aren't currently + // connected to them. + return true + } + + if prevIP.Timestamp >= ip.Timestamp { + // This IP is not newer than the previously known IP. + return false + } + + i.updateMostRecentValidatorIP(ip) + i.removeGossipableIP(ip.NodeID) + return true +} + +func (i *ipTracker) GetIP(nodeID ids.NodeID) (*ips.ClaimedIPPort, bool) { + i.lock.RLock() + defer i.lock.RUnlock() + + ip, ok := i.mostRecentValidatorIPs[nodeID] + return ip, ok +} + +func (i *ipTracker) Connected(ip *ips.ClaimedIPPort) { + i.lock.Lock() + defer i.lock.Unlock() + + i.connected[ip.NodeID] = ip + if !i.validators.Contains(ip.NodeID) { + return + } + + prevIP, ok := i.mostRecentValidatorIPs[ip.NodeID] + if !ok { + // This is the first IP we've heard from the validator, so it is the + // most recent. 
+ i.updateMostRecentValidatorIP(ip) + i.addGossipableIP(ip) + return + } + + if prevIP.Timestamp > ip.Timestamp { + // There is a more up-to-date IP than the one that was used to connect. + return + } + + if prevIP.Timestamp < ip.Timestamp { + i.updateMostRecentValidatorIP(ip) + } + i.addGossipableIP(ip) +} + +func (i *ipTracker) Disconnected(nodeID ids.NodeID) { + i.lock.Lock() + defer i.lock.Unlock() + + delete(i.connected, nodeID) + i.removeGossipableIP(nodeID) +} + +func (i *ipTracker) OnValidatorAdded(nodeID ids.NodeID, _ *bls.PublicKey, _ ids.ID, _ uint64) { + i.lock.Lock() + defer i.lock.Unlock() + + i.onValidatorAdded(nodeID) +} + +func (i *ipTracker) onValidatorAdded(nodeID ids.NodeID) { + if i.manuallyTracked.Contains(nodeID) { + return + } + + i.validators.Add(nodeID) + ip, connected := i.connected[nodeID] + if !connected { + return + } + + // Because we only track validator IPs, the from the connection is + // guaranteed to be the most up-to-date IP that we know. + i.updateMostRecentValidatorIP(ip) + i.addGossipableIP(ip) +} + +func (*ipTracker) OnValidatorWeightChanged(ids.NodeID, uint64, uint64) {} + +func (i *ipTracker) OnValidatorRemoved(nodeID ids.NodeID, _ uint64) { + i.lock.Lock() + defer i.lock.Unlock() + + if i.manuallyTracked.Contains(nodeID) { + return + } + + delete(i.mostRecentValidatorIPs, nodeID) + i.numValidatorIPs.Set(float64(len(i.mostRecentValidatorIPs))) + + i.validators.Remove(nodeID) + i.removeGossipableIP(nodeID) +} + +func (i *ipTracker) updateMostRecentValidatorIP(ip *ips.ClaimedIPPort) { + i.mostRecentValidatorIPs[ip.NodeID] = ip + i.numValidatorIPs.Set(float64(len(i.mostRecentValidatorIPs))) + + oldCount := i.bloomAdditions[ip.NodeID] + if oldCount >= maxIPEntriesPerValidator { + return + } + + // If the validator set is growing rapidly, we should increase the size of + // the bloom filter. 
+ if count := i.bloom.Count(); count >= i.maxBloomCount { + if err := i.resetBloom(); err != nil { + i.log.Error("failed to reset validator tracker bloom filter", + zap.Int("maxCount", i.maxBloomCount), + zap.Int("currentCount", count), + zap.Error(err), + ) + } else { + i.log.Info("reset validator tracker bloom filter", + zap.Int("currentCount", count), + ) + } + return + } + + i.bloomAdditions[ip.NodeID] = oldCount + 1 + bloom.Add(i.bloom, ip.GossipID[:], i.bloomSalt) + i.bloomMetrics.Count.Inc() +} + +func (i *ipTracker) addGossipableIP(ip *ips.ClaimedIPPort) { + i.gossipableIndicies[ip.NodeID] = len(i.gossipableIPs) + i.gossipableIPs = append(i.gossipableIPs, ip) + i.numGossipable.Inc() +} + +func (i *ipTracker) removeGossipableIP(nodeID ids.NodeID) { + indexToRemove, wasGossipable := i.gossipableIndicies[nodeID] + if !wasGossipable { + return + } + + newNumGossipable := len(i.gossipableIPs) - 1 + if newNumGossipable != indexToRemove { + replacementIP := i.gossipableIPs[newNumGossipable] + i.gossipableIndicies[replacementIP.NodeID] = indexToRemove + i.gossipableIPs[indexToRemove] = replacementIP + } + + delete(i.gossipableIndicies, nodeID) + i.gossipableIPs[newNumGossipable] = nil + i.gossipableIPs = i.gossipableIPs[:newNumGossipable] + i.numGossipable.Dec() +} + +// GetGossipableIPs returns the latest IPs of connected validators. The returned +// IPs will not contain [exceptNodeID] or any IPs contained in [exceptIPs]. If +// the number of eligible IPs to return low, it's possible that every IP will be +// iterated over while handling this call. 
+func (i *ipTracker) GetGossipableIPs( + exceptNodeID ids.NodeID, + exceptIPs *bloom.ReadFilter, + salt []byte, + maxNumIPs int, +) []*ips.ClaimedIPPort { + var ( + uniform = sampler.NewUniform() + ips = make([]*ips.ClaimedIPPort, 0, maxNumIPs) + ) + + i.lock.RLock() + defer i.lock.RUnlock() + + uniform.Initialize(uint64(len(i.gossipableIPs))) + for len(ips) < maxNumIPs { + index, err := uniform.Next() + if err != nil { + return ips + } + + ip := i.gossipableIPs[index] + if ip.NodeID == exceptNodeID { + continue + } + + if !bloom.Contains(exceptIPs, ip.GossipID[:], salt) { + ips = append(ips, ip) + } + } + return ips +} + +// ResetBloom prunes the current bloom filter. This must be called periodically +// to ensure that validators that change their IPs are updated correctly and +// that validators that left the validator set are removed. +func (i *ipTracker) ResetBloom() error { + i.lock.Lock() + defer i.lock.Unlock() + + return i.resetBloom() +} + +// Bloom returns the binary representation of the bloom filter along with the +// random salt. +func (i *ipTracker) Bloom() ([]byte, []byte) { + i.lock.RLock() + defer i.lock.RUnlock() + + return i.bloom.Marshal(), i.bloomSalt +} + +// resetBloom creates a new bloom filter with a reasonable size for the current +// validator set size. This function additionally populates the new bloom filter +// with the current most recently known IPs of validators. 
+func (i *ipTracker) resetBloom() error { + newSalt := make([]byte, saltSize) + _, err := rand.Reader.Read(newSalt) + if err != nil { + return err + } + + count := max(maxIPEntriesPerValidator*i.validators.Len(), minCountEstimate) + numHashes, numEntries := bloom.OptimalParameters( + count, + targetFalsePositiveProbability, + ) + newFilter, err := bloom.New(numHashes, numEntries) + if err != nil { + return err + } + + i.bloom = newFilter + clear(i.bloomAdditions) + i.bloomSalt = newSalt + i.maxBloomCount = bloom.EstimateCount(numHashes, numEntries, maxFalsePositiveProbability) + + for nodeID, ip := range i.mostRecentValidatorIPs { + bloom.Add(newFilter, ip.GossipID[:], newSalt) + i.bloomAdditions[nodeID] = 1 + } + i.bloomMetrics.Reset(newFilter, i.maxBloomCount) + return nil +} diff --git a/avalanchego/network/ip_tracker_test.go b/avalanchego/network/ip_tracker_test.go new file mode 100644 index 00000000..da088f4a --- /dev/null +++ b/avalanchego/network/ip_tracker_test.go @@ -0,0 +1,710 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package network + +import ( + "testing" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/testutil" + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/bloom" + "github.com/ava-labs/avalanchego/utils/ips" + "github.com/ava-labs/avalanchego/utils/logging" +) + +func newTestIPTracker(t *testing.T) *ipTracker { + tracker, err := newIPTracker(logging.NoLog{}, "", prometheus.NewRegistry()) + require.NoError(t, err) + return tracker +} + +func newerTestIP(ip *ips.ClaimedIPPort) *ips.ClaimedIPPort { + return ips.NewClaimedIPPort( + ip.Cert, + ip.IPPort, + ip.Timestamp+1, + ip.Signature, + ) +} + +func requireEqual(t *testing.T, expected, actual *ipTracker) { + require := require.New(t) + require.Equal(expected.manuallyTracked, actual.manuallyTracked) + require.Equal(expected.connected, actual.connected) + require.Equal(expected.mostRecentValidatorIPs, actual.mostRecentValidatorIPs) + require.Equal(expected.validators, actual.validators) + require.Equal(expected.gossipableIndicies, actual.gossipableIndicies) + require.Equal(expected.gossipableIPs, actual.gossipableIPs) + require.Equal(expected.bloomAdditions, actual.bloomAdditions) + require.Equal(expected.maxBloomCount, actual.maxBloomCount) +} + +func requireMetricsConsistent(t *testing.T, tracker *ipTracker) { + require := require.New(t) + require.Equal(float64(len(tracker.mostRecentValidatorIPs)), testutil.ToFloat64(tracker.numValidatorIPs)) + require.Equal(float64(len(tracker.gossipableIPs)), testutil.ToFloat64(tracker.numGossipable)) + require.Equal(float64(tracker.bloom.Count()), testutil.ToFloat64(tracker.bloomMetrics.Count)) + require.Equal(float64(tracker.maxBloomCount), testutil.ToFloat64(tracker.bloomMetrics.MaxCount)) +} + +func TestIPTracker_ManuallyTrack(t *testing.T) { + tests := []struct { + name string + initialState *ipTracker + nodeID ids.NodeID + expectedState *ipTracker + }{ + 
{ + name: "non-connected non-validator", + initialState: newTestIPTracker(t), + nodeID: ip.NodeID, + expectedState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.validators.Add(ip.NodeID) + tracker.manuallyTracked.Add(ip.NodeID) + return tracker + }(), + }, + { + name: "connected non-validator", + initialState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.Connected(ip) + return tracker + }(), + nodeID: ip.NodeID, + expectedState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.Connected(ip) + tracker.mostRecentValidatorIPs[ip.NodeID] = ip + tracker.bloomAdditions[ip.NodeID] = 1 + tracker.gossipableIndicies[ip.NodeID] = 0 + tracker.gossipableIPs = []*ips.ClaimedIPPort{ + ip, + } + tracker.validators.Add(ip.NodeID) + tracker.manuallyTracked.Add(ip.NodeID) + return tracker + }(), + }, + { + name: "non-connected validator", + initialState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.onValidatorAdded(ip.NodeID) + return tracker + }(), + nodeID: ip.NodeID, + expectedState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.onValidatorAdded(ip.NodeID) + tracker.manuallyTracked.Add(ip.NodeID) + return tracker + }(), + }, + { + name: "connected validator", + initialState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.Connected(ip) + tracker.onValidatorAdded(ip.NodeID) + return tracker + }(), + nodeID: ip.NodeID, + expectedState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.Connected(ip) + tracker.onValidatorAdded(ip.NodeID) + tracker.manuallyTracked.Add(ip.NodeID) + return tracker + }(), + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + test.initialState.ManuallyTrack(test.nodeID) + requireEqual(t, test.expectedState, test.initialState) + requireMetricsConsistent(t, test.initialState) + }) + } +} + +func TestIPTracker_AddIP(t *testing.T) { + newerIP := newerTestIP(ip) + tests := []struct { + name string + initialState *ipTracker 
+ ip *ips.ClaimedIPPort + expectedUpdated bool + expectedState *ipTracker + }{ + { + name: "non-validator", + initialState: newTestIPTracker(t), + ip: ip, + expectedUpdated: false, + expectedState: newTestIPTracker(t), + }, + { + name: "first known IP", + initialState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.onValidatorAdded(ip.NodeID) + return tracker + }(), + ip: ip, + expectedUpdated: true, + expectedState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.onValidatorAdded(ip.NodeID) + tracker.mostRecentValidatorIPs[ip.NodeID] = ip + tracker.bloomAdditions[ip.NodeID] = 1 + return tracker + }(), + }, + { + name: "older IP", + initialState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.onValidatorAdded(newerIP.NodeID) + require.True(t, tracker.AddIP(newerIP)) + return tracker + }(), + ip: ip, + expectedUpdated: false, + expectedState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.onValidatorAdded(newerIP.NodeID) + require.True(t, tracker.AddIP(newerIP)) + return tracker + }(), + }, + { + name: "same IP", + initialState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.onValidatorAdded(ip.NodeID) + require.True(t, tracker.AddIP(ip)) + return tracker + }(), + ip: ip, + expectedUpdated: false, + expectedState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.onValidatorAdded(ip.NodeID) + require.True(t, tracker.AddIP(ip)) + return tracker + }(), + }, + { + name: "disconnected newer IP", + initialState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.onValidatorAdded(ip.NodeID) + require.True(t, tracker.AddIP(ip)) + return tracker + }(), + ip: newerIP, + expectedUpdated: true, + expectedState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.onValidatorAdded(ip.NodeID) + require.True(t, tracker.AddIP(ip)) + tracker.mostRecentValidatorIPs[newerIP.NodeID] = newerIP + tracker.bloomAdditions[newerIP.NodeID] = 2 + return tracker + }(), + }, + { + 
name: "connected newer IP", + initialState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.onValidatorAdded(ip.NodeID) + tracker.Connected(ip) + return tracker + }(), + ip: newerIP, + expectedUpdated: true, + expectedState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.onValidatorAdded(ip.NodeID) + tracker.Connected(ip) + tracker.mostRecentValidatorIPs[newerIP.NodeID] = newerIP + tracker.bloomAdditions[newerIP.NodeID] = 2 + delete(tracker.gossipableIndicies, newerIP.NodeID) + tracker.gossipableIPs = tracker.gossipableIPs[:0] + return tracker + }(), + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + updated := test.initialState.AddIP(test.ip) + require.Equal(t, test.expectedUpdated, updated) + requireEqual(t, test.expectedState, test.initialState) + requireMetricsConsistent(t, test.initialState) + }) + } +} + +func TestIPTracker_Connected(t *testing.T) { + newerIP := newerTestIP(ip) + tests := []struct { + name string + initialState *ipTracker + ip *ips.ClaimedIPPort + expectedState *ipTracker + }{ + { + name: "non-validator", + initialState: newTestIPTracker(t), + ip: ip, + expectedState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.connected[ip.NodeID] = ip + return tracker + }(), + }, + { + name: "first known IP", + initialState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.onValidatorAdded(ip.NodeID) + return tracker + }(), + ip: ip, + expectedState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.onValidatorAdded(ip.NodeID) + tracker.connected[ip.NodeID] = ip + tracker.mostRecentValidatorIPs[ip.NodeID] = ip + tracker.bloomAdditions[ip.NodeID] = 1 + tracker.gossipableIndicies[ip.NodeID] = 0 + tracker.gossipableIPs = []*ips.ClaimedIPPort{ + ip, + } + return tracker + }(), + }, + { + name: "connected with older IP", + initialState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.onValidatorAdded(newerIP.NodeID) + require.True(t, 
tracker.AddIP(newerIP)) + return tracker + }(), + ip: ip, + expectedState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.onValidatorAdded(newerIP.NodeID) + require.True(t, tracker.AddIP(newerIP)) + tracker.connected[ip.NodeID] = ip + return tracker + }(), + }, + { + name: "connected with newer IP", + initialState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.onValidatorAdded(ip.NodeID) + require.True(t, tracker.AddIP(ip)) + return tracker + }(), + ip: newerIP, + expectedState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.onValidatorAdded(ip.NodeID) + require.True(t, tracker.AddIP(ip)) + tracker.connected[newerIP.NodeID] = newerIP + tracker.mostRecentValidatorIPs[newerIP.NodeID] = newerIP + tracker.bloomAdditions[newerIP.NodeID] = 2 + tracker.gossipableIndicies[newerIP.NodeID] = 0 + tracker.gossipableIPs = []*ips.ClaimedIPPort{ + newerIP, + } + return tracker + }(), + }, + { + name: "connected with same IP", + initialState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.onValidatorAdded(ip.NodeID) + require.True(t, tracker.AddIP(ip)) + return tracker + }(), + ip: ip, + expectedState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.onValidatorAdded(ip.NodeID) + require.True(t, tracker.AddIP(ip)) + tracker.connected[ip.NodeID] = ip + tracker.gossipableIndicies[ip.NodeID] = 0 + tracker.gossipableIPs = []*ips.ClaimedIPPort{ + ip, + } + return tracker + }(), + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + test.initialState.Connected(test.ip) + requireEqual(t, test.expectedState, test.initialState) + requireMetricsConsistent(t, test.initialState) + }) + } +} + +func TestIPTracker_Disconnected(t *testing.T) { + tests := []struct { + name string + initialState *ipTracker + nodeID ids.NodeID + expectedState *ipTracker + }{ + { + name: "not gossipable", + initialState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.Connected(ip) + return 
tracker + }(), + nodeID: ip.NodeID, + expectedState: newTestIPTracker(t), + }, + { + name: "latest gossipable", + initialState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.onValidatorAdded(ip.NodeID) + tracker.Connected(ip) + return tracker + }(), + nodeID: ip.NodeID, + expectedState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.onValidatorAdded(ip.NodeID) + tracker.Connected(ip) + delete(tracker.connected, ip.NodeID) + delete(tracker.gossipableIndicies, ip.NodeID) + tracker.gossipableIPs = tracker.gossipableIPs[:0] + return tracker + }(), + }, + { + name: "non-latest gossipable", + initialState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.onValidatorAdded(ip.NodeID) + tracker.Connected(ip) + tracker.onValidatorAdded(otherIP.NodeID) + tracker.Connected(otherIP) + return tracker + }(), + nodeID: ip.NodeID, + expectedState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.onValidatorAdded(ip.NodeID) + tracker.Connected(ip) + tracker.onValidatorAdded(otherIP.NodeID) + tracker.Connected(otherIP) + delete(tracker.connected, ip.NodeID) + tracker.gossipableIndicies = map[ids.NodeID]int{ + otherIP.NodeID: 0, + } + tracker.gossipableIPs = []*ips.ClaimedIPPort{ + otherIP, + } + return tracker + }(), + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + test.initialState.Disconnected(test.nodeID) + requireEqual(t, test.expectedState, test.initialState) + requireMetricsConsistent(t, test.initialState) + }) + } +} + +func TestIPTracker_OnValidatorAdded(t *testing.T) { + tests := []struct { + name string + initialState *ipTracker + nodeID ids.NodeID + expectedState *ipTracker + }{ + { + name: "manually tracked", + initialState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.ManuallyTrack(ip.NodeID) + return tracker + }(), + nodeID: ip.NodeID, + expectedState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.ManuallyTrack(ip.NodeID) + return tracker + 
}(), + }, + { + name: "disconnected", + initialState: newTestIPTracker(t), + nodeID: ip.NodeID, + expectedState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.validators.Add(ip.NodeID) + return tracker + }(), + }, + { + name: "connected", + initialState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.Connected(ip) + return tracker + }(), + nodeID: ip.NodeID, + expectedState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.Connected(ip) + tracker.validators.Add(ip.NodeID) + tracker.mostRecentValidatorIPs[ip.NodeID] = ip + tracker.bloomAdditions[ip.NodeID] = 1 + tracker.gossipableIndicies[ip.NodeID] = 0 + tracker.gossipableIPs = []*ips.ClaimedIPPort{ + ip, + } + return tracker + }(), + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + test.initialState.OnValidatorAdded(test.nodeID, nil, ids.Empty, 0) + requireEqual(t, test.expectedState, test.initialState) + requireMetricsConsistent(t, test.initialState) + }) + } +} + +func TestIPTracker_OnValidatorRemoved(t *testing.T) { + tests := []struct { + name string + initialState *ipTracker + nodeID ids.NodeID + expectedState *ipTracker + }{ + { + name: "manually tracked", + initialState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.ManuallyTrack(ip.NodeID) + tracker.onValidatorAdded(ip.NodeID) + tracker.Connected(ip) + return tracker + }(), + nodeID: ip.NodeID, + expectedState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.ManuallyTrack(ip.NodeID) + tracker.onValidatorAdded(ip.NodeID) + tracker.Connected(ip) + return tracker + }(), + }, + { + name: "not gossipable", + initialState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.onValidatorAdded(ip.NodeID) + require.True(t, tracker.AddIP(ip)) + return tracker + }(), + nodeID: ip.NodeID, + expectedState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.onValidatorAdded(ip.NodeID) + require.True(t, tracker.AddIP(ip)) + 
delete(tracker.mostRecentValidatorIPs, ip.NodeID) + tracker.validators.Remove(ip.NodeID) + return tracker + }(), + }, + { + name: "latest gossipable", + initialState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.onValidatorAdded(ip.NodeID) + tracker.Connected(ip) + return tracker + }(), + nodeID: ip.NodeID, + expectedState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.onValidatorAdded(ip.NodeID) + tracker.Connected(ip) + delete(tracker.mostRecentValidatorIPs, ip.NodeID) + tracker.validators.Remove(ip.NodeID) + delete(tracker.gossipableIndicies, ip.NodeID) + tracker.gossipableIPs = tracker.gossipableIPs[:0] + return tracker + }(), + }, + { + name: "non-latest gossipable", + initialState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.onValidatorAdded(ip.NodeID) + tracker.Connected(ip) + tracker.onValidatorAdded(otherIP.NodeID) + tracker.Connected(otherIP) + return tracker + }(), + nodeID: ip.NodeID, + expectedState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.onValidatorAdded(ip.NodeID) + tracker.Connected(ip) + tracker.onValidatorAdded(otherIP.NodeID) + tracker.Connected(otherIP) + delete(tracker.mostRecentValidatorIPs, ip.NodeID) + tracker.validators.Remove(ip.NodeID) + tracker.gossipableIndicies = map[ids.NodeID]int{ + otherIP.NodeID: 0, + } + tracker.gossipableIPs = []*ips.ClaimedIPPort{ + otherIP, + } + return tracker + }(), + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + test.initialState.OnValidatorRemoved(test.nodeID, 0) + requireEqual(t, test.expectedState, test.initialState) + requireMetricsConsistent(t, test.initialState) + }) + } +} + +func TestIPTracker_GetGossipableIPs(t *testing.T) { + require := require.New(t) + + tracker := newTestIPTracker(t) + tracker.Connected(ip) + tracker.Connected(otherIP) + tracker.onValidatorAdded(ip.NodeID) + tracker.onValidatorAdded(otherIP.NodeID) + + gossipableIPs := tracker.GetGossipableIPs(ids.EmptyNodeID, 
bloom.EmptyFilter, nil, 2) + require.ElementsMatch([]*ips.ClaimedIPPort{ip, otherIP}, gossipableIPs) + + gossipableIPs = tracker.GetGossipableIPs(ip.NodeID, bloom.EmptyFilter, nil, 2) + require.Equal([]*ips.ClaimedIPPort{otherIP}, gossipableIPs) + + gossipableIPs = tracker.GetGossipableIPs(ids.EmptyNodeID, bloom.FullFilter, nil, 2) + require.Empty(gossipableIPs) + + filter, err := bloom.New(8, 1024) + require.NoError(err) + bloom.Add(filter, ip.GossipID[:], nil) + + readFilter, err := bloom.Parse(filter.Marshal()) + require.NoError(err) + + gossipableIPs = tracker.GetGossipableIPs(ip.NodeID, readFilter, nil, 2) + require.Equal([]*ips.ClaimedIPPort{otherIP}, gossipableIPs) +} + +func TestIPTracker_BloomFiltersEverything(t *testing.T) { + require := require.New(t) + + tracker := newTestIPTracker(t) + tracker.Connected(ip) + tracker.Connected(otherIP) + tracker.onValidatorAdded(ip.NodeID) + tracker.onValidatorAdded(otherIP.NodeID) + + bloomBytes, salt := tracker.Bloom() + readFilter, err := bloom.Parse(bloomBytes) + require.NoError(err) + + gossipableIPs := tracker.GetGossipableIPs(ids.EmptyNodeID, readFilter, salt, 2) + require.Empty(gossipableIPs) + + require.NoError(tracker.ResetBloom()) +} + +func TestIPTracker_BloomGrowsWithValidatorSet(t *testing.T) { + require := require.New(t) + + tracker := newTestIPTracker(t) + initialMaxBloomCount := tracker.maxBloomCount + for i := 0; i < 2048; i++ { + tracker.onValidatorAdded(ids.GenerateTestNodeID()) + } + requireMetricsConsistent(t, tracker) + + require.NoError(tracker.ResetBloom()) + require.Greater(tracker.maxBloomCount, initialMaxBloomCount) + requireMetricsConsistent(t, tracker) +} + +func TestIPTracker_BloomResetsDynamically(t *testing.T) { + require := require.New(t) + + tracker := newTestIPTracker(t) + tracker.Connected(ip) + tracker.onValidatorAdded(ip.NodeID) + tracker.OnValidatorRemoved(ip.NodeID, 0) + tracker.maxBloomCount = 1 + tracker.Connected(otherIP) + tracker.onValidatorAdded(otherIP.NodeID) + 
requireMetricsConsistent(t, tracker) + + bloomBytes, salt := tracker.Bloom() + readFilter, err := bloom.Parse(bloomBytes) + require.NoError(err) + + require.False(bloom.Contains(readFilter, ip.GossipID[:], salt)) + require.True(bloom.Contains(readFilter, otherIP.GossipID[:], salt)) +} + +func TestIPTracker_PreventBloomFilterAddition(t *testing.T) { + require := require.New(t) + + newerIP := newerTestIP(ip) + newestIP := newerTestIP(newerIP) + + tracker := newTestIPTracker(t) + tracker.onValidatorAdded(ip.NodeID) + require.True(tracker.AddIP(ip)) + require.True(tracker.AddIP(newerIP)) + require.True(tracker.AddIP(newestIP)) + require.Equal(maxIPEntriesPerValidator, tracker.bloomAdditions[ip.NodeID]) + requireMetricsConsistent(t, tracker) +} + +func TestIPTracker_ShouldVerifyIP(t *testing.T) { + require := require.New(t) + + newerIP := newerTestIP(ip) + + tracker := newTestIPTracker(t) + require.False(tracker.ShouldVerifyIP(ip)) + tracker.onValidatorAdded(ip.NodeID) + require.True(tracker.ShouldVerifyIP(ip)) + require.True(tracker.AddIP(ip)) + require.False(tracker.ShouldVerifyIP(ip)) + require.True(tracker.ShouldVerifyIP(newerIP)) +} diff --git a/avalanchego/network/listener_test.go b/avalanchego/network/listener_test.go index 1b15b006..5d6073c6 100644 --- a/avalanchego/network/listener_test.go +++ b/avalanchego/network/listener_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package network diff --git a/avalanchego/network/metrics.go b/avalanchego/network/metrics.go index ad7826eb..e2a3a363 100644 --- a/avalanchego/network/metrics.go +++ b/avalanchego/network/metrics.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package network @@ -11,9 +11,9 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/network/peer" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/set" - "github.com/ava-labs/avalanchego/utils/wrappers" ) type metrics struct { @@ -22,13 +22,13 @@ type metrics struct { numSubnetPeers *prometheus.GaugeVec timeSinceLastMsgSent prometheus.Gauge timeSinceLastMsgReceived prometheus.Gauge - sendQueuePortionFull prometheus.Gauge sendFailRate prometheus.Gauge connected prometheus.Counter disconnected prometheus.Counter acceptFailed prometheus.Counter inboundConnRateLimited prometheus.Counter inboundConnAllowed prometheus.Counter + tlsConnRejected prometheus.Counter numUselessPeerListBytes prometheus.Counter nodeUptimeWeightedAverage prometheus.Gauge nodeUptimeRewardingStake prometheus.Gauge @@ -71,11 +71,6 @@ func newMetrics(namespace string, registerer prometheus.Registerer, initialSubne Name: "time_since_last_msg_sent", Help: "Time (in ns) since the last msg was sent", }), - sendQueuePortionFull: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "send_queue_portion_full", - Help: "Percentage of use in Send Queue", - }), sendFailRate: prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: namespace, Name: "send_fail_rate", @@ -101,6 +96,11 @@ func newMetrics(namespace string, registerer prometheus.Registerer, initialSubne Name: "inbound_conn_throttler_allowed", Help: "Times this node allowed (attempted to upgrade) an inbound connection", }), + tlsConnRejected: prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Name: "tls_conn_rejected", + Help: "Times this node rejected a connection due to an unsupported TLS certificate", + }), numUselessPeerListBytes: prometheus.NewCounter(prometheus.CounterOpts{ Namespace: namespace, Name: "num_useless_peerlist_bytes", @@ -147,19 +147,18 @@ func newMetrics(namespace string, registerer 
prometheus.Registerer, initialSubne peerConnectedStartTimes: make(map[ids.NodeID]float64), } - errs := wrappers.Errs{} - errs.Add( + err := utils.Err( registerer.Register(m.numTracked), registerer.Register(m.numPeers), registerer.Register(m.numSubnetPeers), registerer.Register(m.timeSinceLastMsgReceived), registerer.Register(m.timeSinceLastMsgSent), - registerer.Register(m.sendQueuePortionFull), registerer.Register(m.sendFailRate), registerer.Register(m.connected), registerer.Register(m.disconnected), registerer.Register(m.acceptFailed), registerer.Register(m.inboundConnAllowed), + registerer.Register(m.tlsConnRejected), registerer.Register(m.numUselessPeerListBytes), registerer.Register(m.inboundConnRateLimited), registerer.Register(m.nodeUptimeWeightedAverage), @@ -182,7 +181,7 @@ func newMetrics(namespace string, registerer prometheus.Registerer, initialSubne m.nodeSubnetUptimeRewardingStake.WithLabelValues(subnetIDStr).Set(0) } - return m, errs.Err + return m, err } func (m *metrics) markConnected(peer peer.Peer) { diff --git a/avalanchego/network/network.go b/avalanchego/network/network.go index 8a41baba..5e4b3cdc 100644 --- a/avalanchego/network/network.go +++ b/avalanchego/network/network.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package network @@ -7,41 +7,36 @@ import ( "context" "errors" "fmt" + "math" "net" "strings" "sync" "sync/atomic" "time" - gomath "math" - "github.com/pires/go-proxyproto" - "github.com/prometheus/client_golang/prometheus" - "go.uber.org/zap" - "golang.org/x/exp/maps" - "github.com/ava-labs/avalanchego/api/health" + "github.com/ava-labs/avalanchego/genesis" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/message" "github.com/ava-labs/avalanchego/network/dialer" "github.com/ava-labs/avalanchego/network/peer" "github.com/ava-labs/avalanchego/network/throttling" - "github.com/ava-labs/avalanchego/proto/pb/p2p" "github.com/ava-labs/avalanchego/snow/networking/router" "github.com/ava-labs/avalanchego/snow/networking/sender" - "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/subnets" + "github.com/ava-labs/avalanchego/utils/bloom" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/ips" "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/utils/math" - "github.com/ava-labs/avalanchego/utils/sampler" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/version" + + safemath "github.com/ava-labs/avalanchego/utils/math" ) const ( @@ -55,12 +50,10 @@ var ( _ sender.ExternalSender = (*network)(nil) _ Network = (*network)(nil) - errMissingPrimaryValidators = errors.New("missing primary validator set") - errNotValidator = errors.New("node is not a validator") - errNotTracked = errors.New("subnet is not tracked") - errSubnetNotExist = errors.New("subnet does not exist") - errExpectedProxy = errors.New("expected proxy") - errExpectedTCPProtocol = errors.New("expected TCP protocol") + errNotValidator = errors.New("node is not a validator") + errNotTracked = errors.New("subnet is not tracked") + errExpectedProxy = errors.New("expected proxy") + errExpectedTCPProtocol = 
errors.New("expected TCP protocol") ) // Network defines the functionality of the networking library. @@ -82,12 +75,6 @@ type Network interface { // or the network is closed. Dispatch() error - // WantsConnection returns true if this node is willing to attempt to - // connect to the provided nodeID. If the node is attempting to connect to - // the minimum number of peers, then it should only connect if the peer is a - // validator or beacon. - WantsConnection(ids.NodeID) bool - // Attempt to connect to this IP. The network will never stop attempting to // connect to this ID. ManuallyTrack(nodeID ids.NodeID, ip ips.IPPort) @@ -118,6 +105,14 @@ type UptimeResult struct { WeightedAveragePercentage float64 } +// To avoid potential deadlocks, we maintain that locks must be grabbed in the +// following order: +// +// 1. peersLock +// 2. manuallyTrackedIDsLock +// +// If a higher lock (e.g. manuallyTrackedIDsLock) is held when trying to grab a +// lower lock (e.g. peersLock) a deadlock could occur. type network struct { config *Config peerConfig *peer.Config @@ -141,27 +136,21 @@ type network struct { // Cancelled on close onCloseCtx context.Context // Call [onCloseCtxCancel] to cancel [onCloseCtx] during close() - onCloseCtxCancel func() + onCloseCtxCancel context.CancelFunc - sendFailRateCalculator math.Averager + sendFailRateCalculator safemath.Averager // Tracks which peers know about which peers - gossipTracker peer.GossipTracker - peersLock sync.RWMutex - // peerIPs contains the most up to date set of signed IPs for nodes we are - // currently connected or attempting to connect to. - // Note: The txID provided inside of a claimed IP is not verified and should - // not be accessed from this map. - peerIPs map[ids.NodeID]*ips.ClaimedIPPort + ipTracker *ipTracker + peersLock sync.RWMutex // trackedIPs contains the set of IPs that we are currently attempting to // connect to. An entry is added to this set when we first start attempting // to connect to the peer. 
An entry is deleted from this set once we have // finished the handshake. - trackedIPs map[ids.NodeID]*trackedIP - manuallyTrackedIDs set.Set[ids.NodeID] - connectingPeers peer.Set - connectedPeers peer.Set - closing bool + trackedIPs map[ids.NodeID]*trackedIP + connectingPeers peer.Set + connectedPeers peer.Set + closing bool // router is notified about all peer [Connected] and [Disconnected] events // as well as all non-handshake peer messages. @@ -189,11 +178,6 @@ func NewNetwork( dialer dialer.Dialer, router router.ExternalHandler, ) (Network, error) { - primaryNetworkValidators, ok := config.Validators.Get(constants.PrimaryNetworkID) - if !ok { - return nil, errMissingPrimaryValidators - } - if config.ProxyEnabled { // Wrap the listener to process the proxy header. listener = &proxyproto.Listener{ @@ -220,7 +204,7 @@ func NewNetwork( log, config.Namespace, metricsRegisterer, - primaryNetworkValidators, + config.Validators, config.ThrottlerConfig.InboundMsgThrottlerConfig, config.ResourceTracker, config.CPUTargeter, @@ -234,7 +218,7 @@ func NewNetwork( log, config.Namespace, metricsRegisterer, - primaryNetworkValidators, + config.Validators, config.ThrottlerConfig.OutboundMsgThrottlerConfig, ) if err != nil { @@ -251,6 +235,18 @@ func NewNetwork( return nil, fmt.Errorf("initializing network metrics failed with: %w", err) } + ipTracker, err := newIPTracker(log, config.Namespace, metricsRegisterer) + if err != nil { + return nil, fmt.Errorf("initializing ip tracker failed with: %w", err) + } + config.Validators.RegisterCallbackListener(constants.PrimaryNetworkID, ipTracker) + + // Track all default bootstrappers to ensure their current IPs are gossiped + // like validator IPs. 
+ for _, bootstrapper := range genesis.GetBootstrappers(config.NetworkID) { + ipTracker.ManuallyTrack(bootstrapper.ID) + } + peerConfig := &peer.Config{ ReadBufferSize: config.PeerReadBufferSize, WriteBufferSize: config.PeerWriteBufferSize, @@ -264,15 +260,24 @@ func NewNetwork( VersionCompatibility: version.GetCompatibility(config.NetworkID), MySubnets: config.TrackedSubnets, Beacons: config.Beacons, + Validators: config.Validators, NetworkID: config.NetworkID, PingFrequency: config.PingFrequency, PongTimeout: config.PingPongTimeout, MaxClockDifference: config.MaxClockDifference, + SupportedACPs: config.SupportedACPs.List(), + ObjectedACPs: config.ObjectedACPs.List(), ResourceTracker: config.ResourceTracker, UptimeCalculator: config.UptimeCalculator, - IPSigner: peer.NewIPSigner(config.MyIPPort, config.TLSKey), + IPSigner: peer.NewIPSigner(config.MyIPPort, config.TLSKey, config.BLSKey), } + // Invariant: We delay the activation of durango during the TLS handshake to + // avoid gossiping any TLS certs that anyone else in the network may + // consider invalid. Recall that if a peer gossips an invalid cert, the + // connection is terminated. 
+ durangoTime := version.GetDurangoTime(config.NetworkID) + durangoTimeWithClockSkew := durangoTime.Add(config.MaxClockDifference) onCloseCtx, cancel := context.WithCancel(context.Background()) n := &network{ config: config, @@ -283,21 +288,20 @@ func NewNetwork( inboundConnUpgradeThrottler: throttling.NewInboundConnUpgradeThrottler(log, config.ThrottlerConfig.InboundConnUpgradeThrottlerConfig), listener: listener, dialer: dialer, - serverUpgrader: peer.NewTLSServerUpgrader(config.TLSConfig), - clientUpgrader: peer.NewTLSClientUpgrader(config.TLSConfig), + serverUpgrader: peer.NewTLSServerUpgrader(config.TLSConfig, metrics.tlsConnRejected, durangoTimeWithClockSkew), + clientUpgrader: peer.NewTLSClientUpgrader(config.TLSConfig, metrics.tlsConnRejected, durangoTimeWithClockSkew), onCloseCtx: onCloseCtx, onCloseCtxCancel: cancel, - sendFailRateCalculator: math.NewSyncAverager(math.NewAverager( + sendFailRateCalculator: safemath.NewSyncAverager(safemath.NewAverager( 0, config.SendFailRateHalflife, time.Now(), )), - peerIPs: make(map[ids.NodeID]*ips.ClaimedIPPort), trackedIPs: make(map[ids.NodeID]*trackedIP), - gossipTracker: config.GossipTracker, + ipTracker: ipTracker, connectingPeers: peer.NewSet(), connectedPeers: peer.NewSet(), router: router, @@ -419,32 +423,6 @@ func (n *network) Connected(nodeID ids.NodeID) { return } - peerIP := peer.IP() - newIP := &ips.ClaimedIPPort{ - Cert: peer.Cert(), - IPPort: peerIP.IPPort, - Timestamp: peerIP.Timestamp, - Signature: peerIP.Signature, - } - prevIP, ok := n.peerIPs[nodeID] - if !ok { - // If the IP wasn't previously tracked, then we never could have - // gossiped it. This means we don't need to reset the validator's - // tracked set. - n.peerIPs[nodeID] = newIP - } else if prevIP.Timestamp < newIP.Timestamp { - // The previous IP was stale, so we should gossip the newer IP. - n.peerIPs[nodeID] = newIP - - if !prevIP.IPPort.Equal(newIP.IPPort) { - // This IP is actually different, so we should gossip it. 
- n.peerConfig.Log.Debug("resetting gossip due to ip change", - zap.Stringer("nodeID", nodeID), - ) - _ = n.gossipTracker.ResetValidator(nodeID) - } - } - if tracked, ok := n.trackedIPs[nodeID]; ok { tracked.stopTracking() delete(n.trackedIPs, nodeID) @@ -453,6 +431,15 @@ func (n *network) Connected(nodeID ids.NodeID) { n.connectedPeers.Add(peer) n.peersLock.Unlock() + peerIP := peer.IP() + newIP := ips.NewClaimedIPPort( + peer.Cert(), + peerIP.IPPort, + peerIP.Timestamp, + peerIP.TLSSignature, + ) + n.ipTracker.Connected(newIP) + n.metrics.markConnected(peer) peerVersion := peer.Version() @@ -467,178 +454,18 @@ func (n *network) Connected(nodeID ids.NodeID) { // of peers, then it should only connect if this node is a validator, or the // peer is a validator/beacon. func (n *network) AllowConnection(nodeID ids.NodeID) bool { - return !n.config.RequireValidatorToConnect || - validators.Contains(n.config.Validators, constants.PrimaryNetworkID, n.config.MyNodeID) || - n.WantsConnection(nodeID) -} - -func (n *network) Track(peerID ids.NodeID, claimedIPPorts []*ips.ClaimedIPPort) ([]*p2p.PeerAck, error) { - // Perform all signature verification and hashing before grabbing the peer - // lock. - // Note: Avoiding signature verification when the IP isn't needed is a - // **significant** performance optimization. - // Note: To avoid signature verification when the IP isn't needed, we - // optimistically filter out IPs. This can result in us not tracking an IP - // that we otherwise would have. This case can only happen if the node - // became a validator between the time we verified the signature and when we - // processed the IP; which should be very rare. 
- ipAuths, err := n.authenticateIPs(claimedIPPorts) - if err != nil { - n.peerConfig.Log.Debug("authenticating claimed IPs failed", - zap.Stringer("nodeID", peerID), - zap.Error(err), - ) - return nil, err + if !n.config.RequireValidatorToConnect { + return true } - - // Information for them to update about us - ipLen := len(claimedIPPorts) - newestTimestamp := make(map[ids.ID]uint64, ipLen) - // Information for us to update about them - txIDsWithUpToDateIP := make([]ids.ID, 0, ipLen) - - // Atomically modify peer data - n.peersLock.Lock() - defer n.peersLock.Unlock() - for i, ip := range claimedIPPorts { - ipAuth := ipAuths[i] - nodeID := ipAuth.nodeID - // Invariant: [ip] is only used to modify local node state if - // [verifiedIP] is true. - // Note: modifying peer-level state is allowed regardless of - // [verifiedIP]. - verifiedIP := ipAuth.verified - - // Re-fetch latest info for a [nodeID] in case it changed since we last - // held [peersLock]. - prevIP, previouslyTracked, shouldUpdateOurIP, shouldDial := n.peerIPStatus(nodeID, ip) - tracked, isTracked := n.trackedIPs[nodeID] - - // Evaluate if the gossiped IP is useful to us or to the peer that - // shared it with us. - switch { - case previouslyTracked && prevIP.Timestamp > ip.Timestamp: - // Our previous IP was more up to date. We should tell the peer - // not to gossip their IP to us. We should still gossip our IP to - // them. - newestTimestamp[ip.TxID] = prevIP.Timestamp - - n.metrics.numUselessPeerListBytes.Add(float64(ip.BytesLen())) - case previouslyTracked && prevIP.Timestamp == ip.Timestamp: - // Our previous IP was equally fresh. We should tell the peer - // not to gossip this IP to us. We should not gossip our IP to them. - newestTimestamp[ip.TxID] = prevIP.Timestamp - txIDsWithUpToDateIP = append(txIDsWithUpToDateIP, ip.TxID) - - n.metrics.numUselessPeerListBytes.Add(float64(ip.BytesLen())) - case verifiedIP && shouldUpdateOurIP: - // This IP is more up to date. 
We should tell the peer not to gossip - // this IP to us. We should not gossip our IP to them. - newestTimestamp[ip.TxID] = ip.Timestamp - txIDsWithUpToDateIP = append(txIDsWithUpToDateIP, ip.TxID) - - // In the future, we should gossip this IP rather than the old IP. - n.peerIPs[nodeID] = ip - - // If the new IP is equal to the old IP, there is no reason to - // refresh the references to it. This can happen when a node - // restarts but does not change their IP. - if prevIP.IPPort.Equal(ip.IPPort) { - continue - } - - // We should gossip this new IP to all our peers. - n.peerConfig.Log.Debug("resetting gossip due to ip change", - zap.Stringer("nodeID", nodeID), - ) - _ = n.gossipTracker.ResetValidator(nodeID) - - // We should update any existing outbound connection attempts. - if isTracked { - // Stop tracking the old IP and start tracking the new one. - tracked := tracked.trackNewIP(ip.IPPort) - n.trackedIPs[nodeID] = tracked - n.dial(n.onCloseCtx, nodeID, tracked) - } - case verifiedIP && shouldDial: - // Invariant: [isTracked] is false here. - - // This is the first we've heard of this IP and we want to connect - // to it. We should tell the peer not to gossip this IP to us again. - newestTimestamp[ip.TxID] = ip.Timestamp - // We should not gossip this IP back to them. - txIDsWithUpToDateIP = append(txIDsWithUpToDateIP, ip.TxID) - - // We don't need to reset gossip about this validator because - // we've never gossiped it before. 
- n.peerIPs[nodeID] = ip - - tracked := newTrackedIP(ip.IPPort) - n.trackedIPs[nodeID] = tracked - n.dial(n.onCloseCtx, nodeID, tracked) - default: - // This IP isn't desired - n.metrics.numUselessPeerListBytes.Add(float64(ip.BytesLen())) - } - } - - txIDsToAck := maps.Keys(newestTimestamp) - txIDsToAck, ok := n.gossipTracker.AddKnown(peerID, txIDsWithUpToDateIP, txIDsToAck) - if !ok { - n.peerConfig.Log.Error("failed to update known peers", - zap.Stringer("nodeID", peerID), - ) - return nil, nil - } - - peerAcks := make([]*p2p.PeerAck, len(txIDsToAck)) - for i, txID := range txIDsToAck { - txID := txID - peerAcks[i] = &p2p.PeerAck{ - TxId: txID[:], - // By responding with the highest timestamp, not just the timestamp - // the peer provided us, we may be able to avoid some unnecessary - // gossip in the case that the peer is about to update this - // validator's IP. - Timestamp: newestTimestamp[txID], - } - } - return peerAcks, nil + _, iAmAValidator := n.config.Validators.GetValidator(constants.PrimaryNetworkID, n.config.MyNodeID) + return iAmAValidator || n.ipTracker.WantsConnection(nodeID) } -func (n *network) MarkTracked(peerID ids.NodeID, ips []*p2p.PeerAck) error { - txIDs := make([]ids.ID, 0, len(ips)) - - n.peersLock.RLock() - defer n.peersLock.RUnlock() - - for _, ip := range ips { - txID, err := ids.ToID(ip.TxId) - if err != nil { +func (n *network) Track(claimedIPPorts []*ips.ClaimedIPPort) error { + for _, ip := range claimedIPPorts { + if err := n.track(ip); err != nil { return err } - - // If [txID]'s corresponding nodeID isn't known, then they must no - // longer be a validator. Therefore we wouldn't gossip their IP anyways. - nodeID, ok := n.gossipTracker.GetNodeID(txID) - if !ok { - continue - } - - // If the peer returns a lower timestamp than I currently have, then I - // have updated the IP since I sent the PeerList message this is in - // response to. That means that I should re-gossip this node's IP to the - // peer. 
- myIP, previouslyTracked := n.peerIPs[nodeID] - if previouslyTracked && myIP.Timestamp <= ip.Timestamp { - txIDs = append(txIDs, txID) - } - } - - if _, ok := n.gossipTracker.AddKnown(peerID, txIDs, nil); !ok { - n.peerConfig.Log.Error("failed to update known peers", - zap.Stringer("nodeID", peerID), - ) } return nil } @@ -649,13 +476,6 @@ func (n *network) MarkTracked(peerID ids.NodeID, ips []*p2p.PeerAck) error { // call. Note that this is from the perspective of a single peer object, because // a peer with the same ID can reconnect to this network instance. func (n *network) Disconnected(nodeID ids.NodeID) { - if !n.gossipTracker.StopTrackingPeer(nodeID) { - n.peerConfig.Log.Error( - "stopped non-existent peer tracker", - zap.Stringer("nodeID", nodeID), - ) - } - n.peersLock.RLock() _, connecting := n.connectingPeers.GetByID(nodeID) peer, connected := n.connectedPeers.GetByID(nodeID) @@ -669,59 +489,17 @@ func (n *network) Disconnected(nodeID ids.NodeID) { } } -func (n *network) Peers(peerID ids.NodeID) ([]ips.ClaimedIPPort, error) { - // Only select validators that we haven't already sent to this peer - unknownValidators, ok := n.gossipTracker.GetUnknown(peerID) - if !ok { - n.peerConfig.Log.Debug( - "unable to find peer to gossip to", - zap.Stringer("nodeID", peerID), - ) - return nil, nil - } - - // We select a random sample of validators to gossip to avoid starving out a - // validator from being gossiped for an extended period of time. - s := sampler.NewUniform() - if err := s.Initialize(uint64(len(unknownValidators))); err != nil { - return nil, err - } - - // Calculate the unknown information we need to send to this peer. 
- validatorIPs := make([]ips.ClaimedIPPort, 0, int(n.config.PeerListNumValidatorIPs)) - for i := 0; i < len(unknownValidators) && len(validatorIPs) < int(n.config.PeerListNumValidatorIPs); i++ { - drawn, err := s.Next() - if err != nil { - return nil, err - } - - validator := unknownValidators[drawn] - n.peersLock.RLock() - _, isConnected := n.connectedPeers.GetByID(validator.NodeID) - peerIP := n.peerIPs[validator.NodeID] - n.peersLock.RUnlock() - if !isConnected { - n.peerConfig.Log.Verbo( - "unable to find validator in connected peers", - zap.Stringer("nodeID", validator.NodeID), - ) - continue - } - - // Note: peerIP isn't used directly here because the TxID may be - // incorrect. - validatorIPs = append(validatorIPs, - ips.ClaimedIPPort{ - Cert: peerIP.Cert, - IPPort: peerIP.IPPort, - Timestamp: peerIP.Timestamp, - Signature: peerIP.Signature, - TxID: validator.TxID, - }, - ) - } +func (n *network) KnownPeers() ([]byte, []byte) { + return n.ipTracker.Bloom() +} - return validatorIPs, nil +func (n *network) Peers(except ids.NodeID, knownPeers *bloom.ReadFilter, salt []byte) []*ips.ClaimedIPPort { + return n.ipTracker.GetGossipableIPs( + except, + knownPeers, + salt, + int(n.config.PeerListNumValidatorIPs), + ) } // Dispatch starts accepting connections from other nodes attempting to connect @@ -729,7 +507,6 @@ func (n *network) Peers(peerID ids.NodeID) ([]ips.ClaimedIPPort, error) { func (n *network) Dispatch() error { go n.runTimers() // Periodically perform operations go n.inboundConnUpgradeThrottler.Dispatch() - errs := wrappers.Errs{} for { // Continuously accept new connections if n.onCloseCtx.Err() != nil { break @@ -748,13 +525,6 @@ func (n *network) Dispatch() error { // Note: listener.Accept is rate limited outside of this package, so a // peer can not just arbitrarily spin up goroutines here. go func() { - // We pessimistically drop an incoming connection if the remote - // address is found in connectedIPs, myIPs, or peerAliasIPs. 
This - // protects our node from spending CPU cycles on TLS handshakes to - // upgrade connections from existing peers. Specifically, this can - // occur when one of our existing peers attempts to connect to one - // our IP aliases (that they aren't yet aware is an alias). - // // Note: Calling [RemoteAddr] with the Proxy protocol enabled may // block for up to ProxyReadHeaderTimeout. Therefore, we ensure to // call this function inside the go-routine, rather than the main @@ -802,30 +572,19 @@ func (n *network) Dispatch() error { connected := n.connectedPeers.Sample(n.connectedPeers.Len(), peer.NoPrecondition) n.peersLock.RUnlock() + errs := wrappers.Errs{} for _, peer := range append(connecting, connected...) { errs.Add(peer.AwaitClosed(context.TODO())) } return errs.Err } -func (n *network) WantsConnection(nodeID ids.NodeID) bool { - n.peersLock.RLock() - defer n.peersLock.RUnlock() - - return n.wantsConnection(nodeID) -} - -func (n *network) wantsConnection(nodeID ids.NodeID) bool { - return validators.Contains(n.config.Validators, constants.PrimaryNetworkID, nodeID) || - n.manuallyTrackedIDs.Contains(nodeID) -} - func (n *network) ManuallyTrack(nodeID ids.NodeID, ip ips.IPPort) { + n.ipTracker.ManuallyTrack(nodeID) + n.peersLock.Lock() defer n.peersLock.Unlock() - n.manuallyTrackedIDs.Add(nodeID) - _, connected := n.connectedPeers.GetByID(nodeID) if connected { // If I'm currently connected to [nodeID] then they will have told me @@ -838,8 +597,61 @@ func (n *network) ManuallyTrack(nodeID ids.NodeID, ip ips.IPPort) { if !isTracked { tracked := newTrackedIP(ip) n.trackedIPs[nodeID] = tracked - n.dial(n.onCloseCtx, nodeID, tracked) + n.dial(nodeID, tracked) + } +} + +func (n *network) track(ip *ips.ClaimedIPPort) error { + // To avoid signature verification when the IP isn't needed, we + // optimistically filter out IPs. This can result in us not tracking an IP + // that we otherwise would have. 
This case can only happen if the node + // became a validator between the time we verified the signature and when we + // processed the IP; which should be very rare. + // + // Note: Avoiding signature verification when the IP isn't needed is a + // **significant** performance optimization. + if !n.ipTracker.ShouldVerifyIP(ip) { + n.metrics.numUselessPeerListBytes.Add(float64(ip.Size())) + return nil + } + + // Perform all signature verification and hashing before grabbing the peer + // lock. + signedIP := peer.SignedIP{ + UnsignedIP: peer.UnsignedIP{ + IPPort: ip.IPPort, + Timestamp: ip.Timestamp, + }, + TLSSignature: ip.Signature, + } + maxTimestamp := n.peerConfig.Clock.Time().Add(n.peerConfig.MaxClockDifference) + if err := signedIP.Verify(ip.Cert, maxTimestamp); err != nil { + return err + } + + n.peersLock.Lock() + defer n.peersLock.Unlock() + + if !n.ipTracker.AddIP(ip) { + return nil + } + + if _, connected := n.connectedPeers.GetByID(ip.NodeID); connected { + // If I'm currently connected to [nodeID] then I'll attempt to dial them + // when we disconnect. + return nil } + + tracked, isTracked := n.trackedIPs[ip.NodeID] + if isTracked { + // Stop tracking the old IP and start tracking the new one. + tracked = tracked.trackNewIP(ip.IPPort) + } else { + tracked = newTrackedIP(ip.IPPort) + } + n.trackedIPs[ip.NodeID] = tracked + n.dial(ip.NodeID, tracked) + return nil } // getPeers returns a slice of connected peers from a set of [nodeIDs]. 
@@ -871,7 +683,7 @@ func (n *network) getPeers( continue } - isValidator := validators.Contains(n.config.Validators, subnetID, nodeID) + _, isValidator := n.config.Validators.GetValidator(subnetID, nodeID) // check if the peer is allowed to connect to the subnet if !allower.IsAllowed(nodeID, isValidator) { continue @@ -890,14 +702,9 @@ func (n *network) samplePeers( numPeersToSample int, allower subnets.Allower, ) []peer.Peer { - subnetValidators, ok := n.config.Validators.Get(subnetID) - if !ok { - return nil - } - // If there are fewer validators than [numValidatorsToSample], then only // sample [numValidatorsToSample] validators. - subnetValidatorsLen := subnetValidators.Len() + subnetValidatorsLen := n.config.Validators.Count(subnetID) if subnetValidatorsLen < numValidatorsToSample { numValidatorsToSample = subnetValidatorsLen } @@ -915,7 +722,7 @@ func (n *network) samplePeers( } peerID := p.ID() - isValidator := subnetValidators.Contains(peerID) + _, isValidator := n.config.Validators.GetValidator(subnetID, peerID) // check if the peer is allowed to connect to the subnet if !allower.IsAllowed(peerID, isValidator) { return false @@ -971,13 +778,12 @@ func (n *network) disconnectedFromConnecting(nodeID ids.NodeID) { // The peer that is disconnecting from us didn't finish the handshake tracked, ok := n.trackedIPs[nodeID] if ok { - if n.wantsConnection(nodeID) { + if n.ipTracker.WantsConnection(nodeID) { tracked := tracked.trackNewIP(tracked.ip) n.trackedIPs[nodeID] = tracked - n.dial(n.onCloseCtx, nodeID, tracked) + n.dial(nodeID, tracked) } else { tracked.stopTracking() - delete(n.peerIPs, nodeID) delete(n.trackedIPs, nodeID) } } @@ -986,6 +792,7 @@ func (n *network) disconnectedFromConnecting(nodeID ids.NodeID) { } func (n *network) disconnectedFromConnected(peer peer.Peer, nodeID ids.NodeID) { + n.ipTracker.Disconnected(nodeID) n.router.Disconnected(nodeID) n.peersLock.Lock() @@ -994,67 +801,15 @@ func (n *network) disconnectedFromConnected(peer peer.Peer, 
nodeID ids.NodeID) { n.connectedPeers.Remove(nodeID) // The peer that is disconnecting from us finished the handshake - if n.wantsConnection(nodeID) { - prevIP := n.peerIPs[nodeID] - tracked := newTrackedIP(prevIP.IPPort) + if ip, wantsConnection := n.ipTracker.GetIP(nodeID); wantsConnection { + tracked := newTrackedIP(ip.IPPort) n.trackedIPs[nodeID] = tracked - n.dial(n.onCloseCtx, nodeID, tracked) - } else { - delete(n.peerIPs, nodeID) + n.dial(nodeID, tracked) } n.metrics.markDisconnected(peer) } -// ipAuth is a helper struct used to convey information about an -// [*ips.ClaimedIPPort]. -type ipAuth struct { - nodeID ids.NodeID - verified bool -} - -func (n *network) authenticateIPs(ips []*ips.ClaimedIPPort) ([]*ipAuth, error) { - ipAuths := make([]*ipAuth, len(ips)) - for i, ip := range ips { - nodeID := ids.NodeIDFromCert(ip.Cert) - n.peersLock.RLock() - _, _, shouldUpdateOurIP, shouldDial := n.peerIPStatus(nodeID, ip) - n.peersLock.RUnlock() - if !shouldUpdateOurIP && !shouldDial { - ipAuths[i] = &ipAuth{ - nodeID: nodeID, - } - continue - } - - // Verify signature if needed - signedIP := peer.SignedIP{ - UnsignedIP: peer.UnsignedIP{ - IPPort: ip.IPPort, - Timestamp: ip.Timestamp, - }, - Signature: ip.Signature, - } - if err := signedIP.Verify(ip.Cert); err != nil { - return nil, err - } - ipAuths[i] = &ipAuth{ - nodeID: nodeID, - verified: true, - } - } - return ipAuths, nil -} - -// peerIPStatus assumes the caller holds [peersLock] -func (n *network) peerIPStatus(nodeID ids.NodeID, ip *ips.ClaimedIPPort) (*ips.ClaimedIPPort, bool, bool, bool) { - prevIP, previouslyTracked := n.peerIPs[nodeID] - _, connected := n.connectedPeers.GetByID(nodeID) - shouldUpdateOurIP := previouslyTracked && prevIP.Timestamp < ip.Timestamp - shouldDial := !previouslyTracked && !connected && n.wantsConnection(nodeID) - return prevIP, previouslyTracked, shouldUpdateOurIP, shouldDial -} - // dial will spin up a new goroutine and attempt to establish a connection with // [nodeID] at 
[ip]. // @@ -1074,7 +829,11 @@ func (n *network) peerIPStatus(nodeID ids.NodeID, ip *ips.ClaimedIPPort) (*ips.C // If initiating a connection to [ip] fails, then dial will reattempt. However, // there is a randomized exponential backoff to avoid spamming connection // attempts. -func (n *network) dial(ctx context.Context, nodeID ids.NodeID, ip *trackedIP) { +func (n *network) dial(nodeID ids.NodeID, ip *trackedIP) { + n.peerConfig.Log.Verbo("attempting to dial node", + zap.Stringer("nodeID", nodeID), + zap.Stringer("ip", ip.ip), + ) go func() { n.metrics.numTracked.Inc() defer n.metrics.numTracked.Dec() @@ -1083,6 +842,9 @@ func (n *network) dial(ctx context.Context, nodeID ids.NodeID, ip *trackedIP) { timer := time.NewTimer(ip.getDelay()) select { + case <-n.onCloseCtx.Done(): + timer.Stop() + return case <-ip.onStopTracking: timer.Stop() return @@ -1090,13 +852,16 @@ func (n *network) dial(ctx context.Context, nodeID ids.NodeID, ip *trackedIP) { } n.peersLock.Lock() - if !n.wantsConnection(nodeID) { + // If we no longer desire a connect to nodeID, we should cleanup + // trackedIPs and this goroutine. This prevents a memory leak when + // the tracked nodeID leaves the validator set and is never able to + // be connected to. + if !n.ipTracker.WantsConnection(nodeID) { // Typically [n.trackedIPs[nodeID]] will already equal [ip], but // the reference to [ip] is refreshed to avoid any potential // race conditions before removing the entry. if ip, exists := n.trackedIPs[nodeID]; exists { ip.stopTracking() - delete(n.peerIPs, nodeID) delete(n.trackedIPs, nodeID) } n.peersLock.Unlock() @@ -1128,11 +893,31 @@ func (n *network) dial(ctx context.Context, nodeID ids.NodeID, ip *trackedIP) { n.config.MaxReconnectDelay, ) - conn, err := n.dialer.Dial(ctx, ip.ip) + // If the network is configured to disallow private IPs and the + // provided IP is private, we skip all attempts to initiate a + // connection. 
+ // + // Invariant: We perform this check inside of the looping goroutine + // because this goroutine must clean up the trackedIPs entry if + // nodeID leaves the validator set. This is why we continue the loop + // rather than returning even though we will never initiate an + // outbound connection with this IP. + if !n.config.AllowPrivateIPs && ip.ip.IP.IsPrivate() { + n.peerConfig.Log.Verbo("skipping connection dial", + zap.String("reason", "outbound connections to private IPs are prohibited"), + zap.Stringer("nodeID", nodeID), + zap.Stringer("peerIP", ip.ip), + zap.Duration("delay", ip.delay), + ) + continue + } + + conn, err := n.dialer.Dial(n.onCloseCtx, ip.ip) if err != nil { n.peerConfig.Log.Verbo( "failed to reach peer, attempting again", - zap.Stringer("peerIP", ip.ip.IP), + zap.Stringer("nodeID", nodeID), + zap.Stringer("peerIP", ip.ip), zap.Duration("delay", ip.delay), ) continue @@ -1140,14 +925,16 @@ func (n *network) dial(ctx context.Context, nodeID ids.NodeID, ip *trackedIP) { n.peerConfig.Log.Verbo("starting to upgrade connection", zap.String("direction", "outbound"), - zap.Stringer("peerIP", ip.ip.IP), + zap.Stringer("nodeID", nodeID), + zap.Stringer("peerIP", ip.ip), ) err = n.upgrade(conn, n.clientUpgrader) if err != nil { n.peerConfig.Log.Verbo( "failed to upgrade, attempting again", - zap.Stringer("peerIP", ip.ip.IP), + zap.Stringer("nodeID", nodeID), + zap.Stringer("peerIP", ip.ip), zap.Duration("delay", ip.delay), ) continue @@ -1251,13 +1038,6 @@ func (n *network) upgrade(conn net.Conn, upgrader peer.Upgrader) error { zap.Stringer("nodeID", nodeID), ) - if !n.gossipTracker.StartTrackingPeer(nodeID) { - n.peerConfig.Log.Error( - "started duplicate peer tracker", - zap.Stringer("nodeID", nodeID), - ) - } - // peer.Start requires there is only ever one peer instance running with the // same [peerConfig.InboundMsgThrottler]. This is guaranteed by the above // de-duplications for [connectingPeers] and [connectedPeers]. 
@@ -1306,7 +1086,6 @@ func (n *network) StartClose() { for nodeID, tracked := range n.trackedIPs { tracked.stopTracking() - delete(n.peerIPs, nodeID) delete(n.trackedIPs, nodeID) } @@ -1327,18 +1106,18 @@ func (n *network) NodeUptime(subnetID ids.ID) (UptimeResult, error) { return UptimeResult{}, errNotTracked } - validators, ok := n.config.Validators.Get(subnetID) - if !ok { - return UptimeResult{}, errSubnetNotExist - } - - myStake := validators.GetWeight(n.config.MyNodeID) + myStake := n.config.Validators.GetWeight(subnetID, n.config.MyNodeID) if myStake == 0 { return UptimeResult{}, errNotValidator } + totalWeightInt, err := n.config.Validators.TotalWeight(subnetID) + if err != nil { + return UptimeResult{}, fmt.Errorf("error while fetching weight for subnet %s: %w", subnetID, err) + } + var ( - totalWeight = float64(validators.Weight()) + totalWeight = float64(totalWeightInt) totalWeightedPercent = 100 * float64(myStake) rewardingStake = float64(myStake) ) @@ -1350,7 +1129,7 @@ func (n *network) NodeUptime(subnetID ids.ID) (UptimeResult, error) { peer, _ := n.connectedPeers.GetByIndex(i) nodeID := peer.ID() - weight := validators.GetWeight(nodeID) + weight := n.config.Validators.GetWeight(subnetID, nodeID) if weight == 0 { // this is not a validator skip it. 
continue @@ -1372,16 +1151,19 @@ func (n *network) NodeUptime(subnetID ids.ID) (UptimeResult, error) { } return UptimeResult{ - WeightedAveragePercentage: gomath.Abs(totalWeightedPercent / totalWeight), - RewardingStakePercentage: gomath.Abs(100 * rewardingStake / totalWeight), + WeightedAveragePercentage: math.Abs(totalWeightedPercent / totalWeight), + RewardingStakePercentage: math.Abs(100 * rewardingStake / totalWeight), }, nil } func (n *network) runTimers() { - gossipPeerlists := time.NewTicker(n.config.PeerListGossipFreq) + pushGossipPeerlists := time.NewTicker(n.config.PeerListGossipFreq) + pullGossipPeerlists := time.NewTicker(n.config.PeerListPullGossipFreq) + resetPeerListBloom := time.NewTicker(n.config.PeerListBloomResetFreq) updateUptimes := time.NewTicker(n.config.UptimeMetricFreq) defer func() { - gossipPeerlists.Stop() + pushGossipPeerlists.Stop() + resetPeerListBloom.Stop() updateUptimes.Stop() }() @@ -1389,8 +1171,18 @@ func (n *network) runTimers() { select { case <-n.onCloseCtx.Done(): return - case <-gossipPeerlists.C: - n.gossipPeerLists() + case <-pushGossipPeerlists.C: + n.pushGossipPeerLists() + case <-pullGossipPeerlists.C: + n.pullGossipPeerLists() + case <-resetPeerListBloom.C: + if err := n.ipTracker.ResetBloom(); err != nil { + n.peerConfig.Log.Error("failed to reset ip tracker bloom filter", + zap.Error(err), + ) + } else { + n.peerConfig.Log.Debug("reset ip tracker bloom filter") + } case <-updateUptimes.C: primaryUptime, err := n.NodeUptime(constants.PrimaryNetworkID) if err != nil { @@ -1417,8 +1209,8 @@ func (n *network) runTimers() { } } -// gossipPeerLists gossips validators to peers in the network -func (n *network) gossipPeerLists() { +// pushGossipPeerLists gossips validators to peers in the network +func (n *network) pushGossipPeerLists() { peers := n.samplePeers( constants.PrimaryNetworkID, int(n.config.PeerListValidatorGossipSize), @@ -1432,6 +1224,21 @@ func (n *network) gossipPeerLists() { } } +// pullGossipPeerLists 
requests validators from peers in the network +func (n *network) pullGossipPeerLists() { + peers := n.samplePeers( + constants.PrimaryNetworkID, + 1, // numValidatorsToSample + 0, // numNonValidatorsToSample + 0, // numPeersToSample + subnets.NoOpAllower, + ) + + for _, p := range peers { + p.StartSendGetPeerList() + } +} + func (n *network) getLastReceived() (time.Time, bool) { lastReceived := atomic.LoadInt64(&n.peerConfig.LastReceived) if lastReceived == 0 { diff --git a/avalanchego/network/network_test.go b/avalanchego/network/network_test.go index 63efee69..0f95d722 100644 --- a/avalanchego/network/network_test.go +++ b/avalanchego/network/network_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package network @@ -6,13 +6,13 @@ package network import ( "context" "crypto" + "crypto/rsa" "net" "sync" "testing" "time" "github.com/prometheus/client_golang/prometheus" - "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" @@ -25,13 +25,16 @@ import ( "github.com/ava-labs/avalanchego/snow/networking/tracker" "github.com/ava-labs/avalanchego/snow/uptime" "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/staking" "github.com/ava-labs/avalanchego/subnets" "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/ips" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/math/meter" "github.com/ava-labs/avalanchego/utils/resource" "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/version" ) @@ -51,6 +54,8 @@ var ( PeerListNonValidatorGossipSize: 100, PeerListPeersGossipSize: 100, PeerListGossipFreq: time.Second, + 
PeerListPullGossipFreq: time.Second, + PeerListBloomResetFreq: constants.DefaultNetworkPeerListBloomResetFreq, } defaultTimeoutConfig = TimeoutConfig{ PingPongTimeout: 30 * time.Second, @@ -112,7 +117,7 @@ var ( CompressionType: constants.DefaultNetworkCompressionType, - UptimeCalculator: uptime.NewManager(uptime.NewTestState()), + UptimeCalculator: uptime.NewManager(uptime.NewTestState(), &mockable.Clock{}), UptimeMetricFreq: 30 * time.Second, UptimeRequirement: .8, @@ -132,12 +137,13 @@ func init() { func newDefaultTargeter(t tracker.Tracker) tracker.Targeter { return tracker.NewTargeter( + logging.NoLog{}, &tracker.TargeterConfig{ VdrAlloc: 10, MaxNonVdrUsage: 10, MaxNonVdrNodeUsage: 10, }, - validators.NewSet(), + validators.NewManager(), t, ) } @@ -166,11 +172,15 @@ func newTestNetwork(t *testing.T, count int) (*testDialer, []*testListener, []id ip, listener := dialer.NewListener() nodeID, tlsCert, tlsConfig := getTLS(t, i) + blsKey, err := bls.NewSecretKey() + require.NoError(t, err) + config := defaultConfig config.TLSConfig = tlsConfig config.MyNodeID = nodeID config.MyIPPort = ip config.TLSKey = tlsCert.PrivateKey.(crypto.Signer) + config.BLSKey = blsKey listeners[i] = listener nodeIDs[i] = nodeID @@ -194,13 +204,13 @@ func newMessageCreator(t *testing.T) message.Creator { return mc } -func newFullyConnectedTestNetwork(t *testing.T, handlers []router.InboundHandler) ([]ids.NodeID, []Network, *sync.WaitGroup) { +func newFullyConnectedTestNetwork(t *testing.T, handlers []router.InboundHandler) ([]ids.NodeID, []*network, *sync.WaitGroup) { require := require.New(t) dialer, listeners, nodeIDs, configs := newTestNetwork(t, len(handlers)) var ( - networks = make([]Network, len(configs)) + networks = make([]*network, len(configs)) globalLock sync.Mutex numConnected int @@ -211,32 +221,16 @@ func newFullyConnectedTestNetwork(t *testing.T, handlers []router.InboundHandler msgCreator := newMessageCreator(t) registry := prometheus.NewRegistry() - g, err := 
peer.NewGossipTracker(registry, "foobar") - require.NoError(err) + beacons := validators.NewManager() + require.NoError(beacons.AddStaker(constants.PrimaryNetworkID, nodeIDs[0], nil, ids.GenerateTestID(), 1)) - log := logging.NoLog{} - gossipTrackerCallback := peer.GossipTrackerCallback{ - Log: log, - GossipTracker: g, - } - - beacons := validators.NewSet() - err = beacons.Add(nodeIDs[0], nil, ids.GenerateTestID(), 1) - require.NoError(err) - - primaryVdrs := validators.NewSet() - primaryVdrs.RegisterCallbackListener(&gossipTrackerCallback) + vdrs := validators.NewManager() for _, nodeID := range nodeIDs { - err := primaryVdrs.Add(nodeID, nil, ids.GenerateTestID(), 1) - require.NoError(err) + require.NoError(vdrs.AddStaker(constants.PrimaryNetworkID, nodeID, nil, ids.GenerateTestID(), 1)) } - vdrs := validators.NewManager() - _ = vdrs.Add(constants.PrimaryNetworkID, primaryVdrs) - config := config - config.GossipTracker = g config.Beacons = beacons config.Validators = vdrs @@ -245,7 +239,7 @@ func newFullyConnectedTestNetwork(t *testing.T, handlers []router.InboundHandler config, msgCreator, registry, - log, + logging.NoLog{}, listeners[i], dialer, &testHandler{ @@ -278,7 +272,7 @@ func newFullyConnectedTestNetwork(t *testing.T, handlers []router.InboundHandler }, ) require.NoError(err) - networks[i] = net + networks[i] = net.(*network) } wg := sync.WaitGroup{} @@ -292,8 +286,7 @@ func newFullyConnectedTestNetwork(t *testing.T, handlers []router.InboundHandler go func(net Network) { defer wg.Done() - err := net.Dispatch() - require.NoError(err) + require.NoError(net.Dispatch()) }(net) } @@ -320,13 +313,13 @@ func TestSend(t *testing.T) { t, []router.InboundHandler{ router.InboundHandlerFunc(func(context.Context, message.InboundMessage) { - t.Fatal("unexpected message received") + require.FailNow("unexpected message received") }), router.InboundHandlerFunc(func(_ context.Context, msg message.InboundMessage) { received <- msg }), 
router.InboundHandlerFunc(func(context.Context, message.InboundMessage) { - t.Fatal("unexpected message received") + require.FailNow("unexpected message received") }), }, ) @@ -337,10 +330,9 @@ func TestSend(t *testing.T) { outboundGetMsg, err := mc.Get(ids.Empty, 1, time.Second, ids.Empty, p2p.EngineType_ENGINE_TYPE_SNOWMAN) require.NoError(err) - toSend := set.Set[ids.NodeID]{} - toSend.Add(nodeIDs[1]) + toSend := set.Of(nodeIDs[1]) sentTo := net0.Send(outboundGetMsg, toSend, constants.PrimaryNetworkID, subnets.NoOpAllower) - require.EqualValues(toSend, sentTo) + require.Equal(toSend, sentTo) inboundGetMsg := <-received require.Equal(message.GetOp, inboundGetMsg.Op()) @@ -359,13 +351,13 @@ func TestSendAndGossipWithFilter(t *testing.T) { t, []router.InboundHandler{ router.InboundHandlerFunc(func(context.Context, message.InboundMessage) { - t.Fatal("unexpected message received") + require.FailNow("unexpected message received") }), router.InboundHandlerFunc(func(_ context.Context, msg message.InboundMessage) { received <- msg }), router.InboundHandlerFunc(func(context.Context, message.InboundMessage) { - t.Fatal("unexpected message received") + require.FailNow("unexpected message received") }), }, ) @@ -376,9 +368,8 @@ func TestSendAndGossipWithFilter(t *testing.T) { outboundGetMsg, err := mc.Get(ids.Empty, 1, time.Second, ids.Empty, p2p.EngineType_ENGINE_TYPE_SNOWMAN) require.NoError(err) - toSend := set.NewSet[ids.NodeID](3) + toSend := set.Of(nodeIDs...) validNodeID := nodeIDs[1] - toSend.Add(nodeIDs...) 
sentTo := net0.Send(outboundGetMsg, toSend, constants.PrimaryNetworkID, newNodeIDConnector(validNodeID)) require.Len(sentTo, 1) require.Contains(sentTo, validNodeID) @@ -405,22 +396,23 @@ func TestTrackVerifiesSignatures(t *testing.T) { _, networks, wg := newFullyConnectedTestNetwork(t, []router.InboundHandler{nil}) - network := networks[0].(*network) + network := networks[0] nodeID, tlsCert, _ := getTLS(t, 1) - err := validators.Add(network.config.Validators, constants.PrimaryNetworkID, nodeID, nil, ids.Empty, 1) - require.NoError(err) - - _, err = network.Track(ids.EmptyNodeID, []*ips.ClaimedIPPort{{ - Cert: tlsCert.Leaf, - IPPort: ips.IPPort{ - IP: net.IPv4(123, 132, 123, 123), - Port: 10000, - }, - Timestamp: 1000, - Signature: nil, - }}) + require.NoError(network.config.Validators.AddStaker(constants.PrimaryNetworkID, nodeID, nil, ids.Empty, 1)) + + err := network.Track([]*ips.ClaimedIPPort{ + ips.NewClaimedIPPort( + staking.CertificateFromX509(tlsCert.Leaf), + ips.IPPort{ + IP: net.IPv4(123, 132, 123, 123), + Port: 10000, + }, + 1000, // timestamp + nil, // signature + ), + }) // The signature is wrong so this peer tracking info isn't useful. 
- require.Error(err) + require.ErrorIs(err, rsa.ErrVerification) network.peersLock.RLock() require.Empty(network.trackedIPs) @@ -431,3 +423,310 @@ func TestTrackVerifiesSignatures(t *testing.T) { } wg.Wait() } + +func TestTrackDoesNotDialPrivateIPs(t *testing.T) { + require := require.New(t) + + dialer, listeners, nodeIDs, configs := newTestNetwork(t, 2) + + networks := make([]Network, len(configs)) + for i, config := range configs { + msgCreator := newMessageCreator(t) + registry := prometheus.NewRegistry() + + beacons := validators.NewManager() + require.NoError(beacons.AddStaker(constants.PrimaryNetworkID, nodeIDs[0], nil, ids.GenerateTestID(), 1)) + + vdrs := validators.NewManager() + for _, nodeID := range nodeIDs { + require.NoError(vdrs.AddStaker(constants.PrimaryNetworkID, nodeID, nil, ids.GenerateTestID(), 1)) + } + + config := config + + config.Beacons = beacons + config.Validators = vdrs + config.AllowPrivateIPs = false + + net, err := NewNetwork( + config, + msgCreator, + registry, + logging.NoLog{}, + listeners[i], + dialer, + &testHandler{ + InboundHandler: nil, + ConnectedF: func(ids.NodeID, *version.Application, ids.ID) { + require.FailNow("unexpectedly connected to a peer") + }, + DisconnectedF: nil, + }, + ) + require.NoError(err) + networks[i] = net + } + + wg := sync.WaitGroup{} + wg.Add(len(networks)) + for i, net := range networks { + if i != 0 { + config := configs[0] + net.ManuallyTrack(config.MyNodeID, config.MyIPPort.IPPort()) + } + + go func(net Network) { + defer wg.Done() + + require.NoError(net.Dispatch()) + }(net) + } + + network := networks[1].(*network) + require.Eventually( + func() bool { + network.peersLock.RLock() + defer network.peersLock.RUnlock() + + nodeID := nodeIDs[0] + require.Contains(network.trackedIPs, nodeID) + ip := network.trackedIPs[nodeID] + return ip.getDelay() != 0 + }, + 10*time.Second, + 50*time.Millisecond, + ) + + for _, net := range networks { + net.StartClose() + } + wg.Wait() +} + +func 
TestDialDeletesNonValidators(t *testing.T) { + require := require.New(t) + + dialer, listeners, nodeIDs, configs := newTestNetwork(t, 2) + + vdrs := validators.NewManager() + for _, nodeID := range nodeIDs { + require.NoError(vdrs.AddStaker(constants.PrimaryNetworkID, nodeID, nil, ids.GenerateTestID(), 1)) + } + + networks := make([]Network, len(configs)) + for i, config := range configs { + msgCreator := newMessageCreator(t) + registry := prometheus.NewRegistry() + + beacons := validators.NewManager() + require.NoError(beacons.AddStaker(constants.PrimaryNetworkID, nodeIDs[0], nil, ids.GenerateTestID(), 1)) + + config := config + + config.Beacons = beacons + config.Validators = vdrs + config.AllowPrivateIPs = false + + net, err := NewNetwork( + config, + msgCreator, + registry, + logging.NoLog{}, + listeners[i], + dialer, + &testHandler{ + InboundHandler: nil, + ConnectedF: func(ids.NodeID, *version.Application, ids.ID) { + require.FailNow("unexpectedly connected to a peer") + }, + DisconnectedF: nil, + }, + ) + require.NoError(err) + networks[i] = net + } + + config := configs[0] + signer := peer.NewIPSigner(config.MyIPPort, config.TLSKey, config.BLSKey) + ip, err := signer.GetSignedIP() + require.NoError(err) + + wg := sync.WaitGroup{} + wg.Add(len(networks)) + for i, net := range networks { + if i != 0 { + err := net.Track([]*ips.ClaimedIPPort{ + ips.NewClaimedIPPort( + staking.CertificateFromX509(config.TLSConfig.Certificates[0].Leaf), + ip.IPPort, + ip.Timestamp, + ip.TLSSignature, + ), + }) + require.NoError(err) + } + + go func(net Network) { + defer wg.Done() + + require.NoError(net.Dispatch()) + }(net) + } + + // Give the dialer time to run one iteration. This is racy, but should only + be possible to flake as a false negative (test passes when it shouldn't). 
+ time.Sleep(50 * time.Millisecond) + + network := networks[1].(*network) + require.NoError(vdrs.RemoveWeight(constants.PrimaryNetworkID, nodeIDs[0], 1)) + require.Eventually( + func() bool { + network.peersLock.RLock() + defer network.peersLock.RUnlock() + + nodeID := nodeIDs[0] + _, ok := network.trackedIPs[nodeID] + return !ok + }, + 10*time.Second, + 50*time.Millisecond, + ) + + for _, net := range networks { + net.StartClose() + } + wg.Wait() +} + +// Test that cancelling [network.onCloseCtx] +// causes dial to return immediately. +func TestDialContext(t *testing.T) { + _, networks, wg := newFullyConnectedTestNetwork(t, []router.InboundHandler{nil}) + + dialer := newTestDialer() + network := networks[0] + network.dialer = dialer + + var ( + neverDialedNodeID = ids.GenerateTestNodeID() + dialedNodeID = ids.GenerateTestNodeID() + + dynamicNeverDialedIP, neverDialedListener = dialer.NewListener() + dynamicDialedIP, dialedListener = dialer.NewListener() + + neverDialedIP = &trackedIP{ + ip: dynamicNeverDialedIP.IPPort(), + } + dialedIP = &trackedIP{ + ip: dynamicDialedIP.IPPort(), + } + ) + + network.ManuallyTrack(neverDialedNodeID, neverDialedIP.ip) + network.ManuallyTrack(dialedNodeID, dialedIP.ip) + + // Sanity check that when a non-cancelled context is given, + // we actually dial the peer. + network.dial(dialedNodeID, dialedIP) + + gotDialedIPConn := make(chan struct{}) + go func() { + _, _ = dialedListener.Accept() + close(gotDialedIPConn) + }() + <-gotDialedIPConn + + // Assert that when [n.onCloseCtx] is cancelled, dial returns immediately. + // That is, [neverDialedListener] doesn't accept a connection. 
+ network.onCloseCtxCancel() + network.dial(neverDialedNodeID, neverDialedIP) + + gotNeverDialedIPConn := make(chan struct{}) + go func() { + _, _ = neverDialedListener.Accept() + close(gotNeverDialedIPConn) + }() + + select { + case <-gotNeverDialedIPConn: + require.FailNow(t, "unexpectedly connected to peer") + default: + } + + network.StartClose() + wg.Wait() +} + +func TestAllowConnectionAsAValidator(t *testing.T) { + require := require.New(t) + + dialer, listeners, nodeIDs, configs := newTestNetwork(t, 2) + + networks := make([]Network, len(configs)) + for i, config := range configs { + msgCreator := newMessageCreator(t) + registry := prometheus.NewRegistry() + + beacons := validators.NewManager() + require.NoError(beacons.AddStaker(constants.PrimaryNetworkID, nodeIDs[0], nil, ids.GenerateTestID(), 1)) + + vdrs := validators.NewManager() + require.NoError(vdrs.AddStaker(constants.PrimaryNetworkID, nodeIDs[0], nil, ids.GenerateTestID(), 1)) + + config := config + + config.Beacons = beacons + config.Validators = vdrs + config.RequireValidatorToConnect = true + + net, err := NewNetwork( + config, + msgCreator, + registry, + logging.NoLog{}, + listeners[i], + dialer, + &testHandler{ + InboundHandler: nil, + ConnectedF: nil, + DisconnectedF: nil, + }, + ) + require.NoError(err) + networks[i] = net + } + + wg := sync.WaitGroup{} + wg.Add(len(networks)) + for i, net := range networks { + if i != 0 { + config := configs[0] + net.ManuallyTrack(config.MyNodeID, config.MyIPPort.IPPort()) + } + + go func(net Network) { + defer wg.Done() + + require.NoError(net.Dispatch()) + }(net) + } + + network := networks[1].(*network) + require.Eventually( + func() bool { + network.peersLock.RLock() + defer network.peersLock.RUnlock() + + nodeID := nodeIDs[0] + _, contains := network.connectedPeers.GetByID(nodeID) + return contains + }, + 10*time.Second, + 50*time.Millisecond, + ) + + for _, net := range networks { + net.StartClose() + } + wg.Wait() +} diff --git 
a/avalanchego/network/p2p/client.go b/avalanchego/network/p2p/client.go new file mode 100644 index 00000000..b506baf9 --- /dev/null +++ b/avalanchego/network/p2p/client.go @@ -0,0 +1,180 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package p2p + +import ( + "context" + "errors" + "fmt" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/utils/set" +) + +var ( + ErrRequestPending = errors.New("request pending") + ErrNoPeers = errors.New("no peers") +) + +// AppResponseCallback is called upon receiving an AppResponse for an AppRequest +// issued by Client. +// Callers should check [err] to see whether the AppRequest failed or not. +type AppResponseCallback func( + ctx context.Context, + nodeID ids.NodeID, + responseBytes []byte, + err error, +) + +// CrossChainAppResponseCallback is called upon receiving an +// CrossChainAppResponse for a CrossChainAppRequest issued by Client. +// Callers should check [err] to see whether the AppRequest failed or not. +type CrossChainAppResponseCallback func( + ctx context.Context, + chainID ids.ID, + responseBytes []byte, + err error, +) + +type Client struct { + handlerID uint64 + handlerIDStr string + handlerPrefix []byte + router *router + sender common.AppSender + options *clientOptions +} + +// AppRequestAny issues an AppRequest to an arbitrary node decided by Client. +// If a specific node needs to be requested, use AppRequest instead. +// See AppRequest for more docs. +func (c *Client) AppRequestAny( + ctx context.Context, + appRequestBytes []byte, + onResponse AppResponseCallback, +) error { + sampled := c.options.nodeSampler.Sample(ctx, 1) + if len(sampled) != 1 { + return ErrNoPeers + } + + nodeIDs := set.Of(sampled...) + return c.AppRequest(ctx, nodeIDs, appRequestBytes, onResponse) +} + +// AppRequest issues an arbitrary request to a node. 
+// [onResponse] is invoked upon an error or a response. +func (c *Client) AppRequest( + ctx context.Context, + nodeIDs set.Set[ids.NodeID], + appRequestBytes []byte, + onResponse AppResponseCallback, +) error { + c.router.lock.Lock() + defer c.router.lock.Unlock() + + appRequestBytes = PrefixMessage(c.handlerPrefix, appRequestBytes) + for nodeID := range nodeIDs { + requestID := c.router.requestID + if _, ok := c.router.pendingAppRequests[requestID]; ok { + return fmt.Errorf( + "failed to issue request with request id %d: %w", + requestID, + ErrRequestPending, + ) + } + + if err := c.sender.SendAppRequest( + ctx, + set.Of(nodeID), + requestID, + appRequestBytes, + ); err != nil { + return err + } + + c.router.pendingAppRequests[requestID] = pendingAppRequest{ + handlerID: c.handlerIDStr, + callback: onResponse, + } + c.router.requestID += 2 + } + + return nil +} + +// AppGossip sends a gossip message to a random set of peers. +func (c *Client) AppGossip( + ctx context.Context, + appGossipBytes []byte, +) error { + return c.sender.SendAppGossip( + ctx, + PrefixMessage(c.handlerPrefix, appGossipBytes), + ) +} + +// AppGossipSpecific sends a gossip message to a predetermined set of peers. +func (c *Client) AppGossipSpecific( + ctx context.Context, + nodeIDs set.Set[ids.NodeID], + appGossipBytes []byte, +) error { + return c.sender.SendAppGossipSpecific( + ctx, + nodeIDs, + PrefixMessage(c.handlerPrefix, appGossipBytes), + ) +} + +// CrossChainAppRequest sends a cross chain app request to another vm. +// [onResponse] is invoked upon an error or a response. 
+func (c *Client) CrossChainAppRequest( + ctx context.Context, + chainID ids.ID, + appRequestBytes []byte, + onResponse CrossChainAppResponseCallback, +) error { + c.router.lock.Lock() + defer c.router.lock.Unlock() + + requestID := c.router.requestID + if _, ok := c.router.pendingCrossChainAppRequests[requestID]; ok { + return fmt.Errorf( + "failed to issue request with request id %d: %w", + requestID, + ErrRequestPending, + ) + } + + if err := c.sender.SendCrossChainAppRequest( + ctx, + chainID, + requestID, + PrefixMessage(c.handlerPrefix, appRequestBytes), + ); err != nil { + return err + } + + c.router.pendingCrossChainAppRequests[requestID] = pendingCrossChainAppRequest{ + handlerID: c.handlerIDStr, + callback: onResponse, + } + c.router.requestID += 2 + + return nil +} + +// PrefixMessage prefixes the original message with the protocol identifier. +// +// Only gossip and request messages need to be prefixed. +// Response messages don't need to be prefixed because request ids are tracked +// which map to the expected response handler. +func PrefixMessage(prefix, msg []byte) []byte { + messageBytes := make([]byte, len(prefix)+len(msg)) + copy(messageBytes, prefix) + copy(messageBytes[len(prefix):], msg) + return messageBytes +} diff --git a/avalanchego/network/p2p/gossip/bloom.go b/avalanchego/network/p2p/gossip/bloom.go new file mode 100644 index 00000000..e1a2e4d5 --- /dev/null +++ b/avalanchego/network/p2p/gossip/bloom.go @@ -0,0 +1,131 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package gossip + +import ( + "crypto/rand" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/bloom" +) + +// NewBloomFilter returns a new instance of a bloom filter with at least [minTargetElements] elements +// anticipated at any moment, and a false positive probability of [targetFalsePositiveProbability]. 
If the +// false positive probability exceeds [resetFalsePositiveProbability], the bloom filter will be reset. +// +// Invariant: The returned bloom filter is not safe to reset concurrently with +// other operations. However, it is otherwise safe to access concurrently. +func NewBloomFilter( + registerer prometheus.Registerer, + namespace string, + minTargetElements int, + targetFalsePositiveProbability, + resetFalsePositiveProbability float64, +) (*BloomFilter, error) { + metrics, err := bloom.NewMetrics(namespace, registerer) + if err != nil { + return nil, err + } + filter := &BloomFilter{ + minTargetElements: minTargetElements, + targetFalsePositiveProbability: targetFalsePositiveProbability, + resetFalsePositiveProbability: resetFalsePositiveProbability, + + metrics: metrics, + } + err = resetBloomFilter( + filter, + minTargetElements, + targetFalsePositiveProbability, + resetFalsePositiveProbability, + ) + return filter, err +} + +type BloomFilter struct { + minTargetElements int + targetFalsePositiveProbability float64 + resetFalsePositiveProbability float64 + + metrics *bloom.Metrics + + maxCount int + bloom *bloom.Filter + // salt is provided to eventually unblock collisions in Bloom. It's possible + // that conflicting Gossipable items collide in the bloom filter, so a salt + // is generated to eventually resolve collisions. + salt ids.ID +} + +func (b *BloomFilter) Add(gossipable Gossipable) { + h := gossipable.GossipID() + bloom.Add(b.bloom, h[:], b.salt[:]) + b.metrics.Count.Inc() +} + +func (b *BloomFilter) Has(gossipable Gossipable) bool { + h := gossipable.GossipID() + return bloom.Contains(b.bloom, h[:], b.salt[:]) +} + +func (b *BloomFilter) Marshal() ([]byte, []byte) { + bloomBytes := b.bloom.Marshal() + // salt must be copied here to ensure the bytes aren't overwritten if salt + // is later modified. 
+ salt := b.salt + return bloomBytes, salt[:] +} + +// ResetBloomFilterIfNeeded resets a bloom filter if it breaches [targetFalsePositiveProbability]. +// +// If [targetElements] exceeds [minTargetElements], the size of the bloom filter will grow to maintain +// the same [targetFalsePositiveProbability]. +// +// Returns true if the bloom filter was reset. +func ResetBloomFilterIfNeeded( + bloomFilter *BloomFilter, + targetElements int, +) (bool, error) { + if bloomFilter.bloom.Count() <= bloomFilter.maxCount { + return false, nil + } + + targetElements = max(bloomFilter.minTargetElements, targetElements) + err := resetBloomFilter( + bloomFilter, + targetElements, + bloomFilter.targetFalsePositiveProbability, + bloomFilter.resetFalsePositiveProbability, + ) + return err == nil, err +} + +func resetBloomFilter( + bloomFilter *BloomFilter, + targetElements int, + targetFalsePositiveProbability, + resetFalsePositiveProbability float64, +) error { + numHashes, numEntries := bloom.OptimalParameters( + targetElements, + targetFalsePositiveProbability, + ) + newBloom, err := bloom.New(numHashes, numEntries) + if err != nil { + return err + } + var newSalt ids.ID + if _, err := rand.Read(newSalt[:]); err != nil { + return err + } + + bloomFilter.maxCount = bloom.EstimateCount(numHashes, numEntries, resetFalsePositiveProbability) + bloomFilter.bloom = newBloom + bloomFilter.salt = newSalt + + bloomFilter.metrics.Reset(newBloom, bloomFilter.maxCount) + return nil +} diff --git a/avalanchego/network/p2p/gossip/bloom_test.go b/avalanchego/network/p2p/gossip/bloom_test.go new file mode 100644 index 00000000..a6df82f6 --- /dev/null +++ b/avalanchego/network/p2p/gossip/bloom_test.go @@ -0,0 +1,108 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package gossip + +import ( + "slices" + "testing" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/testutil" + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" +) + +func TestBloomFilterRefresh(t *testing.T) { + tests := []struct { + name string + minTargetElements int + targetFalsePositiveProbability float64 + resetFalsePositiveProbability float64 + resetCount uint64 + add []*testTx + expected []*testTx + }{ + { + name: "no refresh", + minTargetElements: 1, + targetFalsePositiveProbability: 0.01, + resetFalsePositiveProbability: 1, + resetCount: 0, // maxCount = 9223372036854775807 + add: []*testTx{ + {id: ids.ID{0}}, + {id: ids.ID{1}}, + {id: ids.ID{2}}, + }, + expected: []*testTx{ + {id: ids.ID{0}}, + {id: ids.ID{1}}, + {id: ids.ID{2}}, + }, + }, + { + name: "refresh", + minTargetElements: 1, + targetFalsePositiveProbability: 0.01, + resetFalsePositiveProbability: 0.0000000000000001, // maxCount = 1 + resetCount: 1, + add: []*testTx{ + {id: ids.ID{0}}, + {id: ids.ID{1}}, + {id: ids.ID{2}}, + }, + expected: []*testTx{ + {id: ids.ID{2}}, + }, + }, + { + name: "multiple refresh", + minTargetElements: 1, + targetFalsePositiveProbability: 0.01, + resetFalsePositiveProbability: 0.0000000000000001, // maxCount = 1 + resetCount: 2, + add: []*testTx{ + {id: ids.ID{0}}, + {id: ids.ID{1}}, + {id: ids.ID{2}}, + {id: ids.ID{3}}, + {id: ids.ID{4}}, + }, + expected: []*testTx{ + {id: ids.ID{4}}, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + bloom, err := NewBloomFilter(prometheus.NewRegistry(), "", tt.minTargetElements, tt.targetFalsePositiveProbability, tt.resetFalsePositiveProbability) + require.NoError(err) + + var resetCount uint64 + for _, item := range tt.add { + bloomBytes, saltBytes := bloom.Marshal() + initialBloomBytes := slices.Clone(bloomBytes) + initialSaltBytes := slices.Clone(saltBytes) + + reset, err := 
ResetBloomFilterIfNeeded(bloom, len(tt.add)) + require.NoError(err) + if reset { + resetCount++ + } + bloom.Add(item) + + require.Equal(initialBloomBytes, bloomBytes) + require.Equal(initialSaltBytes, saltBytes) + } + + require.Equal(tt.resetCount, resetCount) + require.Equal(float64(tt.resetCount+1), testutil.ToFloat64(bloom.metrics.ResetCount)) + for _, expected := range tt.expected { + require.True(bloom.Has(expected)) + } + }) + } +} diff --git a/avalanchego/network/p2p/gossip/gossip.go b/avalanchego/network/p2p/gossip/gossip.go new file mode 100644 index 00000000..3b910216 --- /dev/null +++ b/avalanchego/network/p2p/gossip/gossip.go @@ -0,0 +1,373 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package gossip + +import ( + "context" + "errors" + "fmt" + "sync" + "time" + + "github.com/prometheus/client_golang/prometheus" + "go.uber.org/zap" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/network/p2p" + "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/buffer" + "github.com/ava-labs/avalanchego/utils/logging" +) + +const ( + typeLabel = "type" + pushType = "push" + pullType = "pull" +) + +var ( + _ Gossiper = (*ValidatorGossiper)(nil) + _ Gossiper = (*PullGossiper[*testTx])(nil) + _ Gossiper = (*NoOpGossiper)(nil) + _ Gossiper = (*TestGossiper)(nil) + + _ Accumulator[*testTx] = (*PushGossiper[*testTx])(nil) + _ Accumulator[*testTx] = (*NoOpAccumulator[*testTx])(nil) + _ Accumulator[*testTx] = (*TestAccumulator[*testTx])(nil) + + metricLabels = []string{typeLabel} + pushLabels = prometheus.Labels{ + typeLabel: pushType, + } + pullLabels = prometheus.Labels{ + typeLabel: pullType, + } +) + +// Gossiper gossips Gossipables to other nodes +type Gossiper interface { + // Gossip runs a cycle of gossip. Returns an error if we failed to gossip. 
+ Gossip(ctx context.Context) error +} + +// Accumulator allows a caller to accumulate gossipables to be gossiped +type Accumulator[T Gossipable] interface { + Gossiper + // Add queues gossipables to be gossiped + Add(gossipables ...T) +} + +// ValidatorGossiper only calls [Gossip] if the given node is a validator +type ValidatorGossiper struct { + Gossiper + + NodeID ids.NodeID + Validators p2p.ValidatorSet +} + +// Metrics that are tracked across a gossip protocol. A given protocol should +// only use a single instance of Metrics. +type Metrics struct { + sentCount *prometheus.CounterVec + sentBytes *prometheus.CounterVec + receivedCount *prometheus.CounterVec + receivedBytes *prometheus.CounterVec +} + +// NewMetrics returns a common set of metrics +func NewMetrics( + metrics prometheus.Registerer, + namespace string, +) (Metrics, error) { + m := Metrics{ + sentCount: prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Name: "gossip_sent_count", + Help: "amount of gossip sent (n)", + }, metricLabels), + sentBytes: prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Name: "gossip_sent_bytes", + Help: "amount of gossip sent (bytes)", + }, metricLabels), + receivedCount: prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Name: "gossip_received_count", + Help: "amount of gossip received (n)", + }, metricLabels), + receivedBytes: prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Name: "gossip_received_bytes", + Help: "amount of gossip received (bytes)", + }, metricLabels), + } + err := utils.Err( + metrics.Register(m.sentCount), + metrics.Register(m.sentBytes), + metrics.Register(m.receivedCount), + metrics.Register(m.receivedBytes), + ) + return m, err +} + +func (v ValidatorGossiper) Gossip(ctx context.Context) error { + if !v.Validators.Has(ctx, v.NodeID) { + return nil + } + + return v.Gossiper.Gossip(ctx) +} + +func NewPullGossiper[T Gossipable]( + log logging.Logger, 
+ marshaller Marshaller[T], + set Set[T], + client *p2p.Client, + metrics Metrics, + pollSize int, +) *PullGossiper[T] { + return &PullGossiper[T]{ + log: log, + marshaller: marshaller, + set: set, + client: client, + metrics: metrics, + pollSize: pollSize, + } +} + +type PullGossiper[T Gossipable] struct { + log logging.Logger + marshaller Marshaller[T] + set Set[T] + client *p2p.Client + metrics Metrics + pollSize int +} + +func (p *PullGossiper[_]) Gossip(ctx context.Context) error { + msgBytes, err := MarshalAppRequest(p.set.GetFilter()) + if err != nil { + return err + } + + for i := 0; i < p.pollSize; i++ { + err := p.client.AppRequestAny(ctx, msgBytes, p.handleResponse) + if err != nil && !errors.Is(err, p2p.ErrNoPeers) { + return err + } + } + + return nil +} + +func (p *PullGossiper[_]) handleResponse( + _ context.Context, + nodeID ids.NodeID, + responseBytes []byte, + err error, +) { + if err != nil { + p.log.Debug( + "failed gossip request", + zap.Stringer("nodeID", nodeID), + zap.Error(err), + ) + return + } + + gossip, err := ParseAppResponse(responseBytes) + if err != nil { + p.log.Debug("failed to unmarshal gossip response", zap.Error(err)) + return + } + + receivedBytes := 0 + for _, bytes := range gossip { + receivedBytes += len(bytes) + + gossipable, err := p.marshaller.UnmarshalGossip(bytes) + if err != nil { + p.log.Debug( + "failed to unmarshal gossip", + zap.Stringer("nodeID", nodeID), + zap.Error(err), + ) + continue + } + + hash := gossipable.GossipID() + p.log.Debug( + "received gossip", + zap.Stringer("nodeID", nodeID), + zap.Stringer("id", hash), + ) + if err := p.set.Add(gossipable); err != nil { + p.log.Debug( + "failed to add gossip to the known set", + zap.Stringer("nodeID", nodeID), + zap.Stringer("id", hash), + zap.Error(err), + ) + continue + } + } + + receivedCountMetric, err := p.metrics.receivedCount.GetMetricWith(pullLabels) + if err != nil { + p.log.Error("failed to get received count metric", zap.Error(err)) + return + } + + 
receivedBytesMetric, err := p.metrics.receivedBytes.GetMetricWith(pullLabels) + if err != nil { + p.log.Error("failed to get received bytes metric", zap.Error(err)) + return + } + + receivedCountMetric.Add(float64(len(gossip))) + receivedBytesMetric.Add(float64(receivedBytes)) +} + +// NewPushGossiper returns an instance of PushGossiper +func NewPushGossiper[T Gossipable](marshaller Marshaller[T], client *p2p.Client, metrics Metrics, targetGossipSize int) *PushGossiper[T] { + return &PushGossiper[T]{ + marshaller: marshaller, + client: client, + metrics: metrics, + targetGossipSize: targetGossipSize, + pending: buffer.NewUnboundedDeque[T](0), + } +} + +// PushGossiper broadcasts gossip to peers randomly in the network +type PushGossiper[T Gossipable] struct { + marshaller Marshaller[T] + client *p2p.Client + metrics Metrics + targetGossipSize int + + lock sync.Mutex + pending buffer.Deque[T] +} + +// Gossip flushes any queued gossipables +func (p *PushGossiper[T]) Gossip(ctx context.Context) error { + p.lock.Lock() + defer p.lock.Unlock() + + if p.pending.Len() == 0 { + return nil + } + + sentBytes := 0 + gossip := make([][]byte, 0, p.pending.Len()) + for sentBytes < p.targetGossipSize { + gossipable, ok := p.pending.PeekLeft() + if !ok { + break + } + + bytes, err := p.marshaller.MarshalGossip(gossipable) + if err != nil { + // remove this item so we don't get stuck in a loop + _, _ = p.pending.PopLeft() + return err + } + + gossip = append(gossip, bytes) + sentBytes += len(bytes) + p.pending.PopLeft() + } + + msgBytes, err := MarshalAppGossip(gossip) + if err != nil { + return err + } + + sentCountMetric, err := p.metrics.sentCount.GetMetricWith(pushLabels) + if err != nil { + return fmt.Errorf("failed to get sent count metric: %w", err) + } + + sentBytesMetric, err := p.metrics.sentBytes.GetMetricWith(pushLabels) + if err != nil { + return fmt.Errorf("failed to get sent bytes metric: %w", err) + } + + sentCountMetric.Add(float64(len(gossip))) + 
sentBytesMetric.Add(float64(sentBytes)) + + return p.client.AppGossip(ctx, msgBytes) +} + +func (p *PushGossiper[T]) Add(gossipables ...T) { + p.lock.Lock() + defer p.lock.Unlock() + + for _, gossipable := range gossipables { + p.pending.PushRight(gossipable) + } +} + +// Every calls [Gossip] every [frequency] amount of time. +func Every(ctx context.Context, log logging.Logger, gossiper Gossiper, frequency time.Duration) { + ticker := time.NewTicker(frequency) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + if err := gossiper.Gossip(ctx); err != nil { + log.Warn("failed to gossip", zap.Error(err)) + } + case <-ctx.Done(): + log.Debug("shutting down gossip") + return + } + } +} + +type NoOpGossiper struct{} + +func (NoOpGossiper) Gossip(context.Context) error { + return nil +} + +type NoOpAccumulator[T Gossipable] struct{} + +func (NoOpAccumulator[_]) Gossip(context.Context) error { + return nil +} + +func (NoOpAccumulator[T]) Add(...T) {} + +type TestGossiper struct { + GossipF func(ctx context.Context) error +} + +func (t *TestGossiper) Gossip(ctx context.Context) error { + return t.GossipF(ctx) +} + +type TestAccumulator[T Gossipable] struct { + GossipF func(ctx context.Context) error + AddF func(...T) +} + +func (t TestAccumulator[T]) Gossip(ctx context.Context) error { + if t.GossipF == nil { + return nil + } + + return t.GossipF(ctx) +} + +func (t TestAccumulator[T]) Add(gossipables ...T) { + if t.AddF == nil { + return + } + + t.AddF(gossipables...) +} diff --git a/avalanchego/network/p2p/gossip/gossip_test.go b/avalanchego/network/p2p/gossip/gossip_test.go new file mode 100644 index 00000000..154b9131 --- /dev/null +++ b/avalanchego/network/p2p/gossip/gossip_test.go @@ -0,0 +1,465 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package gossip + +import ( + "context" + "sync" + "testing" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/require" + "golang.org/x/exp/maps" + "google.golang.org/protobuf/proto" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/network/p2p" + "github.com/ava-labs/avalanchego/proto/pb/sdk" + "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/utils/units" +) + +func TestGossiperShutdown(*testing.T) { + gossiper := NewPullGossiper[*testTx]( + logging.NoLog{}, + nil, + nil, + nil, + Metrics{}, + 0, + ) + ctx, cancel := context.WithCancel(context.Background()) + + wg := &sync.WaitGroup{} + wg.Add(1) + + go func() { + Every(ctx, logging.NoLog{}, gossiper, time.Second) + wg.Done() + }() + + cancel() + wg.Wait() +} + +func TestGossiperGossip(t *testing.T) { + tests := []struct { + name string + targetResponseSize int + requester []*testTx // what we have + responder []*testTx // what the peer we're requesting gossip from has + expectedPossibleValues []*testTx // possible values we can have + expectedLen int + }{ + { + name: "no gossip - no one knows anything", + }, + { + name: "no gossip - requester knows more than responder", + targetResponseSize: 1024, + requester: []*testTx{{id: ids.ID{0}}}, + expectedPossibleValues: []*testTx{{id: ids.ID{0}}}, + expectedLen: 1, + }, + { + name: "no gossip - requester knows everything responder knows", + targetResponseSize: 1024, + requester: []*testTx{{id: ids.ID{0}}}, + responder: []*testTx{{id: ids.ID{0}}}, + expectedPossibleValues: []*testTx{{id: ids.ID{0}}}, + expectedLen: 1, + }, + { + name: "gossip - requester knows nothing", + targetResponseSize: 1024, + responder: []*testTx{{id: ids.ID{0}}}, + expectedPossibleValues: []*testTx{{id: ids.ID{0}}}, + expectedLen: 1, + }, + { + name: "gossip - requester knows less than 
responder", + targetResponseSize: 1024, + requester: []*testTx{{id: ids.ID{0}}}, + responder: []*testTx{{id: ids.ID{0}}, {id: ids.ID{1}}}, + expectedPossibleValues: []*testTx{{id: ids.ID{0}}, {id: ids.ID{1}}}, + expectedLen: 2, + }, + { + name: "gossip - target response size exceeded", + targetResponseSize: 32, + responder: []*testTx{{id: ids.ID{0}}, {id: ids.ID{1}}, {id: ids.ID{2}}}, + expectedPossibleValues: []*testTx{{id: ids.ID{0}}, {id: ids.ID{1}}, {id: ids.ID{2}}}, + expectedLen: 2, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + ctx := context.Background() + + responseSender := &common.FakeSender{ + SentAppResponse: make(chan []byte, 1), + } + responseNetwork, err := p2p.NewNetwork(logging.NoLog{}, responseSender, prometheus.NewRegistry(), "") + require.NoError(err) + + responseBloom, err := NewBloomFilter(prometheus.NewRegistry(), "", 1000, 0.01, 0.05) + require.NoError(err) + responseSet := &testSet{ + txs: make(map[ids.ID]*testTx), + bloom: responseBloom, + } + for _, item := range tt.responder { + require.NoError(responseSet.Add(item)) + } + + metrics, err := NewMetrics(prometheus.NewRegistry(), "") + require.NoError(err) + marshaller := testMarshaller{} + handler := NewHandler[*testTx]( + logging.NoLog{}, + marshaller, + NoOpAccumulator[*testTx]{}, + responseSet, + metrics, + tt.targetResponseSize, + ) + require.NoError(err) + require.NoError(responseNetwork.AddHandler(0x0, handler)) + + requestSender := &common.FakeSender{ + SentAppRequest: make(chan []byte, 1), + } + + requestNetwork, err := p2p.NewNetwork(logging.NoLog{}, requestSender, prometheus.NewRegistry(), "") + require.NoError(err) + require.NoError(requestNetwork.Connected(context.Background(), ids.EmptyNodeID, nil)) + + bloom, err := NewBloomFilter(prometheus.NewRegistry(), "", 1000, 0.01, 0.05) + require.NoError(err) + requestSet := &testSet{ + txs: make(map[ids.ID]*testTx), + bloom: bloom, + } + for _, item := range tt.requester { 
+ require.NoError(requestSet.Add(item)) + } + + requestClient := requestNetwork.NewClient(0x0) + + require.NoError(err) + gossiper := NewPullGossiper[*testTx]( + logging.NoLog{}, + marshaller, + requestSet, + requestClient, + metrics, + 1, + ) + require.NoError(err) + received := set.Set[*testTx]{} + requestSet.onAdd = func(tx *testTx) { + received.Add(tx) + } + + require.NoError(gossiper.Gossip(ctx)) + require.NoError(responseNetwork.AppRequest(ctx, ids.EmptyNodeID, 1, time.Time{}, <-requestSender.SentAppRequest)) + require.NoError(requestNetwork.AppResponse(ctx, ids.EmptyNodeID, 1, <-responseSender.SentAppResponse)) + + require.Len(requestSet.txs, tt.expectedLen) + require.Subset(tt.expectedPossibleValues, maps.Values(requestSet.txs)) + + // we should not receive anything that we already had before we + // requested the gossip + for _, tx := range tt.requester { + require.NotContains(received, tx) + } + }) + } +} + +func TestEvery(*testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + calls := 0 + gossiper := &TestGossiper{ + GossipF: func(context.Context) error { + if calls >= 10 { + cancel() + return nil + } + + calls++ + return nil + }, + } + + go Every(ctx, logging.NoLog{}, gossiper, time.Millisecond) + <-ctx.Done() +} + +func TestValidatorGossiper(t *testing.T) { + require := require.New(t) + + nodeID := ids.GenerateTestNodeID() + + validators := testValidatorSet{ + validators: set.Of(nodeID), + } + + calls := 0 + gossiper := ValidatorGossiper{ + Gossiper: &TestGossiper{ + GossipF: func(context.Context) error { + calls++ + return nil + }, + }, + NodeID: nodeID, + Validators: validators, + } + + // we are a validator, so we should request gossip + require.NoError(gossiper.Gossip(context.Background())) + require.Equal(1, calls) + + // we are not a validator, so we should not request gossip + validators.validators = set.Set[ids.NodeID]{} + require.NoError(gossiper.Gossip(context.Background())) + require.Equal(2, calls) +} + +// Tests that the 
outgoing gossip is equivalent to what was accumulated +func TestPushGossiper(t *testing.T) { + tests := []struct { + name string + cycles [][]*testTx + }{ + { + name: "single cycle", + cycles: [][]*testTx{ + { + &testTx{ + id: ids.ID{0}, + }, + &testTx{ + id: ids.ID{1}, + }, + &testTx{ + id: ids.ID{2}, + }, + }, + }, + }, + { + name: "multiple cycles", + cycles: [][]*testTx{ + { + &testTx{ + id: ids.ID{0}, + }, + }, + { + &testTx{ + id: ids.ID{1}, + }, + &testTx{ + id: ids.ID{2}, + }, + }, + { + &testTx{ + id: ids.ID{3}, + }, + &testTx{ + id: ids.ID{4}, + }, + &testTx{ + id: ids.ID{5}, + }, + }, + { + &testTx{ + id: ids.ID{6}, + }, + &testTx{ + id: ids.ID{7}, + }, + &testTx{ + id: ids.ID{8}, + }, + &testTx{ + id: ids.ID{9}, + }, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + ctx := context.Background() + + sender := &common.FakeSender{ + SentAppGossip: make(chan []byte, 1), + } + network, err := p2p.NewNetwork( + logging.NoLog{}, + sender, + prometheus.NewRegistry(), + "", + ) + require.NoError(err) + client := network.NewClient(0) + metrics, err := NewMetrics(prometheus.NewRegistry(), "") + require.NoError(err) + marshaller := testMarshaller{} + gossiper := NewPushGossiper[*testTx]( + marshaller, + client, + metrics, + units.MiB, + ) + + for _, gossipables := range tt.cycles { + gossiper.Add(gossipables...) 
+ require.NoError(gossiper.Gossip(ctx)) + + want := &sdk.PushGossip{ + Gossip: make([][]byte, 0, len(tt.cycles)), + } + + for _, gossipable := range gossipables { + bytes, err := marshaller.MarshalGossip(gossipable) + require.NoError(err) + + want.Gossip = append(want.Gossip, bytes) + } + + // remove the handler prefix + sentMsg := <-sender.SentAppGossip + got := &sdk.PushGossip{} + require.NoError(proto.Unmarshal(sentMsg[1:], got)) + + require.Equal(want.Gossip, got.Gossip) + } + }) + } +} + +// Tests that gossip to a peer should forward the gossip if it was not +// previously known +func TestPushGossipE2E(t *testing.T) { + t.SkipNow() + + require := require.New(t) + + // tx known by both the sender and the receiver which should not be + // forwarded + knownTx := &testTx{id: ids.GenerateTestID()} + + log := logging.NoLog{} + bloom, err := NewBloomFilter(prometheus.NewRegistry(), "", 100, 0.01, 0.05) + require.NoError(err) + set := &testSet{ + txs: make(map[ids.ID]*testTx), + bloom: bloom, + } + require.NoError(set.Add(knownTx)) + + forwarder := &common.FakeSender{ + SentAppGossip: make(chan []byte, 1), + } + forwarderNetwork, err := p2p.NewNetwork(log, forwarder, prometheus.NewRegistry(), "") + require.NoError(err) + handlerID := uint64(123) + client := forwarderNetwork.NewClient(handlerID) + + metrics, err := NewMetrics(prometheus.NewRegistry(), "") + require.NoError(err) + marshaller := testMarshaller{} + forwarderGossiper := NewPushGossiper[*testTx]( + marshaller, + client, + metrics, + units.MiB, + ) + + handler := NewHandler[*testTx]( + log, + marshaller, + forwarderGossiper, + set, + metrics, + 0, + ) + require.NoError(err) + require.NoError(forwarderNetwork.AddHandler(handlerID, handler)) + + issuer := &common.FakeSender{ + SentAppGossip: make(chan []byte, 1), + } + issuerNetwork, err := p2p.NewNetwork(log, issuer, prometheus.NewRegistry(), "") + require.NoError(err) + issuerClient := issuerNetwork.NewClient(handlerID) + require.NoError(err) + 
issuerGossiper := NewPushGossiper[*testTx]( + marshaller, + issuerClient, + metrics, + units.MiB, + ) + + want := []*testTx{ + {id: ids.GenerateTestID()}, + {id: ids.GenerateTestID()}, + {id: ids.GenerateTestID()}, + } + + // gossip both some unseen txs and one the receiver already knows about + var gossiped []*testTx + gossiped = append(gossiped, want...) + gossiped = append(gossiped, knownTx) + + issuerGossiper.Add(gossiped...) + addedToSet := make([]*testTx, 0, len(want)) + set.onAdd = func(tx *testTx) { + addedToSet = append(addedToSet, tx) + } + + ctx := context.Background() + require.NoError(issuerGossiper.Gossip(ctx)) + + // make sure that we only add new txs someone gossips to us + require.NoError(forwarderNetwork.AppGossip(ctx, ids.EmptyNodeID, <-issuer.SentAppGossip)) + require.Equal(want, addedToSet) + + // make sure that we only forward txs we have not already seen before + forwardedBytes := <-forwarder.SentAppGossip + forwardedMsg := &sdk.PushGossip{} + require.NoError(proto.Unmarshal(forwardedBytes[1:], forwardedMsg)) + require.Len(forwardedMsg.Gossip, len(want)) + + gotForwarded := make([]*testTx, 0, len(addedToSet)) + + for _, bytes := range forwardedMsg.Gossip { + tx, err := marshaller.UnmarshalGossip(bytes) + require.NoError(err) + gotForwarded = append(gotForwarded, tx) + } + + require.Equal(want, gotForwarded) +} + +type testValidatorSet struct { + validators set.Set[ids.NodeID] +} + +func (t testValidatorSet) Has(_ context.Context, nodeID ids.NodeID) bool { + return t.validators.Contains(nodeID) +} diff --git a/avalanchego/network/p2p/gossip/gossipable.go b/avalanchego/network/p2p/gossip/gossipable.go new file mode 100644 index 00000000..238c62b4 --- /dev/null +++ b/avalanchego/network/p2p/gossip/gossipable.go @@ -0,0 +1,29 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+
+package gossip
+
+import "github.com/ava-labs/avalanchego/ids"
+
+// Gossipable is an item that can be gossiped across the network
+type Gossipable interface {
+	GossipID() ids.ID
+}
+
+// Marshaller handles parsing logic for a concrete Gossipable type
+type Marshaller[T Gossipable] interface {
+	MarshalGossip(T) ([]byte, error)
+	UnmarshalGossip([]byte) (T, error)
+}
+
+// Set holds a set of known Gossipable items
+type Set[T Gossipable] interface {
+	// Add adds a Gossipable to the set. Returns an error if gossipable was not
+	// added.
+	Add(gossipable T) error
+	// Iterate iterates over elements until [f] returns false
+	Iterate(f func(gossipable T) bool)
+	// GetFilter returns the byte representation of bloom filter and its
+	// corresponding salt.
+	GetFilter() (bloom []byte, salt []byte)
+}
diff --git a/avalanchego/network/p2p/gossip/handler.go b/avalanchego/network/p2p/gossip/handler.go
new file mode 100644
index 00000000..4df99bd1
--- /dev/null
+++ b/avalanchego/network/p2p/gossip/handler.go
@@ -0,0 +1,143 @@
+// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved.
+// See the file LICENSE for licensing terms.
+ +package gossip + +import ( + "context" + "fmt" + "time" + + "go.uber.org/zap" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/network/p2p" + "github.com/ava-labs/avalanchego/utils/bloom" + "github.com/ava-labs/avalanchego/utils/logging" +) + +var _ p2p.Handler = (*Handler[*testTx])(nil) + +func NewHandler[T Gossipable]( + log logging.Logger, + marshaller Marshaller[T], + accumulator Accumulator[T], + set Set[T], + metrics Metrics, + targetResponseSize int, +) *Handler[T] { + return &Handler[T]{ + Handler: p2p.NoOpHandler{}, + log: log, + marshaller: marshaller, + accumulator: accumulator, + set: set, + metrics: metrics, + targetResponseSize: targetResponseSize, + } +} + +type Handler[T Gossipable] struct { + p2p.Handler + marshaller Marshaller[T] + accumulator Accumulator[T] + log logging.Logger + set Set[T] + metrics Metrics + targetResponseSize int +} + +func (h Handler[T]) AppRequest(_ context.Context, _ ids.NodeID, _ time.Time, requestBytes []byte) ([]byte, error) { + filter, salt, err := ParseAppRequest(requestBytes) + if err != nil { + return nil, err + } + + responseSize := 0 + gossipBytes := make([][]byte, 0) + h.set.Iterate(func(gossipable T) bool { + gossipID := gossipable.GossipID() + + // filter out what the requesting peer already knows about + if bloom.Contains(filter, gossipID[:], salt[:]) { + return true + } + + var bytes []byte + bytes, err = h.marshaller.MarshalGossip(gossipable) + if err != nil { + return false + } + + // check that this doesn't exceed our maximum configured target response + // size + gossipBytes = append(gossipBytes, bytes) + responseSize += len(bytes) + + return responseSize <= h.targetResponseSize + }) + + if err != nil { + return nil, err + } + + sentCountMetric, err := h.metrics.sentCount.GetMetricWith(pullLabels) + if err != nil { + return nil, fmt.Errorf("failed to get sent count metric: %w", err) + } + + sentBytesMetric, err := h.metrics.sentBytes.GetMetricWith(pullLabels) + if err != nil 
{ + return nil, fmt.Errorf("failed to get sent bytes metric: %w", err) + } + + sentCountMetric.Add(float64(len(gossipBytes))) + sentBytesMetric.Add(float64(responseSize)) + + return MarshalAppResponse(gossipBytes) +} + +func (h Handler[_]) AppGossip(_ context.Context, nodeID ids.NodeID, gossipBytes []byte) { + gossip, err := ParseAppGossip(gossipBytes) + if err != nil { + h.log.Debug("failed to unmarshal gossip", zap.Error(err)) + return + } + + receivedBytes := 0 + for _, bytes := range gossip { + receivedBytes += len(bytes) + gossipable, err := h.marshaller.UnmarshalGossip(bytes) + if err != nil { + h.log.Debug("failed to unmarshal gossip", + zap.Stringer("nodeID", nodeID), + zap.Error(err), + ) + continue + } + + if err := h.set.Add(gossipable); err != nil { + h.log.Debug( + "failed to add gossip to the known set", + zap.Stringer("nodeID", nodeID), + zap.Stringer("id", gossipable.GossipID()), + zap.Error(err), + ) + } + } + + receivedCountMetric, err := h.metrics.receivedCount.GetMetricWith(pushLabels) + if err != nil { + h.log.Error("failed to get received count metric", zap.Error(err)) + return + } + + receivedBytesMetric, err := h.metrics.receivedBytes.GetMetricWith(pushLabels) + if err != nil { + h.log.Error("failed to get received bytes metric", zap.Error(err)) + return + } + + receivedCountMetric.Add(float64(len(gossip))) + receivedBytesMetric.Add(float64(receivedBytes)) +} diff --git a/avalanchego/network/p2p/gossip/message.go b/avalanchego/network/p2p/gossip/message.go new file mode 100644 index 00000000..47e6784e --- /dev/null +++ b/avalanchego/network/p2p/gossip/message.go @@ -0,0 +1,59 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+
+package gossip
+
+import (
+	"google.golang.org/protobuf/proto"
+
+	"github.com/ava-labs/avalanchego/ids"
+	"github.com/ava-labs/avalanchego/proto/pb/sdk"
+	"github.com/ava-labs/avalanchego/utils/bloom"
+)
+
+func MarshalAppRequest(filter, salt []byte) ([]byte, error) {
+	request := &sdk.PullGossipRequest{
+		Filter: filter,
+		Salt:   salt,
+	}
+	return proto.Marshal(request)
+}
+
+func ParseAppRequest(bytes []byte) (*bloom.ReadFilter, ids.ID, error) {
+	request := &sdk.PullGossipRequest{}
+	if err := proto.Unmarshal(bytes, request); err != nil {
+		return nil, ids.Empty, err
+	}
+
+	salt, err := ids.ToID(request.Salt)
+	if err != nil {
+		return nil, ids.Empty, err
+	}
+
+	filter, err := bloom.Parse(request.Filter)
+	return filter, salt, err
+}
+
+func MarshalAppResponse(gossip [][]byte) ([]byte, error) {
+	return proto.Marshal(&sdk.PullGossipResponse{
+		Gossip: gossip,
+	})
+}
+
+func ParseAppResponse(bytes []byte) ([][]byte, error) {
+	response := &sdk.PullGossipResponse{}
+	err := proto.Unmarshal(bytes, response)
+	return response.Gossip, err
+}
+
+func MarshalAppGossip(gossip [][]byte) ([]byte, error) {
+	return proto.Marshal(&sdk.PushGossip{
+		Gossip: gossip,
+	})
+}
+
+func ParseAppGossip(bytes []byte) ([][]byte, error) {
+	msg := &sdk.PushGossip{}
+	err := proto.Unmarshal(bytes, msg)
+	return msg.Gossip, err
+}
diff --git a/avalanchego/network/p2p/gossip/test_gossip.go b/avalanchego/network/p2p/gossip/test_gossip.go
new file mode 100644
index 00000000..03098399
--- /dev/null
+++ b/avalanchego/network/p2p/gossip/test_gossip.go
@@ -0,0 +1,69 @@
+// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved.
+// See the file LICENSE for licensing terms.
+
+package gossip
+
+import (
+	"fmt"
+
+	"github.com/ava-labs/avalanchego/ids"
+)
+
+var (
+	_ Gossipable          = (*testTx)(nil)
+	_ Set[*testTx]        = (*testSet)(nil)
+	_ Marshaller[*testTx] = (*testMarshaller)(nil)
+)
+
+type testTx struct {
+	id ids.ID
+}
+
+func (t *testTx) GossipID() ids.ID {
+	return t.id
+}
+
+type testMarshaller struct{}
+
+func (testMarshaller) MarshalGossip(tx *testTx) ([]byte, error) {
+	return tx.id[:], nil
+}
+
+func (testMarshaller) UnmarshalGossip(bytes []byte) (*testTx, error) {
+	id, err := ids.ToID(bytes)
+	return &testTx{
+		id: id,
+	}, err
+}
+
+type testSet struct {
+	txs   map[ids.ID]*testTx
+	bloom *BloomFilter
+	onAdd func(tx *testTx)
+}
+
+func (t *testSet) Add(gossipable *testTx) error {
+	if _, ok := t.txs[gossipable.id]; ok {
+		return fmt.Errorf("%s already present", gossipable.id)
+	}
+
+	t.txs[gossipable.id] = gossipable
+	t.bloom.Add(gossipable)
+	if t.onAdd != nil {
+		t.onAdd(gossipable)
+	}
+
+	return nil
+}
+
+func (t *testSet) Iterate(f func(gossipable *testTx) bool) {
+	for _, tx := range t.txs {
+		if !f(tx) {
+			return
+		}
+	}
+}
+
+func (t *testSet) GetFilter() ([]byte, []byte) {
+	return t.bloom.Marshal()
+}
diff --git a/avalanchego/network/p2p/handler.go b/avalanchego/network/p2p/handler.go
new file mode 100644
index 00000000..3ff4de29
--- /dev/null
+++ b/avalanchego/network/p2p/handler.go
@@ -0,0 +1,184 @@
+// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved.
+// See the file LICENSE for licensing terms.
+ +package p2p + +import ( + "context" + "errors" + "time" + + "go.uber.org/zap" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/message" + "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/utils/logging" +) + +var ( + ErrNotValidator = errors.New("not a validator") + + _ Handler = (*NoOpHandler)(nil) + _ Handler = (*TestHandler)(nil) + _ Handler = (*ValidatorHandler)(nil) +) + +// Handler is the server-side logic for virtual machine application protocols. +type Handler interface { + // AppGossip is called when handling an AppGossip message. + AppGossip( + ctx context.Context, + nodeID ids.NodeID, + gossipBytes []byte, + ) + // AppRequest is called when handling an AppRequest message. + // Returns the bytes for the response corresponding to [requestBytes] + AppRequest( + ctx context.Context, + nodeID ids.NodeID, + deadline time.Time, + requestBytes []byte, + ) ([]byte, error) + // CrossChainAppRequest is called when handling a CrossChainAppRequest + // message. 
+ // Returns the bytes for the response corresponding to [requestBytes] + CrossChainAppRequest( + ctx context.Context, + chainID ids.ID, + deadline time.Time, + requestBytes []byte, + ) ([]byte, error) +} + +// NoOpHandler drops all messages +type NoOpHandler struct{} + +func (NoOpHandler) AppGossip(context.Context, ids.NodeID, []byte) {} + +func (NoOpHandler) AppRequest(context.Context, ids.NodeID, time.Time, []byte) ([]byte, error) { + return nil, nil +} + +func (NoOpHandler) CrossChainAppRequest(context.Context, ids.ID, time.Time, []byte) ([]byte, error) { + return nil, nil +} + +func NewValidatorHandler( + handler Handler, + validatorSet ValidatorSet, + log logging.Logger, +) *ValidatorHandler { + return &ValidatorHandler{ + handler: handler, + validatorSet: validatorSet, + log: log, + } +} + +// ValidatorHandler drops messages from non-validators +type ValidatorHandler struct { + handler Handler + validatorSet ValidatorSet + log logging.Logger +} + +func (v ValidatorHandler) AppGossip(ctx context.Context, nodeID ids.NodeID, gossipBytes []byte) { + if !v.validatorSet.Has(ctx, nodeID) { + v.log.Debug( + "dropping message", + zap.Stringer("nodeID", nodeID), + zap.String("reason", "not a validator"), + ) + return + } + + v.handler.AppGossip(ctx, nodeID, gossipBytes) +} + +func (v ValidatorHandler) AppRequest(ctx context.Context, nodeID ids.NodeID, deadline time.Time, requestBytes []byte) ([]byte, error) { + if !v.validatorSet.Has(ctx, nodeID) { + return nil, ErrNotValidator + } + + return v.handler.AppRequest(ctx, nodeID, deadline, requestBytes) +} + +func (v ValidatorHandler) CrossChainAppRequest(ctx context.Context, chainID ids.ID, deadline time.Time, requestBytes []byte) ([]byte, error) { + return v.handler.CrossChainAppRequest(ctx, chainID, deadline, requestBytes) +} + +// responder automatically sends the response for a given request +type responder struct { + Handler + handlerID uint64 + log logging.Logger + sender common.AppSender +} + +// AppRequest calls 
the underlying handler and sends back the response to nodeID +func (r *responder) AppRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, deadline time.Time, request []byte) error { + appResponse, err := r.Handler.AppRequest(ctx, nodeID, deadline, request) + if err != nil { + r.log.Debug("failed to handle message", + zap.Stringer("messageOp", message.AppRequestOp), + zap.Stringer("nodeID", nodeID), + zap.Uint32("requestID", requestID), + zap.Time("deadline", deadline), + zap.Uint64("handlerID", r.handlerID), + zap.Binary("message", request), + ) + return nil + } + + return r.sender.SendAppResponse(ctx, nodeID, requestID, appResponse) +} + +// CrossChainAppRequest calls the underlying handler and sends back the response +// to chainID +func (r *responder) CrossChainAppRequest(ctx context.Context, chainID ids.ID, requestID uint32, deadline time.Time, request []byte) error { + appResponse, err := r.Handler.CrossChainAppRequest(ctx, chainID, deadline, request) + if err != nil { + r.log.Debug("failed to handle message", + zap.Stringer("messageOp", message.CrossChainAppRequestOp), + zap.Stringer("chainID", chainID), + zap.Uint32("requestID", requestID), + zap.Time("deadline", deadline), + zap.Uint64("handlerID", r.handlerID), + zap.Binary("message", request), + ) + return nil + } + + return r.sender.SendCrossChainAppResponse(ctx, chainID, requestID, appResponse) +} + +type TestHandler struct { + AppGossipF func(ctx context.Context, nodeID ids.NodeID, gossipBytes []byte) + AppRequestF func(ctx context.Context, nodeID ids.NodeID, deadline time.Time, requestBytes []byte) ([]byte, error) + CrossChainAppRequestF func(ctx context.Context, chainID ids.ID, deadline time.Time, requestBytes []byte) ([]byte, error) +} + +func (t TestHandler) AppGossip(ctx context.Context, nodeID ids.NodeID, gossipBytes []byte) { + if t.AppGossipF == nil { + return + } + + t.AppGossipF(ctx, nodeID, gossipBytes) +} + +func (t TestHandler) AppRequest(ctx context.Context, nodeID 
ids.NodeID, deadline time.Time, requestBytes []byte) ([]byte, error) { + if t.AppRequestF == nil { + return nil, nil + } + + return t.AppRequestF(ctx, nodeID, deadline, requestBytes) +} + +func (t TestHandler) CrossChainAppRequest(ctx context.Context, chainID ids.ID, deadline time.Time, requestBytes []byte) ([]byte, error) { + if t.CrossChainAppRequestF == nil { + return nil, nil + } + + return t.CrossChainAppRequestF(ctx, chainID, deadline, requestBytes) +} diff --git a/avalanchego/network/p2p/handler_test.go b/avalanchego/network/p2p/handler_test.go new file mode 100644 index 00000000..0633b70f --- /dev/null +++ b/avalanchego/network/p2p/handler_test.go @@ -0,0 +1,113 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package p2p + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/set" +) + +var _ ValidatorSet = (*testValidatorSet)(nil) + +type testValidatorSet struct { + validators set.Set[ids.NodeID] +} + +func (t testValidatorSet) Has(_ context.Context, nodeID ids.NodeID) bool { + return t.validators.Contains(nodeID) +} + +func TestValidatorHandlerAppGossip(t *testing.T) { + nodeID := ids.GenerateTestNodeID() + validatorSet := set.Of(nodeID) + + tests := []struct { + name string + validatorSet ValidatorSet + nodeID ids.NodeID + expected bool + }{ + { + name: "message dropped", + validatorSet: testValidatorSet{}, + nodeID: nodeID, + }, + { + name: "message handled", + validatorSet: testValidatorSet{ + validators: validatorSet, + }, + nodeID: nodeID, + expected: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + + called := false + handler := NewValidatorHandler( + &TestHandler{ + AppGossipF: func(context.Context, ids.NodeID, []byte) { + called = true + }, + }, + 
tt.validatorSet, + logging.NoLog{}, + ) + + handler.AppGossip(context.Background(), tt.nodeID, []byte("foobar")) + require.Equal(tt.expected, called) + }) + } +} + +func TestValidatorHandlerAppRequest(t *testing.T) { + nodeID := ids.GenerateTestNodeID() + validatorSet := set.Of(nodeID) + + tests := []struct { + name string + validatorSet ValidatorSet + nodeID ids.NodeID + expected error + }{ + { + name: "message dropped", + validatorSet: testValidatorSet{}, + nodeID: nodeID, + expected: ErrNotValidator, + }, + { + name: "message handled", + validatorSet: testValidatorSet{ + validators: validatorSet, + }, + nodeID: nodeID, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + + handler := NewValidatorHandler( + NoOpHandler{}, + tt.validatorSet, + logging.NoLog{}, + ) + + _, err := handler.AppRequest(context.Background(), tt.nodeID, time.Time{}, []byte("foobar")) + require.ErrorIs(err, tt.expected) + }) + } +} diff --git a/avalanchego/network/p2p/network.go b/avalanchego/network/p2p/network.go new file mode 100644 index 00000000..a98579c4 --- /dev/null +++ b/avalanchego/network/p2p/network.go @@ -0,0 +1,287 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package p2p + +import ( + "context" + "encoding/binary" + "strconv" + "sync" + "time" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/version" +) + +var ( + _ validators.Connector = (*Network)(nil) + _ common.AppHandler = (*Network)(nil) + _ NodeSampler = (*peerSampler)(nil) + + handlerLabel = "handlerID" + labelNames = []string{handlerLabel} +) + +// ClientOption configures Client +type ClientOption interface { + apply(options *clientOptions) +} + +type clientOptionFunc func(options *clientOptions) + +func (o clientOptionFunc) apply(options *clientOptions) { + o(options) +} + +// WithValidatorSampling configures Client.AppRequestAny to sample validators +func WithValidatorSampling(validators *Validators) ClientOption { + return clientOptionFunc(func(options *clientOptions) { + options.nodeSampler = validators + }) +} + +// clientOptions holds client-configurable values +type clientOptions struct { + // nodeSampler is used to select nodes to route Client.AppRequestAny to + nodeSampler NodeSampler +} + +// NewNetwork returns an instance of Network +func NewNetwork( + log logging.Logger, + sender common.AppSender, + registerer prometheus.Registerer, + namespace string, +) (*Network, error) { + metrics := metrics{ + appRequestTime: prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Name: "app_request_time", + Help: "app request time (ns)", + }, labelNames), + appRequestCount: prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Name: "app_request_count", + Help: "app request count (n)", + }, labelNames), + appResponseTime: prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + 
Name: "app_response_time", + Help: "app response time (ns)", + }, labelNames), + appResponseCount: prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Name: "app_response_count", + Help: "app response count (n)", + }, labelNames), + appRequestFailedTime: prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Name: "app_request_failed_time", + Help: "app request failed time (ns)", + }, labelNames), + appRequestFailedCount: prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Name: "app_request_failed_count", + Help: "app request failed count (n)", + }, labelNames), + appGossipTime: prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Name: "app_gossip_time", + Help: "app gossip time (ns)", + }, labelNames), + appGossipCount: prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Name: "app_gossip_count", + Help: "app gossip count (n)", + }, labelNames), + crossChainAppRequestTime: prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Name: "cross_chain_app_request_time", + Help: "cross chain app request time (ns)", + }, labelNames), + crossChainAppRequestCount: prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Name: "cross_chain_app_request_count", + Help: "cross chain app request count (n)", + }, labelNames), + crossChainAppResponseTime: prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Name: "cross_chain_app_response_time", + Help: "cross chain app response time (ns)", + }, labelNames), + crossChainAppResponseCount: prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Name: "cross_chain_app_response_count", + Help: "cross chain app response count (n)", + }, labelNames), + crossChainAppRequestFailedTime: prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Name: "cross_chain_app_request_failed_time", + Help: "cross chain app request failed time 
(ns)", + }, labelNames), + crossChainAppRequestFailedCount: prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Name: "cross_chain_app_request_failed_count", + Help: "cross chain app request failed count (n)", + }, labelNames), + } + + err := utils.Err( + registerer.Register(metrics.appRequestTime), + registerer.Register(metrics.appRequestCount), + registerer.Register(metrics.appResponseTime), + registerer.Register(metrics.appResponseCount), + registerer.Register(metrics.appRequestFailedTime), + registerer.Register(metrics.appRequestFailedCount), + registerer.Register(metrics.appGossipTime), + registerer.Register(metrics.appGossipCount), + registerer.Register(metrics.crossChainAppRequestTime), + registerer.Register(metrics.crossChainAppRequestCount), + registerer.Register(metrics.crossChainAppResponseTime), + registerer.Register(metrics.crossChainAppResponseCount), + registerer.Register(metrics.crossChainAppRequestFailedTime), + registerer.Register(metrics.crossChainAppRequestFailedCount), + ) + if err != nil { + return nil, err + } + + return &Network{ + Peers: &Peers{}, + log: log, + sender: sender, + router: newRouter(log, sender, metrics), + }, nil +} + +// Network exposes networking state and supports building p2p application +// protocols +type Network struct { + Peers *Peers + + log logging.Logger + sender common.AppSender + + router *router +} + +func (n *Network) AppRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, deadline time.Time, request []byte) error { + return n.router.AppRequest(ctx, nodeID, requestID, deadline, request) +} + +func (n *Network) AppResponse(ctx context.Context, nodeID ids.NodeID, requestID uint32, response []byte) error { + return n.router.AppResponse(ctx, nodeID, requestID, response) +} + +func (n *Network) AppRequestFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32, appErr *common.AppError) error { + return n.router.AppRequestFailed(ctx, nodeID, requestID, appErr) +} + +func (n 
*Network) AppGossip(ctx context.Context, nodeID ids.NodeID, msg []byte) error { + return n.router.AppGossip(ctx, nodeID, msg) +} + +func (n *Network) CrossChainAppRequest(ctx context.Context, chainID ids.ID, requestID uint32, deadline time.Time, request []byte) error { + return n.router.CrossChainAppRequest(ctx, chainID, requestID, deadline, request) +} + +func (n *Network) CrossChainAppResponse(ctx context.Context, chainID ids.ID, requestID uint32, response []byte) error { + return n.router.CrossChainAppResponse(ctx, chainID, requestID, response) +} + +func (n *Network) CrossChainAppRequestFailed(ctx context.Context, chainID ids.ID, requestID uint32, appErr *common.AppError) error { + return n.router.CrossChainAppRequestFailed(ctx, chainID, requestID, appErr) +} + +func (n *Network) Connected(_ context.Context, nodeID ids.NodeID, _ *version.Application) error { + n.Peers.add(nodeID) + return nil +} + +func (n *Network) Disconnected(_ context.Context, nodeID ids.NodeID) error { + n.Peers.remove(nodeID) + return nil +} + +// NewClient returns a Client that can be used to send messages for the +// corresponding protocol. 
+func (n *Network) NewClient(handlerID uint64, options ...ClientOption) *Client { + client := &Client{ + handlerID: handlerID, + handlerIDStr: strconv.FormatUint(handlerID, 10), + handlerPrefix: ProtocolPrefix(handlerID), + sender: n.sender, + router: n.router, + options: &clientOptions{ + nodeSampler: &peerSampler{ + peers: n.Peers, + }, + }, + } + + for _, option := range options { + option.apply(client.options) + } + + return client +} + +// AddHandler reserves an identifier for an application protocol +func (n *Network) AddHandler(handlerID uint64, handler Handler) error { + return n.router.addHandler(handlerID, handler) +} + +// Peers contains metadata about the current set of connected peers +type Peers struct { + lock sync.RWMutex + set set.SampleableSet[ids.NodeID] +} + +func (p *Peers) add(nodeID ids.NodeID) { + p.lock.Lock() + defer p.lock.Unlock() + + p.set.Add(nodeID) +} + +func (p *Peers) remove(nodeID ids.NodeID) { + p.lock.Lock() + defer p.lock.Unlock() + + p.set.Remove(nodeID) +} + +func (p *Peers) has(nodeID ids.NodeID) bool { + p.lock.RLock() + defer p.lock.RUnlock() + + return p.set.Contains(nodeID) +} + +// Sample returns a pseudo-random sample of up to limit Peers +func (p *Peers) Sample(limit int) []ids.NodeID { + p.lock.RLock() + defer p.lock.RUnlock() + + return p.set.Sample(limit) +} + +type peerSampler struct { + peers *Peers +} + +func (p peerSampler) Sample(_ context.Context, limit int) []ids.NodeID { + return p.peers.Sample(limit) +} + +func ProtocolPrefix(handlerID uint64) []byte { + return binary.AppendUvarint(nil, handlerID) +} diff --git a/avalanchego/network/p2p/network_test.go b/avalanchego/network/p2p/network_test.go new file mode 100644 index 00000000..1cb4e70f --- /dev/null +++ b/avalanchego/network/p2p/network_test.go @@ -0,0 +1,629 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package p2p + +import ( + "context" + "testing" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/version" +) + +const ( + handlerID = 123 + handlerPrefix = byte(handlerID) +) + +var errFoo = &common.AppError{ + Code: 123, + Message: "foo", +} + +func TestMessageRouting(t *testing.T) { + require := require.New(t) + ctx := context.Background() + wantNodeID := ids.GenerateTestNodeID() + wantChainID := ids.GenerateTestID() + wantMsg := []byte("message") + + var appGossipCalled, appRequestCalled, crossChainAppRequestCalled bool + testHandler := &TestHandler{ + AppGossipF: func(_ context.Context, nodeID ids.NodeID, msg []byte) { + appGossipCalled = true + require.Equal(wantNodeID, nodeID) + require.Equal(wantMsg, msg) + }, + AppRequestF: func(_ context.Context, nodeID ids.NodeID, _ time.Time, msg []byte) ([]byte, error) { + appRequestCalled = true + require.Equal(wantNodeID, nodeID) + require.Equal(wantMsg, msg) + return nil, nil + }, + CrossChainAppRequestF: func(_ context.Context, chainID ids.ID, _ time.Time, msg []byte) ([]byte, error) { + crossChainAppRequestCalled = true + require.Equal(wantChainID, chainID) + require.Equal(wantMsg, msg) + return nil, nil + }, + } + + sender := &common.FakeSender{ + SentAppGossip: make(chan []byte, 1), + SentAppRequest: make(chan []byte, 1), + SentCrossChainAppRequest: make(chan []byte, 1), + } + + network, err := NewNetwork(logging.NoLog{}, sender, prometheus.NewRegistry(), "") + require.NoError(err) + require.NoError(network.AddHandler(1, testHandler)) + client := network.NewClient(1) + + require.NoError(client.AppGossip(ctx, wantMsg)) + require.NoError(network.AppGossip(ctx, wantNodeID, 
<-sender.SentAppGossip)) + require.True(appGossipCalled) + + require.NoError(client.AppRequest(ctx, set.Of(ids.EmptyNodeID), wantMsg, func(context.Context, ids.NodeID, []byte, error) {})) + require.NoError(network.AppRequest(ctx, wantNodeID, 1, time.Time{}, <-sender.SentAppRequest)) + require.True(appRequestCalled) + + require.NoError(client.CrossChainAppRequest(ctx, ids.Empty, wantMsg, func(context.Context, ids.ID, []byte, error) {})) + require.NoError(network.CrossChainAppRequest(ctx, wantChainID, 1, time.Time{}, <-sender.SentCrossChainAppRequest)) + require.True(crossChainAppRequestCalled) +} + +// Tests that the Client prefixes messages with the handler prefix +func TestClientPrefixesMessages(t *testing.T) { + require := require.New(t) + ctx := context.Background() + + sender := common.FakeSender{ + SentAppRequest: make(chan []byte, 1), + SentAppGossip: make(chan []byte, 1), + SentAppGossipSpecific: make(chan []byte, 1), + SentCrossChainAppRequest: make(chan []byte, 1), + } + + network, err := NewNetwork(logging.NoLog{}, sender, prometheus.NewRegistry(), "") + require.NoError(err) + require.NoError(network.Connected(ctx, ids.EmptyNodeID, nil)) + client := network.NewClient(handlerID) + + want := []byte("message") + + require.NoError(client.AppRequest( + ctx, + set.Of(ids.EmptyNodeID), + want, + func(context.Context, ids.NodeID, []byte, error) {}, + )) + gotAppRequest := <-sender.SentAppRequest + require.Equal(handlerPrefix, gotAppRequest[0]) + require.Equal(want, gotAppRequest[1:]) + + require.NoError(client.AppRequestAny( + ctx, + want, + func(context.Context, ids.NodeID, []byte, error) {}, + )) + gotAppRequest = <-sender.SentAppRequest + require.Equal(handlerPrefix, gotAppRequest[0]) + require.Equal(want, gotAppRequest[1:]) + + require.NoError(client.CrossChainAppRequest( + ctx, + ids.Empty, + want, + func(context.Context, ids.ID, []byte, error) {}, + )) + gotCrossChainAppRequest := <-sender.SentCrossChainAppRequest + require.Equal(handlerPrefix, 
gotCrossChainAppRequest[0]) + require.Equal(want, gotCrossChainAppRequest[1:]) + + require.NoError(client.AppGossip(ctx, want)) + gotAppGossip := <-sender.SentAppGossip + require.Equal(handlerPrefix, gotAppGossip[0]) + require.Equal(want, gotAppGossip[1:]) + + require.NoError(client.AppGossipSpecific(ctx, set.Of(ids.EmptyNodeID), want)) + gotAppGossip = <-sender.SentAppGossipSpecific + require.Equal(handlerPrefix, gotAppGossip[0]) + require.Equal(want, gotAppGossip[1:]) +} + +// Tests that the Client callback is called on a successful response +func TestAppRequestResponse(t *testing.T) { + require := require.New(t) + ctx := context.Background() + + sender := common.FakeSender{ + SentAppRequest: make(chan []byte, 1), + } + network, err := NewNetwork(logging.NoLog{}, sender, prometheus.NewRegistry(), "") + require.NoError(err) + client := network.NewClient(handlerID) + + wantResponse := []byte("response") + wantNodeID := ids.GenerateTestNodeID() + done := make(chan struct{}) + + callback := func(_ context.Context, gotNodeID ids.NodeID, gotResponse []byte, err error) { + require.Equal(wantNodeID, gotNodeID) + require.NoError(err) + require.Equal(wantResponse, gotResponse) + + close(done) + } + + want := []byte("request") + require.NoError(client.AppRequest(ctx, set.Of(wantNodeID), want, callback)) + got := <-sender.SentAppRequest + require.Equal(handlerPrefix, got[0]) + require.Equal(want, got[1:]) + + require.NoError(network.AppResponse(ctx, wantNodeID, 1, wantResponse)) + <-done +} + +// Tests that the Client callback is given an error if the request fails +func TestAppRequestFailed(t *testing.T) { + require := require.New(t) + ctx := context.Background() + + sender := common.FakeSender{ + SentAppRequest: make(chan []byte, 1), + } + network, err := NewNetwork(logging.NoLog{}, sender, prometheus.NewRegistry(), "") + require.NoError(err) + client := network.NewClient(handlerID) + + wantNodeID := ids.GenerateTestNodeID() + done := make(chan struct{}) + + callback := 
func(_ context.Context, gotNodeID ids.NodeID, gotResponse []byte, err error) { + require.Equal(wantNodeID, gotNodeID) + require.ErrorIs(err, errFoo) + require.Nil(gotResponse) + + close(done) + } + + require.NoError(client.AppRequest(ctx, set.Of(wantNodeID), []byte("request"), callback)) + <-sender.SentAppRequest + + require.NoError(network.AppRequestFailed(ctx, wantNodeID, 1, errFoo)) + <-done +} + +// Tests that the Client callback is called on a successful response +func TestCrossChainAppRequestResponse(t *testing.T) { + require := require.New(t) + ctx := context.Background() + + sender := common.FakeSender{ + SentCrossChainAppRequest: make(chan []byte, 1), + } + network, err := NewNetwork(logging.NoLog{}, sender, prometheus.NewRegistry(), "") + require.NoError(err) + client := network.NewClient(handlerID) + + wantChainID := ids.GenerateTestID() + wantResponse := []byte("response") + done := make(chan struct{}) + + callback := func(_ context.Context, gotChainID ids.ID, gotResponse []byte, err error) { + require.Equal(wantChainID, gotChainID) + require.NoError(err) + require.Equal(wantResponse, gotResponse) + + close(done) + } + + require.NoError(client.CrossChainAppRequest(ctx, wantChainID, []byte("request"), callback)) + <-sender.SentCrossChainAppRequest + + require.NoError(network.CrossChainAppResponse(ctx, wantChainID, 1, wantResponse)) + <-done +} + +// Tests that the Client callback is given an error if the request fails +func TestCrossChainAppRequestFailed(t *testing.T) { + require := require.New(t) + ctx := context.Background() + + sender := common.FakeSender{ + SentCrossChainAppRequest: make(chan []byte, 1), + } + network, err := NewNetwork(logging.NoLog{}, sender, prometheus.NewRegistry(), "") + require.NoError(err) + client := network.NewClient(handlerID) + + wantChainID := ids.GenerateTestID() + done := make(chan struct{}) + + callback := func(_ context.Context, gotChainID ids.ID, gotResponse []byte, err error) { + require.Equal(wantChainID, 
gotChainID) + require.ErrorIs(err, errFoo) + require.Nil(gotResponse) + + close(done) + } + + require.NoError(client.CrossChainAppRequest(ctx, wantChainID, []byte("request"), callback)) + <-sender.SentCrossChainAppRequest + + require.NoError(network.CrossChainAppRequestFailed(ctx, wantChainID, 1, errFoo)) + <-done +} + +// Messages for unregistered handlers should be dropped gracefully +func TestMessageForUnregisteredHandler(t *testing.T) { + tests := []struct { + name string + msg []byte + }{ + { + name: "nil", + msg: nil, + }, + { + name: "empty", + msg: []byte{}, + }, + { + name: "non-empty", + msg: []byte("foobar"), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + ctx := context.Background() + handler := &TestHandler{ + AppGossipF: func(context.Context, ids.NodeID, []byte) { + require.Fail("should not be called") + }, + AppRequestF: func(context.Context, ids.NodeID, time.Time, []byte) ([]byte, error) { + require.Fail("should not be called") + return nil, nil + }, + CrossChainAppRequestF: func(context.Context, ids.ID, time.Time, []byte) ([]byte, error) { + require.Fail("should not be called") + return nil, nil + }, + } + network, err := NewNetwork(logging.NoLog{}, nil, prometheus.NewRegistry(), "") + require.NoError(err) + require.NoError(network.AddHandler(handlerID, handler)) + + require.NoError(network.AppRequest(ctx, ids.EmptyNodeID, 0, time.Time{}, tt.msg)) + require.NoError(network.AppGossip(ctx, ids.EmptyNodeID, tt.msg)) + require.NoError(network.CrossChainAppRequest(ctx, ids.Empty, 0, time.Time{}, tt.msg)) + }) + } +} + +// A response or timeout for a request we never made should return an error +func TestResponseForUnrequestedRequest(t *testing.T) { + tests := []struct { + name string + msg []byte + }{ + { + name: "nil", + msg: nil, + }, + { + name: "empty", + msg: []byte{}, + }, + { + name: "non-empty", + msg: []byte("foobar"), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t 
*testing.T) { + require := require.New(t) + ctx := context.Background() + handler := &TestHandler{ + AppGossipF: func(context.Context, ids.NodeID, []byte) { + require.Fail("should not be called") + }, + AppRequestF: func(context.Context, ids.NodeID, time.Time, []byte) ([]byte, error) { + require.Fail("should not be called") + return nil, nil + }, + CrossChainAppRequestF: func(context.Context, ids.ID, time.Time, []byte) ([]byte, error) { + require.Fail("should not be called") + return nil, nil + }, + } + network, err := NewNetwork(logging.NoLog{}, nil, prometheus.NewRegistry(), "") + require.NoError(err) + require.NoError(network.AddHandler(handlerID, handler)) + + err = network.AppResponse(ctx, ids.EmptyNodeID, 0, []byte("foobar")) + require.ErrorIs(err, ErrUnrequestedResponse) + err = network.AppRequestFailed(ctx, ids.EmptyNodeID, 0, common.ErrTimeout) + require.ErrorIs(err, ErrUnrequestedResponse) + err = network.CrossChainAppResponse(ctx, ids.Empty, 0, []byte("foobar")) + require.ErrorIs(err, ErrUnrequestedResponse) + err = network.CrossChainAppRequestFailed(ctx, ids.Empty, 0, common.ErrTimeout) + + require.ErrorIs(err, ErrUnrequestedResponse) + }) + } +} + +// It's possible for the request id to overflow and wrap around. +// If there are still pending requests with the same request id, we should +// not attempt to issue another request until the previous one has cleared. 
+func TestAppRequestDuplicateRequestIDs(t *testing.T) { + require := require.New(t) + ctx := context.Background() + + sender := &common.FakeSender{ + SentAppRequest: make(chan []byte, 1), + } + + network, err := NewNetwork(logging.NoLog{}, sender, prometheus.NewRegistry(), "") + require.NoError(err) + client := network.NewClient(0x1) + + noOpCallback := func(context.Context, ids.NodeID, []byte, error) {} + // create a request that never gets a response + network.router.requestID = 1 + require.NoError(client.AppRequest(ctx, set.Of(ids.EmptyNodeID), []byte{}, noOpCallback)) + <-sender.SentAppRequest + + // force the network to use the same requestID + network.router.requestID = 1 + err = client.AppRequest(context.Background(), set.Of(ids.EmptyNodeID), []byte{}, noOpCallback) + require.ErrorIs(err, ErrRequestPending) +} + +// Sample should always return up to [limit] peers, and less if fewer than +// [limit] peers are available. +func TestPeersSample(t *testing.T) { + nodeID1 := ids.GenerateTestNodeID() + nodeID2 := ids.GenerateTestNodeID() + nodeID3 := ids.GenerateTestNodeID() + + tests := []struct { + name string + connected set.Set[ids.NodeID] + disconnected set.Set[ids.NodeID] + limit int + }{ + { + name: "no peers", + limit: 1, + }, + { + name: "one peer connected", + connected: set.Of(nodeID1), + limit: 1, + }, + { + name: "multiple peers connected", + connected: set.Of(nodeID1, nodeID2, nodeID3), + limit: 1, + }, + { + name: "peer connects and disconnects - 1", + connected: set.Of(nodeID1), + disconnected: set.Of(nodeID1), + limit: 1, + }, + { + name: "peer connects and disconnects - 2", + connected: set.Of(nodeID1, nodeID2), + disconnected: set.Of(nodeID2), + limit: 1, + }, + { + name: "peer connects and disconnects - 3", + connected: set.Of(nodeID1, nodeID2, nodeID3), + disconnected: set.Of(nodeID1, nodeID2), + limit: 1, + }, + { + name: "less than limit peers", + connected: set.Of(nodeID1, nodeID2, nodeID3), + limit: 4, + }, + { + name: "limit peers", 
connected: set.Of(nodeID1, nodeID2, nodeID3), + limit: 3, + }, + { + name: "more than limit peers", + connected: set.Of(nodeID1, nodeID2, nodeID3), + limit: 2, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + + network, err := NewNetwork(logging.NoLog{}, &common.FakeSender{}, prometheus.NewRegistry(), "") + require.NoError(err) + + for connected := range tt.connected { + require.NoError(network.Connected(context.Background(), connected, nil)) + } + + for disconnected := range tt.disconnected { + require.NoError(network.Disconnected(context.Background(), disconnected)) + } + + sampleable := set.Set[ids.NodeID]{} + sampleable.Union(tt.connected) + sampleable.Difference(tt.disconnected) + + sampled := network.Peers.Sample(tt.limit) + require.Len(sampled, min(tt.limit, len(sampleable))) + require.Subset(sampleable, sampled) + }) + } +} + +func TestAppRequestAnyNodeSelection(t *testing.T) { + tests := []struct { + name string + peers []ids.NodeID + expected error + }{ + { + name: "no peers", + expected: ErrNoPeers, + }, + { + name: "has peers", + peers: []ids.NodeID{ids.GenerateTestNodeID()}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + + sent := set.Set[ids.NodeID]{} + sender := &common.SenderTest{ + SendAppRequestF: func(_ context.Context, nodeIDs set.Set[ids.NodeID], _ uint32, _ []byte) error { + sent = nodeIDs + return nil + }, + } + + n, err := NewNetwork(logging.NoLog{}, sender, prometheus.NewRegistry(), "") + require.NoError(err) + for _, peer := range tt.peers { + require.NoError(n.Connected(context.Background(), peer, &version.Application{})) + } + + client := n.NewClient(1) + + err = client.AppRequestAny(context.Background(), []byte("foobar"), nil) + require.ErrorIs(err, tt.expected) + require.Subset(tt.peers, sent.List()) + }) + } +} + +func TestNodeSamplerClientOption(t *testing.T) { + nodeID0 := ids.GenerateTestNodeID() + nodeID1 := 
ids.GenerateTestNodeID() + nodeID2 := ids.GenerateTestNodeID() + + tests := []struct { + name string + peers []ids.NodeID + option func(t *testing.T, n *Network) ClientOption + expected []ids.NodeID + expectedErr error + }{ + { + name: "default", + peers: []ids.NodeID{nodeID0, nodeID1, nodeID2}, + option: func(*testing.T, *Network) ClientOption { + return clientOptionFunc(func(*clientOptions) {}) + }, + expected: []ids.NodeID{nodeID0, nodeID1, nodeID2}, + }, + { + name: "validator connected", + peers: []ids.NodeID{nodeID0, nodeID1}, + option: func(_ *testing.T, n *Network) ClientOption { + state := &validators.TestState{ + GetCurrentHeightF: func(context.Context) (uint64, error) { + return 0, nil + }, + GetValidatorSetF: func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { + return map[ids.NodeID]*validators.GetValidatorOutput{ + nodeID1: nil, + }, nil + }, + } + + validators := NewValidators(n.Peers, n.log, ids.Empty, state, 0) + return WithValidatorSampling(validators) + }, + expected: []ids.NodeID{nodeID1}, + }, + { + name: "validator disconnected", + peers: []ids.NodeID{nodeID0}, + option: func(_ *testing.T, n *Network) ClientOption { + state := &validators.TestState{ + GetCurrentHeightF: func(context.Context) (uint64, error) { + return 0, nil + }, + GetValidatorSetF: func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { + return map[ids.NodeID]*validators.GetValidatorOutput{ + nodeID1: nil, + }, nil + }, + } + + validators := NewValidators(n.Peers, n.log, ids.Empty, state, 0) + return WithValidatorSampling(validators) + }, + expectedErr: ErrNoPeers, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + + done := make(chan struct{}) + sender := &common.SenderTest{ + SendAppRequestF: func(_ context.Context, nodeIDs set.Set[ids.NodeID], _ uint32, _ []byte) error { + require.Subset(tt.expected, nodeIDs.List()) + close(done) + 
return nil + }, + } + network, err := NewNetwork(logging.NoLog{}, sender, prometheus.NewRegistry(), "") + require.NoError(err) + ctx := context.Background() + for _, peer := range tt.peers { + require.NoError(network.Connected(ctx, peer, nil)) + } + + client := network.NewClient(0, tt.option(t, network)) + + if err = client.AppRequestAny(ctx, []byte("request"), nil); err != nil { + close(done) + } + + require.ErrorIs(err, tt.expectedErr) + <-done + }) + } +} + +// Tests that a given protocol can have more than one client +func TestMultipleClients(t *testing.T) { + require := require.New(t) + + n, err := NewNetwork(logging.NoLog{}, &common.SenderTest{}, prometheus.NewRegistry(), "") + require.NoError(err) + _ = n.NewClient(0) + _ = n.NewClient(0) +} diff --git a/avalanchego/network/p2p/node_sampler.go b/avalanchego/network/p2p/node_sampler.go new file mode 100644 index 00000000..5bb3815e --- /dev/null +++ b/avalanchego/network/p2p/node_sampler.go @@ -0,0 +1,17 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package p2p + +import ( + "context" + + "github.com/ava-labs/avalanchego/ids" +) + +// NodeSampler samples nodes in network +type NodeSampler interface { + // Sample returns at most [limit] nodes. This may return fewer nodes if + // fewer than [limit] are available. + Sample(ctx context.Context, limit int) []ids.NodeID +} diff --git a/avalanchego/network/p2p/peer_tracker.go b/avalanchego/network/p2p/peer_tracker.go new file mode 100644 index 00000000..31a4fb61 --- /dev/null +++ b/avalanchego/network/p2p/peer_tracker.go @@ -0,0 +1,317 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package p2p + +import ( + "math" + "math/rand" + "sync" + "time" + + "github.com/prometheus/client_golang/prometheus" + "go.uber.org/zap" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/heap" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/version" + + safemath "github.com/ava-labs/avalanchego/utils/math" +) + +const ( + bandwidthHalflife = 5 * time.Minute + + // controls how eagerly we connect to new peers vs. using peers with known + // good response bandwidth. + desiredMinResponsivePeers = 20 + newPeerConnectFactor = 0.1 + + // The probability that, when we select a peer, we select randomly rather + // than based on their performance. + randomPeerProbability = 0.2 +) + +// Tracks the bandwidth of responses coming from peers, +// preferring to contact peers with known good bandwidth, connecting +// to new peers with an exponentially decaying probability. +type PeerTracker struct { + // Lock to protect concurrent access to the peer tracker + lock sync.RWMutex + // Peers that we're connected to that we haven't sent a request to since we + // most recently connected to them. + untrackedPeers set.Set[ids.NodeID] + // Peers that we're connected to that we've sent a request to since we most + // recently connected to them. + trackedPeers set.Set[ids.NodeID] + // Peers that we're connected to that responded to the last request they + // were sent. + responsivePeers set.Set[ids.NodeID] + // Bandwidth of peers that we have measured. + peerBandwidth map[ids.NodeID]safemath.Averager + // Max heap that contains the average bandwidth of peers that do not have an + // outstanding request. + bandwidthHeap heap.Map[ids.NodeID, safemath.Averager] + // Average bandwidth is only used for metrics. + averageBandwidth safemath.Averager + + // The below fields are assumed to be constant and are not protected by the + // lock. 
+ log logging.Logger + ignoredNodes set.Set[ids.NodeID] + minVersion *version.Application + metrics peerTrackerMetrics +} + +type peerTrackerMetrics struct { + numTrackedPeers prometheus.Gauge + numResponsivePeers prometheus.Gauge + averageBandwidth prometheus.Gauge +} + +func NewPeerTracker( + log logging.Logger, + metricsNamespace string, + registerer prometheus.Registerer, + ignoredNodes set.Set[ids.NodeID], + minVersion *version.Application, +) (*PeerTracker, error) { + t := &PeerTracker{ + peerBandwidth: make(map[ids.NodeID]safemath.Averager), + bandwidthHeap: heap.NewMap[ids.NodeID, safemath.Averager](func(a, b safemath.Averager) bool { + return a.Read() > b.Read() + }), + averageBandwidth: safemath.NewAverager(0, bandwidthHalflife, time.Now()), + log: log, + ignoredNodes: ignoredNodes, + minVersion: minVersion, + metrics: peerTrackerMetrics{ + numTrackedPeers: prometheus.NewGauge( + prometheus.GaugeOpts{ + Namespace: metricsNamespace, + Name: "num_tracked_peers", + Help: "number of tracked peers", + }, + ), + numResponsivePeers: prometheus.NewGauge( + prometheus.GaugeOpts{ + Namespace: metricsNamespace, + Name: "num_responsive_peers", + Help: "number of responsive peers", + }, + ), + averageBandwidth: prometheus.NewGauge( + prometheus.GaugeOpts{ + Namespace: metricsNamespace, + Name: "average_bandwidth", + Help: "average sync bandwidth used by peers", + }, + ), + }, + } + + err := utils.Err( + registerer.Register(t.metrics.numTrackedPeers), + registerer.Register(t.metrics.numResponsivePeers), + registerer.Register(t.metrics.averageBandwidth), + ) + return t, err +} + +// Returns true if: +// - We have not observed the desired minimum number of responsive peers. +// - Randomly with the frequency decreasing as the number of responsive peers +// increases. +// +// Assumes the read lock is held. 
+func (p *PeerTracker) shouldSelectUntrackedPeer() bool { + numResponsivePeers := p.responsivePeers.Len() + if numResponsivePeers < desiredMinResponsivePeers { + return true + } + if p.untrackedPeers.Len() == 0 { + return false // already tracking all peers + } + + // TODO danlaine: we should consider tuning this probability function. + // With [newPeerConnectFactor] as 0.1 the probabilities are: + // + // numResponsivePeers | probability + // 100 | 4.5399929762484854e-05 + // 200 | 2.061153622438558e-09 + // 500 | 1.9287498479639178e-22 + // 1000 | 3.720075976020836e-44 + // 2000 | 1.3838965267367376e-87 + // 5000 | 7.124576406741286e-218 + // + // In other words, the probability drops off extremely quickly. + newPeerProbability := math.Exp(-float64(numResponsivePeers) * newPeerConnectFactor) + return rand.Float64() < newPeerProbability // #nosec G404 +} + +// SelectPeer that we could send a request to. +// +// If we should track more peers, returns a random untracked peer, if any exist. +// Otherwise, with probability [randomPeerProbability] returns a random peer +// from [p.responsivePeers]. +// With probability [1-randomPeerProbability] returns the peer in +// [p.bandwidthHeap] with the highest bandwidth. +// +// Returns false if there are no connected peers. 
+func (p *PeerTracker) SelectPeer() (ids.NodeID, bool) { + p.lock.RLock() + defer p.lock.RUnlock() + + if p.shouldSelectUntrackedPeer() { + if nodeID, ok := p.untrackedPeers.Peek(); ok { + p.log.Debug("selecting peer", + zap.String("reason", "untracked"), + zap.Stringer("nodeID", nodeID), + zap.Int("trackedPeers", p.trackedPeers.Len()), + zap.Int("responsivePeers", p.responsivePeers.Len()), + ) + return nodeID, true + } + } + + useBandwidthHeap := rand.Float64() > randomPeerProbability // #nosec G404 + if useBandwidthHeap { + if nodeID, bandwidth, ok := p.bandwidthHeap.Peek(); ok { + p.log.Debug("selecting peer", + zap.String("reason", "bandwidth"), + zap.Stringer("nodeID", nodeID), + zap.Float64("bandwidth", bandwidth.Read()), + ) + return nodeID, true + } + } else { + if nodeID, ok := p.responsivePeers.Peek(); ok { + p.log.Debug("selecting peer", + zap.String("reason", "responsive"), + zap.Stringer("nodeID", nodeID), + ) + return nodeID, true + } + } + + if nodeID, ok := p.trackedPeers.Peek(); ok { + p.log.Debug("selecting peer", + zap.String("reason", "tracked"), + zap.Stringer("nodeID", nodeID), + zap.Bool("checkedBandwidthHeap", useBandwidthHeap), + ) + return nodeID, true + } + + // We're not connected to any peers. + return ids.EmptyNodeID, false +} + +// Record that we sent a request to [nodeID]. +// +// Removes the peer's bandwidth averager from the bandwidth heap. +func (p *PeerTracker) RegisterRequest(nodeID ids.NodeID) { + p.lock.Lock() + defer p.lock.Unlock() + + p.untrackedPeers.Remove(nodeID) + p.trackedPeers.Add(nodeID) + p.bandwidthHeap.Remove(nodeID) + + p.metrics.numTrackedPeers.Set(float64(p.trackedPeers.Len())) +} + +// Record that we observed that [nodeID]'s bandwidth is [bandwidth]. +// +// Adds the peer's bandwidth averager to the bandwidth heap. +func (p *PeerTracker) RegisterResponse(nodeID ids.NodeID, bandwidth float64) { + p.updateBandwidth(nodeID, bandwidth, true) +} + +// Record that a request failed to [nodeID]. 
+// +// Adds the peer's bandwidth averager to the bandwidth heap. +func (p *PeerTracker) RegisterFailure(nodeID ids.NodeID) { + p.updateBandwidth(nodeID, 0, false) +} + +func (p *PeerTracker) updateBandwidth(nodeID ids.NodeID, bandwidth float64, responsive bool) { + p.lock.Lock() + defer p.lock.Unlock() + + if !p.trackedPeers.Contains(nodeID) { + // we're not tracking this peer, nothing to do here + p.log.Debug("tracking bandwidth for untracked peer", + zap.Stringer("nodeID", nodeID), + ) + return + } + + now := time.Now() + peerBandwidth, ok := p.peerBandwidth[nodeID] + if ok { + peerBandwidth.Observe(bandwidth, now) + } else { + peerBandwidth = safemath.NewAverager(bandwidth, bandwidthHalflife, now) + p.peerBandwidth[nodeID] = peerBandwidth + } + p.bandwidthHeap.Push(nodeID, peerBandwidth) + p.averageBandwidth.Observe(bandwidth, now) + + if responsive { + p.responsivePeers.Add(nodeID) + } else { + p.responsivePeers.Remove(nodeID) + } + + p.metrics.numResponsivePeers.Set(float64(p.responsivePeers.Len())) + p.metrics.averageBandwidth.Set(p.averageBandwidth.Read()) +} + +// Connected should be called when [nodeID] connects to this node. +func (p *PeerTracker) Connected(nodeID ids.NodeID, nodeVersion *version.Application) { + // If this peer should be ignored, don't mark it as connected. + if p.ignoredNodes.Contains(nodeID) { + return + } + // If minVersion is specified and peer's version is less, don't mark it as + // connected. + if p.minVersion != nil && nodeVersion.Compare(p.minVersion) < 0 { + return + } + + p.lock.Lock() + defer p.lock.Unlock() + + p.untrackedPeers.Add(nodeID) +} + +// Disconnected should be called when [nodeID] disconnects from this node. +func (p *PeerTracker) Disconnected(nodeID ids.NodeID) { + p.lock.Lock() + defer p.lock.Unlock() + + // Because of the checks performed in Connected, it's possible that this + // node was never marked as connected here. 
However, all of the below + // functions are noops if called with a peer that was never marked as + // connected. + p.untrackedPeers.Remove(nodeID) + p.trackedPeers.Remove(nodeID) + p.responsivePeers.Remove(nodeID) + delete(p.peerBandwidth, nodeID) + p.bandwidthHeap.Remove(nodeID) + + p.metrics.numTrackedPeers.Set(float64(p.trackedPeers.Len())) + p.metrics.numResponsivePeers.Set(float64(p.responsivePeers.Len())) +} + +// Returns the number of peers the node is connected to. +func (p *PeerTracker) Size() int { + p.lock.RLock() + defer p.lock.RUnlock() + + return p.untrackedPeers.Len() + p.trackedPeers.Len() +} diff --git a/avalanchego/network/p2p/peer_tracker_test.go b/avalanchego/network/p2p/peer_tracker_test.go new file mode 100644 index 00000000..01ebcfb8 --- /dev/null +++ b/avalanchego/network/p2p/peer_tracker_test.go @@ -0,0 +1,104 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package p2p + +import ( + "testing" + + "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/version" +) + +func TestPeerTracker(t *testing.T) { + require := require.New(t) + p, err := NewPeerTracker( + logging.NoLog{}, + "", + prometheus.NewRegistry(), + nil, + nil, + ) + require.NoError(err) + + // Connect some peers + numExtraPeers := 10 + numPeers := desiredMinResponsivePeers + numExtraPeers + peerIDs := make([]ids.NodeID, numPeers) + peerVersion := &version.Application{ + Major: 1, + Minor: 2, + Patch: 3, + } + + for i := range peerIDs { + peerIDs[i] = ids.GenerateTestNodeID() + p.Connected(peerIDs[i], peerVersion) + } + + responsivePeers := make(map[ids.NodeID]bool) + + // Expect requests to go to new peers until we have desiredMinResponsivePeers responsive peers. 
+ for i := 0; i < desiredMinResponsivePeers+numExtraPeers/2; i++ { + peer, ok := p.SelectPeer() + require.True(ok) + require.NotZero(peer) + + _, exists := responsivePeers[peer] + require.Falsef(exists, "expected connecting to a new peer, but got the same peer twice: peer %s iteration %d", peer, i) + responsivePeers[peer] = true + + p.RegisterRequest(peer) // mark the peer as having a message sent to it + } + + // Mark some peers as responsive and others as not responsive + i := 0 + for peer := range responsivePeers { + if i < desiredMinResponsivePeers { + p.RegisterResponse(peer, 10) + } else { + responsivePeers[peer] = false // remember which peers were not responsive + p.RegisterFailure(peer) + } + i++ + } + + // Expect requests to go to responsive or new peers, so long as they are available + numRequests := 50 + for i := 0; i < numRequests; i++ { + peer, ok := p.SelectPeer() + require.True(ok) + require.NotZero(peer) + + responsive, ok := responsivePeers[peer] + if ok { + require.Truef(responsive, "expected connecting to a responsive peer, but got a peer that was not responsive: peer %s iteration %d", peer, i) + p.RegisterResponse(peer, 10) + } else { + responsivePeers[peer] = false // remember that we connected to this peer + p.RegisterRequest(peer) // mark the peer as having a message sent to it + p.RegisterFailure(peer) // mark the peer as non-responsive + } + } + + // Disconnect from peers that were previously responsive and ones we didn't connect to yet. 
+ for _, peer := range peerIDs { + responsive, ok := responsivePeers[peer] + if ok && responsive || !ok { + p.Disconnected(peer) + } + } + + // Requests should fall back on non-responsive peers when no other choice is left + peer, ok := p.SelectPeer() + require.True(ok) + require.NotZero(peer) + + responsive, ok := responsivePeers[peer] + require.True(ok) + require.Falsef(responsive, "expected connecting to a non-responsive peer, but got a peer that was responsive: peer %s", peer) +} diff --git a/avalanchego/network/p2p/router.go b/avalanchego/network/p2p/router.go new file mode 100644 index 00000000..4828ea0f --- /dev/null +++ b/avalanchego/network/p2p/router.go @@ -0,0 +1,448 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package p2p + +import ( + "context" + "encoding/binary" + "errors" + "fmt" + "strconv" + "sync" + "time" + + "github.com/prometheus/client_golang/prometheus" + "go.uber.org/zap" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/message" + "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/utils/logging" +) + +var ( + ErrExistingAppProtocol = errors.New("existing app protocol") + ErrUnrequestedResponse = errors.New("unrequested response") + + _ common.AppHandler = (*router)(nil) +) + +type pendingAppRequest struct { + handlerID string + callback AppResponseCallback +} + +type pendingCrossChainAppRequest struct { + handlerID string + callback CrossChainAppResponseCallback +} + +// meteredHandler emits metrics for a Handler +type meteredHandler struct { + *responder + metrics +} + +type metrics struct { + appRequestTime *prometheus.CounterVec + appRequestCount *prometheus.CounterVec + appResponseTime *prometheus.CounterVec + appResponseCount *prometheus.CounterVec + appRequestFailedTime *prometheus.CounterVec + appRequestFailedCount *prometheus.CounterVec + appGossipTime *prometheus.CounterVec + appGossipCount 
*prometheus.CounterVec + crossChainAppRequestTime *prometheus.CounterVec + crossChainAppRequestCount *prometheus.CounterVec + crossChainAppResponseTime *prometheus.CounterVec + crossChainAppResponseCount *prometheus.CounterVec + crossChainAppRequestFailedTime *prometheus.CounterVec + crossChainAppRequestFailedCount *prometheus.CounterVec +} + +// router routes incoming application messages to the corresponding registered +// app handler. App messages must be made using the registered handler's +// corresponding Client. +type router struct { + log logging.Logger + sender common.AppSender + metrics metrics + + lock sync.RWMutex + handlers map[uint64]*meteredHandler + pendingAppRequests map[uint32]pendingAppRequest + pendingCrossChainAppRequests map[uint32]pendingCrossChainAppRequest + requestID uint32 +} + +// newRouter returns a new instance of Router +func newRouter( + log logging.Logger, + sender common.AppSender, + metrics metrics, +) *router { + return &router{ + log: log, + sender: sender, + metrics: metrics, + handlers: make(map[uint64]*meteredHandler), + pendingAppRequests: make(map[uint32]pendingAppRequest), + pendingCrossChainAppRequests: make(map[uint32]pendingCrossChainAppRequest), + // invariant: sdk uses odd-numbered requestIDs + requestID: 1, + } +} + +func (r *router) addHandler(handlerID uint64, handler Handler) error { + r.lock.Lock() + defer r.lock.Unlock() + + if _, ok := r.handlers[handlerID]; ok { + return fmt.Errorf("failed to register handler id %d: %w", handlerID, ErrExistingAppProtocol) + } + + r.handlers[handlerID] = &meteredHandler{ + responder: &responder{ + Handler: handler, + handlerID: handlerID, + log: r.log, + sender: r.sender, + }, + metrics: r.metrics, + } + + return nil +} + +// AppRequest routes an AppRequest to a Handler based on the handler prefix. The +// message is dropped if no matching handler can be found. 
+// +// Any error condition propagated outside Handler application logic is +// considered fatal +func (r *router) AppRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, deadline time.Time, request []byte) error { + start := time.Now() + parsedMsg, handler, handlerID, ok := r.parse(request) + if !ok { + r.log.Debug("failed to process message", + zap.Stringer("messageOp", message.AppRequestOp), + zap.Stringer("nodeID", nodeID), + zap.Uint32("requestID", requestID), + zap.Time("deadline", deadline), + zap.Binary("message", request), + ) + return nil + } + + // call the corresponding handler and send back a response to nodeID + if err := handler.AppRequest(ctx, nodeID, requestID, deadline, parsedMsg); err != nil { + return err + } + + labels := prometheus.Labels{ + handlerLabel: handlerID, + } + + metricCount, err := r.metrics.appRequestCount.GetMetricWith(labels) + if err != nil { + return err + } + + metricTime, err := r.metrics.appRequestTime.GetMetricWith(labels) + if err != nil { + return err + } + + metricCount.Inc() + metricTime.Add(float64(time.Since(start))) + + return nil +} + +// AppRequestFailed routes an AppRequestFailed message to the callback +// corresponding to requestID. 
+// +// Any error condition propagated outside Handler application logic is +// considered fatal +func (r *router) AppRequestFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32, appErr *common.AppError) error { + start := time.Now() + pending, ok := r.clearAppRequest(requestID) + if !ok { + // we should never receive a timeout without a corresponding requestID + return ErrUnrequestedResponse + } + + pending.callback(ctx, nodeID, nil, appErr) + + labels := prometheus.Labels{ + handlerLabel: pending.handlerID, + } + + metricCount, err := r.metrics.appRequestFailedCount.GetMetricWith(labels) + if err != nil { + return err + } + + metricTime, err := r.metrics.appRequestFailedTime.GetMetricWith(labels) + if err != nil { + return err + } + + metricCount.Inc() + metricTime.Add(float64(time.Since(start))) + + return nil +} + +// AppResponse routes an AppResponse message to the callback corresponding to +// requestID. +// +// Any error condition propagated outside Handler application logic is +// considered fatal +func (r *router) AppResponse(ctx context.Context, nodeID ids.NodeID, requestID uint32, response []byte) error { + start := time.Now() + pending, ok := r.clearAppRequest(requestID) + if !ok { + // we should never receive a timeout without a corresponding requestID + return ErrUnrequestedResponse + } + + pending.callback(ctx, nodeID, response, nil) + + labels := prometheus.Labels{ + handlerLabel: pending.handlerID, + } + + metricCount, err := r.metrics.appResponseCount.GetMetricWith(labels) + if err != nil { + return err + } + + metricTime, err := r.metrics.appResponseTime.GetMetricWith(labels) + if err != nil { + return err + } + + metricCount.Inc() + metricTime.Add(float64(time.Since(start))) + + return nil +} + +// AppGossip routes an AppGossip message to a Handler based on the handler +// prefix. The message is dropped if no matching handler can be found. 
+// +// Any error condition propagated outside Handler application logic is +// considered fatal +func (r *router) AppGossip(ctx context.Context, nodeID ids.NodeID, gossip []byte) error { + start := time.Now() + parsedMsg, handler, handlerID, ok := r.parse(gossip) + if !ok { + r.log.Debug("failed to process message", + zap.Stringer("messageOp", message.AppGossipOp), + zap.Stringer("nodeID", nodeID), + zap.Binary("message", gossip), + ) + return nil + } + + handler.AppGossip(ctx, nodeID, parsedMsg) + + labels := prometheus.Labels{ + handlerLabel: handlerID, + } + + metricCount, err := r.metrics.appGossipCount.GetMetricWith(labels) + if err != nil { + return err + } + + metricTime, err := r.metrics.appGossipTime.GetMetricWith(labels) + if err != nil { + return err + } + + metricCount.Inc() + metricTime.Add(float64(time.Since(start))) + + return nil +} + +// CrossChainAppRequest routes a CrossChainAppRequest message to a Handler +// based on the handler prefix. The message is dropped if no matching handler +// can be found. 
+// +// Any error condition propagated outside Handler application logic is +// considered fatal +func (r *router) CrossChainAppRequest( + ctx context.Context, + chainID ids.ID, + requestID uint32, + deadline time.Time, + msg []byte, +) error { + start := time.Now() + parsedMsg, handler, handlerID, ok := r.parse(msg) + if !ok { + r.log.Debug("failed to process message", + zap.Stringer("messageOp", message.CrossChainAppRequestOp), + zap.Stringer("chainID", chainID), + zap.Uint32("requestID", requestID), + zap.Time("deadline", deadline), + zap.Binary("message", msg), + ) + return nil + } + + if err := handler.CrossChainAppRequest(ctx, chainID, requestID, deadline, parsedMsg); err != nil { + return err + } + + labels := prometheus.Labels{ + handlerLabel: handlerID, + } + + metricCount, err := r.metrics.crossChainAppRequestCount.GetMetricWith(labels) + if err != nil { + return err + } + + metricTime, err := r.metrics.crossChainAppRequestTime.GetMetricWith(labels) + if err != nil { + return err + } + + metricCount.Inc() + metricTime.Add(float64(time.Since(start))) + + return nil +} + +// CrossChainAppRequestFailed routes a CrossChainAppRequestFailed message to +// the callback corresponding to requestID. 
+// +// Any error condition propagated outside Handler application logic is +// considered fatal +func (r *router) CrossChainAppRequestFailed(ctx context.Context, chainID ids.ID, requestID uint32, appErr *common.AppError) error { + start := time.Now() + pending, ok := r.clearCrossChainAppRequest(requestID) + if !ok { + // we should never receive a timeout without a corresponding requestID + return ErrUnrequestedResponse + } + + pending.callback(ctx, chainID, nil, appErr) + + labels := prometheus.Labels{ + handlerLabel: pending.handlerID, + } + + metricCount, err := r.metrics.crossChainAppRequestFailedCount.GetMetricWith(labels) + if err != nil { + return err + } + + metricTime, err := r.metrics.crossChainAppRequestFailedTime.GetMetricWith(labels) + if err != nil { + return err + } + + metricCount.Inc() + metricTime.Add(float64(time.Since(start))) + + return nil +} + +// CrossChainAppResponse routes a CrossChainAppResponse message to the callback +// corresponding to requestID. +// +// Any error condition propagated outside Handler application logic is +// considered fatal +func (r *router) CrossChainAppResponse(ctx context.Context, chainID ids.ID, requestID uint32, response []byte) error { + start := time.Now() + pending, ok := r.clearCrossChainAppRequest(requestID) + if !ok { + // we should never receive a timeout without a corresponding requestID + return ErrUnrequestedResponse + } + + pending.callback(ctx, chainID, response, nil) + + labels := prometheus.Labels{ + handlerLabel: pending.handlerID, + } + + metricCount, err := r.metrics.crossChainAppResponseCount.GetMetricWith(labels) + if err != nil { + return err + } + + metricTime, err := r.metrics.crossChainAppResponseTime.GetMetricWith(labels) + if err != nil { + return err + } + + metricCount.Inc() + metricTime.Add(float64(time.Since(start))) + + return nil +} + +// Parse parses a gossip or request message and maps it to a corresponding +// handler if present. 
+// +// Returns: +// - The unprefixed protocol message. +// - The protocol responder. +// - The protocol metric name. +// - A boolean indicating that parsing succeeded. +// +// Invariant: Assumes [r.lock] isn't held. +func (r *router) parse(prefixedMsg []byte) ([]byte, *meteredHandler, string, bool) { + handlerID, msg, ok := ParseMessage(prefixedMsg) + if !ok { + return nil, nil, "", false + } + + handlerStr := strconv.FormatUint(handlerID, 10) + + r.lock.RLock() + defer r.lock.RUnlock() + + handler, ok := r.handlers[handlerID] + return msg, handler, handlerStr, ok +} + +// Invariant: Assumes [r.lock] isn't held. +func (r *router) clearAppRequest(requestID uint32) (pendingAppRequest, bool) { + r.lock.Lock() + defer r.lock.Unlock() + + callback, ok := r.pendingAppRequests[requestID] + delete(r.pendingAppRequests, requestID) + return callback, ok +} + +// Invariant: Assumes [r.lock] isn't held. +func (r *router) clearCrossChainAppRequest(requestID uint32) (pendingCrossChainAppRequest, bool) { + r.lock.Lock() + defer r.lock.Unlock() + + callback, ok := r.pendingCrossChainAppRequests[requestID] + delete(r.pendingCrossChainAppRequests, requestID) + return callback, ok +} + +// Parse a gossip or request message. +// +// Returns: +// - The protocol ID. +// - The unprefixed protocol message. +// - A boolean indicating that parsing succeeded. +func ParseMessage(msg []byte) (uint64, []byte, bool) { + handlerID, bytesRead := binary.Uvarint(msg) + if bytesRead <= 0 { + return 0, nil, false + } + return handlerID, msg[bytesRead:], true +} diff --git a/avalanchego/network/p2p/throttler.go b/avalanchego/network/p2p/throttler.go new file mode 100644 index 00000000..c8f34a7e --- /dev/null +++ b/avalanchego/network/p2p/throttler.go @@ -0,0 +1,103 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package p2p + +import ( + "sync" + "time" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/timer/mockable" +) + +var _ Throttler = (*SlidingWindowThrottler)(nil) + +type Throttler interface { + // Handle returns true if a message from [nodeID] should be handled. + Handle(nodeID ids.NodeID) bool +} + +// NewSlidingWindowThrottler returns a new instance of SlidingWindowThrottler. +// Nodes are throttled if they exceed [limit] messages during an interval of +// time over [period]. +// [period] and [limit] should both be > 0. +func NewSlidingWindowThrottler(period time.Duration, limit int) *SlidingWindowThrottler { + now := time.Now() + return &SlidingWindowThrottler{ + period: period, + limit: float64(limit), + windows: [2]window{ + { + start: now, + hits: make(map[ids.NodeID]float64), + }, + { + start: now.Add(-period), + hits: make(map[ids.NodeID]float64), + }, + }, + } +} + +// window is used internally by SlidingWindowThrottler to represent the amount +// of hits from a node in the evaluation period beginning at [start] +type window struct { + start time.Time + hits map[ids.NodeID]float64 +} + +// SlidingWindowThrottler is an implementation of the sliding window throttling +// algorithm. +type SlidingWindowThrottler struct { + period time.Duration + limit float64 + clock mockable.Clock + + lock sync.Mutex + current int + windows [2]window +} + +// Handle returns true if the amount of calls received in the last [s.period] +// time is less than [s.limit] +// +// This is calculated by adding the current period's count to a weighted count +// of the previous period. 
+func (s *SlidingWindowThrottler) Handle(nodeID ids.NodeID) bool { + s.lock.Lock() + defer s.lock.Unlock() + + // The current window becomes the previous window if the current evaluation + // period is over + now := s.clock.Time() + sinceUpdate := now.Sub(s.windows[s.current].start) + if sinceUpdate >= 2*s.period { + s.rotate(now.Add(-s.period)) + } + if sinceUpdate >= s.period { + s.rotate(now) + sinceUpdate = 0 + } + + currentHits := s.windows[s.current].hits + current := currentHits[nodeID] + previousFraction := float64(s.period-sinceUpdate) / float64(s.period) + previous := s.windows[1-s.current].hits[nodeID] + estimatedHits := current + previousFraction*previous + if estimatedHits >= s.limit { + // The peer has sent too many requests, drop this request. + return false + } + + currentHits[nodeID]++ + return true +} + +func (s *SlidingWindowThrottler) rotate(t time.Time) { + s.current = 1 - s.current + s.windows[s.current] = window{ + start: t, + hits: make(map[ids.NodeID]float64), + } +} diff --git a/avalanchego/network/p2p/throttler_handler.go b/avalanchego/network/p2p/throttler_handler.go new file mode 100644 index 00000000..8fa3df93 --- /dev/null +++ b/avalanchego/network/p2p/throttler_handler.go @@ -0,0 +1,60 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package p2p + +import ( + "context" + "errors" + "fmt" + "time" + + "go.uber.org/zap" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/logging" +) + +var ( + ErrThrottled = errors.New("throttled") + _ Handler = (*ThrottlerHandler)(nil) +) + +func NewThrottlerHandler(handler Handler, throttler Throttler, log logging.Logger) *ThrottlerHandler { + return &ThrottlerHandler{ + handler: handler, + throttler: throttler, + log: log, + } +} + +type ThrottlerHandler struct { + handler Handler + throttler Throttler + log logging.Logger +} + +func (t ThrottlerHandler) AppGossip(ctx context.Context, nodeID ids.NodeID, gossipBytes []byte) { + if !t.throttler.Handle(nodeID) { + t.log.Debug( + "dropping message", + zap.Stringer("nodeID", nodeID), + zap.String("reason", "throttled"), + ) + return + } + + t.handler.AppGossip(ctx, nodeID, gossipBytes) +} + +func (t ThrottlerHandler) AppRequest(ctx context.Context, nodeID ids.NodeID, deadline time.Time, requestBytes []byte) ([]byte, error) { + if !t.throttler.Handle(nodeID) { + return nil, fmt.Errorf("dropping message from %s: %w", nodeID, ErrThrottled) + } + + return t.handler.AppRequest(ctx, nodeID, deadline, requestBytes) +} + +func (t ThrottlerHandler) CrossChainAppRequest(ctx context.Context, chainID ids.ID, deadline time.Time, requestBytes []byte) ([]byte, error) { + return t.handler.CrossChainAppRequest(ctx, chainID, deadline, requestBytes) +} diff --git a/avalanchego/network/p2p/throttler_handler_test.go b/avalanchego/network/p2p/throttler_handler_test.go new file mode 100644 index 00000000..1f5a0706 --- /dev/null +++ b/avalanchego/network/p2p/throttler_handler_test.go @@ -0,0 +1,85 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package p2p + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/logging" +) + +var _ Handler = (*TestHandler)(nil) + +func TestThrottlerHandlerAppGossip(t *testing.T) { + tests := []struct { + name string + Throttler Throttler + expected bool + }{ + { + name: "not throttled", + Throttler: NewSlidingWindowThrottler(time.Second, 1), + expected: true, + }, + { + name: "throttled", + Throttler: NewSlidingWindowThrottler(time.Second, 0), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + + called := false + handler := NewThrottlerHandler( + TestHandler{ + AppGossipF: func(context.Context, ids.NodeID, []byte) { + called = true + }, + }, + tt.Throttler, + logging.NoLog{}, + ) + + handler.AppGossip(context.Background(), ids.GenerateTestNodeID(), []byte("foobar")) + require.Equal(tt.expected, called) + }) + } +} + +func TestThrottlerHandlerAppRequest(t *testing.T) { + tests := []struct { + name string + Throttler Throttler + expectedErr error + }{ + { + name: "not throttled", + Throttler: NewSlidingWindowThrottler(time.Second, 1), + }, + { + name: "throttled", + Throttler: NewSlidingWindowThrottler(time.Second, 0), + expectedErr: ErrThrottled, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + + handler := NewThrottlerHandler( + NoOpHandler{}, + tt.Throttler, + logging.NoLog{}, + ) + _, err := handler.AppRequest(context.Background(), ids.GenerateTestNodeID(), time.Time{}, []byte("foobar")) + require.ErrorIs(err, tt.expectedErr) + }) + } +} diff --git a/avalanchego/network/p2p/throttler_test.go b/avalanchego/network/p2p/throttler_test.go new file mode 100644 index 00000000..3c3c5636 --- /dev/null +++ b/avalanchego/network/p2p/throttler_test.go @@ -0,0 +1,139 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. 
+// See the file LICENSE for licensing terms. + +package p2p + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" +) + +func TestSlidingWindowThrottlerHandle(t *testing.T) { + period := time.Minute + previousWindowStartTime := time.Time{} + currentWindowStartTime := previousWindowStartTime.Add(period) + + nodeID := ids.GenerateTestNodeID() + + type call struct { + time time.Time + throttled bool + } + + tests := []struct { + name string + limit int + calls []call + }{ + { + name: "throttled in current window", + limit: 1, + calls: []call{ + { + time: currentWindowStartTime, + }, + { + time: currentWindowStartTime, + throttled: true, + }, + }, + }, + { + name: "throttled from previous window", + limit: 1, + calls: []call{ + { + time: previousWindowStartTime, + }, + { + time: currentWindowStartTime, + throttled: true, + }, + }, + }, + { + name: "throttled over multiple evaluation periods", + limit: 5, + calls: []call{ + { + time: currentWindowStartTime.Add(30 * time.Second), + }, + { + time: currentWindowStartTime.Add(period).Add(1 * time.Second), + }, + { + time: currentWindowStartTime.Add(period).Add(2 * time.Second), + }, + { + time: currentWindowStartTime.Add(period).Add(3 * time.Second), + }, + { + time: currentWindowStartTime.Add(period).Add(4 * time.Second), + }, + { + time: currentWindowStartTime.Add(period).Add(30 * time.Second), + }, + { + time: currentWindowStartTime.Add(period).Add(30 * time.Second), + throttled: true, + }, + { + time: currentWindowStartTime.Add(5 * period), + }, + }, + }, + { + name: "one hit per period", + limit: 2, + calls: []call{ + { + time: currentWindowStartTime, + }, + { + time: currentWindowStartTime.Add(period).Add(time.Second), + }, + { + time: currentWindowStartTime.Add(2 * period).Add(time.Second), + }, + { + time: currentWindowStartTime.Add(3 * period).Add(time.Second), + }, + { + time: currentWindowStartTime.Add(4 * period).Add(time.Second), + }, + }, + }, + { 
+ // if too much time passes by, a current window might not be a + // valid previous window. + name: "current window needs to be reset", + limit: 1, + calls: []call{ + { + time: currentWindowStartTime, + }, + { + time: currentWindowStartTime.Add(10 * period), + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + throttler := NewSlidingWindowThrottler(period, tt.limit) + throttler.windows[throttler.current].start = currentWindowStartTime + throttler.windows[1-throttler.current].start = previousWindowStartTime + + for _, call := range tt.calls { + throttler.clock.Set(call.time) + require.Equal(call.throttled, !throttler.Handle(nodeID)) + } + }) + } +} diff --git a/avalanchego/network/p2p/validators.go b/avalanchego/network/p2p/validators.go new file mode 100644 index 00000000..3ece6559 --- /dev/null +++ b/avalanchego/network/p2p/validators.go @@ -0,0 +1,113 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package p2p + +import ( + "context" + "sync" + "time" + + "go.uber.org/zap" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/set" +) + +var ( + _ ValidatorSet = (*Validators)(nil) + _ NodeSampler = (*Validators)(nil) +) + +type ValidatorSet interface { + Has(ctx context.Context, nodeID ids.NodeID) bool // TODO return error +} + +func NewValidators( + peers *Peers, + log logging.Logger, + subnetID ids.ID, + validators validators.State, + maxValidatorSetStaleness time.Duration, +) *Validators { + return &Validators{ + peers: peers, + log: log, + subnetID: subnetID, + validators: validators, + maxValidatorSetStaleness: maxValidatorSetStaleness, + } +} + +// Validators contains a set of nodes that are staking. 
+type Validators struct { + peers *Peers + log logging.Logger + subnetID ids.ID + validators validators.State + + lock sync.Mutex + validatorIDs set.SampleableSet[ids.NodeID] + lastUpdated time.Time + maxValidatorSetStaleness time.Duration +} + +func (v *Validators) refresh(ctx context.Context) { + if time.Since(v.lastUpdated) < v.maxValidatorSetStaleness { + return + } + + v.validatorIDs.Clear() + + height, err := v.validators.GetCurrentHeight(ctx) + if err != nil { + v.log.Warn("failed to get current height", zap.Error(err)) + return + } + validatorSet, err := v.validators.GetValidatorSet(ctx, height, v.subnetID) + if err != nil { + v.log.Warn("failed to get validator set", zap.Error(err)) + return + } + + for nodeID := range validatorSet { + v.validatorIDs.Add(nodeID) + } + + v.lastUpdated = time.Now() +} + +// Sample returns a random sample of connected validators +func (v *Validators) Sample(ctx context.Context, limit int) []ids.NodeID { + v.lock.Lock() + defer v.lock.Unlock() + + v.refresh(ctx) + + // TODO: Account for peer connectivity during the sampling of validators + // rather than filtering sampled validators. + validatorIDs := v.validatorIDs.Sample(limit) + sampled := validatorIDs[:0] + + for _, validatorID := range validatorIDs { + if !v.peers.has(validatorID) { + continue + } + + sampled = append(sampled, validatorID) + } + + return sampled +} + +// Has returns if nodeID is a connected validator +func (v *Validators) Has(ctx context.Context, nodeID ids.NodeID) bool { + v.lock.Lock() + defer v.lock.Unlock() + + v.refresh(ctx) + + return v.peers.has(nodeID) && v.validatorIDs.Contains(nodeID) +} diff --git a/avalanchego/network/p2p/validators_test.go b/avalanchego/network/p2p/validators_test.go new file mode 100644 index 00000000..7239ac01 --- /dev/null +++ b/avalanchego/network/p2p/validators_test.go @@ -0,0 +1,196 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package p2p + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils/logging" +) + +func TestValidatorsSample(t *testing.T) { + errFoobar := errors.New("foobar") + nodeID1 := ids.GenerateTestNodeID() + nodeID2 := ids.GenerateTestNodeID() + + type call struct { + limit int + + time time.Time + + height uint64 + getCurrentHeightErr error + + validators []ids.NodeID + getValidatorSetErr error + + // superset of possible values in the result + expected []ids.NodeID + } + + tests := []struct { + name string + maxStaleness time.Duration + calls []call + }{ + { + // if we don't have as many validators as requested by the caller, + // we should return all the validators we have + name: "less than limit validators", + maxStaleness: time.Hour, + calls: []call{ + { + time: time.Time{}.Add(time.Second), + limit: 2, + height: 1, + validators: []ids.NodeID{nodeID1}, + expected: []ids.NodeID{nodeID1}, + }, + }, + }, + { + // if we have as many validators as requested by the caller, we + // should return all the validators we have + name: "equal to limit validators", + maxStaleness: time.Hour, + calls: []call{ + { + time: time.Time{}.Add(time.Second), + limit: 1, + height: 1, + validators: []ids.NodeID{nodeID1}, + expected: []ids.NodeID{nodeID1}, + }, + }, + }, + { + // if we have less validators than requested by the caller, we + // should return a subset of the validators that we have + name: "less than limit validators", + maxStaleness: time.Hour, + calls: []call{ + { + time: time.Time{}.Add(time.Second), + limit: 1, + height: 1, + validators: []ids.NodeID{nodeID1, nodeID2}, + expected: []ids.NodeID{nodeID1, nodeID2}, + }, + }, + }, + { + name: "within max staleness 
threshold", + maxStaleness: time.Hour, + calls: []call{ + { + time: time.Time{}.Add(time.Second), + limit: 1, + height: 1, + validators: []ids.NodeID{nodeID1}, + expected: []ids.NodeID{nodeID1}, + }, + }, + }, + { + name: "beyond max staleness threshold", + maxStaleness: time.Hour, + calls: []call{ + { + limit: 1, + time: time.Time{}.Add(time.Hour), + height: 1, + validators: []ids.NodeID{nodeID1}, + expected: []ids.NodeID{nodeID1}, + }, + }, + }, + { + name: "fail to get current height", + maxStaleness: time.Second, + calls: []call{ + { + limit: 1, + time: time.Time{}.Add(time.Hour), + getCurrentHeightErr: errFoobar, + expected: []ids.NodeID{}, + }, + }, + }, + { + name: "second get validator set call fails", + maxStaleness: time.Minute, + calls: []call{ + { + limit: 1, + time: time.Time{}.Add(time.Second), + height: 1, + validators: []ids.NodeID{nodeID1}, + expected: []ids.NodeID{nodeID1}, + }, + { + limit: 1, + time: time.Time{}.Add(time.Hour), + height: 1, + getValidatorSetErr: errFoobar, + expected: []ids.NodeID{}, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + subnetID := ids.GenerateTestID() + ctrl := gomock.NewController(t) + mockValidators := validators.NewMockState(ctrl) + + calls := make([]any, 0) + for _, call := range tt.calls { + calls = append(calls, mockValidators.EXPECT(). + GetCurrentHeight(gomock.Any()).Return(call.height, call.getCurrentHeightErr)) + + if call.getCurrentHeightErr != nil { + continue + } + + validatorSet := make(map[ids.NodeID]*validators.GetValidatorOutput, 0) + for _, validator := range call.validators { + validatorSet[validator] = nil + } + + calls = append(calls, + mockValidators.EXPECT(). + GetValidatorSet(gomock.Any(), gomock.Any(), subnetID). + Return(validatorSet, call.getValidatorSetErr)) + } + gomock.InOrder(calls...) 
+ + network, err := NewNetwork(logging.NoLog{}, &common.FakeSender{}, prometheus.NewRegistry(), "") + require.NoError(err) + + ctx := context.Background() + require.NoError(network.Connected(ctx, nodeID1, nil)) + require.NoError(network.Connected(ctx, nodeID2, nil)) + + v := NewValidators(network.Peers, network.log, subnetID, mockValidators, tt.maxStaleness) + for _, call := range tt.calls { + v.lastUpdated = call.time + sampled := v.Sample(ctx, call.limit) + require.LessOrEqual(len(sampled), call.limit) + require.Subset(call.expected, sampled) + } + }) + } +} diff --git a/avalanchego/network/peer/config.go b/avalanchego/network/peer/config.go index 2ad13a19..3eb83192 100644 --- a/avalanchego/network/peer/config.go +++ b/avalanchego/network/peer/config.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package peer @@ -34,12 +34,16 @@ type Config struct { Router router.InboundHandler VersionCompatibility version.Compatibility MySubnets set.Set[ids.ID] - Beacons validators.Set + Beacons validators.Manager + Validators validators.Manager NetworkID uint32 PingFrequency time.Duration PongTimeout time.Duration MaxClockDifference time.Duration + SupportedACPs []uint32 + ObjectedACPs []uint32 + // Unix time of the last message sent and received respectively // Must only be accessed atomically LastSent, LastReceived int64 @@ -50,6 +54,6 @@ type Config struct { // Calculates uptime of peers UptimeCalculator uptime.Calculator - // Signs my IP so I can send my signed IP address in the Version message + // Signs my IP so I can send my signed IP address in the Handshake message IPSigner *IPSigner } diff --git a/avalanchego/network/peer/example_test.go b/avalanchego/network/peer/example_test.go index 75eaecee..d6c8ba20 100644 --- a/avalanchego/network/peer/example_test.go +++ b/avalanchego/network/peer/example_test.go @@ -1,4 +1,4 @@ -// 
Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package peer diff --git a/avalanchego/network/peer/gossip_tracker.go b/avalanchego/network/peer/gossip_tracker.go deleted file mode 100644 index 5676b073..00000000 --- a/avalanchego/network/peer/gossip_tracker.go +++ /dev/null @@ -1,323 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package peer - -import ( - "fmt" - "sync" - - "github.com/prometheus/client_golang/prometheus" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/set" -) - -// GossipTracker tracks the validators that we're currently aware of, as well as -// the validators we've told each peers about. This data is stored in a bitset -// to optimize space, where only N (num validators) bits will be used per peer. -// -// This is done by recording some state information of both what validators this -// node is aware of, and what validators we've told each peer about. -// As an example, say we track three peers and three validators (MSB first): -// -// trackedPeers: { -// p1: [1, 1, 1] // we have already told [p1] about all validators -// p2: [0, 1, 1] // [p2] doesn't know about [v3] -// p3: [0, 0, 1] // [p3] knows only about [v3] -// } -// -// GetUnknown computes the validators we haven't sent to a given peer. Ex: -// -// GetUnknown(p1) - [0, 0, 0] -// GetUnknown(p2) - [1, 0, 0] -// GetUnknown(p3) - [1, 1, 0] -// -// Using the gossipTracker, we can quickly compute the validators each peer -// doesn't know about using GetUnknown so that in subsequent PeerList gossip -// messages we only send information that this peer (most likely) doesn't -// already know about. The only case where we'll send a redundant set of -// bytes is if another remote peer gossips to the same peer we're trying to -// gossip to first. 
-type GossipTracker interface { - // Tracked returns if a peer is being tracked - // Returns: - // bool: False if [peerID] is not tracked. True otherwise. - Tracked(peerID ids.NodeID) bool - - // StartTrackingPeer starts tracking a peer - // Returns: - // bool: False if [peerID] was already tracked. True otherwise. - StartTrackingPeer(peerID ids.NodeID) bool - // StopTrackingPeer stops tracking a given peer - // Returns: - // bool: False if [peerID] was not tracked. True otherwise. - StopTrackingPeer(peerID ids.NodeID) bool - - // AddValidator adds a validator that can be gossiped about - // bool: False if a validator with the same node ID or txID as [validator] - // is present. True otherwise. - AddValidator(validator ValidatorID) bool - // GetNodeID maps a txID into a nodeIDs - // nodeID: The nodeID that was registered by [txID] - // bool: False if [validator] was not present. True otherwise. - GetNodeID(txID ids.ID) (ids.NodeID, bool) - // RemoveValidator removes a validator that can be gossiped about - // bool: False if [validator] was already not present. True otherwise. - RemoveValidator(validatorID ids.NodeID) bool - // ResetValidator resets known gossip status of [validatorID] to unknown - // for all peers - // bool: False if [validator] was not present. True otherwise. - ResetValidator(validatorID ids.NodeID) bool - - // AddKnown adds [knownTxIDs] to the txIDs known by [peerID] and filters - // [txIDs] for non-validators. - // Returns: - // txIDs: The txIDs in [txIDs] that are currently validators. - // bool: False if [peerID] is not tracked. True otherwise. - AddKnown( - peerID ids.NodeID, - knownTxIDs []ids.ID, - txIDs []ids.ID, - ) ([]ids.ID, bool) - // GetUnknown gets the peers that we haven't sent to this peer - // Returns: - // []ValidatorID: a slice of ValidatorIDs that [peerID] doesn't know about. - // bool: False if [peerID] is not tracked. True otherwise. 
- GetUnknown(peerID ids.NodeID) ([]ValidatorID, bool) -} - -type gossipTracker struct { - lock sync.RWMutex - // a mapping of txIDs => the validator added to the validiator set by that - // tx. - txIDsToNodeIDs map[ids.ID]ids.NodeID - // a mapping of validators => the index they occupy in the bitsets - nodeIDsToIndices map[ids.NodeID]int - // each validator in the index it occupies in the bitset - validatorIDs []ValidatorID - // a mapping of each peer => the validators they know about - trackedPeers map[ids.NodeID]set.Bits - - metrics gossipTrackerMetrics -} - -// NewGossipTracker returns an instance of gossipTracker -func NewGossipTracker( - registerer prometheus.Registerer, - namespace string, -) (GossipTracker, error) { - m, err := newGossipTrackerMetrics(registerer, fmt.Sprintf("%s_gossip_tracker", namespace)) - if err != nil { - return nil, err - } - - return &gossipTracker{ - txIDsToNodeIDs: make(map[ids.ID]ids.NodeID), - nodeIDsToIndices: make(map[ids.NodeID]int), - trackedPeers: make(map[ids.NodeID]set.Bits), - metrics: m, - }, nil -} - -func (g *gossipTracker) Tracked(peerID ids.NodeID) bool { - g.lock.RLock() - defer g.lock.RUnlock() - - _, ok := g.trackedPeers[peerID] - return ok -} - -func (g *gossipTracker) StartTrackingPeer(peerID ids.NodeID) bool { - g.lock.Lock() - defer g.lock.Unlock() - - // don't track the peer if it's already being tracked - if _, ok := g.trackedPeers[peerID]; ok { - return false - } - - // start tracking the peer. Initialize their bitset to zero since we - // haven't sent them anything yet. 
- g.trackedPeers[peerID] = set.NewBits() - - // emit metrics - g.metrics.trackedPeersSize.Set(float64(len(g.trackedPeers))) - - return true -} - -func (g *gossipTracker) StopTrackingPeer(peerID ids.NodeID) bool { - g.lock.Lock() - defer g.lock.Unlock() - - // only stop tracking peers that are actually being tracked - if _, ok := g.trackedPeers[peerID]; !ok { - return false - } - - // stop tracking the peer by removing them - delete(g.trackedPeers, peerID) - g.metrics.trackedPeersSize.Set(float64(len(g.trackedPeers))) - - return true -} - -func (g *gossipTracker) AddValidator(validator ValidatorID) bool { - g.lock.Lock() - defer g.lock.Unlock() - - // only add validators that are not already present - if _, ok := g.txIDsToNodeIDs[validator.TxID]; ok { - return false - } - if _, ok := g.nodeIDsToIndices[validator.NodeID]; ok { - return false - } - - // add the validator to the MSB of the bitset. - msb := len(g.validatorIDs) - g.txIDsToNodeIDs[validator.TxID] = validator.NodeID - g.nodeIDsToIndices[validator.NodeID] = msb - g.validatorIDs = append(g.validatorIDs, validator) - - // emit metrics - g.metrics.validatorsSize.Set(float64(len(g.validatorIDs))) - - return true -} - -func (g *gossipTracker) GetNodeID(txID ids.ID) (ids.NodeID, bool) { - g.lock.RLock() - defer g.lock.RUnlock() - - nodeID, ok := g.txIDsToNodeIDs[txID] - return nodeID, ok -} - -func (g *gossipTracker) RemoveValidator(validatorID ids.NodeID) bool { - g.lock.Lock() - defer g.lock.Unlock() - - // only remove validators that are already present - indexToRemove, ok := g.nodeIDsToIndices[validatorID] - if !ok { - return false - } - validatorToRemove := g.validatorIDs[indexToRemove] - - // swap the validator-to-be-removed with the validator in the last index - // if the element we're swapping with is ourselves, we can skip this swap - // since we only need to delete instead - lastIndex := len(g.validatorIDs) - 1 - if indexToRemove != lastIndex { - lastValidator := g.validatorIDs[lastIndex] - - 
g.nodeIDsToIndices[lastValidator.NodeID] = indexToRemove - g.validatorIDs[indexToRemove] = lastValidator - } - - delete(g.txIDsToNodeIDs, validatorToRemove.TxID) - delete(g.nodeIDsToIndices, validatorID) - g.validatorIDs = g.validatorIDs[:lastIndex] - - // Invariant: We must remove the validator from everyone else's validator - // bitsets to make sure that each validator occupies the same position in - // each bitset. - for _, knownPeers := range g.trackedPeers { - // swap the element to be removed with the msb - if indexToRemove != lastIndex { - if knownPeers.Contains(lastIndex) { - knownPeers.Add(indexToRemove) - } else { - knownPeers.Remove(indexToRemove) - } - } - knownPeers.Remove(lastIndex) - } - - // emit metrics - g.metrics.validatorsSize.Set(float64(len(g.validatorIDs))) - - return true -} - -func (g *gossipTracker) ResetValidator(validatorID ids.NodeID) bool { - g.lock.Lock() - defer g.lock.Unlock() - - // only reset validators that exist - indexToReset, ok := g.nodeIDsToIndices[validatorID] - if !ok { - return false - } - - for _, knownPeers := range g.trackedPeers { - knownPeers.Remove(indexToReset) - } - - return true -} - -// AddKnown invariants: -// -// 1. [peerID] SHOULD only be a nodeID that has been tracked with -// StartTrackingPeer(). -func (g *gossipTracker) AddKnown( - peerID ids.NodeID, - knownTxIDs []ids.ID, - txIDs []ids.ID, -) ([]ids.ID, bool) { - g.lock.Lock() - defer g.lock.Unlock() - - knownPeers, ok := g.trackedPeers[peerID] - if !ok { - return nil, false - } - for _, txID := range knownTxIDs { - nodeID, ok := g.txIDsToNodeIDs[txID] - if !ok { - // We don't know about this txID, this can happen due to differences - // between our current validator set and the peer's current - // validator set. - continue - } - - // Because we fetched the nodeID from [g.txIDsToNodeIDs], we are - // guaranteed that the index is populated. 
- index := g.nodeIDsToIndices[nodeID] - knownPeers.Add(index) - } - - validatorTxIDs := make([]ids.ID, 0, len(txIDs)) - for _, txID := range txIDs { - if _, ok := g.txIDsToNodeIDs[txID]; ok { - validatorTxIDs = append(validatorTxIDs, txID) - } - } - return validatorTxIDs, true -} - -func (g *gossipTracker) GetUnknown(peerID ids.NodeID) ([]ValidatorID, bool) { - g.lock.RLock() - defer g.lock.RUnlock() - - // return false if this peer isn't tracked - knownPeers, ok := g.trackedPeers[peerID] - if !ok { - return nil, false - } - - // Calculate the unknown information we need to send to this peer. We do - // this by computing the difference between the validators we know about - // and the validators we know we've sent to [peerID]. - result := make([]ValidatorID, 0, len(g.validatorIDs)) - for i, validatorID := range g.validatorIDs { - if !knownPeers.Contains(i) { - result = append(result, validatorID) - } - } - - return result, true -} diff --git a/avalanchego/network/peer/gossip_tracker_callback.go b/avalanchego/network/peer/gossip_tracker_callback.go deleted file mode 100644 index 28514ac1..00000000 --- a/avalanchego/network/peer/gossip_tracker_callback.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package peer - -import ( - "go.uber.org/zap" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/validators" - "github.com/ava-labs/avalanchego/utils/crypto/bls" - "github.com/ava-labs/avalanchego/utils/logging" -) - -var _ validators.SetCallbackListener = (*GossipTrackerCallback)(nil) - -// GossipTrackerCallback synchronizes GossipTracker's validator state with the -// validator set it's registered to. 
-type GossipTrackerCallback struct { - Log logging.Logger - GossipTracker GossipTracker -} - -// OnValidatorAdded adds [validatorID] to the set of validators that can be -// gossiped about -func (g *GossipTrackerCallback) OnValidatorAdded( - nodeID ids.NodeID, - _ *bls.PublicKey, - txID ids.ID, - _ uint64, -) { - vdr := ValidatorID{ - NodeID: nodeID, - TxID: txID, - } - if !g.GossipTracker.AddValidator(vdr) { - g.Log.Error("failed to add a validator", - zap.Stringer("nodeID", nodeID), - zap.Stringer("txID", txID), - ) - } -} - -// OnValidatorRemoved removes [validatorID] from the set of validators that can -// be gossiped about. -func (g *GossipTrackerCallback) OnValidatorRemoved(nodeID ids.NodeID, _ uint64) { - if !g.GossipTracker.RemoveValidator(nodeID) { - g.Log.Error("failed to remove a validator", - zap.Stringer("nodeID", nodeID), - ) - } -} - -// OnValidatorWeightChanged does nothing because PeerList gossip doesn't care -// about validator weights. -func (*GossipTrackerCallback) OnValidatorWeightChanged(ids.NodeID, uint64, uint64) {} diff --git a/avalanchego/network/peer/gossip_tracker_metrics.go b/avalanchego/network/peer/gossip_tracker_metrics.go deleted file mode 100644 index be167ebf..00000000 --- a/avalanchego/network/peer/gossip_tracker_metrics.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package peer - -import ( - "github.com/prometheus/client_golang/prometheus" - - "github.com/ava-labs/avalanchego/utils/wrappers" -) - -type gossipTrackerMetrics struct { - trackedPeersSize prometheus.Gauge - validatorsSize prometheus.Gauge -} - -func newGossipTrackerMetrics(registerer prometheus.Registerer, namespace string) (gossipTrackerMetrics, error) { - m := gossipTrackerMetrics{ - trackedPeersSize: prometheus.NewGauge( - prometheus.GaugeOpts{ - Namespace: namespace, - Name: "tracked_peers_size", - Help: "amount of peers that are being tracked", - }, - ), - validatorsSize: prometheus.NewGauge( - prometheus.GaugeOpts{ - Namespace: namespace, - Name: "validators_size", - Help: "number of validators this node is tracking", - }, - ), - } - - errs := wrappers.Errs{} - errs.Add( - registerer.Register(m.trackedPeersSize), - registerer.Register(m.validatorsSize), - ) - - return m, errs.Err -} diff --git a/avalanchego/network/peer/gossip_tracker_test.go b/avalanchego/network/peer/gossip_tracker_test.go deleted file mode 100644 index 1bd420c4..00000000 --- a/avalanchego/network/peer/gossip_tracker_test.go +++ /dev/null @@ -1,620 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package peer - -import ( - "testing" - - "github.com/prometheus/client_golang/prometheus" - - "github.com/stretchr/testify/require" - - "github.com/ava-labs/avalanchego/ids" -) - -var ( - // peers - p1 = ids.GenerateTestNodeID() - p2 = ids.GenerateTestNodeID() - p3 = ids.GenerateTestNodeID() - - // validators - v1 = ValidatorID{ - NodeID: ids.GenerateTestNodeID(), - TxID: ids.GenerateTestID(), - } - v2 = ValidatorID{ - NodeID: ids.GenerateTestNodeID(), - TxID: ids.GenerateTestID(), - } - v3 = ValidatorID{ - NodeID: ids.GenerateTestNodeID(), - TxID: ids.GenerateTestID(), - } -) - -func TestGossipTracker_Contains(t *testing.T) { - tests := []struct { - name string - track []ids.NodeID - contains ids.NodeID - expected bool - }{ - { - name: "empty", - track: []ids.NodeID{}, - contains: p1, - expected: false, - }, - { - name: "populated - does not contain", - track: []ids.NodeID{p1, p2}, - contains: p3, - expected: false, - }, - { - name: "populated - contains", - track: []ids.NodeID{p1, p2, p3}, - contains: p3, - expected: true, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - require := require.New(t) - - g, err := NewGossipTracker(prometheus.NewRegistry(), "foobar") - require.NoError(err) - - for _, add := range test.track { - require.True(g.StartTrackingPeer(add)) - } - - require.Equal(test.expected, g.Tracked(test.contains)) - }) - } -} - -func TestGossipTracker_StartTrackingPeer(t *testing.T) { - tests := []struct { - name string - toStartTracking []ids.NodeID - expected []bool - }{ - { - // Tracking new peers always works - name: "unique adds", - toStartTracking: []ids.NodeID{p1, p2, p3}, - expected: []bool{true, true, true}, - }, - { - // We shouldn't be able to track a peer more than once - name: "duplicate adds", - toStartTracking: []ids.NodeID{p1, p1, p1}, - expected: []bool{true, false, false}, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - require := require.New(t) - - g, err := 
NewGossipTracker(prometheus.NewRegistry(), "foobar") - require.NoError(err) - - for i, p := range test.toStartTracking { - require.Equal(test.expected[i], g.StartTrackingPeer(p)) - require.True(g.Tracked(p)) - } - }) - } -} - -func TestGossipTracker_StopTrackingPeer(t *testing.T) { - tests := []struct { - name string - toStartTracking []ids.NodeID - expectedStartTracking []bool - toStopTracking []ids.NodeID - expectedStopTracking []bool - }{ - { - // We should be able to stop tracking that we are tracking - name: "stop tracking tracked peers", - toStartTracking: []ids.NodeID{p1, p2, p3}, - toStopTracking: []ids.NodeID{p1, p2, p3}, - expectedStopTracking: []bool{true, true, true}, - }, - { - // We shouldn't be able to stop tracking peers we've stopped tracking - name: "stop tracking twice", - toStartTracking: []ids.NodeID{p1}, - toStopTracking: []ids.NodeID{p1, p1}, - expectedStopTracking: []bool{true, false}, - }, - { - // We shouldn't be able to stop tracking peers we were never tracking - name: "remove non-existent elements", - toStartTracking: []ids.NodeID{}, - expectedStartTracking: []bool{}, - toStopTracking: []ids.NodeID{p1, p2, p3}, - expectedStopTracking: []bool{false, false, false}, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - require := require.New(t) - - g, err := NewGossipTracker(prometheus.NewRegistry(), "foobar") - require.NoError(err) - - for _, add := range test.toStartTracking { - require.True(g.StartTrackingPeer(add)) - require.True(g.Tracked(add)) - } - - for i, p := range test.toStopTracking { - require.Equal(test.expectedStopTracking[i], g.StopTrackingPeer(p)) - } - }) - } -} - -func TestGossipTracker_AddValidator(t *testing.T) { - type args struct { - validator ValidatorID - } - - tests := []struct { - name string - validators []ValidatorID - args args - expected bool - }{ - { - name: "not present", - validators: []ValidatorID{}, - args: args{validator: v1}, - expected: true, - }, - { - name: "already 
present txID but with different nodeID", - validators: []ValidatorID{v1}, - args: args{validator: ValidatorID{ - NodeID: ids.GenerateTestNodeID(), - TxID: v1.TxID, - }}, - expected: false, - }, - { - name: "already present nodeID but with different txID", - validators: []ValidatorID{v1}, - args: args{validator: ValidatorID{ - NodeID: v1.NodeID, - TxID: ids.GenerateTestID(), - }}, - expected: false, - }, - { - name: "already present validatorID", - validators: []ValidatorID{v1}, - args: args{validator: v1}, - expected: false, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - require := require.New(t) - - g, err := NewGossipTracker(prometheus.NewRegistry(), "foobar") - require.NoError(err) - - for _, v := range test.validators { - require.True(g.AddValidator(v)) - } - - require.Equal(test.expected, g.AddValidator(test.args.validator)) - }) - } -} - -func TestGossipTracker_RemoveValidator(t *testing.T) { - type args struct { - id ids.NodeID - } - - tests := []struct { - name string - validators []ValidatorID - args args - expected bool - }{ - { - name: "not already present", - validators: []ValidatorID{}, - args: args{id: v1.NodeID}, - expected: false, - }, - { - name: "already present", - validators: []ValidatorID{v1}, - args: args{id: v1.NodeID}, - expected: true, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - require := require.New(t) - - g, err := NewGossipTracker(prometheus.NewRegistry(), "foobar") - require.NoError(err) - - for _, v := range test.validators { - require.True(g.AddValidator(v)) - } - - require.Equal(test.expected, g.RemoveValidator(test.args.id)) - }) - } -} - -func TestGossipTracker_ResetValidator(t *testing.T) { - type args struct { - id ids.NodeID - } - - tests := []struct { - name string - validators []ValidatorID - args args - expected bool - }{ - { - name: "non-existent validator", - validators: []ValidatorID{}, - args: args{id: v1.NodeID}, - expected: false, - }, - { - 
name: "existing validator", - validators: []ValidatorID{v1}, - args: args{id: v1.NodeID}, - expected: true, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - require := require.New(t) - - g, err := NewGossipTracker(prometheus.NewRegistry(), "foobar") - require.NoError(err) - - require.True(g.StartTrackingPeer(p1)) - - for _, v := range test.validators { - require.True(g.AddValidator(v)) - g.AddKnown(p1, []ids.ID{v.TxID}, nil) - - unknown, ok := g.GetUnknown(p1) - require.True(ok) - require.NotContains(unknown, v) - } - - require.Equal(test.expected, g.ResetValidator(test.args.id)) - - for _, v := range test.validators { - unknown, ok := g.GetUnknown(p1) - require.True(ok) - require.Contains(unknown, v) - } - }) - } -} - -func TestGossipTracker_AddKnown(t *testing.T) { - type args struct { - peerID ids.NodeID - txIDs []ids.ID - } - - tests := []struct { - name string - trackedPeers []ids.NodeID - validators []ValidatorID - args args - expectedTxIDs []ids.ID - expectedOk bool - }{ - { - // We should not be able to update an untracked peer - name: "untracked peer - empty", - trackedPeers: []ids.NodeID{}, - validators: []ValidatorID{}, - args: args{peerID: p1, txIDs: []ids.ID{}}, - expectedTxIDs: nil, - expectedOk: false, - }, - { - // We should not be able to update an untracked peer - name: "untracked peer - populated", - trackedPeers: []ids.NodeID{p2, p3}, - validators: []ValidatorID{}, - args: args{peerID: p1, txIDs: []ids.ID{}}, - expectedTxIDs: nil, - expectedOk: false, - }, - { - // We shouldn't be able to look up a peer that isn't tracked - name: "untracked peer - unknown validator", - trackedPeers: []ids.NodeID{}, - validators: []ValidatorID{}, - args: args{peerID: p1, txIDs: []ids.ID{v1.TxID}}, - expectedTxIDs: nil, - expectedOk: false, - }, - { - // We shouldn't fail on a validator that's not registered - name: "tracked peer - unknown validator", - trackedPeers: []ids.NodeID{p1}, - validators: []ValidatorID{}, - args: 
args{peerID: p1, txIDs: []ids.ID{v1.TxID}}, - expectedTxIDs: []ids.ID{}, - expectedOk: true, - }, - { - // We should be able to update a tracked validator - name: "update tracked validator", - trackedPeers: []ids.NodeID{p1, p2, p3}, - validators: []ValidatorID{v1}, - args: args{peerID: p1, txIDs: []ids.ID{v1.TxID}}, - expectedTxIDs: []ids.ID{v1.TxID}, - expectedOk: true, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - require := require.New(t) - - g, err := NewGossipTracker(prometheus.NewRegistry(), "foobar") - require.NoError(err) - - for _, p := range test.trackedPeers { - require.True(g.StartTrackingPeer(p)) - require.True(g.Tracked(p)) - } - - for _, v := range test.validators { - require.True(g.AddValidator(v)) - } - - txIDs, ok := g.AddKnown(test.args.peerID, test.args.txIDs, test.args.txIDs) - require.Equal(test.expectedOk, ok) - require.Equal(test.expectedTxIDs, txIDs) - }) - } -} - -func TestGossipTracker_GetUnknown(t *testing.T) { - tests := []struct { - name string - peerID ids.NodeID - peersToTrack []ids.NodeID - validators []ValidatorID - expectedUnknown []ValidatorID - expectedOk bool - }{ - { - name: "non tracked peer", - peerID: p1, - validators: []ValidatorID{v2}, - peersToTrack: []ids.NodeID{}, - expectedUnknown: nil, - expectedOk: false, - }, - { - name: "only validators", - peerID: p1, - peersToTrack: []ids.NodeID{p1}, - validators: []ValidatorID{v2}, - expectedUnknown: []ValidatorID{v2}, - expectedOk: true, - }, - { - name: "only non-validators", - peerID: p1, - peersToTrack: []ids.NodeID{p1, p2}, - validators: []ValidatorID{}, - expectedUnknown: []ValidatorID{}, - expectedOk: true, - }, - { - name: "validators and non-validators", - peerID: p1, - peersToTrack: []ids.NodeID{p1, p3}, - validators: []ValidatorID{v2}, - expectedUnknown: []ValidatorID{v2}, - expectedOk: true, - }, - { - name: "same as limit", - peerID: p1, - peersToTrack: []ids.NodeID{p1}, - validators: []ValidatorID{v2, v3}, - expectedUnknown: 
[]ValidatorID{v2, v3}, - expectedOk: true, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - require := require.New(t) - - g, err := NewGossipTracker(prometheus.NewRegistry(), "foobar") - require.NoError(err) - - // add our validators - for _, validator := range test.validators { - require.True(g.AddValidator(validator)) - } - - // start tracking our peers - for _, nonValidator := range test.peersToTrack { - require.True(g.StartTrackingPeer(nonValidator)) - require.True(g.Tracked(nonValidator)) - } - - // get the unknown peers for this peer - result, ok := g.GetUnknown(test.peerID) - require.Equal(test.expectedOk, ok) - require.Len(result, len(test.expectedUnknown)) - for _, v := range test.expectedUnknown { - require.Contains(result, v) - } - }) - } -} - -func TestGossipTracker_E2E(t *testing.T) { - require := require.New(t) - - g, err := NewGossipTracker(prometheus.NewRegistry(), "foobar") - require.NoError(err) - - // [v1, v2, v3] are validators - require.True(g.AddValidator(v1)) - require.True(g.AddValidator(v2)) - - // we should get an empty unknown since we're not tracking anything - unknown, ok := g.GetUnknown(p1) - require.False(ok) - require.Nil(unknown) - - // we should get a unknown of [v1, v2] since v1 and v2 are registered - require.True(g.StartTrackingPeer(p1)) - require.True(g.Tracked(p1)) - - // check p1's unknown - unknown, ok = g.GetUnknown(p1) - require.True(ok) - require.Contains(unknown, v1) - require.Contains(unknown, v2) - require.Len(unknown, 2) - - // Check p2's unknown. We should get nothing since we're not tracking it - // yet. 
- unknown, ok = g.GetUnknown(p2) - require.False(ok) - require.Nil(unknown) - - // Start tracking p2 - require.True(g.StartTrackingPeer(p2)) - - // check p2's unknown - unknown, ok = g.GetUnknown(p2) - require.True(ok) - require.Contains(unknown, v1) - require.Contains(unknown, v2) - require.Len(unknown, 2) - - // p1 now knows about v1, but not v2, so it should see [v2] in its unknown - // p2 still knows nothing, so it should see both - txIDs, ok := g.AddKnown(p1, []ids.ID{v1.TxID}, []ids.ID{v1.TxID}) - require.True(ok) - require.Equal([]ids.ID{v1.TxID}, txIDs) - - // p1 should have an unknown of [v2], since it knows v1 - unknown, ok = g.GetUnknown(p1) - require.True(ok) - require.Contains(unknown, v2) - require.Len(unknown, 1) - - // p2 should have a unknown of [v1, v2], since it knows nothing - unknown, ok = g.GetUnknown(p2) - require.True(ok) - require.Contains(unknown, v1) - require.Contains(unknown, v2) - require.Len(unknown, 2) - - // Add v3 - require.True(g.AddValidator(v3)) - - // track p3, who knows of v1, v2, and v3 - // p1 and p2 still don't know of v3 - require.True(g.StartTrackingPeer(p3)) - - txIDs, ok = g.AddKnown(p3, []ids.ID{v1.TxID, v2.TxID, v3.TxID}, []ids.ID{v1.TxID, v2.TxID, v3.TxID}) - require.True(ok) - require.Equal([]ids.ID{v1.TxID, v2.TxID, v3.TxID}, txIDs) - - // p1 doesn't know about [v2, v3] - unknown, ok = g.GetUnknown(p1) - require.True(ok) - require.Contains(unknown, v2) - require.Contains(unknown, v3) - require.Len(unknown, 2) - - // p2 doesn't know about [v1, v2, v3] - unknown, ok = g.GetUnknown(p2) - require.True(ok) - require.Contains(unknown, v1) - require.Contains(unknown, v2) - require.Contains(unknown, v3) - require.Len(unknown, 3) - - // p3 knows about everyone - unknown, ok = g.GetUnknown(p3) - require.True(ok) - require.Empty(unknown) - - // stop tracking p2 - require.True(g.StopTrackingPeer(p2)) - unknown, ok = g.GetUnknown(p2) - require.False(ok) - require.Nil(unknown) - - // p1 doesn't know about [v2, v3] because v2 is 
still registered as - // a validator - unknown, ok = g.GetUnknown(p1) - require.True(ok) - require.Contains(unknown, v2) - require.Contains(unknown, v3) - require.Len(unknown, 2) - - // Remove p2 from the validator set - require.True(g.RemoveValidator(v2.NodeID)) - - // p1 doesn't know about [v3] since v2 left the validator set - unknown, ok = g.GetUnknown(p1) - require.True(ok) - require.Contains(unknown, v3) - require.Len(unknown, 1) - - // p3 knows about everyone since it learned about v1 and v3 earlier. - unknown, ok = g.GetUnknown(p3) - require.Empty(unknown) - require.True(ok) -} - -func TestGossipTracker_Regression_IncorrectTxIDDeletion(t *testing.T) { - require := require.New(t) - - g, err := NewGossipTracker(prometheus.NewRegistry(), "foobar") - require.NoError(err) - - require.True(g.AddValidator(v1)) - require.True(g.AddValidator(v2)) - - require.True(g.RemoveValidator(v1.NodeID)) - - require.False(g.AddValidator(ValidatorID{ - NodeID: ids.GenerateTestNodeID(), - TxID: v2.TxID, - })) -} diff --git a/avalanchego/network/peer/info.go b/avalanchego/network/peer/info.go index 45f7a3cd..00ccaec7 100644 --- a/avalanchego/network/peer/info.go +++ b/avalanchego/network/peer/info.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package peer @@ -8,6 +8,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/json" + "github.com/ava-labs/avalanchego/utils/set" ) type Info struct { @@ -19,5 +20,7 @@ type Info struct { LastReceived time.Time `json:"lastReceived"` ObservedUptime json.Uint32 `json:"observedUptime"` ObservedSubnetUptimes map[ids.ID]json.Uint32 `json:"observedSubnetUptimes"` - TrackedSubnets []ids.ID `json:"trackedSubnets"` + TrackedSubnets set.Set[ids.ID] `json:"trackedSubnets"` + SupportedACPs set.Set[uint32] `json:"supportedACPs"` + ObjectedACPs set.Set[uint32] `json:"objectedACPs"` } diff --git a/avalanchego/network/peer/ip.go b/avalanchego/network/peer/ip.go index 720a1cd8..a873f166 100644 --- a/avalanchego/network/peer/ip.go +++ b/avalanchego/network/peer/ip.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package peer @@ -6,13 +6,22 @@ package peer import ( "crypto" "crypto/rand" - "crypto/x509" + "errors" + "fmt" + "time" + "github.com/ava-labs/avalanchego/staking" + "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/hashing" "github.com/ava-labs/avalanchego/utils/ips" "github.com/ava-labs/avalanchego/utils/wrappers" ) +var ( + errTimestampTooFarInFuture = errors.New("timestamp too far in the future") + errInvalidTLSSignature = errors.New("invalid TLS signature") +) + // UnsignedIP is used for a validator to claim an IP. The [Timestamp] is used to // ensure that the most updated IP claim is tracked by peers for a given // validator. @@ -22,21 +31,25 @@ type UnsignedIP struct { } // Sign this IP with the provided signer and return the signed IP. 
-func (ip *UnsignedIP) Sign(signer crypto.Signer) (*SignedIP, error) { - sig, err := signer.Sign( +func (ip *UnsignedIP) Sign(tlsSigner crypto.Signer, blsSigner *bls.SecretKey) (*SignedIP, error) { + ipBytes := ip.bytes() + tlsSignature, err := tlsSigner.Sign( rand.Reader, - hashing.ComputeHash256(ip.bytes()), + hashing.ComputeHash256(ipBytes), crypto.SHA256, ) + blsSignature := bls.SignProofOfPossession(blsSigner, ipBytes) return &SignedIP{ - UnsignedIP: *ip, - Signature: sig, + UnsignedIP: *ip, + TLSSignature: tlsSignature, + BLSSignature: blsSignature, + BLSSignatureBytes: bls.SignatureToBytes(blsSignature), }, err } func (ip *UnsignedIP) bytes() []byte { p := wrappers.Packer{ - Bytes: make([]byte, wrappers.IPLen+wrappers.LongLen), + Bytes: make([]byte, ips.IPPortLen+wrappers.LongLen), } ips.PackIP(&p, ip.IPPort) p.PackLong(ip.Timestamp) @@ -46,13 +59,29 @@ func (ip *UnsignedIP) bytes() []byte { // SignedIP is a wrapper of an UnsignedIP with the signature from a signer. type SignedIP struct { UnsignedIP - Signature []byte + TLSSignature []byte + BLSSignature *bls.Signature + BLSSignatureBytes []byte } -func (ip *SignedIP) Verify(cert *x509.Certificate) error { - return cert.CheckSignature( - cert.SignatureAlgorithm, +// Returns nil if: +// * [ip.Timestamp] is not after [maxTimestamp]. +// * [ip.TLSSignature] is a valid signature over [ip.UnsignedIP] from [cert]. 
+func (ip *SignedIP) Verify( + cert *staking.Certificate, + maxTimestamp time.Time, +) error { + maxUnixTimestamp := uint64(maxTimestamp.Unix()) + if ip.Timestamp > maxUnixTimestamp { + return fmt.Errorf("%w: timestamp %d > maxTimestamp %d", errTimestampTooFarInFuture, ip.Timestamp, maxUnixTimestamp) + } + + if err := staking.CheckSignature( + cert, ip.UnsignedIP.bytes(), - ip.Signature, - ) + ip.TLSSignature, + ); err != nil { + return fmt.Errorf("%w: %w", errInvalidTLSSignature, err) + } + return nil } diff --git a/avalanchego/network/peer/ip_signer.go b/avalanchego/network/peer/ip_signer.go index b524d346..1c38d4e6 100644 --- a/avalanchego/network/peer/ip_signer.go +++ b/avalanchego/network/peer/ip_signer.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package peer @@ -7,15 +7,17 @@ import ( "crypto" "sync" + "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/ips" "github.com/ava-labs/avalanchego/utils/timer/mockable" ) // IPSigner will return a signedIP for the current value of our dynamic IP. 
type IPSigner struct { - ip ips.DynamicIPPort - clock mockable.Clock - signer crypto.Signer + ip ips.DynamicIPPort + clock mockable.Clock + tlsSigner crypto.Signer + blsSigner *bls.SecretKey // Must be held while accessing [signedIP] signedIPLock sync.RWMutex @@ -26,11 +28,13 @@ type IPSigner struct { func NewIPSigner( ip ips.DynamicIPPort, - signer crypto.Signer, + tlsSigner crypto.Signer, + blsSigner *bls.SecretKey, ) *IPSigner { return &IPSigner{ - ip: ip, - signer: signer, + ip: ip, + tlsSigner: tlsSigner, + blsSigner: blsSigner, } } @@ -67,7 +71,7 @@ func (s *IPSigner) GetSignedIP() (*SignedIP, error) { IPPort: ip, Timestamp: s.clock.Unix(), } - signedIP, err := unsignedIP.Sign(s.signer) + signedIP, err := unsignedIP.Sign(s.tlsSigner, s.blsSigner) if err != nil { return nil, err } diff --git a/avalanchego/network/peer/ip_signer_test.go b/avalanchego/network/peer/ip_signer_test.go index 1633c7e6..315becd8 100644 --- a/avalanchego/network/peer/ip_signer_test.go +++ b/avalanchego/network/peer/ip_signer_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package peer @@ -12,6 +12,7 @@ import ( "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/staking" + "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/ips" ) @@ -26,30 +27,32 @@ func TestIPSigner(t *testing.T) { tlsCert, err := staking.NewTLSCert() require.NoError(err) - key := tlsCert.PrivateKey.(crypto.Signer) + tlsKey := tlsCert.PrivateKey.(crypto.Signer) + blsKey, err := bls.NewSecretKey() + require.NoError(err) - s := NewIPSigner(dynIP, key) + s := NewIPSigner(dynIP, tlsKey, blsKey) s.clock.Set(time.Unix(10, 0)) signedIP1, err := s.GetSignedIP() require.NoError(err) - require.EqualValues(dynIP.IPPort(), signedIP1.IPPort) - require.EqualValues(10, signedIP1.Timestamp) + require.Equal(dynIP.IPPort(), signedIP1.IPPort) + require.Equal(uint64(10), signedIP1.Timestamp) s.clock.Set(time.Unix(11, 0)) signedIP2, err := s.GetSignedIP() require.NoError(err) - require.EqualValues(dynIP.IPPort(), signedIP2.IPPort) - require.EqualValues(10, signedIP2.Timestamp) - require.EqualValues(signedIP1.Signature, signedIP2.Signature) + require.Equal(dynIP.IPPort(), signedIP2.IPPort) + require.Equal(uint64(10), signedIP2.Timestamp) + require.Equal(signedIP1.TLSSignature, signedIP2.TLSSignature) dynIP.SetIP(net.IPv4(1, 2, 3, 4)) signedIP3, err := s.GetSignedIP() require.NoError(err) - require.EqualValues(dynIP.IPPort(), signedIP3.IPPort) - require.EqualValues(11, signedIP3.Timestamp) - require.NotEqualValues(signedIP2.Signature, signedIP3.Signature) + require.Equal(dynIP.IPPort(), signedIP3.IPPort) + require.Equal(uint64(11), signedIP3.Timestamp) + require.NotEqual(signedIP2.TLSSignature, signedIP3.TLSSignature) } diff --git a/avalanchego/network/peer/ip_test.go b/avalanchego/network/peer/ip_test.go new file mode 100644 index 00000000..dd39d5a8 --- /dev/null +++ b/avalanchego/network/peer/ip_test.go @@ -0,0 +1,117 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. 
+// See the file LICENSE for licensing terms. + +package peer + +import ( + "crypto" + "net" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/staking" + "github.com/ava-labs/avalanchego/utils/crypto/bls" + "github.com/ava-labs/avalanchego/utils/ips" +) + +func TestSignedIpVerify(t *testing.T) { + tlsCert1, err := staking.NewTLSCert() + require.NoError(t, err) + cert1 := staking.CertificateFromX509(tlsCert1.Leaf) + require.NoError(t, staking.ValidateCertificate(cert1)) + tlsKey1 := tlsCert1.PrivateKey.(crypto.Signer) + blsKey1, err := bls.NewSecretKey() + require.NoError(t, err) + + tlsCert2, err := staking.NewTLSCert() + require.NoError(t, err) + cert2 := staking.CertificateFromX509(tlsCert2.Leaf) + require.NoError(t, staking.ValidateCertificate(cert2)) + + now := time.Now() + + type test struct { + name string + tlsSigner crypto.Signer + blsSigner *bls.SecretKey + expectedCert *staking.Certificate + ip UnsignedIP + maxTimestamp time.Time + expectedErr error + } + + tests := []test{ + { + name: "valid (before max time)", + tlsSigner: tlsKey1, + blsSigner: blsKey1, + expectedCert: cert1, + ip: UnsignedIP{ + IPPort: ips.IPPort{ + IP: net.IPv4(1, 2, 3, 4), + Port: 1, + }, + Timestamp: uint64(now.Unix()) - 1, + }, + maxTimestamp: now, + expectedErr: nil, + }, + { + name: "valid (at max time)", + tlsSigner: tlsKey1, + blsSigner: blsKey1, + expectedCert: cert1, + ip: UnsignedIP{ + IPPort: ips.IPPort{ + IP: net.IPv4(1, 2, 3, 4), + Port: 1, + }, + Timestamp: uint64(now.Unix()), + }, + maxTimestamp: now, + expectedErr: nil, + }, + { + name: "timestamp too far ahead", + tlsSigner: tlsKey1, + blsSigner: blsKey1, + expectedCert: cert1, + ip: UnsignedIP{ + IPPort: ips.IPPort{ + IP: net.IPv4(1, 2, 3, 4), + Port: 1, + }, + Timestamp: uint64(now.Unix()) + 1, + }, + maxTimestamp: now, + expectedErr: errTimestampTooFarInFuture, + }, + { + name: "sig from wrong cert", + tlsSigner: tlsKey1, + blsSigner: blsKey1, + expectedCert: cert2, 
// note this isn't cert1 + ip: UnsignedIP{ + IPPort: ips.IPPort{ + IP: net.IPv4(1, 2, 3, 4), + Port: 1, + }, + Timestamp: uint64(now.Unix()), + }, + maxTimestamp: now, + expectedErr: errInvalidTLSSignature, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + signedIP, err := tt.ip.Sign(tt.tlsSigner, tt.blsSigner) + require.NoError(t, err) + + err = signedIP.Verify(tt.expectedCert, tt.maxTimestamp) + require.ErrorIs(t, err, tt.expectedErr) + }) + } +} diff --git a/avalanchego/network/peer/message_queue.go b/avalanchego/network/peer/message_queue.go index b9d38996..f2ccef6d 100644 --- a/avalanchego/network/peer/message_queue.go +++ b/avalanchego/network/peer/message_queue.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package peer diff --git a/avalanchego/network/peer/message_queue_test.go b/avalanchego/network/peer/message_queue_test.go index 7c7f4d3a..4b9b63f4 100644 --- a/avalanchego/network/peer/message_queue_test.go +++ b/avalanchego/network/peer/message_queue_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package peer @@ -20,7 +20,7 @@ func TestMessageQueue(t *testing.T) { expectFail := false q := NewBlockingMessageQueue( - SendFailedFunc(func(msg message.OutboundMessage) { + SendFailedFunc(func(message.OutboundMessage) { require.True(expectFail) }), logging.NoLog{}, @@ -35,7 +35,8 @@ func TestMessageQueue(t *testing.T) { for i := 0; i < numToSend; i++ { testID := ids.GenerateTestID() testID2 := ids.GenerateTestID() - m, err := mc.Pong(uint32(i), + m, err := mc.Ping( + uint32(i), []*p2p.SubnetUptime{ {SubnetId: testID[:], Uptime: uint32(i)}, {SubnetId: testID2[:], Uptime: uint32(i)}, diff --git a/avalanchego/network/peer/metrics.go b/avalanchego/network/peer/metrics.go index ea5290ae..1dcfcdd6 100644 --- a/avalanchego/network/peer/metrics.go +++ b/avalanchego/network/peer/metrics.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package peer @@ -7,7 +7,6 @@ import ( "fmt" "github.com/prometheus/client_golang/prometheus" - "go.uber.org/zap" "github.com/ava-labs/avalanchego/message" @@ -81,6 +80,7 @@ func NewMessageMetrics( type Metrics struct { Log logging.Logger + ClockSkew metric.Averager FailedToParse prometheus.Counter MessageMetrics map[message.Op]*MessageMetrics } @@ -107,6 +107,14 @@ func NewMetrics( for _, op := range message.ExternalOps { m.MessageMetrics[op] = NewMessageMetrics(op, namespace, registerer, &errs) } + + m.ClockSkew = metric.NewAveragerWithErrs( + namespace, + "clock_skew", + "clock skew during peer handshake", + registerer, + &errs, + ) return m, errs.Err } diff --git a/avalanchego/network/peer/mock_gossip_tracker.go b/avalanchego/network/peer/mock_gossip_tracker.go deleted file mode 100644 index 9ab60bcc..00000000 --- a/avalanchego/network/peer/mock_gossip_tracker.go +++ /dev/null @@ -1,167 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
-// See the file LICENSE for licensing terms. - -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/ava-labs/avalanchego/network/peer (interfaces: GossipTracker) - -// Package peer is a generated GoMock package. -package peer - -import ( - reflect "reflect" - - ids "github.com/ava-labs/avalanchego/ids" - gomock "github.com/golang/mock/gomock" -) - -// MockGossipTracker is a mock of GossipTracker interface. -type MockGossipTracker struct { - ctrl *gomock.Controller - recorder *MockGossipTrackerMockRecorder -} - -// MockGossipTrackerMockRecorder is the mock recorder for MockGossipTracker. -type MockGossipTrackerMockRecorder struct { - mock *MockGossipTracker -} - -// NewMockGossipTracker creates a new mock instance. -func NewMockGossipTracker(ctrl *gomock.Controller) *MockGossipTracker { - mock := &MockGossipTracker{ctrl: ctrl} - mock.recorder = &MockGossipTrackerMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockGossipTracker) EXPECT() *MockGossipTrackerMockRecorder { - return m.recorder -} - -// AddKnown mocks base method. -func (m *MockGossipTracker) AddKnown(arg0 ids.NodeID, arg1, arg2 []ids.ID) ([]ids.ID, bool) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AddKnown", arg0, arg1, arg2) - ret0, _ := ret[0].([]ids.ID) - ret1, _ := ret[1].(bool) - return ret0, ret1 -} - -// AddKnown indicates an expected call of AddKnown. -func (mr *MockGossipTrackerMockRecorder) AddKnown(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddKnown", reflect.TypeOf((*MockGossipTracker)(nil).AddKnown), arg0, arg1, arg2) -} - -// AddValidator mocks base method. -func (m *MockGossipTracker) AddValidator(arg0 ValidatorID) bool { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AddValidator", arg0) - ret0, _ := ret[0].(bool) - return ret0 -} - -// AddValidator indicates an expected call of AddValidator. 
-func (mr *MockGossipTrackerMockRecorder) AddValidator(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddValidator", reflect.TypeOf((*MockGossipTracker)(nil).AddValidator), arg0) -} - -// GetNodeID mocks base method. -func (m *MockGossipTracker) GetNodeID(arg0 ids.ID) (ids.NodeID, bool) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetNodeID", arg0) - ret0, _ := ret[0].(ids.NodeID) - ret1, _ := ret[1].(bool) - return ret0, ret1 -} - -// GetNodeID indicates an expected call of GetNodeID. -func (mr *MockGossipTrackerMockRecorder) GetNodeID(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNodeID", reflect.TypeOf((*MockGossipTracker)(nil).GetNodeID), arg0) -} - -// GetUnknown mocks base method. -func (m *MockGossipTracker) GetUnknown(arg0 ids.NodeID) ([]ValidatorID, bool) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetUnknown", arg0) - ret0, _ := ret[0].([]ValidatorID) - ret1, _ := ret[1].(bool) - return ret0, ret1 -} - -// GetUnknown indicates an expected call of GetUnknown. -func (mr *MockGossipTrackerMockRecorder) GetUnknown(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUnknown", reflect.TypeOf((*MockGossipTracker)(nil).GetUnknown), arg0) -} - -// RemoveValidator mocks base method. -func (m *MockGossipTracker) RemoveValidator(arg0 ids.NodeID) bool { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RemoveValidator", arg0) - ret0, _ := ret[0].(bool) - return ret0 -} - -// RemoveValidator indicates an expected call of RemoveValidator. -func (mr *MockGossipTrackerMockRecorder) RemoveValidator(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveValidator", reflect.TypeOf((*MockGossipTracker)(nil).RemoveValidator), arg0) -} - -// ResetValidator mocks base method. 
-func (m *MockGossipTracker) ResetValidator(arg0 ids.NodeID) bool { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ResetValidator", arg0) - ret0, _ := ret[0].(bool) - return ret0 -} - -// ResetValidator indicates an expected call of ResetValidator. -func (mr *MockGossipTrackerMockRecorder) ResetValidator(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResetValidator", reflect.TypeOf((*MockGossipTracker)(nil).ResetValidator), arg0) -} - -// StartTrackingPeer mocks base method. -func (m *MockGossipTracker) StartTrackingPeer(arg0 ids.NodeID) bool { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "StartTrackingPeer", arg0) - ret0, _ := ret[0].(bool) - return ret0 -} - -// StartTrackingPeer indicates an expected call of StartTrackingPeer. -func (mr *MockGossipTrackerMockRecorder) StartTrackingPeer(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StartTrackingPeer", reflect.TypeOf((*MockGossipTracker)(nil).StartTrackingPeer), arg0) -} - -// StopTrackingPeer mocks base method. -func (m *MockGossipTracker) StopTrackingPeer(arg0 ids.NodeID) bool { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "StopTrackingPeer", arg0) - ret0, _ := ret[0].(bool) - return ret0 -} - -// StopTrackingPeer indicates an expected call of StopTrackingPeer. -func (mr *MockGossipTrackerMockRecorder) StopTrackingPeer(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StopTrackingPeer", reflect.TypeOf((*MockGossipTracker)(nil).StopTrackingPeer), arg0) -} - -// Tracked mocks base method. -func (m *MockGossipTracker) Tracked(arg0 ids.NodeID) bool { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Tracked", arg0) - ret0, _ := ret[0].(bool) - return ret0 -} - -// Tracked indicates an expected call of Tracked. 
-func (mr *MockGossipTrackerMockRecorder) Tracked(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Tracked", reflect.TypeOf((*MockGossipTracker)(nil).Tracked), arg0) -} diff --git a/avalanchego/network/peer/msg_length.go b/avalanchego/network/peer/msg_length.go index 2cbcf5cd..62503491 100644 --- a/avalanchego/network/peer/msg_length.go +++ b/avalanchego/network/peer/msg_length.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package peer @@ -17,8 +17,10 @@ var ( errMaxMessageLengthExceeded = errors.New("maximum message length exceeded") ) -// Used to mask the most significant bit to indicate that the message format -// uses protocol buffers. +// Used to mask the most significant bit that was used to indicate that the +// message format uses protocol buffers. +// +// TODO: Once the v1.11 is activated, this mask should be removed. const bitmaskCodec = uint32(1 << 31) // Assumes the specified [msgLen] will never >= 1<<31. @@ -34,16 +36,8 @@ func writeMsgLen(msgLen uint32, maxMsgLen uint32) ([wrappers.IntLen]byte, error) return [wrappers.IntLen]byte{}, fmt.Errorf("%w; the message length %d exceeds the specified limit %d", errMaxMessageLengthExceeded, msgLen, maxMsgLen) } - x := msgLen - - // Mask the most significant bit to denote it's using proto. This bit isn't - // read anymore, because all the messages use proto. However, it is set for - // backwards compatibility. - // TODO: Once the v1.10 is activated, this mask should be removed. 
- x |= bitmaskCodec - b := [wrappers.IntLen]byte{} - binary.BigEndian.PutUint32(b[:], x) + binary.BigEndian.PutUint32(b[:], msgLen) return b, nil } diff --git a/avalanchego/network/peer/msg_length_test.go b/avalanchego/network/peer/msg_length_test.go index 9d7a3399..97866a7d 100644 --- a/avalanchego/network/peer/msg_length_test.go +++ b/avalanchego/network/peer/msg_length_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package peer @@ -120,7 +120,10 @@ func TestReadMsgLen(t *testing.T) { msgLenBytes, err := writeMsgLen(msgLen, tv.msgLimit) require.NoError(err) - require.Equal(tv.msgLenBytes, msgLenBytes[:]) + + msgLenAfterWrite, err := readMsgLen(msgLenBytes[:], tv.msgLimit) + require.NoError(err) + require.Equal(tv.expectedMsgLen, msgLenAfterWrite) } } diff --git a/avalanchego/network/peer/network.go b/avalanchego/network/peer/network.go index fc136f0b..b8fb0181 100644 --- a/avalanchego/network/peer/network.go +++ b/avalanchego/network/peer/network.go @@ -1,11 +1,11 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package peer import ( "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/proto/pb/p2p" + "github.com/ava-labs/avalanchego/utils/bloom" "github.com/ava-labs/avalanchego/utils/ips" ) @@ -19,15 +19,9 @@ type Network interface { // connection is no longer desired and should be terminated. AllowConnection(peerID ids.NodeID) bool - // Track allows the peer to notify the network of a potential new peer to - // connect to, given the [ips] of the peers it sent us during the peer - // handshake. - // - // Returns which IPs should not be gossipped to this node again. 
- Track(peerID ids.NodeID, ips []*ips.ClaimedIPPort) ([]*p2p.PeerAck, error) - - // MarkTracked stops sending gossip about [ips] to [peerID]. - MarkTracked(peerID ids.NodeID, ips []*p2p.PeerAck) error + // Track allows the peer to notify the network of potential new peers to + // connect to. + Track(ips []*ips.ClaimedIPPort) error // Disconnected is called when the peer finishes shutting down. It is not // guaranteed that [Connected] was called for the provided peer. However, it @@ -35,6 +29,13 @@ type Network interface { // for a given [Peer] object. Disconnected(peerID ids.NodeID) - // Peers returns peers that [peerID] might not know about. - Peers(peerID ids.NodeID) ([]ips.ClaimedIPPort, error) + // KnownPeers returns the bloom filter of the known peers. + KnownPeers() (bloomFilter []byte, salt []byte) + + // Peers returns peers that are not known. + Peers( + peerID ids.NodeID, + knownPeers *bloom.ReadFilter, + peerSalt []byte, + ) []*ips.ClaimedIPPort } diff --git a/avalanchego/network/peer/peer.go b/avalanchego/network/peer/peer.go index dfe67084..216889d7 100644 --- a/avalanchego/network/peer/peer.go +++ b/avalanchego/network/peer/peer.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package peer @@ -6,7 +6,6 @@ package peer import ( "bufio" "context" - "crypto/x509" "errors" "io" "math" @@ -20,8 +19,11 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/message" "github.com/ava-labs/avalanchego/proto/pb/p2p" + "github.com/ava-labs/avalanchego/staking" "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/bloom" "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/ips" "github.com/ava-labs/avalanchego/utils/json" "github.com/ava-labs/avalanchego/utils/set" @@ -29,6 +31,10 @@ import ( "github.com/ava-labs/avalanchego/version" ) +// maxBloomSaltLen restricts the allowed size of the bloom salt to prevent +// excessively expensive bloom filter contains checks. +const maxBloomSaltLen = 32 + var ( errClosed = errors.New("closed") @@ -43,7 +49,7 @@ type Peer interface { // Cert returns the certificate that the remote peer is using to // authenticate their messages. - Cert() *x509.Certificate + Cert() *staking.Certificate // LastSent returns the last time a message was sent to the peer. LastSent() time.Time @@ -91,6 +97,11 @@ type Peer interface { // sent. StartSendPeerList() + // StartSendGetPeerList attempts to send a GetPeerList message to this peer + // on this peer's gossip routine. It is not guaranteed that a GetPeerList + // will be sent. + StartSendGetPeerList() + // StartClose will begin shutting down the peer. It will not block. StartClose() @@ -112,7 +123,7 @@ type peer struct { // [cert] is this peer's certificate, specifically the leaf of the // certificate chain they provided. - cert *x509.Certificate + cert *staking.Certificate // node ID of this peer. id ids.NodeID @@ -120,27 +131,38 @@ type peer struct { // queue of messages to send to this peer. messageQueue MessageQueue - // ip is the claimed IP the peer gave us in the Version message. 
+ // ip is the claimed IP the peer gave us in the Handshake message. ip *SignedIP // version is the claimed version the peer is running that we received in - // the Version message. + // the Handshake message. version *version.Application - // trackedSubnets is the subset of subnetIDs the peer sent us in the Version + // trackedSubnets is the subset of subnetIDs the peer sent us in the Handshake // message that we are also tracking. trackedSubnets set.Set[ids.ID] + // options of ACPs provided in the Handshake message. + supportedACPs set.Set[uint32] + objectedACPs set.Set[uint32] + + // txIDOfVerifiedBLSKey is the txID that added the BLS key that was most + // recently verified to have signed the IP. + // + // Invariant: Prior to the handshake being completed, this can only be + // accessed by the reader goroutine. After the handshake has been completed, + // this can only be accessed by the message sender goroutine. + txIDOfVerifiedBLSKey ids.ID observedUptimesLock sync.RWMutex // [observedUptimesLock] must be held while accessing [observedUptime] // Subnet ID --> Our uptime for the given subnet as perceived by the peer observedUptimes map[ids.ID]uint32 - // True if this peer has sent us a valid Version message and + // True if this peer has sent us a valid Handshake message and // is running a compatible version. // Only modified on the connection's reader routine. - gotVersion utils.Atomic[bool] + gotHandshake utils.Atomic[bool] // True if the peer: - // * Has sent us a Version message + // * Has sent us a Handshake message // * Has sent us a PeerList message // * Is running a compatible version // Only modified on the connection's reader routine. @@ -167,6 +189,10 @@ type peer struct { // peerListChan signals that we should attempt to send a PeerList to this // peer peerListChan chan struct{} + + // getPeerListChan signals that we should attempt to send a GetPeerList to + // this peer + getPeerListChan chan struct{} } // Start a new peer instance. 
@@ -176,7 +202,7 @@ type peer struct { func Start( config *Config, conn net.Conn, - cert *x509.Certificate, + cert *staking.Certificate, id ids.NodeID, messageQueue MessageQueue, ) Peer { @@ -194,6 +220,7 @@ func Start( onClosed: make(chan struct{}), observedUptimes: make(map[ids.ID]uint32), peerListChan: make(chan struct{}, 1), + getPeerListChan: make(chan struct{}, 1), } go p.readMessages() @@ -207,7 +234,7 @@ func (p *peer) ID() ids.NodeID { return p.id } -func (p *peer) Cert() *x509.Certificate { +func (p *peer) Cert() *staking.Certificate { return p.cert } @@ -246,10 +273,9 @@ func (p *peer) Info() Info { publicIPStr = p.ip.IPPort.String() } - trackedSubnets := p.trackedSubnets.List() - uptimes := make(map[ids.ID]json.Uint32, len(trackedSubnets)) + uptimes := make(map[ids.ID]json.Uint32, p.trackedSubnets.Len()) - for _, subnetID := range trackedSubnets { + for subnetID := range p.trackedSubnets { uptime, exist := p.ObservedUptime(subnetID) if !exist { continue @@ -271,7 +297,9 @@ func (p *peer) Info() Info { LastReceived: p.LastReceived(), ObservedUptime: json.Uint32(primaryUptime), ObservedSubnetUptimes: uptimes, - TrackedSubnets: trackedSubnets, + TrackedSubnets: p.trackedSubnets, + SupportedACPs: p.supportedACPs, + ObjectedACPs: p.objectedACPs, } } @@ -306,6 +334,13 @@ func (p *peer) StartSendPeerList() { } } +func (p *peer) StartSendGetPeerList() { + select { + case p.getPeerListChan <- struct{}{}: + default: + } +} + func (p *peer) StartClose() { p.startClosingOnce.Do(func() { if err := p.conn.Close(); err != nil { @@ -487,27 +522,55 @@ func (p *peer) writeMessages() { writer := bufio.NewWriterSize(p.conn, p.Config.WriteBufferSize) - // Make sure that the version is the first message sent + // Make sure that the Handshake is the first message sent mySignedIP, err := p.IPSigner.GetSignedIP() if err != nil { p.Log.Error("failed to get signed IP", + zap.Stringer("nodeID", p.id), zap.Error(err), ) return } + if mySignedIP.Port == 0 { + p.Log.Error("signed IP 
has invalid port", + zap.Stringer("nodeID", p.id), + zap.Uint16("port", mySignedIP.Port), + ) + return + } + + myVersion := p.VersionCompatibility.Version() + legacyApplication := &version.Application{ + Name: version.GetApplicationPrefix(), + Major: myVersion.Major, + Minor: myVersion.Minor, + Patch: myVersion.Patch, + } + + knownPeersFilter, knownPeersSalt := p.Network.KnownPeers() - msg, err := p.MessageCreator.Version( + msg, err := p.MessageCreator.Handshake( p.NetworkID, p.Clock.Unix(), mySignedIP.IPPort, - p.VersionCompatibility.Version().String(), + legacyApplication.String(), + myVersion.Name, + uint32(myVersion.Major), + uint32(myVersion.Minor), + uint32(myVersion.Patch), mySignedIP.Timestamp, - mySignedIP.Signature, + mySignedIP.TLSSignature, + mySignedIP.BLSSignatureBytes, p.MySubnets.List(), + p.SupportedACPs, + p.ObjectedACPs, + knownPeersFilter, + knownPeersSalt, ) if err != nil { p.Log.Error("failed to create message", - zap.Stringer("messageOp", message.VersionOp), + zap.Stringer("messageOp", message.HandshakeOp), + zap.Stringer("nodeID", p.id), zap.Error(err), ) return @@ -594,15 +657,7 @@ func (p *peer) sendNetworkMessages() { for { select { case <-p.peerListChan: - peerIPs, err := p.Config.Network.Peers(p.id) - if err != nil { - p.Log.Error("failed to get peers to gossip", - zap.Stringer("nodeID", p.id), - zap.Error(err), - ) - return - } - + peerIPs := p.Config.Network.Peers(p.id, bloom.EmptyFilter, nil) if len(peerIPs) == 0 { p.Log.Verbo( "skipping peer gossip as there are no unknown peers", @@ -627,6 +682,22 @@ func (p *peer) sendNetworkMessages() { zap.Stringer("nodeID", p.id), ) } + case <-p.getPeerListChan: + knownPeersFilter, knownPeersSalt := p.Config.Network.KnownPeers() + msg, err := p.Config.MessageCreator.GetPeerList(knownPeersFilter, knownPeersSalt) + if err != nil { + p.Log.Error("failed to create get peer list message", + zap.Stringer("nodeID", p.id), + zap.Error(err), + ) + continue + } + + if !p.Send(p.onClosingCtx, msg) { + 
p.Log.Debug("failed to send get peer list", + zap.Stringer("nodeID", p.id), + ) + } case <-sendPingsTicker.C: if !p.Network.AllowConnection(p.id) { p.Log.Debug("disconnecting from peer", @@ -636,22 +707,19 @@ func (p *peer) sendNetworkMessages() { return } - if p.finishedHandshake.Get() { - if err := p.VersionCompatibility.Compatible(p.version); err != nil { - p.Log.Debug("disconnecting from peer", - zap.String("reason", "version not compatible"), - zap.Stringer("nodeID", p.id), - zap.Stringer("peerVersion", p.version), - zap.Error(err), - ) - return - } + // Only check if we should disconnect after the handshake is + // finished to avoid race conditions and accessing uninitialized + // values. + if p.finishedHandshake.Get() && p.shouldDisconnect() { + return } - pingMessage, err := p.Config.MessageCreator.Ping() + primaryUptime, subnetUptimes := p.getUptimes() + pingMessage, err := p.MessageCreator.Ping(primaryUptime, subnetUptimes) if err != nil { p.Log.Error("failed to create message", zap.Stringer("messageOp", message.PingOp), + zap.Stringer("nodeID", p.id), zap.Error(err), ) return @@ -664,6 +732,68 @@ func (p *peer) sendNetworkMessages() { } } +// shouldDisconnect is called both during receipt of the Handshake message and +// periodically when sending a Ping message (after finishing the handshake!). +// +// It is called during the Handshake to prevent marking a peer as connected and +// then immediately disconnecting from them. +// +// It is called when sending a Ping message to account for validator set +// changes. It's called when sending a Ping rather than in a validator set +// callback to avoid signature verification on the P-chain accept path. 
+func (p *peer) shouldDisconnect() bool { + if err := p.VersionCompatibility.Compatible(p.version); err != nil { + p.Log.Debug("disconnecting from peer", + zap.String("reason", "version not compatible"), + zap.Stringer("nodeID", p.id), + zap.Stringer("peerVersion", p.version), + zap.Error(err), + ) + return true + } + + // Enforce that all validators that have registered a BLS key are signing + // their IP with it after the activation of Durango. + vdr, ok := p.Validators.GetValidator(constants.PrimaryNetworkID, p.id) + if !ok || vdr.PublicKey == nil || vdr.TxID == p.txIDOfVerifiedBLSKey { + return false + } + + postDurango := p.Clock.Time().After(version.GetDurangoTime(p.Config.NetworkID)) + if postDurango && p.ip.BLSSignature == nil { + p.Log.Debug("disconnecting from peer", + zap.String("reason", "missing BLS signature"), + zap.Stringer("nodeID", p.id), + ) + return true + } + + // If Durango hasn't activated on mainnet yet, we don't require BLS + // signatures to be provided. However, if they are provided, verify that + // they are correct. + if p.ip.BLSSignature == nil { + return false + } + + validSignature := bls.VerifyProofOfPossession( + vdr.PublicKey, + p.ip.BLSSignature, + p.ip.UnsignedIP.bytes(), + ) + if !validSignature { + p.Log.Debug("disconnecting from peer", + zap.String("reason", "invalid BLS signature"), + zap.Stringer("nodeID", p.id), + ) + return true + } + + // Avoid unnecessary signature verifications by only verifing the signature + // once per validation period. 
+ p.txIDOfVerifiedBLSKey = vdr.TxID + return false +} + func (p *peer) handle(msg message.InboundMessage) { switch m := msg.Message().(type) { // Network-related message types case *p2p.Ping: @@ -674,16 +804,16 @@ func (p *peer) handle(msg message.InboundMessage) { p.handlePong(m) msg.OnFinishedHandling() return - case *p2p.Version: - p.handleVersion(m) + case *p2p.Handshake: + p.handleHandshake(m) msg.OnFinishedHandling() return - case *p2p.PeerList: - p.handlePeerList(m) + case *p2p.GetPeerList: + p.handleGetPeerList(m) msg.OnFinishedHandling() return - case *p2p.PeerListAck: - p.handlePeerListAck(m) + case *p2p.PeerList: + p.handlePeerList(m) msg.OnFinishedHandling() return } @@ -702,7 +832,23 @@ func (p *peer) handle(msg message.InboundMessage) { p.Router.HandleInbound(context.Background(), msg) } -func (p *peer) handlePing(*p2p.Ping) { +func (p *peer) handlePing(msg *p2p.Ping) { + p.observeUptimes(msg.Uptime, msg.SubnetUptimes) + + primaryUptime, subnetUptimes := p.getUptimes() + pongMessage, err := p.MessageCreator.Pong(primaryUptime, subnetUptimes) + if err != nil { + p.Log.Error("failed to create message", + zap.Stringer("messageOp", message.PongOp), + zap.Stringer("nodeID", p.id), + zap.Error(err), + ) + return + } + p.Send(p.onClosingCtx, pongMessage) +} + +func (p *peer) getUptimes() (uint32, []*p2p.SubnetUptime) { primaryUptime, err := p.UptimeCalculator.CalculateUptimePercent( p.id, constants.PrimaryNetworkID, @@ -736,32 +882,38 @@ func (p *peer) handlePing(*p2p.Ping) { } primaryUptimePercent := uint32(primaryUptime * 100) - msg, err := p.MessageCreator.Pong(primaryUptimePercent, subnetUptimes) - if err != nil { - p.Log.Error("failed to create message", - zap.Stringer("messageOp", message.PongOp), - zap.Error(err), - ) - return - } - p.Send(p.onClosingCtx, msg) + return primaryUptimePercent, subnetUptimes } func (p *peer) handlePong(msg *p2p.Pong) { - if msg.Uptime > 100 { - p.Log.Debug("dropping pong message with invalid uptime", + // TODO: Remove 
once everyone sends uptimes in Ping messages. + p.observeUptimes(msg.Uptime, msg.SubnetUptimes) +} + +func (p *peer) observeUptimes(primaryUptime uint32, subnetUptimes []*p2p.SubnetUptime) { + // TODO: Remove once everyone sends uptimes in Ping messages. + // + // If primaryUptime is 0, the message may not include any uptimes. This may + // happen with old Ping messages or new Pong messages. + if primaryUptime == 0 { + return + } + + if primaryUptime > 100 { + p.Log.Debug("dropping message with invalid uptime", zap.Stringer("nodeID", p.id), - zap.Uint32("uptime", msg.Uptime), + zap.Stringer("subnetID", constants.PrimaryNetworkID), + zap.Uint32("uptime", primaryUptime), ) p.StartClose() return } - p.observeUptime(constants.PrimaryNetworkID, msg.Uptime) + p.observeUptime(constants.PrimaryNetworkID, primaryUptime) - for _, subnetUptime := range msg.SubnetUptimes { + for _, subnetUptime := range subnetUptimes { subnetID, err := ids.ToID(subnetUptime.SubnetId) if err != nil { - p.Log.Debug("dropping pong message with invalid subnetID", + p.Log.Debug("dropping message with invalid subnetID", zap.Stringer("nodeID", p.id), zap.Error(err), ) @@ -769,9 +921,18 @@ func (p *peer) handlePong(msg *p2p.Pong) { return } + if !p.MySubnets.Contains(subnetID) { + p.Log.Debug("dropping message with unexpected subnetID", + zap.Stringer("nodeID", p.id), + zap.Stringer("subnetID", subnetID), + ) + p.StartClose() + return + } + uptime := subnetUptime.Uptime if uptime > 100 { - p.Log.Debug("dropping pong message with invalid uptime", + p.Log.Debug("dropping message with invalid uptime", zap.Stringer("nodeID", p.id), zap.Stringer("subnetID", subnetID), zap.Uint32("uptime", uptime), @@ -785,16 +946,18 @@ func (p *peer) handlePong(msg *p2p.Pong) { // Record that the given peer perceives our uptime for the given [subnetID] // to be [uptime]. +// Assumes [uptime] is in the range [0, 100] and [subnetID] is a valid ID of a +// subnet this peer tracks. 
func (p *peer) observeUptime(subnetID ids.ID, uptime uint32) { p.observedUptimesLock.Lock() p.observedUptimes[subnetID] = uptime // [0, 100] percentage p.observedUptimesLock.Unlock() } -func (p *peer) handleVersion(msg *p2p.Version) { - if p.gotVersion.Get() { +func (p *peer) handleHandshake(msg *p2p.Handshake) { + if p.gotHandshake.Get() { // TODO: this should never happen, should we close the connection here? - p.Log.Verbo("dropping duplicated version message", + p.Log.Verbo("dropping duplicated handshake message", zap.Stringer("nodeID", p.id), ) return @@ -810,86 +973,133 @@ func (p *peer) handleVersion(msg *p2p.Version) { return } - myTime := p.Clock.Unix() - if math.Abs(float64(msg.MyTime)-float64(myTime)) > p.MaxClockDifference.Seconds() { - if p.Beacons.Contains(p.id) { + myTime := p.Clock.Time() + myTimeUnix := uint64(myTime.Unix()) + clockDifference := math.Abs(float64(msg.MyTime) - float64(myTimeUnix)) + + p.Metrics.ClockSkew.Observe(clockDifference) + + if clockDifference > p.MaxClockDifference.Seconds() { + if _, ok := p.Beacons.GetValidator(constants.PrimaryNetworkID, p.id); ok { p.Log.Warn("beacon reports out of sync time", zap.Stringer("nodeID", p.id), zap.Uint64("peerTime", msg.MyTime), - zap.Uint64("myTime", myTime), + zap.Uint64("myTime", myTimeUnix), ) } else { p.Log.Debug("peer reports out of sync time", zap.Stringer("nodeID", p.id), zap.Uint64("peerTime", msg.MyTime), - zap.Uint64("myTime", myTime), + zap.Uint64("myTime", myTimeUnix), ) } p.StartClose() return } - peerVersion, err := version.ParseApplication(msg.MyVersion) - if err != nil { - p.Log.Debug("failed to parse peer version", - zap.Stringer("nodeID", p.id), - zap.Error(err), - ) - p.StartClose() - return + if msg.Client != nil { + p.version = &version.Application{ + Name: msg.Client.Name, + Major: int(msg.Client.Major), + Minor: int(msg.Client.Minor), + Patch: int(msg.Client.Patch), + } + } else { + // Handle legacy version field + peerVersion, err := 
version.ParseLegacyApplication(msg.MyVersion) + if err != nil { + p.Log.Debug("failed to parse peer version", + zap.Stringer("nodeID", p.id), + zap.Error(err), + ) + p.StartClose() + return + } + p.version = peerVersion } - p.version = peerVersion - if p.VersionCompatibility.Version().Before(peerVersion) { - if p.Beacons.Contains(p.id) { + if p.VersionCompatibility.Version().Before(p.version) { + if _, ok := p.Beacons.GetValidator(constants.PrimaryNetworkID, p.id); ok { p.Log.Info("beacon attempting to connect with newer version. You may want to update your client", zap.Stringer("nodeID", p.id), - zap.Stringer("beaconVersion", peerVersion), + zap.Stringer("beaconVersion", p.version), ) } else { p.Log.Debug("peer attempting to connect with newer version. You may want to update your client", zap.Stringer("nodeID", p.id), - zap.Stringer("peerVersion", peerVersion), + zap.Stringer("peerVersion", p.version), ) } } - if err := p.VersionCompatibility.Compatible(peerVersion); err != nil { - p.Log.Verbo("peer version not compatible", - zap.Stringer("nodeID", p.id), - zap.Stringer("peerVersion", peerVersion), - zap.Error(err), - ) - p.StartClose() - return + // handle subnet IDs + for _, subnetIDBytes := range msg.TrackedSubnets { + subnetID, err := ids.ToID(subnetIDBytes) + if err != nil { + p.Log.Debug("failed to parse peer's tracked subnets", + zap.Stringer("nodeID", p.id), + zap.Error(err), + ) + p.StartClose() + return + } + // add only if we also track this subnet + if p.MySubnets.Contains(subnetID) { + p.trackedSubnets.Add(subnetID) + } + } + + for _, acp := range msg.SupportedAcps { + if constants.CurrentACPs.Contains(acp) { + p.supportedACPs.Add(acp) + } + } + for _, acp := range msg.ObjectedAcps { + if constants.CurrentACPs.Contains(acp) { + p.objectedACPs.Add(acp) + } } - // Note that it is expected that the [versionTime] can be in the past. We - // are just verifying that the claimed signing time isn't too far in the - // future here. 
- if float64(msg.MyVersionTime)-float64(myTime) > p.MaxClockDifference.Seconds() { - p.Log.Debug("peer attempting to connect with version timestamp too far in the future", + if p.supportedACPs.Overlaps(p.objectedACPs) { + p.Log.Debug("message with invalid field", zap.Stringer("nodeID", p.id), - zap.Uint64("versionTime", msg.MyVersionTime), + zap.Stringer("messageOp", message.HandshakeOp), + zap.String("field", "ACPs"), + zap.Reflect("supportedACPs", p.supportedACPs), + zap.Reflect("objectedACPs", p.objectedACPs), ) p.StartClose() return } - // handle subnet IDs - for _, subnetIDBytes := range msg.TrackedSubnets { - subnetID, err := ids.ToID(subnetIDBytes) + var ( + knownPeers = bloom.EmptyFilter + salt []byte + ) + if msg.KnownPeers != nil { + var err error + knownPeers, err = bloom.Parse(msg.KnownPeers.Filter) if err != nil { - p.Log.Debug("failed to parse peer's tracked subnets", + p.Log.Debug("message with invalid field", zap.Stringer("nodeID", p.id), + zap.Stringer("messageOp", message.HandshakeOp), + zap.String("field", "KnownPeers.Filter"), zap.Error(err), ) p.StartClose() return } - // add only if we also track this subnet - if p.MySubnets.Contains(subnetID) { - p.trackedSubnets.Add(subnetID) + + salt = msg.KnownPeers.Salt + if saltLen := len(salt); saltLen > maxBloomSaltLen { + p.Log.Debug("message with invalid field", + zap.Stringer("nodeID", p.id), + zap.Stringer("messageOp", message.HandshakeOp), + zap.String("field", "KnownPeers.Salt"), + zap.Int("saltLen", saltLen), + ) + p.StartClose() + return } } @@ -897,13 +1107,23 @@ func (p *peer) handleVersion(msg *p2p.Version) { if ipLen := len(msg.IpAddr); ipLen != net.IPv6len { p.Log.Debug("message with invalid field", zap.Stringer("nodeID", p.id), - zap.Stringer("messageOp", message.VersionOp), + zap.Stringer("messageOp", message.HandshakeOp), zap.String("field", "IP"), zap.Int("ipLen", ipLen), ) p.StartClose() return } + if msg.IpPort == 0 { + p.Log.Debug("message with invalid field", + 
zap.Stringer("nodeID", p.id), + zap.Stringer("messageOp", message.HandshakeOp), + zap.String("field", "Port"), + zap.Uint32("port", msg.IpPort), + ) + p.StartClose() + return + } p.ip = &SignedIP{ UnsignedIP: UnsignedIP{ @@ -911,44 +1131,139 @@ func (p *peer) handleVersion(msg *p2p.Version) { IP: msg.IpAddr, Port: uint16(msg.IpPort), }, - Timestamp: msg.MyVersionTime, + Timestamp: msg.IpSigningTime, }, - Signature: msg.Sig, + TLSSignature: msg.IpNodeIdSig, + } + maxTimestamp := myTime.Add(p.MaxClockDifference) + if err := p.ip.Verify(p.cert, maxTimestamp); err != nil { + if _, ok := p.Beacons.GetValidator(constants.PrimaryNetworkID, p.id); ok { + p.Log.Warn("beacon has invalid signature or is out of sync", + zap.Stringer("nodeID", p.id), + zap.String("signatureType", "tls"), + zap.Uint64("peerTime", msg.MyTime), + zap.Uint64("myTime", myTimeUnix), + zap.Error(err), + ) + } else { + p.Log.Debug("peer has invalid signature or is out of sync", + zap.Stringer("nodeID", p.id), + zap.String("signatureType", "tls"), + zap.Uint64("peerTime", msg.MyTime), + zap.Uint64("myTime", myTimeUnix), + zap.Error(err), + ) + } + + p.StartClose() + return + } + + // TODO: After v1.11.x is activated, require the key to be provided. + if len(msg.IpBlsSig) > 0 { + signature, err := bls.SignatureFromBytes(msg.IpBlsSig) + if err != nil { + p.Log.Debug("peer has malformed signature", + zap.Stringer("nodeID", p.id), + zap.String("signatureType", "bls"), + zap.Error(err), + ) + p.StartClose() + return + } + + p.ip.BLSSignature = signature + p.ip.BLSSignatureBytes = msg.IpBlsSig + } + + // If the peer is running an incompatible version or has an invalid BLS + // signature, disconnect from them prior to marking the handshake as + // completed. 
+ if p.shouldDisconnect() { + p.StartClose() + return } - if err := p.ip.Verify(p.cert); err != nil { - p.Log.Debug("signature verification failed", + + p.gotHandshake.Set(true) + + peerIPs := p.Network.Peers(p.id, knownPeers, salt) + + // We bypass throttling here to ensure that the handshake message is + // acknowledged correctly. + peerListMsg, err := p.Config.MessageCreator.PeerList(peerIPs, true /*=bypassThrottling*/) + if err != nil { + p.Log.Error("failed to create peer list handshake message", zap.Stringer("nodeID", p.id), + zap.Stringer("messageOp", message.PeerListOp), zap.Error(err), ) - p.StartClose() return } - p.gotVersion.Set(true) + if !p.Send(p.onClosingCtx, peerListMsg) { + // Because throttling was marked to be bypassed with this message, + // sending should only fail if the peer has started closing. + p.Log.Debug("failed to send peer list for handshake", + zap.Stringer("nodeID", p.id), + zap.Error(p.onClosingCtx.Err()), + ) + } +} - peerIPs, err := p.Network.Peers(p.id) +func (p *peer) handleGetPeerList(msg *p2p.GetPeerList) { + if !p.finishedHandshake.Get() { + p.Log.Verbo("dropping get peer list message", + zap.Stringer("nodeID", p.id), + ) + return + } + + knownPeersMsg := msg.GetKnownPeers() + filter, err := bloom.Parse(knownPeersMsg.GetFilter()) if err != nil { - p.Log.Error("failed to get peers to gossip for handshake", + p.Log.Debug("message with invalid field", zap.Stringer("nodeID", p.id), + zap.Stringer("messageOp", message.GetPeerListOp), + zap.String("field", "KnownPeers.Filter"), zap.Error(err), ) + p.StartClose() return } - // We bypass throttling here to ensure that the version message is - // acknowledged timely. 
- peerListMsg, err := p.Config.MessageCreator.PeerList(peerIPs, true /*=bypassThrottling*/) + salt := knownPeersMsg.GetSalt() + if saltLen := len(salt); saltLen > maxBloomSaltLen { + p.Log.Debug("message with invalid field", + zap.Stringer("nodeID", p.id), + zap.Stringer("messageOp", message.GetPeerListOp), + zap.String("field", "KnownPeers.Salt"), + zap.Int("saltLen", saltLen), + ) + p.StartClose() + return + } + + peerIPs := p.Network.Peers(p.id, filter, salt) + if len(peerIPs) == 0 { + p.Log.Debug("skipping sending of empty peer list", + zap.Stringer("nodeID", p.id), + ) + return + } + + // Bypass throttling is disabled here to follow the non-handshake message + // sending pattern. + peerListMsg, err := p.Config.MessageCreator.PeerList(peerIPs, false /*=bypassThrottling*/) if err != nil { - p.Log.Error("failed to create peer list handshake message", + p.Log.Error("failed to create peer list message", zap.Stringer("nodeID", p.id), - zap.Stringer("messageOp", message.PeerListOp), zap.Error(err), ) return } if !p.Send(p.onClosingCtx, peerListMsg) { - p.Log.Error("failed to send peer list for handshake", + p.Log.Debug("failed to send peer list", zap.Stringer("nodeID", p.id), ) } @@ -956,7 +1271,7 @@ func (p *peer) handleVersion(msg *p2p.Version) { func (p *peer) handlePeerList(msg *p2p.PeerList) { if !p.finishedHandshake.Get() { - if !p.gotVersion.Get() { + if !p.gotHandshake.Get() { return } @@ -965,10 +1280,22 @@ func (p *peer) handlePeerList(msg *p2p.PeerList) { close(p.onFinishHandshake) } - // the peers this peer told us about - discoveredIPs := make([]*ips.ClaimedIPPort, len(msg.ClaimedIpPorts)) + // Invariant: We do not account for clock skew here, as the sender of the + // certificate is expected to account for clock skew during the activation + // of Durango. 
+ durangoTime := version.GetDurangoTime(p.NetworkID) + beforeDurango := time.Now().Before(durangoTime) + discoveredIPs := make([]*ips.ClaimedIPPort, len(msg.ClaimedIpPorts)) // the peers this peer told us about for i, claimedIPPort := range msg.ClaimedIpPorts { - tlsCert, err := x509.ParseCertificate(claimedIPPort.X509Certificate) + var ( + tlsCert *staking.Certificate + err error + ) + if beforeDurango { + tlsCert, err = staking.ParseCertificate(claimedIPPort.X509Certificate) + } else { + tlsCert, err = staking.ParseCertificatePermissive(claimedIPPort.X509Certificate) + } if err != nil { p.Log.Debug("message with invalid field", zap.Stringer("nodeID", p.id), @@ -984,44 +1311,36 @@ func (p *peer) handlePeerList(msg *p2p.PeerList) { if ipLen := len(claimedIPPort.IpAddr); ipLen != net.IPv6len { p.Log.Debug("message with invalid field", zap.Stringer("nodeID", p.id), - zap.Stringer("messageOp", message.VersionOp), + zap.Stringer("messageOp", message.PeerListOp), zap.String("field", "IP"), zap.Int("ipLen", ipLen), ) p.StartClose() return } - - // TODO: After the next network upgrade, require txIDs to be populated. - var txID ids.ID - if len(claimedIPPort.TxId) > 0 { - txID, err = ids.ToID(claimedIPPort.TxId) - if err != nil { - p.Log.Debug("message with invalid field", - zap.Stringer("nodeID", p.id), - zap.Stringer("messageOp", message.PeerListOp), - zap.String("field", "txID"), - zap.Error(err), - ) - p.StartClose() - return - } + if claimedIPPort.IpPort == 0 { + p.Log.Debug("message with invalid field", + zap.Stringer("nodeID", p.id), + zap.Stringer("messageOp", message.PeerListOp), + zap.String("field", "Port"), + zap.Uint32("port", claimedIPPort.IpPort), + ) + // TODO: After v1.11.x is activated, close the peer here. 
+ continue } - discoveredIPs[i] = &ips.ClaimedIPPort{ - Cert: tlsCert, - IPPort: ips.IPPort{ + discoveredIPs[i] = ips.NewClaimedIPPort( + tlsCert, + ips.IPPort{ IP: claimedIPPort.IpAddr, Port: uint16(claimedIPPort.IpPort), }, - Timestamp: claimedIPPort.Timestamp, - Signature: claimedIPPort.Signature, - TxID: txID, - } + claimedIPPort.Timestamp, + claimedIPPort.Signature, + ) } - trackedPeers, err := p.Network.Track(p.id, discoveredIPs) - if err != nil { + if err := p.Network.Track(discoveredIPs); err != nil { p.Log.Debug("message with invalid field", zap.Stringer("nodeID", p.id), zap.Stringer("messageOp", message.PeerListOp), @@ -1029,42 +1348,6 @@ func (p *peer) handlePeerList(msg *p2p.PeerList) { zap.Error(err), ) p.StartClose() - return - } - if len(trackedPeers) == 0 { - p.Log.Debug("skipping peerlist ack as there were no tracked peers", - zap.Stringer("nodeID", p.id), - ) - return - } - - peerListAckMsg, err := p.Config.MessageCreator.PeerListAck(trackedPeers) - if err != nil { - p.Log.Error("failed to create message", - zap.Stringer("nodeID", p.id), - zap.Stringer("messageOp", message.PeerListAckOp), - zap.Error(err), - ) - return - } - - if !p.Send(p.onClosingCtx, peerListAckMsg) { - p.Log.Debug("failed to send peer list ack", - zap.Stringer("nodeID", p.id), - ) - } -} - -func (p *peer) handlePeerListAck(msg *p2p.PeerListAck) { - err := p.Network.MarkTracked(p.id, msg.PeerAcks) - if err != nil { - p.Log.Debug("message with invalid field", - zap.Stringer("nodeID", p.id), - zap.Stringer("messageOp", message.PeerListAckOp), - zap.String("field", "txID"), - zap.Error(err), - ) - p.StartClose() } } diff --git a/avalanchego/network/peer/peer_test.go b/avalanchego/network/peer/peer_test.go index 653a0c61..e52273fc 100644 --- a/avalanchego/network/peer/peer_test.go +++ b/avalanchego/network/peer/peer_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. package peer @@ -6,13 +6,11 @@ package peer import ( "context" "crypto" - "crypto/x509" "net" "testing" "time" "github.com/prometheus/client_golang/prometheus" - "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" @@ -21,14 +19,17 @@ import ( "github.com/ava-labs/avalanchego/proto/pb/p2p" "github.com/ava-labs/avalanchego/snow/networking/router" "github.com/ava-labs/avalanchego/snow/networking/tracker" + "github.com/ava-labs/avalanchego/snow/uptime" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/staking" "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/ips" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/math/meter" "github.com/ava-labs/avalanchego/utils/resource" "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/version" ) @@ -40,7 +41,7 @@ type testPeer struct { type rawTestPeer struct { config *Config conn net.Conn - cert *x509.Certificate + cert *staking.Certificate nodeID ids.NodeID inboundMsgChan <-chan message.InboundMessage } @@ -60,7 +61,7 @@ func newMessageCreator(t *testing.T) message.Creator { return mc } -func makeRawTestPeers(t *testing.T) (*rawTestPeer, *rawTestPeer) { +func makeRawTestPeers(t *testing.T, trackedSubnets set.Set[ids.ID]) (*rawTestPeer, *rawTestPeer) { t.Helper() require := require.New(t) @@ -68,12 +69,14 @@ func makeRawTestPeers(t *testing.T) (*rawTestPeer, *rawTestPeer) { tlsCert0, err := staking.NewTLSCert() require.NoError(err) + cert0 := staking.CertificateFromX509(tlsCert0.Leaf) tlsCert1, err := staking.NewTLSCert() require.NoError(err) + cert1 := staking.CertificateFromX509(tlsCert1.Leaf) - nodeID0 := ids.NodeIDFromCert(tlsCert0.Leaf) - nodeID1 := ids.NodeIDFromCert(tlsCert1.Leaf) + nodeID0 := 
ids.NodeIDFromCert(cert0) + nodeID1 := ids.NodeIDFromCert(cert1) mc := newMessageCreator(t) @@ -98,8 +101,10 @@ func makeRawTestPeers(t *testing.T) (*rawTestPeer, *rawTestPeer) { Log: logging.NoLog{}, InboundMsgThrottler: throttling.NewNoInboundThrottler(), VersionCompatibility: version.GetCompatibility(constants.LocalID), - MySubnets: set.Set[ids.ID]{}, - Beacons: validators.NewSet(), + MySubnets: trackedSubnets, + UptimeCalculator: uptime.NoOpCalculator, + Beacons: validators.NewManager(), + Validators: validators.NewManager(), NetworkID: constants.LocalID, PingFrequency: constants.DefaultPingFrequency, PongTimeout: constants.DefaultPingPongTimeout, @@ -109,9 +114,12 @@ func makeRawTestPeers(t *testing.T) (*rawTestPeer, *rawTestPeer) { peerConfig0 := sharedConfig peerConfig1 := sharedConfig - ip0 := ips.NewDynamicIPPort(net.IPv6loopback, 0) + ip0 := ips.NewDynamicIPPort(net.IPv6loopback, 1) tls0 := tlsCert0.PrivateKey.(crypto.Signer) - peerConfig0.IPSigner = NewIPSigner(ip0, tls0) + bls0, err := bls.NewSecretKey() + require.NoError(err) + + peerConfig0.IPSigner = NewIPSigner(ip0, tls0, bls0) peerConfig0.Network = TestNetwork inboundMsgChan0 := make(chan message.InboundMessage) @@ -119,9 +127,12 @@ func makeRawTestPeers(t *testing.T) (*rawTestPeer, *rawTestPeer) { inboundMsgChan0 <- msg }) - ip1 := ips.NewDynamicIPPort(net.IPv6loopback, 1) + ip1 := ips.NewDynamicIPPort(net.IPv6loopback, 2) tls1 := tlsCert1.PrivateKey.(crypto.Signer) - peerConfig1.IPSigner = NewIPSigner(ip1, tls1) + bls1, err := bls.NewSecretKey() + require.NoError(err) + + peerConfig1.IPSigner = NewIPSigner(ip1, tls1, bls1) peerConfig1.Network = TestNetwork inboundMsgChan1 := make(chan message.InboundMessage) @@ -132,22 +143,22 @@ func makeRawTestPeers(t *testing.T) (*rawTestPeer, *rawTestPeer) { peer0 := &rawTestPeer{ config: &peerConfig0, conn: conn0, - cert: tlsCert0.Leaf, + cert: cert0, nodeID: nodeID0, inboundMsgChan: inboundMsgChan0, } peer1 := &rawTestPeer{ config: &peerConfig1, conn: 
conn1, - cert: tlsCert1.Leaf, + cert: cert1, nodeID: nodeID1, inboundMsgChan: inboundMsgChan1, } return peer0, peer1 } -func makeTestPeers(t *testing.T) (*testPeer, *testPeer) { - rawPeer0, rawPeer1 := makeRawTestPeers(t) +func makeTestPeers(t *testing.T, trackedSubnets set.Set[ids.ID]) (*testPeer, *testPeer) { + rawPeer0, rawPeer1 := makeRawTestPeers(t, trackedSubnets) peer0 := &testPeer{ Peer: Start( @@ -182,21 +193,17 @@ func makeTestPeers(t *testing.T) (*testPeer, *testPeer) { return peer0, peer1 } -func makeReadyTestPeers(t *testing.T) (*testPeer, *testPeer) { +func makeReadyTestPeers(t *testing.T, trackedSubnets set.Set[ids.ID]) (*testPeer, *testPeer) { t.Helper() require := require.New(t) - peer0, peer1 := makeTestPeers(t) + peer0, peer1 := makeTestPeers(t, trackedSubnets) - err := peer0.AwaitReady(context.Background()) - require.NoError(err) - isReady := peer0.Ready() - require.True(isReady) + require.NoError(peer0.AwaitReady(context.Background())) + require.True(peer0.Ready()) - err = peer1.AwaitReady(context.Background()) - require.NoError(err) - isReady = peer1.Ready() - require.True(isReady) + require.NoError(peer1.AwaitReady(context.Background())) + require.True(peer1.Ready()) return peer0, peer1 } @@ -204,8 +211,7 @@ func makeReadyTestPeers(t *testing.T) (*testPeer, *testPeer) { func TestReady(t *testing.T) { require := require.New(t) - rawPeer0, rawPeer1 := makeRawTestPeers(t) - + rawPeer0, rawPeer1 := makeRawTestPeers(t, set.Set[ids.ID]{}) peer0 := Start( rawPeer0.config, rawPeer0.conn, @@ -219,8 +225,7 @@ func TestReady(t *testing.T) { ), ) - isReady := peer0.Ready() - require.False(isReady) + require.False(peer0.Ready()) peer1 := Start( rawPeer1.config, @@ -235,41 +240,603 @@ func TestReady(t *testing.T) { ), ) - err := peer0.AwaitReady(context.Background()) - require.NoError(err) - isReady = peer0.Ready() - require.True(isReady) + require.NoError(peer0.AwaitReady(context.Background())) + require.True(peer0.Ready()) - err = 
peer1.AwaitReady(context.Background()) - require.NoError(err) - isReady = peer1.Ready() - require.True(isReady) + require.NoError(peer1.AwaitReady(context.Background())) + require.True(peer1.Ready()) peer0.StartClose() - err = peer0.AwaitClosed(context.Background()) - require.NoError(err) - err = peer1.AwaitClosed(context.Background()) - require.NoError(err) + require.NoError(peer0.AwaitClosed(context.Background())) + require.NoError(peer1.AwaitClosed(context.Background())) } func TestSend(t *testing.T) { require := require.New(t) - peer0, peer1 := makeReadyTestPeers(t) + peer0, peer1 := makeReadyTestPeers(t, set.Set[ids.ID]{}) mc := newMessageCreator(t) outboundGetMsg, err := mc.Get(ids.Empty, 1, time.Second, ids.Empty, p2p.EngineType_ENGINE_TYPE_SNOWMAN) require.NoError(err) - sent := peer0.Send(context.Background(), outboundGetMsg) - require.True(sent) + require.True(peer0.Send(context.Background(), outboundGetMsg)) inboundGetMsg := <-peer1.inboundMsgChan require.Equal(message.GetOp, inboundGetMsg.Op()) peer1.StartClose() - err = peer0.AwaitClosed(context.Background()) - require.NoError(err) - err = peer1.AwaitClosed(context.Background()) + require.NoError(peer0.AwaitClosed(context.Background())) + require.NoError(peer1.AwaitClosed(context.Background())) +} + +func TestPingUptimes(t *testing.T) { + trackedSubnetID := ids.GenerateTestID() + untrackedSubnetID := ids.GenerateTestID() + + trackedSubnets := set.Of(trackedSubnetID) + + mc := newMessageCreator(t) + + testCases := []struct { + name string + msg message.OutboundMessage + shouldClose bool + assertFn func(*require.Assertions, *testPeer) + }{ + { + name: "primary network only", + msg: func() message.OutboundMessage { + pingMsg, err := mc.Ping(1, nil) + require.NoError(t, err) + return pingMsg + }(), + assertFn: func(require *require.Assertions, peer *testPeer) { + uptime, ok := peer.ObservedUptime(constants.PrimaryNetworkID) + require.True(ok) + require.Equal(uint32(1), uptime) + + uptime, ok = 
peer.ObservedUptime(trackedSubnetID) + require.False(ok) + require.Zero(uptime) + }, + }, + { + name: "primary network and subnet", + msg: func() message.OutboundMessage { + pingMsg, err := mc.Ping( + 1, + []*p2p.SubnetUptime{ + { + SubnetId: trackedSubnetID[:], + Uptime: 1, + }, + }, + ) + require.NoError(t, err) + return pingMsg + }(), + assertFn: func(require *require.Assertions, peer *testPeer) { + uptime, ok := peer.ObservedUptime(constants.PrimaryNetworkID) + require.True(ok) + require.Equal(uint32(1), uptime) + + uptime, ok = peer.ObservedUptime(trackedSubnetID) + require.True(ok) + require.Equal(uint32(1), uptime) + }, + }, + { + name: "primary network and non tracked subnet", + msg: func() message.OutboundMessage { + pingMsg, err := mc.Ping( + 1, + []*p2p.SubnetUptime{ + { + // Providing the untrackedSubnetID here should cause + // the remote peer to disconnect from us. + SubnetId: untrackedSubnetID[:], + Uptime: 1, + }, + { + SubnetId: trackedSubnetID[:], + Uptime: 1, + }, + }, + ) + require.NoError(t, err) + return pingMsg + }(), + shouldClose: true, + }, + } + + // Note: we reuse peers across tests because makeReadyTestPeers takes awhile + // to run. + peer0, peer1 := makeReadyTestPeers(t, trackedSubnets) + defer func() { + peer1.StartClose() + peer0.StartClose() + require.NoError(t, peer0.AwaitClosed(context.Background())) + require.NoError(t, peer1.AwaitClosed(context.Background())) + }() + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + require := require.New(t) + + require.True(peer0.Send(context.Background(), tc.msg)) + + // Note: shouldClose can only be `true` for the last test because + // we reuse peers across tests. + if tc.shouldClose { + require.NoError(peer1.AwaitClosed(context.Background())) + return + } + + // we send Get message after ping to ensure Ping is handled by the + // time Get is handled. This is because Get is routed to the handler + // whereas Ping is handled by the peer directly. 
We have no way to + // know when the peer has handled the Ping message. + sendAndFlush(t, peer0, peer1) + + tc.assertFn(require, peer1) + }) + } +} + +// Test that a peer using the wrong BLS key is disconnected from. +func TestInvalidBLSKeyDisconnects(t *testing.T) { + require := require.New(t) + + rawPeer0, rawPeer1 := makeRawTestPeers(t, nil) + require.NoError(rawPeer0.config.Validators.AddStaker( + constants.PrimaryNetworkID, + rawPeer1.nodeID, + bls.PublicFromSecretKey(rawPeer1.config.IPSigner.blsSigner), + ids.GenerateTestID(), + 1, + )) + + bogusBLSKey, err := bls.NewSecretKey() require.NoError(err) + require.NoError(rawPeer1.config.Validators.AddStaker( + constants.PrimaryNetworkID, + rawPeer0.nodeID, + bls.PublicFromSecretKey(bogusBLSKey), // This is the wrong BLS key for this peer + ids.GenerateTestID(), + 1, + )) + peer0 := &testPeer{ + Peer: Start( + rawPeer0.config, + rawPeer0.conn, + rawPeer1.cert, + rawPeer1.nodeID, + NewThrottledMessageQueue( + rawPeer0.config.Metrics, + rawPeer1.nodeID, + logging.NoLog{}, + throttling.NewNoOutboundThrottler(), + ), + ), + inboundMsgChan: rawPeer0.inboundMsgChan, + } + peer1 := &testPeer{ + Peer: Start( + rawPeer1.config, + rawPeer1.conn, + rawPeer0.cert, + rawPeer0.nodeID, + NewThrottledMessageQueue( + rawPeer1.config.Metrics, + rawPeer0.nodeID, + logging.NoLog{}, + throttling.NewNoOutboundThrottler(), + ), + ), + inboundMsgChan: rawPeer1.inboundMsgChan, + } + + // Because peer1 thinks that peer0 is using the wrong BLS key, they should + // disconnect from each other. 
+ require.NoError(peer0.AwaitClosed(context.Background())) + require.NoError(peer1.AwaitClosed(context.Background())) +} + +func TestShouldDisconnect(t *testing.T) { + peerID := ids.GenerateTestNodeID() + txID := ids.GenerateTestID() + blsKey, err := bls.NewSecretKey() + require.NoError(t, err) + + tests := []struct { + name string + initialPeer *peer + expectedPeer *peer + expectedShouldDisconnect bool + }{ + { + name: "peer is reporting old version", + initialPeer: &peer{ + Config: &Config{ + Log: logging.NoLog{}, + VersionCompatibility: version.GetCompatibility(constants.UnitTestID), + }, + version: &version.Application{ + Name: version.Client, + Major: 0, + Minor: 0, + Patch: 0, + }, + }, + expectedPeer: &peer{ + Config: &Config{ + Log: logging.NoLog{}, + VersionCompatibility: version.GetCompatibility(constants.UnitTestID), + }, + version: &version.Application{ + Name: version.Client, + Major: 0, + Minor: 0, + Patch: 0, + }, + }, + expectedShouldDisconnect: true, + }, + { + name: "peer is not a validator", + initialPeer: &peer{ + Config: &Config{ + Log: logging.NoLog{}, + VersionCompatibility: version.GetCompatibility(constants.UnitTestID), + Validators: validators.NewManager(), + }, + version: version.CurrentApp, + }, + expectedPeer: &peer{ + Config: &Config{ + Log: logging.NoLog{}, + VersionCompatibility: version.GetCompatibility(constants.UnitTestID), + Validators: validators.NewManager(), + }, + version: version.CurrentApp, + }, + expectedShouldDisconnect: false, + }, + { + name: "peer is a validator without a BLS key", + initialPeer: &peer{ + Config: &Config{ + Log: logging.NoLog{}, + VersionCompatibility: version.GetCompatibility(constants.UnitTestID), + Validators: func() validators.Manager { + vdrs := validators.NewManager() + require.NoError(t, vdrs.AddStaker( + constants.PrimaryNetworkID, + peerID, + nil, + txID, + 1, + )) + return vdrs + }(), + }, + id: peerID, + version: version.CurrentApp, + }, + expectedPeer: &peer{ + Config: &Config{ + Log: 
logging.NoLog{}, + VersionCompatibility: version.GetCompatibility(constants.UnitTestID), + Validators: func() validators.Manager { + vdrs := validators.NewManager() + require.NoError(t, vdrs.AddStaker( + constants.PrimaryNetworkID, + peerID, + nil, + txID, + 1, + )) + return vdrs + }(), + }, + id: peerID, + version: version.CurrentApp, + }, + expectedShouldDisconnect: false, + }, + { + name: "already verified peer", + initialPeer: &peer{ + Config: &Config{ + Log: logging.NoLog{}, + VersionCompatibility: version.GetCompatibility(constants.UnitTestID), + Validators: func() validators.Manager { + vdrs := validators.NewManager() + require.NoError(t, vdrs.AddStaker( + constants.PrimaryNetworkID, + peerID, + bls.PublicFromSecretKey(blsKey), + txID, + 1, + )) + return vdrs + }(), + }, + id: peerID, + version: version.CurrentApp, + txIDOfVerifiedBLSKey: txID, + }, + expectedPeer: &peer{ + Config: &Config{ + Log: logging.NoLog{}, + VersionCompatibility: version.GetCompatibility(constants.UnitTestID), + Validators: func() validators.Manager { + vdrs := validators.NewManager() + require.NoError(t, vdrs.AddStaker( + constants.PrimaryNetworkID, + peerID, + bls.PublicFromSecretKey(blsKey), + txID, + 1, + )) + return vdrs + }(), + }, + id: peerID, + version: version.CurrentApp, + txIDOfVerifiedBLSKey: txID, + }, + expectedShouldDisconnect: false, + }, + { + name: "past durango without a signature", + initialPeer: &peer{ + Config: &Config{ + Clock: func() mockable.Clock { + clk := mockable.Clock{} + clk.Set(mockable.MaxTime) + return clk + }(), + Log: logging.NoLog{}, + VersionCompatibility: version.GetCompatibility(constants.UnitTestID), + Validators: func() validators.Manager { + vdrs := validators.NewManager() + require.NoError(t, vdrs.AddStaker( + constants.PrimaryNetworkID, + peerID, + bls.PublicFromSecretKey(blsKey), + txID, + 1, + )) + return vdrs + }(), + }, + id: peerID, + version: version.CurrentApp, + ip: &SignedIP{}, + }, + expectedPeer: &peer{ + Config: &Config{ + 
Clock: func() mockable.Clock { + clk := mockable.Clock{} + clk.Set(mockable.MaxTime) + return clk + }(), + Log: logging.NoLog{}, + VersionCompatibility: version.GetCompatibility(constants.UnitTestID), + Validators: func() validators.Manager { + vdrs := validators.NewManager() + require.NoError(t, vdrs.AddStaker( + constants.PrimaryNetworkID, + peerID, + bls.PublicFromSecretKey(blsKey), + txID, + 1, + )) + return vdrs + }(), + }, + id: peerID, + version: version.CurrentApp, + ip: &SignedIP{}, + }, + expectedShouldDisconnect: true, + }, + { + name: "pre durango without a signature", + initialPeer: &peer{ + Config: &Config{ + Clock: func() mockable.Clock { + clk := mockable.Clock{} + clk.Set(time.Time{}) + return clk + }(), + Log: logging.NoLog{}, + VersionCompatibility: version.GetCompatibility(constants.UnitTestID), + Validators: func() validators.Manager { + vdrs := validators.NewManager() + require.NoError(t, vdrs.AddStaker( + constants.PrimaryNetworkID, + peerID, + bls.PublicFromSecretKey(blsKey), + txID, + 1, + )) + return vdrs + }(), + }, + id: peerID, + version: version.CurrentApp, + ip: &SignedIP{}, + }, + expectedPeer: &peer{ + Config: &Config{ + Clock: func() mockable.Clock { + clk := mockable.Clock{} + clk.Set(time.Time{}) + return clk + }(), + Log: logging.NoLog{}, + VersionCompatibility: version.GetCompatibility(constants.UnitTestID), + Validators: func() validators.Manager { + vdrs := validators.NewManager() + require.NoError(t, vdrs.AddStaker( + constants.PrimaryNetworkID, + peerID, + bls.PublicFromSecretKey(blsKey), + txID, + 1, + )) + return vdrs + }(), + }, + id: peerID, + version: version.CurrentApp, + ip: &SignedIP{}, + }, + expectedShouldDisconnect: false, + }, + { + name: "pre durango with an invalid signature", + initialPeer: &peer{ + Config: &Config{ + Clock: func() mockable.Clock { + clk := mockable.Clock{} + clk.Set(time.Time{}) + return clk + }(), + Log: logging.NoLog{}, + VersionCompatibility: 
version.GetCompatibility(constants.UnitTestID), + Validators: func() validators.Manager { + vdrs := validators.NewManager() + require.NoError(t, vdrs.AddStaker( + constants.PrimaryNetworkID, + peerID, + bls.PublicFromSecretKey(blsKey), + txID, + 1, + )) + return vdrs + }(), + }, + id: peerID, + version: version.CurrentApp, + ip: &SignedIP{ + BLSSignature: bls.SignProofOfPossession(blsKey, []byte("wrong message")), + }, + }, + expectedPeer: &peer{ + Config: &Config{ + Clock: func() mockable.Clock { + clk := mockable.Clock{} + clk.Set(time.Time{}) + return clk + }(), + Log: logging.NoLog{}, + VersionCompatibility: version.GetCompatibility(constants.UnitTestID), + Validators: func() validators.Manager { + vdrs := validators.NewManager() + require.NoError(t, vdrs.AddStaker( + constants.PrimaryNetworkID, + peerID, + bls.PublicFromSecretKey(blsKey), + txID, + 1, + )) + return vdrs + }(), + }, + id: peerID, + version: version.CurrentApp, + ip: &SignedIP{ + BLSSignature: bls.SignProofOfPossession(blsKey, []byte("wrong message")), + }, + }, + expectedShouldDisconnect: true, + }, + { + name: "pre durango with a valid signature", + initialPeer: &peer{ + Config: &Config{ + Clock: func() mockable.Clock { + clk := mockable.Clock{} + clk.Set(time.Time{}) + return clk + }(), + Log: logging.NoLog{}, + VersionCompatibility: version.GetCompatibility(constants.UnitTestID), + Validators: func() validators.Manager { + vdrs := validators.NewManager() + require.NoError(t, vdrs.AddStaker( + constants.PrimaryNetworkID, + peerID, + bls.PublicFromSecretKey(blsKey), + txID, + 1, + )) + return vdrs + }(), + }, + id: peerID, + version: version.CurrentApp, + ip: &SignedIP{ + BLSSignature: bls.SignProofOfPossession(blsKey, (&UnsignedIP{}).bytes()), + }, + }, + expectedPeer: &peer{ + Config: &Config{ + Clock: func() mockable.Clock { + clk := mockable.Clock{} + clk.Set(time.Time{}) + return clk + }(), + Log: logging.NoLog{}, + VersionCompatibility: version.GetCompatibility(constants.UnitTestID), + 
Validators: func() validators.Manager { + vdrs := validators.NewManager() + require.NoError(t, vdrs.AddStaker( + constants.PrimaryNetworkID, + peerID, + bls.PublicFromSecretKey(blsKey), + txID, + 1, + )) + return vdrs + }(), + }, + id: peerID, + version: version.CurrentApp, + ip: &SignedIP{ + BLSSignature: bls.SignProofOfPossession(blsKey, (&UnsignedIP{}).bytes()), + }, + txIDOfVerifiedBLSKey: txID, + }, + expectedShouldDisconnect: false, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + shouldDisconnect := test.initialPeer.shouldDisconnect() + require.Equal(test.expectedPeer, test.initialPeer) + require.Equal(test.expectedShouldDisconnect, shouldDisconnect) + }) + } +} + +// Helper to send a message from sender to receiver and assert that the +// receiver receives the message. This can be used to test a prior message +// was handled by the peer. +func sendAndFlush(t *testing.T, sender *testPeer, receiver *testPeer) { + t.Helper() + mc := newMessageCreator(t) + outboundGetMsg, err := mc.Get(ids.Empty, 1, time.Second, ids.Empty, p2p.EngineType_ENGINE_TYPE_SNOWMAN) + require.NoError(t, err) + require.True(t, sender.Send(context.Background(), outboundGetMsg)) + inboundGetMsg := <-receiver.inboundMsgChan + require.Equal(t, message.GetOp, inboundGetMsg.Op()) } diff --git a/avalanchego/network/peer/set.go b/avalanchego/network/peer/set.go index a26901f3..cbb9675e 100644 --- a/avalanchego/network/peer/set.go +++ b/avalanchego/network/peer/set.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package peer @@ -120,9 +120,7 @@ func (s *peerSet) Sample(n int, precondition func(Peer) bool) []Peer { } sampler := sampler.NewUniform() - // It is impossible for the sampler to report an error here. Since - // [len(s.peersSlice)] <= MaxInt64. 
- _ = sampler.Initialize(uint64(len(s.peersSlice))) + sampler.Initialize(uint64(len(s.peersSlice))) peers := make([]Peer, 0, n) for len(peers) < n { diff --git a/avalanchego/network/peer/set_test.go b/avalanchego/network/peer/set_test.go index f26b1d19..c28c1ce7 100644 --- a/avalanchego/network/peer/set_test.go +++ b/avalanchego/network/peer/set_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package peer @@ -18,24 +18,24 @@ func TestSet(t *testing.T) { set := NewSet() peer1 := &peer{ - id: ids.NodeID{0x01}, + id: ids.BuildTestNodeID([]byte{0x01}), observedUptimes: map[ids.ID]uint32{constants.PrimaryNetworkID: 0}, } updatedPeer1 := &peer{ - id: ids.NodeID{0x01}, + id: ids.BuildTestNodeID([]byte{0x01}), observedUptimes: map[ids.ID]uint32{constants.PrimaryNetworkID: 1}, } peer2 := &peer{ - id: ids.NodeID{0x02}, + id: ids.BuildTestNodeID([]byte{0x02}), } unknownPeer := &peer{ - id: ids.NodeID{0xff}, + id: ids.BuildTestNodeID([]byte{0xff}), } peer3 := &peer{ - id: ids.NodeID{0x03}, + id: ids.BuildTestNodeID([]byte{0x03}), } peer4 := &peer{ - id: ids.NodeID{0x04}, + id: ids.BuildTestNodeID([]byte{0x04}), } // add of first peer is handled @@ -105,10 +105,10 @@ func TestSetSample(t *testing.T) { set := NewSet() peer1 := &peer{ - id: ids.NodeID{0x01}, + id: ids.BuildTestNodeID([]byte{0x01}), } peer2 := &peer{ - id: ids.NodeID{0x02}, + id: ids.BuildTestNodeID([]byte{0x02}), } // Case: Empty @@ -128,10 +128,10 @@ func TestSetSample(t *testing.T) { require.Empty(peers) peers = set.Sample(1, NoPrecondition) - require.Equal(peers, []Peer{peer1}) + require.Equal([]Peer{peer1}, peers) peers = set.Sample(2, NoPrecondition) - require.Equal(peers, []Peer{peer1}) + require.Equal([]Peer{peer1}, peers) // Case: 2 peers set.Add(peer2) diff --git a/avalanchego/network/peer/test_network.go b/avalanchego/network/peer/test_network.go index 
9bac6260..01a341ae 100644 --- a/avalanchego/network/peer/test_network.go +++ b/avalanchego/network/peer/test_network.go @@ -1,11 +1,11 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package peer import ( "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/proto/pb/p2p" + "github.com/ava-labs/avalanchego/utils/bloom" "github.com/ava-labs/avalanchego/utils/ips" ) @@ -19,16 +19,16 @@ func (testNetwork) AllowConnection(ids.NodeID) bool { return true } -func (testNetwork) Track(ids.NodeID, []*ips.ClaimedIPPort) ([]*p2p.PeerAck, error) { - return nil, nil -} - -func (testNetwork) MarkTracked(ids.NodeID, []*p2p.PeerAck) error { +func (testNetwork) Track([]*ips.ClaimedIPPort) error { return nil } func (testNetwork) Disconnected(ids.NodeID) {} -func (testNetwork) Peers(ids.NodeID) ([]ips.ClaimedIPPort, error) { - return nil, nil +func (testNetwork) KnownPeers() ([]byte, []byte) { + return bloom.EmptyFilter.Marshal(), nil +} + +func (testNetwork) Peers(ids.NodeID, *bloom.ReadFilter, []byte) []*ips.ClaimedIPPort { + return nil } diff --git a/avalanchego/network/peer/test_peer.go b/avalanchego/network/peer/test_peer.go index d813a16a..eb1a7947 100644 --- a/avalanchego/network/peer/test_peer.go +++ b/avalanchego/network/peer/test_peer.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package peer @@ -20,6 +20,7 @@ import ( "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/staking" "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/ips" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/math/meter" @@ -62,7 +63,11 @@ func StartTestPeer( } tlsConfg := TLSConfig(*tlsCert, nil) - clientUpgrader := NewTLSClientUpgrader(tlsConfg) + clientUpgrader := NewTLSClientUpgrader( + tlsConfg, + prometheus.NewCounter(prometheus.CounterOpts{}), + version.GetDurangoTime(networkID), + ) peerID, conn, cert, err := clientUpgrader.Upgrade(conn) if err != nil { @@ -99,8 +104,12 @@ func StartTestPeer( return nil, err } - signerIP := ips.NewDynamicIPPort(net.IPv6zero, 0) - tls := tlsCert.PrivateKey.(crypto.Signer) + signerIP := ips.NewDynamicIPPort(net.IPv6zero, 1) + tlsKey := tlsCert.PrivateKey.(crypto.Signer) + blsKey, err := bls.NewSecretKey() + if err != nil { + return nil, err + } peer := Start( &Config{ @@ -112,14 +121,15 @@ func StartTestPeer( Router: router, VersionCompatibility: version.GetCompatibility(networkID), MySubnets: set.Set[ids.ID]{}, - Beacons: validators.NewSet(), + Beacons: validators.NewManager(), + Validators: validators.NewManager(), NetworkID: networkID, PingFrequency: constants.DefaultPingFrequency, PongTimeout: constants.DefaultPingPongTimeout, MaxClockDifference: time.Minute, ResourceTracker: resourceTracker, UptimeCalculator: uptime.NoOpCalculator, - IPSigner: NewIPSigner(signerIP, tls), + IPSigner: NewIPSigner(signerIP, tlsKey, blsKey), }, conn, cert, diff --git a/avalanchego/network/peer/tls_config.go b/avalanchego/network/peer/tls_config.go index 733812db..7de848ed 100644 --- a/avalanchego/network/peer/tls_config.go +++ b/avalanchego/network/peer/tls_config.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
+// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package peer diff --git a/avalanchego/network/peer/upgrader.go b/avalanchego/network/peer/upgrader.go index 3e892188..93419221 100644 --- a/avalanchego/network/peer/upgrader.go +++ b/avalanchego/network/peer/upgrader.go @@ -1,15 +1,18 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package peer import ( "crypto/tls" - "crypto/x509" "errors" "net" + "time" + + "github.com/prometheus/client_golang/prometheus" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/staking" ) var ( @@ -21,46 +24,75 @@ var ( type Upgrader interface { // Must be thread safe - Upgrade(net.Conn) (ids.NodeID, net.Conn, *x509.Certificate, error) + Upgrade(net.Conn) (ids.NodeID, net.Conn, *staking.Certificate, error) } type tlsServerUpgrader struct { - config *tls.Config + config *tls.Config + invalidCerts prometheus.Counter + durangoTime time.Time } -func NewTLSServerUpgrader(config *tls.Config) Upgrader { - return tlsServerUpgrader{ - config: config, +func NewTLSServerUpgrader(config *tls.Config, invalidCerts prometheus.Counter, durangoTime time.Time) Upgrader { + return &tlsServerUpgrader{ + config: config, + invalidCerts: invalidCerts, + durangoTime: durangoTime, } } -func (t tlsServerUpgrader) Upgrade(conn net.Conn) (ids.NodeID, net.Conn, *x509.Certificate, error) { - return connToIDAndCert(tls.Server(conn, t.config)) +func (t *tlsServerUpgrader) Upgrade(conn net.Conn) (ids.NodeID, net.Conn, *staking.Certificate, error) { + return connToIDAndCert(tls.Server(conn, t.config), t.invalidCerts, t.durangoTime) } type tlsClientUpgrader struct { - config *tls.Config + config *tls.Config + invalidCerts prometheus.Counter + durangoTime time.Time } -func NewTLSClientUpgrader(config *tls.Config) Upgrader { - return tlsClientUpgrader{ - config: 
config, +func NewTLSClientUpgrader(config *tls.Config, invalidCerts prometheus.Counter, durangoTime time.Time) Upgrader { + return &tlsClientUpgrader{ + config: config, + invalidCerts: invalidCerts, + durangoTime: durangoTime, } } -func (t tlsClientUpgrader) Upgrade(conn net.Conn) (ids.NodeID, net.Conn, *x509.Certificate, error) { - return connToIDAndCert(tls.Client(conn, t.config)) +func (t *tlsClientUpgrader) Upgrade(conn net.Conn) (ids.NodeID, net.Conn, *staking.Certificate, error) { + return connToIDAndCert(tls.Client(conn, t.config), t.invalidCerts, t.durangoTime) } -func connToIDAndCert(conn *tls.Conn) (ids.NodeID, net.Conn, *x509.Certificate, error) { +func connToIDAndCert(conn *tls.Conn, invalidCerts prometheus.Counter, durangoTime time.Time) (ids.NodeID, net.Conn, *staking.Certificate, error) { if err := conn.Handshake(); err != nil { - return ids.NodeID{}, nil, nil, err + return ids.EmptyNodeID, nil, nil, err } state := conn.ConnectionState() if len(state.PeerCertificates) == 0 { - return ids.NodeID{}, nil, nil, errNoCert + return ids.EmptyNodeID, nil, nil, errNoCert + } + + tlsCert := state.PeerCertificates[0] + // Invariant: ParseCertificate is used rather than CertificateFromX509 to + // ensure that signature verification can assume the certificate was + // parseable according the staking package's parser. + // + // TODO: Remove pre-Durango parsing after v1.11.x has activated. 
+ var ( + peerCert *staking.Certificate + err error + ) + if time.Now().Before(durangoTime) { + peerCert, err = staking.ParseCertificate(tlsCert.Raw) + } else { + peerCert, err = staking.ParseCertificatePermissive(tlsCert.Raw) } - peerCert := state.PeerCertificates[0] - return ids.NodeIDFromCert(peerCert), conn, peerCert, nil + if err != nil { + invalidCerts.Inc() + return ids.EmptyNodeID, nil, nil, err + } + + nodeID := ids.NodeIDFromCert(peerCert) + return nodeID, conn, peerCert, nil } diff --git a/avalanchego/network/peer/validator_id.go b/avalanchego/network/peer/validator_id.go deleted file mode 100644 index 5471fda2..00000000 --- a/avalanchego/network/peer/validator_id.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package peer - -import "github.com/ava-labs/avalanchego/ids" - -// ValidatorID represents a validator that we gossip to other peers -type ValidatorID struct { - // The validator's ID - NodeID ids.NodeID - // The Tx that added this into the validator set - TxID ids.ID -} diff --git a/avalanchego/network/test_cert_1.crt b/avalanchego/network/test_cert_1.crt new file mode 100644 index 00000000..2f2b95e6 --- /dev/null +++ b/avalanchego/network/test_cert_1.crt @@ -0,0 +1,27 @@ +-----BEGIN CERTIFICATE----- +MIIEnTCCAoWgAwIBAgIBADANBgkqhkiG9w0BAQsFADAAMCAXDTk5MTIzMTAwMDAw +MFoYDzIxMjQwMTA5MTQ0NTU4WjAAMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIIC +CgKCAgEAqCOUESK8b5N894dVCSIs4mTfNTdhaL5cnw3ZXSbZlfquBRJOxhqHXutG +An9++OTWvevrssaXBxGT4oOT3N11dm4iKh7ewi3to+1Sfqq71blCVZtBDOeWpZx0 +WwhPO37Us26fCR7T2gStiTHY9qE0QV/9p15OCAFsRb94JuhF0OR0d6tRm0yQ6b7Y +NRzpaBw4MBxZD9h84+QDdhsTyxI0xk/NnbG74pykjsau0/YA9mNqHHSnL4DyD5qu +IKqRfD5HQHemx66I3jEXUB/GxTHhxz5uskIpS9AV3oclvVi14BjSEWgNkJX+nMi+ +tjuSKouAFpzJZzZme2DvmyAecxbNVBdajOTe2QRiG7HKh1OdMZabd2dUNv5S9/gd +bI53s4R++z/H4llsBfk6B2+/DmqDRauh4Mz9HTf0Pud7Nz2b7r77PnPTjHExgN3R +i+Yo6LskRCQTzzTVwW/RY+rNVux9UE6ZPLarDbXnSyetKMUS7qlz8NUerWjtkC6i 
+om570LfTGs3GxIqVgoGg0mXuji+EoG+XpYR3PRaeo8cAmfEu7T+SxgSfJAv7DyZv ++a2VTZcOPDI1KTLrM8Xovy17t5rd9cy1/75vxnKLiGDEhzWJmNl4IvIYbtihWWl5 +ksdFYbe9Dpvuh/wBCGoK+kmCirUM1DiizWn5TxJeS1qYI8I2sYMCAwEAAaMgMB4w +DgYDVR0PAQH/BAQDAgSwMAwGA1UdEwEB/wQCMAAwDQYJKoZIhvcNAQELBQADggIB +AABzczRjzfhlmV+bFDzAs7nszQlZREcoRuWe5qHy7VKLvZvIAaYlcApB34hH7nDq +T/8fS8g8rC+Tzw0iCPF21Z4AzSe76V6EU4VGWWe8l00nDszfvavE5BF24z8dCuVC +1gScC1tvG6FPT23koZ0BVmyueCIa7sdqFlDz8rrRpLWfUcLj38gxwWM2JVBHQUvV +j87lzpTNH+2nPiwrKISqUPFi4YvbWKe8T4bY2Elw7THiNLZGfgqOXVkeIVi4fs97 +Tc5uscZ4OpSTlrfJqMJEV8cMRvrDmhD/VWbJvnk7lyELPoHx6MUinBswBT51yvmY +bZh4AZ43GSvSyo/V7p9scytQP3zM1MeHpsFa0RHwGVFp2BmO1abvydAxX0NMWasv +WUzXCKliXsVD/qUeCU/CFnaBqpzBvm4AFBgwHzprwzP9Be/mz/TjTcsfrmoiyxlr +QjXNk9TnP9d+aeOJsRz+JSYyHETACO5PkCg+XCDyEOf+kQAzVb9Dp0oWaCovXciU +A5z0DSDzyKVBOQo0syb5NFsLZ2DeJemNbP+3kCNzBBASQ4VWAvRbLjPh3Oe8A5PZ +xezCvzRE05O6tYkz5C5hcKbpAjfP8G8RV6ERjLBICBfb7XI7T0hixhiNHlIKknkJ +F82B/zDt+qBFARw8A/qr44RF+vy3Ql4IS2ZcflAv2pTO +-----END CERTIFICATE----- diff --git a/avalanchego/network/test_cert_2.crt b/avalanchego/network/test_cert_2.crt new file mode 100644 index 00000000..283e286b --- /dev/null +++ b/avalanchego/network/test_cert_2.crt @@ -0,0 +1,27 @@ +-----BEGIN CERTIFICATE----- +MIIEnTCCAoWgAwIBAgIBADANBgkqhkiG9w0BAQsFADAAMCAXDTk5MTIzMTAwMDAw +MFoYDzIxMjQwMTA5MTQ0NTQ3WjAAMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIIC +CgKCAgEArT7afarml6cvCmAySAO8GQka1mcQIMACyEWy6KsqiccX+DoGh5ECyQSa +WFKWKGdQ32dAWGVlSkmmgJ1jtW749hSguysav3EPMaxe/ad5CV1MwyyccGS9U99M +z0UVuFEXVjN5W6UlcULp1oJDj07NzZP6ByRiDvnjzgeYb3jHwjqOBNwex1jLW6vp +oWD03zTanVQXZaaGcEISCI2CgDP3uXfd0NQpoGVpf9gMi0cdGu8gpqbLqBjzjzr8 +GDBQYGaWKFnlqe6X9nBUad/qNE3Zeb3ehSg+M2ecQzTZFWirfa6cGTtovu04RMML +9OLflQy3rTRST2HQ6z0gpVCP3V2Mg/LmAuWyhOLVYNkhEwkRHvddzFksRzQ+ghpP +cGfvI0dwxQV0CbEMVjd9zVEA6dOrMLI3st2922hqF23Al1+Hwcu1G/T3ybfSTwjd +YZ23IgkQF4r+RIXevzgOBBXfEwE8XERW2zNwUG5Sv5dxx+FgDjX0EGbrzgY6OeKT +D1SP/7WQLjwmGgwyNJYkAklvEKwU+dlGD5NpgvJ9fg8R1wUhp2HhSZ1l1OUVmRYw 
+YqUm7dTLK1CJU2BH2sRyZcUkwstjvgi688zfHNttGYmAnx6wGS12jWf+W4df+QNI +Ng6AdcJ5Ee0z0JAbTpZW/zX3CTSroow7igHnd4AwvKEVQFcyO/MCAwEAAaMgMB4w +DgYDVR0PAQH/BAQDAgSwMAwGA1UdEwEB/wQCMAAwDQYJKoZIhvcNAQELBQADggIB +ACePaZvjw2KiheheWNjzOv2B+7uLVe7oEbThEUQypEmTFK8wKaHwI4BGdBGEOr/N +LZ1M2wAYgwzMTEDJE+GEB2ZHIdH9cH5lu7ITsOMVcBSJttEJVhhEtbMwVJ9JC62j +AsW4VmHFpEik+xvinxedKczXOa21YJo4sv2TiFWFaSHqPeRo7HA1dxQYOwiLsS6e +JKIupMrn8IZz2YN5gFhbvQTBp2J3u6kxMIzN0a+BPARR4fwMn5lVMVvye/+8Kwtw +dZHSN1FYUcFqHagmhNlNkAOaGQklSFWtsVVQxQCFS2bxEImLj5kG16fCAsQoRC0J +ZS2OaRncrtB0r0Qu1JB5XJP9FLflSb57KIxBNVrl+iWdWikgBFE6cMthMwgLfQ99 +k8AMp6KrCjcxqegN+P30ct/JwahKPq2+SwtdHG3yrZ2TJEjhOtersrTnRK9zqm9v +lqS7JsiztjgqnhMs2eTdXygfEe0AoZihGTaaLYj37A9+2RECkuijkjBghG2NBnv6 +264lTghZyZcZgZNCgYglYC1bhifEorJpYf6TOOcDAi5UH8R7vi4x70vI6sIDrhga +d9E63EVe11QdIjceceMlNm42UTrhl0epMbL6FIzU+d91qBgd9qT6YqoYPFZSiYFy +2hArgLxH2fxTXatCAit5g1MEk0w1MiHVrPZ8lTU3U/ET +-----END CERTIFICATE----- diff --git a/avalanchego/network/test_cert_3.crt b/avalanchego/network/test_cert_3.crt new file mode 100644 index 00000000..c0977191 --- /dev/null +++ b/avalanchego/network/test_cert_3.crt @@ -0,0 +1,27 @@ +-----BEGIN CERTIFICATE----- +MIIEnTCCAoWgAwIBAgIBADANBgkqhkiG9w0BAQsFADAAMCAXDTk5MTIzMTAwMDAw +MFoYDzIxMjQwMTA5MTQ0NTM0WjAAMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIIC +CgKCAgEA5aV76ivIZ1iWmW0OzGMCrmFQBnej9JntQ1jP9yiacKu7j5Z/bD/eqoyc +jRwoSiesErfnThAGy7H80glVw/XmC0fYNPVDPyzAEdNk46M3yEI8hAKI6aSkl1s1 +KVAHpQuNcG+3xIB39OOMx0XuycZ6gqzyMmjqeT0cThNDXTwGbodMVDAf0q220QAq +zB/lz0sjHPXlYh25LJ1yPtl+vlcfGrP+q+2ODR9rnI79PE7AZB4Xc6wUIca5XXkH +PS7zQ1Ida1xrf446MYCVuazLFhpzq8/nhkxNMzxdZsJaWavL+xkpjGxAySvj0jlu +QFGsmsxOIU/XgJD/VRqqyISXpl2wg0l8mpsU9fV7bEW1y6MIc7AARRgbbEPiDz8m +/O8mjEW3C16untLHB7LzPCCitTssGR65Shkj+Lw+aM4X5ZI+Xm8eHTRCek8T5Cl3 +Sm2UFkLk2mun6cwoyWWhwi6+EfW6ks0c7qSHtJTP8DgLrWxYmBuD9PKSHclpa4/5 +toj52YnT6fIBJWz5ggIdntRCaH8+0eWvwuvDsdPUL7JQFjJmfQOdMenlNqW2aEvx ++JZiYLJBWj9cjpI33P5CAfFEVM3IFlDHmMHRTQ/kKLcfvSDfuofEBoMt4tjf01Um 
+dfi8kFKWl9ba9I7CoQ13U4J1wkk6KxatZP7eGCmKRoq8w+Y38NsCAwEAAaMgMB4w +DgYDVR0PAQH/BAQDAgSwMAwGA1UdEwEB/wQCMAAwDQYJKoZIhvcNAQELBQADggIB +AKsvbN5/r4YPguetl+jIhqpr4TZM8GNZRGTqkKC8clRspBeihJqkNQWsnZiFkJTH +NhNAx+7tlJHqeGdojc2XjBAkc+//qYqXKHgihsO54bVG9oN9IPO+mpPumRRhGneH +jTUE/hLFqwA4ZPw5L1HtJ0m1yqg/HXf4aBXcVQ/YO8YN17ZgLpueYt+Chi1pP/Ku +TzHuoKuHst2T6uuZQZxcD+XJoXwdOt7mfPTh5y9/Psjn+qx833DNWSwF3O/lEghA +2yOb+5CFta2LLUHH894oj5SvgJ/5cvn4+NbyDCUv5ebvE98BMh72PLNRuIRV0gfO +XalMIZ+9Jm2TGXD0dWt9GeZ5z3h+nCEB6s3x0sqluaWG3lTUx+4T/aIxdGuvPFi6 +7DWm7TG7yxFGfbECyyXXL+B/gyHhE1Q93nE3wK9flSG+ljqFJS+8wytht52XhgwE +lV1AwHgxkbkFzNIwB0s7etR9+wBcQvFKqeCZrDeG1twKNcY1dv1D/OCUlBYJvL/X +YADeT2ZjFzHhWhv6TLVEAtqytT1o4qXh6VWeIrwfMG0VcQSiJyNxwO/aW5BOTM44 +EelDzvSjo/pRxqN/m44Iuf0Ran86DO7LmjNYh/04FN3oaL9cFIaT9BWXt/Xx2Fdw ++dg5bPSJ62ExVnnNRlY9lQECkSoRZK2epcICs+3YmmGX +-----END CERTIFICATE----- diff --git a/avalanchego/network/test_key_1.key b/avalanchego/network/test_key_1.key new file mode 100644 index 00000000..c4977511 --- /dev/null +++ b/avalanchego/network/test_key_1.key @@ -0,0 +1,52 @@ +-----BEGIN PRIVATE KEY----- +MIIJQwIBADANBgkqhkiG9w0BAQEFAASCCS0wggkpAgEAAoICAQCoI5QRIrxvk3z3 +h1UJIiziZN81N2FovlyfDdldJtmV+q4FEk7GGode60YCf3745Na96+uyxpcHEZPi +g5Pc3XV2biIqHt7CLe2j7VJ+qrvVuUJVm0EM55alnHRbCE87ftSzbp8JHtPaBK2J +Mdj2oTRBX/2nXk4IAWxFv3gm6EXQ5HR3q1GbTJDpvtg1HOloHDgwHFkP2Hzj5AN2 +GxPLEjTGT82dsbvinKSOxq7T9gD2Y2ocdKcvgPIPmq4gqpF8PkdAd6bHrojeMRdQ +H8bFMeHHPm6yQilL0BXehyW9WLXgGNIRaA2Qlf6cyL62O5Iqi4AWnMlnNmZ7YO+b +IB5zFs1UF1qM5N7ZBGIbscqHU50xlpt3Z1Q2/lL3+B1sjnezhH77P8fiWWwF+ToH +b78OaoNFq6HgzP0dN/Q+53s3PZvuvvs+c9OMcTGA3dGL5ijouyREJBPPNNXBb9Fj +6s1W7H1QTpk8tqsNtedLJ60oxRLuqXPw1R6taO2QLqKibnvQt9MazcbEipWCgaDS +Ze6OL4Sgb5elhHc9Fp6jxwCZ8S7tP5LGBJ8kC/sPJm/5rZVNlw48MjUpMuszxei/ +LXu3mt31zLX/vm/GcouIYMSHNYmY2Xgi8hhu2KFZaXmSx0Vht70Om+6H/AEIagr6 +SYKKtQzUOKLNaflPEl5LWpgjwjaxgwIDAQABAoICAHGe8U0PGyWPFlCzLDyq0of+ +wHNWxEWi9jYphqyTN1BJgVU+BOuMO9RhywKfI6+P/KmFBtbdqmuFblkQr1f+c4Uf 
+cYjjKYcwwDkZg7jDKYGI2pG9A51z1nJ9oodtuxUqZRQH+gKQyXq31Ik0nTg0wXo4 +ItH6QWLZi1AqzkgEiEFcUHQZ2mDGwdqjM7nYmsXW5AVm8qxpkCP0Dn6+V4bP+8fT +X9BjreK6Fd3B15y2zfmyPp+SGPRZ/7mZvnemq/+4mi+va43enPEBXY6wmoLhbYBV +6ToeyYdIy65/x3oHu4f/Xd2TYi9FnTRX18CPyvtjH6CoPNW5hlFztRcwAkOlsgQ7 +sZ+9FGAnRvz1lrBg80DeCHeSKVkDHmMQSINhPcPnlMJpxn6iiZjdvz/Bd+9RRqZl +xUI/lV3/Wueh8SeCQlFOj3fHBZEaq6QoC/VmmaeIiLEm1hj+ymuFxwOtA6AKWLb3 +59XnEkONeTfv9d2eQ7NOPU86n/zhWHUKodmBUEaxLDaUwRkS1Adb4rLuRwrMfn3a +2KkknYWzvyrlk8lDqKAMeQneFmpresGAXeIn0vt434eaGcK4a/IZ8PebuhZxGq1Z +bVbxVm0AsLmd9X3htR6MOiZswnVmA3JCw1AMKZpLMDRSbjV0uYuhBJQsN4Y/kyOK +l52JtymFNvbuRF+836+RAoIBAQDZ9wyihmgsEPLl7PHzfYo4pnTs1puoT5PS7GjO +iVm7UtOKaawsJxKX3cxzSFVXONs9hbPPzmsQEL3Xz+lUsgrSeXReF00KLRbfE2LM +dv9hlJVMQXEKnEkFYNNgETyZIJE3ZDDqdd2PDzNM8aKHlvLYREiETCwVn7r4x5QE +jIHC0gUjRJHqUgSdAMa+qvranPLxVV9mpJmL2RXjjb/OtJosFef9h5augSNI9tPS +EDLm4wMjyXr25Vu20/cusmTlOhCzi2d23hNHx8nPE0nCEVtZ2rnnWyH/ozqRnpXX +EPh0IeZQmebBhHWzkjIPaOa05Ua5rkVAQau8/FUUubjXytyZAoIBAQDFerIQwodP +V46WVC0LtSq4ju88x1vgDfT0NFE3H6hIX7Mc91zt0NGOhzv4crfjnoj+romNfQwD +0ymtudnnoaGPFBRrRF8T+26jfFpes7Ve5q/PpY78zJH1ZLwyKKX4dzgeY0Aj9FbO +q4dzh21oD7wyknRm0NTqOvgLAuxoBFZ4FTgudKNDzGymgIaQVT1+h0226og289WT +iptkpOZ/HcxQts2U3j3a87pJB0IFjIrBTtVqIyphdwRVDa929WGDITUPHa3aqykx +Ma/zvXvocAlIDITVwxXlS16DkSS+5jdN/CUj5h0O6FefGaJmk6/bFQIeXM4fRhRF +M0cs1mxXkNR7AoIBAQCFxYftn4wDr4tD7f44sE3Kou6UBMqXq+9PvmQ8jjOSMi0+ +f8h5eKmCp0+5WSV3WJ/FzG8lFMzEmWHKOAI+Rt85ee0fajGQE0g8NMuoLUhjfSt8 +F5XnKy/tqxVPmoSUflZhpo4W96u5B1021f4oNU5pyM6w04ci5lt8IBEKEan6Bae9 +k3HyW9AVA8r2bj1zOmwoDXt1pYPPPraeZ/rWRCVy9SbihPrHst4TA9nQzLxQ0/az +Wg6rxOxa8xB7imU+AjsJ1n7zhyxSG54SBwZ3outr5D/AbEAbgvSJNslDq1iw/bU6 +tpnXHxKV2R38MyeU0jpr7zb1Tti2Li+RfsKhPhHRAoIBAHfbpXH4r6mfaeKiCokd +l2VXE6tfEMtnjTIfAuAjLb9nnk3JcTTCVj5cpDCCaEwV7+4sPz6KFB3KL3TK5Y/q +ESXHOTF12QNGyvsdQbhS+JU2DKVKRgP3oetADd2fwESTD5OaB9cKuRlNELQ1EVlk +m4RSUaYJwAC+c8gzKQtk/pp5vpSrpGBFFfjk70dxBRbjxm5r4OsBibK4IOKwF1o1 +2sluek6NqRtYbMtgRVka2SjE0VFPMKzhUNbSrJnWCy5MnGilSdz7n8/E6ZdVfXwx 
+a+C4AHPBqWt3GFFgad4X2p9Rl7U3OJHQwUXGiEQcBVNCZ/vHti9TGIB7xApZxn5L +YDsCggEBAJ8RhrfEzm2YkyODFKFwgOszHQ3TNSvbC4+yLOUMSdzdKIyroOq0t53A +PSs046TINd+EDs9Pi6E69C+RYLim1NYMHeHFMzmKnQPXPwJVnYYUKInbIMURcuE9 +8FNBSKg3SUGz31SwG4bRIkJluMUp5oSAEUxWaxbUzLYkZex2uxnUGSd6TjddWKk1 ++SuoiZ3+W6yPWWh7TDKAR/oukBCmLIJI7dXSwv2DhagRpppdoMfqcnsCAgs/omB8 +Ku4y/jEkGbxLgo3Qd6U1o/QZlZG+9Q0iaxQS4dIpMxA3LwrL5txy00bm3JeWMB4H +MUZqfFgfj8ESxFBEeToOwr3Jq46vOwQ= +-----END PRIVATE KEY----- diff --git a/avalanchego/network/test_key_2.key b/avalanchego/network/test_key_2.key new file mode 100644 index 00000000..bcc0a192 --- /dev/null +++ b/avalanchego/network/test_key_2.key @@ -0,0 +1,52 @@ +-----BEGIN PRIVATE KEY----- +MIIJQwIBADANBgkqhkiG9w0BAQEFAASCCS0wggkpAgEAAoICAQCtPtp9quaXpy8K +YDJIA7wZCRrWZxAgwALIRbLoqyqJxxf4OgaHkQLJBJpYUpYoZ1DfZ0BYZWVKSaaA +nWO1bvj2FKC7Kxq/cQ8xrF79p3kJXUzDLJxwZL1T30zPRRW4URdWM3lbpSVxQunW +gkOPTs3Nk/oHJGIO+ePOB5hveMfCOo4E3B7HWMtbq+mhYPTfNNqdVBdlpoZwQhII +jYKAM/e5d93Q1CmgZWl/2AyLRx0a7yCmpsuoGPOPOvwYMFBgZpYoWeWp7pf2cFRp +3+o0Tdl5vd6FKD4zZ5xDNNkVaKt9rpwZO2i+7ThEwwv04t+VDLetNFJPYdDrPSCl +UI/dXYyD8uYC5bKE4tVg2SETCREe913MWSxHND6CGk9wZ+8jR3DFBXQJsQxWN33N +UQDp06swsjey3b3baGoXbcCXX4fBy7Ub9PfJt9JPCN1hnbciCRAXiv5Ehd6/OA4E +Fd8TATxcRFbbM3BQblK/l3HH4WAONfQQZuvOBjo54pMPVI//tZAuPCYaDDI0liQC +SW8QrBT52UYPk2mC8n1+DxHXBSGnYeFJnWXU5RWZFjBipSbt1MsrUIlTYEfaxHJl +xSTCy2O+CLrzzN8c220ZiYCfHrAZLXaNZ/5bh1/5A0g2DoB1wnkR7TPQkBtOllb/ +NfcJNKuijDuKAed3gDC8oRVAVzI78wIDAQABAoICAQCIgPu7BMuINoyUClPT9k1h +FJF22eIVS/VlQ7XCKgvsX1j9lwrKCnI9XUkXyorR7wYD4OEMRWhX7kwpDtoffP7h +NkOm9kGvEjA8nWqDRk/SFxeCuUXSMS4URd/JeM+yWQKgQxKeKTOlWGnTQPRmmFsE +XlIlCn/Q+QiLr+RmAK601VpNbfs6azZgVsZRB4opzQVr7XQ5/cnz7bszzfxDc67/ +DflSr7jUztMfjmXj3/aI4F3DsazKGE7gTkOP85GBQ5OQ27Rf/sTxwnRgr7Nj3us6 +R2ZrWNgZvMudEKjze3OUJd6M6wiPV258j4p+O7ybPlgDOzSXo6TvlUyBtUaFz04E +5S7bgimNUxEjFzTxkn9W/FTUeauvJcgDk+JmMZ+I9dFdMIuyksndywN9KdXBVxZH +1ZtO1P6JeFpxF7zQUmkH+/6RZd9PbQGlpNI06nAj98LVwqSDCO1aejLqoXYs9zqG 
+DOU4JdRm3qK0eshIghkvVOWIYhqKPkskQfbTFY+hasg82cGGFyzxqOsSiuW+CVIy +3iF3WyfKgvLMABoK/38zutsMT+/mOtA7rjErh1NJuwwWkkglmuwQMDqaWdOASs+v +MK8JjSi6zDpnbp70Prw5pUlHvvsD1iYWo7SOcpFos+U5zw1jHJJvnAatzcXWixuu +Xzbn2BtCqSFigW7waMy14QKCAQEAx/Nwy2xH9lVGfz8aO2CB0FGL9Ra3Jcv4HFJT +nw6/yvVLvRAwr87+/c+qbIzwLKbQXV/4vmNsqPrIJiazY+Tk739DjcW8YaMbejfr +ASPHtYbeF0FmVbxBHNZ/JSDSYUXdFZ7JlBiDSs3zhPlFBZYG2tU3JJZCR8+9J/Ss +JEIwL9UlapMznMwljFkLbvZ2oFstKkfdY61WxROOIwuGaKr0yRnNvMMp135JiB/O +dwh/NfROt4JzQ5O4ipMg6Wc73+OvBsOSQHYZQHl9NOaK1uomu5bUY7H8pLwGU7sw +LmPRzrGiu8dB+UUEyFkNI2xzwkjet+0UGupDyOfsCMf9hlzWmwKCAQEA3c8FeHkl +Il4GEB0VEw1NtC5x6i+s3NiPOlUmH+nOHgdaI7/BfljfTokQBGo+GkXkJ36yTEMh +L9Vtya3HtW4VEHNfPMjntPztn4XQvMZdSpu/k8rM44m+CB0DDLhFfwRr2cyUAwHz +xebXw8KhceqaWRp6ygJGx5Sk0gr7s7nhmIByjdx4tddEH/MahLklGdV7Vnp+yb3o +zNLVx/aDueknArgUb/zvZRcYWuNoGs9ac4pl0m6jan/x0ZcdBF0SU2bI6ltvF3WT +qwcvVnbJbBwq5PRuL4ZUqrqmXBbBAkpLJTx+kfPKD4bgcZTBnV2TxDbzze9CeieT +YCtg4u+khW7ZiQKCAQBrMIEuPD0TvEFPo8dvP1w4Dg9Gc0f5li/LFwNHCIQezIMu +togzJ3ehHvuQt7llZoPbGsDhZ7FvoQk9EpAmpCVqksHnNbK4cNUhHur3sHO2R7e1 +pdSzb3lEeWStxbuic+6CUZ5kqwNvTZsXlP3Acd344EZwcbDUiHQyAENsKKNmcRBe +4szPaM1UQMQVV0De1CIRQXdYoSsb+VDATsReRg9140Rcxg8fO881jz+CpmZzySWN +0PvzpTRP7XG+Th5V9tv0d1FnByigXMCXZGPXtKzQ8ZmoXFlBAp8tsfKxW8e005uW +qMogVDStJrgZXmFsLN5goVKe3yk5gcMSLgwmRIyzAoIBAQCoE6CkmsAd27uiaDc4 ++aLA/1TIzZmiu+NEo5NBKY1LyexvHHZGBJgqTcg6YDtw8zchCmuXSGMUeRk5cxrb +C3Cgx5wKVn7l8acqc18qPPIigATavBkn7o92XG2cLOJUjogfQVuDL+6GLxeeupRV +2x1cmakj/DegMq32j+YNWbRuOB8WClPaDyYLQ877dcR8X/2XGTmMLAEFfFoMrWtB +7D/oWo76EWNiae7FqH6RmkCDPwNLQxVHtW4LkQOm89PYKRHkLKbw0uKz/bzMOzUE +XA/Q8Lux/YuY19kJ/SACWUO6Eq4icObTfzQCPWO9mFRJog57JWttXyHZBOXk8Qzt +I4NpAoIBACurK0zJxaGUdTjmzaVipauyOZYFBsbzvCWsdSNodtZ/mw6n/qkj2N33 +vNCRLrsQAkDKATzWrscRg+xvl5/wIa4B3s8TZNIp3hL7bvI/NoR5bi5M0vcjdXEd +DeKeZsSBzEs5zivM3aWEF5MSR2zpJPNYyD0PnT6EvZOkMoq6LM3FJcouS1ChePLQ +wHEY5ZMqPODOcQ+EixNXl6FGdywaJYxKnG4liG9zdJ0lGNIivTA7gyM+JCbG4fs8 +73uGsbCpts5Y2xKFp3uK8HjWKbOCR3dE4mOZM8M/NlsUGNjSydXZMIJYWR8nvVmo 
+i3mHicYaTQxj0ruIz7JHOtFNVGi1sME= +-----END PRIVATE KEY----- diff --git a/avalanchego/network/test_key_3.key b/avalanchego/network/test_key_3.key new file mode 100644 index 00000000..2cef238b --- /dev/null +++ b/avalanchego/network/test_key_3.key @@ -0,0 +1,52 @@ +-----BEGIN PRIVATE KEY----- +MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQDlpXvqK8hnWJaZ +bQ7MYwKuYVAGd6P0me1DWM/3KJpwq7uPln9sP96qjJyNHChKJ6wSt+dOEAbLsfzS +CVXD9eYLR9g09UM/LMAR02TjozfIQjyEAojppKSXWzUpUAelC41wb7fEgHf044zH +Re7JxnqCrPIyaOp5PRxOE0NdPAZuh0xUMB/SrbbRACrMH+XPSyMc9eViHbksnXI+ +2X6+Vx8as/6r7Y4NH2ucjv08TsBkHhdzrBQhxrldeQc9LvNDUh1rXGt/jjoxgJW5 +rMsWGnOrz+eGTE0zPF1mwlpZq8v7GSmMbEDJK+PSOW5AUayazE4hT9eAkP9VGqrI +hJemXbCDSXyamxT19XtsRbXLowhzsABFGBtsQ+IPPyb87yaMRbcLXq6e0scHsvM8 +IKK1OywZHrlKGSP4vD5ozhflkj5ebx4dNEJ6TxPkKXdKbZQWQuTaa6fpzCjJZaHC +Lr4R9bqSzRzupIe0lM/wOAutbFiYG4P08pIdyWlrj/m2iPnZidPp8gElbPmCAh2e +1EJofz7R5a/C68Ox09QvslAWMmZ9A50x6eU2pbZoS/H4lmJgskFaP1yOkjfc/kIB +8URUzcgWUMeYwdFND+Qotx+9IN+6h8QGgy3i2N/TVSZ1+LyQUpaX1tr0jsKhDXdT +gnXCSTorFq1k/t4YKYpGirzD5jfw2wIDAQABAoICAQC/Rt32h29NvTj7JB5OWS2z +h3R7Xo2ev9Mi5EecSyKQNEpuZ+FMjcpubd47nrdkRLULhkhP+gNfCKpXW9Um+psY +zEemnJ7dcO2uK1B+VsWwtJLpNZ9KVIuPUjXuai1j6EJv423Ca2r++8WXeYVSZVJH +o7u8By09vIvl8B+M+eE1kNYfzVHETlLWtHfxO6RTy/a8OYhM+ArzwVSWStxJuBE9 +Ua0PETffcEtWxLbi04lmGrZX7315QKfG1ncUHBYc/blpYjpbrWCFON/9HpKtn2y3 +L91dPBKVWXNGkx1kUTb+t8+mmchAh6Ejyhgt1Jma+g8dqf4KpTs3bJXRnLcfqCvL +Kq+wCUGv7iVWlTmhlzLpneajLDdBxGfbkAgwPFOyZoJNrnh6hU60TPc1IV6YSLlB +GsxesK9QWUrg3BAN4iKD3FvDt0qeUPbPztxEZi1OzSYQDZUQBrBL+WHuD9NxeAYe +2yx1OlPMo73gK5GW/MHBCz77+NX2kVURlTvYW4TsmInCRvOTsVNkRPUJtiHYT7Ss +Y8SzS5F/u9sfjFAVowGgwtNfq8Rm6Q1QdPZltiUNBgiTekFNQEy7WhzVg6MlT5Ca +BRqUhN3+CFwxLZ9rSQL6gxfAHk9umb0ee4JU9JgcYjtb5AtyE6DmmcSZPSejjxit +HwZ/g5MDK7kk5fKMcnL7kQKCAQEA895z7T0c6y3rhWfEUMDdTlsPgAoxYNf+jXyJ +aQmtfnDP9tf8BdPpobfHp29e7JRaGGa9QWPaaemBPHXMmD+IegG9/E+PQdHQwFSG +OpI13uCBULt8a+MMUbTCg1V4uXqf2j1BUo9SFQ6aXh/Rg1gVBgsq1M6eyvel93io 
+0X+/cinsDEpB5HENZwBuRb0SP0RfCgQR9Yh+jIy2TwJDDNw3sG1TvIo9aK7blSwB +z/gwSDx1UUa2KReD4ChYcqgLFUj3F/uF2f20P/JuaUn7tU3HoCsbG0C+Cci/XSJ9 +gu8xYl64Vg16bO3CflqjucPTFXgyBOt0lIug77YYa9CgCUJvEwKCAQEA8RHqGghV +meDnRXvPmAEwtoT7IKBe+eYjGN6wc2o+QZzjeUFkyfOtaB8rqriUXqvihD2GD6XQ +O/cSNCqp5g6yUhBLo3b9BmCsQsvxkhMpwB/hdi5aYjn+CFQVD4rAso9yGwRBWoA0 +gQdGMKenOUhU/PtVKyTTUuY7rFD8RhYq0ZLqEgO7chn8QXCNPo7MfE/qF9vQBosP +ktiS0FG442PJp2B/lYKK6N2w77ZeCoLhQowaNN0/N36kX/n4bjBE2XFLNpSuHtlg +C7bV/RMR5i/3yB0eRVUDVlqC077qlC1w0tCNZvvi6kbWwIu/4pQTdcA8mAz5B7Lc +OwOMbA2GT4OIGQKCAQABoyS0Gwzup0hFhQTUZfcWZ5YbDfZ25/xVhtiFVANOLgO3 +bIvMnjebVliIzz6b6AMS1t2+aqU0wNSVS1UsUIDiENDtuLsFfhsgr3CXRBQIgwlb +OWcEcmnKwqPrrc85r5ETLgYaP8wVSBvRNfV6JEU/3SNUem6mfjMnDjBT97+ZTJ7B +Fl6K4hds8ZvL7BELS7I3pv9X3qq61tcCgMlidLgK/zDouyTeZw4iWkFI3Cm20nEX +MppWfEnuX1b4rhgk9HB0QMQNSp7DLyV+n3iJJxSIBsIP1Mdx2V8viOO+1UxHlMs4 +CK8hvBbqMkGXJbFtG3l6fvoxZR6XfWl8j9IDPebxAoIBAF07cnBy/LgwdQE4awb8 +ntxX/c+WdmTrjnNV3KQmWMGDba49jj9UkKIOPBMgo7EhhM9kA+8VT72BRncKcP7a +fDikuLwVjrHivXxv55N4+dKmAcp1DtuiVg7ehe6m2PO16olsUeIwZx3ntEuo61GK +GeRlR4ESEvCivj1cbNSmShUXXpNtAheU2Sxt3RJuo8MIHR7xEjkVmwZN4CnVEU5Q +D3M+LNmjzRlWc9GhlCk4iOn1yUTctFBAGE5OHLhwzo/R8ya+xcCEjVK6eXQQ5gFC +V+/64vQpdsr04lgGJC7+i/3cTnOfwxicIP4CjkmQvx3xJP4hNka189qW+r3nVSR3 +WDECggEAAQCCqF4J8C2keY+o/kYQBq0tHhrC28HgiVQuCGc4XruYQtDh4di/I72F +RsvgVHS29ApAlh29i29ws7K2bU6WIc+JR3nmwAHUtiJmxRZhn/c722AvRXF5YMH/ +u46bEURHF5sGz8vr5chX/R4LiF579xyNsB9KC3mPqdjW/L6ACQdrBJVAS9cwplO0 +D+YWxmCE1Ps2tQtz6ZN+LUC7WO6M24k8KW2y4Scue0/23uCllWFgS3/vxDdQDZWn ++7AvMYPh4Wrfdd0t0cU+c9rirFYVz+uo/QBUIZOIw64AvIUjZpHTbhcjz1mAqcgJ +eAOQk+OFUTNKeI9uJwoNYOguHsxt2w== +-----END PRIVATE KEY----- diff --git a/avalanchego/network/test_network.go b/avalanchego/network/test_network.go index 296108b7..1cb56127 100644 --- a/avalanchego/network/test_network.go +++ b/avalanchego/network/test_network.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. package network @@ -25,6 +25,7 @@ import ( "github.com/ava-labs/avalanchego/staking" "github.com/ava-labs/avalanchego/subnets" "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/ips" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/math/meter" @@ -66,14 +67,14 @@ func (l *noopListener) Close() error { func (*noopListener) Addr() net.Addr { return &net.TCPAddr{ IP: net.IPv4zero, - Port: 0, + Port: 1, } } func NewTestNetwork( log logging.Logger, networkID uint32, - currentValidators validators.Set, + currentValidators validators.Manager, trackedSubnets set.Set[ids.ID], router router.ExternalHandler, ) (Network, error) { @@ -156,6 +157,8 @@ func NewTestNetwork( PeerListNonValidatorGossipSize: constants.DefaultNetworkPeerListNonValidatorGossipSize, PeerListPeersGossipSize: constants.DefaultNetworkPeerListPeersGossipSize, PeerListGossipFreq: constants.DefaultNetworkPeerListGossipFreq, + PeerListPullGossipFreq: constants.DefaultNetworkPeerListPullGossipFreq, + PeerListBloomResetFreq: constants.DefaultNetworkPeerListBloomResetFreq, }, DelayConfig: DelayConfig{ @@ -166,7 +169,7 @@ func NewTestNetwork( MaxClockDifference: constants.DefaultNetworkMaxClockDifference, CompressionType: constants.DefaultNetworkCompressionType, PingFrequency: constants.DefaultPingFrequency, - AllowPrivateIPs: constants.DefaultNetworkAllowPrivateIPs, + AllowPrivateIPs: !constants.ProductionNetworkIDs.Contains(networkID), UptimeMetricFreq: constants.DefaultUptimeMetricFreq, MaximumInboundMessageTimeout: constants.DefaultNetworkMaximumInboundTimeout, @@ -185,12 +188,13 @@ func NewTestNetwork( tlsConfig := peer.TLSConfig(*tlsCert, nil) networkConfig.TLSConfig = tlsConfig networkConfig.TLSKey = tlsCert.PrivateKey.(crypto.Signer) + networkConfig.BLSKey, err = bls.NewSecretKey() + if err != nil { + return nil, err + } - 
validatorManager := validators.NewManager() - beacons := validators.NewSet() - networkConfig.Validators = validatorManager - networkConfig.Validators.Add(constants.PrimaryNetworkID, currentValidators) - networkConfig.Beacons = beacons + networkConfig.Validators = currentValidators + networkConfig.Beacons = validators.NewManager() // This never actually does anything because we never initialize the P-chain networkConfig.UptimeCalculator = uptime.NoOpCalculator @@ -207,6 +211,7 @@ func NewTestNetwork( return nil, err } networkConfig.CPUTargeter = tracker.NewTargeter( + logging.NoLog{}, &tracker.TargeterConfig{ VdrAlloc: float64(runtime.NumCPU()), MaxNonVdrUsage: .8 * float64(runtime.NumCPU()), @@ -216,6 +221,7 @@ func NewTestNetwork( networkConfig.ResourceTracker.CPUTracker(), ) networkConfig.DiskTargeter = tracker.NewTargeter( + logging.NoLog{}, &tracker.TargeterConfig{ VdrAlloc: 1000 * units.GiB, MaxNonVdrUsage: 1000 * units.GiB, @@ -225,12 +231,7 @@ func NewTestNetwork( networkConfig.ResourceTracker.DiskTracker(), ) - networkConfig.MyIPPort = ips.NewDynamicIPPort(net.IPv4zero, 0) - - networkConfig.GossipTracker, err = peer.NewGossipTracker(metrics, "") - if err != nil { - return nil, err - } + networkConfig.MyIPPort = ips.NewDynamicIPPort(net.IPv4zero, 1) return NewNetwork( &networkConfig, diff --git a/avalanchego/network/throttling/bandwidth_throttler.go b/avalanchego/network/throttling/bandwidth_throttler.go index 5adfcb00..cde94b96 100644 --- a/avalanchego/network/throttling/bandwidth_throttler.go +++ b/avalanchego/network/throttling/bandwidth_throttler.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package throttling @@ -9,9 +9,7 @@ import ( "time" "github.com/prometheus/client_golang/prometheus" - "go.uber.org/zap" - "golang.org/x/time/rate" "github.com/ava-labs/avalanchego/ids" diff --git a/avalanchego/network/throttling/bandwidth_throttler_test.go b/avalanchego/network/throttling/bandwidth_throttler_test.go index 9b4b7eaf..9f919547 100644 --- a/avalanchego/network/throttling/bandwidth_throttler_test.go +++ b/avalanchego/network/throttling/bandwidth_throttler_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package throttling @@ -9,7 +9,6 @@ import ( "testing" "github.com/prometheus/client_golang/prometheus" - "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" @@ -25,13 +24,13 @@ func TestBandwidthThrottler(t *testing.T) { } throttlerIntf, err := newBandwidthThrottler(logging.NoLog{}, "", prometheus.NewRegistry(), config) require.NoError(err) - throttler, ok := throttlerIntf.(*bandwidthThrottlerImpl) - require.True(ok) + require.IsType(&bandwidthThrottlerImpl{}, throttlerIntf) + throttler := throttlerIntf.(*bandwidthThrottlerImpl) require.NotNil(throttler.log) require.NotNil(throttler.limiters) - require.EqualValues(throttler.RefillRate, 8) - require.EqualValues(throttler.MaxBurstSize, 10) - require.Len(throttler.limiters, 0) + require.Equal(config.RefillRate, throttler.RefillRate) + require.Equal(config.MaxBurstSize, throttler.MaxBurstSize) + require.Empty(throttler.limiters) // Add a node nodeID1 := ids.GenerateTestNodeID() @@ -40,7 +39,7 @@ func TestBandwidthThrottler(t *testing.T) { // Remove the node throttler.RemoveNode(nodeID1) - require.Len(throttler.limiters, 0) + require.Empty(throttler.limiters) // Add the node back throttler.AddNode(nodeID1) diff --git a/avalanchego/network/throttling/common.go b/avalanchego/network/throttling/common.go index c2a92db3..cedd5d73 100644 --- 
a/avalanchego/network/throttling/common.go +++ b/avalanchego/network/throttling/common.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package throttling @@ -22,8 +22,7 @@ type MsgByteThrottlerConfig struct { type commonMsgThrottler struct { log logging.Logger lock sync.Mutex - // Primary network validator set - vdrs validators.Set + vdrs validators.Manager // Max number of bytes that can be taken from the // at-large byte allocation by a given node. nodeMaxAtLargeBytes uint64 diff --git a/avalanchego/network/throttling/dial_throttler.go b/avalanchego/network/throttling/dial_throttler.go index 491c312b..07c04aef 100644 --- a/avalanchego/network/throttling/dial_throttler.go +++ b/avalanchego/network/throttling/dial_throttler.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package throttling diff --git a/avalanchego/network/throttling/dial_throttler_test.go b/avalanchego/network/throttling/dial_throttler_test.go index f3c3f651..1dd57c2e 100644 --- a/avalanchego/network/throttling/dial_throttler_test.go +++ b/avalanchego/network/throttling/dial_throttler_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package throttling @@ -13,6 +13,8 @@ import ( // Test that the DialThrottler returned by NewDialThrottler works func TestDialThrottler(t *testing.T) { + require := require.New(t) + startTime := time.Now() // Allows 5 per second throttler := NewDialThrottler(5) @@ -21,13 +23,12 @@ func TestDialThrottler(t *testing.T) { acquiredChan := make(chan struct{}, 1) // Should return immediately because < 5 taken this second go func() { - err := throttler.Acquire(context.Background()) - require.NoError(t, err) + require.NoError(throttler.Acquire(context.Background())) acquiredChan <- struct{}{} }() select { case <-time.After(10 * time.Millisecond): - t.Fatal("should have acquired immediately") + require.FailNow("should have acquired immediately") case <-acquiredChan: } close(acquiredChan) @@ -36,15 +37,14 @@ func TestDialThrottler(t *testing.T) { acquiredChan := make(chan struct{}, 1) go func() { // Should block because 5 already taken within last second - err := throttler.Acquire(context.Background()) - require.NoError(t, err) + require.NoError(throttler.Acquire(context.Background())) acquiredChan <- struct{}{} }() select { case <-time.After(25 * time.Millisecond): case <-acquiredChan: - t.Fatal("should not have been able to acquire immediately") + require.FailNow("should not have been able to acquire immediately") } // Wait until the 6th Acquire() has returned. 
The time at which @@ -54,13 +54,13 @@ func TestDialThrottler(t *testing.T) { close(acquiredChan) // Use 1.05 seconds instead of 1 second to give some "wiggle room" // so test doesn't flake - if time.Since(startTime) > 1050*time.Millisecond { - t.Fatal("should not have blocked for so long") - } + require.LessOrEqual(time.Since(startTime), 1050*time.Millisecond) } // Test that Acquire honors its specification about its context being canceled func TestDialThrottlerCancel(t *testing.T) { + require := require.New(t) + // Allows 5 per second throttler := NewDialThrottler(5) // Use all 5 @@ -68,13 +68,12 @@ func TestDialThrottlerCancel(t *testing.T) { acquiredChan := make(chan struct{}, 1) // Should return immediately because < 5 taken this second go func() { - err := throttler.Acquire(context.Background()) - require.NoError(t, err) + require.NoError(throttler.Acquire(context.Background())) acquiredChan <- struct{}{} }() select { case <-time.After(10 * time.Millisecond): - t.Fatal("should have acquired immediately") + require.FailNow("should have acquired immediately") case <-acquiredChan: } close(acquiredChan) @@ -86,7 +85,7 @@ func TestDialThrottlerCancel(t *testing.T) { // Should block because 5 already taken within last second err := throttler.Acquire(ctx) // Should error because we call cancel() below - require.Error(t, err) + require.ErrorIs(err, context.Canceled) acquiredChan <- struct{}{} }() @@ -95,18 +94,19 @@ func TestDialThrottlerCancel(t *testing.T) { select { case <-acquiredChan: case <-time.After(10 * time.Millisecond): - t.Fatal("Acquire should have returned immediately upon context cancelation") + require.FailNow("Acquire should have returned immediately upon context cancellation") } close(acquiredChan) } // Test that the Throttler return by NewNoThrottler never blocks on Acquire() func TestNoDialThrottler(t *testing.T) { + require := require.New(t) + throttler := NewNoDialThrottler() for i := 0; i < 250; i++ { startTime := time.Now() - err := 
throttler.Acquire(context.Background()) // Should always immediately return - require.NoError(t, err) - require.WithinDuration(t, time.Now(), startTime, 25*time.Millisecond) + require.NoError(throttler.Acquire(context.Background())) // Should always immediately return + require.WithinDuration(time.Now(), startTime, 25*time.Millisecond) } } diff --git a/avalanchego/network/throttling/inbound_conn_throttler.go b/avalanchego/network/throttling/inbound_conn_throttler.go index 7f220639..5e152807 100644 --- a/avalanchego/network/throttling/inbound_conn_throttler.go +++ b/avalanchego/network/throttling/inbound_conn_throttler.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package throttling diff --git a/avalanchego/network/throttling/inbound_conn_throttler_test.go b/avalanchego/network/throttling/inbound_conn_throttler_test.go index 9e13e32b..9e2fde15 100644 --- a/avalanchego/network/throttling/inbound_conn_throttler_test.go +++ b/avalanchego/network/throttling/inbound_conn_throttler_test.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package throttling import ( + "context" "net" "testing" @@ -21,7 +22,7 @@ type MockListener struct { func (ml *MockListener) Accept() (net.Conn, error) { if ml.OnAcceptF == nil { - ml.t.Fatal("unexpectedly called Accept") + require.FailNow(ml.t, "unexpectedly called Accept") return nil, nil } return ml.OnAcceptF() @@ -29,7 +30,7 @@ func (ml *MockListener) Accept() (net.Conn, error) { func (ml *MockListener) Close() error { if ml.OnCloseF == nil { - ml.t.Fatal("unexpectedly called Close") + require.FailNow(ml.t, "unexpectedly called Close") return nil } return ml.OnCloseF() @@ -37,13 +38,15 @@ func (ml *MockListener) Close() error { func (ml *MockListener) Addr() net.Addr { if ml.OnAddrF == nil { - ml.t.Fatal("unexpectedly called Addr") + require.FailNow(ml.t, "unexpectedly called Addr") return nil } return ml.OnAddrF() } func TestInboundConnThrottlerClose(t *testing.T) { + require := require.New(t) + closed := false l := &MockListener{ t: t, @@ -53,18 +56,18 @@ func TestInboundConnThrottlerClose(t *testing.T) { }, } wrappedL := NewThrottledListener(l, 1) - err := wrappedL.Close() - require.NoError(t, err) - require.True(t, closed) + require.NoError(wrappedL.Close()) + require.True(closed) + select { case <-wrappedL.(*throttledListener).ctx.Done(): default: - t.Fatal("should have closed context") + require.FailNow("should have closed context") } // Accept() should return an error because the context is cancelled - _, err = wrappedL.Accept() - require.Error(t, err) + _, err := wrappedL.Accept() + require.ErrorIs(err, context.Canceled) } func TestInboundConnThrottlerAddr(t *testing.T) { @@ -82,6 +85,8 @@ func TestInboundConnThrottlerAddr(t *testing.T) { } func TestInboundConnThrottlerAccept(t *testing.T) { + require := require.New(t) + acceptCalled := false l := &MockListener{ t: t, @@ -92,6 +97,6 @@ func TestInboundConnThrottlerAccept(t *testing.T) { } wrappedL := NewThrottledListener(l, 1) _, err := wrappedL.Accept() - require.NoError(t, err) - require.True(t, 
acceptCalled) + require.NoError(err) + require.True(acceptCalled) } diff --git a/avalanchego/network/throttling/inbound_conn_upgrade_throttler.go b/avalanchego/network/throttling/inbound_conn_upgrade_throttler.go index 9d058e29..4df5ee39 100644 --- a/avalanchego/network/throttling/inbound_conn_upgrade_throttler.go +++ b/avalanchego/network/throttling/inbound_conn_upgrade_throttler.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package throttling diff --git a/avalanchego/network/throttling/inbound_conn_upgrade_throttler_test.go b/avalanchego/network/throttling/inbound_conn_upgrade_throttler_test.go index 03fec7a8..2f6cd926 100644 --- a/avalanchego/network/throttling/inbound_conn_upgrade_throttler_test.go +++ b/avalanchego/network/throttling/inbound_conn_upgrade_throttler_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package throttling @@ -23,6 +23,8 @@ var ( ) func TestNoInboundConnUpgradeThrottler(t *testing.T) { + require := require.New(t) + { throttler := NewInboundConnUpgradeThrottler( logging.NoLog{}, @@ -33,8 +35,7 @@ func TestNoInboundConnUpgradeThrottler(t *testing.T) { ) // throttler should allow all for i := 0; i < 10; i++ { - allow := throttler.ShouldUpgrade(host1) - require.True(t, allow) + require.True(throttler.ShouldUpgrade(host1)) } } { @@ -47,8 +48,7 @@ func TestNoInboundConnUpgradeThrottler(t *testing.T) { ) // throttler should allow all for i := 0; i < 10; i++ { - allow := throttler.ShouldUpgrade(host1) - require.True(t, allow) + require.True(throttler.ShouldUpgrade(host1)) } } } @@ -91,7 +91,7 @@ func TestInboundConnUpgradeThrottler(t *testing.T) { throttler := throttlerIntf.(*inboundConnUpgradeThrottler) select { case <-throttler.done: - t.Fatal("shouldn't be done") + require.FailNow("shouldn't be done") default: } @@ -102,6 +102,6 @@ func TestInboundConnUpgradeThrottler(t *testing.T) { case _, chanOpen := <-throttler.done: require.False(chanOpen) default: - t.Fatal("should be done") + require.FailNow("should be done") } } diff --git a/avalanchego/network/throttling/inbound_msg_buffer_throttler.go b/avalanchego/network/throttling/inbound_msg_buffer_throttler.go index d0617783..65306eea 100644 --- a/avalanchego/network/throttling/inbound_msg_buffer_throttler.go +++ b/avalanchego/network/throttling/inbound_msg_buffer_throttler.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package throttling diff --git a/avalanchego/network/throttling/inbound_msg_buffer_throttler_test.go b/avalanchego/network/throttling/inbound_msg_buffer_throttler_test.go index 329957cb..38e6d735 100644 --- a/avalanchego/network/throttling/inbound_msg_buffer_throttler_test.go +++ b/avalanchego/network/throttling/inbound_msg_buffer_throttler_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package throttling @@ -9,7 +9,6 @@ import ( "time" "github.com/prometheus/client_golang/prometheus" - "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" @@ -27,15 +26,15 @@ func TestMsgBufferThrottler(t *testing.T) { throttler.Acquire(context.Background(), nodeID1) throttler.Acquire(context.Background(), nodeID1) require.Len(throttler.nodeToNumProcessingMsgs, 1) - require.EqualValues(3, throttler.nodeToNumProcessingMsgs[nodeID1]) + require.Equal(uint64(3), throttler.nodeToNumProcessingMsgs[nodeID1]) // Acquire shouldn't block for other node throttler.Acquire(context.Background(), nodeID2) throttler.Acquire(context.Background(), nodeID2) throttler.Acquire(context.Background(), nodeID2) require.Len(throttler.nodeToNumProcessingMsgs, 2) - require.EqualValues(3, throttler.nodeToNumProcessingMsgs[nodeID1]) - require.EqualValues(3, throttler.nodeToNumProcessingMsgs[nodeID2]) + require.Equal(uint64(3), throttler.nodeToNumProcessingMsgs[nodeID1]) + require.Equal(uint64(3), throttler.nodeToNumProcessingMsgs[nodeID2]) // Acquire should block for 4th acquire done := make(chan struct{}) @@ -45,7 +44,7 @@ func TestMsgBufferThrottler(t *testing.T) { }() select { case <-done: - t.Fatal("should block on acquiring") + require.FailNow("should block on acquiring") case <-time.After(50 * time.Millisecond): } @@ -53,7 +52,7 @@ func TestMsgBufferThrottler(t *testing.T) { // fourth acquire should be unblocked <-done 
require.Len(throttler.nodeToNumProcessingMsgs, 2) - require.EqualValues(3, throttler.nodeToNumProcessingMsgs[nodeID2]) + require.Equal(uint64(3), throttler.nodeToNumProcessingMsgs[nodeID2]) // Releasing from other node should have no effect throttler.release(nodeID2) @@ -64,7 +63,7 @@ func TestMsgBufferThrottler(t *testing.T) { throttler.release(nodeID1) throttler.release(nodeID1) throttler.release(nodeID1) - require.Len(throttler.nodeToNumProcessingMsgs, 0) + require.Empty(throttler.nodeToNumProcessingMsgs) } // Test inboundMsgBufferThrottler when an acquire is cancelled @@ -80,7 +79,7 @@ func TestMsgBufferThrottlerContextCancelled(t *testing.T) { throttler.Acquire(vdr1Context, nodeID1) throttler.Acquire(vdr1Context, nodeID1) require.Len(throttler.nodeToNumProcessingMsgs, 1) - require.EqualValues(3, throttler.nodeToNumProcessingMsgs[nodeID1]) + require.Equal(uint64(3), throttler.nodeToNumProcessingMsgs[nodeID1]) // Acquire should block for 4th acquire done := make(chan struct{}) @@ -90,7 +89,7 @@ func TestMsgBufferThrottlerContextCancelled(t *testing.T) { }() select { case <-done: - t.Fatal("should block on acquiring") + require.FailNow("should block on acquiring") case <-time.After(50 * time.Millisecond): } @@ -102,7 +101,7 @@ func TestMsgBufferThrottlerContextCancelled(t *testing.T) { }() select { case <-done2: - t.Fatal("should block on acquiring") + require.FailNow("should block on acquiring") case <-time.After(50 * time.Millisecond): } @@ -111,11 +110,11 @@ func TestMsgBufferThrottlerContextCancelled(t *testing.T) { select { case <-done2: case <-time.After(50 * time.Millisecond): - t.Fatal("cancelling context should unblock Acquire") + require.FailNow("cancelling context should unblock Acquire") } select { case <-done: case <-time.After(50 * time.Millisecond): - t.Fatal("should be blocked") + require.FailNow("should be blocked") } } diff --git a/avalanchego/network/throttling/inbound_msg_byte_throttler.go 
b/avalanchego/network/throttling/inbound_msg_byte_throttler.go index 66efa79b..0bac7ca2 100644 --- a/avalanchego/network/throttling/inbound_msg_byte_throttler.go +++ b/avalanchego/network/throttling/inbound_msg_byte_throttler.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package throttling @@ -8,14 +8,13 @@ import ( "time" "github.com/prometheus/client_golang/prometheus" - "go.uber.org/zap" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/linkedhashmap" "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/utils/math" "github.com/ava-labs/avalanchego/utils/metric" "github.com/ava-labs/avalanchego/utils/wrappers" ) @@ -26,7 +25,7 @@ func newInboundMsgByteThrottler( log logging.Logger, namespace string, registerer prometheus.Registerer, - vdrs validators.Set, + vdrs validators.Manager, config MsgByteThrottlerConfig, ) (*inboundMsgByteThrottler, error) { t := &inboundMsgByteThrottler{ @@ -96,7 +95,7 @@ func (t *inboundMsgByteThrottler) Acquire(ctx context.Context, msgSize uint64, n t.lock.Lock() - // If there is already a message waiting, log the error but continue + // If there is already a message waiting, log the error and return if existingID, exists := t.nodeToWaitingMsgID[nodeID]; exists { t.log.Error("node already waiting on message", zap.Stringer("nodeID", nodeID), @@ -107,7 +106,7 @@ func (t *inboundMsgByteThrottler) Acquire(ctx context.Context, msgSize uint64, n } // Take as many bytes as we can from the at-large allocation. 
- atLargeBytesUsed := math.Min( + atLargeBytesUsed := min( // only give as many bytes as needed metadata.bytesNeeded, // don't exceed per-node limit @@ -131,9 +130,16 @@ func (t *inboundMsgByteThrottler) Acquire(ctx context.Context, msgSize uint64, n // Take as many bytes as we can from [nodeID]'s validator allocation. // Calculate [nodeID]'s validator allocation size based on its weight vdrAllocationSize := uint64(0) - weight := t.vdrs.GetWeight(nodeID) + weight := t.vdrs.GetWeight(constants.PrimaryNetworkID, nodeID) if weight != 0 { - vdrAllocationSize = uint64(float64(t.maxVdrBytes) * float64(weight) / float64(t.vdrs.Weight())) + totalWeight, err := t.vdrs.TotalWeight(constants.PrimaryNetworkID) + if err != nil { + t.log.Error("couldn't get total weight of primary network", + zap.Error(err), + ) + } else { + vdrAllocationSize = uint64(float64(t.maxVdrBytes) * float64(weight) / float64(totalWeight)) + } } vdrBytesAlreadyUsed := t.nodeToVdrBytesUsed[nodeID] // [vdrBytesAllowed] is the number of bytes this node @@ -145,7 +151,7 @@ func (t *inboundMsgByteThrottler) Acquire(ctx context.Context, msgSize uint64, n } else { vdrBytesAllowed -= vdrBytesAlreadyUsed } - vdrBytesUsed := math.Min(t.remainingVdrBytes, metadata.bytesNeeded, vdrBytesAllowed) + vdrBytesUsed := min(t.remainingVdrBytes, metadata.bytesNeeded, vdrBytesAllowed) if vdrBytesUsed > 0 { // Mark that [nodeID] used [vdrBytesUsed] from its validator allocation t.nodeToVdrBytesUsed[nodeID] += vdrBytesUsed @@ -208,7 +214,7 @@ func (t *inboundMsgByteThrottler) release(metadata *msgMetadata, nodeID ids.Node // or messages from [nodeID] currently waiting to acquire bytes. 
vdrBytesUsed := t.nodeToVdrBytesUsed[nodeID] releasedBytes := metadata.msgSize - metadata.bytesNeeded - vdrBytesToReturn := math.Min(releasedBytes, vdrBytesUsed) + vdrBytesToReturn := min(releasedBytes, vdrBytesUsed) // [atLargeBytesToReturn] is the number of bytes from [msgSize] // that will be given to the at-large allocation or a message @@ -231,7 +237,7 @@ func (t *inboundMsgByteThrottler) release(metadata *msgMetadata, nodeID ids.Node msg := iter.Value() // From the at-large allocation, take the maximum number of bytes // without exceeding the per-node limit on taking from at-large pool. - atLargeBytesGiven := math.Min( + atLargeBytesGiven := min( // don't give [msg] too many bytes msg.bytesNeeded, // don't exceed per-node limit @@ -264,7 +270,7 @@ func (t *inboundMsgByteThrottler) release(metadata *msgMetadata, nodeID ids.Node msg, exists := t.waitingToAcquire.Get(msgID) if exists { // Give [msg] all the bytes we can - bytesToGive := math.Min(msg.bytesNeeded, vdrBytesToReturn) + bytesToGive := min(msg.bytesNeeded, vdrBytesToReturn) msg.bytesNeeded -= bytesToGive vdrBytesToReturn -= bytesToGive if msg.bytesNeeded == 0 { diff --git a/avalanchego/network/throttling/inbound_msg_byte_throttler_test.go b/avalanchego/network/throttling/inbound_msg_byte_throttler_test.go index 6565d0b6..52ffcf83 100644 --- a/avalanchego/network/throttling/inbound_msg_byte_throttler_test.go +++ b/avalanchego/network/throttling/inbound_msg_byte_throttler_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package throttling @@ -9,11 +9,11 @@ import ( "time" "github.com/prometheus/client_golang/prometheus" - "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/logging" ) @@ -24,9 +24,9 @@ func TestInboundMsgByteThrottlerCancelContextDeadlock(t *testing.T) { AtLargeAllocSize: 1, NodeMaxAtLargeBytes: 1, } - vdrs := validators.NewSet() + vdrs := validators.NewManager() vdr := ids.GenerateTestNodeID() - require.NoError(vdrs.Add(vdr, nil, ids.Empty, 1)) + require.NoError(vdrs.AddStaker(constants.PrimaryNetworkID, vdr, nil, ids.Empty, 1)) throttler, err := newInboundMsgByteThrottler( logging.NoLog{}, @@ -52,11 +52,11 @@ func TestInboundMsgByteThrottlerCancelContext(t *testing.T) { AtLargeAllocSize: 512, NodeMaxAtLargeBytes: 1024, } - vdrs := validators.NewSet() + vdrs := validators.NewManager() vdr1ID := ids.GenerateTestNodeID() vdr2ID := ids.GenerateTestNodeID() - require.NoError(vdrs.Add(vdr1ID, nil, ids.Empty, 1)) - require.NoError(vdrs.Add(vdr2ID, nil, ids.Empty, 1)) + require.NoError(vdrs.AddStaker(constants.PrimaryNetworkID, vdr1ID, nil, ids.Empty, 1)) + require.NoError(vdrs.AddStaker(constants.PrimaryNetworkID, vdr2ID, nil, ids.Empty, 1)) throttler, err := newInboundMsgByteThrottler( logging.NoLog{}, @@ -78,7 +78,7 @@ func TestInboundMsgByteThrottlerCancelContext(t *testing.T) { }() select { case <-vdr2Done: - t.Fatal("should block on acquiring any more bytes") + require.FailNow("should block on acquiring any more bytes") case <-time.After(50 * time.Millisecond): } @@ -86,7 +86,7 @@ func TestInboundMsgByteThrottlerCancelContext(t *testing.T) { throttler.lock.Lock() require.Len(throttler.nodeToWaitingMsgID, 1) require.Contains(throttler.nodeToWaitingMsgID, vdr2ID) - require.EqualValues(1, throttler.waitingToAcquire.Len()) + require.Equal(1, throttler.waitingToAcquire.Len()) _, exists := 
throttler.waitingToAcquire.Get(throttler.nodeToWaitingMsgID[vdr2ID]) require.True(exists) throttler.lock.Unlock() @@ -97,7 +97,7 @@ func TestInboundMsgByteThrottlerCancelContext(t *testing.T) { select { case <-vdr2Done: case <-time.After(50 * time.Millisecond): - t.Fatal("channel should signal because ctx was cancelled") + require.FailNow("channel should signal because ctx was cancelled") } require.NotContains(throttler.nodeToWaitingMsgID, vdr2ID) @@ -110,11 +110,11 @@ func TestInboundMsgByteThrottler(t *testing.T) { AtLargeAllocSize: 1024, NodeMaxAtLargeBytes: 1024, } - vdrs := validators.NewSet() + vdrs := validators.NewManager() vdr1ID := ids.GenerateTestNodeID() vdr2ID := ids.GenerateTestNodeID() - require.NoError(vdrs.Add(vdr1ID, nil, ids.Empty, 1)) - require.NoError(vdrs.Add(vdr2ID, nil, ids.Empty, 1)) + require.NoError(vdrs.AddStaker(constants.PrimaryNetworkID, vdr1ID, nil, ids.Empty, 1)) + require.NoError(vdrs.AddStaker(constants.PrimaryNetworkID, vdr2ID, nil, ids.Empty, 1)) throttler, err := newInboundMsgByteThrottler( logging.NoLog{}, @@ -137,49 +137,49 @@ func TestInboundMsgByteThrottler(t *testing.T) { // Take from at-large allocation. // Should return immediately. 
throttler.Acquire(context.Background(), 1, vdr1ID) - require.EqualValues(config.AtLargeAllocSize-1, throttler.remainingAtLargeBytes) - require.EqualValues(config.VdrAllocSize, throttler.remainingVdrBytes) - require.Len(throttler.nodeToVdrBytesUsed, 0) + require.Equal(config.AtLargeAllocSize-1, throttler.remainingAtLargeBytes) + require.Equal(config.VdrAllocSize, throttler.remainingVdrBytes) + require.Empty(throttler.nodeToVdrBytesUsed) require.Len(throttler.nodeToAtLargeBytesUsed, 1) - require.EqualValues(1, throttler.nodeToAtLargeBytesUsed[vdr1ID]) + require.Equal(uint64(1), throttler.nodeToAtLargeBytesUsed[vdr1ID]) // Release the bytes throttler.release(&msgMetadata{msgSize: 1}, vdr1ID) - require.EqualValues(config.AtLargeAllocSize, throttler.remainingAtLargeBytes) - require.EqualValues(config.VdrAllocSize, throttler.remainingVdrBytes) - require.Len(throttler.nodeToVdrBytesUsed, 0) - require.Len(throttler.nodeToAtLargeBytesUsed, 0) + require.Equal(config.AtLargeAllocSize, throttler.remainingAtLargeBytes) + require.Equal(config.VdrAllocSize, throttler.remainingVdrBytes) + require.Empty(throttler.nodeToVdrBytesUsed) + require.Empty(throttler.nodeToAtLargeBytesUsed) // Use all the at-large allocation bytes and 1 of the validator allocation bytes // Should return immediately. throttler.Acquire(context.Background(), config.AtLargeAllocSize+1, vdr1ID) // vdr1 at-large bytes used: 1024. 
Validator bytes used: 1 - require.EqualValues(0, throttler.remainingAtLargeBytes) - require.EqualValues(config.VdrAllocSize-1, throttler.remainingVdrBytes) - require.EqualValues(throttler.nodeToVdrBytesUsed[vdr1ID], 1) + require.Zero(throttler.remainingAtLargeBytes) + require.Equal(config.VdrAllocSize-1, throttler.remainingVdrBytes) + require.Equal(uint64(1), throttler.nodeToVdrBytesUsed[vdr1ID]) require.Len(throttler.nodeToVdrBytesUsed, 1) require.Len(throttler.nodeToAtLargeBytesUsed, 1) - require.EqualValues(config.AtLargeAllocSize, throttler.nodeToAtLargeBytesUsed[vdr1ID]) + require.Equal(config.AtLargeAllocSize, throttler.nodeToAtLargeBytesUsed[vdr1ID]) // The other validator should be able to acquire half the validator allocation. // Should return immediately. throttler.Acquire(context.Background(), config.AtLargeAllocSize/2, vdr2ID) // vdr2 at-large bytes used: 0. Validator bytes used: 512 - require.EqualValues(config.VdrAllocSize/2-1, throttler.remainingVdrBytes) - require.EqualValues(throttler.nodeToVdrBytesUsed[vdr1ID], 1) - require.EqualValues(throttler.nodeToVdrBytesUsed[vdr2ID], config.VdrAllocSize/2) + require.Equal(config.VdrAllocSize/2-1, throttler.remainingVdrBytes) + require.Equal(uint64(1), throttler.nodeToVdrBytesUsed[vdr1ID]) + require.Equal(config.VdrAllocSize/2, throttler.nodeToVdrBytesUsed[vdr2ID]) require.Len(throttler.nodeToVdrBytesUsed, 2) require.Len(throttler.nodeToAtLargeBytesUsed, 1) - require.Len(throttler.nodeToWaitingMsgID, 0) - require.EqualValues(0, throttler.waitingToAcquire.Len()) + require.Empty(throttler.nodeToWaitingMsgID) + require.Zero(throttler.waitingToAcquire.Len()) // vdr1 should be able to acquire the rest of the validator allocation // Should return immediately. throttler.Acquire(context.Background(), config.VdrAllocSize/2-1, vdr1ID) // vdr1 at-large bytes used: 1024. 
Validator bytes used: 512 - require.EqualValues(throttler.nodeToVdrBytesUsed[vdr1ID], config.VdrAllocSize/2) + require.Equal(config.VdrAllocSize/2, throttler.nodeToVdrBytesUsed[vdr1ID]) require.Len(throttler.nodeToAtLargeBytesUsed, 1) - require.EqualValues(config.AtLargeAllocSize, throttler.nodeToAtLargeBytesUsed[vdr1ID]) + require.Equal(config.AtLargeAllocSize, throttler.nodeToAtLargeBytesUsed[vdr1ID]) // Trying to take more bytes for either node should block vdr1Done := make(chan struct{}) @@ -189,13 +189,13 @@ func TestInboundMsgByteThrottler(t *testing.T) { }() select { case <-vdr1Done: - t.Fatal("should block on acquiring any more bytes") + require.FailNow("should block on acquiring any more bytes") case <-time.After(50 * time.Millisecond): } throttler.lock.Lock() require.Len(throttler.nodeToWaitingMsgID, 1) require.Contains(throttler.nodeToWaitingMsgID, vdr1ID) - require.EqualValues(1, throttler.waitingToAcquire.Len()) + require.Equal(1, throttler.waitingToAcquire.Len()) _, exists := throttler.waitingToAcquire.Get(throttler.nodeToWaitingMsgID[vdr1ID]) require.True(exists) throttler.lock.Unlock() @@ -207,14 +207,14 @@ func TestInboundMsgByteThrottler(t *testing.T) { }() select { case <-vdr2Done: - t.Fatal("should block on acquiring any more bytes") + require.FailNow("should block on acquiring any more bytes") case <-time.After(50 * time.Millisecond): } throttler.lock.Lock() require.Len(throttler.nodeToWaitingMsgID, 2) require.Contains(throttler.nodeToWaitingMsgID, vdr2ID) - require.EqualValues(2, throttler.waitingToAcquire.Len()) + require.Equal(2, throttler.waitingToAcquire.Len()) _, exists = throttler.waitingToAcquire.Get(throttler.nodeToWaitingMsgID[vdr2ID]) require.True(exists) throttler.lock.Unlock() @@ -227,13 +227,13 @@ func TestInboundMsgByteThrottler(t *testing.T) { }() select { case <-nonVdrDone: - t.Fatal("should block on acquiring any more bytes") + require.FailNow("should block on acquiring any more bytes") case <-time.After(50 * 
time.Millisecond): } throttler.lock.Lock() require.Len(throttler.nodeToWaitingMsgID, 3) require.Contains(throttler.nodeToWaitingMsgID, nonVdrID) - require.EqualValues(3, throttler.waitingToAcquire.Len()) + require.Equal(3, throttler.waitingToAcquire.Len()) _, exists = throttler.waitingToAcquire.Get(throttler.nodeToWaitingMsgID[nonVdrID]) require.True(exists) throttler.lock.Unlock() @@ -248,23 +248,23 @@ func TestInboundMsgByteThrottler(t *testing.T) { <-vdr2Done <-nonVdrDone - require.EqualValues(config.NodeMaxAtLargeBytes/2, throttler.remainingVdrBytes) + require.Equal(config.NodeMaxAtLargeBytes/2, throttler.remainingVdrBytes) require.Len(throttler.nodeToAtLargeBytesUsed, 3) // vdr1, vdr2, nonVdrID - require.EqualValues(config.AtLargeAllocSize/2, throttler.nodeToAtLargeBytesUsed[vdr1ID]) - require.EqualValues(1, throttler.nodeToAtLargeBytesUsed[vdr2ID]) - require.EqualValues(1, throttler.nodeToAtLargeBytesUsed[nonVdrID]) + require.Equal(config.AtLargeAllocSize/2, throttler.nodeToAtLargeBytesUsed[vdr1ID]) + require.Equal(uint64(1), throttler.nodeToAtLargeBytesUsed[vdr2ID]) + require.Equal(uint64(1), throttler.nodeToAtLargeBytesUsed[nonVdrID]) require.Len(throttler.nodeToVdrBytesUsed, 1) - require.EqualValues(0, throttler.nodeToVdrBytesUsed[vdr1ID]) - require.EqualValues(config.AtLargeAllocSize/2-2, throttler.remainingAtLargeBytes) - require.Len(throttler.nodeToWaitingMsgID, 0) - require.EqualValues(0, throttler.waitingToAcquire.Len()) + require.Zero(throttler.nodeToVdrBytesUsed[vdr1ID]) + require.Equal(config.AtLargeAllocSize/2-2, throttler.remainingAtLargeBytes) + require.Empty(throttler.nodeToWaitingMsgID) + require.Zero(throttler.waitingToAcquire.Len()) // Non-validator should be able to take the rest of the at-large bytes throttler.Acquire(context.Background(), config.AtLargeAllocSize/2-2, nonVdrID) - require.EqualValues(0, throttler.remainingAtLargeBytes) - require.EqualValues(config.AtLargeAllocSize/2-1, throttler.nodeToAtLargeBytesUsed[nonVdrID]) - 
require.Len(throttler.nodeToWaitingMsgID, 0) - require.EqualValues(0, throttler.waitingToAcquire.Len()) + require.Zero(throttler.remainingAtLargeBytes) + require.Equal(config.AtLargeAllocSize/2-1, throttler.nodeToAtLargeBytesUsed[nonVdrID]) + require.Empty(throttler.nodeToWaitingMsgID) + require.Zero(throttler.waitingToAcquire.Len()) // But should block on subsequent Acquires go func() { @@ -273,13 +273,13 @@ func TestInboundMsgByteThrottler(t *testing.T) { }() select { case <-nonVdrDone: - t.Fatal("should block on acquiring any more bytes") + require.FailNow("should block on acquiring any more bytes") case <-time.After(50 * time.Millisecond): } throttler.lock.Lock() require.Contains(throttler.nodeToWaitingMsgID, nonVdrID) require.Contains(throttler.nodeToWaitingMsgID, nonVdrID) - require.EqualValues(1, throttler.waitingToAcquire.Len()) + require.Equal(1, throttler.waitingToAcquire.Len()) _, exists = throttler.waitingToAcquire.Get(throttler.nodeToWaitingMsgID[nonVdrID]) require.True(exists) throttler.lock.Unlock() @@ -290,34 +290,34 @@ func TestInboundMsgByteThrottler(t *testing.T) { <-nonVdrDone - require.EqualValues(0, throttler.nodeToAtLargeBytesUsed[vdr2ID]) - require.EqualValues(config.VdrAllocSize, throttler.remainingVdrBytes) - require.Len(throttler.nodeToVdrBytesUsed, 0) - require.EqualValues(0, throttler.remainingAtLargeBytes) + require.Zero(throttler.nodeToAtLargeBytesUsed[vdr2ID]) + require.Equal(config.VdrAllocSize, throttler.remainingVdrBytes) + require.Empty(throttler.nodeToVdrBytesUsed) + require.Zero(throttler.remainingAtLargeBytes) require.NotContains(throttler.nodeToWaitingMsgID, nonVdrID) - require.EqualValues(0, throttler.waitingToAcquire.Len()) + require.Zero(throttler.waitingToAcquire.Len()) // Release all of vdr1's messages throttler.release(&msgMetadata{msgSize: 1}, vdr1ID) throttler.release(&msgMetadata{msgSize: config.AtLargeAllocSize/2 - 1}, vdr1ID) - require.Len(throttler.nodeToVdrBytesUsed, 0) - require.EqualValues(config.VdrAllocSize, 
throttler.remainingVdrBytes) - require.EqualValues(config.AtLargeAllocSize/2, throttler.remainingAtLargeBytes) - require.EqualValues(0, throttler.nodeToAtLargeBytesUsed[vdr1ID]) + require.Empty(throttler.nodeToVdrBytesUsed) + require.Equal(config.VdrAllocSize, throttler.remainingVdrBytes) + require.Equal(config.AtLargeAllocSize/2, throttler.remainingAtLargeBytes) + require.Zero(throttler.nodeToAtLargeBytesUsed[vdr1ID]) require.NotContains(throttler.nodeToWaitingMsgID, nonVdrID) - require.EqualValues(0, throttler.waitingToAcquire.Len()) + require.Zero(throttler.waitingToAcquire.Len()) // Release nonVdr's messages throttler.release(&msgMetadata{msgSize: 1}, nonVdrID) throttler.release(&msgMetadata{msgSize: 1}, nonVdrID) throttler.release(&msgMetadata{msgSize: config.AtLargeAllocSize/2 - 2}, nonVdrID) - require.Len(throttler.nodeToVdrBytesUsed, 0) - require.EqualValues(config.VdrAllocSize, throttler.remainingVdrBytes) - require.EqualValues(config.AtLargeAllocSize, throttler.remainingAtLargeBytes) - require.Len(throttler.nodeToAtLargeBytesUsed, 0) - require.EqualValues(0, throttler.nodeToAtLargeBytesUsed[nonVdrID]) + require.Empty(throttler.nodeToVdrBytesUsed) + require.Equal(config.VdrAllocSize, throttler.remainingVdrBytes) + require.Equal(config.AtLargeAllocSize, throttler.remainingAtLargeBytes) + require.Empty(throttler.nodeToAtLargeBytesUsed) + require.Zero(throttler.nodeToAtLargeBytesUsed[nonVdrID]) require.NotContains(throttler.nodeToWaitingMsgID, nonVdrID) - require.EqualValues(0, throttler.waitingToAcquire.Len()) + require.Zero(throttler.waitingToAcquire.Len()) } // Ensure that the limit on taking from the at-large allocation is enforced @@ -328,9 +328,9 @@ func TestSybilMsgThrottlerMaxNonVdr(t *testing.T) { AtLargeAllocSize: 100, NodeMaxAtLargeBytes: 10, } - vdrs := validators.NewSet() + vdrs := validators.NewManager() vdr1ID := ids.GenerateTestNodeID() - require.NoError(vdrs.Add(vdr1ID, nil, ids.Empty, 1)) + 
require.NoError(vdrs.AddStaker(constants.PrimaryNetworkID, vdr1ID, nil, ids.Empty, 1)) throttler, err := newInboundMsgByteThrottler( logging.NoLog{}, "", @@ -350,7 +350,7 @@ func TestSybilMsgThrottlerMaxNonVdr(t *testing.T) { }() select { case <-nonVdrDone: - t.Fatal("should block on acquiring any more bytes") + require.FailNow("should block on acquiring any more bytes") case <-time.After(50 * time.Millisecond): } @@ -360,11 +360,11 @@ func TestSybilMsgThrottlerMaxNonVdr(t *testing.T) { // Validator should only be able to take [MaxAtLargeBytes] throttler.Acquire(context.Background(), config.NodeMaxAtLargeBytes+1, vdr1ID) - require.EqualValues(config.NodeMaxAtLargeBytes, throttler.nodeToAtLargeBytesUsed[vdr1ID]) - require.EqualValues(1, throttler.nodeToVdrBytesUsed[vdr1ID]) - require.EqualValues(config.NodeMaxAtLargeBytes, throttler.nodeToAtLargeBytesUsed[nonVdrNodeID1]) - require.EqualValues(config.NodeMaxAtLargeBytes, throttler.nodeToAtLargeBytesUsed[nonVdrNodeID2]) - require.EqualValues(config.AtLargeAllocSize-config.NodeMaxAtLargeBytes*3, throttler.remainingAtLargeBytes) + require.Equal(config.NodeMaxAtLargeBytes, throttler.nodeToAtLargeBytesUsed[vdr1ID]) + require.Equal(uint64(1), throttler.nodeToVdrBytesUsed[vdr1ID]) + require.Equal(config.NodeMaxAtLargeBytes, throttler.nodeToAtLargeBytesUsed[nonVdrNodeID1]) + require.Equal(config.NodeMaxAtLargeBytes, throttler.nodeToAtLargeBytesUsed[nonVdrNodeID2]) + require.Equal(config.AtLargeAllocSize-config.NodeMaxAtLargeBytes*3, throttler.remainingAtLargeBytes) } // Test that messages waiting to be acquired by a given node execute next @@ -375,9 +375,9 @@ func TestMsgThrottlerNextMsg(t *testing.T) { AtLargeAllocSize: 1024, NodeMaxAtLargeBytes: 1024, } - vdrs := validators.NewSet() + vdrs := validators.NewManager() vdr1ID := ids.GenerateTestNodeID() - require.NoError(vdrs.Add(vdr1ID, nil, ids.Empty, 1)) + require.NoError(vdrs.AddStaker(constants.PrimaryNetworkID, vdr1ID, nil, ids.Empty, 1)) nonVdrNodeID := 
ids.GenerateTestNodeID() maxVdrBytes := config.VdrAllocSize + config.AtLargeAllocSize @@ -404,7 +404,7 @@ func TestMsgThrottlerNextMsg(t *testing.T) { }() select { case <-doneVdr: - t.Fatal("should block on acquiring any more bytes") + require.FailNow("should block on acquiring any more bytes") case <-time.After(50 * time.Millisecond): } @@ -416,23 +416,23 @@ func TestMsgThrottlerNextMsg(t *testing.T) { }() select { case <-done: - t.Fatal("should block on acquiring any more bytes") + require.FailNow("should block on acquiring any more bytes") case <-time.After(50 * time.Millisecond): } // Release 1 byte throttler.release(&msgMetadata{msgSize: 1}, vdr1ID) // Byte should have gone toward next validator message - require.EqualValues(2, throttler.waitingToAcquire.Len()) + require.Equal(2, throttler.waitingToAcquire.Len()) require.Contains(throttler.nodeToWaitingMsgID, vdr1ID) firstMsgID := throttler.nodeToWaitingMsgID[vdr1ID] firstMsg, exists := throttler.waitingToAcquire.Get(firstMsgID) require.True(exists) - require.EqualValues(maxBytes-2, firstMsg.bytesNeeded) + require.Equal(maxBytes-2, firstMsg.bytesNeeded) select { case <-doneVdr: - t.Fatal("should still be blocking") + require.FailNow("should still be blocking") case <-time.After(50 * time.Millisecond): } diff --git a/avalanchego/network/throttling/inbound_msg_throttler.go b/avalanchego/network/throttling/inbound_msg_throttler.go index b76a7a34..ea9167de 100644 --- a/avalanchego/network/throttling/inbound_msg_throttler.go +++ b/avalanchego/network/throttling/inbound_msg_throttler.go @@ -1,11 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package throttling import ( "context" - "fmt" "github.com/prometheus/client_golang/prometheus" @@ -13,6 +12,7 @@ import ( "github.com/ava-labs/avalanchego/snow/networking/tracker" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/metric" ) var _ InboundMsgThrottler = (*inboundMsgThrottler)(nil) @@ -56,7 +56,7 @@ func NewInboundMsgThrottler( log logging.Logger, namespace string, registerer prometheus.Registerer, - vdrs validators.Set, + vdrs validators.Manager, throttlerConfig InboundMsgThrottlerConfig, resourceTracker tracker.ResourceTracker, cpuTargeter tracker.Targeter, @@ -90,7 +90,7 @@ func NewInboundMsgThrottler( return nil, err } cpuThrottler, err := NewSystemThrottler( - fmt.Sprintf("%s_cpu", namespace), + metric.AppendNamespace(namespace, "cpu"), registerer, throttlerConfig.CPUThrottlerConfig, resourceTracker.CPUTracker(), @@ -100,7 +100,7 @@ func NewInboundMsgThrottler( return nil, err } diskThrottler, err := NewSystemThrottler( - fmt.Sprintf("%s_disk", namespace), + metric.AppendNamespace(namespace, "disk"), registerer, throttlerConfig.DiskThrottlerConfig, resourceTracker.DiskTracker(), diff --git a/avalanchego/network/throttling/inbound_resource_throttler.go b/avalanchego/network/throttling/inbound_resource_throttler.go index a12e8562..eb0e939b 100644 --- a/avalanchego/network/throttling/inbound_resource_throttler.go +++ b/avalanchego/network/throttling/inbound_resource_throttler.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package throttling @@ -13,8 +13,8 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/networking/tracker" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/timer/mockable" - "github.com/ava-labs/avalanchego/utils/wrappers" ) const epsilon = time.Millisecond @@ -80,13 +80,12 @@ func newSystemThrottlerMetrics(namespace string, reg prometheus.Registerer) (*sy Help: "Number of nodes we're waiting to read a message from because their usage is too high", }), } - errs := wrappers.Errs{} - errs.Add( + err := utils.Err( reg.Register(m.totalWaits), reg.Register(m.totalNoWaits), reg.Register(m.awaitingAcquire), ) - return m, errs.Err + return m, err } func NewSystemThrottler( diff --git a/avalanchego/network/throttling/inbound_resource_throttler_test.go b/avalanchego/network/throttling/inbound_resource_throttler_test.go index 266dd070..93ec2811 100644 --- a/avalanchego/network/throttling/inbound_resource_throttler_test.go +++ b/avalanchego/network/throttling/inbound_resource_throttler_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package throttling @@ -8,11 +8,9 @@ import ( "testing" "time" - "github.com/golang/mock/gomock" - "github.com/prometheus/client_golang/prometheus" - "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/networking/tracker" @@ -23,8 +21,6 @@ import ( func TestNewSystemThrottler(t *testing.T) { ctrl := gomock.NewController(t) - defer ctrl.Finish() - require := require.New(t) reg := prometheus.NewRegistry() clock := mockable.Clock{} @@ -40,18 +36,16 @@ func TestNewSystemThrottler(t *testing.T) { targeter := tracker.NewMockTargeter(ctrl) throttlerIntf, err := NewSystemThrottler("", reg, config, cpuTracker, targeter) require.NoError(err) - throttler, ok := throttlerIntf.(*systemThrottler) - require.True(ok) - require.EqualValues(clock, config.Clock) - require.EqualValues(time.Second, config.MaxRecheckDelay) - require.EqualValues(cpuTracker, throttler.tracker) - require.EqualValues(targeter, throttler.targeter) + require.IsType(&systemThrottler{}, throttlerIntf) + throttler := throttlerIntf.(*systemThrottler) + require.Equal(clock, config.Clock) + require.Equal(time.Second, config.MaxRecheckDelay) + require.Equal(cpuTracker, throttler.tracker) + require.Equal(targeter, throttler.targeter) } func TestSystemThrottler(t *testing.T) { ctrl := gomock.NewController(t) - defer ctrl.Finish() - require := require.New(t) // Setup @@ -135,7 +129,6 @@ func TestSystemThrottler(t *testing.T) { func TestSystemThrottlerContextCancel(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() // Setup mockTracker := tracker.NewMockTracker(ctrl) diff --git a/avalanchego/network/throttling/no_inbound_msg_throttler.go b/avalanchego/network/throttling/no_inbound_msg_throttler.go index de6e03f8..6f7af32f 100644 --- a/avalanchego/network/throttling/no_inbound_msg_throttler.go +++ b/avalanchego/network/throttling/no_inbound_msg_throttler.go @@ -1,4 +1,4 @@ -// Copyright 
(C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package throttling diff --git a/avalanchego/network/throttling/outbound_msg_throttler.go b/avalanchego/network/throttling/outbound_msg_throttler.go index 8b46cb2c..d75c53f1 100644 --- a/avalanchego/network/throttling/outbound_msg_throttler.go +++ b/avalanchego/network/throttling/outbound_msg_throttler.go @@ -1,17 +1,18 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package throttling import ( "github.com/prometheus/client_golang/prometheus" + "go.uber.org/zap" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/message" "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/utils/math" - "github.com/ava-labs/avalanchego/utils/wrappers" ) var ( @@ -43,7 +44,7 @@ func NewSybilOutboundMsgThrottler( log logging.Logger, namespace string, registerer prometheus.Registerer, - vdrs validators.Set, + vdrs validators.Manager, config MsgByteThrottlerConfig, ) (OutboundMsgThrottler, error) { t := &outboundMsgThrottler{ @@ -72,7 +73,7 @@ func (t *outboundMsgThrottler) Acquire(msg message.OutboundMessage, nodeID ids.N // Take as many bytes as we can from the at-large allocation. bytesNeeded := uint64(len(msg.Bytes())) - atLargeBytesUsed := math.Min( + atLargeBytesUsed := min( // only give as many bytes as needed bytesNeeded, // don't exceed per-node limit @@ -85,9 +86,16 @@ func (t *outboundMsgThrottler) Acquire(msg message.OutboundMessage, nodeID ids.N // Take as many bytes as we can from [nodeID]'s validator allocation. 
// Calculate [nodeID]'s validator allocation size based on its weight vdrAllocationSize := uint64(0) - weight := t.vdrs.GetWeight(nodeID) + weight := t.vdrs.GetWeight(constants.PrimaryNetworkID, nodeID) if weight != 0 { - vdrAllocationSize = uint64(float64(t.maxVdrBytes) * float64(weight) / float64(t.vdrs.Weight())) + totalWeight, err := t.vdrs.TotalWeight(constants.PrimaryNetworkID) + if err != nil { + t.log.Error("Failed to get total weight of primary network validators", + zap.Error(err), + ) + } else { + vdrAllocationSize = uint64(float64(t.maxVdrBytes) * float64(weight) / float64(totalWeight)) + } } vdrBytesAlreadyUsed := t.nodeToVdrBytesUsed[nodeID] // [vdrBytesAllowed] is the number of bytes this node @@ -99,7 +107,7 @@ func (t *outboundMsgThrottler) Acquire(msg message.OutboundMessage, nodeID ids.N } else { vdrBytesAllowed -= vdrBytesAlreadyUsed } - vdrBytesUsed := math.Min(t.remainingVdrBytes, bytesNeeded, vdrBytesAllowed) + vdrBytesUsed := min(t.remainingVdrBytes, bytesNeeded, vdrBytesAllowed) bytesNeeded -= vdrBytesUsed if bytesNeeded != 0 { // Can't acquire enough bytes to queue this message to be sent @@ -142,7 +150,7 @@ func (t *outboundMsgThrottler) Release(msg message.OutboundMessage, nodeID ids.N // that will be given back to [nodeID]'s validator allocation. 
vdrBytesUsed := t.nodeToVdrBytesUsed[nodeID] msgSize := uint64(len(msg.Bytes())) - vdrBytesToReturn := math.Min(msgSize, vdrBytesUsed) + vdrBytesToReturn := min(msgSize, vdrBytesUsed) t.nodeToVdrBytesUsed[nodeID] -= vdrBytesToReturn if t.nodeToVdrBytesUsed[nodeID] == 0 { delete(t.nodeToVdrBytesUsed, nodeID) @@ -194,15 +202,13 @@ func (m *outboundMsgThrottlerMetrics) initialize(namespace string, registerer pr Name: "throttler_outbound_awaiting_release", Help: "Number of messages waiting to be sent", }) - errs := wrappers.Errs{} - errs.Add( + return utils.Err( registerer.Register(m.acquireSuccesses), registerer.Register(m.acquireFailures), registerer.Register(m.remainingAtLargeBytes), registerer.Register(m.remainingVdrBytes), registerer.Register(m.awaitingRelease), ) - return errs.Err } func NewNoOutboundThrottler() OutboundMsgThrottler { diff --git a/avalanchego/network/throttling/outbound_msg_throttler_test.go b/avalanchego/network/throttling/outbound_msg_throttler_test.go index f260558d..664449ad 100644 --- a/avalanchego/network/throttling/outbound_msg_throttler_test.go +++ b/avalanchego/network/throttling/outbound_msg_throttler_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package throttling @@ -6,33 +6,30 @@ package throttling import ( "testing" - "github.com/golang/mock/gomock" - "github.com/prometheus/client_golang/prometheus" - "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/message" "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/logging" ) func TestSybilOutboundMsgThrottler(t *testing.T) { ctrl := gomock.NewController(t) - defer ctrl.Finish() - require := require.New(t) config := MsgByteThrottlerConfig{ VdrAllocSize: 1024, AtLargeAllocSize: 1024, NodeMaxAtLargeBytes: 1024, } - vdrs := validators.NewSet() + vdrs := validators.NewManager() vdr1ID := ids.GenerateTestNodeID() vdr2ID := ids.GenerateTestNodeID() - require.NoError(vdrs.Add(vdr1ID, nil, ids.Empty, 1)) - require.NoError(vdrs.Add(vdr2ID, nil, ids.Empty, 1)) + require.NoError(vdrs.AddStaker(constants.PrimaryNetworkID, vdr1ID, nil, ids.Empty, 1)) + require.NoError(vdrs.AddStaker(constants.PrimaryNetworkID, vdr2ID, nil, ids.Empty, 1)) throttlerIntf, err := NewSybilOutboundMsgThrottler( logging.NoLog{}, "", @@ -55,39 +52,39 @@ func TestSybilOutboundMsgThrottler(t *testing.T) { msg := testMsgWithSize(ctrl, 1) acquired := throttlerIntf.Acquire(msg, vdr1ID) require.True(acquired) - require.EqualValues(config.AtLargeAllocSize-1, throttler.remainingAtLargeBytes) - require.EqualValues(config.VdrAllocSize, throttler.remainingVdrBytes) - require.Len(throttler.nodeToVdrBytesUsed, 0) + require.Equal(config.AtLargeAllocSize-1, throttler.remainingAtLargeBytes) + require.Equal(config.VdrAllocSize, throttler.remainingVdrBytes) + require.Empty(throttler.nodeToVdrBytesUsed) require.Len(throttler.nodeToAtLargeBytesUsed, 1) - require.EqualValues(1, throttler.nodeToAtLargeBytesUsed[vdr1ID]) + require.Equal(uint64(1), throttler.nodeToAtLargeBytesUsed[vdr1ID]) // Release the bytes throttlerIntf.Release(msg, vdr1ID) - 
require.EqualValues(config.AtLargeAllocSize, throttler.remainingAtLargeBytes) - require.EqualValues(config.VdrAllocSize, throttler.remainingVdrBytes) - require.Len(throttler.nodeToVdrBytesUsed, 0) - require.Len(throttler.nodeToAtLargeBytesUsed, 0) + require.Equal(config.AtLargeAllocSize, throttler.remainingAtLargeBytes) + require.Equal(config.VdrAllocSize, throttler.remainingVdrBytes) + require.Empty(throttler.nodeToVdrBytesUsed) + require.Empty(throttler.nodeToAtLargeBytesUsed) // Use all the at-large allocation bytes and 1 of the validator allocation bytes msg = testMsgWithSize(ctrl, config.AtLargeAllocSize+1) acquired = throttlerIntf.Acquire(msg, vdr1ID) require.True(acquired) // vdr1 at-large bytes used: 1024. Validator bytes used: 1 - require.EqualValues(0, throttler.remainingAtLargeBytes) - require.EqualValues(config.VdrAllocSize-1, throttler.remainingVdrBytes) - require.EqualValues(throttler.nodeToVdrBytesUsed[vdr1ID], 1) + require.Zero(throttler.remainingAtLargeBytes) + require.Equal(throttler.remainingVdrBytes, config.VdrAllocSize-1) + require.Equal(uint64(1), throttler.nodeToVdrBytesUsed[vdr1ID]) require.Len(throttler.nodeToVdrBytesUsed, 1) require.Len(throttler.nodeToAtLargeBytesUsed, 1) - require.EqualValues(config.AtLargeAllocSize, throttler.nodeToAtLargeBytesUsed[vdr1ID]) + require.Equal(config.AtLargeAllocSize, throttler.nodeToAtLargeBytesUsed[vdr1ID]) // The other validator should be able to acquire half the validator allocation. msg = testMsgWithSize(ctrl, config.AtLargeAllocSize/2) acquired = throttlerIntf.Acquire(msg, vdr2ID) require.True(acquired) // vdr2 at-large bytes used: 0. 
Validator bytes used: 512 - require.EqualValues(config.VdrAllocSize/2-1, throttler.remainingVdrBytes) - require.EqualValues(throttler.nodeToVdrBytesUsed[vdr1ID], 1) - require.EqualValues(throttler.nodeToVdrBytesUsed[vdr2ID], config.VdrAllocSize/2) + require.Equal(throttler.remainingVdrBytes, config.VdrAllocSize/2-1) + require.Equal(uint64(1), throttler.nodeToVdrBytesUsed[vdr1ID], 1) + require.Equal(config.VdrAllocSize/2, throttler.nodeToVdrBytesUsed[vdr2ID]) require.Len(throttler.nodeToVdrBytesUsed, 2) require.Len(throttler.nodeToAtLargeBytesUsed, 1) @@ -96,9 +93,9 @@ func TestSybilOutboundMsgThrottler(t *testing.T) { acquired = throttlerIntf.Acquire(msg, vdr1ID) require.True(acquired) // vdr1 at-large bytes used: 1024. Validator bytes used: 512 - require.EqualValues(throttler.nodeToVdrBytesUsed[vdr1ID], config.VdrAllocSize/2) + require.Equal(throttler.nodeToVdrBytesUsed[vdr1ID], config.VdrAllocSize/2) require.Len(throttler.nodeToAtLargeBytesUsed, 1) - require.EqualValues(config.AtLargeAllocSize, throttler.nodeToAtLargeBytesUsed[vdr1ID]) + require.Equal(config.AtLargeAllocSize, throttler.nodeToAtLargeBytesUsed[vdr1ID]) // Trying to take more bytes for either node should fail msg = testMsgWithSize(ctrl, 1) @@ -117,11 +114,11 @@ func TestSybilOutboundMsgThrottler(t *testing.T) { msg = testMsgWithSize(ctrl, config.AtLargeAllocSize+1) throttlerIntf.Release(msg, vdr1ID) - require.EqualValues(config.NodeMaxAtLargeBytes/2, throttler.remainingVdrBytes) + require.Equal(config.NodeMaxAtLargeBytes/2, throttler.remainingVdrBytes) require.Len(throttler.nodeToAtLargeBytesUsed, 1) // vdr1 - require.EqualValues(config.AtLargeAllocSize/2-1, throttler.nodeToAtLargeBytesUsed[vdr1ID]) + require.Equal(config.AtLargeAllocSize/2-1, throttler.nodeToAtLargeBytesUsed[vdr1ID]) require.Len(throttler.nodeToVdrBytesUsed, 1) - require.EqualValues(config.AtLargeAllocSize/2+1, throttler.remainingAtLargeBytes) + require.Equal(config.AtLargeAllocSize/2+1, throttler.remainingAtLargeBytes) // 
Non-validator should be able to take the rest of the at-large bytes // nonVdrID at-large bytes used: 513 @@ -129,8 +126,8 @@ func TestSybilOutboundMsgThrottler(t *testing.T) { msg = testMsgWithSize(ctrl, config.AtLargeAllocSize/2+1) acquired = throttlerIntf.Acquire(msg, nonVdrID) require.True(acquired) - require.EqualValues(0, throttler.remainingAtLargeBytes) - require.EqualValues(config.AtLargeAllocSize/2+1, throttler.nodeToAtLargeBytesUsed[nonVdrID]) + require.Zero(throttler.remainingAtLargeBytes) + require.Equal(config.AtLargeAllocSize/2+1, throttler.nodeToAtLargeBytesUsed[nonVdrID]) // Non-validator shouldn't be able to acquire more since at-large allocation empty msg = testMsgWithSize(ctrl, 1) @@ -140,43 +137,41 @@ func TestSybilOutboundMsgThrottler(t *testing.T) { // Release all of vdr2's messages msg = testMsgWithSize(ctrl, config.AtLargeAllocSize/2) throttlerIntf.Release(msg, vdr2ID) - require.EqualValues(0, throttler.nodeToAtLargeBytesUsed[vdr2ID]) - require.EqualValues(config.VdrAllocSize, throttler.remainingVdrBytes) - require.Len(throttler.nodeToVdrBytesUsed, 0) - require.EqualValues(0, throttler.remainingAtLargeBytes) + require.Zero(throttler.nodeToAtLargeBytesUsed[vdr2ID]) + require.Equal(config.VdrAllocSize, throttler.remainingVdrBytes) + require.Empty(throttler.nodeToVdrBytesUsed) + require.Zero(throttler.remainingAtLargeBytes) // Release all of vdr1's messages msg = testMsgWithSize(ctrl, config.VdrAllocSize/2-1) throttlerIntf.Release(msg, vdr1ID) - require.Len(throttler.nodeToVdrBytesUsed, 0) - require.EqualValues(config.VdrAllocSize, throttler.remainingVdrBytes) - require.EqualValues(config.AtLargeAllocSize/2-1, throttler.remainingAtLargeBytes) - require.EqualValues(0, throttler.nodeToAtLargeBytesUsed[vdr1ID]) + require.Empty(throttler.nodeToVdrBytesUsed) + require.Equal(config.VdrAllocSize, throttler.remainingVdrBytes) + require.Equal(config.AtLargeAllocSize/2-1, throttler.remainingAtLargeBytes) + 
require.Zero(throttler.nodeToAtLargeBytesUsed[vdr1ID]) // Release nonVdr's messages msg = testMsgWithSize(ctrl, config.AtLargeAllocSize/2+1) throttlerIntf.Release(msg, nonVdrID) - require.Len(throttler.nodeToVdrBytesUsed, 0) - require.EqualValues(config.VdrAllocSize, throttler.remainingVdrBytes) - require.EqualValues(config.AtLargeAllocSize, throttler.remainingAtLargeBytes) - require.Len(throttler.nodeToAtLargeBytesUsed, 0) - require.EqualValues(0, throttler.nodeToAtLargeBytesUsed[nonVdrID]) + require.Empty(throttler.nodeToVdrBytesUsed) + require.Equal(config.VdrAllocSize, throttler.remainingVdrBytes) + require.Equal(config.AtLargeAllocSize, throttler.remainingAtLargeBytes) + require.Empty(throttler.nodeToAtLargeBytesUsed) + require.Zero(throttler.nodeToAtLargeBytesUsed[nonVdrID]) } // Ensure that the limit on taking from the at-large allocation is enforced func TestSybilOutboundMsgThrottlerMaxNonVdr(t *testing.T) { ctrl := gomock.NewController(t) - defer ctrl.Finish() - require := require.New(t) config := MsgByteThrottlerConfig{ VdrAllocSize: 100, AtLargeAllocSize: 100, NodeMaxAtLargeBytes: 10, } - vdrs := validators.NewSet() + vdrs := validators.NewManager() vdr1ID := ids.GenerateTestNodeID() - require.NoError(vdrs.Add(vdr1ID, nil, ids.Empty, 1)) + require.NoError(vdrs.AddStaker(constants.PrimaryNetworkID, vdr1ID, nil, ids.Empty, 1)) throttlerIntf, err := NewSybilOutboundMsgThrottler( logging.NoLog{}, "", @@ -205,27 +200,25 @@ func TestSybilOutboundMsgThrottlerMaxNonVdr(t *testing.T) { // Validator should only be able to take [MaxAtLargeBytes] msg = testMsgWithSize(ctrl, config.NodeMaxAtLargeBytes+1) throttlerIntf.Acquire(msg, vdr1ID) - require.EqualValues(config.NodeMaxAtLargeBytes, throttler.nodeToAtLargeBytesUsed[vdr1ID]) - require.EqualValues(1, throttler.nodeToVdrBytesUsed[vdr1ID]) - require.EqualValues(config.NodeMaxAtLargeBytes, throttler.nodeToAtLargeBytesUsed[nonVdrNodeID1]) - require.EqualValues(config.NodeMaxAtLargeBytes, 
throttler.nodeToAtLargeBytesUsed[nonVdrNodeID2]) - require.EqualValues(config.AtLargeAllocSize-config.NodeMaxAtLargeBytes*3, throttler.remainingAtLargeBytes) + require.Equal(config.NodeMaxAtLargeBytes, throttler.nodeToAtLargeBytesUsed[vdr1ID]) + require.Equal(uint64(1), throttler.nodeToVdrBytesUsed[vdr1ID]) + require.Equal(config.NodeMaxAtLargeBytes, throttler.nodeToAtLargeBytesUsed[nonVdrNodeID1]) + require.Equal(config.NodeMaxAtLargeBytes, throttler.nodeToAtLargeBytesUsed[nonVdrNodeID2]) + require.Equal(config.AtLargeAllocSize-config.NodeMaxAtLargeBytes*3, throttler.remainingAtLargeBytes) } // Ensure that the throttler honors requested bypasses func TestBypassThrottling(t *testing.T) { ctrl := gomock.NewController(t) - defer ctrl.Finish() - require := require.New(t) config := MsgByteThrottlerConfig{ VdrAllocSize: 100, AtLargeAllocSize: 100, NodeMaxAtLargeBytes: 10, } - vdrs := validators.NewSet() + vdrs := validators.NewManager() vdr1ID := ids.GenerateTestNodeID() - require.NoError(vdrs.Add(vdr1ID, nil, ids.Empty, 1)) + require.NoError(vdrs.AddStaker(constants.PrimaryNetworkID, vdr1ID, nil, ids.Empty, 1)) throttlerIntf, err := NewSybilOutboundMsgThrottler( logging.NoLog{}, "", @@ -262,10 +255,10 @@ func TestBypassThrottling(t *testing.T) { msg.EXPECT().Op().Return(message.AppGossipOp).AnyTimes() msg.EXPECT().Bytes().Return(make([]byte, config.NodeMaxAtLargeBytes+1)).AnyTimes() throttlerIntf.Acquire(msg, vdr1ID) - require.EqualValues(0, throttler.nodeToAtLargeBytesUsed[vdr1ID]) - require.EqualValues(0, throttler.nodeToVdrBytesUsed[vdr1ID]) - require.EqualValues(1, throttler.nodeToAtLargeBytesUsed[nonVdrNodeID1]) - require.EqualValues(config.AtLargeAllocSize-1, throttler.remainingAtLargeBytes) + require.Zero(throttler.nodeToAtLargeBytesUsed[vdr1ID]) + require.Zero(throttler.nodeToVdrBytesUsed[vdr1ID]) + require.Equal(uint64(1), throttler.nodeToAtLargeBytesUsed[nonVdrNodeID1]) + require.Equal(config.AtLargeAllocSize-1, throttler.remainingAtLargeBytes) } func 
testMsgWithSize(ctrl *gomock.Controller, size uint64) message.OutboundMessage { diff --git a/avalanchego/network/throttling/release_func.go b/avalanchego/network/throttling/release_func.go index 0abe2bf4..e2cbcf1b 100644 --- a/avalanchego/network/throttling/release_func.go +++ b/avalanchego/network/throttling/release_func.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package throttling diff --git a/avalanchego/network/tracked_ip.go b/avalanchego/network/tracked_ip.go index ca673f76..6a95bbee 100644 --- a/avalanchego/network/tracked_ip.go +++ b/avalanchego/network/tracked_ip.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package network diff --git a/avalanchego/network/tracked_ip_test.go b/avalanchego/network/tracked_ip_test.go index bbf6267d..956f02cc 100644 --- a/avalanchego/network/tracked_ip_test.go +++ b/avalanchego/network/tracked_ip_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package network diff --git a/avalanchego/node/beacon_manager.go b/avalanchego/node/beacon_manager.go index 3e198241..9b6806fd 100644 --- a/avalanchego/node/beacon_manager.go +++ b/avalanchego/node/beacon_manager.go @@ -1,16 +1,16 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package node import ( + "sync" "sync/atomic" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/networking/router" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/timer" "github.com/ava-labs/avalanchego/version" ) @@ -18,23 +18,27 @@ var _ router.Router = (*beaconManager)(nil) type beaconManager struct { router.Router - timer *timer.Timer - beacons validators.Set - requiredConns int64 - numConns int64 + beacons validators.Manager + requiredConns int64 + numConns int64 + onSufficientlyConnected chan struct{} + onceOnSufficientlyConnected sync.Once } func (b *beaconManager) Connected(nodeID ids.NodeID, nodeVersion *version.Application, subnetID ids.ID) { - if constants.PrimaryNetworkID == subnetID && - b.beacons.Contains(nodeID) && + _, isBeacon := b.beacons.GetValidator(constants.PrimaryNetworkID, nodeID) + if isBeacon && + constants.PrimaryNetworkID == subnetID && atomic.AddInt64(&b.numConns, 1) >= b.requiredConns { - b.timer.Cancel() + b.onceOnSufficientlyConnected.Do(func() { + close(b.onSufficientlyConnected) + }) } b.Router.Connected(nodeID, nodeVersion, subnetID) } func (b *beaconManager) Disconnected(nodeID ids.NodeID) { - if b.beacons.Contains(nodeID) { + if _, isBeacon := b.beacons.GetValidator(constants.PrimaryNetworkID, nodeID); isBeacon { atomic.AddInt64(&b.numConns, -1) } b.Router.Disconnected(nodeID) diff --git a/avalanchego/node/beacon_manager_test.go b/avalanchego/node/beacon_manager_test.go index 4e5ce1b4..c4d00975 100644 --- a/avalanchego/node/beacon_manager_test.go +++ b/avalanchego/node/beacon_manager_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package node @@ -7,15 +7,13 @@ import ( "sync" "testing" - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/networking/router" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/timer" "github.com/ava-labs/avalanchego/version" ) @@ -27,11 +25,11 @@ func TestBeaconManager_DataRace(t *testing.T) { require := require.New(t) validatorIDs := make([]ids.NodeID, 0, numValidators) - validatorSet := validators.NewSet() + validatorSet := validators.NewManager() for i := 0; i < numValidators; i++ { nodeID := ids.GenerateTestNodeID() - require.NoError(validatorSet.Add(nodeID, nil, ids.Empty, 1)) + require.NoError(validatorSet.AddStaker(constants.PrimaryNetworkID, nodeID, nil, ids.Empty, 1)) validatorIDs = append(validatorIDs, nodeID) } @@ -41,10 +39,10 @@ func TestBeaconManager_DataRace(t *testing.T) { mockRouter := router.NewMockRouter(ctrl) b := beaconManager{ - Router: mockRouter, - timer: timer.NewTimer(nil), - beacons: validatorSet, - requiredConns: numValidators, + Router: mockRouter, + beacons: validatorSet, + requiredConns: numValidators, + onSufficientlyConnected: make(chan struct{}), } // connect numValidators validators, each with a weight of 1 @@ -66,7 +64,7 @@ func TestBeaconManager_DataRace(t *testing.T) { wg.Wait() // we should have a weight of numValidators now - require.EqualValues(numValidators, b.numConns) + require.Equal(int64(numValidators), b.numConns) // disconnect numValidators validators wg.Add(numValidators) diff --git a/avalanchego/node/config.go b/avalanchego/node/config.go index d32a0b07..3974409d 100644 --- a/avalanchego/node/config.go +++ b/avalanchego/node/config.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. package node @@ -11,7 +11,6 @@ import ( "github.com/ava-labs/avalanchego/chains" "github.com/ava-labs/avalanchego/genesis" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/nat" "github.com/ava-labs/avalanchego/network" "github.com/ava-labs/avalanchego/snow/networking/benchlist" "github.com/ava-labs/avalanchego/snow/networking/router" @@ -19,7 +18,6 @@ import ( "github.com/ava-labs/avalanchego/subnets" "github.com/ava-labs/avalanchego/trace" "github.com/ava-labs/avalanchego/utils/crypto/bls" - "github.com/ava-labs/avalanchego/utils/dynamicip" "github.com/ava-labs/avalanchego/utils/ips" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/profiler" @@ -53,7 +51,8 @@ type HTTPConfig struct { HTTPSKey []byte `json:"-"` HTTPSCert []byte `json:"-"` - APIAllowedOrigins []string `json:"apiAllowedOrigins"` + HTTPAllowedOrigins []string `json:"httpAllowedOrigins"` + HTTPAllowedHosts []string `json:"httpAllowedHosts"` ShutdownTimeout time.Duration `json:"shutdownTimeout"` ShutdownWait time.Duration `json:"shutdownWait"` @@ -73,24 +72,28 @@ type APIConfig struct { } type IPConfig struct { - IPPort ips.DynamicIPPort `json:"ip"` - IPUpdater dynamicip.Updater `json:"-"` - IPResolutionFreq time.Duration `json:"ipResolutionFrequency"` - // True if we attempted NAT traversal - AttemptedNATTraversal bool `json:"attemptedNATTraversal"` - // Tries to perform network address translation - Nat nat.Router `json:"-"` + PublicIP string `json:"publicIP"` + PublicIPResolutionService string `json:"publicIPResolutionService"` + PublicIPResolutionFreq time.Duration `json:"publicIPResolutionFreq"` + // The host portion of the address to listen on. The port to + // listen on will be sourced from IPPort. + // + // - If empty, listen on all interfaces (both ipv4 and ipv6). + // - If populated, listen only on the specified address. 
+ ListenHost string `json:"listenHost"` + ListenPort uint16 `json:"listenPort"` } type StakingConfig struct { genesis.StakingConfig - EnableStaking bool `json:"enableStaking"` - StakingTLSCert tls.Certificate `json:"-"` - StakingSigningKey *bls.SecretKey `json:"-"` - DisabledStakingWeight uint64 `json:"disabledStakingWeight"` - StakingKeyPath string `json:"stakingKeyPath"` - StakingCertPath string `json:"stakingCertPath"` - StakingSignerPath string `json:"stakingSignerPath"` + SybilProtectionEnabled bool `json:"sybilProtectionEnabled"` + PartialSyncPrimaryNetwork bool `json:"partialSyncPrimaryNetwork"` + StakingTLSCert tls.Certificate `json:"-"` + StakingSigningKey *bls.SecretKey `json:"-"` + SybilProtectionDisabledWeight uint64 `json:"sybilProtectionDisabledWeight"` + StakingKeyPath string `json:"stakingKeyPath"` + StakingCertPath string `json:"stakingCertPath"` + StakingSignerPath string `json:"stakingSignerPath"` } type StateSyncConfig struct { @@ -99,12 +102,6 @@ type StateSyncConfig struct { } type BootstrapConfig struct { - // Should Bootstrap be retried - RetryBootstrap bool `json:"retryBootstrap"` - - // Max number of times to retry bootstrap before warning the node operator - RetryBootstrapWarnFrequency int `json:"retryBootstrapWarnFrequency"` - // Timeout before emitting a warn log when connecting to bootstrapping beacons BootstrapBeaconConnectionTimeout time.Duration `json:"bootstrapBeaconConnectionTimeout"` @@ -119,11 +116,13 @@ type BootstrapConfig struct { // ancestors while responding to a GetAncestors message BootstrapMaxTimeGetAncestors time.Duration `json:"bootstrapMaxTimeGetAncestors"` - BootstrapIDs []ids.NodeID `json:"bootstrapIDs"` - BootstrapIPs []ips.IPPort `json:"bootstrapIPs"` + Bootstrappers []genesis.Bootstrapper `json:"bootstrappers"` } type DatabaseConfig struct { + // If true, all writes are to memory and are discarded at node shutdown. 
+ ReadOnly bool `json:"readOnly"` + // Path to database Path string `json:"path"` @@ -173,12 +172,10 @@ type Config struct { // Metrics MeterVMEnabled bool `json:"meterVMEnabled"` - // Router that is used to handle incoming consensus messages - ConsensusRouter router.Router `json:"-"` RouterHealthConfig router.HealthConfig `json:"routerHealthConfig"` ConsensusShutdownTimeout time.Duration `json:"consensusShutdownTimeout"` - // Gossip a container in the accepted frontier every [ConsensusGossipFrequency] - ConsensusGossipFrequency time.Duration `json:"consensusGossipFreq"` + // Poll for new frontiers every [FrontierPollFrequency] + FrontierPollFrequency time.Duration `json:"consensusGossipFreq"` // ConsensusAppConcurrency defines the maximum number of goroutines to // handle App messages per chain. ConsensusAppConcurrency int `json:"consensusAppConcurrency"` @@ -190,7 +187,7 @@ type Config struct { ChainConfigs map[string]chains.ChainConfig `json:"-"` ChainAliases map[ids.ID][]string `json:"chainAliases"` - VMAliaser ids.Aliaser `json:"-"` + VMAliases map[ids.ID][]string `json:"vmAliases"` // Halflife to use for the processing requests tracker. // Larger halflife --> usage metrics change more slowly. @@ -218,10 +215,6 @@ type Config struct { TraceConfig trace.Config `json:"traceConfig"` - // See comment on [MinPercentConnectedStakeHealthy] in platformvm.Config - // TODO: consider moving to subnet config - MinPercentConnectedStakeHealthy map[ids.ID]float64 `json:"minPercentConnectedStakeHealthy"` - // See comment on [UseCurrentHeight] in platformvm.Config UseCurrentHeight bool `json:"useCurrentHeight"` @@ -231,4 +224,8 @@ type Config struct { // ChainDataDir is the root path for per-chain directories where VMs can // write arbitrary data. ChainDataDir string `json:"chainDataDir"` + + // Path to write process context to (including PID, API URI, and + // staking address). 
+ ProcessContextFilePath string `json:"processContextFilePath"` } diff --git a/avalanchego/node/insecure_validator_manager.go b/avalanchego/node/insecure_validator_manager.go index 12428ed8..0e23b8b9 100644 --- a/avalanchego/node/insecure_validator_manager.go +++ b/avalanchego/node/insecure_validator_manager.go @@ -1,34 +1,42 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package node import ( + "go.uber.org/zap" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/networking/router" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/version" ) type insecureValidatorManager struct { router.Router - vdrs validators.Set + log logging.Logger + vdrs validators.Manager weight uint64 } func (i *insecureValidatorManager) Connected(vdrID ids.NodeID, nodeVersion *version.Application, subnetID ids.ID) { if constants.PrimaryNetworkID == subnetID { - // Staking is disabled so we don't have a txID that added the peer as a - // validator. Because each validator needs a txID associated with it, we - // hack one together by padding the nodeID with zeroes. + // Sybil protection is disabled so we don't have a txID that added the + // peer as a validator. Because each validator needs a txID associated + // with it, we hack one together by padding the nodeID with zeroes. dummyTxID := ids.Empty - copy(dummyTxID[:], vdrID[:]) + copy(dummyTxID[:], vdrID.Bytes()) - // Add will only error here if the total weight of the set would go over - // [math.MaxUint64]. In this case, we will just not mark this new peer - // as a validator. 
- _ = i.vdrs.Add(vdrID, nil, dummyTxID, i.weight) + err := i.vdrs.AddStaker(constants.PrimaryNetworkID, vdrID, nil, dummyTxID, i.weight) + if err != nil { + i.log.Error("failed to add validator", + zap.Stringer("nodeID", vdrID), + zap.Stringer("subnetID", constants.PrimaryNetworkID), + zap.Error(err), + ) + } } i.Router.Connected(vdrID, nodeVersion, subnetID) } @@ -36,6 +44,13 @@ func (i *insecureValidatorManager) Connected(vdrID ids.NodeID, nodeVersion *vers func (i *insecureValidatorManager) Disconnected(vdrID ids.NodeID) { // RemoveWeight will only error here if there was an error reported during // Add. - _ = i.vdrs.RemoveWeight(vdrID, i.weight) + err := i.vdrs.RemoveWeight(constants.PrimaryNetworkID, vdrID, i.weight) + if err != nil { + i.log.Error("failed to remove weight", + zap.Stringer("nodeID", vdrID), + zap.Stringer("subnetID", constants.PrimaryNetworkID), + zap.Error(err), + ) + } i.Router.Disconnected(vdrID) } diff --git a/avalanchego/node/node.go b/avalanchego/node/node.go index 0e258f48..54caaba1 100644 --- a/avalanchego/node/node.go +++ b/avalanchego/node/node.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package node @@ -6,23 +6,24 @@ package node import ( "context" "crypto" + "crypto/tls" + "encoding/json" "errors" "fmt" "io" + "io/fs" "net" "os" "path/filepath" + "strconv" "sync" "time" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/collectors" "github.com/prometheus/client_golang/prometheus/promhttp" - "go.uber.org/zap" - coreth "github.com/ava-labs/coreth/plugin/evm" - "github.com/ava-labs/avalanchego/api/admin" "github.com/ava-labs/avalanchego/api/auth" "github.com/ava-labs/avalanchego/api/health" @@ -34,30 +35,34 @@ import ( "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/leveldb" - "github.com/ava-labs/avalanchego/database/manager" "github.com/ava-labs/avalanchego/database/memdb" + "github.com/ava-labs/avalanchego/database/meterdb" + "github.com/ava-labs/avalanchego/database/pebble" "github.com/ava-labs/avalanchego/database/prefixdb" + "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/genesis" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/indexer" "github.com/ava-labs/avalanchego/ipcs" "github.com/ava-labs/avalanchego/message" + "github.com/ava-labs/avalanchego/nat" "github.com/ava-labs/avalanchego/network" "github.com/ava-labs/avalanchego/network/dialer" "github.com/ava-labs/avalanchego/network/peer" "github.com/ava-labs/avalanchego/network/throttling" "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/networking/benchlist" "github.com/ava-labs/avalanchego/snow/networking/router" "github.com/ava-labs/avalanchego/snow/networking/timeout" "github.com/ava-labs/avalanchego/snow/networking/tracker" "github.com/ava-labs/avalanchego/snow/uptime" "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/staking" "github.com/ava-labs/avalanchego/trace" 
"github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/bls" + "github.com/ava-labs/avalanchego/utils/dynamicip" "github.com/ava-labs/avalanchego/utils/filesystem" "github.com/ava-labs/avalanchego/utils/hashing" "github.com/ava-labs/avalanchego/utils/ips" @@ -67,32 +72,196 @@ import ( "github.com/ava-labs/avalanchego/utils/profiler" "github.com/ava-labs/avalanchego/utils/resource" "github.com/ava-labs/avalanchego/utils/set" - "github.com/ava-labs/avalanchego/utils/timer" - "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/version" "github.com/ava-labs/avalanchego/vms" "github.com/ava-labs/avalanchego/vms/avm" - "github.com/ava-labs/avalanchego/vms/nftfx" "github.com/ava-labs/avalanchego/vms/platformvm" + "github.com/ava-labs/avalanchego/vms/platformvm/block" "github.com/ava-labs/avalanchego/vms/platformvm/signer" - "github.com/ava-labs/avalanchego/vms/propertyfx" + "github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/ava-labs/avalanchego/vms/registry" "github.com/ava-labs/avalanchego/vms/rpcchainvm/runtime" - "github.com/ava-labs/avalanchego/vms/secp256k1fx" ipcsapi "github.com/ava-labs/avalanchego/api/ipcs" avmconfig "github.com/ava-labs/avalanchego/vms/avm/config" platformconfig "github.com/ava-labs/avalanchego/vms/platformvm/config" + coreth "github.com/ava-labs/coreth/plugin/evm" +) + +const ( + stakingPortName = constants.AppName + "-staking" + httpPortName = constants.AppName + "-http" + + ipResolutionTimeout = 30 * time.Second ) var ( - genesisHashKey = []byte("genesisID") - indexerDBPrefix = []byte{0x00} + genesisHashKey = []byte("genesisID") + ungracefulShutdown = []byte("ungracefulShutdown") + + indexerDBPrefix = []byte{0x00} + keystoreDBPrefix = []byte("keystore") errInvalidTLSKey = errors.New("invalid TLS key") errShuttingDown = errors.New("server shutting down") ) +// New returns an instance of Node +func New( + config *Config, + 
logFactory logging.Factory, + logger logging.Logger, +) (*Node, error) { + tlsCert := config.StakingTLSCert.Leaf + stakingCert := staking.CertificateFromX509(tlsCert) + if err := staking.ValidateCertificate(stakingCert); err != nil { + return nil, fmt.Errorf("invalid staking certificate: %w", err) + } + + n := &Node{ + Log: logger, + LogFactory: logFactory, + ID: ids.NodeIDFromCert(stakingCert), + Config: config, + } + + n.DoneShuttingDown.Add(1) + + pop := signer.NewProofOfPossession(n.Config.StakingSigningKey) + logger.Info("initializing node", + zap.Stringer("version", version.CurrentApp), + zap.Stringer("nodeID", n.ID), + zap.Stringer("stakingKeyType", tlsCert.PublicKeyAlgorithm), + zap.Reflect("nodePOP", pop), + zap.Reflect("providedFlags", n.Config.ProvidedFlags), + zap.Reflect("config", n.Config), + ) + + var err error + n.VMFactoryLog, err = logFactory.Make("vm-factory") + if err != nil { + return nil, fmt.Errorf("problem creating vm logger: %w", err) + } + + n.VMAliaser = ids.NewAliaser() + for vmID, aliases := range config.VMAliases { + for _, alias := range aliases { + if err := n.VMAliaser.Alias(vmID, alias); err != nil { + return nil, err + } + } + } + n.VMManager = vms.NewManager(n.VMFactoryLog, n.VMAliaser) + + if err := n.initBootstrappers(); err != nil { // Configure the bootstrappers + return nil, fmt.Errorf("problem initializing node beacons: %w", err) + } + + // Set up tracer + n.tracer, err = trace.New(n.Config.TraceConfig) + if err != nil { + return nil, fmt.Errorf("couldn't initialize tracer: %w", err) + } + + n.initMetrics() + n.initNAT() + if err := n.initAPIServer(); err != nil { // Start the API Server + return nil, fmt.Errorf("couldn't initialize API server: %w", err) + } + + if err := n.initMetricsAPI(); err != nil { // Start the Metrics API + return nil, fmt.Errorf("couldn't initialize metrics API: %w", err) + } + + if err := n.initDatabase(); err != nil { // Set up the node's database + return nil, fmt.Errorf("problem initializing 
database: %w", err) + } + + if err := n.initKeystoreAPI(); err != nil { // Start the Keystore API + return nil, fmt.Errorf("couldn't initialize keystore API: %w", err) + } + + n.initSharedMemory() // Initialize shared memory + + // message.Creator is shared between networking, chainManager and the engine. + // It must be initiated before networking (initNetworking), chain manager (initChainManager) + // and the engine (initChains) but after the metrics (initMetricsAPI) + // message.Creator currently record metrics under network namespace + n.networkNamespace = "network" + n.msgCreator, err = message.NewCreator( + n.Log, + n.MetricsRegisterer, + n.networkNamespace, + n.Config.NetworkConfig.CompressionType, + n.Config.NetworkConfig.MaximumInboundMessageTimeout, + ) + if err != nil { + return nil, fmt.Errorf("problem initializing message creator: %w", err) + } + + n.vdrs = validators.NewManager() + if !n.Config.SybilProtectionEnabled { + logger.Warn("sybil control is not enforced") + n.vdrs = newOverriddenManager(constants.PrimaryNetworkID, n.vdrs) + } + if err := n.initResourceManager(n.MetricsRegisterer); err != nil { + return nil, fmt.Errorf("problem initializing resource manager: %w", err) + } + n.initCPUTargeter(&config.CPUTargeterConfig) + n.initDiskTargeter(&config.DiskTargeterConfig) + if err := n.initNetworking(); err != nil { // Set up networking layer. 
+ return nil, fmt.Errorf("problem initializing networking: %w", err) + } + + n.initEventDispatchers() + + // Start the Health API + // Has to be initialized before chain manager + // [n.Net] must already be set + if err := n.initHealthAPI(); err != nil { + return nil, fmt.Errorf("couldn't initialize health API: %w", err) + } + if err := n.addDefaultVMAliases(); err != nil { + return nil, fmt.Errorf("couldn't initialize API aliases: %w", err) + } + if err := n.initChainManager(n.Config.AvaxAssetID); err != nil { // Set up the chain manager + return nil, fmt.Errorf("couldn't initialize chain manager: %w", err) + } + if err := n.initVMs(); err != nil { // Initialize the VM registry. + return nil, fmt.Errorf("couldn't initialize VM registry: %w", err) + } + if err := n.initAdminAPI(); err != nil { // Start the Admin API + return nil, fmt.Errorf("couldn't initialize admin API: %w", err) + } + if err := n.initInfoAPI(); err != nil { // Start the Info API + return nil, fmt.Errorf("couldn't initialize info API: %w", err) + } + if err := n.initIPCs(); err != nil { // Start the IPCs + return nil, fmt.Errorf("couldn't initialize IPCs: %w", err) + } + if err := n.initIPCAPI(); err != nil { // Start the IPC API + return nil, fmt.Errorf("couldn't initialize the IPC API: %w", err) + } + if err := n.initChainAliases(n.Config.GenesisBytes); err != nil { + return nil, fmt.Errorf("couldn't initialize chain aliases: %w", err) + } + if err := n.initAPIAliases(n.Config.GenesisBytes); err != nil { + return nil, fmt.Errorf("couldn't initialize API aliases: %w", err) + } + if err := n.initIndexer(); err != nil { + return nil, fmt.Errorf("couldn't initialize indexer: %w", err) + } + + n.health.Start(context.TODO(), n.Config.HealthCheckFreq) + n.initProfiler() + + // Start the Platform chain + if err := n.initChains(n.Config.GenesisBytes); err != nil { + return nil, fmt.Errorf("couldn't initialize chains: %w", err) + } + return n, nil +} + // Node is an instance of an Avalanche node. 
type Node struct { Log logging.Logger @@ -104,8 +273,13 @@ type Node struct { ID ids.NodeID // Storage for this node - DBManager manager.Manager - DB database.Database + DB database.Database + + router nat.Router + portMapper *nat.Mapper + ipUpdater dynamicip.Updater + + chainRouter router.Router // Profiles the process. Nil if continuous profiling is disabled. profiler profiler.ContinuousProfiler @@ -125,6 +299,9 @@ type Node struct { // Build and parse messages, for both network layer and chain manager msgCreator message.Creator + // Manages network timeouts + timeoutManager timeout.Manager + // Manages creation of blockchains and routing messages to them chainManager chains.Manager @@ -144,16 +321,23 @@ type Node struct { networkNamespace string Net network.Network + // The staking address will optionally be written to a process context + // file to enable other nodes to be configured to use this node as a + // beacon. + stakingAddress string + // tlsKeyLogWriterCloser is a debug file handle that writes all the TLS // session keys. This value should only be non-nil during debugging. tlsKeyLogWriterCloser io.WriteCloser // this node's initial connections to the network - beacons validators.Set + bootstrappers validators.Manager // current validators of the network vdrs validators.Manager + apiURI string + // Handles HTTP API calls APIServer server.Server @@ -179,6 +363,7 @@ type Node struct { MetricsRegisterer *prometheus.Registry MetricsGatherer metrics.MultiGatherer + VMAliaser ids.Aliaser VMManager vms.Manager // VM endpoint registry @@ -209,31 +394,95 @@ type Node struct { */ // Initialize the networking layer. -// Assumes [n.CPUTracker] and [n.CPUTargeter] have been initialized. 
-func (n *Node) initNetworking(primaryNetVdrs validators.Set) error { - currentIPPort := n.Config.IPPort.IPPort() - listener, err := net.Listen(constants.NetworkType, fmt.Sprintf(":%d", currentIPPort.Port)) +// Assumes [n.vdrs], [n.CPUTracker], and [n.CPUTargeter] have been initialized. +func (n *Node) initNetworking() error { + // Providing either loopback address - `::1` for ipv6 and `127.0.0.1` for ipv4 - as the listen + // host will avoid the need for a firewall exception on recent MacOS: + // + // - MacOS requires a manually-approved firewall exception [1] for each version of a given + // binary that wants to bind to all interfaces (i.e. with an address of `:[port]`). Each + // compiled version of avalanchego requires a separate exception to be allowed to bind to all + // interfaces. + // + // - A firewall exception is not required to bind to a loopback interface, but the only way for + // Listen() to bind to loopback for both ipv4 and ipv6 is to bind to all interfaces [2] which + // requires an exception. + // + // - Thus, the only way to start a node on MacOS without approving a firewall exception for the + // avalanchego binary is to bind to loopback by specifying the host to be `::1` or `127.0.0.1`. + // + // 1: https://apple.stackexchange.com/questions/393715/do-you-want-the-application-main-to-accept-incoming-network-connections-pop + // 2: https://github.com/golang/go/issues/56998 + listenAddress := net.JoinHostPort(n.Config.ListenHost, strconv.FormatUint(uint64(n.Config.ListenPort), 10)) + listener, err := net.Listen(constants.NetworkType, listenAddress) if err != nil { return err } // Wrap listener so it will only accept a certain number of incoming connections per second listener = throttling.NewThrottledListener(listener, n.Config.NetworkConfig.ThrottlerConfig.MaxInboundConnsPerSec) - ipPort, err := ips.ToIPPort(listener.Addr().String()) + // Record the bound address to enable inclusion in process context file. 
+ n.stakingAddress = listener.Addr().String() + ipPort, err := ips.ToIPPort(n.stakingAddress) if err != nil { - n.Log.Info("initializing networking", - zap.Stringer("currentNodeIP", currentIPPort), - ) - } else { - ipPort = ips.IPPort{ - IP: currentIPPort.IP, - Port: ipPort.Port, + return err + } + + var dynamicIP ips.DynamicIPPort + switch { + case n.Config.PublicIP != "": + // Use the specified public IP. + ipPort.IP = net.ParseIP(n.Config.PublicIP) + if ipPort.IP == nil { + return fmt.Errorf("invalid IP Address: %s", n.Config.PublicIP) + } + dynamicIP = ips.NewDynamicIPPort(ipPort.IP, ipPort.Port) + n.ipUpdater = dynamicip.NewNoUpdater() + case n.Config.PublicIPResolutionService != "": + // Use dynamic IP resolution. + resolver, err := dynamicip.NewResolver(n.Config.PublicIPResolutionService) + if err != nil { + return fmt.Errorf("couldn't create IP resolver: %w", err) + } + + // Use that to resolve our public IP. + ctx, cancel := context.WithTimeout(context.Background(), ipResolutionTimeout) + ipPort.IP, err = resolver.Resolve(ctx) + cancel() + if err != nil { + return fmt.Errorf("couldn't resolve public IP: %w", err) } - n.Log.Info("initializing networking", - zap.Stringer("currentNodeIP", ipPort), + dynamicIP = ips.NewDynamicIPPort(ipPort.IP, ipPort.Port) + n.ipUpdater = dynamicip.NewUpdater(dynamicIP, resolver, n.Config.PublicIPResolutionFreq) + default: + ipPort.IP, err = n.router.ExternalIP() + if err != nil { + return fmt.Errorf("public IP / IP resolution service not given and failed to resolve IP with NAT: %w", err) + } + dynamicIP = ips.NewDynamicIPPort(ipPort.IP, ipPort.Port) + n.ipUpdater = dynamicip.NewNoUpdater() + } + + if ipPort.IP.IsLoopback() || ipPort.IP.IsPrivate() { + n.Log.Warn("P2P IP is private, you will not be publicly discoverable", + zap.Stringer("ip", ipPort), ) } + // Regularly update our public IP and port mappings. 
+ n.portMapper.Map( + ipPort.Port, + ipPort.Port, + stakingPortName, + dynamicIP, + n.Config.PublicIPResolutionFreq, + ) + go n.ipUpdater.Dispatch(n.Log) + + n.Log.Info("initializing networking", + zap.Stringer("ip", ipPort), + ) + tlsKey, ok := n.Config.StakingTLSCert.PrivateKey.(crypto.Signer) if !ok { return errInvalidTLSKey @@ -249,97 +498,115 @@ func (n *Node) initNetworking(primaryNetVdrs validators.Set) error { ) } + // We allow nodes to gossip unknown ACPs in case the current ACPs constant + // becomes out of date. + var unknownACPs set.Set[uint32] + for acp := range n.Config.NetworkConfig.SupportedACPs { + if !constants.CurrentACPs.Contains(acp) { + unknownACPs.Add(acp) + } + } + for acp := range n.Config.NetworkConfig.ObjectedACPs { + if !constants.CurrentACPs.Contains(acp) { + unknownACPs.Add(acp) + } + } + if unknownACPs.Len() > 0 { + n.Log.Warn("gossipping unknown ACPs", + zap.Reflect("acps", unknownACPs), + ) + } + tlsConfig := peer.TLSConfig(n.Config.StakingTLSCert, n.tlsKeyLogWriterCloser) + // Create chain router + n.chainRouter = &router.ChainRouter{} + if n.Config.TraceConfig.Enabled { + n.chainRouter = router.Trace(n.chainRouter, n.tracer) + } + // Configure benchlist n.Config.BenchlistConfig.Validators = n.vdrs - n.Config.BenchlistConfig.Benchable = n.Config.ConsensusRouter - n.Config.BenchlistConfig.StakingEnabled = n.Config.EnableStaking + n.Config.BenchlistConfig.Benchable = n.chainRouter n.benchlistManager = benchlist.NewManager(&n.Config.BenchlistConfig) n.uptimeCalculator = uptime.NewLockedCalculator() - consensusRouter := n.Config.ConsensusRouter - if !n.Config.EnableStaking { - // Staking is disabled so we don't have a txID that added us as a - // validator. Because each validator needs a txID associated with it, we - // hack one together by just padding our nodeID with zeroes. 
+ consensusRouter := n.chainRouter + if !n.Config.SybilProtectionEnabled { + // Sybil protection is disabled so we don't have a txID that added us as + // a validator. Because each validator needs a txID associated with it, + // we hack one together by just padding our nodeID with zeroes. dummyTxID := ids.Empty - copy(dummyTxID[:], n.ID[:]) + copy(dummyTxID[:], n.ID.Bytes()) - err := primaryNetVdrs.Add( + err := n.vdrs.AddStaker( + constants.PrimaryNetworkID, n.ID, bls.PublicFromSecretKey(n.Config.StakingSigningKey), dummyTxID, - n.Config.DisabledStakingWeight, + n.Config.SybilProtectionDisabledWeight, ) if err != nil { return err } consensusRouter = &insecureValidatorManager{ + log: n.Log, Router: consensusRouter, - vdrs: primaryNetVdrs, - weight: n.Config.DisabledStakingWeight, + vdrs: n.vdrs, + weight: n.Config.SybilProtectionDisabledWeight, } } - numBeacons := n.beacons.Len() - requiredConns := (3*numBeacons + 3) / 4 + numBootstrappers := n.bootstrappers.Count(constants.PrimaryNetworkID) + requiredConns := (3*numBootstrappers + 3) / 4 if requiredConns > 0 { - // Set a timer that will fire after a given timeout unless we connect - // to a sufficient portion of nodes. If the timeout fires, the node will - // shutdown. - timer := timer.NewTimer(func() { - // If the timeout fires and we're already shutting down, nothing to do. - if !n.shuttingDown.Get() { + onSufficientlyConnected := make(chan struct{}) + consensusRouter = &beaconManager{ + Router: consensusRouter, + beacons: n.bootstrappers, + requiredConns: int64(requiredConns), + onSufficientlyConnected: onSufficientlyConnected, + } + + // Log a warning if we aren't able to connect to a sufficient portion of + // nodes. 
+ go func() { + timer := time.NewTimer(n.Config.BootstrapBeaconConnectionTimeout) + defer timer.Stop() + + select { + case <-timer.C: + if n.shuttingDown.Get() { + return + } n.Log.Warn("failed to connect to bootstrap nodes", - zap.Stringer("beacons", n.beacons), + zap.Stringer("bootstrappers", n.bootstrappers), zap.Duration("duration", n.Config.BootstrapBeaconConnectionTimeout), ) + case <-onSufficientlyConnected: } - }) - - go timer.Dispatch() - timer.SetTimeoutIn(n.Config.BootstrapBeaconConnectionTimeout) - - consensusRouter = &beaconManager{ - Router: consensusRouter, - timer: timer, - beacons: n.beacons, - requiredConns: int64(requiredConns), - } - } - - // initialize gossip tracker - gossipTracker, err := peer.NewGossipTracker(n.MetricsRegisterer, n.networkNamespace) - if err != nil { - return err + }() } - // keep gossip tracker synchronized with the validator set - primaryNetVdrs.RegisterCallbackListener(&peer.GossipTrackerCallback{ - Log: n.Log, - GossipTracker: gossipTracker, - }) - // add node configs to network config n.Config.NetworkConfig.Namespace = n.networkNamespace n.Config.NetworkConfig.MyNodeID = n.ID - n.Config.NetworkConfig.MyIPPort = n.Config.IPPort + n.Config.NetworkConfig.MyIPPort = dynamicIP n.Config.NetworkConfig.NetworkID = n.Config.NetworkID n.Config.NetworkConfig.Validators = n.vdrs - n.Config.NetworkConfig.Beacons = n.beacons + n.Config.NetworkConfig.Beacons = n.bootstrappers n.Config.NetworkConfig.TLSConfig = tlsConfig n.Config.NetworkConfig.TLSKey = tlsKey + n.Config.NetworkConfig.BLSKey = n.Config.StakingSigningKey n.Config.NetworkConfig.TrackedSubnets = n.Config.TrackedSubnets n.Config.NetworkConfig.UptimeCalculator = n.uptimeCalculator n.Config.NetworkConfig.UptimeRequirement = n.Config.UptimeRequirement n.Config.NetworkConfig.ResourceTracker = n.resourceTracker n.Config.NetworkConfig.CPUTargeter = n.cpuTargeter n.Config.NetworkConfig.DiskTargeter = n.diskTargeter - n.Config.NetworkConfig.GossipTracker = gossipTracker n.Net, err 
= network.NewNetwork( &n.Config.NetworkConfig, @@ -354,19 +621,51 @@ func (n *Node) initNetworking(primaryNetVdrs validators.Set) error { return err } +type NodeProcessContext struct { + // The process id of the node + PID int `json:"pid"` + // URI to access the node API + // Format: [https|http]://[host]:[port] + URI string `json:"uri"` + // Address other nodes can use to communicate with this node + // Format: [host]:[port] + StakingAddress string `json:"stakingAddress"` +} + +// Write process context to the configured path. Supports the use of +// dynamically chosen network ports with local network orchestration. +func (n *Node) writeProcessContext() error { + n.Log.Info("writing process context", zap.String("path", n.Config.ProcessContextFilePath)) + + // Write the process context to disk + processContext := &NodeProcessContext{ + PID: os.Getpid(), + URI: n.apiURI, + StakingAddress: n.stakingAddress, // Set by network initialization + } + bytes, err := json.MarshalIndent(processContext, "", " ") + if err != nil { + return fmt.Errorf("failed to marshal process context: %w", err) + } + if err := perms.WriteFile(n.Config.ProcessContextFilePath, bytes, perms.ReadWrite); err != nil { + return fmt.Errorf("failed to write process context: %w", err) + } + return nil +} + // Dispatch starts the node's servers. // Returns when the node exits. func (n *Node) Dispatch() error { + if err := n.writeProcessContext(); err != nil { + return err + } + // Start the HTTP API server go n.Log.RecoverAndPanic(func() { - var err error - if n.Config.HTTPSEnabled { - n.Log.Debug("initializing API server with TLS") - err = n.APIServer.DispatchTLS(n.Config.HTTPSCert, n.Config.HTTPSKey) - } else { - n.Log.Debug("initializing API server without TLS") - err = n.APIServer.Dispatch() - } + n.Log.Info("API server listening", + zap.String("uri", n.apiURI), + ) + err := n.APIServer.Dispatch() // When [n].Shutdown() is called, [n.APIServer].Close() is called. 
// This causes [n.APIServer].Dispatch() to return an error. // If that happened, don't log/return an error here. @@ -386,8 +685,8 @@ func (n *Node) Dispatch() error { } // Add bootstrap nodes to the peer network - for i, peerIP := range n.Config.BootstrapIPs { - n.Net.ManuallyTrack(n.Config.BootstrapIDs[i], peerIP) + for _, bootstrapper := range n.Config.Bootstrappers { + n.Net.ManuallyTrack(bootstrapper.ID, ips.IPPort(bootstrapper.IP)) } // Start P2P connections @@ -409,6 +708,16 @@ func (n *Node) Dispatch() error { // Wait until the node is done shutting down before returning n.DoneShuttingDown.Wait() + + // Remove the process context file to communicate to an orchestrator + // that the node is no longer running. + if err := os.Remove(n.Config.ProcessContextFilePath); err != nil && !errors.Is(err, fs.ErrNotExist) { + n.Log.Error("removal of process context file failed", + zap.String("path", n.Config.ProcessContextFilePath), + zap.Error(err), + ) + } + return err } @@ -419,41 +728,46 @@ func (n *Node) Dispatch() error { */ func (n *Node) initDatabase() error { - // start the db manager - var ( - dbManager manager.Manager - err error - ) + // start the db switch n.Config.DatabaseConfig.Name { case leveldb.Name: - dbManager, err = manager.NewLevelDB(n.Config.DatabaseConfig.Path, n.Config.DatabaseConfig.Config, n.Log, version.CurrentDatabase, "db_internal", n.MetricsRegisterer) + // Prior to v1.10.15, the only on-disk database was leveldb, and its + // files went to [dbPath]/[networkID]/v1.4.5. 
+ dbPath := filepath.Join(n.Config.DatabaseConfig.Path, version.CurrentDatabase.String()) + var err error + n.DB, err = leveldb.New(dbPath, n.Config.DatabaseConfig.Config, n.Log, "db_internal", n.MetricsRegisterer) + if err != nil { + return fmt.Errorf("couldn't create leveldb at %s: %w", dbPath, err) + } case memdb.Name: - dbManager = manager.NewMemDB(version.CurrentDatabase) + n.DB = memdb.New() + case pebble.Name: + dbPath := filepath.Join(n.Config.DatabaseConfig.Path, pebble.Name) + var err error + n.DB, err = pebble.New(dbPath, n.Config.DatabaseConfig.Config, n.Log, "db_internal", n.MetricsRegisterer) + if err != nil { + return fmt.Errorf("couldn't create pebbledb at %s: %w", dbPath, err) + } default: - err = fmt.Errorf( - "db-type was %q but should have been one of {%s, %s}", + return fmt.Errorf( + "db-type was %q but should have been one of {%s, %s, %s}", n.Config.DatabaseConfig.Name, leveldb.Name, memdb.Name, + pebble.Name, ) } - if err != nil { - return err + + if n.Config.ReadOnly && n.Config.DatabaseConfig.Name != memdb.Name { + n.DB = versiondb.New(n.DB) } - meterDBManager, err := dbManager.NewMeterDBManager("db", n.MetricsRegisterer) + var err error + n.DB, err = meterdb.New("db", n.MetricsRegisterer, n.DB) if err != nil { return err } - n.DBManager = meterDBManager - - currentDB := dbManager.Current() - n.Log.Info("initializing database", - zap.Stringer("dbVersion", currentDB.Version), - ) - n.DB = currentDB.Database - rawExpectedGenesisHash := hashing.ComputeHash256(n.Config.GenesisBytes) rawGenesisHash, err := n.DB.Get(genesisHashKey) @@ -477,17 +791,38 @@ func (n *Node) initDatabase() error { if genesisHash != expectedGenesisHash { return fmt.Errorf("db contains invalid genesis hash. 
DB Genesis: %s Generated Genesis: %s", genesisHash, expectedGenesisHash) } + + n.Log.Info("initializing database", + zap.Stringer("genesisHash", genesisHash), + ) + + ok, err := n.DB.Has(ungracefulShutdown) + if err != nil { + return fmt.Errorf("failed to read ungraceful shutdown key: %w", err) + } + + if ok { + n.Log.Warn("detected previous ungraceful shutdown") + } + + if err := n.DB.Put(ungracefulShutdown, nil); err != nil { + return fmt.Errorf( + "failed to write ungraceful shutdown key at: %w", + err, + ) + } + return nil } // Set the node IDs of the peers this node should first connect to -func (n *Node) initBeacons() error { - n.beacons = validators.NewSet() - for _, peerID := range n.Config.BootstrapIDs { +func (n *Node) initBootstrappers() error { + n.bootstrappers = validators.NewManager() + for _, bootstrapper := range n.Config.Bootstrappers { // Note: The beacon connection manager will treat all beaconIDs as // equal. // Invariant: We never use the TxID or BLS keys populated here. - if err := n.beacons.Add(peerID, nil, ids.Empty, 1); err != nil { + if err := n.bootstrappers.AddStaker(constants.PrimaryNetworkID, bootstrapper.ID, nil, ids.Empty, 1); err != nil { return err } } @@ -565,7 +900,7 @@ func (n *Node) initChains(genesisBytes []byte) error { SubnetID: constants.PrimaryNetworkID, GenesisData: genesisBytes, // Specifies other chains to create VMID: constants.PlatformVMID, - CustomBeacons: n.beacons, + CustomBeacons: n.bootstrappers, } // Start the chain creator with the Platform Chain @@ -577,18 +912,99 @@ func (n *Node) initMetrics() { n.MetricsGatherer = metrics.NewMultiGatherer() } +func (n *Node) initNAT() { + n.Log.Info("initializing NAT") + + if n.Config.PublicIP == "" && n.Config.PublicIPResolutionService == "" { + n.router = nat.GetRouter() + if !n.router.SupportsNAT() { + n.Log.Warn("UPnP and NAT-PMP router attach failed, " + + "you may not be listening publicly. 
" + + "Please confirm the settings in your router") + } + } else { + n.router = nat.NewNoRouter() + } + + n.portMapper = nat.NewPortMapper(n.Log, n.router) +} + // initAPIServer initializes the server that handles HTTP calls func (n *Node) initAPIServer() error { n.Log.Info("initializing API server") + // An empty host is treated as a wildcard to match all addresses, so it is + // considered public. + hostIsPublic := n.Config.HTTPHost == "" + if !hostIsPublic { + ip, err := ips.Lookup(n.Config.HTTPHost) + if err != nil { + n.Log.Fatal("failed to lookup HTTP host", + zap.String("host", n.Config.HTTPHost), + zap.Error(err), + ) + return err + } + hostIsPublic = !ip.IsLoopback() && !ip.IsPrivate() + + n.Log.Debug("finished HTTP host lookup", + zap.String("host", n.Config.HTTPHost), + zap.Stringer("ip", ip), + zap.Bool("isPublic", hostIsPublic), + ) + } + + listenAddress := net.JoinHostPort(n.Config.HTTPHost, strconv.FormatUint(uint64(n.Config.HTTPPort), 10)) + listener, err := net.Listen("tcp", listenAddress) + if err != nil { + return err + } + + addr := listener.Addr().String() + ipPort, err := ips.ToIPPort(addr) + if err != nil { + return err + } + + // Don't open the HTTP port if the HTTP server is private + if hostIsPublic { + n.Log.Warn("HTTP server is binding to a potentially public host. 
"+ + "You may be vulnerable to a DoS attack if your HTTP port is publicly accessible", + zap.String("host", n.Config.HTTPHost), + ) + + n.portMapper.Map( + ipPort.Port, + ipPort.Port, + httpPortName, + nil, + n.Config.PublicIPResolutionFreq, + ) + } + + protocol := "http" + if n.Config.HTTPSEnabled { + cert, err := tls.X509KeyPair(n.Config.HTTPSCert, n.Config.HTTPSKey) + if err != nil { + return err + } + config := &tls.Config{ + MinVersion: tls.VersionTLS12, + Certificates: []tls.Certificate{cert}, + } + listener = tls.NewListener(listener, config) + + protocol = "https" + } + n.apiURI = fmt.Sprintf("%s://%s", protocol, listener.Addr()) + if !n.Config.APIRequireAuthToken { var err error n.APIServer, err = server.New( n.Log, n.LogFactory, - n.Config.HTTPHost, - n.Config.HTTPPort, - n.Config.APIAllowedOrigins, + listener, + n.Config.HTTPAllowedOrigins, n.Config.ShutdownTimeout, n.ID, n.Config.TraceConfig.Enabled, @@ -596,6 +1012,7 @@ func (n *Node) initAPIServer() error { "api", n.MetricsRegisterer, n.Config.HTTPConfig.HTTPConfig, + n.Config.HTTPAllowedHosts, ) return err } @@ -608,9 +1025,8 @@ func (n *Node) initAPIServer() error { n.APIServer, err = server.New( n.Log, n.LogFactory, - n.Config.HTTPHost, - n.Config.HTTPPort, - n.Config.APIAllowedOrigins, + listener, + n.Config.HTTPAllowedOrigins, n.Config.ShutdownTimeout, n.ID, n.Config.TraceConfig.Enabled, @@ -618,6 +1034,7 @@ func (n *Node) initAPIServer() error { "api", n.MetricsRegisterer, n.Config.HTTPConfig.HTTPConfig, + n.Config.HTTPAllowedHosts, a, ) if err != nil { @@ -626,25 +1043,20 @@ func (n *Node) initAPIServer() error { // only create auth service if token authorization is required n.Log.Info("API authorization is enabled. 
Auth tokens must be passed in the header of API requests, except requests to the auth service.") - authService, err := a.CreateHandler() + handler, err := a.CreateHandler() if err != nil { return err } - handler := &common.HTTPHandler{ - LockOptions: common.NoLock, - Handler: authService, - } - return n.APIServer.AddRoute(handler, &sync.RWMutex{}, "auth", "") + return n.APIServer.AddRoute(handler, "auth", "") } // Add the default VM aliases func (n *Node) addDefaultVMAliases() error { n.Log.Info("adding the default VM aliases") - vmAliases := genesis.GetVMAliases() - for vmID, aliases := range vmAliases { + for vmID, aliases := range genesis.VMAliases { for _, alias := range aliases { - if err := n.Config.VMAliaser.Alias(vmID, alias); err != nil { + if err := n.VMAliaser.Alias(vmID, alias); err != nil { return err } } @@ -669,15 +1081,13 @@ func (n *Node) initChainManager(avaxAssetID ids.ID) error { cChainID := createEVMTx.ID() // If any of these chains die, the node shuts down - criticalChains := set.Set[ids.ID]{} - criticalChains.Add( + criticalChains := set.Of( constants.PlatformChainID, xChainID, cChainID, ) - // Manages network timeouts - timeoutManager, err := timeout.NewManager( + n.timeoutManager, err = timeout.NewManager( &n.Config.AdaptiveTimeoutConfig, n.benchlistManager, "requests", @@ -686,16 +1096,16 @@ func (n *Node) initChainManager(avaxAssetID ids.ID) error { if err != nil { return err } - go n.Log.RecoverAndPanic(timeoutManager.Dispatch) + go n.Log.RecoverAndPanic(n.timeoutManager.Dispatch) // Routes incoming messages from peers to the appropriate chain - err = n.Config.ConsensusRouter.Initialize( + err = n.chainRouter.Initialize( n.ID, n.Log, - timeoutManager, + n.timeoutManager, n.Config.ConsensusShutdownTimeout, criticalChains, - n.Config.EnableStaking, + n.Config.SybilProtectionEnabled, n.Config.TrackedSubnets, n.Shutdown, n.Config.RouterHealthConfig, @@ -706,52 +1116,58 @@ func (n *Node) initChainManager(avaxAssetID ids.ID) error { return 
fmt.Errorf("couldn't initialize chain router: %w", err) } - n.chainManager = chains.New(&chains.ManagerConfig{ - StakingEnabled: n.Config.EnableStaking, - StakingCert: n.Config.StakingTLSCert, - StakingBLSKey: n.Config.StakingSigningKey, - Log: n.Log, - LogFactory: n.LogFactory, - VMManager: n.VMManager, - BlockAcceptorGroup: n.BlockAcceptorGroup, - TxAcceptorGroup: n.TxAcceptorGroup, - VertexAcceptorGroup: n.VertexAcceptorGroup, - DBManager: n.DBManager, - MsgCreator: n.msgCreator, - Router: n.Config.ConsensusRouter, - Net: n.Net, - Validators: n.vdrs, - NodeID: n.ID, - NetworkID: n.Config.NetworkID, - Server: n.APIServer, - Keystore: n.keystore, - AtomicMemory: n.sharedMemory, - AVAXAssetID: avaxAssetID, - XChainID: xChainID, - CChainID: cChainID, - CriticalChains: criticalChains, - TimeoutManager: timeoutManager, - Health: n.health, - RetryBootstrap: n.Config.RetryBootstrap, - RetryBootstrapWarnFrequency: n.Config.RetryBootstrapWarnFrequency, - ShutdownNodeFunc: n.Shutdown, - MeterVMEnabled: n.Config.MeterVMEnabled, - Metrics: n.MetricsGatherer, - SubnetConfigs: n.Config.SubnetConfigs, - ChainConfigs: n.Config.ChainConfigs, - ConsensusGossipFrequency: n.Config.ConsensusGossipFrequency, - ConsensusAppConcurrency: n.Config.ConsensusAppConcurrency, - BootstrapMaxTimeGetAncestors: n.Config.BootstrapMaxTimeGetAncestors, - BootstrapAncestorsMaxContainersSent: n.Config.BootstrapAncestorsMaxContainersSent, - BootstrapAncestorsMaxContainersReceived: n.Config.BootstrapAncestorsMaxContainersReceived, - ApricotPhase4Time: version.GetApricotPhase4Time(n.Config.NetworkID), - ApricotPhase4MinPChainHeight: version.GetApricotPhase4MinPChainHeight(n.Config.NetworkID), - ResourceTracker: n.resourceTracker, - StateSyncBeacons: n.Config.StateSyncIDs, - TracingEnabled: n.Config.TraceConfig.Enabled, - Tracer: n.tracer, - ChainDataDir: n.Config.ChainDataDir, - }) + subnets, err := chains.NewSubnets(n.ID, n.Config.SubnetConfigs) + if err != nil { + return fmt.Errorf("failed to 
initialize subnets: %w", err) + } + n.chainManager = chains.New( + &chains.ManagerConfig{ + SybilProtectionEnabled: n.Config.SybilProtectionEnabled, + StakingTLSCert: n.Config.StakingTLSCert, + StakingBLSKey: n.Config.StakingSigningKey, + Log: n.Log, + LogFactory: n.LogFactory, + VMManager: n.VMManager, + BlockAcceptorGroup: n.BlockAcceptorGroup, + TxAcceptorGroup: n.TxAcceptorGroup, + VertexAcceptorGroup: n.VertexAcceptorGroup, + DB: n.DB, + MsgCreator: n.msgCreator, + Router: n.chainRouter, + Net: n.Net, + Validators: n.vdrs, + PartialSyncPrimaryNetwork: n.Config.PartialSyncPrimaryNetwork, + NodeID: n.ID, + NetworkID: n.Config.NetworkID, + Server: n.APIServer, + Keystore: n.keystore, + AtomicMemory: n.sharedMemory, + AVAXAssetID: avaxAssetID, + XChainID: xChainID, + CChainID: cChainID, + CriticalChains: criticalChains, + TimeoutManager: n.timeoutManager, + Health: n.health, + ShutdownNodeFunc: n.Shutdown, + MeterVMEnabled: n.Config.MeterVMEnabled, + Metrics: n.MetricsGatherer, + SubnetConfigs: n.Config.SubnetConfigs, + ChainConfigs: n.Config.ChainConfigs, + FrontierPollFrequency: n.Config.FrontierPollFrequency, + ConsensusAppConcurrency: n.Config.ConsensusAppConcurrency, + BootstrapMaxTimeGetAncestors: n.Config.BootstrapMaxTimeGetAncestors, + BootstrapAncestorsMaxContainersSent: n.Config.BootstrapAncestorsMaxContainersSent, + BootstrapAncestorsMaxContainersReceived: n.Config.BootstrapAncestorsMaxContainersReceived, + ApricotPhase4Time: version.GetApricotPhase4Time(n.Config.NetworkID), + ApricotPhase4MinPChainHeight: version.ApricotPhase4MinPChainHeight[n.Config.NetworkID], + ResourceTracker: n.resourceTracker, + StateSyncBeacons: n.Config.StateSyncIDs, + TracingEnabled: n.Config.TraceConfig.Enabled, + Tracer: n.tracer, + ChainDataDir: n.Config.ChainDataDir, + Subnets: subnets, + }, + ) // Notify the API server when new chains are created n.chainManager.AddRegistrant(n.APIServer) @@ -764,70 +1180,70 @@ func (n *Node) initVMs() error { vdrs := n.vdrs - // If 
staking is disabled, ignore updates to Subnets' validator sets - // Instead of updating node's validator manager, platform chain makes changes - // to its own local validator manager (which isn't used for sampling) - if !n.Config.EnableStaking { + // If sybil protection is disabled, we provide the P-chain its own local + // validator manager that will not be used by the rest of the node. This + // allows the node's validator sets to be determined by network connections. + if !n.Config.SybilProtectionEnabled { vdrs = validators.NewManager() - primaryVdrs := validators.NewSet() - _ = vdrs.Add(constants.PrimaryNetworkID, primaryVdrs) } - vmRegisterer := registry.NewVMRegisterer(registry.VMRegistererConfig{ - APIServer: n.APIServer, - Log: n.Log, - VMFactoryLog: n.VMFactoryLog, - VMManager: n.VMManager, - }) + durangoTime := version.GetDurangoTime(n.Config.NetworkID) + if err := txs.InitCodec(durangoTime); err != nil { + return err + } + if err := block.InitCodec(durangoTime); err != nil { + return err + } + if err := coreth.InitCodec(durangoTime); err != nil { + return err + } // Register the VMs that Avalanche supports - errs := wrappers.Errs{} - errs.Add( - vmRegisterer.Register(context.TODO(), constants.PlatformVMID, &platformvm.Factory{ + err := utils.Err( + n.VMManager.RegisterFactory(context.TODO(), constants.PlatformVMID, &platformvm.Factory{ Config: platformconfig.Config{ - Chains: n.chainManager, - Validators: vdrs, - UptimeLockedCalculator: n.uptimeCalculator, - StakingEnabled: n.Config.EnableStaking, - TrackedSubnets: n.Config.TrackedSubnets, - TxFee: n.Config.TxFee, - CreateAssetTxFee: n.Config.CreateAssetTxFee, - CreateSubnetTxFee: n.Config.CreateSubnetTxFee, - TransformSubnetTxFee: n.Config.TransformSubnetTxFee, - CreateBlockchainTxFee: n.Config.CreateBlockchainTxFee, - AddPrimaryNetworkValidatorFee: n.Config.AddPrimaryNetworkValidatorFee, - AddPrimaryNetworkDelegatorFee: n.Config.AddPrimaryNetworkDelegatorFee, - AddSubnetValidatorFee: 
n.Config.AddSubnetValidatorFee, - AddSubnetDelegatorFee: n.Config.AddSubnetDelegatorFee, - UptimePercentage: n.Config.UptimeRequirement, - MinValidatorStake: n.Config.MinValidatorStake, - MaxValidatorStake: n.Config.MaxValidatorStake, - MinDelegatorStake: n.Config.MinDelegatorStake, - MinDelegationFee: n.Config.MinDelegationFee, - MinStakeDuration: n.Config.MinStakeDuration, - MaxStakeDuration: n.Config.MaxStakeDuration, - RewardConfig: n.Config.RewardConfig, - ApricotPhase3Time: version.GetApricotPhase3Time(n.Config.NetworkID), - ApricotPhase5Time: version.GetApricotPhase5Time(n.Config.NetworkID), - BanffTime: version.GetBanffTime(n.Config.NetworkID), - CortinaTime: version.GetCortinaTime(n.Config.NetworkID), - MinPercentConnectedStakeHealthy: n.Config.MinPercentConnectedStakeHealthy, - UseCurrentHeight: n.Config.UseCurrentHeight, + Chains: n.chainManager, + Validators: vdrs, + UptimeLockedCalculator: n.uptimeCalculator, + SybilProtectionEnabled: n.Config.SybilProtectionEnabled, + PartialSyncPrimaryNetwork: n.Config.PartialSyncPrimaryNetwork, + TrackedSubnets: n.Config.TrackedSubnets, + TxFee: n.Config.TxFee, + CreateAssetTxFee: n.Config.CreateAssetTxFee, + CreateSubnetTxFee: n.Config.CreateSubnetTxFee, + TransformSubnetTxFee: n.Config.TransformSubnetTxFee, + CreateBlockchainTxFee: n.Config.CreateBlockchainTxFee, + AddPrimaryNetworkValidatorFee: n.Config.AddPrimaryNetworkValidatorFee, + AddPrimaryNetworkDelegatorFee: n.Config.AddPrimaryNetworkDelegatorFee, + AddSubnetValidatorFee: n.Config.AddSubnetValidatorFee, + AddSubnetDelegatorFee: n.Config.AddSubnetDelegatorFee, + UptimePercentage: n.Config.UptimeRequirement, + MinValidatorStake: n.Config.MinValidatorStake, + MaxValidatorStake: n.Config.MaxValidatorStake, + MinDelegatorStake: n.Config.MinDelegatorStake, + MinDelegationFee: n.Config.MinDelegationFee, + MinStakeDuration: n.Config.MinStakeDuration, + MaxStakeDuration: n.Config.MaxStakeDuration, + RewardConfig: n.Config.RewardConfig, + ApricotPhase3Time: 
version.GetApricotPhase3Time(n.Config.NetworkID), + ApricotPhase5Time: version.GetApricotPhase5Time(n.Config.NetworkID), + BanffTime: version.GetBanffTime(n.Config.NetworkID), + CortinaTime: version.GetCortinaTime(n.Config.NetworkID), + DurangoTime: durangoTime, + UseCurrentHeight: n.Config.UseCurrentHeight, }, }), - vmRegisterer.Register(context.TODO(), constants.AVMID, &avm.Factory{ + n.VMManager.RegisterFactory(context.TODO(), constants.AVMID, &avm.Factory{ Config: avmconfig.Config{ TxFee: n.Config.TxFee, CreateAssetTxFee: n.Config.CreateAssetTxFee, + DurangoTime: durangoTime, }, }), - vmRegisterer.Register(context.TODO(), constants.EVMID, &coreth.Factory{}), - n.VMManager.RegisterFactory(context.TODO(), secp256k1fx.ID, &secp256k1fx.Factory{}), - n.VMManager.RegisterFactory(context.TODO(), nftfx.ID, &nftfx.Factory{}), - n.VMManager.RegisterFactory(context.TODO(), propertyfx.ID, &propertyfx.Factory{}), + n.VMManager.RegisterFactory(context.TODO(), constants.EVMID, &coreth.Factory{}), ) - if errs.Errored() { - return errs.Err + if err != nil { + return err } // initialize vm runtime manager @@ -842,7 +1258,7 @@ func (n *Node) initVMs() error { CPUTracker: n.resourceManager, RuntimeTracker: n.runtimeManager, }), - VMRegisterer: vmRegisterer, + VMManager: n.VMManager, }) // register any vms that need to be installed as plugins from disk @@ -867,9 +1283,8 @@ func (n *Node) initSharedMemory() { // Assumes n.APIServer is already set func (n *Node) initKeystoreAPI() error { n.Log.Info("initializing keystore") - keystoreDB := n.DBManager.NewPrefixDBManager([]byte("keystore")) - n.keystore = keystore.New(n.Log, keystoreDB) - keystoreHandler, err := n.keystore.CreateHandler() + n.keystore = keystore.New(n.Log, prefixdb.New(keystoreDBPrefix, n.DB)) + handler, err := n.keystore.CreateHandler() if err != nil { return err } @@ -878,11 +1293,7 @@ func (n *Node) initKeystoreAPI() error { return nil } n.Log.Warn("initializing deprecated keystore API") - handler := 
&common.HTTPHandler{ - LockOptions: common.NoLock, - Handler: keystoreHandler, - } - return n.APIServer.AddRoute(handler, &sync.RWMutex{}, "keystore", "") + return n.APIServer.AddRoute(handler, "keystore", "") } // initMetricsAPI initializes the Metrics API @@ -912,14 +1323,10 @@ func (n *Node) initMetricsAPI() error { n.Log.Info("initializing metrics API") return n.APIServer.AddRoute( - &common.HTTPHandler{ - LockOptions: common.NoLock, - Handler: promhttp.HandlerFor( - n.MetricsGatherer, - promhttp.HandlerOpts{}, - ), - }, - &sync.RWMutex{}, + promhttp.HandlerFor( + n.MetricsGatherer, + promhttp.HandlerOpts{}, + ), "metrics", "", ) @@ -936,6 +1343,7 @@ func (n *Node) initAdminAPI() error { service, err := admin.NewService( admin.Config{ Log: n.Log, + DB: n.DB, ChainManager: n.chainManager, HTTPServer: n.APIServer, ProfileDir: n.Config.ProfilerConfig.Dir, @@ -948,7 +1356,11 @@ func (n *Node) initAdminAPI() error { if err != nil { return err } - return n.APIServer.AddRoute(service, &sync.RWMutex{}, "admin", "") + return n.APIServer.AddRoute( + service, + "admin", + "", + ) } // initProfiler initializes the continuous profiling @@ -983,7 +1395,6 @@ func (n *Node) initInfoAPI() error { n.Log.Info("initializing info API") - primaryValidators, _ := n.vdrs.Get(constants.PrimaryNetworkID) service, err := info.NewService( info.Parameters{ Version: version.CurrentApp, @@ -1002,17 +1413,21 @@ func (n *Node) initInfoAPI() error { VMManager: n.VMManager, }, n.Log, + n.vdrs, n.chainManager, n.VMManager, n.Config.NetworkConfig.MyIPPort, n.Net, - primaryValidators, n.benchlistManager, ) if err != nil { return err } - return n.APIServer.AddRoute(service, &sync.RWMutex{}, "info", "") + return n.APIServer.AddRoute( + service, + "info", + "", + ) } // initHealthAPI initializes the Health API service @@ -1030,18 +1445,18 @@ func (n *Node) initHealthAPI() error { } n.Log.Info("initializing Health API") - err = healthChecker.RegisterHealthCheck("network", n.Net, health.GlobalTag) + err 
= healthChecker.RegisterHealthCheck("network", n.Net, health.ApplicationTag) if err != nil { return fmt.Errorf("couldn't register network health check: %w", err) } - err = healthChecker.RegisterHealthCheck("router", n.Config.ConsensusRouter, health.GlobalTag) + err = healthChecker.RegisterHealthCheck("router", n.chainRouter, health.ApplicationTag) if err != nil { return fmt.Errorf("couldn't register router health check: %w", err) } // TODO: add database health to liveness check - err = healthChecker.RegisterHealthCheck("database", n.DB, health.GlobalTag) + err = healthChecker.RegisterHealthCheck("database", n.DB, health.ApplicationTag) if err != nil { return fmt.Errorf("couldn't register database health check: %w", err) } @@ -1068,7 +1483,7 @@ func (n *Node) initHealthAPI() error { }, err }) - err = n.health.RegisterHealthCheck("diskspace", diskSpaceCheck, health.GlobalTag) + err = n.health.RegisterHealthCheck("diskspace", diskSpaceCheck, health.ApplicationTag) if err != nil { return fmt.Errorf("couldn't register resource health check: %w", err) } @@ -1079,11 +1494,7 @@ func (n *Node) initHealthAPI() error { } err = n.APIServer.AddRoute( - &common.HTTPHandler{ - LockOptions: common.NoLock, - Handler: handler, - }, - &sync.RWMutex{}, + handler, "health", "", ) @@ -1092,11 +1503,7 @@ func (n *Node) initHealthAPI() error { } err = n.APIServer.AddRoute( - &common.HTTPHandler{ - LockOptions: common.NoLock, - Handler: health.NewGetHandler(healthChecker.Readiness), - }, - &sync.RWMutex{}, + health.NewGetHandler(healthChecker.Readiness), "health", "/readiness", ) @@ -1105,11 +1512,7 @@ func (n *Node) initHealthAPI() error { } err = n.APIServer.AddRoute( - &common.HTTPHandler{ - LockOptions: common.NoLock, - Handler: health.NewGetHandler(healthChecker.Health), - }, - &sync.RWMutex{}, + health.NewGetHandler(healthChecker.Health), "health", "/health", ) @@ -1118,11 +1521,7 @@ func (n *Node) initHealthAPI() error { } return n.APIServer.AddRoute( - &common.HTTPHandler{ - 
LockOptions: common.NoLock, - Handler: health.NewGetHandler(healthChecker.Liveness), - }, - &sync.RWMutex{}, + health.NewGetHandler(healthChecker.Liveness), "health", "/liveness", ) @@ -1136,11 +1535,15 @@ func (n *Node) initIPCAPI() error { return nil } n.Log.Warn("initializing deprecated ipc API") - service, err := ipcsapi.NewService(n.Log, n.chainManager, n.APIServer, n.IPCs) + service, err := ipcsapi.NewService(n.Log, n.chainManager, n.IPCs) if err != nil { return err } - return n.APIServer.AddRoute(service, &sync.RWMutex{}, "ipcs", "") + return n.APIServer.AddRoute( + service, + "ipcs", + "", + ) } // Give chains aliases as specified by the genesis information @@ -1186,25 +1589,22 @@ func (n *Node) initAPIAliases(genesisBytes []byte) error { return nil } -// Initializes [n.vdrs] and returns the Primary Network validator set. -func (n *Node) initVdrs() validators.Set { - n.vdrs = validators.NewManager() - vdrSet := validators.NewSet() - _ = n.vdrs.Add(constants.PrimaryNetworkID, vdrSet) - return vdrSet -} - // Initialize [n.resourceManager]. func (n *Node) initResourceManager(reg prometheus.Registerer) error { - n.resourceManager = resource.NewManager( + resourceManager, err := resource.NewManager( + n.Log, n.Config.DatabaseConfig.Path, n.Config.SystemTrackerFrequency, n.Config.SystemTrackerCPUHalflife, n.Config.SystemTrackerDiskHalflife, + reg, ) + if err != nil { + return err + } + n.resourceManager = resourceManager n.resourceManager.TrackProcess(os.Getpid()) - var err error n.resourceTracker, err = tracker.NewResourceTracker(reg, n.resourceManager, &meter.ContinuousFactory{}, n.Config.SystemTrackerProcessingHalflife) return err } @@ -1213,11 +1613,11 @@ func (n *Node) initResourceManager(reg prometheus.Registerer) error { // Assumes [n.resourceTracker] is already initialized. 
func (n *Node) initCPUTargeter( config *tracker.TargeterConfig, - vdrs validators.Set, ) { n.cpuTargeter = tracker.NewTargeter( + n.Log, config, - vdrs, + n.vdrs, n.resourceTracker.CPUTracker(), ) } @@ -1226,153 +1626,15 @@ func (n *Node) initCPUTargeter( // Assumes [n.resourceTracker] is already initialized. func (n *Node) initDiskTargeter( config *tracker.TargeterConfig, - vdrs validators.Set, ) { n.diskTargeter = tracker.NewTargeter( + n.Log, config, - vdrs, + n.vdrs, n.resourceTracker.DiskTracker(), ) } -// Initialize this node -func (n *Node) Initialize( - config *Config, - logger logging.Logger, - logFactory logging.Factory, -) error { - n.Log = logger - n.Config = config - n.ID = ids.NodeIDFromCert(n.Config.StakingTLSCert.Leaf) - n.LogFactory = logFactory - n.DoneShuttingDown.Add(1) - - pop := signer.NewProofOfPossession(n.Config.StakingSigningKey) - n.Log.Info("initializing node", - zap.Stringer("version", version.CurrentApp), - zap.Stringer("nodeID", n.ID), - zap.Reflect("nodePOP", pop), - zap.Reflect("providedFlags", n.Config.ProvidedFlags), - zap.Reflect("config", n.Config), - ) - - var err error - n.VMFactoryLog, err = logFactory.Make("vm-factory") - if err != nil { - return fmt.Errorf("problem creating vm logger: %w", err) - } - - n.VMManager = vms.NewManager(n.VMFactoryLog, config.VMAliaser) - - if err := n.initBeacons(); err != nil { // Configure the beacons - return fmt.Errorf("problem initializing node beacons: %w", err) - } - - // Set up tracer - n.tracer, err = trace.New(n.Config.TraceConfig) - if err != nil { - return fmt.Errorf("couldn't initialize tracer: %w", err) - } - - if n.Config.TraceConfig.Enabled { - n.Config.ConsensusRouter = router.Trace(n.Config.ConsensusRouter, n.tracer) - } - - n.initMetrics() - - if err := n.initAPIServer(); err != nil { // Start the API Server - return fmt.Errorf("couldn't initialize API server: %w", err) - } - - if err := n.initMetricsAPI(); err != nil { // Start the Metrics API - return fmt.Errorf("couldn't 
initialize metrics API: %w", err) - } - - if err := n.initDatabase(); err != nil { // Set up the node's database - return fmt.Errorf("problem initializing database: %w", err) - } - - if err := n.initKeystoreAPI(); err != nil { // Start the Keystore API - return fmt.Errorf("couldn't initialize keystore API: %w", err) - } - - n.initSharedMemory() // Initialize shared memory - - // message.Creator is shared between networking, chainManager and the engine. - // It must be initiated before networking (initNetworking), chain manager (initChainManager) - // and the engine (initChains) but after the metrics (initMetricsAPI) - // message.Creator currently record metrics under network namespace - n.networkNamespace = "network" - n.msgCreator, err = message.NewCreator( - n.Log, - n.MetricsRegisterer, - n.networkNamespace, - constants.DefaultNetworkCompressionType, - n.Config.NetworkConfig.MaximumInboundMessageTimeout, - ) - if err != nil { - return fmt.Errorf("problem initializing message creator: %w", err) - } - - primaryNetVdrs := n.initVdrs() - if err := n.initResourceManager(n.MetricsRegisterer); err != nil { - return fmt.Errorf("problem initializing resource manager: %w", err) - } - n.initCPUTargeter(&config.CPUTargeterConfig, primaryNetVdrs) - n.initDiskTargeter(&config.DiskTargeterConfig, primaryNetVdrs) - if err := n.initNetworking(primaryNetVdrs); err != nil { // Set up networking layer. 
- return fmt.Errorf("problem initializing networking: %w", err) - } - - n.initEventDispatchers() - - // Start the Health API - // Has to be initialized before chain manager - // [n.Net] must already be set - if err := n.initHealthAPI(); err != nil { - return fmt.Errorf("couldn't initialize health API: %w", err) - } - if err := n.addDefaultVMAliases(); err != nil { - return fmt.Errorf("couldn't initialize API aliases: %w", err) - } - if err := n.initChainManager(n.Config.AvaxAssetID); err != nil { // Set up the chain manager - return fmt.Errorf("couldn't initialize chain manager: %w", err) - } - if err := n.initVMs(); err != nil { // Initialize the VM registry. - return fmt.Errorf("couldn't initialize VM registry: %w", err) - } - if err := n.initAdminAPI(); err != nil { // Start the Admin API - return fmt.Errorf("couldn't initialize admin API: %w", err) - } - if err := n.initInfoAPI(); err != nil { // Start the Info API - return fmt.Errorf("couldn't initialize info API: %w", err) - } - if err := n.initIPCs(); err != nil { // Start the IPCs - return fmt.Errorf("couldn't initialize IPCs: %w", err) - } - if err := n.initIPCAPI(); err != nil { // Start the IPC API - return fmt.Errorf("couldn't initialize the IPC API: %w", err) - } - if err := n.initChainAliases(n.Config.GenesisBytes); err != nil { - return fmt.Errorf("couldn't initialize chain aliases: %w", err) - } - if err := n.initAPIAliases(n.Config.GenesisBytes); err != nil { - return fmt.Errorf("couldn't initialize API aliases: %w", err) - } - if err := n.initIndexer(); err != nil { - return fmt.Errorf("couldn't initialize indexer: %w", err) - } - - n.health.Start(context.TODO(), n.Config.HealthCheckFreq) - n.initProfiler() - - // Start the Platform chain - if err := n.initChains(n.Config.GenesisBytes); err != nil { - return fmt.Errorf("couldn't initialize chains: %w", err) - } - return nil -} - // Shutdown this node // May be called multiple times func (n *Node) Shutdown(exitCode int) { @@ -1396,7 +1658,7 @@ func 
(n *Node) shutdown() { }, errShuttingDown }) - err := n.health.RegisterHealthCheck("shuttingDown", shuttingDownCheck, health.GlobalTag) + err := n.health.RegisterHealthCheck("shuttingDown", shuttingDownCheck, health.ApplicationTag) if err != nil { n.Log.Debug("couldn't register shuttingDown health check", zap.Error(err), @@ -1416,6 +1678,7 @@ func (n *Node) shutdown() { ) } } + n.timeoutManager.Stop() if n.chainManager != nil { n.chainManager.Shutdown() } @@ -1430,6 +1693,8 @@ func (n *Node) shutdown() { zap.Error(err), ) } + n.portMapper.UnmapAllPorts() + n.ipUpdater.Stop() if err := n.indexer.Close(); err != nil { n.Log.Debug("error closing tx indexer", zap.Error(err), @@ -1440,8 +1705,15 @@ func (n *Node) shutdown() { n.Log.Info("cleaning up plugin runtimes") n.runtimeManager.Stop(context.TODO()) - if n.DBManager != nil { - if err := n.DBManager.Close(); err != nil { + if n.DB != nil { + if err := n.DB.Delete(ungracefulShutdown); err != nil { + n.Log.Error( + "failed to delete ungraceful shutdown key", + zap.Error(err), + ) + } + + if err := n.DB.Close(); err != nil { n.Log.Warn("error during DB shutdown", zap.Error(err), ) diff --git a/avalanchego/node/overridden_manager.go b/avalanchego/node/overridden_manager.go new file mode 100644 index 00000000..4dd49b65 --- /dev/null +++ b/avalanchego/node/overridden_manager.go @@ -0,0 +1,85 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package node + +import ( + "fmt" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils/crypto/bls" + "github.com/ava-labs/avalanchego/utils/set" +) + +var _ validators.Manager = (*overriddenManager)(nil) + +// newOverriddenManager returns a Manager that overrides of all calls to the +// underlying Manager to only operate on the validators in [subnetID]. 
+func newOverriddenManager(subnetID ids.ID, manager validators.Manager) *overriddenManager { + return &overriddenManager{ + subnetID: subnetID, + manager: manager, + } +} + +// overriddenManager is a wrapper around a Manager that overrides of all calls +// to the underlying Manager to only operate on the validators in [subnetID]. +// subnetID here is typically the primary network ID, as it has the superset of +// all subnet validators. +type overriddenManager struct { + manager validators.Manager + subnetID ids.ID +} + +func (o *overriddenManager) AddStaker(_ ids.ID, nodeID ids.NodeID, pk *bls.PublicKey, txID ids.ID, weight uint64) error { + return o.manager.AddStaker(o.subnetID, nodeID, pk, txID, weight) +} + +func (o *overriddenManager) AddWeight(_ ids.ID, nodeID ids.NodeID, weight uint64) error { + return o.manager.AddWeight(o.subnetID, nodeID, weight) +} + +func (o *overriddenManager) GetWeight(_ ids.ID, nodeID ids.NodeID) uint64 { + return o.manager.GetWeight(o.subnetID, nodeID) +} + +func (o *overriddenManager) GetValidator(_ ids.ID, nodeID ids.NodeID) (*validators.Validator, bool) { + return o.manager.GetValidator(o.subnetID, nodeID) +} + +func (o *overriddenManager) SubsetWeight(_ ids.ID, nodeIDs set.Set[ids.NodeID]) (uint64, error) { + return o.manager.SubsetWeight(o.subnetID, nodeIDs) +} + +func (o *overriddenManager) RemoveWeight(_ ids.ID, nodeID ids.NodeID, weight uint64) error { + return o.manager.RemoveWeight(o.subnetID, nodeID, weight) +} + +func (o *overriddenManager) Count(ids.ID) int { + return o.manager.Count(o.subnetID) +} + +func (o *overriddenManager) TotalWeight(ids.ID) (uint64, error) { + return o.manager.TotalWeight(o.subnetID) +} + +func (o *overriddenManager) Sample(_ ids.ID, size int) ([]ids.NodeID, error) { + return o.manager.Sample(o.subnetID, size) +} + +func (o *overriddenManager) GetMap(ids.ID) map[ids.NodeID]*validators.GetValidatorOutput { + return o.manager.GetMap(o.subnetID) +} + +func (o *overriddenManager) 
RegisterCallbackListener(_ ids.ID, listener validators.SetCallbackListener) { + o.manager.RegisterCallbackListener(o.subnetID, listener) +} + +func (o *overriddenManager) String() string { + return fmt.Sprintf("Overridden Validator Manager (SubnetID = %s): %s", o.subnetID, o.manager) +} + +func (o *overriddenManager) GetValidatorIDs(ids.ID) []ids.NodeID { + return o.manager.GetValidatorIDs(o.subnetID) +} diff --git a/avalanchego/node/overridden_manager_test.go b/avalanchego/node/overridden_manager_test.go new file mode 100644 index 00000000..8af93ff6 --- /dev/null +++ b/avalanchego/node/overridden_manager_test.go @@ -0,0 +1,75 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package node + +import ( + "math" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/validators" +) + +func TestOverriddenManager(t *testing.T) { + require := require.New(t) + + nodeID0 := ids.GenerateTestNodeID() + nodeID1 := ids.GenerateTestNodeID() + subnetID0 := ids.GenerateTestID() + subnetID1 := ids.GenerateTestID() + + m := validators.NewManager() + require.NoError(m.AddStaker(subnetID0, nodeID0, nil, ids.Empty, 1)) + require.NoError(m.AddStaker(subnetID1, nodeID1, nil, ids.Empty, 1)) + + om := newOverriddenManager(subnetID0, m) + _, ok := om.GetValidator(subnetID0, nodeID0) + require.True(ok) + _, ok = om.GetValidator(subnetID0, nodeID1) + require.False(ok) + _, ok = om.GetValidator(subnetID1, nodeID0) + require.True(ok) + _, ok = om.GetValidator(subnetID1, nodeID1) + require.False(ok) + + require.NoError(om.RemoveWeight(subnetID1, nodeID0, 1)) + _, ok = om.GetValidator(subnetID0, nodeID0) + require.False(ok) + _, ok = om.GetValidator(subnetID0, nodeID1) + require.False(ok) + _, ok = om.GetValidator(subnetID1, nodeID0) + require.False(ok) + _, ok = om.GetValidator(subnetID1, nodeID1) + require.False(ok) +} + +func TestOverriddenString(t 
*testing.T) { + require := require.New(t) + + nodeID0 := ids.EmptyNodeID + nodeID1, err := ids.NodeIDFromString("NodeID-QLbz7JHiBTspS962RLKV8GndWFwdYhk6V") + require.NoError(err) + + subnetID0, err := ids.FromString("TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES") + require.NoError(err) + subnetID1, err := ids.FromString("2mcwQKiD8VEspmMJpL1dc7okQQ5dDVAWeCBZ7FWBFAbxpv3t7w") + require.NoError(err) + + m := validators.NewManager() + require.NoError(m.AddStaker(subnetID0, nodeID0, nil, ids.Empty, 1)) + require.NoError(m.AddStaker(subnetID0, nodeID1, nil, ids.Empty, math.MaxInt64-1)) + require.NoError(m.AddStaker(subnetID1, nodeID1, nil, ids.Empty, 1)) + + om := newOverriddenManager(subnetID0, m) + expected := `Overridden Validator Manager (SubnetID = TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES): Validator Manager: (Size = 2) + Subnet[TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES]: Validator Set: (Size = 2, Weight = 9223372036854775807) + Validator[0]: NodeID-111111111111111111116DBWJs, 1 + Validator[1]: NodeID-QLbz7JHiBTspS962RLKV8GndWFwdYhk6V, 9223372036854775806 + Subnet[2mcwQKiD8VEspmMJpL1dc7okQQ5dDVAWeCBZ7FWBFAbxpv3t7w]: Validator Set: (Size = 1, Weight = 1) + Validator[0]: NodeID-QLbz7JHiBTspS962RLKV8GndWFwdYhk6V, 1` + result := om.String() + require.Equal(expected, result) +} diff --git a/avalanchego/proto/Dockerfile.buf b/avalanchego/proto/Dockerfile.buf index 3903baf2..3007f586 100644 --- a/avalanchego/proto/Dockerfile.buf +++ b/avalanchego/proto/Dockerfile.buf @@ -1,4 +1,4 @@ -FROM bufbuild/buf:1.11.0 AS builder +FROM bufbuild/buf:1.29.0 AS builder FROM ubuntu:20.04 @@ -6,7 +6,7 @@ RUN apt-get update && apt -y install bash curl unzip git WORKDIR /opt RUN \ - curl -L https://golang.org/dl/go1.19.6.linux-amd64.tar.gz > golang.tar.gz && \ + curl -L https://go.dev/dl/go1.21.7.linux-amd64.tar.gz > golang.tar.gz && \ mkdir golang && \ tar -zxvf golang.tar.gz -C golang/ @@ -16,7 +16,7 @@ COPY --from=builder /usr/local/bin/buf /usr/local/bin/ # any 
version changes here should also be bumped in scripts/protobuf_codegen.sh RUN \ - go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.28.1 && \ - go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@v1.2.0 + go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.30.0 && \ + go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@v1.3.0 ENV PATH="${PATH}:/root/go/bin/" diff --git a/avalanchego/proto/README.md b/avalanchego/proto/README.md index f42fbf11..fee58835 100644 --- a/avalanchego/proto/README.md +++ b/avalanchego/proto/README.md @@ -1,18 +1,37 @@ # Avalanche gRPC -Now Serving: **Protocol Version 25** +Now Serving: **Protocol Version 33** -Protobuf files are hosted at [https://buf.build/ava-labs/avalanche](https://buf.build/ava-labs/avalanche) and can be used as dependencies in other projects. +Protobuf files are hosted at +[https://buf.build/ava-labs/avalanche](https://buf.build/ava-labs/avalanche) and +can be used as dependencies in other projects. -Protobuf linting and generation for this project is managed by [buf](https://github.com/bufbuild/buf). +Protobuf linting and generation for this project is managed by +[buf](https://github.com/bufbuild/buf). -Please find installation instructions on [https://docs.buf.build/installation/](https://docs.buf.build/installation/) or use `Dockerfile.buf` provided in the `proto/` directory of AvalancheGo. +Please find installation instructions on +[https://docs.buf.build/installation/](https://docs.buf.build/installation/) or +use `Dockerfile.buf` provided in the `proto/` directory of AvalancheGo. -Any changes made to proto definition can be updated by running `protobuf_codegen.sh` located in the `scripts/` directory of AvalancheGo. +Any changes made to proto definition can be updated by running +`protobuf_codegen.sh` located in the `scripts/` directory of AvalancheGo. 
-Introduction to `buf` [https://docs.buf.build/tour/introduction](https://docs.buf.build/tour/introduction) +Introduction to `buf` +[https://docs.buf.build/tour/introduction](https://docs.buf.build/tour/introduction) ## Protocol Version Compatibility -The protobuf definitions and generated code are versioned based on the [RPCChainVMProtocol](../version/version.go#L13) defined for the RPCChainVM. -Many versions of an Avalanche client can use the same [RPCChainVMProtocol](../version/version.go#L13). But each Avalanche client and subnet vm must use the same protocol version to be compatible. +The protobuf definitions and generated code are versioned based on the +[RPCChainVMProtocol](../version/version.go#L13) defined for the RPCChainVM. +Many versions of an Avalanche client can use the same +[RPCChainVMProtocol](../version/version.go#L13). But each Avalanche client and +subnet vm must use the same protocol version to be compatible. + +## Publishing to Buf Schema Registry + +- Checkout appropriate tag in AvalancheGo `git checkout v1.10.1` +- Change to proto/ directory `cd proto`. +- Publish new tag to buf registry. 
`buf push -t v26` + +Note: Publishing requires auth to the ava-labs org in buf +https://buf.build/ava-labs/repositories \ No newline at end of file diff --git a/avalanchego/proto/appsender/appsender.proto b/avalanchego/proto/appsender/appsender.proto index d021bbb5..1d7cdac8 100644 --- a/avalanchego/proto/appsender/appsender.proto +++ b/avalanchego/proto/appsender/appsender.proto @@ -9,11 +9,13 @@ option go_package = "github.com/ava-labs/avalanchego/proto/pb/appsender"; service AppSender { rpc SendAppRequest(SendAppRequestMsg) returns (google.protobuf.Empty); rpc SendAppResponse(SendAppResponseMsg) returns (google.protobuf.Empty); + rpc SendAppError(SendAppErrorMsg) returns (google.protobuf.Empty); rpc SendAppGossip(SendAppGossipMsg) returns (google.protobuf.Empty); rpc SendAppGossipSpecific(SendAppGossipSpecificMsg) returns (google.protobuf.Empty); rpc SendCrossChainAppRequest(SendCrossChainAppRequestMsg) returns (google.protobuf.Empty); rpc SendCrossChainAppResponse(SendCrossChainAppResponseMsg) returns (google.protobuf.Empty); + rpc SendCrossChainAppError(SendCrossChainAppErrorMsg) returns (google.protobuf.Empty); } message SendAppRequestMsg { @@ -34,6 +36,17 @@ message SendAppResponseMsg { bytes response = 3; } +message SendAppErrorMsg { + // The node to send a response to + bytes node_id = 1; + // ID of this request + uint32 request_id = 2; + // Application-defined error code + sint32 error_code = 3; + // Application-defined error message + string error_message = 4; +} + message SendAppGossipMsg { // The message body bytes msg = 1; @@ -63,3 +76,14 @@ message SendCrossChainAppResponseMsg { // The response body bytes response = 3; } + +message SendCrossChainAppErrorMsg { + // The chain to send a response to + bytes chain_id = 1; + // ID of this request + uint32 request_id = 2; + // Application-defined error code + sint32 error_code = 3; + // Application-defined error message + string error_message = 4; +} diff --git a/avalanchego/proto/buf.md 
b/avalanchego/proto/buf.md deleted file mode 100644 index 42061c01..00000000 --- a/avalanchego/proto/buf.md +++ /dev/null @@ -1 +0,0 @@ -README.md \ No newline at end of file diff --git a/avalanchego/proto/buf.md b/avalanchego/proto/buf.md new file mode 120000 index 00000000..42061c01 --- /dev/null +++ b/avalanchego/proto/buf.md @@ -0,0 +1 @@ +README.md \ No newline at end of file diff --git a/avalanchego/proto/message/tx.proto b/avalanchego/proto/message/tx.proto new file mode 100644 index 00000000..d651e7c0 --- /dev/null +++ b/avalanchego/proto/message/tx.proto @@ -0,0 +1,16 @@ +syntax = "proto3"; + +package message; + +option go_package = "github.com/ava-labs/avalanchego/proto/pb/message"; + +message Message { + oneof message { + Tx tx = 1; + } +} + +message Tx { + // The byte representation of this transaction. + bytes tx = 1; +} diff --git a/avalanchego/proto/p2p/p2p.proto b/avalanchego/proto/p2p/p2p.proto index d6524a18..71a7c4f8 100644 --- a/avalanchego/proto/p2p/p2p.proto +++ b/avalanchego/proto/p2p/p2p.proto @@ -8,6 +8,8 @@ option go_package = "github.com/ava-labs/avalanchego/proto/pb/p2p"; // Represents peer-to-peer messages. // Only one type can be non-null. message Message { + reserved 33; // Until after durango activation. + reserved 36; // Next unused field number. // NOTES // Use "oneof" for each message type and set rest to null if not used. // That is because when the compression is enabled, we don't want to include uncompressed fields. 
@@ -28,7 +30,8 @@ message Message { // Network messages: Ping ping = 11; Pong pong = 12; - Version version = 13; + Handshake handshake = 13; + GetPeerList get_peer_list = 35; PeerList peer_list = 14; // State-sync messages: @@ -56,296 +59,388 @@ message Message { AppRequest app_request = 30; AppResponse app_response = 31; AppGossip app_gossip = 32; - - PeerListAck peer_list_ack = 33; + AppError app_error = 34; } } -// Message that the local node sends to its remote peers, -// in order to periodically check its uptime. +// Ping reports a peer's perceived uptime percentage. // -// On receiving "ping", the remote peer responds with the observed -// uptime value of the message sender in "pong" message. -message Ping {} +// Peers should respond to Ping with a Pong. +message Ping { + // Uptime percentage on the primary network [0, 100] + uint32 uptime = 1; + // Uptime percentage on subnets + repeated SubnetUptime subnet_uptimes = 2; +} -// Contains subnet id and the related observed subnet uptime of the message -// receiver (remote peer). +// SubnetUptime is a descriptor for a peer's perceived uptime on a subnet. message SubnetUptime { + // Subnet the peer is validating bytes subnet_id = 1; + // Uptime percentage on the subnet [0, 100] uint32 uptime = 2; } -// Contains the uptime percentage of the message receiver (remote peer) -// from the sender's point of view, in response to "ping" message. -// Uptimes are expected to be provided as integers ranging in [0, 100]. +// Pong is sent in response to a Ping with the perceived uptime of the +// peer. message Pong { - // uptime is the primary network uptime percentage. + // Deprecated: uptime is now sent in Ping + // Uptime percentage on the primary network [0, 100] uint32 uptime = 1; - // subnet_uptimes contains subnet uptime percentages. 
+ // Deprecated: uptime is now sent in Ping + // Uptime percentage on subnets repeated SubnetUptime subnet_uptimes = 2; } -// The first outbound message that the local node sends to its remote peer -// when the connection is established. In order for the local node to be -// tracked as a valid peer by the remote peer, the fields must be valid. -// For instance, the network ID must be matched and timestamp should be in-sync. -// Otherwise, the remote peer closes the connection. -// ref. "avalanchego/network/peer#handleVersion" -// ref. https://pkg.go.dev/github.com/ava-labs/avalanchego/network#Network "Dispatch" -message Version { +// Handshake is the first outbound message sent to a peer when a connection is +// established to start the p2p handshake. +// +// Peers must respond to a Handshake message with a PeerList message to allow the +// peer to connect to other peers in the network. +// +// Peers should drop connections to peers with incompatible versions. +message Handshake { + // Network the peer is running on (e.g local, testnet, mainnet) uint32 network_id = 1; + // Unix timestamp when this Handshake message was created uint64 my_time = 2; + // IP address of the peer bytes ip_addr = 3; + // IP port of the peer uint32 ip_port = 4; + // Avalanche client version string my_version = 5; - uint64 my_version_time = 6; - bytes sig = 7; + // Timestamp of the IP + uint64 ip_signing_time = 6; + // Signature of the peer IP port pair at a provided timestamp with the TLS + // key. + bytes ip_node_id_sig = 7; + // Subnets the peer is tracking repeated bytes tracked_subnets = 8; + Client client = 9; + repeated uint32 supported_acps = 10; + repeated uint32 objected_acps = 11; + BloomFilter known_peers = 12; + // Signature of the peer IP port pair at a provided timestamp with the BLS + // key. 
+ bytes ip_bls_sig = 13; +} + +// Metadata about a peer's P2P client used to determine compatibility +message Client { + // Client name (e.g avalanchego) + string name = 1; + // Client semantic version + uint32 major = 2; + uint32 minor = 3; + uint32 patch = 4; } -// ref. https://pkg.go.dev/github.com/ava-labs/avalanchego/utils/ips#ClaimedIPPort +// BloomFilter with a random salt to prevent consistent hash collisions +message BloomFilter { + bytes filter = 1; + bytes salt = 2; +} + +// ClaimedIpPort contains metadata needed to connect to a peer message ClaimedIpPort { + // X509 certificate of the peer bytes x509_certificate = 1; + // IP address of the peer bytes ip_addr = 2; + // IP port of the peer uint32 ip_port = 3; + // Timestamp of the IP address + port pair uint64 timestamp = 4; + // Signature of the IP port pair at a provided timestamp bytes signature = 5; + // P-Chain transaction that added this peer to the validator set bytes tx_id = 6; } -// Message that contains a list of peer information (IP, certs, etc.) -// in response to "version" message, and sent periodically to a set of -// validators. -// ref. "avalanchego/network/network#Dispatch.runtTimers" +// GetPeerList contains a bloom filter of the currently known validator IPs. // -// On receiving "peer_list", the engine starts/updates the tracking information -// of the remote peer. -message PeerList { - repeated ClaimedIpPort claimed_ip_ports = 1; +// GetPeerList must not be responded to until finishing the handshake. After the +// handshake is completed, GetPeerlist messages should be responded to with a +// Peerlist message containing validators that are not present in the bloom +// filter. +message GetPeerList { + BloomFilter known_peers = 1; } -// "peer_ack" is sent in response to a "peer_list" message. The "tx_id" should -// correspond to a "tx_id" in the "peer_list" message. 
The sender should set -// "timestamp" to be the latest known timestamp of a signed IP corresponding to -// the nodeID of "tx_id". +// PeerList contains network-level metadata for a set of validators. // -// Upon receipt, the "tx_id" and "timestamp" will determine if the receiptent -// can forgo future gossip of the node's IP to the sender of this message. -message PeerAck { - bytes tx_id = 1; - uint64 timestamp = 2; -} - -// Message that responds to a peer_list message containing the AddValidatorTxIDs -// from the peer_list message that we currently have in our validator set. -message PeerListAck { - reserved 1; // deprecated; used to be tx_ids - - repeated PeerAck peer_acks = 2; +// PeerList must be sent in response to an inbound Handshake message from a +// remote peer a peer wants to connect to. Once a PeerList is received after +// a Handshake message, the p2p handshake is complete and the connection is +// established. +// +// PeerList should be sent in response to a GetPeerlist message if the handshake +// has been completed. 
+message PeerList { + repeated ClaimedIpPort claimed_ip_ports = 1; } +// GetStateSummaryFrontier requests a peer's most recently accepted state +// summary message GetStateSummaryFrontier { + // Chain being requested from bytes chain_id = 1; + // Unique identifier for this request uint32 request_id = 2; + // Timeout (ns) for this request uint64 deadline = 3; } +// StateSummaryFrontier is sent in response to a GetStateSummaryFrontier request message StateSummaryFrontier { + // Chain being responded from bytes chain_id = 1; + // Request id of the original GetStateSummaryFrontier request uint32 request_id = 2; + // The requested state summary bytes summary = 3; } +// GetAcceptedStateSummary requests a set of state summaries at a set of +// block heights message GetAcceptedStateSummary { + // Chain bein requested from bytes chain_id = 1; + // Unique identifier for this request uint32 request_id = 2; + // Timeout (ns) for this request uint64 deadline = 3; + // Heights being requested repeated uint64 heights = 4; } +// AcceptedStateSummary is sent in response to GetAcceptedStateSummary message AcceptedStateSummary { + // Chain being responded from bytes chain_id = 1; + // Request id of the original GetAcceptedStateSummary request uint32 request_id = 2; + // State summary ids repeated bytes summary_ids = 3; } +// The consensus engine that should be used when handling a consensus request. enum EngineType { ENGINE_TYPE_UNSPECIFIED = 0; + // Only the X-Chain uses avalanche consensus ENGINE_TYPE_AVALANCHE = 1; ENGINE_TYPE_SNOWMAN = 2; } -// Message to request for the accepted frontier of the "remote" peer. -// For instance, the accepted frontier of X-chain DAG is the set of -// accepted vertices that do not have any accepted descendants (i.e., frontier). -// -// During bootstrap, the local node sends out "get_accepted_frontier" to validators -// (see "avalanchego/snow/engine/common/bootstrapper.Startup"). -// And the expected response is "accepted_frontier". 
+// GetAcceptedFrontier requests the accepted frontier from a peer. // -// See "snow/engine/common/bootstrapper.go#AcceptedFrontier". +// Peers should respond to GetAcceptedFrontier with AcceptedFrontier. message GetAcceptedFrontier { + // Chain being requested from bytes chain_id = 1; + // Unique identifier for this request uint32 request_id = 2; + // Timeout (ns) for this request uint64 deadline = 3; + // Consensus type the remote peer should use to handle this message EngineType engine_type = 4; } -// Message that contains the list of accepted frontier in response to -// "get_accepted_frontier". For instance, on receiving "get_accepted_frontier", -// the X-chain engine responds with the accepted frontier of X-chain DAG. +// AcceptedFrontier contains the remote peer's last accepted frontier. // -// See "snow/engine/common/bootstrapper.go#AcceptedFrontier". +// AcceptedFrontier is sent in response to GetAcceptedFrontier. message AcceptedFrontier { reserved 4; // Until Cortina upgrade is activated - + // Chain being responded from bytes chain_id = 1; + // Request id of the original GetAcceptedFrontier request uint32 request_id = 2; - repeated bytes container_ids = 3; + // The id of the last accepted frontier + bytes container_id = 3; } -// Message to request for the accepted blocks/vertices of the "remote" peer. -// The local node sends out this message during bootstrap, following "get_accepted_frontier". -// Basically, sending the list of the accepted frontier and expects the response of -// the accepted IDs from the remote peer. +// GetAccepted sends a request with the sender's accepted frontier to a remote +// peer. // -// See "avalanchego/snow/engine/common/bootstrapper.Startup" and "sendGetAccepted". -// See "snow/engine/common/bootstrapper.go#AcceptedFrontier". +// Peers should respond to GetAccepted with an Accepted message. 
message GetAccepted { + // Chain being requested from bytes chain_id = 1; + // Unique identifier for this message uint32 request_id = 2; + // Timeout (ns) for this request uint64 deadline = 3; + // The sender's accepted frontier repeated bytes container_ids = 4; + // Consensus type to handle this message EngineType engine_type = 5; } -// Message that contains the list of accepted block/vertex IDs in response to -// "get_accepted". For instance, on receiving "get_accepted" that contains -// the sender's accepted frontier IDs, the X-chain engine responds only with -// the accepted vertex IDs of the X-chain DAG. -// -// See "snow/engine/avalanche#GetAccepted" and "SendAccepted". -// See "snow/engine/common/bootstrapper.go#Accepted". +// Accepted is sent in response to GetAccepted. The sending peer responds with +// a subset of container ids from the GetAccepted request that the sending peer +// has accepted. message Accepted { reserved 4; // Until Cortina upgrade is activated - + // Chain being responded from bytes chain_id = 1; + // Request id of the original GetAccepted request uint32 request_id = 2; + // Subset of container ids from the GetAccepted request that the sender has + // accepted repeated bytes container_ids = 3; } -// Message that requests for the ancestors (parents) of the specified container ID. -// The engine bootstrapper sends this message to fetch all accepted containers -// in its transitive path. +// GetAncestors requests the ancestors for a given container. // -// On receiving "get_ancestors", it responds with the ancestors' container bytes -// in "ancestors" message. +// The remote peer should respond with an Ancestors message. 
message GetAncestors { + // Chain being requested from bytes chain_id = 1; + // Unique identifier for this request uint32 request_id = 2; + // Timeout (ns) for this request uint64 deadline = 3; + // Container for which ancestors are being requested bytes container_id = 4; + // Consensus type to handle this message EngineType engine_type = 5; } -// Message that contains the container bytes of the ancestors -// in response to "get_ancestors". +// Ancestors is sent in response to GetAncestors. // -// On receiving "ancestors", the engine parses the containers and queues them -// to be accepted once we've received the entire chain history. +// Ancestors contains a contiguous ancestry of containers for the requested +// container in order of increasing block height. message Ancestors { reserved 4; // Until Cortina upgrade is activated - + // Chain being responded from bytes chain_id = 1; + // Request id of the original GetAncestors request uint32 request_id = 2; + // Ancestry for the requested container repeated bytes containers = 3; } -// Message that requests for the container data. +// Get requests a container from a remote peer. // -// On receiving "get", the engine looks up the container from the storage. -// If the container is found, it sends out the container data in "put" message. +// Remote peers should respond with a Put message if they have the container. message Get { + // Chain being requested from bytes chain_id = 1; + // Unique identifier for this request uint32 request_id = 2; + // Timeout (ns) for this request uint64 deadline = 3; + // Container being requested bytes container_id = 4; + // Consensus type to handle this message EngineType engine_type = 5; } -// Message that contains the container ID and its bytes in response to "get". -// -// On receiving "put", the engine parses the container and tries to issue it to consensus. +// Put is sent in response to Get with the requested block. 
message Put { + // Chain being responded from bytes chain_id = 1; + // Request id of the original Get request uint32 request_id = 2; + // Requested container bytes container = 3; + // Consensus type to handle this message EngineType engine_type = 4; } -// Message that contains a preferred container ID and its container bytes -// in order to query other peers for their preferences of the container. -// For example, when a new container is issued, the engine sends out -// "push_query" and "pull_query" queries to ask other peers their preferences. -// See "avalanchego/snow/engine/common#SendMixedQuery". +// PushQuery requests the preferences of a remote peer given a container. // -// On receiving the "push_query", the engine parses the incoming container -// and tries to issue the container and all of its parents to the consensus, -// and calls "pull_query" handler to send "chits" for voting. +// Remote peers should respond to a PushQuery with a Chits message message PushQuery { + // Chain being requested from bytes chain_id = 1; + // Unique identifier for this request uint32 request_id = 2; + // Timeout (ns) for this request uint64 deadline = 3; + // Container being gossiped bytes container = 4; + // Consensus type to handle this message EngineType engine_type = 5; + // Requesting peer's last accepted height + uint64 requested_height = 6; } -// Message that contains a preferred container ID to query other peers -// for their preferences of the container. -// For example, when a new container is issued, the engine sends out -// "push_query" and "pull_query" queries to ask other peers their preferences. -// See "avalanchego/snow/engine/common#SendMixedQuery". +// PullQuery requests the preferences of a remote peer given a container id. 
+// +// Remote peers should respond to a PullQuery with a Chits message message PullQuery { + // Chain being requested from bytes chain_id = 1; + // Unique identifier for this request uint32 request_id = 2; + // Timeout (ns) for this request uint64 deadline = 3; + // Container id being gossiped bytes container_id = 4; + // Consensus type to handle this message EngineType engine_type = 5; + // Requesting peer's last accepted height + uint64 requested_height = 6; } -// Message that contains the votes/preferences of the local node, -// in response to "push_query" or "pull_query" (e.g., preferred frontier). -// -// On receiving "chits", the engine issues those preferred containers of vertices/blocks -// to the consensus. If the received container is not found, it responds back with -// "get" message to fetch the missing container from the remote peer. +// Chits contains the preferences of a peer in response to a PushQuery or +// PullQuery message. message Chits { - reserved 5; // Until Cortina upgrade is activated - + // Chain being responded from bytes chain_id = 1; + // Request id of the original PushQuery/PullQuery request uint32 request_id = 2; - // Represents the current preferred frontier. - // TODO: Remove `repeated` once all chains are running Snowman. - repeated bytes preferred_container_ids = 3; - // Represents the current accepted frontier. - // TODO: Remove `repeated` once all chains are running Snowman. - repeated bytes accepted_container_ids = 4; + // Currently preferred block + bytes preferred_id = 3; + // Last accepted block + bytes accepted_id = 4; + // Currently preferred block at the requested height + bytes preferred_id_at_height = 5; } +// AppRequest is a VM-defined request. 
+// +// Remote peers must respond to AppRequest with a corresponding AppResponse or +// AppError message AppRequest { + // Chain being requested from bytes chain_id = 1; + // Unique identifier for this request uint32 request_id = 2; + // Timeout (ns) for this request uint64 deadline = 3; + // Request body bytes app_bytes = 4; } +// AppResponse is a VM-defined response sent in response to AppRequest message AppResponse { + // Chain being responded from bytes chain_id = 1; + // Request id of the original AppRequest uint32 request_id = 2; + // Response body bytes app_bytes = 3; } +// AppError is a VM-defined error sent in response to AppRequest +message AppError { + // Chain the message is for + bytes chain_id = 1; + // Request id of the original AppRequest + uint32 request_id = 2; + // VM defined error code. VMs may define error codes > 0. + sint32 error_code = 3; + // VM defined error message + string error_message = 4; +} + +// AppGossip is a VM-defined message message AppGossip { + // Chain the message is for bytes chain_id = 1; + // Message body bytes app_bytes = 2; } diff --git a/avalanchego/proto/pb/aliasreader/aliasreader.pb.go b/avalanchego/proto/pb/aliasreader/aliasreader.pb.go index 56886860..20084292 100644 --- a/avalanchego/proto/pb/aliasreader/aliasreader.pb.go +++ b/avalanchego/proto/pb/aliasreader/aliasreader.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc (unknown) // source: aliasreader/aliasreader.proto diff --git a/avalanchego/proto/pb/aliasreader/aliasreader_grpc.pb.go b/avalanchego/proto/pb/aliasreader/aliasreader_grpc.pb.go index 030e6851..9c423873 100644 --- a/avalanchego/proto/pb/aliasreader/aliasreader_grpc.pb.go +++ b/avalanchego/proto/pb/aliasreader/aliasreader_grpc.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
// versions: -// - protoc-gen-go-grpc v1.2.0 +// - protoc-gen-go-grpc v1.3.0 // - protoc (unknown) // source: aliasreader/aliasreader.proto @@ -18,6 +18,12 @@ import ( // Requires gRPC-Go v1.32.0 or later. const _ = grpc.SupportPackageIsVersion7 +const ( + AliasReader_Lookup_FullMethodName = "/aliasreader.AliasReader/Lookup" + AliasReader_PrimaryAlias_FullMethodName = "/aliasreader.AliasReader/PrimaryAlias" + AliasReader_Aliases_FullMethodName = "/aliasreader.AliasReader/Aliases" +) + // AliasReaderClient is the client API for AliasReader service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. @@ -37,7 +43,7 @@ func NewAliasReaderClient(cc grpc.ClientConnInterface) AliasReaderClient { func (c *aliasReaderClient) Lookup(ctx context.Context, in *Alias, opts ...grpc.CallOption) (*ID, error) { out := new(ID) - err := c.cc.Invoke(ctx, "/aliasreader.AliasReader/Lookup", in, out, opts...) + err := c.cc.Invoke(ctx, AliasReader_Lookup_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -46,7 +52,7 @@ func (c *aliasReaderClient) Lookup(ctx context.Context, in *Alias, opts ...grpc. func (c *aliasReaderClient) PrimaryAlias(ctx context.Context, in *ID, opts ...grpc.CallOption) (*Alias, error) { out := new(Alias) - err := c.cc.Invoke(ctx, "/aliasreader.AliasReader/PrimaryAlias", in, out, opts...) + err := c.cc.Invoke(ctx, AliasReader_PrimaryAlias_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -55,7 +61,7 @@ func (c *aliasReaderClient) PrimaryAlias(ctx context.Context, in *ID, opts ...gr func (c *aliasReaderClient) Aliases(ctx context.Context, in *ID, opts ...grpc.CallOption) (*AliasList, error) { out := new(AliasList) - err := c.cc.Invoke(ctx, "/aliasreader.AliasReader/Aliases", in, out, opts...) + err := c.cc.Invoke(ctx, AliasReader_Aliases_FullMethodName, in, out, opts...) 
if err != nil { return nil, err } @@ -108,7 +114,7 @@ func _AliasReader_Lookup_Handler(srv interface{}, ctx context.Context, dec func( } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/aliasreader.AliasReader/Lookup", + FullMethod: AliasReader_Lookup_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(AliasReaderServer).Lookup(ctx, req.(*Alias)) @@ -126,7 +132,7 @@ func _AliasReader_PrimaryAlias_Handler(srv interface{}, ctx context.Context, dec } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/aliasreader.AliasReader/PrimaryAlias", + FullMethod: AliasReader_PrimaryAlias_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(AliasReaderServer).PrimaryAlias(ctx, req.(*ID)) @@ -144,7 +150,7 @@ func _AliasReader_Aliases_Handler(srv interface{}, ctx context.Context, dec func } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/aliasreader.AliasReader/Aliases", + FullMethod: AliasReader_Aliases_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(AliasReaderServer).Aliases(ctx, req.(*ID)) diff --git a/avalanchego/proto/pb/appsender/appsender.pb.go b/avalanchego/proto/pb/appsender/appsender.pb.go index 609d30a1..416faab9 100644 --- a/avalanchego/proto/pb/appsender/appsender.pb.go +++ b/avalanchego/proto/pb/appsender/appsender.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc (unknown) // source: appsender/appsender.proto @@ -153,6 +153,81 @@ func (x *SendAppResponseMsg) GetResponse() []byte { return nil } +type SendAppErrorMsg struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The node to send a response to + NodeId []byte `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` + // ID of this request + RequestId uint32 `protobuf:"varint,2,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + // Application-defined error code + ErrorCode int32 `protobuf:"zigzag32,3,opt,name=error_code,json=errorCode,proto3" json:"error_code,omitempty"` + // Application-defined error message + ErrorMessage string `protobuf:"bytes,4,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` +} + +func (x *SendAppErrorMsg) Reset() { + *x = SendAppErrorMsg{} + if protoimpl.UnsafeEnabled { + mi := &file_appsender_appsender_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SendAppErrorMsg) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SendAppErrorMsg) ProtoMessage() {} + +func (x *SendAppErrorMsg) ProtoReflect() protoreflect.Message { + mi := &file_appsender_appsender_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SendAppErrorMsg.ProtoReflect.Descriptor instead. 
+func (*SendAppErrorMsg) Descriptor() ([]byte, []int) { + return file_appsender_appsender_proto_rawDescGZIP(), []int{2} +} + +func (x *SendAppErrorMsg) GetNodeId() []byte { + if x != nil { + return x.NodeId + } + return nil +} + +func (x *SendAppErrorMsg) GetRequestId() uint32 { + if x != nil { + return x.RequestId + } + return 0 +} + +func (x *SendAppErrorMsg) GetErrorCode() int32 { + if x != nil { + return x.ErrorCode + } + return 0 +} + +func (x *SendAppErrorMsg) GetErrorMessage() string { + if x != nil { + return x.ErrorMessage + } + return "" +} + type SendAppGossipMsg struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -165,7 +240,7 @@ type SendAppGossipMsg struct { func (x *SendAppGossipMsg) Reset() { *x = SendAppGossipMsg{} if protoimpl.UnsafeEnabled { - mi := &file_appsender_appsender_proto_msgTypes[2] + mi := &file_appsender_appsender_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -178,7 +253,7 @@ func (x *SendAppGossipMsg) String() string { func (*SendAppGossipMsg) ProtoMessage() {} func (x *SendAppGossipMsg) ProtoReflect() protoreflect.Message { - mi := &file_appsender_appsender_proto_msgTypes[2] + mi := &file_appsender_appsender_proto_msgTypes[3] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -191,7 +266,7 @@ func (x *SendAppGossipMsg) ProtoReflect() protoreflect.Message { // Deprecated: Use SendAppGossipMsg.ProtoReflect.Descriptor instead. 
func (*SendAppGossipMsg) Descriptor() ([]byte, []int) { - return file_appsender_appsender_proto_rawDescGZIP(), []int{2} + return file_appsender_appsender_proto_rawDescGZIP(), []int{3} } func (x *SendAppGossipMsg) GetMsg() []byte { @@ -215,7 +290,7 @@ type SendAppGossipSpecificMsg struct { func (x *SendAppGossipSpecificMsg) Reset() { *x = SendAppGossipSpecificMsg{} if protoimpl.UnsafeEnabled { - mi := &file_appsender_appsender_proto_msgTypes[3] + mi := &file_appsender_appsender_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -228,7 +303,7 @@ func (x *SendAppGossipSpecificMsg) String() string { func (*SendAppGossipSpecificMsg) ProtoMessage() {} func (x *SendAppGossipSpecificMsg) ProtoReflect() protoreflect.Message { - mi := &file_appsender_appsender_proto_msgTypes[3] + mi := &file_appsender_appsender_proto_msgTypes[4] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -241,7 +316,7 @@ func (x *SendAppGossipSpecificMsg) ProtoReflect() protoreflect.Message { // Deprecated: Use SendAppGossipSpecificMsg.ProtoReflect.Descriptor instead. 
func (*SendAppGossipSpecificMsg) Descriptor() ([]byte, []int) { - return file_appsender_appsender_proto_rawDescGZIP(), []int{3} + return file_appsender_appsender_proto_rawDescGZIP(), []int{4} } func (x *SendAppGossipSpecificMsg) GetNodeIds() [][]byte { @@ -274,7 +349,7 @@ type SendCrossChainAppRequestMsg struct { func (x *SendCrossChainAppRequestMsg) Reset() { *x = SendCrossChainAppRequestMsg{} if protoimpl.UnsafeEnabled { - mi := &file_appsender_appsender_proto_msgTypes[4] + mi := &file_appsender_appsender_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -287,7 +362,7 @@ func (x *SendCrossChainAppRequestMsg) String() string { func (*SendCrossChainAppRequestMsg) ProtoMessage() {} func (x *SendCrossChainAppRequestMsg) ProtoReflect() protoreflect.Message { - mi := &file_appsender_appsender_proto_msgTypes[4] + mi := &file_appsender_appsender_proto_msgTypes[5] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -300,7 +375,7 @@ func (x *SendCrossChainAppRequestMsg) ProtoReflect() protoreflect.Message { // Deprecated: Use SendCrossChainAppRequestMsg.ProtoReflect.Descriptor instead. 
func (*SendCrossChainAppRequestMsg) Descriptor() ([]byte, []int) { - return file_appsender_appsender_proto_rawDescGZIP(), []int{4} + return file_appsender_appsender_proto_rawDescGZIP(), []int{5} } func (x *SendCrossChainAppRequestMsg) GetChainId() []byte { @@ -340,7 +415,7 @@ type SendCrossChainAppResponseMsg struct { func (x *SendCrossChainAppResponseMsg) Reset() { *x = SendCrossChainAppResponseMsg{} if protoimpl.UnsafeEnabled { - mi := &file_appsender_appsender_proto_msgTypes[5] + mi := &file_appsender_appsender_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -353,7 +428,7 @@ func (x *SendCrossChainAppResponseMsg) String() string { func (*SendCrossChainAppResponseMsg) ProtoMessage() {} func (x *SendCrossChainAppResponseMsg) ProtoReflect() protoreflect.Message { - mi := &file_appsender_appsender_proto_msgTypes[5] + mi := &file_appsender_appsender_proto_msgTypes[6] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -366,7 +441,7 @@ func (x *SendCrossChainAppResponseMsg) ProtoReflect() protoreflect.Message { // Deprecated: Use SendCrossChainAppResponseMsg.ProtoReflect.Descriptor instead. 
func (*SendCrossChainAppResponseMsg) Descriptor() ([]byte, []int) { - return file_appsender_appsender_proto_rawDescGZIP(), []int{5} + return file_appsender_appsender_proto_rawDescGZIP(), []int{6} } func (x *SendCrossChainAppResponseMsg) GetChainId() []byte { @@ -390,6 +465,81 @@ func (x *SendCrossChainAppResponseMsg) GetResponse() []byte { return nil } +type SendCrossChainAppErrorMsg struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The chain to send a response to + ChainId []byte `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + // ID of this request + RequestId uint32 `protobuf:"varint,2,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + // Application-defined error code + ErrorCode int32 `protobuf:"zigzag32,3,opt,name=error_code,json=errorCode,proto3" json:"error_code,omitempty"` + // Application-defined error message + ErrorMessage string `protobuf:"bytes,4,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` +} + +func (x *SendCrossChainAppErrorMsg) Reset() { + *x = SendCrossChainAppErrorMsg{} + if protoimpl.UnsafeEnabled { + mi := &file_appsender_appsender_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SendCrossChainAppErrorMsg) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SendCrossChainAppErrorMsg) ProtoMessage() {} + +func (x *SendCrossChainAppErrorMsg) ProtoReflect() protoreflect.Message { + mi := &file_appsender_appsender_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SendCrossChainAppErrorMsg.ProtoReflect.Descriptor instead. 
+func (*SendCrossChainAppErrorMsg) Descriptor() ([]byte, []int) { + return file_appsender_appsender_proto_rawDescGZIP(), []int{7} +} + +func (x *SendCrossChainAppErrorMsg) GetChainId() []byte { + if x != nil { + return x.ChainId + } + return nil +} + +func (x *SendCrossChainAppErrorMsg) GetRequestId() uint32 { + if x != nil { + return x.RequestId + } + return 0 +} + +func (x *SendCrossChainAppErrorMsg) GetErrorCode() int32 { + if x != nil { + return x.ErrorCode + } + return 0 +} + +func (x *SendCrossChainAppErrorMsg) GetErrorMessage() string { + if x != nil { + return x.ErrorMessage + } + return "" +} + var File_appsender_appsender_proto protoreflect.FileDescriptor var file_appsender_appsender_proto_rawDesc = []byte{ @@ -410,7 +560,16 @@ var file_appsender_appsender_proto_rawDesc = []byte{ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x72, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x24, 0x0a, 0x10, 0x53, 0x65, 0x6e, 0x64, 0x41, 0x70, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x8d, 0x01, 0x0a, 0x0f, 0x53, 0x65, 0x6e, 0x64, 0x41, + 0x70, 0x70, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x73, 0x67, 0x12, 0x17, 0x0a, 0x07, 0x6e, 0x6f, + 0x64, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x6e, 0x6f, 0x64, + 0x65, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, + 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x63, 0x6f, 0x64, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x11, 0x52, 0x09, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x64, + 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x18, 0x04, 
0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x24, 0x0a, 0x10, 0x53, 0x65, 0x6e, 0x64, 0x41, 0x70, 0x70, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x4d, 0x73, 0x67, 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x73, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6d, 0x73, 0x67, 0x22, 0x47, 0x0a, 0x18, 0x53, 0x65, 0x6e, 0x64, 0x41, 0x70, 0x70, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x53, 0x70, 0x65, @@ -431,16 +590,30 @@ var file_appsender_appsender_proto_rawDesc = []byte{ 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0xf3, - 0x03, 0x0a, 0x09, 0x41, 0x70, 0x70, 0x53, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x12, 0x46, 0x0a, 0x0e, - 0x53, 0x65, 0x6e, 0x64, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, - 0x2e, 0x61, 0x70, 0x70, 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x41, - 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x73, 0x67, 0x1a, 0x16, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, - 0x6d, 0x70, 0x74, 0x79, 0x12, 0x48, 0x0a, 0x0f, 0x53, 0x65, 0x6e, 0x64, 0x41, 0x70, 0x70, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x2e, 0x61, 0x70, 0x70, 0x73, 0x65, 0x6e, - 0x64, 0x65, 0x72, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x41, 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x4d, 0x73, 0x67, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x99, + 0x01, 0x0a, 0x19, 0x53, 0x65, 0x6e, 0x64, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, 0x68, 0x61, 0x69, + 
0x6e, 0x41, 0x70, 0x70, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x73, 0x67, 0x12, 0x19, 0x0a, 0x08, + 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, + 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, + 0x63, 0x6f, 0x64, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x11, 0x52, 0x09, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x32, 0x8f, 0x05, 0x0a, 0x09, 0x41, + 0x70, 0x70, 0x53, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x12, 0x46, 0x0a, 0x0e, 0x53, 0x65, 0x6e, 0x64, + 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x2e, 0x61, 0x70, 0x70, + 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x41, 0x70, 0x70, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x73, 0x67, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, + 0x12, 0x48, 0x0a, 0x0f, 0x53, 0x65, 0x6e, 0x64, 0x41, 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x2e, 0x61, 0x70, 0x70, 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x2e, + 0x53, 0x65, 0x6e, 0x64, 0x41, 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, + 0x73, 0x67, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x42, 0x0a, 0x0c, 0x53, 0x65, + 0x6e, 0x64, 0x41, 0x70, 0x70, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x1a, 0x2e, 0x61, 0x70, 0x70, + 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 
0x2e, 0x53, 0x65, 0x6e, 0x64, 0x41, 0x70, 0x70, 0x45, 0x72, + 0x72, 0x6f, 0x72, 0x4d, 0x73, 0x67, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x44, 0x0a, 0x0d, 0x53, 0x65, 0x6e, 0x64, 0x41, 0x70, 0x70, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x12, 0x1b, 0x2e, 0x61, 0x70, 0x70, 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x2e, 0x53, 0x65, 0x6e, 0x64, @@ -463,11 +636,16 @@ var file_appsender_appsender_proto_rawDesc = []byte{ 0x53, 0x65, 0x6e, 0x64, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x41, 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x73, 0x67, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, - 0x6d, 0x70, 0x74, 0x79, 0x42, 0x34, 0x5a, 0x32, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, - 0x6f, 0x6d, 0x2f, 0x61, 0x76, 0x61, 0x2d, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x61, 0x76, 0x61, 0x6c, - 0x61, 0x6e, 0x63, 0x68, 0x65, 0x67, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, - 0x2f, 0x61, 0x70, 0x70, 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, + 0x6d, 0x70, 0x74, 0x79, 0x12, 0x56, 0x0a, 0x16, 0x53, 0x65, 0x6e, 0x64, 0x43, 0x72, 0x6f, 0x73, + 0x73, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x41, 0x70, 0x70, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x24, + 0x2e, 0x61, 0x70, 0x70, 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x43, + 0x72, 0x6f, 0x73, 0x73, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x41, 0x70, 0x70, 0x45, 0x72, 0x72, 0x6f, + 0x72, 0x4d, 0x73, 0x67, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x42, 0x34, 0x5a, 0x32, + 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x76, 0x61, 0x2d, 0x6c, + 0x61, 0x62, 0x73, 0x2f, 0x61, 0x76, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x68, 0x65, 0x67, 0x6f, 0x2f, 
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x2f, 0x61, 0x70, 0x70, 0x73, 0x65, 0x6e, 0x64, + 0x65, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -482,31 +660,37 @@ func file_appsender_appsender_proto_rawDescGZIP() []byte { return file_appsender_appsender_proto_rawDescData } -var file_appsender_appsender_proto_msgTypes = make([]protoimpl.MessageInfo, 6) +var file_appsender_appsender_proto_msgTypes = make([]protoimpl.MessageInfo, 8) var file_appsender_appsender_proto_goTypes = []interface{}{ (*SendAppRequestMsg)(nil), // 0: appsender.SendAppRequestMsg (*SendAppResponseMsg)(nil), // 1: appsender.SendAppResponseMsg - (*SendAppGossipMsg)(nil), // 2: appsender.SendAppGossipMsg - (*SendAppGossipSpecificMsg)(nil), // 3: appsender.SendAppGossipSpecificMsg - (*SendCrossChainAppRequestMsg)(nil), // 4: appsender.SendCrossChainAppRequestMsg - (*SendCrossChainAppResponseMsg)(nil), // 5: appsender.SendCrossChainAppResponseMsg - (*emptypb.Empty)(nil), // 6: google.protobuf.Empty + (*SendAppErrorMsg)(nil), // 2: appsender.SendAppErrorMsg + (*SendAppGossipMsg)(nil), // 3: appsender.SendAppGossipMsg + (*SendAppGossipSpecificMsg)(nil), // 4: appsender.SendAppGossipSpecificMsg + (*SendCrossChainAppRequestMsg)(nil), // 5: appsender.SendCrossChainAppRequestMsg + (*SendCrossChainAppResponseMsg)(nil), // 6: appsender.SendCrossChainAppResponseMsg + (*SendCrossChainAppErrorMsg)(nil), // 7: appsender.SendCrossChainAppErrorMsg + (*emptypb.Empty)(nil), // 8: google.protobuf.Empty } var file_appsender_appsender_proto_depIdxs = []int32{ 0, // 0: appsender.AppSender.SendAppRequest:input_type -> appsender.SendAppRequestMsg 1, // 1: appsender.AppSender.SendAppResponse:input_type -> appsender.SendAppResponseMsg - 2, // 2: appsender.AppSender.SendAppGossip:input_type -> appsender.SendAppGossipMsg - 3, // 3: appsender.AppSender.SendAppGossipSpecific:input_type -> appsender.SendAppGossipSpecificMsg - 4, // 4: appsender.AppSender.SendCrossChainAppRequest:input_type -> 
appsender.SendCrossChainAppRequestMsg - 5, // 5: appsender.AppSender.SendCrossChainAppResponse:input_type -> appsender.SendCrossChainAppResponseMsg - 6, // 6: appsender.AppSender.SendAppRequest:output_type -> google.protobuf.Empty - 6, // 7: appsender.AppSender.SendAppResponse:output_type -> google.protobuf.Empty - 6, // 8: appsender.AppSender.SendAppGossip:output_type -> google.protobuf.Empty - 6, // 9: appsender.AppSender.SendAppGossipSpecific:output_type -> google.protobuf.Empty - 6, // 10: appsender.AppSender.SendCrossChainAppRequest:output_type -> google.protobuf.Empty - 6, // 11: appsender.AppSender.SendCrossChainAppResponse:output_type -> google.protobuf.Empty - 6, // [6:12] is the sub-list for method output_type - 0, // [0:6] is the sub-list for method input_type + 2, // 2: appsender.AppSender.SendAppError:input_type -> appsender.SendAppErrorMsg + 3, // 3: appsender.AppSender.SendAppGossip:input_type -> appsender.SendAppGossipMsg + 4, // 4: appsender.AppSender.SendAppGossipSpecific:input_type -> appsender.SendAppGossipSpecificMsg + 5, // 5: appsender.AppSender.SendCrossChainAppRequest:input_type -> appsender.SendCrossChainAppRequestMsg + 6, // 6: appsender.AppSender.SendCrossChainAppResponse:input_type -> appsender.SendCrossChainAppResponseMsg + 7, // 7: appsender.AppSender.SendCrossChainAppError:input_type -> appsender.SendCrossChainAppErrorMsg + 8, // 8: appsender.AppSender.SendAppRequest:output_type -> google.protobuf.Empty + 8, // 9: appsender.AppSender.SendAppResponse:output_type -> google.protobuf.Empty + 8, // 10: appsender.AppSender.SendAppError:output_type -> google.protobuf.Empty + 8, // 11: appsender.AppSender.SendAppGossip:output_type -> google.protobuf.Empty + 8, // 12: appsender.AppSender.SendAppGossipSpecific:output_type -> google.protobuf.Empty + 8, // 13: appsender.AppSender.SendCrossChainAppRequest:output_type -> google.protobuf.Empty + 8, // 14: appsender.AppSender.SendCrossChainAppResponse:output_type -> google.protobuf.Empty + 8, // 15: 
appsender.AppSender.SendCrossChainAppError:output_type -> google.protobuf.Empty + 8, // [8:16] is the sub-list for method output_type + 0, // [0:8] is the sub-list for method input_type 0, // [0:0] is the sub-list for extension type_name 0, // [0:0] is the sub-list for extension extendee 0, // [0:0] is the sub-list for field type_name @@ -543,7 +727,7 @@ func file_appsender_appsender_proto_init() { } } file_appsender_appsender_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SendAppGossipMsg); i { + switch v := v.(*SendAppErrorMsg); i { case 0: return &v.state case 1: @@ -555,7 +739,7 @@ func file_appsender_appsender_proto_init() { } } file_appsender_appsender_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SendAppGossipSpecificMsg); i { + switch v := v.(*SendAppGossipMsg); i { case 0: return &v.state case 1: @@ -567,7 +751,7 @@ func file_appsender_appsender_proto_init() { } } file_appsender_appsender_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SendCrossChainAppRequestMsg); i { + switch v := v.(*SendAppGossipSpecificMsg); i { case 0: return &v.state case 1: @@ -579,6 +763,18 @@ func file_appsender_appsender_proto_init() { } } file_appsender_appsender_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SendCrossChainAppRequestMsg); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_appsender_appsender_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*SendCrossChainAppResponseMsg); i { case 0: return &v.state @@ -590,6 +786,18 @@ func file_appsender_appsender_proto_init() { return nil } } + file_appsender_appsender_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SendCrossChainAppErrorMsg); i { + case 0: + return &v.state + case 1: + return 
&v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } type x struct{} out := protoimpl.TypeBuilder{ @@ -597,7 +805,7 @@ func file_appsender_appsender_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_appsender_appsender_proto_rawDesc, NumEnums: 0, - NumMessages: 6, + NumMessages: 8, NumExtensions: 0, NumServices: 1, }, diff --git a/avalanchego/proto/pb/appsender/appsender_grpc.pb.go b/avalanchego/proto/pb/appsender/appsender_grpc.pb.go index 4ef890e6..6873c776 100644 --- a/avalanchego/proto/pb/appsender/appsender_grpc.pb.go +++ b/avalanchego/proto/pb/appsender/appsender_grpc.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.2.0 +// - protoc-gen-go-grpc v1.3.0 // - protoc (unknown) // source: appsender/appsender.proto @@ -19,16 +19,29 @@ import ( // Requires gRPC-Go v1.32.0 or later. const _ = grpc.SupportPackageIsVersion7 +const ( + AppSender_SendAppRequest_FullMethodName = "/appsender.AppSender/SendAppRequest" + AppSender_SendAppResponse_FullMethodName = "/appsender.AppSender/SendAppResponse" + AppSender_SendAppError_FullMethodName = "/appsender.AppSender/SendAppError" + AppSender_SendAppGossip_FullMethodName = "/appsender.AppSender/SendAppGossip" + AppSender_SendAppGossipSpecific_FullMethodName = "/appsender.AppSender/SendAppGossipSpecific" + AppSender_SendCrossChainAppRequest_FullMethodName = "/appsender.AppSender/SendCrossChainAppRequest" + AppSender_SendCrossChainAppResponse_FullMethodName = "/appsender.AppSender/SendCrossChainAppResponse" + AppSender_SendCrossChainAppError_FullMethodName = "/appsender.AppSender/SendCrossChainAppError" +) + // AppSenderClient is the client API for AppSender service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
type AppSenderClient interface { SendAppRequest(ctx context.Context, in *SendAppRequestMsg, opts ...grpc.CallOption) (*emptypb.Empty, error) SendAppResponse(ctx context.Context, in *SendAppResponseMsg, opts ...grpc.CallOption) (*emptypb.Empty, error) + SendAppError(ctx context.Context, in *SendAppErrorMsg, opts ...grpc.CallOption) (*emptypb.Empty, error) SendAppGossip(ctx context.Context, in *SendAppGossipMsg, opts ...grpc.CallOption) (*emptypb.Empty, error) SendAppGossipSpecific(ctx context.Context, in *SendAppGossipSpecificMsg, opts ...grpc.CallOption) (*emptypb.Empty, error) SendCrossChainAppRequest(ctx context.Context, in *SendCrossChainAppRequestMsg, opts ...grpc.CallOption) (*emptypb.Empty, error) SendCrossChainAppResponse(ctx context.Context, in *SendCrossChainAppResponseMsg, opts ...grpc.CallOption) (*emptypb.Empty, error) + SendCrossChainAppError(ctx context.Context, in *SendCrossChainAppErrorMsg, opts ...grpc.CallOption) (*emptypb.Empty, error) } type appSenderClient struct { @@ -41,7 +54,7 @@ func NewAppSenderClient(cc grpc.ClientConnInterface) AppSenderClient { func (c *appSenderClient) SendAppRequest(ctx context.Context, in *SendAppRequestMsg, opts ...grpc.CallOption) (*emptypb.Empty, error) { out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/appsender.AppSender/SendAppRequest", in, out, opts...) + err := c.cc.Invoke(ctx, AppSender_SendAppRequest_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -50,7 +63,16 @@ func (c *appSenderClient) SendAppRequest(ctx context.Context, in *SendAppRequest func (c *appSenderClient) SendAppResponse(ctx context.Context, in *SendAppResponseMsg, opts ...grpc.CallOption) (*emptypb.Empty, error) { out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/appsender.AppSender/SendAppResponse", in, out, opts...) + err := c.cc.Invoke(ctx, AppSender_SendAppResponse_FullMethodName, in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *appSenderClient) SendAppError(ctx context.Context, in *SendAppErrorMsg, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, AppSender_SendAppError_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -59,7 +81,7 @@ func (c *appSenderClient) SendAppResponse(ctx context.Context, in *SendAppRespon func (c *appSenderClient) SendAppGossip(ctx context.Context, in *SendAppGossipMsg, opts ...grpc.CallOption) (*emptypb.Empty, error) { out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/appsender.AppSender/SendAppGossip", in, out, opts...) + err := c.cc.Invoke(ctx, AppSender_SendAppGossip_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -68,7 +90,7 @@ func (c *appSenderClient) SendAppGossip(ctx context.Context, in *SendAppGossipMs func (c *appSenderClient) SendAppGossipSpecific(ctx context.Context, in *SendAppGossipSpecificMsg, opts ...grpc.CallOption) (*emptypb.Empty, error) { out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/appsender.AppSender/SendAppGossipSpecific", in, out, opts...) + err := c.cc.Invoke(ctx, AppSender_SendAppGossipSpecific_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -77,7 +99,7 @@ func (c *appSenderClient) SendAppGossipSpecific(ctx context.Context, in *SendApp func (c *appSenderClient) SendCrossChainAppRequest(ctx context.Context, in *SendCrossChainAppRequestMsg, opts ...grpc.CallOption) (*emptypb.Empty, error) { out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/appsender.AppSender/SendCrossChainAppRequest", in, out, opts...) + err := c.cc.Invoke(ctx, AppSender_SendCrossChainAppRequest_FullMethodName, in, out, opts...) 
if err != nil { return nil, err } @@ -86,7 +108,16 @@ func (c *appSenderClient) SendCrossChainAppRequest(ctx context.Context, in *Send func (c *appSenderClient) SendCrossChainAppResponse(ctx context.Context, in *SendCrossChainAppResponseMsg, opts ...grpc.CallOption) (*emptypb.Empty, error) { out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/appsender.AppSender/SendCrossChainAppResponse", in, out, opts...) + err := c.cc.Invoke(ctx, AppSender_SendCrossChainAppResponse_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *appSenderClient) SendCrossChainAppError(ctx context.Context, in *SendCrossChainAppErrorMsg, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, AppSender_SendCrossChainAppError_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -99,10 +130,12 @@ func (c *appSenderClient) SendCrossChainAppResponse(ctx context.Context, in *Sen type AppSenderServer interface { SendAppRequest(context.Context, *SendAppRequestMsg) (*emptypb.Empty, error) SendAppResponse(context.Context, *SendAppResponseMsg) (*emptypb.Empty, error) + SendAppError(context.Context, *SendAppErrorMsg) (*emptypb.Empty, error) SendAppGossip(context.Context, *SendAppGossipMsg) (*emptypb.Empty, error) SendAppGossipSpecific(context.Context, *SendAppGossipSpecificMsg) (*emptypb.Empty, error) SendCrossChainAppRequest(context.Context, *SendCrossChainAppRequestMsg) (*emptypb.Empty, error) SendCrossChainAppResponse(context.Context, *SendCrossChainAppResponseMsg) (*emptypb.Empty, error) + SendCrossChainAppError(context.Context, *SendCrossChainAppErrorMsg) (*emptypb.Empty, error) mustEmbedUnimplementedAppSenderServer() } @@ -116,6 +149,9 @@ func (UnimplementedAppSenderServer) SendAppRequest(context.Context, *SendAppRequ func (UnimplementedAppSenderServer) SendAppResponse(context.Context, *SendAppResponseMsg) (*emptypb.Empty, error) { return nil, 
status.Errorf(codes.Unimplemented, "method SendAppResponse not implemented") } +func (UnimplementedAppSenderServer) SendAppError(context.Context, *SendAppErrorMsg) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method SendAppError not implemented") +} func (UnimplementedAppSenderServer) SendAppGossip(context.Context, *SendAppGossipMsg) (*emptypb.Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method SendAppGossip not implemented") } @@ -128,6 +164,9 @@ func (UnimplementedAppSenderServer) SendCrossChainAppRequest(context.Context, *S func (UnimplementedAppSenderServer) SendCrossChainAppResponse(context.Context, *SendCrossChainAppResponseMsg) (*emptypb.Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method SendCrossChainAppResponse not implemented") } +func (UnimplementedAppSenderServer) SendCrossChainAppError(context.Context, *SendCrossChainAppErrorMsg) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method SendCrossChainAppError not implemented") +} func (UnimplementedAppSenderServer) mustEmbedUnimplementedAppSenderServer() {} // UnsafeAppSenderServer may be embedded to opt out of forward compatibility for this service. 
@@ -151,7 +190,7 @@ func _AppSender_SendAppRequest_Handler(srv interface{}, ctx context.Context, dec } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/appsender.AppSender/SendAppRequest", + FullMethod: AppSender_SendAppRequest_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(AppSenderServer).SendAppRequest(ctx, req.(*SendAppRequestMsg)) @@ -169,7 +208,7 @@ func _AppSender_SendAppResponse_Handler(srv interface{}, ctx context.Context, de } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/appsender.AppSender/SendAppResponse", + FullMethod: AppSender_SendAppResponse_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(AppSenderServer).SendAppResponse(ctx, req.(*SendAppResponseMsg)) @@ -177,6 +216,24 @@ func _AppSender_SendAppResponse_Handler(srv interface{}, ctx context.Context, de return interceptor(ctx, in, info, handler) } +func _AppSender_SendAppError_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SendAppErrorMsg) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AppSenderServer).SendAppError(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: AppSender_SendAppError_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AppSenderServer).SendAppError(ctx, req.(*SendAppErrorMsg)) + } + return interceptor(ctx, in, info, handler) +} + func _AppSender_SendAppGossip_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(SendAppGossipMsg) if err := dec(in); err != nil { @@ -187,7 +244,7 @@ func _AppSender_SendAppGossip_Handler(srv interface{}, ctx context.Context, dec } info := &grpc.UnaryServerInfo{ Server: srv, - 
FullMethod: "/appsender.AppSender/SendAppGossip", + FullMethod: AppSender_SendAppGossip_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(AppSenderServer).SendAppGossip(ctx, req.(*SendAppGossipMsg)) @@ -205,7 +262,7 @@ func _AppSender_SendAppGossipSpecific_Handler(srv interface{}, ctx context.Conte } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/appsender.AppSender/SendAppGossipSpecific", + FullMethod: AppSender_SendAppGossipSpecific_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(AppSenderServer).SendAppGossipSpecific(ctx, req.(*SendAppGossipSpecificMsg)) @@ -223,7 +280,7 @@ func _AppSender_SendCrossChainAppRequest_Handler(srv interface{}, ctx context.Co } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/appsender.AppSender/SendCrossChainAppRequest", + FullMethod: AppSender_SendCrossChainAppRequest_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(AppSenderServer).SendCrossChainAppRequest(ctx, req.(*SendCrossChainAppRequestMsg)) @@ -241,7 +298,7 @@ func _AppSender_SendCrossChainAppResponse_Handler(srv interface{}, ctx context.C } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/appsender.AppSender/SendCrossChainAppResponse", + FullMethod: AppSender_SendCrossChainAppResponse_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(AppSenderServer).SendCrossChainAppResponse(ctx, req.(*SendCrossChainAppResponseMsg)) @@ -249,6 +306,24 @@ func _AppSender_SendCrossChainAppResponse_Handler(srv interface{}, ctx context.C return interceptor(ctx, in, info, handler) } +func _AppSender_SendCrossChainAppError_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SendCrossChainAppErrorMsg) + if err := dec(in); err != nil { + 
return nil, err + } + if interceptor == nil { + return srv.(AppSenderServer).SendCrossChainAppError(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: AppSender_SendCrossChainAppError_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AppSenderServer).SendCrossChainAppError(ctx, req.(*SendCrossChainAppErrorMsg)) + } + return interceptor(ctx, in, info, handler) +} + // AppSender_ServiceDesc is the grpc.ServiceDesc for AppSender service. // It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) @@ -264,6 +339,10 @@ var AppSender_ServiceDesc = grpc.ServiceDesc{ MethodName: "SendAppResponse", Handler: _AppSender_SendAppResponse_Handler, }, + { + MethodName: "SendAppError", + Handler: _AppSender_SendAppError_Handler, + }, { MethodName: "SendAppGossip", Handler: _AppSender_SendAppGossip_Handler, @@ -280,6 +359,10 @@ var AppSender_ServiceDesc = grpc.ServiceDesc{ MethodName: "SendCrossChainAppResponse", Handler: _AppSender_SendCrossChainAppResponse_Handler, }, + { + MethodName: "SendCrossChainAppError", + Handler: _AppSender_SendCrossChainAppError_Handler, + }, }, Streams: []grpc.StreamDesc{}, Metadata: "appsender/appsender.proto", diff --git a/avalanchego/proto/pb/http/http.pb.go b/avalanchego/proto/pb/http/http.pb.go index fca44132..76b6d916 100644 --- a/avalanchego/proto/pb/http/http.pb.go +++ b/avalanchego/proto/pb/http/http.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc (unknown) // source: http/http.proto diff --git a/avalanchego/proto/pb/http/http_grpc.pb.go b/avalanchego/proto/pb/http/http_grpc.pb.go index bdea7061..f83ec6c1 100644 --- a/avalanchego/proto/pb/http/http_grpc.pb.go +++ b/avalanchego/proto/pb/http/http_grpc.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
// versions: -// - protoc-gen-go-grpc v1.2.0 +// - protoc-gen-go-grpc v1.3.0 // - protoc (unknown) // source: http/http.proto @@ -19,6 +19,11 @@ import ( // Requires gRPC-Go v1.32.0 or later. const _ = grpc.SupportPackageIsVersion7 +const ( + HTTP_Handle_FullMethodName = "/http.HTTP/Handle" + HTTP_HandleSimple_FullMethodName = "/http.HTTP/HandleSimple" +) + // HTTPClient is the client API for HTTP service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. @@ -43,7 +48,7 @@ func NewHTTPClient(cc grpc.ClientConnInterface) HTTPClient { func (c *hTTPClient) Handle(ctx context.Context, in *HTTPRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/http.HTTP/Handle", in, out, opts...) + err := c.cc.Invoke(ctx, HTTP_Handle_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -52,7 +57,7 @@ func (c *hTTPClient) Handle(ctx context.Context, in *HTTPRequest, opts ...grpc.C func (c *hTTPClient) HandleSimple(ctx context.Context, in *HandleSimpleHTTPRequest, opts ...grpc.CallOption) (*HandleSimpleHTTPResponse, error) { out := new(HandleSimpleHTTPResponse) - err := c.cc.Invoke(ctx, "/http.HTTP/HandleSimple", in, out, opts...) + err := c.cc.Invoke(ctx, HTTP_HandleSimple_FullMethodName, in, out, opts...) 
if err != nil { return nil, err } @@ -107,7 +112,7 @@ func _HTTP_Handle_Handler(srv interface{}, ctx context.Context, dec func(interfa } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/http.HTTP/Handle", + FullMethod: HTTP_Handle_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(HTTPServer).Handle(ctx, req.(*HTTPRequest)) @@ -125,7 +130,7 @@ func _HTTP_HandleSimple_Handler(srv interface{}, ctx context.Context, dec func(i } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/http.HTTP/HandleSimple", + FullMethod: HTTP_HandleSimple_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(HTTPServer).HandleSimple(ctx, req.(*HandleSimpleHTTPRequest)) diff --git a/avalanchego/proto/pb/http/responsewriter/responsewriter.pb.go b/avalanchego/proto/pb/http/responsewriter/responsewriter.pb.go index 348a1086..a49113f3 100644 --- a/avalanchego/proto/pb/http/responsewriter/responsewriter.pb.go +++ b/avalanchego/proto/pb/http/responsewriter/responsewriter.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc (unknown) // source: http/responsewriter/responsewriter.proto diff --git a/avalanchego/proto/pb/http/responsewriter/responsewriter_grpc.pb.go b/avalanchego/proto/pb/http/responsewriter/responsewriter_grpc.pb.go index 8e3f438c..75177073 100644 --- a/avalanchego/proto/pb/http/responsewriter/responsewriter_grpc.pb.go +++ b/avalanchego/proto/pb/http/responsewriter/responsewriter_grpc.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.2.0 +// - protoc-gen-go-grpc v1.3.0 // - protoc (unknown) // source: http/responsewriter/responsewriter.proto @@ -19,6 +19,13 @@ import ( // Requires gRPC-Go v1.32.0 or later. 
const _ = grpc.SupportPackageIsVersion7 +const ( + Writer_Write_FullMethodName = "/http.responsewriter.Writer/Write" + Writer_WriteHeader_FullMethodName = "/http.responsewriter.Writer/WriteHeader" + Writer_Flush_FullMethodName = "/http.responsewriter.Writer/Flush" + Writer_Hijack_FullMethodName = "/http.responsewriter.Writer/Hijack" +) + // WriterClient is the client API for Writer service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. @@ -44,7 +51,7 @@ func NewWriterClient(cc grpc.ClientConnInterface) WriterClient { func (c *writerClient) Write(ctx context.Context, in *WriteRequest, opts ...grpc.CallOption) (*WriteResponse, error) { out := new(WriteResponse) - err := c.cc.Invoke(ctx, "/http.responsewriter.Writer/Write", in, out, opts...) + err := c.cc.Invoke(ctx, Writer_Write_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -53,7 +60,7 @@ func (c *writerClient) Write(ctx context.Context, in *WriteRequest, opts ...grpc func (c *writerClient) WriteHeader(ctx context.Context, in *WriteHeaderRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/http.responsewriter.Writer/WriteHeader", in, out, opts...) + err := c.cc.Invoke(ctx, Writer_WriteHeader_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -62,7 +69,7 @@ func (c *writerClient) WriteHeader(ctx context.Context, in *WriteHeaderRequest, func (c *writerClient) Flush(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*emptypb.Empty, error) { out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/http.responsewriter.Writer/Flush", in, out, opts...) + err := c.cc.Invoke(ctx, Writer_Flush_FullMethodName, in, out, opts...) 
if err != nil { return nil, err } @@ -71,7 +78,7 @@ func (c *writerClient) Flush(ctx context.Context, in *emptypb.Empty, opts ...grp func (c *writerClient) Hijack(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*HijackResponse, error) { out := new(HijackResponse) - err := c.cc.Invoke(ctx, "/http.responsewriter.Writer/Hijack", in, out, opts...) + err := c.cc.Invoke(ctx, Writer_Hijack_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -133,7 +140,7 @@ func _Writer_Write_Handler(srv interface{}, ctx context.Context, dec func(interf } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/http.responsewriter.Writer/Write", + FullMethod: Writer_Write_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(WriterServer).Write(ctx, req.(*WriteRequest)) @@ -151,7 +158,7 @@ func _Writer_WriteHeader_Handler(srv interface{}, ctx context.Context, dec func( } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/http.responsewriter.Writer/WriteHeader", + FullMethod: Writer_WriteHeader_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(WriterServer).WriteHeader(ctx, req.(*WriteHeaderRequest)) @@ -169,7 +176,7 @@ func _Writer_Flush_Handler(srv interface{}, ctx context.Context, dec func(interf } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/http.responsewriter.Writer/Flush", + FullMethod: Writer_Flush_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(WriterServer).Flush(ctx, req.(*emptypb.Empty)) @@ -187,7 +194,7 @@ func _Writer_Hijack_Handler(srv interface{}, ctx context.Context, dec func(inter } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/http.responsewriter.Writer/Hijack", + FullMethod: Writer_Hijack_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(WriterServer).Hijack(ctx, 
req.(*emptypb.Empty)) diff --git a/avalanchego/proto/pb/io/reader/reader.pb.go b/avalanchego/proto/pb/io/reader/reader.pb.go index fc1d0d12..34977aa3 100644 --- a/avalanchego/proto/pb/io/reader/reader.pb.go +++ b/avalanchego/proto/pb/io/reader/reader.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc (unknown) // source: io/reader/reader.proto diff --git a/avalanchego/proto/pb/io/reader/reader_grpc.pb.go b/avalanchego/proto/pb/io/reader/reader_grpc.pb.go index 55e3e2db..d3cf9730 100644 --- a/avalanchego/proto/pb/io/reader/reader_grpc.pb.go +++ b/avalanchego/proto/pb/io/reader/reader_grpc.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.2.0 +// - protoc-gen-go-grpc v1.3.0 // - protoc (unknown) // source: io/reader/reader.proto @@ -18,6 +18,10 @@ import ( // Requires gRPC-Go v1.32.0 or later. const _ = grpc.SupportPackageIsVersion7 +const ( + Reader_Read_FullMethodName = "/io.reader.Reader/Read" +) + // ReaderClient is the client API for Reader service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. @@ -35,7 +39,7 @@ func NewReaderClient(cc grpc.ClientConnInterface) ReaderClient { func (c *readerClient) Read(ctx context.Context, in *ReadRequest, opts ...grpc.CallOption) (*ReadResponse, error) { out := new(ReadResponse) - err := c.cc.Invoke(ctx, "/io.reader.Reader/Read", in, out, opts...) + err := c.cc.Invoke(ctx, Reader_Read_FullMethodName, in, out, opts...) 
if err != nil { return nil, err } @@ -80,7 +84,7 @@ func _Reader_Read_Handler(srv interface{}, ctx context.Context, dec func(interfa } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/io.reader.Reader/Read", + FullMethod: Reader_Read_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ReaderServer).Read(ctx, req.(*ReadRequest)) diff --git a/avalanchego/proto/pb/io/writer/writer.pb.go b/avalanchego/proto/pb/io/writer/writer.pb.go index afd1092f..850afaa8 100644 --- a/avalanchego/proto/pb/io/writer/writer.pb.go +++ b/avalanchego/proto/pb/io/writer/writer.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc (unknown) // source: io/writer/writer.proto diff --git a/avalanchego/proto/pb/io/writer/writer_grpc.pb.go b/avalanchego/proto/pb/io/writer/writer_grpc.pb.go index f61c741f..f1276425 100644 --- a/avalanchego/proto/pb/io/writer/writer_grpc.pb.go +++ b/avalanchego/proto/pb/io/writer/writer_grpc.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.2.0 +// - protoc-gen-go-grpc v1.3.0 // - protoc (unknown) // source: io/writer/writer.proto @@ -18,6 +18,10 @@ import ( // Requires gRPC-Go v1.32.0 or later. const _ = grpc.SupportPackageIsVersion7 +const ( + Writer_Write_FullMethodName = "/io.writer.Writer/Write" +) + // WriterClient is the client API for Writer service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. @@ -36,7 +40,7 @@ func NewWriterClient(cc grpc.ClientConnInterface) WriterClient { func (c *writerClient) Write(ctx context.Context, in *WriteRequest, opts ...grpc.CallOption) (*WriteResponse, error) { out := new(WriteResponse) - err := c.cc.Invoke(ctx, "/io.writer.Writer/Write", in, out, opts...) 
+ err := c.cc.Invoke(ctx, Writer_Write_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -82,7 +86,7 @@ func _Writer_Write_Handler(srv interface{}, ctx context.Context, dec func(interf } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/io.writer.Writer/Write", + FullMethod: Writer_Write_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(WriterServer).Write(ctx, req.(*WriteRequest)) diff --git a/avalanchego/proto/pb/keystore/keystore.pb.go b/avalanchego/proto/pb/keystore/keystore.pb.go index d9d75e73..ed9d3813 100644 --- a/avalanchego/proto/pb/keystore/keystore.pb.go +++ b/avalanchego/proto/pb/keystore/keystore.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc (unknown) // source: keystore/keystore.proto diff --git a/avalanchego/proto/pb/keystore/keystore_grpc.pb.go b/avalanchego/proto/pb/keystore/keystore_grpc.pb.go index a44bddf2..728bf23c 100644 --- a/avalanchego/proto/pb/keystore/keystore_grpc.pb.go +++ b/avalanchego/proto/pb/keystore/keystore_grpc.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.2.0 +// - protoc-gen-go-grpc v1.3.0 // - protoc (unknown) // source: keystore/keystore.proto @@ -18,6 +18,10 @@ import ( // Requires gRPC-Go v1.32.0 or later. const _ = grpc.SupportPackageIsVersion7 +const ( + Keystore_GetDatabase_FullMethodName = "/keystore.Keystore/GetDatabase" +) + // KeystoreClient is the client API for Keystore service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
@@ -35,7 +39,7 @@ func NewKeystoreClient(cc grpc.ClientConnInterface) KeystoreClient { func (c *keystoreClient) GetDatabase(ctx context.Context, in *GetDatabaseRequest, opts ...grpc.CallOption) (*GetDatabaseResponse, error) { out := new(GetDatabaseResponse) - err := c.cc.Invoke(ctx, "/keystore.Keystore/GetDatabase", in, out, opts...) + err := c.cc.Invoke(ctx, Keystore_GetDatabase_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -80,7 +84,7 @@ func _Keystore_GetDatabase_Handler(srv interface{}, ctx context.Context, dec fun } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/keystore.Keystore/GetDatabase", + FullMethod: Keystore_GetDatabase_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(KeystoreServer).GetDatabase(ctx, req.(*GetDatabaseRequest)) diff --git a/avalanchego/proto/pb/message/tx.pb.go b/avalanchego/proto/pb/message/tx.pb.go new file mode 100644 index 00000000..4320fdc0 --- /dev/null +++ b/avalanchego/proto/pb/message/tx.pb.go @@ -0,0 +1,232 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc (unknown) +// source: message/tx.proto + +package message + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Message struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Message: + // + // *Message_Tx + Message isMessage_Message `protobuf_oneof:"message"` +} + +func (x *Message) Reset() { + *x = Message{} + if protoimpl.UnsafeEnabled { + mi := &file_message_tx_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Message) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Message) ProtoMessage() {} + +func (x *Message) ProtoReflect() protoreflect.Message { + mi := &file_message_tx_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Message.ProtoReflect.Descriptor instead. +func (*Message) Descriptor() ([]byte, []int) { + return file_message_tx_proto_rawDescGZIP(), []int{0} +} + +func (m *Message) GetMessage() isMessage_Message { + if m != nil { + return m.Message + } + return nil +} + +func (x *Message) GetTx() *Tx { + if x, ok := x.GetMessage().(*Message_Tx); ok { + return x.Tx + } + return nil +} + +type isMessage_Message interface { + isMessage_Message() +} + +type Message_Tx struct { + Tx *Tx `protobuf:"bytes,1,opt,name=tx,proto3,oneof"` +} + +func (*Message_Tx) isMessage_Message() {} + +type Tx struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The byte representation of this transaction. 
+ Tx []byte `protobuf:"bytes,1,opt,name=tx,proto3" json:"tx,omitempty"` +} + +func (x *Tx) Reset() { + *x = Tx{} + if protoimpl.UnsafeEnabled { + mi := &file_message_tx_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Tx) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Tx) ProtoMessage() {} + +func (x *Tx) ProtoReflect() protoreflect.Message { + mi := &file_message_tx_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Tx.ProtoReflect.Descriptor instead. +func (*Tx) Descriptor() ([]byte, []int) { + return file_message_tx_proto_rawDescGZIP(), []int{1} +} + +func (x *Tx) GetTx() []byte { + if x != nil { + return x.Tx + } + return nil +} + +var File_message_tx_proto protoreflect.FileDescriptor + +var file_message_tx_proto_rawDesc = []byte{ + 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2f, 0x74, 0x78, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x33, 0x0a, 0x07, 0x4d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x1d, 0x0a, 0x02, 0x74, 0x78, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x54, 0x78, 0x48, + 0x00, 0x52, 0x02, 0x74, 0x78, 0x42, 0x09, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x22, 0x14, 0x0a, 0x02, 0x54, 0x78, 0x12, 0x0e, 0x0a, 0x02, 0x74, 0x78, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x02, 0x74, 0x78, 0x42, 0x32, 0x5a, 0x30, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x76, 0x61, 0x2d, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x61, 0x76, + 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x68, 0x65, 0x67, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, + 0x70, 0x62, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x62, 0x06, 0x70, 
0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_message_tx_proto_rawDescOnce sync.Once + file_message_tx_proto_rawDescData = file_message_tx_proto_rawDesc +) + +func file_message_tx_proto_rawDescGZIP() []byte { + file_message_tx_proto_rawDescOnce.Do(func() { + file_message_tx_proto_rawDescData = protoimpl.X.CompressGZIP(file_message_tx_proto_rawDescData) + }) + return file_message_tx_proto_rawDescData +} + +var file_message_tx_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_message_tx_proto_goTypes = []interface{}{ + (*Message)(nil), // 0: message.Message + (*Tx)(nil), // 1: message.Tx +} +var file_message_tx_proto_depIdxs = []int32{ + 1, // 0: message.Message.tx:type_name -> message.Tx + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_message_tx_proto_init() } +func file_message_tx_proto_init() { + if File_message_tx_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_message_tx_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Message); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_message_tx_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Tx); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_message_tx_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*Message_Tx)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_message_tx_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: 
file_message_tx_proto_goTypes, + DependencyIndexes: file_message_tx_proto_depIdxs, + MessageInfos: file_message_tx_proto_msgTypes, + }.Build() + File_message_tx_proto = out.File + file_message_tx_proto_rawDesc = nil + file_message_tx_proto_goTypes = nil + file_message_tx_proto_depIdxs = nil +} diff --git a/avalanchego/proto/pb/messenger/messenger.pb.go b/avalanchego/proto/pb/messenger/messenger.pb.go index 15c43300..830de252 100644 --- a/avalanchego/proto/pb/messenger/messenger.pb.go +++ b/avalanchego/proto/pb/messenger/messenger.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc (unknown) // source: messenger/messenger.proto diff --git a/avalanchego/proto/pb/messenger/messenger_grpc.pb.go b/avalanchego/proto/pb/messenger/messenger_grpc.pb.go index b7073a03..d03d1819 100644 --- a/avalanchego/proto/pb/messenger/messenger_grpc.pb.go +++ b/avalanchego/proto/pb/messenger/messenger_grpc.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.2.0 +// - protoc-gen-go-grpc v1.3.0 // - protoc (unknown) // source: messenger/messenger.proto @@ -18,6 +18,10 @@ import ( // Requires gRPC-Go v1.32.0 or later. const _ = grpc.SupportPackageIsVersion7 +const ( + Messenger_Notify_FullMethodName = "/messenger.Messenger/Notify" +) + // MessengerClient is the client API for Messenger service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. @@ -35,7 +39,7 @@ func NewMessengerClient(cc grpc.ClientConnInterface) MessengerClient { func (c *messengerClient) Notify(ctx context.Context, in *NotifyRequest, opts ...grpc.CallOption) (*NotifyResponse, error) { out := new(NotifyResponse) - err := c.cc.Invoke(ctx, "/messenger.Messenger/Notify", in, out, opts...) + err := c.cc.Invoke(ctx, Messenger_Notify_FullMethodName, in, out, opts...) 
if err != nil { return nil, err } @@ -80,7 +84,7 @@ func _Messenger_Notify_Handler(srv interface{}, ctx context.Context, dec func(in } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/messenger.Messenger/Notify", + FullMethod: Messenger_Notify_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MessengerServer).Notify(ctx, req.(*NotifyRequest)) diff --git a/avalanchego/proto/pb/net/conn/conn.pb.go b/avalanchego/proto/pb/net/conn/conn.pb.go index b40c1608..8882e8de 100644 --- a/avalanchego/proto/pb/net/conn/conn.pb.go +++ b/avalanchego/proto/pb/net/conn/conn.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc (unknown) // source: net/conn/conn.proto diff --git a/avalanchego/proto/pb/net/conn/conn_grpc.pb.go b/avalanchego/proto/pb/net/conn/conn_grpc.pb.go index 3239ef83..d640fb07 100644 --- a/avalanchego/proto/pb/net/conn/conn_grpc.pb.go +++ b/avalanchego/proto/pb/net/conn/conn_grpc.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.2.0 +// - protoc-gen-go-grpc v1.3.0 // - protoc (unknown) // source: net/conn/conn.proto @@ -19,6 +19,15 @@ import ( // Requires gRPC-Go v1.32.0 or later. const _ = grpc.SupportPackageIsVersion7 +const ( + Conn_Read_FullMethodName = "/net.conn.Conn/Read" + Conn_Write_FullMethodName = "/net.conn.Conn/Write" + Conn_Close_FullMethodName = "/net.conn.Conn/Close" + Conn_SetDeadline_FullMethodName = "/net.conn.Conn/SetDeadline" + Conn_SetReadDeadline_FullMethodName = "/net.conn.Conn/SetReadDeadline" + Conn_SetWriteDeadline_FullMethodName = "/net.conn.Conn/SetWriteDeadline" +) + // ConnClient is the client API for Conn service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
@@ -50,7 +59,7 @@ func NewConnClient(cc grpc.ClientConnInterface) ConnClient { func (c *connClient) Read(ctx context.Context, in *ReadRequest, opts ...grpc.CallOption) (*ReadResponse, error) { out := new(ReadResponse) - err := c.cc.Invoke(ctx, "/net.conn.Conn/Read", in, out, opts...) + err := c.cc.Invoke(ctx, Conn_Read_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -59,7 +68,7 @@ func (c *connClient) Read(ctx context.Context, in *ReadRequest, opts ...grpc.Cal func (c *connClient) Write(ctx context.Context, in *WriteRequest, opts ...grpc.CallOption) (*WriteResponse, error) { out := new(WriteResponse) - err := c.cc.Invoke(ctx, "/net.conn.Conn/Write", in, out, opts...) + err := c.cc.Invoke(ctx, Conn_Write_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -68,7 +77,7 @@ func (c *connClient) Write(ctx context.Context, in *WriteRequest, opts ...grpc.C func (c *connClient) Close(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*emptypb.Empty, error) { out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/net.conn.Conn/Close", in, out, opts...) + err := c.cc.Invoke(ctx, Conn_Close_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -77,7 +86,7 @@ func (c *connClient) Close(ctx context.Context, in *emptypb.Empty, opts ...grpc. func (c *connClient) SetDeadline(ctx context.Context, in *SetDeadlineRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/net.conn.Conn/SetDeadline", in, out, opts...) + err := c.cc.Invoke(ctx, Conn_SetDeadline_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -86,7 +95,7 @@ func (c *connClient) SetDeadline(ctx context.Context, in *SetDeadlineRequest, op func (c *connClient) SetReadDeadline(ctx context.Context, in *SetDeadlineRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/net.conn.Conn/SetReadDeadline", in, out, opts...) 
+ err := c.cc.Invoke(ctx, Conn_SetReadDeadline_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -95,7 +104,7 @@ func (c *connClient) SetReadDeadline(ctx context.Context, in *SetDeadlineRequest func (c *connClient) SetWriteDeadline(ctx context.Context, in *SetDeadlineRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/net.conn.Conn/SetWriteDeadline", in, out, opts...) + err := c.cc.Invoke(ctx, Conn_SetWriteDeadline_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -169,7 +178,7 @@ func _Conn_Read_Handler(srv interface{}, ctx context.Context, dec func(interface } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/net.conn.Conn/Read", + FullMethod: Conn_Read_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ConnServer).Read(ctx, req.(*ReadRequest)) @@ -187,7 +196,7 @@ func _Conn_Write_Handler(srv interface{}, ctx context.Context, dec func(interfac } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/net.conn.Conn/Write", + FullMethod: Conn_Write_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ConnServer).Write(ctx, req.(*WriteRequest)) @@ -205,7 +214,7 @@ func _Conn_Close_Handler(srv interface{}, ctx context.Context, dec func(interfac } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/net.conn.Conn/Close", + FullMethod: Conn_Close_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ConnServer).Close(ctx, req.(*emptypb.Empty)) @@ -223,7 +232,7 @@ func _Conn_SetDeadline_Handler(srv interface{}, ctx context.Context, dec func(in } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/net.conn.Conn/SetDeadline", + FullMethod: Conn_SetDeadline_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return 
srv.(ConnServer).SetDeadline(ctx, req.(*SetDeadlineRequest)) @@ -241,7 +250,7 @@ func _Conn_SetReadDeadline_Handler(srv interface{}, ctx context.Context, dec fun } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/net.conn.Conn/SetReadDeadline", + FullMethod: Conn_SetReadDeadline_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ConnServer).SetReadDeadline(ctx, req.(*SetDeadlineRequest)) @@ -259,7 +268,7 @@ func _Conn_SetWriteDeadline_Handler(srv interface{}, ctx context.Context, dec fu } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/net.conn.Conn/SetWriteDeadline", + FullMethod: Conn_SetWriteDeadline_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ConnServer).SetWriteDeadline(ctx, req.(*SetDeadlineRequest)) diff --git a/avalanchego/proto/pb/p2p/p2p.pb.go b/avalanchego/proto/pb/p2p/p2p.pb.go index ff5127f4..18ef744e 100644 --- a/avalanchego/proto/pb/p2p/p2p.pb.go +++ b/avalanchego/proto/pb/p2p/p2p.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc (unknown) // source: p2p/p2p.proto @@ -20,12 +20,14 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +// The consensus engine that should be used when handling a consensus request. type EngineType int32 const ( EngineType_ENGINE_TYPE_UNSPECIFIED EngineType = 0 - EngineType_ENGINE_TYPE_AVALANCHE EngineType = 1 - EngineType_ENGINE_TYPE_SNOWMAN EngineType = 2 + // Only the X-Chain uses avalanche consensus + EngineType_ENGINE_TYPE_AVALANCHE EngineType = 1 + EngineType_ENGINE_TYPE_SNOWMAN EngineType = 2 ) // Enum value maps for EngineType. 
@@ -86,8 +88,9 @@ type Message struct { // *Message_CompressedZstd // *Message_Ping // *Message_Pong - // *Message_Version - // *Message_PeerList + // *Message_Handshake + // *Message_GetPeerList + // *Message_PeerList_ // *Message_GetStateSummaryFrontier // *Message_StateSummaryFrontier_ // *Message_GetAcceptedStateSummary @@ -106,7 +109,7 @@ type Message struct { // *Message_AppRequest // *Message_AppResponse // *Message_AppGossip - // *Message_PeerListAck + // *Message_AppError Message isMessage_Message `protobuf_oneof:"message"` } @@ -177,16 +180,23 @@ func (x *Message) GetPong() *Pong { return nil } -func (x *Message) GetVersion() *Version { - if x, ok := x.GetMessage().(*Message_Version); ok { - return x.Version +func (x *Message) GetHandshake() *Handshake { + if x, ok := x.GetMessage().(*Message_Handshake); ok { + return x.Handshake } return nil } -func (x *Message) GetPeerList() *PeerList { - if x, ok := x.GetMessage().(*Message_PeerList); ok { - return x.PeerList +func (x *Message) GetGetPeerList() *GetPeerList { + if x, ok := x.GetMessage().(*Message_GetPeerList); ok { + return x.GetPeerList + } + return nil +} + +func (x *Message) GetPeerList_() *PeerList { + if x, ok := x.GetMessage().(*Message_PeerList_); ok { + return x.PeerList_ } return nil } @@ -317,9 +327,9 @@ func (x *Message) GetAppGossip() *AppGossip { return nil } -func (x *Message) GetPeerListAck() *PeerListAck { - if x, ok := x.GetMessage().(*Message_PeerListAck); ok { - return x.PeerListAck +func (x *Message) GetAppError() *AppError { + if x, ok := x.GetMessage().(*Message_AppError); ok { + return x.AppError } return nil } @@ -351,12 +361,16 @@ type Message_Pong struct { Pong *Pong `protobuf:"bytes,12,opt,name=pong,proto3,oneof"` } -type Message_Version struct { - Version *Version `protobuf:"bytes,13,opt,name=version,proto3,oneof"` +type Message_Handshake struct { + Handshake *Handshake `protobuf:"bytes,13,opt,name=handshake,proto3,oneof"` } -type Message_PeerList struct { - PeerList 
*PeerList `protobuf:"bytes,14,opt,name=peer_list,json=peerList,proto3,oneof"` +type Message_GetPeerList struct { + GetPeerList *GetPeerList `protobuf:"bytes,35,opt,name=get_peer_list,json=getPeerList,proto3,oneof"` +} + +type Message_PeerList_ struct { + PeerList_ *PeerList `protobuf:"bytes,14,opt,name=peer_list,json=peerList,proto3,oneof"` } type Message_GetStateSummaryFrontier struct { @@ -435,8 +449,8 @@ type Message_AppGossip struct { AppGossip *AppGossip `protobuf:"bytes,32,opt,name=app_gossip,json=appGossip,proto3,oneof"` } -type Message_PeerListAck struct { - PeerListAck *PeerListAck `protobuf:"bytes,33,opt,name=peer_list_ack,json=peerListAck,proto3,oneof"` +type Message_AppError struct { + AppError *AppError `protobuf:"bytes,34,opt,name=app_error,json=appError,proto3,oneof"` } func (*Message_CompressedGzip) isMessage_Message() {} @@ -447,9 +461,11 @@ func (*Message_Ping) isMessage_Message() {} func (*Message_Pong) isMessage_Message() {} -func (*Message_Version) isMessage_Message() {} +func (*Message_Handshake) isMessage_Message() {} -func (*Message_PeerList) isMessage_Message() {} +func (*Message_GetPeerList) isMessage_Message() {} + +func (*Message_PeerList_) isMessage_Message() {} func (*Message_GetStateSummaryFrontier) isMessage_Message() {} @@ -487,17 +503,20 @@ func (*Message_AppResponse) isMessage_Message() {} func (*Message_AppGossip) isMessage_Message() {} -func (*Message_PeerListAck) isMessage_Message() {} +func (*Message_AppError) isMessage_Message() {} -// Message that the local node sends to its remote peers, -// in order to periodically check its uptime. +// Ping reports a peer's perceived uptime percentage. // -// On receiving "ping", the remote peer responds with the observed -// uptime value of the message sender in "pong" message. +// Peers should respond to Ping with a Pong. 
type Ping struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + + // Uptime percentage on the primary network [0, 100] + Uptime uint32 `protobuf:"varint,1,opt,name=uptime,proto3" json:"uptime,omitempty"` + // Uptime percentage on subnets + SubnetUptimes []*SubnetUptime `protobuf:"bytes,2,rep,name=subnet_uptimes,json=subnetUptimes,proto3" json:"subnet_uptimes,omitempty"` } func (x *Ping) Reset() { @@ -532,15 +551,30 @@ func (*Ping) Descriptor() ([]byte, []int) { return file_p2p_p2p_proto_rawDescGZIP(), []int{1} } -// Contains subnet id and the related observed subnet uptime of the message -// receiver (remote peer). +func (x *Ping) GetUptime() uint32 { + if x != nil { + return x.Uptime + } + return 0 +} + +func (x *Ping) GetSubnetUptimes() []*SubnetUptime { + if x != nil { + return x.SubnetUptimes + } + return nil +} + +// SubnetUptime is a descriptor for a peer's perceived uptime on a subnet. type SubnetUptime struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + // Subnet the peer is validating SubnetId []byte `protobuf:"bytes,1,opt,name=subnet_id,json=subnetId,proto3" json:"subnet_id,omitempty"` - Uptime uint32 `protobuf:"varint,2,opt,name=uptime,proto3" json:"uptime,omitempty"` + // Uptime percentage on the subnet [0, 100] + Uptime uint32 `protobuf:"varint,2,opt,name=uptime,proto3" json:"uptime,omitempty"` } func (x *SubnetUptime) Reset() { @@ -589,17 +623,18 @@ func (x *SubnetUptime) GetUptime() uint32 { return 0 } -// Contains the uptime percentage of the message receiver (remote peer) -// from the sender's point of view, in response to "ping" message. -// Uptimes are expected to be provided as integers ranging in [0, 100]. +// Pong is sent in response to a Ping with the perceived uptime of the +// peer. 
type Pong struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // uptime is the primary network uptime percentage. + // Deprecated: uptime is now sent in Ping + // Uptime percentage on the primary network [0, 100] Uptime uint32 `protobuf:"varint,1,opt,name=uptime,proto3" json:"uptime,omitempty"` - // subnet_uptimes contains subnet uptime percentages. + // Deprecated: uptime is now sent in Ping + // Uptime percentage on subnets SubnetUptimes []*SubnetUptime `protobuf:"bytes,2,rep,name=subnet_uptimes,json=subnetUptimes,proto3" json:"subnet_uptimes,omitempty"` } @@ -649,30 +684,46 @@ func (x *Pong) GetSubnetUptimes() []*SubnetUptime { return nil } -// The first outbound message that the local node sends to its remote peer -// when the connection is established. In order for the local node to be -// tracked as a valid peer by the remote peer, the fields must be valid. -// For instance, the network ID must be matched and timestamp should be in-sync. -// Otherwise, the remote peer closes the connection. -// ref. "avalanchego/network/peer#handleVersion" -// ref. https://pkg.go.dev/github.com/ava-labs/avalanchego/network#Network "Dispatch" -type Version struct { +// Handshake is the first outbound message sent to a peer when a connection is +// established to start the p2p handshake. +// +// Peers must respond to a Handshake message with a PeerList message to allow the +// peer to connect to other peers in the network. +// +// Peers should drop connections to peers with incompatible versions. 
+type Handshake struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - NetworkId uint32 `protobuf:"varint,1,opt,name=network_id,json=networkId,proto3" json:"network_id,omitempty"` - MyTime uint64 `protobuf:"varint,2,opt,name=my_time,json=myTime,proto3" json:"my_time,omitempty"` - IpAddr []byte `protobuf:"bytes,3,opt,name=ip_addr,json=ipAddr,proto3" json:"ip_addr,omitempty"` - IpPort uint32 `protobuf:"varint,4,opt,name=ip_port,json=ipPort,proto3" json:"ip_port,omitempty"` - MyVersion string `protobuf:"bytes,5,opt,name=my_version,json=myVersion,proto3" json:"my_version,omitempty"` - MyVersionTime uint64 `protobuf:"varint,6,opt,name=my_version_time,json=myVersionTime,proto3" json:"my_version_time,omitempty"` - Sig []byte `protobuf:"bytes,7,opt,name=sig,proto3" json:"sig,omitempty"` - TrackedSubnets [][]byte `protobuf:"bytes,8,rep,name=tracked_subnets,json=trackedSubnets,proto3" json:"tracked_subnets,omitempty"` -} - -func (x *Version) Reset() { - *x = Version{} + // Network the peer is running on (e.g local, testnet, mainnet) + NetworkId uint32 `protobuf:"varint,1,opt,name=network_id,json=networkId,proto3" json:"network_id,omitempty"` + // Unix timestamp when this Handshake message was created + MyTime uint64 `protobuf:"varint,2,opt,name=my_time,json=myTime,proto3" json:"my_time,omitempty"` + // IP address of the peer + IpAddr []byte `protobuf:"bytes,3,opt,name=ip_addr,json=ipAddr,proto3" json:"ip_addr,omitempty"` + // IP port of the peer + IpPort uint32 `protobuf:"varint,4,opt,name=ip_port,json=ipPort,proto3" json:"ip_port,omitempty"` + // Avalanche client version + MyVersion string `protobuf:"bytes,5,opt,name=my_version,json=myVersion,proto3" json:"my_version,omitempty"` + // Timestamp of the IP + IpSigningTime uint64 `protobuf:"varint,6,opt,name=ip_signing_time,json=ipSigningTime,proto3" json:"ip_signing_time,omitempty"` + // Signature of the peer IP port pair at a provided timestamp with the TLS + // key. 
+ IpNodeIdSig []byte `protobuf:"bytes,7,opt,name=ip_node_id_sig,json=ipNodeIdSig,proto3" json:"ip_node_id_sig,omitempty"` + // Subnets the peer is tracking + TrackedSubnets [][]byte `protobuf:"bytes,8,rep,name=tracked_subnets,json=trackedSubnets,proto3" json:"tracked_subnets,omitempty"` + Client *Client `protobuf:"bytes,9,opt,name=client,proto3" json:"client,omitempty"` + SupportedAcps []uint32 `protobuf:"varint,10,rep,packed,name=supported_acps,json=supportedAcps,proto3" json:"supported_acps,omitempty"` + ObjectedAcps []uint32 `protobuf:"varint,11,rep,packed,name=objected_acps,json=objectedAcps,proto3" json:"objected_acps,omitempty"` + KnownPeers *BloomFilter `protobuf:"bytes,12,opt,name=known_peers,json=knownPeers,proto3" json:"known_peers,omitempty"` + // Signature of the peer IP port pair at a provided timestamp with the BLS + // key. + IpBlsSig []byte `protobuf:"bytes,13,opt,name=ip_bls_sig,json=ipBlsSig,proto3" json:"ip_bls_sig,omitempty"` +} + +func (x *Handshake) Reset() { + *x = Handshake{} if protoimpl.UnsafeEnabled { mi := &file_p2p_p2p_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -680,13 +731,13 @@ func (x *Version) Reset() { } } -func (x *Version) String() string { +func (x *Handshake) String() string { return protoimpl.X.MessageStringOf(x) } -func (*Version) ProtoMessage() {} +func (*Handshake) ProtoMessage() {} -func (x *Version) ProtoReflect() protoreflect.Message { +func (x *Handshake) ProtoReflect() protoreflect.Message { mi := &file_p2p_p2p_proto_msgTypes[4] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -698,83 +749,118 @@ func (x *Version) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use Version.ProtoReflect.Descriptor instead. -func (*Version) Descriptor() ([]byte, []int) { +// Deprecated: Use Handshake.ProtoReflect.Descriptor instead. 
+func (*Handshake) Descriptor() ([]byte, []int) { return file_p2p_p2p_proto_rawDescGZIP(), []int{4} } -func (x *Version) GetNetworkId() uint32 { +func (x *Handshake) GetNetworkId() uint32 { if x != nil { return x.NetworkId } return 0 } -func (x *Version) GetMyTime() uint64 { +func (x *Handshake) GetMyTime() uint64 { if x != nil { return x.MyTime } return 0 } -func (x *Version) GetIpAddr() []byte { +func (x *Handshake) GetIpAddr() []byte { if x != nil { return x.IpAddr } return nil } -func (x *Version) GetIpPort() uint32 { +func (x *Handshake) GetIpPort() uint32 { if x != nil { return x.IpPort } return 0 } -func (x *Version) GetMyVersion() string { +func (x *Handshake) GetMyVersion() string { if x != nil { return x.MyVersion } return "" } -func (x *Version) GetMyVersionTime() uint64 { +func (x *Handshake) GetIpSigningTime() uint64 { if x != nil { - return x.MyVersionTime + return x.IpSigningTime } return 0 } -func (x *Version) GetSig() []byte { +func (x *Handshake) GetIpNodeIdSig() []byte { if x != nil { - return x.Sig + return x.IpNodeIdSig } return nil } -func (x *Version) GetTrackedSubnets() [][]byte { +func (x *Handshake) GetTrackedSubnets() [][]byte { if x != nil { return x.TrackedSubnets } return nil } -// ref. 
https://pkg.go.dev/github.com/ava-labs/avalanchego/utils/ips#ClaimedIPPort -type ClaimedIpPort struct { +func (x *Handshake) GetClient() *Client { + if x != nil { + return x.Client + } + return nil +} + +func (x *Handshake) GetSupportedAcps() []uint32 { + if x != nil { + return x.SupportedAcps + } + return nil +} + +func (x *Handshake) GetObjectedAcps() []uint32 { + if x != nil { + return x.ObjectedAcps + } + return nil +} + +func (x *Handshake) GetKnownPeers() *BloomFilter { + if x != nil { + return x.KnownPeers + } + return nil +} + +func (x *Handshake) GetIpBlsSig() []byte { + if x != nil { + return x.IpBlsSig + } + return nil +} + +// Metadata about a peer's P2P client used to determine compatibility +type Client struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - X509Certificate []byte `protobuf:"bytes,1,opt,name=x509_certificate,json=x509Certificate,proto3" json:"x509_certificate,omitempty"` - IpAddr []byte `protobuf:"bytes,2,opt,name=ip_addr,json=ipAddr,proto3" json:"ip_addr,omitempty"` - IpPort uint32 `protobuf:"varint,3,opt,name=ip_port,json=ipPort,proto3" json:"ip_port,omitempty"` - Timestamp uint64 `protobuf:"varint,4,opt,name=timestamp,proto3" json:"timestamp,omitempty"` - Signature []byte `protobuf:"bytes,5,opt,name=signature,proto3" json:"signature,omitempty"` - TxId []byte `protobuf:"bytes,6,opt,name=tx_id,json=txId,proto3" json:"tx_id,omitempty"` + // Client name (e.g avalanchego) + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Client semantic version + Major uint32 `protobuf:"varint,2,opt,name=major,proto3" json:"major,omitempty"` + Minor uint32 `protobuf:"varint,3,opt,name=minor,proto3" json:"minor,omitempty"` + Patch uint32 `protobuf:"varint,4,opt,name=patch,proto3" json:"patch,omitempty"` } -func (x *ClaimedIpPort) Reset() { - *x = ClaimedIpPort{} +func (x *Client) Reset() { + *x = Client{} if protoimpl.UnsafeEnabled { mi := &file_p2p_p2p_proto_msgTypes[5] 
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -782,13 +868,13 @@ func (x *ClaimedIpPort) Reset() { } } -func (x *ClaimedIpPort) String() string { +func (x *Client) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ClaimedIpPort) ProtoMessage() {} +func (*Client) ProtoMessage() {} -func (x *ClaimedIpPort) ProtoReflect() protoreflect.Message { +func (x *Client) ProtoReflect() protoreflect.Message { mi := &file_p2p_p2p_proto_msgTypes[5] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -800,70 +886,51 @@ func (x *ClaimedIpPort) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ClaimedIpPort.ProtoReflect.Descriptor instead. -func (*ClaimedIpPort) Descriptor() ([]byte, []int) { +// Deprecated: Use Client.ProtoReflect.Descriptor instead. +func (*Client) Descriptor() ([]byte, []int) { return file_p2p_p2p_proto_rawDescGZIP(), []int{5} } -func (x *ClaimedIpPort) GetX509Certificate() []byte { - if x != nil { - return x.X509Certificate - } - return nil -} - -func (x *ClaimedIpPort) GetIpAddr() []byte { +func (x *Client) GetName() string { if x != nil { - return x.IpAddr + return x.Name } - return nil + return "" } -func (x *ClaimedIpPort) GetIpPort() uint32 { +func (x *Client) GetMajor() uint32 { if x != nil { - return x.IpPort + return x.Major } return 0 } -func (x *ClaimedIpPort) GetTimestamp() uint64 { +func (x *Client) GetMinor() uint32 { if x != nil { - return x.Timestamp + return x.Minor } return 0 } -func (x *ClaimedIpPort) GetSignature() []byte { - if x != nil { - return x.Signature - } - return nil -} - -func (x *ClaimedIpPort) GetTxId() []byte { +func (x *Client) GetPatch() uint32 { if x != nil { - return x.TxId + return x.Patch } - return nil + return 0 } -// Message that contains a list of peer information (IP, certs, etc.) -// in response to "version" message, and sent periodically to a set of -// validators. -// ref. 
"avalanchego/network/network#Dispatch.runtTimers" -// -// On receiving "peer_list", the engine starts/updates the tracking information -// of the remote peer. -type PeerList struct { +// BloomFilter with a random salt to prevent consistent hash collisions +type BloomFilter struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ClaimedIpPorts []*ClaimedIpPort `protobuf:"bytes,1,rep,name=claimed_ip_ports,json=claimedIpPorts,proto3" json:"claimed_ip_ports,omitempty"` + Filter []byte `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter,omitempty"` + Salt []byte `protobuf:"bytes,2,opt,name=salt,proto3" json:"salt,omitempty"` } -func (x *PeerList) Reset() { - *x = PeerList{} +func (x *BloomFilter) Reset() { + *x = BloomFilter{} if protoimpl.UnsafeEnabled { mi := &file_p2p_p2p_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -871,13 +938,13 @@ func (x *PeerList) Reset() { } } -func (x *PeerList) String() string { +func (x *BloomFilter) String() string { return protoimpl.X.MessageStringOf(x) } -func (*PeerList) ProtoMessage() {} +func (*BloomFilter) ProtoMessage() {} -func (x *PeerList) ProtoReflect() protoreflect.Message { +func (x *BloomFilter) ProtoReflect() protoreflect.Message { mi := &file_p2p_p2p_proto_msgTypes[6] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -889,36 +956,47 @@ func (x *PeerList) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use PeerList.ProtoReflect.Descriptor instead. -func (*PeerList) Descriptor() ([]byte, []int) { +// Deprecated: Use BloomFilter.ProtoReflect.Descriptor instead. 
+func (*BloomFilter) Descriptor() ([]byte, []int) { return file_p2p_p2p_proto_rawDescGZIP(), []int{6} } -func (x *PeerList) GetClaimedIpPorts() []*ClaimedIpPort { +func (x *BloomFilter) GetFilter() []byte { if x != nil { - return x.ClaimedIpPorts + return x.Filter } return nil } -// "peer_ack" is sent in response to a "peer_list" message. The "tx_id" should -// correspond to a "tx_id" in the "peer_list" message. The sender should set -// "timestamp" to be the latest known timestamp of a signed IP corresponding to -// the nodeID of "tx_id". -// -// Upon receipt, the "tx_id" and "timestamp" will determine if the receiptent -// can forgo future gossip of the node's IP to the sender of this message. -type PeerAck struct { +func (x *BloomFilter) GetSalt() []byte { + if x != nil { + return x.Salt + } + return nil +} + +// ClaimedIpPort contains metadata needed to connect to a peer +type ClaimedIpPort struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - TxId []byte `protobuf:"bytes,1,opt,name=tx_id,json=txId,proto3" json:"tx_id,omitempty"` - Timestamp uint64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // X509 certificate of the peer + X509Certificate []byte `protobuf:"bytes,1,opt,name=x509_certificate,json=x509Certificate,proto3" json:"x509_certificate,omitempty"` + // IP address of the peer + IpAddr []byte `protobuf:"bytes,2,opt,name=ip_addr,json=ipAddr,proto3" json:"ip_addr,omitempty"` + // IP port of the peer + IpPort uint32 `protobuf:"varint,3,opt,name=ip_port,json=ipPort,proto3" json:"ip_port,omitempty"` + // Timestamp of the IP address + port pair + Timestamp uint64 `protobuf:"varint,4,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // Signature of the IP port pair at a provided timestamp + Signature []byte `protobuf:"bytes,5,opt,name=signature,proto3" json:"signature,omitempty"` + // P-Chain transaction that added this peer to the validator set + TxId []byte 
`protobuf:"bytes,6,opt,name=tx_id,json=txId,proto3" json:"tx_id,omitempty"` } -func (x *PeerAck) Reset() { - *x = PeerAck{} +func (x *ClaimedIpPort) Reset() { + *x = ClaimedIpPort{} if protoimpl.UnsafeEnabled { mi := &file_p2p_p2p_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -926,13 +1004,13 @@ func (x *PeerAck) Reset() { } } -func (x *PeerAck) String() string { +func (x *ClaimedIpPort) String() string { return protoimpl.X.MessageStringOf(x) } -func (*PeerAck) ProtoMessage() {} +func (*ClaimedIpPort) ProtoMessage() {} -func (x *PeerAck) ProtoReflect() protoreflect.Message { +func (x *ClaimedIpPort) ProtoReflect() protoreflect.Message { mi := &file_p2p_p2p_proto_msgTypes[7] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -944,37 +1022,69 @@ func (x *PeerAck) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use PeerAck.ProtoReflect.Descriptor instead. -func (*PeerAck) Descriptor() ([]byte, []int) { +// Deprecated: Use ClaimedIpPort.ProtoReflect.Descriptor instead. +func (*ClaimedIpPort) Descriptor() ([]byte, []int) { return file_p2p_p2p_proto_rawDescGZIP(), []int{7} } -func (x *PeerAck) GetTxId() []byte { +func (x *ClaimedIpPort) GetX509Certificate() []byte { if x != nil { - return x.TxId + return x.X509Certificate + } + return nil +} + +func (x *ClaimedIpPort) GetIpAddr() []byte { + if x != nil { + return x.IpAddr } return nil } -func (x *PeerAck) GetTimestamp() uint64 { +func (x *ClaimedIpPort) GetIpPort() uint32 { + if x != nil { + return x.IpPort + } + return 0 +} + +func (x *ClaimedIpPort) GetTimestamp() uint64 { if x != nil { return x.Timestamp } return 0 } -// Message that responds to a peer_list message containing the AddValidatorTxIDs -// from the peer_list message that we currently have in our validator set. 
-type PeerListAck struct { +func (x *ClaimedIpPort) GetSignature() []byte { + if x != nil { + return x.Signature + } + return nil +} + +func (x *ClaimedIpPort) GetTxId() []byte { + if x != nil { + return x.TxId + } + return nil +} + +// GetPeerList contains a bloom filter of the currently known validator IPs. +// +// GetPeerList must not be responded to until finishing the handshake. After the +// handshake is completed, GetPeerlist messages should be responded to with a +// Peerlist message containing validators that are not present in the bloom +// filter. +type GetPeerList struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - PeerAcks []*PeerAck `protobuf:"bytes,2,rep,name=peer_acks,json=peerAcks,proto3" json:"peer_acks,omitempty"` + KnownPeers *BloomFilter `protobuf:"bytes,1,opt,name=known_peers,json=knownPeers,proto3" json:"known_peers,omitempty"` } -func (x *PeerListAck) Reset() { - *x = PeerListAck{} +func (x *GetPeerList) Reset() { + *x = GetPeerList{} if protoimpl.UnsafeEnabled { mi := &file_p2p_p2p_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -982,13 +1092,13 @@ func (x *PeerListAck) Reset() { } } -func (x *PeerListAck) String() string { +func (x *GetPeerList) String() string { return protoimpl.X.MessageStringOf(x) } -func (*PeerListAck) ProtoMessage() {} +func (*GetPeerList) ProtoMessage() {} -func (x *PeerListAck) ProtoReflect() protoreflect.Message { +func (x *GetPeerList) ProtoReflect() protoreflect.Message { mi := &file_p2p_p2p_proto_msgTypes[8] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -1000,32 +1110,93 @@ func (x *PeerListAck) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use PeerListAck.ProtoReflect.Descriptor instead. -func (*PeerListAck) Descriptor() ([]byte, []int) { +// Deprecated: Use GetPeerList.ProtoReflect.Descriptor instead. 
+func (*GetPeerList) Descriptor() ([]byte, []int) { return file_p2p_p2p_proto_rawDescGZIP(), []int{8} } -func (x *PeerListAck) GetPeerAcks() []*PeerAck { +func (x *GetPeerList) GetKnownPeers() *BloomFilter { + if x != nil { + return x.KnownPeers + } + return nil +} + +// PeerList contains network-level metadata for a set of validators. +// +// PeerList must be sent in response to an inbound Handshake message from a +// remote peer a peer wants to connect to. Once a PeerList is received after +// a Handshake message, the p2p handshake is complete and the connection is +// established. +// +// PeerList should be sent in response to a GetPeerlist message if the handshake +// has been completed. +type PeerList struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ClaimedIpPorts []*ClaimedIpPort `protobuf:"bytes,1,rep,name=claimed_ip_ports,json=claimedIpPorts,proto3" json:"claimed_ip_ports,omitempty"` +} + +func (x *PeerList) Reset() { + *x = PeerList{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_p2p_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PeerList) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PeerList) ProtoMessage() {} + +func (x *PeerList) ProtoReflect() protoreflect.Message { + mi := &file_p2p_p2p_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PeerList.ProtoReflect.Descriptor instead. 
+func (*PeerList) Descriptor() ([]byte, []int) { + return file_p2p_p2p_proto_rawDescGZIP(), []int{9} +} + +func (x *PeerList) GetClaimedIpPorts() []*ClaimedIpPort { if x != nil { - return x.PeerAcks + return x.ClaimedIpPorts } return nil } +// GetStateSummaryFrontier requests a peer's most recently accepted state +// summary type GetStateSummaryFrontier struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ChainId []byte `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + // Chain being requested from + ChainId []byte `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + // Unique identifier for this request RequestId uint32 `protobuf:"varint,2,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` - Deadline uint64 `protobuf:"varint,3,opt,name=deadline,proto3" json:"deadline,omitempty"` + // Timeout (ns) for this request + Deadline uint64 `protobuf:"varint,3,opt,name=deadline,proto3" json:"deadline,omitempty"` } func (x *GetStateSummaryFrontier) Reset() { *x = GetStateSummaryFrontier{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[9] + mi := &file_p2p_p2p_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1038,7 +1209,7 @@ func (x *GetStateSummaryFrontier) String() string { func (*GetStateSummaryFrontier) ProtoMessage() {} func (x *GetStateSummaryFrontier) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[9] + mi := &file_p2p_p2p_proto_msgTypes[10] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1051,7 +1222,7 @@ func (x *GetStateSummaryFrontier) ProtoReflect() protoreflect.Message { // Deprecated: Use GetStateSummaryFrontier.ProtoReflect.Descriptor instead. 
func (*GetStateSummaryFrontier) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{9} + return file_p2p_p2p_proto_rawDescGZIP(), []int{10} } func (x *GetStateSummaryFrontier) GetChainId() []byte { @@ -1075,20 +1246,24 @@ func (x *GetStateSummaryFrontier) GetDeadline() uint64 { return 0 } +// StateSummaryFrontier is sent in response to a GetStateSummaryFrontier request type StateSummaryFrontier struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ChainId []byte `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + // Chain being responded from + ChainId []byte `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + // Request id of the original GetStateSummaryFrontier request RequestId uint32 `protobuf:"varint,2,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` - Summary []byte `protobuf:"bytes,3,opt,name=summary,proto3" json:"summary,omitempty"` + // The requested state summary + Summary []byte `protobuf:"bytes,3,opt,name=summary,proto3" json:"summary,omitempty"` } func (x *StateSummaryFrontier) Reset() { *x = StateSummaryFrontier{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[10] + mi := &file_p2p_p2p_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1101,7 +1276,7 @@ func (x *StateSummaryFrontier) String() string { func (*StateSummaryFrontier) ProtoMessage() {} func (x *StateSummaryFrontier) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[10] + mi := &file_p2p_p2p_proto_msgTypes[11] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1114,7 +1289,7 @@ func (x *StateSummaryFrontier) ProtoReflect() protoreflect.Message { // Deprecated: Use StateSummaryFrontier.ProtoReflect.Descriptor instead. 
func (*StateSummaryFrontier) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{10} + return file_p2p_p2p_proto_rawDescGZIP(), []int{11} } func (x *StateSummaryFrontier) GetChainId() []byte { @@ -1138,21 +1313,27 @@ func (x *StateSummaryFrontier) GetSummary() []byte { return nil } +// GetAcceptedStateSummary requests a set of state summaries at a set of +// block heights type GetAcceptedStateSummary struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ChainId []byte `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` - RequestId uint32 `protobuf:"varint,2,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` - Deadline uint64 `protobuf:"varint,3,opt,name=deadline,proto3" json:"deadline,omitempty"` - Heights []uint64 `protobuf:"varint,4,rep,packed,name=heights,proto3" json:"heights,omitempty"` + // Chain bein requested from + ChainId []byte `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + // Unique identifier for this request + RequestId uint32 `protobuf:"varint,2,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + // Timeout (ns) for this request + Deadline uint64 `protobuf:"varint,3,opt,name=deadline,proto3" json:"deadline,omitempty"` + // Heights being requested + Heights []uint64 `protobuf:"varint,4,rep,packed,name=heights,proto3" json:"heights,omitempty"` } func (x *GetAcceptedStateSummary) Reset() { *x = GetAcceptedStateSummary{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[11] + mi := &file_p2p_p2p_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1165,7 +1346,7 @@ func (x *GetAcceptedStateSummary) String() string { func (*GetAcceptedStateSummary) ProtoMessage() {} func (x *GetAcceptedStateSummary) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[11] + mi := 
&file_p2p_p2p_proto_msgTypes[12] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1178,7 +1359,7 @@ func (x *GetAcceptedStateSummary) ProtoReflect() protoreflect.Message { // Deprecated: Use GetAcceptedStateSummary.ProtoReflect.Descriptor instead. func (*GetAcceptedStateSummary) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{11} + return file_p2p_p2p_proto_rawDescGZIP(), []int{12} } func (x *GetAcceptedStateSummary) GetChainId() []byte { @@ -1209,20 +1390,24 @@ func (x *GetAcceptedStateSummary) GetHeights() []uint64 { return nil } +// AcceptedStateSummary is sent in response to GetAcceptedStateSummary type AcceptedStateSummary struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ChainId []byte `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` - RequestId uint32 `protobuf:"varint,2,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + // Chain being responded from + ChainId []byte `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + // Request id of the original GetAcceptedStateSummary request + RequestId uint32 `protobuf:"varint,2,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + // State summary ids SummaryIds [][]byte `protobuf:"bytes,3,rep,name=summary_ids,json=summaryIds,proto3" json:"summary_ids,omitempty"` } func (x *AcceptedStateSummary) Reset() { *x = AcceptedStateSummary{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[12] + mi := &file_p2p_p2p_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1235,7 +1420,7 @@ func (x *AcceptedStateSummary) String() string { func (*AcceptedStateSummary) ProtoMessage() {} func (x *AcceptedStateSummary) ProtoReflect() protoreflect.Message { - mi := 
&file_p2p_p2p_proto_msgTypes[12] + mi := &file_p2p_p2p_proto_msgTypes[13] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1248,7 +1433,7 @@ func (x *AcceptedStateSummary) ProtoReflect() protoreflect.Message { // Deprecated: Use AcceptedStateSummary.ProtoReflect.Descriptor instead. func (*AcceptedStateSummary) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{12} + return file_p2p_p2p_proto_rawDescGZIP(), []int{13} } func (x *AcceptedStateSummary) GetChainId() []byte { @@ -1272,30 +1457,28 @@ func (x *AcceptedStateSummary) GetSummaryIds() [][]byte { return nil } -// Message to request for the accepted frontier of the "remote" peer. -// For instance, the accepted frontier of X-chain DAG is the set of -// accepted vertices that do not have any accepted descendants (i.e., frontier). -// -// During bootstrap, the local node sends out "get_accepted_frontier" to validators -// (see "avalanchego/snow/engine/common/bootstrapper.Startup"). -// And the expected response is "accepted_frontier". +// GetAcceptedFrontier requests the accepted frontier from a peer. // -// See "snow/engine/common/bootstrapper.go#AcceptedFrontier". +// Peers should respond to GetAcceptedFrontier with AcceptedFrontier. 
type GetAcceptedFrontier struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ChainId []byte `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` - RequestId uint32 `protobuf:"varint,2,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` - Deadline uint64 `protobuf:"varint,3,opt,name=deadline,proto3" json:"deadline,omitempty"` + // Chain being requested from + ChainId []byte `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + // Unique identifier for this request + RequestId uint32 `protobuf:"varint,2,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + // Timeout (ns) for this request + Deadline uint64 `protobuf:"varint,3,opt,name=deadline,proto3" json:"deadline,omitempty"` + // Consensus type the remote peer should use to handle this message EngineType EngineType `protobuf:"varint,4,opt,name=engine_type,json=engineType,proto3,enum=p2p.EngineType" json:"engine_type,omitempty"` } func (x *GetAcceptedFrontier) Reset() { *x = GetAcceptedFrontier{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[13] + mi := &file_p2p_p2p_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1308,7 +1491,7 @@ func (x *GetAcceptedFrontier) String() string { func (*GetAcceptedFrontier) ProtoMessage() {} func (x *GetAcceptedFrontier) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[13] + mi := &file_p2p_p2p_proto_msgTypes[14] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1321,7 +1504,7 @@ func (x *GetAcceptedFrontier) ProtoReflect() protoreflect.Message { // Deprecated: Use GetAcceptedFrontier.ProtoReflect.Descriptor instead. 
func (*GetAcceptedFrontier) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{13} + return file_p2p_p2p_proto_rawDescGZIP(), []int{14} } func (x *GetAcceptedFrontier) GetChainId() []byte { @@ -1352,25 +1535,26 @@ func (x *GetAcceptedFrontier) GetEngineType() EngineType { return EngineType_ENGINE_TYPE_UNSPECIFIED } -// Message that contains the list of accepted frontier in response to -// "get_accepted_frontier". For instance, on receiving "get_accepted_frontier", -// the X-chain engine responds with the accepted frontier of X-chain DAG. +// AcceptedFrontier contains the remote peer's last accepted frontier. // -// See "snow/engine/common/bootstrapper.go#AcceptedFrontier". +// AcceptedFrontier is sent in response to GetAcceptedFrontier. type AcceptedFrontier struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ChainId []byte `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` - RequestId uint32 `protobuf:"varint,2,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` - ContainerIds [][]byte `protobuf:"bytes,3,rep,name=container_ids,json=containerIds,proto3" json:"container_ids,omitempty"` + // Chain being responded from + ChainId []byte `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + // Request id of the original GetAcceptedFrontier request + RequestId uint32 `protobuf:"varint,2,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + // The id of the last accepted frontier + ContainerId []byte `protobuf:"bytes,3,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` } func (x *AcceptedFrontier) Reset() { *x = AcceptedFrontier{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[14] + mi := &file_p2p_p2p_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1383,7 +1567,7 @@ func (x 
*AcceptedFrontier) String() string { func (*AcceptedFrontier) ProtoMessage() {} func (x *AcceptedFrontier) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[14] + mi := &file_p2p_p2p_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1396,7 +1580,7 @@ func (x *AcceptedFrontier) ProtoReflect() protoreflect.Message { // Deprecated: Use AcceptedFrontier.ProtoReflect.Descriptor instead. func (*AcceptedFrontier) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{14} + return file_p2p_p2p_proto_rawDescGZIP(), []int{15} } func (x *AcceptedFrontier) GetChainId() []byte { @@ -1413,36 +1597,38 @@ func (x *AcceptedFrontier) GetRequestId() uint32 { return 0 } -func (x *AcceptedFrontier) GetContainerIds() [][]byte { +func (x *AcceptedFrontier) GetContainerId() []byte { if x != nil { - return x.ContainerIds + return x.ContainerId } return nil } -// Message to request for the accepted blocks/vertices of the "remote" peer. -// The local node sends out this message during bootstrap, following "get_accepted_frontier". -// Basically, sending the list of the accepted frontier and expects the response of -// the accepted IDs from the remote peer. +// GetAccepted sends a request with the sender's accepted frontier to a remote +// peer. // -// See "avalanchego/snow/engine/common/bootstrapper.Startup" and "sendGetAccepted". -// See "snow/engine/common/bootstrapper.go#AcceptedFrontier". +// Peers should respond to GetAccepted with an Accepted message. 
type GetAccepted struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ChainId []byte `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` - RequestId uint32 `protobuf:"varint,2,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` - Deadline uint64 `protobuf:"varint,3,opt,name=deadline,proto3" json:"deadline,omitempty"` - ContainerIds [][]byte `protobuf:"bytes,4,rep,name=container_ids,json=containerIds,proto3" json:"container_ids,omitempty"` - EngineType EngineType `protobuf:"varint,5,opt,name=engine_type,json=engineType,proto3,enum=p2p.EngineType" json:"engine_type,omitempty"` + // Chain being requested from + ChainId []byte `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + // Unique identifier for this message + RequestId uint32 `protobuf:"varint,2,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + // Timeout (ns) for this request + Deadline uint64 `protobuf:"varint,3,opt,name=deadline,proto3" json:"deadline,omitempty"` + // The sender's accepted frontier + ContainerIds [][]byte `protobuf:"bytes,4,rep,name=container_ids,json=containerIds,proto3" json:"container_ids,omitempty"` + // Consensus type to handle this message + EngineType EngineType `protobuf:"varint,5,opt,name=engine_type,json=engineType,proto3,enum=p2p.EngineType" json:"engine_type,omitempty"` } func (x *GetAccepted) Reset() { *x = GetAccepted{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[15] + mi := &file_p2p_p2p_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1455,7 +1641,7 @@ func (x *GetAccepted) String() string { func (*GetAccepted) ProtoMessage() {} func (x *GetAccepted) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[15] + mi := &file_p2p_p2p_proto_msgTypes[16] if protoimpl.UnsafeEnabled && x != nil { ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1468,7 +1654,7 @@ func (x *GetAccepted) ProtoReflect() protoreflect.Message { // Deprecated: Use GetAccepted.ProtoReflect.Descriptor instead. func (*GetAccepted) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{15} + return file_p2p_p2p_proto_rawDescGZIP(), []int{16} } func (x *GetAccepted) GetChainId() []byte { @@ -1506,27 +1692,27 @@ func (x *GetAccepted) GetEngineType() EngineType { return EngineType_ENGINE_TYPE_UNSPECIFIED } -// Message that contains the list of accepted block/vertex IDs in response to -// "get_accepted". For instance, on receiving "get_accepted" that contains -// the sender's accepted frontier IDs, the X-chain engine responds only with -// the accepted vertex IDs of the X-chain DAG. -// -// See "snow/engine/avalanche#GetAccepted" and "SendAccepted". -// See "snow/engine/common/bootstrapper.go#Accepted". +// Accepted is sent in response to GetAccepted. The sending peer responds with +// a subset of container ids from the GetAccepted request that the sending peer +// has accepted. 
type Accepted struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ChainId []byte `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` - RequestId uint32 `protobuf:"varint,2,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + // Chain being responded from + ChainId []byte `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + // Request id of the original GetAccepted request + RequestId uint32 `protobuf:"varint,2,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + // Subset of container ids from the GetAccepted request that the sender has + // accepted ContainerIds [][]byte `protobuf:"bytes,3,rep,name=container_ids,json=containerIds,proto3" json:"container_ids,omitempty"` } func (x *Accepted) Reset() { *x = Accepted{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[16] + mi := &file_p2p_p2p_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1539,7 +1725,7 @@ func (x *Accepted) String() string { func (*Accepted) ProtoMessage() {} func (x *Accepted) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[16] + mi := &file_p2p_p2p_proto_msgTypes[17] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1552,7 +1738,7 @@ func (x *Accepted) ProtoReflect() protoreflect.Message { // Deprecated: Use Accepted.ProtoReflect.Descriptor instead. func (*Accepted) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{16} + return file_p2p_p2p_proto_rawDescGZIP(), []int{17} } func (x *Accepted) GetChainId() []byte { @@ -1576,28 +1762,30 @@ func (x *Accepted) GetContainerIds() [][]byte { return nil } -// Message that requests for the ancestors (parents) of the specified container ID. 
-// The engine bootstrapper sends this message to fetch all accepted containers -// in its transitive path. +// GetAncestors requests the ancestors for a given container. // -// On receiving "get_ancestors", it responds with the ancestors' container bytes -// in "ancestors" message. +// The remote peer should respond with an Ancestors message. type GetAncestors struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ChainId []byte `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` - RequestId uint32 `protobuf:"varint,2,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` - Deadline uint64 `protobuf:"varint,3,opt,name=deadline,proto3" json:"deadline,omitempty"` - ContainerId []byte `protobuf:"bytes,4,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - EngineType EngineType `protobuf:"varint,5,opt,name=engine_type,json=engineType,proto3,enum=p2p.EngineType" json:"engine_type,omitempty"` -} + // Chain being requested from + ChainId []byte `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + // Unique identifier for this request + RequestId uint32 `protobuf:"varint,2,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + // Timeout (ns) for this request + Deadline uint64 `protobuf:"varint,3,opt,name=deadline,proto3" json:"deadline,omitempty"` + // Container for which ancestors are being requested + ContainerId []byte `protobuf:"bytes,4,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + // Consensus type to handle this message + EngineType EngineType `protobuf:"varint,5,opt,name=engine_type,json=engineType,proto3,enum=p2p.EngineType" json:"engine_type,omitempty"` +} func (x *GetAncestors) Reset() { *x = GetAncestors{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[17] + mi := &file_p2p_p2p_proto_msgTypes[18] ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1610,7 +1798,7 @@ func (x *GetAncestors) String() string { func (*GetAncestors) ProtoMessage() {} func (x *GetAncestors) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[17] + mi := &file_p2p_p2p_proto_msgTypes[18] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1623,7 +1811,7 @@ func (x *GetAncestors) ProtoReflect() protoreflect.Message { // Deprecated: Use GetAncestors.ProtoReflect.Descriptor instead. func (*GetAncestors) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{17} + return file_p2p_p2p_proto_rawDescGZIP(), []int{18} } func (x *GetAncestors) GetChainId() []byte { @@ -1661,25 +1849,27 @@ func (x *GetAncestors) GetEngineType() EngineType { return EngineType_ENGINE_TYPE_UNSPECIFIED } -// Message that contains the container bytes of the ancestors -// in response to "get_ancestors". +// Ancestors is sent in response to GetAncestors. // -// On receiving "ancestors", the engine parses the containers and queues them -// to be accepted once we've received the entire chain history. +// Ancestors contains a contiguous ancestry of containers for the requested +// container in order of increasing block height. 
type Ancestors struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ChainId []byte `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` - RequestId uint32 `protobuf:"varint,2,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + // Chain being responded from + ChainId []byte `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + // Request id of the original GetAncestors request + RequestId uint32 `protobuf:"varint,2,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + // Ancestry for the requested container Containers [][]byte `protobuf:"bytes,3,rep,name=containers,proto3" json:"containers,omitempty"` } func (x *Ancestors) Reset() { *x = Ancestors{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[18] + mi := &file_p2p_p2p_proto_msgTypes[19] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1692,7 +1882,7 @@ func (x *Ancestors) String() string { func (*Ancestors) ProtoMessage() {} func (x *Ancestors) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[18] + mi := &file_p2p_p2p_proto_msgTypes[19] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1705,7 +1895,7 @@ func (x *Ancestors) ProtoReflect() protoreflect.Message { // Deprecated: Use Ancestors.ProtoReflect.Descriptor instead. func (*Ancestors) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{18} + return file_p2p_p2p_proto_rawDescGZIP(), []int{19} } func (x *Ancestors) GetChainId() []byte { @@ -1729,26 +1919,30 @@ func (x *Ancestors) GetContainers() [][]byte { return nil } -// Message that requests for the container data. +// Get requests a container from a remote peer. // -// On receiving "get", the engine looks up the container from the storage. 
-// If the container is found, it sends out the container data in "put" message. +// Remote peers should respond with a Put message if they have the container. type Get struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ChainId []byte `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` - RequestId uint32 `protobuf:"varint,2,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` - Deadline uint64 `protobuf:"varint,3,opt,name=deadline,proto3" json:"deadline,omitempty"` - ContainerId []byte `protobuf:"bytes,4,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - EngineType EngineType `protobuf:"varint,5,opt,name=engine_type,json=engineType,proto3,enum=p2p.EngineType" json:"engine_type,omitempty"` + // Chain being requested from + ChainId []byte `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + // Unique identifier for this request + RequestId uint32 `protobuf:"varint,2,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + // Timeout (ns) for this request + Deadline uint64 `protobuf:"varint,3,opt,name=deadline,proto3" json:"deadline,omitempty"` + // Container being requested + ContainerId []byte `protobuf:"bytes,4,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + // Consensus type to handle this message + EngineType EngineType `protobuf:"varint,5,opt,name=engine_type,json=engineType,proto3,enum=p2p.EngineType" json:"engine_type,omitempty"` } func (x *Get) Reset() { *x = Get{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[19] + mi := &file_p2p_p2p_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1761,7 +1955,7 @@ func (x *Get) String() string { func (*Get) ProtoMessage() {} func (x *Get) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[19] + mi := 
&file_p2p_p2p_proto_msgTypes[20] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1774,7 +1968,7 @@ func (x *Get) ProtoReflect() protoreflect.Message { // Deprecated: Use Get.ProtoReflect.Descriptor instead. func (*Get) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{19} + return file_p2p_p2p_proto_rawDescGZIP(), []int{20} } func (x *Get) GetChainId() []byte { @@ -1812,24 +2006,26 @@ func (x *Get) GetEngineType() EngineType { return EngineType_ENGINE_TYPE_UNSPECIFIED } -// Message that contains the container ID and its bytes in response to "get". -// -// On receiving "put", the engine parses the container and tries to issue it to consensus. +// Put is sent in response to Get with the requested block. type Put struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ChainId []byte `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` - RequestId uint32 `protobuf:"varint,2,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` - Container []byte `protobuf:"bytes,3,opt,name=container,proto3" json:"container,omitempty"` + // Chain being responded from + ChainId []byte `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + // Request id of the original Get request + RequestId uint32 `protobuf:"varint,2,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + // Requested container + Container []byte `protobuf:"bytes,3,opt,name=container,proto3" json:"container,omitempty"` + // Consensus type to handle this message EngineType EngineType `protobuf:"varint,4,opt,name=engine_type,json=engineType,proto3,enum=p2p.EngineType" json:"engine_type,omitempty"` } func (x *Put) Reset() { *x = Put{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[20] + mi := &file_p2p_p2p_proto_msgTypes[21] ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1842,7 +2038,7 @@ func (x *Put) String() string { func (*Put) ProtoMessage() {} func (x *Put) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[20] + mi := &file_p2p_p2p_proto_msgTypes[21] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1855,7 +2051,7 @@ func (x *Put) ProtoReflect() protoreflect.Message { // Deprecated: Use Put.ProtoReflect.Descriptor instead. func (*Put) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{20} + return file_p2p_p2p_proto_rawDescGZIP(), []int{21} } func (x *Put) GetChainId() []byte { @@ -1886,31 +2082,32 @@ func (x *Put) GetEngineType() EngineType { return EngineType_ENGINE_TYPE_UNSPECIFIED } -// Message that contains a preferred container ID and its container bytes -// in order to query other peers for their preferences of the container. -// For example, when a new container is issued, the engine sends out -// "push_query" and "pull_query" queries to ask other peers their preferences. -// See "avalanchego/snow/engine/common#SendMixedQuery". +// PushQuery requests the preferences of a remote peer given a container. // -// On receiving the "push_query", the engine parses the incoming container -// and tries to issue the container and all of its parents to the consensus, -// and calls "pull_query" handler to send "chits" for voting. 
+// Remote peers should respond to a PushQuery with a Chits message type PushQuery struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ChainId []byte `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` - RequestId uint32 `protobuf:"varint,2,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` - Deadline uint64 `protobuf:"varint,3,opt,name=deadline,proto3" json:"deadline,omitempty"` - Container []byte `protobuf:"bytes,4,opt,name=container,proto3" json:"container,omitempty"` + // Chain being requested from + ChainId []byte `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + // Unique identifier for this request + RequestId uint32 `protobuf:"varint,2,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + // Timeout (ns) for this request + Deadline uint64 `protobuf:"varint,3,opt,name=deadline,proto3" json:"deadline,omitempty"` + // Container being gossiped + Container []byte `protobuf:"bytes,4,opt,name=container,proto3" json:"container,omitempty"` + // Consensus type to handle this message EngineType EngineType `protobuf:"varint,5,opt,name=engine_type,json=engineType,proto3,enum=p2p.EngineType" json:"engine_type,omitempty"` + // Requesting peer's last accepted height + RequestedHeight uint64 `protobuf:"varint,6,opt,name=requested_height,json=requestedHeight,proto3" json:"requested_height,omitempty"` } func (x *PushQuery) Reset() { *x = PushQuery{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[21] + mi := &file_p2p_p2p_proto_msgTypes[22] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1923,7 +2120,7 @@ func (x *PushQuery) String() string { func (*PushQuery) ProtoMessage() {} func (x *PushQuery) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[21] + mi := &file_p2p_p2p_proto_msgTypes[22] if protoimpl.UnsafeEnabled && x != nil { ms 
:= protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1936,7 +2133,7 @@ func (x *PushQuery) ProtoReflect() protoreflect.Message { // Deprecated: Use PushQuery.ProtoReflect.Descriptor instead. func (*PushQuery) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{21} + return file_p2p_p2p_proto_rawDescGZIP(), []int{22} } func (x *PushQuery) GetChainId() []byte { @@ -1974,27 +2171,39 @@ func (x *PushQuery) GetEngineType() EngineType { return EngineType_ENGINE_TYPE_UNSPECIFIED } -// Message that contains a preferred container ID to query other peers -// for their preferences of the container. -// For example, when a new container is issued, the engine sends out -// "push_query" and "pull_query" queries to ask other peers their preferences. -// See "avalanchego/snow/engine/common#SendMixedQuery". +func (x *PushQuery) GetRequestedHeight() uint64 { + if x != nil { + return x.RequestedHeight + } + return 0 +} + +// PullQuery requests the preferences of a remote peer given a container id. 
+// +// Remote peers should respond to a PullQuery with a Chits message type PullQuery struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ChainId []byte `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` - RequestId uint32 `protobuf:"varint,2,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` - Deadline uint64 `protobuf:"varint,3,opt,name=deadline,proto3" json:"deadline,omitempty"` - ContainerId []byte `protobuf:"bytes,4,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - EngineType EngineType `protobuf:"varint,5,opt,name=engine_type,json=engineType,proto3,enum=p2p.EngineType" json:"engine_type,omitempty"` + // Chain being requested from + ChainId []byte `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + // Unique identifier for this request + RequestId uint32 `protobuf:"varint,2,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + // Timeout (ns) for this request + Deadline uint64 `protobuf:"varint,3,opt,name=deadline,proto3" json:"deadline,omitempty"` + // Container id being gossiped + ContainerId []byte `protobuf:"bytes,4,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + // Consensus type to handle this message + EngineType EngineType `protobuf:"varint,5,opt,name=engine_type,json=engineType,proto3,enum=p2p.EngineType" json:"engine_type,omitempty"` + // Requesting peer's last accepted height + RequestedHeight uint64 `protobuf:"varint,6,opt,name=requested_height,json=requestedHeight,proto3" json:"requested_height,omitempty"` } func (x *PullQuery) Reset() { *x = PullQuery{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[22] + mi := &file_p2p_p2p_proto_msgTypes[23] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2007,7 +2216,7 @@ func (x *PullQuery) String() string { func (*PullQuery) 
ProtoMessage() {} func (x *PullQuery) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[22] + mi := &file_p2p_p2p_proto_msgTypes[23] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2020,7 +2229,7 @@ func (x *PullQuery) ProtoReflect() protoreflect.Message { // Deprecated: Use PullQuery.ProtoReflect.Descriptor instead. func (*PullQuery) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{22} + return file_p2p_p2p_proto_rawDescGZIP(), []int{23} } func (x *PullQuery) GetChainId() []byte { @@ -2058,31 +2267,36 @@ func (x *PullQuery) GetEngineType() EngineType { return EngineType_ENGINE_TYPE_UNSPECIFIED } -// Message that contains the votes/preferences of the local node, -// in response to "push_query" or "pull_query" (e.g., preferred frontier). -// -// On receiving "chits", the engine issues those preferred containers of vertices/blocks -// to the consensus. If the received container is not found, it responds back with -// "get" message to fetch the missing container from the remote peer. +func (x *PullQuery) GetRequestedHeight() uint64 { + if x != nil { + return x.RequestedHeight + } + return 0 +} + +// Chits contains the preferences of a peer in response to a PushQuery or +// PullQuery message. type Chits struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ChainId []byte `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + // Chain being responded from + ChainId []byte `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + // Request id of the original PushQuery/PullQuery request RequestId uint32 `protobuf:"varint,2,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` - // Represents the current preferred frontier. - // TODO: Remove `repeated` once all chains are running Snowman. 
- PreferredContainerIds [][]byte `protobuf:"bytes,3,rep,name=preferred_container_ids,json=preferredContainerIds,proto3" json:"preferred_container_ids,omitempty"` - // Represents the current accepted frontier. - // TODO: Remove `repeated` once all chains are running Snowman. - AcceptedContainerIds [][]byte `protobuf:"bytes,4,rep,name=accepted_container_ids,json=acceptedContainerIds,proto3" json:"accepted_container_ids,omitempty"` + // Currently preferred block + PreferredId []byte `protobuf:"bytes,3,opt,name=preferred_id,json=preferredId,proto3" json:"preferred_id,omitempty"` + // Last accepted block + AcceptedId []byte `protobuf:"bytes,4,opt,name=accepted_id,json=acceptedId,proto3" json:"accepted_id,omitempty"` + // Currently preferred block at the requested height + PreferredIdAtHeight []byte `protobuf:"bytes,5,opt,name=preferred_id_at_height,json=preferredIdAtHeight,proto3" json:"preferred_id_at_height,omitempty"` } func (x *Chits) Reset() { *x = Chits{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[23] + mi := &file_p2p_p2p_proto_msgTypes[24] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2095,7 +2309,7 @@ func (x *Chits) String() string { func (*Chits) ProtoMessage() {} func (x *Chits) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[23] + mi := &file_p2p_p2p_proto_msgTypes[24] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2108,7 +2322,7 @@ func (x *Chits) ProtoReflect() protoreflect.Message { // Deprecated: Use Chits.ProtoReflect.Descriptor instead. 
func (*Chits) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{23} + return file_p2p_p2p_proto_rawDescGZIP(), []int{24} } func (x *Chits) GetChainId() []byte { @@ -2125,35 +2339,50 @@ func (x *Chits) GetRequestId() uint32 { return 0 } -func (x *Chits) GetPreferredContainerIds() [][]byte { +func (x *Chits) GetPreferredId() []byte { + if x != nil { + return x.PreferredId + } + return nil +} + +func (x *Chits) GetAcceptedId() []byte { if x != nil { - return x.PreferredContainerIds + return x.AcceptedId } return nil } -func (x *Chits) GetAcceptedContainerIds() [][]byte { +func (x *Chits) GetPreferredIdAtHeight() []byte { if x != nil { - return x.AcceptedContainerIds + return x.PreferredIdAtHeight } return nil } +// AppRequest is a VM-defined request. +// +// Remote peers must respond to AppRequest with a corresponding AppResponse or +// AppError type AppRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ChainId []byte `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + // Chain being requested from + ChainId []byte `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + // Unique identifier for this request RequestId uint32 `protobuf:"varint,2,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` - Deadline uint64 `protobuf:"varint,3,opt,name=deadline,proto3" json:"deadline,omitempty"` - AppBytes []byte `protobuf:"bytes,4,opt,name=app_bytes,json=appBytes,proto3" json:"app_bytes,omitempty"` + // Timeout (ns) for this request + Deadline uint64 `protobuf:"varint,3,opt,name=deadline,proto3" json:"deadline,omitempty"` + // Request body + AppBytes []byte `protobuf:"bytes,4,opt,name=app_bytes,json=appBytes,proto3" json:"app_bytes,omitempty"` } func (x *AppRequest) Reset() { *x = AppRequest{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[24] + mi := 
&file_p2p_p2p_proto_msgTypes[25] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2166,7 +2395,7 @@ func (x *AppRequest) String() string { func (*AppRequest) ProtoMessage() {} func (x *AppRequest) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[24] + mi := &file_p2p_p2p_proto_msgTypes[25] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2179,7 +2408,7 @@ func (x *AppRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use AppRequest.ProtoReflect.Descriptor instead. func (*AppRequest) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{24} + return file_p2p_p2p_proto_rawDescGZIP(), []int{25} } func (x *AppRequest) GetChainId() []byte { @@ -2210,20 +2439,24 @@ func (x *AppRequest) GetAppBytes() []byte { return nil } +// AppResponse is a VM-defined response sent in response to AppRequest type AppResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ChainId []byte `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + // Chain being responded from + ChainId []byte `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + // Request id of the original AppRequest RequestId uint32 `protobuf:"varint,2,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` - AppBytes []byte `protobuf:"bytes,3,opt,name=app_bytes,json=appBytes,proto3" json:"app_bytes,omitempty"` + // Response body + AppBytes []byte `protobuf:"bytes,3,opt,name=app_bytes,json=appBytes,proto3" json:"app_bytes,omitempty"` } func (x *AppResponse) Reset() { *x = AppResponse{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[25] + mi := &file_p2p_p2p_proto_msgTypes[26] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2236,7 +2469,7 @@ func (x 
*AppResponse) String() string { func (*AppResponse) ProtoMessage() {} func (x *AppResponse) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[25] + mi := &file_p2p_p2p_proto_msgTypes[26] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2249,7 +2482,7 @@ func (x *AppResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use AppResponse.ProtoReflect.Descriptor instead. func (*AppResponse) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{25} + return file_p2p_p2p_proto_rawDescGZIP(), []int{26} } func (x *AppResponse) GetChainId() []byte { @@ -2273,19 +2506,98 @@ func (x *AppResponse) GetAppBytes() []byte { return nil } +// AppError is a VM-defined error sent in response to AppRequest +type AppError struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Chain the message is for + ChainId []byte `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + // Request id of the original AppRequest + RequestId uint32 `protobuf:"varint,2,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + // VM defined error code. VMs may define error codes > 0. 
+ ErrorCode int32 `protobuf:"zigzag32,3,opt,name=error_code,json=errorCode,proto3" json:"error_code,omitempty"` + // VM defined error message + ErrorMessage string `protobuf:"bytes,4,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` +} + +func (x *AppError) Reset() { + *x = AppError{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_p2p_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AppError) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AppError) ProtoMessage() {} + +func (x *AppError) ProtoReflect() protoreflect.Message { + mi := &file_p2p_p2p_proto_msgTypes[27] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AppError.ProtoReflect.Descriptor instead. +func (*AppError) Descriptor() ([]byte, []int) { + return file_p2p_p2p_proto_rawDescGZIP(), []int{27} +} + +func (x *AppError) GetChainId() []byte { + if x != nil { + return x.ChainId + } + return nil +} + +func (x *AppError) GetRequestId() uint32 { + if x != nil { + return x.RequestId + } + return 0 +} + +func (x *AppError) GetErrorCode() int32 { + if x != nil { + return x.ErrorCode + } + return 0 +} + +func (x *AppError) GetErrorMessage() string { + if x != nil { + return x.ErrorMessage + } + return "" +} + +// AppGossip is a VM-defined message type AppGossip struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ChainId []byte `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + // Chain the message is for + ChainId []byte `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + // Message body AppBytes []byte `protobuf:"bytes,2,opt,name=app_bytes,json=appBytes,proto3" 
json:"app_bytes,omitempty"` } func (x *AppGossip) Reset() { *x = AppGossip{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[26] + mi := &file_p2p_p2p_proto_msgTypes[28] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2298,7 +2610,7 @@ func (x *AppGossip) String() string { func (*AppGossip) ProtoMessage() {} func (x *AppGossip) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[26] + mi := &file_p2p_p2p_proto_msgTypes[28] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2311,7 +2623,7 @@ func (x *AppGossip) ProtoReflect() protoreflect.Message { // Deprecated: Use AppGossip.ProtoReflect.Descriptor instead. func (*AppGossip) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{26} + return file_p2p_p2p_proto_rawDescGZIP(), []int{28} } func (x *AppGossip) GetChainId() []byte { @@ -2332,7 +2644,7 @@ var File_p2p_p2p_proto protoreflect.FileDescriptor var file_p2p_p2p_proto_rawDesc = []byte{ 0x0a, 0x0d, 0x70, 0x32, 0x70, 0x2f, 0x70, 0x32, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, - 0x03, 0x70, 0x32, 0x70, 0x22, 0xde, 0x0a, 0x0a, 0x07, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x03, 0x70, 0x32, 0x70, 0x22, 0x9e, 0x0b, 0x0a, 0x07, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x29, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x5f, 0x67, 0x7a, 0x69, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0e, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x47, 0x7a, 0x69, 0x70, 0x12, 0x29, 0x0a, 0x0f, 0x63, @@ -2342,200 +2654,281 @@ var file_p2p_p2p_proto_rawDesc = []byte{ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x09, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x48, 0x00, 0x52, 0x04, 0x70, 0x69, 0x6e, 0x67, 0x12, 0x1f, 0x0a, 0x04, 0x70, 0x6f, 0x6e, 0x67, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x09, 0x2e, 0x70, 0x32, 0x70, 0x2e, 
0x50, 0x6f, 0x6e, 0x67, - 0x48, 0x00, 0x52, 0x04, 0x70, 0x6f, 0x6e, 0x67, 0x12, 0x28, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x70, 0x32, 0x70, 0x2e, - 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x12, 0x2c, 0x0a, 0x09, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, - 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x50, 0x65, 0x65, 0x72, - 0x4c, 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x08, 0x70, 0x65, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, - 0x12, 0x5b, 0x0a, 0x1a, 0x67, 0x65, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x75, - 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x5f, 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x18, 0x0f, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x74, - 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x69, - 0x65, 0x72, 0x48, 0x00, 0x52, 0x17, 0x67, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, - 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x12, 0x51, 0x0a, - 0x16, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x5f, 0x66, - 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, - 0x70, 0x32, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, - 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x48, 0x00, 0x52, 0x14, 0x73, 0x74, 0x61, 0x74, + 0x48, 0x00, 0x52, 0x04, 0x70, 0x6f, 0x6e, 0x67, 0x12, 0x2e, 0x0a, 0x09, 0x68, 0x61, 0x6e, 0x64, + 0x73, 0x68, 0x61, 0x6b, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x32, + 0x70, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x48, 0x00, 0x52, 0x09, 0x68, + 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x12, 0x36, 0x0a, 0x0d, 0x67, 0x65, 0x74, 0x5f, + 0x70, 0x65, 
0x65, 0x72, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x23, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x10, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x65, 0x65, 0x72, 0x4c, 0x69, 0x73, + 0x74, 0x48, 0x00, 0x52, 0x0b, 0x67, 0x65, 0x74, 0x50, 0x65, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, + 0x12, 0x2c, 0x0a, 0x09, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x0e, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x4c, 0x69, + 0x73, 0x74, 0x48, 0x00, 0x52, 0x08, 0x70, 0x65, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x5b, + 0x0a, 0x1a, 0x67, 0x65, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x75, 0x6d, 0x6d, + 0x61, 0x72, 0x79, 0x5f, 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x18, 0x0f, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, - 0x12, 0x5b, 0x0a, 0x1a, 0x67, 0x65, 0x74, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, - 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x11, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x63, - 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, - 0x72, 0x79, 0x48, 0x00, 0x52, 0x17, 0x67, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, - 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x51, 0x0a, - 0x16, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, - 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, - 0x70, 0x32, 0x70, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x48, 0x00, 0x52, 0x14, 0x61, 0x63, 0x63, 0x65, + 0x48, 0x00, 0x52, 0x17, 0x67, 0x65, 0x74, 0x53, 0x74, 
0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, + 0x61, 0x72, 0x79, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x12, 0x51, 0x0a, 0x16, 0x73, + 0x74, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x5f, 0x66, 0x72, 0x6f, + 0x6e, 0x74, 0x69, 0x65, 0x72, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x32, + 0x70, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x46, 0x72, + 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x48, 0x00, 0x52, 0x14, 0x73, 0x74, 0x61, 0x74, 0x65, 0x53, + 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x12, 0x5b, + 0x0a, 0x1a, 0x67, 0x65, 0x74, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x5f, 0x73, + 0x74, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x11, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, - 0x12, 0x4e, 0x0a, 0x15, 0x67, 0x65, 0x74, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, - 0x5f, 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x18, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, - 0x64, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x48, 0x00, 0x52, 0x13, 0x67, 0x65, 0x74, + 0x48, 0x00, 0x52, 0x17, 0x67, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x51, 0x0a, 0x16, 0x61, + 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x75, + 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x32, + 0x70, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, + 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x48, 0x00, 0x52, 0x14, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 
+ 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x4e, + 0x0a, 0x15, 0x67, 0x65, 0x74, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x5f, 0x66, + 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, + 0x70, 0x32, 0x70, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x46, + 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x48, 0x00, 0x52, 0x13, 0x67, 0x65, 0x74, 0x41, 0x63, + 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x12, 0x44, + 0x0a, 0x11, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x72, 0x6f, 0x6e, 0x74, + 0x69, 0x65, 0x72, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, - 0x12, 0x44, 0x0a, 0x11, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x72, 0x6f, - 0x6e, 0x74, 0x69, 0x65, 0x72, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x70, 0x32, - 0x70, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x69, - 0x65, 0x72, 0x48, 0x00, 0x52, 0x10, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x46, 0x72, - 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x12, 0x35, 0x0a, 0x0c, 0x67, 0x65, 0x74, 0x5f, 0x61, 0x63, - 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x70, - 0x32, 0x70, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x48, 0x00, - 0x52, 0x0b, 0x67, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x12, 0x2b, 0x0a, - 0x08, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x0d, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x48, 0x00, - 0x52, 0x08, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x12, 0x38, 0x0a, 0x0d, 0x67, 0x65, - 0x74, 0x5f, 0x61, 0x6e, 0x63, 0x65, 
0x73, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x17, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x11, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x6e, 0x63, 0x65, 0x73, - 0x74, 0x6f, 0x72, 0x73, 0x48, 0x00, 0x52, 0x0c, 0x67, 0x65, 0x74, 0x41, 0x6e, 0x63, 0x65, 0x73, - 0x74, 0x6f, 0x72, 0x73, 0x12, 0x2e, 0x0a, 0x09, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, - 0x73, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x41, 0x6e, - 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, 0x48, 0x00, 0x52, 0x09, 0x61, 0x6e, 0x63, 0x65, 0x73, - 0x74, 0x6f, 0x72, 0x73, 0x12, 0x1c, 0x0a, 0x03, 0x67, 0x65, 0x74, 0x18, 0x19, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x08, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x47, 0x65, 0x74, 0x48, 0x00, 0x52, 0x03, 0x67, - 0x65, 0x74, 0x12, 0x1c, 0x0a, 0x03, 0x70, 0x75, 0x74, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x08, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x50, 0x75, 0x74, 0x48, 0x00, 0x52, 0x03, 0x70, 0x75, 0x74, - 0x12, 0x2f, 0x0a, 0x0a, 0x70, 0x75, 0x73, 0x68, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x1b, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x50, 0x75, 0x73, 0x68, 0x51, - 0x75, 0x65, 0x72, 0x79, 0x48, 0x00, 0x52, 0x09, 0x70, 0x75, 0x73, 0x68, 0x51, 0x75, 0x65, 0x72, - 0x79, 0x12, 0x2f, 0x0a, 0x0a, 0x70, 0x75, 0x6c, 0x6c, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, - 0x1c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x50, 0x75, 0x6c, 0x6c, - 0x51, 0x75, 0x65, 0x72, 0x79, 0x48, 0x00, 0x52, 0x09, 0x70, 0x75, 0x6c, 0x6c, 0x51, 0x75, 0x65, - 0x72, 0x79, 0x12, 0x22, 0x0a, 0x05, 0x63, 0x68, 0x69, 0x74, 0x73, 0x18, 0x1d, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x0a, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x43, 0x68, 0x69, 0x74, 0x73, 0x48, 0x00, 0x52, - 0x05, 0x63, 0x68, 0x69, 0x74, 0x73, 0x12, 0x32, 0x0a, 0x0b, 0x61, 0x70, 0x70, 0x5f, 0x72, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x1e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x70, 0x32, - 0x70, 0x2e, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 
0x48, 0x00, 0x52, 0x0a, - 0x61, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x35, 0x0a, 0x0c, 0x61, 0x70, - 0x70, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x10, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x41, 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x48, 0x00, 0x52, 0x0b, 0x61, 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x2f, 0x0a, 0x0a, 0x61, 0x70, 0x70, 0x5f, 0x67, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x18, - 0x20, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x41, 0x70, 0x70, 0x47, - 0x6f, 0x73, 0x73, 0x69, 0x70, 0x48, 0x00, 0x52, 0x09, 0x61, 0x70, 0x70, 0x47, 0x6f, 0x73, 0x73, - 0x69, 0x70, 0x12, 0x36, 0x0a, 0x0d, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x5f, - 0x61, 0x63, 0x6b, 0x18, 0x21, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x70, 0x32, 0x70, 0x2e, - 0x50, 0x65, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x63, 0x6b, 0x48, 0x00, 0x52, 0x0b, 0x70, - 0x65, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x63, 0x6b, 0x42, 0x09, 0x0a, 0x07, 0x6d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x06, 0x0a, 0x04, 0x50, 0x69, 0x6e, 0x67, 0x22, 0x43, 0x0a, - 0x0c, 0x53, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x12, 0x1b, 0x0a, - 0x09, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x08, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x75, 0x70, - 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x75, 0x70, 0x74, 0x69, - 0x6d, 0x65, 0x22, 0x58, 0x0a, 0x04, 0x50, 0x6f, 0x6e, 0x67, 0x12, 0x16, 0x0a, 0x06, 0x75, 0x70, - 0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x75, 0x70, 0x74, 0x69, - 0x6d, 0x65, 0x12, 0x38, 0x0a, 0x0e, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x5f, 0x75, 0x70, 0x74, - 0x69, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x70, 0x32, 0x70, - 0x2e, 0x53, 
0x75, 0x62, 0x6e, 0x65, 0x74, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x52, 0x0d, 0x73, - 0x75, 0x62, 0x6e, 0x65, 0x74, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x22, 0xf5, 0x01, 0x0a, - 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1d, 0x0a, 0x0a, 0x6e, 0x65, 0x74, 0x77, - 0x6f, 0x72, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x6e, 0x65, - 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x6d, 0x79, 0x5f, 0x74, 0x69, - 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x6d, 0x79, 0x54, 0x69, 0x6d, 0x65, - 0x12, 0x17, 0x0a, 0x07, 0x69, 0x70, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x06, 0x69, 0x70, 0x41, 0x64, 0x64, 0x72, 0x12, 0x17, 0x0a, 0x07, 0x69, 0x70, 0x5f, - 0x70, 0x6f, 0x72, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x69, 0x70, 0x50, 0x6f, - 0x72, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6d, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x12, 0x26, 0x0a, 0x0f, 0x6d, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, - 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x6d, 0x79, 0x56, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x69, 0x67, - 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x73, 0x69, 0x67, 0x12, 0x27, 0x0a, 0x0f, 0x74, - 0x72, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x5f, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x73, 0x18, 0x08, - 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0e, 0x74, 0x72, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x53, 0x75, 0x62, - 0x6e, 0x65, 0x74, 0x73, 0x22, 0xbd, 0x01, 0x0a, 0x0d, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x65, 0x64, - 0x49, 0x70, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x78, 0x35, 0x30, 0x39, 0x5f, 0x63, - 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x0f, 0x78, 0x35, 0x30, 0x39, 0x43, 0x65, 
0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, - 0x65, 0x12, 0x17, 0x0a, 0x07, 0x69, 0x70, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x06, 0x69, 0x70, 0x41, 0x64, 0x64, 0x72, 0x12, 0x17, 0x0a, 0x07, 0x69, 0x70, - 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x69, 0x70, 0x50, - 0x6f, 0x72, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, - 0x70, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, - 0x13, 0x0a, 0x05, 0x74, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, - 0x74, 0x78, 0x49, 0x64, 0x22, 0x48, 0x0a, 0x08, 0x50, 0x65, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, - 0x12, 0x3c, 0x0a, 0x10, 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x65, 0x64, 0x5f, 0x69, 0x70, 0x5f, 0x70, - 0x6f, 0x72, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x70, 0x32, 0x70, - 0x2e, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x65, 0x64, 0x49, 0x70, 0x50, 0x6f, 0x72, 0x74, 0x52, 0x0e, - 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x65, 0x64, 0x49, 0x70, 0x50, 0x6f, 0x72, 0x74, 0x73, 0x22, 0x3c, - 0x0a, 0x07, 0x50, 0x65, 0x65, 0x72, 0x41, 0x63, 0x6b, 0x12, 0x13, 0x0a, 0x05, 0x74, 0x78, 0x5f, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x74, 0x78, 0x49, 0x64, 0x12, 0x1c, - 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x04, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x3e, 0x0a, 0x0b, - 0x50, 0x65, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x63, 0x6b, 0x12, 0x29, 0x0a, 0x09, 0x70, - 0x65, 0x65, 0x72, 0x5f, 0x61, 0x63, 0x6b, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, - 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x41, 0x63, 0x6b, 0x52, 0x08, 
0x70, 0x65, - 0x65, 0x72, 0x41, 0x63, 0x6b, 0x73, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x22, 0x6f, 0x0a, 0x17, - 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x46, - 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, - 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, - 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, - 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x04, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x22, 0x6a, 0x0a, - 0x14, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x46, 0x72, 0x6f, - 0x6e, 0x74, 0x69, 0x65, 0x72, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, - 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, - 0x18, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x22, 0x89, 0x01, 0x0a, 0x17, 0x47, 0x65, - 0x74, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, - 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, - 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, - 0x1a, 0x0a, 0x08, 0x64, 
0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x04, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x68, - 0x65, 0x69, 0x67, 0x68, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x04, 0x52, 0x07, 0x68, 0x65, - 0x69, 0x67, 0x68, 0x74, 0x73, 0x22, 0x71, 0x0a, 0x14, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, + 0x48, 0x00, 0x52, 0x10, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6e, + 0x74, 0x69, 0x65, 0x72, 0x12, 0x35, 0x0a, 0x0c, 0x67, 0x65, 0x74, 0x5f, 0x61, 0x63, 0x63, 0x65, + 0x70, 0x74, 0x65, 0x64, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x70, 0x32, 0x70, + 0x2e, 0x47, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x48, 0x00, 0x52, 0x0b, + 0x67, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x12, 0x2b, 0x0a, 0x08, 0x61, + 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, + 0x70, 0x32, 0x70, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x48, 0x00, 0x52, 0x08, + 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x12, 0x38, 0x0a, 0x0d, 0x67, 0x65, 0x74, 0x5f, + 0x61, 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x11, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, + 0x72, 0x73, 0x48, 0x00, 0x52, 0x0c, 0x67, 0x65, 0x74, 0x41, 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, + 0x72, 0x73, 0x12, 0x2e, 0x0a, 0x09, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, 0x18, + 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x41, 0x6e, 0x63, 0x65, + 0x73, 0x74, 0x6f, 0x72, 0x73, 0x48, 0x00, 0x52, 0x09, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, + 0x72, 0x73, 0x12, 0x1c, 0x0a, 0x03, 0x67, 0x65, 0x74, 0x18, 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x08, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x47, 0x65, 0x74, 0x48, 0x00, 0x52, 0x03, 0x67, 0x65, 0x74, + 0x12, 0x1c, 0x0a, 0x03, 0x70, 0x75, 0x74, 0x18, 0x1a, 0x20, 
0x01, 0x28, 0x0b, 0x32, 0x08, 0x2e, + 0x70, 0x32, 0x70, 0x2e, 0x50, 0x75, 0x74, 0x48, 0x00, 0x52, 0x03, 0x70, 0x75, 0x74, 0x12, 0x2f, + 0x0a, 0x0a, 0x70, 0x75, 0x73, 0x68, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x1b, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x50, 0x75, 0x73, 0x68, 0x51, 0x75, 0x65, + 0x72, 0x79, 0x48, 0x00, 0x52, 0x09, 0x70, 0x75, 0x73, 0x68, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, + 0x2f, 0x0a, 0x0a, 0x70, 0x75, 0x6c, 0x6c, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x1c, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x50, 0x75, 0x6c, 0x6c, 0x51, 0x75, + 0x65, 0x72, 0x79, 0x48, 0x00, 0x52, 0x09, 0x70, 0x75, 0x6c, 0x6c, 0x51, 0x75, 0x65, 0x72, 0x79, + 0x12, 0x22, 0x0a, 0x05, 0x63, 0x68, 0x69, 0x74, 0x73, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x0a, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x43, 0x68, 0x69, 0x74, 0x73, 0x48, 0x00, 0x52, 0x05, 0x63, + 0x68, 0x69, 0x74, 0x73, 0x12, 0x32, 0x0a, 0x0b, 0x61, 0x70, 0x70, 0x5f, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x18, 0x1e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x70, 0x32, 0x70, 0x2e, + 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x61, 0x70, + 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x35, 0x0a, 0x0c, 0x61, 0x70, 0x70, 0x5f, + 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, + 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x41, 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x48, 0x00, 0x52, 0x0b, 0x61, 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x2f, 0x0a, 0x0a, 0x61, 0x70, 0x70, 0x5f, 0x67, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x18, 0x20, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x41, 0x70, 0x70, 0x47, 0x6f, 0x73, + 0x73, 0x69, 0x70, 0x48, 0x00, 0x52, 0x09, 0x61, 0x70, 0x70, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, + 0x12, 0x2c, 0x0a, 0x09, 0x61, 0x70, 0x70, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x22, 0x20, + 
0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x41, 0x70, 0x70, 0x45, 0x72, 0x72, + 0x6f, 0x72, 0x48, 0x00, 0x52, 0x08, 0x61, 0x70, 0x70, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x42, 0x09, + 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4a, 0x04, 0x08, 0x21, 0x10, 0x22, 0x4a, + 0x04, 0x08, 0x24, 0x10, 0x25, 0x22, 0x58, 0x0a, 0x04, 0x50, 0x69, 0x6e, 0x67, 0x12, 0x16, 0x0a, + 0x06, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x75, + 0x70, 0x74, 0x69, 0x6d, 0x65, 0x12, 0x38, 0x0a, 0x0e, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x5f, + 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, + 0x70, 0x32, 0x70, 0x2e, 0x53, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, + 0x52, 0x0d, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x22, + 0x43, 0x0a, 0x0c, 0x53, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x12, + 0x1b, 0x0a, 0x09, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x08, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, + 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x75, 0x70, + 0x74, 0x69, 0x6d, 0x65, 0x22, 0x58, 0x0a, 0x04, 0x50, 0x6f, 0x6e, 0x67, 0x12, 0x16, 0x0a, 0x06, + 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x75, 0x70, + 0x74, 0x69, 0x6d, 0x65, 0x12, 0x38, 0x0a, 0x0e, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x5f, 0x75, + 0x70, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x70, + 0x32, 0x70, 0x2e, 0x53, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x52, + 0x0d, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x22, 0xcc, + 0x03, 0x0a, 0x09, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x12, 0x1d, 0x0a, 0x0a, + 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 
0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x09, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x6d, + 0x79, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x6d, 0x79, + 0x54, 0x69, 0x6d, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x69, 0x70, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x69, 0x70, 0x41, 0x64, 0x64, 0x72, 0x12, 0x17, 0x0a, + 0x07, 0x69, 0x70, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, + 0x69, 0x70, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x79, 0x5f, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6d, 0x79, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x26, 0x0a, 0x0f, 0x69, 0x70, 0x5f, 0x73, 0x69, 0x67, 0x6e, + 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, + 0x69, 0x70, 0x53, 0x69, 0x67, 0x6e, 0x69, 0x6e, 0x67, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x23, 0x0a, + 0x0e, 0x69, 0x70, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x5f, 0x73, 0x69, 0x67, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x69, 0x70, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x53, + 0x69, 0x67, 0x12, 0x27, 0x0a, 0x0f, 0x74, 0x72, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x5f, 0x73, 0x75, + 0x62, 0x6e, 0x65, 0x74, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0e, 0x74, 0x72, 0x61, + 0x63, 0x6b, 0x65, 0x64, 0x53, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x73, 0x12, 0x23, 0x0a, 0x06, 0x63, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x70, 0x32, + 0x70, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x63, + 0x70, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x0d, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, + 0x74, 0x65, 0x64, 0x41, 0x63, 0x70, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x6f, 
0x62, 0x6a, 0x65, 0x63, + 0x74, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x70, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x0c, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x65, 0x64, 0x41, 0x63, 0x70, 0x73, 0x12, 0x31, 0x0a, 0x0b, + 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x10, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x42, 0x6c, 0x6f, 0x6f, 0x6d, 0x46, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x52, 0x0a, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x50, 0x65, 0x65, 0x72, 0x73, 0x12, + 0x1c, 0x0a, 0x0a, 0x69, 0x70, 0x5f, 0x62, 0x6c, 0x73, 0x5f, 0x73, 0x69, 0x67, 0x18, 0x0d, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x08, 0x69, 0x70, 0x42, 0x6c, 0x73, 0x53, 0x69, 0x67, 0x22, 0x5e, 0x0a, + 0x06, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6d, + 0x61, 0x6a, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x6d, 0x61, 0x6a, 0x6f, + 0x72, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x22, 0x39, 0x0a, + 0x0b, 0x42, 0x6c, 0x6f, 0x6f, 0x6d, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, + 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x66, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x61, 0x6c, 0x74, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x04, 0x73, 0x61, 0x6c, 0x74, 0x22, 0xbd, 0x01, 0x0a, 0x0d, 0x43, 0x6c, 0x61, + 0x69, 0x6d, 0x65, 0x64, 0x49, 0x70, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x78, 0x35, + 0x30, 0x39, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0f, 0x78, 0x35, 0x30, 0x39, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, + 0x69, 0x63, 
0x61, 0x74, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x69, 0x70, 0x5f, 0x61, 0x64, 0x64, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x69, 0x70, 0x41, 0x64, 0x64, 0x72, 0x12, 0x17, + 0x0a, 0x07, 0x69, 0x70, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x06, 0x69, 0x70, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x12, 0x13, 0x0a, 0x05, 0x74, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x04, 0x74, 0x78, 0x49, 0x64, 0x22, 0x40, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x50, + 0x65, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x31, 0x0a, 0x0b, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, + 0x5f, 0x70, 0x65, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x70, + 0x32, 0x70, 0x2e, 0x42, 0x6c, 0x6f, 0x6f, 0x6d, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x0a, + 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x50, 0x65, 0x65, 0x72, 0x73, 0x22, 0x48, 0x0a, 0x08, 0x50, 0x65, + 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x3c, 0x0a, 0x10, 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x65, + 0x64, 0x5f, 0x69, 0x70, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x12, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x65, 0x64, 0x49, 0x70, + 0x50, 0x6f, 0x72, 0x74, 0x52, 0x0e, 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x65, 0x64, 0x49, 0x70, 0x50, + 0x6f, 0x72, 0x74, 0x73, 0x22, 0x6f, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x12, + 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 
0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x61, + 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x64, 0x65, 0x61, + 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x22, 0x6a, 0x0a, 0x14, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, + 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x12, 0x19, 0x0a, + 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, + 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, + 0x79, 0x22, 0x89, 0x01, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x75, 0x6d, 0x6d, 0x61, - 0x72, 0x79, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0a, 0x73, 0x75, - 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x49, 0x64, 0x73, 0x22, 0x9d, 0x01, 0x0a, 0x13, 0x47, 0x65, 0x74, - 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 
+ 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, + 0x69, 0x6e, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x73, 0x18, 0x04, + 0x20, 0x03, 0x28, 0x04, 0x52, 0x07, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x73, 0x22, 0x71, 0x0a, + 0x14, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, + 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, + 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, + 0x1f, 0x0a, 0x0b, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x03, + 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0a, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x49, 0x64, 0x73, + 0x22, 0x9d, 0x01, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, + 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, + 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, + 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, + 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x30, + 0x0a, 0x0b, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x0f, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, + 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, + 0x22, 0x75, 0x0a, 0x10, 0x41, 0x63, 
0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6e, + 0x74, 0x69, 0x65, 0x72, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, + 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x21, + 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, + 0x64, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0xba, 0x01, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x41, + 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, + 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, + 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, + 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x23, 0x0a, + 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x04, + 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, + 0x64, 0x73, 0x12, 0x30, 0x0a, 0x0b, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0f, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x45, 0x6e, + 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, + 0x54, 0x79, 0x70, 0x65, 0x22, 0x6f, 0x0a, 0x08, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 
0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, - 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, - 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x64, 0x65, - 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x30, 0x0a, 0x0b, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, - 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0f, 0x2e, 0x70, 0x32, - 0x70, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x65, 0x6e, - 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, 0x22, 0x77, 0x0a, 0x10, 0x41, 0x63, 0x63, 0x65, - 0x70, 0x74, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x12, 0x19, 0x0a, 0x08, - 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, - 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, - 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0c, 0x63, - 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x73, 0x4a, 0x04, 0x08, 0x04, 0x10, - 0x05, 0x22, 0xba, 0x01, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, - 0x64, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, + 0x0c, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x73, 0x4a, + 0x04, 0x08, 0x04, 0x10, 
0x05, 0x22, 0xb9, 0x01, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x41, 0x6e, 0x63, + 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, + 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, + 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x21, 0x0a, 0x0c, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, + 0x30, 0x0a, 0x0b, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0f, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, + 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, + 0x65, 0x22, 0x6b, 0x0a, 0x09, 0x41, 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x19, + 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0a, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x73, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0xb0, + 0x01, 0x0a, 0x03, 0x47, 0x65, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 
0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, + 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, + 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x21, 0x0a, 0x0c, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, + 0x30, 0x0a, 0x0b, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0f, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, + 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, + 0x65, 0x22, 0x8f, 0x01, 0x0a, 0x03, 0x50, 0x75, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, + 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, + 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, + 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x49, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x12, 0x30, 0x0a, 0x0b, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0f, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x45, 0x6e, 0x67, + 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, + 0x79, 0x70, 0x65, 0x22, 0xdc, 0x01, 0x0a, 0x09, 0x50, 0x75, 0x73, 0x68, 0x51, 0x75, 0x65, 0x72, + 0x79, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 
0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x64, - 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x61, - 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0c, - 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x73, 0x12, 0x30, 0x0a, 0x0b, - 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x0f, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, - 0x70, 0x65, 0x52, 0x0a, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, 0x22, 0x6f, - 0x0a, 0x08, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, - 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, - 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x49, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, - 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0c, 0x63, 0x6f, 0x6e, - 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x73, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, - 0xb9, 0x01, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x41, 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, + 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x12, 0x30, 
0x0a, 0x0b, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0f, 0x2e, 0x70, 0x32, 0x70, + 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x65, 0x6e, 0x67, + 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x65, 0x64, 0x5f, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x0f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x48, 0x65, 0x69, 0x67, + 0x68, 0x74, 0x22, 0xe1, 0x01, 0x0a, 0x09, 0x50, 0x75, 0x6c, 0x6c, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, @@ -2546,97 +2939,59 @@ var file_p2p_p2p_proto_rawDesc = []byte{ 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x30, 0x0a, 0x0b, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0f, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, - 0x0a, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, 0x22, 0x6b, 0x0a, 0x09, 0x41, - 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, - 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, - 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, - 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x73, - 0x18, 0x03, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0a, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, - 0x72, 0x73, 0x4a, 0x04, 
0x08, 0x04, 0x10, 0x05, 0x22, 0xb0, 0x01, 0x0a, 0x03, 0x47, 0x65, 0x74, + 0x0a, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, + 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x22, 0xba, 0x01, 0x0a, 0x05, 0x43, 0x68, 0x69, 0x74, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, - 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, - 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x64, 0x65, - 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, - 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x63, 0x6f, - 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x30, 0x0a, 0x0b, 0x65, 0x6e, 0x67, - 0x69, 0x6e, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0f, - 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, - 0x0a, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, 0x22, 0x8f, 0x01, 0x0a, 0x03, - 0x50, 0x75, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, + 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x72, + 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x0b, 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x49, 0x64, 0x12, 0x1f, 0x0a, + 0x0b, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x5f, 0x69, 
0x64, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x0a, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x49, 0x64, 0x12, 0x33, + 0x0a, 0x16, 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x5f, 0x69, 0x64, 0x5f, 0x61, + 0x74, 0x5f, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x13, + 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x49, 0x64, 0x41, 0x74, 0x48, 0x65, 0x69, + 0x67, 0x68, 0x74, 0x22, 0x7f, 0x0a, 0x0a, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x64, + 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x64, + 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x61, 0x70, 0x70, 0x5f, 0x62, + 0x79, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x61, 0x70, 0x70, 0x42, + 0x79, 0x74, 0x65, 0x73, 0x22, 0x64, 0x0a, 0x0b, 0x41, 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1c, 0x0a, - 0x09, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x12, 0x30, 0x0a, 0x0b, 0x65, - 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, - 0x32, 0x0f, 
0x2e, 0x70, 0x32, 0x70, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, - 0x65, 0x52, 0x0a, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, 0x22, 0xb1, 0x01, - 0x0a, 0x09, 0x50, 0x75, 0x73, 0x68, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x63, - 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, - 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, - 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, - 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x12, - 0x30, 0x0a, 0x0b, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0f, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, - 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, - 0x65, 0x22, 0xb6, 0x01, 0x0a, 0x09, 0x50, 0x75, 0x6c, 0x6c, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, - 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, - 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x61, - 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x64, 0x65, 0x61, - 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, - 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 
0x01, 0x28, 0x0c, 0x52, 0x0b, 0x63, 0x6f, 0x6e, - 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x30, 0x0a, 0x0b, 0x65, 0x6e, 0x67, 0x69, - 0x6e, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0f, 0x2e, - 0x70, 0x32, 0x70, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, - 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, 0x22, 0xb5, 0x01, 0x0a, 0x05, 0x43, - 0x68, 0x69, 0x74, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, - 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x36, - 0x0a, 0x17, 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x74, - 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0c, 0x52, - 0x15, 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, - 0x6e, 0x65, 0x72, 0x49, 0x64, 0x73, 0x12, 0x34, 0x0a, 0x16, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, - 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, - 0x18, 0x04, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x14, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, - 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x73, 0x4a, 0x04, 0x08, 0x05, - 0x10, 0x06, 0x22, 0x7f, 0x0a, 0x0a, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, - 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 
0x64, 0x65, - 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x64, 0x65, - 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x61, 0x70, 0x70, 0x5f, 0x62, 0x79, - 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x61, 0x70, 0x70, 0x42, 0x79, - 0x74, 0x65, 0x73, 0x22, 0x64, 0x0a, 0x0b, 0x41, 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, - 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, - 0x61, 0x70, 0x70, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x08, 0x61, 0x70, 0x70, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x43, 0x0a, 0x09, 0x41, 0x70, 0x70, - 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, - 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x61, 0x70, 0x70, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x61, 0x70, 0x70, 0x42, 0x79, 0x74, 0x65, 0x73, 0x2a, 0x5d, - 0x0a, 0x0a, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x17, - 0x45, 0x4e, 0x47, 0x49, 0x4e, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, - 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x19, 0x0a, 0x15, 0x45, 0x4e, 0x47, - 0x49, 0x4e, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x41, 0x56, 0x41, 0x4c, 0x41, 0x4e, 0x43, - 0x48, 0x45, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x4e, 0x47, 0x49, 0x4e, 0x45, 0x5f, 0x54, - 0x59, 0x50, 0x45, 0x5f, 0x53, 0x4e, 0x4f, 0x57, 0x4d, 0x41, 0x4e, 0x10, 0x02, 0x42, 0x2e, 0x5a, - 0x2c, 0x67, 0x69, 0x74, 
0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x76, 0x61, 0x2d, - 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x61, 0x76, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x68, 0x65, 0x67, 0x6f, - 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x2f, 0x70, 0x32, 0x70, 0x62, 0x06, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1b, 0x0a, + 0x09, 0x61, 0x70, 0x70, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x08, 0x61, 0x70, 0x70, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x88, 0x01, 0x0a, 0x08, 0x41, + 0x70, 0x70, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, + 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, + 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, + 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x11, 0x52, 0x09, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x64, 0x65, + 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x43, 0x0a, 0x09, 0x41, 0x70, 0x70, 0x47, 0x6f, 0x73, 0x73, + 0x69, 0x70, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1b, 0x0a, + 0x09, 0x61, 0x70, 0x70, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x08, 0x61, 0x70, 0x70, 0x42, 0x79, 0x74, 0x65, 0x73, 0x2a, 0x5d, 0x0a, 0x0a, 0x45, 0x6e, + 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x17, 0x45, 0x4e, 0x47, 0x49, + 0x4e, 0x45, 0x5f, 0x54, 0x59, 
0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, + 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x19, 0x0a, 0x15, 0x45, 0x4e, 0x47, 0x49, 0x4e, 0x45, 0x5f, + 0x54, 0x59, 0x50, 0x45, 0x5f, 0x41, 0x56, 0x41, 0x4c, 0x41, 0x4e, 0x43, 0x48, 0x45, 0x10, 0x01, + 0x12, 0x17, 0x0a, 0x13, 0x45, 0x4e, 0x47, 0x49, 0x4e, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x53, 0x4e, 0x4f, 0x57, 0x4d, 0x41, 0x4e, 0x10, 0x02, 0x42, 0x2e, 0x5a, 0x2c, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x76, 0x61, 0x2d, 0x6c, 0x61, 0x62, 0x73, + 0x2f, 0x61, 0x76, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x68, 0x65, 0x67, 0x6f, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x2f, 0x70, 0x32, 0x70, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, } var ( @@ -2652,76 +3007,82 @@ func file_p2p_p2p_proto_rawDescGZIP() []byte { } var file_p2p_p2p_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_p2p_p2p_proto_msgTypes = make([]protoimpl.MessageInfo, 27) +var file_p2p_p2p_proto_msgTypes = make([]protoimpl.MessageInfo, 29) var file_p2p_p2p_proto_goTypes = []interface{}{ (EngineType)(0), // 0: p2p.EngineType (*Message)(nil), // 1: p2p.Message (*Ping)(nil), // 2: p2p.Ping (*SubnetUptime)(nil), // 3: p2p.SubnetUptime (*Pong)(nil), // 4: p2p.Pong - (*Version)(nil), // 5: p2p.Version - (*ClaimedIpPort)(nil), // 6: p2p.ClaimedIpPort - (*PeerList)(nil), // 7: p2p.PeerList - (*PeerAck)(nil), // 8: p2p.PeerAck - (*PeerListAck)(nil), // 9: p2p.PeerListAck - (*GetStateSummaryFrontier)(nil), // 10: p2p.GetStateSummaryFrontier - (*StateSummaryFrontier)(nil), // 11: p2p.StateSummaryFrontier - (*GetAcceptedStateSummary)(nil), // 12: p2p.GetAcceptedStateSummary - (*AcceptedStateSummary)(nil), // 13: p2p.AcceptedStateSummary - (*GetAcceptedFrontier)(nil), // 14: p2p.GetAcceptedFrontier - (*AcceptedFrontier)(nil), // 15: p2p.AcceptedFrontier - (*GetAccepted)(nil), // 16: p2p.GetAccepted - (*Accepted)(nil), // 17: p2p.Accepted - (*GetAncestors)(nil), // 18: p2p.GetAncestors - 
(*Ancestors)(nil), // 19: p2p.Ancestors - (*Get)(nil), // 20: p2p.Get - (*Put)(nil), // 21: p2p.Put - (*PushQuery)(nil), // 22: p2p.PushQuery - (*PullQuery)(nil), // 23: p2p.PullQuery - (*Chits)(nil), // 24: p2p.Chits - (*AppRequest)(nil), // 25: p2p.AppRequest - (*AppResponse)(nil), // 26: p2p.AppResponse - (*AppGossip)(nil), // 27: p2p.AppGossip + (*Handshake)(nil), // 5: p2p.Handshake + (*Client)(nil), // 6: p2p.Client + (*BloomFilter)(nil), // 7: p2p.BloomFilter + (*ClaimedIpPort)(nil), // 8: p2p.ClaimedIpPort + (*GetPeerList)(nil), // 9: p2p.GetPeerList + (*PeerList)(nil), // 10: p2p.PeerList + (*GetStateSummaryFrontier)(nil), // 11: p2p.GetStateSummaryFrontier + (*StateSummaryFrontier)(nil), // 12: p2p.StateSummaryFrontier + (*GetAcceptedStateSummary)(nil), // 13: p2p.GetAcceptedStateSummary + (*AcceptedStateSummary)(nil), // 14: p2p.AcceptedStateSummary + (*GetAcceptedFrontier)(nil), // 15: p2p.GetAcceptedFrontier + (*AcceptedFrontier)(nil), // 16: p2p.AcceptedFrontier + (*GetAccepted)(nil), // 17: p2p.GetAccepted + (*Accepted)(nil), // 18: p2p.Accepted + (*GetAncestors)(nil), // 19: p2p.GetAncestors + (*Ancestors)(nil), // 20: p2p.Ancestors + (*Get)(nil), // 21: p2p.Get + (*Put)(nil), // 22: p2p.Put + (*PushQuery)(nil), // 23: p2p.PushQuery + (*PullQuery)(nil), // 24: p2p.PullQuery + (*Chits)(nil), // 25: p2p.Chits + (*AppRequest)(nil), // 26: p2p.AppRequest + (*AppResponse)(nil), // 27: p2p.AppResponse + (*AppError)(nil), // 28: p2p.AppError + (*AppGossip)(nil), // 29: p2p.AppGossip } var file_p2p_p2p_proto_depIdxs = []int32{ 2, // 0: p2p.Message.ping:type_name -> p2p.Ping 4, // 1: p2p.Message.pong:type_name -> p2p.Pong - 5, // 2: p2p.Message.version:type_name -> p2p.Version - 7, // 3: p2p.Message.peer_list:type_name -> p2p.PeerList - 10, // 4: p2p.Message.get_state_summary_frontier:type_name -> p2p.GetStateSummaryFrontier - 11, // 5: p2p.Message.state_summary_frontier:type_name -> p2p.StateSummaryFrontier - 12, // 6: 
p2p.Message.get_accepted_state_summary:type_name -> p2p.GetAcceptedStateSummary - 13, // 7: p2p.Message.accepted_state_summary:type_name -> p2p.AcceptedStateSummary - 14, // 8: p2p.Message.get_accepted_frontier:type_name -> p2p.GetAcceptedFrontier - 15, // 9: p2p.Message.accepted_frontier:type_name -> p2p.AcceptedFrontier - 16, // 10: p2p.Message.get_accepted:type_name -> p2p.GetAccepted - 17, // 11: p2p.Message.accepted:type_name -> p2p.Accepted - 18, // 12: p2p.Message.get_ancestors:type_name -> p2p.GetAncestors - 19, // 13: p2p.Message.ancestors:type_name -> p2p.Ancestors - 20, // 14: p2p.Message.get:type_name -> p2p.Get - 21, // 15: p2p.Message.put:type_name -> p2p.Put - 22, // 16: p2p.Message.push_query:type_name -> p2p.PushQuery - 23, // 17: p2p.Message.pull_query:type_name -> p2p.PullQuery - 24, // 18: p2p.Message.chits:type_name -> p2p.Chits - 25, // 19: p2p.Message.app_request:type_name -> p2p.AppRequest - 26, // 20: p2p.Message.app_response:type_name -> p2p.AppResponse - 27, // 21: p2p.Message.app_gossip:type_name -> p2p.AppGossip - 9, // 22: p2p.Message.peer_list_ack:type_name -> p2p.PeerListAck - 3, // 23: p2p.Pong.subnet_uptimes:type_name -> p2p.SubnetUptime - 6, // 24: p2p.PeerList.claimed_ip_ports:type_name -> p2p.ClaimedIpPort - 8, // 25: p2p.PeerListAck.peer_acks:type_name -> p2p.PeerAck - 0, // 26: p2p.GetAcceptedFrontier.engine_type:type_name -> p2p.EngineType - 0, // 27: p2p.GetAccepted.engine_type:type_name -> p2p.EngineType - 0, // 28: p2p.GetAncestors.engine_type:type_name -> p2p.EngineType - 0, // 29: p2p.Get.engine_type:type_name -> p2p.EngineType - 0, // 30: p2p.Put.engine_type:type_name -> p2p.EngineType - 0, // 31: p2p.PushQuery.engine_type:type_name -> p2p.EngineType - 0, // 32: p2p.PullQuery.engine_type:type_name -> p2p.EngineType - 33, // [33:33] is the sub-list for method output_type - 33, // [33:33] is the sub-list for method input_type - 33, // [33:33] is the sub-list for extension type_name - 33, // [33:33] is the sub-list for 
extension extendee - 0, // [0:33] is the sub-list for field type_name + 5, // 2: p2p.Message.handshake:type_name -> p2p.Handshake + 9, // 3: p2p.Message.get_peer_list:type_name -> p2p.GetPeerList + 10, // 4: p2p.Message.peer_list:type_name -> p2p.PeerList + 11, // 5: p2p.Message.get_state_summary_frontier:type_name -> p2p.GetStateSummaryFrontier + 12, // 6: p2p.Message.state_summary_frontier:type_name -> p2p.StateSummaryFrontier + 13, // 7: p2p.Message.get_accepted_state_summary:type_name -> p2p.GetAcceptedStateSummary + 14, // 8: p2p.Message.accepted_state_summary:type_name -> p2p.AcceptedStateSummary + 15, // 9: p2p.Message.get_accepted_frontier:type_name -> p2p.GetAcceptedFrontier + 16, // 10: p2p.Message.accepted_frontier:type_name -> p2p.AcceptedFrontier + 17, // 11: p2p.Message.get_accepted:type_name -> p2p.GetAccepted + 18, // 12: p2p.Message.accepted:type_name -> p2p.Accepted + 19, // 13: p2p.Message.get_ancestors:type_name -> p2p.GetAncestors + 20, // 14: p2p.Message.ancestors:type_name -> p2p.Ancestors + 21, // 15: p2p.Message.get:type_name -> p2p.Get + 22, // 16: p2p.Message.put:type_name -> p2p.Put + 23, // 17: p2p.Message.push_query:type_name -> p2p.PushQuery + 24, // 18: p2p.Message.pull_query:type_name -> p2p.PullQuery + 25, // 19: p2p.Message.chits:type_name -> p2p.Chits + 26, // 20: p2p.Message.app_request:type_name -> p2p.AppRequest + 27, // 21: p2p.Message.app_response:type_name -> p2p.AppResponse + 29, // 22: p2p.Message.app_gossip:type_name -> p2p.AppGossip + 28, // 23: p2p.Message.app_error:type_name -> p2p.AppError + 3, // 24: p2p.Ping.subnet_uptimes:type_name -> p2p.SubnetUptime + 3, // 25: p2p.Pong.subnet_uptimes:type_name -> p2p.SubnetUptime + 6, // 26: p2p.Handshake.client:type_name -> p2p.Client + 7, // 27: p2p.Handshake.known_peers:type_name -> p2p.BloomFilter + 7, // 28: p2p.GetPeerList.known_peers:type_name -> p2p.BloomFilter + 8, // 29: p2p.PeerList.claimed_ip_ports:type_name -> p2p.ClaimedIpPort + 0, // 30: 
p2p.GetAcceptedFrontier.engine_type:type_name -> p2p.EngineType + 0, // 31: p2p.GetAccepted.engine_type:type_name -> p2p.EngineType + 0, // 32: p2p.GetAncestors.engine_type:type_name -> p2p.EngineType + 0, // 33: p2p.Get.engine_type:type_name -> p2p.EngineType + 0, // 34: p2p.Put.engine_type:type_name -> p2p.EngineType + 0, // 35: p2p.PushQuery.engine_type:type_name -> p2p.EngineType + 0, // 36: p2p.PullQuery.engine_type:type_name -> p2p.EngineType + 37, // [37:37] is the sub-list for method output_type + 37, // [37:37] is the sub-list for method input_type + 37, // [37:37] is the sub-list for extension type_name + 37, // [37:37] is the sub-list for extension extendee + 0, // [0:37] is the sub-list for field type_name } func init() { file_p2p_p2p_proto_init() } @@ -2779,7 +3140,7 @@ func file_p2p_p2p_proto_init() { } } file_p2p_p2p_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Version); i { + switch v := v.(*Handshake); i { case 0: return &v.state case 1: @@ -2791,7 +3152,7 @@ func file_p2p_p2p_proto_init() { } } file_p2p_p2p_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ClaimedIpPort); i { + switch v := v.(*Client); i { case 0: return &v.state case 1: @@ -2803,7 +3164,7 @@ func file_p2p_p2p_proto_init() { } } file_p2p_p2p_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PeerList); i { + switch v := v.(*BloomFilter); i { case 0: return &v.state case 1: @@ -2815,7 +3176,7 @@ func file_p2p_p2p_proto_init() { } } file_p2p_p2p_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PeerAck); i { + switch v := v.(*ClaimedIpPort); i { case 0: return &v.state case 1: @@ -2827,7 +3188,7 @@ func file_p2p_p2p_proto_init() { } } file_p2p_p2p_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PeerListAck); i { + switch v := v.(*GetPeerList); i { case 0: return &v.state case 1: @@ -2839,7 
+3200,7 @@ func file_p2p_p2p_proto_init() { } } file_p2p_p2p_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetStateSummaryFrontier); i { + switch v := v.(*PeerList); i { case 0: return &v.state case 1: @@ -2851,7 +3212,7 @@ func file_p2p_p2p_proto_init() { } } file_p2p_p2p_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StateSummaryFrontier); i { + switch v := v.(*GetStateSummaryFrontier); i { case 0: return &v.state case 1: @@ -2863,7 +3224,7 @@ func file_p2p_p2p_proto_init() { } } file_p2p_p2p_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetAcceptedStateSummary); i { + switch v := v.(*StateSummaryFrontier); i { case 0: return &v.state case 1: @@ -2875,7 +3236,7 @@ func file_p2p_p2p_proto_init() { } } file_p2p_p2p_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AcceptedStateSummary); i { + switch v := v.(*GetAcceptedStateSummary); i { case 0: return &v.state case 1: @@ -2887,7 +3248,7 @@ func file_p2p_p2p_proto_init() { } } file_p2p_p2p_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetAcceptedFrontier); i { + switch v := v.(*AcceptedStateSummary); i { case 0: return &v.state case 1: @@ -2899,7 +3260,7 @@ func file_p2p_p2p_proto_init() { } } file_p2p_p2p_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AcceptedFrontier); i { + switch v := v.(*GetAcceptedFrontier); i { case 0: return &v.state case 1: @@ -2911,7 +3272,7 @@ func file_p2p_p2p_proto_init() { } } file_p2p_p2p_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetAccepted); i { + switch v := v.(*AcceptedFrontier); i { case 0: return &v.state case 1: @@ -2923,7 +3284,7 @@ func file_p2p_p2p_proto_init() { } } file_p2p_p2p_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Accepted); i { 
+ switch v := v.(*GetAccepted); i { case 0: return &v.state case 1: @@ -2935,7 +3296,7 @@ func file_p2p_p2p_proto_init() { } } file_p2p_p2p_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetAncestors); i { + switch v := v.(*Accepted); i { case 0: return &v.state case 1: @@ -2947,7 +3308,7 @@ func file_p2p_p2p_proto_init() { } } file_p2p_p2p_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Ancestors); i { + switch v := v.(*GetAncestors); i { case 0: return &v.state case 1: @@ -2959,7 +3320,7 @@ func file_p2p_p2p_proto_init() { } } file_p2p_p2p_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Get); i { + switch v := v.(*Ancestors); i { case 0: return &v.state case 1: @@ -2971,7 +3332,7 @@ func file_p2p_p2p_proto_init() { } } file_p2p_p2p_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Put); i { + switch v := v.(*Get); i { case 0: return &v.state case 1: @@ -2983,7 +3344,7 @@ func file_p2p_p2p_proto_init() { } } file_p2p_p2p_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PushQuery); i { + switch v := v.(*Put); i { case 0: return &v.state case 1: @@ -2995,7 +3356,7 @@ func file_p2p_p2p_proto_init() { } } file_p2p_p2p_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PullQuery); i { + switch v := v.(*PushQuery); i { case 0: return &v.state case 1: @@ -3007,7 +3368,7 @@ func file_p2p_p2p_proto_init() { } } file_p2p_p2p_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Chits); i { + switch v := v.(*PullQuery); i { case 0: return &v.state case 1: @@ -3019,7 +3380,7 @@ func file_p2p_p2p_proto_init() { } } file_p2p_p2p_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AppRequest); i { + switch v := v.(*Chits); i { case 0: return &v.state case 1: @@ -3031,7 
+3392,7 @@ func file_p2p_p2p_proto_init() { } } file_p2p_p2p_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AppResponse); i { + switch v := v.(*AppRequest); i { case 0: return &v.state case 1: @@ -3043,6 +3404,30 @@ func file_p2p_p2p_proto_init() { } } file_p2p_p2p_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AppResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_p2p_p2p_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AppError); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_p2p_p2p_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*AppGossip); i { case 0: return &v.state @@ -3060,8 +3445,9 @@ func file_p2p_p2p_proto_init() { (*Message_CompressedZstd)(nil), (*Message_Ping)(nil), (*Message_Pong)(nil), - (*Message_Version)(nil), - (*Message_PeerList)(nil), + (*Message_Handshake)(nil), + (*Message_GetPeerList)(nil), + (*Message_PeerList_)(nil), (*Message_GetStateSummaryFrontier)(nil), (*Message_StateSummaryFrontier_)(nil), (*Message_GetAcceptedStateSummary)(nil), @@ -3080,7 +3466,7 @@ func file_p2p_p2p_proto_init() { (*Message_AppRequest)(nil), (*Message_AppResponse)(nil), (*Message_AppGossip)(nil), - (*Message_PeerListAck)(nil), + (*Message_AppError)(nil), } type x struct{} out := protoimpl.TypeBuilder{ @@ -3088,7 +3474,7 @@ func file_p2p_p2p_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_p2p_p2p_proto_rawDesc, NumEnums: 1, - NumMessages: 27, + NumMessages: 29, NumExtensions: 0, NumServices: 0, }, diff --git a/avalanchego/proto/pb/rpcdb/rpcdb.pb.go b/avalanchego/proto/pb/rpcdb/rpcdb.pb.go index d7cfa7b6..246732f1 100644 --- a/avalanchego/proto/pb/rpcdb/rpcdb.pb.go +++ 
b/avalanchego/proto/pb/rpcdb/rpcdb.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc (unknown) // source: rpcdb/rpcdb.proto diff --git a/avalanchego/proto/pb/rpcdb/rpcdb_grpc.pb.go b/avalanchego/proto/pb/rpcdb/rpcdb_grpc.pb.go index f46a3020..cba03091 100644 --- a/avalanchego/proto/pb/rpcdb/rpcdb_grpc.pb.go +++ b/avalanchego/proto/pb/rpcdb/rpcdb_grpc.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.2.0 +// - protoc-gen-go-grpc v1.3.0 // - protoc (unknown) // source: rpcdb/rpcdb.proto @@ -19,6 +19,21 @@ import ( // Requires gRPC-Go v1.32.0 or later. const _ = grpc.SupportPackageIsVersion7 +const ( + Database_Has_FullMethodName = "/rpcdb.Database/Has" + Database_Get_FullMethodName = "/rpcdb.Database/Get" + Database_Put_FullMethodName = "/rpcdb.Database/Put" + Database_Delete_FullMethodName = "/rpcdb.Database/Delete" + Database_Compact_FullMethodName = "/rpcdb.Database/Compact" + Database_Close_FullMethodName = "/rpcdb.Database/Close" + Database_HealthCheck_FullMethodName = "/rpcdb.Database/HealthCheck" + Database_WriteBatch_FullMethodName = "/rpcdb.Database/WriteBatch" + Database_NewIteratorWithStartAndPrefix_FullMethodName = "/rpcdb.Database/NewIteratorWithStartAndPrefix" + Database_IteratorNext_FullMethodName = "/rpcdb.Database/IteratorNext" + Database_IteratorError_FullMethodName = "/rpcdb.Database/IteratorError" + Database_IteratorRelease_FullMethodName = "/rpcdb.Database/IteratorRelease" +) + // DatabaseClient is the client API for Database service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
@@ -47,7 +62,7 @@ func NewDatabaseClient(cc grpc.ClientConnInterface) DatabaseClient { func (c *databaseClient) Has(ctx context.Context, in *HasRequest, opts ...grpc.CallOption) (*HasResponse, error) { out := new(HasResponse) - err := c.cc.Invoke(ctx, "/rpcdb.Database/Has", in, out, opts...) + err := c.cc.Invoke(ctx, Database_Has_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -56,7 +71,7 @@ func (c *databaseClient) Has(ctx context.Context, in *HasRequest, opts ...grpc.C func (c *databaseClient) Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*GetResponse, error) { out := new(GetResponse) - err := c.cc.Invoke(ctx, "/rpcdb.Database/Get", in, out, opts...) + err := c.cc.Invoke(ctx, Database_Get_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -65,7 +80,7 @@ func (c *databaseClient) Get(ctx context.Context, in *GetRequest, opts ...grpc.C func (c *databaseClient) Put(ctx context.Context, in *PutRequest, opts ...grpc.CallOption) (*PutResponse, error) { out := new(PutResponse) - err := c.cc.Invoke(ctx, "/rpcdb.Database/Put", in, out, opts...) + err := c.cc.Invoke(ctx, Database_Put_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -74,7 +89,7 @@ func (c *databaseClient) Put(ctx context.Context, in *PutRequest, opts ...grpc.C func (c *databaseClient) Delete(ctx context.Context, in *DeleteRequest, opts ...grpc.CallOption) (*DeleteResponse, error) { out := new(DeleteResponse) - err := c.cc.Invoke(ctx, "/rpcdb.Database/Delete", in, out, opts...) + err := c.cc.Invoke(ctx, Database_Delete_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -83,7 +98,7 @@ func (c *databaseClient) Delete(ctx context.Context, in *DeleteRequest, opts ... func (c *databaseClient) Compact(ctx context.Context, in *CompactRequest, opts ...grpc.CallOption) (*CompactResponse, error) { out := new(CompactResponse) - err := c.cc.Invoke(ctx, "/rpcdb.Database/Compact", in, out, opts...) 
+ err := c.cc.Invoke(ctx, Database_Compact_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -92,7 +107,7 @@ func (c *databaseClient) Compact(ctx context.Context, in *CompactRequest, opts . func (c *databaseClient) Close(ctx context.Context, in *CloseRequest, opts ...grpc.CallOption) (*CloseResponse, error) { out := new(CloseResponse) - err := c.cc.Invoke(ctx, "/rpcdb.Database/Close", in, out, opts...) + err := c.cc.Invoke(ctx, Database_Close_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -101,7 +116,7 @@ func (c *databaseClient) Close(ctx context.Context, in *CloseRequest, opts ...gr func (c *databaseClient) HealthCheck(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*HealthCheckResponse, error) { out := new(HealthCheckResponse) - err := c.cc.Invoke(ctx, "/rpcdb.Database/HealthCheck", in, out, opts...) + err := c.cc.Invoke(ctx, Database_HealthCheck_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -110,7 +125,7 @@ func (c *databaseClient) HealthCheck(ctx context.Context, in *emptypb.Empty, opt func (c *databaseClient) WriteBatch(ctx context.Context, in *WriteBatchRequest, opts ...grpc.CallOption) (*WriteBatchResponse, error) { out := new(WriteBatchResponse) - err := c.cc.Invoke(ctx, "/rpcdb.Database/WriteBatch", in, out, opts...) + err := c.cc.Invoke(ctx, Database_WriteBatch_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -119,7 +134,7 @@ func (c *databaseClient) WriteBatch(ctx context.Context, in *WriteBatchRequest, func (c *databaseClient) NewIteratorWithStartAndPrefix(ctx context.Context, in *NewIteratorWithStartAndPrefixRequest, opts ...grpc.CallOption) (*NewIteratorWithStartAndPrefixResponse, error) { out := new(NewIteratorWithStartAndPrefixResponse) - err := c.cc.Invoke(ctx, "/rpcdb.Database/NewIteratorWithStartAndPrefix", in, out, opts...) + err := c.cc.Invoke(ctx, Database_NewIteratorWithStartAndPrefix_FullMethodName, in, out, opts...) 
if err != nil { return nil, err } @@ -128,7 +143,7 @@ func (c *databaseClient) NewIteratorWithStartAndPrefix(ctx context.Context, in * func (c *databaseClient) IteratorNext(ctx context.Context, in *IteratorNextRequest, opts ...grpc.CallOption) (*IteratorNextResponse, error) { out := new(IteratorNextResponse) - err := c.cc.Invoke(ctx, "/rpcdb.Database/IteratorNext", in, out, opts...) + err := c.cc.Invoke(ctx, Database_IteratorNext_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -137,7 +152,7 @@ func (c *databaseClient) IteratorNext(ctx context.Context, in *IteratorNextReque func (c *databaseClient) IteratorError(ctx context.Context, in *IteratorErrorRequest, opts ...grpc.CallOption) (*IteratorErrorResponse, error) { out := new(IteratorErrorResponse) - err := c.cc.Invoke(ctx, "/rpcdb.Database/IteratorError", in, out, opts...) + err := c.cc.Invoke(ctx, Database_IteratorError_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -146,7 +161,7 @@ func (c *databaseClient) IteratorError(ctx context.Context, in *IteratorErrorReq func (c *databaseClient) IteratorRelease(ctx context.Context, in *IteratorReleaseRequest, opts ...grpc.CallOption) (*IteratorReleaseResponse, error) { out := new(IteratorReleaseResponse) - err := c.cc.Invoke(ctx, "/rpcdb.Database/IteratorRelease", in, out, opts...) + err := c.cc.Invoke(ctx, Database_IteratorRelease_FullMethodName, in, out, opts...) 
if err != nil { return nil, err } @@ -235,7 +250,7 @@ func _Database_Has_Handler(srv interface{}, ctx context.Context, dec func(interf } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/rpcdb.Database/Has", + FullMethod: Database_Has_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DatabaseServer).Has(ctx, req.(*HasRequest)) @@ -253,7 +268,7 @@ func _Database_Get_Handler(srv interface{}, ctx context.Context, dec func(interf } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/rpcdb.Database/Get", + FullMethod: Database_Get_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DatabaseServer).Get(ctx, req.(*GetRequest)) @@ -271,7 +286,7 @@ func _Database_Put_Handler(srv interface{}, ctx context.Context, dec func(interf } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/rpcdb.Database/Put", + FullMethod: Database_Put_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DatabaseServer).Put(ctx, req.(*PutRequest)) @@ -289,7 +304,7 @@ func _Database_Delete_Handler(srv interface{}, ctx context.Context, dec func(int } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/rpcdb.Database/Delete", + FullMethod: Database_Delete_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DatabaseServer).Delete(ctx, req.(*DeleteRequest)) @@ -307,7 +322,7 @@ func _Database_Compact_Handler(srv interface{}, ctx context.Context, dec func(in } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/rpcdb.Database/Compact", + FullMethod: Database_Compact_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DatabaseServer).Compact(ctx, req.(*CompactRequest)) @@ -325,7 +340,7 @@ func _Database_Close_Handler(srv interface{}, ctx context.Context, dec func(inte } info := 
&grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/rpcdb.Database/Close", + FullMethod: Database_Close_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DatabaseServer).Close(ctx, req.(*CloseRequest)) @@ -343,7 +358,7 @@ func _Database_HealthCheck_Handler(srv interface{}, ctx context.Context, dec fun } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/rpcdb.Database/HealthCheck", + FullMethod: Database_HealthCheck_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DatabaseServer).HealthCheck(ctx, req.(*emptypb.Empty)) @@ -361,7 +376,7 @@ func _Database_WriteBatch_Handler(srv interface{}, ctx context.Context, dec func } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/rpcdb.Database/WriteBatch", + FullMethod: Database_WriteBatch_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DatabaseServer).WriteBatch(ctx, req.(*WriteBatchRequest)) @@ -379,7 +394,7 @@ func _Database_NewIteratorWithStartAndPrefix_Handler(srv interface{}, ctx contex } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/rpcdb.Database/NewIteratorWithStartAndPrefix", + FullMethod: Database_NewIteratorWithStartAndPrefix_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DatabaseServer).NewIteratorWithStartAndPrefix(ctx, req.(*NewIteratorWithStartAndPrefixRequest)) @@ -397,7 +412,7 @@ func _Database_IteratorNext_Handler(srv interface{}, ctx context.Context, dec fu } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/rpcdb.Database/IteratorNext", + FullMethod: Database_IteratorNext_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DatabaseServer).IteratorNext(ctx, req.(*IteratorNextRequest)) @@ -415,7 +430,7 @@ func _Database_IteratorError_Handler(srv interface{}, ctx context.Context, 
dec f } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/rpcdb.Database/IteratorError", + FullMethod: Database_IteratorError_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DatabaseServer).IteratorError(ctx, req.(*IteratorErrorRequest)) @@ -433,7 +448,7 @@ func _Database_IteratorRelease_Handler(srv interface{}, ctx context.Context, dec } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/rpcdb.Database/IteratorRelease", + FullMethod: Database_IteratorRelease_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DatabaseServer).IteratorRelease(ctx, req.(*IteratorReleaseRequest)) diff --git a/avalanchego/proto/pb/sdk/sdk.pb.go b/avalanchego/proto/pb/sdk/sdk.pb.go new file mode 100644 index 00000000..b90c2345 --- /dev/null +++ b/avalanchego/proto/pb/sdk/sdk.pb.go @@ -0,0 +1,279 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc (unknown) +// source: sdk/sdk.proto + +package sdk + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type PullGossipRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Salt []byte `protobuf:"bytes,2,opt,name=salt,proto3" json:"salt,omitempty"` + Filter []byte `protobuf:"bytes,3,opt,name=filter,proto3" json:"filter,omitempty"` +} + +func (x *PullGossipRequest) Reset() { + *x = PullGossipRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_sdk_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PullGossipRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PullGossipRequest) ProtoMessage() {} + +func (x *PullGossipRequest) ProtoReflect() protoreflect.Message { + mi := &file_sdk_sdk_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PullGossipRequest.ProtoReflect.Descriptor instead. 
+func (*PullGossipRequest) Descriptor() ([]byte, []int) { + return file_sdk_sdk_proto_rawDescGZIP(), []int{0} +} + +func (x *PullGossipRequest) GetSalt() []byte { + if x != nil { + return x.Salt + } + return nil +} + +func (x *PullGossipRequest) GetFilter() []byte { + if x != nil { + return x.Filter + } + return nil +} + +type PullGossipResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Gossip [][]byte `protobuf:"bytes,1,rep,name=gossip,proto3" json:"gossip,omitempty"` +} + +func (x *PullGossipResponse) Reset() { + *x = PullGossipResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_sdk_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PullGossipResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PullGossipResponse) ProtoMessage() {} + +func (x *PullGossipResponse) ProtoReflect() protoreflect.Message { + mi := &file_sdk_sdk_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PullGossipResponse.ProtoReflect.Descriptor instead. 
+func (*PullGossipResponse) Descriptor() ([]byte, []int) { + return file_sdk_sdk_proto_rawDescGZIP(), []int{1} +} + +func (x *PullGossipResponse) GetGossip() [][]byte { + if x != nil { + return x.Gossip + } + return nil +} + +type PushGossip struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Gossip [][]byte `protobuf:"bytes,1,rep,name=gossip,proto3" json:"gossip,omitempty"` +} + +func (x *PushGossip) Reset() { + *x = PushGossip{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_sdk_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PushGossip) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PushGossip) ProtoMessage() {} + +func (x *PushGossip) ProtoReflect() protoreflect.Message { + mi := &file_sdk_sdk_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PushGossip.ProtoReflect.Descriptor instead. 
+func (*PushGossip) Descriptor() ([]byte, []int) { + return file_sdk_sdk_proto_rawDescGZIP(), []int{2} +} + +func (x *PushGossip) GetGossip() [][]byte { + if x != nil { + return x.Gossip + } + return nil +} + +var File_sdk_sdk_proto protoreflect.FileDescriptor + +var file_sdk_sdk_proto_rawDesc = []byte{ + 0x0a, 0x0d, 0x73, 0x64, 0x6b, 0x2f, 0x73, 0x64, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x03, 0x73, 0x64, 0x6b, 0x22, 0x45, 0x0a, 0x11, 0x50, 0x75, 0x6c, 0x6c, 0x47, 0x6f, 0x73, 0x73, + 0x69, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x61, 0x6c, + 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x73, 0x61, 0x6c, 0x74, 0x12, 0x16, 0x0a, + 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x66, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x22, 0x2c, 0x0a, 0x12, 0x50, + 0x75, 0x6c, 0x6c, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x16, 0x0a, 0x06, 0x67, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0c, 0x52, 0x06, 0x67, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x22, 0x24, 0x0a, 0x0a, 0x50, 0x75, 0x73, + 0x68, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x12, 0x16, 0x0a, 0x06, 0x67, 0x6f, 0x73, 0x73, 0x69, + 0x70, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x06, 0x67, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x42, + 0x2e, 0x5a, 0x2c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x76, + 0x61, 0x2d, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x61, 0x76, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x68, 0x65, + 0x67, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x2f, 0x73, 0x64, 0x6b, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_sdk_sdk_proto_rawDescOnce sync.Once + file_sdk_sdk_proto_rawDescData = file_sdk_sdk_proto_rawDesc +) + +func file_sdk_sdk_proto_rawDescGZIP() []byte { + file_sdk_sdk_proto_rawDescOnce.Do(func() { + file_sdk_sdk_proto_rawDescData = 
protoimpl.X.CompressGZIP(file_sdk_sdk_proto_rawDescData) + }) + return file_sdk_sdk_proto_rawDescData +} + +var file_sdk_sdk_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_sdk_sdk_proto_goTypes = []interface{}{ + (*PullGossipRequest)(nil), // 0: sdk.PullGossipRequest + (*PullGossipResponse)(nil), // 1: sdk.PullGossipResponse + (*PushGossip)(nil), // 2: sdk.PushGossip +} +var file_sdk_sdk_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_sdk_sdk_proto_init() } +func file_sdk_sdk_proto_init() { + if File_sdk_sdk_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_sdk_sdk_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PullGossipRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_sdk_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PullGossipResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_sdk_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PushGossip); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_sdk_sdk_proto_rawDesc, + NumEnums: 0, + NumMessages: 3, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_sdk_sdk_proto_goTypes, + DependencyIndexes: file_sdk_sdk_proto_depIdxs, + MessageInfos: file_sdk_sdk_proto_msgTypes, + 
}.Build() + File_sdk_sdk_proto = out.File + file_sdk_sdk_proto_rawDesc = nil + file_sdk_sdk_proto_goTypes = nil + file_sdk_sdk_proto_depIdxs = nil +} diff --git a/avalanchego/proto/pb/sharedmemory/sharedmemory.pb.go b/avalanchego/proto/pb/sharedmemory/sharedmemory.pb.go index 02d4fbb6..5de7c788 100644 --- a/avalanchego/proto/pb/sharedmemory/sharedmemory.pb.go +++ b/avalanchego/proto/pb/sharedmemory/sharedmemory.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc (unknown) // source: sharedmemory/sharedmemory.proto diff --git a/avalanchego/proto/pb/sharedmemory/sharedmemory_grpc.pb.go b/avalanchego/proto/pb/sharedmemory/sharedmemory_grpc.pb.go index 51382a4c..38b3c949 100644 --- a/avalanchego/proto/pb/sharedmemory/sharedmemory_grpc.pb.go +++ b/avalanchego/proto/pb/sharedmemory/sharedmemory_grpc.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.2.0 +// - protoc-gen-go-grpc v1.3.0 // - protoc (unknown) // source: sharedmemory/sharedmemory.proto @@ -18,6 +18,12 @@ import ( // Requires gRPC-Go v1.32.0 or later. const _ = grpc.SupportPackageIsVersion7 +const ( + SharedMemory_Get_FullMethodName = "/sharedmemory.SharedMemory/Get" + SharedMemory_Indexed_FullMethodName = "/sharedmemory.SharedMemory/Indexed" + SharedMemory_Apply_FullMethodName = "/sharedmemory.SharedMemory/Apply" +) + // SharedMemoryClient is the client API for SharedMemory service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. @@ -37,7 +43,7 @@ func NewSharedMemoryClient(cc grpc.ClientConnInterface) SharedMemoryClient { func (c *sharedMemoryClient) Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*GetResponse, error) { out := new(GetResponse) - err := c.cc.Invoke(ctx, "/sharedmemory.SharedMemory/Get", in, out, opts...) 
+ err := c.cc.Invoke(ctx, SharedMemory_Get_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -46,7 +52,7 @@ func (c *sharedMemoryClient) Get(ctx context.Context, in *GetRequest, opts ...gr func (c *sharedMemoryClient) Indexed(ctx context.Context, in *IndexedRequest, opts ...grpc.CallOption) (*IndexedResponse, error) { out := new(IndexedResponse) - err := c.cc.Invoke(ctx, "/sharedmemory.SharedMemory/Indexed", in, out, opts...) + err := c.cc.Invoke(ctx, SharedMemory_Indexed_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -55,7 +61,7 @@ func (c *sharedMemoryClient) Indexed(ctx context.Context, in *IndexedRequest, op func (c *sharedMemoryClient) Apply(ctx context.Context, in *ApplyRequest, opts ...grpc.CallOption) (*ApplyResponse, error) { out := new(ApplyResponse) - err := c.cc.Invoke(ctx, "/sharedmemory.SharedMemory/Apply", in, out, opts...) + err := c.cc.Invoke(ctx, SharedMemory_Apply_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -108,7 +114,7 @@ func _SharedMemory_Get_Handler(srv interface{}, ctx context.Context, dec func(in } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/sharedmemory.SharedMemory/Get", + FullMethod: SharedMemory_Get_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SharedMemoryServer).Get(ctx, req.(*GetRequest)) @@ -126,7 +132,7 @@ func _SharedMemory_Indexed_Handler(srv interface{}, ctx context.Context, dec fun } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/sharedmemory.SharedMemory/Indexed", + FullMethod: SharedMemory_Indexed_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SharedMemoryServer).Indexed(ctx, req.(*IndexedRequest)) @@ -144,7 +150,7 @@ func _SharedMemory_Apply_Handler(srv interface{}, ctx context.Context, dec func( } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/sharedmemory.SharedMemory/Apply", + FullMethod: 
SharedMemory_Apply_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SharedMemoryServer).Apply(ctx, req.(*ApplyRequest)) diff --git a/avalanchego/proto/pb/sync/sync.pb.go b/avalanchego/proto/pb/sync/sync.pb.go new file mode 100644 index 00000000..eb72e145 --- /dev/null +++ b/avalanchego/proto/pb/sync/sync.pb.go @@ -0,0 +1,2123 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc (unknown) +// source: sync/sync.proto + +package sync + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Request represents a request for information during syncing. 
+type Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Message: + // + // *Request_RangeProofRequest + // *Request_ChangeProofRequest + Message isRequest_Message `protobuf_oneof:"message"` +} + +func (x *Request) Reset() { + *x = Request{} + if protoimpl.UnsafeEnabled { + mi := &file_sync_sync_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Request) ProtoMessage() {} + +func (x *Request) ProtoReflect() protoreflect.Message { + mi := &file_sync_sync_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Request.ProtoReflect.Descriptor instead. 
+func (*Request) Descriptor() ([]byte, []int) { + return file_sync_sync_proto_rawDescGZIP(), []int{0} +} + +func (m *Request) GetMessage() isRequest_Message { + if m != nil { + return m.Message + } + return nil +} + +func (x *Request) GetRangeProofRequest() *SyncGetRangeProofRequest { + if x, ok := x.GetMessage().(*Request_RangeProofRequest); ok { + return x.RangeProofRequest + } + return nil +} + +func (x *Request) GetChangeProofRequest() *SyncGetChangeProofRequest { + if x, ok := x.GetMessage().(*Request_ChangeProofRequest); ok { + return x.ChangeProofRequest + } + return nil +} + +type isRequest_Message interface { + isRequest_Message() +} + +type Request_RangeProofRequest struct { + RangeProofRequest *SyncGetRangeProofRequest `protobuf:"bytes,1,opt,name=range_proof_request,json=rangeProofRequest,proto3,oneof"` +} + +type Request_ChangeProofRequest struct { + ChangeProofRequest *SyncGetChangeProofRequest `protobuf:"bytes,2,opt,name=change_proof_request,json=changeProofRequest,proto3,oneof"` +} + +func (*Request_RangeProofRequest) isRequest_Message() {} + +func (*Request_ChangeProofRequest) isRequest_Message() {} + +type GetMerkleRootResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + RootHash []byte `protobuf:"bytes,1,opt,name=root_hash,json=rootHash,proto3" json:"root_hash,omitempty"` +} + +func (x *GetMerkleRootResponse) Reset() { + *x = GetMerkleRootResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_sync_sync_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetMerkleRootResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetMerkleRootResponse) ProtoMessage() {} + +func (x *GetMerkleRootResponse) ProtoReflect() protoreflect.Message { + mi := &file_sync_sync_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if 
ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetMerkleRootResponse.ProtoReflect.Descriptor instead. +func (*GetMerkleRootResponse) Descriptor() ([]byte, []int) { + return file_sync_sync_proto_rawDescGZIP(), []int{1} +} + +func (x *GetMerkleRootResponse) GetRootHash() []byte { + if x != nil { + return x.RootHash + } + return nil +} + +type GetProofRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` +} + +func (x *GetProofRequest) Reset() { + *x = GetProofRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_sync_sync_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetProofRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetProofRequest) ProtoMessage() {} + +func (x *GetProofRequest) ProtoReflect() protoreflect.Message { + mi := &file_sync_sync_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetProofRequest.ProtoReflect.Descriptor instead. 
+func (*GetProofRequest) Descriptor() ([]byte, []int) { + return file_sync_sync_proto_rawDescGZIP(), []int{2} +} + +func (x *GetProofRequest) GetKey() []byte { + if x != nil { + return x.Key + } + return nil +} + +type GetProofResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Proof *Proof `protobuf:"bytes,1,opt,name=proof,proto3" json:"proof,omitempty"` +} + +func (x *GetProofResponse) Reset() { + *x = GetProofResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_sync_sync_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetProofResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetProofResponse) ProtoMessage() {} + +func (x *GetProofResponse) ProtoReflect() protoreflect.Message { + mi := &file_sync_sync_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetProofResponse.ProtoReflect.Descriptor instead. 
+func (*GetProofResponse) Descriptor() ([]byte, []int) { + return file_sync_sync_proto_rawDescGZIP(), []int{3} +} + +func (x *GetProofResponse) GetProof() *Proof { + if x != nil { + return x.Proof + } + return nil +} + +type Proof struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value *MaybeBytes `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + Proof []*ProofNode `protobuf:"bytes,3,rep,name=proof,proto3" json:"proof,omitempty"` +} + +func (x *Proof) Reset() { + *x = Proof{} + if protoimpl.UnsafeEnabled { + mi := &file_sync_sync_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Proof) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Proof) ProtoMessage() {} + +func (x *Proof) ProtoReflect() protoreflect.Message { + mi := &file_sync_sync_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Proof.ProtoReflect.Descriptor instead. +func (*Proof) Descriptor() ([]byte, []int) { + return file_sync_sync_proto_rawDescGZIP(), []int{4} +} + +func (x *Proof) GetKey() []byte { + if x != nil { + return x.Key + } + return nil +} + +func (x *Proof) GetValue() *MaybeBytes { + if x != nil { + return x.Value + } + return nil +} + +func (x *Proof) GetProof() []*ProofNode { + if x != nil { + return x.Proof + } + return nil +} + +// For use in sync client, which has a restriction on the size of +// the response. GetChangeProof in the DB service doesn't. 
+type SyncGetChangeProofRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + StartRootHash []byte `protobuf:"bytes,1,opt,name=start_root_hash,json=startRootHash,proto3" json:"start_root_hash,omitempty"` + EndRootHash []byte `protobuf:"bytes,2,opt,name=end_root_hash,json=endRootHash,proto3" json:"end_root_hash,omitempty"` + StartKey *MaybeBytes `protobuf:"bytes,3,opt,name=start_key,json=startKey,proto3" json:"start_key,omitempty"` + EndKey *MaybeBytes `protobuf:"bytes,4,opt,name=end_key,json=endKey,proto3" json:"end_key,omitempty"` + KeyLimit uint32 `protobuf:"varint,5,opt,name=key_limit,json=keyLimit,proto3" json:"key_limit,omitempty"` + BytesLimit uint32 `protobuf:"varint,6,opt,name=bytes_limit,json=bytesLimit,proto3" json:"bytes_limit,omitempty"` +} + +func (x *SyncGetChangeProofRequest) Reset() { + *x = SyncGetChangeProofRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_sync_sync_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SyncGetChangeProofRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SyncGetChangeProofRequest) ProtoMessage() {} + +func (x *SyncGetChangeProofRequest) ProtoReflect() protoreflect.Message { + mi := &file_sync_sync_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SyncGetChangeProofRequest.ProtoReflect.Descriptor instead. 
+func (*SyncGetChangeProofRequest) Descriptor() ([]byte, []int) { + return file_sync_sync_proto_rawDescGZIP(), []int{5} +} + +func (x *SyncGetChangeProofRequest) GetStartRootHash() []byte { + if x != nil { + return x.StartRootHash + } + return nil +} + +func (x *SyncGetChangeProofRequest) GetEndRootHash() []byte { + if x != nil { + return x.EndRootHash + } + return nil +} + +func (x *SyncGetChangeProofRequest) GetStartKey() *MaybeBytes { + if x != nil { + return x.StartKey + } + return nil +} + +func (x *SyncGetChangeProofRequest) GetEndKey() *MaybeBytes { + if x != nil { + return x.EndKey + } + return nil +} + +func (x *SyncGetChangeProofRequest) GetKeyLimit() uint32 { + if x != nil { + return x.KeyLimit + } + return 0 +} + +func (x *SyncGetChangeProofRequest) GetBytesLimit() uint32 { + if x != nil { + return x.BytesLimit + } + return 0 +} + +type SyncGetChangeProofResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Response: + // + // *SyncGetChangeProofResponse_ChangeProof + // *SyncGetChangeProofResponse_RangeProof + Response isSyncGetChangeProofResponse_Response `protobuf_oneof:"response"` +} + +func (x *SyncGetChangeProofResponse) Reset() { + *x = SyncGetChangeProofResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_sync_sync_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SyncGetChangeProofResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SyncGetChangeProofResponse) ProtoMessage() {} + +func (x *SyncGetChangeProofResponse) ProtoReflect() protoreflect.Message { + mi := &file_sync_sync_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use 
SyncGetChangeProofResponse.ProtoReflect.Descriptor instead. +func (*SyncGetChangeProofResponse) Descriptor() ([]byte, []int) { + return file_sync_sync_proto_rawDescGZIP(), []int{6} +} + +func (m *SyncGetChangeProofResponse) GetResponse() isSyncGetChangeProofResponse_Response { + if m != nil { + return m.Response + } + return nil +} + +func (x *SyncGetChangeProofResponse) GetChangeProof() *ChangeProof { + if x, ok := x.GetResponse().(*SyncGetChangeProofResponse_ChangeProof); ok { + return x.ChangeProof + } + return nil +} + +func (x *SyncGetChangeProofResponse) GetRangeProof() *RangeProof { + if x, ok := x.GetResponse().(*SyncGetChangeProofResponse_RangeProof); ok { + return x.RangeProof + } + return nil +} + +type isSyncGetChangeProofResponse_Response interface { + isSyncGetChangeProofResponse_Response() +} + +type SyncGetChangeProofResponse_ChangeProof struct { + ChangeProof *ChangeProof `protobuf:"bytes,1,opt,name=change_proof,json=changeProof,proto3,oneof"` +} + +type SyncGetChangeProofResponse_RangeProof struct { + RangeProof *RangeProof `protobuf:"bytes,2,opt,name=range_proof,json=rangeProof,proto3,oneof"` +} + +func (*SyncGetChangeProofResponse_ChangeProof) isSyncGetChangeProofResponse_Response() {} + +func (*SyncGetChangeProofResponse_RangeProof) isSyncGetChangeProofResponse_Response() {} + +type GetChangeProofRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + StartRootHash []byte `protobuf:"bytes,1,opt,name=start_root_hash,json=startRootHash,proto3" json:"start_root_hash,omitempty"` + EndRootHash []byte `protobuf:"bytes,2,opt,name=end_root_hash,json=endRootHash,proto3" json:"end_root_hash,omitempty"` + StartKey *MaybeBytes `protobuf:"bytes,3,opt,name=start_key,json=startKey,proto3" json:"start_key,omitempty"` + EndKey *MaybeBytes `protobuf:"bytes,4,opt,name=end_key,json=endKey,proto3" json:"end_key,omitempty"` + KeyLimit uint32 
`protobuf:"varint,5,opt,name=key_limit,json=keyLimit,proto3" json:"key_limit,omitempty"` +} + +func (x *GetChangeProofRequest) Reset() { + *x = GetChangeProofRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_sync_sync_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetChangeProofRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetChangeProofRequest) ProtoMessage() {} + +func (x *GetChangeProofRequest) ProtoReflect() protoreflect.Message { + mi := &file_sync_sync_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetChangeProofRequest.ProtoReflect.Descriptor instead. +func (*GetChangeProofRequest) Descriptor() ([]byte, []int) { + return file_sync_sync_proto_rawDescGZIP(), []int{7} +} + +func (x *GetChangeProofRequest) GetStartRootHash() []byte { + if x != nil { + return x.StartRootHash + } + return nil +} + +func (x *GetChangeProofRequest) GetEndRootHash() []byte { + if x != nil { + return x.EndRootHash + } + return nil +} + +func (x *GetChangeProofRequest) GetStartKey() *MaybeBytes { + if x != nil { + return x.StartKey + } + return nil +} + +func (x *GetChangeProofRequest) GetEndKey() *MaybeBytes { + if x != nil { + return x.EndKey + } + return nil +} + +func (x *GetChangeProofRequest) GetKeyLimit() uint32 { + if x != nil { + return x.KeyLimit + } + return 0 +} + +type GetChangeProofResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Response: + // + // *GetChangeProofResponse_ChangeProof + // *GetChangeProofResponse_RootNotPresent + Response isGetChangeProofResponse_Response `protobuf_oneof:"response"` +} + +func (x *GetChangeProofResponse) Reset() { + *x = 
GetChangeProofResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_sync_sync_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetChangeProofResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetChangeProofResponse) ProtoMessage() {} + +func (x *GetChangeProofResponse) ProtoReflect() protoreflect.Message { + mi := &file_sync_sync_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetChangeProofResponse.ProtoReflect.Descriptor instead. +func (*GetChangeProofResponse) Descriptor() ([]byte, []int) { + return file_sync_sync_proto_rawDescGZIP(), []int{8} +} + +func (m *GetChangeProofResponse) GetResponse() isGetChangeProofResponse_Response { + if m != nil { + return m.Response + } + return nil +} + +func (x *GetChangeProofResponse) GetChangeProof() *ChangeProof { + if x, ok := x.GetResponse().(*GetChangeProofResponse_ChangeProof); ok { + return x.ChangeProof + } + return nil +} + +func (x *GetChangeProofResponse) GetRootNotPresent() bool { + if x, ok := x.GetResponse().(*GetChangeProofResponse_RootNotPresent); ok { + return x.RootNotPresent + } + return false +} + +type isGetChangeProofResponse_Response interface { + isGetChangeProofResponse_Response() +} + +type GetChangeProofResponse_ChangeProof struct { + ChangeProof *ChangeProof `protobuf:"bytes,1,opt,name=change_proof,json=changeProof,proto3,oneof"` +} + +type GetChangeProofResponse_RootNotPresent struct { + // True iff server errored with merkledb.ErrInsufficientHistory. 
+ RootNotPresent bool `protobuf:"varint,2,opt,name=root_not_present,json=rootNotPresent,proto3,oneof"` +} + +func (*GetChangeProofResponse_ChangeProof) isGetChangeProofResponse_Response() {} + +func (*GetChangeProofResponse_RootNotPresent) isGetChangeProofResponse_Response() {} + +type VerifyChangeProofRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Proof *ChangeProof `protobuf:"bytes,1,opt,name=proof,proto3" json:"proof,omitempty"` + StartKey *MaybeBytes `protobuf:"bytes,2,opt,name=start_key,json=startKey,proto3" json:"start_key,omitempty"` + EndKey *MaybeBytes `protobuf:"bytes,3,opt,name=end_key,json=endKey,proto3" json:"end_key,omitempty"` + ExpectedRootHash []byte `protobuf:"bytes,4,opt,name=expected_root_hash,json=expectedRootHash,proto3" json:"expected_root_hash,omitempty"` +} + +func (x *VerifyChangeProofRequest) Reset() { + *x = VerifyChangeProofRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_sync_sync_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VerifyChangeProofRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VerifyChangeProofRequest) ProtoMessage() {} + +func (x *VerifyChangeProofRequest) ProtoReflect() protoreflect.Message { + mi := &file_sync_sync_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VerifyChangeProofRequest.ProtoReflect.Descriptor instead. 
+func (*VerifyChangeProofRequest) Descriptor() ([]byte, []int) { + return file_sync_sync_proto_rawDescGZIP(), []int{9} +} + +func (x *VerifyChangeProofRequest) GetProof() *ChangeProof { + if x != nil { + return x.Proof + } + return nil +} + +func (x *VerifyChangeProofRequest) GetStartKey() *MaybeBytes { + if x != nil { + return x.StartKey + } + return nil +} + +func (x *VerifyChangeProofRequest) GetEndKey() *MaybeBytes { + if x != nil { + return x.EndKey + } + return nil +} + +func (x *VerifyChangeProofRequest) GetExpectedRootHash() []byte { + if x != nil { + return x.ExpectedRootHash + } + return nil +} + +type VerifyChangeProofResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // If empty, there was no error. + Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *VerifyChangeProofResponse) Reset() { + *x = VerifyChangeProofResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_sync_sync_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VerifyChangeProofResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VerifyChangeProofResponse) ProtoMessage() {} + +func (x *VerifyChangeProofResponse) ProtoReflect() protoreflect.Message { + mi := &file_sync_sync_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VerifyChangeProofResponse.ProtoReflect.Descriptor instead. 
+func (*VerifyChangeProofResponse) Descriptor() ([]byte, []int) { + return file_sync_sync_proto_rawDescGZIP(), []int{10} +} + +func (x *VerifyChangeProofResponse) GetError() string { + if x != nil { + return x.Error + } + return "" +} + +type CommitChangeProofRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Proof *ChangeProof `protobuf:"bytes,1,opt,name=proof,proto3" json:"proof,omitempty"` +} + +func (x *CommitChangeProofRequest) Reset() { + *x = CommitChangeProofRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_sync_sync_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CommitChangeProofRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CommitChangeProofRequest) ProtoMessage() {} + +func (x *CommitChangeProofRequest) ProtoReflect() protoreflect.Message { + mi := &file_sync_sync_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CommitChangeProofRequest.ProtoReflect.Descriptor instead. +func (*CommitChangeProofRequest) Descriptor() ([]byte, []int) { + return file_sync_sync_proto_rawDescGZIP(), []int{11} +} + +func (x *CommitChangeProofRequest) GetProof() *ChangeProof { + if x != nil { + return x.Proof + } + return nil +} + +// For use in sync client, which has a restriction on the size of +// the response. GetRangeProof in the DB service doesn't. 
+type SyncGetRangeProofRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + RootHash []byte `protobuf:"bytes,1,opt,name=root_hash,json=rootHash,proto3" json:"root_hash,omitempty"` + StartKey *MaybeBytes `protobuf:"bytes,2,opt,name=start_key,json=startKey,proto3" json:"start_key,omitempty"` + EndKey *MaybeBytes `protobuf:"bytes,3,opt,name=end_key,json=endKey,proto3" json:"end_key,omitempty"` + KeyLimit uint32 `protobuf:"varint,4,opt,name=key_limit,json=keyLimit,proto3" json:"key_limit,omitempty"` + BytesLimit uint32 `protobuf:"varint,5,opt,name=bytes_limit,json=bytesLimit,proto3" json:"bytes_limit,omitempty"` +} + +func (x *SyncGetRangeProofRequest) Reset() { + *x = SyncGetRangeProofRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_sync_sync_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SyncGetRangeProofRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SyncGetRangeProofRequest) ProtoMessage() {} + +func (x *SyncGetRangeProofRequest) ProtoReflect() protoreflect.Message { + mi := &file_sync_sync_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SyncGetRangeProofRequest.ProtoReflect.Descriptor instead. 
+func (*SyncGetRangeProofRequest) Descriptor() ([]byte, []int) { + return file_sync_sync_proto_rawDescGZIP(), []int{12} +} + +func (x *SyncGetRangeProofRequest) GetRootHash() []byte { + if x != nil { + return x.RootHash + } + return nil +} + +func (x *SyncGetRangeProofRequest) GetStartKey() *MaybeBytes { + if x != nil { + return x.StartKey + } + return nil +} + +func (x *SyncGetRangeProofRequest) GetEndKey() *MaybeBytes { + if x != nil { + return x.EndKey + } + return nil +} + +func (x *SyncGetRangeProofRequest) GetKeyLimit() uint32 { + if x != nil { + return x.KeyLimit + } + return 0 +} + +func (x *SyncGetRangeProofRequest) GetBytesLimit() uint32 { + if x != nil { + return x.BytesLimit + } + return 0 +} + +type GetRangeProofRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + RootHash []byte `protobuf:"bytes,1,opt,name=root_hash,json=rootHash,proto3" json:"root_hash,omitempty"` + StartKey *MaybeBytes `protobuf:"bytes,2,opt,name=start_key,json=startKey,proto3" json:"start_key,omitempty"` + EndKey *MaybeBytes `protobuf:"bytes,3,opt,name=end_key,json=endKey,proto3" json:"end_key,omitempty"` + KeyLimit uint32 `protobuf:"varint,4,opt,name=key_limit,json=keyLimit,proto3" json:"key_limit,omitempty"` +} + +func (x *GetRangeProofRequest) Reset() { + *x = GetRangeProofRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_sync_sync_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetRangeProofRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetRangeProofRequest) ProtoMessage() {} + +func (x *GetRangeProofRequest) ProtoReflect() protoreflect.Message { + mi := &file_sync_sync_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// 
Deprecated: Use GetRangeProofRequest.ProtoReflect.Descriptor instead. +func (*GetRangeProofRequest) Descriptor() ([]byte, []int) { + return file_sync_sync_proto_rawDescGZIP(), []int{13} +} + +func (x *GetRangeProofRequest) GetRootHash() []byte { + if x != nil { + return x.RootHash + } + return nil +} + +func (x *GetRangeProofRequest) GetStartKey() *MaybeBytes { + if x != nil { + return x.StartKey + } + return nil +} + +func (x *GetRangeProofRequest) GetEndKey() *MaybeBytes { + if x != nil { + return x.EndKey + } + return nil +} + +func (x *GetRangeProofRequest) GetKeyLimit() uint32 { + if x != nil { + return x.KeyLimit + } + return 0 +} + +type GetRangeProofResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Proof *RangeProof `protobuf:"bytes,1,opt,name=proof,proto3" json:"proof,omitempty"` +} + +func (x *GetRangeProofResponse) Reset() { + *x = GetRangeProofResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_sync_sync_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetRangeProofResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetRangeProofResponse) ProtoMessage() {} + +func (x *GetRangeProofResponse) ProtoReflect() protoreflect.Message { + mi := &file_sync_sync_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetRangeProofResponse.ProtoReflect.Descriptor instead. 
+func (*GetRangeProofResponse) Descriptor() ([]byte, []int) { + return file_sync_sync_proto_rawDescGZIP(), []int{14} +} + +func (x *GetRangeProofResponse) GetProof() *RangeProof { + if x != nil { + return x.Proof + } + return nil +} + +type CommitRangeProofRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + StartKey *MaybeBytes `protobuf:"bytes,1,opt,name=start_key,json=startKey,proto3" json:"start_key,omitempty"` + EndKey *MaybeBytes `protobuf:"bytes,2,opt,name=end_key,json=endKey,proto3" json:"end_key,omitempty"` + RangeProof *RangeProof `protobuf:"bytes,3,opt,name=range_proof,json=rangeProof,proto3" json:"range_proof,omitempty"` +} + +func (x *CommitRangeProofRequest) Reset() { + *x = CommitRangeProofRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_sync_sync_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CommitRangeProofRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CommitRangeProofRequest) ProtoMessage() {} + +func (x *CommitRangeProofRequest) ProtoReflect() protoreflect.Message { + mi := &file_sync_sync_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CommitRangeProofRequest.ProtoReflect.Descriptor instead. 
+func (*CommitRangeProofRequest) Descriptor() ([]byte, []int) { + return file_sync_sync_proto_rawDescGZIP(), []int{15} +} + +func (x *CommitRangeProofRequest) GetStartKey() *MaybeBytes { + if x != nil { + return x.StartKey + } + return nil +} + +func (x *CommitRangeProofRequest) GetEndKey() *MaybeBytes { + if x != nil { + return x.EndKey + } + return nil +} + +func (x *CommitRangeProofRequest) GetRangeProof() *RangeProof { + if x != nil { + return x.RangeProof + } + return nil +} + +type ChangeProof struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + StartProof []*ProofNode `protobuf:"bytes,1,rep,name=start_proof,json=startProof,proto3" json:"start_proof,omitempty"` + EndProof []*ProofNode `protobuf:"bytes,2,rep,name=end_proof,json=endProof,proto3" json:"end_proof,omitempty"` + KeyChanges []*KeyChange `protobuf:"bytes,3,rep,name=key_changes,json=keyChanges,proto3" json:"key_changes,omitempty"` +} + +func (x *ChangeProof) Reset() { + *x = ChangeProof{} + if protoimpl.UnsafeEnabled { + mi := &file_sync_sync_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ChangeProof) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ChangeProof) ProtoMessage() {} + +func (x *ChangeProof) ProtoReflect() protoreflect.Message { + mi := &file_sync_sync_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ChangeProof.ProtoReflect.Descriptor instead. 
+func (*ChangeProof) Descriptor() ([]byte, []int) { + return file_sync_sync_proto_rawDescGZIP(), []int{16} +} + +func (x *ChangeProof) GetStartProof() []*ProofNode { + if x != nil { + return x.StartProof + } + return nil +} + +func (x *ChangeProof) GetEndProof() []*ProofNode { + if x != nil { + return x.EndProof + } + return nil +} + +func (x *ChangeProof) GetKeyChanges() []*KeyChange { + if x != nil { + return x.KeyChanges + } + return nil +} + +type RangeProof struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + StartProof []*ProofNode `protobuf:"bytes,1,rep,name=start_proof,json=startProof,proto3" json:"start_proof,omitempty"` + EndProof []*ProofNode `protobuf:"bytes,2,rep,name=end_proof,json=endProof,proto3" json:"end_proof,omitempty"` + KeyValues []*KeyValue `protobuf:"bytes,3,rep,name=key_values,json=keyValues,proto3" json:"key_values,omitempty"` +} + +func (x *RangeProof) Reset() { + *x = RangeProof{} + if protoimpl.UnsafeEnabled { + mi := &file_sync_sync_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RangeProof) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RangeProof) ProtoMessage() {} + +func (x *RangeProof) ProtoReflect() protoreflect.Message { + mi := &file_sync_sync_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RangeProof.ProtoReflect.Descriptor instead. 
+func (*RangeProof) Descriptor() ([]byte, []int) { + return file_sync_sync_proto_rawDescGZIP(), []int{17} +} + +func (x *RangeProof) GetStartProof() []*ProofNode { + if x != nil { + return x.StartProof + } + return nil +} + +func (x *RangeProof) GetEndProof() []*ProofNode { + if x != nil { + return x.EndProof + } + return nil +} + +func (x *RangeProof) GetKeyValues() []*KeyValue { + if x != nil { + return x.KeyValues + } + return nil +} + +type ProofNode struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Key *Key `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + ValueOrHash *MaybeBytes `protobuf:"bytes,2,opt,name=value_or_hash,json=valueOrHash,proto3" json:"value_or_hash,omitempty"` + Children map[uint32][]byte `protobuf:"bytes,3,rep,name=children,proto3" json:"children,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *ProofNode) Reset() { + *x = ProofNode{} + if protoimpl.UnsafeEnabled { + mi := &file_sync_sync_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ProofNode) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ProofNode) ProtoMessage() {} + +func (x *ProofNode) ProtoReflect() protoreflect.Message { + mi := &file_sync_sync_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ProofNode.ProtoReflect.Descriptor instead. 
+func (*ProofNode) Descriptor() ([]byte, []int) { + return file_sync_sync_proto_rawDescGZIP(), []int{18} +} + +func (x *ProofNode) GetKey() *Key { + if x != nil { + return x.Key + } + return nil +} + +func (x *ProofNode) GetValueOrHash() *MaybeBytes { + if x != nil { + return x.ValueOrHash + } + return nil +} + +func (x *ProofNode) GetChildren() map[uint32][]byte { + if x != nil { + return x.Children + } + return nil +} + +type KeyChange struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value *MaybeBytes `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *KeyChange) Reset() { + *x = KeyChange{} + if protoimpl.UnsafeEnabled { + mi := &file_sync_sync_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *KeyChange) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*KeyChange) ProtoMessage() {} + +func (x *KeyChange) ProtoReflect() protoreflect.Message { + mi := &file_sync_sync_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use KeyChange.ProtoReflect.Descriptor instead. 
+func (*KeyChange) Descriptor() ([]byte, []int) { + return file_sync_sync_proto_rawDescGZIP(), []int{19} +} + +func (x *KeyChange) GetKey() []byte { + if x != nil { + return x.Key + } + return nil +} + +func (x *KeyChange) GetValue() *MaybeBytes { + if x != nil { + return x.Value + } + return nil +} + +type Key struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Length uint64 `protobuf:"varint,1,opt,name=length,proto3" json:"length,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *Key) Reset() { + *x = Key{} + if protoimpl.UnsafeEnabled { + mi := &file_sync_sync_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Key) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Key) ProtoMessage() {} + +func (x *Key) ProtoReflect() protoreflect.Message { + mi := &file_sync_sync_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Key.ProtoReflect.Descriptor instead. +func (*Key) Descriptor() ([]byte, []int) { + return file_sync_sync_proto_rawDescGZIP(), []int{20} +} + +func (x *Key) GetLength() uint64 { + if x != nil { + return x.Length + } + return 0 +} + +func (x *Key) GetValue() []byte { + if x != nil { + return x.Value + } + return nil +} + +type MaybeBytes struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + // If false, this is None. + // Otherwise this is Some. 
+ IsNothing bool `protobuf:"varint,2,opt,name=is_nothing,json=isNothing,proto3" json:"is_nothing,omitempty"` +} + +func (x *MaybeBytes) Reset() { + *x = MaybeBytes{} + if protoimpl.UnsafeEnabled { + mi := &file_sync_sync_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MaybeBytes) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MaybeBytes) ProtoMessage() {} + +func (x *MaybeBytes) ProtoReflect() protoreflect.Message { + mi := &file_sync_sync_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MaybeBytes.ProtoReflect.Descriptor instead. +func (*MaybeBytes) Descriptor() ([]byte, []int) { + return file_sync_sync_proto_rawDescGZIP(), []int{21} +} + +func (x *MaybeBytes) GetValue() []byte { + if x != nil { + return x.Value + } + return nil +} + +func (x *MaybeBytes) GetIsNothing() bool { + if x != nil { + return x.IsNothing + } + return false +} + +type KeyValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *KeyValue) Reset() { + *x = KeyValue{} + if protoimpl.UnsafeEnabled { + mi := &file_sync_sync_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *KeyValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*KeyValue) ProtoMessage() {} + +func (x *KeyValue) ProtoReflect() protoreflect.Message { + mi := &file_sync_sync_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() 
== nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use KeyValue.ProtoReflect.Descriptor instead. +func (*KeyValue) Descriptor() ([]byte, []int) { + return file_sync_sync_proto_rawDescGZIP(), []int{22} +} + +func (x *KeyValue) GetKey() []byte { + if x != nil { + return x.Key + } + return nil +} + +func (x *KeyValue) GetValue() []byte { + if x != nil { + return x.Value + } + return nil +} + +var File_sync_sync_proto protoreflect.FileDescriptor + +var file_sync_sync_proto_rawDesc = []byte{ + 0x0a, 0x0f, 0x73, 0x79, 0x6e, 0x63, 0x2f, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x04, 0x73, 0x79, 0x6e, 0x63, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xbb, 0x01, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x50, 0x0a, 0x13, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x5f, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, + 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x47, 0x65, 0x74, 0x52, 0x61, 0x6e, 0x67, + 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, + 0x11, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x53, 0x0a, 0x14, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x70, 0x72, 0x6f, + 0x6f, 0x66, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1f, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x47, 0x65, 0x74, 0x43, + 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x48, 0x00, 0x52, 0x12, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x09, 0x0a, 0x07, 0x6d, 0x65, 0x73, 
0x73, 0x61, + 0x67, 0x65, 0x22, 0x34, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x72, 0x6b, 0x6c, 0x65, 0x52, + 0x6f, 0x6f, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x72, + 0x6f, 0x6f, 0x74, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, + 0x72, 0x6f, 0x6f, 0x74, 0x48, 0x61, 0x73, 0x68, 0x22, 0x23, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x50, + 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x35, 0x0a, + 0x10, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x21, 0x0a, 0x05, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x0b, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x05, 0x70, + 0x72, 0x6f, 0x6f, 0x66, 0x22, 0x68, 0x0a, 0x05, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x26, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, + 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x4d, 0x61, 0x79, 0x62, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x70, 0x72, 0x6f, 0x6f, 0x66, + 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x50, 0x72, + 0x6f, 0x6f, 0x66, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x05, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x22, 0xff, + 0x01, 0x0a, 0x19, 0x53, 0x79, 0x6e, 0x63, 0x47, 0x65, 0x74, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, + 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x26, 0x0a, 0x0f, + 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d, 0x73, 0x74, 0x61, 0x72, 0x74, 0x52, 0x6f, 0x6f, 0x74, + 0x48, 0x61, 0x73, 0x68, 
0x12, 0x22, 0x0a, 0x0d, 0x65, 0x6e, 0x64, 0x5f, 0x72, 0x6f, 0x6f, 0x74, + 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x65, 0x6e, 0x64, + 0x52, 0x6f, 0x6f, 0x74, 0x48, 0x61, 0x73, 0x68, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x74, 0x61, 0x72, + 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x73, 0x79, + 0x6e, 0x63, 0x2e, 0x4d, 0x61, 0x79, 0x62, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x08, 0x73, + 0x74, 0x61, 0x72, 0x74, 0x4b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x07, 0x65, 0x6e, 0x64, 0x5f, 0x6b, + 0x65, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, + 0x4d, 0x61, 0x79, 0x62, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x06, 0x65, 0x6e, 0x64, 0x4b, + 0x65, 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x6b, 0x65, 0x79, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, + 0x1f, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x62, 0x79, 0x74, 0x65, 0x73, 0x4c, 0x69, 0x6d, 0x69, 0x74, + 0x22, 0x95, 0x01, 0x0a, 0x1a, 0x53, 0x79, 0x6e, 0x63, 0x47, 0x65, 0x74, 0x43, 0x68, 0x61, 0x6e, + 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x36, 0x0a, 0x0c, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x43, 0x68, 0x61, + 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x48, 0x00, 0x52, 0x0b, 0x63, 0x68, 0x61, 0x6e, + 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x33, 0x0a, 0x0b, 0x72, 0x61, 0x6e, 0x67, 0x65, + 0x5f, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x73, + 0x79, 0x6e, 0x63, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x48, 0x00, + 0x52, 0x0a, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 
0x6f, 0x66, 0x42, 0x0a, 0x0a, 0x08, + 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xda, 0x01, 0x0a, 0x15, 0x47, 0x65, 0x74, + 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x26, 0x0a, 0x0f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x72, 0x6f, 0x6f, 0x74, + 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d, 0x73, 0x74, 0x61, + 0x72, 0x74, 0x52, 0x6f, 0x6f, 0x74, 0x48, 0x61, 0x73, 0x68, 0x12, 0x22, 0x0a, 0x0d, 0x65, 0x6e, + 0x64, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x0b, 0x65, 0x6e, 0x64, 0x52, 0x6f, 0x6f, 0x74, 0x48, 0x61, 0x73, 0x68, 0x12, 0x2d, + 0x0a, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x10, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x4d, 0x61, 0x79, 0x62, 0x65, 0x42, 0x79, + 0x74, 0x65, 0x73, 0x52, 0x08, 0x73, 0x74, 0x61, 0x72, 0x74, 0x4b, 0x65, 0x79, 0x12, 0x29, 0x0a, + 0x07, 0x65, 0x6e, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, + 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x4d, 0x61, 0x79, 0x62, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, + 0x52, 0x06, 0x65, 0x6e, 0x64, 0x4b, 0x65, 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x6b, 0x65, 0x79, 0x5f, + 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x6b, 0x65, 0x79, + 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x22, 0x88, 0x01, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x43, 0x68, 0x61, + 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x36, 0x0a, 0x0c, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x6f, 0x66, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x43, 0x68, + 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x48, 0x00, 0x52, 0x0b, 0x63, 0x68, 0x61, + 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x2a, 0x0a, 0x10, 0x72, 0x6f, 0x6f, 0x74, + 
0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x08, 0x48, 0x00, 0x52, 0x0e, 0x72, 0x6f, 0x6f, 0x74, 0x4e, 0x6f, 0x74, 0x50, 0x72, 0x65, + 0x73, 0x65, 0x6e, 0x74, 0x42, 0x0a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0xcb, 0x01, 0x0a, 0x18, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x43, 0x68, 0x61, 0x6e, 0x67, + 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x27, 0x0a, + 0x05, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x73, + 0x79, 0x6e, 0x63, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, + 0x05, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, + 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x73, 0x79, 0x6e, 0x63, + 0x2e, 0x4d, 0x61, 0x79, 0x62, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x08, 0x73, 0x74, 0x61, + 0x72, 0x74, 0x4b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x07, 0x65, 0x6e, 0x64, 0x5f, 0x6b, 0x65, 0x79, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x4d, 0x61, + 0x79, 0x62, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x06, 0x65, 0x6e, 0x64, 0x4b, 0x65, 0x79, + 0x12, 0x2c, 0x0a, 0x12, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x72, 0x6f, 0x6f, + 0x74, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x10, 0x65, 0x78, + 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x6f, 0x6f, 0x74, 0x48, 0x61, 0x73, 0x68, 0x22, 0x31, + 0x0a, 0x19, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, + 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x22, 0x43, 0x0a, 0x18, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x43, 0x68, 0x61, 0x6e, 0x67, + 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x27, 0x0a, + 0x05, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x73, + 0x79, 0x6e, 0x63, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, + 0x05, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x22, 0xcf, 0x01, 0x0a, 0x18, 0x53, 0x79, 0x6e, 0x63, 0x47, + 0x65, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x72, 0x6f, 0x6f, 0x74, 0x5f, 0x68, 0x61, 0x73, 0x68, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x72, 0x6f, 0x6f, 0x74, 0x48, 0x61, 0x73, 0x68, + 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x4d, 0x61, 0x79, 0x62, 0x65, + 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x08, 0x73, 0x74, 0x61, 0x72, 0x74, 0x4b, 0x65, 0x79, 0x12, + 0x29, 0x0a, 0x07, 0x65, 0x6e, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x10, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x4d, 0x61, 0x79, 0x62, 0x65, 0x42, 0x79, 0x74, + 0x65, 0x73, 0x52, 0x06, 0x65, 0x6e, 0x64, 0x4b, 0x65, 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x6b, 0x65, + 0x79, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x6b, + 0x65, 0x79, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, + 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x62, 0x79, + 0x74, 0x65, 0x73, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x22, 0xaa, 0x01, 0x0a, 0x14, 0x47, 0x65, 0x74, + 0x52, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x72, 0x6f, 0x6f, 0x74, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x72, 0x6f, 0x6f, 0x74, 0x48, 0x61, 0x73, 0x68, 0x12, 0x2d, + 0x0a, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, 
0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x10, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x4d, 0x61, 0x79, 0x62, 0x65, 0x42, 0x79, + 0x74, 0x65, 0x73, 0x52, 0x08, 0x73, 0x74, 0x61, 0x72, 0x74, 0x4b, 0x65, 0x79, 0x12, 0x29, 0x0a, + 0x07, 0x65, 0x6e, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, + 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x4d, 0x61, 0x79, 0x62, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, + 0x52, 0x06, 0x65, 0x6e, 0x64, 0x4b, 0x65, 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x6b, 0x65, 0x79, 0x5f, + 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x6b, 0x65, 0x79, + 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x22, 0x3f, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x52, 0x61, 0x6e, 0x67, + 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x26, + 0x0a, 0x05, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, + 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, + 0x05, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x22, 0xa6, 0x01, 0x0a, 0x17, 0x43, 0x6f, 0x6d, 0x6d, 0x69, + 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x4d, 0x61, 0x79, + 0x62, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x08, 0x73, 0x74, 0x61, 0x72, 0x74, 0x4b, 0x65, + 0x79, 0x12, 0x29, 0x0a, 0x07, 0x65, 0x6e, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x4d, 0x61, 0x79, 0x62, 0x65, 0x42, + 0x79, 0x74, 0x65, 0x73, 0x52, 0x06, 0x65, 0x6e, 0x64, 0x4b, 0x65, 0x79, 0x12, 0x31, 0x0a, 0x0b, + 0x72, 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x10, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, + 0x6f, 0x6f, 
0x66, 0x52, 0x0a, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x22, + 0x9f, 0x01, 0x0a, 0x0b, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, + 0x30, 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x50, 0x72, 0x6f, 0x6f, + 0x66, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x50, 0x72, 0x6f, 0x6f, + 0x66, 0x12, 0x2c, 0x0a, 0x09, 0x65, 0x6e, 0x64, 0x5f, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x50, 0x72, 0x6f, 0x6f, + 0x66, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x08, 0x65, 0x6e, 0x64, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, + 0x30, 0x0a, 0x0b, 0x6b, 0x65, 0x79, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x03, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x4b, 0x65, 0x79, 0x43, + 0x68, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0a, 0x6b, 0x65, 0x79, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, + 0x73, 0x22, 0x9b, 0x01, 0x0a, 0x0a, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, + 0x12, 0x30, 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x50, 0x72, 0x6f, + 0x6f, 0x66, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x50, 0x72, 0x6f, + 0x6f, 0x66, 0x12, 0x2c, 0x0a, 0x09, 0x65, 0x6e, 0x64, 0x5f, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x50, 0x72, 0x6f, + 0x6f, 0x66, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x08, 0x65, 0x6e, 0x64, 0x50, 0x72, 0x6f, 0x6f, 0x66, + 0x12, 0x2d, 0x0a, 0x0a, 0x6b, 0x65, 0x79, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x03, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x4b, 0x65, 0x79, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x52, 0x09, 0x6b, 0x65, 
0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x22, + 0xd6, 0x01, 0x0a, 0x09, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x1b, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x09, 0x2e, 0x73, 0x79, 0x6e, + 0x63, 0x2e, 0x4b, 0x65, 0x79, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x34, 0x0a, 0x0d, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x5f, 0x6f, 0x72, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x10, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x4d, 0x61, 0x79, 0x62, 0x65, 0x42, 0x79, + 0x74, 0x65, 0x73, 0x52, 0x0b, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x72, 0x48, 0x61, 0x73, 0x68, + 0x12, 0x39, 0x0a, 0x08, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x4e, + 0x6f, 0x64, 0x65, 0x2e, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x08, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x1a, 0x3b, 0x0a, 0x0d, 0x43, + 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x45, 0x0a, 0x09, 0x4b, 0x65, 0x79, 0x43, + 0x68, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x26, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x4d, 0x61, + 0x79, 0x62, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, + 0x33, 0x0a, 0x03, 0x4b, 0x65, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 
0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x22, 0x41, 0x0a, 0x0a, 0x4d, 0x61, 0x79, 0x62, 0x65, 0x42, 0x79, 0x74, + 0x65, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x73, 0x5f, 0x6e, + 0x6f, 0x74, 0x68, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x69, 0x73, + 0x4e, 0x6f, 0x74, 0x68, 0x69, 0x6e, 0x67, 0x22, 0x32, 0x0a, 0x08, 0x4b, 0x65, 0x79, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x32, 0xc3, 0x04, 0x0a, 0x02, + 0x44, 0x42, 0x12, 0x44, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x72, 0x6b, 0x6c, 0x65, 0x52, + 0x6f, 0x6f, 0x74, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x1b, 0x2e, 0x73, 0x79, + 0x6e, 0x63, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x72, 0x6b, 0x6c, 0x65, 0x52, 0x6f, 0x6f, 0x74, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x05, 0x43, 0x6c, 0x65, 0x61, + 0x72, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, + 0x79, 0x12, 0x39, 0x0a, 0x08, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x15, 0x2e, + 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x47, 0x65, 0x74, 0x50, + 0x72, 0x6f, 0x6f, 0x66, 
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4b, 0x0a, 0x0e, + 0x47, 0x65, 0x74, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x1b, + 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, + 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x73, 0x79, + 0x6e, 0x63, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, + 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x54, 0x0a, 0x11, 0x56, 0x65, 0x72, + 0x69, 0x66, 0x79, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x1e, + 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x43, 0x68, 0x61, 0x6e, + 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, + 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x43, 0x68, 0x61, 0x6e, + 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x4b, 0x0a, 0x11, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, + 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x1e, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, + 0x69, 0x74, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x48, 0x0a, 0x0d, + 0x47, 0x65, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x1a, 0x2e, + 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, + 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x73, 0x79, 0x6e, 0x63, + 0x2e, 0x47, 0x65, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x49, 0x0a, 0x10, 
0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, + 0x52, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x1d, 0x2e, 0x73, 0x79, 0x6e, + 0x63, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, + 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, + 0x79, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x61, 0x76, 0x61, 0x2d, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x61, 0x76, 0x61, 0x6c, 0x61, 0x6e, 0x63, + 0x68, 0x65, 0x67, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x2f, 0x73, 0x79, + 0x6e, 0x63, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_sync_sync_proto_rawDescOnce sync.Once + file_sync_sync_proto_rawDescData = file_sync_sync_proto_rawDesc +) + +func file_sync_sync_proto_rawDescGZIP() []byte { + file_sync_sync_proto_rawDescOnce.Do(func() { + file_sync_sync_proto_rawDescData = protoimpl.X.CompressGZIP(file_sync_sync_proto_rawDescData) + }) + return file_sync_sync_proto_rawDescData +} + +var file_sync_sync_proto_msgTypes = make([]protoimpl.MessageInfo, 24) +var file_sync_sync_proto_goTypes = []interface{}{ + (*Request)(nil), // 0: sync.Request + (*GetMerkleRootResponse)(nil), // 1: sync.GetMerkleRootResponse + (*GetProofRequest)(nil), // 2: sync.GetProofRequest + (*GetProofResponse)(nil), // 3: sync.GetProofResponse + (*Proof)(nil), // 4: sync.Proof + (*SyncGetChangeProofRequest)(nil), // 5: sync.SyncGetChangeProofRequest + (*SyncGetChangeProofResponse)(nil), // 6: sync.SyncGetChangeProofResponse + (*GetChangeProofRequest)(nil), // 7: sync.GetChangeProofRequest + (*GetChangeProofResponse)(nil), // 8: sync.GetChangeProofResponse + (*VerifyChangeProofRequest)(nil), // 9: sync.VerifyChangeProofRequest + (*VerifyChangeProofResponse)(nil), // 10: sync.VerifyChangeProofResponse + (*CommitChangeProofRequest)(nil), // 
11: sync.CommitChangeProofRequest + (*SyncGetRangeProofRequest)(nil), // 12: sync.SyncGetRangeProofRequest + (*GetRangeProofRequest)(nil), // 13: sync.GetRangeProofRequest + (*GetRangeProofResponse)(nil), // 14: sync.GetRangeProofResponse + (*CommitRangeProofRequest)(nil), // 15: sync.CommitRangeProofRequest + (*ChangeProof)(nil), // 16: sync.ChangeProof + (*RangeProof)(nil), // 17: sync.RangeProof + (*ProofNode)(nil), // 18: sync.ProofNode + (*KeyChange)(nil), // 19: sync.KeyChange + (*Key)(nil), // 20: sync.Key + (*MaybeBytes)(nil), // 21: sync.MaybeBytes + (*KeyValue)(nil), // 22: sync.KeyValue + nil, // 23: sync.ProofNode.ChildrenEntry + (*emptypb.Empty)(nil), // 24: google.protobuf.Empty +} +var file_sync_sync_proto_depIdxs = []int32{ + 12, // 0: sync.Request.range_proof_request:type_name -> sync.SyncGetRangeProofRequest + 5, // 1: sync.Request.change_proof_request:type_name -> sync.SyncGetChangeProofRequest + 4, // 2: sync.GetProofResponse.proof:type_name -> sync.Proof + 21, // 3: sync.Proof.value:type_name -> sync.MaybeBytes + 18, // 4: sync.Proof.proof:type_name -> sync.ProofNode + 21, // 5: sync.SyncGetChangeProofRequest.start_key:type_name -> sync.MaybeBytes + 21, // 6: sync.SyncGetChangeProofRequest.end_key:type_name -> sync.MaybeBytes + 16, // 7: sync.SyncGetChangeProofResponse.change_proof:type_name -> sync.ChangeProof + 17, // 8: sync.SyncGetChangeProofResponse.range_proof:type_name -> sync.RangeProof + 21, // 9: sync.GetChangeProofRequest.start_key:type_name -> sync.MaybeBytes + 21, // 10: sync.GetChangeProofRequest.end_key:type_name -> sync.MaybeBytes + 16, // 11: sync.GetChangeProofResponse.change_proof:type_name -> sync.ChangeProof + 16, // 12: sync.VerifyChangeProofRequest.proof:type_name -> sync.ChangeProof + 21, // 13: sync.VerifyChangeProofRequest.start_key:type_name -> sync.MaybeBytes + 21, // 14: sync.VerifyChangeProofRequest.end_key:type_name -> sync.MaybeBytes + 16, // 15: sync.CommitChangeProofRequest.proof:type_name -> sync.ChangeProof + 
21, // 16: sync.SyncGetRangeProofRequest.start_key:type_name -> sync.MaybeBytes + 21, // 17: sync.SyncGetRangeProofRequest.end_key:type_name -> sync.MaybeBytes + 21, // 18: sync.GetRangeProofRequest.start_key:type_name -> sync.MaybeBytes + 21, // 19: sync.GetRangeProofRequest.end_key:type_name -> sync.MaybeBytes + 17, // 20: sync.GetRangeProofResponse.proof:type_name -> sync.RangeProof + 21, // 21: sync.CommitRangeProofRequest.start_key:type_name -> sync.MaybeBytes + 21, // 22: sync.CommitRangeProofRequest.end_key:type_name -> sync.MaybeBytes + 17, // 23: sync.CommitRangeProofRequest.range_proof:type_name -> sync.RangeProof + 18, // 24: sync.ChangeProof.start_proof:type_name -> sync.ProofNode + 18, // 25: sync.ChangeProof.end_proof:type_name -> sync.ProofNode + 19, // 26: sync.ChangeProof.key_changes:type_name -> sync.KeyChange + 18, // 27: sync.RangeProof.start_proof:type_name -> sync.ProofNode + 18, // 28: sync.RangeProof.end_proof:type_name -> sync.ProofNode + 22, // 29: sync.RangeProof.key_values:type_name -> sync.KeyValue + 20, // 30: sync.ProofNode.key:type_name -> sync.Key + 21, // 31: sync.ProofNode.value_or_hash:type_name -> sync.MaybeBytes + 23, // 32: sync.ProofNode.children:type_name -> sync.ProofNode.ChildrenEntry + 21, // 33: sync.KeyChange.value:type_name -> sync.MaybeBytes + 24, // 34: sync.DB.GetMerkleRoot:input_type -> google.protobuf.Empty + 24, // 35: sync.DB.Clear:input_type -> google.protobuf.Empty + 2, // 36: sync.DB.GetProof:input_type -> sync.GetProofRequest + 7, // 37: sync.DB.GetChangeProof:input_type -> sync.GetChangeProofRequest + 9, // 38: sync.DB.VerifyChangeProof:input_type -> sync.VerifyChangeProofRequest + 11, // 39: sync.DB.CommitChangeProof:input_type -> sync.CommitChangeProofRequest + 13, // 40: sync.DB.GetRangeProof:input_type -> sync.GetRangeProofRequest + 15, // 41: sync.DB.CommitRangeProof:input_type -> sync.CommitRangeProofRequest + 1, // 42: sync.DB.GetMerkleRoot:output_type -> sync.GetMerkleRootResponse + 24, // 43: 
sync.DB.Clear:output_type -> google.protobuf.Empty + 3, // 44: sync.DB.GetProof:output_type -> sync.GetProofResponse + 8, // 45: sync.DB.GetChangeProof:output_type -> sync.GetChangeProofResponse + 10, // 46: sync.DB.VerifyChangeProof:output_type -> sync.VerifyChangeProofResponse + 24, // 47: sync.DB.CommitChangeProof:output_type -> google.protobuf.Empty + 14, // 48: sync.DB.GetRangeProof:output_type -> sync.GetRangeProofResponse + 24, // 49: sync.DB.CommitRangeProof:output_type -> google.protobuf.Empty + 42, // [42:50] is the sub-list for method output_type + 34, // [34:42] is the sub-list for method input_type + 34, // [34:34] is the sub-list for extension type_name + 34, // [34:34] is the sub-list for extension extendee + 0, // [0:34] is the sub-list for field type_name +} + +func init() { file_sync_sync_proto_init() } +func file_sync_sync_proto_init() { + if File_sync_sync_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_sync_sync_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sync_sync_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetMerkleRootResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sync_sync_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetProofRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sync_sync_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetProofResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_sync_sync_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Proof); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sync_sync_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SyncGetChangeProofRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sync_sync_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SyncGetChangeProofResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sync_sync_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetChangeProofRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sync_sync_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetChangeProofResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sync_sync_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VerifyChangeProofRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sync_sync_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VerifyChangeProofResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sync_sync_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CommitChangeProofRequest); i { + case 0: + return &v.state + case 
1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sync_sync_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SyncGetRangeProofRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sync_sync_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetRangeProofRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sync_sync_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetRangeProofResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sync_sync_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CommitRangeProofRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sync_sync_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ChangeProof); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sync_sync_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RangeProof); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sync_sync_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ProofNode); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sync_sync_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*KeyChange); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sync_sync_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Key); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sync_sync_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MaybeBytes); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sync_sync_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*KeyValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_sync_sync_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*Request_RangeProofRequest)(nil), + (*Request_ChangeProofRequest)(nil), + } + file_sync_sync_proto_msgTypes[6].OneofWrappers = []interface{}{ + (*SyncGetChangeProofResponse_ChangeProof)(nil), + (*SyncGetChangeProofResponse_RangeProof)(nil), + } + file_sync_sync_proto_msgTypes[8].OneofWrappers = []interface{}{ + (*GetChangeProofResponse_ChangeProof)(nil), + (*GetChangeProofResponse_RootNotPresent)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_sync_sync_proto_rawDesc, + NumEnums: 0, + NumMessages: 24, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_sync_sync_proto_goTypes, + DependencyIndexes: file_sync_sync_proto_depIdxs, + MessageInfos: file_sync_sync_proto_msgTypes, + }.Build() + File_sync_sync_proto = out.File + file_sync_sync_proto_rawDesc = nil + file_sync_sync_proto_goTypes = nil + file_sync_sync_proto_depIdxs = nil +} diff --git a/avalanchego/proto/pb/sync/sync_grpc.pb.go 
b/avalanchego/proto/pb/sync/sync_grpc.pb.go new file mode 100644 index 00000000..5f79687b --- /dev/null +++ b/avalanchego/proto/pb/sync/sync_grpc.pb.go @@ -0,0 +1,369 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc (unknown) +// source: sync/sync.proto + +package sync + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + emptypb "google.golang.org/protobuf/types/known/emptypb" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +const ( + DB_GetMerkleRoot_FullMethodName = "/sync.DB/GetMerkleRoot" + DB_Clear_FullMethodName = "/sync.DB/Clear" + DB_GetProof_FullMethodName = "/sync.DB/GetProof" + DB_GetChangeProof_FullMethodName = "/sync.DB/GetChangeProof" + DB_VerifyChangeProof_FullMethodName = "/sync.DB/VerifyChangeProof" + DB_CommitChangeProof_FullMethodName = "/sync.DB/CommitChangeProof" + DB_GetRangeProof_FullMethodName = "/sync.DB/GetRangeProof" + DB_CommitRangeProof_FullMethodName = "/sync.DB/CommitRangeProof" +) + +// DBClient is the client API for DB service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
+type DBClient interface { + GetMerkleRoot(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*GetMerkleRootResponse, error) + Clear(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*emptypb.Empty, error) + GetProof(ctx context.Context, in *GetProofRequest, opts ...grpc.CallOption) (*GetProofResponse, error) + GetChangeProof(ctx context.Context, in *GetChangeProofRequest, opts ...grpc.CallOption) (*GetChangeProofResponse, error) + VerifyChangeProof(ctx context.Context, in *VerifyChangeProofRequest, opts ...grpc.CallOption) (*VerifyChangeProofResponse, error) + CommitChangeProof(ctx context.Context, in *CommitChangeProofRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + GetRangeProof(ctx context.Context, in *GetRangeProofRequest, opts ...grpc.CallOption) (*GetRangeProofResponse, error) + CommitRangeProof(ctx context.Context, in *CommitRangeProofRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) +} + +type dBClient struct { + cc grpc.ClientConnInterface +} + +func NewDBClient(cc grpc.ClientConnInterface) DBClient { + return &dBClient{cc} +} + +func (c *dBClient) GetMerkleRoot(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*GetMerkleRootResponse, error) { + out := new(GetMerkleRootResponse) + err := c.cc.Invoke(ctx, DB_GetMerkleRoot_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *dBClient) Clear(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, DB_Clear_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *dBClient) GetProof(ctx context.Context, in *GetProofRequest, opts ...grpc.CallOption) (*GetProofResponse, error) { + out := new(GetProofResponse) + err := c.cc.Invoke(ctx, DB_GetProof_FullMethodName, in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *dBClient) GetChangeProof(ctx context.Context, in *GetChangeProofRequest, opts ...grpc.CallOption) (*GetChangeProofResponse, error) { + out := new(GetChangeProofResponse) + err := c.cc.Invoke(ctx, DB_GetChangeProof_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *dBClient) VerifyChangeProof(ctx context.Context, in *VerifyChangeProofRequest, opts ...grpc.CallOption) (*VerifyChangeProofResponse, error) { + out := new(VerifyChangeProofResponse) + err := c.cc.Invoke(ctx, DB_VerifyChangeProof_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *dBClient) CommitChangeProof(ctx context.Context, in *CommitChangeProofRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, DB_CommitChangeProof_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *dBClient) GetRangeProof(ctx context.Context, in *GetRangeProofRequest, opts ...grpc.CallOption) (*GetRangeProofResponse, error) { + out := new(GetRangeProofResponse) + err := c.cc.Invoke(ctx, DB_GetRangeProof_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *dBClient) CommitRangeProof(ctx context.Context, in *CommitRangeProofRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, DB_CommitRangeProof_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// DBServer is the server API for DB service. 
+// All implementations must embed UnimplementedDBServer +// for forward compatibility +type DBServer interface { + GetMerkleRoot(context.Context, *emptypb.Empty) (*GetMerkleRootResponse, error) + Clear(context.Context, *emptypb.Empty) (*emptypb.Empty, error) + GetProof(context.Context, *GetProofRequest) (*GetProofResponse, error) + GetChangeProof(context.Context, *GetChangeProofRequest) (*GetChangeProofResponse, error) + VerifyChangeProof(context.Context, *VerifyChangeProofRequest) (*VerifyChangeProofResponse, error) + CommitChangeProof(context.Context, *CommitChangeProofRequest) (*emptypb.Empty, error) + GetRangeProof(context.Context, *GetRangeProofRequest) (*GetRangeProofResponse, error) + CommitRangeProof(context.Context, *CommitRangeProofRequest) (*emptypb.Empty, error) + mustEmbedUnimplementedDBServer() +} + +// UnimplementedDBServer must be embedded to have forward compatible implementations. +type UnimplementedDBServer struct { +} + +func (UnimplementedDBServer) GetMerkleRoot(context.Context, *emptypb.Empty) (*GetMerkleRootResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetMerkleRoot not implemented") +} +func (UnimplementedDBServer) Clear(context.Context, *emptypb.Empty) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Clear not implemented") +} +func (UnimplementedDBServer) GetProof(context.Context, *GetProofRequest) (*GetProofResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetProof not implemented") +} +func (UnimplementedDBServer) GetChangeProof(context.Context, *GetChangeProofRequest) (*GetChangeProofResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetChangeProof not implemented") +} +func (UnimplementedDBServer) VerifyChangeProof(context.Context, *VerifyChangeProofRequest) (*VerifyChangeProofResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method VerifyChangeProof not implemented") +} +func (UnimplementedDBServer) 
CommitChangeProof(context.Context, *CommitChangeProofRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method CommitChangeProof not implemented") +} +func (UnimplementedDBServer) GetRangeProof(context.Context, *GetRangeProofRequest) (*GetRangeProofResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetRangeProof not implemented") +} +func (UnimplementedDBServer) CommitRangeProof(context.Context, *CommitRangeProofRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method CommitRangeProof not implemented") +} +func (UnimplementedDBServer) mustEmbedUnimplementedDBServer() {} + +// UnsafeDBServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to DBServer will +// result in compilation errors. +type UnsafeDBServer interface { + mustEmbedUnimplementedDBServer() +} + +func RegisterDBServer(s grpc.ServiceRegistrar, srv DBServer) { + s.RegisterService(&DB_ServiceDesc, srv) +} + +func _DB_GetMerkleRoot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(emptypb.Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DBServer).GetMerkleRoot(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: DB_GetMerkleRoot_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DBServer).GetMerkleRoot(ctx, req.(*emptypb.Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _DB_Clear_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(emptypb.Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DBServer).Clear(ctx, in) + } + info := 
&grpc.UnaryServerInfo{ + Server: srv, + FullMethod: DB_Clear_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DBServer).Clear(ctx, req.(*emptypb.Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _DB_GetProof_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetProofRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DBServer).GetProof(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: DB_GetProof_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DBServer).GetProof(ctx, req.(*GetProofRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DB_GetChangeProof_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetChangeProofRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DBServer).GetChangeProof(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: DB_GetChangeProof_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DBServer).GetChangeProof(ctx, req.(*GetChangeProofRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DB_VerifyChangeProof_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(VerifyChangeProofRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DBServer).VerifyChangeProof(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: DB_VerifyChangeProof_FullMethodName, + } + handler := func(ctx context.Context, 
req interface{}) (interface{}, error) { + return srv.(DBServer).VerifyChangeProof(ctx, req.(*VerifyChangeProofRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DB_CommitChangeProof_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CommitChangeProofRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DBServer).CommitChangeProof(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: DB_CommitChangeProof_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DBServer).CommitChangeProof(ctx, req.(*CommitChangeProofRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DB_GetRangeProof_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetRangeProofRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DBServer).GetRangeProof(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: DB_GetRangeProof_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DBServer).GetRangeProof(ctx, req.(*GetRangeProofRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DB_CommitRangeProof_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CommitRangeProofRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DBServer).CommitRangeProof(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: DB_CommitRangeProof_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return 
srv.(DBServer).CommitRangeProof(ctx, req.(*CommitRangeProofRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// DB_ServiceDesc is the grpc.ServiceDesc for DB service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var DB_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "sync.DB", + HandlerType: (*DBServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetMerkleRoot", + Handler: _DB_GetMerkleRoot_Handler, + }, + { + MethodName: "Clear", + Handler: _DB_Clear_Handler, + }, + { + MethodName: "GetProof", + Handler: _DB_GetProof_Handler, + }, + { + MethodName: "GetChangeProof", + Handler: _DB_GetChangeProof_Handler, + }, + { + MethodName: "VerifyChangeProof", + Handler: _DB_VerifyChangeProof_Handler, + }, + { + MethodName: "CommitChangeProof", + Handler: _DB_CommitChangeProof_Handler, + }, + { + MethodName: "GetRangeProof", + Handler: _DB_GetRangeProof_Handler, + }, + { + MethodName: "CommitRangeProof", + Handler: _DB_CommitRangeProof_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "sync/sync.proto", +} diff --git a/avalanchego/proto/pb/validatorstate/validator_state.pb.go b/avalanchego/proto/pb/validatorstate/validator_state.pb.go index 7f89e25a..84ddc0ab 100644 --- a/avalanchego/proto/pb/validatorstate/validator_state.pb.go +++ b/avalanchego/proto/pb/validatorstate/validator_state.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc (unknown) // source: validatorstate/validator_state.proto diff --git a/avalanchego/proto/pb/validatorstate/validator_state_grpc.pb.go b/avalanchego/proto/pb/validatorstate/validator_state_grpc.pb.go index 1b328a7f..8dc2137a 100644 --- a/avalanchego/proto/pb/validatorstate/validator_state_grpc.pb.go +++ b/avalanchego/proto/pb/validatorstate/validator_state_grpc.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
// versions: -// - protoc-gen-go-grpc v1.2.0 +// - protoc-gen-go-grpc v1.3.0 // - protoc (unknown) // source: validatorstate/validator_state.proto @@ -19,6 +19,13 @@ import ( // Requires gRPC-Go v1.32.0 or later. const _ = grpc.SupportPackageIsVersion7 +const ( + ValidatorState_GetMinimumHeight_FullMethodName = "/validatorstate.ValidatorState/GetMinimumHeight" + ValidatorState_GetCurrentHeight_FullMethodName = "/validatorstate.ValidatorState/GetCurrentHeight" + ValidatorState_GetSubnetID_FullMethodName = "/validatorstate.ValidatorState/GetSubnetID" + ValidatorState_GetValidatorSet_FullMethodName = "/validatorstate.ValidatorState/GetValidatorSet" +) + // ValidatorStateClient is the client API for ValidatorState service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. @@ -45,7 +52,7 @@ func NewValidatorStateClient(cc grpc.ClientConnInterface) ValidatorStateClient { func (c *validatorStateClient) GetMinimumHeight(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*GetMinimumHeightResponse, error) { out := new(GetMinimumHeightResponse) - err := c.cc.Invoke(ctx, "/validatorstate.ValidatorState/GetMinimumHeight", in, out, opts...) + err := c.cc.Invoke(ctx, ValidatorState_GetMinimumHeight_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -54,7 +61,7 @@ func (c *validatorStateClient) GetMinimumHeight(ctx context.Context, in *emptypb func (c *validatorStateClient) GetCurrentHeight(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*GetCurrentHeightResponse, error) { out := new(GetCurrentHeightResponse) - err := c.cc.Invoke(ctx, "/validatorstate.ValidatorState/GetCurrentHeight", in, out, opts...) + err := c.cc.Invoke(ctx, ValidatorState_GetCurrentHeight_FullMethodName, in, out, opts...) 
if err != nil { return nil, err } @@ -63,7 +70,7 @@ func (c *validatorStateClient) GetCurrentHeight(ctx context.Context, in *emptypb func (c *validatorStateClient) GetSubnetID(ctx context.Context, in *GetSubnetIDRequest, opts ...grpc.CallOption) (*GetSubnetIDResponse, error) { out := new(GetSubnetIDResponse) - err := c.cc.Invoke(ctx, "/validatorstate.ValidatorState/GetSubnetID", in, out, opts...) + err := c.cc.Invoke(ctx, ValidatorState_GetSubnetID_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -72,7 +79,7 @@ func (c *validatorStateClient) GetSubnetID(ctx context.Context, in *GetSubnetIDR func (c *validatorStateClient) GetValidatorSet(ctx context.Context, in *GetValidatorSetRequest, opts ...grpc.CallOption) (*GetValidatorSetResponse, error) { out := new(GetValidatorSetResponse) - err := c.cc.Invoke(ctx, "/validatorstate.ValidatorState/GetValidatorSet", in, out, opts...) + err := c.cc.Invoke(ctx, ValidatorState_GetValidatorSet_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -135,7 +142,7 @@ func _ValidatorState_GetMinimumHeight_Handler(srv interface{}, ctx context.Conte } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/validatorstate.ValidatorState/GetMinimumHeight", + FullMethod: ValidatorState_GetMinimumHeight_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ValidatorStateServer).GetMinimumHeight(ctx, req.(*emptypb.Empty)) @@ -153,7 +160,7 @@ func _ValidatorState_GetCurrentHeight_Handler(srv interface{}, ctx context.Conte } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/validatorstate.ValidatorState/GetCurrentHeight", + FullMethod: ValidatorState_GetCurrentHeight_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ValidatorStateServer).GetCurrentHeight(ctx, req.(*emptypb.Empty)) @@ -171,7 +178,7 @@ func _ValidatorState_GetSubnetID_Handler(srv interface{}, ctx context.Context, d } 
info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/validatorstate.ValidatorState/GetSubnetID", + FullMethod: ValidatorState_GetSubnetID_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ValidatorStateServer).GetSubnetID(ctx, req.(*GetSubnetIDRequest)) @@ -189,7 +196,7 @@ func _ValidatorState_GetValidatorSet_Handler(srv interface{}, ctx context.Contex } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/validatorstate.ValidatorState/GetValidatorSet", + FullMethod: ValidatorState_GetValidatorSet_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ValidatorStateServer).GetValidatorSet(ctx, req.(*GetValidatorSetRequest)) diff --git a/avalanchego/proto/pb/vm/runtime/runtime.pb.go b/avalanchego/proto/pb/vm/runtime/runtime.pb.go index c8a90fc7..2a5f15db 100644 --- a/avalanchego/proto/pb/vm/runtime/runtime.pb.go +++ b/avalanchego/proto/pb/vm/runtime/runtime.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc (unknown) // source: vm/runtime/runtime.proto diff --git a/avalanchego/proto/pb/vm/runtime/runtime_grpc.pb.go b/avalanchego/proto/pb/vm/runtime/runtime_grpc.pb.go index be32d678..4fd1b2d6 100644 --- a/avalanchego/proto/pb/vm/runtime/runtime_grpc.pb.go +++ b/avalanchego/proto/pb/vm/runtime/runtime_grpc.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.2.0 +// - protoc-gen-go-grpc v1.3.0 // - protoc (unknown) // source: vm/runtime/runtime.proto @@ -19,6 +19,10 @@ import ( // Requires gRPC-Go v1.32.0 or later. const _ = grpc.SupportPackageIsVersion7 +const ( + Runtime_Initialize_FullMethodName = "/vm.runtime.Runtime/Initialize" +) + // RuntimeClient is the client API for Runtime service. 
// // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. @@ -37,7 +41,7 @@ func NewRuntimeClient(cc grpc.ClientConnInterface) RuntimeClient { func (c *runtimeClient) Initialize(ctx context.Context, in *InitializeRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/vm.runtime.Runtime/Initialize", in, out, opts...) + err := c.cc.Invoke(ctx, Runtime_Initialize_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -83,7 +87,7 @@ func _Runtime_Initialize_Handler(srv interface{}, ctx context.Context, dec func( } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/vm.runtime.Runtime/Initialize", + FullMethod: Runtime_Initialize_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(RuntimeServer).Initialize(ctx, req.(*InitializeRequest)) diff --git a/avalanchego/proto/pb/vm/vm.pb.go b/avalanchego/proto/pb/vm/vm.pb.go index bf9ae4d0..7f38e5bb 100644 --- a/avalanchego/proto/pb/vm/vm.pb.go +++ b/avalanchego/proto/pb/vm/vm.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc (unknown) // source: vm/vm.proto @@ -131,12 +131,11 @@ type Error int32 const ( // ERROR_UNSPECIFIED is used to indicate that no error occurred. - Error_ERROR_UNSPECIFIED Error = 0 - Error_ERROR_CLOSED Error = 1 - Error_ERROR_NOT_FOUND Error = 2 - Error_ERROR_HEIGHT_INDEX_NOT_IMPLEMENTED Error = 3 - Error_ERROR_HEIGHT_INDEX_INCOMPLETE Error = 4 - Error_ERROR_STATE_SYNC_NOT_IMPLEMENTED Error = 5 + Error_ERROR_UNSPECIFIED Error = 0 + Error_ERROR_CLOSED Error = 1 + Error_ERROR_NOT_FOUND Error = 2 + Error_ERROR_HEIGHT_INDEX_INCOMPLETE Error = 3 + Error_ERROR_STATE_SYNC_NOT_IMPLEMENTED Error = 4 ) // Enum value maps for Error. 
@@ -145,17 +144,15 @@ var ( 0: "ERROR_UNSPECIFIED", 1: "ERROR_CLOSED", 2: "ERROR_NOT_FOUND", - 3: "ERROR_HEIGHT_INDEX_NOT_IMPLEMENTED", - 4: "ERROR_HEIGHT_INDEX_INCOMPLETE", - 5: "ERROR_STATE_SYNC_NOT_IMPLEMENTED", + 3: "ERROR_HEIGHT_INDEX_INCOMPLETE", + 4: "ERROR_STATE_SYNC_NOT_IMPLEMENTED", } Error_value = map[string]int32{ - "ERROR_UNSPECIFIED": 0, - "ERROR_CLOSED": 1, - "ERROR_NOT_FOUND": 2, - "ERROR_HEIGHT_INDEX_NOT_IMPLEMENTED": 3, - "ERROR_HEIGHT_INDEX_INCOMPLETE": 4, - "ERROR_STATE_SYNC_NOT_IMPLEMENTED": 5, + "ERROR_UNSPECIFIED": 0, + "ERROR_CLOSED": 1, + "ERROR_NOT_FOUND": 2, + "ERROR_HEIGHT_INDEX_INCOMPLETE": 3, + "ERROR_STATE_SYNC_NOT_IMPLEMENTED": 4, } ) @@ -235,7 +232,7 @@ func (x StateSummaryAcceptResponse_Mode) Number() protoreflect.EnumNumber { // Deprecated: Use StateSummaryAcceptResponse_Mode.Descriptor instead. func (StateSummaryAcceptResponse_Mode) EnumDescriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{46, 0} + return file_vm_vm_proto_rawDescGZIP(), []int{44, 0} } type InitializeRequest struct { @@ -249,15 +246,15 @@ type InitializeRequest struct { NodeId []byte `protobuf:"bytes,4,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` // public_key is the BLS public key that would correspond with any signatures // produced by the warp messaging signer - PublicKey []byte `protobuf:"bytes,5,opt,name=public_key,json=publicKey,proto3" json:"public_key,omitempty"` - XChainId []byte `protobuf:"bytes,6,opt,name=x_chain_id,json=xChainId,proto3" json:"x_chain_id,omitempty"` - CChainId []byte `protobuf:"bytes,7,opt,name=c_chain_id,json=cChainId,proto3" json:"c_chain_id,omitempty"` - AvaxAssetId []byte `protobuf:"bytes,8,opt,name=avax_asset_id,json=avaxAssetId,proto3" json:"avax_asset_id,omitempty"` - ChainDataDir string `protobuf:"bytes,9,opt,name=chain_data_dir,json=chainDataDir,proto3" json:"chain_data_dir,omitempty"` - GenesisBytes []byte `protobuf:"bytes,10,opt,name=genesis_bytes,json=genesisBytes,proto3" 
json:"genesis_bytes,omitempty"` - UpgradeBytes []byte `protobuf:"bytes,11,opt,name=upgrade_bytes,json=upgradeBytes,proto3" json:"upgrade_bytes,omitempty"` - ConfigBytes []byte `protobuf:"bytes,12,opt,name=config_bytes,json=configBytes,proto3" json:"config_bytes,omitempty"` - DbServers []*VersionedDBServer `protobuf:"bytes,13,rep,name=db_servers,json=dbServers,proto3" json:"db_servers,omitempty"` + PublicKey []byte `protobuf:"bytes,5,opt,name=public_key,json=publicKey,proto3" json:"public_key,omitempty"` + XChainId []byte `protobuf:"bytes,6,opt,name=x_chain_id,json=xChainId,proto3" json:"x_chain_id,omitempty"` + CChainId []byte `protobuf:"bytes,7,opt,name=c_chain_id,json=cChainId,proto3" json:"c_chain_id,omitempty"` + AvaxAssetId []byte `protobuf:"bytes,8,opt,name=avax_asset_id,json=avaxAssetId,proto3" json:"avax_asset_id,omitempty"` + ChainDataDir string `protobuf:"bytes,9,opt,name=chain_data_dir,json=chainDataDir,proto3" json:"chain_data_dir,omitempty"` + GenesisBytes []byte `protobuf:"bytes,10,opt,name=genesis_bytes,json=genesisBytes,proto3" json:"genesis_bytes,omitempty"` + UpgradeBytes []byte `protobuf:"bytes,11,opt,name=upgrade_bytes,json=upgradeBytes,proto3" json:"upgrade_bytes,omitempty"` + ConfigBytes []byte `protobuf:"bytes,12,opt,name=config_bytes,json=configBytes,proto3" json:"config_bytes,omitempty"` + DbServerAddr string `protobuf:"bytes,13,opt,name=db_server_addr,json=dbServerAddr,proto3" json:"db_server_addr,omitempty"` // server_addr is the address of the gRPC server which serves // the messenger, keystore, shared memory, blockchain alias, // subnet alias, and appSender services @@ -380,11 +377,11 @@ func (x *InitializeRequest) GetConfigBytes() []byte { return nil } -func (x *InitializeRequest) GetDbServers() []*VersionedDBServer { +func (x *InitializeRequest) GetDbServerAddr() string { if x != nil { - return x.DbServers + return x.DbServerAddr } - return nil + return "" } func (x *InitializeRequest) GetServerAddr() string { @@ -473,63 +470,6 @@ 
func (x *InitializeResponse) GetTimestamp() *timestamppb.Timestamp { return nil } -type VersionedDBServer struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` - // server_addr is the address of the gRPC server which serves the - // Database service - ServerAddr string `protobuf:"bytes,2,opt,name=server_addr,json=serverAddr,proto3" json:"server_addr,omitempty"` -} - -func (x *VersionedDBServer) Reset() { - *x = VersionedDBServer{} - if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *VersionedDBServer) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*VersionedDBServer) ProtoMessage() {} - -func (x *VersionedDBServer) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use VersionedDBServer.ProtoReflect.Descriptor instead. 
-func (*VersionedDBServer) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{2} -} - -func (x *VersionedDBServer) GetVersion() string { - if x != nil { - return x.Version - } - return "" -} - -func (x *VersionedDBServer) GetServerAddr() string { - if x != nil { - return x.ServerAddr - } - return "" -} - type SetStateRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -541,7 +481,7 @@ type SetStateRequest struct { func (x *SetStateRequest) Reset() { *x = SetStateRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[3] + mi := &file_vm_vm_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -554,7 +494,7 @@ func (x *SetStateRequest) String() string { func (*SetStateRequest) ProtoMessage() {} func (x *SetStateRequest) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[3] + mi := &file_vm_vm_proto_msgTypes[2] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -567,7 +507,7 @@ func (x *SetStateRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use SetStateRequest.ProtoReflect.Descriptor instead. 
func (*SetStateRequest) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{3} + return file_vm_vm_proto_rawDescGZIP(), []int{2} } func (x *SetStateRequest) GetState() State { @@ -592,7 +532,7 @@ type SetStateResponse struct { func (x *SetStateResponse) Reset() { *x = SetStateResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[4] + mi := &file_vm_vm_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -605,7 +545,7 @@ func (x *SetStateResponse) String() string { func (*SetStateResponse) ProtoMessage() {} func (x *SetStateResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[4] + mi := &file_vm_vm_proto_msgTypes[3] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -618,7 +558,7 @@ func (x *SetStateResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use SetStateResponse.ProtoReflect.Descriptor instead. 
func (*SetStateResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{4} + return file_vm_vm_proto_rawDescGZIP(), []int{3} } func (x *SetStateResponse) GetLastAcceptedId() []byte { @@ -667,7 +607,7 @@ type CreateHandlersResponse struct { func (x *CreateHandlersResponse) Reset() { *x = CreateHandlersResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[5] + mi := &file_vm_vm_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -680,7 +620,7 @@ func (x *CreateHandlersResponse) String() string { func (*CreateHandlersResponse) ProtoMessage() {} func (x *CreateHandlersResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[5] + mi := &file_vm_vm_proto_msgTypes[4] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -693,7 +633,7 @@ func (x *CreateHandlersResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use CreateHandlersResponse.ProtoReflect.Descriptor instead. 
func (*CreateHandlersResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{5} + return file_vm_vm_proto_rawDescGZIP(), []int{4} } func (x *CreateHandlersResponse) GetHandlers() []*Handler { @@ -703,69 +643,21 @@ func (x *CreateHandlersResponse) GetHandlers() []*Handler { return nil } -type CreateStaticHandlersResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Handlers []*Handler `protobuf:"bytes,1,rep,name=handlers,proto3" json:"handlers,omitempty"` -} - -func (x *CreateStaticHandlersResponse) Reset() { - *x = CreateStaticHandlersResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CreateStaticHandlersResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CreateStaticHandlersResponse) ProtoMessage() {} - -func (x *CreateStaticHandlersResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CreateStaticHandlersResponse.ProtoReflect.Descriptor instead. 
-func (*CreateStaticHandlersResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{6} -} - -func (x *CreateStaticHandlersResponse) GetHandlers() []*Handler { - if x != nil { - return x.Handlers - } - return nil -} - type Handler struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Prefix string `protobuf:"bytes,1,opt,name=prefix,proto3" json:"prefix,omitempty"` - LockOptions uint32 `protobuf:"varint,2,opt,name=lock_options,json=lockOptions,proto3" json:"lock_options,omitempty"` + Prefix string `protobuf:"bytes,1,opt,name=prefix,proto3" json:"prefix,omitempty"` // server_addr is the address of the gRPC server which serves the // HTTP service - ServerAddr string `protobuf:"bytes,3,opt,name=server_addr,json=serverAddr,proto3" json:"server_addr,omitempty"` + ServerAddr string `protobuf:"bytes,2,opt,name=server_addr,json=serverAddr,proto3" json:"server_addr,omitempty"` } func (x *Handler) Reset() { *x = Handler{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[7] + mi := &file_vm_vm_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -778,7 +670,7 @@ func (x *Handler) String() string { func (*Handler) ProtoMessage() {} func (x *Handler) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[7] + mi := &file_vm_vm_proto_msgTypes[5] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -791,7 +683,7 @@ func (x *Handler) ProtoReflect() protoreflect.Message { // Deprecated: Use Handler.ProtoReflect.Descriptor instead. 
func (*Handler) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{7} + return file_vm_vm_proto_rawDescGZIP(), []int{5} } func (x *Handler) GetPrefix() string { @@ -801,13 +693,6 @@ func (x *Handler) GetPrefix() string { return "" } -func (x *Handler) GetLockOptions() uint32 { - if x != nil { - return x.LockOptions - } - return 0 -} - func (x *Handler) GetServerAddr() string { if x != nil { return x.ServerAddr @@ -826,7 +711,7 @@ type BuildBlockRequest struct { func (x *BuildBlockRequest) Reset() { *x = BuildBlockRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[8] + mi := &file_vm_vm_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -839,7 +724,7 @@ func (x *BuildBlockRequest) String() string { func (*BuildBlockRequest) ProtoMessage() {} func (x *BuildBlockRequest) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[8] + mi := &file_vm_vm_proto_msgTypes[6] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -852,7 +737,7 @@ func (x *BuildBlockRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use BuildBlockRequest.ProtoReflect.Descriptor instead. 
func (*BuildBlockRequest) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{8} + return file_vm_vm_proto_rawDescGZIP(), []int{6} } func (x *BuildBlockRequest) GetPChainHeight() uint64 { @@ -879,7 +764,7 @@ type BuildBlockResponse struct { func (x *BuildBlockResponse) Reset() { *x = BuildBlockResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[9] + mi := &file_vm_vm_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -892,7 +777,7 @@ func (x *BuildBlockResponse) String() string { func (*BuildBlockResponse) ProtoMessage() {} func (x *BuildBlockResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[9] + mi := &file_vm_vm_proto_msgTypes[7] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -905,7 +790,7 @@ func (x *BuildBlockResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use BuildBlockResponse.ProtoReflect.Descriptor instead. 
func (*BuildBlockResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{9} + return file_vm_vm_proto_rawDescGZIP(), []int{7} } func (x *BuildBlockResponse) GetId() []byte { @@ -961,7 +846,7 @@ type ParseBlockRequest struct { func (x *ParseBlockRequest) Reset() { *x = ParseBlockRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[10] + mi := &file_vm_vm_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -974,7 +859,7 @@ func (x *ParseBlockRequest) String() string { func (*ParseBlockRequest) ProtoMessage() {} func (x *ParseBlockRequest) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[10] + mi := &file_vm_vm_proto_msgTypes[8] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -987,7 +872,7 @@ func (x *ParseBlockRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ParseBlockRequest.ProtoReflect.Descriptor instead. 
func (*ParseBlockRequest) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{10} + return file_vm_vm_proto_rawDescGZIP(), []int{8} } func (x *ParseBlockRequest) GetBytes() []byte { @@ -1013,7 +898,7 @@ type ParseBlockResponse struct { func (x *ParseBlockResponse) Reset() { *x = ParseBlockResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[11] + mi := &file_vm_vm_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1026,7 +911,7 @@ func (x *ParseBlockResponse) String() string { func (*ParseBlockResponse) ProtoMessage() {} func (x *ParseBlockResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[11] + mi := &file_vm_vm_proto_msgTypes[9] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1039,7 +924,7 @@ func (x *ParseBlockResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ParseBlockResponse.ProtoReflect.Descriptor instead. 
func (*ParseBlockResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{11} + return file_vm_vm_proto_rawDescGZIP(), []int{9} } func (x *ParseBlockResponse) GetId() []byte { @@ -1095,7 +980,7 @@ type GetBlockRequest struct { func (x *GetBlockRequest) Reset() { *x = GetBlockRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[12] + mi := &file_vm_vm_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1108,7 +993,7 @@ func (x *GetBlockRequest) String() string { func (*GetBlockRequest) ProtoMessage() {} func (x *GetBlockRequest) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[12] + mi := &file_vm_vm_proto_msgTypes[10] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1121,7 +1006,7 @@ func (x *GetBlockRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetBlockRequest.ProtoReflect.Descriptor instead. 
func (*GetBlockRequest) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{12} + return file_vm_vm_proto_rawDescGZIP(), []int{10} } func (x *GetBlockRequest) GetId() []byte { @@ -1149,7 +1034,7 @@ type GetBlockResponse struct { func (x *GetBlockResponse) Reset() { *x = GetBlockResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[13] + mi := &file_vm_vm_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1162,7 +1047,7 @@ func (x *GetBlockResponse) String() string { func (*GetBlockResponse) ProtoMessage() {} func (x *GetBlockResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[13] + mi := &file_vm_vm_proto_msgTypes[11] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1175,7 +1060,7 @@ func (x *GetBlockResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetBlockResponse.ProtoReflect.Descriptor instead. 
func (*GetBlockResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{13} + return file_vm_vm_proto_rawDescGZIP(), []int{11} } func (x *GetBlockResponse) GetParentId() []byte { @@ -1238,7 +1123,7 @@ type SetPreferenceRequest struct { func (x *SetPreferenceRequest) Reset() { *x = SetPreferenceRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[14] + mi := &file_vm_vm_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1251,7 +1136,7 @@ func (x *SetPreferenceRequest) String() string { func (*SetPreferenceRequest) ProtoMessage() {} func (x *SetPreferenceRequest) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[14] + mi := &file_vm_vm_proto_msgTypes[12] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1264,7 +1149,7 @@ func (x *SetPreferenceRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use SetPreferenceRequest.ProtoReflect.Descriptor instead. 
func (*SetPreferenceRequest) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{14} + return file_vm_vm_proto_rawDescGZIP(), []int{12} } func (x *SetPreferenceRequest) GetId() []byte { @@ -1288,7 +1173,7 @@ type BlockVerifyRequest struct { func (x *BlockVerifyRequest) Reset() { *x = BlockVerifyRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[15] + mi := &file_vm_vm_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1301,7 +1186,7 @@ func (x *BlockVerifyRequest) String() string { func (*BlockVerifyRequest) ProtoMessage() {} func (x *BlockVerifyRequest) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[15] + mi := &file_vm_vm_proto_msgTypes[13] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1314,7 +1199,7 @@ func (x *BlockVerifyRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use BlockVerifyRequest.ProtoReflect.Descriptor instead. 
func (*BlockVerifyRequest) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{15} + return file_vm_vm_proto_rawDescGZIP(), []int{13} } func (x *BlockVerifyRequest) GetBytes() []byte { @@ -1342,7 +1227,7 @@ type BlockVerifyResponse struct { func (x *BlockVerifyResponse) Reset() { *x = BlockVerifyResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[16] + mi := &file_vm_vm_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1355,7 +1240,7 @@ func (x *BlockVerifyResponse) String() string { func (*BlockVerifyResponse) ProtoMessage() {} func (x *BlockVerifyResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[16] + mi := &file_vm_vm_proto_msgTypes[14] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1368,7 +1253,7 @@ func (x *BlockVerifyResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use BlockVerifyResponse.ProtoReflect.Descriptor instead. 
func (*BlockVerifyResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{16} + return file_vm_vm_proto_rawDescGZIP(), []int{14} } func (x *BlockVerifyResponse) GetTimestamp() *timestamppb.Timestamp { @@ -1389,7 +1274,7 @@ type BlockAcceptRequest struct { func (x *BlockAcceptRequest) Reset() { *x = BlockAcceptRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[17] + mi := &file_vm_vm_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1402,7 +1287,7 @@ func (x *BlockAcceptRequest) String() string { func (*BlockAcceptRequest) ProtoMessage() {} func (x *BlockAcceptRequest) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[17] + mi := &file_vm_vm_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1415,7 +1300,7 @@ func (x *BlockAcceptRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use BlockAcceptRequest.ProtoReflect.Descriptor instead. 
func (*BlockAcceptRequest) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{17} + return file_vm_vm_proto_rawDescGZIP(), []int{15} } func (x *BlockAcceptRequest) GetId() []byte { @@ -1436,7 +1321,7 @@ type BlockRejectRequest struct { func (x *BlockRejectRequest) Reset() { *x = BlockRejectRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[18] + mi := &file_vm_vm_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1449,7 +1334,7 @@ func (x *BlockRejectRequest) String() string { func (*BlockRejectRequest) ProtoMessage() {} func (x *BlockRejectRequest) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[18] + mi := &file_vm_vm_proto_msgTypes[16] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1462,7 +1347,7 @@ func (x *BlockRejectRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use BlockRejectRequest.ProtoReflect.Descriptor instead. 
func (*BlockRejectRequest) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{18} + return file_vm_vm_proto_rawDescGZIP(), []int{16} } func (x *BlockRejectRequest) GetId() []byte { @@ -1483,7 +1368,7 @@ type HealthResponse struct { func (x *HealthResponse) Reset() { *x = HealthResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[19] + mi := &file_vm_vm_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1496,7 +1381,7 @@ func (x *HealthResponse) String() string { func (*HealthResponse) ProtoMessage() {} func (x *HealthResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[19] + mi := &file_vm_vm_proto_msgTypes[17] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1509,7 +1394,7 @@ func (x *HealthResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use HealthResponse.ProtoReflect.Descriptor instead. 
func (*HealthResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{19} + return file_vm_vm_proto_rawDescGZIP(), []int{17} } func (x *HealthResponse) GetDetails() []byte { @@ -1530,7 +1415,7 @@ type VersionResponse struct { func (x *VersionResponse) Reset() { *x = VersionResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[20] + mi := &file_vm_vm_proto_msgTypes[18] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1543,7 +1428,7 @@ func (x *VersionResponse) String() string { func (*VersionResponse) ProtoMessage() {} func (x *VersionResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[20] + mi := &file_vm_vm_proto_msgTypes[18] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1556,7 +1441,7 @@ func (x *VersionResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VersionResponse.ProtoReflect.Descriptor instead. 
func (*VersionResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{20} + return file_vm_vm_proto_rawDescGZIP(), []int{18} } func (x *VersionResponse) GetVersion() string { @@ -1584,7 +1469,7 @@ type AppRequestMsg struct { func (x *AppRequestMsg) Reset() { *x = AppRequestMsg{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[21] + mi := &file_vm_vm_proto_msgTypes[19] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1597,7 +1482,7 @@ func (x *AppRequestMsg) String() string { func (*AppRequestMsg) ProtoMessage() {} func (x *AppRequestMsg) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[21] + mi := &file_vm_vm_proto_msgTypes[19] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1610,7 +1495,7 @@ func (x *AppRequestMsg) ProtoReflect() protoreflect.Message { // Deprecated: Use AppRequestMsg.ProtoReflect.Descriptor instead. 
func (*AppRequestMsg) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{21} + return file_vm_vm_proto_rawDescGZIP(), []int{19} } func (x *AppRequestMsg) GetNodeId() []byte { @@ -1650,12 +1535,16 @@ type AppRequestFailedMsg struct { NodeId []byte `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` // The ID of the request we sent and didn't get a response to RequestId uint32 `protobuf:"varint,2,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + // Application-defined error code + ErrorCode int32 `protobuf:"zigzag32,3,opt,name=error_code,json=errorCode,proto3" json:"error_code,omitempty"` + // Application-defined error message + ErrorMessage string `protobuf:"bytes,4,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` } func (x *AppRequestFailedMsg) Reset() { *x = AppRequestFailedMsg{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[22] + mi := &file_vm_vm_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1668,7 +1557,7 @@ func (x *AppRequestFailedMsg) String() string { func (*AppRequestFailedMsg) ProtoMessage() {} func (x *AppRequestFailedMsg) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[22] + mi := &file_vm_vm_proto_msgTypes[20] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1681,7 +1570,7 @@ func (x *AppRequestFailedMsg) ProtoReflect() protoreflect.Message { // Deprecated: Use AppRequestFailedMsg.ProtoReflect.Descriptor instead. 
func (*AppRequestFailedMsg) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{22} + return file_vm_vm_proto_rawDescGZIP(), []int{20} } func (x *AppRequestFailedMsg) GetNodeId() []byte { @@ -1698,6 +1587,20 @@ func (x *AppRequestFailedMsg) GetRequestId() uint32 { return 0 } +func (x *AppRequestFailedMsg) GetErrorCode() int32 { + if x != nil { + return x.ErrorCode + } + return 0 +} + +func (x *AppRequestFailedMsg) GetErrorMessage() string { + if x != nil { + return x.ErrorMessage + } + return "" +} + type AppResponseMsg struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1714,7 +1617,7 @@ type AppResponseMsg struct { func (x *AppResponseMsg) Reset() { *x = AppResponseMsg{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[23] + mi := &file_vm_vm_proto_msgTypes[21] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1727,7 +1630,7 @@ func (x *AppResponseMsg) String() string { func (*AppResponseMsg) ProtoMessage() {} func (x *AppResponseMsg) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[23] + mi := &file_vm_vm_proto_msgTypes[21] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1740,7 +1643,7 @@ func (x *AppResponseMsg) ProtoReflect() protoreflect.Message { // Deprecated: Use AppResponseMsg.ProtoReflect.Descriptor instead. 
func (*AppResponseMsg) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{23} + return file_vm_vm_proto_rawDescGZIP(), []int{21} } func (x *AppResponseMsg) GetNodeId() []byte { @@ -1778,7 +1681,7 @@ type AppGossipMsg struct { func (x *AppGossipMsg) Reset() { *x = AppGossipMsg{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[24] + mi := &file_vm_vm_proto_msgTypes[22] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1791,7 +1694,7 @@ func (x *AppGossipMsg) String() string { func (*AppGossipMsg) ProtoMessage() {} func (x *AppGossipMsg) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[24] + mi := &file_vm_vm_proto_msgTypes[22] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1804,7 +1707,7 @@ func (x *AppGossipMsg) ProtoReflect() protoreflect.Message { // Deprecated: Use AppGossipMsg.ProtoReflect.Descriptor instead. 
func (*AppGossipMsg) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{24} + return file_vm_vm_proto_rawDescGZIP(), []int{22} } func (x *AppGossipMsg) GetNodeId() []byte { @@ -1839,7 +1742,7 @@ type CrossChainAppRequestMsg struct { func (x *CrossChainAppRequestMsg) Reset() { *x = CrossChainAppRequestMsg{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[25] + mi := &file_vm_vm_proto_msgTypes[23] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1852,7 +1755,7 @@ func (x *CrossChainAppRequestMsg) String() string { func (*CrossChainAppRequestMsg) ProtoMessage() {} func (x *CrossChainAppRequestMsg) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[25] + mi := &file_vm_vm_proto_msgTypes[23] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1865,7 +1768,7 @@ func (x *CrossChainAppRequestMsg) ProtoReflect() protoreflect.Message { // Deprecated: Use CrossChainAppRequestMsg.ProtoReflect.Descriptor instead. 
func (*CrossChainAppRequestMsg) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{25} + return file_vm_vm_proto_rawDescGZIP(), []int{23} } func (x *CrossChainAppRequestMsg) GetChainId() []byte { @@ -1905,12 +1808,16 @@ type CrossChainAppRequestFailedMsg struct { ChainId []byte `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` // The ID of the request we sent and didn't get a response to RequestId uint32 `protobuf:"varint,2,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + // Application-defined error code + ErrorCode int32 `protobuf:"zigzag32,3,opt,name=error_code,json=errorCode,proto3" json:"error_code,omitempty"` + // Application-defined error message + ErrorMessage string `protobuf:"bytes,4,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` } func (x *CrossChainAppRequestFailedMsg) Reset() { *x = CrossChainAppRequestFailedMsg{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[26] + mi := &file_vm_vm_proto_msgTypes[24] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1923,7 +1830,7 @@ func (x *CrossChainAppRequestFailedMsg) String() string { func (*CrossChainAppRequestFailedMsg) ProtoMessage() {} func (x *CrossChainAppRequestFailedMsg) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[26] + mi := &file_vm_vm_proto_msgTypes[24] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1936,7 +1843,7 @@ func (x *CrossChainAppRequestFailedMsg) ProtoReflect() protoreflect.Message { // Deprecated: Use CrossChainAppRequestFailedMsg.ProtoReflect.Descriptor instead. 
func (*CrossChainAppRequestFailedMsg) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{26} + return file_vm_vm_proto_rawDescGZIP(), []int{24} } func (x *CrossChainAppRequestFailedMsg) GetChainId() []byte { @@ -1953,6 +1860,20 @@ func (x *CrossChainAppRequestFailedMsg) GetRequestId() uint32 { return 0 } +func (x *CrossChainAppRequestFailedMsg) GetErrorCode() int32 { + if x != nil { + return x.ErrorCode + } + return 0 +} + +func (x *CrossChainAppRequestFailedMsg) GetErrorMessage() string { + if x != nil { + return x.ErrorMessage + } + return "" +} + type CrossChainAppResponseMsg struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1969,7 +1890,7 @@ type CrossChainAppResponseMsg struct { func (x *CrossChainAppResponseMsg) Reset() { *x = CrossChainAppResponseMsg{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[27] + mi := &file_vm_vm_proto_msgTypes[25] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1982,7 +1903,7 @@ func (x *CrossChainAppResponseMsg) String() string { func (*CrossChainAppResponseMsg) ProtoMessage() {} func (x *CrossChainAppResponseMsg) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[27] + mi := &file_vm_vm_proto_msgTypes[25] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1995,7 +1916,7 @@ func (x *CrossChainAppResponseMsg) ProtoReflect() protoreflect.Message { // Deprecated: Use CrossChainAppResponseMsg.ProtoReflect.Descriptor instead. 
func (*CrossChainAppResponseMsg) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{27} + return file_vm_vm_proto_rawDescGZIP(), []int{25} } func (x *CrossChainAppResponseMsg) GetChainId() []byte { @@ -2024,14 +1945,19 @@ type ConnectedRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - NodeId []byte `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` - Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` + NodeId []byte `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` + // Client name (e.g avalanchego) + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + // Client semantic version + Major uint32 `protobuf:"varint,3,opt,name=major,proto3" json:"major,omitempty"` + Minor uint32 `protobuf:"varint,4,opt,name=minor,proto3" json:"minor,omitempty"` + Patch uint32 `protobuf:"varint,5,opt,name=patch,proto3" json:"patch,omitempty"` } func (x *ConnectedRequest) Reset() { *x = ConnectedRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[28] + mi := &file_vm_vm_proto_msgTypes[26] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2044,7 +1970,7 @@ func (x *ConnectedRequest) String() string { func (*ConnectedRequest) ProtoMessage() {} func (x *ConnectedRequest) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[28] + mi := &file_vm_vm_proto_msgTypes[26] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2057,7 +1983,7 @@ func (x *ConnectedRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ConnectedRequest.ProtoReflect.Descriptor instead. 
func (*ConnectedRequest) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{28} + return file_vm_vm_proto_rawDescGZIP(), []int{26} } func (x *ConnectedRequest) GetNodeId() []byte { @@ -2067,13 +1993,34 @@ func (x *ConnectedRequest) GetNodeId() []byte { return nil } -func (x *ConnectedRequest) GetVersion() string { +func (x *ConnectedRequest) GetName() string { if x != nil { - return x.Version + return x.Name } return "" } +func (x *ConnectedRequest) GetMajor() uint32 { + if x != nil { + return x.Major + } + return 0 +} + +func (x *ConnectedRequest) GetMinor() uint32 { + if x != nil { + return x.Minor + } + return 0 +} + +func (x *ConnectedRequest) GetPatch() uint32 { + if x != nil { + return x.Patch + } + return 0 +} + type DisconnectedRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -2085,7 +2032,7 @@ type DisconnectedRequest struct { func (x *DisconnectedRequest) Reset() { *x = DisconnectedRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[29] + mi := &file_vm_vm_proto_msgTypes[27] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2098,7 +2045,7 @@ func (x *DisconnectedRequest) String() string { func (*DisconnectedRequest) ProtoMessage() {} func (x *DisconnectedRequest) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[29] + mi := &file_vm_vm_proto_msgTypes[27] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2111,7 +2058,7 @@ func (x *DisconnectedRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use DisconnectedRequest.ProtoReflect.Descriptor instead. 
func (*DisconnectedRequest) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{29} + return file_vm_vm_proto_rawDescGZIP(), []int{27} } func (x *DisconnectedRequest) GetNodeId() []byte { @@ -2135,7 +2082,7 @@ type GetAncestorsRequest struct { func (x *GetAncestorsRequest) Reset() { *x = GetAncestorsRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[30] + mi := &file_vm_vm_proto_msgTypes[28] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2148,7 +2095,7 @@ func (x *GetAncestorsRequest) String() string { func (*GetAncestorsRequest) ProtoMessage() {} func (x *GetAncestorsRequest) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[30] + mi := &file_vm_vm_proto_msgTypes[28] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2161,7 +2108,7 @@ func (x *GetAncestorsRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetAncestorsRequest.ProtoReflect.Descriptor instead. 
func (*GetAncestorsRequest) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{30} + return file_vm_vm_proto_rawDescGZIP(), []int{28} } func (x *GetAncestorsRequest) GetBlkId() []byte { @@ -2203,7 +2150,7 @@ type GetAncestorsResponse struct { func (x *GetAncestorsResponse) Reset() { *x = GetAncestorsResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[31] + mi := &file_vm_vm_proto_msgTypes[29] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2216,7 +2163,7 @@ func (x *GetAncestorsResponse) String() string { func (*GetAncestorsResponse) ProtoMessage() {} func (x *GetAncestorsResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[31] + mi := &file_vm_vm_proto_msgTypes[29] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2229,7 +2176,7 @@ func (x *GetAncestorsResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetAncestorsResponse.ProtoReflect.Descriptor instead. 
func (*GetAncestorsResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{31} + return file_vm_vm_proto_rawDescGZIP(), []int{29} } func (x *GetAncestorsResponse) GetBlksBytes() [][]byte { @@ -2250,7 +2197,7 @@ type BatchedParseBlockRequest struct { func (x *BatchedParseBlockRequest) Reset() { *x = BatchedParseBlockRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[32] + mi := &file_vm_vm_proto_msgTypes[30] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2263,7 +2210,7 @@ func (x *BatchedParseBlockRequest) String() string { func (*BatchedParseBlockRequest) ProtoMessage() {} func (x *BatchedParseBlockRequest) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[32] + mi := &file_vm_vm_proto_msgTypes[30] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2276,7 +2223,7 @@ func (x *BatchedParseBlockRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use BatchedParseBlockRequest.ProtoReflect.Descriptor instead. 
func (*BatchedParseBlockRequest) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{32} + return file_vm_vm_proto_rawDescGZIP(), []int{30} } func (x *BatchedParseBlockRequest) GetRequest() [][]byte { @@ -2297,7 +2244,7 @@ type BatchedParseBlockResponse struct { func (x *BatchedParseBlockResponse) Reset() { *x = BatchedParseBlockResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[33] + mi := &file_vm_vm_proto_msgTypes[31] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2310,7 +2257,7 @@ func (x *BatchedParseBlockResponse) String() string { func (*BatchedParseBlockResponse) ProtoMessage() {} func (x *BatchedParseBlockResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[33] + mi := &file_vm_vm_proto_msgTypes[31] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2323,7 +2270,7 @@ func (x *BatchedParseBlockResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use BatchedParseBlockResponse.ProtoReflect.Descriptor instead. 
func (*BatchedParseBlockResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{33} + return file_vm_vm_proto_rawDescGZIP(), []int{31} } func (x *BatchedParseBlockResponse) GetResponse() []*ParseBlockResponse { @@ -2344,7 +2291,7 @@ type VerifyHeightIndexResponse struct { func (x *VerifyHeightIndexResponse) Reset() { *x = VerifyHeightIndexResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[34] + mi := &file_vm_vm_proto_msgTypes[32] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2357,7 +2304,7 @@ func (x *VerifyHeightIndexResponse) String() string { func (*VerifyHeightIndexResponse) ProtoMessage() {} func (x *VerifyHeightIndexResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[34] + mi := &file_vm_vm_proto_msgTypes[32] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2370,7 +2317,7 @@ func (x *VerifyHeightIndexResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VerifyHeightIndexResponse.ProtoReflect.Descriptor instead. 
func (*VerifyHeightIndexResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{34} + return file_vm_vm_proto_rawDescGZIP(), []int{32} } func (x *VerifyHeightIndexResponse) GetErr() Error { @@ -2391,7 +2338,7 @@ type GetBlockIDAtHeightRequest struct { func (x *GetBlockIDAtHeightRequest) Reset() { *x = GetBlockIDAtHeightRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[35] + mi := &file_vm_vm_proto_msgTypes[33] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2404,7 +2351,7 @@ func (x *GetBlockIDAtHeightRequest) String() string { func (*GetBlockIDAtHeightRequest) ProtoMessage() {} func (x *GetBlockIDAtHeightRequest) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[35] + mi := &file_vm_vm_proto_msgTypes[33] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2417,7 +2364,7 @@ func (x *GetBlockIDAtHeightRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetBlockIDAtHeightRequest.ProtoReflect.Descriptor instead. 
func (*GetBlockIDAtHeightRequest) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{35} + return file_vm_vm_proto_rawDescGZIP(), []int{33} } func (x *GetBlockIDAtHeightRequest) GetHeight() uint64 { @@ -2439,7 +2386,7 @@ type GetBlockIDAtHeightResponse struct { func (x *GetBlockIDAtHeightResponse) Reset() { *x = GetBlockIDAtHeightResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[36] + mi := &file_vm_vm_proto_msgTypes[34] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2452,7 +2399,7 @@ func (x *GetBlockIDAtHeightResponse) String() string { func (*GetBlockIDAtHeightResponse) ProtoMessage() {} func (x *GetBlockIDAtHeightResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[36] + mi := &file_vm_vm_proto_msgTypes[34] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2465,7 +2412,7 @@ func (x *GetBlockIDAtHeightResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetBlockIDAtHeightResponse.ProtoReflect.Descriptor instead. 
func (*GetBlockIDAtHeightResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{36} + return file_vm_vm_proto_rawDescGZIP(), []int{34} } func (x *GetBlockIDAtHeightResponse) GetBlkId() []byte { @@ -2493,7 +2440,7 @@ type GatherResponse struct { func (x *GatherResponse) Reset() { *x = GatherResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[37] + mi := &file_vm_vm_proto_msgTypes[35] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2506,7 +2453,7 @@ func (x *GatherResponse) String() string { func (*GatherResponse) ProtoMessage() {} func (x *GatherResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[37] + mi := &file_vm_vm_proto_msgTypes[35] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2519,7 +2466,7 @@ func (x *GatherResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GatherResponse.ProtoReflect.Descriptor instead. 
func (*GatherResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{37} + return file_vm_vm_proto_rawDescGZIP(), []int{35} } func (x *GatherResponse) GetMetricFamilies() []*_go.MetricFamily { @@ -2541,7 +2488,7 @@ type StateSyncEnabledResponse struct { func (x *StateSyncEnabledResponse) Reset() { *x = StateSyncEnabledResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[38] + mi := &file_vm_vm_proto_msgTypes[36] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2554,7 +2501,7 @@ func (x *StateSyncEnabledResponse) String() string { func (*StateSyncEnabledResponse) ProtoMessage() {} func (x *StateSyncEnabledResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[38] + mi := &file_vm_vm_proto_msgTypes[36] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2567,7 +2514,7 @@ func (x *StateSyncEnabledResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use StateSyncEnabledResponse.ProtoReflect.Descriptor instead. 
func (*StateSyncEnabledResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{38} + return file_vm_vm_proto_rawDescGZIP(), []int{36} } func (x *StateSyncEnabledResponse) GetEnabled() bool { @@ -2598,7 +2545,7 @@ type GetOngoingSyncStateSummaryResponse struct { func (x *GetOngoingSyncStateSummaryResponse) Reset() { *x = GetOngoingSyncStateSummaryResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[39] + mi := &file_vm_vm_proto_msgTypes[37] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2611,7 +2558,7 @@ func (x *GetOngoingSyncStateSummaryResponse) String() string { func (*GetOngoingSyncStateSummaryResponse) ProtoMessage() {} func (x *GetOngoingSyncStateSummaryResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[39] + mi := &file_vm_vm_proto_msgTypes[37] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2624,7 +2571,7 @@ func (x *GetOngoingSyncStateSummaryResponse) ProtoReflect() protoreflect.Message // Deprecated: Use GetOngoingSyncStateSummaryResponse.ProtoReflect.Descriptor instead. 
func (*GetOngoingSyncStateSummaryResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{39} + return file_vm_vm_proto_rawDescGZIP(), []int{37} } func (x *GetOngoingSyncStateSummaryResponse) GetId() []byte { @@ -2669,7 +2616,7 @@ type GetLastStateSummaryResponse struct { func (x *GetLastStateSummaryResponse) Reset() { *x = GetLastStateSummaryResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[40] + mi := &file_vm_vm_proto_msgTypes[38] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2682,7 +2629,7 @@ func (x *GetLastStateSummaryResponse) String() string { func (*GetLastStateSummaryResponse) ProtoMessage() {} func (x *GetLastStateSummaryResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[40] + mi := &file_vm_vm_proto_msgTypes[38] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2695,7 +2642,7 @@ func (x *GetLastStateSummaryResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetLastStateSummaryResponse.ProtoReflect.Descriptor instead. 
func (*GetLastStateSummaryResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{40} + return file_vm_vm_proto_rawDescGZIP(), []int{38} } func (x *GetLastStateSummaryResponse) GetId() []byte { @@ -2737,7 +2684,7 @@ type ParseStateSummaryRequest struct { func (x *ParseStateSummaryRequest) Reset() { *x = ParseStateSummaryRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[41] + mi := &file_vm_vm_proto_msgTypes[39] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2750,7 +2697,7 @@ func (x *ParseStateSummaryRequest) String() string { func (*ParseStateSummaryRequest) ProtoMessage() {} func (x *ParseStateSummaryRequest) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[41] + mi := &file_vm_vm_proto_msgTypes[39] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2763,7 +2710,7 @@ func (x *ParseStateSummaryRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ParseStateSummaryRequest.ProtoReflect.Descriptor instead. 
func (*ParseStateSummaryRequest) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{41} + return file_vm_vm_proto_rawDescGZIP(), []int{39} } func (x *ParseStateSummaryRequest) GetBytes() []byte { @@ -2786,7 +2733,7 @@ type ParseStateSummaryResponse struct { func (x *ParseStateSummaryResponse) Reset() { *x = ParseStateSummaryResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[42] + mi := &file_vm_vm_proto_msgTypes[40] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2799,7 +2746,7 @@ func (x *ParseStateSummaryResponse) String() string { func (*ParseStateSummaryResponse) ProtoMessage() {} func (x *ParseStateSummaryResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[42] + mi := &file_vm_vm_proto_msgTypes[40] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2812,7 +2759,7 @@ func (x *ParseStateSummaryResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ParseStateSummaryResponse.ProtoReflect.Descriptor instead. 
func (*ParseStateSummaryResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{42} + return file_vm_vm_proto_rawDescGZIP(), []int{40} } func (x *ParseStateSummaryResponse) GetId() []byte { @@ -2847,7 +2794,7 @@ type GetStateSummaryRequest struct { func (x *GetStateSummaryRequest) Reset() { *x = GetStateSummaryRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[43] + mi := &file_vm_vm_proto_msgTypes[41] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2860,7 +2807,7 @@ func (x *GetStateSummaryRequest) String() string { func (*GetStateSummaryRequest) ProtoMessage() {} func (x *GetStateSummaryRequest) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[43] + mi := &file_vm_vm_proto_msgTypes[41] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2873,7 +2820,7 @@ func (x *GetStateSummaryRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetStateSummaryRequest.ProtoReflect.Descriptor instead. 
func (*GetStateSummaryRequest) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{43} + return file_vm_vm_proto_rawDescGZIP(), []int{41} } func (x *GetStateSummaryRequest) GetHeight() uint64 { @@ -2896,7 +2843,7 @@ type GetStateSummaryResponse struct { func (x *GetStateSummaryResponse) Reset() { *x = GetStateSummaryResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[44] + mi := &file_vm_vm_proto_msgTypes[42] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2909,7 +2856,7 @@ func (x *GetStateSummaryResponse) String() string { func (*GetStateSummaryResponse) ProtoMessage() {} func (x *GetStateSummaryResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[44] + mi := &file_vm_vm_proto_msgTypes[42] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2922,7 +2869,7 @@ func (x *GetStateSummaryResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetStateSummaryResponse.ProtoReflect.Descriptor instead. 
func (*GetStateSummaryResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{44} + return file_vm_vm_proto_rawDescGZIP(), []int{42} } func (x *GetStateSummaryResponse) GetId() []byte { @@ -2957,7 +2904,7 @@ type StateSummaryAcceptRequest struct { func (x *StateSummaryAcceptRequest) Reset() { *x = StateSummaryAcceptRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[45] + mi := &file_vm_vm_proto_msgTypes[43] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2970,7 +2917,7 @@ func (x *StateSummaryAcceptRequest) String() string { func (*StateSummaryAcceptRequest) ProtoMessage() {} func (x *StateSummaryAcceptRequest) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[45] + mi := &file_vm_vm_proto_msgTypes[43] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2983,7 +2930,7 @@ func (x *StateSummaryAcceptRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use StateSummaryAcceptRequest.ProtoReflect.Descriptor instead. 
func (*StateSummaryAcceptRequest) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{45} + return file_vm_vm_proto_rawDescGZIP(), []int{43} } func (x *StateSummaryAcceptRequest) GetBytes() []byte { @@ -3005,7 +2952,7 @@ type StateSummaryAcceptResponse struct { func (x *StateSummaryAcceptResponse) Reset() { *x = StateSummaryAcceptResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[46] + mi := &file_vm_vm_proto_msgTypes[44] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3018,7 +2965,7 @@ func (x *StateSummaryAcceptResponse) String() string { func (*StateSummaryAcceptResponse) ProtoMessage() {} func (x *StateSummaryAcceptResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[46] + mi := &file_vm_vm_proto_msgTypes[44] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3031,7 +2978,7 @@ func (x *StateSummaryAcceptResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use StateSummaryAcceptResponse.ProtoReflect.Descriptor instead. 
func (*StateSummaryAcceptResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{46} + return file_vm_vm_proto_rawDescGZIP(), []int{44} } func (x *StateSummaryAcceptResponse) GetMode() StateSummaryAcceptResponse_Mode { @@ -3058,7 +3005,7 @@ var file_vm_vm_proto_rawDesc = []byte{ 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x22, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x22, 0xec, 0x03, 0x0a, 0x11, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, + 0x6f, 0x74, 0x6f, 0x22, 0xdc, 0x03, 0x0a, 0x11, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x75, 0x62, 0x6e, @@ -3083,11 +3030,10 @@ var file_vm_vm_proto_rawDesc = []byte{ 0x79, 0x74, 0x65, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x34, 0x0a, 0x0a, 0x64, - 0x62, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x15, 0x2e, 0x76, 0x6d, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x64, 0x44, 0x42, - 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x09, 0x64, 0x62, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x61, 0x64, 0x64, 0x72, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 
0x24, 0x0a, 0x0e, 0x64, + 0x62, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, 0x0d, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x62, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x41, 0x64, 0x64, + 0x72, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x41, 0x64, 0x64, 0x72, 0x22, 0xdd, 0x01, 0x0a, 0x12, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x6c, 0x61, 0x73, @@ -3103,12 +3049,7 @@ var file_vm_vm_proto_rawDesc = []byte{ 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x22, 0x4e, 0x0a, 0x11, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x64, 0x44, - 0x42, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x61, 0x64, 0x64, 0x72, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x41, 0x64, - 0x64, 0x72, 0x22, 0x32, 0x0a, 0x0f, 0x53, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, + 0x6d, 0x70, 0x22, 0x32, 0x0a, 0x0f, 0x53, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x09, 0x2e, 0x76, 0x6d, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x22, 0xdb, 0x01, 0x0a, 0x10, 0x53, 0x65, 0x74, 0x53, 0x74, @@ -3129,423 +3070,421 @@ var file_vm_vm_proto_rawDesc = []byte{ 
0x6e, 0x64, 0x6c, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, 0x0a, 0x08, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x76, 0x6d, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x52, 0x08, 0x68, - 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x73, 0x22, 0x47, 0x0a, 0x1c, 0x43, 0x72, 0x65, 0x61, 0x74, - 0x65, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, 0x0a, 0x08, 0x68, 0x61, 0x6e, 0x64, 0x6c, - 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x76, 0x6d, 0x2e, 0x48, - 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x52, 0x08, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x73, - 0x22, 0x65, 0x0a, 0x07, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x70, - 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x72, 0x65, - 0x66, 0x69, 0x78, 0x12, 0x21, 0x0a, 0x0c, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x6f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x6c, 0x6f, 0x63, 0x6b, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x41, 0x64, 0x64, 0x72, 0x22, 0x51, 0x0a, 0x11, 0x42, 0x75, 0x69, 0x6c, 0x64, - 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x29, 0x0a, 0x0e, - 0x70, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52, 0x0c, 0x70, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x48, 0x65, - 0x69, 0x67, 0x68, 0x74, 0x88, 0x01, 0x01, 0x42, 0x11, 0x0a, 0x0f, 0x5f, 0x70, 0x5f, 0x63, 0x68, - 0x61, 0x69, 0x6e, 0x5f, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x22, 0xd9, 0x01, 0x0a, 0x12, 0x42, - 0x75, 0x69, 0x6c, 0x64, 0x42, 0x6c, 0x6f, 
0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, - 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x14, - 0x0a, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, - 0x79, 0x74, 0x65, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x38, 0x0a, 0x09, - 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, - 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x2e, 0x0a, 0x13, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, - 0x5f, 0x77, 0x69, 0x74, 0x68, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x11, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x57, 0x69, 0x74, 0x68, 0x43, - 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x22, 0x29, 0x0a, 0x11, 0x50, 0x61, 0x72, 0x73, 0x65, 0x42, - 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x62, - 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x79, 0x74, 0x65, - 0x73, 0x22, 0xe7, 0x01, 0x0a, 0x12, 0x50, 0x61, 0x72, 0x73, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x72, 0x65, - 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x70, 0x61, 0x72, - 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x22, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 
0x75, 0x73, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0a, 0x2e, 0x76, 0x6d, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, 0x69, - 0x67, 0x68, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, - 0x74, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, - 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x2e, 0x0a, 0x13, 0x76, - 0x65, 0x72, 0x69, 0x66, 0x79, 0x5f, 0x77, 0x69, 0x74, 0x68, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, - 0x78, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, - 0x57, 0x69, 0x74, 0x68, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x22, 0x21, 0x0a, 0x0f, 0x47, - 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, - 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, 0x22, 0x88, - 0x02, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x49, 0x64, - 0x12, 0x14, 0x0a, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x12, 0x22, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0a, 0x2e, 0x76, 0x6d, 0x2e, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, - 0x69, 0x67, 0x68, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x68, 0x65, 0x69, 0x67, - 0x68, 0x74, 0x12, 
0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, - 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x1b, 0x0a, 0x03, - 0x65, 0x72, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x09, 0x2e, 0x76, 0x6d, 0x2e, 0x45, - 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, 0x12, 0x2e, 0x0a, 0x13, 0x76, 0x65, 0x72, - 0x69, 0x66, 0x79, 0x5f, 0x77, 0x69, 0x74, 0x68, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, - 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x57, 0x69, - 0x74, 0x68, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x22, 0x26, 0x0a, 0x14, 0x53, 0x65, 0x74, - 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, - 0x64, 0x22, 0x68, 0x0a, 0x12, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x12, 0x29, 0x0a, - 0x0e, 0x70, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52, 0x0c, 0x70, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x48, - 0x65, 0x69, 0x67, 0x68, 0x74, 0x88, 0x01, 0x01, 0x42, 0x11, 0x0a, 0x0f, 0x5f, 0x70, 0x5f, 0x63, - 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x22, 0x4f, 0x0a, 0x13, 0x42, - 0x6c, 0x6f, 0x63, 0x6b, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, - 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x24, 0x0a, 0x12, - 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x73, 0x22, 0x42, 0x0a, 0x07, 0x48, 0x61, 0x6e, 0x64, 0x6c, + 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x41, 0x64, 0x64, 0x72, 0x22, 0x51, 0x0a, 0x11, 0x42, + 0x75, 0x69, 0x6c, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x29, 0x0a, 0x0e, 0x70, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x68, 0x65, 0x69, 0x67, + 0x68, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52, 0x0c, 0x70, 0x43, 0x68, 0x61, + 0x69, 0x6e, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x88, 0x01, 0x01, 0x42, 0x11, 0x0a, 0x0f, 0x5f, + 0x70, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x22, 0xd9, + 0x01, 0x0a, 0x12, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, + 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, + 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, 0x69, 0x67, + 0x68, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 
0x74, + 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, + 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x2e, 0x0a, 0x13, 0x76, 0x65, + 0x72, 0x69, 0x66, 0x79, 0x5f, 0x77, 0x69, 0x74, 0x68, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, + 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x57, + 0x69, 0x74, 0x68, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x22, 0x29, 0x0a, 0x11, 0x50, 0x61, + 0x72, 0x73, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x14, 0x0a, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, + 0x62, 0x79, 0x74, 0x65, 0x73, 0x22, 0xe7, 0x01, 0x0a, 0x12, 0x50, 0x61, 0x72, 0x73, 0x65, 0x42, + 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1b, 0x0a, 0x09, + 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x08, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x22, 0x0a, 0x06, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0a, 0x2e, 0x76, 0x6d, 0x2e, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x16, 0x0a, + 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x68, + 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x52, 
0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, + 0x2e, 0x0a, 0x13, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x5f, 0x77, 0x69, 0x74, 0x68, 0x5f, 0x63, + 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x76, 0x65, + 0x72, 0x69, 0x66, 0x79, 0x57, 0x69, 0x74, 0x68, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x22, + 0x21, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, - 0x69, 0x64, 0x22, 0x24, 0x0a, 0x12, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x6a, 0x65, 0x63, - 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, 0x22, 0x2a, 0x0a, 0x0e, 0x48, 0x65, 0x61, 0x6c, - 0x74, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, - 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x64, 0x65, 0x74, - 0x61, 0x69, 0x6c, 0x73, 0x22, 0x2b, 0x0a, 0x0f, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x22, 0x99, 0x01, 0x0a, 0x0d, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x4d, 0x73, 0x67, 0x12, 0x17, 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, - 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, - 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x36, 0x0a, 0x08, 0x64, - 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 
0x62, 0x75, 0x66, 0x2e, - 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, - 0x69, 0x6e, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x4d, 0x0a, - 0x13, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x46, 0x61, 0x69, 0x6c, 0x65, - 0x64, 0x4d, 0x73, 0x67, 0x12, 0x17, 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x12, 0x1d, 0x0a, + 0x69, 0x64, 0x22, 0x88, 0x02, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x72, 0x65, 0x6e, + 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x70, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x12, 0x22, 0x0a, 0x06, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0a, 0x2e, 0x76, 0x6d, 0x2e, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x16, + 0x0a, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, + 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x12, 0x1b, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x09, 0x2e, + 0x76, 0x6d, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, 0x12, 0x2e, 0x0a, + 0x13, 0x76, 
0x65, 0x72, 0x69, 0x66, 0x79, 0x5f, 0x77, 0x69, 0x74, 0x68, 0x5f, 0x63, 0x6f, 0x6e, + 0x74, 0x65, 0x78, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x76, 0x65, 0x72, 0x69, + 0x66, 0x79, 0x57, 0x69, 0x74, 0x68, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x22, 0x26, 0x0a, + 0x14, 0x53, 0x65, 0x74, 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x02, 0x69, 0x64, 0x22, 0x68, 0x0a, 0x12, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x56, 0x65, + 0x72, 0x69, 0x66, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x62, + 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x79, 0x74, 0x65, + 0x73, 0x12, 0x29, 0x0a, 0x0e, 0x70, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x68, 0x65, 0x69, + 0x67, 0x68, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52, 0x0c, 0x70, 0x43, 0x68, + 0x61, 0x69, 0x6e, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x88, 0x01, 0x01, 0x42, 0x11, 0x0a, 0x0f, + 0x5f, 0x70, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x22, + 0x4f, 0x0a, 0x13, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x22, 0x24, 0x0a, 0x12, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, 0x22, 0x24, 0x0a, 0x12, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, + 0x65, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 
0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, 0x22, 0x2a, 0x0a, 0x0e, + 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, + 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x22, 0x2b, 0x0a, 0x0f, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x99, 0x01, 0x0a, 0x0d, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x4d, 0x73, 0x67, 0x12, 0x17, 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64, + 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, + 0x36, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08, 0x64, + 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x22, 0x91, 0x01, 0x0a, 0x13, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x4d, 0x73, 0x67, 0x12, 0x17, 0x0a, 0x07, 0x6e, 0x6f, 0x64, + 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, + 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 
0x69, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, + 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x11, 0x52, 0x09, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x64, 0x65, + 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x64, 0x0a, 0x0e, 0x41, 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x73, 0x67, 0x12, 0x17, 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64, + 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, + 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x39, 0x0a, 0x0c, 0x41, + 0x70, 0x70, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x4d, 0x73, 0x67, 0x12, 0x17, 0x0a, 0x07, 0x6e, + 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x6e, 0x6f, + 0x64, 0x65, 0x49, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x73, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x03, 0x6d, 0x73, 0x67, 0x22, 0xa5, 0x01, 0x0a, 0x17, 0x43, 0x72, 0x6f, 0x73, 0x73, + 0x43, 0x68, 0x61, 0x69, 0x6e, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, + 0x73, 0x67, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0d, 0x52, 0x09, 0x72, 
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x22, 0x64, 0x0a, 0x0e, - 0x41, 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x73, 0x67, 0x12, 0x17, - 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x39, 0x0a, 0x0c, 0x41, 0x70, 0x70, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x4d, - 0x73, 0x67, 0x12, 0x17, 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x6d, - 0x73, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6d, 0x73, 0x67, 0x22, 0xa5, 0x01, - 0x0a, 0x17, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x41, 0x70, 0x70, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x73, 0x67, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, - 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, - 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, - 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x49, 0x64, 0x12, 0x36, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, - 0x70, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x72, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x04, 0x20, 0x01, 
0x28, 0x0c, 0x52, 0x07, 0x72, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x59, 0x0a, 0x1d, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, 0x68, - 0x61, 0x69, 0x6e, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x46, 0x61, 0x69, - 0x6c, 0x65, 0x64, 0x4d, 0x73, 0x67, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, - 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, - 0x22, 0x70, 0x0a, 0x18, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x41, 0x70, - 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x73, 0x67, 0x12, 0x19, 0x0a, 0x08, - 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, - 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x45, 0x0a, 0x10, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x17, 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x12, - 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x2e, 0x0a, 0x13, 0x44, 0x69, 0x73, - 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x17, 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 
0x0c, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x22, 0xb3, 0x01, 0x0a, 0x13, 0x47, 0x65, - 0x74, 0x41, 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x6c, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x05, 0x62, 0x6c, 0x6b, 0x49, 0x64, 0x12, 0x24, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, - 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x5f, 0x6e, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x4e, 0x75, 0x6d, 0x12, 0x26, - 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x5f, 0x73, 0x69, 0x7a, - 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x42, 0x6c, 0x6f, 0x63, - 0x6b, 0x73, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x37, 0x0a, 0x18, 0x6d, 0x61, 0x78, 0x5f, 0x62, 0x6c, - 0x6f, 0x63, 0x6b, 0x73, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x69, 0x76, 0x61, 0x6c, 0x5f, 0x74, 0x69, - 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x15, 0x6d, 0x61, 0x78, 0x42, 0x6c, 0x6f, - 0x63, 0x6b, 0x73, 0x52, 0x65, 0x74, 0x72, 0x69, 0x76, 0x61, 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x22, - 0x35, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x41, 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x62, 0x6c, 0x6b, 0x73, 0x5f, - 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x09, 0x62, 0x6c, 0x6b, - 0x73, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x34, 0x0a, 0x18, 0x42, 0x61, 0x74, 0x63, 0x68, 0x65, - 0x64, 0x50, 0x61, 0x72, 0x73, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x0c, 0x52, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x4f, 0x0a, 0x19, - 0x42, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x50, 0x61, 0x72, 0x73, 0x65, 0x42, 0x6c, 0x6f, 0x63, - 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 
0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x08, 0x72, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x76, 0x6d, - 0x2e, 0x50, 0x61, 0x72, 0x73, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x38, 0x0a, - 0x19, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x49, 0x6e, 0x64, - 0x65, 0x78, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x03, 0x65, 0x72, - 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x09, 0x2e, 0x76, 0x6d, 0x2e, 0x45, 0x72, 0x72, - 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x33, 0x0a, 0x19, 0x47, 0x65, 0x74, 0x42, 0x6c, - 0x6f, 0x63, 0x6b, 0x49, 0x44, 0x41, 0x74, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x52, 0x65, 0x71, + 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x36, 0x0a, 0x08, + 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, + 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x9d, + 0x01, 0x0a, 0x1d, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x41, 0x70, 0x70, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x4d, 0x73, 0x67, + 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1d, 
0x0a, 0x0a, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x11, 0x52, 0x09, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x72, 0x72, + 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x70, + 0x0a, 0x18, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x41, 0x70, 0x70, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x73, 0x67, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, + 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, + 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x81, 0x01, 0x0a, 0x10, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x17, 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x12, 0x12, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x69, 0x6e, 0x6f, + 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x12, 0x14, + 0x0a, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x70, + 0x61, 0x74, 0x63, 0x68, 0x22, 0x2e, 0x0a, 0x13, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 
0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x17, 0x0a, 0x07, 0x6e, + 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x6e, 0x6f, + 0x64, 0x65, 0x49, 0x64, 0x22, 0xb3, 0x01, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x41, 0x6e, 0x63, 0x65, + 0x73, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x15, 0x0a, 0x06, + 0x62, 0x6c, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x6c, + 0x6b, 0x49, 0x64, 0x12, 0x24, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, + 0x73, 0x5f, 0x6e, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0c, 0x6d, 0x61, 0x78, + 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x4e, 0x75, 0x6d, 0x12, 0x26, 0x0a, 0x0f, 0x6d, 0x61, 0x78, + 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x53, 0x69, 0x7a, + 0x65, 0x12, 0x37, 0x0a, 0x18, 0x6d, 0x61, 0x78, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x5f, + 0x72, 0x65, 0x74, 0x72, 0x69, 0x76, 0x61, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x15, 0x6d, 0x61, 0x78, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x52, 0x65, + 0x74, 0x72, 0x69, 0x76, 0x61, 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x22, 0x35, 0x0a, 0x14, 0x47, 0x65, + 0x74, 0x41, 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x62, 0x6c, 0x6b, 0x73, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x09, 0x62, 0x6c, 0x6b, 0x73, 0x42, 0x79, 0x74, 0x65, + 0x73, 0x22, 0x34, 0x0a, 0x18, 0x42, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x50, 0x61, 0x72, 0x73, + 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, + 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x07, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 
0x4f, 0x0a, 0x19, 0x42, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x64, 0x50, 0x61, 0x72, 0x73, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x76, 0x6d, 0x2e, 0x50, 0x61, 0x72, 0x73, + 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x08, + 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x38, 0x0a, 0x19, 0x56, 0x65, 0x72, 0x69, + 0x66, 0x79, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x09, 0x2e, 0x76, 0x6d, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, + 0x72, 0x72, 0x22, 0x33, 0x0a, 0x19, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x49, 0x44, + 0x41, 0x74, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x16, 0x0a, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x22, 0x50, 0x0a, 0x1a, 0x47, 0x65, 0x74, 0x42, 0x6c, + 0x6f, 0x63, 0x6b, 0x49, 0x44, 0x41, 0x74, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x6c, 0x6b, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x6c, 0x6b, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x03, + 0x65, 0x72, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x09, 0x2e, 0x76, 0x6d, 0x2e, 0x45, + 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x5d, 0x0a, 0x0e, 0x47, 0x61, 0x74, + 0x68, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4b, 0x0a, 0x0f, 0x6d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x66, 0x61, 0x6d, 0x69, 0x6c, 0x69, 0x65, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 
0x65, 0x74, + 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x72, + 0x69, 0x63, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, + 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x69, 0x65, 0x73, 0x22, 0x51, 0x0a, 0x18, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x53, 0x79, 0x6e, 0x63, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x1b, + 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x09, 0x2e, 0x76, 0x6d, + 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x7f, 0x0a, 0x22, 0x47, + 0x65, 0x74, 0x4f, 0x6e, 0x67, 0x6f, 0x69, 0x6e, 0x67, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, + 0x64, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x79, 0x74, + 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x12, + 0x1b, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x09, 0x2e, 0x76, + 0x6d, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x78, 0x0a, 0x1b, + 0x47, 0x65, 0x74, 0x4c, 0x61, 0x73, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, + 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x68, + 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x68, 0x65, 0x69, + 0x67, 0x68, 0x74, 0x12, 
0x14, 0x0a, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x03, 0x65, 0x72, 0x72, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x09, 0x2e, 0x76, 0x6d, 0x2e, 0x45, 0x72, 0x72, 0x6f, + 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x30, 0x0a, 0x18, 0x50, 0x61, 0x72, 0x73, 0x65, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x22, 0x60, 0x0a, 0x19, 0x50, 0x61, 0x72, 0x73, + 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x1b, 0x0a, + 0x03, 0x65, 0x72, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x09, 0x2e, 0x76, 0x6d, 0x2e, + 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x30, 0x0a, 0x16, 0x47, 0x65, + 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x22, 0x50, 0x0a, 0x1a, - 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x49, 0x44, 0x41, 0x74, 0x48, 0x65, 0x69, 0x67, - 0x68, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x6c, - 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x6c, 0x6b, 0x49, - 0x64, 0x12, 0x1b, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x09, - 0x2e, 0x76, 0x6d, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 
0x65, 0x72, 0x72, 0x22, 0x5d, - 0x0a, 0x0e, 0x47, 0x61, 0x74, 0x68, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x4b, 0x0a, 0x0f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x66, 0x61, 0x6d, 0x69, 0x6c, - 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x69, 0x6f, 0x2e, 0x70, - 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, - 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x52, 0x0e, 0x6d, - 0x65, 0x74, 0x72, 0x69, 0x63, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x69, 0x65, 0x73, 0x22, 0x51, 0x0a, - 0x18, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, - 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, - 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, - 0x6c, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, - 0x32, 0x09, 0x2e, 0x76, 0x6d, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, - 0x22, 0x7f, 0x0a, 0x22, 0x47, 0x65, 0x74, 0x4f, 0x6e, 0x67, 0x6f, 0x69, 0x6e, 0x67, 0x53, 0x79, - 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x14, - 0x0a, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, - 0x79, 0x74, 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x09, 0x2e, 0x76, 0x6d, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, - 0x72, 0x22, 0x78, 0x0a, 0x1b, 0x47, 0x65, 0x74, 0x4c, 0x61, 0x73, 0x74, 0x53, 0x74, 0x61, 0x74, - 0x65, 
0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, - 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, - 0x52, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x79, 0x74, 0x65, - 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x12, 0x1b, - 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x09, 0x2e, 0x76, 0x6d, - 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x30, 0x0a, 0x18, 0x50, - 0x61, 0x72, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x22, 0x5c, 0x0a, 0x17, + 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x12, 0x1b, 0x0a, + 0x03, 0x65, 0x72, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x09, 0x2e, 0x76, 0x6d, 0x2e, + 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x31, 0x0a, 0x19, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x22, 0x60, 0x0a, - 0x19, 0x50, 0x61, 0x72, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, - 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 
0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, - 0x69, 0x67, 0x68, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x68, 0x65, 0x69, 0x67, - 0x68, 0x74, 0x12, 0x1b, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x09, 0x2e, 0x76, 0x6d, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, - 0x30, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, - 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, 0x69, - 0x67, 0x68, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, - 0x74, 0x22, 0x5c, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, - 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, 0x12, 0x14, 0x0a, 0x05, - 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x79, 0x74, - 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x09, 0x2e, 0x76, 0x6d, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, - 0x31, 0x0a, 0x19, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x41, - 0x63, 0x63, 0x65, 0x70, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, - 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x79, 0x74, - 0x65, 0x73, 0x22, 0xc5, 0x01, 0x0a, 0x1a, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, - 0x61, 0x72, 0x79, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x37, 0x0a, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x23, 0x2e, 0x76, 0x6d, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, - 0x79, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 
0x65, 0x2e, - 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x12, 0x1b, 0x0a, 0x03, 0x65, 0x72, - 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x09, 0x2e, 0x76, 0x6d, 0x2e, 0x45, 0x72, 0x72, - 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x51, 0x0a, 0x04, 0x4d, 0x6f, 0x64, 0x65, 0x12, - 0x14, 0x0a, 0x10, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, - 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x4b, - 0x49, 0x50, 0x50, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x4d, 0x4f, 0x44, 0x45, 0x5f, - 0x53, 0x54, 0x41, 0x54, 0x49, 0x43, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4d, 0x4f, 0x44, 0x45, - 0x5f, 0x44, 0x59, 0x4e, 0x41, 0x4d, 0x49, 0x43, 0x10, 0x03, 0x2a, 0x65, 0x0a, 0x05, 0x53, 0x74, - 0x61, 0x74, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, - 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x17, 0x0a, 0x13, 0x53, 0x54, - 0x41, 0x54, 0x45, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x53, 0x59, 0x4e, 0x43, 0x49, 0x4e, - 0x47, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x42, 0x4f, 0x4f, - 0x54, 0x53, 0x54, 0x52, 0x41, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, - 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x5f, 0x4f, 0x50, 0x10, - 0x03, 0x2a, 0x61, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x16, 0x0a, 0x12, 0x53, - 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, - 0x44, 0x10, 0x00, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x50, 0x52, - 0x4f, 0x43, 0x45, 0x53, 0x53, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x54, - 0x41, 0x54, 0x55, 0x53, 0x5f, 0x52, 0x45, 0x4a, 0x45, 0x43, 0x54, 0x45, 0x44, 0x10, 0x02, 0x12, - 0x13, 0x0a, 0x0f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x41, 0x43, 0x43, 0x45, 0x50, 0x54, - 0x45, 0x44, 0x10, 0x03, 
0x2a, 0xb6, 0x01, 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x15, - 0x0a, 0x11, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, - 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x43, - 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x10, 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x45, 0x52, 0x52, 0x4f, 0x52, - 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x02, 0x12, 0x26, 0x0a, 0x22, - 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x48, 0x45, 0x49, 0x47, 0x48, 0x54, 0x5f, 0x49, 0x4e, 0x44, - 0x45, 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x49, 0x4d, 0x50, 0x4c, 0x45, 0x4d, 0x45, 0x4e, 0x54, - 0x45, 0x44, 0x10, 0x03, 0x12, 0x21, 0x0a, 0x1d, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x48, 0x45, - 0x49, 0x47, 0x48, 0x54, 0x5f, 0x49, 0x4e, 0x44, 0x45, 0x58, 0x5f, 0x49, 0x4e, 0x43, 0x4f, 0x4d, - 0x50, 0x4c, 0x45, 0x54, 0x45, 0x10, 0x04, 0x12, 0x24, 0x0a, 0x20, 0x45, 0x52, 0x52, 0x4f, 0x52, - 0x5f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x53, 0x59, 0x4e, 0x43, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, - 0x49, 0x4d, 0x50, 0x4c, 0x45, 0x4d, 0x45, 0x4e, 0x54, 0x45, 0x44, 0x10, 0x05, 0x32, 0xa4, 0x12, - 0x0a, 0x02, 0x56, 0x4d, 0x12, 0x3b, 0x0a, 0x0a, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, - 0x7a, 0x65, 0x12, 0x15, 0x2e, 0x76, 0x6d, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, - 0x7a, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x76, 0x6d, 0x2e, 0x49, - 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x35, 0x0a, 0x08, 0x53, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x13, 0x2e, - 0x76, 0x6d, 0x2e, 0x53, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x14, 0x2e, 0x76, 0x6d, 0x2e, 0x53, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, 0x08, 0x53, 0x68, 0x75, 0x74, - 0x64, 0x6f, 0x77, 0x6e, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 
0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x16, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, - 0x6d, 0x70, 0x74, 0x79, 0x12, 0x44, 0x0a, 0x0e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x61, - 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x73, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x1a, - 0x2e, 0x76, 0x6d, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, - 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x50, 0x0a, 0x14, 0x43, 0x72, - 0x65, 0x61, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, - 0x72, 0x73, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x20, 0x2e, 0x76, 0x6d, 0x2e, - 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x48, 0x61, 0x6e, 0x64, - 0x6c, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39, 0x0a, 0x09, - 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x12, 0x14, 0x2e, 0x76, 0x6d, 0x2e, 0x43, - 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x22, 0xc5, 0x01, + 0x0a, 0x1a, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x41, 0x63, + 0x63, 0x65, 0x70, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x04, + 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x76, 0x6d, 0x2e, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x41, 0x63, 0x63, 0x65, + 0x70, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4d, 0x6f, 0x64, 0x65, 0x52, + 
0x04, 0x6d, 0x6f, 0x64, 0x65, 0x12, 0x1b, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x09, 0x2e, 0x76, 0x6d, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, + 0x72, 0x72, 0x22, 0x51, 0x0a, 0x04, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x14, 0x0a, 0x10, 0x4d, 0x4f, + 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, + 0x12, 0x10, 0x0a, 0x0c, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x4b, 0x49, 0x50, 0x50, 0x45, 0x44, + 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x49, + 0x43, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x44, 0x59, 0x4e, 0x41, + 0x4d, 0x49, 0x43, 0x10, 0x03, 0x2a, 0x65, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x15, + 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, + 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x17, 0x0a, 0x13, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x53, + 0x54, 0x41, 0x54, 0x45, 0x5f, 0x53, 0x59, 0x4e, 0x43, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x17, + 0x0a, 0x13, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x42, 0x4f, 0x4f, 0x54, 0x53, 0x54, 0x52, 0x41, + 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x54, 0x41, 0x54, 0x45, + 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x5f, 0x4f, 0x50, 0x10, 0x03, 0x2a, 0x61, 0x0a, 0x06, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x16, 0x0a, 0x12, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, + 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x15, + 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x50, 0x52, 0x4f, 0x43, 0x45, 0x53, 0x53, + 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, + 0x52, 0x45, 0x4a, 0x45, 0x43, 0x54, 0x45, 0x44, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x54, + 0x41, 0x54, 0x55, 0x53, 0x5f, 0x41, 0x43, 0x43, 0x45, 0x50, 0x54, 0x45, 0x44, 0x10, 0x03, 0x2a, + 0x8e, 0x01, 0x0a, 0x05, 0x45, 0x72, 
0x72, 0x6f, 0x72, 0x12, 0x15, 0x0a, 0x11, 0x45, 0x52, 0x52, + 0x4f, 0x52, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, + 0x12, 0x10, 0x0a, 0x0c, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, + 0x10, 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, + 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x02, 0x12, 0x21, 0x0a, 0x1d, 0x45, 0x52, 0x52, 0x4f, 0x52, + 0x5f, 0x48, 0x45, 0x49, 0x47, 0x48, 0x54, 0x5f, 0x49, 0x4e, 0x44, 0x45, 0x58, 0x5f, 0x49, 0x4e, + 0x43, 0x4f, 0x4d, 0x50, 0x4c, 0x45, 0x54, 0x45, 0x10, 0x03, 0x12, 0x24, 0x0a, 0x20, 0x45, 0x52, + 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x53, 0x59, 0x4e, 0x43, 0x5f, 0x4e, + 0x4f, 0x54, 0x5f, 0x49, 0x4d, 0x50, 0x4c, 0x45, 0x4d, 0x45, 0x4e, 0x54, 0x45, 0x44, 0x10, 0x04, + 0x32, 0xd2, 0x11, 0x0a, 0x02, 0x56, 0x4d, 0x12, 0x3b, 0x0a, 0x0a, 0x49, 0x6e, 0x69, 0x74, 0x69, + 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x12, 0x15, 0x2e, 0x76, 0x6d, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x69, + 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x76, + 0x6d, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35, 0x0a, 0x08, 0x53, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x12, 0x13, 0x2e, 0x76, 0x6d, 0x2e, 0x53, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x14, 0x2e, 0x76, 0x6d, 0x2e, 0x53, 0x65, 0x74, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, 0x08, 0x53, + 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x3f, 0x0a, 0x0c, 0x44, 0x69, 
0x73, 0x63, 0x6f, - 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x12, 0x17, 0x2e, 0x76, 0x6d, 0x2e, 0x44, 0x69, 0x73, - 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x3b, 0x0a, 0x0a, 0x42, 0x75, 0x69, 0x6c, - 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x15, 0x2e, 0x76, 0x6d, 0x2e, 0x42, 0x75, 0x69, 0x6c, - 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, - 0x76, 0x6d, 0x2e, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3b, 0x0a, 0x0a, 0x50, 0x61, 0x72, 0x73, 0x65, 0x42, 0x6c, - 0x6f, 0x63, 0x6b, 0x12, 0x15, 0x2e, 0x76, 0x6d, 0x2e, 0x50, 0x61, 0x72, 0x73, 0x65, 0x42, 0x6c, - 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x76, 0x6d, 0x2e, - 0x50, 0x61, 0x72, 0x73, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x35, 0x0a, 0x08, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x13, - 0x2e, 0x76, 0x6d, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x14, 0x2e, 0x76, 0x6d, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, - 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x41, 0x0a, 0x0d, 0x53, 0x65, 0x74, - 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x18, 0x2e, 0x76, 0x6d, 0x2e, - 0x53, 0x65, 0x74, 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x34, 0x0a, 0x06, - 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 
0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x12, - 0x2e, 0x76, 0x6d, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x13, 0x2e, 0x76, 0x6d, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0a, 0x41, 0x70, - 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x11, 0x2e, 0x76, 0x6d, 0x2e, 0x41, 0x70, - 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x73, 0x67, 0x1a, 0x16, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, - 0x70, 0x74, 0x79, 0x12, 0x43, 0x0a, 0x10, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x12, 0x17, 0x2e, 0x76, 0x6d, 0x2e, 0x41, 0x70, 0x70, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x4d, 0x73, 0x67, + 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x44, 0x0a, 0x0e, 0x43, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x73, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, + 0x79, 0x1a, 0x1a, 0x2e, 0x76, 0x6d, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x61, 0x6e, + 0x64, 0x6c, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39, 0x0a, + 0x09, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x12, 0x14, 0x2e, 0x76, 0x6d, 0x2e, + 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x39, 
0x0a, 0x0b, 0x41, 0x70, 0x70, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x2e, 0x76, 0x6d, 0x2e, 0x41, 0x70, 0x70, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x73, 0x67, 0x1a, 0x16, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, - 0x70, 0x74, 0x79, 0x12, 0x35, 0x0a, 0x09, 0x41, 0x70, 0x70, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, - 0x12, 0x10, 0x2e, 0x76, 0x6d, 0x2e, 0x41, 0x70, 0x70, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x4d, - 0x73, 0x67, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x34, 0x0a, 0x06, 0x47, 0x61, - 0x74, 0x68, 0x65, 0x72, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x12, 0x2e, 0x76, - 0x6d, 0x2e, 0x47, 0x61, 0x74, 0x68, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x4b, 0x0a, 0x14, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x41, 0x70, - 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x2e, 0x76, 0x6d, 0x2e, 0x43, 0x72, - 0x6f, 0x73, 0x73, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x4d, 0x73, 0x67, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x57, 0x0a, - 0x1a, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x41, 0x70, 0x70, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x12, 0x21, 0x2e, 0x76, 0x6d, - 0x2e, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x41, 0x70, 0x70, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x4d, 0x73, 0x67, 0x1a, 0x16, + 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x3f, 0x0a, 0x0c, 0x44, 0x69, 0x73, 0x63, + 
0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x12, 0x17, 0x2e, 0x76, 0x6d, 0x2e, 0x44, 0x69, + 0x73, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x3b, 0x0a, 0x0a, 0x42, 0x75, 0x69, + 0x6c, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x15, 0x2e, 0x76, 0x6d, 0x2e, 0x42, 0x75, 0x69, + 0x6c, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, + 0x2e, 0x76, 0x6d, 0x2e, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3b, 0x0a, 0x0a, 0x50, 0x61, 0x72, 0x73, 0x65, 0x42, + 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x15, 0x2e, 0x76, 0x6d, 0x2e, 0x50, 0x61, 0x72, 0x73, 0x65, 0x42, + 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x76, 0x6d, + 0x2e, 0x50, 0x61, 0x72, 0x73, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x35, 0x0a, 0x08, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, + 0x13, 0x2e, 0x76, 0x6d, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x14, 0x2e, 0x76, 0x6d, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, + 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x41, 0x0a, 0x0d, 0x53, 0x65, + 0x74, 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x18, 0x2e, 0x76, 0x6d, + 0x2e, 0x53, 0x65, 0x74, 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x34, 0x0a, + 0x06, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 
0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, + 0x12, 0x2e, 0x76, 0x6d, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x4d, 0x0a, 0x15, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, - 0x68, 0x61, 0x69, 0x6e, 0x41, 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x1c, 0x2e, 0x76, 0x6d, 0x2e, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x41, - 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x73, 0x67, 0x1a, 0x16, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x41, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x41, 0x6e, 0x63, 0x65, - 0x73, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x17, 0x2e, 0x76, 0x6d, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x6e, - 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x18, - 0x2e, 0x76, 0x6d, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x50, 0x0a, 0x11, 0x42, 0x61, 0x74, 0x63, - 0x68, 0x65, 0x64, 0x50, 0x61, 0x72, 0x73, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x1c, 0x2e, - 0x76, 0x6d, 0x2e, 0x42, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x50, 0x61, 0x72, 0x73, 0x65, 0x42, - 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x76, 0x6d, - 0x2e, 0x42, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x50, 0x61, 0x72, 0x73, 0x65, 0x42, 0x6c, 0x6f, - 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4a, 0x0a, 0x11, 0x56, 0x65, - 0x72, 0x69, 0x66, 0x79, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, + 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x13, 0x2e, 0x76, 0x6d, 0x2e, 0x56, 
0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0a, 0x41, + 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x11, 0x2e, 0x76, 0x6d, 0x2e, 0x41, + 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x73, 0x67, 0x1a, 0x16, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, + 0x6d, 0x70, 0x74, 0x79, 0x12, 0x43, 0x0a, 0x10, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x12, 0x17, 0x2e, 0x76, 0x6d, 0x2e, 0x41, 0x70, + 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x4d, 0x73, + 0x67, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x39, 0x0a, 0x0b, 0x41, 0x70, 0x70, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x2e, 0x76, 0x6d, 0x2e, 0x41, 0x70, + 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x73, 0x67, 0x1a, 0x16, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, + 0x6d, 0x70, 0x74, 0x79, 0x12, 0x35, 0x0a, 0x09, 0x41, 0x70, 0x70, 0x47, 0x6f, 0x73, 0x73, 0x69, + 0x70, 0x12, 0x10, 0x2e, 0x76, 0x6d, 0x2e, 0x41, 0x70, 0x70, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, + 0x4d, 0x73, 0x67, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x34, 0x0a, 0x06, 0x47, + 0x61, 0x74, 0x68, 0x65, 0x72, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x12, 0x2e, + 0x76, 0x6d, 0x2e, 0x47, 0x61, 0x74, 0x68, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x4b, 0x0a, 0x14, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x41, + 0x70, 0x70, 0x52, 
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x2e, 0x76, 0x6d, 0x2e, 0x43, + 0x72, 0x6f, 0x73, 0x73, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x4d, 0x73, 0x67, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x57, + 0x0a, 0x1a, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x41, 0x70, 0x70, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x12, 0x21, 0x2e, 0x76, + 0x6d, 0x2e, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x41, 0x70, 0x70, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x4d, 0x73, 0x67, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x1d, 0x2e, 0x76, 0x6d, 0x2e, 0x56, 0x65, 0x72, - 0x69, 0x66, 0x79, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x53, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, - 0x63, 0x6b, 0x49, 0x44, 0x41, 0x74, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x1d, 0x2e, 0x76, + 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x4d, 0x0a, 0x15, 0x43, 0x72, 0x6f, 0x73, 0x73, + 0x43, 0x68, 0x61, 0x69, 0x6e, 0x41, 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x1c, 0x2e, 0x76, 0x6d, 0x2e, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, 0x68, 0x61, 0x69, 0x6e, + 0x41, 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x73, 0x67, 0x1a, 0x16, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x41, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x41, 0x6e, 0x63, + 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x17, 0x2e, 0x76, 0x6d, 0x2e, 0x47, 0x65, 0x74, 0x41, + 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 
0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x18, 0x2e, 0x76, 0x6d, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x50, 0x0a, 0x11, 0x42, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x64, 0x50, 0x61, 0x72, 0x73, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x1c, + 0x2e, 0x76, 0x6d, 0x2e, 0x42, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x50, 0x61, 0x72, 0x73, 0x65, + 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x76, + 0x6d, 0x2e, 0x42, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x50, 0x61, 0x72, 0x73, 0x65, 0x42, 0x6c, + 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4a, 0x0a, 0x11, 0x56, + 0x65, 0x72, 0x69, 0x66, 0x79, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, + 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x1d, 0x2e, 0x76, 0x6d, 0x2e, 0x56, 0x65, + 0x72, 0x69, 0x66, 0x79, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x53, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x42, 0x6c, + 0x6f, 0x63, 0x6b, 0x49, 0x44, 0x41, 0x74, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x1d, 0x2e, + 0x76, 0x6d, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x49, 0x44, 0x41, 0x74, 0x48, + 0x65, 0x69, 0x67, 0x68, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x76, 0x6d, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x49, 0x44, 0x41, 0x74, 0x48, 0x65, - 0x69, 0x67, 0x68, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x76, 0x6d, - 0x2e, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x49, 0x44, 0x41, 0x74, 0x48, 0x65, 0x69, - 0x67, 0x68, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x48, 0x0a, 0x10, 0x53, - 0x74, 0x61, 0x74, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, - 
0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x1c, 0x2e, 0x76, 0x6d, 0x2e, 0x53, 0x74, 0x61, - 0x74, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5c, 0x0a, 0x1a, 0x47, 0x65, 0x74, 0x4f, 0x6e, 0x67, 0x6f, - 0x69, 0x6e, 0x67, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, - 0x61, 0x72, 0x79, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x26, 0x2e, 0x76, 0x6d, - 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x6e, 0x67, 0x6f, 0x69, 0x6e, 0x67, 0x53, 0x79, 0x6e, 0x63, 0x53, - 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x4e, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x4c, 0x61, 0x73, 0x74, 0x53, 0x74, - 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, - 0x74, 0x79, 0x1a, 0x1f, 0x2e, 0x76, 0x6d, 0x2e, 0x47, 0x65, 0x74, 0x4c, 0x61, 0x73, 0x74, 0x53, - 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x50, 0x0a, 0x11, 0x50, 0x61, 0x72, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x1c, 0x2e, 0x76, 0x6d, 0x2e, 0x50, 0x61, - 0x72, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x76, 0x6d, 0x2e, 0x50, 0x61, 0x72, 0x73, - 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4a, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 
0x72, 0x79, 0x12, 0x1a, 0x2e, 0x76, 0x6d, 0x2e, 0x47, 0x65, - 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x76, 0x6d, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, - 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x3e, 0x0a, 0x0b, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, - 0x12, 0x16, 0x2e, 0x76, 0x6d, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x56, 0x65, 0x72, 0x69, 0x66, - 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x76, 0x6d, 0x2e, 0x42, 0x6c, - 0x6f, 0x63, 0x6b, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x3d, 0x0a, 0x0b, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, - 0x12, 0x16, 0x2e, 0x76, 0x6d, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x41, 0x63, 0x63, 0x65, 0x70, + 0x69, 0x67, 0x68, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x48, 0x0a, 0x10, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, + 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x1c, 0x2e, 0x76, 0x6d, 0x2e, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5c, 0x0a, 0x1a, 0x47, 0x65, 0x74, 0x4f, 0x6e, 0x67, + 0x6f, 0x69, 0x6e, 0x67, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, + 0x6d, 0x61, 0x72, 0x79, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x26, 0x2e, 0x76, + 0x6d, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x6e, 0x67, 0x6f, 0x69, 0x6e, 0x67, 0x53, 0x79, 0x6e, 0x63, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 
0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4e, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x4c, 0x61, 0x73, 0x74, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x16, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, + 0x70, 0x74, 0x79, 0x1a, 0x1f, 0x2e, 0x76, 0x6d, 0x2e, 0x47, 0x65, 0x74, 0x4c, 0x61, 0x73, 0x74, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x50, 0x0a, 0x11, 0x50, 0x61, 0x72, 0x73, 0x65, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x1c, 0x2e, 0x76, 0x6d, 0x2e, 0x50, + 0x61, 0x72, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x76, 0x6d, 0x2e, 0x50, 0x61, 0x72, + 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4a, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x1a, 0x2e, 0x76, 0x6d, 0x2e, 0x47, + 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x76, 0x6d, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x0b, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x56, 0x65, 0x72, 0x69, 0x66, + 0x79, 0x12, 0x16, 0x2e, 0x76, 0x6d, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x56, 0x65, 0x72, 0x69, + 0x66, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x76, 0x6d, 0x2e, 0x42, + 0x6c, 0x6f, 0x63, 0x6b, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x3d, 0x0a, 0x0b, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x41, 0x63, 0x63, 0x65, 0x70, + 0x74, 0x12, 
0x16, 0x2e, 0x76, 0x6d, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x41, 0x63, 0x63, 0x65, + 0x70, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, + 0x79, 0x12, 0x3d, 0x0a, 0x0b, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x6a, 0x65, 0x63, 0x74, + 0x12, 0x16, 0x2e, 0x76, 0x6d, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, - 0x12, 0x3d, 0x0a, 0x0b, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x12, - 0x16, 0x2e, 0x76, 0x6d, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x6a, 0x65, 0x63, 0x74, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, - 0x53, 0x0a, 0x12, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x41, - 0x63, 0x63, 0x65, 0x70, 0x74, 0x12, 0x1d, 0x2e, 0x76, 0x6d, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, - 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x76, 0x6d, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, - 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x2d, 0x5a, 0x2b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, - 0x6f, 0x6d, 0x2f, 0x61, 0x76, 0x61, 0x2d, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x61, 0x76, 0x61, 0x6c, - 0x61, 0x6e, 0x63, 0x68, 0x65, 0x67, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, - 0x2f, 0x76, 0x6d, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x12, 0x53, 0x0a, 0x12, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 
0x72, 0x79, + 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x12, 0x1d, 0x2e, 0x76, 0x6d, 0x2e, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x76, 0x6d, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x2d, 0x5a, 0x2b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x76, 0x61, 0x2d, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x61, 0x76, 0x61, + 0x6c, 0x61, 0x6e, 0x63, 0x68, 0x65, 0x67, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, + 0x62, 0x2f, 0x76, 0x6d, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -3561,7 +3500,7 @@ func file_vm_vm_proto_rawDescGZIP() []byte { } var file_vm_vm_proto_enumTypes = make([]protoimpl.EnumInfo, 4) -var file_vm_vm_proto_msgTypes = make([]protoimpl.MessageInfo, 47) +var file_vm_vm_proto_msgTypes = make([]protoimpl.MessageInfo, 45) var file_vm_vm_proto_goTypes = []interface{}{ (State)(0), // 0: vm.State (Status)(0), // 1: vm.Status @@ -3569,155 +3508,149 @@ var file_vm_vm_proto_goTypes = []interface{}{ (StateSummaryAcceptResponse_Mode)(0), // 3: vm.StateSummaryAcceptResponse.Mode (*InitializeRequest)(nil), // 4: vm.InitializeRequest (*InitializeResponse)(nil), // 5: vm.InitializeResponse - (*VersionedDBServer)(nil), // 6: vm.VersionedDBServer - (*SetStateRequest)(nil), // 7: vm.SetStateRequest - (*SetStateResponse)(nil), // 8: vm.SetStateResponse - (*CreateHandlersResponse)(nil), // 9: vm.CreateHandlersResponse - (*CreateStaticHandlersResponse)(nil), // 10: vm.CreateStaticHandlersResponse - (*Handler)(nil), // 11: vm.Handler - (*BuildBlockRequest)(nil), // 12: vm.BuildBlockRequest - (*BuildBlockResponse)(nil), // 13: vm.BuildBlockResponse - (*ParseBlockRequest)(nil), // 14: vm.ParseBlockRequest - (*ParseBlockResponse)(nil), // 15: vm.ParseBlockResponse - 
(*GetBlockRequest)(nil), // 16: vm.GetBlockRequest - (*GetBlockResponse)(nil), // 17: vm.GetBlockResponse - (*SetPreferenceRequest)(nil), // 18: vm.SetPreferenceRequest - (*BlockVerifyRequest)(nil), // 19: vm.BlockVerifyRequest - (*BlockVerifyResponse)(nil), // 20: vm.BlockVerifyResponse - (*BlockAcceptRequest)(nil), // 21: vm.BlockAcceptRequest - (*BlockRejectRequest)(nil), // 22: vm.BlockRejectRequest - (*HealthResponse)(nil), // 23: vm.HealthResponse - (*VersionResponse)(nil), // 24: vm.VersionResponse - (*AppRequestMsg)(nil), // 25: vm.AppRequestMsg - (*AppRequestFailedMsg)(nil), // 26: vm.AppRequestFailedMsg - (*AppResponseMsg)(nil), // 27: vm.AppResponseMsg - (*AppGossipMsg)(nil), // 28: vm.AppGossipMsg - (*CrossChainAppRequestMsg)(nil), // 29: vm.CrossChainAppRequestMsg - (*CrossChainAppRequestFailedMsg)(nil), // 30: vm.CrossChainAppRequestFailedMsg - (*CrossChainAppResponseMsg)(nil), // 31: vm.CrossChainAppResponseMsg - (*ConnectedRequest)(nil), // 32: vm.ConnectedRequest - (*DisconnectedRequest)(nil), // 33: vm.DisconnectedRequest - (*GetAncestorsRequest)(nil), // 34: vm.GetAncestorsRequest - (*GetAncestorsResponse)(nil), // 35: vm.GetAncestorsResponse - (*BatchedParseBlockRequest)(nil), // 36: vm.BatchedParseBlockRequest - (*BatchedParseBlockResponse)(nil), // 37: vm.BatchedParseBlockResponse - (*VerifyHeightIndexResponse)(nil), // 38: vm.VerifyHeightIndexResponse - (*GetBlockIDAtHeightRequest)(nil), // 39: vm.GetBlockIDAtHeightRequest - (*GetBlockIDAtHeightResponse)(nil), // 40: vm.GetBlockIDAtHeightResponse - (*GatherResponse)(nil), // 41: vm.GatherResponse - (*StateSyncEnabledResponse)(nil), // 42: vm.StateSyncEnabledResponse - (*GetOngoingSyncStateSummaryResponse)(nil), // 43: vm.GetOngoingSyncStateSummaryResponse - (*GetLastStateSummaryResponse)(nil), // 44: vm.GetLastStateSummaryResponse - (*ParseStateSummaryRequest)(nil), // 45: vm.ParseStateSummaryRequest - (*ParseStateSummaryResponse)(nil), // 46: vm.ParseStateSummaryResponse - 
(*GetStateSummaryRequest)(nil), // 47: vm.GetStateSummaryRequest - (*GetStateSummaryResponse)(nil), // 48: vm.GetStateSummaryResponse - (*StateSummaryAcceptRequest)(nil), // 49: vm.StateSummaryAcceptRequest - (*StateSummaryAcceptResponse)(nil), // 50: vm.StateSummaryAcceptResponse - (*timestamppb.Timestamp)(nil), // 51: google.protobuf.Timestamp - (*_go.MetricFamily)(nil), // 52: io.prometheus.client.MetricFamily - (*emptypb.Empty)(nil), // 53: google.protobuf.Empty + (*SetStateRequest)(nil), // 6: vm.SetStateRequest + (*SetStateResponse)(nil), // 7: vm.SetStateResponse + (*CreateHandlersResponse)(nil), // 8: vm.CreateHandlersResponse + (*Handler)(nil), // 9: vm.Handler + (*BuildBlockRequest)(nil), // 10: vm.BuildBlockRequest + (*BuildBlockResponse)(nil), // 11: vm.BuildBlockResponse + (*ParseBlockRequest)(nil), // 12: vm.ParseBlockRequest + (*ParseBlockResponse)(nil), // 13: vm.ParseBlockResponse + (*GetBlockRequest)(nil), // 14: vm.GetBlockRequest + (*GetBlockResponse)(nil), // 15: vm.GetBlockResponse + (*SetPreferenceRequest)(nil), // 16: vm.SetPreferenceRequest + (*BlockVerifyRequest)(nil), // 17: vm.BlockVerifyRequest + (*BlockVerifyResponse)(nil), // 18: vm.BlockVerifyResponse + (*BlockAcceptRequest)(nil), // 19: vm.BlockAcceptRequest + (*BlockRejectRequest)(nil), // 20: vm.BlockRejectRequest + (*HealthResponse)(nil), // 21: vm.HealthResponse + (*VersionResponse)(nil), // 22: vm.VersionResponse + (*AppRequestMsg)(nil), // 23: vm.AppRequestMsg + (*AppRequestFailedMsg)(nil), // 24: vm.AppRequestFailedMsg + (*AppResponseMsg)(nil), // 25: vm.AppResponseMsg + (*AppGossipMsg)(nil), // 26: vm.AppGossipMsg + (*CrossChainAppRequestMsg)(nil), // 27: vm.CrossChainAppRequestMsg + (*CrossChainAppRequestFailedMsg)(nil), // 28: vm.CrossChainAppRequestFailedMsg + (*CrossChainAppResponseMsg)(nil), // 29: vm.CrossChainAppResponseMsg + (*ConnectedRequest)(nil), // 30: vm.ConnectedRequest + (*DisconnectedRequest)(nil), // 31: vm.DisconnectedRequest + (*GetAncestorsRequest)(nil), 
// 32: vm.GetAncestorsRequest + (*GetAncestorsResponse)(nil), // 33: vm.GetAncestorsResponse + (*BatchedParseBlockRequest)(nil), // 34: vm.BatchedParseBlockRequest + (*BatchedParseBlockResponse)(nil), // 35: vm.BatchedParseBlockResponse + (*VerifyHeightIndexResponse)(nil), // 36: vm.VerifyHeightIndexResponse + (*GetBlockIDAtHeightRequest)(nil), // 37: vm.GetBlockIDAtHeightRequest + (*GetBlockIDAtHeightResponse)(nil), // 38: vm.GetBlockIDAtHeightResponse + (*GatherResponse)(nil), // 39: vm.GatherResponse + (*StateSyncEnabledResponse)(nil), // 40: vm.StateSyncEnabledResponse + (*GetOngoingSyncStateSummaryResponse)(nil), // 41: vm.GetOngoingSyncStateSummaryResponse + (*GetLastStateSummaryResponse)(nil), // 42: vm.GetLastStateSummaryResponse + (*ParseStateSummaryRequest)(nil), // 43: vm.ParseStateSummaryRequest + (*ParseStateSummaryResponse)(nil), // 44: vm.ParseStateSummaryResponse + (*GetStateSummaryRequest)(nil), // 45: vm.GetStateSummaryRequest + (*GetStateSummaryResponse)(nil), // 46: vm.GetStateSummaryResponse + (*StateSummaryAcceptRequest)(nil), // 47: vm.StateSummaryAcceptRequest + (*StateSummaryAcceptResponse)(nil), // 48: vm.StateSummaryAcceptResponse + (*timestamppb.Timestamp)(nil), // 49: google.protobuf.Timestamp + (*_go.MetricFamily)(nil), // 50: io.prometheus.client.MetricFamily + (*emptypb.Empty)(nil), // 51: google.protobuf.Empty } var file_vm_vm_proto_depIdxs = []int32{ - 6, // 0: vm.InitializeRequest.db_servers:type_name -> vm.VersionedDBServer - 51, // 1: vm.InitializeResponse.timestamp:type_name -> google.protobuf.Timestamp - 0, // 2: vm.SetStateRequest.state:type_name -> vm.State - 51, // 3: vm.SetStateResponse.timestamp:type_name -> google.protobuf.Timestamp - 11, // 4: vm.CreateHandlersResponse.handlers:type_name -> vm.Handler - 11, // 5: vm.CreateStaticHandlersResponse.handlers:type_name -> vm.Handler - 51, // 6: vm.BuildBlockResponse.timestamp:type_name -> google.protobuf.Timestamp - 1, // 7: vm.ParseBlockResponse.status:type_name -> vm.Status 
- 51, // 8: vm.ParseBlockResponse.timestamp:type_name -> google.protobuf.Timestamp - 1, // 9: vm.GetBlockResponse.status:type_name -> vm.Status - 51, // 10: vm.GetBlockResponse.timestamp:type_name -> google.protobuf.Timestamp - 2, // 11: vm.GetBlockResponse.err:type_name -> vm.Error - 51, // 12: vm.BlockVerifyResponse.timestamp:type_name -> google.protobuf.Timestamp - 51, // 13: vm.AppRequestMsg.deadline:type_name -> google.protobuf.Timestamp - 51, // 14: vm.CrossChainAppRequestMsg.deadline:type_name -> google.protobuf.Timestamp - 15, // 15: vm.BatchedParseBlockResponse.response:type_name -> vm.ParseBlockResponse - 2, // 16: vm.VerifyHeightIndexResponse.err:type_name -> vm.Error - 2, // 17: vm.GetBlockIDAtHeightResponse.err:type_name -> vm.Error - 52, // 18: vm.GatherResponse.metric_families:type_name -> io.prometheus.client.MetricFamily - 2, // 19: vm.StateSyncEnabledResponse.err:type_name -> vm.Error - 2, // 20: vm.GetOngoingSyncStateSummaryResponse.err:type_name -> vm.Error - 2, // 21: vm.GetLastStateSummaryResponse.err:type_name -> vm.Error - 2, // 22: vm.ParseStateSummaryResponse.err:type_name -> vm.Error - 2, // 23: vm.GetStateSummaryResponse.err:type_name -> vm.Error - 3, // 24: vm.StateSummaryAcceptResponse.mode:type_name -> vm.StateSummaryAcceptResponse.Mode - 2, // 25: vm.StateSummaryAcceptResponse.err:type_name -> vm.Error - 4, // 26: vm.VM.Initialize:input_type -> vm.InitializeRequest - 7, // 27: vm.VM.SetState:input_type -> vm.SetStateRequest - 53, // 28: vm.VM.Shutdown:input_type -> google.protobuf.Empty - 53, // 29: vm.VM.CreateHandlers:input_type -> google.protobuf.Empty - 53, // 30: vm.VM.CreateStaticHandlers:input_type -> google.protobuf.Empty - 32, // 31: vm.VM.Connected:input_type -> vm.ConnectedRequest - 33, // 32: vm.VM.Disconnected:input_type -> vm.DisconnectedRequest - 12, // 33: vm.VM.BuildBlock:input_type -> vm.BuildBlockRequest - 14, // 34: vm.VM.ParseBlock:input_type -> vm.ParseBlockRequest - 16, // 35: vm.VM.GetBlock:input_type -> 
vm.GetBlockRequest - 18, // 36: vm.VM.SetPreference:input_type -> vm.SetPreferenceRequest - 53, // 37: vm.VM.Health:input_type -> google.protobuf.Empty - 53, // 38: vm.VM.Version:input_type -> google.protobuf.Empty - 25, // 39: vm.VM.AppRequest:input_type -> vm.AppRequestMsg - 26, // 40: vm.VM.AppRequestFailed:input_type -> vm.AppRequestFailedMsg - 27, // 41: vm.VM.AppResponse:input_type -> vm.AppResponseMsg - 28, // 42: vm.VM.AppGossip:input_type -> vm.AppGossipMsg - 53, // 43: vm.VM.Gather:input_type -> google.protobuf.Empty - 29, // 44: vm.VM.CrossChainAppRequest:input_type -> vm.CrossChainAppRequestMsg - 30, // 45: vm.VM.CrossChainAppRequestFailed:input_type -> vm.CrossChainAppRequestFailedMsg - 31, // 46: vm.VM.CrossChainAppResponse:input_type -> vm.CrossChainAppResponseMsg - 34, // 47: vm.VM.GetAncestors:input_type -> vm.GetAncestorsRequest - 36, // 48: vm.VM.BatchedParseBlock:input_type -> vm.BatchedParseBlockRequest - 53, // 49: vm.VM.VerifyHeightIndex:input_type -> google.protobuf.Empty - 39, // 50: vm.VM.GetBlockIDAtHeight:input_type -> vm.GetBlockIDAtHeightRequest - 53, // 51: vm.VM.StateSyncEnabled:input_type -> google.protobuf.Empty - 53, // 52: vm.VM.GetOngoingSyncStateSummary:input_type -> google.protobuf.Empty - 53, // 53: vm.VM.GetLastStateSummary:input_type -> google.protobuf.Empty - 45, // 54: vm.VM.ParseStateSummary:input_type -> vm.ParseStateSummaryRequest - 47, // 55: vm.VM.GetStateSummary:input_type -> vm.GetStateSummaryRequest - 19, // 56: vm.VM.BlockVerify:input_type -> vm.BlockVerifyRequest - 21, // 57: vm.VM.BlockAccept:input_type -> vm.BlockAcceptRequest - 22, // 58: vm.VM.BlockReject:input_type -> vm.BlockRejectRequest - 49, // 59: vm.VM.StateSummaryAccept:input_type -> vm.StateSummaryAcceptRequest - 5, // 60: vm.VM.Initialize:output_type -> vm.InitializeResponse - 8, // 61: vm.VM.SetState:output_type -> vm.SetStateResponse - 53, // 62: vm.VM.Shutdown:output_type -> google.protobuf.Empty - 9, // 63: vm.VM.CreateHandlers:output_type -> 
vm.CreateHandlersResponse - 10, // 64: vm.VM.CreateStaticHandlers:output_type -> vm.CreateStaticHandlersResponse - 53, // 65: vm.VM.Connected:output_type -> google.protobuf.Empty - 53, // 66: vm.VM.Disconnected:output_type -> google.protobuf.Empty - 13, // 67: vm.VM.BuildBlock:output_type -> vm.BuildBlockResponse - 15, // 68: vm.VM.ParseBlock:output_type -> vm.ParseBlockResponse - 17, // 69: vm.VM.GetBlock:output_type -> vm.GetBlockResponse - 53, // 70: vm.VM.SetPreference:output_type -> google.protobuf.Empty - 23, // 71: vm.VM.Health:output_type -> vm.HealthResponse - 24, // 72: vm.VM.Version:output_type -> vm.VersionResponse - 53, // 73: vm.VM.AppRequest:output_type -> google.protobuf.Empty - 53, // 74: vm.VM.AppRequestFailed:output_type -> google.protobuf.Empty - 53, // 75: vm.VM.AppResponse:output_type -> google.protobuf.Empty - 53, // 76: vm.VM.AppGossip:output_type -> google.protobuf.Empty - 41, // 77: vm.VM.Gather:output_type -> vm.GatherResponse - 53, // 78: vm.VM.CrossChainAppRequest:output_type -> google.protobuf.Empty - 53, // 79: vm.VM.CrossChainAppRequestFailed:output_type -> google.protobuf.Empty - 53, // 80: vm.VM.CrossChainAppResponse:output_type -> google.protobuf.Empty - 35, // 81: vm.VM.GetAncestors:output_type -> vm.GetAncestorsResponse - 37, // 82: vm.VM.BatchedParseBlock:output_type -> vm.BatchedParseBlockResponse - 38, // 83: vm.VM.VerifyHeightIndex:output_type -> vm.VerifyHeightIndexResponse - 40, // 84: vm.VM.GetBlockIDAtHeight:output_type -> vm.GetBlockIDAtHeightResponse - 42, // 85: vm.VM.StateSyncEnabled:output_type -> vm.StateSyncEnabledResponse - 43, // 86: vm.VM.GetOngoingSyncStateSummary:output_type -> vm.GetOngoingSyncStateSummaryResponse - 44, // 87: vm.VM.GetLastStateSummary:output_type -> vm.GetLastStateSummaryResponse - 46, // 88: vm.VM.ParseStateSummary:output_type -> vm.ParseStateSummaryResponse - 48, // 89: vm.VM.GetStateSummary:output_type -> vm.GetStateSummaryResponse - 20, // 90: vm.VM.BlockVerify:output_type -> 
vm.BlockVerifyResponse - 53, // 91: vm.VM.BlockAccept:output_type -> google.protobuf.Empty - 53, // 92: vm.VM.BlockReject:output_type -> google.protobuf.Empty - 50, // 93: vm.VM.StateSummaryAccept:output_type -> vm.StateSummaryAcceptResponse - 60, // [60:94] is the sub-list for method output_type - 26, // [26:60] is the sub-list for method input_type - 26, // [26:26] is the sub-list for extension type_name - 26, // [26:26] is the sub-list for extension extendee - 0, // [0:26] is the sub-list for field type_name + 49, // 0: vm.InitializeResponse.timestamp:type_name -> google.protobuf.Timestamp + 0, // 1: vm.SetStateRequest.state:type_name -> vm.State + 49, // 2: vm.SetStateResponse.timestamp:type_name -> google.protobuf.Timestamp + 9, // 3: vm.CreateHandlersResponse.handlers:type_name -> vm.Handler + 49, // 4: vm.BuildBlockResponse.timestamp:type_name -> google.protobuf.Timestamp + 1, // 5: vm.ParseBlockResponse.status:type_name -> vm.Status + 49, // 6: vm.ParseBlockResponse.timestamp:type_name -> google.protobuf.Timestamp + 1, // 7: vm.GetBlockResponse.status:type_name -> vm.Status + 49, // 8: vm.GetBlockResponse.timestamp:type_name -> google.protobuf.Timestamp + 2, // 9: vm.GetBlockResponse.err:type_name -> vm.Error + 49, // 10: vm.BlockVerifyResponse.timestamp:type_name -> google.protobuf.Timestamp + 49, // 11: vm.AppRequestMsg.deadline:type_name -> google.protobuf.Timestamp + 49, // 12: vm.CrossChainAppRequestMsg.deadline:type_name -> google.protobuf.Timestamp + 13, // 13: vm.BatchedParseBlockResponse.response:type_name -> vm.ParseBlockResponse + 2, // 14: vm.VerifyHeightIndexResponse.err:type_name -> vm.Error + 2, // 15: vm.GetBlockIDAtHeightResponse.err:type_name -> vm.Error + 50, // 16: vm.GatherResponse.metric_families:type_name -> io.prometheus.client.MetricFamily + 2, // 17: vm.StateSyncEnabledResponse.err:type_name -> vm.Error + 2, // 18: vm.GetOngoingSyncStateSummaryResponse.err:type_name -> vm.Error + 2, // 19: 
vm.GetLastStateSummaryResponse.err:type_name -> vm.Error + 2, // 20: vm.ParseStateSummaryResponse.err:type_name -> vm.Error + 2, // 21: vm.GetStateSummaryResponse.err:type_name -> vm.Error + 3, // 22: vm.StateSummaryAcceptResponse.mode:type_name -> vm.StateSummaryAcceptResponse.Mode + 2, // 23: vm.StateSummaryAcceptResponse.err:type_name -> vm.Error + 4, // 24: vm.VM.Initialize:input_type -> vm.InitializeRequest + 6, // 25: vm.VM.SetState:input_type -> vm.SetStateRequest + 51, // 26: vm.VM.Shutdown:input_type -> google.protobuf.Empty + 51, // 27: vm.VM.CreateHandlers:input_type -> google.protobuf.Empty + 30, // 28: vm.VM.Connected:input_type -> vm.ConnectedRequest + 31, // 29: vm.VM.Disconnected:input_type -> vm.DisconnectedRequest + 10, // 30: vm.VM.BuildBlock:input_type -> vm.BuildBlockRequest + 12, // 31: vm.VM.ParseBlock:input_type -> vm.ParseBlockRequest + 14, // 32: vm.VM.GetBlock:input_type -> vm.GetBlockRequest + 16, // 33: vm.VM.SetPreference:input_type -> vm.SetPreferenceRequest + 51, // 34: vm.VM.Health:input_type -> google.protobuf.Empty + 51, // 35: vm.VM.Version:input_type -> google.protobuf.Empty + 23, // 36: vm.VM.AppRequest:input_type -> vm.AppRequestMsg + 24, // 37: vm.VM.AppRequestFailed:input_type -> vm.AppRequestFailedMsg + 25, // 38: vm.VM.AppResponse:input_type -> vm.AppResponseMsg + 26, // 39: vm.VM.AppGossip:input_type -> vm.AppGossipMsg + 51, // 40: vm.VM.Gather:input_type -> google.protobuf.Empty + 27, // 41: vm.VM.CrossChainAppRequest:input_type -> vm.CrossChainAppRequestMsg + 28, // 42: vm.VM.CrossChainAppRequestFailed:input_type -> vm.CrossChainAppRequestFailedMsg + 29, // 43: vm.VM.CrossChainAppResponse:input_type -> vm.CrossChainAppResponseMsg + 32, // 44: vm.VM.GetAncestors:input_type -> vm.GetAncestorsRequest + 34, // 45: vm.VM.BatchedParseBlock:input_type -> vm.BatchedParseBlockRequest + 51, // 46: vm.VM.VerifyHeightIndex:input_type -> google.protobuf.Empty + 37, // 47: vm.VM.GetBlockIDAtHeight:input_type -> 
vm.GetBlockIDAtHeightRequest + 51, // 48: vm.VM.StateSyncEnabled:input_type -> google.protobuf.Empty + 51, // 49: vm.VM.GetOngoingSyncStateSummary:input_type -> google.protobuf.Empty + 51, // 50: vm.VM.GetLastStateSummary:input_type -> google.protobuf.Empty + 43, // 51: vm.VM.ParseStateSummary:input_type -> vm.ParseStateSummaryRequest + 45, // 52: vm.VM.GetStateSummary:input_type -> vm.GetStateSummaryRequest + 17, // 53: vm.VM.BlockVerify:input_type -> vm.BlockVerifyRequest + 19, // 54: vm.VM.BlockAccept:input_type -> vm.BlockAcceptRequest + 20, // 55: vm.VM.BlockReject:input_type -> vm.BlockRejectRequest + 47, // 56: vm.VM.StateSummaryAccept:input_type -> vm.StateSummaryAcceptRequest + 5, // 57: vm.VM.Initialize:output_type -> vm.InitializeResponse + 7, // 58: vm.VM.SetState:output_type -> vm.SetStateResponse + 51, // 59: vm.VM.Shutdown:output_type -> google.protobuf.Empty + 8, // 60: vm.VM.CreateHandlers:output_type -> vm.CreateHandlersResponse + 51, // 61: vm.VM.Connected:output_type -> google.protobuf.Empty + 51, // 62: vm.VM.Disconnected:output_type -> google.protobuf.Empty + 11, // 63: vm.VM.BuildBlock:output_type -> vm.BuildBlockResponse + 13, // 64: vm.VM.ParseBlock:output_type -> vm.ParseBlockResponse + 15, // 65: vm.VM.GetBlock:output_type -> vm.GetBlockResponse + 51, // 66: vm.VM.SetPreference:output_type -> google.protobuf.Empty + 21, // 67: vm.VM.Health:output_type -> vm.HealthResponse + 22, // 68: vm.VM.Version:output_type -> vm.VersionResponse + 51, // 69: vm.VM.AppRequest:output_type -> google.protobuf.Empty + 51, // 70: vm.VM.AppRequestFailed:output_type -> google.protobuf.Empty + 51, // 71: vm.VM.AppResponse:output_type -> google.protobuf.Empty + 51, // 72: vm.VM.AppGossip:output_type -> google.protobuf.Empty + 39, // 73: vm.VM.Gather:output_type -> vm.GatherResponse + 51, // 74: vm.VM.CrossChainAppRequest:output_type -> google.protobuf.Empty + 51, // 75: vm.VM.CrossChainAppRequestFailed:output_type -> google.protobuf.Empty + 51, // 76: 
vm.VM.CrossChainAppResponse:output_type -> google.protobuf.Empty + 33, // 77: vm.VM.GetAncestors:output_type -> vm.GetAncestorsResponse + 35, // 78: vm.VM.BatchedParseBlock:output_type -> vm.BatchedParseBlockResponse + 36, // 79: vm.VM.VerifyHeightIndex:output_type -> vm.VerifyHeightIndexResponse + 38, // 80: vm.VM.GetBlockIDAtHeight:output_type -> vm.GetBlockIDAtHeightResponse + 40, // 81: vm.VM.StateSyncEnabled:output_type -> vm.StateSyncEnabledResponse + 41, // 82: vm.VM.GetOngoingSyncStateSummary:output_type -> vm.GetOngoingSyncStateSummaryResponse + 42, // 83: vm.VM.GetLastStateSummary:output_type -> vm.GetLastStateSummaryResponse + 44, // 84: vm.VM.ParseStateSummary:output_type -> vm.ParseStateSummaryResponse + 46, // 85: vm.VM.GetStateSummary:output_type -> vm.GetStateSummaryResponse + 18, // 86: vm.VM.BlockVerify:output_type -> vm.BlockVerifyResponse + 51, // 87: vm.VM.BlockAccept:output_type -> google.protobuf.Empty + 51, // 88: vm.VM.BlockReject:output_type -> google.protobuf.Empty + 48, // 89: vm.VM.StateSummaryAccept:output_type -> vm.StateSummaryAcceptResponse + 57, // [57:90] is the sub-list for method output_type + 24, // [24:57] is the sub-list for method input_type + 24, // [24:24] is the sub-list for extension type_name + 24, // [24:24] is the sub-list for extension extendee + 0, // [0:24] is the sub-list for field type_name } func init() { file_vm_vm_proto_init() } @@ -3751,18 +3684,6 @@ func file_vm_vm_proto_init() { } } file_vm_vm_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VersionedDBServer); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_vm_vm_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*SetStateRequest); i { case 0: return &v.state @@ -3774,7 +3695,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[4].Exporter = func(v interface{}, i int) 
interface{} { + file_vm_vm_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*SetStateResponse); i { case 0: return &v.state @@ -3786,7 +3707,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*CreateHandlersResponse); i { case 0: return &v.state @@ -3798,19 +3719,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateStaticHandlersResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_vm_vm_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Handler); i { case 0: return &v.state @@ -3822,7 +3731,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*BuildBlockRequest); i { case 0: return &v.state @@ -3834,7 +3743,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*BuildBlockResponse); i { case 0: return &v.state @@ -3846,7 +3755,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ParseBlockRequest); i { case 0: return &v.state @@ -3858,7 +3767,7 @@ func file_vm_vm_proto_init() { return nil } } - 
file_vm_vm_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ParseBlockResponse); i { case 0: return &v.state @@ -3870,7 +3779,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetBlockRequest); i { case 0: return &v.state @@ -3882,7 +3791,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetBlockResponse); i { case 0: return &v.state @@ -3894,7 +3803,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*SetPreferenceRequest); i { case 0: return &v.state @@ -3906,7 +3815,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*BlockVerifyRequest); i { case 0: return &v.state @@ -3918,7 +3827,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*BlockVerifyResponse); i { case 0: return &v.state @@ -3930,7 +3839,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { switch v := 
v.(*BlockAcceptRequest); i { case 0: return &v.state @@ -3942,7 +3851,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*BlockRejectRequest); i { case 0: return &v.state @@ -3954,7 +3863,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*HealthResponse); i { case 0: return &v.state @@ -3966,7 +3875,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VersionResponse); i { case 0: return &v.state @@ -3978,7 +3887,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*AppRequestMsg); i { case 0: return &v.state @@ -3990,7 +3899,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*AppRequestFailedMsg); i { case 0: return &v.state @@ -4002,7 +3911,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*AppResponseMsg); i { case 0: return &v.state @@ -4014,7 +3923,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + 
file_vm_vm_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*AppGossipMsg); i { case 0: return &v.state @@ -4026,7 +3935,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*CrossChainAppRequestMsg); i { case 0: return &v.state @@ -4038,7 +3947,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*CrossChainAppRequestFailedMsg); i { case 0: return &v.state @@ -4050,7 +3959,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*CrossChainAppResponseMsg); i { case 0: return &v.state @@ -4062,7 +3971,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ConnectedRequest); i { case 0: return &v.state @@ -4074,7 +3983,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*DisconnectedRequest); i { case 0: return &v.state @@ -4086,7 +3995,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetAncestorsRequest); i { case 0: return &v.state @@ -4098,7 +4007,7 @@ func 
file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetAncestorsResponse); i { case 0: return &v.state @@ -4110,7 +4019,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*BatchedParseBlockRequest); i { case 0: return &v.state @@ -4122,7 +4031,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*BatchedParseBlockResponse); i { case 0: return &v.state @@ -4134,7 +4043,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VerifyHeightIndexResponse); i { case 0: return &v.state @@ -4146,7 +4055,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetBlockIDAtHeightRequest); i { case 0: return &v.state @@ -4158,7 +4067,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetBlockIDAtHeightResponse); i { case 0: return &v.state @@ -4170,7 +4079,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { + 
file_vm_vm_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GatherResponse); i { case 0: return &v.state @@ -4182,7 +4091,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*StateSyncEnabledResponse); i { case 0: return &v.state @@ -4194,7 +4103,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetOngoingSyncStateSummaryResponse); i { case 0: return &v.state @@ -4206,7 +4115,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetLastStateSummaryResponse); i { case 0: return &v.state @@ -4218,7 +4127,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ParseStateSummaryRequest); i { case 0: return &v.state @@ -4230,7 +4139,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ParseStateSummaryResponse); i { case 0: return &v.state @@ -4242,7 +4151,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetStateSummaryRequest); i { case 0: return &v.state @@ 
-4254,7 +4163,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetStateSummaryResponse); i { case 0: return &v.state @@ -4266,7 +4175,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*StateSummaryAcceptRequest); i { case 0: return &v.state @@ -4278,7 +4187,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*StateSummaryAcceptResponse); i { case 0: return &v.state @@ -4291,15 +4200,15 @@ func file_vm_vm_proto_init() { } } } - file_vm_vm_proto_msgTypes[8].OneofWrappers = []interface{}{} - file_vm_vm_proto_msgTypes[15].OneofWrappers = []interface{}{} + file_vm_vm_proto_msgTypes[6].OneofWrappers = []interface{}{} + file_vm_vm_proto_msgTypes[13].OneofWrappers = []interface{}{} type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_vm_vm_proto_rawDesc, NumEnums: 4, - NumMessages: 47, + NumMessages: 45, NumExtensions: 0, NumServices: 1, }, diff --git a/avalanchego/proto/pb/vm/vm_grpc.pb.go b/avalanchego/proto/pb/vm/vm_grpc.pb.go index 25c3859b..6d7bb17f 100644 --- a/avalanchego/proto/pb/vm/vm_grpc.pb.go +++ b/avalanchego/proto/pb/vm/vm_grpc.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.2.0 +// - protoc-gen-go-grpc v1.3.0 // - protoc (unknown) // source: vm/vm.proto @@ -19,6 +19,42 @@ import ( // Requires gRPC-Go v1.32.0 or later. 
const _ = grpc.SupportPackageIsVersion7 +const ( + VM_Initialize_FullMethodName = "/vm.VM/Initialize" + VM_SetState_FullMethodName = "/vm.VM/SetState" + VM_Shutdown_FullMethodName = "/vm.VM/Shutdown" + VM_CreateHandlers_FullMethodName = "/vm.VM/CreateHandlers" + VM_Connected_FullMethodName = "/vm.VM/Connected" + VM_Disconnected_FullMethodName = "/vm.VM/Disconnected" + VM_BuildBlock_FullMethodName = "/vm.VM/BuildBlock" + VM_ParseBlock_FullMethodName = "/vm.VM/ParseBlock" + VM_GetBlock_FullMethodName = "/vm.VM/GetBlock" + VM_SetPreference_FullMethodName = "/vm.VM/SetPreference" + VM_Health_FullMethodName = "/vm.VM/Health" + VM_Version_FullMethodName = "/vm.VM/Version" + VM_AppRequest_FullMethodName = "/vm.VM/AppRequest" + VM_AppRequestFailed_FullMethodName = "/vm.VM/AppRequestFailed" + VM_AppResponse_FullMethodName = "/vm.VM/AppResponse" + VM_AppGossip_FullMethodName = "/vm.VM/AppGossip" + VM_Gather_FullMethodName = "/vm.VM/Gather" + VM_CrossChainAppRequest_FullMethodName = "/vm.VM/CrossChainAppRequest" + VM_CrossChainAppRequestFailed_FullMethodName = "/vm.VM/CrossChainAppRequestFailed" + VM_CrossChainAppResponse_FullMethodName = "/vm.VM/CrossChainAppResponse" + VM_GetAncestors_FullMethodName = "/vm.VM/GetAncestors" + VM_BatchedParseBlock_FullMethodName = "/vm.VM/BatchedParseBlock" + VM_VerifyHeightIndex_FullMethodName = "/vm.VM/VerifyHeightIndex" + VM_GetBlockIDAtHeight_FullMethodName = "/vm.VM/GetBlockIDAtHeight" + VM_StateSyncEnabled_FullMethodName = "/vm.VM/StateSyncEnabled" + VM_GetOngoingSyncStateSummary_FullMethodName = "/vm.VM/GetOngoingSyncStateSummary" + VM_GetLastStateSummary_FullMethodName = "/vm.VM/GetLastStateSummary" + VM_ParseStateSummary_FullMethodName = "/vm.VM/ParseStateSummary" + VM_GetStateSummary_FullMethodName = "/vm.VM/GetStateSummary" + VM_BlockVerify_FullMethodName = "/vm.VM/BlockVerify" + VM_BlockAccept_FullMethodName = "/vm.VM/BlockAccept" + VM_BlockReject_FullMethodName = "/vm.VM/BlockReject" + VM_StateSummaryAccept_FullMethodName = 
"/vm.VM/StateSummaryAccept" +) + // VMClient is the client API for VM service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. @@ -33,13 +69,6 @@ type VMClient interface { Shutdown(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*emptypb.Empty, error) // Creates the HTTP handlers for custom chain network calls. CreateHandlers(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*CreateHandlersResponse, error) - // Creates the HTTP handlers for custom VM network calls. - // - // Note: RPC Chain VM Factory will start a new instance of the VM in a - // seperate process which will populate the static handlers. After this - // process is created other processes will be created to populate blockchains, - // but they will not have the static handlers be called again. - CreateStaticHandlers(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*CreateStaticHandlersResponse, error) Connected(ctx context.Context, in *ConnectedRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) Disconnected(ctx context.Context, in *DisconnectedRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) // Attempt to create a new block from data contained in the VM. @@ -106,7 +135,7 @@ func NewVMClient(cc grpc.ClientConnInterface) VMClient { func (c *vMClient) Initialize(ctx context.Context, in *InitializeRequest, opts ...grpc.CallOption) (*InitializeResponse, error) { out := new(InitializeResponse) - err := c.cc.Invoke(ctx, "/vm.VM/Initialize", in, out, opts...) + err := c.cc.Invoke(ctx, VM_Initialize_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -115,7 +144,7 @@ func (c *vMClient) Initialize(ctx context.Context, in *InitializeRequest, opts . 
func (c *vMClient) SetState(ctx context.Context, in *SetStateRequest, opts ...grpc.CallOption) (*SetStateResponse, error) { out := new(SetStateResponse) - err := c.cc.Invoke(ctx, "/vm.VM/SetState", in, out, opts...) + err := c.cc.Invoke(ctx, VM_SetState_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -124,7 +153,7 @@ func (c *vMClient) SetState(ctx context.Context, in *SetStateRequest, opts ...gr func (c *vMClient) Shutdown(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*emptypb.Empty, error) { out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/vm.VM/Shutdown", in, out, opts...) + err := c.cc.Invoke(ctx, VM_Shutdown_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -133,16 +162,7 @@ func (c *vMClient) Shutdown(ctx context.Context, in *emptypb.Empty, opts ...grpc func (c *vMClient) CreateHandlers(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*CreateHandlersResponse, error) { out := new(CreateHandlersResponse) - err := c.cc.Invoke(ctx, "/vm.VM/CreateHandlers", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *vMClient) CreateStaticHandlers(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*CreateStaticHandlersResponse, error) { - out := new(CreateStaticHandlersResponse) - err := c.cc.Invoke(ctx, "/vm.VM/CreateStaticHandlers", in, out, opts...) + err := c.cc.Invoke(ctx, VM_CreateHandlers_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -151,7 +171,7 @@ func (c *vMClient) CreateStaticHandlers(ctx context.Context, in *emptypb.Empty, func (c *vMClient) Connected(ctx context.Context, in *ConnectedRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/vm.VM/Connected", in, out, opts...) + err := c.cc.Invoke(ctx, VM_Connected_FullMethodName, in, out, opts...) 
if err != nil { return nil, err } @@ -160,7 +180,7 @@ func (c *vMClient) Connected(ctx context.Context, in *ConnectedRequest, opts ... func (c *vMClient) Disconnected(ctx context.Context, in *DisconnectedRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/vm.VM/Disconnected", in, out, opts...) + err := c.cc.Invoke(ctx, VM_Disconnected_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -169,7 +189,7 @@ func (c *vMClient) Disconnected(ctx context.Context, in *DisconnectedRequest, op func (c *vMClient) BuildBlock(ctx context.Context, in *BuildBlockRequest, opts ...grpc.CallOption) (*BuildBlockResponse, error) { out := new(BuildBlockResponse) - err := c.cc.Invoke(ctx, "/vm.VM/BuildBlock", in, out, opts...) + err := c.cc.Invoke(ctx, VM_BuildBlock_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -178,7 +198,7 @@ func (c *vMClient) BuildBlock(ctx context.Context, in *BuildBlockRequest, opts . func (c *vMClient) ParseBlock(ctx context.Context, in *ParseBlockRequest, opts ...grpc.CallOption) (*ParseBlockResponse, error) { out := new(ParseBlockResponse) - err := c.cc.Invoke(ctx, "/vm.VM/ParseBlock", in, out, opts...) + err := c.cc.Invoke(ctx, VM_ParseBlock_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -187,7 +207,7 @@ func (c *vMClient) ParseBlock(ctx context.Context, in *ParseBlockRequest, opts . func (c *vMClient) GetBlock(ctx context.Context, in *GetBlockRequest, opts ...grpc.CallOption) (*GetBlockResponse, error) { out := new(GetBlockResponse) - err := c.cc.Invoke(ctx, "/vm.VM/GetBlock", in, out, opts...) + err := c.cc.Invoke(ctx, VM_GetBlock_FullMethodName, in, out, opts...) 
if err != nil { return nil, err } @@ -196,7 +216,7 @@ func (c *vMClient) GetBlock(ctx context.Context, in *GetBlockRequest, opts ...gr func (c *vMClient) SetPreference(ctx context.Context, in *SetPreferenceRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/vm.VM/SetPreference", in, out, opts...) + err := c.cc.Invoke(ctx, VM_SetPreference_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -205,7 +225,7 @@ func (c *vMClient) SetPreference(ctx context.Context, in *SetPreferenceRequest, func (c *vMClient) Health(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*HealthResponse, error) { out := new(HealthResponse) - err := c.cc.Invoke(ctx, "/vm.VM/Health", in, out, opts...) + err := c.cc.Invoke(ctx, VM_Health_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -214,7 +234,7 @@ func (c *vMClient) Health(ctx context.Context, in *emptypb.Empty, opts ...grpc.C func (c *vMClient) Version(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*VersionResponse, error) { out := new(VersionResponse) - err := c.cc.Invoke(ctx, "/vm.VM/Version", in, out, opts...) + err := c.cc.Invoke(ctx, VM_Version_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -223,7 +243,7 @@ func (c *vMClient) Version(ctx context.Context, in *emptypb.Empty, opts ...grpc. func (c *vMClient) AppRequest(ctx context.Context, in *AppRequestMsg, opts ...grpc.CallOption) (*emptypb.Empty, error) { out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/vm.VM/AppRequest", in, out, opts...) + err := c.cc.Invoke(ctx, VM_AppRequest_FullMethodName, in, out, opts...) 
if err != nil { return nil, err } @@ -232,7 +252,7 @@ func (c *vMClient) AppRequest(ctx context.Context, in *AppRequestMsg, opts ...gr func (c *vMClient) AppRequestFailed(ctx context.Context, in *AppRequestFailedMsg, opts ...grpc.CallOption) (*emptypb.Empty, error) { out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/vm.VM/AppRequestFailed", in, out, opts...) + err := c.cc.Invoke(ctx, VM_AppRequestFailed_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -241,7 +261,7 @@ func (c *vMClient) AppRequestFailed(ctx context.Context, in *AppRequestFailedMsg func (c *vMClient) AppResponse(ctx context.Context, in *AppResponseMsg, opts ...grpc.CallOption) (*emptypb.Empty, error) { out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/vm.VM/AppResponse", in, out, opts...) + err := c.cc.Invoke(ctx, VM_AppResponse_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -250,7 +270,7 @@ func (c *vMClient) AppResponse(ctx context.Context, in *AppResponseMsg, opts ... func (c *vMClient) AppGossip(ctx context.Context, in *AppGossipMsg, opts ...grpc.CallOption) (*emptypb.Empty, error) { out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/vm.VM/AppGossip", in, out, opts...) + err := c.cc.Invoke(ctx, VM_AppGossip_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -259,7 +279,7 @@ func (c *vMClient) AppGossip(ctx context.Context, in *AppGossipMsg, opts ...grpc func (c *vMClient) Gather(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*GatherResponse, error) { out := new(GatherResponse) - err := c.cc.Invoke(ctx, "/vm.VM/Gather", in, out, opts...) + err := c.cc.Invoke(ctx, VM_Gather_FullMethodName, in, out, opts...) 
if err != nil { return nil, err } @@ -268,7 +288,7 @@ func (c *vMClient) Gather(ctx context.Context, in *emptypb.Empty, opts ...grpc.C func (c *vMClient) CrossChainAppRequest(ctx context.Context, in *CrossChainAppRequestMsg, opts ...grpc.CallOption) (*emptypb.Empty, error) { out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/vm.VM/CrossChainAppRequest", in, out, opts...) + err := c.cc.Invoke(ctx, VM_CrossChainAppRequest_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -277,7 +297,7 @@ func (c *vMClient) CrossChainAppRequest(ctx context.Context, in *CrossChainAppRe func (c *vMClient) CrossChainAppRequestFailed(ctx context.Context, in *CrossChainAppRequestFailedMsg, opts ...grpc.CallOption) (*emptypb.Empty, error) { out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/vm.VM/CrossChainAppRequestFailed", in, out, opts...) + err := c.cc.Invoke(ctx, VM_CrossChainAppRequestFailed_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -286,7 +306,7 @@ func (c *vMClient) CrossChainAppRequestFailed(ctx context.Context, in *CrossChai func (c *vMClient) CrossChainAppResponse(ctx context.Context, in *CrossChainAppResponseMsg, opts ...grpc.CallOption) (*emptypb.Empty, error) { out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/vm.VM/CrossChainAppResponse", in, out, opts...) + err := c.cc.Invoke(ctx, VM_CrossChainAppResponse_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -295,7 +315,7 @@ func (c *vMClient) CrossChainAppResponse(ctx context.Context, in *CrossChainAppR func (c *vMClient) GetAncestors(ctx context.Context, in *GetAncestorsRequest, opts ...grpc.CallOption) (*GetAncestorsResponse, error) { out := new(GetAncestorsResponse) - err := c.cc.Invoke(ctx, "/vm.VM/GetAncestors", in, out, opts...) + err := c.cc.Invoke(ctx, VM_GetAncestors_FullMethodName, in, out, opts...) 
if err != nil { return nil, err } @@ -304,7 +324,7 @@ func (c *vMClient) GetAncestors(ctx context.Context, in *GetAncestorsRequest, op func (c *vMClient) BatchedParseBlock(ctx context.Context, in *BatchedParseBlockRequest, opts ...grpc.CallOption) (*BatchedParseBlockResponse, error) { out := new(BatchedParseBlockResponse) - err := c.cc.Invoke(ctx, "/vm.VM/BatchedParseBlock", in, out, opts...) + err := c.cc.Invoke(ctx, VM_BatchedParseBlock_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -313,7 +333,7 @@ func (c *vMClient) BatchedParseBlock(ctx context.Context, in *BatchedParseBlockR func (c *vMClient) VerifyHeightIndex(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*VerifyHeightIndexResponse, error) { out := new(VerifyHeightIndexResponse) - err := c.cc.Invoke(ctx, "/vm.VM/VerifyHeightIndex", in, out, opts...) + err := c.cc.Invoke(ctx, VM_VerifyHeightIndex_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -322,7 +342,7 @@ func (c *vMClient) VerifyHeightIndex(ctx context.Context, in *emptypb.Empty, opt func (c *vMClient) GetBlockIDAtHeight(ctx context.Context, in *GetBlockIDAtHeightRequest, opts ...grpc.CallOption) (*GetBlockIDAtHeightResponse, error) { out := new(GetBlockIDAtHeightResponse) - err := c.cc.Invoke(ctx, "/vm.VM/GetBlockIDAtHeight", in, out, opts...) + err := c.cc.Invoke(ctx, VM_GetBlockIDAtHeight_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -331,7 +351,7 @@ func (c *vMClient) GetBlockIDAtHeight(ctx context.Context, in *GetBlockIDAtHeigh func (c *vMClient) StateSyncEnabled(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*StateSyncEnabledResponse, error) { out := new(StateSyncEnabledResponse) - err := c.cc.Invoke(ctx, "/vm.VM/StateSyncEnabled", in, out, opts...) + err := c.cc.Invoke(ctx, VM_StateSyncEnabled_FullMethodName, in, out, opts...) 
if err != nil { return nil, err } @@ -340,7 +360,7 @@ func (c *vMClient) StateSyncEnabled(ctx context.Context, in *emptypb.Empty, opts func (c *vMClient) GetOngoingSyncStateSummary(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*GetOngoingSyncStateSummaryResponse, error) { out := new(GetOngoingSyncStateSummaryResponse) - err := c.cc.Invoke(ctx, "/vm.VM/GetOngoingSyncStateSummary", in, out, opts...) + err := c.cc.Invoke(ctx, VM_GetOngoingSyncStateSummary_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -349,7 +369,7 @@ func (c *vMClient) GetOngoingSyncStateSummary(ctx context.Context, in *emptypb.E func (c *vMClient) GetLastStateSummary(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*GetLastStateSummaryResponse, error) { out := new(GetLastStateSummaryResponse) - err := c.cc.Invoke(ctx, "/vm.VM/GetLastStateSummary", in, out, opts...) + err := c.cc.Invoke(ctx, VM_GetLastStateSummary_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -358,7 +378,7 @@ func (c *vMClient) GetLastStateSummary(ctx context.Context, in *emptypb.Empty, o func (c *vMClient) ParseStateSummary(ctx context.Context, in *ParseStateSummaryRequest, opts ...grpc.CallOption) (*ParseStateSummaryResponse, error) { out := new(ParseStateSummaryResponse) - err := c.cc.Invoke(ctx, "/vm.VM/ParseStateSummary", in, out, opts...) + err := c.cc.Invoke(ctx, VM_ParseStateSummary_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -367,7 +387,7 @@ func (c *vMClient) ParseStateSummary(ctx context.Context, in *ParseStateSummaryR func (c *vMClient) GetStateSummary(ctx context.Context, in *GetStateSummaryRequest, opts ...grpc.CallOption) (*GetStateSummaryResponse, error) { out := new(GetStateSummaryResponse) - err := c.cc.Invoke(ctx, "/vm.VM/GetStateSummary", in, out, opts...) + err := c.cc.Invoke(ctx, VM_GetStateSummary_FullMethodName, in, out, opts...) 
if err != nil { return nil, err } @@ -376,7 +396,7 @@ func (c *vMClient) GetStateSummary(ctx context.Context, in *GetStateSummaryReque func (c *vMClient) BlockVerify(ctx context.Context, in *BlockVerifyRequest, opts ...grpc.CallOption) (*BlockVerifyResponse, error) { out := new(BlockVerifyResponse) - err := c.cc.Invoke(ctx, "/vm.VM/BlockVerify", in, out, opts...) + err := c.cc.Invoke(ctx, VM_BlockVerify_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -385,7 +405,7 @@ func (c *vMClient) BlockVerify(ctx context.Context, in *BlockVerifyRequest, opts func (c *vMClient) BlockAccept(ctx context.Context, in *BlockAcceptRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/vm.VM/BlockAccept", in, out, opts...) + err := c.cc.Invoke(ctx, VM_BlockAccept_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -394,7 +414,7 @@ func (c *vMClient) BlockAccept(ctx context.Context, in *BlockAcceptRequest, opts func (c *vMClient) BlockReject(ctx context.Context, in *BlockRejectRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/vm.VM/BlockReject", in, out, opts...) + err := c.cc.Invoke(ctx, VM_BlockReject_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -403,7 +423,7 @@ func (c *vMClient) BlockReject(ctx context.Context, in *BlockRejectRequest, opts func (c *vMClient) StateSummaryAccept(ctx context.Context, in *StateSummaryAcceptRequest, opts ...grpc.CallOption) (*StateSummaryAcceptResponse, error) { out := new(StateSummaryAcceptResponse) - err := c.cc.Invoke(ctx, "/vm.VM/StateSummaryAccept", in, out, opts...) + err := c.cc.Invoke(ctx, VM_StateSummaryAccept_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -424,13 +444,6 @@ type VMServer interface { Shutdown(context.Context, *emptypb.Empty) (*emptypb.Empty, error) // Creates the HTTP handlers for custom chain network calls. 
CreateHandlers(context.Context, *emptypb.Empty) (*CreateHandlersResponse, error) - // Creates the HTTP handlers for custom VM network calls. - // - // Note: RPC Chain VM Factory will start a new instance of the VM in a - // seperate process which will populate the static handlers. After this - // process is created other processes will be created to populate blockchains, - // but they will not have the static handlers be called again. - CreateStaticHandlers(context.Context, *emptypb.Empty) (*CreateStaticHandlersResponse, error) Connected(context.Context, *ConnectedRequest) (*emptypb.Empty, error) Disconnected(context.Context, *DisconnectedRequest) (*emptypb.Empty, error) // Attempt to create a new block from data contained in the VM. @@ -504,9 +517,6 @@ func (UnimplementedVMServer) Shutdown(context.Context, *emptypb.Empty) (*emptypb func (UnimplementedVMServer) CreateHandlers(context.Context, *emptypb.Empty) (*CreateHandlersResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method CreateHandlers not implemented") } -func (UnimplementedVMServer) CreateStaticHandlers(context.Context, *emptypb.Empty) (*CreateStaticHandlersResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method CreateStaticHandlers not implemented") -} func (UnimplementedVMServer) Connected(context.Context, *ConnectedRequest) (*emptypb.Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method Connected not implemented") } @@ -617,7 +627,7 @@ func _VM_Initialize_Handler(srv interface{}, ctx context.Context, dec func(inter } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/vm.VM/Initialize", + FullMethod: VM_Initialize_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VMServer).Initialize(ctx, req.(*InitializeRequest)) @@ -635,7 +645,7 @@ func _VM_SetState_Handler(srv interface{}, ctx context.Context, dec func(interfa } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: 
"/vm.VM/SetState", + FullMethod: VM_SetState_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VMServer).SetState(ctx, req.(*SetStateRequest)) @@ -653,7 +663,7 @@ func _VM_Shutdown_Handler(srv interface{}, ctx context.Context, dec func(interfa } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/vm.VM/Shutdown", + FullMethod: VM_Shutdown_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VMServer).Shutdown(ctx, req.(*emptypb.Empty)) @@ -671,7 +681,7 @@ func _VM_CreateHandlers_Handler(srv interface{}, ctx context.Context, dec func(i } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/vm.VM/CreateHandlers", + FullMethod: VM_CreateHandlers_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VMServer).CreateHandlers(ctx, req.(*emptypb.Empty)) @@ -679,24 +689,6 @@ func _VM_CreateHandlers_Handler(srv interface{}, ctx context.Context, dec func(i return interceptor(ctx, in, info, handler) } -func _VM_CreateStaticHandlers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(emptypb.Empty) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(VMServer).CreateStaticHandlers(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/vm.VM/CreateStaticHandlers", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(VMServer).CreateStaticHandlers(ctx, req.(*emptypb.Empty)) - } - return interceptor(ctx, in, info, handler) -} - func _VM_Connected_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(ConnectedRequest) if err := dec(in); err != nil { @@ -707,7 +699,7 @@ func _VM_Connected_Handler(srv 
interface{}, ctx context.Context, dec func(interf } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/vm.VM/Connected", + FullMethod: VM_Connected_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VMServer).Connected(ctx, req.(*ConnectedRequest)) @@ -725,7 +717,7 @@ func _VM_Disconnected_Handler(srv interface{}, ctx context.Context, dec func(int } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/vm.VM/Disconnected", + FullMethod: VM_Disconnected_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VMServer).Disconnected(ctx, req.(*DisconnectedRequest)) @@ -743,7 +735,7 @@ func _VM_BuildBlock_Handler(srv interface{}, ctx context.Context, dec func(inter } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/vm.VM/BuildBlock", + FullMethod: VM_BuildBlock_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VMServer).BuildBlock(ctx, req.(*BuildBlockRequest)) @@ -761,7 +753,7 @@ func _VM_ParseBlock_Handler(srv interface{}, ctx context.Context, dec func(inter } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/vm.VM/ParseBlock", + FullMethod: VM_ParseBlock_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VMServer).ParseBlock(ctx, req.(*ParseBlockRequest)) @@ -779,7 +771,7 @@ func _VM_GetBlock_Handler(srv interface{}, ctx context.Context, dec func(interfa } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/vm.VM/GetBlock", + FullMethod: VM_GetBlock_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VMServer).GetBlock(ctx, req.(*GetBlockRequest)) @@ -797,7 +789,7 @@ func _VM_SetPreference_Handler(srv interface{}, ctx context.Context, dec func(in } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/vm.VM/SetPreference", + FullMethod: 
VM_SetPreference_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VMServer).SetPreference(ctx, req.(*SetPreferenceRequest)) @@ -815,7 +807,7 @@ func _VM_Health_Handler(srv interface{}, ctx context.Context, dec func(interface } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/vm.VM/Health", + FullMethod: VM_Health_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VMServer).Health(ctx, req.(*emptypb.Empty)) @@ -833,7 +825,7 @@ func _VM_Version_Handler(srv interface{}, ctx context.Context, dec func(interfac } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/vm.VM/Version", + FullMethod: VM_Version_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VMServer).Version(ctx, req.(*emptypb.Empty)) @@ -851,7 +843,7 @@ func _VM_AppRequest_Handler(srv interface{}, ctx context.Context, dec func(inter } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/vm.VM/AppRequest", + FullMethod: VM_AppRequest_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VMServer).AppRequest(ctx, req.(*AppRequestMsg)) @@ -869,7 +861,7 @@ func _VM_AppRequestFailed_Handler(srv interface{}, ctx context.Context, dec func } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/vm.VM/AppRequestFailed", + FullMethod: VM_AppRequestFailed_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VMServer).AppRequestFailed(ctx, req.(*AppRequestFailedMsg)) @@ -887,7 +879,7 @@ func _VM_AppResponse_Handler(srv interface{}, ctx context.Context, dec func(inte } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/vm.VM/AppResponse", + FullMethod: VM_AppResponse_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VMServer).AppResponse(ctx, 
req.(*AppResponseMsg)) @@ -905,7 +897,7 @@ func _VM_AppGossip_Handler(srv interface{}, ctx context.Context, dec func(interf } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/vm.VM/AppGossip", + FullMethod: VM_AppGossip_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VMServer).AppGossip(ctx, req.(*AppGossipMsg)) @@ -923,7 +915,7 @@ func _VM_Gather_Handler(srv interface{}, ctx context.Context, dec func(interface } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/vm.VM/Gather", + FullMethod: VM_Gather_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VMServer).Gather(ctx, req.(*emptypb.Empty)) @@ -941,7 +933,7 @@ func _VM_CrossChainAppRequest_Handler(srv interface{}, ctx context.Context, dec } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/vm.VM/CrossChainAppRequest", + FullMethod: VM_CrossChainAppRequest_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VMServer).CrossChainAppRequest(ctx, req.(*CrossChainAppRequestMsg)) @@ -959,7 +951,7 @@ func _VM_CrossChainAppRequestFailed_Handler(srv interface{}, ctx context.Context } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/vm.VM/CrossChainAppRequestFailed", + FullMethod: VM_CrossChainAppRequestFailed_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VMServer).CrossChainAppRequestFailed(ctx, req.(*CrossChainAppRequestFailedMsg)) @@ -977,7 +969,7 @@ func _VM_CrossChainAppResponse_Handler(srv interface{}, ctx context.Context, dec } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/vm.VM/CrossChainAppResponse", + FullMethod: VM_CrossChainAppResponse_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VMServer).CrossChainAppResponse(ctx, req.(*CrossChainAppResponseMsg)) @@ -995,7 +987,7 @@ 
func _VM_GetAncestors_Handler(srv interface{}, ctx context.Context, dec func(int } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/vm.VM/GetAncestors", + FullMethod: VM_GetAncestors_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VMServer).GetAncestors(ctx, req.(*GetAncestorsRequest)) @@ -1013,7 +1005,7 @@ func _VM_BatchedParseBlock_Handler(srv interface{}, ctx context.Context, dec fun } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/vm.VM/BatchedParseBlock", + FullMethod: VM_BatchedParseBlock_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VMServer).BatchedParseBlock(ctx, req.(*BatchedParseBlockRequest)) @@ -1031,7 +1023,7 @@ func _VM_VerifyHeightIndex_Handler(srv interface{}, ctx context.Context, dec fun } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/vm.VM/VerifyHeightIndex", + FullMethod: VM_VerifyHeightIndex_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VMServer).VerifyHeightIndex(ctx, req.(*emptypb.Empty)) @@ -1049,7 +1041,7 @@ func _VM_GetBlockIDAtHeight_Handler(srv interface{}, ctx context.Context, dec fu } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/vm.VM/GetBlockIDAtHeight", + FullMethod: VM_GetBlockIDAtHeight_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VMServer).GetBlockIDAtHeight(ctx, req.(*GetBlockIDAtHeightRequest)) @@ -1067,7 +1059,7 @@ func _VM_StateSyncEnabled_Handler(srv interface{}, ctx context.Context, dec func } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/vm.VM/StateSyncEnabled", + FullMethod: VM_StateSyncEnabled_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VMServer).StateSyncEnabled(ctx, req.(*emptypb.Empty)) @@ -1085,7 +1077,7 @@ func 
_VM_GetOngoingSyncStateSummary_Handler(srv interface{}, ctx context.Context } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/vm.VM/GetOngoingSyncStateSummary", + FullMethod: VM_GetOngoingSyncStateSummary_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VMServer).GetOngoingSyncStateSummary(ctx, req.(*emptypb.Empty)) @@ -1103,7 +1095,7 @@ func _VM_GetLastStateSummary_Handler(srv interface{}, ctx context.Context, dec f } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/vm.VM/GetLastStateSummary", + FullMethod: VM_GetLastStateSummary_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VMServer).GetLastStateSummary(ctx, req.(*emptypb.Empty)) @@ -1121,7 +1113,7 @@ func _VM_ParseStateSummary_Handler(srv interface{}, ctx context.Context, dec fun } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/vm.VM/ParseStateSummary", + FullMethod: VM_ParseStateSummary_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VMServer).ParseStateSummary(ctx, req.(*ParseStateSummaryRequest)) @@ -1139,7 +1131,7 @@ func _VM_GetStateSummary_Handler(srv interface{}, ctx context.Context, dec func( } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/vm.VM/GetStateSummary", + FullMethod: VM_GetStateSummary_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VMServer).GetStateSummary(ctx, req.(*GetStateSummaryRequest)) @@ -1157,7 +1149,7 @@ func _VM_BlockVerify_Handler(srv interface{}, ctx context.Context, dec func(inte } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/vm.VM/BlockVerify", + FullMethod: VM_BlockVerify_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VMServer).BlockVerify(ctx, req.(*BlockVerifyRequest)) @@ -1175,7 +1167,7 @@ func 
_VM_BlockAccept_Handler(srv interface{}, ctx context.Context, dec func(inte } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/vm.VM/BlockAccept", + FullMethod: VM_BlockAccept_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VMServer).BlockAccept(ctx, req.(*BlockAcceptRequest)) @@ -1193,7 +1185,7 @@ func _VM_BlockReject_Handler(srv interface{}, ctx context.Context, dec func(inte } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/vm.VM/BlockReject", + FullMethod: VM_BlockReject_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VMServer).BlockReject(ctx, req.(*BlockRejectRequest)) @@ -1211,7 +1203,7 @@ func _VM_StateSummaryAccept_Handler(srv interface{}, ctx context.Context, dec fu } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/vm.VM/StateSummaryAccept", + FullMethod: VM_StateSummaryAccept_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(VMServer).StateSummaryAccept(ctx, req.(*StateSummaryAcceptRequest)) @@ -1242,10 +1234,6 @@ var VM_ServiceDesc = grpc.ServiceDesc{ MethodName: "CreateHandlers", Handler: _VM_CreateHandlers_Handler, }, - { - MethodName: "CreateStaticHandlers", - Handler: _VM_CreateStaticHandlers_Handler, - }, { MethodName: "Connected", Handler: _VM_Connected_Handler, diff --git a/avalanchego/proto/pb/warp/message.pb.go b/avalanchego/proto/pb/warp/message.pb.go index cfc355c6..d6cb3736 100644 --- a/avalanchego/proto/pb/warp/message.pb.go +++ b/avalanchego/proto/pb/warp/message.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc (unknown) // source: warp/message.proto @@ -25,9 +25,9 @@ type SignRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - SourceChainId []byte `protobuf:"bytes,1,opt,name=source_chain_id,json=sourceChainId,proto3" json:"source_chain_id,omitempty"` - DestinationChainId []byte `protobuf:"bytes,2,opt,name=destination_chain_id,json=destinationChainId,proto3" json:"destination_chain_id,omitempty"` - Payload []byte `protobuf:"bytes,3,opt,name=payload,proto3" json:"payload,omitempty"` + NetworkId uint32 `protobuf:"varint,1,opt,name=network_id,json=networkId,proto3" json:"network_id,omitempty"` + SourceChainId []byte `protobuf:"bytes,2,opt,name=source_chain_id,json=sourceChainId,proto3" json:"source_chain_id,omitempty"` + Payload []byte `protobuf:"bytes,3,opt,name=payload,proto3" json:"payload,omitempty"` } func (x *SignRequest) Reset() { @@ -62,16 +62,16 @@ func (*SignRequest) Descriptor() ([]byte, []int) { return file_warp_message_proto_rawDescGZIP(), []int{0} } -func (x *SignRequest) GetSourceChainId() []byte { +func (x *SignRequest) GetNetworkId() uint32 { if x != nil { - return x.SourceChainId + return x.NetworkId } - return nil + return 0 } -func (x *SignRequest) GetDestinationChainId() []byte { +func (x *SignRequest) GetSourceChainId() []byte { if x != nil { - return x.DestinationChainId + return x.SourceChainId } return nil } @@ -134,25 +134,24 @@ var File_warp_message_proto protoreflect.FileDescriptor var file_warp_message_proto_rawDesc = []byte{ 0x0a, 0x12, 0x77, 0x61, 0x72, 0x70, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x04, 0x77, 0x61, 0x72, 0x70, 0x22, 0x81, 0x01, 0x0a, 0x0b, 0x53, - 0x69, 0x67, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x26, 0x0a, 0x0f, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0c, 0x52, 
0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, - 0x49, 0x64, 0x12, 0x30, 0x0a, 0x14, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x12, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, - 0x69, 0x6e, 0x49, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0x2c, - 0x0a, 0x0c, 0x53, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1c, - 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x32, 0x37, 0x0a, 0x06, - 0x53, 0x69, 0x67, 0x6e, 0x65, 0x72, 0x12, 0x2d, 0x0a, 0x04, 0x53, 0x69, 0x67, 0x6e, 0x12, 0x11, - 0x2e, 0x77, 0x61, 0x72, 0x70, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x12, 0x2e, 0x77, 0x61, 0x72, 0x70, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, - 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x76, 0x61, 0x2d, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x61, 0x76, 0x61, - 0x6c, 0x61, 0x6e, 0x63, 0x68, 0x65, 0x67, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, - 0x62, 0x2f, 0x77, 0x61, 0x72, 0x70, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x04, 0x77, 0x61, 0x72, 0x70, 0x22, 0x6e, 0x0a, 0x0b, 0x53, 0x69, + 0x67, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x6e, 0x65, 0x74, + 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x6e, + 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x49, 0x64, 0x12, 0x26, 0x0a, 0x0f, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 
0x02, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, + 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0x2c, 0x0a, 0x0c, 0x53, 0x69, + 0x67, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x69, + 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x73, + 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x32, 0x37, 0x0a, 0x06, 0x53, 0x69, 0x67, 0x6e, + 0x65, 0x72, 0x12, 0x2d, 0x0a, 0x04, 0x53, 0x69, 0x67, 0x6e, 0x12, 0x11, 0x2e, 0x77, 0x61, 0x72, + 0x70, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x12, 0x2e, + 0x77, 0x61, 0x72, 0x70, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x61, 0x76, 0x61, 0x2d, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x61, 0x76, 0x61, 0x6c, 0x61, 0x6e, 0x63, + 0x68, 0x65, 0x67, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x2f, 0x77, 0x61, + 0x72, 0x70, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/avalanchego/proto/pb/warp/message_grpc.pb.go b/avalanchego/proto/pb/warp/message_grpc.pb.go index fa092303..91c5152f 100644 --- a/avalanchego/proto/pb/warp/message_grpc.pb.go +++ b/avalanchego/proto/pb/warp/message_grpc.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.2.0 +// - protoc-gen-go-grpc v1.3.0 // - protoc (unknown) // source: warp/message.proto @@ -18,6 +18,10 @@ import ( // Requires gRPC-Go v1.32.0 or later. const _ = grpc.SupportPackageIsVersion7 +const ( + Signer_Sign_FullMethodName = "/warp.Signer/Sign" +) + // SignerClient is the client API for Signer service. 
// // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. @@ -35,7 +39,7 @@ func NewSignerClient(cc grpc.ClientConnInterface) SignerClient { func (c *signerClient) Sign(ctx context.Context, in *SignRequest, opts ...grpc.CallOption) (*SignResponse, error) { out := new(SignResponse) - err := c.cc.Invoke(ctx, "/warp.Signer/Sign", in, out, opts...) + err := c.cc.Invoke(ctx, Signer_Sign_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -80,7 +84,7 @@ func _Signer_Sign_Handler(srv interface{}, ctx context.Context, dec func(interfa } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/warp.Signer/Sign", + FullMethod: Signer_Sign_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SignerServer).Sign(ctx, req.(*SignRequest)) diff --git a/avalanchego/proto/sdk/sdk.proto b/avalanchego/proto/sdk/sdk.proto new file mode 100644 index 00000000..f4291239 --- /dev/null +++ b/avalanchego/proto/sdk/sdk.proto @@ -0,0 +1,20 @@ +syntax = "proto3"; + +package sdk; + +option go_package = "github.com/ava-labs/avalanchego/proto/pb/sdk"; + +message PullGossipRequest { + // TODO: Remove reservation after v1.11.x activates. + reserved 1; + bytes salt = 2; + bytes filter = 3; +} + +message PullGossipResponse { + repeated bytes gossip = 1; +} + +message PushGossip { + repeated bytes gossip = 1; +} diff --git a/avalanchego/proto/sync/sync.proto b/avalanchego/proto/sync/sync.proto new file mode 100644 index 00000000..1a799433 --- /dev/null +++ b/avalanchego/proto/sync/sync.proto @@ -0,0 +1,169 @@ +syntax = "proto3"; + +package sync; + +import "google/protobuf/empty.proto"; + +option go_package = "github.com/ava-labs/avalanchego/proto/pb/sync"; + +// Request represents a request for information during syncing. 
+message Request { + oneof message { + SyncGetRangeProofRequest range_proof_request = 1; + SyncGetChangeProofRequest change_proof_request = 2; + } +} + +// The interface required by an x/sync/SyncManager for syncing. +// Note this service definition only exists for use in tests. +// A database shouldn't expose this over the internet, as it +// allows for reading/writing to the database. +service DB { + rpc GetMerkleRoot(google.protobuf.Empty) returns (GetMerkleRootResponse); + + rpc Clear(google.protobuf.Empty) returns (google.protobuf.Empty); + + rpc GetProof(GetProofRequest) returns (GetProofResponse); + + rpc GetChangeProof(GetChangeProofRequest) returns (GetChangeProofResponse); + rpc VerifyChangeProof(VerifyChangeProofRequest) returns (VerifyChangeProofResponse); + rpc CommitChangeProof(CommitChangeProofRequest) returns (google.protobuf.Empty); + + rpc GetRangeProof(GetRangeProofRequest) returns (GetRangeProofResponse); + rpc CommitRangeProof(CommitRangeProofRequest) returns (google.protobuf.Empty); +} + +message GetMerkleRootResponse { + bytes root_hash = 1; +} + +message GetProofRequest { + bytes key = 1; +} + +message GetProofResponse { + Proof proof = 1; +} + +message Proof { + bytes key = 1; + MaybeBytes value = 2; + repeated ProofNode proof = 3; +} + +// For use in sync client, which has a restriction on the size of +// the response. GetChangeProof in the DB service doesn't. 
+message SyncGetChangeProofRequest { + bytes start_root_hash = 1; + bytes end_root_hash = 2; + MaybeBytes start_key = 3; + MaybeBytes end_key = 4; + uint32 key_limit = 5; + uint32 bytes_limit = 6; +} + +message SyncGetChangeProofResponse { + oneof response { + ChangeProof change_proof = 1; + RangeProof range_proof = 2; + } +} + +message GetChangeProofRequest { + bytes start_root_hash = 1; + bytes end_root_hash = 2; + MaybeBytes start_key = 3; + MaybeBytes end_key = 4; + uint32 key_limit = 5; +} + +message GetChangeProofResponse { + oneof response { + ChangeProof change_proof = 1; + // True iff server errored with merkledb.ErrInsufficientHistory. + bool root_not_present = 2; + } +} + +message VerifyChangeProofRequest { + ChangeProof proof = 1; + MaybeBytes start_key = 2; + MaybeBytes end_key = 3; + bytes expected_root_hash = 4; +} + +message VerifyChangeProofResponse { + // If empty, there was no error. + string error = 1; +} + +message CommitChangeProofRequest { + ChangeProof proof = 1; +} + +// For use in sync client, which has a restriction on the size of +// the response. GetRangeProof in the DB service doesn't. 
+message SyncGetRangeProofRequest { + bytes root_hash = 1; + MaybeBytes start_key = 2; + MaybeBytes end_key = 3; + uint32 key_limit = 4; + uint32 bytes_limit = 5; +} + +message GetRangeProofRequest { + bytes root_hash = 1; + MaybeBytes start_key = 2; + MaybeBytes end_key = 3; + uint32 key_limit = 4; +} + +message GetRangeProofResponse { + RangeProof proof = 1; +} + +message CommitRangeProofRequest { + MaybeBytes start_key = 1; + MaybeBytes end_key = 2; + RangeProof range_proof = 3; +} + +message ChangeProof { + repeated ProofNode start_proof = 1; + repeated ProofNode end_proof = 2; + repeated KeyChange key_changes = 3; +} + +message RangeProof { + repeated ProofNode start_proof = 1; + repeated ProofNode end_proof = 2; + repeated KeyValue key_values = 3; +} + +message ProofNode { + Key key = 1; + MaybeBytes value_or_hash = 2; + map<uint32, MaybeBytes> children = 3; +} + +message KeyChange { + bytes key = 1; + MaybeBytes value = 2; +} + +message Key { + uint64 length = 1; + bytes value = 2; +} + +message MaybeBytes { + bytes value = 1; + // If false, this is None. + // Otherwise this is Some. + bool is_nothing = 2; +} + +message KeyValue { + bytes key = 1; + bytes value = 2; +} diff --git a/avalanchego/proto/vm/vm.proto b/avalanchego/proto/vm/vm.proto index 179db3f4..4a0557ba 100644 --- a/avalanchego/proto/vm/vm.proto +++ b/avalanchego/proto/vm/vm.proto @@ -21,13 +21,6 @@ service VM { rpc Shutdown(google.protobuf.Empty) returns (google.protobuf.Empty); // Creates the HTTP handlers for custom chain network calls. rpc CreateHandlers(google.protobuf.Empty) returns (CreateHandlersResponse); - // Creates the HTTP handlers for custom VM network calls. - // - // Note: RPC Chain VM Factory will start a new instance of the VM in a - // seperate process which will populate the static handlers. After this - // process is created other processes will be created to populate blockchains, - // but they will not have the static handlers be called again.
- rpc CreateStaticHandlers(google.protobuf.Empty) returns (CreateStaticHandlersResponse); rpc Connected(ConnectedRequest) returns (google.protobuf.Empty); rpc Disconnected(DisconnectedRequest) returns (google.protobuf.Empty); // Attempt to create a new block from data contained in the VM. @@ -108,9 +101,8 @@ enum Error { ERROR_UNSPECIFIED = 0; ERROR_CLOSED = 1; ERROR_NOT_FOUND = 2; - ERROR_HEIGHT_INDEX_NOT_IMPLEMENTED = 3; - ERROR_HEIGHT_INDEX_INCOMPLETE = 4; - ERROR_STATE_SYNC_NOT_IMPLEMENTED = 5; + ERROR_HEIGHT_INDEX_INCOMPLETE = 3; + ERROR_STATE_SYNC_NOT_IMPLEMENTED = 4; } message InitializeRequest { @@ -128,7 +120,7 @@ message InitializeRequest { bytes genesis_bytes = 10; bytes upgrade_bytes = 11; bytes config_bytes = 12; - repeated VersionedDBServer db_servers = 13; + string db_server_addr = 13; // server_addr is the address of the gRPC server which serves // the messenger, keystore, shared memory, blockchain alias, // subnet alias, and appSender services @@ -143,13 +135,6 @@ message InitializeResponse { google.protobuf.Timestamp timestamp = 5; } -message VersionedDBServer { - string version = 1; - // server_addr is the address of the gRPC server which serves the - // Database service - string server_addr = 2; -} - message SetStateRequest { State state = 1; } @@ -166,16 +151,11 @@ message CreateHandlersResponse { repeated Handler handlers = 1; } -message CreateStaticHandlersResponse { - repeated Handler handlers = 1; -} - message Handler { string prefix = 1; - uint32 lock_options = 2; // server_addr is the address of the gRPC server which serves the // HTTP service - string server_addr = 3; + string server_addr = 2; } message BuildBlockRequest { @@ -268,6 +248,10 @@ message AppRequestFailedMsg { bytes node_id = 1; // The ID of the request we sent and didn't get a response to uint32 request_id = 2; + // Application-defined error code + sint32 error_code = 3; + // Application-defined error message + string error_message = 4; } message AppResponseMsg { @@ -302,6 
+286,10 @@ message CrossChainAppRequestFailedMsg { bytes chain_id = 1; // The ID of the request we sent and didn't get a response to uint32 request_id = 2; + // Application-defined error code + sint32 error_code = 3; + // Application-defined error message + string error_message = 4; } message CrossChainAppResponseMsg { @@ -315,7 +303,12 @@ message CrossChainAppResponseMsg { message ConnectedRequest { bytes node_id = 1; - string version = 2; + // Client name (e.g avalanchego) + string name = 2; + // Client semantic version + uint32 major = 3; + uint32 minor = 4; + uint32 patch = 5; } message DisconnectedRequest { diff --git a/avalanchego/proto/warp/message.proto b/avalanchego/proto/warp/message.proto index e2f52db6..8cd6a9ef 100644 --- a/avalanchego/proto/warp/message.proto +++ b/avalanchego/proto/warp/message.proto @@ -9,8 +9,8 @@ service Signer { } message SignRequest { - bytes source_chain_id = 1; - bytes destination_chain_id = 2; + uint32 network_id = 1; + bytes source_chain_id = 2; bytes payload = 3; } diff --git a/avalanchego/pubsub/bloom/filter.go b/avalanchego/pubsub/bloom/filter.go new file mode 100644 index 00000000..b0d023b5 --- /dev/null +++ b/avalanchego/pubsub/bloom/filter.go @@ -0,0 +1,51 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package bloom + +import ( + "errors" + + "github.com/ava-labs/avalanchego/utils/bloom" +) + +const bytesPerHash = 8 + +var ( + _ Filter = (*filter)(nil) + + errMaxBytes = errors.New("too large") +) + +type Filter interface { + // Add adds to filter, assumed thread safe + Add(...[]byte) + + // Check checks filter, assumed thread safe + Check([]byte) bool +} + +func New(maxN int, p float64, maxBytes int) (Filter, error) { + numHashes, numEntries := bloom.OptimalParameters(maxN, p) + if neededBytes := 1 + numHashes*bytesPerHash + numEntries; neededBytes > maxBytes { + return nil, errMaxBytes + } + f, err := bloom.New(numHashes, numEntries) + return &filter{ + filter: f, + }, err +} + +type filter struct { + filter *bloom.Filter +} + +func (f *filter) Add(bl ...[]byte) { + for _, b := range bl { + bloom.Add(f.filter, b, nil) + } +} + +func (f *filter) Check(b []byte) bool { + return bloom.Contains(f.filter, b, nil) +} diff --git a/avalanchego/utils/bloom/bloom_filter_test.go b/avalanchego/pubsub/bloom/filter_test.go similarity index 72% rename from avalanchego/utils/bloom/bloom_filter_test.go rename to avalanchego/pubsub/bloom/filter_test.go index 7e810add..3b2c4b71 100644 --- a/avalanchego/utils/bloom/bloom_filter_test.go +++ b/avalanchego/pubsub/bloom/filter_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package bloom @@ -13,10 +13,10 @@ import ( func TestNew(t *testing.T) { var ( - require = require.New(t) - maxN uint64 = 10000 - p = 0.1 - maxBytes uint64 = 1 * units.MiB // 1 MiB + require = require.New(t) + maxN = 10000 + p = 0.1 + maxBytes = 1 * units.MiB // 1 MiB ) f, err := New(maxN, p, maxBytes) require.NoError(err) diff --git a/avalanchego/utils/bloom/map_filter.go b/avalanchego/pubsub/bloom/map_filter.go similarity index 88% rename from avalanchego/utils/bloom/map_filter.go rename to avalanchego/pubsub/bloom/map_filter.go index 19046bea..d0edcbe8 100644 --- a/avalanchego/utils/bloom/map_filter.go +++ b/avalanchego/pubsub/bloom/map_filter.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package bloom diff --git a/avalanchego/pubsub/connection.go b/avalanchego/pubsub/connection.go index 2dae38ac..31d49335 100644 --- a/avalanchego/pubsub/connection.go +++ b/avalanchego/pubsub/connection.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package pubsub @@ -11,10 +11,9 @@ import ( "time" "github.com/gorilla/websocket" - "go.uber.org/zap" - "github.com/ava-labs/avalanchego/utils/bloom" + "github.com/ava-labs/avalanchego/pubsub/bloom" ) var ( @@ -190,7 +189,7 @@ func (c *connection) handleNewBloom(cmd *NewBloom) error { if !cmd.IsParamsValid() { return ErrInvalidFilterParam } - filter, err := bloom.New(uint64(cmd.MaxElements), float64(cmd.CollisionProb), MaxBytes) + filter, err := bloom.New(int(cmd.MaxElements), float64(cmd.CollisionProb), MaxBytes) if err != nil { return fmt.Errorf("bloom filter creation failed %w", err) } diff --git a/avalanchego/pubsub/connections.go b/avalanchego/pubsub/connections.go index 417e1aa8..25d35ac8 100644 --- a/avalanchego/pubsub/connections.go +++ b/avalanchego/pubsub/connections.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package pubsub diff --git a/avalanchego/pubsub/filter_param.go b/avalanchego/pubsub/filter_param.go index e7e2453c..5fd80a2a 100644 --- a/avalanchego/pubsub/filter_param.go +++ b/avalanchego/pubsub/filter_param.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package pubsub @@ -6,7 +6,7 @@ package pubsub import ( "sync" - "github.com/ava-labs/avalanchego/utils/bloom" + "github.com/ava-labs/avalanchego/pubsub/bloom" "github.com/ava-labs/avalanchego/utils/set" ) diff --git a/avalanchego/pubsub/filter_test.go b/avalanchego/pubsub/filter_test.go index 051ad94c..3b47a38e 100644 --- a/avalanchego/pubsub/filter_test.go +++ b/avalanchego/pubsub/filter_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package pubsub @@ -10,7 +10,7 @@ import ( "github.com/ava-labs/avalanchego/api" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/bloom" + "github.com/ava-labs/avalanchego/pubsub/bloom" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/formatting/address" ) @@ -31,64 +31,47 @@ func TestAddAddressesParseAddresses(t *testing.T) { }, }} - err = msg.parseAddresses() - require.NoError(err) + require.NoError(msg.parseAddresses()) require.Len(msg.addressIds, 1) require.Equal(addrID[:], msg.addressIds[0]) } func TestFilterParamUpdateMulti(t *testing.T) { + require := require.New(t) + fp := NewFilterParam() addr1 := []byte("abc") addr2 := []byte("def") addr3 := []byte("xyz") - if err := fp.Add(addr1, addr2, addr3); err != nil { - t.Fatal(err) - } - if len(fp.set) != 3 { - t.Fatalf("update multi failed") - } - if _, exists := fp.set[string(addr1)]; !exists { - t.Fatalf("update multi failed") - } - if _, exists := fp.set[string(addr2)]; !exists { - t.Fatalf("update multi failed") - } - if _, exists := fp.set[string(addr3)]; !exists { - t.Fatalf("update multi failed") - } + require.NoError(fp.Add(addr1, addr2, addr3)) + require.Len(fp.set, 3) + require.Contains(fp.set, string(addr1)) + require.Contains(fp.set, string(addr2)) + require.Contains(fp.set, string(addr3)) } func TestFilterParam(t *testing.T) { + require := require.New(t) + mapFilter := bloom.NewMap() fp := NewFilterParam() fp.SetFilter(mapFilter) addr := ids.GenerateTestShortID() - if err := fp.Add(addr[:]); err != nil { - t.Fatal(err) - } - if !fp.Check(addr[:]) { - t.Fatalf("check address failed") - } + require.NoError(fp.Add(addr[:])) + require.True(fp.Check(addr[:])) delete(fp.set, string(addr[:])) mapFilter.Add(addr[:]) - if !fp.Check(addr[:]) { - t.Fatalf("check address failed") - } - if fp.Check([]byte("bye")) { - t.Fatalf("check address failed") - } + require.True(fp.Check(addr[:])) + require.False(fp.Check([]byte("bye"))) } func 
TestNewBloom(t *testing.T) { cm := &NewBloom{} - if cm.IsParamsValid() { - t.Fatalf("new filter check failed") - } + require.False(t, cm.IsParamsValid()) } diff --git a/avalanchego/pubsub/filterer.go b/avalanchego/pubsub/filterer.go index 389448ea..3ec2910a 100644 --- a/avalanchego/pubsub/filterer.go +++ b/avalanchego/pubsub/filterer.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package pubsub diff --git a/avalanchego/pubsub/messages.go b/avalanchego/pubsub/messages.go index 525ae035..ec41af81 100644 --- a/avalanchego/pubsub/messages.go +++ b/avalanchego/pubsub/messages.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package pubsub diff --git a/avalanchego/pubsub/server.go b/avalanchego/pubsub/server.go index b7e4eaf7..b07dea89 100644 --- a/avalanchego/pubsub/server.go +++ b/avalanchego/pubsub/server.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package pubsub @@ -9,7 +9,6 @@ import ( "time" "github.com/gorilla/websocket" - "go.uber.org/zap" "github.com/ava-labs/avalanchego/utils/logging" diff --git a/avalanchego/scripts/build.sh b/avalanchego/scripts/build.sh index ac035737..7c5fc683 100755 --- a/avalanchego/scripts/build.sh +++ b/avalanchego/scripts/build.sh @@ -1,8 +1,6 @@ #!/usr/bin/env bash -set -o errexit -set -o nounset -set -o pipefail +set -euo pipefail print_usage() { printf "Usage: build [OPTIONS] diff --git a/avalanchego/scripts/build_avalanche.sh b/avalanchego/scripts/build_avalanche.sh index 2756efde..b4ffdf1d 100755 --- a/avalanchego/scripts/build_avalanche.sh +++ b/avalanchego/scripts/build_avalanche.sh @@ -1,8 +1,6 @@ #!/usr/bin/env bash -set -o errexit -set -o nounset -set -o pipefail +set -euo pipefail print_usage() { printf "Usage: build_avalanche [OPTIONS] @@ -26,7 +24,6 @@ done # Changes to the minimum golang version must also be replicated in # scripts/build_avalanche.sh (here) -# scripts/local.Dockerfile # Dockerfile # README.md # go.mod diff --git a/avalanchego/scripts/build_fuzz.sh b/avalanchego/scripts/build_fuzz.sh old mode 100644 new mode 100755 index c51f438f..49378e2e --- a/avalanchego/scripts/build_fuzz.sh +++ b/avalanchego/scripts/build_fuzz.sh @@ -1,20 +1,34 @@ #!/usr/bin/env bash +# First argument is the time, in seconds, to run each fuzz test for. +# If not provided, defaults to 1 second. +# +# Second argument is the directory to run fuzz tests in. +# If not provided, defaults to the current directory. + +set -euo pipefail + # Mostly taken from https://github.com/golang/go/issues/46312#issuecomment-1153345129 +# Directory above this script +AVALANCHE_PATH=$( cd "$( dirname "${BASH_SOURCE[0]}" )"; cd .. && pwd ) +# Load the constants +source "$AVALANCHE_PATH"/scripts/constants.sh + fuzzTime=${1:-1} -files=$(grep -r --include='**_test.go' --files-with-matches 'func Fuzz' .) 
+fuzzDir=${2:-.} + +files=$(grep -r --include='**_test.go' --files-with-matches 'func Fuzz' "$fuzzDir") failed=false for file in ${files} do - funcs=$(grep -oP 'func \K(Fuzz\w*)' $file) + funcs=$(grep -oP 'func \K(Fuzz\w*)' "$file") for func in ${funcs} do echo "Fuzzing $func in $file" - parentDir=$(dirname $file) - go test $parentDir -run=$func -fuzz=$func -fuzztime=${fuzzTime}s + parentDir=$(dirname "$file") # If any of the fuzz tests fail, return exit code 1 - if [ $? -ne 0 ]; then + if ! go test "$parentDir" -run="$func" -fuzz="$func" -fuzztime="${fuzzTime}"s; then failed=true fi done diff --git a/avalanchego/scripts/build_image.sh b/avalanchego/scripts/build_image.sh index 2a15c8f6..38403a18 100755 --- a/avalanchego/scripts/build_image.sh +++ b/avalanchego/scripts/build_image.sh @@ -1,13 +1,26 @@ #!/usr/bin/env bash -set -o errexit -set -o nounset -set -o pipefail +set -euo pipefail -# Avalanchego root folder +# Directory above this script AVALANCHE_PATH=$( cd "$( dirname "${BASH_SOURCE[0]}" )"; cd .. 
&& pwd ) + # Load the constants source "$AVALANCHE_PATH"/scripts/constants.sh -# build_image_from_remote.sh is deprecated -source "$AVALANCHE_PATH"/scripts/build_local_image.sh +if [[ $current_branch == *"-race" ]]; then + echo "Branch name must not end in '-race'" + exit 1 +fi + +# WARNING: this will use the most recent commit even if there are un-committed changes present +full_commit_hash="$(git --git-dir="$AVALANCHE_PATH/.git" rev-parse HEAD)" +commit_hash="${full_commit_hash::8}" + +echo "Building Docker Image with tags: $avalanchego_dockerhub_repo:$commit_hash , $avalanchego_dockerhub_repo:$current_branch" +docker build -t "$avalanchego_dockerhub_repo:$commit_hash" \ + -t "$avalanchego_dockerhub_repo:$current_branch" "$AVALANCHE_PATH" -f "$AVALANCHE_PATH/Dockerfile" + +echo "Building Docker Image with tags: $avalanchego_dockerhub_repo:$commit_hash-race , $avalanchego_dockerhub_repo:$current_branch-race" +docker build --build-arg="RACE_FLAG=-r" -t "$avalanchego_dockerhub_repo:$commit_hash-race" \ + -t "$avalanchego_dockerhub_repo:$current_branch-race" "$AVALANCHE_PATH" -f "$AVALANCHE_PATH/Dockerfile" diff --git a/avalanchego/scripts/build_local_dep_image.sh b/avalanchego/scripts/build_local_dep_image.sh deleted file mode 100755 index 2db930fc..00000000 --- a/avalanchego/scripts/build_local_dep_image.sh +++ /dev/null @@ -1,44 +0,0 @@ -#!/bin/bash - -set -o errexit -set -o nounset -set -o pipefail - -echo "Building docker image based off of most recent local commits of avalanchego and coreth" - -AVALANCHE_REMOTE="git@github.com:ava-labs/avalanchego.git" -CORETH_REMOTE="git@github.com:ava-labs/coreth.git" -DOCKERHUB_REPO="avaplatform/avalanchego" - -DOCKER="${DOCKER:-docker}" -SCRIPT_DIRPATH=$(cd $(dirname "${BASH_SOURCE[0]}") && pwd) - -AVA_LABS_RELATIVE_PATH="src/github.com/ava-labs" -EXISTING_GOPATH="$GOPATH" - -export GOPATH="$SCRIPT_DIRPATH/.build_image_gopath" -WORKPREFIX="$GOPATH/src/github.com/ava-labs" - -# Clone the remotes and checkout the desired 
branch/commits -AVALANCHE_CLONE="$WORKPREFIX/avalanchego" -CORETH_CLONE="$WORKPREFIX/coreth" - -# Replace the WORKPREFIX directory -rm -rf "$WORKPREFIX" -mkdir -p "$WORKPREFIX" - - -AVALANCHE_COMMIT_HASH="$(git -C "$EXISTING_GOPATH/$AVA_LABS_RELATIVE_PATH/avalanchego" rev-parse --short HEAD)" -CORETH_COMMIT_HASH="$(git -C "$EXISTING_GOPATH/$AVA_LABS_RELATIVE_PATH/coreth" rev-parse --short HEAD)" - -git config --global credential.helper cache - -git clone "$AVALANCHE_REMOTE" "$AVALANCHE_CLONE" -git -C "$AVALANCHE_CLONE" checkout "$AVALANCHE_COMMIT_HASH" - -git clone "$CORETH_REMOTE" "$CORETH_CLONE" -git -C "$CORETH_CLONE" checkout "$CORETH_COMMIT_HASH" - -CONCATENATED_HASHES="$AVALANCHE_COMMIT_HASH-$CORETH_COMMIT_HASH" - -"$DOCKER" build -t "$DOCKERHUB_REPO:$CONCATENATED_HASHES" "$WORKPREFIX" -f "$SCRIPT_DIRPATH/local.Dockerfile" diff --git a/avalanchego/scripts/build_local_image.sh b/avalanchego/scripts/build_local_image.sh deleted file mode 100755 index 6a12774a..00000000 --- a/avalanchego/scripts/build_local_image.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/usr/bin/env bash - -set -o errexit -set -o nounset -set -o pipefail - -# Directory above this script -AVALANCHE_PATH=$( cd "$( dirname "${BASH_SOURCE[0]}" )"; cd .. 
&& pwd ) - -# Load the constants -source "$AVALANCHE_PATH"/scripts/constants.sh - -# WARNING: this will use the most recent commit even if there are un-committed changes present -full_commit_hash="$(git --git-dir="$AVALANCHE_PATH/.git" rev-parse HEAD)" -commit_hash="${full_commit_hash::8}" - -echo "Building Docker Image with tags: $avalanchego_dockerhub_repo:$commit_hash , $avalanchego_dockerhub_repo:$current_branch" -docker build -t "$avalanchego_dockerhub_repo:$commit_hash" \ - -t "$avalanchego_dockerhub_repo:$current_branch" "$AVALANCHE_PATH" -f "$AVALANCHE_PATH/Dockerfile" diff --git a/avalanchego/scripts/build_test.sh b/avalanchego/scripts/build_test.sh index dbf7a51d..2251cc64 100755 --- a/avalanchego/scripts/build_test.sh +++ b/avalanchego/scripts/build_test.sh @@ -1,12 +1,12 @@ #!/usr/bin/env bash -set -o errexit -set -o nounset -set -o pipefail +set -euo pipefail # Directory above this script AVALANCHE_PATH=$( cd "$( dirname "${BASH_SOURCE[0]}" )"; cd .. && pwd ) # Load the constants source "$AVALANCHE_PATH"/scripts/constants.sh -go test -race -timeout="120s" -coverprofile="coverage.out" -covermode="atomic" $(go list ./... | grep -v /mocks | grep -v proto | grep -v tests) +# Ensure execution of fixture unit tests under tests/ but exclude ginkgo tests in tests/e2e and tests/upgrade +# shellcheck disable=SC2046 +go test -shuffle=on -race -timeout="${TIMEOUT:-120s}" -coverprofile="coverage.out" -covermode="atomic" $(go list ./... | grep -v /mocks | grep -v proto | grep -v tests/e2e | grep -v tests/upgrade) diff --git a/avalanchego/scripts/build_tmpnetctl.sh b/avalanchego/scripts/build_tmpnetctl.sh new file mode 100755 index 00000000..132cf09e --- /dev/null +++ b/avalanchego/scripts/build_tmpnetctl.sh @@ -0,0 +1,14 @@ +#!/usr/bin/env bash + +set -euo pipefail + +# Avalanchego root folder +AVALANCHE_PATH=$( cd "$( dirname "${BASH_SOURCE[0]}" )"; cd .. && pwd ) +# Load the constants +source "$AVALANCHE_PATH"/scripts/constants.sh + +echo "Building tmpnetctl..." 
+go build -ldflags\ + "-X github.com/ava-labs/avalanchego/version.GitCommit=$git_commit $static_ld_flags"\ + -o "$AVALANCHE_PATH/build/tmpnetctl"\ + "$AVALANCHE_PATH/tests/fixture/tmpnet/cmd/"*.go diff --git a/avalanchego/scripts/build_xsvm.sh b/avalanchego/scripts/build_xsvm.sh new file mode 100755 index 00000000..d67e2cbc --- /dev/null +++ b/avalanchego/scripts/build_xsvm.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +set -euo pipefail + +if ! [[ "$0" =~ scripts/build_xsvm.sh ]]; then + echo "must be run from repository root" + exit 255 +fi + +source ./scripts/constants.sh + +echo "Building xsvm plugin..." +go build -o ./build/xsvm ./vms/example/xsvm/cmd/xsvm/ + +PLUGIN_DIR="$HOME/.avalanchego/plugins" +PLUGIN_PATH="${PLUGIN_DIR}/v3m4wPxaHpvGr8qfMeyK6PRW3idZrPHmYcMTt7oXdK47yurVH" +echo "Symlinking ./build/xsvm to ${PLUGIN_PATH}" +mkdir -p "${PLUGIN_DIR}" +ln -sf "${PWD}/build/xsvm" "${PLUGIN_PATH}" diff --git a/avalanchego/scripts/constants.sh b/avalanchego/scripts/constants.sh index 8b4474e5..b68c1cda 100644 --- a/avalanchego/scripts/constants.sh +++ b/avalanchego/scripts/constants.sh @@ -1,5 +1,10 @@ #!/usr/bin/env bash -# + +# Ignore warnings about variables appearing unused since this file is not the consumer of the variables it defines. +# shellcheck disable=SC2034 + +set -euo pipefail + # Use lower_case variables in the scripts and UPPER_CASE variables for override # Use the constants.sh for env overrides @@ -7,13 +12,11 @@ AVALANCHE_PATH=$( cd "$( dirname "${BASH_SOURCE[0]}" )"; cd .. 
&& pwd ) # Direct # Where AvalancheGo binary goes avalanchego_path="$AVALANCHE_PATH/build/avalanchego" + +# Settings for coreth +coreth_version=${CORETH_VERSION:-'v0.13.0-rc.0'} plugin_dir=${PLUGIN_DIR:-$HOME/.avalanchego/plugins} evm_path=${EVM_PATH:-$plugin_dir/evm} -coreth_version=${CORETH_VERSION:-'v0.12.0-rc.2'} - -# Set the PATHS -GOPATH="$(go env GOPATH)" -# coreth_path=${CORETH_PATH:-"$GOPATH/pkg/mod/github.com/ava-labs/coreth@$coreth_version"} coreth_path="$AVALANCHE_PATH/../coreth" # Avalabs docker hub @@ -40,7 +43,10 @@ fi # # We use "export" here instead of just setting a bash variable because we need # to pass this flag to all child processes spawned by the shell. -export CGO_CFLAGS="-O -D__BLST_PORTABLE__" +export CGO_CFLAGS="-O2 -D__BLST_PORTABLE__" # While CGO_ENABLED doesn't need to be explicitly set, it produces a much more # clear error due to the default value change in go1.20. export CGO_ENABLED=1 + +# Disable version control fallbacks +export GOPROXY="https://proxy.golang.org" diff --git a/avalanchego/scripts/lint.sh b/avalanchego/scripts/lint.sh index 00e54624..a7982506 100755 --- a/avalanchego/scripts/lint.sh +++ b/avalanchego/scripts/lint.sh @@ -1,14 +1,22 @@ #!/usr/bin/env bash -set -o errexit -set -o pipefail -set -e +set -euo pipefail if ! [[ "$0" =~ scripts/lint.sh ]]; then echo "must be run from repository root" exit 255 fi +# The -P option is not supported by the grep version installed by +# default on macos. Since `-o errexit` is ignored in an if +# conditional, triggering the problem here ensures script failure when +# using an unsupported version of grep. 
+grep -P 'lint.sh' scripts/lint.sh &> /dev/null || (\ + >&2 echo "error: This script requires a recent version of gnu grep.";\ + >&2 echo " On macos, gnu grep can be installed with 'brew install grep'.";\ + >&2 echo " It will also be necessary to ensure that gnu grep is available in the path.";\ + exit 255 ) + if [ "$#" -eq 0 ]; then # by default, check all source code # to test only "snow" package @@ -21,36 +29,63 @@ fi # by default, "./scripts/lint.sh" runs all lint tests # to run only "license_header" test # TESTS='license_header' ./scripts/lint.sh -TESTS=${TESTS:-"golangci_lint license_header"} +TESTS=${TESTS:-"golangci_lint license_header require_error_is_no_funcs_as_params single_import interface_compliance_nil require_no_error_inline_func"} function test_golangci_lint { - go install -modcacherw -v github.com/golangci/golangci-lint/cmd/golangci-lint@v1.51.2 + go install -modcacherw -v github.com/golangci/golangci-lint/cmd/golangci-lint@v1.56.1 golangci-lint run --config .golangci.yml } -# find_go_files [package] -# all go files except generated ones -function find_go_files { - local target="${1}" - go fmt -n "${target}" | grep -Eo "([^ ]*)$" | grep -vE "(\\.pb\\.go|\\.pb\\.gw.go)" -} - # automatically checks license headers -# to modify the file headers (if missing), remove "--check" flag -# TESTS='license_header' ADDLICENSE_FLAGS="-v" ./scripts/lint.sh -_addlicense_flags=${ADDLICENSE_FLAGS:-"--check -v"} +# to modify the file headers (if missing), remove "--verify" flag +# TESTS='license_header' ADDLICENSE_FLAGS="--debug" ./scripts/lint.sh +_addlicense_flags=${ADDLICENSE_FLAGS:-"--verify --debug"} function test_license_header { - go install -modcacherw -v github.com/google/addlicense@latest - local target="${1}" + go install -v github.com/palantir/go-license@v1.25.0 local files=() - while IFS= read -r line; do files+=("$line"); done < <(find_go_files "${target}") + while IFS= read -r line; do files+=("$line"); done < <(find . -type f -name '*.go' ! 
-name '*.pb.go' ! -name 'mock_*.go') - addlicense \ - -f ./LICENSE.header \ + # shellcheck disable=SC2086 + go-license \ + --config=./header.yml \ ${_addlicense_flags} \ "${files[@]}" } +function test_single_import { + if grep -R -zo -P 'import \(\n\t".*"\n\)' .; then + echo "" + return 1 + fi +} + +function test_require_error_is_no_funcs_as_params { + if grep -R -zo -P 'require.ErrorIs\(.+?\)[^\n]*\)\n' .; then + echo "" + return 1 + fi +} + +function test_require_no_error_inline_func { + if grep -R -zo -P '\t+err :?= ((?!require|if).|\n)*require\.NoError\((t, )?err\)' .; then + echo "" + echo "Checking that a function with a single error return doesn't error should be done in-line." + echo "" + return 1 + fi +} + +# Ref: https://go.dev/doc/effective_go#blank_implements +function test_interface_compliance_nil { + if grep -R -o -P '_ .+? = &.+?\{\}' .; then + echo "" + echo "Interface compliance checks need to be of the form:" + echo " var _ json.Marshaler = (*RawMessage)(nil)" + echo "" + return 1 + fi +} + function run { local test="${1}" shift 1 diff --git a/avalanchego/scripts/local.Dockerfile b/avalanchego/scripts/local.Dockerfile deleted file mode 100644 index 0603b1a4..00000000 --- a/avalanchego/scripts/local.Dockerfile +++ /dev/null @@ -1,22 +0,0 @@ -# syntax=docker/dockerfile:experimental - -# This Dockerfile is meant to be used with the build_local_dep_image.sh script -# in order to build an image using the local version of coreth - -# Changes to the minimum golang version must also be replicated in -# scripts/build_avalanche.sh -# scripts/local.Dockerfile (here) -# Dockerfile -# README.md -# go.mod -FROM golang:1.19.6-buster - -RUN mkdir -p /go/src/github.com/ava-labs - -WORKDIR $GOPATH/src/github.com/ava-labs -COPY avalanchego avalanchego - -WORKDIR $GOPATH/src/github.com/ava-labs/avalanchego -RUN ./scripts/build_avalanche.sh - -RUN ln -sv $GOPATH/src/github.com/ava-labs/avalanche-byzantine/ /avalanchego diff --git a/avalanchego/scripts/mock.gen.sh 
b/avalanchego/scripts/mock.gen.sh old mode 100644 new mode 100755 index 9a0cd075..ead92957 --- a/avalanchego/scripts/mock.gen.sh +++ b/avalanchego/scripts/mock.gen.sh @@ -1,27 +1,62 @@ -#!/bin/bash +#!/usr/bin/env bash -set -e +set -euo pipefail if ! [[ "$0" =~ scripts/mock.gen.sh ]]; then echo "must be run from repository root" exit 255 fi -if ! command -v mockgen &> /dev/null -then - echo "mockgen not found, installing..." - # https://github.com/golang/mock - go install -v github.com/golang/mock/mockgen@v1.6.0 -fi +# https://github.com/uber-go/mock +go install -v go.uber.org/mock/mockgen@v0.4.0 + +source ./scripts/constants.sh + +outputted_files=() # tuples of (source interface import path, comma-separated interface names, output file path) input="scripts/mocks.mockgen.txt" while IFS= read -r line do - IFS='=' read src_import_path interface_name output_path <<< "${line}" - package_name=$(basename $(dirname $output_path)) + IFS='=' read -r src_import_path interface_name output_path <<< "${line}" + package_name="$(basename "$(dirname "$output_path")")" echo "Generating ${output_path}..." - mockgen -copyright_file=./LICENSE.header -package=${package_name} -destination=${output_path} ${src_import_path} ${interface_name} + outputted_files+=("${output_path}") + mockgen -package="${package_name}" -destination="${output_path}" "${src_import_path}" "${interface_name}" + done < "$input" +# tuples of (source import path, comma-separated interface names to exclude, output file path) +input="scripts/mocks.mockgen.source.txt" +while IFS= read -r line +do + IFS='=' read -r source_path exclude_interfaces output_path <<< "${line}" + package_name=$(basename "$(dirname "$output_path")") + outputted_files+=("${output_path}") + echo "Generating ${output_path}..." 
+ + mockgen \ + -source="${source_path}" \ + -destination="${output_path}" \ + -package="${package_name}" \ + -exclude_interfaces="${exclude_interfaces}" + +done < "$input" + +mapfile -t all_generated_files < <(grep -Rl 'Code generated by MockGen. DO NOT EDIT.') + +# Exclude certain files +outputted_files+=('scripts/mock.gen.sh') # This file +outputted_files+=('vms/components/avax/mock_transferable_out.go') # Embedded verify.IsState +outputted_files+=('vms/platformvm/fx/mock_fx.go') # Embedded verify.IsNotState + +mapfile -t diff_files < <(echo "${all_generated_files[@]}" "${outputted_files[@]}" | tr ' ' '\n' | sort | uniq -u) + +if (( ${#diff_files[@]} )); then + printf "\nFAILURE\n" + echo "Detected MockGen generated files that are not in scripts/mocks.mockgen.source.txt or scripts/mocks.mockgen.txt:" + printf "%s\n" "${diff_files[@]}" + exit 255 +fi + echo "SUCCESS" diff --git a/avalanchego/scripts/mocks.mockgen.source.txt b/avalanchego/scripts/mocks.mockgen.source.txt new file mode 100644 index 00000000..02782a7b --- /dev/null +++ b/avalanchego/scripts/mocks.mockgen.source.txt @@ -0,0 +1,10 @@ +snow/engine/common/sender.go=StateSummarySender,AcceptedStateSummarySender,FrontierSender,AcceptedSender,FetchSender,AppSender,QuerySender,CrossChainAppSender,NetworkAppSender,Gossiper=snow/engine/common/mock_sender.go +snow/networking/router/router.go=InternalHandler=snow/networking/router/mock_router.go +snow/networking/sender/external_sender.go==snow/networking/sender/mock_external_sender.go +snow/validators/manager.go=SetCallbackListener=snow/validators/mock_manager.go +vms/avm/block/executor/manager.go==vms/avm/block/executor/mock_manager.go +vms/avm/txs/tx.go==vms/avm/txs/mock_unsigned_tx.go +vms/platformvm/block/executor/manager.go==vms/platformvm/block/executor/mock_manager.go +vms/platformvm/txs/staker_tx.go=ValidatorTx,DelegatorTx,StakerTx,PermissionlessStaker=vms/platformvm/txs/mock_staker_tx.go 
+vms/platformvm/txs/unsigned_tx.go==vms/platformvm/txs/mock_unsigned_tx.go +x/merkledb/db.go=ChangeProofer,RangeProofer,Clearer,Prefetcher=x/merkledb/mock_db.go diff --git a/avalanchego/scripts/mocks.mockgen.txt b/avalanchego/scripts/mocks.mockgen.txt index cc95dc0e..ba2be886 100644 --- a/avalanchego/scripts/mocks.mockgen.txt +++ b/avalanchego/scripts/mocks.mockgen.txt @@ -2,50 +2,43 @@ github.com/ava-labs/avalanchego/api/server=Server=api/server/mock_server.go github.com/ava-labs/avalanchego/chains/atomic=SharedMemory=chains/atomic/mock_shared_memory.go github.com/ava-labs/avalanchego/codec=Manager=codec/mock_manager.go github.com/ava-labs/avalanchego/database=Batch=database/mock_batch.go +github.com/ava-labs/avalanchego/database=Iterator=database/mock_iterator.go github.com/ava-labs/avalanchego/message=OutboundMessage=message/mock_message.go github.com/ava-labs/avalanchego/message=OutboundMsgBuilder=message/mock_outbound_message_builder.go -github.com/ava-labs/avalanchego/network/peer=GossipTracker=network/peer/mock_gossip_tracker.go github.com/ava-labs/avalanchego/snow/consensus/snowman=Block=snow/consensus/snowman/mock_block.go github.com/ava-labs/avalanchego/snow/engine/avalanche/vertex=LinearizableVM=snow/engine/avalanche/vertex/mock_vm.go -github.com/ava-labs/avalanchego/snow/engine/snowman/block=BuildBlockWithContextChainVM=snow/engine/snowman/block/mocks/build_block_with_context_vm.go -github.com/ava-labs/avalanchego/snow/engine/snowman/block=ChainVM=snow/engine/snowman/block/mocks/chain_vm.go -github.com/ava-labs/avalanchego/snow/engine/snowman/block=StateSyncableVM=snow/engine/snowman/block/mocks/state_syncable_vm.go -github.com/ava-labs/avalanchego/snow/engine/snowman/block=WithVerifyContext=snow/engine/snowman/block/mocks/with_verify_context.go +github.com/ava-labs/avalanchego/snow/engine/snowman/block=BuildBlockWithContextChainVM=snow/engine/snowman/block/mock_build_block_with_context_vm.go 
+github.com/ava-labs/avalanchego/snow/engine/snowman/block=ChainVM=snow/engine/snowman/block/mock_chain_vm.go +github.com/ava-labs/avalanchego/snow/engine/snowman/block=StateSyncableVM=snow/engine/snowman/block/mock_state_syncable_vm.go +github.com/ava-labs/avalanchego/snow/engine/snowman/block=WithVerifyContext=snow/engine/snowman/block/mock_with_verify_context.go github.com/ava-labs/avalanchego/snow/networking/handler=Handler=snow/networking/handler/mock_handler.go github.com/ava-labs/avalanchego/snow/networking/timeout=Manager=snow/networking/timeout/mock_manager.go github.com/ava-labs/avalanchego/snow/networking/tracker=Targeter=snow/networking/tracker/mock_targeter.go github.com/ava-labs/avalanchego/snow/networking/tracker=Tracker=snow/networking/tracker/mock_resource_tracker.go github.com/ava-labs/avalanchego/snow/uptime=Calculator=snow/uptime/mock_calculator.go -github.com/ava-labs/avalanchego/snow/validators=Manager=snow/validators/mock_manager.go github.com/ava-labs/avalanchego/snow/validators=State=snow/validators/mock_state.go github.com/ava-labs/avalanchego/snow/validators=SubnetConnector=snow/validators/mock_subnet_connector.go github.com/ava-labs/avalanchego/utils/crypto/keychain=Ledger=utils/crypto/keychain/mock_ledger.go github.com/ava-labs/avalanchego/utils/filesystem=Reader=utils/filesystem/mock_io.go github.com/ava-labs/avalanchego/utils/hashing=Hasher=utils/hashing/mock_hasher.go -github.com/ava-labs/avalanchego/utils/logging=Logger=utils/logging/mock_logger.go github.com/ava-labs/avalanchego/utils/resource=User=utils/resource/mock_user.go -github.com/ava-labs/avalanchego/vms/avm/blocks=Block=vms/avm/blocks/mock_block.go +github.com/ava-labs/avalanchego/vms/avm/block=Block=vms/avm/block/mock_block.go github.com/ava-labs/avalanchego/vms/avm/metrics=Metrics=vms/avm/metrics/mock_metrics.go -github.com/ava-labs/avalanchego/vms/avm/states=Chain,State,Diff=vms/avm/states/mock_states.go 
+github.com/ava-labs/avalanchego/vms/avm/state=Chain,State,Diff=vms/avm/state/mock_state.go github.com/ava-labs/avalanchego/vms/avm/txs/mempool=Mempool=vms/avm/txs/mempool/mock_mempool.go github.com/ava-labs/avalanchego/vms/components/avax=TransferableIn=vms/components/avax/mock_transferable_in.go -github.com/ava-labs/avalanchego/vms/components/avax=TransferableOut=vms/components/avax/mock_transferable_out.go github.com/ava-labs/avalanchego/vms/components/verify=Verifiable=vms/components/verify/mock_verifiable.go -github.com/ava-labs/avalanchego/vms/platformvm/blocks/executor=Manager=vms/platformvm/blocks/executor/mock_manager.go -github.com/ava-labs/avalanchego/vms/platformvm/blocks=Block=vms/platformvm/blocks/mock_block.go -github.com/ava-labs/avalanchego/vms/platformvm/fx=Fx,Owner=vms/platformvm/fx/mock_fx.go -github.com/ava-labs/avalanchego/vms/platformvm/state=Chain=vms/platformvm/state/mock_chain.go -github.com/ava-labs/avalanchego/vms/platformvm/state=Diff=vms/platformvm/state/mock_diff.go +github.com/ava-labs/avalanchego/vms/platformvm/block=Block=vms/platformvm/block/mock_block.go +github.com/ava-labs/avalanchego/vms/platformvm/state=Chain,Diff,State,Versions=vms/platformvm/state/mock_state.go github.com/ava-labs/avalanchego/vms/platformvm/state=StakerIterator=vms/platformvm/state/mock_staker_iterator.go -github.com/ava-labs/avalanchego/vms/platformvm/state=Versions=vms/platformvm/state/mock_versions.go -github.com/ava-labs/avalanchego/vms/platformvm/txs/builder=Builder=vms/platformvm/txs/builder/mock_builder.go github.com/ava-labs/avalanchego/vms/platformvm/txs/mempool=Mempool=vms/platformvm/txs/mempool/mock_mempool.go github.com/ava-labs/avalanchego/vms/platformvm/utxo=Verifier=vms/platformvm/utxo/mock_verifier.go github.com/ava-labs/avalanchego/vms/proposervm/proposer=Windower=vms/proposervm/proposer/mock_windower.go +github.com/ava-labs/avalanchego/vms/proposervm/scheduler=Scheduler=vms/proposervm/scheduler/mock_scheduler.go 
github.com/ava-labs/avalanchego/vms/proposervm/state=State=vms/proposervm/state/mock_state.go github.com/ava-labs/avalanchego/vms/proposervm=PostForkBlock=vms/proposervm/mock_post_fork_block.go github.com/ava-labs/avalanchego/vms/registry=VMGetter=vms/registry/mock_vm_getter.go -github.com/ava-labs/avalanchego/vms/registry=VMRegisterer=vms/registry/mock_vm_registerer.go github.com/ava-labs/avalanchego/vms/registry=VMRegistry=vms/registry/mock_vm_registry.go github.com/ava-labs/avalanchego/vms=Factory,Manager=vms/mock_manager.go -github.com/ava-labs/avalanchego/x/sync=Client=x/sync/mock_client.go \ No newline at end of file +github.com/ava-labs/avalanchego/x/sync=Client=x/sync/mock_client.go +github.com/ava-labs/avalanchego/x/sync=NetworkClient=x/sync/mock_network_client.go diff --git a/avalanchego/scripts/protobuf_codegen.sh b/avalanchego/scripts/protobuf_codegen.sh index 707ea5e8..5230ca7b 100755 --- a/avalanchego/scripts/protobuf_codegen.sh +++ b/avalanchego/scripts/protobuf_codegen.sh @@ -1,4 +1,6 @@ -#!/bin/bash +#!/usr/bin/env bash + +set -euo pipefail if ! [[ "$0" =~ scripts/protobuf_codegen.sh ]]; then echo "must be run from repository root" @@ -9,7 +11,7 @@ fi # any version changes here should also be bumped in Dockerfile.buf # ref. https://docs.buf.build/installation # ref. https://github.com/bufbuild/buf/releases -BUF_VERSION='1.11.0' +BUF_VERSION='1.29.0' if [[ $(buf --version | cut -f2 -d' ') != "${BUF_VERSION}" ]]; then echo "could not find buf ${BUF_VERSION}, is it installed + in PATH?" exit 255 @@ -18,7 +20,7 @@ fi ## install "protoc-gen-go" # any version changes here should also be bumped in Dockerfile.buf # ref. 
https://github.com/protocolbuffers/protobuf-go/releases -PROTOC_GEN_GO_VERSION='v1.28.1' +PROTOC_GEN_GO_VERSION='v1.30.0' go install -v google.golang.org/protobuf/cmd/protoc-gen-go@${PROTOC_GEN_GO_VERSION} if [[ $(protoc-gen-go --version | cut -f2 -d' ') != "${PROTOC_GEN_GO_VERSION}" ]]; then # e.g., protoc-gen-go v1.28.1 @@ -30,37 +32,33 @@ fi # any version changes here should also be bumped in Dockerfile.buf # ref. https://pkg.go.dev/google.golang.org/grpc/cmd/protoc-gen-go-grpc # ref. https://github.com/grpc/grpc-go/blob/master/cmd/protoc-gen-go-grpc/main.go -PROTOC_GEN_GO_GRPC_VERSION='1.2.0' +PROTOC_GEN_GO_GRPC_VERSION='1.3.0' go install -v google.golang.org/grpc/cmd/protoc-gen-go-grpc@v${PROTOC_GEN_GO_GRPC_VERSION} if [[ $(protoc-gen-go-grpc --version | cut -f2 -d' ') != "${PROTOC_GEN_GO_GRPC_VERSION}" ]]; then - # e.g., protoc-gen-go-grpc 1.2.0 + # e.g., protoc-gen-go-grpc 1.3.0 echo "could not find protoc-gen-go-grpc ${PROTOC_GEN_GO_GRPC_VERSION}, is it installed + in PATH?" exit 255 fi TARGET=$PWD/proto -if [ -n "$1" ]; then +if [ -n "${1:-}" ]; then TARGET="$1" fi # move to api directory -cd $TARGET +cd "$TARGET" echo "Running protobuf fmt..." buf format -w echo "Running protobuf lint check..." -buf lint - -if [[ $? -ne 0 ]]; then +if ! buf lint; then echo "ERROR: protobuf linter failed" exit 1 fi echo "Re-generating protobuf..." -buf generate - -if [[ $? -ne 0 ]]; then +if ! buf generate; then echo "ERROR: protobuf generation failed" exit 1 fi diff --git a/avalanchego/scripts/shellcheck.sh b/avalanchego/scripts/shellcheck.sh new file mode 100755 index 00000000..8be06d02 --- /dev/null +++ b/avalanchego/scripts/shellcheck.sh @@ -0,0 +1,53 @@ +#!/usr/bin/env bash + +set -euo pipefail + +# This script can also be used to correct the problems detected by shellcheck by invoking as follows: +# +# ./scripts/tests.shellcheck.sh -f diff | git apply +# + +if ! 
[[ "$0" =~ scripts/shellcheck.sh ]]; then + echo "must be run from repository root" + exit 255 +fi + +VERSION="v0.9.0" + +function get_version { + local target_path=$1 + if command -v "${target_path}" > /dev/null; then + echo "v$("${target_path}" --version | grep version: | awk '{print $2}')" + fi +} + +SYSTEM_VERSION="$(get_version shellcheck)" +if [[ "${SYSTEM_VERSION}" == "${VERSION}" ]]; then + SHELLCHECK=shellcheck +else + # Try to install a local version + SHELLCHECK=./bin/shellcheck + LOCAL_VERSION="$(get_version "${SHELLCHECK}")" + if [[ -z "${LOCAL_VERSION}" || "${LOCAL_VERSION}" != "${VERSION}" ]]; then + if which sw_vers &> /dev/null; then + echo "on macos, only x86_64 binaries are available so rosetta is required" + echo "to avoid using rosetta, install via homebrew: brew install shellcheck" + DIST=darwin.x86_64 + else + # Linux - binaries for common arches *should* be available + arch="$(uname -i)" + DIST="linux.${arch}" + fi + curl -s -L "https://github.com/koalaman/shellcheck/releases/download/${VERSION}/shellcheck-${VERSION}.${DIST}.tar.xz" | tar Jxv -C /tmp > /dev/null + mkdir -p "$(dirname "${SHELLCHECK}")" + cp /tmp/shellcheck-"${VERSION}"/shellcheck "${SHELLCHECK}" + fi +fi + +# `find *` is the simplest way to ensure find does not include a +# leading `.` in filenames it emits. A leading `.` will prevent the +# use of `git apply` to fix reported shellcheck issues. This is +# compatible with both macos and linux (unlike the use of -printf). 
+# +# shellcheck disable=SC2035 +find * -name "*.sh" -type f -print0 | xargs -0 "${SHELLCHECK}" "${@}" diff --git a/avalanchego/scripts/test_deploy_validators.sh b/avalanchego/scripts/test_deploy_validators.sh old mode 100755 new mode 100644 diff --git a/avalanchego/scripts/test_pchain_import.sh b/avalanchego/scripts/test_pchain_import.sh old mode 100755 new mode 100644 diff --git a/avalanchego/scripts/tests.e2e.existing.sh b/avalanchego/scripts/tests.e2e.existing.sh new file mode 100755 index 00000000..d369a777 --- /dev/null +++ b/avalanchego/scripts/tests.e2e.existing.sh @@ -0,0 +1,65 @@ +#!/usr/bin/env bash + +set -euo pipefail + +################################################################ +# This script deploys a temporary network and configures +# tests.e2e.sh to execute the e2e suite against it. This +# validates that tmpnetctl is capable of starting a network and +# that the e2e suite is capable of executing against a network +# that it did not create. +################################################################ + +# e.g., +# ./scripts/build.sh +# ./scripts/tests.e2e.existing.sh --ginkgo.label-filter=x # All arguments are supplied to ginkgo +# E2E_SERIAL=1 ./scripts/tests.e2e.sh # Run tests serially +# AVALANCHEGO_PATH=./build/avalanchego ./scripts/tests.e2e.existing.sh # Customization of avalanchego path +if ! [[ "$0" =~ scripts/tests.e2e.existing.sh ]]; then + echo "must be run from repository root" + exit 255 +fi + +# Ensure an absolute path to avoid dependency on the working directory +# of script execution. 
+AVALANCHEGO_PATH="$(realpath "${AVALANCHEGO_PATH:-./build/avalanchego}")" +export AVALANCHEGO_PATH + +# Provide visual separation between testing and setup/teardown +function print_separator { + printf '%*s\n' "${COLUMNS:-80}" '' | tr ' ' ─ +} + +# Ensure network cleanup on teardown +function cleanup { + print_separator + echo "cleaning up temporary network" + if [[ -n "${TMPNET_NETWORK_DIR:-}" ]]; then + ./build/tmpnetctl stop-network + fi +} +trap cleanup EXIT + +# Start a temporary network +./scripts/build_tmpnetctl.sh +print_separator +./build/tmpnetctl start-network + +# Determine the network configuration path from the latest symlink +LATEST_SYMLINK_PATH="${HOME}/.tmpnet/networks/latest" +if [[ -h "${LATEST_SYMLINK_PATH}" ]]; then + TMPNET_NETWORK_DIR="$(realpath "${LATEST_SYMLINK_PATH}")" + export TMPNET_NETWORK_DIR +else + echo "failed to find configuration path: ${LATEST_SYMLINK_PATH} symlink not found" + exit 255 +fi + +print_separator +# - Setting E2E_USE_EXISTING_NETWORK configures tests.e2e.sh to use +# the temporary network identified by TMPNET_NETWORK_DIR. +# - Only a single test (selected with --ginkgo.focus-file) is required +# to validate that an existing network can be used by an e2e test +# suite run. Executing more tests would be duplicative of the testing +# performed against a network created by the test suite. 
+E2E_USE_EXISTING_NETWORK=1 ./scripts/tests.e2e.sh --ginkgo.focus-file=permissionless_subnets.go diff --git a/avalanchego/scripts/tests.e2e.sh b/avalanchego/scripts/tests.e2e.sh index be92c580..3d433526 100755 --- a/avalanchego/scripts/tests.e2e.sh +++ b/avalanchego/scripts/tests.e2e.sh @@ -1,98 +1,63 @@ #!/usr/bin/env bash -set -e -set -o nounset -set -o pipefail + +set -euo pipefail # e.g., -# ./scripts/build.sh -# ./scripts/tests.e2e.sh ./build/avalanchego -# ENABLE_WHITELIST_VTX_TESTS=true ./scripts/tests.e2e.sh ./build/avalanchego +# ./scripts/tests.e2e.sh +# ./scripts/tests.e2e.sh --ginkgo.label-filter=x # All arguments are supplied to ginkgo +# E2E_SERIAL=1 ./scripts/tests.e2e.sh # Run tests serially +# AVALANCHEGO_PATH=./build/avalanchego ./scripts/tests.e2e.sh # Customization of avalanchego path +# E2E_USE_EXISTING_NETWORK=1 TMPNET_NETWORK_DIR=/path/to ./scripts/tests.e2e.sh # Execute against an existing network if ! [[ "$0" =~ scripts/tests.e2e.sh ]]; then echo "must be run from repository root" exit 255 fi -AVALANCHEGO_PATH="${1-}" -if [[ -z "${AVALANCHEGO_PATH}" ]]; then - echo "Missing AVALANCHEGO_PATH argument!" - echo "Usage: ${0} [AVALANCHEGO_PATH]" >> /dev/stderr - exit 255 -fi - -# Set the CGO flags to use the portable version of BLST -# -# We use "export" here instead of just setting a bash variable because we need -# to pass this flag to all child processes spawned by the shell. -export CGO_CFLAGS="-O -D__BLST_PORTABLE__" -# While CGO_ENABLED doesn't need to be explicitly set, it produces a much more -# clear error due to the default value change in go1.20. -export CGO_ENABLED=1 - -ENABLE_WHITELIST_VTX_TESTS=${ENABLE_WHITELIST_VTX_TESTS:-false} -# ref. 
https://onsi.github.io/ginkgo/#spec-labels -GINKGO_LABEL_FILTER="!whitelist-tx" -if [[ ${ENABLE_WHITELIST_VTX_TESTS} == true ]]; then - # run only "whitelist-tx" tests, no other test - GINKGO_LABEL_FILTER="whitelist-tx" -fi -echo GINKGO_LABEL_FILTER: ${GINKGO_LABEL_FILTER} - ################################# -# download avalanche-network-runner -# https://github.com/ava-labs/avalanche-network-runner -# TODO: migrate to upstream avalanche-network-runner -GOARCH=$(go env GOARCH) -GOOS=$(go env GOOS) -NETWORK_RUNNER_VERSION=1.3.5-rc.0 -DOWNLOAD_PATH=/tmp/avalanche-network-runner.tar.gz -DOWNLOAD_URL="https://github.com/ava-labs/avalanche-network-runner/releases/download/v${NETWORK_RUNNER_VERSION}/avalanche-network-runner_${NETWORK_RUNNER_VERSION}_${GOOS}_${GOARCH}.tar.gz" - -rm -f ${DOWNLOAD_PATH} -rm -f /tmp/avalanche-network-runner - -echo "downloading avalanche-network-runner ${NETWORK_RUNNER_VERSION} at ${DOWNLOAD_URL} to ${DOWNLOAD_PATH}" -curl --fail -L ${DOWNLOAD_URL} -o ${DOWNLOAD_PATH} - -echo "extracting downloaded avalanche-network-runner" -tar xzvf ${DOWNLOAD_PATH} -C /tmp -/tmp/avalanche-network-runner -h - -GOPATH="$(go env GOPATH)" -PATH="${GOPATH}/bin:${PATH}" +# Sourcing constants.sh ensures that the necessary CGO flags are set to +# build the portable version of BLST. Without this, ginkgo may fail to +# build the test binary if run on a host (e.g. github worker) that lacks +# the instructions to build non-portable BLST. 
+source ./scripts/constants.sh ################################# echo "building e2e.test" # to install the ginkgo binary (required for test build and run) -go install -modcacherw -v github.com/onsi/ginkgo/v2/ginkgo@v2.1.4 +go install -modcacherw -v github.com/onsi/ginkgo/v2/ginkgo@v2.13.1 ACK_GINKGO_RC=true ginkgo build ./tests/e2e ./tests/e2e/e2e.test --help ################################# -# run "avalanche-network-runner" server -echo "launch avalanche-network-runner in the background" -/tmp/avalanche-network-runner \ -server \ ---log-level debug \ ---port=":12342" \ ---disable-grpc-gateway & -PID=${!} +# Since TMPNET_NETWORK_DIR may be set in the environment (e.g. to configure ginkgo +# or tmpnetctl), configuring the use of an existing network with this script +# requires the extra step of setting E2E_USE_EXISTING_NETWORK=1. +if [[ -n "${E2E_USE_EXISTING_NETWORK:-}" && -n "${TMPNET_NETWORK_DIR:-}" ]]; then + E2E_ARGS="--use-existing-network" +else + AVALANCHEGO_PATH="$(realpath "${AVALANCHEGO_PATH:-./build/avalanchego}")" + E2E_ARGS="--avalanchego-path=${AVALANCHEGO_PATH}" +fi ################################# -echo "running e2e tests against the local cluster with ${AVALANCHEGO_PATH}" -./tests/e2e/e2e.test \ ---ginkgo.v \ ---log-level debug \ ---network-runner-grpc-endpoint="0.0.0.0:12342" \ ---network-runner-avalanchego-path=${AVALANCHEGO_PATH} \ ---network-runner-avalanchego-log-level="WARN" \ ---test-keys-file=tests/test.insecure.secp256k1.keys --ginkgo.label-filter="${GINKGO_LABEL_FILTER}" \ -&& EXIT_CODE=$? || EXIT_CODE=$? - -kill ${PID} - -if [[ ${EXIT_CODE} -gt 0 ]]; then - echo "FAILURE with exit code ${EXIT_CODE}" - exit ${EXIT_CODE} +# Determine ginkgo args +GINKGO_ARGS="" +if [[ -n "${E2E_SERIAL:-}" ]]; then + # Specs will be executed serially. This supports running e2e tests in CI + # where parallel execution of tests that start new nodes beyond the + # initial set of validators could overload the free tier CI workers. 
+ # Forcing serial execution in this test script instead of marking + # resource-hungry tests as serial supports executing the test suite faster + # on powerful development workstations. + echo "tests will be executed serially to minimize resource requirements" else - echo "ALL SUCCESS!" + # Enable parallel execution of specs defined in the test binary by + # default. This requires invoking the binary via the ginkgo cli + # since the test binary isn't capable of executing specs in + # parallel. + echo "tests will be executed in parallel" + GINKGO_ARGS="-p" fi + +################################# +# - Execute in random order to identify unwanted dependency +ginkgo ${GINKGO_ARGS} -v --randomize-all ./tests/e2e/e2e.test -- "${E2E_ARGS[@]}" "${@}" diff --git a/avalanchego/scripts/tests.upgrade.sh b/avalanchego/scripts/tests.upgrade.sh index d7f9e0d8..142ccf46 100755 --- a/avalanchego/scripts/tests.upgrade.sh +++ b/avalanchego/scripts/tests.upgrade.sh @@ -1,27 +1,33 @@ #!/usr/bin/env bash -set -e + +set -euo pipefail # e.g., -# ./scripts/build.sh -# ./scripts/tests.upgrade.sh 1.7.16 ./build/avalanchego +# ./scripts/tests.upgrade.sh # Use default version +# ./scripts/tests.upgrade.sh 1.10.18 # Specify a version +# AVALANCHEGO_PATH=./path/to/avalanchego ./scripts/tests.upgrade.sh 1.10.18 # Customization of avalanchego path if ! [[ "$0" =~ scripts/tests.upgrade.sh ]]; then echo "must be run from repository root" exit 255 fi -VERSION=$1 +# The AvalancheGo local network does not support long-lived +# backwards-compatible networks. When a breaking change is made to the +# local network, this flag must be updated to the last compatible +# version with the latest code. +# +# v1.10.18 includes restrictions on ports sent over the p2p network along with +# proposervm and P-chain rule changes on the local network. +DEFAULT_VERSION="1.10.18" + +VERSION="${1:-${DEFAULT_VERSION}}" if [[ -z "${VERSION}" ]]; then echo "Missing version argument!" 
- echo "Usage: ${0} [VERSION] [NEW-BINARY]" >> /dev/stderr + echo "Usage: ${0} [VERSION]" >>/dev/stderr exit 255 fi -NEW_BINARY=$2 -if [[ -z "${NEW_BINARY}" ]]; then - echo "Missing new binary path argument!" - echo "Usage: ${0} [VERSION] [NEW-BINARY]" >> /dev/stderr - exit 255 -fi +AVALANCHEGO_PATH="$(realpath "${AVALANCHEGO_PATH:-./build/avalanchego}")" ################################# # download avalanchego @@ -36,77 +42,38 @@ if [[ ${GOOS} == "darwin" ]]; then fi rm -f ${DOWNLOAD_PATH} -rm -rf /tmp/avalanchego-v${VERSION} +rm -rf "/tmp/avalanchego-v${VERSION}" rm -rf /tmp/avalanchego-build echo "downloading avalanchego ${VERSION} at ${DOWNLOAD_URL}" -curl -L ${DOWNLOAD_URL} -o ${DOWNLOAD_PATH} +curl -L "${DOWNLOAD_URL}" -o "${DOWNLOAD_PATH}" echo "extracting downloaded avalanchego" if [[ ${GOOS} == "linux" ]]; then tar xzvf ${DOWNLOAD_PATH} -C /tmp elif [[ ${GOOS} == "darwin" ]]; then unzip ${DOWNLOAD_PATH} -d /tmp/avalanchego-build - mv /tmp/avalanchego-build/build /tmp/avalanchego-v${VERSION} -fi -find /tmp/avalanchego-v${VERSION} - -################################# -# download avalanche-network-runner -# https://github.com/ava-labs/avalanche-network-runner -NETWORK_RUNNER_VERSION=1.3.5-rc.0 -DOWNLOAD_PATH=/tmp/avalanche-network-runner.tar.gz -DOWNLOAD_URL="https://github.com/ava-labs/avalanche-network-runner/releases/download/v${NETWORK_RUNNER_VERSION}/avalanche-network-runner_${NETWORK_RUNNER_VERSION}_${GOOS}_${GOARCH}.tar.gz" -if [[ ${GOOS} == "darwin" ]]; then - DOWNLOAD_URL="https://github.com/ava-labs/avalanche-network-runner/releases/download/v${NETWORK_RUNNER_VERSION}/avalanche-network-runner_${NETWORK_RUNNER_VERSION}_darwin_amd64.tar.gz" + mv /tmp/avalanchego-build/build "/tmp/avalanchego-v${VERSION}" fi +find "/tmp/avalanchego-v${VERSION}" -rm -f ${DOWNLOAD_PATH} -rm -f /tmp/avalanche-network-runner - -echo "downloading avalanche-network-runner ${NETWORK_RUNNER_VERSION} at ${DOWNLOAD_URL}" -curl -L ${DOWNLOAD_URL} -o ${DOWNLOAD_PATH} - -echo 
"extracting downloaded avalanche-network-runner" -tar xzvf ${DOWNLOAD_PATH} -C /tmp -/tmp/avalanche-network-runner -h +# Sourcing constants.sh ensures that the necessary CGO flags are set to +# build the portable version of BLST. Without this, ginkgo may fail to +# build the test binary if run on a host (e.g. github worker) that lacks +# the instructions to build non-portable BLST. +source ./scripts/constants.sh ################################# echo "building upgrade.test" # to install the ginkgo binary (required for test build and run) -go install -modcacherw -v github.com/onsi/ginkgo/v2/ginkgo@v2.1.4 +go install -modcacherw -v github.com/onsi/ginkgo/v2/ginkgo@v2.13.1 ACK_GINKGO_RC=true ginkgo build ./tests/upgrade ./tests/upgrade/upgrade.test --help -################################# -# run "avalanche-network-runner" server -echo "launch avalanche-network-runner in the background" -/tmp/avalanche-network-runner \ -server \ ---log-level debug \ ---port=":12340" \ ---disable-grpc-gateway & -PID=${!} - ################################# # By default, it runs all upgrade test cases! -echo "running upgrade tests against the local cluster with ${NEW_BINARY}" +echo "running upgrade tests against the local cluster with ${AVALANCHEGO_PATH}" ./tests/upgrade/upgrade.test \ ---ginkgo.v \ ---log-level debug \ ---network-runner-grpc-endpoint="0.0.0.0:12340" \ ---network-runner-avalanchego-path=/tmp/avalanchego-v${VERSION}/avalanchego \ ---network-runner-avalanchego-path-to-upgrade=${NEW_BINARY} \ ---network-runner-avalanchego-log-level="WARN" || EXIT_CODE=$? - -# "e2e.test" already terminates the cluster -# just in case tests are aborted, manually terminate them again -pkill -P ${PID} || true -kill -2 ${PID} - -if [[ ${EXIT_CODE} -gt 0 ]]; then - echo "FAILURE with exit code ${EXIT_CODE}" - exit ${EXIT_CODE} -else - echo "ALL SUCCESS!" 
-fi + --ginkgo.v \ + --avalanchego-path="/tmp/avalanchego-v${VERSION}/avalanchego" \ + --avalanchego-path-to-upgrade-to="${AVALANCHEGO_PATH}" diff --git a/avalanchego/scripts/versions.sh b/avalanchego/scripts/versions.sh index 15f96536..ab73ec20 100644 --- a/avalanchego/scripts/versions.sh +++ b/avalanchego/scripts/versions.sh @@ -7,5 +7,5 @@ # Set up the versions to be used # Don't export them as their used in the context of other calls -coreth_version=${CORETH_VERSION:-'v0.12.0-rc.2'} -avalanche_version=${AVALANCHE_VERSION:-'v1.10.0'} \ No newline at end of file +coreth_version=${CORETH_VERSION:-'v0.13.0-rc.0'} +avalanche_version=${AVALANCHE_VERSION:-'v1.11.0'} \ No newline at end of file diff --git a/avalanchego/snow/README.md b/avalanchego/snow/README.md index 8e67c9bd..b5b16bc9 100644 --- a/avalanchego/snow/README.md +++ b/avalanchego/snow/README.md @@ -16,7 +16,9 @@ graph LR ## Intro -The Avalanche network consists of 3 built-in blockchains: X-Chain, C-Chain, and P-Chain. The X-Chain is used to manage assets and uses the Avalanche consensus protocol. The C-Chain is used to create and interact with smart contracts and uses the Snowman consensus protocol. The P-Chain is used to coordinate validators and stake and also uses the Snowman consensus protocol. At the time of writing, the Avalanche network has ~1200 validators. A set of validators makes up a subnet. Subnets can validate 1 or more chains. It is a common misconception that 1 subnet = 1 chain and this is shown by the primary subnet of Avalanche which is made up of the X-Chain, C-Chain, and P-Chain. +The Avalanche primary network consists of 3 built-in blockchains: the X-Chain, C-Chain, and P-Chain. All three chains rely on the Snowman consensus protocol. The X-Chain, which previously used DAG-based Avalanche consensus, was upgraded to Snowman in the Cortina network update. + +The X-Chain is used to manage assets. The C-Chain is used to create and interact with smart contracts. 
The P-Chain is used to coordinate validators and stake. At the time of writing, the Avalanche network has ~1200 validators. A set of validators makes up a subnet. Subnets can validate 1 or more chains. It is a common misconception that 1 subnet = 1 chain and this is shown by the primary subnet of Avalanche which is made up of the X-Chain, C-Chain, and P-Chain. A node in the Avalanche network can either be a validator or a non-validator. A validator stakes AVAX tokens and participates in consensus to earn rewards. A non-validator does not participate in consensus or have any AVAX staked but is used as a public API. Both validators and non-validator need to have their own copy of the chain and to know the current state of the mempool. At the time of writing, there are ~1200 validators and ~1800 non-validator. @@ -68,7 +70,7 @@ The main role of the `sender` is to build and send outbound messages. It is actu ## [Consensus Engine](https://github.com/ava-labs/avalanchego/blob/master/snow/consensus/snowman/consensus.go) -Consensus is defined as getting a group of distributed systems to agree on an outcome. In the case of the Avalanche network, consensus is achieved when validators are in agreement with the state of the blockchain. The novel consensus algorithm is documented in the [white paper](https://assets.website-files.com/5d80307810123f5ffbb34d6e/6009805681b416f34dcae012_Avalanche%20Consensus%20Whitepaper.pdf). There are two main consensus algorithms: Avalanche and Snowman. The engine is responsible for adding proposing a new block to consensus, repeatedly polling the network for decisions (accept/reject), and communicating that decision to the `Sender`. +Consensus is defined as getting a group of distributed systems to agree on an outcome. In the case of the Avalanche network, consensus is achieved when validators are in agreement with the state of the blockchain. 
The novel consensus algorithm is documented in the [white paper](https://assets.website-files.com/5d80307810123f5ffbb34d6e/6009805681b416f34dcae012_Avalanche%20Consensus%20Whitepaper.pdf). There are two main consensus algorithms: Avalanche and Snowman. The engine is responsible for proposing a new block to consensus, repeatedly polling the network for decisions (accept/reject), and communicating that decision to the `Sender`. ## [Blockchain Creation](https://github.com/ava-labs/avalanchego/blob/master/chains/manager.go) diff --git a/avalanchego/snow/acceptor.go b/avalanchego/snow/acceptor.go index f1a92e2f..83575e5c 100644 --- a/avalanchego/snow/acceptor.go +++ b/avalanchego/snow/acceptor.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snow @@ -14,8 +14,6 @@ import ( ) var ( - _ Acceptor = noOpAcceptor{} - _ Acceptor = (*AcceptorTracker)(nil) _ Acceptor = acceptorWrapper{} _ AcceptorGroup = (*acceptorGroup)(nil) @@ -32,39 +30,6 @@ type Acceptor interface { Accept(ctx *ConsensusContext, containerID ids.ID, container []byte) error } -type noOpAcceptor struct{} - -func (noOpAcceptor) Accept(*ConsensusContext, ids.ID, []byte) error { - return nil -} - -// AcceptorTracker tracks the dispatched accept events by its ID and counts. -// Useful for testing. 
-type AcceptorTracker struct { - lock sync.RWMutex - accepted map[ids.ID]int -} - -func NewAcceptorTracker() *AcceptorTracker { - return &AcceptorTracker{ - accepted: make(map[ids.ID]int), - } -} - -func (a *AcceptorTracker) Accept(_ *ConsensusContext, containerID ids.ID, _ []byte) error { - a.lock.Lock() - a.accepted[containerID]++ - a.lock.Unlock() - return nil -} - -func (a *AcceptorTracker) IsAccepted(containerID ids.ID) (int, bool) { - a.lock.RLock() - count, ok := a.accepted[containerID] - a.lock.RUnlock() - return count, ok -} - type acceptorWrapper struct { Acceptor diff --git a/avalanchego/snow/choices/decidable.go b/avalanchego/snow/choices/decidable.go index 18ef4458..4c9ba886 100644 --- a/avalanchego/snow/choices/decidable.go +++ b/avalanchego/snow/choices/decidable.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package choices @@ -24,11 +24,13 @@ type Decidable interface { // Accept this element. // // This element will be accepted by every correct node in the network. + // All subsequent Status calls return Accepted. Accept(context.Context) error // Reject this element. // // This element will not be accepted by any correct node in the network. + // All subsequent Status calls return Rejected. Reject(context.Context) error // Status returns this element's current status. diff --git a/avalanchego/snow/choices/status.go b/avalanchego/snow/choices/status.go index 255356b7..ff530e9b 100644 --- a/avalanchego/snow/choices/status.go +++ b/avalanchego/snow/choices/status.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package choices @@ -29,22 +29,19 @@ func (s Status) MarshalJSON() ([]byte, error) { if err := s.Valid(); err != nil { return nil, err } - return []byte("\"" + s.String() + "\""), nil + return []byte(`"` + s.String() + `"`), nil } func (s *Status) UnmarshalJSON(b []byte) error { - str := string(b) - if str == "null" { - return nil - } - switch str { - case "\"Unknown\"": + switch string(b) { + case "null": + case `"Unknown"`: *s = Unknown - case "\"Processing\"": + case `"Processing"`: *s = Processing - case "\"Rejected\"": + case `"Rejected"`: *s = Rejected - case "\"Accepted\"": + case `"Accepted"`: *s = Accepted default: return errUnknownStatus diff --git a/avalanchego/snow/choices/status_test.go b/avalanchego/snow/choices/status_test.go index 960af7b4..5134ca2b 100644 --- a/avalanchego/snow/choices/status_test.go +++ b/avalanchego/snow/choices/status_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package choices @@ -17,8 +17,8 @@ func TestStatusValid(t *testing.T) { require.NoError(Rejected.Valid()) require.NoError(Processing.Valid()) require.NoError(Unknown.Valid()) - - require.Error(Status(math.MaxInt32).Valid()) + err := Status(math.MaxInt32).Valid() + require.ErrorIs(err, errUnknownStatus) } func TestStatusDecided(t *testing.T) { @@ -28,7 +28,6 @@ func TestStatusDecided(t *testing.T) { require.True(Rejected.Decided()) require.False(Processing.Decided()) require.False(Unknown.Decided()) - require.False(Status(math.MaxInt32).Decided()) } @@ -39,7 +38,6 @@ func TestStatusFetched(t *testing.T) { require.True(Rejected.Fetched()) require.True(Processing.Fetched()) require.False(Unknown.Fetched()) - require.False(Status(math.MaxInt32).Fetched()) } @@ -50,6 +48,5 @@ func TestStatusString(t *testing.T) { require.Equal("Rejected", Rejected.String()) require.Equal("Processing", Processing.String()) require.Equal("Unknown", Unknown.String()) - require.Equal("Invalid status", Status(math.MaxInt32).String()) } diff --git a/avalanchego/snow/choices/test_decidable.go b/avalanchego/snow/choices/test_decidable.go index 055a5405..39e8ed67 100644 --- a/avalanchego/snow/choices/test_decidable.go +++ b/avalanchego/snow/choices/test_decidable.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package choices diff --git a/avalanchego/snow/consensus/avalanche/consensus.go b/avalanchego/snow/consensus/avalanche/consensus.go deleted file mode 100644 index f31d78c6..00000000 --- a/avalanchego/snow/consensus/avalanche/consensus.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package avalanche - -import ( - "context" - - "github.com/ava-labs/avalanchego/api/health" - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/snow/consensus/snowstorm" - "github.com/ava-labs/avalanchego/utils/bag" - "github.com/ava-labs/avalanchego/utils/set" -) - -// TODO: Implement pruning of accepted decisions. -// To perfectly preserve the protocol, this implementation will need to store -// the hashes of all accepted decisions. It is possible to add a heuristic that -// removes sufficiently old decisions. However, that will need to be analyzed to -// ensure safety. It is doable with a weak syncrony assumption. - -// Consensus represents a general avalanche instance that can be used directly -// to process a series of partially ordered elements. -type Consensus interface { - health.Checker - - // Takes in alpha, beta1, beta2, the accepted frontier, the join statuses, - // the mutation statuses, and the consumer statuses. If accept or reject is - // called, the status maps should be immediately updated accordingly. - // Assumes each element in the accepted frontier will return accepted from - // the join status map. - Initialize(context.Context, *snow.ConsensusContext, Parameters, []Vertex) error - - // Returns the number of vertices processing - NumProcessing() int - - // Returns true if the transaction is virtuous. - // That is, no transaction has been added that conflicts with it - IsVirtuous(snowstorm.Tx) bool - - // Adds a new decision. Assumes the dependencies have already been added. - // Assumes that mutations don't conflict with themselves. Returns if a - // critical error has occurred. 
- Add(context.Context, Vertex) error - - // VertexIssued returns true iff Vertex has been added - VertexIssued(Vertex) bool - - // TxIssued returns true if a vertex containing this transaction has been added - TxIssued(snowstorm.Tx) bool - - // Returns the set of transaction IDs that are virtuous but not contained in - // any preferred vertices. - Orphans() set.Set[ids.ID] - - // Returns a set of vertex IDs that were virtuous at the last update. - Virtuous() set.Set[ids.ID] - - // Returns a set of vertex IDs that are preferred - Preferences() set.Set[ids.ID] - - // RecordPoll collects the results of a network poll. If a result has not - // been added, the result is dropped. Returns if a critical error has - // occurred. - RecordPoll(context.Context, bag.UniqueBag[ids.ID]) error - - // Quiesce is guaranteed to return true if the instance is finalized. It - // may, but doesn't need to, return true if all processing vertices are - // rogue. It must return false if there is a virtuous vertex that is still - // processing. - Quiesce() bool - - // Finalized returns true if all transactions that have been added have been - // finalized. Note, it is possible that after returning finalized, a new - // decision may be added such that this instance is no longer finalized. - Finalized() bool -} diff --git a/avalanchego/snow/consensus/avalanche/consensus_test.go b/avalanchego/snow/consensus/avalanche/consensus_test.go deleted file mode 100644 index bfd255e5..00000000 --- a/avalanchego/snow/consensus/avalanche/consensus_test.go +++ /dev/null @@ -1,3184 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package avalanche - -import ( - "context" - "errors" - "math" - "path" - "reflect" - "runtime" - "strings" - "testing" - - "github.com/prometheus/client_golang/prometheus" - - "github.com/stretchr/testify/require" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/snow/choices" - "github.com/ava-labs/avalanchego/snow/consensus/snowball" - "github.com/ava-labs/avalanchego/snow/consensus/snowstorm" - "github.com/ava-labs/avalanchego/utils/bag" - "github.com/ava-labs/avalanchego/utils/compare" - "github.com/ava-labs/avalanchego/utils/set" -) - -type testFunc func(*testing.T, Factory) - -var ( - testFuncs = []testFunc{ - MetricsTest, - NumProcessingTest, - AddTest, - VertexIssuedTest, - TxIssuedTest, - VirtuousTest, - VirtuousSkippedUpdateTest, - VotingTest, - IgnoreInvalidVotingTest, - IgnoreInvalidTransactionVertexVotingTest, - TransitiveVotingTest, - StopVertexVerificationUnequalBetaValuesTest, - StopVertexVerificationEqualBetaValuesTest, - AcceptParentOfPreviouslyRejectedVertexTest, - RejectParentOfPreviouslyRejectedVertexTest, - QuiesceAfterRejectedVertexTest, - SplitVotingTest, - TransitiveRejectionTest, - IsVirtuousTest, - QuiesceTest, - QuiesceAfterVotingTest, - TransactionVertexTest, - OrphansTest, - OrphansUpdateTest, - ErrorOnVacuousAcceptTest, - ErrorOnTxAcceptTest, - ErrorOnVtxAcceptTest, - ErrorOnVtxRejectTest, - ErrorOnParentVtxRejectTest, - ErrorOnTransitiveVtxRejectTest, - SilenceTransactionVertexEventsTest, - } - - errTest = errors.New("non-nil error") -) - -func runConsensusTests(t *testing.T, factory Factory) { - for _, test := range testFuncs { - t.Run(getTestName(test), func(tt *testing.T) { - test(tt, factory) - }) - } -} - -func getTestName(i interface{}) string { - return strings.Split(path.Base(runtime.FuncForPC(reflect.ValueOf(i).Pointer()).Name()), ".")[1] -} - -func MetricsTest(t *testing.T, factory Factory) { - ctx := snow.DefaultConsensusContextTest() - - { - avl := 
factory.New() - params := Parameters{ - Parameters: snowball.Parameters{ - K: 2, - Alpha: 2, - BetaVirtuous: 1, - BetaRogue: 2, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - }, - Parents: 2, - BatchSize: 1, - } - err := ctx.AvalancheRegisterer.Register(prometheus.NewGauge(prometheus.GaugeOpts{ - Name: "vtx_processing", - })) - if err != nil { - t.Fatal(err) - } - if err := avl.Initialize(context.Background(), ctx, params, nil); err == nil { - t.Fatalf("should have failed due to registering a duplicated statistic") - } - } - { - avl := factory.New() - params := Parameters{ - Parameters: snowball.Parameters{ - K: 2, - Alpha: 2, - BetaVirtuous: 1, - BetaRogue: 2, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - }, - Parents: 2, - BatchSize: 1, - } - err := ctx.AvalancheRegisterer.Register(prometheus.NewGauge(prometheus.GaugeOpts{ - Name: "vtx_accepted", - })) - if err != nil { - t.Fatal(err) - } - if err := avl.Initialize(context.Background(), ctx, params, nil); err == nil { - t.Fatalf("should have failed due to registering a duplicated statistic") - } - } - { - avl := factory.New() - params := Parameters{ - Parameters: snowball.Parameters{ - K: 2, - Alpha: 2, - BetaVirtuous: 1, - BetaRogue: 2, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - }, - Parents: 2, - BatchSize: 1, - } - err := ctx.AvalancheRegisterer.Register(prometheus.NewGauge(prometheus.GaugeOpts{ - Name: "vtx_rejected", - })) - if err != nil { - t.Fatal(err) - } - if err := avl.Initialize(context.Background(), ctx, params, nil); err == nil { - t.Fatalf("should have failed due to registering a duplicated statistic") - } - } -} - -func NumProcessingTest(t *testing.T, factory Factory) { - avl := factory.New() - - params := Parameters{ - Parameters: snowball.Parameters{ - K: 1, - Alpha: 1, - BetaVirtuous: 1, - BetaRogue: 1, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - }, - Parents: 2, - BatchSize: 1, - } - vts := []Vertex{ - 
&TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }}, - &TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }}, - } - utxos := []ids.ID{ids.GenerateTestID()} - - if err := avl.Initialize(context.Background(), snow.DefaultConsensusContextTest(), params, vts); err != nil { - t.Fatal(err) - } - - if numProcessing := avl.NumProcessing(); numProcessing != 0 { - t.Fatalf("expected %d vertices processing but returned %d", 0, numProcessing) - } - - tx0 := &snowstorm.TestTx{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }} - tx0.InputIDsV = append(tx0.InputIDsV, utxos[0]) - - vtx0 := &TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: vts, - HeightV: 1, - TxsV: []snowstorm.Tx{tx0}, - } - - if err := avl.Add(context.Background(), vtx0); err != nil { - t.Fatal(err) - } - - if numProcessing := avl.NumProcessing(); numProcessing != 1 { - t.Fatalf("expected %d vertices processing but returned %d", 1, numProcessing) - } - - tx1 := &snowstorm.TestTx{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }} - tx1.InputIDsV = append(tx1.InputIDsV, utxos[0]) - - vtx1 := &TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: vts, - HeightV: 1, - TxsV: []snowstorm.Tx{tx1}, - } - - if err := avl.Add(context.Background(), vtx1); err != nil { - t.Fatal(err) - } - - if numProcessing := avl.NumProcessing(); numProcessing != 2 { - t.Fatalf("expected %d vertices processing but returned %d", 2, numProcessing) - } - - if err := avl.Add(context.Background(), vtx1); err != nil { - t.Fatal(err) - } - - if numProcessing := avl.NumProcessing(); numProcessing != 2 { - t.Fatalf("expected %d vertices processing but returned %d", 2, 
numProcessing) - } - - if err := avl.Add(context.Background(), vts[0]); err != nil { - t.Fatal(err) - } - - if numProcessing := avl.NumProcessing(); numProcessing != 2 { - t.Fatalf("expected %d vertices processing but returned %d", 2, numProcessing) - } - - votes := bag.UniqueBag[ids.ID]{} - votes.Add(0, vtx0.ID()) - if err := avl.RecordPoll(context.Background(), votes); err != nil { - t.Fatal(err) - } - - if numProcessing := avl.NumProcessing(); numProcessing != 0 { - t.Fatalf("expected %d vertices processing but returned %d", 0, numProcessing) - } -} - -func AddTest(t *testing.T, factory Factory) { - avl := factory.New() - - params := Parameters{ - Parameters: snowball.Parameters{ - K: 2, - Alpha: 2, - BetaVirtuous: 1, - BetaRogue: 2, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - }, - Parents: 2, - BatchSize: 1, - } - - seedVertices := []Vertex{ - &TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }, - }, - &TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }, - }, - } - - ctx := snow.DefaultConsensusContextTest() - // track consensus events to ensure idempotency in case of redundant vertex adds - vertexEvents := snow.NewAcceptorTracker() - ctx.VertexAcceptor = vertexEvents - - if err := avl.Initialize(context.Background(), ctx, params, seedVertices); err != nil { - t.Fatal(err) - } - - if !avl.Finalized() { - t.Fatal("An empty avalanche instance is not finalized") - } - if !compare.UnsortedEquals([]ids.ID{seedVertices[0].ID(), seedVertices[1].ID()}, avl.Preferences().List()) { - t.Fatal("Initial frontier failed to be set") - } - - utxos := []ids.ID{ids.GenerateTestID()} - vtx0 := &TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: seedVertices, - HeightV: 1, - TxsV: []snowstorm.Tx{ - &snowstorm.TestTx{ - 
TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - InputIDsV: utxos, - }, - }, - } - vtx1 := &TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: seedVertices, - HeightV: 1, - TxsV: []snowstorm.Tx{ - &snowstorm.TestTx{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - InputIDsV: utxos, - }, - }, - } - - tt := []struct { - toAdd Vertex - err error - finalized bool - preferenceSet []ids.ID - issued int - accepted int - }{ - { - toAdd: vtx0, - err: nil, - finalized: false, - preferenceSet: []ids.ID{vtx0.IDV}, - issued: 1, // on "add", it should be issued - accepted: 0, - }, - { - toAdd: vtx1, - err: nil, - finalized: false, - preferenceSet: []ids.ID{vtx0.IDV}, - issued: 1, // on "add", it should be issued - accepted: 0, - }, - { - toAdd: seedVertices[0], - err: nil, - finalized: false, - preferenceSet: []ids.ID{vtx0.IDV}, - issued: 0, // initialized vertex should not belong to in-processing nodes - accepted: 0, - }, - } - for i, tv := range tt { - for _, j := range []int{1, 2} { // duplicate vertex add should be skipped - err := avl.Add(context.Background(), tv.toAdd) - if err != tv.err { - t.Fatalf("#%d-%d: expected error %v, got %v", i, j, tv.err, err) - } - finalized := avl.Finalized() - if finalized != tv.finalized { - t.Fatalf("#%d-%d: expected finalized %v, got %v", i, j, finalized, tv.finalized) - } - preferenceSet := avl.Preferences().List() - if !compare.UnsortedEquals(tv.preferenceSet, preferenceSet) { - t.Fatalf("#%d-%d: expected preferenceSet %v, got %v", i, j, preferenceSet, tv.preferenceSet) - } - if accepted, _ := vertexEvents.IsAccepted(tv.toAdd.ID()); accepted != tv.accepted { - t.Fatalf("#%d-%d: expected accepted %d, got %d", i, j, tv.accepted, accepted) - } - } - } -} - -func VertexIssuedTest(t *testing.T, factory Factory) { - avl := factory.New() - - params := 
Parameters{ - Parameters: snowball.Parameters{ - K: 2, - Alpha: 2, - BetaVirtuous: 1, - BetaRogue: 2, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - }, - Parents: 2, - BatchSize: 1, - } - vts := []Vertex{ - &TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }}, - &TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }}, - } - utxos := []ids.ID{ids.GenerateTestID()} - - if err := avl.Initialize(context.Background(), snow.DefaultConsensusContextTest(), params, vts); err != nil { - t.Fatal(err) - } - - if !avl.VertexIssued(vts[0]) { - t.Fatalf("Genesis Vertex not reported as issued") - } - - tx := &snowstorm.TestTx{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }} - tx.InputIDsV = append(tx.InputIDsV, utxos[0]) - - vtx := &TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: vts, - HeightV: 1, - TxsV: []snowstorm.Tx{tx}, - } - - if avl.VertexIssued(vtx) { - t.Fatalf("Vertex reported as issued") - } else if err := avl.Add(context.Background(), vtx); err != nil { - t.Fatal(err) - } else if !avl.VertexIssued(vtx) { - t.Fatalf("Vertex reported as not issued") - } -} - -func TxIssuedTest(t *testing.T, factory Factory) { - avl := factory.New() - - params := Parameters{ - Parameters: snowball.Parameters{ - K: 2, - Alpha: 2, - BetaVirtuous: 1, - BetaRogue: 2, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - }, - Parents: 2, - BatchSize: 1, - } - - tx0 := &snowstorm.TestTx{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }} - vts := []Vertex{&TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }, - TxsV: 
[]snowstorm.Tx{tx0}, - }} - utxos := []ids.ID{ids.GenerateTestID()} - - tx1 := &snowstorm.TestTx{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }} - tx1.InputIDsV = append(tx1.InputIDsV, utxos[0]) - - if err := avl.Initialize(context.Background(), snow.DefaultConsensusContextTest(), params, vts); err != nil { - t.Fatal(err) - } - - if !avl.TxIssued(tx0) { - t.Fatalf("Genesis Tx not reported as issued") - } else if avl.TxIssued(tx1) { - t.Fatalf("Tx reported as issued") - } - - vtx := &TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - HeightV: 1, - TxsV: []snowstorm.Tx{tx1}, - } - - if err := avl.Add(context.Background(), vtx); err != nil { - t.Fatal(err) - } else if !avl.TxIssued(tx1) { - t.Fatalf("Tx reported as not issued") - } -} - -func VirtuousTest(t *testing.T, factory Factory) { - avl := factory.New() - - params := Parameters{ - Parameters: snowball.Parameters{ - K: 2, - Alpha: 2, - BetaVirtuous: 10, - BetaRogue: 20, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - }, - Parents: 2, - BatchSize: 1, - } - vts := []Vertex{ - &TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }}, - &TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }}, - } - utxos := []ids.ID{ids.GenerateTestID(), ids.GenerateTestID()} - - err := avl.Initialize(context.Background(), snow.DefaultConsensusContextTest(), params, vts) - if err != nil { - t.Fatal(err) - } - - virtuous := avl.Virtuous() - switch { - case virtuous.Len() != 2: - t.Fatalf("Wrong number of virtuous.") - case !virtuous.Contains(vts[0].ID()): - t.Fatalf("Wrong virtuous") - case !virtuous.Contains(vts[1].ID()): - t.Fatalf("Wrong virtuous") - } - - tx0 := &snowstorm.TestTx{TestDecidable: choices.TestDecidable{ - IDV: 
ids.GenerateTestID(), - StatusV: choices.Processing, - }} - tx0.InputIDsV = append(tx0.InputIDsV, utxos[0]) - - vtx0 := &TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: vts, - HeightV: 1, - TxsV: []snowstorm.Tx{tx0}, - } - - tx1 := &snowstorm.TestTx{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }} - tx1.InputIDsV = append(tx1.InputIDsV, utxos[0]) - - vtx1 := &TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: vts, - HeightV: 1, - TxsV: []snowstorm.Tx{tx1}, - } - - tx2 := &snowstorm.TestTx{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }} - tx2.InputIDsV = append(tx2.InputIDsV, utxos[1]) - - vtx2 := &TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: []Vertex{vtx0}, - HeightV: 2, - TxsV: []snowstorm.Tx{tx2}, - } - - if err := avl.Add(context.Background(), vtx0); err != nil { - t.Fatal(err) - } else if virtuous := avl.Virtuous(); virtuous.Len() != 1 { - t.Fatalf("Wrong number of virtuous.") - } else if !virtuous.Contains(vtx0.IDV) { - t.Fatalf("Wrong virtuous") - } else if err := avl.Add(context.Background(), vtx1); err != nil { - t.Fatal(err) - } else if virtuous := avl.Virtuous(); virtuous.Len() != 1 { - t.Fatalf("Wrong number of virtuous.") - } else if !virtuous.Contains(vtx0.IDV) { - t.Fatalf("Wrong virtuous") - } - - votes := bag.UniqueBag[ids.ID]{} - votes.Add(0, vtx1.ID()) - votes.Add(1, vtx1.ID()) - - if err := avl.RecordPoll(context.Background(), votes); err != nil { - t.Fatal(err) - } - - virtuous = avl.Virtuous() - switch { - case virtuous.Len() != 2: - t.Fatalf("Wrong number of virtuous.") - case !virtuous.Contains(vts[0].ID()): - t.Fatalf("Wrong virtuous") - case !virtuous.Contains(vts[1].ID()): - t.Fatalf("Wrong 
virtuous") - } - - if err := avl.Add(context.Background(), vtx2); err != nil { - t.Fatal(err) - } - - virtuous = avl.Virtuous() - switch { - case virtuous.Len() != 2: - t.Fatalf("Wrong number of virtuous.") - case !virtuous.Contains(vts[0].ID()): - t.Fatalf("Wrong virtuous") - case !virtuous.Contains(vts[1].ID()): - t.Fatalf("Wrong virtuous") - } - - if err := avl.RecordPoll(context.Background(), votes); err != nil { - t.Fatal(err) - } - - virtuous = avl.Virtuous() - switch { - case virtuous.Len() != 2: - t.Fatalf("Wrong number of virtuous.") - case !virtuous.Contains(vts[0].ID()): - t.Fatalf("Wrong virtuous") - case !virtuous.Contains(vts[1].ID()): - t.Fatalf("Wrong virtuous") - } -} - -func VirtuousSkippedUpdateTest(t *testing.T, factory Factory) { - avl := factory.New() - - params := Parameters{ - Parameters: snowball.Parameters{ - K: 2, - Alpha: 2, - BetaVirtuous: 10, - BetaRogue: 20, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - }, - Parents: 2, - BatchSize: 1, - } - vts := []Vertex{ - &TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }}, - &TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }}, - } - utxos := []ids.ID{ - ids.GenerateTestID(), - ids.GenerateTestID(), - } - - err := avl.Initialize(context.Background(), snow.DefaultConsensusContextTest(), params, vts) - if err != nil { - t.Fatal(err) - } - - virtuous := avl.Virtuous() - switch { - case virtuous.Len() != 2: - t.Fatalf("Wrong number of virtuous.") - case !virtuous.Contains(vts[0].ID()): - t.Fatalf("Wrong virtuous") - case !virtuous.Contains(vts[1].ID()): - t.Fatalf("Wrong virtuous") - } - - tx0 := &snowstorm.TestTx{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }} - tx0.InputIDsV = append(tx0.InputIDsV, utxos[0]) - - vtx0 := &TestVertex{ - TestDecidable: 
choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: vts, - HeightV: 1, - TxsV: []snowstorm.Tx{tx0}, - } - - tx1 := &snowstorm.TestTx{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }} - tx1.InputIDsV = append(tx1.InputIDsV, utxos[0]) - - vtx1 := &TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: vts, - HeightV: 1, - TxsV: []snowstorm.Tx{tx1}, - } - - if err := avl.Add(context.Background(), vtx0); err != nil { - t.Fatal(err) - } else if virtuous := avl.Virtuous(); virtuous.Len() != 1 { - t.Fatalf("Wrong number of virtuous.") - } else if !virtuous.Contains(vtx0.IDV) { - t.Fatalf("Wrong virtuous") - } else if err := avl.Add(context.Background(), vtx1); err != nil { - t.Fatal(err) - } else if virtuous := avl.Virtuous(); virtuous.Len() != 1 { - t.Fatalf("Wrong number of virtuous.") - } else if !virtuous.Contains(vtx0.IDV) { - t.Fatalf("Wrong virtuous") - } else if err := avl.RecordPoll(context.Background(), bag.UniqueBag[ids.ID]{}); err != nil { - t.Fatal(err) - } else if virtuous := avl.Virtuous(); virtuous.Len() != 1 { - t.Fatalf("Wrong number of virtuous.") - } else if !virtuous.Contains(vtx0.IDV) { - t.Fatalf("Wrong virtuous") - } -} - -// Creates two conflicting transactions in different vertices -// and make sure only one is accepted -func VotingTest(t *testing.T, factory Factory) { - avl := factory.New() - - params := Parameters{ - Parameters: snowball.Parameters{ - K: 2, - Alpha: 2, - BetaVirtuous: 1, - BetaRogue: 2, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - }, - Parents: 2, - BatchSize: 1, - } - vts := []Vertex{ - &TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }}, - &TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: 
choices.Accepted, - }}, - } - utxos := []ids.ID{ids.GenerateTestID()} - - err := avl.Initialize(context.Background(), snow.DefaultConsensusContextTest(), params, vts) - if err != nil { - t.Fatal(err) - } - - // create two different transactions with the same input UTXO (double-spend) - tx0 := &snowstorm.TestTx{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - InputIDsV: []ids.ID{utxos[0]}, - } - tx1 := &snowstorm.TestTx{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - InputIDsV: []ids.ID{utxos[0]}, - } - - // put them in different vertices - vtx0 := &TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: vts, - HeightV: 1, - TxsV: []snowstorm.Tx{tx0}, - } - vtx1 := &TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: vts, - HeightV: 1, - TxsV: []snowstorm.Tx{tx1}, - } - - // issue two vertices with conflicting transaction to the consensus instance - if err := avl.Add(context.Background(), vtx0); err != nil { - t.Fatal(err) - } - if err := avl.Add(context.Background(), vtx1); err != nil { - t.Fatal(err) - } - - // create poll results, all vote for vtx1, not for vtx0 - sm := bag.UniqueBag[ids.ID]{} - sm.Add(0, vtx1.IDV) - sm.Add(1, vtx1.IDV) - - // "BetaRogue" is 2, thus consensus should not be finalized yet - err = avl.RecordPoll(context.Background(), sm) - switch { - case err != nil: - t.Fatal(err) - case avl.Finalized(): - t.Fatalf("An avalanche instance finalized too early") - case !compare.UnsortedEquals([]ids.ID{vtx1.IDV}, avl.Preferences().List()): - t.Fatalf("Initial frontier failed to be set") - case tx0.Status() != choices.Processing: - t.Fatalf("Tx should have been Processing") - case tx1.Status() != choices.Processing: - t.Fatalf("Tx should have been Processing") - } - - // second poll should 
reach consensus, - // and the other vertex of conflict transaction should be rejected - err = avl.RecordPoll(context.Background(), sm) - switch { - case err != nil: - t.Fatal(err) - case !avl.Finalized(): - t.Fatalf("An avalanche instance finalized too late") - case !compare.UnsortedEquals([]ids.ID{vtx1.IDV}, avl.Preferences().List()): - // rejected vertex ID (vtx0) must have been removed from the preferred set - t.Fatalf("Initial frontier failed to be set") - case tx0.Status() != choices.Rejected: - t.Fatalf("Tx should have been rejected") - case tx1.Status() != choices.Accepted: - t.Fatalf("Tx should have been accepted") - } -} - -func IgnoreInvalidVotingTest(t *testing.T, factory Factory) { - avl := factory.New() - - params := Parameters{ - Parameters: snowball.Parameters{ - K: 3, - Alpha: 2, - BetaVirtuous: 1, - BetaRogue: 1, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - }, - Parents: 2, - BatchSize: 1, - } - - vts := []Vertex{ - &TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }}, - &TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }}, - } - utxos := []ids.ID{ids.GenerateTestID()} - - if err := avl.Initialize(context.Background(), snow.DefaultConsensusContextTest(), params, vts); err != nil { - t.Fatal(err) - } - - tx0 := &snowstorm.TestTx{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }} - tx0.InputIDsV = append(tx0.InputIDsV, utxos[0]) - - vtx0 := &TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: vts, - HeightV: 1, - TxsV: []snowstorm.Tx{tx0}, - } - - tx1 := &snowstorm.TestTx{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }} - tx1.InputIDsV = append(tx1.InputIDsV, utxos[0]) - - vtx1 := &TestVertex{ - 
TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: vts, - HeightV: 1, - TxsV: []snowstorm.Tx{tx1}, - } - - if err := avl.Add(context.Background(), vtx0); err != nil { - t.Fatal(err) - } else if err := avl.Add(context.Background(), vtx1); err != nil { - t.Fatal(err) - } - - sm := bag.UniqueBag[ids.ID]{} - sm.Add(0, vtx0.IDV) - sm.Add(1, vtx1.IDV) - - // Add Illegal Vote cast by Response 2 - sm.Add(2, vtx0.IDV) - sm.Add(2, vtx1.IDV) - - if err := avl.RecordPoll(context.Background(), sm); err != nil { - t.Fatal(err) - } else if avl.Finalized() { - t.Fatalf("An avalanche instance finalized too early") - } -} - -func IgnoreInvalidTransactionVertexVotingTest(t *testing.T, factory Factory) { - avl := factory.New() - - params := Parameters{ - Parameters: snowball.Parameters{ - K: 3, - Alpha: 2, - BetaVirtuous: 1, - BetaRogue: 1, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - }, - Parents: 2, - BatchSize: 1, - } - - vts := []Vertex{ - &TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }}, - &TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }}, - } - - if err := avl.Initialize(context.Background(), snow.DefaultConsensusContextTest(), params, vts); err != nil { - t.Fatal(err) - } - - vtx0 := &TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: vts, - HeightV: 1, - } - - tx1 := &snowstorm.TestTx{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - InputIDsV: []ids.ID{vtx0.ID()}, - } - - vtx1 := &TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: vts, - HeightV: 1, - TxsV: []snowstorm.Tx{tx1}, - } - - if err := 
avl.Add(context.Background(), vtx0); err != nil { - t.Fatal(err) - } else if err := avl.Add(context.Background(), vtx1); err != nil { - t.Fatal(err) - } - - sm := bag.UniqueBag[ids.ID]{} - sm.Add(0, vtx0.IDV) - sm.Add(1, vtx1.IDV) - - // Add Illegal Vote cast by Response 2 - sm.Add(2, vtx0.IDV) - sm.Add(2, vtx1.IDV) - - if err := avl.RecordPoll(context.Background(), sm); err != nil { - t.Fatal(err) - } else if avl.Finalized() { - t.Fatalf("An avalanche instance finalized too early") - } -} - -func TransitiveVotingTest(t *testing.T, factory Factory) { - avl := factory.New() - - params := Parameters{ - Parameters: snowball.Parameters{ - K: 2, - Alpha: 2, - BetaVirtuous: 1, - BetaRogue: 2, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - }, - Parents: 2, - BatchSize: 1, - } - vts := []Vertex{ - &TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }}, - &TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }}, - } - utxos := []ids.ID{ids.GenerateTestID(), ids.GenerateTestID()} - - err := avl.Initialize(context.Background(), snow.DefaultConsensusContextTest(), params, vts) - if err != nil { - t.Fatal(err) - } - - tx0 := &snowstorm.TestTx{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }} - tx0.InputIDsV = append(tx0.InputIDsV, utxos[0]) - - vtx0 := &TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: vts, - HeightV: 1, - TxsV: []snowstorm.Tx{tx0}, - } - - tx1 := &snowstorm.TestTx{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }} - tx1.InputIDsV = append(tx1.InputIDsV, utxos[1]) - - vtx1 := &TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: 
[]Vertex{vtx0}, - HeightV: 2, - TxsV: []snowstorm.Tx{tx1}, - } - - vtx2 := &TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: []Vertex{vtx1}, - HeightV: 3, - TxsV: []snowstorm.Tx{tx1}, - } - - if err := avl.Add(context.Background(), vtx0); err != nil { - t.Fatal(err) - } else if err := avl.Add(context.Background(), vtx1); err != nil { - t.Fatal(err) - } else if err := avl.Add(context.Background(), vtx2); err != nil { - t.Fatal(err) - } - - sm1 := bag.UniqueBag[ids.ID]{} - sm1.Add(0, vtx0.IDV) - sm1.Add(1, vtx2.IDV) - - err = avl.RecordPoll(context.Background(), sm1) - switch { - case err != nil: - t.Fatal(err) - case avl.Finalized(): - t.Fatalf("An avalanche instance finalized too early") - case !compare.UnsortedEquals([]ids.ID{vtx2.IDV}, avl.Preferences().List()): - t.Fatalf("Initial frontier failed to be set") - case tx0.Status() != choices.Accepted: - t.Fatalf("Tx should have been accepted") - } - - sm2 := bag.UniqueBag[ids.ID]{} - sm2.Add(0, vtx2.IDV) - sm2.Add(1, vtx2.IDV) - - err = avl.RecordPoll(context.Background(), sm2) - switch { - case err != nil: - t.Fatal(err) - case !avl.Finalized(): - t.Fatalf("An avalanche instance finalized too late") - case !compare.UnsortedEquals([]ids.ID{vtx2.IDV}, avl.Preferences().List()): - t.Fatalf("Initial frontier failed to be set") - case tx0.Status() != choices.Accepted: - t.Fatalf("Tx should have been accepted") - case tx1.Status() != choices.Accepted: - t.Fatalf("Tx should have been accepted") - } -} - -func StopVertexVerificationUnequalBetaValuesTest(t *testing.T, factory Factory) { - require := require.New(t) - - avl := factory.New() - - params := Parameters{ - Parameters: snowball.Parameters{ - K: 1, - Alpha: 1, - BetaVirtuous: 1, - BetaRogue: 2, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - }, - Parents: 2, - BatchSize: 1, - } - vts := []Vertex{ - &TestVertex{TestDecidable: 
choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }}, - &TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }}, - } - utxos := []ids.ID{ids.GenerateTestID(), ids.GenerateTestID()} - - require.NoError(avl.Initialize(context.Background(), snow.DefaultConsensusContextTest(), params, vts)) - - tx0 := &snowstorm.TestTx{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - InputIDsV: utxos, - } - tx1 := &snowstorm.TestTx{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - InputIDsV: utxos, - } - - vtx0 := &TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: vts, - HeightV: 1, - TxsV: []snowstorm.Tx{tx0}, - } - vtx1A := &TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: vts[:1], - HeightV: 1, - TxsV: []snowstorm.Tx{tx1}, - } - vtx1B := &TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: vts[1:], - HeightV: 1, - TxsV: []snowstorm.Tx{tx1}, - } - stopVertex := &TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: []Vertex{vtx1B}, - HasWhitelistV: true, - WhitelistV: set.Set[ids.ID]{ - vtx1B.IDV: struct{}{}, - tx1.IDV: struct{}{}, - }, - HeightV: 2, - } - - require.NoError(avl.Add(context.Background(), vtx0)) - require.NoError(avl.Add(context.Background(), vtx1A)) - require.NoError(avl.Add(context.Background(), vtx1B)) - - sm1 := bag.UniqueBag[ids.ID]{} - sm1.Add(0, vtx1A.IDV, vtx1B.IDV) - - // Transaction vertex for vtx1A is now accepted - require.NoError(avl.RecordPoll(context.Background(), sm1)) - require.Equal(choices.Processing, tx0.Status()) - 
require.Equal(choices.Processing, tx1.Status()) - require.Equal(choices.Processing, vtx0.Status()) - require.Equal(choices.Processing, vtx1A.Status()) - require.Equal(choices.Processing, vtx1B.Status()) - - // Because vtx1A isn't accepted, the stopVertex verification passes - require.NoError(avl.Add(context.Background(), stopVertex)) - - // Because vtx1A is now accepted, the stopVertex should be rejected. - // However, because BetaVirtuous < BetaRogue it is possible for the - // stopVertex to be processing. - require.NoError(avl.RecordPoll(context.Background(), sm1)) - require.Equal(choices.Rejected, tx0.Status()) - require.Equal(choices.Accepted, tx1.Status()) - require.Equal(choices.Rejected, vtx0.Status()) - require.Equal(choices.Accepted, vtx1A.Status()) - require.Equal(choices.Accepted, vtx1B.Status()) - require.Equal(choices.Processing, stopVertex.Status()) -} - -func StopVertexVerificationEqualBetaValuesTest(t *testing.T, factory Factory) { - require := require.New(t) - - avl := factory.New() - - params := Parameters{ - Parameters: snowball.Parameters{ - K: 1, - Alpha: 1, - BetaVirtuous: 2, - BetaRogue: 2, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - }, - Parents: 2, - BatchSize: 1, - } - vts := []Vertex{ - &TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }}, - &TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }}, - } - utxos := []ids.ID{ids.GenerateTestID(), ids.GenerateTestID()} - - require.NoError(avl.Initialize(context.Background(), snow.DefaultConsensusContextTest(), params, vts)) - - tx0 := &snowstorm.TestTx{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - InputIDsV: utxos, - } - tx1 := &snowstorm.TestTx{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - 
InputIDsV: utxos, - } - - vtx0 := &TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: vts, - HeightV: 1, - TxsV: []snowstorm.Tx{tx0}, - } - vtx1A := &TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: vts[:1], - HeightV: 1, - TxsV: []snowstorm.Tx{tx1}, - } - vtx1B := &TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: vts[1:], - HeightV: 1, - TxsV: []snowstorm.Tx{tx1}, - } - stopVertex := &TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: []Vertex{vtx1B}, - HasWhitelistV: true, - WhitelistV: set.Set[ids.ID]{ - vtx1B.IDV: struct{}{}, - tx1.IDV: struct{}{}, - }, - HeightV: 2, - } - - require.NoError(avl.Add(context.Background(), vtx0)) - require.NoError(avl.Add(context.Background(), vtx1A)) - require.NoError(avl.Add(context.Background(), vtx1B)) - - sm1 := bag.UniqueBag[ids.ID]{} - sm1.Add(0, vtx1A.IDV, vtx1B.IDV) - - // Transaction vertex for vtx1A can not be accepted because BetaVirtuous is - // equal to BetaRogue - require.NoError(avl.RecordPoll(context.Background(), sm1)) - require.Equal(choices.Processing, tx0.Status()) - require.Equal(choices.Processing, tx1.Status()) - require.Equal(choices.Processing, vtx0.Status()) - require.Equal(choices.Processing, vtx1A.Status()) - require.Equal(choices.Processing, vtx1B.Status()) - - // Because vtx1A isn't accepted, the stopVertex verification passes - require.NoError(avl.Add(context.Background(), stopVertex)) - - // Because vtx1A is now accepted, the stopVertex should be rejected - require.NoError(avl.RecordPoll(context.Background(), sm1)) - require.Equal(choices.Rejected, tx0.Status()) - require.Equal(choices.Accepted, tx1.Status()) - require.Equal(choices.Rejected, vtx0.Status()) - 
require.Equal(choices.Accepted, vtx1A.Status()) - require.Equal(choices.Accepted, vtx1B.Status()) - require.Equal(choices.Rejected, stopVertex.Status()) -} - -func AcceptParentOfPreviouslyRejectedVertexTest(t *testing.T, factory Factory) { - require := require.New(t) - - avl := factory.New() - - params := Parameters{ - Parameters: snowball.Parameters{ - K: 1, - Alpha: 1, - BetaVirtuous: 1, - BetaRogue: 1, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - }, - Parents: 2, - BatchSize: 1, - } - vts := []Vertex{ - &TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }}, - } - utxos := []ids.ID{ids.GenerateTestID(), ids.GenerateTestID()} - - require.NoError(avl.Initialize(context.Background(), snow.DefaultConsensusContextTest(), params, vts)) - - tx0 := &snowstorm.TestTx{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - InputIDsV: utxos[:1], - } - - tx1A := &snowstorm.TestTx{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - InputIDsV: utxos[1:], - } - tx1B := &snowstorm.TestTx{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - InputIDsV: utxos[1:], - } - - vtx1A := &TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: vts, - HeightV: 1, - TxsV: []snowstorm.Tx{tx1A}, - } - - vtx0 := &TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: vts, - HeightV: 1, - TxsV: []snowstorm.Tx{tx0}, - } - vtx1B := &TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: []Vertex{vtx0}, - HeightV: 2, - TxsV: []snowstorm.Tx{tx1B}, - } - - require.NoError(avl.Add(context.Background(), 
vtx0)) - require.NoError(avl.Add(context.Background(), vtx1A)) - require.NoError(avl.Add(context.Background(), vtx1B)) - - sm1 := bag.UniqueBag[ids.ID]{} - sm1.Add(0, vtx1A.IDV) - - require.NoError(avl.RecordPoll(context.Background(), sm1)) - require.Equal(choices.Accepted, tx1A.Status()) - require.Equal(choices.Accepted, vtx1A.Status()) - require.Equal(choices.Rejected, tx1B.Status()) - require.Equal(choices.Rejected, vtx1B.Status()) - require.Equal(1, avl.NumProcessing()) - require.Equal(choices.Processing, tx0.Status()) - require.Equal(choices.Processing, vtx0.Status()) - - sm0 := bag.UniqueBag[ids.ID]{} - sm0.Add(0, vtx0.IDV) - - require.NoError(avl.RecordPoll(context.Background(), sm0)) - require.Zero(avl.NumProcessing()) - require.Equal(choices.Accepted, tx0.Status()) - require.Equal(choices.Accepted, vtx0.Status()) - - prefs := avl.Preferences() - require.Len(prefs, 2) - require.Contains(prefs, vtx0.ID()) - require.Contains(prefs, vtx1A.ID()) -} - -func RejectParentOfPreviouslyRejectedVertexTest(t *testing.T, factory Factory) { - require := require.New(t) - - avl := factory.New() - - params := Parameters{ - Parameters: snowball.Parameters{ - K: 1, - Alpha: 1, - BetaVirtuous: 1, - BetaRogue: 1, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - }, - Parents: 2, - BatchSize: 1, - } - vts := []Vertex{ - &TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }}, - } - utxos := []ids.ID{ids.GenerateTestID(), ids.GenerateTestID()} - - require.NoError(avl.Initialize(context.Background(), snow.DefaultConsensusContextTest(), params, vts)) - - tx0A := &snowstorm.TestTx{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - InputIDsV: utxos[:1], - } - tx0B := &snowstorm.TestTx{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - InputIDsV: utxos[:1], - } - 
- tx1A := &snowstorm.TestTx{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - InputIDsV: utxos[1:], - } - tx1B := &snowstorm.TestTx{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - InputIDsV: utxos[1:], - } - - vtx0A := &TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: vts, - HeightV: 1, - TxsV: []snowstorm.Tx{tx0A}, - } - vtx1A := &TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: vts, - HeightV: 1, - TxsV: []snowstorm.Tx{tx1A}, - } - - vtx0B := &TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: vts, - HeightV: 1, - TxsV: []snowstorm.Tx{tx0B}, - } - vtx1B := &TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: []Vertex{vtx0B}, - HeightV: 2, - TxsV: []snowstorm.Tx{tx1B}, - } - - require.NoError(avl.Add(context.Background(), vtx0A)) - require.NoError(avl.Add(context.Background(), vtx1A)) - require.NoError(avl.Add(context.Background(), vtx0B)) - require.NoError(avl.Add(context.Background(), vtx1B)) - - sm1 := bag.UniqueBag[ids.ID]{} - sm1.Add(0, vtx1A.IDV) - - require.NoError(avl.RecordPoll(context.Background(), sm1)) - require.Equal(choices.Accepted, tx1A.Status()) - require.Equal(choices.Accepted, vtx1A.Status()) - require.Equal(choices.Rejected, tx1B.Status()) - require.Equal(choices.Rejected, vtx1B.Status()) - require.Equal(2, avl.NumProcessing()) - require.Equal(choices.Processing, tx0A.Status()) - require.Equal(choices.Processing, vtx0A.Status()) - require.Equal(choices.Processing, tx0B.Status()) - require.Equal(choices.Processing, vtx0B.Status()) - - sm0 := bag.UniqueBag[ids.ID]{} - sm0.Add(0, vtx0A.IDV) - - 
require.NoError(avl.RecordPoll(context.Background(), sm0)) - require.Zero(avl.NumProcessing()) - require.Equal(choices.Accepted, tx0A.Status()) - require.Equal(choices.Accepted, vtx0A.Status()) - require.Equal(choices.Rejected, tx0B.Status()) - require.Equal(choices.Rejected, vtx0B.Status()) - - orphans := avl.Orphans() - require.Empty(orphans) -} - -func QuiesceAfterRejectedVertexTest(t *testing.T, factory Factory) { - require := require.New(t) - - avl := factory.New() - - params := Parameters{ - Parameters: snowball.Parameters{ - K: 1, - Alpha: 1, - BetaVirtuous: 1, - BetaRogue: 1, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - }, - Parents: 2, - BatchSize: 1, - } - vts := []Vertex{ - &TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }}, - } - utxos := []ids.ID{ids.GenerateTestID(), ids.GenerateTestID()} - - require.NoError(avl.Initialize(context.Background(), snow.DefaultConsensusContextTest(), params, vts)) - - txA := &snowstorm.TestTx{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - InputIDsV: utxos, - } - txB := &snowstorm.TestTx{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - InputIDsV: utxos, - } - - vtxA := &TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: vts, - HeightV: 1, - TxsV: []snowstorm.Tx{txA}, - } - - vtxB0 := &TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: vts, - HeightV: 1, - TxsV: []snowstorm.Tx{txB}, - } - vtxB1 := &TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: []Vertex{vtxB0}, - HeightV: 2, - TxsV: []snowstorm.Tx{txB}, - } - - 
require.NoError(avl.Add(context.Background(), vtxA)) - require.NoError(avl.Add(context.Background(), vtxB0)) - require.NoError(avl.Add(context.Background(), vtxB1)) - - sm1 := bag.UniqueBag[ids.ID]{} - sm1.Add(0, vtxA.IDV) - - require.NoError(avl.RecordPoll(context.Background(), sm1)) - require.Equal(choices.Accepted, txA.Status()) - require.Equal(choices.Accepted, vtxA.Status()) - require.Equal(choices.Rejected, txB.Status()) - require.Equal(choices.Rejected, vtxB0.Status()) - require.Equal(choices.Rejected, vtxB1.Status()) - require.Zero(avl.NumProcessing()) - require.True(avl.Finalized()) - require.True(avl.Quiesce()) -} - -func SplitVotingTest(t *testing.T, factory Factory) { - avl := factory.New() - - params := Parameters{ - Parameters: snowball.Parameters{ - K: 2, - Alpha: 2, - BetaVirtuous: 1, - BetaRogue: 2, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - }, - Parents: 2, - BatchSize: 1, - } - vts := []Vertex{ - &TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }}, - &TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }}, - } - utxos := []ids.ID{ids.GenerateTestID()} - - err := avl.Initialize(context.Background(), snow.DefaultConsensusContextTest(), params, vts) - if err != nil { - t.Fatal(err) - } - - tx0 := &snowstorm.TestTx{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }} - tx0.InputIDsV = append(tx0.InputIDsV, utxos[0]) - - vtx0 := &TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: vts, - HeightV: 1, - TxsV: []snowstorm.Tx{tx0}, - } - - vtx1 := &TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: vts, - HeightV: 1, - TxsV: []snowstorm.Tx{tx0}, - } - - if err := 
avl.Add(context.Background(), vtx0); err != nil { - t.Fatal(err) - } else if err := avl.Add(context.Background(), vtx1); err != nil { - t.Fatal(err) - } - - sm1 := bag.UniqueBag[ids.ID]{} - sm1.Add(0, vtx0.IDV) // peer 0 votes for the tx though vtx0 - sm1.Add(1, vtx1.IDV) // peer 1 votes for the tx though vtx1 - - err = avl.RecordPoll(context.Background(), sm1) - switch { - case err != nil: - t.Fatal(err) - case avl.Finalized(): // avalanche shouldn't be finalized because the vertex transactions are still processing - t.Fatalf("An avalanche instance finalized too late") - case !compare.UnsortedEquals([]ids.ID{vtx0.IDV, vtx1.IDV}, avl.Preferences().List()): - t.Fatalf("Initial frontier failed to be set") - case tx0.Status() != choices.Accepted: - t.Fatalf("Tx should have been accepted") - } - - // Give alpha votes for both tranaction vertices - sm2 := bag.UniqueBag[ids.ID]{} - sm2.Add(0, vtx0.IDV, vtx1.IDV) // peer 0 votes for vtx0 and vtx1 - sm2.Add(1, vtx0.IDV, vtx1.IDV) // peer 1 votes for vtx0 and vtx1 - - err = avl.RecordPoll(context.Background(), sm2) - switch { - case err != nil: - t.Fatal(err) - case !avl.Finalized(): - t.Fatalf("An avalanche instance finalized too late") - case !compare.UnsortedEquals([]ids.ID{vtx0.IDV, vtx1.IDV}, avl.Preferences().List()): - t.Fatalf("Initial frontier failed to be set") - case tx0.Status() != choices.Accepted: - t.Fatalf("Tx should have been accepted") - } -} - -func TransitiveRejectionTest(t *testing.T, factory Factory) { - avl := factory.New() - - params := Parameters{ - Parameters: snowball.Parameters{ - K: 2, - Alpha: 2, - BetaVirtuous: 1, - BetaRogue: 2, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - }, - Parents: 2, - BatchSize: 1, - } - vts := []Vertex{ - &TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }}, - &TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: 
choices.Accepted, - }}, - } - utxos := []ids.ID{ids.GenerateTestID(), ids.GenerateTestID()} - - err := avl.Initialize(context.Background(), snow.DefaultConsensusContextTest(), params, vts) - if err != nil { - t.Fatal(err) - } - - tx0 := &snowstorm.TestTx{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }} - tx0.InputIDsV = append(tx0.InputIDsV, utxos[0]) - - vtx0 := &TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: vts, - HeightV: 1, - TxsV: []snowstorm.Tx{tx0}, - } - - tx1 := &snowstorm.TestTx{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }} - tx1.InputIDsV = append(tx1.InputIDsV, utxos[0]) - - vtx1 := &TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: vts, - HeightV: 1, - TxsV: []snowstorm.Tx{tx1}, - } - - tx2 := &snowstorm.TestTx{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }} - tx2.InputIDsV = append(tx2.InputIDsV, utxos[1]) - - vtx2 := &TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: []Vertex{vtx0}, - HeightV: 2, - TxsV: []snowstorm.Tx{tx2}, - } - - if err := avl.Add(context.Background(), vtx0); err != nil { - t.Fatal(err) - } else if err := avl.Add(context.Background(), vtx1); err != nil { - t.Fatal(err) - } else if err := avl.Add(context.Background(), vtx2); err != nil { - t.Fatal(err) - } - - sm := bag.UniqueBag[ids.ID]{} - sm.Add(0, vtx1.IDV) - sm.Add(1, vtx1.IDV) - - err = avl.RecordPoll(context.Background(), sm) - switch { - case err != nil: - t.Fatal(err) - case avl.Finalized(): - t.Fatalf("An avalanche instance finalized too early") - case !compare.UnsortedEquals([]ids.ID{vtx1.IDV}, avl.Preferences().List()): - t.Fatalf("Initial frontier failed to be set") - } 
- - err = avl.RecordPoll(context.Background(), sm) - switch { - case err != nil: - t.Fatal(err) - case avl.Finalized(): - t.Fatalf("An avalanche instance finalized too early") - case !compare.UnsortedEquals([]ids.ID{vtx1.IDV}, avl.Preferences().List()): - t.Fatalf("Initial frontier failed to be set") - case tx0.Status() != choices.Rejected: - t.Fatalf("Tx should have been rejected") - case tx1.Status() != choices.Accepted: - t.Fatalf("Tx should have been accepted") - case tx2.Status() != choices.Processing: - t.Fatalf("Tx should not have been decided") - } - - err = avl.RecordPoll(context.Background(), sm) - switch { - case err != nil: - t.Fatal(err) - case avl.Finalized(): - t.Fatalf("An avalanche instance finalized too early") - case !compare.UnsortedEquals([]ids.ID{vtx1.IDV}, avl.Preferences().List()): - t.Fatalf("Initial frontier failed to be set") - case tx0.Status() != choices.Rejected: - t.Fatalf("Tx should have been rejected") - case tx1.Status() != choices.Accepted: - t.Fatalf("Tx should have been accepted") - case tx2.Status() != choices.Processing: - t.Fatalf("Tx should not have been decided") - } -} - -func IsVirtuousTest(t *testing.T, factory Factory) { - avl := factory.New() - - params := Parameters{ - Parameters: snowball.Parameters{ - K: 2, - Alpha: 2, - BetaVirtuous: 1, - BetaRogue: 2, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - }, - Parents: 2, - BatchSize: 1, - } - vts := []Vertex{ - &TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }}, - &TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }}, - } - utxos := []ids.ID{ids.GenerateTestID(), ids.GenerateTestID()} - - err := avl.Initialize(context.Background(), snow.DefaultConsensusContextTest(), params, vts) - if err != nil { - t.Fatal(err) - } - - virtuous := avl.Virtuous() - switch { - case virtuous.Len() != 2: - 
t.Fatalf("Wrong number of virtuous.") - case !virtuous.Contains(vts[0].ID()): - t.Fatalf("Wrong virtuous") - case !virtuous.Contains(vts[1].ID()): - t.Fatalf("Wrong virtuous") - } - - tx0 := &snowstorm.TestTx{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }} - tx0.InputIDsV = append(tx0.InputIDsV, utxos[0]) - - vtx0 := &TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: vts, - HeightV: 1, - TxsV: []snowstorm.Tx{tx0}, - } - - tx1 := &snowstorm.TestTx{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }} - tx1.InputIDsV = append(tx1.InputIDsV, utxos[0]) - - vtx1 := &TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: vts, - HeightV: 1, - TxsV: []snowstorm.Tx{tx1}, - } - - if !avl.IsVirtuous(tx0) { - t.Fatalf("Should be virtuous.") - } else if !avl.IsVirtuous(tx1) { - t.Fatalf("Should be virtuous.") - } - - err = avl.Add(context.Background(), vtx0) - switch { - case err != nil: - t.Fatal(err) - case !avl.IsVirtuous(tx0): - t.Fatalf("Should be virtuous.") - case avl.IsVirtuous(tx1): - t.Fatalf("Should not be virtuous.") - } - - err = avl.Add(context.Background(), vtx1) - switch { - case err != nil: - t.Fatal(err) - case avl.IsVirtuous(tx0): - t.Fatalf("Should not be virtuous.") - case avl.IsVirtuous(tx1): - t.Fatalf("Should not be virtuous.") - } -} - -func QuiesceTest(t *testing.T, factory Factory) { - avl := factory.New() - - params := Parameters{ - Parameters: snowball.Parameters{ - K: 1, - Alpha: 1, - BetaVirtuous: 1, - BetaRogue: 2, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - }, - Parents: 2, - BatchSize: 1, - } - vts := []Vertex{ - &TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }}, - 
&TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }}, - } - utxos := []ids.ID{ids.GenerateTestID(), ids.GenerateTestID()} - - err := avl.Initialize(context.Background(), snow.DefaultConsensusContextTest(), params, vts) - if err != nil { - t.Fatal(err) - } - - tx0 := &snowstorm.TestTx{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }} - tx0.InputIDsV = append(tx0.InputIDsV, utxos[0]) - - vtx0 := &TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: vts, - HeightV: 1, - TxsV: []snowstorm.Tx{tx0}, - } - - tx1 := &snowstorm.TestTx{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }} - tx1.InputIDsV = append(tx1.InputIDsV, utxos[0]) - - vtx1 := &TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: vts, - HeightV: 1, - TxsV: []snowstorm.Tx{tx1}, - } - - tx2 := &snowstorm.TestTx{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }} - tx2.InputIDsV = append(tx2.InputIDsV, utxos[1]) - - vtx2 := &TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: vts, - HeightV: 1, - TxsV: []snowstorm.Tx{tx2}, - } - - // Add [vtx0] containing [tx0]. Because [tx0] is virtuous, the instance - // shouldn't quiesce. - if err := avl.Add(context.Background(), vtx0); err != nil { - t.Fatal(err) - } - if avl.Quiesce() { - t.Fatalf("Shouldn't quiesce") - } - - // Add [vtx1] containing [tx1]. Because [tx1] conflicts with [tx0], neither - // [tx0] nor [tx1] are now virtuous. This means there are no virtuous - // transaction left in the consensus instance and it can quiesce. 
- if err := avl.Add(context.Background(), vtx1); err != nil { - t.Fatal(err) - } - - // The virtuous frontier is only updated sometimes, so force the frontier to - // be re-calculated by changing the preference of tx1. - sm1 := bag.UniqueBag[ids.ID]{} - sm1.Add(0, vtx1.IDV) - if err := avl.RecordPoll(context.Background(), sm1); err != nil { - t.Fatal(err) - } - - if !avl.Quiesce() { - t.Fatalf("Should quiesce") - } - - // Add [vtx2] containing [tx2]. Because [tx2] is virtuous, the instance - // shouldn't quiesce, even though [tx0] and [tx1] conflict. - if err := avl.Add(context.Background(), vtx2); err != nil { - t.Fatal(err) - } - if avl.Quiesce() { - t.Fatalf("Shouldn't quiesce") - } - - sm2 := bag.UniqueBag[ids.ID]{} - sm2.Add(0, vtx2.IDV) - if err := avl.RecordPoll(context.Background(), sm2); err != nil { - t.Fatal(err) - } - - // Because [tx2] was accepted, there is again no remaining virtuous - // transactions left in consensus. - if !avl.Quiesce() { - t.Fatalf("Should quiesce") - } -} - -func QuiesceAfterVotingTest(t *testing.T, factory Factory) { - avl := factory.New() - - params := Parameters{ - Parameters: snowball.Parameters{ - K: 1, - Alpha: 1, - BetaVirtuous: 2, - BetaRogue: 2, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - }, - Parents: 2, - BatchSize: 1, - } - vts := []Vertex{ - &TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }}, - &TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }}, - } - utxos := []ids.ID{ - ids.GenerateTestID(), - ids.GenerateTestID(), - } - - err := avl.Initialize(context.Background(), snow.DefaultConsensusContextTest(), params, vts) - if err != nil { - t.Fatal(err) - } - - vtx0 := &TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: vts, - HeightV: 1, - TxsV: []snowstorm.Tx{ - 
&snowstorm.TestTx{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - InputIDsV: utxos[:1], - }, - }, - } - vtx1 := &TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: vts, - HeightV: 1, - TxsV: []snowstorm.Tx{ - &snowstorm.TestTx{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - InputIDsV: utxos[:1], - }, - }, - } - vtx2 := &TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: vts, - HeightV: 1, - TxsV: []snowstorm.Tx{ - &snowstorm.TestTx{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - InputIDsV: utxos[1:], - }, - }, - } - vtx3 := &TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: vts, - HeightV: 1, - TxsV: []snowstorm.Tx{}, - } - - if err := avl.Add(context.Background(), vtx0); err != nil { - t.Fatal(err) - } - if err := avl.Add(context.Background(), vtx1); err != nil { - t.Fatal(err) - } - if err := avl.Add(context.Background(), vtx2); err != nil { - t.Fatal(err) - } - if err := avl.Add(context.Background(), vtx3); err != nil { - t.Fatal(err) - } - - // Because [vtx2] and [vtx3] are virtuous, the instance shouldn't quiesce. - if avl.Quiesce() { - t.Fatalf("Shouldn't quiesce") - } - - sm12 := bag.UniqueBag[ids.ID]{} - sm12.Add(0, vtx1.IDV, vtx2.IDV) - if err := avl.RecordPoll(context.Background(), sm12); err != nil { - t.Fatal(err) - } - - // Because [vtx2] and [vtx3] are still processing, the instance shouldn't - // quiesce. 
- if avl.Quiesce() { - t.Fatalf("Shouldn't quiesce") - } - - sm023 := bag.UniqueBag[ids.ID]{} - sm023.Add(0, vtx0.IDV, vtx2.IDV, vtx3.IDV) - if err := avl.RecordPoll(context.Background(), sm023); err != nil { - t.Fatal(err) - } - - // Because [vtx3] is still processing, the instance shouldn't quiesce. - if avl.Quiesce() { - t.Fatalf("Shouldn't quiesce") - } - - sm3 := bag.UniqueBag[ids.ID]{} - sm3.Add(0, vtx3.IDV) - if err := avl.RecordPoll(context.Background(), sm3); err != nil { - t.Fatal(err) - } - - // Because [vtx0] and [vtx1] are conflicting and [vtx2] and [vtx3] are - // accepted, the instance can quiesce. - if !avl.Quiesce() { - t.Fatalf("Should quiesce") - } -} - -func TransactionVertexTest(t *testing.T, factory Factory) { - avl := factory.New() - - params := Parameters{ - Parameters: snowball.Parameters{ - K: 1, - Alpha: 1, - BetaVirtuous: 1, - BetaRogue: 1, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - }, - Parents: 2, - BatchSize: 1, - } - seedVertices := []Vertex{ - &TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }}, - &TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }}, - } - - err := avl.Initialize(context.Background(), snow.DefaultConsensusContextTest(), params, seedVertices) - if err != nil { - t.Fatal(err) - } - - // Add a vertex with no transactions to test that the transaction vertex is - // required to be accepted before the vertex is. 
- vtx0 := &TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: seedVertices, - HeightV: 1, - } - if err := avl.Add(context.Background(), vtx0); err != nil { - t.Fatal(err) - } - if !avl.VertexIssued(vtx0) { - t.Fatal("vertex with no transaction must have been issued") - } - - // Because the transaction vertex should be processing, the vertex should - // still be processing. - if vtx0.Status() != choices.Processing { - t.Fatalf("vertex with no transaction should still be processing, got %v", vtx0.Status()) - } - - // After voting for the transaction vertex beta times, the vertex should - // also be accepted. - bags := bag.UniqueBag[ids.ID]{} - bags.Add(0, vtx0.IDV) - bags.Add(1, vtx0.IDV) - if err := avl.RecordPoll(context.Background(), bags); err != nil { - t.Fatalf("unexpected RecordPoll error %v", err) - } - - switch { - case vtx0.Status() != choices.Accepted: - t.Fatalf("vertex with no transaction should have been accepted after polling, got %v", vtx0.Status()) - case !avl.Finalized(): - t.Fatal("expected finalized avalanche instance") - case !compare.UnsortedEquals([]ids.ID{vtx0.IDV}, avl.Preferences().List()): - t.Fatalf("unexpected frontier %v", avl.Preferences().List()) - } -} - -func OrphansTest(t *testing.T, factory Factory) { - avl := factory.New() - - params := Parameters{ - Parameters: snowball.Parameters{ - K: 1, - Alpha: 1, - BetaVirtuous: math.MaxInt32, - BetaRogue: math.MaxInt32, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - }, - Parents: 2, - BatchSize: 1, - } - vts := []Vertex{ - &TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }}, - &TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }}, - } - utxos := []ids.ID{ids.GenerateTestID(), ids.GenerateTestID()} - - err := 
avl.Initialize(context.Background(), snow.DefaultConsensusContextTest(), params, vts) - if err != nil { - t.Fatal(err) - } - - tx0 := &snowstorm.TestTx{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }} - tx0.InputIDsV = append(tx0.InputIDsV, utxos[0]) - - vtx0 := &TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: vts, - HeightV: 1, - TxsV: []snowstorm.Tx{tx0}, - } - - tx1 := &snowstorm.TestTx{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }} - tx1.InputIDsV = append(tx1.InputIDsV, utxos[0]) - - vtx1 := &TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: vts, - HeightV: 1, - TxsV: []snowstorm.Tx{tx1}, - } - - tx2 := &snowstorm.TestTx{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }} - tx2.InputIDsV = append(tx2.InputIDsV, utxos[1]) - - vtx2 := &TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: []Vertex{vtx0}, - HeightV: 2, - TxsV: []snowstorm.Tx{tx2}, - } - - // [vtx0] contains [tx0], both of which will be preferred, so [tx0] is not - // an orphan. - if err := avl.Add(context.Background(), vtx0); err != nil { - t.Fatal(err) - } - if orphans := avl.Orphans(); orphans.Len() != 0 { - t.Fatalf("Wrong number of orphans") - } - - // [vtx1] contains [tx1], which conflicts with [tx0]. [tx0] is contained in - // a preferred vertex, and neither [tx0] nor [tx1] are virtuous. - if err := avl.Add(context.Background(), vtx1); err != nil { - t.Fatal(err) - } - if orphans := avl.Orphans(); orphans.Len() != 0 { - t.Fatalf("Wrong number of orphans") - } - - // [vtx2] contains [tx2], both of which will be preferred, so [tx2] is not - // an orphan. 
- if err := avl.Add(context.Background(), vtx2); err != nil { - t.Fatal(err) - } - if orphans := avl.Orphans(); orphans.Len() != 0 { - t.Fatalf("Wrong number of orphans") - } - - sm := bag.UniqueBag[ids.ID]{} - sm.Add(0, vtx1.IDV) - if err := avl.RecordPoll(context.Background(), sm); err != nil { - t.Fatal(err) - } - - // By voting for [vtx1], [vtx2] is no longer preferred because it's parent - // [vtx0] contains [tx0] that is not preferred. Because [tx2] is virtuous, - // but no longer contained in a preferred vertex, it should now be - // considered an orphan. - orphans := avl.Orphans() - if orphans.Len() != 1 { - t.Fatalf("Wrong number of orphans") - } - if !orphans.Contains(tx2.ID()) { - t.Fatalf("Wrong orphan") - } -} - -func OrphansUpdateTest(t *testing.T, factory Factory) { - avl := factory.New() - - params := Parameters{ - Parameters: snowball.Parameters{ - K: 1, - Alpha: 1, - BetaVirtuous: math.MaxInt32, - BetaRogue: math.MaxInt32, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - }, - Parents: 2, - BatchSize: 1, - } - seedVertices := []Vertex{ - &TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }}, - &TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }}, - } - err := avl.Initialize(context.Background(), snow.DefaultConsensusContextTest(), params, seedVertices) - if err != nil { - t.Fatal(err) - } - - utxos := []ids.ID{ids.GenerateTestID(), ids.GenerateTestID()} - - // tx0 is a virtuous transaction. - tx0 := &snowstorm.TestTx{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - InputIDsV: utxos[:1], - } - vtx0 := &TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: seedVertices, - HeightV: 1, - TxsV: []snowstorm.Tx{tx0}, - } - - // tx1 conflicts with tx2. 
- tx1 := &snowstorm.TestTx{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - InputIDsV: utxos[1:], - } - vtx1 := &TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: seedVertices, - HeightV: 1, - TxsV: []snowstorm.Tx{tx1}, - } - - tx2 := &snowstorm.TestTx{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - InputIDsV: utxos[1:], - } - vtx2 := &TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: seedVertices, - HeightV: 1, - TxsV: []snowstorm.Tx{tx2}, - } - - if err := avl.Add(context.Background(), vtx0); err != nil { - t.Fatal(err) - } - if err := avl.Add(context.Background(), vtx1); err != nil { - t.Fatal(err) - } - if err := avl.Add(context.Background(), vtx2); err != nil { - t.Fatal(err) - } - - // vtx0 is virtuous, so it should be preferred. vtx1 and vtx2 conflict, but - // vtx1 was issued before vtx2, so vtx1 should be preferred and vtx2 should - // not be preferred. - expectedPreferredSet := set.Set[ids.ID]{ - vtx0.ID(): struct{}{}, - vtx1.ID(): struct{}{}, - } - preferenceSet := avl.Preferences().List() - if !compare.UnsortedEquals(expectedPreferredSet.List(), preferenceSet) { - t.Fatalf("expected preferenceSet %v, got %v", expectedPreferredSet, preferenceSet) - } - - // Record a successful poll to change the preference from vtx1 to vtx2 and - // update the orphan set. - votes := bag.UniqueBag[ids.ID]{} - votes.Add(0, vtx2.IDV) - if err := avl.RecordPoll(context.Background(), votes); err != nil { - t.Fatal(err) - } - - // Because vtx2 was voted for over vtx1, they should be swapped in the - // preferred set. 
- expectedPreferredSet = set.Set[ids.ID]{ - vtx0.ID(): struct{}{}, - vtx2.ID(): struct{}{}, - } - preferenceSet = avl.Preferences().List() - if !compare.UnsortedEquals(expectedPreferredSet.List(), preferenceSet) { - t.Fatalf("expected preferenceSet %v, got %v", expectedPreferredSet, preferenceSet) - } - - // Because there are no virtuous transactions that are not in a preferred - // vertex, there should be no orphans. - expectedOrphanSet := set.Set[ids.ID]{} - orphanSet := avl.Orphans() - if !compare.UnsortedEquals(expectedOrphanSet.List(), orphanSet.List()) { - t.Fatalf("expected orphanSet %v, got %v", expectedOrphanSet, orphanSet) - } -} - -func ErrorOnVacuousAcceptTest(t *testing.T, factory Factory) { - avl := factory.New() - - params := Parameters{ - Parameters: snowball.Parameters{ - K: 1, - Alpha: 1, - BetaVirtuous: math.MaxInt32, - BetaRogue: math.MaxInt32, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - }, - Parents: 2, - BatchSize: 1, - } - vts := []Vertex{&TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }}} - - err := avl.Initialize(context.Background(), snow.DefaultConsensusContextTest(), params, vts) - if err != nil { - t.Fatal(err) - } - - tx0 := &snowstorm.TestTx{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - AcceptV: errTest, - StatusV: choices.Processing, - }} - - vtx0 := &TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: vts, - HeightV: 1, - TxsV: []snowstorm.Tx{tx0}, - } - - if err := avl.Add(context.Background(), vtx0); err == nil { - t.Fatalf("Should have errored on vertex issuance") - } -} - -func ErrorOnTxAcceptTest(t *testing.T, factory Factory) { - avl := factory.New() - - params := Parameters{ - Parameters: snowball.Parameters{ - K: 1, - Alpha: 1, - BetaVirtuous: 1, - BetaRogue: 1, - ConcurrentRepolls: 1, - OptimalProcessing: 
1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - }, - Parents: 2, - BatchSize: 1, - } - vts := []Vertex{&TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }}} - utxos := []ids.ID{ids.GenerateTestID()} - - err := avl.Initialize(context.Background(), snow.DefaultConsensusContextTest(), params, vts) - if err != nil { - t.Fatal(err) - } - - tx0 := &snowstorm.TestTx{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - AcceptV: errTest, - StatusV: choices.Processing, - }} - tx0.InputIDsV = append(tx0.InputIDsV, utxos[0]) - - vtx0 := &TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: vts, - HeightV: 1, - TxsV: []snowstorm.Tx{tx0}, - } - - if err := avl.Add(context.Background(), vtx0); err != nil { - t.Fatal(err) - } - - votes := bag.UniqueBag[ids.ID]{} - votes.Add(0, vtx0.IDV) - if err := avl.RecordPoll(context.Background(), votes); err == nil { - t.Fatalf("Should have errored on vertex acceptance") - } -} - -func ErrorOnVtxAcceptTest(t *testing.T, factory Factory) { - avl := factory.New() - - params := Parameters{ - Parameters: snowball.Parameters{ - K: 1, - Alpha: 1, - BetaVirtuous: 1, - BetaRogue: 1, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - }, - Parents: 2, - BatchSize: 1, - } - vts := []Vertex{&TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }}} - utxos := []ids.ID{ids.GenerateTestID()} - - err := avl.Initialize(context.Background(), snow.DefaultConsensusContextTest(), params, vts) - if err != nil { - t.Fatal(err) - } - - tx0 := &snowstorm.TestTx{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }} - tx0.InputIDsV = append(tx0.InputIDsV, utxos[0]) - - vtx0 := &TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: 
ids.GenerateTestID(), - AcceptV: errTest, - StatusV: choices.Processing, - }, - ParentsV: vts, - HeightV: 1, - TxsV: []snowstorm.Tx{tx0}, - } - - if err := avl.Add(context.Background(), vtx0); err != nil { - t.Fatal(err) - } - - votes := bag.UniqueBag[ids.ID]{} - votes.Add(0, vtx0.IDV) - if err := avl.RecordPoll(context.Background(), votes); err == nil { - t.Fatalf("Should have errored on vertex acceptance") - } -} - -func ErrorOnVtxRejectTest(t *testing.T, factory Factory) { - avl := factory.New() - - params := Parameters{ - Parameters: snowball.Parameters{ - K: 1, - Alpha: 1, - BetaVirtuous: 1, - BetaRogue: 1, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - }, - Parents: 2, - BatchSize: 1, - } - vts := []Vertex{&TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }}} - utxos := []ids.ID{ids.GenerateTestID()} - - err := avl.Initialize(context.Background(), snow.DefaultConsensusContextTest(), params, vts) - if err != nil { - t.Fatal(err) - } - - tx0 := &snowstorm.TestTx{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }} - tx0.InputIDsV = append(tx0.InputIDsV, utxos[0]) - - vtx0 := &TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: vts, - HeightV: 1, - TxsV: []snowstorm.Tx{tx0}, - } - - tx1 := &snowstorm.TestTx{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }} - tx1.InputIDsV = append(tx1.InputIDsV, utxos[0]) - - vtx1 := &TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - RejectV: errTest, - StatusV: choices.Processing, - }, - ParentsV: vts, - HeightV: 1, - TxsV: []snowstorm.Tx{tx1}, - } - - if err := avl.Add(context.Background(), vtx0); err != nil { - t.Fatal(err) - } else if err := avl.Add(context.Background(), vtx1); err != nil { - 
t.Fatal(err) - } - - votes := bag.UniqueBag[ids.ID]{} - votes.Add(0, vtx0.IDV) - if err := avl.RecordPoll(context.Background(), votes); err == nil { - t.Fatalf("Should have errored on vertex rejection") - } -} - -func ErrorOnParentVtxRejectTest(t *testing.T, factory Factory) { - avl := factory.New() - - params := Parameters{ - Parameters: snowball.Parameters{ - K: 1, - Alpha: 1, - BetaVirtuous: 1, - BetaRogue: 1, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - }, - Parents: 2, - BatchSize: 1, - } - vts := []Vertex{&TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }}} - utxos := []ids.ID{ids.GenerateTestID()} - - err := avl.Initialize(context.Background(), snow.DefaultConsensusContextTest(), params, vts) - if err != nil { - t.Fatal(err) - } - - tx0 := &snowstorm.TestTx{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }} - tx0.InputIDsV = append(tx0.InputIDsV, utxos[0]) - - vtx0 := &TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: vts, - HeightV: 1, - TxsV: []snowstorm.Tx{tx0}, - } - - tx1 := &snowstorm.TestTx{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }} - tx1.InputIDsV = append(tx1.InputIDsV, utxos[0]) - - vtx1 := &TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - RejectV: errTest, - StatusV: choices.Processing, - }, - ParentsV: vts, - HeightV: 1, - TxsV: []snowstorm.Tx{tx1}, - } - - vtx2 := &TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: []Vertex{vtx1}, - HeightV: 2, - TxsV: []snowstorm.Tx{tx1}, - } - - if err := avl.Add(context.Background(), vtx0); err != nil { - t.Fatal(err) - } else if err := avl.Add(context.Background(), vtx1); err != nil { - 
t.Fatal(err) - } else if err := avl.Add(context.Background(), vtx2); err != nil { - t.Fatal(err) - } - - votes := bag.UniqueBag[ids.ID]{} - votes.Add(0, vtx0.IDV) - if err := avl.RecordPoll(context.Background(), votes); err == nil { - t.Fatalf("Should have errored on vertex rejection") - } -} - -func ErrorOnTransitiveVtxRejectTest(t *testing.T, factory Factory) { - avl := factory.New() - - params := Parameters{ - Parameters: snowball.Parameters{ - K: 1, - Alpha: 1, - BetaVirtuous: 1, - BetaRogue: 1, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - }, - Parents: 2, - BatchSize: 1, - } - vts := []Vertex{&TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }}} - utxos := []ids.ID{ids.GenerateTestID()} - - err := avl.Initialize(context.Background(), snow.DefaultConsensusContextTest(), params, vts) - if err != nil { - t.Fatal(err) - } - - tx0 := &snowstorm.TestTx{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }} - tx0.InputIDsV = append(tx0.InputIDsV, utxos[0]) - - vtx0 := &TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: vts, - HeightV: 1, - TxsV: []snowstorm.Tx{tx0}, - } - - tx1 := &snowstorm.TestTx{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }} - tx1.InputIDsV = append(tx1.InputIDsV, utxos[0]) - - vtx1 := &TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: vts, - HeightV: 1, - TxsV: []snowstorm.Tx{tx1}, - } - - vtx2 := &TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - RejectV: errTest, - StatusV: choices.Processing, - }, - ParentsV: []Vertex{vtx1}, - HeightV: 1, - } - - if err := avl.Add(context.Background(), vtx0); err != nil { - t.Fatal(err) - } else if err := 
avl.Add(context.Background(), vtx1); err != nil { - t.Fatal(err) - } else if err := avl.Add(context.Background(), vtx2); err != nil { - t.Fatal(err) - } - - votes := bag.UniqueBag[ids.ID]{} - votes.Add(0, vtx0.IDV) - if err := avl.RecordPoll(context.Background(), votes); err == nil { - t.Fatalf("Should have errored on vertex rejection") - } -} - -func SilenceTransactionVertexEventsTest(t *testing.T, factory Factory) { - avl := factory.New() - - params := Parameters{ - Parameters: snowball.Parameters{ - K: 1, - Alpha: 1, - BetaVirtuous: 1, - BetaRogue: 1, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - }, - Parents: 2, - BatchSize: 1, - } - vts := []Vertex{&TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }}} - - ctx := snow.DefaultConsensusContextTest() - tracker := snow.NewAcceptorTracker() - ctx.TxAcceptor = tracker - - err := avl.Initialize(context.Background(), ctx, params, vts) - if err != nil { - t.Fatal(err) - } - - vtx0 := &TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: vts, - HeightV: 1, - } - - if err := avl.Add(context.Background(), vtx0); err != nil { - t.Fatal(err) - } - - votes := bag.UniqueBag[ids.ID]{} - votes.Add(0, vtx0.IDV) - if err := avl.RecordPoll(context.Background(), votes); err != nil { - t.Fatal(err) - } - - if _, accepted := tracker.IsAccepted(vtx0.ID()); accepted { - t.Fatalf("Shouldn't have reported the transaction vertex as accepted") - } -} diff --git a/avalanchego/snow/consensus/avalanche/factory.go b/avalanchego/snow/consensus/avalanche/factory.go deleted file mode 100644 index ed74831c..00000000 --- a/avalanchego/snow/consensus/avalanche/factory.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package avalanche - -// Factory returns new instances of Consensus -type Factory interface { - New() Consensus -} diff --git a/avalanchego/snow/consensus/avalanche/parameters.go b/avalanchego/snow/consensus/avalanche/parameters.go deleted file mode 100644 index 9027df61..00000000 --- a/avalanchego/snow/consensus/avalanche/parameters.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package avalanche - -import ( - "fmt" - - "github.com/ava-labs/avalanchego/snow/consensus/snowball" -) - -// Parameters the avalanche parameters include the snowball parameters and the -// optimal number of parents -type Parameters struct { - snowball.Parameters - Parents int `json:"parents" yaml:"parents"` - BatchSize int `json:"batchSize" yaml:"batchSize"` -} - -// Valid returns nil if the parameters describe a valid initialization. -func (p Parameters) Valid() error { - switch { - case p.Parents <= 1: - return fmt.Errorf("parents = %d: Fails the condition that: 1 < Parents", p.Parents) - case p.BatchSize <= 0: - return fmt.Errorf("batchSize = %d: Fails the condition that: 0 < BatchSize", p.BatchSize) - default: - return p.Parameters.Verify() - } -} diff --git a/avalanchego/snow/consensus/avalanche/parameters_test.go b/avalanchego/snow/consensus/avalanche/parameters_test.go deleted file mode 100644 index 748aedf6..00000000 --- a/avalanchego/snow/consensus/avalanche/parameters_test.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package avalanche - -import ( - "testing" - - "github.com/ava-labs/avalanchego/snow/consensus/snowball" -) - -func TestParametersValid(t *testing.T) { - p := Parameters{ - Parameters: snowball.Parameters{ - K: 1, - Alpha: 1, - BetaVirtuous: 1, - BetaRogue: 1, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - }, - Parents: 2, - BatchSize: 1, - } - - if err := p.Valid(); err != nil { - t.Fatal(err) - } -} - -func TestParametersInvalidParents(t *testing.T) { - p := Parameters{ - Parameters: snowball.Parameters{ - K: 1, - Alpha: 1, - BetaVirtuous: 1, - BetaRogue: 1, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - }, - Parents: 1, - BatchSize: 1, - } - - if err := p.Valid(); err == nil { - t.Fatalf("Should have failed due to invalid parents") - } -} - -func TestParametersInvalidBatchSize(t *testing.T) { - p := Parameters{ - Parameters: snowball.Parameters{ - K: 1, - Alpha: 1, - BetaVirtuous: 1, - BetaRogue: 1, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - }, - Parents: 2, - BatchSize: 0, - } - - if err := p.Valid(); err == nil { - t.Fatalf("Should have failed due to invalid batch size") - } -} diff --git a/avalanchego/snow/consensus/avalanche/poll/early_term_no_traversal.go b/avalanchego/snow/consensus/avalanche/poll/early_term_no_traversal.go deleted file mode 100644 index 5ae57232..00000000 --- a/avalanchego/snow/consensus/avalanche/poll/early_term_no_traversal.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package poll - -import ( - "fmt" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/bag" - - sets "github.com/ava-labs/avalanchego/utils/set" -) - -var ( - _ Factory = (*earlyTermNoTraversalFactory)(nil) - _ Poll = (*earlyTermNoTraversalPoll)(nil) -) - -type earlyTermNoTraversalFactory struct { - alpha int -} - -// NewEarlyTermNoTraversalFactory returns a factory that returns polls with -// early termination, without doing DAG traversals -func NewEarlyTermNoTraversalFactory(alpha int) Factory { - return &earlyTermNoTraversalFactory{alpha: alpha} -} - -func (f *earlyTermNoTraversalFactory) New(vdrs bag.Bag[ids.NodeID]) Poll { - return &earlyTermNoTraversalPoll{ - polled: vdrs, - alpha: f.alpha, - } -} - -// earlyTermNoTraversalPoll finishes when any remaining validators can't change -// the result of the poll. However, does not terminate tightly with this bound. -// It terminates as quickly as it can without performing any DAG traversals. -type earlyTermNoTraversalPoll struct { - votes bag.UniqueBag[ids.ID] - polled bag.Bag[ids.NodeID] - alpha int -} - -// Vote registers a response for this poll -func (p *earlyTermNoTraversalPoll) Vote(vdr ids.NodeID, votes []ids.ID) { - count := p.polled.Count(vdr) - // make sure that a validator can't respond multiple times - p.polled.Remove(vdr) - - // track the votes the validator responded with - for i := 0; i < count; i++ { - p.votes.Add(uint(p.polled.Len()+i), votes...) - } -} - -// Finished returns true when all validators have voted -func (p *earlyTermNoTraversalPoll) Finished() bool { - // If there are no outstanding queries, the poll is finished - numPending := p.polled.Len() - if numPending == 0 { - return true - } - // If there are still enough pending responses to include another vertex, - // then the poll must wait for more responses - if numPending > p.alpha { - return false - } - - // Ignore any vertex that has already received alpha votes. 
To safely skip - // DAG traversal, assume that all votes for vertices with less than alpha - // votes will be applied to a single shared ancestor. In this case, the poll - // can terminate early, iff there are not enough pending votes for this - // ancestor to receive alpha votes. - partialVotes := sets.Bits64(0) - for _, vote := range p.votes.List() { - if voters := p.votes.GetSet(vote); voters.Len() < p.alpha { - partialVotes.Union(voters) - } - } - return partialVotes.Len()+numPending < p.alpha -} - -// Result returns the result of this poll -func (p *earlyTermNoTraversalPoll) Result() bag.UniqueBag[ids.ID] { - return p.votes -} - -func (p *earlyTermNoTraversalPoll) PrefixedString(prefix string) string { - return fmt.Sprintf( - "waiting on %s\n%sreceived %s", - p.polled.PrefixedString(prefix), - prefix, - p.votes.PrefixedString(prefix), - ) -} - -func (p *earlyTermNoTraversalPoll) String() string { - return p.PrefixedString("") -} diff --git a/avalanchego/snow/consensus/avalanche/poll/early_term_no_traversal_test.go b/avalanchego/snow/consensus/avalanche/poll/early_term_no_traversal_test.go deleted file mode 100644 index 94643983..00000000 --- a/avalanchego/snow/consensus/avalanche/poll/early_term_no_traversal_test.go +++ /dev/null @@ -1,211 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package poll - -import ( - "testing" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/bag" -) - -func TestEarlyTermNoTraversalResults(t *testing.T) { - alpha := 1 - - vtxID := ids.ID{1} - votes := []ids.ID{vtxID} - - vdr1 := ids.NodeID{1} // k = 1 - - vdrs := bag.Bag[ids.NodeID]{} - vdrs.Add(vdr1) - - factory := NewEarlyTermNoTraversalFactory(alpha) - poll := factory.New(vdrs) - - poll.Vote(vdr1, votes) - if !poll.Finished() { - t.Fatalf("Poll did not terminate after receiving k votes") - } - - result := poll.Result() - if list := result.List(); len(list) != 1 { - t.Fatalf("Wrong number of vertices returned") - } else if retVtxID := list[0]; retVtxID != vtxID { - t.Fatalf("Wrong vertex returned") - } else if set := result.GetSet(vtxID); set.Len() != 1 { - t.Fatalf("Wrong number of votes returned") - } -} - -func TestEarlyTermNoTraversalString(t *testing.T) { - alpha := 2 - - vtxID := ids.ID{1} - votes := []ids.ID{vtxID} - - vdr1 := ids.NodeID{1} - vdr2 := ids.NodeID{2} // k = 2 - - vdrs := bag.Bag[ids.NodeID]{} - vdrs.Add( - vdr1, - vdr2, - ) - - factory := NewEarlyTermNoTraversalFactory(alpha) - poll := factory.New(vdrs) - - poll.Vote(vdr1, votes) - - expected := `waiting on Bag: (Size = 1) - NodeID-BaMPFdqMUQ46BV8iRcwbVfsam55kMqcp: 1 -received UniqueBag: (Size = 1) - SYXsAycDPUu4z2ZksJD5fh5nTDcH3vCFHnpcVye5XuJ2jArg: 0000000000000002` - if result := poll.String(); expected != result { - t.Fatalf("Poll should have returned:\n%s\nbut returned\n%s", expected, result) - } -} - -func TestEarlyTermNoTraversalDropsDuplicatedVotes(t *testing.T) { - alpha := 2 - - vtxID := ids.ID{1} - votes := []ids.ID{vtxID} - - vdr1 := ids.NodeID{1} - vdr2 := ids.NodeID{2} // k = 2 - - vdrs := bag.Bag[ids.NodeID]{} - vdrs.Add( - vdr1, - vdr2, - ) - - factory := NewEarlyTermNoTraversalFactory(alpha) - poll := factory.New(vdrs) - - poll.Vote(vdr1, votes) - if poll.Finished() { - t.Fatalf("Poll finished after less than alpha votes") - } - poll.Vote(vdr1, 
votes) - if poll.Finished() { - t.Fatalf("Poll finished after getting a duplicated vote") - } - poll.Vote(vdr2, votes) - if !poll.Finished() { - t.Fatalf("Poll did not terminate after receiving k votes") - } -} - -func TestEarlyTermNoTraversalTerminatesEarly(t *testing.T) { - alpha := 3 - - vtxID := ids.ID{1} - votes := []ids.ID{vtxID} - - vdr1 := ids.NodeID{1} - vdr2 := ids.NodeID{2} - vdr3 := ids.NodeID{3} - vdr4 := ids.NodeID{4} - vdr5 := ids.NodeID{5} // k = 5 - - vdrs := bag.Bag[ids.NodeID]{} - vdrs.Add( - vdr1, - vdr2, - vdr3, - vdr4, - vdr5, - ) - - factory := NewEarlyTermNoTraversalFactory(alpha) - poll := factory.New(vdrs) - - poll.Vote(vdr1, votes) - if poll.Finished() { - t.Fatalf("Poll finished after less than alpha votes") - } - poll.Vote(vdr2, votes) - if poll.Finished() { - t.Fatalf("Poll finished after less than alpha votes") - } - poll.Vote(vdr3, votes) - if !poll.Finished() { - t.Fatalf("Poll did not terminate early after receiving alpha votes for one vertex and none for other vertices") - } -} - -func TestEarlyTermNoTraversalForSharedAncestor(t *testing.T) { - alpha := 4 - - vtxA := ids.ID{1} - vtxB := ids.ID{2} - vtxC := ids.ID{3} - vtxD := ids.ID{4} - - // If validators 1-3 vote for frontier vertices - // B, C, and D respectively, which all share the common ancestor - // A, then we cannot terminate early with alpha = k = 4 - // If the final vote is cast for any of A, B, C, or D, then - // vertex A will have transitively received alpha = 4 votes - vdr1 := ids.NodeID{1} - vdr2 := ids.NodeID{2} - vdr3 := ids.NodeID{3} - vdr4 := ids.NodeID{4} - - vdrs := bag.Bag[ids.NodeID]{} - vdrs.Add(vdr1) - vdrs.Add(vdr2) - vdrs.Add(vdr3) - vdrs.Add(vdr4) - - factory := NewEarlyTermNoTraversalFactory(alpha) - poll := factory.New(vdrs) - - poll.Vote(vdr1, []ids.ID{vtxB}) - if poll.Finished() { - t.Fatalf("Poll finished early after receiving one vote") - } - poll.Vote(vdr2, []ids.ID{vtxC}) - if poll.Finished() { - t.Fatalf("Poll finished early after receiving two 
votes") - } - poll.Vote(vdr3, []ids.ID{vtxD}) - if poll.Finished() { - t.Fatalf("Poll terminated early, when a shared ancestor could have received alpha votes") - } - poll.Vote(vdr4, []ids.ID{vtxA}) - if !poll.Finished() { - t.Fatalf("Poll did not terminate after receiving all outstanding votes") - } -} - -func TestEarlyTermNoTraversalWithFastDrops(t *testing.T) { - alpha := 2 - - vdr1 := ids.NodeID{1} - vdr2 := ids.NodeID{2} - vdr3 := ids.NodeID{3} // k = 3 - - vdrs := bag.Bag[ids.NodeID]{} - vdrs.Add( - vdr1, - vdr2, - vdr3, - ) - - factory := NewEarlyTermNoTraversalFactory(alpha) - poll := factory.New(vdrs) - - poll.Vote(vdr1, nil) - if poll.Finished() { - t.Fatalf("Poll finished early after dropping one vote") - } - poll.Vote(vdr2, nil) - if !poll.Finished() { - t.Fatalf("Poll did not terminate after dropping two votes") - } -} diff --git a/avalanchego/snow/consensus/avalanche/poll/interfaces.go b/avalanchego/snow/consensus/avalanche/poll/interfaces.go deleted file mode 100644 index 70cbfaba..00000000 --- a/avalanchego/snow/consensus/avalanche/poll/interfaces.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package poll - -import ( - "fmt" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/bag" - "github.com/ava-labs/avalanchego/utils/formatting" -) - -// Set is a collection of polls -type Set interface { - fmt.Stringer - - Add(requestID uint32, vdrs bag.Bag[ids.NodeID]) bool - Vote(requestID uint32, vdr ids.NodeID, votes []ids.ID) []bag.UniqueBag[ids.ID] - Len() int -} - -// Poll is an outstanding poll -type Poll interface { - formatting.PrefixedStringer - - Vote(vdr ids.NodeID, votes []ids.ID) - Finished() bool - Result() bag.UniqueBag[ids.ID] -} - -// Factory creates a new Poll -type Factory interface { - New(vdrs bag.Bag[ids.NodeID]) Poll -} diff --git a/avalanchego/snow/consensus/avalanche/poll/no_early_term.go b/avalanchego/snow/consensus/avalanche/poll/no_early_term.go deleted file mode 100644 index 21ff49de..00000000 --- a/avalanchego/snow/consensus/avalanche/poll/no_early_term.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package poll - -import ( - "fmt" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/bag" -) - -var ( - _ Factory = (*noEarlyTermFactory)(nil) - _ Poll = (*noEarlyTermPoll)(nil) -) - -type noEarlyTermFactory struct{} - -// NewNoEarlyTermFactory returns a factory that returns polls with no early -// termination -func NewNoEarlyTermFactory() Factory { - return noEarlyTermFactory{} -} - -func (noEarlyTermFactory) New(vdrs bag.Bag[ids.NodeID]) Poll { - return &noEarlyTermPoll{polled: vdrs} -} - -// noEarlyTermPoll finishes when all polled validators either respond to the -// query or a timeout occurs -type noEarlyTermPoll struct { - votes bag.UniqueBag[ids.ID] - polled bag.Bag[ids.NodeID] -} - -// Vote registers a response for this poll -func (p *noEarlyTermPoll) Vote(vdr ids.NodeID, votes []ids.ID) { - count := p.polled.Count(vdr) - // make sure that a validator can't respond multiple times - p.polled.Remove(vdr) - - for i := 0; i < count; i++ { - // track the votes the validator responded with - p.votes.Add(uint(p.polled.Len()+i), votes...) - } -} - -// Finished returns true when all validators have voted -func (p *noEarlyTermPoll) Finished() bool { - return p.polled.Len() == 0 -} - -// Result returns the result of this poll -func (p *noEarlyTermPoll) Result() bag.UniqueBag[ids.ID] { - return p.votes -} - -func (p *noEarlyTermPoll) PrefixedString(prefix string) string { - return fmt.Sprintf( - "waiting on %s\n%sreceived %s", - p.polled.PrefixedString(prefix), - prefix, - p.votes.PrefixedString(prefix), - ) -} - -func (p *noEarlyTermPoll) String() string { - return p.PrefixedString("") -} diff --git a/avalanchego/snow/consensus/avalanche/poll/no_early_term_test.go b/avalanchego/snow/consensus/avalanche/poll/no_early_term_test.go deleted file mode 100644 index d4ced58a..00000000 --- a/avalanchego/snow/consensus/avalanche/poll/no_early_term_test.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. 
All rights reserved. -// See the file LICENSE for licensing terms. - -package poll - -import ( - "testing" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/bag" -) - -func TestNoEarlyTermResults(t *testing.T) { - vtxID := ids.ID{1} - votes := []ids.ID{vtxID} - - vdr1 := ids.NodeID{1} // k = 1 - - vdrs := bag.Bag[ids.NodeID]{} - vdrs.Add(vdr1) - - factory := NewNoEarlyTermFactory() - poll := factory.New(vdrs) - - poll.Vote(vdr1, votes) - if !poll.Finished() { - t.Fatalf("Poll did not terminate after receiving k votes") - } - - result := poll.Result() - if list := result.List(); len(list) != 1 { - t.Fatalf("Wrong number of vertices returned") - } else if retVtxID := list[0]; retVtxID != vtxID { - t.Fatalf("Wrong vertex returned") - } else if set := result.GetSet(vtxID); set.Len() != 1 { - t.Fatalf("Wrong number of votes returned") - } -} - -func TestNoEarlyTermString(t *testing.T) { - vtxID := ids.ID{1} - votes := []ids.ID{vtxID} - - vdr1 := ids.NodeID{1} - vdr2 := ids.NodeID{2} // k = 2 - - vdrs := bag.Bag[ids.NodeID]{} - vdrs.Add( - vdr1, - vdr2, - ) - - factory := NewNoEarlyTermFactory() - poll := factory.New(vdrs) - - poll.Vote(vdr1, votes) - - expected := `waiting on Bag: (Size = 1) - NodeID-BaMPFdqMUQ46BV8iRcwbVfsam55kMqcp: 1 -received UniqueBag: (Size = 1) - SYXsAycDPUu4z2ZksJD5fh5nTDcH3vCFHnpcVye5XuJ2jArg: 0000000000000002` - if result := poll.String(); expected != result { - t.Fatalf("Poll should have returned %s but returned %s", expected, result) - } -} - -func TestNoEarlyTermDropsDuplicatedVotes(t *testing.T) { - vtxID := ids.ID{1} - votes := []ids.ID{vtxID} - - vdr1 := ids.NodeID{1} - vdr2 := ids.NodeID{2} // k = 2 - - vdrs := bag.Bag[ids.NodeID]{} - vdrs.Add( - vdr1, - vdr2, - ) - - factory := NewNoEarlyTermFactory() - poll := factory.New(vdrs) - - poll.Vote(vdr1, votes) - if poll.Finished() { - t.Fatalf("Poll finished after less than alpha votes") - } - poll.Vote(vdr1, votes) - if poll.Finished() { - t.Fatalf("Poll 
finished after getting a duplicated vote") - } - poll.Vote(vdr2, votes) - if !poll.Finished() { - t.Fatalf("Poll did not terminate after receiving k votes") - } -} diff --git a/avalanchego/snow/consensus/avalanche/poll/set.go b/avalanchego/snow/consensus/avalanche/poll/set.go deleted file mode 100644 index b829c44d..00000000 --- a/avalanchego/snow/consensus/avalanche/poll/set.go +++ /dev/null @@ -1,188 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package poll - -import ( - "fmt" - "strings" - "time" - - "go.uber.org/zap" - - "github.com/prometheus/client_golang/prometheus" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/bag" - "github.com/ava-labs/avalanchego/utils/linkedhashmap" - "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/utils/metric" -) - -var ( - _ Set = (*set)(nil) - _ Poll = (*poll)(nil) -) - -type pollHolder interface { - GetPoll() Poll - StartTime() time.Time -} - -type poll struct { - Poll - start time.Time -} - -func (p poll) GetPoll() Poll { - return p -} - -func (p poll) StartTime() time.Time { - return p.start -} - -type set struct { - log logging.Logger - numPolls prometheus.Gauge - durPolls metric.Averager - factory Factory - // maps requestID -> poll - polls linkedhashmap.LinkedHashmap[uint32, pollHolder] -} - -// NewSet returns a new empty set of polls -func NewSet( - factory Factory, - log logging.Logger, - namespace string, - reg prometheus.Registerer, -) Set { - numPolls := prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "polls", - Help: "Number of pending network polls", - }) - if err := reg.Register(numPolls); err != nil { - log.Error("failed to register polls statistics", - zap.Error(err), - ) - } - - durPolls, err := metric.NewAverager( - namespace, - "poll_duration", - "time (in ns) this poll took to complete", - reg, - ) - if err != nil { - log.Error("failed to 
register poll_duration statistics", - zap.Error(err), - ) - } - - return &set{ - log: log, - numPolls: numPolls, - durPolls: durPolls, - factory: factory, - polls: linkedhashmap.New[uint32, pollHolder](), - } -} - -// Add to the current set of polls -// Returns true if the poll was registered correctly and the network sample -// should be made. -func (s *set) Add(requestID uint32, vdrs bag.Bag[ids.NodeID]) bool { - if _, exists := s.polls.Get(requestID); exists { - s.log.Debug("dropping poll", - zap.String("reason", "duplicated request"), - zap.Uint32("requestID", requestID), - ) - return false - } - - s.log.Verbo("creating poll", - zap.Uint32("requestID", requestID), - zap.Stringer("validators", &vdrs), - ) - - s.polls.Put(requestID, poll{ - Poll: s.factory.New(vdrs), // create the new poll - start: time.Now(), - }) - s.numPolls.Inc() // increase the metrics - return true -} - -// Vote registers the connections response to a query for [id]. If there was no -// query, or the response has already be registered, nothing is performed. 
-func (s *set) Vote(requestID uint32, vdr ids.NodeID, votes []ids.ID) []bag.UniqueBag[ids.ID] { - holder, exists := s.polls.Get(requestID) - if !exists { - s.log.Verbo("dropping vote", - zap.String("reason", "unknown poll"), - zap.Stringer("validator", vdr), - zap.Uint32("requestID", requestID), - ) - return nil - } - - p := holder.GetPoll() - - s.log.Verbo("processing votes", - zap.Stringer("validator", vdr), - zap.Uint32("requestID", requestID), - zap.Stringers("votes", votes), - ) - - p.Vote(vdr, votes) - if !p.Finished() { - return nil - } - - var results []bag.UniqueBag[ids.ID] - - // iterate from oldest to newest - iter := s.polls.NewIterator() - for iter.Next() { - holder := iter.Value() - p := holder.GetPoll() - if !p.Finished() { - // since we're iterating from oldest to newest, if the next poll has not finished, - // we can break and return what we have so far - break - } - - s.log.Verbo("poll finished", - zap.Uint32("requestID", requestID), - zap.Stringer("poll", p), - ) - s.durPolls.Observe(float64(time.Since(holder.StartTime()))) - s.numPolls.Dec() // decrease the metrics - - results = append(results, p.Result()) - s.polls.Delete(iter.Key()) // remove the poll from the current set - } - - // only gets here if the poll has finished - // results will have values if this and other newer polls have finished - return results -} - -// Len returns the number of outstanding polls -func (s *set) Len() int { - return s.polls.Len() -} - -func (s *set) String() string { - sb := strings.Builder{} - sb.WriteString(fmt.Sprintf("current polls: (Size = %d)", s.polls.Len())) - iter := s.polls.NewIterator() - for iter.Next() { - requestID := iter.Key() - poll := iter.Value().(Poll) - sb.WriteString(fmt.Sprintf("\n RequestID %d:\n %s", requestID, poll.PrefixedString(" "))) - } - return sb.String() -} diff --git a/avalanchego/snow/consensus/avalanche/poll/set_test.go b/avalanchego/snow/consensus/avalanche/poll/set_test.go deleted file mode 100644 index 6343db8b..00000000 
--- a/avalanchego/snow/consensus/avalanche/poll/set_test.go +++ /dev/null @@ -1,237 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package poll - -import ( - "testing" - - "github.com/stretchr/testify/require" - - "github.com/prometheus/client_golang/prometheus" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/bag" - "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/utils/wrappers" -) - -func TestNewSetErrorOnMetrics(t *testing.T) { - factory := NewNoEarlyTermFactory() - log := logging.NoLog{} - namespace := "" - registerer := prometheus.NewRegistry() - - errs := wrappers.Errs{} - errs.Add( - registerer.Register(prometheus.NewCounter(prometheus.CounterOpts{ - Name: "polls", - })), - registerer.Register(prometheus.NewCounter(prometheus.CounterOpts{ - Name: "poll_duration", - })), - ) - if errs.Errored() { - t.Fatal(errs.Err) - } - - if s := NewSet(factory, log, namespace, registerer); s == nil { - t.Fatalf("shouldn't have errored due to metrics failures") - } -} - -func TestCreateAndFinishPoll(t *testing.T) { - factory := NewNoEarlyTermFactory() - log := logging.NoLog{} - namespace := "" - registerer := prometheus.NewRegistry() - s := NewSet(factory, log, namespace, registerer) - - vtxID := ids.ID{1} - votes := []ids.ID{vtxID} - - vdr1 := ids.NodeID{1} - vdr2 := ids.NodeID{2} // k = 2 - - vdrs := bag.Bag[ids.NodeID]{} - vdrs.Add( - vdr1, - vdr2, - ) - - if s.Len() != 0 { - t.Fatalf("Shouldn't have any active polls yet") - } else if !s.Add(0, vdrs) { - t.Fatalf("Should have been able to add a new poll") - } else if s.Len() != 1 { - t.Fatalf("Should only have one active poll") - } else if s.Add(0, vdrs) { - t.Fatalf("Shouldn't have been able to add a duplicated poll") - } else if s.Len() != 1 { - t.Fatalf("Should only have one active poll") - } else if results := s.Vote(1, vdr1, votes); len(results) > 0 { - t.Fatalf("Shouldn't 
have been able to finish a non-existent poll") - } else if results := s.Vote(0, vdr1, votes); len(results) > 0 { - t.Fatalf("Shouldn't have been able to finish an ongoing poll") - } else if results := s.Vote(0, vdr1, votes); len(results) > 0 { - t.Fatalf("Should have dropped a duplicated poll") - } else if results := s.Vote(0, vdr2, votes); len(results) == 0 { - t.Fatalf("Should have finished the poll") - } else if len(results) != 1 { - t.Fatalf("Wrong number of results returned") - } else if list := results[0].List(); len(list) != 1 { - t.Fatalf("Wrong number of vertices returned") - } else if retVtxID := list[0]; retVtxID != vtxID { - t.Fatalf("Wrong vertex returned") - } else if set := results[0].GetSet(vtxID); set.Len() != 2 { - t.Fatalf("Wrong number of votes returned") - } -} - -func TestCreateAndFinishPollOutOfOrder_OlderFinishesFirst(t *testing.T) { - factory := NewNoEarlyTermFactory() - log := logging.NoLog{} - namespace := "" - registerer := prometheus.NewRegistry() - s := NewSet(factory, log, namespace, registerer) - - // create validators - vdr1 := ids.NodeID{1} - vdr2 := ids.NodeID{2} - vdr3 := ids.NodeID{3} - - vdrs := []ids.NodeID{vdr1, vdr2, vdr3} - - // create two polls for the two vtxs - vdrBag := bag.Bag[ids.NodeID]{} - vdrBag.Add(vdrs...) - added := s.Add(1, vdrBag) - require.True(t, added) - - vdrBag = bag.Bag[ids.NodeID]{} - vdrBag.Add(vdrs...) 
- added = s.Add(2, vdrBag) - require.True(t, added) - require.Equal(t, s.Len(), 2) - - // vote vtx1 for poll 1 - // vote vtx2 for poll 2 - vtx1 := ids.ID{1} - vtx2 := ids.ID{2} - - var results []bag.UniqueBag[ids.ID] - - // vote out of order - results = s.Vote(1, vdr1, []ids.ID{vtx1}) - require.Len(t, results, 0) - results = s.Vote(2, vdr2, []ids.ID{vtx2}) - require.Len(t, results, 0) - results = s.Vote(2, vdr3, []ids.ID{vtx2}) - require.Len(t, results, 0) - - results = s.Vote(1, vdr2, []ids.ID{vtx1}) - require.Len(t, results, 0) - - results = s.Vote(1, vdr3, []ids.ID{vtx1}) // poll 1 finished, poll 2 still remaining - require.Len(t, results, 1) // because 1 is the oldest - require.Equal(t, vtx1, results[0].List()[0]) - - results = s.Vote(2, vdr1, []ids.ID{vtx2}) // poll 2 finished - require.Len(t, results, 1) // because 2 is the oldest now - require.Equal(t, vtx2, results[0].List()[0]) -} - -func TestCreateAndFinishPollOutOfOrder_UnfinishedPollsGaps(t *testing.T) { - factory := NewNoEarlyTermFactory() - log := logging.NoLog{} - namespace := "" - registerer := prometheus.NewRegistry() - s := NewSet(factory, log, namespace, registerer) - - // create validators - vdr1 := ids.NodeID{1} - vdr2 := ids.NodeID{2} - vdr3 := ids.NodeID{3} - - vdrs := []ids.NodeID{vdr1, vdr2, vdr3} - - // create three polls for the two vtxs - vdrBag := bag.Bag[ids.NodeID]{} - vdrBag.Add(vdrs...) - added := s.Add(1, vdrBag) - require.True(t, added) - - vdrBag = bag.Bag[ids.NodeID]{} - vdrBag.Add(vdrs...) - added = s.Add(2, vdrBag) - require.True(t, added) - - vdrBag = bag.Bag[ids.NodeID]{} - vdrBag.Add(vdrs...) 
- added = s.Add(3, vdrBag) - require.True(t, added) - require.Equal(t, s.Len(), 3) - - // vote vtx1 for poll 1 - // vote vtx2 for poll 2 - // vote vtx3 for poll 3 - vtx1 := ids.ID{1} - vtx2 := ids.ID{2} - vtx3 := ids.ID{3} - - var results []bag.UniqueBag[ids.ID] - - // vote out of order - // 2 finishes first to create a gap of finished poll between two unfinished polls 1 and 3 - results = s.Vote(2, vdr3, []ids.ID{vtx2}) - require.Len(t, results, 0) - results = s.Vote(2, vdr2, []ids.ID{vtx2}) - require.Len(t, results, 0) - results = s.Vote(2, vdr1, []ids.ID{vtx2}) - require.Len(t, results, 0) - - // 3 finishes now, 2 has already finished but 1 is not finished so we expect to receive no results still - results = s.Vote(3, vdr2, []ids.ID{vtx3}) - require.Len(t, results, 0) - results = s.Vote(3, vdr3, []ids.ID{vtx3}) - require.Len(t, results, 0) - results = s.Vote(3, vdr1, []ids.ID{vtx3}) - require.Len(t, results, 0) - - // 1 finishes now, 2 and 3 have already finished so we expect 3 items in results - results = s.Vote(1, vdr1, []ids.ID{vtx1}) - require.Len(t, results, 0) - results = s.Vote(1, vdr2, []ids.ID{vtx1}) - require.Len(t, results, 0) - results = s.Vote(1, vdr3, []ids.ID{vtx1}) - require.Len(t, results, 3) - require.Equal(t, vtx1.String(), results[0].List()[0].String()) - require.Equal(t, vtx2.String(), results[1].List()[0].String()) - require.Equal(t, vtx3.String(), results[2].List()[0].String()) -} - -func TestSetString(t *testing.T) { - factory := NewNoEarlyTermFactory() - log := logging.NoLog{} - namespace := "" - registerer := prometheus.NewRegistry() - s := NewSet(factory, log, namespace, registerer) - - vdr1 := ids.NodeID{1} // k = 1 - - vdrs := bag.Bag[ids.NodeID]{} - vdrs.Add(vdr1) - - expected := `current polls: (Size = 1) - RequestID 0: - waiting on Bag: (Size = 1) - NodeID-6HgC8KRBEhXYbF4riJyJFLSHt37UNuRt: 1 - received UniqueBag: (Size = 0)` - if !s.Add(0, vdrs) { - t.Fatalf("Should have been able to add a new poll") - } else if str := s.String(); 
expected != str { - t.Fatalf("Set return wrong string, Expected:\n%s\nReturned:\n%s", - expected, - str) - } -} diff --git a/avalanchego/snow/consensus/avalanche/test_vertex.go b/avalanchego/snow/consensus/avalanche/test_vertex.go index be4037a5..a3bc2fb0 100644 --- a/avalanchego/snow/consensus/avalanche/test_vertex.go +++ b/avalanchego/snow/consensus/avalanche/test_vertex.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avalanche @@ -6,10 +6,8 @@ package avalanche import ( "context" - "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowstorm" - "github.com/ava-labs/avalanchego/utils/set" ) var _ Vertex = (*TestVertex)(nil) @@ -18,35 +16,19 @@ var _ Vertex = (*TestVertex)(nil) type TestVertex struct { choices.TestDecidable - VerifyErrV error - ParentsV []Vertex - ParentsErrV error - HasWhitelistV bool - WhitelistV set.Set[ids.ID] - WhitelistErrV error - HeightV uint64 - HeightErrV error - TxsV []snowstorm.Tx - TxsErrV error - BytesV []byte -} - -func (v *TestVertex) Verify(context.Context) error { - return v.VerifyErrV + ParentsV []Vertex + ParentsErrV error + HeightV uint64 + HeightErrV error + TxsV []snowstorm.Tx + TxsErrV error + BytesV []byte } func (v *TestVertex) Parents() ([]Vertex, error) { return v.ParentsV, v.ParentsErrV } -func (v *TestVertex) HasWhitelist() bool { - return v.HasWhitelistV -} - -func (v *TestVertex) Whitelist(context.Context) (set.Set[ids.ID], error) { - return v.WhitelistV, v.WhitelistErrV -} - func (v *TestVertex) Height() (uint64, error) { return v.HeightV, v.HeightErrV } diff --git a/avalanchego/snow/consensus/avalanche/topological.go b/avalanchego/snow/consensus/avalanche/topological.go deleted file mode 100644 index 6b17824c..00000000 --- a/avalanchego/snow/consensus/avalanche/topological.go +++ 
/dev/null @@ -1,734 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package avalanche - -import ( - "context" - "errors" - "fmt" - "strings" - - "go.uber.org/zap" - - "golang.org/x/exp/maps" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/snow/choices" - "github.com/ava-labs/avalanchego/snow/consensus/metrics" - "github.com/ava-labs/avalanchego/snow/consensus/snowstorm" - "github.com/ava-labs/avalanchego/utils/bag" - "github.com/ava-labs/avalanchego/utils/set" -) - -const minMapSize = 16 - -var ( - errNoLeaves = errors.New("couldn't pop a leaf from leaf set") - - _ Factory = (*TopologicalFactory)(nil) - _ Consensus = (*Topological)(nil) -) - -// TopologicalFactory implements Factory by returning a topological struct -type TopologicalFactory struct{} - -func (TopologicalFactory) New() Consensus { - return &Topological{} -} - -// TODO: Implement pruning of decisions. -// To perfectly preserve the protocol, this implementation will need to store -// the hashes of all accepted decisions. It is possible to add a heuristic that -// removes sufficiently old decisions. However, that will need to be analyzed to -// ensure safety. It is doable when adding in a weak synchrony assumption. - -// Topological performs the avalanche algorithm by utilizing a topological sort -// of the voting results. Assumes that vertices are inserted in topological -// order. 
-type Topological struct { - metrics.Latency - - // pollNumber is the number of times RecordPolls has been called - pollNumber uint64 - - // Context used for logging - ctx *snow.ConsensusContext - // Threshold for confidence increases - params Parameters - - // Maps vtxID -> transactionVertex wrapping the provided vertex as a - // transaction - nodes map[ids.ID]*transactionVertex - - // Tracks the conflict relations - cg snowstorm.Consensus - - // preferred is the frontier of vtxIDs that are strongly preferred - preferred set.Set[ids.ID] - - // virtuous is the frontier of vtxIDs that are strongly virtuous - virtuous set.Set[ids.ID] - - // orphans are the txIDs that are virtuous, but not preferred - orphans set.Set[ids.ID] - - // virtuousVoting are the txIDs that are virtuous and still awaiting - // additional votes before acceptance. transactionVertices whose vertices - // are not considered virtuous are removed from this set. - virtuousVoting set.Set[ids.ID] - - // frontier is the set of vts that have no descendents - // - // Invariant: frontier never contains a rejected vertex - frontier map[ids.ID]Vertex - // preferenceCache is the cache for strongly preferred checks - // virtuousCache is the cache for strongly virtuous checks - preferenceCache, virtuousCache map[ids.ID]bool - - // Used in [calculateInDegree] and [markAncestorInDegrees]. - // Should only be accessed in those methods. - // We use this one instance of set.Set instead of creating a - // new set.Set during each call to [calculateInDegree]. - leaves set.Set[ids.ID] - - // Kahn nodes used in [calculateInDegree] and [markAncestorInDegrees]. - // Should only be accessed in those methods. - // We use this one map instead of creating a new map - // during each call to [calculateInDegree]. - kahnNodes map[ids.ID]kahnNode - - // Used in [pushVotes]. Should only be accessed in that method. - // We use this one instance instead of creating a new bag.UniqueBag[ids.ID] - // during each call to [pushVotes]. 
- votes bag.UniqueBag[ids.ID] -} - -type kahnNode struct { - inDegree int - votes set.Bits64 -} - -func (ta *Topological) Initialize( - ctx context.Context, - chainCtx *snow.ConsensusContext, - params Parameters, - frontier []Vertex, -) error { - if err := params.Valid(); err != nil { - return err - } - - ta.ctx = chainCtx - ta.params = params - ta.leaves = set.Set[ids.ID]{} - ta.votes = bag.UniqueBag[ids.ID]{} - ta.kahnNodes = make(map[ids.ID]kahnNode) - - latencyMetrics, err := metrics.NewLatency("vtx", "vertex/vertices", chainCtx.Log, "", chainCtx.AvalancheRegisterer) - if err != nil { - return err - } - ta.Latency = latencyMetrics - - ta.nodes = make(map[ids.ID]*transactionVertex, minMapSize) - - ta.cg = &snowstorm.Directed{} - if err := ta.cg.Initialize(chainCtx, params.Parameters); err != nil { - return err - } - - ta.frontier = make(map[ids.ID]Vertex, minMapSize) - for _, vtx := range frontier { - ta.frontier[vtx.ID()] = vtx - } - return ta.updateFrontiers(ctx) -} - -func (ta *Topological) NumProcessing() int { - return len(ta.nodes) -} - -func (ta *Topological) IsVirtuous(tx snowstorm.Tx) bool { - return ta.cg.IsVirtuous(tx) -} - -func (ta *Topological) Add(ctx context.Context, vtx Vertex) error { - if vtx.Status().Decided() { - return nil // Already decided this vertex - } - - vtxID := vtx.ID() - if _, exists := ta.nodes[vtxID]; exists { - return nil // Already inserted this vertex - } - - txs, err := vtx.Txs(ctx) - if err != nil { - return err - } - for _, tx := range txs { - if !tx.Status().Decided() { - // Add the consumers to the conflict graph. - if err := ta.cg.Add(ctx, tx); err != nil { - return err - } - - // If the added transaction is virtuous, add it to the set of - // virtuous transactions that are still being voted on. - if vs := ta.cg.VirtuousVoting(); vs.Contains(tx.ID()) { - ta.virtuousVoting.Add(tx.ID()) - } - } - } - - txv := newTransactionVertex(vtx, ta.nodes) - - // Add the transaction vertex to the set of processing nodes. 
- ta.nodes[vtxID] = txv - - // Also add the transaction vertex to the conflict graph to track conflicts. - if err := ta.cg.Add(ctx, txv); err != nil { - return err - } - - // If the added transaction vertex is virtuous, add it to the set of - // virtuous transactions that are still being voted on. If the vertex isn't - // virtuous, then the ID will be removed on the subsequent call to update. - if vs := ta.cg.VirtuousVoting(); vs.Contains(vtxID) { - ta.virtuousVoting.Add(vtxID) - } - - ta.Latency.Issued(vtxID, ta.pollNumber) - - // Because we don't call [updateFrontiers], previous vertices that were - // marked as virtuous will not be updated to no longer being virtuous. Even - // if this newly added vertex conflicts with them. This is an optimization - // to avoid a re-traversal of the DAG in the issuance path. Their virtuous - // status will be updated during a future poll. This is safe because the - // virtuous frontier is only used optimistically to control when it is valid - // to quiesce. - return ta.update(ctx, vtx) // Update the vertices preference and virtuous status -} - -func (ta *Topological) VertexIssued(vtx Vertex) bool { - if vtx.Status().Decided() { - return true - } - _, ok := ta.nodes[vtx.ID()] - return ok -} - -func (ta *Topological) TxIssued(tx snowstorm.Tx) bool { - return ta.cg.Issued(tx) -} - -func (ta *Topological) Orphans() set.Set[ids.ID] { - return ta.orphans -} - -func (ta *Topological) Virtuous() set.Set[ids.ID] { - return ta.virtuous -} - -func (ta *Topological) Preferences() set.Set[ids.ID] { - return ta.preferred -} - -func (ta *Topological) RecordPoll(ctx context.Context, responses bag.UniqueBag[ids.ID]) error { - // Register a new poll call - ta.pollNumber++ - - // If it isn't possible to have alpha votes for any transaction, then we can - // just reset the confidence values in the conflict graph and not perform - // any traversals. 
- partialVotes := set.Bits64(0) - for vote := range responses { - votes := responses.GetSet(vote) - partialVotes.Union(votes) - if partialVotes.Len() >= ta.params.Alpha { - break - } - } - if partialVotes.Len() < ta.params.Alpha { - // Because there were less than alpha total returned votes, we can skip - // the traversals and fail the poll. - _, err := ta.cg.RecordPoll(ctx, bag.Bag[ids.ID]{}) - return err - } - - // Set up the topological sort: O(|Live Set|) - if err := ta.calculateInDegree(responses); err != nil { - return err - } - - // Collect the votes for each transaction: O(|Live Set|) - votes, err := ta.pushVotes(ctx) - if err != nil { - return err - } - - // Update the conflict graph: O(|Transactions|) - if updated, err := ta.cg.RecordPoll(ctx, votes); !updated || err != nil { - // If the transaction statuses weren't changed, there is no need to - // perform a traversal. - return err - } - - // Update the dag: O(|Live Set|) - return ta.updateFrontiers(ctx) -} - -func (ta *Topological) Quiesce() bool { - return ta.virtuousVoting.Len() == 0 -} - -func (ta *Topological) Finalized() bool { - return ta.cg.Finalized() -} - -// HealthCheck returns information about the consensus health. 
-func (ta *Topological) HealthCheck(ctx context.Context) (interface{}, error) { - numOutstandingVtx := ta.Latency.NumProcessing() - isOutstandingVtx := numOutstandingVtx <= ta.params.MaxOutstandingItems - healthy := isOutstandingVtx - details := map[string]interface{}{ - "outstandingVertices": numOutstandingVtx, - } - - // check for long running vertices - oldestProcessingDuration := ta.Latency.MeasureAndGetOldestDuration() - processingTimeOK := oldestProcessingDuration <= ta.params.MaxItemProcessingTime - healthy = healthy && processingTimeOK - details["longestRunningVertex"] = oldestProcessingDuration.String() - - snowstormReport, err := ta.cg.HealthCheck(ctx) - healthy = healthy && err == nil - details["snowstorm"] = snowstormReport - - if !healthy { - var errorReasons []string - if isOutstandingVtx { - errorReasons = append(errorReasons, fmt.Sprintf("number outstanding vertexes %d > %d", numOutstandingVtx, ta.params.MaxOutstandingItems)) - } - if !processingTimeOK { - errorReasons = append(errorReasons, fmt.Sprintf("vertex processing time %s > %s", oldestProcessingDuration, ta.params.MaxItemProcessingTime)) - } - if err != nil { - errorReasons = append(errorReasons, err.Error()) - } - return details, fmt.Errorf("avalanche consensus is not healthy reason: %s", strings.Join(errorReasons, ", ")) - } - return details, nil -} - -// Takes in a list of votes and sets up the topological ordering. Returns the -// reachable section of the graph annotated with the number of inbound edges and -// the non-transitively applied votes. Also returns the list of leaf nodes. -func (ta *Topological) calculateInDegree(responses bag.UniqueBag[ids.ID]) error { - // Clear the kahn node set - maps.Clear(ta.kahnNodes) - // Clear the leaf set - ta.leaves.Clear() - - for vote := range responses { - // If it is not found, then the vote is either for something decided, - // or something we haven't heard of yet. 
- if tv := ta.nodes[vote]; tv != nil { - vtx := tv.vtx - - kahn, previouslySeen := ta.kahnNodes[vote] - // Add this new vote to the current bag of votes - kahn.votes.Union(responses.GetSet(vote)) - ta.kahnNodes[vote] = kahn - - if !previouslySeen { - // If I've never seen this node before, it is currently a leaf. - ta.leaves.Add(vote) - parents, err := vtx.Parents() - if err != nil { - return err - } - ta.kahnNodes, err = ta.markAncestorInDegrees(ta.kahnNodes, parents) - if err != nil { - return err - } - } - } - } - return nil -} - -// adds a new in-degree reference for all nodes. -// should only be called from [calculateInDegree] -func (ta *Topological) markAncestorInDegrees( - kahns map[ids.ID]kahnNode, - deps []Vertex, -) (map[ids.ID]kahnNode, error) { - frontier := make([]Vertex, 0, len(deps)) - for _, vtx := range deps { - // The vertex may have been decided, no need to vote in that case - if !vtx.Status().Decided() { - frontier = append(frontier, vtx) - } - } - - for len(frontier) > 0 { - newLen := len(frontier) - 1 - current := frontier[newLen] - frontier = frontier[:newLen] - - currentID := current.ID() - kahn, alreadySeen := kahns[currentID] - // I got here through a transitive edge, so increase the in-degree - kahn.inDegree++ - kahns[currentID] = kahn - - if kahn.inDegree == 1 { - // If I am transitively seeing this node for the first - // time, it is no longer a leaf. - ta.leaves.Remove(currentID) - } - - if !alreadySeen { - // If I am seeing this node for the first time, I need to check its - // parents - parents, err := current.Parents() - if err != nil { - return nil, err - } - for _, depVtx := range parents { - // No need to traverse to a decided vertex - if !depVtx.Status().Decided() { - frontier = append(frontier, depVtx) - } - } - } - } - return kahns, nil -} - -// Count the number of votes for each operation by pushing votes upwards through -// vertex ancestors. 
-func (ta *Topological) pushVotes(ctx context.Context) (bag.Bag[ids.ID], error) { - ta.votes.Clear() - txConflicts := make(map[ids.ID]set.Set[ids.ID], minMapSize) - - // A leaf is a node with no inbound edges. This removes each leaf and pushes - // the votes upwards, potentially creating new leaves, until there are no - // more leaves. - for ta.leaves.Len() > 0 { - // Pop one node from [leaves] - leaf, ok := ta.leaves.Pop() - if !ok { - // Should never happen because we just checked that [ta.leaves] is - // not empty. - return bag.Bag[ids.ID]{}, errNoLeaves - } - - kahn := ta.kahnNodes[leaf] - - if tv := ta.nodes[leaf]; tv != nil { - vtx := tv.vtx - txs, err := vtx.Txs(ctx) - if err != nil { - return bag.Bag[ids.ID]{}, err - } - for _, tx := range txs { - // Give the votes to the consumer - txID := tx.ID() - ta.votes.UnionSet(txID, kahn.votes) - - // Map txID to set of Conflicts - if _, exists := txConflicts[txID]; !exists { - txConflicts[txID] = ta.cg.Conflicts(tx) - } - } - - // The leaf is the ID of the transaction vertex that was issued to - // the conflict graph for this vertex. Adding this vote is required - // to make progress toward accepting this transaction. - ta.votes.UnionSet(leaf, kahn.votes) - - // Map the vertexID to the set of conflicts from the transaction - // vertex. 
- if _, exists := txConflicts[leaf]; !exists { - txConflicts[leaf] = ta.cg.Conflicts(tv) - } - - parents, err := vtx.Parents() - if err != nil { - return bag.Bag[ids.ID]{}, err - } - for _, dep := range parents { - depID := dep.ID() - if depNode, notPruned := ta.kahnNodes[depID]; notPruned { - depNode.inDegree-- - // Give the votes to my parents - depNode.votes.Union(kahn.votes) - ta.kahnNodes[depID] = depNode - - if depNode.inDegree == 0 { - // Only traverse into the leaves - ta.leaves.Add(depID) - } - } - } - } - } - - // Create bag of votes for conflicting transactions - conflictingVotes := make(bag.UniqueBag[ids.ID]) - for txID, conflicts := range txConflicts { - for conflictTxID := range conflicts { - conflictingVotes.UnionSet(txID, ta.votes.GetSet(conflictTxID)) - } - } - - ta.votes.Difference(&conflictingVotes) - return ta.votes.Bag(ta.params.Alpha), nil -} - -// If I've already checked, do nothing -// If I'm decided, cache the preference and return -// At this point, I must be live -// I now try to accept all my consumers -// I now update all my ancestors -// If any of my parents are rejected, reject myself -// If I'm preferred, remove all my ancestors from the preferred frontier, add -// myself to the preferred frontier -// If all my parents are accepted and I'm acceptable, accept myself -func (ta *Topological) update(ctx context.Context, vtx Vertex) error { - vtxID := vtx.ID() - if _, cached := ta.preferenceCache[vtxID]; cached { - return nil // This vertex has already been updated - } - - // Drop all transaction vertices from the orphan set, as they can never be - // reissued. - ta.orphans.Remove(vtxID) - - // Note: it is not possible for the status to be rejected here. Update is - // only called when adding a new processing vertex and when updating the - // frontiers. If update is called with a rejected vertex when updating the - // frontiers, it is guaranteed that the vertex was rejected during the same - // frontier update. 
This means that the rejected vertex must have already - // been visited, which means update will have exited from the above - // preferenceCache check. - if vtx.Status() == choices.Accepted { - ta.preferred.Add(vtxID) // I'm preferred - ta.virtuous.Add(vtxID) // Accepted is defined as virtuous - - ta.frontier[vtxID] = vtx // I have no descendents yet - - ta.preferenceCache[vtxID] = true - ta.virtuousCache[vtxID] = true - return nil - } - - txs, err := vtx.Txs(ctx) - if err != nil { - return err - } - preferences := ta.cg.Preferences() - virtuousTxs := ta.cg.Virtuous() - - txv, ok := ta.nodes[vtxID] - if !ok { - return fmt.Errorf("transaction vertex %s not found in processing nodes set", vtxID) - } - - initialTxVStatus := txv.Status() - - // acceptable tracks if all transactions included in the vertex have been - // accepted and if all the parent vertices have been accepted. The - // transactions include the transaction vertex. - acceptable := initialTxVStatus == choices.Accepted - - // rejectable tracks if any of the transactions included in the vertex have - // been rejected or if any of the parent vertices have been rejected. The - // transactions include the transaction vertex. 
- rejectable := initialTxVStatus == choices.Rejected - - preferred := acceptable || preferences.Contains(vtxID) - virtuous := acceptable || virtuousTxs.Contains(vtxID) - - for _, tx := range txs { - txID := tx.ID() - s := tx.Status() - if s == choices.Rejected { - // If I contain a rejected consumer, I am rejectable - rejectable = true - preferred = false - virtuous = false - } - if s != choices.Accepted { - // If I contain a non-accepted consumer, I am not acceptable - acceptable = false - preferred = preferred && preferences.Contains(txID) - virtuous = virtuous && virtuousTxs.Contains(txID) - } - } - - deps, err := vtx.Parents() - if err != nil { - return err - } - // Update all of my dependencies - for _, dep := range deps { - if err := ta.update(ctx, dep); err != nil { - return err - } - - depID := dep.ID() - preferred = preferred && ta.preferenceCache[depID] - virtuous = virtuous && ta.virtuousCache[depID] - } - - // Check my parent statuses - for _, dep := range deps { - switch status := dep.Status(); status { - case choices.Rejected: - // My parent is rejected, so I should be rejected - ta.ctx.Log.Trace("rejecting vertex", - zap.String("reason", "rejected parent"), - zap.Stringer("vtxID", vtxID), - zap.Stringer("parentID", dep.ID()), - ) - // Note: because the parent was rejected, the transaction vertex - // will have already been marked as rejected by the conflict graph. - // However, we still need to remove it from the set of virtuous - // transactions. - ta.virtuousVoting.Remove(vtxID) - if err := vtx.Reject(ctx); err != nil { - return err - } - delete(ta.nodes, vtxID) - ta.Latency.Rejected(vtxID, ta.pollNumber, len(vtx.Bytes())) - - ta.preferenceCache[vtxID] = false - ta.virtuousCache[vtxID] = false - return nil - case choices.Accepted: - // If the dependency is accepted, then the vertex's acceptability - // doesn't change. 
- default: - acceptable = false // A parent isn't accepted, so I can't be - } - } - - // Technically, we could also check to see if there are direct conflicts - // between this vertex and a vertex in it's ancestry. If there does exist - // such a conflict, this vertex could also be rejected. However, this would - // require a traversal. Therefore, this memory optimization is ignored. - // Also, this will only happen from a byzantine node issuing the vertex. - // Therefore, this is very unlikely to actually be triggered in practice. - - // If the vertex is going to be rejected, it and all of its children are - // going to be removed from the graph. This means that the parents may still - // exist in the frontier. If the vertex is not rejectable, then it will - // still be in the graph and the parents can not be part of the frontier. - if !rejectable { - for _, dep := range deps { - delete(ta.frontier, dep.ID()) - } - ta.frontier[vtxID] = vtx // I have no descendents yet - } - - ta.preferenceCache[vtxID] = preferred - ta.virtuousCache[vtxID] = virtuous - - if preferred { - ta.preferred.Add(vtxID) // I'm preferred - for _, dep := range deps { - ta.preferred.Remove(dep.ID()) // My parents aren't part of the frontier - } - - // Transactions are marked as orphans if they are virtuous, but not - // contained in a preferred vertex. Since this vertex is preferred, - // remove all the internal transactions from the orphan set. - // - // As an optimization, we only iterate over the transactions if the set - // of orphans isn't empty. As if the set is empty, nothing will be able - // to be removed anyway. 
- if ta.orphans.Len() > 0 { - for _, tx := range txs { - if tx.Status() != choices.Accepted { - ta.orphans.Remove(tx.ID()) - } - } - } - } - - if virtuous { - ta.virtuous.Add(vtxID) // I'm virtuous - for _, dep := range deps { - ta.virtuous.Remove(dep.ID()) // My parents aren't part of the frontier - } - } else { - // If the vertex isn't virtuous, then we can remove the transaction - // vertex from the voting set to pessemistically quiesce early. - ta.virtuousVoting.Remove(vtxID) - } - - switch { - case acceptable: - // I'm acceptable, why not accept? - // Note that VertexAcceptor.Accept must be called before vtx.Accept to - // honor Acceptor.Accept's invariant. - vtxBytes := vtx.Bytes() - if err := ta.ctx.VertexAcceptor.Accept(ta.ctx, vtxID, vtxBytes); err != nil { - return err - } - - if err := vtx.Accept(ctx); err != nil { - return err - } - delete(ta.nodes, vtxID) - ta.Latency.Accepted(vtxID, ta.pollNumber, len(vtxBytes)) - case rejectable: - // I'm rejectable, why not reject? - ta.ctx.Log.Trace("rejecting vertex", - zap.String("reason", "conflicting acceptance"), - zap.Stringer("vtxID", vtxID), - ) - if !txv.Status().Decided() { - if err := ta.cg.Remove(ctx, vtxID); err != nil { - return fmt.Errorf("failed to remove transaction vertex %s from snowstorm before rejecting vertex itself", vtxID) - } - ta.virtuousVoting.Remove(vtxID) - } - if err := vtx.Reject(ctx); err != nil { - return err - } - delete(ta.nodes, vtxID) - ta.Latency.Rejected(vtxID, ta.pollNumber, len(vtx.Bytes())) - } - return nil -} - -// Update the frontier sets -func (ta *Topological) updateFrontiers(ctx context.Context) error { - vts := ta.frontier - - ta.preferred.Clear() - ta.virtuous.Clear() - ta.virtuousVoting.Clear() - - ta.orphans.Clear() - ta.frontier = make(map[ids.ID]Vertex, minMapSize) - ta.preferenceCache = make(map[ids.ID]bool, minMapSize) - ta.virtuousCache = make(map[ids.ID]bool, minMapSize) - - ta.virtuousVoting.Union(ta.cg.VirtuousVoting()) - ta.orphans.Union(ta.cg.Virtuous()) 
// Initially, nothing is preferred - - for _, vtx := range vts { - // Update all the vertices that were in my previous frontier - if err := ta.update(ctx, vtx); err != nil { - return err - } - } - return nil -} diff --git a/avalanchego/snow/consensus/avalanche/topological_test.go b/avalanchego/snow/consensus/avalanche/topological_test.go deleted file mode 100644 index 6c948ce2..00000000 --- a/avalanchego/snow/consensus/avalanche/topological_test.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package avalanche - -import ( - "testing" -) - -func TestTopological(t *testing.T) { - runConsensusTests(t, TopologicalFactory{}) -} diff --git a/avalanchego/snow/consensus/avalanche/traced_consensus.go b/avalanchego/snow/consensus/avalanche/traced_consensus.go deleted file mode 100644 index dfabbad5..00000000 --- a/avalanchego/snow/consensus/avalanche/traced_consensus.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package avalanche - -import ( - "context" - - "go.opentelemetry.io/otel/attribute" - - oteltrace "go.opentelemetry.io/otel/trace" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/trace" - "github.com/ava-labs/avalanchego/utils/bag" - "github.com/ava-labs/avalanchego/utils/set" -) - -var _ Consensus = (*tracedConsensus)(nil) - -type tracedConsensus struct { - Consensus - tracer trace.Tracer -} - -func Trace(consensus Consensus, tracer trace.Tracer) Consensus { - return &tracedConsensus{ - Consensus: consensus, - tracer: tracer, - } -} - -func (c *tracedConsensus) Add(ctx context.Context, vtx Vertex) error { - ctx, span := c.tracer.Start(ctx, "tracedConsensus.Add", oteltrace.WithAttributes( - attribute.Stringer("vtxID", vtx.ID()), - )) - defer span.End() - - return c.Consensus.Add(ctx, vtx) -} - -func (c *tracedConsensus) RecordPoll(ctx context.Context, votes bag.UniqueBag[ids.ID]) error { - var allVotes set.Bits64 - for _, vote := range votes { - allVotes.Union(vote) - } - - ctx, span := c.tracer.Start(ctx, "tracedConsensus.RecordPoll", oteltrace.WithAttributes( - attribute.Int("numVotes", allVotes.Len()), - attribute.Int("numVtxIDs", len(votes)), - )) - defer span.End() - - return c.Consensus.RecordPoll(ctx, votes) -} diff --git a/avalanchego/snow/consensus/avalanche/transaction_vertex.go b/avalanchego/snow/consensus/avalanche/transaction_vertex.go deleted file mode 100644 index 51325026..00000000 --- a/avalanchego/snow/consensus/avalanche/transaction_vertex.go +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package avalanche - -import ( - "context" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/choices" - "github.com/ava-labs/avalanchego/snow/consensus/snowstorm" - "github.com/ava-labs/avalanchego/utils/set" -) - -var _ snowstorm.Tx = (*transactionVertex)(nil) - -// newTransactionVertex returns a new transactionVertex initialized with a -// processing status. -func newTransactionVertex(vtx Vertex, nodes map[ids.ID]*transactionVertex) *transactionVertex { - return &transactionVertex{ - vtx: vtx, - nodes: nodes, - status: choices.Processing, - } -} - -type transactionVertex struct { - // vtx is the vertex that this transaction is attempting to confirm. - vtx Vertex - - // nodes is used to look up other transaction vertices that are currently - // processing. This is used to get parent vertices of this transaction. - nodes map[ids.ID]*transactionVertex - - // status reports the status of this transaction vertex in snowstorm which - // is then used by avalanche to determine the accaptability of the vertex. - status choices.Status -} - -func (*transactionVertex) Bytes() []byte { - // Snowstorm uses the bytes of the transaction to broadcast through the - // decision dispatcher. Because this is an internal transaction type, we - // don't want to have this transaction broadcast. So, we return nil here. - return nil -} - -func (tv *transactionVertex) ID() ids.ID { - return tv.vtx.ID() -} - -func (tv *transactionVertex) Accept(context.Context) error { - tv.status = choices.Accepted - return nil -} - -func (tv *transactionVertex) Reject(context.Context) error { - tv.status = choices.Rejected - return nil -} - -func (tv *transactionVertex) Status() choices.Status { - return tv.status -} - -// Verify isn't called in the consensus code. So this implementation doesn't -// really matter. However it's used to implement the tx interface. 
-func (*transactionVertex) Verify(context.Context) error { - return nil -} - -// Dependencies returns the currently processing transaction vertices of this -// vertex's parents. -func (tv *transactionVertex) Dependencies() ([]snowstorm.Tx, error) { - parents, err := tv.vtx.Parents() - if err != nil { - return nil, err - } - txs := make([]snowstorm.Tx, 0, len(parents)) - for _, parent := range parents { - if parentTx, ok := tv.nodes[parent.ID()]; ok { - txs = append(txs, parentTx) - } - } - return txs, nil -} - -// InputIDs must return a non-empty slice to avoid having the snowstorm engine -// vacuously accept it. A slice is returned containing just the vertexID in -// order to produce no conflicts based on the consumed input. -func (tv *transactionVertex) InputIDs() []ids.ID { - return []ids.ID{tv.vtx.ID()} -} - -func (tv *transactionVertex) HasWhitelist() bool { - return tv.vtx.HasWhitelist() -} - -func (tv *transactionVertex) Whitelist(ctx context.Context) (set.Set[ids.ID], error) { - return tv.vtx.Whitelist(ctx) -} diff --git a/avalanchego/snow/consensus/avalanche/vertex.go b/avalanchego/snow/consensus/avalanche/vertex.go index 16a87e77..0356dc19 100644 --- a/avalanchego/snow/consensus/avalanche/vertex.go +++ b/avalanchego/snow/consensus/avalanche/vertex.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avalanche @@ -11,12 +11,11 @@ import ( ) // Vertex is a collection of multiple transactions tied to other vertices +// +// Note: Verify is not part of this interface because bootstrapping uses IDs to +// verify the vertex is valid. type Vertex interface { choices.Decidable - snowstorm.Whitelister - - // Vertex verification should be performed before issuance. 
- Verify(context.Context) error // Returns the vertices this vertex depends on Parents() ([]Vertex, error) diff --git a/avalanchego/snow/consensus/metrics/height.go b/avalanchego/snow/consensus/metrics/height.go deleted file mode 100644 index 7a485f72..00000000 --- a/avalanchego/snow/consensus/metrics/height.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package metrics - -import ( - "github.com/prometheus/client_golang/prometheus" -) - -var _ Height = (*height)(nil) - -// Height reports the last accepted height -type Height interface { - Accepted(height uint64) -} - -type height struct { - // lastAcceptedHeight keeps track of the last accepted height - lastAcceptedHeight prometheus.Gauge -} - -func NewHeight(namespace string, reg prometheus.Registerer) (Height, error) { - h := &height{ - lastAcceptedHeight: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "last_accepted_height", - Help: "Last height accepted", - }), - } - return h, reg.Register(h.lastAcceptedHeight) -} - -func (h *height) Accepted(height uint64) { - h.lastAcceptedHeight.Set(float64(height)) -} diff --git a/avalanchego/snow/consensus/metrics/latency.go b/avalanchego/snow/consensus/metrics/latency.go deleted file mode 100644 index 4f5d413a..00000000 --- a/avalanchego/snow/consensus/metrics/latency.go +++ /dev/null @@ -1,215 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package metrics - -import ( - "fmt" - "time" - - "github.com/prometheus/client_golang/prometheus" - - "go.uber.org/zap" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/choices" - "github.com/ava-labs/avalanchego/utils/linkedhashmap" - "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/utils/metric" - "github.com/ava-labs/avalanchego/utils/wrappers" -) - -var _ Latency = (*latency)(nil) - -type Latency interface { - // Issued marks the item as having been issued. - Issued(id ids.ID, pollNumber uint64) - - // Accepted marks the item as having been accepted. - // Pass the container size in bytes for metrics tracking. - Accepted(id ids.ID, pollNumber uint64, containerSize int) - - // Rejected marks the item as having been rejected. - // Pass the container size in bytes for metrics tracking. - Rejected(id ids.ID, pollNumber uint64, containerSize int) - - // MeasureAndGetOldestDuration returns the amount of time the oldest item - // has been processing. - MeasureAndGetOldestDuration() time.Duration - - // NumProcessing returns the number of currently processing items. - NumProcessing() int -} - -type opStart struct { - time time.Time - pollNumber uint64 -} - -// Latency reports commonly used consensus latency metrics. -type latency struct { - // ProcessingEntries keeps track of the [opStart] that each item was issued - // into the consensus instance. This is used to calculate the amount of time - // to accept or reject the item. - processingEntries linkedhashmap.LinkedHashmap[ids.ID, opStart] - - // log reports anomalous events. 
- log logging.Logger - - // numProcessing keeps track of the number of items processing - numProcessing prometheus.Gauge - - // pollsAccepted tracks the number of polls that an item was in processing - // for before being accepted - pollsAccepted metric.Averager - - // pollsRejected tracks the number of polls that an item was in processing - // for before being rejected - pollsRejected metric.Averager - - // latAccepted tracks the number of nanoseconds that an item was processing - // before being accepted - latAccepted metric.Averager - containerSizeAcceptedSum prometheus.Gauge - - // rejected tracks the number of nanoseconds that an item was processing - // before being rejected - latRejected metric.Averager - containerSizeRejectedSum prometheus.Gauge -} - -// Initialize the metrics with the provided names. -func NewLatency(metricName, descriptionName string, log logging.Logger, namespace string, reg prometheus.Registerer) (Latency, error) { - errs := wrappers.Errs{} - l := &latency{ - processingEntries: linkedhashmap.New[ids.ID, opStart](), - log: log, - - // e.g., - // "avalanche_7y7zwo7XatqnX4dtTakLo32o7jkMX4XuDa26WaxbCXoCT1qKK_blks_processing" to count how blocks are currently processing - numProcessing: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: fmt.Sprintf("%s_processing", metricName), - Help: fmt.Sprintf("Number of currently processing %s", metricName), - }), - - pollsAccepted: metric.NewAveragerWithErrs( - namespace, - fmt.Sprintf("%s_polls_accepted", metricName), - fmt.Sprintf("number of polls from issuance of a %s to its acceptance", descriptionName), - reg, - &errs, - ), - pollsRejected: metric.NewAveragerWithErrs( - namespace, - fmt.Sprintf("%s_polls_rejected", metricName), - fmt.Sprintf("number of polls from issuance of a %s to its rejection", descriptionName), - reg, - &errs, - ), - - // e.g., - // "avalanche_C_blks_accepted_count" to count how many "Observe" gets called -- count all "Accept" - // 
"avalanche_C_blks_accepted_sum" to count how many ns have elapsed since its issuance on acceptance - // "avalanche_C_blks_accepted_sum / avalanche_C_blks_accepted_count" is the average block acceptance latency in ns - // "avalanche_C_blks_accepted_container_size_sum" to track cumulative sum of all accepted blocks' sizes - // "avalanche_C_blks_accepted_container_size_sum / avalanche_C_blks_accepted_count" is the average block size - latAccepted: metric.NewAveragerWithErrs( - namespace, - fmt.Sprintf("%s_accepted", metricName), - fmt.Sprintf("time (in ns) from issuance of a %s to its acceptance", descriptionName), - reg, - &errs, - ), - containerSizeAcceptedSum: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: fmt.Sprintf("%s_accepted_container_size_sum", metricName), - Help: fmt.Sprintf("Cumulative sum of container size of all accepted %s", metricName), - }), - - // e.g., - // "avalanche_P_blks_rejected_count" to count how many "Observe" gets called -- count all "Reject" - // "avalanche_P_blks_rejected_sum" to count how many ns have elapsed since its issuance on rejection - // "avalanche_P_blks_accepted_sum / avalanche_P_blks_accepted_count" is the average block acceptance latency in ns - // "avalanche_P_blks_accepted_container_size_sum" to track cumulative sum of all accepted blocks' sizes - // "avalanche_P_blks_accepted_container_size_sum / avalanche_P_blks_accepted_count" is the average block size - latRejected: metric.NewAveragerWithErrs( - namespace, - fmt.Sprintf("%s_rejected", metricName), - fmt.Sprintf("time (in ns) from issuance of a %s to its rejection", descriptionName), - reg, - &errs, - ), - containerSizeRejectedSum: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: fmt.Sprintf("%s_rejected_container_size_sum", metricName), - Help: fmt.Sprintf("Cumulative sum of container size of all rejected %s", metricName), - }), - } - errs.Add( - reg.Register(l.numProcessing), - 
reg.Register(l.containerSizeAcceptedSum), - reg.Register(l.containerSizeRejectedSum), - ) - return l, errs.Err -} - -func (l *latency) Issued(id ids.ID, pollNumber uint64) { - l.processingEntries.Put(id, opStart{ - time: time.Now(), - pollNumber: pollNumber, - }) - l.numProcessing.Inc() -} - -func (l *latency) Accepted(id ids.ID, pollNumber uint64, containerSize int) { - start, ok := l.processingEntries.Get(id) - if !ok { - l.log.Debug("unable to measure tx latency", - zap.Stringer("status", choices.Accepted), - zap.Stringer("txID", id), - ) - return - } - l.processingEntries.Delete(id) - - l.pollsAccepted.Observe(float64(pollNumber - start.pollNumber)) - - duration := time.Since(start.time) - l.latAccepted.Observe(float64(duration)) - l.numProcessing.Dec() - - l.containerSizeAcceptedSum.Add(float64(containerSize)) -} - -func (l *latency) Rejected(id ids.ID, pollNumber uint64, containerSize int) { - start, ok := l.processingEntries.Get(id) - if !ok { - l.log.Debug("unable to measure tx latency", - zap.Stringer("status", choices.Rejected), - zap.Stringer("txID", id), - ) - return - } - l.processingEntries.Delete(id) - - l.pollsRejected.Observe(float64(pollNumber - start.pollNumber)) - - duration := time.Since(start.time) - l.latRejected.Observe(float64(duration)) - l.numProcessing.Dec() - - l.containerSizeRejectedSum.Add(float64(containerSize)) -} - -func (l *latency) MeasureAndGetOldestDuration() time.Duration { - _, oldestOp, exists := l.processingEntries.Oldest() - if !exists { - return 0 - } - return time.Since(oldestOp.time) -} - -func (l *latency) NumProcessing() int { - return l.processingEntries.Len() -} diff --git a/avalanchego/snow/consensus/metrics/polls.go b/avalanchego/snow/consensus/metrics/polls.go deleted file mode 100644 index 188bb217..00000000 --- a/avalanchego/snow/consensus/metrics/polls.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package metrics - -import ( - "github.com/prometheus/client_golang/prometheus" - - "github.com/ava-labs/avalanchego/utils/wrappers" -) - -var _ Polls = (*polls)(nil) - -// Polls reports commonly used consensus poll metrics. -type Polls interface { - Successful() - Failed() -} - -type polls struct { - // numFailedPolls keeps track of the number of polls that failed - numFailedPolls prometheus.Counter - - // numSuccessfulPolls keeps track of the number of polls that succeeded - numSuccessfulPolls prometheus.Counter -} - -func NewPolls(namespace string, reg prometheus.Registerer) (Polls, error) { - p := &polls{ - numSuccessfulPolls: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "polls_successful", - Help: "Number of successful polls", - }), - numFailedPolls: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "polls_failed", - Help: "Number of failed polls", - }), - } - errs := wrappers.Errs{} - errs.Add( - reg.Register(p.numFailedPolls), - reg.Register(p.numSuccessfulPolls), - ) - return p, errs.Err -} - -func (p *polls) Failed() { - p.numFailedPolls.Inc() -} - -func (p *polls) Successful() { - p.numSuccessfulPolls.Inc() -} diff --git a/avalanchego/snow/consensus/metrics/timestamp.go b/avalanchego/snow/consensus/metrics/timestamp.go deleted file mode 100644 index 0e784fa5..00000000 --- a/avalanchego/snow/consensus/metrics/timestamp.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package metrics - -import ( - "time" - - "github.com/prometheus/client_golang/prometheus" -) - -var _ Timestamp = (*timestamp)(nil) - -// Timestamp reports the last accepted block time, -// to track it in unix seconds. 
-type Timestamp interface { - Accepted(ts time.Time) -} - -type timestamp struct { - // lastAcceptedTimestamp keeps track of the last accepted timestamp - lastAcceptedTimestamp prometheus.Gauge -} - -func NewTimestamp(namespace string, reg prometheus.Registerer) (Timestamp, error) { - t := ×tamp{ - lastAcceptedTimestamp: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "last_accepted_timestamp", - Help: "Last accepted block timestamp in unix seconds", - }), - } - return t, reg.Register(t.lastAcceptedTimestamp) -} - -func (t *timestamp) Accepted(ts time.Time) { - t.lastAcceptedTimestamp.Set(float64(ts.Unix())) -} diff --git a/avalanchego/snow/consensus/snowball/binary_slush.go b/avalanchego/snow/consensus/snowball/binary_slush.go index 3fed5c98..700c03c5 100644 --- a/avalanchego/snow/consensus/snowball/binary_slush.go +++ b/avalanchego/snow/consensus/snowball/binary_slush.go @@ -1,13 +1,15 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowball -import ( - "fmt" -) +import "fmt" -var _ BinarySlush = (*binarySlush)(nil) +func newBinarySlush(choice int) binarySlush { + return binarySlush{ + preference: choice, + } +} // binarySlush is the implementation of a binary slush instance type binarySlush struct { @@ -17,10 +19,6 @@ type binarySlush struct { preference int } -func (sl *binarySlush) Initialize(choice int) { - sl.preference = choice -} - func (sl *binarySlush) Preference() int { return sl.preference } diff --git a/avalanchego/snow/consensus/snowball/binary_snowball.go b/avalanchego/snow/consensus/snowball/binary_snowball.go index 8f197f03..2e17bc93 100644 --- a/avalanchego/snow/consensus/snowball/binary_snowball.go +++ b/avalanchego/snow/consensus/snowball/binary_snowball.go @@ -1,31 +1,31 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. 
All rights reserved. // See the file LICENSE for licensing terms. package snowball -import ( - "fmt" -) +import "fmt" -var _ BinarySnowball = (*binarySnowball)(nil) +var _ Binary = (*binarySnowball)(nil) + +func newBinarySnowball(beta, choice int) binarySnowball { + return binarySnowball{ + binarySnowflake: newBinarySnowflake(beta, choice), + preference: choice, + } +} // binarySnowball is the implementation of a binary snowball instance type binarySnowball struct { // wrap the binary snowflake logic binarySnowflake - // preference is the choice with the largest number of successful polls. - // Ties are broken by switching choice lazily + // preference is the choice with the largest number of polls which preferred + // the color. Ties are broken by switching choice lazily preference int - // numSuccessfulPolls tracks the total number of successful network polls of - // the 0 and 1 choices - numSuccessfulPolls [2]int -} - -func (sb *binarySnowball) Initialize(beta, choice int) { - sb.binarySnowflake.Initialize(beta, choice) - sb.preference = choice + // preferenceStrength tracks the total number of network polls which + // preferred each choice + preferenceStrength [2]int } func (sb *binarySnowball) Preference() int { @@ -40,18 +40,27 @@ func (sb *binarySnowball) Preference() int { } func (sb *binarySnowball) RecordSuccessfulPoll(choice int) { - sb.numSuccessfulPolls[choice]++ - if sb.numSuccessfulPolls[choice] > sb.numSuccessfulPolls[1-choice] { - sb.preference = choice - } + sb.increasePreferenceStrength(choice) sb.binarySnowflake.RecordSuccessfulPoll(choice) } +func (sb *binarySnowball) RecordPollPreference(choice int) { + sb.increasePreferenceStrength(choice) + sb.binarySnowflake.RecordPollPreference(choice) +} + func (sb *binarySnowball) String() string { return fmt.Sprintf( - "SB(Preference = %d, NumSuccessfulPolls[0] = %d, NumSuccessfulPolls[1] = %d, %s)", + "SB(Preference = %d, PreferenceStrength[0] = %d, PreferenceStrength[1] = %d, %s)", sb.preference, - 
sb.numSuccessfulPolls[0], - sb.numSuccessfulPolls[1], + sb.preferenceStrength[0], + sb.preferenceStrength[1], &sb.binarySnowflake) } + +func (sb *binarySnowball) increasePreferenceStrength(choice int) { + sb.preferenceStrength[choice]++ + if sb.preferenceStrength[choice] > sb.preferenceStrength[1-choice] { + sb.preference = choice + } +} diff --git a/avalanchego/snow/consensus/snowball/binary_snowball_test.go b/avalanchego/snow/consensus/snowball/binary_snowball_test.go index c2832a7d..2c2a8421 100644 --- a/avalanchego/snow/consensus/snowball/binary_snowball_test.go +++ b/avalanchego/snow/consensus/snowball/binary_snowball_test.go @@ -1,197 +1,175 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowball import ( "testing" + + "github.com/stretchr/testify/require" ) func TestBinarySnowball(t *testing.T) { + require := require.New(t) + red := 0 blue := 1 beta := 2 - sb := binarySnowball{} - sb.Initialize(beta, red) - - if pref := sb.Preference(); pref != red { - t.Fatalf("Wrong preference. Expected %d got %d", red, pref) - } else if sb.Finalized() { - t.Fatalf("Finalized too early") - } + sb := newBinarySnowball(beta, red) + require.Equal(red, sb.Preference()) + require.False(sb.Finalized()) sb.RecordSuccessfulPoll(blue) - - if pref := sb.Preference(); pref != blue { - t.Fatalf("Wrong preference. Expected %d got %d", blue, pref) - } else if sb.Finalized() { - t.Fatalf("Finalized too early") - } + require.Equal(blue, sb.Preference()) + require.False(sb.Finalized()) sb.RecordSuccessfulPoll(red) + require.Equal(blue, sb.Preference()) + require.False(sb.Finalized()) - if pref := sb.Preference(); pref != blue { - t.Fatalf("Wrong preference. 
Expected %d got %d", blue, pref) - } else if sb.Finalized() { - t.Fatalf("Finalized too early") - } + sb.RecordSuccessfulPoll(blue) + require.Equal(blue, sb.Preference()) + require.False(sb.Finalized()) sb.RecordSuccessfulPoll(blue) + require.Equal(blue, sb.Preference()) + require.True(sb.Finalized()) +} + +func TestBinarySnowballRecordPollPreference(t *testing.T) { + require := require.New(t) - if pref := sb.Preference(); pref != blue { - t.Fatalf("Wrong preference. Expected %d got %d", blue, pref) - } else if sb.Finalized() { - t.Fatalf("Finalized too early") - } + red := 0 + blue := 1 + + beta := 2 + + sb := newBinarySnowball(beta, red) + require.Equal(red, sb.Preference()) + require.False(sb.Finalized()) sb.RecordSuccessfulPoll(blue) + require.Equal(blue, sb.Preference()) + require.False(sb.Finalized()) + + sb.RecordSuccessfulPoll(red) + require.Equal(blue, sb.Preference()) + require.False(sb.Finalized()) - if pref := sb.Preference(); pref != blue { - t.Fatalf("Wrong preference. Expected %d got %d", blue, pref) - } else if !sb.Finalized() { - t.Fatalf("Didn't finalized correctly") - } + sb.RecordPollPreference(red) + require.Equal(red, sb.Preference()) + require.False(sb.Finalized()) + + sb.RecordSuccessfulPoll(red) + require.Equal(red, sb.Preference()) + require.False(sb.Finalized()) + + sb.RecordSuccessfulPoll(red) + require.Equal(red, sb.Preference()) + require.True(sb.Finalized()) + + expected := "SB(Preference = 0, PreferenceStrength[0] = 4, PreferenceStrength[1] = 1, SF(Confidence = 2, Finalized = true, SL(Preference = 0)))" + require.Equal(expected, sb.String()) } func TestBinarySnowballRecordUnsuccessfulPoll(t *testing.T) { + require := require.New(t) + red := 0 blue := 1 beta := 2 - sb := binarySnowball{} - sb.Initialize(beta, red) - - if pref := sb.Preference(); pref != red { - t.Fatalf("Wrong preference. 
Expected %d got %d", red, pref) - } else if sb.Finalized() { - t.Fatalf("Finalized too early") - } + sb := newBinarySnowball(beta, red) + require.Equal(red, sb.Preference()) + require.False(sb.Finalized()) sb.RecordSuccessfulPoll(blue) - - if pref := sb.Preference(); pref != blue { - t.Fatalf("Wrong preference. Expected %d got %d", blue, pref) - } else if sb.Finalized() { - t.Fatalf("Finalized too early") - } + require.Equal(blue, sb.Preference()) + require.False(sb.Finalized()) sb.RecordUnsuccessfulPoll() sb.RecordSuccessfulPoll(blue) - - if pref := sb.Preference(); pref != blue { - t.Fatalf("Wrong preference. Expected %d got %d", blue, pref) - } else if sb.Finalized() { - t.Fatalf("Finalized too early") - } + require.Equal(blue, sb.Preference()) + require.False(sb.Finalized()) sb.RecordSuccessfulPoll(blue) + require.Equal(blue, sb.Preference()) + require.True(sb.Finalized()) - if pref := sb.Preference(); pref != blue { - t.Fatalf("Wrong preference. Expected %d got %d", blue, pref) - } else if !sb.Finalized() { - t.Fatalf("Finalized too late") - } - - expected := "SB(Preference = 1, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 3, SF(Confidence = 2, Finalized = true, SL(Preference = 1)))" - if str := sb.String(); str != expected { - t.Fatalf("Wrong state. Expected:\n%s\nGot:\n%s", expected, str) - } + expected := "SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 3, SF(Confidence = 2, Finalized = true, SL(Preference = 1)))" + require.Equal(expected, sb.String()) } func TestBinarySnowballAcceptWeirdColor(t *testing.T) { + require := require.New(t) + blue := 0 red := 1 beta := 2 - sb := binarySnowball{} - sb.Initialize(beta, red) + sb := newBinarySnowball(beta, red) - if pref := sb.Preference(); pref != red { - t.Fatalf("Wrong preference. 
Expected %d got %d", red, pref) - } else if sb.Finalized() { - t.Fatalf("Finalized too early") - } + require.Equal(red, sb.Preference()) + require.False(sb.Finalized()) sb.RecordSuccessfulPoll(red) sb.RecordUnsuccessfulPoll() - if pref := sb.Preference(); pref != red { - t.Fatalf("Wrong preference. Expected %d got %d", red, pref) - } else if sb.Finalized() { - t.Fatalf("Finalized too early") - } + require.Equal(red, sb.Preference()) + require.False(sb.Finalized()) sb.RecordSuccessfulPoll(red) + sb.RecordUnsuccessfulPoll() - if pref := sb.Preference(); pref != red { - t.Fatalf("Wrong preference. Expected %d got %d", red, pref) - } else if sb.Finalized() { - t.Fatalf("Finalized too early") - } + require.Equal(red, sb.Preference()) + require.False(sb.Finalized()) sb.RecordSuccessfulPoll(blue) - if pref := sb.Preference(); pref != red { - t.Fatalf("Wrong preference. Expected %d got %d", red, pref) - } else if sb.Finalized() { - t.Fatalf("Finalized too early") - } + require.Equal(red, sb.Preference()) + require.False(sb.Finalized()) sb.RecordSuccessfulPoll(blue) - if pref := sb.Preference(); pref != blue { - t.Fatalf("Wrong preference. Expected %d got %d", blue, pref) - } else if !sb.Finalized() { - t.Fatalf("Finalized too late") - } + require.Equal(blue, sb.Preference()) + require.True(sb.Finalized()) - expected := "SB(Preference = 1, NumSuccessfulPolls[0] = 2, NumSuccessfulPolls[1] = 2, SF(Confidence = 2, Finalized = true, SL(Preference = 0)))" - if str := sb.String(); str != expected { - t.Fatalf("Wrong state. 
Expected:\n%s\nGot:\n%s", expected, str) - } + expected := "SB(Preference = 1, PreferenceStrength[0] = 2, PreferenceStrength[1] = 2, SF(Confidence = 2, Finalized = true, SL(Preference = 0)))" + require.Equal(expected, sb.String()) } func TestBinarySnowballLockColor(t *testing.T) { + require := require.New(t) + red := 0 blue := 1 beta := 1 - sb := binarySnowball{} - sb.Initialize(beta, red) + sb := newBinarySnowball(beta, red) sb.RecordSuccessfulPoll(red) - if pref := sb.Preference(); pref != red { - t.Fatalf("Wrong preference. Expected %d got %d", red, pref) - } else if !sb.Finalized() { - t.Fatalf("Finalized too late") - } + require.Equal(red, sb.Preference()) + require.True(sb.Finalized()) sb.RecordSuccessfulPoll(blue) - if pref := sb.Preference(); pref != red { - t.Fatalf("Wrong preference. Expected %d got %d", red, pref) - } else if !sb.Finalized() { - t.Fatalf("Finalized too late") - } + require.Equal(red, sb.Preference()) + require.True(sb.Finalized()) + sb.RecordPollPreference(blue) sb.RecordSuccessfulPoll(blue) - if pref := sb.Preference(); pref != red { - t.Fatalf("Wrong preference. Expected %d got %d", red, pref) - } else if !sb.Finalized() { - t.Fatalf("Finalized too late") - } + require.Equal(red, sb.Preference()) + require.True(sb.Finalized()) - expected := "SB(Preference = 1, NumSuccessfulPolls[0] = 1, NumSuccessfulPolls[1] = 2, SF(Confidence = 1, Finalized = true, SL(Preference = 0)))" - if str := sb.String(); str != expected { - t.Fatalf("Wrong state. 
Expected:\n%s\nGot:\n%s", expected, str) - } + expected := "SB(Preference = 1, PreferenceStrength[0] = 1, PreferenceStrength[1] = 3, SF(Confidence = 1, Finalized = true, SL(Preference = 0)))" + require.Equal(expected, sb.String()) } diff --git a/avalanchego/snow/consensus/snowball/binary_snowflake.go b/avalanchego/snow/consensus/snowball/binary_snowflake.go index 50507dc0..5f897af8 100644 --- a/avalanchego/snow/consensus/snowball/binary_snowflake.go +++ b/avalanchego/snow/consensus/snowball/binary_snowflake.go @@ -1,13 +1,18 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowball -import ( - "fmt" -) +import "fmt" -var _ BinarySnowflake = (*binarySnowflake)(nil) +var _ Binary = (*binarySnowflake)(nil) + +func newBinarySnowflake(beta, choice int) binarySnowflake { + return binarySnowflake{ + binarySlush: newBinarySlush(choice), + beta: beta, + } +} // binarySnowflake is the implementation of a binary snowflake instance type binarySnowflake struct { @@ -27,14 +32,9 @@ type binarySnowflake struct { finalized bool } -func (sf *binarySnowflake) Initialize(beta, choice int) { - sf.binarySlush.Initialize(choice) - sf.beta = beta -} - func (sf *binarySnowflake) RecordSuccessfulPoll(choice int) { if sf.finalized { - return // This instace is already decided. + return // This instance is already decided. } if preference := sf.Preference(); preference == choice { @@ -49,6 +49,15 @@ func (sf *binarySnowflake) RecordSuccessfulPoll(choice int) { sf.binarySlush.RecordSuccessfulPoll(choice) } +func (sf *binarySnowflake) RecordPollPreference(choice int) { + if sf.finalized { + return // This instance is already decided. 
+ } + + sf.confidence = 0 + sf.binarySlush.RecordSuccessfulPoll(choice) +} + func (sf *binarySnowflake) RecordUnsuccessfulPoll() { sf.confidence = 0 } diff --git a/avalanchego/snow/consensus/snowball/binary_snowflake_test.go b/avalanchego/snow/consensus/snowball/binary_snowflake_test.go index eb16d1e3..085b94c5 100644 --- a/avalanchego/snow/consensus/snowball/binary_snowflake_test.go +++ b/avalanchego/snow/consensus/snowball/binary_snowflake_test.go @@ -1,56 +1,51 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowball import ( "testing" + + "github.com/stretchr/testify/require" ) func TestBinarySnowflake(t *testing.T) { + require := require.New(t) + blue := 0 red := 1 beta := 2 - sf := binarySnowflake{} - sf.Initialize(beta, red) + sf := newBinarySnowflake(beta, red) - if pref := sf.Preference(); pref != red { - t.Fatalf("Wrong preference. Expected %d got %d", red, pref) - } else if sf.Finalized() { - t.Fatalf("Finalized too early") - } + require.Equal(red, sf.Preference()) + require.False(sf.Finalized()) sf.RecordSuccessfulPoll(blue) - if pref := sf.Preference(); pref != blue { - t.Fatalf("Wrong preference. Expected %d got %d", blue, pref) - } else if sf.Finalized() { - t.Fatalf("Finalized too early") - } + require.Equal(blue, sf.Preference()) + require.False(sf.Finalized()) sf.RecordSuccessfulPoll(red) - if pref := sf.Preference(); pref != red { - t.Fatalf("Wrong preference. Expected %d got %d", red, pref) - } else if sf.Finalized() { - t.Fatalf("Finalized too early") - } + require.Equal(red, sf.Preference()) + require.False(sf.Finalized()) sf.RecordSuccessfulPoll(blue) - if pref := sf.Preference(); pref != blue { - t.Fatalf("Wrong preference. 
Expected %d got %d", blue, pref) - } else if sf.Finalized() { - t.Fatalf("Finalized too early") - } + require.Equal(blue, sf.Preference()) + require.False(sf.Finalized()) + + sf.RecordPollPreference(red) + require.Equal(red, sf.Preference()) + require.False(sf.Finalized()) sf.RecordSuccessfulPoll(blue) + require.Equal(blue, sf.Preference()) + require.False(sf.Finalized()) - if pref := sf.Preference(); pref != blue { - t.Fatalf("Wrong preference. Expected %d got %d", blue, pref) - } else if !sf.Finalized() { - t.Fatalf("Didn't finalized correctly") - } + sf.RecordSuccessfulPoll(blue) + require.Equal(blue, sf.Preference()) + require.True(sf.Finalized()) } diff --git a/avalanchego/snow/consensus/snowball/consensus.go b/avalanchego/snow/consensus/snowball/consensus.go index 1eb54694..82d57b74 100644 --- a/avalanchego/snow/consensus/snowball/consensus.go +++ b/avalanchego/snow/consensus/snowball/consensus.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowball @@ -15,9 +15,6 @@ import ( type Consensus interface { fmt.Stringer - // Takes in alpha, beta1, beta2, and the initial choice - Initialize(params Parameters, initialPreference ids.ID) - // Adds a new choice to vote on Add(newChoice ids.ID) @@ -43,20 +40,20 @@ type Consensus interface { Finalized() bool } -// NnarySnowball augments NnarySnowflake with a counter that tracks the total -// number of positive responses from a network sample. -type NnarySnowball interface{ NnarySnowflake } +// Factory produces Nnary and Unary decision instances +type Factory interface { + NewNnary(params Parameters, choice ids.ID) Nnary + NewUnary(params Parameters) Unary +} -// NnarySnowflake is a snowflake instance deciding between an unbounded number -// of values. 
After performing a network sample of k nodes, if you have alpha -// votes for one of the choices, you should vote for that choice. Otherwise, you -// should reset. -type NnarySnowflake interface { +// Nnary is a snow instance deciding between an unbounded number of values. +// The caller samples k nodes and then calls +// 1. RecordSuccessfulPoll if choice collects >= alphaConfidence votes +// 2. RecordPollPreference if choice collects >= alphaPreference votes +// 3. RecordUnsuccessfulPoll otherwise +type Nnary interface { fmt.Stringer - // Takes in beta1, beta2, and the initial choice - Initialize(betaVirtuous, betaRogue int, initialPreference ids.ID) - // Adds a new possible choice Add(newChoice ids.ID) @@ -67,50 +64,10 @@ type NnarySnowflake interface { // specified choice. Assumes the choice was previously added. RecordSuccessfulPoll(choice ids.ID) - // RecordUnsuccessfulPoll resets the snowflake counter of this instance - RecordUnsuccessfulPoll() - - // Return whether a choice has been finalized - Finalized() bool -} - -// NnarySlush is a slush instance deciding between an unbounded number of -// values. After performing a network sample of k nodes, if you have alpha -// votes for one of the choices, you should vote for that choice. -type NnarySlush interface { - fmt.Stringer - - // Takes in the initial choice - Initialize(initialPreference ids.ID) - - // Returns the currently preferred choice to be finalized - Preference() ids.ID - - // RecordSuccessfulPoll records a successful poll towards finalizing the - // specified choice. Assumes the choice was previously added. - RecordSuccessfulPoll(choice ids.ID) -} - -// BinarySnowball augments BinarySnowflake with a counter that tracks the total -// number of positive responses from a network sample. 
-type BinarySnowball interface{ BinarySnowflake } - -// BinarySnowflake is a snowball instance deciding between two values -// After performing a network sample of k nodes, if you have alpha votes for -// one of the choices, you should vote for that choice. Otherwise, you should -// reset. -type BinarySnowflake interface { - fmt.Stringer - - // Takes in the beta value, and the initial choice - Initialize(beta, initialPreference int) - - // Returns the currently preferred choice to be finalized - Preference() int - - // RecordSuccessfulPoll records a successful poll towards finalizing the - // specified choice - RecordSuccessfulPoll(choice int) + // RecordPollPreference records a poll that preferred the specified choice + // but did not contribute towards finalizing the specified choice. Assumes + // the choice was previously added. + RecordPollPreference(choice ids.ID) // RecordUnsuccessfulPoll resets the snowflake counter of this instance RecordUnsuccessfulPoll() @@ -119,61 +76,48 @@ type BinarySnowflake interface { Finalized() bool } -// BinarySlush is a slush instance deciding between two values. After performing -// a network sample of k nodes, if you have alpha votes for one of the choices, -// you should vote for that choice. -type BinarySlush interface { +// Binary is a snow instance deciding between two values. +// The caller samples k nodes and then calls +// 1. RecordSuccessfulPoll if choice collects >= alphaConfidence votes +// 2. RecordPollPreference if choice collects >= alphaPreference votes +// 3. RecordUnsuccessfulPoll otherwise +type Binary interface { fmt.Stringer - // Takes in the initial choice - Initialize(initialPreference int) - // Returns the currently preferred choice to be finalized Preference() int // RecordSuccessfulPoll records a successful poll towards finalizing the // specified choice RecordSuccessfulPoll(choice int) -} -// UnarySnowball is a snowball instance deciding on one value. 
After performing -// a network sample of k nodes, if you have alpha votes for the choice, you -// should vote. Otherwise, you should reset. -type UnarySnowball interface { - fmt.Stringer - - // Takes in the beta value - Initialize(beta int) - - // RecordSuccessfulPoll records a successful poll towards finalizing - RecordSuccessfulPoll() + // RecordPollPreference records a poll that preferred the specified choice + // but did not contribute towards finalizing the specified choice + RecordPollPreference(choice int) // RecordUnsuccessfulPoll resets the snowflake counter of this instance RecordUnsuccessfulPoll() // Return whether a choice has been finalized Finalized() bool - - // Returns a new binary snowball instance with the agreement parameters - // transferred. Takes in the new beta value and the original choice - Extend(beta, originalPreference int) BinarySnowball - - // Returns a new unary snowball instance with the same state - Clone() UnarySnowball } -// UnarySnowflake is a snowflake instance deciding on one value. After -// performing a network sample of k nodes, if you have alpha votes for the -// choice, you should vote. Otherwise, you should reset. -type UnarySnowflake interface { +// Unary is a snow instance deciding on one value. +// The caller samples k nodes and then calls +// 1. RecordSuccessfulPoll if choice collects >= alphaConfidence votes +// 2. RecordPollPreference if choice collects >= alphaPreference votes +// 3. RecordUnsuccessfulPoll otherwise +type Unary interface { fmt.Stringer - // Takes in the beta value - Initialize(beta int) - - // RecordSuccessfulPoll records a successful poll towards finalizing + // RecordSuccessfulPoll records a successful poll that reaches an alpha + // confidence threshold. RecordSuccessfulPoll() + // RecordPollPreference records a poll that receives an alpha preference + // threshold, but not an alpha confidence threshold. 
+ RecordPollPreference() + // RecordUnsuccessfulPoll resets the snowflake counter of this instance RecordUnsuccessfulPoll() @@ -182,8 +126,8 @@ type UnarySnowflake interface { // Returns a new binary snowball instance with the agreement parameters // transferred. Takes in the new beta value and the original choice - Extend(beta, originalPreference int) BinarySnowflake + Extend(beta, originalPreference int) Binary // Returns a new unary snowflake instance with the same state - Clone() UnarySnowflake + Clone() Unary } diff --git a/avalanchego/snow/consensus/snowball/consensus_performance_test.go b/avalanchego/snow/consensus/snowball/consensus_performance_test.go index 16d1a924..776e11fa 100644 --- a/avalanchego/snow/consensus/snowball/consensus_performance_test.go +++ b/avalanchego/snow/consensus/snowball/consensus_performance_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowball @@ -6,52 +6,93 @@ package snowball import ( "testing" - "github.com/ava-labs/avalanchego/utils/sampler" + "github.com/stretchr/testify/require" + "gonum.org/v1/gonum/mathext/prng" ) -func TestSnowballOptimized(t *testing.T) { - numColors := 10 - numNodes := 100 - params := Parameters{ - K: 20, Alpha: 15, BetaVirtuous: 20, BetaRogue: 30, - } - seed := int64(0) +// Test that a network running the lower AlphaPreference converges faster than a +// network running equal Alpha values. 
+func TestDualAlphaOptimization(t *testing.T) { + require := require.New(t) + + var ( + numColors = 10 + numNodes = 100 + params = Parameters{ + K: 20, + AlphaPreference: 15, + AlphaConfidence: 15, + BetaVirtuous: 15, + BetaRogue: 20, + } + seed uint64 = 0 + source = prng.NewMT19937() + ) - nBitwise := Network{} - nBitwise.Initialize(params, numColors) + singleAlphaNetwork := NewNetwork(SnowballFactory, params, numColors, source) - nNaive := nBitwise + params.AlphaPreference = params.K/2 + 1 + dualAlphaNetwork := NewNetwork(SnowballFactory, params, numColors, source) - sampler.Seed(seed) + source.Seed(seed) for i := 0; i < numNodes; i++ { - nBitwise.AddNode(&Tree{}) + dualAlphaNetwork.AddNode(NewTree) } - sampler.Seed(seed) + source.Seed(seed) for i := 0; i < numNodes; i++ { - nNaive.AddNode(&Flat{}) + singleAlphaNetwork.AddNode(NewTree) } - numRounds := 0 - for !nBitwise.Finalized() && !nBitwise.Disagreement() && !nNaive.Finalized() && !nNaive.Disagreement() { - sampler.Seed(int64(numRounds) + seed) - nBitwise.Round() + // Although this can theoretically fail with a correct implementation, it + // shouldn't in practice + runNetworksInLockstep(require, seed, source, dualAlphaNetwork, singleAlphaNetwork) +} - sampler.Seed(int64(numRounds) + seed) - nNaive.Round() - numRounds++ +// Test that a network running the snowball tree converges faster than a network +// running the flat snowball protocol. 
+func TestTreeConvergenceOptimization(t *testing.T) { + require := require.New(t) + + var ( + numColors = 10 + numNodes = 100 + params = DefaultParameters + seed uint64 = 0 + source = prng.NewMT19937() + ) + + treeNetwork := NewNetwork(SnowballFactory, params, numColors, source) + flatNetwork := NewNetwork(SnowballFactory, params, numColors, source) + + source.Seed(seed) + for i := 0; i < numNodes; i++ { + treeNetwork.AddNode(NewTree) } - if nBitwise.Disagreement() || nNaive.Disagreement() { - t.Fatalf("Network agreed on inconsistent values") + source.Seed(seed) + for i := 0; i < numNodes; i++ { + flatNetwork.AddNode(NewFlat) } // Although this can theoretically fail with a correct implementation, it // shouldn't in practice - if !nBitwise.Finalized() { - t.Fatalf("Network agreed on values faster with naive implementation") - } - if !nBitwise.Agreement() { - t.Fatalf("Network agreed on inconsistent values") + runNetworksInLockstep(require, seed, source, treeNetwork, flatNetwork) +} + +func runNetworksInLockstep(require *require.Assertions, seed uint64, source *prng.MT19937, fast *Network, slow *Network) { + numRounds := 0 + for !fast.Finalized() && !fast.Disagreement() && !slow.Finalized() && !slow.Disagreement() { + source.Seed(uint64(numRounds) + seed) + fast.Round() + + source.Seed(uint64(numRounds) + seed) + slow.Round() + numRounds++ } + + require.False(fast.Disagreement()) + require.False(slow.Disagreement()) + require.True(fast.Finalized()) + require.True(fast.Agreement()) } diff --git a/avalanchego/snow/consensus/snowball/consensus_reversibility_test.go b/avalanchego/snow/consensus/snowball/consensus_reversibility_test.go index 7ce053c2..578fea1d 100644 --- a/avalanchego/snow/consensus/snowball/consensus_reversibility_test.go +++ b/avalanchego/snow/consensus/snowball/consensus_reversibility_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. package snowball @@ -6,39 +6,40 @@ package snowball import ( "testing" - "github.com/ava-labs/avalanchego/utils/sampler" + "github.com/stretchr/testify/require" + "gonum.org/v1/gonum/mathext/prng" ) func TestSnowballGovernance(t *testing.T) { - numColors := 2 - numNodes := 100 - numByzantine := 10 - numRed := 55 - params := Parameters{ - K: 20, Alpha: 15, BetaVirtuous: 20, BetaRogue: 30, - } - seed := int64(0) + require := require.New(t) + + var ( + numColors = 2 + numNodes = 100 + numByzantine = 10 + numRed = 55 + params = DefaultParameters + seed uint64 = 0 + source = prng.NewMT19937() + ) - nBitwise := Network{} - nBitwise.Initialize(params, numColors) + nBitwise := NewNetwork(SnowballFactory, params, numColors, source) - sampler.Seed(seed) + source.Seed(seed) for i := 0; i < numRed; i++ { - nBitwise.AddNodeSpecificColor(&Tree{}, 0, []int{1}) + nBitwise.AddNodeSpecificColor(NewTree, 0, []int{1}) } for _, node := range nBitwise.nodes { - if node.Preference() != nBitwise.colors[0] { - t.Fatalf("Wrong preferences") - } + require.Equal(nBitwise.colors[0], node.Preference()) } for i := 0; i < numNodes-numByzantine-numRed; i++ { - nBitwise.AddNodeSpecificColor(&Tree{}, 1, []int{0}) + nBitwise.AddNodeSpecificColor(NewTree, 1, []int{0}) } for i := 0; i < numByzantine; i++ { - nBitwise.AddNodeSpecificColor(&Byzantine{}, 1, []int{0}) + nBitwise.AddNodeSpecificColor(NewByzantine, 1, []int{0}) } for !nBitwise.Finalized() { @@ -49,8 +50,6 @@ func TestSnowballGovernance(t *testing.T) { if _, ok := node.(*Byzantine); ok { continue } - if node.Preference() != nBitwise.colors[0] { - t.Fatalf("Wrong preferences") - } + require.Equal(nBitwise.colors[0], node.Preference()) } } diff --git a/avalanchego/snow/consensus/snowball/consensus_test.go b/avalanchego/snow/consensus/snowball/consensus_test.go index 0944c68d..a7c8d4e3 100644 --- a/avalanchego/snow/consensus/snowball/consensus_test.go +++ 
b/avalanchego/snow/consensus/snowball/consensus_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowball @@ -16,16 +16,18 @@ var ( _ Consensus = (*Byzantine)(nil) ) +func NewByzantine(_ Factory, _ Parameters, choice ids.ID) Consensus { + return &Byzantine{ + preference: choice, + } +} + // Byzantine is a naive implementation of a multi-choice snowball instance type Byzantine struct { // Hardcode the preference preference ids.ID } -func (b *Byzantine) Initialize(_ Parameters, choice ids.ID) { - b.preference = choice -} - func (*Byzantine) Add(ids.ID) {} func (b *Byzantine) Preference() ids.ID { diff --git a/avalanchego/snow/consensus/snowball/factory.go b/avalanchego/snow/consensus/snowball/factory.go index 716f8f6a..1de693b4 100644 --- a/avalanchego/snow/consensus/snowball/factory.go +++ b/avalanchego/snow/consensus/snowball/factory.go @@ -1,9 +1,35 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package snowball -// Factory returns new instances of Consensus -type Factory interface { - New() Consensus +import "github.com/ava-labs/avalanchego/ids" + +var ( + SnowballFactory Factory = snowballFactory{} + SnowflakeFactory Factory = snowflakeFactory{} +) + +type snowballFactory struct{} + +func (snowballFactory) NewNnary(params Parameters, choice ids.ID) Nnary { + sb := newNnarySnowball(params.BetaVirtuous, params.BetaRogue, choice) + return &sb +} + +func (snowballFactory) NewUnary(params Parameters) Unary { + sb := newUnarySnowball(params.BetaVirtuous) + return &sb +} + +type snowflakeFactory struct{} + +func (snowflakeFactory) NewNnary(params Parameters, choice ids.ID) Nnary { + sf := newNnarySnowflake(params.BetaVirtuous, params.BetaRogue, choice) + return &sf +} + +func (snowflakeFactory) NewUnary(params Parameters) Unary { + sf := newUnarySnowflake(params.BetaVirtuous) + return &sf } diff --git a/avalanchego/snow/consensus/snowball/flat.go b/avalanchego/snow/consensus/snowball/flat.go index cf08c9bd..01b5975c 100644 --- a/avalanchego/snow/consensus/snowball/flat.go +++ b/avalanchego/snow/consensus/snowball/flat.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package snowball @@ -8,38 +8,37 @@ import ( "github.com/ava-labs/avalanchego/utils/bag" ) -var ( - _ Factory = (*FlatFactory)(nil) - _ Consensus = (*Flat)(nil) -) - -// FlatFactory implements Factory by returning a flat struct -type FlatFactory struct{} +var _ Consensus = (*Flat)(nil) -func (FlatFactory) New() Consensus { - return &Flat{} +func NewFlat(factory Factory, params Parameters, choice ids.ID) Consensus { + return &Flat{ + Nnary: factory.NewNnary(params, choice), + params: params, + } } -// Flat is a naive implementation of a multi-choice snowball instance +// Flat is a naive implementation of a multi-choice snow instance type Flat struct { - // wraps the n-nary snowball logic - nnarySnowball + // wraps the n-nary snow logic + Nnary - // params contains all the configurations of a snowball instance + // params contains all the configurations of a snow instance params Parameters } -func (f *Flat) Initialize(params Parameters, choice ids.ID) { - f.nnarySnowball.Initialize(params.BetaVirtuous, params.BetaRogue, choice) - f.params = params -} - func (f *Flat) RecordPoll(votes bag.Bag[ids.ID]) bool { - if pollMode, numVotes := votes.Mode(); numVotes >= f.params.Alpha { + pollMode, numVotes := votes.Mode() + switch { + // AlphaConfidence is guaranteed to be >= AlphaPreference, so we must check + // if the poll had enough votes to increase the confidence first. + case numVotes >= f.params.AlphaConfidence: f.RecordSuccessfulPoll(pollMode) return true + case numVotes >= f.params.AlphaPreference: + f.RecordPollPreference(pollMode) + return true + default: + f.RecordUnsuccessfulPoll() + return false } - - f.RecordUnsuccessfulPoll() - return false } diff --git a/avalanchego/snow/consensus/snowball/flat_test.go b/avalanchego/snow/consensus/snowball/flat_test.go index c5d292d1..3f6eab8d 100644 --- a/avalanchego/snow/consensus/snowball/flat_test.go +++ b/avalanchego/snow/consensus/snowball/flat_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. 
All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowball @@ -8,7 +8,6 @@ import ( "github.com/stretchr/testify/require" - "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/bag" ) @@ -16,36 +15,48 @@ func TestFlat(t *testing.T) { require := require.New(t) params := Parameters{ - K: 2, Alpha: 2, BetaVirtuous: 1, BetaRogue: 2, + K: 3, + AlphaPreference: 2, + AlphaConfidence: 3, + BetaVirtuous: 1, + BetaRogue: 2, } - f := Flat{} - f.Initialize(params, Red) + f := NewFlat(SnowballFactory, params, Red) f.Add(Green) f.Add(Blue) require.Equal(Red, f.Preference()) require.False(f.Finalized()) - twoBlue := bag.Bag[ids.ID]{} - twoBlue.Add(Blue, Blue) - require.True(f.RecordPoll(twoBlue)) + threeBlue := bag.Of(Blue, Blue, Blue) + require.True(f.RecordPoll(threeBlue)) require.Equal(Blue, f.Preference()) require.False(f.Finalized()) - oneRedOneBlue := bag.Bag[ids.ID]{} - oneRedOneBlue.Add(Red, Blue) - require.False(f.RecordPoll(oneRedOneBlue)) + twoGreen := bag.Of(Green, Green) + require.True(f.RecordPoll(twoGreen)) require.Equal(Blue, f.Preference()) require.False(f.Finalized()) - require.True(f.RecordPoll(twoBlue)) - require.Equal(Blue, f.Preference()) + threeGreen := bag.Of(Green, Green, Green) + require.True(f.RecordPoll(threeGreen)) + require.Equal(Green, f.Preference()) require.False(f.Finalized()) - require.True(f.RecordPoll(twoBlue)) - require.Equal(Blue, f.Preference()) + // Reset the confidence from previous round + oneEach := bag.Of(Red, Green, Blue) + require.False(f.RecordPoll(oneEach)) + require.Equal(Green, f.Preference()) + require.False(f.Finalized()) + + require.True(f.RecordPoll(threeGreen)) + require.Equal(Green, f.Preference()) + require.False(f.Finalized()) // Not finalized before BetaRogue rounds + + require.True(f.RecordPoll(threeGreen)) + require.Equal(Green, f.Preference()) require.True(f.Finalized()) - expected := "SB(Preference = 
TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES, NumSuccessfulPolls = 3, SF(Confidence = 2, Finalized = true, SL(Preference = TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES)))" + expected := "SB(Preference = 2mcwQKiD8VEspmMJpL1dc7okQQ5dDVAWeCBZ7FWBFAbxpv3t7w, PreferenceStrength = 4, SF(Confidence = 2, Finalized = true, SL(Preference = 2mcwQKiD8VEspmMJpL1dc7okQQ5dDVAWeCBZ7FWBFAbxpv3t7w)))" require.Equal(expected, f.String()) } diff --git a/avalanchego/snow/consensus/snowball/network_test.go b/avalanchego/snow/consensus/snowball/network_test.go index 67000438..2bfb6df4 100644 --- a/avalanchego/snow/consensus/snowball/network_test.go +++ b/avalanchego/snow/consensus/snowball/network_test.go @@ -1,59 +1,75 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowball import ( - "math/rand" - "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/bag" "github.com/ava-labs/avalanchego/utils/sampler" ) +type newConsensusFunc func(factory Factory, params Parameters, choice ids.ID) Consensus + type Network struct { params Parameters colors []ids.ID + rngSource sampler.Source nodes, running []Consensus + factory Factory } -// Initialize sets the parameters for the network and adds [numColors] different -// possible colors to the network configuration. -func (n *Network) Initialize(params Parameters, numColors int) { - n.params = params +// Create a new network with [numColors] different possible colors to finalize. 
+func NewNetwork(factory Factory, params Parameters, numColors int, rngSource sampler.Source) *Network { + n := &Network{ + params: params, + rngSource: rngSource, + factory: factory, + } for i := 0; i < numColors; i++ { n.colors = append(n.colors, ids.Empty.Prefix(uint64(i))) } + return n } -func (n *Network) AddNode(sb Consensus) { - s := sampler.NewUniform() - _ = s.Initialize(uint64(len(n.colors))) +func (n *Network) AddNode(newConsensusFunc newConsensusFunc) Consensus { + s := sampler.NewDeterministicUniform(n.rngSource) + s.Initialize(uint64(len(n.colors))) indices, _ := s.Sample(len(n.colors)) - sb.Initialize(n.params, n.colors[int(indices[0])]) + + consensus := newConsensusFunc(n.factory, n.params, n.colors[int(indices[0])]) for _, index := range indices[1:] { - sb.Add(n.colors[int(index)]) + consensus.Add(n.colors[int(index)]) } - n.nodes = append(n.nodes, sb) - if !sb.Finalized() { - n.running = append(n.running, sb) + n.nodes = append(n.nodes, consensus) + if !consensus.Finalized() { + n.running = append(n.running, consensus) } + + return consensus } -// AddNodeSpecificColor adds [sb] to the network which will initially prefer -// [initialPreference] and additionally adds each of the specified [options] to -// consensus. -func (n *Network) AddNodeSpecificColor(sb Consensus, initialPreference int, options []int) { - sb.Initialize(n.params, n.colors[initialPreference]) +// AddNodeSpecificColor adds a new consensus instance to the network which will +// initially prefer [initialPreference] and additionally adds each of the +// specified [options] to consensus. 
+func (n *Network) AddNodeSpecificColor( + newConsensusFunc newConsensusFunc, + initialPreference int, + options []int, +) Consensus { + consensus := newConsensusFunc(n.factory, n.params, n.colors[initialPreference]) + for _, i := range options { - sb.Add(n.colors[i]) + consensus.Add(n.colors[i]) } - n.nodes = append(n.nodes, sb) - if !sb.Finalized() { - n.running = append(n.running, sb) + n.nodes = append(n.nodes, consensus) + if !consensus.Finalized() { + n.running = append(n.running, consensus) } + + return consensus } // Finalized returns true iff every node added to the network has finished @@ -66,15 +82,14 @@ func (n *Network) Finalized() bool { // performing an unbiased poll of the nodes in the network for that node. func (n *Network) Round() { if len(n.running) > 0 { - runningInd := rand.Intn(len(n.running)) // #nosec G404 + s := sampler.NewDeterministicUniform(n.rngSource) + + s.Initialize(uint64(len(n.running))) + runningInd, _ := s.Next() running := n.running[runningInd] - s := sampler.NewUniform() - _ = s.Initialize(uint64(len(n.nodes))) - count := len(n.nodes) - if count > n.params.K { - count = n.params.K - } + s.Initialize(uint64(len(n.nodes))) + count := min(n.params.K, len(n.nodes)) indices, _ := s.Sample(count) sampledColors := bag.Bag[ids.ID]{} for _, index := range indices { diff --git a/avalanchego/snow/consensus/snowball/nnary_slush.go b/avalanchego/snow/consensus/snowball/nnary_slush.go index b6fe581c..067e75c2 100644 --- a/avalanchego/snow/consensus/snowball/nnary_slush.go +++ b/avalanchego/snow/consensus/snowball/nnary_slush.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package snowball @@ -9,7 +9,11 @@ import ( "github.com/ava-labs/avalanchego/ids" ) -var _ NnarySlush = (*nnarySlush)(nil) +func newNnarySlush(choice ids.ID) nnarySlush { + return nnarySlush{ + preference: choice, + } +} // nnarySlush is the implementation of a slush instance with an unbounded number // of choices @@ -20,10 +24,6 @@ type nnarySlush struct { preference ids.ID } -func (sl *nnarySlush) Initialize(choice ids.ID) { - sl.preference = choice -} - func (sl *nnarySlush) Preference() ids.ID { return sl.preference } diff --git a/avalanchego/snow/consensus/snowball/nnary_snowball.go b/avalanchego/snow/consensus/snowball/nnary_snowball.go index b31a5f3d..77ee4844 100644 --- a/avalanchego/snow/consensus/snowball/nnary_snowball.go +++ b/avalanchego/snow/consensus/snowball/nnary_snowball.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowball @@ -9,30 +9,31 @@ import ( "github.com/ava-labs/avalanchego/ids" ) -var _ NnarySnowball = (*nnarySnowball)(nil) +var _ Nnary = (*nnarySnowball)(nil) + +func newNnarySnowball(betaVirtuous, betaRogue int, choice ids.ID) nnarySnowball { + return nnarySnowball{ + nnarySnowflake: newNnarySnowflake(betaVirtuous, betaRogue, choice), + preference: choice, + preferenceStrength: make(map[ids.ID]int), + } +} // nnarySnowball is a naive implementation of a multi-color snowball instance type nnarySnowball struct { // wrap the n-nary snowflake logic nnarySnowflake - // preference is the choice with the largest number of successful polls. - // Ties are broken by switching choice lazily + // preference is the choice with the largest number of polls which preferred + // it. 
Ties are broken by switching choice lazily preference ids.ID - // maxSuccessfulPolls maximum number of successful polls this instance has - // gotten for any choice - maxSuccessfulPolls int - - // numSuccessfulPolls tracks the total number of successful network polls of - // the choices - numSuccessfulPolls map[ids.ID]int -} + // maxPreferenceStrength is the maximum value stored in [preferenceStrength] + maxPreferenceStrength int -func (sb *nnarySnowball) Initialize(betaVirtuous, betaRogue int, choice ids.ID) { - sb.nnarySnowflake.Initialize(betaVirtuous, betaRogue, choice) - sb.preference = choice - sb.numSuccessfulPolls = make(map[ids.ID]int) + // preferenceStrength tracks the total number of network polls which + // preferred that choice + preferenceStrength map[ids.ID]int } func (sb *nnarySnowball) Preference() ids.ID { @@ -47,18 +48,26 @@ func (sb *nnarySnowball) Preference() ids.ID { } func (sb *nnarySnowball) RecordSuccessfulPoll(choice ids.ID) { - numSuccessfulPolls := sb.numSuccessfulPolls[choice] + 1 - sb.numSuccessfulPolls[choice] = numSuccessfulPolls - - if numSuccessfulPolls > sb.maxSuccessfulPolls { - sb.preference = choice - sb.maxSuccessfulPolls = numSuccessfulPolls - } - + sb.increasePreferenceStrength(choice) sb.nnarySnowflake.RecordSuccessfulPoll(choice) } +func (sb *nnarySnowball) RecordPollPreference(choice ids.ID) { + sb.increasePreferenceStrength(choice) + sb.nnarySnowflake.RecordPollPreference(choice) +} + func (sb *nnarySnowball) String() string { - return fmt.Sprintf("SB(Preference = %s, NumSuccessfulPolls = %d, %s)", - sb.preference, sb.maxSuccessfulPolls, &sb.nnarySnowflake) + return fmt.Sprintf("SB(Preference = %s, PreferenceStrength = %d, %s)", + sb.preference, sb.maxPreferenceStrength, &sb.nnarySnowflake) +} + +func (sb *nnarySnowball) increasePreferenceStrength(choice ids.ID) { + preferenceStrength := sb.preferenceStrength[choice] + 1 + sb.preferenceStrength[choice] = preferenceStrength + + if preferenceStrength > 
sb.maxPreferenceStrength { + sb.preference = choice + sb.maxPreferenceStrength = preferenceStrength + } } diff --git a/avalanchego/snow/consensus/snowball/nnary_snowball_test.go b/avalanchego/snow/consensus/snowball/nnary_snowball_test.go index 3798f4fd..18bea5ee 100644 --- a/avalanchego/snow/consensus/snowball/nnary_snowball_test.go +++ b/avalanchego/snow/consensus/snowball/nnary_snowball_test.go @@ -1,163 +1,129 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowball import ( "testing" + + "github.com/stretchr/testify/require" ) func TestNnarySnowball(t *testing.T) { + require := require.New(t) + betaVirtuous := 2 betaRogue := 2 - sb := nnarySnowball{} - sb.Initialize(betaVirtuous, betaRogue, Red) + sb := newNnarySnowball(betaVirtuous, betaRogue, Red) sb.Add(Blue) sb.Add(Green) - if pref := sb.Preference(); Red != pref { - t.Fatalf("Wrong preference. Expected %s got %s", Red, pref) - } else if sb.Finalized() { - t.Fatalf("Finalized too early") - } + require.Equal(Red, sb.Preference()) + require.False(sb.Finalized()) sb.RecordSuccessfulPoll(Blue) - - if pref := sb.Preference(); Blue != pref { - t.Fatalf("Wrong preference. Expected %s got %s", Blue, pref) - } else if sb.Finalized() { - t.Fatalf("Finalized too early") - } + require.Equal(Blue, sb.Preference()) + require.False(sb.Finalized()) sb.RecordSuccessfulPoll(Red) + require.Equal(Blue, sb.Preference()) + require.False(sb.Finalized()) - if pref := sb.Preference(); Blue != pref { - t.Fatalf("Wrong preference. 
Expected %s got %s", Blue, pref) - } else if sb.Finalized() { - t.Fatalf("Finalized too early") - } + sb.RecordPollPreference(Red) + require.Equal(Red, sb.Preference()) + require.False(sb.Finalized()) - sb.RecordSuccessfulPoll(Blue) + sb.RecordSuccessfulPoll(Red) + require.Equal(Red, sb.Preference()) + require.False(sb.Finalized()) - if pref := sb.Preference(); Blue != pref { - t.Fatalf("Wrong preference. Expected %s got %s", Blue, pref) - } else if sb.Finalized() { - t.Fatalf("Finalized too early") - } + sb.RecordPollPreference(Blue) + require.Equal(Red, sb.Preference()) + require.False(sb.Finalized()) sb.RecordSuccessfulPoll(Blue) + require.Equal(Red, sb.Preference()) + require.False(sb.Finalized()) - if pref := sb.Preference(); Blue != pref { - t.Fatalf("Wrong preference. Expected %s got %s", Blue, pref) - } else if !sb.Finalized() { - t.Fatalf("Should be finalized") - } + sb.RecordSuccessfulPoll(Blue) + require.Equal(Blue, sb.Preference()) + require.True(sb.Finalized()) } func TestVirtuousNnarySnowball(t *testing.T) { + require := require.New(t) + betaVirtuous := 1 betaRogue := 2 - sb := nnarySnowball{} - sb.Initialize(betaVirtuous, betaRogue, Red) + sb := newNnarySnowball(betaVirtuous, betaRogue, Red) - if pref := sb.Preference(); Red != pref { - t.Fatalf("Wrong preference. Expected %s got %s", Red, pref) - } else if sb.Finalized() { - t.Fatalf("Finalized too early") - } + require.Equal(Red, sb.Preference()) + require.False(sb.Finalized()) sb.RecordSuccessfulPoll(Red) - - if pref := sb.Preference(); Red != pref { - t.Fatalf("Wrong preference. 
Expected %s got %s", Red, pref) - } else if !sb.Finalized() { - t.Fatalf("Should be finalized") - } + require.Equal(Red, sb.Preference()) + require.True(sb.Finalized()) } func TestNarySnowballRecordUnsuccessfulPoll(t *testing.T) { + require := require.New(t) + betaVirtuous := 2 betaRogue := 2 - sb := nnarySnowball{} - sb.Initialize(betaVirtuous, betaRogue, Red) + sb := newNnarySnowball(betaVirtuous, betaRogue, Red) sb.Add(Blue) - if pref := sb.Preference(); Red != pref { - t.Fatalf("Wrong preference. Expected %s got %s", Red, pref) - } else if sb.Finalized() { - t.Fatalf("Finalized too early") - } + require.Equal(Red, sb.Preference()) + require.False(sb.Finalized()) sb.RecordSuccessfulPoll(Blue) - - if pref := sb.Preference(); Blue != pref { - t.Fatalf("Wrong preference. Expected %s got %s", Blue, pref) - } else if sb.Finalized() { - t.Fatalf("Finalized too early") - } + require.Equal(Blue, sb.Preference()) + require.False(sb.Finalized()) sb.RecordUnsuccessfulPoll() sb.RecordSuccessfulPoll(Blue) - if pref := sb.Preference(); Blue != pref { - t.Fatalf("Wrong preference. Expected %s got %s", Blue, pref) - } else if sb.Finalized() { - t.Fatalf("Finalized too early") - } + require.Equal(Blue, sb.Preference()) + require.False(sb.Finalized()) sb.RecordSuccessfulPoll(Blue) - if pref := sb.Preference(); Blue != pref { - t.Fatalf("Wrong preference. Expected %s got %s", Blue, pref) - } else if !sb.Finalized() { - t.Fatalf("Finalized too late") - } + require.Equal(Blue, sb.Preference()) + require.True(sb.Finalized()) - expected := "SB(Preference = TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES, NumSuccessfulPolls = 3, SF(Confidence = 2, Finalized = true, SL(Preference = TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES)))" - if str := sb.String(); str != expected { - t.Fatalf("Wrong state. 
Expected:\n%s\nGot:\n%s", expected, str) - } + expected := "SB(Preference = TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES, PreferenceStrength = 3, SF(Confidence = 2, Finalized = true, SL(Preference = TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES)))" + require.Equal(expected, sb.String()) for i := 0; i < 4; i++ { sb.RecordSuccessfulPoll(Red) - if pref := sb.Preference(); Blue != pref { - t.Fatalf("Wrong preference. Expected %s got %s", Blue, pref) - } else if !sb.Finalized() { - t.Fatalf("Finalized too late") - } + require.Equal(Blue, sb.Preference()) + require.True(sb.Finalized()) } } func TestNarySnowballDifferentSnowflakeColor(t *testing.T) { + require := require.New(t) + betaVirtuous := 2 betaRogue := 2 - sb := nnarySnowball{} - sb.Initialize(betaVirtuous, betaRogue, Red) + sb := newNnarySnowball(betaVirtuous, betaRogue, Red) sb.Add(Blue) - if pref := sb.Preference(); Red != pref { - t.Fatalf("Wrong preference. Expected %s got %s", Red, pref) - } else if sb.Finalized() { - t.Fatalf("Finalized too early") - } + require.Equal(Red, sb.Preference()) + require.False(sb.Finalized()) sb.RecordSuccessfulPoll(Blue) - if pref := sb.nnarySnowflake.Preference(); Blue != pref { - t.Fatalf("Wrong preference. Expected %s got %s", Blue, pref) - } + require.Equal(Blue, sb.nnarySnowflake.Preference()) sb.RecordSuccessfulPoll(Red) - if pref := sb.Preference(); Blue != pref { - t.Fatalf("Wrong preference. Expected %s got %s", Blue, pref) - } else if pref := sb.nnarySnowflake.Preference(); Red != pref { - t.Fatalf("Wrong preference. 
Expected %s got %s", Blue, pref) - } + require.Equal(Blue, sb.Preference()) + require.Equal(Red, sb.nnarySnowflake.Preference()) } diff --git a/avalanchego/snow/consensus/snowball/nnary_snowflake.go b/avalanchego/snow/consensus/snowball/nnary_snowflake.go index 57b32707..897b2bb1 100644 --- a/avalanchego/snow/consensus/snowball/nnary_snowflake.go +++ b/avalanchego/snow/consensus/snowball/nnary_snowflake.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowball @@ -9,7 +9,15 @@ import ( "github.com/ava-labs/avalanchego/ids" ) -var _ NnarySnowflake = (*nnarySnowflake)(nil) +var _ Nnary = (*nnarySnowflake)(nil) + +func newNnarySnowflake(betaVirtuous, betaRogue int, choice ids.ID) nnarySnowflake { + return nnarySnowflake{ + nnarySlush: newNnarySlush(choice), + betaVirtuous: betaVirtuous, + betaRogue: betaRogue, + } +} // nnarySnowflake is the implementation of a snowflake instance with an // unbounded number of choices @@ -37,12 +45,6 @@ type nnarySnowflake struct { finalized bool } -func (sf *nnarySnowflake) Initialize(betaVirtuous, betaRogue int, choice ids.ID) { - sf.nnarySlush.Initialize(choice) - sf.betaVirtuous = betaVirtuous - sf.betaRogue = betaRogue -} - func (sf *nnarySnowflake) Add(choice ids.ID) { sf.rogue = sf.rogue || choice != sf.preference } @@ -65,6 +67,15 @@ func (sf *nnarySnowflake) RecordSuccessfulPoll(choice ids.ID) { sf.nnarySlush.RecordSuccessfulPoll(choice) } +func (sf *nnarySnowflake) RecordPollPreference(choice ids.ID) { + if sf.finalized { + return // This instance is already decided. 
+ } + + sf.confidence = 0 + sf.nnarySlush.RecordSuccessfulPoll(choice) +} + func (sf *nnarySnowflake) RecordUnsuccessfulPoll() { sf.confidence = 0 } diff --git a/avalanchego/snow/consensus/snowball/nnary_snowflake_test.go b/avalanchego/snow/consensus/snowball/nnary_snowflake_test.go index 36febe49..5df8c296 100644 --- a/avalanchego/snow/consensus/snowball/nnary_snowflake_test.go +++ b/avalanchego/snow/consensus/snowball/nnary_snowflake_test.go @@ -1,134 +1,130 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowball import ( "testing" + + "github.com/stretchr/testify/require" ) func TestNnarySnowflake(t *testing.T) { + require := require.New(t) + betaVirtuous := 2 betaRogue := 2 - sf := nnarySnowflake{} - sf.Initialize(betaVirtuous, betaRogue, Red) + sf := newNnarySnowflake(betaVirtuous, betaRogue, Red) sf.Add(Blue) sf.Add(Green) - if pref := sf.Preference(); Red != pref { - t.Fatalf("Wrong preference. Expected %s got %s", Red, pref) - } else if sf.Finalized() { - t.Fatalf("Finalized too early") - } + require.Equal(Red, sf.Preference()) + require.False(sf.Finalized()) sf.RecordSuccessfulPoll(Blue) + require.Equal(Blue, sf.Preference()) + require.False(sf.Finalized()) - if pref := sf.Preference(); Blue != pref { - t.Fatalf("Wrong preference. Expected %s got %s", Blue, pref) - } else if sf.Finalized() { - t.Fatalf("Finalized too early") - } + sf.RecordPollPreference(Red) + require.Equal(Red, sf.Preference()) + require.False(sf.Finalized()) sf.RecordSuccessfulPoll(Red) - - if pref := sf.Preference(); Red != pref { - t.Fatalf("Wrong preference. 
Expected %s got %s", Red, pref) - } else if sf.Finalized() { - t.Fatalf("Finalized too early") - } + require.Equal(Red, sf.Preference()) + require.False(sf.Finalized()) sf.RecordSuccessfulPoll(Red) + require.Equal(Red, sf.Preference()) + require.True(sf.Finalized()) - if pref := sf.Preference(); Red != pref { - t.Fatalf("Wrong preference. Expected %s got %s", Red, pref) - } else if !sf.Finalized() { - t.Fatalf("Should be finalized") - } + sf.RecordPollPreference(Blue) + require.Equal(Red, sf.Preference()) + require.True(sf.Finalized()) sf.RecordSuccessfulPoll(Blue) + require.Equal(Red, sf.Preference()) + require.True(sf.Finalized()) +} + +func TestNnarySnowflakeConfidenceReset(t *testing.T) { + require := require.New(t) - if pref := sf.Preference(); Red != pref { - t.Fatalf("Wrong preference. Expected %s got %s", Red, pref) - } else if !sf.Finalized() { - t.Fatalf("Should be finalized") + betaVirtuous := 4 + betaRogue := 4 + + sf := newNnarySnowflake(betaVirtuous, betaRogue, Red) + sf.Add(Blue) + sf.Add(Green) + + require.Equal(Red, sf.Preference()) + require.False(sf.Finalized()) + + // Increase Blue's confidence without finalizing + for i := 0; i < betaRogue-1; i++ { + sf.RecordSuccessfulPoll(Blue) + require.Equal(Blue, sf.Preference()) + require.False(sf.Finalized()) } + + // Increase Red's confidence without finalizing + for i := 0; i < betaRogue-1; i++ { + sf.RecordSuccessfulPoll(Red) + require.Equal(Red, sf.Preference()) + require.False(sf.Finalized()) + } + + // One more round of voting for Red should accept Red + sf.RecordSuccessfulPoll(Red) + require.Equal(Red, sf.Preference()) + require.True(sf.Finalized()) } func TestVirtuousNnarySnowflake(t *testing.T) { + require := require.New(t) + betaVirtuous := 2 betaRogue := 3 - sb := nnarySnowflake{} - sb.Initialize(betaVirtuous, betaRogue, Red) - - if pref := sb.Preference(); Red != pref { - t.Fatalf("Wrong preference. 
Expected %s got %s", Red, pref) - } else if sb.Finalized() { - t.Fatalf("Finalized too early") - } + sb := newNnarySnowflake(betaVirtuous, betaRogue, Red) + require.Equal(Red, sb.Preference()) + require.False(sb.Finalized()) sb.RecordSuccessfulPoll(Red) - - if pref := sb.Preference(); Red != pref { - t.Fatalf("Wrong preference. Expected %s got %s", Red, pref) - } else if sb.Finalized() { - t.Fatalf("Finalized too early") - } + require.Equal(Red, sb.Preference()) + require.False(sb.Finalized()) sb.RecordSuccessfulPoll(Red) - - if pref := sb.Preference(); Red != pref { - t.Fatalf("Wrong preference. Expected %s got %s", Red, pref) - } else if !sb.Finalized() { - t.Fatalf("Should be finalized") - } + require.Equal(Red, sb.Preference()) + require.True(sb.Finalized()) } func TestRogueNnarySnowflake(t *testing.T) { + require := require.New(t) + betaVirtuous := 1 betaRogue := 2 - sb := nnarySnowflake{} - sb.Initialize(betaVirtuous, betaRogue, Red) - if sb.rogue { - t.Fatalf("Shouldn't be rogue") - } + sb := newNnarySnowflake(betaVirtuous, betaRogue, Red) + require.False(sb.rogue) sb.Add(Red) - if sb.rogue { - t.Fatalf("Shouldn't be rogue") - } + require.False(sb.rogue) sb.Add(Blue) - if !sb.rogue { - t.Fatalf("Should be rogue") - } + require.True(sb.rogue) sb.Add(Red) - if !sb.rogue { - t.Fatalf("Should be rogue") - } + require.True(sb.rogue) - if pref := sb.Preference(); Red != pref { - t.Fatalf("Wrong preference. Expected %s got %s", Red, pref) - } else if sb.Finalized() { - t.Fatalf("Finalized too early") - } + require.Equal(Red, sb.Preference()) + require.False(sb.Finalized()) sb.RecordSuccessfulPoll(Red) - - if pref := sb.Preference(); Red != pref { - t.Fatalf("Wrong preference. Expected %s got %s", Red, pref) - } else if sb.Finalized() { - t.Fatalf("Finalized too early") - } + require.Equal(Red, sb.Preference()) + require.False(sb.Finalized()) sb.RecordSuccessfulPoll(Red) - - if pref := sb.Preference(); Red != pref { - t.Fatalf("Wrong preference. 
Expected %s got %s", Red, pref) - } else if !sb.Finalized() { - t.Fatalf("Should be finalized") - } + require.Equal(Red, sb.Preference()) + require.True(sb.Finalized()) } diff --git a/avalanchego/snow/consensus/snowball/parameters.go b/avalanchego/snow/consensus/snowball/parameters.go index 460f9940..bf458fbf 100644 --- a/avalanchego/snow/consensus/snowball/parameters.go +++ b/avalanchego/snow/consensus/snowball/parameters.go @@ -1,38 +1,77 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowball import ( + "errors" "fmt" "time" ) const ( - errMsg = "" + - `__________ .___` + "\n" + - `\______ \____________ __| _/__.__.` + "\n" + - ` | | _/\_ __ \__ \ / __ < | |` + "\n" + - ` | | \ | | \// __ \_/ /_/ |\___ |` + "\n" + - ` |______ / |__| (____ /\____ |/ ____|` + "\n" + - ` \/ \/ \/\/` + "\n" + - "\n" + - ` 🏆 🏆 🏆 🏆 🏆 🏆 🏆` + "\n" + - ` ________ ________ ________________` + "\n" + - ` / _____/ \_____ \ / _ \__ ___/` + "\n" + - `/ \ ___ / | \ / /_\ \| |` + "\n" + - `\ \_\ \/ | \/ | \ |` + "\n" + - ` \______ /\_______ /\____|__ /____|` + "\n" + - ` \/ \/ \/` + "\n" + // MinPercentConnectedBuffer is the safety buffer for calculation of + // MinPercentConnected. This increases the required percentage above + // alpha/k. This value must be [0-1]. + // 0 means MinPercentConnected = alpha/k. + // 1 means MinPercentConnected = 1 (fully connected). + MinPercentConnectedBuffer = .2 + + errMsg = `__________ .___ +\______ \____________ __| _/__.__. 
+ | | _/\_ __ \__ \ / __ < | | + | | \ | | \// __ \_/ /_/ |\___ | + |______ / |__| (____ /\____ |/ ____| + \/ \/ \/\/ + + 🏆 🏆 🏆 🏆 🏆 🏆 🏆 + ________ ________ ________________ + / _____/ \_____ \ / _ \__ ___/ +/ \ ___ / | \ / /_\ \| | +\ \_\ \/ | \/ | \ | + \______ /\_______ /\____|__ /____| + \/ \/ \/ +` +) + +var ( + DefaultParameters = Parameters{ + K: 20, + AlphaPreference: 15, + AlphaConfidence: 15, + BetaVirtuous: 20, + BetaRogue: 20, + ConcurrentRepolls: 4, + OptimalProcessing: 10, + MaxOutstandingItems: 256, + MaxItemProcessingTime: 30 * time.Second, + } + + ErrParametersInvalid = errors.New("parameters invalid") ) // Parameters required for snowball consensus type Parameters struct { - K int `json:"k" yaml:"k"` - Alpha int `json:"alpha" yaml:"alpha"` - BetaVirtuous int `json:"betaVirtuous" yaml:"betaVirtuous"` - BetaRogue int `json:"betaRogue" yaml:"betaRogue"` + // K is the number of nodes to query and sample in a round. + K int `json:"k" yaml:"k"` + // Alpha is used for backwards compatibility purposes and is only referenced + // during json parsing. + Alpha *int `json:"alpha,omitempty" yaml:"alpha,omitempty"` + // AlphaPreference is the vote threshold to change your preference. + AlphaPreference int `json:"alphaPreference" yaml:"alphaPreference"` + // AlphaConfidence is the vote threshold to increase your confidence. + AlphaConfidence int `json:"alphaConfidence" yaml:"alphaConfidence"` + // BetaVirtuous is the number of consecutive successful queries required for + // finalization on a virtuous instance. + BetaVirtuous int `json:"betaVirtuous" yaml:"betaVirtuous"` + // BetaRogue is the number of consecutive successful queries required for + // finalization on a rogue instance. + BetaRogue int `json:"betaRogue" yaml:"betaRogue"` + // ConcurrentRepolls is the number of outstanding polls the engine will + // target to have while there is something processing. 
ConcurrentRepolls int `json:"concurrentRepolls" yaml:"concurrentRepolls"` + // OptimalProcessing is used to limit block creation when a large number of + // blocks are processing. OptimalProcessing int `json:"optimalProcessing" yaml:"optimalProcessing"` // Reports unhealthy if more than this number of items are outstanding. @@ -41,46 +80,54 @@ type Parameters struct { // Reports unhealthy if there is an item processing for longer than this // duration. MaxItemProcessingTime time.Duration `json:"maxItemProcessingTime" yaml:"maxItemProcessingTime"` - - // If this node is a validator, when a container is inserted into consensus, - // send a Push Query to this many validators and a Pull Query to the other - // k - MixedQueryNumPushVdr validators. Must be in [0, K]. - MixedQueryNumPushVdr int `json:"mixedQueryNumPushVdr" yaml:"mixedQueryNumPushVdr"` - - // If this node is not a validator, when a container is inserted into consensus, - // send a Push Query to this many validators and a Pull Query to the other - // k - MixedQueryNumPushVdr validators. Must be in [0, K]. - MixedQueryNumPushNonVdr int `json:"mixedQueryNumPushNonVdr" yaml:"mixedQueryNumPushNonVdr"` } // Verify returns nil if the parameters describe a valid initialization. +// +// An initialization is valid if the following conditions are met: +// +// - K/2 < AlphaPreference <= AlphaConfidence <= K +// - 0 < BetaVirtuous <= BetaRogue +// - 0 < ConcurrentRepolls <= BetaRogue +// - 0 < OptimalProcessing +// - 0 < MaxOutstandingItems +// - 0 < MaxItemProcessingTime +// +// Note: K/2 < K implies that 0 <= K/2, so we don't need an explicit check that +// AlphaPreference is positive. 
func (p Parameters) Verify() error { switch { - case p.Alpha <= p.K/2: - return fmt.Errorf("k = %d, alpha = %d: fails the condition that: k/2 < alpha", p.K, p.Alpha) - case p.K < p.Alpha: - return fmt.Errorf("k = %d, alpha = %d: fails the condition that: alpha <= k", p.K, p.Alpha) + case p.AlphaPreference <= p.K/2: + return fmt.Errorf("%w: k = %d, alphaPreference = %d: fails the condition that: k/2 < alphaPreference", ErrParametersInvalid, p.K, p.AlphaPreference) + case p.AlphaConfidence < p.AlphaPreference: + return fmt.Errorf("%w: alphaPreference = %d, alphaConfidence = %d: fails the condition that: alphaPreference <= alphaConfidence", ErrParametersInvalid, p.AlphaPreference, p.AlphaConfidence) + case p.K < p.AlphaConfidence: + return fmt.Errorf("%w: k = %d, alphaConfidence = %d: fails the condition that: alphaConfidence <= k", ErrParametersInvalid, p.K, p.AlphaConfidence) case p.BetaVirtuous <= 0: - return fmt.Errorf("betaVirtuous = %d: fails the condition that: 0 < betaVirtuous", p.BetaVirtuous) + return fmt.Errorf("%w: betaVirtuous = %d: fails the condition that: 0 < betaVirtuous", ErrParametersInvalid, p.BetaVirtuous) case p.BetaRogue == 3 && p.BetaVirtuous == 28: - return fmt.Errorf("betaVirtuous = %d, betaRogue = %d: fails the condition that: betaVirtuous <= betaRogue\n%s", p.BetaVirtuous, p.BetaRogue, errMsg) + return fmt.Errorf("%w: betaVirtuous = %d, betaRogue = %d: fails the condition that: betaVirtuous <= betaRogue\n%s", ErrParametersInvalid, p.BetaVirtuous, p.BetaRogue, errMsg) case p.BetaRogue < p.BetaVirtuous: - return fmt.Errorf("betaVirtuous = %d, betaRogue = %d: fails the condition that: betaVirtuous <= betaRogue", p.BetaVirtuous, p.BetaRogue) + return fmt.Errorf("%w: betaVirtuous = %d, betaRogue = %d: fails the condition that: betaVirtuous <= betaRogue", ErrParametersInvalid, p.BetaVirtuous, p.BetaRogue) case p.ConcurrentRepolls <= 0: - return fmt.Errorf("concurrentRepolls = %d: fails the condition that: 0 < concurrentRepolls", 
p.ConcurrentRepolls) + return fmt.Errorf("%w: concurrentRepolls = %d: fails the condition that: 0 < concurrentRepolls", ErrParametersInvalid, p.ConcurrentRepolls) case p.ConcurrentRepolls > p.BetaRogue: - return fmt.Errorf("concurrentRepolls = %d, betaRogue = %d: fails the condition that: concurrentRepolls <= betaRogue", p.ConcurrentRepolls, p.BetaRogue) + return fmt.Errorf("%w: concurrentRepolls = %d, betaRogue = %d: fails the condition that: concurrentRepolls <= betaRogue", ErrParametersInvalid, p.ConcurrentRepolls, p.BetaRogue) case p.OptimalProcessing <= 0: - return fmt.Errorf("optimalProcessing = %d: fails the condition that: 0 < optimalProcessing", p.OptimalProcessing) + return fmt.Errorf("%w: optimalProcessing = %d: fails the condition that: 0 < optimalProcessing", ErrParametersInvalid, p.OptimalProcessing) case p.MaxOutstandingItems <= 0: - return fmt.Errorf("maxOutstandingItems = %d: fails the condition that: 0 < maxOutstandingItems", p.MaxOutstandingItems) + return fmt.Errorf("%w: maxOutstandingItems = %d: fails the condition that: 0 < maxOutstandingItems", ErrParametersInvalid, p.MaxOutstandingItems) case p.MaxItemProcessingTime <= 0: - return fmt.Errorf("maxItemProcessingTime = %d: fails the condition that: 0 < maxItemProcessingTime", p.MaxItemProcessingTime) - case p.MixedQueryNumPushVdr > p.K: - return fmt.Errorf("mixedQueryNumPushVdr (%d) > K (%d)", p.MixedQueryNumPushVdr, p.K) - case p.MixedQueryNumPushNonVdr > p.K: - return fmt.Errorf("mixedQueryNumPushNonVdr (%d) > K (%d)", p.MixedQueryNumPushNonVdr, p.K) + return fmt.Errorf("%w: maxItemProcessingTime = %d: fails the condition that: 0 < maxItemProcessingTime", ErrParametersInvalid, p.MaxItemProcessingTime) default: return nil } } + +func (p Parameters) MinPercentConnectedHealthy() float64 { + // AlphaConfidence is used here to ensure that the node can still feasibly + // accept operations. If AlphaPreference were used, committing could be + // extremely unlikely to happen, even while healthy. 
+ alphaRatio := float64(p.AlphaConfidence) / float64(p.K) + return alphaRatio*(1-MinPercentConnectedBuffer) + MinPercentConnectedBuffer +} diff --git a/avalanchego/snow/consensus/snowball/parameters_test.go b/avalanchego/snow/consensus/snowball/parameters_test.go index 3fc2632c..525001fd 100644 --- a/avalanchego/snow/consensus/snowball/parameters_test.go +++ b/avalanchego/snow/consensus/snowball/parameters_test.go @@ -1,232 +1,287 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowball import ( - "fmt" - "strings" "testing" + + "github.com/stretchr/testify/require" ) func TestParametersVerify(t *testing.T) { - p := Parameters{ - K: 1, - Alpha: 1, - BetaVirtuous: 1, - BetaRogue: 1, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - } - - if err := p.Verify(); err != nil { - t.Fatal(err) - } -} - -func TestParametersAnotherVerify(t *testing.T) { - p := Parameters{ - K: 1, - Alpha: 1, - BetaVirtuous: 28, - BetaRogue: 30, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - } - - if err := p.Verify(); err != nil { - t.Fatal(err) - } -} - -func TestParametersYetAnotherVerify(t *testing.T) { - p := Parameters{ - K: 1, - Alpha: 1, - BetaVirtuous: 3, - BetaRogue: 3, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - } - - if err := p.Verify(); err != nil { - t.Fatal(err) - } -} - -func TestParametersInvalidK(t *testing.T) { - p := Parameters{ - K: 0, - Alpha: 1, - BetaVirtuous: 1, - BetaRogue: 1, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - } - - if err := p.Verify(); err == nil { - t.Fatalf("Should have failed due to invalid k") - } -} - -func TestParametersInvalidAlpha(t *testing.T) { - p := Parameters{ - K: 1, - Alpha: 0, 
- BetaVirtuous: 1, - BetaRogue: 1, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - } - - if err := p.Verify(); err == nil { - t.Fatalf("Should have failed due to invalid alpha") - } -} - -func TestParametersInvalidBetaVirtuous(t *testing.T) { - p := Parameters{ - K: 1, - Alpha: 1, - BetaVirtuous: 0, - BetaRogue: 1, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - } - - if err := p.Verify(); err == nil { - t.Fatalf("Should have failed due to invalid beta virtuous") - } -} - -func TestParametersInvalidBetaRogue(t *testing.T) { - p := Parameters{ - K: 1, - Alpha: 1, - BetaVirtuous: 1, - BetaRogue: 0, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - } - - if err := p.Verify(); err == nil { - t.Fatalf("Should have failed due to invalid beta rogue") - } -} - -func TestParametersAnotherInvalidBetaRogue(t *testing.T) { - p := Parameters{ - K: 1, - Alpha: 1, - BetaVirtuous: 28, - BetaRogue: 3, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - } - - if err := p.Verify(); err == nil { - t.Fatalf("Should have failed due to invalid beta rogue") - } else if !strings.Contains(err.Error(), "\n") { - t.Fatalf("Should have described the extensive error") - } -} - -func TestParametersInvalidConcurrentRepolls(t *testing.T) { - tests := []Parameters{ - { - K: 1, - Alpha: 1, - BetaVirtuous: 1, - BetaRogue: 1, - ConcurrentRepolls: 2, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - }, - { - K: 1, - Alpha: 1, - BetaVirtuous: 1, - BetaRogue: 1, - ConcurrentRepolls: 0, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, + tests := []struct { + name string + params Parameters + expectedError error + }{ + { + name: "valid", + params: Parameters{ + K: 1, + AlphaPreference: 1, + AlphaConfidence: 1, + BetaVirtuous: 1, + 
BetaRogue: 1, + ConcurrentRepolls: 1, + OptimalProcessing: 1, + MaxOutstandingItems: 1, + MaxItemProcessingTime: 1, + }, + expectedError: nil, + }, + { + name: "invalid K", + params: Parameters{ + K: 0, + AlphaPreference: 1, + AlphaConfidence: 1, + BetaVirtuous: 1, + BetaRogue: 1, + ConcurrentRepolls: 1, + OptimalProcessing: 1, + MaxOutstandingItems: 1, + MaxItemProcessingTime: 1, + }, + expectedError: ErrParametersInvalid, + }, + { + name: "invalid AlphaPreference 1", + params: Parameters{ + K: 2, + AlphaPreference: 1, + AlphaConfidence: 1, + BetaVirtuous: 1, + BetaRogue: 1, + ConcurrentRepolls: 1, + OptimalProcessing: 1, + MaxOutstandingItems: 1, + MaxItemProcessingTime: 1, + }, + expectedError: ErrParametersInvalid, + }, + { + name: "invalid AlphaPreference 0", + params: Parameters{ + K: 1, + AlphaPreference: 0, + AlphaConfidence: 1, + BetaVirtuous: 1, + BetaRogue: 1, + ConcurrentRepolls: 1, + OptimalProcessing: 1, + MaxOutstandingItems: 1, + MaxItemProcessingTime: 1, + }, + expectedError: ErrParametersInvalid, + }, + { + name: "invalid AlphaConfidence", + params: Parameters{ + K: 3, + AlphaPreference: 3, + AlphaConfidence: 2, + BetaVirtuous: 1, + BetaRogue: 1, + ConcurrentRepolls: 1, + OptimalProcessing: 1, + MaxOutstandingItems: 1, + MaxItemProcessingTime: 1, + }, + expectedError: ErrParametersInvalid, + }, + { + name: "invalid BetaVirtuous", + params: Parameters{ + K: 1, + AlphaPreference: 1, + AlphaConfidence: 1, + BetaVirtuous: 0, + BetaRogue: 1, + ConcurrentRepolls: 1, + OptimalProcessing: 1, + MaxOutstandingItems: 1, + MaxItemProcessingTime: 1, + }, + expectedError: ErrParametersInvalid, + }, + { + name: "first half fun BetaRogue", + params: Parameters{ + K: 1, + AlphaPreference: 1, + AlphaConfidence: 1, + BetaVirtuous: 28, + BetaRogue: 30, + ConcurrentRepolls: 1, + OptimalProcessing: 1, + MaxOutstandingItems: 1, + MaxItemProcessingTime: 1, + }, + expectedError: nil, + }, + { + name: "second half fun BetaRogue", + params: Parameters{ + K: 1, + 
AlphaPreference: 1, + AlphaConfidence: 1, + BetaVirtuous: 2, + BetaRogue: 3, + ConcurrentRepolls: 1, + OptimalProcessing: 1, + MaxOutstandingItems: 1, + MaxItemProcessingTime: 1, + }, + expectedError: nil, + }, + { + name: "fun invalid BetaRogue", + params: Parameters{ + K: 1, + AlphaPreference: 1, + AlphaConfidence: 1, + BetaVirtuous: 28, + BetaRogue: 3, + ConcurrentRepolls: 1, + OptimalProcessing: 1, + MaxOutstandingItems: 1, + MaxItemProcessingTime: 1, + }, + expectedError: ErrParametersInvalid, + }, + { + name: "invalid BetaRogue", + params: Parameters{ + K: 1, + AlphaPreference: 1, + AlphaConfidence: 1, + BetaVirtuous: 2, + BetaRogue: 1, + ConcurrentRepolls: 1, + OptimalProcessing: 1, + MaxOutstandingItems: 1, + MaxItemProcessingTime: 1, + }, + expectedError: ErrParametersInvalid, + }, + { + name: "too few ConcurrentRepolls", + params: Parameters{ + K: 1, + AlphaPreference: 1, + AlphaConfidence: 1, + BetaVirtuous: 1, + BetaRogue: 1, + ConcurrentRepolls: 0, + OptimalProcessing: 1, + MaxOutstandingItems: 1, + MaxItemProcessingTime: 1, + }, + expectedError: ErrParametersInvalid, + }, + { + name: "too many ConcurrentRepolls", + params: Parameters{ + K: 1, + AlphaPreference: 1, + AlphaConfidence: 1, + BetaVirtuous: 1, + BetaRogue: 1, + ConcurrentRepolls: 2, + OptimalProcessing: 1, + MaxOutstandingItems: 1, + MaxItemProcessingTime: 1, + }, + expectedError: ErrParametersInvalid, + }, + { + name: "invalid OptimalProcessing", + params: Parameters{ + K: 1, + AlphaPreference: 1, + AlphaConfidence: 1, + BetaVirtuous: 1, + BetaRogue: 1, + ConcurrentRepolls: 1, + OptimalProcessing: 0, + MaxOutstandingItems: 1, + MaxItemProcessingTime: 1, + }, + expectedError: ErrParametersInvalid, + }, + { + name: "invalid MaxOutstandingItems", + params: Parameters{ + K: 1, + AlphaPreference: 1, + AlphaConfidence: 1, + BetaVirtuous: 1, + BetaRogue: 1, + ConcurrentRepolls: 1, + OptimalProcessing: 1, + MaxOutstandingItems: 0, + MaxItemProcessingTime: 1, + }, + expectedError: 
ErrParametersInvalid, + }, + { + name: "invalid MaxItemProcessingTime", + params: Parameters{ + K: 1, + AlphaPreference: 1, + AlphaConfidence: 1, + BetaVirtuous: 1, + BetaRogue: 1, + ConcurrentRepolls: 1, + OptimalProcessing: 1, + MaxOutstandingItems: 1, + MaxItemProcessingTime: 0, + }, + expectedError: ErrParametersInvalid, }, } - for _, p := range tests { - label := fmt.Sprintf("ConcurrentRepolls=%d", p.ConcurrentRepolls) - t.Run(label, func(t *testing.T) { - if err := p.Verify(); err == nil { - t.Error("Should have failed due to invalid concurrent repolls") - } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + err := test.params.Verify() + require.ErrorIs(t, err, test.expectedError) }) } } -func TestParametersInvalidOptimalProcessing(t *testing.T) { - p := Parameters{ - K: 1, - Alpha: 1, - BetaVirtuous: 1, - BetaRogue: 1, - ConcurrentRepolls: 1, - OptimalProcessing: 0, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - } - - if err := p.Verify(); err == nil { - t.Fatalf("Should have failed due to invalid optimal processing") - } -} - -func TestParametersInvalidMaxOutstandingItems(t *testing.T) { - p := Parameters{ - K: 1, - Alpha: 1, - BetaVirtuous: 1, - BetaRogue: 1, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 0, - MaxItemProcessingTime: 1, - } - - if err := p.Verify(); err == nil { - t.Fatalf("Should have failed due to invalid max outstanding items") - } -} - -func TestParametersInvalidMaxItemProcessingTime(t *testing.T) { - p := Parameters{ - K: 1, - Alpha: 1, - BetaVirtuous: 1, - BetaRogue: 1, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 0, +func TestParametersMinPercentConnectedHealthy(t *testing.T) { + tests := []struct { + name string + params Parameters + expectedMinPercentConnected float64 + }{ + { + name: "default", + params: DefaultParameters, + expectedMinPercentConnected: 0.8, + }, + { + name: "custom", + params: Parameters{ + K: 5, + 
AlphaConfidence: 4, + }, + expectedMinPercentConnected: 0.84, + }, + { + name: "custom", + params: Parameters{ + K: 1001, + AlphaConfidence: 501, + }, + expectedMinPercentConnected: 0.6, + }, } - if err := p.Verify(); err == nil { - t.Fatalf("Should have failed due to invalid max item processing time") + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + minStake := tt.params.MinPercentConnectedHealthy() + require.InEpsilon(t, tt.expectedMinPercentConnected, minStake, .001) + }) } } diff --git a/avalanchego/snow/consensus/snowball/tree.go b/avalanchego/snow/consensus/snowball/tree.go index 1e9aa701..4b8d8cfe 100644 --- a/avalanchego/snow/consensus/snowball/tree.go +++ b/avalanchego/snow/consensus/snowball/tree.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowball @@ -12,30 +12,37 @@ import ( ) var ( - _ Factory = (*TreeFactory)(nil) _ Consensus = (*Tree)(nil) _ node = (*unaryNode)(nil) _ node = (*binaryNode)(nil) ) -// TreeFactory implements Factory by returning a tree struct -type TreeFactory struct{} +func NewTree(factory Factory, params Parameters, choice ids.ID) Consensus { + t := &Tree{ + params: params, + factory: factory, + } + t.node = &unaryNode{ + tree: t, + preference: choice, + commonPrefix: ids.NumBits, // The initial state has no conflicts + snow: factory.NewUnary(params), + } -func (TreeFactory) New() Consensus { - return &Tree{} + return t } -// Tree implements the snowball interface by using a modified patricia tree. +// Tree implements the Consensus interface by using a modified patricia tree. type Tree struct { - // node is the root that represents the first snowball instance in the tree, - // and contains references to all the other snowball instances in the tree. 
+ // node is the root that represents the first snow instance in the tree, + // and contains references to all the other snow instances in the tree. node - // params contains all the configurations of a snowball instance + // params contains all the configurations of a snow instance params Parameters // shouldReset is used as an optimization to prevent needless tree - // traversals. If a snowball instance does not get an alpha majority, that + // traversals. If a snow instance does not get an alpha majority, that // instance needs to reset by calling RecordUnsuccessfulPoll. Because the // tree splits votes based on the branch, when an instance doesn't get an // alpha majority none of the children of this instance can get an alpha @@ -44,20 +51,9 @@ type Tree struct { // that any later traversal into this sub-tree should call // RecordUnsuccessfulPoll before performing any other action. shouldReset bool -} -func (t *Tree) Initialize(params Parameters, choice ids.ID) { - t.params = params - - snowball := &unarySnowball{} - snowball.Initialize(params.BetaVirtuous) - - t.node = &unaryNode{ - tree: t, - preference: choice, - commonPrefix: ids.NumBits, // The initial state has no conflicts - snowball: snowball, - } + // factory is used to produce new snow instances as needed + factory Factory } func (t *Tree) Add(choice ids.ID) { @@ -80,11 +76,11 @@ func (t *Tree) RecordPoll(votes bag.Bag[ids.ID]) bool { }) // Now that the votes have been restricted to valid votes, pass them into - // the first snowball instance + // the first snow instance var successful bool t.node, successful = t.node.RecordPoll(filteredVotes, t.shouldReset) - // Because we just passed the reset into the snowball instance, we should no + // Because we just passed the reset into the snow instance, we should no // longer reset. t.shouldReset = false return successful @@ -143,7 +139,7 @@ type node interface { } // unary is a node with either no children, or a single child. 
It handles the -// voting on a range of identical, virtuous, snowball instances. +// voting on a range of identical, virtuous, snow instances. type unaryNode struct { // tree references the tree that contains this node tree *Tree @@ -159,8 +155,8 @@ type unaryNode struct { // references commonPrefix int // Will be in the range (decidedPrefix, 256) - // snowball wraps the snowball logic - snowball UnarySnowball + // snow wraps the unary decision logic + snow Unary // shouldReset is used as an optimization to prevent needless tree // traversals. It is the continuation of shouldReset in the Tree struct. @@ -179,7 +175,7 @@ func (u *unaryNode) DecidedPrefix() int { return u.decidedPrefix } -//nolint:gofmt,gofumpt,goimports // this comment is formatted as intended +//nolint:gci,gofmt,gofumpt // this comment is formatted as intended // // This is by far the most complicated function in this algorithm. // The intuition is that this instance represents a series of consecutive unary @@ -334,8 +330,8 @@ func (u *unaryNode) Add(newChoice ids.ID) node { return u // Only happens if the tree is finalized, or it's a leaf node } - if index, found := ids.FirstDifferenceSubset( - u.decidedPrefix, u.commonPrefix, u.preference, newChoice); !found { + index, found := ids.FirstDifferenceSubset(u.decidedPrefix, u.commonPrefix, u.preference, newChoice) + if !found { // If the first difference doesn't exist, then this node shouldn't be // split if u.child != nil { @@ -346,69 +342,67 @@ func (u *unaryNode) Add(newChoice ids.ID) node { } // if u.child is nil, then we are attempting to add the same choice into // the tree, which should be a noop - } else { - // The difference was found, so this node must be split - - bit := u.preference.Bit(uint(index)) // The currently preferred bit - b := &binaryNode{ - tree: u.tree, - bit: index, - snowball: u.snowball.Extend(u.tree.params.BetaRogue, bit), - shouldReset: [2]bool{u.shouldReset, u.shouldReset}, - } - b.preferences[bit] = u.preference - 
b.preferences[1-bit] = newChoice + return u + } - newChildSnowball := &unarySnowball{} - newChildSnowball.Initialize(u.tree.params.BetaVirtuous) - newChild := &unaryNode{ - tree: u.tree, - preference: newChoice, - decidedPrefix: index + 1, // The new child assumes this branch has decided in it's favor - commonPrefix: ids.NumBits, // The new child has no conflicts under this branch - snowball: newChildSnowball, - } + // The difference was found, so this node must be split + bit := u.preference.Bit(uint(index)) // The currently preferred bit + b := &binaryNode{ + tree: u.tree, + bit: index, + snow: u.snow.Extend(u.tree.params.BetaRogue, bit), + shouldReset: [2]bool{u.shouldReset, u.shouldReset}, + } + b.preferences[bit] = u.preference + b.preferences[1-bit] = newChoice + + newChildSnow := u.tree.factory.NewUnary(u.tree.params) + newChild := &unaryNode{ + tree: u.tree, + preference: newChoice, + decidedPrefix: index + 1, // The new child assumes this branch has decided in it's favor + commonPrefix: ids.NumBits, // The new child has no conflicts under this branch + snow: newChildSnow, + } - switch { - case u.decidedPrefix == u.commonPrefix-1: - // This node was only voting over one bit. (Case 2. from above) - b.children[bit] = u.child - if u.child != nil { - b.children[1-bit] = newChild - } - return b - case index == u.decidedPrefix: - // This node was split on the first bit. (Case 3. from above) - u.decidedPrefix++ - b.children[bit] = u + switch { + case u.decidedPrefix == u.commonPrefix-1: + // This node was only voting over one bit. (Case 2. from above) + b.children[bit] = u.child + if u.child != nil { b.children[1-bit] = newChild - return b - case index == u.commonPrefix-1: - // This node was split on the last bit. (Case 4. from above) - u.commonPrefix-- - b.children[bit] = u.child - if u.child != nil { - b.children[1-bit] = newChild - } - u.child = b - return u - default: - // This node was split on an interior bit. (Case 5. 
from above) - originalDecidedPrefix := u.decidedPrefix - u.decidedPrefix = index + 1 - b.children[bit] = u + } + return b + case index == u.decidedPrefix: + // This node was split on the first bit. (Case 3. from above) + u.decidedPrefix++ + b.children[bit] = u + b.children[1-bit] = newChild + return b + case index == u.commonPrefix-1: + // This node was split on the last bit. (Case 4. from above) + u.commonPrefix-- + b.children[bit] = u.child + if u.child != nil { b.children[1-bit] = newChild - return &unaryNode{ - tree: u.tree, - preference: u.preference, - decidedPrefix: originalDecidedPrefix, - commonPrefix: index, - snowball: u.snowball.Clone(), - child: b, - } + } + u.child = b + return u + default: + // This node was split on an interior bit. (Case 5. from above) + originalDecidedPrefix := u.decidedPrefix + u.decidedPrefix = index + 1 + b.children[bit] = u + b.children[1-bit] = newChild + return &unaryNode{ + tree: u.tree, + preference: u.preference, + decidedPrefix: originalDecidedPrefix, + commonPrefix: index, + snow: u.snow.Clone(), + child: b, } } - return u // Do nothing, the choice was already rejected } func (u *unaryNode) RecordPoll(votes bag.Bag[ids.ID], reset bool) (node, bool) { @@ -418,21 +412,26 @@ func (u *unaryNode) RecordPoll(votes bag.Bag[ids.ID], reset bool) (node, bool) { // If my parent didn't get enough votes previously, then neither did I if reset { - u.snowball.RecordUnsuccessfulPoll() + u.snow.RecordUnsuccessfulPoll() u.shouldReset = true // Make sure my child is also reset correctly } - if votes.Len() < u.tree.params.Alpha { + switch numVotes := votes.Len(); { + case numVotes >= u.tree.params.AlphaConfidence: + // I got enough votes to increase my confidence + u.snow.RecordSuccessfulPoll() + case numVotes >= u.tree.params.AlphaPreference: + // I got enough votes to update my preference, but not increase my + // confidence. 
+ u.snow.RecordPollPreference() + default: // I didn't get enough votes, I must reset and my child must reset as // well - u.snowball.RecordUnsuccessfulPoll() + u.snow.RecordUnsuccessfulPoll() u.shouldReset = true return u, false } - // I got enough votes this time - u.snowball.RecordSuccessfulPoll() - if u.child != nil { // We are guaranteed that u.commonPrefix will equal // u.child.DecidedPrefix(). Otherwise, there must have been a @@ -458,12 +457,12 @@ func (u *unaryNode) RecordPoll(votes bag.Bag[ids.ID], reset bool) (node, bool) { } func (u *unaryNode) Finalized() bool { - return u.snowball.Finalized() + return u.snow.Finalized() } func (u *unaryNode) Printable() (string, []node) { s := fmt.Sprintf("%s Bits = [%d, %d)", - u.snowball, u.decidedPrefix, u.commonPrefix) + u.snow, u.decidedPrefix, u.commonPrefix) if u.child == nil { return s, nil } @@ -471,7 +470,7 @@ func (u *unaryNode) Printable() (string, []node) { } // binaryNode is a node with either no children, or two children. It handles the -// voting of a single, rogue, snowball instance. +// voting of a single, rogue, snow instance. type binaryNode struct { // tree references the tree that contains this node tree *Tree @@ -483,8 +482,8 @@ type binaryNode struct { // bit is the index in the id of the choice this node is deciding on bit int // Will be in the range [0, 256) - // snowball wraps the snowball logic - snowball BinarySnowball + // snow wraps the binary decision logic + snow Binary // shouldReset is used as an optimization to prevent needless tree // traversals. It is the continuation of shouldReset in the Tree struct. 
@@ -496,7 +495,7 @@ type binaryNode struct { } func (b *binaryNode) Preference() ids.ID { - return b.preferences[b.snowball.Preference()] + return b.preferences[b.snow.Preference()] } func (b *binaryNode) DecidedPrefix() int { @@ -530,28 +529,33 @@ func (b *binaryNode) RecordPoll(votes bag.Bag[ids.ID], reset bool) (node, bool) bit := 0 // We only care about which bit is set if a successful poll can happen - if splitVotes[1].Len() >= b.tree.params.Alpha { + if splitVotes[1].Len() >= b.tree.params.AlphaPreference { bit = 1 } if reset { - b.snowball.RecordUnsuccessfulPoll() + b.snow.RecordUnsuccessfulPoll() b.shouldReset[bit] = true // 1-bit isn't set here because it is set below anyway } b.shouldReset[1-bit] = true // They didn't get the threshold of votes prunedVotes := splitVotes[bit] - if prunedVotes.Len() < b.tree.params.Alpha { - b.snowball.RecordUnsuccessfulPoll() + switch numVotes := prunedVotes.Len(); { + case numVotes >= b.tree.params.AlphaConfidence: + // I got enough votes to increase my confidence. + b.snow.RecordSuccessfulPoll(bit) + case numVotes >= b.tree.params.AlphaPreference: + // I got enough votes to update my preference, but not increase my + // confidence. + b.snow.RecordPollPreference(bit) + default: + b.snow.RecordUnsuccessfulPoll() // The winning child didn't get enough votes either b.shouldReset[bit] = true return b, false } - // This bit got alpha votes, it was a successful poll - b.snowball.RecordSuccessfulPoll(bit) - if child := b.children[bit]; child != nil { // The votes are filtered to ensure that they are votes that should // count for the child @@ -561,7 +565,7 @@ func (b *binaryNode) RecordPoll(votes bag.Bag[ids.ID], reset bool) (node, bool) }) newChild, _ := child.RecordPoll(filteredVotes, b.shouldReset[bit]) - if b.snowball.Finalized() { + if b.snow.Finalized() { // If we are decided here, that means we must have decided due // to this poll. Therefore, we must have decided on bit. 
return newChild, true @@ -574,11 +578,11 @@ func (b *binaryNode) RecordPoll(votes bag.Bag[ids.ID], reset bool) (node, bool) } func (b *binaryNode) Finalized() bool { - return b.snowball.Finalized() + return b.snow.Finalized() } func (b *binaryNode) Printable() (string, []node) { - s := fmt.Sprintf("%s Bit = %d", b.snowball, b.bit) + s := fmt.Sprintf("%s Bit = %d", b.snow, b.bit) if b.children[0] == nil { return s, nil } diff --git a/avalanchego/snow/consensus/snowball/tree_test.go b/avalanchego/snow/consensus/snowball/tree_test.go index b11b3286..1d337afd 100644 --- a/avalanchego/snow/consensus/snowball/tree_test.go +++ b/avalanchego/snow/consensus/snowball/tree_test.go @@ -1,6 +1,7 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. +//nolint:goconst package snowball import ( @@ -8,29 +9,29 @@ import ( "testing" "github.com/stretchr/testify/require" + "gonum.org/v1/gonum/mathext/prng" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/bag" - "github.com/ava-labs/avalanchego/utils/sampler" ) -const ( - initialUnaryDescription = "SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [0, 256)" -) +const initialUnaryDescription = "SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [0, 256)" func TestSnowballSingleton(t *testing.T) { require := require.New(t) params := Parameters{ - K: 1, Alpha: 1, BetaVirtuous: 2, BetaRogue: 5, + K: 1, + AlphaPreference: 1, + AlphaConfidence: 1, + BetaVirtuous: 2, + BetaRogue: 5, } - tree := Tree{} - tree.Initialize(params, Red) + tree := NewTree(SnowballFactory, params, Red) require.False(tree.Finalized()) - oneRed := bag.Bag[ids.ID]{} - oneRed.Add(Red) + oneRed := bag.Of(Red) require.True(tree.RecordPoll(oneRed)) require.False(tree.Finalized()) @@ -51,8 +52,7 @@ func TestSnowballSingleton(t *testing.T) { // Because the tree is already 
finalized, RecordPoll can return either true // or false. - oneBlue := bag.Bag[ids.ID]{} - oneBlue.Add(Blue) + oneBlue := bag.Of(Blue) tree.RecordPoll(oneBlue) require.Equal(Red, tree.Preference()) require.True(tree.Finalized()) @@ -62,15 +62,17 @@ func TestSnowballRecordUnsuccessfulPoll(t *testing.T) { require := require.New(t) params := Parameters{ - K: 1, Alpha: 1, BetaVirtuous: 3, BetaRogue: 5, + K: 1, + AlphaPreference: 1, + AlphaConfidence: 1, + BetaVirtuous: 3, + BetaRogue: 5, } - tree := Tree{} - tree.Initialize(params, Red) + tree := NewTree(SnowballFactory, params, Red) require.False(tree.Finalized()) - oneRed := bag.Bag[ids.ID]{} - oneRed.Add(Red) + oneRed := bag.Of(Red) require.True(tree.RecordPoll(oneRed)) tree.RecordUnsuccessfulPoll() @@ -90,23 +92,24 @@ func TestSnowballBinary(t *testing.T) { require := require.New(t) params := Parameters{ - K: 1, Alpha: 1, BetaVirtuous: 1, BetaRogue: 2, + K: 1, + AlphaPreference: 1, + AlphaConfidence: 1, + BetaVirtuous: 1, + BetaRogue: 2, } - tree := Tree{} - tree.Initialize(params, Red) + tree := NewTree(SnowballFactory, params, Red) tree.Add(Blue) require.Equal(Red, tree.Preference()) require.False(tree.Finalized()) - oneBlue := bag.Bag[ids.ID]{} - oneBlue.Add(Blue) + oneBlue := bag.Of(Blue) require.True(tree.RecordPoll(oneBlue)) require.Equal(Blue, tree.Preference()) require.False(tree.Finalized()) - oneRed := bag.Bag[ids.ID]{} - oneRed.Add(Red) + oneRed := bag.Of(Red) require.True(tree.RecordPoll(oneRed)) require.Equal(Blue, tree.Preference()) require.False(tree.Finalized()) @@ -132,36 +135,38 @@ func TestSnowballLastBinary(t *testing.T) { } params := Parameters{ - K: 1, Alpha: 1, BetaVirtuous: 2, BetaRogue: 2, + K: 1, + AlphaPreference: 1, + AlphaConfidence: 1, + BetaVirtuous: 2, + BetaRogue: 2, } - tree := Tree{} - tree.Initialize(params, zero) + tree := NewTree(SnowballFactory, params, zero) tree.Add(one) // Should do nothing tree.Add(one) - expected := "SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized 
= false)) Bits = [0, 255)\n" + - " SB(Preference = 0, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 255" + expected := `SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [0, 255) + SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 255` require.Equal(expected, tree.String()) require.Equal(zero, tree.Preference()) require.False(tree.Finalized()) - oneBag := bag.Bag[ids.ID]{} - oneBag.Add(one) + oneBag := bag.Of(one) require.True(tree.RecordPoll(oneBag)) require.Equal(one, tree.Preference()) require.False(tree.Finalized()) - expected = "SB(NumSuccessfulPolls = 1, SF(Confidence = 1, Finalized = false)) Bits = [0, 255)\n" + - " SB(Preference = 1, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 1, SF(Confidence = 1, Finalized = false, SL(Preference = 1))) Bit = 255" + expected = `SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = false)) Bits = [0, 255) + SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 1, SF(Confidence = 1, Finalized = false, SL(Preference = 1))) Bit = 255` require.Equal(expected, tree.String()) require.True(tree.RecordPoll(oneBag)) require.Equal(one, tree.Preference()) require.True(tree.Finalized()) - expected = "SB(Preference = 1, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 2, SF(Confidence = 2, Finalized = true, SL(Preference = 1))) Bit = 255" + expected = "SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 2, SF(Confidence = 2, Finalized = true, SL(Preference = 1))) Bit = 255" require.Equal(expected, tree.String()) } @@ -174,35 +179,37 @@ func TestSnowballAddPreviouslyRejected(t *testing.T) { four := ids.ID{0b00000100} params := Parameters{ - K: 1, Alpha: 1, BetaVirtuous: 1, BetaRogue: 2, + K: 1, + AlphaPreference: 1, + AlphaConfidence: 1, + BetaVirtuous: 1, + BetaRogue: 2, } - tree := Tree{} - 
tree.Initialize(params, zero) + tree := NewTree(SnowballFactory, params, zero) tree.Add(one) tree.Add(four) { - expected := "SB(Preference = 0, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 0\n" + - " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 2)\n" + - " SB(Preference = 0, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 2\n" + - " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256)\n" + - " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256)\n" + - " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)" + expected := `SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 0 + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 2) + SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 2 + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256) + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256) + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)` require.Equal(expected, tree.String()) require.Equal(zero, tree.Preference()) require.False(tree.Finalized()) } - zeroBag := bag.Bag[ids.ID]{} - zeroBag.Add(zero) + zeroBag := bag.Of(zero) require.True(tree.RecordPoll(zeroBag)) { - expected := "SB(Preference = 0, NumSuccessfulPolls[0] = 1, NumSuccessfulPolls[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 0\n" + - " SB(Preference = 0, NumSuccessfulPolls[0] = 1, NumSuccessfulPolls[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 2\n" + - " SB(NumSuccessfulPolls = 1, SF(Confidence = 1, 
Finalized = true)) Bits = [3, 256)\n" + - " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256)\n" + - " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)" + expected := `SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 0 + SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 2 + SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = true)) Bits = [3, 256) + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256) + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)` require.Equal(expected, tree.String()) require.Equal(zero, tree.Preference()) require.False(tree.Finalized()) @@ -211,11 +218,11 @@ func TestSnowballAddPreviouslyRejected(t *testing.T) { tree.Add(two) { - expected := "SB(Preference = 0, NumSuccessfulPolls[0] = 1, NumSuccessfulPolls[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 0\n" + - " SB(Preference = 0, NumSuccessfulPolls[0] = 1, NumSuccessfulPolls[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 2\n" + - " SB(NumSuccessfulPolls = 1, SF(Confidence = 1, Finalized = true)) Bits = [3, 256)\n" + - " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256)\n" + - " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)" + expected := `SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 0 + SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 2 + SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = true)) Bits = [3, 256) + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits 
= [3, 256) + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)` require.Equal(expected, tree.String()) require.Equal(zero, tree.Preference()) require.False(tree.Finalized()) @@ -229,29 +236,31 @@ func TestSnowballNewUnary(t *testing.T) { one := ids.ID{0b00000001} params := Parameters{ - K: 1, Alpha: 1, BetaVirtuous: 2, BetaRogue: 3, + K: 1, + AlphaPreference: 1, + AlphaConfidence: 1, + BetaVirtuous: 2, + BetaRogue: 3, } - tree := Tree{} - tree.Initialize(params, zero) + tree := NewTree(SnowballFactory, params, zero) tree.Add(one) { - expected := "SB(Preference = 0, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 0\n" + - " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)\n" + - " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)" + expected := `SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 0 + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256) + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)` require.Equal(expected, tree.String()) require.Equal(zero, tree.Preference()) require.False(tree.Finalized()) } - oneBag := bag.Bag[ids.ID]{} - oneBag.Add(one) + oneBag := bag.Of(one) require.True(tree.RecordPoll(oneBag)) { - expected := "SB(Preference = 1, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 1, SF(Confidence = 1, Finalized = false, SL(Preference = 1))) Bit = 0\n" + - " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)\n" + - " SB(NumSuccessfulPolls = 1, SF(Confidence = 1, Finalized = false)) Bits = [1, 256)" + expected := `SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 1, SF(Confidence = 1, Finalized = false, SL(Preference = 1))) Bit = 0 + SB(PreferenceStrength = 0, SF(Confidence = 0, 
Finalized = false)) Bits = [1, 256) + SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = false)) Bits = [1, 256)` require.Equal(expected, tree.String()) require.Equal(one, tree.Preference()) require.False(tree.Finalized()) @@ -260,9 +269,9 @@ func TestSnowballNewUnary(t *testing.T) { require.True(tree.RecordPoll(oneBag)) { - expected := "SB(Preference = 1, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 2, SF(Confidence = 2, Finalized = false, SL(Preference = 1))) Bit = 0\n" + - " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)\n" + - " SB(NumSuccessfulPolls = 2, SF(Confidence = 2, Finalized = true)) Bits = [1, 256)" + expected := `SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 2, SF(Confidence = 2, Finalized = false, SL(Preference = 1))) Bit = 0 + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256) + SB(PreferenceStrength = 2, SF(Confidence = 2, Finalized = true)) Bits = [1, 256)` require.Equal(expected, tree.String()) require.Equal(one, tree.Preference()) require.False(tree.Finalized()) @@ -277,38 +286,40 @@ func TestSnowballTransitiveReset(t *testing.T) { eight := ids.ID{0b00001000} params := Parameters{ - K: 1, Alpha: 1, BetaVirtuous: 2, BetaRogue: 2, + K: 1, + AlphaPreference: 1, + AlphaConfidence: 1, + BetaVirtuous: 2, + BetaRogue: 2, } - tree := Tree{} - tree.Initialize(params, zero) + tree := NewTree(SnowballFactory, params, zero) tree.Add(two) tree.Add(eight) { - expected := "SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [0, 1)\n" + - " SB(Preference = 0, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 1\n" + - " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 3)\n" + - " SB(Preference = 0, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 3\n" + - " 
SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [4, 256)\n" + - " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [4, 256)\n" + - " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256)" + expected := `SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [0, 1) + SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 1 + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 3) + SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 3 + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [4, 256) + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [4, 256) + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256)` require.Equal(expected, tree.String()) require.Equal(zero, tree.Preference()) require.False(tree.Finalized()) } - zeroBag := bag.Bag[ids.ID]{} - zeroBag.Add(zero) + zeroBag := bag.Of(zero) require.True(tree.RecordPoll(zeroBag)) { - expected := "SB(NumSuccessfulPolls = 1, SF(Confidence = 1, Finalized = false)) Bits = [0, 1)\n" + - " SB(Preference = 0, NumSuccessfulPolls[0] = 1, NumSuccessfulPolls[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 1\n" + - " SB(NumSuccessfulPolls = 1, SF(Confidence = 1, Finalized = false)) Bits = [2, 3)\n" + - " SB(Preference = 0, NumSuccessfulPolls[0] = 1, NumSuccessfulPolls[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 3\n" + - " SB(NumSuccessfulPolls = 1, SF(Confidence = 1, Finalized = false)) Bits = [4, 256)\n" + - " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [4, 256)\n" + - " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256)" + expected := 
`SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = false)) Bits = [0, 1) + SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 1 + SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = false)) Bits = [2, 3) + SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 3 + SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = false)) Bits = [4, 256) + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [4, 256) + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256)` require.Equal(expected, tree.String()) require.Equal(zero, tree.Preference()) require.False(tree.Finalized()) @@ -318,13 +329,13 @@ func TestSnowballTransitiveReset(t *testing.T) { require.False(tree.RecordPoll(emptyBag)) { - expected := "SB(NumSuccessfulPolls = 1, SF(Confidence = 0, Finalized = false)) Bits = [0, 1)\n" + - " SB(Preference = 0, NumSuccessfulPolls[0] = 1, NumSuccessfulPolls[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 1\n" + - " SB(NumSuccessfulPolls = 1, SF(Confidence = 1, Finalized = false)) Bits = [2, 3)\n" + - " SB(Preference = 0, NumSuccessfulPolls[0] = 1, NumSuccessfulPolls[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 3\n" + - " SB(NumSuccessfulPolls = 1, SF(Confidence = 1, Finalized = false)) Bits = [4, 256)\n" + - " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [4, 256)\n" + - " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256)" + expected := `SB(PreferenceStrength = 1, SF(Confidence = 0, Finalized = false)) Bits = [0, 1) + SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 1 + SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = false)) Bits = [2, 3) 
+ SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 3 + SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = false)) Bits = [4, 256) + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [4, 256) + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256)` require.Equal(expected, tree.String()) require.Equal(zero, tree.Preference()) require.False(tree.Finalized()) @@ -333,13 +344,13 @@ func TestSnowballTransitiveReset(t *testing.T) { require.True(tree.RecordPoll(zeroBag)) { - expected := "SB(NumSuccessfulPolls = 2, SF(Confidence = 1, Finalized = false)) Bits = [0, 1)\n" + - " SB(Preference = 0, NumSuccessfulPolls[0] = 2, NumSuccessfulPolls[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 1\n" + - " SB(NumSuccessfulPolls = 2, SF(Confidence = 1, Finalized = false)) Bits = [2, 3)\n" + - " SB(Preference = 0, NumSuccessfulPolls[0] = 2, NumSuccessfulPolls[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 3\n" + - " SB(NumSuccessfulPolls = 2, SF(Confidence = 1, Finalized = false)) Bits = [4, 256)\n" + - " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [4, 256)\n" + - " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256)" + expected := `SB(PreferenceStrength = 2, SF(Confidence = 1, Finalized = false)) Bits = [0, 1) + SB(Preference = 0, PreferenceStrength[0] = 2, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 1 + SB(PreferenceStrength = 2, SF(Confidence = 1, Finalized = false)) Bits = [2, 3) + SB(Preference = 0, PreferenceStrength[0] = 2, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 3 + SB(PreferenceStrength = 2, SF(Confidence = 1, Finalized = false)) Bits = [4, 256) + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [4, 
256) + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256)` require.Equal(expected, tree.String()) require.Equal(zero, tree.Preference()) require.False(tree.Finalized()) @@ -348,7 +359,7 @@ func TestSnowballTransitiveReset(t *testing.T) { require.True(tree.RecordPoll(zeroBag)) { - expected := "SB(NumSuccessfulPolls = 3, SF(Confidence = 2, Finalized = true)) Bits = [4, 256)" + expected := "SB(PreferenceStrength = 3, SF(Confidence = 2, Finalized = true)) Bits = [4, 256)" require.Equal(expected, tree.String()) require.Equal(zero, tree.Preference()) require.True(tree.Finalized()) @@ -359,10 +370,13 @@ func TestSnowballTrinary(t *testing.T) { require := require.New(t) params := Parameters{ - K: 1, Alpha: 1, BetaVirtuous: 1, BetaRogue: 2, + K: 1, + AlphaPreference: 1, + AlphaConfidence: 1, + BetaVirtuous: 1, + BetaRogue: 2, } - tree := Tree{} - tree.Initialize(params, Green) + tree := NewTree(SnowballFactory, params, Green) tree.Add(Red) tree.Add(Blue) @@ -375,22 +389,19 @@ func TestSnowballTrinary(t *testing.T) { require.Equal(Green, tree.Preference()) require.False(tree.Finalized()) - redBag := bag.Bag[ids.ID]{} - redBag.Add(Red) + redBag := bag.Of(Red) require.True(tree.RecordPoll(redBag)) require.Equal(Red, tree.Preference()) require.False(tree.Finalized()) - blueBag := bag.Bag[ids.ID]{} - blueBag.Add(Blue) + blueBag := bag.Of(Blue) require.True(tree.RecordPoll(blueBag)) require.Equal(Red, tree.Preference()) require.False(tree.Finalized()) // Here is a case where voting for a color makes a different color become // the preferred color. This is intended behavior. 
- greenBag := bag.Bag[ids.ID]{} - greenBag.Add(Green) + greenBag := bag.Of(Green) require.True(tree.RecordPoll(greenBag)) require.Equal(Blue, tree.Preference()) require.False(tree.Finalized()) @@ -413,10 +424,13 @@ func TestSnowballCloseTrinary(t *testing.T) { magenta := ids.ID{0x03} params := Parameters{ - K: 1, Alpha: 1, BetaVirtuous: 1, BetaRogue: 2, + K: 1, + AlphaPreference: 1, + AlphaConfidence: 1, + BetaVirtuous: 1, + BetaRogue: 2, } - tree := Tree{} - tree.Initialize(params, yellow) + tree := NewTree(SnowballFactory, params, yellow) tree.Add(cyan) tree.Add(magenta) @@ -429,21 +443,18 @@ func TestSnowballCloseTrinary(t *testing.T) { require.Equal(yellow, tree.Preference()) require.False(tree.Finalized()) - yellowBag := bag.Bag[ids.ID]{} - yellowBag.Add(yellow) + yellowBag := bag.Of(yellow) require.True(tree.RecordPoll(yellowBag)) require.Equal(yellow, tree.Preference()) require.False(tree.Finalized()) - magentaBag := bag.Bag[ids.ID]{} - magentaBag.Add(magenta) + magentaBag := bag.Of(magenta) require.True(tree.RecordPoll(magentaBag)) require.Equal(yellow, tree.Preference()) require.False(tree.Finalized()) // Cyan has already been rejected here, so these are not successful polls. 
- cyanBag := bag.Bag[ids.ID]{} - cyanBag.Add(cyan) + cyanBag := bag.Of(cyan) require.False(tree.RecordPoll(cyanBag)) require.Equal(yellow, tree.Preference()) require.False(tree.Finalized()) @@ -462,26 +473,28 @@ func TestSnowballAddRejected(t *testing.T) { c0010 := ids.ID{0x04} // 0010 params := Parameters{ - K: 1, Alpha: 1, BetaVirtuous: 1, BetaRogue: 2, + K: 1, + AlphaPreference: 1, + AlphaConfidence: 1, + BetaVirtuous: 1, + BetaRogue: 2, } - tree := Tree{} - tree.Initialize(params, c0000) + tree := NewTree(SnowballFactory, params, c0000) tree.Add(c1000) tree.Add(c0010) require.Equal(c0000, tree.Preference()) require.False(tree.Finalized()) - c0010Bag := bag.Bag[ids.ID]{} - c0010Bag.Add(c0010) + c0010Bag := bag.Of(c0010) require.True(tree.RecordPoll(c0010Bag)) { - expected := "SB(Preference = 0, NumSuccessfulPolls[0] = 1, NumSuccessfulPolls[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 0\n" + - " SB(Preference = 1, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 1, SF(Confidence = 1, Finalized = false, SL(Preference = 1))) Bit = 2\n" + - " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256)\n" + - " SB(NumSuccessfulPolls = 1, SF(Confidence = 1, Finalized = true)) Bits = [3, 256)\n" + - " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)" + expected := `SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 0 + SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 1, SF(Confidence = 1, Finalized = false, SL(Preference = 1))) Bit = 2 + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256) + SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = true)) Bits = [3, 256) + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)` require.Equal(expected, tree.String()) require.Equal(c0010, tree.Preference()) 
require.False(tree.Finalized()) @@ -490,11 +503,11 @@ func TestSnowballAddRejected(t *testing.T) { tree.Add(c0101) { - expected := "SB(Preference = 0, NumSuccessfulPolls[0] = 1, NumSuccessfulPolls[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 0\n" + - " SB(Preference = 1, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 1, SF(Confidence = 1, Finalized = false, SL(Preference = 1))) Bit = 2\n" + - " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256)\n" + - " SB(NumSuccessfulPolls = 1, SF(Confidence = 1, Finalized = true)) Bits = [3, 256)\n" + - " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)" + expected := `SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 0 + SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 1, SF(Confidence = 1, Finalized = false, SL(Preference = 1))) Bit = 2 + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256) + SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = true)) Bits = [3, 256) + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)` require.Equal(expected, tree.String()) require.Equal(c0010, tree.Preference()) require.False(tree.Finalized()) @@ -509,26 +522,28 @@ func TestSnowballResetChild(t *testing.T) { c1000 := ids.ID{0x01} // 1000 params := Parameters{ - K: 1, Alpha: 1, BetaVirtuous: 1, BetaRogue: 2, + K: 1, + AlphaPreference: 1, + AlphaConfidence: 1, + BetaVirtuous: 1, + BetaRogue: 2, } - tree := Tree{} - tree.Initialize(params, c0000) + tree := NewTree(SnowballFactory, params, c0000) tree.Add(c0100) tree.Add(c1000) require.Equal(c0000, tree.Preference()) require.False(tree.Finalized()) - c0000Bag := bag.Bag[ids.ID]{} - c0000Bag.Add(c0000) + c0000Bag := bag.Of(c0000) require.True(tree.RecordPoll(c0000Bag)) { - expected := "SB(Preference = 0, NumSuccessfulPolls[0] = 1, 
NumSuccessfulPolls[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 0\n" + - " SB(Preference = 0, NumSuccessfulPolls[0] = 1, NumSuccessfulPolls[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 1\n" + - " SB(NumSuccessfulPolls = 1, SF(Confidence = 1, Finalized = true)) Bits = [2, 256)\n" + - " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256)\n" + - " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)" + expected := `SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 0 + SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 1 + SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = true)) Bits = [2, 256) + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256) + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)` require.Equal(expected, tree.String()) require.Equal(c0000, tree.Preference()) require.False(tree.Finalized()) @@ -538,11 +553,11 @@ func TestSnowballResetChild(t *testing.T) { require.False(tree.RecordPoll(emptyBag)) { - expected := "SB(Preference = 0, NumSuccessfulPolls[0] = 1, NumSuccessfulPolls[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 0\n" + - " SB(Preference = 0, NumSuccessfulPolls[0] = 1, NumSuccessfulPolls[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 1\n" + - " SB(NumSuccessfulPolls = 1, SF(Confidence = 1, Finalized = true)) Bits = [2, 256)\n" + - " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256)\n" + - " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)" + expected := `SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, 
SL(Preference = 0))) Bit = 0 + SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 1 + SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = true)) Bits = [2, 256) + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256) + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)` require.Equal(expected, tree.String()) require.Equal(c0000, tree.Preference()) require.False(tree.Finalized()) @@ -551,11 +566,11 @@ func TestSnowballResetChild(t *testing.T) { require.True(tree.RecordPoll(c0000Bag)) { - expected := "SB(Preference = 0, NumSuccessfulPolls[0] = 2, NumSuccessfulPolls[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 0\n" + - " SB(Preference = 0, NumSuccessfulPolls[0] = 2, NumSuccessfulPolls[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 1\n" + - " SB(NumSuccessfulPolls = 2, SF(Confidence = 1, Finalized = true)) Bits = [2, 256)\n" + - " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256)\n" + - " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)" + expected := `SB(Preference = 0, PreferenceStrength[0] = 2, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 0 + SB(Preference = 0, PreferenceStrength[0] = 2, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 1 + SB(PreferenceStrength = 2, SF(Confidence = 1, Finalized = true)) Bits = [2, 256) + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256) + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)` require.Equal(expected, tree.String()) require.Equal(c0000, tree.Preference()) require.False(tree.Finalized()) @@ -570,41 +585,42 @@ func TestSnowballResetSibling(t *testing.T) { c1000 := ids.ID{0x01} // 1000 params := Parameters{ 
- K: 1, Alpha: 1, BetaVirtuous: 1, BetaRogue: 2, + K: 1, + AlphaPreference: 1, + AlphaConfidence: 1, + BetaVirtuous: 1, + BetaRogue: 2, } - tree := Tree{} - tree.Initialize(params, c0000) + tree := NewTree(SnowballFactory, params, c0000) tree.Add(c0100) tree.Add(c1000) require.Equal(c0000, tree.Preference()) require.False(tree.Finalized()) - c0100Bag := bag.Bag[ids.ID]{} - c0100Bag.Add(c0100) + c0100Bag := bag.Of(c0100) require.True(tree.RecordPoll(c0100Bag)) { - expected := "SB(Preference = 0, NumSuccessfulPolls[0] = 1, NumSuccessfulPolls[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 0\n" + - " SB(Preference = 1, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 1, SF(Confidence = 1, Finalized = false, SL(Preference = 1))) Bit = 1\n" + - " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256)\n" + - " SB(NumSuccessfulPolls = 1, SF(Confidence = 1, Finalized = true)) Bits = [2, 256)\n" + - " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)" + expected := `SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 0 + SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 1, SF(Confidence = 1, Finalized = false, SL(Preference = 1))) Bit = 1 + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256) + SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = true)) Bits = [2, 256) + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)` require.Equal(expected, tree.String()) require.Equal(c0100, tree.Preference()) require.False(tree.Finalized()) } - c1000Bag := bag.Bag[ids.ID]{} - c1000Bag.Add(c1000) + c1000Bag := bag.Of(c1000) require.True(tree.RecordPoll(c1000Bag)) { - expected := "SB(Preference = 0, NumSuccessfulPolls[0] = 1, NumSuccessfulPolls[1] = 1, SF(Confidence = 1, Finalized = false, SL(Preference = 1))) Bit = 0\n" + - " 
SB(Preference = 1, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 1, SF(Confidence = 1, Finalized = false, SL(Preference = 1))) Bit = 1\n" + - " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256)\n" + - " SB(NumSuccessfulPolls = 1, SF(Confidence = 1, Finalized = true)) Bits = [2, 256)\n" + - " SB(NumSuccessfulPolls = 1, SF(Confidence = 1, Finalized = true)) Bits = [1, 256)" + expected := `SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 1, SF(Confidence = 1, Finalized = false, SL(Preference = 1))) Bit = 0 + SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 1, SF(Confidence = 1, Finalized = false, SL(Preference = 1))) Bit = 1 + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256) + SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = true)) Bits = [2, 256) + SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = true)) Bits = [1, 256)` require.Equal(expected, tree.String()) require.Equal(c0100, tree.Preference()) require.False(tree.Finalized()) @@ -613,11 +629,11 @@ func TestSnowballResetSibling(t *testing.T) { require.True(tree.RecordPoll(c0100Bag)) { - expected := "SB(Preference = 0, NumSuccessfulPolls[0] = 2, NumSuccessfulPolls[1] = 1, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 0\n" + - " SB(Preference = 1, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 2, SF(Confidence = 1, Finalized = false, SL(Preference = 1))) Bit = 1\n" + - " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256)\n" + - " SB(NumSuccessfulPolls = 2, SF(Confidence = 1, Finalized = true)) Bits = [2, 256)\n" + - " SB(NumSuccessfulPolls = 1, SF(Confidence = 1, Finalized = true)) Bits = [1, 256)" + expected := `SB(Preference = 0, PreferenceStrength[0] = 2, PreferenceStrength[1] = 1, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 0 + SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 2, 
SF(Confidence = 1, Finalized = false, SL(Preference = 1))) Bit = 1 + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256) + SB(PreferenceStrength = 2, SF(Confidence = 1, Finalized = true)) Bits = [2, 256) + SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = true)) Bits = [1, 256)` require.Equal(expected, tree.String()) require.Equal(c0100, tree.Preference()) require.False(tree.Finalized()) @@ -629,7 +645,11 @@ func TestSnowball5Colors(t *testing.T) { numColors := 5 params := Parameters{ - K: 5, Alpha: 5, BetaVirtuous: 20, BetaRogue: 30, + K: 5, + AlphaPreference: 5, + AlphaConfidence: 5, + BetaVirtuous: 20, + BetaRogue: 30, } colors := []ids.ID{} @@ -637,16 +657,14 @@ func TestSnowball5Colors(t *testing.T) { colors = append(colors, ids.Empty.Prefix(uint64(i))) } - tree0 := Tree{} - tree0.Initialize(params, colors[4]) + tree0 := NewTree(SnowballFactory, params, colors[4]) tree0.Add(colors[0]) tree0.Add(colors[1]) tree0.Add(colors[2]) tree0.Add(colors[3]) - tree1 := Tree{} - tree1.Initialize(params, colors[3]) + tree1 := NewTree(SnowballFactory, params, colors[3]) tree1.Add(colors[0]) tree1.Add(colors[1]) @@ -667,10 +685,13 @@ func TestSnowballFineGrained(t *testing.T) { c0010 := ids.ID{0x04} params := Parameters{ - K: 1, Alpha: 1, BetaVirtuous: 1, BetaRogue: 2, + K: 1, + AlphaPreference: 1, + AlphaConfidence: 1, + BetaVirtuous: 1, + BetaRogue: 2, } - tree := Tree{} - tree.Initialize(params, c0000) + tree := NewTree(SnowballFactory, params, c0000) require.Equal(initialUnaryDescription, tree.String()) require.Equal(c0000, tree.Preference()) @@ -679,9 +700,9 @@ func TestSnowballFineGrained(t *testing.T) { tree.Add(c1100) { - expected := "SB(Preference = 0, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 0\n" + - " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)\n" + - " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = 
false)) Bits = [1, 256)" + expected := `SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 0 + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256) + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)` require.Equal(expected, tree.String()) require.Equal(c0000, tree.Preference()) require.False(tree.Finalized()) @@ -690,11 +711,11 @@ func TestSnowballFineGrained(t *testing.T) { tree.Add(c1000) { - expected := "SB(Preference = 0, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 0\n" + - " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)\n" + - " SB(Preference = 1, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 1))) Bit = 1\n" + - " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256)\n" + - " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256)" + expected := `SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 0 + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256) + SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 1))) Bit = 1 + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256) + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256)` require.Equal(expected, tree.String()) require.Equal(c0000, tree.Preference()) require.False(tree.Finalized()) @@ -703,44 +724,42 @@ func TestSnowballFineGrained(t *testing.T) { tree.Add(c0010) { - expected := "SB(Preference = 0, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 0, SF(Confidence = 0, Finalized = false, 
SL(Preference = 0))) Bit = 0\n" + - " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 2)\n" + - " SB(Preference = 0, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 2\n" + - " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256)\n" + - " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256)\n" + - " SB(Preference = 1, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 1))) Bit = 1\n" + - " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256)\n" + - " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256)" + expected := `SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 0 + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 2) + SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 2 + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256) + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256) + SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 1))) Bit = 1 + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256) + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256)` require.Equal(expected, tree.String()) require.Equal(c0000, tree.Preference()) require.False(tree.Finalized()) } - c0000Bag := bag.Bag[ids.ID]{} - c0000Bag.Add(c0000) + c0000Bag := bag.Of(c0000) require.True(tree.RecordPoll(c0000Bag)) { - expected := "SB(Preference = 0, NumSuccessfulPolls[0] = 1, NumSuccessfulPolls[1] = 0, SF(Confidence = 1, Finalized = 
false, SL(Preference = 0))) Bit = 0\n" + - " SB(Preference = 0, NumSuccessfulPolls[0] = 1, NumSuccessfulPolls[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 2\n" + - " SB(NumSuccessfulPolls = 1, SF(Confidence = 1, Finalized = true)) Bits = [3, 256)\n" + - " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256)\n" + - " SB(Preference = 1, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 1))) Bit = 1\n" + - " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256)\n" + - " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256)" + expected := `SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 0 + SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 2 + SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = true)) Bits = [3, 256) + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256) + SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 1))) Bit = 1 + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256) + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256)` require.Equal(expected, tree.String()) require.Equal(c0000, tree.Preference()) require.False(tree.Finalized()) } - c0010Bag := bag.Bag[ids.ID]{} - c0010Bag.Add(c0010) + c0010Bag := bag.Of(c0010) require.True(tree.RecordPoll(c0010Bag)) { - expected := "SB(Preference = 0, NumSuccessfulPolls[0] = 1, NumSuccessfulPolls[1] = 1, SF(Confidence = 1, Finalized = false, SL(Preference = 1))) Bit = 2\n" + - " SB(NumSuccessfulPolls = 1, SF(Confidence = 1, Finalized = true)) Bits = [3, 256)\n" + - " SB(NumSuccessfulPolls = 1, 
SF(Confidence = 1, Finalized = true)) Bits = [3, 256)" + expected := `SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 1, SF(Confidence = 1, Finalized = false, SL(Preference = 1))) Bit = 2 + SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = true)) Bits = [3, 256) + SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = true)) Bits = [3, 256)` require.Equal(expected, tree.String()) require.Equal(c0000, tree.Preference()) require.False(tree.Finalized()) @@ -749,7 +768,7 @@ func TestSnowballFineGrained(t *testing.T) { require.True(tree.RecordPoll(c0010Bag)) { - expected := "SB(NumSuccessfulPolls = 2, SF(Confidence = 2, Finalized = true)) Bits = [3, 256)" + expected := "SB(PreferenceStrength = 2, SF(Confidence = 2, Finalized = true)) Bits = [3, 256)" require.Equal(expected, tree.String()) require.Equal(c0010, tree.Preference()) require.True(tree.Finalized()) @@ -760,10 +779,13 @@ func TestSnowballDoubleAdd(t *testing.T) { require := require.New(t) params := Parameters{ - K: 1, Alpha: 1, BetaVirtuous: 3, BetaRogue: 5, + K: 1, + AlphaPreference: 1, + AlphaConfidence: 1, + BetaVirtuous: 3, + BetaRogue: 5, } - tree := Tree{} - tree.Initialize(params, Red) + tree := NewTree(SnowballFactory, params, Red) tree.Add(Red) require.Equal(initialUnaryDescription, tree.String()) @@ -774,20 +796,25 @@ func TestSnowballDoubleAdd(t *testing.T) { func TestSnowballConsistent(t *testing.T) { require := require.New(t) - numColors := 50 - numNodes := 100 - params := Parameters{ - K: 20, Alpha: 15, BetaVirtuous: 20, BetaRogue: 30, - } - seed := int64(0) - - sampler.Seed(seed) - - n := Network{} - n.Initialize(params, numColors) - + var ( + numColors = 50 + numNodes = 100 + params = Parameters{ + K: 20, + AlphaPreference: 15, + AlphaConfidence: 15, + BetaVirtuous: 20, + BetaRogue: 30, + } + seed uint64 = 0 + source = prng.NewMT19937() + ) + + n := NewNetwork(SnowballFactory, params, numColors, source) + + source.Seed(seed) for i := 0; i < numNodes; i++ { - 
n.AddNode(&Tree{}) + n.AddNode(NewTree) } for !n.Finalized() && !n.Disagreement() { @@ -806,10 +833,13 @@ func TestSnowballFilterBinaryChildren(t *testing.T) { c0010 := ids.ID{0b00000100} params := Parameters{ - K: 1, Alpha: 1, BetaVirtuous: 1, BetaRogue: 2, + K: 1, + AlphaPreference: 1, + AlphaConfidence: 1, + BetaVirtuous: 1, + BetaRogue: 2, } - tree := Tree{} - tree.Initialize(params, c0000) + tree := NewTree(SnowballFactory, params, c0000) require.Equal(initialUnaryDescription, tree.String()) require.Equal(c0000, tree.Preference()) @@ -818,9 +848,9 @@ func TestSnowballFilterBinaryChildren(t *testing.T) { tree.Add(c1000) { - expected := "SB(Preference = 0, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 0\n" + - " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)\n" + - " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)" + expected := `SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 0 + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256) + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)` require.Equal(expected, tree.String()) require.Equal(c0000, tree.Preference()) require.False(tree.Finalized()) @@ -829,27 +859,26 @@ func TestSnowballFilterBinaryChildren(t *testing.T) { tree.Add(c0010) { - expected := "SB(Preference = 0, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 0\n" + - " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 2)\n" + - " SB(Preference = 0, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 2\n" + - " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256)\n" + - " 
SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256)\n" + - " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)" + expected := `SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 0 + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 2) + SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 2 + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256) + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256) + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)` require.Equal(expected, tree.String()) require.Equal(c0000, tree.Preference()) require.False(tree.Finalized()) } - c0000Bag := bag.Bag[ids.ID]{} - c0000Bag.Add(c0000) + c0000Bag := bag.Of(c0000) require.True(tree.RecordPoll(c0000Bag)) { - expected := "SB(Preference = 0, NumSuccessfulPolls[0] = 1, NumSuccessfulPolls[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 0\n" + - " SB(Preference = 0, NumSuccessfulPolls[0] = 1, NumSuccessfulPolls[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 2\n" + - " SB(NumSuccessfulPolls = 1, SF(Confidence = 1, Finalized = true)) Bits = [3, 256)\n" + - " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256)\n" + - " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)" + expected := `SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 0 + SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 2 + SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = true)) Bits = [3, 256) 
+ SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256) + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)` require.Equal(expected, tree.String()) require.Equal(c0000, tree.Preference()) require.False(tree.Finalized()) @@ -858,26 +887,91 @@ func TestSnowballFilterBinaryChildren(t *testing.T) { tree.Add(c0100) { - expected := "SB(Preference = 0, NumSuccessfulPolls[0] = 1, NumSuccessfulPolls[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 0\n" + - " SB(Preference = 0, NumSuccessfulPolls[0] = 1, NumSuccessfulPolls[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 2\n" + - " SB(NumSuccessfulPolls = 1, SF(Confidence = 1, Finalized = true)) Bits = [3, 256)\n" + - " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256)\n" + - " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)" + expected := `SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 0 + SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 2 + SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = true)) Bits = [3, 256) + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256) + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)` require.Equal(expected, tree.String()) require.Equal(c0000, tree.Preference()) require.False(tree.Finalized()) } - c0100Bag := bag.Bag[ids.ID]{} - c0100Bag.Add(c0100) + c0100Bag := bag.Of(c0100) require.True(tree.RecordPoll(c0100Bag)) { - expected := "SB(Preference = 0, NumSuccessfulPolls[0] = 1, NumSuccessfulPolls[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 2\n" + - " SB(NumSuccessfulPolls = 1, SF(Confidence = 1, Finalized = true)) Bits = [3, 256)\n" + - " 
SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256)" + expected := `SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 2 + SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = true)) Bits = [3, 256) + SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256)` require.Equal(expected, tree.String()) require.Equal(c0000, tree.Preference()) require.False(tree.Finalized()) } } + +func TestSnowballRecordPreferencePollBinary(t *testing.T) { + require := require.New(t) + + params := Parameters{ + K: 3, + AlphaPreference: 2, + AlphaConfidence: 3, + BetaVirtuous: 2, + BetaRogue: 2, + } + tree := NewTree(SnowballFactory, params, Red) + tree.Add(Blue) + require.Equal(Red, tree.Preference()) + require.False(tree.Finalized()) + + threeBlue := bag.Of(Blue, Blue, Blue) + require.True(tree.RecordPoll(threeBlue)) + require.Equal(Blue, tree.Preference()) + require.False(tree.Finalized()) + + twoRed := bag.Of(Red, Red) + require.True(tree.RecordPoll(twoRed)) + require.Equal(Blue, tree.Preference()) + require.False(tree.Finalized()) + + threeRed := bag.Of(Red, Red, Red) + require.True(tree.RecordPoll(threeRed)) + require.Equal(Red, tree.Preference()) + require.False(tree.Finalized()) + + require.True(tree.RecordPoll(threeRed)) + require.Equal(Red, tree.Preference()) + require.True(tree.Finalized()) +} + +func TestSnowballRecordPreferencePollUnary(t *testing.T) { + require := require.New(t) + + params := Parameters{ + K: 3, + AlphaPreference: 2, + AlphaConfidence: 3, + BetaVirtuous: 2, + BetaRogue: 2, + } + tree := NewTree(SnowballFactory, params, Red) + require.Equal(Red, tree.Preference()) + require.False(tree.Finalized()) + + twoRed := bag.Of(Red, Red) + require.True(tree.RecordPoll(twoRed)) + require.Equal(Red, tree.Preference()) + require.False(tree.Finalized()) + + tree.Add(Blue) + + threeBlue := bag.Of(Blue, Blue, Blue) + 
require.True(tree.RecordPoll(threeBlue)) + require.Equal(Red, tree.Preference()) + require.False(tree.Finalized()) + + require.True(tree.RecordPoll(threeBlue)) + require.Equal(Blue, tree.Preference()) + require.True(tree.Finalized()) +} diff --git a/avalanchego/snow/consensus/snowball/unary_snowball.go b/avalanchego/snow/consensus/snowball/unary_snowball.go index 44bf1c42..638b6d79 100644 --- a/avalanchego/snow/consensus/snowball/unary_snowball.go +++ b/avalanchego/snow/consensus/snowball/unary_snowball.go @@ -1,29 +1,38 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowball -import ( - "fmt" -) +import "fmt" -var _ UnarySnowball = (*unarySnowball)(nil) +var _ Unary = (*unarySnowball)(nil) + +func newUnarySnowball(beta int) unarySnowball { + return unarySnowball{ + unarySnowflake: newUnarySnowflake(beta), + } +} // unarySnowball is the implementation of a unary snowball instance type unarySnowball struct { // wrap the unary snowflake logic unarySnowflake - // numSuccessfulPolls tracks the total number of successful network polls - numSuccessfulPolls int + // preferenceStrength tracks the total number of polls with a preference + preferenceStrength int } func (sb *unarySnowball) RecordSuccessfulPoll() { - sb.numSuccessfulPolls++ + sb.preferenceStrength++ sb.unarySnowflake.RecordSuccessfulPoll() } -func (sb *unarySnowball) Extend(beta int, choice int) BinarySnowball { +func (sb *unarySnowball) RecordPollPreference() { + sb.preferenceStrength++ + sb.unarySnowflake.RecordUnsuccessfulPoll() +} + +func (sb *unarySnowball) Extend(beta int, choice int) Binary { bs := &binarySnowball{ binarySnowflake: binarySnowflake{ binarySlush: binarySlush{preference: choice}, @@ -33,17 +42,17 @@ func (sb *unarySnowball) Extend(beta int, choice int) BinarySnowball { }, preference: choice, } - bs.numSuccessfulPolls[choice] = sb.numSuccessfulPolls + 
bs.preferenceStrength[choice] = sb.preferenceStrength return bs } -func (sb *unarySnowball) Clone() UnarySnowball { +func (sb *unarySnowball) Clone() Unary { newSnowball := *sb return &newSnowball } func (sb *unarySnowball) String() string { - return fmt.Sprintf("SB(NumSuccessfulPolls = %d, %s)", - sb.numSuccessfulPolls, + return fmt.Sprintf("SB(PreferenceStrength = %d, %s)", + sb.preferenceStrength, &sb.unarySnowflake) } diff --git a/avalanchego/snow/consensus/snowball/unary_snowball_test.go b/avalanchego/snow/consensus/snowball/unary_snowball_test.go index 012144bb..d94d2b61 100644 --- a/avalanchego/snow/consensus/snowball/unary_snowball_test.go +++ b/avalanchego/snow/consensus/snowball/unary_snowball_test.go @@ -1,86 +1,74 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowball import ( "testing" + + "github.com/stretchr/testify/require" ) -func UnarySnowballStateTest(t *testing.T, sb *unarySnowball, expectedNumSuccessfulPolls, expectedConfidence int, expectedFinalized bool) { - if numSuccessfulPolls := sb.numSuccessfulPolls; numSuccessfulPolls != expectedNumSuccessfulPolls { - t.Fatalf("Wrong numSuccessfulPolls. Expected %d got %d", expectedNumSuccessfulPolls, numSuccessfulPolls) - } else if confidence := sb.confidence; confidence != expectedConfidence { - t.Fatalf("Wrong confidence. Expected %d got %d", expectedConfidence, confidence) - } else if finalized := sb.Finalized(); finalized != expectedFinalized { - t.Fatalf("Wrong finalized status. 
Expected %v got %v", expectedFinalized, finalized) - } +func UnarySnowballStateTest(t *testing.T, sb *unarySnowball, expectedPreferenceStrength, expectedConfidence int, expectedFinalized bool) { + require := require.New(t) + + require.Equal(expectedPreferenceStrength, sb.preferenceStrength) + require.Equal(expectedConfidence, sb.confidence) + require.Equal(expectedFinalized, sb.Finalized()) } func TestUnarySnowball(t *testing.T) { + require := require.New(t) + beta := 2 - sb := &unarySnowball{} - sb.Initialize(beta) + sb := newUnarySnowball(beta) + + sb.RecordSuccessfulPoll() + UnarySnowballStateTest(t, &sb, 1, 1, false) + + sb.RecordPollPreference() + UnarySnowballStateTest(t, &sb, 2, 0, false) sb.RecordSuccessfulPoll() - UnarySnowballStateTest(t, sb, 1, 1, false) + UnarySnowballStateTest(t, &sb, 3, 1, false) sb.RecordUnsuccessfulPoll() - UnarySnowballStateTest(t, sb, 1, 0, false) + UnarySnowballStateTest(t, &sb, 3, 0, false) sb.RecordSuccessfulPoll() - UnarySnowballStateTest(t, sb, 2, 1, false) + UnarySnowballStateTest(t, &sb, 4, 1, false) sbCloneIntf := sb.Clone() - sbClone, ok := sbCloneIntf.(*unarySnowball) - if !ok { - t.Fatalf("Unexpected clone type") - } + require.IsType(&unarySnowball{}, sbCloneIntf) + sbClone := sbCloneIntf.(*unarySnowball) - UnarySnowballStateTest(t, sbClone, 2, 1, false) + UnarySnowballStateTest(t, sbClone, 4, 1, false) binarySnowball := sbClone.Extend(beta, 0) - expected := "SB(Preference = 0, NumSuccessfulPolls[0] = 2, NumSuccessfulPolls[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0)))" - if result := binarySnowball.String(); result != expected { - t.Fatalf("Expected:\n%s\nReturned:\n%s", expected, result) - } + expected := "SB(Preference = 0, PreferenceStrength[0] = 4, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0)))" + require.Equal(expected, binarySnowball.String()) binarySnowball.RecordUnsuccessfulPoll() - for i := 0; i < 3; i++ { - if binarySnowball.Preference() != 0 { - 
t.Fatalf("Wrong preference") - } else if binarySnowball.Finalized() { - t.Fatalf("Should not have finalized") - } + for i := 0; i < 5; i++ { + require.Zero(binarySnowball.Preference()) + require.False(binarySnowball.Finalized()) binarySnowball.RecordSuccessfulPoll(1) binarySnowball.RecordUnsuccessfulPoll() } - if binarySnowball.Preference() != 1 { - t.Fatalf("Wrong preference") - } else if binarySnowball.Finalized() { - t.Fatalf("Should not have finalized") - } + require.Equal(1, binarySnowball.Preference()) + require.False(binarySnowball.Finalized()) binarySnowball.RecordSuccessfulPoll(1) - if binarySnowball.Preference() != 1 { - t.Fatalf("Wrong preference") - } else if binarySnowball.Finalized() { - t.Fatalf("Should not have finalized") - } + require.Equal(1, binarySnowball.Preference()) + require.False(binarySnowball.Finalized()) binarySnowball.RecordSuccessfulPoll(1) + require.Equal(1, binarySnowball.Preference()) + require.True(binarySnowball.Finalized()) - if binarySnowball.Preference() != 1 { - t.Fatalf("Wrong preference") - } else if !binarySnowball.Finalized() { - t.Fatalf("Should have finalized") - } - - expected = "SB(NumSuccessfulPolls = 2, SF(Confidence = 1, Finalized = false))" - if str := sb.String(); str != expected { - t.Fatalf("Wrong state. Expected:\n%s\nGot:\n%s", expected, str) - } + expected = "SB(PreferenceStrength = 4, SF(Confidence = 1, Finalized = false))" + require.Equal(expected, sb.String()) } diff --git a/avalanchego/snow/consensus/snowball/unary_snowflake.go b/avalanchego/snow/consensus/snowball/unary_snowflake.go index 84ef6e42..f9c9b624 100644 --- a/avalanchego/snow/consensus/snowball/unary_snowflake.go +++ b/avalanchego/snow/consensus/snowball/unary_snowflake.go @@ -1,13 +1,17 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package snowball -import ( - "fmt" -) +import "fmt" -var _ UnarySnowflake = (*unarySnowflake)(nil) +var _ Unary = (*unarySnowflake)(nil) + +func newUnarySnowflake(beta int) unarySnowflake { + return unarySnowflake{ + beta: beta, + } +} // unarySnowflake is the implementation of a unary snowflake instance type unarySnowflake struct { @@ -24,15 +28,18 @@ type unarySnowflake struct { finalized bool } -func (sf *unarySnowflake) Initialize(beta int) { - sf.beta = beta -} - func (sf *unarySnowflake) RecordSuccessfulPoll() { sf.confidence++ sf.finalized = sf.finalized || sf.confidence >= sf.beta } +// RecordPollPreference fails to reach an alpha threshold to increase our +// confidence, so this calls RecordUnsuccessfulPoll to reset the confidence +// counter. +func (sf *unarySnowflake) RecordPollPreference() { + sf.RecordUnsuccessfulPoll() +} + func (sf *unarySnowflake) RecordUnsuccessfulPoll() { sf.confidence = 0 } @@ -41,7 +48,7 @@ func (sf *unarySnowflake) Finalized() bool { return sf.finalized } -func (sf *unarySnowflake) Extend(beta int, choice int) BinarySnowflake { +func (sf *unarySnowflake) Extend(beta int, choice int) Binary { return &binarySnowflake{ binarySlush: binarySlush{preference: choice}, confidence: sf.confidence, @@ -50,7 +57,7 @@ func (sf *unarySnowflake) Extend(beta int, choice int) BinarySnowflake { } } -func (sf *unarySnowflake) Clone() UnarySnowflake { +func (sf *unarySnowflake) Clone() Unary { newSnowflake := *sf return &newSnowflake } diff --git a/avalanchego/snow/consensus/snowball/unary_snowflake_test.go b/avalanchego/snow/consensus/snowball/unary_snowflake_test.go index ab76c94a..0791b688 100644 --- a/avalanchego/snow/consensus/snowball/unary_snowflake_test.go +++ b/avalanchego/snow/consensus/snowball/unary_snowflake_test.go @@ -1,40 +1,40 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package snowball import ( "testing" + + "github.com/stretchr/testify/require" ) func UnarySnowflakeStateTest(t *testing.T, sf *unarySnowflake, expectedConfidence int, expectedFinalized bool) { - if confidence := sf.confidence; confidence != expectedConfidence { - t.Fatalf("Wrong confidence. Expected %d got %d", expectedConfidence, confidence) - } else if finalized := sf.Finalized(); finalized != expectedFinalized { - t.Fatalf("Wrong finalized status. Expected %v got %v", expectedFinalized, finalized) - } + require := require.New(t) + + require.Equal(expectedConfidence, sf.confidence) + require.Equal(expectedFinalized, sf.Finalized()) } func TestUnarySnowflake(t *testing.T) { + require := require.New(t) + beta := 2 - sf := &unarySnowflake{} - sf.Initialize(beta) + sf := newUnarySnowflake(beta) sf.RecordSuccessfulPoll() - UnarySnowflakeStateTest(t, sf, 1, false) + UnarySnowflakeStateTest(t, &sf, 1, false) sf.RecordUnsuccessfulPoll() - UnarySnowflakeStateTest(t, sf, 0, false) + UnarySnowflakeStateTest(t, &sf, 0, false) sf.RecordSuccessfulPoll() - UnarySnowflakeStateTest(t, sf, 1, false) + UnarySnowflakeStateTest(t, &sf, 1, false) sfCloneIntf := sf.Clone() - sfClone, ok := sfCloneIntf.(*unarySnowflake) - if !ok { - t.Fatalf("Unexpected clone type") - } + require.IsType(&unarySnowflake{}, sfCloneIntf) + sfClone := sfCloneIntf.(*unarySnowflake) UnarySnowflakeStateTest(t, sfClone, 1, false) @@ -44,24 +44,19 @@ func TestUnarySnowflake(t *testing.T) { binarySnowflake.RecordSuccessfulPoll(1) - if binarySnowflake.Finalized() { - t.Fatalf("Should not have finalized") - } + require.False(binarySnowflake.Finalized()) binarySnowflake.RecordSuccessfulPoll(1) - if binarySnowflake.Preference() != 1 { - t.Fatalf("Wrong preference") - } else if !binarySnowflake.Finalized() { - t.Fatalf("Should have finalized") - } + require.Equal(1, binarySnowflake.Preference()) + require.True(binarySnowflake.Finalized()) sf.RecordSuccessfulPoll() - UnarySnowflakeStateTest(t, sf, 2, true) + 
UnarySnowflakeStateTest(t, &sf, 2, true) sf.RecordUnsuccessfulPoll() - UnarySnowflakeStateTest(t, sf, 0, true) + UnarySnowflakeStateTest(t, &sf, 0, true) sf.RecordSuccessfulPoll() - UnarySnowflakeStateTest(t, sf, 1, true) + UnarySnowflakeStateTest(t, &sf, 1, true) } diff --git a/avalanchego/snow/consensus/snowman/block.go b/avalanchego/snow/consensus/snowman/block.go index b5d79983..c950ac3c 100644 --- a/avalanchego/snow/consensus/snowman/block.go +++ b/avalanchego/snow/consensus/snowman/block.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowman diff --git a/avalanchego/snow/consensus/snowman/bootstrapper/majority.go b/avalanchego/snow/consensus/snowman/bootstrapper/majority.go new file mode 100644 index 00000000..26150788 --- /dev/null +++ b/avalanchego/snow/consensus/snowman/bootstrapper/majority.go @@ -0,0 +1,109 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package bootstrapper + +import ( + "context" + + "go.uber.org/zap" + "golang.org/x/exp/maps" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/math" + "github.com/ava-labs/avalanchego/utils/set" +) + +var _ Poll = (*Majority)(nil) + +// Majority implements the bootstrapping poll to filter the initial set of +// potentially accaptable blocks into a set of accepted blocks to sync to. +// +// Once the last accepted blocks have been fetched from the initial set of +// peers, the set of blocks are sent to all peers. Each peer is expected to +// filter the provided blocks and report which of them they consider accepted. +// If a majority of the peers report that a block is accepted, then the node +// will consider that block to be accepted by the network. 
This assumes that a +// majority of the network is correct. If a majority of the network is +// malicious, the node may accept an incorrect block. +type Majority struct { + requests + + log logging.Logger + nodeWeights map[ids.NodeID]uint64 + + // received maps the blockID to the total sum of weight that has reported + // that block as accepted. + received map[ids.ID]uint64 + accepted []ids.ID +} + +func NewMajority( + log logging.Logger, + nodeWeights map[ids.NodeID]uint64, + maxOutstanding int, +) *Majority { + return &Majority{ + requests: requests{ + maxOutstanding: maxOutstanding, + pendingSend: set.Of(maps.Keys(nodeWeights)...), + }, + log: log, + nodeWeights: nodeWeights, + received: make(map[ids.ID]uint64), + } +} + +func (m *Majority) RecordOpinion(_ context.Context, nodeID ids.NodeID, blkIDs set.Set[ids.ID]) error { + if !m.recordResponse(nodeID) { + // The chain router should have already dropped unexpected messages. + m.log.Error("received unexpected opinion", + zap.String("pollType", "majority"), + zap.Stringer("nodeID", nodeID), + zap.Reflect("blkIDs", blkIDs), + ) + return nil + } + + weight := m.nodeWeights[nodeID] + for blkID := range blkIDs { + newWeight, err := math.Add64(m.received[blkID], weight) + if err != nil { + return err + } + m.received[blkID] = newWeight + } + + if !m.finished() { + return nil + } + + var ( + totalWeight uint64 + err error + ) + for _, weight := range m.nodeWeights { + totalWeight, err = math.Add64(totalWeight, weight) + if err != nil { + return err + } + } + + requiredWeight := totalWeight/2 + 1 + for blkID, weight := range m.received { + if weight >= requiredWeight { + m.accepted = append(m.accepted, blkID) + } + } + + m.log.Debug("finalized bootstrapping poll", + zap.String("pollType", "majority"), + zap.Stringers("accepted", m.accepted), + ) + return nil +} + +func (m *Majority) Result(context.Context) ([]ids.ID, bool) { + return m.accepted, m.finished() +} diff --git 
a/avalanchego/snow/consensus/snowman/bootstrapper/majority_test.go b/avalanchego/snow/consensus/snowman/bootstrapper/majority_test.go new file mode 100644 index 00000000..819840f2 --- /dev/null +++ b/avalanchego/snow/consensus/snowman/bootstrapper/majority_test.go @@ -0,0 +1,396 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package bootstrapper + +import ( + "context" + "math" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/set" + + safemath "github.com/ava-labs/avalanchego/utils/math" +) + +func TestNewMajority(t *testing.T) { + majority := NewMajority( + logging.NoLog{}, // log + map[ids.NodeID]uint64{ + nodeID0: 1, + nodeID1: 1, + }, // nodeWeights + 2, // maxOutstanding + ) + + expectedMajority := &Majority{ + requests: requests{ + maxOutstanding: 2, + pendingSend: set.Of(nodeID0, nodeID1), + }, + log: logging.NoLog{}, + nodeWeights: map[ids.NodeID]uint64{ + nodeID0: 1, + nodeID1: 1, + }, + received: make(map[ids.ID]uint64), + } + require.Equal(t, expectedMajority, majority) +} + +func TestMajorityGetPeers(t *testing.T) { + tests := []struct { + name string + majority Poll + expectedState Poll + expectedPeers set.Set[ids.NodeID] + }{ + { + name: "max outstanding", + majority: &Majority{ + requests: requests{ + maxOutstanding: 1, + pendingSend: set.Of(nodeID0), + outstanding: set.Of(nodeID1), + }, + log: logging.NoLog{}, + nodeWeights: map[ids.NodeID]uint64{ + nodeID0: 1, + nodeID1: 1, + }, + received: make(map[ids.ID]uint64), + }, + expectedState: &Majority{ + requests: requests{ + maxOutstanding: 1, + pendingSend: set.Of(nodeID0), + outstanding: set.Of(nodeID1), + }, + log: logging.NoLog{}, + nodeWeights: map[ids.NodeID]uint64{ + nodeID0: 1, + nodeID1: 1, + }, + received: make(map[ids.ID]uint64), + }, + expectedPeers: nil, + }, + { + name: "send until 
max outstanding", + majority: &Majority{ + requests: requests{ + maxOutstanding: 2, + pendingSend: set.Of(nodeID0, nodeID1), + }, + log: logging.NoLog{}, + nodeWeights: map[ids.NodeID]uint64{ + nodeID0: 1, + nodeID1: 1, + }, + received: make(map[ids.ID]uint64), + }, + expectedState: &Majority{ + requests: requests{ + maxOutstanding: 2, + pendingSend: set.Set[ids.NodeID]{}, + outstanding: set.Of(nodeID0, nodeID1), + }, + log: logging.NoLog{}, + nodeWeights: map[ids.NodeID]uint64{ + nodeID0: 1, + nodeID1: 1, + }, + received: make(map[ids.ID]uint64), + }, + expectedPeers: set.Of(nodeID0, nodeID1), + }, + { + name: "send until no more to send", + majority: &Majority{ + requests: requests{ + maxOutstanding: 2, + pendingSend: set.Of(nodeID0), + }, + log: logging.NoLog{}, + nodeWeights: map[ids.NodeID]uint64{ + nodeID0: 1, + }, + received: make(map[ids.ID]uint64), + }, + expectedState: &Majority{ + requests: requests{ + maxOutstanding: 2, + pendingSend: set.Set[ids.NodeID]{}, + outstanding: set.Of(nodeID0), + }, + log: logging.NoLog{}, + nodeWeights: map[ids.NodeID]uint64{ + nodeID0: 1, + }, + received: make(map[ids.ID]uint64), + }, + expectedPeers: set.Of(nodeID0), + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + peers := test.majority.GetPeers(context.Background()) + require.Equal(test.expectedState, test.majority) + require.Equal(test.expectedPeers, peers) + }) + } +} + +func TestMajorityRecordOpinion(t *testing.T) { + tests := []struct { + name string + majority Poll + nodeID ids.NodeID + blkIDs set.Set[ids.ID] + expectedState Poll + expectedErr error + }{ + { + name: "unexpected response", + majority: &Majority{ + requests: requests{ + maxOutstanding: 1, + pendingSend: set.Of(nodeID0), + outstanding: set.Of(nodeID1), + }, + log: logging.NoLog{}, + nodeWeights: map[ids.NodeID]uint64{ + nodeID0: 1, + nodeID1: 1, + }, + received: make(map[ids.ID]uint64), + }, + nodeID: nodeID0, + blkIDs: nil, + 
expectedState: &Majority{ + requests: requests{ + maxOutstanding: 1, + pendingSend: set.Of(nodeID0), + outstanding: set.Of(nodeID1), + }, + log: logging.NoLog{}, + nodeWeights: map[ids.NodeID]uint64{ + nodeID0: 1, + nodeID1: 1, + }, + received: make(map[ids.ID]uint64), + }, + expectedErr: nil, + }, + { + name: "unfinished after response", + majority: &Majority{ + requests: requests{ + maxOutstanding: 1, + pendingSend: set.Of(nodeID0), + outstanding: set.Of(nodeID1), + }, + log: logging.NoLog{}, + nodeWeights: map[ids.NodeID]uint64{ + nodeID0: 2, + nodeID1: 3, + }, + received: make(map[ids.ID]uint64), + }, + nodeID: nodeID1, + blkIDs: set.Of(blkID0), + expectedState: &Majority{ + requests: requests{ + maxOutstanding: 1, + pendingSend: set.Of(nodeID0), + outstanding: set.Set[ids.NodeID]{}, + }, + log: logging.NoLog{}, + nodeWeights: map[ids.NodeID]uint64{ + nodeID0: 2, + nodeID1: 3, + }, + received: map[ids.ID]uint64{ + blkID0: 3, + }, + }, + expectedErr: nil, + }, + { + name: "overflow during response", + majority: &Majority{ + requests: requests{ + maxOutstanding: 1, + outstanding: set.Of(nodeID1), + }, + log: logging.NoLog{}, + nodeWeights: map[ids.NodeID]uint64{ + nodeID0: 1, + nodeID1: math.MaxUint64, + }, + received: map[ids.ID]uint64{ + blkID0: 1, + }, + }, + nodeID: nodeID1, + blkIDs: set.Of(blkID0), + expectedState: &Majority{ + requests: requests{ + maxOutstanding: 1, + outstanding: set.Set[ids.NodeID]{}, + }, + log: logging.NoLog{}, + nodeWeights: map[ids.NodeID]uint64{ + nodeID0: 1, + nodeID1: math.MaxUint64, + }, + received: map[ids.ID]uint64{ + blkID0: 1, + }, + }, + expectedErr: safemath.ErrOverflow, + }, + { + name: "overflow during final response", + majority: &Majority{ + requests: requests{ + maxOutstanding: 1, + outstanding: set.Of(nodeID1), + }, + log: logging.NoLog{}, + nodeWeights: map[ids.NodeID]uint64{ + nodeID0: 1, + nodeID1: math.MaxUint64, + }, + received: make(map[ids.ID]uint64), + }, + nodeID: nodeID1, + blkIDs: set.Of(blkID0), + 
expectedState: &Majority{ + requests: requests{ + maxOutstanding: 1, + outstanding: set.Set[ids.NodeID]{}, + }, + log: logging.NoLog{}, + nodeWeights: map[ids.NodeID]uint64{ + nodeID0: 1, + nodeID1: math.MaxUint64, + }, + received: map[ids.ID]uint64{ + blkID0: math.MaxUint64, + }, + }, + expectedErr: safemath.ErrOverflow, + }, + { + name: "finished after response", + majority: &Majority{ + requests: requests{ + maxOutstanding: 1, + outstanding: set.Of(nodeID2), + }, + log: logging.NoLog{}, + nodeWeights: map[ids.NodeID]uint64{ + nodeID0: 1, + nodeID1: 1, + nodeID2: 1, + }, + received: map[ids.ID]uint64{ + blkID0: 1, + blkID1: 1, + }, + }, + nodeID: nodeID2, + blkIDs: set.Of(blkID1), + expectedState: &Majority{ + requests: requests{ + maxOutstanding: 1, + outstanding: set.Set[ids.NodeID]{}, + }, + log: logging.NoLog{}, + nodeWeights: map[ids.NodeID]uint64{ + nodeID0: 1, + nodeID1: 1, + nodeID2: 1, + }, + received: map[ids.ID]uint64{ + blkID0: 1, + blkID1: 2, + }, + accepted: []ids.ID{blkID1}, + }, + expectedErr: nil, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + err := test.majority.RecordOpinion(context.Background(), test.nodeID, test.blkIDs) + require.Equal(test.expectedState, test.majority) + require.ErrorIs(err, test.expectedErr) + }) + } +} + +func TestMajorityResult(t *testing.T) { + tests := []struct { + name string + majority Poll + expectedAccepted []ids.ID + expectedFinalized bool + }{ + { + name: "not finalized", + majority: &Majority{ + requests: requests{ + maxOutstanding: 1, + outstanding: set.Of(nodeID1), + }, + log: logging.NoLog{}, + nodeWeights: map[ids.NodeID]uint64{ + nodeID0: 1, + nodeID1: 1, + }, + received: make(map[ids.ID]uint64), + accepted: nil, + }, + expectedAccepted: nil, + expectedFinalized: false, + }, + { + name: "finalized", + majority: &Majority{ + requests: requests{ + maxOutstanding: 1, + }, + log: logging.NoLog{}, + nodeWeights: map[ids.NodeID]uint64{ + nodeID0: 1, 
+ nodeID1: 1, + }, + received: map[ids.ID]uint64{ + blkID0: 2, + }, + accepted: []ids.ID{blkID0}, + }, + expectedAccepted: []ids.ID{blkID0}, + expectedFinalized: true, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + accepted, finalized := test.majority.Result(context.Background()) + require.Equal(test.expectedAccepted, accepted) + require.Equal(test.expectedFinalized, finalized) + }) + } +} diff --git a/avalanchego/snow/consensus/snowman/bootstrapper/minority.go b/avalanchego/snow/consensus/snowman/bootstrapper/minority.go new file mode 100644 index 00000000..4674921a --- /dev/null +++ b/avalanchego/snow/consensus/snowman/bootstrapper/minority.go @@ -0,0 +1,77 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package bootstrapper + +import ( + "context" + + "go.uber.org/zap" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/set" +) + +var _ Poll = (*Minority)(nil) + +// Minority implements the bootstrapping poll to determine the initial set of +// potentially accaptable blocks. +// +// This poll fetches the last accepted block from an initial set of peers. In +// order for the protocol to find a recently accepted block, there must be at +// least one correct node in this set of peers. If there is not a correct node +// in the set of peers, the node will not accept an incorrect block. However, +// the node may be unable to find an acceptable block. 
+type Minority struct { + requests + + log logging.Logger + + receivedSet set.Set[ids.ID] + received []ids.ID +} + +func NewMinority( + log logging.Logger, + frontierNodes set.Set[ids.NodeID], + maxOutstanding int, +) *Minority { + return &Minority{ + requests: requests{ + maxOutstanding: maxOutstanding, + pendingSend: frontierNodes, + }, + log: log, + } +} + +func (m *Minority) RecordOpinion(_ context.Context, nodeID ids.NodeID, blkIDs set.Set[ids.ID]) error { + if !m.recordResponse(nodeID) { + // The chain router should have already dropped unexpected messages. + m.log.Error("received unexpected opinion", + zap.String("pollType", "minority"), + zap.Stringer("nodeID", nodeID), + zap.Reflect("blkIDs", blkIDs), + ) + return nil + } + + m.receivedSet.Union(blkIDs) + + if !m.finished() { + return nil + } + + m.received = m.receivedSet.List() + + m.log.Debug("finalized bootstrapping poll", + zap.String("pollType", "minority"), + zap.Stringers("frontier", m.received), + ) + return nil +} + +func (m *Minority) Result(context.Context) ([]ids.ID, bool) { + return m.received, m.finished() +} diff --git a/avalanchego/snow/consensus/snowman/bootstrapper/minority_test.go b/avalanchego/snow/consensus/snowman/bootstrapper/minority_test.go new file mode 100644 index 00000000..c44b314f --- /dev/null +++ b/avalanchego/snow/consensus/snowman/bootstrapper/minority_test.go @@ -0,0 +1,242 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package bootstrapper + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/set" +) + +func TestNewMinority(t *testing.T) { + minority := NewMinority( + logging.NoLog{}, // log + set.Of(nodeID0), // frontierNodes + 2, // maxOutstanding + ) + + expectedMinority := &Minority{ + requests: requests{ + maxOutstanding: 2, + pendingSend: set.Of(nodeID0), + }, + log: logging.NoLog{}, + } + require.Equal(t, expectedMinority, minority) +} + +func TestMinorityGetPeers(t *testing.T) { + tests := []struct { + name string + minority Poll + expectedState Poll + expectedPeers set.Set[ids.NodeID] + }{ + { + name: "max outstanding", + minority: &Minority{ + requests: requests{ + maxOutstanding: 1, + pendingSend: set.Of(nodeID0), + outstanding: set.Of(nodeID1), + }, + log: logging.NoLog{}, + }, + expectedState: &Minority{ + requests: requests{ + maxOutstanding: 1, + pendingSend: set.Of(nodeID0), + outstanding: set.Of(nodeID1), + }, + log: logging.NoLog{}, + }, + expectedPeers: nil, + }, + { + name: "send until max outstanding", + minority: &Minority{ + requests: requests{ + maxOutstanding: 2, + pendingSend: set.Of(nodeID0, nodeID1), + }, + log: logging.NoLog{}, + }, + expectedState: &Minority{ + requests: requests{ + maxOutstanding: 2, + pendingSend: set.Set[ids.NodeID]{}, + outstanding: set.Of(nodeID0, nodeID1), + }, + log: logging.NoLog{}, + }, + expectedPeers: set.Of(nodeID0, nodeID1), + }, + { + name: "send until no more to send", + minority: &Minority{ + requests: requests{ + maxOutstanding: 2, + pendingSend: set.Of(nodeID0), + }, + log: logging.NoLog{}, + }, + expectedState: &Minority{ + requests: requests{ + maxOutstanding: 2, + pendingSend: set.Set[ids.NodeID]{}, + outstanding: set.Of(nodeID0), + }, + log: logging.NoLog{}, + }, + expectedPeers: set.Of(nodeID0), + }, + } + for _, test := range tests { + t.Run(test.name, 
func(t *testing.T) { + require := require.New(t) + + peers := test.minority.GetPeers(context.Background()) + require.Equal(test.expectedState, test.minority) + require.Equal(test.expectedPeers, peers) + }) + } +} + +func TestMinorityRecordOpinion(t *testing.T) { + tests := []struct { + name string + minority Poll + nodeID ids.NodeID + blkIDs set.Set[ids.ID] + expectedState Poll + expectedErr error + }{ + { + name: "unexpected response", + minority: &Minority{ + requests: requests{ + maxOutstanding: 1, + pendingSend: set.Of(nodeID0), + outstanding: set.Of(nodeID1), + }, + log: logging.NoLog{}, + }, + nodeID: nodeID0, + blkIDs: nil, + expectedState: &Minority{ + requests: requests{ + maxOutstanding: 1, + pendingSend: set.Of(nodeID0), + outstanding: set.Of(nodeID1), + }, + log: logging.NoLog{}, + }, + expectedErr: nil, + }, + { + name: "unfinished after response", + minority: &Minority{ + requests: requests{ + maxOutstanding: 1, + pendingSend: set.Of(nodeID0), + outstanding: set.Of(nodeID1), + }, + log: logging.NoLog{}, + }, + nodeID: nodeID1, + blkIDs: set.Of(blkID0), + expectedState: &Minority{ + requests: requests{ + maxOutstanding: 1, + pendingSend: set.Of(nodeID0), + outstanding: set.Set[ids.NodeID]{}, + }, + log: logging.NoLog{}, + receivedSet: set.Of(blkID0), + }, + expectedErr: nil, + }, + { + name: "finished after response", + minority: &Minority{ + requests: requests{ + maxOutstanding: 1, + outstanding: set.Of(nodeID2), + }, + log: logging.NoLog{}, + }, + nodeID: nodeID2, + blkIDs: set.Of(blkID1), + expectedState: &Minority{ + requests: requests{ + maxOutstanding: 1, + outstanding: set.Set[ids.NodeID]{}, + }, + log: logging.NoLog{}, + receivedSet: set.Of(blkID1), + received: []ids.ID{blkID1}, + }, + expectedErr: nil, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + err := test.minority.RecordOpinion(context.Background(), test.nodeID, test.blkIDs) + require.Equal(test.expectedState, test.minority) 
+ require.ErrorIs(err, test.expectedErr) + }) + } +} + +func TestMinorityResult(t *testing.T) { + tests := []struct { + name string + minority Poll + expectedAccepted []ids.ID + expectedFinalized bool + }{ + { + name: "not finalized", + minority: &Minority{ + requests: requests{ + maxOutstanding: 1, + outstanding: set.Of(nodeID1), + }, + log: logging.NoLog{}, + received: nil, + }, + expectedAccepted: nil, + expectedFinalized: false, + }, + { + name: "finalized", + minority: &Minority{ + requests: requests{ + maxOutstanding: 1, + }, + log: logging.NoLog{}, + receivedSet: set.Of(blkID0), + received: []ids.ID{blkID0}, + }, + expectedAccepted: []ids.ID{blkID0}, + expectedFinalized: true, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + accepted, finalized := test.minority.Result(context.Background()) + require.Equal(test.expectedAccepted, accepted) + require.Equal(test.expectedFinalized, finalized) + }) + } +} diff --git a/avalanchego/snow/consensus/snowman/bootstrapper/noop.go b/avalanchego/snow/consensus/snowman/bootstrapper/noop.go new file mode 100644 index 00000000..6d97eed0 --- /dev/null +++ b/avalanchego/snow/consensus/snowman/bootstrapper/noop.go @@ -0,0 +1,27 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package bootstrapper + +import ( + "context" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/set" +) + +var Noop Poll = noop{} + +type noop struct{} + +func (noop) GetPeers(context.Context) set.Set[ids.NodeID] { + return nil +} + +func (noop) RecordOpinion(context.Context, ids.NodeID, set.Set[ids.ID]) error { + return nil +} + +func (noop) Result(context.Context) ([]ids.ID, bool) { + return nil, false +} diff --git a/avalanchego/snow/consensus/snowman/bootstrapper/noop_test.go b/avalanchego/snow/consensus/snowman/bootstrapper/noop_test.go new file mode 100644 index 00000000..e0bccb8a --- /dev/null +++ b/avalanchego/snow/consensus/snowman/bootstrapper/noop_test.go @@ -0,0 +1,23 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package bootstrapper + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestNoop(t *testing.T) { + require := require.New(t) + + require.Empty(Noop.GetPeers(context.Background())) + + require.NoError(Noop.RecordOpinion(context.Background(), nodeID0, nil)) + + blkIDs, finalized := Noop.Result(context.Background()) + require.Empty(blkIDs) + require.False(finalized) +} diff --git a/avalanchego/snow/consensus/snowman/bootstrapper/poll.go b/avalanchego/snow/consensus/snowman/bootstrapper/poll.go new file mode 100644 index 00000000..0d3eb714 --- /dev/null +++ b/avalanchego/snow/consensus/snowman/bootstrapper/poll.go @@ -0,0 +1,23 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package bootstrapper + +import ( + "context" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/set" +) + +type Poll interface { + // GetPeers returns the set of peers whose opinion should be requested. It + // is expected to repeatedly call this function along with [RecordOpinion] + // until [Result] returns finalized. 
+ GetPeers(ctx context.Context) (peers set.Set[ids.NodeID]) + // RecordOpinion of a node whose opinion was requested. + RecordOpinion(ctx context.Context, nodeID ids.NodeID, blkIDs set.Set[ids.ID]) error + // Result returns the evaluation of all the peer's opinions along with a + // flag to identify that the result has finished being calculated. + Result(ctx context.Context) (blkIDs []ids.ID, finalized bool) +} diff --git a/avalanchego/snow/consensus/snowman/bootstrapper/poll_test.go b/avalanchego/snow/consensus/snowman/bootstrapper/poll_test.go new file mode 100644 index 00000000..bbdcc0db --- /dev/null +++ b/avalanchego/snow/consensus/snowman/bootstrapper/poll_test.go @@ -0,0 +1,15 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package bootstrapper + +import "github.com/ava-labs/avalanchego/ids" + +var ( + nodeID0 = ids.GenerateTestNodeID() + nodeID1 = ids.GenerateTestNodeID() + nodeID2 = ids.GenerateTestNodeID() + + blkID0 = ids.GenerateTestID() + blkID1 = ids.GenerateTestID() +) diff --git a/avalanchego/snow/consensus/snowman/bootstrapper/requests.go b/avalanchego/snow/consensus/snowman/bootstrapper/requests.go new file mode 100644 index 00000000..c152d6aa --- /dev/null +++ b/avalanchego/snow/consensus/snowman/bootstrapper/requests.go @@ -0,0 +1,47 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package bootstrapper + +import ( + "context" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/set" +) + +type requests struct { + maxOutstanding int + + pendingSend set.Set[ids.NodeID] + outstanding set.Set[ids.NodeID] +} + +func (r *requests) GetPeers(context.Context) set.Set[ids.NodeID] { + numPending := r.outstanding.Len() + if numPending >= r.maxOutstanding { + return nil + } + + numToSend := min( + r.maxOutstanding-numPending, + r.pendingSend.Len(), + ) + nodeIDs := set.NewSet[ids.NodeID](numToSend) + for i := 0; i < numToSend; i++ { + nodeID, _ := r.pendingSend.Pop() + nodeIDs.Add(nodeID) + } + r.outstanding.Union(nodeIDs) + return nodeIDs +} + +func (r *requests) recordResponse(nodeID ids.NodeID) bool { + wasOutstanding := r.outstanding.Contains(nodeID) + r.outstanding.Remove(nodeID) + return wasOutstanding +} + +func (r *requests) finished() bool { + return r.pendingSend.Len() == 0 && r.outstanding.Len() == 0 +} diff --git a/avalanchego/snow/consensus/snowman/bootstrapper/sampler.go b/avalanchego/snow/consensus/snowman/bootstrapper/sampler.go new file mode 100644 index 00000000..b43f6d91 --- /dev/null +++ b/avalanchego/snow/consensus/snowman/bootstrapper/sampler.go @@ -0,0 +1,49 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package bootstrapper + +import ( + "github.com/ava-labs/avalanchego/utils/math" + "github.com/ava-labs/avalanchego/utils/sampler" + "github.com/ava-labs/avalanchego/utils/set" +) + +// Sample keys from [elements] uniformly by weight without replacement. The +// returned set will have size less than or equal to [maxSize]. This function +// will error if the sum of all weights overflows. 
+func Sample[T comparable](elements map[T]uint64, maxSize int) (set.Set[T], error) { + var ( + keys = make([]T, len(elements)) + weights = make([]uint64, len(elements)) + totalWeight uint64 + err error + ) + i := 0 + for key, weight := range elements { + keys[i] = key + weights[i] = weight + totalWeight, err = math.Add64(totalWeight, weight) + if err != nil { + return nil, err + } + i++ + } + + sampler := sampler.NewWeightedWithoutReplacement() + if err := sampler.Initialize(weights); err != nil { + return nil, err + } + + maxSize = int(min(uint64(maxSize), totalWeight)) + indices, err := sampler.Sample(maxSize) + if err != nil { + return nil, err + } + + sampledElements := set.NewSet[T](maxSize) + for _, index := range indices { + sampledElements.Add(keys[index]) + } + return sampledElements, nil +} diff --git a/avalanchego/snow/consensus/snowman/bootstrapper/sampler_test.go b/avalanchego/snow/consensus/snowman/bootstrapper/sampler_test.go new file mode 100644 index 00000000..b438a5fb --- /dev/null +++ b/avalanchego/snow/consensus/snowman/bootstrapper/sampler_test.go @@ -0,0 +1,75 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package bootstrapper + +import ( + "math" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/set" + + safemath "github.com/ava-labs/avalanchego/utils/math" +) + +func TestSample(t *testing.T) { + tests := []struct { + name string + elements map[ids.NodeID]uint64 + maxSize int + expectedSampled set.Set[ids.NodeID] + expectedErr error + }{ + { + name: "sample everything", + elements: map[ids.NodeID]uint64{ + nodeID0: 1, + nodeID1: 1, + }, + maxSize: 2, + expectedSampled: set.Of(nodeID0, nodeID1), + expectedErr: nil, + }, + { + name: "limit sample due to too few elements", + elements: map[ids.NodeID]uint64{ + nodeID0: 1, + }, + maxSize: 2, + expectedSampled: set.Of(nodeID0), + expectedErr: nil, + }, + { + name: "limit sample", + elements: map[ids.NodeID]uint64{ + nodeID0: math.MaxUint64 - 1, + nodeID1: 1, + }, + maxSize: 1, + expectedSampled: set.Of(nodeID0), + expectedErr: nil, + }, + { + name: "overflow", + elements: map[ids.NodeID]uint64{ + nodeID0: math.MaxUint64, + nodeID1: 1, + }, + maxSize: 1, + expectedSampled: nil, + expectedErr: safemath.ErrOverflow, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + sampled, err := Sample(test.elements, test.maxSize) + require.ErrorIs(err, test.expectedErr) + require.Equal(test.expectedSampled, sampled) + }) + } +} diff --git a/avalanchego/snow/consensus/snowman/consensus.go b/avalanchego/snow/consensus/snowman/consensus.go index d88c385b..3f100641 100644 --- a/avalanchego/snow/consensus/snowman/consensus.go +++ b/avalanchego/snow/consensus/snowman/consensus.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowman @@ -45,19 +45,19 @@ type Consensus interface { // chain. 
IsPreferred(Block) bool - // Returns the ID of the last accepted decision. - LastAccepted() ids.ID + // Returns the ID and height of the last accepted decision. + LastAccepted() (ids.ID, uint64) // Returns the ID of the tail of the strongly preferred sequence of // decisions. Preference() ids.ID + // Returns the ID of the strongly preferred decision with the provided + // height. Only the last accepted decision and processing decisions are + // tracked. + PreferenceAtHeight(height uint64) (ids.ID, bool) + // RecordPoll collects the results of a network poll. Assumes all decisions // have been previously added. Returns if a critical error has occurred. RecordPoll(context.Context, bag.Bag[ids.ID]) error - - // Finalized returns true if all decisions that have been added have been - // finalized. Note, it is possible that after returning finalized, a new - // decision may be added such that this instance is no longer finalized. - Finalized() bool } diff --git a/avalanchego/snow/consensus/snowman/consensus_test.go b/avalanchego/snow/consensus/snowman/consensus_test.go index ee731a28..1069ae35 100644 --- a/avalanchego/snow/consensus/snowman/consensus_test.go +++ b/avalanchego/snow/consensus/snowman/consensus_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package snowman @@ -14,15 +14,14 @@ import ( "time" "github.com/prometheus/client_golang/prometheus" - "github.com/stretchr/testify/require" + "gonum.org/v1/gonum/mathext/prng" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowball" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/utils/bag" - "github.com/ava-labs/avalanchego/utils/sampler" ) type testFunc func(*testing.T, Factory) @@ -57,6 +56,7 @@ var ( RecordPollDivergedVotingTest, RecordPollDivergedVotingWithNoConflictingBitTest, RecordPollChangePreferredChainTest, + LastAcceptedTest, MetricsProcessingErrorTest, MetricsAcceptedErrorTest, MetricsRejectedErrorTest, @@ -65,8 +65,9 @@ var ( ErrorOnRejectSiblingTest, ErrorOnTransitiveRejectionTest, RandomizedConsistencyTest, - ErrorOnAddDecidedBlock, - ErrorOnAddDuplicateBlockID, + ErrorOnAddDecidedBlockTest, + ErrorOnAddDuplicateBlockIDTest, + RecordPollWithDefaultParameters, } errTest = errors.New("non-nil error") @@ -87,12 +88,16 @@ func getTestName(i interface{}) string { // Make sure that initialize sets the state correctly func InitializeTest(t *testing.T, factory Factory) { + require := require.New(t) + sm := factory.New() - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) params := snowball.Parameters{ K: 1, - Alpha: 1, + AlphaPreference: 1, + AlphaConfidence: 1, BetaVirtuous: 3, BetaRogue: 5, ConcurrentRepolls: 1, @@ -101,25 +106,24 @@ func InitializeTest(t *testing.T, factory Factory) { MaxItemProcessingTime: 1, } - if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp); err != nil { - t.Fatal(err) - } + require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp)) - if pref := sm.Preference(); pref != GenesisID { - t.Fatalf("Wrong preference returned") - } 
else if !sm.Finalized() { - t.Fatalf("Wrong should have marked the instance as being finalized") - } + require.Equal(GenesisID, sm.Preference()) + require.Zero(sm.NumProcessing()) } // Make sure that the number of processing blocks is tracked correctly func NumProcessingTest(t *testing.T, factory Factory) { + require := require.New(t) + sm := factory.New() - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) params := snowball.Parameters{ K: 1, - Alpha: 1, + AlphaPreference: 1, + AlphaConfidence: 1, BetaVirtuous: 1, BetaRogue: 1, ConcurrentRepolls: 1, @@ -127,9 +131,7 @@ func NumProcessingTest(t *testing.T, factory Factory) { MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp); err != nil { - t.Fatal(err) - } + require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp)) block := &TestBlock{ TestDecidable: choices.TestDecidable{ @@ -140,38 +142,31 @@ func NumProcessingTest(t *testing.T, factory Factory) { HeightV: Genesis.HeightV + 1, } - if numProcessing := sm.NumProcessing(); numProcessing != 0 { - t.Fatalf("expected %d blocks to be processing but returned %d", 0, numProcessing) - } + require.Zero(sm.NumProcessing()) // Adding to the previous preference will update the preference - if err := sm.Add(context.Background(), block); err != nil { - t.Fatal(err) - } + require.NoError(sm.Add(context.Background(), block)) - if numProcessing := sm.NumProcessing(); numProcessing != 1 { - t.Fatalf("expected %d blocks to be processing but returned %d", 1, numProcessing) - } + require.Equal(1, sm.NumProcessing()) - votes := bag.Bag[ids.ID]{} - votes.Add(block.ID()) - if err := sm.RecordPoll(context.Background(), votes); err != nil { - t.Fatal(err) - } + votes := bag.Of(block.ID()) + require.NoError(sm.RecordPoll(context.Background(), votes)) - if numProcessing := sm.NumProcessing(); 
numProcessing != 0 { - t.Fatalf("expected %d blocks to be processing but returned %d", 0, numProcessing) - } + require.Zero(sm.NumProcessing()) } // Make sure that adding a block to the tail updates the preference func AddToTailTest(t *testing.T, factory Factory) { + require := require.New(t) + sm := factory.New() - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) params := snowball.Parameters{ K: 1, - Alpha: 1, + AlphaPreference: 1, + AlphaConfidence: 1, BetaVirtuous: 3, BetaRogue: 5, ConcurrentRepolls: 1, @@ -179,9 +174,7 @@ func AddToTailTest(t *testing.T, factory Factory) { MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp); err != nil { - t.Fatal(err) - } + require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp)) block := &TestBlock{ TestDecidable: choices.TestDecidable{ @@ -193,23 +186,27 @@ func AddToTailTest(t *testing.T, factory Factory) { } // Adding to the previous preference will update the preference - if err := sm.Add(context.Background(), block); err != nil { - t.Fatal(err) - } else if pref := sm.Preference(); pref != block.ID() { - t.Fatalf("Wrong preference. 
Expected %s, got %s", block.ID(), pref) - } else if !sm.IsPreferred(block) { - t.Fatalf("Should have marked %s as being Preferred", pref) - } + require.NoError(sm.Add(context.Background(), block)) + require.Equal(block.ID(), sm.Preference()) + require.True(sm.IsPreferred(block)) + + pref, ok := sm.PreferenceAtHeight(block.Height()) + require.True(ok) + require.Equal(block.ID(), pref) } // Make sure that adding a block not to the tail doesn't change the preference func AddToNonTailTest(t *testing.T, factory Factory) { + require := require.New(t) + sm := factory.New() - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) params := snowball.Parameters{ K: 1, - Alpha: 1, + AlphaPreference: 1, + AlphaConfidence: 1, BetaVirtuous: 3, BetaRogue: 5, ConcurrentRepolls: 1, @@ -217,9 +214,7 @@ func AddToNonTailTest(t *testing.T, factory Factory) { MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp); err != nil { - t.Fatal(err) - } + require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp)) firstBlock := &TestBlock{ TestDecidable: choices.TestDecidable{ @@ -239,30 +234,28 @@ func AddToNonTailTest(t *testing.T, factory Factory) { } // Adding to the previous preference will update the preference - if err := sm.Add(context.Background(), firstBlock); err != nil { - t.Fatal(err) - } else if pref := sm.Preference(); pref != firstBlock.IDV { - t.Fatalf("Wrong preference. Expected %s, got %s", firstBlock.IDV, pref) - } + require.NoError(sm.Add(context.Background(), firstBlock)) + require.Equal(firstBlock.IDV, sm.Preference()) // Adding to something other than the previous preference won't update the // preference - if err := sm.Add(context.Background(), secondBlock); err != nil { - t.Fatal(err) - } else if pref := sm.Preference(); pref != firstBlock.IDV { - t.Fatalf("Wrong preference. 
Expected %s, got %s", firstBlock.IDV, pref) - } + require.NoError(sm.Add(context.Background(), secondBlock)) + require.Equal(firstBlock.IDV, sm.Preference()) } // Make sure that adding a block that is detached from the rest of the tree // rejects the block func AddToUnknownTest(t *testing.T, factory Factory) { + require := require.New(t) + sm := factory.New() - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) params := snowball.Parameters{ K: 1, - Alpha: 1, + AlphaPreference: 1, + AlphaConfidence: 1, BetaVirtuous: 3, BetaRogue: 5, ConcurrentRepolls: 1, @@ -270,9 +263,7 @@ func AddToUnknownTest(t *testing.T, factory Factory) { MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp); err != nil { - t.Fatal(err) - } + require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp)) parent := &TestBlock{TestDecidable: choices.TestDecidable{ IDV: ids.Empty.Prefix(1), @@ -290,22 +281,22 @@ func AddToUnknownTest(t *testing.T, factory Factory) { // Adding a block with an unknown parent means the parent must have already // been rejected. Therefore the block should be immediately rejected - if err := sm.Add(context.Background(), block); err != nil { - t.Fatal(err) - } else if pref := sm.Preference(); pref != GenesisID { - t.Fatalf("Wrong preference. 
Expected %s, got %s", GenesisID, pref) - } else if status := block.Status(); status != choices.Rejected { - t.Fatalf("Should have rejected the block") - } + require.NoError(sm.Add(context.Background(), block)) + require.Equal(GenesisID, sm.Preference()) + require.Equal(choices.Rejected, block.Status()) } func StatusOrProcessingPreviouslyAcceptedTest(t *testing.T, factory Factory) { + require := require.New(t) + sm := factory.New() - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) params := snowball.Parameters{ K: 1, - Alpha: 1, + AlphaPreference: 1, + AlphaConfidence: 1, BetaVirtuous: 3, BetaRogue: 5, ConcurrentRepolls: 1, @@ -313,31 +304,29 @@ func StatusOrProcessingPreviouslyAcceptedTest(t *testing.T, factory Factory) { MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp); err != nil { - t.Fatal(err) - } + require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp)) - if Genesis.Status() != choices.Accepted { - t.Fatalf("Should have marked an accepted block as having been accepted") - } - if sm.Processing(Genesis.ID()) { - t.Fatalf("Shouldn't have marked an accepted block as having been processing") - } - if !sm.Decided(Genesis) { - t.Fatalf("Should have marked an accepted block as having been decided") - } - if !sm.IsPreferred(Genesis) { - t.Fatalf("Should have marked an accepted block as being preferred") - } + require.Equal(choices.Accepted, Genesis.Status()) + require.False(sm.Processing(Genesis.ID())) + require.True(sm.Decided(Genesis)) + require.True(sm.IsPreferred(Genesis)) + + pref, ok := sm.PreferenceAtHeight(Genesis.Height()) + require.True(ok) + require.Equal(Genesis.ID(), pref) } func StatusOrProcessingPreviouslyRejectedTest(t *testing.T, factory Factory) { + require := require.New(t) + sm := factory.New() - ctx := snow.DefaultConsensusContextTest() + 
snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) params := snowball.Parameters{ K: 1, - Alpha: 1, + AlphaPreference: 1, + AlphaConfidence: 1, BetaVirtuous: 3, BetaRogue: 5, ConcurrentRepolls: 1, @@ -345,9 +334,7 @@ func StatusOrProcessingPreviouslyRejectedTest(t *testing.T, factory Factory) { MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp); err != nil { - t.Fatal(err) - } + require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp)) block := &TestBlock{ TestDecidable: choices.TestDecidable{ @@ -358,27 +345,26 @@ func StatusOrProcessingPreviouslyRejectedTest(t *testing.T, factory Factory) { HeightV: Genesis.HeightV + 1, } - if block.Status() == choices.Accepted { - t.Fatalf("Shouldn't have marked a rejected block as having been accepted") - } - if sm.Processing(block.ID()) { - t.Fatalf("Shouldn't have marked a rejected block as having been processing") - } - if !sm.Decided(block) { - t.Fatalf("Should have marked a rejected block as having been decided") - } - if sm.IsPreferred(block) { - t.Fatalf("Shouldn't have marked a rejected block as being preferred") - } + require.Equal(choices.Rejected, block.Status()) + require.False(sm.Processing(block.ID())) + require.True(sm.Decided(block)) + require.False(sm.IsPreferred(block)) + + _, ok := sm.PreferenceAtHeight(block.Height()) + require.False(ok) } func StatusOrProcessingUnissuedTest(t *testing.T, factory Factory) { + require := require.New(t) + sm := factory.New() - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) params := snowball.Parameters{ K: 1, - Alpha: 1, + AlphaPreference: 1, + AlphaConfidence: 1, BetaVirtuous: 3, BetaRogue: 5, ConcurrentRepolls: 1, @@ -386,9 +372,7 @@ func StatusOrProcessingUnissuedTest(t *testing.T, factory Factory) { MaxOutstandingItems: 1, 
MaxItemProcessingTime: 1, } - if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp); err != nil { - t.Fatal(err) - } + require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp)) block := &TestBlock{ TestDecidable: choices.TestDecidable{ @@ -399,27 +383,26 @@ func StatusOrProcessingUnissuedTest(t *testing.T, factory Factory) { HeightV: Genesis.HeightV + 1, } - if block.Status() == choices.Accepted { - t.Fatalf("Shouldn't have marked an unissued block as having been accepted") - } - if sm.Processing(block.ID()) { - t.Fatalf("Shouldn't have marked an unissued block as having been processing") - } - if sm.Decided(block) { - t.Fatalf("Should't have marked an unissued block as having been decided") - } - if sm.IsPreferred(block) { - t.Fatalf("Shouldn't have marked an unissued block as being preferred") - } + require.Equal(choices.Processing, block.Status()) + require.False(sm.Processing(block.ID())) + require.False(sm.Decided(block)) + require.False(sm.IsPreferred(block)) + + _, ok := sm.PreferenceAtHeight(block.Height()) + require.False(ok) } func StatusOrProcessingIssuedTest(t *testing.T, factory Factory) { + require := require.New(t) + sm := factory.New() - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) params := snowball.Parameters{ K: 1, - Alpha: 1, + AlphaPreference: 1, + AlphaConfidence: 1, BetaVirtuous: 3, BetaRogue: 5, ConcurrentRepolls: 1, @@ -427,9 +410,7 @@ func StatusOrProcessingIssuedTest(t *testing.T, factory Factory) { MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp); err != nil { - t.Fatal(err) - } + require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp)) block := &TestBlock{ TestDecidable: choices.TestDecidable{ @@ -440,30 +421,28 @@ func StatusOrProcessingIssuedTest(t *testing.T, factory 
Factory) { HeightV: Genesis.HeightV + 1, } - if err := sm.Add(context.Background(), block); err != nil { - t.Fatal(err) - } - if block.Status() == choices.Accepted { - t.Fatalf("Shouldn't have marked the block as accepted") - } - if !sm.Processing(block.ID()) { - t.Fatalf("Should have marked the block as processing") - } - if sm.Decided(block) { - t.Fatalf("Shouldn't have marked the block as decided") - } - if !sm.IsPreferred(block) { - t.Fatalf("Should have marked the tail as being preferred") - } + require.NoError(sm.Add(context.Background(), block)) + require.Equal(choices.Processing, block.Status()) + require.True(sm.Processing(block.ID())) + require.False(sm.Decided(block)) + require.True(sm.IsPreferred(block)) + + pref, ok := sm.PreferenceAtHeight(block.Height()) + require.True(ok) + require.Equal(block.ID(), pref) } func RecordPollAcceptSingleBlockTest(t *testing.T, factory Factory) { + require := require.New(t) + sm := factory.New() - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) params := snowball.Parameters{ K: 1, - Alpha: 1, + AlphaPreference: 1, + AlphaConfidence: 1, BetaVirtuous: 2, BetaRogue: 3, ConcurrentRepolls: 1, @@ -471,9 +450,7 @@ func RecordPollAcceptSingleBlockTest(t *testing.T, factory Factory) { MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp); err != nil { - t.Fatal(err) - } + require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp)) block := &TestBlock{ TestDecidable: choices.TestDecidable{ @@ -484,38 +461,31 @@ func RecordPollAcceptSingleBlockTest(t *testing.T, factory Factory) { HeightV: Genesis.HeightV + 1, } - if err := sm.Add(context.Background(), block); err != nil { - t.Fatal(err) - } + require.NoError(sm.Add(context.Background(), block)) - votes := bag.Bag[ids.ID]{} - votes.Add(block.ID()) - if err := 
sm.RecordPoll(context.Background(), votes); err != nil { - t.Fatal(err) - } else if pref := sm.Preference(); pref != block.ID() { - t.Fatalf("Preference returned the wrong block") - } else if sm.Finalized() { - t.Fatalf("Snowman instance finalized too soon") - } else if status := block.Status(); status != choices.Processing { - t.Fatalf("Block's status changed unexpectedly") - } else if err := sm.RecordPoll(context.Background(), votes); err != nil { - t.Fatal(err) - } else if pref := sm.Preference(); pref != block.ID() { - t.Fatalf("Preference returned the wrong block") - } else if !sm.Finalized() { - t.Fatalf("Snowman instance didn't finalize") - } else if status := block.Status(); status != choices.Accepted { - t.Fatalf("Block's status should have been set to accepted") - } + votes := bag.Of(block.ID()) + require.NoError(sm.RecordPoll(context.Background(), votes)) + require.Equal(block.ID(), sm.Preference()) + require.Equal(1, sm.NumProcessing()) + require.Equal(choices.Processing, block.Status()) + + require.NoError(sm.RecordPoll(context.Background(), votes)) + require.Equal(block.ID(), sm.Preference()) + require.Zero(sm.NumProcessing()) + require.Equal(choices.Accepted, block.Status()) } func RecordPollAcceptAndRejectTest(t *testing.T, factory Factory) { + require := require.New(t) + sm := factory.New() - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) params := snowball.Parameters{ K: 1, - Alpha: 1, + AlphaPreference: 1, + AlphaConfidence: 1, BetaVirtuous: 1, BetaRogue: 2, ConcurrentRepolls: 1, @@ -523,9 +493,7 @@ func RecordPollAcceptAndRejectTest(t *testing.T, factory Factory) { MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp); err != nil { - t.Fatal(err) - } + require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp)) firstBlock := &TestBlock{ 
TestDecidable: choices.TestDecidable{ @@ -544,49 +512,37 @@ func RecordPollAcceptAndRejectTest(t *testing.T, factory Factory) { HeightV: Genesis.HeightV + 1, } - if err := sm.Add(context.Background(), firstBlock); err != nil { - t.Fatal(err) - } else if err := sm.Add(context.Background(), secondBlock); err != nil { - t.Fatal(err) - } + require.NoError(sm.Add(context.Background(), firstBlock)) + require.NoError(sm.Add(context.Background(), secondBlock)) - votes := bag.Bag[ids.ID]{} - votes.Add(firstBlock.ID()) - - if err := sm.RecordPoll(context.Background(), votes); err != nil { - t.Fatal(err) - } else if pref := sm.Preference(); pref != firstBlock.ID() { - t.Fatalf("Preference returned the wrong block") - } else if sm.Finalized() { - t.Fatalf("Snowman instance finalized too soon") - } else if status := firstBlock.Status(); status != choices.Processing { - t.Fatalf("Block's status changed unexpectedly") - } else if status := secondBlock.Status(); status != choices.Processing { - t.Fatalf("Block's status changed unexpectedly") - } else if err := sm.RecordPoll(context.Background(), votes); err != nil { - t.Fatal(err) - } else if pref := sm.Preference(); pref != firstBlock.ID() { - t.Fatalf("Preference returned the wrong block") - } else if !sm.Finalized() { - t.Fatalf("Snowman instance didn't finalize") - } else if status := firstBlock.Status(); status != choices.Accepted { - t.Fatalf("Block's status should have been set to accepted") - } else if status := secondBlock.Status(); status != choices.Rejected { - t.Fatalf("Block's status should have been set to rejected") - } + votes := bag.Of(firstBlock.ID()) + + require.NoError(sm.RecordPoll(context.Background(), votes)) + require.Equal(firstBlock.ID(), sm.Preference()) + require.Equal(2, sm.NumProcessing()) + require.Equal(choices.Processing, firstBlock.Status()) + require.Equal(choices.Processing, secondBlock.Status()) + + require.NoError(sm.RecordPoll(context.Background(), votes)) + require.Equal(firstBlock.ID(), 
sm.Preference()) + require.Zero(sm.NumProcessing()) + require.Equal(choices.Accepted, firstBlock.Status()) + require.Equal(choices.Rejected, secondBlock.Status()) } func RecordPollSplitVoteNoChangeTest(t *testing.T, factory Factory) { require := require.New(t) sm := factory.New() - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) registerer := prometheus.NewRegistry() ctx.Registerer = registerer params := snowball.Parameters{ K: 2, - Alpha: 2, + AlphaPreference: 2, + AlphaConfidence: 2, BetaVirtuous: 1, BetaRogue: 2, ConcurrentRepolls: 1, @@ -616,36 +572,38 @@ func RecordPollSplitVoteNoChangeTest(t *testing.T, factory Factory) { require.NoError(sm.Add(context.Background(), firstBlock)) require.NoError(sm.Add(context.Background(), secondBlock)) - votes := bag.Bag[ids.ID]{} - votes.Add(firstBlock.ID()) - votes.Add(secondBlock.ID()) + votes := bag.Of(firstBlock.ID(), secondBlock.ID()) // The first poll will accept shared bits require.NoError(sm.RecordPoll(context.Background(), votes)) require.Equal(firstBlock.ID(), sm.Preference()) - require.False(sm.Finalized()) + require.Equal(2, sm.NumProcessing()) metrics := gatherCounterGauge(t, registerer) - require.EqualValues(0, metrics["polls_failed"]) - require.EqualValues(1, metrics["polls_successful"]) + require.Zero(metrics["polls_failed"]) + require.Equal(float64(1), metrics["polls_successful"]) // The second poll will do nothing require.NoError(sm.RecordPoll(context.Background(), votes)) require.Equal(firstBlock.ID(), sm.Preference()) - require.False(sm.Finalized()) + require.Equal(2, sm.NumProcessing()) metrics = gatherCounterGauge(t, registerer) - require.EqualValues(1, metrics["polls_failed"]) - require.EqualValues(1, metrics["polls_successful"]) + require.Equal(float64(1), metrics["polls_failed"]) + require.Equal(float64(1), metrics["polls_successful"]) } func RecordPollWhenFinalizedTest(t *testing.T, factory Factory) { + 
require := require.New(t) + sm := factory.New() - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) params := snowball.Parameters{ K: 1, - Alpha: 1, + AlphaPreference: 1, + AlphaConfidence: 1, BetaVirtuous: 1, BetaRogue: 2, ConcurrentRepolls: 1, @@ -653,28 +611,25 @@ func RecordPollWhenFinalizedTest(t *testing.T, factory Factory) { MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp); err != nil { - t.Fatal(err) - } + require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp)) - votes := bag.Bag[ids.ID]{} - votes.Add(GenesisID) - if err := sm.RecordPoll(context.Background(), votes); err != nil { - t.Fatal(err) - } else if !sm.Finalized() { - t.Fatalf("Consensus should still be finalized") - } else if pref := sm.Preference(); GenesisID != pref { - t.Fatalf("Wrong preference listed") - } + votes := bag.Of(GenesisID) + require.NoError(sm.RecordPoll(context.Background(), votes)) + require.Zero(sm.NumProcessing()) + require.Equal(GenesisID, sm.Preference()) } func RecordPollRejectTransitivelyTest(t *testing.T, factory Factory) { + require := require.New(t) + sm := factory.New() - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) params := snowball.Parameters{ K: 1, - Alpha: 1, + AlphaPreference: 1, + AlphaConfidence: 1, BetaVirtuous: 1, BetaRogue: 1, ConcurrentRepolls: 1, @@ -682,9 +637,7 @@ func RecordPollRejectTransitivelyTest(t *testing.T, factory Factory) { MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp); err != nil { - t.Fatal(err) - } + require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp)) block0 := &TestBlock{ TestDecidable: choices.TestDecidable{ @@ -711,13 
+664,9 @@ func RecordPollRejectTransitivelyTest(t *testing.T, factory Factory) { HeightV: block1.HeightV + 1, } - if err := sm.Add(context.Background(), block0); err != nil { - t.Fatal(err) - } else if err := sm.Add(context.Background(), block1); err != nil { - t.Fatal(err) - } else if err := sm.Add(context.Background(), block2); err != nil { - t.Fatal(err) - } + require.NoError(sm.Add(context.Background(), block0)) + require.NoError(sm.Add(context.Background(), block1)) + require.NoError(sm.Add(context.Background(), block2)) // Current graph structure: // G @@ -727,36 +676,31 @@ func RecordPollRejectTransitivelyTest(t *testing.T, factory Factory) { // 2 // Tail = 0 - votes := bag.Bag[ids.ID]{} - votes.Add(block0.ID()) - if err := sm.RecordPoll(context.Background(), votes); err != nil { - t.Fatal(err) - } + votes := bag.Of(block0.ID()) + require.NoError(sm.RecordPoll(context.Background(), votes)) // Current graph structure: // 0 // Tail = 0 - if !sm.Finalized() { - t.Fatalf("Finalized too late") - } else if pref := sm.Preference(); block0.ID() != pref { - t.Fatalf("Wrong preference listed") - } else if status := block0.Status(); status != choices.Accepted { - t.Fatalf("Wrong status returned") - } else if status := block1.Status(); status != choices.Rejected { - t.Fatalf("Wrong status returned") - } else if status := block2.Status(); status != choices.Rejected { - t.Fatalf("Wrong status returned") - } + require.Zero(sm.NumProcessing()) + require.Equal(block0.ID(), sm.Preference()) + require.Equal(choices.Accepted, block0.Status()) + require.Equal(choices.Rejected, block1.Status()) + require.Equal(choices.Rejected, block2.Status()) } func RecordPollTransitivelyResetConfidenceTest(t *testing.T, factory Factory) { + require := require.New(t) + sm := factory.New() - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) params := snowball.Parameters{ K: 1, - Alpha: 1, + AlphaPreference: 
1, + AlphaConfidence: 1, BetaVirtuous: 2, BetaRogue: 2, ConcurrentRepolls: 1, @@ -764,9 +708,7 @@ func RecordPollTransitivelyResetConfidenceTest(t *testing.T, factory Factory) { MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp); err != nil { - t.Fatal(err) - } + require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp)) block0 := &TestBlock{ TestDecidable: choices.TestDecidable{ @@ -801,15 +743,10 @@ func RecordPollTransitivelyResetConfidenceTest(t *testing.T, factory Factory) { HeightV: block1.HeightV + 1, } - if err := sm.Add(context.Background(), block0); err != nil { - t.Fatal(err) - } else if err := sm.Add(context.Background(), block1); err != nil { - t.Fatal(err) - } else if err := sm.Add(context.Background(), block2); err != nil { - t.Fatal(err) - } else if err := sm.Add(context.Background(), block3); err != nil { - t.Fatal(err) - } + require.NoError(sm.Add(context.Background(), block0)) + require.NoError(sm.Add(context.Background(), block1)) + require.NoError(sm.Add(context.Background(), block2)) + require.NoError(sm.Add(context.Background(), block3)) // Current graph structure: // G @@ -818,63 +755,45 @@ func RecordPollTransitivelyResetConfidenceTest(t *testing.T, factory Factory) { // / \ // 2 3 - votesFor2 := bag.Bag[ids.ID]{} - votesFor2.Add(block2.ID()) - if err := sm.RecordPoll(context.Background(), votesFor2); err != nil { - t.Fatal(err) - } else if sm.Finalized() { - t.Fatalf("Finalized too early") - } else if pref := sm.Preference(); block2.ID() != pref { - t.Fatalf("Wrong preference listed") - } + votesFor2 := bag.Of(block2.ID()) + require.NoError(sm.RecordPoll(context.Background(), votesFor2)) + require.Equal(4, sm.NumProcessing()) + require.Equal(block2.ID(), sm.Preference()) emptyVotes := bag.Bag[ids.ID]{} - if err := sm.RecordPoll(context.Background(), emptyVotes); err != nil { - t.Fatal(err) - } else if sm.Finalized() { - 
t.Fatalf("Finalized too early") - } else if pref := sm.Preference(); block2.ID() != pref { - t.Fatalf("Wrong preference listed") - } else if err := sm.RecordPoll(context.Background(), votesFor2); err != nil { - t.Fatal(err) - } else if sm.Finalized() { - t.Fatalf("Finalized too early") - } else if pref := sm.Preference(); block2.ID() != pref { - t.Fatalf("Wrong preference listed") - } - - votesFor3 := bag.Bag[ids.ID]{} - votesFor3.Add(block3.ID()) - if err := sm.RecordPoll(context.Background(), votesFor3); err != nil { - t.Fatal(err) - } else if sm.Finalized() { - t.Fatalf("Finalized too early") - } else if pref := sm.Preference(); block2.ID() != pref { - t.Fatalf("Wrong preference listed") - } else if err := sm.RecordPoll(context.Background(), votesFor3); err != nil { - t.Fatal(err) - } else if !sm.Finalized() { - t.Fatalf("Finalized too late") - } else if pref := sm.Preference(); block3.ID() != pref { - t.Fatalf("Wrong preference listed") - } else if status := block0.Status(); status != choices.Rejected { - t.Fatalf("Wrong status returned") - } else if status := block1.Status(); status != choices.Accepted { - t.Fatalf("Wrong status returned") - } else if status := block2.Status(); status != choices.Rejected { - t.Fatalf("Wrong status returned") - } else if status := block3.Status(); status != choices.Accepted { - t.Fatalf("Wrong status returned") - } + require.NoError(sm.RecordPoll(context.Background(), emptyVotes)) + require.Equal(4, sm.NumProcessing()) + require.Equal(block2.ID(), sm.Preference()) + + require.NoError(sm.RecordPoll(context.Background(), votesFor2)) + require.Equal(4, sm.NumProcessing()) + require.Equal(block2.ID(), sm.Preference()) + + votesFor3 := bag.Of(block3.ID()) + require.NoError(sm.RecordPoll(context.Background(), votesFor3)) + require.Equal(2, sm.NumProcessing()) + require.Equal(block2.ID(), sm.Preference()) + + require.NoError(sm.RecordPoll(context.Background(), votesFor3)) + require.Zero(sm.NumProcessing()) + require.Equal(block3.ID(), 
sm.Preference()) + require.Equal(choices.Rejected, block0.Status()) + require.Equal(choices.Accepted, block1.Status()) + require.Equal(choices.Rejected, block2.Status()) + require.Equal(choices.Accepted, block3.Status()) } func RecordPollInvalidVoteTest(t *testing.T, factory Factory) { + require := require.New(t) + sm := factory.New() - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) params := snowball.Parameters{ K: 1, - Alpha: 1, + AlphaPreference: 1, + AlphaConfidence: 1, BetaVirtuous: 2, BetaRogue: 2, ConcurrentRepolls: 1, @@ -882,9 +801,7 @@ func RecordPollInvalidVoteTest(t *testing.T, factory Factory) { MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp); err != nil { - t.Fatal(err) - } + require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp)) block := &TestBlock{ TestDecidable: choices.TestDecidable{ @@ -896,36 +813,29 @@ func RecordPollInvalidVoteTest(t *testing.T, factory Factory) { } unknownBlockID := ids.Empty.Prefix(2) - if err := sm.Add(context.Background(), block); err != nil { - t.Fatal(err) - } + require.NoError(sm.Add(context.Background(), block)) - validVotes := bag.Bag[ids.ID]{} - validVotes.Add(block.ID()) - if err := sm.RecordPoll(context.Background(), validVotes); err != nil { - t.Fatal(err) - } + validVotes := bag.Of(block.ID()) + require.NoError(sm.RecordPoll(context.Background(), validVotes)) - invalidVotes := bag.Bag[ids.ID]{} - invalidVotes.Add(unknownBlockID) - if err := sm.RecordPoll(context.Background(), invalidVotes); err != nil { - t.Fatal(err) - } else if err := sm.RecordPoll(context.Background(), validVotes); err != nil { - t.Fatal(err) - } else if sm.Finalized() { - t.Fatalf("Finalized too early") - } else if pref := sm.Preference(); block.ID() != pref { - t.Fatalf("Wrong preference listed") - } + invalidVotes := 
bag.Of(unknownBlockID) + require.NoError(sm.RecordPoll(context.Background(), invalidVotes)) + require.NoError(sm.RecordPoll(context.Background(), validVotes)) + require.Equal(1, sm.NumProcessing()) + require.Equal(block.ID(), sm.Preference()) } func RecordPollTransitiveVotingTest(t *testing.T, factory Factory) { + require := require.New(t) + sm := factory.New() - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) params := snowball.Parameters{ K: 3, - Alpha: 3, + AlphaPreference: 3, + AlphaConfidence: 3, BetaVirtuous: 1, BetaRogue: 1, ConcurrentRepolls: 1, @@ -933,9 +843,7 @@ func RecordPollTransitiveVotingTest(t *testing.T, factory Factory) { MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp); err != nil { - t.Fatal(err) - } + require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp)) block0 := &TestBlock{ TestDecidable: choices.TestDecidable{ @@ -978,17 +886,11 @@ func RecordPollTransitiveVotingTest(t *testing.T, factory Factory) { HeightV: block3.HeightV + 1, } - if err := sm.Add(context.Background(), block0); err != nil { - t.Fatal(err) - } else if err := sm.Add(context.Background(), block1); err != nil { - t.Fatal(err) - } else if err := sm.Add(context.Background(), block2); err != nil { - t.Fatal(err) - } else if err := sm.Add(context.Background(), block3); err != nil { - t.Fatal(err) - } else if err := sm.Add(context.Background(), block4); err != nil { - t.Fatal(err) - } + require.NoError(sm.Add(context.Background(), block0)) + require.NoError(sm.Add(context.Background(), block1)) + require.NoError(sm.Add(context.Background(), block2)) + require.NoError(sm.Add(context.Background(), block3)) + require.NoError(sm.Add(context.Background(), block4)) // Current graph structure: // G @@ -1000,15 +902,8 @@ func RecordPollTransitiveVotingTest(t *testing.T, factory 
Factory) { // 2 4 // Tail = 2 - votes0_2_4 := bag.Bag[ids.ID]{} - votes0_2_4.Add( - block0.ID(), - block2.ID(), - block4.ID(), - ) - if err := sm.RecordPoll(context.Background(), votes0_2_4); err != nil { - t.Fatal(err) - } + votes0_2_4 := bag.Of(block0.ID(), block2.ID(), block4.ID()) + require.NoError(sm.RecordPoll(context.Background(), votes0_2_4)) // Current graph structure: // 0 @@ -1018,61 +913,40 @@ func RecordPollTransitiveVotingTest(t *testing.T, factory Factory) { // 2 4 // Tail = 2 - pref := sm.Preference() - switch { - case block2.ID() != pref: - t.Fatalf("Wrong preference listed") - case sm.Finalized(): - t.Fatalf("Finalized too early") - case block0.Status() != choices.Accepted: - t.Fatalf("Should have accepted") - case block1.Status() != choices.Processing: - t.Fatalf("Should have accepted") - case block2.Status() != choices.Processing: - t.Fatalf("Should have accepted") - case block3.Status() != choices.Processing: - t.Fatalf("Should have rejected") - case block4.Status() != choices.Processing: - t.Fatalf("Should have rejected") - } - - dep2_2_2 := bag.Bag[ids.ID]{} - dep2_2_2.AddCount(block2.ID(), 3) - if err := sm.RecordPoll(context.Background(), dep2_2_2); err != nil { - t.Fatal(err) - } + require.Equal(4, sm.NumProcessing()) + require.Equal(block2.ID(), sm.Preference()) + require.Equal(choices.Accepted, block0.Status()) + require.Equal(choices.Processing, block1.Status()) + require.Equal(choices.Processing, block2.Status()) + require.Equal(choices.Processing, block3.Status()) + require.Equal(choices.Processing, block4.Status()) + + dep2_2_2 := bag.Of(block2.ID(), block2.ID(), block2.ID()) + require.NoError(sm.RecordPoll(context.Background(), dep2_2_2)) // Current graph structure: // 2 // Tail = 2 - pref = sm.Preference() - switch { - case block2.ID() != pref: - t.Fatalf("Wrong preference listed") - case !sm.Finalized(): - t.Fatalf("Finalized too late") - case block0.Status() != choices.Accepted: - t.Fatalf("Should have accepted") - case 
block1.Status() != choices.Accepted: - t.Fatalf("Should have accepted") - case block2.Status() != choices.Accepted: - t.Fatalf("Should have accepted") - case block3.Status() != choices.Rejected: - t.Fatalf("Should have rejected") - case block4.Status() != choices.Rejected: - t.Fatalf("Should have rejected") - } + require.Zero(sm.NumProcessing()) + require.Equal(block2.ID(), sm.Preference()) + require.Equal(choices.Accepted, block0.Status()) + require.Equal(choices.Accepted, block1.Status()) + require.Equal(choices.Accepted, block2.Status()) + require.Equal(choices.Rejected, block3.Status()) + require.Equal(choices.Rejected, block4.Status()) } func RecordPollDivergedVotingTest(t *testing.T, factory Factory) { sm := factory.New() require := require.New(t) - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) params := snowball.Parameters{ K: 1, - Alpha: 1, + AlphaPreference: 1, + AlphaConfidence: 1, BetaVirtuous: 1, BetaRogue: 2, ConcurrentRepolls: 1, @@ -1080,8 +954,7 @@ func RecordPollDivergedVotingTest(t *testing.T, factory Factory) { MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp) - require.NoError(err) + require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp)) block0 := &TestBlock{ TestDecidable: choices.TestDecidable{ @@ -1116,31 +989,25 @@ func RecordPollDivergedVotingTest(t *testing.T, factory Factory) { HeightV: block2.HeightV + 1, } - err = sm.Add(context.Background(), block0) - require.NoError(err) + require.NoError(sm.Add(context.Background(), block0)) - err = sm.Add(context.Background(), block1) - require.NoError(err) + require.NoError(sm.Add(context.Background(), block1)) // The first bit is contested as either 0 or 1. 
When voting for [block0] and // when the first bit is 1, the following bits have been decided to follow // the 255 remaining bits of [block0]. - votes0 := bag.Bag[ids.ID]{} - votes0.Add(block0.ID()) - err = sm.RecordPoll(context.Background(), votes0) - require.NoError(err) + votes0 := bag.Of(block0.ID()) + require.NoError(sm.RecordPoll(context.Background(), votes0)) // Although we are adding in [block2] here - the underlying snowball // instance has already decided it is rejected. Snowman doesn't actually // know that though, because that is an implementation detail of the // Snowball trie that is used. - err = sm.Add(context.Background(), block2) - require.NoError(err) + require.NoError(sm.Add(context.Background(), block2)) // Because [block2] is effectively rejected, [block3] is also effectively // rejected. - err = sm.Add(context.Background(), block3) - require.NoError(err) + require.NoError(sm.Add(context.Background(), block3)) require.Equal(block0.ID(), sm.Preference()) require.Equal(choices.Processing, block0.Status(), "should not be accepted yet") @@ -1164,13 +1031,11 @@ func RecordPollDivergedVotingTest(t *testing.T, factory Factory) { // [block0]. When [block0] is accepted, [block1] and [block2] are rejected // as conflicting. [block2]'s child, [block3], is then rejected // transitively. 
- votes3 := bag.Bag[ids.ID]{} - votes3.Add(block3.ID()) - err = sm.RecordPoll(context.Background(), votes3) - require.NoError(err) + votes3 := bag.Of(block3.ID()) + require.NoError(sm.RecordPoll(context.Background(), votes3)) - require.True(sm.Finalized(), "finalized too late") - require.Equal(choices.Accepted, block0.Status(), "should be accepted") + require.Zero(sm.NumProcessing()) + require.Equal(choices.Accepted, block0.Status()) require.Equal(choices.Rejected, block1.Status()) require.Equal(choices.Rejected, block2.Status()) require.Equal(choices.Rejected, block3.Status()) @@ -1180,10 +1045,12 @@ func RecordPollDivergedVotingWithNoConflictingBitTest(t *testing.T, factory Fact sm := factory.New() require := require.New(t) - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) params := snowball.Parameters{ K: 1, - Alpha: 1, + AlphaPreference: 1, + AlphaConfidence: 1, BetaVirtuous: 1, BetaRogue: 2, ConcurrentRepolls: 1, @@ -1233,8 +1100,7 @@ func RecordPollDivergedVotingWithNoConflictingBitTest(t *testing.T, factory Fact // second bit is contested as either 0 or 1. For when the second bit is 1, // the following bits have been decided to follow the 254 remaining bits of // [block0]. - votes0 := bag.Bag[ids.ID]{} - votes0.Add(block0.ID()) + votes0 := bag.Of(block0.ID()) require.NoError(sm.RecordPoll(context.Background(), votes0)) // Although we are adding in [block2] here - the underlying snowball @@ -1269,11 +1135,10 @@ func RecordPollDivergedVotingWithNoConflictingBitTest(t *testing.T, factory Fact // dropped. Although the votes for [block3] are still applied, [block3] will // only be marked as accepted after [block2] is marked as accepted; which // will never happen. 
- votes3 := bag.Bag[ids.ID]{} - votes3.Add(block3.ID()) + votes3 := bag.Of(block3.ID()) require.NoError(sm.RecordPoll(context.Background(), votes3)) - require.False(sm.Finalized(), "finalized too early") + require.Equal(4, sm.NumProcessing()) require.Equal(choices.Processing, block0.Status()) require.Equal(choices.Processing, block1.Status()) require.Equal(choices.Processing, block2.Status()) @@ -1281,12 +1146,16 @@ func RecordPollDivergedVotingWithNoConflictingBitTest(t *testing.T, factory Fact } func RecordPollChangePreferredChainTest(t *testing.T, factory Factory) { + require := require.New(t) + sm := factory.New() - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) params := snowball.Parameters{ K: 1, - Alpha: 1, + AlphaPreference: 1, + AlphaConfidence: 1, BetaVirtuous: 10, BetaRogue: 10, ConcurrentRepolls: 1, @@ -1294,9 +1163,7 @@ func RecordPollChangePreferredChainTest(t *testing.T, factory Factory) { MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp); err != nil { - t.Fatal(err) - } + require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp)) a1Block := &TestBlock{ TestDecidable: choices.TestDecidable{ @@ -1331,95 +1198,157 @@ func RecordPollChangePreferredChainTest(t *testing.T, factory Factory) { HeightV: b1Block.HeightV + 1, } - if err := sm.Add(context.Background(), a1Block); err != nil { - t.Fatal(err) - } - if err := sm.Add(context.Background(), a2Block); err != nil { - t.Fatal(err) - } - if err := sm.Add(context.Background(), b1Block); err != nil { - t.Fatal(err) - } - if err := sm.Add(context.Background(), b2Block); err != nil { - t.Fatal(err) - } + require.NoError(sm.Add(context.Background(), a1Block)) + require.NoError(sm.Add(context.Background(), a2Block)) + require.NoError(sm.Add(context.Background(), b1Block)) + 
require.NoError(sm.Add(context.Background(), b2Block)) - if sm.Preference() != a2Block.ID() { - t.Fatal("Wrong preference reported") - } + require.Equal(a2Block.ID(), sm.Preference()) - if !sm.IsPreferred(a1Block) { - t.Fatalf("Should have reported a1 as being preferred") - } - if !sm.IsPreferred(a2Block) { - t.Fatalf("Should have reported a2 as being preferred") - } - if sm.IsPreferred(b1Block) { - t.Fatalf("Shouldn't have reported b1 as being preferred") - } - if sm.IsPreferred(b2Block) { - t.Fatalf("Shouldn't have reported b2 as being preferred") - } + require.True(sm.IsPreferred(a1Block)) + require.True(sm.IsPreferred(a2Block)) + require.False(sm.IsPreferred(b1Block)) + require.False(sm.IsPreferred(b2Block)) - b2Votes := bag.Bag[ids.ID]{} - b2Votes.Add(b2Block.ID()) + pref, ok := sm.PreferenceAtHeight(a1Block.Height()) + require.True(ok) + require.Equal(a1Block.ID(), pref) - if err := sm.RecordPoll(context.Background(), b2Votes); err != nil { - t.Fatal(err) - } + pref, ok = sm.PreferenceAtHeight(a2Block.Height()) + require.True(ok) + require.Equal(a2Block.ID(), pref) - if sm.Preference() != b2Block.ID() { - t.Fatal("Wrong preference reported") - } + b2Votes := bag.Of(b2Block.ID()) + require.NoError(sm.RecordPoll(context.Background(), b2Votes)) - if sm.IsPreferred(a1Block) { - t.Fatalf("Shouldn't have reported a1 as being preferred") - } - if sm.IsPreferred(a2Block) { - t.Fatalf("Shouldn't have reported a2 as being preferred") - } - if !sm.IsPreferred(b1Block) { - t.Fatalf("Should have reported b1 as being preferred") - } - if !sm.IsPreferred(b2Block) { - t.Fatalf("Should have reported b2 as being preferred") - } + require.Equal(b2Block.ID(), sm.Preference()) + require.False(sm.IsPreferred(a1Block)) + require.False(sm.IsPreferred(a2Block)) + require.True(sm.IsPreferred(b1Block)) + require.True(sm.IsPreferred(b2Block)) - a1Votes := bag.Bag[ids.ID]{} - a1Votes.Add(a1Block.ID()) + pref, ok = sm.PreferenceAtHeight(b1Block.Height()) + require.True(ok) + 
require.Equal(b1Block.ID(), pref) - if err := sm.RecordPoll(context.Background(), a1Votes); err != nil { - t.Fatal(err) - } - if err := sm.RecordPoll(context.Background(), a1Votes); err != nil { - t.Fatal(err) - } + pref, ok = sm.PreferenceAtHeight(b2Block.Height()) + require.True(ok) + require.Equal(b2Block.ID(), pref) + + a1Votes := bag.Of(a1Block.ID()) + require.NoError(sm.RecordPoll(context.Background(), a1Votes)) + require.NoError(sm.RecordPoll(context.Background(), a1Votes)) + + require.Equal(a2Block.ID(), sm.Preference()) + require.True(sm.IsPreferred(a1Block)) + require.True(sm.IsPreferred(a2Block)) + require.False(sm.IsPreferred(b1Block)) + require.False(sm.IsPreferred(b2Block)) + + pref, ok = sm.PreferenceAtHeight(a1Block.Height()) + require.True(ok) + require.Equal(a1Block.ID(), pref) + + pref, ok = sm.PreferenceAtHeight(a2Block.Height()) + require.True(ok) + require.Equal(a2Block.ID(), pref) +} + +func LastAcceptedTest(t *testing.T, factory Factory) { + sm := factory.New() + require := require.New(t) - if sm.Preference() != a2Block.ID() { - t.Fatal("Wrong preference reported") + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) + params := snowball.Parameters{ + K: 1, + AlphaPreference: 1, + AlphaConfidence: 1, + BetaVirtuous: 1, + BetaRogue: 2, + ConcurrentRepolls: 1, + OptimalProcessing: 1, + MaxOutstandingItems: 1, + MaxItemProcessingTime: 1, } + require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp)) - if !sm.IsPreferred(a1Block) { - t.Fatalf("Should have reported a1 as being preferred") + block0 := &TestBlock{ + TestDecidable: choices.TestDecidable{ + IDV: ids.GenerateTestID(), + StatusV: choices.Processing, + }, + ParentV: Genesis.IDV, + HeightV: Genesis.HeightV + 1, } - if !sm.IsPreferred(a2Block) { - t.Fatalf("Should have reported a2 as being preferred") + block1 := &TestBlock{ + TestDecidable: choices.TestDecidable{ + IDV: ids.GenerateTestID(), + StatusV: 
choices.Processing, + }, + ParentV: block0.IDV, + HeightV: block0.HeightV + 1, } - if sm.IsPreferred(b1Block) { - t.Fatalf("Shouldn't have reported b1 as being preferred") + block2 := &TestBlock{ + TestDecidable: choices.TestDecidable{ + IDV: ids.GenerateTestID(), + StatusV: choices.Processing, + }, + ParentV: block1.IDV, + HeightV: block1.HeightV + 1, } - if sm.IsPreferred(b2Block) { - t.Fatalf("Shouldn't have reported b2 as being preferred") + block1Conflict := &TestBlock{ + TestDecidable: choices.TestDecidable{ + IDV: ids.GenerateTestID(), + StatusV: choices.Processing, + }, + ParentV: block0.IDV, + HeightV: block0.HeightV + 1, } + + lastAcceptedID, lastAcceptedHeight := sm.LastAccepted() + require.Equal(GenesisID, lastAcceptedID) + require.Equal(GenesisHeight, lastAcceptedHeight) + + require.NoError(sm.Add(context.Background(), block0)) + require.NoError(sm.Add(context.Background(), block1)) + require.NoError(sm.Add(context.Background(), block1Conflict)) + require.NoError(sm.Add(context.Background(), block2)) + + lastAcceptedID, lastAcceptedHeight = sm.LastAccepted() + require.Equal(GenesisID, lastAcceptedID) + require.Equal(GenesisHeight, lastAcceptedHeight) + + require.NoError(sm.RecordPoll(context.Background(), bag.Of(block1.IDV))) + + lastAcceptedID, lastAcceptedHeight = sm.LastAccepted() + require.Equal(block0.IDV, lastAcceptedID) + require.Equal(block0.HeightV, lastAcceptedHeight) + + require.NoError(sm.RecordPoll(context.Background(), bag.Of(block1.IDV))) + + lastAcceptedID, lastAcceptedHeight = sm.LastAccepted() + require.Equal(block1.IDV, lastAcceptedID) + require.Equal(block1.HeightV, lastAcceptedHeight) + + require.NoError(sm.RecordPoll(context.Background(), bag.Of(block2.IDV))) + + lastAcceptedID, lastAcceptedHeight = sm.LastAccepted() + require.Equal(block2.IDV, lastAcceptedID) + require.Equal(block2.HeightV, lastAcceptedHeight) } func MetricsProcessingErrorTest(t *testing.T, factory Factory) { + require := require.New(t) + sm := factory.New() - 
ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) params := snowball.Parameters{ K: 1, - Alpha: 1, + AlphaPreference: 1, + AlphaConfidence: 1, BetaVirtuous: 1, BetaRogue: 1, ConcurrentRepolls: 1, @@ -1433,22 +1362,23 @@ func MetricsProcessingErrorTest(t *testing.T, factory Factory) { Name: "blks_processing", }) - if err := ctx.Registerer.Register(numProcessing); err != nil { - t.Fatal(err) - } + require.NoError(ctx.Registerer.Register(numProcessing)) - if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp); err == nil { - t.Fatalf("should have errored during initialization due to a duplicate metric") - } + err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp) + require.Error(err) //nolint:forbidigo // error is not exported https://github.com/prometheus/client_golang/blob/main/prometheus/registry.go#L315 } func MetricsAcceptedErrorTest(t *testing.T, factory Factory) { + require := require.New(t) + sm := factory.New() - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) params := snowball.Parameters{ K: 1, - Alpha: 1, + AlphaPreference: 1, + AlphaConfidence: 1, BetaVirtuous: 1, BetaRogue: 1, ConcurrentRepolls: 1, @@ -1462,22 +1392,23 @@ func MetricsAcceptedErrorTest(t *testing.T, factory Factory) { Name: "blks_accepted_count", }) - if err := ctx.Registerer.Register(numAccepted); err != nil { - t.Fatal(err) - } + require.NoError(ctx.Registerer.Register(numAccepted)) - if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp); err == nil { - t.Fatalf("should have errored during initialization due to a duplicate metric") - } + err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp) + require.Error(err) //nolint:forbidigo // error is not exported 
https://github.com/prometheus/client_golang/blob/main/prometheus/registry.go#L315 } func MetricsRejectedErrorTest(t *testing.T, factory Factory) { + require := require.New(t) + sm := factory.New() - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) params := snowball.Parameters{ K: 1, - Alpha: 1, + AlphaPreference: 1, + AlphaConfidence: 1, BetaVirtuous: 1, BetaRogue: 1, ConcurrentRepolls: 1, @@ -1491,22 +1422,23 @@ func MetricsRejectedErrorTest(t *testing.T, factory Factory) { Name: "blks_rejected_count", }) - if err := ctx.Registerer.Register(numRejected); err != nil { - t.Fatal(err) - } + require.NoError(ctx.Registerer.Register(numRejected)) - if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp); err == nil { - t.Fatalf("should have errored during initialization due to a duplicate metric") - } + err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp) + require.Error(err) //nolint:forbidigo // error is not exported https://github.com/prometheus/client_golang/blob/main/prometheus/registry.go#L315 } func ErrorOnInitialRejectionTest(t *testing.T, factory Factory) { + require := require.New(t) + sm := factory.New() - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) params := snowball.Parameters{ K: 1, - Alpha: 1, + AlphaPreference: 1, + AlphaConfidence: 1, BetaVirtuous: 1, BetaRogue: 1, ConcurrentRepolls: 1, @@ -1515,9 +1447,7 @@ func ErrorOnInitialRejectionTest(t *testing.T, factory Factory) { MaxItemProcessingTime: 1, } - if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp); err != nil { - t.Fatal(err) - } + require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp)) rejectedBlock := &TestBlock{TestDecidable: choices.TestDecidable{ IDV: ids.Empty.Prefix(1), @@ -1534,18 +1464,21 @@ func 
ErrorOnInitialRejectionTest(t *testing.T, factory Factory) { HeightV: rejectedBlock.HeightV + 1, } - if err := sm.Add(context.Background(), block); err == nil { - t.Fatalf("Should have errored on rejecting the rejectable block") - } + err := sm.Add(context.Background(), block) + require.ErrorIs(err, errTest) } func ErrorOnAcceptTest(t *testing.T, factory Factory) { + require := require.New(t) + sm := factory.New() - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) params := snowball.Parameters{ K: 1, - Alpha: 1, + AlphaPreference: 1, + AlphaConfidence: 1, BetaVirtuous: 1, BetaRogue: 1, ConcurrentRepolls: 1, @@ -1554,9 +1487,7 @@ func ErrorOnAcceptTest(t *testing.T, factory Factory) { MaxItemProcessingTime: 1, } - if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp); err != nil { - t.Fatal(err) - } + require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp)) block := &TestBlock{ TestDecidable: choices.TestDecidable{ @@ -1568,24 +1499,24 @@ func ErrorOnAcceptTest(t *testing.T, factory Factory) { HeightV: Genesis.HeightV + 1, } - if err := sm.Add(context.Background(), block); err != nil { - t.Fatal(err) - } + require.NoError(sm.Add(context.Background(), block)) - votes := bag.Bag[ids.ID]{} - votes.Add(block.ID()) - if err := sm.RecordPoll(context.Background(), votes); err == nil { - t.Fatalf("Should have errored on accepted the block") - } + votes := bag.Of(block.ID()) + err := sm.RecordPoll(context.Background(), votes) + require.ErrorIs(err, errTest) } func ErrorOnRejectSiblingTest(t *testing.T, factory Factory) { + require := require.New(t) + sm := factory.New() - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) params := snowball.Parameters{ K: 1, - Alpha: 1, + AlphaPreference: 1, + AlphaConfidence: 1, BetaVirtuous: 1, BetaRogue: 1, 
ConcurrentRepolls: 1, @@ -1594,9 +1525,7 @@ func ErrorOnRejectSiblingTest(t *testing.T, factory Factory) { MaxItemProcessingTime: 1, } - if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp); err != nil { - t.Fatal(err) - } + require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp)) block0 := &TestBlock{ TestDecidable: choices.TestDecidable{ @@ -1616,26 +1545,25 @@ func ErrorOnRejectSiblingTest(t *testing.T, factory Factory) { HeightV: Genesis.HeightV + 1, } - if err := sm.Add(context.Background(), block0); err != nil { - t.Fatal(err) - } else if err := sm.Add(context.Background(), block1); err != nil { - t.Fatal(err) - } + require.NoError(sm.Add(context.Background(), block0)) + require.NoError(sm.Add(context.Background(), block1)) - votes := bag.Bag[ids.ID]{} - votes.Add(block0.ID()) - if err := sm.RecordPoll(context.Background(), votes); err == nil { - t.Fatalf("Should have errored on rejecting the block's sibling") - } + votes := bag.Of(block0.ID()) + err := sm.RecordPoll(context.Background(), votes) + require.ErrorIs(err, errTest) } func ErrorOnTransitiveRejectionTest(t *testing.T, factory Factory) { + require := require.New(t) + sm := factory.New() - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) params := snowball.Parameters{ K: 1, - Alpha: 1, + AlphaPreference: 1, + AlphaConfidence: 1, BetaVirtuous: 1, BetaRogue: 1, ConcurrentRepolls: 1, @@ -1644,9 +1572,7 @@ func ErrorOnTransitiveRejectionTest(t *testing.T, factory Factory) { MaxItemProcessingTime: 1, } - if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp); err != nil { - t.Fatal(err) - } + require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp)) block0 := &TestBlock{ TestDecidable: choices.TestDecidable{ @@ -1674,66 +1600,61 @@ func ErrorOnTransitiveRejectionTest(t *testing.T, factory Factory) { 
HeightV: block1.HeightV + 1, } - if err := sm.Add(context.Background(), block0); err != nil { - t.Fatal(err) - } else if err := sm.Add(context.Background(), block1); err != nil { - t.Fatal(err) - } else if err := sm.Add(context.Background(), block2); err != nil { - t.Fatal(err) - } + require.NoError(sm.Add(context.Background(), block0)) + require.NoError(sm.Add(context.Background(), block1)) + require.NoError(sm.Add(context.Background(), block2)) - votes := bag.Bag[ids.ID]{} - votes.Add(block0.ID()) - if err := sm.RecordPoll(context.Background(), votes); err == nil { - t.Fatalf("Should have errored on transitively rejecting the block") - } + votes := bag.Of(block0.ID()) + err := sm.RecordPoll(context.Background(), votes) + require.ErrorIs(err, errTest) } func RandomizedConsistencyTest(t *testing.T, factory Factory) { - numColors := 50 - numNodes := 100 - params := snowball.Parameters{ - K: 20, - Alpha: 15, - BetaVirtuous: 20, - BetaRogue: 30, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - } - seed := int64(0) + require := require.New(t) - sampler.Seed(seed) + var ( + numColors = 50 + numNodes = 100 + params = snowball.Parameters{ + K: 20, + AlphaPreference: 15, + AlphaConfidence: 15, + BetaVirtuous: 20, + BetaRogue: 30, + ConcurrentRepolls: 1, + OptimalProcessing: 1, + MaxOutstandingItems: 1, + MaxItemProcessingTime: 1, + } + seed uint64 = 0 + source = prng.NewMT19937() + ) - n := Network{} - n.Initialize(params, numColors) + source.Seed(seed) + + n := NewNetwork(params, numColors, source) for i := 0; i < numNodes; i++ { - if err := n.AddNode(factory.New()); err != nil { - t.Fatal(err) - } + require.NoError(n.AddNode(t, factory.New())) } for !n.Finalized() { - if err := n.Round(); err != nil { - t.Fatal(err) - } + require.NoError(n.Round()) } - if !n.Agreement() { - t.Fatalf("Network agreed on inconsistent values") - } + require.True(n.Agreement()) } -func ErrorOnAddDecidedBlock(t *testing.T, factory Factory) 
{ +func ErrorOnAddDecidedBlockTest(t *testing.T, factory Factory) { sm := factory.New() require := require.New(t) - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) params := snowball.Parameters{ K: 1, - Alpha: 1, + AlphaPreference: 1, + AlphaConfidence: 1, BetaVirtuous: 1, BetaRogue: 1, ConcurrentRepolls: 1, @@ -1751,17 +1672,20 @@ func ErrorOnAddDecidedBlock(t *testing.T, factory Factory) { ParentV: Genesis.IDV, HeightV: Genesis.HeightV + 1, } - require.ErrorIs(sm.Add(context.Background(), block0), errDuplicateAdd) + err := sm.Add(context.Background(), block0) + require.ErrorIs(err, errDuplicateAdd) } -func ErrorOnAddDuplicateBlockID(t *testing.T, factory Factory) { +func ErrorOnAddDuplicateBlockIDTest(t *testing.T, factory Factory) { sm := factory.New() require := require.New(t) - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) params := snowball.Parameters{ K: 1, - Alpha: 1, + AlphaPreference: 1, + AlphaConfidence: 1, BetaVirtuous: 1, BetaRogue: 1, ConcurrentRepolls: 1, @@ -1789,14 +1713,13 @@ func ErrorOnAddDuplicateBlockID(t *testing.T, factory Factory) { } require.NoError(sm.Add(context.Background(), block0)) - require.ErrorIs(sm.Add(context.Background(), block1), errDuplicateAdd) + err := sm.Add(context.Background(), block1) + require.ErrorIs(err, errDuplicateAdd) } func gatherCounterGauge(t *testing.T, reg *prometheus.Registry) map[string]float64 { ms, err := reg.Gather() - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) mss := make(map[string]float64) for _, mf := range ms { name := mf.GetName() @@ -1815,3 +1738,45 @@ func gatherCounterGauge(t *testing.T, reg *prometheus.Registry) map[string]float } return mss } + +// You can run this test with "go test -v -run TestTopological/RecordPollWithDefaultParameters" +func RecordPollWithDefaultParameters(t *testing.T, 
factory Factory) { + require := require.New(t) + + sm := factory.New() + + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) + params := snowball.DefaultParameters + require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp)) + + // "blk1" and "blk2" are in conflict + blk1 := &TestBlock{ + TestDecidable: choices.TestDecidable{ + IDV: ids.ID{1}, + StatusV: choices.Processing, + }, + ParentV: Genesis.IDV, + HeightV: Genesis.HeightV + 1, + } + blk2 := &TestBlock{ + TestDecidable: choices.TestDecidable{ + IDV: ids.ID{2}, + StatusV: choices.Processing, + }, + ParentV: Genesis.IDV, + HeightV: Genesis.HeightV + 1, + } + require.NoError(sm.Add(context.Background(), blk1)) + require.NoError(sm.Add(context.Background(), blk2)) + + votes := bag.Bag[ids.ID]{} + votes.AddCount(blk1.ID(), params.AlphaConfidence) + // as "blk1" and "blk2" are in conflict, we need beta rogue rounds to finalize + for i := 0; i < params.BetaRogue; i++ { + // should not finalize with less than beta rogue rounds + require.Equal(2, sm.NumProcessing()) + require.NoError(sm.RecordPoll(context.Background(), votes)) + } + require.Zero(sm.NumProcessing()) +} diff --git a/avalanchego/snow/consensus/snowman/factory.go b/avalanchego/snow/consensus/snowman/factory.go index 06341981..c2fc76e8 100644 --- a/avalanchego/snow/consensus/snowman/factory.go +++ b/avalanchego/snow/consensus/snowman/factory.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowman diff --git a/avalanchego/snow/consensus/snowman/metrics.go b/avalanchego/snow/consensus/snowman/metrics.go new file mode 100644 index 00000000..43e5d7d9 --- /dev/null +++ b/avalanchego/snow/consensus/snowman/metrics.go @@ -0,0 +1,270 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. 
+// See the file LICENSE for licensing terms. + +package snowman + +import ( + "time" + + "github.com/prometheus/client_golang/prometheus" + "go.uber.org/zap" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/choices" + "github.com/ava-labs/avalanchego/utils/linkedhashmap" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/metric" + "github.com/ava-labs/avalanchego/utils/wrappers" +) + +type processingStart struct { + time time.Time + pollNumber uint64 +} + +type metrics struct { + log logging.Logger + + currentMaxVerifiedHeight uint64 + maxVerifiedHeight prometheus.Gauge + + lastAcceptedHeight prometheus.Gauge + lastAcceptedTimestamp prometheus.Gauge + + // processingBlocks keeps track of the [processingStart] that each block was + // issued into the consensus instance. This is used to calculate the amount + // of time to accept or reject the block. + processingBlocks linkedhashmap.LinkedHashmap[ids.ID, processingStart] + + // numProcessing keeps track of the number of processing blocks + numProcessing prometheus.Gauge + + blockSizeAcceptedSum prometheus.Gauge + // pollsAccepted tracks the number of polls that a block was in processing + // for before being accepted + pollsAccepted metric.Averager + // latAccepted tracks the number of nanoseconds that a block was processing + // before being accepted + latAccepted metric.Averager + buildLatencyAccepted prometheus.Gauge + + blockSizeRejectedSum prometheus.Gauge + // pollsRejected tracks the number of polls that a block was in processing + // for before being rejected + pollsRejected metric.Averager + // latRejected tracks the number of nanoseconds that a block was processing + // before being rejected + latRejected metric.Averager + + // numFailedPolls keeps track of the number of polls that failed + numFailedPolls prometheus.Counter + + // numSuccessfulPolls keeps track of the number of polls that succeeded + numSuccessfulPolls 
prometheus.Counter +} + +func newMetrics( + log logging.Logger, + namespace string, + reg prometheus.Registerer, + lastAcceptedHeight uint64, + lastAcceptedTime time.Time, +) (*metrics, error) { + errs := wrappers.Errs{} + m := &metrics{ + log: log, + currentMaxVerifiedHeight: lastAcceptedHeight, + maxVerifiedHeight: prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Name: "max_verified_height", + Help: "highest verified height", + }), + lastAcceptedHeight: prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Name: "last_accepted_height", + Help: "last height accepted", + }), + lastAcceptedTimestamp: prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Name: "last_accepted_timestamp", + Help: "timestamp of the last accepted block in unix seconds", + }), + + processingBlocks: linkedhashmap.New[ids.ID, processingStart](), + + // e.g., + // "avalanche_X_blks_processing" reports how many blocks are currently processing + numProcessing: prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Name: "blks_processing", + Help: "number of currently processing blocks", + }), + + blockSizeAcceptedSum: prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Name: "blks_accepted_container_size_sum", + Help: "cumulative size of all accepted blocks", + }), + pollsAccepted: metric.NewAveragerWithErrs( + namespace, + "blks_polls_accepted", + "number of polls from the issuance of a block to its acceptance", + reg, + &errs, + ), + // e.g., + // "avalanche_C_blks_accepted_count" reports how many times "Observe" has been called which is the total number of blocks accepted + // "avalanche_C_blks_accepted_sum" reports the cumulative sum of all block acceptance latencies in nanoseconds + // "avalanche_C_blks_accepted_sum / avalanche_C_blks_accepted_count" is the average block acceptance latency in nanoseconds + // "avalanche_C_blks_accepted_container_size_sum" reports the cumulative sum of all accepted blocks' sizes in 
bytes + // "avalanche_C_blks_accepted_container_size_sum / avalanche_C_blks_accepted_count" is the average accepted block size in bytes + latAccepted: metric.NewAveragerWithErrs( + namespace, + "blks_accepted", + "time (in ns) from the issuance of a block to its acceptance", + reg, + &errs, + ), + buildLatencyAccepted: prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Name: "blks_build_accept_latency", + Help: "time (in ns) from the timestamp of a block to the time it was accepted", + }), + + blockSizeRejectedSum: prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Name: "blks_rejected_container_size_sum", + Help: "cumulative size of all rejected blocks", + }), + pollsRejected: metric.NewAveragerWithErrs( + namespace, + "blks_polls_rejected", + "number of polls from the issuance of a block to its rejection", + reg, + &errs, + ), + // e.g., + // "avalanche_P_blks_rejected_count" reports how many times "Observe" has been called which is the total number of blocks rejected + // "avalanche_P_blks_rejected_sum" reports the cumulative sum of all block rejection latencies in nanoseconds + // "avalanche_P_blks_rejected_sum / avalanche_P_blks_rejected_count" is the average block rejection latency in nanoseconds + // "avalanche_P_blks_rejected_container_size_sum" reports the cumulative sum of all rejected blocks' sizes in bytes + // "avalanche_P_blks_rejected_container_size_sum / avalanche_P_blks_rejected_count" is the average rejected block size in bytes + latRejected: metric.NewAveragerWithErrs( + namespace, + "blks_rejected", + "time (in ns) from the issuance of a block to its rejection", + reg, + &errs, + ), + + numSuccessfulPolls: prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Name: "polls_successful", + Help: "number of successful polls", + }), + numFailedPolls: prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Name: "polls_failed", + Help: "number of failed polls", + }), + } + + // 
Initially set the metrics for the last accepted block. + m.maxVerifiedHeight.Set(float64(lastAcceptedHeight)) + m.lastAcceptedHeight.Set(float64(lastAcceptedHeight)) + m.lastAcceptedTimestamp.Set(float64(lastAcceptedTime.Unix())) + + errs.Add( + reg.Register(m.maxVerifiedHeight), + reg.Register(m.lastAcceptedHeight), + reg.Register(m.lastAcceptedTimestamp), + reg.Register(m.numProcessing), + reg.Register(m.blockSizeAcceptedSum), + reg.Register(m.buildLatencyAccepted), + reg.Register(m.blockSizeRejectedSum), + reg.Register(m.numSuccessfulPolls), + reg.Register(m.numFailedPolls), + ) + return m, errs.Err +} + +func (m *metrics) Issued(blkID ids.ID, pollNumber uint64) { + m.processingBlocks.Put(blkID, processingStart{ + time: time.Now(), + pollNumber: pollNumber, + }) + m.numProcessing.Inc() +} + +func (m *metrics) Verified(height uint64) { + m.currentMaxVerifiedHeight = max(m.currentMaxVerifiedHeight, height) + m.maxVerifiedHeight.Set(float64(m.currentMaxVerifiedHeight)) +} + +func (m *metrics) Accepted( + blkID ids.ID, + height uint64, + timestamp time.Time, + pollNumber uint64, + blockSize int, +) { + start, ok := m.processingBlocks.Get(blkID) + if !ok { + m.log.Error("unable to measure latency", + zap.Stringer("blkID", blkID), + zap.Stringer("status", choices.Accepted), + ) + return + } + m.lastAcceptedHeight.Set(float64(height)) + m.lastAcceptedTimestamp.Set(float64(timestamp.Unix())) + m.processingBlocks.Delete(blkID) + m.numProcessing.Dec() + + m.blockSizeAcceptedSum.Add(float64(blockSize)) + + m.pollsAccepted.Observe(float64(pollNumber - start.pollNumber)) + + now := time.Now() + processingDuration := now.Sub(start.time) + m.latAccepted.Observe(float64(processingDuration)) + + builtDuration := now.Sub(timestamp) + m.buildLatencyAccepted.Add(float64(builtDuration)) +} + +func (m *metrics) Rejected(blkID ids.ID, pollNumber uint64, blockSize int) { + start, ok := m.processingBlocks.Get(blkID) + if !ok { + m.log.Error("unable to measure latency", + 
zap.Stringer("blkID", blkID), + zap.Stringer("status", choices.Rejected), + ) + return + } + m.processingBlocks.Delete(blkID) + m.numProcessing.Dec() + + m.blockSizeRejectedSum.Add(float64(blockSize)) + + m.pollsRejected.Observe(float64(pollNumber - start.pollNumber)) + + duration := time.Since(start.time) + m.latRejected.Observe(float64(duration)) +} + +func (m *metrics) MeasureAndGetOldestDuration() time.Duration { + _, oldestOp, exists := m.processingBlocks.Oldest() + if !exists { + return 0 + } + return time.Since(oldestOp.time) +} + +func (m *metrics) SuccessfulPoll() { + m.numSuccessfulPolls.Inc() +} + +func (m *metrics) FailedPoll() { + m.numFailedPolls.Inc() +} diff --git a/avalanchego/snow/consensus/snowman/mock_block.go b/avalanchego/snow/consensus/snowman/mock_block.go index 2d7eb12d..45393bfe 100644 --- a/avalanchego/snow/consensus/snowman/mock_block.go +++ b/avalanchego/snow/consensus/snowman/mock_block.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/snow/consensus/snowman (interfaces: Block) +// +// Generated by this command: +// +// mockgen -package=snowman -destination=snow/consensus/snowman/mock_block.go github.com/ava-labs/avalanchego/snow/consensus/snowman Block +// // Package snowman is a generated GoMock package. package snowman @@ -14,7 +16,7 @@ import ( ids "github.com/ava-labs/avalanchego/ids" choices "github.com/ava-labs/avalanchego/snow/choices" - gomock "github.com/golang/mock/gomock" + gomock "go.uber.org/mock/gomock" ) // MockBlock is a mock of Block interface. @@ -49,7 +51,7 @@ func (m *MockBlock) Accept(arg0 context.Context) error { } // Accept indicates an expected call of Accept. 
-func (mr *MockBlockMockRecorder) Accept(arg0 interface{}) *gomock.Call { +func (mr *MockBlockMockRecorder) Accept(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Accept", reflect.TypeOf((*MockBlock)(nil).Accept), arg0) } @@ -119,7 +121,7 @@ func (m *MockBlock) Reject(arg0 context.Context) error { } // Reject indicates an expected call of Reject. -func (mr *MockBlockMockRecorder) Reject(arg0 interface{}) *gomock.Call { +func (mr *MockBlockMockRecorder) Reject(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reject", reflect.TypeOf((*MockBlock)(nil).Reject), arg0) } @@ -161,7 +163,7 @@ func (m *MockBlock) Verify(arg0 context.Context) error { } // Verify indicates an expected call of Verify. -func (mr *MockBlockMockRecorder) Verify(arg0 interface{}) *gomock.Call { +func (mr *MockBlockMockRecorder) Verify(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Verify", reflect.TypeOf((*MockBlock)(nil).Verify), arg0) } diff --git a/avalanchego/snow/consensus/snowman/network_test.go b/avalanchego/snow/consensus/snowman/network_test.go index 8eadc4b5..aead346f 100644 --- a/avalanchego/snow/consensus/snowman/network_test.go +++ b/avalanchego/snow/consensus/snowman/network_test.go @@ -1,16 +1,16 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package snowman import ( "context" - "math/rand" + "testing" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowball" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/bag" "github.com/ava-labs/avalanchego/utils/sampler" @@ -19,49 +19,57 @@ import ( type Network struct { params snowball.Parameters colors []*TestBlock + rngSource sampler.Source nodes, running []Consensus } -func (n *Network) shuffleColors() { - s := sampler.NewUniform() - _ = s.Initialize(uint64(len(n.colors))) - indices, _ := s.Sample(len(n.colors)) - colors := []*TestBlock(nil) - for _, index := range indices { - colors = append(colors, n.colors[int(index)]) +func NewNetwork(params snowball.Parameters, numColors int, rngSource sampler.Source) *Network { + n := &Network{ + params: params, + colors: []*TestBlock{{ + TestDecidable: choices.TestDecidable{ + IDV: ids.Empty.Prefix(rngSource.Uint64()), + StatusV: choices.Processing, + }, + ParentV: Genesis.IDV, + HeightV: 1, + }}, + rngSource: rngSource, } - n.colors = colors - utils.Sort(n.colors) -} - -func (n *Network) Initialize(params snowball.Parameters, numColors int) { - n.params = params - // #nosec G404 - n.colors = append(n.colors, &TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(uint64(rand.Int63())), - StatusV: choices.Processing, - }, - ParentV: Genesis.IDV, - HeightV: 1, - }) + s := sampler.NewDeterministicUniform(n.rngSource) for i := 1; i < numColors; i++ { - dependency := n.colors[rand.Intn(len(n.colors))] // #nosec G404 - // #nosec G404 + s.Initialize(uint64(len(n.colors))) + dependencyInd, _ := s.Next() + dependency := n.colors[dependencyInd] n.colors = append(n.colors, &TestBlock{ TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(uint64(rand.Int63())), + IDV: 
ids.Empty.Prefix(rngSource.Uint64()), StatusV: choices.Processing, }, ParentV: dependency.IDV, HeightV: dependency.HeightV + 1, }) } + return n } -func (n *Network) AddNode(sm Consensus) error { - if err := sm.Initialize(snow.DefaultConsensusContextTest(), n.params, Genesis.ID(), Genesis.Height(), Genesis.Timestamp()); err != nil { +func (n *Network) shuffleColors() { + s := sampler.NewDeterministicUniform(n.rngSource) + s.Initialize(uint64(len(n.colors))) + indices, _ := s.Sample(len(n.colors)) + colors := []*TestBlock(nil) + for _, index := range indices { + colors = append(colors, n.colors[int(index)]) + } + n.colors = colors + utils.Sort(n.colors) +} + +func (n *Network) AddNode(t testing.TB, sm Consensus) error { + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) + if err := sm.Initialize(ctx, n.params, Genesis.ID(), Genesis.Height(), Genesis.Timestamp()); err != nil { return err } @@ -101,11 +109,13 @@ func (n *Network) Round() error { return nil } - runningInd := rand.Intn(len(n.running)) // #nosec G404 + s := sampler.NewDeterministicUniform(n.rngSource) + s.Initialize(uint64(len(n.running))) + + runningInd, _ := s.Next() running := n.running[runningInd] - s := sampler.NewUniform() - _ = s.Initialize(uint64(len(n.nodes))) + s.Initialize(uint64(len(n.nodes))) indices, _ := s.Sample(n.params.K) sampledColors := bag.Bag[ids.ID]{} for _, index := range indices { @@ -118,7 +128,7 @@ func (n *Network) Round() error { } // If this node has been finalized, remove it from the poller - if running.Finalized() { + if running.NumProcessing() == 0 { newSize := len(n.running) - 1 n.running[runningInd] = n.running[newSize] n.running = n.running[:newSize] diff --git a/avalanchego/snow/consensus/snowman/oracle_block.go b/avalanchego/snow/consensus/snowman/oracle_block.go index 0d8bd2be..2ca81680 100644 --- a/avalanchego/snow/consensus/snowman/oracle_block.go +++ b/avalanchego/snow/consensus/snowman/oracle_block.go @@ -1,4 +1,4 @@ 
-// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowman @@ -16,10 +16,7 @@ var ErrNotOracle = errors.New("block isn't an oracle") // This ordering does not need to be deterministically created from the chain // state. type OracleBlock interface { - Block - // Options returns the possible children of this block in the order this // validator prefers the blocks. - // Options is guaranteed to only be called on a verified block. Options(context.Context) ([2]Block, error) } diff --git a/avalanchego/snow/consensus/snowman/poll/early_term_no_traversal.go b/avalanchego/snow/consensus/snowman/poll/early_term_no_traversal.go index 701eeed6..460805ab 100644 --- a/avalanchego/snow/consensus/snowman/poll/early_term_no_traversal.go +++ b/avalanchego/snow/consensus/snowman/poll/early_term_no_traversal.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package poll @@ -11,19 +11,24 @@ import ( ) type earlyTermNoTraversalFactory struct { - alpha int + alphaPreference int + alphaConfidence int } // NewEarlyTermNoTraversalFactory returns a factory that returns polls with // early termination, without doing DAG traversals -func NewEarlyTermNoTraversalFactory(alpha int) Factory { - return &earlyTermNoTraversalFactory{alpha: alpha} +func NewEarlyTermNoTraversalFactory(alphaPreference int, alphaConfidence int) Factory { + return &earlyTermNoTraversalFactory{ + alphaPreference: alphaPreference, + alphaConfidence: alphaConfidence, + } } func (f *earlyTermNoTraversalFactory) New(vdrs bag.Bag[ids.NodeID]) Poll { return &earlyTermNoTraversalPoll{ - polled: vdrs, - alpha: f.alpha, + polled: vdrs, + alphaPreference: f.alphaPreference, + alphaConfidence: f.alphaConfidence, } } @@ -31,9 +36,10 @@ func (f *earlyTermNoTraversalFactory) New(vdrs bag.Bag[ids.NodeID]) Poll { // the result of the poll. However, does not terminate tightly with this bound. // It terminates as quickly as it can without performing any DAG traversals. type earlyTermNoTraversalPoll struct { - votes bag.Bag[ids.ID] - polled bag.Bag[ids.NodeID] - alpha int + votes bag.Bag[ids.ID] + polled bag.Bag[ids.NodeID] + alphaPreference int + alphaConfidence int } // Vote registers a response for this poll @@ -51,14 +57,30 @@ func (p *earlyTermNoTraversalPoll) Drop(vdr ids.NodeID) { p.polled.Remove(vdr) } -// Finished returns true when all validators have voted +// Finished returns true when one of the following conditions is met. +// +// 1. There are no outstanding votes. +// 2. It is impossible for the poll to achieve an alphaPreference majority +// after applying transitive voting. +// 3. A single element has achieved an alphaPreference majority and it is +// impossible for it to achieve an alphaConfidence majority after applying +// transitive voting. +// 4. A single element has achieved an alphaConfidence majority. 
func (p *earlyTermNoTraversalPoll) Finished() bool { remaining := p.polled.Len() + if remaining == 0 { + return true // Case 1 + } + received := p.votes.Len() + maxPossibleVotes := received + remaining + if maxPossibleVotes < p.alphaPreference { + return true // Case 2 + } + _, freq := p.votes.Mode() - return remaining == 0 || // All k nodes responded - freq >= p.alpha || // An alpha majority has returned - received+remaining < p.alpha // An alpha majority can never return + return freq >= p.alphaPreference && maxPossibleVotes < p.alphaConfidence || // Case 3 + freq >= p.alphaConfidence // Case 4 } // Result returns the result of this poll diff --git a/avalanchego/snow/consensus/snowman/poll/early_term_no_traversal_test.go b/avalanchego/snow/consensus/snowman/poll/early_term_no_traversal_test.go index 63cca569..9d215c24 100644 --- a/avalanchego/snow/consensus/snowman/poll/early_term_no_traversal_test.go +++ b/avalanchego/snow/consensus/snowman/poll/early_term_no_traversal_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package poll @@ -6,259 +6,180 @@ package poll import ( "testing" - "github.com/ava-labs/avalanchego/ids" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/utils/bag" ) func TestEarlyTermNoTraversalResults(t *testing.T) { - alpha := 1 - - vtxID := ids.ID{1} + require := require.New(t) - vdr1 := ids.NodeID{1} // k = 1 - - vdrs := bag.Bag[ids.NodeID]{} - vdrs.Add(vdr1) + vdrs := bag.Of(vdr1) // k = 1 + alpha := 1 - factory := NewEarlyTermNoTraversalFactory(alpha) + factory := NewEarlyTermNoTraversalFactory(alpha, alpha) poll := factory.New(vdrs) - poll.Vote(vdr1, vtxID) - if !poll.Finished() { - t.Fatalf("Poll did not terminate after receiving k votes") - } + poll.Vote(vdr1, blkID1) + require.True(poll.Finished()) result := poll.Result() - if list := result.List(); len(list) != 1 { - t.Fatalf("Wrong number of vertices returned") - } else if retVtxID := list[0]; retVtxID != vtxID { - t.Fatalf("Wrong vertex returned") - } else if result.Count(vtxID) != 1 { - t.Fatalf("Wrong number of votes returned") - } + list := result.List() + require.Len(list, 1) + require.Equal(blkID1, list[0]) + require.Equal(1, result.Count(blkID1)) } func TestEarlyTermNoTraversalString(t *testing.T) { + vdrs := bag.Of(vdr1, vdr2) // k = 2 alpha := 2 - vtxID := ids.ID{1} - - vdr1 := ids.NodeID{1} - vdr2 := ids.NodeID{2} // k = 2 - - vdrs := bag.Bag[ids.NodeID]{} - vdrs.Add( - vdr1, - vdr2, - ) - - factory := NewEarlyTermNoTraversalFactory(alpha) + factory := NewEarlyTermNoTraversalFactory(alpha, alpha) poll := factory.New(vdrs) - poll.Vote(vdr1, vtxID) + poll.Vote(vdr1, blkID1) - expected := `waiting on Bag: (Size = 1) + expected := `waiting on Bag[ids.NodeID]: (Size = 1) NodeID-BaMPFdqMUQ46BV8iRcwbVfsam55kMqcp: 1 -received Bag: (Size = 1) +received Bag[ids.ID]: (Size = 1) SYXsAycDPUu4z2ZksJD5fh5nTDcH3vCFHnpcVye5XuJ2jArg: 1` - if result := poll.String(); expected != result { - t.Fatalf("Poll should have returned %s but returned %s", expected, result) - } + 
require.Equal(t, expected, poll.String()) } func TestEarlyTermNoTraversalDropsDuplicatedVotes(t *testing.T) { + require := require.New(t) + + vdrs := bag.Of(vdr1, vdr2) // k = 2 alpha := 2 - vtxID := ids.ID{1} + factory := NewEarlyTermNoTraversalFactory(alpha, alpha) + poll := factory.New(vdrs) - vdr1 := ids.NodeID{1} - vdr2 := ids.NodeID{2} // k = 2 + poll.Vote(vdr1, blkID1) + require.False(poll.Finished()) - vdrs := bag.Bag[ids.NodeID]{} - vdrs.Add( - vdr1, - vdr2, - ) + poll.Vote(vdr1, blkID1) + require.False(poll.Finished()) - factory := NewEarlyTermNoTraversalFactory(alpha) + poll.Vote(vdr2, blkID1) + require.True(poll.Finished()) +} + +// Tests case 2 +func TestEarlyTermNoTraversalTerminatesEarlyWithoutAlphaPreference(t *testing.T) { + require := require.New(t) + + vdrs := bag.Of(vdr1, vdr2, vdr3) // k = 3 + alpha := 2 + + factory := NewEarlyTermNoTraversalFactory(alpha, alpha) poll := factory.New(vdrs) - poll.Vote(vdr1, vtxID) - if poll.Finished() { - t.Fatalf("Poll finished after less than alpha votes") - } - poll.Vote(vdr1, vtxID) - if poll.Finished() { - t.Fatalf("Poll finished after getting a duplicated vote") - } - poll.Vote(vdr2, vtxID) - if !poll.Finished() { - t.Fatalf("Poll did not terminate after receiving k votes") - } + poll.Drop(vdr1) + require.False(poll.Finished()) + + poll.Drop(vdr2) + require.True(poll.Finished()) } -func TestEarlyTermNoTraversalTerminatesEarly(t *testing.T) { - alpha := 3 +// Tests case 3 +func TestEarlyTermNoTraversalTerminatesEarlyWithAlphaPreference(t *testing.T) { + require := require.New(t) - vtxID := ids.ID{1} + vdrs := bag.Of(vdr1, vdr2, vdr3, vdr4, vdr5) // k = 5 + alphaPreference := 3 + alphaConfidence := 5 - vdr1 := ids.NodeID{1} - vdr2 := ids.NodeID{2} - vdr3 := ids.NodeID{3} - vdr4 := ids.NodeID{4} - vdr5 := ids.NodeID{5} // k = 5 + factory := NewEarlyTermNoTraversalFactory(alphaPreference, alphaConfidence) + poll := factory.New(vdrs) - vdrs := bag.Bag[ids.NodeID]{} - vdrs.Add( - vdr1, - vdr2, - vdr3, - vdr4, - 
vdr5, - ) + poll.Vote(vdr1, blkID1) + require.False(poll.Finished()) - factory := NewEarlyTermNoTraversalFactory(alpha) - poll := factory.New(vdrs) + poll.Vote(vdr2, blkID1) + require.False(poll.Finished()) + + poll.Vote(vdr3, blkID1) + require.False(poll.Finished()) - poll.Vote(vdr1, vtxID) - if poll.Finished() { - t.Fatalf("Poll finished after less than alpha votes") - } - poll.Vote(vdr2, vtxID) - if poll.Finished() { - t.Fatalf("Poll finished after less than alpha votes") - } - poll.Vote(vdr3, vtxID) - if !poll.Finished() { - t.Fatalf("Poll did not terminate early after receiving alpha votes for one vertex and none for other vertices") - } + poll.Drop(vdr4) + require.True(poll.Finished()) } -func TestEarlyTermNoTraversalForSharedAncestor(t *testing.T) { - alpha := 4 +// Tests case 4 +func TestEarlyTermNoTraversalTerminatesEarlyWithAlphaConfidence(t *testing.T) { + require := require.New(t) + + vdrs := bag.Of(vdr1, vdr2, vdr3, vdr4, vdr5) // k = 5 + alphaPreference := 3 + alphaConfidence := 3 - vtxA := ids.ID{1} - vtxB := ids.ID{2} - vtxC := ids.ID{3} - vtxD := ids.ID{4} - - // If validators 1-3 vote for frontier vertices - // B, C, and D respectively, which all share the common ancestor - // A, then we cannot terminate early with alpha = k = 4 - // If the final vote is cast for any of A, B, C, or D, then - // vertex A will have transitively received alpha = 4 votes - vdr1 := ids.NodeID{1} - vdr2 := ids.NodeID{2} - vdr3 := ids.NodeID{3} - vdr4 := ids.NodeID{4} - - vdrs := bag.Bag[ids.NodeID]{} - vdrs.Add( - vdr1, - vdr2, - vdr3, - vdr4, - ) - - factory := NewEarlyTermNoTraversalFactory(alpha) + factory := NewEarlyTermNoTraversalFactory(alphaPreference, alphaConfidence) poll := factory.New(vdrs) - poll.Vote(vdr1, vtxB) - if poll.Finished() { - t.Fatalf("Poll finished early after receiving one vote") - } - poll.Vote(vdr2, vtxC) - if poll.Finished() { - t.Fatalf("Poll finished early after receiving two votes") - } - poll.Vote(vdr3, vtxD) - if poll.Finished() { - 
t.Fatalf("Poll terminated early, when a shared ancestor could have received alpha votes") - } - poll.Vote(vdr4, vtxA) - if !poll.Finished() { - t.Fatalf("Poll did not terminate after receiving all outstanding votes") - } -} + poll.Vote(vdr1, blkID1) + require.False(poll.Finished()) -func TestEarlyTermNoTraversalWithFastDrops(t *testing.T) { - alpha := 2 + poll.Vote(vdr2, blkID1) + require.False(poll.Finished()) - vdr1 := ids.NodeID{1} - vdr2 := ids.NodeID{2} - vdr3 := ids.NodeID{3} // k = 3 + poll.Vote(vdr3, blkID1) + require.True(poll.Finished()) +} - vdrs := bag.Bag[ids.NodeID]{} - vdrs.Add( - vdr1, - vdr2, - vdr3, - ) +// If validators 1-3 vote for blocks B, C, and D respectively, which all share +// the common ancestor A, then we cannot terminate early with alpha = k = 4. +// +// If the final vote is cast for any of A, B, C, or D, then A will have +// transitively received alpha = 4 votes +func TestEarlyTermNoTraversalForSharedAncestor(t *testing.T) { + require := require.New(t) - factory := NewEarlyTermNoTraversalFactory(alpha) + vdrs := bag.Of(vdr1, vdr2, vdr3, vdr4) // k = 4 + alpha := 4 + + factory := NewEarlyTermNoTraversalFactory(alpha, alpha) poll := factory.New(vdrs) - poll.Drop(vdr1) - if poll.Finished() { - t.Fatalf("Poll finished early after dropping one vote") - } - poll.Drop(vdr2) - if !poll.Finished() { - t.Fatalf("Poll did not terminate after dropping two votes") - } -} + poll.Vote(vdr1, blkID2) + require.False(poll.Finished()) -func TestEarlyTermNoTraversalWithWeightedResponses(t *testing.T) { - alpha := 2 + poll.Vote(vdr2, blkID3) + require.False(poll.Finished()) + + poll.Vote(vdr3, blkID4) + require.False(poll.Finished()) - vtxID := ids.ID{1} + poll.Vote(vdr4, blkID1) + require.True(poll.Finished()) +} - vdr1 := ids.NodeID{2} - vdr2 := ids.NodeID{3} +func TestEarlyTermNoTraversalWithWeightedResponses(t *testing.T) { + require := require.New(t) - vdrs := bag.Bag[ids.NodeID]{} - vdrs.Add( - vdr1, - vdr2, - vdr2, - ) // k = 3 + vdrs := 
bag.Of(vdr1, vdr2, vdr2) // k = 3 + alpha := 2 - factory := NewEarlyTermNoTraversalFactory(alpha) + factory := NewEarlyTermNoTraversalFactory(alpha, alpha) poll := factory.New(vdrs) - poll.Vote(vdr2, vtxID) - if !poll.Finished() { - t.Fatalf("Poll did not terminate after receiving two votes") - } + poll.Vote(vdr2, blkID1) + require.True(poll.Finished()) result := poll.Result() - if list := result.List(); len(list) != 1 { - t.Fatalf("Wrong number of vertices returned") - } else if retVtxID := list[0]; retVtxID != vtxID { - t.Fatalf("Wrong vertex returned") - } else if result.Count(vtxID) != 2 { - t.Fatalf("Wrong number of votes returned") - } + list := result.List() + require.Len(list, 1) + require.Equal(blkID1, list[0]) + require.Equal(2, result.Count(blkID1)) } func TestEarlyTermNoTraversalDropWithWeightedResponses(t *testing.T) { + vdrs := bag.Of(vdr1, vdr2, vdr2) // k = 3 alpha := 2 - vdr1 := ids.NodeID{1} - vdr2 := ids.NodeID{2} - - vdrs := bag.Bag[ids.NodeID]{} - vdrs.Add( - vdr1, - vdr2, - vdr2, - ) // k = 3 - - factory := NewEarlyTermNoTraversalFactory(alpha) + factory := NewEarlyTermNoTraversalFactory(alpha, alpha) poll := factory.New(vdrs) poll.Drop(vdr2) - if !poll.Finished() { - t.Fatalf("Poll did not terminate after dropping two votes") - } + require.True(t, poll.Finished()) } diff --git a/avalanchego/snow/consensus/snowman/poll/interfaces.go b/avalanchego/snow/consensus/snowman/poll/interfaces.go index cab31cfc..c1a776b4 100644 --- a/avalanchego/snow/consensus/snowman/poll/interfaces.go +++ b/avalanchego/snow/consensus/snowman/poll/interfaces.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package poll diff --git a/avalanchego/snow/consensus/snowman/poll/no_early_term.go b/avalanchego/snow/consensus/snowman/poll/no_early_term.go deleted file mode 100644 index ed5744d4..00000000 --- a/avalanchego/snow/consensus/snowman/poll/no_early_term.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package poll - -import ( - "fmt" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/bag" -) - -type noEarlyTermFactory struct{} - -// NewNoEarlyTermFactory returns a factory that returns polls with no early -// termination -func NewNoEarlyTermFactory() Factory { - return noEarlyTermFactory{} -} - -func (noEarlyTermFactory) New(vdrs bag.Bag[ids.NodeID]) Poll { - return &noEarlyTermPoll{polled: vdrs} -} - -// noEarlyTermPoll finishes when all polled validators either respond to the -// query or a timeout occurs -type noEarlyTermPoll struct { - votes bag.Bag[ids.ID] - polled bag.Bag[ids.NodeID] -} - -// Vote registers a response for this poll -func (p *noEarlyTermPoll) Vote(vdr ids.NodeID, vote ids.ID) { - count := p.polled.Count(vdr) - // make sure that a validator can't respond multiple times - p.polled.Remove(vdr) - - // track the votes the validator responded with - p.votes.AddCount(vote, count) -} - -// Drop any future response for this poll -func (p *noEarlyTermPoll) Drop(vdr ids.NodeID) { - p.polled.Remove(vdr) -} - -// Finished returns true when all validators have voted -func (p *noEarlyTermPoll) Finished() bool { - return p.polled.Len() == 0 -} - -// Result returns the result of this poll -func (p *noEarlyTermPoll) Result() bag.Bag[ids.ID] { - return p.votes -} - -func (p *noEarlyTermPoll) PrefixedString(prefix string) string { - return fmt.Sprintf( - "waiting on %s\n%sreceived %s", - p.polled.PrefixedString(prefix), - prefix, - p.votes.PrefixedString(prefix), - ) -} - -func (p *noEarlyTermPoll) String() string { - return 
p.PrefixedString("") -} diff --git a/avalanchego/snow/consensus/snowman/poll/no_early_term_test.go b/avalanchego/snow/consensus/snowman/poll/no_early_term_test.go deleted file mode 100644 index fdc42a57..00000000 --- a/avalanchego/snow/consensus/snowman/poll/no_early_term_test.go +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package poll - -import ( - "testing" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/bag" -) - -func TestNoEarlyTermResults(t *testing.T) { - vtxID := ids.ID{1} - - vdr1 := ids.NodeID{1} // k = 1 - - vdrs := bag.Bag[ids.NodeID]{} - vdrs.Add(vdr1) - - factory := NewNoEarlyTermFactory() - poll := factory.New(vdrs) - - poll.Vote(vdr1, vtxID) - if !poll.Finished() { - t.Fatalf("Poll did not terminate after receiving k votes") - } - - result := poll.Result() - if list := result.List(); len(list) != 1 { - t.Fatalf("Wrong number of vertices returned") - } else if retVtxID := list[0]; retVtxID != vtxID { - t.Fatalf("Wrong vertex returned") - } else if result.Count(vtxID) != 1 { - t.Fatalf("Wrong number of votes returned") - } -} - -func TestNoEarlyTermString(t *testing.T) { - vtxID := ids.ID{1} - - vdr1 := ids.NodeID{1} - vdr2 := ids.NodeID{2} // k = 2 - - vdrs := bag.Bag[ids.NodeID]{} - vdrs.Add( - vdr1, - vdr2, - ) - - factory := NewNoEarlyTermFactory() - poll := factory.New(vdrs) - - poll.Vote(vdr1, vtxID) - - expected := `waiting on Bag: (Size = 1) - NodeID-BaMPFdqMUQ46BV8iRcwbVfsam55kMqcp: 1 -received Bag: (Size = 1) - SYXsAycDPUu4z2ZksJD5fh5nTDcH3vCFHnpcVye5XuJ2jArg: 1` - if result := poll.String(); expected != result { - t.Fatalf("Poll should have returned %s but returned %s", expected, result) - } -} - -func TestNoEarlyTermDropsDuplicatedVotes(t *testing.T) { - vtxID := ids.ID{1} - - vdr1 := ids.NodeID{1} - vdr2 := ids.NodeID{2} // k = 2 - - vdrs := bag.Bag[ids.NodeID]{} - vdrs.Add( - vdr1, - vdr2, - ) - - 
factory := NewNoEarlyTermFactory() - poll := factory.New(vdrs) - - poll.Vote(vdr1, vtxID) - if poll.Finished() { - t.Fatalf("Poll finished after less than alpha votes") - } - poll.Vote(vdr1, vtxID) - if poll.Finished() { - t.Fatalf("Poll finished after getting a duplicated vote") - } - poll.Drop(vdr1) - if poll.Finished() { - t.Fatalf("Poll finished after getting a duplicated vote") - } - poll.Vote(vdr2, vtxID) - if !poll.Finished() { - t.Fatalf("Poll did not terminate after receiving k votes") - } -} diff --git a/avalanchego/snow/consensus/snowman/poll/set.go b/avalanchego/snow/consensus/snowman/poll/set.go index e3182147..9a6b9b2d 100644 --- a/avalanchego/snow/consensus/snowman/poll/set.go +++ b/avalanchego/snow/consensus/snowman/poll/set.go @@ -1,15 +1,15 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package poll import ( + "errors" "fmt" "strings" "time" "github.com/prometheus/client_golang/prometheus" - "go.uber.org/zap" "github.com/ava-labs/avalanchego/ids" @@ -19,6 +19,11 @@ import ( "github.com/ava-labs/avalanchego/utils/metric" ) +var ( + errFailedPollsMetric = errors.New("failed to register polls metric") + errFailedPollDurationMetrics = errors.New("failed to register poll_duration metrics") +) + type pollHolder interface { GetPoll() Poll StartTime() time.Time @@ -52,16 +57,14 @@ func NewSet( log logging.Logger, namespace string, reg prometheus.Registerer, -) Set { +) (Set, error) { numPolls := prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: namespace, Name: "polls", Help: "Number of pending network polls", }) if err := reg.Register(numPolls); err != nil { - log.Error("failed to register polls statistics", - zap.Error(err), - ) + return nil, fmt.Errorf("%w: %w", errFailedPollsMetric, err) } durPolls, err := metric.NewAverager( @@ -71,9 +74,7 @@ func NewSet( reg, ) if err != nil { - log.Error("failed to register 
poll_duration statistics", - zap.Error(err), - ) + return nil, fmt.Errorf("%w: %w", errFailedPollDurationMetrics, err) } return &set{ @@ -82,7 +83,7 @@ func NewSet( durPolls: durPolls, factory: factory, polls: linkedhashmap.New[uint32, pollHolder](), - } + }, nil } // Add to the current set of polls diff --git a/avalanchego/snow/consensus/snowman/poll/set_test.go b/avalanchego/snow/consensus/snowman/poll/set_test.go index 277d6d3d..07172420 100644 --- a/avalanchego/snow/consensus/snowman/poll/set_test.go +++ b/avalanchego/snow/consensus/snowman/poll/set_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package poll @@ -6,324 +6,261 @@ package poll import ( "testing" - "github.com/stretchr/testify/require" - "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/bag" "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/utils/wrappers" ) -func TestNewSetErrorOnMetrics(t *testing.T) { - factory := NewNoEarlyTermFactory() +var ( + blkID1 = ids.ID{1} + blkID2 = ids.ID{2} + blkID3 = ids.ID{3} + blkID4 = ids.ID{4} + + vdr1 = ids.BuildTestNodeID([]byte{0x01}) + vdr2 = ids.BuildTestNodeID([]byte{0x02}) + vdr3 = ids.BuildTestNodeID([]byte{0x03}) + vdr4 = ids.BuildTestNodeID([]byte{0x04}) + vdr5 = ids.BuildTestNodeID([]byte{0x05}) // k = 5 +) + +func TestNewSetErrorOnPollsMetrics(t *testing.T) { + require := require.New(t) + + factory := NewEarlyTermNoTraversalFactory(1, 1) log := logging.NoLog{} namespace := "" registerer := prometheus.NewRegistry() - errs := wrappers.Errs{} - errs.Add( - registerer.Register(prometheus.NewCounter(prometheus.CounterOpts{ - Name: "polls", - })), - registerer.Register(prometheus.NewCounter(prometheus.CounterOpts{ - Name: "poll_duration", - })), - ) - if 
errs.Errored() { - t.Fatal(errs.Err) - } - - s := NewSet(factory, log, namespace, registerer) - if s == nil { - t.Fatalf("shouldn't have failed due to a metrics initialization err") - } + require.NoError(registerer.Register(prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Name: "polls", + }))) + + _, err := NewSet(factory, log, namespace, registerer) + require.ErrorIs(err, errFailedPollsMetric) } -func TestCreateAndFinishPollOutOfOrder_NewerFinishesFirst(t *testing.T) { - factory := NewNoEarlyTermFactory() +func TestNewSetErrorOnPollDurationMetrics(t *testing.T) { + require := require.New(t) + + factory := NewEarlyTermNoTraversalFactory(1, 1) log := logging.NoLog{} namespace := "" registerer := prometheus.NewRegistry() - s := NewSet(factory, log, namespace, registerer) - // create validators - vdr1 := ids.NodeID{1} - vdr2 := ids.NodeID{2} - vdr3 := ids.NodeID{3} + require.NoError(registerer.Register(prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Name: "poll_duration_count", + }))) - vdrs := []ids.NodeID{vdr1, vdr2, vdr3} + _, err := NewSet(factory, log, namespace, registerer) + require.ErrorIs(err, errFailedPollDurationMetrics) +} + +func TestCreateAndFinishPollOutOfOrder_NewerFinishesFirst(t *testing.T) { + require := require.New(t) - // create two polls for the two vtxs - vdrBag := bag.Bag[ids.NodeID]{} - vdrBag.Add(vdrs...) - added := s.Add(1, vdrBag) - require.True(t, added) + vdrs := []ids.NodeID{vdr1, vdr2, vdr3} // k = 3 + alpha := 3 - vdrBag = bag.Bag[ids.NodeID]{} - vdrBag.Add(vdrs...) 
- added = s.Add(2, vdrBag) - require.True(t, added) - require.Equal(t, s.Len(), 2) + factory := NewEarlyTermNoTraversalFactory(alpha, alpha) + log := logging.NoLog{} + namespace := "" + registerer := prometheus.NewRegistry() + s, err := NewSet(factory, log, namespace, registerer) + require.NoError(err) - // vote vtx1 for poll 1 - // vote vtx2 for poll 2 - vtx1 := ids.ID{1} - vtx2 := ids.ID{2} + // create two polls for the two blocks + vdrBag := bag.Of(vdrs...) + require.True(s.Add(1, vdrBag)) - var results []bag.Bag[ids.ID] + vdrBag = bag.Of(vdrs...) + require.True(s.Add(2, vdrBag)) + require.Equal(2, s.Len()) // vote out of order - results = s.Vote(1, vdr1, vtx1) - require.Len(t, results, 0) - results = s.Vote(2, vdr2, vtx2) - require.Len(t, results, 0) - results = s.Vote(2, vdr3, vtx2) - require.Len(t, results, 0) - - results = s.Vote(2, vdr1, vtx2) // poll 2 finished - require.Len(t, results, 0) // expect 2 to not have finished because 1 is still pending - - results = s.Vote(1, vdr2, vtx1) - require.Len(t, results, 0) - - results = s.Vote(1, vdr3, vtx1) // poll 1 finished, poll 2 should be finished as well - require.Len(t, results, 2) - require.Equal(t, vtx1, results[0].List()[0]) - require.Equal(t, vtx2, results[1].List()[0]) + require.Empty(s.Vote(1, vdr1, blkID1)) + require.Empty(s.Vote(2, vdr2, blkID2)) + require.Empty(s.Vote(2, vdr3, blkID2)) + + // poll 2 finished + require.Empty(s.Vote(2, vdr1, blkID2)) // expect 2 to not have finished because 1 is still pending + + require.Empty(s.Vote(1, vdr2, blkID1)) + + results := s.Vote(1, vdr3, blkID1) // poll 1 finished, poll 2 should be finished as well + require.Len(results, 2) + require.Equal(blkID1, results[0].List()[0]) + require.Equal(blkID2, results[1].List()[0]) } func TestCreateAndFinishPollOutOfOrder_OlderFinishesFirst(t *testing.T) { - factory := NewNoEarlyTermFactory() + require := require.New(t) + + vdrs := []ids.NodeID{vdr1, vdr2, vdr3} // k = 3 + alpha := 3 + + factory := 
NewEarlyTermNoTraversalFactory(alpha, alpha) log := logging.NoLog{} namespace := "" registerer := prometheus.NewRegistry() - s := NewSet(factory, log, namespace, registerer) - - // create validators - vdr1 := ids.NodeID{1} - vdr2 := ids.NodeID{2} - vdr3 := ids.NodeID{3} + s, err := NewSet(factory, log, namespace, registerer) + require.NoError(err) - vdrs := []ids.NodeID{vdr1, vdr2, vdr3} + // create two polls for the two blocks + vdrBag := bag.Of(vdrs...) + require.True(s.Add(1, vdrBag)) - // create two polls for the two vtxs - vdrBag := bag.Bag[ids.NodeID]{} - vdrBag.Add(vdrs...) - added := s.Add(1, vdrBag) - require.True(t, added) + vdrBag = bag.Of(vdrs...) + require.True(s.Add(2, vdrBag)) + require.Equal(2, s.Len()) - vdrBag = bag.Bag[ids.NodeID]{} - vdrBag.Add(vdrs...) - added = s.Add(2, vdrBag) - require.True(t, added) - require.Equal(t, s.Len(), 2) + // vote out of order + require.Empty(s.Vote(1, vdr1, blkID1)) + require.Empty(s.Vote(2, vdr2, blkID2)) + require.Empty(s.Vote(2, vdr3, blkID2)) - // vote vtx1 for poll 1 - // vote vtx2 for poll 2 - vtx1 := ids.ID{1} - vtx2 := ids.ID{2} + require.Empty(s.Vote(1, vdr2, blkID1)) - var results []bag.Bag[ids.ID] + results := s.Vote(1, vdr3, blkID1) // poll 1 finished, poll 2 still remaining + require.Len(results, 1) // because 1 is the oldest + require.Equal(blkID1, results[0].List()[0]) - // vote out of order - results = s.Vote(1, vdr1, vtx1) - require.Len(t, results, 0) - results = s.Vote(2, vdr2, vtx2) - require.Len(t, results, 0) - results = s.Vote(2, vdr3, vtx2) - require.Len(t, results, 0) - - results = s.Vote(1, vdr2, vtx1) - require.Len(t, results, 0) - - results = s.Vote(1, vdr3, vtx1) // poll 1 finished, poll 2 still remaining - require.Len(t, results, 1) // because 1 is the oldest - require.Equal(t, vtx1, results[0].List()[0]) - - results = s.Vote(2, vdr1, vtx2) // poll 2 finished - require.Len(t, results, 1) // because 2 is the oldest now - require.Equal(t, vtx2, results[0].List()[0]) + results = s.Vote(2, 
vdr1, blkID2) // poll 2 finished + require.Len(results, 1) // because 2 is the oldest now + require.Equal(blkID2, results[0].List()[0]) } func TestCreateAndFinishPollOutOfOrder_UnfinishedPollsGaps(t *testing.T) { - factory := NewNoEarlyTermFactory() + require := require.New(t) + + vdrs := []ids.NodeID{vdr1, vdr2, vdr3} // k = 3 + alpha := 3 + + factory := NewEarlyTermNoTraversalFactory(alpha, alpha) log := logging.NoLog{} namespace := "" registerer := prometheus.NewRegistry() - s := NewSet(factory, log, namespace, registerer) - - // create validators - vdr1 := ids.NodeID{1} - vdr2 := ids.NodeID{2} - vdr3 := ids.NodeID{3} - - vdrs := []ids.NodeID{vdr1, vdr2, vdr3} + s, err := NewSet(factory, log, namespace, registerer) + require.NoError(err) - // create three polls for the two vtxs - vdrBag := bag.Bag[ids.NodeID]{} - vdrBag.Add(vdrs...) - added := s.Add(1, vdrBag) - require.True(t, added) + // create three polls for the two blocks + vdrBag := bag.Of(vdrs...) + require.True(s.Add(1, vdrBag)) - vdrBag = bag.Bag[ids.NodeID]{} - vdrBag.Add(vdrs...) - added = s.Add(2, vdrBag) - require.True(t, added) + vdrBag = bag.Of(vdrs...) + require.True(s.Add(2, vdrBag)) - vdrBag = bag.Bag[ids.NodeID]{} - vdrBag.Add(vdrs...) - added = s.Add(3, vdrBag) - require.True(t, added) - require.Equal(t, s.Len(), 3) - - // vote vtx1 for poll 1 - // vote vtx2 for poll 2 - // vote vtx3 for poll 3 - vtx1 := ids.ID{1} - vtx2 := ids.ID{2} - vtx3 := ids.ID{3} - - var results []bag.Bag[ids.ID] + vdrBag = bag.Of(vdrs...) 
+ require.True(s.Add(3, vdrBag)) + require.Equal(3, s.Len()) // vote out of order // 2 finishes first to create a gap of finished poll between two unfinished polls 1 and 3 - results = s.Vote(2, vdr3, vtx2) - require.Len(t, results, 0) - results = s.Vote(2, vdr2, vtx2) - require.Len(t, results, 0) - results = s.Vote(2, vdr1, vtx2) - require.Len(t, results, 0) + require.Empty(s.Vote(2, vdr3, blkID2)) + require.Empty(s.Vote(2, vdr2, blkID2)) + require.Empty(s.Vote(2, vdr1, blkID2)) // 3 finishes now, 2 has already finished but 1 is not finished so we expect to receive no results still - results = s.Vote(3, vdr2, vtx3) - require.Len(t, results, 0) - results = s.Vote(3, vdr3, vtx3) - require.Len(t, results, 0) - results = s.Vote(3, vdr1, vtx3) - require.Len(t, results, 0) + require.Empty(s.Vote(3, vdr2, blkID3)) + require.Empty(s.Vote(3, vdr3, blkID3)) + require.Empty(s.Vote(3, vdr1, blkID3)) // 1 finishes now, 2 and 3 have already finished so we expect 3 items in results - results = s.Vote(1, vdr1, vtx1) - require.Len(t, results, 0) - results = s.Vote(1, vdr2, vtx1) - require.Len(t, results, 0) - results = s.Vote(1, vdr3, vtx1) - require.Len(t, results, 3) - require.Equal(t, vtx1, results[0].List()[0]) - require.Equal(t, vtx2, results[1].List()[0]) - require.Equal(t, vtx3, results[2].List()[0]) + require.Empty(s.Vote(1, vdr1, blkID1)) + require.Empty(s.Vote(1, vdr2, blkID1)) + results := s.Vote(1, vdr3, blkID1) + require.Len(results, 3) + require.Equal(blkID1, results[0].List()[0]) + require.Equal(blkID2, results[1].List()[0]) + require.Equal(blkID3, results[2].List()[0]) } func TestCreateAndFinishSuccessfulPoll(t *testing.T) { - factory := NewNoEarlyTermFactory() + require := require.New(t) + + vdrs := bag.Of(vdr1, vdr2) // k = 2 + alpha := 2 + + factory := NewEarlyTermNoTraversalFactory(alpha, alpha) log := logging.NoLog{} namespace := "" registerer := prometheus.NewRegistry() - s := NewSet(factory, log, namespace, registerer) - - vtxID := ids.ID{1} - - vdr1 := 
ids.NodeID{1} - vdr2 := ids.NodeID{2} // k = 2 - - vdrs := bag.Bag[ids.NodeID]{} - vdrs.Add( - vdr1, - vdr2, - ) - - if s.Len() != 0 { - t.Fatalf("Shouldn't have any active polls yet") - } else if !s.Add(0, vdrs) { - t.Fatalf("Should have been able to add a new poll") - } else if s.Len() != 1 { - t.Fatalf("Should only have one active poll") - } else if s.Add(0, vdrs) { - t.Fatalf("Shouldn't have been able to add a duplicated poll") - } else if s.Len() != 1 { - t.Fatalf("Should only have one active poll") - } else if results := s.Vote(1, vdr1, vtxID); len(results) > 0 { - t.Fatalf("Shouldn't have been able to finish a non-existent poll") - } else if results := s.Vote(0, vdr1, vtxID); len(results) > 0 { - t.Fatalf("Shouldn't have been able to finish an ongoing poll") - } else if results := s.Vote(0, vdr1, vtxID); len(results) > 0 { - t.Fatalf("Should have dropped a duplicated poll") - } else if results := s.Vote(0, vdr2, vtxID); len(results) == 0 { - t.Fatalf("Should have finished the") - } else if len(results) != 1 { - t.Fatalf("Wrong number of results returned") - } else if list := results[0].List(); len(list) != 1 { - t.Fatalf("Wrong number of vertices returned") - } else if retVtxID := list[0]; retVtxID != vtxID { - t.Fatalf("Wrong vertex returned") - } else if results[0].Count(vtxID) != 2 { - t.Fatalf("Wrong number of votes returned") - } + s, err := NewSet(factory, log, namespace, registerer) + require.NoError(err) + + require.Zero(s.Len()) + + require.True(s.Add(0, vdrs)) + require.Equal(1, s.Len()) + + require.False(s.Add(0, vdrs)) + require.Equal(1, s.Len()) + + require.Empty(s.Vote(1, vdr1, blkID1)) + require.Empty(s.Vote(0, vdr1, blkID1)) + require.Empty(s.Vote(0, vdr1, blkID1)) + + results := s.Vote(0, vdr2, blkID1) + require.Len(results, 1) + list := results[0].List() + require.Len(list, 1) + require.Equal(blkID1, list[0]) + require.Equal(2, results[0].Count(blkID1)) } func TestCreateAndFinishFailedPoll(t *testing.T) { - factory := 
NewNoEarlyTermFactory() + require := require.New(t) + + vdrs := bag.Of(vdr1, vdr2) // k = 2 + alpha := 1 + + factory := NewEarlyTermNoTraversalFactory(alpha, alpha) log := logging.NoLog{} namespace := "" registerer := prometheus.NewRegistry() - s := NewSet(factory, log, namespace, registerer) - - vdr1 := ids.NodeID{1} - vdr2 := ids.NodeID{2} // k = 2 - - vdrs := bag.Bag[ids.NodeID]{} - vdrs.Add( - vdr1, - vdr2, - ) - - if s.Len() != 0 { - t.Fatalf("Shouldn't have any active polls yet") - } else if !s.Add(0, vdrs) { - t.Fatalf("Should have been able to add a new poll") - } else if s.Len() != 1 { - t.Fatalf("Should only have one active poll") - } else if s.Add(0, vdrs) { - t.Fatalf("Shouldn't have been able to add a duplicated poll") - } else if s.Len() != 1 { - t.Fatalf("Should only have one active poll") - } else if results := s.Drop(1, vdr1); len(results) > 0 { - t.Fatalf("Shouldn't have been able to finish a non-existent poll") - } else if results := s.Drop(0, vdr1); len(results) > 0 { - t.Fatalf("Shouldn't have been able to finish an ongoing poll") - } else if results := s.Drop(0, vdr1); len(results) > 0 { - t.Fatalf("Should have dropped a duplicated poll") - } else if results := s.Drop(0, vdr2); len(results) == 0 { - t.Fatalf("Should have finished the") - } else if list := results[0].List(); len(list) != 0 { - t.Fatalf("Wrong number of vertices returned") - } + s, err := NewSet(factory, log, namespace, registerer) + require.NoError(err) + + require.Zero(s.Len()) + + require.True(s.Add(0, vdrs)) + require.Equal(1, s.Len()) + + require.False(s.Add(0, vdrs)) + require.Equal(1, s.Len()) + + require.Empty(s.Drop(1, vdr1)) + require.Empty(s.Drop(0, vdr1)) + require.Empty(s.Drop(0, vdr1)) + + results := s.Drop(0, vdr2) + require.Len(results, 1) + require.Empty(results[0].List()) } func TestSetString(t *testing.T) { - factory := NewNoEarlyTermFactory() + require := require.New(t) + + vdrs := bag.Of(vdr1) // k = 1 + alpha := 1 + + factory := 
NewEarlyTermNoTraversalFactory(alpha, alpha) log := logging.NoLog{} namespace := "" registerer := prometheus.NewRegistry() - s := NewSet(factory, log, namespace, registerer) - - vdr1 := ids.NodeID{1} // k = 1 - - vdrs := bag.Bag[ids.NodeID]{} - vdrs.Add(vdr1) + s, err := NewSet(factory, log, namespace, registerer) + require.NoError(err) expected := `current polls: (Size = 1) RequestID 0: - waiting on Bag: (Size = 1) + waiting on Bag[ids.NodeID]: (Size = 1) NodeID-6HgC8KRBEhXYbF4riJyJFLSHt37UNuRt: 1 - received Bag: (Size = 0)` - if !s.Add(0, vdrs) { - t.Fatalf("Should have been able to add a new poll") - } else if str := s.String(); expected != str { - t.Fatalf("Set return wrong string, Expected:\n%s\nReturned:\n%s", - expected, - str) - } + received Bag[ids.ID]: (Size = 0)` + require.True(s.Add(0, vdrs)) + require.Equal(expected, s.String()) } diff --git a/avalanchego/snow/consensus/snowman/snowman_block.go b/avalanchego/snow/consensus/snowman/snowman_block.go index ddb3ae30..7e8d339d 100644 --- a/avalanchego/snow/consensus/snowman/snowman_block.go +++ b/avalanchego/snow/consensus/snowman/snowman_block.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowman @@ -38,8 +38,7 @@ func (n *snowmanBlock) AddChild(child Block) { // if the snowball instance is nil, this is the first child. So the instance // should be initialized. 
if n.sb == nil { - n.sb = &snowball.Tree{} - n.sb.Initialize(n.params, childID) + n.sb = snowball.NewTree(snowball.SnowballFactory, n.params, childID) n.children = make(map[ids.ID]Block) } else { n.sb.Add(childID) diff --git a/avalanchego/snow/consensus/snowman/test_block.go b/avalanchego/snow/consensus/snowman/test_block.go index a02bf317..a32872ad 100644 --- a/avalanchego/snow/consensus/snowman/test_block.go +++ b/avalanchego/snow/consensus/snowman/test_block.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowman import ( + "cmp" "context" "time" @@ -48,6 +49,6 @@ func (b *TestBlock) Bytes() []byte { return b.BytesV } -func (b *TestBlock) Less(other *TestBlock) bool { - return b.HeightV < other.HeightV +func (b *TestBlock) Compare(other *TestBlock) int { + return cmp.Compare(b.HeightV, other.HeightV) } diff --git a/avalanchego/snow/consensus/snowman/topological.go b/avalanchego/snow/consensus/snowman/topological.go index 0d8fd26e..d99ac9c2 100644 --- a/avalanchego/snow/consensus/snowman/topological.go +++ b/avalanchego/snow/consensus/snowman/topological.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package snowman @@ -7,24 +7,22 @@ import ( "context" "errors" "fmt" - "strings" "time" "go.uber.org/zap" - "golang.org/x/exp/maps" - "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/choices" - "github.com/ava-labs/avalanchego/snow/consensus/metrics" "github.com/ava-labs/avalanchego/snow/consensus/snowball" "github.com/ava-labs/avalanchego/utils/bag" "github.com/ava-labs/avalanchego/utils/set" ) var ( - errDuplicateAdd = errors.New("duplicate block add") + errDuplicateAdd = errors.New("duplicate block add") + errTooManyProcessingBlocks = errors.New("too many processing blocks") + errBlockProcessingTooLong = errors.New("block processing too long") _ Factory = (*TopologicalFactory)(nil) _ Consensus = (*Topological)(nil) @@ -41,10 +39,7 @@ func (TopologicalFactory) New() Consensus { // strongly preferred branch. This tree structure amortizes network polls to // vote on more than just the next block. type Topological struct { - metrics.Latency - metrics.Polls - metrics.Height - metrics.Timestamp + metrics *metrics // pollNumber is the number of times RecordPolls has been called pollNumber uint64 @@ -56,11 +51,8 @@ type Topological struct { // instances params snowball.Parameters - // head is the last accepted block - head ids.ID - - // height is the height of the last accepted block - height uint64 + lastAcceptedID ids.ID + lastAcceptedHeight uint64 // blocks stores the last accepted block and all the pending blocks blocks map[ids.ID]*snowmanBlock // blockID -> snowmanBlock @@ -68,8 +60,12 @@ type Topological struct { // preferredIDs stores the set of IDs that are currently preferred. preferredIDs set.Set[ids.ID] - // tail is the preferred block with no children - tail ids.ID + // preferredHeights maps a height to the currently preferred block ID at + // that height. 
+ preferredHeights map[uint64]ids.ID // height -> blockID + + // preference is the preferred block with highest height + preference ids.ID // Used in [calculateInDegree] and. // Should only be accessed in that method. @@ -101,50 +97,40 @@ type votes struct { votes bag.Bag[ids.ID] } -func (ts *Topological) Initialize(ctx *snow.ConsensusContext, params snowball.Parameters, rootID ids.ID, rootHeight uint64, rootTime time.Time) error { - if err := params.Verify(); err != nil { - return err - } - - latencyMetrics, err := metrics.NewLatency("blks", "block(s)", ctx.Log, "", ctx.Registerer) - if err != nil { - return err - } - ts.Latency = latencyMetrics - - pollsMetrics, err := metrics.NewPolls("", ctx.Registerer) +func (ts *Topological) Initialize( + ctx *snow.ConsensusContext, + params snowball.Parameters, + lastAcceptedID ids.ID, + lastAcceptedHeight uint64, + lastAcceptedTime time.Time, +) error { + err := params.Verify() if err != nil { return err } - ts.Polls = pollsMetrics - heightMetrics, err := metrics.NewHeight("", ctx.Registerer) - if err != nil { - return err - } - ts.Height = heightMetrics - - timestampMetrics, err := metrics.NewTimestamp("", ctx.Registerer) + ts.metrics, err = newMetrics( + ctx.Log, + "", + ctx.Registerer, + lastAcceptedHeight, + lastAcceptedTime, + ) if err != nil { return err } - ts.Timestamp = timestampMetrics ts.leaves = set.Set[ids.ID]{} ts.kahnNodes = make(map[ids.ID]kahnNode) ts.ctx = ctx ts.params = params - ts.head = rootID - ts.height = rootHeight + ts.lastAcceptedID = lastAcceptedID + ts.lastAcceptedHeight = lastAcceptedHeight ts.blocks = map[ids.ID]*snowmanBlock{ - rootID: {params: ts.params}, + lastAcceptedID: {params: ts.params}, } - ts.tail = rootID - - // Initially set the metrics for the last accepted block. 
- ts.Height.Accepted(ts.height) - ts.Timestamp.Accepted(rootTime) - + ts.preferredHeights = make(map[uint64]ids.ID) + ts.preference = lastAcceptedID return nil } @@ -154,6 +140,11 @@ func (ts *Topological) NumProcessing() int { func (ts *Topological) Add(ctx context.Context, blk Block) error { blkID := blk.ID() + height := blk.Height() + ts.ctx.Log.Verbo("adding block", + zap.Stringer("blkID", blkID), + zap.Uint64("height", height), + ) // Make sure a block is not inserted twice. This enforces the invariant that // blocks are always added in topological order. Essentially, a block that @@ -164,18 +155,25 @@ func (ts *Topological) Add(ctx context.Context, blk Block) error { return errDuplicateAdd } - ts.Latency.Issued(blkID, ts.pollNumber) + ts.metrics.Verified(height) + ts.metrics.Issued(blkID, ts.pollNumber) parentID := blk.Parent() parentNode, ok := ts.blocks[parentID] if !ok { + ts.ctx.Log.Verbo("block ancestor is missing, being rejected", + zap.Stringer("blkID", blkID), + zap.Uint64("height", height), + zap.Stringer("parentID", parentID), + ) + // If the ancestor is missing, this means the ancestor must have already // been pruned. Therefore, the dependent should be transitively // rejected. 
if err := blk.Reject(ctx); err != nil { return err } - ts.Latency.Rejected(blkID, ts.pollNumber, len(blk.Bytes())) + ts.metrics.Rejected(blkID, ts.pollNumber, len(blk.Bytes())) return nil } @@ -186,11 +184,18 @@ func (ts *Topological) Add(ctx context.Context, blk Block) error { blk: blk, } - // If we are extending the tail, this is the new tail - if ts.tail == parentID { - ts.tail = blkID + // If we are extending the preference, this is the new preference + if ts.preference == parentID { + ts.preference = blkID ts.preferredIDs.Add(blkID) + ts.preferredHeights[height] = blkID } + + ts.ctx.Log.Verbo("added block", + zap.Stringer("blkID", blkID), + zap.Uint64("height", height), + zap.Stringer("parentID", parentID), + ) return nil } @@ -201,17 +206,17 @@ func (ts *Topological) Decided(blk Block) bool { } // If the block is marked as fetched, we can check if it has been // transitively rejected. - return blk.Status() == choices.Processing && blk.Height() <= ts.height + return blk.Status() == choices.Processing && blk.Height() <= ts.lastAcceptedHeight } func (ts *Topological) Processing(blkID ids.ID) bool { // The last accepted block is in the blocks map, so we first must ensure the // requested block isn't the last accepted block. - if blkID == ts.head { + if blkID == ts.lastAcceptedID { return false } - // If the block is in the map of current blocks and not the head, then the - // block is currently processing. + // If the block is in the map of current blocks and not the last accepted + // block, then it is currently processing. 
_, ok := ts.blocks[blkID] return ok } @@ -224,12 +229,20 @@ func (ts *Topological) IsPreferred(blk Block) bool { return ts.preferredIDs.Contains(blk.ID()) } -func (ts *Topological) LastAccepted() ids.ID { - return ts.head +func (ts *Topological) LastAccepted() (ids.ID, uint64) { + return ts.lastAcceptedID, ts.lastAcceptedHeight } func (ts *Topological) Preference() ids.ID { - return ts.tail + return ts.preference +} + +func (ts *Topological) PreferenceAtHeight(height uint64) (ids.ID, bool) { + if height == ts.lastAcceptedHeight { + return ts.lastAcceptedID, true + } + blkID, ok := ts.preferredHeights[height] + return blkID, ok } // The votes bag contains at most K votes for blocks in the tree. If there is a @@ -240,8 +253,8 @@ func (ts *Topological) Preference() ids.ID { // Every other block will have an unsuccessful poll registered. // // After collecting which blocks should be voted on, the polls are registered -// and blocks are accepted/rejected as needed. The tail is then updated to equal -// the leaf on the preferred branch. +// and blocks are accepted/rejected as needed. The preference is then updated to +// equal the leaf on the preferred branch. // // To optimize the theoretical complexity of the vote propagation, a topological // sort is done over the blocks that are reachable from the provided votes. @@ -251,14 +264,14 @@ func (ts *Topological) Preference() ids.ID { // unsuccessful poll on that block and every descendant block. // // The complexity of this function is: -// - Runtime = 3 * |live set| + |votes| +// - Runtime = 4 * |live set| + |votes| // - Space = 2 * |live set| + |votes| func (ts *Topological) RecordPoll(ctx context.Context, voteBag bag.Bag[ids.ID]) error { // Register a new poll call ts.pollNumber++ var voteStack []votes - if voteBag.Len() >= ts.params.Alpha { + if voteBag.Len() >= ts.params.AlphaPreference { // Since we received at least alpha votes, it's possible that // we reached an alpha majority on a processing block. 
// We must perform the traversals to calculate all block @@ -279,66 +292,74 @@ func (ts *Topological) RecordPoll(ctx context.Context, voteBag bag.Bag[ids.ID]) } // If the set of preferred IDs already contains the preference, then the - // tail is guaranteed to already be set correctly. This is because the value - // returned from vote reports the next preferred block after the last + // preference is guaranteed to already be set correctly. This is because the + // value returned from vote reports the next preferred block after the last // preferred block that was voted for. If this block was previously // preferred, then we know that following the preferences down the chain - // will return the current tail. + // will return the current preference. if ts.preferredIDs.Contains(preferred) { return nil } - // Runtime = |live set| ; Space = Constant + // Runtime = 2 * |live set| ; Space = Constant ts.preferredIDs.Clear() + clear(ts.preferredHeights) - ts.tail = preferred - startBlock := ts.blocks[ts.tail] + ts.preference = preferred + startBlock := ts.blocks[ts.preference] // Runtime = |live set| ; Space = Constant // Traverse from the preferred ID to the last accepted ancestor. for block := startBlock; !block.Accepted(); { - ts.preferredIDs.Add(block.blk.ID()) + blkID := block.blk.ID() + ts.preferredIDs.Add(blkID) + ts.preferredHeights[block.blk.Height()] = blkID block = ts.blocks[block.blk.Parent()] } // Traverse from the preferred ID to the preferred child until there are no // children. - for block := startBlock; block.sb != nil; block = ts.blocks[ts.tail] { - ts.tail = block.sb.Preference() - ts.preferredIDs.Add(ts.tail) + for block := startBlock; block.sb != nil; { + ts.preference = block.sb.Preference() + ts.preferredIDs.Add(ts.preference) + block = ts.blocks[ts.preference] + // Invariant: Because the prior block had an initialized snowball + // instance, it must have a processing child. This guarantees that + // block.blk is non-nil here. 
+ ts.preferredHeights[block.blk.Height()] = ts.preference } return nil } -func (ts *Topological) Finalized() bool { - return len(ts.blocks) == 1 -} - // HealthCheck returns information about the consensus health. func (ts *Topological) HealthCheck(context.Context) (interface{}, error) { - numOutstandingBlks := ts.Latency.NumProcessing() - isOutstandingBlks := numOutstandingBlks <= ts.params.MaxOutstandingItems - healthy := isOutstandingBlks - details := map[string]interface{}{ - "outstandingBlocks": numOutstandingBlks, + var errs []error + + numProcessingBlks := ts.NumProcessing() + if numProcessingBlks > ts.params.MaxOutstandingItems { + err := fmt.Errorf("%w: %d > %d", + errTooManyProcessingBlocks, + numProcessingBlks, + ts.params.MaxOutstandingItems, + ) + errs = append(errs, err) } - // check for long running blocks - timeReqRunning := ts.Latency.MeasureAndGetOldestDuration() - isProcessingTime := timeReqRunning <= ts.params.MaxItemProcessingTime - healthy = healthy && isProcessingTime - details["longestRunningBlock"] = timeReqRunning.String() - - if !healthy { - var errorReasons []string - if !isOutstandingBlks { - errorReasons = append(errorReasons, fmt.Sprintf("number of outstanding blocks %d > %d", numOutstandingBlks, ts.params.MaxOutstandingItems)) - } - if !isProcessingTime { - errorReasons = append(errorReasons, fmt.Sprintf("block processing time %s > %s", timeReqRunning, ts.params.MaxItemProcessingTime)) - } - return details, fmt.Errorf("snowman consensus is not healthy reason: %s", strings.Join(errorReasons, ", ")) + maxTimeProcessing := ts.metrics.MeasureAndGetOldestDuration() + if maxTimeProcessing > ts.params.MaxItemProcessingTime { + err := fmt.Errorf("%w: %s > %s", + errBlockProcessingTooLong, + maxTimeProcessing, + ts.params.MaxItemProcessingTime, + ) + errs = append(errs, err) } - return details, nil + + return map[string]interface{}{ + "processingBlocks": numProcessingBlks, + "longestProcessingBlock": maxTimeProcessing.String(), // .String() is 
needed here to ensure a human readable format + "lastAcceptedID": ts.lastAcceptedID, + "lastAcceptedHeight": ts.lastAcceptedHeight, + }, errors.Join(errs...) } // takes in a list of votes and sets up the topological ordering. Returns the @@ -346,7 +367,7 @@ func (ts *Topological) HealthCheck(context.Context) (interface{}, error) { // the non-transitively applied votes. Also returns the list of leaf blocks. func (ts *Topological) calculateInDegree(votes bag.Bag[ids.ID]) { // Clear the Kahn node set - maps.Clear(ts.kahnNodes) + clear(ts.kahnNodes) // Clear the leaf set ts.leaves.Clear() @@ -422,7 +443,7 @@ func (ts *Topological) pushVotes() []votes { // If there are at least Alpha votes, then this block needs to record // the poll on the snowball instance - if kahnNode.votes.Len() >= ts.params.Alpha { + if kahnNode.votes.Len() >= ts.params.AlphaPreference { voteStack = append(voteStack, votes{ parentID: leafID, votes: kahnNode.votes, @@ -458,20 +479,20 @@ func (ts *Topological) vote(ctx context.Context, voteStack []votes) (ids.ID, err // If the voteStack is empty, then the full tree should falter. This won't // change the preferred branch. 
if len(voteStack) == 0 { - headBlock := ts.blocks[ts.head] - headBlock.shouldFalter = true + lastAcceptedBlock := ts.blocks[ts.lastAcceptedID] + lastAcceptedBlock.shouldFalter = true if numProcessing := len(ts.blocks) - 1; numProcessing > 0 { ts.ctx.Log.Verbo("no progress was made after processing pending blocks", zap.Int("numProcessing", numProcessing), ) - ts.Polls.Failed() + ts.metrics.FailedPoll() } - return ts.tail, nil + return ts.preference, nil } // keep track of the new preferred block - newPreferred := ts.head + newPreferred := ts.lastAcceptedID onPreferredBranch := true pollSuccessful := false for len(voteStack) > 0 { @@ -483,7 +504,7 @@ func (ts *Topological) vote(ctx context.Context, voteStack []votes) (ids.ID, err // get the block that we are going to vote on parentBlock, notRejected := ts.blocks[vote.parentID] - // if the block block we are going to vote on was already rejected, then + // if the block we are going to vote on was already rejected, then // we should stop applying the votes if !notRejected { break @@ -507,8 +528,9 @@ func (ts *Topological) vote(ctx context.Context, voteStack []votes) (ids.ID, err // apply the votes for this snowball instance pollSuccessful = parentBlock.sb.RecordPoll(vote.votes) || pollSuccessful - // Only accept when you are finalized and the head. - if parentBlock.sb.Finalized() && ts.head == vote.parentID { + // Only accept when you are finalized and a child of the last accepted + // block. 
+ if parentBlock.sb.Finalized() && ts.lastAcceptedID == vote.parentID { if err := ts.acceptPreferredChild(ctx, parentBlock); err != nil { return ids.ID{}, err } @@ -570,9 +592,9 @@ func (ts *Topological) vote(ctx context.Context, voteStack []votes) (ids.ID, err } if pollSuccessful { - ts.Polls.Successful() + ts.metrics.SuccessfulPoll() } else { - ts.Polls.Failed() + ts.metrics.FailedPoll() } return newPreferred, nil } @@ -597,23 +619,32 @@ func (ts *Topological) acceptPreferredChild(ctx context.Context, n *snowmanBlock return err } + height := child.Height() + timestamp := child.Timestamp() ts.ctx.Log.Trace("accepting block", zap.Stringer("blkID", pref), + zap.Uint64("height", height), + zap.Time("timestamp", timestamp), ) if err := child.Accept(ctx); err != nil { return err } - // Because this is the newest accepted block, this is the new head. - ts.head = pref - ts.height = child.Height() + // Update the last accepted values to the newly accepted block. + ts.lastAcceptedID = pref + ts.lastAcceptedHeight = height // Remove the decided block from the set of processing IDs, as its status // now implies its preferredness. ts.preferredIDs.Remove(pref) - - ts.Latency.Accepted(pref, ts.pollNumber, len(bytes)) - ts.Height.Accepted(ts.height) - ts.Timestamp.Accepted(child.Timestamp()) + delete(ts.preferredHeights, height) + + ts.metrics.Accepted( + pref, + height, + timestamp, + ts.pollNumber, + len(bytes), + ) // Because ts.blocks contains the last accepted block, we don't delete the // block from the blocks map here. 
@@ -627,13 +658,14 @@ func (ts *Topological) acceptPreferredChild(ctx context.Context, n *snowmanBlock ts.ctx.Log.Trace("rejecting block", zap.String("reason", "conflict with accepted block"), - zap.Stringer("rejectedID", childID), - zap.Stringer("conflictedID", pref), + zap.Stringer("blkID", childID), + zap.Uint64("height", child.Height()), + zap.Stringer("conflictID", pref), ) if err := child.Reject(ctx); err != nil { return err } - ts.Latency.Rejected(childID, ts.pollNumber, len(child.Bytes())) + ts.metrics.Rejected(childID, ts.pollNumber, len(child.Bytes())) // Track which blocks have been directly rejected rejects = append(rejects, childID) @@ -658,10 +690,16 @@ func (ts *Topological) rejectTransitively(ctx context.Context, rejected []ids.ID delete(ts.blocks, rejectedID) for childID, child := range rejectedNode.children { + ts.ctx.Log.Trace("rejecting block", + zap.String("reason", "rejected ancestor"), + zap.Stringer("blkID", childID), + zap.Uint64("height", child.Height()), + zap.Stringer("parentID", rejectedID), + ) if err := child.Reject(ctx); err != nil { return err } - ts.Latency.Rejected(childID, ts.pollNumber, len(child.Bytes())) + ts.metrics.Rejected(childID, ts.pollNumber, len(child.Bytes())) // add the newly rejected block to the end of the stack rejected = append(rejected, childID) diff --git a/avalanchego/snow/consensus/snowman/topological_test.go b/avalanchego/snow/consensus/snowman/topological_test.go index f3e6ed6e..540b5a8f 100644 --- a/avalanchego/snow/consensus/snowman/topological_test.go +++ b/avalanchego/snow/consensus/snowman/topological_test.go @@ -1,11 +1,9 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package snowman -import ( - "testing" -) +import "testing" func TestTopological(t *testing.T) { runConsensusTests(t, TopologicalFactory{}) diff --git a/avalanchego/snow/consensus/snowman/traced_consensus.go b/avalanchego/snow/consensus/snowman/traced_consensus.go index 67a8797b..10f49229 100644 --- a/avalanchego/snow/consensus/snowman/traced_consensus.go +++ b/avalanchego/snow/consensus/snowman/traced_consensus.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowman @@ -8,11 +8,11 @@ import ( "go.opentelemetry.io/otel/attribute" - oteltrace "go.opentelemetry.io/otel/trace" - "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/trace" "github.com/ava-labs/avalanchego/utils/bag" + + oteltrace "go.opentelemetry.io/otel/trace" ) var _ Consensus = (*tracedConsensus)(nil) diff --git a/avalanchego/snow/consensus/snowstorm/acceptor.go b/avalanchego/snow/consensus/snowstorm/acceptor.go deleted file mode 100644 index 798dbac7..00000000 --- a/avalanchego/snow/consensus/snowstorm/acceptor.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package snowstorm - -import ( - "context" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/events" - "github.com/ava-labs/avalanchego/utils/set" - "github.com/ava-labs/avalanchego/utils/wrappers" -) - -var _ events.Blockable = (*acceptor)(nil) - -type acceptor struct { - g *Directed - errs *wrappers.Errs - deps set.Set[ids.ID] - rejected bool - txID ids.ID -} - -func (a *acceptor) Dependencies() set.Set[ids.ID] { - return a.deps -} - -func (a *acceptor) Fulfill(ctx context.Context, id ids.ID) { - a.deps.Remove(id) - a.Update(ctx) -} - -func (a *acceptor) Abandon(context.Context, ids.ID) { - a.rejected = true -} - -func (a *acceptor) Update(ctx context.Context) { - // If I was rejected or I am still waiting on dependencies to finish or an - // error has occurred, I shouldn't do anything. - if a.rejected || a.deps.Len() != 0 || a.errs.Errored() { - return - } - a.errs.Add(a.g.accept(ctx, a.txID)) -} diff --git a/avalanchego/snow/consensus/snowstorm/benchmark_test.go b/avalanchego/snow/consensus/snowstorm/benchmark_test.go deleted file mode 100644 index 91feb10d..00000000 --- a/avalanchego/snow/consensus/snowstorm/benchmark_test.go +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package snowstorm - -import ( - "testing" - - "github.com/ava-labs/avalanchego/utils/sampler" - - sbcon "github.com/ava-labs/avalanchego/snow/consensus/snowball" -) - -func Simulate( - numColors, colorsPerConsumer, maxInputConflicts, numNodes int, - params sbcon.Parameters, - seed int64, - fact Factory, -) error { - net := Network{} - sampler.Seed(seed) - net.Initialize( - params, - numColors, - colorsPerConsumer, - maxInputConflicts, - ) - - sampler.Seed(seed) - for i := 0; i < numNodes; i++ { - if err := net.AddNode(fact.New()); err != nil { - return err - } - } - - numRounds := 0 - for !net.Finalized() && !net.Disagreement() && numRounds < 50 { - sampler.Seed(int64(numRounds) + seed) - if err := net.Round(); err != nil { - return err - } - numRounds++ - } - return nil -} - -/* - ****************************************************************************** - ********************************** Virtuous ********************************** - ****************************************************************************** - */ - -func BenchmarkVirtuousDirected(b *testing.B) { - for n := 0; n < b.N; n++ { - err := Simulate( - /*numColors=*/ 25, - /*colorsPerConsumer=*/ 1, - /*maxInputConflicts=*/ 1, - /*numNodes=*/ 50, - /*params=*/ sbcon.Parameters{ - K: 20, - Alpha: 11, - BetaVirtuous: 20, - BetaRogue: 30, - ConcurrentRepolls: 1, - }, - /*seed=*/ 0, - /*fact=*/ DirectedFactory{}, - ) - if err != nil { - b.Fatal(err) - } - } -} - -/* - ****************************************************************************** - *********************************** Rogue ************************************ - ****************************************************************************** - */ - -func BenchmarkRogueDirected(b *testing.B) { - for n := 0; n < b.N; n++ { - err := Simulate( - /*numColors=*/ 25, - /*colorsPerConsumer=*/ 1, - /*maxInputConflicts=*/ 3, - /*numNodes=*/ 50, - /*params=*/ sbcon.Parameters{ - K: 20, - Alpha: 11, - BetaVirtuous: 20, - BetaRogue: 30, - 
ConcurrentRepolls: 1, - }, - /*seed=*/ 0, - /*fact=*/ DirectedFactory{}, - ) - if err != nil { - b.Fatal(err) - } - } -} - -/* - ****************************************************************************** - ******************************** Many Inputs ********************************* - ****************************************************************************** - */ - -func BenchmarkMultiDirected(b *testing.B) { - for n := 0; n < b.N; n++ { - err := Simulate( - /*numColors=*/ 50, - /*colorsPerConsumer=*/ 10, - /*maxInputConflicts=*/ 1, - /*numNodes=*/ 50, - /*params=*/ sbcon.Parameters{ - K: 20, - Alpha: 11, - BetaVirtuous: 20, - BetaRogue: 30, - ConcurrentRepolls: 1, - }, - /*seed=*/ 0, - /*fact=*/ DirectedFactory{}, - ) - if err != nil { - b.Fatal(err) - } - } -} - -/* - ****************************************************************************** - ***************************** Many Rogue Inputs ****************************** - ****************************************************************************** - */ - -func BenchmarkMultiRogueDirected(b *testing.B) { - for n := 0; n < b.N; n++ { - err := Simulate( - /*numColors=*/ 50, - /*colorsPerConsumer=*/ 10, - /*maxInputConflicts=*/ 3, - /*numNodes=*/ 50, - /*params=*/ sbcon.Parameters{ - K: 20, - Alpha: 11, - BetaVirtuous: 20, - BetaRogue: 30, - ConcurrentRepolls: 1, - }, - /*seed=*/ 0, - /*fact=*/ DirectedFactory{}, - ) - if err != nil { - b.Fatal(err) - } - } -} diff --git a/avalanchego/snow/consensus/snowstorm/consensus.go b/avalanchego/snow/consensus/snowstorm/consensus.go deleted file mode 100644 index 62a2977e..00000000 --- a/avalanchego/snow/consensus/snowstorm/consensus.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package snowstorm - -import ( - "context" - "fmt" - - "github.com/ava-labs/avalanchego/api/health" - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/utils/bag" - "github.com/ava-labs/avalanchego/utils/set" - - sbcon "github.com/ava-labs/avalanchego/snow/consensus/snowball" -) - -// Consensus is a snowball instance deciding between an unbounded number of -// non-transitive conflicts. After performing a network sample of k nodes, you -// should call collect with the responses. -type Consensus interface { - fmt.Stringer - health.Checker - - // Takes in the context, alpha, betaVirtuous, and betaRogue - Initialize(*snow.ConsensusContext, sbcon.Parameters) error - - // Returns true if transaction is virtuous. - // That is, no transaction has been added that conflicts with - IsVirtuous(Tx) bool - - // Adds a new transaction to vote on. Returns if a critical error has - // occurred. - Add(context.Context, Tx) error - - // Remove a transaction from the set of currently processing txs. It is - // assumed that the provided transaction ID is currently processing. - Remove(context.Context, ids.ID) error - - // Returns true iff transaction has been added - Issued(Tx) bool - - // Returns the set of virtuous transactions - // that have not yet been accepted or rejected - Virtuous() set.Set[ids.ID] - - // Returns the currently preferred transactions to be finalized - Preferences() set.Set[ids.ID] - - // Return the current virtuous transactions that are being voted on. - VirtuousVoting() set.Set[ids.ID] - - // Returns the set of transactions conflicting with - Conflicts(Tx) set.Set[ids.ID] - - // Collects the results of a network poll. Assumes all transactions - // have been previously added. Returns true if any statuses or preferences - // changed. Returns if a critical error has occurred. - RecordPoll(context.Context, bag.Bag[ids.ID]) (bool, error) - - // Returns true iff all remaining transactions are rogue. 
Note, it is - // possible that after returning quiesce, a new decision may be added such - // that this instance should no longer quiesce. - Quiesce() bool - - // Returns true iff all added transactions have been finalized. Note, it is - // possible that after returning finalized, a new decision may be added such - // that this instance is no longer finalized. - Finalized() bool -} diff --git a/avalanchego/snow/consensus/snowstorm/consensus_test.go b/avalanchego/snow/consensus/snowstorm/consensus_test.go deleted file mode 100644 index 6aac3623..00000000 --- a/avalanchego/snow/consensus/snowstorm/consensus_test.go +++ /dev/null @@ -1,2007 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package snowstorm - -import ( - "context" - "errors" - "path" - "reflect" - "runtime" - "strings" - "testing" - - "github.com/prometheus/client_golang/prometheus" - - "github.com/stretchr/testify/require" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/snow/choices" - "github.com/ava-labs/avalanchego/utils" - "github.com/ava-labs/avalanchego/utils/bag" - "github.com/ava-labs/avalanchego/utils/set" - "github.com/ava-labs/avalanchego/utils/wrappers" - - sbcon "github.com/ava-labs/avalanchego/snow/consensus/snowball" -) - -type testFunc func(*testing.T, Factory) - -var ( - testFuncs = []testFunc{ - MetricsTest, - IssuedTest, - LeftoverInputTest, - LowerConfidenceTest, - MiddleConfidenceTest, - IndependentTest, - VirtuousTest, - IsVirtuousTest, - QuiesceTest, - AddNonEmptyWhitelistTest, - AddWhitelistedVirtuousTest, - WhitelistConflictsTest, - AcceptingDependencyTest, - AcceptingSlowDependencyTest, - RejectingDependencyTest, - RejectMultipleTimesTest, - VacuouslyAcceptedTest, - ConflictsTest, - VirtuousDependsOnRogueTest, - ErrorOnVacuouslyAcceptedTest, - ErrorOnAcceptedTest, - ErrorOnRejectingLowerConfidenceConflictTest, - 
ErrorOnRejectingHigherConfidenceConflictTest, - UTXOCleanupTest, - RemoveVirtuousTest, - } - - Red, Green, Blue, Alpha *TestTx - - errTest = errors.New("non-nil error") -) - -// R - G - B - A -func Setup() { - Red = &TestTx{} - Green = &TestTx{} - Blue = &TestTx{} - Alpha = &TestTx{} - - for i, color := range []*TestTx{Red, Green, Blue, Alpha} { - color.IDV = ids.Empty.Prefix(uint64(i)) - color.AcceptV = nil - color.RejectV = nil - color.StatusV = choices.Processing - - color.DependenciesV = nil - color.InputIDsV = []ids.ID{} - color.VerifyV = nil - color.BytesV = []byte{byte(i)} - } - - x := ids.Empty.Prefix(4) - y := ids.Empty.Prefix(5) - z := ids.Empty.Prefix(6) - - Red.InputIDsV = append(Red.InputIDsV, x) - Green.InputIDsV = append(Green.InputIDsV, x) - Green.InputIDsV = append(Green.InputIDsV, y) - - Blue.InputIDsV = append(Blue.InputIDsV, y) - Blue.InputIDsV = append(Blue.InputIDsV, z) - - Alpha.InputIDsV = append(Alpha.InputIDsV, z) - - errs := wrappers.Errs{} - errs.Add( - Red.Verify(context.Background()), - Green.Verify(context.Background()), - Blue.Verify(context.Background()), - Alpha.Verify(context.Background()), - ) - if errs.Errored() { - panic(errs.Err) - } -} - -// Execute all tests against a consensus implementation -func runConsensusTests(t *testing.T, factory Factory, prefix string) { - for _, test := range testFuncs { - Setup() - t.Run(getTestName(test), func(tt *testing.T) { - test(tt, factory) - }) - } - Setup() - StringTest(t, factory, prefix) -} - -func getTestName(i interface{}) string { - return strings.Split(path.Base(runtime.FuncForPC(reflect.ValueOf(i).Pointer()).Name()), ".")[1] -} - -func MetricsTest(t *testing.T, factory Factory) { - { - ctx := snow.DefaultConsensusContextTest() - params := sbcon.Parameters{ - K: 2, - Alpha: 2, - BetaVirtuous: 1, - BetaRogue: 2, - ConcurrentRepolls: 1, - } - err := ctx.AvalancheRegisterer.Register(prometheus.NewCounter(prometheus.CounterOpts{ - Name: "tx_processing", - })) - if err != nil { - 
t.Fatal(err) - } - graph := factory.New() - if err := graph.Initialize(ctx, params); err == nil { - t.Fatalf("should have errored due to a duplicated metric") - } - } - { - ctx := snow.DefaultConsensusContextTest() - params := sbcon.Parameters{ - K: 2, - Alpha: 2, - BetaVirtuous: 1, - BetaRogue: 2, - ConcurrentRepolls: 1, - } - err := ctx.AvalancheRegisterer.Register(prometheus.NewCounter(prometheus.CounterOpts{ - Name: "tx_accepted", - })) - if err != nil { - t.Fatal(err) - } - graph := factory.New() - if err := graph.Initialize(ctx, params); err == nil { - t.Fatalf("should have errored due to a duplicated metric") - } - } - { - ctx := snow.DefaultConsensusContextTest() - params := sbcon.Parameters{ - K: 2, - Alpha: 2, - BetaVirtuous: 1, - BetaRogue: 2, - ConcurrentRepolls: 1, - } - err := ctx.AvalancheRegisterer.Register(prometheus.NewCounter(prometheus.CounterOpts{ - Name: "tx_rejected", - })) - if err != nil { - t.Fatal(err) - } - graph := factory.New() - if err := graph.Initialize(ctx, params); err == nil { - t.Fatalf("should have errored due to a duplicated metric") - } - } -} - -func IssuedTest(t *testing.T, factory Factory) { - graph := factory.New() - - params := sbcon.Parameters{ - K: 2, - Alpha: 2, - BetaVirtuous: 1, - BetaRogue: 1, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - } - err := graph.Initialize(snow.DefaultConsensusContextTest(), params) - if err != nil { - t.Fatal(err) - } - - if issued := graph.Issued(Red); issued { - t.Fatalf("Haven't issued anything yet.") - } else if err := graph.Add(context.Background(), Red); err != nil { - t.Fatal(err) - } else if issued := graph.Issued(Red); !issued { - t.Fatalf("Have already issued.") - } - - _ = Blue.Accept(context.Background()) - - if issued := graph.Issued(Blue); !issued { - t.Fatalf("Have already accepted.") - } -} - -func LeftoverInputTest(t *testing.T, factory Factory) { - graph := factory.New() - - params := sbcon.Parameters{ - K: 2, - 
Alpha: 2, - BetaVirtuous: 1, - BetaRogue: 1, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - } - err := graph.Initialize(snow.DefaultConsensusContextTest(), params) - if err != nil { - t.Fatal(err) - } - - if err := graph.Add(context.Background(), Red); err != nil { - t.Fatal(err) - } else if err := graph.Add(context.Background(), Green); err != nil { - t.Fatal(err) - } - - prefs := graph.Preferences() - switch { - case prefs.Len() != 1: - t.Fatalf("Wrong number of preferences.") - case !prefs.Contains(Red.ID()): - t.Fatalf("Wrong preference. Expected %s got %s", Red.ID(), prefs.List()[0]) - case graph.Finalized(): - t.Fatalf("Finalized too early") - } - - r := bag.Bag[ids.ID]{} - r.SetThreshold(2) - r.AddCount(Red.ID(), 2) - if updated, err := graph.RecordPoll(context.Background(), r); err != nil { - t.Fatal(err) - } else if !updated { - t.Fatalf("Should have updated the frontiers") - } - - prefs = graph.Preferences() - switch { - case prefs.Len() != 0: - t.Fatalf("Wrong number of preferences.") - case !graph.Finalized(): - t.Fatalf("Finalized too late") - case Red.Status() != choices.Accepted: - t.Fatalf("%s should have been accepted", Red.ID()) - case Green.Status() != choices.Rejected: - t.Fatalf("%s should have been rejected", Green.ID()) - } -} - -func LowerConfidenceTest(t *testing.T, factory Factory) { - graph := factory.New() - - params := sbcon.Parameters{ - K: 2, - Alpha: 2, - BetaVirtuous: 1, - BetaRogue: 1, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - } - err := graph.Initialize(snow.DefaultConsensusContextTest(), params) - if err != nil { - t.Fatal(err) - } - - if err := graph.Add(context.Background(), Red); err != nil { - t.Fatal(err) - } - if err := graph.Add(context.Background(), Green); err != nil { - t.Fatal(err) - } - if err := graph.Add(context.Background(), Blue); err != nil { - t.Fatal(err) - } - - prefs := graph.Preferences() - 
switch { - case prefs.Len() != 1: - t.Fatalf("Wrong number of preferences.") - case !prefs.Contains(Red.ID()): - t.Fatalf("Wrong preference. Expected %s got %s", Red.ID(), prefs.List()[0]) - case graph.Finalized(): - t.Fatalf("Finalized too early") - } - - r := bag.Bag[ids.ID]{} - r.SetThreshold(2) - r.AddCount(Red.ID(), 2) - if updated, err := graph.RecordPoll(context.Background(), r); err != nil { - t.Fatal(err) - } else if !updated { - t.Fatalf("Should have updated the frontiers") - } - - prefs = graph.Preferences() - switch { - case prefs.Len() != 1: - t.Fatalf("Wrong number of preferences.") - case !prefs.Contains(Blue.ID()): - t.Fatalf("Wrong preference. Expected %s", Blue.ID()) - case graph.Finalized(): - t.Fatalf("Finalized too early") - } -} - -func MiddleConfidenceTest(t *testing.T, factory Factory) { - graph := factory.New() - - params := sbcon.Parameters{ - K: 2, - Alpha: 2, - BetaVirtuous: 1, - BetaRogue: 1, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - } - err := graph.Initialize(snow.DefaultConsensusContextTest(), params) - if err != nil { - t.Fatal(err) - } - - if err := graph.Add(context.Background(), Red); err != nil { - t.Fatal(err) - } - if err := graph.Add(context.Background(), Green); err != nil { - t.Fatal(err) - } - if err := graph.Add(context.Background(), Alpha); err != nil { - t.Fatal(err) - } - if err := graph.Add(context.Background(), Blue); err != nil { - t.Fatal(err) - } - - prefs := graph.Preferences() - switch { - case prefs.Len() != 2: - t.Fatalf("Wrong number of preferences.") - case !prefs.Contains(Red.ID()): - t.Fatalf("Wrong preference. Expected %s", Red.ID()) - case !prefs.Contains(Alpha.ID()): - t.Fatalf("Wrong preference. 
Expected %s", Alpha.ID()) - case graph.Finalized(): - t.Fatalf("Finalized too early") - } - - r := bag.Bag[ids.ID]{} - r.SetThreshold(2) - r.AddCount(Red.ID(), 2) - if updated, err := graph.RecordPoll(context.Background(), r); err != nil { - t.Fatal(err) - } else if !updated { - t.Fatalf("Should have updated the frontiers") - } - - prefs = graph.Preferences() - switch { - case prefs.Len() != 1: - t.Fatalf("Wrong number of preferences.") - case !prefs.Contains(Alpha.ID()): - t.Fatalf("Wrong preference. Expected %s", Alpha.ID()) - case graph.Finalized(): - t.Fatalf("Finalized too early") - } -} - -func IndependentTest(t *testing.T, factory Factory) { - graph := factory.New() - - params := sbcon.Parameters{ - K: 2, - Alpha: 2, - BetaVirtuous: 2, - BetaRogue: 2, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - } - err := graph.Initialize(snow.DefaultConsensusContextTest(), params) - if err != nil { - t.Fatal(err) - } - - if err := graph.Add(context.Background(), Red); err != nil { - t.Fatal(err) - } - if err := graph.Add(context.Background(), Alpha); err != nil { - t.Fatal(err) - } - - prefs := graph.Preferences() - switch { - case prefs.Len() != 2: - t.Fatalf("Wrong number of preferences.") - case !prefs.Contains(Red.ID()): - t.Fatalf("Wrong preference. Expected %s", Red.ID()) - case !prefs.Contains(Alpha.ID()): - t.Fatalf("Wrong preference. Expected %s", Alpha.ID()) - case graph.Finalized(): - t.Fatalf("Finalized too early") - } - - ra := bag.Bag[ids.ID]{} - ra.SetThreshold(2) - ra.AddCount(Red.ID(), 2) - ra.AddCount(Alpha.ID(), 2) - if updated, err := graph.RecordPoll(context.Background(), ra); err != nil { - t.Fatal(err) - } else if updated { - t.Fatalf("Shouldn't have updated the frontiers") - } else if prefs := graph.Preferences(); prefs.Len() != 2 { - t.Fatalf("Wrong number of preferences.") - } else if !prefs.Contains(Red.ID()) { - t.Fatalf("Wrong preference. 
Expected %s", Red.ID()) - } else if !prefs.Contains(Alpha.ID()) { - t.Fatalf("Wrong preference. Expected %s", Alpha.ID()) - } else if graph.Finalized() { - t.Fatalf("Finalized too early") - } else if updated, err := graph.RecordPoll(context.Background(), ra); err != nil { - t.Fatal(err) - } else if !updated { - t.Fatalf("Should have updated the frontiers") - } else if prefs := graph.Preferences(); prefs.Len() != 0 { - t.Fatalf("Wrong number of preferences.") - } else if !graph.Finalized() { - t.Fatalf("Finalized too late") - } -} - -func VirtuousTest(t *testing.T, factory Factory) { - graph := factory.New() - - params := sbcon.Parameters{ - K: 2, - Alpha: 2, - BetaVirtuous: 1, - BetaRogue: 1, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - } - err := graph.Initialize(snow.DefaultConsensusContextTest(), params) - if err != nil { - t.Fatal(err) - } - - if err := graph.Add(context.Background(), Red); err != nil { - t.Fatal(err) - } else if virtuous := graph.Virtuous(); virtuous.Len() != 1 { - t.Fatalf("Wrong number of virtuous.") - } else if !virtuous.Contains(Red.ID()) { - t.Fatalf("Wrong virtuous. Expected %s", Red.ID()) - } else if err := graph.Add(context.Background(), Alpha); err != nil { - t.Fatal(err) - } else if virtuous := graph.Virtuous(); virtuous.Len() != 2 { - t.Fatalf("Wrong number of virtuous.") - } else if !virtuous.Contains(Red.ID()) { - t.Fatalf("Wrong virtuous. Expected %s", Red.ID()) - } else if !virtuous.Contains(Alpha.ID()) { - t.Fatalf("Wrong virtuous. Expected %s", Alpha.ID()) - } else if err := graph.Add(context.Background(), Green); err != nil { - t.Fatal(err) - } else if virtuous := graph.Virtuous(); virtuous.Len() != 1 { - t.Fatalf("Wrong number of virtuous.") - } else if !virtuous.Contains(Alpha.ID()) { - t.Fatalf("Wrong virtuous. 
Expected %s", Alpha.ID()) - } else if err := graph.Add(context.Background(), Blue); err != nil { - t.Fatal(err) - } else if virtuous := graph.Virtuous(); virtuous.Len() != 0 { - t.Fatalf("Wrong number of virtuous.") - } -} - -func IsVirtuousTest(t *testing.T, factory Factory) { - graph := factory.New() - - params := sbcon.Parameters{ - K: 2, - Alpha: 2, - BetaVirtuous: 1, - BetaRogue: 1, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - } - if err := graph.Initialize(snow.DefaultConsensusContextTest(), params); err != nil { - t.Fatal(err) - } - - switch { - case !graph.IsVirtuous(Red): - t.Fatalf("Should be virtuous") - case !graph.IsVirtuous(Green): - t.Fatalf("Should be virtuous") - case !graph.IsVirtuous(Blue): - t.Fatalf("Should be virtuous") - case !graph.IsVirtuous(Alpha): - t.Fatalf("Should be virtuous") - } - - err := graph.Add(context.Background(), Red) - switch { - case err != nil: - t.Fatal(err) - case !graph.IsVirtuous(Red): - t.Fatalf("Should be virtuous") - case graph.IsVirtuous(Green): - t.Fatalf("Should not be virtuous") - case !graph.IsVirtuous(Blue): - t.Fatalf("Should be virtuous") - case !graph.IsVirtuous(Alpha): - t.Fatalf("Should be virtuous") - } - - err = graph.Add(context.Background(), Green) - switch { - case err != nil: - t.Fatal(err) - case graph.IsVirtuous(Red): - t.Fatalf("Should not be virtuous") - case graph.IsVirtuous(Green): - t.Fatalf("Should not be virtuous") - case graph.IsVirtuous(Blue): - t.Fatalf("Should not be virtuous") - } -} - -func QuiesceTest(t *testing.T, factory Factory) { - graph := factory.New() - - params := sbcon.Parameters{ - K: 2, - Alpha: 2, - BetaVirtuous: 1, - BetaRogue: 1, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - } - err := graph.Initialize(snow.DefaultConsensusContextTest(), params) - if err != nil { - t.Fatal(err) - } - - if !graph.Quiesce() { - t.Fatalf("Should quiesce") - } else if err := 
graph.Add(context.Background(), Red); err != nil { - t.Fatal(err) - } else if graph.Quiesce() { - t.Fatalf("Shouldn't quiesce") - } else if err := graph.Add(context.Background(), Green); err != nil { - t.Fatal(err) - } else if !graph.Quiesce() { - t.Fatalf("Should quiesce") - } -} - -func AddNonEmptyWhitelistTest(t *testing.T, factory Factory) { - graph := factory.New() - - params := sbcon.Parameters{ - K: 1, - Alpha: 1, - BetaVirtuous: 1, - BetaRogue: 1, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - } - ctx := snow.DefaultConsensusContextTest() - reg := prometheus.NewRegistry() - ctx.AvalancheRegisterer = reg - err := graph.Initialize(ctx, params) - if err != nil { - t.Fatal(err) - } - - /* - [tx1] - ⬈ ⬉ - [tx2] [tx3] - ⬈ ⬉ ⬈ - [tx6] [tx4] - ⬆ ⬆ - {stop stx7} {stop stx5} - Add stx5 => no conflict - Add tx6 => stx5 conflicts with tx6 - Add stx7 => stx5 conflicts with tx6 - stx5 conflicts with stx7 - stx7 conflicts with tx3 - stx7 conflicts with tx4 - */ - tx1 := &TestTx{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(1), - StatusV: choices.Processing, - }, - InputIDsV: []ids.ID{ids.GenerateTestID()}, - BytesV: []byte{1}, - } - tx2 := &TestTx{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(2), - StatusV: choices.Processing, - }, - InputIDsV: []ids.ID{ids.GenerateTestID()}, - DependenciesV: []Tx{tx1}, - BytesV: []byte{2}, - } - tx3 := &TestTx{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(3), - StatusV: choices.Processing, - }, - InputIDsV: []ids.ID{ids.GenerateTestID()}, - DependenciesV: []Tx{tx1}, - BytesV: []byte{3}, - } - tx4 := &TestTx{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(4), - StatusV: choices.Processing, - }, - InputIDsV: []ids.ID{ids.GenerateTestID()}, - DependenciesV: []Tx{tx2, tx3}, - BytesV: []byte{4}, - } - stx5 := &TestTx{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(5), - StatusV: 
choices.Processing, - }, - InputIDsV: []ids.ID{ids.GenerateTestID()}, - DependenciesV: []Tx{tx1, tx2, tx3, tx4}, - HasWhitelistV: true, - WhitelistV: set.Set[ids.ID]{ - tx1.IDV: struct{}{}, - tx2.IDV: struct{}{}, - tx3.IDV: struct{}{}, - tx4.IDV: struct{}{}, - }, - BytesV: []byte{5}, - } - tx6 := &TestTx{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(6), - StatusV: choices.Processing, - }, - InputIDsV: []ids.ID{ids.GenerateTestID()}, - DependenciesV: []Tx{tx2}, - BytesV: []byte{6}, - } - stx7 := &TestTx{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(7), - StatusV: choices.Processing, - }, - InputIDsV: []ids.ID{ids.GenerateTestID()}, - DependenciesV: []Tx{tx1, tx2, tx6}, - HasWhitelistV: true, - WhitelistV: set.Set[ids.ID]{ - tx1.IDV: struct{}{}, - tx2.IDV: struct{}{}, - tx6.IDV: struct{}{}, - }, - BytesV: []byte{7}, - } - - txs := []*TestTx{tx1, tx2, tx3, tx4, stx5, tx6, stx7} - for _, tx := range txs { - if err := graph.Add(context.Background(), tx); err != nil { - t.Fatal(err) - } - } - - // check if stop vertex has been issued but not accepted - mss := gatherCounterGauge(t, reg) - require.Equal(t, 5., mss["rogue_tx_processing"]) - require.Equal(t, 2., mss["virtuous_tx_processing"]) - require.Equal(t, 0., mss["whitelist_tx_accepted_count"]) - require.Equal(t, 2., mss["whitelist_tx_processing"]) - - vset1 := graph.Virtuous() - if !vset1.Equals(set.Set[ids.ID]{ - tx1.IDV: struct{}{}, - tx2.IDV: struct{}{}, - }) { - t.Fatalf("unexpected virtuous %v", vset1) - } - pset1 := graph.Preferences() - if !pset1.Equals(set.Set[ids.ID]{ - tx1.IDV: struct{}{}, - tx2.IDV: struct{}{}, - tx3.IDV: struct{}{}, - tx4.IDV: struct{}{}, - stx5.IDV: struct{}{}, - }) { - t.Fatalf("unexpected preferences %v", pset1) - } - if graph.Finalized() { - t.Fatal("unexpected Finalized") - } - - r := bag.Bag[ids.ID]{} - r.SetThreshold(2) - r.AddCount(tx1.ID(), 2) - - updated, err := graph.RecordPoll(context.Background(), r) - if err != nil { - t.Fatal(err) - } - 
if !updated { - t.Fatal("should have updated the frontiers") - } - - vset2 := graph.Virtuous() - if !vset2.Equals(set.Set[ids.ID]{ - tx2.IDV: struct{}{}, - }) { - t.Fatalf("unexpected virtuous %v", vset2) - } - pset2 := graph.Preferences() - if !pset2.Equals(set.Set[ids.ID]{ - tx2.IDV: struct{}{}, - tx3.IDV: struct{}{}, - tx4.IDV: struct{}{}, - stx5.IDV: struct{}{}, - }) { - t.Fatalf("unexpected preferences %v", pset2) - } - - mss = gatherCounterGauge(t, reg) - require.Equal(t, 5., mss["rogue_tx_processing"]) - require.Equal(t, 1., mss["virtuous_tx_processing"]) - require.Equal(t, 0., mss["whitelist_tx_accepted_count"]) - require.Equal(t, 2., mss["whitelist_tx_processing"]) -} - -func gatherCounterGauge(t *testing.T, reg *prometheus.Registry) map[string]float64 { - ms, err := reg.Gather() - if err != nil { - t.Fatal(err) - } - mss := make(map[string]float64) - for _, mf := range ms { - name := mf.GetName() - for _, m := range mf.GetMetric() { - cnt := m.GetCounter() - if cnt != nil { - mss[name] = cnt.GetValue() - break - } - gg := m.GetGauge() - if gg != nil { - mss[name] = gg.GetValue() - break - } - } - } - return mss -} - -func AddWhitelistedVirtuousTest(t *testing.T, factory Factory) { - graph := factory.New() - - params := sbcon.Parameters{ - K: 1, - Alpha: 1, - BetaVirtuous: 1, - BetaRogue: 1, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - } - err := graph.Initialize(snow.DefaultConsensusContextTest(), params) - if err != nil { - t.Fatal(err) - } - - tx0 := &TestTx{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - InputIDsV: []ids.ID{ids.GenerateTestID()}, - BytesV: utils.RandomBytes(32), - } - tx1 := &TestTx{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - InputIDsV: []ids.ID{ids.GenerateTestID()}, - BytesV: utils.RandomBytes(32), - HasWhitelistV: true, - } - - txs := []*TestTx{tx0, 
tx1} - for _, tx := range txs { - if err := graph.Add(context.Background(), tx); err != nil { - t.Fatal(err) - } - } - - vset := graph.Virtuous() - if vset.Len() != 0 { - t.Fatalf("unexpected virtuous %v", vset) - } -} - -// When a transaction supporting whitelisting is added to the conflict graph, -// all txs outside of its whitelist should be marked in conflict. -func WhitelistConflictsTest(t *testing.T, factory Factory) { - graph := factory.New() - - params := sbcon.Parameters{ - K: 1, - Alpha: 1, - BetaVirtuous: 1, - BetaRogue: 2, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - } - err := graph.Initialize(snow.DefaultConsensusContextTest(), params) - if err != nil { - t.Fatal(err) - } - - n := 10 - txIDs := make([]ids.ID, n) - for i := range txIDs { - txIDs[i] = ids.GenerateTestID() - } - allTxIDs := set.NewSet[ids.ID](n) - allTxIDs.Add(txIDs...) - - // each spending each other - allTxs := make([]Tx, n) - for i, txID := range txIDs { - tx := &TestTx{ - TestDecidable: choices.TestDecidable{ - IDV: txID, - AcceptV: nil, - StatusV: choices.Processing, - }, - InputIDsV: []ids.ID{txID}, - HasWhitelistV: false, - WhitelistV: nil, - } - allTxs[i] = tx - if err := graph.Add(context.Background(), tx); err != nil { - t.Fatal(err) - } - } - - whitelist := set.NewSet[ids.ID](1) - whitelist.Add(ids.GenerateTestID()) - - // make whitelist transaction that conflicts with tx outside of its - // whitelist - wlTxID := ids.GenerateTestID() - wlTx := &TestTx{ - TestDecidable: choices.TestDecidable{ - IDV: wlTxID, - AcceptV: nil, - StatusV: choices.Processing, - }, - InputIDsV: []ids.ID{wlTxID}, - HasWhitelistV: true, - WhitelistV: whitelist, - WhitelistErrV: nil, - } - if err := graph.Add(context.Background(), wlTx); err != nil { - t.Fatal(err) - } - - for _, tx := range allTxs { - conflicts := graph.Conflicts(tx) - if conflicts.Len() != 1 { - t.Fatal("wrong number of conflicts") - } - if !conflicts.Contains(wlTxID) { - 
t.Fatal("unexpected conflict") - } - } - - // the transitive vertex should be conflicting with everything - conflicts := graph.Conflicts(wlTx) - if !allTxIDs.Equals(conflicts) { - t.Fatal("transitive vertex outs != all txs") - } -} - -func AcceptingDependencyTest(t *testing.T, factory Factory) { - graph := factory.New() - - purple := &TestTx{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(7), - StatusV: choices.Processing, - }, - DependenciesV: []Tx{Red}, - } - purple.InputIDsV = append(purple.InputIDsV, ids.Empty.Prefix(8)) - - params := sbcon.Parameters{ - K: 1, - Alpha: 1, - BetaVirtuous: 1, - BetaRogue: 2, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - } - if err := graph.Initialize(snow.DefaultConsensusContextTest(), params); err != nil { - t.Fatal(err) - } - - if err := graph.Add(context.Background(), Red); err != nil { - t.Fatal(err) - } - if err := graph.Add(context.Background(), Green); err != nil { - t.Fatal(err) - } - if err := graph.Add(context.Background(), purple); err != nil { - t.Fatal(err) - } - - prefs := graph.Preferences() - switch { - case prefs.Len() != 2: - t.Fatalf("Wrong number of preferences.") - case !prefs.Contains(Red.ID()): - t.Fatalf("Wrong preference. Expected %s", Red.ID()) - case !prefs.Contains(purple.ID()): - t.Fatalf("Wrong preference. Expected %s", purple.ID()) - case Red.Status() != choices.Processing: - t.Fatalf("Wrong status. %s should be %s", Red.ID(), choices.Processing) - case Green.Status() != choices.Processing: - t.Fatalf("Wrong status. %s should be %s", Green.ID(), choices.Processing) - case purple.Status() != choices.Processing: - t.Fatalf("Wrong status. 
%s should be %s", purple.ID(), choices.Processing) - } - - g := bag.Bag[ids.ID]{} - g.Add(Green.ID()) - if updated, err := graph.RecordPoll(context.Background(), g); err != nil { - t.Fatal(err) - } else if !updated { - t.Fatalf("Should have updated the frontiers") - } - - prefs = graph.Preferences() - switch { - case prefs.Len() != 2: - t.Fatalf("Wrong number of preferences.") - case !prefs.Contains(Green.ID()): - t.Fatalf("Wrong preference. Expected %s", Green.ID()) - case !prefs.Contains(purple.ID()): - t.Fatalf("Wrong preference. Expected %s", purple.ID()) - case Red.Status() != choices.Processing: - t.Fatalf("Wrong status. %s should be %s", Red.ID(), choices.Processing) - case Green.Status() != choices.Processing: - t.Fatalf("Wrong status. %s should be %s", Green.ID(), choices.Processing) - case purple.Status() != choices.Processing: - t.Fatalf("Wrong status. %s should be %s", purple.ID(), choices.Processing) - } - - rp := bag.Bag[ids.ID]{} - rp.Add(Red.ID(), purple.ID()) - if updated, err := graph.RecordPoll(context.Background(), rp); err != nil { - t.Fatal(err) - } else if updated { - t.Fatalf("Shouldn't have updated the frontiers") - } - - prefs = graph.Preferences() - switch { - case prefs.Len() != 2: - t.Fatalf("Wrong number of preferences.") - case !prefs.Contains(Green.ID()): - t.Fatalf("Wrong preference. Expected %s", Green.ID()) - case !prefs.Contains(purple.ID()): - t.Fatalf("Wrong preference. Expected %s", purple.ID()) - case Red.Status() != choices.Processing: - t.Fatalf("Wrong status. %s should be %s", Red.ID(), choices.Processing) - case Green.Status() != choices.Processing: - t.Fatalf("Wrong status. %s should be %s", Green.ID(), choices.Processing) - case purple.Status() != choices.Processing: - t.Fatalf("Wrong status. 
%s should be %s", purple.ID(), choices.Processing) - } - - r := bag.Bag[ids.ID]{} - r.Add(Red.ID()) - if updated, err := graph.RecordPoll(context.Background(), r); err != nil { - t.Fatal(err) - } else if !updated { - t.Fatalf("Should have updated the frontiers") - } - - prefs = graph.Preferences() - switch { - case prefs.Len() != 0: - t.Fatalf("Wrong number of preferences.") - case Red.Status() != choices.Accepted: - t.Fatalf("Wrong status. %s should be %s", Red.ID(), choices.Accepted) - case Green.Status() != choices.Rejected: - t.Fatalf("Wrong status. %s should be %s", Green.ID(), choices.Rejected) - case purple.Status() != choices.Accepted: - t.Fatalf("Wrong status. %s should be %s", purple.ID(), choices.Accepted) - } -} - -type singleAcceptTx struct { - Tx - - t *testing.T - accepted bool -} - -func (tx *singleAcceptTx) Accept(ctx context.Context) error { - if tx.accepted { - tx.t.Fatalf("accept called multiple times") - } - tx.accepted = true - return tx.Tx.Accept(ctx) -} - -func AcceptingSlowDependencyTest(t *testing.T, factory Factory) { - graph := factory.New() - - rawPurple := &TestTx{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(7), - StatusV: choices.Processing, - }, - DependenciesV: []Tx{Red}, - } - rawPurple.InputIDsV = append(rawPurple.InputIDsV, ids.Empty.Prefix(8)) - - purple := &singleAcceptTx{ - Tx: rawPurple, - t: t, - } - - params := sbcon.Parameters{ - K: 1, - Alpha: 1, - BetaVirtuous: 1, - BetaRogue: 2, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - } - err := graph.Initialize(snow.DefaultConsensusContextTest(), params) - if err != nil { - t.Fatal(err) - } - - if err := graph.Add(context.Background(), Red); err != nil { - t.Fatal(err) - } - if err := graph.Add(context.Background(), Green); err != nil { - t.Fatal(err) - } - if err := graph.Add(context.Background(), purple); err != nil { - t.Fatal(err) - } - - prefs := graph.Preferences() - switch { - case prefs.Len() != 
2: - t.Fatalf("Wrong number of preferences.") - case !prefs.Contains(Red.ID()): - t.Fatalf("Wrong preference. Expected %s", Red.ID()) - case !prefs.Contains(purple.ID()): - t.Fatalf("Wrong preference. Expected %s", purple.ID()) - case Red.Status() != choices.Processing: - t.Fatalf("Wrong status. %s should be %s", Red.ID(), choices.Processing) - case Green.Status() != choices.Processing: - t.Fatalf("Wrong status. %s should be %s", Green.ID(), choices.Processing) - case purple.Status() != choices.Processing: - t.Fatalf("Wrong status. %s should be %s", purple.ID(), choices.Processing) - } - - g := bag.Bag[ids.ID]{} - g.Add(Green.ID()) - if updated, err := graph.RecordPoll(context.Background(), g); err != nil { - t.Fatal(err) - } else if !updated { - t.Fatalf("Should have updated the frontiers") - } - - prefs = graph.Preferences() - switch { - case prefs.Len() != 2: - t.Fatalf("Wrong number of preferences.") - case !prefs.Contains(Green.ID()): - t.Fatalf("Wrong preference. Expected %s", Green.ID()) - case !prefs.Contains(purple.ID()): - t.Fatalf("Wrong preference. Expected %s", purple.ID()) - case Red.Status() != choices.Processing: - t.Fatalf("Wrong status. %s should be %s", Red.ID(), choices.Processing) - case Green.Status() != choices.Processing: - t.Fatalf("Wrong status. %s should be %s", Green.ID(), choices.Processing) - case purple.Status() != choices.Processing: - t.Fatalf("Wrong status. %s should be %s", purple.ID(), choices.Processing) - } - - p := bag.Bag[ids.ID]{} - p.Add(purple.ID()) - if updated, err := graph.RecordPoll(context.Background(), p); err != nil { - t.Fatal(err) - } else if updated { - t.Fatalf("Shouldn't have updated the frontiers") - } - - prefs = graph.Preferences() - switch { - case prefs.Len() != 2: - t.Fatalf("Wrong number of preferences.") - case !prefs.Contains(Green.ID()): - t.Fatalf("Wrong preference. Expected %s", Green.ID()) - case !prefs.Contains(purple.ID()): - t.Fatalf("Wrong preference. 
Expected %s", purple.ID()) - case Red.Status() != choices.Processing: - t.Fatalf("Wrong status. %s should be %s", Red.ID(), choices.Processing) - case Green.Status() != choices.Processing: - t.Fatalf("Wrong status. %s should be %s", Green.ID(), choices.Processing) - case purple.Status() != choices.Processing: - t.Fatalf("Wrong status. %s should be %s", purple.ID(), choices.Processing) - } - - rp := bag.Bag[ids.ID]{} - rp.Add(Red.ID(), purple.ID()) - if updated, err := graph.RecordPoll(context.Background(), rp); err != nil { - t.Fatal(err) - } else if updated { - t.Fatalf("Shouldn't have updated the frontiers") - } - - prefs = graph.Preferences() - switch { - case prefs.Len() != 2: - t.Fatalf("Wrong number of preferences.") - case !prefs.Contains(Green.ID()): - t.Fatalf("Wrong preference. Expected %s", Green.ID()) - case !prefs.Contains(purple.ID()): - t.Fatalf("Wrong preference. Expected %s", purple.ID()) - case Red.Status() != choices.Processing: - t.Fatalf("Wrong status. %s should be %s", Red.ID(), choices.Processing) - case Green.Status() != choices.Processing: - t.Fatalf("Wrong status. %s should be %s", Green.ID(), choices.Processing) - case purple.Status() != choices.Processing: - t.Fatalf("Wrong status. %s should be %s", purple.ID(), choices.Processing) - } - - r := bag.Bag[ids.ID]{} - r.Add(Red.ID()) - if updated, err := graph.RecordPoll(context.Background(), r); err != nil { - t.Fatal(err) - } else if !updated { - t.Fatalf("Should have updated the frontiers") - } - - prefs = graph.Preferences() - switch { - case prefs.Len() != 0: - t.Fatalf("Wrong number of preferences.") - case Red.Status() != choices.Accepted: - t.Fatalf("Wrong status. %s should be %s", Red.ID(), choices.Accepted) - case Green.Status() != choices.Rejected: - t.Fatalf("Wrong status. %s should be %s", Green.ID(), choices.Rejected) - case purple.Status() != choices.Accepted: - t.Fatalf("Wrong status. 
%s should be %s", purple.ID(), choices.Accepted) - } -} - -func RejectingDependencyTest(t *testing.T, factory Factory) { - graph := factory.New() - - purple := &TestTx{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(7), - StatusV: choices.Processing, - }, - DependenciesV: []Tx{Red, Blue}, - } - purple.InputIDsV = append(purple.InputIDsV, ids.Empty.Prefix(8)) - - params := sbcon.Parameters{ - K: 1, - Alpha: 1, - BetaVirtuous: 1, - BetaRogue: 2, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - } - err := graph.Initialize(snow.DefaultConsensusContextTest(), params) - if err != nil { - t.Fatal(err) - } - - if err := graph.Add(context.Background(), Red); err != nil { - t.Fatal(err) - } - if err := graph.Add(context.Background(), Green); err != nil { - t.Fatal(err) - } - if err := graph.Add(context.Background(), Blue); err != nil { - t.Fatal(err) - } - if err := graph.Add(context.Background(), purple); err != nil { - t.Fatal(err) - } - - prefs := graph.Preferences() - switch { - case prefs.Len() != 2: - t.Fatalf("Wrong number of preferences.") - case !prefs.Contains(Red.ID()): - t.Fatalf("Wrong preference. Expected %s", Red.ID()) - case !prefs.Contains(purple.ID()): - t.Fatalf("Wrong preference. Expected %s", purple.ID()) - case Red.Status() != choices.Processing: - t.Fatalf("Wrong status. %s should be %s", Red.ID(), choices.Processing) - case Green.Status() != choices.Processing: - t.Fatalf("Wrong status. %s should be %s", Green.ID(), choices.Processing) - case Blue.Status() != choices.Processing: - t.Fatalf("Wrong status. %s should be %s", Blue.ID(), choices.Processing) - case purple.Status() != choices.Processing: - t.Fatalf("Wrong status. 
%s should be %s", purple.ID(), choices.Processing) - } - - gp := bag.Bag[ids.ID]{} - gp.Add(Green.ID(), purple.ID()) - if updated, err := graph.RecordPoll(context.Background(), gp); err != nil { - t.Fatal(err) - } else if !updated { - t.Fatalf("Should have updated the frontiers") - } - - prefs = graph.Preferences() - switch { - case prefs.Len() != 2: - t.Fatalf("Wrong number of preferences.") - case !prefs.Contains(Green.ID()): - t.Fatalf("Wrong preference. Expected %s", Green.ID()) - case !prefs.Contains(purple.ID()): - t.Fatalf("Wrong preference. Expected %s", purple.ID()) - case Red.Status() != choices.Processing: - t.Fatalf("Wrong status. %s should be %s", Red.ID(), choices.Processing) - case Green.Status() != choices.Processing: - t.Fatalf("Wrong status. %s should be %s", Green.ID(), choices.Processing) - case Blue.Status() != choices.Processing: - t.Fatalf("Wrong status. %s should be %s", Blue.ID(), choices.Processing) - case purple.Status() != choices.Processing: - t.Fatalf("Wrong status. %s should be %s", purple.ID(), choices.Processing) - } - - if updated, err := graph.RecordPoll(context.Background(), gp); err != nil { - t.Fatal(err) - } else if !updated { - t.Fatalf("Should have updated the frontiers") - } - - prefs = graph.Preferences() - switch { - case prefs.Len() != 0: - t.Fatalf("Wrong number of preferences.") - case Red.Status() != choices.Rejected: - t.Fatalf("Wrong status. %s should be %s", Red.ID(), choices.Rejected) - case Green.Status() != choices.Accepted: - t.Fatalf("Wrong status. %s should be %s", Green.ID(), choices.Accepted) - case Blue.Status() != choices.Rejected: - t.Fatalf("Wrong status. %s should be %s", Blue.ID(), choices.Rejected) - case purple.Status() != choices.Rejected: - t.Fatalf("Wrong status. 
%s should be %s", purple.ID(), choices.Rejected) - } -} - -func RejectMultipleTimesTest(t *testing.T, factory Factory) { - require := require.New(t) - - purple := &TestTx{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(7), - StatusV: choices.Processing, - }, - DependenciesV: []Tx{Green}, - InputIDsV: []ids.ID{ids.Empty.Prefix(8)}, - } - yellow := &TestTx{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(9), - StatusV: choices.Processing, - }, - InputIDsV: []ids.ID{ids.Empty.Prefix(8)}, - } - - graph := factory.New() - - params := sbcon.Parameters{ - K: 1, - Alpha: 1, - BetaVirtuous: 1, - BetaRogue: 1, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - } - require.NoError(graph.Initialize(snow.DefaultConsensusContextTest(), params)) - require.NoError(graph.Add(context.Background(), Red)) - require.NoError(graph.Add(context.Background(), yellow)) - require.NoError(graph.Add(context.Background(), Green)) - require.NoError(graph.Add(context.Background(), purple)) - - prefs := graph.Preferences() - require.Len(prefs, 2) - require.Contains(prefs, Red.ID()) - require.Contains(prefs, yellow.ID()) - - y := bag.Bag[ids.ID]{} - y.Add(yellow.ID()) - - updated, err := graph.RecordPoll(context.Background(), y) - require.NoError(err) - require.True(updated) - require.Equal(choices.Processing, Red.Status()) - require.Equal(choices.Accepted, yellow.Status()) - require.Equal(choices.Processing, Green.Status()) - require.Equal(choices.Rejected, purple.Status()) - - r := bag.Bag[ids.ID]{} - r.Add(Red.ID()) - - // Accepting Red rejects Green which was a dependency of purple. This - // results in purple being rejected for a second time. 
- updated, err = graph.RecordPoll(context.Background(), r) - require.NoError(err) - require.True(updated) - require.True(graph.Finalized()) - require.Equal(choices.Accepted, Red.Status()) - require.Equal(choices.Accepted, yellow.Status()) - require.Equal(choices.Rejected, Green.Status()) - require.Equal(choices.Rejected, purple.Status()) -} - -func VacuouslyAcceptedTest(t *testing.T, factory Factory) { - graph := factory.New() - - purple := &TestTx{TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(7), - StatusV: choices.Processing, - }} - - params := sbcon.Parameters{ - K: 1, - Alpha: 1, - BetaVirtuous: 1, - BetaRogue: 2, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - } - err := graph.Initialize(snow.DefaultConsensusContextTest(), params) - if err != nil { - t.Fatal(err) - } - - if err := graph.Add(context.Background(), purple); err != nil { - t.Fatal(err) - } else if prefs := graph.Preferences(); prefs.Len() != 0 { - t.Fatalf("Wrong number of preferences.") - } else if status := purple.Status(); status != choices.Accepted { - t.Fatalf("Wrong status. 
%s should be %s", purple.ID(), choices.Accepted) - } -} - -func ConflictsTest(t *testing.T, factory Factory) { - graph := factory.New() - - params := sbcon.Parameters{ - K: 1, - Alpha: 1, - BetaVirtuous: 1, - BetaRogue: 2, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - } - err := graph.Initialize(snow.DefaultConsensusContextTest(), params) - if err != nil { - t.Fatal(err) - } - - conflictInputID := ids.Empty.Prefix(0) - - purple := &TestTx{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(6), - StatusV: choices.Processing, - }, - InputIDsV: []ids.ID{conflictInputID}, - } - - orange := &TestTx{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(7), - StatusV: choices.Processing, - }, - InputIDsV: []ids.ID{conflictInputID}, - } - - if err := graph.Add(context.Background(), purple); err != nil { - t.Fatal(err) - } else if orangeConflicts := graph.Conflicts(orange); orangeConflicts.Len() != 1 { - t.Fatalf("Wrong number of conflicts") - } else if !orangeConflicts.Contains(purple.IDV) { - t.Fatalf("Conflicts does not contain the right transaction") - } else if err := graph.Add(context.Background(), orange); err != nil { - t.Fatal(err) - } else if orangeConflicts := graph.Conflicts(orange); orangeConflicts.Len() != 1 { - t.Fatalf("Wrong number of conflicts") - } else if !orangeConflicts.Contains(purple.IDV) { - t.Fatalf("Conflicts does not contain the right transaction") - } -} - -func VirtuousDependsOnRogueTest(t *testing.T, factory Factory) { - graph := factory.New() - - params := sbcon.Parameters{ - K: 1, - Alpha: 1, - BetaVirtuous: 1, - BetaRogue: 2, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - } - err := graph.Initialize(snow.DefaultConsensusContextTest(), params) - if err != nil { - t.Fatal(err) - } - - rogue1 := &TestTx{TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(0), - StatusV: choices.Processing, - }} - 
rogue2 := &TestTx{TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(1), - StatusV: choices.Processing, - }} - virtuous := &TestTx{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(2), - StatusV: choices.Processing, - }, - DependenciesV: []Tx{rogue1}, - } - - input1 := ids.Empty.Prefix(3) - input2 := ids.Empty.Prefix(4) - - rogue1.InputIDsV = append(rogue1.InputIDsV, input1) - rogue2.InputIDsV = append(rogue2.InputIDsV, input1) - - virtuous.InputIDsV = append(virtuous.InputIDsV, input2) - - if err := graph.Add(context.Background(), rogue1); err != nil { - t.Fatal(err) - } else if err := graph.Add(context.Background(), rogue2); err != nil { - t.Fatal(err) - } else if err := graph.Add(context.Background(), virtuous); err != nil { - t.Fatal(err) - } - - votes := bag.Bag[ids.ID]{} - votes.Add(rogue1.ID()) - votes.Add(virtuous.ID()) - if updated, err := graph.RecordPoll(context.Background(), votes); err != nil { - t.Fatal(err) - } else if updated { - t.Fatalf("Shouldn't have updated the frontiers") - } else if status := rogue1.Status(); status != choices.Processing { - t.Fatalf("Rogue Tx is %s expected %s", status, choices.Processing) - } else if status := rogue2.Status(); status != choices.Processing { - t.Fatalf("Rogue Tx is %s expected %s", status, choices.Processing) - } else if status := virtuous.Status(); status != choices.Processing { - t.Fatalf("Virtuous Tx is %s expected %s", status, choices.Processing) - } else if !graph.Quiesce() { - t.Fatalf("Should quiesce as there are no pending virtuous transactions") - } -} - -func ErrorOnVacuouslyAcceptedTest(t *testing.T, factory Factory) { - graph := factory.New() - - purple := &TestTx{TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(7), - AcceptV: errTest, - StatusV: choices.Processing, - }} - - params := sbcon.Parameters{ - K: 1, - Alpha: 1, - BetaVirtuous: 1, - BetaRogue: 2, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - 
} - err := graph.Initialize(snow.DefaultConsensusContextTest(), params) - if err != nil { - t.Fatal(err) - } - - if err := graph.Add(context.Background(), purple); err == nil { - t.Fatalf("Should have errored on acceptance") - } -} - -func ErrorOnAcceptedTest(t *testing.T, factory Factory) { - graph := factory.New() - - purple := &TestTx{TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(7), - AcceptV: errTest, - StatusV: choices.Processing, - }} - purple.InputIDsV = append(purple.InputIDsV, ids.Empty.Prefix(4)) - - params := sbcon.Parameters{ - K: 1, - Alpha: 1, - BetaVirtuous: 1, - BetaRogue: 2, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - } - err := graph.Initialize(snow.DefaultConsensusContextTest(), params) - if err != nil { - t.Fatal(err) - } - - if err := graph.Add(context.Background(), purple); err != nil { - t.Fatal(err) - } - - votes := bag.Bag[ids.ID]{} - votes.Add(purple.ID()) - if _, err := graph.RecordPoll(context.Background(), votes); err == nil { - t.Fatalf("Should have errored on accepting an invalid tx") - } -} - -func ErrorOnRejectingLowerConfidenceConflictTest(t *testing.T, factory Factory) { - graph := factory.New() - - x := ids.Empty.Prefix(4) - - purple := &TestTx{TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(7), - StatusV: choices.Processing, - }} - purple.InputIDsV = append(purple.InputIDsV, x) - - pink := &TestTx{TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(8), - RejectV: errTest, - StatusV: choices.Processing, - }} - pink.InputIDsV = append(pink.InputIDsV, x) - - params := sbcon.Parameters{ - K: 1, - Alpha: 1, - BetaVirtuous: 1, - BetaRogue: 1, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - } - err := graph.Initialize(snow.DefaultConsensusContextTest(), params) - if err != nil { - t.Fatal(err) - } - - if err := graph.Add(context.Background(), purple); err != nil { - t.Fatal(err) - 
} else if err := graph.Add(context.Background(), pink); err != nil { - t.Fatal(err) - } - - votes := bag.Bag[ids.ID]{} - votes.Add(purple.ID()) - if _, err := graph.RecordPoll(context.Background(), votes); err == nil { - t.Fatalf("Should have errored on rejecting an invalid tx") - } -} - -func ErrorOnRejectingHigherConfidenceConflictTest(t *testing.T, factory Factory) { - graph := factory.New() - - x := ids.Empty.Prefix(4) - - purple := &TestTx{TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(7), - StatusV: choices.Processing, - }} - purple.InputIDsV = append(purple.InputIDsV, x) - - pink := &TestTx{TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(8), - RejectV: errTest, - StatusV: choices.Processing, - }} - pink.InputIDsV = append(pink.InputIDsV, x) - - params := sbcon.Parameters{ - K: 1, - Alpha: 1, - BetaVirtuous: 1, - BetaRogue: 1, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - } - err := graph.Initialize(snow.DefaultConsensusContextTest(), params) - if err != nil { - t.Fatal(err) - } - - if err := graph.Add(context.Background(), pink); err != nil { - t.Fatal(err) - } else if err := graph.Add(context.Background(), purple); err != nil { - t.Fatal(err) - } - - votes := bag.Bag[ids.ID]{} - votes.Add(purple.ID()) - if _, err := graph.RecordPoll(context.Background(), votes); err == nil { - t.Fatalf("Should have errored on rejecting an invalid tx") - } -} - -func UTXOCleanupTest(t *testing.T, factory Factory) { - graph := factory.New() - - params := sbcon.Parameters{ - K: 1, - Alpha: 1, - BetaVirtuous: 1, - BetaRogue: 2, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - } - err := graph.Initialize(snow.DefaultConsensusContextTest(), params) - require.NoError(t, err) - - err = graph.Add(context.Background(), Red) - require.NoError(t, err) - - err = graph.Add(context.Background(), Green) - require.NoError(t, err) - - redVotes := 
bag.Bag[ids.ID]{} - redVotes.Add(Red.ID()) - changed, err := graph.RecordPoll(context.Background(), redVotes) - require.NoError(t, err) - require.False(t, changed, "shouldn't have accepted the red tx") - - changed, err = graph.RecordPoll(context.Background(), redVotes) - require.NoError(t, err) - require.True(t, changed, "should have accepted the red tx") - - require.Equal(t, choices.Accepted, Red.Status()) - require.Equal(t, choices.Rejected, Green.Status()) - - err = graph.Add(context.Background(), Blue) - require.NoError(t, err) - - blueVotes := bag.Bag[ids.ID]{} - blueVotes.Add(Blue.ID()) - changed, err = graph.RecordPoll(context.Background(), blueVotes) - require.NoError(t, err) - require.True(t, changed, "should have accepted the blue tx") - - require.Equal(t, choices.Accepted, Blue.Status()) -} - -func RemoveVirtuousTest(t *testing.T, factory Factory) { - graph := factory.New() - - params := sbcon.Parameters{ - K: 1, - Alpha: 1, - BetaVirtuous: 1, - BetaRogue: 2, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - } - err := graph.Initialize(snow.DefaultConsensusContextTest(), params) - require.NoError(t, err) - - err = graph.Add(context.Background(), Red) - require.NoError(t, err) - - virtuous := graph.Virtuous() - require.NotEmpty(t, virtuous, "a virtuous transaction was added but not tracked") - - err = graph.Remove(context.Background(), Red.ID()) - require.NoError(t, err) - - virtuous = graph.Virtuous() - require.Empty(t, virtuous, "removal of a virtuous transaction should have emptied the virtuous set") -} - -func StringTest(t *testing.T, factory Factory, prefix string) { - graph := factory.New() - - params := sbcon.Parameters{ - K: 2, - Alpha: 2, - BetaVirtuous: 1, - BetaRogue: 2, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - } - err := graph.Initialize(snow.DefaultConsensusContextTest(), params) - if err != nil { - t.Fatal(err) - } - - if err 
:= graph.Add(context.Background(), Red); err != nil { - t.Fatal(err) - } - if err := graph.Add(context.Background(), Green); err != nil { - t.Fatal(err) - } - if err := graph.Add(context.Background(), Blue); err != nil { - t.Fatal(err) - } - if err := graph.Add(context.Background(), Alpha); err != nil { - t.Fatal(err) - } - - prefs := graph.Preferences() - switch { - case prefs.Len() != 1: - t.Fatalf("Wrong number of preferences.") - case !prefs.Contains(Red.ID()): - t.Fatalf("Wrong preference. Expected %s got %s", Red.ID(), prefs.List()[0]) - case graph.Finalized(): - t.Fatalf("Finalized too early") - } - - rb := bag.Bag[ids.ID]{} - rb.SetThreshold(2) - rb.AddCount(Red.ID(), 2) - rb.AddCount(Blue.ID(), 2) - if changed, err := graph.RecordPoll(context.Background(), rb); err != nil { - t.Fatal(err) - } else if !changed { - t.Fatalf("Should have caused the frontiers to recalculate") - } else if err := graph.Add(context.Background(), Blue); err != nil { - t.Fatal(err) - } - - { - expected := prefix + "(\n" + - " Choice[0] = ID: LUC1cmcxnfNR9LdkACS2ccGKLEK7SYqB4gLLTycQfg1koyfSq SB(NumSuccessfulPolls = 1, Confidence = 1)\n" + - " Choice[1] = ID: TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES SB(NumSuccessfulPolls = 0, Confidence = 0)\n" + - " Choice[2] = ID: Zda4gsqTjRaX6XVZekVNi3ovMFPHDRQiGbzYuAb7Nwqy1rGBc SB(NumSuccessfulPolls = 0, Confidence = 0)\n" + - " Choice[3] = ID: 2mcwQKiD8VEspmMJpL1dc7okQQ5dDVAWeCBZ7FWBFAbxpv3t7w SB(NumSuccessfulPolls = 1, Confidence = 1)\n" + - ")" - if str := graph.String(); str != expected { - t.Fatalf("Expected %s, got %s", expected, str) - } - } - - prefs = graph.Preferences() - switch { - case prefs.Len() != 2: - t.Fatalf("Wrong number of preferences.") - case !prefs.Contains(Red.ID()): - t.Fatalf("Wrong preference. Expected %s", Red.ID()) - case !prefs.Contains(Blue.ID()): - t.Fatalf("Wrong preference. 
Expected %s", Blue.ID()) - case graph.Finalized(): - t.Fatalf("Finalized too early") - } - - ga := bag.Bag[ids.ID]{} - ga.SetThreshold(2) - ga.AddCount(Green.ID(), 2) - ga.AddCount(Alpha.ID(), 2) - if changed, err := graph.RecordPoll(context.Background(), ga); err != nil { - t.Fatal(err) - } else if changed { - t.Fatalf("Shouldn't have caused the frontiers to recalculate") - } - - { - expected := prefix + "(\n" + - " Choice[0] = ID: LUC1cmcxnfNR9LdkACS2ccGKLEK7SYqB4gLLTycQfg1koyfSq SB(NumSuccessfulPolls = 1, Confidence = 0)\n" + - " Choice[1] = ID: TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES SB(NumSuccessfulPolls = 1, Confidence = 1)\n" + - " Choice[2] = ID: Zda4gsqTjRaX6XVZekVNi3ovMFPHDRQiGbzYuAb7Nwqy1rGBc SB(NumSuccessfulPolls = 1, Confidence = 1)\n" + - " Choice[3] = ID: 2mcwQKiD8VEspmMJpL1dc7okQQ5dDVAWeCBZ7FWBFAbxpv3t7w SB(NumSuccessfulPolls = 1, Confidence = 0)\n" + - ")" - if str := graph.String(); str != expected { - t.Fatalf("Expected %s, got %s", expected, str) - } - } - - prefs = graph.Preferences() - switch { - case prefs.Len() != 2: - t.Fatalf("Wrong number of preferences.") - case !prefs.Contains(Red.ID()): - t.Fatalf("Wrong preference. Expected %s", Red.ID()) - case !prefs.Contains(Blue.ID()): - t.Fatalf("Wrong preference. 
Expected %s", Blue.ID()) - case graph.Finalized(): - t.Fatalf("Finalized too early") - } - - empty := bag.Bag[ids.ID]{} - if changed, err := graph.RecordPoll(context.Background(), empty); err != nil { - t.Fatal(err) - } else if changed { - t.Fatalf("Shouldn't have caused the frontiers to recalculate") - } - - { - expected := prefix + "(\n" + - " Choice[0] = ID: LUC1cmcxnfNR9LdkACS2ccGKLEK7SYqB4gLLTycQfg1koyfSq SB(NumSuccessfulPolls = 1, Confidence = 0)\n" + - " Choice[1] = ID: TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES SB(NumSuccessfulPolls = 1, Confidence = 0)\n" + - " Choice[2] = ID: Zda4gsqTjRaX6XVZekVNi3ovMFPHDRQiGbzYuAb7Nwqy1rGBc SB(NumSuccessfulPolls = 1, Confidence = 0)\n" + - " Choice[3] = ID: 2mcwQKiD8VEspmMJpL1dc7okQQ5dDVAWeCBZ7FWBFAbxpv3t7w SB(NumSuccessfulPolls = 1, Confidence = 0)\n" + - ")" - if str := graph.String(); str != expected { - t.Fatalf("Expected %s, got %s", expected, str) - } - } - - prefs = graph.Preferences() - switch { - case prefs.Len() != 2: - t.Fatalf("Wrong number of preferences.") - case !prefs.Contains(Red.ID()): - t.Fatalf("Wrong preference. Expected %s", Red.ID()) - case !prefs.Contains(Blue.ID()): - t.Fatalf("Wrong preference. 
Expected %s", Blue.ID()) - case graph.Finalized(): - t.Fatalf("Finalized too early") - } - - if changed, err := graph.RecordPoll(context.Background(), ga); err != nil { - t.Fatal(err) - } else if !changed { - t.Fatalf("Should have caused the frontiers to recalculate") - } - - { - expected := prefix + "(\n" + - " Choice[0] = ID: LUC1cmcxnfNR9LdkACS2ccGKLEK7SYqB4gLLTycQfg1koyfSq SB(NumSuccessfulPolls = 1, Confidence = 0)\n" + - " Choice[1] = ID: TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES SB(NumSuccessfulPolls = 2, Confidence = 1)\n" + - " Choice[2] = ID: Zda4gsqTjRaX6XVZekVNi3ovMFPHDRQiGbzYuAb7Nwqy1rGBc SB(NumSuccessfulPolls = 2, Confidence = 1)\n" + - " Choice[3] = ID: 2mcwQKiD8VEspmMJpL1dc7okQQ5dDVAWeCBZ7FWBFAbxpv3t7w SB(NumSuccessfulPolls = 1, Confidence = 0)\n" + - ")" - if str := graph.String(); str != expected { - t.Fatalf("Expected %s, got %s", expected, str) - } - } - - prefs = graph.Preferences() - switch { - case prefs.Len() != 2: - t.Fatalf("Wrong number of preferences.") - case !prefs.Contains(Green.ID()): - t.Fatalf("Wrong preference. Expected %s", Green.ID()) - case !prefs.Contains(Alpha.ID()): - t.Fatalf("Wrong preference. 
Expected %s", Alpha.ID()) - case graph.Finalized(): - t.Fatalf("Finalized too early") - } - - if changed, err := graph.RecordPoll(context.Background(), ga); err != nil { - t.Fatal(err) - } else if !changed { - t.Fatalf("Should have caused the frontiers to recalculate") - } - - { - expected := prefix + "()" - if str := graph.String(); str != expected { - t.Fatalf("Expected %s, got %s", expected, str) - } - } - - prefs = graph.Preferences() - switch { - case prefs.Len() != 0: - t.Fatalf("Wrong number of preferences.") - case !graph.Finalized(): - t.Fatalf("Finalized too late") - case Green.Status() != choices.Accepted: - t.Fatalf("%s should have been accepted", Green.ID()) - case Alpha.Status() != choices.Accepted: - t.Fatalf("%s should have been accepted", Alpha.ID()) - case Red.Status() != choices.Rejected: - t.Fatalf("%s should have been rejected", Red.ID()) - case Blue.Status() != choices.Rejected: - t.Fatalf("%s should have been rejected", Blue.ID()) - } - - if changed, err := graph.RecordPoll(context.Background(), rb); err != nil { - t.Fatal(err) - } else if changed { - t.Fatalf("Shouldn't have caused the frontiers to recalculate") - } - - { - expected := prefix + "()" - if str := graph.String(); str != expected { - t.Fatalf("Expected %s, got %s", expected, str) - } - } - - prefs = graph.Preferences() - switch { - case prefs.Len() != 0: - t.Fatalf("Wrong number of preferences.") - case !graph.Finalized(): - t.Fatalf("Finalized too late") - case Green.Status() != choices.Accepted: - t.Fatalf("%s should have been accepted", Green.ID()) - case Alpha.Status() != choices.Accepted: - t.Fatalf("%s should have been accepted", Alpha.ID()) - case Red.Status() != choices.Rejected: - t.Fatalf("%s should have been rejected", Red.ID()) - case Blue.Status() != choices.Rejected: - t.Fatalf("%s should have been rejected", Blue.ID()) - } -} diff --git a/avalanchego/snow/consensus/snowstorm/directed.go b/avalanchego/snow/consensus/snowstorm/directed.go deleted file mode 100644 
index b88aea68..00000000 --- a/avalanchego/snow/consensus/snowstorm/directed.go +++ /dev/null @@ -1,848 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package snowstorm - -import ( - "context" - "fmt" - "strings" - - "github.com/prometheus/client_golang/prometheus" - - "go.uber.org/zap" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/snow/choices" - "github.com/ava-labs/avalanchego/snow/consensus/metrics" - "github.com/ava-labs/avalanchego/snow/events" - "github.com/ava-labs/avalanchego/utils/bag" - "github.com/ava-labs/avalanchego/utils/set" - "github.com/ava-labs/avalanchego/utils/wrappers" - - sbcon "github.com/ava-labs/avalanchego/snow/consensus/snowball" -) - -var ( - _ Factory = (*DirectedFactory)(nil) - _ Consensus = (*Directed)(nil) -) - -// DirectedFactory implements Factory by returning a directed struct -type DirectedFactory struct{} - -func (DirectedFactory) New() Consensus { - return &Directed{} -} - -// Directed is an implementation of a multi-color, non-transitive, snowball -// instance -type Directed struct { - metrics.Polls - metrics.Latency - whitelistTxLatency metrics.Latency - numVirtuousTxs prometheus.Gauge - numRogueTxs prometheus.Gauge - - // context that this consensus instance is executing in - ctx *snow.ConsensusContext - - // params describes how this instance was parameterized - params sbcon.Parameters - - // each element of preferences is the ID of a transaction that is preferred - preferences set.Set[ids.ID] - - // each element of virtuous is the ID of a transaction that is virtuous - virtuous set.Set[ids.ID] - - // each element is in the virtuous set and is still being voted on - virtuousVoting set.Set[ids.ID] - - // number of times RecordPoll has been called - pollNumber uint64 - - // keeps track of whether dependencies have been accepted - pendingAccept events.Blocker - - // keeps track of 
whether dependencies have been rejected - pendingReject events.Blocker - - // track any errors that occurred during callbacks - errs wrappers.Errs - - // Key: Transaction ID - // Value: Node that represents this transaction in the conflict graph - txs map[ids.ID]*directedTx - - // Key: UTXO ID - // Value: IDs of transactions that consume the UTXO specified in the key - utxos map[ids.ID]set.Set[ids.ID] - - // map transaction ID to the set of whitelisted transaction IDs. - whitelists map[ids.ID]set.Set[ids.ID] -} - -type directedTx struct { - snowball - - // pendingAccept identifies if this transaction has been marked as accepted - // once its transitive dependencies have also been accepted - pendingAccept bool - - // ins is the set of txIDs that this tx conflicts with that are less - // preferred than this tx - ins set.Set[ids.ID] - - // outs is the set of txIDs that this tx conflicts with that are more - // preferred than this tx - outs set.Set[ids.ID] - - // tx is the actual transaction this node represents - tx Tx -} - -func (dg *Directed) Initialize( - ctx *snow.ConsensusContext, - params sbcon.Parameters, -) error { - dg.ctx = ctx - dg.params = params - - var err error - dg.Polls, err = metrics.NewPolls("", ctx.AvalancheRegisterer) - if err != nil { - return fmt.Errorf("failed to create poll metrics: %w", err) - } - - dg.Latency, err = metrics.NewLatency("txs", "transaction(s)", ctx.Log, "", ctx.AvalancheRegisterer) - if err != nil { - return fmt.Errorf("failed to create latency metrics: %w", err) - } - - dg.whitelistTxLatency, err = metrics.NewLatency("whitelist_tx", "whitelist transaction(s)", ctx.Log, "", ctx.AvalancheRegisterer) - if err != nil { - return fmt.Errorf("failed to create whitelist tx metrics: %w", err) - } - - dg.numVirtuousTxs = prometheus.NewGauge(prometheus.GaugeOpts{ - Name: "virtuous_tx_processing", - Help: "Number of currently processing virtuous transaction(s)", - }) - err = ctx.AvalancheRegisterer.Register(dg.numVirtuousTxs) - if err != 
nil { - return fmt.Errorf("failed to create virtuous tx metrics: %w", err) - } - - dg.numRogueTxs = prometheus.NewGauge(prometheus.GaugeOpts{ - Name: "rogue_tx_processing", - Help: "Number of currently processing rogue transaction(s)", - }) - err = ctx.AvalancheRegisterer.Register(dg.numRogueTxs) - if err != nil { - return fmt.Errorf("failed to create rogue tx metrics: %w", err) - } - - dg.txs = make(map[ids.ID]*directedTx) - dg.utxos = make(map[ids.ID]set.Set[ids.ID]) - dg.whitelists = make(map[ids.ID]set.Set[ids.ID]) - - return params.Verify() -} - -func (dg *Directed) Virtuous() set.Set[ids.ID] { - return dg.virtuous -} - -func (dg *Directed) Preferences() set.Set[ids.ID] { - return dg.preferences -} - -func (dg *Directed) VirtuousVoting() set.Set[ids.ID] { - return dg.virtuousVoting -} - -func (dg *Directed) Quiesce() bool { - numVirtuous := dg.virtuousVoting.Len() - dg.ctx.Log.Verbo("conflict graph Quiesce was called", - zap.Int("numVirtuous", numVirtuous), - ) - return numVirtuous == 0 -} - -func (dg *Directed) Finalized() bool { - numPreferences := dg.preferences.Len() - dg.ctx.Log.Verbo("conflict graph Finalized was called", - zap.Int("numPreferences", numPreferences), - ) - return numPreferences == 0 -} - -// HealthCheck returns information about the consensus health. 
-func (dg *Directed) HealthCheck(context.Context) (interface{}, error) { - numOutstandingTxs := dg.Latency.NumProcessing() - isOutstandingTxs := numOutstandingTxs <= dg.params.MaxOutstandingItems - healthy := isOutstandingTxs - details := map[string]interface{}{ - "outstandingTransactions": numOutstandingTxs, - } - - // check for long running transactions - oldestProcessingDuration := dg.Latency.MeasureAndGetOldestDuration() - processingTimeOK := oldestProcessingDuration <= dg.params.MaxItemProcessingTime - healthy = healthy && processingTimeOK - details["longestRunningTransaction"] = oldestProcessingDuration.String() - - if !healthy { - var errorReasons []string - if !isOutstandingTxs { - errorReasons = append(errorReasons, fmt.Sprintf("number outstanding transactions %d > %d", numOutstandingTxs, dg.params.MaxOutstandingItems)) - } - if !processingTimeOK { - errorReasons = append(errorReasons, fmt.Sprintf("transaction processing time %s > %s", oldestProcessingDuration, dg.params.MaxItemProcessingTime)) - } - return details, fmt.Errorf("snowstorm consensus is not healthy reason: %s", strings.Join(errorReasons, ", ")) - } - return details, nil -} - -// shouldVote returns if the provided tx should be voted on to determine if it -// can be accepted. If the tx can be vacuously accepted, the tx will be accepted -// and will therefore not be valid to be voted on. -func (dg *Directed) shouldVote(ctx context.Context, tx Tx) (bool, error) { - if dg.Issued(tx) { - // If the tx was previously inserted, it shouldn't be re-inserted. - return false, nil - } - - txID := tx.ID() - - // Notify the metrics that this transaction is being issued. - if tx.HasWhitelist() { - dg.ctx.Log.Info("whitelist tx successfully issued", - zap.Stringer("txID", txID), - ) - dg.whitelistTxLatency.Issued(txID, dg.pollNumber) - } else { - dg.Latency.Issued(txID, dg.pollNumber) - } - - // If this tx has inputs, it needs to be voted on before being accepted. 
- if inputs := tx.InputIDs(); len(inputs) != 0 { - return true, nil - } - - // Since this tx doesn't have any inputs, it's impossible for there to be - // any conflicting transactions. Therefore, this transaction is treated as - // vacuously accepted and doesn't need to be voted on. - - // Notify those listening for accepted txs if the transaction has a binary - // format. - txBytes := tx.Bytes() - txBytesLen := len(txBytes) - if txBytesLen > 0 { - // Note that TxAcceptor.Accept must be called before tx.Accept to honor - // Acceptor.Accept's invariant. - if err := dg.ctx.TxAcceptor.Accept(dg.ctx, txID, txBytes); err != nil { - return false, err - } - } - - if err := tx.Accept(ctx); err != nil { - return false, err - } - - // Notify the metrics that this transaction was accepted. - dg.Latency.Accepted(txID, dg.pollNumber, txBytesLen) - return false, nil -} - -func (dg *Directed) IsVirtuous(tx Tx) bool { - txID := tx.ID() - // If the tx is currently processing, we should just return whether it was - // registered as rogue or not. - if node, exists := dg.txs[txID]; exists { - return !node.rogue - } - - // The tx isn't processing, so we need to check if it conflicts with any of - // the other txs that are currently processing. - for _, utxoID := range tx.InputIDs() { - if _, exists := dg.utxos[utxoID]; exists { - // A currently processing tx names the same input as the provided - // tx, so the provided tx would be rogue. - return false - } - } - - // This tx is virtuous as far as this consensus instance knows. - return true -} - -func (dg *Directed) Conflicts(tx Tx) set.Set[ids.ID] { - var conflicts set.Set[ids.ID] - if node, exists := dg.txs[tx.ID()]; exists { - // If the tx is currently processing, the conflicting txs are just the - // union of the inbound conflicts and the outbound conflicts. - // - // Only bother to call Union, which will do a memory allocation, if ins - // or outs are non-empty. 
- if node.ins.Len() > 0 || node.outs.Len() > 0 { - conflicts.Union(node.ins) - conflicts.Union(node.outs) - } - } else { - // If the tx isn't currently processing, the conflicting txs are the - // union of all the txs that spend an input that this tx spends. - for _, inputID := range tx.InputIDs() { - if spends, exists := dg.utxos[inputID]; exists { - conflicts.Union(spends) - } - } - } - return conflicts -} - -func (dg *Directed) Add(ctx context.Context, tx Tx) error { - if shouldVote, err := dg.shouldVote(ctx, tx); !shouldVote || err != nil { - return err - } - - txID := tx.ID() - txNode := &directedTx{tx: tx} - - // First check the other whitelist transactions. - for otherID, otherWhitelist := range dg.whitelists { - // [txID] is not whitelisted by [otherWhitelist] - if !otherWhitelist.Contains(txID) { - otherNode, exists := dg.txs[otherID] - if !exists { - // This is not expected to happen. - return fmt.Errorf("whitelist tx %s is not in the graph", otherID) - } - - // The [otherNode] should be preferred over [txNode] because a newly - // issued transaction's confidence is always 0 and ties are broken - // by the issuance order ("other_node" was issued before "tx_node"). - dg.addEdge(txNode, otherNode) - } - } - if tx.HasWhitelist() { - whitelist, err := tx.Whitelist(ctx) - if err != nil { - return err - } - dg.ctx.Log.Info("processing whitelist tx", - zap.Stringer("txID", txID), - ) - - // Find all transactions that are not explicitly whitelisted and mark - // them as conflicting. - for otherID, otherNode := range dg.txs { - // [otherID] is not whitelisted by [whitelist] - if !whitelist.Contains(otherID) { - // The [otherNode] should be preferred over [txNode] because a - // newly issued transaction's confidence is always 0 and ties - // are broken by the issuance order ("other_node" was issued - // before "tx_node"). - dg.addEdge(txNode, otherNode) - } - } - - // Record the whitelist for future calls. 
- dg.whitelists[txID] = whitelist - } - - // For each UTXO consumed by the tx: - // * Add edges between this tx and txs that consume this UTXO - // * Mark this tx as attempting to consume this UTXO - for _, inputID := range tx.InputIDs() { - // Get the set of txs that are currently processing that also consume - // this UTXO - spenders := dg.utxos[inputID] - - // Update txs conflicting with tx to account for its issuance - for conflictIDKey := range spenders { - // Get the node that contains this conflicting tx - conflict, exists := dg.txs[conflictIDKey] - if !exists { - // This is not expected to happen. - return fmt.Errorf("spender tx %s is not in the graph", conflictIDKey) - } - - // Add all the txs that spend this UTXO to this txs conflicts. These - // conflicting txs must be preferred over this tx. We know this - // because this tx currently has a bias of 0 and the tie goes to the - // tx whose bias was updated first. - dg.addEdge(txNode, conflict) - } - - // Add this tx to list of txs consuming the current UTXO - spenders.Add(txID) - - // spenders may be nil initially, so we should re-map the set. - dg.utxos[inputID] = spenders - } - - // Mark this transaction as rogue if it had any conflicts registered above - txNode.rogue = txNode.outs.Len() != 0 - if !txNode.rogue { - // If this tx is currently virtuous, add it to the virtuous sets - dg.virtuous.Add(txID) - dg.virtuousVoting.Add(txID) - - // If a tx is virtuous, it must be preferred. - dg.preferences.Add(txID) - } - - // Add this tx to the set of currently processing txs - dg.txs[txID] = txNode - - // If a tx that this tx depends on is rejected, this tx should also be - // rejected. - err := dg.registerRejector(ctx, tx) - - numVirtuous := dg.virtuous.Len() - dg.numVirtuousTxs.Set(float64(numVirtuous)) - dg.numRogueTxs.Set(float64(len(dg.txs) - numVirtuous)) - return err -} - -// addEdge between the [src] and [dst] txs to represent a conflict. -// -// The edge goes from [src] to [dst]: [src] -> [dst]. 
-// -// It is assumed that this is only called when [src] is being added. Which is -// why only [dst] is removed from the virtuous set and marked as rogue. [src] -// must be marked as rogue externally. -// -// For example: -// - TxA is issued -// - TxB is issued that consumes the same UTXO as TxA. -// - [addEdge(TxB, TxA)] would be called to register the conflict. -func (dg *Directed) addEdge(src, dst *directedTx) { - srcID, dstID := src.tx.ID(), dst.tx.ID() - - // Track the outbound edge from [src] to [dst]. - src.outs.Add(dstID) - - // Because we are adding a conflict, the transaction can't be virtuous. - dg.virtuous.Remove(dstID) - dg.virtuousVoting.Remove(dstID) - dst.rogue = true - - // Track the inbound edge to [dst] from [src]. - dst.ins.Add(srcID) -} - -func (dg *Directed) Remove(ctx context.Context, txID ids.ID) error { - err := dg.reject(ctx, set.Set[ids.ID]{ - txID: struct{}{}, - }) - - numVirtuous := dg.virtuous.Len() - dg.numVirtuousTxs.Set(float64(numVirtuous)) - dg.numRogueTxs.Set(float64(len(dg.txs) - numVirtuous)) - return err -} - -func (dg *Directed) Issued(tx Tx) bool { - // If the tx is either Accepted or Rejected, then it must have been issued - // previously. - if tx.Status().Decided() { - return true - } - - // If the tx is currently processing, then it must have been issued. - _, ok := dg.txs[tx.ID()] - return ok -} - -func (dg *Directed) RecordPoll(ctx context.Context, votes bag.Bag[ids.ID]) (bool, error) { - // Increase the vote ID. This is only updated here and is used to reset the - // confidence values of transactions lazily. - // This is also used to track the number of polls required to accept/reject - // a transaction. - dg.pollNumber++ - - // This flag tracks if the Avalanche instance needs to recompute its - // frontiers. Frontiers only need to be recalculated if preferences change - // or if a tx was accepted. 
- changed := false - - // We only want to iterate over txs that received alpha votes - votes.SetThreshold(dg.params.Alpha) - // Get the set of IDs that meet this alpha threshold - metThreshold := votes.Threshold() - for txIDKey := range metThreshold { - // Get the node this tx represents - txNode, exist := dg.txs[txIDKey] - if !exist { - // This tx may have already been accepted because of its - // dependencies. If this is the case, we can just drop the vote. - continue - } - - txNode.recordSuccessfulPoll(dg.pollNumber) - - // If the tx should be accepted, then we should defer its acceptance - // until its dependencies are decided. If this tx was already marked to - // be accepted, we shouldn't register it again. - if !txNode.pendingAccept && - txNode.finalized(dg.params.BetaVirtuous, dg.params.BetaRogue) { - // Mark that this tx is pending acceptance so acceptance is only - // registered once. - txNode.pendingAccept = true - - if err := dg.registerAcceptor(ctx, txNode.tx); err != nil { - return false, err - } - if dg.errs.Errored() { - return changed, dg.errs.Err - } - } - - if txNode.tx.Status() != choices.Accepted { - // If this tx wasn't accepted, then this instance is only changed if - // preferences changed. - edgeChanged, err := dg.redirectEdges(txNode) - if err != nil { - return false, err - } - changed = edgeChanged || changed - } else { - // By accepting a tx, the state of this instance has changed. 
- changed = true - } - } - - if len(dg.txs) > 0 { - if metThreshold.Len() == 0 { - dg.Failed() - } else { - dg.Successful() - } - } - - numVirtuous := dg.virtuous.Len() - dg.numVirtuousTxs.Set(float64(numVirtuous)) - dg.numRogueTxs.Set(float64(len(dg.txs) - numVirtuous)) - return changed, dg.errs.Err -} - -func (dg *Directed) String() string { - nodes := make([]*snowballNode, 0, len(dg.txs)) - for _, txNode := range dg.txs { - nodes = append(nodes, &snowballNode{ - txID: txNode.tx.ID(), - numSuccessfulPolls: txNode.numSuccessfulPolls, - confidence: txNode.getConfidence(dg.pollNumber), - }) - } - return consensusString(nodes) -} - -// accept the named txID and remove it from the graph -func (dg *Directed) accept(ctx context.Context, txID ids.ID) error { - txNode, exists := dg.txs[txID] - if !exists { - // This is not expected to happen. - return fmt.Errorf("accepted tx %s is not in the graph", txID) - } - // We are accepting the tx, so we should remove the node from the graph. - delete(dg.txs, txID) - delete(dg.whitelists, txID) - - // This tx is consuming all the UTXOs from its inputs, so we can prune them - // all from memory - for _, inputID := range txNode.tx.InputIDs() { - delete(dg.utxos, inputID) - } - - // This tx is now accepted, so it shouldn't be part of the virtuous set or - // the preferred set. Its status as Accepted implies these descriptions. - dg.virtuous.Remove(txID) - dg.preferences.Remove(txID) - - // Reject all the txs that conflicted with this tx. - if err := dg.reject(ctx, txNode.ins); err != nil { - return err - } - // While it is typically true that a tx that is being accepted is preferred, - // it is possible for this to not be the case. 
- if err := dg.reject(ctx, txNode.outs); err != nil { - return err - } - return dg.acceptTx(ctx, txNode.tx) -} - -// reject all the named txIDs and remove them from the graph -func (dg *Directed) reject(ctx context.Context, conflictIDs set.Set[ids.ID]) error { - for conflictKey := range conflictIDs { - conflict, exists := dg.txs[conflictKey] - if !exists { - // Transaction dependencies are cleaned up when the dependency is - // either accepted or rejected. However, a transaction may have - // already been rejected due to a conflict of its own. In this case, - // the transaction has already been cleaned up from memory and there - // is nothing more to be done. - continue - } - // This tx is no longer an option for consuming the UTXOs from its - // inputs, so we should remove their reference to this tx. - for _, inputID := range conflict.tx.InputIDs() { - txIDs, exists := dg.utxos[inputID] - if !exists { - // This UTXO may no longer exist because it was removed due to - // the acceptance of a tx. If that is the case, there is nothing - // left to remove from memory. - continue - } - delete(txIDs, conflictKey) - delete(dg.whitelists, conflictKey) - if txIDs.Len() == 0 { - // If this tx was the last tx consuming this UTXO, we should - // prune the UTXO from memory entirely. - delete(dg.utxos, inputID) - } else { - // If this UTXO still has txs consuming it, then we should make - // sure this update is written back to the UTXOs map. - dg.utxos[inputID] = txIDs - } - } - - // We are rejecting the tx, so we should remove it from the graph - delete(dg.txs, conflictKey) - - // It's statistically unlikely that something being rejected is - // preferred. However, it's possible. Additionally, any transaction may - // be removed at any time. 
- delete(dg.preferences, conflictKey) - delete(dg.virtuous, conflictKey) - delete(dg.virtuousVoting, conflictKey) - - // remove the edge between this node and all its neighbors - dg.removeConflict(conflictKey, conflict.ins) - dg.removeConflict(conflictKey, conflict.outs) - - if err := dg.rejectTx(ctx, conflict.tx); err != nil { - return err - } - } - return nil -} - -// redirectEdges attempts to turn outbound edges into inbound edges if the -// preferences have changed -func (dg *Directed) redirectEdges(tx *directedTx) (bool, error) { - changed := false - for conflictID := range tx.outs { - edgeChanged, err := dg.redirectEdge(tx, conflictID) - if err != nil { - return false, err - } - changed = edgeChanged || changed - } - return changed, nil -} - -// Fixes the direction of the edge between [txNode] and [conflictID] if needed. -// -// It is assumed the edge is currently directed as [txNode] -> [conflictID]. -// -// If [conflictID] has less successful polls than [txNode], the direction of the -// edge will be set to [conflictID] -> [txNode]. -// -// Returns true if the direction was switched. -func (dg *Directed) redirectEdge(txNode *directedTx, conflictID ids.ID) (bool, error) { - conflict, exists := dg.txs[conflictID] - if !exists { - // This is not expected to happen. - return false, fmt.Errorf("redirected tx %s is not in the graph", conflictID) - } - - if txNode.numSuccessfulPolls <= conflict.numSuccessfulPolls { - return false, nil - } - - // Because this tx has a higher preference than the conflicting tx, we must - // ensure that the edge is directed towards this tx. 
- nodeID := txNode.tx.ID() - - // Change the edge direction according to the conflict tx - conflict.ins.Remove(nodeID) - conflict.outs.Add(nodeID) - dg.preferences.Remove(conflictID) // This conflict has an outbound edge - - // Change the edge direction according to this tx - txNode.ins.Add(conflictID) - txNode.outs.Remove(conflictID) - if txNode.outs.Len() == 0 { - // If this tx doesn't have any outbound edges, it's preferred - dg.preferences.Add(nodeID) - } - return true, nil -} - -func (dg *Directed) removeConflict(txIDKey ids.ID, neighborIDs set.Set[ids.ID]) { - for neighborID := range neighborIDs { - neighbor, exists := dg.txs[neighborID] - if !exists { - // If the neighbor doesn't exist, they may have already been - // rejected, so this mapping can be skipped. - continue - } - - // Remove any edge to this tx. - delete(neighbor.ins, txIDKey) - delete(neighbor.outs, txIDKey) - - if neighbor.outs.Len() == 0 { - // If this tx should now be preferred, make sure its status is - // updated. - dg.preferences.Add(neighborID) - } - } -} - -// accept the provided tx. -func (dg *Directed) acceptTx(ctx context.Context, tx Tx) error { - txID := tx.ID() - dg.ctx.Log.Trace("accepting transaction", - zap.Stringer("txID", txID), - ) - - // Notify those listening that this tx has been accepted if the transaction - // has a binary format. - txBytes := tx.Bytes() - txBytesLen := len(txBytes) - if txBytesLen > 0 { - // Note that TxAcceptor.Accept must be called before tx.Accept to honor - // Acceptor.Accept's invariant. 
- if err := dg.ctx.TxAcceptor.Accept(dg.ctx, txID, txBytes); err != nil { - return err - } - } - - if err := tx.Accept(ctx); err != nil { - return err - } - - // Update the metrics to account for this transaction's acceptance - if tx.HasWhitelist() { - dg.ctx.Log.Info("whitelist tx accepted", - zap.Stringer("txID", txID), - ) - dg.whitelistTxLatency.Accepted(txID, dg.pollNumber, txBytesLen) - } else { - // just regular tx - dg.Latency.Accepted(txID, dg.pollNumber, txBytesLen) - } - - // If there is a tx that was accepted pending on this tx, the ancestor - // should be notified that it doesn't need to block on this tx anymore. - dg.pendingAccept.Fulfill(ctx, txID) - // If there is a tx that was issued pending on this tx, the ancestor tx - // doesn't need to be rejected because of this tx. - dg.pendingReject.Abandon(ctx, txID) - - return nil -} - -// reject the provided tx. -func (dg *Directed) rejectTx(ctx context.Context, tx Tx) error { - txID := tx.ID() - dg.ctx.Log.Trace("rejecting transaction", - zap.String("reason", "conflicting acceptance"), - zap.Stringer("txID", txID), - ) - - // Reject is called before notifying the IPC so that rejections that - // cause fatal errors aren't sent to an IPC peer. - if err := tx.Reject(ctx); err != nil { - return err - } - - // Update the metrics to account for this transaction's rejection - if tx.HasWhitelist() { - dg.ctx.Log.Info("whitelist tx rejected", - zap.Stringer("txID", txID), - ) - dg.whitelistTxLatency.Rejected(txID, dg.pollNumber, len(tx.Bytes())) - } else { - dg.Latency.Rejected(txID, dg.pollNumber, len(tx.Bytes())) - } - - // If there is a tx that was accepted pending on this tx, the ancestor tx - // can't be accepted. - dg.pendingAccept.Abandon(ctx, txID) - // If there is a tx that was issued pending on this tx, the ancestor tx must - // be rejected. - dg.pendingReject.Fulfill(ctx, txID) - return nil -} - -// registerAcceptor attempts to accept this tx once all its dependencies are -// accepted. 
If all the dependencies are already accepted, this function will -// immediately accept the tx. -func (dg *Directed) registerAcceptor(ctx context.Context, tx Tx) error { - txID := tx.ID() - - toAccept := &acceptor{ - g: dg, - errs: &dg.errs, - txID: txID, - } - - deps, err := tx.Dependencies() - if err != nil { - return err - } - for _, dependency := range deps { - if dependency.Status() != choices.Accepted { - // If the dependency isn't accepted, then it must be processing. - // This tx should be accepted after this tx is accepted. Note that - // the dependencies can't already be rejected, because it is assumed - // that this tx is currently considered valid. - toAccept.deps.Add(dependency.ID()) - } - } - - // This tx is no longer being voted on, so we remove it from the voting set. - // This ensures that virtuous txs built on top of rogue txs don't force the - // node to treat the rogue tx as virtuous. - dg.virtuousVoting.Remove(txID) - dg.pendingAccept.Register(ctx, toAccept) - return nil -} - -// registerRejector rejects this tx if any of its dependencies are rejected. -func (dg *Directed) registerRejector(ctx context.Context, tx Tx) error { - // If a tx that this tx depends on is rejected, this tx should also be - // rejected. - toReject := &rejector{ - g: dg, - errs: &dg.errs, - txID: tx.ID(), - } - - // Register all of this txs dependencies as possibilities to reject this tx. - deps, err := tx.Dependencies() - if err != nil { - return err - } - for _, dependency := range deps { - if dependency.Status() != choices.Accepted { - // If the dependency isn't accepted, then it must be processing. So, - // this tx should be rejected if any of these processing txs are - // rejected. Note that the dependencies can't already be rejected, - // because it is assumed that this tx is currently considered valid. 
- toReject.deps.Add(dependency.ID()) - } - } - - // Register these dependencies - dg.pendingReject.Register(ctx, toReject) - return nil -} diff --git a/avalanchego/snow/consensus/snowstorm/directed_test.go b/avalanchego/snow/consensus/snowstorm/directed_test.go deleted file mode 100644 index b94f4f69..00000000 --- a/avalanchego/snow/consensus/snowstorm/directed_test.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package snowstorm - -import ( - "testing" -) - -func TestDirectedConsensus(t *testing.T) { - runConsensusTests(t, DirectedFactory{}, "DG") -} diff --git a/avalanchego/snow/consensus/snowstorm/factory.go b/avalanchego/snow/consensus/snowstorm/factory.go deleted file mode 100644 index 5dfe91b0..00000000 --- a/avalanchego/snow/consensus/snowstorm/factory.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package snowstorm - -// Factory returns new instances of Consensus -type Factory interface { - New() Consensus -} diff --git a/avalanchego/snow/consensus/snowstorm/network_test.go b/avalanchego/snow/consensus/snowstorm/network_test.go deleted file mode 100644 index 5ac31a00..00000000 --- a/avalanchego/snow/consensus/snowstorm/network_test.go +++ /dev/null @@ -1,204 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package snowstorm - -import ( - "context" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/snow/choices" - "github.com/ava-labs/avalanchego/utils/bag" - "github.com/ava-labs/avalanchego/utils/sampler" - - sbcon "github.com/ava-labs/avalanchego/snow/consensus/snowball" -) - -type Network struct { - params sbcon.Parameters - consumers []*TestTx - nodeTxs []map[ids.ID]*TestTx - nodes, running []Consensus -} - -func (n *Network) shuffleConsumers() { - s := sampler.NewUniform() - _ = s.Initialize(uint64(len(n.consumers))) - indices, _ := s.Sample(len(n.consumers)) - consumers := []*TestTx(nil) - for _, index := range indices { - consumers = append(consumers, n.consumers[int(index)]) - } - n.consumers = consumers -} - -func (n *Network) Initialize( - params sbcon.Parameters, - numColors, - colorsPerConsumer, - maxInputConflicts int, -) { - n.params = params - - idCount := uint64(0) - - colorMap := map[ids.ID]int{} - colors := []ids.ID{} - for i := 0; i < numColors; i++ { - idCount++ - color := ids.Empty.Prefix(idCount) - colorMap[color] = i - colors = append(colors, color) - } - - count := map[ids.ID]int{} - for len(colors) > 0 { - selected := []ids.ID{} - s := sampler.NewUniform() - _ = s.Initialize(uint64(len(colors))) - size := len(colors) - if size > colorsPerConsumer { - size = colorsPerConsumer - } - indices, _ := s.Sample(size) - for _, index := range indices { - selected = append(selected, colors[int(index)]) - } - - for _, sID := range selected { - newCount := count[sID] + 1 - count[sID] = newCount - if newCount >= maxInputConflicts { - i := colorMap[sID] - e := len(colorMap) - 1 - - eID := colors[e] - - colorMap[eID] = i - colors[i] = eID - - delete(colorMap, sID) - colors = colors[:e] - } - } - - idCount++ - tx := &TestTx{TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(idCount), - StatusV: choices.Processing, - }} - tx.InputIDsV = append(tx.InputIDsV, selected...) 
- - n.consumers = append(n.consumers, tx) - } -} - -func (n *Network) AddNode(cg Consensus) error { - if err := cg.Initialize(snow.DefaultConsensusContextTest(), n.params); err != nil { - return err - } - - n.shuffleConsumers() - - txs := map[ids.ID]*TestTx{} - for _, tx := range n.consumers { - newTx := &TestTx{ - TestDecidable: choices.TestDecidable{ - IDV: tx.ID(), - StatusV: choices.Processing, - }, - InputIDsV: tx.InputIDs(), - } - txs[newTx.ID()] = newTx - - if err := cg.Add(context.Background(), newTx); err != nil { - return err - } - } - - n.nodeTxs = append(n.nodeTxs, txs) - n.nodes = append(n.nodes, cg) - n.running = append(n.running, cg) - - return nil -} - -func (n *Network) Finalized() bool { - return len(n.running) == 0 -} - -func (n *Network) Round() error { - if len(n.running) == 0 { - return nil - } - - s := sampler.NewUniform() - _ = s.Initialize(uint64(len(n.running))) - runningInd, _ := s.Next() - - running := n.running[runningInd] - - _ = s.Initialize(uint64(len(n.nodes))) - indices, _ := s.Sample(n.params.K) - sampledColors := bag.Bag[ids.ID]{} - sampledColors.SetThreshold(n.params.Alpha) - for _, index := range indices { - peer := n.nodes[int(index)] - peerTxs := n.nodeTxs[int(index)] - - preferences := peer.Preferences() - for _, color := range preferences.List() { - sampledColors.Add(color) - } - for _, tx := range peerTxs { - if tx.Status() == choices.Accepted { - sampledColors.Add(tx.ID()) - } - } - } - - if _, err := running.RecordPoll(context.Background(), sampledColors); err != nil { - return err - } - - // If this node has been finalized, remove it from the poller - if running.Finalized() { - newSize := len(n.running) - 1 - n.running[runningInd] = n.running[newSize] - n.running = n.running[:newSize] - } - return nil -} - -func (n *Network) Disagreement() bool { - for _, color := range n.consumers { - accepted := false - rejected := false - for _, nodeTx := range n.nodeTxs { - tx := nodeTx[color.ID()] - accepted = accepted || 
tx.Status() == choices.Accepted - rejected = rejected || tx.Status() == choices.Rejected - } - if accepted && rejected { - return true - } - } - return false -} - -func (n *Network) Agreement() bool { - statuses := map[ids.ID]choices.Status{} - for _, color := range n.consumers { - for _, nodeTx := range n.nodeTxs { - colorID := color.ID() - tx := nodeTx[colorID] - prevStatus, exists := statuses[colorID] - if exists && prevStatus != tx.Status() { - return false - } - statuses[colorID] = tx.Status() - } - } - return !n.Disagreement() -} diff --git a/avalanchego/snow/consensus/snowstorm/rejector.go b/avalanchego/snow/consensus/snowstorm/rejector.go deleted file mode 100644 index a7e03ceb..00000000 --- a/avalanchego/snow/consensus/snowstorm/rejector.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package snowstorm - -import ( - "context" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/events" - "github.com/ava-labs/avalanchego/utils/set" - "github.com/ava-labs/avalanchego/utils/wrappers" -) - -var _ events.Blockable = (*rejector)(nil) - -type rejector struct { - g *Directed - errs *wrappers.Errs - deps set.Set[ids.ID] - rejected bool // true if the tx has been rejected - txID ids.ID -} - -func (r *rejector) Dependencies() set.Set[ids.ID] { - return r.deps -} - -func (r *rejector) Fulfill(ctx context.Context, _ ids.ID) { - if r.rejected || r.errs.Errored() { - return - } - r.rejected = true - asSet := set.NewSet[ids.ID](1) - asSet.Add(r.txID) - r.errs.Add(r.g.reject(ctx, asSet)) -} - -func (*rejector) Abandon(context.Context, ids.ID) {} - -func (*rejector) Update(context.Context) {} diff --git a/avalanchego/snow/consensus/snowstorm/snowball.go b/avalanchego/snow/consensus/snowstorm/snowball.go deleted file mode 100644 index e7fa9d09..00000000 --- a/avalanchego/snow/consensus/snowstorm/snowball.go +++ /dev/null @@ -1,51 +0,0 @@ -// 
Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package snowstorm - -type snowball struct { - // numSuccessfulPolls is the number of times this choice was the successful - // result of a network poll - numSuccessfulPolls int - - // confidence is the number of consecutive times this choice was the - // successful result of a network poll as of [lastVote] - confidence int - - // lastVote is the last poll number that this choice was included in a - // successful network poll - lastVote uint64 - - // rogue identifies if there is a known conflict with this choice - rogue bool -} - -func (sb *snowball) getConfidence(currentVote uint64) int { - if sb.lastVote != currentVote { - return 0 - } - return sb.confidence -} - -func (sb *snowball) recordSuccessfulPoll(currentVote uint64) { - // If this choice wasn't voted for during the last poll, the confidence - // should have been reset during the last poll. So, we reset it now. - if sb.lastVote+1 != currentVote { - sb.confidence = 0 - } - - // This choice was voted for in this poll. Mark it as such. - sb.lastVote = currentVote - - // An affirmative vote increases both the snowball and snowflake counters. - sb.numSuccessfulPolls++ - sb.confidence++ -} - -func (sb *snowball) finalized(betaVirtuous, betaRogue int) bool { - // This choice is finalized if the snowflake counter is at least - // [betaRogue]. If there are no known conflicts with this operation, it can - // be accepted with a snowflake counter of at least [betaVirtuous]. - return (!sb.rogue && sb.confidence >= betaVirtuous) || - sb.confidence >= betaRogue -} diff --git a/avalanchego/snow/consensus/snowstorm/stringer.go b/avalanchego/snow/consensus/snowstorm/stringer.go deleted file mode 100644 index b60162e4..00000000 --- a/avalanchego/snow/consensus/snowstorm/stringer.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
-// See the file LICENSE for licensing terms. - -package snowstorm - -import ( - "fmt" - "strings" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils" - "github.com/ava-labs/avalanchego/utils/formatting" -) - -var _ utils.Sortable[*snowballNode] = (*snowballNode)(nil) - -type snowballNode struct { - txID ids.ID - numSuccessfulPolls int - confidence int -} - -func (sb *snowballNode) String() string { - return fmt.Sprintf( - "SB(NumSuccessfulPolls = %d, Confidence = %d)", - sb.numSuccessfulPolls, - sb.confidence) -} - -func (sb *snowballNode) Less(other *snowballNode) bool { - return sb.txID.Less(other.txID) -} - -// consensusString converts a list of snowball nodes into a human-readable -// string. -func consensusString(nodes []*snowballNode) string { - // Sort the nodes so that the string representation is canonical - utils.Sort(nodes) - - sb := strings.Builder{} - sb.WriteString("DG(") - - format := fmt.Sprintf( - "\n Choice[%s] = ID: %%50s %%s", - formatting.IntFormat(len(nodes)-1)) - for i, txNode := range nodes { - sb.WriteString(fmt.Sprintf(format, i, txNode.txID, txNode)) - } - - if len(nodes) > 0 { - sb.WriteString("\n") - } - sb.WriteString(")") - return sb.String() -} diff --git a/avalanchego/snow/consensus/snowstorm/stringer_test.go b/avalanchego/snow/consensus/snowstorm/stringer_test.go deleted file mode 100644 index 44c7d2aa..00000000 --- a/avalanchego/snow/consensus/snowstorm/stringer_test.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package snowstorm - -import ( - "testing" - - "github.com/stretchr/testify/require" - - "github.com/ava-labs/avalanchego/ids" -) - -func TestSnowballNodeLess(t *testing.T) { - require := require.New(t) - - node1 := &snowballNode{ - txID: ids.ID{}, - } - node2 := &snowballNode{ - txID: ids.ID{}, - } - require.False(node1.Less(node2)) - require.False(node2.Less(node1)) - - node1 = &snowballNode{ - txID: ids.ID{1}, - } - node2 = &snowballNode{ - txID: ids.ID{}, - } - require.False(node1.Less(node2)) - require.True(node2.Less(node1)) - - node1 = &snowballNode{ - txID: ids.ID{1}, - } - node2 = &snowballNode{ - txID: ids.ID{1}, - } - require.False(node1.Less(node2)) - require.False(node2.Less(node1)) - - node1 = &snowballNode{ - txID: ids.ID{1}, - } - node2 = &snowballNode{ - txID: ids.ID{1, 2}, - } - require.True(node1.Less(node2)) - require.False(node2.Less(node1)) -} diff --git a/avalanchego/snow/consensus/snowstorm/test_tx.go b/avalanchego/snow/consensus/snowstorm/test_tx.go index 477f7438..a8b514c8 100644 --- a/avalanchego/snow/consensus/snowstorm/test_tx.go +++ b/avalanchego/snow/consensus/snowstorm/test_tx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package snowstorm @@ -17,32 +17,16 @@ var _ Tx = (*TestTx)(nil) type TestTx struct { choices.TestDecidable - DependenciesV []Tx + DependenciesV set.Set[ids.ID] DependenciesErrV error - InputIDsV []ids.ID - HasWhitelistV bool - WhitelistV set.Set[ids.ID] - WhitelistErrV error VerifyV error BytesV []byte } -func (t *TestTx) Dependencies() ([]Tx, error) { +func (t *TestTx) MissingDependencies() (set.Set[ids.ID], error) { return t.DependenciesV, t.DependenciesErrV } -func (t *TestTx) InputIDs() []ids.ID { - return t.InputIDsV -} - -func (t *TestTx) HasWhitelist() bool { - return t.HasWhitelistV -} - -func (t *TestTx) Whitelist(context.Context) (set.Set[ids.ID], error) { - return t.WhitelistV, t.WhitelistErrV -} - func (t *TestTx) Verify(context.Context) error { return t.VerifyV } diff --git a/avalanchego/snow/consensus/snowstorm/tx.go b/avalanchego/snow/consensus/snowstorm/tx.go index 5a31181c..cc1cf649 100644 --- a/avalanchego/snow/consensus/snowstorm/tx.go +++ b/avalanchego/snow/consensus/snowstorm/tx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowstorm @@ -11,37 +11,13 @@ import ( "github.com/ava-labs/avalanchego/utils/set" ) -// Whitelister defines the interface for specifying whitelisted operations. -type Whitelister interface { - // Returns [true] if the underlying instance does implement whitelisted - // conflicts. - HasWhitelist() bool - - // Whitelist returns the set of transaction IDs that are explicitly - // whitelisted. Transactions that are not explicitly whitelisted are - // considered conflicting. - Whitelist(context.Context) (set.Set[ids.ID], error) -} - // Tx consumes state. type Tx interface { choices.Decidable - Whitelister - // Dependencies is a list of transactions upon which this transaction - // depends. 
Each element of Dependencies must be verified before Verify is - // called on this transaction. - // - // Similarly, each element of Dependencies must be accepted before this - // transaction is accepted. - Dependencies() ([]Tx, error) - - // InputIDs is a set where each element is the ID of a piece of state that - // will be consumed if this transaction is accepted. - // - // In the context of a UTXO-based payments system, for example, this would - // be the IDs of the UTXOs consumed by this transaction - InputIDs() []ids.ID + // MissingDependencies returns the set of transactions that must be accepted + // before this transaction is accepted. + MissingDependencies() (set.Set[ids.ID], error) // Verify that the state transition this transaction would make if it were // accepted is valid. If the state transition is invalid, a non-nil error diff --git a/avalanchego/snow/context.go b/avalanchego/snow/context.go index c89c2dd0..2cbbedb3 100644 --- a/avalanchego/snow/context.go +++ b/avalanchego/snow/context.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package snow @@ -96,33 +96,3 @@ type ConsensusContext struct { // True iff this chain is currently state-syncing StateSyncing utils.Atomic[bool] } - -func DefaultContextTest() *Context { - sk, err := bls.NewSecretKey() - if err != nil { - panic(err) - } - pk := bls.PublicFromSecretKey(sk) - return &Context{ - NetworkID: 0, - SubnetID: ids.Empty, - ChainID: ids.Empty, - NodeID: ids.EmptyNodeID, - PublicKey: pk, - Log: logging.NoLog{}, - BCLookup: ids.NewAliaser(), - Metrics: metrics.NewOptionalGatherer(), - ChainDataDir: "", - } -} - -func DefaultConsensusContextTest() *ConsensusContext { - return &ConsensusContext{ - Context: DefaultContextTest(), - Registerer: prometheus.NewRegistry(), - AvalancheRegisterer: prometheus.NewRegistry(), - BlockAcceptor: noOpAcceptor{}, - TxAcceptor: noOpAcceptor{}, - VertexAcceptor: noOpAcceptor{}, - } -} diff --git a/avalanchego/snow/engine/avalanche/bootstrap/bootstrapper.go b/avalanchego/snow/engine/avalanche/bootstrap/bootstrapper.go index 1e222f8e..cd530d1c 100644 --- a/avalanchego/snow/engine/avalanche/bootstrap/bootstrapper.go +++ b/avalanchego/snow/engine/avalanche/bootstrap/bootstrapper.go @@ -1,14 +1,11 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package bootstrap import ( "context" - "errors" "fmt" - "math" - "time" "go.uber.org/zap" @@ -18,8 +15,10 @@ import ( "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/avalanche" - "github.com/ava-labs/avalanchego/snow/engine/avalanche/vertex" "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/utils/bimap" + "github.com/ava-labs/avalanchego/utils/heap" + "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/version" ) @@ -32,99 +31,61 @@ const ( stripeWidth = 5 cacheSize = 100000 - // Parameters for delaying bootstrapping to avoid potential CPU burns - bootstrappingDelay = 10 * time.Second -) - -var ( - _ common.BootstrapableEngine = (*bootstrapper)(nil) + // statusUpdateFrequency is how many containers should be processed between + // logs + statusUpdateFrequency = 5000 - errUnexpectedTimeout = errors.New("unexpected timeout fired") + // maxOutstandingGetAncestorsRequests is the maximum number of GetAncestors + // sent but not yet responded to/failed + maxOutstandingGetAncestorsRequests = 10 ) +var _ common.BootstrapableEngine = (*bootstrapper)(nil) + func New( - ctx context.Context, config Config, - startAvalancheConsensus func(ctx context.Context, lastReqID uint32) error, - startSnowmanBootstrapping func(ctx context.Context, lastReqID uint32) error, + onFinished func(ctx context.Context, lastReqID uint32) error, ) (common.BootstrapableEngine, error) { b := &bootstrapper{ Config: config, StateSummaryFrontierHandler: common.NewNoOpStateSummaryFrontierHandler(config.Ctx.Log), AcceptedStateSummaryHandler: common.NewNoOpAcceptedStateSummaryHandler(config.Ctx.Log), + AcceptedFrontierHandler: common.NewNoOpAcceptedFrontierHandler(config.Ctx.Log), + AcceptedHandler: common.NewNoOpAcceptedHandler(config.Ctx.Log), PutHandler: common.NewNoOpPutHandler(config.Ctx.Log), QueryHandler: 
common.NewNoOpQueryHandler(config.Ctx.Log), ChitsHandler: common.NewNoOpChitsHandler(config.Ctx.Log), AppHandler: config.VM, - processedCache: &cache.LRU[ids.ID, struct{}]{Size: cacheSize}, - Fetcher: common.Fetcher{ - OnFinished: func(ctx context.Context, lastReqID uint32) error { - linearized, err := config.Manager.StopVertexAccepted(ctx) - if err != nil { - return err - } - if !linearized { - return startAvalancheConsensus(ctx, lastReqID) - } - - // Invariant: edge will only be the stop vertex after its - // acceptance. - edge := config.Manager.Edge(ctx) - stopVertexID := edge[0] - if err := config.VM.Linearize(ctx, stopVertexID); err != nil { - return err - } - return startSnowmanBootstrapping(ctx, lastReqID) - }, - }, - executedStateTransitions: math.MaxInt32, - } - - if err := b.metrics.Initialize("bs", config.Ctx.AvalancheRegisterer); err != nil { - return nil, err - } + outstandingRequests: bimap.New[common.Request, ids.ID](), - if err := b.VtxBlocked.SetParser(ctx, &vtxParser{ - log: config.Ctx.Log, - numAccepted: b.numAcceptedVts, - numDropped: b.numDroppedVts, - manager: b.Manager, - }); err != nil { - return nil, err - } - - if err := b.TxBlocked.SetParser(&txParser{ - log: config.Ctx.Log, - numAccepted: b.numAcceptedTxs, - numDropped: b.numDroppedTxs, - vm: b.VM, - }); err != nil { - return nil, err + processedCache: &cache.LRU[ids.ID, struct{}]{Size: cacheSize}, + onFinished: onFinished, } - - config.Config.Bootstrapable = b - b.Bootstrapper = common.NewCommonBootstrapper(config.Config) - return b, nil + return b, b.metrics.Initialize("bs", config.Ctx.AvalancheRegisterer) } +// Note: To align with the Snowman invariant, it should be guaranteed the VM is +// not used until after the bootstrapper has been Started. 
type bootstrapper struct { Config + common.Halter // list of NoOpsHandler for messages dropped by bootstrapper common.StateSummaryFrontierHandler common.AcceptedStateSummaryHandler + common.AcceptedFrontierHandler + common.AcceptedHandler common.PutHandler common.QueryHandler common.ChitsHandler common.AppHandler - common.Bootstrapper - common.Fetcher metrics - started bool + // tracks which validators were asked for which containers in which requests + outstandingRequests *bimap.BiMap[common.Request, ids.ID] // IDs of vertices that we will send a GetAncestors request for once we are // not at the max number of outstanding requests @@ -132,13 +93,22 @@ type bootstrapper struct { // Contains IDs of vertices that have recently been processed processedCache *cache.LRU[ids.ID, struct{}] - // number of state transitions executed - executedStateTransitions int - awaitingTimeout bool + // Tracks the last requestID that was used in a request + requestID uint32 + + // Called when bootstrapping is done on a specific chain + onFinished func(ctx context.Context, lastReqID uint32) error } -func (b *bootstrapper) Clear() error { +func (b *bootstrapper) Context() *snow.ConsensusContext { + return b.Ctx +} + +func (b *bootstrapper) Clear(context.Context) error { + b.Ctx.Lock.Lock() + defer b.Ctx.Lock.Unlock() + if err := b.VtxBlocked.Clear(); err != nil { return err } @@ -173,7 +143,10 @@ func (b *bootstrapper) Ancestors(ctx context.Context, nodeID ids.NodeID, request vtxs = vtxs[:b.Config.AncestorsMaxContainersReceived] } - requestedVtxID, requested := b.OutstandingRequests.Remove(nodeID, requestID) + requestedVtxID, requested := b.outstandingRequests.DeleteKey(common.Request{ + NodeID: nodeID, + RequestID: requestID, + }) vtx, err := b.Manager.ParseVtx(ctx, vtxs[0]) // first vertex should be the one we requested in GetAncestors request if err != nil { if !requested { @@ -184,19 +157,22 @@ func (b *bootstrapper) Ancestors(ctx context.Context, nodeID ids.NodeID, request ) return 
nil } - b.Ctx.Log.Debug("failed to parse requested vertex", - zap.Stringer("nodeID", nodeID), - zap.Uint32("requestID", requestID), - zap.Stringer("vtxID", requestedVtxID), - zap.Error(err), - ) - b.Ctx.Log.Verbo("failed to parse requested vertex", - zap.Stringer("nodeID", nodeID), - zap.Uint32("requestID", requestID), - zap.Stringer("vtxID", requestedVtxID), - zap.Binary("vtxBytes", vtxs[0]), - zap.Error(err), - ) + if b.Ctx.Log.Enabled(logging.Verbo) { + b.Ctx.Log.Verbo("failed to parse requested vertex", + zap.Stringer("nodeID", nodeID), + zap.Uint32("requestID", requestID), + zap.Stringer("vtxID", requestedVtxID), + zap.Binary("vtxBytes", vtxs[0]), + zap.Error(err), + ) + } else { + b.Ctx.Log.Debug("failed to parse requested vertex", + zap.Stringer("nodeID", nodeID), + zap.Uint32("requestID", requestID), + zap.Stringer("vtxID", requestedVtxID), + zap.Error(err), + ) + } return b.fetch(ctx, requestedVtxID) } @@ -210,7 +186,7 @@ func (b *bootstrapper) Ancestors(ctx context.Context, nodeID ids.NodeID, request ) return b.fetch(ctx, requestedVtxID) } - if !requested && !b.OutstandingRequests.Contains(vtxID) && !b.needToFetch.Contains(vtxID) { + if !requested && !b.outstandingRequests.HasValue(vtxID) && !b.needToFetch.Contains(vtxID) { b.Ctx.Log.Debug("received un-needed vertex", zap.Stringer("nodeID", nodeID), zap.Uint32("requestID", requestID), @@ -277,7 +253,10 @@ func (b *bootstrapper) Ancestors(ctx context.Context, nodeID ids.NodeID, request } func (b *bootstrapper) GetAncestorsFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32) error { - vtxID, ok := b.OutstandingRequests.Remove(nodeID, requestID) + vtxID, ok := b.outstandingRequests.DeleteKey(common.Request{ + NodeID: nodeID, + RequestID: requestID, + }) if !ok { b.Ctx.Log.Debug("skipping GetAncestorsFailed call", zap.String("reason", "no matching outstanding request"), @@ -299,16 +278,7 @@ func (b *bootstrapper) Connected( return err } - if err := b.StartupTracker.Connected(ctx, nodeID, 
nodeVersion); err != nil { - return err - } - - if b.started || !b.StartupTracker.ShouldStart() { - return nil - } - - b.started = true - return b.Startup(ctx) + return b.StartupTracker.Connected(ctx, nodeID, nodeVersion) } func (b *bootstrapper) Disconnected(ctx context.Context, nodeID ids.NodeID) error { @@ -319,16 +289,8 @@ func (b *bootstrapper) Disconnected(ctx context.Context, nodeID ids.NodeID) erro return b.StartupTracker.Disconnected(ctx, nodeID) } -func (b *bootstrapper) Timeout(ctx context.Context) error { - if !b.awaitingTimeout { - return errUnexpectedTimeout - } - b.awaitingTimeout = false - - if !b.Config.BootstrapTracker.IsBootstrapped() { - return b.Restart(ctx, true) - } - return b.OnFinished(ctx, b.Config.SharedCfg.RequestID) +func (*bootstrapper) Timeout(context.Context) error { + return nil } func (*bootstrapper) Gossip(context.Context) error { @@ -337,6 +299,10 @@ func (*bootstrapper) Gossip(context.Context) error { func (b *bootstrapper) Shutdown(ctx context.Context) error { b.Ctx.Log.Info("shutting down bootstrapper") + + b.Ctx.Lock.Lock() + defer b.Ctx.Lock.Unlock() + return b.VM.Shutdown(ctx) } @@ -356,17 +322,71 @@ func (b *bootstrapper) Start(ctx context.Context, startReqID uint32) error { err) } - b.Config.SharedCfg.RequestID = startReqID + if err := b.VtxBlocked.SetParser(ctx, &vtxParser{ + log: b.Ctx.Log, + numAccepted: b.numAcceptedVts, + numDropped: b.numDroppedVts, + manager: b.Manager, + }); err != nil { + return err + } - if !b.StartupTracker.ShouldStart() { - return nil + if err := b.TxBlocked.SetParser(&txParser{ + log: b.Ctx.Log, + numAccepted: b.numAcceptedTxs, + numDropped: b.numDroppedTxs, + vm: b.VM, + }); err != nil { + return err + } + + b.requestID = startReqID + + // If the network was already linearized, don't attempt to linearize it + // again. 
+ linearized, err := b.Manager.StopVertexAccepted(ctx) + if err != nil { + return fmt.Errorf("failed to get linearization status: %w", err) + } + if linearized { + return b.startSyncing(ctx, nil) } - b.started = true - return b.Startup(ctx) + // If a stop vertex is well known, accept that. + if b.Config.StopVertexID != ids.Empty { + b.Ctx.Log.Info("using well known stop vertex", + zap.Stringer("vtxID", b.Config.StopVertexID), + ) + + return b.startSyncing(ctx, []ids.ID{b.Config.StopVertexID}) + } + + // If a stop vertex isn't well known, treat the current state as the final + // DAG state. + // + // Note: This is used to linearize networks that were created after the + // linearization occurred. + edge := b.Manager.Edge(ctx) + stopVertex, err := b.Manager.BuildStopVtx(ctx, edge) + if err != nil { + return fmt.Errorf("failed to create stop vertex: %w", err) + } + if err := stopVertex.Accept(ctx); err != nil { + return fmt.Errorf("failed to accept stop vertex: %w", err) + } + + stopVertexID := stopVertex.ID() + b.Ctx.Log.Info("generated stop vertex", + zap.Stringer("vtxID", stopVertexID), + ) + + return b.startSyncing(ctx, nil) } func (b *bootstrapper) HealthCheck(ctx context.Context) (interface{}, error) { + b.Ctx.Lock.Lock() + defer b.Ctx.Lock.Unlock() + vmIntf, vmErr := b.VM.HealthCheck(ctx) intf := map[string]interface{}{ "consensus": struct{}{}, @@ -375,21 +395,16 @@ func (b *bootstrapper) HealthCheck(ctx context.Context) (interface{}, error) { return intf, vmErr } -func (b *bootstrapper) GetVM() common.VM { - return b.VM -} - // Add the vertices in [vtxIDs] to the set of vertices that we need to fetch, // and then fetch vertices (and their ancestors) until either there are no more // to fetch or we are at the maximum number of outstanding requests. func (b *bootstrapper) fetch(ctx context.Context, vtxIDs ...ids.ID) error { b.needToFetch.Add(vtxIDs...) 
- for b.needToFetch.Len() > 0 && b.OutstandingRequests.Len() < common.MaxOutstandingGetAncestorsRequests { - vtxID := b.needToFetch.CappedList(1)[0] - b.needToFetch.Remove(vtxID) + for b.needToFetch.Len() > 0 && b.outstandingRequests.Len() < maxOutstandingGetAncestorsRequests { + vtxID, _ := b.needToFetch.Pop() // Length checked in predicate above // Make sure we haven't already requested this vertex - if b.OutstandingRequests.Contains(vtxID) { + if b.outstandingRequests.HasValue(vtxID) { continue } @@ -398,29 +413,36 @@ func (b *bootstrapper) fetch(ctx context.Context, vtxIDs ...ids.ID) error { continue } - validatorIDs, err := b.Config.Beacons.Sample(1) // validator to send request to + validatorIDs, err := b.Config.Beacons.Sample(b.Ctx.SubnetID, 1) // validator to send request to if err != nil { return fmt.Errorf("dropping request for %s as there are no validators", vtxID) } validatorID := validatorIDs[0] - b.Config.SharedCfg.RequestID++ + b.requestID++ - b.OutstandingRequests.Add(validatorID, b.Config.SharedCfg.RequestID, vtxID) - b.Config.Sender.SendGetAncestors(ctx, validatorID, b.Config.SharedCfg.RequestID, vtxID) // request vertex and ancestors + b.outstandingRequests.Put( + common.Request{ + NodeID: validatorID, + RequestID: b.requestID, + }, + vtxID, + ) + b.Config.Sender.SendGetAncestors(ctx, validatorID, b.requestID, vtxID) // request vertex and ancestors } return b.checkFinish(ctx) } // Process the vertices in [vtxs]. func (b *bootstrapper) process(ctx context.Context, vtxs ...avalanche.Vertex) error { - // Vertices that we need to process. Store them in a heap for deduplication - // and so we always process vertices further down in the DAG first. This helps - // to reduce the number of repeated DAG traversals. - toProcess := vertex.NewHeap() + // Vertices that we need to process prioritized by vertices that are unknown + // or the furthest down the DAG. 
Unknown vertices are prioritized to ensure + // that once we have made it below a certain height in DAG traversal we do + // not need to reset and repeat DAG traversals. + toProcess := heap.NewMap[ids.ID, avalanche.Vertex](vertexLess) for _, vtx := range vtxs { vtxID := vtx.ID() if _, ok := b.processedCache.Get(vtxID); !ok { // only process a vertex if we haven't already - toProcess.Push(vtx) + _, _ = toProcess.Push(vtxID, vtx) } else { b.VtxBlocked.RemoveMissingID(vtxID) } @@ -429,13 +451,15 @@ func (b *bootstrapper) process(ctx context.Context, vtxs ...avalanche.Vertex) er vtxHeightSet := set.Set[ids.ID]{} prevHeight := uint64(0) - for toProcess.Len() > 0 { // While there are unprocessed vertices + for { if b.Halted() { return nil } - vtx := toProcess.Pop() // Get an unknown vertex or one furthest down the DAG - vtxID := vtx.ID() + vtxID, vtx, ok := toProcess.Pop() + if !ok { + break + } switch vtx.Status() { case choices.Unknown: @@ -487,16 +511,10 @@ func (b *bootstrapper) process(ctx context.Context, vtxs ...avalanche.Vertex) er b.numFetchedVts.Inc() verticesFetchedSoFar := b.VtxBlocked.Jobs.PendingJobs() - if verticesFetchedSoFar%common.StatusUpdateFrequency == 0 { // Periodically print progress - if !b.Config.SharedCfg.Restarted { - b.Ctx.Log.Info("fetched vertices", - zap.Uint64("numVerticesFetched", verticesFetchedSoFar), - ) - } else { - b.Ctx.Log.Debug("fetched vertices", - zap.Uint64("numVerticesFetched", verticesFetchedSoFar), - ) - } + if verticesFetchedSoFar%statusUpdateFrequency == 0 { // Periodically print progress + b.Ctx.Log.Info("fetched vertices", + zap.Uint64("numVerticesFetched", verticesFetchedSoFar), + ) } parents, err := vtx.Parents() @@ -507,7 +525,7 @@ func (b *bootstrapper) process(ctx context.Context, vtxs ...avalanche.Vertex) er parentID := parent.ID() if _, ok := b.processedCache.Get(parentID); !ok { // But only if we haven't processed the parent if !vtxHeightSet.Contains(parentID) { - toProcess.Push(parent) + 
toProcess.Push(parentID, parent) } } } @@ -539,8 +557,8 @@ func (b *bootstrapper) process(ctx context.Context, vtxs ...avalanche.Vertex) er return b.fetch(ctx) } -// ForceAccepted starts bootstrapping. Process the vertices in [accepterContainerIDs]. -func (b *bootstrapper) ForceAccepted(ctx context.Context, acceptedContainerIDs []ids.ID) error { +// startSyncing starts bootstrapping. Process the vertices in [accepterContainerIDs]. +func (b *bootstrapper) startSyncing(ctx context.Context, acceptedContainerIDs []ids.ID) error { pendingContainerIDs := b.VtxBlocked.MissingIDs() // Append the list of accepted container IDs to pendingContainerIDs to ensure // we iterate over every container that must be traversed. @@ -568,85 +586,65 @@ func (b *bootstrapper) ForceAccepted(ctx context.Context, acceptedContainerIDs [ // checkFinish repeatedly executes pending transactions and requests new frontier blocks until there aren't any new ones // after which it finishes the bootstrap process func (b *bootstrapper) checkFinish(ctx context.Context) error { - // If there are outstanding requests for vertices or we still need to fetch vertices, we can't finish - pendingJobs := b.VtxBlocked.MissingIDs() - if b.IsBootstrapped() || len(pendingJobs) > 0 || b.awaitingTimeout { + // If we still need to fetch vertices, we can't finish + if len(b.VtxBlocked.MissingIDs()) > 0 { return nil } - if !b.Config.SharedCfg.Restarted { - b.Ctx.Log.Info("executing transactions") - } else { - b.Ctx.Log.Debug("executing transactions") - } - + b.Ctx.Log.Info("executing transactions") _, err := b.TxBlocked.ExecuteAll( ctx, b.Config.Ctx, b, - b.Config.SharedCfg.Restarted, + false, b.Ctx.TxAcceptor, ) if err != nil || b.Halted() { return err } - if !b.Config.SharedCfg.Restarted { - b.Ctx.Log.Info("executing vertices") - } else { - b.Ctx.Log.Debug("executing vertices") - } - - executedVts, err := b.VtxBlocked.ExecuteAll( + b.Ctx.Log.Info("executing vertices") + _, err = b.VtxBlocked.ExecuteAll( ctx, 
b.Config.Ctx, b, - b.Config.SharedCfg.Restarted, + false, b.Ctx.VertexAcceptor, ) if err != nil || b.Halted() { return err } - // If the chain is linearized, we should immediately move on to start - // bootstrapping snowman. - linearized, err := b.Manager.StopVertexAccepted(ctx) - if err != nil { + // Invariant: edge will only be the stop vertex + edge := b.Manager.Edge(ctx) + stopVertexID := edge[0] + if err := b.VM.Linearize(ctx, stopVertexID); err != nil { return err } - if linearized { - b.processedCache.Flush() - return b.OnFinished(ctx, b.Config.SharedCfg.RequestID) - } - previouslyExecuted := b.executedStateTransitions - b.executedStateTransitions = executedVts + b.processedCache.Flush() + return b.onFinished(ctx, b.requestID) +} - // Note that executedVts < c*previouslyExecuted is enforced so that the - // bootstrapping process will terminate even as new vertices are being - // issued. - if executedVts > 0 && executedVts < previouslyExecuted/2 && b.Config.RetryBootstrap { - b.Ctx.Log.Debug("checking for more vertices before finishing bootstrapping") - return b.Restart(ctx, true) +// A vertex is less than another vertex if it is unknown. Ties are broken by +// prioritizing vertices that have a greater height. +func vertexLess(i, j avalanche.Vertex) bool { + if !i.Status().Fetched() { + return true + } + if !j.Status().Fetched() { + return false } - // Notify the subnet that this chain is synced - b.Config.BootstrapTracker.Bootstrapped(b.Ctx.ChainID) - b.processedCache.Flush() - - // If the subnet hasn't finished bootstrapping, this chain should remain - // syncing. - if !b.Config.BootstrapTracker.IsBootstrapped() { - if !b.Config.SharedCfg.Restarted { - b.Ctx.Log.Info("waiting for the remaining chains in this subnet to finish syncing") - } else { - b.Ctx.Log.Debug("waiting for the remaining chains in this subnet to finish syncing") - } - // Restart bootstrapping after [bootstrappingDelay] to keep up to date - // on the latest tip. 
- b.Config.Timer.RegisterTimeout(bootstrappingDelay) - b.awaitingTimeout = true - return nil + // Treat errors on retrieving the height as if the vertex is not fetched + heightI, errI := i.Height() + if errI != nil { + return true + } + heightJ, errJ := j.Height() + if errJ != nil { + return false } - return b.OnFinished(ctx, b.Config.SharedCfg.RequestID) + + return heightI > heightJ } diff --git a/avalanchego/snow/engine/avalanche/bootstrap/bootstrapper_test.go b/avalanchego/snow/engine/avalanche/bootstrap/bootstrapper_test.go index bc1d58cf..133e9051 100644 --- a/avalanchego/snow/engine/avalanche/bootstrap/bootstrapper_test.go +++ b/avalanchego/snow/engine/avalanche/bootstrap/bootstrapper_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package bootstrap @@ -8,6 +8,9 @@ import ( "context" "errors" "testing" + "time" + + "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/database/prefixdb" @@ -22,7 +25,10 @@ import ( "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/engine/common/queue" "github.com/ava-labs/avalanchego/snow/engine/common/tracker" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/set" ) var ( @@ -31,86 +37,74 @@ var ( errUnknownTx = errors.New("unknown tx") ) -func noopStarter(context.Context, uint32) error { +type testTx struct { + snowstorm.Tx + + tx *snowstorm.TestTx +} + +func (t *testTx) Accept(ctx context.Context) error { + if err := t.Tx.Accept(ctx); err != nil { + return err + } + t.tx.DependenciesV = nil return nil } func newConfig(t *testing.T) (Config, ids.NodeID, *common.SenderTest, *vertex.TestManager, *vertex.TestVM) { - ctx := 
snow.DefaultConsensusContextTest() + require := require.New(t) + + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) - peers := validators.NewSet() + vdrs := validators.NewManager() db := memdb.New() sender := &common.SenderTest{T: t} manager := vertex.NewTestManager(t) vm := &vertex.TestVM{} vm.T = t - isBootstrapped := false - bootstrapTracker := &common.BootstrapTrackerTest{ - T: t, - IsBootstrappedF: func() bool { - return isBootstrapped - }, - BootstrappedF: func(ids.ID) { - isBootstrapped = true - }, - } - sender.Default(true) manager.Default(true) vm.Default(true) - sender.CantSendGetAcceptedFrontier = false - peer := ids.GenerateTestNodeID() - if err := peers.Add(peer, nil, ids.Empty, 1); err != nil { - t.Fatal(err) - } + require.NoError(vdrs.AddStaker(constants.PrimaryNetworkID, peer, nil, ids.Empty, 1)) vtxBlocker, err := queue.NewWithMissing(prefixdb.New([]byte("vtx"), db), "vtx", ctx.AvalancheRegisterer) - if err != nil { - t.Fatal(err) - } + require.NoError(err) + txBlocker, err := queue.New(prefixdb.New([]byte("tx"), db), "tx", ctx.AvalancheRegisterer) - if err != nil { - t.Fatal(err) - } + require.NoError(err) peerTracker := tracker.NewPeers() - startupTracker := tracker.NewStartup(peerTracker, peers.Weight()/2+1) - peers.RegisterCallbackListener(startupTracker) + totalWeight, err := vdrs.TotalWeight(constants.PrimaryNetworkID) + require.NoError(err) + startupTracker := tracker.NewStartup(peerTracker, totalWeight/2+1) + vdrs.RegisterCallbackListener(constants.PrimaryNetworkID, startupTracker) + + avaGetHandler, err := getter.New(manager, sender, ctx.Log, time.Second, 2000, ctx.AvalancheRegisterer) + require.NoError(err) - commonConfig := common.Config{ + return Config{ + AllGetsServer: avaGetHandler, Ctx: ctx, - Beacons: peers, - SampleK: peers.Len(), - Alpha: peers.Weight()/2 + 1, + Beacons: vdrs, StartupTracker: startupTracker, Sender: sender, - BootstrapTracker: bootstrapTracker, - Timer: 
&common.TimerTest{}, - AncestorsMaxContainersSent: 2000, AncestorsMaxContainersReceived: 2000, - SharedCfg: &common.SharedConfig{}, - } - - avaGetHandler, err := getter.New(manager, commonConfig) - if err != nil { - t.Fatal(err) - } - - return Config{ - Config: commonConfig, - AllGetsServer: avaGetHandler, - VtxBlocked: vtxBlocker, - TxBlocked: txBlocker, - Manager: manager, - VM: vm, + VtxBlocked: vtxBlocker, + TxBlocked: txBlocker, + Manager: manager, + VM: vm, }, peer, sender, manager, vm } -// Three vertices in the accepted frontier. None have parents. No need to fetch anything +// Three vertices in the accepted frontier. None have parents. No need to fetch +// anything func TestBootstrapperSingleFrontier(t *testing.T) { + require := require.New(t) + config, _, _, manager, vm := newConfig(t) vtxID0 := ids.Empty.Prefix(0) @@ -134,20 +128,26 @@ func TestBootstrapperSingleFrontier(t *testing.T) { IDV: vtxID1, StatusV: choices.Processing, }, - HeightV: 0, + ParentsV: []avalanche.Vertex{ + vtx0, + }, + HeightV: 1, BytesV: vtxBytes1, } - vtx2 := &avalanche.TestVertex{ + vtx2 := &avalanche.TestVertex{ // vtx2 is the stop vertex TestDecidable: choices.TestDecidable{ IDV: vtxID2, StatusV: choices.Processing, }, - HeightV: 0, + ParentsV: []avalanche.Vertex{ + vtx1, + }, + HeightV: 2, BytesV: vtxBytes2, } + config.StopVertexID = vtxID2 bs, err := New( - context.Background(), config, func(context.Context, uint32) error { config.Ctx.State.Set(snow.EngineState{ @@ -156,18 +156,8 @@ func TestBootstrapperSingleFrontier(t *testing.T) { }) return nil }, - noopStarter, ) - if err != nil { - t.Fatal(err) - } - - vm.CantSetState = false - if err := bs.Start(context.Background(), 0); err != nil { - t.Fatal(err) - } - - acceptedIDs := []ids.ID{vtxID0, vtxID1, vtxID2} + require.NoError(err) manager.GetVtxF = func(_ context.Context, vtxID ids.ID) (avalanche.Vertex, error) { switch vtxID { @@ -178,8 +168,8 @@ func TestBootstrapperSingleFrontier(t *testing.T) { case vtxID2: return vtx2, 
nil default: - t.Fatal(errUnknownVertex) - panic(errUnknownVertex) + require.FailNow(errUnknownVertex.Error()) + return nil, errUnknownVertex } } @@ -191,31 +181,40 @@ func TestBootstrapperSingleFrontier(t *testing.T) { return vtx1, nil case bytes.Equal(vtxBytes, vtxBytes2): return vtx2, nil + default: + require.FailNow(errParsedUnknownVertex.Error()) + return nil, errParsedUnknownVertex } - t.Fatal(errParsedUnknownVertex) - return nil, errParsedUnknownVertex } - if err := bs.ForceAccepted(context.Background(), acceptedIDs); err != nil { - t.Fatal(err) + manager.StopVertexAcceptedF = func(context.Context) (bool, error) { + return vtx2.Status() == choices.Accepted, nil } - switch { - case config.Ctx.State.Get().State != snow.NormalOp: - t.Fatalf("Bootstrapping should have finished") - case vtx0.Status() != choices.Accepted: - t.Fatalf("Vertex should be accepted") - case vtx1.Status() != choices.Accepted: - t.Fatalf("Vertex should be accepted") - case vtx2.Status() != choices.Accepted: - t.Fatalf("Vertex should be accepted") + manager.EdgeF = func(context.Context) []ids.ID { + require.Equal(choices.Accepted, vtx2.Status()) + return []ids.ID{vtxID2} } + + vm.LinearizeF = func(_ context.Context, stopVertexID ids.ID) error { + require.Equal(vtxID2, stopVertexID) + return nil + } + + vm.CantSetState = false + require.NoError(bs.Start(context.Background(), 0)) + require.Equal(snow.NormalOp, config.Ctx.State.Get().State) + require.Equal(choices.Accepted, vtx0.Status()) + require.Equal(choices.Accepted, vtx1.Status()) + require.Equal(choices.Accepted, vtx2.Status()) } // Accepted frontier has one vertex, which has one vertex as a dependency. -// Requests again and gets an unexpected vertex. -// Requests again and gets the expected vertex and an additional vertex that should not be accepted. +// Requests again and gets an unexpected vertex. Requests again and gets the +// expected vertex and an additional vertex that should not be accepted. 
func TestBootstrapperByzantineResponses(t *testing.T) { + require := require.New(t) + config, peerID, sender, manager, vm := newConfig(t) vtxID0 := ids.Empty.Prefix(0) @@ -234,7 +233,7 @@ func TestBootstrapperByzantineResponses(t *testing.T) { HeightV: 0, BytesV: vtxBytes0, } - vtx1 := &avalanche.TestVertex{ + vtx1 := &avalanche.TestVertex{ // vtx1 is the stop vertex TestDecidable: choices.TestDecidable{ IDV: vtxID1, StatusV: choices.Processing, @@ -253,8 +252,8 @@ func TestBootstrapperByzantineResponses(t *testing.T) { BytesV: vtxBytes2, } + config.StopVertexID = vtxID1 bs, err := New( - context.Background(), config, func(context.Context, uint32) error { config.Ctx.State.Set(snow.EngineState{ @@ -263,18 +262,8 @@ func TestBootstrapperByzantineResponses(t *testing.T) { }) return nil }, - noopStarter, ) - if err != nil { - t.Fatal(err) - } - - vm.CantSetState = false - if err := bs.Start(context.Background(), 0); err != nil { - t.Fatal(err) - } - - acceptedIDs := []ids.ID{vtxID1} + require.NoError(err) manager.GetVtxF = func(_ context.Context, vtxID ids.ID) (avalanche.Vertex, error) { switch vtxID { @@ -283,21 +272,17 @@ func TestBootstrapperByzantineResponses(t *testing.T) { case vtxID0: return nil, errUnknownVertex default: - t.Fatal(errUnknownVertex) - panic(errUnknownVertex) + require.FailNow(errUnknownVertex.Error()) + return nil, errUnknownVertex } } requestID := new(uint32) reqVtxID := ids.Empty sender.SendGetAncestorsF = func(_ context.Context, vdr ids.NodeID, reqID uint32, vtxID ids.ID) { - switch { - case vdr != peerID: - t.Fatalf("Should have requested vertex from %s, requested from %s", - peerID, vdr) - case vtxID != vtxID0: - t.Fatalf("should have requested vtx0") - } + require.Equal(peerID, vdr) + require.Equal(vtxID0, vtxID) + *requestID = reqID reqVtxID = vtxID } @@ -313,27 +298,19 @@ func TestBootstrapperByzantineResponses(t *testing.T) { case bytes.Equal(vtxBytes, vtxBytes2): vtx2.StatusV = choices.Processing return vtx2, nil + default: + 
require.FailNow(errParsedUnknownVertex.Error()) + return nil, errParsedUnknownVertex } - t.Fatal(errParsedUnknownVertex) - return nil, errParsedUnknownVertex } - if err := bs.ForceAccepted(context.Background(), acceptedIDs); err != nil { // should request vtx0 - t.Fatal(err) - } else if reqVtxID != vtxID0 { - t.Fatalf("should have requested vtxID0 but requested %s", reqVtxID) - } + vm.CantSetState = false + require.NoError(bs.Start(context.Background(), 0)) // should request vtx0 + require.Equal(vtxID0, reqVtxID) oldReqID := *requestID - err = bs.Ancestors(context.Background(), peerID, *requestID, [][]byte{vtxBytes2}) - switch { - case err != nil: // send unexpected vertex - t.Fatal(err) - case *requestID == oldReqID: - t.Fatal("should have issued new request") - case reqVtxID != vtxID0: - t.Fatalf("should have requested vtxID0 but requested %s", reqVtxID) - } + require.NoError(bs.Ancestors(context.Background(), peerID, *requestID, [][]byte{vtxBytes2})) // send unexpected vertex + require.NotEqual(oldReqID, *requestID) // should have sent a new request oldReqID = *requestID manager.GetVtxF = func(_ context.Context, vtxID ids.ID) (avalanche.Vertex, error) { @@ -343,35 +320,38 @@ func TestBootstrapperByzantineResponses(t *testing.T) { case vtxID0: return vtx0, nil default: - t.Fatal(errUnknownVertex) - panic(errUnknownVertex) + require.FailNow(errUnknownVertex.Error()) + return nil, errUnknownVertex } } - if err := bs.Ancestors(context.Background(), peerID, *requestID, [][]byte{vtxBytes0, vtxBytes2}); err != nil { // send expected vertex and vertex that should not be accepted - t.Fatal(err) + manager.StopVertexAcceptedF = func(context.Context) (bool, error) { + return vtx1.Status() == choices.Accepted, nil } - switch { - case *requestID != oldReqID: - t.Fatal("should not have issued new request") - case config.Ctx.State.Get().State != snow.NormalOp: - t.Fatalf("Bootstrapping should have finished") - case vtx0.Status() != choices.Accepted: - t.Fatalf("Vertex should be 
accepted") - case vtx1.Status() != choices.Accepted: - t.Fatalf("Vertex should be accepted") + manager.EdgeF = func(context.Context) []ids.ID { + require.Equal(choices.Accepted, vtx1.Status()) + return []ids.ID{vtxID1} } - if vtx2.Status() == choices.Accepted { - t.Fatalf("Vertex should not have been accepted") + + vm.LinearizeF = func(_ context.Context, stopVertexID ids.ID) error { + require.Equal(vtxID1, stopVertexID) + return nil } + + require.NoError(bs.Ancestors(context.Background(), peerID, *requestID, [][]byte{vtxBytes0, vtxBytes2})) // send expected vertex and vertex that should not be accepted + require.Equal(oldReqID, *requestID) // shouldn't have sent a new request + require.Equal(snow.NormalOp, config.Ctx.State.Get().State) + require.Equal(choices.Accepted, vtx0.Status()) + require.Equal(choices.Accepted, vtx1.Status()) + require.Equal(choices.Processing, vtx2.Status()) } // Vertex has a dependency and tx has a dependency func TestBootstrapperTxDependencies(t *testing.T) { - config, peerID, sender, manager, vm := newConfig(t) + require := require.New(t) - utxos := []ids.ID{ids.GenerateTestID(), ids.GenerateTestID()} + config, peerID, sender, manager, vm := newConfig(t) txID0 := ids.GenerateTestID() txID1 := ids.GenerateTestID() @@ -379,14 +359,13 @@ func TestBootstrapperTxDependencies(t *testing.T) { txBytes0 := []byte{0} txBytes1 := []byte{1} - tx0 := &snowstorm.TestTx{ + innerTx0 := &snowstorm.TestTx{ TestDecidable: choices.TestDecidable{ IDV: txID0, StatusV: choices.Processing, }, BytesV: txBytes0, } - tx0.InputIDsV = append(tx0.InputIDsV, utxos[0]) // Depends on tx0 tx1 := &snowstorm.TestTx{ @@ -394,10 +373,14 @@ func TestBootstrapperTxDependencies(t *testing.T) { IDV: txID1, StatusV: choices.Processing, }, - DependenciesV: []snowstorm.Tx{tx0}, + DependenciesV: set.Of(innerTx0.IDV), BytesV: txBytes1, } - tx1.InputIDsV = append(tx1.InputIDsV, utxos[1]) + + tx0 := &testTx{ + Tx: innerTx0, + tx: tx1, + } vtxID0 := ids.GenerateTestID() vtxID1 := 
ids.GenerateTestID() @@ -424,7 +407,7 @@ func TestBootstrapperTxDependencies(t *testing.T) { TxsV: []snowstorm.Tx{tx1}, BytesV: vtxBytes0, } - vtx1 := &avalanche.TestVertex{ + vtx1 := &avalanche.TestVertex{ // vtx1 is the stop vertex TestDecidable: choices.TestDecidable{ IDV: vtxID1, StatusV: choices.Processing, @@ -435,8 +418,8 @@ func TestBootstrapperTxDependencies(t *testing.T) { BytesV: vtxBytes1, } + config.StopVertexID = vtxID1 bs, err := New( - context.Background(), config, func(context.Context, uint32) error { config.Ctx.State.Set(snow.EngineState{ @@ -445,18 +428,8 @@ func TestBootstrapperTxDependencies(t *testing.T) { }) return nil }, - noopStarter, ) - if err != nil { - t.Fatal(err) - } - - vm.CantSetState = false - if err := bs.Start(context.Background(), 0); err != nil { - t.Fatal(err) - } - - acceptedIDs := []ids.ID{vtxID1} + require.NoError(err) manager.ParseVtxF = func(_ context.Context, vtxBytes []byte) (avalanche.Vertex, error) { switch { @@ -464,9 +437,10 @@ func TestBootstrapperTxDependencies(t *testing.T) { return vtx1, nil case bytes.Equal(vtxBytes, vtxBytes0): return vtx0, nil + default: + require.FailNow(errParsedUnknownVertex.Error()) + return nil, errParsedUnknownVertex } - t.Fatal(errParsedUnknownVertex) - return nil, errParsedUnknownVertex } manager.GetVtxF = func(_ context.Context, vtxID ids.ID) (avalanche.Vertex, error) { switch vtxID { @@ -475,148 +449,22 @@ func TestBootstrapperTxDependencies(t *testing.T) { case vtxID0: return nil, errUnknownVertex default: - t.Fatal(errUnknownVertex) - panic(errUnknownVertex) + require.FailNow(errUnknownVertex.Error()) + return nil, errUnknownVertex } } reqIDPtr := new(uint32) sender.SendGetAncestorsF = func(_ context.Context, vdr ids.NodeID, reqID uint32, vtxID ids.ID) { - if vdr != peerID { - t.Fatalf("Should have requested vertex from %s, requested from %s", peerID, vdr) - } - switch vtxID { - case vtxID0: - default: - t.Fatal(errUnknownVertex) - } + require.Equal(peerID, vdr) + 
require.Equal(vtxID0, vtxID) *reqIDPtr = reqID } - if err := bs.ForceAccepted(context.Background(), acceptedIDs); err != nil { // should request vtx0 - t.Fatal(err) - } - - manager.ParseVtxF = func(_ context.Context, vtxBytes []byte) (avalanche.Vertex, error) { - switch { - case bytes.Equal(vtxBytes, vtxBytes1): - return vtx1, nil - case bytes.Equal(vtxBytes, vtxBytes0): - vtx0.StatusV = choices.Processing - return vtx0, nil - } - t.Fatal(errParsedUnknownVertex) - return nil, errParsedUnknownVertex - } - - if err := bs.Ancestors(context.Background(), peerID, *reqIDPtr, [][]byte{vtxBytes0}); err != nil { - t.Fatal(err) - } - - if config.Ctx.State.Get().State != snow.NormalOp { - t.Fatalf("Should have finished bootstrapping") - } - if tx0.Status() != choices.Accepted { - t.Fatalf("Tx should be accepted") - } - if tx1.Status() != choices.Accepted { - t.Fatalf("Tx should be accepted") - } - - if vtx0.Status() != choices.Accepted { - t.Fatalf("Vertex should be accepted") - } - if vtx1.Status() != choices.Accepted { - t.Fatalf("Vertex should be accepted") - } -} - -// Unfulfilled tx dependency -func TestBootstrapperMissingTxDependency(t *testing.T) { - config, peerID, sender, manager, vm := newConfig(t) - - utxos := []ids.ID{ids.GenerateTestID(), ids.GenerateTestID()} - - txID0 := ids.GenerateTestID() - txID1 := ids.GenerateTestID() - - txBytes1 := []byte{1} - - tx0 := &snowstorm.TestTx{TestDecidable: choices.TestDecidable{ - IDV: txID0, - StatusV: choices.Unknown, - }} - - tx1 := &snowstorm.TestTx{ - TestDecidable: choices.TestDecidable{ - IDV: txID1, - StatusV: choices.Processing, - }, - DependenciesV: []snowstorm.Tx{tx0}, - BytesV: txBytes1, - } - tx1.InputIDsV = append(tx1.InputIDsV, utxos[1]) - - vtxID0 := ids.GenerateTestID() - vtxID1 := ids.GenerateTestID() - - vtxBytes0 := []byte{2} - vtxBytes1 := []byte{3} - - vtx0 := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: vtxID0, - StatusV: choices.Unknown, - }, - HeightV: 0, - BytesV: vtxBytes0, 
- } - vtx1 := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: vtxID1, - StatusV: choices.Processing, - }, - ParentsV: []avalanche.Vertex{vtx0}, // depends on vtx0 - HeightV: 1, - TxsV: []snowstorm.Tx{tx1}, - BytesV: vtxBytes1, - } - - bs, err := New( - context.Background(), - config, - func(context.Context, uint32) error { - config.Ctx.State.Set(snow.EngineState{ - Type: p2p.EngineType_ENGINE_TYPE_AVALANCHE, - State: snow.NormalOp, - }) - return nil - }, - noopStarter, - ) - if err != nil { - t.Fatal(err) - } - vm.CantSetState = false - if err := bs.Start(context.Background(), 0); err != nil { - t.Fatal(err) - } - - acceptedIDs := []ids.ID{vtxID1} + require.NoError(bs.Start(context.Background(), 0)) - manager.GetVtxF = func(_ context.Context, vtxID ids.ID) (avalanche.Vertex, error) { - switch vtxID { - case vtxID1: - return vtx1, nil - case vtxID0: - return nil, errUnknownVertex - default: - t.Fatal(errUnknownVertex) - panic(errUnknownVertex) - } - } manager.ParseVtxF = func(_ context.Context, vtxBytes []byte) (avalanche.Vertex, error) { switch { case bytes.Equal(vtxBytes, vtxBytes1): @@ -624,53 +472,38 @@ func TestBootstrapperMissingTxDependency(t *testing.T) { case bytes.Equal(vtxBytes, vtxBytes0): vtx0.StatusV = choices.Processing return vtx0, nil - } - t.Fatal(errParsedUnknownVertex) - return nil, errParsedUnknownVertex - } - - reqIDPtr := new(uint32) - sender.SendGetAncestorsF = func(_ context.Context, vdr ids.NodeID, reqID uint32, vtxID ids.ID) { - if vdr != peerID { - t.Fatalf("Should have requested vertex from %s, requested from %s", peerID, vdr) - } - switch { - case vtxID == vtxID0: default: - t.Fatalf("Requested wrong vertex") + require.FailNow(errParsedUnknownVertex.Error()) + return nil, errParsedUnknownVertex } - - *reqIDPtr = reqID } - if err := bs.ForceAccepted(context.Background(), acceptedIDs); err != nil { // should request vtx1 - t.Fatal(err) + manager.StopVertexAcceptedF = func(context.Context) (bool, error) { + return 
vtx1.Status() == choices.Accepted, nil } - if err := bs.Ancestors(context.Background(), peerID, *reqIDPtr, [][]byte{vtxBytes0}); err != nil { - t.Fatal(err) + manager.EdgeF = func(context.Context) []ids.ID { + require.Equal(choices.Accepted, vtx1.Status()) + return []ids.ID{vtxID1} } - if config.Ctx.State.Get().State != snow.NormalOp { - t.Fatalf("Bootstrapping should have finished") - } - if tx0.Status() != choices.Unknown { // never saw this tx - t.Fatalf("Tx should be unknown") - } - if tx1.Status() != choices.Processing { // can't accept because we don't have tx0 - t.Fatalf("Tx should be processing") + vm.LinearizeF = func(_ context.Context, stopVertexID ids.ID) error { + require.Equal(vtxID1, stopVertexID) + return nil } - if vtx0.Status() != choices.Accepted { - t.Fatalf("Vertex should be accepted") - } - if vtx1.Status() != choices.Processing { // can't accept because we don't have tx1 accepted - t.Fatalf("Vertex should be processing") - } + require.NoError(bs.Ancestors(context.Background(), peerID, *reqIDPtr, [][]byte{vtxBytes0})) + require.Equal(snow.NormalOp, config.Ctx.State.Get().State) + require.Equal(choices.Accepted, tx0.Status()) + require.Equal(choices.Accepted, tx1.Status()) + require.Equal(choices.Accepted, vtx0.Status()) + require.Equal(choices.Accepted, vtx1.Status()) } // Ancestors only contains 1 of the two needed vertices; have to issue another GetAncestors func TestBootstrapperIncompleteAncestors(t *testing.T) { + require := require.New(t) + config, peerID, sender, manager, vm := newConfig(t) vtxID0 := ids.Empty.Prefix(0) @@ -698,7 +531,7 @@ func TestBootstrapperIncompleteAncestors(t *testing.T) { HeightV: 1, BytesV: vtxBytes1, } - vtx2 := &avalanche.TestVertex{ + vtx2 := &avalanche.TestVertex{ // vtx2 is the stop vertex TestDecidable: choices.TestDecidable{ IDV: vtxID2, StatusV: choices.Processing, @@ -708,8 +541,8 @@ func TestBootstrapperIncompleteAncestors(t *testing.T) { BytesV: vtxBytes2, } + config.StopVertexID = vtxID2 bs, err := 
New( - context.Background(), config, func(context.Context, uint32) error { config.Ctx.State.Set(snow.EngineState{ @@ -718,18 +551,8 @@ func TestBootstrapperIncompleteAncestors(t *testing.T) { }) return nil }, - noopStarter, ) - if err != nil { - t.Fatal(err) - } - - vm.CantSetState = false - if err := bs.Start(context.Background(), 0); err != nil { - t.Fatal(err) - } - - acceptedIDs := []ids.ID{vtxID2} + require.NoError(err) manager.GetVtxF = func(_ context.Context, vtxID ids.ID) (avalanche.Vertex, error) { switch { @@ -740,8 +563,8 @@ func TestBootstrapperIncompleteAncestors(t *testing.T) { case vtxID == vtxID2: return vtx2, nil default: - t.Fatal(errUnknownVertex) - panic(errUnknownVertex) + require.FailNow(errUnknownVertex.Error()) + return nil, errUnknownVertex } } manager.ParseVtxF = func(_ context.Context, vtxBytes []byte) (avalanche.Vertex, error) { @@ -749,196 +572,65 @@ func TestBootstrapperIncompleteAncestors(t *testing.T) { case bytes.Equal(vtxBytes, vtxBytes0): vtx0.StatusV = choices.Processing return vtx0, nil - case bytes.Equal(vtxBytes, vtxBytes1): vtx1.StatusV = choices.Processing return vtx1, nil case bytes.Equal(vtxBytes, vtxBytes2): return vtx2, nil + default: + require.FailNow(errParsedUnknownVertex.Error()) + return nil, errParsedUnknownVertex } - t.Fatal(errParsedUnknownVertex) - return nil, errParsedUnknownVertex } reqIDPtr := new(uint32) requested := ids.Empty sender.SendGetAncestorsF = func(_ context.Context, vdr ids.NodeID, reqID uint32, vtxID ids.ID) { - if vdr != peerID { - t.Fatalf("Should have requested vertex from %s, requested from %s", peerID, vdr) - } - switch vtxID { - case vtxID1, vtxID0: - default: - t.Fatal(errUnknownVertex) - } + require.Equal(peerID, vdr) + require.Contains([]ids.ID{vtxID1, vtxID0}, vtxID) + *reqIDPtr = reqID requested = vtxID } - if err := bs.ForceAccepted(context.Background(), acceptedIDs); err != nil { // should request vtx1 - t.Fatal(err) - } else if requested != vtxID1 { - t.Fatal("requested wrong vtx") 
- } - - err = bs.Ancestors(context.Background(), peerID, *reqIDPtr, [][]byte{vtxBytes1}) - switch { - case err != nil: // Provide vtx1; should request vtx0 - t.Fatal(err) - case bs.Context().State.Get().State == snow.NormalOp: - t.Fatalf("should not have finished") - case requested != vtxID0: - t.Fatal("should hae requested vtx0") - } - - err = bs.Ancestors(context.Background(), peerID, *reqIDPtr, [][]byte{vtxBytes0}) - switch { - case err != nil: // Provide vtx0; can finish now - t.Fatal(err) - case bs.Context().State.Get().State != snow.NormalOp: - t.Fatal("should have finished") - case vtx0.Status() != choices.Accepted: - t.Fatal("should be accepted") - case vtx1.Status() != choices.Accepted: - t.Fatal("should be accepted") - case vtx2.Status() != choices.Accepted: - t.Fatal("should be accepted") - } -} - -func TestBootstrapperFinalized(t *testing.T) { - config, peerID, sender, manager, vm := newConfig(t) - - vtxID0 := ids.Empty.Prefix(0) - vtxID1 := ids.Empty.Prefix(1) - - vtxBytes0 := []byte{0} - vtxBytes1 := []byte{1} - - vtx0 := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: vtxID0, - StatusV: choices.Unknown, - }, - HeightV: 0, - BytesV: vtxBytes0, - } - vtx1 := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: vtxID1, - StatusV: choices.Unknown, - }, - ParentsV: []avalanche.Vertex{vtx0}, - HeightV: 1, - BytesV: vtxBytes1, - } - - bs, err := New( - context.Background(), - config, - func(context.Context, uint32) error { - config.Ctx.State.Set(snow.EngineState{ - Type: p2p.EngineType_ENGINE_TYPE_AVALANCHE, - State: snow.NormalOp, - }) - return nil - }, - noopStarter, - ) - if err != nil { - t.Fatal(err) - } - vm.CantSetState = false - if err := bs.Start(context.Background(), 0); err != nil { - t.Fatal(err) - } + require.NoError(bs.Start(context.Background(), 0)) // should request vtx1 + require.Equal(vtxID1, requested) - acceptedIDs := []ids.ID{vtxID0, vtxID1} + require.NoError(bs.Ancestors(context.Background(), 
peerID, *reqIDPtr, [][]byte{vtxBytes1})) // Provide vtx1; should request vtx0 + require.Equal(snow.Bootstrapping, bs.Context().State.Get().State) + require.Equal(vtxID0, requested) - parsedVtx0 := false - parsedVtx1 := false - manager.GetVtxF = func(_ context.Context, vtxID ids.ID) (avalanche.Vertex, error) { - switch vtxID { - case vtxID0: - if parsedVtx0 { - return vtx0, nil - } - return nil, errUnknownVertex - case vtxID1: - if parsedVtx1 { - return vtx1, nil - } - return nil, errUnknownVertex - default: - t.Fatal(errUnknownVertex) - panic(errUnknownVertex) - } - } - manager.ParseVtxF = func(_ context.Context, vtxBytes []byte) (avalanche.Vertex, error) { - switch { - case bytes.Equal(vtxBytes, vtxBytes0): - vtx0.StatusV = choices.Processing - parsedVtx0 = true - return vtx0, nil - case bytes.Equal(vtxBytes, vtxBytes1): - vtx1.StatusV = choices.Processing - parsedVtx1 = true - return vtx1, nil - } - t.Fatal(errUnknownVertex) - return nil, errUnknownVertex + manager.StopVertexAcceptedF = func(context.Context) (bool, error) { + return vtx2.Status() == choices.Accepted, nil } - requestIDs := map[ids.ID]uint32{} - sender.SendGetAncestorsF = func(_ context.Context, vdr ids.NodeID, reqID uint32, vtxID ids.ID) { - if vdr != peerID { - t.Fatalf("Should have requested block from %s, requested from %s", peerID, vdr) - } - requestIDs[vtxID] = reqID + manager.EdgeF = func(context.Context) []ids.ID { + require.Equal(choices.Accepted, vtx2.Status()) + return []ids.ID{vtxID2} } - if err := bs.ForceAccepted(context.Background(), acceptedIDs); err != nil { // should request vtx0 and vtx1 - t.Fatal(err) + vm.LinearizeF = func(_ context.Context, stopVertexID ids.ID) error { + require.Equal(vtxID2, stopVertexID) + return nil } - reqID, ok := requestIDs[vtxID1] - if !ok { - t.Fatalf("should have requested vtx1") - } - - if err := bs.Ancestors(context.Background(), peerID, reqID, [][]byte{vtxBytes1, vtxBytes0}); err != nil { - t.Fatal(err) - } - - reqID, ok = requestIDs[vtxID0] - if 
!ok { - t.Fatalf("should have requested vtx0") - } - - err = bs.GetAncestorsFailed(context.Background(), peerID, reqID) - switch { - case err != nil: - t.Fatal(err) - case config.Ctx.State.Get().State != snow.NormalOp: - t.Fatalf("Bootstrapping should have finished") - case vtx0.Status() != choices.Accepted: - t.Fatalf("Vertex should be accepted") - case vtx1.Status() != choices.Accepted: - t.Fatalf("Vertex should be accepted") - } + require.NoError(bs.Ancestors(context.Background(), peerID, *reqIDPtr, [][]byte{vtxBytes0})) // Provide vtx0; can finish now + require.Equal(snow.NormalOp, bs.Context().State.Get().State) + require.Equal(choices.Accepted, vtx0.Status()) + require.Equal(choices.Accepted, vtx1.Status()) + require.Equal(choices.Accepted, vtx2.Status()) } -// Test that Ancestors accepts the parents of the first vertex returned -func TestBootstrapperAcceptsAncestorsParents(t *testing.T) { +func TestBootstrapperUnexpectedVertex(t *testing.T) { + require := require.New(t) + config, peerID, sender, manager, vm := newConfig(t) vtxID0 := ids.Empty.Prefix(0) vtxID1 := ids.Empty.Prefix(1) - vtxID2 := ids.Empty.Prefix(2) vtxBytes0 := []byte{0} vtxBytes1 := []byte{1} - vtxBytes2 := []byte{2} vtx0 := &avalanche.TestVertex{ TestDecidable: choices.TestDecidable{ @@ -948,7 +640,7 @@ func TestBootstrapperAcceptsAncestorsParents(t *testing.T) { HeightV: 0, BytesV: vtxBytes0, } - vtx1 := &avalanche.TestVertex{ + vtx1 := &avalanche.TestVertex{ // vtx1 is the stop vertex TestDecidable: choices.TestDecidable{ IDV: vtxID1, StatusV: choices.Unknown, @@ -957,18 +649,9 @@ func TestBootstrapperAcceptsAncestorsParents(t *testing.T) { HeightV: 1, BytesV: vtxBytes1, } - vtx2 := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: vtxID2, - StatusV: choices.Unknown, - }, - ParentsV: []avalanche.Vertex{vtx1}, - HeightV: 2, - BytesV: vtxBytes2, - } + config.StopVertexID = vtxID1 bs, err := New( - context.Background(), config, func(context.Context, uint32) error { 
config.Ctx.State.Set(snow.EngineState{ @@ -977,22 +660,11 @@ func TestBootstrapperAcceptsAncestorsParents(t *testing.T) { }) return nil }, - noopStarter, ) - if err != nil { - t.Fatal(err) - } - - vm.CantSetState = false - if err := bs.Start(context.Background(), 0); err != nil { - t.Fatal(err) - } - - acceptedIDs := []ids.ID{vtxID2} + require.NoError(err) parsedVtx0 := false parsedVtx1 := false - parsedVtx2 := false manager.GetVtxF = func(_ context.Context, vtxID ids.ID) (avalanche.Vertex, error) { switch vtxID { case vtxID0: @@ -1005,203 +677,10 @@ func TestBootstrapperAcceptsAncestorsParents(t *testing.T) { return vtx1, nil } return nil, errUnknownVertex - case vtxID2: - if parsedVtx2 { - return vtx2, nil - } default: - t.Fatal(errUnknownVertex) - panic(errUnknownVertex) - } - return nil, errUnknownVertex - } - manager.ParseVtxF = func(_ context.Context, vtxBytes []byte) (avalanche.Vertex, error) { - switch { - case bytes.Equal(vtxBytes, vtxBytes0): - vtx0.StatusV = choices.Processing - parsedVtx0 = true - return vtx0, nil - case bytes.Equal(vtxBytes, vtxBytes1): - vtx1.StatusV = choices.Processing - parsedVtx1 = true - return vtx1, nil - case bytes.Equal(vtxBytes, vtxBytes2): - vtx2.StatusV = choices.Processing - parsedVtx2 = true - return vtx2, nil - } - t.Fatal(errUnknownVertex) - return nil, errUnknownVertex - } - - requestIDs := map[ids.ID]uint32{} - sender.SendGetAncestorsF = func(_ context.Context, vdr ids.NodeID, reqID uint32, vtxID ids.ID) { - if vdr != peerID { - t.Fatalf("Should have requested block from %s, requested from %s", peerID, vdr) - } - requestIDs[vtxID] = reqID - } - - if err := bs.ForceAccepted(context.Background(), acceptedIDs); err != nil { // should request vtx2 - t.Fatal(err) - } - - reqID, ok := requestIDs[vtxID2] - if !ok { - t.Fatalf("should have requested vtx2") - } - - if err := bs.Ancestors(context.Background(), peerID, reqID, [][]byte{vtxBytes2, vtxBytes1, vtxBytes0}); err != nil { - t.Fatal(err) - } - - switch { - case 
config.Ctx.State.Get().State != snow.NormalOp: - t.Fatalf("Bootstrapping should have finished") - case vtx0.Status() != choices.Accepted: - t.Fatalf("Vertex should be accepted") - case vtx1.Status() != choices.Accepted: - t.Fatalf("Vertex should be accepted") - case vtx2.Status() != choices.Accepted: - t.Fatalf("Vertex should be accepted") - } -} - -func TestRestartBootstrapping(t *testing.T) { - config, peerID, sender, manager, vm := newConfig(t) - - vtxID0 := ids.GenerateTestID() - vtxID1 := ids.GenerateTestID() - vtxID2 := ids.GenerateTestID() - vtxID3 := ids.GenerateTestID() - vtxID4 := ids.GenerateTestID() - vtxID5 := ids.GenerateTestID() - - vtxBytes0 := []byte{0} - vtxBytes1 := []byte{1} - vtxBytes2 := []byte{2} - vtxBytes3 := []byte{3} - vtxBytes4 := []byte{4} - vtxBytes5 := []byte{5} - - vtx0 := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: vtxID0, - StatusV: choices.Unknown, - }, - HeightV: 0, - BytesV: vtxBytes0, - } - vtx1 := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: vtxID1, - StatusV: choices.Unknown, - }, - ParentsV: []avalanche.Vertex{vtx0}, - HeightV: 1, - BytesV: vtxBytes1, - } - vtx2 := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: vtxID2, - StatusV: choices.Unknown, - }, - ParentsV: []avalanche.Vertex{vtx1}, - HeightV: 2, - BytesV: vtxBytes2, - } - vtx3 := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: vtxID3, - StatusV: choices.Unknown, - }, - ParentsV: []avalanche.Vertex{vtx2}, - HeightV: 3, - BytesV: vtxBytes3, - } - vtx4 := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: vtxID4, - StatusV: choices.Unknown, - }, - ParentsV: []avalanche.Vertex{vtx2}, - HeightV: 3, - BytesV: vtxBytes4, - } - vtx5 := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: vtxID5, - StatusV: choices.Unknown, - }, - ParentsV: []avalanche.Vertex{vtx4}, - HeightV: 4, - BytesV: vtxBytes5, - } - - bsIntf, err := New( - 
context.Background(), - config, - func(context.Context, uint32) error { - config.Ctx.State.Set(snow.EngineState{ - Type: p2p.EngineType_ENGINE_TYPE_AVALANCHE, - State: snow.NormalOp, - }) - return nil - }, - noopStarter, - ) - if err != nil { - t.Fatal(err) - } - bs, ok := bsIntf.(*bootstrapper) - if !ok { - t.Fatal("unexpected bootstrapper type") - } - - vm.CantSetState = false - if err := bs.Start(context.Background(), 0); err != nil { - t.Fatal(err) - } - - parsedVtx0 := false - parsedVtx1 := false - parsedVtx2 := false - parsedVtx3 := false - parsedVtx4 := false - parsedVtx5 := false - manager.GetVtxF = func(_ context.Context, vtxID ids.ID) (avalanche.Vertex, error) { - switch vtxID { - case vtxID0: - if parsedVtx0 { - return vtx0, nil - } + require.FailNow(errUnknownVertex.Error()) return nil, errUnknownVertex - case vtxID1: - if parsedVtx1 { - return vtx1, nil - } - return nil, errUnknownVertex - case vtxID2: - if parsedVtx2 { - return vtx2, nil - } - case vtxID3: - if parsedVtx3 { - return vtx3, nil - } - case vtxID4: - if parsedVtx4 { - return vtx4, nil - } - case vtxID5: - if parsedVtx5 { - return vtx5, nil - } - default: - t.Fatal(errUnknownVertex) - panic(errUnknownVertex) } - return nil, errUnknownVertex } manager.ParseVtxF = func(_ context.Context, vtxBytes []byte) (avalanche.Vertex, error) { switch { @@ -1213,114 +692,39 @@ func TestRestartBootstrapping(t *testing.T) { vtx1.StatusV = choices.Processing parsedVtx1 = true return vtx1, nil - case bytes.Equal(vtxBytes, vtxBytes2): - vtx2.StatusV = choices.Processing - parsedVtx2 = true - return vtx2, nil - case bytes.Equal(vtxBytes, vtxBytes3): - vtx3.StatusV = choices.Processing - parsedVtx3 = true - return vtx3, nil - case bytes.Equal(vtxBytes, vtxBytes4): - vtx4.StatusV = choices.Processing - parsedVtx4 = true - return vtx4, nil - case bytes.Equal(vtxBytes, vtxBytes5): - vtx5.StatusV = choices.Processing - parsedVtx5 = true - return vtx5, nil + default: + require.FailNow(errUnknownVertex.Error()) + 
return nil, errUnknownVertex } - t.Fatal(errUnknownVertex) - return nil, errUnknownVertex } requestIDs := map[ids.ID]uint32{} sender.SendGetAncestorsF = func(_ context.Context, vdr ids.NodeID, reqID uint32, vtxID ids.ID) { - if vdr != peerID { - t.Fatalf("Should have requested block from %s, requested from %s", peerID, vdr) - } + require.Equal(peerID, vdr) requestIDs[vtxID] = reqID } - if err := bs.ForceAccepted(context.Background(), []ids.ID{vtxID3, vtxID4}); err != nil { // should request vtx3 and vtx4 - t.Fatal(err) - } - - vtx3ReqID, ok := requestIDs[vtxID3] - if !ok { - t.Fatal("should have requested vtx4") - } - _, ok = requestIDs[vtxID4] - if !ok { - t.Fatal("should have requested vtx4") - } - - if err := bs.Ancestors(context.Background(), peerID, vtx3ReqID, [][]byte{vtxBytes3, vtxBytes2}); err != nil { - t.Fatal(err) - } - - _, ok = requestIDs[vtxID1] - if !ok { - t.Fatal("should have requested vtx1") - } - - if removed := bs.OutstandingRequests.RemoveAny(vtxID4); !removed { - t.Fatal("expected to find outstanding requested for vtx4") - } - - if removed := bs.OutstandingRequests.RemoveAny(vtxID1); !removed { - t.Fatal("expected to find outstanding requested for vtx1") - } - bs.needToFetch.Clear() - requestIDs = map[ids.ID]uint32{} - - if err := bs.ForceAccepted(context.Background(), []ids.ID{vtxID5, vtxID3}); err != nil { - t.Fatal(err) - } - - vtx1ReqID, ok := requestIDs[vtxID1] - if !ok { - t.Fatal("should have re-requested vtx1 from pending on prior run") - } - _, ok = requestIDs[vtxID4] - if !ok { - t.Fatal("should have re-requested vtx4 from pending on prior run") - } - vtx5ReqID, ok := requestIDs[vtxID5] - if !ok { - t.Fatal("should have requested vtx5") - } - if _, ok := requestIDs[vtxID3]; ok { - t.Fatal("should not have re-requested vtx3 since it has been processed") - } + vm.CantSetState = false + require.NoError(bs.Start(context.Background(), 0)) // should request vtx1 + require.Contains(requestIDs, vtxID1) - if err := 
bs.Ancestors(context.Background(), peerID, vtx5ReqID, [][]byte{vtxBytes5, vtxBytes4, vtxBytes2, vtxBytes1}); err != nil { - t.Fatal(err) - } + reqID := requestIDs[vtxID1] + clear(requestIDs) + require.NoError(bs.Ancestors(context.Background(), peerID, reqID, [][]byte{vtxBytes0})) + require.Contains(requestIDs, vtxID1) - _, ok = requestIDs[vtxID0] - if !ok { - t.Fatal("should have requested vtx0 after ancestors ended prior to it") + manager.EdgeF = func(context.Context) []ids.ID { + require.Equal(choices.Accepted, vtx1.Status()) + return []ids.ID{vtxID1} } - if err := bs.Ancestors(context.Background(), peerID, vtx1ReqID, [][]byte{vtxBytes1, vtxBytes0}); err != nil { - t.Fatal(err) + vm.LinearizeF = func(_ context.Context, stopVertexID ids.ID) error { + require.Equal(vtxID1, stopVertexID) + return nil } - switch { - case config.Ctx.State.Get().State != snow.NormalOp: - t.Fatalf("Bootstrapping should have finished") - case vtx0.Status() != choices.Accepted: - t.Fatalf("Vertex should be accepted") - case vtx1.Status() != choices.Accepted: - t.Fatalf("Vertex should be accepted") - case vtx2.Status() != choices.Accepted: - t.Fatalf("Vertex should be accepted") - case vtx3.Status() != choices.Accepted: - t.Fatalf("Vertex should be accepted") - case vtx4.Status() != choices.Accepted: - t.Fatalf("Vertex should be accepted") - case vtx5.Status() != choices.Accepted: - t.Fatalf("Vertex should be accepted") - } + require.NoError(bs.Ancestors(context.Background(), peerID, reqID, [][]byte{vtxBytes1, vtxBytes0})) + require.Equal(choices.Accepted, vtx0.Status()) + require.Equal(choices.Accepted, vtx1.Status()) + require.Equal(snow.NormalOp, config.Ctx.State.Get().State) } diff --git a/avalanchego/snow/engine/avalanche/bootstrap/config.go b/avalanchego/snow/engine/avalanche/bootstrap/config.go index 7cdbc82f..a674c275 100644 --- a/avalanchego/snow/engine/avalanche/bootstrap/config.go +++ b/avalanchego/snow/engine/avalanche/bootstrap/config.go @@ -1,18 +1,31 @@ -// Copyright (C) 
2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package bootstrap import ( + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/engine/avalanche/vertex" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/engine/common/queue" + "github.com/ava-labs/avalanchego/snow/engine/common/tracker" + "github.com/ava-labs/avalanchego/snow/validators" ) type Config struct { - common.Config common.AllGetsServer + Ctx *snow.ConsensusContext + Beacons validators.Manager + + StartupTracker tracker.Startup + Sender common.Sender + + // This node will only consider the first [AncestorsMaxContainersReceived] + // containers in an ancestors message it receives. + AncestorsMaxContainersReceived int + // VtxBlocked tracks operations that are blocked on vertices VtxBlocked *queue.JobsWithMissing // TxBlocked tracks operations that are blocked on transactions @@ -20,4 +33,8 @@ type Config struct { Manager vertex.Manager VM vertex.LinearizableVM + + // If StopVertexID is empty, the engine will generate the stop vertex based + // on the current state. + StopVertexID ids.ID } diff --git a/avalanchego/snow/engine/avalanche/bootstrap/metrics.go b/avalanchego/snow/engine/avalanche/bootstrap/metrics.go index 2033a776..cc357f25 100644 --- a/avalanchego/snow/engine/avalanche/bootstrap/metrics.go +++ b/avalanchego/snow/engine/avalanche/bootstrap/metrics.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package bootstrap @@ -6,7 +6,7 @@ package bootstrap import ( "github.com/prometheus/client_golang/prometheus" - "github.com/ava-labs/avalanchego/utils/wrappers" + "github.com/ava-labs/avalanchego/utils" ) type metrics struct { @@ -50,8 +50,7 @@ func (m *metrics) Initialize( Help: "Number of transactions accepted during bootstrapping", }) - errs := wrappers.Errs{} - errs.Add( + return utils.Err( registerer.Register(m.numFetchedVts), registerer.Register(m.numDroppedVts), registerer.Register(m.numAcceptedVts), @@ -59,5 +58,4 @@ func (m *metrics) Initialize( registerer.Register(m.numDroppedTxs), registerer.Register(m.numAcceptedTxs), ) - return errs.Err } diff --git a/avalanchego/snow/engine/avalanche/bootstrap/tx_job.go b/avalanchego/snow/engine/avalanche/bootstrap/tx_job.go index a9ead4a6..5a2ff3d9 100644 --- a/avalanchego/snow/engine/avalanche/bootstrap/tx_job.go +++ b/avalanchego/snow/engine/avalanche/bootstrap/tx_job.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package bootstrap @@ -9,7 +9,6 @@ import ( "fmt" "github.com/prometheus/client_golang/prometheus" - "go.uber.org/zap" "github.com/ava-labs/avalanchego/ids" @@ -53,31 +52,13 @@ func (t *txJob) ID() ids.ID { } func (t *txJob) MissingDependencies(context.Context) (set.Set[ids.ID], error) { - missing := set.Set[ids.ID]{} - deps, err := t.tx.Dependencies() - if err != nil { - return missing, err - } - for _, dep := range deps { - if dep.Status() != choices.Accepted { - missing.Add(dep.ID()) - } - } - return missing, nil + return t.tx.MissingDependencies() } // Returns true if this tx job has at least 1 missing dependency func (t *txJob) HasMissingDependencies(context.Context) (bool, error) { - deps, err := t.tx.Dependencies() - if err != nil { - return false, err - } - for _, dep := range deps { - if dep.Status() != choices.Accepted { - return true, nil - } - } - return false, nil + deps, err := t.tx.MissingDependencies() + return deps.Len() > 0, err } func (t *txJob) Execute(ctx context.Context) error { diff --git a/avalanchego/snow/engine/avalanche/bootstrap/vertex_job.go b/avalanchego/snow/engine/avalanche/bootstrap/vertex_job.go index 3001ce89..8860b61d 100644 --- a/avalanchego/snow/engine/avalanche/bootstrap/vertex_job.go +++ b/avalanchego/snow/engine/avalanche/bootstrap/vertex_job.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package bootstrap @@ -9,7 +9,6 @@ import ( "fmt" "github.com/prometheus/client_golang/prometheus" - "go.uber.org/zap" "github.com/ava-labs/avalanchego/ids" diff --git a/avalanchego/snow/engine/avalanche/config.go b/avalanchego/snow/engine/avalanche/config.go deleted file mode 100644 index 1bac47f5..00000000 --- a/avalanchego/snow/engine/avalanche/config.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package avalanche - -import ( - "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/snow/consensus/avalanche" - "github.com/ava-labs/avalanchego/snow/engine/avalanche/vertex" - "github.com/ava-labs/avalanchego/snow/engine/common" - "github.com/ava-labs/avalanchego/snow/validators" -) - -// Config wraps all the parameters needed for an avalanche engine -type Config struct { - Ctx *snow.ConsensusContext - common.AllGetsServer - VM vertex.LinearizableVM - Manager vertex.Manager - Sender common.Sender - Validators validators.Set - - Params avalanche.Parameters - Consensus avalanche.Consensus -} diff --git a/avalanchego/snow/engine/avalanche/config_test.go b/avalanchego/snow/engine/avalanche/config_test.go deleted file mode 100644 index c2bd205b..00000000 --- a/avalanchego/snow/engine/avalanche/config_test.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package avalanche - -import ( - "github.com/prometheus/client_golang/prometheus" - - "github.com/ava-labs/avalanchego/database/memdb" - "github.com/ava-labs/avalanchego/snow/consensus/avalanche" - "github.com/ava-labs/avalanchego/snow/consensus/snowball" - "github.com/ava-labs/avalanchego/snow/engine/avalanche/bootstrap" - "github.com/ava-labs/avalanchego/snow/engine/avalanche/vertex" - "github.com/ava-labs/avalanchego/snow/engine/common" - "github.com/ava-labs/avalanchego/snow/engine/common/queue" - "github.com/ava-labs/avalanchego/snow/validators" -) - -func DefaultConfig() (common.Config, bootstrap.Config, Config) { - vtxBlocked, _ := queue.NewWithMissing(memdb.New(), "", prometheus.NewRegistry()) - txBlocked, _ := queue.New(memdb.New(), "", prometheus.NewRegistry()) - - commonCfg := common.DefaultConfigTest() - - bootstrapConfig := bootstrap.Config{ - Config: commonCfg, - VtxBlocked: vtxBlocked, - TxBlocked: txBlocked, - Manager: &vertex.TestManager{}, - VM: &vertex.TestVM{}, - } - - 
engineConfig := Config{ - Ctx: bootstrapConfig.Ctx, - VM: bootstrapConfig.VM, - Manager: bootstrapConfig.Manager, - Sender: bootstrapConfig.Sender, - Validators: validators.NewSet(), - Params: avalanche.Parameters{ - Parameters: snowball.Parameters{ - K: 1, - Alpha: 1, - BetaVirtuous: 1, - BetaRogue: 2, - ConcurrentRepolls: 1, - OptimalProcessing: 100, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - MixedQueryNumPushVdr: 1, - MixedQueryNumPushNonVdr: 1, - }, - Parents: 2, - BatchSize: 1, - }, - Consensus: &avalanche.Topological{}, - } - - return commonCfg, bootstrapConfig, engineConfig -} diff --git a/avalanchego/snow/engine/avalanche/engine.go b/avalanchego/snow/engine/avalanche/engine.go index 02f035be..530a319e 100644 --- a/avalanchego/snow/engine/avalanche/engine.go +++ b/avalanchego/snow/engine/avalanche/engine.go @@ -1,21 +1,71 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avalanche import ( "context" + "errors" - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/consensus/avalanche" + "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/engine/common" ) -// Engine describes the events that can occur on a consensus instance -type Engine interface { - common.Engine +var ( + _ common.Engine = (*engine)(nil) - // GetVtx returns a vertex by its ID. - // Returns an error if unknown. 
- GetVtx(ctx context.Context, vtxID ids.ID) (avalanche.Vertex, error) + errUnexpectedStart = errors.New("unexpectedly started engine") +) + +type engine struct { + common.AllGetsServer + + // list of NoOpsHandler for messages dropped by engine + common.StateSummaryFrontierHandler + common.AcceptedStateSummaryHandler + common.AcceptedFrontierHandler + common.AcceptedHandler + common.AncestorsHandler + common.PutHandler + common.QueryHandler + common.ChitsHandler + common.AppHandler + common.InternalHandler + + ctx *snow.ConsensusContext + vm common.VM +} + +func New( + ctx *snow.ConsensusContext, + gets common.AllGetsServer, + vm common.VM, +) common.Engine { + return &engine{ + AllGetsServer: gets, + StateSummaryFrontierHandler: common.NewNoOpStateSummaryFrontierHandler(ctx.Log), + AcceptedStateSummaryHandler: common.NewNoOpAcceptedStateSummaryHandler(ctx.Log), + AcceptedFrontierHandler: common.NewNoOpAcceptedFrontierHandler(ctx.Log), + AcceptedHandler: common.NewNoOpAcceptedHandler(ctx.Log), + AncestorsHandler: common.NewNoOpAncestorsHandler(ctx.Log), + PutHandler: common.NewNoOpPutHandler(ctx.Log), + QueryHandler: common.NewNoOpQueryHandler(ctx.Log), + ChitsHandler: common.NewNoOpChitsHandler(ctx.Log), + AppHandler: common.NewNoOpAppHandler(ctx.Log), + InternalHandler: common.NewNoOpInternalHandler(ctx.Log), + ctx: ctx, + vm: vm, + } +} + +func (*engine) Start(context.Context, uint32) error { + return errUnexpectedStart +} + +func (e *engine) Context() *snow.ConsensusContext { + return e.ctx +} + +func (*engine) HealthCheck(context.Context) (interface{}, error) { + return nil, nil } diff --git a/avalanchego/snow/engine/avalanche/getter/getter.go b/avalanchego/snow/engine/avalanche/getter/getter.go index cc777bf4..a8e35fdd 100644 --- a/avalanchego/snow/engine/avalanche/getter/getter.go +++ b/avalanchego/snow/engine/avalanche/getter/getter.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. 
All rights reserved. // See the file LICENSE for licensing terms. package getter @@ -7,6 +7,7 @@ import ( "context" "time" + "github.com/prometheus/client_golang/prometheus" "go.uber.org/zap" "github.com/ava-labs/avalanchego/ids" @@ -25,12 +26,20 @@ import ( // Get requests are always served, regardless node state (bootstrapping or normal operations). var _ common.AllGetsServer = (*getter)(nil) -func New(storage vertex.Storage, commonCfg common.Config) (common.AllGetsServer, error) { +func New( + storage vertex.Storage, + sender common.Sender, + log logging.Logger, + maxTimeGetAncestors time.Duration, + maxContainersGetAncestors int, + reg prometheus.Registerer, +) (common.AllGetsServer, error) { gh := &getter{ - storage: storage, - sender: commonCfg.Sender, - cfg: commonCfg, - log: commonCfg.Ctx.Log, + storage: storage, + sender: sender, + log: log, + maxTimeGetAncestors: maxTimeGetAncestors, + maxContainersGetAncestors: maxContainersGetAncestors, } var err error @@ -38,17 +47,18 @@ func New(storage vertex.Storage, commonCfg common.Config) (common.AllGetsServer, "bs", "get_ancestors_vtxs", "vertices fetched in a call to GetAncestors", - commonCfg.Ctx.AvalancheRegisterer, + reg, ) return gh, err } type getter struct { - storage vertex.Storage - sender common.Sender - cfg common.Config + storage vertex.Storage + sender common.Sender + log logging.Logger + maxTimeGetAncestors time.Duration + maxContainersGetAncestors int - log logging.Logger getAncestorsVtxs metric.Averager } @@ -62,7 +72,7 @@ func (gh *getter) GetStateSummaryFrontier(_ context.Context, nodeID ids.NodeID, return nil } -func (gh *getter) GetAcceptedStateSummary(_ context.Context, nodeID ids.NodeID, requestID uint32, _ []uint64) error { +func (gh *getter) GetAcceptedStateSummary(_ context.Context, nodeID ids.NodeID, requestID uint32, _ set.Set[uint64]) error { gh.log.Debug("dropping request", zap.String("reason", "unhandled by this gear"), zap.Stringer("messageOp", message.GetAcceptedStateSummaryOp), 
@@ -72,15 +82,22 @@ func (gh *getter) GetAcceptedStateSummary(_ context.Context, nodeID ids.NodeID, return nil } +// TODO: Remove support for GetAcceptedFrontier messages after v1.11.x is +// activated. func (gh *getter) GetAcceptedFrontier(ctx context.Context, validatorID ids.NodeID, requestID uint32) error { acceptedFrontier := gh.storage.Edge(ctx) - gh.sender.SendAcceptedFrontier(ctx, validatorID, requestID, acceptedFrontier) + // Since all the DAGs are linearized, we only need to return the stop + // vertex. + if len(acceptedFrontier) > 0 { + gh.sender.SendAcceptedFrontier(ctx, validatorID, requestID, acceptedFrontier[0]) + } return nil } -func (gh *getter) GetAccepted(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerIDs []ids.ID) error { - acceptedVtxIDs := make([]ids.ID, 0, len(containerIDs)) - for _, vtxID := range containerIDs { +// TODO: Remove support for GetAccepted messages after v1.11.x is activated. +func (gh *getter) GetAccepted(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerIDs set.Set[ids.ID]) error { + acceptedVtxIDs := make([]ids.ID, 0, containerIDs.Len()) + for vtxID := range containerIDs { if vtx, err := gh.storage.GetVtx(ctx, vtxID); err == nil && vtx.Status() == choices.Accepted { acceptedVtxIDs = append(acceptedVtxIDs, vtxID) } @@ -102,14 +119,13 @@ func (gh *getter) GetAncestors(ctx context.Context, nodeID ids.NodeID, requestID return nil // Don't have the requested vertex. Drop message. 
} - queue := make([]avalanche.Vertex, 1, gh.cfg.AncestorsMaxContainersSent) // for BFS + queue := make([]avalanche.Vertex, 1, gh.maxContainersGetAncestors) // for BFS queue[0] = vertex - ancestorsBytesLen := 0 // length, in bytes, of vertex and its ancestors - ancestorsBytes := make([][]byte, 0, gh.cfg.AncestorsMaxContainersSent) // vertex and its ancestors in BFS order - visited := set.Set[ids.ID]{} // IDs of vertices that have been in queue before - visited.Add(vertex.ID()) + ancestorsBytesLen := 0 // length, in bytes, of vertex and its ancestors + ancestorsBytes := make([][]byte, 0, gh.maxContainersGetAncestors) // vertex and its ancestors in BFS order + visited := set.Of(vertex.ID()) // IDs of vertices that have been in queue before - for len(ancestorsBytes) < gh.cfg.AncestorsMaxContainersSent && len(queue) > 0 && time.Since(startTime) < gh.cfg.MaxTimeGetAncestors { + for len(ancestorsBytes) < gh.maxContainersGetAncestors && len(queue) > 0 && time.Since(startTime) < gh.maxTimeGetAncestors { var vtx avalanche.Vertex vtx, queue = queue[0], queue[1:] // pop vtxBytes := vtx.Bytes() diff --git a/avalanchego/snow/engine/avalanche/getter/getter_test.go b/avalanchego/snow/engine/avalanche/getter/getter_test.go index 613bb7b0..c052d0bc 100644 --- a/avalanchego/snow/engine/avalanche/getter/getter_test.go +++ b/avalanchego/snow/engine/avalanche/getter/getter_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package getter @@ -7,110 +7,66 @@ import ( "context" "errors" "testing" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/avalanche" "github.com/ava-labs/avalanchego/snow/engine/avalanche/vertex" "github.com/ava-labs/avalanchego/snow/engine/common" - "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/set" ) var errUnknownVertex = errors.New("unknown vertex") -func testSetup(t *testing.T) (*vertex.TestManager, *common.SenderTest, common.Config) { - peers := validators.NewSet() - peer := ids.GenerateTestNodeID() - if err := peers.Add(peer, nil, ids.Empty, 1); err != nil { - t.Fatal(err) - } - - sender := &common.SenderTest{T: t} - sender.Default(true) - sender.CantSendGetAcceptedFrontier = false +func newTest(t *testing.T) (common.AllGetsServer, *vertex.TestManager, *common.SenderTest) { + manager := vertex.NewTestManager(t) + manager.Default(true) - isBootstrapped := false - bootstrapTracker := &common.BootstrapTrackerTest{ + sender := &common.SenderTest{ T: t, - IsBootstrappedF: func() bool { - return isBootstrapped - }, - BootstrappedF: func(ids.ID) { - isBootstrapped = true - }, - } - - commonConfig := common.Config{ - Ctx: snow.DefaultConsensusContextTest(), - Beacons: peers, - SampleK: peers.Len(), - Alpha: peers.Weight()/2 + 1, - Sender: sender, - BootstrapTracker: bootstrapTracker, - Timer: &common.TimerTest{}, - AncestorsMaxContainersSent: 2000, - AncestorsMaxContainersReceived: 2000, - SharedCfg: &common.SharedConfig{}, } + sender.Default(true) - manager := vertex.NewTestManager(t) - manager.Default(true) - - return manager, sender, commonConfig + bs, err := New( + manager, + sender, + logging.NoLog{}, + time.Second, + 2000, + 
prometheus.NewRegistry(), + ) + require.NoError(t, err) + + return bs, manager, sender } func TestAcceptedFrontier(t *testing.T) { - manager, sender, config := testSetup(t) - - vtxID0 := ids.GenerateTestID() - vtxID1 := ids.GenerateTestID() - vtxID2 := ids.GenerateTestID() - - bsIntf, err := New(manager, config) - if err != nil { - t.Fatal(err) - } - bs, ok := bsIntf.(*getter) - if !ok { - t.Fatal("Unexpected get handler") - } + require := require.New(t) + bs, manager, sender := newTest(t) + vtxID := ids.GenerateTestID() manager.EdgeF = func(context.Context) []ids.ID { return []ids.ID{ - vtxID0, - vtxID1, + vtxID, } } - var accepted []ids.ID - sender.SendAcceptedFrontierF = func(_ context.Context, _ ids.NodeID, _ uint32, frontier []ids.ID) { - accepted = frontier - } - - if err := bs.GetAcceptedFrontier(context.Background(), ids.EmptyNodeID, 0); err != nil { - t.Fatal(err) - } - - acceptedSet := set.Set[ids.ID]{} - acceptedSet.Add(accepted...) - - manager.EdgeF = nil - - if !acceptedSet.Contains(vtxID0) { - t.Fatalf("Vtx should be accepted") - } - if !acceptedSet.Contains(vtxID1) { - t.Fatalf("Vtx should be accepted") - } - if acceptedSet.Contains(vtxID2) { - t.Fatalf("Vtx shouldn't be accepted") + var accepted ids.ID + sender.SendAcceptedFrontierF = func(_ context.Context, _ ids.NodeID, _ uint32, containerID ids.ID) { + accepted = containerID } + require.NoError(bs.GetAcceptedFrontier(context.Background(), ids.EmptyNodeID, 0)) + require.Equal(vtxID, accepted) } func TestFilterAccepted(t *testing.T) { - manager, sender, config := testSetup(t) + require := require.New(t) + bs, manager, sender := newTest(t) vtxID0 := ids.GenerateTestID() vtxID1 := ids.GenerateTestID() @@ -125,17 +81,6 @@ func TestFilterAccepted(t *testing.T) { StatusV: choices.Accepted, }} - bsIntf, err := New(manager, config) - if err != nil { - t.Fatal(err) - } - bs, ok := bsIntf.(*getter) - if !ok { - t.Fatal("Unexpected get handler") - } - - vtxIDs := []ids.ID{vtxID0, vtxID1, vtxID2} - 
manager.GetVtxF = func(_ context.Context, vtxID ids.ID) (avalanche.Vertex, error) { switch vtxID { case vtxID0: @@ -145,7 +90,7 @@ func TestFilterAccepted(t *testing.T) { case vtxID2: return nil, errUnknownVertex } - t.Fatal(errUnknownVertex) + require.FailNow(errUnknownVertex.Error()) return nil, errUnknownVertex } @@ -154,22 +99,10 @@ func TestFilterAccepted(t *testing.T) { accepted = frontier } - if err := bs.GetAccepted(context.Background(), ids.EmptyNodeID, 0, vtxIDs); err != nil { - t.Fatal(err) - } - - acceptedSet := set.Set[ids.ID]{} - acceptedSet.Add(accepted...) + vtxIDs := set.Of(vtxID0, vtxID1, vtxID2) + require.NoError(bs.GetAccepted(context.Background(), ids.EmptyNodeID, 0, vtxIDs)) - manager.GetVtxF = nil - - if !acceptedSet.Contains(vtxID0) { - t.Fatalf("Vtx should be accepted") - } - if !acceptedSet.Contains(vtxID1) { - t.Fatalf("Vtx should be accepted") - } - if acceptedSet.Contains(vtxID2) { - t.Fatalf("Vtx shouldn't be accepted") - } + require.Contains(accepted, vtxID0) + require.Contains(accepted, vtxID1) + require.NotContains(accepted, vtxID2) } diff --git a/avalanchego/snow/engine/avalanche/issuer.go b/avalanchego/snow/engine/avalanche/issuer.go deleted file mode 100644 index b777e4df..00000000 --- a/avalanchego/snow/engine/avalanche/issuer.go +++ /dev/null @@ -1,213 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package avalanche - -import ( - "context" - - "go.uber.org/zap" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/consensus/avalanche" - "github.com/ava-labs/avalanchego/snow/consensus/snowstorm" - "github.com/ava-labs/avalanchego/snow/engine/common" - "github.com/ava-labs/avalanchego/utils/bag" - "github.com/ava-labs/avalanchego/utils/set" -) - -// issuer issues [vtx] into consensus after its dependencies are met. 
-type issuer struct { - t *Transitive - vtx avalanche.Vertex - issued, abandoned bool - vtxDeps, txDeps set.Set[ids.ID] -} - -// Register that a vertex we were waiting on has been issued to consensus. -func (i *issuer) FulfillVtx(ctx context.Context, id ids.ID) { - i.vtxDeps.Remove(id) - i.Update(ctx) -} - -// Register that a transaction we were waiting on has been issued to consensus. -func (i *issuer) FulfillTx(ctx context.Context, id ids.ID) { - i.txDeps.Remove(id) - i.Update(ctx) -} - -// Abandon this attempt to issue -func (i *issuer) Abandon(ctx context.Context) { - if !i.abandoned { - vtxID := i.vtx.ID() - i.t.pending.Remove(vtxID) - i.abandoned = true - i.t.vtxBlocked.Abandon(ctx, vtxID) // Inform vertices waiting on this vtx that it won't be issued - i.t.metrics.blockerVtxs.Set(float64(i.t.vtxBlocked.Len())) - } -} - -// Issue the poll when all dependencies are met -func (i *issuer) Update(ctx context.Context) { - if i.abandoned || i.issued || i.vtxDeps.Len() != 0 || i.txDeps.Len() != 0 || i.t.Consensus.VertexIssued(i.vtx) || i.t.errs.Errored() { - return - } - - vtxID := i.vtx.ID() - - // All dependencies have been met - i.issued = true - - // check stop vertex validity - err := i.vtx.Verify(ctx) - if err != nil { - if i.vtx.HasWhitelist() { - // do not update "i.t.errs" since it's only used for critical errors - // which will cause chain shutdown in the engine - // (see "handleSyncMsg" and "handleChanMsg") - i.t.Ctx.Log.Debug("stop vertex verification failed", - zap.Stringer("vtxID", vtxID), - zap.Error(err), - ) - i.t.metrics.whitelistVtxIssueFailure.Inc() - } else { - i.t.Ctx.Log.Debug("vertex verification failed", - zap.Stringer("vtxID", vtxID), - zap.Error(err), - ) - } - - i.t.vtxBlocked.Abandon(ctx, vtxID) - return - } - - i.t.pending.Remove(vtxID) // Remove from set of vertices waiting to be issued. 
- - // Make sure the transactions in this vertex are valid - txs, err := i.vtx.Txs(ctx) - if err != nil { - i.t.errs.Add(err) - return - } - validTxs := make([]snowstorm.Tx, 0, len(txs)) - for _, tx := range txs { - if err := tx.Verify(ctx); err != nil { - txID := tx.ID() - i.t.Ctx.Log.Debug("transaction verification failed", - zap.Stringer("txID", txID), - zap.Error(err), - ) - i.t.txBlocked.Abandon(ctx, txID) - } else { - validTxs = append(validTxs, tx) - } - } - - // Some of the transactions weren't valid. Abandon this vertex. - // Take the valid transactions and issue a new vertex with them. - if len(validTxs) != len(txs) { - i.t.Ctx.Log.Debug("abandoning vertex", - zap.String("reason", "transaction verification failed"), - zap.Stringer("vtxID", vtxID), - ) - if _, err := i.t.batch(ctx, validTxs, batchOption{}); err != nil { - i.t.errs.Add(err) - } - i.t.vtxBlocked.Abandon(ctx, vtxID) - i.t.metrics.blockerVtxs.Set(float64(i.t.vtxBlocked.Len())) - return - } - - i.t.Ctx.Log.Verbo("adding vertex to consensus", - zap.Stringer("vtxID", vtxID), - ) - - // Add this vertex to consensus. - if err := i.t.Consensus.Add(ctx, i.vtx); err != nil { - i.t.errs.Add(err) - return - } - - // Issue a poll for this vertex. - vdrIDs, err := i.t.Validators.Sample(i.t.Params.K) // Validators to sample - if err != nil { - i.t.Ctx.Log.Error("dropped query", - zap.String("reason", "insufficient number of validators"), - zap.Stringer("vtxID", vtxID), - ) - } - - vdrBag := bag.Bag[ids.NodeID]{} // Validators to sample repr. as a set - vdrBag.Add(vdrIDs...) 
- - i.t.RequestID++ - if err == nil && i.t.polls.Add(i.t.RequestID, vdrBag) { - numPushTo := i.t.Params.MixedQueryNumPushVdr - if !i.t.Validators.Contains(i.t.Ctx.NodeID) { - numPushTo = i.t.Params.MixedQueryNumPushNonVdr - } - common.SendMixedQuery( - ctx, - i.t.Sender, - vdrBag.List(), // Note that this doesn't contain duplicates; length may be < k - numPushTo, - i.t.RequestID, - vtxID, - i.vtx.Bytes(), - ) - } - - // Notify vertices waiting on this one that it (and its transactions) have been issued. - i.t.vtxBlocked.Fulfill(ctx, vtxID) - for _, tx := range txs { - i.t.txBlocked.Fulfill(ctx, tx.ID()) - } - i.t.metrics.blockerTxs.Set(float64(i.t.txBlocked.Len())) - i.t.metrics.blockerVtxs.Set(float64(i.t.vtxBlocked.Len())) - - if i.vtx.HasWhitelist() { - i.t.Ctx.Log.Info("successfully issued stop vertex", - zap.Stringer("vtxID", vtxID), - ) - i.t.metrics.whitelistVtxIssueSuccess.Inc() - } - - // Issue a repoll - i.t.repoll(ctx) -} - -type vtxIssuer struct{ i *issuer } - -func (vi *vtxIssuer) Dependencies() set.Set[ids.ID] { - return vi.i.vtxDeps -} - -func (vi *vtxIssuer) Fulfill(ctx context.Context, id ids.ID) { - vi.i.FulfillVtx(ctx, id) -} - -func (vi *vtxIssuer) Abandon(ctx context.Context, _ ids.ID) { - vi.i.Abandon(ctx) -} - -func (vi *vtxIssuer) Update(ctx context.Context) { - vi.i.Update(ctx) -} - -type txIssuer struct{ i *issuer } - -func (ti *txIssuer) Dependencies() set.Set[ids.ID] { - return ti.i.txDeps -} - -func (ti *txIssuer) Fulfill(ctx context.Context, id ids.ID) { - ti.i.FulfillTx(ctx, id) -} - -func (ti *txIssuer) Abandon(ctx context.Context, _ ids.ID) { - ti.i.Abandon(ctx) -} - -func (ti *txIssuer) Update(ctx context.Context) { - ti.i.Update(ctx) -} diff --git a/avalanchego/snow/engine/avalanche/metrics.go b/avalanchego/snow/engine/avalanche/metrics.go deleted file mode 100644 index cae97abc..00000000 --- a/avalanchego/snow/engine/avalanche/metrics.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. 
All rights reserved. -// See the file LICENSE for licensing terms. - -package avalanche - -import ( - "github.com/prometheus/client_golang/prometheus" - - "github.com/ava-labs/avalanchego/utils/wrappers" -) - -type metrics struct { - bootstrapFinished, - numVtxRequests, numPendingVts, - numMissingTxs, pendingTxs, - blockerVtxs, blockerTxs prometheus.Gauge - - whitelistVtxIssueSuccess, whitelistVtxIssueFailure, - numUselessPutBytes, numUselessPushQueryBytes prometheus.Counter -} - -func (m *metrics) Initialize(namespace string, reg prometheus.Registerer) error { - errs := wrappers.Errs{} - m.bootstrapFinished = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "bootstrap_finished", - Help: "Whether or not bootstrap process has completed. 1 is success, 0 is fail or ongoing.", - }) - m.numVtxRequests = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "vtx_requests", - Help: "Number of outstanding vertex requests", - }) - m.numPendingVts = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "pending_vts", - Help: "Number of pending vertices", - }) - m.numMissingTxs = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "missing_txs", - Help: "Number of missing transactions", - }) - m.pendingTxs = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "pending_txs", - Help: "Number of transactions from the VM waiting to be issued", - }) - m.blockerVtxs = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "blocker_vtxs", - Help: "Number of vertices that are blocking other vertices from being issued because they haven't been issued", - }) - m.blockerTxs = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "blocker_txs", - Help: "Number of transactions that are blocking other transactions from being issued because they haven't been issued", - }) - m.whitelistVtxIssueSuccess = 
prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "whitelist_vtx_issue_success", - Help: "Number of DAG linearization request issued (pending, not necessarily accepted)", - }) - m.whitelistVtxIssueFailure = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "whitelist_vtx_issue_failure", - Help: "Number of DAG linearization request issue failed (verification failure)", - }) - m.numUselessPutBytes = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "num_useless_put_bytes", - Help: "Amount of useless bytes received in Put messages", - }) - m.numUselessPushQueryBytes = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "num_useless_push_query_bytes", - Help: "Amount of useless bytes received in PushQuery messages", - }) - - errs.Add( - reg.Register(m.bootstrapFinished), - reg.Register(m.numVtxRequests), - reg.Register(m.numPendingVts), - reg.Register(m.numMissingTxs), - reg.Register(m.pendingTxs), - reg.Register(m.blockerVtxs), - reg.Register(m.blockerTxs), - reg.Register(m.whitelistVtxIssueSuccess), - reg.Register(m.whitelistVtxIssueFailure), - reg.Register(m.numUselessPutBytes), - reg.Register(m.numUselessPushQueryBytes), - ) - return errs.Err -} diff --git a/avalanchego/snow/engine/avalanche/state/prefixed_state.go b/avalanchego/snow/engine/avalanche/state/prefixed_state.go index 5fac890b..1ff634d5 100644 --- a/avalanchego/snow/engine/avalanche/state/prefixed_state.go +++ b/avalanchego/snow/engine/avalanche/state/prefixed_state.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package state diff --git a/avalanchego/snow/engine/avalanche/state/serializer.go b/avalanchego/snow/engine/avalanche/state/serializer.go index 274f3ea0..be8cabc6 100644 --- a/avalanchego/snow/engine/avalanche/state/serializer.go +++ b/avalanchego/snow/engine/avalanche/state/serializer.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. // Package state manages the meta-data required by consensus for an avalanche @@ -16,7 +16,6 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/avalanche" - "github.com/ava-labs/avalanchego/snow/consensus/snowstorm" "github.com/ava-labs/avalanchego/snow/engine/avalanche/vertex" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/math" @@ -76,26 +75,9 @@ func (s *Serializer) ParseVtx(ctx context.Context, b []byte) (avalanche.Vertex, return newUniqueVertex(ctx, s, b) } -func (s *Serializer) BuildVtx( - ctx context.Context, - parentIDs []ids.ID, - txs []snowstorm.Tx, -) (avalanche.Vertex, error) { - return s.buildVtx(ctx, parentIDs, txs, false) -} - func (s *Serializer) BuildStopVtx( ctx context.Context, parentIDs []ids.ID, -) (avalanche.Vertex, error) { - return s.buildVtx(ctx, parentIDs, nil, true) -} - -func (s *Serializer) buildVtx( - ctx context.Context, - parentIDs []ids.ID, - txs []snowstorm.Tx, - stopVtx bool, ) (avalanche.Vertex, error) { height := uint64(0) for _, parentID := range parentIDs { @@ -108,31 +90,14 @@ func (s *Serializer) buildVtx( if err != nil { return nil, err } - height = math.Max(height, childHeight) + height = max(height, childHeight) } - var ( - vtx vertex.StatelessVertex - err error + vtx, err := vertex.BuildStopVertex( + s.ChainID, + height, + parentIDs, ) - if !stopVtx { - txBytes := make([][]byte, len(txs)) - for i, tx := range txs { - 
txBytes[i] = tx.Bytes() - } - vtx, err = vertex.Build( - s.ChainID, - height, - parentIDs, - txBytes, - ) - } else { - vtx, err = vertex.BuildStopVertex( - s.ChainID, - height, - parentIDs, - ) - } if err != nil { return nil, err } diff --git a/avalanchego/snow/engine/avalanche/state/state.go b/avalanchego/snow/engine/avalanche/state/state.go index 54bb727c..021a4c7e 100644 --- a/avalanchego/snow/engine/avalanche/state/state.go +++ b/avalanchego/snow/engine/avalanche/state/state.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package state @@ -11,7 +11,6 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/engine/avalanche/vertex" - "github.com/ava-labs/avalanchego/utils/hashing" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/wrappers" ) @@ -109,7 +108,7 @@ func (s *state) Edge(id ids.ID) []ids.ID { frontierSize := p.UnpackInt() frontier := make([]ids.ID, frontierSize) for i := 0; i < int(frontierSize) && !p.Errored(); i++ { - id, err := ids.ToID(p.UnpackFixedBytes(hashing.HashLen)) + id, err := ids.ToID(p.UnpackFixedBytes(ids.IDLen)) p.Add(err) frontier[i] = id } @@ -137,7 +136,7 @@ func (s *state) SetEdge(id ids.ID, frontier []ids.ID) error { return s.db.Delete(id[:]) } - size := wrappers.IntLen + hashing.HashLen*len(frontier) + size := wrappers.IntLen + ids.IDLen*len(frontier) p := wrappers.Packer{Bytes: make([]byte, size)} p.PackInt(uint32(len(frontier))) for _, id := range frontier { diff --git a/avalanchego/snow/engine/avalanche/state/unique_vertex.go b/avalanchego/snow/engine/avalanche/state/unique_vertex.go index e3a41ba4..20cc702c 100644 --- a/avalanchego/snow/engine/avalanche/state/unique_vertex.go +++ b/avalanchego/snow/engine/avalanche/state/unique_vertex.go @@ -1,4 +1,4 @@ -// Copyright 
(C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package state @@ -8,7 +8,6 @@ import ( "errors" "fmt" "strings" - "time" "github.com/ava-labs/avalanchego/cache" "github.com/ava-labs/avalanchego/ids" @@ -18,12 +17,15 @@ import ( "github.com/ava-labs/avalanchego/snow/engine/avalanche/vertex" "github.com/ava-labs/avalanchego/utils/formatting" "github.com/ava-labs/avalanchego/utils/hashing" - "github.com/ava-labs/avalanchego/utils/set" ) var ( _ cache.Evictable[ids.ID] = (*uniqueVertex)(nil) _ avalanche.Vertex = (*uniqueVertex)(nil) + + errGetParents = errors.New("failed to get parents for vertex") + errGetHeight = errors.New("failed to get height for vertex") + errGetTxs = errors.New("failed to get txs for vertex") ) // uniqueVertex acts as a cache for vertices in the database. @@ -37,8 +39,6 @@ type uniqueVertex struct { id ids.ID v *vertexState - // default to "time.Now", used for testing - time func() time.Time } // newUniqueVertex returns a uniqueVertex instance from [b] by checking the cache @@ -225,7 +225,7 @@ func (vtx *uniqueVertex) Parents() ([]avalanche.Vertex, error) { vtx.refresh() if vtx.v.vtx == nil { - return nil, fmt.Errorf("failed to get parents for vertex with status: %s", vtx.v.status) + return nil, fmt.Errorf("%w with status: %s", errGetParents, vtx.v.status) } parentIDs := vtx.v.vtx.ParentIDs() @@ -242,212 +242,21 @@ func (vtx *uniqueVertex) Parents() ([]avalanche.Vertex, error) { return vtx.v.parents, nil } -var ( - errStopVertexNotAllowedTimestamp = errors.New("stop vertex not allowed timestamp") - errStopVertexAlreadyAccepted = errors.New("stop vertex already accepted") - errUnexpectedEdges = errors.New("unexpected edge, expected accepted frontier") - errUnexpectedDependencyStopVtx = errors.New("unexpected dependencies found in stop vertex transitive path") -) - -// "uniqueVertex" itself implements "Verify" regardless of whether the 
underlying vertex -// is stop vertex or not. Called before issuing the vertex to the consensus. -// No vertex should ever be able to refer to a stop vertex in its transitive closure. -func (vtx *uniqueVertex) Verify(ctx context.Context) error { - // first verify the underlying stateless vertex - if err := vtx.v.vtx.Verify(); err != nil { - return err - } - - whitelistVtx := vtx.v.vtx.StopVertex() - if whitelistVtx { - now := time.Now() - if vtx.time != nil { - now = vtx.time() - } - allowed := vtx.serializer.CortinaTime - if now.Before(allowed) { - return errStopVertexNotAllowedTimestamp - } - } - - // MUST error if stop vertex has already been accepted (can't be accepted twice) - // regardless of whether the underlying vertex is stop vertex or not - stopVtxAccepted, err := vtx.serializer.StopVertexAccepted(ctx) - if err != nil { - return err - } - if stopVtxAccepted { - return errStopVertexAlreadyAccepted - } - if !whitelistVtx { - // below are stop vertex specific verifications - // no need to continue - return nil - } - - // (accepted) (accepted) - // vtx_1 vtx_2 - // [tx_a, tx_b] [tx_c, tx_d] - // ⬆ ⬉ ⬈ ⬆ - // vtx_3 vtx_4 - // [tx_e, tx_f] [tx_g, tx_h] - // ⬆ - // stop_vertex_5 - // - // [tx_a, tx_b] transitively referenced by "stop_vertex_5" - // has the dependent transactions [tx_e, tx_f] - // that are not transitively referenced by "stop_vertex_5" - // in case "tx_g" depends on "tx_e" that is not in vtx4. - // Thus "stop_vertex_5" is invalid! - // - // To make sure such transitive paths of the stop vertex reach all accepted frontier: - // 1. check the edge of the transitive paths refers to the accepted frontier - // 2. 
check dependencies of all txs must be subset of transitive paths - queue := []avalanche.Vertex{vtx} - visitedVtx := set.NewSet[ids.ID](0) - - acceptedFrontier := set.NewSet[ids.ID](0) - transitivePaths := set.NewSet[ids.ID](0) - dependencies := set.NewSet[ids.ID](0) - for len(queue) > 0 { // perform BFS - cur := queue[0] - queue = queue[1:] - - curID := cur.ID() - if cur.Status() == choices.Accepted { - // 1. check the edge of the transitive paths refers to the accepted frontier - acceptedFrontier.Add(curID) - - // have reached the accepted frontier on the transitive closure - // no need to continue the search on this path - continue - } - - if visitedVtx.Contains(curID) { - continue - } - visitedVtx.Add(curID) - transitivePaths.Add(curID) - - txs, err := cur.Txs(ctx) - if err != nil { - return err - } - for _, tx := range txs { - transitivePaths.Add(tx.ID()) - deps, err := tx.Dependencies() - if err != nil { - return err - } - for _, dep := range deps { - // only add non-accepted dependencies - if dep.Status() != choices.Accepted { - dependencies.Add(dep.ID()) - } - } - } - - parents, err := cur.Parents() - if err != nil { - return err - } - queue = append(queue, parents...) - } - - acceptedEdges := set.NewSet[ids.ID](0) - acceptedEdges.Add(vtx.serializer.Edge(ctx)...) - - // stop vertex should be able to reach all IDs - // that are returned by the "Edge" - if !acceptedFrontier.Equals(acceptedEdges) { - return errUnexpectedEdges - } - - // 2. check dependencies of all txs must be subset of transitive paths - prev := transitivePaths.Len() - transitivePaths.Union(dependencies) - if prev != transitivePaths.Len() { - return errUnexpectedDependencyStopVtx - } - - return nil -} - -func (vtx *uniqueVertex) HasWhitelist() bool { - return vtx.v.vtx.StopVertex() -} - -// "uniqueVertex" itself implements "Whitelist" traversal iff its underlying -// "vertex.StatelessVertex" is marked as a stop vertex. 
-func (vtx *uniqueVertex) Whitelist(ctx context.Context) (set.Set[ids.ID], error) { - if !vtx.v.vtx.StopVertex() { - return nil, nil - } - - // perform BFS on transitive paths until reaching the accepted frontier - // represents all processing transaction IDs transitively referenced by the - // vertex - queue := []avalanche.Vertex{vtx} - whitlist := set.NewSet[ids.ID](0) - visitedVtx := set.NewSet[ids.ID](0) - for len(queue) > 0 { - cur := queue[0] - queue = queue[1:] - - if cur.Status() == choices.Accepted { - // have reached the accepted frontier on the transitive closure - // no need to continue the search on this path - continue - } - curID := cur.ID() - if visitedVtx.Contains(curID) { - continue - } - visitedVtx.Add(curID) - - txs, err := cur.Txs(ctx) - if err != nil { - return nil, err - } - for _, tx := range txs { - whitlist.Add(tx.ID()) - } - whitlist.Add(curID) - - parents, err := cur.Parents() - if err != nil { - return nil, err - } - queue = append(queue, parents...) - } - return whitlist, nil -} - func (vtx *uniqueVertex) Height() (uint64, error) { vtx.refresh() if vtx.v.vtx == nil { - return 0, fmt.Errorf("failed to get height for vertex with status: %s", vtx.v.status) + return 0, fmt.Errorf("%w with status: %s", errGetHeight, vtx.v.status) } return vtx.v.vtx.Height(), nil } -func (vtx *uniqueVertex) Epoch() (uint32, error) { - vtx.refresh() - - if vtx.v.vtx == nil { - return 0, fmt.Errorf("failed to get epoch for vertex with status: %s", vtx.v.status) - } - - return vtx.v.vtx.Epoch(), nil -} - func (vtx *uniqueVertex) Txs(ctx context.Context) ([]snowstorm.Tx, error) { vtx.refresh() if vtx.v.vtx == nil { - return nil, fmt.Errorf("failed to get txs for vertex with status: %s", vtx.v.status) + return nil, fmt.Errorf("%w with status: %s", errGetTxs, vtx.v.status) } txs := vtx.v.vtx.Txs() @@ -491,13 +300,13 @@ func (vtx *uniqueVertex) String() string { len(txs), )) - parentFormat := fmt.Sprintf("\n Parent[%s]: ID = %%s, Status = %%s", + parentFormat := 
fmt.Sprintf("\n Parent[%s]: ID = %%s, Status = %%s", //nolint:perfsprint formatting.IntFormat(len(parents)-1)) for i, parent := range parents { sb.WriteString(fmt.Sprintf(parentFormat, i, parent.ID(), parent.Status())) } - txFormat := fmt.Sprintf("\n Transaction[%s]: ID = %%s, Status = %%s", + txFormat := fmt.Sprintf("\n Transaction[%s]: ID = %%s, Status = %%s", //nolint:perfsprint formatting.IntFormat(len(txs)-1)) for i, tx := range txs { sb.WriteString(fmt.Sprintf(txFormat, i, tx.ID(), tx.Status())) diff --git a/avalanchego/snow/engine/avalanche/state/unique_vertex_test.go b/avalanchego/snow/engine/avalanche/state/unique_vertex_test.go index c77f2a50..6f644680 100644 --- a/avalanchego/snow/engine/avalanche/state/unique_vertex_test.go +++ b/avalanchego/snow/engine/avalanche/state/unique_vertex_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package state @@ -8,17 +8,16 @@ import ( "context" "errors" "testing" - "time" + + "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowstorm" "github.com/ava-labs/avalanchego/snow/engine/avalanche/vertex" - "github.com/ava-labs/avalanchego/utils/compare" "github.com/ava-labs/avalanchego/utils/hashing" - "github.com/ava-labs/avalanchego/version" + "github.com/ava-labs/avalanchego/utils/logging" ) var errUnknownTx = errors.New("unknown tx") @@ -30,13 +29,12 @@ func newTestSerializer(t *testing.T, parse func(context.Context, []byte) (snowst vm.ParseTxF = parse baseDB := memdb.New() - ctx := snow.DefaultContextTest() s := NewSerializer( SerializerConfig{ - ChainID: ctx.ChainID, + ChainID: ids.Empty, VM: &vm, DB: baseDB, - Log: ctx.Log, + Log: logging.NoLog{}, }, ) @@ -44,6 +42,7 @@ func newTestSerializer(t *testing.T, parse func(context.Context, []byte) (snowst } func TestUnknownUniqueVertexErrors(t *testing.T) { + require := require.New(t) s := newTestSerializer(t, nil) uVtx := &uniqueVertex{ @@ -52,35 +51,27 @@ func TestUnknownUniqueVertexErrors(t *testing.T) { } status := uVtx.Status() - if status != choices.Unknown { - t.Fatalf("Expected vertex to have Unknown status") - } + require.Equal(choices.Unknown, status) _, err := uVtx.Parents() - if err == nil { - t.Fatalf("Parents should have produced error for unknown vertex") - } + require.ErrorIs(err, errGetParents) _, err = uVtx.Height() - if err == nil { - t.Fatalf("Height should have produced error for unknown vertex") - } + require.ErrorIs(err, errGetHeight) _, err = uVtx.Txs(context.Background()) - if err == nil { - t.Fatalf("Txs should have produced an error for unknown vertex") - } + require.ErrorIs(err, errGetTxs) } func TestUniqueVertexCacheHit(t *testing.T) { + require := require.New(t) + testTx := 
&snowstorm.TestTx{TestDecidable: choices.TestDecidable{ IDV: ids.ID{1}, }} s := newTestSerializer(t, func(_ context.Context, b []byte) (snowstorm.Tx, error) { - if !bytes.Equal(b, []byte{0}) { - t.Fatal("unknown tx") - } + require.Equal([]byte{0}, b) return testTx, nil }) @@ -95,17 +86,13 @@ func TestUniqueVertexCacheHit(t *testing.T) { parentIDs, [][]byte{{0}}, ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) uVtx := &uniqueVertex{ id: id, serializer: s, } - if err := uVtx.setVertex(context.Background(), vtx); err != nil { - t.Fatalf("Failed to set vertex due to: %s", err) - } + require.NoError(uVtx.setVertex(context.Background(), vtx)) newUVtx := &uniqueVertex{ id: id, @@ -113,41 +100,25 @@ func TestUniqueVertexCacheHit(t *testing.T) { } parents, err := newUVtx.Parents() - if err != nil { - t.Fatalf("Error while retrieving parents of known vertex") - } - if len(parents) != 1 { - t.Fatalf("Parents should have length 1") - } - if parents[0].ID() != parentID { - t.Fatalf("ParentID is incorrect") - } + require.NoError(err) + require.Len(parents, 1) + require.Equal(parentID, parents[0].ID()) newHeight, err := newUVtx.Height() - if err != nil { - t.Fatalf("Error while retrieving height of known vertex") - } - if height != newHeight { - t.Fatalf("Vertex height should have been %d, but was: %d", height, newHeight) - } + require.NoError(err) + require.Equal(height, newHeight) txs, err := newUVtx.Txs(context.Background()) - if err != nil { - t.Fatalf("Error while retrieving txs of known vertex: %s", err) - } - if len(txs) != 1 { - t.Fatalf("Incorrect number of transactions") - } - if txs[0] != testTx { - t.Fatalf("Txs retrieved the wrong Tx") - } + require.NoError(err) + require.Len(txs, 1) + require.Equal(testTx, txs[0]) - if newUVtx.v != uVtx.v { - t.Fatalf("Unique vertex failed to get corresponding vertex state from cache") - } + require.Equal(uVtx.v, newUVtx.v) } func TestUniqueVertexCacheMiss(t *testing.T) { + require := require.New(t) + txBytesParent := 
[]byte{1, 2, 3, 4, 5, 6, 7, 8, 9} testTxParent := &snowstorm.TestTx{ TestDecidable: choices.TestDecidable{ @@ -171,16 +142,14 @@ func TestUniqueVertexCacheMiss(t *testing.T) { if bytes.Equal(txBytes, b) { return testTx, nil } - t.Fatal("asked to parse unexpected transaction") + require.FailNow("asked to parse unexpected transaction") return nil, nil } s := newTestSerializer(t, parseTx) uvtxParent := newTestUniqueVertex(t, s, nil, [][]byte{txBytesParent}, false) - if err := uvtxParent.Accept(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(uvtxParent.Accept(context.Background())) parentID := uvtxParent.ID() parentIDs := []ids.ID{parentID} @@ -192,9 +161,7 @@ func TestUniqueVertexCacheMiss(t *testing.T) { parentIDs, [][]byte{txBytes}, ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) id := innerVertex.ID() vtxBytes := innerVertex.Bytes() @@ -205,60 +172,35 @@ func TestUniqueVertexCacheMiss(t *testing.T) { } // Register a cache miss - if status := uVtx.Status(); status != choices.Unknown { - t.Fatalf("expected status to be unknown, but found: %s", status) - } + require.Equal(choices.Unknown, uVtx.Status()) // Register cache hit vtx, err := newUniqueVertex(context.Background(), s, vtxBytes) - if err != nil { - t.Fatal(err) - } + require.NoError(err) - if status := vtx.Status(); status != choices.Processing { - t.Fatalf("expected status to be processing, but found: %s", status) - } - - if err := vtx.Verify(context.Background()); err != nil { - t.Fatal(err) - } + require.Equal(choices.Processing, vtx.Status()) validateVertex := func(vtx *uniqueVertex, expectedStatus choices.Status) { - if status := vtx.Status(); status != expectedStatus { - t.Fatalf("expected status to be %s, but found: %s", expectedStatus, status) - } + require.Equal(expectedStatus, vtx.Status()) // Call bytes first to check for regression bug // where it's unsafe to call Bytes or Verify directly // after calling Status to refresh a vertex - if 
!bytes.Equal(vtx.Bytes(), vtxBytes) { - t.Fatalf("Found unexpected vertex bytes") - } + require.Equal(vtxBytes, vtx.Bytes()) vtxParents, err := vtx.Parents() - if err != nil { - t.Fatalf("Fetching vertex parents errored with: %s", err) - } + require.NoError(err) + require.Len(vtxParents, 1) + require.Equal(parentID, vtxParents[0].ID()) + vtxHeight, err := vtx.Height() - if err != nil { - t.Fatalf("Fetching vertex height errored with: %s", err) - } + require.NoError(err) + require.Equal(height, vtxHeight) + vtxTxs, err := vtx.Txs(context.Background()) - if err != nil { - t.Fatalf("Fetching vertx txs errored with: %s", err) - } - switch { - case vtxHeight != height: - t.Fatalf("Expected vertex height to be %d, but found %d", height, vtxHeight) - case len(vtxParents) != 1: - t.Fatalf("Expected vertex to have 1 parent, but found %d", len(vtxParents)) - case vtxParents[0].ID() != parentID: - t.Fatalf("Found unexpected parentID: %s, expected: %s", vtxParents[0].ID(), parentID) - case len(vtxTxs) != 1: - t.Fatalf("Exepcted vertex to have 1 transaction, but found %d", len(vtxTxs)) - case !bytes.Equal(vtxTxs[0].Bytes(), txBytes): - t.Fatalf("Found unexpected transaction bytes") - } + require.NoError(err) + require.Len(vtxTxs, 1) + require.Equal(txBytes, vtxTxs[0].Bytes()) } // Replace the vertex, so that it loses reference to parents, etc. 
@@ -272,9 +214,7 @@ func TestUniqueVertexCacheMiss(t *testing.T) { // Check that a newly parsed vertex refreshed from the cache is valid vtx, err = newUniqueVertex(context.Background(), s, vtxBytes) - if err != nil { - t.Fatal(err) - } + require.NoError(err) validateVertex(vtx, choices.Processing) // Check that refreshing a vertex when it has been removed from @@ -289,22 +229,20 @@ func TestUniqueVertexCacheMiss(t *testing.T) { s.state.uniqueVtx.Flush() vtx, err = newUniqueVertex(context.Background(), s, vtxBytes) - if err != nil { - t.Fatal(err) - } + require.NoError(err) validateVertex(vtx, choices.Processing) } func TestParseVertexWithIncorrectChainID(t *testing.T) { + require := require.New(t) + statelessVertex, err := vertex.Build( // regular, non-stop vertex ids.GenerateTestID(), 0, nil, [][]byte{{1}}, ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) vtxBytes := statelessVertex.Bytes() s := newTestSerializer(t, func(_ context.Context, b []byte) (snowstorm.Tx, error) { @@ -314,22 +252,21 @@ func TestParseVertexWithIncorrectChainID(t *testing.T) { return nil, errUnknownTx }) - if _, err := s.ParseVtx(context.Background(), vtxBytes); err == nil { - t.Fatal("should have failed to parse the vertex due to invalid chainID") - } + _, err = s.ParseVtx(context.Background(), vtxBytes) + require.ErrorIs(err, errWrongChainID) } func TestParseVertexWithInvalidTxs(t *testing.T) { - ctx := snow.DefaultContextTest() + require := require.New(t) + + chainID := ids.Empty statelessVertex, err := vertex.Build( // regular, non-stop vertex - ctx.ChainID, + chainID, 0, nil, [][]byte{{1}}, ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) vtxBytes := statelessVertex.Bytes() s := newTestSerializer(t, func(_ context.Context, b []byte) (snowstorm.Tx, error) { @@ -341,280 +278,34 @@ func TestParseVertexWithInvalidTxs(t *testing.T) { } }) - if _, err := s.ParseVtx(context.Background(), vtxBytes); err == nil { - t.Fatal("should have failed to parse the vertex due to 
invalid transactions") - } + _, err = s.ParseVtx(context.Background(), vtxBytes) + require.ErrorIs(err, errUnknownTx) - if _, err := s.ParseVtx(context.Background(), vtxBytes); err == nil { - t.Fatal("should have failed to parse the vertex after previously error on parsing invalid transactions") - } + _, err = s.ParseVtx(context.Background(), vtxBytes) + require.ErrorIs(err, errUnknownTx) id := hashing.ComputeHash256Array(vtxBytes) - if _, err := s.GetVtx(context.Background(), id); err == nil { - t.Fatal("should have failed to lookup invalid vertex after previously error on parsing invalid transactions") - } + _, err = s.GetVtx(context.Background(), id) + require.ErrorIs(err, errUnknownVertex) childStatelessVertex, err := vertex.Build( // regular, non-stop vertex - ctx.ChainID, + chainID, 1, []ids.ID{id}, [][]byte{{2}}, ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) childVtxBytes := childStatelessVertex.Bytes() childVtx, err := s.ParseVtx(context.Background(), childVtxBytes) - if err != nil { - t.Fatal(err) - } + require.NoError(err) parents, err := childVtx.Parents() - if err != nil { - t.Fatal(err) - } - if len(parents) != 1 { - t.Fatal("wrong number of parents") - } + require.NoError(err) + require.Len(parents, 1) parent := parents[0] - if parent.Status().Fetched() { - t.Fatal("the parent is invalid, so it shouldn't be marked as fetched") - } -} - -func TestStopVertexWhitelistEmpty(t *testing.T) { - // vtx itself is accepted, no parent ==> empty transitives - _, parseTx := generateTestTxs('a') - - // create serializer object - ts := newTestSerializer(t, parseTx) - - uvtx := newTestUniqueVertex(t, ts, nil, [][]byte{{'a'}}, true) - if err := uvtx.Accept(context.Background()); err != nil { - t.Fatal(err) - } - - tsv, err := uvtx.Whitelist(context.Background()) - if err != nil { - t.Fatalf("failed to get whitelist %v", err) - } - if tsv.Len() > 0 { - t.Fatal("expected empty whitelist") - } -} - -func TestStopVertexWhitelistWithParents(t *testing.T) { 
- t.Parallel() - - txs, parseTx := generateTestTxs('a', 'b', 'c', 'd', 'e', 'f', 'g', 'h') - ts := newTestSerializer(t, parseTx) - - // (accepted) (accepted) - // vtx_1 vtx_2 - // [tx_a, tx_b] [tx_c, tx_d] - // ⬆ ⬉ ⬈ ⬆ - // vtx_3 vtx_4 - // [tx_e, tx_f] [tx_g, tx_h] - // ⬉ ⬆ - // stop_vertex_5 - uvtx1 := newTestUniqueVertex(t, ts, nil, [][]byte{{'a'}, {'b'}}, false) - if err := uvtx1.Accept(context.Background()); err != nil { - t.Fatal(err) - } - uvtx2 := newTestUniqueVertex(t, ts, nil, [][]byte{{'c'}, {'d'}}, false) - if err := uvtx2.Accept(context.Background()); err != nil { - t.Fatal(err) - } - uvtx3 := newTestUniqueVertex(t, ts, []ids.ID{uvtx1.id, uvtx2.id}, [][]byte{{'e'}, {'f'}}, false) - uvtx4 := newTestUniqueVertex(t, ts, []ids.ID{uvtx1.id, uvtx2.id}, [][]byte{{'g'}, {'h'}}, false) - svtx5 := newTestUniqueVertex(t, ts, []ids.ID{uvtx3.id, uvtx4.id}, nil, true) - - whitelist, err := svtx5.Whitelist(context.Background()) - if err != nil { - t.Fatalf("failed to get whitelist %v", err) - } - - expectedWhitelist := []ids.ID{ - txs[4].ID(), // 'e' - txs[5].ID(), // 'f' - txs[6].ID(), // 'g' - txs[7].ID(), // 'h' - uvtx3.ID(), - uvtx4.ID(), - svtx5.ID(), - } - if !compare.UnsortedEquals(whitelist.List(), expectedWhitelist) { - t.Fatalf("whitelist expected %v, got %v", expectedWhitelist, whitelist) - } -} - -func TestStopVertexWhitelistWithLinearChain(t *testing.T) { - t.Parallel() - - // 0 -> 1 -> 2 -> 3 -> 4 -> 5 - // all vertices on the transitive paths are processing - txs, parseTx := generateTestTxs('a', 'b', 'c', 'd', 'e') - - // create serializer object - ts := newTestSerializer(t, parseTx) - - uvtx5 := newTestUniqueVertex(t, ts, nil, [][]byte{{'e'}}, false) - if err := uvtx5.Accept(context.Background()); err != nil { - t.Fatal(err) - } - - uvtx4 := newTestUniqueVertex(t, ts, []ids.ID{uvtx5.id}, [][]byte{{'d'}}, false) - uvtx3 := newTestUniqueVertex(t, ts, []ids.ID{uvtx4.id}, [][]byte{{'c'}}, false) - uvtx2 := newTestUniqueVertex(t, ts, []ids.ID{uvtx3.id}, 
[][]byte{{'b'}}, false) - uvtx1 := newTestUniqueVertex(t, ts, []ids.ID{uvtx2.id}, [][]byte{{'a'}}, false) - uvtx0 := newTestUniqueVertex(t, ts, []ids.ID{uvtx1.id}, nil, true) - - whitelist, err := uvtx0.Whitelist(context.Background()) - if err != nil { - t.Fatalf("failed to get whitelist %v", err) - } - - expectedWhitelist := []ids.ID{ - txs[0].ID(), - txs[1].ID(), - txs[2].ID(), - txs[3].ID(), - uvtx0.ID(), - uvtx1.ID(), - uvtx2.ID(), - uvtx3.ID(), - uvtx4.ID(), - } - if !compare.UnsortedEquals(whitelist.List(), expectedWhitelist) { - t.Fatalf("whitelist expected %v, got %v", expectedWhitelist, whitelist) - } -} - -func TestStopVertexVerifyUnexpectedDependencies(t *testing.T) { - t.Parallel() - - txs, parseTx := generateTestTxs('a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'x') - ts := newTestSerializer(t, parseTx) - - // (accepted) (accepted) - // vtx_1 vtx_2 - // [tx_a, tx_b] [tx_c, tx_d] - // ⬆ ⬉ ⬈ ⬆ - // vtx_3 vtx_4 - // [tx_e, tx_f] [tx_g, tx_h] - // ⬆ - // stop_vertex_5 - // - // [tx_a, tx_b] transitively referenced by "stop_vertex_5" - // has the dependent transactions [tx_e, tx_f] - // that are not transitively referenced by "stop_vertex_5" - // in case "tx_g" depends on "tx_e" that is not in vtx4. - // Thus "stop_vertex_5" is invalid! 
- - // "tx_g" depends on "tx_e" - txEInf := txs[4] - txGInf := txs[6] - txG, ok := txGInf.(*snowstorm.TestTx) - if !ok { - t.Fatalf("unexpected type %T", txGInf) - } - txG.DependenciesV = []snowstorm.Tx{txEInf} - - uvtx1 := newTestUniqueVertex(t, ts, nil, [][]byte{{'a'}, {'b'}}, false) - if err := uvtx1.Accept(context.Background()); err != nil { - t.Fatal(err) - } - uvtx2 := newTestUniqueVertex(t, ts, nil, [][]byte{{'c'}, {'d'}}, false) - if err := uvtx2.Accept(context.Background()); err != nil { - t.Fatal(err) - } - - uvtx3 := newTestUniqueVertex(t, ts, []ids.ID{uvtx1.id, uvtx2.id}, [][]byte{{'e'}, {'f'}}, false) - uvtx4 := newTestUniqueVertex(t, ts, []ids.ID{uvtx1.id, uvtx2.id}, [][]byte{{'g'}, {'h'}}, false) - - svtx5 := newTestUniqueVertex(t, ts, []ids.ID{uvtx4.id}, nil, true) - if verr := svtx5.Verify(context.Background()); !errors.Is(verr, errUnexpectedDependencyStopVtx) { - t.Fatalf("stop vertex 'Verify' expected %v, got %v", errUnexpectedDependencyStopVtx, verr) - } - - // if "tx_e" that "tx_g" depends on were accepted, - // transitive closure is reaching all accepted frontier - txE, ok := txEInf.(*snowstorm.TestTx) - if !ok { - t.Fatalf("unexpected type %T", txEInf) - } - txE.StatusV = choices.Accepted - svtx5 = newTestUniqueVertex(t, ts, []ids.ID{uvtx4.id}, nil, true) - if verr := svtx5.Verify(context.Background()); verr != nil { - t.Fatalf("stop vertex 'Verify' expected nil, got %v", verr) - } - - // valid stop vertex - // - // (accepted) (accepted) - // vtx_1 vtx_2 - // [tx_a, tx_b] [tx_c, tx_d] - // ⬆ ⬉ ⬈ ⬆ - // vtx_3 vtx_4 - // [tx_e, tx_f] [tx_g, tx_h] - // ⬉ ⬆ - // stop_vertex_5 - svtx5 = newTestUniqueVertex(t, ts, []ids.ID{uvtx3.id, uvtx4.id}, nil, true) - if verr := svtx5.Verify(context.Background()); verr != nil { - t.Fatalf("stop vertex 'Verify' expected nil, got %v", verr) - } - if err := uvtx3.Accept(context.Background()); err != nil { - t.Fatal(err) - } - if err := uvtx4.Accept(context.Background()); err != nil { - t.Fatal(err) - } - if err 
:= svtx5.Accept(context.Background()); err != nil { - t.Fatal(err) - } - // stop vertex cannot be issued twice - if verr := svtx5.Verify(context.Background()); !errors.Is(verr, errStopVertexAlreadyAccepted) { - t.Fatalf("stop vertex 'Verify' expected %v, got %v", errStopVertexAlreadyAccepted, verr) - } - - // no vertex should never be able to refer to a stop vertex in its transitive closure - // regular vertex with stop vertex as a parent should fail! - // - // (accepted) (accepted) - // vtx_1 vtx_2 - // [tx_a, tx_b] [tx_c, tx_d] - // ⬆ ⬉ ⬈ ⬆ - // vtx_3 vtx_4 - // [tx_e, tx_f] [tx_g, tx_h] - // ⬉ ⬆ - // stop_vertex_5 - // ⬆ - // vtx_6 - // [tx_x] - // (should fail) - uvtx6 := newTestUniqueVertex(t, ts, []ids.ID{svtx5.id}, [][]byte{{'x'}}, false) - if verr := uvtx6.Verify(context.Background()); !errors.Is(verr, errStopVertexAlreadyAccepted) { - t.Fatalf("stop vertex 'Verify' expected %v, got %v", errStopVertexAlreadyAccepted, verr) - } -} - -func TestStopVertexVerifyNotAllowedTimestamp(t *testing.T) { - t.Parallel() - - _, parseTx := generateTestTxs('a') - ts := newTestSerializer(t, parseTx) - ts.CortinaTime = version.CortinaDefaultTime - - svtx := newTestUniqueVertex(t, ts, nil, nil, true) - svtx.time = func() time.Time { - return version.CortinaDefaultTime.Add(-time.Second) - } - - if verr := svtx.Verify(context.Background()); !errors.Is(verr, errStopVertexNotAllowedTimestamp) { - t.Fatalf("stop vertex 'Verify' expected %v, got %v", errStopVertexNotAllowedTimestamp, verr) - } + require.False(parent.Status().Fetched()) } func newTestUniqueVertex( @@ -624,6 +315,8 @@ func newTestUniqueVertex( txs [][]byte, stopVertex bool, ) *uniqueVertex { + require := require.New(t) + var ( vtx vertex.StatelessVertex err error @@ -642,34 +335,8 @@ func newTestUniqueVertex( parentIDs, ) } - if err != nil { - t.Fatal(err) - } + require.NoError(err) uvtx, err := newUniqueVertex(context.Background(), s, vtx.Bytes()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) return 
uvtx } - -func generateTestTxs(idSlice ...byte) ([]snowstorm.Tx, func(context.Context, []byte) (snowstorm.Tx, error)) { - txs := make([]snowstorm.Tx, len(idSlice)) - bytesToTx := make(map[string]snowstorm.Tx, len(idSlice)) - for i, b := range idSlice { - txs[i] = &snowstorm.TestTx{ - TestDecidable: choices.TestDecidable{ - IDV: ids.ID{b}, - }, - BytesV: []byte{b}, - } - bytesToTx[string([]byte{b})] = txs[i] - } - parseTx := func(_ context.Context, b []byte) (snowstorm.Tx, error) { - tx, ok := bytesToTx[string(b)] - if !ok { - return nil, errUnknownTx - } - return tx, nil - } - return txs, parseTx -} diff --git a/avalanchego/snow/engine/avalanche/test_engine.go b/avalanchego/snow/engine/avalanche/test_engine.go deleted file mode 100644 index 8fe8589c..00000000 --- a/avalanchego/snow/engine/avalanche/test_engine.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package avalanche - -import ( - "context" - "errors" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/consensus/avalanche" - "github.com/ava-labs/avalanchego/snow/engine/common" -) - -var ( - _ Engine = (*EngineTest)(nil) - - errGetVtx = errors.New("unexpectedly called GetVtx") -) - -// EngineTest is a test engine -type EngineTest struct { - common.EngineTest - - CantGetVtx bool - GetVtxF func(ctx context.Context, vtxID ids.ID) (avalanche.Vertex, error) -} - -func (e *EngineTest) Default(cant bool) { - e.EngineTest.Default(cant) - e.CantGetVtx = false -} - -func (e *EngineTest) GetVtx(ctx context.Context, vtxID ids.ID) (avalanche.Vertex, error) { - if e.GetVtxF != nil { - return e.GetVtxF(ctx, vtxID) - } - if e.CantGetVtx && e.T != nil { - e.T.Fatalf("Unexpectedly called GetVtx") - } - return nil, errGetVtx -} diff --git a/avalanchego/snow/engine/avalanche/traced_engine.go b/avalanchego/snow/engine/avalanche/traced_engine.go deleted file mode 100644 index b35771f8..00000000 --- 
a/avalanchego/snow/engine/avalanche/traced_engine.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package avalanche - -import ( - "context" - - "go.opentelemetry.io/otel/attribute" - - oteltrace "go.opentelemetry.io/otel/trace" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/consensus/avalanche" - "github.com/ava-labs/avalanchego/snow/engine/common" - "github.com/ava-labs/avalanchego/trace" -) - -var _ Engine = (*tracedEngine)(nil) - -type tracedEngine struct { - common.Engine - engine Engine - tracer trace.Tracer -} - -func TraceEngine(engine Engine, tracer trace.Tracer) Engine { - return &tracedEngine{ - Engine: common.TraceEngine(engine, tracer), - engine: engine, - tracer: tracer, - } -} - -func (e *tracedEngine) GetVtx(ctx context.Context, vtxID ids.ID) (avalanche.Vertex, error) { - ctx, span := e.tracer.Start(ctx, "tracedEngine.GetVtx", oteltrace.WithAttributes( - attribute.Stringer("vtxID", vtxID), - )) - defer span.End() - - return e.engine.GetVtx(ctx, vtxID) -} diff --git a/avalanchego/snow/engine/avalanche/transitive.go b/avalanchego/snow/engine/avalanche/transitive.go deleted file mode 100644 index 81e19008..00000000 --- a/avalanchego/snow/engine/avalanche/transitive.go +++ /dev/null @@ -1,792 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package avalanche - -import ( - "context" - "fmt" - "time" - - "go.uber.org/zap" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/proto/pb/p2p" - "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/snow/consensus/avalanche" - "github.com/ava-labs/avalanchego/snow/consensus/avalanche/poll" - "github.com/ava-labs/avalanchego/snow/consensus/snowstorm" - "github.com/ava-labs/avalanchego/snow/engine/avalanche/vertex" - "github.com/ava-labs/avalanchego/snow/engine/common" - "github.com/ava-labs/avalanchego/snow/engine/common/tracker" - "github.com/ava-labs/avalanchego/snow/events" - "github.com/ava-labs/avalanchego/snow/validators" - "github.com/ava-labs/avalanchego/utils/bag" - "github.com/ava-labs/avalanchego/utils/sampler" - "github.com/ava-labs/avalanchego/utils/set" - "github.com/ava-labs/avalanchego/utils/wrappers" - "github.com/ava-labs/avalanchego/version" -) - -var _ Engine = (*Transitive)(nil) - -func New( - config Config, - startSnowmanConsensus func(ctx context.Context, lastReqID uint32) error, -) (Engine, error) { - return newTransitive(config, startSnowmanConsensus) -} - -// Transitive implements the Engine interface by attempting to fetch all -// transitive dependencies. 
-type Transitive struct { - Config - metrics - - // list of NoOpsHandler for messages dropped by engine - common.StateSummaryFrontierHandler - common.AcceptedStateSummaryHandler - common.AcceptedFrontierHandler - common.AcceptedHandler - common.AncestorsHandler - common.AppHandler - validators.Connector - - RequestID uint32 - - // acceptedFrontiers of the other validators of this chain - acceptedFrontiers tracker.Accepted - - polls poll.Set // track people I have asked for their preference - - // The set of vertices that have been requested in Get messages but not yet received - outstandingVtxReqs common.Requests - - // missingTxs tracks transaction that are missing - missingTxs set.Set[ids.ID] - - // IDs of vertices that are queued to be added to consensus but haven't yet been - // because of missing dependencies - pending set.Set[ids.ID] - - // vtxBlocked tracks operations that are blocked on vertices - // txBlocked tracks operations that are blocked on transactions - vtxBlocked, txBlocked events.Blocker - - // transactions that have been provided from the VM but that are pending to - // be issued once the number of processing vertices has gone below the - // optimal number. 
- pendingTxs []snowstorm.Tx - - startSnowmanConsensus func(ctx context.Context, lastReqID uint32) error - - // A uniform sampler without replacement - uniformSampler sampler.Uniform - - errs wrappers.Errs -} - -func newTransitive( - config Config, - startSnowmanConsensus func(ctx context.Context, lastReqID uint32) error, -) (*Transitive, error) { - config.Ctx.Log.Info("initializing consensus engine") - - acceptedFrontiers := tracker.NewAccepted() - config.Validators.RegisterCallbackListener(acceptedFrontiers) - - factory := poll.NewEarlyTermNoTraversalFactory(config.Params.Alpha) - - t := &Transitive{ - Config: config, - StateSummaryFrontierHandler: common.NewNoOpStateSummaryFrontierHandler(config.Ctx.Log), - AcceptedStateSummaryHandler: common.NewNoOpAcceptedStateSummaryHandler(config.Ctx.Log), - AcceptedFrontierHandler: common.NewNoOpAcceptedFrontierHandler(config.Ctx.Log), - AcceptedHandler: common.NewNoOpAcceptedHandler(config.Ctx.Log), - AncestorsHandler: common.NewNoOpAncestorsHandler(config.Ctx.Log), - AppHandler: config.VM, - Connector: config.VM, - acceptedFrontiers: acceptedFrontiers, - polls: poll.NewSet(factory, - config.Ctx.Log, - "", - config.Ctx.AvalancheRegisterer, - ), - startSnowmanConsensus: startSnowmanConsensus, - uniformSampler: sampler.NewUniform(), - } - - return t, t.metrics.Initialize("", config.Ctx.AvalancheRegisterer) -} - -func (t *Transitive) Put(ctx context.Context, nodeID ids.NodeID, requestID uint32, vtxBytes []byte) error { - t.Ctx.Log.Verbo("called Put", - zap.Stringer("nodeID", nodeID), - zap.Uint32("requestID", requestID), - ) - - // If the chain is linearized, we should immediately drop all put messages. 
- linearized, err := t.Manager.StopVertexAccepted(ctx) - if err != nil { - return err - } - if linearized { - return nil - } - - vtx, err := t.Manager.ParseVtx(ctx, vtxBytes) - if err != nil { - t.Ctx.Log.Debug("failed to parse vertex", - zap.Stringer("nodeID", nodeID), - zap.Uint32("requestID", requestID), - zap.Error(err), - ) - t.Ctx.Log.Verbo("failed to parse vertex", - zap.Stringer("nodeID", nodeID), - zap.Uint32("requestID", requestID), - zap.Binary("vertex", vtxBytes), - zap.Error(err), - ) - return t.GetFailed(ctx, nodeID, requestID) - } - - actualVtxID := vtx.ID() - expectedVtxID, ok := t.outstandingVtxReqs.Get(nodeID, requestID) - // If the provided vertex is not the requested vertex, we need to explicitly - // mark the request as failed to avoid having a dangling dependency. - if ok && actualVtxID != expectedVtxID { - t.Ctx.Log.Debug("incorrect vertex returned in Put", - zap.Stringer("nodeID", nodeID), - zap.Uint32("requestID", requestID), - zap.Stringer("vtxID", actualVtxID), - zap.Stringer("expectedVtxID", expectedVtxID), - ) - // We assume that [vtx] is useless because it doesn't match what we - // expected. - return t.GetFailed(ctx, nodeID, requestID) - } - - if t.Consensus.VertexIssued(vtx) || t.pending.Contains(actualVtxID) { - t.metrics.numUselessPutBytes.Add(float64(len(vtxBytes))) - } - - if _, err := t.issueFrom(ctx, nodeID, vtx); err != nil { - return err - } - return t.attemptToIssueTxs(ctx) -} - -func (t *Transitive) GetFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32) error { - // If the chain is linearized, we don't care that a get request failed, we - // have already moved into snowman consensus. 
- linearized, err := t.Manager.StopVertexAccepted(ctx) - if err != nil { - return err - } - if linearized { - return nil - } - - vtxID, ok := t.outstandingVtxReqs.Remove(nodeID, requestID) - if !ok { - t.Ctx.Log.Debug("unexpected GetFailed", - zap.Stringer("nodeID", nodeID), - zap.Uint32("requestID", requestID), - ) - return nil - } - - t.vtxBlocked.Abandon(ctx, vtxID) - - if t.outstandingVtxReqs.Len() == 0 { - for txID := range t.missingTxs { - t.txBlocked.Abandon(ctx, txID) - } - t.missingTxs.Clear() - } - - // Track performance statistics - t.metrics.numVtxRequests.Set(float64(t.outstandingVtxReqs.Len())) - t.metrics.numMissingTxs.Set(float64(t.missingTxs.Len())) - t.metrics.blockerVtxs.Set(float64(t.vtxBlocked.Len())) - t.metrics.blockerTxs.Set(float64(t.txBlocked.Len())) - return t.attemptToIssueTxs(ctx) -} - -func (t *Transitive) PullQuery(ctx context.Context, nodeID ids.NodeID, requestID uint32, vtxID ids.ID) error { - // If the chain is linearized, we don't care to attempt to issue any new - // vertices. - linearized, err := t.Manager.StopVertexAccepted(ctx) - if err != nil { - return err - } - if linearized { - // Immediately respond to the query with the stop vertex. - // - // Invariant: This is done here, because the Consensus instance may have - // never been initialized if bootstrapping accepted the stop vertex. - edge := t.Manager.Edge(ctx) - t.Sender.SendChits(ctx, nodeID, requestID, edge, edge) - return nil - } - - // Immediately respond to the query with the current consensus preferences. - t.Sender.SendChits(ctx, nodeID, requestID, t.Consensus.Preferences().List(), t.Manager.Edge(ctx)) - - // If we have [vtxID], attempt to put it into consensus, if we haven't - // already. If we don't not have [vtxID], fetch it from [nodeID]. 
- if _, err := t.issueFromByID(ctx, nodeID, vtxID); err != nil { - return err - } - - return t.attemptToIssueTxs(ctx) -} - -func (t *Transitive) PushQuery(ctx context.Context, nodeID ids.NodeID, requestID uint32, vtxBytes []byte) error { - // If the chain is linearized, we don't care to attempt to issue any new - // vertices. - linearized, err := t.Manager.StopVertexAccepted(ctx) - if err != nil { - return err - } - if linearized { - // Immediately respond to the query with the stop vertex. - // - // Invariant: This is done here, because the Consensus instance may have - // never been initialized if bootstrapping accepted the stop vertex. - edge := t.Manager.Edge(ctx) - t.Sender.SendChits(ctx, nodeID, requestID, edge, edge) - return nil - } - - // Immediately respond to the query with the current consensus preferences. - t.Sender.SendChits(ctx, nodeID, requestID, t.Consensus.Preferences().List(), t.Manager.Edge(ctx)) - - vtx, err := t.Manager.ParseVtx(ctx, vtxBytes) - if err != nil { - t.Ctx.Log.Debug("failed to parse vertex", - zap.Stringer("nodeID", nodeID), - zap.Uint32("requestID", requestID), - zap.Error(err), - ) - t.Ctx.Log.Verbo("failed to parse vertex", - zap.Stringer("nodeID", nodeID), - zap.Uint32("requestID", requestID), - zap.Binary("vertex", vtxBytes), - zap.Error(err), - ) - return nil - } - - if t.Consensus.VertexIssued(vtx) || t.pending.Contains(vtx.ID()) { - t.metrics.numUselessPushQueryBytes.Add(float64(len(vtxBytes))) - } - - if _, err := t.issueFrom(ctx, nodeID, vtx); err != nil { - return err - } - - return t.attemptToIssueTxs(ctx) -} - -func (t *Transitive) Chits(ctx context.Context, nodeID ids.NodeID, requestID uint32, votes []ids.ID, accepted []ids.ID) error { - // If the chain is linearized, we don't care to apply any votes. 
- linearized, err := t.Manager.StopVertexAccepted(ctx) - if err != nil { - return err - } - if linearized { - return nil - } - - t.acceptedFrontiers.SetAcceptedFrontier(nodeID, accepted) - - v := &voter{ - t: t, - vdr: nodeID, - requestID: requestID, - response: votes, - } - for _, vote := range votes { - if added, err := t.issueFromByID(ctx, nodeID, vote); err != nil { - return err - } else if !added { - v.deps.Add(vote) - } - } - - t.vtxBlocked.Register(ctx, v) - t.metrics.blockerVtxs.Set(float64(t.vtxBlocked.Len())) - return t.attemptToIssueTxs(ctx) -} - -func (t *Transitive) QueryFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32) error { - // If the chain is linearized, we don't care to apply any votes. - linearized, err := t.Manager.StopVertexAccepted(ctx) - if err != nil { - return err - } - if linearized { - return nil - } - - lastAccepted := t.acceptedFrontiers.AcceptedFrontier(nodeID) - return t.Chits(ctx, nodeID, requestID, lastAccepted, lastAccepted) -} - -func (*Transitive) Timeout(context.Context) error { - return nil -} - -func (t *Transitive) Gossip(ctx context.Context) error { - edge := t.Manager.Edge(ctx) - if len(edge) == 0 { - t.Ctx.Log.Verbo("dropping gossip request as no vertices have been accepted") - return nil - } - - if err := t.uniformSampler.Initialize(uint64(len(edge))); err != nil { - return err // Should never happen - } - indices, err := t.uniformSampler.Sample(1) - if err != nil { - return err // Also should never really happen because the edge has positive length - } - vtxID := edge[int(indices[0])] - vtx, err := t.Manager.GetVtx(ctx, vtxID) - if err != nil { - t.Ctx.Log.Warn("dropping gossip request", - zap.String("reason", "couldn't load vertex"), - zap.Stringer("vtxID", vtxID), - zap.Error(err), - ) - return nil - } - - t.Ctx.Log.Verbo("gossiping accepted vertex to the network", - zap.Stringer("vtxID", vtxID), - ) - t.Sender.SendGossip(ctx, vtx.Bytes()) - return nil -} - -func (*Transitive) Halt(context.Context) {} 
- -func (t *Transitive) Shutdown(ctx context.Context) error { - t.Ctx.Log.Info("shutting down consensus engine") - return t.VM.Shutdown(ctx) -} - -func (t *Transitive) Notify(ctx context.Context, msg common.Message) error { - // If the chain is linearized, we shouldn't be processing any messages from - // the VM anymore. - linearized, err := t.Manager.StopVertexAccepted(ctx) - if err != nil { - return err - } - if linearized { - return nil - } - - switch msg { - case common.PendingTxs: - // After the linearization, we shouldn't be building any new vertices - if cortinaTime, ok := version.CortinaTimes[t.Ctx.NetworkID]; ok && time.Now().After(cortinaTime) { - return nil - } - - txs := t.VM.PendingTxs(ctx) - t.pendingTxs = append(t.pendingTxs, txs...) - t.metrics.pendingTxs.Set(float64(len(t.pendingTxs))) - return t.attemptToIssueTxs(ctx) - - case common.StopVertex: - // stop vertex doesn't have any txs, issue directly! - return t.issueStopVtx(ctx) - - default: - t.Ctx.Log.Warn("received an unexpected message from the VM", - zap.Stringer("messageString", msg), - ) - return nil - } -} - -func (t *Transitive) Context() *snow.ConsensusContext { - return t.Ctx -} - -func (t *Transitive) Start(ctx context.Context, startReqID uint32) error { - t.RequestID = startReqID - // Load the vertices that were last saved as the accepted frontier - edge := t.Manager.Edge(ctx) - frontier := make([]avalanche.Vertex, 0, len(edge)) - for _, vtxID := range edge { - if vtx, err := t.Manager.GetVtx(ctx, vtxID); err == nil { - frontier = append(frontier, vtx) - } else { - t.Ctx.Log.Error("failed to load vertex from the frontier", - zap.Stringer("vtxID", vtxID), - zap.Error(err), - ) - } - } - - t.Ctx.Log.Info("consensus starting", - zap.Int("lenFrontier", len(frontier)), - ) - t.metrics.bootstrapFinished.Set(1) - - t.Ctx.State.Set(snow.EngineState{ - Type: p2p.EngineType_ENGINE_TYPE_AVALANCHE, - State: snow.NormalOp, - }) - if err := t.VM.SetState(ctx, snow.NormalOp); err != nil { - return 
fmt.Errorf("failed to notify VM that consensus has started: %w", - err) - } - return t.Consensus.Initialize(ctx, t.Ctx, t.Params, frontier) -} - -func (t *Transitive) HealthCheck(ctx context.Context) (interface{}, error) { - consensusIntf, consensusErr := t.Consensus.HealthCheck(ctx) - vmIntf, vmErr := t.VM.HealthCheck(ctx) - intf := map[string]interface{}{ - "consensus": consensusIntf, - "vm": vmIntf, - } - if consensusErr == nil { - return intf, vmErr - } - if vmErr == nil { - return intf, consensusErr - } - return intf, fmt.Errorf("vm: %w ; consensus: %v", vmErr, consensusErr) -} - -func (t *Transitive) GetVM() common.VM { - return t.VM -} - -func (t *Transitive) GetVtx(ctx context.Context, vtxID ids.ID) (avalanche.Vertex, error) { - // GetVtx returns a vertex by its ID. - // Returns database.ErrNotFound if unknown. - return t.Manager.GetVtx(ctx, vtxID) -} - -func (t *Transitive) attemptToIssueTxs(ctx context.Context) error { - err := t.errs.Err - if err != nil { - return err - } - - t.pendingTxs, err = t.batch(ctx, t.pendingTxs, batchOption{limit: true}) - t.metrics.pendingTxs.Set(float64(len(t.pendingTxs))) - return err -} - -// If there are pending transactions from the VM, issue them. -// If we're not already at the limit for number of concurrent polls, issue a new -// query. -func (t *Transitive) repoll(ctx context.Context) { - for i := t.polls.Len(); i < t.Params.ConcurrentRepolls && !t.errs.Errored(); i++ { - t.issueRepoll(ctx) - } -} - -// issueFromByID issues the branch ending with vertex [vtxID] to consensus. -// Fetches [vtxID] if we don't have it locally. -// Returns true if [vtx] has been added to consensus (now or previously) -func (t *Transitive) issueFromByID(ctx context.Context, nodeID ids.NodeID, vtxID ids.ID) (bool, error) { - vtx, err := t.Manager.GetVtx(ctx, vtxID) - if err != nil { - // We don't have [vtxID]. Request it. 
- t.sendRequest(ctx, nodeID, vtxID) - return false, nil - } - return t.issueFrom(ctx, nodeID, vtx) -} - -// issueFrom issues the branch ending with [vtx] to consensus. -// Assumes we have [vtx] locally -// Returns true if [vtx] has been added to consensus (now or previously) -func (t *Transitive) issueFrom(ctx context.Context, nodeID ids.NodeID, vtx avalanche.Vertex) (bool, error) { - issued := true - // Before we issue [vtx] into consensus, we have to issue its ancestors. - // Go through [vtx] and its ancestors. issue each ancestor that hasn't yet been issued. - // If we find a missing ancestor, fetch it and note that we can't issue [vtx] yet. - ancestry := vertex.NewHeap() - ancestry.Push(vtx) - for ancestry.Len() > 0 { - vtx := ancestry.Pop() - - if t.Consensus.VertexIssued(vtx) { - // This vertex has been issued --> its ancestors have been issued. - // No need to try to issue it or its ancestors - continue - } - if t.pending.Contains(vtx.ID()) { - issued = false - continue - } - - parents, err := vtx.Parents() - if err != nil { - return false, err - } - // Ensure we have ancestors of this vertex - for _, parent := range parents { - if !parent.Status().Fetched() { - // We don't have the parent. Request it. - t.sendRequest(ctx, nodeID, parent.ID()) - // We're missing an ancestor so we can't have issued the vtx in this method's argument - issued = false - } else { - // Come back to this vertex later to make sure it and its ancestors have been fetched/issued - ancestry.Push(parent) - } - } - - // Queue up this vertex to be issued once its dependencies are met - if err := t.issue(ctx, vtx); err != nil { - return false, err - } - } - return issued, nil -} - -// issue queues [vtx] to be put into consensus after its dependencies are met. -// Assumes we have [vtx]. 
-func (t *Transitive) issue(ctx context.Context, vtx avalanche.Vertex) error { - vtxID := vtx.ID() - - // Add to set of vertices that have been queued up to be issued but haven't been yet - t.pending.Add(vtxID) - t.outstandingVtxReqs.RemoveAny(vtxID) - - // Will put [vtx] into consensus once dependencies are met - i := &issuer{ - t: t, - vtx: vtx, - } - - parents, err := vtx.Parents() - if err != nil { - return err - } - for _, parent := range parents { - if !t.Consensus.VertexIssued(parent) { - // This parent hasn't been issued yet. Add it as a dependency. - i.vtxDeps.Add(parent.ID()) - } - } - - txs, err := vtx.Txs(ctx) - if err != nil { - return err - } - txIDs := set.NewSet[ids.ID](len(txs)) - for _, tx := range txs { - txIDs.Add(tx.ID()) - } - - for _, tx := range txs { - deps, err := tx.Dependencies() - if err != nil { - return err - } - for _, dep := range deps { - depID := dep.ID() - if !txIDs.Contains(depID) && !t.Consensus.TxIssued(dep) { - // This transaction hasn't been issued yet. Add it as a dependency. - t.missingTxs.Add(depID) - i.txDeps.Add(depID) - } - } - } - - t.Ctx.Log.Verbo("vertex is blocking", - zap.Stringer("vtxID", vtxID), - zap.Int("numVtxDeps", i.vtxDeps.Len()), - zap.Int("numTxDeps", i.txDeps.Len()), - ) - - // Wait until all the parents of [vtx] are added to consensus before adding [vtx] - t.vtxBlocked.Register(ctx, &vtxIssuer{i: i}) - // Wait until all the parents of [tx] are added to consensus before adding [vtx] - t.txBlocked.Register(ctx, &txIssuer{i: i}) - - if t.outstandingVtxReqs.Len() == 0 { - // There are no outstanding vertex requests but we don't have these transactions, so we're not getting them. 
- for txID := range t.missingTxs { - t.txBlocked.Abandon(ctx, txID) - } - t.missingTxs.Clear() - } - - // Track performance statistics - t.metrics.numVtxRequests.Set(float64(t.outstandingVtxReqs.Len())) - t.metrics.numMissingTxs.Set(float64(t.missingTxs.Len())) - t.metrics.numPendingVts.Set(float64(len(t.pending))) - t.metrics.blockerVtxs.Set(float64(t.vtxBlocked.Len())) - t.metrics.blockerTxs.Set(float64(t.txBlocked.Len())) - return t.errs.Err -} - -type batchOption struct { - // if [force], allow for a conflict to be issued, and force each tx to be issued - // otherwise, some txs may not be put into vertices that are issued. - force bool - // if [limit], stop when "Params.OptimalProcessing <= Consensus.NumProcessing" - limit bool -} - -// Batches [txs] into vertices and issue them. -func (t *Transitive) batch(ctx context.Context, txs []snowstorm.Tx, opt batchOption) ([]snowstorm.Tx, error) { - if len(txs) == 0 { - return nil, nil - } - if opt.limit && t.Params.OptimalProcessing <= t.Consensus.NumProcessing() { - return txs, nil - } - issuedTxs := set.Set[ids.ID]{} - consumed := set.Set[ids.ID]{} - orphans := t.Consensus.Orphans() - start := 0 - end := 0 - for end < len(txs) { - tx := txs[end] - inputs := set.Set[ids.ID]{} - inputs.Add(tx.InputIDs()...) 
- overlaps := consumed.Overlaps(inputs) - if end-start >= t.Params.BatchSize || (opt.force && overlaps) { - if err := t.issueBatch(ctx, txs[start:end]); err != nil { - return nil, err - } - if opt.limit && t.Params.OptimalProcessing <= t.Consensus.NumProcessing() { - return txs[end:], nil - } - start = end - consumed.Clear() - overlaps = false - } - - if txID := tx.ID(); !overlaps && // should never allow conflicting txs in the same vertex - !issuedTxs.Contains(txID) && // shouldn't issue duplicated transactions to the same vertex - (opt.force || t.Consensus.IsVirtuous(tx)) && // force allows for a conflict to be issued - (!t.Consensus.TxIssued(tx) || orphans.Contains(txID)) { // should only reissue orphaned txs - end++ - issuedTxs.Add(txID) - consumed.Union(inputs) - } else { - newLen := len(txs) - 1 - txs[end] = txs[newLen] - txs[newLen] = nil - txs = txs[:newLen] - } - } - - if end > start { - return txs[end:], t.issueBatch(ctx, txs[start:end]) - } - return txs[end:], nil -} - -// Issues a new poll for a preferred vertex in order to move consensus along -func (t *Transitive) issueRepoll(ctx context.Context) { - preferredIDs := t.Consensus.Preferences() - if preferredIDs.Len() == 0 { - t.Ctx.Log.Error("re-query attempt was dropped due to no pending vertices") - return - } - - vtxID := preferredIDs.CappedList(1)[0] - vdrIDs, err := t.Validators.Sample(t.Params.K) // Validators to sample - if err != nil { - t.Ctx.Log.Error("dropped re-query", - zap.String("reason", "insufficient number of validators"), - zap.Stringer("vtxID", vtxID), - zap.Error(err), - ) - return - } - - vdrBag := bag.Bag[ids.NodeID]{} // IDs of validators to be sampled - vdrBag.Add(vdrIDs...) - - vdrList := vdrBag.List() - vdrSet := set.NewSet[ids.NodeID](len(vdrList)) - vdrSet.Add(vdrList...) 
- - // Poll the network - t.RequestID++ - if t.polls.Add(t.RequestID, vdrBag) { - t.Sender.SendPullQuery(ctx, vdrSet, t.RequestID, vtxID) - } -} - -// Puts a batch of transactions into a vertex and issues it into consensus. -func (t *Transitive) issueBatch(ctx context.Context, txs []snowstorm.Tx) error { - t.Ctx.Log.Verbo("batching transactions into a new vertex", - zap.Int("numTxs", len(txs)), - ) - - // Randomly select parents of this vertex from among the virtuous set - virtuousIDs := t.Consensus.Virtuous().CappedList(t.Params.Parents) - numVirtuousIDs := len(virtuousIDs) - if err := t.uniformSampler.Initialize(uint64(numVirtuousIDs)); err != nil { - return err - } - - indices, err := t.uniformSampler.Sample(numVirtuousIDs) - if err != nil { - return err - } - - parentIDs := make([]ids.ID, len(indices)) - for i, index := range indices { - parentIDs[i] = virtuousIDs[int(index)] - } - - vtx, err := t.Manager.BuildVtx(ctx, parentIDs, txs) - if err != nil { - t.Ctx.Log.Warn("error building new vertex", - zap.Int("numParents", len(parentIDs)), - zap.Int("numTxs", len(txs)), - zap.Error(err), - ) - return nil - } - - return t.issue(ctx, vtx) -} - -// to be triggered via X-Chain API -func (t *Transitive) issueStopVtx(ctx context.Context) error { - // use virtuous frontier (accepted) as parents - virtuousSet := t.Consensus.Virtuous() - vtx, err := t.Manager.BuildStopVtx(ctx, virtuousSet.List()) - if err != nil { - t.Ctx.Log.Warn("error building new stop vertex", - zap.Int("numParents", virtuousSet.Len()), - zap.Error(err), - ) - return nil - } - return t.issue(ctx, vtx) -} - -// Send a request to [vdr] asking them to send us vertex [vtxID] -func (t *Transitive) sendRequest(ctx context.Context, nodeID ids.NodeID, vtxID ids.ID) { - if t.outstandingVtxReqs.Contains(vtxID) { - t.Ctx.Log.Debug("not sending request for vertex", - zap.String("reason", "existing outstanding request"), - zap.Stringer("vtxID", vtxID), - ) - return - } - t.RequestID++ - 
t.outstandingVtxReqs.Add(nodeID, t.RequestID, vtxID) // Mark that there is an outstanding request for this vertex - t.Sender.SendGet(ctx, nodeID, t.RequestID, vtxID) - t.metrics.numVtxRequests.Set(float64(t.outstandingVtxReqs.Len())) // Tracks performance statistics -} diff --git a/avalanchego/snow/engine/avalanche/transitive_test.go b/avalanchego/snow/engine/avalanche/transitive_test.go deleted file mode 100644 index 605e5505..00000000 --- a/avalanchego/snow/engine/avalanche/transitive_test.go +++ /dev/null @@ -1,5018 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package avalanche - -import ( - "bytes" - "context" - "errors" - "fmt" - "testing" - - "github.com/prometheus/client_golang/prometheus" - - "github.com/stretchr/testify/require" - - "golang.org/x/exp/slices" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/choices" - "github.com/ava-labs/avalanchego/snow/consensus/avalanche" - "github.com/ava-labs/avalanchego/snow/consensus/snowball" - "github.com/ava-labs/avalanchego/snow/consensus/snowstorm" - "github.com/ava-labs/avalanchego/snow/engine/avalanche/bootstrap" - "github.com/ava-labs/avalanchego/snow/engine/avalanche/vertex" - "github.com/ava-labs/avalanchego/snow/engine/common" - "github.com/ava-labs/avalanchego/snow/engine/common/tracker" - "github.com/ava-labs/avalanchego/snow/engine/snowman/block" - "github.com/ava-labs/avalanchego/snow/validators" - "github.com/ava-labs/avalanchego/utils" - "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/set" - "github.com/ava-labs/avalanchego/utils/wrappers" - "github.com/ava-labs/avalanchego/version" - - avagetter "github.com/ava-labs/avalanchego/snow/engine/avalanche/getter" -) - -var ( - errUnknownVertex = errors.New("unknown vertex") - errFailedParsing = errors.New("failed parsing") - errMissing = errors.New("missing") - errTest = errors.New("non-nil error") -) - 
-type dummyHandler struct { - startEngineF func(ctx context.Context, startReqID uint32) error -} - -func (dh *dummyHandler) onDoneBootstrapping(ctx context.Context, lastReqID uint32) error { - lastReqID++ - return dh.startEngineF(ctx, lastReqID) -} - -func noopStarter(context.Context, uint32) error { - return nil -} - -func TestEngineShutdown(t *testing.T) { - _, _, engCfg := DefaultConfig() - - vmShutdownCalled := false - vm := &vertex.TestVM{} - vm.T = t - vm.ShutdownF = func(context.Context) error { - vmShutdownCalled = true - return nil - } - engCfg.VM = vm - - transitive, err := newTransitive(engCfg, noopStarter) - if err != nil { - t.Fatal(err) - } - if err := transitive.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } - if !vmShutdownCalled { - t.Fatal("Shutting down the Transitive did not shutdown the VM") - } -} - -func TestEngineAdd(t *testing.T) { - _, _, engCfg := DefaultConfig() - - vals := validators.NewSet() - engCfg.Validators = vals - - vdr := ids.GenerateTestNodeID() - if err := vals.Add(vdr, nil, ids.Empty, 1); err != nil { - t.Fatal(err) - } - - sender := &common.SenderTest{T: t} - engCfg.Sender = sender - - sender.Default(true) - sender.CantSendGetAcceptedFrontier = false - - manager := vertex.NewTestManager(t) - engCfg.Manager = manager - - manager.Default(true) - - manager.CantEdge = false - - te, err := newTransitive(engCfg, noopStarter) - if err != nil { - t.Fatal(err) - } - - if err := te.Start(context.Background(), 0); err != nil { - t.Fatal(err) - } - - if te.Ctx.ChainID != ids.Empty { - t.Fatalf("Wrong chain ID") - } - - vtx := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: []avalanche.Vertex{ - &avalanche.TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Unknown, - }}, - }, - BytesV: []byte{1}, - } - - asked := new(bool) - reqID := new(uint32) - sender.SendGetF = func(_ 
context.Context, inVdr ids.NodeID, requestID uint32, vtxID ids.ID) { - *reqID = requestID - if *asked { - t.Fatalf("Asked multiple times") - } - *asked = true - if vdr != inVdr { - t.Fatalf("Asking wrong validator for vertex") - } - if vtx.ParentsV[0].ID() != vtxID { - t.Fatalf("Asking for wrong vertex") - } - } - - manager.ParseVtxF = func(_ context.Context, b []byte) (avalanche.Vertex, error) { - if !bytes.Equal(b, vtx.Bytes()) { - t.Fatalf("Wrong bytes") - } - return vtx, nil - } - - if err := te.Put(context.Background(), vdr, 0, vtx.Bytes()); err != nil { - t.Fatal(err) - } - - manager.ParseVtxF = nil - - if !*asked { - t.Fatalf("Didn't ask for a missing vertex") - } - - if len(te.vtxBlocked) != 1 { - t.Fatalf("Should have been blocking on request") - } - - manager.ParseVtxF = func(context.Context, []byte) (avalanche.Vertex, error) { - return nil, errFailedParsing - } - - if err := te.Put(context.Background(), vdr, *reqID, nil); err != nil { - t.Fatal(err) - } - - manager.ParseVtxF = nil - - if len(te.vtxBlocked) != 0 { - t.Fatalf("Should have finished blocking issue") - } -} - -func TestEngineQuery(t *testing.T) { - _, _, engCfg := DefaultConfig() - - vals := validators.NewSet() - engCfg.Validators = vals - - vdr := ids.GenerateTestNodeID() - if err := vals.Add(vdr, nil, ids.Empty, 1); err != nil { - t.Fatal(err) - } - - sender := &common.SenderTest{T: t} - engCfg.Sender = sender - - sender.Default(true) - sender.CantSendGetAcceptedFrontier = false - - manager := vertex.NewTestManager(t) - engCfg.Manager = manager - - manager.Default(true) - - gVtx := &avalanche.TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }} - mVtx := &avalanche.TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }} - - vts := []avalanche.Vertex{gVtx, mVtx} - utxos := []ids.ID{ids.GenerateTestID()} - - tx0 := &snowstorm.TestTx{TestDecidable: choices.TestDecidable{ - IDV: 
ids.GenerateTestID(), - StatusV: choices.Processing, - }} - tx0.InputIDsV = append(tx0.InputIDsV, utxos[0]) - - vtx0 := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: vts, - HeightV: 1, - TxsV: []snowstorm.Tx{tx0}, - BytesV: []byte{0, 1, 2, 3}, - } - - manager.EdgeF = func(context.Context) []ids.ID { - return []ids.ID{vts[0].ID(), vts[1].ID()} - } - manager.GetVtxF = func(_ context.Context, id ids.ID) (avalanche.Vertex, error) { - switch id { - case gVtx.ID(): - return gVtx, nil - case mVtx.ID(): - return mVtx, nil - } - - t.Fatalf("Unknown vertex") - panic("Should have errored") - } - - te, err := newTransitive(engCfg, noopStarter) - if err != nil { - t.Fatal(err) - } - - if err := te.Start(context.Background(), 0); err != nil { - t.Fatal(err) - } - - vertexed := new(bool) - manager.GetVtxF = func(_ context.Context, vtxID ids.ID) (avalanche.Vertex, error) { - if *vertexed { - t.Fatalf("Sent multiple requests") - } - *vertexed = true - if vtxID != vtx0.ID() { - t.Fatalf("Wrong vertex requested") - } - return nil, errUnknownVertex - } - - chitted := new(bool) - sender.SendChitsF = func(_ context.Context, inVdr ids.NodeID, _ uint32, prefs []ids.ID, _ []ids.ID) { - if *chitted { - t.Fatalf("Sent multiple chits") - } - *chitted = true - if len(prefs) != 2 { - t.Fatalf("Wrong chits preferences") - } - } - - asked := new(bool) - sender.SendGetF = func(_ context.Context, inVdr ids.NodeID, _ uint32, vtxID ids.ID) { - if *asked { - t.Fatalf("Asked multiple times") - } - *asked = true - if vdr != inVdr { - t.Fatalf("Asking wrong validator for vertex") - } - if vtx0.ID() != vtxID { - t.Fatalf("Asking for wrong vertex") - } - } - - // After receiving the pull query for [vtx0] we will first request [vtx0] - // from the peer, because it is currently unknown to the engine. 
- if err := te.PullQuery(context.Background(), vdr, 0, vtx0.ID()); err != nil { - t.Fatal(err) - } - - if !*vertexed { - t.Fatalf("Didn't request vertex") - } - if !*asked { - t.Fatalf("Didn't request vertex from validator") - } - - queried := new(bool) - queryRequestID := new(uint32) - sender.SendPushQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, vtx []byte) { - if *queried { - t.Fatalf("Asked multiple times") - } - *queried = true - *queryRequestID = requestID - vdrSet := set.Set[ids.NodeID]{} - vdrSet.Add(vdr) - if !inVdrs.Equals(vdrSet) { - t.Fatalf("Asking wrong validator for preference") - } - if !bytes.Equal(vtx0.Bytes(), vtx) { - t.Fatalf("Asking for wrong vertex") - } - } - - manager.ParseVtxF = func(_ context.Context, b []byte) (avalanche.Vertex, error) { - if !bytes.Equal(b, vtx0.Bytes()) { - t.Fatalf("Wrong bytes") - } - return vtx0, nil - } - - // Once the peer returns [vtx0], we will respond to its query and then issue - // our own push query for [vtx0]. 
- if err := te.Put(context.Background(), vdr, 0, vtx0.Bytes()); err != nil { - t.Fatal(err) - } - manager.ParseVtxF = nil - - if !*queried { - t.Fatalf("Didn't ask for preferences") - } - if !*chitted { - t.Fatalf("Didn't provide preferences") - } - - vtx1 := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: vts, - HeightV: 1, - TxsV: []snowstorm.Tx{tx0}, - BytesV: []byte{5, 4, 3, 2, 1, 9}, - } - - manager.GetVtxF = func(_ context.Context, vtxID ids.ID) (avalanche.Vertex, error) { - if vtxID == vtx0.ID() { - return &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - StatusV: choices.Unknown, - }, - }, nil - } - if vtxID == vtx1.ID() { - return nil, errUnknownVertex - } - t.Fatalf("Wrong vertex requested") - panic("Should have failed") - } - - *asked = false - sender.SendGetF = func(_ context.Context, inVdr ids.NodeID, _ uint32, vtxID ids.ID) { - if *asked { - t.Fatalf("Asked multiple times") - } - *asked = true - if vdr != inVdr { - t.Fatalf("Asking wrong validator for vertex") - } - if vtx1.ID() != vtxID { - t.Fatalf("Asking for wrong vertex") - } - } - - // The peer returned [vtx1] from our query for [vtx0], which means we will - // need to request the missing [vtx1]. 
- if err := te.Chits(context.Background(), vdr, *queryRequestID, []ids.ID{vtx1.ID()}, nil); err != nil { - t.Fatal(err) - } - - *queried = false - sender.SendPushQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, vtx []byte) { - if *queried { - t.Fatalf("Asked multiple times") - } - *queried = true - *queryRequestID = requestID - vdrSet := set.Set[ids.NodeID]{} - vdrSet.Add(vdr) - if !inVdrs.Equals(vdrSet) { - t.Fatalf("Asking wrong validator for preference") - } - if !bytes.Equal(vtx1.Bytes(), vtx) { - t.Fatalf("Asking for wrong vertex") - } - } - - manager.ParseVtxF = func(_ context.Context, b []byte) (avalanche.Vertex, error) { - if !bytes.Equal(b, vtx1.Bytes()) { - t.Fatalf("Wrong bytes") - } - - manager.GetVtxF = func(_ context.Context, vtxID ids.ID) (avalanche.Vertex, error) { - if vtxID == vtx0.ID() { - return &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - StatusV: choices.Processing, - }, - }, nil - } - if vtxID == vtx1.ID() { - return vtx1, nil - } - t.Fatalf("Wrong vertex requested") - panic("Should have failed") - } - - return vtx1, nil - } - - // Once the peer returns [vtx1], the poll that was issued for [vtx0] will be - // able to terminate. Additionally the node will issue a push query with - // [vtx1]. - if err := te.Put(context.Background(), vdr, 0, vtx1.Bytes()); err != nil { - t.Fatal(err) - } - manager.ParseVtxF = nil - - // Because [vtx1] does not transitively reference [vtx0], the transaction - // vertex for [vtx0] was never voted for. This results in [vtx0] still being - // in processing. - if vtx0.Status() != choices.Processing { - t.Fatalf("Shouldn't have executed the vertex yet") - } - if vtx1.Status() != choices.Accepted { - t.Fatalf("Should have executed the vertex") - } - if tx0.Status() != choices.Accepted { - t.Fatalf("Should have executed the transaction") - } - - // Make sure there is no memory leak for missing vertex tracking. 
- if len(te.vtxBlocked) != 0 { - t.Fatalf("Should have finished blocking") - } - - sender.CantSendPullQuery = false - - // Abandon the query for [vtx1]. This will result in a re-query for [vtx0]. - if err := te.QueryFailed(context.Background(), vdr, *queryRequestID); err != nil { - t.Fatal(err) - } - if len(te.vtxBlocked) != 0 { - t.Fatalf("Should have finished blocking") - } -} - -func TestEngineMultipleQuery(t *testing.T) { - _, _, engCfg := DefaultConfig() - - vals := validators.NewSet() - engCfg.Validators = vals - - engCfg.Params = avalanche.Parameters{ - Parameters: snowball.Parameters{ - K: 3, - Alpha: 2, - BetaVirtuous: 1, - BetaRogue: 2, - ConcurrentRepolls: 1, - OptimalProcessing: 100, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - MixedQueryNumPushNonVdr: 3, - }, - Parents: 2, - BatchSize: 1, - } - - vdr0 := ids.GenerateTestNodeID() - vdr1 := ids.GenerateTestNodeID() - vdr2 := ids.GenerateTestNodeID() - - errs := wrappers.Errs{} - errs.Add( - vals.Add(vdr0, nil, ids.Empty, 1), - vals.Add(vdr1, nil, ids.Empty, 1), - vals.Add(vdr2, nil, ids.Empty, 1), - ) - if errs.Errored() { - t.Fatal(errs.Err) - } - - sender := &common.SenderTest{T: t} - engCfg.Sender = sender - - sender.Default(true) - sender.CantSendGetAcceptedFrontier = false - - manager := vertex.NewTestManager(t) - engCfg.Manager = manager - - gVtx := &avalanche.TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }} - mVtx := &avalanche.TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }} - - vts := []avalanche.Vertex{gVtx, mVtx} - utxos := []ids.ID{ids.GenerateTestID()} - - manager.EdgeF = func(context.Context) []ids.ID { - return []ids.ID{vts[0].ID(), vts[1].ID()} - } - manager.GetVtxF = func(_ context.Context, id ids.ID) (avalanche.Vertex, error) { - switch id { - case gVtx.ID(): - return gVtx, nil - case mVtx.ID(): - return mVtx, nil - } - t.Fatalf("Unknown vertex") - 
panic("Should have errored") - } - - tx0 := &snowstorm.TestTx{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }} - tx0.InputIDsV = append(tx0.InputIDsV, utxos[0]) - - vtx0 := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: vts, - HeightV: 1, - TxsV: []snowstorm.Tx{tx0}, - } - - te, err := newTransitive(engCfg, noopStarter) - if err != nil { - t.Fatal(err) - } - - if err := te.Start(context.Background(), 0); err != nil { - t.Fatal(err) - } - - queried := new(bool) - queryRequestID := new(uint32) - sender.SendPushQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, vtx []byte) { - if *queried { - t.Fatalf("Asked multiple times") - } - *queried = true - *queryRequestID = requestID - vdrSet := set.Set[ids.NodeID]{} - vdrSet.Add(vdr0, vdr1, vdr2) - if !inVdrs.Equals(vdrSet) { - t.Fatalf("Asking wrong validator for preference") - } - if !bytes.Equal(vtx0.Bytes(), vtx) { - t.Fatalf("Asking for wrong vertex") - } - } - - if err := te.issue(context.Background(), vtx0); err != nil { - t.Fatal(err) - } - - vtx1 := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: vts, - HeightV: 1, - TxsV: []snowstorm.Tx{tx0}, - } - - manager.GetVtxF = func(_ context.Context, id ids.ID) (avalanche.Vertex, error) { - switch id { - case gVtx.ID(): - return gVtx, nil - case mVtx.ID(): - return mVtx, nil - case vtx0.ID(): - return vtx0, nil - case vtx1.ID(): - return nil, errUnknownVertex - } - t.Fatalf("Unknown vertex") - panic("Should have errored") - } - - asked := new(bool) - reqID := new(uint32) - sender.SendGetF = func(_ context.Context, inVdr ids.NodeID, requestID uint32, vtxID ids.ID) { - *reqID = requestID - if *asked { - t.Fatalf("Asked multiple times") - } - *asked = true - if vdr0 != inVdr { - t.Fatalf("Asking wrong validator for 
vertex") - } - if vtx1.ID() != vtxID { - t.Fatalf("Asking for wrong vertex") - } - } - - s0 := []ids.ID{vtx0.ID(), vtx1.ID()} - - s2 := []ids.ID{vtx0.ID()} - - if err := te.Chits(context.Background(), vdr0, *queryRequestID, s0, nil); err != nil { - t.Fatal(err) - } - if err := te.QueryFailed(context.Background(), vdr1, *queryRequestID); err != nil { - t.Fatal(err) - } - if err := te.Chits(context.Background(), vdr2, *queryRequestID, s2, nil); err != nil { - t.Fatal(err) - } - - // Should be dropped because the query was marked as failed - if err := te.Chits(context.Background(), vdr1, *queryRequestID, s0, nil); err != nil { - t.Fatal(err) - } - - if err := te.GetFailed(context.Background(), vdr0, *reqID); err != nil { - t.Fatal(err) - } - - if vtx0.Status() != choices.Accepted { - t.Fatalf("Should have executed vertex") - } - if len(te.vtxBlocked) != 0 { - t.Fatalf("Should have finished blocking") - } -} - -func TestEngineBlockedIssue(t *testing.T) { - _, _, engCfg := DefaultConfig() - - vals := validators.NewSet() - engCfg.Validators = vals - - vdr := ids.GenerateTestNodeID() - if err := vals.Add(vdr, nil, ids.Empty, 1); err != nil { - t.Fatal(err) - } - - manager := vertex.NewTestManager(t) - engCfg.Manager = manager - - gVtx := &avalanche.TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }} - mVtx := &avalanche.TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }} - - vts := []avalanche.Vertex{gVtx, mVtx} - utxos := []ids.ID{ids.GenerateTestID()} - - tx0 := &snowstorm.TestTx{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }} - tx0.InputIDsV = append(tx0.InputIDsV, utxos[0]) - - vtx0 := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: vts, - HeightV: 1, - TxsV: []snowstorm.Tx{tx0}, - } - - vtx1 := 
&avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: []avalanche.Vertex{ - &avalanche.TestVertex{TestDecidable: choices.TestDecidable{ - IDV: vtx0.IDV, - StatusV: choices.Unknown, - }}, - }, - HeightV: 1, - TxsV: []snowstorm.Tx{tx0}, - } - - te, err := newTransitive(engCfg, noopStarter) - if err != nil { - t.Fatal(err) - } - - if err := te.Start(context.Background(), 0); err != nil { - t.Fatal(err) - } - - if err := te.issue(context.Background(), vtx1); err != nil { - t.Fatal(err) - } - - vtx1.ParentsV[0] = vtx0 - if err := te.issue(context.Background(), vtx0); err != nil { - t.Fatal(err) - } - - if prefs := te.Consensus.Preferences(); prefs.Len() != 1 || !prefs.Contains(vtx1.ID()) { - t.Fatalf("Should have issued vtx1") - } -} - -func TestEngineAbandonResponse(t *testing.T) { - _, _, engCfg := DefaultConfig() - - vals := validators.NewSet() - engCfg.Validators = vals - - vdr := ids.GenerateTestNodeID() - if err := vals.Add(vdr, nil, ids.Empty, 1); err != nil { - t.Fatal(err) - } - - manager := vertex.NewTestManager(t) - engCfg.Manager = manager - - sender := &common.SenderTest{T: t} - engCfg.Sender = sender - - sender.Default(true) - - gVtx := &avalanche.TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }} - mVtx := &avalanche.TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }} - - vts := []avalanche.Vertex{gVtx, mVtx} - utxos := []ids.ID{ids.GenerateTestID()} - - tx0 := &snowstorm.TestTx{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }} - tx0.InputIDsV = append(tx0.InputIDsV, utxos[0]) - - vtx := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: vts, - HeightV: 1, - TxsV: []snowstorm.Tx{tx0}, - } - - manager.GetVtxF = 
func(context.Context, ids.ID) (avalanche.Vertex, error) { - return nil, errUnknownVertex - } - - te, err := newTransitive(engCfg, noopStarter) - if err != nil { - t.Fatal(err) - } - - if err := te.Start(context.Background(), 0); err != nil { - t.Fatal(err) - } - - reqID := new(uint32) - sender.SendGetF = func(_ context.Context, vID ids.NodeID, requestID uint32, vtxID ids.ID) { - *reqID = requestID - } - sender.CantSendChits = false - - if err := te.PullQuery(context.Background(), vdr, 0, vtx.ID()); err != nil { - t.Fatal(err) - } - if err := te.GetFailed(context.Background(), vdr, *reqID); err != nil { - t.Fatal(err) - } - - if len(te.vtxBlocked) != 0 { - t.Fatalf("Should have removed blocking event") - } -} - -func TestEngineScheduleRepoll(t *testing.T) { - _, _, engCfg := DefaultConfig() - - vals := validators.NewSet() - engCfg.Validators = vals - - vdr := ids.GenerateTestNodeID() - if err := vals.Add(vdr, nil, ids.Empty, 1); err != nil { - t.Fatal(err) - } - - gVtx := &avalanche.TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }} - mVtx := &avalanche.TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }} - - vts := []avalanche.Vertex{gVtx, mVtx} - utxos := []ids.ID{ids.GenerateTestID()} - - tx0 := &snowstorm.TestTx{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }} - tx0.InputIDsV = append(tx0.InputIDsV, utxos[0]) - - vtx := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: vts, - HeightV: 1, - TxsV: []snowstorm.Tx{tx0}, - } - - manager := vertex.NewTestManager(t) - engCfg.Manager = manager - - manager.Default(true) - manager.CantEdge = false - - sender := &common.SenderTest{T: t} - engCfg.Sender = sender - - sender.Default(true) - sender.CantSendGetAcceptedFrontier = false - - te, err := 
newTransitive(engCfg, noopStarter) - if err != nil { - t.Fatal(err) - } - - if err := te.Start(context.Background(), 0); err != nil { - t.Fatal(err) - } - - requestID := new(uint32) - sender.SendPushQueryF = func(_ context.Context, _ set.Set[ids.NodeID], reqID uint32, _ []byte) { - *requestID = reqID - } - - if err := te.issue(context.Background(), vtx); err != nil { - t.Fatal(err) - } - - sender.SendPushQueryF = nil - - repolled := new(bool) - sender.SendPullQueryF = func(_ context.Context, _ set.Set[ids.NodeID], _ uint32, vtxID ids.ID) { - *repolled = true - if vtxID != vtx.ID() { - t.Fatalf("Wrong vertex queried") - } - } - - if err := te.QueryFailed(context.Background(), vdr, *requestID); err != nil { - t.Fatal(err) - } - - if !*repolled { - t.Fatalf("Should have issued a noop") - } -} - -func TestEngineRejectDoubleSpendTx(t *testing.T) { - _, _, engCfg := DefaultConfig() - - engCfg.Params.BatchSize = 2 - - sender := &common.SenderTest{T: t} - engCfg.Sender = sender - - sender.Default(true) - sender.CantSendGetAcceptedFrontier = false - - vals := validators.NewSet() - engCfg.Validators = vals - - vdr := ids.GenerateTestNodeID() - if err := vals.Add(vdr, nil, ids.Empty, 1); err != nil { - t.Fatal(err) - } - - manager := vertex.NewTestManager(t) - engCfg.Manager = manager - manager.Default(true) - - vm := &vertex.TestVM{TestVM: block.TestVM{TestVM: common.TestVM{T: t}}} - engCfg.VM = vm - vm.Default(true) - - gVtx := &avalanche.TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }} - mVtx := &avalanche.TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }} - - gTx := &snowstorm.TestTx{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }} - - utxos := []ids.ID{ids.GenerateTestID()} - - tx0 := &snowstorm.TestTx{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: 
choices.Processing, - }, - DependenciesV: []snowstorm.Tx{gTx}, - } - tx0.InputIDsV = append(tx0.InputIDsV, utxos[0]) - - tx1 := &snowstorm.TestTx{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - DependenciesV: []snowstorm.Tx{gTx}, - } - tx1.InputIDsV = append(tx1.InputIDsV, utxos[0]) - - manager.EdgeF = func(context.Context) []ids.ID { - return []ids.ID{gVtx.ID(), mVtx.ID()} - } - manager.GetVtxF = func(_ context.Context, id ids.ID) (avalanche.Vertex, error) { - switch id { - case gVtx.ID(): - return gVtx, nil - case mVtx.ID(): - return mVtx, nil - } - t.Fatalf("Unknown vertex") - panic("Should have errored") - } - manager.BuildVtxF = func(_ context.Context, _ []ids.ID, txs []snowstorm.Tx) (avalanche.Vertex, error) { - return &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: []avalanche.Vertex{gVtx, mVtx}, - HeightV: 1, - TxsV: txs, - BytesV: []byte{1}, - }, nil - } - - vm.CantSetState = false - te, err := newTransitive(engCfg, noopStarter) - if err != nil { - t.Fatal(err) - } - - if err := te.Start(context.Background(), 0); err != nil { - t.Fatal(err) - } - - vm.CantSetState = true - sender.CantSendPushQuery = false - vm.PendingTxsF = func(context.Context) []snowstorm.Tx { - return []snowstorm.Tx{tx0, tx1} - } - if err := te.Notify(context.Background(), common.PendingTxs); err != nil { - t.Fatal(err) - } -} - -func TestEngineRejectDoubleSpendIssuedTx(t *testing.T) { - _, _, engCfg := DefaultConfig() - - engCfg.Params.BatchSize = 2 - - sender := &common.SenderTest{T: t} - engCfg.Sender = sender - sender.Default(true) - sender.CantSendGetAcceptedFrontier = false - - vals := validators.NewSet() - engCfg.Validators = vals - - vdr := ids.GenerateTestNodeID() - if err := vals.Add(vdr, nil, ids.Empty, 1); err != nil { - t.Fatal(err) - } - - manager := vertex.NewTestManager(t) - engCfg.Manager = manager - 
manager.Default(true) - - vm := &vertex.TestVM{TestVM: block.TestVM{TestVM: common.TestVM{T: t}}} - engCfg.VM = vm - vm.Default(true) - - gVtx := &avalanche.TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }} - mVtx := &avalanche.TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }} - - gTx := &snowstorm.TestTx{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }} - - utxos := []ids.ID{ids.GenerateTestID()} - - tx0 := &snowstorm.TestTx{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - DependenciesV: []snowstorm.Tx{gTx}, - } - tx0.InputIDsV = append(tx0.InputIDsV, utxos[0]) - - tx1 := &snowstorm.TestTx{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - DependenciesV: []snowstorm.Tx{gTx}, - } - tx1.InputIDsV = append(tx1.InputIDsV, utxos[0]) - - manager.EdgeF = func(context.Context) []ids.ID { - return []ids.ID{gVtx.ID(), mVtx.ID()} - } - manager.GetVtxF = func(_ context.Context, id ids.ID) (avalanche.Vertex, error) { - switch id { - case gVtx.ID(): - return gVtx, nil - case mVtx.ID(): - return mVtx, nil - } - t.Fatalf("Unknown vertex") - panic("Should have errored") - } - - vm.CantSetState = false - te, err := newTransitive(engCfg, noopStarter) - if err != nil { - t.Fatal(err) - } - - if err := te.Start(context.Background(), 0); err != nil { - t.Fatal(err) - } - - vm.CantSetState = true - manager.BuildVtxF = func(_ context.Context, _ []ids.ID, txs []snowstorm.Tx) (avalanche.Vertex, error) { - return &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: []avalanche.Vertex{gVtx, mVtx}, - HeightV: 1, - TxsV: txs, - BytesV: []byte{1}, - }, nil - } - - sender.CantSendPushQuery = false - - vm.PendingTxsF = 
func(context.Context) []snowstorm.Tx { - return []snowstorm.Tx{tx0} - } - if err := te.Notify(context.Background(), common.PendingTxs); err != nil { - t.Fatal(err) - } - - vm.PendingTxsF = func(context.Context) []snowstorm.Tx { - return []snowstorm.Tx{tx1} - } - if err := te.Notify(context.Background(), common.PendingTxs); err != nil { - t.Fatal(err) - } -} - -func TestEngineIssueRepoll(t *testing.T) { - _, _, engCfg := DefaultConfig() - - engCfg.Params.BatchSize = 2 - - sender := &common.SenderTest{T: t} - sender.Default(true) - sender.CantSendGetAcceptedFrontier = false - engCfg.Sender = sender - - vals := validators.NewSet() - engCfg.Validators = vals - - vdr := ids.GenerateTestNodeID() - if err := vals.Add(vdr, nil, ids.Empty, 1); err != nil { - t.Fatal(err) - } - - manager := vertex.NewTestManager(t) - manager.Default(true) - engCfg.Manager = manager - - gVtx := &avalanche.TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }} - mVtx := &avalanche.TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }} - - manager.EdgeF = func(context.Context) []ids.ID { - return []ids.ID{gVtx.ID(), mVtx.ID()} - } - manager.GetVtxF = func(_ context.Context, id ids.ID) (avalanche.Vertex, error) { - switch id { - case gVtx.ID(): - return gVtx, nil - case mVtx.ID(): - return mVtx, nil - } - t.Fatalf("Unknown vertex") - panic("Should have errored") - } - - te, err := newTransitive(engCfg, noopStarter) - if err != nil { - t.Fatal(err) - } - - if err := te.Start(context.Background(), 0); err != nil { - t.Fatal(err) - } - - sender.SendPullQueryF = func(_ context.Context, vdrs set.Set[ids.NodeID], _ uint32, vtxID ids.ID) { - vdrSet := set.Set[ids.NodeID]{} - vdrSet.Add(vdr) - if !vdrs.Equals(vdrSet) { - t.Fatalf("Wrong query recipients") - } - if vtxID != gVtx.ID() && vtxID != mVtx.ID() { - t.Fatalf("Unknown re-query") - } - } - - te.repoll(context.Background()) - if err := 
te.errs.Err; err != nil { - t.Fatal(err) - } -} - -func TestEngineReissue(t *testing.T) { - _, _, engCfg := DefaultConfig() - - engCfg.Params.BatchSize = 2 - engCfg.Params.BetaVirtuous = 5 - engCfg.Params.BetaRogue = 5 - - sender := &common.SenderTest{T: t} - sender.Default(true) - sender.CantSendGetAcceptedFrontier = false - engCfg.Sender = sender - - vals := validators.NewSet() - engCfg.Validators = vals - - vdr := ids.GenerateTestNodeID() - if err := vals.Add(vdr, nil, ids.Empty, 1); err != nil { - t.Fatal(err) - } - - manager := vertex.NewTestManager(t) - manager.Default(true) - engCfg.Manager = manager - - vm := &vertex.TestVM{TestVM: block.TestVM{TestVM: common.TestVM{T: t}}} - vm.Default(true) - engCfg.VM = vm - - gVtx := &avalanche.TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }} - mVtx := &avalanche.TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }} - - gTx := &snowstorm.TestTx{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }} - - utxos := []ids.ID{ids.GenerateTestID(), ids.GenerateTestID()} - - tx0 := &snowstorm.TestTx{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - DependenciesV: []snowstorm.Tx{gTx}, - } - tx0.InputIDsV = append(tx0.InputIDsV, utxos[0]) - - tx1 := &snowstorm.TestTx{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - DependenciesV: []snowstorm.Tx{gTx}, - } - tx1.InputIDsV = append(tx1.InputIDsV, utxos[1]) - - tx2 := &snowstorm.TestTx{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - DependenciesV: []snowstorm.Tx{gTx}, - } - tx2.InputIDsV = append(tx2.InputIDsV, utxos[1]) - - tx3 := &snowstorm.TestTx{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: 
choices.Processing, - }, - DependenciesV: []snowstorm.Tx{gTx}, - } - tx3.InputIDsV = append(tx3.InputIDsV, utxos[0]) - - vtx := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: []avalanche.Vertex{gVtx, mVtx}, - HeightV: 1, - TxsV: []snowstorm.Tx{tx2}, - BytesV: []byte{42}, - } - - manager.EdgeF = func(context.Context) []ids.ID { - return []ids.ID{gVtx.ID(), mVtx.ID()} - } - manager.GetVtxF = func(_ context.Context, id ids.ID) (avalanche.Vertex, error) { - switch id { - case gVtx.ID(): - return gVtx, nil - case mVtx.ID(): - return mVtx, nil - case vtx.ID(): - return vtx, nil - } - t.Fatalf("Unknown vertex") - panic("Should have errored") - } - - vm.CantSetState = false - te, err := newTransitive(engCfg, noopStarter) - if err != nil { - t.Fatal(err) - } - - if err := te.Start(context.Background(), 0); err != nil { - t.Fatal(err) - } - - vm.CantSetState = true - lastVtx := new(avalanche.TestVertex) - manager.BuildVtxF = func(_ context.Context, _ []ids.ID, txs []snowstorm.Tx) (avalanche.Vertex, error) { - lastVtx = &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: []avalanche.Vertex{gVtx, mVtx}, - HeightV: 1, - TxsV: txs, - BytesV: []byte{1}, - } - return lastVtx, nil - } - - vm.GetTxF = func(_ context.Context, id ids.ID) (snowstorm.Tx, error) { - if id != tx0.ID() { - t.Fatalf("Wrong tx") - } - return tx0, nil - } - - queryRequestID := new(uint32) - sender.SendPushQueryF = func(_ context.Context, _ set.Set[ids.NodeID], requestID uint32, _ []byte) { - *queryRequestID = requestID - } - - vm.PendingTxsF = func(context.Context) []snowstorm.Tx { - return []snowstorm.Tx{tx0, tx1} - } - if err := te.Notify(context.Background(), common.PendingTxs); err != nil { - t.Fatal(err) - } - - manager.ParseVtxF = func(_ context.Context, b []byte) (avalanche.Vertex, error) { - if !bytes.Equal(b, vtx.Bytes()) 
{ - t.Fatalf("Wrong bytes") - } - return vtx, nil - } - - // must vote on the first poll for the second one to settle - // *queryRequestID is 1 - if err := te.Chits(context.Background(), vdr, *queryRequestID, []ids.ID{vtx.ID()}, nil); err != nil { - t.Fatal(err) - } - - if err := te.Put(context.Background(), vdr, 0, vtx.Bytes()); err != nil { - t.Fatal(err) - } - manager.ParseVtxF = nil - - vm.PendingTxsF = func(context.Context) []snowstorm.Tx { - return []snowstorm.Tx{tx3} - } - if err := te.Notify(context.Background(), common.PendingTxs); err != nil { - t.Fatal(err) - } - - // vote on second poll, *queryRequestID is 2 - if err := te.Chits(context.Background(), vdr, *queryRequestID, []ids.ID{vtx.ID()}, nil); err != nil { - t.Fatal(err) - } - - // all polls settled - - if len(lastVtx.TxsV) != 1 || lastVtx.TxsV[0].ID() != tx0.ID() { - t.Fatalf("Should have re-issued the tx") - } -} - -func TestEngineLargeIssue(t *testing.T) { - _, _, engCfg := DefaultConfig() - engCfg.Params.BatchSize = 1 - engCfg.Params.BetaVirtuous = 5 - engCfg.Params.BetaRogue = 5 - - sender := &common.SenderTest{T: t} - sender.Default(true) - sender.CantSendGetAcceptedFrontier = false - engCfg.Sender = sender - - vals := validators.NewSet() - engCfg.Validators = vals - - vdr := ids.GenerateTestNodeID() - if err := vals.Add(vdr, nil, ids.Empty, 1); err != nil { - t.Fatal(err) - } - - manager := vertex.NewTestManager(t) - manager.Default(true) - engCfg.Manager = manager - - vm := &vertex.TestVM{TestVM: block.TestVM{TestVM: common.TestVM{T: t}}} - vm.Default(true) - engCfg.VM = vm - - gVtx := &avalanche.TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }} - mVtx := &avalanche.TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }} - - gTx := &snowstorm.TestTx{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }} - - utxos := 
[]ids.ID{ids.GenerateTestID(), ids.GenerateTestID()} - - tx0 := &snowstorm.TestTx{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - DependenciesV: []snowstorm.Tx{gTx}, - } - tx0.InputIDsV = append(tx0.InputIDsV, utxos[0]) - - tx1 := &snowstorm.TestTx{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - DependenciesV: []snowstorm.Tx{gTx}, - } - tx1.InputIDsV = append(tx1.InputIDsV, utxos[1]) - - manager.EdgeF = func(context.Context) []ids.ID { - return []ids.ID{gVtx.ID(), mVtx.ID()} - } - manager.GetVtxF = func(_ context.Context, id ids.ID) (avalanche.Vertex, error) { - switch id { - case gVtx.ID(): - return gVtx, nil - case mVtx.ID(): - return mVtx, nil - } - t.Fatalf("Unknown vertex") - panic("Should have errored") - } - - vm.CantSetState = false - te, err := newTransitive(engCfg, noopStarter) - if err != nil { - t.Fatal(err) - } - - if err := te.Start(context.Background(), 0); err != nil { - t.Fatal(err) - } - - vm.CantSetState = true - lastVtx := new(avalanche.TestVertex) - manager.BuildVtxF = func(_ context.Context, _ []ids.ID, txs []snowstorm.Tx) (avalanche.Vertex, error) { - lastVtx = &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: []avalanche.Vertex{gVtx, mVtx}, - HeightV: 1, - TxsV: txs, - BytesV: []byte{1}, - } - return lastVtx, nil - } - - sender.CantSendPushQuery = false - - vm.PendingTxsF = func(context.Context) []snowstorm.Tx { - return []snowstorm.Tx{tx0, tx1} - } - if err := te.Notify(context.Background(), common.PendingTxs); err != nil { - t.Fatal(err) - } - - if len(lastVtx.TxsV) != 1 || lastVtx.TxsV[0].ID() != tx1.ID() { - t.Fatalf("Should have issued txs differently") - } -} - -func TestEngineGetVertex(t *testing.T) { - commonCfg, _, engCfg := DefaultConfig() - - sender := &common.SenderTest{T: t} - sender.Default(true) - 
sender.CantSendGetAcceptedFrontier = false - engCfg.Sender = sender - - vdrID := ids.GenerateTestNodeID() - - manager := vertex.NewTestManager(t) - manager.Default(true) - engCfg.Manager = manager - avaGetHandler, err := avagetter.New(manager, commonCfg) - if err != nil { - t.Fatal(err) - } - engCfg.AllGetsServer = avaGetHandler - - gVtx := &avalanche.TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }} - mVtx := &avalanche.TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }} - - manager.EdgeF = func(context.Context) []ids.ID { - return []ids.ID{gVtx.ID(), mVtx.ID()} - } - manager.GetVtxF = func(_ context.Context, id ids.ID) (avalanche.Vertex, error) { - switch id { - case gVtx.ID(): - return gVtx, nil - case mVtx.ID(): - return mVtx, nil - } - t.Fatalf("Unknown vertex") - panic("Should have errored") - } - - te, err := newTransitive(engCfg, noopStarter) - if err != nil { - t.Fatal(err) - } - - if err := te.Start(context.Background(), 0); err != nil { - t.Fatal(err) - } - - sender.SendPutF = func(_ context.Context, v ids.NodeID, _ uint32, vtx []byte) { - if v != vdrID { - t.Fatalf("Wrong validator") - } - if !bytes.Equal(mVtx.Bytes(), vtx) { - t.Fatalf("Wrong vertex") - } - } - - if err := te.Get(context.Background(), vdrID, 0, mVtx.ID()); err != nil { - t.Fatal(err) - } -} - -func TestEngineInsufficientValidators(t *testing.T) { - _, _, engCfg := DefaultConfig() - - vals := validators.NewSet() - engCfg.Validators = vals - - sender := &common.SenderTest{T: t} - sender.Default(true) - sender.CantSendGetAcceptedFrontier = false - engCfg.Sender = sender - - manager := vertex.NewTestManager(t) - manager.Default(true) - engCfg.Manager = manager - - gVtx := &avalanche.TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }} - mVtx := &avalanche.TestVertex{TestDecidable: choices.TestDecidable{ - IDV: 
ids.GenerateTestID(), - StatusV: choices.Accepted, - }} - - vts := []avalanche.Vertex{gVtx, mVtx} - - vtx := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: vts, - HeightV: 1, - BytesV: []byte{0, 1, 2, 3}, - } - - manager.EdgeF = func(context.Context) []ids.ID { - return []ids.ID{vts[0].ID(), vts[1].ID()} - } - manager.GetVtxF = func(_ context.Context, id ids.ID) (avalanche.Vertex, error) { - switch id { - case gVtx.ID(): - return gVtx, nil - case mVtx.ID(): - return mVtx, nil - } - t.Fatalf("Unknown vertex") - panic("Should have errored") - } - - te, err := newTransitive(engCfg, noopStarter) - if err != nil { - t.Fatal(err) - } - - if err := te.Start(context.Background(), 0); err != nil { - t.Fatal(err) - } - - queried := new(bool) - sender.SendPushQueryF = func(context.Context, set.Set[ids.NodeID], uint32, []byte) { - *queried = true - } - - if err := te.issue(context.Background(), vtx); err != nil { - t.Fatal(err) - } - - if *queried { - t.Fatalf("Unknown query") - } -} - -func TestEnginePushGossip(t *testing.T) { - _, _, engCfg := DefaultConfig() - - vals := validators.NewSet() - engCfg.Validators = vals - - vdr := ids.GenerateTestNodeID() - if err := vals.Add(vdr, nil, ids.Empty, 1); err != nil { - t.Fatal(err) - } - - sender := &common.SenderTest{T: t} - sender.Default(true) - sender.CantSendGetAcceptedFrontier = false - engCfg.Sender = sender - - manager := vertex.NewTestManager(t) - manager.Default(true) - engCfg.Manager = manager - - gVtx := &avalanche.TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }} - mVtx := &avalanche.TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }} - - vts := []avalanche.Vertex{gVtx, mVtx} - - vtx := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, 
- }, - ParentsV: vts, - HeightV: 1, - BytesV: []byte{0, 1, 2, 3}, - } - - manager.EdgeF = func(context.Context) []ids.ID { - return []ids.ID{vts[0].ID(), vts[1].ID()} - } - manager.GetVtxF = func(_ context.Context, id ids.ID) (avalanche.Vertex, error) { - switch id { - case gVtx.ID(): - return gVtx, nil - case mVtx.ID(): - return mVtx, nil - case vtx.ID(): - return vtx, nil - } - t.Fatalf("Unknown vertex") - panic("Should have errored") - } - - te, err := newTransitive(engCfg, noopStarter) - if err != nil { - t.Fatal(err) - } - - if err := te.Start(context.Background(), 0); err != nil { - t.Fatal(err) - } - - requested := new(bool) - sender.SendGetF = func(_ context.Context, vdr ids.NodeID, _ uint32, vtxID ids.ID) { - *requested = true - } - - manager.ParseVtxF = func(_ context.Context, b []byte) (avalanche.Vertex, error) { - if bytes.Equal(b, vtx.BytesV) { - return vtx, nil - } - t.Fatalf("Unknown vertex bytes") - panic("Should have errored") - } - - sender.CantSendPushQuery = false - sender.CantSendChits = false - if err := te.PushQuery(context.Background(), vdr, 0, vtx.Bytes()); err != nil { - t.Fatal(err) - } - - if *requested { - t.Fatalf("Shouldn't have requested the vertex") - } -} - -func TestEngineSingleQuery(t *testing.T) { - _, _, engCfg := DefaultConfig() - - vals := validators.NewSet() - engCfg.Validators = vals - - vdr := ids.GenerateTestNodeID() - if err := vals.Add(vdr, nil, ids.Empty, 1); err != nil { - t.Fatal(err) - } - - sender := &common.SenderTest{T: t} - sender.Default(true) - sender.CantSendGetAcceptedFrontier = false - engCfg.Sender = sender - - manager := vertex.NewTestManager(t) - manager.Default(true) - engCfg.Manager = manager - - gVtx := &avalanche.TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }} - mVtx := &avalanche.TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }} - - vts := []avalanche.Vertex{gVtx, mVtx} - - 
vtx := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: vts, - HeightV: 1, - BytesV: []byte{0, 1, 2, 3}, - } - - manager.EdgeF = func(context.Context) []ids.ID { - return []ids.ID{vts[0].ID(), vts[1].ID()} - } - manager.GetVtxF = func(_ context.Context, id ids.ID) (avalanche.Vertex, error) { - switch id { - case gVtx.ID(): - return gVtx, nil - case mVtx.ID(): - return mVtx, nil - case vtx.ID(): - return vtx, nil - } - t.Fatalf("Unknown vertex") - panic("Should have errored") - } - - te, err := newTransitive(engCfg, noopStarter) - if err != nil { - t.Fatal(err) - } - - if err := te.Start(context.Background(), 0); err != nil { - t.Fatal(err) - } - - sender.CantSendPushQuery = false - sender.CantSendPullQuery = false - - if err := te.issue(context.Background(), vtx); err != nil { - t.Fatal(err) - } -} - -func TestEngineParentBlockingInsert(t *testing.T) { - _, _, engCfg := DefaultConfig() - - vals := validators.NewSet() - engCfg.Validators = vals - - vdr := ids.GenerateTestNodeID() - if err := vals.Add(vdr, nil, ids.Empty, 1); err != nil { - t.Fatal(err) - } - - sender := &common.SenderTest{T: t} - sender.Default(true) - sender.CantSendGetAcceptedFrontier = false - engCfg.Sender = sender - - manager := vertex.NewTestManager(t) - manager.Default(true) - engCfg.Manager = manager - - gVtx := &avalanche.TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }} - mVtx := &avalanche.TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }} - - vts := []avalanche.Vertex{gVtx, mVtx} - - missingVtx := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Unknown, - }, - ParentsV: vts, - HeightV: 1, - BytesV: []byte{0, 1, 2, 3}, - } - - parentVtx := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: 
ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: []avalanche.Vertex{missingVtx}, - HeightV: 2, - BytesV: []byte{0, 1, 2, 3}, - } - - blockingVtx := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: []avalanche.Vertex{parentVtx}, - HeightV: 3, - BytesV: []byte{0, 1, 2, 3}, - } - - manager.EdgeF = func(context.Context) []ids.ID { - return []ids.ID{vts[0].ID(), vts[1].ID()} - } - manager.GetVtxF = func(_ context.Context, id ids.ID) (avalanche.Vertex, error) { - switch id { - case gVtx.ID(): - return gVtx, nil - case mVtx.ID(): - return mVtx, nil - } - t.Fatalf("Unknown vertex") - panic("Should have errored") - } - - te, err := newTransitive(engCfg, noopStarter) - if err != nil { - t.Fatal(err) - } - - if err := te.Start(context.Background(), 0); err != nil { - t.Fatal(err) - } - - if err := te.issue(context.Background(), parentVtx); err != nil { - t.Fatal(err) - } - if err := te.issue(context.Background(), blockingVtx); err != nil { - t.Fatal(err) - } - - if len(te.vtxBlocked) != 2 { - t.Fatalf("Both inserts should be blocking") - } - - sender.CantSendPushQuery = false - - missingVtx.StatusV = choices.Processing - if err := te.issue(context.Background(), missingVtx); err != nil { - t.Fatal(err) - } - - if len(te.vtxBlocked) != 0 { - t.Fatalf("Both inserts should not longer be blocking") - } -} - -func TestEngineAbandonChit(t *testing.T) { - require := require.New(t) - - _, _, engCfg := DefaultConfig() - - vals := validators.NewSet() - engCfg.Validators = vals - - vdr := ids.GenerateTestNodeID() - err := vals.Add(vdr, nil, ids.Empty, 1) - require.NoError(err) - - sender := &common.SenderTest{T: t} - sender.Default(true) - sender.CantSendGetAcceptedFrontier = false - engCfg.Sender = sender - - manager := vertex.NewTestManager(t) - manager.Default(true) - engCfg.Manager = manager - - gVtx := &avalanche.TestVertex{TestDecidable: choices.TestDecidable{ - IDV: 
ids.GenerateTestID(), - StatusV: choices.Accepted, - }} - mVtx := &avalanche.TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }} - - vts := []avalanche.Vertex{gVtx, mVtx} - - vtx := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: vts, - HeightV: 1, - BytesV: []byte{0, 1, 2, 3}, - } - - manager.EdgeF = func(context.Context) []ids.ID { - return []ids.ID{vts[0].ID(), vts[1].ID()} - } - manager.GetVtxF = func(_ context.Context, id ids.ID) (avalanche.Vertex, error) { - switch id { - case gVtx.ID(): - return gVtx, nil - case mVtx.ID(): - return mVtx, nil - } - t.Fatalf("Unknown vertex") - panic("Should have errored") - } - - te, err := newTransitive(engCfg, noopStarter) - require.NoError(err) - - err = te.Start(context.Background(), 0) - require.NoError(err) - - var reqID uint32 - sender.SendPushQueryF = func(_ context.Context, _ set.Set[ids.NodeID], requestID uint32, _ []byte) { - reqID = requestID - } - - err = te.issue(context.Background(), vtx) - require.NoError(err) - - fakeVtxID := ids.GenerateTestID() - manager.GetVtxF = func(_ context.Context, id ids.ID) (avalanche.Vertex, error) { - require.Equal(fakeVtxID, id) - return nil, errMissing - } - - sender.SendGetF = func(_ context.Context, _ ids.NodeID, requestID uint32, _ ids.ID) { - reqID = requestID - } - - // Register a voter dependency on an unknown vertex. 
- err = te.Chits(context.Background(), vdr, reqID, []ids.ID{fakeVtxID}, nil) - require.NoError(err) - require.Len(te.vtxBlocked, 1) - - sender.CantSendPullQuery = false - - err = te.GetFailed(context.Background(), vdr, reqID) - require.NoError(err) - require.Empty(te.vtxBlocked) -} - -func TestEngineAbandonChitWithUnexpectedPutVertex(t *testing.T) { - require := require.New(t) - - _, _, engCfg := DefaultConfig() - - vals := validators.NewSet() - engCfg.Validators = vals - - vdr := ids.GenerateTestNodeID() - err := vals.Add(vdr, nil, ids.Empty, 1) - require.NoError(err) - - sender := &common.SenderTest{T: t} - sender.Default(true) - sender.CantSendGetAcceptedFrontier = false - engCfg.Sender = sender - - manager := vertex.NewTestManager(t) - manager.Default(true) - engCfg.Manager = manager - - gVtx := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }, - BytesV: []byte{0}, - } - mVtx := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }, - BytesV: []byte{1}, - } - - vts := []avalanche.Vertex{gVtx, mVtx} - - vtx := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: vts, - HeightV: 1, - BytesV: []byte{0, 1, 2, 3}, - } - - manager.EdgeF = func(context.Context) []ids.ID { - return []ids.ID{vts[0].ID(), vts[1].ID()} - } - manager.GetVtxF = func(_ context.Context, id ids.ID) (avalanche.Vertex, error) { - switch id { - case gVtx.ID(): - return gVtx, nil - case mVtx.ID(): - return mVtx, nil - } - t.Fatalf("Unknown vertex") - panic("Should have errored") - } - - te, err := newTransitive(engCfg, noopStarter) - require.NoError(err) - - err = te.Start(context.Background(), 0) - require.NoError(err) - - var reqID uint32 - sender.SendPushQueryF = func(_ context.Context, _ set.Set[ids.NodeID], requestID uint32, _ []byte) { - reqID = requestID - } - 
- err = te.issue(context.Background(), vtx) - require.NoError(err) - - fakeVtxID := ids.GenerateTestID() - manager.GetVtxF = func(_ context.Context, id ids.ID) (avalanche.Vertex, error) { - require.Equal(fakeVtxID, id) - return nil, errMissing - } - - sender.SendGetF = func(_ context.Context, _ ids.NodeID, requestID uint32, _ ids.ID) { - reqID = requestID - } - - // Register a voter dependency on an unknown vertex. - err = te.Chits(context.Background(), vdr, reqID, []ids.ID{fakeVtxID}, nil) - require.NoError(err) - require.Len(te.vtxBlocked, 1) - - sender.CantSendPullQuery = false - - gVtxBytes := gVtx.Bytes() - manager.ParseVtxF = func(_ context.Context, b []byte) (avalanche.Vertex, error) { - require.Equal(gVtxBytes, b) - return gVtx, nil - } - - // Respond with an unexpected vertex and verify that the request is - // correctly cleared. - err = te.Put(context.Background(), vdr, reqID, gVtxBytes) - require.NoError(err) - require.Empty(te.vtxBlocked) -} - -func TestEngineBlockingChitRequest(t *testing.T) { - _, _, engCfg := DefaultConfig() - - vals := validators.NewSet() - engCfg.Validators = vals - - vdr := ids.GenerateTestNodeID() - if err := vals.Add(vdr, nil, ids.Empty, 1); err != nil { - t.Fatal(err) - } - - sender := &common.SenderTest{T: t} - sender.Default(true) - sender.CantSendGetAcceptedFrontier = false - engCfg.Sender = sender - - manager := vertex.NewTestManager(t) - manager.Default(true) - engCfg.Manager = manager - - gVtx := &avalanche.TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }} - mVtx := &avalanche.TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }} - - vts := []avalanche.Vertex{gVtx, mVtx} - - missingVtx := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Unknown, - }, - ParentsV: vts, - HeightV: 1, - BytesV: []byte{0, 1, 2, 3}, - } - - parentVtx := 
&avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: []avalanche.Vertex{missingVtx}, - HeightV: 2, - BytesV: []byte{1, 1, 2, 3}, - } - - blockingVtx := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: []avalanche.Vertex{parentVtx}, - HeightV: 3, - BytesV: []byte{2, 1, 2, 3}, - } - - manager.EdgeF = func(context.Context) []ids.ID { - return []ids.ID{vts[0].ID(), vts[1].ID()} - } - manager.GetVtxF = func(_ context.Context, id ids.ID) (avalanche.Vertex, error) { - switch id { - case gVtx.ID(): - return gVtx, nil - case mVtx.ID(): - return mVtx, nil - } - t.Fatalf("Unknown vertex") - panic("Should have errored") - } - - te, err := newTransitive(engCfg, noopStarter) - if err != nil { - t.Fatal(err) - } - - if err := te.Start(context.Background(), 0); err != nil { - t.Fatal(err) - } - - if err := te.issue(context.Background(), parentVtx); err != nil { - t.Fatal(err) - } - - manager.GetVtxF = func(_ context.Context, vtxID ids.ID) (avalanche.Vertex, error) { - if vtxID == blockingVtx.ID() { - return blockingVtx, nil - } - t.Fatalf("Unknown vertex") - panic("Should have errored") - } - manager.ParseVtxF = func(_ context.Context, b []byte) (avalanche.Vertex, error) { - if bytes.Equal(b, blockingVtx.Bytes()) { - return blockingVtx, nil - } - t.Fatalf("Unknown vertex") - panic("Should have errored") - } - sender.CantSendChits = false - - if err := te.PushQuery(context.Background(), vdr, 0, blockingVtx.Bytes()); err != nil { - t.Fatal(err) - } - - if len(te.vtxBlocked) != 2 { - t.Fatalf("Both inserts should be blocking") - } - - sender.CantSendPushQuery = false - - missingVtx.StatusV = choices.Processing - if err := te.issue(context.Background(), missingVtx); err != nil { - t.Fatal(err) - } - - if len(te.vtxBlocked) != 0 { - t.Fatalf("Both inserts should not longer be blocking") - } -} - -func 
TestEngineBlockingChitResponse(t *testing.T) { - _, _, engCfg := DefaultConfig() - - vals := validators.NewSet() - engCfg.Validators = vals - - vdr := ids.GenerateTestNodeID() - if err := vals.Add(vdr, nil, ids.Empty, 1); err != nil { - t.Fatal(err) - } - - sender := &common.SenderTest{T: t} - sender.Default(true) - sender.CantSendGetAcceptedFrontier = false - engCfg.Sender = sender - - manager := vertex.NewTestManager(t) - manager.Default(true) - engCfg.Manager = manager - - gVtx := &avalanche.TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }} - mVtx := &avalanche.TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }} - - vts := []avalanche.Vertex{gVtx, mVtx} - - issuedVtx := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: vts, - HeightV: 1, - BytesV: []byte{0, 1, 2, 3}, - } - - missingVtx := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Unknown, - }, - ParentsV: vts, - HeightV: 1, - BytesV: []byte{1, 1, 2, 3}, - } - - blockingVtx := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: []avalanche.Vertex{missingVtx}, - HeightV: 2, - BytesV: []byte{2, 1, 2, 3}, - } - - manager.EdgeF = func(context.Context) []ids.ID { - return []ids.ID{vts[0].ID(), vts[1].ID()} - } - manager.GetVtxF = func(_ context.Context, id ids.ID) (avalanche.Vertex, error) { - switch id { - case gVtx.ID(): - return gVtx, nil - case mVtx.ID(): - return mVtx, nil - } - t.Fatalf("Unknown vertex") - panic("Should have errored") - } - - te, err := newTransitive(engCfg, noopStarter) - if err != nil { - t.Fatal(err) - } - - if err := te.Start(context.Background(), 0); err != nil { - t.Fatal(err) - } - - if err := te.issue(context.Background(), 
blockingVtx); err != nil { - t.Fatal(err) - } - - queryRequestID := new(uint32) - sender.SendPushQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, vtx []byte) { - *queryRequestID = requestID - vdrSet := set.Set[ids.NodeID]{} - vdrSet.Add(vdr) - if !inVdrs.Equals(vdrSet) { - t.Fatalf("Asking wrong validator for preference") - } - if !bytes.Equal(issuedVtx.Bytes(), vtx) { - t.Fatalf("Asking for wrong vertex") - } - } - - if err := te.issue(context.Background(), issuedVtx); err != nil { - t.Fatal(err) - } - - manager.GetVtxF = func(_ context.Context, id ids.ID) (avalanche.Vertex, error) { - if id == blockingVtx.ID() { - return blockingVtx, nil - } - t.Fatalf("Unknown vertex") - panic("Should have errored") - } - - if err := te.Chits(context.Background(), vdr, *queryRequestID, []ids.ID{blockingVtx.ID()}, nil); err != nil { - t.Fatal(err) - } - - if len(te.vtxBlocked) != 2 { - t.Fatalf("The insert should be blocking, as well as the chit response") - } - - sender.SendPushQueryF = nil - sender.CantSendPushQuery = false - sender.CantSendChits = false - - missingVtx.StatusV = choices.Processing - if err := te.issue(context.Background(), missingVtx); err != nil { - t.Fatal(err) - } - - if len(te.vtxBlocked) != 0 { - t.Fatalf("Both inserts should not longer be blocking") - } -} - -func TestEngineMissingTx(t *testing.T) { - _, _, engCfg := DefaultConfig() - - vals := validators.NewSet() - engCfg.Validators = vals - - vdr := ids.GenerateTestNodeID() - if err := vals.Add(vdr, nil, ids.Empty, 1); err != nil { - t.Fatal(err) - } - - sender := &common.SenderTest{T: t} - sender.Default(true) - sender.CantSendGetAcceptedFrontier = false - engCfg.Sender = sender - - manager := vertex.NewTestManager(t) - manager.Default(true) - engCfg.Manager = manager - - gVtx := &avalanche.TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }} - mVtx := &avalanche.TestVertex{TestDecidable: choices.TestDecidable{ - 
IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }} - - vts := []avalanche.Vertex{gVtx, mVtx} - - issuedVtx := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: vts, - HeightV: 1, - BytesV: []byte{0, 1, 2, 3}, - } - - missingVtx := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Unknown, - }, - ParentsV: vts, - HeightV: 1, - BytesV: []byte{1, 1, 2, 3}, - } - - blockingVtx := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: []avalanche.Vertex{missingVtx}, - HeightV: 2, - BytesV: []byte{2, 1, 2, 3}, - } - - manager.EdgeF = func(context.Context) []ids.ID { - return []ids.ID{vts[0].ID(), vts[1].ID()} - } - manager.GetVtxF = func(_ context.Context, id ids.ID) (avalanche.Vertex, error) { - switch id { - case gVtx.ID(): - return gVtx, nil - case mVtx.ID(): - return mVtx, nil - } - t.Fatalf("Unknown vertex") - panic("Should have errored") - } - - te, err := newTransitive(engCfg, noopStarter) - if err != nil { - t.Fatal(err) - } - - if err := te.Start(context.Background(), 0); err != nil { - t.Fatal(err) - } - - if err := te.issue(context.Background(), blockingVtx); err != nil { - t.Fatal(err) - } - - queryRequestID := new(uint32) - sender.SendPushQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, vtx []byte) { - *queryRequestID = requestID - vdrSet := set.Set[ids.NodeID]{} - vdrSet.Add(vdr) - if !inVdrs.Equals(vdrSet) { - t.Fatalf("Asking wrong validator for preference") - } - if !bytes.Equal(issuedVtx.Bytes(), vtx) { - t.Fatalf("Asking for wrong vertex") - } - } - - if err := te.issue(context.Background(), issuedVtx); err != nil { - t.Fatal(err) - } - - manager.GetVtxF = func(_ context.Context, id ids.ID) (avalanche.Vertex, error) { - if id == blockingVtx.ID() { - return blockingVtx, nil - } - 
t.Fatalf("Unknown vertex") - panic("Should have errored") - } - - if err := te.Chits(context.Background(), vdr, *queryRequestID, []ids.ID{blockingVtx.ID()}, nil); err != nil { - t.Fatal(err) - } - - if len(te.vtxBlocked) != 2 { - t.Fatalf("The insert should be blocking, as well as the chit response") - } - - sender.SendPushQueryF = nil - sender.CantSendPushQuery = false - sender.CantSendChits = false - - missingVtx.StatusV = choices.Processing - if err := te.issue(context.Background(), missingVtx); err != nil { - t.Fatal(err) - } - - if len(te.vtxBlocked) != 0 { - t.Fatalf("Both inserts should not longer be blocking") - } -} - -func TestEngineIssueBlockingTx(t *testing.T) { - _, _, engCfg := DefaultConfig() - - vals := validators.NewSet() - engCfg.Validators = vals - - vdr := ids.GenerateTestNodeID() - if err := vals.Add(vdr, nil, ids.Empty, 1); err != nil { - t.Fatal(err) - } - - manager := vertex.NewTestManager(t) - engCfg.Manager = manager - - gVtx := &avalanche.TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }} - - vts := []avalanche.Vertex{gVtx} - utxos := []ids.ID{ids.GenerateTestID(), ids.GenerateTestID()} - - tx0 := &snowstorm.TestTx{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }} - tx0.InputIDsV = append(tx0.InputIDsV, utxos[0]) - - tx1 := &snowstorm.TestTx{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - DependenciesV: []snowstorm.Tx{tx0}, - } - tx1.InputIDsV = append(tx1.InputIDsV, utxos[1]) - - vtx := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: vts, - HeightV: 1, - TxsV: []snowstorm.Tx{tx0, tx1}, - } - - te, err := newTransitive(engCfg, noopStarter) - if err != nil { - t.Fatal(err) - } - - if err := te.Start(context.Background(), 0); err != nil { - t.Fatal(err) - } - - if err := 
te.issue(context.Background(), vtx); err != nil { - t.Fatal(err) - } - - if prefs := te.Consensus.Preferences(); !prefs.Contains(vtx.ID()) { - t.Fatalf("Vertex should be preferred") - } -} - -func TestEngineReissueAbortedVertex(t *testing.T) { - _, _, engCfg := DefaultConfig() - - vals := validators.NewSet() - engCfg.Validators = vals - - vdr := ids.GenerateTestNodeID() - if err := vals.Add(vdr, nil, ids.Empty, 1); err != nil { - t.Fatal(err) - } - - sender := &common.SenderTest{T: t} - sender.Default(true) - sender.CantSendGetAcceptedFrontier = false - engCfg.Sender = sender - - manager := vertex.NewTestManager(t) - manager.Default(true) - manager.TestStorage.CantEdge = false - engCfg.Manager = manager - - gVtx := &avalanche.TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }} - - vts := []avalanche.Vertex{gVtx} - - vtxID0 := ids.GenerateTestID() - vtxID1 := ids.GenerateTestID() - - vtxBytes0 := []byte{0} - vtxBytes1 := []byte{1} - - vtx0 := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: vtxID0, - StatusV: choices.Unknown, - }, - ParentsV: vts, - HeightV: 1, - BytesV: vtxBytes0, - } - vtx1 := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: vtxID1, - StatusV: choices.Processing, - }, - ParentsV: []avalanche.Vertex{vtx0}, - HeightV: 2, - BytesV: vtxBytes1, - } - - manager.EdgeF = func(context.Context) []ids.ID { - return []ids.ID{gVtx.ID()} - } - - manager.GetVtxF = func(_ context.Context, vtxID ids.ID) (avalanche.Vertex, error) { - if vtxID == gVtx.ID() { - return gVtx, nil - } - t.Fatalf("Unknown vertex requested") - panic("Unknown vertex requested") - } - - te, err := newTransitive(engCfg, noopStarter) - if err != nil { - t.Fatal(err) - } - - if err := te.Start(context.Background(), 0); err != nil { - t.Fatal(err) - } - - manager.EdgeF = nil - manager.GetVtxF = nil - - requestID := new(uint32) - sender.SendGetF = func(_ context.Context, vID ids.NodeID, reqID 
uint32, vtxID ids.ID) { - *requestID = reqID - } - sender.CantSendChits = false - manager.ParseVtxF = func(_ context.Context, b []byte) (avalanche.Vertex, error) { - if bytes.Equal(b, vtxBytes1) { - return vtx1, nil - } - t.Fatalf("Unknown bytes provided") - panic("Unknown bytes provided") - } - manager.GetVtxF = func(_ context.Context, vtxID ids.ID) (avalanche.Vertex, error) { - if vtxID == vtxID1 { - return vtx1, nil - } - t.Fatalf("Unknown bytes provided") - panic("Unknown bytes provided") - } - - if err := te.PushQuery(context.Background(), vdr, 0, vtx1.Bytes()); err != nil { - t.Fatal(err) - } - - sender.SendGetF = nil - manager.ParseVtxF = nil - - if err := te.GetFailed(context.Background(), vdr, *requestID); err != nil { - t.Fatal(err) - } - - requested := new(bool) - sender.SendGetF = func(_ context.Context, _ ids.NodeID, _ uint32, vtxID ids.ID) { - if vtxID == vtxID0 { - *requested = true - } - } - manager.GetVtxF = func(_ context.Context, vtxID ids.ID) (avalanche.Vertex, error) { - if vtxID == vtxID1 { - return vtx1, nil - } - t.Fatalf("Unknown bytes provided") - panic("Unknown bytes provided") - } - - if err := te.PullQuery(context.Background(), vdr, 0, vtxID1); err != nil { - t.Fatal(err) - } - - if !*requested { - t.Fatalf("Should have requested the missing vertex") - } -} - -func TestEngineBootstrappingIntoConsensus(t *testing.T) { - _, bootCfg, engCfg := DefaultConfig() - - vals := validators.NewSet() - vdr := ids.GenerateTestNodeID() - if err := vals.Add(vdr, nil, ids.Empty, 1); err != nil { - t.Fatal(err) - } - - peers := tracker.NewPeers() - startup := tracker.NewStartup(peers, 0) - vals.RegisterCallbackListener(startup) - - bootCfg.Beacons = vals - bootCfg.StartupTracker = startup - engCfg.Validators = vals - - bootCfg.SampleK = vals.Len() - - sender := &common.SenderTest{T: t} - sender.Default(true) - bootCfg.Sender = sender - engCfg.Sender = sender - - manager := vertex.NewTestManager(t) - manager.Default(true) - manager.TestStorage.CantEdge = 
false - bootCfg.Manager = manager - engCfg.Manager = manager - - vm := &vertex.TestVM{TestVM: block.TestVM{TestVM: common.TestVM{T: t}}} - vm.Default(true) - bootCfg.VM = vm - engCfg.VM = vm - - vm.CantSetState = false - vm.CantConnected = false - - utxos := []ids.ID{ids.GenerateTestID(), ids.GenerateTestID()} - - txID0 := ids.GenerateTestID() - txID1 := ids.GenerateTestID() - - txBytes0 := []byte{0} - txBytes1 := []byte{1} - - tx0 := &snowstorm.TestTx{ - TestDecidable: choices.TestDecidable{ - IDV: txID0, - StatusV: choices.Processing, - }, - BytesV: txBytes0, - } - tx0.InputIDsV = append(tx0.InputIDsV, utxos[0]) - - tx1 := &snowstorm.TestTx{ - TestDecidable: choices.TestDecidable{ - IDV: txID1, - StatusV: choices.Processing, - }, - DependenciesV: []snowstorm.Tx{tx0}, - BytesV: txBytes1, - } - tx1.InputIDsV = append(tx1.InputIDsV, utxos[1]) - - vtxID0 := ids.GenerateTestID() - vtxID1 := ids.GenerateTestID() - - vtxBytes0 := []byte{2} - vtxBytes1 := []byte{3} - - vtx0 := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: vtxID0, - StatusV: choices.Processing, - }, - HeightV: 1, - TxsV: []snowstorm.Tx{tx0}, - BytesV: vtxBytes0, - } - vtx1 := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: vtxID1, - StatusV: choices.Processing, - }, - ParentsV: []avalanche.Vertex{vtx0}, - HeightV: 2, - TxsV: []snowstorm.Tx{tx1}, - BytesV: vtxBytes1, - } - - requested := new(bool) - requestID := new(uint32) - sender.SendGetAcceptedFrontierF = func(_ context.Context, vdrs set.Set[ids.NodeID], reqID uint32) { - if vdrs.Len() != 1 { - t.Fatalf("Should have requested from the validators") - } - if !vdrs.Contains(vdr) { - t.Fatalf("Should have requested from %s", vdr) - } - *requested = true - *requestID = reqID - } - - dh := &dummyHandler{} - bootstrapper, err := bootstrap.New( - context.Background(), - bootCfg, - dh.onDoneBootstrapping, - noopStarter, - ) - if err != nil { - t.Fatal(err) - } - - te, err := newTransitive(engCfg, noopStarter) - if 
err != nil { - t.Fatal(err) - } - dh.startEngineF = te.Start - - if err := bootstrapper.Start(context.Background(), 0); err != nil { - t.Fatal(err) - } - - if err := bootstrapper.Connected(context.Background(), vdr, version.CurrentApp); err != nil { - t.Fatal(err) - } - - sender.SendGetAcceptedFrontierF = nil - - if !*requested { - t.Fatalf("Should have requested from the validators during Initialize") - } - - acceptedFrontier := []ids.ID{vtxID0} - - *requested = false - sender.SendGetAcceptedF = func(_ context.Context, vdrs set.Set[ids.NodeID], reqID uint32, proposedAccepted []ids.ID) { - if vdrs.Len() != 1 { - t.Fatalf("Should have requested from the validators") - } - if !vdrs.Contains(vdr) { - t.Fatalf("Should have requested from %s", vdr) - } - if !slices.Equal(acceptedFrontier, proposedAccepted) { - t.Fatalf("Wrong proposedAccepted vertices.\nExpected: %s\nGot: %s", acceptedFrontier, proposedAccepted) - } - *requested = true - *requestID = reqID - } - - if err := bootstrapper.AcceptedFrontier(context.Background(), vdr, *requestID, acceptedFrontier); err != nil { - t.Fatal(err) - } - - if !*requested { - t.Fatalf("Should have requested from the validators during AcceptedFrontier") - } - - manager.GetVtxF = func(_ context.Context, vtxID ids.ID) (avalanche.Vertex, error) { - if vtxID == vtxID0 { - return nil, errMissing - } - t.Fatalf("Unknown vertex requested") - panic("Unknown vertex requested") - } - - sender.SendGetAncestorsF = func(_ context.Context, inVdr ids.NodeID, reqID uint32, vtxID ids.ID) { - if vdr != inVdr { - t.Fatalf("Asking wrong validator for vertex") - } - if vtx0.ID() != vtxID { - t.Fatalf("Asking for wrong vertex") - } - *requestID = reqID - } - - if err := bootstrapper.Accepted(context.Background(), vdr, *requestID, acceptedFrontier); err != nil { - t.Fatal(err) - } - - manager.GetVtxF = nil - sender.SendGetF = nil - - vm.ParseTxF = func(_ context.Context, b []byte) (snowstorm.Tx, error) { - if bytes.Equal(b, txBytes0) { - return tx0, nil - 
} - t.Fatalf("Unknown bytes provided") - panic("Unknown bytes provided") - } - manager.ParseVtxF = func(_ context.Context, b []byte) (avalanche.Vertex, error) { - if bytes.Equal(b, vtxBytes0) { - return vtx0, nil - } - t.Fatalf("Unknown bytes provided") - panic("Unknown bytes provided") - } - manager.EdgeF = func(context.Context) []ids.ID { - return []ids.ID{vtxID0} - } - manager.GetVtxF = func(_ context.Context, vtxID ids.ID) (avalanche.Vertex, error) { - if vtxID == vtxID0 { - return vtx0, nil - } - t.Fatalf("Unknown bytes provided") - panic("Unknown bytes provided") - } - - if err := bootstrapper.Ancestors(context.Background(), vdr, *requestID, [][]byte{vtxBytes0}); err != nil { - t.Fatal(err) - } - - vm.ParseTxF = nil - manager.ParseVtxF = nil - manager.EdgeF = nil - manager.GetVtxF = nil - - if tx0.Status() != choices.Accepted { - t.Fatalf("Should have accepted %s", txID0) - } - if vtx0.Status() != choices.Accepted { - t.Fatalf("Should have accepted %s", vtxID0) - } - - manager.ParseVtxF = func(_ context.Context, b []byte) (avalanche.Vertex, error) { - if bytes.Equal(b, vtxBytes1) { - return vtx1, nil - } - t.Fatalf("Unknown bytes provided") - panic("Unknown bytes provided") - } - sender.SendChitsF = func(_ context.Context, inVdr ids.NodeID, _ uint32, chits []ids.ID, _ []ids.ID) { - if inVdr != vdr { - t.Fatalf("Sent to the wrong validator") - } - - expected := []ids.ID{vtxID0} - - if !slices.Equal(expected, chits) { - t.Fatalf("Returned wrong chits") - } - } - sender.SendPushQueryF = func(_ context.Context, vdrs set.Set[ids.NodeID], _ uint32, vtx []byte) { - if vdrs.Len() != 1 { - t.Fatalf("Should have requested from the validators") - } - if !vdrs.Contains(vdr) { - t.Fatalf("Should have requested from %s", vdr) - } - - if !bytes.Equal(vtxBytes1, vtx) { - t.Fatalf("Sent wrong query bytes") - } - } - manager.GetVtxF = func(_ context.Context, vtxID ids.ID) (avalanche.Vertex, error) { - if vtxID == vtxID1 { - return vtx1, nil - } - t.Fatalf("Unknown bytes 
provided") - panic("Unknown bytes provided") - } - - if err := te.PushQuery(context.Background(), vdr, 0, vtxBytes1); err != nil { - t.Fatal(err) - } - - manager.ParseVtxF = nil - sender.SendChitsF = nil - sender.SendPushQueryF = nil - manager.GetVtxF = nil -} - -func TestEngineReBootstrapFails(t *testing.T) { - _, bootCfg, engCfg := DefaultConfig() - bootCfg.Alpha = 1 - bootCfg.RetryBootstrap = true - bootCfg.RetryBootstrapWarnFrequency = 4 - - vals := validators.NewSet() - vdr := ids.GenerateTestNodeID() - if err := vals.Add(vdr, nil, ids.Empty, 1); err != nil { - t.Fatal(err) - } - - peers := tracker.NewPeers() - startup := tracker.NewStartup(peers, 0) - vals.RegisterCallbackListener(startup) - - bootCfg.Beacons = vals - bootCfg.StartupTracker = startup - engCfg.Validators = vals - - bootCfg.SampleK = vals.Len() - - sender := &common.SenderTest{T: t} - sender.Default(true) - bootCfg.Sender = sender - engCfg.Sender = sender - - manager := vertex.NewTestManager(t) - manager.Default(true) - bootCfg.Manager = manager - engCfg.Manager = manager - - vm := &vertex.TestVM{TestVM: block.TestVM{TestVM: common.TestVM{T: t}}} - vm.Default(true) - bootCfg.VM = vm - engCfg.VM = vm - - vm.CantSetState = false - - utxos := []ids.ID{ids.GenerateTestID(), ids.GenerateTestID()} - - txID0 := ids.GenerateTestID() - txID1 := ids.GenerateTestID() - - txBytes0 := []byte{0} - txBytes1 := []byte{1} - - tx0 := &snowstorm.TestTx{ - TestDecidable: choices.TestDecidable{ - IDV: txID0, - StatusV: choices.Processing, - }, - BytesV: txBytes0, - } - tx0.InputIDsV = append(tx0.InputIDsV, utxos[0]) - - tx1 := &snowstorm.TestTx{ - TestDecidable: choices.TestDecidable{ - IDV: txID1, - StatusV: choices.Processing, - }, - DependenciesV: []snowstorm.Tx{tx0}, - BytesV: txBytes1, - } - tx1.InputIDsV = append(tx1.InputIDsV, utxos[1]) - - requested := new(bool) - requestID := new(uint32) - sender.SendGetAcceptedFrontierF = func(_ context.Context, vdrs set.Set[ids.NodeID], reqID uint32) { - // instead of 
triggering the timeout here, we'll just invoke the GetAcceptedFrontierFailed func - // - // s.router.GetAcceptedFrontierFailed(context.Background(), vID, s.ctx.ChainID, requestID) - // -> chain.GetAcceptedFrontierFailed(context.Background(), validatorID, requestID) - // ---> h.sendReliableMsg(message{ - // messageType: constants.GetAcceptedFrontierFailedMsg, - // validatorID: validatorID, - // requestID: requestID, - // }) - // -----> h.engine.GetAcceptedFrontierFailed(context.Background(), msg.validatorID, msg.requestID) - // -------> return b.AcceptedFrontier(context.Background(), validatorID, requestID, nil) - - // ensure the request is made to the correct validators - if vdrs.Len() != 1 { - t.Fatalf("Should have requested from the validators") - } - if !vdrs.Contains(vdr) { - t.Fatalf("Should have requested from %s", vdr) - } - *requested = true - *requestID = reqID - } - - dh := &dummyHandler{} - bootstrapper, err := bootstrap.New( - context.Background(), - bootCfg, - dh.onDoneBootstrapping, - noopStarter, - ) - if err != nil { - t.Fatal(err) - } - - if err := bootstrapper.Start(context.Background(), 0); err != nil { - t.Fatal(err) - } - - if !*requested { - t.Fatalf("Should have requested from the validators during Initialize") - } - - // reset requested - *requested = false - sender.SendGetAcceptedF = func(_ context.Context, vdrs set.Set[ids.NodeID], reqID uint32, proposedAccepted []ids.ID) { - if vdrs.Len() != 1 { - t.Fatalf("Should have requested from the validators") - } - if !vdrs.Contains(vdr) { - t.Fatalf("Should have requested from %s", vdr) - } - *requested = true - *requestID = reqID - } - - // mimic a GetAcceptedFrontierFailedMsg - // only validator that was requested timed out on the request - if err := bootstrapper.GetAcceptedFrontierFailed(context.Background(), vdr, *requestID); err != nil { - t.Fatal(err) - } - - // mimic a GetAcceptedFrontierFailedMsg - // only validator that was requested timed out on the request - if err := 
bootstrapper.GetAcceptedFrontierFailed(context.Background(), vdr, *requestID); err != nil { - t.Fatal(err) - } - - bootCfg.Ctx.AvalancheRegisterer = prometheus.NewRegistry() - - // re-register the Transitive - bootstrapper2, err := bootstrap.New( - context.Background(), - bootCfg, - dh.onDoneBootstrapping, - noopStarter, - ) - if err != nil { - t.Fatal(err) - } - - if err := bootstrapper2.Start(context.Background(), 0); err != nil { - t.Fatal(err) - } - - if err := bootstrapper2.GetAcceptedFailed(context.Background(), vdr, *requestID); err != nil { - t.Fatal(err) - } - - if err := bootstrapper2.GetAcceptedFailed(context.Background(), vdr, *requestID); err != nil { - t.Fatal(err) - } - - if !*requested { - t.Fatalf("Should have requested from the validators during AcceptedFrontier") - } -} - -func TestEngineReBootstrappingIntoConsensus(t *testing.T) { - _, bootCfg, engCfg := DefaultConfig() - bootCfg.Alpha = 1 - bootCfg.RetryBootstrap = true - bootCfg.RetryBootstrapWarnFrequency = 4 - - vals := validators.NewSet() - vdr := ids.GenerateTestNodeID() - if err := vals.Add(vdr, nil, ids.Empty, 1); err != nil { - t.Fatal(err) - } - - peers := tracker.NewPeers() - startup := tracker.NewStartup(peers, 0) - vals.RegisterCallbackListener(startup) - - bootCfg.Beacons = vals - bootCfg.StartupTracker = startup - engCfg.Validators = vals - - bootCfg.SampleK = vals.Len() - - sender := &common.SenderTest{T: t} - sender.Default(true) - bootCfg.Sender = sender - engCfg.Sender = sender - - manager := vertex.NewTestManager(t) - manager.Default(true) - bootCfg.Manager = manager - engCfg.Manager = manager - - vm := &vertex.TestVM{TestVM: block.TestVM{TestVM: common.TestVM{T: t}}} - vm.Default(true) - bootCfg.VM = vm - engCfg.VM = vm - - vm.CantSetState = false - vm.CantConnected = false - - utxos := []ids.ID{ids.GenerateTestID(), ids.GenerateTestID()} - - txID0 := ids.GenerateTestID() - txID1 := ids.GenerateTestID() - - txBytes0 := []byte{0} - txBytes1 := []byte{1} - - tx0 := 
&snowstorm.TestTx{ - TestDecidable: choices.TestDecidable{ - IDV: txID0, - StatusV: choices.Processing, - }, - BytesV: txBytes0, - } - tx0.InputIDsV = append(tx0.InputIDsV, utxos[0]) - - tx1 := &snowstorm.TestTx{ - TestDecidable: choices.TestDecidable{ - IDV: txID1, - StatusV: choices.Processing, - }, - DependenciesV: []snowstorm.Tx{tx0}, - BytesV: txBytes1, - } - tx1.InputIDsV = append(tx1.InputIDsV, utxos[1]) - - vtxID0 := ids.GenerateTestID() - vtxID1 := ids.GenerateTestID() - - vtxBytes0 := []byte{2} - vtxBytes1 := []byte{3} - - vtx0 := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: vtxID0, - StatusV: choices.Processing, - }, - HeightV: 1, - TxsV: []snowstorm.Tx{tx0}, - BytesV: vtxBytes0, - } - vtx1 := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: vtxID1, - StatusV: choices.Processing, - }, - ParentsV: []avalanche.Vertex{vtx0}, - HeightV: 2, - TxsV: []snowstorm.Tx{tx1}, - BytesV: vtxBytes1, - } - - requested := new(bool) - requestID := new(uint32) - sender.SendGetAcceptedFrontierF = func(_ context.Context, vdrs set.Set[ids.NodeID], reqID uint32) { - if vdrs.Len() != 1 { - t.Fatalf("Should have requested from the validators") - } - if !vdrs.Contains(vdr) { - t.Fatalf("Should have requested from %s", vdr) - } - *requested = true - *requestID = reqID - } - - dh := &dummyHandler{} - bootstrapper, err := bootstrap.New( - context.Background(), - bootCfg, - dh.onDoneBootstrapping, - noopStarter, - ) - if err != nil { - t.Fatal(err) - } - - te, err := newTransitive(engCfg, noopStarter) - if err != nil { - t.Fatal(err) - } - dh.startEngineF = te.Start - - if err := bootstrapper.Start(context.Background(), 0); err != nil { - t.Fatal(err) - } - - if err := bootstrapper.Connected(context.Background(), vdr, version.CurrentApp); err != nil { - t.Fatal(err) - } - - // fail the AcceptedFrontier - if err := bootstrapper.GetAcceptedFrontierFailed(context.Background(), vdr, *requestID); err != nil { - t.Fatal(err) - } - - // fail the 
GetAcceptedFailed - if err := bootstrapper.GetAcceptedFailed(context.Background(), vdr, *requestID); err != nil { - t.Fatal(err) - } - - if !*requested { - t.Fatalf("Should have requested from the validators during Initialize") - } - - acceptedFrontier := []ids.ID{vtxID0} - - *requested = false - sender.SendGetAcceptedF = func(_ context.Context, vdrs set.Set[ids.NodeID], reqID uint32, proposedAccepted []ids.ID) { - if vdrs.Len() != 1 { - t.Fatalf("Should have requested from the validators") - } - if !vdrs.Contains(vdr) { - t.Fatalf("Should have requested from %s", vdr) - } - if !slices.Equal(acceptedFrontier, proposedAccepted) { - t.Fatalf("Wrong proposedAccepted vertices.\nExpected: %s\nGot: %s", acceptedFrontier, proposedAccepted) - } - *requested = true - *requestID = reqID - } - - if err := bootstrapper.AcceptedFrontier(context.Background(), vdr, *requestID, acceptedFrontier); err != nil { - t.Fatal(err) - } - - if !*requested { - t.Fatalf("Should have requested from the validators during AcceptedFrontier") - } - - manager.GetVtxF = func(_ context.Context, vtxID ids.ID) (avalanche.Vertex, error) { - if vtxID == vtxID0 { - return nil, errMissing - } - t.Fatalf("Unknown vertex requested") - panic("Unknown vertex requested") - } - - sender.SendGetAncestorsF = func(_ context.Context, inVdr ids.NodeID, reqID uint32, vtxID ids.ID) { - if vdr != inVdr { - t.Fatalf("Asking wrong validator for vertex") - } - if vtx0.ID() != vtxID { - t.Fatalf("Asking for wrong vertex") - } - *requestID = reqID - } - - if err := bootstrapper.Accepted(context.Background(), vdr, *requestID, acceptedFrontier); err != nil { - t.Fatal(err) - } - - manager.GetVtxF = nil - - vm.ParseTxF = func(_ context.Context, b []byte) (snowstorm.Tx, error) { - if bytes.Equal(b, txBytes0) { - return tx0, nil - } - t.Fatalf("Unknown bytes provided") - panic("Unknown bytes provided") - } - manager.ParseVtxF = func(_ context.Context, b []byte) (avalanche.Vertex, error) { - if bytes.Equal(b, vtxBytes0) { - 
return vtx0, nil - } - t.Fatalf("Unknown bytes provided") - panic("Unknown bytes provided") - } - manager.EdgeF = func(context.Context) []ids.ID { - return []ids.ID{vtxID0} - } - manager.GetVtxF = func(_ context.Context, vtxID ids.ID) (avalanche.Vertex, error) { - if vtxID == vtxID0 { - return vtx0, nil - } - t.Fatalf("Unknown bytes provided") - panic("Unknown bytes provided") - } - - if err := bootstrapper.Ancestors(context.Background(), vdr, *requestID, [][]byte{vtxBytes0}); err != nil { - t.Fatal(err) - } - - sender.SendGetAcceptedFrontierF = nil - sender.SendGetF = nil - vm.ParseTxF = nil - manager.ParseVtxF = nil - manager.EdgeF = nil - manager.GetVtxF = nil - - if tx0.Status() != choices.Accepted { - t.Fatalf("Should have accepted %s", txID0) - } - if vtx0.Status() != choices.Accepted { - t.Fatalf("Should have accepted %s", vtxID0) - } - - manager.ParseVtxF = func(_ context.Context, b []byte) (avalanche.Vertex, error) { - if bytes.Equal(b, vtxBytes1) { - return vtx1, nil - } - t.Fatalf("Unknown bytes provided") - panic("Unknown bytes provided") - } - sender.SendChitsF = func(_ context.Context, inVdr ids.NodeID, _ uint32, chits []ids.ID, _ []ids.ID) { - if inVdr != vdr { - t.Fatalf("Sent to the wrong validator") - } - - expected := []ids.ID{vtxID1} - - if !slices.Equal(expected, chits) { - t.Fatalf("Returned wrong chits") - } - } - sender.SendPushQueryF = func(_ context.Context, vdrs set.Set[ids.NodeID], _ uint32, vtx []byte) { - if vdrs.Len() != 1 { - t.Fatalf("Should have requested from the validators") - } - if !vdrs.Contains(vdr) { - t.Fatalf("Should have requested from %s", vdr) - } - - if !bytes.Equal(vtxBytes1, vtx) { - t.Fatalf("Sent wrong query bytes") - } - } - manager.GetVtxF = func(_ context.Context, vtxID ids.ID) (avalanche.Vertex, error) { - if vtxID == vtxID1 { - return vtx1, nil - } - t.Fatalf("Unknown bytes provided") - panic("Unknown bytes provided") - } - - if err := bootstrapper.PushQuery(context.Background(), vdr, 0, vtxBytes1); err != nil 
{ - t.Fatal(err) - } - - manager.ParseVtxF = nil - sender.SendChitsF = nil - sender.SendPushQueryF = nil - manager.GetVtxF = nil -} - -func TestEngineUndeclaredDependencyDeadlock(t *testing.T) { - _, _, engCfg := DefaultConfig() - - vals := validators.NewSet() - engCfg.Validators = vals - - vdr := ids.GenerateTestNodeID() - if err := vals.Add(vdr, nil, ids.Empty, 1); err != nil { - t.Fatal(err) - } - - manager := vertex.NewTestManager(t) - engCfg.Manager = manager - - gVtx := &avalanche.TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }} - - vts := []avalanche.Vertex{gVtx} - utxos := []ids.ID{ids.GenerateTestID(), ids.GenerateTestID()} - - tx0 := &snowstorm.TestTx{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }} - tx0.InputIDsV = append(tx0.InputIDsV, utxos[0]) - - tx1 := &snowstorm.TestTx{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - VerifyV: errTest, - } - tx1.InputIDsV = append(tx1.InputIDsV, utxos[1]) - - vtx0 := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: vts, - HeightV: 1, - TxsV: []snowstorm.Tx{tx0}, - } - vtx1 := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: []avalanche.Vertex{vtx0}, - HeightV: 2, - TxsV: []snowstorm.Tx{tx1}, - } - - te, err := newTransitive(engCfg, noopStarter) - if err != nil { - t.Fatal(err) - } - - if err := te.Start(context.Background(), 0); err != nil { - t.Fatal(err) - } - - sender := &common.SenderTest{T: t} - te.Sender = sender - - reqID := new(uint32) - sender.SendPushQueryF = func(_ context.Context, _ set.Set[ids.NodeID], requestID uint32, _ []byte) { - *reqID = requestID - } - - if err := te.issue(context.Background(), vtx0); err != nil { - t.Fatal(err) - } - - 
sender.SendPushQueryF = func(context.Context, set.Set[ids.NodeID], uint32, []byte) { - t.Fatalf("should have failed verification") - } - - if err := te.issue(context.Background(), vtx1); err != nil { - t.Fatal(err) - } - - manager.GetVtxF = func(_ context.Context, vtxID ids.ID) (avalanche.Vertex, error) { - switch vtxID { - case vtx0.ID(): - return vtx0, nil - case vtx1.ID(): - return vtx1, nil - } - return nil, errUnknownVertex - } - - if err := te.Chits(context.Background(), vdr, *reqID, []ids.ID{vtx1.ID()}, nil); err != nil { - t.Fatal(err) - } - - if status := vtx0.Status(); status != choices.Accepted { - t.Fatalf("should have accepted the vertex due to transitive voting") - } -} - -func TestEnginePartiallyValidVertex(t *testing.T) { - _, _, engCfg := DefaultConfig() - - vals := validators.NewSet() - engCfg.Validators = vals - - vdr := ids.GenerateTestNodeID() - if err := vals.Add(vdr, nil, ids.Empty, 1); err != nil { - t.Fatal(err) - } - - manager := vertex.NewTestManager(t) - engCfg.Manager = manager - - gVtx := &avalanche.TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }} - - vts := []avalanche.Vertex{gVtx} - utxos := []ids.ID{ids.GenerateTestID(), ids.GenerateTestID()} - - tx0 := &snowstorm.TestTx{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }} - tx0.InputIDsV = append(tx0.InputIDsV, utxos[0]) - - tx1 := &snowstorm.TestTx{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - VerifyV: errTest, - } - tx1.InputIDsV = append(tx1.InputIDsV, utxos[1]) - - vtx := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: vts, - HeightV: 1, - TxsV: []snowstorm.Tx{tx0, tx1}, - } - - te, err := newTransitive(engCfg, noopStarter) - if err != nil { - t.Fatal(err) - } - - if err := te.Start(context.Background(), 0); err != 
nil { - t.Fatal(err) - } - - expectedVtxBytes := []byte{1} - manager.BuildVtxF = func(_ context.Context, _ []ids.ID, txs []snowstorm.Tx) (avalanche.Vertex, error) { - return &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: vts, - HeightV: 1, - TxsV: txs, - BytesV: expectedVtxBytes, - }, nil - } - - sender := &common.SenderTest{T: t} - te.Sender = sender - - sender.SendPushQueryF = func(_ context.Context, _ set.Set[ids.NodeID], _ uint32, vtx []byte) { - if !bytes.Equal(expectedVtxBytes, vtx) { - t.Fatalf("wrong vertex queried") - } - } - - if err := te.issue(context.Background(), vtx); err != nil { - t.Fatal(err) - } -} - -func TestEngineGossip(t *testing.T) { - _, _, engCfg := DefaultConfig() - - sender := &common.SenderTest{T: t} - sender.Default(true) - engCfg.Sender = sender - - manager := vertex.NewTestManager(t) - engCfg.Manager = manager - - gVtx := &avalanche.TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }} - - te, err := newTransitive(engCfg, noopStarter) - if err != nil { - t.Fatal(err) - } - - if err := te.Start(context.Background(), 0); err != nil { - t.Fatal(err) - } - - manager.EdgeF = func(context.Context) []ids.ID { - return []ids.ID{gVtx.ID()} - } - manager.GetVtxF = func(_ context.Context, vtxID ids.ID) (avalanche.Vertex, error) { - if vtxID == gVtx.ID() { - return gVtx, nil - } - t.Fatal(errUnknownVertex) - return nil, errUnknownVertex - } - - called := new(bool) - sender.SendGossipF = func(_ context.Context, vtxBytes []byte) { - *called = true - if !bytes.Equal(vtxBytes, gVtx.Bytes()) { - t.Fatal(errUnknownVertex) - } - } - - if err := te.Gossip(context.Background()); err != nil { - t.Fatal(err) - } - - if !*called { - t.Fatalf("Should have gossiped the vertex") - } -} - -func TestEngineInvalidVertexIgnoredFromUnexpectedPeer(t *testing.T) { - _, _, engCfg := DefaultConfig() - - vals := 
validators.NewSet() - engCfg.Validators = vals - - vdr := ids.GenerateTestNodeID() - secondVdr := ids.GenerateTestNodeID() - - if err := vals.Add(vdr, nil, ids.Empty, 1); err != nil { - t.Fatal(err) - } - if err := vals.Add(secondVdr, nil, ids.Empty, 1); err != nil { - t.Fatal(err) - } - - sender := &common.SenderTest{T: t} - engCfg.Sender = sender - - manager := vertex.NewTestManager(t) - engCfg.Manager = manager - - gVtx := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }, - BytesV: []byte{0}, - } - - vts := []avalanche.Vertex{gVtx} - utxos := []ids.ID{ids.GenerateTestID(), ids.GenerateTestID()} - - tx0 := &snowstorm.TestTx{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }} - tx0.InputIDsV = append(tx0.InputIDsV, utxos[0]) - - tx1 := &snowstorm.TestTx{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }} - tx1.InputIDsV = append(tx1.InputIDsV, utxos[1]) - - vtx0 := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Unknown, - }, - ParentsV: vts, - HeightV: 1, - TxsV: []snowstorm.Tx{tx0}, - BytesV: []byte{1}, - } - vtx1 := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: []avalanche.Vertex{vtx0}, - HeightV: 2, - TxsV: []snowstorm.Tx{tx1}, - BytesV: []byte{2}, - } - - te, err := newTransitive(engCfg, noopStarter) - if err != nil { - t.Fatal(err) - } - - if err := te.Start(context.Background(), 0); err != nil { - t.Fatal(err) - } - - parsed := new(bool) - manager.ParseVtxF = func(_ context.Context, b []byte) (avalanche.Vertex, error) { - if bytes.Equal(b, vtx1.Bytes()) { - *parsed = true - return vtx1, nil - } - return nil, errUnknownVertex - } - - manager.GetVtxF = func(_ context.Context, vtxID ids.ID) (avalanche.Vertex, error) { - if !*parsed 
{ - return nil, errUnknownVertex - } - - if vtxID == vtx1.ID() { - return vtx1, nil - } - return nil, errUnknownVertex - } - - reqID := new(uint32) - sender.SendGetF = func(_ context.Context, reqVdr ids.NodeID, requestID uint32, vtxID ids.ID) { - *reqID = requestID - if reqVdr != vdr { - t.Fatalf("Wrong validator requested") - } - if vtxID != vtx0.ID() { - t.Fatalf("Wrong vertex requested") - } - } - - if err := te.PushQuery(context.Background(), vdr, 0, vtx1.Bytes()); err != nil { - t.Fatal(err) - } - - if err := te.Put(context.Background(), secondVdr, *reqID, []byte{3}); err != nil { - t.Fatal(err) - } - - *parsed = false - manager.ParseVtxF = func(_ context.Context, b []byte) (avalanche.Vertex, error) { - if bytes.Equal(b, vtx0.Bytes()) { - *parsed = true - return vtx0, nil - } - return nil, errUnknownVertex - } - - manager.GetVtxF = func(_ context.Context, vtxID ids.ID) (avalanche.Vertex, error) { - if !*parsed { - return nil, errUnknownVertex - } - - if vtxID == vtx0.ID() { - return vtx0, nil - } - return nil, errUnknownVertex - } - sender.CantSendPushQuery = false - sender.CantSendChits = false - - vtx0.StatusV = choices.Processing - - if err := te.Put(context.Background(), vdr, *reqID, vtx0.Bytes()); err != nil { - t.Fatal(err) - } - - prefs := te.Consensus.Preferences() - if !prefs.Contains(vtx1.ID()) { - t.Fatalf("Shouldn't have abandoned the pending vertex") - } -} - -func TestEnginePushQueryRequestIDConflict(t *testing.T) { - _, _, engCfg := DefaultConfig() - - vals := validators.NewSet() - engCfg.Validators = vals - - vdr := ids.GenerateTestNodeID() - if err := vals.Add(vdr, nil, ids.Empty, 1); err != nil { - t.Fatal(err) - } - - sender := &common.SenderTest{T: t} - engCfg.Sender = sender - - manager := vertex.NewTestManager(t) - engCfg.Manager = manager - - gVtx := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }, - BytesV: []byte{0}, - } - - vts := []avalanche.Vertex{gVtx} - 
utxos := []ids.ID{ids.GenerateTestID(), ids.GenerateTestID()} - - tx0 := &snowstorm.TestTx{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }} - tx0.InputIDsV = append(tx0.InputIDsV, utxos[0]) - - tx1 := &snowstorm.TestTx{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }} - tx1.InputIDsV = append(tx1.InputIDsV, utxos[1]) - - vtx0 := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Unknown, - }, - ParentsV: vts, - HeightV: 1, - TxsV: []snowstorm.Tx{tx0}, - BytesV: []byte{1}, - } - - vtx1 := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: []avalanche.Vertex{vtx0}, - HeightV: 2, - TxsV: []snowstorm.Tx{tx1}, - BytesV: []byte{2}, - } - - te, err := newTransitive(engCfg, noopStarter) - if err != nil { - t.Fatal(err) - } - - if err := te.Start(context.Background(), 0); err != nil { - t.Fatal(err) - } - - parsed := new(bool) - manager.ParseVtxF = func(_ context.Context, b []byte) (avalanche.Vertex, error) { - if bytes.Equal(b, vtx1.Bytes()) { - *parsed = true - return vtx1, nil - } - return nil, errUnknownVertex - } - - manager.GetVtxF = func(_ context.Context, vtxID ids.ID) (avalanche.Vertex, error) { - if !*parsed { - return nil, errUnknownVertex - } - - if vtxID == vtx1.ID() { - return vtx1, nil - } - return nil, errUnknownVertex - } - - reqID := new(uint32) - sender.SendGetF = func(_ context.Context, reqVdr ids.NodeID, requestID uint32, vtxID ids.ID) { - *reqID = requestID - if reqVdr != vdr { - t.Fatalf("Wrong validator requested") - } - if vtxID != vtx0.ID() { - t.Fatalf("Wrong vertex requested") - } - } - - if err := te.PushQuery(context.Background(), vdr, 0, vtx1.Bytes()); err != nil { - t.Fatal(err) - } - - sender.SendGetF = nil - sender.CantSendGet = false - - if err := te.PushQuery(context.Background(), vdr, 
*reqID, []byte{3}); err != nil { - t.Fatal(err) - } - - *parsed = false - manager.ParseVtxF = func(_ context.Context, b []byte) (avalanche.Vertex, error) { - if bytes.Equal(b, vtx0.Bytes()) { - *parsed = true - return vtx0, nil - } - return nil, errUnknownVertex - } - - manager.GetVtxF = func(_ context.Context, vtxID ids.ID) (avalanche.Vertex, error) { - if !*parsed { - return nil, errUnknownVertex - } - - if vtxID == vtx0.ID() { - return vtx0, nil - } - return nil, errUnknownVertex - } - sender.CantSendPushQuery = false - sender.CantSendChits = false - - vtx0.StatusV = choices.Processing - - if err := te.Put(context.Background(), vdr, *reqID, vtx0.Bytes()); err != nil { - t.Fatal(err) - } - - prefs := te.Consensus.Preferences() - if !prefs.Contains(vtx1.ID()) { - t.Fatalf("Shouldn't have abandoned the pending vertex") - } -} - -func TestEngineAggressivePolling(t *testing.T) { - _, _, engCfg := DefaultConfig() - - engCfg.Params.ConcurrentRepolls = 3 - engCfg.Params.BetaRogue = 3 - - vals := validators.NewSet() - engCfg.Validators = vals - - vdr := ids.GenerateTestNodeID() - if err := vals.Add(vdr, nil, ids.Empty, 1); err != nil { - t.Fatal(err) - } - - sender := &common.SenderTest{T: t} - engCfg.Sender = sender - - manager := vertex.NewTestManager(t) - engCfg.Manager = manager - - vm := &vertex.TestVM{TestVM: block.TestVM{TestVM: common.TestVM{T: t}}} - vm.Default(true) - engCfg.VM = vm - - gVtx := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }, - BytesV: []byte{0}, - } - - vts := []avalanche.Vertex{gVtx} - utxos := []ids.ID{ids.GenerateTestID(), ids.GenerateTestID()} - - tx0 := &snowstorm.TestTx{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }} - tx0.InputIDsV = append(tx0.InputIDsV, utxos[0]) - - tx1 := &snowstorm.TestTx{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }} - 
tx1.InputIDsV = append(tx1.InputIDsV, utxos[1]) - - vtx := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: vts, - HeightV: 1, - TxsV: []snowstorm.Tx{tx0}, - BytesV: []byte{1}, - } - - vm.CantSetState = false - te, err := newTransitive(engCfg, noopStarter) - if err != nil { - t.Fatal(err) - } - - if err := te.Start(context.Background(), 0); err != nil { - t.Fatal(err) - } - - vm.CantSetState = true - parsed := new(bool) - manager.ParseVtxF = func(_ context.Context, b []byte) (avalanche.Vertex, error) { - if bytes.Equal(b, vtx.Bytes()) { - *parsed = true - return vtx, nil - } - return nil, errUnknownVertex - } - - manager.GetVtxF = func(_ context.Context, vtxID ids.ID) (avalanche.Vertex, error) { - if !*parsed { - return nil, errUnknownVertex - } - - if vtxID == vtx.ID() { - return vtx, nil - } - return nil, errUnknownVertex - } - - numPushQueries := new(int) - sender.SendPushQueryF = func(context.Context, set.Set[ids.NodeID], uint32, []byte) { - *numPushQueries++ - } - - numPullQueries := new(int) - sender.SendPullQueryF = func(context.Context, set.Set[ids.NodeID], uint32, ids.ID) { - *numPullQueries++ - } - - vm.CantPendingTxs = false - - if err := te.Put(context.Background(), vdr, 0, vtx.Bytes()); err != nil { - t.Fatal(err) - } - - if *numPushQueries != 1 { - t.Fatalf("should have issued one push query") - } - if *numPullQueries != 2 { - t.Fatalf("should have issued two pull queries") - } -} - -func TestEngineDuplicatedIssuance(t *testing.T) { - _, _, engCfg := DefaultConfig() - engCfg.Params.BatchSize = 1 - engCfg.Params.BetaVirtuous = 5 - engCfg.Params.BetaRogue = 5 - - sender := &common.SenderTest{T: t} - sender.Default(true) - sender.CantSendGetAcceptedFrontier = false - engCfg.Sender = sender - - vals := validators.NewSet() - engCfg.Validators = vals - - vdr := ids.GenerateTestNodeID() - if err := vals.Add(vdr, nil, ids.Empty, 1); err != nil { - t.Fatal(err) - } - - 
manager := vertex.NewTestManager(t) - engCfg.Manager = manager - - manager.Default(true) - - vm := &vertex.TestVM{TestVM: block.TestVM{TestVM: common.TestVM{T: t}}} - vm.Default(true) - engCfg.VM = vm - - gVtx := &avalanche.TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }} - mVtx := &avalanche.TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }} - - gTx := &snowstorm.TestTx{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }} - - utxos := []ids.ID{ids.GenerateTestID(), ids.GenerateTestID()} - - tx := &snowstorm.TestTx{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - DependenciesV: []snowstorm.Tx{gTx}, - } - tx.InputIDsV = append(tx.InputIDsV, utxos[0]) - - manager.EdgeF = func(context.Context) []ids.ID { - return []ids.ID{gVtx.ID(), mVtx.ID()} - } - manager.GetVtxF = func(_ context.Context, id ids.ID) (avalanche.Vertex, error) { - switch id { - case gVtx.ID(): - return gVtx, nil - case mVtx.ID(): - return mVtx, nil - } - t.Fatalf("Unknown vertex") - panic("Should have errored") - } - - vm.CantSetState = false - te, err := newTransitive(engCfg, noopStarter) - if err != nil { - t.Fatal(err) - } - - if err := te.Start(context.Background(), 0); err != nil { - t.Fatal(err) - } - - vm.CantSetState = true - lastVtx := new(avalanche.TestVertex) - manager.BuildVtxF = func(_ context.Context, _ []ids.ID, txs []snowstorm.Tx) (avalanche.Vertex, error) { - lastVtx = &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: []avalanche.Vertex{gVtx, mVtx}, - HeightV: 1, - TxsV: txs, - BytesV: []byte{1}, - } - return lastVtx, nil - } - - sender.CantSendPushQuery = false - - vm.PendingTxsF = func(context.Context) []snowstorm.Tx { - return []snowstorm.Tx{tx} - } - if err := 
te.Notify(context.Background(), common.PendingTxs); err != nil { - t.Fatal(err) - } - - if len(lastVtx.TxsV) != 1 || lastVtx.TxsV[0].ID() != tx.ID() { - t.Fatalf("Should have issued txs differently") - } - - manager.BuildVtxF = func(context.Context, []ids.ID, []snowstorm.Tx) (avalanche.Vertex, error) { - t.Fatalf("shouldn't have attempted to issue a duplicated tx") - return nil, nil - } - - if err := te.Notify(context.Background(), common.PendingTxs); err != nil { - t.Fatal(err) - } -} - -func TestEngineDoubleChit(t *testing.T) { - _, _, engCfg := DefaultConfig() - - engCfg.Params.Alpha = 2 - engCfg.Params.K = 2 - engCfg.Params.MixedQueryNumPushNonVdr = 2 - - vals := validators.NewSet() - engCfg.Validators = vals - - vdr0 := ids.GenerateTestNodeID() - vdr1 := ids.GenerateTestNodeID() - - if err := vals.Add(vdr0, nil, ids.Empty, 1); err != nil { - t.Fatal(err) - } - if err := vals.Add(vdr1, nil, ids.Empty, 1); err != nil { - t.Fatal(err) - } - - sender := &common.SenderTest{T: t} - sender.Default(true) - sender.CantSendGetAcceptedFrontier = false - engCfg.Sender = sender - - manager := vertex.NewTestManager(t) - manager.Default(true) - engCfg.Manager = manager - - gVtx := &avalanche.TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }} - mVtx := &avalanche.TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }} - - vts := []avalanche.Vertex{gVtx, mVtx} - utxos := []ids.ID{ids.GenerateTestID()} - - tx := &snowstorm.TestTx{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }} - tx.InputIDsV = append(tx.InputIDsV, utxos[0]) - - vtx := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: vts, - HeightV: 1, - TxsV: []snowstorm.Tx{tx}, - BytesV: []byte{1, 1, 2, 3}, - } - - manager.EdgeF = func(context.Context) []ids.ID { - 
return []ids.ID{vts[0].ID(), vts[1].ID()} - } - manager.GetVtxF = func(_ context.Context, id ids.ID) (avalanche.Vertex, error) { - switch id { - case gVtx.ID(): - return gVtx, nil - case mVtx.ID(): - return mVtx, nil - } - t.Fatalf("Unknown vertex") - panic("Should have errored") - } - - te, err := newTransitive(engCfg, noopStarter) - if err != nil { - t.Fatal(err) - } - - if err := te.Start(context.Background(), 0); err != nil { - t.Fatal(err) - } - - reqID := new(uint32) - sender.SendPushQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, vtxBytes []byte) { - *reqID = requestID - if inVdrs.Len() != 2 { - t.Fatalf("Wrong number of validators") - } - if !bytes.Equal(vtx.Bytes(), vtxBytes) { - t.Fatalf("Wrong vertex requested") - } - } - manager.GetVtxF = func(_ context.Context, id ids.ID) (avalanche.Vertex, error) { - if id == vtx.ID() { - return vtx, nil - } - t.Fatalf("Unknown vertex") - panic("Should have errored") - } - - if err := te.issue(context.Background(), vtx); err != nil { - t.Fatal(err) - } - - votes := []ids.ID{vtx.ID()} - - if status := tx.Status(); status != choices.Processing { - t.Fatalf("Wrong tx status: %s ; expected: %s", status, choices.Processing) - } - - if err := te.Chits(context.Background(), vdr0, *reqID, votes, nil); err != nil { - t.Fatal(err) - } - - if status := tx.Status(); status != choices.Processing { - t.Fatalf("Wrong tx status: %s ; expected: %s", status, choices.Processing) - } - - if err := te.Chits(context.Background(), vdr0, *reqID, votes, nil); err != nil { - t.Fatal(err) - } - - if status := tx.Status(); status != choices.Processing { - t.Fatalf("Wrong tx status: %s ; expected: %s", status, choices.Processing) - } - - if err := te.Chits(context.Background(), vdr1, *reqID, votes, nil); err != nil { - t.Fatal(err) - } - - if status := tx.Status(); status != choices.Accepted { - t.Fatalf("Wrong tx status: %s ; expected: %s", status, choices.Accepted) - } -} - -func TestEngineBubbleVotes(t 
*testing.T) { - _, _, engCfg := DefaultConfig() - - vals := validators.NewSet() - engCfg.Validators = vals - - vdr := ids.GenerateTestNodeID() - err := vals.Add(vdr, nil, ids.Empty, 1) - require.NoError(t, err) - - sender := &common.SenderTest{T: t} - sender.Default(true) - sender.CantSendGetAcceptedFrontier = false - engCfg.Sender = sender - - manager := vertex.NewTestManager(t) - manager.Default(true) - engCfg.Manager = manager - - utxos := []ids.ID{ - ids.GenerateTestID(), - ids.GenerateTestID(), - ids.GenerateTestID(), - } - - tx0 := &snowstorm.TestTx{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - InputIDsV: utxos[:1], - } - tx1 := &snowstorm.TestTx{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - InputIDsV: utxos[1:2], - } - tx2 := &snowstorm.TestTx{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - InputIDsV: utxos[1:2], - } - - vtx := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - HeightV: 0, - TxsV: []snowstorm.Tx{tx0}, - BytesV: []byte{0}, - } - - missingVtx := &avalanche.TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Unknown, - }} - - pendingVtx0 := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: []avalanche.Vertex{vtx, missingVtx}, - HeightV: 1, - TxsV: []snowstorm.Tx{tx1}, - BytesV: []byte{1}, - } - - pendingVtx1 := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: []avalanche.Vertex{pendingVtx0}, - HeightV: 2, - TxsV: []snowstorm.Tx{tx2}, - BytesV: []byte{2}, - } - - manager.EdgeF = func(context.Context) []ids.ID { - return nil - } - manager.GetVtxF = func(_ 
context.Context, id ids.ID) (avalanche.Vertex, error) { - switch id { - case vtx.ID(): - return vtx, nil - case missingVtx.ID(): - return nil, errMissing - case pendingVtx0.ID(): - return pendingVtx0, nil - case pendingVtx1.ID(): - return pendingVtx1, nil - } - require.FailNow(t, "unknown vertex", "vtxID: %s", id) - panic("should have errored") - } - - te, err := newTransitive(engCfg, noopStarter) - if err != nil { - t.Fatal(err) - } - - if err := te.Start(context.Background(), 0); err != nil { - t.Fatal(err) - } - - queryReqID := new(uint32) - queried := new(bool) - sender.SendPushQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, vtxBytes []byte) { - require.Len(t, inVdrs, 1, "wrong number of validators") - *queryReqID = requestID - require.Equal(t, vtx.Bytes(), vtxBytes, "wrong vertex requested") - *queried = true - } - - getReqID := new(uint32) - fetched := new(bool) - sender.SendGetF = func(_ context.Context, inVdr ids.NodeID, requestID uint32, vtxID ids.ID) { - require.Equal(t, vdr, inVdr, "wrong validator") - *getReqID = requestID - require.Equal(t, missingVtx.ID(), vtxID, "wrong vertex requested") - *fetched = true - } - - issued, err := te.issueFrom(context.Background(), vdr, pendingVtx1) - require.NoError(t, err) - require.False(t, issued, "shouldn't have been able to issue %s", pendingVtx1.ID()) - require.True(t, *queried, "should have queried for %s", vtx.ID()) - require.True(t, *fetched, "should have fetched %s", missingVtx.ID()) - - // can't apply votes yet because pendingVtx0 isn't issued because missingVtx - // is missing - err = te.Chits(context.Background(), vdr, *queryReqID, []ids.ID{pendingVtx1.ID()}, nil) - require.NoError(t, err) - require.Equal(t, choices.Processing, tx0.Status(), "wrong tx status") - require.Equal(t, choices.Processing, tx1.Status(), "wrong tx status") - - // vote for pendingVtx1 should be bubbled up to pendingVtx0 and then to vtx - err = te.GetFailed(context.Background(), vdr, *getReqID) - 
require.NoError(t, err) - require.Equal(t, choices.Accepted, tx0.Status(), "wrong tx status") - require.Equal(t, choices.Processing, tx1.Status(), "wrong tx status") -} - -func TestEngineIssue(t *testing.T) { - _, _, engCfg := DefaultConfig() - engCfg.Params.BatchSize = 1 - engCfg.Params.BetaVirtuous = 1 - engCfg.Params.BetaRogue = 1 - engCfg.Params.OptimalProcessing = 1 - - sender := &common.SenderTest{T: t} - sender.Default(true) - sender.CantSendGetAcceptedFrontier = false - engCfg.Sender = sender - - vals := validators.NewSet() - engCfg.Validators = vals - - vdr := ids.GenerateTestNodeID() - if err := vals.Add(vdr, nil, ids.Empty, 1); err != nil { - t.Fatal(err) - } - - manager := vertex.NewTestManager(t) - manager.Default(true) - engCfg.Manager = manager - - vm := &vertex.TestVM{TestVM: block.TestVM{TestVM: common.TestVM{T: t}}} - vm.Default(true) - engCfg.VM = vm - - gVtx := &avalanche.TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }} - mVtx := &avalanche.TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }} - - gTx := &snowstorm.TestTx{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }} - - utxos := []ids.ID{ids.GenerateTestID(), ids.GenerateTestID()} - - tx0 := &snowstorm.TestTx{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - DependenciesV: []snowstorm.Tx{gTx}, - InputIDsV: utxos[:1], - } - tx1 := &snowstorm.TestTx{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - DependenciesV: []snowstorm.Tx{gTx}, - InputIDsV: utxos[1:], - } - - manager.EdgeF = func(context.Context) []ids.ID { - return []ids.ID{gVtx.ID(), mVtx.ID()} - } - manager.GetVtxF = func(_ context.Context, id ids.ID) (avalanche.Vertex, error) { - switch id { - case gVtx.ID(): - return gVtx, nil - case mVtx.ID(): - 
return mVtx, nil - } - t.Fatalf("Unknown vertex") - panic("Should have errored") - } - - vm.CantSetState = false - te, err := newTransitive(engCfg, noopStarter) - if err != nil { - t.Fatal(err) - } - - if err := te.Start(context.Background(), 0); err != nil { - t.Fatal(err) - } - - vm.CantSetState = true - numBuilt := 0 - vtxID := ids.GenerateTestID() - manager.BuildVtxF = func(_ context.Context, _ []ids.ID, txs []snowstorm.Tx) (avalanche.Vertex, error) { - numBuilt++ - vtx := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: vtxID, - StatusV: choices.Processing, - }, - ParentsV: []avalanche.Vertex{gVtx, mVtx}, - HeightV: 1, - TxsV: txs, - BytesV: []byte{1}, - } - - manager.GetVtxF = func(_ context.Context, id ids.ID) (avalanche.Vertex, error) { - switch id { - case gVtx.ID(): - return gVtx, nil - case mVtx.ID(): - return mVtx, nil - case vtx.ID(): - return vtx, nil - } - t.Fatalf("Unknown vertex") - panic("Should have errored") - } - - return vtx, nil - } - - var queryRequestID uint32 - sender.SendPushQueryF = func(_ context.Context, _ set.Set[ids.NodeID], requestID uint32, _ []byte) { - queryRequestID = requestID - } - - vm.PendingTxsF = func(context.Context) []snowstorm.Tx { - return []snowstorm.Tx{tx0, tx1} - } - if err := te.Notify(context.Background(), common.PendingTxs); err != nil { - t.Fatal(err) - } - - if numBuilt != 1 { - t.Fatalf("Should have issued txs differently") - } - - if err := te.Chits(context.Background(), vdr, queryRequestID, []ids.ID{vtxID}, nil); err != nil { - t.Fatal(err) - } - - if numBuilt != 2 { - t.Fatalf("Should have issued txs differently") - } -} - -// Test that a transaction is abandoned if a dependency fails verification, -// even if there are outstanding requests for vertices when the -// dependency fails verification. 
-func TestAbandonTx(t *testing.T) { - require := require.New(t) - _, _, engCfg := DefaultConfig() - engCfg.Params.BatchSize = 1 - engCfg.Params.BetaVirtuous = 1 - engCfg.Params.BetaRogue = 1 - engCfg.Params.OptimalProcessing = 1 - - sender := &common.SenderTest{ - T: t, - CantSendGetAcceptedFrontier: false, - } - sender.Default(true) - engCfg.Sender = sender - - engCfg.Validators = validators.NewSet() - vdr := ids.GenerateTestNodeID() - if err := engCfg.Validators.Add(vdr, nil, ids.Empty, 1); err != nil { - t.Fatal(err) - } - - manager := vertex.NewTestManager(t) - manager.Default(true) - manager.CantEdge = false - manager.CantGetVtx = false - engCfg.Manager = manager - - vm := &vertex.TestVM{TestVM: block.TestVM{TestVM: common.TestVM{T: t}}} - vm.Default(true) - vm.CantSetState = false - - engCfg.VM = vm - - te, err := newTransitive(engCfg, noopStarter) - if err != nil { - t.Fatal(err) - } - - if err := te.Start(context.Background(), 0); err != nil { - t.Fatal(err) - } - - gVtx := &avalanche.TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }} - - gTx := &snowstorm.TestTx{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }} - - tx0 := &snowstorm.TestTx{ // Fails verification - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - DependenciesV: []snowstorm.Tx{gTx}, - InputIDsV: []ids.ID{gTx.ID()}, - BytesV: utils.RandomBytes(32), - VerifyV: errTest, - } - - tx1 := &snowstorm.TestTx{ // Depends on tx0 - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - DependenciesV: []snowstorm.Tx{tx0}, - InputIDsV: []ids.ID{gTx.ID()}, - BytesV: utils.RandomBytes(32), - } - - vtx0 := &avalanche.TestVertex{ // Contains tx0, which will fail verification - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Unknown, - }, - ParentsV: 
[]avalanche.Vertex{gVtx}, - HeightV: gVtx.HeightV + 1, - TxsV: []snowstorm.Tx{tx0}, - } - - // Contains tx1, which depends on tx0. - // vtx0 and vtx1 are siblings. - vtx1 := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Unknown, - }, - ParentsV: []avalanche.Vertex{gVtx}, - HeightV: gVtx.HeightV + 1, - TxsV: []snowstorm.Tx{tx1}, - } - - // Cause the engine to send a Get request for vtx1, vtx0, and some other vtx that doesn't exist - sender.CantSendGet = false - sender.CantSendChits = false - err = te.PullQuery(context.Background(), vdr, 0, vtx1.ID()) - require.NoError(err) - err = te.PullQuery(context.Background(), vdr, 0, vtx0.ID()) - require.NoError(err) - err = te.PullQuery(context.Background(), vdr, 0, ids.GenerateTestID()) - require.NoError(err) - - // Give the engine vtx1. It should wait to issue vtx1 - // until tx0 is issued, because tx1 depends on tx0. - manager.ParseVtxF = func(_ context.Context, b []byte) (avalanche.Vertex, error) { - if bytes.Equal(b, vtx1.BytesV) { - vtx1.StatusV = choices.Processing - return vtx1, nil - } - require.FailNow("should have asked to parse vtx1") - return nil, nil - } - err = te.Put(context.Background(), vdr, 0, vtx1.Bytes()) - require.NoError(err) - - // Verify that vtx1 is waiting to be issued. - require.True(te.pending.Contains(vtx1.ID())) - - // Give the engine vtx0. It should try to issue vtx0 - // but then abandon it because tx0 fails verification. 
- manager.ParseVtxF = func(_ context.Context, b []byte) (avalanche.Vertex, error) { - if bytes.Equal(b, vtx0.BytesV) { - vtx0.StatusV = choices.Processing - return vtx0, nil - } - require.FailNow("should have asked to parse vtx0") - return nil, nil - } - err = te.Put(context.Background(), vdr, 0, vtx0.Bytes()) - require.NoError(err) - - // Despite the fact that there is still an outstanding vertex request, - // vtx1 should have been abandoned because tx0 failed verification - require.False(te.pending.Contains(vtx1.ID())) - // sanity check that there is indeed an outstanding vertex request - require.True(te.outstandingVtxReqs.Len() == 1) -} - -func TestSendMixedQuery(t *testing.T) { - type test struct { - isVdr bool - } - tests := []test{ - {isVdr: true}, - {isVdr: false}, - } - for _, tt := range tests { - t.Run( - fmt.Sprintf("is validator: %v", tt.isVdr), - func(t *testing.T) { - _, _, engCfg := DefaultConfig() - sender := &common.SenderTest{T: t} - engCfg.Sender = sender - sender.Default(true) - manager := vertex.NewTestManager(t) - engCfg.Manager = manager - // Override the parameters k, MixedQueryNumPushVdr, MixedQueryNumPushNonVdr, - // and update the validator set to have k validators. 
- engCfg.Params.K = 20 - engCfg.Params.Alpha = 12 - engCfg.Params.MixedQueryNumPushVdr = 12 - engCfg.Params.MixedQueryNumPushNonVdr = 11 - te, err := newTransitive(engCfg, noopStarter) - if err != nil { - t.Fatal(err) - } - startReqID := uint32(0) - if err := te.Start(context.Background(), startReqID); err != nil { - t.Fatal(err) - } - - vdrs := set.Set[ids.NodeID]{} - te.Validators = validators.NewSet() - for i := 0; i < engCfg.Params.K; i++ { - vdrID := ids.GenerateTestNodeID() - vdrs.Add(vdrID) - err := te.Validators.Add(vdrID, nil, ids.Empty, 1) - if err != nil { - t.Fatal(err) - } - } - if tt.isVdr { - vdrs.Add(engCfg.Ctx.NodeID) - err := te.Validators.Add(engCfg.Ctx.NodeID, nil, ids.Empty, 1) - if err != nil { - t.Fatal(err) - } - } - - // [blk1] is a child of [gBlk] and passes verification - vtx1 := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: []avalanche.Vertex{ - &avalanche.TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }}, - }, - BytesV: []byte{1}, - } - - manager.ParseVtxF = func(_ context.Context, b []byte) (avalanche.Vertex, error) { - switch { - case bytes.Equal(b, vtx1.Bytes()): - return vtx1, nil - default: - t.Fatalf("Unknown block bytes") - return nil, nil - } - } - - pullQuerySent := new(bool) - pullQueryReqID := new(uint32) - pullQueriedVdrs := set.Set[ids.NodeID]{} - sender.SendPullQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, vtxID ids.ID) { - switch { - case *pullQuerySent: - t.Fatalf("Asked multiple times") - case vtxID != vtx1.ID(): - t.Fatalf("Expected engine to request vtx1") - } - pullQueriedVdrs.Union(inVdrs) - *pullQuerySent = true - *pullQueryReqID = requestID - } - - pushQuerySent := new(bool) - pushQueryReqID := new(uint32) - pushQueriedVdrs := set.Set[ids.NodeID]{} - sender.SendPushQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], 
requestID uint32, vtx []byte) { - switch { - case *pushQuerySent: - t.Fatal("Asked multiple times") - case !bytes.Equal(vtx, vtx1.Bytes()): - t.Fatal("got unexpected block bytes instead of blk1") - } - *pushQuerySent = true - *pushQueryReqID = requestID - pushQueriedVdrs.Union(inVdrs) - } - - // Give the engine vtx1. It should insert it into consensus and send a mixed query - // consisting of 12 pull queries and 8 push queries. - if err := te.Put(context.Background(), te.Validators.List()[0].NodeID, constants.GossipMsgRequestID, vtx1.Bytes()); err != nil { - t.Fatal(err) - } - - switch { - case !*pullQuerySent: - t.Fatal("expected us to send pull queries") - case !*pushQuerySent: - t.Fatal("expected us to send push queries") - case *pushQueryReqID != *pullQueryReqID: - t.Fatalf("expected equal push query (%v) and pull query (%v) req IDs", *pushQueryReqID, *pullQueryReqID) - case pushQueriedVdrs.Len()+pullQueriedVdrs.Len() != te.Config.Params.K: - t.Fatalf("expected num push queried (%d) + num pull queried (%d) to be %d", pushQueriedVdrs.Len(), pullQueriedVdrs.Len(), te.Config.Params.K) - case tt.isVdr && pushQueriedVdrs.Len() != te.Params.MixedQueryNumPushVdr: - t.Fatalf("expected num push queried (%d) to be %d", pullQueriedVdrs.Len(), te.Params.MixedQueryNumPushVdr) - case !tt.isVdr && pushQueriedVdrs.Len() != te.Params.MixedQueryNumPushNonVdr: - t.Fatalf("expected num push queried (%d) to be %d", pullQueriedVdrs.Len(), te.Params.MixedQueryNumPushNonVdr) - } - - pullQueriedVdrs.Union(pushQueriedVdrs) // Now this holds all queried validators (push and pull) - for vdr := range pullQueriedVdrs { - if !vdrs.Contains(vdr) { - t.Fatalf("got unexpected vdr %v", vdr) - } - } - }) - } -} - -func TestEngineApplyAcceptedFrontierInQueryFailed(t *testing.T) { - require := require.New(t) - - _, _, engCfg := DefaultConfig() - engCfg.Params.BatchSize = 1 - engCfg.Params.BetaVirtuous = 2 - engCfg.Params.BetaRogue = 2 - engCfg.Params.OptimalProcessing = 1 - - sender := 
&common.SenderTest{T: t} - sender.Default(true) - sender.CantSendGetAcceptedFrontier = false - engCfg.Sender = sender - - vals := validators.NewSet() - engCfg.Validators = vals - - vdr := ids.GenerateTestNodeID() - require.NoError(vals.Add(vdr, nil, ids.Empty, 1)) - - manager := vertex.NewTestManager(t) - manager.Default(true) - engCfg.Manager = manager - - vm := &vertex.TestVM{TestVM: block.TestVM{TestVM: common.TestVM{T: t}}} - vm.Default(true) - engCfg.VM = vm - - gVtx := &avalanche.TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }} - mVtx := &avalanche.TestVertex{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }} - - gTx := &snowstorm.TestTx{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }} - - utxos := []ids.ID{ids.GenerateTestID(), ids.GenerateTestID()} - - tx := &snowstorm.TestTx{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - DependenciesV: []snowstorm.Tx{gTx}, - InputIDsV: utxos[:1], - } - - manager.EdgeF = func(context.Context) []ids.ID { - return []ids.ID{gVtx.ID(), mVtx.ID()} - } - manager.GetVtxF = func(_ context.Context, id ids.ID) (avalanche.Vertex, error) { - switch id { - case gVtx.ID(): - return gVtx, nil - case mVtx.ID(): - return mVtx, nil - } - t.Fatalf("Unknown vertex") - panic("Should have errored") - } - - vm.CantSetState = false - te, err := newTransitive(engCfg, noopStarter) - require.NoError(err) - require.NoError(te.Start(context.Background(), 0)) - - vtx := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentsV: []avalanche.Vertex{gVtx, mVtx}, - TxsV: []snowstorm.Tx{tx}, - BytesV: utils.RandomBytes(32), - } - - queryRequestID := new(uint32) - sender.SendPushQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, 
vtxBytes []byte) { - require.Contains(inVdrs, vdr) - require.Equal(vtx.Bytes(), vtxBytes) - *queryRequestID = requestID - } - - require.NoError(te.issue(context.Background(), vtx)) - - manager.GetVtxF = func(_ context.Context, id ids.ID) (avalanche.Vertex, error) { - switch id { - case gVtx.ID(): - return gVtx, nil - case mVtx.ID(): - return mVtx, nil - case vtx.ID(): - return vtx, nil - } - t.Fatalf("unknown vertex") - panic("Should have errored") - } - - require.Equal(choices.Processing, vtx.Status()) - - sender.SendPullQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, vtxID ids.ID) { - require.Contains(inVdrs, vdr) - require.Equal(vtx.ID(), vtxID) - *queryRequestID = requestID - } - - vtxIDs := []ids.ID{vtx.ID()} - require.NoError(te.Chits(context.Background(), vdr, *queryRequestID, vtxIDs, vtxIDs)) - - require.Equal(choices.Processing, vtx.Status()) - - require.NoError(te.QueryFailed(context.Background(), vdr, *queryRequestID)) - - require.Equal(choices.Accepted, vtx.Status()) -} diff --git a/avalanchego/snow/engine/avalanche/vertex/builder.go b/avalanchego/snow/engine/avalanche/vertex/builder.go index 506b2132..cf3e88ee 100644 --- a/avalanchego/snow/engine/avalanche/vertex/builder.go +++ b/avalanchego/snow/engine/avalanche/vertex/builder.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package vertex @@ -8,15 +8,12 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/consensus/avalanche" - "github.com/ava-labs/avalanchego/snow/consensus/snowstorm" "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/hashing" ) // Builder builds a vertex given a set of parentIDs and transactions. 
type Builder interface { - // Build a new vertex from the contents of a vertex - BuildVtx(ctx context.Context, parentIDs []ids.ID, txs []snowstorm.Tx) (avalanche.Vertex, error) // Build a new stop vertex from the parents BuildStopVtx(ctx context.Context, parentIDs []ids.ID) (avalanche.Vertex, error) } @@ -65,10 +62,10 @@ func buildVtx( utils.Sort(parentIDs) utils.SortByHash(txs) - codecVer := codecVersion + codecVer := CodecVersion if stopVertex { // use new codec version for the "StopVertex" - codecVer = codecVersionWithStopVtx + codecVer = CodecVersionWithStopVtx } innerVtx := innerStatelessVertex{ @@ -83,7 +80,7 @@ func buildVtx( return nil, err } - vtxBytes, err := c.Marshal(innerVtx.Version, innerVtx) + vtxBytes, err := Codec.Marshal(innerVtx.Version, innerVtx) vtx := statelessVertex{ innerStatelessVertex: innerVtx, id: hashing.ComputeHash256Array(vtxBytes), diff --git a/avalanchego/snow/engine/avalanche/vertex/builder_test.go b/avalanchego/snow/engine/avalanche/vertex/builder_test.go index 7ed6a9a5..a70b14ba 100644 --- a/avalanchego/snow/engine/avalanche/vertex/builder_test.go +++ b/avalanchego/snow/engine/avalanche/vertex/builder_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package vertex @@ -11,7 +11,9 @@ import ( "github.com/ava-labs/avalanchego/ids" ) -func TestBuildInvalid(t *testing.T) { +func TestBuildDuplicateTxs(t *testing.T) { + require := require.New(t) + chainID := ids.ID{1} height := uint64(2) parentIDs := []ids.ID{{4}, {5}} @@ -22,10 +24,12 @@ func TestBuildInvalid(t *testing.T) { parentIDs, txs, ) - require.Error(t, err, "build should have errored because restrictions were provided in epoch 0") + require.ErrorIs(err, errInvalidTxs) } func TestBuildValid(t *testing.T) { + require := require.New(t) + chainID := ids.ID{1} height := uint64(2) parentIDs := []ids.ID{{4}, {5}} @@ -36,9 +40,9 @@ func TestBuildValid(t *testing.T) { parentIDs, txs, ) - require.NoError(t, err) - require.Equal(t, chainID, vtx.ChainID()) - require.Equal(t, height, vtx.Height()) - require.Equal(t, parentIDs, vtx.ParentIDs()) - require.Equal(t, txs, vtx.Txs()) + require.NoError(err) + require.Equal(chainID, vtx.ChainID()) + require.Equal(height, vtx.Height()) + require.Equal(parentIDs, vtx.ParentIDs()) + require.Equal(txs, vtx.Txs()) } diff --git a/avalanchego/snow/engine/avalanche/vertex/codec.go b/avalanchego/snow/engine/avalanche/vertex/codec.go index 564d699a..12f387d0 100644 --- a/avalanchego/snow/engine/avalanche/vertex/codec.go +++ b/avalanchego/snow/engine/avalanche/vertex/codec.go @@ -1,35 +1,38 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package vertex import ( + "time" + "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" "github.com/ava-labs/avalanchego/codec/reflectcodec" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/units" ) const ( + CodecVersion uint16 = 0 + CodecVersionWithStopVtx uint16 = 1 + // maxSize is the maximum allowed vertex size. 
It is necessary to deter DoS maxSize = units.MiB - - codecVersion uint16 = 0 - codecVersionWithStopVtx uint16 = 1 ) -var c codec.Manager +var Codec codec.Manager func init() { - lc := linearcodec.New([]string{reflectcodec.DefaultTagName + "V0"}, maxSize) - lc2 := linearcodec.New([]string{reflectcodec.DefaultTagName + "V1"}, maxSize) + lc0 := linearcodec.New(time.Time{}, []string{reflectcodec.DefaultTagName + "V0"}, maxSize) + lc1 := linearcodec.New(time.Time{}, []string{reflectcodec.DefaultTagName + "V1"}, maxSize) - c = codec.NewManager(maxSize) - // for backward compatibility, still register the initial codec version - if err := c.RegisterCodec(codecVersion, lc); err != nil { - panic(err) - } - if err := c.RegisterCodec(codecVersionWithStopVtx, lc2); err != nil { + Codec = codec.NewManager(maxSize) + err := utils.Err( + Codec.RegisterCodec(CodecVersion, lc0), + Codec.RegisterCodec(CodecVersionWithStopVtx, lc1), + ) + if err != nil { panic(err) } } diff --git a/avalanchego/snow/engine/avalanche/vertex/heap.go b/avalanchego/snow/engine/avalanche/vertex/heap.go deleted file mode 100644 index fa9a0a83..00000000 --- a/avalanchego/snow/engine/avalanche/vertex/heap.go +++ /dev/null @@ -1,135 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package vertex - -import ( - "container/heap" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/consensus/avalanche" - "github.com/ava-labs/avalanchego/utils/set" -) - -var ( - _ Heap = (*maxHeightVertexHeap)(nil) - _ heap.Interface = (*priorityQueue)(nil) -) - -type priorityQueue []avalanche.Vertex - -func (pq priorityQueue) Len() int { - return len(pq) -} - -// Returns true if the vertex at index i has greater height than the vertex at -// index j. 
-func (pq priorityQueue) Less(i, j int) bool { - statusI := pq[i].Status() - statusJ := pq[j].Status() - - // Put unknown vertices at the front of the heap to ensure once we have made - // it below a certain height in DAG traversal we do not need to reset - if !statusI.Fetched() { - return true - } - if !statusJ.Fetched() { - return false - } - - // Treat errors on retrieving the height as if the vertex is not fetched - heightI, errI := pq[i].Height() - if errI != nil { - return true - } - heightJ, errJ := pq[j].Height() - if errJ != nil { - return false - } - return heightI > heightJ -} - -func (pq priorityQueue) Swap(i, j int) { - pq[i], pq[j] = pq[j], pq[i] -} - -// Push adds an item to this priority queue. x must have type *vertexItem -func (pq *priorityQueue) Push(x interface{}) { - item := x.(avalanche.Vertex) - *pq = append(*pq, item) -} - -// Pop returns the last item in this priorityQueue -func (pq *priorityQueue) Pop() interface{} { - old := *pq - n := len(old) - item := old[n-1] - old[n-1] = nil - *pq = old[0 : n-1] - return item -} - -// Heap defines the functionality of a heap of vertices with unique VertexIDs -// ordered by height -type Heap interface { - // Empty the heap. - Clear() - - // Add the provided vertex to the heap. Vertices are de-duplicated, returns - // true if the vertex was added, false if it was dropped. - Push(avalanche.Vertex) bool - - // Remove the top vertex. Assumes that there is at least one element. - Pop() avalanche.Vertex - - // Returns if a vertex with the provided ID is currently in the heap. - Contains(ids.ID) bool - - // Returns the number of vertices in the heap. - Len() int -} - -// NewHeap returns an empty Heap -func NewHeap() Heap { - return &maxHeightVertexHeap{} -} - -type maxHeightVertexHeap struct { - heap priorityQueue - elementIDs set.Set[ids.ID] -} - -func (vh *maxHeightVertexHeap) Clear() { - vh.heap = priorityQueue{} - vh.elementIDs.Clear() -} - -// Push adds an element to this heap. 
Returns true if the element was added. -// Returns false if it was already in the heap. -func (vh *maxHeightVertexHeap) Push(vtx avalanche.Vertex) bool { - vtxID := vtx.ID() - if vh.elementIDs.Contains(vtxID) { - return false - } - - vh.elementIDs.Add(vtxID) - heap.Push(&vh.heap, vtx) - return true -} - -// If there are any vertices in this heap with status Unknown, removes one such -// vertex and returns it. Otherwise, removes and returns the vertex in this heap -// with the greatest height. -func (vh *maxHeightVertexHeap) Pop() avalanche.Vertex { - vtx := heap.Pop(&vh.heap).(avalanche.Vertex) - vh.elementIDs.Remove(vtx.ID()) - return vtx -} - -func (vh *maxHeightVertexHeap) Len() int { - return vh.heap.Len() -} - -func (vh *maxHeightVertexHeap) Contains(vtxID ids.ID) bool { - return vh.elementIDs.Contains(vtxID) -} diff --git a/avalanchego/snow/engine/avalanche/vertex/heap_test.go b/avalanchego/snow/engine/avalanche/vertex/heap_test.go deleted file mode 100644 index b4e049b5..00000000 --- a/avalanchego/snow/engine/avalanche/vertex/heap_test.go +++ /dev/null @@ -1,149 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package vertex - -import ( - "testing" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/choices" - "github.com/ava-labs/avalanchego/snow/consensus/avalanche" -) - -// This example inserts several ints into an IntHeap, checks the minimum, -// and removes them in order of priority. 
-func TestUniqueVertexHeapReturnsOrdered(t *testing.T) { - h := NewHeap() - - vtx0 := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - HeightV: 0, - } - vtx1 := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - HeightV: 1, - } - vtx2 := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - HeightV: 1, - } - vtx3 := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - HeightV: 3, - } - vtx4 := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Unknown, - }, - HeightV: 0, - } - - vts := []avalanche.Vertex{vtx0, vtx1, vtx2, vtx3, vtx4} - - for _, vtx := range vts { - h.Push(vtx) - } - - vtxZ := h.Pop() - if vtxZ.ID() != vtx4.ID() { - t.Fatalf("Heap did not pop unknown element first") - } - - vtxA := h.Pop() - if height, err := vtxA.Height(); err != nil || height != 3 { - t.Fatalf("First height from heap was incorrect") - } else if vtxA.ID() != vtx3.ID() { - t.Fatalf("Incorrect ID on vertex popped from heap") - } - - vtxB := h.Pop() - if height, err := vtxB.Height(); err != nil || height != 1 { - t.Fatalf("First height from heap was incorrect") - } else if vtxB.ID() != vtx1.ID() && vtxB.ID() != vtx2.ID() { - t.Fatalf("Incorrect ID on vertex popped from heap") - } - - vtxC := h.Pop() - if height, err := vtxC.Height(); err != nil || height != 1 { - t.Fatalf("First height from heap was incorrect") - } else if vtxC.ID() != vtx1.ID() && vtxC.ID() != vtx2.ID() { - t.Fatalf("Incorrect ID on vertex popped from heap") - } - - if vtxB.ID() == vtxC.ID() { - t.Fatalf("Heap returned same element more than once") - } - - vtxD := h.Pop() - if height, err := vtxD.Height(); err != nil || height != 0 { - t.Fatalf("Last 
height returned was incorrect") - } else if vtxD.ID() != vtx0.ID() { - t.Fatalf("Last item from heap had incorrect ID") - } - - if h.Len() != 0 { - t.Fatalf("Heap was not empty after popping all of its elements") - } -} - -func TestUniqueVertexHeapRemainsUnique(t *testing.T) { - h := NewHeap() - - vtx0 := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - HeightV: 0, - } - vtx1 := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - HeightV: 1, - } - - sharedID := ids.GenerateTestID() - vtx2 := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: sharedID, - StatusV: choices.Processing, - }, - HeightV: 1, - } - vtx3 := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: sharedID, - StatusV: choices.Processing, - }, - HeightV: 2, - } - - pushed1 := h.Push(vtx0) - pushed2 := h.Push(vtx1) - pushed3 := h.Push(vtx2) - pushed4 := h.Push(vtx3) - switch { - case h.Len() != 3: - t.Fatalf("Unique Vertex Heap has incorrect length: %d", h.Len()) - case !(pushed1 && pushed2 && pushed3): - t.Fatalf("Failed to push a new unique element") - case pushed4: - t.Fatalf("Pushed non-unique element to the unique vertex heap") - } -} diff --git a/avalanchego/snow/engine/avalanche/vertex/manager.go b/avalanchego/snow/engine/avalanche/vertex/manager.go index cf206742..a300affd 100644 --- a/avalanchego/snow/engine/avalanche/vertex/manager.go +++ b/avalanchego/snow/engine/avalanche/vertex/manager.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package vertex diff --git a/avalanchego/snow/engine/avalanche/vertex/mock_vm.go b/avalanchego/snow/engine/avalanche/vertex/mock_vm.go index 2597d64e..7ad293f6 100644 --- a/avalanchego/snow/engine/avalanche/vertex/mock_vm.go +++ b/avalanchego/snow/engine/avalanche/vertex/mock_vm.go @@ -1,25 +1,28 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/snow/engine/avalanche/vertex (interfaces: LinearizableVM) +// +// Generated by this command: +// +// mockgen -package=vertex -destination=snow/engine/avalanche/vertex/mock_vm.go github.com/ava-labs/avalanchego/snow/engine/avalanche/vertex LinearizableVM +// // Package vertex is a generated GoMock package. package vertex import ( context "context" + http "net/http" reflect "reflect" time "time" - manager "github.com/ava-labs/avalanchego/database/manager" + database "github.com/ava-labs/avalanchego/database" ids "github.com/ava-labs/avalanchego/ids" snow "github.com/ava-labs/avalanchego/snow" snowman "github.com/ava-labs/avalanchego/snow/consensus/snowman" snowstorm "github.com/ava-labs/avalanchego/snow/consensus/snowstorm" common "github.com/ava-labs/avalanchego/snow/engine/common" version "github.com/ava-labs/avalanchego/version" - gomock "github.com/golang/mock/gomock" + gomock "go.uber.org/mock/gomock" ) // MockLinearizableVM is a mock of LinearizableVM interface. @@ -54,7 +57,7 @@ func (m *MockLinearizableVM) AppGossip(arg0 context.Context, arg1 ids.NodeID, ar } // AppGossip indicates an expected call of AppGossip. 
-func (mr *MockLinearizableVMMockRecorder) AppGossip(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockLinearizableVMMockRecorder) AppGossip(arg0, arg1, arg2 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AppGossip", reflect.TypeOf((*MockLinearizableVM)(nil).AppGossip), arg0, arg1, arg2) } @@ -68,23 +71,23 @@ func (m *MockLinearizableVM) AppRequest(arg0 context.Context, arg1 ids.NodeID, a } // AppRequest indicates an expected call of AppRequest. -func (mr *MockLinearizableVMMockRecorder) AppRequest(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { +func (mr *MockLinearizableVMMockRecorder) AppRequest(arg0, arg1, arg2, arg3, arg4 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AppRequest", reflect.TypeOf((*MockLinearizableVM)(nil).AppRequest), arg0, arg1, arg2, arg3, arg4) } // AppRequestFailed mocks base method. -func (m *MockLinearizableVM) AppRequestFailed(arg0 context.Context, arg1 ids.NodeID, arg2 uint32) error { +func (m *MockLinearizableVM) AppRequestFailed(arg0 context.Context, arg1 ids.NodeID, arg2 uint32, arg3 *common.AppError) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AppRequestFailed", arg0, arg1, arg2) + ret := m.ctrl.Call(m, "AppRequestFailed", arg0, arg1, arg2, arg3) ret0, _ := ret[0].(error) return ret0 } // AppRequestFailed indicates an expected call of AppRequestFailed. 
-func (mr *MockLinearizableVMMockRecorder) AppRequestFailed(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockLinearizableVMMockRecorder) AppRequestFailed(arg0, arg1, arg2, arg3 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AppRequestFailed", reflect.TypeOf((*MockLinearizableVM)(nil).AppRequestFailed), arg0, arg1, arg2) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AppRequestFailed", reflect.TypeOf((*MockLinearizableVM)(nil).AppRequestFailed), arg0, arg1, arg2, arg3) } // AppResponse mocks base method. @@ -96,7 +99,7 @@ func (m *MockLinearizableVM) AppResponse(arg0 context.Context, arg1 ids.NodeID, } // AppResponse indicates an expected call of AppResponse. -func (mr *MockLinearizableVMMockRecorder) AppResponse(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +func (mr *MockLinearizableVMMockRecorder) AppResponse(arg0, arg1, arg2, arg3 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AppResponse", reflect.TypeOf((*MockLinearizableVM)(nil).AppResponse), arg0, arg1, arg2, arg3) } @@ -111,7 +114,7 @@ func (m *MockLinearizableVM) BuildBlock(arg0 context.Context) (snowman.Block, er } // BuildBlock indicates an expected call of BuildBlock. -func (mr *MockLinearizableVMMockRecorder) BuildBlock(arg0 interface{}) *gomock.Call { +func (mr *MockLinearizableVMMockRecorder) BuildBlock(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BuildBlock", reflect.TypeOf((*MockLinearizableVM)(nil).BuildBlock), arg0) } @@ -125,41 +128,26 @@ func (m *MockLinearizableVM) Connected(arg0 context.Context, arg1 ids.NodeID, ar } // Connected indicates an expected call of Connected. 
-func (mr *MockLinearizableVMMockRecorder) Connected(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockLinearizableVMMockRecorder) Connected(arg0, arg1, arg2 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Connected", reflect.TypeOf((*MockLinearizableVM)(nil).Connected), arg0, arg1, arg2) } // CreateHandlers mocks base method. -func (m *MockLinearizableVM) CreateHandlers(arg0 context.Context) (map[string]*common.HTTPHandler, error) { +func (m *MockLinearizableVM) CreateHandlers(arg0 context.Context) (map[string]http.Handler, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "CreateHandlers", arg0) - ret0, _ := ret[0].(map[string]*common.HTTPHandler) + ret0, _ := ret[0].(map[string]http.Handler) ret1, _ := ret[1].(error) return ret0, ret1 } // CreateHandlers indicates an expected call of CreateHandlers. -func (mr *MockLinearizableVMMockRecorder) CreateHandlers(arg0 interface{}) *gomock.Call { +func (mr *MockLinearizableVMMockRecorder) CreateHandlers(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateHandlers", reflect.TypeOf((*MockLinearizableVM)(nil).CreateHandlers), arg0) } -// CreateStaticHandlers mocks base method. -func (m *MockLinearizableVM) CreateStaticHandlers(arg0 context.Context) (map[string]*common.HTTPHandler, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CreateStaticHandlers", arg0) - ret0, _ := ret[0].(map[string]*common.HTTPHandler) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// CreateStaticHandlers indicates an expected call of CreateStaticHandlers. -func (mr *MockLinearizableVMMockRecorder) CreateStaticHandlers(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateStaticHandlers", reflect.TypeOf((*MockLinearizableVM)(nil).CreateStaticHandlers), arg0) -} - // CrossChainAppRequest mocks base method. 
func (m *MockLinearizableVM) CrossChainAppRequest(arg0 context.Context, arg1 ids.ID, arg2 uint32, arg3 time.Time, arg4 []byte) error { m.ctrl.T.Helper() @@ -169,23 +157,23 @@ func (m *MockLinearizableVM) CrossChainAppRequest(arg0 context.Context, arg1 ids } // CrossChainAppRequest indicates an expected call of CrossChainAppRequest. -func (mr *MockLinearizableVMMockRecorder) CrossChainAppRequest(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { +func (mr *MockLinearizableVMMockRecorder) CrossChainAppRequest(arg0, arg1, arg2, arg3, arg4 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CrossChainAppRequest", reflect.TypeOf((*MockLinearizableVM)(nil).CrossChainAppRequest), arg0, arg1, arg2, arg3, arg4) } // CrossChainAppRequestFailed mocks base method. -func (m *MockLinearizableVM) CrossChainAppRequestFailed(arg0 context.Context, arg1 ids.ID, arg2 uint32) error { +func (m *MockLinearizableVM) CrossChainAppRequestFailed(arg0 context.Context, arg1 ids.ID, arg2 uint32, arg3 *common.AppError) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CrossChainAppRequestFailed", arg0, arg1, arg2) + ret := m.ctrl.Call(m, "CrossChainAppRequestFailed", arg0, arg1, arg2, arg3) ret0, _ := ret[0].(error) return ret0 } // CrossChainAppRequestFailed indicates an expected call of CrossChainAppRequestFailed. 
-func (mr *MockLinearizableVMMockRecorder) CrossChainAppRequestFailed(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockLinearizableVMMockRecorder) CrossChainAppRequestFailed(arg0, arg1, arg2, arg3 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CrossChainAppRequestFailed", reflect.TypeOf((*MockLinearizableVM)(nil).CrossChainAppRequestFailed), arg0, arg1, arg2) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CrossChainAppRequestFailed", reflect.TypeOf((*MockLinearizableVM)(nil).CrossChainAppRequestFailed), arg0, arg1, arg2, arg3) } // CrossChainAppResponse mocks base method. @@ -197,7 +185,7 @@ func (m *MockLinearizableVM) CrossChainAppResponse(arg0 context.Context, arg1 id } // CrossChainAppResponse indicates an expected call of CrossChainAppResponse. -func (mr *MockLinearizableVMMockRecorder) CrossChainAppResponse(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +func (mr *MockLinearizableVMMockRecorder) CrossChainAppResponse(arg0, arg1, arg2, arg3 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CrossChainAppResponse", reflect.TypeOf((*MockLinearizableVM)(nil).CrossChainAppResponse), arg0, arg1, arg2, arg3) } @@ -211,7 +199,7 @@ func (m *MockLinearizableVM) Disconnected(arg0 context.Context, arg1 ids.NodeID) } // Disconnected indicates an expected call of Disconnected. -func (mr *MockLinearizableVMMockRecorder) Disconnected(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockLinearizableVMMockRecorder) Disconnected(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Disconnected", reflect.TypeOf((*MockLinearizableVM)(nil).Disconnected), arg0, arg1) } @@ -226,43 +214,43 @@ func (m *MockLinearizableVM) GetBlock(arg0 context.Context, arg1 ids.ID) (snowma } // GetBlock indicates an expected call of GetBlock. 
-func (mr *MockLinearizableVMMockRecorder) GetBlock(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockLinearizableVMMockRecorder) GetBlock(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlock", reflect.TypeOf((*MockLinearizableVM)(nil).GetBlock), arg0, arg1) } -// GetTx mocks base method. -func (m *MockLinearizableVM) GetTx(arg0 context.Context, arg1 ids.ID) (snowstorm.Tx, error) { +// GetBlockIDAtHeight mocks base method. +func (m *MockLinearizableVM) GetBlockIDAtHeight(arg0 context.Context, arg1 uint64) (ids.ID, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetTx", arg0, arg1) - ret0, _ := ret[0].(snowstorm.Tx) + ret := m.ctrl.Call(m, "GetBlockIDAtHeight", arg0, arg1) + ret0, _ := ret[0].(ids.ID) ret1, _ := ret[1].(error) return ret0, ret1 } -// GetTx indicates an expected call of GetTx. -func (mr *MockLinearizableVMMockRecorder) GetTx(arg0, arg1 interface{}) *gomock.Call { +// GetBlockIDAtHeight indicates an expected call of GetBlockIDAtHeight. +func (mr *MockLinearizableVMMockRecorder) GetBlockIDAtHeight(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTx", reflect.TypeOf((*MockLinearizableVM)(nil).GetTx), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlockIDAtHeight", reflect.TypeOf((*MockLinearizableVM)(nil).GetBlockIDAtHeight), arg0, arg1) } // HealthCheck mocks base method. -func (m *MockLinearizableVM) HealthCheck(arg0 context.Context) (interface{}, error) { +func (m *MockLinearizableVM) HealthCheck(arg0 context.Context) (any, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "HealthCheck", arg0) - ret0, _ := ret[0].(interface{}) + ret0, _ := ret[0].(any) ret1, _ := ret[1].(error) return ret0, ret1 } // HealthCheck indicates an expected call of HealthCheck. 
-func (mr *MockLinearizableVMMockRecorder) HealthCheck(arg0 interface{}) *gomock.Call { +func (mr *MockLinearizableVMMockRecorder) HealthCheck(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HealthCheck", reflect.TypeOf((*MockLinearizableVM)(nil).HealthCheck), arg0) } // Initialize mocks base method. -func (m *MockLinearizableVM) Initialize(arg0 context.Context, arg1 *snow.Context, arg2 manager.Manager, arg3, arg4, arg5 []byte, arg6 chan<- common.Message, arg7 []*common.Fx, arg8 common.AppSender) error { +func (m *MockLinearizableVM) Initialize(arg0 context.Context, arg1 *snow.Context, arg2 database.Database, arg3, arg4, arg5 []byte, arg6 chan<- common.Message, arg7 []*common.Fx, arg8 common.AppSender) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Initialize", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8) ret0, _ := ret[0].(error) @@ -270,7 +258,7 @@ func (m *MockLinearizableVM) Initialize(arg0 context.Context, arg1 *snow.Context } // Initialize indicates an expected call of Initialize. -func (mr *MockLinearizableVMMockRecorder) Initialize(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8 interface{}) *gomock.Call { +func (mr *MockLinearizableVMMockRecorder) Initialize(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Initialize", reflect.TypeOf((*MockLinearizableVM)(nil).Initialize), arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8) } @@ -285,7 +273,7 @@ func (m *MockLinearizableVM) LastAccepted(arg0 context.Context) (ids.ID, error) } // LastAccepted indicates an expected call of LastAccepted. 
-func (mr *MockLinearizableVMMockRecorder) LastAccepted(arg0 interface{}) *gomock.Call { +func (mr *MockLinearizableVMMockRecorder) LastAccepted(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LastAccepted", reflect.TypeOf((*MockLinearizableVM)(nil).LastAccepted), arg0) } @@ -299,7 +287,7 @@ func (m *MockLinearizableVM) Linearize(arg0 context.Context, arg1 ids.ID) error } // Linearize indicates an expected call of Linearize. -func (mr *MockLinearizableVMMockRecorder) Linearize(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockLinearizableVMMockRecorder) Linearize(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Linearize", reflect.TypeOf((*MockLinearizableVM)(nil).Linearize), arg0, arg1) } @@ -314,7 +302,7 @@ func (m *MockLinearizableVM) ParseBlock(arg0 context.Context, arg1 []byte) (snow } // ParseBlock indicates an expected call of ParseBlock. -func (mr *MockLinearizableVMMockRecorder) ParseBlock(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockLinearizableVMMockRecorder) ParseBlock(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ParseBlock", reflect.TypeOf((*MockLinearizableVM)(nil).ParseBlock), arg0, arg1) } @@ -329,25 +317,11 @@ func (m *MockLinearizableVM) ParseTx(arg0 context.Context, arg1 []byte) (snowsto } // ParseTx indicates an expected call of ParseTx. -func (mr *MockLinearizableVMMockRecorder) ParseTx(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockLinearizableVMMockRecorder) ParseTx(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ParseTx", reflect.TypeOf((*MockLinearizableVM)(nil).ParseTx), arg0, arg1) } -// PendingTxs mocks base method. 
-func (m *MockLinearizableVM) PendingTxs(arg0 context.Context) []snowstorm.Tx { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PendingTxs", arg0) - ret0, _ := ret[0].([]snowstorm.Tx) - return ret0 -} - -// PendingTxs indicates an expected call of PendingTxs. -func (mr *MockLinearizableVMMockRecorder) PendingTxs(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PendingTxs", reflect.TypeOf((*MockLinearizableVM)(nil).PendingTxs), arg0) -} - // SetPreference mocks base method. func (m *MockLinearizableVM) SetPreference(arg0 context.Context, arg1 ids.ID) error { m.ctrl.T.Helper() @@ -357,7 +331,7 @@ func (m *MockLinearizableVM) SetPreference(arg0 context.Context, arg1 ids.ID) er } // SetPreference indicates an expected call of SetPreference. -func (mr *MockLinearizableVMMockRecorder) SetPreference(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockLinearizableVMMockRecorder) SetPreference(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetPreference", reflect.TypeOf((*MockLinearizableVM)(nil).SetPreference), arg0, arg1) } @@ -371,7 +345,7 @@ func (m *MockLinearizableVM) SetState(arg0 context.Context, arg1 snow.State) err } // SetState indicates an expected call of SetState. -func (mr *MockLinearizableVMMockRecorder) SetState(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockLinearizableVMMockRecorder) SetState(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetState", reflect.TypeOf((*MockLinearizableVM)(nil).SetState), arg0, arg1) } @@ -385,11 +359,25 @@ func (m *MockLinearizableVM) Shutdown(arg0 context.Context) error { } // Shutdown indicates an expected call of Shutdown. 
-func (mr *MockLinearizableVMMockRecorder) Shutdown(arg0 interface{}) *gomock.Call { +func (mr *MockLinearizableVMMockRecorder) Shutdown(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Shutdown", reflect.TypeOf((*MockLinearizableVM)(nil).Shutdown), arg0) } +// VerifyHeightIndex mocks base method. +func (m *MockLinearizableVM) VerifyHeightIndex(arg0 context.Context) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "VerifyHeightIndex", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// VerifyHeightIndex indicates an expected call of VerifyHeightIndex. +func (mr *MockLinearizableVMMockRecorder) VerifyHeightIndex(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VerifyHeightIndex", reflect.TypeOf((*MockLinearizableVM)(nil).VerifyHeightIndex), arg0) +} + // Version mocks base method. func (m *MockLinearizableVM) Version(arg0 context.Context) (string, error) { m.ctrl.T.Helper() @@ -400,7 +388,7 @@ func (m *MockLinearizableVM) Version(arg0 context.Context) (string, error) { } // Version indicates an expected call of Version. -func (mr *MockLinearizableVMMockRecorder) Version(arg0 interface{}) *gomock.Call { +func (mr *MockLinearizableVMMockRecorder) Version(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Version", reflect.TypeOf((*MockLinearizableVM)(nil).Version), arg0) } diff --git a/avalanchego/snow/engine/avalanche/vertex/parser.go b/avalanchego/snow/engine/avalanche/vertex/parser.go index cd409c7e..41f848e7 100644 --- a/avalanchego/snow/engine/avalanche/vertex/parser.go +++ b/avalanchego/snow/engine/avalanche/vertex/parser.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package vertex @@ -19,7 +19,7 @@ type Parser interface { // Parse parses the provided vertex bytes into a stateless vertex func Parse(bytes []byte) (StatelessVertex, error) { vtx := innerStatelessVertex{} - version, err := c.Unmarshal(bytes, &vtx) + version, err := Codec.Unmarshal(bytes, &vtx) if err != nil { return nil, err } diff --git a/avalanchego/snow/engine/avalanche/vertex/parser_test.go b/avalanchego/snow/engine/avalanche/vertex/parser_test.go index 8f1f2e9e..f3016895 100644 --- a/avalanchego/snow/engine/avalanche/vertex/parser_test.go +++ b/avalanchego/snow/engine/avalanche/vertex/parser_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package vertex @@ -8,16 +8,19 @@ import ( "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/ids" ) func TestParseInvalid(t *testing.T) { - vtxBytes := []byte{} + vtxBytes := []byte{1, 2, 3, 4, 5} _, err := Parse(vtxBytes) - require.Error(t, err, "parse on an invalid vertex should have errored") + require.ErrorIs(t, err, codec.ErrUnknownVersion) } func TestParseValid(t *testing.T) { + require := require.New(t) + chainID := ids.ID{1} height := uint64(2) parentIDs := []ids.ID{{4}, {5}} @@ -28,10 +31,10 @@ func TestParseValid(t *testing.T) { parentIDs, txs, ) - require.NoError(t, err) + require.NoError(err) vtxBytes := vtx.Bytes() parsedVtx, err := Parse(vtxBytes) - require.NoError(t, err) - require.Equal(t, vtx, parsedVtx) + require.NoError(err) + require.Equal(vtx, parsedVtx) } diff --git a/avalanchego/snow/engine/avalanche/vertex/stateless_vertex.go b/avalanchego/snow/engine/avalanche/vertex/stateless_vertex.go index f87996a4..88884d9e 100644 --- a/avalanchego/snow/engine/avalanche/vertex/stateless_vertex.go +++ b/avalanchego/snow/engine/avalanche/vertex/stateless_vertex.go @@ -1,4 +1,4 @@ -// Copyright (C) 
2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package vertex @@ -23,7 +23,7 @@ const ( var ( errBadVersion = errors.New("invalid version") errBadEpoch = errors.New("invalid epoch") - errTooManyparentIDs = fmt.Errorf("vertex contains more than %d parentIDs", maxNumParents) + errTooManyParentIDs = fmt.Errorf("vertex contains more than %d parentIDs", maxNumParents) errNoOperations = errors.New("vertex contains no operations") errTooManyTxs = fmt.Errorf("vertex contains more than %d transactions", maxTxsPerVtx) errInvalidParents = errors.New("vertex contains non-sorted or duplicated parentIDs") @@ -73,7 +73,7 @@ func (v statelessVertex) ChainID() ids.ID { } func (v statelessVertex) StopVertex() bool { - return v.innerStatelessVertex.Version == codecVersionWithStopVtx + return v.innerStatelessVertex.Version == CodecVersionWithStopVtx } func (v statelessVertex) Height() uint64 { @@ -94,15 +94,15 @@ func (v statelessVertex) Txs() [][]byte { type innerStatelessVertex struct { Version uint16 `json:"version"` - ChainID ids.ID `serializeV0:"true" serializeV1:"true" json:"chainID"` - Height uint64 `serializeV0:"true" serializeV1:"true" json:"height"` - Epoch uint32 `serializeV0:"true" json:"epoch"` - ParentIDs []ids.ID `serializeV0:"true" serializeV1:"true" len:"128" json:"parentIDs"` - Txs [][]byte `serializeV0:"true" len:"128" json:"txs"` + ChainID ids.ID `json:"chainID" serializeV0:"true" serializeV1:"true"` + Height uint64 `json:"height" serializeV0:"true" serializeV1:"true"` + Epoch uint32 `json:"epoch" serializeV0:"true"` + ParentIDs []ids.ID `json:"parentIDs" serializeV0:"true" serializeV1:"true"` + Txs [][]byte `json:"txs" serializeV0:"true"` } func (v innerStatelessVertex) Verify() error { - if v.Version == codecVersionWithStopVtx { + if v.Version == CodecVersionWithStopVtx { return v.verifyStopVertex() } return v.verify() @@ -110,17 +110,17 @@ func (v 
innerStatelessVertex) Verify() error { func (v innerStatelessVertex) verify() error { switch { - case v.Version != codecVersion: + case v.Version != CodecVersion: return errBadVersion case v.Epoch != 0: return errBadEpoch case len(v.ParentIDs) > maxNumParents: - return errTooManyparentIDs + return errTooManyParentIDs case len(v.Txs) == 0: return errNoOperations case len(v.Txs) > maxTxsPerVtx: return errTooManyTxs - case !utils.IsSortedAndUniqueSortable(v.ParentIDs): + case !utils.IsSortedAndUnique(v.ParentIDs): return errInvalidParents case !utils.IsSortedAndUniqueByHash(v.Txs): return errInvalidTxs @@ -131,15 +131,15 @@ func (v innerStatelessVertex) verify() error { func (v innerStatelessVertex) verifyStopVertex() error { switch { - case v.Version != codecVersionWithStopVtx: + case v.Version != CodecVersionWithStopVtx: return errBadVersion case v.Epoch != 0: return errBadEpoch case len(v.ParentIDs) > maxNumParents: - return errTooManyparentIDs + return errTooManyParentIDs case len(v.Txs) != 0: return errTooManyTxs - case !utils.IsSortedAndUniqueSortable(v.ParentIDs): + case !utils.IsSortedAndUnique(v.ParentIDs): return errInvalidParents default: return nil diff --git a/avalanchego/snow/engine/avalanche/vertex/stateless_vertex_test.go b/avalanchego/snow/engine/avalanche/vertex/stateless_vertex_test.go index af9819da..35ece98c 100644 --- a/avalanchego/snow/engine/avalanche/vertex/stateless_vertex_test.go +++ b/avalanchego/snow/engine/avalanche/vertex/stateless_vertex_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package vertex @@ -6,6 +6,8 @@ package vertex import ( "testing" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/ids" ) @@ -24,14 +26,14 @@ func TestVertexVerify(t *testing.T) { } tests := []struct { - name string - vertex StatelessVertex - shouldErr bool + name string + vertex StatelessVertex + expectedErr error }{ { - name: "zero vertex", - vertex: statelessVertex{innerStatelessVertex: innerStatelessVertex{}}, - shouldErr: true, + name: "zero vertex", + vertex: statelessVertex{innerStatelessVertex: innerStatelessVertex{}}, + expectedErr: errNoOperations, }, { name: "valid vertex", @@ -43,7 +45,7 @@ func TestVertexVerify(t *testing.T) { ParentIDs: []ids.ID{}, Txs: [][]byte{{}}, }}, - shouldErr: false, + expectedErr: nil, }, { name: "invalid vertex epoch", @@ -55,7 +57,7 @@ func TestVertexVerify(t *testing.T) { ParentIDs: []ids.ID{}, Txs: [][]byte{{}}, }}, - shouldErr: true, + expectedErr: errBadEpoch, }, { name: "too many vertex parents", @@ -67,7 +69,7 @@ func TestVertexVerify(t *testing.T) { ParentIDs: tooManyParents, Txs: [][]byte{{}}, }}, - shouldErr: true, + expectedErr: errTooManyParentIDs, }, { name: "no vertex txs", @@ -79,7 +81,7 @@ func TestVertexVerify(t *testing.T) { ParentIDs: []ids.ID{}, Txs: [][]byte{}, }}, - shouldErr: true, + expectedErr: errNoOperations, }, { name: "too many vertex txs", @@ -91,7 +93,7 @@ func TestVertexVerify(t *testing.T) { ParentIDs: []ids.ID{}, Txs: tooManyTxs, }}, - shouldErr: true, + expectedErr: errTooManyTxs, }, { name: "unsorted vertex parents", @@ -103,7 +105,7 @@ func TestVertexVerify(t *testing.T) { ParentIDs: []ids.ID{{1}, {0}}, Txs: [][]byte{{}}, }}, - shouldErr: true, + expectedErr: errInvalidParents, }, { name: "unsorted vertex txs", @@ -115,7 +117,7 @@ func TestVertexVerify(t *testing.T) { ParentIDs: []ids.ID{}, Txs: [][]byte{{0}, {1}}, // note that txs are sorted by their hashes }}, - shouldErr: true, + expectedErr: errInvalidTxs, }, { name: "duplicate vertex parents", @@ -127,7 +129,7 
@@ func TestVertexVerify(t *testing.T) { ParentIDs: []ids.ID{{0}, {0}}, Txs: [][]byte{{}}, }}, - shouldErr: true, + expectedErr: errInvalidParents, }, { name: "duplicate vertex txs", @@ -139,17 +141,13 @@ func TestVertexVerify(t *testing.T) { ParentIDs: []ids.ID{}, Txs: [][]byte{{0}, {0}}, // note that txs are sorted by their hashes }}, - shouldErr: true, + expectedErr: errInvalidTxs, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { err := test.vertex.Verify() - if test.shouldErr && err == nil { - t.Fatal("expected verify to return an error but it didn't") - } else if !test.shouldErr && err != nil { - t.Fatalf("expected verify to pass but it returned: %s", err) - } + require.ErrorIs(t, err, test.expectedErr) }) } } diff --git a/avalanchego/snow/engine/avalanche/vertex/storage.go b/avalanchego/snow/engine/avalanche/vertex/storage.go index 40ec863d..cac766c6 100644 --- a/avalanchego/snow/engine/avalanche/vertex/storage.go +++ b/avalanchego/snow/engine/avalanche/vertex/storage.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package vertex diff --git a/avalanchego/snow/engine/avalanche/vertex/test_builder.go b/avalanchego/snow/engine/avalanche/vertex/test_builder.go index a0811527..53462937 100644 --- a/avalanchego/snow/engine/avalanche/vertex/test_builder.go +++ b/avalanchego/snow/engine/avalanche/vertex/test_builder.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package vertex @@ -8,9 +8,10 @@ import ( "errors" "testing" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/consensus/avalanche" - "github.com/ava-labs/avalanchego/snow/consensus/snowstorm" ) var ( @@ -22,7 +23,6 @@ var ( type TestBuilder struct { T *testing.T CantBuildVtx bool - BuildVtxF func(ctx context.Context, parentIDs []ids.ID, txs []snowstorm.Tx) (avalanche.Vertex, error) BuildStopVtxF func(ctx context.Context, parentIDs []ids.ID) (avalanche.Vertex, error) } @@ -30,22 +30,12 @@ func (b *TestBuilder) Default(cant bool) { b.CantBuildVtx = cant } -func (b *TestBuilder) BuildVtx(ctx context.Context, parentIDs []ids.ID, txs []snowstorm.Tx) (avalanche.Vertex, error) { - if b.BuildVtxF != nil { - return b.BuildVtxF(ctx, parentIDs, txs) - } - if b.CantBuildVtx && b.T != nil { - b.T.Fatal(errBuild) - } - return nil, errBuild -} - func (b *TestBuilder) BuildStopVtx(ctx context.Context, parentIDs []ids.ID) (avalanche.Vertex, error) { if b.BuildStopVtxF != nil { return b.BuildStopVtxF(ctx, parentIDs) } if b.CantBuildVtx && b.T != nil { - b.T.Fatal(errBuild) + require.FailNow(b.T, errBuild.Error()) } return nil, errBuild } diff --git a/avalanchego/snow/engine/avalanche/vertex/test_manager.go b/avalanchego/snow/engine/avalanche/vertex/test_manager.go index a2f55ee7..6954161c 100644 --- a/avalanchego/snow/engine/avalanche/vertex/test_manager.go +++ b/avalanchego/snow/engine/avalanche/vertex/test_manager.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package vertex diff --git a/avalanchego/snow/engine/avalanche/vertex/test_parser.go b/avalanchego/snow/engine/avalanche/vertex/test_parser.go index ef680ee8..2ee10add 100644 --- a/avalanchego/snow/engine/avalanche/vertex/test_parser.go +++ b/avalanchego/snow/engine/avalanche/vertex/test_parser.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package vertex @@ -8,6 +8,8 @@ import ( "errors" "testing" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/snow/consensus/avalanche" ) @@ -32,7 +34,7 @@ func (p *TestParser) ParseVtx(ctx context.Context, b []byte) (avalanche.Vertex, return p.ParseVtxF(ctx, b) } if p.CantParseVtx && p.T != nil { - p.T.Fatal(errParse) + require.FailNow(p.T, errParse.Error()) } return nil, errParse } diff --git a/avalanchego/snow/engine/avalanche/vertex/test_storage.go b/avalanchego/snow/engine/avalanche/vertex/test_storage.go index 10403a92..8e0b8bc1 100644 --- a/avalanchego/snow/engine/avalanche/vertex/test_storage.go +++ b/avalanchego/snow/engine/avalanche/vertex/test_storage.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package vertex @@ -8,6 +8,8 @@ import ( "errors" "testing" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/consensus/avalanche" ) @@ -38,7 +40,7 @@ func (s *TestStorage) GetVtx(ctx context.Context, vtxID ids.ID) (avalanche.Verte return s.GetVtxF(ctx, vtxID) } if s.CantGetVtx && s.T != nil { - s.T.Fatal(errGet) + require.FailNow(s.T, errGet.Error()) } return nil, errGet } @@ -48,7 +50,7 @@ func (s *TestStorage) Edge(ctx context.Context) []ids.ID { return s.EdgeF(ctx) } if s.CantEdge && s.T != nil { - s.T.Fatal(errEdge) + require.FailNow(s.T, errEdge.Error()) } return nil } @@ -58,7 +60,7 @@ func (s *TestStorage) StopVertexAccepted(ctx context.Context) (bool, error) { return s.StopVertexAcceptedF(ctx) } if s.CantStopVertexAccepted && s.T != nil { - s.T.Fatal(errStopVertexAccepted) + require.FailNow(s.T, errStopVertexAccepted.Error()) } return false, nil } diff --git a/avalanchego/snow/engine/avalanche/vertex/test_vm.go b/avalanchego/snow/engine/avalanche/vertex/test_vm.go index 576cfba1..ee17c8b1 100644 --- a/avalanchego/snow/engine/avalanche/vertex/test_vm.go +++ b/avalanchego/snow/engine/avalanche/vertex/test_vm.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package vertex @@ -7,13 +7,14 @@ import ( "context" "errors" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/consensus/snowstorm" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" ) var ( - errPending = errors.New("unexpectedly called Pending") errLinearize = errors.New("unexpectedly called Linearize") _ LinearizableVM = (*TestVM)(nil) @@ -22,20 +23,16 @@ var ( type TestVM struct { block.TestVM - CantLinearize, CantPendingTxs, CantParse, CantGet bool + CantLinearize, CantParse bool - LinearizeF func(context.Context, ids.ID) error - PendingTxsF func(context.Context) []snowstorm.Tx - ParseTxF func(context.Context, []byte) (snowstorm.Tx, error) - GetTxF func(context.Context, ids.ID) (snowstorm.Tx, error) + LinearizeF func(context.Context, ids.ID) error + ParseTxF func(context.Context, []byte) (snowstorm.Tx, error) } func (vm *TestVM) Default(cant bool) { vm.TestVM.Default(cant) - vm.CantPendingTxs = cant vm.CantParse = cant - vm.CantGet = cant } func (vm *TestVM) Linearize(ctx context.Context, stopVertexID ids.ID) error { @@ -43,37 +40,17 @@ func (vm *TestVM) Linearize(ctx context.Context, stopVertexID ids.ID) error { return vm.LinearizeF(ctx, stopVertexID) } if vm.CantLinearize && vm.T != nil { - vm.T.Fatal(errLinearize) + require.FailNow(vm.T, errLinearize.Error()) } return errLinearize } -func (vm *TestVM) PendingTxs(ctx context.Context) []snowstorm.Tx { - if vm.PendingTxsF != nil { - return vm.PendingTxsF(ctx) - } - if vm.CantPendingTxs && vm.T != nil { - vm.T.Fatal(errPending) - } - return nil -} - func (vm *TestVM) ParseTx(ctx context.Context, b []byte) (snowstorm.Tx, error) { if vm.ParseTxF != nil { return vm.ParseTxF(ctx, b) } if vm.CantParse && vm.T != nil { - vm.T.Fatal(errParse) + require.FailNow(vm.T, errParse.Error()) } return nil, errParse } - -func (vm *TestVM) GetTx(ctx context.Context, txID ids.ID) (snowstorm.Tx, error) { - if vm.GetTxF != nil { - return vm.GetTxF(ctx, 
txID) - } - if vm.CantGet && vm.T != nil { - vm.T.Fatal(errGet) - } - return nil, errGet -} diff --git a/avalanchego/snow/engine/avalanche/vertex/vm.go b/avalanchego/snow/engine/avalanche/vertex/vm.go index 710dfad7..9987fe16 100644 --- a/avalanchego/snow/engine/avalanche/vertex/vm.go +++ b/avalanchego/snow/engine/avalanche/vertex/vm.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package vertex @@ -56,17 +56,7 @@ type LinearizableVM interface { // implement type DAGVM interface { block.ChainVM - Getter - - // Return any transactions that have not been sent to consensus yet - PendingTxs(ctx context.Context) []snowstorm.Tx // Convert a stream of bytes to a transaction or return an error ParseTx(ctx context.Context, txBytes []byte) (snowstorm.Tx, error) } - -// Getter defines the functionality for fetching a tx/block by its ID. -type Getter interface { - // Retrieve a transaction that was submitted previously - GetTx(ctx context.Context, txID ids.ID) (snowstorm.Tx, error) -} diff --git a/avalanchego/snow/engine/avalanche/voter.go b/avalanchego/snow/engine/avalanche/voter.go deleted file mode 100644 index e9aa585d..00000000 --- a/avalanchego/snow/engine/avalanche/voter.go +++ /dev/null @@ -1,207 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package avalanche - -import ( - "context" - - "go.uber.org/zap" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/consensus/snowstorm" - "github.com/ava-labs/avalanchego/snow/engine/avalanche/vertex" - "github.com/ava-labs/avalanchego/utils/bag" - "github.com/ava-labs/avalanchego/utils/set" -) - -// Voter records chits received from [vdr] once its dependencies are met. 
-type voter struct { - t *Transitive - vdr ids.NodeID - requestID uint32 - response []ids.ID - deps set.Set[ids.ID] -} - -func (v *voter) Dependencies() set.Set[ids.ID] { - return v.deps -} - -// Mark that a dependency has been met. -func (v *voter) Fulfill(ctx context.Context, id ids.ID) { - v.deps.Remove(id) - v.Update(ctx) -} - -// Abandon this attempt to record chits. -func (v *voter) Abandon(ctx context.Context, id ids.ID) { - v.Fulfill(ctx, id) -} - -func (v *voter) Update(ctx context.Context) { - if v.deps.Len() != 0 || v.t.errs.Errored() { - return - } - - results := v.t.polls.Vote(v.requestID, v.vdr, v.response) - if len(results) == 0 { - return - } - - previouslyLinearized, err := v.t.Manager.StopVertexAccepted(ctx) - if err != nil { - v.t.errs.Add(err) - return - } - - for _, result := range results { - result := result - v.t.Ctx.Log.Debug("filtering poll results", - zap.Stringer("result", &result), - ) - - _, err := v.bubbleVotes(ctx, result) - if err != nil { - v.t.errs.Add(err) - return - } - - v.t.Ctx.Log.Debug("finishing poll", - zap.Stringer("result", &result), - ) - if err := v.t.Consensus.RecordPoll(ctx, result); err != nil { - v.t.errs.Add(err) - return - } - } - - linearized, err := v.t.Manager.StopVertexAccepted(ctx) - if err != nil { - v.t.errs.Add(err) - return - } - - if linearized { - // We guard here to ensure we only call the underlying vm.Linearize and - // startSnowmanConsensus calls once. - if !previouslyLinearized { - // After the chain has been linearized, we will not be issuing any new - // vertices. - v.t.pendingTxs = nil - v.t.metrics.pendingTxs.Set(0) - - // Invariant: The edge should only be the stop vertex after the - // linearization. 
- edge := v.t.Manager.Edge(ctx) - stopVertexID := edge[0] - if err := v.t.VM.Linearize(ctx, stopVertexID); err != nil { - v.t.errs.Add(err) - return - } - if err := v.t.startSnowmanConsensus(ctx, v.t.RequestID); err != nil { - v.t.errs.Add(err) - } - } - // If the chain has been linearized, there can't be any orphans, so we - // can exit here. - return - } - - orphans := v.t.Consensus.Orphans() - txs := make([]snowstorm.Tx, 0, orphans.Len()) - for orphanID := range orphans { - if tx, err := v.t.VM.GetTx(ctx, orphanID); err == nil { - txs = append(txs, tx) - } else { - v.t.Ctx.Log.Warn("failed to fetch tx during attempted re-issuance", - zap.Stringer("txID", orphanID), - zap.Error(err), - ) - } - } - if len(txs) > 0 { - v.t.Ctx.Log.Debug("re-issuing transactions", - zap.Int("numTxs", len(txs)), - ) - } - if _, err := v.t.batch(ctx, txs, batchOption{force: true}); err != nil { - v.t.errs.Add(err) - return - } - - if v.t.Consensus.Quiesce() { - v.t.Ctx.Log.Debug("avalanche engine can quiesce") - return - } - - v.t.Ctx.Log.Debug("avalanche engine can't quiesce") - v.t.repoll(ctx) -} - -func (v *voter) bubbleVotes(ctx context.Context, votes bag.UniqueBag[ids.ID]) (bag.UniqueBag[ids.ID], error) { - vertexHeap := vertex.NewHeap() - for vote, set := range votes { - vtx, err := v.t.Manager.GetVtx(ctx, vote) - if err != nil { - v.t.Ctx.Log.Debug("dropping vote(s)", - zap.String("reason", "failed to fetch vertex"), - zap.Stringer("voteID", vote), - zap.Int("numVotes", set.Len()), - zap.Error(err), - ) - votes.RemoveSet(vote) - continue - } - vertexHeap.Push(vtx) - } - - for vertexHeap.Len() > 0 { - vtx := vertexHeap.Pop() - vtxID := vtx.ID() - set := votes.GetSet(vtxID) - status := vtx.Status() - - if !status.Fetched() { - v.t.Ctx.Log.Debug("dropping vote(s)", - zap.String("reason", "vertex unknown"), - zap.Int("numVotes", set.Len()), - zap.Stringer("vtxID", vtxID), - ) - votes.RemoveSet(vtxID) - continue - } - - if status.Decided() { - v.t.Ctx.Log.Verbo("dropping vote(s)", - 
zap.String("reason", "vertex already decided"), - zap.Int("numVotes", set.Len()), - zap.Stringer("vtxID", vtxID), - zap.Stringer("status", status), - ) - - votes.RemoveSet(vtxID) - continue - } - - if !v.t.Consensus.VertexIssued(vtx) { - v.t.Ctx.Log.Verbo("bubbling vote(s)", - zap.String("reason", "vertex not issued"), - zap.Int("numVotes", set.Len()), - zap.Stringer("vtxID", vtxID), - ) - votes.RemoveSet(vtxID) // Remove votes for this vertex because it hasn't been issued - - parents, err := vtx.Parents() - if err != nil { - return votes, err - } - for _, parentVtx := range parents { - votes.UnionSet(parentVtx.ID(), set) - vertexHeap.Push(parentVtx) - } - } - } - - return votes, nil -} diff --git a/avalanchego/snow/engine/avalanche/voter_test.go b/avalanchego/snow/engine/avalanche/voter_test.go deleted file mode 100644 index 2429b025..00000000 --- a/avalanchego/snow/engine/avalanche/voter_test.go +++ /dev/null @@ -1,374 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package avalanche - -import ( - "context" - "testing" - - "github.com/stretchr/testify/require" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/engine/avalanche/vertex" - "github.com/ava-labs/avalanchego/utils/bag" - "github.com/ava-labs/avalanchego/utils/set" -) - -func TestVotingFinishesWithAbandonedDep(t *testing.T) { - _, _, engCfg := DefaultConfig() - mngr := vertex.NewTestManager(t) - engCfg.Manager = mngr - transitive, err := newTransitive(engCfg, noopStarter) - require.NoError(t, err) - require.NoError(t, transitive.Start(context.Background(), 0 /*=startReqID*/)) - - // prepare 3 validators - vdr1 := ids.NodeID{1} - vdr2 := ids.NodeID{2} - vdr3 := ids.NodeID{3} - - vdrs := bag.Bag[ids.NodeID]{} - vdrs.Add( - vdr1, - vdr2, - ) - - // add poll for request 1 - transitive.polls.Add(1, vdrs) - - vdrs = bag.Bag[ids.NodeID]{} - vdrs.Add( - vdr1, - vdr3, - ) - - // add poll for request 2 - transitive.polls.Add(2, vdrs) - - // expect 2 pending polls - require.Equal(t, 2, transitive.polls.Len()) - - // vote on request 2 first - vote1 := ids.GenerateTestID() - vote2 := ids.GenerateTestID() - - voter1 := &voter{ - t: transitive, - requestID: 2, - response: []ids.ID{vote2}, - deps: set.NewSet[ids.ID](0), - vdr: vdr1, - } - - voter3 := &voter{ - t: transitive, - requestID: 2, - response: []ids.ID{vote2}, - deps: set.NewSet[ids.ID](0), - vdr: vdr3, - } - - voter1.Update(context.Background()) - voter3.Update(context.Background()) - - // still expect 2 pending polls since request 1 voting is still pending - require.Equal(t, 2, transitive.polls.Len()) - - // vote on request 1 - // add dependency to voter1's vote which has to be fulfilled prior to finishing - voter1Dep := ids.GenerateTestID() - voter1DepSet := set.NewSet[ids.ID](1) - voter1DepSet.Add(voter1Dep) - - voter1 = &voter{ - t: transitive, - requestID: 1, - response: []ids.ID{vote1}, - deps: voter1DepSet, - vdr: vdr1, - } - - voter2 := &voter{ - t: transitive, - requestID: 1, - 
response: []ids.ID{vote1}, - deps: set.NewSet[ids.ID](0), - vdr: vdr2, - } - - voter1.Update(context.Background()) // does nothing because the dependency is still pending - voter2.Update(context.Background()) // voter1 is still remaining with the pending dependency - - voter1.Abandon(context.Background(), voter1Dep) // voter1 abandons dep1 - - // expect all polls to have finished - require.Equal(t, 0, transitive.polls.Len()) -} - -func TestVotingFinishesWithAbandonDepMiddleRequest(t *testing.T) { - _, _, engCfg := DefaultConfig() - mngr := vertex.NewTestManager(t) - engCfg.Manager = mngr - transitive, err := newTransitive(engCfg, noopStarter) - require.NoError(t, err) - require.NoError(t, transitive.Start(context.Background(), 0 /*=startReqID*/)) - - // prepare 3 validators - vdr1 := ids.NodeID{1} - vdr2 := ids.NodeID{2} - vdr3 := ids.NodeID{3} - - vdrs := bag.Bag[ids.NodeID]{} - vdrs.Add( - vdr1, - vdr2, - ) - - // add poll for request 1 - transitive.polls.Add(1, vdrs) - - vdrs = bag.Bag[ids.NodeID]{} - vdrs.Add( - vdr1, - vdr3, - ) - - // add poll for request 2 - transitive.polls.Add(2, vdrs) - - vdrs = bag.Bag[ids.NodeID]{} - vdrs.Add( - vdr2, - vdr3, - ) - - // add poll for request 3 - transitive.polls.Add(3, vdrs) - - // expect 3 pending polls - require.Equal(t, 3, transitive.polls.Len()) - - vote1 := ids.GenerateTestID() - vote2 := ids.GenerateTestID() - vote3 := ids.GenerateTestID() - - // vote on request 3 first - req3Voter1 := &voter{ - t: transitive, - requestID: 3, - response: []ids.ID{vote3}, - deps: set.NewSet[ids.ID](0), - vdr: vdr3, - } - - req3Voter2 := &voter{ - t: transitive, - requestID: 3, - response: []ids.ID{vote3}, - deps: set.NewSet[ids.ID](0), - vdr: vdr2, - } - - req3Voter1.Update(context.Background()) - req3Voter2.Update(context.Background()) - - // expect 3 pending polls since 2 and 1 are still pending - require.Equal(t, 3, transitive.polls.Len()) - - // vote on request 2 - // add dependency to req2/voter3's vote which has to be 
fulfilled prior to finishing - req2Voter2Dep := ids.GenerateTestID() - req2Voter2DepSet := set.NewSet[ids.ID](1) - req2Voter2DepSet.Add(req2Voter2Dep) - - req2Voter1 := &voter{ - t: transitive, - requestID: 2, - response: []ids.ID{vote2}, - deps: set.NewSet[ids.ID](0), - vdr: vdr1, - } - - req2Voter2 := &voter{ - t: transitive, - requestID: 2, - response: []ids.ID{vote2}, - deps: req2Voter2DepSet, - vdr: vdr3, - } - - req2Voter1.Update(context.Background()) // does nothing because dep is unfulfilled - req2Voter2.Update(context.Background()) - - // still expect 3 pending polls since request 1 voting is still pending - require.Equal(t, 3, transitive.polls.Len()) - - // vote on request 1 - // add dependency to voter1's vote which has to be fulfilled prior to finishing - req1Voter1Dep := ids.GenerateTestID() - req1Voter1DepSet := set.NewSet[ids.ID](1) - req1Voter1DepSet.Add(req1Voter1Dep) - req1Voter1 := &voter{ - t: transitive, - requestID: 1, - response: []ids.ID{vote1}, - deps: req1Voter1DepSet, - vdr: vdr1, - } - - req1Voter2 := &voter{ - t: transitive, - requestID: 1, - response: []ids.ID{vote1}, - deps: set.NewSet[ids.ID](0), - vdr: vdr2, - } - - req1Voter1.Update(context.Background()) // does nothing because the req2/voter1 dependency is still pending - req1Voter2.Update(context.Background()) // voter1 is still remaining with the pending dependency - - // abandon dep on voter3 - req2Voter2.Abandon(context.Background(), req2Voter2Dep) // voter3 abandons dep1 - - // expect polls to be pending as req1/voter1's dep is still unfulfilled - require.Equal(t, 3, transitive.polls.Len()) - - req1Voter1.Abandon(context.Background(), req1Voter1Dep) - - // expect all polls to have finished - require.Equal(t, 0, transitive.polls.Len()) -} - -func TestSharedDependency(t *testing.T) { - _, _, engCfg := DefaultConfig() - mngr := vertex.NewTestManager(t) - engCfg.Manager = mngr - transitive, err := newTransitive(engCfg, noopStarter) - require.NoError(t, err) - require.NoError(t, 
transitive.Start(context.Background(), 0 /*=startReqID*/)) - - // prepare 3 validators - vdr1 := ids.NodeID{1} - vdr2 := ids.NodeID{2} - vdr3 := ids.NodeID{3} - - vdrs := bag.Bag[ids.NodeID]{} - vdrs.Add( - vdr1, - vdr2, - ) - - // add poll for request 1 - transitive.polls.Add(1, vdrs) - - vdrs = bag.Bag[ids.NodeID]{} - vdrs.Add( - vdr1, - vdr3, - ) - - // add poll for request 2 - transitive.polls.Add(2, vdrs) - - vdrs = bag.Bag[ids.NodeID]{} - vdrs.Add( - vdr2, - vdr3, - ) - - // add poll for request 3 - transitive.polls.Add(3, vdrs) - - // expect 3 pending polls - require.Equal(t, 3, transitive.polls.Len()) - - vote1 := ids.GenerateTestID() - vote2 := ids.GenerateTestID() - vote3 := ids.GenerateTestID() - - // req3 voters all vote - - req3Voter1 := &voter{ - t: transitive, - requestID: 3, - response: []ids.ID{vote3}, - deps: set.NewSet[ids.ID](0), - vdr: vdr3, - } - - req3Voter1.Update(context.Background()) - - req3Voter2 := &voter{ - t: transitive, - requestID: 3, - response: []ids.ID{vote3}, - deps: set.NewSet[ids.ID](0), - vdr: vdr2, - } - - req3Voter2.Update(context.Background()) - - // 3 polls pending because req 2 and 1 have not voted - require.Equal(t, 3, transitive.polls.Len()) - - // setup common dependency - dep := ids.GenerateTestID() - depSet := set.NewSet[ids.ID](1) - depSet.Add(dep) - - req2Voter1 := &voter{ - t: transitive, - requestID: 2, - response: []ids.ID{vote2}, - deps: depSet, - vdr: vdr1, - } - - // does nothing because dependency is unfulfilled - req2Voter1.Update(context.Background()) - - req2Voter2 := &voter{ - t: transitive, - requestID: 2, - response: []ids.ID{vote2}, - deps: set.NewSet[ids.ID](0), - vdr: vdr3, - } - - req2Voter2.Update(context.Background()) - - // 3 polls pending as req 2 dependency is unfulfilled and 1 has not voted - require.Equal(t, 3, transitive.polls.Len()) - - req1Voter1 := &voter{ - t: transitive, - requestID: 1, - response: []ids.ID{vote1}, - deps: depSet, - vdr: vdr1, - } - - // does nothing because 
dependency is unfulfilled - req1Voter1.Update(context.Background()) - - req1Voter2 := &voter{ - t: transitive, - requestID: 1, - response: []ids.ID{vote1}, - deps: set.NewSet[ids.ID](0), - vdr: vdr2, - } - - req1Voter2.Update(context.Background()) - - // 3 polls pending as req2 and req 1 dependencies are unfulfilled - require.Equal(t, 3, transitive.polls.Len()) - - // abandon dependency - req1Voter1.Abandon(context.Background(), dep) - req2Voter1.Abandon(context.Background(), dep) - - // expect no pending polls - require.Equal(t, 0, transitive.polls.Len()) -} diff --git a/avalanchego/snow/engine/common/appsender/appsender_client.go b/avalanchego/snow/engine/common/appsender/appsender_client.go index a816dd68..ed1248d2 100644 --- a/avalanchego/snow/engine/common/appsender/appsender_client.go +++ b/avalanchego/snow/engine/common/appsender/appsender_client.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package appsender @@ -48,12 +48,25 @@ func (c *Client) SendCrossChainAppResponse(ctx context.Context, chainID ids.ID, return err } +func (c *Client) SendCrossChainAppError(ctx context.Context, chainID ids.ID, requestID uint32, errorCode int32, errorMessage string) error { + _, err := c.client.SendCrossChainAppError( + ctx, + &appsenderpb.SendCrossChainAppErrorMsg{ + ChainId: chainID[:], + RequestId: requestID, + ErrorCode: errorCode, + ErrorMessage: errorMessage, + }, + ) + + return err +} + func (c *Client) SendAppRequest(ctx context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32, request []byte) error { nodeIDsBytes := make([][]byte, nodeIDs.Len()) i := 0 for nodeID := range nodeIDs { - nodeID := nodeID // Prevent overwrite in next iteration - nodeIDsBytes[i] = nodeID[:] + nodeIDsBytes[i] = nodeID.Bytes() i++ } _, err := c.client.SendAppRequest( @@ -71,7 +84,7 @@ func (c *Client) SendAppResponse(ctx context.Context, nodeID ids.NodeID, request _, err := c.client.SendAppResponse( ctx, &appsenderpb.SendAppResponseMsg{ - NodeId: nodeID[:], + NodeId: nodeID.Bytes(), RequestId: requestID, Response: response, }, @@ -79,6 +92,19 @@ func (c *Client) SendAppResponse(ctx context.Context, nodeID ids.NodeID, request return err } +func (c *Client) SendAppError(ctx context.Context, nodeID ids.NodeID, requestID uint32, errorCode int32, errorMessage string) error { + _, err := c.client.SendAppError(ctx, + &appsenderpb.SendAppErrorMsg{ + NodeId: nodeID[:], + RequestId: requestID, + ErrorCode: errorCode, + ErrorMessage: errorMessage, + }, + ) + + return err +} + func (c *Client) SendAppGossip(ctx context.Context, msg []byte) error { _, err := c.client.SendAppGossip( ctx, @@ -93,8 +119,7 @@ func (c *Client) SendAppGossipSpecific(ctx context.Context, nodeIDs set.Set[ids. 
nodeIDsBytes := make([][]byte, nodeIDs.Len()) i := 0 for nodeID := range nodeIDs { - nodeID := nodeID // Prevent overwrite in next iteration - nodeIDsBytes[i] = nodeID[:] + nodeIDsBytes[i] = nodeID.Bytes() i++ } _, err := c.client.SendAppGossipSpecific( diff --git a/avalanchego/snow/engine/common/appsender/appsender_server.go b/avalanchego/snow/engine/common/appsender/appsender_server.go index 3583940d..eedce556 100644 --- a/avalanchego/snow/engine/common/appsender/appsender_server.go +++ b/avalanchego/snow/engine/common/appsender/appsender_server.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package appsender @@ -45,6 +45,15 @@ func (s *Server) SendCrossChainAppResponse(ctx context.Context, msg *appsenderpb return &emptypb.Empty{}, s.appSender.SendCrossChainAppResponse(ctx, chainID, msg.RequestId, msg.Response) } +func (s *Server) SendCrossChainAppError(ctx context.Context, msg *appsenderpb.SendCrossChainAppErrorMsg) (*emptypb.Empty, error) { + chainID, err := ids.ToID(msg.ChainId) + if err != nil { + return &emptypb.Empty{}, err + } + + return &emptypb.Empty{}, s.appSender.SendCrossChainAppError(ctx, chainID, msg.RequestId, msg.ErrorCode, msg.ErrorMessage) +} + func (s *Server) SendAppRequest(ctx context.Context, req *appsenderpb.SendAppRequestMsg) (*emptypb.Empty, error) { nodeIDs := set.NewSet[ids.NodeID](len(req.NodeIds)) for _, nodeIDBytes := range req.NodeIds { @@ -67,6 +76,16 @@ func (s *Server) SendAppResponse(ctx context.Context, req *appsenderpb.SendAppRe return &emptypb.Empty{}, err } +func (s *Server) SendAppError(ctx context.Context, req *appsenderpb.SendAppErrorMsg) (*emptypb.Empty, error) { + nodeID, err := ids.ToNodeID(req.NodeId) + if err != nil { + return nil, err + } + + err = s.appSender.SendAppError(ctx, nodeID, req.RequestId, req.ErrorCode, req.ErrorMessage) + return &emptypb.Empty{}, err +} + 
func (s *Server) SendAppGossip(ctx context.Context, req *appsenderpb.SendAppGossipMsg) (*emptypb.Empty, error) { err := s.appSender.SendAppGossip(ctx, req.Msg) return &emptypb.Empty{}, err diff --git a/avalanchego/snow/engine/common/bootstrap_tracker.go b/avalanchego/snow/engine/common/bootstrap_tracker.go index bf81644b..bd2ef43c 100644 --- a/avalanchego/snow/engine/common/bootstrap_tracker.go +++ b/avalanchego/snow/engine/common/bootstrap_tracker.go @@ -1,11 +1,9 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package common -import ( - "github.com/ava-labs/avalanchego/ids" -) +import "github.com/ava-labs/avalanchego/ids" // BootstrapTracker describes the standard interface for tracking the status of // a subnet bootstrapping diff --git a/avalanchego/snow/engine/common/bootstrapable.go b/avalanchego/snow/engine/common/bootstrapable.go index f18b3295..517eba2a 100644 --- a/avalanchego/snow/engine/common/bootstrapable.go +++ b/avalanchego/snow/engine/common/bootstrapable.go @@ -1,25 +1,13 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package common -import ( - "context" - - "github.com/ava-labs/avalanchego/ids" -) +import "context" type BootstrapableEngine interface { - Bootstrapable Engine -} - -// Bootstrapable defines the functionality required to support bootstrapping -type Bootstrapable interface { - // Force the provided containers to be accepted. Only returns fatal errors - // if they occur. 
- ForceAccepted(ctx context.Context, acceptedContainerIDs []ids.ID) error // Clear removes all containers to be processed upon bootstrapping - Clear() error + Clear(ctx context.Context) error } diff --git a/avalanchego/snow/engine/common/bootstrapper.go b/avalanchego/snow/engine/common/bootstrapper.go deleted file mode 100644 index 82ef874d..00000000 --- a/avalanchego/snow/engine/common/bootstrapper.go +++ /dev/null @@ -1,376 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package common - -import ( - "context" - - stdmath "math" - - "go.uber.org/zap" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/validators" - "github.com/ava-labs/avalanchego/utils/math" - "github.com/ava-labs/avalanchego/utils/set" -) - -const ( - // StatusUpdateFrequency is how many containers should be processed between - // logs - StatusUpdateFrequency = 5000 - - // MaxOutstandingGetAncestorsRequests is the maximum number of GetAncestors - // sent but not responded to/failed - MaxOutstandingGetAncestorsRequests = 10 - - // MaxOutstandingBroadcastRequests is the maximum number of requests to have - // outstanding when broadcasting. 
- MaxOutstandingBroadcastRequests = 50 -) - -var _ Bootstrapper = (*bootstrapper)(nil) - -type Bootstrapper interface { - AcceptedFrontierHandler - AcceptedHandler - Haltable - Startup(context.Context) error - Restart(ctx context.Context, reset bool) error -} - -// It collects mechanisms common to both snowman and avalanche bootstrappers -type bootstrapper struct { - Config - Halter - - // Holds the beacons that were sampled for the accepted frontier - sampledBeacons validators.Set - // IDs of validators we should request an accepted frontier from - pendingSendAcceptedFrontier set.Set[ids.NodeID] - // IDs of validators we requested an accepted frontier from but haven't - // received a reply yet - pendingReceiveAcceptedFrontier set.Set[ids.NodeID] - // IDs of validators that failed to respond with their accepted frontier - failedAcceptedFrontier set.Set[ids.NodeID] - // IDs of all the returned accepted frontiers - acceptedFrontierSet set.Set[ids.ID] - - // IDs of validators we should request filtering the accepted frontier from - pendingSendAccepted set.Set[ids.NodeID] - // IDs of validators we requested filtering the accepted frontier from but - // haven't received a reply yet - pendingReceiveAccepted set.Set[ids.NodeID] - // IDs of validators that failed to respond with their filtered accepted - // frontier - failedAccepted set.Set[ids.NodeID] - // IDs of the returned accepted containers and the stake weight that has - // marked them as accepted - acceptedVotes map[ids.ID]uint64 - acceptedFrontier []ids.ID - - // number of times the bootstrap has been attempted - bootstrapAttempts int -} - -func NewCommonBootstrapper(config Config) Bootstrapper { - return &bootstrapper{ - Config: config, - } -} - -func (b *bootstrapper) AcceptedFrontier(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerIDs []ids.ID) error { - // ignores any late responses - if requestID != b.Config.SharedCfg.RequestID { - b.Ctx.Log.Debug("received out-of-sync AcceptedFrontier 
message", - zap.Stringer("nodeID", nodeID), - zap.Uint32("expectedRequestID", b.Config.SharedCfg.RequestID), - zap.Uint32("requestID", requestID), - ) - return nil - } - - if !b.pendingReceiveAcceptedFrontier.Contains(nodeID) { - b.Ctx.Log.Debug("received unexpected AcceptedFrontier message", - zap.Stringer("nodeID", nodeID), - ) - return nil - } - - // Mark that we received a response from [nodeID] - b.pendingReceiveAcceptedFrontier.Remove(nodeID) - - // Union the reported accepted frontier from [nodeID] with the accepted - // frontier we got from others - b.acceptedFrontierSet.Add(containerIDs...) - - b.sendGetAcceptedFrontiers(ctx) - - // still waiting on requests - if b.pendingReceiveAcceptedFrontier.Len() != 0 { - return nil - } - - // We've received the accepted frontier from every bootstrap validator - // Ask each bootstrap validator to filter the list of containers that we were - // told are on the accepted frontier such that the list only contains containers - // they think are accepted. 
- // - // Create a newAlpha taking using the sampled beacon - // Keep the proportion of b.Alpha in the newAlpha - // newAlpha := totalSampledWeight * b.Alpha / totalWeight - - newAlpha := float64(b.sampledBeacons.Weight()*b.Alpha) / float64(b.Beacons.Weight()) - - failedBeaconWeight := b.Beacons.SubsetWeight(b.failedAcceptedFrontier) - - // fail the bootstrap if the weight is not enough to bootstrap - if float64(b.sampledBeacons.Weight())-newAlpha < float64(failedBeaconWeight) { - if b.Config.RetryBootstrap { - b.Ctx.Log.Debug("restarting bootstrap", - zap.String("reason", "not enough frontiers received"), - zap.Int("numBeacons", b.Beacons.Len()), - zap.Int("numFailedBootstrappers", b.failedAcceptedFrontier.Len()), - zap.Int("numBootstrapAttemps", b.bootstrapAttempts), - ) - return b.Restart(ctx, false) - } - - b.Ctx.Log.Debug("didn't receive enough frontiers", - zap.Int("numFailedValidators", b.failedAcceptedFrontier.Len()), - zap.Int("numBootstrapAttempts", b.bootstrapAttempts), - ) - } - - b.Config.SharedCfg.RequestID++ - b.acceptedFrontier = b.acceptedFrontierSet.List() - - b.sendGetAccepted(ctx) - return nil -} - -func (b *bootstrapper) GetAcceptedFrontierFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32) error { - // ignores any late responses - if requestID != b.Config.SharedCfg.RequestID { - b.Ctx.Log.Debug("received out-of-sync GetAcceptedFrontierFailed message", - zap.Stringer("nodeID", nodeID), - zap.Uint32("expectedRequestID", b.Config.SharedCfg.RequestID), - zap.Uint32("requestID", requestID), - ) - return nil - } - - // If we can't get a response from [nodeID], act as though they said their - // accepted frontier is empty and we add the validator to the failed list - b.failedAcceptedFrontier.Add(nodeID) - return b.AcceptedFrontier(ctx, nodeID, requestID, nil) -} - -func (b *bootstrapper) Accepted(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerIDs []ids.ID) error { - // ignores any late responses - if requestID != 
b.Config.SharedCfg.RequestID { - b.Ctx.Log.Debug("received out-of-sync Accepted message", - zap.Stringer("nodeID", nodeID), - zap.Uint32("expectedRequestID", b.Config.SharedCfg.RequestID), - zap.Uint32("requestID", requestID), - ) - return nil - } - - if !b.pendingReceiveAccepted.Contains(nodeID) { - b.Ctx.Log.Debug("received unexpected Accepted message", - zap.Stringer("nodeID", nodeID), - ) - return nil - } - // Mark that we received a response from [nodeID] - b.pendingReceiveAccepted.Remove(nodeID) - - weight := b.Beacons.GetWeight(nodeID) - for _, containerID := range containerIDs { - previousWeight := b.acceptedVotes[containerID] - newWeight, err := math.Add64(weight, previousWeight) - if err != nil { - b.Ctx.Log.Error("failed calculating the Accepted votes", - zap.Uint64("weight", weight), - zap.Uint64("previousWeight", previousWeight), - zap.Error(err), - ) - newWeight = stdmath.MaxUint64 - } - b.acceptedVotes[containerID] = newWeight - } - - b.sendGetAccepted(ctx) - - // wait on pending responses - if b.pendingReceiveAccepted.Len() != 0 { - return nil - } - - // We've received the filtered accepted frontier from every bootstrap validator - // Accept all containers that have a sufficient weight behind them - accepted := make([]ids.ID, 0, len(b.acceptedVotes)) - for containerID, weight := range b.acceptedVotes { - if weight >= b.Alpha { - accepted = append(accepted, containerID) - } - } - - // if we don't have enough weight for the bootstrap to be accepted then - // retry or fail the bootstrap - size := len(accepted) - if size == 0 && b.Beacons.Len() > 0 { - // if we had too many timeouts when asking for validator votes, we - // should restart bootstrap hoping for the network problems to go away; - // otherwise, we received enough (>= b.Alpha) responses, but no frontier - // was supported by a majority of validators (i.e. votes are split - // between minorities supporting different frontiers). 
- failedBeaconWeight := b.Beacons.SubsetWeight(b.failedAccepted) - votingStakes := b.Beacons.Weight() - failedBeaconWeight - if b.Config.RetryBootstrap && votingStakes < b.Alpha { - b.Ctx.Log.Debug("restarting bootstrap", - zap.String("reason", "not enough votes received"), - zap.Int("numBeacons", b.Beacons.Len()), - zap.Int("numFailedBootstrappers", b.failedAccepted.Len()), - zap.Int("numBootstrapAttempts", b.bootstrapAttempts), - ) - return b.Restart(ctx, false) - } - } - - if !b.Config.SharedCfg.Restarted { - b.Ctx.Log.Info("bootstrapping started syncing", - zap.Int("numVerticesInFrontier", size), - ) - } else { - b.Ctx.Log.Debug("bootstrapping started syncing", - zap.Int("numVerticesInFrontier", size), - ) - } - - return b.Bootstrapable.ForceAccepted(ctx, accepted) -} - -func (b *bootstrapper) GetAcceptedFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32) error { - // ignores any late responses - if requestID != b.Config.SharedCfg.RequestID { - b.Ctx.Log.Debug("received out-of-sync GetAcceptedFailed message", - zap.Stringer("nodeID", nodeID), - zap.Uint32("expectedRequestID", b.Config.SharedCfg.RequestID), - zap.Uint32("requestID", requestID), - ) - return nil - } - - // If we can't get a response from [nodeID], act as though they said that - // they think none of the containers we sent them in GetAccepted are - // accepted - b.failedAccepted.Add(nodeID) - return b.Accepted(ctx, nodeID, requestID, nil) -} - -func (b *bootstrapper) Startup(ctx context.Context) error { - beaconIDs, err := b.Beacons.Sample(b.Config.SampleK) - if err != nil { - return err - } - - b.sampledBeacons = validators.NewSet() - b.pendingSendAcceptedFrontier.Clear() - for _, nodeID := range beaconIDs { - if !b.sampledBeacons.Contains(nodeID) { - // Invariant: We never use the TxID or BLS keys populated here. 
- err = b.sampledBeacons.Add(nodeID, nil, ids.Empty, 1) - } else { - err = b.sampledBeacons.AddWeight(nodeID, 1) - } - if err != nil { - return err - } - b.pendingSendAcceptedFrontier.Add(nodeID) - } - - b.pendingReceiveAcceptedFrontier.Clear() - b.failedAcceptedFrontier.Clear() - b.acceptedFrontierSet.Clear() - - b.pendingSendAccepted.Clear() - for _, vdr := range b.Beacons.List() { - b.pendingSendAccepted.Add(vdr.NodeID) - } - - b.pendingReceiveAccepted.Clear() - b.failedAccepted.Clear() - b.acceptedVotes = make(map[ids.ID]uint64) - - b.bootstrapAttempts++ - if b.pendingSendAcceptedFrontier.Len() == 0 { - b.Ctx.Log.Info("bootstrapping skipped", - zap.String("reason", "no provided bootstraps"), - ) - return b.Bootstrapable.ForceAccepted(ctx, nil) - } - - b.Config.SharedCfg.RequestID++ - b.sendGetAcceptedFrontiers(ctx) - return nil -} - -func (b *bootstrapper) Restart(ctx context.Context, reset bool) error { - // resets the attempts when we're pulling blocks/vertices we don't want to - // fail the bootstrap at that stage - if reset { - b.Ctx.Log.Debug("Checking for new frontiers") - - b.Config.SharedCfg.Restarted = true - b.bootstrapAttempts = 0 - } - - if b.bootstrapAttempts > 0 && b.bootstrapAttempts%b.RetryBootstrapWarnFrequency == 0 { - b.Ctx.Log.Debug("check internet connection", - zap.Int("numBootstrapAttempts", b.bootstrapAttempts), - ) - } - - return b.Startup(ctx) -} - -// Ask up to [MaxOutstandingBroadcastRequests] bootstrap validators to send -// their accepted frontier with the current accepted frontier -func (b *bootstrapper) sendGetAcceptedFrontiers(ctx context.Context) { - vdrs := set.NewSet[ids.NodeID](1) - for b.pendingSendAcceptedFrontier.Len() > 0 && b.pendingReceiveAcceptedFrontier.Len() < MaxOutstandingBroadcastRequests { - vdr, _ := b.pendingSendAcceptedFrontier.Pop() - // Add the validator to the set to send the messages to - vdrs.Add(vdr) - // Add the validator to send pending receipt set - b.pendingReceiveAcceptedFrontier.Add(vdr) - } - - 
if vdrs.Len() > 0 { - b.Sender.SendGetAcceptedFrontier(ctx, vdrs, b.Config.SharedCfg.RequestID) - } -} - -// Ask up to [MaxOutstandingBroadcastRequests] bootstrap validators to send -// their filtered accepted frontier -func (b *bootstrapper) sendGetAccepted(ctx context.Context) { - vdrs := set.NewSet[ids.NodeID](1) - for b.pendingSendAccepted.Len() > 0 && b.pendingReceiveAccepted.Len() < MaxOutstandingBroadcastRequests { - vdr, _ := b.pendingSendAccepted.Pop() - // Add the validator to the set to send the messages to - vdrs.Add(vdr) - // Add the validator to send pending receipt set - b.pendingReceiveAccepted.Add(vdr) - } - - if vdrs.Len() > 0 { - b.Ctx.Log.Debug("sent GetAccepted messages", - zap.Int("numSent", vdrs.Len()), - zap.Int("numPending", b.pendingSendAccepted.Len()), - ) - b.Sender.SendGetAccepted(ctx, vdrs, b.Config.SharedCfg.RequestID, b.acceptedFrontier) - } -} diff --git a/avalanchego/snow/engine/common/config.go b/avalanchego/snow/engine/common/config.go deleted file mode 100644 index 57507e7e..00000000 --- a/avalanchego/snow/engine/common/config.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package common - -import ( - "time" - - "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/snow/engine/common/tracker" - "github.com/ava-labs/avalanchego/snow/validators" -) - -// Config wraps the common configurations that are needed by a Snow consensus -// engine -type Config struct { - Ctx *snow.ConsensusContext - Beacons validators.Set - - SampleK int - Alpha uint64 - StartupTracker tracker.Startup - Sender Sender - Bootstrapable Bootstrapable - BootstrapTracker BootstrapTracker - Timer Timer - - // Should Bootstrap be retried - RetryBootstrap bool - - // Max number of times to retry bootstrap before warning the node operator - RetryBootstrapWarnFrequency int - - // Max time to spend fetching a container and its ancestors when responding - // to a GetAncestors - MaxTimeGetAncestors time.Duration - - // Max number of containers in an ancestors message sent by this node. - AncestorsMaxContainersSent int - - // This node will only consider the first [AncestorsMaxContainersReceived] - // containers in an ancestors message it receives. - AncestorsMaxContainersReceived int - - SharedCfg *SharedConfig -} - -func (c *Config) Context() *snow.ConsensusContext { - return c.Ctx -} - -// IsBootstrapped returns true iff this chain is done bootstrapping -func (c *Config) IsBootstrapped() bool { - return c.Ctx.State.Get().State == snow.NormalOp -} - -// Shared among common.bootstrapper and snowman/avalanche bootstrapper -type SharedConfig struct { - // Tracks the last requestID that was used in a request - RequestID uint32 - - // True if RestartBootstrap has been called at least once - Restarted bool -} diff --git a/avalanchego/snow/engine/common/engine.go b/avalanchego/snow/engine/common/engine.go index 142351b2..cbd9c37d 100644 --- a/avalanchego/snow/engine/common/engine.go +++ b/avalanchego/snow/engine/common/engine.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. 
All rights reserved. // See the file LICENSE for licensing terms. package common @@ -11,9 +11,15 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils/set" ) -// Engine describes the standard interface of a consensus engine +// Engine describes the standard interface of a consensus engine. +// +// All nodeIDs are assumed to be authenticated. +// +// A consensus engine may recover after returning an error, but it isn't +// required. type Engine interface { Handler @@ -26,9 +32,6 @@ type Engine interface { // Returns nil if the engine is healthy. // Periodically called and reported through the health API health.Checker - - // GetVM returns this engine's VM - GetVM() VM } type Handler interface { @@ -54,489 +57,424 @@ type AllGetsServer interface { GetHandler } -// GetStateSummaryFrontierHandler defines how a consensus engine reacts to a get -// state summary frontier message from another validator. Functions only return -// fatal errors. type GetStateSummaryFrontierHandler interface { - // Notify this engine of a request for the frontier of state summaries. - // - // The accepted frontier is the last state summary available locally. + // Notify this engine of a request for a StateSummaryFrontier message with + // the same requestID and the engine's most recently accepted state summary. // - // This function can be called by any validator. It is not safe to assume - // this message is utilizing a unique requestID. - // - // This engine should respond with an StateSummaryFrontier message with the - // same requestID, and the engine's current state summary frontier. - GetStateSummaryFrontier(ctx context.Context, validatorID ids.NodeID, requestID uint32) error + // This function can be called by any node at any time. 
+ GetStateSummaryFrontier( + ctx context.Context, + nodeID ids.NodeID, + requestID uint32, + ) error } -// StateSummaryFrontierHandler defines how a consensus engine reacts to a state -// summary frontier message from other validators. Functions only return fatal -// errors. type StateSummaryFrontierHandler interface { - // Notify this engine of a state summary frontier. + // Notify this engine of the response to a previously sent + // GetStateSummaryFrontier message with the same requestID. // - // This function can be called by any validator. It is not safe to assume - // this message is in response to a GetStateSummaryFrontier message, is - // utilizing a unique requestID, or that the summary bytes are from a valid - // state summary. - StateSummaryFrontier(ctx context.Context, validatorID ids.NodeID, requestID uint32, summary []byte) error + // It is not guaranteed that the summary bytes are from a valid state + // summary. + StateSummaryFrontier( + ctx context.Context, + nodeID ids.NodeID, + requestID uint32, + summary []byte, + ) error - // Notify this engine that a get state summary frontier request it issued - // has failed. - // - // This function will be called if the engine sent a GetStateSummaryFrontier - // message that is not anticipated to be responded to. This could be because - // the recipient of the message is unknown or if the message request has - // timed out. + // Notify this engine that a GetStateSummaryFrontier request it issued has + // failed. // - // The validatorID, and requestID, are assumed to be the same as those sent - // in the GetStateSummaryFrontier message. - GetStateSummaryFrontierFailed(ctx context.Context, validatorID ids.NodeID, requestID uint32) error + // This function will be called if a GetStateSummaryFrontier message with + // nodeID and requestID was previously sent by this engine and will not + // receive a response. 
+ GetStateSummaryFrontierFailed( + ctx context.Context, + nodeID ids.NodeID, + requestID uint32, + ) error } -// GetAcceptedStateSummaryHandler defines how a consensus engine reacts to a get -// accepted state summary message from another validator. Functions only return -// fatal errors. type GetAcceptedStateSummaryHandler interface { - // Notify this engine of a request to return state summary IDs referenced by - // the provided keys. + // Notify this engine of a request for an AcceptedStateSummary message with + // the same requestID and the state summary IDs at the requested heights. + // If this node doesn't have access to a state summary ID at a requested + // height, that height should be ignored. // - // This function can be called by any validator. It is not safe to assume - // this message is utilizing a unique requestID. However, the validatorID is - // assumed to be authenticated. - // - // This engine should respond with an AcceptedStateSummary message with the - // same requestID, and the subset of the state summaries that this node has - // locally available. - GetAcceptedStateSummary(ctx context.Context, validatorID ids.NodeID, requestID uint32, keys []uint64) error + // This function can be called by any node at any time. + GetAcceptedStateSummary( + ctx context.Context, + nodeID ids.NodeID, + requestID uint32, + heights set.Set[uint64], + ) error } -// AcceptedStateSummaryHandler defines how a consensus engine reacts to an -// accepted state summary message from another validator. Functions only return -// fatal errors. type AcceptedStateSummaryHandler interface { - // Notify this engine of a set of state summaries. + // Notify this engine of the response to a previously sent + // GetAcceptedStateSummary message with the same requestID. // - // This function can be called by any validator. 
It is not safe to assume - // this message is in response to a GetAcceptedStateSummary message, - // is utilizing a unique requestID, or that the summaryIDs are a subset of the - // state summaries requested by key from a GetAcceptedStateSummary message. - AcceptedStateSummary(ctx context.Context, validatorID ids.NodeID, requestID uint32, summaryIDs []ids.ID) error + // It is not guaranteed that the summaryIDs have heights corresponding to + // the heights in the request. + AcceptedStateSummary( + ctx context.Context, + nodeID ids.NodeID, + requestID uint32, + summaryIDs set.Set[ids.ID], + ) error - // Notify this engine that a get accepted state summary request it issued has + // Notify this engine that a GetAcceptedStateSummary request it issued has // failed. // - // This function will be called if the engine sent a GetAcceptedStateSummary - // message that is not anticipated to be responded to. This could be because - // the recipient of the message is unknown or if the message request has - // timed out. - // - // The validatorID, and requestID, are assumed to be the same as those sent - // in the GetAcceptedStateSummary message. - GetAcceptedStateSummaryFailed(ctx context.Context, validatorID ids.NodeID, requestID uint32) error + // This function will be called if a GetAcceptedStateSummary message with + // nodeID and requestID was previously sent by this engine and will not + // receive a response. + GetAcceptedStateSummaryFailed( + ctx context.Context, + nodeID ids.NodeID, + requestID uint32, + ) error } -// GetAcceptedFrontierHandler defines how a consensus engine reacts to a get -// accepted frontier message from another validator. Functions only return fatal -// errors. type GetAcceptedFrontierHandler interface { - // Notify this engine of a request for the accepted frontier of vertices. + // Notify this engine of a request for an AcceptedFrontier message with the + // same requestID and the ID of the most recently accepted container. 
// - // The accepted frontier is the set of accepted vertices that do not have - // any accepted descendants. - // - // This function can be called by any validator. It is not safe to assume - // this message is utilizing a unique requestID. - // - // This engine should respond with an AcceptedFrontier message with the same - // requestID, and the engine's current accepted frontier. - GetAcceptedFrontier(ctx context.Context, validatorID ids.NodeID, requestID uint32) error + // This function can be called by any node at any time. + GetAcceptedFrontier( + ctx context.Context, + nodeID ids.NodeID, + requestID uint32, + ) error } -// AcceptedFrontierHandler defines how a consensus engine reacts to accepted -// frontier messages from other validators. Functions only return fatal errors. type AcceptedFrontierHandler interface { - // Notify this engine of an accepted frontier. - // - // This function can be called by any validator. It is not safe to assume - // this message is in response to a GetAcceptedFrontier message, is - // utilizing a unique requestID, or that the containerIDs from a valid - // frontier. + // Notify this engine of the response to a previously sent + // GetAcceptedFrontier message with the same requestID. AcceptedFrontier( ctx context.Context, - validatorID ids.NodeID, + nodeID ids.NodeID, requestID uint32, - containerIDs []ids.ID, + containerID ids.ID, ) error - // Notify this engine that a get accepted frontier request it issued has + // Notify this engine that a GetAcceptedFrontier request it issued has // failed. // - // This function will be called if the engine sent a GetAcceptedFrontier - // message that is not anticipated to be responded to. This could be because - // the recipient of the message is unknown or if the message request has - // timed out. - // - // The validatorID, and requestID, are assumed to be the same as those sent - // in the GetAcceptedFrontier message. 
- GetAcceptedFrontierFailed(ctx context.Context, validatorID ids.NodeID, requestID uint32) error + // This function will be called if a GetAcceptedFrontier message with + // nodeID and requestID was previously sent by this engine and will not + // receive a response. + GetAcceptedFrontierFailed( + ctx context.Context, + nodeID ids.NodeID, + requestID uint32, + ) error } -// GetAcceptedHandler defines how a consensus engine reacts to a get accepted -// message from another validator. Functions only return fatal errors. type GetAcceptedHandler interface { - // Notify this engine of a request to filter non-accepted vertices. - // - // This function can be called by any validator. It is not safe to assume - // this message is utilizing a unique requestID. However, the validatorID is - // assumed to be authenticated. + // Notify this engine of a request for an Accepted message with the same + // requestID and the subset of containerIDs that this node has accepted. // - // This engine should respond with an Accepted message with the same - // requestID, and the subset of the containerIDs that this node has decided - // are accepted. + // This function can be called by any node at any time. GetAccepted( ctx context.Context, - validatorID ids.NodeID, + nodeID ids.NodeID, requestID uint32, - containerIDs []ids.ID, + containerIDs set.Set[ids.ID], ) error } -// AcceptedHandler defines how a consensus engine reacts to accepted messages -// from other validators. Functions only return fatal -// errors. type AcceptedHandler interface { - // Notify this engine of a set of accepted vertices. + // Notify this engine of the response to a previously sent GetAccepted + // message with the same requestID. // - // This function can be called by any validator. It is not safe to assume - // this message is in response to a GetAccepted message, is utilizing a - // unique requestID, or that the containerIDs are a subset of the - // containerIDs from a GetAccepted message. 
+ // It is not guaranteed that the containerIDs are a subset of the + // containerIDs provided in the request. Accepted( ctx context.Context, - validatorID ids.NodeID, + nodeID ids.NodeID, requestID uint32, - containerIDs []ids.ID, + containerIDs set.Set[ids.ID], ) error - // Notify this engine that a get accepted request it issued has failed. - // - // This function will be called if the engine sent a GetAccepted message - // that is not anticipated to be responded to. This could be because the - // recipient of the message is unknown or if the message request has timed - // out. + // Notify this engine that a GetAccepted request it issued has failed. // - // The validatorID, and requestID, are assumed to be the same as those sent - // in the GetAccepted message. - GetAcceptedFailed(ctx context.Context, validatorID ids.NodeID, requestID uint32) error + // This function will be called if a GetAccepted message with nodeID and + // requestID was previously sent by this engine and will not receive a + // response. + GetAcceptedFailed( + ctx context.Context, + nodeID ids.NodeID, + requestID uint32, + ) error } -// GetAncestorsHandler defines how a consensus engine reacts to a get ancestors -// message from another validator. Functions only return fatal errors. type GetAncestorsHandler interface { - // Notify this engine of a request for a container and its ancestors. - // - // The request is from validator [validatorID]. The requested container is - // [containerID]. + // Notify this engine of a request for an Ancestors message with the same + // requestID, containerID, and some of its ancestors on a best effort basis. // - // This function can be called by any validator. It is not safe to assume - // this message is utilizing a unique requestID. It is also not safe to - // assume the requested containerID exists. - // - // This engine should respond with an Ancestors message with the same - // requestID, which contains [containerID] as well as its ancestors. 
See - // Ancestors's documentation. - // - // If this engine doesn't have some ancestors, it should reply with its best - // effort attempt at getting them. If this engine doesn't have [containerID] - // it can ignore this message. - GetAncestors(ctx context.Context, validatorID ids.NodeID, requestID uint32, containerID ids.ID) error + // This function can be called by any node at any time. + GetAncestors( + ctx context.Context, + nodeID ids.NodeID, + requestID uint32, + containerID ids.ID, + ) error } -// AncestorsHandler defines how a consensus engine reacts to bootstrapping -// retrieval messages from other validators. Functions only return fatal errors. type AncestorsHandler interface { - // Notify this engine of multiple containers. - // - // Each element of [containers] is the byte representation of a container. - // - // This should only be called during bootstrapping, in response to a - // GetAncestors message to [validatorID] with request ID [requestID]. - // - // This call should contain the container requested in that message, along - // with ancestors. The containers should be in BFS order (ie the first - // container must be the container requested in the GetAncestors message and - // further back ancestors are later in [containers] + // Notify this engine of the response to a previously sent GetAncestors + // message with the same requestID. // - // It is not safe to assume this message is in response to a GetAncestor - // message, that this message has a unique requestID or that any of the - // containers in [containers] are valid. + // It is expected, but not guaranteed, that the first element in containers + // should be the container referenced in the request and that the rest of + // the containers should be referenced by a prior container in the list. Ancestors( ctx context.Context, - validatorID ids.NodeID, + nodeID ids.NodeID, requestID uint32, containers [][]byte, ) error // Notify this engine that a GetAncestors request it issued has failed. 
// - // This function will be called if the engine sent a GetAncestors message - // that is not anticipated to be responded to. This could be because the - // recipient of the message is unknown or if the message request has timed - // out. - // - // The validatorID and requestID are assumed to be the same as those sent in - // the GetAncestors message. - GetAncestorsFailed(ctx context.Context, validatorID ids.NodeID, requestID uint32) error + // This function will be called if a GetAncestors message with nodeID and + // requestID was previously sent by this engine and will not receive a + // response. + GetAncestorsFailed( + ctx context.Context, + nodeID ids.NodeID, + requestID uint32, + ) error } -// GetHandler defines how a consensus engine reacts to get message from another -// validator. Functions only return fatal errors. type GetHandler interface { - // Notify this engine of a request for a container. + // Notify this engine of a request for a Put message with the same requestID + // and the container whose ID is containerID. // - // This function can be called by any validator. It is not safe to assume - // this message is utilizing a unique requestID. It is also not safe to - // assume the requested containerID exists. - // - // There should never be a situation where a virtuous node sends a Get - // request to another virtuous node that does not have the requested - // container. - // - // This engine should respond with a Put message with the same requestID if - // the container was locally available. Otherwise, the message can be safely - // dropped. - Get(ctx context.Context, validatorID ids.NodeID, requestID uint32, containerID ids.ID) error + // This function can be called by any node at any time. + Get( + ctx context.Context, + nodeID ids.NodeID, + requestID uint32, + containerID ids.ID, + ) error } -// PutHandler defines how a consensus engine reacts to put messages from other -// validators. Functions only return fatal errors. 
type PutHandler interface { - // Notify this engine of a container. + // Notify this engine of either the response to a previously sent Get + // message with the same requestID or an unsolicited container if the + // requestID is MaxUint32. // - // This function can be called by any validator. It is not safe to assume - // this message is utilizing a unique requestID. + // It is not guaranteed that container can be parsed or issued. Put( ctx context.Context, - validatorID ids.NodeID, + nodeID ids.NodeID, requestID uint32, container []byte, ) error - // Notify this engine that a get request it issued has failed. - // - // This function will be called if the engine sent a Get message that is not - // anticipated to be responded to. This could be because the recipient of - // the message is unknown or if the message request has timed out. + // Notify this engine that a Get request it issued has failed. // - // The validatorID and requestID are assumed to be the same as those sent in - // the Get message. - GetFailed(ctx context.Context, validatorID ids.NodeID, requestID uint32) error + // This function will be called if a Get message with nodeID and requestID + // was previously sent by this engine and will not receive a response. + GetFailed( + ctx context.Context, + nodeID ids.NodeID, + requestID uint32, + ) error } -// QueryHandler defines how a consensus engine reacts to query messages from -// other validators. Functions only return fatal errors. type QueryHandler interface { - // Notify this engine of a request for our preferences. + // Notify this engine of a request for a Chits message with the same + // requestID. // - // This function can be called by any validator. It is not safe to assume - // this message is utilizing a unique requestID. However, the validatorID is - // assumed to be authenticated. 
+ // If the provided containerID is not processing, the engine is expected to + // respond with the node's current preferences before attempting to issue + // it. // - // If the container or its ancestry is incomplete, this engine is expected - // to request the missing containers from the validator. Once the ancestry - // is complete, this engine should send this validator the current - // preferences in a Chits message. The Chits message should have the same - // requestID that was passed in here. + // This function can be called by any node at any time. PullQuery( ctx context.Context, - validatorID ids.NodeID, + nodeID ids.NodeID, requestID uint32, containerID ids.ID, + requestedHeight uint64, ) error - // Notify this engine of a request for our preferences. + // Notify this engine of a request for a Chits message with the same + // requestID. // - // This function can be called by any validator. It is not safe to assume - // this message is utilizing a unique requestID. + // If the provided container is not processing, the engine is expected to + // respond with the node's current preferences before attempting to issue + // it. // - // This function is meant to behave the same way as PullQuery, except the - // container is optimistically provided to potentially remove the need for - // a series of Get/Put messages. + // It is not guaranteed that container can be parsed or issued. // - // If the ancestry of the container is incomplete, this engine is expected - // to request the ancestry from the validator. Once the ancestry is - // complete, this engine should send this validator the current preferences - // in a Chits message. The Chits message should have the same requestID that - // was passed in here. + // This function can be called by any node at any time. 
PushQuery( ctx context.Context, - validatorID ids.NodeID, + nodeID ids.NodeID, requestID uint32, container []byte, + requestedHeight uint64, ) error } -// ChitsHandler defines how a consensus engine reacts to query response messages -// from other validators. Functions only return fatal errors. type ChitsHandler interface { - // Notify this engine of the specified validators preferences. + // Notify this engine of the response to a previously sent PullQuery or + // PushQuery message with the same requestID. // - // This function can be called by any validator. It is not safe to assume - // this message is in response to a PullQuery or a PushQuery message. - // However, the validatorID is assumed to be authenticated. + // It is expected, but not guaranteed, that preferredID transitively + // references preferredIDAtHeight and acceptedID. Chits( ctx context.Context, - validatorID ids.NodeID, + nodeID ids.NodeID, requestID uint32, - preferredContainerIDs []ids.ID, - acceptedContainerIDs []ids.ID, + preferredID ids.ID, + preferredIDAtHeight ids.ID, + acceptedID ids.ID, ) error - // Notify this engine that a query it issued has failed. + // Notify this engine that a Query request it issued has failed. // - // This function will be called if the engine sent a PullQuery or PushQuery - // message that is not anticipated to be responded to. This could be because - // the recipient of the message is unknown or if the message request has - // timed out. - // - // The validatorID and the requestID are assumed to be the same as those - // sent in the Query message. - QueryFailed(ctx context.Context, validatorID ids.NodeID, requestID uint32) error + // This function will be called if a PullQuery or PushQuery message with + // nodeID and requestID was previously sent by this engine and will not + // receive a response. 
+ QueryFailed( + ctx context.Context, + nodeID ids.NodeID, + requestID uint32, + ) error } -// NetworkAppHandler defines how a consensus engine reacts to app specific -// messages from the network. -// -// Functions only return fatal errors. type NetworkAppHandler interface { - // Notify this engine of a request for data from [nodeID]. + AppRequestHandler + AppResponseHandler + AppGossipHandler +} + +type AppRequestHandler interface { + // Notify this engine of a request for an AppResponse with the same + // requestID. // - // The meaning of [request], and what should be sent in response to it, is + // The meaning of request, and what should be sent in response to it, is // application (VM) specific. // - // It is not guaranteed that: - // * [request] is well-formed/valid. + // It is not guaranteed that request is well-formed or valid. // - // This node should typically send an AppResponse to [nodeID] in response to - // a valid message using the same request ID before the deadline. However, - // the VM may arbitrarily choose to not send a response to this request. - AppRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, deadline time.Time, request []byte) error + // This function can be called by any node at any time. + AppRequest( + ctx context.Context, + nodeID ids.NodeID, + requestID uint32, + deadline time.Time, + request []byte, + ) error +} - // Notify this engine that an AppRequest message it sent to [nodeID] with - // request ID [requestID] failed. +type AppResponseHandler interface { + // Notify this engine of the response to a previously sent AppRequest with + // the same requestID. // - // This may be because the request timed out or because the message couldn't - // be sent to [nodeID]. + // The meaning of response is application (VM) specific. // - // It is guaranteed that: - // * This engine sent a request to [nodeID] with ID [requestID]. - // * AppRequestFailed([nodeID], [requestID]) has not already been called. 
- // * AppResponse([nodeID], [requestID]) has not already been called. - AppRequestFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32) error + // It is not guaranteed that response is well-formed or valid. + AppResponse( + ctx context.Context, + nodeID ids.NodeID, + requestID uint32, + response []byte, + ) error - // Notify this engine of a response to the AppRequest message it sent to - // [nodeID] with request ID [requestID]. - // - // The meaning of [response] is application (VM) specifc. - // - // It is guaranteed that: - // * This engine sent a request to [nodeID] with ID [requestID]. - // * AppRequestFailed([nodeID], [requestID]) has not already been called. - // * AppResponse([nodeID], [requestID]) has not already been called. - // - // It is not guaranteed that: - // * [response] contains the expected response - // * [response] is well-formed/valid. + // Notify this engine that an AppRequest it issued has failed. // - // If [response] is invalid or not the expected response, the VM chooses how - // to react. For example, the VM may send another AppRequest, or it may give - // up trying to get the requested information. - AppResponse(ctx context.Context, nodeID ids.NodeID, requestID uint32, response []byte) error + // This function will be called if an AppRequest message with nodeID and + // requestID was previously sent by this engine and will not receive a + // response. + AppRequestFailed( + ctx context.Context, + nodeID ids.NodeID, + requestID uint32, + appErr *AppError, + ) error +} - // Notify this engine of a gossip message from [nodeID]. +type AppGossipHandler interface { + // Notify this engine of a gossip message from nodeID. // - // The meaning of [msg] is application (VM) specific, and the VM defines how + // The meaning of msg is application (VM) specific, and the VM defines how // to react to this message. // // This message is not expected in response to any event, and it does not // need to be responded to. 
- // - // A node may gossip the same message multiple times. That is, - // AppGossip([nodeID], [msg]) may be called multiple times. - AppGossip(ctx context.Context, nodeID ids.NodeID, msg []byte) error + AppGossip( + ctx context.Context, + nodeID ids.NodeID, + msg []byte, + ) error } -// CrossChainAppHandler defines how a consensus engine reacts to cross-chain app -// specific messages. -// -// Functions only return fatal errors. type CrossChainAppHandler interface { - // CrossChainAppRequest Notify this engine of a request for data from - // [chainID]. + CrossChainAppRequestHandler + CrossChainAppResponseHandler +} + +type CrossChainAppRequestHandler interface { + // Notify this engine of a request for a CrossChainAppResponse with the same + // requestID. // - // The meaning of [request], and what should be sent in response to it, is + // The meaning of request, and what should be sent in response to it, is // application (VM) specific. // // Guarantees surrounding the request are specific to the implementation of // the requesting VM. For example, the request may or may not be guaranteed // to be well-formed/valid depending on the implementation of the requesting // VM. + CrossChainAppRequest( + ctx context.Context, + chainID ids.ID, + requestID uint32, + deadline time.Time, + request []byte, + ) error +} + +type CrossChainAppResponseHandler interface { + // Notify this engine of the response to a previously sent + // CrossChainAppRequest with the same requestID. // - // This node should typically send a CrossChainAppResponse to [chainID] in - // response to a valid message using the same request ID before the - // deadline. However, the VM may arbitrarily choose to not send a response - // to this request. 
- CrossChainAppRequest(ctx context.Context, chainID ids.ID, requestID uint32, deadline time.Time, request []byte) error - // CrossChainAppRequestFailed notifies this engine that a - // CrossChainAppRequest message it sent to [chainID] with request ID - // [requestID] failed. - // - // This may be because the request timed out or because the message couldn't - // be sent to [chainID]. - // - // It is guaranteed that: - // * This engine sent a request to [chainID] with ID [requestID]. - // * CrossChainAppRequestFailed([chainID], [requestID]) has not already been - // called. - // * CrossChainAppResponse([chainID], [requestID]) has not already been - // called. - CrossChainAppRequestFailed(ctx context.Context, chainID ids.ID, requestID uint32) error - // CrossChainAppResponse notifies this engine of a response to the - // CrossChainAppRequest message it sent to [chainID] with request ID - // [requestID]. - // - // The meaning of [response] is application (VM) specific. - // - // It is guaranteed that: - // * This engine sent a request to [chainID] with ID [requestID]. - // * CrossChainAppRequestFailed([chainID], [requestID]) has not already been - // called. - // * CrossChainAppResponse([chainID], [requestID]) has not already been - // called. + // The meaning of response is application (VM) specific. // // Guarantees surrounding the response are specific to the implementation of // the responding VM. For example, the response may or may not be guaranteed // to be well-formed/valid depending on the implementation of the requesting // VM. + CrossChainAppResponse( + ctx context.Context, + chainID ids.ID, + requestID uint32, + response []byte, + ) error + + // Notify this engine that a CrossChainAppRequest it issued has failed. // - // If [response] is invalid or not the expected response, the VM chooses how - // to react. For example, the VM may send another CrossChainAppRequest, or - // it may give up trying to get the requested information. 
- CrossChainAppResponse(ctx context.Context, chainID ids.ID, requestID uint32, response []byte) error + // This function will be called if a CrossChainAppRequest message with + // chainID and requestID was previously sent by this engine and will not + // receive a response. + CrossChainAppRequestFailed( + ctx context.Context, + chainID ids.ID, + requestID uint32, + appErr *AppError, + ) error } -// AppHandler defines how a consensus engine reacts to app specific messages. -// Functions only return fatal errors. type AppHandler interface { NetworkAppHandler CrossChainAppHandler } -// InternalHandler defines how this consensus engine reacts to messages from -// other components of this validator. Functions only return fatal errors if -// they occur. type InternalHandler interface { // Notify this engine of peer changes. validators.Connector diff --git a/avalanchego/snow/engine/common/error.go b/avalanchego/snow/engine/common/error.go new file mode 100644 index 00000000..261fedaa --- /dev/null +++ b/avalanchego/snow/engine/common/error.go @@ -0,0 +1,43 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package common + +import "fmt" + +var ( + _ error = (*AppError)(nil) + + // ErrUndefined indicates an undefined error + ErrUndefined = &AppError{ + Code: 0, + Message: "undefined", + } + + // ErrTimeout is used to signal a response timeout + ErrTimeout = &AppError{ + Code: -1, + Message: "timed out", + } +) + +// AppError is an application-defined error +type AppError struct { + // Code is application-defined and should be used for error matching + Code int32 + // Message is a human-readable error message + Message string +} + +func (a *AppError) Error() string { + return fmt.Sprintf("%d: %s", a.Code, a.Message) +} + +func (a *AppError) Is(target error) bool { + appErr, ok := target.(*AppError) + if !ok { + return false + } + + return a.Code == appErr.Code +} diff --git a/avalanchego/snow/engine/common/error_test.go b/avalanchego/snow/engine/common/error_test.go new file mode 100644 index 00000000..0204e010 --- /dev/null +++ b/avalanchego/snow/engine/common/error_test.go @@ -0,0 +1,92 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package common + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/require" +) + +// Tests the invariant that AppErrors are matched against their error codes +func TestAppErrorEqual(t *testing.T) { + tests := []struct { + name string + err1 *AppError + err2 error + expected bool + }{ + { + name: "is - equal", + err1: &AppError{ + Code: 1, + }, + err2: &AppError{ + Code: 1, + }, + expected: true, + }, + { + name: "is - same error code different messages", + err1: &AppError{ + Code: 1, + Message: "foo", + }, + err2: &AppError{ + Code: 1, + Message: "bar", + }, + expected: true, + }, + { + name: "not is - different error code", + err1: &AppError{ + Code: 1, + }, + err2: &AppError{ + Code: 2, + }, + }, + { + name: "not is - different type", + err1: &AppError{ + Code: 1, + }, + err2: errors.New("foobar"), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require.Equal(t, tt.expected, errors.Is(tt.err1, tt.err2)) + }) + } +} + +// Tests reserved error types +func TestErrorCode(t *testing.T) { + tests := []struct { + name string + code int32 + expected *AppError + }{ + { + name: "undefined", + code: 0, + expected: ErrUndefined, + }, + { + name: "undefined", + code: -1, + expected: ErrTimeout, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require.ErrorIs(t, tt.expected, &AppError{Code: tt.code}) + }) + } +} diff --git a/avalanchego/snow/engine/common/fetcher.go b/avalanchego/snow/engine/common/fetcher.go deleted file mode 100644 index 9e90da3d..00000000 --- a/avalanchego/snow/engine/common/fetcher.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package common - -import "context" - -type Fetcher struct { - // tracks which validators were asked for which containers in which requests - OutstandingRequests Requests - - // Called when bootstrapping is done on a specific chain - OnFinished func(ctx context.Context, lastReqID uint32) error -} diff --git a/avalanchego/snow/engine/common/fx.go b/avalanchego/snow/engine/common/fx.go index 414b5a7d..000c22ed 100644 --- a/avalanchego/snow/engine/common/fx.go +++ b/avalanchego/snow/engine/common/fx.go @@ -1,11 +1,9 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package common -import ( - "github.com/ava-labs/avalanchego/ids" -) +import "github.com/ava-labs/avalanchego/ids" // Fx wraps an instance of a feature extension type Fx struct { diff --git a/avalanchego/snow/engine/common/halter.go b/avalanchego/snow/engine/common/halter.go index bdfe3c9d..1fcea981 100644 --- a/avalanchego/snow/engine/common/halter.go +++ b/avalanchego/snow/engine/common/halter.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package common diff --git a/avalanchego/snow/engine/common/http_handler.go b/avalanchego/snow/engine/common/http_handler.go deleted file mode 100644 index 48724a2b..00000000 --- a/avalanchego/snow/engine/common/http_handler.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package common - -import ( - "net/http" -) - -// LockOption allows the vm to specify their lock option based on their endpoint -type LockOption uint32 - -// List of all allowed options -const ( - WriteLock = iota - ReadLock - NoLock -) - -type HTTPHandler struct { - LockOptions LockOption - Handler http.Handler -} diff --git a/avalanchego/snow/engine/common/message.go b/avalanchego/snow/engine/common/message.go index e46238bc..1bc05991 100644 --- a/avalanchego/snow/engine/common/message.go +++ b/avalanchego/snow/engine/common/message.go @@ -1,11 +1,9 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package common -import ( - "fmt" -) +import "fmt" // TODO: Consider renaming Message to, say, VMMessage @@ -13,17 +11,18 @@ import ( type Message uint32 const ( - // PendingTxs notifies a consensus engine that - // its VM has pending transactions - // (i.e. it would like to add a new block/vertex to consensus) + // PendingTxs notifies a consensus engine that its VM has pending + // transactions. + // + // The consensus engine must eventually call BuildBlock at least once after + // receiving this message. If the consensus engine receives multiple + // PendingTxs messages between calls to BuildBlock, the engine may only call + // BuildBlock once. PendingTxs Message = iota + 1 // StateSyncDone notifies the state syncer engine that the VM has finishing // syncing the requested state summary. 
StateSyncDone - - // StopVertex notifies a consensus that it has a pending stop vertex - StopVertex ) func (msg Message) String() string { @@ -32,8 +31,6 @@ func (msg Message) String() string { return "Pending Transactions" case StateSyncDone: return "State Sync Done" - case StopVertex: - return "Pending Stop Vertex" default: return fmt.Sprintf("Unknown Message: %d", msg) } diff --git a/avalanchego/snow/engine/common/mixed_query.go b/avalanchego/snow/engine/common/mixed_query.go deleted file mode 100644 index 653297ce..00000000 --- a/avalanchego/snow/engine/common/mixed_query.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package common - -import ( - "context" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/set" -) - -// Send a query composed partially of push queries and partially of pull queries. -// The validators in [vdrs] will be queried. -// This function sends at most [numPushTo] push queries. The rest are pull queries. -// If [numPushTo] > len(vdrs), len(vdrs) push queries are sent. -// [containerID] and [container] are the ID and body of the container being queried. -// [sender] is used to actually send the queries. -func SendMixedQuery( - ctx context.Context, - sender Sender, - vdrs []ids.NodeID, - numPushTo int, - reqID uint32, - containerID ids.ID, - container []byte, -) { - if numPushTo > len(vdrs) { - numPushTo = len(vdrs) - } - if numPushTo > 0 { - sendPushQueryTo := set.NewSet[ids.NodeID](numPushTo) - sendPushQueryTo.Add(vdrs[:numPushTo]...) - sender.SendPushQuery(ctx, sendPushQueryTo, reqID, container) - } - if numPullTo := len(vdrs) - numPushTo; numPullTo > 0 { - sendPullQueryTo := set.NewSet[ids.NodeID](numPullTo) - sendPullQueryTo.Add(vdrs[numPushTo:]...) 
- sender.SendPullQuery(ctx, sendPullQueryTo, reqID, containerID) - } -} diff --git a/avalanchego/snow/engine/common/mixed_query_test.go b/avalanchego/snow/engine/common/mixed_query_test.go deleted file mode 100644 index 4d488dc2..00000000 --- a/avalanchego/snow/engine/common/mixed_query_test.go +++ /dev/null @@ -1,150 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package common - -import ( - "context" - "fmt" - "testing" - - "github.com/golang/mock/gomock" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/set" -) - -func TestSendMixedQuery(t *testing.T) { - type test struct { - senderF func() *MockSender - vdrs []ids.NodeID - numPushTo int - } - reqID := uint32(1337) - containerID := ids.GenerateTestID() - containerBytes := []byte{'y', 'e', 'e', 't'} - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - vdr1, vdr2, vdr3 := ids.GenerateTestNodeID(), ids.GenerateTestNodeID(), ids.GenerateTestNodeID() - tests := []test{ - { - senderF: func() *MockSender { - s := NewMockSender(ctrl) - s.EXPECT().SendPushQuery( - gomock.Any(), - set.Set[ids.NodeID]{vdr1: struct{}{}, vdr2: struct{}{}, vdr3: struct{}{}}, - reqID, - containerBytes, - ).Times(1) - s.EXPECT().SendPullQuery( - gomock.Any(), - gomock.Any(), - gomock.Any(), - gomock.Any(), - ).Times(0) - return s - }, - vdrs: []ids.NodeID{vdr1, vdr2, vdr3}, - numPushTo: 3, - }, - { - senderF: func() *MockSender { - s := NewMockSender(ctrl) - s.EXPECT().SendPushQuery( - gomock.Any(), - set.Set[ids.NodeID]{vdr1: struct{}{}}, - reqID, - containerBytes, - ).Times(1) - s.EXPECT().SendPullQuery( - gomock.Any(), - set.Set[ids.NodeID]{vdr2: struct{}{}, vdr3: struct{}{}}, - reqID, - containerID, - ).Times(1) - return s - }, - vdrs: []ids.NodeID{vdr1, vdr2, vdr3}, - numPushTo: 1, - }, - { - senderF: func() *MockSender { - s := NewMockSender(ctrl) - s.EXPECT().SendPushQuery( - gomock.Any(), - set.Set[ids.NodeID]{vdr1: 
struct{}{}, vdr2: struct{}{}}, - reqID, - containerBytes, - ).Times(1) - s.EXPECT().SendPullQuery( - gomock.Any(), - gomock.Any(), - gomock.Any(), - gomock.Any(), - ).Times(0) - return s - }, - vdrs: []ids.NodeID{vdr1, vdr2}, - numPushTo: 2, - }, - { - senderF: func() *MockSender { - s := NewMockSender(ctrl) - s.EXPECT().SendPushQuery( - gomock.Any(), - gomock.Any(), - reqID, - containerBytes, - ).Times(0) - s.EXPECT().SendPullQuery( - gomock.Any(), - set.Set[ids.NodeID]{vdr1: struct{}{}}, - reqID, - containerID, - ).Times(1) - return s - }, - vdrs: []ids.NodeID{vdr1}, - numPushTo: 0, - }, - { - senderF: func() *MockSender { - s := NewMockSender(ctrl) - s.EXPECT().SendPushQuery( - gomock.Any(), - set.Set[ids.NodeID]{vdr1: struct{}{}, vdr2: struct{}{}}, - reqID, - containerBytes, - ).Times(1) - s.EXPECT().SendPullQuery( - gomock.Any(), - gomock.Any(), - gomock.Any(), - gomock.Any(), - ).Times(0) - return s - }, - vdrs: []ids.NodeID{vdr1, vdr2}, - numPushTo: 4, - }, - } - - for _, tt := range tests { - t.Run( - fmt.Sprintf("numPushTo: %d, numVdrs: %d", tt.numPushTo, len(tt.vdrs)), - func(t *testing.T) { - sender := tt.senderF() - SendMixedQuery( - context.Background(), - sender, - tt.vdrs, - tt.numPushTo, - reqID, - containerID, - containerBytes, - ) - }, - ) - } -} diff --git a/avalanchego/snow/engine/common/mock_sender.go b/avalanchego/snow/engine/common/mock_sender.go index c599077d..c22cfb45 100644 --- a/avalanchego/snow/engine/common/mock_sender.go +++ b/avalanchego/snow/engine/common/mock_sender.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. 
-// Source: github.com/ava-labs/avalanchego/snow/engine/common (interfaces: Sender) +// Source: snow/engine/common/sender.go +// +// Generated by this command: +// +// mockgen -source=snow/engine/common/sender.go -destination=snow/engine/common/mock_sender.go -package=common -exclude_interfaces=StateSummarySender,AcceptedStateSummarySender,FrontierSender,AcceptedSender,FetchSender,AppSender,QuerySender,CrossChainAppSender,NetworkAppSender,Gossiper +// // Package common is a generated GoMock package. package common @@ -14,7 +16,7 @@ import ( ids "github.com/ava-labs/avalanchego/ids" snow "github.com/ava-labs/avalanchego/snow" set "github.com/ava-labs/avalanchego/utils/set" - gomock "github.com/golang/mock/gomock" + gomock "go.uber.org/mock/gomock" ) // MockSender is a mock of Sender interface. @@ -41,291 +43,319 @@ func (m *MockSender) EXPECT() *MockSenderMockRecorder { } // Accept mocks base method. -func (m *MockSender) Accept(arg0 *snow.ConsensusContext, arg1 ids.ID, arg2 []byte) error { +func (m *MockSender) Accept(ctx *snow.ConsensusContext, containerID ids.ID, container []byte) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Accept", arg0, arg1, arg2) + ret := m.ctrl.Call(m, "Accept", ctx, containerID, container) ret0, _ := ret[0].(error) return ret0 } // Accept indicates an expected call of Accept. -func (mr *MockSenderMockRecorder) Accept(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockSenderMockRecorder) Accept(ctx, containerID, container any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Accept", reflect.TypeOf((*MockSender)(nil).Accept), arg0, arg1, arg2) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Accept", reflect.TypeOf((*MockSender)(nil).Accept), ctx, containerID, container) } // SendAccepted mocks base method. 
-func (m *MockSender) SendAccepted(arg0 context.Context, arg1 ids.NodeID, arg2 uint32, arg3 []ids.ID) { +func (m *MockSender) SendAccepted(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerIDs []ids.ID) { m.ctrl.T.Helper() - m.ctrl.Call(m, "SendAccepted", arg0, arg1, arg2, arg3) + m.ctrl.Call(m, "SendAccepted", ctx, nodeID, requestID, containerIDs) } // SendAccepted indicates an expected call of SendAccepted. -func (mr *MockSenderMockRecorder) SendAccepted(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +func (mr *MockSenderMockRecorder) SendAccepted(ctx, nodeID, requestID, containerIDs any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendAccepted", reflect.TypeOf((*MockSender)(nil).SendAccepted), arg0, arg1, arg2, arg3) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendAccepted", reflect.TypeOf((*MockSender)(nil).SendAccepted), ctx, nodeID, requestID, containerIDs) } // SendAcceptedFrontier mocks base method. -func (m *MockSender) SendAcceptedFrontier(arg0 context.Context, arg1 ids.NodeID, arg2 uint32, arg3 []ids.ID) { +func (m *MockSender) SendAcceptedFrontier(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerID ids.ID) { m.ctrl.T.Helper() - m.ctrl.Call(m, "SendAcceptedFrontier", arg0, arg1, arg2, arg3) + m.ctrl.Call(m, "SendAcceptedFrontier", ctx, nodeID, requestID, containerID) } // SendAcceptedFrontier indicates an expected call of SendAcceptedFrontier. 
-func (mr *MockSenderMockRecorder) SendAcceptedFrontier(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +func (mr *MockSenderMockRecorder) SendAcceptedFrontier(ctx, nodeID, requestID, containerID any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendAcceptedFrontier", reflect.TypeOf((*MockSender)(nil).SendAcceptedFrontier), arg0, arg1, arg2, arg3) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendAcceptedFrontier", reflect.TypeOf((*MockSender)(nil).SendAcceptedFrontier), ctx, nodeID, requestID, containerID) } // SendAcceptedStateSummary mocks base method. -func (m *MockSender) SendAcceptedStateSummary(arg0 context.Context, arg1 ids.NodeID, arg2 uint32, arg3 []ids.ID) { +func (m *MockSender) SendAcceptedStateSummary(ctx context.Context, nodeID ids.NodeID, requestID uint32, summaryIDs []ids.ID) { m.ctrl.T.Helper() - m.ctrl.Call(m, "SendAcceptedStateSummary", arg0, arg1, arg2, arg3) + m.ctrl.Call(m, "SendAcceptedStateSummary", ctx, nodeID, requestID, summaryIDs) } // SendAcceptedStateSummary indicates an expected call of SendAcceptedStateSummary. -func (mr *MockSenderMockRecorder) SendAcceptedStateSummary(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +func (mr *MockSenderMockRecorder) SendAcceptedStateSummary(ctx, nodeID, requestID, summaryIDs any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendAcceptedStateSummary", reflect.TypeOf((*MockSender)(nil).SendAcceptedStateSummary), arg0, arg1, arg2, arg3) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendAcceptedStateSummary", reflect.TypeOf((*MockSender)(nil).SendAcceptedStateSummary), ctx, nodeID, requestID, summaryIDs) } // SendAncestors mocks base method. 
-func (m *MockSender) SendAncestors(arg0 context.Context, arg1 ids.NodeID, arg2 uint32, arg3 [][]byte) { +func (m *MockSender) SendAncestors(ctx context.Context, nodeID ids.NodeID, requestID uint32, containers [][]byte) { m.ctrl.T.Helper() - m.ctrl.Call(m, "SendAncestors", arg0, arg1, arg2, arg3) + m.ctrl.Call(m, "SendAncestors", ctx, nodeID, requestID, containers) } // SendAncestors indicates an expected call of SendAncestors. -func (mr *MockSenderMockRecorder) SendAncestors(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +func (mr *MockSenderMockRecorder) SendAncestors(ctx, nodeID, requestID, containers any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendAncestors", reflect.TypeOf((*MockSender)(nil).SendAncestors), ctx, nodeID, requestID, containers) +} + +// SendAppError mocks base method. +func (m *MockSender) SendAppError(ctx context.Context, nodeID ids.NodeID, requestID uint32, errorCode int32, errorMessage string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SendAppError", ctx, nodeID, requestID, errorCode, errorMessage) + ret0, _ := ret[0].(error) + return ret0 +} + +// SendAppError indicates an expected call of SendAppError. +func (mr *MockSenderMockRecorder) SendAppError(ctx, nodeID, requestID, errorCode, errorMessage any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendAncestors", reflect.TypeOf((*MockSender)(nil).SendAncestors), arg0, arg1, arg2, arg3) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendAppError", reflect.TypeOf((*MockSender)(nil).SendAppError), ctx, nodeID, requestID, errorCode, errorMessage) } // SendAppGossip mocks base method. 
-func (m *MockSender) SendAppGossip(arg0 context.Context, arg1 []byte) error { +func (m *MockSender) SendAppGossip(ctx context.Context, appGossipBytes []byte) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SendAppGossip", arg0, arg1) + ret := m.ctrl.Call(m, "SendAppGossip", ctx, appGossipBytes) ret0, _ := ret[0].(error) return ret0 } // SendAppGossip indicates an expected call of SendAppGossip. -func (mr *MockSenderMockRecorder) SendAppGossip(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockSenderMockRecorder) SendAppGossip(ctx, appGossipBytes any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendAppGossip", reflect.TypeOf((*MockSender)(nil).SendAppGossip), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendAppGossip", reflect.TypeOf((*MockSender)(nil).SendAppGossip), ctx, appGossipBytes) } // SendAppGossipSpecific mocks base method. -func (m *MockSender) SendAppGossipSpecific(arg0 context.Context, arg1 set.Set[ids.NodeID], arg2 []byte) error { +func (m *MockSender) SendAppGossipSpecific(ctx context.Context, nodeIDs set.Set[ids.NodeID], appGossipBytes []byte) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SendAppGossipSpecific", arg0, arg1, arg2) + ret := m.ctrl.Call(m, "SendAppGossipSpecific", ctx, nodeIDs, appGossipBytes) ret0, _ := ret[0].(error) return ret0 } // SendAppGossipSpecific indicates an expected call of SendAppGossipSpecific. 
-func (mr *MockSenderMockRecorder) SendAppGossipSpecific(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockSenderMockRecorder) SendAppGossipSpecific(ctx, nodeIDs, appGossipBytes any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendAppGossipSpecific", reflect.TypeOf((*MockSender)(nil).SendAppGossipSpecific), arg0, arg1, arg2) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendAppGossipSpecific", reflect.TypeOf((*MockSender)(nil).SendAppGossipSpecific), ctx, nodeIDs, appGossipBytes) } // SendAppRequest mocks base method. -func (m *MockSender) SendAppRequest(arg0 context.Context, arg1 set.Set[ids.NodeID], arg2 uint32, arg3 []byte) error { +func (m *MockSender) SendAppRequest(ctx context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32, appRequestBytes []byte) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SendAppRequest", arg0, arg1, arg2, arg3) + ret := m.ctrl.Call(m, "SendAppRequest", ctx, nodeIDs, requestID, appRequestBytes) ret0, _ := ret[0].(error) return ret0 } // SendAppRequest indicates an expected call of SendAppRequest. -func (mr *MockSenderMockRecorder) SendAppRequest(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +func (mr *MockSenderMockRecorder) SendAppRequest(ctx, nodeIDs, requestID, appRequestBytes any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendAppRequest", reflect.TypeOf((*MockSender)(nil).SendAppRequest), arg0, arg1, arg2, arg3) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendAppRequest", reflect.TypeOf((*MockSender)(nil).SendAppRequest), ctx, nodeIDs, requestID, appRequestBytes) } // SendAppResponse mocks base method. 
-func (m *MockSender) SendAppResponse(arg0 context.Context, arg1 ids.NodeID, arg2 uint32, arg3 []byte) error { +func (m *MockSender) SendAppResponse(ctx context.Context, nodeID ids.NodeID, requestID uint32, appResponseBytes []byte) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SendAppResponse", arg0, arg1, arg2, arg3) + ret := m.ctrl.Call(m, "SendAppResponse", ctx, nodeID, requestID, appResponseBytes) ret0, _ := ret[0].(error) return ret0 } // SendAppResponse indicates an expected call of SendAppResponse. -func (mr *MockSenderMockRecorder) SendAppResponse(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +func (mr *MockSenderMockRecorder) SendAppResponse(ctx, nodeID, requestID, appResponseBytes any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendAppResponse", reflect.TypeOf((*MockSender)(nil).SendAppResponse), arg0, arg1, arg2, arg3) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendAppResponse", reflect.TypeOf((*MockSender)(nil).SendAppResponse), ctx, nodeID, requestID, appResponseBytes) } // SendChits mocks base method. -func (m *MockSender) SendChits(arg0 context.Context, arg1 ids.NodeID, arg2 uint32, arg3, arg4 []ids.ID) { +func (m *MockSender) SendChits(ctx context.Context, nodeID ids.NodeID, requestID uint32, preferredID, preferredIDAtHeight, acceptedID ids.ID) { m.ctrl.T.Helper() - m.ctrl.Call(m, "SendChits", arg0, arg1, arg2, arg3, arg4) + m.ctrl.Call(m, "SendChits", ctx, nodeID, requestID, preferredID, preferredIDAtHeight, acceptedID) } // SendChits indicates an expected call of SendChits. 
-func (mr *MockSenderMockRecorder) SendChits(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { +func (mr *MockSenderMockRecorder) SendChits(ctx, nodeID, requestID, preferredID, preferredIDAtHeight, acceptedID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendChits", reflect.TypeOf((*MockSender)(nil).SendChits), ctx, nodeID, requestID, preferredID, preferredIDAtHeight, acceptedID) +} + +// SendCrossChainAppError mocks base method. +func (m *MockSender) SendCrossChainAppError(ctx context.Context, chainID ids.ID, requestID uint32, errorCode int32, errorMessage string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SendCrossChainAppError", ctx, chainID, requestID, errorCode, errorMessage) + ret0, _ := ret[0].(error) + return ret0 +} + +// SendCrossChainAppError indicates an expected call of SendCrossChainAppError. +func (mr *MockSenderMockRecorder) SendCrossChainAppError(ctx, chainID, requestID, errorCode, errorMessage any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendChits", reflect.TypeOf((*MockSender)(nil).SendChits), arg0, arg1, arg2, arg3, arg4) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendCrossChainAppError", reflect.TypeOf((*MockSender)(nil).SendCrossChainAppError), ctx, chainID, requestID, errorCode, errorMessage) } // SendCrossChainAppRequest mocks base method. -func (m *MockSender) SendCrossChainAppRequest(arg0 context.Context, arg1 ids.ID, arg2 uint32, arg3 []byte) error { +func (m *MockSender) SendCrossChainAppRequest(ctx context.Context, chainID ids.ID, requestID uint32, appRequestBytes []byte) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SendCrossChainAppRequest", arg0, arg1, arg2, arg3) + ret := m.ctrl.Call(m, "SendCrossChainAppRequest", ctx, chainID, requestID, appRequestBytes) ret0, _ := ret[0].(error) return ret0 } // SendCrossChainAppRequest indicates an expected call of SendCrossChainAppRequest. 
-func (mr *MockSenderMockRecorder) SendCrossChainAppRequest(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +func (mr *MockSenderMockRecorder) SendCrossChainAppRequest(ctx, chainID, requestID, appRequestBytes any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendCrossChainAppRequest", reflect.TypeOf((*MockSender)(nil).SendCrossChainAppRequest), arg0, arg1, arg2, arg3) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendCrossChainAppRequest", reflect.TypeOf((*MockSender)(nil).SendCrossChainAppRequest), ctx, chainID, requestID, appRequestBytes) } // SendCrossChainAppResponse mocks base method. -func (m *MockSender) SendCrossChainAppResponse(arg0 context.Context, arg1 ids.ID, arg2 uint32, arg3 []byte) error { +func (m *MockSender) SendCrossChainAppResponse(ctx context.Context, chainID ids.ID, requestID uint32, appResponseBytes []byte) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SendCrossChainAppResponse", arg0, arg1, arg2, arg3) + ret := m.ctrl.Call(m, "SendCrossChainAppResponse", ctx, chainID, requestID, appResponseBytes) ret0, _ := ret[0].(error) return ret0 } // SendCrossChainAppResponse indicates an expected call of SendCrossChainAppResponse. -func (mr *MockSenderMockRecorder) SendCrossChainAppResponse(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +func (mr *MockSenderMockRecorder) SendCrossChainAppResponse(ctx, chainID, requestID, appResponseBytes any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendCrossChainAppResponse", reflect.TypeOf((*MockSender)(nil).SendCrossChainAppResponse), arg0, arg1, arg2, arg3) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendCrossChainAppResponse", reflect.TypeOf((*MockSender)(nil).SendCrossChainAppResponse), ctx, chainID, requestID, appResponseBytes) } // SendGet mocks base method. 
-func (m *MockSender) SendGet(arg0 context.Context, arg1 ids.NodeID, arg2 uint32, arg3 ids.ID) { +func (m *MockSender) SendGet(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerID ids.ID) { m.ctrl.T.Helper() - m.ctrl.Call(m, "SendGet", arg0, arg1, arg2, arg3) + m.ctrl.Call(m, "SendGet", ctx, nodeID, requestID, containerID) } // SendGet indicates an expected call of SendGet. -func (mr *MockSenderMockRecorder) SendGet(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +func (mr *MockSenderMockRecorder) SendGet(ctx, nodeID, requestID, containerID any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendGet", reflect.TypeOf((*MockSender)(nil).SendGet), arg0, arg1, arg2, arg3) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendGet", reflect.TypeOf((*MockSender)(nil).SendGet), ctx, nodeID, requestID, containerID) } // SendGetAccepted mocks base method. -func (m *MockSender) SendGetAccepted(arg0 context.Context, arg1 set.Set[ids.NodeID], arg2 uint32, arg3 []ids.ID) { +func (m *MockSender) SendGetAccepted(ctx context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32, containerIDs []ids.ID) { m.ctrl.T.Helper() - m.ctrl.Call(m, "SendGetAccepted", arg0, arg1, arg2, arg3) + m.ctrl.Call(m, "SendGetAccepted", ctx, nodeIDs, requestID, containerIDs) } // SendGetAccepted indicates an expected call of SendGetAccepted. 
-func (mr *MockSenderMockRecorder) SendGetAccepted(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +func (mr *MockSenderMockRecorder) SendGetAccepted(ctx, nodeIDs, requestID, containerIDs any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendGetAccepted", reflect.TypeOf((*MockSender)(nil).SendGetAccepted), arg0, arg1, arg2, arg3) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendGetAccepted", reflect.TypeOf((*MockSender)(nil).SendGetAccepted), ctx, nodeIDs, requestID, containerIDs) } // SendGetAcceptedFrontier mocks base method. -func (m *MockSender) SendGetAcceptedFrontier(arg0 context.Context, arg1 set.Set[ids.NodeID], arg2 uint32) { +func (m *MockSender) SendGetAcceptedFrontier(ctx context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32) { m.ctrl.T.Helper() - m.ctrl.Call(m, "SendGetAcceptedFrontier", arg0, arg1, arg2) + m.ctrl.Call(m, "SendGetAcceptedFrontier", ctx, nodeIDs, requestID) } // SendGetAcceptedFrontier indicates an expected call of SendGetAcceptedFrontier. -func (mr *MockSenderMockRecorder) SendGetAcceptedFrontier(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockSenderMockRecorder) SendGetAcceptedFrontier(ctx, nodeIDs, requestID any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendGetAcceptedFrontier", reflect.TypeOf((*MockSender)(nil).SendGetAcceptedFrontier), arg0, arg1, arg2) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendGetAcceptedFrontier", reflect.TypeOf((*MockSender)(nil).SendGetAcceptedFrontier), ctx, nodeIDs, requestID) } // SendGetAcceptedStateSummary mocks base method. 
-func (m *MockSender) SendGetAcceptedStateSummary(arg0 context.Context, arg1 set.Set[ids.NodeID], arg2 uint32, arg3 []uint64) { +func (m *MockSender) SendGetAcceptedStateSummary(ctx context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32, heights []uint64) { m.ctrl.T.Helper() - m.ctrl.Call(m, "SendGetAcceptedStateSummary", arg0, arg1, arg2, arg3) + m.ctrl.Call(m, "SendGetAcceptedStateSummary", ctx, nodeIDs, requestID, heights) } // SendGetAcceptedStateSummary indicates an expected call of SendGetAcceptedStateSummary. -func (mr *MockSenderMockRecorder) SendGetAcceptedStateSummary(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +func (mr *MockSenderMockRecorder) SendGetAcceptedStateSummary(ctx, nodeIDs, requestID, heights any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendGetAcceptedStateSummary", reflect.TypeOf((*MockSender)(nil).SendGetAcceptedStateSummary), arg0, arg1, arg2, arg3) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendGetAcceptedStateSummary", reflect.TypeOf((*MockSender)(nil).SendGetAcceptedStateSummary), ctx, nodeIDs, requestID, heights) } // SendGetAncestors mocks base method. -func (m *MockSender) SendGetAncestors(arg0 context.Context, arg1 ids.NodeID, arg2 uint32, arg3 ids.ID) { +func (m *MockSender) SendGetAncestors(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerID ids.ID) { m.ctrl.T.Helper() - m.ctrl.Call(m, "SendGetAncestors", arg0, arg1, arg2, arg3) + m.ctrl.Call(m, "SendGetAncestors", ctx, nodeID, requestID, containerID) } // SendGetAncestors indicates an expected call of SendGetAncestors. 
-func (mr *MockSenderMockRecorder) SendGetAncestors(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +func (mr *MockSenderMockRecorder) SendGetAncestors(ctx, nodeID, requestID, containerID any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendGetAncestors", reflect.TypeOf((*MockSender)(nil).SendGetAncestors), arg0, arg1, arg2, arg3) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendGetAncestors", reflect.TypeOf((*MockSender)(nil).SendGetAncestors), ctx, nodeID, requestID, containerID) } // SendGetStateSummaryFrontier mocks base method. -func (m *MockSender) SendGetStateSummaryFrontier(arg0 context.Context, arg1 set.Set[ids.NodeID], arg2 uint32) { +func (m *MockSender) SendGetStateSummaryFrontier(ctx context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32) { m.ctrl.T.Helper() - m.ctrl.Call(m, "SendGetStateSummaryFrontier", arg0, arg1, arg2) + m.ctrl.Call(m, "SendGetStateSummaryFrontier", ctx, nodeIDs, requestID) } // SendGetStateSummaryFrontier indicates an expected call of SendGetStateSummaryFrontier. -func (mr *MockSenderMockRecorder) SendGetStateSummaryFrontier(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockSenderMockRecorder) SendGetStateSummaryFrontier(ctx, nodeIDs, requestID any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendGetStateSummaryFrontier", reflect.TypeOf((*MockSender)(nil).SendGetStateSummaryFrontier), arg0, arg1, arg2) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendGetStateSummaryFrontier", reflect.TypeOf((*MockSender)(nil).SendGetStateSummaryFrontier), ctx, nodeIDs, requestID) } // SendGossip mocks base method. 
-func (m *MockSender) SendGossip(arg0 context.Context, arg1 []byte) { +func (m *MockSender) SendGossip(ctx context.Context, container []byte) { m.ctrl.T.Helper() - m.ctrl.Call(m, "SendGossip", arg0, arg1) + m.ctrl.Call(m, "SendGossip", ctx, container) } // SendGossip indicates an expected call of SendGossip. -func (mr *MockSenderMockRecorder) SendGossip(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockSenderMockRecorder) SendGossip(ctx, container any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendGossip", reflect.TypeOf((*MockSender)(nil).SendGossip), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendGossip", reflect.TypeOf((*MockSender)(nil).SendGossip), ctx, container) } // SendPullQuery mocks base method. -func (m *MockSender) SendPullQuery(arg0 context.Context, arg1 set.Set[ids.NodeID], arg2 uint32, arg3 ids.ID) { +func (m *MockSender) SendPullQuery(ctx context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32, containerID ids.ID, requestedHeight uint64) { m.ctrl.T.Helper() - m.ctrl.Call(m, "SendPullQuery", arg0, arg1, arg2, arg3) + m.ctrl.Call(m, "SendPullQuery", ctx, nodeIDs, requestID, containerID, requestedHeight) } // SendPullQuery indicates an expected call of SendPullQuery. -func (mr *MockSenderMockRecorder) SendPullQuery(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +func (mr *MockSenderMockRecorder) SendPullQuery(ctx, nodeIDs, requestID, containerID, requestedHeight any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendPullQuery", reflect.TypeOf((*MockSender)(nil).SendPullQuery), arg0, arg1, arg2, arg3) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendPullQuery", reflect.TypeOf((*MockSender)(nil).SendPullQuery), ctx, nodeIDs, requestID, containerID, requestedHeight) } // SendPushQuery mocks base method. 
-func (m *MockSender) SendPushQuery(arg0 context.Context, arg1 set.Set[ids.NodeID], arg2 uint32, arg3 []byte) { +func (m *MockSender) SendPushQuery(ctx context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32, container []byte, requestedHeight uint64) { m.ctrl.T.Helper() - m.ctrl.Call(m, "SendPushQuery", arg0, arg1, arg2, arg3) + m.ctrl.Call(m, "SendPushQuery", ctx, nodeIDs, requestID, container, requestedHeight) } // SendPushQuery indicates an expected call of SendPushQuery. -func (mr *MockSenderMockRecorder) SendPushQuery(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +func (mr *MockSenderMockRecorder) SendPushQuery(ctx, nodeIDs, requestID, container, requestedHeight any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendPushQuery", reflect.TypeOf((*MockSender)(nil).SendPushQuery), arg0, arg1, arg2, arg3) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendPushQuery", reflect.TypeOf((*MockSender)(nil).SendPushQuery), ctx, nodeIDs, requestID, container, requestedHeight) } // SendPut mocks base method. -func (m *MockSender) SendPut(arg0 context.Context, arg1 ids.NodeID, arg2 uint32, arg3 []byte) { +func (m *MockSender) SendPut(ctx context.Context, nodeID ids.NodeID, requestID uint32, container []byte) { m.ctrl.T.Helper() - m.ctrl.Call(m, "SendPut", arg0, arg1, arg2, arg3) + m.ctrl.Call(m, "SendPut", ctx, nodeID, requestID, container) } // SendPut indicates an expected call of SendPut. 
-func (mr *MockSenderMockRecorder) SendPut(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +func (mr *MockSenderMockRecorder) SendPut(ctx, nodeID, requestID, container any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendPut", reflect.TypeOf((*MockSender)(nil).SendPut), arg0, arg1, arg2, arg3) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendPut", reflect.TypeOf((*MockSender)(nil).SendPut), ctx, nodeID, requestID, container) } // SendStateSummaryFrontier mocks base method. -func (m *MockSender) SendStateSummaryFrontier(arg0 context.Context, arg1 ids.NodeID, arg2 uint32, arg3 []byte) { +func (m *MockSender) SendStateSummaryFrontier(ctx context.Context, nodeID ids.NodeID, requestID uint32, summary []byte) { m.ctrl.T.Helper() - m.ctrl.Call(m, "SendStateSummaryFrontier", arg0, arg1, arg2, arg3) + m.ctrl.Call(m, "SendStateSummaryFrontier", ctx, nodeID, requestID, summary) } // SendStateSummaryFrontier indicates an expected call of SendStateSummaryFrontier. -func (mr *MockSenderMockRecorder) SendStateSummaryFrontier(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +func (mr *MockSenderMockRecorder) SendStateSummaryFrontier(ctx, nodeID, requestID, summary any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendStateSummaryFrontier", reflect.TypeOf((*MockSender)(nil).SendStateSummaryFrontier), arg0, arg1, arg2, arg3) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendStateSummaryFrontier", reflect.TypeOf((*MockSender)(nil).SendStateSummaryFrontier), ctx, nodeID, requestID, summary) } diff --git a/avalanchego/snow/engine/common/no_ops_handlers.go b/avalanchego/snow/engine/common/no_ops_handlers.go index c2f2c211..870c6694 100644 --- a/avalanchego/snow/engine/common/no_ops_handlers.go +++ b/avalanchego/snow/engine/common/no_ops_handlers.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
+// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package common @@ -13,6 +13,8 @@ import ( "github.com/ava-labs/avalanchego/message" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/version" ) var ( @@ -25,6 +27,7 @@ var ( _ QueryHandler = (*noOpQueryHandler)(nil) _ ChitsHandler = (*noOpChitsHandler)(nil) _ AppHandler = (*noOpAppHandler)(nil) + _ InternalHandler = (*noOpInternalHandler)(nil) ) type noOpStateSummaryFrontierHandler struct { @@ -63,7 +66,7 @@ func NewNoOpAcceptedStateSummaryHandler(log logging.Logger) AcceptedStateSummary return &noOpAcceptedStateSummaryHandler{log: log} } -func (nop *noOpAcceptedStateSummaryHandler) AcceptedStateSummary(_ context.Context, nodeID ids.NodeID, requestID uint32, _ []ids.ID) error { +func (nop *noOpAcceptedStateSummaryHandler) AcceptedStateSummary(_ context.Context, nodeID ids.NodeID, requestID uint32, _ set.Set[ids.ID]) error { nop.log.Debug("dropping request", zap.String("reason", "unhandled by this gear"), zap.Stringer("messageOp", message.AcceptedStateSummaryOp), @@ -91,12 +94,13 @@ func NewNoOpAcceptedFrontierHandler(log logging.Logger) AcceptedFrontierHandler return &noOpAcceptedFrontierHandler{log: log} } -func (nop *noOpAcceptedFrontierHandler) AcceptedFrontier(_ context.Context, nodeID ids.NodeID, requestID uint32, _ []ids.ID) error { +func (nop *noOpAcceptedFrontierHandler) AcceptedFrontier(_ context.Context, nodeID ids.NodeID, requestID uint32, containerID ids.ID) error { nop.log.Debug("dropping request", zap.String("reason", "unhandled by this gear"), zap.Stringer("messageOp", message.AcceptedFrontierOp), zap.Stringer("nodeID", nodeID), zap.Uint32("requestID", requestID), + zap.Stringer("containerID", containerID), ) return nil } @@ -119,7 +123,7 @@ func NewNoOpAcceptedHandler(log logging.Logger) AcceptedHandler { return 
&noOpAcceptedHandler{log: log} } -func (nop *noOpAcceptedHandler) Accepted(_ context.Context, nodeID ids.NodeID, requestID uint32, _ []ids.ID) error { +func (nop *noOpAcceptedHandler) Accepted(_ context.Context, nodeID ids.NodeID, requestID uint32, _ set.Set[ids.ID]) error { nop.log.Debug("dropping request", zap.String("reason", "unhandled by this gear"), zap.Stringer("messageOp", message.AcceptedOp), @@ -212,22 +216,25 @@ func NewNoOpQueryHandler(log logging.Logger) QueryHandler { return &noOpQueryHandler{log: log} } -func (nop *noOpQueryHandler) PullQuery(_ context.Context, nodeID ids.NodeID, requestID uint32, _ ids.ID) error { +func (nop *noOpQueryHandler) PullQuery(_ context.Context, nodeID ids.NodeID, requestID uint32, containerID ids.ID, requestedHeight uint64) error { nop.log.Debug("dropping request", zap.String("reason", "unhandled by this gear"), zap.Stringer("messageOp", message.PullQueryOp), zap.Stringer("nodeID", nodeID), zap.Uint32("requestID", requestID), + zap.Stringer("containerID", containerID), + zap.Uint64("requestedHeight", requestedHeight), ) return nil } -func (nop *noOpQueryHandler) PushQuery(_ context.Context, nodeID ids.NodeID, requestID uint32, _ []byte) error { +func (nop *noOpQueryHandler) PushQuery(_ context.Context, nodeID ids.NodeID, requestID uint32, _ []byte, requestedHeight uint64) error { nop.log.Debug("dropping request", zap.String("reason", "unhandled by this gear"), zap.Stringer("messageOp", message.PushQueryOp), zap.Stringer("nodeID", nodeID), zap.Uint32("requestID", requestID), + zap.Uint64("requestedHeight", requestedHeight), ) return nil } @@ -240,12 +247,15 @@ func NewNoOpChitsHandler(log logging.Logger) ChitsHandler { return &noOpChitsHandler{log: log} } -func (nop *noOpChitsHandler) Chits(_ context.Context, nodeID ids.NodeID, requestID uint32, _, _ []ids.ID) error { +func (nop *noOpChitsHandler) Chits(_ context.Context, nodeID ids.NodeID, requestID uint32, preferredID, preferredIDAtHeight, acceptedID ids.ID) error { 
nop.log.Debug("dropping request", zap.String("reason", "unhandled by this gear"), zap.Stringer("messageOp", message.ChitsOp), zap.Stringer("nodeID", nodeID), zap.Uint32("requestID", requestID), + zap.Stringer("preferredID", preferredID), + zap.Stringer("preferredIDAtHeight", preferredIDAtHeight), + zap.Stringer("acceptedID", acceptedID), ) return nil } @@ -278,12 +288,13 @@ func (nop *noOpAppHandler) CrossChainAppRequest(_ context.Context, chainID ids.I return nil } -func (nop *noOpAppHandler) CrossChainAppRequestFailed(_ context.Context, chainID ids.ID, requestID uint32) error { +func (nop *noOpAppHandler) CrossChainAppRequestFailed(_ context.Context, chainID ids.ID, requestID uint32, appErr *AppError) error { nop.log.Debug("dropping request", zap.String("reason", "unhandled by this gear"), - zap.Stringer("messageOp", message.CrossChainAppRequestFailedOp), + zap.Stringer("messageOp", message.CrossChainAppErrorOp), zap.Stringer("chainID", chainID), zap.Uint32("requestID", requestID), + zap.Error(appErr), ) return nil } @@ -308,12 +319,13 @@ func (nop *noOpAppHandler) AppRequest(_ context.Context, nodeID ids.NodeID, requ return nil } -func (nop *noOpAppHandler) AppRequestFailed(_ context.Context, nodeID ids.NodeID, requestID uint32) error { +func (nop *noOpAppHandler) AppRequestFailed(_ context.Context, nodeID ids.NodeID, requestID uint32, appErr *AppError) error { nop.log.Debug("dropping request", zap.String("reason", "unhandled by this gear"), - zap.Stringer("messageOp", message.AppRequestFailedOp), + zap.Stringer("messageOp", message.AppErrorOp), zap.Stringer("nodeID", nodeID), zap.Uint32("requestID", requestID), + zap.Error(appErr), ) return nil } @@ -336,3 +348,74 @@ func (nop *noOpAppHandler) AppGossip(_ context.Context, nodeID ids.NodeID, _ []b ) return nil } + +type noOpInternalHandler struct { + log logging.Logger +} + +func NewNoOpInternalHandler(log logging.Logger) InternalHandler { + return &noOpInternalHandler{log: log} +} + +func (nop 
*noOpInternalHandler) Connected( + _ context.Context, + nodeID ids.NodeID, + nodeVersion *version.Application, +) error { + nop.log.Debug("dropping request", + zap.String("reason", "unhandled by this gear"), + zap.Stringer("messageOp", message.ConnectedOp), + zap.Stringer("nodeID", nodeID), + zap.Stringer("version", nodeVersion), + ) + return nil +} + +func (nop *noOpInternalHandler) Disconnected(_ context.Context, nodeID ids.NodeID) error { + nop.log.Debug("dropping request", + zap.String("reason", "unhandled by this gear"), + zap.Stringer("messageOp", message.DisconnectedOp), + zap.Stringer("nodeID", nodeID), + ) + return nil +} + +func (nop *noOpInternalHandler) Timeout(context.Context) error { + nop.log.Debug("dropping request", + zap.String("reason", "unhandled by this gear"), + zap.Stringer("messageOp", message.TimeoutOp), + ) + return nil +} + +func (nop *noOpInternalHandler) Gossip(context.Context) error { + nop.log.Debug("dropping request", + zap.String("reason", "unhandled by this gear"), + zap.Stringer("messageOp", message.GossipRequestOp), + ) + return nil +} + +func (nop *noOpInternalHandler) Halt(context.Context) { + nop.log.Debug("dropping request", + zap.String("reason", "unhandled by this gear"), + zap.String("messageOp", "halt"), + ) +} + +func (nop *noOpInternalHandler) Shutdown(context.Context) error { + nop.log.Debug("dropping request", + zap.String("reason", "unhandled by this gear"), + zap.String("messageOp", "shutdown"), + ) + return nil +} + +func (nop *noOpInternalHandler) Notify(_ context.Context, msg Message) error { + nop.log.Debug("dropping request", + zap.String("reason", "unhandled by this gear"), + zap.Stringer("messageOp", message.NotifyOp), + zap.Stringer("message", msg), + ) + return nil +} diff --git a/avalanchego/snow/engine/common/queue/job.go b/avalanchego/snow/engine/common/queue/job.go index 4ac5a60f..3b36893f 100644 --- a/avalanchego/snow/engine/common/queue/job.go +++ b/avalanchego/snow/engine/common/queue/job.go @@ -1,4 
+1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package queue diff --git a/avalanchego/snow/engine/common/queue/jobs.go b/avalanchego/snow/engine/common/queue/jobs.go index 728edcc9..a8955a8d 100644 --- a/avalanchego/snow/engine/common/queue/jobs.go +++ b/avalanchego/snow/engine/common/queue/jobs.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package queue @@ -9,7 +9,6 @@ import ( "time" "github.com/prometheus/client_golang/prometheus" - "go.uber.org/zap" "github.com/ava-labs/avalanchego/database" @@ -17,9 +16,9 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/timer" - "github.com/ava-labs/avalanchego/utils/wrappers" ) const progressUpdateFrequency = 30 * time.Second @@ -425,10 +424,8 @@ func (jm *JobsWithMissing) cleanRunnableStack(ctx context.Context) error { } } - errs := wrappers.Errs{} - errs.Add( + return utils.Err( runnableJobsIter.Error(), jm.Commit(), ) - return errs.Err } diff --git a/avalanchego/snow/engine/common/queue/jobs_test.go b/avalanchego/snow/engine/common/queue/jobs_test.go index 296266ee..06a6d6d9 100644 --- a/avalanchego/snow/engine/common/queue/jobs_test.go +++ b/avalanchego/snow/engine/common/queue/jobs_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package queue @@ -6,17 +6,17 @@ package queue import ( "bytes" "context" + "math" "testing" "github.com/prometheus/client_golang/prometheus" - "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/utils/set" ) @@ -32,7 +32,7 @@ func testJob(t *testing.T, jobID ids.ID, executed *bool, parentID ids.ID, parent }, MissingDependenciesF: func(context.Context) (set.Set[ids.ID], error) { if parentID != ids.Empty && !*parentExecuted { - return set.Set[ids.ID]{parentID: struct{}{}}, nil + return set.Of(parentID), nil } return set.Set[ids.ID]{}, nil }, @@ -62,12 +62,8 @@ func TestNew(t *testing.T) { db := memdb.New() jobs, err := New(db, "", prometheus.NewRegistry()) - if err != nil { - t.Fatal(err) - } - if err := jobs.SetParser(parser); err != nil { - t.Fatal(err) - } + require.NoError(err) + require.NoError(jobs.SetParser(parser)) dbSize, err := database.Size(db) require.NoError(err) @@ -83,12 +79,8 @@ func TestPushAndExecute(t *testing.T) { db := memdb.New() jobs, err := New(db, "", prometheus.NewRegistry()) - if err != nil { - t.Fatal(err) - } - if err := jobs.SetParser(parser); err != nil { - t.Fatal(err) - } + require.NoError(err) + require.NoError(jobs.SetParser(parser)) jobID := ids.GenerateTestID() job := testJob(t, jobID, nil, ids.Empty, nil) @@ -104,14 +96,11 @@ func TestPushAndExecute(t *testing.T) { require.NoError(err) require.True(has) - err = jobs.Commit() - require.NoError(err) + require.NoError(jobs.Commit()) jobs, err = New(db, "", prometheus.NewRegistry()) require.NoError(err) - if err := jobs.SetParser(parser); err != nil { - t.Fatal(err) - } + require.NoError(jobs.SetParser(parser)) has, err = jobs.Has(jobID) require.NoError(err) @@ -126,7 +115,8 @@ func 
TestPushAndExecute(t *testing.T) { return job, nil } - count, err := jobs.ExecuteAll(context.Background(), snow.DefaultConsensusContextTest(), &common.Halter{}, false) + snowCtx := snowtest.Context(t, snowtest.CChainID) + count, err := jobs.ExecuteAll(context.Background(), snowtest.ConsensusContext(snowCtx), &common.Halter{}, false) require.NoError(err) require.Equal(1, count) @@ -152,12 +142,8 @@ func TestRemoveDependency(t *testing.T) { db := memdb.New() jobs, err := New(db, "", prometheus.NewRegistry()) - if err != nil { - t.Fatal(err) - } - if err := jobs.SetParser(parser); err != nil { - t.Fatal(err) - } + require.NoError(err) + require.NoError(jobs.SetParser(parser)) job0ID, executed0 := ids.GenerateTestID(), false job1ID, executed1 := ids.GenerateTestID(), false @@ -196,7 +182,8 @@ func TestRemoveDependency(t *testing.T) { } } - count, err := jobs.ExecuteAll(context.Background(), snow.DefaultConsensusContextTest(), &common.Halter{}, false) + snowCtx := snowtest.Context(t, snowtest.CChainID) + count, err := jobs.ExecuteAll(context.Background(), snowtest.ConsensusContext(snowCtx), &common.Halter{}, false) require.NoError(err) require.Equal(2, count) require.True(executed0) @@ -218,9 +205,7 @@ func TestDuplicatedExecutablePush(t *testing.T) { db := memdb.New() jobs, err := New(db, "", prometheus.NewRegistry()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) jobID := ids.GenerateTestID() job := testJob(t, jobID, nil, ids.Empty, nil) @@ -233,8 +218,7 @@ func TestDuplicatedExecutablePush(t *testing.T) { require.False(pushed) require.NoError(err) - err = jobs.Commit() - require.NoError(err) + require.NoError(jobs.Commit()) jobs, err = New(db, "", prometheus.NewRegistry()) require.NoError(err) @@ -251,9 +235,7 @@ func TestDuplicatedNotExecutablePush(t *testing.T) { db := memdb.New() jobs, err := New(db, "", prometheus.NewRegistry()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) job0ID, executed0 := ids.GenerateTestID(), false job1ID := 
ids.GenerateTestID() @@ -267,8 +249,7 @@ func TestDuplicatedNotExecutablePush(t *testing.T) { require.False(pushed) require.NoError(err) - err = jobs.Commit() - require.NoError(err) + require.NoError(jobs.Commit()) jobs, err = New(db, "", prometheus.NewRegistry()) require.NoError(err) @@ -286,9 +267,7 @@ func TestMissingJobs(t *testing.T) { jobs, err := NewWithMissing(db, "", prometheus.NewRegistry()) require.NoError(err) - if err := jobs.SetParser(context.Background(), parser); err != nil { - t.Fatal(err) - } + require.NoError(jobs.SetParser(context.Background(), parser)) job0ID := ids.GenerateTestID() job1ID := ids.GenerateTestID() @@ -296,14 +275,12 @@ func TestMissingJobs(t *testing.T) { jobs.AddMissingID(job0ID) jobs.AddMissingID(job1ID) - err = jobs.Commit() - require.NoError(err) + require.NoError(jobs.Commit()) numMissingIDs := jobs.NumMissingIDs() require.Equal(2, numMissingIDs) - missingIDSet := set.Set[ids.ID]{} - missingIDSet.Add(jobs.MissingIDs()...) + missingIDSet := set.Of(jobs.MissingIDs()...) containsJob0ID := missingIDSet.Contains(job0ID) require.True(containsJob0ID) @@ -313,17 +290,13 @@ func TestMissingJobs(t *testing.T) { jobs.RemoveMissingID(job1ID) - err = jobs.Commit() - require.NoError(err) + require.NoError(jobs.Commit()) jobs, err = NewWithMissing(db, "", prometheus.NewRegistry()) require.NoError(err) - if err := jobs.SetParser(context.Background(), parser); err != nil { - t.Fatal(err) - } + require.NoError(jobs.SetParser(context.Background(), parser)) - missingIDSet = set.Set[ids.ID]{} - missingIDSet.Add(jobs.MissingIDs()...) + missingIDSet = set.Of(jobs.MissingIDs()...) 
containsJob0ID = missingIDSet.Contains(job0ID) require.True(containsJob0ID) @@ -339,12 +312,8 @@ func TestHandleJobWithMissingDependencyOnRunnableStack(t *testing.T) { db := memdb.New() jobs, err := NewWithMissing(db, "", prometheus.NewRegistry()) - if err != nil { - t.Fatal(err) - } - if err := jobs.SetParser(context.Background(), parser); err != nil { - t.Fatal(err) - } + require.NoError(err) + require.NoError(jobs.SetParser(context.Background(), parser)) job0ID, executed0 := ids.GenerateTestID(), false job1ID, executed1 := ids.GenerateTestID(), false @@ -387,10 +356,11 @@ func TestHandleJobWithMissingDependencyOnRunnableStack(t *testing.T) { } } - _, err = jobs.ExecuteAll(context.Background(), snow.DefaultConsensusContextTest(), &common.Halter{}, false) + snowCtx := snowtest.Context(t, snowtest.CChainID) + _, err = jobs.ExecuteAll(context.Background(), snowtest.ConsensusContext(snowCtx), &common.Halter{}, false) // Assert that the database closed error on job1 causes ExecuteAll // to fail in the middle of execution. - require.Error(err) + require.ErrorIs(err, database.ErrClosed) require.True(executed0) require.False(executed1) @@ -403,15 +373,11 @@ func TestHandleJobWithMissingDependencyOnRunnableStack(t *testing.T) { // Create jobs queue from the same database and ensure that the jobs queue // recovers correctly. 
jobs, err = NewWithMissing(db, "", prometheus.NewRegistry()) - if err != nil { - t.Fatal(err) - } - if err := jobs.SetParser(context.Background(), parser); err != nil { - t.Fatal(err) - } + require.NoError(err) + require.NoError(jobs.SetParser(context.Background(), parser)) missingIDs := jobs.MissingIDs() - require.Equal(1, len(missingIDs)) + require.Len(missingIDs, 1) require.Equal(missingIDs[0], job0.ID()) @@ -423,7 +389,7 @@ func TestHandleJobWithMissingDependencyOnRunnableStack(t *testing.T) { require.NoError(err) require.True(hasNext) - count, err := jobs.ExecuteAll(context.Background(), snow.DefaultConsensusContextTest(), &common.Halter{}, false) + count, err := jobs.ExecuteAll(context.Background(), snowtest.ConsensusContext(snowCtx), &common.Halter{}, false) require.NoError(err) require.Equal(2, count) require.True(executed1) @@ -436,12 +402,8 @@ func TestInitializeNumJobs(t *testing.T) { db := memdb.New() jobs, err := NewWithMissing(db, "", prometheus.NewRegistry()) - if err != nil { - t.Fatal(err) - } - if err := jobs.SetParser(context.Background(), parser); err != nil { - t.Fatal(err) - } + require.NoError(err) + require.NoError(jobs.SetParser(context.Background(), parser)) job0ID := ids.GenerateTestID() job1ID := ids.GenerateTestID() @@ -482,27 +444,20 @@ func TestInitializeNumJobs(t *testing.T) { pushed, err := jobs.Push(context.Background(), job0) require.True(pushed) require.NoError(err) - require.EqualValues(1, jobs.state.numJobs) + require.Equal(uint64(1), jobs.state.numJobs) pushed, err = jobs.Push(context.Background(), job1) require.True(pushed) require.NoError(err) - require.EqualValues(2, jobs.state.numJobs) + require.Equal(uint64(2), jobs.state.numJobs) - err = jobs.Commit() - require.NoError(err) - - err = database.Clear(jobs.state.metadataDB, jobs.state.metadataDB) - require.NoError(err) - - err = jobs.Commit() - require.NoError(err) + require.NoError(jobs.Commit()) + require.NoError(database.Clear(jobs.state.metadataDB, math.MaxInt)) + 
require.NoError(jobs.Commit()) jobs, err = NewWithMissing(db, "", prometheus.NewRegistry()) - if err != nil { - t.Fatal(err) - } - require.EqualValues(2, jobs.state.numJobs) + require.NoError(err) + require.Equal(uint64(2), jobs.state.numJobs) } func TestClearAll(t *testing.T) { @@ -512,12 +467,8 @@ func TestClearAll(t *testing.T) { db := memdb.New() jobs, err := NewWithMissing(db, "", prometheus.NewRegistry()) - if err != nil { - t.Fatal(err) - } - if err := jobs.SetParser(context.Background(), parser); err != nil { - t.Fatal(err) - } + require.NoError(err) + require.NoError(jobs.SetParser(context.Background(), parser)) job0ID, executed0 := ids.GenerateTestID(), false job1ID, executed1 := ids.GenerateTestID(), false job0 := testJob(t, job0ID, &executed0, ids.Empty, nil) diff --git a/avalanchego/snow/engine/common/queue/parser.go b/avalanchego/snow/engine/common/queue/parser.go index ee8f3980..07e9df50 100644 --- a/avalanchego/snow/engine/common/queue/parser.go +++ b/avalanchego/snow/engine/common/queue/parser.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package queue diff --git a/avalanchego/snow/engine/common/queue/state.go b/avalanchego/snow/engine/common/queue/state.go index 5e5ccb23..76bce7c8 100644 --- a/avalanchego/snow/engine/common/queue/state.go +++ b/avalanchego/snow/engine/common/queue/state.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package queue @@ -15,8 +15,9 @@ import ( "github.com/ava-labs/avalanchego/database/linkeddb" "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/metric" "github.com/ava-labs/avalanchego/utils/set" - "github.com/ava-labs/avalanchego/utils/wrappers" ) const ( @@ -61,7 +62,7 @@ func newState( metricsNamespace string, metricsRegisterer prometheus.Registerer, ) (*state, error) { - jobsCacheMetricsNamespace := fmt.Sprintf("%s_jobs_cache", metricsNamespace) + jobsCacheMetricsNamespace := metric.AppendNamespace(metricsNamespace, "jobs_cache") jobsCache, err := metercacher.New[ids.ID, Job]( jobsCacheMetricsNamespace, metricsRegisterer, @@ -152,14 +153,12 @@ func (s *state) Clear() error { return err } - errs := wrappers.Errs{} - errs.Add( + return utils.Err( runJobsIter.Error(), jobsIter.Error(), depsIter.Error(), missJobsIter.Error(), ) - return errs.Err } // AddRunnableJob adds [jobID] to the runnable queue diff --git a/avalanchego/snow/engine/common/queue/test_job.go b/avalanchego/snow/engine/common/queue/test_job.go index 09e51855..fd9af544 100644 --- a/avalanchego/snow/engine/common/queue/test_job.go +++ b/avalanchego/snow/engine/common/queue/test_job.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package queue @@ -8,6 +8,8 @@ import ( "errors" "testing" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/set" ) @@ -47,7 +49,7 @@ func (j *TestJob) ID() ids.ID { return j.IDF() } if j.CantID && j.T != nil { - j.T.Fatalf("Unexpectedly called ID") + require.FailNow(j.T, "Unexpectedly called ID") } return ids.ID{} } @@ -57,7 +59,7 @@ func (j *TestJob) MissingDependencies(ctx context.Context) (set.Set[ids.ID], err return j.MissingDependenciesF(ctx) } if j.CantMissingDependencies && j.T != nil { - j.T.Fatalf("Unexpectedly called MissingDependencies") + require.FailNow(j.T, "Unexpectedly called MissingDependencies") } return set.Set[ids.ID]{}, nil } @@ -67,7 +69,7 @@ func (j *TestJob) Execute(ctx context.Context) error { return j.ExecuteF(ctx) } if j.CantExecute && j.T != nil { - j.T.Fatal(errExecute) + require.FailNow(j.T, errExecute.Error()) } return errExecute } @@ -77,7 +79,7 @@ func (j *TestJob) Bytes() []byte { return j.BytesF() } if j.CantBytes && j.T != nil { - j.T.Fatalf("Unexpectedly called Bytes") + require.FailNow(j.T, "Unexpectedly called Bytes") } return nil } @@ -87,7 +89,7 @@ func (j *TestJob) HasMissingDependencies(ctx context.Context) (bool, error) { return j.HasMissingDependenciesF(ctx) } if j.CantHasMissingDependencies && j.T != nil { - j.T.Fatal(errHasMissingDependencies) + require.FailNow(j.T, errHasMissingDependencies.Error()) } return false, errHasMissingDependencies } diff --git a/avalanchego/snow/engine/common/queue/test_parser.go b/avalanchego/snow/engine/common/queue/test_parser.go index 1e7fa9cd..1cc1cfd2 100644 --- a/avalanchego/snow/engine/common/queue/test_parser.go +++ b/avalanchego/snow/engine/common/queue/test_parser.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package queue @@ -7,6 +7,8 @@ import ( "context" "errors" "testing" + + "github.com/stretchr/testify/require" ) var errParse = errors.New("unexpectedly called Parse") @@ -29,7 +31,7 @@ func (p *TestParser) Parse(ctx context.Context, b []byte) (Job, error) { return p.ParseF(ctx, b) } if p.CantParse && p.T != nil { - p.T.Fatal(errParse) + require.FailNow(p.T, errParse.Error()) } return nil, errParse } diff --git a/avalanchego/snow/engine/common/request.go b/avalanchego/snow/engine/common/request.go new file mode 100644 index 00000000..f92e347c --- /dev/null +++ b/avalanchego/snow/engine/common/request.go @@ -0,0 +1,19 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package common + +import ( + "fmt" + + "github.com/ava-labs/avalanchego/ids" +) + +type Request struct { + NodeID ids.NodeID + RequestID uint32 +} + +func (r Request) MarshalText() ([]byte, error) { + return []byte(fmt.Sprintf("%s:%d", r.NodeID, r.RequestID)), nil +} diff --git a/avalanchego/snow/engine/common/request_test.go b/avalanchego/snow/engine/common/request_test.go new file mode 100644 index 00000000..0da4c8c4 --- /dev/null +++ b/avalanchego/snow/engine/common/request_test.go @@ -0,0 +1,24 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package common + +import ( + "encoding/json" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" +) + +func TestRequestJSONMarshal(t *testing.T) { + requestMap := map[Request]ids.ID{ + { + NodeID: ids.GenerateTestNodeID(), + RequestID: 12345, + }: ids.GenerateTestID(), + } + _, err := json.Marshal(requestMap) + require.NoError(t, err) +} diff --git a/avalanchego/snow/engine/common/requests.go b/avalanchego/snow/engine/common/requests.go deleted file mode 100644 index ce66585e..00000000 --- a/avalanchego/snow/engine/common/requests.go +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package common - -import ( - "fmt" - "strings" - - "github.com/ava-labs/avalanchego/ids" -) - -const ( - minRequestsSize = 32 -) - -type req struct { - vdr ids.NodeID - id uint32 -} - -// Requests tracks pending container messages from a peer. -type Requests struct { - reqsToID map[ids.NodeID]map[uint32]ids.ID - idToReq map[ids.ID]req -} - -// Add a request. Assumes that requestIDs are unique. Assumes that containerIDs -// are only in one request at a time. -func (r *Requests) Add(vdr ids.NodeID, requestID uint32, containerID ids.ID) { - if r.reqsToID == nil { - r.reqsToID = make(map[ids.NodeID]map[uint32]ids.ID, minRequestsSize) - } - vdrReqs, ok := r.reqsToID[vdr] - if !ok { - vdrReqs = make(map[uint32]ids.ID) - r.reqsToID[vdr] = vdrReqs - } - vdrReqs[requestID] = containerID - - if r.idToReq == nil { - r.idToReq = make(map[ids.ID]req, minRequestsSize) - } - r.idToReq[containerID] = req{ - vdr: vdr, - id: requestID, - } -} - -// Get the containerID the request is expecting and if the request exists. -func (r *Requests) Get(vdr ids.NodeID, requestID uint32) (ids.ID, bool) { - containerID, ok := r.reqsToID[vdr][requestID] - return containerID, ok -} - -// Remove attempts to abandon a requestID sent to a validator. 
If the request is -// currently outstanding, the requested ID will be returned along with true. If -// the request isn't currently outstanding, false will be returned. -func (r *Requests) Remove(vdr ids.NodeID, requestID uint32) (ids.ID, bool) { - vdrReqs := r.reqsToID[vdr] - containerID, ok := vdrReqs[requestID] - if !ok { - return ids.ID{}, false - } - - if len(vdrReqs) == 1 { - delete(r.reqsToID, vdr) - } else { - delete(vdrReqs, requestID) - } - - delete(r.idToReq, containerID) - return containerID, true -} - -// RemoveAny outstanding requests for the container ID. True is returned if the -// container ID had an outstanding request. -func (r *Requests) RemoveAny(containerID ids.ID) bool { - req, ok := r.idToReq[containerID] - if !ok { - return false - } - - r.Remove(req.vdr, req.id) - return true -} - -// Len returns the total number of outstanding requests. -func (r *Requests) Len() int { - return len(r.idToReq) -} - -// Contains returns true if there is an outstanding request for the container -// ID. -func (r *Requests) Contains(containerID ids.ID) bool { - _, ok := r.idToReq[containerID] - return ok -} - -func (r Requests) String() string { - sb := strings.Builder{} - sb.WriteString(fmt.Sprintf("Requests: (Num Validators = %d)", len(r.reqsToID))) - for vdr, reqs := range r.reqsToID { - sb.WriteString(fmt.Sprintf("\n VDR[%s]: (Outstanding Requests %d)", vdr, len(reqs))) - for reqID, containerID := range reqs { - sb.WriteString(fmt.Sprintf("\n Request[%d]: %s", reqID, containerID)) - } - } - return sb.String() -} diff --git a/avalanchego/snow/engine/common/requests_test.go b/avalanchego/snow/engine/common/requests_test.go deleted file mode 100644 index 4d779a64..00000000 --- a/avalanchego/snow/engine/common/requests_test.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package common - -import ( - "testing" - - "github.com/stretchr/testify/require" - - "github.com/ava-labs/avalanchego/ids" -) - -func TestRequests(t *testing.T) { - req := Requests{} - - length := req.Len() - require.Equal(t, 0, length, "should have had no outstanding requests") - - _, removed := req.Remove(ids.EmptyNodeID, 0) - require.False(t, removed, "shouldn't have removed the request") - - removed = req.RemoveAny(ids.Empty) - require.False(t, removed, "shouldn't have removed the request") - - constains := req.Contains(ids.Empty) - require.False(t, constains, "shouldn't contain this request") - - req.Add(ids.EmptyNodeID, 0, ids.Empty) - - length = req.Len() - require.Equal(t, 1, length, "should have had one outstanding request") - - _, removed = req.Remove(ids.EmptyNodeID, 1) - require.False(t, removed, "shouldn't have removed the request") - - _, removed = req.Remove(ids.NodeID{1}, 0) - require.False(t, removed, "shouldn't have removed the request") - - constains = req.Contains(ids.Empty) - require.True(t, constains, "should contain this request") - - length = req.Len() - require.Equal(t, 1, length, "should have had one outstanding request") - - req.Add(ids.EmptyNodeID, 10, ids.Empty.Prefix(0)) - - length = req.Len() - require.Equal(t, 2, length, "should have had two outstanding requests") - - _, removed = req.Remove(ids.EmptyNodeID, 1) - require.False(t, removed, "shouldn't have removed the request") - - _, removed = req.Remove(ids.NodeID{1}, 0) - require.False(t, removed, "shouldn't have removed the request") - - constains = req.Contains(ids.Empty) - require.True(t, constains, "should contain this request") - - length = req.Len() - require.Equal(t, 2, length, "should have had two outstanding requests") - - removedID, removed := req.Remove(ids.EmptyNodeID, 0) - require.Equal(t, ids.Empty, removedID, "should have removed the requested ID") - require.True(t, removed, "should have removed the request") - - removedID, removed = req.Remove(ids.EmptyNodeID, 10) 
- require.Equal(t, ids.Empty.Prefix(0), removedID, "should have removed the requested ID") - require.True(t, removed, "should have removed the request") - - length = req.Len() - require.Equal(t, 0, length, "should have had no outstanding requests") - - req.Add(ids.EmptyNodeID, 0, ids.Empty) - - length = req.Len() - require.Equal(t, 1, length, "should have had one outstanding request") - - removed = req.RemoveAny(ids.Empty) - require.True(t, removed, "should have removed the request") - - length = req.Len() - require.Equal(t, 0, length, "should have had no outstanding requests") - - removed = req.RemoveAny(ids.Empty) - require.False(t, removed, "shouldn't have removed the request") - - length = req.Len() - require.Equal(t, 0, length, "should have had no outstanding requests") -} diff --git a/avalanchego/snow/engine/common/sender.go b/avalanchego/snow/engine/common/sender.go index 1c657daa..d0ba8563 100644 --- a/avalanchego/snow/engine/common/sender.go +++ b/avalanchego/snow/engine/common/sender.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package common @@ -12,7 +12,28 @@ import ( ) // Sender defines how a consensus engine sends messages and requests to other -// validators +// validators. +// +// Messages can be categorized as either: requests, responses, or gossip. Gossip +// messages do not include requestIDs, because no response is expected from the +// peer. However, both requests and responses include requestIDs. +// +// It is expected that each [nodeID + requestID + expected response type] that +// is outstanding at any given time is unique. +// +// As an example, it is valid to send `Get(nodeA, request0)` and +// `PullQuery(nodeA, request0)` because they have different expected response +// types, `Put` and `Chits`. 
+// +// Additionally, after having sent `Get(nodeA, request0)` and receiving either +// `Put(nodeA, request0)` or `GetFailed(nodeA, request0)`, it is valid to resend +// `Get(nodeA, request0)`. Because the initial `Get` request is no longer +// outstanding. +// +// This means that requestIDs can be reused. In practice, requests always have a +// reasonable maximum timeout, so it is generally safe to assume that by the +// time the requestID space has been exhausted, the beginning of the requestID +// space is free of conflicts. type Sender interface { snow.Acceptor @@ -62,7 +83,7 @@ type FrontierSender interface { ctx context.Context, nodeID ids.NodeID, requestID uint32, - containerIDs []ids.ID, + containerID ids.ID, ) } @@ -110,14 +131,33 @@ type QuerySender interface { // existence of the specified container. // This is the same as PullQuery, except that this message includes the body // of the container rather than its ID. - SendPushQuery(ctx context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32, container []byte) + SendPushQuery( + ctx context.Context, + nodeIDs set.Set[ids.NodeID], + requestID uint32, + container []byte, + requestedHeight uint64, + ) // Request from the specified nodes their preferred frontier, given the // existence of the specified container. 
- SendPullQuery(ctx context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32, containerID ids.ID) + SendPullQuery( + ctx context.Context, + nodeIDs set.Set[ids.NodeID], + requestID uint32, + containerID ids.ID, + requestedHeight uint64, + ) // Send chits to the specified node - SendChits(ctx context.Context, nodeID ids.NodeID, requestID uint32, votes []ids.ID, accepted []ids.ID) + SendChits( + ctx context.Context, + nodeID ids.NodeID, + requestID uint32, + preferredID ids.ID, + preferredIDAtHeight ids.ID, + acceptedID ids.ID, + ) } // Gossiper defines how a consensus engine gossips a container on the accepted @@ -135,15 +175,14 @@ type NetworkAppSender interface { // * An AppResponse from nodeID with ID [requestID] // * An AppRequestFailed from nodeID with ID [requestID] // Exactly one of the above messages will eventually be received per nodeID. - // A non-nil error should be considered fatal. SendAppRequest(ctx context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32, appRequestBytes []byte) error // Send an application-level response to a request. // This response must be in response to an AppRequest that the VM corresponding // to this AppSender received from [nodeID] with ID [requestID]. - // A non-nil error should be considered fatal. SendAppResponse(ctx context.Context, nodeID ids.NodeID, requestID uint32, appResponseBytes []byte) error + // SendAppError sends an application-level error to an AppRequest + SendAppError(ctx context.Context, nodeID ids.NodeID, requestID uint32, errorCode int32, errorMessage string) error // Gossip an application-level message. - // A non-nil error should be considered fatal. 
SendAppGossip(ctx context.Context, appGossipBytes []byte) error SendAppGossipSpecific(ctx context.Context, nodeIDs set.Set[ids.NodeID], appGossipBytes []byte) error } @@ -159,7 +198,6 @@ type CrossChainAppSender interface { // * A CrossChainAppRequestFailed from [chainID] with ID [requestID] // Exactly one of the above messages will eventually be received from // [chainID]. - // A non-nil error should be considered fatal. SendCrossChainAppRequest(ctx context.Context, chainID ids.ID, requestID uint32, appRequestBytes []byte) error // SendCrossChainAppResponse sends an application-level response to a // specific chain @@ -167,8 +205,9 @@ type CrossChainAppSender interface { // This response must be in response to a CrossChainAppRequest that the VM // corresponding to this CrossChainAppSender received from [chainID] with ID // [requestID]. - // A non-nil error should be considered fatal. SendCrossChainAppResponse(ctx context.Context, chainID ids.ID, requestID uint32, appResponseBytes []byte) error + // SendCrossChainAppError sends an application-level error to a CrossChainAppRequest + SendCrossChainAppError(ctx context.Context, chainID ids.ID, requestID uint32, errorCode int32, errorMessage string) error } // AppSender sends application (VM) level messages. diff --git a/avalanchego/snow/engine/common/state_syncer.go b/avalanchego/snow/engine/common/state_syncer.go index e23ad126..a6d159bb 100644 --- a/avalanchego/snow/engine/common/state_syncer.go +++ b/avalanchego/snow/engine/common/state_syncer.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package common diff --git a/avalanchego/snow/engine/common/test_bootstrap_tracker.go b/avalanchego/snow/engine/common/test_bootstrap_tracker.go index 5c5ec4d7..2e940f1a 100644 --- a/avalanchego/snow/engine/common/test_bootstrap_tracker.go +++ b/avalanchego/snow/engine/common/test_bootstrap_tracker.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package common @@ -6,6 +6,8 @@ package common import ( "testing" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/ids" ) @@ -36,7 +38,7 @@ func (s *BootstrapTrackerTest) IsBootstrapped() bool { return s.IsBootstrappedF() } if s.CantIsBootstrapped && s.T != nil { - s.T.Fatalf("Unexpectedly called IsBootstrapped") + require.FailNow(s.T, "Unexpectedly called IsBootstrapped") } return false } @@ -48,7 +50,7 @@ func (s *BootstrapTrackerTest) Bootstrapped(chainID ids.ID) { if s.BootstrappedF != nil { s.BootstrappedF(chainID) } else if s.CantBootstrapped && s.T != nil { - s.T.Fatalf("Unexpectedly called Bootstrapped") + require.FailNow(s.T, "Unexpectedly called Bootstrapped") } } @@ -56,7 +58,7 @@ func (s *BootstrapTrackerTest) OnBootstrapCompleted() chan struct{} { if s.OnBootstrapCompletedF != nil { return s.OnBootstrapCompletedF() } else if s.CantOnBootstrapCompleted && s.T != nil { - s.T.Fatalf("Unexpectedly called OnBootstrapCompleted") + require.FailNow(s.T, "Unexpectedly called OnBootstrapCompleted") } return nil } diff --git a/avalanchego/snow/engine/common/test_bootstrapable.go b/avalanchego/snow/engine/common/test_bootstrapable.go deleted file mode 100644 index ddc67b48..00000000 --- a/avalanchego/snow/engine/common/test_bootstrapable.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package common - -import ( - "context" - "errors" - "testing" - - "github.com/ava-labs/avalanchego/ids" -) - -var ( - _ Bootstrapable = (*BootstrapableTest)(nil) - - errForceAccepted = errors.New("unexpectedly called ForceAccepted") - errClear = errors.New("unexpectedly called Clear") -) - -// BootstrapableTest is a test engine that supports bootstrapping -type BootstrapableTest struct { - T *testing.T - - CantForceAccepted, CantClear bool - - ClearF func() error - ForceAcceptedF func(ctx context.Context, acceptedContainerIDs []ids.ID) error -} - -// Default sets the default on call handling -func (b *BootstrapableTest) Default(cant bool) { - b.CantForceAccepted = cant -} - -func (b *BootstrapableTest) Clear() error { - if b.ClearF != nil { - return b.ClearF() - } else if b.CantClear { - if b.T != nil { - b.T.Fatalf("Unexpectedly called Clear") - } - return errClear - } - return nil -} - -func (b *BootstrapableTest) ForceAccepted(ctx context.Context, containerIDs []ids.ID) error { - if b.ForceAcceptedF != nil { - return b.ForceAcceptedF(ctx, containerIDs) - } else if b.CantForceAccepted { - if b.T != nil { - b.T.Fatalf("Unexpectedly called ForceAccepted") - } - return errForceAccepted - } - return nil -} diff --git a/avalanchego/snow/engine/common/test_bootstrapper.go b/avalanchego/snow/engine/common/test_bootstrapper.go index 1f8fd59b..259fcb07 100644 --- a/avalanchego/snow/engine/common/test_bootstrapper.go +++ b/avalanchego/snow/engine/common/test_bootstrapper.go @@ -1,20 +1,41 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package common +import ( + "context" + "errors" + + "github.com/stretchr/testify/require" +) + var ( - _ Engine = (*BootstrapperTest)(nil) - _ Bootstrapable = (*BootstrapperTest)(nil) + _ BootstrapableEngine = (*BootstrapperTest)(nil) + + errClear = errors.New("unexpectedly called Clear") ) -// EngineTest is a test engine type BootstrapperTest struct { - BootstrapableTest EngineTest + + CantClear bool + + ClearF func(ctx context.Context) error } func (b *BootstrapperTest) Default(cant bool) { - b.BootstrapableTest.Default(cant) b.EngineTest.Default(cant) + + b.CantClear = cant +} + +func (b *BootstrapperTest) Clear(ctx context.Context) error { + if b.ClearF != nil { + return b.ClearF(ctx) + } + if b.CantClear && b.T != nil { + require.FailNow(b.T, errClear.Error()) + } + return errClear } diff --git a/avalanchego/snow/engine/common/test_config.go b/avalanchego/snow/engine/common/test_config.go deleted file mode 100644 index ceca80f2..00000000 --- a/avalanchego/snow/engine/common/test_config.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package common - -import ( - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/snow/engine/common/tracker" - "github.com/ava-labs/avalanchego/snow/validators" -) - -// DefaultConfigTest returns a test configuration -func DefaultConfigTest() Config { - isBootstrapped := false - bootstrapTracker := &BootstrapTrackerTest{ - IsBootstrappedF: func() bool { - return isBootstrapped - }, - BootstrappedF: func(ids.ID) { - isBootstrapped = true - }, - } - - beacons := validators.NewSet() - - connectedPeers := tracker.NewPeers() - startupTracker := tracker.NewStartup(connectedPeers, 0) - beacons.RegisterCallbackListener(startupTracker) - - return Config{ - Ctx: snow.DefaultConsensusContextTest(), - Beacons: beacons, - StartupTracker: startupTracker, - Sender: &SenderTest{}, - Bootstrapable: &BootstrapableTest{}, - BootstrapTracker: bootstrapTracker, - Timer: &TimerTest{}, - AncestorsMaxContainersSent: 2000, - AncestorsMaxContainersReceived: 2000, - SharedCfg: &SharedConfig{}, - } -} diff --git a/avalanchego/snow/engine/common/test_engine.go b/avalanchego/snow/engine/common/test_engine.go index 6645efad..e07352d4 100644 --- a/avalanchego/snow/engine/common/test_engine.go +++ b/avalanchego/snow/engine/common/test_engine.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package common @@ -9,8 +9,11 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/version" ) @@ -105,24 +108,27 @@ type EngineTest struct { CantGetVM bool - StartF func(ctx context.Context, startReqID uint32) error - IsBootstrappedF func() bool - ContextF func() *snow.ConsensusContext - HaltF func(context.Context) - TimeoutF, GossipF, ShutdownF func(context.Context) error - NotifyF func(context.Context, Message) error - GetF, GetAncestorsF, PullQueryF func(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerID ids.ID) error - PutF, PushQueryF func(ctx context.Context, nodeID ids.NodeID, requestID uint32, container []byte) error - AncestorsF func(ctx context.Context, nodeID ids.NodeID, requestID uint32, containers [][]byte) error - AcceptedFrontierF, GetAcceptedF, AcceptedF func(ctx context.Context, nodeID ids.NodeID, requestID uint32, preferredIDs []ids.ID) error - ChitsF func(ctx context.Context, nodeID ids.NodeID, requestID uint32, preferredIDs []ids.ID, acceptedIDs []ids.ID) error + StartF func(ctx context.Context, startReqID uint32) error + IsBootstrappedF func() bool + ContextF func() *snow.ConsensusContext + HaltF func(context.Context) + TimeoutF, GossipF, ShutdownF func(context.Context) error + NotifyF func(context.Context, Message) error + GetF, GetAncestorsF func(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerID ids.ID) error + PullQueryF func(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerID ids.ID, requestedHeight uint64) error + PutF func(ctx context.Context, nodeID ids.NodeID, requestID uint32, container []byte) error + PushQueryF func(ctx context.Context, nodeID ids.NodeID, requestID uint32, container []byte, requestedHeight uint64) error + AncestorsF func(ctx context.Context, nodeID ids.NodeID, requestID uint32, containers 
[][]byte) error + AcceptedFrontierF func(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerID ids.ID) error + GetAcceptedF, AcceptedF func(ctx context.Context, nodeID ids.NodeID, requestID uint32, preferredIDs set.Set[ids.ID]) error + ChitsF func(ctx context.Context, nodeID ids.NodeID, requestID uint32, preferredID ids.ID, preferredIDAtHeight ids.ID, acceptedID ids.ID) error GetStateSummaryFrontierF, GetStateSummaryFrontierFailedF, GetAcceptedStateSummaryFailedF, GetAcceptedFrontierF, GetFailedF, GetAncestorsFailedF, QueryFailedF, GetAcceptedFrontierFailedF, GetAcceptedFailedF func(ctx context.Context, nodeID ids.NodeID, requestID uint32) error - AppRequestFailedF func(ctx context.Context, nodeID ids.NodeID, requestID uint32) error + AppRequestFailedF func(ctx context.Context, nodeID ids.NodeID, requestID uint32, appErr *AppError) error StateSummaryFrontierF func(ctx context.Context, nodeID ids.NodeID, requestID uint32, summary []byte) error - GetAcceptedStateSummaryF func(ctx context.Context, nodeID ids.NodeID, requestID uint32, keys []uint64) error - AcceptedStateSummaryF func(ctx context.Context, nodeID ids.NodeID, requestID uint32, summaryIDs []ids.ID) error + GetAcceptedStateSummaryF func(ctx context.Context, nodeID ids.NodeID, requestID uint32, keys set.Set[uint64]) error + AcceptedStateSummaryF func(ctx context.Context, nodeID ids.NodeID, requestID uint32, summaryIDs set.Set[ids.ID]) error ConnectedF func(ctx context.Context, nodeID ids.NodeID, nodeVersion *version.Application) error DisconnectedF func(ctx context.Context, nodeID ids.NodeID) error HealthF func(context.Context) (interface{}, error) @@ -132,7 +138,7 @@ type EngineTest struct { AppGossipF func(ctx context.Context, nodeID ids.NodeID, msg []byte) error CrossChainAppRequestF func(ctx context.Context, chainID ids.ID, requestID uint32, deadline time.Time, msg []byte) error CrossChainAppResponseF func(ctx context.Context, chainID ids.ID, requestID uint32, msg []byte) error - 
CrossChainAppRequestFailedF func(ctx context.Context, chainID ids.ID, requestID uint32) error + CrossChainAppRequestFailedF func(ctx context.Context, chainID ids.ID, requestID uint32, appErr *AppError) error } func (e *EngineTest) Default(cant bool) { @@ -187,7 +193,7 @@ func (e *EngineTest) Start(ctx context.Context, startReqID uint32) error { return nil } if e.T != nil { - e.T.Fatalf("Unexpectedly called Start") + require.FailNow(e.T, errStart.Error()) } return errStart } @@ -200,7 +206,7 @@ func (e *EngineTest) Context() *snow.ConsensusContext { return nil } if e.T != nil { - e.T.Fatalf("Unexpectedly called Context") + require.FailNow(e.T, "Unexpectedly called Context") } return nil } @@ -213,7 +219,7 @@ func (e *EngineTest) Timeout(ctx context.Context) error { return nil } if e.T != nil { - e.T.Fatal(errTimeout) + require.FailNow(e.T, errTimeout.Error()) } return errTimeout } @@ -226,7 +232,7 @@ func (e *EngineTest) Gossip(ctx context.Context) error { return nil } if e.T != nil { - e.T.Fatal(errGossip) + require.FailNow(e.T, errGossip.Error()) } return errGossip } @@ -236,8 +242,11 @@ func (e *EngineTest) Halt(ctx context.Context) { e.HaltF(ctx) return } - if e.CantHalt && e.T != nil { - e.T.Fatalf("Unexpectedly called Halt") + if !e.CantHalt { + return + } + if e.T != nil { + require.FailNow(e.T, "Unexpectedly called Halt") } } @@ -249,7 +258,7 @@ func (e *EngineTest) Shutdown(ctx context.Context) error { return nil } if e.T != nil { - e.T.Fatal(errShutdown) + require.FailNow(e.T, errShutdown.Error()) } return errShutdown } @@ -262,7 +271,7 @@ func (e *EngineTest) Notify(ctx context.Context, msg Message) error { return nil } if e.T != nil { - e.T.Fatal(errNotify) + require.FailNow(e.T, errNotify.Error()) } return errNotify } @@ -275,7 +284,7 @@ func (e *EngineTest) GetStateSummaryFrontier(ctx context.Context, validatorID id return nil } if e.T != nil { - e.T.Fatalf("Unexpectedly called GetStateSummaryFrontier") + require.FailNow(e.T, 
errGetStateSummaryFrontier.Error()) } return errGetStateSummaryFrontier } @@ -288,7 +297,7 @@ func (e *EngineTest) StateSummaryFrontier(ctx context.Context, validatorID ids.N return nil } if e.T != nil { - e.T.Fatalf("Unexpectedly called CantStateSummaryFrontier") + require.FailNow(e.T, errStateSummaryFrontier.Error()) } return errStateSummaryFrontier } @@ -301,12 +310,12 @@ func (e *EngineTest) GetStateSummaryFrontierFailed(ctx context.Context, validato return nil } if e.T != nil { - e.T.Fatalf("Unexpectedly called GetStateSummaryFrontierFailed") + require.FailNow(e.T, errGetStateSummaryFrontierFailed.Error()) } return errGetStateSummaryFrontierFailed } -func (e *EngineTest) GetAcceptedStateSummary(ctx context.Context, validatorID ids.NodeID, requestID uint32, keys []uint64) error { +func (e *EngineTest) GetAcceptedStateSummary(ctx context.Context, validatorID ids.NodeID, requestID uint32, keys set.Set[uint64]) error { if e.GetAcceptedStateSummaryF != nil { return e.GetAcceptedStateSummaryF(ctx, validatorID, requestID, keys) } @@ -314,12 +323,12 @@ func (e *EngineTest) GetAcceptedStateSummary(ctx context.Context, validatorID id return nil } if e.T != nil { - e.T.Fatalf("Unexpectedly called GetAcceptedStateSummary") + require.FailNow(e.T, errGetAcceptedStateSummary.Error()) } return errGetAcceptedStateSummary } -func (e *EngineTest) AcceptedStateSummary(ctx context.Context, validatorID ids.NodeID, requestID uint32, summaryIDs []ids.ID) error { +func (e *EngineTest) AcceptedStateSummary(ctx context.Context, validatorID ids.NodeID, requestID uint32, summaryIDs set.Set[ids.ID]) error { if e.AcceptedStateSummaryF != nil { return e.AcceptedStateSummaryF(ctx, validatorID, requestID, summaryIDs) } @@ -327,7 +336,7 @@ func (e *EngineTest) AcceptedStateSummary(ctx context.Context, validatorID ids.N return nil } if e.T != nil { - e.T.Fatalf("Unexpectedly called AcceptedStateSummary") + require.FailNow(e.T, errAcceptedStateSummary.Error()) } return errAcceptedStateSummary } 
@@ -340,7 +349,7 @@ func (e *EngineTest) GetAcceptedStateSummaryFailed(ctx context.Context, validato return nil } if e.T != nil { - e.T.Fatalf("Unexpectedly called GetAcceptedStateSummaryFailed") + require.FailNow(e.T, errGetAcceptedStateSummaryFailed.Error()) } return errGetAcceptedStateSummaryFailed } @@ -353,7 +362,7 @@ func (e *EngineTest) GetAcceptedFrontier(ctx context.Context, nodeID ids.NodeID, return nil } if e.T != nil { - e.T.Fatal(errGetAcceptedFrontier) + require.FailNow(e.T, errGetAcceptedFrontier.Error()) } return errGetAcceptedFrontier } @@ -366,25 +375,25 @@ func (e *EngineTest) GetAcceptedFrontierFailed(ctx context.Context, nodeID ids.N return nil } if e.T != nil { - e.T.Fatal(errGetAcceptedFrontierFailed) + require.FailNow(e.T, errGetAcceptedFrontierFailed.Error()) } return errGetAcceptedFrontierFailed } -func (e *EngineTest) AcceptedFrontier(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerIDs []ids.ID) error { +func (e *EngineTest) AcceptedFrontier(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerID ids.ID) error { if e.AcceptedFrontierF != nil { - return e.AcceptedFrontierF(ctx, nodeID, requestID, containerIDs) + return e.AcceptedFrontierF(ctx, nodeID, requestID, containerID) } if !e.CantAcceptedFrontier { return nil } if e.T != nil { - e.T.Fatal(errAcceptedFrontier) + require.FailNow(e.T, errAcceptedFrontier.Error()) } return errAcceptedFrontier } -func (e *EngineTest) GetAccepted(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerIDs []ids.ID) error { +func (e *EngineTest) GetAccepted(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerIDs set.Set[ids.ID]) error { if e.GetAcceptedF != nil { return e.GetAcceptedF(ctx, nodeID, requestID, containerIDs) } @@ -392,7 +401,7 @@ func (e *EngineTest) GetAccepted(ctx context.Context, nodeID ids.NodeID, request return nil } if e.T != nil { - e.T.Fatal(errGetAccepted) + require.FailNow(e.T, errGetAccepted.Error()) } return errGetAccepted 
} @@ -405,12 +414,12 @@ func (e *EngineTest) GetAcceptedFailed(ctx context.Context, nodeID ids.NodeID, r return nil } if e.T != nil { - e.T.Fatal(errGetAcceptedFailed) + require.FailNow(e.T, errGetAcceptedFailed.Error()) } return errGetAcceptedFailed } -func (e *EngineTest) Accepted(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerIDs []ids.ID) error { +func (e *EngineTest) Accepted(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerIDs set.Set[ids.ID]) error { if e.AcceptedF != nil { return e.AcceptedF(ctx, nodeID, requestID, containerIDs) } @@ -418,7 +427,7 @@ func (e *EngineTest) Accepted(ctx context.Context, nodeID ids.NodeID, requestID return nil } if e.T != nil { - e.T.Fatal(errAccepted) + require.FailNow(e.T, errAccepted.Error()) } return errAccepted } @@ -431,7 +440,7 @@ func (e *EngineTest) Get(ctx context.Context, nodeID ids.NodeID, requestID uint3 return nil } if e.T != nil { - e.T.Fatal(errGet) + require.FailNow(e.T, errGet.Error()) } return errGet } @@ -444,7 +453,7 @@ func (e *EngineTest) GetAncestors(ctx context.Context, nodeID ids.NodeID, reques return nil } if e.T != nil { - e.T.Fatal(errGetAncestors) + require.FailNow(e.T, errGetAncestors.Error()) } return errGetAncestors } @@ -457,7 +466,7 @@ func (e *EngineTest) GetFailed(ctx context.Context, nodeID ids.NodeID, requestID return nil } if e.T != nil { - e.T.Fatal(errGetFailed) + require.FailNow(e.T, errGetFailed.Error()) } return errGetFailed } @@ -466,11 +475,11 @@ func (e *EngineTest) GetAncestorsFailed(ctx context.Context, nodeID ids.NodeID, if e.GetAncestorsFailedF != nil { return e.GetAncestorsFailedF(ctx, nodeID, requestID) } - if e.CantGetAncestorsFailed { + if !e.CantGetAncestorsFailed { return nil } if e.T != nil { - e.T.Fatal(errGetAncestorsFailed) + require.FailNow(e.T, errGetAncestorsFailed.Error()) } return errGetAncestorsFailed } @@ -483,7 +492,7 @@ func (e *EngineTest) Put(ctx context.Context, nodeID ids.NodeID, requestID uint3 return nil } if e.T 
!= nil { - e.T.Fatal(errPut) + require.FailNow(e.T, errPut.Error()) } return errPut } @@ -496,33 +505,33 @@ func (e *EngineTest) Ancestors(ctx context.Context, nodeID ids.NodeID, requestID return nil } if e.T != nil { - e.T.Fatal(errAncestors) + require.FailNow(e.T, errAncestors.Error()) } return errAncestors } -func (e *EngineTest) PushQuery(ctx context.Context, nodeID ids.NodeID, requestID uint32, container []byte) error { +func (e *EngineTest) PushQuery(ctx context.Context, nodeID ids.NodeID, requestID uint32, container []byte, requestedHeight uint64) error { if e.PushQueryF != nil { - return e.PushQueryF(ctx, nodeID, requestID, container) + return e.PushQueryF(ctx, nodeID, requestID, container, requestedHeight) } if !e.CantPushQuery { return nil } if e.T != nil { - e.T.Fatal(errPushQuery) + require.FailNow(e.T, errPushQuery.Error()) } return errPushQuery } -func (e *EngineTest) PullQuery(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerID ids.ID) error { +func (e *EngineTest) PullQuery(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerID ids.ID, requestedHeight uint64) error { if e.PullQueryF != nil { - return e.PullQueryF(ctx, nodeID, requestID, containerID) + return e.PullQueryF(ctx, nodeID, requestID, containerID, requestedHeight) } if !e.CantPullQuery { return nil } if e.T != nil { - e.T.Fatal(errPullQuery) + require.FailNow(e.T, errPullQuery.Error()) } return errPullQuery } @@ -535,7 +544,7 @@ func (e *EngineTest) QueryFailed(ctx context.Context, nodeID ids.NodeID, request return nil } if e.T != nil { - e.T.Fatal(errQueryFailed) + require.FailNow(e.T, errQueryFailed.Error()) } return errQueryFailed } @@ -548,20 +557,20 @@ func (e *EngineTest) CrossChainAppRequest(ctx context.Context, chainID ids.ID, r return nil } if e.T != nil { - e.T.Fatal(errCrossChainAppRequest) + require.FailNow(e.T, errCrossChainAppRequest.Error()) } return errCrossChainAppRequest } -func (e *EngineTest) CrossChainAppRequestFailed(ctx 
context.Context, chainID ids.ID, requestID uint32) error { +func (e *EngineTest) CrossChainAppRequestFailed(ctx context.Context, chainID ids.ID, requestID uint32, appErr *AppError) error { if e.CrossChainAppRequestFailedF != nil { - return e.CrossChainAppRequestFailedF(ctx, chainID, requestID) + return e.CrossChainAppRequestFailedF(ctx, chainID, requestID, appErr) } if !e.CantCrossChainAppRequestFailed { return nil } if e.T != nil { - e.T.Fatal(errCrossChainAppRequestFailed) + require.FailNow(e.T, errCrossChainAppRequestFailed.Error()) } return errCrossChainAppRequestFailed } @@ -574,7 +583,7 @@ func (e *EngineTest) CrossChainAppResponse(ctx context.Context, chainID ids.ID, return nil } if e.T != nil { - e.T.Fatal(errCrossChainAppResponse) + require.FailNow(e.T, errCrossChainAppResponse.Error()) } return errCrossChainAppResponse } @@ -587,7 +596,7 @@ func (e *EngineTest) AppRequest(ctx context.Context, nodeID ids.NodeID, requestI return nil } if e.T != nil { - e.T.Fatal(errAppRequest) + require.FailNow(e.T, errAppRequest.Error()) } return errAppRequest } @@ -600,20 +609,20 @@ func (e *EngineTest) AppResponse(ctx context.Context, nodeID ids.NodeID, request return nil } if e.T != nil { - e.T.Fatal(errAppResponse) + require.FailNow(e.T, errAppResponse.Error()) } return errAppResponse } -func (e *EngineTest) AppRequestFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32) error { +func (e *EngineTest) AppRequestFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32, appErr *AppError) error { if e.AppRequestFailedF != nil { - return e.AppRequestFailedF(ctx, nodeID, requestID) + return e.AppRequestFailedF(ctx, nodeID, requestID, appErr) } if !e.CantAppRequestFailed { return nil } if e.T != nil { - e.T.Fatal(errAppRequestFailed) + require.FailNow(e.T, errAppRequestFailed.Error()) } return errAppRequestFailed } @@ -626,20 +635,20 @@ func (e *EngineTest) AppGossip(ctx context.Context, nodeID ids.NodeID, msg []byt return nil } if e.T != nil { - 
e.T.Fatal(errAppGossip) + require.FailNow(e.T, errAppGossip.Error()) } return errAppGossip } -func (e *EngineTest) Chits(ctx context.Context, nodeID ids.NodeID, requestID uint32, preferredIDs []ids.ID, acceptedIDs []ids.ID) error { +func (e *EngineTest) Chits(ctx context.Context, nodeID ids.NodeID, requestID uint32, preferredID ids.ID, preferredIDAtHeight ids.ID, acceptedID ids.ID) error { if e.ChitsF != nil { - return e.ChitsF(ctx, nodeID, requestID, preferredIDs, acceptedIDs) + return e.ChitsF(ctx, nodeID, requestID, preferredID, preferredIDAtHeight, acceptedID) } if !e.CantChits { return nil } if e.T != nil { - e.T.Fatal(errChits) + require.FailNow(e.T, errChits.Error()) } return errChits } @@ -652,7 +661,7 @@ func (e *EngineTest) Connected(ctx context.Context, nodeID ids.NodeID, nodeVersi return nil } if e.T != nil { - e.T.Fatal(errConnected) + require.FailNow(e.T, errConnected.Error()) } return errConnected } @@ -665,7 +674,7 @@ func (e *EngineTest) Disconnected(ctx context.Context, nodeID ids.NodeID) error return nil } if e.T != nil { - e.T.Fatal(errDisconnected) + require.FailNow(e.T, errDisconnected.Error()) } return errDisconnected } @@ -678,17 +687,7 @@ func (e *EngineTest) HealthCheck(ctx context.Context) (interface{}, error) { return nil, nil } if e.T != nil { - e.T.Fatal(errHealthCheck) + require.FailNow(e.T, errHealthCheck.Error()) } return nil, errHealthCheck } - -func (e *EngineTest) GetVM() VM { - if e.GetVMF != nil { - return e.GetVMF() - } - if e.CantGetVM && e.T != nil { - e.T.Fatalf("Unexpectedly called GetVM") - } - return nil -} diff --git a/avalanchego/snow/engine/common/test_sender.go b/avalanchego/snow/engine/common/test_sender.go index 0a32dcc0..ef77fc65 100644 --- a/avalanchego/snow/engine/common/test_sender.go +++ b/avalanchego/snow/engine/common/test_sender.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. package common @@ -6,7 +6,8 @@ package common import ( "context" "errors" - "testing" + + "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" @@ -14,18 +15,20 @@ import ( ) var ( - _ Sender = (*SenderTest)(nil) + _ Sender = (*SenderTest)(nil) + _ AppSender = (*FakeSender)(nil) errAccept = errors.New("unexpectedly called Accept") errSendAppRequest = errors.New("unexpectedly called SendAppRequest") errSendAppResponse = errors.New("unexpectedly called SendAppResponse") + errSendAppError = errors.New("unexpectedly called SendAppError") errSendAppGossip = errors.New("unexpectedly called SendAppGossip") errSendAppGossipSpecific = errors.New("unexpectedly called SendAppGossipSpecific") ) // SenderTest is a test sender type SenderTest struct { - T *testing.T + T require.TestingT CantAccept, CantSendGetStateSummaryFrontier, CantSendStateSummaryFrontier, @@ -35,8 +38,9 @@ type SenderTest struct { CantSendGet, CantSendGetAncestors, CantSendPut, CantSendAncestors, CantSendPullQuery, CantSendPushQuery, CantSendChits, CantSendGossip, - CantSendAppRequest, CantSendAppResponse, CantSendAppGossip, CantSendAppGossipSpecific, - CantSendCrossChainAppRequest, CantSendCrossChainAppResponse bool + CantSendAppRequest, CantSendAppResponse, CantSendAppError, + CantSendAppGossip, CantSendAppGossipSpecific, + CantSendCrossChainAppRequest, CantSendCrossChainAppResponse, CantSendCrossChainAppError bool AcceptF func(*snow.ConsensusContext, ids.ID, []byte) error SendGetStateSummaryFrontierF func(context.Context, set.Set[ids.NodeID], uint32) @@ -44,23 +48,25 @@ type SenderTest struct { SendGetAcceptedStateSummaryF func(context.Context, set.Set[ids.NodeID], uint32, []uint64) SendAcceptedStateSummaryF func(context.Context, ids.NodeID, uint32, []ids.ID) SendGetAcceptedFrontierF func(context.Context, set.Set[ids.NodeID], uint32) - SendAcceptedFrontierF func(context.Context, ids.NodeID, uint32, 
[]ids.ID) + SendAcceptedFrontierF func(context.Context, ids.NodeID, uint32, ids.ID) SendGetAcceptedF func(context.Context, set.Set[ids.NodeID], uint32, []ids.ID) SendAcceptedF func(context.Context, ids.NodeID, uint32, []ids.ID) SendGetF func(context.Context, ids.NodeID, uint32, ids.ID) SendGetAncestorsF func(context.Context, ids.NodeID, uint32, ids.ID) SendPutF func(context.Context, ids.NodeID, uint32, []byte) SendAncestorsF func(context.Context, ids.NodeID, uint32, [][]byte) - SendPushQueryF func(context.Context, set.Set[ids.NodeID], uint32, []byte) - SendPullQueryF func(context.Context, set.Set[ids.NodeID], uint32, ids.ID) - SendChitsF func(context.Context, ids.NodeID, uint32, []ids.ID, []ids.ID) + SendPushQueryF func(context.Context, set.Set[ids.NodeID], uint32, []byte, uint64) + SendPullQueryF func(context.Context, set.Set[ids.NodeID], uint32, ids.ID, uint64) + SendChitsF func(context.Context, ids.NodeID, uint32, ids.ID, ids.ID, ids.ID) SendGossipF func(context.Context, []byte) SendAppRequestF func(context.Context, set.Set[ids.NodeID], uint32, []byte) error SendAppResponseF func(context.Context, ids.NodeID, uint32, []byte) error + SendAppErrorF func(context.Context, ids.NodeID, uint32, int32, string) error SendAppGossipF func(context.Context, []byte) error SendAppGossipSpecificF func(context.Context, set.Set[ids.NodeID], []byte) error SendCrossChainAppRequestF func(context.Context, ids.ID, uint32, []byte) SendCrossChainAppResponseF func(context.Context, ids.ID, uint32, []byte) + SendCrossChainAppErrorF func(context.Context, ids.ID, uint32, int32, string) } // Default set the default callable value to [cant] @@ -90,9 +96,9 @@ func (s *SenderTest) Default(cant bool) { s.CantSendCrossChainAppResponse = cant } -// SendGetStateSummaryFrontier calls SendGetStateSummaryFrontierF if it was initialized. If it -// wasn't initialized and this function shouldn't be called and testing was -// initialized, then testing will fail. 
+// Accept calls AcceptF if it was initialized. If it wasn't initialized and this +// function shouldn't be called and testing was initialized, then testing will +// fail. func (s *SenderTest) Accept(ctx *snow.ConsensusContext, containerID ids.ID, container []byte) error { if s.AcceptF != nil { return s.AcceptF(ctx, containerID, container) @@ -101,52 +107,52 @@ func (s *SenderTest) Accept(ctx *snow.ConsensusContext, containerID ids.ID, cont return nil } if s.T != nil { - s.T.Fatal(errAccept) + require.FailNow(s.T, errAccept.Error()) } return errAccept } -// SendGetStateSummaryFrontier calls SendGetStateSummaryFrontierF if it was initialized. If it -// wasn't initialized and this function shouldn't be called and testing was -// initialized, then testing will fail. +// SendGetStateSummaryFrontier calls SendGetStateSummaryFrontierF if it was +// initialized. If it wasn't initialized and this function shouldn't be called +// and testing was initialized, then testing will fail. func (s *SenderTest) SendGetStateSummaryFrontier(ctx context.Context, validatorIDs set.Set[ids.NodeID], requestID uint32) { if s.SendGetStateSummaryFrontierF != nil { s.SendGetStateSummaryFrontierF(ctx, validatorIDs, requestID) } else if s.CantSendGetStateSummaryFrontier && s.T != nil { - s.T.Fatalf("Unexpectedly called SendGetStateSummaryFrontier") + require.FailNow(s.T, "Unexpectedly called SendGetStateSummaryFrontier") } } -// SendAcceptedFrontier calls SendAcceptedFrontierF if it was initialized. If it -// wasn't initialized and this function shouldn't be called and testing was -// initialized, then testing will fail. +// SendStateSummaryFrontier calls SendStateSummaryFrontierF if it was +// initialized. If it wasn't initialized and this function shouldn't be called +// and testing was initialized, then testing will fail. 
func (s *SenderTest) SendStateSummaryFrontier(ctx context.Context, validatorID ids.NodeID, requestID uint32, summary []byte) { if s.SendStateSummaryFrontierF != nil { s.SendStateSummaryFrontierF(ctx, validatorID, requestID, summary) } else if s.CantSendStateSummaryFrontier && s.T != nil { - s.T.Fatalf("Unexpectedly called SendStateSummaryFrontier") + require.FailNow(s.T, "Unexpectedly called SendStateSummaryFrontier") } } -// SendGetAcceptedStateSummary calls SendGetAcceptedStateSummaryF if it was initialized. If it wasn't -// initialized and this function shouldn't be called and testing was -// initialized, then testing will fail. +// SendGetAcceptedStateSummary calls SendGetAcceptedStateSummaryF if it was +// initialized. If it wasn't initialized and this function shouldn't be called +// and testing was initialized, then testing will fail. func (s *SenderTest) SendGetAcceptedStateSummary(ctx context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32, heights []uint64) { if s.SendGetAcceptedStateSummaryF != nil { s.SendGetAcceptedStateSummaryF(ctx, nodeIDs, requestID, heights) } else if s.CantSendGetAcceptedStateSummary && s.T != nil { - s.T.Fatalf("Unexpectedly called SendGetAcceptedStateSummaryF") + require.FailNow(s.T, "Unexpectedly called SendGetAcceptedStateSummaryF") } } -// SendAcceptedStateSummary calls SendAcceptedStateSummaryF if it was initialized. If it wasn't -// initialized and this function shouldn't be called and testing was -// initialized, then testing will fail. +// SendAcceptedStateSummary calls SendAcceptedStateSummaryF if it was +// initialized. If it wasn't initialized and this function shouldn't be called +// and testing was initialized, then testing will fail. 
func (s *SenderTest) SendAcceptedStateSummary(ctx context.Context, validatorID ids.NodeID, requestID uint32, summaryIDs []ids.ID) { if s.SendAcceptedStateSummaryF != nil { s.SendAcceptedStateSummaryF(ctx, validatorID, requestID, summaryIDs) } else if s.CantSendAcceptedStateSummary && s.T != nil { - s.T.Fatalf("Unexpectedly called SendAcceptedStateSummary") + require.FailNow(s.T, "Unexpectedly called SendAcceptedStateSummary") } } @@ -157,18 +163,18 @@ func (s *SenderTest) SendGetAcceptedFrontier(ctx context.Context, validatorIDs s if s.SendGetAcceptedFrontierF != nil { s.SendGetAcceptedFrontierF(ctx, validatorIDs, requestID) } else if s.CantSendGetAcceptedFrontier && s.T != nil { - s.T.Fatalf("Unexpectedly called SendGetAcceptedFrontier") + require.FailNow(s.T, "Unexpectedly called SendGetAcceptedFrontier") } } // SendAcceptedFrontier calls SendAcceptedFrontierF if it was initialized. If it // wasn't initialized and this function shouldn't be called and testing was // initialized, then testing will fail. 
-func (s *SenderTest) SendAcceptedFrontier(ctx context.Context, validatorID ids.NodeID, requestID uint32, containerIDs []ids.ID) { +func (s *SenderTest) SendAcceptedFrontier(ctx context.Context, validatorID ids.NodeID, requestID uint32, containerID ids.ID) { if s.SendAcceptedFrontierF != nil { - s.SendAcceptedFrontierF(ctx, validatorID, requestID, containerIDs) + s.SendAcceptedFrontierF(ctx, validatorID, requestID, containerID) } else if s.CantSendAcceptedFrontier && s.T != nil { - s.T.Fatalf("Unexpectedly called SendAcceptedFrontier") + require.FailNow(s.T, "Unexpectedly called SendAcceptedFrontier") } } @@ -179,7 +185,7 @@ func (s *SenderTest) SendGetAccepted(ctx context.Context, nodeIDs set.Set[ids.No if s.SendGetAcceptedF != nil { s.SendGetAcceptedF(ctx, nodeIDs, requestID, containerIDs) } else if s.CantSendGetAccepted && s.T != nil { - s.T.Fatalf("Unexpectedly called SendGetAccepted") + require.FailNow(s.T, "Unexpectedly called SendGetAccepted") } } @@ -190,84 +196,84 @@ func (s *SenderTest) SendAccepted(ctx context.Context, validatorID ids.NodeID, r if s.SendAcceptedF != nil { s.SendAcceptedF(ctx, validatorID, requestID, containerIDs) } else if s.CantSendAccepted && s.T != nil { - s.T.Fatalf("Unexpectedly called SendAccepted") + require.FailNow(s.T, "Unexpectedly called SendAccepted") } } // SendGet calls SendGetF if it was initialized. If it wasn't initialized and // this function shouldn't be called and testing was initialized, then testing // will fail. 
-func (s *SenderTest) SendGet(ctx context.Context, vdr ids.NodeID, requestID uint32, vtxID ids.ID) { +func (s *SenderTest) SendGet(ctx context.Context, vdr ids.NodeID, requestID uint32, containerID ids.ID) { if s.SendGetF != nil { - s.SendGetF(ctx, vdr, requestID, vtxID) + s.SendGetF(ctx, vdr, requestID, containerID) } else if s.CantSendGet && s.T != nil { - s.T.Fatalf("Unexpectedly called SendGet") + require.FailNow(s.T, "Unexpectedly called SendGet") } } // SendGetAncestors calls SendGetAncestorsF if it was initialized. If it wasn't // initialized and this function shouldn't be called and testing was // initialized, then testing will fail. -func (s *SenderTest) SendGetAncestors(ctx context.Context, validatorID ids.NodeID, requestID uint32, vtxID ids.ID) { +func (s *SenderTest) SendGetAncestors(ctx context.Context, validatorID ids.NodeID, requestID uint32, containerID ids.ID) { if s.SendGetAncestorsF != nil { - s.SendGetAncestorsF(ctx, validatorID, requestID, vtxID) + s.SendGetAncestorsF(ctx, validatorID, requestID, containerID) } else if s.CantSendGetAncestors && s.T != nil { - s.T.Fatalf("Unexpectedly called SendCantSendGetAncestors") + require.FailNow(s.T, "Unexpectedly called SendCantSendGetAncestors") } } // SendPut calls SendPutF if it was initialized. If it wasn't initialized and // this function shouldn't be called and testing was initialized, then testing // will fail. -func (s *SenderTest) SendPut(ctx context.Context, vdr ids.NodeID, requestID uint32, vtx []byte) { +func (s *SenderTest) SendPut(ctx context.Context, vdr ids.NodeID, requestID uint32, container []byte) { if s.SendPutF != nil { - s.SendPutF(ctx, vdr, requestID, vtx) + s.SendPutF(ctx, vdr, requestID, container) } else if s.CantSendPut && s.T != nil { - s.T.Fatalf("Unexpectedly called SendPut") + require.FailNow(s.T, "Unexpectedly called SendPut") } } // SendAncestors calls SendAncestorsF if it was initialized. 
If it wasn't // initialized and this function shouldn't be called and testing was // initialized, then testing will fail. -func (s *SenderTest) SendAncestors(ctx context.Context, vdr ids.NodeID, requestID uint32, vtxs [][]byte) { +func (s *SenderTest) SendAncestors(ctx context.Context, vdr ids.NodeID, requestID uint32, containers [][]byte) { if s.SendAncestorsF != nil { - s.SendAncestorsF(ctx, vdr, requestID, vtxs) + s.SendAncestorsF(ctx, vdr, requestID, containers) } else if s.CantSendAncestors && s.T != nil { - s.T.Fatalf("Unexpectedly called SendAncestors") + require.FailNow(s.T, "Unexpectedly called SendAncestors") } } // SendPushQuery calls SendPushQueryF if it was initialized. If it wasn't // initialized and this function shouldn't be called and testing was // initialized, then testing will fail. -func (s *SenderTest) SendPushQuery(ctx context.Context, vdrs set.Set[ids.NodeID], requestID uint32, vtx []byte) { +func (s *SenderTest) SendPushQuery(ctx context.Context, vdrs set.Set[ids.NodeID], requestID uint32, container []byte, requestedHeight uint64) { if s.SendPushQueryF != nil { - s.SendPushQueryF(ctx, vdrs, requestID, vtx) + s.SendPushQueryF(ctx, vdrs, requestID, container, requestedHeight) } else if s.CantSendPushQuery && s.T != nil { - s.T.Fatalf("Unexpectedly called SendPushQuery") + require.FailNow(s.T, "Unexpectedly called SendPushQuery") } } // SendPullQuery calls SendPullQueryF if it was initialized. If it wasn't // initialized and this function shouldn't be called and testing was // initialized, then testing will fail. 
-func (s *SenderTest) SendPullQuery(ctx context.Context, vdrs set.Set[ids.NodeID], requestID uint32, vtxID ids.ID) { +func (s *SenderTest) SendPullQuery(ctx context.Context, vdrs set.Set[ids.NodeID], requestID uint32, containerID ids.ID, requestedHeight uint64) { if s.SendPullQueryF != nil { - s.SendPullQueryF(ctx, vdrs, requestID, vtxID) + s.SendPullQueryF(ctx, vdrs, requestID, containerID, requestedHeight) } else if s.CantSendPullQuery && s.T != nil { - s.T.Fatalf("Unexpectedly called SendPullQuery") + require.FailNow(s.T, "Unexpectedly called SendPullQuery") } } // SendChits calls SendChitsF if it was initialized. If it wasn't initialized // and this function shouldn't be called and testing was initialized, then // testing will fail. -func (s *SenderTest) SendChits(ctx context.Context, vdr ids.NodeID, requestID uint32, votes []ids.ID, accepted []ids.ID) { +func (s *SenderTest) SendChits(ctx context.Context, vdr ids.NodeID, requestID uint32, preferredID ids.ID, preferredIDAtHeight ids.ID, acceptedID ids.ID) { if s.SendChitsF != nil { - s.SendChitsF(ctx, vdr, requestID, votes, accepted) + s.SendChitsF(ctx, vdr, requestID, preferredID, preferredIDAtHeight, acceptedID) } else if s.CantSendChits && s.T != nil { - s.T.Fatalf("Unexpectedly called SendChits") + require.FailNow(s.T, "Unexpectedly called SendChits") } } @@ -278,24 +284,42 @@ func (s *SenderTest) SendGossip(ctx context.Context, container []byte) { if s.SendGossipF != nil { s.SendGossipF(ctx, container) } else if s.CantSendGossip && s.T != nil { - s.T.Fatalf("Unexpectedly called SendGossip") + require.FailNow(s.T, "Unexpectedly called SendGossip") } } +// SendCrossChainAppRequest calls SendCrossChainAppRequestF if it was +// initialized. If it wasn't initialized and this function shouldn't be called +// and testing was initialized, then testing will fail. 
func (s *SenderTest) SendCrossChainAppRequest(ctx context.Context, chainID ids.ID, requestID uint32, appRequestBytes []byte) error { if s.SendCrossChainAppRequestF != nil { s.SendCrossChainAppRequestF(ctx, chainID, requestID, appRequestBytes) } else if s.CantSendCrossChainAppRequest && s.T != nil { - s.T.Fatal("Unexpectedly called SendCrossChainAppRequest") + require.FailNow(s.T, "Unexpectedly called SendCrossChainAppRequest") } return nil } +// SendCrossChainAppResponse calls SendCrossChainAppResponseF if it was +// initialized. If it wasn't initialized and this function shouldn't be called +// and testing was initialized, then testing will fail. func (s *SenderTest) SendCrossChainAppResponse(ctx context.Context, chainID ids.ID, requestID uint32, appResponseBytes []byte) error { if s.SendCrossChainAppResponseF != nil { s.SendCrossChainAppResponseF(ctx, chainID, requestID, appResponseBytes) } else if s.CantSendCrossChainAppResponse && s.T != nil { - s.T.Fatal("Unexpectedly called SendCrossChainAppResponse") + require.FailNow(s.T, "Unexpectedly called SendCrossChainAppResponse") + } + return nil +} + +// SendCrossChainAppError calls SendCrossChainAppErrorF if it was +// initialized. If it wasn't initialized and this function shouldn't be called +// and testing was initialized, then testing will fail. 
+func (s *SenderTest) SendCrossChainAppError(ctx context.Context, chainID ids.ID, requestID uint32, errorCode int32, errorMessage string) error { + if s.SendCrossChainAppErrorF != nil { + s.SendCrossChainAppErrorF(ctx, chainID, requestID, errorCode, errorMessage) + } else if s.CantSendCrossChainAppError && s.T != nil { + require.FailNow(s.T, "Unexpectedly called SendCrossChainAppError") } return nil } @@ -308,7 +332,7 @@ func (s *SenderTest) SendAppRequest(ctx context.Context, nodeIDs set.Set[ids.Nod case s.SendAppRequestF != nil: return s.SendAppRequestF(ctx, nodeIDs, requestID, appRequestBytes) case s.CantSendAppRequest && s.T != nil: - s.T.Fatal(errSendAppRequest) + require.FailNow(s.T, errSendAppRequest.Error()) } return errSendAppRequest } @@ -321,11 +345,24 @@ func (s *SenderTest) SendAppResponse(ctx context.Context, nodeID ids.NodeID, req case s.SendAppResponseF != nil: return s.SendAppResponseF(ctx, nodeID, requestID, appResponseBytes) case s.CantSendAppResponse && s.T != nil: - s.T.Fatal(errSendAppResponse) + require.FailNow(s.T, errSendAppResponse.Error()) } return errSendAppResponse } +// SendAppError calls SendAppErrorF if it was initialized. If it wasn't +// initialized and this function shouldn't be called and testing was +// initialized, then testing will fail. +func (s *SenderTest) SendAppError(ctx context.Context, nodeID ids.NodeID, requestID uint32, code int32, message string) error { + switch { + case s.SendAppErrorF != nil: + return s.SendAppErrorF(ctx, nodeID, requestID, code, message) + case s.CantSendAppError && s.T != nil: + require.FailNow(s.T, errSendAppError.Error()) + } + return errSendAppError +} + // SendAppGossip calls SendAppGossipF if it was initialized. If it wasn't // initialized and this function shouldn't be called and testing was // initialized, then testing will fail. 
@@ -334,7 +371,7 @@ func (s *SenderTest) SendAppGossip(ctx context.Context, appGossipBytes []byte) e case s.SendAppGossipF != nil: return s.SendAppGossipF(ctx, appGossipBytes) case s.CantSendAppGossip && s.T != nil: - s.T.Fatal(errSendAppGossip) + require.FailNow(s.T, errSendAppGossip.Error()) } return errSendAppGossip } @@ -347,7 +384,94 @@ func (s *SenderTest) SendAppGossipSpecific(ctx context.Context, nodeIDs set.Set[ case s.SendAppGossipSpecificF != nil: return s.SendAppGossipSpecificF(ctx, nodeIDs, appGossipBytes) case s.CantSendAppGossipSpecific && s.T != nil: - s.T.Fatal(errSendAppGossipSpecific) + require.FailNow(s.T, errSendAppGossipSpecific.Error()) } return errSendAppGossipSpecific } + +// FakeSender is used for testing +type FakeSender struct { + SentAppRequest, SentAppResponse, + SentAppGossip, SentAppGossipSpecific, + SentCrossChainAppRequest, SentCrossChainAppResponse chan []byte + + SentAppError, SentCrossChainAppError chan *AppError +} + +func (f FakeSender) SendAppRequest(_ context.Context, _ set.Set[ids.NodeID], _ uint32, bytes []byte) error { + if f.SentAppRequest == nil { + return nil + } + + f.SentAppRequest <- bytes + return nil +} + +func (f FakeSender) SendAppResponse(_ context.Context, _ ids.NodeID, _ uint32, bytes []byte) error { + if f.SentAppResponse == nil { + return nil + } + + f.SentAppResponse <- bytes + return nil +} + +func (f FakeSender) SendAppError(_ context.Context, _ ids.NodeID, _ uint32, errorCode int32, errorMessage string) error { + if f.SentAppError == nil { + return nil + } + + f.SentAppError <- &AppError{ + Code: errorCode, + Message: errorMessage, + } + return nil +} + +func (f FakeSender) SendAppGossip(_ context.Context, bytes []byte) error { + if f.SentAppGossip == nil { + return nil + } + + f.SentAppGossip <- bytes + return nil +} + +func (f FakeSender) SendAppGossipSpecific(_ context.Context, _ set.Set[ids.NodeID], bytes []byte) error { + if f.SentAppGossipSpecific == nil { + return nil + } + + 
f.SentAppGossipSpecific <- bytes + return nil +} + +func (f FakeSender) SendCrossChainAppRequest(_ context.Context, _ ids.ID, _ uint32, bytes []byte) error { + if f.SentCrossChainAppRequest == nil { + return nil + } + + f.SentCrossChainAppRequest <- bytes + return nil +} + +func (f FakeSender) SendCrossChainAppResponse(_ context.Context, _ ids.ID, _ uint32, bytes []byte) error { + if f.SentCrossChainAppResponse == nil { + return nil + } + + f.SentCrossChainAppResponse <- bytes + return nil +} + +func (f FakeSender) SendCrossChainAppError(_ context.Context, _ ids.ID, _ uint32, errorCode int32, errorMessage string) error { + if f.SentCrossChainAppError == nil { + return nil + } + + f.SentCrossChainAppError <- &AppError{ + Code: errorCode, + Message: errorMessage, + } + return nil +} diff --git a/avalanchego/snow/engine/common/test_timer.go b/avalanchego/snow/engine/common/test_timer.go index a563e65c..6da0d925 100644 --- a/avalanchego/snow/engine/common/test_timer.go +++ b/avalanchego/snow/engine/common/test_timer.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package common @@ -6,6 +6,8 @@ package common import ( "testing" "time" + + "github.com/stretchr/testify/require" ) var _ Timer = (*TimerTest)(nil) @@ -28,6 +30,6 @@ func (t *TimerTest) RegisterTimeout(delay time.Duration) { if t.RegisterTimeoutF != nil { t.RegisterTimeoutF(delay) } else if t.CantRegisterTimout && t.T != nil { - t.T.Fatalf("Unexpectedly called RegisterTimeout") + require.FailNow(t.T, "Unexpectedly called RegisterTimeout") } } diff --git a/avalanchego/snow/engine/common/test_vm.go b/avalanchego/snow/engine/common/test_vm.go index bbf10d4d..828b49f5 100644 --- a/avalanchego/snow/engine/common/test_vm.go +++ b/avalanchego/snow/engine/common/test_vm.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
+// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package common @@ -6,14 +6,16 @@ package common import ( "context" "errors" + "net/http" "testing" "time" - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/version" + "github.com/stretchr/testify/require" - "github.com/ava-labs/avalanchego/database/manager" + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/version" ) var ( @@ -21,7 +23,6 @@ var ( errSetState = errors.New("unexpectedly called SetState") errShutdown = errors.New("unexpectedly called Shutdown") errCreateHandlers = errors.New("unexpectedly called CreateHandlers") - errCreateStaticHandlers = errors.New("unexpectedly called CreateStaticHandlers") errHealthCheck = errors.New("unexpectedly called HealthCheck") errConnected = errors.New("unexpectedly called Connected") errDisconnected = errors.New("unexpectedly called Disconnected") @@ -42,27 +43,26 @@ type TestVM struct { T *testing.T CantInitialize, CantSetState, - CantShutdown, CantCreateHandlers, CantCreateStaticHandlers, + CantShutdown, CantCreateHandlers, CantHealthCheck, CantConnected, CantDisconnected, CantVersion, CantAppRequest, CantAppResponse, CantAppGossip, CantAppRequestFailed, CantCrossChainAppRequest, CantCrossChainAppResponse, CantCrossChainAppRequestFailed bool - InitializeF func(ctx context.Context, chainCtx *snow.Context, db manager.Manager, genesisBytes []byte, upgradeBytes []byte, configBytes []byte, msgChan chan<- Message, fxs []*Fx, appSender AppSender) error + InitializeF func(ctx context.Context, chainCtx *snow.Context, db database.Database, genesisBytes []byte, upgradeBytes []byte, configBytes []byte, msgChan chan<- Message, fxs []*Fx, appSender AppSender) error SetStateF func(ctx context.Context, state snow.State) error ShutdownF func(context.Context) error - CreateHandlersF 
func(context.Context) (map[string]*HTTPHandler, error) - CreateStaticHandlersF func(context.Context) (map[string]*HTTPHandler, error) + CreateHandlersF func(context.Context) (map[string]http.Handler, error) ConnectedF func(ctx context.Context, nodeID ids.NodeID, nodeVersion *version.Application) error DisconnectedF func(ctx context.Context, nodeID ids.NodeID) error HealthCheckF func(context.Context) (interface{}, error) AppRequestF func(ctx context.Context, nodeID ids.NodeID, requestID uint32, deadline time.Time, msg []byte) error AppResponseF func(ctx context.Context, nodeID ids.NodeID, requestID uint32, msg []byte) error AppGossipF func(ctx context.Context, nodeID ids.NodeID, msg []byte) error - AppRequestFailedF func(ctx context.Context, nodeID ids.NodeID, requestID uint32) error + AppRequestFailedF func(ctx context.Context, nodeID ids.NodeID, requestID uint32, appErr *AppError) error VersionF func(context.Context) (string, error) CrossChainAppRequestF func(ctx context.Context, chainID ids.ID, requestID uint32, deadline time.Time, msg []byte) error CrossChainAppResponseF func(ctx context.Context, chainID ids.ID, requestID uint32, msg []byte) error - CrossChainAppRequestFailedF func(ctx context.Context, chainID ids.ID, requestID uint32) error + CrossChainAppRequestFailedF func(ctx context.Context, chainID ids.ID, requestID uint32, appErr *AppError) error } func (vm *TestVM) Default(cant bool) { @@ -70,7 +70,6 @@ func (vm *TestVM) Default(cant bool) { vm.CantSetState = cant vm.CantShutdown = cant vm.CantCreateHandlers = cant - vm.CantCreateStaticHandlers = cant vm.CantHealthCheck = cant vm.CantAppRequest = cant vm.CantAppRequestFailed = cant @@ -87,7 +86,7 @@ func (vm *TestVM) Default(cant bool) { func (vm *TestVM) Initialize( ctx context.Context, chainCtx *snow.Context, - db manager.Manager, + db database.Database, genesisBytes, upgradeBytes, configBytes []byte, @@ -109,7 +108,7 @@ func (vm *TestVM) Initialize( ) } if vm.CantInitialize && vm.T != nil { - 
vm.T.Fatal(errInitialize) + require.FailNow(vm.T, errInitialize.Error()) } return errInitialize } @@ -120,7 +119,7 @@ func (vm *TestVM) SetState(ctx context.Context, state snow.State) error { } if vm.CantSetState { if vm.T != nil { - vm.T.Fatal(errSetState) + require.FailNow(vm.T, errSetState.Error()) } return errSetState } @@ -133,29 +132,19 @@ func (vm *TestVM) Shutdown(ctx context.Context) error { } if vm.CantShutdown { if vm.T != nil { - vm.T.Fatal(errShutdown) + require.FailNow(vm.T, errShutdown.Error()) } return errShutdown } return nil } -func (vm *TestVM) CreateHandlers(ctx context.Context) (map[string]*HTTPHandler, error) { +func (vm *TestVM) CreateHandlers(ctx context.Context) (map[string]http.Handler, error) { if vm.CreateHandlersF != nil { return vm.CreateHandlersF(ctx) } if vm.CantCreateHandlers && vm.T != nil { - vm.T.Fatal(errCreateHandlers) - } - return nil, nil -} - -func (vm *TestVM) CreateStaticHandlers(ctx context.Context) (map[string]*HTTPHandler, error) { - if vm.CreateStaticHandlersF != nil { - return vm.CreateStaticHandlersF(ctx) - } - if vm.CantCreateStaticHandlers && vm.T != nil { - vm.T.Fatal(errCreateStaticHandlers) + require.FailNow(vm.T, errCreateHandlers.Error()) } return nil, nil } @@ -165,7 +154,7 @@ func (vm *TestVM) HealthCheck(ctx context.Context) (interface{}, error) { return vm.HealthCheckF(ctx) } if vm.CantHealthCheck && vm.T != nil { - vm.T.Fatal(errHealthCheck) + require.FailNow(vm.T, errHealthCheck.Error()) } return nil, errHealthCheck } @@ -178,20 +167,20 @@ func (vm *TestVM) AppRequest(ctx context.Context, nodeID ids.NodeID, requestID u return nil } if vm.T != nil { - vm.T.Fatal(errAppRequest) + require.FailNow(vm.T, errAppRequest.Error()) } return errAppRequest } -func (vm *TestVM) AppRequestFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32) error { +func (vm *TestVM) AppRequestFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32, appErr *AppError) error { if vm.AppRequestFailedF != nil { - 
return vm.AppRequestFailedF(ctx, nodeID, requestID) + return vm.AppRequestFailedF(ctx, nodeID, requestID, appErr) } if !vm.CantAppRequestFailed { return nil } if vm.T != nil { - vm.T.Fatal(errAppRequestFailed) + require.FailNow(vm.T, errAppRequestFailed.Error()) } return errAppRequestFailed } @@ -204,7 +193,7 @@ func (vm *TestVM) AppResponse(ctx context.Context, nodeID ids.NodeID, requestID return nil } if vm.T != nil { - vm.T.Fatal(errAppResponse) + require.FailNow(vm.T, errAppResponse.Error()) } return errAppResponse } @@ -217,7 +206,7 @@ func (vm *TestVM) AppGossip(ctx context.Context, nodeID ids.NodeID, msg []byte) return nil } if vm.T != nil { - vm.T.Fatal(errAppGossip) + require.FailNow(vm.T, errAppGossip.Error()) } return errAppGossip } @@ -230,20 +219,20 @@ func (vm *TestVM) CrossChainAppRequest(ctx context.Context, chainID ids.ID, requ return nil } if vm.T != nil { - vm.T.Fatal(errCrossChainAppRequest) + require.FailNow(vm.T, errCrossChainAppRequest.Error()) } return errCrossChainAppRequest } -func (vm *TestVM) CrossChainAppRequestFailed(ctx context.Context, chainID ids.ID, requestID uint32) error { +func (vm *TestVM) CrossChainAppRequestFailed(ctx context.Context, chainID ids.ID, requestID uint32, appErr *AppError) error { if vm.CrossChainAppRequestFailedF != nil { - return vm.CrossChainAppRequestFailedF(ctx, chainID, requestID) + return vm.CrossChainAppRequestFailedF(ctx, chainID, requestID, appErr) } if !vm.CantCrossChainAppRequestFailed { return nil } if vm.T != nil { - vm.T.Fatal(errCrossChainAppRequestFailed) + require.FailNow(vm.T, errCrossChainAppRequestFailed.Error()) } return errCrossChainAppRequestFailed } @@ -256,7 +245,7 @@ func (vm *TestVM) CrossChainAppResponse(ctx context.Context, chainID ids.ID, req return nil } if vm.T != nil { - vm.T.Fatal(errCrossChainAppResponse) + require.FailNow(vm.T, errCrossChainAppResponse.Error()) } return errCrossChainAppResponse } @@ -266,7 +255,7 @@ func (vm *TestVM) Connected(ctx context.Context, id 
ids.NodeID, nodeVersion *ver return vm.ConnectedF(ctx, id, nodeVersion) } if vm.CantConnected && vm.T != nil { - vm.T.Fatal(errConnected) + require.FailNow(vm.T, errConnected.Error()) } return nil } @@ -276,7 +265,7 @@ func (vm *TestVM) Disconnected(ctx context.Context, id ids.NodeID) error { return vm.DisconnectedF(ctx, id) } if vm.CantDisconnected && vm.T != nil { - vm.T.Fatal(errDisconnected) + require.FailNow(vm.T, errDisconnected.Error()) } return nil } @@ -286,7 +275,7 @@ func (vm *TestVM) Version(ctx context.Context) (string, error) { return vm.VersionF(ctx) } if vm.CantVersion && vm.T != nil { - vm.T.Fatal(errVersion) + require.FailNow(vm.T, errVersion.Error()) } return "", nil } diff --git a/avalanchego/snow/engine/common/timer.go b/avalanchego/snow/engine/common/timer.go index 56d98a05..432bb917 100644 --- a/avalanchego/snow/engine/common/timer.go +++ b/avalanchego/snow/engine/common/timer.go @@ -1,11 +1,9 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package common -import ( - "time" -) +import "time" // Timer describes the standard interface for specifying a timeout type Timer interface { diff --git a/avalanchego/snow/engine/common/traced_bootstrapable_engine.go b/avalanchego/snow/engine/common/traced_bootstrapable_engine.go index c2379799..4c64206a 100644 --- a/avalanchego/snow/engine/common/traced_bootstrapable_engine.go +++ b/avalanchego/snow/engine/common/traced_bootstrapable_engine.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package common @@ -6,11 +6,6 @@ package common import ( "context" - "go.opentelemetry.io/otel/attribute" - - oteltrace "go.opentelemetry.io/otel/trace" - - "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/trace" ) @@ -29,15 +24,9 @@ func TraceBootstrapableEngine(bootstrapableEngine BootstrapableEngine, tracer tr } } -func (e *tracedBootstrapableEngine) ForceAccepted(ctx context.Context, acceptedContainerIDs []ids.ID) error { - ctx, span := e.tracer.Start(ctx, "tracedBootstrapableEngine.ForceAccepted", oteltrace.WithAttributes( - attribute.Int("numAcceptedContainerIDs", len(acceptedContainerIDs)), - )) +func (e *tracedBootstrapableEngine) Clear(ctx context.Context) error { + ctx, span := e.tracer.Start(ctx, "tracedBootstrapableEngine.Clear") defer span.End() - return e.bootstrapableEngine.ForceAccepted(ctx, acceptedContainerIDs) -} - -func (e *tracedBootstrapableEngine) Clear() error { - return e.bootstrapableEngine.Clear() + return e.bootstrapableEngine.Clear(ctx) } diff --git a/avalanchego/snow/engine/common/traced_engine.go b/avalanchego/snow/engine/common/traced_engine.go index f2e723a4..f4fd4943 100644 --- a/avalanchego/snow/engine/common/traced_engine.go +++ b/avalanchego/snow/engine/common/traced_engine.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package common @@ -9,12 +9,13 @@ import ( "go.opentelemetry.io/otel/attribute" - oteltrace "go.opentelemetry.io/otel/trace" - "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/trace" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/version" + + oteltrace "go.opentelemetry.io/otel/trace" ) var _ Engine = (*tracedEngine)(nil) @@ -62,22 +63,22 @@ func (e *tracedEngine) GetStateSummaryFrontierFailed(ctx context.Context, nodeID return e.engine.GetStateSummaryFrontierFailed(ctx, nodeID, requestID) } -func (e *tracedEngine) GetAcceptedStateSummary(ctx context.Context, nodeID ids.NodeID, requestID uint32, heights []uint64) error { +func (e *tracedEngine) GetAcceptedStateSummary(ctx context.Context, nodeID ids.NodeID, requestID uint32, heights set.Set[uint64]) error { ctx, span := e.tracer.Start(ctx, "tracedEngine.GetAcceptedStateSummary", oteltrace.WithAttributes( attribute.Stringer("nodeID", nodeID), attribute.Int64("requestID", int64(requestID)), - attribute.Int("numHeights", len(heights)), + attribute.Int("numHeights", heights.Len()), )) defer span.End() return e.engine.GetAcceptedStateSummary(ctx, nodeID, requestID, heights) } -func (e *tracedEngine) AcceptedStateSummary(ctx context.Context, nodeID ids.NodeID, requestID uint32, summaryIDs []ids.ID) error { +func (e *tracedEngine) AcceptedStateSummary(ctx context.Context, nodeID ids.NodeID, requestID uint32, summaryIDs set.Set[ids.ID]) error { ctx, span := e.tracer.Start(ctx, "tracedEngine.AcceptedStateSummary", oteltrace.WithAttributes( attribute.Stringer("nodeID", nodeID), attribute.Int64("requestID", int64(requestID)), - attribute.Int("numSummaryIDs", len(summaryIDs)), + attribute.Int("numSummaryIDs", summaryIDs.Len()), )) defer span.End() @@ -104,15 +105,15 @@ func (e *tracedEngine) GetAcceptedFrontier(ctx context.Context, nodeID ids.NodeI return e.engine.GetAcceptedFrontier(ctx, nodeID, requestID) } -func (e *tracedEngine) 
AcceptedFrontier(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerIDs []ids.ID) error { +func (e *tracedEngine) AcceptedFrontier(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerID ids.ID) error { ctx, span := e.tracer.Start(ctx, "tracedEngine.AcceptedFrontier", oteltrace.WithAttributes( attribute.Stringer("nodeID", nodeID), attribute.Int64("requestID", int64(requestID)), - attribute.Int("numContainerIDs", len(containerIDs)), + attribute.Stringer("containerID", containerID), )) defer span.End() - return e.engine.AcceptedFrontier(ctx, nodeID, requestID, containerIDs) + return e.engine.AcceptedFrontier(ctx, nodeID, requestID, containerID) } func (e *tracedEngine) GetAcceptedFrontierFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32) error { @@ -125,22 +126,22 @@ func (e *tracedEngine) GetAcceptedFrontierFailed(ctx context.Context, nodeID ids return e.engine.GetAcceptedFrontierFailed(ctx, nodeID, requestID) } -func (e *tracedEngine) GetAccepted(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerIDs []ids.ID) error { +func (e *tracedEngine) GetAccepted(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerIDs set.Set[ids.ID]) error { ctx, span := e.tracer.Start(ctx, "tracedEngine.GetAccepted", oteltrace.WithAttributes( attribute.Stringer("nodeID", nodeID), attribute.Int64("requestID", int64(requestID)), - attribute.Int("numContainerIDs", len(containerIDs)), + attribute.Int("numContainerIDs", containerIDs.Len()), )) defer span.End() return e.engine.GetAccepted(ctx, nodeID, requestID, containerIDs) } -func (e *tracedEngine) Accepted(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerIDs []ids.ID) error { +func (e *tracedEngine) Accepted(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerIDs set.Set[ids.ID]) error { ctx, span := e.tracer.Start(ctx, "tracedEngine.Accepted", oteltrace.WithAttributes( attribute.Stringer("nodeID", nodeID), attribute.Int64("requestID", 
int64(requestID)), - attribute.Int("numContainerIDs", len(containerIDs)), + attribute.Int("numContainerIDs", containerIDs.Len()), )) defer span.End() @@ -221,38 +222,41 @@ func (e *tracedEngine) GetFailed(ctx context.Context, nodeID ids.NodeID, request return e.engine.GetFailed(ctx, nodeID, requestID) } -func (e *tracedEngine) PullQuery(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerID ids.ID) error { +func (e *tracedEngine) PullQuery(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerID ids.ID, requestedHeight uint64) error { ctx, span := e.tracer.Start(ctx, "tracedEngine.PullQuery", oteltrace.WithAttributes( attribute.Stringer("nodeID", nodeID), attribute.Int64("requestID", int64(requestID)), attribute.Stringer("containerID", containerID), + attribute.Int64("requestedHeight", int64(requestedHeight)), )) defer span.End() - return e.engine.PullQuery(ctx, nodeID, requestID, containerID) + return e.engine.PullQuery(ctx, nodeID, requestID, containerID, requestedHeight) } -func (e *tracedEngine) PushQuery(ctx context.Context, nodeID ids.NodeID, requestID uint32, container []byte) error { +func (e *tracedEngine) PushQuery(ctx context.Context, nodeID ids.NodeID, requestID uint32, container []byte, requestedHeight uint64) error { ctx, span := e.tracer.Start(ctx, "tracedEngine.PushQuery", oteltrace.WithAttributes( attribute.Stringer("nodeID", nodeID), attribute.Int64("requestID", int64(requestID)), attribute.Int("containerLen", len(container)), + attribute.Int64("requestedHeight", int64(requestedHeight)), )) defer span.End() - return e.engine.PushQuery(ctx, nodeID, requestID, container) + return e.engine.PushQuery(ctx, nodeID, requestID, container, requestedHeight) } -func (e *tracedEngine) Chits(ctx context.Context, nodeID ids.NodeID, requestID uint32, preferredIDs []ids.ID, acceptedIDs []ids.ID) error { +func (e *tracedEngine) Chits(ctx context.Context, nodeID ids.NodeID, requestID uint32, preferredID ids.ID, preferredIDAtHeight 
ids.ID, acceptedID ids.ID) error { ctx, span := e.tracer.Start(ctx, "tracedEngine.Chits", oteltrace.WithAttributes( attribute.Stringer("nodeID", nodeID), attribute.Int64("requestID", int64(requestID)), - attribute.Int("numPreferredIDs", len(preferredIDs)), - attribute.Int("numAcceptedIDs", len(acceptedIDs)), + attribute.Stringer("preferredID", preferredID), + attribute.Stringer("preferredIDAtHeight", preferredIDAtHeight), + attribute.Stringer("acceptedID", acceptedID), )) defer span.End() - return e.engine.Chits(ctx, nodeID, requestID, preferredIDs, acceptedIDs) + return e.engine.Chits(ctx, nodeID, requestID, preferredID, preferredIDAtHeight, acceptedID) } func (e *tracedEngine) QueryFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32) error { @@ -287,14 +291,14 @@ func (e *tracedEngine) AppResponse(ctx context.Context, nodeID ids.NodeID, reque return e.engine.AppResponse(ctx, nodeID, requestID, response) } -func (e *tracedEngine) AppRequestFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32) error { +func (e *tracedEngine) AppRequestFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32, appErr *AppError) error { ctx, span := e.tracer.Start(ctx, "tracedEngine.AppRequestFailed", oteltrace.WithAttributes( attribute.Stringer("nodeID", nodeID), attribute.Int64("requestID", int64(requestID)), )) defer span.End() - return e.engine.AppRequestFailed(ctx, nodeID, requestID) + return e.engine.AppRequestFailed(ctx, nodeID, requestID, appErr) } func (e *tracedEngine) AppGossip(ctx context.Context, nodeID ids.NodeID, msg []byte) error { @@ -329,14 +333,14 @@ func (e *tracedEngine) CrossChainAppResponse(ctx context.Context, chainID ids.ID return e.engine.CrossChainAppResponse(ctx, chainID, requestID, response) } -func (e *tracedEngine) CrossChainAppRequestFailed(ctx context.Context, chainID ids.ID, requestID uint32) error { +func (e *tracedEngine) CrossChainAppRequestFailed(ctx context.Context, chainID ids.ID, requestID uint32, appErr *AppError) 
error { ctx, span := e.tracer.Start(ctx, "tracedEngine.CrossChainAppRequestFailed", oteltrace.WithAttributes( attribute.Stringer("chainID", chainID), attribute.Int64("requestID", int64(requestID)), )) defer span.End() - return e.engine.CrossChainAppRequestFailed(ctx, chainID, requestID) + return e.engine.CrossChainAppRequestFailed(ctx, chainID, requestID, appErr) } func (e *tracedEngine) Connected(ctx context.Context, nodeID ids.NodeID, nodeVersion *version.Application) error { @@ -414,7 +418,3 @@ func (e *tracedEngine) HealthCheck(ctx context.Context) (interface{}, error) { return e.engine.HealthCheck(ctx) } - -func (e *tracedEngine) GetVM() VM { - return e.engine.GetVM() -} diff --git a/avalanchego/snow/engine/common/traced_state_syncer.go b/avalanchego/snow/engine/common/traced_state_syncer.go index db2569ee..e598b609 100644 --- a/avalanchego/snow/engine/common/traced_state_syncer.go +++ b/avalanchego/snow/engine/common/traced_state_syncer.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package common diff --git a/avalanchego/snow/engine/common/tracker/accepted.go b/avalanchego/snow/engine/common/tracker/accepted.go index 4b3f0f1a..f6c63e3f 100644 --- a/avalanchego/snow/engine/common/tracker/accepted.go +++ b/avalanchego/snow/engine/common/tracker/accepted.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package tracker @@ -9,6 +9,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils/crypto/bls" + "github.com/ava-labs/avalanchego/utils/set" ) var _ Accepted = (*accepted)(nil) @@ -16,24 +17,23 @@ var _ Accepted = (*accepted)(nil) type Accepted interface { validators.SetCallbackListener - // SetAcceptedFrontier updates the latest frontier for [nodeID] to - // [frontier]. If [nodeID] is not currently a validator, this is a noop. - SetAcceptedFrontier(nodeID ids.NodeID, frontier []ids.ID) - // AcceptedFrontier returns the latest known accepted frontier of [nodeID]. - // If [nodeID]'s last accepted frontier is unknown, an empty slice will be - // returned. - AcceptedFrontier(nodeID ids.NodeID) []ids.ID + // SetLastAccepted updates the latest accepted block for [nodeID] to + // [blockID]. If [nodeID] is not currently a validator, this is a noop. + SetLastAccepted(nodeID ids.NodeID, blockID ids.ID) + // LastAccepted returns the latest known accepted block of [nodeID]. If + // [nodeID]'s last accepted block was never unknown, false will be returned. 
+ LastAccepted(nodeID ids.NodeID) (ids.ID, bool) } type accepted struct { - lock sync.RWMutex - // frontier contains an entry for all current validators - frontier map[ids.NodeID][]ids.ID + lock sync.RWMutex + validators set.Set[ids.NodeID] + frontier map[ids.NodeID]ids.ID } func NewAccepted() Accepted { return &accepted{ - frontier: make(map[ids.NodeID][]ids.ID), + frontier: make(map[ids.NodeID]ids.ID), } } @@ -41,30 +41,32 @@ func (a *accepted) OnValidatorAdded(nodeID ids.NodeID, _ *bls.PublicKey, _ ids.I a.lock.Lock() defer a.lock.Unlock() - a.frontier[nodeID] = nil + a.validators.Add(nodeID) } func (a *accepted) OnValidatorRemoved(nodeID ids.NodeID, _ uint64) { a.lock.Lock() defer a.lock.Unlock() + a.validators.Remove(nodeID) delete(a.frontier, nodeID) } func (*accepted) OnValidatorWeightChanged(_ ids.NodeID, _, _ uint64) {} -func (a *accepted) SetAcceptedFrontier(nodeID ids.NodeID, frontier []ids.ID) { +func (a *accepted) SetLastAccepted(nodeID ids.NodeID, frontier ids.ID) { a.lock.Lock() defer a.lock.Unlock() - if _, ok := a.frontier[nodeID]; ok { + if a.validators.Contains(nodeID) { a.frontier[nodeID] = frontier } } -func (a *accepted) AcceptedFrontier(nodeID ids.NodeID) []ids.ID { +func (a *accepted) LastAccepted(nodeID ids.NodeID) (ids.ID, bool) { a.lock.RLock() defer a.lock.RUnlock() - return a.frontier[nodeID] + acceptedID, ok := a.frontier[nodeID] + return acceptedID, ok } diff --git a/avalanchego/snow/engine/common/tracker/accepted_test.go b/avalanchego/snow/engine/common/tracker/accepted_test.go index dbd5faf8..8ff489f5 100644 --- a/avalanchego/snow/engine/common/tracker/accepted_test.go +++ b/avalanchego/snow/engine/common/tracker/accepted_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package tracker @@ -15,27 +15,35 @@ func TestAccepted(t *testing.T) { require := require.New(t) nodeID := ids.GenerateTestNodeID() - frontier0 := []ids.ID{ids.GenerateTestID()} - frontier1 := []ids.ID{ids.GenerateTestID()} + blkID0 := ids.GenerateTestID() + blkID1 := ids.GenerateTestID() a := NewAccepted() - require.Empty(a.AcceptedFrontier(nodeID)) + _, ok := a.LastAccepted(nodeID) + require.False(ok) - a.SetAcceptedFrontier(nodeID, frontier0) - require.Empty(a.AcceptedFrontier(nodeID)) + a.SetLastAccepted(nodeID, blkID0) + _, ok = a.LastAccepted(nodeID) + require.False(ok) a.OnValidatorAdded(nodeID, nil, ids.GenerateTestID(), 1) - require.Empty(a.AcceptedFrontier(nodeID)) + _, ok = a.LastAccepted(nodeID) + require.False(ok) - a.SetAcceptedFrontier(nodeID, frontier0) - require.Equal(frontier0, a.AcceptedFrontier(nodeID)) + a.SetLastAccepted(nodeID, blkID0) + blkID, ok := a.LastAccepted(nodeID) + require.True(ok) + require.Equal(blkID0, blkID) - a.SetAcceptedFrontier(nodeID, frontier1) - require.Equal(frontier1, a.AcceptedFrontier(nodeID)) + a.SetLastAccepted(nodeID, blkID1) + blkID, ok = a.LastAccepted(nodeID) + require.True(ok) + require.Equal(blkID1, blkID) a.OnValidatorRemoved(nodeID, 1) - require.Empty(a.AcceptedFrontier(nodeID)) + _, ok = a.LastAccepted(nodeID) + require.False(ok) } diff --git a/avalanchego/snow/engine/common/tracker/peers.go b/avalanchego/snow/engine/common/tracker/peers.go index 47ba2827..fdf07061 100644 --- a/avalanchego/snow/engine/common/tracker/peers.go +++ b/avalanchego/snow/engine/common/tracker/peers.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package tracker @@ -7,14 +7,21 @@ import ( "context" "sync" + "github.com/prometheus/client_golang/prometheus" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/version" ) -var _ Peers = (*peers)(nil) +var ( + _ Peers = (*lockedPeers)(nil) + _ Peers = (*meteredPeers)(nil) + _ Peers = (*peerData)(nil) +) type Peers interface { validators.SetCallbackListener @@ -22,16 +29,180 @@ type Peers interface { // ConnectedWeight returns the currently connected stake weight ConnectedWeight() uint64 + // ConnectedPercent returns the currently connected stake percentage [0, 1] + ConnectedPercent() float64 + // TotalWeight returns the total validator weight + TotalWeight() uint64 + // SampleValidator returns a randomly selected connected validator. If there + // are no currently connected validators then it will return false. + SampleValidator() (ids.NodeID, bool) // PreferredPeers returns the currently connected validators. If there are // no currently connected validators then it will return the currently // connected peers. 
PreferredPeers() set.Set[ids.NodeID] } -type peers struct { - lock sync.RWMutex +type lockedPeers struct { + lock sync.RWMutex + peers Peers +} + +func NewPeers() Peers { + return &lockedPeers{ + peers: &peerData{ + validators: make(map[ids.NodeID]uint64), + }, + } +} + +func (p *lockedPeers) OnValidatorAdded(nodeID ids.NodeID, pk *bls.PublicKey, txID ids.ID, weight uint64) { + p.lock.Lock() + defer p.lock.Unlock() + + p.peers.OnValidatorAdded(nodeID, pk, txID, weight) +} + +func (p *lockedPeers) OnValidatorRemoved(nodeID ids.NodeID, weight uint64) { + p.lock.Lock() + defer p.lock.Unlock() + + p.peers.OnValidatorRemoved(nodeID, weight) +} + +func (p *lockedPeers) OnValidatorWeightChanged(nodeID ids.NodeID, oldWeight, newWeight uint64) { + p.lock.Lock() + defer p.lock.Unlock() + + p.peers.OnValidatorWeightChanged(nodeID, oldWeight, newWeight) +} + +func (p *lockedPeers) Connected(ctx context.Context, nodeID ids.NodeID, version *version.Application) error { + p.lock.Lock() + defer p.lock.Unlock() + + return p.peers.Connected(ctx, nodeID, version) +} + +func (p *lockedPeers) Disconnected(ctx context.Context, nodeID ids.NodeID) error { + p.lock.Lock() + defer p.lock.Unlock() + + return p.peers.Disconnected(ctx, nodeID) +} + +func (p *lockedPeers) ConnectedWeight() uint64 { + p.lock.RLock() + defer p.lock.RUnlock() + + return p.peers.ConnectedWeight() +} + +func (p *lockedPeers) ConnectedPercent() float64 { + p.lock.RLock() + defer p.lock.RUnlock() + + return p.peers.ConnectedPercent() +} + +func (p *lockedPeers) TotalWeight() uint64 { + p.lock.RLock() + defer p.lock.RUnlock() + + return p.peers.TotalWeight() +} + +func (p *lockedPeers) SampleValidator() (ids.NodeID, bool) { + p.lock.RLock() + defer p.lock.RUnlock() + + return p.peers.SampleValidator() +} + +func (p *lockedPeers) PreferredPeers() set.Set[ids.NodeID] { + p.lock.RLock() + defer p.lock.RUnlock() + + return p.peers.PreferredPeers() +} + +type meteredPeers struct { + Peers + + percentConnected 
prometheus.Gauge + numValidators prometheus.Gauge + totalWeight prometheus.Gauge +} + +func NewMeteredPeers(namespace string, reg prometheus.Registerer) (Peers, error) { + percentConnected := prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Name: "percent_connected", + Help: "Percent of connected stake", + }) + totalWeight := prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Name: "total_weight", + Help: "Total stake", + }) + numValidators := prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Name: "num_validators", + Help: "Total number of validators", + }) + err := utils.Err( + reg.Register(percentConnected), + reg.Register(totalWeight), + reg.Register(numValidators), + ) + return &lockedPeers{ + peers: &meteredPeers{ + Peers: &peerData{ + validators: make(map[ids.NodeID]uint64), + }, + percentConnected: percentConnected, + totalWeight: totalWeight, + numValidators: numValidators, + }, + }, err +} + +func (p *meteredPeers) OnValidatorAdded(nodeID ids.NodeID, pk *bls.PublicKey, txID ids.ID, weight uint64) { + p.Peers.OnValidatorAdded(nodeID, pk, txID, weight) + p.numValidators.Inc() + p.totalWeight.Set(float64(p.Peers.TotalWeight())) + p.percentConnected.Set(p.Peers.ConnectedPercent()) +} + +func (p *meteredPeers) OnValidatorRemoved(nodeID ids.NodeID, weight uint64) { + p.Peers.OnValidatorRemoved(nodeID, weight) + p.numValidators.Dec() + p.totalWeight.Set(float64(p.Peers.TotalWeight())) + p.percentConnected.Set(p.Peers.ConnectedPercent()) +} + +func (p *meteredPeers) OnValidatorWeightChanged(nodeID ids.NodeID, oldWeight, newWeight uint64) { + p.Peers.OnValidatorWeightChanged(nodeID, oldWeight, newWeight) + p.totalWeight.Set(float64(p.Peers.TotalWeight())) + p.percentConnected.Set(p.Peers.ConnectedPercent()) +} + +func (p *meteredPeers) Connected(ctx context.Context, nodeID ids.NodeID, version *version.Application) error { + err := p.Peers.Connected(ctx, nodeID, version) + 
p.percentConnected.Set(p.Peers.ConnectedPercent()) + return err +} + +func (p *meteredPeers) Disconnected(ctx context.Context, nodeID ids.NodeID) error { + err := p.Peers.Disconnected(ctx, nodeID) + p.percentConnected.Set(p.Peers.ConnectedPercent()) + return err +} + +type peerData struct { // validators maps nodeIDs to their current stake weight validators map[ids.NodeID]uint64 + // totalWeight is the total weight of all validators + totalWeight uint64 // connectedWeight contains the sum of all connected validator weights connectedWeight uint64 // connectedValidators is the set of currently connected peers with a @@ -41,49 +212,35 @@ type peers struct { connectedPeers set.Set[ids.NodeID] } -func NewPeers() Peers { - return &peers{ - validators: make(map[ids.NodeID]uint64), - } -} - -func (p *peers) OnValidatorAdded(nodeID ids.NodeID, _ *bls.PublicKey, _ ids.ID, weight uint64) { - p.lock.Lock() - defer p.lock.Unlock() - +func (p *peerData) OnValidatorAdded(nodeID ids.NodeID, _ *bls.PublicKey, _ ids.ID, weight uint64) { p.validators[nodeID] = weight + p.totalWeight += weight if p.connectedPeers.Contains(nodeID) { p.connectedWeight += weight p.connectedValidators.Add(nodeID) } } -func (p *peers) OnValidatorRemoved(nodeID ids.NodeID, weight uint64) { - p.lock.Lock() - defer p.lock.Unlock() - +func (p *peerData) OnValidatorRemoved(nodeID ids.NodeID, weight uint64) { delete(p.validators, nodeID) + p.totalWeight -= weight if p.connectedPeers.Contains(nodeID) { p.connectedWeight -= weight p.connectedValidators.Remove(nodeID) } } -func (p *peers) OnValidatorWeightChanged(nodeID ids.NodeID, oldWeight, newWeight uint64) { - p.lock.Lock() - defer p.lock.Unlock() - +func (p *peerData) OnValidatorWeightChanged(nodeID ids.NodeID, oldWeight, newWeight uint64) { p.validators[nodeID] = newWeight + p.totalWeight -= oldWeight + p.totalWeight += newWeight if p.connectedPeers.Contains(nodeID) { p.connectedWeight -= oldWeight p.connectedWeight += newWeight } } -func (p *peers) 
Connected(_ context.Context, nodeID ids.NodeID, _ *version.Application) error { - p.lock.Lock() - defer p.lock.Unlock() - +func (p *peerData) Connected(_ context.Context, nodeID ids.NodeID, _ *version.Application) error { if weight, ok := p.validators[nodeID]; ok { p.connectedWeight += weight p.connectedValidators.Add(nodeID) @@ -92,10 +249,7 @@ func (p *peers) Connected(_ context.Context, nodeID ids.NodeID, _ *version.Appli return nil } -func (p *peers) Disconnected(_ context.Context, nodeID ids.NodeID) error { - p.lock.Lock() - defer p.lock.Unlock() - +func (p *peerData) Disconnected(_ context.Context, nodeID ids.NodeID) error { if weight, ok := p.validators[nodeID]; ok { p.connectedWeight -= weight p.connectedValidators.Remove(nodeID) @@ -104,17 +258,26 @@ func (p *peers) Disconnected(_ context.Context, nodeID ids.NodeID) error { return nil } -func (p *peers) ConnectedWeight() uint64 { - p.lock.RLock() - defer p.lock.RUnlock() - +func (p *peerData) ConnectedWeight() uint64 { return p.connectedWeight } -func (p *peers) PreferredPeers() set.Set[ids.NodeID] { - p.lock.RLock() - defer p.lock.RUnlock() +func (p *peerData) ConnectedPercent() float64 { + if p.totalWeight == 0 { + return 1 + } + return float64(p.connectedWeight) / float64(p.totalWeight) +} + +func (p *peerData) TotalWeight() uint64 { + return p.totalWeight +} + +func (p *peerData) SampleValidator() (ids.NodeID, bool) { + return p.connectedValidators.Peek() +} +func (p *peerData) PreferredPeers() set.Set[ids.NodeID] { if p.connectedValidators.Len() == 0 { connectedPeers := set.NewSet[ids.NodeID](p.connectedPeers.Len()) connectedPeers.Union(p.connectedPeers) diff --git a/avalanchego/snow/engine/common/tracker/peers_test.go b/avalanchego/snow/engine/common/tracker/peers_test.go index 8c8b0821..b627b79a 100644 --- a/avalanchego/snow/engine/common/tracker/peers_test.go +++ b/avalanchego/snow/engine/common/tracker/peers_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
+// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package tracker @@ -20,32 +20,36 @@ func TestPeers(t *testing.T) { p := NewPeers() + require.Zero(p.TotalWeight()) require.Zero(p.ConnectedWeight()) require.Empty(p.PreferredPeers()) p.OnValidatorAdded(nodeID, nil, ids.Empty, 5) require.Zero(p.ConnectedWeight()) + require.Equal(uint64(5), p.TotalWeight()) require.Empty(p.PreferredPeers()) - err := p.Connected(context.Background(), nodeID, version.CurrentApp) - require.NoError(err) - require.EqualValues(5, p.ConnectedWeight()) + require.NoError(p.Connected(context.Background(), nodeID, version.CurrentApp)) + require.Equal(uint64(5), p.ConnectedWeight()) require.Contains(p.PreferredPeers(), nodeID) p.OnValidatorWeightChanged(nodeID, 5, 10) - require.EqualValues(10, p.ConnectedWeight()) + require.Equal(uint64(10), p.ConnectedWeight()) + require.Equal(uint64(10), p.TotalWeight()) require.Contains(p.PreferredPeers(), nodeID) p.OnValidatorRemoved(nodeID, 10) require.Zero(p.ConnectedWeight()) + require.Zero(p.TotalWeight()) require.Contains(p.PreferredPeers(), nodeID) p.OnValidatorAdded(nodeID, nil, ids.Empty, 5) - require.EqualValues(5, p.ConnectedWeight()) + require.Equal(uint64(5), p.ConnectedWeight()) + require.Equal(uint64(5), p.TotalWeight()) require.Contains(p.PreferredPeers(), nodeID) - err = p.Disconnected(context.Background(), nodeID) - require.NoError(err) + require.NoError(p.Disconnected(context.Background(), nodeID)) require.Zero(p.ConnectedWeight()) + require.Equal(uint64(5), p.TotalWeight()) require.Empty(p.PreferredPeers()) } diff --git a/avalanchego/snow/engine/common/tracker/startup.go b/avalanchego/snow/engine/common/tracker/startup.go index 282d88ce..c5e75613 100644 --- a/avalanchego/snow/engine/common/tracker/startup.go +++ b/avalanchego/snow/engine/common/tracker/startup.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
+// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package tracker diff --git a/avalanchego/snow/engine/common/vm.go b/avalanchego/snow/engine/common/vm.go index 5fedd50f..65cbfb15 100644 --- a/avalanchego/snow/engine/common/vm.go +++ b/avalanchego/snow/engine/common/vm.go @@ -1,13 +1,14 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package common import ( "context" + "net/http" "github.com/ava-labs/avalanchego/api/health" - "github.com/ava-labs/avalanchego/database/manager" + "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/validators" ) @@ -46,7 +47,7 @@ type VM interface { Initialize( ctx context.Context, chainCtx *snow.Context, - dbManager manager.Manager, + db database.Database, genesisBytes []byte, upgradeBytes []byte, configBytes []byte, @@ -64,25 +65,6 @@ type VM interface { // Version returns the version of the VM. Version(context.Context) (string, error) - // Creates the HTTP handlers for custom VM network calls. - // - // This exposes handlers that the outside world can use to communicate with - // a static reference to the VM. Each handler has the path: - // [Address of node]/ext/VM/[VM ID]/[extension] - // - // Returns a mapping from [extension]s to HTTP handlers. - // - // Each extension can specify how locking is managed for convenience. - // - // For example, it might make sense to have an extension for creating - // genesis bytes this VM can interpret. - // - // Note: If this method is called, no other method will be called on this VM. - // Each registered VM will have a single instance created to handle static - // APIs. This instance will be handled separately from instances created to - // service an instance of a chain. 
- CreateStaticHandlers(context.Context) (map[string]*HTTPHandler, error) - // Creates the HTTP handlers for custom chain network calls. // // This exposes handlers that the outside world can use to communicate with @@ -91,10 +73,8 @@ type VM interface { // // Returns a mapping from [extension]s to HTTP handlers. // - // Each extension can specify how locking is managed for convenience. - // // For example, if this VM implements an account-based payments system, // it have an extension called `accounts`, where clients could get // information about their accounts. - CreateHandlers(context.Context) (map[string]*HTTPHandler, error) + CreateHandlers(context.Context) (map[string]http.Handler, error) } diff --git a/avalanchego/snow/engine/snowman/ancestor/tree.go b/avalanchego/snow/engine/snowman/ancestor/tree.go new file mode 100644 index 00000000..9e0eb4e4 --- /dev/null +++ b/avalanchego/snow/engine/snowman/ancestor/tree.go @@ -0,0 +1,132 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package ancestor + +import ( + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/set" +) + +var _ Tree = (*tree)(nil) + +// Tree manages a (potentially partial) view of a tree. +// +// For example, assume this is the full tree: +// +// A +// / \ +// B D +// | | +// C E +// +// A partial view of this tree may be: +// +// A +// / +// B D +// | | +// C E +// +// Or: +// +// B D +// | | +// C E +// +// This structure is designed to update and traverse these partial views. +type Tree interface { + // Add a mapping from blkID to parentID. + // + // Invariant: blkID must not be equal to parentID + // Invariant: a given blkID must only ever have one parentID + Add(blkID ids.ID, parentID ids.ID) + + // Has returns if blkID's parentID is known by the tree. + Has(blkID ids.ID) bool + + // GetAncestor returns the oldest known ancestor of blkID. 
If there is no + // known parentID of blkID, blkID will be returned. + GetAncestor(blkID ids.ID) ids.ID + + // Remove the mapping from blkID to its parentID from the tree. + Remove(blkID ids.ID) + + // RemoveDescendants removes blkID from the tree along with all of its known + // descendants. + RemoveDescendants(blkID ids.ID) + + // Len returns the total number of blkID to parentID mappings that are + // currently tracked by the tree. + Len() int +} + +type tree struct { + childToParent map[ids.ID]ids.ID + parentToChildren map[ids.ID]set.Set[ids.ID] +} + +func NewTree() Tree { + return &tree{ + childToParent: make(map[ids.ID]ids.ID), + parentToChildren: make(map[ids.ID]set.Set[ids.ID]), + } +} + +func (t *tree) Add(blkID ids.ID, parentID ids.ID) { + t.childToParent[blkID] = parentID + + children := t.parentToChildren[parentID] + children.Add(blkID) + t.parentToChildren[parentID] = children +} + +func (t *tree) Has(blkID ids.ID) bool { + _, ok := t.childToParent[blkID] + return ok +} + +func (t *tree) GetAncestor(blkID ids.ID) ids.ID { + for { + parentID, ok := t.childToParent[blkID] + // this is the furthest parent available, break loop and return blkID + if !ok { + return blkID + } + // continue to loop with parentID + blkID = parentID + } +} + +func (t *tree) Remove(blkID ids.ID) { + parent, ok := t.childToParent[blkID] + if !ok { + return + } + delete(t.childToParent, blkID) + // remove blkID from children + children := t.parentToChildren[parent] + children.Remove(blkID) + // this parent has no more children, remove it from map + if children.Len() == 0 { + delete(t.parentToChildren, parent) + } +} + +func (t *tree) RemoveDescendants(blkID ids.ID) { + childrenList := []ids.ID{blkID} + for len(childrenList) > 0 { + newChildrenSize := len(childrenList) - 1 + childID := childrenList[newChildrenSize] + childrenList = childrenList[:newChildrenSize] + t.Remove(childID) + // get children of child + for grandChildID := range t.parentToChildren[childID] { + childrenList = 
append(childrenList, grandChildID) + } + } +} + +func (t *tree) Len() int { + return len(t.childToParent) +} diff --git a/avalanchego/snow/engine/snowman/ancestor/tree_test.go b/avalanchego/snow/engine/snowman/ancestor/tree_test.go new file mode 100644 index 00000000..d17d38ce --- /dev/null +++ b/avalanchego/snow/engine/snowman/ancestor/tree_test.go @@ -0,0 +1,354 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package ancestor + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/set" +) + +var ( + id1 = ids.GenerateTestID() + id2 = ids.GenerateTestID() + id3 = ids.GenerateTestID() + id4 = ids.GenerateTestID() +) + +func TestAdd(t *testing.T) { + tests := map[string]struct { + initial Tree + blkID ids.ID + parentID ids.ID + expected Tree + }{ + "add to empty tree": { + initial: &tree{ + childToParent: make(map[ids.ID]ids.ID), + parentToChildren: make(map[ids.ID]set.Set[ids.ID]), + }, + blkID: id1, + parentID: id2, + expected: &tree{ + childToParent: map[ids.ID]ids.ID{ + id1: id2, + }, + parentToChildren: map[ids.ID]set.Set[ids.ID]{ + id2: set.Of(id1), + }, + }, + }, + "add new parent": { + initial: &tree{ + childToParent: map[ids.ID]ids.ID{ + id1: id2, + }, + parentToChildren: map[ids.ID]set.Set[ids.ID]{ + id2: set.Of(id1), + }, + }, + blkID: id3, + parentID: id4, + expected: &tree{ + childToParent: map[ids.ID]ids.ID{ + id1: id2, + id3: id4, + }, + parentToChildren: map[ids.ID]set.Set[ids.ID]{ + id2: set.Of(id1), + id4: set.Of(id3), + }, + }, + }, + "add new block to existing parent": { + initial: &tree{ + childToParent: map[ids.ID]ids.ID{ + id1: id2, + }, + parentToChildren: map[ids.ID]set.Set[ids.ID]{ + id2: set.Of(id1), + }, + }, + blkID: id3, + parentID: id2, + expected: &tree{ + childToParent: map[ids.ID]ids.ID{ + id1: id2, + id3: id2, + }, + parentToChildren: map[ids.ID]set.Set[ids.ID]{ + id2: 
set.Of(id1, id3), + }, + }, + }, + } + for name, test := range tests { + t.Run(name, func(t *testing.T) { + at := test.initial + at.Add(test.blkID, test.parentID) + require.Equal(t, test.expected, at) + }) + } +} + +func TestRemove(t *testing.T) { + tests := map[string]struct { + initial Tree + blkID ids.ID + expected Tree + }{ + "remove from empty tree": { + initial: &tree{ + childToParent: make(map[ids.ID]ids.ID), + parentToChildren: make(map[ids.ID]set.Set[ids.ID]), + }, + blkID: id1, + expected: &tree{ + childToParent: make(map[ids.ID]ids.ID), + parentToChildren: make(map[ids.ID]set.Set[ids.ID]), + }, + }, + "remove block and parent from tree": { + initial: &tree{ + childToParent: map[ids.ID]ids.ID{ + id1: id2, + }, + parentToChildren: map[ids.ID]set.Set[ids.ID]{ + id2: set.Of(id1), + }, + }, + blkID: id1, + expected: &tree{ + childToParent: make(map[ids.ID]ids.ID), + parentToChildren: make(map[ids.ID]set.Set[ids.ID]), + }, + }, + "remove block and not parent from tree": { + initial: &tree{ + childToParent: map[ids.ID]ids.ID{ + id1: id2, + id3: id2, + }, + parentToChildren: map[ids.ID]set.Set[ids.ID]{ + id2: set.Of(id1, id3), + }, + }, + blkID: id1, + expected: &tree{ + childToParent: map[ids.ID]ids.ID{ + id3: id2, + }, + parentToChildren: map[ids.ID]set.Set[ids.ID]{ + id2: set.Of(id3), + }, + }, + }, + "remove untracked block from tree": { + initial: &tree{ + childToParent: map[ids.ID]ids.ID{ + id1: id2, + }, + parentToChildren: map[ids.ID]set.Set[ids.ID]{ + id2: set.Of(id1), + }, + }, + blkID: id2, + expected: &tree{ + childToParent: map[ids.ID]ids.ID{ + id1: id2, + }, + parentToChildren: map[ids.ID]set.Set[ids.ID]{ + id2: set.Of(id1), + }, + }, + }, + } + for name, test := range tests { + t.Run(name, func(t *testing.T) { + at := test.initial + at.Remove(test.blkID) + require.Equal(t, test.expected, at) + }) + } +} + +func TestRemoveDescendants(t *testing.T) { + tests := map[string]struct { + initial Tree + blkID ids.ID + expected Tree + }{ + "remove from 
empty tree": { + initial: &tree{ + childToParent: make(map[ids.ID]ids.ID), + parentToChildren: make(map[ids.ID]set.Set[ids.ID]), + }, + blkID: id1, + expected: &tree{ + childToParent: make(map[ids.ID]ids.ID), + parentToChildren: make(map[ids.ID]set.Set[ids.ID]), + }, + }, + "remove block and parent from tree": { + initial: &tree{ + childToParent: map[ids.ID]ids.ID{ + id1: id2, + }, + parentToChildren: map[ids.ID]set.Set[ids.ID]{ + id2: set.Of(id1), + }, + }, + blkID: id1, + expected: &tree{ + childToParent: make(map[ids.ID]ids.ID), + parentToChildren: make(map[ids.ID]set.Set[ids.ID]), + }, + }, + "remove block and not parent from tree": { + initial: &tree{ + childToParent: map[ids.ID]ids.ID{ + id1: id2, + id3: id2, + }, + parentToChildren: map[ids.ID]set.Set[ids.ID]{ + id2: set.Of(id1, id3), + }, + }, + blkID: id1, + expected: &tree{ + childToParent: map[ids.ID]ids.ID{ + id3: id2, + }, + parentToChildren: map[ids.ID]set.Set[ids.ID]{ + id2: set.Of(id3), + }, + }, + }, + "remove untracked block from tree": { + initial: &tree{ + childToParent: map[ids.ID]ids.ID{ + id1: id2, + }, + parentToChildren: map[ids.ID]set.Set[ids.ID]{ + id2: set.Of(id1), + }, + }, + blkID: id3, + expected: &tree{ + childToParent: map[ids.ID]ids.ID{ + id1: id2, + }, + parentToChildren: map[ids.ID]set.Set[ids.ID]{ + id2: set.Of(id1), + }, + }, + }, + "remove children from tree": { + initial: &tree{ + childToParent: map[ids.ID]ids.ID{ + id1: id2, + id3: id2, + }, + parentToChildren: map[ids.ID]set.Set[ids.ID]{ + id2: set.Of(id1, id3), + }, + }, + blkID: id2, + expected: &tree{ + childToParent: make(map[ids.ID]ids.ID), + parentToChildren: make(map[ids.ID]set.Set[ids.ID]), + }, + }, + "remove grand child from tree": { + initial: &tree{ + childToParent: map[ids.ID]ids.ID{ + id1: id2, + id2: id3, + }, + parentToChildren: map[ids.ID]set.Set[ids.ID]{ + id2: set.Of(id1), + id3: set.Of(id2), + }, + }, + blkID: id3, + expected: &tree{ + childToParent: make(map[ids.ID]ids.ID), + parentToChildren: 
make(map[ids.ID]set.Set[ids.ID]), + }, + }, + } + for name, test := range tests { + t.Run(name, func(t *testing.T) { + at := test.initial + at.RemoveDescendants(test.blkID) + require.Equal(t, test.expected, at) + }) + } +} + +func TestHas(t *testing.T) { + require := require.New(t) + + at := NewTree() + require.False(at.Has(id1)) + require.False(at.Has(id2)) + require.False(at.Has(id3)) + + at.Add(id1, id2) + require.True(at.Has(id1)) + require.False(at.Has(id2)) + require.False(at.Has(id3)) + + at.Add(id2, id3) + require.True(at.Has(id1)) + require.True(at.Has(id2)) + require.False(at.Has(id3)) +} + +func TestGetAncestor(t *testing.T) { + require := require.New(t) + + at := NewTree() + require.Equal(id1, at.GetAncestor(id1)) + require.Equal(id2, at.GetAncestor(id2)) + require.Equal(id3, at.GetAncestor(id3)) + require.Equal(id4, at.GetAncestor(id4)) + + at.Add(id1, id2) + require.Equal(id2, at.GetAncestor(id1)) + require.Equal(id2, at.GetAncestor(id2)) + require.Equal(id3, at.GetAncestor(id3)) + require.Equal(id4, at.GetAncestor(id4)) + + at.Add(id2, id3) + require.Equal(id3, at.GetAncestor(id1)) + require.Equal(id3, at.GetAncestor(id2)) + require.Equal(id3, at.GetAncestor(id3)) + require.Equal(id4, at.GetAncestor(id4)) + + at.Add(id4, id3) + require.Equal(id3, at.GetAncestor(id1)) + require.Equal(id3, at.GetAncestor(id2)) + require.Equal(id3, at.GetAncestor(id3)) + require.Equal(id3, at.GetAncestor(id4)) +} + +func TestLen(t *testing.T) { + require := require.New(t) + + at := NewTree() + require.Zero(at.Len()) + + at.Add(id1, id2) + require.Equal(1, at.Len()) + + at.Add(id2, id3) + require.Equal(2, at.Len()) + + at.Add(id4, id3) + require.Equal(3, at.Len()) +} diff --git a/avalanchego/snow/engine/snowman/ancestor_tree.go b/avalanchego/snow/engine/snowman/ancestor_tree.go deleted file mode 100644 index 7a8f514b..00000000 --- a/avalanchego/snow/engine/snowman/ancestor_tree.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. 
All rights reserved. -// See the file LICENSE for licensing terms. - -package snowman - -import ( - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/set" -) - -type AncestorTree interface { - Add(blkID ids.ID, parentID ids.ID) - Has(blkID ids.ID) bool - GetRoot(blkID ids.ID) ids.ID - Remove(blkID ids.ID) - RemoveSubtree(blkID ids.ID) - Len() int -} - -type ancestorTree struct { - childToParent map[ids.ID]ids.ID - parentToChildren map[ids.ID]set.Set[ids.ID] -} - -func NewAncestorTree() AncestorTree { - return &ancestorTree{ - childToParent: make(map[ids.ID]ids.ID), - parentToChildren: make(map[ids.ID]set.Set[ids.ID]), - } -} - -// Add maps given blkID to given parentID -func (p *ancestorTree) Add(blkID ids.ID, parentID ids.ID) { - p.childToParent[blkID] = parentID - - children := p.parentToChildren[parentID] - children.Add(blkID) - p.parentToChildren[parentID] = children -} - -// GetRoot returns the oldest parent of blkID, might return blkID if no parent is available. 
-func (p *ancestorTree) GetRoot(blkID ids.ID) ids.ID { - for { - parentID, ok := p.childToParent[blkID] - // this is the furthest parent available, break loop and return blkID - if !ok { - return blkID - } - // continue to loop with parentID - blkID = parentID - } -} - -// Has returns if blkID is in the tree or not -func (p *ancestorTree) Has(blkID ids.ID) bool { - _, ok := p.childToParent[blkID] - return ok -} - -// Remove removes blkID from the tree -func (p *ancestorTree) Remove(blkID ids.ID) { - parent, ok := p.childToParent[blkID] - if !ok { - return - } - delete(p.childToParent, blkID) - - // remove blkID from children - children := p.parentToChildren[parent] - children.Remove(blkID) - // this parent has no more children, remove it from map - if children.Len() == 0 { - delete(p.parentToChildren, parent) - } -} - -// Returns tree length -func (p *ancestorTree) Len() int { - return len(p.childToParent) -} - -// RemoveSubtree removes whole subtree that blkID holds -func (p *ancestorTree) RemoveSubtree(blkID ids.ID) { - childrenList := []ids.ID{blkID} - for len(childrenList) > 0 { - newChildrenSize := len(childrenList) - 1 - childID := childrenList[newChildrenSize] - childrenList = childrenList[:newChildrenSize] - p.Remove(childID) - // get children of child - for grandChildID := range p.parentToChildren[childID] { - childrenList = append(childrenList, grandChildID) - } - } -} diff --git a/avalanchego/snow/engine/snowman/ancestor_tree_test.go b/avalanchego/snow/engine/snowman/ancestor_tree_test.go deleted file mode 100644 index f2419d16..00000000 --- a/avalanchego/snow/engine/snowman/ancestor_tree_test.go +++ /dev/null @@ -1,170 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package snowman - -import ( - "testing" - - "github.com/stretchr/testify/require" - - "github.com/ava-labs/avalanchego/ids" -) - -func TestAdd(t *testing.T) { - tests := map[string]struct { - method func(require *require.Assertions, at AncestorTree) - }{ - "should return false if not found": { - method: func(require *require.Assertions, at AncestorTree) { - id1 := ids.GenerateTestID() - id := at.GetRoot(id1) - require.Equal(id1, id) - }, - }, - "should add to tree and return id2 root": { - method: func(require *require.Assertions, at AncestorTree) { - id1 := ids.GenerateTestID() - id2 := ids.GenerateTestID() - at.Add(id1, id2) - require.True(at.Has(id1)) - result := at.GetRoot(id1) - require.Equal(result, id2) - }, - }, - "should return ancestor id3 through id2": { - method: func(require *require.Assertions, at AncestorTree) { - id1 := ids.GenerateTestID() - id2 := ids.GenerateTestID() - id3 := ids.GenerateTestID() - at.Add(id1, id2) - at.Add(id2, id3) - require.True(at.Has(id2)) - result := at.GetRoot(id1) - require.Equal(result, id3) - }, - }, - "should also return root id3 for another child": { - method: func(require *require.Assertions, at AncestorTree) { - id1 := ids.GenerateTestID() - id2 := ids.GenerateTestID() - id3 := ids.GenerateTestID() - id4 := ids.GenerateTestID() - at.Add(id1, id2) - at.Add(id2, id3) - at.Add(id4, id2) - result := at.GetRoot(id4) - require.Equal(result, id3) - }, - }, - } - for name, test := range tests { - t.Run(name, func(t *testing.T) { - require := require.New(t) - at := NewAncestorTree() - test.method(require, at) - }) - } -} - -func TestRemove(t *testing.T) { - tests := map[string]struct { - method func(require *require.Assertions, at AncestorTree) - }{ - "removing root should not affect child roots": { - method: func(require *require.Assertions, at AncestorTree) { - id1 := ids.GenerateTestID() - id2 := ids.GenerateTestID() - id3 := ids.GenerateTestID() - at.Add(id1, id2) - at.Add(id2, id3) - at.Remove(id3) - 
require.True(at.Has(id1)) - require.True(at.Has(id2)) - require.False(at.Has(id3)) - id := at.GetRoot(id2) - require.Equal(id3, id) - id = at.GetRoot(id1) - require.Equal(id3, id) - }, - }, - "removing parent should change root": { - method: func(require *require.Assertions, at AncestorTree) { - id1 := ids.GenerateTestID() - id2 := ids.GenerateTestID() - id3 := ids.GenerateTestID() - id4 := ids.GenerateTestID() - at.Add(id1, id2) - at.Add(id2, id3) - at.Add(id3, id4) - id := at.GetRoot(id1) - require.Equal(id4, id) - at.Remove(id3) - id = at.GetRoot(id1) - require.Equal(id3, id) - }, - }, - } - for name, test := range tests { - t.Run(name, func(t *testing.T) { - require := require.New(t) - at := NewAncestorTree() - test.method(require, at) - }) - } -} - -func TestRemoveSubtree(t *testing.T) { - tests := map[string]struct { - method func(require *require.Assertions, at AncestorTree) - }{ - "remove root's subtree": { - method: func(require *require.Assertions, at AncestorTree) { - id1 := ids.GenerateTestID() - id2 := ids.GenerateTestID() - id3 := ids.GenerateTestID() - at.Add(id1, id2) - at.Add(id2, id3) - at.RemoveSubtree(id3) - require.False(at.Has(id1)) - require.False(at.Has(id2)) - require.False(at.Has(id3)) - id := at.GetRoot(id2) - require.Equal(id2, id) - id = at.GetRoot(id1) - require.Equal(id1, id) - }, - }, - "remove subtree": { - method: func(require *require.Assertions, at AncestorTree) { - id1 := ids.GenerateTestID() - id2 := ids.GenerateTestID() - id3 := ids.GenerateTestID() - id4 := ids.GenerateTestID() - id5 := ids.GenerateTestID() - at.Add(id1, id2) - at.Add(id2, id3) - at.Add(id3, id4) - at.Add(id4, id5) - at.RemoveSubtree(id3) - require.False(at.Has(id1)) - require.False(at.Has(id2)) - require.False(at.Has(id3)) - id := at.GetRoot(id1) - require.Equal(id, id1) - id = at.GetRoot(id3) - require.Equal(id, id3) - require.True(at.Has(id4)) - id = at.GetRoot(id4) - require.Equal(id5, id) - }, - }, - } - for name, test := range tests { - t.Run(name, 
func(t *testing.T) { - require := require.New(t) - at := NewAncestorTree() - test.method(require, at) - }) - } -} diff --git a/avalanchego/snow/engine/snowman/block/batched_vm.go b/avalanchego/snow/engine/snowman/block/batched_vm.go index 5cb63cd2..ad52e359 100644 --- a/avalanchego/snow/engine/snowman/block/batched_vm.go +++ b/avalanchego/snow/engine/snowman/block/batched_vm.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package block @@ -8,9 +8,12 @@ import ( "errors" "time" + "go.uber.org/zap" + "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/consensus/snowman" + "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/wrappers" ) @@ -33,6 +36,7 @@ type BatchedChainVM interface { func GetAncestors( ctx context.Context, + log logging.Logger, vm Getter, // fetch blocks blkID ids.ID, // first requested block maxBlocksNum int, // max number of blocks to be retrieved @@ -60,7 +64,7 @@ func GetAncestors( startTime := time.Now() blk, err := vm.GetBlock(ctx, blkID) if err == database.ErrNotFound { - // special case ErrNotFound as an empty response: this signals + // Special case ErrNotFound as an empty response: this signals // the client to avoid contacting this node for further ancestors // as they may have been pruned or unavailable due to state-sync. 
return nil, nil @@ -74,8 +78,17 @@ func GetAncestors( ancestorsBytesLen := len(blk.Bytes()) + wrappers.IntLen // length, in bytes, of all elements of ancestors for numFetched := 1; numFetched < maxBlocksNum && time.Since(startTime) < maxBlocksRetrivalTime; numFetched++ { - blk, err = vm.GetBlock(ctx, blk.Parent()) + parentID := blk.Parent() + blk, err = vm.GetBlock(ctx, parentID) + if err == database.ErrNotFound { + // After state sync we may not have the full chain + break + } if err != nil { + log.Error("failed to get block during ancestors lookup", + zap.String("parentID", parentID.String()), + zap.Error(err), + ) break } blkBytes := blk.Bytes() @@ -84,7 +97,7 @@ func GetAncestors( // is repr. by an int. newLen := ancestorsBytesLen + len(blkBytes) + wrappers.IntLen if newLen > maxBlocksSize { - // reached maximum response size + // Reached maximum response size break } ancestorsBytes = append(ancestorsBytes, blkBytes) diff --git a/avalanchego/snow/engine/snowman/block/batched_vm_test.go b/avalanchego/snow/engine/snowman/block/batched_vm_test.go index be536d03..b4d251c2 100644 --- a/avalanchego/snow/engine/snowman/block/batched_vm_test.go +++ b/avalanchego/snow/engine/snowman/block/batched_vm_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package block @@ -14,32 +14,37 @@ import ( "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/consensus/snowman" + "github.com/ava-labs/avalanchego/utils/logging" ) var errTest = errors.New("non-nil error") func TestGetAncestorsDatabaseNotFound(t *testing.T) { + require := require.New(t) + vm := &TestVM{} someID := ids.GenerateTestID() vm.GetBlockF = func(_ context.Context, id ids.ID) (snowman.Block, error) { - require.Equal(t, someID, id) + require.Equal(someID, id) return nil, database.ErrNotFound } - containers, err := GetAncestors(context.Background(), vm, someID, 10, 10, 1*time.Second) - require.NoError(t, err) - require.Len(t, containers, 0) + containers, err := GetAncestors(context.Background(), logging.NoLog{}, vm, someID, 10, 10, 1*time.Second) + require.NoError(err) + require.Empty(containers) } // TestGetAncestorsPropagatesErrors checks errors other than // database.ErrNotFound propagate to caller. func TestGetAncestorsPropagatesErrors(t *testing.T) { + require := require.New(t) + vm := &TestVM{} someID := ids.GenerateTestID() vm.GetBlockF = func(_ context.Context, id ids.ID) (snowman.Block, error) { - require.Equal(t, someID, id) + require.Equal(someID, id) return nil, errTest } - containers, err := GetAncestors(context.Background(), vm, someID, 10, 10, 1*time.Second) - require.Nil(t, containers) - require.ErrorIs(t, err, errTest) + containers, err := GetAncestors(context.Background(), logging.NoLog{}, vm, someID, 10, 10, 1*time.Second) + require.Nil(containers) + require.ErrorIs(err, errTest) } diff --git a/avalanchego/snow/engine/snowman/block/block_context_vm.go b/avalanchego/snow/engine/snowman/block/block_context_vm.go index 4a259571..6b8b7823 100644 --- a/avalanchego/snow/engine/snowman/block/block_context_vm.go +++ b/avalanchego/snow/engine/snowman/block/block_context_vm.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
+// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package block diff --git a/avalanchego/snow/engine/snowman/block/height_indexed_vm.go b/avalanchego/snow/engine/snowman/block/height_indexed_vm.go deleted file mode 100644 index da449c3f..00000000 --- a/avalanchego/snow/engine/snowman/block/height_indexed_vm.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. -package block - -import ( - "context" - "errors" - - "github.com/ava-labs/avalanchego/ids" -) - -var ( - ErrHeightIndexedVMNotImplemented = errors.New("vm does not implement HeightIndexedChainVM interface") - ErrIndexIncomplete = errors.New("query failed because height index is incomplete") -) - -// HeightIndexedChainVM extends ChainVM to allow querying block IDs by height. -type HeightIndexedChainVM interface { - // VerifyHeightIndex should return: - // - nil if the height index is available. - // - ErrHeightIndexedVMNotImplemented if the height index is not supported. - // - ErrIndexIncomplete if the height index is not currently available. - // - Any other non-standard error that may have occurred when verifying the - // index. - VerifyHeightIndex(context.Context) error - - // GetBlockIDAtHeight returns: - // - The ID of the block that was accepted with [height]. - // - database.ErrNotFound if the [height] index is unknown. - // - // Note: A returned value of [database.ErrNotFound] typically means that the - // underlying VM was state synced and does not have access to the - // blockID at [height]. 
- GetBlockIDAtHeight(ctx context.Context, height uint64) (ids.ID, error) -} diff --git a/avalanchego/snow/engine/snowman/block/mocks/build_block_with_context_vm.go b/avalanchego/snow/engine/snowman/block/mock_build_block_with_context_vm.go similarity index 80% rename from avalanchego/snow/engine/snowman/block/mocks/build_block_with_context_vm.go rename to avalanchego/snow/engine/snowman/block/mock_build_block_with_context_vm.go index 9b01018a..016007b0 100644 --- a/avalanchego/snow/engine/snowman/block/mocks/build_block_with_context_vm.go +++ b/avalanchego/snow/engine/snowman/block/mock_build_block_with_context_vm.go @@ -1,19 +1,20 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/snow/engine/snowman/block (interfaces: BuildBlockWithContextChainVM) +// +// Generated by this command: +// +// mockgen -package=block -destination=snow/engine/snowman/block/mock_build_block_with_context_vm.go github.com/ava-labs/avalanchego/snow/engine/snowman/block BuildBlockWithContextChainVM +// -// Package mocks is a generated GoMock package. -package mocks +// Package block is a generated GoMock package. +package block import ( context "context" reflect "reflect" snowman "github.com/ava-labs/avalanchego/snow/consensus/snowman" - block "github.com/ava-labs/avalanchego/snow/engine/snowman/block" - gomock "github.com/golang/mock/gomock" + gomock "go.uber.org/mock/gomock" ) // MockBuildBlockWithContextChainVM is a mock of BuildBlockWithContextChainVM interface. @@ -40,7 +41,7 @@ func (m *MockBuildBlockWithContextChainVM) EXPECT() *MockBuildBlockWithContextCh } // BuildBlockWithContext mocks base method. 
-func (m *MockBuildBlockWithContextChainVM) BuildBlockWithContext(arg0 context.Context, arg1 *block.Context) (snowman.Block, error) { +func (m *MockBuildBlockWithContextChainVM) BuildBlockWithContext(arg0 context.Context, arg1 *Context) (snowman.Block, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "BuildBlockWithContext", arg0, arg1) ret0, _ := ret[0].(snowman.Block) @@ -49,7 +50,7 @@ func (m *MockBuildBlockWithContextChainVM) BuildBlockWithContext(arg0 context.Co } // BuildBlockWithContext indicates an expected call of BuildBlockWithContext. -func (mr *MockBuildBlockWithContextChainVMMockRecorder) BuildBlockWithContext(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockBuildBlockWithContextChainVMMockRecorder) BuildBlockWithContext(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BuildBlockWithContext", reflect.TypeOf((*MockBuildBlockWithContextChainVM)(nil).BuildBlockWithContext), arg0, arg1) } diff --git a/avalanchego/snow/engine/snowman/block/mocks/chain_vm.go b/avalanchego/snow/engine/snowman/block/mock_chain_vm.go similarity index 77% rename from avalanchego/snow/engine/snowman/block/mocks/chain_vm.go rename to avalanchego/snow/engine/snowman/block/mock_chain_vm.go index 73821562..ad99e3f7 100644 --- a/avalanchego/snow/engine/snowman/block/mocks/chain_vm.go +++ b/avalanchego/snow/engine/snowman/block/mock_chain_vm.go @@ -1,24 +1,27 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/snow/engine/snowman/block (interfaces: ChainVM) +// +// Generated by this command: +// +// mockgen -package=block -destination=snow/engine/snowman/block/mock_chain_vm.go github.com/ava-labs/avalanchego/snow/engine/snowman/block ChainVM +// -// Package mocks is a generated GoMock package. -package mocks +// Package block is a generated GoMock package. 
+package block import ( context "context" + http "net/http" reflect "reflect" time "time" - manager "github.com/ava-labs/avalanchego/database/manager" + database "github.com/ava-labs/avalanchego/database" ids "github.com/ava-labs/avalanchego/ids" snow "github.com/ava-labs/avalanchego/snow" snowman "github.com/ava-labs/avalanchego/snow/consensus/snowman" common "github.com/ava-labs/avalanchego/snow/engine/common" version "github.com/ava-labs/avalanchego/version" - gomock "github.com/golang/mock/gomock" + gomock "go.uber.org/mock/gomock" ) // MockChainVM is a mock of ChainVM interface. @@ -53,7 +56,7 @@ func (m *MockChainVM) AppGossip(arg0 context.Context, arg1 ids.NodeID, arg2 []by } // AppGossip indicates an expected call of AppGossip. -func (mr *MockChainVMMockRecorder) AppGossip(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockChainVMMockRecorder) AppGossip(arg0, arg1, arg2 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AppGossip", reflect.TypeOf((*MockChainVM)(nil).AppGossip), arg0, arg1, arg2) } @@ -67,23 +70,23 @@ func (m *MockChainVM) AppRequest(arg0 context.Context, arg1 ids.NodeID, arg2 uin } // AppRequest indicates an expected call of AppRequest. -func (mr *MockChainVMMockRecorder) AppRequest(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { +func (mr *MockChainVMMockRecorder) AppRequest(arg0, arg1, arg2, arg3, arg4 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AppRequest", reflect.TypeOf((*MockChainVM)(nil).AppRequest), arg0, arg1, arg2, arg3, arg4) } // AppRequestFailed mocks base method. 
-func (m *MockChainVM) AppRequestFailed(arg0 context.Context, arg1 ids.NodeID, arg2 uint32) error { +func (m *MockChainVM) AppRequestFailed(arg0 context.Context, arg1 ids.NodeID, arg2 uint32, arg3 *common.AppError) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AppRequestFailed", arg0, arg1, arg2) + ret := m.ctrl.Call(m, "AppRequestFailed", arg0, arg1, arg2, arg3) ret0, _ := ret[0].(error) return ret0 } // AppRequestFailed indicates an expected call of AppRequestFailed. -func (mr *MockChainVMMockRecorder) AppRequestFailed(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockChainVMMockRecorder) AppRequestFailed(arg0, arg1, arg2, arg3 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AppRequestFailed", reflect.TypeOf((*MockChainVM)(nil).AppRequestFailed), arg0, arg1, arg2) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AppRequestFailed", reflect.TypeOf((*MockChainVM)(nil).AppRequestFailed), arg0, arg1, arg2, arg3) } // AppResponse mocks base method. @@ -95,7 +98,7 @@ func (m *MockChainVM) AppResponse(arg0 context.Context, arg1 ids.NodeID, arg2 ui } // AppResponse indicates an expected call of AppResponse. -func (mr *MockChainVMMockRecorder) AppResponse(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +func (mr *MockChainVMMockRecorder) AppResponse(arg0, arg1, arg2, arg3 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AppResponse", reflect.TypeOf((*MockChainVM)(nil).AppResponse), arg0, arg1, arg2, arg3) } @@ -110,7 +113,7 @@ func (m *MockChainVM) BuildBlock(arg0 context.Context) (snowman.Block, error) { } // BuildBlock indicates an expected call of BuildBlock. 
-func (mr *MockChainVMMockRecorder) BuildBlock(arg0 interface{}) *gomock.Call { +func (mr *MockChainVMMockRecorder) BuildBlock(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BuildBlock", reflect.TypeOf((*MockChainVM)(nil).BuildBlock), arg0) } @@ -124,41 +127,26 @@ func (m *MockChainVM) Connected(arg0 context.Context, arg1 ids.NodeID, arg2 *ver } // Connected indicates an expected call of Connected. -func (mr *MockChainVMMockRecorder) Connected(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockChainVMMockRecorder) Connected(arg0, arg1, arg2 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Connected", reflect.TypeOf((*MockChainVM)(nil).Connected), arg0, arg1, arg2) } // CreateHandlers mocks base method. -func (m *MockChainVM) CreateHandlers(arg0 context.Context) (map[string]*common.HTTPHandler, error) { +func (m *MockChainVM) CreateHandlers(arg0 context.Context) (map[string]http.Handler, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "CreateHandlers", arg0) - ret0, _ := ret[0].(map[string]*common.HTTPHandler) + ret0, _ := ret[0].(map[string]http.Handler) ret1, _ := ret[1].(error) return ret0, ret1 } // CreateHandlers indicates an expected call of CreateHandlers. -func (mr *MockChainVMMockRecorder) CreateHandlers(arg0 interface{}) *gomock.Call { +func (mr *MockChainVMMockRecorder) CreateHandlers(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateHandlers", reflect.TypeOf((*MockChainVM)(nil).CreateHandlers), arg0) } -// CreateStaticHandlers mocks base method. 
-func (m *MockChainVM) CreateStaticHandlers(arg0 context.Context) (map[string]*common.HTTPHandler, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CreateStaticHandlers", arg0) - ret0, _ := ret[0].(map[string]*common.HTTPHandler) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// CreateStaticHandlers indicates an expected call of CreateStaticHandlers. -func (mr *MockChainVMMockRecorder) CreateStaticHandlers(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateStaticHandlers", reflect.TypeOf((*MockChainVM)(nil).CreateStaticHandlers), arg0) -} - // CrossChainAppRequest mocks base method. func (m *MockChainVM) CrossChainAppRequest(arg0 context.Context, arg1 ids.ID, arg2 uint32, arg3 time.Time, arg4 []byte) error { m.ctrl.T.Helper() @@ -168,23 +156,23 @@ func (m *MockChainVM) CrossChainAppRequest(arg0 context.Context, arg1 ids.ID, ar } // CrossChainAppRequest indicates an expected call of CrossChainAppRequest. -func (mr *MockChainVMMockRecorder) CrossChainAppRequest(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { +func (mr *MockChainVMMockRecorder) CrossChainAppRequest(arg0, arg1, arg2, arg3, arg4 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CrossChainAppRequest", reflect.TypeOf((*MockChainVM)(nil).CrossChainAppRequest), arg0, arg1, arg2, arg3, arg4) } // CrossChainAppRequestFailed mocks base method. -func (m *MockChainVM) CrossChainAppRequestFailed(arg0 context.Context, arg1 ids.ID, arg2 uint32) error { +func (m *MockChainVM) CrossChainAppRequestFailed(arg0 context.Context, arg1 ids.ID, arg2 uint32, arg3 *common.AppError) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CrossChainAppRequestFailed", arg0, arg1, arg2) + ret := m.ctrl.Call(m, "CrossChainAppRequestFailed", arg0, arg1, arg2, arg3) ret0, _ := ret[0].(error) return ret0 } // CrossChainAppRequestFailed indicates an expected call of CrossChainAppRequestFailed. 
-func (mr *MockChainVMMockRecorder) CrossChainAppRequestFailed(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockChainVMMockRecorder) CrossChainAppRequestFailed(arg0, arg1, arg2, arg3 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CrossChainAppRequestFailed", reflect.TypeOf((*MockChainVM)(nil).CrossChainAppRequestFailed), arg0, arg1, arg2) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CrossChainAppRequestFailed", reflect.TypeOf((*MockChainVM)(nil).CrossChainAppRequestFailed), arg0, arg1, arg2, arg3) } // CrossChainAppResponse mocks base method. @@ -196,7 +184,7 @@ func (m *MockChainVM) CrossChainAppResponse(arg0 context.Context, arg1 ids.ID, a } // CrossChainAppResponse indicates an expected call of CrossChainAppResponse. -func (mr *MockChainVMMockRecorder) CrossChainAppResponse(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +func (mr *MockChainVMMockRecorder) CrossChainAppResponse(arg0, arg1, arg2, arg3 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CrossChainAppResponse", reflect.TypeOf((*MockChainVM)(nil).CrossChainAppResponse), arg0, arg1, arg2, arg3) } @@ -210,7 +198,7 @@ func (m *MockChainVM) Disconnected(arg0 context.Context, arg1 ids.NodeID) error } // Disconnected indicates an expected call of Disconnected. -func (mr *MockChainVMMockRecorder) Disconnected(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockChainVMMockRecorder) Disconnected(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Disconnected", reflect.TypeOf((*MockChainVM)(nil).Disconnected), arg0, arg1) } @@ -225,28 +213,43 @@ func (m *MockChainVM) GetBlock(arg0 context.Context, arg1 ids.ID) (snowman.Block } // GetBlock indicates an expected call of GetBlock. 
-func (mr *MockChainVMMockRecorder) GetBlock(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockChainVMMockRecorder) GetBlock(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlock", reflect.TypeOf((*MockChainVM)(nil).GetBlock), arg0, arg1) } +// GetBlockIDAtHeight mocks base method. +func (m *MockChainVM) GetBlockIDAtHeight(arg0 context.Context, arg1 uint64) (ids.ID, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBlockIDAtHeight", arg0, arg1) + ret0, _ := ret[0].(ids.ID) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBlockIDAtHeight indicates an expected call of GetBlockIDAtHeight. +func (mr *MockChainVMMockRecorder) GetBlockIDAtHeight(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlockIDAtHeight", reflect.TypeOf((*MockChainVM)(nil).GetBlockIDAtHeight), arg0, arg1) +} + // HealthCheck mocks base method. -func (m *MockChainVM) HealthCheck(arg0 context.Context) (interface{}, error) { +func (m *MockChainVM) HealthCheck(arg0 context.Context) (any, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "HealthCheck", arg0) - ret0, _ := ret[0].(interface{}) + ret0, _ := ret[0].(any) ret1, _ := ret[1].(error) return ret0, ret1 } // HealthCheck indicates an expected call of HealthCheck. -func (mr *MockChainVMMockRecorder) HealthCheck(arg0 interface{}) *gomock.Call { +func (mr *MockChainVMMockRecorder) HealthCheck(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HealthCheck", reflect.TypeOf((*MockChainVM)(nil).HealthCheck), arg0) } // Initialize mocks base method. 
-func (m *MockChainVM) Initialize(arg0 context.Context, arg1 *snow.Context, arg2 manager.Manager, arg3, arg4, arg5 []byte, arg6 chan<- common.Message, arg7 []*common.Fx, arg8 common.AppSender) error { +func (m *MockChainVM) Initialize(arg0 context.Context, arg1 *snow.Context, arg2 database.Database, arg3, arg4, arg5 []byte, arg6 chan<- common.Message, arg7 []*common.Fx, arg8 common.AppSender) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Initialize", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8) ret0, _ := ret[0].(error) @@ -254,7 +257,7 @@ func (m *MockChainVM) Initialize(arg0 context.Context, arg1 *snow.Context, arg2 } // Initialize indicates an expected call of Initialize. -func (mr *MockChainVMMockRecorder) Initialize(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8 interface{}) *gomock.Call { +func (mr *MockChainVMMockRecorder) Initialize(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Initialize", reflect.TypeOf((*MockChainVM)(nil).Initialize), arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8) } @@ -269,7 +272,7 @@ func (m *MockChainVM) LastAccepted(arg0 context.Context) (ids.ID, error) { } // LastAccepted indicates an expected call of LastAccepted. -func (mr *MockChainVMMockRecorder) LastAccepted(arg0 interface{}) *gomock.Call { +func (mr *MockChainVMMockRecorder) LastAccepted(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LastAccepted", reflect.TypeOf((*MockChainVM)(nil).LastAccepted), arg0) } @@ -284,7 +287,7 @@ func (m *MockChainVM) ParseBlock(arg0 context.Context, arg1 []byte) (snowman.Blo } // ParseBlock indicates an expected call of ParseBlock. 
-func (mr *MockChainVMMockRecorder) ParseBlock(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockChainVMMockRecorder) ParseBlock(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ParseBlock", reflect.TypeOf((*MockChainVM)(nil).ParseBlock), arg0, arg1) } @@ -298,7 +301,7 @@ func (m *MockChainVM) SetPreference(arg0 context.Context, arg1 ids.ID) error { } // SetPreference indicates an expected call of SetPreference. -func (mr *MockChainVMMockRecorder) SetPreference(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockChainVMMockRecorder) SetPreference(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetPreference", reflect.TypeOf((*MockChainVM)(nil).SetPreference), arg0, arg1) } @@ -312,7 +315,7 @@ func (m *MockChainVM) SetState(arg0 context.Context, arg1 snow.State) error { } // SetState indicates an expected call of SetState. -func (mr *MockChainVMMockRecorder) SetState(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockChainVMMockRecorder) SetState(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetState", reflect.TypeOf((*MockChainVM)(nil).SetState), arg0, arg1) } @@ -326,11 +329,25 @@ func (m *MockChainVM) Shutdown(arg0 context.Context) error { } // Shutdown indicates an expected call of Shutdown. -func (mr *MockChainVMMockRecorder) Shutdown(arg0 interface{}) *gomock.Call { +func (mr *MockChainVMMockRecorder) Shutdown(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Shutdown", reflect.TypeOf((*MockChainVM)(nil).Shutdown), arg0) } +// VerifyHeightIndex mocks base method. 
+func (m *MockChainVM) VerifyHeightIndex(arg0 context.Context) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "VerifyHeightIndex", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// VerifyHeightIndex indicates an expected call of VerifyHeightIndex. +func (mr *MockChainVMMockRecorder) VerifyHeightIndex(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VerifyHeightIndex", reflect.TypeOf((*MockChainVM)(nil).VerifyHeightIndex), arg0) +} + // Version mocks base method. func (m *MockChainVM) Version(arg0 context.Context) (string, error) { m.ctrl.T.Helper() @@ -341,7 +358,7 @@ func (m *MockChainVM) Version(arg0 context.Context) (string, error) { } // Version indicates an expected call of Version. -func (mr *MockChainVMMockRecorder) Version(arg0 interface{}) *gomock.Call { +func (mr *MockChainVMMockRecorder) Version(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Version", reflect.TypeOf((*MockChainVM)(nil).Version), arg0) } diff --git a/avalanchego/snow/engine/snowman/block/mocks/state_syncable_vm.go b/avalanchego/snow/engine/snowman/block/mock_state_syncable_vm.go similarity index 82% rename from avalanchego/snow/engine/snowman/block/mocks/state_syncable_vm.go rename to avalanchego/snow/engine/snowman/block/mock_state_syncable_vm.go index 58af74a5..8d8abca5 100644 --- a/avalanchego/snow/engine/snowman/block/mocks/state_syncable_vm.go +++ b/avalanchego/snow/engine/snowman/block/mock_state_syncable_vm.go @@ -1,18 +1,19 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. 
// Source: github.com/ava-labs/avalanchego/snow/engine/snowman/block (interfaces: StateSyncableVM) +// +// Generated by this command: +// +// mockgen -package=block -destination=snow/engine/snowman/block/mock_state_syncable_vm.go github.com/ava-labs/avalanchego/snow/engine/snowman/block StateSyncableVM +// -// Package mocks is a generated GoMock package. -package mocks +// Package block is a generated GoMock package. +package block import ( context "context" reflect "reflect" - block "github.com/ava-labs/avalanchego/snow/engine/snowman/block" - gomock "github.com/golang/mock/gomock" + gomock "go.uber.org/mock/gomock" ) // MockStateSyncableVM is a mock of StateSyncableVM interface. @@ -39,61 +40,61 @@ func (m *MockStateSyncableVM) EXPECT() *MockStateSyncableVMMockRecorder { } // GetLastStateSummary mocks base method. -func (m *MockStateSyncableVM) GetLastStateSummary(arg0 context.Context) (block.StateSummary, error) { +func (m *MockStateSyncableVM) GetLastStateSummary(arg0 context.Context) (StateSummary, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetLastStateSummary", arg0) - ret0, _ := ret[0].(block.StateSummary) + ret0, _ := ret[0].(StateSummary) ret1, _ := ret[1].(error) return ret0, ret1 } // GetLastStateSummary indicates an expected call of GetLastStateSummary. -func (mr *MockStateSyncableVMMockRecorder) GetLastStateSummary(arg0 interface{}) *gomock.Call { +func (mr *MockStateSyncableVMMockRecorder) GetLastStateSummary(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLastStateSummary", reflect.TypeOf((*MockStateSyncableVM)(nil).GetLastStateSummary), arg0) } // GetOngoingSyncStateSummary mocks base method. 
-func (m *MockStateSyncableVM) GetOngoingSyncStateSummary(arg0 context.Context) (block.StateSummary, error) { +func (m *MockStateSyncableVM) GetOngoingSyncStateSummary(arg0 context.Context) (StateSummary, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetOngoingSyncStateSummary", arg0) - ret0, _ := ret[0].(block.StateSummary) + ret0, _ := ret[0].(StateSummary) ret1, _ := ret[1].(error) return ret0, ret1 } // GetOngoingSyncStateSummary indicates an expected call of GetOngoingSyncStateSummary. -func (mr *MockStateSyncableVMMockRecorder) GetOngoingSyncStateSummary(arg0 interface{}) *gomock.Call { +func (mr *MockStateSyncableVMMockRecorder) GetOngoingSyncStateSummary(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetOngoingSyncStateSummary", reflect.TypeOf((*MockStateSyncableVM)(nil).GetOngoingSyncStateSummary), arg0) } // GetStateSummary mocks base method. -func (m *MockStateSyncableVM) GetStateSummary(arg0 context.Context, arg1 uint64) (block.StateSummary, error) { +func (m *MockStateSyncableVM) GetStateSummary(arg0 context.Context, arg1 uint64) (StateSummary, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetStateSummary", arg0, arg1) - ret0, _ := ret[0].(block.StateSummary) + ret0, _ := ret[0].(StateSummary) ret1, _ := ret[1].(error) return ret0, ret1 } // GetStateSummary indicates an expected call of GetStateSummary. -func (mr *MockStateSyncableVMMockRecorder) GetStateSummary(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockStateSyncableVMMockRecorder) GetStateSummary(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetStateSummary", reflect.TypeOf((*MockStateSyncableVM)(nil).GetStateSummary), arg0, arg1) } // ParseStateSummary mocks base method. 
-func (m *MockStateSyncableVM) ParseStateSummary(arg0 context.Context, arg1 []byte) (block.StateSummary, error) { +func (m *MockStateSyncableVM) ParseStateSummary(arg0 context.Context, arg1 []byte) (StateSummary, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ParseStateSummary", arg0, arg1) - ret0, _ := ret[0].(block.StateSummary) + ret0, _ := ret[0].(StateSummary) ret1, _ := ret[1].(error) return ret0, ret1 } // ParseStateSummary indicates an expected call of ParseStateSummary. -func (mr *MockStateSyncableVMMockRecorder) ParseStateSummary(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockStateSyncableVMMockRecorder) ParseStateSummary(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ParseStateSummary", reflect.TypeOf((*MockStateSyncableVM)(nil).ParseStateSummary), arg0, arg1) } @@ -108,7 +109,7 @@ func (m *MockStateSyncableVM) StateSyncEnabled(arg0 context.Context) (bool, erro } // StateSyncEnabled indicates an expected call of StateSyncEnabled. -func (mr *MockStateSyncableVMMockRecorder) StateSyncEnabled(arg0 interface{}) *gomock.Call { +func (mr *MockStateSyncableVMMockRecorder) StateSyncEnabled(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateSyncEnabled", reflect.TypeOf((*MockStateSyncableVM)(nil).StateSyncEnabled), arg0) } diff --git a/avalanchego/snow/engine/snowman/block/mocks/with_verify_context.go b/avalanchego/snow/engine/snowman/block/mock_with_verify_context.go similarity index 84% rename from avalanchego/snow/engine/snowman/block/mocks/with_verify_context.go rename to avalanchego/snow/engine/snowman/block/mock_with_verify_context.go index 473ca027..1c18e3e9 100644 --- a/avalanchego/snow/engine/snowman/block/mocks/with_verify_context.go +++ b/avalanchego/snow/engine/snowman/block/mock_with_verify_context.go @@ -1,18 +1,19 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
-// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/snow/engine/snowman/block (interfaces: WithVerifyContext) +// +// Generated by this command: +// +// mockgen -package=block -destination=snow/engine/snowman/block/mock_with_verify_context.go github.com/ava-labs/avalanchego/snow/engine/snowman/block WithVerifyContext +// -// Package mocks is a generated GoMock package. -package mocks +// Package block is a generated GoMock package. +package block import ( context "context" reflect "reflect" - block "github.com/ava-labs/avalanchego/snow/engine/snowman/block" - gomock "github.com/golang/mock/gomock" + gomock "go.uber.org/mock/gomock" ) // MockWithVerifyContext is a mock of WithVerifyContext interface. @@ -48,13 +49,13 @@ func (m *MockWithVerifyContext) ShouldVerifyWithContext(arg0 context.Context) (b } // ShouldVerifyWithContext indicates an expected call of ShouldVerifyWithContext. -func (mr *MockWithVerifyContextMockRecorder) ShouldVerifyWithContext(arg0 interface{}) *gomock.Call { +func (mr *MockWithVerifyContextMockRecorder) ShouldVerifyWithContext(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ShouldVerifyWithContext", reflect.TypeOf((*MockWithVerifyContext)(nil).ShouldVerifyWithContext), arg0) } // VerifyWithContext mocks base method. -func (m *MockWithVerifyContext) VerifyWithContext(arg0 context.Context, arg1 *block.Context) error { +func (m *MockWithVerifyContext) VerifyWithContext(arg0 context.Context, arg1 *Context) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "VerifyWithContext", arg0, arg1) ret0, _ := ret[0].(error) @@ -62,7 +63,7 @@ func (m *MockWithVerifyContext) VerifyWithContext(arg0 context.Context, arg1 *bl } // VerifyWithContext indicates an expected call of VerifyWithContext. 
-func (mr *MockWithVerifyContextMockRecorder) VerifyWithContext(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockWithVerifyContextMockRecorder) VerifyWithContext(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VerifyWithContext", reflect.TypeOf((*MockWithVerifyContext)(nil).VerifyWithContext), arg0, arg1) } diff --git a/avalanchego/snow/engine/snowman/block/state_summary.go b/avalanchego/snow/engine/snowman/block/state_summary.go index 337a27d9..d89d77a2 100644 --- a/avalanchego/snow/engine/snowman/block/state_summary.go +++ b/avalanchego/snow/engine/snowman/block/state_summary.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package block diff --git a/avalanchego/snow/engine/snowman/block/state_sync_mode.go b/avalanchego/snow/engine/snowman/block/state_sync_mode.go index 79f5c2e8..35da3ab4 100644 --- a/avalanchego/snow/engine/snowman/block/state_sync_mode.go +++ b/avalanchego/snow/engine/snowman/block/state_sync_mode.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package block diff --git a/avalanchego/snow/engine/snowman/block/state_syncable_vm.go b/avalanchego/snow/engine/snowman/block/state_syncable_vm.go index 5c25f37a..04575051 100644 --- a/avalanchego/snow/engine/snowman/block/state_syncable_vm.go +++ b/avalanchego/snow/engine/snowman/block/state_syncable_vm.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package block diff --git a/avalanchego/snow/engine/snowman/block/test_batched_vm.go b/avalanchego/snow/engine/snowman/block/test_batched_vm.go index f5a94bb9..e5d654ec 100644 --- a/avalanchego/snow/engine/snowman/block/test_batched_vm.go +++ b/avalanchego/snow/engine/snowman/block/test_batched_vm.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package block @@ -9,6 +9,8 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/consensus/snowman" ) @@ -63,7 +65,7 @@ func (vm *TestBatchedVM) GetAncestors( ) } if vm.CantGetAncestors && vm.T != nil { - vm.T.Fatal(errGetAncestor) + require.FailNow(vm.T, errGetAncestor.Error()) } return nil, errGetAncestor } @@ -76,7 +78,7 @@ func (vm *TestBatchedVM) BatchedParseBlock( return vm.BatchedParseBlockF(ctx, blks) } if vm.CantBatchParseBlock && vm.T != nil { - vm.T.Fatal(errBatchedParseBlock) + require.FailNow(vm.T, errBatchedParseBlock.Error()) } return nil, errBatchedParseBlock } diff --git a/avalanchego/snow/engine/snowman/block/test_height_indexed_vm.go b/avalanchego/snow/engine/snowman/block/test_height_indexed_vm.go deleted file mode 100644 index c1587a72..00000000 --- a/avalanchego/snow/engine/snowman/block/test_height_indexed_vm.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package block - -import ( - "context" - "errors" - "testing" - - "github.com/ava-labs/avalanchego/ids" -) - -var ( - errVerifyHeightIndex = errors.New("unexpectedly called VerifyHeightIndex") - errGetBlockIDAtHeight = errors.New("unexpectedly called GetBlockIDAtHeight") - - _ HeightIndexedChainVM = (*TestHeightIndexedVM)(nil) -) - -// TestBatchedVM is a BatchedVM that is useful for testing. 
-type TestHeightIndexedVM struct { - T *testing.T - - CantVerifyHeightIndex bool - CantGetBlockIDAtHeight bool - - VerifyHeightIndexF func(context.Context) error - GetBlockIDAtHeightF func(ctx context.Context, height uint64) (ids.ID, error) -} - -func (vm *TestHeightIndexedVM) VerifyHeightIndex(ctx context.Context) error { - if vm.VerifyHeightIndexF != nil { - return vm.VerifyHeightIndexF(ctx) - } - if vm.CantVerifyHeightIndex && vm.T != nil { - vm.T.Fatal(errVerifyHeightIndex) - } - return errVerifyHeightIndex -} - -func (vm *TestHeightIndexedVM) GetBlockIDAtHeight(ctx context.Context, height uint64) (ids.ID, error) { - if vm.GetBlockIDAtHeightF != nil { - return vm.GetBlockIDAtHeightF(ctx, height) - } - if vm.CantGetBlockIDAtHeight && vm.T != nil { - vm.T.Fatal(errGetAncestor) - } - return ids.Empty, errGetBlockIDAtHeight -} diff --git a/avalanchego/snow/engine/snowman/block/test_state_summary.go b/avalanchego/snow/engine/snowman/block/test_state_summary.go index 26cd9fcc..7287cff1 100644 --- a/avalanchego/snow/engine/snowman/block/test_state_summary.go +++ b/avalanchego/snow/engine/snowman/block/test_state_summary.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package block @@ -8,6 +8,8 @@ import ( "errors" "testing" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/ids" ) @@ -44,7 +46,7 @@ func (s *TestStateSummary) Accept(ctx context.Context) (StateSyncMode, error) { return s.AcceptF(ctx) } if s.CantAccept && s.T != nil { - s.T.Fatal(errAccept) + require.FailNow(s.T, errAccept.Error()) } return StateSyncSkipped, errAccept } diff --git a/avalanchego/snow/engine/snowman/block/test_state_syncable_vm.go b/avalanchego/snow/engine/snowman/block/test_state_syncable_vm.go index 60e179e5..f1eeb960 100644 --- a/avalanchego/snow/engine/snowman/block/test_state_syncable_vm.go +++ b/avalanchego/snow/engine/snowman/block/test_state_syncable_vm.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package block @@ -7,6 +7,8 @@ import ( "context" "errors" "testing" + + "github.com/stretchr/testify/require" ) var ( @@ -40,7 +42,7 @@ func (vm *TestStateSyncableVM) StateSyncEnabled(ctx context.Context) (bool, erro return vm.StateSyncEnabledF(ctx) } if vm.CantStateSyncEnabled && vm.T != nil { - vm.T.Fatal(errStateSyncEnabled) + require.FailNow(vm.T, errStateSyncEnabled.Error()) } return false, errStateSyncEnabled } @@ -50,7 +52,7 @@ func (vm *TestStateSyncableVM) GetOngoingSyncStateSummary(ctx context.Context) ( return vm.GetOngoingSyncStateSummaryF(ctx) } if vm.CantStateSyncGetOngoingSummary && vm.T != nil { - vm.T.Fatal(errStateSyncGetOngoingSummary) + require.FailNow(vm.T, errStateSyncGetOngoingSummary.Error()) } return nil, errStateSyncGetOngoingSummary } @@ -60,7 +62,7 @@ func (vm *TestStateSyncableVM) GetLastStateSummary(ctx context.Context) (StateSu return vm.GetLastStateSummaryF(ctx) } if vm.CantGetLastStateSummary && vm.T != nil { - vm.T.Fatal(errGetLastStateSummary) + require.FailNow(vm.T, errGetLastStateSummary.Error()) } return nil, errGetLastStateSummary } @@ 
-70,7 +72,7 @@ func (vm *TestStateSyncableVM) ParseStateSummary(ctx context.Context, summaryByt return vm.ParseStateSummaryF(ctx, summaryBytes) } if vm.CantParseStateSummary && vm.T != nil { - vm.T.Fatal(errParseStateSummary) + require.FailNow(vm.T, errParseStateSummary.Error()) } return nil, errParseStateSummary } @@ -80,7 +82,7 @@ func (vm *TestStateSyncableVM) GetStateSummary(ctx context.Context, summaryHeigh return vm.GetStateSummaryF(ctx, summaryHeight) } if vm.CantGetStateSummary && vm.T != nil { - vm.T.Fatal(errGetStateSummary) + require.FailNow(vm.T, errGetStateSummary.Error()) } return nil, errGetStateSummary } diff --git a/avalanchego/snow/engine/snowman/block/test_vm.go b/avalanchego/snow/engine/snowman/block/test_vm.go index b2cce5e2..376dd270 100644 --- a/avalanchego/snow/engine/snowman/block/test_vm.go +++ b/avalanchego/snow/engine/snowman/block/test_vm.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package block @@ -7,16 +7,20 @@ import ( "context" "errors" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/common" ) var ( - errBuildBlock = errors.New("unexpectedly called BuildBlock") - errParseBlock = errors.New("unexpectedly called ParseBlock") - errGetBlock = errors.New("unexpectedly called GetBlock") - errLastAccepted = errors.New("unexpectedly called LastAccepted") + errBuildBlock = errors.New("unexpectedly called BuildBlock") + errParseBlock = errors.New("unexpectedly called ParseBlock") + errGetBlock = errors.New("unexpectedly called GetBlock") + errLastAccepted = errors.New("unexpectedly called LastAccepted") + errVerifyHeightIndex = errors.New("unexpectedly called VerifyHeightIndex") + errGetBlockIDAtHeight = errors.New("unexpectedly called GetBlockIDAtHeight") _ ChainVM = (*TestVM)(nil) ) @@ -29,13 +33,17 @@ type TestVM struct { CantParseBlock, CantGetBlock, CantSetPreference, - CantLastAccepted bool + CantLastAccepted, + CantVerifyHeightIndex, + CantGetBlockIDAtHeight bool - BuildBlockF func(context.Context) (snowman.Block, error) - ParseBlockF func(context.Context, []byte) (snowman.Block, error) - GetBlockF func(context.Context, ids.ID) (snowman.Block, error) - SetPreferenceF func(context.Context, ids.ID) error - LastAcceptedF func(context.Context) (ids.ID, error) + BuildBlockF func(context.Context) (snowman.Block, error) + ParseBlockF func(context.Context, []byte) (snowman.Block, error) + GetBlockF func(context.Context, ids.ID) (snowman.Block, error) + SetPreferenceF func(context.Context, ids.ID) error + LastAcceptedF func(context.Context) (ids.ID, error) + VerifyHeightIndexF func(context.Context) error + GetBlockIDAtHeightF func(ctx context.Context, height uint64) (ids.ID, error) } func (vm *TestVM) Default(cant bool) { @@ -53,7 +61,7 @@ func (vm *TestVM) BuildBlock(ctx context.Context) (snowman.Block, error) { 
return vm.BuildBlockF(ctx) } if vm.CantBuildBlock && vm.T != nil { - vm.T.Fatal(errBuildBlock) + require.FailNow(vm.T, errBuildBlock.Error()) } return nil, errBuildBlock } @@ -63,7 +71,7 @@ func (vm *TestVM) ParseBlock(ctx context.Context, b []byte) (snowman.Block, erro return vm.ParseBlockF(ctx, b) } if vm.CantParseBlock && vm.T != nil { - vm.T.Fatal(errParseBlock) + require.FailNow(vm.T, errParseBlock.Error()) } return nil, errParseBlock } @@ -73,7 +81,7 @@ func (vm *TestVM) GetBlock(ctx context.Context, id ids.ID) (snowman.Block, error return vm.GetBlockF(ctx, id) } if vm.CantGetBlock && vm.T != nil { - vm.T.Fatal(errGetBlock) + require.FailNow(vm.T, errGetBlock.Error()) } return nil, errGetBlock } @@ -83,7 +91,7 @@ func (vm *TestVM) SetPreference(ctx context.Context, id ids.ID) error { return vm.SetPreferenceF(ctx, id) } if vm.CantSetPreference && vm.T != nil { - vm.T.Fatalf("Unexpectedly called SetPreference") + require.FailNow(vm.T, "Unexpectedly called SetPreference") } return nil } @@ -93,7 +101,27 @@ func (vm *TestVM) LastAccepted(ctx context.Context) (ids.ID, error) { return vm.LastAcceptedF(ctx) } if vm.CantLastAccepted && vm.T != nil { - vm.T.Fatal(errLastAccepted) + require.FailNow(vm.T, errLastAccepted.Error()) } return ids.ID{}, errLastAccepted } + +func (vm *TestVM) VerifyHeightIndex(ctx context.Context) error { + if vm.VerifyHeightIndexF != nil { + return vm.VerifyHeightIndexF(ctx) + } + if vm.CantVerifyHeightIndex && vm.T != nil { + require.FailNow(vm.T, errVerifyHeightIndex.Error()) + } + return errVerifyHeightIndex +} + +func (vm *TestVM) GetBlockIDAtHeight(ctx context.Context, height uint64) (ids.ID, error) { + if vm.GetBlockIDAtHeightF != nil { + return vm.GetBlockIDAtHeightF(ctx, height) + } + if vm.CantGetBlockIDAtHeight && vm.T != nil { + require.FailNow(vm.T, errGetAncestor.Error()) + } + return ids.Empty, errGetBlockIDAtHeight +} diff --git a/avalanchego/snow/engine/snowman/block/vm.go b/avalanchego/snow/engine/snowman/block/vm.go index 
c096f9f0..4153632a 100644 --- a/avalanchego/snow/engine/snowman/block/vm.go +++ b/avalanchego/snow/engine/snowman/block/vm.go @@ -1,16 +1,23 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package block import ( "context" + "errors" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/common" ) +// ErrIndexIncomplete is used to indicate that the VM is currently repairing its +// index. +// +// TODO: Remove after v1.11.x activates. +var ErrIndexIncomplete = errors.New("query failed because height index is incomplete") + // ChainVM defines the required functionality of a Snowman VM. // // A Snowman VM is responsible for defining the representation of state, @@ -48,6 +55,24 @@ type ChainVM interface { // a definitionally accepted block, the Genesis block, that will be // returned. LastAccepted(context.Context) (ids.ID, error) + + // VerifyHeightIndex should return: + // - nil if the height index is available. + // - ErrIndexIncomplete if the height index is not currently available. + // - Any other non-standard error that may have occurred when verifying the + // index. + // + // TODO: Remove after v1.11.x activates. + VerifyHeightIndex(context.Context) error + + // GetBlockIDAtHeight returns: + // - The ID of the block that was accepted with [height]. + // - database.ErrNotFound if the [height] index is unknown. + // + // Note: A returned value of [database.ErrNotFound] typically means that the + // underlying VM was state synced and does not have access to the + // blockID at [height]. + GetBlockIDAtHeight(ctx context.Context, height uint64) (ids.ID, error) } // Getter defines the functionality for fetching a block by its ID. 
diff --git a/avalanchego/snow/engine/snowman/bootstrap/block_job.go b/avalanchego/snow/engine/snowman/bootstrap/block_job.go index f782a804..a9496316 100644 --- a/avalanchego/snow/engine/snowman/bootstrap/block_job.go +++ b/avalanchego/snow/engine/snowman/bootstrap/block_job.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package bootstrap @@ -9,7 +9,6 @@ import ( "fmt" "github.com/prometheus/client_golang/prometheus" - "go.uber.org/zap" "github.com/ava-labs/avalanchego/ids" @@ -35,7 +34,6 @@ func (p *parser) Parse(ctx context.Context, blkBytes []byte) (queue.Job, error) return nil, err } return &blockJob{ - parser: p, log: p.log, numAccepted: p.numAccepted, numDropped: p.numDropped, @@ -45,7 +43,6 @@ func (p *parser) Parse(ctx context.Context, blkBytes []byte) (queue.Job, error) } type blockJob struct { - parser *parser log logging.Logger numAccepted, numDropped prometheus.Counter blk snowman.Block @@ -100,7 +97,8 @@ func (b *blockJob) Execute(ctx context.Context) error { b.numAccepted.Inc() b.log.Trace("accepting block in bootstrapping", zap.Stringer("blkID", blkID), - zap.Uint64("blkHeight", b.blk.Height()), + zap.Uint64("height", b.blk.Height()), + zap.Time("timestamp", b.blk.Timestamp()), ) if err := b.blk.Accept(ctx); err != nil { b.log.Debug("failed to accept block during bootstrapping", diff --git a/avalanchego/snow/engine/snowman/bootstrap/bootstrapper.go b/avalanchego/snow/engine/snowman/bootstrap/bootstrapper.go index 134ce8ca..29754a24 100644 --- a/avalanchego/snow/engine/snowman/bootstrap/bootstrapper.go +++ b/avalanchego/snow/engine/snowman/bootstrap/bootstrapper.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package bootstrap @@ -18,24 +18,54 @@ import ( "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowman" + "github.com/ava-labs/avalanchego/snow/consensus/snowman/bootstrapper" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" + "github.com/ava-labs/avalanchego/utils/bimap" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/timer" "github.com/ava-labs/avalanchego/version" ) -// Parameters for delaying bootstrapping to avoid potential CPU burns -const bootstrappingDelay = 10 * time.Second +const ( + // Delay bootstrapping to avoid potential CPU burns + bootstrappingDelay = 10 * time.Second + + // statusUpdateFrequency is how many containers should be processed between + // logs + statusUpdateFrequency = 5000 + + // maxOutstandingBroadcastRequests is the maximum number of requests to have + // outstanding when broadcasting. + maxOutstandingBroadcastRequests = 50 +) var ( - _ common.BootstrapableEngine = (*bootstrapper)(nil) + _ common.BootstrapableEngine = (*Bootstrapper)(nil) errUnexpectedTimeout = errors.New("unexpected timeout fired") ) -type bootstrapper struct { +// bootstrapper repeatedly performs the bootstrapping protocol. +// +// 1. Wait until a sufficient amount of stake is connected. +// 2. Sample a small number of nodes to get the last accepted block ID +// 3. Verify against the full network that the last accepted block ID received +// in step 2 is an accepted block. +// 4. Sync the full ancestry of the last accepted block. +// 5. Execute all the fetched blocks that haven't already been executed. +// 6. Restart the bootstrapping protocol until the number of blocks being +// accepted during a bootstrapping round stops decreasing. +// +// Note: Because of step 6, the bootstrapping protocol will generally be +// performed multiple times. 
+// +// Invariant: The VM is not guaranteed to be initialized until Start has been +// called, so it must be guaranteed the VM is not used until after Start. +type Bootstrapper struct { Config + common.Halter + *metrics // list of NoOpsHandler for messages dropped by bootstrapper common.StateSummaryFrontierHandler @@ -45,21 +75,26 @@ type bootstrapper struct { common.ChitsHandler common.AppHandler - common.Bootstrapper - common.Fetcher - *metrics + requestID uint32 // Tracks the last requestID that was used in a request + + started bool + restarted bool - started bool + minority bootstrapper.Poll + majority bootstrapper.Poll - // Greatest height of the blocks passed in ForceAccepted + // Greatest height of the blocks passed in startSyncing tipHeight uint64 // Height of the last accepted block when bootstrapping starts startingHeight uint64 - // Number of blocks that were fetched on ForceAccepted + // Number of blocks that were fetched on startSyncing initiallyFetched uint64 - // Time that ForceAccepted was last called + // Time that startSyncing was last called startTime time.Time + // tracks which validators were asked for which containers in which requests + outstandingRequests *bimap.BiMap[common.Request, ids.ID] + // number of state transitions executed executedStateTransitions int @@ -79,15 +114,14 @@ type bootstrapper struct { // bootstrappedOnce ensures that the [Bootstrapped] callback is only invoked // once, even if bootstrapping is retried. 
bootstrappedOnce sync.Once + + // Called when bootstrapping is done on a specific chain + onFinished func(ctx context.Context, lastReqID uint32) error } -func New(ctx context.Context, config Config, onFinished func(ctx context.Context, lastReqID uint32) error) (common.BootstrapableEngine, error) { +func New(config Config, onFinished func(ctx context.Context, lastReqID uint32) error) (*Bootstrapper, error) { metrics, err := newMetrics("bs", config.Ctx.Registerer) - if err != nil { - return nil, err - } - - b := &bootstrapper{ + return &Bootstrapper{ Config: config, metrics: metrics, StateSummaryFrontierHandler: common.NewNoOpStateSummaryFrontierHandler(config.Ctx.Log), @@ -97,29 +131,31 @@ func New(ctx context.Context, config Config, onFinished func(ctx context.Context ChitsHandler: common.NewNoOpChitsHandler(config.Ctx.Log), AppHandler: config.VM, - Fetcher: common.Fetcher{ - OnFinished: onFinished, - }, - executedStateTransitions: math.MaxInt32, - } + minority: bootstrapper.Noop, + majority: bootstrapper.Noop, - b.parser = &parser{ - log: config.Ctx.Log, - numAccepted: b.numAccepted, - numDropped: b.numDropped, - vm: b.VM, - } - if err := b.Blocked.SetParser(ctx, b.parser); err != nil { - return nil, err - } + outstandingRequests: bimap.New[common.Request, ids.ID](), - config.Bootstrapable = b - b.Bootstrapper = common.NewCommonBootstrapper(config.Config) + executedStateTransitions: math.MaxInt, + onFinished: onFinished, + }, err +} - return b, nil +func (b *Bootstrapper) Context() *snow.ConsensusContext { + return b.Ctx } -func (b *bootstrapper) Start(ctx context.Context, startReqID uint32) error { +func (b *Bootstrapper) Clear(context.Context) error { + b.Ctx.Lock.Lock() + defer b.Ctx.Lock.Unlock() + + if err := b.Config.Blocked.Clear(); err != nil { + return err + } + return b.Config.Blocked.Commit() +} + +func (b *Bootstrapper) Start(ctx context.Context, startReqID uint32) error { b.Ctx.Log.Info("starting bootstrapper") b.Ctx.State.Set(snow.EngineState{ @@ 
-131,6 +167,16 @@ func (b *bootstrapper) Start(ctx context.Context, startReqID uint32) error { err) } + b.parser = &parser{ + log: b.Ctx.Log, + numAccepted: b.numAccepted, + numDropped: b.numDropped, + vm: b.VM, + } + if err := b.Blocked.SetParser(ctx, b.parser); err != nil { + return err + } + // Set the starting height lastAcceptedID, err := b.VM.LastAccepted(ctx) if err != nil { @@ -141,200 +187,218 @@ func (b *bootstrapper) Start(ctx context.Context, startReqID uint32) error { return fmt.Errorf("couldn't get last accepted block: %w", err) } b.startingHeight = lastAccepted.Height() - b.Config.SharedCfg.RequestID = startReqID + b.requestID = startReqID - if !b.StartupTracker.ShouldStart() { - return nil + return b.tryStartBootstrapping(ctx) +} + +func (b *Bootstrapper) Connected(ctx context.Context, nodeID ids.NodeID, nodeVersion *version.Application) error { + if err := b.VM.Connected(ctx, nodeID, nodeVersion); err != nil { + return err } - b.started = true - return b.Startup(ctx) + if err := b.StartupTracker.Connected(ctx, nodeID, nodeVersion); err != nil { + return err + } + // Ensure fetchFrom reflects proper validator list + if _, ok := b.Beacons.GetValidator(b.Ctx.SubnetID, nodeID); ok { + b.fetchFrom.Add(nodeID) + } + + return b.tryStartBootstrapping(ctx) } -// Ancestors handles the receipt of multiple containers. 
Should be received in -// response to a GetAncestors message to [nodeID] with request ID [requestID] -func (b *bootstrapper) Ancestors(ctx context.Context, nodeID ids.NodeID, requestID uint32, blks [][]byte) error { - // Make sure this is in response to a request we made - wantedBlkID, ok := b.OutstandingRequests.Remove(nodeID, requestID) - if !ok { // this message isn't in response to a request we made - b.Ctx.Log.Debug("received unexpected Ancestors", - zap.Stringer("nodeID", nodeID), - zap.Uint32("requestID", requestID), - ) - return nil +func (b *Bootstrapper) Disconnected(ctx context.Context, nodeID ids.NodeID) error { + if err := b.VM.Disconnected(ctx, nodeID); err != nil { + return err } - lenBlks := len(blks) - if lenBlks == 0 { - b.Ctx.Log.Debug("received Ancestors with no block", - zap.Stringer("nodeID", nodeID), - zap.Uint32("requestID", requestID), - ) + if err := b.StartupTracker.Disconnected(ctx, nodeID); err != nil { + return err + } - b.markUnavailable(nodeID) + b.markUnavailable(nodeID) + return nil +} - // Send another request for this - return b.fetch(ctx, wantedBlkID) +// tryStartBootstrapping will start bootstrapping the first time it is called +// while the startupTracker is reporting that the protocol should start. 
+func (b *Bootstrapper) tryStartBootstrapping(ctx context.Context) error { + if b.started || !b.StartupTracker.ShouldStart() { + return nil } - // This node has responded - so add it back into the set - b.fetchFrom.Add(nodeID) + b.started = true + return b.startBootstrapping(ctx) +} - if lenBlks > b.Config.AncestorsMaxContainersReceived { - blks = blks[:b.Config.AncestorsMaxContainersReceived] - b.Ctx.Log.Debug("ignoring containers in Ancestors", - zap.Int("numContainers", lenBlks-b.Config.AncestorsMaxContainersReceived), - zap.Stringer("nodeID", nodeID), - zap.Uint32("requestID", requestID), - ) +func (b *Bootstrapper) startBootstrapping(ctx context.Context) error { + currentBeacons := b.Beacons.GetMap(b.Ctx.SubnetID) + nodeWeights := make(map[ids.NodeID]uint64, len(currentBeacons)) + for nodeID, beacon := range currentBeacons { + nodeWeights[nodeID] = beacon.Weight } - blocks, err := block.BatchedParseBlock(ctx, b.VM, blks) - if err != nil { // the provided blocks couldn't be parsed - b.Ctx.Log.Debug("failed to parse blocks in Ancestors", - zap.Stringer("nodeID", nodeID), - zap.Uint32("requestID", requestID), - zap.Error(err), - ) - return b.fetch(ctx, wantedBlkID) + frontierNodes, err := bootstrapper.Sample(nodeWeights, b.SampleK) + if err != nil { + return err } - if len(blocks) == 0 { - b.Ctx.Log.Debug("parsing blocks returned an empty set of blocks", - zap.Stringer("nodeID", nodeID), - zap.Uint32("requestID", requestID), - ) - return b.fetch(ctx, wantedBlkID) - } + b.Ctx.Log.Debug("sampled nodes to seed bootstrapping frontier", + zap.Reflect("sampledNodes", frontierNodes), + zap.Int("numNodes", len(nodeWeights)), + ) - requestedBlock := blocks[0] - if actualID := requestedBlock.ID(); actualID != wantedBlkID { - b.Ctx.Log.Debug("first block is not the requested block", - zap.Stringer("expectedBlkID", wantedBlkID), - zap.Stringer("blkID", actualID), + b.minority = bootstrapper.NewMinority( + b.Ctx.Log, + frontierNodes, + maxOutstandingBroadcastRequests, + ) + 
b.majority = bootstrapper.NewMajority( + b.Ctx.Log, + nodeWeights, + maxOutstandingBroadcastRequests, + ) + + if accepted, finalized := b.majority.Result(ctx); finalized { + b.Ctx.Log.Info("bootstrapping skipped", + zap.String("reason", "no provided bootstraps"), ) - return b.fetch(ctx, wantedBlkID) + return b.startSyncing(ctx, accepted) } - blockSet := make(map[ids.ID]snowman.Block, len(blocks)) - for _, block := range blocks[1:] { - blockSet[block.ID()] = block - } - return b.process(ctx, requestedBlock, blockSet) + b.requestID++ + return b.sendBootstrappingMessagesOrFinish(ctx) } -func (b *bootstrapper) GetAncestorsFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32) error { - blkID, ok := b.OutstandingRequests.Remove(nodeID, requestID) - if !ok { - b.Ctx.Log.Debug("unexpectedly called GetAncestorsFailed", - zap.Stringer("nodeID", nodeID), - zap.Uint32("requestID", requestID), - ) +func (b *Bootstrapper) sendBootstrappingMessagesOrFinish(ctx context.Context) error { + if peers := b.minority.GetPeers(ctx); peers.Len() > 0 { + b.Sender.SendGetAcceptedFrontier(ctx, peers, b.requestID) return nil } - // This node timed out their request, so we can add them back to [fetchFrom] - b.fetchFrom.Add(nodeID) - - // Send another request for this - return b.fetch(ctx, blkID) -} - -func (b *bootstrapper) Connected(ctx context.Context, nodeID ids.NodeID, nodeVersion *version.Application) error { - if err := b.VM.Connected(ctx, nodeID, nodeVersion); err != nil { - return err + potentialAccepted, finalized := b.minority.Result(ctx) + if !finalized { + // We haven't finalized the accepted frontier, so we should wait for the + // outstanding requests. 
+ return nil } - if err := b.StartupTracker.Connected(ctx, nodeID, nodeVersion); err != nil { - return err - } - // Ensure fetchFrom reflects proper validator list - if b.Beacons.Contains(nodeID) { - b.fetchFrom.Add(nodeID) + if peers := b.majority.GetPeers(ctx); peers.Len() > 0 { + b.Sender.SendGetAccepted(ctx, peers, b.requestID, potentialAccepted) + return nil } - if b.started || !b.StartupTracker.ShouldStart() { + accepted, finalized := b.majority.Result(ctx) + if !finalized { + // We haven't finalized the accepted set, so we should wait for the + // outstanding requests. return nil } - b.started = true - return b.Startup(ctx) -} - -func (b *bootstrapper) Disconnected(ctx context.Context, nodeID ids.NodeID) error { - if err := b.VM.Disconnected(ctx, nodeID); err != nil { - return err + numAccepted := len(accepted) + if numAccepted == 0 { + b.Ctx.Log.Debug("restarting bootstrap", + zap.String("reason", "no blocks accepted"), + zap.Int("numBeacons", b.Beacons.Count(b.Ctx.SubnetID)), + ) + // Invariant: These functions are mutually recursive. However, when + // [startBootstrapping] calls [sendBootstrappingMessagesOrFinish], it is guaranteed + // to exit when sending GetAcceptedFrontier requests.
+ return b.startBootstrapping(ctx) } - if err := b.StartupTracker.Disconnected(ctx, nodeID); err != nil { - return err + if !b.restarted { + b.Ctx.Log.Info("bootstrapping started syncing", + zap.Int("numAccepted", numAccepted), + ) + } else { + b.Ctx.Log.Debug("bootstrapping started syncing", + zap.Int("numAccepted", numAccepted), + ) } - b.markUnavailable(nodeID) - return nil + return b.startSyncing(ctx, accepted) } -func (b *bootstrapper) Timeout(ctx context.Context) error { - if !b.awaitingTimeout { - return errUnexpectedTimeout +func (b *Bootstrapper) AcceptedFrontier(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerID ids.ID) error { + if requestID != b.requestID { + b.Ctx.Log.Debug("received out-of-sync AcceptedFrontier message", + zap.Stringer("nodeID", nodeID), + zap.Uint32("expectedRequestID", b.requestID), + zap.Uint32("requestID", requestID), + ) + return nil } - b.awaitingTimeout = false - if !b.Config.BootstrapTracker.IsBootstrapped() { - return b.Restart(ctx, true) + if err := b.minority.RecordOpinion(ctx, nodeID, set.Of(containerID)); err != nil { + return err } - b.fetchETA.Set(0) - return b.OnFinished(ctx, b.Config.SharedCfg.RequestID) + return b.sendBootstrappingMessagesOrFinish(ctx) } -func (*bootstrapper) Gossip(context.Context) error { - return nil -} +func (b *Bootstrapper) GetAcceptedFrontierFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32) error { + if requestID != b.requestID { + b.Ctx.Log.Debug("received out-of-sync GetAcceptedFrontierFailed message", + zap.Stringer("nodeID", nodeID), + zap.Uint32("expectedRequestID", b.requestID), + zap.Uint32("requestID", requestID), + ) + return nil + } -func (b *bootstrapper) Shutdown(ctx context.Context) error { - b.Ctx.Log.Info("shutting down bootstrapper") - return b.VM.Shutdown(ctx) + if err := b.minority.RecordOpinion(ctx, nodeID, nil); err != nil { + return err + } + return b.sendBootstrappingMessagesOrFinish(ctx) } -func (b *bootstrapper) Notify(_ 
context.Context, msg common.Message) error { - if msg != common.StateSyncDone { - b.Ctx.Log.Warn("received an unexpected message from the VM", - zap.Stringer("msg", msg), +func (b *Bootstrapper) Accepted(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerIDs set.Set[ids.ID]) error { + if requestID != b.requestID { + b.Ctx.Log.Debug("received out-of-sync Accepted message", + zap.Stringer("nodeID", nodeID), + zap.Uint32("expectedRequestID", b.requestID), + zap.Uint32("requestID", requestID), ) return nil } - b.Ctx.StateSyncing.Set(false) - return nil + if err := b.majority.RecordOpinion(ctx, nodeID, containerIDs); err != nil { + return err + } + return b.sendBootstrappingMessagesOrFinish(ctx) } -func (b *bootstrapper) HealthCheck(ctx context.Context) (interface{}, error) { - vmIntf, vmErr := b.VM.HealthCheck(ctx) - intf := map[string]interface{}{ - "consensus": struct{}{}, - "vm": vmIntf, +func (b *Bootstrapper) GetAcceptedFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32) error { + if requestID != b.requestID { + b.Ctx.Log.Debug("received out-of-sync GetAcceptedFailed message", + zap.Stringer("nodeID", nodeID), + zap.Uint32("expectedRequestID", b.requestID), + zap.Uint32("requestID", requestID), + ) + return nil } - return intf, vmErr -} -func (b *bootstrapper) GetVM() common.VM { - return b.VM + if err := b.majority.RecordOpinion(ctx, nodeID, nil); err != nil { + return err + } + return b.sendBootstrappingMessagesOrFinish(ctx) } -func (b *bootstrapper) ForceAccepted(ctx context.Context, acceptedContainerIDs []ids.ID) error { - pendingContainerIDs := b.Blocked.MissingIDs() - +func (b *Bootstrapper) startSyncing(ctx context.Context, acceptedContainerIDs []ids.ID) error { // Initialize the fetch from set to the currently preferred peers b.fetchFrom = b.StartupTracker.PreferredPeers() + pendingContainerIDs := b.Blocked.MissingIDs() // Append the list of accepted container IDs to pendingContainerIDs to ensure // we iterate over every 
container that must be traversed. pendingContainerIDs = append(pendingContainerIDs, acceptedContainerIDs...) - toProcess := make([]snowman.Block, 0, len(pendingContainerIDs)) b.Ctx.Log.Debug("starting bootstrapping", zap.Int("numPendingBlocks", len(pendingContainerIDs)), zap.Int("numAcceptedBlocks", len(acceptedContainerIDs)), ) + + toProcess := make([]snowman.Block, 0, len(pendingContainerIDs)) for _, blkID := range pendingContainerIDs { b.Blocked.AddMissingID(blkID) @@ -360,19 +424,19 @@ func (b *bootstrapper) ForceAccepted(ctx context.Context, acceptedContainerIDs [ } } - return b.checkFinish(ctx) + return b.tryStartExecuting(ctx) } // Get block [blkID] and its ancestors from a validator -func (b *bootstrapper) fetch(ctx context.Context, blkID ids.ID) error { +func (b *Bootstrapper) fetch(ctx context.Context, blkID ids.ID) error { // Make sure we haven't already requested this block - if b.OutstandingRequests.Contains(blkID) { + if b.outstandingRequests.HasValue(blkID) { return nil } // Make sure we don't already have this block if _, err := b.VM.GetBlock(ctx, blkID); err == nil { - return b.checkFinish(ctx) + return b.tryStartExecuting(ctx) } validatorID, ok := b.fetchFrom.Peek() @@ -383,17 +447,118 @@ func (b *bootstrapper) fetch(ctx context.Context, blkID ids.ID) error { // We only allow one outbound request at a time from a node b.markUnavailable(validatorID) - b.Config.SharedCfg.RequestID++ + b.requestID++ - b.OutstandingRequests.Add(validatorID, b.Config.SharedCfg.RequestID, blkID) - b.Config.Sender.SendGetAncestors(ctx, validatorID, b.Config.SharedCfg.RequestID, blkID) // request block and ancestors + b.outstandingRequests.Put( + common.Request{ + NodeID: validatorID, + RequestID: b.requestID, + }, + blkID, + ) + b.Config.Sender.SendGetAncestors(ctx, validatorID, b.requestID, blkID) // request block and ancestors return nil } +// Ancestors handles the receipt of multiple containers. 
Should be received in +// response to a GetAncestors message to [nodeID] with request ID [requestID] +func (b *Bootstrapper) Ancestors(ctx context.Context, nodeID ids.NodeID, requestID uint32, blks [][]byte) error { + // Make sure this is in response to a request we made + wantedBlkID, ok := b.outstandingRequests.DeleteKey(common.Request{ + NodeID: nodeID, + RequestID: requestID, + }) + if !ok { // this message isn't in response to a request we made + b.Ctx.Log.Debug("received unexpected Ancestors", + zap.Stringer("nodeID", nodeID), + zap.Uint32("requestID", requestID), + ) + return nil + } + + lenBlks := len(blks) + if lenBlks == 0 { + b.Ctx.Log.Debug("received Ancestors with no block", + zap.Stringer("nodeID", nodeID), + zap.Uint32("requestID", requestID), + ) + + b.markUnavailable(nodeID) + + // Send another request for this + return b.fetch(ctx, wantedBlkID) + } + + // This node has responded - so add it back into the set + b.fetchFrom.Add(nodeID) + + if lenBlks > b.Config.AncestorsMaxContainersReceived { + blks = blks[:b.Config.AncestorsMaxContainersReceived] + b.Ctx.Log.Debug("ignoring containers in Ancestors", + zap.Int("numContainers", lenBlks-b.Config.AncestorsMaxContainersReceived), + zap.Stringer("nodeID", nodeID), + zap.Uint32("requestID", requestID), + ) + } + + blocks, err := block.BatchedParseBlock(ctx, b.VM, blks) + if err != nil { // the provided blocks couldn't be parsed + b.Ctx.Log.Debug("failed to parse blocks in Ancestors", + zap.Stringer("nodeID", nodeID), + zap.Uint32("requestID", requestID), + zap.Error(err), + ) + return b.fetch(ctx, wantedBlkID) + } + + if len(blocks) == 0 { + b.Ctx.Log.Debug("parsing blocks returned an empty set of blocks", + zap.Stringer("nodeID", nodeID), + zap.Uint32("requestID", requestID), + ) + return b.fetch(ctx, wantedBlkID) + } + + requestedBlock := blocks[0] + if actualID := requestedBlock.ID(); actualID != wantedBlkID { + b.Ctx.Log.Debug("first block is not the requested block", + zap.Stringer("expectedBlkID", 
wantedBlkID), + zap.Stringer("blkID", actualID), + ) + return b.fetch(ctx, wantedBlkID) + } + + blockSet := make(map[ids.ID]snowman.Block, len(blocks)) + for _, block := range blocks[1:] { + blockSet[block.ID()] = block + } + return b.process(ctx, requestedBlock, blockSet) +} + +func (b *Bootstrapper) GetAncestorsFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32) error { + blkID, ok := b.outstandingRequests.DeleteKey(common.Request{ + NodeID: nodeID, + RequestID: requestID, + }) + if !ok { + b.Ctx.Log.Debug("unexpectedly called GetAncestorsFailed", + zap.Stringer("nodeID", nodeID), + zap.Uint32("requestID", requestID), + ) + return nil + } + + // This node timed out their request, so we can add them back to [fetchFrom] + b.fetchFrom.Add(nodeID) + + // Send another request for this + return b.fetch(ctx, blkID) +} + // markUnavailable removes [nodeID] from the set of peers used to fetch // ancestors. If the set becomes empty, it is reset to the currently preferred // peers so bootstrapping can continue. -func (b *bootstrapper) markUnavailable(nodeID ids.NodeID) { +func (b *Bootstrapper) markUnavailable(nodeID ids.NodeID) { b.fetchFrom.Remove(nodeID) // if [fetchFrom] has become empty, reset it to the currently preferred @@ -403,13 +568,6 @@ func (b *bootstrapper) markUnavailable(nodeID ids.NodeID) { } } -func (b *bootstrapper) Clear() error { - if err := b.Config.Blocked.Clear(); err != nil { - return err - } - return b.Config.Blocked.Commit() -} - // process a series of consecutive blocks starting at [blk]. // // - blk is a block that is assumed to have been marked as acceptable by the @@ -420,7 +578,7 @@ func (b *bootstrapper) Clear() error { // // If [blk]'s height is <= the last accepted height, then it will be removed // from the missingIDs set. 
-func (b *bootstrapper) process(ctx context.Context, blk snowman.Block, processingBlocks map[ids.ID]snowman.Block) error { +func (b *Bootstrapper) process(ctx context.Context, blk snowman.Block, processingBlocks map[ids.ID]snowman.Block) error { for { blkID := blk.ID() if b.Halted() { @@ -446,7 +604,7 @@ func (b *bootstrapper) process(ctx context.Context, blk snowman.Block, processin if err := b.Blocked.Commit(); err != nil { return err } - return b.checkFinish(ctx) + return b.tryStartExecuting(ctx) } // If this block is going to be accepted, make sure to update the @@ -456,7 +614,6 @@ func (b *bootstrapper) process(ctx context.Context, blk snowman.Block, processin } pushed, err := b.Blocked.Push(ctx, &blockJob{ - parser: b.parser, log: b.Ctx.Log, numAccepted: b.numAccepted, numDropped: b.numDropped, @@ -473,7 +630,7 @@ func (b *bootstrapper) process(ctx context.Context, blk snowman.Block, processin if err := b.Blocked.Commit(); err != nil { return err } - return b.checkFinish(ctx) + return b.tryStartExecuting(ctx) } // We added a new block to the queue, so track that it was fetched @@ -481,7 +638,7 @@ func (b *bootstrapper) process(ctx context.Context, blk snowman.Block, processin // Periodically log progress blocksFetchedSoFar := b.Blocked.Jobs.PendingJobs() - if blocksFetchedSoFar%common.StatusUpdateFrequency == 0 { + if blocksFetchedSoFar%statusUpdateFrequency == 0 { totalBlocksToFetch := b.tipHeight - b.startingHeight eta := timer.EstimateETA( b.startTime, @@ -490,7 +647,7 @@ func (b *bootstrapper) process(ctx context.Context, blk snowman.Block, processin ) b.fetchETA.Set(float64(eta)) - if !b.Config.SharedCfg.Restarted { + if !b.restarted { b.Ctx.Log.Info("fetching blocks", zap.Uint64("numFetchedBlocks", blocksFetchedSoFar), zap.Uint64("numTotalBlocks", totalBlocksToFetch), @@ -534,22 +691,23 @@ func (b *bootstrapper) process(ctx context.Context, blk snowman.Block, processin if err := b.Blocked.Commit(); err != nil { return err } - return b.checkFinish(ctx) + 
return b.tryStartExecuting(ctx) } } -// checkFinish repeatedly executes pending transactions and requests new frontier vertices until there aren't any new ones -// after which it finishes the bootstrap process -func (b *bootstrapper) checkFinish(ctx context.Context) error { +// tryStartExecuting executes all pending blocks if there are no more blocks +// being fetched. After executing all pending blocks it will either restart +// bootstrapping, or transition into normal operations. +func (b *Bootstrapper) tryStartExecuting(ctx context.Context) error { if numPending := b.Blocked.NumMissingIDs(); numPending != 0 { return nil } - if b.IsBootstrapped() || b.awaitingTimeout { + if b.Ctx.State.Get().State == snow.NormalOp || b.awaitingTimeout { return nil } - if !b.Config.SharedCfg.Restarted { + if !b.restarted { b.Ctx.Log.Info("executing blocks", zap.Uint64("numPendingJobs", b.Blocked.PendingJobs()), ) @@ -563,7 +721,7 @@ func (b *bootstrapper) checkFinish(ctx context.Context) error { ctx, b.Config.Ctx, b, - b.Config.SharedCfg.Restarted, + b.restarted, b.Ctx.BlockAcceptor, ) if err != nil || b.Halted() { @@ -576,16 +734,14 @@ func (b *bootstrapper) checkFinish(ctx context.Context) error { // Note that executedBlocks < c*previouslyExecuted ( 0 <= c < 1 ) is enforced // so that the bootstrapping process will terminate even as new blocks are // being issued. - if b.Config.RetryBootstrap && executedBlocks > 0 && executedBlocks < previouslyExecuted/2 { - return b.Restart(ctx, true) + if executedBlocks > 0 && executedBlocks < previouslyExecuted/2 { + return b.restartBootstrapping(ctx) } // If there is an additional callback, notify them that this chain has been // synced. 
if b.Bootstrapped != nil { - b.bootstrappedOnce.Do(func() { - b.Bootstrapped() - }) + b.bootstrappedOnce.Do(b.Bootstrapped) } // Notify the subnet that this chain is synced @@ -594,7 +750,7 @@ func (b *bootstrapper) checkFinish(ctx context.Context) error { // If the subnet hasn't finished bootstrapping, this chain should remain // syncing. if !b.Config.BootstrapTracker.IsBootstrapped() { - if !b.Config.SharedCfg.Restarted { + if !b.restarted { b.Ctx.Log.Info("waiting for the remaining chains in this subnet to finish syncing") } else { b.Ctx.Log.Debug("waiting for the remaining chains in this subnet to finish syncing") @@ -606,5 +762,62 @@ func (b *bootstrapper) checkFinish(ctx context.Context) error { return nil } b.fetchETA.Set(0) - return b.OnFinished(ctx, b.Config.SharedCfg.RequestID) + return b.onFinished(ctx, b.requestID) +} + +func (b *Bootstrapper) Timeout(ctx context.Context) error { + if !b.awaitingTimeout { + return errUnexpectedTimeout + } + b.awaitingTimeout = false + + if !b.Config.BootstrapTracker.IsBootstrapped() { + return b.restartBootstrapping(ctx) + } + b.fetchETA.Set(0) + return b.onFinished(ctx, b.requestID) +} + +func (b *Bootstrapper) restartBootstrapping(ctx context.Context) error { + b.Ctx.Log.Debug("Checking for new frontiers") + b.restarted = true + b.outstandingRequests = bimap.New[common.Request, ids.ID]() + return b.startBootstrapping(ctx) +} + +func (b *Bootstrapper) Notify(_ context.Context, msg common.Message) error { + if msg != common.StateSyncDone { + b.Ctx.Log.Warn("received an unexpected message from the VM", + zap.Stringer("msg", msg), + ) + return nil + } + + b.Ctx.StateSyncing.Set(false) + return nil +} + +func (b *Bootstrapper) HealthCheck(ctx context.Context) (interface{}, error) { + b.Ctx.Lock.Lock() + defer b.Ctx.Lock.Unlock() + + vmIntf, vmErr := b.VM.HealthCheck(ctx) + intf := map[string]interface{}{ + "consensus": struct{}{}, + "vm": vmIntf, + } + return intf, vmErr +} + +func (b *Bootstrapper) Shutdown(ctx 
context.Context) error { + b.Ctx.Log.Info("shutting down bootstrapper") + + b.Ctx.Lock.Lock() + defer b.Ctx.Lock.Unlock() + + return b.VM.Shutdown(ctx) +} + +func (*Bootstrapper) Gossip(context.Context) error { + return nil } diff --git a/avalanchego/snow/engine/snowman/bootstrap/bootstrapper_test.go b/avalanchego/snow/engine/snowman/bootstrap/bootstrapper_test.go index 2fc50adb..d5cb9cc7 100644 --- a/avalanchego/snow/engine/snowman/bootstrap/bootstrapper_test.go +++ b/avalanchego/snow/engine/snowman/bootstrap/bootstrapper_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package bootstrap @@ -8,9 +8,9 @@ import ( "context" "errors" "testing" + "time" "github.com/prometheus/client_golang/prometheus" - "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/database" @@ -25,8 +25,10 @@ import ( "github.com/ava-labs/avalanchego/snow/engine/common/tracker" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" "github.com/ava-labs/avalanchego/snow/engine/snowman/getter" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/version" ) @@ -34,9 +36,12 @@ import ( var errUnknownBlock = errors.New("unknown block") func newConfig(t *testing.T) (Config, ids.NodeID, *common.SenderTest, *block.TestVM) { - ctx := snow.DefaultConsensusContextTest() + require := require.New(t) + + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) - peers := validators.NewSet() + vdrs := validators.NewManager() sender := &common.SenderTest{} vm := &block.TestVM{} @@ -61,43 +66,32 @@ func newConfig(t *testing.T) (Config, ids.NodeID, *common.SenderTest, *block.Tes 
sender.CantSendGetAcceptedFrontier = false peer := ids.GenerateTestNodeID() - if err := peers.Add(peer, nil, ids.Empty, 1); err != nil { - t.Fatal(err) - } + require.NoError(vdrs.AddStaker(ctx.SubnetID, peer, nil, ids.Empty, 1)) peerTracker := tracker.NewPeers() - startupTracker := tracker.NewStartup(peerTracker, peers.Weight()/2+1) - peers.RegisterCallbackListener(startupTracker) + totalWeight, err := vdrs.TotalWeight(ctx.SubnetID) + require.NoError(err) + startupTracker := tracker.NewStartup(peerTracker, totalWeight/2+1) + vdrs.RegisterCallbackListener(ctx.SubnetID, startupTracker) - if err := startupTracker.Connected(context.Background(), peer, version.CurrentApp); err != nil { - t.Fatal(err) - } + require.NoError(startupTracker.Connected(context.Background(), peer, version.CurrentApp)) + + snowGetHandler, err := getter.New(vm, sender, ctx.Log, time.Second, 2000, ctx.Registerer) + require.NoError(err) - commonConfig := common.Config{ + blocker, _ := queue.NewWithMissing(memdb.New(), "", prometheus.NewRegistry()) + return Config{ + AllGetsServer: snowGetHandler, Ctx: ctx, - Beacons: peers, - SampleK: peers.Len(), - Alpha: peers.Weight()/2 + 1, + Beacons: vdrs, + SampleK: vdrs.Count(ctx.SubnetID), StartupTracker: startupTracker, Sender: sender, BootstrapTracker: bootstrapTracker, Timer: &common.TimerTest{}, - AncestorsMaxContainersSent: 2000, AncestorsMaxContainersReceived: 2000, - SharedCfg: &common.SharedConfig{}, - } - - snowGetHandler, err := getter.New(vm, commonConfig) - if err != nil { - t.Fatal(err) - } - - blocker, _ := queue.NewWithMissing(memdb.New(), "", prometheus.NewRegistry()) - return Config{ - Config: commonConfig, - AllGetsServer: snowGetHandler, - Blocked: blocker, - VM: vm, + Blocked: blocker, + VM: vm, }, peer, sender, vm } @@ -111,39 +105,33 @@ func TestBootstrapperStartsOnlyIfEnoughStakeIsConnected(t *testing.T) { sender.Default(true) vm.Default(true) - + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := 
snowtest.ConsensusContext(snowCtx) // create boostrapper configuration - peers := validators.NewSet() + peers := validators.NewManager() sampleK := 2 alpha := uint64(10) startupAlpha := alpha peerTracker := tracker.NewPeers() startupTracker := tracker.NewStartup(peerTracker, startupAlpha) - peers.RegisterCallbackListener(startupTracker) + peers.RegisterCallbackListener(ctx.SubnetID, startupTracker) - commonCfg := common.Config{ - Ctx: snow.DefaultConsensusContextTest(), + blocker, _ := queue.NewWithMissing(memdb.New(), "", prometheus.NewRegistry()) + snowGetHandler, err := getter.New(vm, sender, ctx.Log, time.Second, 2000, ctx.Registerer) + require.NoError(err) + cfg := Config{ + AllGetsServer: snowGetHandler, + Ctx: ctx, Beacons: peers, SampleK: sampleK, - Alpha: alpha, StartupTracker: startupTracker, Sender: sender, BootstrapTracker: &common.BootstrapTrackerTest{}, Timer: &common.TimerTest{}, - AncestorsMaxContainersSent: 2000, AncestorsMaxContainersReceived: 2000, - SharedCfg: &common.SharedConfig{}, - } - - blocker, _ := queue.NewWithMissing(memdb.New(), "", prometheus.NewRegistry()) - snowGetHandler, err := getter.New(vm, commonCfg) - require.NoError(err) - cfg := Config{ - Config: commonCfg, - AllGetsServer: snowGetHandler, - Blocked: blocker, - VM: vm, + Blocked: blocker, + VM: vm, } blkID0 := ids.Empty.Prefix(0) @@ -173,7 +161,7 @@ func TestBootstrapperStartsOnlyIfEnoughStakeIsConnected(t *testing.T) { }) return nil } - bs, err := New(context.Background(), cfg, dummyCallback) + bs, err := New(cfg, dummyCallback) require.NoError(err) vm.CantSetState = false @@ -194,7 +182,7 @@ func TestBootstrapperStartsOnlyIfEnoughStakeIsConnected(t *testing.T) { // attempt starting bootstrapper with not enough stake connected. Bootstrapper should stall. 
vdr0 := ids.GenerateTestNodeID() - require.NoError(peers.Add(vdr0, nil, ids.Empty, startupAlpha/2)) + require.NoError(peers.AddStaker(ctx.SubnetID, vdr0, nil, ids.Empty, startupAlpha/2)) require.NoError(bs.Connected(context.Background(), vdr0, version.CurrentApp)) require.NoError(bs.Start(context.Background(), 0)) @@ -202,13 +190,15 @@ func TestBootstrapperStartsOnlyIfEnoughStakeIsConnected(t *testing.T) { // finally attempt starting bootstrapper with enough stake connected. Frontiers should be requested. vdr := ids.GenerateTestNodeID() - require.NoError(peers.Add(vdr, nil, ids.Empty, startupAlpha)) + require.NoError(peers.AddStaker(ctx.SubnetID, vdr, nil, ids.Empty, startupAlpha)) require.NoError(bs.Connected(context.Background(), vdr, version.CurrentApp)) require.True(frontierRequested) } // Single node in the accepted frontier; no need to fetch parent func TestBootstrapperSingleFrontier(t *testing.T) { + require := require.New(t) + config, _, _, vm := newConfig(t) blkID0 := ids.Empty.Prefix(0) @@ -240,12 +230,11 @@ func TestBootstrapperSingleFrontier(t *testing.T) { return blk0.ID(), nil } vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { - require.Equal(t, blk0.ID(), blkID) + require.Equal(blk0.ID(), blkID) return blk0, nil } bs, err := New( - context.Background(), config, func(context.Context, uint32) error { config.Ctx.State.Set(snow.EngineState{ @@ -255,14 +244,10 @@ func TestBootstrapperSingleFrontier(t *testing.T) { return nil }, ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) vm.CantSetState = false - if err := bs.Start(context.Background(), 0); err != nil { - t.Fatal(err) - } + require.NoError(bs.Start(context.Background(), 0)) acceptedIDs := []ids.ID{blkID1} @@ -273,8 +258,8 @@ func TestBootstrapperSingleFrontier(t *testing.T) { case blkID0: return blk0, nil default: - t.Fatal(database.ErrNotFound) - panic(database.ErrNotFound) + require.FailNow(database.ErrNotFound.Error()) + return nil, database.ErrNotFound } 
} vm.ParseBlockF = func(_ context.Context, blkBytes []byte) (snowman.Block, error) { @@ -284,19 +269,13 @@ func TestBootstrapperSingleFrontier(t *testing.T) { case bytes.Equal(blkBytes, blkBytes0): return blk0, nil } - t.Fatal(errUnknownBlock) + require.FailNow(errUnknownBlock.Error()) return nil, errUnknownBlock } - err = bs.ForceAccepted(context.Background(), acceptedIDs) - switch { - case err != nil: // should finish - t.Fatal(err) - case config.Ctx.State.Get().State != snow.NormalOp: - t.Fatalf("Bootstrapping should have finished") - case blk1.Status() != choices.Accepted: - t.Fatalf("Block should be accepted") - } + require.NoError(bs.startSyncing(context.Background(), acceptedIDs)) + require.Equal(snow.NormalOp, config.Ctx.State.Get().State) + require.Equal(choices.Accepted, blk1.Status()) } // Requests the unknown block and gets back a Ancestors with unexpected request ID. @@ -304,6 +283,8 @@ func TestBootstrapperSingleFrontier(t *testing.T) { // Requests again and gets an unexpected block. // Requests again and gets the expected block. 
func TestBootstrapperUnknownByzantineResponse(t *testing.T) { + require := require.New(t) + config, peerID, sender, vm := newConfig(t) blkID0 := ids.Empty.Prefix(0) @@ -347,12 +328,11 @@ func TestBootstrapperUnknownByzantineResponse(t *testing.T) { return blk0.ID(), nil } vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { - require.Equal(t, blk0.ID(), blkID) + require.Equal(blk0.ID(), blkID) return blk0, nil } bs, err := New( - context.Background(), config, func(context.Context, uint32) error { config.Ctx.State.Set(snow.EngineState{ @@ -362,15 +342,9 @@ func TestBootstrapperUnknownByzantineResponse(t *testing.T) { return nil }, ) - if err != nil { - t.Fatal(err) - } - - if err := bs.Start(context.Background(), 0); err != nil { - t.Fatal(err) - } + require.NoError(err) - acceptedIDs := []ids.ID{blkID2} + require.NoError(bs.Start(context.Background(), 0)) parsedBlk1 := false vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { @@ -385,8 +359,8 @@ func TestBootstrapperUnknownByzantineResponse(t *testing.T) { case blkID2: return blk2, nil default: - t.Fatal(database.ErrNotFound) - panic(database.ErrNotFound) + require.FailNow(database.ErrNotFound.Error()) + return nil, database.ErrNotFound } } vm.ParseBlockF = func(_ context.Context, blkBytes []byte) (snowman.Block, error) { @@ -400,64 +374,39 @@ func TestBootstrapperUnknownByzantineResponse(t *testing.T) { case bytes.Equal(blkBytes, blkBytes2): return blk2, nil } - t.Fatal(errUnknownBlock) + require.FailNow(errUnknownBlock.Error()) return nil, errUnknownBlock } - requestID := new(uint32) - sender.SendGetAncestorsF = func(_ context.Context, vdr ids.NodeID, reqID uint32, vtxID ids.ID) { - if vdr != peerID { - t.Fatalf("Should have requested block from %s, requested from %s", peerID, vdr) - } - switch { - case vtxID == blkID1: - default: - t.Fatalf("should have requested blk1") - } - *requestID = reqID + var requestID uint32 + sender.SendGetAncestorsF = func(_ 
context.Context, vdr ids.NodeID, reqID uint32, blkID ids.ID) { + require.Equal(peerID, vdr) + require.Equal(blkID1, blkID) + requestID = reqID } vm.CantSetState = false - if err := bs.ForceAccepted(context.Background(), acceptedIDs); err != nil { // should request blk1 - t.Fatal(err) - } + require.NoError(bs.startSyncing(context.Background(), []ids.ID{blkID2})) // should request blk1 - oldReqID := *requestID - if err := bs.Ancestors(context.Background(), peerID, *requestID+1, [][]byte{blkBytes1}); err != nil { // respond with wrong request ID - t.Fatal(err) - } else if oldReqID != *requestID { - t.Fatal("should not have sent new request") - } + oldReqID := requestID + require.NoError(bs.Ancestors(context.Background(), peerID, requestID, [][]byte{blkBytes0})) // respond with wrong block + require.NotEqual(oldReqID, requestID) - if err := bs.Ancestors(context.Background(), ids.NodeID{1, 2, 3}, *requestID, [][]byte{blkBytes1}); err != nil { // respond from wrong peer - t.Fatal(err) - } else if oldReqID != *requestID { - t.Fatal("should not have sent new request") - } + require.NoError(bs.Ancestors(context.Background(), peerID, requestID, [][]byte{blkBytes1})) - if err := bs.Ancestors(context.Background(), peerID, *requestID, [][]byte{blkBytes0}); err != nil { // respond with wrong block - t.Fatal(err) - } else if oldReqID == *requestID { - t.Fatal("should have sent new request") - } + require.Equal(snow.Bootstrapping, config.Ctx.State.Get().State) + require.Equal(choices.Accepted, blk0.Status()) + require.Equal(choices.Accepted, blk1.Status()) + require.Equal(choices.Accepted, blk2.Status()) - err = bs.Ancestors(context.Background(), peerID, *requestID, [][]byte{blkBytes1}) - switch { - case err != nil: // respond with right block - t.Fatal(err) - case config.Ctx.State.Get().State != snow.NormalOp: - t.Fatalf("Bootstrapping should have finished") - case blk0.Status() != choices.Accepted: - t.Fatalf("Block should be accepted") - case blk1.Status() != choices.Accepted: 
- t.Fatalf("Block should be accepted") - case blk2.Status() != choices.Accepted: - t.Fatalf("Block should be accepted") - } + require.NoError(bs.startSyncing(context.Background(), []ids.ID{blkID2})) + require.Equal(snow.NormalOp, config.Ctx.State.Get().State) } // There are multiple needed blocks and Ancestors returns one at a time func TestBootstrapperPartialFetch(t *testing.T) { + require := require.New(t) + config, peerID, sender, vm := newConfig(t) blkID0 := ids.Empty.Prefix(0) @@ -511,12 +460,11 @@ func TestBootstrapperPartialFetch(t *testing.T) { return blk0.ID(), nil } vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { - require.Equal(t, blk0.ID(), blkID) + require.Equal(blk0.ID(), blkID) return blk0, nil } bs, err := New( - context.Background(), config, func(context.Context, uint32) error { config.Ctx.State.Set(snow.EngineState{ @@ -526,14 +474,10 @@ func TestBootstrapperPartialFetch(t *testing.T) { return nil }, ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) vm.CantSetState = false - if err := bs.Start(context.Background(), 0); err != nil { - t.Fatal(err) - } + require.NoError(bs.Start(context.Background(), 0)) acceptedIDs := []ids.ID{blkID3} @@ -556,8 +500,8 @@ func TestBootstrapperPartialFetch(t *testing.T) { case blkID3: return blk3, nil default: - t.Fatal(database.ErrNotFound) - panic(database.ErrNotFound) + require.FailNow(database.ErrNotFound.Error()) + return nil, database.ErrNotFound } } vm.ParseBlockF = func(_ context.Context, blkBytes []byte) (snowman.Block, error) { @@ -575,56 +519,41 @@ func TestBootstrapperPartialFetch(t *testing.T) { case bytes.Equal(blkBytes, blkBytes3): return blk3, nil } - t.Fatal(errUnknownBlock) + require.FailNow(errUnknownBlock.Error()) return nil, errUnknownBlock } requestID := new(uint32) requested := ids.Empty - sender.SendGetAncestorsF = func(_ context.Context, vdr ids.NodeID, reqID uint32, vtxID ids.ID) { - if vdr != peerID { - t.Fatalf("Should have requested block from %s, 
requested from %s", peerID, vdr) - } - switch vtxID { - case blkID1, blkID2: - default: - t.Fatalf("should have requested blk1 or blk2") - } + sender.SendGetAncestorsF = func(_ context.Context, vdr ids.NodeID, reqID uint32, blkID ids.ID) { + require.Equal(peerID, vdr) + require.Contains([]ids.ID{blkID1, blkID2}, blkID) *requestID = reqID - requested = vtxID + requested = blkID } - if err := bs.ForceAccepted(context.Background(), acceptedIDs); err != nil { // should request blk2 - t.Fatal(err) - } + require.NoError(bs.startSyncing(context.Background(), acceptedIDs)) // should request blk2 - if err := bs.Ancestors(context.Background(), peerID, *requestID, [][]byte{blkBytes2}); err != nil { // respond with blk2 - t.Fatal(err) - } else if requested != blkID1 { - t.Fatal("should have requested blk1") - } + require.NoError(bs.Ancestors(context.Background(), peerID, *requestID, [][]byte{blkBytes2})) // respond with blk2 + require.Equal(blkID1, requested) - if err := bs.Ancestors(context.Background(), peerID, *requestID, [][]byte{blkBytes1}); err != nil { // respond with blk1 - t.Fatal(err) - } else if requested != blkID1 { - t.Fatal("should not have requested another block") - } + require.NoError(bs.Ancestors(context.Background(), peerID, *requestID, [][]byte{blkBytes1})) // respond with blk1 + require.Equal(blkID1, requested) - switch { - case config.Ctx.State.Get().State != snow.NormalOp: - t.Fatalf("Bootstrapping should have finished") - case blk0.Status() != choices.Accepted: - t.Fatalf("Block should be accepted") - case blk1.Status() != choices.Accepted: - t.Fatalf("Block should be accepted") - case blk2.Status() != choices.Accepted: - t.Fatalf("Block should be accepted") - } + require.Equal(snow.Bootstrapping, config.Ctx.State.Get().State) + require.Equal(choices.Accepted, blk0.Status()) + require.Equal(choices.Accepted, blk1.Status()) + require.Equal(choices.Accepted, blk2.Status()) + + require.NoError(bs.startSyncing(context.Background(), acceptedIDs)) + 
require.Equal(snow.NormalOp, config.Ctx.State.Get().State) } // There are multiple needed blocks and some validators do not have all the blocks // This test was modeled after TestBootstrapperPartialFetch. func TestBootstrapperEmptyResponse(t *testing.T) { + require := require.New(t) + config, peerID, sender, vm := newConfig(t) blkID0 := ids.Empty.Prefix(0) @@ -678,12 +607,11 @@ func TestBootstrapperEmptyResponse(t *testing.T) { return blk0.ID(), nil } vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { - require.Equal(t, blk0.ID(), blkID) + require.Equal(blk0.ID(), blkID) return blk0, nil } bs, err := New( - context.Background(), config, func(context.Context, uint32) error { config.Ctx.State.Set(snow.EngineState{ @@ -693,14 +621,10 @@ func TestBootstrapperEmptyResponse(t *testing.T) { return nil }, ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) vm.CantSetState = false - if err := bs.Start(context.Background(), 0); err != nil { - t.Fatal(err) - } + require.NoError(bs.Start(context.Background(), 0)) acceptedIDs := []ids.ID{blkID3} @@ -723,8 +647,8 @@ func TestBootstrapperEmptyResponse(t *testing.T) { case blkID3: return blk3, nil default: - t.Fatal(database.ErrNotFound) - panic(database.ErrNotFound) + require.FailNow(database.ErrNotFound.Error()) + return nil, database.ErrNotFound } } vm.ParseBlockF = func(_ context.Context, blkBytes []byte) (snowman.Block, error) { @@ -742,7 +666,7 @@ func TestBootstrapperEmptyResponse(t *testing.T) { case bytes.Equal(blkBytes, blkBytes3): return blk3, nil } - t.Fatal(errUnknownBlock) + require.FailNow(errUnknownBlock.Error()) return nil, errUnknownBlock } @@ -756,63 +680,42 @@ func TestBootstrapperEmptyResponse(t *testing.T) { } // should request blk2 - err = bs.ForceAccepted(context.Background(), acceptedIDs) - switch { - case err != nil: - t.Fatal(err) - case requestedVdr != peerID: - t.Fatal("should have requested from peerID") - case requestedBlock != blkID2: - t.Fatal("should have 
requested blk2") - } + require.NoError(bs.startSyncing(context.Background(), acceptedIDs)) + require.Equal(peerID, requestedVdr) + require.Equal(blkID2, requestedBlock) // add another two validators to the fetch set to test behavior on empty response newPeerID := ids.GenerateTestNodeID() - bs.(*bootstrapper).fetchFrom.Add(newPeerID) + bs.fetchFrom.Add(newPeerID) newPeerID = ids.GenerateTestNodeID() - bs.(*bootstrapper).fetchFrom.Add(newPeerID) + bs.fetchFrom.Add(newPeerID) - if err := bs.Ancestors(context.Background(), peerID, requestID, [][]byte{blkBytes2}); err != nil { // respond with blk2 - t.Fatal(err) - } else if requestedBlock != blkID1 { - t.Fatal("should have requested blk1") - } + require.NoError(bs.Ancestors(context.Background(), peerID, requestID, [][]byte{blkBytes2})) + require.Equal(blkID1, requestedBlock) peerToBlacklist := requestedVdr // respond with empty - err = bs.Ancestors(context.Background(), peerToBlacklist, requestID, nil) - switch { - case err != nil: - t.Fatal(err) - case requestedVdr == peerToBlacklist: - t.Fatal("shouldn't have requested from peerToBlacklist") - case requestedBlock != blkID1: - t.Fatal("should have requested blk1") - } + require.NoError(bs.Ancestors(context.Background(), peerToBlacklist, requestID, nil)) + require.NotEqual(peerToBlacklist, requestedVdr) + require.Equal(blkID1, requestedBlock) - if err := bs.Ancestors(context.Background(), requestedVdr, requestID, [][]byte{blkBytes1}); err != nil { // respond with blk1 - t.Fatal(err) - } + require.NoError(bs.Ancestors(context.Background(), requestedVdr, requestID, [][]byte{blkBytes1})) // respond with blk1 - switch { - case config.Ctx.State.Get().State != snow.NormalOp: - t.Fatalf("Bootstrapping should have finished") - case blk0.Status() != choices.Accepted: - t.Fatalf("Block should be accepted") - case blk1.Status() != choices.Accepted: - t.Fatalf("Block should be accepted") - case blk2.Status() != choices.Accepted: - t.Fatalf("Block should be accepted") - } + 
require.Equal(snow.Bootstrapping, config.Ctx.State.Get().State) + require.Equal(choices.Accepted, blk0.Status()) + require.Equal(choices.Accepted, blk1.Status()) + require.Equal(choices.Accepted, blk2.Status()) // check peerToBlacklist was removed from the fetch set - require.False(t, bs.(*bootstrapper).fetchFrom.Contains(peerToBlacklist)) + require.NotContains(bs.fetchFrom, peerToBlacklist) } // There are multiple needed blocks and Ancestors returns all at once func TestBootstrapperAncestors(t *testing.T) { + require := require.New(t) + config, peerID, sender, vm := newConfig(t) blkID0 := ids.Empty.Prefix(0) @@ -867,12 +770,11 @@ func TestBootstrapperAncestors(t *testing.T) { return blk0.ID(), nil } vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { - require.Equal(t, blk0.ID(), blkID) + require.Equal(blk0.ID(), blkID) return blk0, nil } bs, err := New( - context.Background(), config, func(context.Context, uint32) error { config.Ctx.State.Set(snow.EngineState{ @@ -882,13 +784,9 @@ func TestBootstrapperAncestors(t *testing.T) { return nil }, ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) - if err := bs.Start(context.Background(), 0); err != nil { - t.Fatal(err) - } + require.NoError(bs.Start(context.Background(), 0)) acceptedIDs := []ids.ID{blkID3} @@ -911,8 +809,8 @@ func TestBootstrapperAncestors(t *testing.T) { case blkID3: return blk3, nil default: - t.Fatal(database.ErrNotFound) - panic(database.ErrNotFound) + require.FailNow(database.ErrNotFound.Error()) + return nil, database.ErrNotFound } } vm.ParseBlockF = func(_ context.Context, blkBytes []byte) (snowman.Block, error) { @@ -930,48 +828,35 @@ func TestBootstrapperAncestors(t *testing.T) { case bytes.Equal(blkBytes, blkBytes3): return blk3, nil } - t.Fatal(errUnknownBlock) + require.FailNow(errUnknownBlock.Error()) return nil, errUnknownBlock } requestID := new(uint32) requested := ids.Empty - sender.SendGetAncestorsF = func(_ context.Context, vdr ids.NodeID, reqID 
uint32, vtxID ids.ID) { - if vdr != peerID { - t.Fatalf("Should have requested block from %s, requested from %s", peerID, vdr) - } - switch vtxID { - case blkID1, blkID2: - default: - t.Fatalf("should have requested blk1 or blk2") - } + sender.SendGetAncestorsF = func(_ context.Context, vdr ids.NodeID, reqID uint32, blkID ids.ID) { + require.Equal(peerID, vdr) + require.Contains([]ids.ID{blkID1, blkID2}, blkID) *requestID = reqID - requested = vtxID + requested = blkID } - if err := bs.ForceAccepted(context.Background(), acceptedIDs); err != nil { // should request blk2 - t.Fatal(err) - } + require.NoError(bs.startSyncing(context.Background(), acceptedIDs)) // should request blk2 + require.NoError(bs.Ancestors(context.Background(), peerID, *requestID, [][]byte{blkBytes2, blkBytes1})) // respond with blk2 and blk1 + require.Equal(blkID2, requested) - if err := bs.Ancestors(context.Background(), peerID, *requestID, [][]byte{blkBytes2, blkBytes1}); err != nil { // respond with blk2 and blk1 - t.Fatal(err) - } else if requested != blkID2 { - t.Fatal("should not have requested another block") - } + require.Equal(snow.Bootstrapping, config.Ctx.State.Get().State) + require.Equal(choices.Accepted, blk0.Status()) + require.Equal(choices.Accepted, blk1.Status()) + require.Equal(choices.Accepted, blk2.Status()) - switch { - case config.Ctx.State.Get().State != snow.NormalOp: - t.Fatalf("Bootstrapping should have finished") - case blk0.Status() != choices.Accepted: - t.Fatalf("Block should be accepted") - case blk1.Status() != choices.Accepted: - t.Fatalf("Block should be accepted") - case blk2.Status() != choices.Accepted: - t.Fatalf("Block should be accepted") - } + require.NoError(bs.startSyncing(context.Background(), acceptedIDs)) + require.Equal(snow.NormalOp, config.Ctx.State.Get().State) } func TestBootstrapperFinalized(t *testing.T) { + require := require.New(t) + config, peerID, sender, vm := newConfig(t) blkID0 := ids.Empty.Prefix(0) @@ -1014,11 +899,10 @@ func 
TestBootstrapperFinalized(t *testing.T) { return blk0.ID(), nil } vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { - require.Equal(t, blk0.ID(), blkID) + require.Equal(blk0.ID(), blkID) return blk0, nil } bs, err := New( - context.Background(), config, func(context.Context, uint32) error { config.Ctx.State.Set(snow.EngineState{ @@ -1028,14 +912,10 @@ func TestBootstrapperFinalized(t *testing.T) { return nil }, ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) vm.CantSetState = false - if err := bs.Start(context.Background(), 0); err != nil { - t.Fatal(err) - } + require.NoError(bs.Start(context.Background(), 0)) parsedBlk1 := false parsedBlk2 := false @@ -1054,8 +934,8 @@ func TestBootstrapperFinalized(t *testing.T) { } return nil, database.ErrNotFound default: - t.Fatal(database.ErrNotFound) - panic(database.ErrNotFound) + require.FailNow(database.ErrNotFound.Error()) + return nil, database.ErrNotFound } } vm.ParseBlockF = func(_ context.Context, blkBytes []byte) (snowman.Block, error) { @@ -1071,44 +951,35 @@ func TestBootstrapperFinalized(t *testing.T) { parsedBlk2 = true return blk2, nil } - t.Fatal(errUnknownBlock) + require.FailNow(errUnknownBlock.Error()) return nil, errUnknownBlock } requestIDs := map[ids.ID]uint32{} - sender.SendGetAncestorsF = func(_ context.Context, vdr ids.NodeID, reqID uint32, vtxID ids.ID) { - if vdr != peerID { - t.Fatalf("Should have requested block from %s, requested from %s", peerID, vdr) - } - requestIDs[vtxID] = reqID + sender.SendGetAncestorsF = func(_ context.Context, vdr ids.NodeID, reqID uint32, blkID ids.ID) { + require.Equal(peerID, vdr) + requestIDs[blkID] = reqID } - if err := bs.ForceAccepted(context.Background(), []ids.ID{blkID1, blkID2}); err != nil { // should request blk2 and blk1 - t.Fatal(err) - } + require.NoError(bs.startSyncing(context.Background(), []ids.ID{blkID1, blkID2})) // should request blk2 and blk1 reqIDBlk2, ok := requestIDs[blkID2] - if !ok { - 
t.Fatalf("should have requested blk2") - } + require.True(ok) - if err := bs.Ancestors(context.Background(), peerID, reqIDBlk2, [][]byte{blkBytes2, blkBytes1}); err != nil { - t.Fatal(err) - } + require.NoError(bs.Ancestors(context.Background(), peerID, reqIDBlk2, [][]byte{blkBytes2, blkBytes1})) - switch { - case config.Ctx.State.Get().State != snow.NormalOp: - t.Fatalf("Bootstrapping should have finished") - case blk0.Status() != choices.Accepted: - t.Fatalf("Block should be accepted") - case blk1.Status() != choices.Accepted: - t.Fatalf("Block should be accepted") - case blk2.Status() != choices.Accepted: - t.Fatalf("Block should be accepted") - } + require.Equal(snow.Bootstrapping, config.Ctx.State.Get().State) + require.Equal(choices.Accepted, blk0.Status()) + require.Equal(choices.Accepted, blk1.Status()) + require.Equal(choices.Accepted, blk2.Status()) + + require.NoError(bs.startSyncing(context.Background(), []ids.ID{blkID2})) + require.Equal(snow.NormalOp, config.Ctx.State.Get().State) } func TestRestartBootstrapping(t *testing.T) { + require := require.New(t) + config, peerID, sender, vm := newConfig(t) blkID0 := ids.Empty.Prefix(0) @@ -1201,8 +1072,8 @@ func TestRestartBootstrapping(t *testing.T) { } return nil, database.ErrNotFound default: - t.Fatal(database.ErrNotFound) - panic(database.ErrNotFound) + require.FailNow(database.ErrNotFound.Error()) + return nil, database.ErrNotFound } } vm.ParseBlockF = func(_ context.Context, blkBytes []byte) (snowman.Block, error) { @@ -1226,12 +1097,11 @@ func TestRestartBootstrapping(t *testing.T) { parsedBlk4 = true return blk4, nil } - t.Fatal(errUnknownBlock) + require.FailNow(errUnknownBlock.Error()) return nil, errUnknownBlock } - bsIntf, err := New( - context.Background(), + bs, err := New( config, func(context.Context, uint32) error { config.Ctx.State.Set(snow.EngineState{ @@ -1241,93 +1111,59 @@ func TestRestartBootstrapping(t *testing.T) { return nil }, ) - if err != nil { - t.Fatal(err) - } - bs, ok := 
bsIntf.(*bootstrapper) - if !ok { - t.Fatal("unexpected bootstrapper type") - } + require.NoError(err) vm.CantSetState = false - if err := bs.Start(context.Background(), 0); err != nil { - t.Fatal(err) - } + require.NoError(bs.Start(context.Background(), 0)) requestIDs := map[ids.ID]uint32{} - sender.SendGetAncestorsF = func(_ context.Context, vdr ids.NodeID, reqID uint32, vtxID ids.ID) { - if vdr != peerID { - t.Fatalf("Should have requested block from %s, requested from %s", peerID, vdr) - } - requestIDs[vtxID] = reqID + sender.SendGetAncestorsF = func(_ context.Context, vdr ids.NodeID, reqID uint32, blkID ids.ID) { + require.Equal(peerID, vdr) + requestIDs[blkID] = reqID } // Force Accept blk3 - if err := bs.ForceAccepted(context.Background(), []ids.ID{blkID3}); err != nil { // should request blk3 - t.Fatal(err) - } + require.NoError(bs.startSyncing(context.Background(), []ids.ID{blkID3})) // should request blk3 reqID, ok := requestIDs[blkID3] - if !ok { - t.Fatalf("should have requested blk3") - } + require.True(ok) - if err := bs.Ancestors(context.Background(), peerID, reqID, [][]byte{blkBytes3, blkBytes2}); err != nil { - t.Fatal(err) - } + require.NoError(bs.Ancestors(context.Background(), peerID, reqID, [][]byte{blkBytes3, blkBytes2})) - if _, ok := requestIDs[blkID1]; !ok { - t.Fatal("should have requested blk1") - } + require.Contains(requestIDs, blkID1) - // Remove request, so we can restart bootstrapping via ForceAccepted - if removed := bs.OutstandingRequests.RemoveAny(blkID1); !removed { - t.Fatal("Expected to find an outstanding request for blk1") - } + // Remove request, so we can restart bootstrapping via startSyncing + _, removed := bs.outstandingRequests.DeleteValue(blkID1) + require.True(removed) requestIDs = map[ids.ID]uint32{} - if err := bs.ForceAccepted(context.Background(), []ids.ID{blkID4}); err != nil { - t.Fatal(err) - } + require.NoError(bs.startSyncing(context.Background(), []ids.ID{blkID4})) blk1RequestID, ok := requestIDs[blkID1] - 
if !ok { - t.Fatal("should have re-requested blk1 on restart") - } + require.True(ok) blk4RequestID, ok := requestIDs[blkID4] - if !ok { - t.Fatal("should have requested blk4 as new accepted frontier") - } + require.True(ok) - if err := bs.Ancestors(context.Background(), peerID, blk1RequestID, [][]byte{blkBytes1}); err != nil { - t.Fatal(err) - } + require.NoError(bs.Ancestors(context.Background(), peerID, blk1RequestID, [][]byte{blkBytes1})) - if config.Ctx.State.Get().State == snow.NormalOp { - t.Fatal("Bootstrapping should not have finished with outstanding request for blk4") - } + require.NotEqual(snow.NormalOp, config.Ctx.State.Get().State) - if err := bs.Ancestors(context.Background(), peerID, blk4RequestID, [][]byte{blkBytes4}); err != nil { - t.Fatal(err) - } + require.NoError(bs.Ancestors(context.Background(), peerID, blk4RequestID, [][]byte{blkBytes4})) - switch { - case config.Ctx.State.Get().State != snow.NormalOp: - t.Fatalf("Bootstrapping should have finished") - case blk0.Status() != choices.Accepted: - t.Fatalf("Block should be accepted") - case blk1.Status() != choices.Accepted: - t.Fatalf("Block should be accepted") - case blk2.Status() != choices.Accepted: - t.Fatalf("Block should be accepted") - case blk3.Status() != choices.Accepted: - t.Fatalf("Block should be accepted") - case blk4.Status() != choices.Accepted: - t.Fatalf("Block should be accepted") - } + require.Equal(snow.Bootstrapping, config.Ctx.State.Get().State) + require.Equal(choices.Accepted, blk0.Status()) + require.Equal(choices.Accepted, blk1.Status()) + require.Equal(choices.Accepted, blk2.Status()) + require.Equal(choices.Accepted, blk3.Status()) + require.Equal(choices.Accepted, blk4.Status()) + + require.NoError(bs.startSyncing(context.Background(), []ids.ID{blkID4})) + require.Equal(snow.NormalOp, config.Ctx.State.Get().State) } func TestBootstrapOldBlockAfterStateSync(t *testing.T) { + require := require.New(t) + config, peerID, sender, vm := newConfig(t) blk0 := 
&snowman.TestBlock{ @@ -1358,8 +1194,8 @@ func TestBootstrapOldBlockAfterStateSync(t *testing.T) { case blk1.ID(): return blk1, nil default: - t.Fatal(database.ErrNotFound) - panic(database.ErrNotFound) + require.FailNow(database.ErrNotFound.Error()) + return nil, database.ErrNotFound } } vm.ParseBlockF = func(_ context.Context, blkBytes []byte) (snowman.Block, error) { @@ -1369,12 +1205,11 @@ func TestBootstrapOldBlockAfterStateSync(t *testing.T) { case bytes.Equal(blkBytes, blk1.Bytes()): return blk1, nil } - t.Fatal(errUnknownBlock) + require.FailNow(errUnknownBlock.Error()) return nil, errUnknownBlock } - bsIntf, err := New( - context.Background(), + bs, err := New( config, func(context.Context, uint32) error { config.Ctx.State.Set(snow.EngineState{ @@ -1384,52 +1219,32 @@ func TestBootstrapOldBlockAfterStateSync(t *testing.T) { return nil }, ) - if err != nil { - t.Fatal(err) - } - bs, ok := bsIntf.(*bootstrapper) - if !ok { - t.Fatal("unexpected bootstrapper type") - } + require.NoError(err) vm.CantSetState = false - if err := bs.Start(context.Background(), 0); err != nil { - t.Fatal(err) - } + require.NoError(bs.Start(context.Background(), 0)) requestIDs := map[ids.ID]uint32{} - sender.SendGetAncestorsF = func(_ context.Context, vdr ids.NodeID, reqID uint32, vtxID ids.ID) { - if vdr != peerID { - t.Fatalf("Should have requested block from %s, requested from %s", peerID, vdr) - } - requestIDs[vtxID] = reqID + sender.SendGetAncestorsF = func(_ context.Context, vdr ids.NodeID, reqID uint32, blkID ids.ID) { + require.Equal(peerID, vdr) + requestIDs[blkID] = reqID } // Force Accept, the already transitively accepted, blk0 - if err := bs.ForceAccepted(context.Background(), []ids.ID{blk0.ID()}); err != nil { // should request blk0 - t.Fatal(err) - } + require.NoError(bs.startSyncing(context.Background(), []ids.ID{blk0.ID()})) // should request blk0 reqID, ok := requestIDs[blk0.ID()] - if !ok { - t.Fatalf("should have requested blk0") - } + require.True(ok) + 
require.NoError(bs.Ancestors(context.Background(), peerID, reqID, [][]byte{blk0.Bytes()})) - if err := bs.Ancestors(context.Background(), peerID, reqID, [][]byte{blk0.Bytes()}); err != nil { - t.Fatal(err) - } - - switch { - case config.Ctx.State.Get().State != snow.NormalOp: - t.Fatalf("Bootstrapping should have finished") - case blk0.Status() != choices.Processing: - t.Fatalf("Block should be processing") - case blk1.Status() != choices.Accepted: - t.Fatalf("Block should be accepted") - } + require.Equal(snow.NormalOp, config.Ctx.State.Get().State) + require.Equal(choices.Processing, blk0.Status()) + require.Equal(choices.Accepted, blk1.Status()) } func TestBootstrapContinueAfterHalt(t *testing.T) { + require := require.New(t) + config, _, _, vm := newConfig(t) blk0 := &snowman.TestBlock{ @@ -1463,8 +1278,7 @@ func TestBootstrapContinueAfterHalt(t *testing.T) { return blk0.ID(), nil } - bsIntf, err := New( - context.Background(), + bs, err := New( config, func(context.Context, uint32) error { config.Ctx.State.Set(snow.EngineState{ @@ -1474,13 +1288,7 @@ func TestBootstrapContinueAfterHalt(t *testing.T) { return nil }, ) - if err != nil { - t.Fatal(err) - } - bs, ok := bsIntf.(*bootstrapper) - if !ok { - t.Fatal("unexpected bootstrapper type") - } + require.NoError(err) vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { @@ -1492,21 +1300,250 @@ func TestBootstrapContinueAfterHalt(t *testing.T) { case blk2.ID(): return blk2, nil default: - t.Fatal(database.ErrNotFound) - panic(database.ErrNotFound) + require.FailNow(database.ErrNotFound.Error()) + return nil, database.ErrNotFound } } vm.CantSetState = false - if err := bs.Start(context.Background(), 0); err != nil { - t.Fatal(err) + require.NoError(bs.Start(context.Background(), 0)) + + require.NoError(bs.startSyncing(context.Background(), []ids.ID{blk2.ID()})) + + require.Equal(1, bs.Blocked.NumMissingIDs()) +} + +func TestBootstrapNoParseOnNew(t *testing.T) { + require := 
require.New(t) + + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) + peers := validators.NewManager() + + sender := &common.SenderTest{} + vm := &block.TestVM{} + + sender.T = t + vm.T = t + + sender.Default(true) + vm.Default(true) + + isBootstrapped := false + bootstrapTracker := &common.BootstrapTrackerTest{ + T: t, + IsBootstrappedF: func() bool { + return isBootstrapped + }, + BootstrappedF: func(ids.ID) { + isBootstrapped = true + }, + } + + sender.CantSendGetAcceptedFrontier = false + + peer := ids.GenerateTestNodeID() + require.NoError(peers.AddStaker(ctx.SubnetID, peer, nil, ids.Empty, 1)) + + peerTracker := tracker.NewPeers() + totalWeight, err := peers.TotalWeight(ctx.SubnetID) + require.NoError(err) + startupTracker := tracker.NewStartup(peerTracker, totalWeight/2+1) + peers.RegisterCallbackListener(ctx.SubnetID, startupTracker) + require.NoError(startupTracker.Connected(context.Background(), peer, version.CurrentApp)) + + snowGetHandler, err := getter.New(vm, sender, ctx.Log, time.Second, 2000, ctx.Registerer) + require.NoError(err) + + queueDB := memdb.New() + blocker, err := queue.NewWithMissing(queueDB, "", prometheus.NewRegistry()) + require.NoError(err) + + blk0 := &snowman.TestBlock{ + TestDecidable: choices.TestDecidable{ + IDV: ids.GenerateTestID(), + StatusV: choices.Accepted, + }, + HeightV: 0, + BytesV: utils.RandomBytes(32), + } + + blk1 := &snowman.TestBlock{ + TestDecidable: choices.TestDecidable{ + IDV: ids.GenerateTestID(), + StatusV: choices.Processing, + }, + ParentV: blk0.ID(), + HeightV: 1, + BytesV: utils.RandomBytes(32), } - if err := bs.ForceAccepted(context.Background(), []ids.ID{blk2.ID()}); err != nil { - t.Fatal(err) + vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { + require.Equal(blk0.ID(), blkID) + return blk0, nil } - if bs.Blocked.NumMissingIDs() != 1 { - t.Fatal("Should have left blk1 as missing") + pushed, err := 
blocker.Push(context.Background(), &blockJob{ + log: logging.NoLog{}, + numAccepted: prometheus.NewCounter(prometheus.CounterOpts{}), + numDropped: prometheus.NewCounter(prometheus.CounterOpts{}), + blk: blk1, + vm: vm, + }) + require.NoError(err) + require.True(pushed) + + require.NoError(blocker.Commit()) + + vm.GetBlockF = nil + + blocker, err = queue.NewWithMissing(queueDB, "", prometheus.NewRegistry()) + require.NoError(err) + + config := Config{ + AllGetsServer: snowGetHandler, + Ctx: ctx, + Beacons: peers, + SampleK: peers.Count(ctx.SubnetID), + StartupTracker: startupTracker, + Sender: sender, + BootstrapTracker: bootstrapTracker, + Timer: &common.TimerTest{}, + AncestorsMaxContainersReceived: 2000, + Blocked: blocker, + VM: vm, } + + _, err = New( + config, + func(context.Context, uint32) error { + config.Ctx.State.Set(snow.EngineState{ + Type: p2p.EngineType_ENGINE_TYPE_SNOWMAN, + State: snow.NormalOp, + }) + return nil + }, + ) + require.NoError(err) +} + +func TestBootstrapperReceiveStaleAncestorsMessage(t *testing.T) { + require := require.New(t) + + config, peerID, sender, vm := newConfig(t) + + var ( + blkID0 = ids.GenerateTestID() + blkBytes0 = utils.RandomBytes(1024) + blk0 = &snowman.TestBlock{ + TestDecidable: choices.TestDecidable{ + IDV: blkID0, + StatusV: choices.Accepted, + }, + HeightV: 0, + BytesV: blkBytes0, + } + + blkID1 = ids.GenerateTestID() + blkBytes1 = utils.RandomBytes(1024) + blk1 = &snowman.TestBlock{ + TestDecidable: choices.TestDecidable{ + IDV: blkID1, + StatusV: choices.Processing, + }, + ParentV: blk0.IDV, + HeightV: blk0.HeightV + 1, + BytesV: blkBytes1, + } + + blkID2 = ids.GenerateTestID() + blkBytes2 = utils.RandomBytes(1024) + blk2 = &snowman.TestBlock{ + TestDecidable: choices.TestDecidable{ + IDV: blkID2, + StatusV: choices.Processing, + }, + ParentV: blk1.IDV, + HeightV: blk1.HeightV + 1, + BytesV: blkBytes2, + } + ) + + vm.LastAcceptedF = func(context.Context) (ids.ID, error) { + return blk0.ID(), nil + } + 
vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { + require.Equal(blkID0, blkID) + return blk0, nil + } + bs, err := New( + config, + func(context.Context, uint32) error { + config.Ctx.State.Set(snow.EngineState{ + Type: p2p.EngineType_ENGINE_TYPE_SNOWMAN, + State: snow.NormalOp, + }) + return nil + }, + ) + require.NoError(err) + + vm.CantSetState = false + require.NoError(bs.Start(context.Background(), 0)) + + vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { + switch blkID { + case blkID0: + return blk0, nil + case blkID1: + if blk1.StatusV == choices.Accepted { + return blk1, nil + } + return nil, database.ErrNotFound + case blkID2: + if blk2.StatusV == choices.Accepted { + return blk2, nil + } + return nil, database.ErrNotFound + default: + require.FailNow(database.ErrNotFound.Error()) + return nil, database.ErrNotFound + } + } + vm.ParseBlockF = func(_ context.Context, blkBytes []byte) (snowman.Block, error) { + switch { + case bytes.Equal(blkBytes, blkBytes0): + return blk0, nil + case bytes.Equal(blkBytes, blkBytes1): + return blk1, nil + case bytes.Equal(blkBytes, blkBytes2): + return blk2, nil + default: + require.FailNow(errUnknownBlock.Error()) + return nil, errUnknownBlock + } + } + + requestIDs := map[ids.ID]uint32{} + sender.SendGetAncestorsF = func(_ context.Context, vdr ids.NodeID, reqID uint32, blkID ids.ID) { + require.Equal(peerID, vdr) + requestIDs[blkID] = reqID + } + + require.NoError(bs.startSyncing(context.Background(), []ids.ID{blkID1, blkID2})) // should request blk2 and blk1 + + reqIDBlk1, ok := requestIDs[blkID1] + require.True(ok) + reqIDBlk2, ok := requestIDs[blkID2] + require.True(ok) + + require.NoError(bs.Ancestors(context.Background(), peerID, reqIDBlk2, [][]byte{blkBytes2, blkBytes1})) + + require.Equal(snow.Bootstrapping, config.Ctx.State.Get().State) + require.Equal(choices.Accepted, blk0.Status()) + require.Equal(choices.Accepted, blk1.Status()) + 
require.Equal(choices.Accepted, blk2.Status()) + + require.NoError(bs.Ancestors(context.Background(), peerID, reqIDBlk1, [][]byte{blkBytes1})) + require.Equal(snow.Bootstrapping, config.Ctx.State.Get().State) } diff --git a/avalanchego/snow/engine/snowman/bootstrap/config.go b/avalanchego/snow/engine/snowman/bootstrap/config.go index 0c05feb7..6fb8894d 100644 --- a/avalanchego/snow/engine/snowman/bootstrap/config.go +++ b/avalanchego/snow/engine/snowman/bootstrap/config.go @@ -1,18 +1,33 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package bootstrap import ( + "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/engine/common/queue" + "github.com/ava-labs/avalanchego/snow/engine/common/tracker" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" + "github.com/ava-labs/avalanchego/snow/validators" ) type Config struct { - common.Config common.AllGetsServer + Ctx *snow.ConsensusContext + Beacons validators.Manager + + SampleK int + StartupTracker tracker.Startup + Sender common.Sender + BootstrapTracker common.BootstrapTracker + Timer common.Timer + + // This node will only consider the first [AncestorsMaxContainersReceived] + // containers in an ancestors message it receives. + AncestorsMaxContainersReceived int + // Blocked tracks operations that are blocked on blocks // // It should be guaranteed that `MissingIDs` should contain all IDs diff --git a/avalanchego/snow/engine/snowman/bootstrap/metrics.go b/avalanchego/snow/engine/snowman/bootstrap/metrics.go index 91260df3..f6ad90d1 100644 --- a/avalanchego/snow/engine/snowman/bootstrap/metrics.go +++ b/avalanchego/snow/engine/snowman/bootstrap/metrics.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. package bootstrap @@ -6,7 +6,7 @@ package bootstrap import ( "github.com/prometheus/client_golang/prometheus" - "github.com/ava-labs/avalanchego/utils/wrappers" + "github.com/ava-labs/avalanchego/utils" ) type metrics struct { @@ -38,12 +38,11 @@ func newMetrics(namespace string, registerer prometheus.Registerer) (*metrics, e }), } - errs := wrappers.Errs{} - errs.Add( + err := utils.Err( registerer.Register(m.numFetched), registerer.Register(m.numDropped), registerer.Register(m.numAccepted), registerer.Register(m.fetchETA), ) - return m, errs.Err + return m, err } diff --git a/avalanchego/snow/engine/snowman/config.go b/avalanchego/snow/engine/snowman/config.go index 32f92380..3162471a 100644 --- a/avalanchego/snow/engine/snowman/config.go +++ b/avalanchego/snow/engine/snowman/config.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package snowman @@ -8,6 +8,7 @@ import ( "github.com/ava-labs/avalanchego/snow/consensus/snowball" "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/snow/engine/common/tracker" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" "github.com/ava-labs/avalanchego/snow/validators" ) @@ -16,10 +17,12 @@ import ( type Config struct { common.AllGetsServer - Ctx *snow.ConsensusContext - VM block.ChainVM - Sender common.Sender - Validators validators.Set - Params snowball.Parameters - Consensus snowman.Consensus + Ctx *snow.ConsensusContext + VM block.ChainVM + Sender common.Sender + Validators validators.Manager + ConnectedValidators tracker.Peers + Params snowball.Parameters + Consensus snowman.Consensus + PartialSync bool } diff --git a/avalanchego/snow/engine/snowman/config_test.go b/avalanchego/snow/engine/snowman/config_test.go index c01731cd..fe66256c 100644 --- a/avalanchego/snow/engine/snowman/config_test.go +++ b/avalanchego/snow/engine/snowman/config_test.go @@ -1,33 +1,39 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package snowman import ( + "testing" + "github.com/ava-labs/avalanchego/snow/consensus/snowball" "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/snow/engine/common/tracker" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/snow/validators" ) -func DefaultConfigs() Config { - commonCfg := common.DefaultConfigTest() +func DefaultConfig(t testing.TB) Config { + ctx := snowtest.Context(t, snowtest.PChainID) + return Config{ - Ctx: commonCfg.Ctx, - Sender: commonCfg.Sender, - Validators: validators.NewSet(), - VM: &block.TestVM{}, + Ctx: snowtest.ConsensusContext(ctx), + VM: &block.TestVM{}, + Sender: &common.SenderTest{}, + Validators: validators.NewManager(), + ConnectedValidators: tracker.NewPeers(), Params: snowball.Parameters{ - K: 1, - Alpha: 1, - BetaVirtuous: 1, - BetaRogue: 2, - ConcurrentRepolls: 1, - OptimalProcessing: 100, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - MixedQueryNumPushNonVdr: 1, + K: 1, + AlphaPreference: 1, + AlphaConfidence: 1, + BetaVirtuous: 1, + BetaRogue: 2, + ConcurrentRepolls: 1, + OptimalProcessing: 100, + MaxOutstandingItems: 1, + MaxItemProcessingTime: 1, }, Consensus: &snowman.Topological{}, } diff --git a/avalanchego/snow/engine/snowman/engine.go b/avalanchego/snow/engine/snowman/engine.go index 37985f5b..b5e3fb10 100644 --- a/avalanchego/snow/engine/snowman/engine.go +++ b/avalanchego/snow/engine/snowman/engine.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package snowman diff --git a/avalanchego/snow/engine/snowman/getter/getter.go b/avalanchego/snow/engine/snowman/getter/getter.go index 429826bf..b58d7eb8 100644 --- a/avalanchego/snow/engine/snowman/getter/getter.go +++ b/avalanchego/snow/engine/snowman/getter/getter.go @@ -1,11 +1,13 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package getter import ( "context" + "time" + "github.com/prometheus/client_golang/prometheus" "go.uber.org/zap" "github.com/ava-labs/avalanchego/ids" @@ -15,6 +17,7 @@ import ( "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/metric" + "github.com/ava-labs/avalanchego/utils/set" ) // Get requests are always served, regardless node state (bootstrapping or normal operations). @@ -22,15 +25,20 @@ var _ common.AllGetsServer = (*getter)(nil) func New( vm block.ChainVM, - commonCfg common.Config, + sender common.Sender, + log logging.Logger, + maxTimeGetAncestors time.Duration, + maxContainersGetAncestors int, + reg prometheus.Registerer, ) (common.AllGetsServer, error) { ssVM, _ := vm.(block.StateSyncableVM) gh := &getter{ - vm: vm, - ssVM: ssVM, - sender: commonCfg.Sender, - cfg: commonCfg, - log: commonCfg.Ctx.Log, + vm: vm, + ssVM: ssVM, + sender: sender, + log: log, + maxTimeGetAncestors: maxTimeGetAncestors, + maxContainersGetAncestors: maxContainersGetAncestors, } var err error @@ -38,18 +46,23 @@ func New( "bs", "get_ancestors_blks", "blocks fetched in a call to GetAncestors", - commonCfg.Ctx.Registerer, + reg, ) return gh, err } type getter struct { - vm block.ChainVM - ssVM block.StateSyncableVM // can be nil + vm block.ChainVM + ssVM block.StateSyncableVM // can be nil + sender common.Sender - cfg common.Config + log logging.Logger + // Max time to spend fetching a container and its ancestors when responding + // to a 
GetAncestors + maxTimeGetAncestors time.Duration + // Max number of containers in an ancestors message sent by this node. + maxContainersGetAncestors int - log logging.Logger getAncestorsBlks metric.Averager } @@ -81,10 +94,10 @@ func (gh *getter) GetStateSummaryFrontier(ctx context.Context, nodeID ids.NodeID return nil } -func (gh *getter) GetAcceptedStateSummary(ctx context.Context, nodeID ids.NodeID, requestID uint32, heights []uint64) error { +func (gh *getter) GetAcceptedStateSummary(ctx context.Context, nodeID ids.NodeID, requestID uint32, heights set.Set[uint64]) error { // If there are no requested heights, then we can return the result // immediately, regardless of if the underlying VM implements state sync. - if len(heights) == 0 { + if heights.Len() == 0 { gh.sender.SendAcceptedStateSummary(ctx, nodeID, requestID, nil) return nil } @@ -101,8 +114,8 @@ func (gh *getter) GetAcceptedStateSummary(ctx context.Context, nodeID ids.NodeID return nil } - summaryIDs := make([]ids.ID, 0, len(heights)) - for _, height := range heights { + summaryIDs := make([]ids.ID, 0, heights.Len()) + for height := range heights { summary, err := gh.ssVM.GetStateSummary(ctx, height) if err == block.ErrStateSyncableVMNotImplemented { gh.log.Debug("dropping GetAcceptedStateSummary message", @@ -131,13 +144,13 @@ func (gh *getter) GetAcceptedFrontier(ctx context.Context, nodeID ids.NodeID, re if err != nil { return err } - gh.sender.SendAcceptedFrontier(ctx, nodeID, requestID, []ids.ID{lastAccepted}) + gh.sender.SendAcceptedFrontier(ctx, nodeID, requestID, lastAccepted) return nil } -func (gh *getter) GetAccepted(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerIDs []ids.ID) error { - acceptedIDs := make([]ids.ID, 0, len(containerIDs)) - for _, blkID := range containerIDs { +func (gh *getter) GetAccepted(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerIDs set.Set[ids.ID]) error { + acceptedIDs := make([]ids.ID, 0, containerIDs.Len()) + for blkID 
:= range containerIDs { blk, err := gh.vm.GetBlock(ctx, blkID) if err == nil && blk.Status() == choices.Accepted { acceptedIDs = append(acceptedIDs, blkID) @@ -150,11 +163,12 @@ func (gh *getter) GetAccepted(ctx context.Context, nodeID ids.NodeID, requestID func (gh *getter) GetAncestors(ctx context.Context, nodeID ids.NodeID, requestID uint32, blkID ids.ID) error { ancestorsBytes, err := block.GetAncestors( ctx, + gh.log, gh.vm, blkID, - gh.cfg.AncestorsMaxContainersSent, + gh.maxContainersGetAncestors, constants.MaxContainersLen, - gh.cfg.MaxTimeGetAncestors, + gh.maxTimeGetAncestors, ) if err != nil { gh.log.Verbo("dropping GetAncestors message", diff --git a/avalanchego/snow/engine/snowman/getter/getter_test.go b/avalanchego/snow/engine/snowman/getter/getter_test.go index 40e6fc03..7d6482a1 100644 --- a/avalanchego/snow/engine/snowman/getter/getter_test.go +++ b/avalanchego/snow/engine/snowman/getter/getter_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package getter @@ -7,19 +7,18 @@ import ( "context" "errors" "testing" + "time" - "github.com/golang/mock/gomock" - + "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" - "github.com/ava-labs/avalanchego/snow/engine/snowman/block/mocks" - "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/set" ) @@ -27,116 +26,56 @@ var errUnknownBlock = errors.New("unknown block") type StateSyncEnabledMock struct { *block.TestVM - *mocks.MockStateSyncableVM + *block.MockStateSyncableVM } -func testSetup( - t *testing.T, - ctrl *gomock.Controller, -) (StateSyncEnabledMock, *common.SenderTest, common.Config) { - ctx := snow.DefaultConsensusContextTest() +func newTest(t *testing.T) (common.AllGetsServer, StateSyncEnabledMock, *common.SenderTest) { + ctrl := gomock.NewController(t) - peers := validators.NewSet() - sender := &common.SenderTest{} vm := StateSyncEnabledMock{ TestVM: &block.TestVM{}, - MockStateSyncableVM: mocks.NewMockStateSyncableVM(ctrl), + MockStateSyncableVM: block.NewMockStateSyncableVM(ctrl), } - sender.T = t - - sender.Default(true) - - isBootstrapped := false - bootstrapTracker := &common.BootstrapTrackerTest{ + sender := &common.SenderTest{ T: t, - IsBootstrappedF: func() bool { - return isBootstrapped - }, - BootstrappedF: func(ids.ID) { - isBootstrapped = true - }, - } - - sender.CantSendGetAcceptedFrontier = false - - peer := ids.GenerateTestNodeID() - if err := peers.Add(peer, nil, ids.Empty, 1); err != nil { - t.Fatal(err) - } - - commonConfig := common.Config{ - Ctx: ctx, - Beacons: peers, - SampleK: peers.Len(), - 
Alpha: peers.Weight()/2 + 1, - Sender: sender, - BootstrapTracker: bootstrapTracker, - Timer: &common.TimerTest{}, - AncestorsMaxContainersSent: 2000, - AncestorsMaxContainersReceived: 2000, - SharedCfg: &common.SharedConfig{}, } + sender.Default(true) - return vm, sender, commonConfig + bs, err := New( + vm, + sender, + logging.NoLog{}, + time.Second, + 2000, + prometheus.NewRegistry(), + ) + require.NoError(t, err) + + return bs, vm, sender } func TestAcceptedFrontier(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - vm, sender, config := testSetup(t, ctrl) + require := require.New(t) + bs, vm, sender := newTest(t) blkID := ids.GenerateTestID() - - dummyBlk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: blkID, - StatusV: choices.Accepted, - }, - HeightV: 0, - BytesV: []byte{1, 2, 3}, - } - vm.CantLastAccepted = false vm.LastAcceptedF = func(context.Context) (ids.ID, error) { return blkID, nil } - vm.GetBlockF = func(_ context.Context, bID ids.ID) (snowman.Block, error) { - require.Equal(t, blkID, bID) - return dummyBlk, nil - } - bsIntf, err := New(vm, config) - if err != nil { - t.Fatal(err) - } - bs, ok := bsIntf.(*getter) - if !ok { - t.Fatal("Unexpected get handler") - } - - var accepted []ids.ID - sender.SendAcceptedFrontierF = func(_ context.Context, _ ids.NodeID, _ uint32, frontier []ids.ID) { - accepted = frontier + var accepted ids.ID + sender.SendAcceptedFrontierF = func(_ context.Context, _ ids.NodeID, _ uint32, containerID ids.ID) { + accepted = containerID } - if err := bs.GetAcceptedFrontier(context.Background(), ids.EmptyNodeID, 0); err != nil { - t.Fatal(err) - } - - if len(accepted) != 1 { - t.Fatalf("Only one block should be accepted") - } - if accepted[0] != blkID { - t.Fatalf("Blk should be accepted") - } + require.NoError(bs.GetAcceptedFrontier(context.Background(), ids.EmptyNodeID, 0)) + require.Equal(blkID, accepted) } func TestFilterAccepted(t *testing.T) { - ctrl := gomock.NewController(t) 
- defer ctrl.Finish() - - vm, sender, config := testSetup(t, ctrl) + require := require.New(t) + bs, vm, sender := newTest(t) blkID0 := ids.GenerateTestID() blkID1 := ids.GenerateTestID() @@ -151,25 +90,6 @@ func TestFilterAccepted(t *testing.T) { StatusV: choices.Accepted, }} - vm.CantLastAccepted = false - vm.LastAcceptedF = func(context.Context) (ids.ID, error) { - return blk1.ID(), nil - } - vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { - require.Equal(t, blk1.ID(), blkID) - return blk1, nil - } - - bsIntf, err := New(vm, config) - if err != nil { - t.Fatal(err) - } - bs, ok := bsIntf.(*getter) - if !ok { - t.Fatal("Unexpected get handler") - } - - blkIDs := []ids.ID{blkID0, blkID1, blkID2} vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case blkID0: @@ -179,7 +99,7 @@ func TestFilterAccepted(t *testing.T) { case blkID2: return nil, errUnknownBlock } - t.Fatal(errUnknownBlock) + require.FailNow(errUnknownBlock.Error()) return nil, errUnknownBlock } @@ -188,23 +108,11 @@ func TestFilterAccepted(t *testing.T) { accepted = frontier } - if err := bs.GetAccepted(context.Background(), ids.EmptyNodeID, 0, blkIDs); err != nil { - t.Fatal(err) - } - - acceptedSet := set.Set[ids.ID]{} - acceptedSet.Add(accepted...) 
+ blkIDs := set.Of(blkID0, blkID1, blkID2) + require.NoError(bs.GetAccepted(context.Background(), ids.EmptyNodeID, 0, blkIDs)) - if acceptedSet.Len() != 2 { - t.Fatalf("Two blocks should be accepted") - } - if !acceptedSet.Contains(blkID0) { - t.Fatalf("Blk should be accepted") - } - if !acceptedSet.Contains(blkID1) { - t.Fatalf("Blk should be accepted") - } - if acceptedSet.Contains(blkID2) { - t.Fatalf("Blk shouldn't be accepted") - } + require.Len(accepted, 2) + require.Contains(accepted, blkID0) + require.Contains(accepted, blkID1) + require.NotContains(accepted, blkID2) } diff --git a/avalanchego/snow/engine/snowman/issuer.go b/avalanchego/snow/engine/snowman/issuer.go index f7446167..d952dfe2 100644 --- a/avalanchego/snow/engine/snowman/issuer.go +++ b/avalanchego/snow/engine/snowman/issuer.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowman @@ -6,6 +6,8 @@ package snowman import ( "context" + "github.com/prometheus/client_golang/prometheus" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/utils/set" @@ -13,10 +15,13 @@ import ( // issuer issues [blk] into to consensus after its dependencies are met. 
type issuer struct { - t *Transitive - blk snowman.Block - abandoned bool - deps set.Set[ids.ID] + t *Transitive + nodeID ids.NodeID // nodeID of the peer that provided this block + blk snowman.Block + issuedMetric prometheus.Counter + abandoned bool + deps set.Set[ids.ID] + push bool } func (i *issuer) Dependencies() set.Set[ids.ID] { @@ -50,5 +55,5 @@ func (i *issuer) Update(ctx context.Context) { return } // Issue the block into consensus - i.t.errs.Add(i.t.deliver(ctx, i.blk)) + i.t.errs.Add(i.t.deliver(ctx, i.nodeID, i.blk, i.push, i.issuedMetric)) } diff --git a/avalanchego/snow/engine/snowman/memory_block.go b/avalanchego/snow/engine/snowman/memory_block.go index 957b22d1..d91118af 100644 --- a/avalanchego/snow/engine/snowman/memory_block.go +++ b/avalanchego/snow/engine/snowman/memory_block.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package snowman @@ -7,6 +7,7 @@ import ( "context" "github.com/ava-labs/avalanchego/snow/consensus/snowman" + "github.com/ava-labs/avalanchego/snow/engine/snowman/ancestor" ) var _ snowman.Block = (*memoryBlock)(nil) @@ -15,20 +16,20 @@ var _ snowman.Block = (*memoryBlock)(nil) type memoryBlock struct { snowman.Block - tree AncestorTree + tree ancestor.Tree metrics *metrics } // Accept accepts the underlying block & removes sibling subtrees func (mb *memoryBlock) Accept(ctx context.Context) error { - mb.tree.RemoveSubtree(mb.Parent()) + mb.tree.RemoveDescendants(mb.Parent()) mb.metrics.numNonVerifieds.Set(float64(mb.tree.Len())) return mb.Block.Accept(ctx) } // Reject rejects the underlying block & removes child subtrees func (mb *memoryBlock) Reject(ctx context.Context) error { - mb.tree.RemoveSubtree(mb.ID()) + mb.tree.RemoveDescendants(mb.ID()) mb.metrics.numNonVerifieds.Set(float64(mb.tree.Len())) return mb.Block.Reject(ctx) } diff --git a/avalanchego/snow/engine/snowman/metrics.go b/avalanchego/snow/engine/snowman/metrics.go index 19fb4229..5dd65d8a 100644 --- a/avalanchego/snow/engine/snowman/metrics.go +++ b/avalanchego/snow/engine/snowman/metrics.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package snowman @@ -10,13 +10,35 @@ import ( "github.com/ava-labs/avalanchego/utils/wrappers" ) +const ( + pullGossipSource = "pull_gossip" + pushGossipSource = "push_gossip" + putGossipSource = "put_gossip" + builtSource = "built" + unknownSource = "unknown" +) + type metrics struct { - bootstrapFinished, numRequests, numBlocked, numBlockers, numNonVerifieds prometheus.Gauge - numBuilt, numBuildsFailed, numUselessPutBytes, numUselessPushQueryBytes prometheus.Counter - getAncestorsBlks metric.Averager + bootstrapFinished prometheus.Gauge + numRequests prometheus.Gauge + numBlocked prometheus.Gauge + numBlockers prometheus.Gauge + numNonVerifieds prometheus.Gauge + numBuilt prometheus.Counter + numBuildsFailed prometheus.Counter + numUselessPutBytes prometheus.Counter + numUselessPushQueryBytes prometheus.Counter + numMissingAcceptedBlocks prometheus.Counter + numProcessingAncestorFetchesFailed prometheus.Counter + numProcessingAncestorFetchesDropped prometheus.Counter + numProcessingAncestorFetchesSucceeded prometheus.Counter + numProcessingAncestorFetchesUnneeded prometheus.Counter + getAncestorsBlks metric.Averager + selectedVoteIndex metric.Averager + issuerStake metric.Averager + issued *prometheus.CounterVec } -// Initialize the metrics func (m *metrics) Initialize(namespace string, reg prometheus.Registerer) error { errs := wrappers.Errs{} m.bootstrapFinished = prometheus.NewGauge(prometheus.GaugeOpts{ @@ -39,6 +61,11 @@ func (m *metrics) Initialize(namespace string, reg prometheus.Registerer) error Name: "blockers", Help: "Number of blocks that are blocking other blocks from being issued because they haven't been issued", }) + m.numNonVerifieds = prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Name: "non_verified_blks", + Help: "Number of non-verified blocks in the memory", + }) m.numBuilt = prometheus.NewCounter(prometheus.CounterOpts{ Namespace: namespace, Name: "blks_built", @@ -59,6 +86,31 @@ func (m *metrics) Initialize(namespace 
string, reg prometheus.Registerer) error Name: "num_useless_push_query_bytes", Help: "Amount of useless bytes received in PushQuery messages", }) + m.numMissingAcceptedBlocks = prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Name: "num_missing_accepted_blocks", + Help: "Number of times an accepted block height was referenced and it wasn't locally available", + }) + m.numProcessingAncestorFetchesFailed = prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Name: "num_processing_ancestor_fetches_failed", + Help: "Number of votes that were dropped due to unknown blocks", + }) + m.numProcessingAncestorFetchesDropped = prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Name: "num_processing_ancestor_fetches_dropped", + Help: "Number of votes that were dropped due to decided blocks", + }) + m.numProcessingAncestorFetchesSucceeded = prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Name: "num_processing_ancestor_fetches_succeeded", + Help: "Number of votes that were applied to ancestor blocks", + }) + m.numProcessingAncestorFetchesUnneeded = prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Name: "num_processing_ancestor_fetches_unneeded", + Help: "Number of votes that were directly applied to blocks", + }) m.getAncestorsBlks = metric.NewAveragerWithErrs( namespace, "get_ancestors_blks", @@ -66,11 +118,32 @@ func (m *metrics) Initialize(namespace string, reg prometheus.Registerer) error reg, &errs, ) - m.numNonVerifieds = prometheus.NewGauge(prometheus.GaugeOpts{ + m.selectedVoteIndex = metric.NewAveragerWithErrs( + namespace, + "selected_vote_index", + "index of the voteID that was passed into consensus", + reg, + &errs, + ) + m.issuerStake = metric.NewAveragerWithErrs( + namespace, + "issuer_stake", + "stake weight of the peer who provided a block that was issued into consensus", + reg, + &errs, + ) + m.issued = prometheus.NewCounterVec(prometheus.CounterOpts{ 
Namespace: namespace, - Name: "non_verified_blks", - Help: "Number of non-verified blocks in the memory", - }) + Name: "blks_issued", + Help: "number of blocks that have been issued into consensus by discovery mechanism", + }, []string{"source"}) + + // Register the labels + m.issued.WithLabelValues(pullGossipSource) + m.issued.WithLabelValues(pushGossipSource) + m.issued.WithLabelValues(putGossipSource) + m.issued.WithLabelValues(builtSource) + m.issued.WithLabelValues(unknownSource) errs.Add( reg.Register(m.bootstrapFinished), @@ -82,6 +155,12 @@ func (m *metrics) Initialize(namespace string, reg prometheus.Registerer) error reg.Register(m.numBuildsFailed), reg.Register(m.numUselessPutBytes), reg.Register(m.numUselessPushQueryBytes), + reg.Register(m.numMissingAcceptedBlocks), + reg.Register(m.numProcessingAncestorFetchesFailed), + reg.Register(m.numProcessingAncestorFetchesDropped), + reg.Register(m.numProcessingAncestorFetchesSucceeded), + reg.Register(m.numProcessingAncestorFetchesUnneeded), + reg.Register(m.issued), ) return errs.Err } diff --git a/avalanchego/snow/engine/snowman/syncer/config.go b/avalanchego/snow/engine/snowman/syncer/config.go index 7b2d59f5..8adb1e6d 100644 --- a/avalanchego/snow/engine/snowman/syncer/config.go +++ b/avalanchego/snow/engine/snowman/syncer/config.go @@ -1,19 +1,27 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package syncer import ( + "fmt" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/snow/engine/common/tracker" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" "github.com/ava-labs/avalanchego/snow/validators" ) type Config struct { - common.Config common.AllGetsServer + Ctx *snow.ConsensusContext + + StartupTracker tracker.Startup + Sender common.Sender + // SampleK determines the number of nodes to attempt to fetch the latest // state sync summary from. In order for a round of voting to succeed, there // must be at least one correct node sampled. @@ -25,46 +33,49 @@ type Config struct { // StateSyncBeacons are the nodes that will be used to sample and vote over // state summaries. - StateSyncBeacons validators.Set + StateSyncBeacons validators.Manager VM block.ChainVM } func NewConfig( - commonCfg common.Config, - stateSyncerIDs []ids.NodeID, snowGetHandler common.AllGetsServer, + ctx *snow.ConsensusContext, + startupTracker tracker.Startup, + sender common.Sender, + beacons validators.Manager, + sampleK int, + alpha uint64, + stateSyncerIDs []ids.NodeID, vm block.ChainVM, ) (Config, error) { - // Initialize the default values that will be used if stateSyncerIDs is - // empty. - var ( - stateSyncBeacons = commonCfg.Beacons - syncAlpha = commonCfg.Alpha - syncSampleK = commonCfg.SampleK - ) + // Initialize the beacons that will be used if stateSyncerIDs is empty. + stateSyncBeacons := beacons // If the user has manually provided state syncer IDs, then override the // state sync beacons to them. if len(stateSyncerIDs) != 0 { - stateSyncBeacons = validators.NewSet() + stateSyncBeacons = validators.NewManager() for _, peerID := range stateSyncerIDs { // Invariant: We never use the TxID or BLS keys populated here. 
- if err := stateSyncBeacons.Add(peerID, nil, ids.Empty, 1); err != nil { + if err := stateSyncBeacons.AddStaker(ctx.SubnetID, peerID, nil, ids.Empty, 1); err != nil { return Config{}, err } } - stateSyncingWeight := stateSyncBeacons.Weight() - if uint64(syncSampleK) > stateSyncingWeight { - syncSampleK = int(stateSyncingWeight) + stateSyncingWeight, err := stateSyncBeacons.TotalWeight(ctx.SubnetID) + if err != nil { + return Config{}, fmt.Errorf("failed to calculate total weight of state sync beacons for subnet %s: %w", ctx.SubnetID, err) } - syncAlpha = stateSyncingWeight/2 + 1 // must be > 50% + sampleK = int(min(uint64(sampleK), stateSyncingWeight)) + alpha = stateSyncingWeight/2 + 1 // must be > 50% } return Config{ - Config: commonCfg, AllGetsServer: snowGetHandler, - SampleK: syncSampleK, - Alpha: syncAlpha, + Ctx: ctx, + StartupTracker: startupTracker, + Sender: sender, + SampleK: sampleK, + Alpha: alpha, StateSyncBeacons: stateSyncBeacons, VM: vm, }, nil diff --git a/avalanchego/snow/engine/snowman/syncer/state_syncer.go b/avalanchego/snow/engine/snowman/syncer/state_syncer.go index fe75d137..bc549a0c 100644 --- a/avalanchego/snow/engine/snowman/syncer/state_syncer.go +++ b/avalanchego/snow/engine/snowman/syncer/state_syncer.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package syncer @@ -6,8 +6,7 @@ package syncer import ( "context" "fmt" - - stdmath "math" + "math" "go.uber.org/zap" @@ -18,11 +17,17 @@ import ( "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" "github.com/ava-labs/avalanchego/snow/validators" - "github.com/ava-labs/avalanchego/utils/math" + "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/version" + + safemath "github.com/ava-labs/avalanchego/utils/math" ) +// maxOutstandingBroadcastRequests is the maximum number of requests to have +// outstanding when broadcasting. +const maxOutstandingBroadcastRequests = 50 + var _ common.StateSyncer = (*stateSyncer)(nil) // summary content as received from network, along with accumulated weight. @@ -58,7 +63,7 @@ type stateSyncer struct { // Holds the beacons that were sampled for the accepted frontier // Won't be consumed as seeders are reached out. Used to rescale // alpha for frontiers - frontierSeeders validators.Set + frontierSeeders validators.Manager // IDs of validators we should request state summary frontier from. // Will be consumed seeders are reached out for frontier. 
targetSeeders set.Set[ids.NodeID] @@ -83,9 +88,6 @@ type stateSyncer struct { // we keep a list of deduplicated height ready for voting summariesHeights set.Set[uint64] uniqueSummariesHeights []uint64 - - // number of times the state sync has been attempted - attempts int } func New( @@ -107,6 +109,57 @@ func New( } } +func (ss *stateSyncer) Context() *snow.ConsensusContext { + return ss.Ctx +} + +func (ss *stateSyncer) Start(ctx context.Context, startReqID uint32) error { + ss.Ctx.Log.Info("starting state sync") + + ss.Ctx.State.Set(snow.EngineState{ + Type: p2p.EngineType_ENGINE_TYPE_SNOWMAN, + State: snow.StateSyncing, + }) + if err := ss.VM.SetState(ctx, snow.StateSyncing); err != nil { + return fmt.Errorf("failed to notify VM that state syncing has started: %w", err) + } + + ss.requestID = startReqID + + return ss.tryStartSyncing(ctx) +} + +func (ss *stateSyncer) Connected(ctx context.Context, nodeID ids.NodeID, nodeVersion *version.Application) error { + if err := ss.VM.Connected(ctx, nodeID, nodeVersion); err != nil { + return err + } + + if err := ss.StartupTracker.Connected(ctx, nodeID, nodeVersion); err != nil { + return err + } + + return ss.tryStartSyncing(ctx) +} + +func (ss *stateSyncer) Disconnected(ctx context.Context, nodeID ids.NodeID) error { + if err := ss.VM.Disconnected(ctx, nodeID); err != nil { + return err + } + + return ss.StartupTracker.Disconnected(ctx, nodeID) +} + +// tryStartSyncing will start syncing the first time it is called while the +// startupTracker is reporting that the protocol should start. 
+func (ss *stateSyncer) tryStartSyncing(ctx context.Context) error { + if ss.started || !ss.StartupTracker.ShouldStart() { + return nil + } + + ss.started = true + return ss.startup(ctx) +} + func (ss *stateSyncer) StateSummaryFrontier(ctx context.Context, nodeID ids.NodeID, requestID uint32, summaryBytes []byte) error { // ignores any late responses if requestID != ss.requestID { @@ -142,13 +195,16 @@ func (ss *stateSyncer) StateSummaryFrontier(ctx context.Context, nodeID ids.Node ss.uniqueSummariesHeights = append(ss.uniqueSummariesHeights, height) } } else { - ss.Ctx.Log.Debug("failed to parse summary", - zap.Error(err), - ) - ss.Ctx.Log.Verbo("failed to parse summary", - zap.Binary("summary", summaryBytes), - zap.Error(err), - ) + if ss.Ctx.Log.Enabled(logging.Verbo) { + ss.Ctx.Log.Verbo("failed to parse summary", + zap.Binary("summary", summaryBytes), + zap.Error(err), + ) + } else { + ss.Ctx.Log.Debug("failed to parse summary", + zap.Error(err), + ) + } } return ss.receivedStateSummaryFrontier(ctx) @@ -187,20 +243,27 @@ func (ss *stateSyncer) receivedStateSummaryFrontier(ctx context.Context) error { // If we got too many timeouts, we restart state syncing hoping that network // problems will go away and we can collect a qualified frontier. 
// We assume the frontier is qualified after an alpha proportion of frontier seeders have responded - frontierAlpha := float64(ss.frontierSeeders.Weight()*ss.Alpha) / float64(ss.StateSyncBeacons.Weight()) - failedBeaconWeight := ss.StateSyncBeacons.SubsetWeight(ss.failedSeeders) + frontiersTotalWeight, err := ss.frontierSeeders.TotalWeight(ss.Ctx.SubnetID) + if err != nil { + return fmt.Errorf("failed to get total weight of frontier seeders for subnet %s: %w", ss.Ctx.SubnetID, err) + } + beaconsTotalWeight, err := ss.StateSyncBeacons.TotalWeight(ss.Ctx.SubnetID) + if err != nil { + return fmt.Errorf("failed to get total weight of state sync beacons for subnet %s: %w", ss.Ctx.SubnetID, err) + } + frontierAlpha := float64(frontiersTotalWeight*ss.Alpha) / float64(beaconsTotalWeight) + failedBeaconWeight, err := ss.StateSyncBeacons.SubsetWeight(ss.Ctx.SubnetID, ss.failedSeeders) + if err != nil { + return fmt.Errorf("failed to get total weight of failed beacons: %w", err) + } - frontierStake := ss.frontierSeeders.Weight() - failedBeaconWeight + frontierStake := frontiersTotalWeight - failedBeaconWeight if float64(frontierStake) < frontierAlpha { - ss.Ctx.Log.Debug("didn't receive enough frontiers", + ss.Ctx.Log.Debug("restarting state sync", + zap.String("reason", "didn't receive enough frontiers"), zap.Int("numFailedValidators", ss.failedSeeders.Len()), - zap.Int("numStateSyncAttempts", ss.attempts), ) - - if ss.Config.RetryBootstrap { - ss.Ctx.Log.Debug("restarting state sync") - return ss.restart(ctx) - } + return ss.startup(ctx) } ss.requestID++ @@ -208,7 +271,7 @@ func (ss *stateSyncer) receivedStateSummaryFrontier(ctx context.Context) error { return nil } -func (ss *stateSyncer) AcceptedStateSummary(ctx context.Context, nodeID ids.NodeID, requestID uint32, summaryIDs []ids.ID) error { +func (ss *stateSyncer) AcceptedStateSummary(ctx context.Context, nodeID ids.NodeID, requestID uint32, summaryIDs set.Set[ids.ID]) error { // ignores any late responses if requestID 
!= ss.requestID { ss.Ctx.Log.Debug("received out-of-sync AcceptedStateSummary message", @@ -229,27 +292,44 @@ func (ss *stateSyncer) AcceptedStateSummary(ctx context.Context, nodeID ids.Node // Mark that we received a response from [nodeID] ss.pendingVoters.Remove(nodeID) - weight := ss.StateSyncBeacons.GetWeight(nodeID) - for _, summaryID := range summaryIDs { + nodeWeight := ss.StateSyncBeacons.GetWeight(ss.Ctx.SubnetID, nodeID) + ss.Ctx.Log.Debug("adding weight to summaries", + zap.Stringer("nodeID", nodeID), + zap.Stringer("subnetID", ss.Ctx.SubnetID), + zap.Reflect("summaryIDs", summaryIDs), + zap.Uint64("nodeWeight", nodeWeight), + ) + for summaryID := range summaryIDs { ws, ok := ss.weightedSummaries[summaryID] if !ok { ss.Ctx.Log.Debug("skipping summary", - zap.String("reason", "received a vote from validator for unknown summary"), + zap.String("reason", "unknown summary"), zap.Stringer("nodeID", nodeID), zap.Stringer("summaryID", summaryID), ) continue } - newWeight, err := math.Add64(weight, ws.weight) + newWeight, err := safemath.Add64(nodeWeight, ws.weight) if err != nil { - ss.Ctx.Log.Error("failed to calculate the Accepted votes", - zap.Uint64("weight", weight), + ss.Ctx.Log.Error("failed to calculate new summary weight", + zap.Stringer("nodeID", nodeID), + zap.Stringer("summaryID", summaryID), + zap.Uint64("height", ws.summary.Height()), + zap.Uint64("nodeWeight", nodeWeight), zap.Uint64("previousWeight", ws.weight), zap.Error(err), ) - newWeight = stdmath.MaxUint64 + newWeight = math.MaxUint64 } + + ss.Ctx.Log.Verbo("updating summary weight", + zap.Stringer("nodeID", nodeID), + zap.Stringer("summaryID", summaryID), + zap.Uint64("height", ws.summary.Height()), + zap.Uint64("previousWeight", ws.weight), + zap.Uint64("newWeight", newWeight), + ) ws.weight = newWeight } @@ -266,6 +346,8 @@ func (ss *stateSyncer) AcceptedStateSummary(ctx context.Context, nodeID ids.Node if ws.weight < ss.Alpha { ss.Ctx.Log.Debug("removing summary", zap.String("reason", 
"insufficient weight"), + zap.Stringer("summaryID", summaryID), + zap.Uint64("height", ws.summary.Height()), zap.Uint64("currentWeight", ws.weight), zap.Uint64("requiredWeight", ss.Alpha), ) @@ -277,22 +359,28 @@ func (ss *stateSyncer) AcceptedStateSummary(ctx context.Context, nodeID ids.Node size := len(ss.weightedSummaries) if size == 0 { // retry the state sync if the weight is not enough to state sync - failedBeaconWeight := ss.StateSyncBeacons.SubsetWeight(ss.failedVoters) + failedVotersWeight, err := ss.StateSyncBeacons.SubsetWeight(ss.Ctx.SubnetID, ss.failedVoters) + if err != nil { + return fmt.Errorf("failed to get total weight of failed voters: %w", err) + } // if we had too many timeouts when asking for validator votes, we should restart // state sync hoping for the network problems to go away; otherwise, we received // enough (>= ss.Alpha) responses, but no state summary was supported by a majority // of validators (i.e. votes are split between minorities supporting different state // summaries), so there is no point in retrying state sync; we should move ahead to bootstrapping - votingStakes := ss.StateSyncBeacons.Weight() - failedBeaconWeight - if ss.Config.RetryBootstrap && votingStakes < ss.Alpha { + beaconsTotalWeight, err := ss.StateSyncBeacons.TotalWeight(ss.Ctx.SubnetID) + if err != nil { + return fmt.Errorf("failed to get total weight of state sync beacons for subnet %s: %w", ss.Ctx.SubnetID, err) + } + votingStakes := beaconsTotalWeight - failedVotersWeight + if votingStakes < ss.Alpha { ss.Ctx.Log.Debug("restarting state sync", zap.String("reason", "not enough votes received"), - zap.Int("numBeacons", ss.StateSyncBeacons.Len()), + zap.Int("numBeacons", ss.StateSyncBeacons.Count(ss.Ctx.SubnetID)), zap.Int("numFailedSyncers", ss.failedVoters.Len()), - zap.Int("numAttempts", ss.attempts), ) - return ss.restart(ctx) + return ss.startup(ctx) } ss.Ctx.Log.Info("skipping state sync", @@ -381,27 +469,6 @@ func (ss *stateSyncer) 
GetAcceptedStateSummaryFailed(ctx context.Context, nodeID return ss.AcceptedStateSummary(ctx, nodeID, requestID, nil) } -func (ss *stateSyncer) Start(ctx context.Context, startReqID uint32) error { - ss.Ctx.Log.Info("starting state sync") - - ss.Ctx.State.Set(snow.EngineState{ - Type: p2p.EngineType_ENGINE_TYPE_SNOWMAN, - State: snow.StateSyncing, - }) - if err := ss.VM.SetState(ctx, snow.StateSyncing); err != nil { - return fmt.Errorf("failed to notify VM that state syncing has started: %w", err) - } - - ss.requestID = startReqID - - if !ss.StartupTracker.ShouldStart() { - return nil - } - - ss.started = true - return ss.startup(ctx) -} - // startup do start the whole state sync process by // sampling frontier seeders, listing state syncers to request votes to // and reaching out frontier seeders if any. Otherwise, it moves immediately @@ -423,18 +490,18 @@ func (ss *stateSyncer) startup(ctx context.Context) error { ss.failedVoters.Clear() // sample K beacons to retrieve frontier from - beaconIDs, err := ss.StateSyncBeacons.Sample(ss.Config.SampleK) + beaconIDs, err := ss.StateSyncBeacons.Sample(ss.Ctx.SubnetID, ss.Config.SampleK) if err != nil { return err } - ss.frontierSeeders = validators.NewSet() + ss.frontierSeeders = validators.NewManager() for _, nodeID := range beaconIDs { - if !ss.frontierSeeders.Contains(nodeID) { + if _, ok := ss.frontierSeeders.GetValidator(ss.Ctx.SubnetID, nodeID); !ok { // Invariant: We never use the TxID or BLS keys populated here. 
- err = ss.frontierSeeders.Add(nodeID, nil, ids.Empty, 1) + err = ss.frontierSeeders.AddStaker(ss.Ctx.SubnetID, nodeID, nil, ids.Empty, 1) } else { - err = ss.frontierSeeders.AddWeight(nodeID, 1) + err = ss.frontierSeeders.AddWeight(ss.Ctx.SubnetID, nodeID, 1) } if err != nil { return err @@ -443,9 +510,7 @@ func (ss *stateSyncer) startup(ctx context.Context) error { } // list all beacons, to reach them for voting on frontier - for _, vdr := range ss.StateSyncBeacons.List() { - ss.targetVoters.Add(vdr.NodeID) - } + ss.targetVoters.Add(ss.StateSyncBeacons.GetValidatorIDs(ss.Ctx.SubnetID)...) // check if there is an ongoing state sync; if so add its state summary // to the frontier to request votes on @@ -468,7 +533,6 @@ func (ss *stateSyncer) startup(ctx context.Context) error { } // initiate messages exchange - ss.attempts++ if ss.targetSeeders.Len() == 0 { ss.Ctx.Log.Info("State syncing skipped due to no provided syncers") return ss.onDoneStateSyncing(ctx, ss.requestID) @@ -479,22 +543,12 @@ func (ss *stateSyncer) startup(ctx context.Context) error { return nil } -func (ss *stateSyncer) restart(ctx context.Context) error { - if ss.attempts > 0 && ss.attempts%ss.RetryBootstrapWarnFrequency == 0 { - ss.Ctx.Log.Debug("check internet connection", - zap.Int("numSyncAttempts", ss.attempts), - ) - } - - return ss.startup(ctx) -} - // Ask up to [common.MaxOutstandingBroadcastRequests] state sync validators at a time // to send their accepted state summary. 
It is called again until there are // no more seeders to be reached in the pending set func (ss *stateSyncer) sendGetStateSummaryFrontiers(ctx context.Context) { vdrs := set.NewSet[ids.NodeID](1) - for ss.targetSeeders.Len() > 0 && ss.pendingSeeders.Len() < common.MaxOutstandingBroadcastRequests { + for ss.targetSeeders.Len() > 0 && ss.pendingSeeders.Len() < maxOutstandingBroadcastRequests { vdr, _ := ss.targetSeeders.Pop() vdrs.Add(vdr) ss.pendingSeeders.Add(vdr) @@ -510,7 +564,7 @@ func (ss *stateSyncer) sendGetStateSummaryFrontiers(ctx context.Context) { // no more voters to be reached in the pending set. func (ss *stateSyncer) sendGetAcceptedStateSummaries(ctx context.Context) { vdrs := set.NewSet[ids.NodeID](1) - for ss.targetVoters.Len() > 0 && ss.pendingVoters.Len() < common.MaxOutstandingBroadcastRequests { + for ss.targetVoters.Len() > 0 && ss.pendingVoters.Len() < maxOutstandingBroadcastRequests { vdr, _ := ss.targetVoters.Pop() vdrs.Add(vdr) ss.pendingVoters.Add(vdr) @@ -537,37 +591,16 @@ func (ss *stateSyncer) Notify(ctx context.Context, msg common.Message) error { return ss.onDoneStateSyncing(ctx, ss.requestID) } -func (ss *stateSyncer) Connected(ctx context.Context, nodeID ids.NodeID, nodeVersion *version.Application) error { - if err := ss.VM.Connected(ctx, nodeID, nodeVersion); err != nil { - return err - } - - if err := ss.StartupTracker.Connected(ctx, nodeID, nodeVersion); err != nil { - return err - } - - if ss.started || !ss.StartupTracker.ShouldStart() { - return nil - } - - ss.started = true - return ss.startup(ctx) -} - -func (ss *stateSyncer) Disconnected(ctx context.Context, nodeID ids.NodeID) error { - if err := ss.VM.Disconnected(ctx, nodeID); err != nil { - return err - } - - return ss.StartupTracker.Disconnected(ctx, nodeID) -} - func (*stateSyncer) Gossip(context.Context) error { return nil } func (ss *stateSyncer) Shutdown(ctx context.Context) error { ss.Config.Ctx.Log.Info("shutting down state syncer") + + ss.Ctx.Lock.Lock() + defer 
ss.Ctx.Lock.Unlock() + return ss.VM.Shutdown(ctx) } @@ -578,6 +611,9 @@ func (*stateSyncer) Timeout(context.Context) error { } func (ss *stateSyncer) HealthCheck(ctx context.Context) (interface{}, error) { + ss.Ctx.Lock.Lock() + defer ss.Ctx.Lock.Unlock() + vmIntf, vmErr := ss.VM.HealthCheck(ctx) intf := map[string]interface{}{ "consensus": struct{}{}, @@ -586,15 +622,14 @@ func (ss *stateSyncer) HealthCheck(ctx context.Context) (interface{}, error) { return intf, vmErr } -func (ss *stateSyncer) GetVM() common.VM { - return ss.VM -} - func (ss *stateSyncer) IsEnabled(ctx context.Context) (bool, error) { if ss.stateSyncVM == nil { // state sync is not implemented return false, nil } + ss.Ctx.Lock.Lock() + defer ss.Ctx.Lock.Unlock() + return ss.stateSyncVM.StateSyncEnabled(ctx) } diff --git a/avalanchego/snow/engine/snowman/syncer/state_syncer_test.go b/avalanchego/snow/engine/snowman/syncer/state_syncer_test.go index 0d5b0155..1ec1e670 100644 --- a/avalanchego/snow/engine/snowman/syncer/state_syncer_test.go +++ b/avalanchego/snow/engine/snowman/syncer/state_syncer_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package syncer @@ -9,20 +9,21 @@ import ( "errors" "math" "testing" + "time" + "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/engine/common/tracker" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" "github.com/ava-labs/avalanchego/snow/engine/snowman/getter" + "github.com/ava-labs/avalanchego/snow/snowtest" + "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/version" - - safeMath "github.com/ava-labs/avalanchego/utils/math" ) var ( @@ -35,20 +36,25 @@ func TestStateSyncerIsEnabledIfVMSupportsStateSyncing(t *testing.T) { require := require.New(t) // Build state syncer + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) sender := &common.SenderTest{T: t} - commonCfg := &common.Config{ - Ctx: snow.DefaultConsensusContextTest(), - Sender: sender, - } // Non state syncableVM case nonStateSyncableVM := &block.TestVM{ TestVM: common.TestVM{T: t}, } - dummyGetter, err := getter.New(nonStateSyncableVM, *commonCfg) + dummyGetter, err := getter.New( + nonStateSyncableVM, + sender, + logging.NoLog{}, + time.Second, + 2000, + prometheus.NewRegistry(), + ) require.NoError(err) - cfg, err := NewConfig(*commonCfg, nil, dummyGetter, nonStateSyncableVM) + cfg, err := NewConfig(dummyGetter, ctx, nil, sender, nil, 0, 0, nil, nonStateSyncableVM) require.NoError(err) syncer := New(cfg, func(context.Context, uint32) error { return nil @@ -59,8 +65,6 @@ func TestStateSyncerIsEnabledIfVMSupportsStateSyncing(t *testing.T) { require.False(enabled) // State syncableVM case - commonCfg.Ctx = snow.DefaultConsensusContextTest() // reset metrics - fullVM := &fullVM{ TestVM: &block.TestVM{ TestVM: common.TestVM{T: t}, @@ 
-69,10 +73,16 @@ func TestStateSyncerIsEnabledIfVMSupportsStateSyncing(t *testing.T) { T: t, }, } - dummyGetter, err = getter.New(fullVM, *commonCfg) + dummyGetter, err = getter.New( + fullVM, + sender, + logging.NoLog{}, + time.Second, + 2000, + prometheus.NewRegistry()) require.NoError(err) - cfg, err = NewConfig(*commonCfg, nil, dummyGetter, fullVM) + cfg, err = NewConfig(dummyGetter, ctx, nil, sender, nil, 0, 0, nil, fullVM) require.NoError(err) syncer = New(cfg, func(context.Context, uint32) error { return nil @@ -97,70 +107,61 @@ func TestStateSyncerIsEnabledIfVMSupportsStateSyncing(t *testing.T) { func TestStateSyncingStartsOnlyIfEnoughStakeIsConnected(t *testing.T) { require := require.New(t) - - vdrs := buildTestPeers(t) - alpha := vdrs.Weight() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) + beacons := buildTestPeers(t, ctx.SubnetID) + alpha, err := beacons.TotalWeight(ctx.SubnetID) + require.NoError(err) startupAlpha := alpha peers := tracker.NewPeers() startup := tracker.NewStartup(peers, startupAlpha) - vdrs.RegisterCallbackListener(startup) + beacons.RegisterCallbackListener(ctx.SubnetID, startup) - commonCfg := common.Config{ - Ctx: snow.DefaultConsensusContextTest(), - Beacons: vdrs, - SampleK: vdrs.Len(), - Alpha: alpha, - StartupTracker: startup, - } - syncer, _, sender := buildTestsObjects(t, &commonCfg) + syncer, _, sender := buildTestsObjects(t, ctx, startup, beacons, alpha) sender.CantSendGetStateSummaryFrontier = true sender.SendGetStateSummaryFrontierF = func(context.Context, set.Set[ids.NodeID], uint32) {} startReqID := uint32(0) // attempt starting bootstrapper with no stake connected. Bootstrapper should stall. - require.False(commonCfg.StartupTracker.ShouldStart()) + require.False(startup.ShouldStart()) require.NoError(syncer.Start(context.Background(), startReqID)) require.False(syncer.started) // attempt starting bootstrapper with not enough stake connected. Bootstrapper should stall. 
vdr0 := ids.GenerateTestNodeID() - require.NoError(vdrs.Add(vdr0, nil, ids.Empty, startupAlpha/2)) + require.NoError(beacons.AddStaker(ctx.SubnetID, vdr0, nil, ids.Empty, startupAlpha/2)) require.NoError(syncer.Connected(context.Background(), vdr0, version.CurrentApp)) - require.False(commonCfg.StartupTracker.ShouldStart()) + require.False(startup.ShouldStart()) require.NoError(syncer.Start(context.Background(), startReqID)) require.False(syncer.started) // finally attempt starting bootstrapper with enough stake connected. Frontiers should be requested. vdr := ids.GenerateTestNodeID() - require.NoError(vdrs.Add(vdr, nil, ids.Empty, startupAlpha)) + require.NoError(beacons.AddStaker(ctx.SubnetID, vdr, nil, ids.Empty, startupAlpha)) require.NoError(syncer.Connected(context.Background(), vdr, version.CurrentApp)) - require.True(commonCfg.StartupTracker.ShouldStart()) + require.True(startup.ShouldStart()) require.NoError(syncer.Start(context.Background(), startReqID)) require.True(syncer.started) } func TestStateSyncLocalSummaryIsIncludedAmongFrontiersIfAvailable(t *testing.T) { require := require.New(t) - - vdrs := buildTestPeers(t) - startupAlpha := (3*vdrs.Weight() + 3) / 4 + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) + beacons := buildTestPeers(t, ctx.SubnetID) + totalWeight, err := beacons.TotalWeight(ctx.SubnetID) + require.NoError(err) + startupAlpha := (3*totalWeight + 3) / 4 peers := tracker.NewPeers() startup := tracker.NewStartup(peers, startupAlpha) - vdrs.RegisterCallbackListener(startup) + beacons.RegisterCallbackListener(ctx.SubnetID, startup) - commonCfg := common.Config{ - Ctx: snow.DefaultConsensusContextTest(), - Beacons: vdrs, - SampleK: vdrs.Len(), - Alpha: (vdrs.Weight() + 1) / 2, - StartupTracker: startup, - } - syncer, fullVM, _ := buildTestsObjects(t, &commonCfg) + syncer, fullVM, _ := buildTestsObjects(t, ctx, startup, beacons, (totalWeight+1)/2) // mock VM to simulate a valid summary is 
returned localSummary := &block.TestStateSummary{ @@ -174,35 +175,31 @@ func TestStateSyncLocalSummaryIsIncludedAmongFrontiersIfAvailable(t *testing.T) } // Connect enough stake to start syncer - for _, vdr := range vdrs.List() { - require.NoError(syncer.Connected(context.Background(), vdr.NodeID, version.CurrentApp)) + for _, nodeID := range beacons.GetValidatorIDs(ctx.SubnetID) { + require.NoError(syncer.Connected(context.Background(), nodeID, version.CurrentApp)) } - require.True(syncer.locallyAvailableSummary == localSummary) + require.Equal(localSummary, syncer.locallyAvailableSummary) ws, ok := syncer.weightedSummaries[summaryID] require.True(ok) - require.True(bytes.Equal(ws.summary.Bytes(), summaryBytes)) + require.Equal(summaryBytes, ws.summary.Bytes()) require.Zero(ws.weight) } func TestStateSyncNotFoundOngoingSummaryIsNotIncludedAmongFrontiers(t *testing.T) { require := require.New(t) - - vdrs := buildTestPeers(t) - startupAlpha := (3*vdrs.Weight() + 3) / 4 + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) + beacons := buildTestPeers(t, ctx.SubnetID) + totalWeight, err := beacons.TotalWeight(ctx.SubnetID) + require.NoError(err) + startupAlpha := (3*totalWeight + 3) / 4 peers := tracker.NewPeers() startup := tracker.NewStartup(peers, startupAlpha) - vdrs.RegisterCallbackListener(startup) + beacons.RegisterCallbackListener(ctx.SubnetID, startup) - commonCfg := common.Config{ - Ctx: snow.DefaultConsensusContextTest(), - Beacons: vdrs, - SampleK: vdrs.Len(), - Alpha: (vdrs.Weight() + 1) / 2, - StartupTracker: startup, - } - syncer, fullVM, _ := buildTestsObjects(t, &commonCfg) + syncer, fullVM, _ := buildTestsObjects(t, ctx, startup, beacons, (totalWeight+1)/2) // mock VM to simulate a no summary returned fullVM.CantStateSyncGetOngoingSummary = true @@ -211,8 +208,8 @@ func TestStateSyncNotFoundOngoingSummaryIsNotIncludedAmongFrontiers(t *testing.T } // Connect enough stake to start syncer - for _, vdr := range 
vdrs.List() { - require.NoError(syncer.Connected(context.Background(), vdr.NodeID, version.CurrentApp)) + for _, nodeID := range beacons.GetValidatorIDs(ctx.SubnetID) { + require.NoError(syncer.Connected(context.Background(), nodeID, version.CurrentApp)) } require.Nil(syncer.locallyAvailableSummary) @@ -222,21 +219,18 @@ func TestStateSyncNotFoundOngoingSummaryIsNotIncludedAmongFrontiers(t *testing.T func TestBeaconsAreReachedForFrontiersUponStartup(t *testing.T) { require := require.New(t) - vdrs := buildTestPeers(t) - startupAlpha := (3*vdrs.Weight() + 3) / 4 + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) + beacons := buildTestPeers(t, ctx.SubnetID) + totalWeight, err := beacons.TotalWeight(ctx.SubnetID) + require.NoError(err) + startupAlpha := (3*totalWeight + 3) / 4 peers := tracker.NewPeers() startup := tracker.NewStartup(peers, startupAlpha) - vdrs.RegisterCallbackListener(startup) + beacons.RegisterCallbackListener(ctx.SubnetID, startup) - commonCfg := common.Config{ - Ctx: snow.DefaultConsensusContextTest(), - Beacons: vdrs, - SampleK: vdrs.Len(), - Alpha: (vdrs.Weight() + 1) / 2, - StartupTracker: startup, - } - syncer, _, sender := buildTestsObjects(t, &commonCfg) + syncer, _, sender := buildTestsObjects(t, ctx, startup, beacons, (totalWeight+1)/2) // set sender to track nodes reached out contactedFrontiersProviders := set.NewSet[ids.NodeID](3) @@ -246,39 +240,36 @@ func TestBeaconsAreReachedForFrontiersUponStartup(t *testing.T) { } // Connect enough stake to start syncer - for _, vdr := range vdrs.List() { - require.NoError(syncer.Connected(context.Background(), vdr.NodeID, version.CurrentApp)) + for _, nodeID := range beacons.GetValidatorIDs(ctx.SubnetID) { + require.NoError(syncer.Connected(context.Background(), nodeID, version.CurrentApp)) } // check that vdrs are reached out for frontiers - require.True(len(contactedFrontiersProviders) == safeMath.Min(vdrs.Len(), 
common.MaxOutstandingBroadcastRequests)) + require.Len(contactedFrontiersProviders, min(beacons.Count(ctx.SubnetID), maxOutstandingBroadcastRequests)) for beaconID := range contactedFrontiersProviders { // check that beacon is duly marked as reached out - require.True(syncer.pendingSeeders.Contains(beaconID)) + require.Contains(syncer.pendingSeeders, beaconID) } // check that, obviously, no summary is yet registered - require.True(len(syncer.weightedSummaries) == 0) + require.Empty(syncer.weightedSummaries) } func TestUnRequestedStateSummaryFrontiersAreDropped(t *testing.T) { require := require.New(t) - vdrs := buildTestPeers(t) - startupAlpha := (3*vdrs.Weight() + 3) / 4 + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) + beacons := buildTestPeers(t, ctx.SubnetID) + totalWeight, err := beacons.TotalWeight(ctx.SubnetID) + require.NoError(err) + startupAlpha := (3*totalWeight + 3) / 4 peers := tracker.NewPeers() startup := tracker.NewStartup(peers, startupAlpha) - vdrs.RegisterCallbackListener(startup) + beacons.RegisterCallbackListener(ctx.SubnetID, startup) - commonCfg := common.Config{ - Ctx: snow.DefaultConsensusContextTest(), - Beacons: vdrs, - SampleK: vdrs.Len(), - Alpha: (vdrs.Weight() + 1) / 2, - StartupTracker: startup, - } - syncer, fullVM, sender := buildTestsObjects(t, &commonCfg) + syncer, fullVM, sender := buildTestsObjects(t, ctx, startup, beacons, (totalWeight+1)/2) // set sender to track nodes reached out contactedFrontiersProviders := make(map[ids.NodeID]uint32) // nodeID -> reqID map @@ -290,13 +281,13 @@ func TestUnRequestedStateSummaryFrontiersAreDropped(t *testing.T) { } // Connect enough stake to start syncer - for _, vdr := range vdrs.List() { - require.NoError(syncer.Connected(context.Background(), vdr.NodeID, version.CurrentApp)) + for _, nodeID := range beacons.GetValidatorIDs(ctx.SubnetID) { + require.NoError(syncer.Connected(context.Background(), nodeID, version.CurrentApp)) } 
initiallyReachedOutBeaconsSize := len(contactedFrontiersProviders) - require.True(initiallyReachedOutBeaconsSize > 0) - require.True(initiallyReachedOutBeaconsSize <= common.MaxOutstandingBroadcastRequests) + require.Positive(initiallyReachedOutBeaconsSize) + require.LessOrEqual(initiallyReachedOutBeaconsSize, maxOutstandingBroadcastRequests) // mock VM to simulate a valid summary is returned fullVM.CantParseStateSummary = true @@ -319,8 +310,8 @@ func TestUnRequestedStateSummaryFrontiersAreDropped(t *testing.T) { math.MaxInt32, summaryBytes, )) - require.True(syncer.pendingSeeders.Contains(responsiveBeaconID)) // responsiveBeacon still pending - require.True(len(syncer.weightedSummaries) == 0) + require.Contains(syncer.pendingSeeders, responsiveBeaconID) // responsiveBeacon still pending + require.Empty(syncer.weightedSummaries) // check a response from unsolicited node is dropped unsolicitedNodeID := ids.GenerateTestNodeID() @@ -330,7 +321,7 @@ func TestUnRequestedStateSummaryFrontiersAreDropped(t *testing.T) { responsiveBeaconReqID, summaryBytes, )) - require.True(len(syncer.weightedSummaries) == 0) + require.Empty(syncer.weightedSummaries) // check a valid response is duly recorded require.NoError(syncer.StateSummaryFrontier( @@ -341,7 +332,7 @@ func TestUnRequestedStateSummaryFrontiersAreDropped(t *testing.T) { )) // responsiveBeacon not pending anymore - require.False(syncer.pendingSeeders.Contains(responsiveBeaconID)) + require.NotContains(syncer.pendingSeeders, responsiveBeaconID) // valid summary is recorded ws, ok := syncer.weightedSummaries[summaryID] @@ -351,27 +342,24 @@ func TestUnRequestedStateSummaryFrontiersAreDropped(t *testing.T) { // other listed vdrs are reached for data require.True( len(contactedFrontiersProviders) > initiallyReachedOutBeaconsSize || - len(contactedFrontiersProviders) == vdrs.Len()) + len(contactedFrontiersProviders) == beacons.Count(ctx.SubnetID)) } func TestMalformedStateSummaryFrontiersAreDropped(t *testing.T) { require := 
require.New(t) - vdrs := buildTestPeers(t) - startupAlpha := (3*vdrs.Weight() + 3) / 4 + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) + beacons := buildTestPeers(t, ctx.SubnetID) + totalWeight, err := beacons.TotalWeight(ctx.SubnetID) + require.NoError(err) + startupAlpha := (3*totalWeight + 3) / 4 peers := tracker.NewPeers() startup := tracker.NewStartup(peers, startupAlpha) - vdrs.RegisterCallbackListener(startup) + beacons.RegisterCallbackListener(ctx.SubnetID, startup) - commonCfg := common.Config{ - Ctx: snow.DefaultConsensusContextTest(), - Beacons: vdrs, - SampleK: vdrs.Len(), - Alpha: (vdrs.Weight() + 1) / 2, - StartupTracker: startup, - } - syncer, fullVM, sender := buildTestsObjects(t, &commonCfg) + syncer, fullVM, sender := buildTestsObjects(t, ctx, startup, beacons, (totalWeight+1)/2) // set sender to track nodes reached out contactedFrontiersProviders := make(map[ids.NodeID]uint32) // nodeID -> reqID map @@ -383,13 +371,13 @@ func TestMalformedStateSummaryFrontiersAreDropped(t *testing.T) { } // Connect enough stake to start syncer - for _, vdr := range vdrs.List() { - require.NoError(syncer.Connected(context.Background(), vdr.NodeID, version.CurrentApp)) + for _, nodeID := range beacons.GetValidatorIDs(ctx.SubnetID) { + require.NoError(syncer.Connected(context.Background(), nodeID, version.CurrentApp)) } initiallyReachedOutBeaconsSize := len(contactedFrontiersProviders) - require.True(initiallyReachedOutBeaconsSize > 0) - require.True(initiallyReachedOutBeaconsSize <= common.MaxOutstandingBroadcastRequests) + require.Positive(initiallyReachedOutBeaconsSize) + require.LessOrEqual(initiallyReachedOutBeaconsSize, maxOutstandingBroadcastRequests) // mock VM to simulate an invalid summary is returned summary := []byte{'s', 'u', 'm', 'm', 'a', 'r', 'y'} @@ -413,37 +401,34 @@ func TestMalformedStateSummaryFrontiersAreDropped(t *testing.T) { )) // responsiveBeacon not pending anymore - 
require.False(syncer.pendingSeeders.Contains(responsiveBeaconID)) + require.NotContains(syncer.pendingSeeders, responsiveBeaconID) // invalid summary is not recorded require.True(isSummaryDecoded) - require.True(len(syncer.weightedSummaries) == 0) + require.Empty(syncer.weightedSummaries) // even in case of invalid summaries, other listed vdrs // are reached for data require.True( len(contactedFrontiersProviders) > initiallyReachedOutBeaconsSize || - len(contactedFrontiersProviders) == vdrs.Len()) + len(contactedFrontiersProviders) == beacons.Count(ctx.SubnetID)) } func TestLateResponsesFromUnresponsiveFrontiersAreNotRecorded(t *testing.T) { require := require.New(t) - vdrs := buildTestPeers(t) - startupAlpha := (3*vdrs.Weight() + 3) / 4 + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) + beacons := buildTestPeers(t, ctx.SubnetID) + totalWeight, err := beacons.TotalWeight(ctx.SubnetID) + require.NoError(err) + startupAlpha := (3*totalWeight + 3) / 4 peers := tracker.NewPeers() startup := tracker.NewStartup(peers, startupAlpha) - vdrs.RegisterCallbackListener(startup) + beacons.RegisterCallbackListener(ctx.SubnetID, startup) - commonCfg := common.Config{ - Ctx: snow.DefaultConsensusContextTest(), - Beacons: vdrs, - SampleK: vdrs.Len(), - Alpha: (vdrs.Weight() + 1) / 2, - StartupTracker: startup, - } - syncer, fullVM, sender := buildTestsObjects(t, &commonCfg) + syncer, fullVM, sender := buildTestsObjects(t, ctx, startup, beacons, (totalWeight+1)/2) // set sender to track nodes reached out contactedFrontiersProviders := make(map[ids.NodeID]uint32) // nodeID -> reqID map @@ -455,13 +440,13 @@ func TestLateResponsesFromUnresponsiveFrontiersAreNotRecorded(t *testing.T) { } // Connect enough stake to start syncer - for _, vdr := range vdrs.List() { - require.NoError(syncer.Connected(context.Background(), vdr.NodeID, version.CurrentApp)) + for _, nodeID := range beacons.GetValidatorIDs(ctx.SubnetID) { + 
require.NoError(syncer.Connected(context.Background(), nodeID, version.CurrentApp)) } initiallyReachedOutBeaconsSize := len(contactedFrontiersProviders) - require.True(initiallyReachedOutBeaconsSize > 0) - require.True(initiallyReachedOutBeaconsSize <= common.MaxOutstandingBroadcastRequests) + require.Positive(initiallyReachedOutBeaconsSize) + require.LessOrEqual(initiallyReachedOutBeaconsSize, maxOutstandingBroadcastRequests) // pick one of the vdrs that have been reached out unresponsiveBeaconID := pickRandomFrom(contactedFrontiersProviders) @@ -481,14 +466,14 @@ func TestLateResponsesFromUnresponsiveFrontiersAreNotRecorded(t *testing.T) { )) // unresponsiveBeacon not pending anymore - require.False(syncer.pendingSeeders.Contains(unresponsiveBeaconID)) - require.True(syncer.failedSeeders.Contains(unresponsiveBeaconID)) + require.NotContains(syncer.pendingSeeders, unresponsiveBeaconID) + require.Contains(syncer.failedSeeders, unresponsiveBeaconID) // even in case of timeouts, other listed vdrs // are reached for data require.True( len(contactedFrontiersProviders) > initiallyReachedOutBeaconsSize || - len(contactedFrontiersProviders) == vdrs.Len()) + len(contactedFrontiersProviders) == beacons.Count(ctx.SubnetID)) // mock VM to simulate a valid but late summary is returned fullVM.CantParseStateSummary = true @@ -509,29 +494,24 @@ func TestLateResponsesFromUnresponsiveFrontiersAreNotRecorded(t *testing.T) { )) // late summary is not recorded - require.True(len(syncer.weightedSummaries) == 0) + require.Empty(syncer.weightedSummaries) } func TestStateSyncIsRestartedIfTooManyFrontierSeedersTimeout(t *testing.T) { require := require.New(t) - vdrs := buildTestPeers(t) - startupAlpha := (3*vdrs.Weight() + 3) / 4 + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) + beacons := buildTestPeers(t, ctx.SubnetID) + totalWeight, err := beacons.TotalWeight(ctx.SubnetID) + require.NoError(err) + startupAlpha := (3*totalWeight + 3) / 4 
peers := tracker.NewPeers() startup := tracker.NewStartup(peers, startupAlpha) - vdrs.RegisterCallbackListener(startup) + beacons.RegisterCallbackListener(ctx.SubnetID, startup) - commonCfg := common.Config{ - Ctx: snow.DefaultConsensusContextTest(), - Beacons: vdrs, - SampleK: vdrs.Len(), - Alpha: (vdrs.Weight() + 1) / 2, - StartupTracker: startup, - RetryBootstrap: true, - RetryBootstrapWarnFrequency: 1, - } - syncer, fullVM, sender := buildTestsObjects(t, &commonCfg) + syncer, fullVM, sender := buildTestsObjects(t, ctx, startup, beacons, (totalWeight+1)/2) // set sender to track nodes reached out contactedFrontiersProviders := make(map[ids.NodeID]uint32) // nodeID -> reqID map @@ -568,10 +548,10 @@ func TestStateSyncIsRestartedIfTooManyFrontierSeedersTimeout(t *testing.T) { } // Connect enough stake to start syncer - for _, vdr := range vdrs.List() { - require.NoError(syncer.Connected(context.Background(), vdr.NodeID, version.CurrentApp)) + for _, nodeID := range beacons.GetValidatorIDs(ctx.SubnetID) { + require.NoError(syncer.Connected(context.Background(), nodeID, version.CurrentApp)) } - require.True(syncer.pendingSeeders.Len() != 0) + require.NotEmpty(syncer.pendingSeeders) // let just one node respond and all others timeout maxResponses := 1 @@ -600,30 +580,27 @@ func TestStateSyncIsRestartedIfTooManyFrontierSeedersTimeout(t *testing.T) { } // check that some frontier seeders are reached again for the frontier - require.True(syncer.pendingSeeders.Len() > 0) + require.NotEmpty(syncer.pendingSeeders) // check that no vote requests are issued - require.True(len(contactedVoters) == 0) + require.Empty(contactedVoters) } func TestVoteRequestsAreSentAsAllFrontierBeaconsResponded(t *testing.T) { require := require.New(t) - vdrs := buildTestPeers(t) - startupAlpha := (3*vdrs.Weight() + 3) / 4 + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) + beacons := buildTestPeers(t, ctx.SubnetID) + totalWeight, err := 
beacons.TotalWeight(ctx.SubnetID) + require.NoError(err) + startupAlpha := (3*totalWeight + 3) / 4 peers := tracker.NewPeers() startup := tracker.NewStartup(peers, startupAlpha) - vdrs.RegisterCallbackListener(startup) + beacons.RegisterCallbackListener(ctx.SubnetID, startup) - commonCfg := common.Config{ - Ctx: snow.DefaultConsensusContextTest(), - Beacons: vdrs, - SampleK: vdrs.Len(), - Alpha: (vdrs.Weight() + 1) / 2, - StartupTracker: startup, - } - syncer, fullVM, sender := buildTestsObjects(t, &commonCfg) + syncer, fullVM, sender := buildTestsObjects(t, ctx, startup, beacons, (totalWeight+1)/2) // set sender to track nodes reached out contactedFrontiersProviders := make(map[ids.NodeID]uint32) // nodeID -> reqID map @@ -637,7 +614,7 @@ func TestVoteRequestsAreSentAsAllFrontierBeaconsResponded(t *testing.T) { // mock VM to simulate a valid summary is returned fullVM.CantParseStateSummary = true fullVM.ParseStateSummaryF = func(_ context.Context, b []byte) (block.StateSummary, error) { - require.True(bytes.Equal(b, summaryBytes)) + require.Equal(summaryBytes, b) return &block.TestStateSummary{ HeightV: key, IDV: summaryID, @@ -654,10 +631,10 @@ func TestVoteRequestsAreSentAsAllFrontierBeaconsResponded(t *testing.T) { } // Connect enough stake to start syncer - for _, vdr := range vdrs.List() { - require.NoError(syncer.Connected(context.Background(), vdr.NodeID, version.CurrentApp)) + for _, nodeID := range beacons.GetValidatorIDs(ctx.SubnetID) { + require.NoError(syncer.Connected(context.Background(), nodeID, version.CurrentApp)) } - require.True(syncer.pendingSeeders.Len() != 0) + require.NotEmpty(syncer.pendingSeeders) // let all contacted vdrs respond for syncer.pendingSeeders.Len() != 0 { @@ -672,32 +649,29 @@ func TestVoteRequestsAreSentAsAllFrontierBeaconsResponded(t *testing.T) { summaryBytes, )) } - require.False(syncer.pendingSeeders.Len() != 0) + require.Empty(syncer.pendingSeeders) // check that vote requests are issued initiallyContactedVotersSize := 
len(contactedVoters) - require.True(initiallyContactedVotersSize > 0) - require.True(initiallyContactedVotersSize <= common.MaxOutstandingBroadcastRequests) + require.Positive(initiallyContactedVotersSize) + require.LessOrEqual(initiallyContactedVotersSize, maxOutstandingBroadcastRequests) } func TestUnRequestedVotesAreDropped(t *testing.T) { require := require.New(t) - vdrs := buildTestPeers(t) - startupAlpha := (3*vdrs.Weight() + 3) / 4 + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) + beacons := buildTestPeers(t, ctx.SubnetID) + totalWeight, err := beacons.TotalWeight(ctx.SubnetID) + require.NoError(err) + startupAlpha := (3*totalWeight + 3) / 4 peers := tracker.NewPeers() startup := tracker.NewStartup(peers, startupAlpha) - vdrs.RegisterCallbackListener(startup) + beacons.RegisterCallbackListener(ctx.SubnetID, startup) - commonCfg := common.Config{ - Ctx: snow.DefaultConsensusContextTest(), - Beacons: vdrs, - SampleK: vdrs.Len(), - Alpha: (vdrs.Weight() + 1) / 2, - StartupTracker: startup, - } - syncer, fullVM, sender := buildTestsObjects(t, &commonCfg) + syncer, fullVM, sender := buildTestsObjects(t, ctx, startup, beacons, (totalWeight+1)/2) // set sender to track nodes reached out contactedFrontiersProviders := make(map[ids.NodeID]uint32) // nodeID -> reqID map @@ -727,10 +701,10 @@ func TestUnRequestedVotesAreDropped(t *testing.T) { } // Connect enough stake to start syncer - for _, vdr := range vdrs.List() { - require.NoError(syncer.Connected(context.Background(), vdr.NodeID, version.CurrentApp)) + for _, nodeID := range beacons.GetValidatorIDs(ctx.SubnetID) { + require.NoError(syncer.Connected(context.Background(), nodeID, version.CurrentApp)) } - require.True(syncer.pendingSeeders.Len() != 0) + require.NotEmpty(syncer.pendingSeeders) // let all contacted vdrs respond for syncer.pendingSeeders.Len() != 0 { @@ -745,12 +719,12 @@ func TestUnRequestedVotesAreDropped(t *testing.T) { summaryBytes, )) } - 
require.False(syncer.pendingSeeders.Len() != 0) + require.Empty(syncer.pendingSeeders) // check that vote requests are issued initiallyContactedVotersSize := len(contactedVoters) - require.True(initiallyContactedVotersSize > 0) - require.True(initiallyContactedVotersSize <= common.MaxOutstandingBroadcastRequests) + require.Positive(initiallyContactedVotersSize) + require.LessOrEqual(initiallyContactedVotersSize, maxOutstandingBroadcastRequests) _, found := syncer.weightedSummaries[summaryID] require.True(found) @@ -764,12 +738,12 @@ func TestUnRequestedVotesAreDropped(t *testing.T) { context.Background(), responsiveVoterID, math.MaxInt32, - []ids.ID{summaryID}, + set.Of(summaryID), )) // responsiveVoter still pending - require.True(syncer.pendingVoters.Contains(responsiveVoterID)) - require.True(syncer.weightedSummaries[summaryID].weight == 0) + require.Contains(syncer.pendingVoters, responsiveVoterID) + require.Zero(syncer.weightedSummaries[summaryID].weight) // check a response from unsolicited node is dropped unsolicitedVoterID := ids.GenerateTestNodeID() @@ -777,47 +751,44 @@ func TestUnRequestedVotesAreDropped(t *testing.T) { context.Background(), unsolicitedVoterID, responsiveVoterReqID, - []ids.ID{summaryID}, + set.Of(summaryID), )) - require.True(syncer.weightedSummaries[summaryID].weight == 0) + require.Zero(syncer.weightedSummaries[summaryID].weight) // check a valid response is duly recorded require.NoError(syncer.AcceptedStateSummary( context.Background(), responsiveVoterID, responsiveVoterReqID, - []ids.ID{summaryID}, + set.Of(summaryID), )) // responsiveBeacon not pending anymore - require.False(syncer.pendingSeeders.Contains(responsiveVoterID)) - voterWeight := vdrs.GetWeight(responsiveVoterID) + require.NotContains(syncer.pendingSeeders, responsiveVoterID) + voterWeight := beacons.GetWeight(ctx.SubnetID, responsiveVoterID) require.Equal(voterWeight, syncer.weightedSummaries[summaryID].weight) // other listed voters are reached out require.True( 
len(contactedVoters) > initiallyContactedVotersSize || - len(contactedVoters) == vdrs.Len()) + len(contactedVoters) == beacons.Count(ctx.SubnetID)) } func TestVotesForUnknownSummariesAreDropped(t *testing.T) { require := require.New(t) - vdrs := buildTestPeers(t) - startupAlpha := (3*vdrs.Weight() + 3) / 4 + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) + beacons := buildTestPeers(t, ctx.SubnetID) + totalWeight, err := beacons.TotalWeight(ctx.SubnetID) + require.NoError(err) + startupAlpha := (3*totalWeight + 3) / 4 peers := tracker.NewPeers() startup := tracker.NewStartup(peers, startupAlpha) - vdrs.RegisterCallbackListener(startup) + beacons.RegisterCallbackListener(ctx.SubnetID, startup) - commonCfg := common.Config{ - Ctx: snow.DefaultConsensusContextTest(), - Beacons: vdrs, - SampleK: vdrs.Len(), - Alpha: (vdrs.Weight() + 1) / 2, - StartupTracker: startup, - } - syncer, fullVM, sender := buildTestsObjects(t, &commonCfg) + syncer, fullVM, sender := buildTestsObjects(t, ctx, startup, beacons, (totalWeight+1)/2) // set sender to track nodes reached out contactedFrontiersProviders := make(map[ids.NodeID]uint32) // nodeID -> reqID map @@ -847,10 +818,10 @@ func TestVotesForUnknownSummariesAreDropped(t *testing.T) { } // Connect enough stake to start syncer - for _, vdr := range vdrs.List() { - require.NoError(syncer.Connected(context.Background(), vdr.NodeID, version.CurrentApp)) + for _, nodeID := range beacons.GetValidatorIDs(ctx.SubnetID) { + require.NoError(syncer.Connected(context.Background(), nodeID, version.CurrentApp)) } - require.True(syncer.pendingSeeders.Len() != 0) + require.NotEmpty(syncer.pendingSeeders) // let all contacted vdrs respond for syncer.pendingSeeders.Len() != 0 { @@ -865,12 +836,12 @@ func TestVotesForUnknownSummariesAreDropped(t *testing.T) { summaryBytes, )) } - require.False(syncer.pendingSeeders.Len() != 0) + require.Empty(syncer.pendingSeeders) // check that vote requests are issued 
initiallyContactedVotersSize := len(contactedVoters) - require.True(initiallyContactedVotersSize > 0) - require.True(initiallyContactedVotersSize <= common.MaxOutstandingBroadcastRequests) + require.Positive(initiallyContactedVotersSize) + require.LessOrEqual(initiallyContactedVotersSize, maxOutstandingBroadcastRequests) _, found := syncer.weightedSummaries[summaryID] require.True(found) @@ -884,46 +855,44 @@ func TestVotesForUnknownSummariesAreDropped(t *testing.T) { context.Background(), responsiveVoterID, responsiveVoterReqID, - []ids.ID{unknownSummaryID}, + set.Of(unknownSummaryID), )) _, found = syncer.weightedSummaries[unknownSummaryID] require.False(found) // check that responsiveVoter cannot cast another vote - require.False(syncer.pendingSeeders.Contains(responsiveVoterID)) + require.NotContains(syncer.pendingSeeders, responsiveVoterID) require.NoError(syncer.AcceptedStateSummary( context.Background(), responsiveVoterID, responsiveVoterReqID, - []ids.ID{summaryID}, + set.Of(summaryID), )) - require.True(syncer.weightedSummaries[summaryID].weight == 0) + require.Zero(syncer.weightedSummaries[summaryID].weight) // other listed voters are reached out, even in the face of vote // on unknown summary require.True( len(contactedVoters) > initiallyContactedVotersSize || - len(contactedVoters) == vdrs.Len()) + len(contactedVoters) == beacons.Count(ctx.SubnetID)) } func TestStateSummaryIsPassedToVMAsMajorityOfVotesIsCastedForIt(t *testing.T) { require := require.New(t) - vdrs := buildTestPeers(t) - startupAlpha := (3*vdrs.Weight() + 3) / 4 + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) + beacons := buildTestPeers(t, ctx.SubnetID) + totalWeight, err := beacons.TotalWeight(ctx.SubnetID) + require.NoError(err) + startupAlpha := (3*totalWeight + 3) / 4 + alpha := (totalWeight + 1) / 2 peers := tracker.NewPeers() startup := tracker.NewStartup(peers, startupAlpha) - vdrs.RegisterCallbackListener(startup) + 
beacons.RegisterCallbackListener(ctx.SubnetID, startup) - commonCfg := common.Config{ - Ctx: snow.DefaultConsensusContextTest(), - Beacons: vdrs, - SampleK: vdrs.Len(), - Alpha: (vdrs.Weight() + 1) / 2, - StartupTracker: startup, - } - syncer, fullVM, sender := buildTestsObjects(t, &commonCfg) + syncer, fullVM, sender := buildTestsObjects(t, ctx, startup, beacons, alpha) // set sender to track nodes reached out contactedFrontiersProviders := make(map[ids.NodeID]uint32) // nodeID -> reqID map @@ -969,10 +938,10 @@ func TestStateSummaryIsPassedToVMAsMajorityOfVotesIsCastedForIt(t *testing.T) { } // Connect enough stake to start syncer - for _, vdr := range vdrs.List() { - require.NoError(syncer.Connected(context.Background(), vdr.NodeID, version.CurrentApp)) + for _, nodeID := range beacons.GetValidatorIDs(ctx.SubnetID) { + require.NoError(syncer.Connected(context.Background(), nodeID, version.CurrentApp)) } - require.True(syncer.pendingSeeders.Len() != 0) + require.NotEmpty(syncer.pendingSeeders) // let all contacted vdrs respond with majority or minority summaries for { @@ -1000,7 +969,7 @@ func TestStateSummaryIsPassedToVMAsMajorityOfVotesIsCastedForIt(t *testing.T) { )) } } - require.False(syncer.pendingSeeders.Len() != 0) + require.Empty(syncer.pendingSeeders) majoritySummaryCalled := false minoritySummaryCalled := false @@ -1021,23 +990,23 @@ func TestStateSummaryIsPassedToVMAsMajorityOfVotesIsCastedForIt(t *testing.T) { reqID := contactedVoters[voterID] switch { - case cumulatedWeight < commonCfg.Alpha/2: + case cumulatedWeight < alpha/2: require.NoError(syncer.AcceptedStateSummary( context.Background(), voterID, reqID, - []ids.ID{summaryID, minoritySummaryID}, + set.Of(summaryID, minoritySummaryID), )) - cumulatedWeight += vdrs.GetWeight(voterID) + cumulatedWeight += beacons.GetWeight(ctx.SubnetID, voterID) - case cumulatedWeight < commonCfg.Alpha: + case cumulatedWeight < alpha: require.NoError(syncer.AcceptedStateSummary( context.Background(), voterID, 
reqID, - []ids.ID{summaryID}, + set.Of(summaryID), )) - cumulatedWeight += vdrs.GetWeight(voterID) + cumulatedWeight += beacons.GetWeight(ctx.SubnetID, voterID) default: require.NoError(syncer.GetAcceptedStateSummaryFailed( @@ -1056,23 +1025,19 @@ func TestStateSummaryIsPassedToVMAsMajorityOfVotesIsCastedForIt(t *testing.T) { func TestVotingIsRestartedIfMajorityIsNotReachedDueToTimeouts(t *testing.T) { require := require.New(t) - vdrs := buildTestPeers(t) - startupAlpha := (3*vdrs.Weight() + 3) / 4 + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) + beacons := buildTestPeers(t, ctx.SubnetID) + totalWeight, err := beacons.TotalWeight(ctx.SubnetID) + require.NoError(err) + startupAlpha := (3*totalWeight + 3) / 4 + alpha := (totalWeight + 1) / 2 peers := tracker.NewPeers() startup := tracker.NewStartup(peers, startupAlpha) - vdrs.RegisterCallbackListener(startup) + beacons.RegisterCallbackListener(ctx.SubnetID, startup) - commonCfg := common.Config{ - Ctx: snow.DefaultConsensusContextTest(), - Beacons: vdrs, - SampleK: vdrs.Len(), - Alpha: (vdrs.Weight() + 1) / 2, - StartupTracker: startup, - RetryBootstrap: true, // this sets RetryStateSyncing too - RetryBootstrapWarnFrequency: 1, // this sets RetrySyncingWarnFrequency too - } - syncer, fullVM, sender := buildTestsObjects(t, &commonCfg) + syncer, fullVM, sender := buildTestsObjects(t, ctx, startup, beacons, alpha) // set sender to track nodes reached out contactedFrontiersProviders := make(map[ids.NodeID]uint32) // nodeID -> reqID map @@ -1104,10 +1069,10 @@ func TestVotingIsRestartedIfMajorityIsNotReachedDueToTimeouts(t *testing.T) { } // Connect enough stake to start syncer - for _, vdr := range vdrs.List() { - require.NoError(syncer.Connected(context.Background(), vdr.NodeID, version.CurrentApp)) + for _, nodeID := range beacons.GetValidatorIDs(ctx.SubnetID) { + require.NoError(syncer.Connected(context.Background(), nodeID, version.CurrentApp)) } - 
require.True(syncer.pendingSeeders.Len() != 0) + require.NotEmpty(syncer.pendingSeeders) // let all contacted vdrs respond for syncer.pendingSeeders.Len() != 0 { @@ -1122,7 +1087,7 @@ func TestVotingIsRestartedIfMajorityIsNotReachedDueToTimeouts(t *testing.T) { summaryBytes, )) } - require.False(syncer.pendingSeeders.Len() != 0) + require.Empty(syncer.pendingSeeders) minoritySummaryCalled := false minoritySummary.AcceptF = func(context.Context) (block.StateSyncMode, error) { @@ -1138,19 +1103,19 @@ func TestVotingIsRestartedIfMajorityIsNotReachedDueToTimeouts(t *testing.T) { reqID := contactedVoters[voterID] // vdr carries the largest weight by far. Make sure it fails - if timedOutWeight <= commonCfg.Alpha { + if timedOutWeight <= alpha { require.NoError(syncer.GetAcceptedStateSummaryFailed( context.Background(), voterID, reqID, )) - timedOutWeight += vdrs.GetWeight(voterID) + timedOutWeight += beacons.GetWeight(ctx.SubnetID, voterID) } else { require.NoError(syncer.AcceptedStateSummary( context.Background(), voterID, reqID, - []ids.ID{summaryID}, + set.Of(summaryID), )) } } @@ -1159,28 +1124,26 @@ func TestVotingIsRestartedIfMajorityIsNotReachedDueToTimeouts(t *testing.T) { require.False(minoritySummaryCalled) // instead the whole process is restared - require.False(syncer.pendingVoters.Len() != 0) // no voters reached - require.True(syncer.pendingSeeders.Len() != 0) // frontiers providers reached again + require.Empty(syncer.pendingVoters) // no voters reached + require.NotEmpty(syncer.pendingSeeders) // frontiers providers reached again } func TestStateSyncIsStoppedIfEnoughVotesAreCastedWithNoClearMajority(t *testing.T) { require := require.New(t) - vdrs := buildTestPeers(t) - startupAlpha := (3*vdrs.Weight() + 3) / 4 + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) + beacons := buildTestPeers(t, ctx.SubnetID) + totalWeight, err := beacons.TotalWeight(ctx.SubnetID) + require.NoError(err) + startupAlpha := 
(3*totalWeight + 3) / 4 + alpha := (totalWeight + 1) / 2 peers := tracker.NewPeers() startup := tracker.NewStartup(peers, startupAlpha) - vdrs.RegisterCallbackListener(startup) + beacons.RegisterCallbackListener(ctx.SubnetID, startup) - commonCfg := common.Config{ - Ctx: snow.DefaultConsensusContextTest(), - Beacons: vdrs, - SampleK: vdrs.Len(), - Alpha: (vdrs.Weight() + 1) / 2, - StartupTracker: startup, - } - syncer, fullVM, sender := buildTestsObjects(t, &commonCfg) + syncer, fullVM, sender := buildTestsObjects(t, ctx, startup, beacons, alpha) // set sender to track nodes reached out contactedFrontiersProviders := make(map[ids.NodeID]uint32) // nodeID -> reqID map @@ -1226,10 +1189,10 @@ func TestStateSyncIsStoppedIfEnoughVotesAreCastedWithNoClearMajority(t *testing. } // Connect enough stake to start syncer - for _, vdr := range vdrs.List() { - require.NoError(syncer.Connected(context.Background(), vdr.NodeID, version.CurrentApp)) + for _, nodeID := range beacons.GetValidatorIDs(ctx.SubnetID) { + require.NoError(syncer.Connected(context.Background(), nodeID, version.CurrentApp)) } - require.True(syncer.pendingSeeders.Len() != 0) + require.NotEmpty(syncer.pendingSeeders) // let all contacted vdrs respond with majority or minority summaries for { @@ -1257,7 +1220,7 @@ func TestStateSyncIsStoppedIfEnoughVotesAreCastedWithNoClearMajority(t *testing. )) } } - require.False(syncer.pendingSeeders.Len() != 0) + require.Empty(syncer.pendingSeeders) majoritySummaryCalled := false minoritySummaryCalled := false @@ -1285,23 +1248,23 @@ func TestStateSyncIsStoppedIfEnoughVotesAreCastedWithNoClearMajority(t *testing. 
reqID := contactedVoters[voterID] switch { - case votingWeightStake < commonCfg.Alpha/2: + case votingWeightStake < alpha/2: require.NoError(syncer.AcceptedStateSummary( context.Background(), voterID, reqID, - []ids.ID{minoritySummary1.ID(), minoritySummary2.ID()}, + set.Of(minoritySummary1.ID(), minoritySummary2.ID()), )) - votingWeightStake += vdrs.GetWeight(voterID) + votingWeightStake += beacons.GetWeight(ctx.SubnetID, voterID) default: require.NoError(syncer.AcceptedStateSummary( context.Background(), voterID, reqID, - []ids.ID{{'u', 'n', 'k', 'n', 'o', 'w', 'n', 'I', 'D'}}, + set.Of(ids.ID{'u', 'n', 'k', 'n', 'o', 'w', 'n', 'I', 'D'}), )) - votingWeightStake += vdrs.GetWeight(voterID) + votingWeightStake += beacons.GetWeight(ctx.SubnetID, voterID) } } @@ -1314,24 +1277,18 @@ func TestStateSyncIsStoppedIfEnoughVotesAreCastedWithNoClearMajority(t *testing. func TestStateSyncIsDoneOnceVMNotifies(t *testing.T) { require := require.New(t) - vdrs := buildTestPeers(t) - startupAlpha := (3*vdrs.Weight() + 3) / 4 + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) + beacons := buildTestPeers(t, ctx.SubnetID) + totalWeight, err := beacons.TotalWeight(ctx.SubnetID) + require.NoError(err) + startupAlpha := (3*totalWeight + 3) / 4 peers := tracker.NewPeers() startup := tracker.NewStartup(peers, startupAlpha) - vdrs.RegisterCallbackListener(startup) - - commonCfg := common.Config{ - Ctx: snow.DefaultConsensusContextTest(), - Beacons: vdrs, - SampleK: vdrs.Len(), - Alpha: (vdrs.Weight() + 1) / 2, - StartupTracker: startup, - RetryBootstrap: true, // this sets RetryStateSyncing too - RetryBootstrapWarnFrequency: 1, // this sets RetrySyncingWarnFrequency too - } - syncer, fullVM, _ := buildTestsObjects(t, &commonCfg) - _ = fullVM + beacons.RegisterCallbackListener(ctx.SubnetID, startup) + + syncer, _, _ := buildTestsObjects(t, ctx, startup, beacons, (totalWeight+1)/2) stateSyncFullyDone := false syncer.onDoneStateSyncing = 
func(context.Context, uint32) error { diff --git a/avalanchego/snow/engine/snowman/syncer/utils_test.go b/avalanchego/snow/engine/snowman/syncer/utils_test.go index 01303743..a5217a4b 100644 --- a/avalanchego/snow/engine/snowman/syncer/utils_test.go +++ b/avalanchego/snow/engine/snowman/syncer/utils_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package syncer @@ -6,12 +6,15 @@ package syncer import ( "context" "testing" + "time" "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/snow/engine/common/tracker" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" "github.com/ava-labs/avalanchego/snow/engine/snowman/getter" "github.com/ava-labs/avalanchego/snow/validators" @@ -54,24 +57,29 @@ type fullVM struct { *block.TestStateSyncableVM } -func buildTestPeers(t *testing.T) validators.Set { - // we consider more than common.MaxOutstandingBroadcastRequests peers - // so to test the effect of cap on number of requests sent out - vdrs := validators.NewSet() - for idx := 0; idx < 2*common.MaxOutstandingBroadcastRequests; idx++ { +func buildTestPeers(t *testing.T, subnetID ids.ID) validators.Manager { + // We consider more than maxOutstandingBroadcastRequests peers to test + // capping the number of requests sent out. 
+ vdrs := validators.NewManager() + for idx := 0; idx < 2*maxOutstandingBroadcastRequests; idx++ { beaconID := ids.GenerateTestNodeID() - require.NoError(t, vdrs.Add(beaconID, nil, ids.Empty, 1)) + require.NoError(t, vdrs.AddStaker(subnetID, beaconID, nil, ids.Empty, 1)) } return vdrs } -func buildTestsObjects(t *testing.T, commonCfg *common.Config) ( +func buildTestsObjects( + t *testing.T, + ctx *snow.ConsensusContext, + startupTracker tracker.Startup, + beacons validators.Manager, + alpha uint64, +) ( *stateSyncer, *fullVM, *common.SenderTest, ) { - sender := &common.SenderTest{T: t} - commonCfg.Sender = sender + require := require.New(t) fullVM := &fullVM{ TestVM: &block.TestVM{ @@ -81,17 +89,35 @@ func buildTestsObjects(t *testing.T, commonCfg *common.Config) ( T: t, }, } - dummyGetter, err := getter.New(fullVM, *commonCfg) - require.NoError(t, err) - - cfg, err := NewConfig(*commonCfg, nil, dummyGetter, fullVM) - require.NoError(t, err) + sender := &common.SenderTest{T: t} + dummyGetter, err := getter.New( + fullVM, + sender, + ctx.Log, + time.Second, + 2000, + ctx.Registerer, + ) + require.NoError(err) + + cfg, err := NewConfig( + dummyGetter, + ctx, + startupTracker, + sender, + beacons, + beacons.Count(ctx.SubnetID), + alpha, + nil, + fullVM, + ) + require.NoError(err) commonSyncer := New(cfg, func(context.Context, uint32) error { return nil }) - syncer, ok := commonSyncer.(*stateSyncer) - require.True(t, ok) - require.True(t, syncer.stateSyncVM != nil) + require.IsType(&stateSyncer{}, commonSyncer) + syncer := commonSyncer.(*stateSyncer) + require.NotNil(syncer.stateSyncVM) fullVM.GetOngoingSyncStateSummaryF = func(context.Context) (block.StateSummary, error) { return nil, database.ErrNotFound diff --git a/avalanchego/snow/engine/snowman/test_engine.go b/avalanchego/snow/engine/snowman/test_engine.go index c5c897b4..eada8463 100644 --- a/avalanchego/snow/engine/snowman/test_engine.go +++ b/avalanchego/snow/engine/snowman/test_engine.go @@ -1,4 +1,4 @@ -// 
Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowman @@ -7,6 +7,8 @@ import ( "context" "errors" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/common" @@ -36,7 +38,7 @@ func (e *EngineTest) GetBlock(ctx context.Context, blkID ids.ID) (snowman.Block, return e.GetBlockF(ctx, blkID) } if e.CantGetBlock && e.T != nil { - e.T.Fatalf("Unexpectedly called GetBlock") + require.FailNow(e.T, errGetBlock.Error()) } return nil, errGetBlock } diff --git a/avalanchego/snow/engine/snowman/traced_engine.go b/avalanchego/snow/engine/snowman/traced_engine.go index 56b46de4..e2306dcd 100644 --- a/avalanchego/snow/engine/snowman/traced_engine.go +++ b/avalanchego/snow/engine/snowman/traced_engine.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowman @@ -8,12 +8,12 @@ import ( "go.opentelemetry.io/otel/attribute" - oteltrace "go.opentelemetry.io/otel/trace" - "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/trace" + + oteltrace "go.opentelemetry.io/otel/trace" ) var _ Engine = (*tracedEngine)(nil) diff --git a/avalanchego/snow/engine/snowman/transitive.go b/avalanchego/snow/engine/snowman/transitive.go index bf3f8908..de39295e 100644 --- a/avalanchego/snow/engine/snowman/transitive.go +++ b/avalanchego/snow/engine/snowman/transitive.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package snowman @@ -7,6 +7,7 @@ import ( "context" "fmt" + "github.com/prometheus/client_golang/prometheus" "go.uber.org/zap" "github.com/ava-labs/avalanchego/cache" @@ -19,14 +20,27 @@ import ( "github.com/ava-labs/avalanchego/snow/consensus/snowman/poll" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/engine/common/tracker" - "github.com/ava-labs/avalanchego/snow/events" + "github.com/ava-labs/avalanchego/snow/engine/snowman/ancestor" + "github.com/ava-labs/avalanchego/snow/event" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils/bag" + "github.com/ava-labs/avalanchego/utils/bimap" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/math" "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/utils/wrappers" ) -const nonVerifiedCacheSize = 128 +const ( + nonVerifiedCacheSize = 64 * units.MiB + + // putGossipPeriod specifies the number of times Gossip will be called per + // Put gossip. This is done to avoid splitting Gossip into multiple + // functions and to allow more frequent pull gossip than push gossip. + putGossipPeriod = 10 +) var _ Engine = (*Transitive)(nil) @@ -34,6 +48,10 @@ func New(config Config) (Engine, error) { return newTransitive(config) } +func cachedBlockSize(_ ids.ID, blk snowman.Block) int { + return ids.IDLen + len(blk.Bytes()) + constants.PointerOverhead +} + // Transitive implements the Engine interface by attempting to fetch all // Transitive dependencies. 
type Transitive struct { @@ -49,20 +67,23 @@ type Transitive struct { common.AppHandler validators.Connector - RequestID uint32 + requestID uint32 + + gossipCounter int // track outstanding preference requests polls poll.Set // blocks that have we have sent get requests for but haven't yet received - blkReqs common.Requests + blkReqs *bimap.BiMap[common.Request, ids.ID] + blkReqSourceMetric map[common.Request]prometheus.Counter // blocks that are queued to be issued to consensus once missing dependencies are fetched // Block ID --> Block pending map[ids.ID]snowman.Block // Block ID --> Parent ID - nonVerifieds AncestorTree + nonVerifieds ancestor.Tree // Block ID --> Block. // A block is put into this cache if it was not able to be issued. A block @@ -75,7 +96,7 @@ type Transitive struct { // operations that are blocked on a block being issued. This could be // issuing another block, responding to a query, or applying votes to consensus - blocked events.Blocker + blocked event.Blocker // number of times build block needs to be called once the number of // processing blocks has gone below the optimal number. 
@@ -91,16 +112,32 @@ func newTransitive(config Config) (*Transitive, error) { nonVerifiedCache, err := metercacher.New[ids.ID, snowman.Block]( "non_verified_cache", config.Ctx.Registerer, - &cache.LRU[ids.ID, snowman.Block]{Size: nonVerifiedCacheSize}, + cache.NewSizedLRU[ids.ID, snowman.Block]( + nonVerifiedCacheSize, + cachedBlockSize, + ), ) if err != nil { return nil, err } acceptedFrontiers := tracker.NewAccepted() - config.Validators.RegisterCallbackListener(acceptedFrontiers) + config.Validators.RegisterCallbackListener(config.Ctx.SubnetID, acceptedFrontiers) + + factory := poll.NewEarlyTermNoTraversalFactory( + config.Params.AlphaPreference, + config.Params.AlphaConfidence, + ) + polls, err := poll.NewSet( + factory, + config.Ctx.Log, + "", + config.Ctx.Registerer, + ) + if err != nil { + return nil, err + } - factory := poll.NewEarlyTermNoTraversalFactory(config.Params.Alpha) t := &Transitive{ Config: config, StateSummaryFrontierHandler: common.NewNoOpStateSummaryFrontierHandler(config.Ctx.Log), @@ -111,53 +148,144 @@ func newTransitive(config Config) (*Transitive, error) { AppHandler: config.VM, Connector: config.VM, pending: make(map[ids.ID]snowman.Block), - nonVerifieds: NewAncestorTree(), + nonVerifieds: ancestor.NewTree(), nonVerifiedCache: nonVerifiedCache, acceptedFrontiers: acceptedFrontiers, - polls: poll.NewSet(factory, - config.Ctx.Log, - "", - config.Ctx.Registerer, - ), + polls: polls, + blkReqs: bimap.New[common.Request, ids.ID](), + blkReqSourceMetric: make(map[common.Request]prometheus.Counter), } return t, t.metrics.Initialize("", config.Ctx.Registerer) } -func (t *Transitive) Put(ctx context.Context, nodeID ids.NodeID, requestID uint32, blkBytes []byte) error { - blk, err := t.VM.ParseBlock(ctx, blkBytes) - if err != nil { - t.Ctx.Log.Debug("failed to parse block", - zap.Stringer("nodeID", nodeID), - zap.Uint32("requestID", requestID), - zap.Error(err), +func (t *Transitive) Gossip(ctx context.Context) error { + lastAcceptedID, 
lastAcceptedHeight := t.Consensus.LastAccepted() + if numProcessing := t.Consensus.NumProcessing(); numProcessing == 0 { + t.Ctx.Log.Verbo("sampling from validators", + zap.Stringer("validators", t.Validators), ) - t.Ctx.Log.Verbo("failed to parse block", - zap.Stringer("nodeID", nodeID), - zap.Uint32("requestID", requestID), - zap.Binary("block", blkBytes), + + // Uniform sampling is used here to reduce bandwidth requirements of + // nodes with a large amount of stake weight. + vdrID, ok := t.ConnectedValidators.SampleValidator() + if !ok { + t.Ctx.Log.Warn("skipping block gossip", + zap.String("reason", "no connected validators"), + ) + return nil + } + + nextHeightToAccept, err := math.Add64(lastAcceptedHeight, 1) + if err != nil { + t.Ctx.Log.Error("skipping block gossip", + zap.String("reason", "block height overflow"), + zap.Stringer("blkID", lastAcceptedID), + zap.Uint64("lastAcceptedHeight", lastAcceptedHeight), + zap.Error(err), + ) + return nil + } + + t.requestID++ + t.Sender.SendPullQuery( + ctx, + set.Of(vdrID), + t.requestID, + t.Consensus.Preference(), + nextHeightToAccept, + ) + } else { + t.Ctx.Log.Debug("skipping block gossip", + zap.String("reason", "blocks currently processing"), + zap.Int("numProcessing", numProcessing), + ) + + // repoll is called here to unblock the engine if it previously errored + // when attempting to issue a query. This can happen if a subnet was + // temporarily misconfigured and there were no validators. 
+ t.repoll(ctx) + } + + // TODO: Remove periodic push gossip after v1.11.x is activated + t.gossipCounter++ + t.gossipCounter %= putGossipPeriod + if t.gossipCounter > 0 { + return nil + } + + lastAccepted, err := t.GetBlock(ctx, lastAcceptedID) + if err != nil { + t.Ctx.Log.Warn("dropping gossip request", + zap.String("reason", "block couldn't be loaded"), + zap.Stringer("blkID", lastAcceptedID), zap.Error(err), ) + return nil + } + t.Ctx.Log.Verbo("gossiping accepted block to the network", + zap.Stringer("blkID", lastAcceptedID), + ) + t.Sender.SendGossip(ctx, lastAccepted.Bytes()) + return nil +} + +func (t *Transitive) Put(ctx context.Context, nodeID ids.NodeID, requestID uint32, blkBytes []byte) error { + blk, err := t.VM.ParseBlock(ctx, blkBytes) + if err != nil { + if t.Ctx.Log.Enabled(logging.Verbo) { + t.Ctx.Log.Verbo("failed to parse block", + zap.Stringer("nodeID", nodeID), + zap.Uint32("requestID", requestID), + zap.Binary("block", blkBytes), + zap.Error(err), + ) + } else { + t.Ctx.Log.Debug("failed to parse block", + zap.Stringer("nodeID", nodeID), + zap.Uint32("requestID", requestID), + zap.Error(err), + ) + } // because GetFailed doesn't utilize the assumption that we actually // sent a Get message, we can safely call GetFailed here to potentially // abandon the request. return t.GetFailed(ctx, nodeID, requestID) } - actualBlkID := blk.ID() - expectedBlkID, ok := t.blkReqs.Get(nodeID, requestID) - // If the provided block is not the requested block, we need to explicitly - // mark the request as failed to avoid having a dangling dependency. - if ok && actualBlkID != expectedBlkID { - t.Ctx.Log.Debug("incorrect block returned in Put", - zap.Stringer("nodeID", nodeID), - zap.Uint32("requestID", requestID), - zap.Stringer("blkID", actualBlkID), - zap.Stringer("expectedBlkID", expectedBlkID), - ) - // We assume that [blk] is useless because it doesn't match what we - // expected. 
- return t.GetFailed(ctx, nodeID, requestID) + var ( + req = common.Request{ + NodeID: nodeID, + RequestID: requestID, + } + issuedMetric prometheus.Counter + ) + switch expectedBlkID, ok := t.blkReqs.GetValue(req); { + case ok: + actualBlkID := blk.ID() + if actualBlkID != expectedBlkID { + t.Ctx.Log.Debug("incorrect block returned in Put", + zap.Stringer("nodeID", nodeID), + zap.Uint32("requestID", requestID), + zap.Stringer("blkID", actualBlkID), + zap.Stringer("expectedBlkID", expectedBlkID), + ) + // We assume that [blk] is useless because it doesn't match what we + // expected. + return t.GetFailed(ctx, nodeID, requestID) + } + + issuedMetric = t.blkReqSourceMetric[req] + case requestID == constants.GossipMsgRequestID: + issuedMetric = t.metrics.issued.WithLabelValues(putGossipSource) + default: + // This can happen if this block was provided to this engine while a Get + // request was outstanding. For example, the block may have been locally + // built or the node may have received a PushQuery with this block. + // + // Note: It is still possible this block will be issued here, because + // the block may have previously failed verification. + issuedMetric = t.metrics.issued.WithLabelValues(unknownSource) } if t.wasIssued(blk) { @@ -169,7 +297,7 @@ func (t *Transitive) Put(ctx context.Context, nodeID ids.NodeID, requestID uint3 // receive requests to fill the ancestry. dependencies that have already // been fetched, but with missing dependencies themselves won't be requested // from the vdr. - if _, err := t.issueFrom(ctx, nodeID, blk); err != nil { + if _, err := t.issueFrom(ctx, nodeID, blk, issuedMetric); err != nil { return err } return t.buildBlocks(ctx) @@ -177,8 +305,13 @@ func (t *Transitive) Put(ctx context.Context, nodeID ids.NodeID, requestID uint3 func (t *Transitive) GetFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32) error { // We don't assume that this function is called after a failed Get message. 
- // Check to see if we have an outstanding request and also get what the request was for if it exists. - blkID, ok := t.blkReqs.Remove(nodeID, requestID) + // Check to see if we have an outstanding request and also get what the + // request was for if it exists. + req := common.Request{ + NodeID: nodeID, + RequestID: requestID, + } + blkID, ok := t.blkReqs.DeleteKey(req) if !ok { t.Ctx.Log.Debug("unexpected GetFailed", zap.Stringer("nodeID", nodeID), @@ -186,6 +319,7 @@ func (t *Transitive) GetFailed(ctx context.Context, nodeID ids.NodeID, requestID ) return nil } + delete(t.blkReqSourceMetric, req) // Because the get request was dropped, we no longer expect blkID to be issued. t.blocked.Abandon(ctx, blkID) @@ -194,35 +328,40 @@ func (t *Transitive) GetFailed(ctx context.Context, nodeID ids.NodeID, requestID return t.buildBlocks(ctx) } -func (t *Transitive) PullQuery(ctx context.Context, nodeID ids.NodeID, requestID uint32, blkID ids.ID) error { - t.sendChits(ctx, nodeID, requestID) +func (t *Transitive) PullQuery(ctx context.Context, nodeID ids.NodeID, requestID uint32, blkID ids.ID, requestedHeight uint64) error { + t.sendChits(ctx, nodeID, requestID, requestedHeight) + + issuedMetric := t.metrics.issued.WithLabelValues(pushGossipSource) // Try to issue [blkID] to consensus. 
// If we're missing an ancestor, request it from [vdr] - if _, err := t.issueFromByID(ctx, nodeID, blkID); err != nil { + if _, err := t.issueFromByID(ctx, nodeID, blkID, issuedMetric); err != nil { return err } return t.buildBlocks(ctx) } -func (t *Transitive) PushQuery(ctx context.Context, nodeID ids.NodeID, requestID uint32, blkBytes []byte) error { - t.sendChits(ctx, nodeID, requestID) +func (t *Transitive) PushQuery(ctx context.Context, nodeID ids.NodeID, requestID uint32, blkBytes []byte, requestedHeight uint64) error { + t.sendChits(ctx, nodeID, requestID, requestedHeight) blk, err := t.VM.ParseBlock(ctx, blkBytes) // If parsing fails, we just drop the request, as we didn't ask for it if err != nil { - t.Ctx.Log.Debug("failed to parse block", - zap.Stringer("nodeID", nodeID), - zap.Uint32("requestID", requestID), - zap.Error(err), - ) - t.Ctx.Log.Verbo("failed to parse block", - zap.Stringer("nodeID", nodeID), - zap.Uint32("requestID", requestID), - zap.Binary("block", blkBytes), - zap.Error(err), - ) + if t.Ctx.Log.Enabled(logging.Verbo) { + t.Ctx.Log.Verbo("failed to parse block", + zap.Stringer("nodeID", nodeID), + zap.Uint32("requestID", requestID), + zap.Binary("block", blkBytes), + zap.Error(err), + ) + } else { + t.Ctx.Log.Debug("failed to parse block", + zap.Stringer("nodeID", nodeID), + zap.Uint32("requestID", requestID), + zap.Error(err), + ) + } return nil } @@ -230,56 +369,70 @@ func (t *Transitive) PushQuery(ctx context.Context, nodeID ids.NodeID, requestID t.metrics.numUselessPushQueryBytes.Add(float64(len(blkBytes))) } + issuedMetric := t.metrics.issued.WithLabelValues(pushGossipSource) + // issue the block into consensus. If the block has already been issued, // this will be a noop. If this block has missing dependencies, nodeID will // receive requests to fill the ancestry. dependencies that have already // been fetched, but with missing dependencies themselves won't be requested // from the vdr. 
- if _, err := t.issueFrom(ctx, nodeID, blk); err != nil { + if _, err := t.issueFrom(ctx, nodeID, blk, issuedMetric); err != nil { return err } return t.buildBlocks(ctx) } -func (t *Transitive) Chits(ctx context.Context, nodeID ids.NodeID, requestID uint32, votes []ids.ID, accepted []ids.ID) error { - t.acceptedFrontiers.SetAcceptedFrontier(nodeID, accepted) - - // Since this is a linear chain, there should only be one ID in the vote set - if len(votes) != 1 { - t.Ctx.Log.Debug("failing Chits", - zap.String("reason", "expected only 1 vote"), - zap.Int("numVotes", len(votes)), - zap.Stringer("nodeID", nodeID), - zap.Uint32("requestID", requestID), - ) - // because QueryFailed doesn't utilize the assumption that we actually - // sent a Query message, we can safely call QueryFailed here to - // potentially abandon the request. - return t.QueryFailed(ctx, nodeID, requestID) - } - blkID := votes[0] +func (t *Transitive) Chits(ctx context.Context, nodeID ids.NodeID, requestID uint32, preferredID ids.ID, preferredIDAtHeight ids.ID, acceptedID ids.ID) error { + t.acceptedFrontiers.SetLastAccepted(nodeID, acceptedID) t.Ctx.Log.Verbo("called Chits for the block", - zap.Stringer("blkID", blkID), zap.Stringer("nodeID", nodeID), - zap.Uint32("requestID", requestID)) + zap.Uint32("requestID", requestID), + zap.Stringer("preferredID", preferredID), + zap.Stringer("preferredIDAtHeight", preferredIDAtHeight), + zap.Stringer("acceptedID", acceptedID), + ) - // Will record chits once [blkID] has been issued into consensus - v := &voter{ - t: t, - vdr: nodeID, - requestID: requestID, - response: blkID, - } + issuedMetric := t.metrics.issued.WithLabelValues(pullGossipSource) - added, err := t.issueFromByID(ctx, nodeID, blkID) + addedPreferred, err := t.issueFromByID(ctx, nodeID, preferredID, issuedMetric) if err != nil { return err } - // Wait until [blkID] has been issued to consensus before applying this chit. 
- if !added { - v.deps.Add(blkID) + + var ( + addedPreferredIDAtHeight = addedPreferred + // Invariant: The order of [responseOptions] must be [preferredID] then + // (optionally) [preferredIDAtHeight]. During vote application, the + // first vote that can be applied will be used. So, the votes should be + // populated in order of decreasing height. + responseOptions = []ids.ID{preferredID} + ) + if preferredID != preferredIDAtHeight { + addedPreferredIDAtHeight, err = t.issueFromByID(ctx, nodeID, preferredIDAtHeight, issuedMetric) + if err != nil { + return err + } + responseOptions = append(responseOptions, preferredIDAtHeight) + } + + // Will record chits once [preferredID] and [preferredIDAtHeight] have been + // issued into consensus + v := &voter{ + t: t, + vdr: nodeID, + requestID: requestID, + responseOptions: responseOptions, + } + + // Wait until [preferredID] and [preferredIDAtHeight] have been issued to + // consensus before applying this chit. + if !addedPreferred { + v.deps.Add(preferredID) + } + if !addedPreferredIDAtHeight { + v.deps.Add(preferredIDAtHeight) } t.blocked.Register(ctx, v) @@ -288,11 +441,9 @@ func (t *Transitive) Chits(ctx context.Context, nodeID ids.NodeID, requestID uin } func (t *Transitive) QueryFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32) error { - lastAccepted := t.acceptedFrontiers.AcceptedFrontier(nodeID) - if len(lastAccepted) == 1 { - // Chits calls QueryFailed if [votes] doesn't have length 1, so this - // check is required to avoid infinite mutual recursion. 
- return t.Chits(ctx, nodeID, requestID, lastAccepted, lastAccepted) + lastAccepted, ok := t.acceptedFrontiers.LastAccepted(nodeID) + if ok { + return t.Chits(ctx, nodeID, requestID, lastAccepted, lastAccepted, lastAccepted) } t.blocked.Register( @@ -311,32 +462,14 @@ func (*Transitive) Timeout(context.Context) error { return nil } -func (t *Transitive) Gossip(ctx context.Context) error { - blkID, err := t.VM.LastAccepted(ctx) - if err != nil { - return err - } - - blk, err := t.GetBlock(ctx, blkID) - if err != nil { - t.Ctx.Log.Warn("dropping gossip request", - zap.String("reason", "block couldn't be loaded"), - zap.Stringer("blkID", blkID), - zap.Error(err), - ) - return nil - } - t.Ctx.Log.Verbo("gossiping accepted block to the network", - zap.Stringer("blkID", blkID), - ) - t.Sender.SendGossip(ctx, blk.Bytes()) - return nil -} - func (*Transitive) Halt(context.Context) {} func (t *Transitive) Shutdown(ctx context.Context) error { t.Ctx.Log.Info("shutting down consensus engine") + + t.Ctx.Lock.Lock() + defer t.Ctx.Lock.Unlock() + return t.VM.Shutdown(ctx) } @@ -362,7 +495,7 @@ func (t *Transitive) Context() *snow.ConsensusContext { } func (t *Transitive) Start(ctx context.Context, startReqID uint32) error { - t.RequestID = startReqID + t.requestID = startReqID lastAcceptedID, err := t.VM.LastAccepted(ctx) if err != nil { return err @@ -395,9 +528,10 @@ func (t *Transitive) Start(ctx context.Context, startReqID uint32) error { case err != nil: return err default: + issuedMetric := t.metrics.issued.WithLabelValues(builtSource) for _, blk := range options { // note that deliver will set the VM's preference - if err := t.deliver(ctx, blk); err != nil { + if err := t.deliver(ctx, t.Ctx.NodeID, blk, false, issuedMetric); err != nil { return err } } @@ -423,6 +557,18 @@ func (t *Transitive) Start(ctx context.Context, startReqID uint32) error { } func (t *Transitive) HealthCheck(ctx context.Context) (interface{}, error) { + t.Ctx.Lock.Lock() + defer t.Ctx.Lock.Unlock() 
+ + t.Ctx.Log.Verbo("running health check", + zap.Uint32("requestID", t.requestID), + zap.Int("gossipCounter", t.gossipCounter), + zap.Stringer("polls", t.polls), + zap.Reflect("outstandingBlockRequests", t.blkReqs), + zap.Stringer("blockedJobs", &t.blocked), + zap.Int("pendingBuildBlocks", t.pendingBuildBlocks), + ) + consensusIntf, consensusErr := t.Consensus.HealthCheck(ctx) vmIntf, vmErr := t.VM.HealthCheck(ctx) intf := map[string]interface{}{ @@ -435,11 +581,7 @@ func (t *Transitive) HealthCheck(ctx context.Context) (interface{}, error) { if vmErr == nil { return intf, consensusErr } - return intf, fmt.Errorf("vm: %w ; consensus: %v", vmErr, consensusErr) -} - -func (t *Transitive) GetVM() common.VM { - return t.VM + return intf, fmt.Errorf("vm: %w ; consensus: %w", vmErr, consensusErr) } func (t *Transitive) GetBlock(ctx context.Context, blkID ids.ID) (snowman.Block, error) { @@ -453,13 +595,71 @@ func (t *Transitive) GetBlock(ctx context.Context, blkID ids.ID) (snowman.Block, return t.VM.GetBlock(ctx, blkID) } -func (t *Transitive) sendChits(ctx context.Context, nodeID ids.NodeID, requestID uint32) { - lastAccepted := t.Consensus.LastAccepted() - if t.Ctx.StateSyncing.Get() { - t.Sender.SendChits(ctx, nodeID, requestID, []ids.ID{lastAccepted}, []ids.ID{lastAccepted}) +func (t *Transitive) sendChits(ctx context.Context, nodeID ids.NodeID, requestID uint32, requestedHeight uint64) { + lastAcceptedID, lastAcceptedHeight := t.Consensus.LastAccepted() + // If we aren't fully verifying blocks, only vote for blocks that are widely + // preferred by the validator set. + if t.Ctx.StateSyncing.Get() || t.Config.PartialSync { + acceptedAtHeight, err := t.VM.GetBlockIDAtHeight(ctx, requestedHeight) + if err != nil { + // Because we only return accepted state here, it's fairly likely + // that the requested height is higher than the last accepted block. + // That means that this code path is actually quite common. 
+ t.Ctx.Log.Debug("failed fetching accepted block", + zap.Stringer("nodeID", nodeID), + zap.Uint64("requestedHeight", requestedHeight), + zap.Uint64("lastAcceptedHeight", lastAcceptedHeight), + zap.Stringer("lastAcceptedID", lastAcceptedID), + zap.Error(err), + ) + acceptedAtHeight = lastAcceptedID + } + t.Sender.SendChits(ctx, nodeID, requestID, lastAcceptedID, acceptedAtHeight, lastAcceptedID) + return + } + + var ( + preference = t.Consensus.Preference() + preferenceAtHeight ids.ID + ) + if requestedHeight < lastAcceptedHeight { + var err error + preferenceAtHeight, err = t.VM.GetBlockIDAtHeight(ctx, requestedHeight) + if err != nil { + // If this chain is pruning historical blocks, it's expected for a + // node to be unable to fetch some block IDs. In this case, we fall + // back to returning the last accepted ID. + // + // Because it is possible for a byzantine node to spam requests at + // old heights on a pruning network, we log this as debug. However, + // this case is unexpected to be hit by correct peers. + t.Ctx.Log.Debug("failed fetching accepted block", + zap.Stringer("nodeID", nodeID), + zap.Uint64("requestedHeight", requestedHeight), + zap.Uint64("lastAcceptedHeight", lastAcceptedHeight), + zap.Stringer("lastAcceptedID", lastAcceptedID), + zap.Error(err), + ) + t.numMissingAcceptedBlocks.Inc() + + preferenceAtHeight = lastAcceptedID + } } else { - t.Sender.SendChits(ctx, nodeID, requestID, []ids.ID{t.Consensus.Preference()}, []ids.ID{lastAccepted}) + var ok bool + preferenceAtHeight, ok = t.Consensus.PreferenceAtHeight(requestedHeight) + if !ok { + t.Ctx.Log.Debug("failed fetching processing block", + zap.Stringer("nodeID", nodeID), + zap.Uint64("requestedHeight", requestedHeight), + zap.Uint64("lastAcceptedHeight", lastAcceptedHeight), + zap.Stringer("preferredID", preference), + ) + // If the requested height is higher than our preferred tip, we + // don't prefer anything at the requested height yet. 
+ preferenceAtHeight = preference + } } + t.Sender.SendChits(ctx, nodeID, requestID, preference, preferenceAtHeight, lastAcceptedID) } // Build blocks if they have been requested and the number of processing blocks @@ -500,7 +700,8 @@ func (t *Transitive) buildBlocks(ctx context.Context) error { ) } - added, err := t.issueWithAncestors(ctx, blk) + issuedMetric := t.metrics.issued.WithLabelValues(builtSource) + added, err := t.issueWithAncestors(ctx, blk, issuedMetric) if err != nil { return err } @@ -523,30 +724,40 @@ func (t *Transitive) repoll(ctx context.Context) { prefID := t.Consensus.Preference() for i := t.polls.Len(); i < t.Params.ConcurrentRepolls; i++ { - t.pullQuery(ctx, prefID) + t.sendQuery(ctx, prefID, nil, false) } } // issueFromByID attempts to issue the branch ending with a block [blkID] into consensus. // If we do not have [blkID], request it. // Returns true if the block is processing in consensus or is decided. -func (t *Transitive) issueFromByID(ctx context.Context, nodeID ids.NodeID, blkID ids.ID) (bool, error) { +func (t *Transitive) issueFromByID( + ctx context.Context, + nodeID ids.NodeID, + blkID ids.ID, + issuedMetric prometheus.Counter, +) (bool, error) { blk, err := t.GetBlock(ctx, blkID) if err != nil { - t.sendRequest(ctx, nodeID, blkID) + t.sendRequest(ctx, nodeID, blkID, issuedMetric) return false, nil } - return t.issueFrom(ctx, nodeID, blk) + return t.issueFrom(ctx, nodeID, blk, issuedMetric) } // issueFrom attempts to issue the branch ending with block [blkID] to consensus. // Returns true if the block is processing in consensus or is decided. // If a dependency is missing, request it from [vdr]. -func (t *Transitive) issueFrom(ctx context.Context, nodeID ids.NodeID, blk snowman.Block) (bool, error) { +func (t *Transitive) issueFrom( + ctx context.Context, + nodeID ids.NodeID, + blk snowman.Block, + issuedMetric prometheus.Counter, +) (bool, error) { // issue [blk] and its ancestors to consensus. 
blkID := blk.ID() for !t.wasIssued(blk) { - if err := t.issue(ctx, blk); err != nil { + if err := t.issue(ctx, nodeID, blk, false, issuedMetric); err != nil { return false, err } @@ -556,13 +767,15 @@ func (t *Transitive) issueFrom(ctx context.Context, nodeID ids.NodeID, blk snowm // If we don't have this ancestor, request it from [vdr] if err != nil || !blk.Status().Fetched() { - t.sendRequest(ctx, nodeID, blkID) + t.sendRequest(ctx, nodeID, blkID, issuedMetric) return false, nil } } // Remove any outstanding requests for this block - t.blkReqs.RemoveAny(blkID) + if req, ok := t.blkReqs.DeleteValue(blkID); ok { + delete(t.blkReqSourceMetric, req) + } issued := t.Consensus.Decided(blk) || t.Consensus.Processing(blkID) if issued { @@ -581,12 +794,16 @@ func (t *Transitive) issueFrom(ctx context.Context, nodeID ids.NodeID, blk snowm // issueWithAncestors attempts to issue the branch ending with [blk] to consensus. // Returns true if the block is processing in consensus or is decided. // If a dependency is missing and the dependency hasn't been requested, the issuance will be abandoned. -func (t *Transitive) issueWithAncestors(ctx context.Context, blk snowman.Block) (bool, error) { +func (t *Transitive) issueWithAncestors( + ctx context.Context, + blk snowman.Block, + issuedMetric prometheus.Counter, +) (bool, error) { blkID := blk.ID() // issue [blk] and its ancestors into consensus status := blk.Status() for status.Fetched() && !t.wasIssued(blk) { - err := t.issue(ctx, blk) + err := t.issue(ctx, t.Ctx.NodeID, blk, true, issuedMetric) if err != nil { return false, err } @@ -606,7 +823,7 @@ func (t *Transitive) issueWithAncestors(ctx context.Context, blk snowman.Block) // There's an outstanding request for this block. // We can just wait for that request to succeed or fail. 
- if t.blkReqs.Contains(blkID) { + if t.blkReqs.HasValue(blkID) { return false, nil } @@ -626,19 +843,32 @@ func (t *Transitive) wasIssued(blk snowman.Block) bool { } // Issue [blk] to consensus once its ancestors have been issued. -func (t *Transitive) issue(ctx context.Context, blk snowman.Block) error { +// If [push] is true, a push query will be used. Otherwise, a pull query will be +// used. +func (t *Transitive) issue( + ctx context.Context, + nodeID ids.NodeID, + blk snowman.Block, + push bool, + issuedMetric prometheus.Counter, +) error { blkID := blk.ID() // mark that the block is queued to be added to consensus once its ancestors have been t.pending[blkID] = blk // Remove any outstanding requests for this block - t.blkReqs.RemoveAny(blkID) + if req, ok := t.blkReqs.DeleteValue(blkID); ok { + delete(t.blkReqSourceMetric, req) + } // Will add [blk] to consensus once its ancestors have been i := &issuer{ - t: t, - blk: blk, + t: t, + nodeID: nodeID, + blk: blk, + issuedMetric: issuedMetric, + push: push, } // block on the parent if needed @@ -661,93 +891,100 @@ func (t *Transitive) issue(ctx context.Context, blk snowman.Block) error { } // Request that [vdr] send us block [blkID] -func (t *Transitive) sendRequest(ctx context.Context, nodeID ids.NodeID, blkID ids.ID) { +func (t *Transitive) sendRequest( + ctx context.Context, + nodeID ids.NodeID, + blkID ids.ID, + issuedMetric prometheus.Counter, +) { // There is already an outstanding request for this block - if t.blkReqs.Contains(blkID) { + if t.blkReqs.HasValue(blkID) { return } - t.RequestID++ - t.blkReqs.Add(nodeID, t.RequestID, blkID) + t.requestID++ + req := common.Request{ + NodeID: nodeID, + RequestID: t.requestID, + } + t.blkReqs.Put(req, blkID) + t.blkReqSourceMetric[req] = issuedMetric + t.Ctx.Log.Verbo("sending Get request", zap.Stringer("nodeID", nodeID), - zap.Uint32("requestID", t.RequestID), + zap.Uint32("requestID", t.requestID), zap.Stringer("blkID", blkID), ) - t.Sender.SendGet(ctx, 
nodeID, t.RequestID, blkID) + t.Sender.SendGet(ctx, nodeID, t.requestID, blkID) // Tracks performance statistics t.metrics.numRequests.Set(float64(t.blkReqs.Len())) } -// send a pull query for this block ID -func (t *Transitive) pullQuery(ctx context.Context, blkID ids.ID) { +// Send a query for this block. If push is set to true, blkBytes will be used to +// send a PushQuery. Otherwise, blkBytes will be ignored and a PullQuery will be +// sent. +func (t *Transitive) sendQuery( + ctx context.Context, + blkID ids.ID, + blkBytes []byte, + push bool, +) { t.Ctx.Log.Verbo("sampling from validators", zap.Stringer("validators", t.Validators), ) - // The validators we will query - vdrIDs, err := t.Validators.Sample(t.Params.K) + + vdrIDs, err := t.Validators.Sample(t.Ctx.SubnetID, t.Params.K) if err != nil { - t.Ctx.Log.Error("dropped query for block", + t.Ctx.Log.Warn("dropped query for block", zap.String("reason", "insufficient number of validators"), zap.Stringer("blkID", blkID), + zap.Int("size", t.Params.K), ) return } - vdrBag := bag.Bag[ids.NodeID]{} - vdrBag.Add(vdrIDs...) - - t.RequestID++ - if t.polls.Add(t.RequestID, vdrBag) { - vdrList := vdrBag.List() - vdrSet := set.NewSet[ids.NodeID](len(vdrList)) - vdrSet.Add(vdrList...) - t.Sender.SendPullQuery(ctx, vdrSet, t.RequestID, blkID) - } -} - -// Send a query for this block. Some validators will be sent -// a Push Query and some will be sent a Pull Query. 
-func (t *Transitive) sendMixedQuery(ctx context.Context, blk snowman.Block) { - t.Ctx.Log.Verbo("sampling from validators", - zap.Stringer("validators", t.Validators), - ) - - blkID := blk.ID() - vdrIDs, err := t.Validators.Sample(t.Params.K) + _, lastAcceptedHeight := t.Consensus.LastAccepted() + nextHeightToAccept, err := math.Add64(lastAcceptedHeight, 1) if err != nil { t.Ctx.Log.Error("dropped query for block", - zap.String("reason", "insufficient number of validators"), + zap.String("reason", "block height overflow"), zap.Stringer("blkID", blkID), + zap.Uint64("lastAcceptedHeight", lastAcceptedHeight), + zap.Error(err), ) return } - vdrBag := bag.Bag[ids.NodeID]{} - vdrBag.Add(vdrIDs...) - - t.RequestID++ - if t.polls.Add(t.RequestID, vdrBag) { - // Send a push query to some of the validators, and a pull query to the rest. - numPushTo := t.Params.MixedQueryNumPushVdr - if !t.Validators.Contains(t.Ctx.NodeID) { - numPushTo = t.Params.MixedQueryNumPushNonVdr - } - common.SendMixedQuery( - ctx, - t.Sender, - vdrBag.List(), // Note that this doesn't contain duplicates; length may be < k - numPushTo, - t.RequestID, - blkID, - blk.Bytes(), + vdrBag := bag.Of(vdrIDs...) + t.requestID++ + if !t.polls.Add(t.requestID, vdrBag) { + t.Ctx.Log.Error("dropped query for block", + zap.String("reason", "failed to add poll"), + zap.Stringer("blkID", blkID), + zap.Uint32("requestID", t.requestID), ) + return + } + + vdrSet := set.Of(vdrIDs...) + if push { + t.Sender.SendPushQuery(ctx, vdrSet, t.requestID, blkBytes, nextHeightToAccept) + } else { + t.Sender.SendPullQuery(ctx, vdrSet, t.requestID, blkID, nextHeightToAccept) } } // issue [blk] to consensus -func (t *Transitive) deliver(ctx context.Context, blk snowman.Block) error { +// If [push] is true, a push query will be used. Otherwise, a pull query will be +// used. 
+func (t *Transitive) deliver( + ctx context.Context, + nodeID ids.NodeID, + blk snowman.Block, + push bool, + issuedMetric prometheus.Counter, +) error { blkID := blk.ID() if t.Consensus.Decided(blk) || t.Consensus.Processing(blkID) { return nil @@ -773,7 +1010,7 @@ func (t *Transitive) deliver(ctx context.Context, blk snowman.Block) error { // By ensuring that the parent is either processing or accepted, it is // guaranteed that the parent was successfully verified. This means that // calling Verify on this block is allowed. - blkAdded, err := t.addUnverifiedBlockToConsensus(ctx, blk) + blkAdded, err := t.addUnverifiedBlockToConsensus(ctx, nodeID, blk, issuedMetric) if err != nil { return err } @@ -797,7 +1034,7 @@ func (t *Transitive) deliver(ctx context.Context, blk snowman.Block) error { } for _, blk := range options { - blkAdded, err := t.addUnverifiedBlockToConsensus(ctx, blk) + blkAdded, err := t.addUnverifiedBlockToConsensus(ctx, nodeID, blk, issuedMetric) if err != nil { return err } @@ -817,25 +1054,29 @@ func (t *Transitive) deliver(ctx context.Context, blk snowman.Block) error { // If the block is now preferred, query the network for its preferences // with this new block. 
if t.Consensus.IsPreferred(blk) { - t.sendMixedQuery(ctx, blk) + t.sendQuery(ctx, blkID, blk.Bytes(), push) } t.blocked.Fulfill(ctx, blkID) for _, blk := range added { + blkID := blk.ID() if t.Consensus.IsPreferred(blk) { - t.sendMixedQuery(ctx, blk) + t.sendQuery(ctx, blkID, blk.Bytes(), push) } - blkID := blk.ID() t.removeFromPending(blk) t.blocked.Fulfill(ctx, blkID) - t.blkReqs.RemoveAny(blkID) + if req, ok := t.blkReqs.DeleteValue(blkID); ok { + delete(t.blkReqSourceMetric, req) + } } for _, blk := range dropped { blkID := blk.ID() t.removeFromPending(blk) t.blocked.Abandon(ctx, blkID) - t.blkReqs.RemoveAny(blkID) + if req, ok := t.blkReqs.DeleteValue(blkID); ok { + delete(t.blkReqSourceMetric, req) + } } // If we should issue multiple queries at the same time, we need to repoll @@ -877,10 +1118,21 @@ func (t *Transitive) addToNonVerifieds(blk snowman.Block) { // addUnverifiedBlockToConsensus returns whether the block was added and an // error if one occurred while adding it to consensus. 
-func (t *Transitive) addUnverifiedBlockToConsensus(ctx context.Context, blk snowman.Block) (bool, error) { +func (t *Transitive) addUnverifiedBlockToConsensus( + ctx context.Context, + nodeID ids.NodeID, + blk snowman.Block, + issuedMetric prometheus.Counter, +) (bool, error) { + blkID := blk.ID() + blkHeight := blk.Height() + // make sure this block is valid if err := blk.Verify(ctx); err != nil { t.Ctx.Log.Debug("block verification failed", + zap.Stringer("nodeID", nodeID), + zap.Stringer("blkID", blkID), + zap.Uint64("height", blkHeight), zap.Error(err), ) @@ -889,12 +1141,15 @@ func (t *Transitive) addUnverifiedBlockToConsensus(ctx context.Context, blk snow return false, nil } - blkID := blk.ID() + issuedMetric.Inc() t.nonVerifieds.Remove(blkID) t.nonVerifiedCache.Evict(blkID) t.metrics.numNonVerifieds.Set(float64(t.nonVerifieds.Len())) + t.metrics.issuerStake.Observe(float64(t.Validators.GetWeight(t.Ctx.SubnetID, nodeID))) t.Ctx.Log.Verbo("adding block to consensus", + zap.Stringer("nodeID", nodeID), zap.Stringer("blkID", blkID), + zap.Uint64("height", blkHeight), ) return true, t.Consensus.Add(ctx, &memoryBlock{ Block: blk, diff --git a/avalanchego/snow/engine/snowman/transitive_test.go b/avalanchego/snow/engine/snowman/transitive_test.go index 30a23baf..dcae2e26 100644 --- a/avalanchego/snow/engine/snowman/transitive_test.go +++ b/avalanchego/snow/engine/snowman/transitive_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package snowman @@ -7,8 +7,8 @@ import ( "bytes" "context" "errors" - "fmt" "testing" + "time" "github.com/stretchr/testify/require" @@ -22,7 +22,7 @@ import ( "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/set" - "github.com/ava-labs/avalanchego/utils/wrappers" + "github.com/ava-labs/avalanchego/version" ) var ( @@ -34,28 +34,35 @@ var ( Genesis = ids.GenerateTestID() ) -func setup(t *testing.T, commonCfg common.Config, engCfg Config) (ids.NodeID, validators.Set, *common.SenderTest, *block.TestVM, *Transitive, snowman.Block) { - vals := validators.NewSet() +func setup(t *testing.T, engCfg Config) (ids.NodeID, validators.Manager, *common.SenderTest, *block.TestVM, *Transitive, snowman.Block) { + require := require.New(t) + + vals := validators.NewManager() engCfg.Validators = vals vdr := ids.GenerateTestNodeID() - if err := vals.Add(vdr, nil, ids.Empty, 1); err != nil { - t.Fatal(err) - } + require.NoError(vals.AddStaker(engCfg.Ctx.SubnetID, vdr, nil, ids.Empty, 1)) + require.NoError(engCfg.ConnectedValidators.Connected(context.Background(), vdr, version.CurrentApp)) + + vals.RegisterCallbackListener(engCfg.Ctx.SubnetID, engCfg.ConnectedValidators) sender := &common.SenderTest{T: t} engCfg.Sender = sender - commonCfg.Sender = sender sender.Default(true) vm := &block.TestVM{} vm.T = t engCfg.VM = vm - snowGetHandler, err := getter.New(vm, commonCfg) - if err != nil { - t.Fatal(err) - } + snowGetHandler, err := getter.New( + vm, + sender, + engCfg.Ctx.Log, + time.Second, + 2000, + engCfg.Ctx.Registerer, + ) + require.NoError(err) engCfg.AllGetsServer = snowGetHandler vm.Default(true) @@ -81,26 +88,23 @@ func setup(t *testing.T, commonCfg common.Config, engCfg Config) (ids.NodeID, va } te, err := newTransitive(engCfg) - if err != nil { - t.Fatal(err) - } + require.NoError(err) - if err := te.Start(context.Background(), 0); err != nil { - t.Fatal(err) - } + 
require.NoError(te.Start(context.Background(), 0)) vm.GetBlockF = nil vm.LastAcceptedF = nil return vdr, vals, sender, vm, te, gBlk } -func setupDefaultConfig(t *testing.T) (ids.NodeID, validators.Set, *common.SenderTest, *block.TestVM, *Transitive, snowman.Block) { - commonCfg := common.DefaultConfigTest() - engCfg := DefaultConfigs() - return setup(t, commonCfg, engCfg) +func setupDefaultConfig(t *testing.T) (ids.NodeID, validators.Manager, *common.SenderTest, *block.TestVM, *Transitive, snowman.Block) { + engCfg := DefaultConfig(t) + return setup(t, engCfg) } func TestEngineShutdown(t *testing.T) { + require := require.New(t) + _, _, _, vm, transitive, _ := setupDefaultConfig(t) vmShutdownCalled := false vm.ShutdownF = func(context.Context) error { @@ -108,20 +112,16 @@ func TestEngineShutdown(t *testing.T) { return nil } vm.CantShutdown = false - if err := transitive.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } - if !vmShutdownCalled { - t.Fatal("Shutting down the Transitive did not shutdown the VM") - } + require.NoError(transitive.Shutdown(context.Background())) + require.True(vmShutdownCalled) } func TestEngineAdd(t *testing.T) { + require := require.New(t) + vdr, _, sender, vm, te, gBlk := setupDefaultConfig(t) - if te.Ctx.ChainID != ids.Empty { - t.Fatalf("Wrong chain ID") - } + require.Equal(ids.Empty, te.Ctx.ChainID) parent := &snowman.TestBlock{TestDecidable: choices.TestDecidable{ IDV: ids.GenerateTestID(), @@ -141,22 +141,14 @@ func TestEngineAdd(t *testing.T) { reqID := new(uint32) sender.SendGetF = func(_ context.Context, inVdr ids.NodeID, requestID uint32, blkID ids.ID) { *reqID = requestID - if *asked { - t.Fatalf("Asked multiple times") - } + require.False(*asked) *asked = true - if vdr != inVdr { - t.Fatalf("Asking wrong validator for block") - } - if blkID != blk.Parent() { - t.Fatalf("Asking for wrong block") - } + require.Equal(vdr, inVdr) + require.Equal(blk.Parent(), blkID) } vm.ParseBlockF = func(_ context.Context, b 
[]byte) (snowman.Block, error) { - if !bytes.Equal(b, blk.Bytes()) { - t.Fatalf("Wrong bytes") - } + require.Equal(blk.Bytes(), b) return blk, nil } @@ -171,36 +163,27 @@ func TestEngineAdd(t *testing.T) { } } - if err := te.Put(context.Background(), vdr, 0, blk.Bytes()); err != nil { - t.Fatal(err) - } + require.NoError(te.Put(context.Background(), vdr, 0, blk.Bytes())) vm.ParseBlockF = nil - if !*asked { - t.Fatalf("Didn't ask for a missing block") - } - - if len(te.blocked) != 1 { - t.Fatalf("Should have been blocking on request") - } + require.True(*asked) + require.Len(te.blocked, 1) vm.ParseBlockF = func(context.Context, []byte) (snowman.Block, error) { return nil, errUnknownBytes } - if err := te.Put(context.Background(), vdr, *reqID, nil); err != nil { - t.Fatal(err) - } + require.NoError(te.Put(context.Background(), vdr, *reqID, nil)) vm.ParseBlockF = nil - if len(te.blocked) != 0 { - t.Fatalf("Should have finished blocking issue") - } + require.Empty(te.blocked) } func TestEngineQuery(t *testing.T) { + require := require.New(t) + vdr, _, sender, vm, te, gBlk := setupDefaultConfig(t) blk := &snowman.TestBlock{ @@ -214,26 +197,13 @@ func TestEngineQuery(t *testing.T) { } chitted := new(bool) - sender.SendChitsF = func(_ context.Context, inVdr ids.NodeID, requestID uint32, prefSet []ids.ID, accepted []ids.ID) { - if *chitted { - t.Fatalf("Sent multiple chits") - } + sender.SendChitsF = func(_ context.Context, _ ids.NodeID, requestID uint32, preferredID ids.ID, preferredIDByHeight ids.ID, accepted ids.ID) { + require.False(*chitted) *chitted = true - if requestID != 15 { - t.Fatalf("Wrong request ID") - } - if len(prefSet) != 1 { - t.Fatal("Should only be one vote") - } - if gBlk.ID() != prefSet[0] { - t.Fatalf("Wrong chits block") - } - if len(accepted) != 1 { - t.Fatal("accepted should only have one element") - } - if gBlk.ID() != accepted[0] { - t.Fatalf("Wrong accepted frontier") - } + require.Equal(uint32(15), requestID) + require.Equal(gBlk.ID(), 
preferredID) + require.Equal(gBlk.ID(), preferredIDByHeight) + require.Equal(gBlk.ID(), accepted) } blocked := new(bool) @@ -250,64 +220,41 @@ func TestEngineQuery(t *testing.T) { asked := new(bool) getRequestID := new(uint32) sender.SendGetF = func(_ context.Context, inVdr ids.NodeID, requestID uint32, blkID ids.ID) { - if *asked { - t.Fatalf("Asked multiple times") - } + require.False(*asked) *asked = true *getRequestID = requestID - if vdr != inVdr { - t.Fatalf("Asking wrong validator for block") - } - if blk.ID() != blkID && gBlk.ID() != blkID { - t.Fatalf("Asking for wrong block") - } + require.Equal(vdr, inVdr) + require.Contains([]ids.ID{ + blk.ID(), + gBlk.ID(), + }, blkID) } - if err := te.PullQuery(context.Background(), vdr, 15, blk.ID()); err != nil { - t.Fatal(err) - } - if !*chitted { - t.Fatalf("Didn't respond with chits") - } - if !*blocked { - t.Fatalf("Didn't request block") - } - if !*asked { - t.Fatalf("Didn't request block from validator") - } + require.NoError(te.PullQuery(context.Background(), vdr, 15, blk.ID(), 1)) + require.True(*chitted) + require.True(*blocked) + require.True(*asked) queried := new(bool) queryRequestID := new(uint32) - sender.SendPushQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, blkBytes []byte) { - if *queried { - t.Fatalf("Asked multiple times") - } + sender.SendPullQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, blockID ids.ID, requestedHeight uint64) { + require.False(*queried) *queried = true *queryRequestID = requestID - vdrSet := set.Set[ids.NodeID]{} - vdrSet.Add(vdr) - if !inVdrs.Equals(vdrSet) { - t.Fatalf("Asking wrong validator for preference") - } - if !bytes.Equal(blk.Bytes(), blkBytes) { - t.Fatalf("Asking for wrong block") - } + vdrSet := set.Of(vdr) + require.Equal(vdrSet, inVdrs) + require.Equal(blk.ID(), blockID) + require.Equal(uint64(1), requestedHeight) } vm.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { - if 
!bytes.Equal(b, blk.Bytes()) { - t.Fatalf("Wrong bytes") - } + require.Equal(blk.Bytes(), b) return blk, nil } - if err := te.Put(context.Background(), vdr, *getRequestID, blk.Bytes()); err != nil { - t.Fatal(err) - } + require.NoError(te.Put(context.Background(), vdr, *getRequestID, blk.Bytes())) vm.ParseBlockF = nil - if !*queried { - t.Fatalf("Didn't ask for preferences") - } + require.True(*queried) blk1 := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ @@ -321,54 +268,37 @@ func TestEngineQuery(t *testing.T) { vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { - case blk.ID(): - return nil, errUnknownBlock - case blk1.ID(): + case blk.ID(), blk1.ID(): return nil, errUnknownBlock } - t.Fatalf("Wrong block requested") - panic("Should have failed") + require.FailNow(errUnknownBlock.Error()) + return nil, errUnknownBlock } *asked = false sender.SendGetF = func(_ context.Context, inVdr ids.NodeID, requestID uint32, blkID ids.ID) { - if *asked { - t.Fatalf("Asked multiple times") - } + require.False(*asked) *asked = true *getRequestID = requestID - if vdr != inVdr { - t.Fatalf("Asking wrong validator for block") - } - if blk1.ID() != blkID { - t.Fatalf("Asking for wrong block") - } - } - if err := te.Chits(context.Background(), vdr, *queryRequestID, []ids.ID{blk1.ID()}, nil); err != nil { - t.Fatal(err) + require.Equal(vdr, inVdr) + require.Equal(blk1.ID(), blkID) } + require.NoError(te.Chits(context.Background(), vdr, *queryRequestID, blk1.ID(), blk1.ID(), blk1.ID())) *queried = false - sender.SendPushQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, blkBytes []byte) { - if *queried { - t.Fatalf("Asked multiple times") - } + *queryRequestID = 0 + sender.SendPullQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, blockID ids.ID, requestedHeight uint64) { + require.False(*queried) *queried = true *queryRequestID = requestID - vdrSet := set.Set[ids.NodeID]{} - 
vdrSet.Add(vdr) - if !inVdrs.Equals(vdrSet) { - t.Fatalf("Asking wrong validator for preference") - } - if !bytes.Equal(blk1.Bytes(), blkBytes) { - t.Fatalf("Asking for wrong block") - } + vdrSet := set.Of(vdr) + require.Equal(vdrSet, inVdrs) + require.Equal(blk1.ID(), blockID) + require.Equal(uint64(1), requestedHeight) } vm.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { - if !bytes.Equal(b, blk1.Bytes()) { - t.Fatalf("Wrong bytes") - } + require.Equal(blk1.Bytes(), b) vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { @@ -377,64 +307,50 @@ func TestEngineQuery(t *testing.T) { case blk1.ID(): return blk1, nil } - t.Fatalf("Wrong block requested") - panic("Should have failed") + require.FailNow(errUnknownBlock.Error()) + return nil, errUnknownBlock } return blk1, nil } - if err := te.Put(context.Background(), vdr, *getRequestID, blk1.Bytes()); err != nil { - t.Fatal(err) - } + require.NoError(te.Put(context.Background(), vdr, *getRequestID, blk1.Bytes())) vm.ParseBlockF = nil - if blk1.Status() != choices.Accepted { - t.Fatalf("Should have executed block") - } - if len(te.blocked) != 0 { - t.Fatalf("Should have finished blocking") - } + require.Equal(choices.Accepted, blk1.Status()) + require.Empty(te.blocked) _ = te.polls.String() // Shouldn't panic - if err := te.QueryFailed(context.Background(), vdr, *queryRequestID); err != nil { - t.Fatal(err) - } - if len(te.blocked) != 0 { - t.Fatalf("Should have finished blocking") - } + require.NoError(te.QueryFailed(context.Background(), vdr, *queryRequestID)) + require.Empty(te.blocked) } func TestEngineMultipleQuery(t *testing.T) { - engCfg := DefaultConfigs() + require := require.New(t) + + engCfg := DefaultConfig(t) engCfg.Params = snowball.Parameters{ - K: 3, - Alpha: 2, - BetaVirtuous: 1, - BetaRogue: 2, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - MixedQueryNumPushNonVdr: 3, - } - - vals := 
validators.NewSet() + K: 3, + AlphaPreference: 2, + AlphaConfidence: 2, + BetaVirtuous: 1, + BetaRogue: 2, + ConcurrentRepolls: 1, + OptimalProcessing: 1, + MaxOutstandingItems: 1, + MaxItemProcessingTime: 1, + } + + vals := validators.NewManager() engCfg.Validators = vals vdr0 := ids.GenerateTestNodeID() vdr1 := ids.GenerateTestNodeID() vdr2 := ids.GenerateTestNodeID() - errs := wrappers.Errs{} - errs.Add( - vals.Add(vdr0, nil, ids.Empty, 1), - vals.Add(vdr1, nil, ids.Empty, 1), - vals.Add(vdr2, nil, ids.Empty, 1), - ) - if errs.Errored() { - t.Fatal(errs.Err) - } + require.NoError(vals.AddStaker(engCfg.Ctx.SubnetID, vdr0, nil, ids.Empty, 1)) + require.NoError(vals.AddStaker(engCfg.Ctx.SubnetID, vdr1, nil, ids.Empty, 1)) + require.NoError(vals.AddStaker(engCfg.Ctx.SubnetID, vdr2, nil, ids.Empty, 1)) sender := &common.SenderTest{T: t} engCfg.Sender = sender @@ -457,20 +373,14 @@ func TestEngineMultipleQuery(t *testing.T) { return gBlk.ID(), nil } vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { - if blkID != gBlk.ID() { - t.Fatalf("Wrong block requested") - } + require.Equal(gBlk.ID(), blkID) return gBlk, nil } te, err := newTransitive(engCfg) - if err != nil { - t.Fatal(err) - } + require.NoError(err) - if err := te.Start(context.Background(), 0); err != nil { - t.Fatal(err) - } + require.NoError(te.Start(context.Background(), 0)) vm.GetBlockF = nil vm.LastAcceptedF = nil @@ -487,20 +397,14 @@ func TestEngineMultipleQuery(t *testing.T) { queried := new(bool) queryRequestID := new(uint32) - sender.SendPushQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, blkBytes []byte) { - if *queried { - t.Fatalf("Asked multiple times") - } + sender.SendPullQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, blkID ids.ID, requestedHeight uint64) { + require.False(*queried) *queried = true *queryRequestID = requestID - vdrSet := set.Set[ids.NodeID]{} - vdrSet.Add(vdr0, vdr1, vdr2) - if 
!inVdrs.Equals(vdrSet) { - t.Fatalf("Asking wrong validator for preference") - } - if !bytes.Equal(blk0.Bytes(), blkBytes) { - t.Fatalf("Asking for wrong block") - } + vdrSet := set.Of(vdr0, vdr1, vdr2) + require.Equal(vdrSet, inVdrs) + require.Equal(blk0.ID(), blkID) + require.Equal(uint64(1), requestedHeight) } vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { @@ -512,9 +416,13 @@ func TestEngineMultipleQuery(t *testing.T) { } } - if err := te.issue(context.Background(), blk0); err != nil { - t.Fatal(err) - } + require.NoError(te.issue( + context.Background(), + te.Ctx.NodeID, + blk0, + false, + te.metrics.issued.WithLabelValues(unknownSource), + )) blk1 := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ @@ -535,32 +443,21 @@ func TestEngineMultipleQuery(t *testing.T) { case blk1.ID(): return nil, errUnknownBlock } - t.Fatalf("Unknown block") - panic("Should have errored") + require.FailNow(errUnknownBlock.Error()) + return nil, errUnknownBlock } asked := new(bool) getRequestID := new(uint32) sender.SendGetF = func(_ context.Context, inVdr ids.NodeID, requestID uint32, blkID ids.ID) { - if *asked { - t.Fatalf("Asked multiple times") - } + require.False(*asked) *asked = true *getRequestID = requestID - if vdr0 != inVdr { - t.Fatalf("Asking wrong validator for block") - } - if blk1.ID() != blkID { - t.Fatalf("Asking for wrong block") - } - } - blkSet := []ids.ID{blk1.ID()} - if err := te.Chits(context.Background(), vdr0, *queryRequestID, blkSet, nil); err != nil { - t.Fatal(err) - } - if err := te.Chits(context.Background(), vdr1, *queryRequestID, blkSet, nil); err != nil { - t.Fatal(err) + require.Equal(vdr0, inVdr) + require.Equal(blk1.ID(), blkID) } + require.NoError(te.Chits(context.Background(), vdr0, *queryRequestID, blk1.ID(), blk1.ID(), blk1.ID())) + require.NoError(te.Chits(context.Background(), vdr1, *queryRequestID, blk1.ID(), blk1.ID(), blk1.ID())) vm.ParseBlockF = func(context.Context, []byte) (snowman.Block, 
error) { vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { @@ -570,8 +467,8 @@ func TestEngineMultipleQuery(t *testing.T) { case blkID == blk1.ID(): return blk1, nil } - t.Fatalf("Wrong block requested") - panic("Should have failed") + require.FailNow(errUnknownBlock.Error()) + return nil, errUnknownBlock } return blk1, nil @@ -579,40 +476,27 @@ func TestEngineMultipleQuery(t *testing.T) { *queried = false secondQueryRequestID := new(uint32) - sender.SendPushQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, blkBytes []byte) { - if *queried { - t.Fatalf("Asked multiple times") - } + sender.SendPullQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, blkID ids.ID, requestedHeight uint64) { + require.False(*queried) *queried = true *secondQueryRequestID = requestID - vdrSet := set.Set[ids.NodeID]{} - vdrSet.Add(vdr0, vdr1, vdr2) - if !inVdrs.Equals(vdrSet) { - t.Fatalf("Asking wrong validator for preference") - } - if !bytes.Equal(blk1.Bytes(), blkBytes) { - t.Fatalf("Asking for wrong block") - } - } - if err := te.Put(context.Background(), vdr0, *getRequestID, blk1.Bytes()); err != nil { - t.Fatal(err) + vdrSet := set.Of(vdr0, vdr1, vdr2) + require.Equal(vdrSet, inVdrs) + require.Equal(blk1.ID(), blkID) + require.Equal(uint64(1), requestedHeight) } + require.NoError(te.Put(context.Background(), vdr0, *getRequestID, blk1.Bytes())) // Should be dropped because the query was already filled - blkSet = []ids.ID{blk0.ID()} - if err := te.Chits(context.Background(), vdr2, *queryRequestID, blkSet, nil); err != nil { - t.Fatal(err) - } + require.NoError(te.Chits(context.Background(), vdr2, *queryRequestID, blk0.ID(), blk0.ID(), blk0.ID())) - if blk1.Status() != choices.Accepted { - t.Fatalf("Should have executed block") - } - if len(te.blocked) != 0 { - t.Fatalf("Should have finished blocking") - } + require.Equal(choices.Accepted, blk1.Status()) + require.Empty(te.blocked) } func 
TestEngineBlockedIssue(t *testing.T) { + require := require.New(t) + _, _, sender, vm, te, gBlk := setupDefaultConfig(t) sender.Default(false) @@ -648,21 +532,29 @@ func TestEngineBlockedIssue(t *testing.T) { } } - if err := te.issue(context.Background(), blk1); err != nil { - t.Fatal(err) - } + require.NoError(te.issue( + context.Background(), + te.Ctx.NodeID, + blk1, + false, + te.metrics.issued.WithLabelValues(unknownSource), + )) blk0.StatusV = choices.Processing - if err := te.issue(context.Background(), blk0); err != nil { - t.Fatal(err) - } - - if blk1.ID() != te.Consensus.Preference() { - t.Fatalf("Should have issued blk1") - } + require.NoError(te.issue( + context.Background(), + te.Ctx.NodeID, + blk0, + false, + te.metrics.issued.WithLabelValues(unknownSource), + )) + + require.Equal(blk1.ID(), te.Consensus.Preference()) } func TestEngineAbandonResponse(t *testing.T) { + require := require.New(t) + vdr, _, sender, vm, te, gBlk := setupDefaultConfig(t) sender.Default(false) @@ -684,59 +576,50 @@ func TestEngineAbandonResponse(t *testing.T) { case blkID == blk.ID(): return nil, errUnknownBlock } - t.Fatalf("Wrong block requested") + require.FailNow(errUnknownBlock.Error()) return nil, errUnknownBlock } - if err := te.issue(context.Background(), blk); err != nil { - t.Fatal(err) - } - if err := te.QueryFailed(context.Background(), vdr, 1); err != nil { - t.Fatal(err) - } + require.NoError(te.issue( + context.Background(), + te.Ctx.NodeID, + blk, + false, + te.metrics.issued.WithLabelValues(unknownSource), + )) + require.NoError(te.QueryFailed(context.Background(), vdr, 1)) - if len(te.blocked) != 0 { - t.Fatalf("Should have removed blocking event") - } + require.Empty(te.blocked) } func TestEngineFetchBlock(t *testing.T) { + require := require.New(t) + vdr, _, sender, vm, te, gBlk := setupDefaultConfig(t) sender.Default(false) vm.GetBlockF = func(_ context.Context, id ids.ID) (snowman.Block, error) { - if id == gBlk.ID() { - return gBlk, nil - } - 
t.Fatalf("Unknown block") - panic("Should have failed") + require.Equal(gBlk.ID(), id) + return gBlk, nil } added := new(bool) sender.SendPutF = func(_ context.Context, inVdr ids.NodeID, requestID uint32, blk []byte) { - if vdr != inVdr { - t.Fatalf("Wrong validator") - } - if requestID != 123 { - t.Fatalf("Wrong request id") - } - if !bytes.Equal(gBlk.Bytes(), blk) { - t.Fatalf("Asking for wrong block") - } + require.Equal(vdr, inVdr) + require.Equal(uint32(123), requestID) + require.Equal(gBlk.Bytes(), blk) *added = true } - if err := te.Get(context.Background(), vdr, 123, gBlk.ID()); err != nil { - t.Fatal(err) - } + require.NoError(te.Get(context.Background(), vdr, 123, gBlk.ID())) - if !*added { - t.Fatalf("Should have sent block to peer") - } + require.True(*added) } func TestEnginePushQuery(t *testing.T) { + require := require.New(t) + vdr, _, sender, vm, te, gBlk := setupDefaultConfig(t) sender.Default(true) @@ -770,60 +653,35 @@ func TestEnginePushQuery(t *testing.T) { } chitted := new(bool) - sender.SendChitsF = func(_ context.Context, inVdr ids.NodeID, requestID uint32, votes []ids.ID, accepted []ids.ID) { - if *chitted { - t.Fatalf("Sent chit multiple times") - } + sender.SendChitsF = func(_ context.Context, inVdr ids.NodeID, requestID uint32, preferredID ids.ID, preferredIDByHeight ids.ID, acceptedID ids.ID) { + require.False(*chitted) *chitted = true - if inVdr != vdr { - t.Fatalf("Asking wrong validator for preference") - } - if requestID != 20 { - t.Fatalf("Wrong request id") - } - if len(votes) != 1 { - t.Fatal("votes should only have one element") - } - if gBlk.ID() != votes[0] { - t.Fatalf("Asking for wrong block") - } - if len(accepted) != 1 { - t.Fatal("accepted should only have one element") - } - if gBlk.ID() != accepted[0] { - t.Fatalf("Wrong accepted frontier") - } + require.Equal(vdr, inVdr) + require.Equal(uint32(20), requestID) + require.Equal(gBlk.ID(), preferredID) + require.Equal(gBlk.ID(), preferredIDByHeight) + 
require.Equal(gBlk.ID(), acceptedID) } queried := new(bool) - sender.SendPushQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], _ uint32, blkBytes []byte) { - if *queried { - t.Fatalf("Asked multiple times") - } + sender.SendPullQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], _ uint32, blkID ids.ID, requestedHeight uint64) { + require.False(*queried) *queried = true - vdrSet := set.Set[ids.NodeID]{} - vdrSet.Add(vdr) - if !inVdrs.Equals(vdrSet) { - t.Fatalf("Asking wrong validator for preference") - } - if !bytes.Equal(blk.Bytes(), blkBytes) { - t.Fatalf("Asking for wrong block") - } + vdrSet := set.Of(vdr) + require.True(inVdrs.Equals(vdrSet)) + require.Equal(blk.ID(), blkID) + require.Equal(uint64(1), requestedHeight) } - if err := te.PushQuery(context.Background(), vdr, 20, blk.Bytes()); err != nil { - t.Fatal(err) - } + require.NoError(te.PushQuery(context.Background(), vdr, 20, blk.Bytes(), 1)) - if !*chitted { - t.Fatalf("Should have sent a chit to the peer") - } - if !*queried { - t.Fatalf("Should have sent a query to the peer") - } + require.True(*chitted) + require.True(*queried) } func TestEngineBuildBlock(t *testing.T) { + require := require.New(t) + vdr, _, sender, vm, te, gBlk := setupDefaultConfig(t) sender.Default(true) @@ -847,86 +705,71 @@ func TestEngineBuildBlock(t *testing.T) { } } - queried := new(bool) - sender.SendPushQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], _ uint32, blkBytes []byte) { - if *queried { - t.Fatalf("Asked multiple times") - } - *queried = true - vdrSet := set.Set[ids.NodeID]{} - vdrSet.Add(vdr) - if !inVdrs.Equals(vdrSet) { - t.Fatalf("Asking wrong validator for preference") - } + sender.SendPullQueryF = func(context.Context, set.Set[ids.NodeID], uint32, ids.ID, uint64) { + require.FailNow("should not be sending pulls when we are the block producer") + } + + pushSent := new(bool) + sender.SendPushQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], _ uint32, _ []byte, _ uint64) 
{ + require.False(*pushSent) + *pushSent = true + vdrSet := set.Of(vdr) + require.Equal(vdrSet, inVdrs) } vm.BuildBlockF = func(context.Context) (snowman.Block, error) { return blk, nil } - if err := te.Notify(context.Background(), common.PendingTxs); err != nil { - t.Fatal(err) - } + require.NoError(te.Notify(context.Background(), common.PendingTxs)) - if !*queried { - t.Fatalf("Should have sent a query to the peer") - } + require.True(*pushSent) } func TestEngineRepoll(t *testing.T) { + require := require.New(t) vdr, _, sender, _, te, _ := setupDefaultConfig(t) sender.Default(true) queried := new(bool) - sender.SendPullQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], _ uint32, blkID ids.ID) { - if *queried { - t.Fatalf("Asked multiple times") - } + sender.SendPullQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], _ uint32, _ ids.ID, _ uint64) { + require.False(*queried) *queried = true - vdrSet := set.Set[ids.NodeID]{} - vdrSet.Add(vdr) - if !inVdrs.Equals(vdrSet) { - t.Fatalf("Asking wrong validator for preference") - } + vdrSet := set.Of(vdr) + require.Equal(vdrSet, inVdrs) } te.repoll(context.Background()) - if !*queried { - t.Fatalf("Should have sent a query to the peer") - } + require.True(*queried) } func TestVoteCanceling(t *testing.T) { - engCfg := DefaultConfigs() + require := require.New(t) + + engCfg := DefaultConfig(t) engCfg.Params = snowball.Parameters{ - K: 3, - Alpha: 2, - BetaVirtuous: 1, - BetaRogue: 2, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - MixedQueryNumPushNonVdr: 3, - } - - vals := validators.NewSet() + K: 3, + AlphaPreference: 2, + AlphaConfidence: 2, + BetaVirtuous: 1, + BetaRogue: 2, + ConcurrentRepolls: 1, + OptimalProcessing: 1, + MaxOutstandingItems: 1, + MaxItemProcessingTime: 1, + } + + vals := validators.NewManager() engCfg.Validators = vals vdr0 := ids.GenerateTestNodeID() vdr1 := ids.GenerateTestNodeID() vdr2 := ids.GenerateTestNodeID() - errs := 
wrappers.Errs{} - errs.Add( - vals.Add(vdr0, nil, ids.Empty, 1), - vals.Add(vdr1, nil, ids.Empty, 1), - vals.Add(vdr2, nil, ids.Empty, 1), - ) - if errs.Errored() { - t.Fatal(errs.Err) - } + require.NoError(vals.AddStaker(engCfg.Ctx.SubnetID, vdr0, nil, ids.Empty, 1)) + require.NoError(vals.AddStaker(engCfg.Ctx.SubnetID, vdr1, nil, ids.Empty, 1)) + require.NoError(vals.AddStaker(engCfg.Ctx.SubnetID, vdr2, nil, ids.Empty, 1)) sender := &common.SenderTest{T: t} engCfg.Sender = sender @@ -949,23 +792,14 @@ func TestVoteCanceling(t *testing.T) { return gBlk.ID(), nil } vm.GetBlockF = func(_ context.Context, id ids.ID) (snowman.Block, error) { - switch id { - case gBlk.ID(): - return gBlk, nil - default: - t.Fatalf("Loaded unknown block") - panic("Should have failed") - } + require.Equal(gBlk.ID(), id) + return gBlk, nil } te, err := newTransitive(engCfg) - if err != nil { - t.Fatal(err) - } + require.NoError(err) - if err := te.Start(context.Background(), 0); err != nil { - t.Fatal(err) - } + require.NoError(te.Start(context.Background(), 0)) vm.LastAcceptedF = nil @@ -981,53 +815,43 @@ func TestVoteCanceling(t *testing.T) { queried := new(bool) queryRequestID := new(uint32) - sender.SendPushQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, blkBytes []byte) { - if *queried { - t.Fatalf("Asked multiple times") - } + sender.SendPushQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, blkBytes []byte, requestedHeight uint64) { + require.False(*queried) *queried = true *queryRequestID = requestID - vdrSet := set.Set[ids.NodeID]{} - vdrSet.Add(vdr0, vdr1, vdr2) - if !inVdrs.Equals(vdrSet) { - t.Fatalf("Asking wrong validator for preference") - } - if !bytes.Equal(blk.Bytes(), blkBytes) { - t.Fatalf("Asking for wrong block") - } + vdrSet := set.Of(vdr0, vdr1, vdr2) + require.Equal(vdrSet, inVdrs) + require.Equal(blk.Bytes(), blkBytes) + require.Equal(uint64(1), requestedHeight) } - if err := 
te.issue(context.Background(), blk); err != nil { - t.Fatal(err) - } + require.NoError(te.issue( + context.Background(), + te.Ctx.NodeID, + blk, + true, + te.metrics.issued.WithLabelValues(unknownSource), + )) - if te.polls.Len() != 1 { - t.Fatalf("Shouldn't have finished blocking issue") - } + require.Equal(1, te.polls.Len()) - if err := te.QueryFailed(context.Background(), vdr0, *queryRequestID); err != nil { - t.Fatal(err) - } + require.NoError(te.QueryFailed(context.Background(), vdr0, *queryRequestID)) - if te.polls.Len() != 1 { - t.Fatalf("Shouldn't have finished blocking issue") - } + require.Equal(1, te.polls.Len()) repolled := new(bool) - sender.SendPullQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, blkID ids.ID) { + sender.SendPullQueryF = func(context.Context, set.Set[ids.NodeID], uint32, ids.ID, uint64) { *repolled = true } - if err := te.QueryFailed(context.Background(), vdr1, *queryRequestID); err != nil { - t.Fatal(err) - } + require.NoError(te.QueryFailed(context.Background(), vdr1, *queryRequestID)) - if !*repolled { - t.Fatalf("Should have finished blocking issue and repolled the network") - } + require.True(*repolled) } func TestEngineNoQuery(t *testing.T) { - engCfg := DefaultConfigs() + require := require.New(t) + + engCfg := DefaultConfig(t) sender := &common.SenderTest{T: t} engCfg.Sender = sender @@ -1054,13 +878,9 @@ func TestEngineNoQuery(t *testing.T) { engCfg.VM = vm te, err := newTransitive(engCfg) - if err != nil { - t.Fatal(err) - } + require.NoError(err) - if err := te.Start(context.Background(), 0); err != nil { - t.Fatal(err) - } + require.NoError(te.Start(context.Background(), 0)) blk := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ @@ -1072,13 +892,19 @@ func TestEngineNoQuery(t *testing.T) { BytesV: []byte{1}, } - if err := te.issue(context.Background(), blk); err != nil { - t.Fatal(err) - } + require.NoError(te.issue( + context.Background(), + te.Ctx.NodeID, + blk, + false, + 
te.metrics.issued.WithLabelValues(unknownSource), + )) } func TestEngineNoRepollQuery(t *testing.T) { - engCfg := DefaultConfigs() + require := require.New(t) + + engCfg := DefaultConfig(t) sender := &common.SenderTest{T: t} engCfg.Sender = sender @@ -1105,18 +931,16 @@ func TestEngineNoRepollQuery(t *testing.T) { engCfg.VM = vm te, err := newTransitive(engCfg) - if err != nil { - t.Fatal(err) - } + require.NoError(err) - if err := te.Start(context.Background(), 0); err != nil { - t.Fatal(err) - } + require.NoError(te.Start(context.Background(), 0)) te.repoll(context.Background()) } func TestEngineAbandonQuery(t *testing.T) { + require := require.New(t) + vdr, _, sender, vm, te, _ := setupDefaultConfig(t) sender.Default(true) @@ -1124,13 +948,8 @@ func TestEngineAbandonQuery(t *testing.T) { blkID := ids.GenerateTestID() vm.GetBlockF = func(_ context.Context, id ids.ID) (snowman.Block, error) { - switch id { - case blkID: - return nil, errUnknownBlock - default: - t.Fatalf("Loaded unknown block") - panic("Should have failed") - } + require.Equal(blkID, id) + return nil, errUnknownBlock } reqID := new(uint32) @@ -1140,21 +959,13 @@ func TestEngineAbandonQuery(t *testing.T) { sender.CantSendChits = false - if err := te.PullQuery(context.Background(), vdr, 0, blkID); err != nil { - t.Fatal(err) - } + require.NoError(te.PullQuery(context.Background(), vdr, 0, blkID, 0)) - if te.blkReqs.Len() != 1 { - t.Fatalf("Should have issued request") - } + require.Equal(1, te.blkReqs.Len()) - if err := te.GetFailed(context.Background(), vdr, *reqID); err != nil { - t.Fatal(err) - } + require.NoError(te.GetFailed(context.Background(), vdr, *reqID)) - if te.blkReqs.Len() != 0 { - t.Fatalf("Should have removed request") - } + require.Zero(te.blkReqs.Len()) } func TestEngineAbandonChit(t *testing.T) { @@ -1181,17 +992,22 @@ func TestEngineAbandonChit(t *testing.T) { case blk.ID(): return nil, errUnknownBlock } - t.Fatalf("Wrong block requested") + 
require.FailNow(errUnknownBlock.Error()) return nil, errUnknownBlock } var reqID uint32 - sender.SendPushQueryF = func(_ context.Context, _ set.Set[ids.NodeID], requestID uint32, _ []byte) { + sender.SendPullQueryF = func(_ context.Context, _ set.Set[ids.NodeID], requestID uint32, _ ids.ID, _ uint64) { reqID = requestID } - err := te.issue(context.Background(), blk) - require.NoError(err) + require.NoError(te.issue( + context.Background(), + te.Ctx.NodeID, + blk, + false, + te.metrics.issued.WithLabelValues(unknownSource), + )) fakeBlkID := ids.GenerateTestID() vm.GetBlockF = func(_ context.Context, id ids.ID) (snowman.Block, error) { @@ -1204,14 +1020,12 @@ func TestEngineAbandonChit(t *testing.T) { } // Register a voter dependency on an unknown block. - err = te.Chits(context.Background(), vdr, reqID, []ids.ID{fakeBlkID}, nil) - require.NoError(err) + require.NoError(te.Chits(context.Background(), vdr, reqID, fakeBlkID, fakeBlkID, fakeBlkID)) require.Len(te.blocked, 1) sender.CantSendPullQuery = false - err = te.GetFailed(context.Background(), vdr, reqID) - require.NoError(err) + require.NoError(te.GetFailed(context.Background(), vdr, reqID)) require.Empty(te.blocked) } @@ -1239,17 +1053,22 @@ func TestEngineAbandonChitWithUnexpectedPutBlock(t *testing.T) { case blk.ID(): return nil, errUnknownBlock } - t.Fatalf("Wrong block requested") + require.FailNow(errUnknownBlock.Error()) return nil, errUnknownBlock } var reqID uint32 - sender.SendPushQueryF = func(_ context.Context, _ set.Set[ids.NodeID], requestID uint32, _ []byte) { + sender.SendPushQueryF = func(_ context.Context, _ set.Set[ids.NodeID], requestID uint32, _ []byte, _ uint64) { reqID = requestID } - err := te.issue(context.Background(), blk) - require.NoError(err) + require.NoError(te.issue( + context.Background(), + te.Ctx.NodeID, + blk, + true, + te.metrics.issued.WithLabelValues(unknownSource), + )) fakeBlkID := ids.GenerateTestID() vm.GetBlockF = func(_ context.Context, id ids.ID) (snowman.Block, 
error) { @@ -1262,8 +1081,7 @@ func TestEngineAbandonChitWithUnexpectedPutBlock(t *testing.T) { } // Register a voter dependency on an unknown block. - err = te.Chits(context.Background(), vdr, reqID, []ids.ID{fakeBlkID}, nil) - require.NoError(err) + require.NoError(te.Chits(context.Background(), vdr, reqID, fakeBlkID, fakeBlkID, fakeBlkID)) require.Len(te.blocked, 1) sender.CantSendPullQuery = false @@ -1276,12 +1094,13 @@ func TestEngineAbandonChitWithUnexpectedPutBlock(t *testing.T) { // Respond with an unexpected block and verify that the request is correctly // cleared. - err = te.Put(context.Background(), vdr, reqID, gBlkBytes) - require.NoError(err) + require.NoError(te.Put(context.Background(), vdr, reqID, gBlkBytes)) require.Empty(te.blocked) } func TestEngineBlockingChitRequest(t *testing.T) { + require := require.New(t) + vdr, _, sender, vm, te, gBlk := setupDefaultConfig(t) sender.Default(true) @@ -1328,42 +1147,41 @@ func TestEngineBlockingChitRequest(t *testing.T) { sender.SendGetF = func(context.Context, ids.NodeID, uint32, ids.ID) {} vm.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { - switch { - case bytes.Equal(b, blockingBlk.Bytes()): - return blockingBlk, nil - default: - t.Fatalf("Loaded unknown block") - panic("Should have failed") - } + require.Equal(blockingBlk.Bytes(), b) + return blockingBlk, nil } - if err := te.issue(context.Background(), parentBlk); err != nil { - t.Fatal(err) - } + require.NoError(te.issue( + context.Background(), + te.Ctx.NodeID, + parentBlk, + false, + te.metrics.issued.WithLabelValues(unknownSource), + )) sender.CantSendChits = false - if err := te.PushQuery(context.Background(), vdr, 0, blockingBlk.Bytes()); err != nil { - t.Fatal(err) - } + require.NoError(te.PushQuery(context.Background(), vdr, 0, blockingBlk.Bytes(), 0)) - if len(te.blocked) != 2 { - t.Fatalf("Both inserts should be blocking") - } + require.Len(te.blocked, 2) - sender.CantSendPushQuery = false + sender.CantSendPullQuery 
= false missingBlk.StatusV = choices.Processing - if err := te.issue(context.Background(), missingBlk); err != nil { - t.Fatal(err) - } + require.NoError(te.issue( + context.Background(), + te.Ctx.NodeID, + missingBlk, + false, + te.metrics.issued.WithLabelValues(unknownSource), + )) - if len(te.blocked) != 0 { - t.Fatalf("Both inserts should not longer be blocking") - } + require.Empty(te.blocked) } func TestEngineBlockingChitResponse(t *testing.T) { + require := require.New(t) + vdr, _, sender, vm, te, gBlk := setupDefaultConfig(t) sender.Default(true) @@ -1400,6 +1218,8 @@ func TestEngineBlockingChitResponse(t *testing.T) { switch blkID { case gBlk.ID(): return gBlk, nil + case issuedBlk.ID(): + return issuedBlk, nil case blockingBlk.ID(): return blockingBlk, nil default: @@ -1407,44 +1227,52 @@ func TestEngineBlockingChitResponse(t *testing.T) { } } - if err := te.issue(context.Background(), blockingBlk); err != nil { - t.Fatal(err) - } + require.NoError(te.issue( + context.Background(), + te.Ctx.NodeID, + blockingBlk, + false, + te.metrics.issued.WithLabelValues(unknownSource), + )) queryRequestID := new(uint32) - sender.SendPushQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, blkBytes []byte) { + sender.SendPullQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, blkID ids.ID, requestedHeight uint64) { *queryRequestID = requestID - vdrSet := set.Set[ids.NodeID]{} - vdrSet.Add(vdr) - if !inVdrs.Equals(vdrSet) { - t.Fatalf("Asking wrong validator for preference") - } - if !bytes.Equal(issuedBlk.Bytes(), blkBytes) { - t.Fatalf("Asking for wrong block") - } + vdrSet := set.Of(vdr) + require.Equal(vdrSet, inVdrs) + require.Equal(issuedBlk.ID(), blkID) + require.Equal(uint64(1), requestedHeight) } - if err := te.issue(context.Background(), issuedBlk); err != nil { - t.Fatal(err) - } + require.NoError(te.issue( + context.Background(), + te.Ctx.NodeID, + issuedBlk, + false, + 
te.metrics.issued.WithLabelValues(unknownSource), + )) sender.SendPushQueryF = nil sender.CantSendPushQuery = false - if err := te.Chits(context.Background(), vdr, *queryRequestID, []ids.ID{blockingBlk.ID()}, nil); err != nil { - t.Fatal(err) - } + require.NoError(te.Chits(context.Background(), vdr, *queryRequestID, blockingBlk.ID(), issuedBlk.ID(), blockingBlk.ID())) - require.Len(t, te.blocked, 2) + require.Len(te.blocked, 2) sender.CantSendPullQuery = false missingBlk.StatusV = choices.Processing - if err := te.issue(context.Background(), missingBlk); err != nil { - t.Fatal(err) - } + require.NoError(te.issue( + context.Background(), + te.Ctx.NodeID, + missingBlk, + false, + te.metrics.issued.WithLabelValues(unknownSource), + )) } func TestEngineRetryFetch(t *testing.T) { + require := require.New(t) + vdr, _, sender, vm, te, gBlk := setupDefaultConfig(t) sender.Default(true) @@ -1467,16 +1295,12 @@ func TestEngineRetryFetch(t *testing.T) { } sender.CantSendChits = false - if err := te.PullQuery(context.Background(), vdr, 0, missingBlk.ID()); err != nil { - t.Fatal(err) - } + require.NoError(te.PullQuery(context.Background(), vdr, 0, missingBlk.ID(), 0)) vm.CantGetBlock = true sender.SendGetF = nil - if err := te.GetFailed(context.Background(), vdr, *reqID); err != nil { - t.Fatal(err) - } + require.NoError(te.GetFailed(context.Background(), vdr, *reqID)) vm.CantGetBlock = false @@ -1485,19 +1309,17 @@ func TestEngineRetryFetch(t *testing.T) { *called = true } - if err := te.PullQuery(context.Background(), vdr, 0, missingBlk.ID()); err != nil { - t.Fatal(err) - } + require.NoError(te.PullQuery(context.Background(), vdr, 0, missingBlk.ID(), 0)) vm.CantGetBlock = true sender.SendGetF = nil - if !*called { - t.Fatalf("Should have requested the block again") - } + require.True(*called) } func TestEngineUndeclaredDependencyDeadlock(t *testing.T) { + require := require.New(t) + vdr, _, sender, vm, te, gBlk := setupDefaultConfig(t) sender.Default(true) @@ -1525,10 
+1347,9 @@ func TestEngineUndeclaredDependencyDeadlock(t *testing.T) { invalidBlkID := invalidBlk.ID() reqID := new(uint32) - sender.SendPushQueryF = func(_ context.Context, _ set.Set[ids.NodeID], requestID uint32, _ []byte) { + sender.SendPullQueryF = func(_ context.Context, _ set.Set[ids.NodeID], requestID uint32, _ ids.ID, _ uint64) { *reqID = requestID } - sender.SendPullQueryF = func(_ context.Context, _ set.Set[ids.NodeID], requestID uint32, _ ids.ID) {} vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { @@ -1542,62 +1363,57 @@ func TestEngineUndeclaredDependencyDeadlock(t *testing.T) { return nil, errUnknownBlock } } - if err := te.issue(context.Background(), validBlk); err != nil { - t.Fatal(err) - } + require.NoError(te.issue( + context.Background(), + te.Ctx.NodeID, + validBlk, + false, + te.metrics.issued.WithLabelValues(unknownSource), + )) sender.SendPushQueryF = nil - if err := te.issue(context.Background(), invalidBlk); err != nil { - t.Fatal(err) - } - - if err := te.Chits(context.Background(), vdr, *reqID, []ids.ID{invalidBlkID}, nil); err != nil { - t.Fatal(err) - } - - if status := validBlk.Status(); status != choices.Accepted { - t.Log(status) - t.Fatalf("Should have bubbled invalid votes to the valid parent") - } + require.NoError(te.issue( + context.Background(), + te.Ctx.NodeID, + invalidBlk, + false, + te.metrics.issued.WithLabelValues(unknownSource), + )) + require.NoError(te.Chits(context.Background(), vdr, *reqID, invalidBlkID, invalidBlkID, invalidBlkID)) + + require.Equal(choices.Accepted, validBlk.Status()) } func TestEngineGossip(t *testing.T) { - _, _, sender, vm, te, gBlk := setupDefaultConfig(t) + require := require.New(t) + + nodeID, _, sender, vm, te, gBlk := setupDefaultConfig(t) vm.LastAcceptedF = func(context.Context) (ids.ID, error) { return gBlk.ID(), nil } vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { - if blkID == gBlk.ID() { - return gBlk, nil - } - 
t.Fatal(errUnknownBlock) - return nil, errUnknownBlock + require.Equal(gBlk.ID(), blkID) + return gBlk, nil } - called := new(bool) - sender.SendGossipF = func(_ context.Context, blkBytes []byte) { - *called = true - if !bytes.Equal(blkBytes, gBlk.Bytes()) { - t.Fatal(errUnknownBytes) - } + var calledSendPullQuery bool + sender.SendPullQueryF = func(_ context.Context, nodeIDs set.Set[ids.NodeID], _ uint32, _ ids.ID, _ uint64) { + calledSendPullQuery = true + require.Equal(set.Of(nodeID), nodeIDs) } - if err := te.Gossip(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(te.Gossip(context.Background())) - if !*called { - t.Fatalf("Should have gossiped the block") - } + require.True(calledSendPullQuery) } func TestEngineInvalidBlockIgnoredFromUnexpectedPeer(t *testing.T) { + require := require.New(t) + vdr, vdrs, sender, vm, te, gBlk := setupDefaultConfig(t) secondVdr := ids.GenerateTestNodeID() - if err := vdrs.Add(secondVdr, nil, ids.Empty, 1); err != nil { - t.Fatal(err) - } + require.NoError(vdrs.AddStaker(te.Ctx.SubnetID, secondVdr, nil, ids.Empty, 1)) sender.Default(true) @@ -1646,22 +1462,14 @@ func TestEngineInvalidBlockIgnoredFromUnexpectedPeer(t *testing.T) { reqID := new(uint32) sender.SendGetF = func(_ context.Context, reqVdr ids.NodeID, requestID uint32, blkID ids.ID) { *reqID = requestID - if reqVdr != vdr { - t.Fatalf("Wrong validator requested") - } - if blkID != missingBlk.ID() { - t.Fatalf("Wrong block requested") - } + require.Equal(vdr, reqVdr) + require.Equal(missingBlk.ID(), blkID) } sender.CantSendChits = false - if err := te.PushQuery(context.Background(), vdr, 0, pendingBlk.Bytes()); err != nil { - t.Fatal(err) - } + require.NoError(te.PushQuery(context.Background(), vdr, 0, pendingBlk.Bytes(), 0)) - if err := te.Put(context.Background(), secondVdr, *reqID, []byte{3}); err != nil { - t.Fatal(err) - } + require.NoError(te.Put(context.Background(), secondVdr, *reqID, []byte{3})) *parsed = false vm.ParseBlockF = func(_ 
context.Context, b []byte) (snowman.Block, error) { @@ -1684,21 +1492,18 @@ func TestEngineInvalidBlockIgnoredFromUnexpectedPeer(t *testing.T) { return nil, errUnknownBlock } } - sender.CantSendPushQuery = false + sender.CantSendPullQuery = false missingBlk.StatusV = choices.Processing - if err := te.Put(context.Background(), vdr, *reqID, missingBlk.Bytes()); err != nil { - t.Fatal(err) - } + require.NoError(te.Put(context.Background(), vdr, *reqID, missingBlk.Bytes())) - pref := te.Consensus.Preference() - if pref != pendingBlk.ID() { - t.Fatalf("Shouldn't have abandoned the pending block") - } + require.Equal(pendingBlk.ID(), te.Consensus.Preference()) } func TestEnginePushQueryRequestIDConflict(t *testing.T) { + require := require.New(t) + vdr, _, sender, vm, te, gBlk := setupDefaultConfig(t) sender.Default(true) @@ -1748,25 +1553,17 @@ func TestEnginePushQueryRequestIDConflict(t *testing.T) { reqID := new(uint32) sender.SendGetF = func(_ context.Context, reqVdr ids.NodeID, requestID uint32, blkID ids.ID) { *reqID = requestID - if reqVdr != vdr { - t.Fatalf("Wrong validator requested") - } - if blkID != missingBlk.ID() { - t.Fatalf("Wrong block requested") - } + require.Equal(vdr, reqVdr) + require.Equal(missingBlk.ID(), blkID) } sender.CantSendChits = false - if err := te.PushQuery(context.Background(), vdr, 0, pendingBlk.Bytes()); err != nil { - t.Fatal(err) - } + require.NoError(te.PushQuery(context.Background(), vdr, 0, pendingBlk.Bytes(), 0)) sender.SendGetF = nil sender.CantSendGet = false - if err := te.PushQuery(context.Background(), vdr, *reqID, []byte{3}); err != nil { - t.Fatal(err) - } + require.NoError(te.PushQuery(context.Background(), vdr, *reqID, []byte{3}, 0)) *parsed = false vm.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { @@ -1789,29 +1586,24 @@ func TestEnginePushQueryRequestIDConflict(t *testing.T) { return nil, errUnknownBlock } } - sender.CantSendPushQuery = false + sender.CantSendPullQuery = false - if err := 
te.Put(context.Background(), vdr, *reqID, missingBlk.Bytes()); err != nil { - t.Fatal(err) - } + require.NoError(te.Put(context.Background(), vdr, *reqID, missingBlk.Bytes())) - pref := te.Consensus.Preference() - if pref != pendingBlk.ID() { - t.Fatalf("Shouldn't have abandoned the pending block") - } + require.Equal(pendingBlk.ID(), te.Consensus.Preference()) } func TestEngineAggressivePolling(t *testing.T) { - engCfg := DefaultConfigs() + require := require.New(t) + + engCfg := DefaultConfig(t) engCfg.Params.ConcurrentRepolls = 2 - vals := validators.NewSet() + vals := validators.NewManager() engCfg.Validators = vals vdr := ids.GenerateTestNodeID() - if err := vals.Add(vdr, nil, ids.Empty, 1); err != nil { - t.Fatal(err) - } + require.NoError(vals.AddStaker(engCfg.Ctx.SubnetID, vdr, nil, ids.Empty, 1)) sender := &common.SenderTest{T: t} engCfg.Sender = sender @@ -1834,20 +1626,14 @@ func TestEngineAggressivePolling(t *testing.T) { return gBlk.ID(), nil } vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { - if blkID != gBlk.ID() { - t.Fatalf("Wrong block requested") - } + require.Equal(gBlk.ID(), blkID) return gBlk, nil } te, err := newTransitive(engCfg) - if err != nil { - t.Fatal(err) - } + require.NoError(err) - if err := te.Start(context.Background(), 0); err != nil { - t.Fatal(err) - } + require.NoError(te.Start(context.Background(), 0)) vm.GetBlockF = nil vm.LastAcceptedF = nil @@ -1885,55 +1671,40 @@ func TestEngineAggressivePolling(t *testing.T) { } } - numPushed := new(int) - sender.SendPushQueryF = func(context.Context, set.Set[ids.NodeID], uint32, []byte) { - *numPushed++ - } - numPulled := new(int) - sender.SendPullQueryF = func(context.Context, set.Set[ids.NodeID], uint32, ids.ID) { + sender.SendPullQueryF = func(context.Context, set.Set[ids.NodeID], uint32, ids.ID, uint64) { *numPulled++ } - if err := te.Put(context.Background(), vdr, 0, pendingBlk.Bytes()); err != nil { - t.Fatal(err) - } + 
require.NoError(te.Put(context.Background(), vdr, 0, pendingBlk.Bytes())) - if *numPushed != 1 { - t.Fatalf("Should have initially sent a push query") - } - - if *numPulled != 1 { - t.Fatalf("Should have sent an additional pull query") - } + require.Equal(2, *numPulled) } func TestEngineDoubleChit(t *testing.T) { - engCfg := DefaultConfigs() + require := require.New(t) + + engCfg := DefaultConfig(t) engCfg.Params = snowball.Parameters{ - K: 2, - Alpha: 2, - BetaVirtuous: 1, - BetaRogue: 2, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - MixedQueryNumPushNonVdr: 2, - } - - vals := validators.NewSet() + K: 2, + AlphaPreference: 2, + AlphaConfidence: 2, + BetaVirtuous: 1, + BetaRogue: 2, + ConcurrentRepolls: 1, + OptimalProcessing: 1, + MaxOutstandingItems: 1, + MaxItemProcessingTime: 1, + } + + vals := validators.NewManager() engCfg.Validators = vals vdr0 := ids.GenerateTestNodeID() vdr1 := ids.GenerateTestNodeID() - if err := vals.Add(vdr0, nil, ids.Empty, 1); err != nil { - t.Fatal(err) - } - if err := vals.Add(vdr1, nil, ids.Empty, 1); err != nil { - t.Fatal(err) - } + require.NoError(vals.AddStaker(engCfg.Ctx.SubnetID, vdr0, nil, ids.Empty, 1)) + require.NoError(vals.AddStaker(engCfg.Ctx.SubnetID, vdr1, nil, ids.Empty, 1)) sender := &common.SenderTest{T: t} engCfg.Sender = sender @@ -1957,21 +1728,14 @@ func TestEngineDoubleChit(t *testing.T) { return gBlk.ID(), nil } vm.GetBlockF = func(_ context.Context, id ids.ID) (snowman.Block, error) { - if id == gBlk.ID() { - return gBlk, nil - } - t.Fatalf("Unknown block") - panic("Should have errored") + require.Equal(gBlk.ID(), id) + return gBlk, nil } te, err := newTransitive(engCfg) - if err != nil { - t.Fatal(err) - } + require.NoError(err) - if err := te.Start(context.Background(), 0); err != nil { - t.Fatal(err) - } + require.NoError(te.Start(context.Background(), 0)) vm.LastAcceptedF = nil @@ -1987,25 +1751,22 @@ func TestEngineDoubleChit(t *testing.T) { 
queried := new(bool) queryRequestID := new(uint32) - sender.SendPushQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, blkBytes []byte) { - if *queried { - t.Fatalf("Asked multiple times") - } + sender.SendPullQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, blkID ids.ID, requestedHeight uint64) { + require.False((*queried)) *queried = true *queryRequestID = requestID - vdrSet := set.Set[ids.NodeID]{} - vdrSet.Add(vdr0, vdr1) - if !inVdrs.Equals(vdrSet) { - t.Fatalf("Asking wrong validator for preference") - } - if !bytes.Equal(blk.Bytes(), blkBytes) { - t.Fatalf("Asking for wrong block") - } - } - - if err := te.issue(context.Background(), blk); err != nil { - t.Fatal(err) + vdrSet := set.Of(vdr0, vdr1) + require.Equal(vdrSet, inVdrs) + require.Equal(blk.ID(), blkID) + require.Equal(uint64(1), requestedHeight) } + require.NoError(te.issue( + context.Background(), + te.Ctx.NodeID, + blk, + false, + te.metrics.issued.WithLabelValues(unknownSource), + )) vm.GetBlockF = func(_ context.Context, id ids.ID) (snowman.Block, error) { switch id { @@ -2014,54 +1775,36 @@ func TestEngineDoubleChit(t *testing.T) { case blk.ID(): return blk, nil } - t.Fatalf("Unknown block") - panic("Should have errored") - } - - blkSet := []ids.ID{blk.ID()} - - if status := blk.Status(); status != choices.Processing { - t.Fatalf("Wrong status: %s ; expected: %s", status, choices.Processing) - } - - if err := te.Chits(context.Background(), vdr0, *queryRequestID, blkSet, nil); err != nil { - t.Fatal(err) - } - - if status := blk.Status(); status != choices.Processing { - t.Fatalf("Wrong status: %s ; expected: %s", status, choices.Processing) + require.FailNow(errUnknownBlock.Error()) + return nil, errUnknownBlock } - if err := te.Chits(context.Background(), vdr0, *queryRequestID, blkSet, nil); err != nil { - t.Fatal(err) - } + require.Equal(choices.Processing, blk.Status()) - if status := blk.Status(); status != choices.Processing { - 
t.Fatalf("Wrong status: %s ; expected: %s", status, choices.Processing) - } + require.NoError(te.Chits(context.Background(), vdr0, *queryRequestID, blk.ID(), blk.ID(), blk.ID())) + require.Equal(choices.Processing, blk.Status()) - if err := te.Chits(context.Background(), vdr1, *queryRequestID, blkSet, nil); err != nil { - t.Fatal(err) - } + require.NoError(te.Chits(context.Background(), vdr0, *queryRequestID, blk.ID(), blk.ID(), blk.ID())) + require.Equal(choices.Processing, blk.Status()) - if status := blk.Status(); status != choices.Accepted { - t.Fatalf("Wrong status: %s ; expected: %s", status, choices.Accepted) - } + require.NoError(te.Chits(context.Background(), vdr1, *queryRequestID, blk.ID(), blk.ID(), blk.ID())) + require.Equal(choices.Accepted, blk.Status()) } func TestEngineBuildBlockLimit(t *testing.T) { - engCfg := DefaultConfigs() + require := require.New(t) + + engCfg := DefaultConfig(t) engCfg.Params.K = 1 - engCfg.Params.Alpha = 1 + engCfg.Params.AlphaPreference = 1 + engCfg.Params.AlphaConfidence = 1 engCfg.Params.OptimalProcessing = 1 - vals := validators.NewSet() + vals := validators.NewManager() engCfg.Validators = vals vdr := ids.GenerateTestNodeID() - if err := vals.Add(vdr, nil, ids.Empty, 1); err != nil { - t.Fatal(err) - } + require.NoError(vals.AddStaker(engCfg.Ctx.SubnetID, vdr, nil, ids.Empty, 1)) sender := &common.SenderTest{T: t} engCfg.Sender = sender @@ -2084,20 +1827,14 @@ func TestEngineBuildBlockLimit(t *testing.T) { return gBlk.ID(), nil } vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { - if blkID != gBlk.ID() { - t.Fatalf("Wrong block requested") - } + require.Equal(gBlk.ID(), blkID) return gBlk, nil } te, err := newTransitive(engCfg) - if err != nil { - t.Fatal(err) - } + require.NoError(err) - if err := te.Start(context.Background(), 0); err != nil { - t.Fatal(err) - } + require.NoError(te.Start(context.Background(), 0)) vm.GetBlockF = nil vm.LastAcceptedF = nil @@ -2126,17 +1863,12 @@ func 
TestEngineBuildBlockLimit(t *testing.T) { queried bool reqID uint32 ) - sender.SendPushQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], rID uint32, _ []byte) { + sender.SendPushQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], rID uint32, _ []byte, _ uint64) { reqID = rID - if queried { - t.Fatalf("Asked multiple times") - } + require.False(queried) queried = true - vdrSet := set.Set[ids.NodeID]{} - vdrSet.Add(vdr) - if !inVdrs.Equals(vdrSet) { - t.Fatalf("Asking wrong validator for preference") - } + vdrSet := set.Of(vdr) + require.Equal(vdrSet, inVdrs) } vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { @@ -2150,29 +1882,19 @@ func TestEngineBuildBlockLimit(t *testing.T) { blkToReturn := 0 vm.BuildBlockF = func(context.Context) (snowman.Block, error) { - if blkToReturn >= len(blks) { - t.Fatalf("Built too many blocks") - } + require.Less(blkToReturn, len(blks)) blk := blks[blkToReturn] blkToReturn++ return blk, nil } - if err := te.Notify(context.Background(), common.PendingTxs); err != nil { - t.Fatal(err) - } + require.NoError(te.Notify(context.Background(), common.PendingTxs)) - if !queried { - t.Fatalf("Should have sent a query to the peer") - } + require.True(queried) queried = false - if err := te.Notify(context.Background(), common.PendingTxs); err != nil { - t.Fatal(err) - } + require.NoError(te.Notify(context.Background(), common.PendingTxs)) - if queried { - t.Fatalf("Shouldn't have sent a query to the peer") - } + require.False(queried) vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { @@ -2185,16 +1907,14 @@ func TestEngineBuildBlockLimit(t *testing.T) { } } - if err := te.Chits(context.Background(), vdr, reqID, []ids.ID{blk0.ID()}, nil); err != nil { - t.Fatal(err) - } + require.NoError(te.Chits(context.Background(), vdr, reqID, blk0.ID(), blk0.ID(), blk0.ID())) - if !queried { - t.Fatalf("Should have sent a query to the peer") - } + require.True(queried) } 
func TestEngineReceiveNewRejectedBlock(t *testing.T) { + require := require.New(t) + vdr, _, sender, vm, te, gBlk := setupDefaultConfig(t) acceptedBlk := &snowman.TestBlock{ @@ -2234,8 +1954,8 @@ func TestEngineReceiveNewRejectedBlock(t *testing.T) { case bytes.Equal(b, pendingBlk.Bytes()): return pendingBlk, nil default: - t.Fatalf("Unknown block bytes") - return nil, nil + require.FailNow(errUnknownBlock.Error()) + return nil, errUnknownBlock } } @@ -2254,24 +1974,18 @@ func TestEngineReceiveNewRejectedBlock(t *testing.T) { asked bool reqID uint32 ) - sender.SendPushQueryF = func(_ context.Context, _ set.Set[ids.NodeID], rID uint32, blkBytes []byte) { + sender.SendPullQueryF = func(_ context.Context, _ set.Set[ids.NodeID], rID uint32, _ ids.ID, _ uint64) { asked = true reqID = rID } - if err := te.Put(context.Background(), vdr, 0, acceptedBlk.Bytes()); err != nil { - t.Fatal(err) - } + require.NoError(te.Put(context.Background(), vdr, 0, acceptedBlk.Bytes())) - if !asked { - t.Fatalf("Didn't query for the new block") - } + require.True(asked) - if err := te.Chits(context.Background(), vdr, reqID, []ids.ID{acceptedBlk.ID()}, nil); err != nil { - t.Fatal(err) - } + require.NoError(te.Chits(context.Background(), vdr, reqID, acceptedBlk.ID(), acceptedBlk.ID(), acceptedBlk.ID())) - sender.SendPushQueryF = nil + sender.SendPullQueryF = nil asked = false sender.SendGetF = func(_ context.Context, _ ids.NodeID, rID uint32, _ ids.ID) { @@ -2279,26 +1993,20 @@ func TestEngineReceiveNewRejectedBlock(t *testing.T) { reqID = rID } - if err := te.Put(context.Background(), vdr, 0, pendingBlk.Bytes()); err != nil { - t.Fatal(err) - } + require.NoError(te.Put(context.Background(), vdr, 0, pendingBlk.Bytes())) - if !asked { - t.Fatalf("Didn't request the missing block") - } + require.True(asked) rejectedBlk.StatusV = choices.Rejected - if err := te.Put(context.Background(), vdr, reqID, rejectedBlk.Bytes()); err != nil { - t.Fatal(err) - } + 
require.NoError(te.Put(context.Background(), vdr, reqID, rejectedBlk.Bytes())) - if te.blkReqs.Len() != 0 { - t.Fatalf("Should have finished all requests") - } + require.Zero(te.blkReqs.Len()) } func TestEngineRejectionAmplification(t *testing.T) { + require := require.New(t) + vdr, _, sender, vm, te, gBlk := setupDefaultConfig(t) acceptedBlk := &snowman.TestBlock{ @@ -2338,8 +2046,8 @@ func TestEngineRejectionAmplification(t *testing.T) { case bytes.Equal(b, pendingBlk.Bytes()): return pendingBlk, nil default: - t.Fatalf("Unknown block bytes") - return nil, nil + require.FailNow(errUnknownBlock.Error()) + return nil, errUnknownBlock } } @@ -2358,18 +2066,14 @@ func TestEngineRejectionAmplification(t *testing.T) { queried bool reqID uint32 ) - sender.SendPushQueryF = func(_ context.Context, _ set.Set[ids.NodeID], rID uint32, _ []byte) { + sender.SendPullQueryF = func(_ context.Context, _ set.Set[ids.NodeID], rID uint32, _ ids.ID, _ uint64) { queried = true reqID = rID } - if err := te.Put(context.Background(), vdr, 0, acceptedBlk.Bytes()); err != nil { - t.Fatal(err) - } + require.NoError(te.Put(context.Background(), vdr, 0, acceptedBlk.Bytes())) - if !queried { - t.Fatalf("Didn't query for the new block") - } + require.True(queried) vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { @@ -2382,52 +2086,38 @@ func TestEngineRejectionAmplification(t *testing.T) { } } - if err := te.Chits(context.Background(), vdr, reqID, []ids.ID{acceptedBlk.ID()}, nil); err != nil { - t.Fatal(err) - } + require.NoError(te.Chits(context.Background(), vdr, reqID, acceptedBlk.ID(), acceptedBlk.ID(), acceptedBlk.ID())) - if !te.Consensus.Finalized() { - t.Fatalf("Should have finalized the consensus instance") - } + require.Zero(te.Consensus.NumProcessing()) queried = false var asked bool - sender.SendPushQueryF = func(context.Context, set.Set[ids.NodeID], uint32, []byte) { + sender.SendPullQueryF = func(context.Context, set.Set[ids.NodeID], 
uint32, ids.ID, uint64) { queried = true } sender.SendGetF = func(_ context.Context, _ ids.NodeID, rID uint32, blkID ids.ID) { asked = true reqID = rID - if blkID != rejectedBlk.ID() { - t.Fatalf("requested %s but should have requested %s", blkID, rejectedBlk.ID()) - } + require.Equal(rejectedBlk.ID(), blkID) } - if err := te.Put(context.Background(), vdr, 0, pendingBlk.Bytes()); err != nil { - t.Fatal(err) - } + require.NoError(te.Put(context.Background(), vdr, 0, pendingBlk.Bytes())) - if queried { - t.Fatalf("Queried for the pending block") - } - if !asked { - t.Fatalf("Should have asked for the missing block") - } + require.False(queried) + require.True(asked) rejectedBlk.StatusV = choices.Processing - if err := te.Put(context.Background(), vdr, reqID, rejectedBlk.Bytes()); err != nil { - t.Fatal(err) - } + require.NoError(te.Put(context.Background(), vdr, reqID, rejectedBlk.Bytes())) - if queried { - t.Fatalf("Queried for the rejected block") - } + require.False(queried) } // Test that the node will not issue a block into consensus that it knows will // be rejected because the parent is rejected. 
func TestEngineTransitiveRejectionAmplificationDueToRejectedParent(t *testing.T) { + require := require.New(t) + vdr, _, sender, vm, te, gBlk := setupDefaultConfig(t) acceptedBlk := &snowman.TestBlock{ @@ -2468,8 +2158,8 @@ func TestEngineTransitiveRejectionAmplificationDueToRejectedParent(t *testing.T) case bytes.Equal(b, pendingBlk.Bytes()): return pendingBlk, nil default: - t.Fatalf("Unknown block bytes") - return nil, nil + require.FailNow(errUnknownBlock.Error()) + return nil, errUnknownBlock } } @@ -2490,43 +2180,31 @@ func TestEngineTransitiveRejectionAmplificationDueToRejectedParent(t *testing.T) queried bool reqID uint32 ) - sender.SendPushQueryF = func(_ context.Context, _ set.Set[ids.NodeID], rID uint32, _ []byte) { + sender.SendPullQueryF = func(_ context.Context, _ set.Set[ids.NodeID], rID uint32, _ ids.ID, _ uint64) { queried = true reqID = rID } - if err := te.Put(context.Background(), vdr, 0, acceptedBlk.Bytes()); err != nil { - t.Fatal(err) - } + require.NoError(te.Put(context.Background(), vdr, 0, acceptedBlk.Bytes())) - if !queried { - t.Fatalf("Didn't query for the new block") - } + require.True(queried) - if err := te.Chits(context.Background(), vdr, reqID, []ids.ID{acceptedBlk.ID()}, nil); err != nil { - t.Fatal(err) - } + require.NoError(te.Chits(context.Background(), vdr, reqID, acceptedBlk.ID(), acceptedBlk.ID(), acceptedBlk.ID())) - if !te.Consensus.Finalized() { - t.Fatalf("Should have finalized the consensus instance") - } + require.Zero(te.Consensus.NumProcessing()) - if err := te.Put(context.Background(), vdr, 0, pendingBlk.Bytes()); err != nil { - t.Fatal(err) - } + require.NoError(te.Put(context.Background(), vdr, 0, pendingBlk.Bytes())) - if !te.Consensus.Finalized() { - t.Fatalf("Should have finalized the consensus instance") - } + require.Zero(te.Consensus.NumProcessing()) - if len(te.pending) != 0 { - t.Fatalf("Shouldn't have any pending blocks") - } + require.Empty(te.pending) } // Test that the node will not issue a block into 
consensus that it knows will // be rejected because the parent is failing verification. func TestEngineTransitiveRejectionAmplificationDueToInvalidParent(t *testing.T) { + require := require.New(t) + vdr, _, sender, vm, te, gBlk := setupDefaultConfig(t) acceptedBlk := &snowman.TestBlock{ @@ -2568,8 +2246,8 @@ func TestEngineTransitiveRejectionAmplificationDueToInvalidParent(t *testing.T) case bytes.Equal(b, pendingBlk.Bytes()): return pendingBlk, nil default: - t.Fatalf("Unknown block bytes") - return nil, nil + require.FailNow(errUnknownBlock.Error()) + return nil, errUnknownBlock } } @@ -2586,18 +2264,13 @@ func TestEngineTransitiveRejectionAmplificationDueToInvalidParent(t *testing.T) queried bool reqID uint32 ) - sender.SendPushQueryF = func(_ context.Context, _ set.Set[ids.NodeID], rID uint32, blkBytes []byte) { + sender.SendPullQueryF = func(_ context.Context, _ set.Set[ids.NodeID], rID uint32, _ ids.ID, _ uint64) { queried = true reqID = rID } - if err := te.Put(context.Background(), vdr, 0, acceptedBlk.Bytes()); err != nil { - t.Fatal(err) - } - - if !queried { - t.Fatalf("Didn't query for the new block") - } + require.NoError(te.Put(context.Background(), vdr, 0, acceptedBlk.Bytes())) + require.True(queried) vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { @@ -2612,29 +2285,17 @@ func TestEngineTransitiveRejectionAmplificationDueToInvalidParent(t *testing.T) } } - if err := te.Chits(context.Background(), vdr, reqID, []ids.ID{acceptedBlk.ID()}, nil); err != nil { - t.Fatal(err) - } - - if !te.Consensus.Finalized() { - t.Fatalf("Should have finalized the consensus instance") - } - - if err := te.Put(context.Background(), vdr, 0, pendingBlk.Bytes()); err != nil { - t.Fatal(err) - } - - if !te.Consensus.Finalized() { - t.Fatalf("Should have finalized the consensus instance") - } + require.NoError(te.Chits(context.Background(), vdr, reqID, acceptedBlk.ID(), acceptedBlk.ID(), acceptedBlk.ID())) - if len(te.pending) != 0 
{ - t.Fatalf("Shouldn't have any pending blocks") - } + require.NoError(te.Put(context.Background(), vdr, 0, pendingBlk.Bytes())) + require.Zero(te.Consensus.NumProcessing()) + require.Empty(te.pending) } // Test that the node will not gossip a block that isn't preferred. func TestEngineNonPreferredAmplification(t *testing.T) { + require := require.New(t) + vdr, _, sender, vm, te, gBlk := setupDefaultConfig(t) preferredBlk := &snowman.TestBlock{ @@ -2663,8 +2324,8 @@ func TestEngineNonPreferredAmplification(t *testing.T) { case bytes.Equal(b, nonPreferredBlk.Bytes()): return nonPreferredBlk, nil default: - t.Fatalf("Unknown block bytes") - return nil, nil + require.FailNow(errUnknownBlock.Error()) + return nil, errUnknownBlock } } @@ -2677,24 +2338,18 @@ func TestEngineNonPreferredAmplification(t *testing.T) { } } - sender.SendPushQueryF = func(_ context.Context, _ set.Set[ids.NodeID], _ uint32, blkBytes []byte) { - if bytes.Equal(nonPreferredBlk.Bytes(), blkBytes) { - t.Fatalf("gossiped non-preferred block") - } + sender.SendPushQueryF = func(_ context.Context, _ set.Set[ids.NodeID], _ uint32, blkBytes []byte, requestedHeight uint64) { + require.NotEqual(nonPreferredBlk.Bytes(), blkBytes) + require.Equal(uint64(1), requestedHeight) } - sender.SendPullQueryF = func(_ context.Context, _ set.Set[ids.NodeID], _ uint32, blkID ids.ID) { - if blkID == nonPreferredBlk.ID() { - t.Fatalf("gossiped non-preferred block") - } + sender.SendPullQueryF = func(_ context.Context, _ set.Set[ids.NodeID], _ uint32, blkID ids.ID, requestedHeight uint64) { + require.NotEqual(nonPreferredBlk.ID(), blkID) + require.Equal(uint64(1), requestedHeight) } - if err := te.Put(context.Background(), vdr, 0, preferredBlk.Bytes()); err != nil { - t.Fatal(err) - } + require.NoError(te.Put(context.Background(), vdr, 0, preferredBlk.Bytes())) - if err := te.Put(context.Background(), vdr, 0, nonPreferredBlk.Bytes()); err != nil { - t.Fatal(err) - } + require.NoError(te.Put(context.Background(), vdr, 0, 
nonPreferredBlk.Bytes())) } // Test that in the following scenario, if block B fails verification, votes @@ -2709,7 +2364,10 @@ func TestEngineNonPreferredAmplification(t *testing.T) { // | // B func TestEngineBubbleVotesThroughInvalidBlock(t *testing.T) { + require := require.New(t) + vdr, _, sender, vm, te, gBlk := setupDefaultConfig(t) + expectedVdrSet := set.Of(vdr) // [blk1] is a child of [gBlk] and currently passes verification blk1 := &snowman.TestBlock{ @@ -2742,13 +2400,14 @@ func TestEngineBubbleVotesThroughInvalidBlock(t *testing.T) { case bytes.Equal(b, blk2.Bytes()): return blk2, nil default: - t.Fatalf("Unknown block bytes") - return nil, nil + require.FailNow(errUnknownBlock.Error()) + return nil, errUnknownBlock } } - // The VM should only be able to retrieve [gBlk] from storage - // TODO GetBlockF should be updated after blocks are verified/accepted + // for now, this VM should only be able to retrieve [gBlk] from storage + // this "GetBlockF" will be updated after blocks are verified/accepted + // in the following tests vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case gBlk.ID(): @@ -2762,54 +2421,40 @@ func TestEngineBubbleVotesThroughInvalidBlock(t *testing.T) { reqID := new(uint32) sender.SendGetF = func(_ context.Context, inVdr ids.NodeID, requestID uint32, blkID ids.ID) { *reqID = requestID - if *asked { - t.Fatalf("Asked multiple times") - } - if blkID != blk1.ID() { - t.Fatalf("Expected engine to request blk1") - } - if inVdr != vdr { - t.Fatalf("Expected engine to request blk2 from vdr") - } + require.False(*asked) + require.Equal(blk1.ID(), blkID) + require.Equal(vdr, inVdr) *asked = true } - // Receive Gossip message for [blk2] first and expect the sender to issue a Get request for - // its ancestor: [blk1]. 
- if err := te.Put(context.Background(), vdr, constants.GossipMsgRequestID, blk2.Bytes()); err != nil { - t.Fatal(err) - } - - if !*asked { - t.Fatalf("Didn't ask for missing blk1") - } + // This engine receives a Gossip message for [blk2] which was "unknown" in this engine. + // The engine thus learns about its ancestor [blk1] and should send a Get request for it. + // (see above for expected "Get" request) + require.NoError(te.Put(context.Background(), vdr, constants.GossipMsgRequestID, blk2.Bytes())) + require.True(*asked) // Prepare to PushQuery [blk1] after our Get request is fulfilled. We should not PushQuery // [blk2] since it currently fails verification. queried := new(bool) queryRequestID := new(uint32) - sender.SendPushQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, blkBytes []byte) { - if *queried { - t.Fatalf("Asked multiple times") - } + sender.SendPullQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, blkID ids.ID, requestedHeight uint64) { + require.False(*queried) *queried = true - *queryRequestID = requestID - vdrSet := set.Set[ids.NodeID]{} - vdrSet.Add(vdr) - if !inVdrs.Equals(vdrSet) { - t.Fatalf("Asking wrong validator for preference") - } - if !bytes.Equal(blk1.Bytes(), blkBytes) { - t.Fatalf("Asking for wrong block") - } - } - - // Answer the request, this should allow [blk1] to be issued and cause [blk2] to - // fail verification. - if err := te.Put(context.Background(), vdr, *reqID, blk1.Bytes()); err != nil { - t.Fatal(err) - } - // now blk1 is verified, vm can return it + *queryRequestID = requestID + vdrSet := set.Of(vdr) + require.Equal(vdrSet, inVdrs) + require.Equal(blk1.ID(), blkID) + require.Equal(uint64(1), requestedHeight) + } + // This engine now handles the response to the "Get" request. This should cause [blk1] to be issued + // which will result in attempting to issue [blk2]. However, [blk2] should fail verification and be dropped. 
+ // By issuing [blk1], this node should fire a "PushQuery" request for [blk1]. + // (see above for expected "PushQuery" request) + require.NoError(te.Put(context.Background(), vdr, *reqID, blk1.Bytes())) + require.True(*asked) + require.True(*queried, "Didn't query the newly issued blk1") + + // now [blk1] is verified, vm can return it vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case gBlk.ID(): @@ -2821,52 +2466,28 @@ func TestEngineBubbleVotesThroughInvalidBlock(t *testing.T) { } } - if !*queried { - t.Fatalf("Didn't ask for preferences regarding blk1") - } - sendReqID := new(uint32) reqVdr := new(ids.NodeID) // Update GetF to produce a more detailed error message in the case that receiving a Chits // message causes us to send another Get request. sender.SendGetF = func(_ context.Context, inVdr ids.NodeID, requestID uint32, blkID ids.ID) { - switch blkID { - case blk1.ID(): - t.Fatal("Unexpectedly sent a Get request for blk1") - case blk2.ID(): - *sendReqID = requestID - *reqVdr = inVdr - return - default: - t.Fatal("Unexpectedly sent a Get request for unknown block") - } - } + require.Equal(blk2.ID(), blkID) - sender.SendPullQueryF = func(_ context.Context, _ set.Set[ids.NodeID], _ uint32, blkID ids.ID) { - switch blkID { - case blk1.ID(): - t.Fatal("Unexpectedly sent a PullQuery request for blk1") - case blk2.ID(): - t.Fatal("Unexpectedly sent a PullQuery request for blk2") - default: - t.Fatal("Unexpectedly sent a PullQuery request for unknown block") - } + *sendReqID = requestID + *reqVdr = inVdr } - // Now we are expecting a Chits message, and we receive it for blk2 instead of blk1 - // The votes should be bubbled through blk2 despite the fact that it is failing verification. - if err := te.Chits(context.Background(), vdr, *queryRequestID, []ids.ID{blk2.ID()}, nil); err != nil { - t.Fatal(err) - } + // Now we are expecting a Chits message, and we receive it for [blk2] + // instead of [blk1]. 
This will cause the node to again request [blk2]. + require.NoError(te.Chits(context.Background(), vdr, *queryRequestID, blk2.ID(), blk1.ID(), blk2.ID())) - if err := te.Put(context.Background(), *reqVdr, *sendReqID, blk2.Bytes()); err != nil { - t.Fatal(err) - } + // The votes should be bubbled through [blk2] despite the fact that it is + // failing verification. + require.NoError(te.Put(context.Background(), *reqVdr, *sendReqID, blk2.Bytes())) // The vote should be bubbled through [blk2], such that [blk1] gets marked as Accepted. - if blk1.Status() != choices.Accepted { - t.Fatalf("Expected blk1 to be Accepted, but found status: %s", blk1.Status()) - } + require.Equal(choices.Accepted, blk1.Status()) + require.Equal(choices.Processing, blk2.Status()) // Now that [blk1] has been marked as Accepted, [blk2] can pass verification. blk2.VerifyV = nil @@ -2884,38 +2505,21 @@ func TestEngineBubbleVotesThroughInvalidBlock(t *testing.T) { } *queried = false // Prepare to PushQuery [blk2] after receiving a Gossip message with [blk2]. - sender.SendPushQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, blkBytes []byte) { - if *queried { - t.Fatalf("Asked multiple times") - } + sender.SendPullQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, blkID ids.ID, requestedHeight uint64) { + require.False(*queried) *queried = true *queryRequestID = requestID - vdrSet := set.Set[ids.NodeID]{} - vdrSet.Add(vdr) - if !inVdrs.Equals(vdrSet) { - t.Fatalf("Asking wrong validator for preference") - } - if !bytes.Equal(blk2.Bytes(), blkBytes) { - t.Fatalf("Asking for wrong block") - } + require.Equal(expectedVdrSet, inVdrs) + require.Equal(blk2.ID(), blkID) + require.Equal(uint64(2), requestedHeight) } // Expect that the Engine will send a PushQuery after receiving this Gossip message for [blk2]. 
- if err := te.Put(context.Background(), vdr, constants.GossipMsgRequestID, blk2.Bytes()); err != nil { - t.Fatal(err) - } - - if !*queried { - t.Fatalf("Didn't ask for preferences regarding blk2") - } + require.NoError(te.Put(context.Background(), vdr, constants.GossipMsgRequestID, blk2.Bytes())) + require.True(*queried) // After a single vote for [blk2], it should be marked as accepted. - if err := te.Chits(context.Background(), vdr, *queryRequestID, []ids.ID{blk2.ID()}, nil); err != nil { - t.Fatal(err) - } - - if blk2.Status() != choices.Accepted { - t.Fatalf("Expected blk2 to be Accepted, but found status: %s", blk2.Status()) - } + require.NoError(te.Chits(context.Background(), vdr, *queryRequestID, blk2.ID(), blk1.ID(), blk2.ID())) + require.Equal(choices.Accepted, blk2.Status()) } // Test that in the following scenario, if block B fails verification, votes @@ -2932,7 +2536,10 @@ func TestEngineBubbleVotesThroughInvalidBlock(t *testing.T) { // | // C func TestEngineBubbleVotesThroughInvalidChain(t *testing.T) { + require := require.New(t) + vdr, _, sender, vm, te, gBlk := setupDefaultConfig(t) + expectedVdrSet := set.Of(vdr) // [blk1] is a child of [gBlk] and currently passes verification blk1 := &snowman.TestBlock{ @@ -2978,8 +2585,8 @@ func TestEngineBubbleVotesThroughInvalidChain(t *testing.T) { case bytes.Equal(b, blk3.Bytes()): return blk3, nil default: - t.Fatalf("Unknown block bytes") - return nil, nil + require.FailNow(errUnknownBlock.Error()) + return nil, errUnknownBlock } } @@ -2999,56 +2606,33 @@ func TestEngineBubbleVotesThroughInvalidChain(t *testing.T) { reqID := new(uint32) sender.SendGetF = func(_ context.Context, inVdr ids.NodeID, requestID uint32, blkID ids.ID) { *reqID = requestID - if *asked { - t.Fatalf("Asked multiple times") - } - if blkID != blk2.ID() { - t.Fatalf("Expected engine to request blk2") - } - if inVdr != vdr { - t.Fatalf("Expected engine to request blk2 from vdr") - } + require.False(*asked) + require.Equal(blk2.ID(), 
blkID) + require.Equal(vdr, inVdr) *asked = true } // Receive Gossip message for [blk3] first and expect the sender to issue a // Get request for its ancestor: [blk2]. - if err := te.Put(context.Background(), vdr, constants.GossipMsgRequestID, blk3.Bytes()); err != nil { - t.Fatal(err) - } - - if !*asked { - t.Fatalf("Didn't ask for missing blk2") - } + require.NoError(te.Put(context.Background(), vdr, constants.GossipMsgRequestID, blk3.Bytes())) + require.True(*asked) // Prepare to PushQuery [blk1] after our request for [blk2] is fulfilled. // We should not PushQuery [blk2] since it currently fails verification. // We should not PushQuery [blk3] because [blk2] wasn't issued. queried := new(bool) queryRequestID := new(uint32) - sender.SendPushQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, blkBytes []byte) { - if *queried { - t.Fatalf("Asked multiple times") - } + sender.SendPullQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, blkID ids.ID, requestedHeight uint64) { + require.False(*queried) *queried = true *queryRequestID = requestID - vdrSet := set.Set[ids.NodeID]{} - vdrSet.Add(vdr) - if !inVdrs.Equals(vdrSet) { - t.Fatalf("Asking wrong validator for preference") - } - if !bytes.Equal(blk1.Bytes(), blkBytes) { - t.Fatalf("Asking for wrong block") - } + require.Equal(expectedVdrSet, inVdrs) + require.Equal(blk1.ID(), blkID) + require.Equal(uint64(1), requestedHeight) } // Answer the request, this should result in [blk1] being issued as well. 
- if err := te.Put(context.Background(), vdr, *reqID, blk2.Bytes()); err != nil { - t.Fatal(err) - } - - if !*queried { - t.Fatalf("Didn't ask for preferences regarding blk1") - } + require.NoError(te.Put(context.Background(), vdr, *reqID, blk2.Bytes())) + require.True(*queried) sendReqID := new(uint32) reqVdr := new(ids.NodeID) @@ -3057,7 +2641,7 @@ func TestEngineBubbleVotesThroughInvalidChain(t *testing.T) { sender.SendGetF = func(_ context.Context, inVdr ids.NodeID, requestID uint32, blkID ids.ID) { switch blkID { case blk1.ID(): - t.Fatal("Unexpectedly sent a Get request for blk1") + require.FailNow("Unexpectedly sent a Get request for blk1") case blk2.ID(): t.Logf("sending get for blk2 with %d", requestID) *sendReqID = requestID @@ -3069,193 +2653,22 @@ func TestEngineBubbleVotesThroughInvalidChain(t *testing.T) { *reqVdr = inVdr return default: - t.Fatal("Unexpectedly sent a Get request for unknown block") - } - } - - sender.SendPullQueryF = func(_ context.Context, _ set.Set[ids.NodeID], _ uint32, blkID ids.ID) { - switch blkID { - case blk1.ID(): - t.Fatal("Unexpectedly sent a PullQuery request for blk1") - case blk2.ID(): - t.Fatal("Unexpectedly sent a PullQuery request for blk2") - case blk3.ID(): - t.Fatal("Unexpectedly sent a PullQuery request for blk3") - default: - t.Fatal("Unexpectedly sent a PullQuery request for unknown block") + require.FailNow("Unexpectedly sent a Get request for unknown block") } } - // Now we are expecting a Chits message, and we receive it for [blk3] - // instead of blk1. This will cause the node to again request [blk3]. - if err := te.Chits(context.Background(), vdr, *queryRequestID, []ids.ID{blk3.ID()}, nil); err != nil { - t.Fatal(err) - } + // Now we are expecting a Chits message and we receive it for [blk3]. + // This will cause the node to again request [blk3]. 
+ require.NoError(te.Chits(context.Background(), vdr, *queryRequestID, blk3.ID(), blk1.ID(), blk3.ID())) - // Drop the re-request for blk3 to cause the poll to termindate. The votes - // should be bubbled through blk3 despite the fact that it hasn't been + // Drop the re-request for [blk3] to cause the poll to terminate. The votes + // should be bubbled through [blk3] despite the fact that it hasn't been // issued. - if err := te.GetFailed(context.Background(), *reqVdr, *sendReqID); err != nil { - t.Fatal(err) - } + require.NoError(te.GetFailed(context.Background(), *reqVdr, *sendReqID)) // The vote should be bubbled through [blk3] and [blk2] such that [blk1] // gets marked as Accepted. - if blk1.Status() != choices.Accepted { - t.Fatalf("Expected blk1 to be Accepted, but found status: %s", blk1.Status()) - } -} - -func TestMixedQueryNumPushSet(t *testing.T) { - for i := 0; i < 3; i++ { - t.Run( - fmt.Sprint(i), - func(t *testing.T) { - engCfg := DefaultConfigs() - engCfg.Params.MixedQueryNumPushVdr = i - te, err := newTransitive(engCfg) - if err != nil { - t.Fatal(err) - } - if te.Params.MixedQueryNumPushVdr != i { - t.Fatalf("expected to push query %v validators but got %v", i, te.Config.Params.MixedQueryNumPushVdr) - } - }, - ) - } -} - -func TestSendMixedQuery(t *testing.T) { - type test struct { - isVdr bool - } - tests := []test{ - {isVdr: true}, - {isVdr: false}, - } - for _, tt := range tests { - t.Run( - fmt.Sprintf("is validator: %v", tt.isVdr), - func(t *testing.T) { - engConfig := DefaultConfigs() - commonCfg := common.DefaultConfigTest() - // Override the parameters k and MixedQueryNumPushNonVdr, - // and update the validator set to have k validators. 
- engConfig.Params.Alpha = 12 - engConfig.Params.MixedQueryNumPushNonVdr = 12 - engConfig.Params.MixedQueryNumPushVdr = 14 - engConfig.Params.K = 20 - _, _, sender, vm, te, gBlk := setup(t, commonCfg, engConfig) - - vdrs := set.Set[ids.NodeID]{} - te.Validators = validators.NewSet() - for i := 0; i < te.Params.K; i++ { - vdrID := ids.GenerateTestNodeID() - vdrs.Add(vdrID) - err := te.Validators.Add(vdrID, nil, ids.Empty, 1) - if err != nil { - t.Fatal(err) - } - } - if tt.isVdr { - vdrs.Add(te.Ctx.NodeID) - err := te.Validators.Add(te.Ctx.NodeID, nil, ids.Empty, 1) - if err != nil { - t.Fatal(err) - } - } - - // [blk1] is a child of [gBlk] and passes verification - blk1 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentV: gBlk.ID(), - HeightV: 1, - BytesV: []byte{1}, - } - - // The VM should be able to parse [blk1] - vm.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { - switch { - case bytes.Equal(b, blk1.Bytes()): - return blk1, nil - default: - t.Fatalf("Unknown block bytes") - return nil, nil - } - } - - // The VM should only be able to retrieve [gBlk] from storage - vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { - switch blkID { - case gBlk.ID(): - return gBlk, nil - default: - return nil, errUnknownBlock - } - } - - pullQuerySent := new(bool) - pullQueryReqID := new(uint32) - pullQueriedVdrs := set.Set[ids.NodeID]{} - sender.SendPullQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, blkID ids.ID) { - switch { - case *pullQuerySent: - t.Fatalf("Asked multiple times") - case blkID != blk1.ID(): - t.Fatalf("Expected engine to request blk1") - } - pullQueriedVdrs.Union(inVdrs) - *pullQuerySent = true - *pullQueryReqID = requestID - } - - pushQuerySent := new(bool) - pushQueryReqID := new(uint32) - pushQueriedVdrs := set.Set[ids.NodeID]{} - sender.SendPushQueryF = func(_ context.Context, inVdrs 
set.Set[ids.NodeID], requestID uint32, blkBytes []byte) { - switch { - case *pushQuerySent: - t.Fatal("Asked multiple times") - case !bytes.Equal(blkBytes, blk1.Bytes()): - t.Fatal("got unexpected block bytes instead of blk1") - } - *pushQuerySent = true - *pushQueryReqID = requestID - pushQueriedVdrs.Union(inVdrs) - } - - // Give the engine blk1. It should insert it into consensus and send a mixed query - // consisting of 12 push queries and 8 pull queries. - if err := te.Put(context.Background(), te.Validators.List()[0].NodeID, constants.GossipMsgRequestID, blk1.Bytes()); err != nil { - t.Fatal(err) - } - - switch { - case !*pullQuerySent: - t.Fatal("expected us to send pull queries") - case !*pushQuerySent: - t.Fatal("expected us to send push queries") - case *pushQueryReqID != *pullQueryReqID: - t.Fatalf("expected equal push query (%v) and pull query (%v) req IDs", *pushQueryReqID, *pullQueryReqID) - case pushQueriedVdrs.Len()+pullQueriedVdrs.Len() != te.Config.Params.K: - t.Fatalf("expected num push queried (%d) + num pull queried (%d) to be %d", pushQueriedVdrs.Len(), pullQueriedVdrs.Len(), te.Config.Params.K) - case !tt.isVdr && pushQueriedVdrs.Len() != te.Params.MixedQueryNumPushNonVdr: - t.Fatalf("expected num push queried (%d) to be %d", pushQueriedVdrs.Len(), te.Params.MixedQueryNumPushNonVdr) - case tt.isVdr && pushQueriedVdrs.Len() != te.Params.MixedQueryNumPushVdr: - t.Fatalf("expected num push queried (%d) to be %d", pushQueriedVdrs.Len(), te.Params.MixedQueryNumPushVdr) - } - - pullQueriedVdrs.Union(pushQueriedVdrs) // Now this holds all queried validators (push and pull) - for vdr := range pullQueriedVdrs { - if !vdrs.Contains(vdr) { - t.Fatalf("got unexpected vdr %v", vdr) - } - } - }) - } + require.Equal(choices.Accepted, blk1.Status()) } func TestEngineBuildBlockWithCachedNonVerifiedParent(t *testing.T) { @@ -3325,14 +2738,14 @@ func TestEngineBuildBlockWithCachedNonVerifiedParent(t *testing.T) { } queryRequestGPID := new(uint32) - 
sender.SendPushQueryF = func(_ context.Context, _ set.Set[ids.NodeID], requestID uint32, blkBytes []byte) { - require.Equal(grandParentBlk.Bytes(), blkBytes) + sender.SendPullQueryF = func(_ context.Context, _ set.Set[ids.NodeID], requestID uint32, blkID ids.ID, requestedHeight uint64) { *queryRequestGPID = requestID + require.Equal(grandParentBlk.ID(), blkID) + require.Equal(uint64(1), requestedHeight) } // Give the engine the grandparent - err := te.Put(context.Background(), vdr, 0, grandParentBlk.BytesV) - require.NoError(err) + require.NoError(te.Put(context.Background(), vdr, 0, grandParentBlk.BytesV)) vm.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { require.Equal(parentBlkA.BytesV, b) @@ -3342,8 +2755,7 @@ func TestEngineBuildBlockWithCachedNonVerifiedParent(t *testing.T) { // Give the node [parentBlkA]/[parentBlkB]. // When it's parsed we get [parentBlkA] (not [parentBlkB]). // [parentBlkA] fails verification and gets put into [te.nonVerifiedCache]. - err = te.Put(context.Background(), vdr, 0, parentBlkA.BytesV) - require.NoError(err) + require.NoError(te.Put(context.Background(), vdr, 0, parentBlkA.BytesV)) vm.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { require.Equal(parentBlkB.BytesV, b) @@ -3364,9 +2776,10 @@ func TestEngineBuildBlockWithCachedNonVerifiedParent(t *testing.T) { } queryRequestAID := new(uint32) - sender.SendPushQueryF = func(_ context.Context, _ set.Set[ids.NodeID], requestID uint32, blkBytes []byte) { - require.Equal(parentBlkA.Bytes(), blkBytes) + sender.SendPullQueryF = func(_ context.Context, _ set.Set[ids.NodeID], requestID uint32, blkID ids.ID, requestedHeight uint64) { *queryRequestAID = requestID + require.Equal(parentBlkA.ID(), blkID) + require.Equal(uint64(1), requestedHeight) } sender.CantSendPullQuery = false @@ -3375,15 +2788,11 @@ func TestEngineBuildBlockWithCachedNonVerifiedParent(t *testing.T) { // When we fetch it using [GetBlockF] we get [parentBlkB]. 
// Note that [parentBlkB] doesn't fail verification and is issued into consensus. // This evicts [parentBlkA] from [te.nonVerifiedCache]. - err = te.Put(context.Background(), vdr, 0, parentBlkA.BytesV) - require.NoError(err) + require.NoError(te.Put(context.Background(), vdr, 0, parentBlkA.BytesV)) // Give 2 chits for [parentBlkA]/[parentBlkB] - err = te.Chits(context.Background(), vdr, *queryRequestAID, []ids.ID{parentBlkB.IDV}, nil) - require.NoError(err) - - err = te.Chits(context.Background(), vdr, *queryRequestGPID, []ids.ID{parentBlkB.IDV}, nil) - require.NoError(err) + require.NoError(te.Chits(context.Background(), vdr, *queryRequestAID, parentBlkB.IDV, grandParentBlk.IDV, parentBlkB.IDV)) + require.NoError(te.Chits(context.Background(), vdr, *queryRequestGPID, parentBlkB.IDV, grandParentBlk.IDV, parentBlkB.IDV)) // Assert that the blocks' statuses are correct. // The evicted [parentBlkA] shouldn't be changed. @@ -3395,37 +2804,36 @@ func TestEngineBuildBlockWithCachedNonVerifiedParent(t *testing.T) { } sentQuery := new(bool) - sender.SendPushQueryF = func(context.Context, set.Set[ids.NodeID], uint32, []byte) { + sender.SendPushQueryF = func(context.Context, set.Set[ids.NodeID], uint32, []byte, uint64) { *sentQuery = true } // Should issue a new block and send a query for it. 
- err = te.Notify(context.Background(), common.PendingTxs) - require.NoError(err) + require.NoError(te.Notify(context.Background(), common.PendingTxs)) require.True(*sentQuery) } func TestEngineApplyAcceptedFrontierInQueryFailed(t *testing.T) { require := require.New(t) - engCfg := DefaultConfigs() + engCfg := DefaultConfig(t) engCfg.Params = snowball.Parameters{ - K: 1, - Alpha: 1, - BetaVirtuous: 2, - BetaRogue: 2, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - MixedQueryNumPushNonVdr: 1, - } - - vals := validators.NewSet() + K: 1, + AlphaPreference: 1, + AlphaConfidence: 1, + BetaVirtuous: 2, + BetaRogue: 2, + ConcurrentRepolls: 1, + OptimalProcessing: 1, + MaxOutstandingItems: 1, + MaxItemProcessingTime: 1, + } + + vals := validators.NewManager() engCfg.Validators = vals vdr := ids.GenerateTestNodeID() - require.NoError(vals.Add(vdr, nil, ids.Empty, 1)) + require.NoError(vals.AddStaker(engCfg.Ctx.SubnetID, vdr, nil, ids.Empty, 1)) sender := &common.SenderTest{T: t} engCfg.Sender = sender @@ -3470,13 +2878,20 @@ func TestEngineApplyAcceptedFrontierInQueryFailed(t *testing.T) { } queryRequestID := new(uint32) - sender.SendPushQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, blkBytes []byte) { + sender.SendPushQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, blkBytes []byte, requestedHeight uint64) { + *queryRequestID = requestID require.Contains(inVdrs, vdr) require.Equal(blk.Bytes(), blkBytes) - *queryRequestID = requestID + require.Equal(uint64(1), requestedHeight) } - require.NoError(te.issue(context.Background(), blk)) + require.NoError(te.issue( + context.Background(), + te.Ctx.NodeID, + blk, + true, + te.metrics.issued.WithLabelValues(unknownSource), + )) vm.GetBlockF = func(_ context.Context, id ids.ID) (snowman.Block, error) { switch id { @@ -3485,20 +2900,20 @@ func TestEngineApplyAcceptedFrontierInQueryFailed(t *testing.T) { case 
blk.ID(): return blk, nil } - t.Fatalf("unknown block") - panic("Should have errored") + require.FailNow(errUnknownBlock.Error()) + return nil, errUnknownBlock } require.Equal(choices.Processing, blk.Status()) - sender.SendPullQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, blkID ids.ID) { + sender.SendPullQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, blkID ids.ID, requestedHeight uint64) { + *queryRequestID = requestID require.Contains(inVdrs, vdr) require.Equal(blk.ID(), blkID) - *queryRequestID = requestID + require.Equal(uint64(1), requestedHeight) } - blkIDs := []ids.ID{blk.ID()} - require.NoError(te.Chits(context.Background(), vdr, *queryRequestID, blkIDs, blkIDs)) + require.NoError(te.Chits(context.Background(), vdr, *queryRequestID, blk.ID(), blk.ID(), blk.ID())) require.Equal(choices.Processing, blk.Status()) @@ -3506,3 +2921,117 @@ func TestEngineApplyAcceptedFrontierInQueryFailed(t *testing.T) { require.Equal(choices.Accepted, blk.Status()) } + +func TestEngineRepollsMisconfiguredSubnet(t *testing.T) { + require := require.New(t) + + engCfg := DefaultConfig(t) + engCfg.Params = snowball.Parameters{ + K: 1, + AlphaPreference: 1, + AlphaConfidence: 1, + BetaVirtuous: 1, + BetaRogue: 1, + ConcurrentRepolls: 1, + OptimalProcessing: 1, + MaxOutstandingItems: 1, + MaxItemProcessingTime: 1, + } + + // Setup the engine with no validators. When a block is issued, the poll + // should fail to be created because there is nobody to poll. 
+ vals := validators.NewManager() + engCfg.Validators = vals + + sender := &common.SenderTest{T: t} + engCfg.Sender = sender + + sender.Default(true) + + vm := &block.TestVM{} + vm.T = t + engCfg.VM = vm + + vm.Default(true) + vm.CantSetState = false + vm.CantSetPreference = false + + gBlk := &snowman.TestBlock{TestDecidable: choices.TestDecidable{ + IDV: ids.GenerateTestID(), + StatusV: choices.Accepted, + }} + + vm.LastAcceptedF = func(context.Context) (ids.ID, error) { + return gBlk.ID(), nil + } + vm.GetBlockF = func(_ context.Context, id ids.ID) (snowman.Block, error) { + require.Equal(gBlk.ID(), id) + return gBlk, nil + } + + te, err := newTransitive(engCfg) + require.NoError(err) + require.NoError(te.Start(context.Background(), 0)) + + vm.LastAcceptedF = nil + + blk := &snowman.TestBlock{ + TestDecidable: choices.TestDecidable{ + IDV: ids.GenerateTestID(), + StatusV: choices.Processing, + }, + ParentV: gBlk.IDV, + HeightV: 1, + BytesV: []byte{1}, + } + + // Issue the block. This shouldn't call the sender, because creating the + // poll should fail. + require.NoError(te.issue( + context.Background(), + te.Ctx.NodeID, + blk, + true, + te.metrics.issued.WithLabelValues(unknownSource), + )) + + // The block should have successfully been added into consensus. + require.Equal(1, te.Consensus.NumProcessing()) + + // Fix the subnet configuration by adding a validator. + vdr := ids.GenerateTestNodeID() + require.NoError(vals.AddStaker(engCfg.Ctx.SubnetID, vdr, nil, ids.Empty, 1)) + + var ( + queryRequestID uint32 + queried bool + ) + sender.SendPullQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, blkID ids.ID, requestedHeight uint64) { + queryRequestID = requestID + require.Contains(inVdrs, vdr) + require.Equal(blk.ID(), blkID) + require.Equal(uint64(1), requestedHeight) + queried = true + } + + // Because there is now a validator that can be queried, gossip should + // trigger creation of the poll. 
+ require.NoError(te.Gossip(context.Background())) + require.True(queried) + + vm.GetBlockF = func(_ context.Context, id ids.ID) (snowman.Block, error) { + switch id { + case gBlk.ID(): + return gBlk, nil + case blk.ID(): + return blk, nil + } + require.FailNow(errUnknownBlock.Error()) + return nil, errUnknownBlock + } + + // Voting for the block that was issued during the period when the validator + // set was misconfigured should result in it being accepted successfully. + require.NoError(te.Chits(context.Background(), vdr, queryRequestID, blk.ID(), blk.ID(), blk.ID())) + require.Equal(choices.Accepted, blk.Status()) +} diff --git a/avalanchego/snow/engine/snowman/voter.go b/avalanchego/snow/engine/snowman/voter.go index e2813851..0a029e87 100644 --- a/avalanchego/snow/engine/snowman/voter.go +++ b/avalanchego/snow/engine/snowman/voter.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowman @@ -15,11 +15,11 @@ import ( // Voter records chits received from [vdr] once its dependencies are met. 
type voter struct { - t *Transitive - vdr ids.NodeID - requestID uint32 - response ids.ID - deps set.Set[ids.ID] + t *Transitive + vdr ids.NodeID + requestID uint32 + responseOptions []ids.ID + deps set.Set[ids.ID] } func (v *voter) Dependencies() set.Set[ids.ID] { @@ -42,11 +42,27 @@ func (v *voter) Update(ctx context.Context) { return } + var ( + vote ids.ID + shouldVote bool + voteIndex int + ) + for i, voteOption := range v.responseOptions { + // To prevent any potential deadlocks with undisclosed dependencies, + // votes must be bubbled to the nearest valid block + vote, shouldVote = v.getProcessingAncestor(ctx, voteOption) + if shouldVote { + voteIndex = i + break + } + } + var results []bag.Bag[ids.ID] - if v.response == ids.Empty { - results = v.t.polls.Drop(v.requestID, v.vdr) + if shouldVote { + v.t.selectedVoteIndex.Observe(float64(voteIndex)) + results = v.t.polls.Vote(v.requestID, v.vdr, vote) } else { - results = v.t.polls.Vote(v.requestID, v.vdr, v.response) + results = v.t.polls.Drop(v.requestID, v.vdr) } if len(results) == 0 { @@ -55,13 +71,6 @@ func (v *voter) Update(ctx context.Context) { for _, result := range results { result := result - v.t.Ctx.Log.Debug("filtering poll results", - zap.Stringer("result", &result), - ) - - // To prevent any potential deadlocks with un-disclosed dependencies, - // votes must be bubbled to the nearest valid block - result = v.bubbleVotes(ctx, result) v.t.Ctx.Log.Debug("finishing poll", zap.Stringer("result", &result), ) @@ -79,7 +88,7 @@ func (v *voter) Update(ctx context.Context) { return } - if v.t.Consensus.Finalized() { + if v.t.Consensus.NumProcessing() == 0 { v.t.Ctx.Log.Debug("Snowman engine can quiesce") return } @@ -88,92 +97,55 @@ func (v *voter) Update(ctx context.Context) { v.t.repoll(ctx) } -// bubbleVotes bubbles the [votes] a set of the number of votes for specific -// blkIDs that received votes in consensus, to their most recent ancestor that -// has been issued to consensus. 
+// getProcessingAncestor finds [initialVote]'s most recent ancestor that is +// processing in consensus. If no ancestor could be found, false is returned. // -// Note: bubbleVotes does not bubbleVotes to all of the ancestors in consensus, -// just the most recent one. bubbling to the rest of the ancestors, which may -// also be in consensus is handled in RecordPoll. -func (v *voter) bubbleVotes(ctx context.Context, votes bag.Bag[ids.ID]) bag.Bag[ids.ID] { - bubbledVotes := bag.Bag[ids.ID]{} - -votesLoop: - for _, vote := range votes.List() { - count := votes.Count(vote) - // use rootID in case of this is a non-verified block ID - rootID := v.t.nonVerifieds.GetRoot(vote) - v.t.Ctx.Log.Verbo("bubbling vote(s) through unverified blocks", - zap.Int("numVotes", count), - zap.Stringer("voteID", vote), - zap.Stringer("parentID", rootID), - ) - - blk, err := v.t.GetBlock(ctx, rootID) +// Note: If [initialVote] is processing, then [initialVote] will be returned. +func (v *voter) getProcessingAncestor(ctx context.Context, initialVote ids.ID) (ids.ID, bool) { + // If [bubbledVote] != [initialVote], it is guaranteed that [bubbledVote] is + // in processing. Otherwise, we attempt to iterate through any blocks we + // have at our disposal as a best-effort mechanism to find a valid ancestor. 
+ bubbledVote := v.t.nonVerifieds.GetAncestor(initialVote) + for { + blk, err := v.t.GetBlock(ctx, bubbledVote) // If we cannot retrieve the block, drop [vote] if err != nil { - v.t.Ctx.Log.Debug("dropping vote(s)", - zap.String("reason", "parent couldn't be fetched"), - zap.Stringer("parentID", rootID), - zap.Int("numVotes", count), - zap.Stringer("voteID", vote), + v.t.Ctx.Log.Debug("dropping vote", + zap.String("reason", "ancestor couldn't be fetched"), + zap.Stringer("initialVoteID", initialVote), + zap.Stringer("bubbledVoteID", bubbledVote), zap.Error(err), ) - continue + v.t.numProcessingAncestorFetchesFailed.Inc() + return ids.Empty, false } - status := blk.Status() - blkID := blk.ID() - // If we have not fetched [blkID] break from the loop. We will drop the - // vote below and move on to the next vote. - // - // If [blk] has already been decided, break from the loop, we will drop - // the vote below since there is no need to count the votes for a [blk] - // we've already finalized. - // - // If [blk] is currently in consensus, break from the loop, we have - // reached the first ancestor of the original [vote] that has been - // issued consensus. In this case, the votes will be bubbled further - // from [blk] to any of its ancestors that are also in consensus. 
- for status.Fetched() && !(v.t.Consensus.Decided(blk) || v.t.Consensus.Processing(blkID)) { - parentID := blk.Parent() - v.t.Ctx.Log.Verbo("pushing vote(s)", - zap.Int("numVotes", count), - zap.Stringer("voteID", vote), - zap.Stringer("parentID", rootID), + if v.t.Consensus.Decided(blk) { + v.t.Ctx.Log.Debug("dropping vote", + zap.String("reason", "bubbled vote already decided"), + zap.Stringer("initialVoteID", initialVote), + zap.Stringer("bubbledVoteID", bubbledVote), + zap.Stringer("status", blk.Status()), + zap.Uint64("height", blk.Height()), ) - - blkID = parentID - blk, err = v.t.GetBlock(ctx, blkID) - // If we cannot retrieve the block, drop [vote] - if err != nil { - v.t.Ctx.Log.Debug("dropping vote(s)", - zap.String("reason", "block couldn't be fetched"), - zap.Stringer("blkID", blkID), - zap.Int("numVotes", count), - zap.Stringer("voteID", vote), - zap.Error(err), - ) - continue votesLoop - } - status = blk.Status() + v.t.numProcessingAncestorFetchesDropped.Inc() + return ids.Empty, false } - // If [blkID] is currently in consensus, count the votes - if v.t.Consensus.Processing(blkID) { - v.t.Ctx.Log.Verbo("applying vote(s)", - zap.Int("numVotes", count), - zap.Stringer("blkID", blkID), - zap.Stringer("status", status), - ) - bubbledVotes.AddCount(blkID, count) - } else { - v.t.Ctx.Log.Verbo("dropping vote(s)", - zap.Int("numVotes", count), - zap.Stringer("blkID", blkID), - zap.Stringer("status", status), + if v.t.Consensus.Processing(bubbledVote) { + v.t.Ctx.Log.Verbo("applying vote", + zap.Stringer("initialVoteID", initialVote), + zap.Stringer("bubbledVoteID", bubbledVote), + zap.Uint64("height", blk.Height()), ) + if bubbledVote != initialVote { + v.t.numProcessingAncestorFetchesSucceeded.Inc() + } else { + v.t.numProcessingAncestorFetchesUnneeded.Inc() + } + return bubbledVote, true } + + bubbledVote = blk.Parent() } - return bubbledVotes } diff --git a/avalanchego/snow/events/blockable.go b/avalanchego/snow/event/blockable.go similarity index 89% 
rename from avalanchego/snow/events/blockable.go rename to avalanchego/snow/event/blockable.go index 233dae80..404e95c2 100644 --- a/avalanchego/snow/events/blockable.go +++ b/avalanchego/snow/event/blockable.go @@ -1,7 +1,7 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package events +package event import ( "context" diff --git a/avalanchego/snow/events/blocker.go b/avalanchego/snow/event/blocker.go similarity index 96% rename from avalanchego/snow/events/blocker.go rename to avalanchego/snow/event/blocker.go index f72f6729..9c15ffb5 100644 --- a/avalanchego/snow/events/blocker.go +++ b/avalanchego/snow/event/blocker.go @@ -1,7 +1,7 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package events +package event import ( "context" diff --git a/avalanchego/snow/events/blocker_test.go b/avalanchego/snow/event/blocker_test.go similarity index 70% rename from avalanchego/snow/events/blocker_test.go rename to avalanchego/snow/event/blocker_test.go index d3710be9..d7620bfe 100644 --- a/avalanchego/snow/events/blocker_test.go +++ b/avalanchego/snow/event/blocker_test.go @@ -1,17 +1,21 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
-package events +package event import ( "context" "testing" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/set" ) func TestBlocker(t *testing.T) { + require := require.New(t) + b := Blocker(nil) a := newTestBlockable() @@ -24,8 +28,7 @@ func TestBlocker(t *testing.T) { a.dependencies = func() set.Set[ids.ID] { *calledDep = true - s := set.Set[ids.ID]{} - s.Add(id0, id1) + s := set.Of(id0, id1) return s } calledFill := new(bool) @@ -43,39 +46,39 @@ func TestBlocker(t *testing.T) { b.Register(context.Background(), a) - switch { - case !*calledDep, *calledFill, *calledAbandon, !*calledUpdate: - t.Fatalf("Called wrong function") - } + require.True(*calledDep) + require.False(*calledFill) + require.False(*calledAbandon) + require.True(*calledUpdate) b.Fulfill(context.Background(), id2) b.Abandon(context.Background(), id2) - switch { - case !*calledDep, *calledFill, *calledAbandon, !*calledUpdate: - t.Fatalf("Called wrong function") - } + require.True(*calledDep) + require.False(*calledFill) + require.False(*calledAbandon) + require.True(*calledUpdate) b.Fulfill(context.Background(), id0) - switch { - case !*calledDep, !*calledFill, *calledAbandon, !*calledUpdate: - t.Fatalf("Called wrong function") - } + require.True(*calledDep) + require.True(*calledFill) + require.False(*calledAbandon) + require.True(*calledUpdate) b.Abandon(context.Background(), id0) - switch { - case !*calledDep, !*calledFill, *calledAbandon, !*calledUpdate: - t.Fatalf("Called wrong function") - } + require.True(*calledDep) + require.True(*calledFill) + require.False(*calledAbandon) + require.True(*calledUpdate) b.Abandon(context.Background(), id1) - switch { - case !*calledDep, !*calledFill, !*calledAbandon, !*calledUpdate: - t.Fatalf("Called wrong function") - } + require.True(*calledDep) + require.True(*calledFill) + require.True(*calledAbandon) + require.True(*calledUpdate) } type testBlockable struct { diff --git 
a/avalanchego/snow/networking/benchlist/benchable.go b/avalanchego/snow/networking/benchlist/benchable.go index 845eeeb7..f1cc85d9 100644 --- a/avalanchego/snow/networking/benchlist/benchable.go +++ b/avalanchego/snow/networking/benchlist/benchable.go @@ -1,11 +1,9 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package benchlist -import ( - "github.com/ava-labs/avalanchego/ids" -) +import "github.com/ava-labs/avalanchego/ids" // Benchable is notified when a validator is benched or unbenched from a given chain type Benchable interface { diff --git a/avalanchego/snow/networking/benchlist/benchlist.go b/avalanchego/snow/networking/benchlist/benchlist.go index 4f7cb20a..08f7e7d8 100644 --- a/avalanchego/snow/networking/benchlist/benchlist.go +++ b/avalanchego/snow/networking/benchlist/benchlist.go @@ -1,31 +1,26 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package benchlist import ( - "container/heap" "fmt" "math/rand" "sync" "time" - "github.com/prometheus/client_golang/prometheus" - "go.uber.org/zap" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/validators" - "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/heap" "github.com/ava-labs/avalanchego/utils/set" - "github.com/ava-labs/avalanchego/utils/timer" "github.com/ava-labs/avalanchego/utils/timer/mockable" safemath "github.com/ava-labs/avalanchego/utils/math" ) -var _ heap.Interface = (*benchedQueue)(nil) - // If a peer consistently does not respond to queries, it will // increase latencies on the network whenever that peer is polled. 
// If we cannot terminate the poll early, then the poll will wait @@ -45,46 +40,6 @@ type Benchlist interface { IsBenched(nodeID ids.NodeID) bool } -// Data about a validator who is benched -type benchData struct { - benchedUntil time.Time - nodeID ids.NodeID - index int -} - -// Each element is a benched validator -type benchedQueue []*benchData - -func (bq benchedQueue) Len() int { - return len(bq) -} - -func (bq benchedQueue) Less(i, j int) bool { - return bq[i].benchedUntil.Before(bq[j].benchedUntil) -} - -func (bq benchedQueue) Swap(i, j int) { - bq[i], bq[j] = bq[j], bq[i] - bq[i].index = i - bq[j].index = j -} - -// Push adds an item to this queue. x must have type *benchData -func (bq *benchedQueue) Push(x interface{}) { - item := x.(*benchData) - item.index = len(*bq) - *bq = append(*bq, item) -} - -// Pop returns the validator that should leave the bench next -func (bq *benchedQueue) Pop() interface{} { - n := len(*bq) - item := (*bq)[n-1] - (*bq)[n-1] = nil // make sure the item is freed from memory - *bq = (*bq)[:n-1] - return item -} - type failureStreak struct { // Time of first consecutive timeout firstFailure time.Time @@ -94,14 +49,12 @@ type failureStreak struct { type benchlist struct { lock sync.RWMutex - // This is the benchlist for chain [chainID] - chainID ids.ID - log logging.Logger + // Context of the chain this is the benchlist for + ctx *snow.ConsensusContext metrics metrics - // Fires when the next validator should leave the bench - // Calls [update] when it fires - timer *timer.Timer + // Used to notify the timer that it should recalculate when it should fire + resetTimer chan struct{} // Tells the time. Can be faked for testing. 
clock mockable.Clock @@ -110,7 +63,7 @@ type benchlist struct { benchable Benchable // Validator set of the network - vdrs validators.Set + vdrs validators.Manager // Validator ID --> Consecutive failure information // [streaklock] must be held when touching [failureStreaks] @@ -120,9 +73,8 @@ type benchlist struct { // IDs of validators that are currently benched benchlistSet set.Set[ids.NodeID] - // Min heap containing benched validators and their endtimes - // Pop() returns the next validator to leave - benchedQueue benchedQueue + // Min heap of benched validators ordered by when they can be unbenched + benchedHeap heap.Map[ids.NodeID, time.Time] // A validator will be benched if [threshold] messages in a row // to them time out and the first of those messages was more than @@ -140,126 +92,145 @@ type benchlist struct { // NewBenchlist returns a new Benchlist func NewBenchlist( - chainID ids.ID, - log logging.Logger, + ctx *snow.ConsensusContext, benchable Benchable, - validators validators.Set, + validators validators.Manager, threshold int, minimumFailingDuration, duration time.Duration, maxPortion float64, - registerer prometheus.Registerer, ) (Benchlist, error) { if maxPortion < 0 || maxPortion >= 1 { return nil, fmt.Errorf("max portion of benched stake must be in [0,1) but got %f", maxPortion) } + benchlist := &benchlist{ - chainID: chainID, - log: log, + ctx: ctx, + resetTimer: make(chan struct{}, 1), failureStreaks: make(map[ids.NodeID]failureStreak), benchlistSet: set.Set[ids.NodeID]{}, benchable: benchable, + benchedHeap: heap.NewMap[ids.NodeID, time.Time](time.Time.Before), vdrs: validators, threshold: threshold, minimumFailingDuration: minimumFailingDuration, duration: duration, maxPortion: maxPortion, } - benchlist.timer = timer.NewTimer(benchlist.update) - go benchlist.timer.Dispatch() - return benchlist, benchlist.metrics.Initialize(registerer) + if err := benchlist.metrics.Initialize(ctx.Registerer); err != nil { + return nil, err + } + + go 
benchlist.run() + return benchlist, nil } -// Update removes benched validators whose time on the bench is over -func (b *benchlist) update() { - b.lock.Lock() - defer b.lock.Unlock() +// TODO: Close this goroutine during node shutdown +func (b *benchlist) run() { + timer := time.NewTimer(0) + defer timer.Stop() - now := b.clock.Time() for { - // [next] is nil when no more validators should - // leave the bench at this time - next := b.nextToLeave(now) - if next == nil { - break + // Invariant: The [timer] is not stopped. + select { + case <-timer.C: + case <-b.resetTimer: + if !timer.Stop() { + <-timer.C + } } - b.remove(next) + + b.waitForBenchedNodes() + + b.removedExpiredNodes() + + // Note: If there are no nodes to remove, [duration] will be 0 and we + // will immediately wait until there are benched nodes. + duration := b.durationToSleep() + timer.Reset(duration) } - // Set next time update will be called - b.setNextLeaveTime() } -// Remove [validator] from the benchlist -// Assumes [b.lock] is held -func (b *benchlist) remove(node *benchData) { - // Update state - id := node.nodeID - b.log.Debug("removing node from benchlist", - zap.Stringer("nodeID", id), - ) - heap.Remove(&b.benchedQueue, node.index) - b.benchlistSet.Remove(id) - b.benchable.Unbenched(b.chainID, id) +func (b *benchlist) waitForBenchedNodes() { + for { + b.lock.RLock() + _, _, ok := b.benchedHeap.Peek() + b.lock.RUnlock() + if ok { + return + } - // Update metrics - b.metrics.numBenched.Set(float64(b.benchedQueue.Len())) - benchedStake := b.vdrs.SubsetWeight(b.benchlistSet) - b.metrics.weightBenched.Set(float64(benchedStake)) + // Invariant: Whenever a new node is benched we ensure that resetTimer + // has a pending message while the write lock is held. + <-b.resetTimer + } } -// Returns the next validator that should leave -// the bench at time [now]. nil if no validator should. 
-// Assumes [b.lock] is held -func (b *benchlist) nextToLeave(now time.Time) *benchData { - if b.benchedQueue.Len() == 0 { - return nil +func (b *benchlist) removedExpiredNodes() { + b.lock.Lock() + defer b.lock.Unlock() + + now := b.clock.Time() + for { + _, next, ok := b.benchedHeap.Peek() + if !ok { + break + } + if now.Before(next) { + break + } + + nodeID, _, _ := b.benchedHeap.Pop() + b.ctx.Log.Debug("removing node from benchlist", + zap.Stringer("nodeID", nodeID), + ) + b.benchlistSet.Remove(nodeID) + b.benchable.Unbenched(b.ctx.ChainID, nodeID) } - next := b.benchedQueue[0] - if now.Before(next.benchedUntil) { - return nil + + b.metrics.numBenched.Set(float64(b.benchedHeap.Len())) + benchedStake, err := b.vdrs.SubsetWeight(b.ctx.SubnetID, b.benchlistSet) + if err != nil { + b.ctx.Log.Error("error calculating benched stake", + zap.Stringer("subnetID", b.ctx.SubnetID), + zap.Error(err), + ) + return } - return next + b.metrics.weightBenched.Set(float64(benchedStake)) } -// Set [b.timer] to fire when the next validator should leave the bench -// Assumes [b.lock] is held -func (b *benchlist) setNextLeaveTime() { - if b.benchedQueue.Len() == 0 { - b.timer.Cancel() - return +func (b *benchlist) durationToSleep() time.Duration { + b.lock.RLock() + defer b.lock.RUnlock() + + _, next, ok := b.benchedHeap.Peek() + if !ok { + return 0 } + now := b.clock.Time() - next := b.benchedQueue[0] - nextLeave := next.benchedUntil.Sub(now) - b.timer.SetTimeoutIn(nextLeave) + return next.Sub(now) } -// IsBenched returns true if messages to [nodeID] -// should not be sent over the network and should immediately fail. +// IsBenched returns true if messages to [nodeID] should not be sent over the +// network and should immediately fail. 
func (b *benchlist) IsBenched(nodeID ids.NodeID) bool { b.lock.RLock() defer b.lock.RUnlock() - return b.isBenched(nodeID) -} -// isBenched checks if [nodeID] is currently benched -// and calls cleanup if its benching period has elapsed -// Assumes [b.lock] is held. -func (b *benchlist) isBenched(nodeID ids.NodeID) bool { - if _, ok := b.benchlistSet[nodeID]; ok { - return true - } - return false + return b.benchlistSet.Contains(nodeID) } -// RegisterResponse notes that we received a response from validator [validatorID] +// RegisterResponse notes that we received a response from [nodeID] func (b *benchlist) RegisterResponse(nodeID ids.NodeID) { b.streaklock.Lock() defer b.streaklock.Unlock() + delete(b.failureStreaks, nodeID) } -// RegisterResponse notes that a request to validator [validatorID] timed out +// RegisterResponse notes that a request to [nodeID] timed out func (b *benchlist) RegisterFailure(nodeID ids.NodeID) { b.lock.Lock() defer b.lock.Unlock() @@ -290,28 +261,44 @@ func (b *benchlist) RegisterFailure(nodeID ids.NodeID) { // Assumes [b.lock] is held // Assumes [nodeID] is not already benched func (b *benchlist) bench(nodeID ids.NodeID) { - validatorStake := b.vdrs.GetWeight(nodeID) + validatorStake := b.vdrs.GetWeight(b.ctx.SubnetID, nodeID) if validatorStake == 0 { // We might want to bench a non-validator because they don't respond to // my Get requests, but we choose to only bench validators. 
return } - benchedStake := b.vdrs.SubsetWeight(b.benchlistSet) + benchedStake, err := b.vdrs.SubsetWeight(b.ctx.SubnetID, b.benchlistSet) + if err != nil { + b.ctx.Log.Error("error calculating benched stake", + zap.Stringer("subnetID", b.ctx.SubnetID), + zap.Error(err), + ) + return + } + newBenchedStake, err := safemath.Add64(benchedStake, validatorStake) if err != nil { // This should never happen - b.log.Error("overflow calculating new benched stake", + b.ctx.Log.Error("overflow calculating new benched stake", zap.Stringer("nodeID", nodeID), ) return } - totalStake := b.vdrs.Weight() + totalStake, err := b.vdrs.TotalWeight(b.ctx.SubnetID) + if err != nil { + b.ctx.Log.Error("error calculating total stake", + zap.Stringer("subnetID", b.ctx.SubnetID), + zap.Error(err), + ) + return + } + maxBenchedStake := float64(totalStake) * b.maxPortion if float64(newBenchedStake) > maxBenchedStake { - b.log.Debug("not benching node", + b.ctx.Log.Debug("not benching node", zap.String("reason", "benched stake would exceed max"), zap.Stringer("nodeID", nodeID), zap.Float64("benchedStake", float64(newBenchedStake)), @@ -328,28 +315,29 @@ func (b *benchlist) bench(nodeID ids.NodeID) { diff := maxBenchedUntil.Sub(minBenchedUntil) benchedUntil := minBenchedUntil.Add(time.Duration(rand.Float64() * float64(diff))) // #nosec G404 + b.ctx.Log.Debug("benching validator after consecutive failed queries", + zap.Stringer("nodeID", nodeID), + zap.Duration("benchDuration", benchedUntil.Sub(now)), + zap.Int("numFailedQueries", b.threshold), + ) + // Add to benchlist times with randomized delay b.benchlistSet.Add(nodeID) - b.benchable.Benched(b.chainID, nodeID) + b.benchable.Benched(b.ctx.ChainID, nodeID) b.streaklock.Lock() delete(b.failureStreaks, nodeID) b.streaklock.Unlock() - heap.Push( - &b.benchedQueue, - &benchData{nodeID: nodeID, benchedUntil: benchedUntil}, - ) - b.log.Debug("benching validator after consecutive failed queries", - zap.Stringer("nodeID", nodeID), - 
zap.Duration("benchDuration", benchedUntil.Sub(now)), - zap.Int("numFailedQueries", b.threshold), - ) + b.benchedHeap.Push(nodeID, benchedUntil) - // Set [b.timer] to fire when next validator should leave bench - b.setNextLeaveTime() + // Update the timer to account for the newly benched node. + select { + case b.resetTimer <- struct{}{}: + default: + } // Update metrics - b.metrics.numBenched.Set(float64(b.benchedQueue.Len())) + b.metrics.numBenched.Set(float64(b.benchedHeap.Len())) b.metrics.weightBenched.Set(float64(newBenchedStake)) } diff --git a/avalanchego/snow/networking/benchlist/benchlist_test.go b/avalanchego/snow/networking/benchlist/benchlist_test.go index f3f36d73..45568392 100644 --- a/avalanchego/snow/networking/benchlist/benchlist_test.go +++ b/avalanchego/snow/networking/benchlist/benchlist_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package benchlist @@ -7,38 +7,33 @@ import ( "testing" "time" - "github.com/prometheus/client_golang/prometheus" - "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/snow/validators" - "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/utils/wrappers" ) var minimumFailingDuration = 5 * time.Minute // Test that validators are properly added to the bench func TestBenchlistAdd(t *testing.T) { - vdrs := validators.NewSet() + require := require.New(t) + + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) + vdrs := validators.NewManager() vdrID0 := ids.GenerateTestNodeID() vdrID1 := ids.GenerateTestNodeID() vdrID2 := ids.GenerateTestNodeID() vdrID3 := ids.GenerateTestNodeID() vdrID4 := ids.GenerateTestNodeID() - errs := wrappers.Errs{} - errs.Add( - vdrs.Add(vdrID0, nil, ids.Empty, 50), - vdrs.Add(vdrID1, nil, ids.Empty, 50), - vdrs.Add(vdrID2, nil, ids.Empty, 50), - vdrs.Add(vdrID3, nil, ids.Empty, 50), - vdrs.Add(vdrID4, nil, ids.Empty, 50), - ) - if errs.Errored() { - t.Fatal(errs.Err) - } + require.NoError(vdrs.AddStaker(ctx.SubnetID, vdrID0, nil, ids.Empty, 50)) + require.NoError(vdrs.AddStaker(ctx.SubnetID, vdrID1, nil, ids.Empty, 50)) + require.NoError(vdrs.AddStaker(ctx.SubnetID, vdrID2, nil, ids.Empty, 50)) + require.NoError(vdrs.AddStaker(ctx.SubnetID, vdrID3, nil, ids.Empty, 50)) + require.NoError(vdrs.AddStaker(ctx.SubnetID, vdrID4, nil, ids.Empty, 50)) benchable := &TestBenchable{T: t} benchable.Default(true) @@ -47,34 +42,24 @@ func TestBenchlistAdd(t *testing.T) { duration := time.Minute maxPortion := 0.5 benchIntf, err := NewBenchlist( - ids.Empty, - logging.NoLog{}, + ctx, benchable, vdrs, threshold, minimumFailingDuration, duration, maxPortion, - prometheus.NewRegistry(), ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) b := benchIntf.(*benchlist) - defer 
b.timer.Stop() now := time.Now() b.clock.Set(now) // Nobody should be benched at the start b.lock.Lock() - require.False(t, b.isBenched(vdrID0)) - require.False(t, b.isBenched(vdrID1)) - require.False(t, b.isBenched(vdrID2)) - require.False(t, b.isBenched(vdrID3)) - require.False(t, b.isBenched(vdrID4)) - require.Len(t, b.failureStreaks, 0) - require.Equal(t, b.benchedQueue.Len(), 0) - require.Equal(t, b.benchlistSet.Len(), 0) + require.Empty(b.benchlistSet) + require.Empty(b.failureStreaks) + require.Zero(b.benchedHeap.Len()) b.lock.Unlock() // Register [threshold - 1] failures in a row for vdr0 @@ -83,13 +68,12 @@ func TestBenchlistAdd(t *testing.T) { } // Still shouldn't be benched due to not enough consecutive failure - require.False(t, b.isBenched(vdrID0)) - require.Equal(t, b.benchedQueue.Len(), 0) - require.Equal(t, b.benchlistSet.Len(), 0) - require.Len(t, b.failureStreaks, 1) + require.Empty(b.benchlistSet) + require.Zero(b.benchedHeap.Len()) + require.Len(b.failureStreaks, 1) fs := b.failureStreaks[vdrID0] - require.Equal(t, threshold-1, fs.consecutive) - require.True(t, fs.firstFailure.Equal(now)) + require.Equal(threshold-1, fs.consecutive) + require.True(fs.firstFailure.Equal(now)) // Register another failure b.RegisterFailure(vdrID0) @@ -97,9 +81,8 @@ func TestBenchlistAdd(t *testing.T) { // Still shouldn't be benched because not enough time (any in this case) // has passed since the first failure b.lock.Lock() - require.False(t, b.isBenched(vdrID0)) - require.Equal(t, b.benchedQueue.Len(), 0) - require.Equal(t, b.benchlistSet.Len(), 0) + require.Empty(b.benchlistSet) + require.Zero(b.benchedHeap.Len()) b.lock.Unlock() // Move the time up @@ -118,16 +101,17 @@ func TestBenchlistAdd(t *testing.T) { // Now this validator should be benched b.lock.Lock() - require.True(t, b.isBenched(vdrID0)) - require.Equal(t, b.benchedQueue.Len(), 1) - require.Equal(t, b.benchlistSet.Len(), 1) - - next := b.benchedQueue[0] - require.Equal(t, vdrID0, next.nodeID) - 
require.True(t, !next.benchedUntil.After(now.Add(duration))) - require.True(t, !next.benchedUntil.Before(now.Add(duration/2))) - require.Len(t, b.failureStreaks, 0) - require.True(t, benched) + require.Contains(b.benchlistSet, vdrID0) + require.Equal(1, b.benchedHeap.Len()) + require.Equal(1, b.benchlistSet.Len()) + + nodeID, benchedUntil, ok := b.benchedHeap.Peek() + require.True(ok) + require.Equal(vdrID0, nodeID) + require.False(benchedUntil.After(now.Add(duration))) + require.False(benchedUntil.Before(now.Add(duration / 2))) + require.Empty(b.failureStreaks) + require.True(benched) benchable.BenchedF = nil b.lock.Unlock() @@ -142,11 +126,10 @@ func TestBenchlistAdd(t *testing.T) { // vdr1 shouldn't be benched // The response should have cleared its consecutive failures b.lock.Lock() - require.True(t, b.isBenched(vdrID0)) - require.False(t, b.isBenched(vdrID1)) - require.Equal(t, b.benchedQueue.Len(), 1) - require.Equal(t, b.benchlistSet.Len(), 1) - require.Len(t, b.failureStreaks, 0) + require.Contains(b.benchlistSet, vdrID0) + require.Equal(1, b.benchedHeap.Len()) + require.Equal(1, b.benchlistSet.Len()) + require.Empty(b.failureStreaks) b.lock.Unlock() // Register another failure for vdr0, who is benched @@ -154,13 +137,17 @@ func TestBenchlistAdd(t *testing.T) { // A failure for an already benched validator should not count against it b.lock.Lock() - require.Len(t, b.failureStreaks, 0) + require.Empty(b.failureStreaks) b.lock.Unlock() } // Test that the benchlist won't bench more than the maximum portion of stake func TestBenchlistMaxStake(t *testing.T) { - vdrs := validators.NewSet() + require := require.New(t) + + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) + vdrs := validators.NewManager() vdrID0 := ids.GenerateTestNodeID() vdrID1 := ids.GenerateTestNodeID() vdrID2 := ids.GenerateTestNodeID() @@ -168,38 +155,27 @@ func TestBenchlistMaxStake(t *testing.T) { vdrID4 := ids.GenerateTestNodeID() // Total weight 
is 5100 - errs := wrappers.Errs{} - errs.Add( - vdrs.Add(vdrID0, nil, ids.Empty, 1000), - vdrs.Add(vdrID1, nil, ids.Empty, 1000), - vdrs.Add(vdrID2, nil, ids.Empty, 1000), - vdrs.Add(vdrID3, nil, ids.Empty, 2000), - vdrs.Add(vdrID4, nil, ids.Empty, 100), - ) - if errs.Errored() { - t.Fatal(errs.Err) - } + require.NoError(vdrs.AddStaker(ctx.SubnetID, vdrID0, nil, ids.Empty, 1000)) + require.NoError(vdrs.AddStaker(ctx.SubnetID, vdrID1, nil, ids.Empty, 1000)) + require.NoError(vdrs.AddStaker(ctx.SubnetID, vdrID2, nil, ids.Empty, 1000)) + require.NoError(vdrs.AddStaker(ctx.SubnetID, vdrID3, nil, ids.Empty, 2000)) + require.NoError(vdrs.AddStaker(ctx.SubnetID, vdrID4, nil, ids.Empty, 100)) threshold := 3 duration := 1 * time.Hour // Shouldn't bench more than 2550 (5100/2) maxPortion := 0.5 benchIntf, err := NewBenchlist( - ids.Empty, - logging.NoLog{}, + ctx, &TestBenchable{T: t}, vdrs, threshold, minimumFailingDuration, duration, maxPortion, - prometheus.NewRegistry(), ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) b := benchIntf.(*benchlist) - defer b.timer.Stop() now := time.Now() b.clock.Set(now) @@ -225,12 +201,11 @@ func TestBenchlistMaxStake(t *testing.T) { // Benching vdr2 (weight 1000) would cause the amount benched // to exceed the maximum b.lock.Lock() - require.True(t, b.isBenched(vdrID0)) - require.True(t, b.isBenched(vdrID1)) - require.False(t, b.isBenched(vdrID2)) - require.Equal(t, b.benchedQueue.Len(), 2) - require.Equal(t, b.benchlistSet.Len(), 2) - require.Len(t, b.failureStreaks, 1) + require.Contains(b.benchlistSet, vdrID0) + require.Contains(b.benchlistSet, vdrID1) + require.Equal(2, b.benchedHeap.Len()) + require.Equal(2, b.benchlistSet.Len()) + require.Len(b.failureStreaks, 1) fs := b.failureStreaks[vdrID2] fs.consecutive = threshold fs.firstFailure = now @@ -252,15 +227,15 @@ func TestBenchlistMaxStake(t *testing.T) { // vdr4 should be benched now b.lock.Lock() - require.True(t, b.isBenched(vdrID0)) - require.True(t, 
b.isBenched(vdrID1)) - require.True(t, b.isBenched(vdrID4)) - require.Equal(t, 3, b.benchedQueue.Len()) - require.Equal(t, 3, b.benchlistSet.Len()) - require.Contains(t, b.benchlistSet, vdrID0) - require.Contains(t, b.benchlistSet, vdrID1) - require.Contains(t, b.benchlistSet, vdrID4) - require.Len(t, b.failureStreaks, 1) // for vdr2 + require.Contains(b.benchlistSet, vdrID0) + require.Contains(b.benchlistSet, vdrID1) + require.Contains(b.benchlistSet, vdrID4) + require.Equal(3, b.benchedHeap.Len()) + require.Equal(3, b.benchlistSet.Len()) + require.Contains(b.benchlistSet, vdrID0) + require.Contains(b.benchlistSet, vdrID1) + require.Contains(b.benchlistSet, vdrID4) + require.Len(b.failureStreaks, 1) // for vdr2 b.lock.Unlock() // More failures for vdr2 shouldn't add it to the bench @@ -270,29 +245,23 @@ func TestBenchlistMaxStake(t *testing.T) { } b.lock.Lock() - require.True(t, b.isBenched(vdrID0)) - require.True(t, b.isBenched(vdrID1)) - require.True(t, b.isBenched(vdrID4)) - require.False(t, b.isBenched(vdrID2)) - require.Equal(t, 3, b.benchedQueue.Len()) - require.Equal(t, 3, b.benchlistSet.Len()) - require.Len(t, b.failureStreaks, 1) - require.Contains(t, b.failureStreaks, vdrID2) - - // Ensure the benched queue root has the min end time - minEndTime := b.benchedQueue[0].benchedUntil - benchedIDs := []ids.NodeID{vdrID0, vdrID1, vdrID4} - for _, benchedVdr := range b.benchedQueue { - require.Contains(t, benchedIDs, benchedVdr.nodeID) - require.True(t, !benchedVdr.benchedUntil.Before(minEndTime)) - } - + require.Contains(b.benchlistSet, vdrID0) + require.Contains(b.benchlistSet, vdrID1) + require.Contains(b.benchlistSet, vdrID4) + require.Equal(3, b.benchedHeap.Len()) + require.Equal(3, b.benchlistSet.Len()) + require.Len(b.failureStreaks, 1) + require.Contains(b.failureStreaks, vdrID2) b.lock.Unlock() } // Test validators are removed from the bench correctly func TestBenchlistRemove(t *testing.T) { - vdrs := validators.NewSet() + require := require.New(t) + + 
snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) + vdrs := validators.NewManager() vdrID0 := ids.GenerateTestNodeID() vdrID1 := ids.GenerateTestNodeID() vdrID2 := ids.GenerateTestNodeID() @@ -300,17 +269,11 @@ func TestBenchlistRemove(t *testing.T) { vdrID4 := ids.GenerateTestNodeID() // Total weight is 5000 - errs := wrappers.Errs{} - errs.Add( - vdrs.Add(vdrID0, nil, ids.Empty, 1000), - vdrs.Add(vdrID1, nil, ids.Empty, 1000), - vdrs.Add(vdrID2, nil, ids.Empty, 1000), - vdrs.Add(vdrID3, nil, ids.Empty, 1000), - vdrs.Add(vdrID4, nil, ids.Empty, 1000), - ) - if errs.Errored() { - t.Fatal(errs.Err) - } + require.NoError(vdrs.AddStaker(ctx.SubnetID, vdrID0, nil, ids.Empty, 1000)) + require.NoError(vdrs.AddStaker(ctx.SubnetID, vdrID1, nil, ids.Empty, 1000)) + require.NoError(vdrs.AddStaker(ctx.SubnetID, vdrID2, nil, ids.Empty, 1000)) + require.NoError(vdrs.AddStaker(ctx.SubnetID, vdrID3, nil, ids.Empty, 1000)) + require.NoError(vdrs.AddStaker(ctx.SubnetID, vdrID4, nil, ids.Empty, 1000)) count := 0 benchable := &TestBenchable{ @@ -325,21 +288,16 @@ func TestBenchlistRemove(t *testing.T) { duration := 2 * time.Second maxPortion := 0.76 // can bench 3 of the 5 validators benchIntf, err := NewBenchlist( - ids.Empty, - logging.NoLog{}, + ctx, benchable, vdrs, threshold, minimumFailingDuration, duration, maxPortion, - prometheus.NewRegistry(), ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) b := benchIntf.(*benchlist) - defer b.timer.Stop() now := time.Now() b.lock.Lock() b.clock.Set(now) @@ -364,20 +322,12 @@ func TestBenchlistRemove(t *testing.T) { // All 3 should be benched b.lock.Lock() - require.True(t, b.isBenched(vdrID0)) - require.True(t, b.isBenched(vdrID1)) - require.True(t, b.isBenched(vdrID2)) - require.Equal(t, 3, b.benchedQueue.Len()) - require.Equal(t, 3, b.benchlistSet.Len()) - require.Len(t, b.failureStreaks, 0) - - // Ensure the benched queue root has the min end time - minEndTime := 
b.benchedQueue[0].benchedUntil - benchedIDs := []ids.NodeID{vdrID0, vdrID1, vdrID2} - for _, benchedVdr := range b.benchedQueue { - require.Contains(t, benchedIDs, benchedVdr.nodeID) - require.True(t, !benchedVdr.benchedUntil.Before(minEndTime)) - } + require.Contains(b.benchlistSet, vdrID0) + require.Contains(b.benchlistSet, vdrID1) + require.Contains(b.benchlistSet, vdrID2) + require.Equal(3, b.benchedHeap.Len()) + require.Equal(3, b.benchlistSet.Len()) + require.Empty(b.failureStreaks) // Set the benchlist's clock past when all validators should be unbenched // so that when its timer fires, it can remove them @@ -386,7 +336,6 @@ func TestBenchlistRemove(t *testing.T) { // Make sure each validator is eventually removed require.Eventually( - t, func() bool { return !b.IsBenched(vdrID0) }, @@ -395,7 +344,6 @@ func TestBenchlistRemove(t *testing.T) { ) require.Eventually( - t, func() bool { return !b.IsBenched(vdrID1) }, @@ -404,7 +352,6 @@ func TestBenchlistRemove(t *testing.T) { ) require.Eventually( - t, func() bool { return !b.IsBenched(vdrID2) }, @@ -412,5 +359,5 @@ func TestBenchlistRemove(t *testing.T) { 100*time.Millisecond, ) - require.Equal(t, 3, count) + require.Equal(3, count) } diff --git a/avalanchego/snow/networking/benchlist/manager.go b/avalanchego/snow/networking/benchlist/manager.go index fb0daf33..e6ac45da 100644 --- a/avalanchego/snow/networking/benchlist/manager.go +++ b/avalanchego/snow/networking/benchlist/manager.go @@ -1,24 +1,18 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package benchlist import ( - "errors" "sync" "time" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/validators" - "github.com/ava-labs/avalanchego/utils/constants" ) -var ( - errUnknownValidators = errors.New("unknown validator set for provided chain") - - _ Manager = (*manager)(nil) -) +var _ Manager = (*manager)(nil) // Manager provides an interface for a benchlist to register whether // queries have been successful or unsuccessful and place validators with @@ -47,7 +41,6 @@ type Manager interface { type Config struct { Benchable Benchable `json:"-"` Validators validators.Manager `json:"-"` - StakingEnabled bool `json:"-"` Threshold int `json:"threshold"` MinimumFailingDuration time.Duration `json:"minimumFailingDuration"` Duration time.Duration `json:"duration"` @@ -115,30 +108,14 @@ func (m *manager) RegisterChain(ctx *snow.ConsensusContext) error { return nil } - var ( - vdrs validators.Set - ok bool - ) - if m.config.StakingEnabled { - vdrs, ok = m.config.Validators.Get(ctx.SubnetID) - } else { - // If staking is disabled, everyone validates every chain - vdrs, ok = m.config.Validators.Get(constants.PrimaryNetworkID) - } - if !ok { - return errUnknownValidators - } - benchlist, err := NewBenchlist( - ctx.ChainID, - ctx.Log, + ctx, m.config.Benchable, - vdrs, + m.config.Validators, m.config.Threshold, m.config.MinimumFailingDuration, m.config.Duration, m.config.MaxPortion, - ctx.Registerer, ) if err != nil { return err diff --git a/avalanchego/snow/networking/benchlist/metrics.go b/avalanchego/snow/networking/benchlist/metrics.go index 12da52d3..25f9e50f 100644 --- a/avalanchego/snow/networking/benchlist/metrics.go +++ b/avalanchego/snow/networking/benchlist/metrics.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package benchlist diff --git a/avalanchego/snow/networking/benchlist/test_benchable.go b/avalanchego/snow/networking/benchlist/test_benchable.go index 1655d808..dabfab56 100644 --- a/avalanchego/snow/networking/benchlist/test_benchable.go +++ b/avalanchego/snow/networking/benchlist/test_benchable.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package benchlist @@ -6,6 +6,8 @@ package benchlist import ( "testing" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/ids" ) @@ -26,7 +28,7 @@ func (b *TestBenchable) Benched(chainID ids.ID, validatorID ids.NodeID) { if b.BenchedF != nil { b.BenchedF(chainID, validatorID) } else if b.CantBenched && b.T != nil { - b.T.Fatalf("Unexpectedly called Benched") + require.FailNow(b.T, "Unexpectedly called Benched") } } @@ -34,6 +36,6 @@ func (b *TestBenchable) Unbenched(chainID ids.ID, validatorID ids.NodeID) { if b.UnbenchedF != nil { b.UnbenchedF(chainID, validatorID) } else if b.CantUnbenched && b.T != nil { - b.T.Fatalf("Unexpectedly called Unbenched") + require.FailNow(b.T, "Unexpectedly called Unbenched") } } diff --git a/avalanchego/snow/networking/handler/engine.go b/avalanchego/snow/networking/handler/engine.go index 94ae54ff..e3de84ac 100644 --- a/avalanchego/snow/networking/handler/engine.go +++ b/avalanchego/snow/networking/handler/engine.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package handler diff --git a/avalanchego/snow/networking/handler/engine_test.go b/avalanchego/snow/networking/handler/engine_test.go index 9eb6752c..e9b2b8ae 100644 --- a/avalanchego/snow/networking/handler/engine_test.go +++ b/avalanchego/snow/networking/handler/engine_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package handler @@ -58,14 +58,12 @@ func TestEngineManager_Get(t *testing.T) { } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - r := require.New(t) - e := EngineManager{ Avalanche: avalanche, Snowman: snowman, } - r.Equal(test.expected.engine, e.Get(test.args.engineType)) + require.Equal(t, test.expected.engine, e.Get(test.args.engineType)) }) } } diff --git a/avalanchego/snow/networking/handler/handler.go b/avalanchego/snow/networking/handler/handler.go index 81b8a9c5..7a62dd7d 100644 --- a/avalanchego/snow/networking/handler/handler.go +++ b/avalanchego/snow/networking/handler/handler.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package handler @@ -8,14 +8,14 @@ import ( "errors" "fmt" "sync" + "sync/atomic" "time" "github.com/prometheus/client_golang/prometheus" - "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" - "go.uber.org/zap" + "golang.org/x/sync/errgroup" "github.com/ava-labs/avalanchego/api/health" "github.com/ava-labs/avalanchego/ids" @@ -24,11 +24,13 @@ import ( "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/networking/tracker" - "github.com/ava-labs/avalanchego/snow/networking/worker" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/subnets" - "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/timer/mockable" + + commontracker "github.com/ava-labs/avalanchego/snow/engine/common/tracker" ) const ( @@ -62,9 +64,13 @@ type Handler interface { Start(ctx context.Context, recoverPanic bool) Push(ctx context.Context, msg Message) Len() int + Stop(ctx context.Context) StopWithError(ctx context.Context, err error) - Stopped() chan struct{} + // AwaitStopped returns an error if the call would block and [ctx] is done. + // Even if [ctx] is done when passed into this function, this function will + // return a nil error if it will not block. + AwaitStopped(ctx context.Context) (time.Duration, error) } // handler passes incoming messages from the network to the consensus engine. 
@@ -76,8 +82,9 @@ type handler struct { clock mockable.Clock ctx *snow.ConsensusContext - // The validator set that validates this chain - validators validators.Set + // TODO: consider using peerTracker instead of validators + // since peerTracker is already tracking validators + validators validators.Manager // Receives messages from the VM msgFromVMChan <-chan common.Message preemptTimeouts chan struct{} @@ -99,46 +106,53 @@ type handler struct { // [unprocessedAsyncMsgsCond.L] must be held while accessing [asyncMessageQueue]. asyncMessageQueue MessageQueue // Worker pool for handling asynchronous consensus messages - asyncMessagePool worker.Pool + asyncMessagePool errgroup.Group timeouts chan struct{} closeOnce sync.Once + startClosingTime time.Time + totalClosingTime time.Duration closingChan chan struct{} - numDispatchersClosed int + numDispatchersClosed atomic.Uint32 // Closed when this handler and [engine] are done shutting down closed chan struct{} subnetConnector validators.SubnetConnector - subnetAllower subnets.Allower + subnet subnets.Subnet + + // Tracks the peers that are currently connected to this subnet + peerTracker commontracker.Peers } // Initialize this consensus handler // [engine] must be initialized before initializing this handler func New( ctx *snow.ConsensusContext, - validators validators.Set, + validators validators.Manager, msgFromVMChan <-chan common.Message, gossipFrequency time.Duration, threadPoolSize int, resourceTracker tracker.ResourceTracker, subnetConnector validators.SubnetConnector, subnet subnets.Subnet, + peerTracker commontracker.Peers, ) (Handler, error) { h := &handler{ - ctx: ctx, - validators: validators, - msgFromVMChan: msgFromVMChan, - preemptTimeouts: subnet.OnBootstrapCompleted(), - gossipFrequency: gossipFrequency, - asyncMessagePool: worker.NewPool(threadPoolSize), - timeouts: make(chan struct{}, 1), - closingChan: make(chan struct{}), - closed: make(chan struct{}), - resourceTracker: resourceTracker, - 
subnetConnector: subnetConnector, - subnetAllower: subnet, + ctx: ctx, + validators: validators, + msgFromVMChan: msgFromVMChan, + preemptTimeouts: subnet.OnBootstrapCompleted(), + gossipFrequency: gossipFrequency, + timeouts: make(chan struct{}, 1), + closingChan: make(chan struct{}), + closed: make(chan struct{}), + resourceTracker: resourceTracker, + subnetConnector: subnetConnector, + subnet: subnet, + peerTracker: peerTracker, } + h.asyncMessagePool.SetLimit(threadPoolSize) var err error @@ -147,11 +161,11 @@ func New( return nil, fmt.Errorf("initializing handler metrics errored with: %w", err) } cpuTracker := resourceTracker.CPUTracker() - h.syncMessageQueue, err = NewMessageQueue(h.ctx.Log, h.validators, cpuTracker, "handler", h.ctx.Registerer, message.SynchronousOps) + h.syncMessageQueue, err = NewMessageQueue(h.ctx, h.validators, cpuTracker, "handler", message.SynchronousOps) if err != nil { return nil, fmt.Errorf("initializing sync message queue errored with: %w", err) } - h.asyncMessageQueue, err = NewMessageQueue(h.ctx.Log, h.validators, cpuTracker, "handler_async", h.ctx.Registerer, message.AsynchronousOps) + h.asyncMessageQueue, err = NewMessageQueue(h.ctx, h.validators, cpuTracker, "handler_async", message.AsynchronousOps) if err != nil { return nil, fmt.Errorf("initializing async message queue errored with: %w", err) } @@ -163,7 +177,8 @@ func (h *handler) Context() *snow.ConsensusContext { } func (h *handler) ShouldHandle(nodeID ids.NodeID) bool { - return h.subnetAllower.IsAllowed(nodeID, h.validators.Contains(nodeID)) + _, ok := h.validators.GetValidator(h.ctx.SubnetID, nodeID) + return h.subnet.IsAllowed(nodeID, ok) } func (h *handler) SetEngineManager(engineManager *EngineManager) { @@ -198,31 +213,31 @@ func (h *handler) selectStartingGear(ctx context.Context) (common.Engine, error) } // drop bootstrap state from previous runs before starting state sync - return engines.StateSyncer, engines.Bootstrapper.Clear() + return engines.StateSyncer, 
engines.Bootstrapper.Clear(ctx) } func (h *handler) Start(ctx context.Context, recoverPanic bool) { - h.ctx.Lock.Lock() - defer h.ctx.Lock.Unlock() - gear, err := h.selectStartingGear(ctx) if err != nil { h.ctx.Log.Error("chain failed to select starting gear", zap.Error(err), ) - h.shutdown(ctx) + h.shutdown(ctx, h.clock.Time()) return } - if err := gear.Start(ctx, 0); err != nil { + h.ctx.Lock.Lock() + err = gear.Start(ctx, 0) + h.ctx.Lock.Unlock() + if err != nil { h.ctx.Log.Error("chain failed to start", zap.Error(err), ) - h.shutdown(ctx) + h.shutdown(ctx, h.clock.Time()) return } - detachedCtx := utils.Detach(ctx) + detachedCtx := context.WithoutCancel(ctx) dispatchSync := func() { h.dispatchSync(detachedCtx) } @@ -249,28 +264,11 @@ func (h *handler) Start(ctx context.Context, recoverPanic bool) { } } -func (h *handler) HealthCheck(ctx context.Context) (interface{}, error) { - h.ctx.Lock.Lock() - defer h.ctx.Lock.Unlock() - - state := h.ctx.State.Get() - engine, ok := h.engineManager.Get(state.Type).Get(state.State) - if !ok { - return nil, fmt.Errorf( - "%w %s running %s", - errMissingEngine, - state.State, - state.Type, - ) - } - return engine.HealthCheck(ctx) -} - // Push the message onto the handler's queue func (h *handler) Push(ctx context.Context, msg Message) { switch msg.Op() { - case message.AppRequestOp, message.AppRequestFailedOp, message.AppResponseOp, message.AppGossipOp, - message.CrossChainAppRequestOp, message.CrossChainAppRequestFailedOp, message.CrossChainAppResponseOp: + case message.AppRequestOp, message.AppErrorOp, message.AppResponseOp, message.AppGossipOp, + message.CrossChainAppRequestOp, message.CrossChainAppErrorOp, message.CrossChainAppResponseOp: h.asyncMessageQueue.Push(ctx, msg) default: h.syncMessageQueue.Push(ctx, msg) @@ -302,8 +300,12 @@ func (h *handler) RegisterTimeout(d time.Duration) { } // Note: It is possible for Stop to be called before/concurrently with Start. +// +// Invariant: Stop must never block. 
func (h *handler) Stop(ctx context.Context) { h.closeOnce.Do(func() { + h.startClosingTime = h.clock.Time() + // Must hold the locks here to ensure there's no race condition in where // we check the value of [h.closing] after the call to [Signal]. h.syncMessageQueue.Shutdown() @@ -322,7 +324,7 @@ func (h *handler) Stop(ctx context.Context) { state := h.ctx.State.Get() bootstrapper, ok := h.engineManager.Get(state.Type).Get(snow.Bootstrapping) if !ok { - h.ctx.Log.Error("bootstrapping engine doesn't exists", + h.ctx.Log.Error("bootstrapping engine doesn't exist", zap.Stringer("type", state.Type), ) return @@ -339,8 +341,19 @@ func (h *handler) StopWithError(ctx context.Context, err error) { h.Stop(ctx) } -func (h *handler) Stopped() chan struct{} { - return h.closed +func (h *handler) AwaitStopped(ctx context.Context) (time.Duration, error) { + select { + case <-h.closed: + return h.totalClosingTime, nil + default: + } + + select { + case <-ctx.Done(): + return 0, ctx.Err() + case <-h.closed: + return h.totalClosingTime, nil + } } func (h *handler) dispatchSync(ctx context.Context) { @@ -369,7 +382,9 @@ func (h *handler) dispatchSync(ctx context.Context) { func (h *handler) dispatchAsync(ctx context.Context) { defer func() { - h.asyncMessagePool.Shutdown() + // We never return an error in any of our functions, so it is safe to + // drop any error here. 
+ _ = h.asyncMessagePool.Wait() h.closeDispatcher(ctx) }() @@ -412,7 +427,7 @@ func (h *handler) dispatchChans(ctx context.Context) { if err := h.handleChanMsg(msg); err != nil { h.StopWithError(ctx, fmt.Errorf( - "%w while processing async message: %s", + "%w while processing chan message: %s", err, msg, )) @@ -432,15 +447,18 @@ func (h *handler) handleSyncMsg(ctx context.Context, msg Message) error { // execution (may change during execution) isNormalOp = h.ctx.State.Get().State == snow.NormalOp ) - h.ctx.Log.Debug("forwarding sync message to consensus", - zap.Stringer("nodeID", nodeID), - zap.Stringer("messageOp", op), - ) - h.ctx.Log.Verbo("forwarding sync message to consensus", - zap.Stringer("nodeID", nodeID), - zap.Stringer("messageOp", op), - zap.Any("message", body), - ) + if h.ctx.Log.Enabled(logging.Verbo) { + h.ctx.Log.Verbo("forwarding sync message to consensus", + zap.Stringer("nodeID", nodeID), + zap.Stringer("messageOp", op), + zap.Stringer("message", body), + ) + } else { + h.ctx.Log.Debug("forwarding sync message to consensus", + zap.Stringer("nodeID", nodeID), + zap.Stringer("messageOp", op), + ) + } h.resourceTracker.StartProcessing(nodeID, startTime) h.ctx.Lock.Lock() lockAcquiredTime := h.clock.Time() @@ -450,12 +468,12 @@ func (h *handler) handleSyncMsg(ctx context.Context, msg Message) error { var ( endTime = h.clock.Time() messageHistograms = h.metrics.messages[op] - msgHandlingTime = lockAcquiredTime.Sub(startTime) processingTime = endTime.Sub(startTime) + msgHandlingTime = endTime.Sub(lockAcquiredTime) ) h.resourceTracker.StopProcessing(nodeID, endTime) - messageHistograms.msgHandlingTime.Observe(float64(msgHandlingTime)) messageHistograms.processingTime.Observe(float64(processingTime)) + messageHistograms.msgHandlingTime.Observe(float64(msgHandlingTime)) msg.OnFinishedHandling() h.ctx.Log.Debug("finished handling sync message", zap.Stringer("messageOp", op), @@ -466,7 +484,7 @@ func (h *handler) handleSyncMsg(ctx context.Context, msg 
Message) error { zap.Duration("msgHandlingTime", msgHandlingTime), zap.Stringer("nodeID", nodeID), zap.Stringer("messageOp", op), - zap.Any("message", body), + zap.Stringer("message", body), ) } }() @@ -535,23 +553,11 @@ func (h *handler) handleSyncMsg(ctx context.Context, msg Message) error { return engine.GetStateSummaryFrontierFailed(ctx, nodeID, msg.RequestID) case *p2p.GetAcceptedStateSummary: - // TODO: Enforce that the numbers are sorted to make this verification - // more efficient. - if !utils.IsUnique(msg.Heights) { - h.ctx.Log.Debug("message with invalid field", - zap.Stringer("nodeID", nodeID), - zap.Stringer("messageOp", message.GetAcceptedStateSummaryOp), - zap.Uint32("requestID", msg.RequestId), - zap.String("field", "Heights"), - ) - return engine.GetAcceptedStateSummaryFailed(ctx, nodeID, msg.RequestId) - } - return engine.GetAcceptedStateSummary( ctx, nodeID, msg.RequestId, - msg.Heights, + set.Of(msg.Heights...), ) case *p2p.AcceptedStateSummary: @@ -578,19 +584,19 @@ func (h *handler) handleSyncMsg(ctx context.Context, msg Message) error { return engine.GetAcceptedFrontier(ctx, nodeID, msg.RequestId) case *p2p.AcceptedFrontier: - containerIDs, err := getIDs(msg.ContainerIds) + containerID, err := ids.ToID(msg.ContainerId) if err != nil { h.ctx.Log.Debug("message with invalid field", zap.Stringer("nodeID", nodeID), zap.Stringer("messageOp", message.AcceptedFrontierOp), zap.Uint32("requestID", msg.RequestId), - zap.String("field", "ContainerIDs"), + zap.String("field", "ContainerID"), zap.Error(err), ) return engine.GetAcceptedFrontierFailed(ctx, nodeID, msg.RequestId) } - return engine.AcceptedFrontier(ctx, nodeID, msg.RequestId, containerIDs) + return engine.AcceptedFrontier(ctx, nodeID, msg.RequestId, containerID) case *message.GetAcceptedFrontierFailed: return engine.GetAcceptedFrontierFailed(ctx, nodeID, msg.RequestID) @@ -671,7 +677,7 @@ func (h *handler) handleSyncMsg(ctx context.Context, msg Message) error { return engine.Put(ctx, nodeID, 
msg.RequestId, msg.Container) case *p2p.PushQuery: - return engine.PushQuery(ctx, nodeID, msg.RequestId, msg.Container) + return engine.PushQuery(ctx, nodeID, msg.RequestId, msg.Container, msg.RequestedHeight) case *p2p.PullQuery: containerID, err := ids.ToID(msg.ContainerId) @@ -686,46 +692,68 @@ func (h *handler) handleSyncMsg(ctx context.Context, msg Message) error { return nil } - return engine.PullQuery(ctx, nodeID, msg.RequestId, containerID) + return engine.PullQuery(ctx, nodeID, msg.RequestId, containerID, msg.RequestedHeight) case *p2p.Chits: - votes, err := getIDs(msg.PreferredContainerIds) + preferredID, err := ids.ToID(msg.PreferredId) if err != nil { h.ctx.Log.Debug("message with invalid field", zap.Stringer("nodeID", nodeID), zap.Stringer("messageOp", message.ChitsOp), zap.Uint32("requestID", msg.RequestId), - zap.String("field", "PreferredContainerIDs"), + zap.String("field", "PreferredID"), zap.Error(err), ) return engine.QueryFailed(ctx, nodeID, msg.RequestId) } - accepted, err := getIDs(msg.AcceptedContainerIds) + preferredIDAtHeight, err := ids.ToID(msg.PreferredIdAtHeight) + if err != nil { + h.ctx.Log.Debug("message with invalid field", + zap.Stringer("nodeID", nodeID), + zap.Stringer("messageOp", message.ChitsOp), + zap.Uint32("requestID", msg.RequestId), + zap.String("field", "PreferredIDAtHeight"), + zap.Error(err), + ) + // TODO: Require this field to be populated correctly after v1.11.x + // is activated. 
+ preferredIDAtHeight = preferredID + } + + acceptedID, err := ids.ToID(msg.AcceptedId) if err != nil { h.ctx.Log.Debug("message with invalid field", zap.Stringer("nodeID", nodeID), zap.Stringer("messageOp", message.ChitsOp), zap.Uint32("requestID", msg.RequestId), - zap.String("field", "AcceptedContainerIDs"), + zap.String("field", "AcceptedID"), zap.Error(err), ) return engine.QueryFailed(ctx, nodeID, msg.RequestId) } - return engine.Chits(ctx, nodeID, msg.RequestId, votes, accepted) + return engine.Chits(ctx, nodeID, msg.RequestId, preferredID, preferredIDAtHeight, acceptedID) case *message.QueryFailed: return engine.QueryFailed(ctx, nodeID, msg.RequestID) // Connection messages can be sent to the currently executing engine case *message.Connected: + err := h.peerTracker.Connected(ctx, nodeID, msg.NodeVersion) + if err != nil { + return err + } return engine.Connected(ctx, nodeID, msg.NodeVersion) case *message.ConnectedSubnet: return h.subnetConnector.ConnectedSubnet(ctx, nodeID, msg.SubnetID) case *message.Disconnected: + err := h.peerTracker.Disconnected(ctx, nodeID) + if err != nil { + return err + } return engine.Disconnected(ctx, nodeID) default: @@ -737,7 +765,7 @@ func (h *handler) handleSyncMsg(ctx context.Context, msg Message) error { } func (h *handler) handleAsyncMsg(ctx context.Context, msg Message) { - h.asyncMessagePool.Send(func() { + h.asyncMessagePool.Go(func() error { if err := h.executeAsyncMsg(ctx, msg); err != nil { h.StopWithError(ctx, fmt.Errorf( "%w while processing async message: %s", @@ -745,6 +773,7 @@ func (h *handler) handleAsyncMsg(ctx context.Context, msg Message) { msg, )) } + return nil }) } @@ -756,15 +785,18 @@ func (h *handler) executeAsyncMsg(ctx context.Context, msg Message) error { body = msg.Message() startTime = h.clock.Time() ) - h.ctx.Log.Debug("forwarding async message to consensus", - zap.Stringer("nodeID", nodeID), - zap.Stringer("messageOp", op), - ) - h.ctx.Log.Verbo("forwarding async message to consensus", - 
zap.Stringer("nodeID", nodeID), - zap.Stringer("messageOp", op), - zap.Any("message", body), - ) + if h.ctx.Log.Enabled(logging.Verbo) { + h.ctx.Log.Verbo("forwarding async message to consensus", + zap.Stringer("nodeID", nodeID), + zap.Stringer("messageOp", op), + zap.Stringer("message", body), + ) + } else { + h.ctx.Log.Debug("forwarding async message to consensus", + zap.Stringer("nodeID", nodeID), + zap.Stringer("messageOp", op), + ) + } h.resourceTracker.StartProcessing(nodeID, startTime) defer func() { var ( @@ -806,8 +838,18 @@ func (h *handler) executeAsyncMsg(ctx context.Context, msg Message) error { case *p2p.AppResponse: return engine.AppResponse(ctx, nodeID, m.RequestId, m.AppBytes) - case *message.AppRequestFailed: - return engine.AppRequestFailed(ctx, nodeID, m.RequestID) + case *p2p.AppError: + err := &common.AppError{ + Code: m.ErrorCode, + Message: m.ErrorMessage, + } + + return engine.AppRequestFailed( + ctx, + nodeID, + m.RequestId, + err, + ) case *p2p.AppGossip: return engine.AppGossip(ctx, nodeID, m.AppBytes) @@ -830,10 +872,16 @@ func (h *handler) executeAsyncMsg(ctx context.Context, msg Message) error { ) case *message.CrossChainAppRequestFailed: + err := &common.AppError{ + Code: m.ErrorCode, + Message: m.ErrorMessage, + } + return engine.CrossChainAppRequestFailed( ctx, m.SourceChainID, m.RequestID, + err, ) default: @@ -854,13 +902,16 @@ func (h *handler) handleChanMsg(msg message.InboundMessage) error { // execution (may change during execution) isNormalOp = h.ctx.State.Get().State == snow.NormalOp ) - h.ctx.Log.Debug("forwarding chan message to consensus", - zap.Stringer("messageOp", op), - ) - h.ctx.Log.Verbo("forwarding chan message to consensus", - zap.Stringer("messageOp", op), - zap.Any("message", body), - ) + if h.ctx.Log.Enabled(logging.Verbo) { + h.ctx.Log.Verbo("forwarding chan message to consensus", + zap.Stringer("messageOp", op), + zap.Stringer("message", body), + ) + } else { + h.ctx.Log.Debug("forwarding chan message to 
consensus", + zap.Stringer("messageOp", op), + ) + } h.ctx.Lock.Lock() lockAcquiredTime := h.clock.Time() defer func() { @@ -869,11 +920,11 @@ func (h *handler) handleChanMsg(msg message.InboundMessage) error { var ( endTime = h.clock.Time() messageHistograms = h.metrics.messages[op] - msgHandlingTime = lockAcquiredTime.Sub(startTime) processingTime = endTime.Sub(startTime) + msgHandlingTime = endTime.Sub(lockAcquiredTime) ) - messageHistograms.msgHandlingTime.Observe(float64(msgHandlingTime)) messageHistograms.processingTime.Observe(float64(processingTime)) + messageHistograms.msgHandlingTime.Observe(float64(msgHandlingTime)) msg.OnFinishedHandling() h.ctx.Log.Debug("finished handling chan message", zap.Stringer("messageOp", op), @@ -883,7 +934,7 @@ func (h *handler) handleChanMsg(msg message.InboundMessage) error { zap.Duration("processingTime", processingTime), zap.Duration("msgHandlingTime", msgHandlingTime), zap.Stringer("messageOp", op), - zap.Any("message", body), + zap.Stringer("message", body), ) } }() @@ -904,19 +955,6 @@ func (h *handler) handleChanMsg(msg message.InboundMessage) error { return engine.Notify(context.TODO(), common.Message(msg.Notification)) case *message.GossipRequest: - // TODO: After Cortina is activated, this can be removed as everyone - // will have accepted the StopVertex. - if state.Type == p2p.EngineType_ENGINE_TYPE_SNOWMAN { - avalancheEngine, ok := h.engineManager.Get(p2p.EngineType_ENGINE_TYPE_AVALANCHE).Get(state.State) - if ok { - // This chain was linearized, so we should gossip the Avalanche - // accepted frontier to make sure everyone eventually linearizes - // the chain. - if err := avalancheEngine.Gossip(context.TODO()); err != nil { - return err - } - } - } return engine.Gossip(context.TODO()) case *message.Timeout: @@ -962,24 +1000,24 @@ func (h *handler) popUnexpiredMsg( } } +// Invariant: if closeDispatcher is called, Stop has already been called. 
func (h *handler) closeDispatcher(ctx context.Context) { - h.ctx.Lock.Lock() - defer h.ctx.Lock.Unlock() - - h.numDispatchersClosed++ - if h.numDispatchersClosed < numDispatchersToClose { + if h.numDispatchersClosed.Add(1) < numDispatchersToClose { return } - h.shutdown(ctx) + h.shutdown(ctx, h.startClosingTime) } -// Note: shutdown is only called after all message dispatchers have exited. -func (h *handler) shutdown(ctx context.Context) { +// Note: shutdown is only called after all message dispatchers have exited or if +// no message dispatchers ever started. +func (h *handler) shutdown(ctx context.Context, startClosingTime time.Time) { defer func() { if h.onStopped != nil { go h.onStopped() } + + h.totalClosingTime = h.clock.Time().Sub(startClosingTime) close(h.closed) }() diff --git a/avalanchego/snow/networking/handler/handler_test.go b/avalanchego/snow/networking/handler/handler_test.go index b9e757b7..dbf378c0 100644 --- a/avalanchego/snow/networking/handler/handler_test.go +++ b/avalanchego/snow/networking/handler/handler_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package handler @@ -10,11 +10,9 @@ import ( "testing" "time" - "github.com/golang/mock/gomock" - "github.com/prometheus/client_golang/prometheus" - "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/message" @@ -22,10 +20,14 @@ import ( "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/networking/tracker" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/subnets" "github.com/ava-labs/avalanchego/utils/math/meter" "github.com/ava-labs/avalanchego/utils/resource" + "github.com/ava-labs/avalanchego/utils/set" + + commontracker "github.com/ava-labs/avalanchego/snow/engine/common/tracker" ) const testThreadPoolSize = 2 @@ -33,14 +35,16 @@ const testThreadPoolSize = 2 var errFatal = errors.New("error should cause handler to close") func TestHandlerDropsTimedOutMessages(t *testing.T) { + require := require.New(t) + called := make(chan struct{}) - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) - vdrs := validators.NewSet() + vdrs := validators.NewManager() vdr0 := ids.GenerateTestNodeID() - err := vdrs.Add(vdr0, nil, ids.Empty, 1) - require.NoError(t, err) + require.NoError(vdrs.AddStaker(ctx.SubnetID, vdr0, nil, ids.Empty, 1)) resourceTracker, err := tracker.NewResourceTracker( prometheus.NewRegistry(), @@ -48,7 +52,7 @@ func TestHandlerDropsTimedOutMessages(t *testing.T) { meter.ContinuousFactory{}, time.Second, ) - require.NoError(t, err) + require.NoError(err) handlerIntf, err := New( ctx, vdrs, @@ -58,14 +62,12 @@ func TestHandlerDropsTimedOutMessages(t *testing.T) { resourceTracker, validators.UnhandledSubnetConnector, subnets.New(ctx.NodeID, subnets.Config{}), + commontracker.NewPeers(), ) - require.NoError(t, err) + 
require.NoError(err) handler := handlerIntf.(*handler) bootstrapper := &common.BootstrapperTest{ - BootstrapableTest: common.BootstrapableTest{ - T: t, - }, EngineTest: common.EngineTest{ T: t, }, @@ -74,11 +76,11 @@ func TestHandlerDropsTimedOutMessages(t *testing.T) { bootstrapper.ContextF = func() *snow.ConsensusContext { return ctx } - bootstrapper.GetAcceptedFrontierF = func(ctx context.Context, nodeID ids.NodeID, requestID uint32) error { - t.Fatalf("GetAcceptedFrontier message should have timed out") + bootstrapper.GetAcceptedFrontierF = func(context.Context, ids.NodeID, uint32) error { + require.FailNow("GetAcceptedFrontier message should have timed out") return nil } - bootstrapper.GetAcceptedF = func(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerIDs []ids.ID) error { + bootstrapper.GetAcceptedF = func(context.Context, ids.NodeID, uint32, set.Set[ids.ID]) error { called <- struct{}{} return nil } @@ -124,18 +126,20 @@ func TestHandlerDropsTimedOutMessages(t *testing.T) { defer ticker.Stop() select { case <-ticker.C: - t.Fatalf("Calling engine function timed out") + require.FailNow("Calling engine function timed out") case <-called: } } func TestHandlerClosesOnError(t *testing.T) { + require := require.New(t) + closed := make(chan struct{}, 1) - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) - vdrs := validators.NewSet() - err := vdrs.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1) - require.NoError(t, err) + vdrs := validators.NewManager() + require.NoError(vdrs.AddStaker(ctx.SubnetID, ids.GenerateTestNodeID(), nil, ids.Empty, 1)) resourceTracker, err := tracker.NewResourceTracker( prometheus.NewRegistry(), @@ -143,7 +147,7 @@ func TestHandlerClosesOnError(t *testing.T) { meter.ContinuousFactory{}, time.Second, ) - require.NoError(t, err) + require.NoError(err) handlerIntf, err := New( ctx, vdrs, @@ -153,8 +157,9 @@ func 
TestHandlerClosesOnError(t *testing.T) { resourceTracker, validators.UnhandledSubnetConnector, subnets.New(ctx.NodeID, subnets.Config{}), + commontracker.NewPeers(), ) - require.NoError(t, err) + require.NoError(err) handler := handlerIntf.(*handler) handler.clock.Set(time.Now()) @@ -163,9 +168,6 @@ func TestHandlerClosesOnError(t *testing.T) { }) bootstrapper := &common.BootstrapperTest{ - BootstrapableTest: common.BootstrapableTest{ - T: t, - }, EngineTest: common.EngineTest{ T: t, }, @@ -174,7 +176,7 @@ func TestHandlerClosesOnError(t *testing.T) { bootstrapper.ContextF = func() *snow.ConsensusContext { return ctx } - bootstrapper.GetAcceptedFrontierF = func(ctx context.Context, nodeID ids.NodeID, requestID uint32) error { + bootstrapper.GetAcceptedFrontierF = func(context.Context, ids.NodeID, uint32) error { return errFatal } @@ -216,17 +218,19 @@ func TestHandlerClosesOnError(t *testing.T) { ticker := time.NewTicker(time.Second) select { case <-ticker.C: - t.Fatalf("Handler shutdown timed out before calling toClose") + require.FailNow("Handler shutdown timed out before calling toClose") case <-closed: } } func TestHandlerDropsGossipDuringBootstrapping(t *testing.T) { + require := require.New(t) + closed := make(chan struct{}, 1) - ctx := snow.DefaultConsensusContextTest() - vdrs := validators.NewSet() - err := vdrs.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1) - require.NoError(t, err) + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) + vdrs := validators.NewManager() + require.NoError(vdrs.AddStaker(ctx.SubnetID, ids.GenerateTestNodeID(), nil, ids.Empty, 1)) resourceTracker, err := tracker.NewResourceTracker( prometheus.NewRegistry(), @@ -234,7 +238,7 @@ func TestHandlerDropsGossipDuringBootstrapping(t *testing.T) { meter.ContinuousFactory{}, time.Second, ) - require.NoError(t, err) + require.NoError(err) handlerIntf, err := New( ctx, vdrs, @@ -244,16 +248,14 @@ func TestHandlerDropsGossipDuringBootstrapping(t 
*testing.T) { resourceTracker, validators.UnhandledSubnetConnector, subnets.New(ctx.NodeID, subnets.Config{}), + commontracker.NewPeers(), ) - require.NoError(t, err) + require.NoError(err) handler := handlerIntf.(*handler) handler.clock.Set(time.Now()) bootstrapper := &common.BootstrapperTest{ - BootstrapableTest: common.BootstrapableTest{ - T: t, - }, EngineTest: common.EngineTest{ T: t, }, @@ -262,7 +264,7 @@ func TestHandlerDropsGossipDuringBootstrapping(t *testing.T) { bootstrapper.ContextF = func() *snow.ConsensusContext { return ctx } - bootstrapper.GetFailedF = func(ctx context.Context, nodeID ids.NodeID, requestID uint32) error { + bootstrapper.GetFailedF = func(context.Context, ids.NodeID, uint32) error { closed <- struct{}{} return nil } @@ -294,19 +296,20 @@ func TestHandlerDropsGossipDuringBootstrapping(t *testing.T) { ticker := time.NewTicker(time.Second) select { case <-ticker.C: - t.Fatalf("Handler shutdown timed out before calling toClose") + require.FailNow("Handler shutdown timed out before calling toClose") case <-closed: } } // Test that messages from the VM are handled func TestHandlerDispatchInternal(t *testing.T) { - calledNotify := make(chan struct{}, 1) - ctx := snow.DefaultConsensusContextTest() + require := require.New(t) + + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) msgFromVMChan := make(chan common.Message) - vdrs := validators.NewSet() - err := vdrs.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1) - require.NoError(t, err) + vdrs := validators.NewManager() + require.NoError(vdrs.AddStaker(ctx.SubnetID, ids.GenerateTestNodeID(), nil, ids.Empty, 1)) resourceTracker, err := tracker.NewResourceTracker( prometheus.NewRegistry(), @@ -314,7 +317,7 @@ func TestHandlerDispatchInternal(t *testing.T) { meter.ContinuousFactory{}, time.Second, ) - require.NoError(t, err) + require.NoError(err) handler, err := New( ctx, vdrs, @@ -324,13 +327,11 @@ func TestHandlerDispatchInternal(t *testing.T) { 
resourceTracker, validators.UnhandledSubnetConnector, subnets.New(ctx.NodeID, subnets.Config{}), + commontracker.NewPeers(), ) - require.NoError(t, err) + require.NoError(err) bootstrapper := &common.BootstrapperTest{ - BootstrapableTest: common.BootstrapableTest{ - T: t, - }, EngineTest: common.EngineTest{ T: t, }, @@ -342,8 +343,10 @@ func TestHandlerDispatchInternal(t *testing.T) { engine.ContextF = func() *snow.ConsensusContext { return ctx } + + wg := &sync.WaitGroup{} engine.NotifyF = func(context.Context, common.Message) error { - calledNotify <- struct{}{} + wg.Done() return nil } @@ -363,21 +366,19 @@ func TestHandlerDispatchInternal(t *testing.T) { return nil } + wg.Add(1) handler.Start(context.Background(), false) msgFromVMChan <- 0 - - select { - case <-time.After(20 * time.Millisecond): - t.Fatalf("should have called notify") - case <-calledNotify: - } + wg.Wait() } func TestHandlerSubnetConnector(t *testing.T) { - ctx := snow.DefaultConsensusContextTest() - vdrs := validators.NewSet() - err := vdrs.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1) - require.NoError(t, err) + require := require.New(t) + + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) + vdrs := validators.NewManager() + require.NoError(vdrs.AddStaker(ctx.SubnetID, ids.GenerateTestNodeID(), nil, ids.Empty, 1)) resourceTracker, err := tracker.NewResourceTracker( prometheus.NewRegistry(), @@ -385,14 +386,13 @@ func TestHandlerSubnetConnector(t *testing.T) { meter.ContinuousFactory{}, time.Second, ) + require.NoError(err) ctrl := gomock.NewController(t) - defer ctrl.Finish() connector := validators.NewMockSubnetConnector(ctrl) nodeID := ids.GenerateTestNodeID() subnetID := ids.GenerateTestID() - require.NoError(t, err) handler, err := New( ctx, vdrs, @@ -402,13 +402,11 @@ func TestHandlerSubnetConnector(t *testing.T) { resourceTracker, connector, subnets.New(ctx.NodeID, subnets.Config{}), + commontracker.NewPeers(), ) - require.NoError(t, err) 
+ require.NoError(err) bootstrapper := &common.BootstrapperTest{ - BootstrapableTest: common.BootstrapableTest{ - T: t, - }, EngineTest: common.EngineTest{ T: t, }, @@ -550,11 +548,13 @@ func TestDynamicEngineTypeDispatch(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { + require := require.New(t) + messageReceived := make(chan struct{}) - ctx := snow.DefaultConsensusContextTest() - vdrs := validators.NewSet() - err := vdrs.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1) - require.NoError(t, err) + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) + vdrs := validators.NewManager() + require.NoError(vdrs.AddStaker(ctx.SubnetID, ids.GenerateTestNodeID(), nil, ids.Empty, 1)) resourceTracker, err := tracker.NewResourceTracker( prometheus.NewRegistry(), @@ -562,7 +562,7 @@ func TestDynamicEngineTypeDispatch(t *testing.T) { meter.ContinuousFactory{}, time.Second, ) - require.NoError(t, err) + require.NoError(err) handler, err := New( ctx, vdrs, @@ -572,13 +572,11 @@ func TestDynamicEngineTypeDispatch(t *testing.T) { resourceTracker, validators.UnhandledSubnetConnector, subnets.New(ids.EmptyNodeID, subnets.Config{}), + commontracker.NewPeers(), ) - require.NoError(t, err) + require.NoError(err) bootstrapper := &common.BootstrapperTest{ - BootstrapableTest: common.BootstrapableTest{ - T: t, - }, EngineTest: common.EngineTest{ T: t, }, @@ -590,7 +588,7 @@ func TestDynamicEngineTypeDispatch(t *testing.T) { engine.ContextF = func() *snow.ConsensusContext { return ctx } - engine.ChitsF = func(ctx context.Context, nodeID ids.NodeID, requestID uint32, preferredIDs []ids.ID, acceptedIDs []ids.ID) error { + engine.ChitsF = func(context.Context, ids.NodeID, uint32, ids.ID, ids.ID, ids.ID) error { close(messageReceived) return nil } @@ -607,12 +605,13 @@ func TestDynamicEngineTypeDispatch(t *testing.T) { } handler.Start(context.Background(), false) - handler.Push(context.TODO(), Message{ + 
handler.Push(context.Background(), Message{ InboundMessage: message.InboundChits( ids.Empty, uint32(0), - nil, - nil, + ids.Empty, + ids.Empty, + ids.Empty, ids.EmptyNodeID, ), EngineType: test.requestedEngineType, @@ -622,3 +621,42 @@ func TestDynamicEngineTypeDispatch(t *testing.T) { }) } } + +func TestHandlerStartError(t *testing.T) { + require := require.New(t) + + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) + resourceTracker, err := tracker.NewResourceTracker( + prometheus.NewRegistry(), + resource.NoUsage, + meter.ContinuousFactory{}, + time.Second, + ) + require.NoError(err) + + handler, err := New( + ctx, + validators.NewManager(), + nil, + time.Second, + testThreadPoolSize, + resourceTracker, + nil, + subnets.New(ctx.NodeID, subnets.Config{}), + commontracker.NewPeers(), + ) + require.NoError(err) + + // Starting a handler with an unprovided engine should immediately cause the + // handler to shutdown. + handler.SetEngineManager(&EngineManager{}) + ctx.State.Set(snow.EngineState{ + Type: p2p.EngineType_ENGINE_TYPE_SNOWMAN, + State: snow.Initializing, + }) + handler.Start(context.Background(), false) + + _, err = handler.AwaitStopped(context.Background()) + require.NoError(err) +} diff --git a/avalanchego/snow/networking/handler/health.go b/avalanchego/snow/networking/handler/health.go new file mode 100644 index 00000000..3f4af429 --- /dev/null +++ b/avalanchego/snow/networking/handler/health.go @@ -0,0 +1,58 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package handler + +import ( + "context" + "errors" + "fmt" +) + +var ErrNotConnectedEnoughStake = errors.New("not connected to enough stake") + +func (h *handler) HealthCheck(ctx context.Context) (interface{}, error) { + state := h.ctx.State.Get() + engine, ok := h.engineManager.Get(state.Type).Get(state.State) + if !ok { + return nil, fmt.Errorf( + "%w %s running %s", + errMissingEngine, + state.State, + state.Type, + ) + } + engineIntf, engineErr := engine.HealthCheck(ctx) + networkingIntf, networkingErr := h.networkHealthCheck() + intf := map[string]interface{}{ + "engine": engineIntf, + "networking": networkingIntf, + } + if engineErr == nil { + return intf, networkingErr + } + if networkingErr == nil { + return intf, engineErr + } + return intf, fmt.Errorf("engine: %w; networking: %w", engineErr, networkingErr) +} + +func (h *handler) networkHealthCheck() (interface{}, error) { + percentConnected := h.peerTracker.ConnectedPercent() + details := map[string]float64{ + "percentConnected": percentConnected, + } + + var err error + subnetConfig := h.subnet.Config() + minPercentConnected := subnetConfig.ConsensusParameters.MinPercentConnectedHealthy() + if percentConnected < minPercentConnected { + err = fmt.Errorf("%w: connected to %f%%; required at least %f%%", + ErrNotConnectedEnoughStake, + percentConnected*100, + minPercentConnected*100, + ) + } + + return details, err +} diff --git a/avalanchego/snow/networking/handler/health_test.go b/avalanchego/snow/networking/handler/health_test.go new file mode 100644 index 00000000..adeb3430 --- /dev/null +++ b/avalanchego/snow/networking/handler/health_test.go @@ -0,0 +1,150 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package handler + +import ( + "context" + "testing" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/proto/pb/p2p" + "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/snow/consensus/snowball" + "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/snow/networking/tracker" + "github.com/ava-labs/avalanchego/snow/snowtest" + "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/subnets" + "github.com/ava-labs/avalanchego/utils/math/meter" + "github.com/ava-labs/avalanchego/utils/resource" + "github.com/ava-labs/avalanchego/utils/set" + + commontracker "github.com/ava-labs/avalanchego/snow/engine/common/tracker" +) + +func TestHealthCheckSubnet(t *testing.T) { + tests := map[string]struct { + consensusParams snowball.Parameters + }{ + "default consensus params": { + consensusParams: snowball.DefaultParameters, + }, + "custom consensus params": { + func() snowball.Parameters { + params := snowball.DefaultParameters + params.K = params.AlphaConfidence + return params + }(), + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + require := require.New(t) + + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) + + vdrs := validators.NewManager() + + resourceTracker, err := tracker.NewResourceTracker( + prometheus.NewRegistry(), + resource.NoUsage, + meter.ContinuousFactory{}, + time.Second, + ) + require.NoError(err) + + peerTracker := commontracker.NewPeers() + vdrs.RegisterCallbackListener(ctx.SubnetID, peerTracker) + + sb := subnets.New( + ctx.NodeID, + subnets.Config{ + ConsensusParameters: test.consensusParams, + }, + ) + handlerIntf, err := New( + ctx, + vdrs, + nil, + time.Second, + testThreadPoolSize, + resourceTracker, + validators.UnhandledSubnetConnector, + sb, + peerTracker, 
+ ) + require.NoError(err) + + bootstrapper := &common.BootstrapperTest{ + EngineTest: common.EngineTest{ + T: t, + }, + } + bootstrapper.Default(false) + + engine := &common.EngineTest{T: t} + engine.Default(false) + engine.ContextF = func() *snow.ConsensusContext { + return ctx + } + + handlerIntf.SetEngineManager(&EngineManager{ + Snowman: &Engine{ + Bootstrapper: bootstrapper, + Consensus: engine, + }, + }) + + ctx.State.Set(snow.EngineState{ + Type: p2p.EngineType_ENGINE_TYPE_SNOWMAN, + State: snow.NormalOp, // assumed bootstrap is done + }) + + bootstrapper.StartF = func(context.Context, uint32) error { + return nil + } + + handlerIntf.Start(context.Background(), false) + + testVdrCount := 4 + vdrIDs := set.NewSet[ids.NodeID](testVdrCount) + for i := 0; i < testVdrCount; i++ { + vdrID := ids.GenerateTestNodeID() + vdrIDs.Add(vdrID) + + require.NoError(vdrs.AddStaker(ctx.SubnetID, vdrID, nil, ids.Empty, 100)) + } + + for index, nodeID := range vdrIDs.List() { + require.NoError(peerTracker.Connected(context.Background(), nodeID, nil)) + + details, err := handlerIntf.HealthCheck(context.Background()) + expectedPercentConnected := float64(index+1) / float64(testVdrCount) + conf := sb.Config() + minPercentConnected := conf.ConsensusParameters.MinPercentConnectedHealthy() + if expectedPercentConnected >= minPercentConnected { + require.NoError(err) + continue + } + require.ErrorIs(err, ErrNotConnectedEnoughStake) + + detailsMap, ok := details.(map[string]interface{}) + require.True(ok) + networkingMap, ok := detailsMap["networking"] + require.True(ok) + networkingDetails, ok := networkingMap.(map[string]float64) + require.True(ok) + percentConnected, ok := networkingDetails["percentConnected"] + require.True(ok) + require.Equal(expectedPercentConnected, percentConnected) + } + }) + } +} diff --git a/avalanchego/snow/networking/handler/message_queue.go b/avalanchego/snow/networking/handler/message_queue.go index 1dfeea5e..58e4f2b3 100644 --- 
a/avalanchego/snow/networking/handler/message_queue.go +++ b/avalanchego/snow/networking/handler/message_queue.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package handler @@ -7,16 +7,15 @@ import ( "context" "sync" - "github.com/prometheus/client_golang/prometheus" - "go.uber.org/zap" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/message" "github.com/ava-labs/avalanchego/proto/pb/p2p" + "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/networking/tracker" "github.com/ava-labs/avalanchego/snow/validators" - "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/buffer" "github.com/ava-labs/avalanchego/utils/timer/mockable" ) @@ -60,9 +59,9 @@ type messageQueue struct { clock mockable.Clock metrics messageQueueMetrics - log logging.Logger + ctx *snow.ConsensusContext // Validator set for the chain associated with this - vdrs validators.Set + vdrs validators.Manager // Tracks CPU utilization of each node cpuTracker tracker.Tracker @@ -71,25 +70,25 @@ type messageQueue struct { // Node ID --> Messages this node has in [msgs] nodeToUnprocessedMsgs map[ids.NodeID]int // Unprocessed messages - msgAndCtxs []*msgAndContext + msgAndCtxs buffer.Deque[*msgAndContext] } func NewMessageQueue( - log logging.Logger, - vdrs validators.Set, + ctx *snow.ConsensusContext, + vdrs validators.Manager, cpuTracker tracker.Tracker, metricsNamespace string, - metricsRegisterer prometheus.Registerer, ops []message.Op, ) (MessageQueue, error) { m := &messageQueue{ - log: log, + ctx: ctx, vdrs: vdrs, cpuTracker: cpuTracker, cond: sync.NewCond(&sync.Mutex{}), nodeToUnprocessedMsgs: make(map[ids.NodeID]int), + msgAndCtxs: buffer.NewUnboundedDeque[*msgAndContext](1 /*=initSize*/), } - return m, m.metrics.initialize(metricsNamespace, metricsRegisterer, ops) + return 
m, m.metrics.initialize(metricsNamespace, ctx.Registerer, ops) } func (m *messageQueue) Push(ctx context.Context, msg Message) { @@ -102,7 +101,7 @@ func (m *messageQueue) Push(ctx context.Context, msg Message) { } // Add the message to the queue - m.msgAndCtxs = append(m.msgAndCtxs, &msgAndContext{ + m.msgAndCtxs.PushRight(&msgAndContext{ msg: msg, ctx: ctx, }) @@ -127,36 +126,30 @@ func (m *messageQueue) Pop() (context.Context, Message, bool) { if m.closed { return nil, Message{}, false } - if len(m.msgAndCtxs) != 0 { + if m.msgAndCtxs.Len() != 0 { break } m.cond.Wait() } - n := len(m.msgAndCtxs) + n := m.msgAndCtxs.Len() // note that n > 0 i := 0 for { if i == n { - m.log.Debug("canPop is false for all unprocessed messages", + m.ctx.Log.Debug("canPop is false for all unprocessed messages", zap.Int("numMessages", n), ) } var ( - msgAndCtx = m.msgAndCtxs[0] - msg = msgAndCtx.msg - ctx = msgAndCtx.ctx - nodeID = msg.NodeID() + msgAndCtx, _ = m.msgAndCtxs.PopLeft() + msg = msgAndCtx.msg + ctx = msgAndCtx.ctx + nodeID = msg.NodeID() ) - m.msgAndCtxs[0] = nil // See if it's OK to process [msg] next if m.canPop(msg) || i == n { // i should never == n but handle anyway as a fail-safe - if cap(m.msgAndCtxs) == 1 { - m.msgAndCtxs = nil // Give back memory if possible - } else { - m.msgAndCtxs = m.msgAndCtxs[1:] - } m.nodeToUnprocessedMsgs[nodeID]-- if m.nodeToUnprocessedMsgs[nodeID] == 0 { delete(m.nodeToUnprocessedMsgs, nodeID) @@ -168,8 +161,7 @@ func (m *messageQueue) Pop() (context.Context, Message, bool) { } // [msg.nodeID] is causing excessive CPU usage. // Push [msg] to back of [m.msgs] and handle it later. 
- m.msgAndCtxs = append(m.msgAndCtxs, msgAndCtx) - m.msgAndCtxs = m.msgAndCtxs[1:] + m.msgAndCtxs.PushRight(msgAndCtx) i++ m.metrics.numExcessiveCPU.Inc() } @@ -179,7 +171,7 @@ func (m *messageQueue) Len() int { m.cond.L.Lock() defer m.cond.L.Unlock() - return len(m.msgAndCtxs) + return m.msgAndCtxs.Len() } func (m *messageQueue) Shutdown() { @@ -187,10 +179,10 @@ func (m *messageQueue) Shutdown() { defer m.cond.L.Unlock() // Remove all the current messages from the queue - for _, msg := range m.msgAndCtxs { - msg.msg.OnFinishedHandling() + for m.msgAndCtxs.Len() > 0 { + msgAndCtx, _ := m.msgAndCtxs.PopLeft() + msgAndCtx.msg.OnFinishedHandling() } - m.msgAndCtxs = nil m.nodeToUnprocessedMsgs = nil // Update metrics @@ -218,14 +210,26 @@ func (m *messageQueue) canPop(msg message.InboundMessage) bool { // the number of nodes with unprocessed messages. baseMaxCPU := 1 / float64(len(m.nodeToUnprocessedMsgs)) nodeID := msg.NodeID() - weight := m.vdrs.GetWeight(nodeID) - // The sum of validator weights should never be 0, but handle - // that case for completeness here to avoid divide by 0. - portionWeight := float64(0) - totalVdrsWeight := m.vdrs.Weight() - if totalVdrsWeight != 0 { + weight := m.vdrs.GetWeight(m.ctx.SubnetID, nodeID) + + var portionWeight float64 + if totalVdrsWeight, err := m.vdrs.TotalWeight(m.ctx.SubnetID); err != nil { + // The sum of validator weights should never overflow, but if they do, + // we treat portionWeight as 0. + m.ctx.Log.Error("failed to get total weight of validators", + zap.Stringer("subnetID", m.ctx.SubnetID), + zap.Error(err), + ) + } else if totalVdrsWeight == 0 { + // The sum of validator weights should never be 0, but handle that case + // for completeness here to avoid divide by 0. + m.ctx.Log.Warn("validator set is empty", + zap.Stringer("subnetID", m.ctx.SubnetID), + ) + } else { portionWeight = float64(weight) / float64(totalVdrsWeight) } + // Validators are allowed to use more CPU. More weight --> more CPU use allowed. 
recentCPUUsage := m.cpuTracker.Usage(nodeID, m.clock.Time()) maxCPU := baseMaxCPU + (1.0-baseMaxCPU)*portionWeight diff --git a/avalanchego/snow/networking/handler/message_queue_metrics.go b/avalanchego/snow/networking/handler/message_queue_metrics.go index e165d045..74cf4d23 100644 --- a/avalanchego/snow/networking/handler/message_queue_metrics.go +++ b/avalanchego/snow/networking/handler/message_queue_metrics.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package handler @@ -9,6 +9,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/ava-labs/avalanchego/message" + "github.com/ava-labs/avalanchego/utils/metric" "github.com/ava-labs/avalanchego/utils/wrappers" ) @@ -24,7 +25,7 @@ func (m *messageQueueMetrics) initialize( metricsRegisterer prometheus.Registerer, ops []message.Op, ) error { - namespace := fmt.Sprintf("%s_%s", metricsNamespace, "unprocessed_msgs") + namespace := metric.AppendNamespace(metricsNamespace, "unprocessed_msgs") m.len = prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: namespace, Name: "len", @@ -48,8 +49,8 @@ func (m *messageQueueMetrics) initialize( opStr := op.String() opMetric := prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: namespace, - Name: fmt.Sprintf("%s_count", opStr), - Help: fmt.Sprintf("Number of of %s messages in the message queue.", opStr), + Name: opStr + "_count", + Help: fmt.Sprintf("Number of %s messages in the message queue.", opStr), }) m.ops[op] = opMetric errs.Add(metricsRegisterer.Register(opMetric)) diff --git a/avalanchego/snow/networking/handler/message_queue_test.go b/avalanchego/snow/networking/handler/message_queue_test.go index 8e1bf550..69fbaf53 100644 --- a/avalanchego/snow/networking/handler/message_queue_test.go +++ b/avalanchego/snow/networking/handler/message_queue_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, 
Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package handler @@ -8,33 +8,30 @@ import ( "testing" "time" - "github.com/golang/mock/gomock" - - "github.com/prometheus/client_golang/prometheus" - "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/message" "github.com/ava-labs/avalanchego/proto/pb/p2p" "github.com/ava-labs/avalanchego/snow/networking/tracker" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/snow/validators" - "github.com/ava-labs/avalanchego/utils/logging" ) const engineType = p2p.EngineType_ENGINE_TYPE_SNOWMAN func TestQueue(t *testing.T) { ctrl := gomock.NewController(t) - defer ctrl.Finish() - require := require.New(t) cpuTracker := tracker.NewMockTracker(ctrl) - vdrs := validators.NewSet() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) + vdrs := validators.NewManager() vdr1ID, vdr2ID := ids.GenerateTestNodeID(), ids.GenerateTestNodeID() - require.NoError(vdrs.Add(vdr1ID, nil, ids.Empty, 1)) - require.NoError(vdrs.Add(vdr2ID, nil, ids.Empty, 1)) - mIntf, err := NewMessageQueue(logging.NoLog{}, vdrs, cpuTracker, "", prometheus.NewRegistry(), message.SynchronousOps) + require.NoError(vdrs.AddStaker(ctx.SubnetID, vdr1ID, nil, ids.Empty, 1)) + require.NoError(vdrs.AddStaker(ctx.SubnetID, vdr2ID, nil, ids.Empty, 1)) + mIntf, err := NewMessageQueue(ctx, vdrs, cpuTracker, "", message.SynchronousOps) require.NoError(err) u := mIntf.(*messageQueue) currentTime := time.Now() @@ -46,6 +43,7 @@ func TestQueue(t *testing.T) { 0, time.Second, ids.GenerateTestID(), + 0, vdr1ID, engineType, ), @@ -56,48 +54,48 @@ func TestQueue(t *testing.T) { // messages on [u.msgs] cpuTracker.EXPECT().Usage(vdr1ID, gomock.Any()).Return(0.1).Times(1) u.Push(context.Background(), msg1) - require.EqualValues(1, 
u.nodeToUnprocessedMsgs[vdr1ID]) - require.EqualValues(1, u.Len()) + require.Equal(1, u.nodeToUnprocessedMsgs[vdr1ID]) + require.Equal(1, u.Len()) _, gotMsg1, ok := u.Pop() require.True(ok) - require.Len(u.nodeToUnprocessedMsgs, 0) - require.EqualValues(0, u.Len()) - require.EqualValues(msg1, gotMsg1) + require.Empty(u.nodeToUnprocessedMsgs) + require.Zero(u.Len()) + require.Equal(msg1, gotMsg1) cpuTracker.EXPECT().Usage(vdr1ID, gomock.Any()).Return(0.0).Times(1) u.Push(context.Background(), msg1) - require.EqualValues(1, u.nodeToUnprocessedMsgs[vdr1ID]) - require.EqualValues(1, u.Len()) + require.Equal(1, u.nodeToUnprocessedMsgs[vdr1ID]) + require.Equal(1, u.Len()) _, gotMsg1, ok = u.Pop() require.True(ok) - require.Len(u.nodeToUnprocessedMsgs, 0) - require.EqualValues(0, u.Len()) - require.EqualValues(msg1, gotMsg1) + require.Empty(u.nodeToUnprocessedMsgs) + require.Zero(u.Len()) + require.Equal(msg1, gotMsg1) cpuTracker.EXPECT().Usage(vdr1ID, gomock.Any()).Return(1.0).Times(1) u.Push(context.Background(), msg1) - require.EqualValues(1, u.nodeToUnprocessedMsgs[vdr1ID]) - require.EqualValues(1, u.Len()) + require.Equal(1, u.nodeToUnprocessedMsgs[vdr1ID]) + require.Equal(1, u.Len()) _, gotMsg1, ok = u.Pop() require.True(ok) - require.Len(u.nodeToUnprocessedMsgs, 0) - require.EqualValues(0, u.Len()) - require.EqualValues(msg1, gotMsg1) + require.Empty(u.nodeToUnprocessedMsgs) + require.Zero(u.Len()) + require.Equal(msg1, gotMsg1) cpuTracker.EXPECT().Usage(vdr1ID, gomock.Any()).Return(0.0).Times(1) u.Push(context.Background(), msg1) - require.EqualValues(1, u.nodeToUnprocessedMsgs[vdr1ID]) - require.EqualValues(1, u.Len()) + require.Equal(1, u.nodeToUnprocessedMsgs[vdr1ID]) + require.Equal(1, u.Len()) _, gotMsg1, ok = u.Pop() require.True(ok) - require.Len(u.nodeToUnprocessedMsgs, 0) - require.EqualValues(0, u.Len()) - require.EqualValues(msg1, gotMsg1) + require.Empty(u.nodeToUnprocessedMsgs) + require.Zero(u.Len()) + require.Equal(msg1, gotMsg1) // Push msg1 from 
vdr1ID u.Push(context.Background(), msg1) - require.EqualValues(1, u.nodeToUnprocessedMsgs[vdr1ID]) - require.EqualValues(1, u.Len()) + require.Equal(1, u.nodeToUnprocessedMsgs[vdr1ID]) + require.Equal(1, u.Len()) msg2 := Message{ InboundMessage: message.InboundPullQuery( @@ -105,6 +103,7 @@ func TestQueue(t *testing.T) { 0, time.Second, ids.GenerateTestID(), + 0, vdr2ID, engineType, ), @@ -113,37 +112,37 @@ func TestQueue(t *testing.T) { // Push msg2 from vdr2ID u.Push(context.Background(), msg2) - require.EqualValues(2, u.Len()) - require.EqualValues(1, u.nodeToUnprocessedMsgs[vdr2ID]) + require.Equal(2, u.Len()) + require.Equal(1, u.nodeToUnprocessedMsgs[vdr2ID]) // Set vdr1's usage to 99% and vdr2's to .01 cpuTracker.EXPECT().Usage(vdr1ID, gomock.Any()).Return(.99).Times(2) cpuTracker.EXPECT().Usage(vdr2ID, gomock.Any()).Return(.01).Times(1) // Pop should return msg2 first because vdr1 has exceeded it's portion of CPU time _, gotMsg2, ok := u.Pop() require.True(ok) - require.EqualValues(1, u.Len()) - require.EqualValues(msg2, gotMsg2) + require.Equal(1, u.Len()) + require.Equal(msg2, gotMsg2) _, gotMsg1, ok = u.Pop() require.True(ok) - require.EqualValues(msg1, gotMsg1) - require.Len(u.nodeToUnprocessedMsgs, 0) - require.EqualValues(0, u.Len()) + require.Equal(msg1, gotMsg1) + require.Empty(u.nodeToUnprocessedMsgs) + require.Zero(u.Len()) // u is now empty // Non-validators should be able to put messages onto [u] nonVdrNodeID1, nonVdrNodeID2 := ids.GenerateTestNodeID(), ids.GenerateTestNodeID() msg3 := Message{ - InboundMessage: message.InboundPullQuery(ids.Empty, 0, 0, ids.Empty, nonVdrNodeID1, engineType), + InboundMessage: message.InboundPullQuery(ids.Empty, 0, 0, ids.Empty, 0, nonVdrNodeID1, engineType), EngineType: engineType, } msg4 := Message{ - InboundMessage: message.InboundPushQuery(ids.Empty, 0, 0, nil, nonVdrNodeID2, engineType), + InboundMessage: message.InboundPushQuery(ids.Empty, 0, 0, nil, 0, nonVdrNodeID2, engineType), EngineType: engineType, } 
u.Push(context.Background(), msg3) u.Push(context.Background(), msg4) u.Push(context.Background(), msg1) - require.EqualValues(3, u.Len()) + require.Equal(3, u.Len()) // msg1 should get popped first because nonVdrNodeID1 and nonVdrNodeID2 // exceeded their limit @@ -154,15 +153,15 @@ func TestQueue(t *testing.T) { // u.msgs is [msg3, msg4, msg1] _, gotMsg1, ok = u.Pop() require.True(ok) - require.EqualValues(msg1, gotMsg1) + require.Equal(msg1, gotMsg1) // u.msgs is [msg3, msg4] cpuTracker.EXPECT().Usage(nonVdrNodeID1, gomock.Any()).Return(.51).Times(2) _, gotMsg4, ok := u.Pop() require.True(ok) - require.EqualValues(msg4, gotMsg4) + require.Equal(msg4, gotMsg4) // u.msgs is [msg3] _, gotMsg3, ok := u.Pop() require.True(ok) - require.EqualValues(msg3, gotMsg3) - require.EqualValues(0, u.Len()) + require.Equal(msg3, gotMsg3) + require.Zero(u.Len()) } diff --git a/avalanchego/snow/networking/handler/metrics.go b/avalanchego/snow/networking/handler/metrics.go index a8776b30..efb6cf55 100644 --- a/avalanchego/snow/networking/handler/metrics.go +++ b/avalanchego/snow/networking/handler/metrics.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package handler @@ -49,13 +49,13 @@ func newMetrics(namespace string, reg prometheus.Registerer) (*metrics, error) { processingTime: metric.NewAveragerWithErrs( namespace, opStr, - fmt.Sprintf("time (in ns) spent handling a %s", opStr), + "time (in ns) spent handling a "+opStr, reg, &errs, ), msgHandlingTime: metric.NewAveragerWithErrs( namespace, - fmt.Sprintf("%s_msg_handling", opStr), + opStr+"_msg_handling", fmt.Sprintf("time (in ns) spent handling a %s after grabbing the lock", opStr), reg, &errs, diff --git a/avalanchego/snow/networking/handler/mock_handler.go b/avalanchego/snow/networking/handler/mock_handler.go index a94fd997..517fbcd8 100644 --- a/avalanchego/snow/networking/handler/mock_handler.go +++ b/avalanchego/snow/networking/handler/mock_handler.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/snow/networking/handler (interfaces: Handler) +// +// Generated by this command: +// +// mockgen -package=handler -destination=snow/networking/handler/mock_handler.go github.com/ava-labs/avalanchego/snow/networking/handler Handler +// // Package handler is a generated GoMock package. package handler @@ -14,7 +16,7 @@ import ( ids "github.com/ava-labs/avalanchego/ids" snow "github.com/ava-labs/avalanchego/snow" - gomock "github.com/golang/mock/gomock" + gomock "go.uber.org/mock/gomock" ) // MockHandler is a mock of Handler interface. @@ -40,6 +42,21 @@ func (m *MockHandler) EXPECT() *MockHandlerMockRecorder { return m.recorder } +// AwaitStopped mocks base method. +func (m *MockHandler) AwaitStopped(arg0 context.Context) (time.Duration, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AwaitStopped", arg0) + ret0, _ := ret[0].(time.Duration) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// AwaitStopped indicates an expected call of AwaitStopped. 
+func (mr *MockHandlerMockRecorder) AwaitStopped(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AwaitStopped", reflect.TypeOf((*MockHandler)(nil).AwaitStopped), arg0) +} + // Context mocks base method. func (m *MockHandler) Context() *snow.ConsensusContext { m.ctrl.T.Helper() @@ -69,16 +86,16 @@ func (mr *MockHandlerMockRecorder) GetEngineManager() *gomock.Call { } // HealthCheck mocks base method. -func (m *MockHandler) HealthCheck(arg0 context.Context) (interface{}, error) { +func (m *MockHandler) HealthCheck(arg0 context.Context) (any, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "HealthCheck", arg0) - ret0, _ := ret[0].(interface{}) + ret0, _ := ret[0].(any) ret1, _ := ret[1].(error) return ret0, ret1 } // HealthCheck indicates an expected call of HealthCheck. -func (mr *MockHandlerMockRecorder) HealthCheck(arg0 interface{}) *gomock.Call { +func (mr *MockHandlerMockRecorder) HealthCheck(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HealthCheck", reflect.TypeOf((*MockHandler)(nil).HealthCheck), arg0) } @@ -104,7 +121,7 @@ func (m *MockHandler) Push(arg0 context.Context, arg1 Message) { } // Push indicates an expected call of Push. -func (mr *MockHandlerMockRecorder) Push(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockHandlerMockRecorder) Push(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Push", reflect.TypeOf((*MockHandler)(nil).Push), arg0, arg1) } @@ -116,7 +133,7 @@ func (m *MockHandler) RegisterTimeout(arg0 time.Duration) { } // RegisterTimeout indicates an expected call of RegisterTimeout. 
-func (mr *MockHandlerMockRecorder) RegisterTimeout(arg0 interface{}) *gomock.Call { +func (mr *MockHandlerMockRecorder) RegisterTimeout(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterTimeout", reflect.TypeOf((*MockHandler)(nil).RegisterTimeout), arg0) } @@ -128,7 +145,7 @@ func (m *MockHandler) SetEngineManager(arg0 *EngineManager) { } // SetEngineManager indicates an expected call of SetEngineManager. -func (mr *MockHandlerMockRecorder) SetEngineManager(arg0 interface{}) *gomock.Call { +func (mr *MockHandlerMockRecorder) SetEngineManager(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetEngineManager", reflect.TypeOf((*MockHandler)(nil).SetEngineManager), arg0) } @@ -140,7 +157,7 @@ func (m *MockHandler) SetOnStopped(arg0 func()) { } // SetOnStopped indicates an expected call of SetOnStopped. -func (mr *MockHandlerMockRecorder) SetOnStopped(arg0 interface{}) *gomock.Call { +func (mr *MockHandlerMockRecorder) SetOnStopped(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetOnStopped", reflect.TypeOf((*MockHandler)(nil).SetOnStopped), arg0) } @@ -154,7 +171,7 @@ func (m *MockHandler) ShouldHandle(arg0 ids.NodeID) bool { } // ShouldHandle indicates an expected call of ShouldHandle. -func (mr *MockHandlerMockRecorder) ShouldHandle(arg0 interface{}) *gomock.Call { +func (mr *MockHandlerMockRecorder) ShouldHandle(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ShouldHandle", reflect.TypeOf((*MockHandler)(nil).ShouldHandle), arg0) } @@ -166,7 +183,7 @@ func (m *MockHandler) Start(arg0 context.Context, arg1 bool) { } // Start indicates an expected call of Start. 
-func (mr *MockHandlerMockRecorder) Start(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockHandlerMockRecorder) Start(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Start", reflect.TypeOf((*MockHandler)(nil).Start), arg0, arg1) } @@ -178,7 +195,7 @@ func (m *MockHandler) Stop(arg0 context.Context) { } // Stop indicates an expected call of Stop. -func (mr *MockHandlerMockRecorder) Stop(arg0 interface{}) *gomock.Call { +func (mr *MockHandlerMockRecorder) Stop(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stop", reflect.TypeOf((*MockHandler)(nil).Stop), arg0) } @@ -190,21 +207,7 @@ func (m *MockHandler) StopWithError(arg0 context.Context, arg1 error) { } // StopWithError indicates an expected call of StopWithError. -func (mr *MockHandlerMockRecorder) StopWithError(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockHandlerMockRecorder) StopWithError(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StopWithError", reflect.TypeOf((*MockHandler)(nil).StopWithError), arg0, arg1) } - -// Stopped mocks base method. -func (m *MockHandler) Stopped() chan struct{} { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Stopped") - ret0, _ := ret[0].(chan struct{}) - return ret0 -} - -// Stopped indicates an expected call of Stopped. -func (mr *MockHandlerMockRecorder) Stopped() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stopped", reflect.TypeOf((*MockHandler)(nil).Stopped)) -} diff --git a/avalanchego/snow/networking/handler/parser.go b/avalanchego/snow/networking/handler/parser.go index 9349b073..4dc954e4 100644 --- a/avalanchego/snow/networking/handler/parser.go +++ b/avalanchego/snow/networking/handler/parser.go @@ -1,30 +1,21 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. 
All rights reserved. // See the file LICENSE for licensing terms. package handler import ( - "errors" - "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/set" ) -var errDuplicatedID = errors.New("inbound message contains duplicated ID") - -func getIDs(idsBytes [][]byte) ([]ids.ID, error) { - res := make([]ids.ID, len(idsBytes)) - idSet := set.NewSet[ids.ID](len(idsBytes)) - for i, bytes := range idsBytes { +func getIDs(idsBytes [][]byte) (set.Set[ids.ID], error) { + var res set.Set[ids.ID] + for _, bytes := range idsBytes { id, err := ids.ToID(bytes) if err != nil { return nil, err } - if idSet.Contains(id) { - return nil, errDuplicatedID - } - res[i] = id - idSet.Add(id) + res.Add(id) } return res, nil } diff --git a/avalanchego/snow/networking/router/chain_router.go b/avalanchego/snow/networking/router/chain_router.go index d8e7eced..9c242588 100644 --- a/avalanchego/snow/networking/router/chain_router.go +++ b/avalanchego/snow/networking/router/chain_router.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package router @@ -12,7 +12,6 @@ import ( "time" "github.com/prometheus/client_golang/prometheus" - "go.uber.org/zap" "github.com/ava-labs/avalanchego/ids" @@ -76,11 +75,11 @@ type ChainRouter struct { peers map[ids.NodeID]*peer // node ID --> chains that node is benched on // invariant: if a node is benched on any chain, it is treated as disconnected on all chains - benched map[ids.NodeID]set.Set[ids.ID] - criticalChains set.Set[ids.ID] - stakingEnabled bool - onFatal func(exitCode int) - metrics *routerMetrics + benched map[ids.NodeID]set.Set[ids.ID] + criticalChains set.Set[ids.ID] + sybilProtectionEnabled bool + onFatal func(exitCode int) + metrics *routerMetrics // Parameters for doing health checks healthConfig HealthConfig // aggregator of requests based on their time @@ -98,7 +97,7 @@ func (cr *ChainRouter) Initialize( timeoutManager timeout.Manager, closeTimeout time.Duration, criticalChains set.Set[ids.ID], - stakingEnabled bool, + sybilProtectionEnabled bool, trackedSubnets set.Set[ids.ID], onFatal func(exitCode int), healthConfig HealthConfig, @@ -111,7 +110,7 @@ func (cr *ChainRouter) Initialize( cr.closeTimeout = closeTimeout cr.benched = make(map[ids.NodeID]set.Set[ids.ID]) cr.criticalChains = criticalChains - cr.stakingEnabled = stakingEnabled + cr.sybilProtectionEnabled = sybilProtectionEnabled cr.onFatal = onFatal cr.timedRequests = linkedhashmap.New[ids.RequestID, requestEntry]() cr.peers = make(map[ids.NodeID]*peer) @@ -368,15 +367,21 @@ func (cr *ChainRouter) Shutdown(ctx context.Context) { chain.Stop(ctx) } - ticker := time.NewTicker(cr.closeTimeout) - defer ticker.Stop() + ctx, cancel := context.WithTimeout(ctx, cr.closeTimeout) + defer cancel() for _, chain := range prevChains { - select { - case <-chain.Stopped(): - case <-ticker.C: - cr.log.Warn("timed out while shutting down the chains") - return + shutdownDuration, err := chain.AwaitStopped(ctx) + + chainLog := chain.Context().Log + if err != nil { + chainLog.Warn("timed out while 
shutting down", + zap.Error(err), + ) + } else { + chainLog.Info("chain shutdown", + zap.Duration("shutdownDuration", shutdownDuration), + ) } } } @@ -408,7 +413,7 @@ func (cr *ChainRouter) AddChain(ctx context.Context, chain handler.Handler) { // If this peer isn't running this chain, then we shouldn't mark them as // connected - if !peer.trackedSubnets.Contains(subnetID) && cr.stakingEnabled { + if !peer.trackedSubnets.Contains(subnetID) && cr.sybilProtectionEnabled { continue } @@ -466,14 +471,14 @@ func (cr *ChainRouter) Connected(nodeID ids.NodeID, nodeVersion *version.Applica // set, disconnect. we cannot put a subnet-only validator check here since // Disconnected would not be handled properly. // - // When staking is disabled, we only want this clause to happen once. - // Therefore, we only update the chains during the connection of the primary - // network, which is guaranteed to happen for every peer. - if cr.stakingEnabled || subnetID == constants.PrimaryNetworkID { + // When sybil protection is disabled, we only want this clause to happen + // once. Therefore, we only update the chains during the connection of the + // primary network, which is guaranteed to happen for every peer. + if cr.sybilProtectionEnabled || subnetID == constants.PrimaryNetworkID { for _, chain := range cr.chainHandlers { - // If staking is disabled, send a Connected message to every chain - // when connecting to the primary network - if subnetID == chain.Context().SubnetID || !cr.stakingEnabled { + // If sybil protection is disabled, send a Connected message to + // every chain when connecting to the primary network. + if subnetID == chain.Context().SubnetID || !cr.sybilProtectionEnabled { chain.Push( context.TODO(), handler.Message{ @@ -506,7 +511,7 @@ func (cr *ChainRouter) Disconnected(nodeID ids.NodeID) { // if a validator connects then it leaves validator-set, it would not be // disconnected properly. 
for _, chain := range cr.chainHandlers { - if peer.trackedSubnets.Contains(chain.Context().SubnetID) || !cr.stakingEnabled { + if peer.trackedSubnets.Contains(chain.Context().SubnetID) || !cr.sybilProtectionEnabled { chain.Push( context.TODO(), handler.Message{ @@ -536,7 +541,7 @@ func (cr *ChainRouter) Benched(chainID ids.ID, nodeID ids.NodeID) { msg := message.InternalDisconnected(nodeID) for _, chain := range cr.chainHandlers { - if peer.trackedSubnets.Contains(chain.Context().SubnetID) || !cr.stakingEnabled { + if peer.trackedSubnets.Contains(chain.Context().SubnetID) || !cr.sybilProtectionEnabled { chain.Push( context.TODO(), handler.Message{ @@ -571,7 +576,7 @@ func (cr *ChainRouter) Unbenched(chainID ids.ID, nodeID ids.NodeID) { msg := message.InternalConnected(nodeID, peer.version) for _, chain := range cr.chainHandlers { - if peer.trackedSubnets.Contains(chain.Context().SubnetID) || !cr.stakingEnabled { + if peer.trackedSubnets.Contains(chain.Context().SubnetID) || !cr.sybilProtectionEnabled { chain.Push( context.TODO(), handler.Message{ @@ -646,12 +651,19 @@ func (cr *ChainRouter) removeChain(ctx context.Context, chainID ids.ID) { chain.Stop(ctx) - ticker := time.NewTicker(cr.closeTimeout) - defer ticker.Stop() - select { - case <-chain.Stopped(): - case <-ticker.C: - chain.Context().Log.Warn("timed out while shutting down") + ctx, cancel := context.WithTimeout(ctx, cr.closeTimeout) + shutdownDuration, err := chain.AwaitStopped(ctx) + cancel() + + chainLog := chain.Context().Log + if err != nil { + chainLog.Warn("timed out while shutting down", + zap.Error(err), + ) + } else { + chainLog.Info("chain shutdown", + zap.Duration("shutdownDuration", shutdownDuration), + ) } if cr.onFatal != nil && cr.criticalChains.Contains(chainID) { diff --git a/avalanchego/snow/networking/router/chain_router_metrics.go b/avalanchego/snow/networking/router/chain_router_metrics.go index cfcc9613..bc8f2622 100644 --- a/avalanchego/snow/networking/router/chain_router_metrics.go 
+++ b/avalanchego/snow/networking/router/chain_router_metrics.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package router @@ -6,7 +6,7 @@ package router import ( "github.com/prometheus/client_golang/prometheus" - "github.com/ava-labs/avalanchego/utils/wrappers" + "github.com/ava-labs/avalanchego/utils" ) // routerMetrics about router messages @@ -40,11 +40,10 @@ func newRouterMetrics(namespace string, registerer prometheus.Registerer) (*rout }, ) - errs := wrappers.Errs{} - errs.Add( + err := utils.Err( registerer.Register(rMetrics.outstandingRequests), registerer.Register(rMetrics.longestRunningRequest), registerer.Register(rMetrics.droppedRequests), ) - return rMetrics, errs.Err + return rMetrics, err } diff --git a/avalanchego/snow/networking/router/chain_router_test.go b/avalanchego/snow/networking/router/chain_router_test.go index f235064b..e9617d7a 100644 --- a/avalanchego/snow/networking/router/chain_router_test.go +++ b/avalanchego/snow/networking/router/chain_router_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package router @@ -9,13 +9,10 @@ import ( "testing" "time" - "github.com/golang/mock/gomock" - "github.com/prometheus/client_golang/prometheus" - "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" - "github.com/ava-labs/avalanchego/api/metrics" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/message" "github.com/ava-labs/avalanchego/proto/pb/p2p" @@ -25,6 +22,7 @@ import ( "github.com/ava-labs/avalanchego/snow/networking/handler" "github.com/ava-labs/avalanchego/snow/networking/timeout" "github.com/ava-labs/avalanchego/snow/networking/tracker" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/subnets" "github.com/ava-labs/avalanchego/utils/constants" @@ -34,6 +32,8 @@ import ( "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/timer" "github.com/ava-labs/avalanchego/version" + + commontracker "github.com/ava-labs/avalanchego/snow/engine/common/tracker" ) const ( @@ -41,10 +41,15 @@ const ( testThreadPoolSize = 2 ) +// TODO refactor tests in this file + func TestShutdown(t *testing.T) { - vdrs := validators.NewSet() - err := vdrs.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1) - require.NoError(t, err) + require := require.New(t) + + snowCtx := snowtest.Context(t, snowtest.CChainID) + chainCtx := snowtest.ConsensusContext(snowCtx) + vdrs := validators.NewManager() + require.NoError(vdrs.AddStaker(chainCtx.SubnetID, ids.GenerateTestNodeID(), nil, ids.Empty, 1)) benchlist := benchlist.NewNoBenchlist() tm, err := timeout.NewManager( &timer.AdaptiveTimeoutConfig{ @@ -58,11 +63,13 @@ func TestShutdown(t *testing.T) { "", prometheus.NewRegistry(), ) - require.NoError(t, err) + require.NoError(err) + go tm.Dispatch() + defer tm.Stop() chainRouter := ChainRouter{} - err = chainRouter.Initialize( + require.NoError(chainRouter.Initialize( ids.EmptyNodeID, logging.NoLog{}, tm, @@ -74,35 +81,31 @@ func TestShutdown(t 
*testing.T) { HealthConfig{}, "", prometheus.NewRegistry(), - ) - require.NoError(t, err) + )) shutdownCalled := make(chan struct{}, 1) - ctx := snow.DefaultConsensusContextTest() resourceTracker, err := tracker.NewResourceTracker( prometheus.NewRegistry(), resource.NoUsage, meter.ContinuousFactory{}, time.Second, ) - require.NoError(t, err) + require.NoError(err) h, err := handler.New( - ctx, + chainCtx, vdrs, nil, time.Second, testThreadPoolSize, resourceTracker, validators.UnhandledSubnetConnector, - subnets.New(ctx.NodeID, subnets.Config{}), + subnets.New(chainCtx.NodeID, subnets.Config{}), + commontracker.NewPeers(), ) - require.NoError(t, err) + require.NoError(err) bootstrapper := &common.BootstrapperTest{ - BootstrapableTest: common.BootstrapableTest{ - T: t, - }, EngineTest: common.EngineTest{ T: t, }, @@ -110,7 +113,7 @@ func TestShutdown(t *testing.T) { bootstrapper.Default(true) bootstrapper.CantGossip = false bootstrapper.ContextF = func() *snow.ConsensusContext { - return ctx + return chainCtx } bootstrapper.ShutdownF = func(context.Context) error { shutdownCalled <- struct{}{} @@ -125,7 +128,7 @@ func TestShutdown(t *testing.T) { engine.Default(true) engine.CantGossip = false engine.ContextF = func() *snow.ConsensusContext { - return ctx + return chainCtx } engine.ShutdownF = func(context.Context) error { shutdownCalled <- struct{}{} @@ -147,7 +150,7 @@ func TestShutdown(t *testing.T) { Consensus: engine, }, }) - ctx.State.Set(snow.EngineState{ + chainCtx.State.Set(snow.EngineState{ Type: engineType, State: snow.NormalOp, // assumed bootstrapping is done }) @@ -161,25 +164,29 @@ func TestShutdown(t *testing.T) { chainRouter.Shutdown(context.Background()) - ticker := time.NewTicker(250 * time.Millisecond) + ctx, cancel := context.WithTimeout(context.Background(), 250*time.Millisecond) + defer cancel() + select { - case <-ticker.C: - t.Fatalf("Handler shutdown was not called or timed out after 250ms during chainRouter shutdown") + case <-ctx.Done(): + 
require.FailNow("Handler shutdown was not called or timed out after 250ms during chainRouter shutdown") case <-shutdownCalled: } - select { - case <-h.Stopped(): - default: - t.Fatal("handler shutdown but never closed its closing channel") - } + shutdownDuration, err := h.AwaitStopped(ctx) + require.NoError(err) + require.GreaterOrEqual(shutdownDuration, time.Duration(0)) + require.Less(shutdownDuration, 250*time.Millisecond) } func TestShutdownTimesOut(t *testing.T) { + require := require.New(t) + + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) nodeID := ids.EmptyNodeID - vdrs := validators.NewSet() - err := vdrs.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1) - require.NoError(t, err) + vdrs := validators.NewManager() + require.NoError(vdrs.AddStaker(ctx.SubnetID, ids.GenerateTestNodeID(), nil, ids.Empty, 1)) benchlist := benchlist.NewNoBenchlist() metrics := prometheus.NewRegistry() // Ensure that the Ancestors request does not timeout @@ -195,12 +202,14 @@ func TestShutdownTimesOut(t *testing.T) { "", metrics, ) - require.NoError(t, err) + require.NoError(err) + go tm.Dispatch() + defer tm.Stop() chainRouter := ChainRouter{} - err = chainRouter.Initialize( + require.NoError(chainRouter.Initialize( ids.EmptyNodeID, logging.NoLog{}, tm, @@ -212,17 +221,15 @@ func TestShutdownTimesOut(t *testing.T) { HealthConfig{}, "", metrics, - ) - require.NoError(t, err) + )) - ctx := snow.DefaultConsensusContextTest() resourceTracker, err := tracker.NewResourceTracker( prometheus.NewRegistry(), resource.NoUsage, meter.ContinuousFactory{}, time.Second, ) - require.NoError(t, err) + require.NoError(err) h, err := handler.New( ctx, vdrs, @@ -232,14 +239,12 @@ func TestShutdownTimesOut(t *testing.T) { resourceTracker, validators.UnhandledSubnetConnector, subnets.New(ctx.NodeID, subnets.Config{}), + commontracker.NewPeers(), ) - require.NoError(t, err) + require.NoError(err) bootstrapFinished := make(chan struct{}, 1) bootstrapper 
:= &common.BootstrapperTest{ - BootstrapableTest: common.BootstrapableTest{ - T: t, - }, EngineTest: common.EngineTest{ T: t, }, @@ -253,7 +258,7 @@ func TestShutdownTimesOut(t *testing.T) { return nil } bootstrapper.HaltF = func(context.Context) {} - bootstrapper.PullQueryF = func(context.Context, ids.NodeID, uint32, ids.ID) error { + bootstrapper.PullQueryF = func(context.Context, ids.NodeID, uint32, ids.ID, uint64) error { // Ancestors blocks for two seconds time.Sleep(2 * time.Second) bootstrapFinished <- struct{}{} @@ -299,7 +304,7 @@ func TestShutdownTimesOut(t *testing.T) { go func() { chainID := ids.ID{} msg := handler.Message{ - InboundMessage: message.InboundPullQuery(chainID, 1, time.Hour, ids.GenerateTestID(), nodeID, engineType), + InboundMessage: message.InboundPullQuery(chainID, 1, time.Hour, ids.GenerateTestID(), 0, nodeID, engineType), EngineType: engineType, } h.Push(context.Background(), msg) @@ -312,7 +317,7 @@ func TestShutdownTimesOut(t *testing.T) { select { case <-bootstrapFinished: - t.Fatalf("Shutdown should have finished in one millisecond before timing out instead of waiting for engine to finish shutting down.") + require.FailNow("Shutdown should have finished in one millisecond before timing out instead of waiting for engine to finish shutting down.") case <-shutdownFinished: } } @@ -320,6 +325,7 @@ func TestShutdownTimesOut(t *testing.T) { // Ensure that a timeout fires if we don't get a response to a request func TestRouterTimeout(t *testing.T) { require := require.New(t) + // Create a timeout manager maxTimeout := 25 * time.Millisecond tm, err := timeout.NewManager( @@ -335,12 +341,13 @@ func TestRouterTimeout(t *testing.T) { prometheus.NewRegistry(), ) require.NoError(err) + go tm.Dispatch() + defer tm.Stop() // Create a router chainRouter := ChainRouter{} - - err = chainRouter.Initialize( + require.NoError(chainRouter.Initialize( ids.EmptyNodeID, logging.NoLog{}, tm, @@ -352,25 +359,28 @@ func TestRouterTimeout(t *testing.T) { 
HealthConfig{}, "", prometheus.NewRegistry(), - ) - require.NoError(err) + )) + defer chainRouter.Shutdown(context.Background()) // Create bootstrapper, engine and handler var ( - calledGetStateSummaryFrontierFailed, calledGetAcceptedStateSummaryFailed, - calledGetAcceptedFrontierFailed, calledGetAcceptedFailed, + calledGetStateSummaryFrontierFailed, + calledGetAcceptedStateSummaryFailed, + calledGetAcceptedFrontierFailed, + calledGetAcceptedFailed, calledGetAncestorsFailed, - calledGetFailed, calledQueryFailed, + calledGetFailed, + calledQueryFailed, calledAppRequestFailed, calledCrossChainAppRequestFailed bool wg = sync.WaitGroup{} ) - ctx := snow.DefaultConsensusContextTest() - vdrs := validators.NewSet() - err = vdrs.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1) - require.NoError(err) + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) + vdrs := validators.NewManager() + require.NoError(vdrs.AddStaker(ctx.SubnetID, ids.GenerateTestNodeID(), nil, ids.Empty, 1)) resourceTracker, err := tracker.NewResourceTracker( prometheus.NewRegistry(), @@ -389,13 +399,11 @@ func TestRouterTimeout(t *testing.T) { resourceTracker, validators.UnhandledSubnetConnector, subnets.New(ctx.NodeID, subnets.Config{}), + commontracker.NewPeers(), ) require.NoError(err) bootstrapper := &common.BootstrapperTest{ - BootstrapableTest: common.BootstrapableTest{ - T: t, - }, EngineTest: common.EngineTest{ T: t, }, @@ -409,6 +417,7 @@ func TestRouterTimeout(t *testing.T) { return nil } bootstrapper.HaltF = func(context.Context) {} + bootstrapper.ShutdownF = func(context.Context) error { return nil } bootstrapper.GetStateSummaryFrontierFailedF = func(context.Context, ids.NodeID, uint32) error { defer wg.Done() @@ -445,12 +454,12 @@ func TestRouterTimeout(t *testing.T) { calledQueryFailed = true return nil } - bootstrapper.AppRequestFailedF = func(context.Context, ids.NodeID, uint32) error { + bootstrapper.AppRequestFailedF = func(context.Context, 
ids.NodeID, uint32, *common.AppError) error { defer wg.Done() calledAppRequestFailed = true return nil } - bootstrapper.CrossChainAppRequestFailedF = func(context.Context, ids.ID, uint32) error { + bootstrapper.CrossChainAppRequestFailedF = func(context.Context, ids.ID, uint32, *common.AppError) error { defer wg.Done() calledCrossChainAppRequestFailed = true return nil @@ -628,10 +637,12 @@ func TestRouterTimeout(t *testing.T) { ctx.ChainID, requestID, message.AppResponseOp, - message.InternalAppRequestFailed( + message.InboundAppError( nodeID, ctx.ChainID, requestID, + common.ErrTimeout.Code, + common.ErrTimeout.Message, ), p2p.EngineType_ENGINE_TYPE_SNOWMAN, ) @@ -647,11 +658,13 @@ func TestRouterTimeout(t *testing.T) { ctx.ChainID, requestID, message.CrossChainAppResponseOp, - message.InternalCrossChainAppRequestFailed( + message.InternalCrossChainAppError( nodeID, ctx.ChainID, ctx.ChainID, requestID, + common.ErrTimeout.Code, + common.ErrTimeout.Message, ), p2p.EngineType_ENGINE_TYPE_SNOWMAN, ) @@ -675,8 +688,6 @@ func TestRouterTimeout(t *testing.T) { func TestRouterHonorsRequestedEngine(t *testing.T) { ctrl := gomock.NewController(t) - defer ctrl.Finish() - require := require.New(t) // Create a timeout manager @@ -693,11 +704,13 @@ func TestRouterHonorsRequestedEngine(t *testing.T) { prometheus.NewRegistry(), ) require.NoError(err) + go tm.Dispatch() + defer tm.Stop() // Create a router chainRouter := ChainRouter{} - err = chainRouter.Initialize( + require.NoError(chainRouter.Initialize( ids.EmptyNodeID, logging.NoLog{}, tm, @@ -709,14 +722,17 @@ func TestRouterHonorsRequestedEngine(t *testing.T) { HealthConfig{}, "", prometheus.NewRegistry(), - ) - require.NoError(err) + )) + defer chainRouter.Shutdown(context.Background()) h := handler.NewMockHandler(ctrl) - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) h.EXPECT().Context().Return(ctx).AnyTimes() 
h.EXPECT().SetOnStopped(gomock.Any()).AnyTimes() + h.EXPECT().Stop(gomock.Any()).AnyTimes() + h.EXPECT().AwaitStopped(gomock.Any()).AnyTimes() h.EXPECT().Push(gomock.Any(), gomock.Any()).Times(1) chainRouter.AddChain(context.Background(), h) @@ -783,32 +799,126 @@ func TestRouterHonorsRequestedEngine(t *testing.T) { } { + engineType := p2p.EngineType(100) + requestID++ msg := message.InboundPushQuery( ctx.ChainID, requestID, 0, nil, + 0, nodeID, - 100, + engineType, ) h.EXPECT().Push(gomock.Any(), gomock.Any()).Do(func(_ context.Context, msg handler.Message) { - require.EqualValues(100, msg.EngineType) + require.Equal(engineType, msg.EngineType) }) chainRouter.HandleInbound(context.Background(), msg) } - require.Equal(0, chainRouter.timedRequests.Len()) + require.Zero(chainRouter.timedRequests.Len()) } func TestRouterClearTimeouts(t *testing.T) { + requestID := uint32(123) + + tests := []struct { + name string + responseOp message.Op + responseMsg message.InboundMessage + timeoutMsg message.InboundMessage + }{ + { + name: "StateSummaryFrontier", + responseOp: message.StateSummaryFrontierOp, + responseMsg: message.InboundStateSummaryFrontier(ids.Empty, requestID, []byte("summary"), ids.EmptyNodeID), + timeoutMsg: message.InternalGetStateSummaryFrontierFailed(ids.EmptyNodeID, ids.Empty, requestID), + }, + { + name: "AcceptedStateSummary", + responseOp: message.AcceptedStateSummaryOp, + responseMsg: message.InboundAcceptedStateSummary(ids.Empty, requestID, []ids.ID{ids.GenerateTestID()}, ids.EmptyNodeID), + timeoutMsg: message.InternalGetAcceptedStateSummaryFailed(ids.EmptyNodeID, ids.Empty, requestID), + }, + { + name: "AcceptedFrontierOp", + responseOp: message.AcceptedFrontierOp, + responseMsg: message.InboundAcceptedFrontier(ids.Empty, requestID, ids.GenerateTestID(), ids.EmptyNodeID), + timeoutMsg: message.InternalGetAcceptedFrontierFailed(ids.EmptyNodeID, ids.Empty, requestID, engineType), + }, + { + name: "Accepted", + responseOp: message.AcceptedOp, + 
responseMsg: message.InboundAccepted(ids.Empty, requestID, []ids.ID{ids.GenerateTestID()}, ids.EmptyNodeID), + timeoutMsg: message.InternalGetAcceptedFailed(ids.EmptyNodeID, ids.Empty, requestID, engineType), + }, + { + name: "Chits", + responseOp: message.ChitsOp, + responseMsg: message.InboundChits(ids.Empty, requestID, ids.GenerateTestID(), ids.GenerateTestID(), ids.GenerateTestID(), ids.EmptyNodeID), + timeoutMsg: message.InternalQueryFailed(ids.EmptyNodeID, ids.Empty, requestID, engineType), + }, + { + name: "AppResponse", + responseOp: message.AppResponseOp, + responseMsg: message.InboundAppResponse(ids.Empty, requestID, []byte("responseMsg"), ids.EmptyNodeID), + timeoutMsg: message.InboundAppError(ids.EmptyNodeID, ids.Empty, requestID, 123, "error"), + }, + { + name: "AppError", + responseOp: message.AppResponseOp, + responseMsg: message.InboundAppError(ids.EmptyNodeID, ids.Empty, requestID, 1234, "custom error"), + timeoutMsg: message.InboundAppError(ids.EmptyNodeID, ids.Empty, requestID, 123, "error"), + }, + { + name: "CrossChainAppResponse", + responseOp: message.CrossChainAppResponseOp, + responseMsg: message.InternalCrossChainAppResponse(ids.EmptyNodeID, ids.Empty, ids.Empty, requestID, []byte("responseMsg")), + timeoutMsg: message.InternalCrossChainAppError(ids.EmptyNodeID, ids.Empty, ids.Empty, requestID, 123, "error"), + }, + { + name: "CrossChainAppError", + responseOp: message.CrossChainAppResponseOp, + responseMsg: message.InternalCrossChainAppError(ids.EmptyNodeID, ids.Empty, ids.Empty, requestID, 1234, "custom error"), + timeoutMsg: message.InternalCrossChainAppError(ids.EmptyNodeID, ids.Empty, ids.Empty, requestID, 123, "error"), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + + chainRouter, _ := newChainRouterTest(t) + + chainRouter.RegisterRequest( + context.Background(), + ids.EmptyNodeID, + ids.Empty, + ids.Empty, + requestID, + tt.responseOp, + tt.timeoutMsg, + engineType, + ) + 
+ chainRouter.HandleInbound(context.Background(), tt.responseMsg) + require.Zero(chainRouter.timedRequests.Len()) + }) + } +} + +func TestValidatorOnlyMessageDrops(t *testing.T) { + require := require.New(t) + // Create a timeout manager + maxTimeout := 25 * time.Millisecond tm, err := timeout.NewManager( &timer.AdaptiveTimeoutConfig{ - InitialTimeout: 3 * time.Second, - MinimumTimeout: 3 * time.Second, - MaximumTimeout: 5 * time.Minute, + InitialTimeout: 10 * time.Millisecond, + MinimumTimeout: 10 * time.Millisecond, + MaximumTimeout: maxTimeout, TimeoutCoefficient: 1, TimeoutHalflife: 5 * time.Minute, }, @@ -816,12 +926,14 @@ func TestRouterClearTimeouts(t *testing.T) { "", prometheus.NewRegistry(), ) - require.NoError(t, err) + require.NoError(err) + go tm.Dispatch() + defer tm.Stop() // Create a router chainRouter := ChainRouter{} - err = chainRouter.Initialize( + require.NoError(chainRouter.Initialize( ids.EmptyNodeID, logging.NoLog{}, tm, @@ -833,22 +945,26 @@ func TestRouterClearTimeouts(t *testing.T) { HealthConfig{}, "", prometheus.NewRegistry(), - ) - require.NoError(t, err) + )) + defer chainRouter.Shutdown(context.Background()) // Create bootstrapper, engine and handler - ctx := snow.DefaultConsensusContextTest() - vdrs := validators.NewSet() - err = vdrs.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1) - require.NoError(t, err) + calledF := false + wg := sync.WaitGroup{} + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) + sb := subnets.New(ctx.NodeID, subnets.Config{ValidatorOnly: true}) + vdrs := validators.NewManager() + vID := ids.GenerateTestNodeID() + require.NoError(vdrs.AddStaker(ctx.SubnetID, vID, nil, ids.Empty, 1)) resourceTracker, err := tracker.NewResourceTracker( prometheus.NewRegistry(), resource.NoUsage, meter.ContinuousFactory{}, time.Second, ) - require.NoError(t, err) + require.NoError(err) h, err := handler.New( ctx, vdrs, @@ -857,14 +973,12 @@ func TestRouterClearTimeouts(t *testing.T) { 
testThreadPoolSize, resourceTracker, validators.UnhandledSubnetConnector, - subnets.New(ctx.NodeID, subnets.Config{}), + sb, + commontracker.NewPeers(), ) - require.NoError(t, err) + require.NoError(err) bootstrapper := &common.BootstrapperTest{ - BootstrapableTest: common.BootstrapableTest{ - T: t, - }, EngineTest: common.EngineTest{ T: t, }, @@ -873,12 +987,21 @@ func TestRouterClearTimeouts(t *testing.T) { bootstrapper.ContextF = func() *snow.ConsensusContext { return ctx } + bootstrapper.PullQueryF = func(context.Context, ids.NodeID, uint32, ids.ID, uint64) error { + defer wg.Done() + calledF = true + return nil + } + ctx.State.Set(snow.EngineState{ + Type: p2p.EngineType_ENGINE_TYPE_SNOWMAN, + State: snow.Bootstrapping, // assumed bootstrapping is ongoing + }) engine := &common.EngineTest{T: t} - engine.Default(false) engine.ContextF = func() *snow.ConsensusContext { return ctx } + engine.Default(false) h.SetEngineManager(&handler.EngineManager{ Avalanche: &handler.Engine{ StateSyncer: nil, @@ -891,10 +1014,6 @@ func TestRouterClearTimeouts(t *testing.T) { Consensus: engine, }, }) - ctx.State.Set(snow.EngineState{ - Type: p2p.EngineType_ENGINE_TYPE_SNOWMAN, - State: snow.NormalOp, // assumed bootstrapping is done - }) chainRouter.AddChain(context.Background(), h) @@ -903,321 +1022,26 @@ func TestRouterClearTimeouts(t *testing.T) { } h.Start(context.Background(), false) - nodeID := ids.GenerateTestNodeID() - requestID := uint32(0) - { - chainRouter.RegisterRequest( - context.Background(), - nodeID, - ctx.ChainID, - ctx.ChainID, - requestID, - message.StateSummaryFrontierOp, - message.InternalGetStateSummaryFrontierFailed( - nodeID, - ctx.ChainID, - requestID, - ), - engineType, - ) - msg := message.InboundStateSummaryFrontier( - ctx.ChainID, - requestID, - nil, - nodeID, - ) - chainRouter.HandleInbound(context.Background(), msg) - } + var inMsg message.InboundMessage + dummyContainerID := ids.GenerateTestID() + reqID := uint32(0) - { - requestID++ - 
chainRouter.RegisterRequest( - context.Background(), - nodeID, - ctx.ChainID, - ctx.ChainID, - requestID, - message.AcceptedStateSummaryOp, - message.InternalGetAcceptedStateSummaryFailed( - nodeID, - ctx.ChainID, - requestID, - ), - engineType, - ) - msg := message.InboundAcceptedStateSummary( - ctx.ChainID, - requestID, - nil, - nodeID, - ) - chainRouter.HandleInbound(context.Background(), msg) - } + // Non-validator case + nID := ids.GenerateTestNodeID() - { - requestID++ - chainRouter.RegisterRequest( - context.Background(), - nodeID, - ctx.ChainID, - ctx.ChainID, - requestID, - message.AcceptedFrontierOp, - message.InternalGetAcceptedFrontierFailed( - nodeID, - ctx.ChainID, - requestID, - engineType, - ), - engineType, - ) - msg := message.InboundAcceptedFrontier( - ctx.ChainID, - requestID, - nil, - nodeID, - ) - chainRouter.HandleInbound(context.Background(), msg) - } + calledF = false + inMsg = message.InboundPullQuery( + ctx.ChainID, + reqID, + time.Hour, + dummyContainerID, + 0, + nID, + p2p.EngineType_ENGINE_TYPE_SNOWMAN, + ) + chainRouter.HandleInbound(context.Background(), inMsg) - { - requestID++ - chainRouter.RegisterRequest( - context.Background(), - nodeID, - ctx.ChainID, - ctx.ChainID, - requestID, - message.AcceptedOp, - message.InternalGetAcceptedFailed( - nodeID, - ctx.ChainID, - requestID, - engineType, - ), - engineType, - ) - msg := message.InboundAccepted( - ctx.ChainID, - requestID, - nil, - nodeID, - ) - chainRouter.HandleInbound(context.Background(), msg) - } - - { - requestID++ - chainRouter.RegisterRequest( - context.Background(), - nodeID, - ctx.ChainID, - ctx.ChainID, - requestID, - message.ChitsOp, - message.InternalQueryFailed( - nodeID, - ctx.ChainID, - requestID, - engineType, - ), - engineType, - ) - msg := message.InboundChits( - ctx.ChainID, - requestID, - nil, - nil, - nodeID, - ) - chainRouter.HandleInbound(context.Background(), msg) - } - - { - requestID++ - chainRouter.RegisterRequest( - context.Background(), - nodeID, - 
ctx.ChainID, - ctx.ChainID, - requestID, - message.AppResponseOp, - message.InternalAppRequestFailed( - nodeID, - ctx.ChainID, - requestID, - ), - engineType, - ) - msg := message.InboundAppResponse( - ctx.ChainID, - requestID, - nil, - nodeID, - ) - chainRouter.HandleInbound(context.Background(), msg) - } - - { - requestID++ - chainRouter.RegisterRequest( - context.Background(), - nodeID, - ctx.ChainID, - ctx.ChainID, - requestID, - message.CrossChainAppResponseOp, - message.InternalCrossChainAppRequestFailed( - nodeID, - ctx.ChainID, - ctx.ChainID, - requestID, - ), - p2p.EngineType_ENGINE_TYPE_UNSPECIFIED, - ) - msg := message.InternalCrossChainAppResponse( - nodeID, - ctx.ChainID, - ctx.ChainID, - requestID, - nil, - ) - chainRouter.HandleInbound(context.Background(), msg) - } - - require.Equal(t, 0, chainRouter.timedRequests.Len()) -} - -func TestValidatorOnlyMessageDrops(t *testing.T) { - // Create a timeout manager - maxTimeout := 25 * time.Millisecond - tm, err := timeout.NewManager( - &timer.AdaptiveTimeoutConfig{ - InitialTimeout: 10 * time.Millisecond, - MinimumTimeout: 10 * time.Millisecond, - MaximumTimeout: maxTimeout, - TimeoutCoefficient: 1, - TimeoutHalflife: 5 * time.Minute, - }, - benchlist.NewNoBenchlist(), - "", - prometheus.NewRegistry(), - ) - require.NoError(t, err) - go tm.Dispatch() - - // Create a router - chainRouter := ChainRouter{} - err = chainRouter.Initialize( - ids.EmptyNodeID, - logging.NoLog{}, - tm, - time.Millisecond, - set.Set[ids.ID]{}, - true, - set.Set[ids.ID]{}, - nil, - HealthConfig{}, - "", - prometheus.NewRegistry(), - ) - require.NoError(t, err) - - // Create bootstrapper, engine and handler - calledF := false - wg := sync.WaitGroup{} - - ctx := snow.DefaultConsensusContextTest() - sb := subnets.New(ctx.NodeID, subnets.Config{ValidatorOnly: true}) - vdrs := validators.NewSet() - vID := ids.GenerateTestNodeID() - err = vdrs.Add(vID, nil, ids.Empty, 1) - require.NoError(t, err) - resourceTracker, err := 
tracker.NewResourceTracker( - prometheus.NewRegistry(), - resource.NoUsage, - meter.ContinuousFactory{}, - time.Second, - ) - require.NoError(t, err) - h, err := handler.New( - ctx, - vdrs, - nil, - time.Second, - testThreadPoolSize, - resourceTracker, - validators.UnhandledSubnetConnector, - sb, - ) - require.NoError(t, err) - - bootstrapper := &common.BootstrapperTest{ - BootstrapableTest: common.BootstrapableTest{ - T: t, - }, - EngineTest: common.EngineTest{ - T: t, - }, - } - bootstrapper.Default(false) - bootstrapper.ContextF = func() *snow.ConsensusContext { - return ctx - } - bootstrapper.PullQueryF = func(context.Context, ids.NodeID, uint32, ids.ID) error { - defer wg.Done() - calledF = true - return nil - } - ctx.State.Set(snow.EngineState{ - Type: p2p.EngineType_ENGINE_TYPE_SNOWMAN, - State: snow.Bootstrapping, // assumed bootstrapping is ongoing - }) - - engine := &common.EngineTest{T: t} - engine.ContextF = func() *snow.ConsensusContext { - return ctx - } - engine.Default(false) - h.SetEngineManager(&handler.EngineManager{ - Avalanche: &handler.Engine{ - StateSyncer: nil, - Bootstrapper: bootstrapper, - Consensus: engine, - }, - Snowman: &handler.Engine{ - StateSyncer: nil, - Bootstrapper: bootstrapper, - Consensus: engine, - }, - }) - - chainRouter.AddChain(context.Background(), h) - - bootstrapper.StartF = func(context.Context, uint32) error { - return nil - } - h.Start(context.Background(), false) - - var inMsg message.InboundMessage - dummyContainerID := ids.GenerateTestID() - reqID := uint32(0) - - // Non-validator case - nID := ids.GenerateTestNodeID() - - calledF = false - inMsg = message.InboundPullQuery( - ctx.ChainID, - reqID, - time.Hour, - dummyContainerID, - nID, - p2p.EngineType_ENGINE_TYPE_SNOWMAN, - ) - chainRouter.HandleInbound(context.Background(), inMsg) - - require.False(t, calledF) // should not be called + require.False(calledF) // should not be called // Validator case calledF = false @@ -1227,6 +1051,7 @@ func 
TestValidatorOnlyMessageDrops(t *testing.T) { reqID, time.Hour, dummyContainerID, + 0, vID, p2p.EngineType_ENGINE_TYPE_SNOWMAN, ) @@ -1234,155 +1059,12 @@ func TestValidatorOnlyMessageDrops(t *testing.T) { chainRouter.HandleInbound(context.Background(), inMsg) wg.Wait() - require.True(t, calledF) // should be called since this is a validator request -} - -func TestRouterCrossChainMessages(t *testing.T) { - tm, err := timeout.NewManager( - &timer.AdaptiveTimeoutConfig{ - InitialTimeout: 3 * time.Second, - MinimumTimeout: 3 * time.Second, - MaximumTimeout: 5 * time.Minute, - TimeoutCoefficient: 1, - TimeoutHalflife: 5 * time.Minute, - }, - benchlist.NewNoBenchlist(), - "timeoutManager", - prometheus.NewRegistry(), - ) - require.NoError(t, err) - go tm.Dispatch() - - // Create chain router - nodeID := ids.GenerateTestNodeID() - chainRouter := ChainRouter{} - err = chainRouter.Initialize( - nodeID, - logging.NoLog{}, - tm, - time.Millisecond, - set.Set[ids.ID]{}, - true, - set.Set[ids.ID]{}, - nil, - HealthConfig{}, - "", - prometheus.NewRegistry(), - ) - require.NoError(t, err) - - // Set up validators - vdrs := validators.NewSet() - require.NoError(t, vdrs.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1)) - - // Create bootstrapper, engine and handler - requester := snow.DefaultConsensusContextTest() - requester.ChainID = ids.GenerateTestID() - requester.Registerer = prometheus.NewRegistry() - requester.Metrics = metrics.NewOptionalGatherer() - requester.Executing.Set(false) - - resourceTracker, err := tracker.NewResourceTracker( - prometheus.NewRegistry(), - resource.NoUsage, - meter.ContinuousFactory{}, - time.Second, - ) - require.NoError(t, err) - - requesterHandler, err := handler.New( - requester, - vdrs, - nil, - time.Second, - testThreadPoolSize, - resourceTracker, - validators.UnhandledSubnetConnector, - subnets.New(requester.NodeID, subnets.Config{}), - ) - require.NoError(t, err) - - responder := snow.DefaultConsensusContextTest() - responder.ChainID = 
ids.GenerateTestID() - responder.Registerer = prometheus.NewRegistry() - responder.Metrics = metrics.NewOptionalGatherer() - responder.Executing.Set(false) - - responderHandler, err := handler.New( - responder, - vdrs, - nil, - time.Second, - testThreadPoolSize, - resourceTracker, - validators.UnhandledSubnetConnector, - subnets.New(responder.NodeID, subnets.Config{}), - ) - require.NoError(t, err) - - // assumed bootstrapping is done - responder.State.Set(snow.EngineState{ - Type: engineType, - State: snow.NormalOp, - }) - requester.State.Set(snow.EngineState{ - Type: engineType, - State: snow.NormalOp, - }) - - // router tracks two chains - one will send a message to the other - chainRouter.AddChain(context.Background(), requesterHandler) - chainRouter.AddChain(context.Background(), responderHandler) - - // Each chain should start off with a connected message - require.Equal(t, 1, chainRouter.chainHandlers[requester.ChainID].Len()) - require.Equal(t, 1, chainRouter.chainHandlers[responder.ChainID].Len()) - - // Requester sends a request to the responder - msgBytes := []byte("foobar") - msg := message.InternalCrossChainAppRequest( - requester.NodeID, - requester.ChainID, - responder.ChainID, - uint32(1), - time.Minute, - msgBytes, - ) - chainRouter.HandleInbound(context.Background(), msg) - require.Equal(t, 2, chainRouter.chainHandlers[responder.ChainID].Len()) - - // We register the cross-chain response on the requester-side so we don't - // drop it. - chainRouter.RegisterRequest( - context.Background(), - nodeID, - requester.ChainID, - responder.ChainID, - uint32(1), - message.CrossChainAppResponseOp, - message.InternalCrossChainAppRequestFailed( - nodeID, - responder.ChainID, - requester.ChainID, - uint32(1), - ), - p2p.EngineType_ENGINE_TYPE_UNSPECIFIED, - ) - // Responder sends a response back to the requester. 
- msg = message.InternalCrossChainAppResponse( - nodeID, - responder.ChainID, - requester.ChainID, - uint32(1), - msgBytes, - ) - chainRouter.HandleInbound(context.Background(), msg) - require.Equal(t, 2, chainRouter.chainHandlers[requester.ChainID].Len()) + require.True(calledF) // should be called since this is a validator request } func TestConnectedSubnet(t *testing.T) { + require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() tm, err := timeout.NewManager( &timer.AdaptiveTimeoutConfig{ @@ -1396,18 +1078,19 @@ func TestConnectedSubnet(t *testing.T) { "timeoutManager", prometheus.NewRegistry(), ) - require.NoError(t, err) + require.NoError(err) + go tm.Dispatch() + defer tm.Stop() // Create chain router myNodeID := ids.GenerateTestNodeID() peerNodeID := ids.GenerateTestNodeID() subnetID0 := ids.GenerateTestID() subnetID1 := ids.GenerateTestID() - trackedSubnets := set.Set[ids.ID]{} - trackedSubnets.Add(subnetID0, subnetID1) + trackedSubnets := set.Of(subnetID0, subnetID1) chainRouter := ChainRouter{} - err = chainRouter.Initialize( + require.NoError(chainRouter.Initialize( myNodeID, logging.NoLog{}, tm, @@ -1419,17 +1102,13 @@ func TestConnectedSubnet(t *testing.T) { HealthConfig{}, "", prometheus.NewRegistry(), - ) - require.NoError(t, err) + )) // Create bootstrapper, engine and handler - platform := snow.DefaultConsensusContextTest() - platform.ChainID = constants.PlatformChainID - platform.SubnetID = constants.PrimaryNetworkID - platform.Registerer = prometheus.NewRegistry() - platform.Metrics = metrics.NewOptionalGatherer() - platform.Executing.Set(false) - platform.State.Set(snow.EngineState{ + snowCtx := snowtest.Context(t, snowtest.PChainID) + ctx := snowtest.ConsensusContext(snowCtx) + ctx.Executing.Set(false) + ctx.State.Set(snow.EngineState{ Type: engineType, State: snow.NormalOp, }) @@ -1448,7 +1127,7 @@ func TestConnectedSubnet(t *testing.T) { } platformHandler := handler.NewMockHandler(ctrl) - 
platformHandler.EXPECT().Context().Return(platform).AnyTimes() + platformHandler.EXPECT().Context().Return(ctx).AnyTimes() platformHandler.EXPECT().SetOnStopped(gomock.Any()).AnyTimes() platformHandler.EXPECT().Push(gomock.Any(), myConnectedMsg).Times(1) platformHandler.EXPECT().Push(gomock.Any(), mySubnetConnectedMsg0).Times(1) @@ -1500,6 +1179,8 @@ func TestConnectedSubnet(t *testing.T) { } func TestValidatorOnlyAllowedNodeMessageDrops(t *testing.T) { + require := require.New(t) + // Create a timeout manager maxTimeout := 25 * time.Millisecond tm, err := timeout.NewManager( @@ -1514,12 +1195,14 @@ func TestValidatorOnlyAllowedNodeMessageDrops(t *testing.T) { "", prometheus.NewRegistry(), ) - require.NoError(t, err) + require.NoError(err) + go tm.Dispatch() + defer tm.Stop() // Create a router chainRouter := ChainRouter{} - err = chainRouter.Initialize( + require.NoError(chainRouter.Initialize( ids.EmptyNodeID, logging.NoLog{}, tm, @@ -1531,23 +1214,22 @@ func TestValidatorOnlyAllowedNodeMessageDrops(t *testing.T) { HealthConfig{}, "", prometheus.NewRegistry(), - ) - require.NoError(t, err) + )) + defer chainRouter.Shutdown(context.Background()) // Create bootstrapper, engine and handler calledF := false wg := sync.WaitGroup{} - ctx := snow.DefaultConsensusContextTest() + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) allowedID := ids.GenerateTestNodeID() - allowedSet := set.NewSet[ids.NodeID](1) - allowedSet.Add(allowedID) + allowedSet := set.Of(allowedID) sb := subnets.New(ctx.NodeID, subnets.Config{ValidatorOnly: true, AllowedNodes: allowedSet}) - vdrs := validators.NewSet() + vdrs := validators.NewManager() vID := ids.GenerateTestNodeID() - err = vdrs.Add(vID, nil, ids.Empty, 1) - require.NoError(t, err) + require.NoError(vdrs.AddStaker(ctx.SubnetID, vID, nil, ids.Empty, 1)) resourceTracker, err := tracker.NewResourceTracker( prometheus.NewRegistry(), @@ -1555,7 +1237,7 @@ func 
TestValidatorOnlyAllowedNodeMessageDrops(t *testing.T) { meter.ContinuousFactory{}, time.Second, ) - require.NoError(t, err) + require.NoError(err) h, err := handler.New( ctx, @@ -1566,13 +1248,11 @@ func TestValidatorOnlyAllowedNodeMessageDrops(t *testing.T) { resourceTracker, validators.UnhandledSubnetConnector, sb, + commontracker.NewPeers(), ) - require.NoError(t, err) + require.NoError(err) bootstrapper := &common.BootstrapperTest{ - BootstrapableTest: common.BootstrapableTest{ - T: t, - }, EngineTest: common.EngineTest{ T: t, }, @@ -1581,7 +1261,7 @@ func TestValidatorOnlyAllowedNodeMessageDrops(t *testing.T) { bootstrapper.ContextF = func() *snow.ConsensusContext { return ctx } - bootstrapper.PullQueryF = func(context.Context, ids.NodeID, uint32, ids.ID) error { + bootstrapper.PullQueryF = func(context.Context, ids.NodeID, uint32, ids.ID, uint64) error { defer wg.Done() calledF = true return nil @@ -1623,12 +1303,13 @@ func TestValidatorOnlyAllowedNodeMessageDrops(t *testing.T) { reqID, time.Hour, dummyContainerID, + 0, nID, engineType, ) chainRouter.HandleInbound(context.Background(), inMsg) - require.False(t, calledF) // should not be called for unallowed node ID + require.False(calledF) // should not be called for unallowed node ID // Allowed NodeID case calledF = false @@ -1638,6 +1319,7 @@ func TestValidatorOnlyAllowedNodeMessageDrops(t *testing.T) { reqID, time.Hour, dummyContainerID, + 0, allowedID, engineType, ) @@ -1645,7 +1327,7 @@ func TestValidatorOnlyAllowedNodeMessageDrops(t *testing.T) { chainRouter.HandleInbound(context.Background(), inMsg) wg.Wait() - require.True(t, calledF) // should be called since this is a allowed node request + require.True(calledF) // should be called since this is a allowed node request // Validator case calledF = false @@ -1655,6 +1337,7 @@ func TestValidatorOnlyAllowedNodeMessageDrops(t *testing.T) { reqID, time.Hour, dummyContainerID, + 0, vID, engineType, ) @@ -1662,5 +1345,277 @@ func 
TestValidatorOnlyAllowedNodeMessageDrops(t *testing.T) { chainRouter.HandleInbound(context.Background(), inMsg) wg.Wait() - require.True(t, calledF) // should be called since this is a validator request + require.True(calledF) // should be called since this is a validator request +} + +// Tests that a response, peer error, or a timeout clears the timeout and calls +// the handler +func TestAppRequest(t *testing.T) { + wantRequestID := uint32(123) + wantResponse := []byte("response") + + errFoo := common.AppError{ + Code: 456, + Message: "foo", + } + + tests := []struct { + name string + responseOp message.Op + timeoutMsg message.InboundMessage + inboundMsg message.InboundMessage + }{ + { + name: "AppRequest - chain response", + responseOp: message.AppResponseOp, + timeoutMsg: message.InboundAppError(ids.EmptyNodeID, ids.Empty, wantRequestID, errFoo.Code, errFoo.Message), + inboundMsg: message.InboundAppResponse(ids.Empty, wantRequestID, wantResponse, ids.EmptyNodeID), + }, + { + name: "AppRequest - chain error", + responseOp: message.AppResponseOp, + timeoutMsg: message.InboundAppError(ids.EmptyNodeID, ids.Empty, wantRequestID, errFoo.Code, errFoo.Message), + inboundMsg: message.InboundAppError(ids.EmptyNodeID, ids.Empty, wantRequestID, errFoo.Code, errFoo.Message), + }, + { + name: "AppRequest - timeout", + responseOp: message.AppResponseOp, + timeoutMsg: message.InboundAppError(ids.EmptyNodeID, ids.Empty, wantRequestID, errFoo.Code, errFoo.Message), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + + wg := &sync.WaitGroup{} + chainRouter, engine := newChainRouterTest(t) + + wg.Add(1) + if tt.inboundMsg == nil || tt.inboundMsg.Op() == message.AppErrorOp { + engine.AppRequestFailedF = func(_ context.Context, nodeID ids.NodeID, requestID uint32, appErr *common.AppError) error { + defer wg.Done() + require.Zero(chainRouter.timedRequests.Len()) + + require.Equal(ids.EmptyNodeID, nodeID) + 
require.Equal(wantRequestID, requestID) + require.Equal(errFoo.Code, appErr.Code) + require.Equal(errFoo.Message, appErr.Message) + + return nil + } + } else if tt.inboundMsg.Op() == message.AppResponseOp { + engine.AppResponseF = func(_ context.Context, nodeID ids.NodeID, requestID uint32, msg []byte) error { + defer wg.Done() + require.Zero(chainRouter.timedRequests.Len()) + + require.Equal(ids.EmptyNodeID, nodeID) + require.Equal(wantRequestID, requestID) + require.Equal(wantResponse, msg) + + return nil + } + } + + ctx := context.Background() + chainRouter.RegisterRequest(ctx, ids.EmptyNodeID, ids.Empty, ids.Empty, wantRequestID, tt.responseOp, tt.timeoutMsg, engineType) + require.Equal(1, chainRouter.timedRequests.Len()) + + if tt.inboundMsg != nil { + chainRouter.HandleInbound(ctx, tt.inboundMsg) + } + + wg.Wait() + }) + } +} + +// Tests that a response, peer error, or a timeout clears the timeout and calls +// the handler +func TestCrossChainAppRequest(t *testing.T) { + wantRequestID := uint32(123) + wantResponse := []byte("response") + + errFoo := common.AppError{ + Code: 456, + Message: "foo", + } + + tests := []struct { + name string + responseOp message.Op + timeoutMsg message.InboundMessage + inboundMsg message.InboundMessage + }{ + { + name: "CrossChainAppRequest - chain response", + responseOp: message.CrossChainAppResponseOp, + timeoutMsg: message.InternalCrossChainAppError(ids.EmptyNodeID, ids.Empty, ids.Empty, wantRequestID, errFoo.Code, errFoo.Message), + inboundMsg: message.InternalCrossChainAppResponse(ids.EmptyNodeID, ids.Empty, ids.Empty, wantRequestID, wantResponse), + }, + { + name: "CrossChainAppRequest - chain error", + responseOp: message.CrossChainAppResponseOp, + timeoutMsg: message.InternalCrossChainAppError(ids.EmptyNodeID, ids.Empty, ids.Empty, wantRequestID, errFoo.Code, errFoo.Message), + inboundMsg: message.InternalCrossChainAppError(ids.EmptyNodeID, ids.Empty, ids.Empty, wantRequestID, errFoo.Code, errFoo.Message), + }, + { + 
name: "CrossChainAppRequest - timeout", + responseOp: message.CrossChainAppResponseOp, + timeoutMsg: message.InternalCrossChainAppError(ids.EmptyNodeID, ids.Empty, ids.Empty, wantRequestID, errFoo.Code, errFoo.Message), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + + wg := &sync.WaitGroup{} + chainRouter, engine := newChainRouterTest(t) + + wg.Add(1) + if tt.inboundMsg == nil || tt.inboundMsg.Op() == message.CrossChainAppErrorOp { + engine.CrossChainAppRequestFailedF = func(_ context.Context, chainID ids.ID, requestID uint32, appErr *common.AppError) error { + defer wg.Done() + require.Zero(chainRouter.timedRequests.Len()) + + require.Equal(ids.Empty, chainID) + require.Equal(wantRequestID, requestID) + require.Equal(errFoo.Code, appErr.Code) + require.Equal(errFoo.Message, appErr.Message) + + return nil + } + } else if tt.inboundMsg.Op() == message.CrossChainAppResponseOp { + engine.CrossChainAppResponseF = func(_ context.Context, chainID ids.ID, requestID uint32, msg []byte) error { + defer wg.Done() + require.Zero(chainRouter.timedRequests.Len()) + + require.Equal(ids.Empty, chainID) + require.Equal(wantRequestID, requestID) + require.Equal(wantResponse, msg) + + return nil + } + } + + ctx := context.Background() + chainRouter.RegisterRequest(ctx, ids.EmptyNodeID, ids.Empty, ids.Empty, wantRequestID, tt.responseOp, tt.timeoutMsg, engineType) + require.Equal(1, chainRouter.timedRequests.Len()) + + if tt.inboundMsg != nil { + chainRouter.HandleInbound(ctx, tt.inboundMsg) + } + + wg.Wait() + }) + } +} + +func newChainRouterTest(t *testing.T) (*ChainRouter, *common.EngineTest) { + // Create a timeout manager + tm, err := timeout.NewManager( + &timer.AdaptiveTimeoutConfig{ + InitialTimeout: 3 * time.Second, + MinimumTimeout: 3 * time.Second, + MaximumTimeout: 5 * time.Minute, + TimeoutCoefficient: 1, + TimeoutHalflife: 5 * time.Minute, + }, + benchlist.NewNoBenchlist(), + "", + prometheus.NewRegistry(), + 
) + require.NoError(t, err) + + go tm.Dispatch() + + // Create a router + chainRouter := &ChainRouter{} + require.NoError(t, chainRouter.Initialize( + ids.EmptyNodeID, + logging.NoLog{}, + tm, + time.Millisecond, + set.Set[ids.ID]{}, + true, + set.Set[ids.ID]{}, + nil, + HealthConfig{}, + "", + prometheus.NewRegistry(), + )) + + // Create bootstrapper, engine and handler + snowCtx := snowtest.Context(t, snowtest.PChainID) + ctx := snowtest.ConsensusContext(snowCtx) + vdrs := validators.NewManager() + require.NoError(t, vdrs.AddStaker(ctx.SubnetID, ids.GenerateTestNodeID(), nil, ids.Empty, 1)) + + resourceTracker, err := tracker.NewResourceTracker( + prometheus.NewRegistry(), + resource.NoUsage, + meter.ContinuousFactory{}, + time.Second, + ) + require.NoError(t, err) + h, err := handler.New( + ctx, + vdrs, + nil, + time.Second, + testThreadPoolSize, + resourceTracker, + validators.UnhandledSubnetConnector, + subnets.New(ctx.NodeID, subnets.Config{}), + commontracker.NewPeers(), + ) + require.NoError(t, err) + + bootstrapper := &common.BootstrapperTest{ + EngineTest: common.EngineTest{ + T: t, + }, + } + bootstrapper.Default(false) + bootstrapper.ContextF = func() *snow.ConsensusContext { + return ctx + } + + engine := &common.EngineTest{T: t} + engine.Default(false) + engine.ContextF = func() *snow.ConsensusContext { + return ctx + } + h.SetEngineManager(&handler.EngineManager{ + Avalanche: &handler.Engine{ + StateSyncer: nil, + Bootstrapper: bootstrapper, + Consensus: engine, + }, + Snowman: &handler.Engine{ + StateSyncer: nil, + Bootstrapper: bootstrapper, + Consensus: engine, + }, + }) + ctx.State.Set(snow.EngineState{ + Type: p2p.EngineType_ENGINE_TYPE_SNOWMAN, + State: snow.NormalOp, // assumed bootstrapping is done + }) + + chainRouter.AddChain(context.Background(), h) + + bootstrapper.StartF = func(context.Context, uint32) error { + return nil + } + + h.Start(context.Background(), false) + + t.Cleanup(func() { + tm.Stop() + 
chainRouter.Shutdown(context.Background()) + }) + + return chainRouter, engine } diff --git a/avalanchego/snow/networking/router/health.go b/avalanchego/snow/networking/router/health.go index d678f0f1..3968f981 100644 --- a/avalanchego/snow/networking/router/health.go +++ b/avalanchego/snow/networking/router/health.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package router diff --git a/avalanchego/snow/networking/router/inbound_handler.go b/avalanchego/snow/networking/router/inbound_handler.go index cfd6d5fa..81d2d9b8 100644 --- a/avalanchego/snow/networking/router/inbound_handler.go +++ b/avalanchego/snow/networking/router/inbound_handler.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package router diff --git a/avalanchego/snow/networking/router/main_test.go b/avalanchego/snow/networking/router/main_test.go new file mode 100644 index 00000000..4398ad2e --- /dev/null +++ b/avalanchego/snow/networking/router/main_test.go @@ -0,0 +1,14 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package router + +import ( + "testing" + + "go.uber.org/goleak" +) + +func TestMain(m *testing.M) { + goleak.VerifyTestMain(m) +} diff --git a/avalanchego/snow/networking/router/mock_router.go b/avalanchego/snow/networking/router/mock_router.go index 60d1d393..c9146a77 100644 --- a/avalanchego/snow/networking/router/mock_router.go +++ b/avalanchego/snow/networking/router/mock_router.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. 
-// Source: github.com/ava-labs/avalanchego/snow/networking/router (interfaces: Router) +// Source: snow/networking/router/router.go +// +// Generated by this command: +// +// mockgen -source=snow/networking/router/router.go -destination=snow/networking/router/mock_router.go -package=router -exclude_interfaces=InternalHandler +// // Package router is a generated GoMock package. package router @@ -20,8 +22,8 @@ import ( logging "github.com/ava-labs/avalanchego/utils/logging" set "github.com/ava-labs/avalanchego/utils/set" version "github.com/ava-labs/avalanchego/version" - gomock "github.com/golang/mock/gomock" prometheus "github.com/prometheus/client_golang/prometheus" + gomock "go.uber.org/mock/gomock" ) // MockRouter is a mock of Router interface. @@ -48,51 +50,51 @@ func (m *MockRouter) EXPECT() *MockRouterMockRecorder { } // AddChain mocks base method. -func (m *MockRouter) AddChain(arg0 context.Context, arg1 handler.Handler) { +func (m *MockRouter) AddChain(ctx context.Context, chain handler.Handler) { m.ctrl.T.Helper() - m.ctrl.Call(m, "AddChain", arg0, arg1) + m.ctrl.Call(m, "AddChain", ctx, chain) } // AddChain indicates an expected call of AddChain. -func (mr *MockRouterMockRecorder) AddChain(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockRouterMockRecorder) AddChain(ctx, chain any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddChain", reflect.TypeOf((*MockRouter)(nil).AddChain), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddChain", reflect.TypeOf((*MockRouter)(nil).AddChain), ctx, chain) } // Benched mocks base method. -func (m *MockRouter) Benched(arg0 ids.ID, arg1 ids.NodeID) { +func (m *MockRouter) Benched(chainID ids.ID, validatorID ids.NodeID) { m.ctrl.T.Helper() - m.ctrl.Call(m, "Benched", arg0, arg1) + m.ctrl.Call(m, "Benched", chainID, validatorID) } // Benched indicates an expected call of Benched. 
-func (mr *MockRouterMockRecorder) Benched(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockRouterMockRecorder) Benched(chainID, validatorID any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Benched", reflect.TypeOf((*MockRouter)(nil).Benched), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Benched", reflect.TypeOf((*MockRouter)(nil).Benched), chainID, validatorID) } // Connected mocks base method. -func (m *MockRouter) Connected(arg0 ids.NodeID, arg1 *version.Application, arg2 ids.ID) { +func (m *MockRouter) Connected(nodeID ids.NodeID, nodeVersion *version.Application, subnetID ids.ID) { m.ctrl.T.Helper() - m.ctrl.Call(m, "Connected", arg0, arg1, arg2) + m.ctrl.Call(m, "Connected", nodeID, nodeVersion, subnetID) } // Connected indicates an expected call of Connected. -func (mr *MockRouterMockRecorder) Connected(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockRouterMockRecorder) Connected(nodeID, nodeVersion, subnetID any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Connected", reflect.TypeOf((*MockRouter)(nil).Connected), arg0, arg1, arg2) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Connected", reflect.TypeOf((*MockRouter)(nil).Connected), nodeID, nodeVersion, subnetID) } // Disconnected mocks base method. -func (m *MockRouter) Disconnected(arg0 ids.NodeID) { +func (m *MockRouter) Disconnected(nodeID ids.NodeID) { m.ctrl.T.Helper() - m.ctrl.Call(m, "Disconnected", arg0) + m.ctrl.Call(m, "Disconnected", nodeID) } // Disconnected indicates an expected call of Disconnected. 
-func (mr *MockRouterMockRecorder) Disconnected(arg0 interface{}) *gomock.Call { +func (mr *MockRouterMockRecorder) Disconnected(nodeID any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Disconnected", reflect.TypeOf((*MockRouter)(nil).Disconnected), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Disconnected", reflect.TypeOf((*MockRouter)(nil).Disconnected), nodeID) } // HandleInbound mocks base method. @@ -102,50 +104,50 @@ func (m *MockRouter) HandleInbound(arg0 context.Context, arg1 message.InboundMes } // HandleInbound indicates an expected call of HandleInbound. -func (mr *MockRouterMockRecorder) HandleInbound(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockRouterMockRecorder) HandleInbound(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HandleInbound", reflect.TypeOf((*MockRouter)(nil).HandleInbound), arg0, arg1) } // HealthCheck mocks base method. -func (m *MockRouter) HealthCheck(arg0 context.Context) (interface{}, error) { +func (m *MockRouter) HealthCheck(arg0 context.Context) (any, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "HealthCheck", arg0) - ret0, _ := ret[0].(interface{}) + ret0, _ := ret[0].(any) ret1, _ := ret[1].(error) return ret0, ret1 } // HealthCheck indicates an expected call of HealthCheck. -func (mr *MockRouterMockRecorder) HealthCheck(arg0 interface{}) *gomock.Call { +func (mr *MockRouterMockRecorder) HealthCheck(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HealthCheck", reflect.TypeOf((*MockRouter)(nil).HealthCheck), arg0) } // Initialize mocks base method. 
-func (m *MockRouter) Initialize(arg0 ids.NodeID, arg1 logging.Logger, arg2 timeout.Manager, arg3 time.Duration, arg4 set.Set[ids.ID], arg5 bool, arg6 set.Set[ids.ID], arg7 func(int), arg8 HealthConfig, arg9 string, arg10 prometheus.Registerer) error { +func (m *MockRouter) Initialize(nodeID ids.NodeID, log logging.Logger, timeouts timeout.Manager, shutdownTimeout time.Duration, criticalChains set.Set[ids.ID], sybilProtectionEnabled bool, trackedSubnets set.Set[ids.ID], onFatal func(int), healthConfig HealthConfig, metricsNamespace string, metricsRegisterer prometheus.Registerer) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Initialize", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10) + ret := m.ctrl.Call(m, "Initialize", nodeID, log, timeouts, shutdownTimeout, criticalChains, sybilProtectionEnabled, trackedSubnets, onFatal, healthConfig, metricsNamespace, metricsRegisterer) ret0, _ := ret[0].(error) return ret0 } // Initialize indicates an expected call of Initialize. -func (mr *MockRouterMockRecorder) Initialize(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10 interface{}) *gomock.Call { +func (mr *MockRouterMockRecorder) Initialize(nodeID, log, timeouts, shutdownTimeout, criticalChains, sybilProtectionEnabled, trackedSubnets, onFatal, healthConfig, metricsNamespace, metricsRegisterer any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Initialize", reflect.TypeOf((*MockRouter)(nil).Initialize), arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Initialize", reflect.TypeOf((*MockRouter)(nil).Initialize), nodeID, log, timeouts, shutdownTimeout, criticalChains, sybilProtectionEnabled, trackedSubnets, onFatal, healthConfig, metricsNamespace, metricsRegisterer) } // RegisterRequest mocks base method. 
-func (m *MockRouter) RegisterRequest(arg0 context.Context, arg1 ids.NodeID, arg2, arg3 ids.ID, arg4 uint32, arg5 message.Op, arg6 message.InboundMessage, arg7 p2p.EngineType) { +func (m *MockRouter) RegisterRequest(ctx context.Context, nodeID ids.NodeID, sourceChainID, destinationChainID ids.ID, requestID uint32, op message.Op, failedMsg message.InboundMessage, engineType p2p.EngineType) { m.ctrl.T.Helper() - m.ctrl.Call(m, "RegisterRequest", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) + m.ctrl.Call(m, "RegisterRequest", ctx, nodeID, sourceChainID, destinationChainID, requestID, op, failedMsg, engineType) } // RegisterRequest indicates an expected call of RegisterRequest. -func (mr *MockRouterMockRecorder) RegisterRequest(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7 interface{}) *gomock.Call { +func (mr *MockRouterMockRecorder) RegisterRequest(ctx, nodeID, sourceChainID, destinationChainID, requestID, op, failedMsg, engineType any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterRequest", reflect.TypeOf((*MockRouter)(nil).RegisterRequest), arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterRequest", reflect.TypeOf((*MockRouter)(nil).RegisterRequest), ctx, nodeID, sourceChainID, destinationChainID, requestID, op, failedMsg, engineType) } // Shutdown mocks base method. @@ -155,19 +157,19 @@ func (m *MockRouter) Shutdown(arg0 context.Context) { } // Shutdown indicates an expected call of Shutdown. -func (mr *MockRouterMockRecorder) Shutdown(arg0 interface{}) *gomock.Call { +func (mr *MockRouterMockRecorder) Shutdown(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Shutdown", reflect.TypeOf((*MockRouter)(nil).Shutdown), arg0) } // Unbenched mocks base method. 
-func (m *MockRouter) Unbenched(arg0 ids.ID, arg1 ids.NodeID) { +func (m *MockRouter) Unbenched(chainID ids.ID, validatorID ids.NodeID) { m.ctrl.T.Helper() - m.ctrl.Call(m, "Unbenched", arg0, arg1) + m.ctrl.Call(m, "Unbenched", chainID, validatorID) } // Unbenched indicates an expected call of Unbenched. -func (mr *MockRouterMockRecorder) Unbenched(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockRouterMockRecorder) Unbenched(chainID, validatorID any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Unbenched", reflect.TypeOf((*MockRouter)(nil).Unbenched), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Unbenched", reflect.TypeOf((*MockRouter)(nil).Unbenched), chainID, validatorID) } diff --git a/avalanchego/snow/networking/router/router.go b/avalanchego/snow/networking/router/router.go index dca02f39..4df5614c 100644 --- a/avalanchego/snow/networking/router/router.go +++ b/avalanchego/snow/networking/router/router.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package router @@ -32,7 +32,7 @@ type Router interface { timeouts timeout.Manager, shutdownTimeout time.Duration, criticalChains set.Set[ids.ID], - stakingEnabled bool, + sybilProtectionEnabled bool, trackedSubnets set.Set[ids.ID], onFatal func(exitCode int), healthConfig HealthConfig, diff --git a/avalanchego/snow/networking/router/traced_router.go b/avalanchego/snow/networking/router/traced_router.go index 13e3c1bf..4c52bce0 100644 --- a/avalanchego/snow/networking/router/traced_router.go +++ b/avalanchego/snow/networking/router/traced_router.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package router @@ -8,11 +8,8 @@ import ( "time" "github.com/prometheus/client_golang/prometheus" - "go.opentelemetry.io/otel/attribute" - oteltrace "go.opentelemetry.io/otel/trace" - "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/message" "github.com/ava-labs/avalanchego/proto/pb/p2p" @@ -22,6 +19,8 @@ import ( "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/version" + + oteltrace "go.opentelemetry.io/otel/trace" ) var _ Router = (*tracedRouter)(nil) @@ -44,7 +43,7 @@ func (r *tracedRouter) Initialize( timeoutManager timeout.Manager, closeTimeout time.Duration, criticalChains set.Set[ids.ID], - stakingEnabled bool, + sybilProtectionEnabled bool, trackedSubnets set.Set[ids.ID], onFatal func(exitCode int), healthConfig HealthConfig, @@ -57,7 +56,7 @@ func (r *tracedRouter) Initialize( timeoutManager, closeTimeout, criticalChains, - stakingEnabled, + sybilProtectionEnabled, trackedSubnets, onFatal, healthConfig, diff --git a/avalanchego/snow/networking/sender/external_sender.go b/avalanchego/snow/networking/sender/external_sender.go index 72d9539d..7d279889 100644 --- a/avalanchego/snow/networking/sender/external_sender.go +++ b/avalanchego/snow/networking/sender/external_sender.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package sender diff --git a/avalanchego/snow/networking/sender/mock_external_sender.go b/avalanchego/snow/networking/sender/mock_external_sender.go index 322a7d17..9dc0a50d 100644 --- a/avalanchego/snow/networking/sender/mock_external_sender.go +++ b/avalanchego/snow/networking/sender/mock_external_sender.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. 
-// Source: github.com/ava-labs/avalanchego/snow/networking/sender (interfaces: ExternalSender) +// Source: snow/networking/sender/external_sender.go +// +// Generated by this command: +// +// mockgen -source=snow/networking/sender/external_sender.go -destination=snow/networking/sender/mock_external_sender.go -package=sender -exclude_interfaces= +// // Package sender is a generated GoMock package. package sender @@ -14,7 +16,7 @@ import ( message "github.com/ava-labs/avalanchego/message" subnets "github.com/ava-labs/avalanchego/subnets" set "github.com/ava-labs/avalanchego/utils/set" - gomock "github.com/golang/mock/gomock" + gomock "go.uber.org/mock/gomock" ) // MockExternalSender is a mock of ExternalSender interface. @@ -41,29 +43,29 @@ func (m *MockExternalSender) EXPECT() *MockExternalSenderMockRecorder { } // Gossip mocks base method. -func (m *MockExternalSender) Gossip(arg0 message.OutboundMessage, arg1 ids.ID, arg2, arg3, arg4 int, arg5 subnets.Allower) set.Set[ids.NodeID] { +func (m *MockExternalSender) Gossip(msg message.OutboundMessage, subnetID ids.ID, numValidatorsToSend, numNonValidatorsToSend, numPeersToSend int, allower subnets.Allower) set.Set[ids.NodeID] { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Gossip", arg0, arg1, arg2, arg3, arg4, arg5) + ret := m.ctrl.Call(m, "Gossip", msg, subnetID, numValidatorsToSend, numNonValidatorsToSend, numPeersToSend, allower) ret0, _ := ret[0].(set.Set[ids.NodeID]) return ret0 } // Gossip indicates an expected call of Gossip. 
-func (mr *MockExternalSenderMockRecorder) Gossip(arg0, arg1, arg2, arg3, arg4, arg5 interface{}) *gomock.Call { +func (mr *MockExternalSenderMockRecorder) Gossip(msg, subnetID, numValidatorsToSend, numNonValidatorsToSend, numPeersToSend, allower any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Gossip", reflect.TypeOf((*MockExternalSender)(nil).Gossip), arg0, arg1, arg2, arg3, arg4, arg5) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Gossip", reflect.TypeOf((*MockExternalSender)(nil).Gossip), msg, subnetID, numValidatorsToSend, numNonValidatorsToSend, numPeersToSend, allower) } // Send mocks base method. -func (m *MockExternalSender) Send(arg0 message.OutboundMessage, arg1 set.Set[ids.NodeID], arg2 ids.ID, arg3 subnets.Allower) set.Set[ids.NodeID] { +func (m *MockExternalSender) Send(msg message.OutboundMessage, nodeIDs set.Set[ids.NodeID], subnetID ids.ID, allower subnets.Allower) set.Set[ids.NodeID] { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Send", arg0, arg1, arg2, arg3) + ret := m.ctrl.Call(m, "Send", msg, nodeIDs, subnetID, allower) ret0, _ := ret[0].(set.Set[ids.NodeID]) return ret0 } // Send indicates an expected call of Send. 
-func (mr *MockExternalSenderMockRecorder) Send(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +func (mr *MockExternalSenderMockRecorder) Send(msg, nodeIDs, subnetID, allower any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", reflect.TypeOf((*MockExternalSender)(nil).Send), arg0, arg1, arg2, arg3) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", reflect.TypeOf((*MockExternalSender)(nil).Send), msg, nodeIDs, subnetID, allower) } diff --git a/avalanchego/snow/networking/sender/sender.go b/avalanchego/snow/networking/sender/sender.go index 014c4b8a..a7090e5d 100644 --- a/avalanchego/snow/networking/sender/sender.go +++ b/avalanchego/snow/networking/sender/sender.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package sender @@ -8,7 +8,6 @@ import ( "fmt" "github.com/prometheus/client_golang/prometheus" - "go.uber.org/zap" "github.com/ava-labs/avalanchego/ids" @@ -19,8 +18,8 @@ import ( "github.com/ava-labs/avalanchego/snow/networking/router" "github.com/ava-labs/avalanchego/snow/networking/timeout" "github.com/ava-labs/avalanchego/subnets" - "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/set" ) @@ -93,7 +92,7 @@ func New( } func (s *sender) SendGetStateSummaryFrontier(ctx context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32) { - ctx = utils.Detach(ctx) + ctx = context.WithoutCancel(ctx) // Note that this timeout duration won't exactly match the one that gets // registered. That's OK. 
@@ -174,7 +173,7 @@ func (s *sender) SendGetStateSummaryFrontier(ctx context.Context, nodeIDs set.Se } func (s *sender) SendStateSummaryFrontier(ctx context.Context, nodeID ids.NodeID, requestID uint32, summary []byte) { - ctx = utils.Detach(ctx) + ctx = context.WithoutCancel(ctx) // Sending this message to myself. if nodeID == s.ctx.NodeID { @@ -206,8 +205,7 @@ func (s *sender) SendStateSummaryFrontier(ctx context.Context, nodeID ids.NodeID } // Send the message over the network. - nodeIDs := set.NewSet[ids.NodeID](1) - nodeIDs.Add(nodeID) + nodeIDs := set.Of(nodeID) sentTo := s.sender.Send( outMsg, nodeIDs, @@ -215,24 +213,27 @@ func (s *sender) SendStateSummaryFrontier(ctx context.Context, nodeID ids.NodeID s.subnet, ) if sentTo.Len() == 0 { - s.ctx.Log.Debug("failed to send message", - zap.Stringer("messageOp", message.StateSummaryFrontierOp), - zap.Stringer("nodeID", nodeID), - zap.Stringer("chainID", s.ctx.ChainID), - zap.Uint32("requestID", requestID), - ) - s.ctx.Log.Verbo("failed to send message", - zap.Stringer("messageOp", message.StateSummaryFrontierOp), - zap.Stringer("nodeID", nodeID), - zap.Stringer("chainID", s.ctx.ChainID), - zap.Uint32("requestID", requestID), - zap.Binary("summary", summary), - ) + if s.ctx.Log.Enabled(logging.Verbo) { + s.ctx.Log.Verbo("failed to send message", + zap.Stringer("messageOp", message.StateSummaryFrontierOp), + zap.Stringer("nodeID", nodeID), + zap.Stringer("chainID", s.ctx.ChainID), + zap.Uint32("requestID", requestID), + zap.Binary("summary", summary), + ) + } else { + s.ctx.Log.Debug("failed to send message", + zap.Stringer("messageOp", message.StateSummaryFrontierOp), + zap.Stringer("nodeID", nodeID), + zap.Stringer("chainID", s.ctx.ChainID), + zap.Uint32("requestID", requestID), + ) + } } } func (s *sender) SendGetAcceptedStateSummary(ctx context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32, heights []uint64) { - ctx = utils.Detach(ctx) + ctx = context.WithoutCancel(ctx) // Note that this timeout 
duration won't exactly match the one that gets // registered. That's OK. @@ -316,7 +317,7 @@ func (s *sender) SendGetAcceptedStateSummary(ctx context.Context, nodeIDs set.Se } func (s *sender) SendAcceptedStateSummary(ctx context.Context, nodeID ids.NodeID, requestID uint32, summaryIDs []ids.ID) { - ctx = utils.Detach(ctx) + ctx = context.WithoutCancel(ctx) if nodeID == s.ctx.NodeID { inMsg := message.InboundAcceptedStateSummary( @@ -347,8 +348,7 @@ func (s *sender) SendAcceptedStateSummary(ctx context.Context, nodeID ids.NodeID } // Send the message over the network. - nodeIDs := set.NewSet[ids.NodeID](1) - nodeIDs.Add(nodeID) + nodeIDs := set.Of(nodeID) sentTo := s.sender.Send( outMsg, nodeIDs, @@ -367,7 +367,7 @@ func (s *sender) SendAcceptedStateSummary(ctx context.Context, nodeID ids.NodeID } func (s *sender) SendGetAcceptedFrontier(ctx context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32) { - ctx = utils.Detach(ctx) + ctx = context.WithoutCancel(ctx) // Note that this timeout duration won't exactly match the one that gets // registered. That's OK. @@ -450,15 +450,15 @@ func (s *sender) SendGetAcceptedFrontier(ctx context.Context, nodeIDs set.Set[id } } -func (s *sender) SendAcceptedFrontier(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerIDs []ids.ID) { - ctx = utils.Detach(ctx) +func (s *sender) SendAcceptedFrontier(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerID ids.ID) { + ctx = context.WithoutCancel(ctx) // Sending this message to myself. 
if nodeID == s.ctx.NodeID { inMsg := message.InboundAcceptedFrontier( s.ctx.ChainID, requestID, - containerIDs, + containerID, nodeID, ) go s.router.HandleInbound(ctx, inMsg) @@ -469,22 +469,21 @@ func (s *sender) SendAcceptedFrontier(ctx context.Context, nodeID ids.NodeID, re outMsg, err := s.msgCreator.AcceptedFrontier( s.ctx.ChainID, requestID, - containerIDs, + containerID, ) if err != nil { s.ctx.Log.Error("failed to build message", zap.Stringer("messageOp", message.AcceptedFrontierOp), zap.Stringer("chainID", s.ctx.ChainID), zap.Uint32("requestID", requestID), - zap.Stringers("containerIDs", containerIDs), + zap.Stringer("containerID", containerID), zap.Error(err), ) return } // Send the message over the network. - nodeIDs := set.NewSet[ids.NodeID](1) - nodeIDs.Add(nodeID) + nodeIDs := set.Of(nodeID) sentTo := s.sender.Send( outMsg, nodeIDs, @@ -497,13 +496,13 @@ func (s *sender) SendAcceptedFrontier(ctx context.Context, nodeID ids.NodeID, re zap.Stringer("nodeID", nodeID), zap.Stringer("chainID", s.ctx.ChainID), zap.Uint32("requestID", requestID), - zap.Stringers("containerIDs", containerIDs), + zap.Stringer("containerID", containerID), ) } } func (s *sender) SendGetAccepted(ctx context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32, containerIDs []ids.ID) { - ctx = utils.Detach(ctx) + ctx = context.WithoutCancel(ctx) // Note that this timeout duration won't exactly match the one that gets // registered. That's OK. @@ -590,7 +589,7 @@ func (s *sender) SendGetAccepted(ctx context.Context, nodeIDs set.Set[ids.NodeID } func (s *sender) SendAccepted(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerIDs []ids.ID) { - ctx = utils.Detach(ctx) + ctx = context.WithoutCancel(ctx) if nodeID == s.ctx.NodeID { inMsg := message.InboundAccepted( @@ -617,8 +616,7 @@ func (s *sender) SendAccepted(ctx context.Context, nodeID ids.NodeID, requestID } // Send the message over the network. 
- nodeIDs := set.NewSet[ids.NodeID](1) - nodeIDs.Add(nodeID) + nodeIDs := set.Of(nodeID) sentTo := s.sender.Send( outMsg, nodeIDs, @@ -637,7 +635,7 @@ func (s *sender) SendAccepted(ctx context.Context, nodeID ids.NodeID, requestID } func (s *sender) SendGetAncestors(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerID ids.ID) { - ctx = utils.Detach(ctx) + ctx = context.WithoutCancel(ctx) // Tell the router to expect a response message or a message notifying // that we won't get a response from this node. @@ -698,8 +696,7 @@ func (s *sender) SendGetAncestors(ctx context.Context, nodeID ids.NodeID, reques } // Send the message over the network. - nodeIDs := set.NewSet[ids.NodeID](1) - nodeIDs.Add(nodeID) + nodeIDs := set.Of(nodeID) sentTo := s.sender.Send( outMsg, nodeIDs, @@ -720,9 +717,6 @@ func (s *sender) SendGetAncestors(ctx context.Context, nodeID ids.NodeID, reques } } -// SendAncestors sends an Ancestors message to the consensus engine running on -// the specified chain on the specified node. -// The Ancestors message gives the recipient the contents of several containers. func (s *sender) SendAncestors(_ context.Context, nodeID ids.NodeID, requestID uint32, containers [][]byte) { // Create the outbound message. outMsg, err := s.msgCreator.Ancestors(s.ctx.ChainID, requestID, containers) @@ -738,8 +732,7 @@ func (s *sender) SendAncestors(_ context.Context, nodeID ids.NodeID, requestID u } // Send the message over the network. - nodeIDs := set.NewSet[ids.NodeID](1) - nodeIDs.Add(nodeID) + nodeIDs := set.Of(nodeID) sentTo := s.sender.Send( outMsg, nodeIDs, @@ -757,12 +750,8 @@ func (s *sender) SendAncestors(_ context.Context, nodeID ids.NodeID, requestID u } } -// SendGet sends a Get message to the consensus engine running on the specified -// chain to the specified node. The Get message signifies that this -// consensus engine would like the recipient to send this consensus engine the -// specified container. 
func (s *sender) SendGet(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerID ids.ID) { - ctx = utils.Detach(ctx) + ctx = context.WithoutCancel(ctx) // Tell the router to expect a response message or a message notifying // that we won't get a response from this node. @@ -813,8 +802,7 @@ func (s *sender) SendGet(ctx context.Context, nodeID ids.NodeID, requestID uint3 // Send the message over the network. var sentTo set.Set[ids.NodeID] if err == nil { - nodeIDs := set.NewSet[ids.NodeID](1) - nodeIDs.Add(nodeID) + nodeIDs := set.Of(nodeID) sentTo = s.sender.Send( outMsg, nodeIDs, @@ -846,10 +834,6 @@ func (s *sender) SendGet(ctx context.Context, nodeID ids.NodeID, requestID uint3 } } -// SendPut sends a Put message to the consensus engine running on the specified -// chain on the specified node. -// The Put message signifies that this consensus engine is giving to the -// recipient the contents of the specified container. func (s *sender) SendPut(_ context.Context, nodeID ids.NodeID, requestID uint32, container []byte) { // Create the outbound message. outMsg, err := s.msgCreator.Put(s.ctx.ChainID, requestID, container, s.engineType) @@ -865,8 +849,7 @@ func (s *sender) SendPut(_ context.Context, nodeID ids.NodeID, requestID uint32, } // Send the message over the network. 
- nodeIDs := set.NewSet[ids.NodeID](1) - nodeIDs.Add(nodeID) + nodeIDs := set.Of(nodeID) sentTo := s.sender.Send( outMsg, nodeIDs, @@ -874,29 +857,33 @@ func (s *sender) SendPut(_ context.Context, nodeID ids.NodeID, requestID uint32, s.subnet, ) if sentTo.Len() == 0 { - s.ctx.Log.Debug("failed to send message", - zap.Stringer("messageOp", message.PutOp), - zap.Stringer("nodeID", nodeID), - zap.Stringer("chainID", s.ctx.ChainID), - zap.Uint32("requestID", requestID), - ) - s.ctx.Log.Verbo("failed to send message", - zap.Stringer("messageOp", message.PutOp), - zap.Stringer("nodeID", nodeID), - zap.Stringer("chainID", s.ctx.ChainID), - zap.Uint32("requestID", requestID), - zap.Binary("container", container), - ) + if s.ctx.Log.Enabled(logging.Verbo) { + s.ctx.Log.Verbo("failed to send message", + zap.Stringer("messageOp", message.PutOp), + zap.Stringer("nodeID", nodeID), + zap.Stringer("chainID", s.ctx.ChainID), + zap.Uint32("requestID", requestID), + zap.Binary("container", container), + ) + } else { + s.ctx.Log.Debug("failed to send message", + zap.Stringer("messageOp", message.PutOp), + zap.Stringer("nodeID", nodeID), + zap.Stringer("chainID", s.ctx.ChainID), + zap.Uint32("requestID", requestID), + ) + } } } -// SendPushQuery sends a PushQuery message to the consensus engines running on -// the specified chains on the specified nodes. -// The PushQuery message signifies that this consensus engine would like each -// node to send their preferred frontier given the existence of the specified -// container. -func (s *sender) SendPushQuery(ctx context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32, container []byte) { - ctx = utils.Detach(ctx) +func (s *sender) SendPushQuery( + ctx context.Context, + nodeIDs set.Set[ids.NodeID], + requestID uint32, + container []byte, + requestedHeight uint64, +) { + ctx = context.WithoutCancel(ctx) // Tell the router to expect a response message or a message notifying // that we won't get a response from each of these nodes. 
@@ -935,6 +922,7 @@ func (s *sender) SendPushQuery(ctx context.Context, nodeIDs set.Set[ids.NodeID], requestID, deadline, container, + requestedHeight, s.ctx.NodeID, s.engineType, ) @@ -968,6 +956,7 @@ func (s *sender) SendPushQuery(ctx context.Context, nodeIDs set.Set[ids.NodeID], requestID, deadline, container, + requestedHeight, s.engineType, ) @@ -987,25 +976,31 @@ func (s *sender) SendPushQuery(ctx context.Context, nodeIDs set.Set[ids.NodeID], zap.Stringer("chainID", s.ctx.ChainID), zap.Uint32("requestID", requestID), zap.Binary("container", container), + zap.Uint64("requestedHeight", requestedHeight), zap.Error(err), ) } for nodeID := range nodeIDs { if !sentTo.Contains(nodeID) { - s.ctx.Log.Debug("failed to send message", - zap.Stringer("messageOp", message.PushQueryOp), - zap.Stringer("nodeID", nodeID), - zap.Stringer("chainID", s.ctx.ChainID), - zap.Uint32("requestID", requestID), - ) - s.ctx.Log.Verbo("failed to send message", - zap.Stringer("messageOp", message.PushQueryOp), - zap.Stringer("nodeID", nodeID), - zap.Stringer("chainID", s.ctx.ChainID), - zap.Uint32("requestID", requestID), - zap.Binary("container", container), - ) + if s.ctx.Log.Enabled(logging.Verbo) { + s.ctx.Log.Verbo("failed to send message", + zap.Stringer("messageOp", message.PushQueryOp), + zap.Stringer("nodeID", nodeID), + zap.Stringer("chainID", s.ctx.ChainID), + zap.Uint32("requestID", requestID), + zap.Binary("container", container), + zap.Uint64("requestedHeight", requestedHeight), + ) + } else { + s.ctx.Log.Debug("failed to send message", + zap.Stringer("messageOp", message.PushQueryOp), + zap.Stringer("nodeID", nodeID), + zap.Stringer("chainID", s.ctx.ChainID), + zap.Uint32("requestID", requestID), + zap.Uint64("requestedHeight", requestedHeight), + ) + } // Register failures for nodes we didn't send a request to. 
s.timeouts.RegisterRequestToUnreachableValidator() @@ -1020,12 +1015,14 @@ func (s *sender) SendPushQuery(ctx context.Context, nodeIDs set.Set[ids.NodeID], } } -// SendPullQuery sends a PullQuery message to the consensus engines running on -// the specified chains on the specified nodes. -// The PullQuery message signifies that this consensus engine would like each -// node to send their preferred frontier. -func (s *sender) SendPullQuery(ctx context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32, containerID ids.ID) { - ctx = utils.Detach(ctx) +func (s *sender) SendPullQuery( + ctx context.Context, + nodeIDs set.Set[ids.NodeID], + requestID uint32, + containerID ids.ID, + requestedHeight uint64, +) { + ctx = context.WithoutCancel(ctx) // Tell the router to expect a response message or a message notifying // that we won't get a response from each of these nodes. @@ -1064,6 +1061,7 @@ func (s *sender) SendPullQuery(ctx context.Context, nodeIDs set.Set[ids.NodeID], requestID, deadline, containerID, + requestedHeight, s.ctx.NodeID, s.engineType, ) @@ -1096,6 +1094,7 @@ func (s *sender) SendPullQuery(ctx context.Context, nodeIDs set.Set[ids.NodeID], requestID, deadline, containerID, + requestedHeight, s.engineType, ) @@ -1115,6 +1114,7 @@ func (s *sender) SendPullQuery(ctx context.Context, nodeIDs set.Set[ids.NodeID], zap.Uint32("requestID", requestID), zap.Duration("deadline", deadline), zap.Stringer("containerID", containerID), + zap.Uint64("requestedHeight", requestedHeight), zap.Error(err), ) } @@ -1127,6 +1127,7 @@ func (s *sender) SendPullQuery(ctx context.Context, nodeIDs set.Set[ids.NodeID], zap.Stringer("chainID", s.ctx.ChainID), zap.Uint32("requestID", requestID), zap.Stringer("containerID", containerID), + zap.Uint64("requestedHeight", requestedHeight), ) // Register failures for nodes we didn't send a request to. 
@@ -1142,9 +1143,15 @@ func (s *sender) SendPullQuery(ctx context.Context, nodeIDs set.Set[ids.NodeID], } } -// SendChits sends chits -func (s *sender) SendChits(ctx context.Context, nodeID ids.NodeID, requestID uint32, votes, accepted []ids.ID) { - ctx = utils.Detach(ctx) +func (s *sender) SendChits( + ctx context.Context, + nodeID ids.NodeID, + requestID uint32, + preferredID ids.ID, + preferredIDAtHeight ids.ID, + acceptedID ids.ID, +) { + ctx = context.WithoutCancel(ctx) // If [nodeID] is myself, send this message directly // to my own router rather than sending it over the network @@ -1152,8 +1159,9 @@ func (s *sender) SendChits(ctx context.Context, nodeID ids.NodeID, requestID uin inMsg := message.InboundChits( s.ctx.ChainID, requestID, - votes, - accepted, + preferredID, + preferredIDAtHeight, + acceptedID, nodeID, ) go s.router.HandleInbound(ctx, inMsg) @@ -1161,21 +1169,22 @@ func (s *sender) SendChits(ctx context.Context, nodeID ids.NodeID, requestID uin } // Create the outbound message. - outMsg, err := s.msgCreator.Chits(s.ctx.ChainID, requestID, votes, accepted) + outMsg, err := s.msgCreator.Chits(s.ctx.ChainID, requestID, preferredID, preferredIDAtHeight, acceptedID) if err != nil { s.ctx.Log.Error("failed to build message", zap.Stringer("messageOp", message.ChitsOp), zap.Stringer("chainID", s.ctx.ChainID), zap.Uint32("requestID", requestID), - zap.Stringers("containerIDs", votes), + zap.Stringer("preferredID", preferredID), + zap.Stringer("preferredIDAtHeight", preferredIDAtHeight), + zap.Stringer("acceptedID", acceptedID), zap.Error(err), ) return } // Send the message over the network. 
- nodeIDs := set.NewSet[ids.NodeID](1) - nodeIDs.Add(nodeID) + nodeIDs := set.Of(nodeID) sentTo := s.sender.Send( outMsg, nodeIDs, @@ -1188,20 +1197,24 @@ func (s *sender) SendChits(ctx context.Context, nodeID ids.NodeID, requestID uin zap.Stringer("nodeID", nodeID), zap.Stringer("chainID", s.ctx.ChainID), zap.Uint32("requestID", requestID), - zap.Stringers("containerIDs", votes), + zap.Stringer("preferredID", preferredID), + zap.Stringer("preferredIDAtHeight", preferredIDAtHeight), + zap.Stringer("acceptedID", acceptedID), ) } } func (s *sender) SendCrossChainAppRequest(ctx context.Context, chainID ids.ID, requestID uint32, appRequestBytes []byte) error { - ctx = utils.Detach(ctx) + ctx = context.WithoutCancel(ctx) // The failed message is treated as if it was sent by the requested chain - failedMsg := message.InternalCrossChainAppRequestFailed( + failedMsg := message.InternalCrossChainAppError( s.ctx.NodeID, chainID, s.ctx.ChainID, requestID, + common.ErrTimeout.Code, + common.ErrTimeout.Message, ) s.router.RegisterRequest( ctx, @@ -1227,7 +1240,7 @@ func (s *sender) SendCrossChainAppRequest(ctx context.Context, chainID ids.ID, r } func (s *sender) SendCrossChainAppResponse(ctx context.Context, chainID ids.ID, requestID uint32, appResponseBytes []byte) error { - ctx = utils.Detach(ctx) + ctx = context.WithoutCancel(ctx) inMsg := message.InternalCrossChainAppResponse( s.ctx.NodeID, @@ -1240,11 +1253,23 @@ func (s *sender) SendCrossChainAppResponse(ctx context.Context, chainID ids.ID, return nil } -// SendAppRequest sends an application-level request to the given nodes. -// The meaning of this request, and how it should be handled, is defined by the -// VM. 
+func (s *sender) SendCrossChainAppError(ctx context.Context, chainID ids.ID, requestID uint32, errorCode int32, errorMessage string) error { + ctx = context.WithoutCancel(ctx) + + inMsg := message.InternalCrossChainAppError( + s.ctx.NodeID, + s.ctx.ChainID, + chainID, + requestID, + errorCode, + errorMessage, + ) + go s.router.HandleInbound(ctx, inMsg) + return nil +} + func (s *sender) SendAppRequest(ctx context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32, appRequestBytes []byte) error { - ctx = utils.Detach(ctx) + ctx = context.WithoutCancel(ctx) // Tell the router to expect a response message or a message notifying // that we won't get a response from each of these nodes. @@ -1252,10 +1277,12 @@ func (s *sender) SendAppRequest(ctx context.Context, nodeIDs set.Set[ids.NodeID] // to send them a message, to avoid busy looping when disconnected from // the internet. for nodeID := range nodeIDs { - inMsg := message.InternalAppRequestFailed( + inMsg := message.InboundAppError( nodeID, s.ctx.ChainID, requestID, + common.ErrTimeout.Code, + common.ErrTimeout.Message, ) s.router.RegisterRequest( ctx, @@ -1298,10 +1325,12 @@ func (s *sender) SendAppRequest(ctx context.Context, nodeIDs set.Set[ids.NodeID] // Immediately register a failure. Do so asynchronously to avoid // deadlock. 
- inMsg := message.InternalAppRequestFailed( + inMsg := message.InboundAppError( nodeID, s.ctx.ChainID, requestID, + common.ErrTimeout.Code, + common.ErrTimeout.Message, ) go s.router.HandleInbound(ctx, inMsg) } @@ -1337,26 +1366,31 @@ func (s *sender) SendAppRequest(ctx context.Context, nodeIDs set.Set[ids.NodeID] for nodeID := range nodeIDs { if !sentTo.Contains(nodeID) { - s.ctx.Log.Debug("failed to send message", - zap.Stringer("messageOp", message.AppRequestOp), - zap.Stringer("nodeID", nodeID), - zap.Stringer("chainID", s.ctx.ChainID), - zap.Uint32("requestID", requestID), - ) - s.ctx.Log.Verbo("failed to send message", - zap.Stringer("messageOp", message.AppRequestOp), - zap.Stringer("nodeID", nodeID), - zap.Stringer("chainID", s.ctx.ChainID), - zap.Uint32("requestID", requestID), - zap.Binary("payload", appRequestBytes), - ) + if s.ctx.Log.Enabled(logging.Verbo) { + s.ctx.Log.Verbo("failed to send message", + zap.Stringer("messageOp", message.AppRequestOp), + zap.Stringer("nodeID", nodeID), + zap.Stringer("chainID", s.ctx.ChainID), + zap.Uint32("requestID", requestID), + zap.Binary("payload", appRequestBytes), + ) + } else { + s.ctx.Log.Debug("failed to send message", + zap.Stringer("messageOp", message.AppRequestOp), + zap.Stringer("nodeID", nodeID), + zap.Stringer("chainID", s.ctx.ChainID), + zap.Uint32("requestID", requestID), + ) + } // Register failures for nodes we didn't send a request to. 
s.timeouts.RegisterRequestToUnreachableValidator() - inMsg := message.InternalAppRequestFailed( + inMsg := message.InboundAppError( nodeID, s.ctx.ChainID, requestID, + common.ErrTimeout.Code, + common.ErrTimeout.Message, ) go s.router.HandleInbound(ctx, inMsg) } @@ -1364,10 +1398,8 @@ func (s *sender) SendAppRequest(ctx context.Context, nodeIDs set.Set[ids.NodeID] return nil } -// SendAppResponse sends a response to an application-level request from the -// given node func (s *sender) SendAppResponse(ctx context.Context, nodeID ids.NodeID, requestID uint32, appResponseBytes []byte) error { - ctx = utils.Detach(ctx) + ctx = context.WithoutCancel(ctx) if nodeID == s.ctx.NodeID { inMsg := message.InboundAppResponse( @@ -1398,8 +1430,7 @@ func (s *sender) SendAppResponse(ctx context.Context, nodeID ids.NodeID, request } // Send the message over the network. - nodeIDs := set.NewSet[ids.NodeID](1) - nodeIDs.Add(nodeID) + nodeIDs := set.Of(nodeID) sentTo := s.sender.Send( outMsg, nodeIDs, @@ -1407,19 +1438,88 @@ func (s *sender) SendAppResponse(ctx context.Context, nodeID ids.NodeID, request s.subnet, ) if sentTo.Len() == 0 { - s.ctx.Log.Debug("failed to send message", - zap.Stringer("messageOp", message.AppResponseOp), - zap.Stringer("nodeID", nodeID), - zap.Stringer("chainID", s.ctx.ChainID), - zap.Uint32("requestID", requestID), + if s.ctx.Log.Enabled(logging.Verbo) { + s.ctx.Log.Verbo("failed to send message", + zap.Stringer("messageOp", message.AppResponseOp), + zap.Stringer("nodeID", nodeID), + zap.Stringer("chainID", s.ctx.ChainID), + zap.Uint32("requestID", requestID), + zap.Binary("payload", appResponseBytes), + ) + } else { + s.ctx.Log.Debug("failed to send message", + zap.Stringer("messageOp", message.AppResponseOp), + zap.Stringer("nodeID", nodeID), + zap.Stringer("chainID", s.ctx.ChainID), + zap.Uint32("requestID", requestID), + ) + } + } + return nil +} + +func (s *sender) SendAppError(ctx context.Context, nodeID ids.NodeID, requestID uint32, errorCode 
int32, errorMessage string) error { + ctx = context.WithoutCancel(ctx) + + if nodeID == s.ctx.NodeID { + inMsg := message.InboundAppError( + nodeID, + s.ctx.ChainID, + requestID, + errorCode, + errorMessage, ) - s.ctx.Log.Verbo("failed to send message", - zap.Stringer("messageOp", message.AppResponseOp), + go s.router.HandleInbound(ctx, inMsg) + return nil + } + + // Create the outbound message. + outMsg, err := s.msgCreator.AppError( + s.ctx.ChainID, + requestID, + errorCode, + errorMessage, + ) + if err != nil { + s.ctx.Log.Error("failed to build message", + zap.Stringer("messageOp", message.AppErrorOp), zap.Stringer("nodeID", nodeID), zap.Stringer("chainID", s.ctx.ChainID), zap.Uint32("requestID", requestID), - zap.Binary("payload", appResponseBytes), + zap.Int32("errorCode", errorCode), + zap.String("errorMessage", errorMessage), + zap.Error(err), ) + return nil + } + + // Send the message over the network. + sentTo := s.sender.Send( + outMsg, + set.Of(nodeID), + s.ctx.SubnetID, + s.subnet, + ) + if sentTo.Len() == 0 { + if s.ctx.Log.Enabled(logging.Verbo) { + s.ctx.Log.Verbo("failed to send message", + zap.Stringer("messageOp", message.AppErrorOp), + zap.Stringer("nodeID", nodeID), + zap.Stringer("chainID", s.ctx.ChainID), + zap.Uint32("requestID", requestID), + zap.Int32("errorCode", errorCode), + zap.String("errorMessage", errorMessage), + ) + } else { + s.ctx.Log.Debug("failed to send message", + zap.Stringer("messageOp", message.AppErrorOp), + zap.Stringer("nodeID", nodeID), + zap.Stringer("chainID", s.ctx.ChainID), + zap.Uint32("requestID", requestID), + zap.Int32("errorCode", errorCode), + zap.String("errorMessage", errorMessage), + ) + } } return nil } @@ -1447,24 +1547,26 @@ func (s *sender) SendAppGossipSpecific(_ context.Context, nodeIDs set.Set[ids.No if sentTo.Len() == 0 { for nodeID := range nodeIDs { if !sentTo.Contains(nodeID) { - s.ctx.Log.Debug("failed to send message", - zap.Stringer("messageOp", message.AppGossipOp), - zap.Stringer("nodeID", 
nodeID), - zap.Stringer("chainID", s.ctx.ChainID), - ) - s.ctx.Log.Verbo("failed to send message", - zap.Stringer("messageOp", message.AppGossipOp), - zap.Stringer("nodeID", nodeID), - zap.Stringer("chainID", s.ctx.ChainID), - zap.Binary("payload", appGossipBytes), - ) + if s.ctx.Log.Enabled(logging.Verbo) { + s.ctx.Log.Verbo("failed to send message", + zap.Stringer("messageOp", message.AppGossipOp), + zap.Stringer("nodeID", nodeID), + zap.Stringer("chainID", s.ctx.ChainID), + zap.Binary("payload", appGossipBytes), + ) + } else { + s.ctx.Log.Debug("failed to send message", + zap.Stringer("messageOp", message.AppGossipOp), + zap.Stringer("nodeID", nodeID), + zap.Stringer("chainID", s.ctx.ChainID), + ) + } } } } return nil } -// SendAppGossip sends an application-level gossip message. func (s *sender) SendAppGossip(_ context.Context, appGossipBytes []byte) error { // Create the outbound message. outMsg, err := s.msgCreator.AppGossip(s.ctx.ChainID, appGossipBytes) @@ -1492,20 +1594,22 @@ func (s *sender) SendAppGossip(_ context.Context, appGossipBytes []byte) error { s.subnet, ) if sentTo.Len() == 0 { - s.ctx.Log.Debug("failed to send message", - zap.Stringer("messageOp", message.AppGossipOp), - zap.Stringer("chainID", s.ctx.ChainID), - ) - s.ctx.Log.Verbo("failed to send message", - zap.Stringer("messageOp", message.AppGossipOp), - zap.Stringer("chainID", s.ctx.ChainID), - zap.Binary("payload", appGossipBytes), - ) + if s.ctx.Log.Enabled(logging.Verbo) { + s.ctx.Log.Verbo("failed to send message", + zap.Stringer("messageOp", message.AppGossipOp), + zap.Stringer("chainID", s.ctx.ChainID), + zap.Binary("payload", appGossipBytes), + ) + } else { + s.ctx.Log.Debug("failed to send message", + zap.Stringer("messageOp", message.AppGossipOp), + zap.Stringer("chainID", s.ctx.ChainID), + ) + } } return nil } -// SendGossip gossips the provided container func (s *sender) SendGossip(_ context.Context, container []byte) { // Create the outbound message. 
outMsg, err := s.msgCreator.Put( @@ -1534,15 +1638,18 @@ func (s *sender) SendGossip(_ context.Context, container []byte) { s.subnet, ) if sentTo.Len() == 0 { - s.ctx.Log.Debug("failed to send message", - zap.Stringer("messageOp", message.PutOp), - zap.Stringer("chainID", s.ctx.ChainID), - ) - s.ctx.Log.Verbo("failed to send message", - zap.Stringer("messageOp", message.PutOp), - zap.Stringer("chainID", s.ctx.ChainID), - zap.Binary("container", container), - ) + if s.ctx.Log.Enabled(logging.Verbo) { + s.ctx.Log.Verbo("failed to send message", + zap.Stringer("messageOp", message.PutOp), + zap.Stringer("chainID", s.ctx.ChainID), + zap.Binary("container", container), + ) + } else { + s.ctx.Log.Debug("failed to send message", + zap.Stringer("messageOp", message.PutOp), + zap.Stringer("chainID", s.ctx.ChainID), + ) + } } } @@ -1580,15 +1687,18 @@ func (s *sender) Accept(ctx *snow.ConsensusContext, _ ids.ID, container []byte) s.subnet, ) if sentTo.Len() == 0 { - s.ctx.Log.Debug("failed to send message", - zap.Stringer("messageOp", message.PutOp), - zap.Stringer("chainID", s.ctx.ChainID), - ) - s.ctx.Log.Verbo("failed to send message", - zap.Stringer("messageOp", message.PutOp), - zap.Stringer("chainID", s.ctx.ChainID), - zap.Binary("container", container), - ) + if s.ctx.Log.Enabled(logging.Verbo) { + s.ctx.Log.Verbo("failed to send message", + zap.Stringer("messageOp", message.PutOp), + zap.Stringer("chainID", s.ctx.ChainID), + zap.Binary("container", container), + ) + } else { + s.ctx.Log.Debug("failed to send message", + zap.Stringer("messageOp", message.PutOp), + zap.Stringer("chainID", s.ctx.ChainID), + ) + } } return nil } diff --git a/avalanchego/snow/networking/sender/sender_test.go b/avalanchego/snow/networking/sender/sender_test.go index 04bfeea6..89db9e45 100644 --- a/avalanchego/snow/networking/sender/sender_test.go +++ b/avalanchego/snow/networking/sender/sender_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
+// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package sender @@ -10,11 +10,9 @@ import ( "testing" "time" - "github.com/golang/mock/gomock" - "github.com/prometheus/client_golang/prometheus" - "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/message" @@ -26,6 +24,7 @@ import ( "github.com/ava-labs/avalanchego/snow/networking/router" "github.com/ava-labs/avalanchego/snow/networking/timeout" "github.com/ava-labs/avalanchego/snow/networking/tracker" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/subnets" "github.com/ava-labs/avalanchego/utils/constants" @@ -35,6 +34,8 @@ import ( "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/timer" "github.com/ava-labs/avalanchego/version" + + commontracker "github.com/ava-labs/avalanchego/snow/engine/common/tracker" ) const testThreadPoolSize = 2 @@ -50,9 +51,11 @@ var defaultSubnetConfig = subnets.Config{ func TestTimeout(t *testing.T) { require := require.New(t) - vdrs := validators.NewSet() - err := vdrs.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1) - require.NoError(err) + + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) + vdrs := validators.NewManager() + require.NoError(vdrs.AddStaker(ctx.SubnetID, ids.GenerateTestNodeID(), nil, ids.Empty, 1)) benchlist := benchlist.NewNoBenchlist() tm, err := timeout.NewManager( &timer.AdaptiveTimeoutConfig{ @@ -81,7 +84,7 @@ func TestTimeout(t *testing.T) { ) require.NoError(err) - err = chainRouter.Initialize( + require.NoError(chainRouter.Initialize( ids.EmptyNodeID, logging.NoLog{}, tm, @@ -93,10 +96,8 @@ func TestTimeout(t *testing.T) { router.HealthConfig{}, "", prometheus.NewRegistry(), - ) - require.NoError(err) + )) - ctx := snow.DefaultConsensusContextTest() 
externalSender := &ExternalSenderTest{TB: t} externalSender.Default(false) @@ -111,7 +112,7 @@ func TestTimeout(t *testing.T) { ) require.NoError(err) - ctx2 := snow.DefaultConsensusContextTest() + ctx2 := snowtest.ConsensusContext(snowCtx) resourceTracker, err := tracker.NewResourceTracker( prometheus.NewRegistry(), resource.NoUsage, @@ -128,13 +129,11 @@ func TestTimeout(t *testing.T) { resourceTracker, validators.UnhandledSubnetConnector, subnets.New(ctx.NodeID, subnets.Config{}), + commontracker.NewPeers(), ) require.NoError(err) bootstrapper := &common.BootstrapperTest{ - BootstrapableTest: common.BootstrapableTest{ - T: t, - }, EngineTest: common.EngineTest{ T: t, }, @@ -202,8 +201,18 @@ func TestTimeout(t *testing.T) { bootstrapper.GetAncestorsFailedF = failed bootstrapper.GetFailedF = failed bootstrapper.QueryFailedF = failed - bootstrapper.AppRequestFailedF = failed - bootstrapper.CrossChainAppRequestFailedF = func(ctx context.Context, chainID ids.ID, _ uint32) error { + bootstrapper.AppRequestFailedF = func(ctx context.Context, nodeID ids.NodeID, _ uint32, _ *common.AppError) error { + require.NoError(ctx.Err()) + + failedLock.Lock() + defer failedLock.Unlock() + + failedVDRs.Add(nodeID) + wg.Done() + return nil + } + + bootstrapper.CrossChainAppRequestFailedF = func(ctx context.Context, chainID ids.ID, _ uint32, _ *common.AppError) error { require.NoError(ctx.Err()) failedLock.Lock() @@ -216,36 +225,28 @@ func TestTimeout(t *testing.T) { sendAll := func() { { - nodeIDs := set.Set[ids.NodeID]{ - ids.GenerateTestNodeID(): struct{}{}, - } + nodeIDs := set.Of(ids.GenerateTestNodeID()) vdrIDs.Union(nodeIDs) wg.Add(1) requestID++ sender.SendGetStateSummaryFrontier(cancelledCtx, nodeIDs, requestID) } { - nodeIDs := set.Set[ids.NodeID]{ - ids.GenerateTestNodeID(): struct{}{}, - } + nodeIDs := set.Of(ids.GenerateTestNodeID()) vdrIDs.Union(nodeIDs) wg.Add(1) requestID++ sender.SendGetAcceptedStateSummary(cancelledCtx, nodeIDs, requestID, nil) } { - nodeIDs := 
set.Set[ids.NodeID]{ - ids.GenerateTestNodeID(): struct{}{}, - } + nodeIDs := set.Of(ids.GenerateTestNodeID()) vdrIDs.Union(nodeIDs) wg.Add(1) requestID++ sender.SendGetAcceptedFrontier(cancelledCtx, nodeIDs, requestID) } { - nodeIDs := set.Set[ids.NodeID]{ - ids.GenerateTestNodeID(): struct{}{}, - } + nodeIDs := set.Of(ids.GenerateTestNodeID()) vdrIDs.Union(nodeIDs) wg.Add(1) requestID++ @@ -266,45 +267,37 @@ func TestTimeout(t *testing.T) { sender.SendGet(cancelledCtx, nodeID, requestID, ids.Empty) } { - nodeIDs := set.Set[ids.NodeID]{ - ids.GenerateTestNodeID(): struct{}{}, - } + nodeIDs := set.Of(ids.GenerateTestNodeID()) vdrIDs.Union(nodeIDs) wg.Add(1) requestID++ - sender.SendPullQuery(cancelledCtx, nodeIDs, requestID, ids.Empty) + sender.SendPullQuery(cancelledCtx, nodeIDs, requestID, ids.Empty, 0) } { - nodeIDs := set.Set[ids.NodeID]{ - ids.GenerateTestNodeID(): struct{}{}, - } + nodeIDs := set.Of(ids.GenerateTestNodeID()) vdrIDs.Union(nodeIDs) wg.Add(1) requestID++ - sender.SendPushQuery(cancelledCtx, nodeIDs, requestID, nil) + sender.SendPushQuery(cancelledCtx, nodeIDs, requestID, nil, 0) } { - nodeIDs := set.Set[ids.NodeID]{ - ids.GenerateTestNodeID(): struct{}{}, - } + nodeIDs := set.Of(ids.GenerateTestNodeID()) vdrIDs.Union(nodeIDs) wg.Add(1) requestID++ - err := sender.SendAppRequest(cancelledCtx, nodeIDs, requestID, nil) - require.NoError(err) + require.NoError(sender.SendAppRequest(cancelledCtx, nodeIDs, requestID, nil)) } { chainID := ids.GenerateTestID() chains.Add(chainID) wg.Add(1) requestID++ - err := sender.SendCrossChainAppRequest(cancelledCtx, chainID, requestID, nil) - require.NoError(err) + require.NoError(sender.SendCrossChainAppRequest(cancelledCtx, chainID, requestID, nil)) } } // Send messages to disconnected peers - externalSender.SendF = func(_ message.OutboundMessage, nodeIDs set.Set[ids.NodeID], _ ids.ID, _ subnets.Allower) set.Set[ids.NodeID] { + externalSender.SendF = func(message.OutboundMessage, set.Set[ids.NodeID], ids.ID, 
subnets.Allower) set.Set[ids.NodeID] { return nil } sendAll() @@ -322,9 +315,12 @@ func TestTimeout(t *testing.T) { } func TestReliableMessages(t *testing.T) { - vdrs := validators.NewSet() - err := vdrs.Add(ids.NodeID{1}, nil, ids.Empty, 1) - require.NoError(t, err) + require := require.New(t) + + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) + vdrs := validators.NewManager() + require.NoError(vdrs.AddStaker(ctx.SubnetID, ids.BuildTestNodeID([]byte{1}), nil, ids.Empty, 1)) benchlist := benchlist.NewNoBenchlist() tm, err := timeout.NewManager( &timer.AdaptiveTimeoutConfig{ @@ -338,7 +334,7 @@ func TestReliableMessages(t *testing.T) { "", prometheus.NewRegistry(), ) - require.NoError(t, err) + require.NoError(err) go tm.Dispatch() @@ -352,9 +348,9 @@ func TestReliableMessages(t *testing.T) { constants.DefaultNetworkCompressionType, 10*time.Second, ) - require.NoError(t, err) + require.NoError(err) - err = chainRouter.Initialize( + require.NoError(chainRouter.Initialize( ids.EmptyNodeID, logging.NoLog{}, tm, @@ -366,10 +362,7 @@ func TestReliableMessages(t *testing.T) { router.HealthConfig{}, "", prometheus.NewRegistry(), - ) - require.NoError(t, err) - - ctx := snow.DefaultConsensusContextTest() + )) externalSender := &ExternalSenderTest{TB: t} externalSender.Default(false) @@ -383,16 +376,16 @@ func TestReliableMessages(t *testing.T) { p2p.EngineType_ENGINE_TYPE_SNOWMAN, subnets.New(ctx.NodeID, defaultSubnetConfig), ) - require.NoError(t, err) + require.NoError(err) - ctx2 := snow.DefaultConsensusContextTest() + ctx2 := snowtest.ConsensusContext(snowCtx) resourceTracker, err := tracker.NewResourceTracker( prometheus.NewRegistry(), resource.NoUsage, meter.ContinuousFactory{}, time.Second, ) - require.NoError(t, err) + require.NoError(err) h, err := handler.New( ctx2, vdrs, @@ -402,13 +395,11 @@ func TestReliableMessages(t *testing.T) { resourceTracker, validators.UnhandledSubnetConnector, subnets.New(ctx.NodeID, 
subnets.Config{}), + commontracker.NewPeers(), ) - require.NoError(t, err) + require.NoError(err) bootstrapper := &common.BootstrapperTest{ - BootstrapableTest: common.BootstrapableTest{ - T: t, - }, EngineTest: common.EngineTest{ T: t, }, @@ -457,10 +448,9 @@ func TestReliableMessages(t *testing.T) { go func() { for i := 0; i < queriesToSend; i++ { - vdrIDs := set.Set[ids.NodeID]{} - vdrIDs.Add(ids.NodeID{1}) + vdrIDs := set.Of(ids.BuildTestNodeID([]byte{1})) - sender.SendPullQuery(context.Background(), vdrIDs, uint32(i), ids.Empty) + sender.SendPullQuery(context.Background(), vdrIDs, uint32(i), ids.Empty, 0) time.Sleep(time.Duration(rand.Float64() * float64(time.Microsecond))) // #nosec G404 } }() @@ -471,10 +461,13 @@ func TestReliableMessages(t *testing.T) { } func TestReliableMessagesToMyself(t *testing.T) { + require := require.New(t) + benchlist := benchlist.NewNoBenchlist() - vdrs := validators.NewSet() - err := vdrs.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1) - require.NoError(t, err) + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) + vdrs := validators.NewManager() + require.NoError(vdrs.AddStaker(ctx.SubnetID, ids.GenerateTestNodeID(), nil, ids.Empty, 1)) tm, err := timeout.NewManager( &timer.AdaptiveTimeoutConfig{ InitialTimeout: 10 * time.Millisecond, @@ -487,7 +480,7 @@ func TestReliableMessagesToMyself(t *testing.T) { "", prometheus.NewRegistry(), ) - require.NoError(t, err) + require.NoError(err) go tm.Dispatch() @@ -501,9 +494,9 @@ func TestReliableMessagesToMyself(t *testing.T) { constants.DefaultNetworkCompressionType, 10*time.Second, ) - require.NoError(t, err) + require.NoError(err) - err = chainRouter.Initialize( + require.NoError(chainRouter.Initialize( ids.EmptyNodeID, logging.NoLog{}, tm, @@ -515,10 +508,7 @@ func TestReliableMessagesToMyself(t *testing.T) { router.HealthConfig{}, "", prometheus.NewRegistry(), - ) - require.NoError(t, err) - - ctx := snow.DefaultConsensusContextTest() + )) 
externalSender := &ExternalSenderTest{TB: t} externalSender.Default(false) @@ -532,16 +522,16 @@ func TestReliableMessagesToMyself(t *testing.T) { p2p.EngineType_ENGINE_TYPE_SNOWMAN, subnets.New(ctx.NodeID, defaultSubnetConfig), ) - require.NoError(t, err) + require.NoError(err) - ctx2 := snow.DefaultConsensusContextTest() + ctx2 := snowtest.ConsensusContext(snowCtx) resourceTracker, err := tracker.NewResourceTracker( prometheus.NewRegistry(), resource.NoUsage, meter.ContinuousFactory{}, time.Second, ) - require.NoError(t, err) + require.NoError(err) h, err := handler.New( ctx2, vdrs, @@ -551,13 +541,11 @@ func TestReliableMessagesToMyself(t *testing.T) { resourceTracker, validators.UnhandledSubnetConnector, subnets.New(ctx.NodeID, subnets.Config{}), + commontracker.NewPeers(), ) - require.NoError(t, err) + require.NoError(err) bootstrapper := &common.BootstrapperTest{ - BootstrapableTest: common.BootstrapableTest{ - T: t, - }, EngineTest: common.EngineTest{ T: t, }, @@ -608,9 +596,8 @@ func TestReliableMessagesToMyself(t *testing.T) { // Send a pull query to some random peer that won't respond // because they don't exist. 
This will almost immediately trigger // a query failed message - vdrIDs := set.Set[ids.NodeID]{} - vdrIDs.Add(ids.GenerateTestNodeID()) - sender.SendPullQuery(context.Background(), vdrIDs, uint32(i), ids.Empty) + vdrIDs := set.Of(ids.GenerateTestNodeID()) + sender.SendPullQuery(context.Background(), vdrIDs, uint32(i), ids.Empty, 0) } }() @@ -621,26 +608,16 @@ func TestReliableMessagesToMyself(t *testing.T) { func TestSender_Bootstrap_Requests(t *testing.T) { var ( - chainID = ids.GenerateTestID() - subnetID = ids.GenerateTestID() - myNodeID = ids.GenerateTestNodeID() successNodeID = ids.GenerateTestNodeID() failedNodeID = ids.GenerateTestNodeID() deadline = time.Second requestID = uint32(1337) - ctx = snow.DefaultContextTest() heights = []uint64{1, 2, 3} containerIDs = []ids.ID{ids.GenerateTestID(), ids.GenerateTestID()} engineType = p2p.EngineType_ENGINE_TYPE_SNOWMAN ) - ctx.ChainID = chainID - ctx.SubnetID = subnetID - ctx.NodeID = myNodeID - snowCtx := &snow.ConsensusContext{ - Context: ctx, - Registerer: prometheus.NewRegistry(), - AvalancheRegisterer: prometheus.NewRegistry(), - } + snowCtx := snowtest.Context(t, snowtest.PChainID) + ctx := snowtest.ConsensusContext(snowCtx) type test struct { name string @@ -659,21 +636,21 @@ func TestSender_Bootstrap_Requests(t *testing.T) { failedMsgF: func(nodeID ids.NodeID) message.InboundMessage { return message.InternalGetStateSummaryFrontierFailed( nodeID, - chainID, + ctx.ChainID, requestID, ) }, assertMsgToMyself: func(require *require.Assertions, msg message.InboundMessage) { - innerMsg, ok := msg.Message().(*p2p.GetStateSummaryFrontier) - require.True(ok) - require.Equal(chainID[:], innerMsg.ChainId) + require.IsType(&p2p.GetStateSummaryFrontier{}, msg.Message()) + innerMsg := msg.Message().(*p2p.GetStateSummaryFrontier) + require.Equal(ctx.ChainID[:], innerMsg.ChainId) require.Equal(requestID, innerMsg.RequestId) require.Equal(uint64(deadline), innerMsg.Deadline) }, expectedResponseOp: 
message.StateSummaryFrontierOp, setMsgCreatorExpect: func(msgCreator *message.MockOutboundMsgBuilder) { msgCreator.EXPECT().GetStateSummaryFrontier( - chainID, + ctx.ChainID, requestID, deadline, ).Return(nil, nil) @@ -681,15 +658,11 @@ func TestSender_Bootstrap_Requests(t *testing.T) { setExternalSenderExpect: func(externalSender *MockExternalSender) { externalSender.EXPECT().Send( gomock.Any(), // Outbound message - set.Set[ids.NodeID]{ // Note [myNodeID] is not in this set - successNodeID: struct{}{}, - failedNodeID: struct{}{}, - }, // Node IDs - subnetID, // Subnet ID + // Note [myNodeID] is not in this set + set.Of(successNodeID, failedNodeID), + ctx.SubnetID, // Subnet ID gomock.Any(), - ).Return(set.Set[ids.NodeID]{ - successNodeID: struct{}{}, - }) + ).Return(set.Of(successNodeID)) }, sendF: func(_ *require.Assertions, sender common.Sender, nodeIDs set.Set[ids.NodeID]) { sender.SendGetStateSummaryFrontier( @@ -704,14 +677,14 @@ func TestSender_Bootstrap_Requests(t *testing.T) { failedMsgF: func(nodeID ids.NodeID) message.InboundMessage { return message.InternalGetAcceptedStateSummaryFailed( nodeID, - chainID, + ctx.ChainID, requestID, ) }, assertMsgToMyself: func(require *require.Assertions, msg message.InboundMessage) { - innerMsg, ok := msg.Message().(*p2p.GetAcceptedStateSummary) - require.True(ok) - require.Equal(chainID[:], innerMsg.ChainId) + require.IsType(&p2p.GetAcceptedStateSummary{}, msg.Message()) + innerMsg := msg.Message().(*p2p.GetAcceptedStateSummary) + require.Equal(ctx.ChainID[:], innerMsg.ChainId) require.Equal(requestID, innerMsg.RequestId) require.Equal(uint64(deadline), innerMsg.Deadline) require.Equal(heights, innerMsg.Heights) @@ -719,7 +692,7 @@ func TestSender_Bootstrap_Requests(t *testing.T) { expectedResponseOp: message.AcceptedStateSummaryOp, setMsgCreatorExpect: func(msgCreator *message.MockOutboundMsgBuilder) { msgCreator.EXPECT().GetAcceptedStateSummary( - chainID, + ctx.ChainID, requestID, deadline, heights, @@ -728,15 
+701,11 @@ func TestSender_Bootstrap_Requests(t *testing.T) { setExternalSenderExpect: func(externalSender *MockExternalSender) { externalSender.EXPECT().Send( gomock.Any(), // Outbound message - set.Set[ids.NodeID]{ // Note [myNodeID] is not in this set - successNodeID: struct{}{}, - failedNodeID: struct{}{}, - }, // Node IDs - subnetID, // Subnet ID + // Note [myNodeID] is not in this set + set.Of(successNodeID, failedNodeID), + ctx.SubnetID, // Subnet ID gomock.Any(), - ).Return(set.Set[ids.NodeID]{ - successNodeID: struct{}{}, - }) + ).Return(set.Of(successNodeID)) }, sendF: func(_ *require.Assertions, sender common.Sender, nodeIDs set.Set[ids.NodeID]) { sender.SendGetAcceptedStateSummary(context.Background(), nodeIDs, requestID, heights) @@ -747,15 +716,15 @@ func TestSender_Bootstrap_Requests(t *testing.T) { failedMsgF: func(nodeID ids.NodeID) message.InboundMessage { return message.InternalGetAcceptedFrontierFailed( nodeID, - chainID, + ctx.ChainID, requestID, engineType, ) }, assertMsgToMyself: func(require *require.Assertions, msg message.InboundMessage) { - innerMsg, ok := msg.Message().(*p2p.GetAcceptedFrontier) - require.True(ok) - require.Equal(chainID[:], innerMsg.ChainId) + require.IsType(&p2p.GetAcceptedFrontier{}, msg.Message()) + innerMsg := msg.Message().(*p2p.GetAcceptedFrontier) + require.Equal(ctx.ChainID[:], innerMsg.ChainId) require.Equal(requestID, innerMsg.RequestId) require.Equal(uint64(deadline), innerMsg.Deadline) require.Equal(engineType, innerMsg.EngineType) @@ -763,7 +732,7 @@ func TestSender_Bootstrap_Requests(t *testing.T) { expectedResponseOp: message.AcceptedFrontierOp, setMsgCreatorExpect: func(msgCreator *message.MockOutboundMsgBuilder) { msgCreator.EXPECT().GetAcceptedFrontier( - chainID, + ctx.ChainID, requestID, deadline, engineType, @@ -772,15 +741,11 @@ func TestSender_Bootstrap_Requests(t *testing.T) { setExternalSenderExpect: func(externalSender *MockExternalSender) { externalSender.EXPECT().Send( gomock.Any(), // 
Outbound message - set.Set[ids.NodeID]{ // Note [myNodeID] is not in this set - successNodeID: struct{}{}, - failedNodeID: struct{}{}, - }, // Node IDs - subnetID, // Subnet ID + // Note [myNodeID] is not in this set + set.Of(successNodeID, failedNodeID), + ctx.SubnetID, // Subnet ID gomock.Any(), - ).Return(set.Set[ids.NodeID]{ - successNodeID: struct{}{}, - }) + ).Return(set.Of(successNodeID)) }, sendF: func(_ *require.Assertions, sender common.Sender, nodeIDs set.Set[ids.NodeID]) { sender.SendGetAcceptedFrontier(context.Background(), nodeIDs, requestID) @@ -792,15 +757,15 @@ func TestSender_Bootstrap_Requests(t *testing.T) { failedMsgF: func(nodeID ids.NodeID) message.InboundMessage { return message.InternalGetAcceptedFailed( nodeID, - chainID, + ctx.ChainID, requestID, engineType, ) }, assertMsgToMyself: func(require *require.Assertions, msg message.InboundMessage) { - innerMsg, ok := msg.Message().(*p2p.GetAccepted) - require.True(ok) - require.Equal(chainID[:], innerMsg.ChainId) + require.IsType(&p2p.GetAccepted{}, msg.Message()) + innerMsg := msg.Message().(*p2p.GetAccepted) + require.Equal(ctx.ChainID[:], innerMsg.ChainId) require.Equal(requestID, innerMsg.RequestId) require.Equal(uint64(deadline), innerMsg.Deadline) require.Equal(engineType, innerMsg.EngineType) @@ -808,7 +773,7 @@ func TestSender_Bootstrap_Requests(t *testing.T) { expectedResponseOp: message.AcceptedOp, setMsgCreatorExpect: func(msgCreator *message.MockOutboundMsgBuilder) { msgCreator.EXPECT().GetAccepted( - chainID, + ctx.ChainID, requestID, deadline, containerIDs, @@ -818,15 +783,11 @@ func TestSender_Bootstrap_Requests(t *testing.T) { setExternalSenderExpect: func(externalSender *MockExternalSender) { externalSender.EXPECT().Send( gomock.Any(), // Outbound message - set.Set[ids.NodeID]{ // Note [myNodeID] is not in this set - successNodeID: struct{}{}, - failedNodeID: struct{}{}, - }, // Node IDs - subnetID, // Subnet ID + // Note [myNodeID] is not in this set + set.Of(successNodeID, 
failedNodeID), + ctx.SubnetID, // Subnet ID gomock.Any(), - ).Return(set.Set[ids.NodeID]{ - successNodeID: struct{}{}, - }) + ).Return(set.Of(successNodeID)) }, sendF: func(_ *require.Assertions, sender common.Sender, nodeIDs set.Set[ids.NodeID]) { sender.SendGetAccepted(context.Background(), nodeIDs, requestID, containerIDs) @@ -839,25 +800,23 @@ func TestSender_Bootstrap_Requests(t *testing.T) { t.Run(tt.name, func(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() var ( msgCreator = message.NewMockOutboundMsgBuilder(ctrl) externalSender = NewMockExternalSender(ctrl) timeoutManager = timeout.NewMockManager(ctrl) router = router.NewMockRouter(ctrl) - nodeIDs = set.Set[ids.NodeID]{ - successNodeID: struct{}{}, - failedNodeID: struct{}{}, - myNodeID: struct{}{}, - } - nodeIDsCopy set.Set[ids.NodeID] + nodeIDs = set.Of(successNodeID, failedNodeID, ctx.NodeID) + nodeIDsCopy set.Set[ids.NodeID] ) nodeIDsCopy.Union(nodeIDs) - snowCtx.Registerer = prometheus.NewRegistry() + + // Instantiate new registerers to avoid duplicate metrics + // registration + ctx.Registerer = prometheus.NewRegistry() sender, err := New( - snowCtx, + ctx, msgCreator, externalSender, router, @@ -876,8 +835,8 @@ func TestSender_Bootstrap_Requests(t *testing.T) { router.EXPECT().RegisterRequest( gomock.Any(), // Context nodeID, // Node ID - chainID, // Source Chain - chainID, // Destination Chain + ctx.ChainID, // Source Chain + ctx.ChainID, // Destination Chain requestID, // Request ID tt.expectedResponseOp, // Operation expectedFailedMsg, // Failure Message @@ -914,25 +873,15 @@ func TestSender_Bootstrap_Requests(t *testing.T) { func TestSender_Bootstrap_Responses(t *testing.T) { var ( - chainID = ids.GenerateTestID() - subnetID = ids.GenerateTestID() - myNodeID = ids.GenerateTestNodeID() destinationNodeID = ids.GenerateTestNodeID() deadline = time.Second requestID = uint32(1337) - ctx = snow.DefaultContextTest() summaryIDs = 
[]ids.ID{ids.GenerateTestID(), ids.GenerateTestID()} summary = []byte{1, 2, 3} engineType = p2p.EngineType_ENGINE_TYPE_AVALANCHE ) - ctx.ChainID = chainID - ctx.SubnetID = subnetID - ctx.NodeID = myNodeID - snowCtx := &snow.ConsensusContext{ - Context: ctx, - Registerer: prometheus.NewRegistry(), - AvalancheRegisterer: prometheus.NewRegistry(), - } + snowCtx := snowtest.Context(t, snowtest.PChainID) + ctx := snowtest.ConsensusContext(snowCtx) type test struct { name string @@ -947,23 +896,23 @@ func TestSender_Bootstrap_Responses(t *testing.T) { name: "StateSummaryFrontier", setMsgCreatorExpect: func(msgCreator *message.MockOutboundMsgBuilder) { msgCreator.EXPECT().StateSummaryFrontier( - chainID, + ctx.ChainID, requestID, summary, ).Return(nil, nil) // Don't care about the message }, assertMsgToMyself: func(require *require.Assertions, msg message.InboundMessage) { - innerMsg, ok := msg.Message().(*p2p.StateSummaryFrontier) - require.True(ok) - require.Equal(chainID[:], innerMsg.ChainId) + require.IsType(&p2p.StateSummaryFrontier{}, msg.Message()) + innerMsg := msg.Message().(*p2p.StateSummaryFrontier) + require.Equal(ctx.ChainID[:], innerMsg.ChainId) require.Equal(requestID, innerMsg.RequestId) require.Equal(summary, innerMsg.Summary) }, setExternalSenderExpect: func(externalSender *MockExternalSender) { externalSender.EXPECT().Send( - gomock.Any(), // Outbound message - set.Set[ids.NodeID]{destinationNodeID: struct{}{}}, // Node IDs - subnetID, // Subnet ID + gomock.Any(), // Outbound message + set.Of(destinationNodeID), // Node IDs + ctx.SubnetID, // Subnet ID gomock.Any(), ).Return(nil) }, @@ -975,15 +924,15 @@ func TestSender_Bootstrap_Responses(t *testing.T) { name: "AcceptedStateSummary", setMsgCreatorExpect: func(msgCreator *message.MockOutboundMsgBuilder) { msgCreator.EXPECT().AcceptedStateSummary( - chainID, + ctx.ChainID, requestID, summaryIDs, ).Return(nil, nil) // Don't care about the message }, assertMsgToMyself: func(require *require.Assertions, msg 
message.InboundMessage) { - innerMsg, ok := msg.Message().(*p2p.AcceptedStateSummary) - require.True(ok) - require.Equal(chainID[:], innerMsg.ChainId) + require.IsType(&p2p.AcceptedStateSummary{}, msg.Message()) + innerMsg := msg.Message().(*p2p.AcceptedStateSummary) + require.Equal(ctx.ChainID[:], innerMsg.ChainId) require.Equal(requestID, innerMsg.RequestId) for i, summaryID := range summaryIDs { require.Equal(summaryID[:], innerMsg.SummaryIds[i]) @@ -991,9 +940,9 @@ func TestSender_Bootstrap_Responses(t *testing.T) { }, setExternalSenderExpect: func(externalSender *MockExternalSender) { externalSender.EXPECT().Send( - gomock.Any(), // Outbound message - set.Set[ids.NodeID]{destinationNodeID: struct{}{}}, // Node IDs - subnetID, // Subnet ID + gomock.Any(), // Outbound message + set.Of(destinationNodeID), // Node IDs + ctx.SubnetID, // Subnet ID gomock.Any(), ).Return(nil) }, @@ -1005,45 +954,43 @@ func TestSender_Bootstrap_Responses(t *testing.T) { name: "AcceptedFrontier", setMsgCreatorExpect: func(msgCreator *message.MockOutboundMsgBuilder) { msgCreator.EXPECT().AcceptedFrontier( - chainID, + ctx.ChainID, requestID, - summaryIDs, + summaryIDs[0], ).Return(nil, nil) // Don't care about the message }, assertMsgToMyself: func(require *require.Assertions, msg message.InboundMessage) { - innerMsg, ok := msg.Message().(*p2p.AcceptedFrontier) - require.True(ok) - require.Equal(chainID[:], innerMsg.ChainId) + require.IsType(&p2p.AcceptedFrontier{}, msg.Message()) + innerMsg := msg.Message().(*p2p.AcceptedFrontier) + require.Equal(ctx.ChainID[:], innerMsg.ChainId) require.Equal(requestID, innerMsg.RequestId) - for i, summaryID := range summaryIDs { - require.Equal(summaryID[:], innerMsg.ContainerIds[i]) - } + require.Equal(summaryIDs[0][:], innerMsg.ContainerId) }, setExternalSenderExpect: func(externalSender *MockExternalSender) { externalSender.EXPECT().Send( - gomock.Any(), // Outbound message - set.Set[ids.NodeID]{destinationNodeID: struct{}{}}, // Node IDs - 
subnetID, // Subnet ID + gomock.Any(), // Outbound message + set.Of(destinationNodeID), // Node IDs + ctx.SubnetID, // Subnet ID gomock.Any(), ).Return(nil) }, sendF: func(_ *require.Assertions, sender common.Sender, nodeID ids.NodeID) { - sender.SendAcceptedFrontier(context.Background(), nodeID, requestID, summaryIDs) + sender.SendAcceptedFrontier(context.Background(), nodeID, requestID, summaryIDs[0]) }, }, { name: "Accepted", setMsgCreatorExpect: func(msgCreator *message.MockOutboundMsgBuilder) { msgCreator.EXPECT().Accepted( - chainID, + ctx.ChainID, requestID, summaryIDs, ).Return(nil, nil) // Don't care about the message }, assertMsgToMyself: func(require *require.Assertions, msg message.InboundMessage) { - innerMsg, ok := msg.Message().(*p2p.Accepted) - require.True(ok) - require.Equal(chainID[:], innerMsg.ChainId) + require.IsType(&p2p.Accepted{}, msg.Message()) + innerMsg := msg.Message().(*p2p.Accepted) + require.Equal(ctx.ChainID[:], innerMsg.ChainId) require.Equal(requestID, innerMsg.RequestId) for i, summaryID := range summaryIDs { require.Equal(summaryID[:], innerMsg.ContainerIds[i]) @@ -1051,9 +998,9 @@ func TestSender_Bootstrap_Responses(t *testing.T) { }, setExternalSenderExpect: func(externalSender *MockExternalSender) { externalSender.EXPECT().Send( - gomock.Any(), // Outbound message - set.Set[ids.NodeID]{destinationNodeID: struct{}{}}, // Node IDs - subnetID, // Subnet ID + gomock.Any(), // Outbound message + set.Of(destinationNodeID), // Node IDs + ctx.SubnetID, // Subnet ID gomock.Any(), ).Return(nil) }, @@ -1067,7 +1014,6 @@ func TestSender_Bootstrap_Responses(t *testing.T) { t.Run(tt.name, func(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() var ( msgCreator = message.NewMockOutboundMsgBuilder(ctrl) @@ -1078,11 +1024,11 @@ func TestSender_Bootstrap_Responses(t *testing.T) { // Instantiate new registerers to avoid duplicate metrics // registration - snowCtx.Registerer = prometheus.NewRegistry() 
- snowCtx.AvalancheRegisterer = prometheus.NewRegistry() + ctx.Registerer = prometheus.NewRegistry() + ctx.AvalancheRegisterer = prometheus.NewRegistry() sender, err := New( - snowCtx, + ctx, msgCreator, externalSender, router, @@ -1106,7 +1052,7 @@ func TestSender_Bootstrap_Responses(t *testing.T) { close(calledHandleInbound) }, ) - tt.sendF(require, sender, myNodeID) + tt.sendF(require, sender, ctx.NodeID) <-calledHandleInbound } @@ -1125,24 +1071,14 @@ func TestSender_Bootstrap_Responses(t *testing.T) { func TestSender_Single_Request(t *testing.T) { var ( - chainID = ids.GenerateTestID() - subnetID = ids.GenerateTestID() - myNodeID = ids.GenerateTestNodeID() destinationNodeID = ids.GenerateTestNodeID() deadline = time.Second requestID = uint32(1337) - ctx = snow.DefaultContextTest() containerID = ids.GenerateTestID() engineType = p2p.EngineType_ENGINE_TYPE_SNOWMAN ) - ctx.ChainID = chainID - ctx.SubnetID = subnetID - ctx.NodeID = myNodeID - snowCtx := &snow.ConsensusContext{ - Context: ctx, - Registerer: prometheus.NewRegistry(), - AvalancheRegisterer: prometheus.NewRegistry(), - } + snowCtx := snowtest.Context(t, snowtest.PChainID) + ctx := snowtest.ConsensusContext(snowCtx) type test struct { name string @@ -1160,22 +1096,22 @@ func TestSender_Single_Request(t *testing.T) { failedMsgF: func(nodeID ids.NodeID) message.InboundMessage { return message.InternalGetAncestorsFailed( nodeID, - chainID, + ctx.ChainID, requestID, engineType, ) }, assertMsgToMyself: func(require *require.Assertions, msg message.InboundMessage) { - innerMsg, ok := msg.Message().(*message.GetAncestorsFailed) - require.True(ok) - require.Equal(chainID, innerMsg.ChainID) + require.IsType(&message.GetAncestorsFailed{}, msg.Message()) + innerMsg := msg.Message().(*message.GetAncestorsFailed) + require.Equal(ctx.ChainID, innerMsg.ChainID) require.Equal(requestID, innerMsg.RequestID) require.Equal(engineType, innerMsg.EngineType) }, expectedResponseOp: message.AncestorsOp, setMsgCreatorExpect: 
func(msgCreator *message.MockOutboundMsgBuilder) { msgCreator.EXPECT().GetAncestors( - chainID, + ctx.ChainID, requestID, deadline, containerID, @@ -1184,9 +1120,9 @@ func TestSender_Single_Request(t *testing.T) { }, setExternalSenderExpect: func(externalSender *MockExternalSender, sentTo set.Set[ids.NodeID]) { externalSender.EXPECT().Send( - gomock.Any(), // Outbound message - set.Set[ids.NodeID]{destinationNodeID: struct{}{}}, // Node IDs - subnetID, + gomock.Any(), // Outbound message + set.Of(destinationNodeID), // Node IDs + ctx.SubnetID, gomock.Any(), ).Return(sentTo) }, @@ -1199,22 +1135,22 @@ func TestSender_Single_Request(t *testing.T) { failedMsgF: func(nodeID ids.NodeID) message.InboundMessage { return message.InternalGetFailed( nodeID, - chainID, + ctx.ChainID, requestID, engineType, ) }, assertMsgToMyself: func(require *require.Assertions, msg message.InboundMessage) { - innerMsg, ok := msg.Message().(*message.GetFailed) - require.True(ok) - require.Equal(chainID, innerMsg.ChainID) + require.IsType(&message.GetFailed{}, msg.Message()) + innerMsg := msg.Message().(*message.GetFailed) + require.Equal(ctx.ChainID, innerMsg.ChainID) require.Equal(requestID, innerMsg.RequestID) require.Equal(engineType, innerMsg.EngineType) }, expectedResponseOp: message.PutOp, setMsgCreatorExpect: func(msgCreator *message.MockOutboundMsgBuilder) { msgCreator.EXPECT().Get( - chainID, + ctx.ChainID, requestID, deadline, containerID, @@ -1223,9 +1159,9 @@ func TestSender_Single_Request(t *testing.T) { }, setExternalSenderExpect: func(externalSender *MockExternalSender, sentTo set.Set[ids.NodeID]) { externalSender.EXPECT().Send( - gomock.Any(), // Outbound message - set.Set[ids.NodeID]{destinationNodeID: struct{}{}}, // Node IDs - subnetID, + gomock.Any(), // Outbound message + set.Of(destinationNodeID), // Node IDs + ctx.SubnetID, gomock.Any(), ).Return(sentTo) }, @@ -1239,7 +1175,6 @@ func TestSender_Single_Request(t *testing.T) { t.Run(tt.name, func(t *testing.T) { require 
:= require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() var ( msgCreator = message.NewMockOutboundMsgBuilder(ctrl) @@ -1247,10 +1182,13 @@ func TestSender_Single_Request(t *testing.T) { timeoutManager = timeout.NewMockManager(ctrl) router = router.NewMockRouter(ctrl) ) - snowCtx.Registerer = prometheus.NewRegistry() + + // Instantiate new registerers to avoid duplicate metrics + // registration + ctx.Registerer = prometheus.NewRegistry() sender, err := New( - snowCtx, + ctx, msgCreator, externalSender, router, @@ -1266,12 +1204,12 @@ func TestSender_Single_Request(t *testing.T) { // Case: sending to myself { // Make sure we register requests with the router - expectedFailedMsg := tt.failedMsgF(myNodeID) + expectedFailedMsg := tt.failedMsgF(ctx.NodeID) router.EXPECT().RegisterRequest( gomock.Any(), // Context - myNodeID, // Node ID - chainID, // Source Chain - chainID, // Destination Chain + ctx.NodeID, // Node ID + ctx.ChainID, // Source Chain + ctx.ChainID, // Destination Chain requestID, // Request ID tt.expectedResponseOp, // Operation expectedFailedMsg, // Failure Message @@ -1290,14 +1228,14 @@ func TestSender_Single_Request(t *testing.T) { }, ) - tt.sendF(require, sender, myNodeID) + tt.sendF(require, sender, ctx.NodeID) <-calledHandleInbound } // Case: Node is benched { - timeoutManager.EXPECT().IsBenched(destinationNodeID, chainID).Return(true) + timeoutManager.EXPECT().IsBenched(destinationNodeID, ctx.ChainID).Return(true) timeoutManager.EXPECT().RegisterRequestToUnreachableValidator() @@ -1306,8 +1244,8 @@ func TestSender_Single_Request(t *testing.T) { router.EXPECT().RegisterRequest( gomock.Any(), // Context destinationNodeID, // Node ID - chainID, // Source Chain - chainID, // Destination Chain + ctx.ChainID, // Source Chain + ctx.ChainID, // Destination Chain requestID, // Request ID tt.expectedResponseOp, // Operation expectedFailedMsg, // Failure Message @@ -1333,7 +1271,7 @@ func TestSender_Single_Request(t *testing.T) { // Case: 
Node is not myself, not benched and send fails { - timeoutManager.EXPECT().IsBenched(destinationNodeID, chainID).Return(false) + timeoutManager.EXPECT().IsBenched(destinationNodeID, ctx.ChainID).Return(false) timeoutManager.EXPECT().RegisterRequestToUnreachableValidator() @@ -1342,8 +1280,8 @@ func TestSender_Single_Request(t *testing.T) { router.EXPECT().RegisterRequest( gomock.Any(), // Context destinationNodeID, // Node ID - chainID, // Source Chain - chainID, // Destination Chain + ctx.ChainID, // Source Chain + ctx.ChainID, // Destination Chain requestID, // Request ID tt.expectedResponseOp, // Operation expectedFailedMsg, // Failure Message diff --git a/avalanchego/snow/networking/sender/test_external_sender.go b/avalanchego/snow/networking/sender/test_external_sender.go index 7b8bef90..ae061872 100644 --- a/avalanchego/snow/networking/sender/test_external_sender.go +++ b/avalanchego/snow/networking/sender/test_external_sender.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package sender diff --git a/avalanchego/snow/networking/sender/traced_sender.go b/avalanchego/snow/networking/sender/traced_sender.go index a6ffe7f4..a82264ab 100644 --- a/avalanchego/snow/networking/sender/traced_sender.go +++ b/avalanchego/snow/networking/sender/traced_sender.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package sender @@ -8,13 +8,13 @@ import ( "go.opentelemetry.io/otel/attribute" - oteltrace "go.opentelemetry.io/otel/trace" - "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/trace" "github.com/ava-labs/avalanchego/utils/set" + + oteltrace "go.opentelemetry.io/otel/trace" ) var _ common.Sender = (*tracedSender)(nil) @@ -81,15 +81,15 @@ func (s *tracedSender) SendGetAcceptedFrontier(ctx context.Context, nodeIDs set. s.sender.SendGetAcceptedFrontier(ctx, nodeIDs, requestID) } -func (s *tracedSender) SendAcceptedFrontier(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerIDs []ids.ID) { +func (s *tracedSender) SendAcceptedFrontier(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerID ids.ID) { ctx, span := s.tracer.Start(ctx, "tracedSender.SendAcceptedFrontier", oteltrace.WithAttributes( attribute.Stringer("recipients", nodeID), attribute.Int64("requestID", int64(requestID)), - attribute.Int("numContainerIDs", len(containerIDs)), + attribute.Stringer("containerID", containerID), )) defer span.End() - s.sender.SendAcceptedFrontier(ctx, nodeID, requestID, containerIDs) + s.sender.SendAcceptedFrontier(ctx, nodeID, requestID, containerID) } func (s *tracedSender) SendGetAccepted(ctx context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32, containerIDs []ids.ID) { @@ -157,36 +157,39 @@ func (s *tracedSender) SendPut(ctx context.Context, nodeID ids.NodeID, requestID s.sender.SendPut(ctx, nodeID, requestID, container) } -func (s *tracedSender) SendPushQuery(ctx context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32, container []byte) { +func (s *tracedSender) SendPushQuery(ctx context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32, container []byte, requestedHeight uint64) { ctx, span := s.tracer.Start(ctx, "tracedSender.SendPushQuery", oteltrace.WithAttributes( attribute.Int64("requestID", int64(requestID)), 
attribute.Int("containerLen", len(container)), + attribute.Int64("requestedHeight", int64(requestedHeight)), )) defer span.End() - s.sender.SendPushQuery(ctx, nodeIDs, requestID, container) + s.sender.SendPushQuery(ctx, nodeIDs, requestID, container, requestedHeight) } -func (s *tracedSender) SendPullQuery(ctx context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32, containerID ids.ID) { +func (s *tracedSender) SendPullQuery(ctx context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32, containerID ids.ID, requestedHeight uint64) { ctx, span := s.tracer.Start(ctx, "tracedSender.SendPullQuery", oteltrace.WithAttributes( attribute.Int64("requestID", int64(requestID)), attribute.Stringer("containerID", containerID), + attribute.Int64("requestedHeight", int64(requestedHeight)), )) defer span.End() - s.sender.SendPullQuery(ctx, nodeIDs, requestID, containerID) + s.sender.SendPullQuery(ctx, nodeIDs, requestID, containerID, requestedHeight) } -func (s *tracedSender) SendChits(ctx context.Context, nodeID ids.NodeID, requestID uint32, votes []ids.ID, accepted []ids.ID) { +func (s *tracedSender) SendChits(ctx context.Context, nodeID ids.NodeID, requestID uint32, preferredID ids.ID, preferredIDAtHeight ids.ID, acceptedID ids.ID) { ctx, span := s.tracer.Start(ctx, "tracedSender.SendChits", oteltrace.WithAttributes( attribute.Stringer("recipients", nodeID), attribute.Int64("requestID", int64(requestID)), - attribute.Int("numVotes", len(votes)), - attribute.Int("numAccepted", len(accepted)), + attribute.Stringer("preferredID", preferredID), + attribute.Stringer("preferredIDAtHeight", preferredIDAtHeight), + attribute.Stringer("acceptedID", acceptedID), )) defer span.End() - s.sender.SendChits(ctx, nodeID, requestID, votes, accepted) + s.sender.SendChits(ctx, nodeID, requestID, preferredID, preferredIDAtHeight, acceptedID) } func (s *tracedSender) SendCrossChainAppRequest(ctx context.Context, chainID ids.ID, requestID uint32, appRequestBytes []byte) error { @@ -211,6 
+214,18 @@ func (s *tracedSender) SendCrossChainAppResponse(ctx context.Context, chainID id return s.sender.SendCrossChainAppResponse(ctx, chainID, requestID, appResponseBytes) } +func (s *tracedSender) SendCrossChainAppError(ctx context.Context, chainID ids.ID, requestID uint32, errorCode int32, errorMessage string) error { + ctx, span := s.tracer.Start(ctx, "tracedSender.SendCrossChainAppError", oteltrace.WithAttributes( + attribute.Stringer("chainID", chainID), + attribute.Int64("requestID", int64(requestID)), + attribute.Int64("errorCode", int64(errorCode)), + attribute.String("errorMessage", errorMessage), + )) + defer span.End() + + return s.sender.SendCrossChainAppError(ctx, chainID, requestID, errorCode, errorMessage) +} + func (s *tracedSender) SendAppRequest(ctx context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32, appRequestBytes []byte) error { ctx, span := s.tracer.Start(ctx, "tracedSender.SendAppRequest", oteltrace.WithAttributes( attribute.Int64("requestID", int64(requestID)), @@ -232,6 +247,18 @@ func (s *tracedSender) SendAppResponse(ctx context.Context, nodeID ids.NodeID, r return s.sender.SendAppResponse(ctx, nodeID, requestID, appResponseBytes) } +func (s *tracedSender) SendAppError(ctx context.Context, nodeID ids.NodeID, requestID uint32, errorCode int32, errorMessage string) error { + ctx, span := s.tracer.Start(ctx, "tracedSender.SendAppError", oteltrace.WithAttributes( + attribute.Stringer("nodeID", nodeID), + attribute.Int64("requestID", int64(requestID)), + attribute.Int64("errorCode", int64(errorCode)), + attribute.String("errorMessage", errorMessage), + )) + defer span.End() + + return s.sender.SendAppError(ctx, nodeID, requestID, errorCode, errorMessage) +} + func (s *tracedSender) SendAppGossipSpecific(ctx context.Context, nodeIDs set.Set[ids.NodeID], appGossipBytes []byte) error { _, span := s.tracer.Start(ctx, "tracedSender.SendAppGossipSpecific", oteltrace.WithAttributes( attribute.Int("gossipLen", len(appGossipBytes)), 
diff --git a/avalanchego/snow/networking/timeout/main_test.go b/avalanchego/snow/networking/timeout/main_test.go new file mode 100644 index 00000000..c8a597fa --- /dev/null +++ b/avalanchego/snow/networking/timeout/main_test.go @@ -0,0 +1,14 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package timeout + +import ( + "testing" + + "go.uber.org/goleak" +) + +func TestMain(m *testing.M) { + goleak.VerifyTestMain(m) +} diff --git a/avalanchego/snow/networking/timeout/manager.go b/avalanchego/snow/networking/timeout/manager.go index 6846151b..95a3be25 100644 --- a/avalanchego/snow/networking/timeout/manager.go +++ b/avalanchego/snow/networking/timeout/manager.go @@ -1,10 +1,11 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package timeout import ( "fmt" + "sync" "time" "github.com/prometheus/client_golang/prometheus" @@ -62,6 +63,9 @@ type Manager interface { // Mark that we no longer expect a response to this request we sent. // Does not modify the timeout. RemoveRequest(requestID ids.RequestID) + + // Stops the manager. + Stop() } func NewManager( @@ -88,6 +92,7 @@ type manager struct { tm timer.AdaptiveTimeoutManager benchlistMgr benchlist.Manager metrics metrics + stopOnce sync.Once } func (m *manager) Dispatch() { @@ -125,8 +130,11 @@ func (m *manager) RegisterRequest( timeoutHandler func(), ) { newTimeoutHandler := func() { - // If this request timed out, tell the benchlist manager - m.benchlistMgr.RegisterFailure(chainID, nodeID) + if requestID.Op != byte(message.AppResponseOp) { + // If the request timed out and wasn't an AppRequest, tell the + // benchlist manager. 
+ m.benchlistMgr.RegisterFailure(chainID, nodeID) + } timeoutHandler() } m.tm.Put(requestID, measureLatency, newTimeoutHandler) @@ -153,3 +161,7 @@ func (m *manager) RemoveRequest(requestID ids.RequestID) { func (m *manager) RegisterRequestToUnreachableValidator() { m.tm.ObserveLatency(m.TimeoutDuration()) } + +func (m *manager) Stop() { + m.stopOnce.Do(m.tm.Stop) +} diff --git a/avalanchego/snow/networking/timeout/manager_test.go b/avalanchego/snow/networking/timeout/manager_test.go index 19866817..49a05f78 100644 --- a/avalanchego/snow/networking/timeout/manager_test.go +++ b/avalanchego/snow/networking/timeout/manager_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package timeout @@ -9,6 +9,7 @@ import ( "time" "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/networking/benchlist" @@ -29,16 +30,15 @@ func TestManagerFire(t *testing.T) { "", prometheus.NewRegistry(), ) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) go manager.Dispatch() + defer manager.Stop() wg := sync.WaitGroup{} wg.Add(1) manager.RegisterRequest( - ids.NodeID{}, + ids.EmptyNodeID, ids.ID{}, true, ids.RequestID{}, diff --git a/avalanchego/snow/networking/timeout/metrics.go b/avalanchego/snow/networking/timeout/metrics.go index 6be45fd2..0892e5d8 100644 --- a/avalanchego/snow/networking/timeout/metrics.go +++ b/avalanchego/snow/networking/timeout/metrics.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package timeout @@ -9,7 +9,6 @@ import ( "time" "github.com/prometheus/client_golang/prometheus" - "go.uber.org/zap" "github.com/ava-labs/avalanchego/ids" diff --git a/avalanchego/snow/networking/timeout/mock_manager.go b/avalanchego/snow/networking/timeout/mock_manager.go index f53a5f85..8eeac4c6 100644 --- a/avalanchego/snow/networking/timeout/mock_manager.go +++ b/avalanchego/snow/networking/timeout/mock_manager.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/snow/networking/timeout (interfaces: Manager) +// +// Generated by this command: +// +// mockgen -package=timeout -destination=snow/networking/timeout/mock_manager.go github.com/ava-labs/avalanchego/snow/networking/timeout Manager +// // Package timeout is a generated GoMock package. package timeout @@ -14,7 +16,7 @@ import ( ids "github.com/ava-labs/avalanchego/ids" message "github.com/ava-labs/avalanchego/message" snow "github.com/ava-labs/avalanchego/snow" - gomock "github.com/golang/mock/gomock" + gomock "go.uber.org/mock/gomock" ) // MockManager is a mock of Manager interface. @@ -61,7 +63,7 @@ func (m *MockManager) IsBenched(arg0 ids.NodeID, arg1 ids.ID) bool { } // IsBenched indicates an expected call of IsBenched. -func (mr *MockManagerMockRecorder) IsBenched(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) IsBenched(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsBenched", reflect.TypeOf((*MockManager)(nil).IsBenched), arg0, arg1) } @@ -75,7 +77,7 @@ func (m *MockManager) RegisterChain(arg0 *snow.ConsensusContext) error { } // RegisterChain indicates an expected call of RegisterChain. 
-func (mr *MockManagerMockRecorder) RegisterChain(arg0 interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) RegisterChain(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterChain", reflect.TypeOf((*MockManager)(nil).RegisterChain), arg0) } @@ -87,7 +89,7 @@ func (m *MockManager) RegisterRequest(arg0 ids.NodeID, arg1 ids.ID, arg2 bool, a } // RegisterRequest indicates an expected call of RegisterRequest. -func (mr *MockManagerMockRecorder) RegisterRequest(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) RegisterRequest(arg0, arg1, arg2, arg3, arg4 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterRequest", reflect.TypeOf((*MockManager)(nil).RegisterRequest), arg0, arg1, arg2, arg3, arg4) } @@ -111,7 +113,7 @@ func (m *MockManager) RegisterResponse(arg0 ids.NodeID, arg1 ids.ID, arg2 ids.Re } // RegisterResponse indicates an expected call of RegisterResponse. -func (mr *MockManagerMockRecorder) RegisterResponse(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) RegisterResponse(arg0, arg1, arg2, arg3, arg4 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterResponse", reflect.TypeOf((*MockManager)(nil).RegisterResponse), arg0, arg1, arg2, arg3, arg4) } @@ -123,11 +125,23 @@ func (m *MockManager) RemoveRequest(arg0 ids.RequestID) { } // RemoveRequest indicates an expected call of RemoveRequest. -func (mr *MockManagerMockRecorder) RemoveRequest(arg0 interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) RemoveRequest(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveRequest", reflect.TypeOf((*MockManager)(nil).RemoveRequest), arg0) } +// Stop mocks base method. 
+func (m *MockManager) Stop() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Stop") +} + +// Stop indicates an expected call of Stop. +func (mr *MockManagerMockRecorder) Stop() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stop", reflect.TypeOf((*MockManager)(nil).Stop)) +} + // TimeoutDuration mocks base method. func (m *MockManager) TimeoutDuration() time.Duration { m.ctrl.T.Helper() diff --git a/avalanchego/snow/networking/tracker/mock_resource_tracker.go b/avalanchego/snow/networking/tracker/mock_resource_tracker.go index 924b4f84..438bd44d 100644 --- a/avalanchego/snow/networking/tracker/mock_resource_tracker.go +++ b/avalanchego/snow/networking/tracker/mock_resource_tracker.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/snow/networking/tracker (interfaces: Tracker) +// +// Generated by this command: +// +// mockgen -package=tracker -destination=snow/networking/tracker/mock_resource_tracker.go github.com/ava-labs/avalanchego/snow/networking/tracker Tracker +// // Package tracker is a generated GoMock package. package tracker @@ -12,7 +14,7 @@ import ( time "time" ids "github.com/ava-labs/avalanchego/ids" - gomock "github.com/golang/mock/gomock" + gomock "go.uber.org/mock/gomock" ) // MockTracker is a mock of Tracker interface. @@ -47,7 +49,7 @@ func (m *MockTracker) TimeUntilUsage(arg0 ids.NodeID, arg1 time.Time, arg2 float } // TimeUntilUsage indicates an expected call of TimeUntilUsage. 
-func (mr *MockTrackerMockRecorder) TimeUntilUsage(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockTrackerMockRecorder) TimeUntilUsage(arg0, arg1, arg2 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TimeUntilUsage", reflect.TypeOf((*MockTracker)(nil).TimeUntilUsage), arg0, arg1, arg2) } @@ -75,7 +77,7 @@ func (m *MockTracker) Usage(arg0 ids.NodeID, arg1 time.Time) float64 { } // Usage indicates an expected call of Usage. -func (mr *MockTrackerMockRecorder) Usage(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockTrackerMockRecorder) Usage(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Usage", reflect.TypeOf((*MockTracker)(nil).Usage), arg0, arg1) } diff --git a/avalanchego/snow/networking/tracker/mock_targeter.go b/avalanchego/snow/networking/tracker/mock_targeter.go index f8b545fd..7e260fe6 100644 --- a/avalanchego/snow/networking/tracker/mock_targeter.go +++ b/avalanchego/snow/networking/tracker/mock_targeter.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/snow/networking/tracker (interfaces: Targeter) +// +// Generated by this command: +// +// mockgen -package=tracker -destination=snow/networking/tracker/mock_targeter.go github.com/ava-labs/avalanchego/snow/networking/tracker Targeter +// // Package tracker is a generated GoMock package. package tracker @@ -11,7 +13,7 @@ import ( reflect "reflect" ids "github.com/ava-labs/avalanchego/ids" - gomock "github.com/golang/mock/gomock" + gomock "go.uber.org/mock/gomock" ) // MockTargeter is a mock of Targeter interface. @@ -46,7 +48,7 @@ func (m *MockTargeter) TargetUsage(arg0 ids.NodeID) float64 { } // TargetUsage indicates an expected call of TargetUsage. 
-func (mr *MockTargeterMockRecorder) TargetUsage(arg0 interface{}) *gomock.Call { +func (mr *MockTargeterMockRecorder) TargetUsage(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TargetUsage", reflect.TypeOf((*MockTargeter)(nil).TargetUsage), arg0) } diff --git a/avalanchego/snow/networking/tracker/resource_tracker.go b/avalanchego/snow/networking/tracker/resource_tracker.go index 721d531e..b4b14a75 100644 --- a/avalanchego/snow/networking/tracker/resource_tracker.go +++ b/avalanchego/snow/networking/tracker/resource_tracker.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package tracker @@ -11,10 +11,10 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/linkedhashmap" "github.com/ava-labs/avalanchego/utils/math/meter" "github.com/ava-labs/avalanchego/utils/resource" - "github.com/ava-labs/avalanchego/utils/wrappers" ) const epsilon = 1e-9 @@ -321,13 +321,12 @@ func newCPUTrackerMetrics(namespace string, reg prometheus.Registerer) (*tracker Help: "Available space remaining (bytes) on the database volume", }), } - errs := wrappers.Errs{} - errs.Add( + err := utils.Err( reg.Register(m.processingTimeMetric), reg.Register(m.cpuMetric), reg.Register(m.diskReadsMetric), reg.Register(m.diskWritesMetric), reg.Register(m.diskSpaceAvailable), ) - return m, errs.Err + return m, err } diff --git a/avalanchego/snow/networking/tracker/resource_tracker_test.go b/avalanchego/snow/networking/tracker/resource_tracker_test.go index 11904e48..22b477e0 100644 --- a/avalanchego/snow/networking/tracker/resource_tracker_test.go +++ b/avalanchego/snow/networking/tracker/resource_tracker_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. 
All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package tracker @@ -7,11 +7,9 @@ import ( "testing" "time" - "github.com/golang/mock/gomock" - "github.com/prometheus/client_golang/prometheus" - "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/math/meter" @@ -27,8 +25,8 @@ func TestNewCPUTracker(t *testing.T) { trackerIntf, err := NewResourceTracker(reg, resource.NoUsage, factory, halflife) require.NoError(err) - tracker, ok := trackerIntf.(*resourceTracker) - require.True(ok) + require.IsType(&resourceTracker{}, trackerIntf) + tracker := trackerIntf.(*resourceTracker) require.Equal(factory, tracker.factory) require.NotNil(tracker.processingMeter) require.Equal(halflife, tracker.halflife) @@ -37,6 +35,8 @@ func TestNewCPUTracker(t *testing.T) { } func TestCPUTracker(t *testing.T) { + require := require.New(t) + halflife := 5 * time.Second ctrl := gomock.NewController(t) @@ -44,10 +44,10 @@ func TestCPUTracker(t *testing.T) { mockUser.EXPECT().CPUUsage().Return(1.0).Times(3) tracker, err := NewResourceTracker(prometheus.NewRegistry(), mockUser, meter.ContinuousFactory{}, time.Second) - require.NoError(t, err) + require.NoError(err) - node1 := ids.NodeID{1} - node2 := ids.NodeID{2} + node1 := ids.BuildTestNodeID([]byte{1}) + node2 := ids.BuildTestNodeID([]byte{2}) // Note that all the durations between start and end are [halflife]. 
startTime1 := time.Now() @@ -66,28 +66,20 @@ func TestCPUTracker(t *testing.T) { node1Utilization := cpuTracker.Usage(node1, endTime2) node2Utilization := cpuTracker.Usage(node2, endTime2) - if node1Utilization >= node2Utilization { - t.Fatalf("Utilization should have been higher for the more recent spender") - } + require.Greater(node2Utilization, node1Utilization) cumulative := cpuTracker.TotalUsage() sum := node1Utilization + node2Utilization - if cumulative != sum { - t.Fatalf("Cumulative utilization: %f should have been equal to the sum of the spenders: %f", cumulative, sum) - } + require.Equal(sum, cumulative) mockUser.EXPECT().CPUUsage().Return(.5).Times(3) startTime3 := endTime2 endTime3 := startTime3.Add(halflife) newNode1Utilization := cpuTracker.Usage(node1, endTime3) - if newNode1Utilization >= node1Utilization { - t.Fatalf("node CPU utilization should decrease over time") - } + require.Greater(node1Utilization, newNode1Utilization) newCumulative := cpuTracker.TotalUsage() - if newCumulative >= cumulative { - t.Fatal("at-large CPU utilization should decrease over time ") - } + require.Greater(cumulative, newCumulative) startTime4 := endTime3 endTime4 := startTime4.Add(halflife) @@ -97,15 +89,15 @@ func TestCPUTracker(t *testing.T) { cumulative = cpuTracker.TotalUsage() sum = node1Utilization + node2Utilization - if cumulative >= sum { - t.Fatal("Sum of CPU usage should exceed cumulative at-large utilization") - } + require.Greater(sum, cumulative) } func TestCPUTrackerTimeUntilCPUUtilization(t *testing.T) { + require := require.New(t) + halflife := 5 * time.Second tracker, err := NewResourceTracker(prometheus.NewRegistry(), resource.NoUsage, meter.ContinuousFactory{}, halflife) - require.NoError(t, err) + require.NoError(err) now := time.Now() nodeID := ids.GenerateTestNodeID() // Start the meter @@ -125,11 +117,11 @@ func TestCPUTrackerTimeUntilCPUUtilization(t *testing.T) { now = now.Add(timeUntilDesiredVal) actualVal := cpuTracker.Usage(nodeID, now) 
// Make sure the actual/expected are close - require.InDelta(t, desiredVal, actualVal, .00001) + require.InDelta(desiredVal, actualVal, .00001) // Make sure TimeUntilUsage returns the zero duration if // the value provided >= the current value - require.Zero(t, cpuTracker.TimeUntilUsage(nodeID, now, actualVal)) - require.Zero(t, cpuTracker.TimeUntilUsage(nodeID, now, actualVal+.1)) + require.Zero(cpuTracker.TimeUntilUsage(nodeID, now, actualVal)) + require.Zero(cpuTracker.TimeUntilUsage(nodeID, now, actualVal+.1)) // Make sure it returns the zero duration if the node isn't known - require.Zero(t, cpuTracker.TimeUntilUsage(ids.GenerateTestNodeID(), now, 0.0001)) + require.Zero(cpuTracker.TimeUntilUsage(ids.GenerateTestNodeID(), now, 0.0001)) } diff --git a/avalanchego/snow/networking/tracker/targeter.go b/avalanchego/snow/networking/tracker/targeter.go index 216bb9ec..b52d8048 100644 --- a/avalanchego/snow/networking/tracker/targeter.go +++ b/avalanchego/snow/networking/tracker/targeter.go @@ -1,13 +1,15 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package tracker import ( - "math" + "go.uber.org/zap" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/logging" ) var _ Targeter = (*targeter)(nil) @@ -32,11 +34,13 @@ type TargeterConfig struct { } func NewTargeter( + logger logging.Logger, config *TargeterConfig, - vdrs validators.Set, + vdrs validators.Manager, tracker Tracker, ) Targeter { return &targeter{ + log: logger, vdrs: vdrs, tracker: tracker, vdrAlloc: config.VdrAlloc, @@ -46,7 +50,8 @@ func NewTargeter( } type targeter struct { - vdrs validators.Set + vdrs validators.Manager + log logging.Logger tracker Tracker vdrAlloc float64 maxNonVdrUsage float64 @@ -56,11 +61,23 @@ type targeter struct { func (t *targeter) TargetUsage(nodeID ids.NodeID) float64 { // This node's at-large allocation is min([remaining at large], [max at large for a given peer]) usage := t.tracker.TotalUsage() - baseAlloc := math.Max(0, t.maxNonVdrUsage-usage) - baseAlloc = math.Min(baseAlloc, t.maxNonVdrNodeUsage) + baseAlloc := max(0, t.maxNonVdrUsage-usage) + baseAlloc = min(baseAlloc, t.maxNonVdrNodeUsage) // This node gets a stake-weighted portion of the validator allocation. 
- weight := t.vdrs.GetWeight(nodeID) - vdrAlloc := t.vdrAlloc * float64(weight) / float64(t.vdrs.Weight()) + weight := t.vdrs.GetWeight(constants.PrimaryNetworkID, nodeID) + if weight == 0 { + return baseAlloc + } + + totalWeight, err := t.vdrs.TotalWeight(constants.PrimaryNetworkID) + if err != nil { + t.log.Error("couldn't get total weight of primary network", + zap.Error(err), + ) + return baseAlloc + } + + vdrAlloc := t.vdrAlloc * float64(weight) / float64(totalWeight) return vdrAlloc + baseAlloc } diff --git a/avalanchego/snow/networking/tracker/targeter_test.go b/avalanchego/snow/networking/tracker/targeter_test.go index 11d2cca4..d31e64e0 100644 --- a/avalanchego/snow/networking/tracker/targeter_test.go +++ b/avalanchego/snow/networking/tracker/targeter_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package tracker @@ -6,35 +6,36 @@ package tracker import ( "testing" - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/logging" ) // Assert fields are set correctly. 
func TestNewTargeter(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() config := &TargeterConfig{ VdrAlloc: 10, MaxNonVdrUsage: 10, MaxNonVdrNodeUsage: 10, } - vdrs := validators.NewSet() + vdrs := validators.NewManager() tracker := NewMockTracker(ctrl) targeterIntf := NewTargeter( + logging.NoLog{}, config, vdrs, tracker, ) - targeter, ok := targeterIntf.(*targeter) - require.True(ok) + require.IsType(&targeter{}, targeterIntf) + targeter := targeterIntf.(*targeter) require.Equal(vdrs, targeter.vdrs) require.Equal(tracker, targeter.tracker) require.Equal(config.MaxNonVdrUsage, targeter.maxNonVdrUsage) @@ -43,19 +44,14 @@ func TestNewTargeter(t *testing.T) { func TestTarget(t *testing.T) { ctrl := gomock.NewController(t) - defer ctrl.Finish() - vdr := ids.NodeID{1} + vdr := ids.BuildTestNodeID([]byte{1}) vdrWeight := uint64(1) totalVdrWeight := uint64(10) - nonVdr := ids.NodeID{2} - vdrs := validators.NewSet() - if err := vdrs.Add(vdr, nil, ids.Empty, 1); err != nil { - t.Fatal(err) - } - if err := vdrs.Add(ids.GenerateTestNodeID(), nil, ids.Empty, totalVdrWeight-vdrWeight); err != nil { - t.Fatal(err) - } + nonVdr := ids.BuildTestNodeID([]byte{2}) + vdrs := validators.NewManager() + require.NoError(t, vdrs.AddStaker(constants.PrimaryNetworkID, vdr, nil, ids.Empty, 1)) + require.NoError(t, vdrs.AddStaker(constants.PrimaryNetworkID, ids.GenerateTestNodeID(), nil, ids.Empty, totalVdrWeight-vdrWeight)) tracker := NewMockTracker(ctrl) config := &TargeterConfig{ @@ -65,6 +61,7 @@ func TestTarget(t *testing.T) { } targeter := NewTargeter( + logging.NoLog{}, config, vdrs, tracker, diff --git a/avalanchego/snow/networking/worker/pool.go b/avalanchego/snow/networking/worker/pool.go deleted file mode 100644 index b574f7dd..00000000 --- a/avalanchego/snow/networking/worker/pool.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package worker - -import ( - "sync" -) - -var _ Pool = (*pool)(nil) - -type Request func() - -type Pool interface { - // Send the request to the worker pool. - // - // Send should never be called after [Shutdown] is called. - Send(Request) - - // Shutdown the worker pool. - // - // This method will block until all workers have finished their current - // tasks. - // - // It is safe to call shutdown multiple times. - Shutdown() -} - -type pool struct { - requests chan Request - - shutdownOnce sync.Once - shutdownWG sync.WaitGroup -} - -func NewPool(size int) Pool { - p := &pool{ - requests: make(chan Request), - } - p.shutdownWG.Add(size) - for w := 0; w < size; w++ { - go p.runWorker() - } - return p -} - -func (p *pool) runWorker() { - defer p.shutdownWG.Done() - - for request := range p.requests { - request() - } -} - -func (p *pool) Shutdown() { - p.shutdownOnce.Do(func() { - close(p.requests) - }) - p.shutdownWG.Wait() -} - -func (p *pool) Send(msg Request) { - p.requests <- msg -} diff --git a/avalanchego/snow/snowtest/snowtest.go b/avalanchego/snow/snowtest/snowtest.go new file mode 100644 index 00000000..9879b726 --- /dev/null +++ b/avalanchego/snow/snowtest/snowtest.go @@ -0,0 +1,98 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package snowtest + +import ( + "context" + "errors" + "testing" + + "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/api/metrics" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/crypto/bls" + "github.com/ava-labs/avalanchego/utils/logging" +) + +var ( + XChainID = ids.GenerateTestID() + CChainID = ids.GenerateTestID() + PChainID = constants.PlatformChainID + AVAXAssetID = ids.GenerateTestID() + + errMissing = errors.New("missing") + + _ snow.Acceptor = noOpAcceptor{} +) + +type noOpAcceptor struct{} + +func (noOpAcceptor) Accept(*snow.ConsensusContext, ids.ID, []byte) error { + return nil +} + +func ConsensusContext(ctx *snow.Context) *snow.ConsensusContext { + return &snow.ConsensusContext{ + Context: ctx, + Registerer: prometheus.NewRegistry(), + AvalancheRegisterer: prometheus.NewRegistry(), + BlockAcceptor: noOpAcceptor{}, + TxAcceptor: noOpAcceptor{}, + VertexAcceptor: noOpAcceptor{}, + } +} + +func Context(tb testing.TB, chainID ids.ID) *snow.Context { + require := require.New(tb) + + secretKey, err := bls.NewSecretKey() + require.NoError(err) + publicKey := bls.PublicFromSecretKey(secretKey) + + aliaser := ids.NewAliaser() + require.NoError(aliaser.Alias(constants.PlatformChainID, "P")) + require.NoError(aliaser.Alias(constants.PlatformChainID, constants.PlatformChainID.String())) + require.NoError(aliaser.Alias(XChainID, "X")) + require.NoError(aliaser.Alias(XChainID, XChainID.String())) + require.NoError(aliaser.Alias(CChainID, "C")) + require.NoError(aliaser.Alias(CChainID, CChainID.String())) + + validatorState := &validators.TestState{ + GetSubnetIDF: func(_ context.Context, chainID ids.ID) (ids.ID, error) { + subnetID, ok := map[ids.ID]ids.ID{ + constants.PlatformChainID: constants.PrimaryNetworkID, 
+ XChainID: constants.PrimaryNetworkID, + CChainID: constants.PrimaryNetworkID, + }[chainID] + if !ok { + return ids.Empty, errMissing + } + return subnetID, nil + }, + } + + return &snow.Context{ + NetworkID: constants.UnitTestID, + SubnetID: constants.PrimaryNetworkID, + ChainID: chainID, + NodeID: ids.EmptyNodeID, + PublicKey: publicKey, + + XChainID: XChainID, + CChainID: CChainID, + AVAXAssetID: AVAXAssetID, + + Log: logging.NoLog{}, + BCLookup: aliaser, + Metrics: metrics.NewOptionalGatherer(), + + ValidatorState: validatorState, + ChainDataDir: "", + } +} diff --git a/avalanchego/snow/state.go b/avalanchego/snow/state.go index 97fd18df..091cd31f 100644 --- a/avalanchego/snow/state.go +++ b/avalanchego/snow/state.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snow @@ -10,7 +10,7 @@ import ( ) const ( - Initializing = iota + Initializing State = iota StateSyncing Bootstrapping NormalOp diff --git a/avalanchego/snow/uptime/locked_calculator.go b/avalanchego/snow/uptime/locked_calculator.go index 687b5f59..884878ab 100644 --- a/avalanchego/snow/uptime/locked_calculator.go +++ b/avalanchego/snow/uptime/locked_calculator.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package uptime diff --git a/avalanchego/snow/uptime/locked_calculator_test.go b/avalanchego/snow/uptime/locked_calculator_test.go index 3123a5b7..966722f6 100644 --- a/avalanchego/snow/uptime/locked_calculator_test.go +++ b/avalanchego/snow/uptime/locked_calculator_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package uptime @@ -8,9 +8,8 @@ import ( "testing" "time" - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils" @@ -19,10 +18,9 @@ import ( func TestLockedCalculator(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() lc := NewLockedCalculator() - require.NotNil(t) + require.NotNil(lc) // Should still error because ctx is nil nodeID := ids.GenerateTestNodeID() @@ -48,7 +46,7 @@ func TestLockedCalculator(t *testing.T) { require.ErrorIs(err, errStillBootstrapping) _, err = lc.CalculateUptimePercentFrom(nodeID, subnetID, time.Now()) - require.EqualValues(errStillBootstrapping, err) + require.ErrorIs(err, errStillBootstrapping) isBootstrapped.Set(true) diff --git a/avalanchego/snow/uptime/manager.go b/avalanchego/snow/uptime/manager.go index 1cee24b6..a64b71ca 100644 --- a/avalanchego/snow/uptime/manager.go +++ b/avalanchego/snow/uptime/manager.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package uptime @@ -12,7 +12,7 @@ import ( "github.com/ava-labs/avalanchego/utils/timer/mockable" ) -var _ TestManager = (*manager)(nil) +var _ Manager = (*manager)(nil) type Manager interface { Tracker @@ -35,22 +35,18 @@ type Calculator interface { CalculateUptimePercentFrom(nodeID ids.NodeID, subnetID ids.ID, startTime time.Time) (float64, error) } -type TestManager interface { - Manager - SetTime(time.Time) -} - type manager struct { // Used to get time. Useful for faking time during tests. 
- clock mockable.Clock + clock *mockable.Clock state State connections map[ids.NodeID]map[ids.ID]time.Time // nodeID -> subnetID -> time trackedSubnets set.Set[ids.ID] } -func NewManager(state State) Manager { +func NewManager(state State, clk *mockable.Clock) Manager { return &manager{ + clock: clk, state: state, connections: make(map[ids.NodeID]map[ids.ID]time.Time), } @@ -206,10 +202,6 @@ func (m *manager) CalculateUptimePercentFrom(nodeID ids.NodeID, subnetID ids.ID, return uptime, nil } -func (m *manager) SetTime(newTime time.Time) { - m.clock.Set(newTime) -} - // updateSubnetUptime updates the subnet uptime of the node on the state by the amount // of time that the node has been connected to the subnet. func (m *manager) updateSubnetUptime(nodeID ids.NodeID, subnetID ids.ID) error { diff --git a/avalanchego/snow/uptime/manager_test.go b/avalanchego/snow/uptime/manager_test.go index bea1533e..e04fcc3a 100644 --- a/avalanchego/snow/uptime/manager_test.go +++ b/avalanchego/snow/uptime/manager_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package uptime @@ -10,7 +10,9 @@ import ( "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/timer/mockable" ) var errTest = errors.New("non-nil error") @@ -25,18 +27,18 @@ func TestStartTracking(t *testing.T) { s := NewTestState() s.AddNode(nodeID0, subnetID, startTime) - up := NewManager(s).(*manager) + clk := mockable.Clock{} + up := NewManager(s, &clk) currentTime := startTime.Add(time.Second) - up.clock.Set(currentTime) + clk.Set(currentTime) - err := up.StartTracking([]ids.NodeID{nodeID0}, subnetID) - require.NoError(err) + require.NoError(up.StartTracking([]ids.NodeID{nodeID0}, subnetID)) duration, lastUpdated, err := up.CalculateUptime(nodeID0, subnetID) require.NoError(err) require.Equal(time.Second, duration) - require.Equal(up.clock.UnixTime(), lastUpdated) + require.Equal(clk.UnixTime(), lastUpdated) } func TestStartTrackingDBError(t *testing.T) { @@ -50,26 +52,28 @@ func TestStartTrackingDBError(t *testing.T) { s.dbWriteError = errTest s.AddNode(nodeID0, subnetID, startTime) - up := NewManager(s).(*manager) + clk := mockable.Clock{} + up := NewManager(s, &clk) currentTime := startTime.Add(time.Second) - up.clock.Set(currentTime) + clk.Set(currentTime) err := up.StartTracking([]ids.NodeID{nodeID0}, subnetID) - require.Error(err) + require.ErrorIs(err, errTest) } func TestStartTrackingNonValidator(t *testing.T) { require := require.New(t) s := NewTestState() - up := NewManager(s).(*manager) + clk := mockable.Clock{} + up := NewManager(s, &clk) nodeID0 := ids.GenerateTestNodeID() subnetID := ids.GenerateTestID() err := up.StartTracking([]ids.NodeID{nodeID0}, subnetID) - require.Error(err) + require.ErrorIs(err, database.ErrNotFound) } func TestStartTrackingInThePast(t *testing.T) { @@ -82,13 +86,13 @@ func TestStartTrackingInThePast(t *testing.T) { s := NewTestState() s.AddNode(nodeID0, subnetID, startTime) - up := NewManager(s).(*manager) + clk 
:= mockable.Clock{} + up := NewManager(s, &clk) currentTime := startTime.Add(-time.Second) - up.clock.Set(currentTime) + clk.Set(currentTime) - err := up.StartTracking([]ids.NodeID{nodeID0}, subnetID) - require.NoError(err) + require.NoError(up.StartTracking([]ids.NodeID{nodeID0}, subnetID)) duration, lastUpdated, err := up.CalculateUptime(nodeID0, subnetID) require.NoError(err) @@ -107,28 +111,25 @@ func TestStopTrackingDecreasesUptime(t *testing.T) { s := NewTestState() s.AddNode(nodeID0, subnetID, startTime) - up := NewManager(s).(*manager) - up.clock.Set(currentTime) + clk := mockable.Clock{} + up := NewManager(s, &clk) + clk.Set(currentTime) - err := up.StartTracking([]ids.NodeID{nodeID0}, subnetID) - require.NoError(err) + require.NoError(up.StartTracking([]ids.NodeID{nodeID0}, subnetID)) currentTime = startTime.Add(time.Second) - up.clock.Set(currentTime) + clk.Set(currentTime) - err = up.StopTracking([]ids.NodeID{nodeID0}, subnetID) - require.NoError(err) + require.NoError(up.StopTracking([]ids.NodeID{nodeID0}, subnetID)) - up = NewManager(s).(*manager) - up.clock.Set(currentTime) + up = NewManager(s, &clk) - err = up.StartTracking([]ids.NodeID{nodeID0}, subnetID) - require.NoError(err) + require.NoError(up.StartTracking([]ids.NodeID{nodeID0}, subnetID)) duration, lastUpdated, err := up.CalculateUptime(nodeID0, subnetID) require.NoError(err) require.Equal(time.Duration(0), duration) - require.Equal(up.clock.UnixTime(), lastUpdated) + require.Equal(clk.UnixTime(), lastUpdated) } func TestStopTrackingIncreasesUptime(t *testing.T) { @@ -142,31 +143,27 @@ func TestStopTrackingIncreasesUptime(t *testing.T) { s := NewTestState() s.AddNode(nodeID0, subnetID, startTime) - up := NewManager(s).(*manager) - up.clock.Set(currentTime) + clk := mockable.Clock{} + up := NewManager(s, &clk) + clk.Set(currentTime) - err := up.StartTracking([]ids.NodeID{nodeID0}, subnetID) - require.NoError(err) + require.NoError(up.StartTracking([]ids.NodeID{nodeID0}, subnetID)) - err = 
up.Connect(nodeID0, subnetID) - require.NoError(err) + require.NoError(up.Connect(nodeID0, subnetID)) currentTime = startTime.Add(time.Second) - up.clock.Set(currentTime) + clk.Set(currentTime) - err = up.StopTracking([]ids.NodeID{nodeID0}, subnetID) - require.NoError(err) + require.NoError(up.StopTracking([]ids.NodeID{nodeID0}, subnetID)) - up = NewManager(s).(*manager) - up.clock.Set(currentTime) + up = NewManager(s, &clk) - err = up.StartTracking([]ids.NodeID{nodeID0}, subnetID) - require.NoError(err) + require.NoError(up.StartTracking([]ids.NodeID{nodeID0}, subnetID)) duration, lastUpdated, err := up.CalculateUptime(nodeID0, subnetID) require.NoError(err) require.Equal(time.Second, duration) - require.Equal(up.clock.UnixTime(), lastUpdated) + require.Equal(clk.UnixTime(), lastUpdated) } func TestStopTrackingDisconnectedNonValidator(t *testing.T) { @@ -176,13 +173,13 @@ func TestStopTrackingDisconnectedNonValidator(t *testing.T) { subnetID := ids.GenerateTestID() s := NewTestState() - up := NewManager(s).(*manager) + clk := mockable.Clock{} + up := NewManager(s, &clk) - err := up.StartTracking(nil, subnetID) - require.NoError(err) + require.NoError(up.StartTracking(nil, subnetID)) - err = up.StopTracking([]ids.NodeID{nodeID0}, subnetID) - require.Error(err) + err := up.StopTracking([]ids.NodeID{nodeID0}, subnetID) + require.ErrorIs(err, database.ErrNotFound) } func TestStopTrackingConnectedDBError(t *testing.T) { @@ -194,17 +191,16 @@ func TestStopTrackingConnectedDBError(t *testing.T) { s := NewTestState() s.AddNode(nodeID0, subnetID, startTime) - up := NewManager(s).(*manager) + clk := mockable.Clock{} + up := NewManager(s, &clk) - err := up.StartTracking(nil, subnetID) - require.NoError(err) + require.NoError(up.StartTracking(nil, subnetID)) - err = up.Connect(nodeID0, subnetID) - require.NoError(err) + require.NoError(up.Connect(nodeID0, subnetID)) s.dbReadError = errTest - err = up.StopTracking([]ids.NodeID{nodeID0}, subnetID) - require.Error(err) + err := 
up.StopTracking([]ids.NodeID{nodeID0}, subnetID) + require.ErrorIs(err, errTest) } func TestStopTrackingNonConnectedPast(t *testing.T) { @@ -217,17 +213,16 @@ func TestStopTrackingNonConnectedPast(t *testing.T) { s := NewTestState() s.AddNode(nodeID0, subnetID, startTime) - up := NewManager(s).(*manager) - up.clock.Set(currentTime) + clk := mockable.Clock{} + up := NewManager(s, &clk) + clk.Set(currentTime) - err := up.StartTracking([]ids.NodeID{nodeID0}, subnetID) - require.NoError(err) + require.NoError(up.StartTracking([]ids.NodeID{nodeID0}, subnetID)) currentTime = currentTime.Add(-time.Second) - up.clock.Set(currentTime) + clk.Set(currentTime) - err = up.StopTracking([]ids.NodeID{nodeID0}, subnetID) - require.NoError(err) + require.NoError(up.StopTracking([]ids.NodeID{nodeID0}, subnetID)) duration, lastUpdated, err := s.GetUptime(nodeID0, subnetID) require.NoError(err) @@ -245,18 +240,18 @@ func TestStopTrackingNonConnectedDBError(t *testing.T) { s := NewTestState() s.AddNode(nodeID0, subnetID, startTime) - up := NewManager(s).(*manager) - up.clock.Set(currentTime) + clk := mockable.Clock{} + up := NewManager(s, &clk) + clk.Set(currentTime) - err := up.StartTracking([]ids.NodeID{nodeID0}, subnetID) - require.NoError(err) + require.NoError(up.StartTracking([]ids.NodeID{nodeID0}, subnetID)) currentTime = currentTime.Add(time.Second) - up.clock.Set(currentTime) + clk.Set(currentTime) s.dbWriteError = errTest - err = up.StopTracking([]ids.NodeID{nodeID0}, subnetID) - require.Error(err) + err := up.StopTracking([]ids.NodeID{nodeID0}, subnetID) + require.ErrorIs(err, errTest) } func TestConnectAndDisconnect(t *testing.T) { @@ -282,8 +277,9 @@ func TestConnectAndDisconnect(t *testing.T) { startTime := currentTime s := NewTestState() - up := NewManager(s).(*manager) - up.clock.Set(currentTime) + clk := mockable.Clock{} + up := NewManager(s, &clk) + clk.Set(currentTime) for _, subnetID := range tt.subnetIDs { s.AddNode(nodeID0, subnetID, startTime) @@ -291,8 +287,7 @@ 
func TestConnectAndDisconnect(t *testing.T) { connected := up.IsConnected(nodeID0, subnetID) require.False(connected) - err := up.StartTracking([]ids.NodeID{nodeID0}, subnetID) - require.NoError(err) + require.NoError(up.StartTracking([]ids.NodeID{nodeID0}, subnetID)) connected = up.IsConnected(nodeID0, subnetID) require.False(connected) @@ -300,27 +295,25 @@ func TestConnectAndDisconnect(t *testing.T) { duration, lastUpdated, err := up.CalculateUptime(nodeID0, subnetID) require.NoError(err) require.Equal(time.Duration(0), duration) - require.Equal(up.clock.UnixTime(), lastUpdated) + require.Equal(clk.UnixTime(), lastUpdated) - err = up.Connect(nodeID0, subnetID) - require.NoError(err) + require.NoError(up.Connect(nodeID0, subnetID)) connected = up.IsConnected(nodeID0, subnetID) require.True(connected) } currentTime = currentTime.Add(time.Second) - up.clock.Set(currentTime) + clk.Set(currentTime) for _, subnetID := range tt.subnetIDs { duration, lastUpdated, err := up.CalculateUptime(nodeID0, subnetID) require.NoError(err) require.Equal(time.Second, duration) - require.Equal(up.clock.UnixTime(), lastUpdated) + require.Equal(clk.UnixTime(), lastUpdated) } - err := up.Disconnect(nodeID0) - require.NoError(err) + require.NoError(up.Disconnect(nodeID0)) for _, subnetID := range tt.subnetIDs { connected := up.IsConnected(nodeID0, subnetID) @@ -328,13 +321,13 @@ func TestConnectAndDisconnect(t *testing.T) { } currentTime = currentTime.Add(time.Second) - up.clock.Set(currentTime) + clk.Set(currentTime) for _, subnetID := range tt.subnetIDs { duration, lastUpdated, err := up.CalculateUptime(nodeID0, subnetID) require.NoError(err) require.Equal(time.Second, duration) - require.Equal(up.clock.UnixTime(), lastUpdated) + require.Equal(clk.UnixTime(), lastUpdated) } }) } @@ -351,26 +344,24 @@ func TestConnectAndDisconnectBeforeTracking(t *testing.T) { s := NewTestState() s.AddNode(nodeID0, subnetID, startTime) - up := NewManager(s).(*manager) + clk := mockable.Clock{} + up := 
NewManager(s, &clk) currentTime = currentTime.Add(time.Second) - up.clock.Set(currentTime) + clk.Set(currentTime) - err := up.Connect(nodeID0, subnetID) - require.NoError(err) + require.NoError(up.Connect(nodeID0, subnetID)) currentTime = currentTime.Add(time.Second) - up.clock.Set(currentTime) + clk.Set(currentTime) - err = up.Disconnect(nodeID0) - require.NoError(err) + require.NoError(up.Disconnect(nodeID0)) - err = up.StartTracking([]ids.NodeID{nodeID0}, subnetID) - require.NoError(err) + require.NoError(up.StartTracking([]ids.NodeID{nodeID0}, subnetID)) duration, lastUpdated, err := up.CalculateUptime(nodeID0, subnetID) require.NoError(err) require.Equal(2*time.Second, duration) - require.Equal(up.clock.UnixTime(), lastUpdated) + require.Equal(clk.UnixTime(), lastUpdated) } func TestUnrelatedNodeDisconnect(t *testing.T) { @@ -385,41 +376,38 @@ func TestUnrelatedNodeDisconnect(t *testing.T) { s := NewTestState() s.AddNode(nodeID0, subnetID, startTime) - up := NewManager(s).(*manager) - up.clock.Set(currentTime) + clk := mockable.Clock{} + up := NewManager(s, &clk) + clk.Set(currentTime) - err := up.StartTracking([]ids.NodeID{nodeID0}, subnetID) - require.NoError(err) + require.NoError(up.StartTracking([]ids.NodeID{nodeID0}, subnetID)) duration, lastUpdated, err := up.CalculateUptime(nodeID0, subnetID) require.NoError(err) require.Equal(time.Duration(0), duration) - require.Equal(up.clock.UnixTime(), lastUpdated) + require.Equal(clk.UnixTime(), lastUpdated) - err = up.Connect(nodeID0, subnetID) - require.NoError(err) + require.NoError(up.Connect(nodeID0, subnetID)) - err = up.Connect(nodeID1, subnetID) - require.NoError(err) + require.NoError(up.Connect(nodeID1, subnetID)) currentTime = currentTime.Add(time.Second) - up.clock.Set(currentTime) + clk.Set(currentTime) duration, lastUpdated, err = up.CalculateUptime(nodeID0, subnetID) require.NoError(err) require.Equal(time.Second, duration) - require.Equal(up.clock.UnixTime(), lastUpdated) + 
require.Equal(clk.UnixTime(), lastUpdated) - err = up.Disconnect(nodeID1) - require.NoError(err) + require.NoError(up.Disconnect(nodeID1)) currentTime = currentTime.Add(time.Second) - up.clock.Set(currentTime) + clk.Set(currentTime) duration, lastUpdated, err = up.CalculateUptime(nodeID0, subnetID) require.NoError(err) require.Equal(2*time.Second, duration) - require.Equal(up.clock.UnixTime(), lastUpdated) + require.Equal(clk.UnixTime(), lastUpdated) } func TestCalculateUptimeWhenNeverTracked(t *testing.T) { @@ -432,15 +420,16 @@ func TestCalculateUptimeWhenNeverTracked(t *testing.T) { s := NewTestState() s.AddNode(nodeID0, subnetID, startTime) - up := NewManager(s).(*manager) + clk := mockable.Clock{} + up := NewManager(s, &clk) currentTime := startTime.Add(time.Second) - up.clock.Set(currentTime) + clk.Set(currentTime) duration, lastUpdated, err := up.CalculateUptime(nodeID0, subnetID) require.NoError(err) require.Equal(time.Second, duration) - require.Equal(up.clock.UnixTime(), lastUpdated) + require.Equal(clk.UnixTime(), lastUpdated) uptime, err := up.CalculateUptimePercentFrom(nodeID0, subnetID, startTime.Truncate(time.Second)) require.NoError(err) @@ -456,20 +445,20 @@ func TestCalculateUptimeWhenNeverConnected(t *testing.T) { s := NewTestState() - up := NewManager(s).(*manager) + clk := mockable.Clock{} + up := NewManager(s, &clk) - err := up.StartTracking([]ids.NodeID{}, subnetID) - require.NoError(err) + require.NoError(up.StartTracking([]ids.NodeID{}, subnetID)) s.AddNode(nodeID0, subnetID, startTime) currentTime := startTime.Add(time.Second) - up.clock.Set(currentTime) + clk.Set(currentTime) duration, lastUpdated, err := up.CalculateUptime(nodeID0, subnetID) require.NoError(err) require.Equal(time.Duration(0), duration) - require.Equal(up.clock.UnixTime(), lastUpdated) + require.Equal(clk.UnixTime(), lastUpdated) uptime, err := up.CalculateUptimePercentFrom(nodeID0, subnetID, startTime) require.NoError(err) @@ -487,25 +476,24 @@ func 
TestCalculateUptimeWhenConnectedBeforeTracking(t *testing.T) { s := NewTestState() s.AddNode(nodeID0, subnetID, startTime) - up := NewManager(s).(*manager) - up.clock.Set(currentTime) + clk := mockable.Clock{} + up := NewManager(s, &clk) + clk.Set(currentTime) - err := up.Connect(nodeID0, subnetID) - require.NoError(err) + require.NoError(up.Connect(nodeID0, subnetID)) currentTime = currentTime.Add(time.Second) - up.clock.Set(currentTime) + clk.Set(currentTime) - err = up.StartTracking([]ids.NodeID{nodeID0}, subnetID) - require.NoError(err) + require.NoError(up.StartTracking([]ids.NodeID{nodeID0}, subnetID)) currentTime = currentTime.Add(time.Second) - up.clock.Set(currentTime) + clk.Set(currentTime) duration, lastUpdated, err := up.CalculateUptime(nodeID0, subnetID) require.NoError(err) require.Equal(2*time.Second, duration) - require.Equal(up.clock.UnixTime(), lastUpdated) + require.Equal(clk.UnixTime(), lastUpdated) } func TestCalculateUptimeWhenConnectedInFuture(t *testing.T) { @@ -519,25 +507,24 @@ func TestCalculateUptimeWhenConnectedInFuture(t *testing.T) { s := NewTestState() s.AddNode(nodeID0, subnetID, startTime) - up := NewManager(s).(*manager) - up.clock.Set(currentTime) + clk := mockable.Clock{} + up := NewManager(s, &clk) + clk.Set(currentTime) - err := up.StartTracking([]ids.NodeID{nodeID0}, subnetID) - require.NoError(err) + require.NoError(up.StartTracking([]ids.NodeID{nodeID0}, subnetID)) currentTime = currentTime.Add(2 * time.Second) - up.clock.Set(currentTime) + clk.Set(currentTime) - err = up.Connect(nodeID0, subnetID) - require.NoError(err) + require.NoError(up.Connect(nodeID0, subnetID)) currentTime = currentTime.Add(-time.Second) - up.clock.Set(currentTime) + clk.Set(currentTime) duration, lastUpdated, err := up.CalculateUptime(nodeID0, subnetID) require.NoError(err) require.Equal(time.Duration(0), duration) - require.Equal(up.clock.UnixTime(), lastUpdated) + require.Equal(clk.UnixTime(), lastUpdated) } func TestCalculateUptimeNonValidator(t 
*testing.T) { @@ -549,10 +536,11 @@ func TestCalculateUptimeNonValidator(t *testing.T) { s := NewTestState() - up := NewManager(s).(*manager) + clk := mockable.Clock{} + up := NewManager(s, &clk) _, err := up.CalculateUptimePercentFrom(nodeID0, subnetID, startTime) - require.Error(err) + require.ErrorIs(err, database.ErrNotFound) } func TestCalculateUptimePercentageDivBy0(t *testing.T) { @@ -566,8 +554,9 @@ func TestCalculateUptimePercentageDivBy0(t *testing.T) { s := NewTestState() s.AddNode(nodeID0, subnetID, startTime) - up := NewManager(s).(*manager) - up.clock.Set(currentTime) + clk := mockable.Clock{} + up := NewManager(s, &clk) + clk.Set(currentTime) uptime, err := up.CalculateUptimePercentFrom(nodeID0, subnetID, startTime.Truncate(time.Second)) require.NoError(err) @@ -585,13 +574,13 @@ func TestCalculateUptimePercentage(t *testing.T) { s := NewTestState() s.AddNode(nodeID0, subnetID, startTime) - up := NewManager(s).(*manager) + clk := mockable.Clock{} + up := NewManager(s, &clk) - err := up.StartTracking([]ids.NodeID{nodeID0}, subnetID) - require.NoError(err) + require.NoError(up.StartTracking([]ids.NodeID{nodeID0}, subnetID)) currentTime = currentTime.Add(time.Second) - up.clock.Set(currentTime) + clk.Set(currentTime) uptime, err := up.CalculateUptimePercentFrom(nodeID0, subnetID, startTime.Truncate(time.Second)) require.NoError(err) @@ -609,37 +598,33 @@ func TestStopTrackingUnixTimeRegression(t *testing.T) { s := NewTestState() s.AddNode(nodeID0, subnetID, startTime) - up := NewManager(s).(*manager) - up.clock.Set(currentTime) + clk := mockable.Clock{} + up := NewManager(s, &clk) + clk.Set(currentTime) - err := up.StartTracking([]ids.NodeID{nodeID0}, subnetID) - require.NoError(err) + require.NoError(up.StartTracking([]ids.NodeID{nodeID0}, subnetID)) - err = up.Connect(nodeID0, subnetID) - require.NoError(err) + require.NoError(up.Connect(nodeID0, subnetID)) currentTime = startTime.Add(time.Second) - up.clock.Set(currentTime) + clk.Set(currentTime) - 
err = up.StopTracking([]ids.NodeID{nodeID0}, subnetID) - require.NoError(err) + require.NoError(up.StopTracking([]ids.NodeID{nodeID0}, subnetID)) currentTime = startTime.Add(time.Second) - up.clock.Set(currentTime) + clk.Set(currentTime) - up = NewManager(s).(*manager) + up = NewManager(s, &clk) currentTime = startTime.Add(time.Second) - up.clock.Set(currentTime) + clk.Set(currentTime) - err = up.StartTracking([]ids.NodeID{nodeID0}, subnetID) - require.NoError(err) + require.NoError(up.StartTracking([]ids.NodeID{nodeID0}, subnetID)) - err = up.Connect(nodeID0, subnetID) - require.NoError(err) + require.NoError(up.Connect(nodeID0, subnetID)) currentTime = startTime.Add(time.Second) - up.clock.Set(currentTime) + clk.Set(currentTime) perc, err := up.CalculateUptimePercent(nodeID0, subnetID) require.NoError(err) diff --git a/avalanchego/snow/uptime/mock_calculator.go b/avalanchego/snow/uptime/mock_calculator.go index 02f5a85c..cc5b5942 100644 --- a/avalanchego/snow/uptime/mock_calculator.go +++ b/avalanchego/snow/uptime/mock_calculator.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/snow/uptime (interfaces: Calculator) +// +// Generated by this command: +// +// mockgen -package=uptime -destination=snow/uptime/mock_calculator.go github.com/ava-labs/avalanchego/snow/uptime Calculator +// // Package uptime is a generated GoMock package. package uptime @@ -12,7 +14,7 @@ import ( time "time" ids "github.com/ava-labs/avalanchego/ids" - gomock "github.com/golang/mock/gomock" + gomock "go.uber.org/mock/gomock" ) // MockCalculator is a mock of Calculator interface. @@ -49,7 +51,7 @@ func (m *MockCalculator) CalculateUptime(arg0 ids.NodeID, arg1 ids.ID) (time.Dur } // CalculateUptime indicates an expected call of CalculateUptime. 
-func (mr *MockCalculatorMockRecorder) CalculateUptime(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockCalculatorMockRecorder) CalculateUptime(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CalculateUptime", reflect.TypeOf((*MockCalculator)(nil).CalculateUptime), arg0, arg1) } @@ -64,7 +66,7 @@ func (m *MockCalculator) CalculateUptimePercent(arg0 ids.NodeID, arg1 ids.ID) (f } // CalculateUptimePercent indicates an expected call of CalculateUptimePercent. -func (mr *MockCalculatorMockRecorder) CalculateUptimePercent(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockCalculatorMockRecorder) CalculateUptimePercent(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CalculateUptimePercent", reflect.TypeOf((*MockCalculator)(nil).CalculateUptimePercent), arg0, arg1) } @@ -79,7 +81,7 @@ func (m *MockCalculator) CalculateUptimePercentFrom(arg0 ids.NodeID, arg1 ids.ID } // CalculateUptimePercentFrom indicates an expected call of CalculateUptimePercentFrom. -func (mr *MockCalculatorMockRecorder) CalculateUptimePercentFrom(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockCalculatorMockRecorder) CalculateUptimePercentFrom(arg0, arg1, arg2 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CalculateUptimePercentFrom", reflect.TypeOf((*MockCalculator)(nil).CalculateUptimePercentFrom), arg0, arg1, arg2) } diff --git a/avalanchego/snow/uptime/no_op_calculator.go b/avalanchego/snow/uptime/no_op_calculator.go index 44c688e3..fb308f4f 100644 --- a/avalanchego/snow/uptime/no_op_calculator.go +++ b/avalanchego/snow/uptime/no_op_calculator.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package uptime diff --git a/avalanchego/snow/uptime/state.go b/avalanchego/snow/uptime/state.go index 5b2592ac..f9edeb76 100644 --- a/avalanchego/snow/uptime/state.go +++ b/avalanchego/snow/uptime/state.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package uptime diff --git a/avalanchego/snow/uptime/test_state.go b/avalanchego/snow/uptime/test_state.go index 58687e16..23879b5c 100644 --- a/avalanchego/snow/uptime/test_state.go +++ b/avalanchego/snow/uptime/test_state.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package uptime diff --git a/avalanchego/snow/validators/connector.go b/avalanchego/snow/validators/connector.go index abb28d08..e3e7e1f9 100644 --- a/avalanchego/snow/validators/connector.go +++ b/avalanchego/snow/validators/connector.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package validators diff --git a/avalanchego/snow/validators/gvalidators/validator_state_client.go b/avalanchego/snow/validators/gvalidators/validator_state_client.go index 4760212a..49fa1e64 100644 --- a/avalanchego/snow/validators/gvalidators/validator_state_client.go +++ b/avalanchego/snow/validators/gvalidators/validator_state_client.go @@ -1,10 +1,11 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package gvalidators import ( "context" + "errors" "google.golang.org/protobuf/types/known/emptypb" @@ -15,7 +16,10 @@ import ( pb "github.com/ava-labs/avalanchego/proto/pb/validatorstate" ) -var _ validators.State = (*Client)(nil) +var ( + _ validators.State = (*Client)(nil) + errFailedPublicKeyDeserialize = errors.New("couldn't deserialize public key") +) type Client struct { client pb.ValidatorStateClient @@ -72,9 +76,13 @@ func (c *Client) GetValidatorSet( } var publicKey *bls.PublicKey if len(validator.PublicKey) > 0 { - publicKey, err = bls.PublicKeyFromBytes(validator.PublicKey) - if err != nil { - return nil, err + // This is a performance optimization to avoid the cost of compression + // and key re-verification with PublicKeyFromBytes. We can safely + // assume that the BLS Public Keys are verified before being added + // to the P-Chain and served by the gRPC server. + publicKey = bls.DeserializePublicKey(validator.PublicKey) + if publicKey == nil { + return nil, errFailedPublicKeyDeserialize } } vdrs[nodeID] = &validators.GetValidatorOutput{ diff --git a/avalanchego/snow/validators/gvalidators/validator_state_server.go b/avalanchego/snow/validators/gvalidators/validator_state_server.go index 949022ca..5476dca4 100644 --- a/avalanchego/snow/validators/gvalidators/validator_state_server.go +++ b/avalanchego/snow/validators/gvalidators/validator_state_server.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package gvalidators @@ -66,11 +66,13 @@ func (s *Server) GetValidatorSet(ctx context.Context, req *pb.GetValidatorSetReq i := 0 for _, vdr := range vdrs { vdrPB := &pb.Validator{ - NodeId: vdr.NodeID[:], + NodeId: vdr.NodeID.Bytes(), Weight: vdr.Weight, } if vdr.PublicKey != nil { - vdrPB.PublicKey = bls.PublicKeyToBytes(vdr.PublicKey) + // This is a performance optimization to avoid the cost of compression + // from PublicKeyToBytes. + vdrPB.PublicKey = bls.SerializePublicKey(vdr.PublicKey) } resp.Validators[i] = vdrPB i++ diff --git a/avalanchego/snow/validators/gvalidators/validator_state_test.go b/avalanchego/snow/validators/gvalidators/validator_state_test.go index d9a6d409..8895507a 100644 --- a/avalanchego/snow/validators/gvalidators/validator_state_test.go +++ b/avalanchego/snow/validators/gvalidators/validator_state_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package gvalidators @@ -6,11 +6,11 @@ package gvalidators import ( "context" "errors" + "fmt" "testing" - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/validators" @@ -23,12 +23,13 @@ import ( var errCustom = errors.New("custom") type testState struct { - client *Client - server *validators.MockState - closeFn func() + client *Client + server *validators.MockState } func setupState(t testing.TB, ctrl *gomock.Controller) *testState { + require := require.New(t) + t.Helper() state := &testState{ @@ -36,9 +37,7 @@ func setupState(t testing.TB, ctrl *gomock.Controller) *testState { } listener, err := grpcutils.NewListener() - if err != nil { - t.Fatalf("Failed to create listener: %s", err) - } + require.NoError(err) serverCloser := grpcutils.ServerCloser{} server := grpcutils.NewServer() @@ -48,26 +47,24 @@ func setupState(t testing.TB, ctrl *gomock.Controller) *testState { go grpcutils.Serve(listener, server) conn, err := grpcutils.Dial(listener.Addr().String()) - if err != nil { - t.Fatalf("Failed to dial: %s", err) - } + require.NoError(err) state.client = NewClient(pb.NewValidatorStateClient(conn)) - state.closeFn = func() { + + t.Cleanup(func() { serverCloser.Stop() _ = conn.Close() _ = listener.Close() - } + }) + return state } func TestGetMinimumHeight(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() state := setupState(t, ctrl) - defer state.closeFn() // Happy path expectedHeight := uint64(1337) @@ -81,16 +78,15 @@ func TestGetMinimumHeight(t *testing.T) { state.server.EXPECT().GetMinimumHeight(gomock.Any()).Return(expectedHeight, errCustom) _, err = state.client.GetMinimumHeight(context.Background()) - require.Error(err) + // TODO: require specific error + require.Error(err) //nolint:forbidigo // currently returns grpc error } func TestGetCurrentHeight(t *testing.T) { require := 
require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() state := setupState(t, ctrl) - defer state.closeFn() // Happy path expectedHeight := uint64(1337) @@ -104,16 +100,15 @@ func TestGetCurrentHeight(t *testing.T) { state.server.EXPECT().GetCurrentHeight(gomock.Any()).Return(expectedHeight, errCustom) _, err = state.client.GetCurrentHeight(context.Background()) - require.Error(err) + // TODO: require specific error + require.Error(err) //nolint:forbidigo // currently returns grpc error } func TestGetSubnetID(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() state := setupState(t, ctrl) - defer state.closeFn() // Happy path chainID := ids.GenerateTestID() @@ -128,16 +123,15 @@ func TestGetSubnetID(t *testing.T) { state.server.EXPECT().GetSubnetID(gomock.Any(), chainID).Return(expectedSubnetID, errCustom) _, err = state.client.GetSubnetID(context.Background(), chainID) - require.Error(err) + // TODO: require specific error + require.Error(err) //nolint:forbidigo // currently returns grpc error } func TestGetValidatorSet(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() state := setupState(t, ctrl) - defer state.closeFn() // Happy path sk0, err := bls.NewSecretKey() @@ -179,5 +173,64 @@ func TestGetValidatorSet(t *testing.T) { state.server.EXPECT().GetValidatorSet(gomock.Any(), height, subnetID).Return(expectedVdrs, errCustom) _, err = state.client.GetValidatorSet(context.Background(), height, subnetID) - require.Error(err) + // TODO: require specific error + require.Error(err) //nolint:forbidigo // currently returns grpc error +} + +func TestPublicKeyDeserialize(t *testing.T) { + require := require.New(t) + + sk, err := bls.NewSecretKey() + require.NoError(err) + pk := bls.PublicFromSecretKey(sk) + + pkBytes := bls.SerializePublicKey(pk) + pkDe := bls.DeserializePublicKey(pkBytes) + require.NotNil(pkDe) + require.Equal(pk, pkDe) +} + +// BenchmarkGetValidatorSet 
measures the time it takes complete a gRPC client +// request based on a mocked validator set. +func BenchmarkGetValidatorSet(b *testing.B) { + for _, size := range []int{1, 16, 32, 1024, 2048} { + vs := setupValidatorSet(b, size) + b.Run(fmt.Sprintf("get_validator_set_%d_validators", size), func(b *testing.B) { + benchmarkGetValidatorSet(b, vs) + }) + } +} + +func benchmarkGetValidatorSet(b *testing.B, vs map[ids.NodeID]*validators.GetValidatorOutput) { + require := require.New(b) + ctrl := gomock.NewController(b) + state := setupState(b, ctrl) + + height := uint64(1337) + subnetID := ids.GenerateTestID() + state.server.EXPECT().GetValidatorSet(gomock.Any(), height, subnetID).Return(vs, nil).AnyTimes() + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := state.client.GetValidatorSet(context.Background(), height, subnetID) + require.NoError(err) + } + b.StopTimer() +} + +func setupValidatorSet(b *testing.B, size int) map[ids.NodeID]*validators.GetValidatorOutput { + b.Helper() + + set := make(map[ids.NodeID]*validators.GetValidatorOutput, size) + sk, err := bls.NewSecretKey() + require.NoError(b, err) + pk := bls.PublicFromSecretKey(sk) + for i := 0; i < size; i++ { + id := ids.GenerateTestNodeID() + set[id] = &validators.GetValidatorOutput{ + NodeID: id, + PublicKey: pk, + Weight: uint64(i), + } + } + return set } diff --git a/avalanchego/snow/validators/logger.go b/avalanchego/snow/validators/logger.go index 98d47d30..40613b76 100644 --- a/avalanchego/snow/validators/logger.go +++ b/avalanchego/snow/validators/logger.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package validators @@ -7,7 +7,6 @@ import ( "go.uber.org/zap" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/set" @@ -18,7 +17,6 @@ var _ SetCallbackListener = (*logger)(nil) type logger struct { log logging.Logger - enabled *utils.Atomic[bool] subnetID ids.ID nodeIDs set.Set[ids.NodeID] } @@ -27,15 +25,12 @@ type logger struct { // the specified validators func NewLogger( log logging.Logger, - enabled *utils.Atomic[bool], subnetID ids.ID, nodeIDs ...ids.NodeID, ) SetCallbackListener { - nodeIDSet := set.NewSet[ids.NodeID](len(nodeIDs)) - nodeIDSet.Add(nodeIDs...) + nodeIDSet := set.Of(nodeIDs...) return &logger{ log: log, - enabled: enabled, subnetID: subnetID, nodeIDs: nodeIDSet, } @@ -47,7 +42,7 @@ func (l *logger) OnValidatorAdded( txID ids.ID, weight uint64, ) { - if l.enabled.Get() && l.nodeIDs.Contains(nodeID) { + if l.nodeIDs.Contains(nodeID) { var pkBytes []byte if pk != nil { pkBytes = bls.PublicKeyToBytes(pk) @@ -66,7 +61,7 @@ func (l *logger) OnValidatorRemoved( nodeID ids.NodeID, weight uint64, ) { - if l.enabled.Get() && l.nodeIDs.Contains(nodeID) { + if l.nodeIDs.Contains(nodeID) { l.log.Info("node removed from validator set", zap.Stringer("subnetID", l.subnetID), zap.Stringer("nodeID", nodeID), @@ -80,7 +75,7 @@ func (l *logger) OnValidatorWeightChanged( oldWeight uint64, newWeight uint64, ) { - if l.enabled.Get() && l.nodeIDs.Contains(nodeID) { + if l.nodeIDs.Contains(nodeID) { l.log.Info("validator weight changed", zap.Stringer("subnetID", l.subnetID), zap.Stringer("nodeID", nodeID), diff --git a/avalanchego/snow/validators/manager.go b/avalanchego/snow/validators/manager.go index 0d0bc563..5844c1e7 100644 --- a/avalanchego/snow/validators/manager.go +++ b/avalanchego/snow/validators/manager.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
+// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package validators @@ -14,33 +14,89 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/crypto/bls" + "github.com/ava-labs/avalanchego/utils/set" ) var ( _ Manager = (*manager)(nil) - errMissingValidators = errors.New("missing validators") + ErrZeroWeight = errors.New("weight must be non-zero") + ErrMissingValidators = errors.New("missing validators") ) +type SetCallbackListener interface { + OnValidatorAdded(nodeID ids.NodeID, pk *bls.PublicKey, txID ids.ID, weight uint64) + OnValidatorRemoved(nodeID ids.NodeID, weight uint64) + OnValidatorWeightChanged(nodeID ids.NodeID, oldWeight, newWeight uint64) +} + // Manager holds the validator set of each subnet type Manager interface { fmt.Stringer - // Add a subnet's validator set to the manager. - // - // If the subnet had previously registered a validator set, false will be - // returned and the manager will not be modified. - Add(subnetID ids.ID, set Set) bool + // Add a new staker to the subnet. + // Returns an error if: + // - [weight] is 0 + // - [nodeID] is already in the validator set + // If an error is returned, the set will be unmodified. + AddStaker(subnetID ids.ID, nodeID ids.NodeID, pk *bls.PublicKey, txID ids.ID, weight uint64) error + + // AddWeight to an existing staker to the subnet. + // Returns an error if: + // - [weight] is 0 + // - [nodeID] is not already in the validator set + // If an error is returned, the set will be unmodified. + // AddWeight can result in a total weight that overflows uint64. + // In this case no error will be returned for this call. + // However, the next TotalWeight call will return an error. + AddWeight(subnetID ids.ID, nodeID ids.NodeID, weight uint64) error + + // GetWeight retrieves the validator weight from the subnet. 
+ GetWeight(subnetID ids.ID, nodeID ids.NodeID) uint64 + + // GetValidator returns the validator tied to the specified ID in subnet. + // If the validator doesn't exist, returns false. + GetValidator(subnetID ids.ID, nodeID ids.NodeID) (*Validator, bool) + + // GetValidatoIDs returns the validator IDs in the subnet. + GetValidatorIDs(subnetID ids.ID) []ids.NodeID + + // SubsetWeight returns the sum of the weights of the validators in the subnet. + // Returns err if subset weight overflows uint64. + SubsetWeight(subnetID ids.ID, validatorIDs set.Set[ids.NodeID]) (uint64, error) + + // RemoveWeight from a staker in the subnet. If the staker's weight becomes 0, the staker + // will be removed from the subnet set. + // Returns an error if: + // - [weight] is 0 + // - [nodeID] is not already in the subnet set + // - the weight of the validator would become negative + // If an error is returned, the set will be unmodified. + RemoveWeight(subnetID ids.ID, nodeID ids.NodeID, weight uint64) error + + // Count returns the number of validators currently in the subnet. + Count(subnetID ids.ID) int + + // TotalWeight returns the cumulative weight of all validators in the subnet. + // Returns err if total weight overflows uint64. + TotalWeight(subnetID ids.ID) (uint64, error) - // Get returns the validator set for the given subnet - // Returns false if the subnet doesn't exist - Get(ids.ID) (Set, bool) + // Sample returns a collection of validatorIDs in the subnet, potentially with duplicates. + // If sampling the requested size isn't possible, an error will be returned. + Sample(subnetID ids.ID, size int) ([]ids.NodeID, error) + + // Map of the validators in this subnet + GetMap(subnetID ids.ID) map[ids.NodeID]*GetValidatorOutput + + // When a validator's weight changes, or a validator is added/removed, + // this listener is called. 
+ RegisterCallbackListener(subnetID ids.ID, listener SetCallbackListener) } // NewManager returns a new, empty manager func NewManager() Manager { return &manager{ - subnetToVdrs: make(map[ids.ID]Set), + subnetToVdrs: make(map[ids.ID]*vdrSet), } } @@ -49,27 +105,176 @@ type manager struct { // Key: Subnet ID // Value: The validators that validate the subnet - subnetToVdrs map[ids.ID]Set + subnetToVdrs map[ids.ID]*vdrSet } -func (m *manager) Add(subnetID ids.ID, set Set) bool { +func (m *manager) AddStaker(subnetID ids.ID, nodeID ids.NodeID, pk *bls.PublicKey, txID ids.ID, weight uint64) error { + if weight == 0 { + return ErrZeroWeight + } + m.lock.Lock() defer m.lock.Unlock() - if _, exists := m.subnetToVdrs[subnetID]; exists { - return false + set, exists := m.subnetToVdrs[subnetID] + if !exists { + set = newSet() + m.subnetToVdrs[subnetID] = set } - m.subnetToVdrs[subnetID] = set - return true + return set.Add(nodeID, pk, txID, weight) } -func (m *manager) Get(subnetID ids.ID) (Set, bool) { +func (m *manager) AddWeight(subnetID ids.ID, nodeID ids.NodeID, weight uint64) error { + if weight == 0 { + return ErrZeroWeight + } + + // We do not need to grab a write lock here because we never modify the + // subnetToVdrs map. However, we must hold the read lock during the entirity + // of this function to ensure that errors are returned consistently. + // + // Consider the case that: + // AddStaker(subnetID, nodeID, 1) + // go func() { + // AddWeight(subnetID, nodeID, 1) + // } + // go func() { + // RemoveWeight(subnetID, nodeID, 1) + // } + // + // In this case, after both goroutines have finished, either AddWeight + // should have errored, or the weight of the node should equal 1. It would + // be unexpected to not have received an error from AddWeight but for the + // node to no longer be tracked as a validator. 
m.lock.RLock() defer m.lock.RUnlock() - vdrs, ok := m.subnetToVdrs[subnetID] - return vdrs, ok + set, exists := m.subnetToVdrs[subnetID] + if !exists { + return errMissingValidator + } + + return set.AddWeight(nodeID, weight) +} + +func (m *manager) GetWeight(subnetID ids.ID, nodeID ids.NodeID) uint64 { + m.lock.RLock() + set, exists := m.subnetToVdrs[subnetID] + m.lock.RUnlock() + if !exists { + return 0 + } + + return set.GetWeight(nodeID) +} + +func (m *manager) GetValidator(subnetID ids.ID, nodeID ids.NodeID) (*Validator, bool) { + m.lock.RLock() + set, exists := m.subnetToVdrs[subnetID] + m.lock.RUnlock() + if !exists { + return nil, false + } + + return set.Get(nodeID) +} + +func (m *manager) SubsetWeight(subnetID ids.ID, validatorIDs set.Set[ids.NodeID]) (uint64, error) { + m.lock.RLock() + set, exists := m.subnetToVdrs[subnetID] + m.lock.RUnlock() + if !exists { + return 0, nil + } + + return set.SubsetWeight(validatorIDs) +} + +func (m *manager) RemoveWeight(subnetID ids.ID, nodeID ids.NodeID, weight uint64) error { + if weight == 0 { + return ErrZeroWeight + } + + m.lock.Lock() + defer m.lock.Unlock() + + set, exists := m.subnetToVdrs[subnetID] + if !exists { + return errMissingValidator + } + + if err := set.RemoveWeight(nodeID, weight); err != nil { + return err + } + // If this was the last validator in the subnet and no callback listeners + // are registered, remove the subnet + if set.Len() == 0 && !set.HasCallbackRegistered() { + delete(m.subnetToVdrs, subnetID) + } + + return nil +} + +func (m *manager) Count(subnetID ids.ID) int { + m.lock.RLock() + set, exists := m.subnetToVdrs[subnetID] + m.lock.RUnlock() + if !exists { + return 0 + } + + return set.Len() +} + +func (m *manager) TotalWeight(subnetID ids.ID) (uint64, error) { + m.lock.RLock() + set, exists := m.subnetToVdrs[subnetID] + m.lock.RUnlock() + if !exists { + return 0, nil + } + + return set.TotalWeight() +} + +func (m *manager) Sample(subnetID ids.ID, size int) ([]ids.NodeID, error) { 
+ if size == 0 { + return nil, nil + } + + m.lock.RLock() + set, exists := m.subnetToVdrs[subnetID] + m.lock.RUnlock() + if !exists { + return nil, ErrMissingValidators + } + + return set.Sample(size) +} + +func (m *manager) GetMap(subnetID ids.ID) map[ids.NodeID]*GetValidatorOutput { + m.lock.RLock() + set, exists := m.subnetToVdrs[subnetID] + m.lock.RUnlock() + if !exists { + return make(map[ids.NodeID]*GetValidatorOutput) + } + + return set.Map() +} + +func (m *manager) RegisterCallbackListener(subnetID ids.ID, listener SetCallbackListener) { + m.lock.Lock() + defer m.lock.Unlock() + + set, exists := m.subnetToVdrs[subnetID] + if !exists { + set = newSet() + m.subnetToVdrs[subnetID] = set + } + + set.RegisterCallbackListener(listener) } func (m *manager) String() string { @@ -96,52 +301,13 @@ func (m *manager) String() string { return sb.String() } -// Add is a helper that fetches the validator set of [subnetID] from [m] and -// adds [nodeID] to the validator set. -// Returns an error if: -// - [subnetID] does not have a registered validator set in [m] -// - adding [nodeID] to the validator set returns an error -func Add(m Manager, subnetID ids.ID, nodeID ids.NodeID, pk *bls.PublicKey, txID ids.ID, weight uint64) error { - vdrs, ok := m.Get(subnetID) - if !ok { - return fmt.Errorf("%w: %s", errMissingValidators, subnetID) - } - return vdrs.Add(nodeID, pk, txID, weight) -} - -// AddWeight is a helper that fetches the validator set of [subnetID] from [m] -// and adds [weight] to [nodeID] in the validator set. 
-// Returns an error if: -// - [subnetID] does not have a registered validator set in [m] -// - adding [weight] to [nodeID] in the validator set returns an error -func AddWeight(m Manager, subnetID ids.ID, nodeID ids.NodeID, weight uint64) error { - vdrs, ok := m.Get(subnetID) - if !ok { - return fmt.Errorf("%w: %s", errMissingValidators, subnetID) - } - return vdrs.AddWeight(nodeID, weight) -} - -// RemoveWeight is a helper that fetches the validator set of [subnetID] from -// [m] and removes [weight] from [nodeID] in the validator set. -// Returns an error if: -// - [subnetID] does not have a registered validator set in [m] -// - removing [weight] from [nodeID] in the validator set returns an error -func RemoveWeight(m Manager, subnetID ids.ID, nodeID ids.NodeID, weight uint64) error { - vdrs, ok := m.Get(subnetID) - if !ok { - return fmt.Errorf("%w: %s", errMissingValidators, subnetID) +func (m *manager) GetValidatorIDs(subnetID ids.ID) []ids.NodeID { + m.lock.RLock() + vdrs, exist := m.subnetToVdrs[subnetID] + m.lock.RUnlock() + if !exist { + return nil } - return vdrs.RemoveWeight(nodeID, weight) -} -// Contains is a helper that fetches the validator set of [subnetID] from [m] -// and returns if the validator set contains [nodeID]. If [m] does not contain a -// validator set for [subnetID], false is returned. -func Contains(m Manager, subnetID ids.ID, nodeID ids.NodeID) bool { - vdrs, ok := m.Get(subnetID) - if !ok { - return false - } - return vdrs.Contains(nodeID) + return vdrs.GetValidatorIDs() } diff --git a/avalanchego/snow/validators/manager_test.go b/avalanchego/snow/validators/manager_test.go index 433033ca..781d2e78 100644 --- a/avalanchego/snow/validators/manager_test.go +++ b/avalanchego/snow/validators/manager_test.go @@ -1,120 +1,552 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package validators import ( + "math" "testing" "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/crypto/bls" + "github.com/ava-labs/avalanchego/utils/sampler" + "github.com/ava-labs/avalanchego/utils/set" + + safemath "github.com/ava-labs/avalanchego/utils/math" ) -func TestAdd(t *testing.T) { +func TestAddZeroWeight(t *testing.T) { require := require.New(t) - m := NewManager() + m := NewManager().(*manager) + err := m.AddStaker(ids.GenerateTestID(), ids.GenerateTestNodeID(), nil, ids.Empty, 0) + require.ErrorIs(err, ErrZeroWeight) + require.Empty(m.subnetToVdrs) +} + +func TestAddDuplicate(t *testing.T) { + require := require.New(t) + m := NewManager() subnetID := ids.GenerateTestID() + nodeID := ids.GenerateTestNodeID() + require.NoError(m.AddStaker(subnetID, nodeID, nil, ids.Empty, 1)) - err := Add(m, subnetID, nodeID, nil, ids.Empty, 1) - require.ErrorIs(err, errMissingValidators) + err := m.AddStaker(subnetID, nodeID, nil, ids.Empty, 1) + require.ErrorIs(err, errDuplicateValidator) +} - s := NewSet() - m.Add(subnetID, s) +func TestAddOverflow(t *testing.T) { + require := require.New(t) - err = Add(m, subnetID, nodeID, nil, ids.Empty, 1) - require.NoError(err) + m := NewManager() + subnetID := ids.GenerateTestID() + nodeID1 := ids.GenerateTestNodeID() + nodeID2 := ids.GenerateTestNodeID() + require.NoError(m.AddStaker(subnetID, nodeID1, nil, ids.Empty, 1)) + + require.NoError(m.AddStaker(subnetID, nodeID2, nil, ids.Empty, math.MaxUint64)) - weight := s.Weight() - require.EqualValues(1, weight) + _, err := m.TotalWeight(subnetID) + require.ErrorIs(err, errTotalWeightNotUint64) + + set := set.Of(nodeID1, nodeID2) + _, err = m.SubsetWeight(subnetID, set) + require.ErrorIs(err, safemath.ErrOverflow) } -func TestAddWeight(t *testing.T) { +func TestAddWeightZeroWeight(t *testing.T) { require := require.New(t) m := NewManager() + subnetID := ids.GenerateTestID() + + nodeID := ids.GenerateTestNodeID() + 
require.NoError(m.AddStaker(subnetID, nodeID, nil, ids.Empty, 1)) + + err := m.AddWeight(subnetID, nodeID, 0) + require.ErrorIs(err, ErrZeroWeight) +} + +func TestAddWeightOverflow(t *testing.T) { + require := require.New(t) + m := NewManager() subnetID := ids.GenerateTestID() + require.NoError(m.AddStaker(subnetID, ids.GenerateTestNodeID(), nil, ids.Empty, 1)) + nodeID := ids.GenerateTestNodeID() + require.NoError(m.AddStaker(subnetID, nodeID, nil, ids.Empty, 1)) - err := AddWeight(m, subnetID, nodeID, 1) - require.ErrorIs(err, errMissingValidators) + require.NoError(m.AddWeight(subnetID, nodeID, math.MaxUint64-1)) - s := NewSet() - m.Add(subnetID, s) + _, err := m.TotalWeight(subnetID) + require.ErrorIs(err, errTotalWeightNotUint64) +} - err = AddWeight(m, subnetID, nodeID, 1) - require.ErrorIs(err, errMissingValidator) +func TestGetWeight(t *testing.T) { + require := require.New(t) + + m := NewManager() + subnetID := ids.GenerateTestID() + + nodeID := ids.GenerateTestNodeID() + require.Zero(m.GetWeight(subnetID, nodeID)) + + require.NoError(m.AddStaker(subnetID, nodeID, nil, ids.Empty, 1)) - err = Add(m, subnetID, nodeID, nil, ids.Empty, 1) + totalWeight, err := m.TotalWeight(subnetID) require.NoError(err) + require.Equal(uint64(1), totalWeight) +} + +func TestSubsetWeight(t *testing.T) { + require := require.New(t) + + nodeID0 := ids.GenerateTestNodeID() + nodeID1 := ids.GenerateTestNodeID() + nodeID2 := ids.GenerateTestNodeID() + + weight0 := uint64(93) + weight1 := uint64(123) + weight2 := uint64(810) + + subset := set.Of(nodeID0, nodeID1) + + m := NewManager() + subnetID := ids.GenerateTestID() + + require.NoError(m.AddStaker(subnetID, nodeID0, nil, ids.Empty, weight0)) + require.NoError(m.AddStaker(subnetID, nodeID1, nil, ids.Empty, weight1)) + require.NoError(m.AddStaker(subnetID, nodeID2, nil, ids.Empty, weight2)) - err = AddWeight(m, subnetID, nodeID, 1) + expectedWeight := weight0 + weight1 + subsetWeight, err := m.SubsetWeight(subnetID, subset) 
require.NoError(err) + require.Equal(expectedWeight, subsetWeight) +} + +func TestRemoveWeightZeroWeight(t *testing.T) { + require := require.New(t) - weight := s.Weight() - require.EqualValues(2, weight) + m := NewManager() + subnetID := ids.GenerateTestID() + nodeID := ids.GenerateTestNodeID() + require.NoError(m.AddStaker(subnetID, nodeID, nil, ids.Empty, 1)) + + err := m.RemoveWeight(subnetID, nodeID, 0) + require.ErrorIs(err, ErrZeroWeight) } -func TestRemoveWeight(t *testing.T) { +func TestRemoveWeightMissingValidator(t *testing.T) { require := require.New(t) m := NewManager() + subnetID := ids.GenerateTestID() + require.NoError(m.AddStaker(subnetID, ids.GenerateTestNodeID(), nil, ids.Empty, 1)) + + err := m.RemoveWeight(subnetID, ids.GenerateTestNodeID(), 1) + require.ErrorIs(err, errMissingValidator) +} + +func TestRemoveWeightUnderflow(t *testing.T) { + require := require.New(t) + + m := NewManager() subnetID := ids.GenerateTestID() - nodeID := ids.GenerateTestNodeID() - err := RemoveWeight(m, subnetID, nodeID, 1) - require.ErrorIs(err, errMissingValidators) + require.NoError(m.AddStaker(subnetID, ids.GenerateTestNodeID(), nil, ids.Empty, 1)) + + nodeID := ids.GenerateTestNodeID() + require.NoError(m.AddStaker(subnetID, nodeID, nil, ids.Empty, 1)) - s := NewSet() - m.Add(subnetID, s) + err := m.RemoveWeight(subnetID, nodeID, 2) + require.ErrorIs(err, safemath.ErrUnderflow) - err = Add(m, subnetID, nodeID, nil, ids.Empty, 2) + totalWeight, err := m.TotalWeight(subnetID) require.NoError(err) + require.Equal(uint64(2), totalWeight) +} + +func TestGet(t *testing.T) { + require := require.New(t) + + m := NewManager() + subnetID := ids.GenerateTestID() + + nodeID := ids.GenerateTestNodeID() + _, ok := m.GetValidator(subnetID, nodeID) + require.False(ok) - err = RemoveWeight(m, subnetID, nodeID, 1) + sk, err := bls.NewSecretKey() require.NoError(err) - weight := s.Weight() - require.EqualValues(1, weight) + pk := bls.PublicFromSecretKey(sk) + 
require.NoError(m.AddStaker(subnetID, nodeID, pk, ids.Empty, 1)) + + vdr0, ok := m.GetValidator(subnetID, nodeID) + require.True(ok) + require.Equal(nodeID, vdr0.NodeID) + require.Equal(pk, vdr0.PublicKey) + require.Equal(uint64(1), vdr0.Weight) + + require.NoError(m.AddWeight(subnetID, nodeID, 1)) + + vdr1, ok := m.GetValidator(subnetID, nodeID) + require.True(ok) + require.Equal(nodeID, vdr0.NodeID) + require.Equal(pk, vdr0.PublicKey) + require.Equal(uint64(1), vdr0.Weight) + require.Equal(nodeID, vdr1.NodeID) + require.Equal(pk, vdr1.PublicKey) + require.Equal(uint64(2), vdr1.Weight) + + require.NoError(m.RemoveWeight(subnetID, nodeID, 2)) + _, ok = m.GetValidator(subnetID, nodeID) + require.False(ok) +} + +func TestLen(t *testing.T) { + require := require.New(t) + + m := NewManager() + subnetID := ids.GenerateTestID() + + count := m.Count(subnetID) + require.Zero(count) + + nodeID0 := ids.GenerateTestNodeID() + require.NoError(m.AddStaker(subnetID, nodeID0, nil, ids.Empty, 1)) + + count = m.Count(subnetID) + require.Equal(1, count) + + nodeID1 := ids.GenerateTestNodeID() + require.NoError(m.AddStaker(subnetID, nodeID1, nil, ids.Empty, 1)) + + count = m.Count(subnetID) + require.Equal(2, count) + + require.NoError(m.RemoveWeight(subnetID, nodeID1, 1)) - err = RemoveWeight(m, subnetID, nodeID, 1) + count = m.Count(subnetID) + require.Equal(1, count) + + require.NoError(m.RemoveWeight(subnetID, nodeID0, 1)) + + count = m.Count(subnetID) + require.Zero(count) +} + +func TestGetMap(t *testing.T) { + require := require.New(t) + + m := NewManager() + subnetID := ids.GenerateTestID() + + mp := m.GetMap(subnetID) + require.Empty(mp) + + sk, err := bls.NewSecretKey() require.NoError(err) - weight = s.Weight() - require.Zero(weight) + pk := bls.PublicFromSecretKey(sk) + nodeID0 := ids.GenerateTestNodeID() + require.NoError(m.AddStaker(subnetID, nodeID0, pk, ids.Empty, 2)) + + mp = m.GetMap(subnetID) + require.Len(mp, 1) + require.Contains(mp, nodeID0) + + node0 := 
mp[nodeID0] + require.Equal(nodeID0, node0.NodeID) + require.Equal(pk, node0.PublicKey) + require.Equal(uint64(2), node0.Weight) + + nodeID1 := ids.GenerateTestNodeID() + require.NoError(m.AddStaker(subnetID, nodeID1, nil, ids.Empty, 1)) + + mp = m.GetMap(subnetID) + require.Len(mp, 2) + require.Contains(mp, nodeID0) + require.Contains(mp, nodeID1) + + node0 = mp[nodeID0] + require.Equal(nodeID0, node0.NodeID) + require.Equal(pk, node0.PublicKey) + require.Equal(uint64(2), node0.Weight) + + node1 := mp[nodeID1] + require.Equal(nodeID1, node1.NodeID) + require.Nil(node1.PublicKey) + require.Equal(uint64(1), node1.Weight) + + require.NoError(m.RemoveWeight(subnetID, nodeID0, 1)) + require.Equal(nodeID0, node0.NodeID) + require.Equal(pk, node0.PublicKey) + require.Equal(uint64(2), node0.Weight) + + mp = m.GetMap(subnetID) + require.Len(mp, 2) + require.Contains(mp, nodeID0) + require.Contains(mp, nodeID1) + + node0 = mp[nodeID0] + require.Equal(nodeID0, node0.NodeID) + require.Equal(pk, node0.PublicKey) + require.Equal(uint64(1), node0.Weight) + + node1 = mp[nodeID1] + require.Equal(nodeID1, node1.NodeID) + require.Nil(node1.PublicKey) + require.Equal(uint64(1), node1.Weight) + + require.NoError(m.RemoveWeight(subnetID, nodeID0, 1)) + + mp = m.GetMap(subnetID) + require.Len(mp, 1) + require.Contains(mp, nodeID1) + + node1 = mp[nodeID1] + require.Equal(nodeID1, node1.NodeID) + require.Nil(node1.PublicKey) + require.Equal(uint64(1), node1.Weight) + + require.NoError(m.RemoveWeight(subnetID, nodeID1, 1)) + + require.Empty(m.GetMap(subnetID)) } -func TestContains(t *testing.T) { +func TestWeight(t *testing.T) { require := require.New(t) + vdr0 := ids.BuildTestNodeID([]byte{1}) + weight0 := uint64(93) + vdr1 := ids.BuildTestNodeID([]byte{2}) + weight1 := uint64(123) + m := NewManager() + subnetID := ids.GenerateTestID() + require.NoError(m.AddStaker(subnetID, vdr0, nil, ids.Empty, weight0)) + + require.NoError(m.AddStaker(subnetID, vdr1, nil, ids.Empty, weight1)) + + 
setWeight, err := m.TotalWeight(subnetID) + require.NoError(err) + expectedWeight := weight0 + weight1 + require.Equal(expectedWeight, setWeight) +} + +func TestSample(t *testing.T) { + require := require.New(t) + m := NewManager() subnetID := ids.GenerateTestID() - nodeID := ids.GenerateTestNodeID() - contains := Contains(m, subnetID, nodeID) - require.False(contains) + sampled, err := m.Sample(subnetID, 0) + require.NoError(err) + require.Empty(sampled) + + sk, err := bls.NewSecretKey() + require.NoError(err) + + nodeID0 := ids.GenerateTestNodeID() + pk := bls.PublicFromSecretKey(sk) + require.NoError(m.AddStaker(subnetID, nodeID0, pk, ids.Empty, 1)) + + sampled, err = m.Sample(subnetID, 1) + require.NoError(err) + require.Equal([]ids.NodeID{nodeID0}, sampled) + + _, err = m.Sample(subnetID, 2) + require.ErrorIs(err, sampler.ErrOutOfRange) + + nodeID1 := ids.GenerateTestNodeID() + require.NoError(m.AddStaker(subnetID, nodeID1, nil, ids.Empty, math.MaxInt64-1)) + + sampled, err = m.Sample(subnetID, 1) + require.NoError(err) + require.Equal([]ids.NodeID{nodeID1}, sampled) + + sampled, err = m.Sample(subnetID, 2) + require.NoError(err) + require.Equal([]ids.NodeID{nodeID1, nodeID1}, sampled) + + sampled, err = m.Sample(subnetID, 3) + require.NoError(err) + require.Equal([]ids.NodeID{nodeID1, nodeID1, nodeID1}, sampled) +} - s := NewSet() - m.Add(subnetID, s) +func TestString(t *testing.T) { + require := require.New(t) - contains = Contains(m, subnetID, nodeID) - require.False(contains) + nodeID0 := ids.EmptyNodeID + nodeID1, err := ids.NodeIDFromString("NodeID-QLbz7JHiBTspS962RLKV8GndWFwdYhk6V") + require.NoError(err) - err := Add(m, subnetID, nodeID, nil, ids.Empty, 1) + subnetID0, err := ids.FromString("TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES") + require.NoError(err) + subnetID1, err := ids.FromString("2mcwQKiD8VEspmMJpL1dc7okQQ5dDVAWeCBZ7FWBFAbxpv3t7w") require.NoError(err) - contains = Contains(m, subnetID, nodeID) - require.True(contains) + m := 
NewManager() + require.NoError(m.AddStaker(subnetID0, nodeID0, nil, ids.Empty, 1)) + require.NoError(m.AddStaker(subnetID0, nodeID1, nil, ids.Empty, math.MaxInt64-1)) + require.NoError(m.AddStaker(subnetID1, nodeID1, nil, ids.Empty, 1)) + + expected := `Validator Manager: (Size = 2) + Subnet[TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES]: Validator Set: (Size = 2, Weight = 9223372036854775807) + Validator[0]: NodeID-111111111111111111116DBWJs, 1 + Validator[1]: NodeID-QLbz7JHiBTspS962RLKV8GndWFwdYhk6V, 9223372036854775806 + Subnet[2mcwQKiD8VEspmMJpL1dc7okQQ5dDVAWeCBZ7FWBFAbxpv3t7w]: Validator Set: (Size = 1, Weight = 1) + Validator[0]: NodeID-QLbz7JHiBTspS962RLKV8GndWFwdYhk6V, 1` + result := m.String() + require.Equal(expected, result) +} + +func TestAddCallback(t *testing.T) { + require := require.New(t) - err = RemoveWeight(m, subnetID, nodeID, 1) + nodeID0 := ids.BuildTestNodeID([]byte{1}) + sk0, err := bls.NewSecretKey() require.NoError(err) + pk0 := bls.PublicFromSecretKey(sk0) + txID0 := ids.GenerateTestID() + weight0 := uint64(1) + + m := NewManager() + subnetID := ids.GenerateTestID() + callCount := 0 + m.RegisterCallbackListener(subnetID, &callbackListener{ + t: t, + onAdd: func(nodeID ids.NodeID, pk *bls.PublicKey, txID ids.ID, weight uint64) { + require.Equal(nodeID0, nodeID) + require.Equal(pk0, pk) + require.Equal(txID0, txID) + require.Equal(weight0, weight) + callCount++ + }, + }) + require.NoError(m.AddStaker(subnetID, nodeID0, pk0, txID0, weight0)) + // setup another subnetID + subnetID2 := ids.GenerateTestID() + require.NoError(m.AddStaker(subnetID2, nodeID0, nil, txID0, weight0)) + // should not be called for subnetID2 + require.Equal(1, callCount) +} + +func TestAddWeightCallback(t *testing.T) { + require := require.New(t) + + nodeID0 := ids.BuildTestNodeID([]byte{1}) + txID0 := ids.GenerateTestID() + weight0 := uint64(1) + weight1 := uint64(93) + + m := NewManager() + subnetID := ids.GenerateTestID() + require.NoError(m.AddStaker(subnetID, 
nodeID0, nil, txID0, weight0)) + + callCount := 0 + m.RegisterCallbackListener(subnetID, &callbackListener{ + t: t, + onAdd: func(nodeID ids.NodeID, pk *bls.PublicKey, txID ids.ID, weight uint64) { + require.Equal(nodeID0, nodeID) + require.Nil(pk) + require.Equal(txID0, txID) + require.Equal(weight0, weight) + callCount++ + }, + onWeight: func(nodeID ids.NodeID, oldWeight, newWeight uint64) { + require.Equal(nodeID0, nodeID) + require.Equal(weight0, oldWeight) + require.Equal(weight0+weight1, newWeight) + callCount++ + }, + }) + require.NoError(m.AddWeight(subnetID, nodeID0, weight1)) + // setup another subnetID + subnetID2 := ids.GenerateTestID() + require.NoError(m.AddStaker(subnetID2, nodeID0, nil, txID0, weight0)) + require.NoError(m.AddWeight(subnetID2, nodeID0, weight1)) + // should not be called for subnetID2 + require.Equal(2, callCount) +} + +func TestRemoveWeightCallback(t *testing.T) { + require := require.New(t) - contains = Contains(m, subnetID, nodeID) - require.False(contains) + nodeID0 := ids.BuildTestNodeID([]byte{1}) + txID0 := ids.GenerateTestID() + weight0 := uint64(93) + weight1 := uint64(92) + + m := NewManager() + subnetID := ids.GenerateTestID() + require.NoError(m.AddStaker(subnetID, nodeID0, nil, txID0, weight0)) + + callCount := 0 + m.RegisterCallbackListener(subnetID, &callbackListener{ + t: t, + onAdd: func(nodeID ids.NodeID, pk *bls.PublicKey, txID ids.ID, weight uint64) { + require.Equal(nodeID0, nodeID) + require.Nil(pk) + require.Equal(txID0, txID) + require.Equal(weight0, weight) + callCount++ + }, + onWeight: func(nodeID ids.NodeID, oldWeight, newWeight uint64) { + require.Equal(nodeID0, nodeID) + require.Equal(weight0, oldWeight) + require.Equal(weight0-weight1, newWeight) + callCount++ + }, + }) + require.NoError(m.RemoveWeight(subnetID, nodeID0, weight1)) + // setup another subnetID + subnetID2 := ids.GenerateTestID() + require.NoError(m.AddStaker(subnetID2, nodeID0, nil, txID0, weight0)) + 
require.NoError(m.RemoveWeight(subnetID2, nodeID0, weight1)) + // should not be called for subnetID2 + require.Equal(2, callCount) +} + +func TestValidatorRemovedCallback(t *testing.T) { + require := require.New(t) + + nodeID0 := ids.BuildTestNodeID([]byte{1}) + txID0 := ids.GenerateTestID() + weight0 := uint64(93) + + m := NewManager() + subnetID := ids.GenerateTestID() + require.NoError(m.AddStaker(subnetID, nodeID0, nil, txID0, weight0)) + + callCount := 0 + m.RegisterCallbackListener(subnetID, &callbackListener{ + t: t, + onAdd: func(nodeID ids.NodeID, pk *bls.PublicKey, txID ids.ID, weight uint64) { + require.Equal(nodeID0, nodeID) + require.Nil(pk) + require.Equal(txID0, txID) + require.Equal(weight0, weight) + callCount++ + }, + onRemoved: func(nodeID ids.NodeID, weight uint64) { + require.Equal(nodeID0, nodeID) + require.Equal(weight0, weight) + callCount++ + }, + }) + require.NoError(m.RemoveWeight(subnetID, nodeID0, weight0)) + // setup another subnetID + subnetID2 := ids.GenerateTestID() + require.NoError(m.AddStaker(subnetID2, nodeID0, nil, txID0, weight0)) + require.NoError(m.AddWeight(subnetID2, nodeID0, weight0)) + // should not be called for subnetID2 + require.Equal(2, callCount) } diff --git a/avalanchego/snow/validators/mock_manager.go b/avalanchego/snow/validators/mock_manager.go index ef92abc2..b622ba11 100644 --- a/avalanchego/snow/validators/mock_manager.go +++ b/avalanchego/snow/validators/mock_manager.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. -// Source: github.com/ava-labs/avalanchego/snow/validators (interfaces: Manager) +// Source: snow/validators/manager.go +// +// Generated by this command: +// +// mockgen -source=snow/validators/manager.go -destination=snow/validators/mock_manager.go -package=validators -exclude_interfaces=SetCallbackListener +// // Package validators is a generated GoMock package. 
package validators @@ -11,7 +13,9 @@ import ( reflect "reflect" ids "github.com/ava-labs/avalanchego/ids" - gomock "github.com/golang/mock/gomock" + bls "github.com/ava-labs/avalanchego/utils/crypto/bls" + set "github.com/ava-labs/avalanchego/utils/set" + gomock "go.uber.org/mock/gomock" ) // MockManager is a mock of Manager interface. @@ -37,33 +41,144 @@ func (m *MockManager) EXPECT() *MockManagerMockRecorder { return m.recorder } -// Add mocks base method. -func (m *MockManager) Add(arg0 ids.ID, arg1 Set) bool { +// AddStaker mocks base method. +func (m *MockManager) AddStaker(subnetID ids.ID, nodeID ids.NodeID, pk *bls.PublicKey, txID ids.ID, weight uint64) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AddStaker", subnetID, nodeID, pk, txID, weight) + ret0, _ := ret[0].(error) + return ret0 +} + +// AddStaker indicates an expected call of AddStaker. +func (mr *MockManagerMockRecorder) AddStaker(subnetID, nodeID, pk, txID, weight any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddStaker", reflect.TypeOf((*MockManager)(nil).AddStaker), subnetID, nodeID, pk, txID, weight) +} + +// AddWeight mocks base method. +func (m *MockManager) AddWeight(subnetID ids.ID, nodeID ids.NodeID, weight uint64) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AddWeight", subnetID, nodeID, weight) + ret0, _ := ret[0].(error) + return ret0 +} + +// AddWeight indicates an expected call of AddWeight. +func (mr *MockManagerMockRecorder) AddWeight(subnetID, nodeID, weight any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddWeight", reflect.TypeOf((*MockManager)(nil).AddWeight), subnetID, nodeID, weight) +} + +// Count mocks base method. 
+func (m *MockManager) Count(subnetID ids.ID) int { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Add", arg0, arg1) - ret0, _ := ret[0].(bool) + ret := m.ctrl.Call(m, "Count", subnetID) + ret0, _ := ret[0].(int) return ret0 } -// Add indicates an expected call of Add. -func (mr *MockManagerMockRecorder) Add(arg0, arg1 interface{}) *gomock.Call { +// Count indicates an expected call of Count. +func (mr *MockManagerMockRecorder) Count(subnetID any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Add", reflect.TypeOf((*MockManager)(nil).Add), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Count", reflect.TypeOf((*MockManager)(nil).Count), subnetID) } -// Get mocks base method. -func (m *MockManager) Get(arg0 ids.ID) (Set, bool) { +// GetMap mocks base method. +func (m *MockManager) GetMap(subnetID ids.ID) map[ids.NodeID]*GetValidatorOutput { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Get", arg0) - ret0, _ := ret[0].(Set) + ret := m.ctrl.Call(m, "GetMap", subnetID) + ret0, _ := ret[0].(map[ids.NodeID]*GetValidatorOutput) + return ret0 +} + +// GetMap indicates an expected call of GetMap. +func (mr *MockManagerMockRecorder) GetMap(subnetID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMap", reflect.TypeOf((*MockManager)(nil).GetMap), subnetID) +} + +// GetValidator mocks base method. +func (m *MockManager) GetValidator(subnetID ids.ID, nodeID ids.NodeID) (*Validator, bool) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetValidator", subnetID, nodeID) + ret0, _ := ret[0].(*Validator) ret1, _ := ret[1].(bool) return ret0, ret1 } -// Get indicates an expected call of Get. -func (mr *MockManagerMockRecorder) Get(arg0 interface{}) *gomock.Call { +// GetValidator indicates an expected call of GetValidator. 
+func (mr *MockManagerMockRecorder) GetValidator(subnetID, nodeID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetValidator", reflect.TypeOf((*MockManager)(nil).GetValidator), subnetID, nodeID) +} + +// GetValidatorIDs mocks base method. +func (m *MockManager) GetValidatorIDs(subnetID ids.ID) []ids.NodeID { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetValidatorIDs", subnetID) + ret0, _ := ret[0].([]ids.NodeID) + return ret0 +} + +// GetValidatorIDs indicates an expected call of GetValidatorIDs. +func (mr *MockManagerMockRecorder) GetValidatorIDs(subnetID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetValidatorIDs", reflect.TypeOf((*MockManager)(nil).GetValidatorIDs), subnetID) +} + +// GetWeight mocks base method. +func (m *MockManager) GetWeight(subnetID ids.ID, nodeID ids.NodeID) uint64 { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetWeight", subnetID, nodeID) + ret0, _ := ret[0].(uint64) + return ret0 +} + +// GetWeight indicates an expected call of GetWeight. +func (mr *MockManagerMockRecorder) GetWeight(subnetID, nodeID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetWeight", reflect.TypeOf((*MockManager)(nil).GetWeight), subnetID, nodeID) +} + +// RegisterCallbackListener mocks base method. +func (m *MockManager) RegisterCallbackListener(subnetID ids.ID, listener SetCallbackListener) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "RegisterCallbackListener", subnetID, listener) +} + +// RegisterCallbackListener indicates an expected call of RegisterCallbackListener. 
+func (mr *MockManagerMockRecorder) RegisterCallbackListener(subnetID, listener any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterCallbackListener", reflect.TypeOf((*MockManager)(nil).RegisterCallbackListener), subnetID, listener) +} + +// RemoveWeight mocks base method. +func (m *MockManager) RemoveWeight(subnetID ids.ID, nodeID ids.NodeID, weight uint64) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RemoveWeight", subnetID, nodeID, weight) + ret0, _ := ret[0].(error) + return ret0 +} + +// RemoveWeight indicates an expected call of RemoveWeight. +func (mr *MockManagerMockRecorder) RemoveWeight(subnetID, nodeID, weight any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveWeight", reflect.TypeOf((*MockManager)(nil).RemoveWeight), subnetID, nodeID, weight) +} + +// Sample mocks base method. +func (m *MockManager) Sample(subnetID ids.ID, size int) ([]ids.NodeID, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Sample", subnetID, size) + ret0, _ := ret[0].([]ids.NodeID) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Sample indicates an expected call of Sample. +func (mr *MockManagerMockRecorder) Sample(subnetID, size any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockManager)(nil).Get), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Sample", reflect.TypeOf((*MockManager)(nil).Sample), subnetID, size) } // String mocks base method. @@ -79,3 +194,33 @@ func (mr *MockManagerMockRecorder) String() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "String", reflect.TypeOf((*MockManager)(nil).String)) } + +// SubsetWeight mocks base method. 
+func (m *MockManager) SubsetWeight(subnetID ids.ID, validatorIDs set.Set[ids.NodeID]) (uint64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SubsetWeight", subnetID, validatorIDs) + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SubsetWeight indicates an expected call of SubsetWeight. +func (mr *MockManagerMockRecorder) SubsetWeight(subnetID, validatorIDs any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubsetWeight", reflect.TypeOf((*MockManager)(nil).SubsetWeight), subnetID, validatorIDs) +} + +// TotalWeight mocks base method. +func (m *MockManager) TotalWeight(subnetID ids.ID) (uint64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "TotalWeight", subnetID) + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// TotalWeight indicates an expected call of TotalWeight. +func (mr *MockManagerMockRecorder) TotalWeight(subnetID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TotalWeight", reflect.TypeOf((*MockManager)(nil).TotalWeight), subnetID) +} diff --git a/avalanchego/snow/validators/mock_set.go b/avalanchego/snow/validators/mock_set.go deleted file mode 100644 index 29b7fbcd..00000000 --- a/avalanchego/snow/validators/mock_set.go +++ /dev/null @@ -1,236 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/ava-labs/avalanchego/snow/validators (interfaces: Set) - -// Package validators is a generated GoMock package. -package validators - -import ( - reflect "reflect" - - ids "github.com/ava-labs/avalanchego/ids" - bls "github.com/ava-labs/avalanchego/utils/crypto/bls" - set "github.com/ava-labs/avalanchego/utils/set" - gomock "github.com/golang/mock/gomock" -) - -// MockSet is a mock of Set interface. 
-type MockSet struct { - ctrl *gomock.Controller - recorder *MockSetMockRecorder -} - -// MockSetMockRecorder is the mock recorder for MockSet. -type MockSetMockRecorder struct { - mock *MockSet -} - -// NewMockSet creates a new mock instance. -func NewMockSet(ctrl *gomock.Controller) *MockSet { - mock := &MockSet{ctrl: ctrl} - mock.recorder = &MockSetMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockSet) EXPECT() *MockSetMockRecorder { - return m.recorder -} - -// Add mocks base method. -func (m *MockSet) Add(arg0 ids.NodeID, arg1 *bls.PublicKey, arg2 ids.ID, arg3 uint64) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Add", arg0, arg1, arg2, arg3) - ret0, _ := ret[0].(error) - return ret0 -} - -// Add indicates an expected call of Add. -func (mr *MockSetMockRecorder) Add(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Add", reflect.TypeOf((*MockSet)(nil).Add), arg0, arg1, arg2, arg3) -} - -// AddWeight mocks base method. -func (m *MockSet) AddWeight(arg0 ids.NodeID, arg1 uint64) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AddWeight", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// AddWeight indicates an expected call of AddWeight. -func (mr *MockSetMockRecorder) AddWeight(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddWeight", reflect.TypeOf((*MockSet)(nil).AddWeight), arg0, arg1) -} - -// Contains mocks base method. -func (m *MockSet) Contains(arg0 ids.NodeID) bool { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Contains", arg0) - ret0, _ := ret[0].(bool) - return ret0 -} - -// Contains indicates an expected call of Contains. 
-func (mr *MockSetMockRecorder) Contains(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Contains", reflect.TypeOf((*MockSet)(nil).Contains), arg0) -} - -// Get mocks base method. -func (m *MockSet) Get(arg0 ids.NodeID) (*Validator, bool) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Get", arg0) - ret0, _ := ret[0].(*Validator) - ret1, _ := ret[1].(bool) - return ret0, ret1 -} - -// Get indicates an expected call of Get. -func (mr *MockSetMockRecorder) Get(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockSet)(nil).Get), arg0) -} - -// GetWeight mocks base method. -func (m *MockSet) GetWeight(arg0 ids.NodeID) uint64 { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetWeight", arg0) - ret0, _ := ret[0].(uint64) - return ret0 -} - -// GetWeight indicates an expected call of GetWeight. -func (mr *MockSetMockRecorder) GetWeight(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetWeight", reflect.TypeOf((*MockSet)(nil).GetWeight), arg0) -} - -// Len mocks base method. -func (m *MockSet) Len() int { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Len") - ret0, _ := ret[0].(int) - return ret0 -} - -// Len indicates an expected call of Len. -func (mr *MockSetMockRecorder) Len() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Len", reflect.TypeOf((*MockSet)(nil).Len)) -} - -// List mocks base method. -func (m *MockSet) List() []*Validator { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "List") - ret0, _ := ret[0].([]*Validator) - return ret0 -} - -// List indicates an expected call of List. 
-func (mr *MockSetMockRecorder) List() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockSet)(nil).List)) -} - -// PrefixedString mocks base method. -func (m *MockSet) PrefixedString(arg0 string) string { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PrefixedString", arg0) - ret0, _ := ret[0].(string) - return ret0 -} - -// PrefixedString indicates an expected call of PrefixedString. -func (mr *MockSetMockRecorder) PrefixedString(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PrefixedString", reflect.TypeOf((*MockSet)(nil).PrefixedString), arg0) -} - -// RegisterCallbackListener mocks base method. -func (m *MockSet) RegisterCallbackListener(arg0 SetCallbackListener) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "RegisterCallbackListener", arg0) -} - -// RegisterCallbackListener indicates an expected call of RegisterCallbackListener. -func (mr *MockSetMockRecorder) RegisterCallbackListener(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterCallbackListener", reflect.TypeOf((*MockSet)(nil).RegisterCallbackListener), arg0) -} - -// RemoveWeight mocks base method. -func (m *MockSet) RemoveWeight(arg0 ids.NodeID, arg1 uint64) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RemoveWeight", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// RemoveWeight indicates an expected call of RemoveWeight. -func (mr *MockSetMockRecorder) RemoveWeight(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveWeight", reflect.TypeOf((*MockSet)(nil).RemoveWeight), arg0, arg1) -} - -// Sample mocks base method. 
-func (m *MockSet) Sample(arg0 int) ([]ids.NodeID, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Sample", arg0) - ret0, _ := ret[0].([]ids.NodeID) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Sample indicates an expected call of Sample. -func (mr *MockSetMockRecorder) Sample(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Sample", reflect.TypeOf((*MockSet)(nil).Sample), arg0) -} - -// String mocks base method. -func (m *MockSet) String() string { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "String") - ret0, _ := ret[0].(string) - return ret0 -} - -// String indicates an expected call of String. -func (mr *MockSetMockRecorder) String() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "String", reflect.TypeOf((*MockSet)(nil).String)) -} - -// SubsetWeight mocks base method. -func (m *MockSet) SubsetWeight(arg0 set.Set[ids.NodeID]) uint64 { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SubsetWeight", arg0) - ret0, _ := ret[0].(uint64) - return ret0 -} - -// SubsetWeight indicates an expected call of SubsetWeight. -func (mr *MockSetMockRecorder) SubsetWeight(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubsetWeight", reflect.TypeOf((*MockSet)(nil).SubsetWeight), arg0) -} - -// Weight mocks base method. -func (m *MockSet) Weight() uint64 { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Weight") - ret0, _ := ret[0].(uint64) - return ret0 -} - -// Weight indicates an expected call of Weight. 
-func (mr *MockSetMockRecorder) Weight() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Weight", reflect.TypeOf((*MockSet)(nil).Weight)) -} diff --git a/avalanchego/snow/validators/mock_state.go b/avalanchego/snow/validators/mock_state.go index 6efc0896..6bed638b 100644 --- a/avalanchego/snow/validators/mock_state.go +++ b/avalanchego/snow/validators/mock_state.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/snow/validators (interfaces: State) +// +// Generated by this command: +// +// mockgen -package=validators -destination=snow/validators/mock_state.go github.com/ava-labs/avalanchego/snow/validators State +// // Package validators is a generated GoMock package. package validators @@ -12,7 +14,7 @@ import ( reflect "reflect" ids "github.com/ava-labs/avalanchego/ids" - gomock "github.com/golang/mock/gomock" + gomock "go.uber.org/mock/gomock" ) // MockState is a mock of State interface. @@ -48,7 +50,7 @@ func (m *MockState) GetCurrentHeight(arg0 context.Context) (uint64, error) { } // GetCurrentHeight indicates an expected call of GetCurrentHeight. -func (mr *MockStateMockRecorder) GetCurrentHeight(arg0 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) GetCurrentHeight(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentHeight", reflect.TypeOf((*MockState)(nil).GetCurrentHeight), arg0) } @@ -63,7 +65,7 @@ func (m *MockState) GetMinimumHeight(arg0 context.Context) (uint64, error) { } // GetMinimumHeight indicates an expected call of GetMinimumHeight. 
-func (mr *MockStateMockRecorder) GetMinimumHeight(arg0 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) GetMinimumHeight(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMinimumHeight", reflect.TypeOf((*MockState)(nil).GetMinimumHeight), arg0) } @@ -78,7 +80,7 @@ func (m *MockState) GetSubnetID(arg0 context.Context, arg1 ids.ID) (ids.ID, erro } // GetSubnetID indicates an expected call of GetSubnetID. -func (mr *MockStateMockRecorder) GetSubnetID(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) GetSubnetID(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSubnetID", reflect.TypeOf((*MockState)(nil).GetSubnetID), arg0, arg1) } @@ -93,7 +95,7 @@ func (m *MockState) GetValidatorSet(arg0 context.Context, arg1 uint64, arg2 ids. } // GetValidatorSet indicates an expected call of GetValidatorSet. -func (mr *MockStateMockRecorder) GetValidatorSet(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) GetValidatorSet(arg0, arg1, arg2 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetValidatorSet", reflect.TypeOf((*MockState)(nil).GetValidatorSet), arg0, arg1, arg2) } diff --git a/avalanchego/snow/validators/mock_subnet_connector.go b/avalanchego/snow/validators/mock_subnet_connector.go index 2f8cd3bf..b9f3ee05 100644 --- a/avalanchego/snow/validators/mock_subnet_connector.go +++ b/avalanchego/snow/validators/mock_subnet_connector.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. 
// Source: github.com/ava-labs/avalanchego/snow/validators (interfaces: SubnetConnector) +// +// Generated by this command: +// +// mockgen -package=validators -destination=snow/validators/mock_subnet_connector.go github.com/ava-labs/avalanchego/snow/validators SubnetConnector +// // Package validators is a generated GoMock package. package validators @@ -12,7 +14,7 @@ import ( reflect "reflect" ids "github.com/ava-labs/avalanchego/ids" - gomock "github.com/golang/mock/gomock" + gomock "go.uber.org/mock/gomock" ) // MockSubnetConnector is a mock of SubnetConnector interface. @@ -47,7 +49,7 @@ func (m *MockSubnetConnector) ConnectedSubnet(arg0 context.Context, arg1 ids.Nod } // ConnectedSubnet indicates an expected call of ConnectedSubnet. -func (mr *MockSubnetConnectorMockRecorder) ConnectedSubnet(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockSubnetConnectorMockRecorder) ConnectedSubnet(arg0, arg1, arg2 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ConnectedSubnet", reflect.TypeOf((*MockSubnetConnector)(nil).ConnectedSubnet), arg0, arg1, arg2) } diff --git a/avalanchego/snow/validators/set.go b/avalanchego/snow/validators/set.go index 0e243c49..5e7c81a2 100644 --- a/avalanchego/snow/validators/set.go +++ b/avalanchego/snow/validators/set.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package validators @@ -6,6 +6,7 @@ package validators import ( "errors" "fmt" + "math/big" "strings" "sync" @@ -18,92 +19,17 @@ import ( ) var ( - _ Set = (*vdrSet)(nil) - - errZeroWeight = errors.New("weight must be non-zero") - errDuplicateValidator = errors.New("duplicate validator") - errMissingValidator = errors.New("missing validator") + errDuplicateValidator = errors.New("duplicate validator") + errMissingValidator = errors.New("missing validator") + errTotalWeightNotUint64 = errors.New("total weight is not a uint64") ) -// Set of validators that can be sampled -type Set interface { - formatting.PrefixedStringer - - // Add a new staker to the set. - // Returns an error if: - // - [weight] is 0 - // - [nodeID] is already in the validator set - // - the total weight of the validator set would overflow uint64 - // If an error is returned, the set will be unmodified. - Add(nodeID ids.NodeID, pk *bls.PublicKey, txID ids.ID, weight uint64) error - - // AddWeight to an existing staker. - // Returns an error if: - // - [weight] is 0 - // - [nodeID] is not already in the validator set - // - the total weight of the validator set would overflow uint64 - // If an error is returned, the set will be unmodified. - AddWeight(nodeID ids.NodeID, weight uint64) error - - // GetWeight retrieves the validator weight from the set. - GetWeight(ids.NodeID) uint64 - - // Get returns the validator tied to the specified ID. - Get(ids.NodeID) (*Validator, bool) - - // SubsetWeight returns the sum of the weights of the validators. - SubsetWeight(set.Set[ids.NodeID]) uint64 - - // RemoveWeight from a staker. If the staker's weight becomes 0, the staker - // will be removed from the validator set. - // Returns an error if: - // - [weight] is 0 - // - [nodeID] is not already in the validator set - // - the weight of the validator would become negative - // If an error is returned, the set will be unmodified. 
- RemoveWeight(nodeID ids.NodeID, weight uint64) error - - // Contains returns true if there is a validator with the specified ID - // currently in the set. - Contains(ids.NodeID) bool - - // Len returns the number of validators currently in the set. - Len() int - - // List all the validators in this group - List() []*Validator - - // Weight returns the cumulative weight of all validators in the set. - Weight() uint64 - - // Sample returns a collection of validatorIDs, potentially with duplicates. - // If sampling the requested size isn't possible, an error will be returned. - Sample(size int) ([]ids.NodeID, error) - - // When a validator's weight changes, or a validator is added/removed, - // this listener is called. - RegisterCallbackListener(SetCallbackListener) -} - -type SetCallbackListener interface { - OnValidatorAdded(validatorID ids.NodeID, pk *bls.PublicKey, txID ids.ID, weight uint64) - OnValidatorRemoved(validatorID ids.NodeID, weight uint64) - OnValidatorWeightChanged(validatorID ids.NodeID, oldWeight, newWeight uint64) -} - -// NewSet returns a new, empty set of validators. -func NewSet() Set { +// newSet returns a new, empty set of validators. +func newSet() *vdrSet { return &vdrSet{ - vdrs: make(map[ids.NodeID]*Validator), - sampler: sampler.NewWeightedWithoutReplacement(), - } -} - -// NewBestSet returns a new, empty set of validators. 
-func NewBestSet(expectedSampleSize int) Set { - return &vdrSet{ - vdrs: make(map[ids.NodeID]*Validator), - sampler: sampler.NewBestWeightedWithoutReplacement(expectedSampleSize), + vdrs: make(map[ids.NodeID]*Validator), + sampler: sampler.NewWeightedWithoutReplacement(), + totalWeight: new(big.Int), } } @@ -112,7 +38,7 @@ type vdrSet struct { vdrs map[ids.NodeID]*Validator vdrSlice []*Validator weights []uint64 - totalWeight uint64 + totalWeight *big.Int samplerInitialized bool sampler sampler.WeightedWithoutReplacement @@ -121,10 +47,6 @@ type vdrSet struct { } func (s *vdrSet) Add(nodeID ids.NodeID, pk *bls.PublicKey, txID ids.ID, weight uint64) error { - if weight == 0 { - return errZeroWeight - } - s.lock.Lock() defer s.lock.Unlock() @@ -137,13 +59,6 @@ func (s *vdrSet) add(nodeID ids.NodeID, pk *bls.PublicKey, txID ids.ID, weight u return errDuplicateValidator } - // We first calculate the new total weight of the set, as this guarantees - // that none of the following operations can overflow. 
- newTotalWeight, err := math.Add64(s.totalWeight, weight) - if err != nil { - return err - } - vdr := &Validator{ NodeID: nodeID, PublicKey: pk, @@ -154,7 +69,7 @@ func (s *vdrSet) add(nodeID ids.NodeID, pk *bls.PublicKey, txID ids.ID, weight u s.vdrs[nodeID] = vdr s.vdrSlice = append(s.vdrSlice, vdr) s.weights = append(s.weights, weight) - s.totalWeight = newTotalWeight + s.totalWeight.Add(s.totalWeight, new(big.Int).SetUint64(weight)) s.samplerInitialized = false s.callValidatorAddedCallbacks(nodeID, pk, txID, weight) @@ -162,10 +77,6 @@ func (s *vdrSet) add(nodeID ids.NodeID, pk *bls.PublicKey, txID ids.ID, weight u } func (s *vdrSet) AddWeight(nodeID ids.NodeID, weight uint64) error { - if weight == 0 { - return errZeroWeight - } - s.lock.Lock() defer s.lock.Unlock() @@ -178,17 +89,14 @@ func (s *vdrSet) addWeight(nodeID ids.NodeID, weight uint64) error { return errMissingValidator } - // We first calculate the new total weight of the set, as this guarantees - // that none of the following operations can overflow. 
- newTotalWeight, err := math.Add64(s.totalWeight, weight) + oldWeight := vdr.Weight + newWeight, err := math.Add64(oldWeight, weight) if err != nil { return err } - - oldWeight := vdr.Weight - vdr.Weight += weight - s.weights[vdr.index] += weight - s.totalWeight = newTotalWeight + vdr.Weight = newWeight + s.weights[vdr.index] = newWeight + s.totalWeight.Add(s.totalWeight, new(big.Int).SetUint64(weight)) s.samplerInitialized = false s.callWeightChangeCallbacks(nodeID, oldWeight, vdr.Weight) @@ -209,28 +117,28 @@ func (s *vdrSet) getWeight(nodeID ids.NodeID) uint64 { return 0 } -func (s *vdrSet) SubsetWeight(subset set.Set[ids.NodeID]) uint64 { +func (s *vdrSet) SubsetWeight(subset set.Set[ids.NodeID]) (uint64, error) { s.lock.RLock() defer s.lock.RUnlock() return s.subsetWeight(subset) } -func (s *vdrSet) subsetWeight(subset set.Set[ids.NodeID]) uint64 { - var totalWeight uint64 +func (s *vdrSet) subsetWeight(subset set.Set[ids.NodeID]) (uint64, error) { + var ( + totalWeight uint64 + err error + ) for nodeID := range subset { - // Because [totalWeight] will be <= [s.totalWeight], we are guaranteed - // this will not overflow. 
- totalWeight += s.getWeight(nodeID) + totalWeight, err = math.Add64(totalWeight, s.getWeight(nodeID)) + if err != nil { + return 0, err + } } - return totalWeight + return totalWeight, nil } func (s *vdrSet) RemoveWeight(nodeID ids.NodeID, weight uint64) error { - if weight == 0 { - return errZeroWeight - } - s.lock.Lock() defer s.lock.Unlock() @@ -274,7 +182,7 @@ func (s *vdrSet) removeWeight(nodeID ids.NodeID, weight uint64) error { s.callWeightChangeCallbacks(nodeID, oldWeight, newWeight) } - s.totalWeight -= weight + s.totalWeight.Sub(s.totalWeight, new(big.Int).SetUint64(weight)) s.samplerInitialized = false return nil } @@ -295,18 +203,6 @@ func (s *vdrSet) get(nodeID ids.NodeID) (*Validator, bool) { return &copiedVdr, true } -func (s *vdrSet) Contains(nodeID ids.NodeID) bool { - s.lock.RLock() - defer s.lock.RUnlock() - - return s.contains(nodeID) -} - -func (s *vdrSet) contains(nodeID ids.NodeID) bool { - _, contains := s.vdrs[nodeID] - return contains -} - func (s *vdrSet) Len() int { s.lock.RLock() defer s.lock.RUnlock() @@ -318,27 +214,29 @@ func (s *vdrSet) len() int { return len(s.vdrSlice) } -func (s *vdrSet) List() []*Validator { +func (s *vdrSet) HasCallbackRegistered() bool { s.lock.RLock() defer s.lock.RUnlock() - return s.list() + return len(s.callbackListeners) > 0 } -func (s *vdrSet) list() []*Validator { - list := make([]*Validator, len(s.vdrSlice)) - for i, vdr := range s.vdrSlice { - copiedVdr := *vdr - list[i] = &copiedVdr +func (s *vdrSet) Map() map[ids.NodeID]*GetValidatorOutput { + s.lock.RLock() + defer s.lock.RUnlock() + + set := make(map[ids.NodeID]*GetValidatorOutput, len(s.vdrSlice)) + for _, vdr := range s.vdrSlice { + set[vdr.NodeID] = &GetValidatorOutput{ + NodeID: vdr.NodeID, + PublicKey: vdr.PublicKey, + Weight: vdr.Weight, + } } - return list + return set } func (s *vdrSet) Sample(size int) ([]ids.NodeID, error) { - if size == 0 { - return nil, nil - } - s.lock.Lock() defer s.lock.Unlock() @@ -365,11 +263,15 @@ func (s 
*vdrSet) sample(size int) ([]ids.NodeID, error) { return list, nil } -func (s *vdrSet) Weight() uint64 { +func (s *vdrSet) TotalWeight() (uint64, error) { s.lock.RLock() defer s.lock.RUnlock() - return s.totalWeight + if !s.totalWeight.IsUint64() { + return 0, fmt.Errorf("%w, total weight: %s", errTotalWeightNotUint64, s.totalWeight) + } + + return s.totalWeight.Uint64(), nil } func (s *vdrSet) String() string { @@ -433,3 +335,14 @@ func (s *vdrSet) callValidatorRemovedCallbacks(node ids.NodeID, weight uint64) { callbackListener.OnValidatorRemoved(node, weight) } } + +func (s *vdrSet) GetValidatorIDs() []ids.NodeID { + s.lock.RLock() + defer s.lock.RUnlock() + + list := make([]ids.NodeID, len(s.vdrSlice)) + for i, vdr := range s.vdrSlice { + list[i] = vdr.NodeID + } + return list +} diff --git a/avalanchego/snow/validators/set_test.go b/avalanchego/snow/validators/set_test.go index 645a5215..4554f930 100644 --- a/avalanchego/snow/validators/set_test.go +++ b/avalanchego/snow/validators/set_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package validators @@ -11,90 +11,63 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/crypto/bls" + "github.com/ava-labs/avalanchego/utils/sampler" "github.com/ava-labs/avalanchego/utils/set" -) - -func TestSetAddZeroWeight(t *testing.T) { - require := require.New(t) - s := NewSet() - err := s.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 0) - require.ErrorIs(err, errZeroWeight) -} + safemath "github.com/ava-labs/avalanchego/utils/math" +) func TestSetAddDuplicate(t *testing.T) { require := require.New(t) - s := NewSet() + s := newSet() nodeID := ids.GenerateTestNodeID() - err := s.Add(nodeID, nil, ids.Empty, 1) - require.NoError(err) + require.NoError(s.Add(nodeID, nil, ids.Empty, 1)) - err = s.Add(nodeID, nil, ids.Empty, 1) + err := s.Add(nodeID, nil, ids.Empty, 1) require.ErrorIs(err, errDuplicateValidator) } func TestSetAddOverflow(t *testing.T) { require := require.New(t) - s := NewSet() - err := s.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1) - require.NoError(err) + s := newSet() + require.NoError(s.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1)) - err = s.Add(ids.GenerateTestNodeID(), nil, ids.Empty, math.MaxUint64) - require.Error(err) + require.NoError(s.Add(ids.GenerateTestNodeID(), nil, ids.Empty, math.MaxUint64)) - weight := s.Weight() - require.EqualValues(1, weight) -} - -func TestSetAddWeightZeroWeight(t *testing.T) { - require := require.New(t) - - s := NewSet() - - nodeID := ids.GenerateTestNodeID() - err := s.Add(nodeID, nil, ids.Empty, 1) - require.NoError(err) - - err = s.AddWeight(nodeID, 0) - require.ErrorIs(err, errZeroWeight) + _, err := s.TotalWeight() + require.ErrorIs(err, errTotalWeightNotUint64) } func TestSetAddWeightOverflow(t *testing.T) { require := require.New(t) - s := NewSet() + s := newSet() - err := s.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1) - require.NoError(err) + require.NoError(s.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1)) nodeID := ids.GenerateTestNodeID() - err = 
s.Add(nodeID, nil, ids.Empty, 1) - require.NoError(err) + require.NoError(s.Add(nodeID, nil, ids.Empty, 1)) - err = s.AddWeight(nodeID, math.MaxUint64-1) - require.Error(err) + require.NoError(s.AddWeight(nodeID, math.MaxUint64-1)) - weight := s.Weight() - require.EqualValues(2, weight) + _, err := s.TotalWeight() + require.ErrorIs(err, errTotalWeightNotUint64) } func TestSetGetWeight(t *testing.T) { require := require.New(t) - s := NewSet() + s := newSet() nodeID := ids.GenerateTestNodeID() - weight := s.GetWeight(nodeID) - require.Zero(weight) + require.Zero(s.GetWeight(nodeID)) - err := s.Add(nodeID, nil, ids.Empty, 1) - require.NoError(err) + require.NoError(s.Add(nodeID, nil, ids.Empty, 1)) - weight = s.GetWeight(nodeID) - require.EqualValues(1, weight) + require.Equal(uint64(1), s.GetWeight(nodeID)) } func TestSetSubsetWeight(t *testing.T) { @@ -108,74 +81,53 @@ func TestSetSubsetWeight(t *testing.T) { weight1 := uint64(123) weight2 := uint64(810) - subset := set.Set[ids.NodeID]{} - subset.Add(nodeID0) - subset.Add(nodeID1) - - s := NewSet() - - err := s.Add(nodeID0, nil, ids.Empty, weight0) - require.NoError(err) + subset := set.Of(nodeID0, nodeID1) - err = s.Add(nodeID1, nil, ids.Empty, weight1) - require.NoError(err) + s := newSet() - err = s.Add(nodeID2, nil, ids.Empty, weight2) - require.NoError(err) + require.NoError(s.Add(nodeID0, nil, ids.Empty, weight0)) + require.NoError(s.Add(nodeID1, nil, ids.Empty, weight1)) + require.NoError(s.Add(nodeID2, nil, ids.Empty, weight2)) expectedWeight := weight0 + weight1 - subsetWeight := s.SubsetWeight(subset) - require.Equal(expectedWeight, subsetWeight) -} - -func TestSetRemoveWeightZeroWeight(t *testing.T) { - require := require.New(t) - - s := NewSet() - - nodeID := ids.GenerateTestNodeID() - err := s.Add(nodeID, nil, ids.Empty, 1) + subsetWeight, err := s.SubsetWeight(subset) require.NoError(err) - - err = s.RemoveWeight(nodeID, 0) - require.ErrorIs(err, errZeroWeight) + require.Equal(expectedWeight, 
subsetWeight) } func TestSetRemoveWeightMissingValidator(t *testing.T) { require := require.New(t) - s := NewSet() + s := newSet() - err := s.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1) - require.NoError(err) + require.NoError(s.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1)) - err = s.RemoveWeight(ids.GenerateTestNodeID(), 1) + err := s.RemoveWeight(ids.GenerateTestNodeID(), 1) require.ErrorIs(err, errMissingValidator) } func TestSetRemoveWeightUnderflow(t *testing.T) { require := require.New(t) - s := NewSet() + s := newSet() - err := s.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1) - require.NoError(err) + require.NoError(s.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1)) nodeID := ids.GenerateTestNodeID() - err = s.Add(nodeID, nil, ids.Empty, 1) - require.NoError(err) + require.NoError(s.Add(nodeID, nil, ids.Empty, 1)) - err = s.RemoveWeight(nodeID, 2) - require.Error(err) + err := s.RemoveWeight(nodeID, 2) + require.ErrorIs(err, safemath.ErrUnderflow) - weight := s.Weight() - require.EqualValues(2, weight) + totalWeight, err := s.TotalWeight() + require.NoError(err) + require.Equal(uint64(2), totalWeight) } func TestSetGet(t *testing.T) { require := require.New(t) - s := NewSet() + s := newSet() nodeID := ids.GenerateTestNodeID() _, ok := s.Get(nodeID) @@ -185,179 +137,154 @@ func TestSetGet(t *testing.T) { require.NoError(err) pk := bls.PublicFromSecretKey(sk) - err = s.Add(nodeID, pk, ids.Empty, 1) - require.NoError(err) + require.NoError(s.Add(nodeID, pk, ids.Empty, 1)) vdr0, ok := s.Get(nodeID) require.True(ok) require.Equal(nodeID, vdr0.NodeID) require.Equal(pk, vdr0.PublicKey) - require.EqualValues(1, vdr0.Weight) + require.Equal(uint64(1), vdr0.Weight) - err = s.AddWeight(nodeID, 1) - require.NoError(err) + require.NoError(s.AddWeight(nodeID, 1)) vdr1, ok := s.Get(nodeID) require.True(ok) require.Equal(nodeID, vdr0.NodeID) require.Equal(pk, vdr0.PublicKey) - require.EqualValues(1, vdr0.Weight) + require.Equal(uint64(1), vdr0.Weight) 
require.Equal(nodeID, vdr1.NodeID) require.Equal(pk, vdr1.PublicKey) - require.EqualValues(2, vdr1.Weight) -} - -func TestSetContains(t *testing.T) { - require := require.New(t) - - s := NewSet() - - nodeID := ids.GenerateTestNodeID() - contains := s.Contains(nodeID) - require.False(contains) - - err := s.Add(nodeID, nil, ids.Empty, 1) - require.NoError(err) - - contains = s.Contains(nodeID) - require.True(contains) + require.Equal(uint64(2), vdr1.Weight) - err = s.RemoveWeight(nodeID, 1) - require.NoError(err) - - contains = s.Contains(nodeID) - require.False(contains) + require.NoError(s.RemoveWeight(nodeID, 2)) + _, ok = s.Get(nodeID) + require.False(ok) } func TestSetLen(t *testing.T) { require := require.New(t) - s := NewSet() + s := newSet() - len := s.Len() - require.Zero(len) + setLen := s.Len() + require.Zero(setLen) nodeID0 := ids.GenerateTestNodeID() - err := s.Add(nodeID0, nil, ids.Empty, 1) - require.NoError(err) + require.NoError(s.Add(nodeID0, nil, ids.Empty, 1)) - len = s.Len() - require.Equal(1, len) + setLen = s.Len() + require.Equal(1, setLen) nodeID1 := ids.GenerateTestNodeID() - err = s.Add(nodeID1, nil, ids.Empty, 1) - require.NoError(err) + require.NoError(s.Add(nodeID1, nil, ids.Empty, 1)) - len = s.Len() - require.Equal(2, len) + setLen = s.Len() + require.Equal(2, setLen) - err = s.RemoveWeight(nodeID1, 1) - require.NoError(err) + require.NoError(s.RemoveWeight(nodeID1, 1)) - len = s.Len() - require.Equal(1, len) + setLen = s.Len() + require.Equal(1, setLen) - err = s.RemoveWeight(nodeID0, 1) - require.NoError(err) + require.NoError(s.RemoveWeight(nodeID0, 1)) - len = s.Len() - require.Zero(len) + setLen = s.Len() + require.Zero(setLen) } -func TestSetList(t *testing.T) { +func TestSetMap(t *testing.T) { require := require.New(t) - s := NewSet() + s := newSet() - list := s.List() - require.Empty(list) + m := s.Map() + require.Empty(m) sk, err := bls.NewSecretKey() require.NoError(err) pk := bls.PublicFromSecretKey(sk) nodeID0 := 
ids.GenerateTestNodeID() - err = s.Add(nodeID0, pk, ids.Empty, 2) - require.NoError(err) + require.NoError(s.Add(nodeID0, pk, ids.Empty, 2)) - list = s.List() - require.Len(list, 1) + m = s.Map() + require.Len(m, 1) + require.Contains(m, nodeID0) - node0 := list[0] + node0 := m[nodeID0] require.Equal(nodeID0, node0.NodeID) require.Equal(pk, node0.PublicKey) - require.EqualValues(2, node0.Weight) + require.Equal(uint64(2), node0.Weight) nodeID1 := ids.GenerateTestNodeID() - err = s.Add(nodeID1, nil, ids.Empty, 1) - require.NoError(err) + require.NoError(s.Add(nodeID1, nil, ids.Empty, 1)) - list = s.List() - require.Len(list, 2) + m = s.Map() + require.Len(m, 2) + require.Contains(m, nodeID0) + require.Contains(m, nodeID1) - node0 = list[0] + node0 = m[nodeID0] require.Equal(nodeID0, node0.NodeID) require.Equal(pk, node0.PublicKey) - require.EqualValues(2, node0.Weight) + require.Equal(uint64(2), node0.Weight) - node1 := list[1] + node1 := m[nodeID1] require.Equal(nodeID1, node1.NodeID) require.Nil(node1.PublicKey) - require.EqualValues(1, node1.Weight) + require.Equal(uint64(1), node1.Weight) - err = s.RemoveWeight(nodeID0, 1) - require.NoError(err) + require.NoError(s.RemoveWeight(nodeID0, 1)) require.Equal(nodeID0, node0.NodeID) require.Equal(pk, node0.PublicKey) - require.EqualValues(2, node0.Weight) + require.Equal(uint64(2), node0.Weight) - list = s.List() - require.Len(list, 2) + m = s.Map() + require.Len(m, 2) + require.Contains(m, nodeID0) + require.Contains(m, nodeID1) - node0 = list[0] + node0 = m[nodeID0] require.Equal(nodeID0, node0.NodeID) require.Equal(pk, node0.PublicKey) - require.EqualValues(1, node0.Weight) + require.Equal(uint64(1), node0.Weight) - node1 = list[1] + node1 = m[nodeID1] require.Equal(nodeID1, node1.NodeID) require.Nil(node1.PublicKey) - require.EqualValues(1, node1.Weight) + require.Equal(uint64(1), node1.Weight) - err = s.RemoveWeight(nodeID0, 1) - require.NoError(err) + require.NoError(s.RemoveWeight(nodeID0, 1)) - list = s.List() 
- require.Len(list, 1) + m = s.Map() + require.Len(m, 1) + require.Contains(m, nodeID1) - node0 = list[0] - require.Equal(nodeID1, node0.NodeID) - require.Nil(node0.PublicKey) - require.EqualValues(1, node0.Weight) + node1 = m[nodeID1] + require.Equal(nodeID1, node1.NodeID) + require.Nil(node1.PublicKey) + require.Equal(uint64(1), node1.Weight) - err = s.RemoveWeight(nodeID1, 1) - require.NoError(err) + require.NoError(s.RemoveWeight(nodeID1, 1)) - list = s.List() - require.Empty(list) + require.Empty(s.Map()) } func TestSetWeight(t *testing.T) { require := require.New(t) - vdr0 := ids.NodeID{1} + vdr0 := ids.BuildTestNodeID([]byte{1}) weight0 := uint64(93) - vdr1 := ids.NodeID{2} + vdr1 := ids.BuildTestNodeID([]byte{2}) weight1 := uint64(123) - s := NewSet() - err := s.Add(vdr0, nil, ids.Empty, weight0) - require.NoError(err) + s := newSet() + require.NoError(s.Add(vdr0, nil, ids.Empty, weight0)) - err = s.Add(vdr1, nil, ids.Empty, weight1) - require.NoError(err) + require.NoError(s.Add(vdr1, nil, ids.Empty, weight1)) - setWeight := s.Weight() + setWeight, err := s.TotalWeight() + require.NoError(err) expectedWeight := weight0 + weight1 require.Equal(expectedWeight, setWeight) } @@ -365,7 +292,7 @@ func TestSetWeight(t *testing.T) { func TestSetSample(t *testing.T) { require := require.New(t) - s := NewSet() + s := newSet() sampled, err := s.Sample(0) require.NoError(err) @@ -376,19 +303,17 @@ func TestSetSample(t *testing.T) { nodeID0 := ids.GenerateTestNodeID() pk := bls.PublicFromSecretKey(sk) - err = s.Add(nodeID0, pk, ids.Empty, 1) - require.NoError(err) + require.NoError(s.Add(nodeID0, pk, ids.Empty, 1)) sampled, err = s.Sample(1) require.NoError(err) require.Equal([]ids.NodeID{nodeID0}, sampled) _, err = s.Sample(2) - require.Error(err) + require.ErrorIs(err, sampler.ErrOutOfRange) nodeID1 := ids.GenerateTestNodeID() - err = s.Add(nodeID1, nil, ids.Empty, math.MaxInt64-1) - require.NoError(err) + require.NoError(s.Add(nodeID1, nil, ids.Empty, 
math.MaxInt64-1)) sampled, err = s.Sample(1) require.NoError(err) @@ -407,21 +332,19 @@ func TestSetString(t *testing.T) { require := require.New(t) nodeID0 := ids.EmptyNodeID - nodeID1 := ids.NodeID{ + nodeID1 := ids.BuildTestNodeID([]byte{ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - } + }) - s := NewSet() - err := s.Add(nodeID0, nil, ids.Empty, 1) - require.NoError(err) + s := newSet() + require.NoError(s.Add(nodeID0, nil, ids.Empty, 1)) - err = s.Add(nodeID1, nil, ids.Empty, math.MaxInt64-1) - require.NoError(err) + require.NoError(s.Add(nodeID1, nil, ids.Empty, math.MaxInt64-1)) - expected := "Validator Set: (Size = 2, Weight = 9223372036854775807)\n" + - " Validator[0]: NodeID-111111111111111111116DBWJs, 1\n" + - " Validator[1]: NodeID-QLbz7JHiBTspS962RLKV8GndWFwdYhk6V, 9223372036854775806" + expected := `Validator Set: (Size = 2, Weight = 9223372036854775807) + Validator[0]: NodeID-111111111111111111116DBWJs, 1 + Validator[1]: NodeID-QLbz7JHiBTspS962RLKV8GndWFwdYhk6V, 9223372036854775806` result := s.String() require.Equal(expected, result) } @@ -462,15 +385,16 @@ func (c *callbackListener) OnValidatorWeightChanged(nodeID ids.NodeID, oldWeight func TestSetAddCallback(t *testing.T) { require := require.New(t) - nodeID0 := ids.NodeID{1} + nodeID0 := ids.BuildTestNodeID([]byte{1}) sk0, err := bls.NewSecretKey() require.NoError(err) pk0 := bls.PublicFromSecretKey(sk0) txID0 := ids.GenerateTestID() weight0 := uint64(1) - s := NewSet() + s := newSet() callCount := 0 + require.False(s.HasCallbackRegistered()) s.RegisterCallbackListener(&callbackListener{ t: t, onAdd: func(nodeID ids.NodeID, pk *bls.PublicKey, txID ids.ID, weight uint64) { @@ -481,24 +405,24 @@ func TestSetAddCallback(t *testing.T) { callCount++ }, }) - err = s.Add(nodeID0, pk0, txID0, weight0) - require.NoError(err) + require.True(s.HasCallbackRegistered()) + require.NoError(s.Add(nodeID0, pk0, txID0, weight0)) 
require.Equal(1, callCount) } func TestSetAddWeightCallback(t *testing.T) { require := require.New(t) - nodeID0 := ids.NodeID{1} + nodeID0 := ids.BuildTestNodeID([]byte{1}) txID0 := ids.GenerateTestID() weight0 := uint64(1) weight1 := uint64(93) - s := NewSet() - err := s.Add(nodeID0, nil, txID0, weight0) - require.NoError(err) + s := newSet() + require.NoError(s.Add(nodeID0, nil, txID0, weight0)) callCount := 0 + require.False(s.HasCallbackRegistered()) s.RegisterCallbackListener(&callbackListener{ t: t, onAdd: func(nodeID ids.NodeID, pk *bls.PublicKey, txID ids.ID, weight uint64) { @@ -515,24 +439,24 @@ func TestSetAddWeightCallback(t *testing.T) { callCount++ }, }) - err = s.AddWeight(nodeID0, weight1) - require.NoError(err) + require.True(s.HasCallbackRegistered()) + require.NoError(s.AddWeight(nodeID0, weight1)) require.Equal(2, callCount) } func TestSetRemoveWeightCallback(t *testing.T) { require := require.New(t) - nodeID0 := ids.NodeID{1} + nodeID0 := ids.BuildTestNodeID([]byte{1}) txID0 := ids.GenerateTestID() weight0 := uint64(93) weight1 := uint64(92) - s := NewSet() - err := s.Add(nodeID0, nil, txID0, weight0) - require.NoError(err) + s := newSet() + require.NoError(s.Add(nodeID0, nil, txID0, weight0)) callCount := 0 + require.False(s.HasCallbackRegistered()) s.RegisterCallbackListener(&callbackListener{ t: t, onAdd: func(nodeID ids.NodeID, pk *bls.PublicKey, txID ids.ID, weight uint64) { @@ -549,23 +473,23 @@ func TestSetRemoveWeightCallback(t *testing.T) { callCount++ }, }) - err = s.RemoveWeight(nodeID0, weight1) - require.NoError(err) + require.True(s.HasCallbackRegistered()) + require.NoError(s.RemoveWeight(nodeID0, weight1)) require.Equal(2, callCount) } func TestSetValidatorRemovedCallback(t *testing.T) { require := require.New(t) - nodeID0 := ids.NodeID{1} + nodeID0 := ids.BuildTestNodeID([]byte{1}) txID0 := ids.GenerateTestID() weight0 := uint64(93) - s := NewSet() - err := s.Add(nodeID0, nil, txID0, weight0) - require.NoError(err) + s := 
newSet() + require.NoError(s.Add(nodeID0, nil, txID0, weight0)) callCount := 0 + require.False(s.HasCallbackRegistered()) s.RegisterCallbackListener(&callbackListener{ t: t, onAdd: func(nodeID ids.NodeID, pk *bls.PublicKey, txID ids.ID, weight uint64) { @@ -581,7 +505,7 @@ func TestSetValidatorRemovedCallback(t *testing.T) { callCount++ }, }) - err = s.RemoveWeight(nodeID0, weight0) - require.NoError(err) + require.True(s.HasCallbackRegistered()) + require.NoError(s.RemoveWeight(nodeID0, weight0)) require.Equal(2, callCount) } diff --git a/avalanchego/snow/validators/state.go b/avalanchego/snow/validators/state.go index fa9ef278..3f92df35 100644 --- a/avalanchego/snow/validators/state.go +++ b/avalanchego/snow/validators/state.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package validators diff --git a/avalanchego/snow/validators/subnet_connector.go b/avalanchego/snow/validators/subnet_connector.go index 6b4a24bd..06b02ff9 100644 --- a/avalanchego/snow/validators/subnet_connector.go +++ b/avalanchego/snow/validators/subnet_connector.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package validators diff --git a/avalanchego/snow/validators/test_state.go b/avalanchego/snow/validators/test_state.go index 6be85dcb..ee4102cf 100644 --- a/avalanchego/snow/validators/test_state.go +++ b/avalanchego/snow/validators/test_state.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package validators @@ -8,6 +8,8 @@ import ( "errors" "testing" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/ids" ) @@ -21,7 +23,7 @@ var ( var _ State = (*TestState)(nil) type TestState struct { - T *testing.T + T testing.TB CantGetMinimumHeight, CantGetCurrentHeight, @@ -39,7 +41,7 @@ func (vm *TestState) GetMinimumHeight(ctx context.Context) (uint64, error) { return vm.GetMinimumHeightF(ctx) } if vm.CantGetMinimumHeight && vm.T != nil { - vm.T.Fatal(errMinimumHeight) + require.FailNow(vm.T, errMinimumHeight.Error()) } return 0, errMinimumHeight } @@ -49,7 +51,7 @@ func (vm *TestState) GetCurrentHeight(ctx context.Context) (uint64, error) { return vm.GetCurrentHeightF(ctx) } if vm.CantGetCurrentHeight && vm.T != nil { - vm.T.Fatal(errCurrentHeight) + require.FailNow(vm.T, errCurrentHeight.Error()) } return 0, errCurrentHeight } @@ -59,7 +61,7 @@ func (vm *TestState) GetSubnetID(ctx context.Context, chainID ids.ID) (ids.ID, e return vm.GetSubnetIDF(ctx, chainID) } if vm.CantGetSubnetID && vm.T != nil { - vm.T.Fatal(errSubnetID) + require.FailNow(vm.T, errSubnetID.Error()) } return ids.Empty, errSubnetID } @@ -73,7 +75,7 @@ func (vm *TestState) GetValidatorSet( return vm.GetValidatorSetF(ctx, height, subnetID) } if vm.CantGetValidatorSet && vm.T != nil { - vm.T.Fatal(errGetValidatorSet) + require.FailNow(vm.T, errGetValidatorSet.Error()) } return nil, errGetValidatorSet } diff --git a/avalanchego/snow/validators/traced_state.go b/avalanchego/snow/validators/traced_state.go index e1f54720..1116ab9d 100644 --- a/avalanchego/snow/validators/traced_state.go +++ b/avalanchego/snow/validators/traced_state.go @@ -1,18 +1,17 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package validators import ( "context" - "fmt" "go.opentelemetry.io/otel/attribute" - oteltrace "go.opentelemetry.io/otel/trace" - "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/trace" + + oteltrace "go.opentelemetry.io/otel/trace" ) var _ State = (*tracedState)(nil) @@ -29,10 +28,10 @@ type tracedState struct { func Trace(s State, name string, tracer trace.Tracer) State { return &tracedState{ s: s, - getMinimumHeightTag: fmt.Sprintf("%s.GetMinimumHeight", name), - getCurrentHeightTag: fmt.Sprintf("%s.GetCurrentHeight", name), - getSubnetIDTag: fmt.Sprintf("%s.GetSubnetID", name), - getValidatorSetTag: fmt.Sprintf("%s.GetValidatorSet", name), + getMinimumHeightTag: name + ".GetMinimumHeight", + getCurrentHeightTag: name + ".GetCurrentHeight", + getSubnetIDTag: name + ".GetSubnetID", + getValidatorSetTag: name + ".GetValidatorSet", tracer: tracer, } } diff --git a/avalanchego/snow/validators/unhandled_subnet_connector.go b/avalanchego/snow/validators/unhandled_subnet_connector.go index de7225aa..08447c45 100644 --- a/avalanchego/snow/validators/unhandled_subnet_connector.go +++ b/avalanchego/snow/validators/unhandled_subnet_connector.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package validators diff --git a/avalanchego/snow/validators/validator.go b/avalanchego/snow/validators/validator.go index 56664ddc..499b5189 100644 --- a/avalanchego/snow/validators/validator.go +++ b/avalanchego/snow/validators/validator.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package validators diff --git a/avalanchego/staking/asn1.go b/avalanchego/staking/asn1.go new file mode 100644 index 00000000..afd817a9 --- /dev/null +++ b/avalanchego/staking/asn1.go @@ -0,0 +1,45 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package staking + +import ( + "crypto" + "crypto/x509" + "encoding/asn1" + "fmt" + + // Explicitly import for the crypto.RegisterHash init side-effects. + // + // Ref: https://github.com/golang/go/blob/go1.19.12/src/crypto/x509/x509.go#L30-L34 + _ "crypto/sha256" +) + +var ( + // Ref: https://github.com/golang/go/blob/go1.19.12/src/crypto/x509/x509.go#L433-L452 + // + // RFC 3279, 2.3 Public Key Algorithms + // + // pkcs-1 OBJECT IDENTIFIER ::== { iso(1) member-body(2) us(840) + // rsadsi(113549) pkcs(1) 1 } + // + // rsaEncryption OBJECT IDENTIFIER ::== { pkcs1-1 1 } + oidPublicKeyRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 1} + // RFC 5480, 2.1.1 Unrestricted Algorithm Identifier and Parameters + // + // id-ecPublicKey OBJECT IDENTIFIER ::= { + // iso(1) member-body(2) us(840) ansi-X9-62(10045) keyType(2) 1 } + oidPublicKeyECDSA = asn1.ObjectIdentifier{1, 2, 840, 10045, 2, 1} + + // Ref: https://github.com/golang/go/blob/go1.19.12/src/crypto/x509/x509.go#L326-L350 + signatureAlgorithmVerificationDetails = map[x509.SignatureAlgorithm]x509.PublicKeyAlgorithm{ + x509.SHA256WithRSA: x509.RSA, + x509.ECDSAWithSHA256: x509.ECDSA, + } +) + +func init() { + if !crypto.SHA256.Available() { + panic(fmt.Sprintf("required hash %q is not available", crypto.SHA256)) + } +} diff --git a/avalanchego/staking/certificate.go b/avalanchego/staking/certificate.go new file mode 100644 index 00000000..b3e1a511 --- /dev/null +++ b/avalanchego/staking/certificate.go @@ -0,0 +1,28 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package staking + +import ( + "crypto" + "crypto/x509" +) + +type Certificate struct { + Raw []byte + PublicKey crypto.PublicKey + // TODO: Remove after v1.11.x activates. + SignatureAlgorithm x509.SignatureAlgorithm +} + +// CertificateFromX509 converts an x509 certificate into a staking certificate. +// +// Invariant: The provided certificate must be a parseable into a staking +// certificate. +func CertificateFromX509(cert *x509.Certificate) *Certificate { + return &Certificate{ + Raw: cert.Raw, + PublicKey: cert.PublicKey, + SignatureAlgorithm: cert.SignatureAlgorithm, + } +} diff --git a/avalanchego/staking/large_rsa_key.cert b/avalanchego/staking/large_rsa_key.cert new file mode 100644 index 00000000..45e60a6b Binary files /dev/null and b/avalanchego/staking/large_rsa_key.cert differ diff --git a/avalanchego/staking/local/README.md b/avalanchego/staking/local/README.md new file mode 100644 index 00000000..7c843a69 --- /dev/null +++ b/avalanchego/staking/local/README.md @@ -0,0 +1,107 @@ +# Local Network Staking Keys + +This folder contains the staking keys referenced by the local network genesis. + +**NOTE:** These keys **are** intended to be public. They **must** only be used for local test networks. + +Each staker's Base64 encoded keys are included below for ease of use with the `--staking-tls-key-file-content` and `--staking-tls-cert-file-content` flags. 
+ +## Staker1 + +### NodeID + +``` +NodeID-7Xhw2mDxuDS44j42TCB6U5579esbSt3Lg +``` + +### Key Base64 + +``` +LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlKS0FJQkFBS0NBZ0VBeW1Fa2NQMXRHS1dCL3pFMElZaGEwZEp2UFplc2s3c3k2UTdZN25hLytVWjRTRDc3CmFwTzJDQnB2NXZaZHZjY0VlQ2VhKzBtUnJQdTlNZ1hyWkcwdm9lejhDZHE1bGc4RzYzY2lTcjFRWWFuL0pFcC8KbFZOaTNqMGlIQ1k5ZmR5dzhsb1ZkUitKYWpaRkpIQUVlK2hZZmFvekx3TnFkTHlldXZuZ3kxNWFZZjBXR3FVTQpmUjREby85QVpnQ2pLMkFzcU9RWVVZb2Zqcm9JUEdpdUJ2VDBFeUFPUnRzRTFsdGtKQjhUUDBLYVJZMlhmMThFCkhpZGgrcm0xakJYT1g3YlgrZ002U2J4U0F3YnZ5UXdpbG9ncnVadkxlQmkvTU5qcXlNZkNiTmZaUmVHR0JObnEKSXdxM3FvRDR1dUV0NkhLc0NyQTZNa2s4T3YrWWlrT1FWR01GRjE5OCt4RnpxZy9FakIzbjFDbm5NNCtGcndHbQpTODBkdTZsNXVlUklFV0VBQ0YrSDRabU96WDBxS2Qxb2RCS090dmlSNkRQOVlQbElEbDVXNTFxV1BlVElIZS8zCjhBMGxpN3VDTVJOUDdxdkZibnlHM3d1TXEyUEtwVTFYd0gzeU5aWFVYYnlZenlRVDRrTkFqYXpwZXRDMWFiYVoKQm5QYklSKzhHZG16OUd4SjJDazRDd0h6c3cvRkxlOVR0Z0RpR3ZOQU5SalJaZUdKWHZ6RWpTVG5FRGtxWUxWbgpVUk15RktIcHdJMzdzek9Ebms2K0hFWU9QbFdFd0tQU2h5cTRqZFE3bnNEY3huZkZveWdGUjVuQ0RJNmlFaTA1CmN6SVhiSFp2anBuME9qcjhsKzc5Qmt6Z1c0VDlQVFJuTU1PUU5JQXMxemRmQlV1YU1aOFh1amh2UTlNQ0F3RUEKQVFLQ0FnRUF1VU00TXQ4cjhiWUJUUFZqL1padlhVakFZS2ZxYWNxaWprcnpOMGtwOEM0Y2lqWnR2V0MrOEtnUwo3R0YzNnZTM0dLOVk1dFN3TUtTNnk0SXp2RmxmazJINFQ2VVU0MU9hU0E5bEt2b25EV0NybWpOQW5CZ2JsOHBxCjRVMzRXTEdnb2hycExiRFRBSkh4dGF0OXoxZ2hPZGlHeG5EZ0VVRmlKVlA5L3UyKzI1anRsVEttUGhzdHhnRXkKbUszWXNTcDNkNXhtenE0Y3VYRi9mSjF2UWhzWEhETHFIdDc4aktaWkErQVdwSUI1N1ZYeTY3eTFiazByR25USwp4eFJuT2FPT0R1YkpneHFNRVExV2tMczFKb3c5U3NwZDl2RGdoUHp0NFNOTXpvckI4WURFU01pYjE3eEY2aVhxCmpGajZ4NkhCOEg3bXA0WDNSeU1ZSnVvMnc2bHB6QnNFbmNVWXBLaHFNYWJGMEkvZ2lJNVZkcFNEdmtDQ09GZW4KbldaTFY5QWkveDd0VHEvMEYrY1ZNNjlNZ2ZlOGlZeW1xbGZkNldSWklUS2ZWaU5IQUxsRy9QcTl5SEpzejdOZwpTOEJLT0R0L3NqNFEweEx0RkRUL0RtcFA1MGlxN1NpUzE0b2JjS2NRcjhGQWpNL3NPWS9VbGc0TThNQTdFdWdTCnBESndMbDZYRG9JTU1DTndaMUhHc0RzdHpteDVNZjUwYlM0dGJLNGlaemNwUFg1UkJUbFZkbzlNVFNnbkZpenAKSWkxTmpITHVWVkNTTGIxT2pvVGd1MGNRRmlXRUJDa0MxWHVvUjhSQ1k2aVdWclVINEdlem5pN2NrdDJtSmFOQQpwZDYvODdkRktFM2poNVQ2alplSk1K
ZzVza1RaSFNvekpEdWFqOXBNSy9KT05TRDA2c0VDZ2dFQkFQcTJsRW1kCmcxaHBNSXFhN2V5MXVvTGQxekZGemxXcnhUSkxsdTM4TjY5bVlET0hyVi96cVJHT3BaQisxbkg3dFFKSVQvTDEKeExOMzNtRlZxQ3JOOHlVbVoraVVXaW9hSTVKWjFqekNnZW1WR2VCZ29kd1A5TU9aZnh4ckRwMTdvVGRhYmFFcQo3WmFCWW5ZOHhLLzRiQ3h1L0I0bUZpRjNaYThaVGQvKzJ5ZXY3Sk0rRTNNb3JXYzdyckttMUFwZmxmeHl0ZGhPCkpMQmlxT2Nxb2JJM2RnSHl6ZXNWYjhjVDRYQ3BvUmhkckZ3b3J0MEpJN3J5ZmRkZDQ5dk1KM0VsUmJuTi9oNEYKZjI0Y1dZL3NRUHEvbmZEbWVjMjhaN25WemExRDRyc3pOeWxZRHZ6ZGpGMFExbUw1ZEZWbnRXYlpBMUNOdXJWdwpuVGZ3dXlROFJGOVluWU1DZ2dFQkFNNmxwTmVxYWlHOWl4S1NyNjVwWU9LdEJ5VUkzL2VUVDR2Qm5yRHRZRis4Cm9oaUtnSXltRy92SnNTZHJ5bktmd0pPYkV5MmRCWWhDR0YzaDl6Mm5jOUtKUUQvc3U3d3hDc2RtQnM3WW9EaU0KdXpOUGxSQW1JMFFBRklMUENrNDh6L2xVUWszci9NenUwWXpSdjdmSTRXU3BJR0FlZlZQRHF5MXVYc0FURG9ESgphcmNFa05ENUxpYjg5THg3cjAyRWV2SkpUZGhUSk04bUJkUmw2d3BOVjN4QmR3aXM2N3VTeXVuRlpZcFNpTXc3CldXaklSaHpoTEl2cGdENzhVdk52dUppMFVHVkVqVHFueHZ1VzNZNnNMZklrODBLU1IyNFVTaW5UMjd0Ly94N3oKeXpOa283NWF2RjJobTFmOFkvRXBjSEhBYXg4TkFRRjV1dVY5eEJOdnYzRUNnZ0VBZFMvc1JqQ0syVU5wdmcvRwowRkx0V0FncmNzdUhNNEl6alZ2SnMzbWw2YVYzcC81dUtxQncwVlVVekdLTkNBQTRUbFhRa09jUnh6VnJTNkhICkZpTG4yT0NIeHkyNHExOUdhenowcDdmZkUzaHUvUE1PRlJlY04rVkNoZDBBbXRuVHRGVGZVMnNHWE1nalp0TG0KdUwzc2lpUmlVaEZKWE9FN05Vb2xuV0s1dTJZK3RXQlpwUVZKY0N4MGJ1c054NytBRXR6blpMQzU4M3hhS0p0RApzMUs3SlJRQjdqVTU1eHJDMEc5cGJrTXlzbTBOdHlGemd3bWZpcEJIVmxDcHl2ZzZEQ3hkOEZodmhOOVplYTFiCmZoa2MwU0pab3JIQzVoa3FweWRKRG1sVkNrMHZ6RUFlUU00Qzk0WlVPeXRibmpRbm1YcDE0Q05BU1lxTFh0ZVEKdWVSbzB3S0NBUUFHMEYxMEl4Rm0xV290alpxdlpKZ21RVkJYLzBmclVQY3hnNHZwQjVyQzdXUm03TUk2WVF2UgpMS0JqeldFYWtIdjRJZ2ZxM0IrZms1WmNHaVJkNnhTZG41cjN3S1djR2YzaC8xSkFKZEo2cXVGTld0VnVkK04zCnpZemZsMVllcUZDdlJ3RDhzc2hlTlkzQlYvVTdhU3ROZDJveTRTNSt3WmYyWW9wTFNSV1VWNC9tUXdkSGJNQUIKMXh0Mno1bEROQmdkdng4TEFBclpyY1pKYjZibGF4RjBibkF2WUF4UjNoQkV6eFovRGlPbW9GcGRZeVUwdEpRVQpkUG1lbWhGZUo1UHRyUnh0aW1vaHdnQ0VzVC9UQVlodVVKdVkyVnZ6bkVXcHhXdWNiaWNLYlQySkQwdDY3bUVCCnNWOSs4anFWYkNsaUJ0ZEJhZHRib2hqd2trb1IzZ0J4QW9JQkFHM2NadU5rSVdwRUxFYmVJQ0tvdVNPS04wNnIKRnMvVVhVOHJvTlRoUFI3dlB0amVEMU5ETW1V
SEpyMUZHNFNKclNpZ2REOHFOQmc4dy9HM25JMEl3N2VGc2trNQo4bU5tMjFDcER6T04zNlpPN0lETWo1dXlCbGoydCtJeGwvdUpZaFlTcHVOWHlVVE1tK3JrRkowdmRTVjRmakxkCkoybTMwanVZbk1pQkJKZjdkejVNOTUrVDB4aWNHV3lWMjR6VllZQmJTbzBOSEVHeHFlUmhpa05xWk5Qa29kNmYKa2ZPSlpHYWxoMkthSzVSTXBacEZGaFova1c5eFJXTkpaeUNXZ2tJb1lrZGlsTXVJU0J1M2xDcms4cmRNcEFMMAp3SEVjcTh4d2NnWUNTMnFrOEh3anRtVmQzZ3BCMXk5VXNoTXIzcW51SDF3TXBVNUMrbk0yb3kzdlNrbz0KLS0tLS1FTkQgUlNBIFBSSVZBVEUgS0VZLS0tLS0K +``` + +### Cert Base64 + +``` +LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUZOekNDQXg4Q0NRQzY4N1hGeHREUlNqQU5CZ2txaGtpRzl3MEJBUXNGQURCL01Rc3dDUVlEVlFRR0V3SlYKVXpFTE1Ba0dBMVVFQ0F3Q1Rsa3hEekFOQmdOVkJBY01Ca2wwYUdGallURVFNQTRHQTFVRUNnd0hRWFpoYkdGaQpjekVPTUF3R0ExVUVDd3dGUjJWamEyOHhEREFLQmdOVkJBTU1BMkYyWVRFaU1DQUdDU3FHU0liM0RRRUpBUllUCmMzUmxjR2hsYmtCaGRtRnNZV0p6TG05eVp6QWdGdzB4T1RBM01ESXhOakV5TVRWYUdBOHpNREU1TURjeE1ERTIKTVRJeE5Wb3dPakVMTUFrR0ExVUVCaE1DVlZNeEN6QUpCZ05WQkFnTUFrNVpNUkF3RGdZRFZRUUtEQWRCZG1GcwpZV0p6TVF3d0NnWURWUVFEREFOaGRtRXdnZ0lpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElDRHdBd2dnSUtBb0lDCkFRREtZU1J3L1cwWXBZSC9NVFFoaUZyUjBtODlsNnlUdXpMcER0anVkci81Um5oSVB2dHFrN1lJR20vbTlsMjkKeHdSNEo1cjdTWkdzKzcweUJldGtiUytoN1B3SjJybVdEd2JyZHlKS3ZWQmhxZjhrU24rVlUyTGVQU0ljSmoxOQozTER5V2hWMUg0bHFOa1VrY0FSNzZGaDlxak12QTJwMHZKNjYrZURMWGxwaC9SWWFwUXg5SGdPai8wQm1BS01yCllDeW81QmhSaWgrT3VnZzhhSzRHOVBRVElBNUcyd1RXVzJRa0h4TS9RcHBGalpkL1h3UWVKMkg2dWJXTUZjNWYKdHRmNkF6cEp2RklEQnUvSkRDS1dpQ3U1bTh0NEdMOHcyT3JJeDhKczE5bEY0WVlFMmVvakNyZXFnUGk2NFMzbwpjcXdLc0RveVNUdzYvNWlLUTVCVVl3VVhYM3o3RVhPcUQ4U01IZWZVS2Vjemo0V3ZBYVpMelIyN3FYbTU1RWdSCllRQUlYNGZobVk3TmZTb3AzV2gwRW82MitKSG9NLzFnK1VnT1hsYm5XcFk5NU1nZDcvZndEU1dMdTRJeEUwL3UKcThWdWZJYmZDNHlyWThxbFRWZkFmZkkxbGRSZHZKalBKQlBpUTBDTnJPbDYwTFZwdHBrR2M5c2hIN3daMmJQMApiRW5ZS1RnTEFmT3pEOFV0NzFPMkFPSWE4MEExR05GbDRZbGUvTVNOSk9jUU9TcGd0V2RSRXpJVW9lbkFqZnV6Ck00T2VUcjRjUmc0K1ZZVEFvOUtIS3JpTjFEdWV3TnpHZDhXaktBVkhtY0lNanFJU0xUbHpNaGRzZG0rT21mUTYKT3Z5WDd2MEdUT0JiaFAwOU5HY3d3NUEwZ0N6WE4xOEZTNW94bnhlNk9HOUQwd0lEQVFBQk1BMEdDU3FHU0liMwpEUUVCQ3dVQUE0SUNBUUFxTDF
UV0kxUFRNbTNKYVhraGRUQmU4dHNrNytGc0hBRnpUY0JWQnNCOGRrSk5HaHhiCmRsdTdYSW0rQXlHVW4wajhzaXo4cW9qS2JPK3JFUFYvSW1USDVXN1EzNnJYU2Rndk5VV3BLcktJQzVTOFBVRjUKVDRwSCtscFlJbFFIblRhS011cUgzbk8zSTQwSWhFaFBhYTJ3QXd5MmtEbHo0NmZKY3I2YU16ajZaZzQzSjVVSwpaaWQrQlFzaVdBVWF1NVY3Q3BDN0dNQ3g0WWRPWldXc1QzZEFzdWc5aHZ3VGU4MWtLMUpvVEgwanV3UFRCSDB0CnhVZ1VWSVd5dXdlTTFVd1lGM244SG13cTZCNDZZbXVqaE1ES1QrM2xncVp0N2VaMVh2aWVMZEJSbFZRV3pPYS8KNlFZVGtycXdQWmlvS0lTdHJ4VkdZams0MHFFQ05vZENTQ0l3UkRnYm5RdWJSV3Jkc2x4aUl5YzVibEpOdU9WKwpqZ3Y1ZDJFZVVwd1VqdnBadUVWN0ZxUEtHUmdpRzBqZmw2UHNtczlnWVVYZCt5M3l0RzlIZW9ETm1MVFNUQkU0Cm5DUVhYOTM1UDIveE91b2s2Q3BpR3BQODlEWDd0OHlpd2s4TEZOblkzcnZ2NTBuVnk4a2VyVmRuZkhUbW9NWjkKL0lCZ29qU0lLb3Y0bG1QS2RnekZmaW16aGJzc1ZDYTRETy9MSWhURjdiUWJIMXV0L09xN25wZE9wTWpMWUlCRQo5bGFndlJWVFZGd1QvdXdyQ2NYSENiMjFiL3B1d1Y5NFNOWFZ3dDdCaGVGVEZCZHR4SnJSNGpqcjJUNW9kTGtYCjZuUWNZOFYyT1Q3S094bjBLVmM2cGwzc2FKVExtTCtILzNDdEFhbzlOdG11VURhcEtJTlJTVk55dmc9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== +``` + +## Staker2 + +### NodeID + +``` +NodeID-MFrZFVCXPv5iCn6M9K6XduxGTYp891xXZ +``` + +### Key Base64 + +``` 
+LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlKS2dJQkFBS0NBZ0VBM1U2RWV0SjJ1amJrZllrZ0ZETXN6MXlUVEZsVUd5OGsrZjRtUW9pZHdRZUdGakFZClg4YVNWaVlsbDNEUHM1dzRLU2JzZ2hIb2VRNmVtV054WHU1T2g0YmVoWmhhUTQ4SjQ2VUpuWkJDbElOTkJ2aEoKVDA2ZjFMSnJEeS9pUUxNRDZnV1ZxVEFVdDNiRjBEYnB6RExUQnBhMytVdEplbDErTXU1SFB4azhXSUwzbG1mUwpiVWY1dGlqWHEvc0k0QVI2aWVLejZweHdkOUxBMkQyWVBkUXM5UUpQQVc5c05pTmRQTTBGWUNpdW5pS2pTbHlTCmYrS1lzMG1YVk9QZHRNRE1XQWFaM3d3SElaOFhvdlZQNzkwRzArM2lsQ05ueHEvb1Rsa3lGbFBOUGxYamdiZkcKdWorTEV2UUVJZWYraHZLc0pnS2p0MkVQWGNUZk5vcnZ4OS9qZmhobXZ6TksrYXhGcTg4Q0xoWWNDcjhrSEtUMApRWnRza0p3WjRTL1J1WkRlcCtEZ1ROQ3JJR2R3bUpSMlVvT0hsbkRZQWJkVmdoYkVITzczMUJXaVo2L0VEOTZSCmVGZitBYUpPOEV6dVJ6ZEFOUkdCRldMbCtkelJwNnlsWHZoWVBmc1REb0Z6MXBGRGFuYjZTUFdXMWQyZEV1Zk8KdXFoZC83d1dhNFlBcXluT1JQN3pIc1FVWHZDb3VNQ3FqalZZWXZWaGxIRVI3eFJJcEhQdTFhejQ5R0dodXMxUwozZnJqZ0NUZk5YeWpqMktPRitMR2F6TWoySmR6b3hyVElYMWVDcndtaXhLOVhianBKYWp1bkFyeGtQSWdKUzFjCnQxU2NsdVJpZEE5Q3NvZ0lxK0ZUWFFDU3FOV1lJaG9yeVMxcnBVMzQxSW53amZhTmRlUUlxaWNZcXNNQ0F3RUEKQVFLQ0FnQU5HVU9nSFdybmxLNHJlLzFKRk1wWEw2eU1QVkZNRnB0Q3JMZEpBdHNMZk0yRDdLN1VwR1V1OGkwUgpiSnp1alpXSllnTm5vM1cyREpaNGo3azdIREhMdGNEZitXZUdUaVlRc2trQ2FYSjNaZG9lU24zVVV0d0U4OWFBClhKNHdwQ2ZjSng1M21CL3h4L2JuWHdpeGpHU1BKRWFaVzhwcWtyUVFnYWYzNVI5OFFhd3oyOHRKcXBQdUl6YTQKdURBTFNsaVNacmV0Y0RyNzdKNTdiaEhmdnZvMk9qL0EzdjV4cWVBdjVCYW9YV0FRZmc1YUxXYUNhVUFPaEpHUApkYmsrcEphenN4aFNhbHpWc1p2dGlrV0Q5Zm9jZXgwSkZadGoyQytReTVpNlY1VnpWaFFVTG5OMXZLTVhxUmZCCmhnQzdyZ3RnYUpHV0hnbVJ6RUJGOHkxRUVFMWZvaGJvMnNxa0c0b016M2pCWjRvNE1BRFFjcGZLMnFjaGdybmsKT3hJUy91VThzemR1ODRpSDhzNkYvSGwxKzg3am5xNk85UmUwaU1TdXZ5VWJqQUVlOENtOVAvYTVNMVg5ZXl6dwpXU1hTUFpCd0tTUm9QM3d1eWNiRW9uVFdRblFIZHd5U1krSXZkdGdsaUVEaEtyVmJaR25rczV6bWFhSXlkVy95CkxTMlM5SlJNNVkrWHAwdlYzbkdsRWVoQ1VkclhvUTFEei9BaUhuV0hqYnhvQ0ZHdDBxTDZDT0p6aUFHZlVYS2EKY1E1aURkN3pjMkozbTJaNmM4Vzh4a1BKZSsxZG1OV2ZHSHJqYThEU0h0VGNEWTZBcWQ5OFZ1MG5pdThQQzdieApBdncrKzZKMndHN0xOODlyZ1IwdVA3YXM5Q3g0a0hIc09Gd3ArbEtPRFZlMmR3MHZBUUtDQVFFQTdtb05DalA2CjVQa1NrU05QaS9qdzFZL0ZDd0JvSkV6bDNRNWZ0d0dya1laRlJ
MQlR2Q0NsaTJqaGV4YUMwRDkreWpzVmFMLzIKVmFwNDMveGk1NWlwbmlxdjhjMVdBYzV4RmgrNmhDeWRTNks5b3dEeGxITjc1TUdMcm1yWWpZKzNhTWRvMTVEbQp4NWJ6bk9MTHlNVVc0QWsrNzdNVHcxMmZhZC83TDBBTlh1bUZGajZ5ZGNTOFBIbWhKbG16NVZlZ1d6NWIxS0dRCksvL3BoY3VPbTM0OXhla3Q3SjVrS1JiREVxTE9sWnYvRUlBZENCUU00VTNkNlAvMnZVVXk1bktZRzBGMXhlYUMKbGVWcHIxRVBvRUkrWGtUeStqam9hQnM3aVVIcGNEMzU5WFFDV0xuaXdmMVlmdHRrOXpKcDdtNnRSL0dlYWJsawp1bm5INXp5Rmt3emxRd0tDQVFFQTdhRnROc2pMMFVFWGx5QllqQ1JPb1B1NmthL280UXlFYVd2TUhjaVh1K0d2Ck03VFFDRjJpOW9lUVhBQkl5VE5vUXIrcE5qQVJib1k4cDArOVpmVjhRR2x2SDZhd1cyTU56RDA3bGc5aHdzalkKSk9DSTY0WHhaajE4M0doSGdOOS9jRTRQWEJyUUNxUExQQ0tkVjY2eUFSOVdObTlWYTNZOVhmL1J2Y29MaU5CMQpGQWc1YmhiTlFNblIzOG5QSnM5K3N1U3FZQjh4QURLdndtS0Vkb255K1dJTS9HUXlZWmlEbFhFajhFZldRb3VNCndBb2s2VnVoczZjdUxpSEh6WEZSNFk2UkNXUmIybmYyVnJ6V29wejJCcDAySWVIWTBVWnNaZUtucWhhOWR0VXUKWkNJdDJNWlVFTHhpaDlKUyt3ekNYOEJKazN4ZWRpODl6T1pLUng0TWdRS0NBUUVBeHFuVUo5WmNrSVFEdHJFbgp6Y2tvVmF5eFVwT0tOQVZuM1NYbkdBWHFReDhSaFVVdzRTaUxDWG5odWNGdVM3MDlGNkxZR2lzclJ3TUFLaFNUCkRjMG1PY2YwU0pjRHZnbWFMZ2RPVW1raXdTM2d1MzFEMEtIU2NUSGVCUDYvYUdhRFBHbzlzTExydXhETCtzVDUKYmxqYzBONmpkUFZSMkkraEVJWTFOcEEzRkFtZWZvVE1ERnBkU0Q5Snl6MGdMRkV5TEJYd1MyUTlVSXkwdUdxQQpjSTFuU0EwZjJYVzZuSXA5RG9CZmlFY3U2VDczOGcxVEZrTGVVUk5KTlRuK1NnemZOb2I3Ym1iQUZjdk9udW43CkRWMWx2d1BSUERSRFpNeWNkYWxZcmREWEFuTWlxWEJyeFo0b0tiMERpd0NWU0xzczVUQXZBb1licTA5akJncG0KZTd4WkpRS0NBUUVBM2Y3bDBiMXFzNVdVM1VtSmozckh2aHNOWTljcnZ6cjdaS1VoTGwzY2F0aGUzZlk0TnVpTApPcmJReFRJNnpVUnFUWmxTRWw1N21uNXJvWDZjR09scVo1NVlBd0N0VnVMRjNCMEVVcDhTSEcrWGhYUUNWYzF2CkJLM0N2UUhxY3RuWTYyanhib0ZhQSthYkVoWGdXaTdJK3NWMHZDdnNhQlV4SldTOVpBbWlGdkZ2dndRajZ0WUEKY0Z0YTV5OVlpQkJtYytldHgxaThaVXYwNktzeXhxNy9QNzA3Rm5yZ21rNXA5eTJZZm53T0RXTGpYZkRjSk9uRwp1ZGdnQzFiaG11c1hySm1NbzNLUFlSeWJGTk1ielJUSHZzd1Y2emRiWDc3anU1Y3dQWFU3RVEzOVpleU1XaXlHCkVwQjdtQm1FRGljUVczVi9CdnEwSU1MbmdFbFA4UHFBZ1FLQ0FRRUFxNEJFMVBGTjZoUU9xZTBtY084ZzltcXUKenhsMk1NMEtiMkFCRThmeFEydzRGeTdnNDJOb3pEVVcxMy9NTjdxMUkrQXdNaGJsNEliMlFJbUVNVHVGYUhQWQpBM09abG5FOUwwb2k0Rkkra0cyZUpPQi8rNXBIU3VmL2pyWi80Z0FSSyt
1Yy9DRGVhSWxqUC9ueHcwY1grc0YrCkhqWDRPYjQvQ3lFSWVJVUdkT0dzN2c5a2Yrb2lyWHJ5dURjWnhsLzJmUU94cXZhOWRoaEJMaFBYRzNvdFNwMFQKRDkweEMxbFNQTElIZitWVWlGOWJMTXRVcDRtZUdjZ3dwWFBWalJWNWNibExyUDlQeGJldmxoRzJEM3ZuT0s5QQo4aldJOVAxdU5CRUFVVFNtWFY4cmVNWU95TlhKSDhZYmJUNHlpYXJXbmFRTTBKMGlwV3dYR0VlV2Fndi9hQT09Ci0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg== +``` + +### Cert Base64 + +``` +LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUZOekNDQXg4Q0NRQzY4N1hGeHREUlNqQU5CZ2txaGtpRzl3MEJBUXNGQURCL01Rc3dDUVlEVlFRR0V3SlYKVXpFTE1Ba0dBMVVFQ0F3Q1Rsa3hEekFOQmdOVkJBY01Ca2wwYUdGallURVFNQTRHQTFVRUNnd0hRWFpoYkdGaQpjekVPTUF3R0ExVUVDd3dGUjJWamEyOHhEREFLQmdOVkJBTU1BMkYyWVRFaU1DQUdDU3FHU0liM0RRRUpBUllUCmMzUmxjR2hsYmtCaGRtRnNZV0p6TG05eVp6QWdGdzB4T1RBM01ESXhOakV5TVRsYUdBOHpNREU1TURjeE1ERTIKTVRJeE9Wb3dPakVMTUFrR0ExVUVCaE1DVlZNeEN6QUpCZ05WQkFnTUFrNVpNUkF3RGdZRFZRUUtEQWRCZG1GcwpZV0p6TVF3d0NnWURWUVFEREFOaGRtRXdnZ0lpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElDRHdBd2dnSUtBb0lDCkFRRGRUb1I2MG5hNk51UjlpU0FVTXl6UFhKTk1XVlFiTHlUNS9pWkNpSjNCQjRZV01CaGZ4cEpXSmlXWGNNK3oKbkRncEp1eUNFZWg1RHA2WlkzRmU3azZIaHQ2Rm1GcERqd25qcFFtZGtFS1VnMDBHK0VsUFRwL1VzbXNQTCtKQQpzd1BxQlpXcE1CUzNkc1hRTnVuTU10TUdscmY1UzBsNlhYNHk3a2MvR1R4WWd2ZVdaOUp0Ui9tMktOZXIrd2pnCkJIcUo0clBxbkhCMzBzRFlQWmc5MUN6MUFrOEJiMncySTEwOHpRVmdLSzZlSXFOS1hKSi80cGl6U1pkVTQ5MjAKd014WUJwbmZEQWNobnhlaTlVL3YzUWJUN2VLVUkyZkdyK2hPV1RJV1U4MCtWZU9CdDhhNlA0c1M5QVFoNS82Rwo4cXdtQXFPM1lROWR4TjgyaXUvSDMrTitHR2EvTTByNXJFV3J6d0l1Rmh3S3Z5UWNwUFJCbTJ5UW5CbmhMOUc1CmtONm40T0JNMEtzZ1ozQ1lsSFpTZzRlV2NOZ0J0MVdDRnNRYzd2ZlVGYUpucjhRUDNwRjRWLzRCb2s3d1RPNUgKTjBBMUVZRVZZdVg1M05HbnJLVmUrRmc5K3hNT2dYUFdrVU5xZHZwSTlaYlYzWjBTNTg2NnFGMy92QlpyaGdDcgpLYzVFL3ZNZXhCUmU4S2k0d0txT05WaGk5V0dVY1JIdkZFaWtjKzdWclBqMFlhRzZ6VkxkK3VPQUpOODFmS09QCllvNFg0c1pyTXlQWWwzT2pHdE1oZlY0S3ZDYUxFcjFkdU9rbHFPNmNDdkdROGlBbExWeTNWSnlXNUdKMEQwS3kKaUFpcjRWTmRBSktvMVpnaUdpdkpMV3VsVGZqVWlmQ045bzExNUFpcUp4aXF3d0lEQVFBQk1BMEdDU3FHU0liMwpEUUVCQ3dVQUE0SUNBUUNRT2R3RDdlUkl4QnZiUUhVYyttMFRSekVhMTdCQ2ZjazFZMld3TjNUWlhER1NrUFZFCjB1dWpBOFNMM3FpOC9DVExHUnFJOVUzZ1JaSm
YrdEpQQkYvUDAyMVBFbXlhRlRTNGh0eGNEeFR4dVp2MmpDbzkKK1hoVUV5dlJXaXRUbW95MWVzcTNta290VlFIZVRtUXZ3Q3NRSkFoY3RWQS9oUmRKd21NUHMxQjhReE9VSTZCcQpTT0JIYTlDc1hJelZPRnY4RnFFOTFQWkEybnMzMHNLUVlycm5iSDk5YXBmRjVXZ2xMVW95UHd4ZjJlM0FBQ2g3CmJlRWRrNDVpdnZLd2k1Sms4bnI4NUtESFlQbHFrcjBiZDlFaGw4eHBsYU5CZE1QZVJ1ZnFCRGx6dGpjTEozd28KbW5ydDk1Z1FNZVNvTEhZM1VOc0lSamJqNDN6SW11N3E5di9ERDlwcFFwdTI2YVJEUm1CTmdMWkE5R001WG5iWgpSRmkzVnhMeXFhc0djU3phSHd6NWM3dk9CT2tPZGxxY1F6SVNSdldEeGlOMUhrQUwraGtpUUN1TWNoZ09SQWdNCnd6UG9vYThyZld0TElwT1hNcHd1VkdiLzhyR05MRVBvdm9DSzl6NmMrV1oremtSbzQrM1RRa09NWTY2WGh0N3IKQWhseTNsZXIrVHlnNmE1alhUOTJXS0MvTVhCWUF5MlpRTm95MjA0a05LZXZjSDdSMmNTa3hJVGQzbjVFYWNOeQo1TUF0Q05JazdKd2VMQ2g5ckxyTFVCdCtpNG40NHNQK0xWaGZXSGVtbmdBOENvRjRuNmVRMHBwMGl4WlRlbjBqCjR1TjBHMk5mK0plR01scW9PYkxXZElPZEgvcGJEcHBYR29aYUtLRGQ3K2JBNzRGbGU1VWg3KzFlM0E9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== +``` + +## Staker3 + +### NodeID + +``` +NodeID-NFBbbJ4qCmNaCzeW7sxErhvWqvEQMnYcN +``` + +### Key Base64 + +``` +LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlKSndJQkFBS0NBZ0VBdkpsUTA2QjI1RkJkb0VYVlg2Y25XU3pTbmtOL0hlaDJSWkZETjFOWFZpMlJVbVRiCjJvRGZJWlVMMlA3VUx0d09OS2RmVm9DbTgxbWEzdDdZTlFKTVBDYzdYYy9ZSmVFVEo5ZU5pQ0R0d29zQ0tiWkIKTkNhS1cyWVpodVh3QXRwL3pGMExYNG5pTjRjM2tkT0MxdUNZQ3lzMzl6Wi9NV1haUVlDWHdaaSthbEZKeE96dQp5R3lSaHBtdW9FZzkzc0hyUk9pa0diVGJYOU1VU0w5UEhhR1NtTUU1ZWlMWkhSaXUrcm42cXRQZkJsY0Zyd05jCkVycDN1UGpCWUxQVGVIaFNZK2ljcmZPWDRpdjNNRDhleWtYWUdJQWM2MlVPQkQ3SVVaOEZxd1U0bmpqbWlid0EKTmtUTVRCR015a2F5UUFMR0NOYzhiVGNxaUZHZ1UzTVptcy9qVjFCbHZzQjhDRTRuTkJxZjEwSnRpOEsvNWNTRgplanB3SlM5d29HMGw0cTBwNkJ2bmgwUHJ3OHdRSEVRNUV3SUwveGxDTTVYNms5a3JVeXROcmlBWUNXTTd4VXh1CktaUjRyWEhFWDh2TU1iSWVXZk1SdmZaWmowRUxTN0o1VUFGZmk0dE9nbS9BaCtJVkZhSWxvbGJPVUFHb2FscHIKK1g2YUY5QkxOdWtjbEIxeG00eE50TnFjakRBTzQxSHExRlNqSW96cUJtYlhCK3N3ZUtmM1NkRTdVNkVjRG9LdwpPelFpUFFHRmJhbFY2MXh2N25ZL0xxOXE5VzAxY0NHeDdEbmt4RDk0R05KaDBWcHRoUDJ5NDdmYjIyQ0VWTVVmCk1WcHk1RzNnYzRqVThJUHQrbzNReGZ1b0x0M0ZNLzUyMHAvR0dQbU5ZNDE3ak41YWhmSWpHZ0tHNGJzQ0F3RUEKQVFLQ0FnQSt1SElUM3lLSzdWc2xx
UE83K3Rmd0pTTHFOU0k2TFF2Z09OMzBzVWV6UmpZMUE0dkdEK09reEcrTApPN3dPMVduNEFzMkc5QVFSbS9RUU9HWUl3dm5kYTJLbjRTNU44cHN2UGRVNHQxSzZ4d1h5SDBWeDlYcy95Q1duCklpTCtuL0d1WWljZEg3cldvcVpOWGR6K1h2VFJpZzd6clBFQjJaQTE0M0VVbGhxRk93RmdkemMxK2owdldUNmsKMlVHU0trVjJ4ak9FeFF2THcyUFVpYUxqQk0rKzgwdU5IYmU4b0cvWXZDN3J6c2cxMEl6NFZoS3h1OGVEQVY4MgpMTGVnTWN1Y3BFZ3U1WHJXWWE2MElkbTRoUi9Iamh1UUFTeDNKdlh4aHdRWWl3VDRRWTRSc2k4VDNTOWdBTm9rCmp2eEtvMkYrb1MzY1dHTlJzR3UwTk93SCt5anNWeU1ZYXpjTE9VZXNBQWU4NXR0WGdZcjAyK1ovdU1ueHF0T0YKZ2pJSFkzWDVRWmJENGw0Z2J3eCtQTGJqc2o0S0M2cjN5WnJyNTFQZExVckJ2b3FCaHF3dUNrc2RhTW50V0dNRQp1MFYvb29KaTQrdXpDWXpOMDZqRmZBRlhhMnBXelZCNXlLdzFkNnlZaTlVL2JQZDR4bjFwaExVTUhyQzJidmRNCkg4UDE4Z0FTNnJrV24rYWdlaVdSSG1rZjR1b0tndjNQck1qaWprQmFHcGY2eGp2NiswUTM5M2pkVklDN3dnSlYKOFcwaTFmMUF3djY4MDg5bUhCRWFyUFR2M2d6MzkyNTFXRkNQTlFoRXVTeTc5TGk1emp3T3ByWlhTME1uSlhibQpCMDBJUFRJdzUxS3Vhb3VlV3pZMXBBMklGUS8wc0gzZm8ySmhEMHBwMWdJMERkZTdFUUtDQVFFQTdSVmdOZWxrCjNIM1pKc29PZk9URmEwM2xuWHVFZlRBeXhoRUVDUno2NGs1NHBTYkVXVjczUEllWUJ5WnZuc3JLUnlkcFlXVVAKQ3AvbUtoQUpINFVDZjJoelkwR3lPNy9ENitIRUtaZENnNmEwRE5LY2tBb0ZrQmZlT2xMSkxqTFZBVzJlRVZ4egp0bEZ0Ky9XQkU5MEdDdkU1b3ZYdUVoWEdhUHhDUHA1Z2lJTjVwaFN6U0Q1NTdid3dPeVB3TktGWjdBbzc3VU5LCmt6NkV6Y3ZRZ3FiMjA1U1JSS0dwUzgvVC85TGNMc1VZVmtCZllRL0JheWpmZk8rY1FGNHZINXJCNHgvOC9UN3QKdVVhNzl1WStMZUdIZ1RTRklBdWk5TEVLNXJ5Ly8yaERKSU5zSXRZTWtzMVFvNFN1dTIzcE91R2VyamlGVEtXbAptT0lvRm1QbWJlYkFjd0tDQVFFQXk2V2FKY3pQY0tRUS9ocWdsUXR4VTZWZlA0OVpqVUtrb2krT0NtdHZLRHRzCjdwSmZJa3VsdWhuWUdlcUZmam5vcHc1YmdaSE5GbTZHY2NjNUN3QnRONldrMUtubk9nRElnM2tZQ3JmanRLeS8KQlNTVjNrTEVCdmhlOUVKQTU2bUZnUDdSdWZNYkhUWGhYUEdMa2dFN0pCWmoyRUt4cDFxR1lZVlplc1RNRndETQpLRUh3eklHY0ZreVpzZDJqcHR5TFlxY2ZES3pUSG1GR2N3MW1kdExXQVVkcHYzeHJTM0d2ckNiVU1xSW9kalJkCnFrcmcvZC9rUXBLN0Ezb0xPV2ZhNmVCUTJCWHFhV0IxeDEzYnpKMldsc2h4SkFaMXAxb3pLaWk1QlE5cnZ3V28KbXVJNXZkN282QTlYc2w4UXpsdVNTU1BpK05oalo2NGdNQnJYY2lSdm1RS0NBUUIvZEI1azNUUDcxU3dJVGxlNwpqTUVWRHF1Q0hnVDd5QTJEcldJZUJCWmIweFBJdFM2WlhSUk0xaGhFdjhVQitNTUZ2WXBKY2FyRWEzR3c2eTM4ClkrVVQyWE11eVFLb1hFOVhYK2UwOUR3dHls
REJFL2hXOXd4R2lvNU5qSFBiQWpqQXE4MXVSK1ZzL2huQ2Voa0sKTktncStjT2lkOU9rcFZBazRIZzhjYWd6dTNxS2JsWnpZQ0xzUzE4aWJBK1dPNmU3M1VTYUtMTE90YTF2ZFVLQworbjkyLzBlWlBjOWxralRHTXZWcnIwbUdGTlV4dU9haVZUYlFVNEFNbXBWNnlCZXpvbDYvUmpWR2hXQkhPei95CktteE9hWTJuekptdU1mOUtTKzVyd0FGWWY4NkNhOUFXbTRuZVhsWVJMT1ZWWWpXTU01WjF2aGRvT1N5VDNPRGoKOUVsQkFvSUJBR0NSUGFCeEYyajlrOFU3RVN5OENWZzEwZzNNeHhWU0pjbDJyVzlKZEtOcVVvUnF5a3Z6L1RsYgphZnNZRjRjOHBKTWJIczg1T1R4SzJ0djNNWmlDOGtkeDk5Q1VaTDQvZ3RXOVJXWkh2dVY5Q1BQQ1hvTFB2QzdsCjlmanp0ZDFrcUpiN3ZxM2psdGJxSnR5dytaTVpuRmJIZXo4Z21TZVhxS056M1hOM0FLUmp6MnZEb1JFSTRPQSsKSUorVVR6Y2YyOFRESk5rWTF0L1FGdDBWM0tHNTVwc2lwd1dUVlRtb1JqcG5DemFiYUg1czVJR05FbFd3cG9mZgpGbWxXcFIzcW5vZEt4R3RETVM0WS9LQzJaRFVLQVUrczZ1Ry9ZbWtpUDZMZFBxY2tvZDRxSzhLT1JmMUFSOGRMCkJ6WGhHSklTSURNb25rZU1MTThNWmQwSnpXSWwzdmtDZ2dFQVBCa0V4ZDJqNFZZNXMrd1FKZGlNdG81RERvY2kKa0FFSXZJa0pZOUkrUHQybHBpblFLQWNBQVhidnVlYUprSnBxMzFmNlk2NnVvazhRbkQwOWJJUUNBQmpqbEl2ZQpvN3FRK0g4L2lxSFFYMW5iSER6SW5hRGRhZDNqWXRrV1VIakhQYUtnMi9rdHlOa0Z0bFNIc2t2dkNFVnc1YWp1CjgwUTN0UnBRRzlQZTRaUmpLRXpOSXBNWGZRa3NGSDBLd2p3QVZLd1lKTHFaeHRORVlvazRkcGVmU0lzbkgvclgKcHdLL3B5QnJGcXhVNlBVUlVMVUp1THFSbGFJUlhBVTMxUm1Kc1ZzMkpibUk3Q2J0ajJUbXFBT3hzTHNpNVVlSgpjWnhjVEF1WUNOWU11ODhrdEh1bDhZSmRCRjNyUUtVT25zZ1cxY3g3SDZMR2J1UFpUcGc4U2J5bHR3PT0KLS0tLS1FTkQgUlNBIFBSSVZBVEUgS0VZLS0tLS0K +``` + +### Cert Base64 + +``` 
+LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUZOekNDQXg4Q0NRQzY4N1hGeHREUlNqQU5CZ2txaGtpRzl3MEJBUXNGQURCL01Rc3dDUVlEVlFRR0V3SlYKVXpFTE1Ba0dBMVVFQ0F3Q1Rsa3hEekFOQmdOVkJBY01Ca2wwYUdGallURVFNQTRHQTFVRUNnd0hRWFpoYkdGaQpjekVPTUF3R0ExVUVDd3dGUjJWamEyOHhEREFLQmdOVkJBTU1BMkYyWVRFaU1DQUdDU3FHU0liM0RRRUpBUllUCmMzUmxjR2hsYmtCaGRtRnNZV0p6TG05eVp6QWdGdzB4T1RBM01ESXhOakV5TWpKYUdBOHpNREU1TURjeE1ERTIKTVRJeU1sb3dPakVMTUFrR0ExVUVCaE1DVlZNeEN6QUpCZ05WQkFnTUFrNVpNUkF3RGdZRFZRUUtEQWRCZG1GcwpZV0p6TVF3d0NnWURWUVFEREFOaGRtRXdnZ0lpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElDRHdBd2dnSUtBb0lDCkFRQzhtVkRUb0hia1VGMmdSZFZmcHlkWkxOS2VRMzhkNkhaRmtVTTNVMWRXTFpGU1pOdmFnTjhobFF2WS90UXUKM0E0MHAxOVdnS2J6V1pyZTN0ZzFBa3c4Snp0ZHo5Z2w0Uk1uMTQySUlPM0Npd0lwdGtFMEpvcGJaaG1HNWZBQwoybi9NWFF0ZmllSTNoemVSMDRMVzRKZ0xLemYzTm44eFpkbEJnSmZCbUw1cVVVbkU3TzdJYkpHR21hNmdTRDNlCndldEU2S1FadE50ZjB4Ukl2MDhkb1pLWXdUbDZJdGtkR0s3NnVmcXEwOThHVndXdkExd1N1bmU0K01GZ3M5TjQKZUZKajZKeXQ4NWZpSy9jd1B4N0tSZGdZZ0J6clpRNEVQc2hSbndXckJUaWVPT2FKdkFBMlJNeE1FWXpLUnJKQQpBc1lJMXp4dE55cUlVYUJUY3htYXorTlhVR1crd0h3SVRpYzBHcC9YUW0yTHdyL2x4SVY2T25BbEwzQ2diU1hpCnJTbm9HK2VIUSt2RHpCQWNSRGtUQWd2L0dVSXpsZnFUMlN0VEswMnVJQmdKWXp2RlRHNHBsSGl0Y2NSZnk4d3gKc2g1Wjh4Rzk5bG1QUVF0THNubFFBVitMaTA2Q2I4Q0g0aFVWb2lXaVZzNVFBYWhxV212NWZwb1gwRXMyNlJ5VQpIWEdiakUyMDJweU1NQTdqVWVyVVZLTWlqT29HWnRjSDZ6QjRwL2RKMFR0VG9Sd09nckE3TkNJOUFZVnRxVlhyClhHL3Vkajh1cjJyMWJUVndJYkhzT2VURVAzZ1kwbUhSV20yRS9iTGp0OXZiWUlSVXhSOHhXbkxrYmVCemlOVHcKZyszNmpkREYrNmd1M2NVei9uYlNuOFlZK1kxampYdU0zbHFGOGlNYUFvYmh1d0lEQVFBQk1BMEdDU3FHU0liMwpEUUVCQ3dVQUE0SUNBUUFlMmtDMEhqS1pVK2RsblUyUmxmQnBCNFFnenpyRkU1TjlBOEYxTWxFNHZWM0F6Q2cxClJWZEhQdm5pWHpkTmhEaWlmbEswbC9jbnJGdjJYMVR6WU1yckE2NzcvdXNIZjJCdzB4am0vaXBIT3Q1Vis0VE4KbVpBSUE0SVBsMDlnUDI4SVpMYzl4U3VxNEZvSGVNOE9UeGh0dE9sSU5ocXBHOVA1ZDZiUGV6VzZaekkzQ2RQUApDRjY5eEs0R0Zsai9OUW5Bb0ZvZ2lkNG9qWVlOVGovY000UFlRVTJLYnJsekx5UHVVay9DZ3dlZlhMTUg4Ny9ICmUza1BEZXY4MFRqdjJQbTVuRDkzN2ZaZmdyRW95b2xLeGlSVmNmWlZNeFI3cWhQaGl6anVlRDBEQWtmUUlzN0wKWVZTeXgvcWpFdjJiQllhaW01UlFha1VlSFIxWHU1WGovazV6cjMzdDk
3OWVkZTUwYnlRcmNXbTRINUp4bkVwRApKeEpuRmZET1U2bzE0U0tHSFNyYW81WjRDM2RJNTVETTg0V0xBU25sTUk1Qks0WHRTM25vdExOekc4ZGZXV2hUCjltMEhjcnkrd1BORGNHcjhNdGoxbG9zLzBiTURxTUhDNGpjRlcxaHJYQ1VVczlSWXpFK04veG9xd0NRU2dOMVAKRTczdVhUeVNXajVvdk1SNVRQRjZQaGNmdExCL096aXFPN0Z2ZXJFQnB2R0dIVUFuVVQ2MUp0am9kalhQYkVkagowVmd5TU9CWTJ5NTNIVFhueDNkeGVGWmtVZFJYL1ZaWXk4dE1LM01UWSs3VUlVNWNXWW5DWkFvNUxOY2MwdWtSClM2V1M5KzZlYVE2WFJqaGZOVWp4OWE3RnpxYXBXZHRUZWRwaXBtQlAxTmphcDNnMjlpVXVWbkxRZWc9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== +``` + +## Staker4 + +### NodeID + +``` +NodeID-GWPcbFJZFfZreETSoWjPimr846mXEKCtu +``` + +### Key Base64 + +``` +LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlKS1FJQkFBS0NBZ0VBMlp3NkF4eE5wNC9Oc1E0eDlFMit6bHpLa0F4OC9zMnk0bmJlakVpTlZ3ZDd6Z0NCCklqNnE3WHB4cDZrVzFSWEp1Um1jYjhZZUdFQS9Bb3lrcE5hb0dTWE03TkF6MG9oa1BCSDVlRHhqdU1aeEc1WlkKQzl6MUVUMXFlNWhGZDZJZW44U0FvRUNrd1pVK3U5Ukp4Y3dlWmNpVnFicGtjN3dOYjhvUVRMR1BHanZzWGF1UwoxQlpiV013WGI2WUMxMVdnOEIyVU5qMEJGclJkSGRDVHRqZEZoUlF4alpzZXAvS1NrbmlBRlE3RThHUW1TdmhRCm5uaE1xQm4xN0NEYkhOTWdqRUxKV3NKSHpqS2dQY2dWZHlZdWtKaFdDc2htTGV1VEZoamFqd09GNnlNcFlpc0EKZ2w4TS9abzJsc2plNndNc1BYRnBTekRnelpnemtWc3hrV2VkdmREQkFCQ0diS28yQjFZclVVelAwWnpTR25VNgpHUmp0TXViZ0pETmVISms3NTdDdFFqL2Rxclgwb3JLdStWdnlQZFhhMDRhQkxVeXUvRkJqcGYrVU5XQVVkUEJmCnJVbEZtVnNoM2lPbERnVkFoZ3Rtd3Jac3lqSmU3MU51WlJ1cG13a2RTYmtJdWtXS1ozekkxeDkrcFZlRXUzMDcKMHNNTGtkTjRkaGR4OVhoWkZqUnRwV3hhMXRIY3d3ajRONVY4SmhVT3VZVkhFSVhMYzU2QWMrUXRaUmRubHROOApMbFlNWXA1N2xaU1I0OHZ4cTZwbHJBRXA5VHc3bGJPSk81T2d5WDhNaHdJZ05Mc1Y2eG44V0w2ZXgvSStQdG1ECnBsb2xwZWNXSUt4QTQ3cStaL0djZU9qeDBIcTlnTDZ0N0d4QmhIdFRBclZ5Wlk5M2EzZVI0cHQ1dmpVQ0F3RUEKQVFLQ0FnQk1vQk5aWnd6OUZNa0VNSkJzaXpmRjZLeTNQbjZCSnFOMzFRMldiakcrMUhiRzJpeWVoMXllMUwvUwpudHJZVzV5MW5nd1UyN2xiSnJ4SlJJYnhPRmpteWdXMzJiUjF6T3NtcjltZGVmNVBZU2tRNHNiTUhwajQ0aHh0CnV2ZXpJWllSQWh1YzBrWnhtQUVJR0wrRmM5TzhXWDVCenMxeVoyUi8yYklWbjJ4WmU0SkdsWlRWTTY0a3ZYRC8KTW9ETG5HNVlQc0lpdXlaMy9UalF0OUpibG1qWGJIM3FkQlcrWTg4eTNsV1RsS2pLVVNtZXVvT0EyYkY4ZSsrNQpudlFvMlRzYnlLU29YY0wxRzZTTFBMbzZRMnFnSmRRZVplU
jlCUGU5RHpGZXJJbnFlMjRtRUNoVXYrMk9HMUJmCmxnblF6VVExdW9xdUhGNzhaank2VVZkSjhTZDh1ZnZLQzlyejhKWXNJeW5mdzBnUUMzRjgvZW1tMVFTYWJGdlkKdEc0K3gwSzhGZ3JpampFMDhSdnFnSW5keDlmdENOb040dTNsWHhQckpoS3ByMnh1WFNhNFZaYnVtZ043ZnFXeApVQkM4bG1QUWk1VlptajNuSmZqNGRhdG1CVHZzMWRPTFJNZGZkdFRGeitjQWRXTlp4WDNIT0xaVVNxTVZXZ1hZCmtYMHM3SVY5R255VW50Qmt0WCtJRWJXbEF0dHpsZHlxRjltZDRhdmpLWFErWTRQSy9zUjF5V3N1dnRpWmRZVUwKL1FyUUhYMENzVnYxaFJjWDB5ZWtBMGE4cXdhR214RWNuZEVLdjd3RjFpNjI2amMyZkRSNnFJMXlwMjBYbDNTaQprWUJTTmg3VksyMTBYSWhkZFN1VnhXNS9neU5uRkFCRGZwMWJTZFRoNVpKUmZOdnRRUUtDQVFFQTlaaXBueXU4CkpLbEx0V3JoMmlQOXBtRkJhZVVnb0Y2OElWZmQ2SVhkVjduQUhTV3lzaUM0M1NDVW1JNjN4bHVNWUVKRmRBWisKbS9pUmMvbjZWRktFQlc0K1VqazlWVzFFMWlxSGdTQUJnN250RXNCMk1EY1lZMHFLc040Q1lqQzJmTllPOTd6Sgo1b2p1ODRVM1FuOFRXTmtNc3JVVTdjcm0yb0FRZDA4QWl6VkZxTG8xZDhhSXpScSt0bDk1MlMvbGhmWEtjL1A5CmtmaGwrUktqaVlDMnpiV25HaW54YzJOYmY1cFd3bm10U3JjZW5nK1prZ1ZmU0IzSHZTY2txekVOeWU5WWtwVk0KR0UrS2pFZHNzK1FuR1FSV00ySlBseW9ZRG1oVDZycmFzUlQ2VEtzZWN3bzFyUlhCaTRDMWVUWlFTblpmMjRPZwpRdXJTLy9Yekh6Ym5rUUtDQVFFQTR0UVNtYUpQWk5XWXhPSG5saXZ6c2VsZkpZeWc1SnlKVk1RV3cvN0N2VGNWCkdPcG9ENGhDMjVwdUFuaVQxVS9SeWFZYVVGWitJcDJGaEsyUWNlK3Vza05ndHFGTjlwaGgvRGVPMWc4Q1NhSWUKNkVidGc4TjhHTGMwWWhkaUp0ZDJYR3JrdGoyWHRoTUw3T0pQWUlpZGQ0OHRHdVFpemZpam80RmUxUzByU1c1NgpCNFJIVGgvTzZhMHRhTmVGYm5aUUpENTJoYTl3bG5jL1BaU0NVTWI5QzBkMDhkU3hkQlFWK1NWZEdybC9JUmZDCnFISG9DODZHWURjbW52aUQ1Q0ZPeHB4N0FKL2hRQXdQRlFSQ25XR0h3RGpwY29NT3RrdHlvN3BqOU1EdXpCVWIKa3I0cjFlaThmN1BDOWRtU1ltWXpKTVF4TGZ6K1RpMlN5eU9tZE0xQ1pRS0NBUUVBc1ZyNGl6Q0xJcko3TU55cAprdDFRekRrSmd3NXEvRVROZVFxNS9yUEUveGZ0eTE2dzUvL0hZRENwL20xNSt5MmJkdHdFeWQveXlIRzlvRklTClc1aG5MSURMVXBkeFdtS1pSa3ZhSlA1VythaG5zcFg0QTZPVjRnWXZsOEFMV3Bzdy9YK2J1WDNGRTgwcE9nU20KdmtlRVVqSVVBRzNTV2xLZldZVUgzeERYSkxCb3lJc0lGNkh3b3FWQXVmVEN5bnZUTldVbE9ZMG1QYVp6QldaWApZUEhwa1M0d0tTM0c1bndHMUdSQmFSbHpjalJCVVFXVThpVWRCTGcweUwwZXR0MnF4bndvcTFwVFpHNzBiNDhZCnllUGw5Q1AwbUJEVHh5Y256aWU3Q2hTNzN3dDJJYTJsUkpCSDZPR0FMbHpaTUZwdnF3WkcvUC9WMk4wNVdJeGwKY05JMmNRS0NBUUVBb3lzN1ZobFVVNHp6b0cyQlVwMjdhRGdnb2JwU
DR5UllCZ29vOWtURmdhZW1IWTVCM1NxQQpMY2toYWRXalFzZHdla1pxbDNBZ3ZIWGtIbFZjbXhsMzZmUmVGZ0pqT3dqVE04UWpsQWluOUtBUzY3UmFGM2NBClJpZEVIMndDeHo0bmZzUEdVdkpydUNaclpiUkd0WUtSQS9pUzBjMWEzQ0FJVnc0eFVkaDBVeGFONGVwZUFPMFEKd3pnNGVqclBXVzd5cDUvblVyT3BvaE9XQW81YVVCRlU1bEE0NTkzQTZXZXBodGhCNlgrVzNBOWprQmlnZkIzTQp2Rm53Qmx0dlJTUlFycjdTSE5qbUNGU2taTkh6dVpMM1BHZTBSeFBQK1lLOHJOcmdIS2pOSHpIdjY5ZXhZT2RTCjhlbzJUUFIrUVJxVG45Y2lLWnJjdFJCRGtLM01pQ2svb1FLQ0FRQVpJWmRrT0NsVVBIZlNrNHg1bkJYYXNoS1kKZ0R6ZXlZSFlMd05hQmRFS2dITnVmNmpDbHRLV29EelpzcXJ2MU55YS8xNDhzVGdTVGc5MzFiYmNoK2xuSEtKZApjWHJDUVpXQm51MlVxdWlzRk1lTk92cHAwY1B0NHRJWURaVkNSTVJyd0lsWnFJSnhiMm5Bd0Z2YjBmRWZMays0CmdtdSszY0NhTi92UzNvSkE5RUZremp4RzBYaUxPeW55QVpiNWZZMDRObUZPSXNxM3JnVDREZUN1ckhUS3RPSjIKdDE0b1ROcTA2TEQ1NjZPblQ2cGxMN3ZhTHRUUi85L3FKYzAwN1dqdzhRZGJUdVFBTHFDaldXZzJiN0JWa095UgpvOUdyaFB6U2VUNm5CSEk4RW9KdjBueGVRV05EWDlwWmlXLzFuc3l1QUFGSjlJU2JEV2p6L1R3QjE3VUwKLS0tLS1FTkQgUlNBIFBSSVZBVEUgS0VZLS0tLS0K +``` + +### Cert Base64 + +``` +LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUZOekNDQXg4Q0NRQzY4N1hGeHREUlNqQU5CZ2txaGtpRzl3MEJBUXNGQURCL01Rc3dDUVlEVlFRR0V3SlYKVXpFTE1Ba0dBMVVFQ0F3Q1Rsa3hEekFOQmdOVkJBY01Ca2wwYUdGallURVFNQTRHQTFVRUNnd0hRWFpoYkdGaQpjekVPTUF3R0ExVUVDd3dGUjJWamEyOHhEREFLQmdOVkJBTU1BMkYyWVRFaU1DQUdDU3FHU0liM0RRRUpBUllUCmMzUmxjR2hsYmtCaGRtRnNZV0p6TG05eVp6QWdGdzB4T1RBM01ESXhOakV5TWpWYUdBOHpNREU1TURjeE1ERTIKTVRJeU5Wb3dPakVMTUFrR0ExVUVCaE1DVlZNeEN6QUpCZ05WQkFnTUFrNVpNUkF3RGdZRFZRUUtEQWRCZG1GcwpZV0p6TVF3d0NnWURWUVFEREFOaGRtRXdnZ0lpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElDRHdBd2dnSUtBb0lDCkFRRFpuRG9ESEUybmo4MnhEakgwVGI3T1hNcVFESHoremJMaWR0Nk1TSTFYQjN2T0FJRWlQcXJ0ZW5HbnFSYlYKRmNtNUdaeHZ4aDRZUUQ4Q2pLU2sxcWdaSmN6czBEUFNpR1E4RWZsNFBHTzR4bkVibGxnTDNQVVJQV3A3bUVWMwpvaDZmeElDZ1FLVEJsVDY3MUVuRnpCNWx5SldwdW1SenZBMXZ5aEJNc1k4YU8reGRxNUxVRmx0WXpCZHZwZ0xYClZhRHdIWlEyUFFFV3RGMGQwSk8yTjBXRkZER05teDZuOHBLU2VJQVZEc1R3WkNaSytGQ2VlRXlvR2ZYc0lOc2MKMHlDTVFzbGF3a2ZPTXFBOXlCVjNKaTZRbUZZS3lHWXQ2NU1XR05xUEE0WHJJeWxpS3dDQ1h3ejltamFXeU43cgpBeXc5Y1dsTE1PRE5tRE9SV3pHUlo1MjkwTUVBRUla
c3FqWUhWaXRSVE0vUm5OSWFkVG9aR08weTV1QWtNMTRjCm1Udm5zSzFDUDkycXRmU2lzcTc1Vy9JOTFkclRob0V0VEs3OFVHT2wvNVExWUJSMDhGK3RTVVdaV3lIZUk2VU8KQlVDR0MyYkN0bXpLTWw3dlUyNWxHNm1iQ1IxSnVRaTZSWXBuZk1qWEgzNmxWNFM3ZlR2U3d3dVIwM2gyRjNIMQplRmtXTkcybGJGclcwZHpEQ1BnM2xYd21GUTY1aFVjUWhjdHpub0J6NUMxbEYyZVcwM3d1Vmd4aW5udVZsSkhqCnkvR3JxbVdzQVNuMVBEdVZzNGs3azZESmZ3eUhBaUEwdXhYckdmeFl2cDdIOGo0KzJZT21XaVdsNXhZZ3JFRGoKdXI1bjhaeDQ2UEhRZXIyQXZxM3NiRUdFZTFNQ3RYSmxqM2RyZDVIaW0zbStOUUlEQVFBQk1BMEdDU3FHU0liMwpEUUVCQ3dVQUE0SUNBUUE0MGF4MGRBTXJiV2lrYUo1czZramFHa1BrWXV4SE5KYncwNDdEbzBoancrbmNYc3hjClFESG1XY29ISHBnTVFDeDArdnA4eStvS1o0cG5xTmZHU3VPVG83L2wwNW9RVy9OYld3OW1Id1RpTE1lSTE4L3gKQXkrNUxwT2FzdytvbXFXTGJkYmJXcUwwby9SdnRCZEsycmtjSHpUVnpFQ2dHU294VUZmWkQrY2syb2RwSCthUgpzUVZ1ODZBWlZmY2xOMm1qTXlGU3FNSXRxUmNWdzdycXIzWHk2RmNnUlFQeWtVbnBndUNFZ2NjOWM1NGMxbFE5ClpwZGR0NGV6WTdjVGRrODZvaDd5QThRRmNodnRFOVpiNWRKNVZ1OWJkeTlpZzFreXNjUFRtK1NleWhYUmNoVW8KcWw0SC9jekdCVk1IVVk0MXdZMlZGejdIaXRFQ2NUQUlwUzZRdmN4eGdZZXZHTmpaWnh5WnZFQThTWXBMTVp5YgpvbWs0ZW5EVExkL3hLMXlGN1ZGb2RUREV5cTYzSUFtME5UUVpVVnZJRGZKZXV6dU56NTV1eGdkVXEyUkxwYUplCjBidnJ0OU9ieitmNWoyam9uYjJlMEJ1dWN3U2RUeUZYa1VDeE1XK3BpSVVHa3lyZ3VBaGxjSG9oRExFbzJ1Qi8KaVE0Zm9zR3Fxc2w0N2IrVGV6VDVwU1NibGtnVWppd3o2ZURwTTRsUXB4MjJNeHNIVmx4RkhyY0JObTBUZDkydgpGaXhybWxsYW1BWmJFejF0Qi8vMGJpcEthT09adWhBTkpmcmdOOEJDNnYyYWhsNC9TQnV1dDA5YTBBenl4cXBwCnVDc3lUbmZORWQxVzZjNm5vYXEyNHMrN1c3S0tMSWVrdU5uMU51bm5IcUtxcmlFdUgxeGx4eFBqWUE9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== +``` + +## Staker5 + +### NodeID + +``` +NodeID-P7oB2McjBGgW2NXXWVYjV8JEDFoW9xDE5 +``` + +### Key Base64 + +``` 
+LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlKS1FJQkFBS0NBZ0VBNEN1YStiM1I3U1JSSU1PNFJoUDVjeW1oM0w4TTY5OW1xNXF0SlBiV0hJU1YzMDk1Ck5ZOVpWN1FRSFhGRGQ0RWtTbzRNeC83NDI1OUhtdHdJZ3dsSGRqSEpQNTlDbkNXZU4wdVNPMll6KzYyMnI5dmUKbXlnbWRSNis2K1lGMkJZWlB0MmxMRlh3MUtnRHdjTWovQ25sZDlRRVZRUDdQbGozbHFyT3ZVRjAzY2liZ1dCTwphYStOTDlVbi9ubTFDSjJkKzdobU41SWp0Qk1meVlIS0VMQmljNjFnLzg3VzBRaFNMNjNUU1dYZEtScnpKWmt5CnFlMTdERWQzS2I2cE8rOVAvekN3bmNobHY5U0FLMDByZzliM2U1Z1Y5U2NsVnlzbkdEZEx5RHJXNE1UemhiclYKTXFnem5qKzZPZGF1SWowdDNRa1lINXAvNFZpOE12Z3JTWHRBUmlyRytMWk5MU3NhTkxIdkwvU1FQSmxMWjVzNQpqTGVHUFk0b3pxd2pnZTE4RmlwaVdCSnZEZVZHM011bktkcUMvTDBGYmM4OXoxNjVCYmZ0M0JiZFk2NDNhVUdJCkIrSThwSkZnM2pveEg4dFJRc003RFViblMwN3k0WHk1L092MlUrSEMyZlNTTy8xbHRDRHd2QlhPNVljNXlmbXkKRUZIaEozV2huWUZaTWJoSU5ETVhheks5STFOQ0JhRjhra1Z6VWMzQTFtVW9SSXdVNys3WnhjUzVxRFZQcmlXLwp5cVRCK3JFV0l0ODdQT1huR2pDamNodml3NWdXb1ZKUjhlWVNnQitQVHZQT2VmZ0RJYklQZFRqV1NaWFkwRjlLCkdheUNzeXVRYWJLbms1ZUhObUxmM3dDNG1XbmhaTmFRUkxMMVhqRGdRaUxsaFBtK0ZDQkdpd0RvY1g4Q0F3RUEKQVFLQ0FnRUFwdU1QcnhtSDdYbjZBK0J4a1lwUlRWRVROWm50N3JRVVpYRHpzZThwbTNXQmRneGVlbWRMNWlVaApVaW4rUmp1WVh3Qzl0eTYwNmh2OFhPZXVWbzlUNmtSS1JOazE1N1dCd2p5Nmt3b1ZiU3I0TkpnRmM1RkNnREx4CmhBRnRIRi9uVDR3RzZhalpjQmZkSkNVNDV3UHgxM0c1LytqRTVMZXJLem5pUzdjdFgrZDNEYXc2OUNkRGZ2YTcKblpIU0dxWHM5WGRrY2I2VVlmMVN6dHVYS1RHSE9nTTdrWFhWS3kxOHNnNUFuQVgvemhoSUtCZVRSanFNUHFuOQpwdEJRZ1ZRNlJBdGxrVEdkdm1CZlF0MWlwZllsckplZTBUSGhkTEdsbXp1ZmFXT1VrU1ZPL3FJSEVuMXlZRCtsClRtWHFvWWJXWEJYbkpiQUp3Q1FsaC9TRmxXRHlpV1dPeHN6eGR3d1QyeWJ3N09SM2EwREVWME1iS0prVWV4eUYKOTJMcjNxb0JTWlJGUW5YVnZCZ2pRT3duekVGcGgxQU51R1kzb2RMOEpTTTF0SG5pSXNDczRXaERQT3NiQWoraAprd1M1MWNvbE1rM2JOQ1ozeGVBcmpNTEJWTGdUN3hMWC83WlljNy9vVEVGV2lrKzIwVHZTRVd6ZEUxTi80Z2ZKCmpFVS9WcXJuTmp5ZXYydzlBazZiRWt3WkZMUzZWWjlyVFdURjlqazhDMWFYai9SaGZhYUMzM3hYQmJobjlIdVgKbFR1L0phTE1wMFFjNGFDbHFVWU02TGx4SWVqSDViOGZJeENOSEppc2xYSkRhNmE2YVFsODVCaVFPRFBGeFZUNQpXQ3BRRDQ4NThFdUxkWDRCUlcyZklHUlk2RGl2UjZ1SlJBbXhMZitFd0FnL3JnVHpVc0VDZ2dFQkFQU2tIWDVGCkJoUmd1ZEYwTW53Titlbmo0U29YSGhSRytEVG9yeE8xWmgycU4
5bG5YTzluTUtNQ1hWSkxJVnZHRnVpTVJTSjAKVktmMXUwVXFhQkYwMk1iSXZiZWk3bXpra1cwLzc0bTA0WDM3aXlNbXRubW9vUTBHRVY4NG9PTndBdDNEZWVUZwp2SXBPdHE5VjI2WEhHYVFEeGNSRk1GQnVEMDJhMnlmM0pZa1hqNzRpMnNjTVA0eHhNSE1rSnhHSzlGU0JPaG5wCmsvcDBoTWwzRlZHZm81TnM1VDFSbDNwTXVlRUYzQjUrQnZyVjF6MTRJTi8wbHd1aHVqclVVWVM0RXcrUGs1ekMKRlN1YmZJUU1xU1QxanZYWFRhR2dYMEdQZmZhNGx4Z2FERUFUTGV3dkwzRmp5MjdYemw1N2k5WnZUTkM0eUZhZAo0b2tqci9lSXRIdEtWSEVDZ2dFQkFPcVVLd3cvNnVpSk1OdmMyTnhMVU14dXpCMDdycU9aS1QyVk1Ca0c1R3prCnY4MWZEdGxuZEQ4Y3dIU3FPTEtzY0gvUUtYRDdXSzNGQ3V2WlN2TXdDakVCNFBwMXpnd0pvQmV4dVh2RkREYnMKMFQ3N1Fpd2UrMldtUklpWWV2NWFSRzNsbkJNTThSRFMvUVB6RWRveEhkenJGVVJZVmwwcnY1bC83cndCMlpkNgp4QVlIY1VwWmM0WmF5c0VncVFDdVpRcUM3TXJxN3FmQnlVdGhIMjhZaWN6MTk3OGZwRTNkeDE1Y2VxalU5akJRCnhVVXdiZUtUL1VrUVF2bVlIZHRnd0VqaHpWUUwxT0FBV2tUNlJzc01xeDJSQWRpMFNxV1BGRWh4TlBIQnBHOUIKbEtVREJCSU02ZHU5MTZPbjBCamdoaDNXaHhRS3BUSXp2ZU5BaWV4YlhPOENnZ0VCQU52Sm9oR3ljMzdWVTd3ZwoxOFpxVEEvY3dvc3REOElKN0s2a0tiN2NKeTBabzJsM21xQWZKaXdkVUxoQmRXdmRNUEdtSytxRGR4Y2JCeTloCnBQT2g5YXZKNStCV3lqd2NzYWJrWFJGcjUzWm5DcDcvQmN1Uk8zZlc3cjZNd3NieStEQkNrWDJXaHV6L1FOT1AKb0hGMHljMTM4aktlTW9UZ0RIR2RZYTJyTmhiUGl6MjRWTE9saG1abnZxNkRXWEpDVTdha0R3Mytzd3E5cWhyUwpHTjRuUFMrVEV2VWZHNmN0ellXajNSbXNBaHRUQ1RoWmQ3ZWRLQ0swSHZzQmkyZGdkUWR5NTV4YkplZnlubENJCmkySUFGM3M0L3E3cHhRckNudG1OQjNvSTFONndISDduK1lpMnJxc2J5WFZMSzl2d1RLUHNqMWg2S204cEY4dWQKRHdFQlM1RUNnZ0VBTW5xMkZNbkFiRS94Z3E2d3dCODVBUFVxMlhPWmJqMHNZY016K1g3Qk15bTZtS0JIR3NPbgpnVmxYbFFONGRnS2pwdTJOclhGNU1OUEJPT1dtdWxSeExRQ2hnR1JQZGNtd2VNalhDR3ByNlhubXdXM2lYSXBDClFTcVpmdWVKT0NrR3BydU5iWkFRWkRWekd5RjRpd0tjMFlpSktBNzJidEJXUjlyKzdkaGNFYnZxYVAyN0JHdmgKYjEwa1dwRURyVkRhRDN3REp0dU5oZTR1dWhqcFljZmZCNHM2eUJjd0RVMlhkSmZrRVdiYW42VVIvb1NnY095MQp5YjVGRzE3L3RkREpNQ1hmUUtIWEtta0pBK1R6elFncDNvL3czTWhYYys4cFJ6bU5VaVVBbEt5QkowMVIxK3lOCmVxc010M3dLVFFBci9FbkpBYWdVeW92VjVneGlZY2w3WXdLQ0FRQWRPWWNaeC9sLy9PMEZ0bTZ3cFhHUmpha04KSUhGY1AyaTdtVVc3NkV3bmoxaUZhOVl2N3BnYmJCRDlTMVNNdWV0ZklXY3FTakRpVWF5bW5EZEExOE5WWVVZdgpsaGxVSjZrd2RWdXNlanFmY24rNzVKZjg3QnZXZElWR3JOeFBkQjdaL2x
tYld4RnF5WmkwMFI5MFVHQm50YU11CnpnL2lickxnYXR6QTlTS2dvV1htMmJMdDZiYlhlZm1PZ25aWHl3OFFrbzcwWHh0eDVlQlIxQkRBUWpEaXM4MW4KTGc5NnNKM0xPbjdTWEhmeEozQnRYc2hUSkFvQkZ4NkVwbXVsZ05vUFdJa0p0ZDdYV1lQNll5MjJEK2tLN09oSApScTNDaVlNdERtWm91Yi9rVkJMME1WZFNtN2huMVRTVlRIakZvVzZjd1EzN2lLSGprWlZSd1gxS3p0MEIKLS0tLS1FTkQgUlNBIFBSSVZBVEUgS0VZLS0tLS0K +``` + +### Cert Base64 + +``` +LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUZOekNDQXg4Q0NRQzY4N1hGeHREUlNqQU5CZ2txaGtpRzl3MEJBUXNGQURCL01Rc3dDUVlEVlFRR0V3SlYKVXpFTE1Ba0dBMVVFQ0F3Q1Rsa3hEekFOQmdOVkJBY01Ca2wwYUdGallURVFNQTRHQTFVRUNnd0hRWFpoYkdGaQpjekVPTUF3R0ExVUVDd3dGUjJWamEyOHhEREFLQmdOVkJBTU1BMkYyWVRFaU1DQUdDU3FHU0liM0RRRUpBUllUCmMzUmxjR2hsYmtCaGRtRnNZV0p6TG05eVp6QWdGdzB4T1RBM01ESXhOakV5TWpsYUdBOHpNREU1TURjeE1ERTIKTVRJeU9Wb3dPakVMTUFrR0ExVUVCaE1DVlZNeEN6QUpCZ05WQkFnTUFrNVpNUkF3RGdZRFZRUUtEQWRCZG1GcwpZV0p6TVF3d0NnWURWUVFEREFOaGRtRXdnZ0lpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElDRHdBd2dnSUtBb0lDCkFRRGdLNXI1dmRIdEpGRWd3N2hHRS9sekthSGN2d3pyMzJhcm1xMGs5dFljaEpYZlQzazFqMWxYdEJBZGNVTjMKZ1NSS2pnekgvdmpibjBlYTNBaURDVWQyTWNrL24wS2NKWjQzUzVJN1pqUDdyYmF2Mjk2YktDWjFIcjdyNWdYWQpGaGsrM2FVc1ZmRFVxQVBCd3lQOEtlVjMxQVJWQS9zK1dQZVdxczY5UVhUZHlKdUJZRTVwcjQwdjFTZitlYlVJCm5aMzd1R1kza2lPMEV4L0pnY29Rc0dKenJXRC96dGJSQ0ZJdnJkTkpaZDBwR3ZNbG1US3A3WHNNUjNjcHZxazcKNzAvL01MQ2R5R1cvMUlBclRTdUQxdmQ3bUJYMUp5VlhLeWNZTjB2SU90Ymd4UE9GdXRVeXFET2VQN281MXE0aQpQUzNkQ1JnZm1uL2hXTHd5K0N0SmUwQkdLc2I0dGswdEt4bzBzZTh2OUpBOG1VdG5tem1NdDRZOWppak9yQ09CCjdYd1dLbUpZRW04TjVVYmN5NmNwMm9MOHZRVnR6ejNQWHJrRnQrM2NGdDFqcmpkcFFZZ0g0anlra1dEZU9qRWYKeTFGQ3d6c05SdWRMVHZMaGZMbjg2L1pUNGNMWjlKSTcvV1cwSVBDOEZjN2xoem5KK2JJUVVlRW5kYUdkZ1ZreAp1RWcwTXhkck1yMGpVMElGb1h5U1JYTlJ6Y0RXWlNoRWpCVHY3dG5GeExtb05VK3VKYi9LcE1INnNSWWkzenM4CjVlY2FNS055RytMRG1CYWhVbEh4NWhLQUg0OU84ODU1K0FNaHNnOTFPTlpKbGRqUVgwb1pySUt6SzVCcHNxZVQKbDRjMll0L2ZBTGlaYWVGazFwQkVzdlZlTU9CQ0l1V0UrYjRVSUVhTEFPaHhmd0lEQVFBQk1BMEdDU3FHU0liMwpEUUVCQ3dVQUE0SUNBUUIrMlZYbnFScWZHN0gyL0swbGd6eFQrWDlyMXUrWURuMEVhVUdBRzcxczcwUW5xYnBuClg3dEJtQ0tMTjZYZ1BMMEhyTjkzM253aVlybWZiOFMzM3
paN2t3OEdKRHZhVGFtTE55ZW00LzhxVEJRbW5Sd2UKNnJRN1NZMmw3M0lnODdtUjBXVGkrclRuVFR0YzY2Ky9qTHRGZWFqMFljbDloQlpYSEtpVUxTR2hzYlVid3RregppdU5sQU5ob05LWE5JQUJSSW1VcTZPd1loRVFOMER3SFhqNzl3a3B5RFlqS1p3SHVFWlVrbmM4UGwyb1FQQmtlCm1pbDN0c3J2R1Jrd2hpc25YWDd0cWg2cldLVlpOSmtPNjhoeTdYTzlhVFhqYmNCLzdZMUs4M0lTTkV5R1BzSC8KcHdGeWQvajhPNG1vZHdoN1Vsd3cxL2h3Y3FucWlFRkUzS3p4WDJwTWg3VnhlQW1YMnQ1ZVhGWk9sUngxbGVjTQpYUmtWdTE5bFlES1FIR1NyR3huZytCRmxTT0I5NmU1a1hJYnVJWEtwUEFBQ29CUS9KWllidEhrczlIOE90TllPClAyam9xbW5ROXdHa0U1Y28xSWkvL2oydHVvQ1JDcEs4Nm1tYlRseU5ZdksrMS9ra0tjc2FpaVdYTnJRc3JJRFoKQkZzMEZ3WDVnMjRPUDUrYnJ4VGxSWkUwMVI2U3Q4bFFqNElVd0FjSXpHOGZGbU1DV2FZYXZyQ1pUZVlhRWl5RgpBMFgyVkEvdlo3eDlENVA5WjVPYWtNaHJNVytoSlRZcnBIMXJtNktSN0IyNmlVMmtKUnhUWDd4UTlscmtzcWZCCjdsWCtxMGloZWVZQTRjSGJHSk5Xd1dnZCtGUXNLL1BUZWl5cjRyZnF1dHV0ZFdBMEl4b0xSYzNYRnc9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== +``` \ No newline at end of file diff --git a/avalanchego/staking/parse.go b/avalanchego/staking/parse.go new file mode 100644 index 00000000..4f9a50f0 --- /dev/null +++ b/avalanchego/staking/parse.go @@ -0,0 +1,181 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package staking + +import ( + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rsa" + "crypto/x509" + "encoding/asn1" + "errors" + "fmt" + "math/big" + + "golang.org/x/crypto/cryptobyte" + + "github.com/ava-labs/avalanchego/utils/units" + + cryptobyte_asn1 "golang.org/x/crypto/cryptobyte/asn1" +) + +const ( + MaxCertificateLen = 2 * units.KiB + + allowedRSASmallModulusLen = 2048 + allowedRSALargeModulusLen = 4096 + allowedRSAPublicExponentValue = 65537 +) + +var ( + ErrCertificateTooLarge = fmt.Errorf("staking: certificate length is greater than %d", MaxCertificateLen) + ErrMalformedCertificate = errors.New("staking: malformed certificate") + ErrMalformedTBSCertificate = errors.New("staking: malformed tbs certificate") + ErrMalformedVersion = errors.New("staking: malformed version") + ErrMalformedSerialNumber = errors.New("staking: malformed serial number") + ErrMalformedSignatureAlgorithmIdentifier = errors.New("staking: malformed signature algorithm identifier") + ErrMalformedIssuer = errors.New("staking: malformed issuer") + ErrMalformedValidity = errors.New("staking: malformed validity") + ErrMalformedSPKI = errors.New("staking: malformed spki") + ErrMalformedPublicKeyAlgorithmIdentifier = errors.New("staking: malformed public key algorithm identifier") + ErrMalformedSubjectPublicKey = errors.New("staking: malformed subject public key") + ErrMalformedOID = errors.New("staking: malformed oid") + ErrInvalidRSAPublicKey = errors.New("staking: invalid RSA public key") + ErrInvalidRSAModulus = errors.New("staking: invalid RSA modulus") + ErrInvalidRSAPublicExponent = errors.New("staking: invalid RSA public exponent") + ErrRSAModulusNotPositive = errors.New("staking: RSA modulus is not a positive number") + ErrUnsupportedRSAModulusBitLen = errors.New("staking: unsupported RSA modulus bitlen") + ErrRSAModulusIsEven = errors.New("staking: RSA modulus is an even number") + ErrUnsupportedRSAPublicExponent = errors.New("staking: unsupported RSA public exponent") 
+ ErrFailedUnmarshallingEllipticCurvePoint = errors.New("staking: failed to unmarshal elliptic curve point") + ErrUnknownPublicKeyAlgorithm = errors.New("staking: unknown public key algorithm") +) + +// ParseCertificate parses a single certificate from the given ASN.1 DER data. +// +// TODO: Remove after v1.11.x activates. +func ParseCertificate(der []byte) (*Certificate, error) { + x509Cert, err := x509.ParseCertificate(der) + if err != nil { + return nil, err + } + stakingCert := CertificateFromX509(x509Cert) + return stakingCert, ValidateCertificate(stakingCert) +} + +// ParseCertificatePermissive parses a single certificate from the given ASN.1. +// +// This function does not validate that the certificate is valid to be used +// against normal TLS implementations. +// +// Ref: https://github.com/golang/go/blob/go1.19.12/src/crypto/x509/parser.go#L789-L968 +func ParseCertificatePermissive(bytes []byte) (*Certificate, error) { + if len(bytes) > MaxCertificateLen { + return nil, ErrCertificateTooLarge + } + + input := cryptobyte.String(bytes) + // Consume the length and tag bytes. + if !input.ReadASN1(&input, cryptobyte_asn1.SEQUENCE) { + return nil, ErrMalformedCertificate + } + + // Read the "to be signed" certificate into input. + if !input.ReadASN1(&input, cryptobyte_asn1.SEQUENCE) { + return nil, ErrMalformedTBSCertificate + } + if !input.SkipOptionalASN1(cryptobyte_asn1.Tag(0).Constructed().ContextSpecific()) { + return nil, ErrMalformedVersion + } + if !input.SkipASN1(cryptobyte_asn1.INTEGER) { + return nil, ErrMalformedSerialNumber + } + if !input.SkipASN1(cryptobyte_asn1.SEQUENCE) { + return nil, ErrMalformedSignatureAlgorithmIdentifier + } + if !input.SkipASN1(cryptobyte_asn1.SEQUENCE) { + return nil, ErrMalformedIssuer + } + if !input.SkipASN1(cryptobyte_asn1.SEQUENCE) { + return nil, ErrMalformedValidity + } + if !input.SkipASN1(cryptobyte_asn1.SEQUENCE) { + return nil, ErrMalformedIssuer + } + + // Read the "subject public key info" into input. 
+ if !input.ReadASN1(&input, cryptobyte_asn1.SEQUENCE) { + return nil, ErrMalformedSPKI + } + + // Read the public key algorithm identifier. + var pkAISeq cryptobyte.String + if !input.ReadASN1(&pkAISeq, cryptobyte_asn1.SEQUENCE) { + return nil, ErrMalformedPublicKeyAlgorithmIdentifier + } + var pkAI asn1.ObjectIdentifier + if !pkAISeq.ReadASN1ObjectIdentifier(&pkAI) { + return nil, ErrMalformedOID + } + + // Note: Unlike the x509 package, we require parsing the public key. + + var spk asn1.BitString + if !input.ReadASN1BitString(&spk) { + return nil, ErrMalformedSubjectPublicKey + } + publicKey, signatureAlgorithm, err := parsePublicKey(pkAI, spk) + return &Certificate{ + Raw: bytes, + SignatureAlgorithm: signatureAlgorithm, + PublicKey: publicKey, + }, err +} + +// Ref: https://github.com/golang/go/blob/go1.19.12/src/crypto/x509/parser.go#L215-L306 +func parsePublicKey(oid asn1.ObjectIdentifier, publicKey asn1.BitString) (crypto.PublicKey, x509.SignatureAlgorithm, error) { + der := cryptobyte.String(publicKey.RightAlign()) + switch { + case oid.Equal(oidPublicKeyRSA): + pub := &rsa.PublicKey{N: new(big.Int)} + if !der.ReadASN1(&der, cryptobyte_asn1.SEQUENCE) { + return nil, 0, ErrInvalidRSAPublicKey + } + if !der.ReadASN1Integer(pub.N) { + return nil, 0, ErrInvalidRSAModulus + } + if !der.ReadASN1Integer(&pub.E) { + return nil, 0, ErrInvalidRSAPublicExponent + } + + if pub.N.Sign() <= 0 { + return nil, 0, ErrRSAModulusNotPositive + } + + if bitLen := pub.N.BitLen(); bitLen != allowedRSALargeModulusLen && bitLen != allowedRSASmallModulusLen { + return nil, 0, fmt.Errorf("%w: %d", ErrUnsupportedRSAModulusBitLen, bitLen) + } + if pub.N.Bit(0) == 0 { + return nil, 0, ErrRSAModulusIsEven + } + if pub.E != allowedRSAPublicExponentValue { + return nil, 0, fmt.Errorf("%w: %d", ErrUnsupportedRSAPublicExponent, pub.E) + } + return pub, x509.SHA256WithRSA, nil + case oid.Equal(oidPublicKeyECDSA): + namedCurve := elliptic.P256() + x, y := elliptic.Unmarshal(namedCurve, der) 
+ if x == nil { + return nil, 0, ErrFailedUnmarshallingEllipticCurvePoint + } + return &ecdsa.PublicKey{ + Curve: namedCurve, + X: x, + Y: y, + }, x509.ECDSAWithSHA256, nil + default: + return nil, 0, ErrUnknownPublicKeyAlgorithm + } +} diff --git a/avalanchego/staking/parse_test.go b/avalanchego/staking/parse_test.go new file mode 100644 index 00000000..60f6ee8f --- /dev/null +++ b/avalanchego/staking/parse_test.go @@ -0,0 +1,89 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package staking + +import ( + "testing" + + "github.com/stretchr/testify/require" + + _ "embed" +) + +var ( + //go:embed large_rsa_key.cert + largeRSAKeyCert []byte + + parsers = []struct { + name string + parse func([]byte) (*Certificate, error) + }{ + { + name: "ParseCertificate", + parse: ParseCertificate, + }, + { + name: "ParseCertificatePermissive", + parse: ParseCertificatePermissive, + }, + } +) + +func TestParseCheckLargeCert(t *testing.T) { + for _, parser := range parsers { + t.Run(parser.name, func(t *testing.T) { + _, err := parser.parse(largeRSAKeyCert) + require.ErrorIs(t, err, ErrCertificateTooLarge) + }) + } +} + +func BenchmarkParse(b *testing.B) { + tlsCert, err := NewTLSCert() + require.NoError(b, err) + + bytes := tlsCert.Leaf.Raw + for _, parser := range parsers { + b.Run(parser.name, func(b *testing.B) { + for i := 0; i < b.N; i++ { + _, err = parser.parse(bytes) + require.NoError(b, err) + } + }) + } +} + +func FuzzParseCertificate(f *testing.F) { + tlsCert, err := NewTLSCert() + require.NoError(f, err) + + f.Add(tlsCert.Leaf.Raw) + f.Add(largeRSAKeyCert) + f.Fuzz(func(t *testing.T, certBytes []byte) { + require := require.New(t) + + // Verify that any certificate that can be parsed by ParseCertificate + // can also be parsed by ParseCertificatePermissive. 
+ { + strictCert, err := ParseCertificate(certBytes) + if err == nil { + permissiveCert, err := ParseCertificatePermissive(certBytes) + require.NoError(err) + require.Equal(strictCert, permissiveCert) + } + } + + // Verify that any certificate that can't be parsed by + // ParseCertificatePermissive also can't be parsed by ParseCertificate. + { + cert, err := ParseCertificatePermissive(certBytes) + if err == nil { + require.NoError(ValidateCertificate(cert)) + } else { + _, err = ParseCertificate(certBytes) + require.Error(err) //nolint:forbidigo + } + } + }) +} diff --git a/avalanchego/staking/tls.go b/avalanchego/staking/tls.go index 6a6a4640..fbb5d9e4 100644 --- a/avalanchego/staking/tls.go +++ b/avalanchego/staking/tls.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package staking @@ -10,7 +10,6 @@ import ( "crypto/tls" "crypto/x509" "encoding/pem" - "errors" "fmt" "math/big" "os" @@ -18,11 +17,8 @@ import ( "time" "github.com/ava-labs/avalanchego/utils/perms" - "github.com/ava-labs/avalanchego/utils/set" ) -var errDuplicateExtension = errors.New("duplicate certificate extension") - // InitNodeStakingKeyPair generates a self-signed TLS key/cert pair to use in // staking. The key and files will be placed at [keyPath] and [certPath], // respectively. If there is already a file at [keyPath], returns nil. 
@@ -87,7 +83,7 @@ func LoadTLSCertFromBytes(keyBytes, certBytes []byte) (*tls.Certificate, error) if err != nil { return nil, fmt.Errorf("failed parsing cert: %w", err) } - return &cert, VerifyCertificate(cert.Leaf) + return &cert, nil } func LoadTLSCertFromFiles(keyPath, certPath string) (*tls.Certificate, error) { @@ -99,7 +95,7 @@ func LoadTLSCertFromFiles(keyPath, certPath string) (*tls.Certificate, error) { if err != nil { return nil, fmt.Errorf("failed parsing cert: %w", err) } - return &cert, VerifyCertificate(cert.Leaf) + return &cert, nil } func NewTLSCert() (*tls.Certificate, error) { @@ -152,15 +148,3 @@ func NewCertAndKeyBytes() ([]byte, []byte, error) { } return certBuff.Bytes(), keyBuff.Bytes(), nil } - -func VerifyCertificate(cert *x509.Certificate) error { - extensionSet := set.NewSet[string](len(cert.Extensions)) - for _, extension := range cert.Extensions { - idStr := extension.Id.String() - if extensionSet.Contains(idStr) { - return errDuplicateExtension - } - extensionSet.Add(idStr) - } - return nil -} diff --git a/avalanchego/staking/tls_test.go b/avalanchego/staking/tls_test.go index bcab2c98..6de376c2 100644 --- a/avalanchego/staking/tls_test.go +++ b/avalanchego/staking/tls_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package staking @@ -27,6 +27,5 @@ func TestMakeKeys(t *testing.T) { sig, err := cert.PrivateKey.(crypto.Signer).Sign(rand.Reader, msgHash, crypto.SHA256) require.NoError(err) - err = cert.Leaf.CheckSignature(cert.Leaf.SignatureAlgorithm, msg, sig) - require.NoError(err) + require.NoError(cert.Leaf.CheckSignature(cert.Leaf.SignatureAlgorithm, msg, sig)) } diff --git a/avalanchego/staking/verify.go b/avalanchego/staking/verify.go new file mode 100644 index 00000000..dd425545 --- /dev/null +++ b/avalanchego/staking/verify.go @@ -0,0 +1,94 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package staking + +import ( + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rsa" + "crypto/x509" + "errors" + "fmt" +) + +var ( + ErrUnsupportedAlgorithm = errors.New("staking: cannot verify signature: unsupported algorithm") + ErrPublicKeyAlgoMismatch = errors.New("staking: signature algorithm specified different public key type") + ErrInvalidECDSAPublicKey = errors.New("staking: invalid ECDSA public key") + ErrECDSAVerificationFailure = errors.New("staking: ECDSA verification failure") +) + +// CheckSignature verifies that the signature is a valid signature over signed +// from the certificate. 
+// +// Ref: https://github.com/golang/go/blob/go1.19.12/src/crypto/x509/x509.go#L793-L797 +// Ref: https://github.com/golang/go/blob/go1.19.12/src/crypto/x509/x509.go#L816-L879 +func CheckSignature(cert *Certificate, msg []byte, signature []byte) error { + hasher := crypto.SHA256.New() + _, err := hasher.Write(msg) + if err != nil { + return err + } + hashed := hasher.Sum(nil) + + switch pub := cert.PublicKey.(type) { + case *rsa.PublicKey: + return rsa.VerifyPKCS1v15(pub, crypto.SHA256, hashed, signature) + case *ecdsa.PublicKey: + if !ecdsa.VerifyASN1(pub, hashed, signature) { + return ErrECDSAVerificationFailure + } + return nil + default: + return ErrUnsupportedAlgorithm + } +} + +// ValidateCertificate verifies that this certificate conforms to the required +// staking format assuming that it was already able to be parsed. +// +// TODO: Remove after v1.11.x activates. +func ValidateCertificate(cert *Certificate) error { + if len(cert.Raw) > MaxCertificateLen { + return ErrCertificateTooLarge + } + + pubkeyAlgo, ok := signatureAlgorithmVerificationDetails[cert.SignatureAlgorithm] + if !ok { + return ErrUnsupportedAlgorithm + } + + switch pub := cert.PublicKey.(type) { + case *rsa.PublicKey: + if pubkeyAlgo != x509.RSA { + return signaturePublicKeyAlgoMismatchError(pubkeyAlgo, pub) + } + if bitLen := pub.N.BitLen(); bitLen != allowedRSALargeModulusLen && bitLen != allowedRSASmallModulusLen { + return fmt.Errorf("%w: %d", ErrUnsupportedRSAModulusBitLen, bitLen) + } + if pub.N.Bit(0) == 0 { + return ErrRSAModulusIsEven + } + if pub.E != allowedRSAPublicExponentValue { + return fmt.Errorf("%w: %d", ErrUnsupportedRSAPublicExponent, pub.E) + } + return nil + case *ecdsa.PublicKey: + if pubkeyAlgo != x509.ECDSA { + return signaturePublicKeyAlgoMismatchError(pubkeyAlgo, pub) + } + if pub.Curve != elliptic.P256() { + return ErrInvalidECDSAPublicKey + } + return nil + default: + return ErrUnsupportedAlgorithm + } +} + +// Ref: 
https://github.com/golang/go/blob/go1.19.12/src/crypto/x509/x509.go#L812-L814 +func signaturePublicKeyAlgoMismatchError(expectedPubKeyAlgo x509.PublicKeyAlgorithm, pubKey any) error { + return fmt.Errorf("%w: expected an %s public key, but have public key of type %T", ErrPublicKeyAlgoMismatch, expectedPubKeyAlgo, pubKey) +} diff --git a/avalanchego/subnets/config.go b/avalanchego/subnets/config.go index cdec292c..9a12c550 100644 --- a/avalanchego/subnets/config.go +++ b/avalanchego/subnets/config.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package subnets @@ -9,22 +9,22 @@ import ( "time" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/consensus/avalanche" + "github.com/ava-labs/avalanchego/snow/consensus/snowball" "github.com/ava-labs/avalanchego/utils/set" ) var errAllowedNodesWhenNotValidatorOnly = errors.New("allowedNodes can only be set when ValidatorOnly is true") type GossipConfig struct { - AcceptedFrontierValidatorSize uint `json:"gossipAcceptedFrontierValidatorSize" yaml:"gossipAcceptedFrontierValidatorSize"` + AcceptedFrontierValidatorSize uint `json:"gossipAcceptedFrontierValidatorSize" yaml:"gossipAcceptedFrontierValidatorSize"` AcceptedFrontierNonValidatorSize uint `json:"gossipAcceptedFrontierNonValidatorSize" yaml:"gossipAcceptedFrontierNonValidatorSize"` - AcceptedFrontierPeerSize uint `json:"gossipAcceptedFrontierPeerSize" yaml:"gossipAcceptedFrontierPeerSize"` - OnAcceptValidatorSize uint `json:"gossipOnAcceptValidatorSize" yaml:"gossipOnAcceptValidatorSize"` - OnAcceptNonValidatorSize uint `json:"gossipOnAcceptNonValidatorSize" yaml:"gossipOnAcceptNonValidatorSize"` - OnAcceptPeerSize uint `json:"gossipOnAcceptPeerSize" yaml:"gossipOnAcceptPeerSize"` - AppGossipValidatorSize uint `json:"appGossipValidatorSize" yaml:"appGossipValidatorSize"` - AppGossipNonValidatorSize 
uint `json:"appGossipNonValidatorSize" yaml:"appGossipNonValidatorSize"` - AppGossipPeerSize uint `json:"appGossipPeerSize" yaml:"appGossipPeerSize"` + AcceptedFrontierPeerSize uint `json:"gossipAcceptedFrontierPeerSize" yaml:"gossipAcceptedFrontierPeerSize"` + OnAcceptValidatorSize uint `json:"gossipOnAcceptValidatorSize" yaml:"gossipOnAcceptValidatorSize"` + OnAcceptNonValidatorSize uint `json:"gossipOnAcceptNonValidatorSize" yaml:"gossipOnAcceptNonValidatorSize"` + OnAcceptPeerSize uint `json:"gossipOnAcceptPeerSize" yaml:"gossipOnAcceptPeerSize"` + AppGossipValidatorSize uint `json:"appGossipValidatorSize" yaml:"appGossipValidatorSize"` + AppGossipNonValidatorSize uint `json:"appGossipNonValidatorSize" yaml:"appGossipNonValidatorSize"` + AppGossipPeerSize uint `json:"appGossipPeerSize" yaml:"appGossipPeerSize"` } type Config struct { @@ -37,21 +37,37 @@ type Config struct { ValidatorOnly bool `json:"validatorOnly" yaml:"validatorOnly"` // AllowedNodes is the set of node IDs that are explicitly allowed to connect to this Subnet when // ValidatorOnly is enabled. - AllowedNodes set.Set[ids.NodeID] `json:"allowedNodes" yaml:"allowedNodes"` - ConsensusParameters avalanche.Parameters `json:"consensusParameters" yaml:"consensusParameters"` + AllowedNodes set.Set[ids.NodeID] `json:"allowedNodes" yaml:"allowedNodes"` + ConsensusParameters snowball.Parameters `json:"consensusParameters" yaml:"consensusParameters"` // ProposerMinBlockDelay is the minimum delay this node will enforce when // building a snowman++ block. + // // TODO: Remove this flag once all VMs throttle their own block production. 
ProposerMinBlockDelay time.Duration `json:"proposerMinBlockDelay" yaml:"proposerMinBlockDelay"` - - // See comment on [MinPercentConnectedStakeHealthy] in platformvm.Config - MinPercentConnectedStakeHealthy float64 `json:"minPercentConnectedStakeHealthy" yaml:"minPercentConnectedStakeHealthy"` + // ProposerNumHistoricalBlocks is the number of historical snowman++ blocks + // this node will index per chain. If set to 0, the node will index all + // snowman++ blocks. + // + // Note: The last accepted block is not considered a historical block. This + // prevents the user from only storing the last accepted block, which can + // never be safe due to the non-atomic commits between the proposervm + // database and the innerVM's database. + // + // Invariant: This value must be set such that the proposervm never needs to + // rollback more blocks than have been deleted. On startup, the proposervm + // rolls back its accepted chain to match the innerVM's accepted chain. If + // the innerVM is not persisting its last accepted block quickly enough, the + // database can become corrupted. + // + // TODO: Move this flag once the proposervm is configurable on a per-chain + // basis. + ProposerNumHistoricalBlocks uint64 `json:"proposerNumHistoricalBlocks" yaml:"proposerNumHistoricalBlocks"` } func (c *Config) Valid() error { - if err := c.ConsensusParameters.Valid(); err != nil { - return fmt.Errorf("consensus parameters are invalid: %w", err) + if err := c.ConsensusParameters.Verify(); err != nil { + return fmt.Errorf("consensus %w", err) } if !c.ValidatorOnly && c.AllowedNodes.Len() > 0 { return errAllowedNodesWhenNotValidatorOnly diff --git a/avalanchego/subnets/config_test.go b/avalanchego/subnets/config_test.go index f745a4f4..fdb10c4e 100644 --- a/avalanchego/subnets/config_test.go +++ b/avalanchego/subnets/config_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. package subnets @@ -9,52 +9,46 @@ import ( "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/consensus/avalanche" "github.com/ava-labs/avalanchego/snow/consensus/snowball" "github.com/ava-labs/avalanchego/utils/set" ) -var validParameters = avalanche.Parameters{ - Parents: 2, - BatchSize: 1, - Parameters: snowball.Parameters{ - K: 1, - Alpha: 1, - BetaVirtuous: 1, - BetaRogue: 1, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - }, +var validParameters = snowball.Parameters{ + K: 1, + AlphaPreference: 1, + AlphaConfidence: 1, + BetaVirtuous: 1, + BetaRogue: 1, + ConcurrentRepolls: 1, + OptimalProcessing: 1, + MaxOutstandingItems: 1, + MaxItemProcessingTime: 1, } func TestValid(t *testing.T) { tests := []struct { - name string - s Config - err string + name string + s Config + expectedErr error }{ { name: "invalid consensus parameters", s: Config{ - ConsensusParameters: avalanche.Parameters{ - Parameters: snowball.Parameters{ - K: 2, - Alpha: 1, - }, + ConsensusParameters: snowball.Parameters{ + K: 2, + AlphaPreference: 1, }, }, - err: "consensus parameters are invalid", + expectedErr: snowball.ErrParametersInvalid, }, { name: "invalid allowed node IDs", s: Config{ - AllowedNodes: set.Set[ids.NodeID]{ids.GenerateTestNodeID(): struct{}{}}, + AllowedNodes: set.Of(ids.GenerateTestNodeID()), ValidatorOnly: false, ConsensusParameters: validParameters, }, - err: errAllowedNodesWhenNotValidatorOnly.Error(), + expectedErr: errAllowedNodesWhenNotValidatorOnly, }, { name: "valid", @@ -62,16 +56,13 @@ func TestValid(t *testing.T) { ConsensusParameters: validParameters, ValidatorOnly: false, }, + expectedErr: nil, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { err := tt.s.Valid() - if tt.err != "" { - require.ErrorContains(t, err, tt.err) - } else { - require.NoError(t, err) - } + 
require.ErrorIs(t, err, tt.expectedErr) }) } } diff --git a/avalanchego/subnets/no_op_allower.go b/avalanchego/subnets/no_op_allower.go index 9d2d51ea..9cb7115e 100644 --- a/avalanchego/subnets/no_op_allower.go +++ b/avalanchego/subnets/no_op_allower.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package subnets diff --git a/avalanchego/subnets/subnet.go b/avalanchego/subnets/subnet.go index 31bc9dcb..95425ba3 100644 --- a/avalanchego/subnets/subnet.go +++ b/avalanchego/subnets/subnet.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package subnets diff --git a/avalanchego/subnets/subnet_test.go b/avalanchego/subnets/subnet_test.go index 3ae42e6f..3a816a15 100644 --- a/avalanchego/subnets/subnet_test.go +++ b/avalanchego/subnets/subnet_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package subnets @@ -61,9 +61,7 @@ func TestIsAllowed(t *testing.T) { allowedNodeID := ids.GenerateTestNodeID() s = New(myNodeID, Config{ ValidatorOnly: true, - AllowedNodes: set.Set[ids.NodeID]{ - allowedNodeID: struct{}{}, - }, + AllowedNodes: set.Of(allowedNodeID), }) require.True(s.IsAllowed(allowedNodeID, true), "Validator should be allowed with validator only rules and allowed nodes") require.True(s.IsAllowed(myNodeID, false), "Self node should be allowed with validator only rules") diff --git a/avalanchego/tests/colors.go b/avalanchego/tests/colors.go index 4c50eb3a..84b40656 100644 --- a/avalanchego/tests/colors.go +++ b/avalanchego/tests/colors.go @@ -1,12 +1,12 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
+// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package tests import ( - "fmt" - "github.com/onsi/ginkgo/v2/formatter" + + ginkgo "github.com/onsi/ginkgo/v2" ) // Outputs to stdout. @@ -20,5 +20,11 @@ import ( // for an exhaustive list of color options. func Outf(format string, args ...interface{}) { s := formatter.F(format, args...) - fmt.Fprint(formatter.ColorableStdOut, s) + // Use GinkgoWriter to ensure that output from this function is + // printed sequentially within other test output produced with + // GinkgoWriter (e.g. `STEP:...`) when tests are run in + // parallel. ginkgo collects and writes stdout separately from + // GinkgoWriter during parallel execution and the resulting output + // can be confusing. + ginkgo.GinkgoWriter.Print(s) } diff --git a/avalanchego/tests/e2e/README.md b/avalanchego/tests/e2e/README.md index 325c334c..50ab608a 100644 --- a/avalanchego/tests/e2e/README.md +++ b/avalanchego/tests/e2e/README.md @@ -1,6 +1,6 @@ # Avalanche e2e test suites -- Works for any environments (e.g., local, test network). +- Works with fixture-managed temporary networks. - Compiles to a single binary with customizable configurations. ## Running tests @@ -11,12 +11,30 @@ ACK_GINKGO_RC=true ginkgo build ./tests/e2e ./tests/e2e/e2e.test --help ./tests/e2e/e2e.test \ ---network-runner-grpc-endpoint="0.0.0.0:12340" \ --avalanchego-path=./build/avalanchego ``` See [`tests.e2e.sh`](../../scripts/tests.e2e.sh) for an example. +### Filtering test execution with labels + +In cases where a change can be verified against only a subset of +tests, it is possible to filter the tests that will be executed by the +declarative labels that have been applied to them. Available labels +are defined as constants in [`describe.go`](./describe.go) with names +of the form `*Label`. 
The following example runs only those tests that +primarily target the X-Chain: + + +```bash +./tests/e2e/e2e.test \ + --avalanchego-path=./build/avalanchego \ + --ginkgo.label-filter=x +``` + +The ginkgo docs provide further detail on [how to compose label +queries](https://onsi.github.io/ginkgo/#spec-labels). + ## Adding tests Define any flags/configurations in [`e2e.go`](./e2e.go). @@ -24,13 +42,70 @@ Define any flags/configurations in [`e2e.go`](./e2e.go). Create a new package to implement feature-specific tests, or add tests to an existing package. For example: ``` -. +tests └── e2e ├── README.md ├── e2e.go ├── e2e_test.go - └── ping - └── suites.go + └── x + └── transfer.go + └── virtuous.go ``` -`e2e.go` defines common configurations (e.g., network-runner client) for other test packages. `ping/suites.go` defines ping tests, annotated by `[Ping]`, which can be selected by `./tests/e2e/e2e.test --ginkgo.focus "\[Local\] \[Ping\]"`. +`e2e.go` defines common configuration for other test +packages. `x/transfer/virtuous.go` defines X-Chain transfer tests, +labeled with `x`, which can be selected by `./tests/e2e/e2e.test +--ginkgo.label-filter "x"`. + +## Testing against an existing network + +By default, a new temporary test network will be started before each +test run and stopped at the end of the run. When developing e2e tests, +it may be helpful to create a temporary network that can be used +across multiple test runs. This can increase the speed of iteration by +removing the requirement to start a new network for every invocation +of the test under development. + +To create a temporary network for use across test runs: + +```bash +# From the root of the avalanchego repo + +# Build the tmpnetctl binary +$ ./scripts/build_tmpnetctl.sh + +# Start a new network +$ ./build/tmpnetctl start-network --avalanchego-path=/path/to/avalanchego +... 
+Started network 1000 @ /home/me/.tmpnet/networks/1000 + +Configure tmpnetctl and the test suite to target this network by default +with one of the following statements: + - source /home/me/.tmpnet/networks/1000/network.env + - export TMPNET_NETWORK_DIR=/home/me/.tmpnet/networks/1000 + - export TMPNET_NETWORK_DIR=/home/me/.tmpnet/networks/latest + +# Start a new test run using the existing network +ginkgo -v ./tests/e2e -- \ + --avalanchego-path=/path/to/avalanchego \ + --ginkgo.focus-file=[name of file containing test] \ + --use-existing-network \ + --network-dir=/path/to/network + +# It is also possible to set the AVALANCHEGO_PATH env var instead of supplying --avalanchego-path +# and to set TMPNET_NETWORK_DIR instead of supplying --network-dir. +``` + +See the tmpnet fixture [README](../fixture/tmpnet/README.md) for more details. + +## Skipping bootstrap checks + +By default many tests will attempt to bootstrap a new node with the +post-test network state. While this is a valuable activity to perform +in CI, it can add considerable latency to test development. To disable +these bootstrap checks during development, set the +`E2E_SKIP_BOOTSTRAP_CHECKS` env var to a non-empty value: + +```bash +E2E_SKIP_BOOTSTRAP_CHECKS=1 ginkgo -v ./tests/e2e ... +``` diff --git a/avalanchego/tests/e2e/banff/suites.go b/avalanchego/tests/e2e/banff/suites.go index ee07f50c..7ac486b3 100644 --- a/avalanchego/tests/e2e/banff/suites.go +++ b/avalanchego/tests/e2e/banff/suites.go @@ -1,60 +1,31 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. // Implements tests for the banff network upgrade. 
package banff import ( - "context" + "github.com/stretchr/testify/require" - ginkgo "github.com/onsi/ginkgo/v2" - - "github.com/onsi/gomega" - - "github.com/ava-labs/avalanchego/genesis" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/tests" - "github.com/ava-labs/avalanchego/tests/e2e" + "github.com/ava-labs/avalanchego/tests/fixture/e2e" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/verify" "github.com/ava-labs/avalanchego/vms/secp256k1fx" - "github.com/ava-labs/avalanchego/wallet/subnet/primary" + + ginkgo "github.com/onsi/ginkgo/v2" ) var _ = ginkgo.Describe("[Banff]", func() { + require := require.New(ginkgo.GinkgoT()) + ginkgo.It("can send custom assets X->P and P->X", - // use this for filtering tests by labels - // ref. https://onsi.github.io/ginkgo/#spec-labels - ginkgo.Label( - "require-network-runner", - "xp", - "banff", - ), func() { - ginkgo.By("reload initial snapshot for test independence", func() { - err := e2e.Env.RestoreInitialState(true /*switchOffNetworkFirst*/) - gomega.Expect(err).Should(gomega.BeNil()) - }) - - uris := e2e.Env.GetURIs() - gomega.Expect(uris).ShouldNot(gomega.BeEmpty()) - - kc := secp256k1fx.NewKeychain(genesis.EWOQKey) - var wallet primary.Wallet - ginkgo.By("initialize wallet", func() { - walletURI := uris[0] - - // 5-second is enough to fetch initial UTXOs for test cluster in "primary.NewWallet" - ctx, cancel := context.WithTimeout(context.Background(), e2e.DefaultWalletCreationTimeout) - var err error - wallet, err = primary.NewWalletFromURI(ctx, walletURI, kc) - cancel() - gomega.Expect(err).Should(gomega.BeNil()) - - tests.Outf("{{green}}created wallet{{/}}\n") - }) + keychain := e2e.Env.NewKeychain(1) + wallet := e2e.NewWallet(keychain, e2e.Env.GetRandomNodeURI()) // Get the P-chain and the X-chain wallets pWallet := wallet.P() @@ -65,14 +36,13 @@ 
var _ = ginkgo.Describe("[Banff]", func() { owner := &secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{ - genesis.EWOQKey.PublicKey().Address(), + keychain.Keys[0].Address(), }, } var assetID ids.ID ginkgo.By("create new X-chain asset", func() { - var err error - assetID, err = xWallet.IssueCreateAssetTx( + assetTx, err := xWallet.IssueCreateAssetTx( "RnM", "RNM", 9, @@ -84,14 +54,16 @@ var _ = ginkgo.Describe("[Banff]", func() { }, }, }, + e2e.WithDefaultContext(), ) - gomega.Expect(err).Should(gomega.BeNil()) + require.NoError(err) + assetID = assetTx.ID() tests.Outf("{{green}}created new X-chain asset{{/}}: %s\n", assetID) }) ginkgo.By("export new X-chain asset to P-chain", func() { - txID, err := xWallet.IssueExportTx( + tx, err := xWallet.IssueExportTx( constants.PlatformChainID, []*avax.TransferableOutput{ { @@ -104,21 +76,26 @@ var _ = ginkgo.Describe("[Banff]", func() { }, }, }, + e2e.WithDefaultContext(), ) - gomega.Expect(err).Should(gomega.BeNil()) + require.NoError(err) - tests.Outf("{{green}}issued X-chain export{{/}}: %s\n", txID) + tests.Outf("{{green}}issued X-chain export{{/}}: %s\n", tx.ID()) }) ginkgo.By("import new asset from X-chain on the P-chain", func() { - txID, err := pWallet.IssueImportTx(xChainID, owner) - gomega.Expect(err).Should(gomega.BeNil()) + tx, err := pWallet.IssueImportTx( + xChainID, + owner, + e2e.WithDefaultContext(), + ) + require.NoError(err) - tests.Outf("{{green}}issued P-chain import{{/}}: %s\n", txID) + tests.Outf("{{green}}issued P-chain import{{/}}: %s\n", tx.ID()) }) ginkgo.By("export asset from P-chain to the X-chain", func() { - txID, err := pWallet.IssueExportTx( + tx, err := pWallet.IssueExportTx( xChainID, []*avax.TransferableOutput{ { @@ -131,17 +108,22 @@ var _ = ginkgo.Describe("[Banff]", func() { }, }, }, + e2e.WithDefaultContext(), ) - gomega.Expect(err).Should(gomega.BeNil()) + require.NoError(err) - tests.Outf("{{green}}issued P-chain export{{/}}: %s\n", txID) + tests.Outf("{{green}}issued 
P-chain export{{/}}: %s\n", tx.ID()) }) ginkgo.By("import asset from P-chain on the X-chain", func() { - txID, err := xWallet.IssueImportTx(constants.PlatformChainID, owner) - gomega.Expect(err).Should(gomega.BeNil()) + tx, err := xWallet.IssueImportTx( + constants.PlatformChainID, + owner, + e2e.WithDefaultContext(), + ) + require.NoError(err) - tests.Outf("{{green}}issued X-chain import{{/}}: %s\n", txID) + tests.Outf("{{green}}issued X-chain import{{/}}: %s\n", tx.ID()) }) }) }) diff --git a/avalanchego/tests/e2e/c/dynamic_fees.go b/avalanchego/tests/e2e/c/dynamic_fees.go new file mode 100644 index 00000000..d218f2df --- /dev/null +++ b/avalanchego/tests/e2e/c/dynamic_fees.go @@ -0,0 +1,166 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package c + +import ( + "math/big" + "strings" + + "github.com/ava-labs/coreth/core/types" + "github.com/ava-labs/coreth/params" + "github.com/ava-labs/coreth/plugin/evm" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/tests" + "github.com/ava-labs/avalanchego/tests/fixture/e2e" + "github.com/ava-labs/avalanchego/tests/fixture/tmpnet" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + + ginkgo "github.com/onsi/ginkgo/v2" +) + +// This test uses the compiled bin for `hashing.sol` as +// well as its ABI contained in `hashing_contract.go`. + +var _ = e2e.DescribeCChain("[Dynamic Fees]", func() { + require := require.New(ginkgo.GinkgoT()) + + // Need a gas limit much larger than the standard 21_000 to enable + // the contract to induce a gas price increase + const largeGasLimit = uint64(8_000_000) + + // TODO(marun) What is the significance of this value? 
+ gasTip := big.NewInt(1000 * params.GWei) + + ginkgo.It("should ensure that the gas price is affected by load", func() { + ginkgo.By("creating a new private network to ensure isolation from other tests") + privateNetwork := e2e.Env.NewPrivateNetwork() + + ginkgo.By("allocating a pre-funded key") + key := privateNetwork.PreFundedKeys[0] + ethAddress := evm.GetEthAddress(key) + + ginkgo.By("initializing a coreth client") + node := privateNetwork.Nodes[0] + nodeURI := tmpnet.NodeURI{ + NodeID: node.NodeID, + URI: node.URI, + } + ethClient := e2e.NewEthClient(nodeURI) + + ginkgo.By("initializing a transaction signer") + cChainID, err := ethClient.ChainID(e2e.DefaultContext()) + require.NoError(err) + signer := types.NewEIP155Signer(cChainID) + ecdsaKey := key.ToECDSA() + sign := func(tx *types.Transaction) *types.Transaction { + signedTx, err := types.SignTx(tx, signer, ecdsaKey) + require.NoError(err) + return signedTx + } + + var contractAddress common.Address + ginkgo.By("deploying an expensive contract", func() { + // Create transaction + nonce, err := ethClient.AcceptedNonceAt(e2e.DefaultContext(), ethAddress) + require.NoError(err) + compiledContract := common.Hex2Bytes(hashingCompiledContract) + tx := types.NewTx(&types.LegacyTx{ + Nonce: nonce, + GasPrice: gasTip, + Gas: largeGasLimit, + Value: common.Big0, + Data: compiledContract, + }) + + // Send the transaction and wait for acceptance + signedTx := sign(tx) + receipt := e2e.SendEthTransaction(ethClient, signedTx) + + contractAddress = receipt.ContractAddress + }) + + var gasPrice *big.Int + ginkgo.By("calling the expensive contract repeatedly until a gas price increase is detected", func() { + // Evaluate the bytes representation of the contract + hashingABI, err := abi.JSON(strings.NewReader(hashingABIJson)) + require.NoError(err) + contractData, err := hashingABI.Pack("hashIt") + require.NoError(err) + + var initialGasPrice *big.Int + e2e.Eventually(func() bool { + // Check the gas price + var err error 
+ gasPrice, err = ethClient.SuggestGasPrice(e2e.DefaultContext()) + require.NoError(err) + if initialGasPrice == nil { + initialGasPrice = gasPrice + tests.Outf("{{blue}}initial gas price is %v{{/}}\n", initialGasPrice) + } else if gasPrice.Cmp(initialGasPrice) > 0 { + // Gas price has increased + tests.Outf("{{blue}}gas price has increased to %v{{/}}\n", gasPrice) + return true + } + + // Create the transaction + nonce, err := ethClient.AcceptedNonceAt(e2e.DefaultContext(), ethAddress) + require.NoError(err) + tx := types.NewTx(&types.LegacyTx{ + Nonce: nonce, + GasPrice: gasTip, + Gas: largeGasLimit, + To: &contractAddress, + Value: common.Big0, + Data: contractData, + }) + + // Send the transaction and wait for acceptance + signedTx := sign(tx) + _ = e2e.SendEthTransaction(ethClient, signedTx) + + // The gas price will be checked at the start of the next iteration + return false + }, e2e.DefaultTimeout, e2e.DefaultPollingInterval, "failed to see gas price increase before timeout") + }) + + ginkgo.By("waiting for the gas price to decrease...", func() { + initialGasPrice := gasPrice + e2e.Eventually(func() bool { + var err error + gasPrice, err = ethClient.SuggestGasPrice(e2e.DefaultContext()) + require.NoError(err) + tests.Outf("{{blue}}.{{/}}") + return initialGasPrice.Cmp(gasPrice) > 0 + }, e2e.DefaultTimeout, e2e.DefaultPollingInterval, "failed to see gas price decrease before timeout") + tests.Outf("\n{{blue}}gas price has decreased to %v{{/}}\n", gasPrice) + }) + + ginkgo.By("sending funds at the current gas price", func() { + // Create a recipient address + recipientKey, err := secp256k1.NewPrivateKey() + require.NoError(err) + recipientEthAddress := evm.GetEthAddress(recipientKey) + + // Create transaction + nonce, err := ethClient.AcceptedNonceAt(e2e.DefaultContext(), ethAddress) + require.NoError(err) + tx := types.NewTx(&types.LegacyTx{ + Nonce: nonce, + GasPrice: gasPrice, + Gas: e2e.DefaultGasLimit, + To: &recipientEthAddress, + Value: common.Big0, + 
}) + + // Send the transaction and wait for acceptance + signedTx := sign(tx) + _ = e2e.SendEthTransaction(ethClient, signedTx) + }) + + e2e.CheckBootstrapIsPossible(privateNetwork) + }) +}) diff --git a/avalanchego/tests/e2e/c/hashing.sol b/avalanchego/tests/e2e/c/hashing.sol new file mode 100644 index 00000000..0457ac42 --- /dev/null +++ b/avalanchego/tests/e2e/c/hashing.sol @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: MIT + +pragma solidity = 0.8.6; + +contract ConsumeGas { + + bytes hashVar = bytes("This is the hashing text for the test"); + + function hashIt() public { + for (uint i=0; i<3700; i++) { + ripemd160(hashVar); + } + } + +} diff --git a/avalanchego/tests/e2e/c/hashing_contract.go b/avalanchego/tests/e2e/c/hashing_contract.go new file mode 100644 index 00000000..7bf1db76 --- /dev/null +++ b/avalanchego/tests/e2e/c/hashing_contract.go @@ -0,0 +1,11 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +// AUTOMATICALLY GENERATED. DO NOT EDIT! 
+// Generated from hashing.sol by compile-contract.sh +package c + +const ( + hashingCompiledContract = "60806040526040518060600160405280602581526020016103ba6025913960009080519060200190610032929190610045565b5034801561003f57600080fd5b50610149565b828054610051906100e8565b90600052602060002090601f01602090048101928261007357600085556100ba565b82601f1061008c57805160ff19168380011785556100ba565b828001600101855582156100ba579182015b828111156100b957825182559160200191906001019061009e565b5b5090506100c791906100cb565b5090565b5b808211156100e45760008160009055506001016100cc565b5090565b6000600282049050600182168061010057607f821691505b602082108114156101145761011361011a565b5b50919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052602260045260246000fd5b610262806101586000396000f3fe608060405234801561001057600080fd5b506004361061002b5760003560e01c80636f37ecea14610030575b600080fd5b61003861003a565b005b60005b610e7481101561009057600360006040516100589190610112565b602060405180830381855afa158015610075573d6000803e3d6000fd5b5050506040515150808061008890610185565b91505061003d565b50565b600081546100a081610153565b6100aa818661013e565b945060018216600081146100c557600181146100d657610109565b60ff19831686528186019350610109565b6100df85610129565b60005b83811015610101578154818901526001820191506020810190506100e2565b838801955050505b50505092915050565b600061011e8284610093565b915081905092915050565b60008190508160005260206000209050919050565b600081905092915050565b6000819050919050565b6000600282049050600182168061016b57607f821691505b6020821081141561017f5761017e6101fd565b5b50919050565b600061019082610149565b91507fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8214156101c3576101c26101ce565b5b600182019050919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052602260045260246000fdfea2646970667358221220cc5de5cd3c7aa5bda60e63e0f3156691253f7a78191eb336ec6699b38a8a777c64736f6c6
343000806003354686973206973207468652068617368696e67207465787420666f72207468652074657374" + hashingABIJson = `[{"inputs":[],"name":"hashIt","outputs":[],"stateMutability":"nonpayable","type":"function"}]` +) diff --git a/avalanchego/tests/e2e/c/interchain_workflow.go b/avalanchego/tests/e2e/c/interchain_workflow.go new file mode 100644 index 00000000..cf8437b7 --- /dev/null +++ b/avalanchego/tests/e2e/c/interchain_workflow.go @@ -0,0 +1,164 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package c + +import ( + "math/big" + + "github.com/ava-labs/coreth/core/types" + "github.com/ava-labs/coreth/plugin/evm" + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/tests/fixture/e2e" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/utils/units" + "github.com/ava-labs/avalanchego/vms/secp256k1fx" + "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" + + ginkgo "github.com/onsi/ginkgo/v2" +) + +var _ = e2e.DescribeCChain("[Interchain Workflow]", func() { + require := require.New(ginkgo.GinkgoT()) + + const txAmount = 10 * units.Avax // Arbitrary amount to send and transfer + + ginkgo.It("should ensure that funds can be transferred from the C-Chain to the X-Chain and the P-Chain", func() { + ginkgo.By("initializing a new eth client") + // Select a random node URI to use for both the eth client and + // the wallet to avoid having to verify that all nodes are at + // the same height before initializing the wallet. 
+ nodeURI := e2e.Env.GetRandomNodeURI() + ethClient := e2e.NewEthClient(nodeURI) + + ginkgo.By("allocating a pre-funded key to send from and a recipient key to deliver to") + senderKey := e2e.Env.AllocatePreFundedKey() + senderEthAddress := evm.GetEthAddress(senderKey) + recipientKey, err := secp256k1.NewPrivateKey() + require.NoError(err) + recipientEthAddress := evm.GetEthAddress(recipientKey) + + ginkgo.By("sending funds from one address to another on the C-Chain", func() { + // Create transaction + acceptedNonce, err := ethClient.AcceptedNonceAt(e2e.DefaultContext(), senderEthAddress) + require.NoError(err) + gasPrice := e2e.SuggestGasPrice(ethClient) + tx := types.NewTransaction( + acceptedNonce, + recipientEthAddress, + big.NewInt(int64(txAmount)), + e2e.DefaultGasLimit, + gasPrice, + nil, + ) + + // Sign transaction + cChainID, err := ethClient.ChainID(e2e.DefaultContext()) + require.NoError(err) + signer := types.NewEIP155Signer(cChainID) + signedTx, err := types.SignTx(tx, signer, senderKey.ToECDSA()) + require.NoError(err) + + _ = e2e.SendEthTransaction(ethClient, signedTx) + + ginkgo.By("waiting for the C-Chain recipient address to have received the sent funds") + e2e.Eventually(func() bool { + balance, err := ethClient.BalanceAt(e2e.DefaultContext(), recipientEthAddress, nil) + require.NoError(err) + return balance.Cmp(big.NewInt(0)) > 0 + }, e2e.DefaultTimeout, e2e.DefaultPollingInterval, "failed to see funds delivered before timeout") + }) + + // Wallet must be initialized after sending funds on the + // C-Chain with the same node URI to ensure wallet state + // matches on-chain state. 
+ ginkgo.By("initializing a keychain and associated wallet") + keychain := secp256k1fx.NewKeychain(senderKey, recipientKey) + baseWallet := e2e.NewWallet(keychain, nodeURI) + xWallet := baseWallet.X() + cWallet := baseWallet.C() + pWallet := baseWallet.P() + + ginkgo.By("defining common configuration") + avaxAssetID := xWallet.AVAXAssetID() + // Use the same owner for import funds to X-Chain and P-Chain + recipientOwner := secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + recipientKey.Address(), + }, + } + // Use the same outputs for both X-Chain and P-Chain exports + exportOutputs := []*secp256k1fx.TransferOutput{ + { + Amt: txAmount, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + keychain.Keys[0].Address(), + }, + }, + }, + } + + ginkgo.By("exporting AVAX from the C-Chain to the X-Chain", func() { + _, err := cWallet.IssueExportTx( + xWallet.BlockchainID(), + exportOutputs, + e2e.WithDefaultContext(), + e2e.WithSuggestedGasPrice(ethClient), + ) + require.NoError(err) + }) + + ginkgo.By("importing AVAX from the C-Chain to the X-Chain", func() { + _, err := xWallet.IssueImportTx( + cWallet.BlockchainID(), + &recipientOwner, + e2e.WithDefaultContext(), + ) + require.NoError(err) + }) + + ginkgo.By("checking that the recipient address has received imported funds on the X-Chain", func() { + balances, err := xWallet.Builder().GetFTBalance(common.WithCustomAddresses(set.Of( + recipientKey.Address(), + ))) + require.NoError(err) + require.Positive(balances[avaxAssetID]) + }) + + ginkgo.By("exporting AVAX from the C-Chain to the P-Chain", func() { + _, err := cWallet.IssueExportTx( + constants.PlatformChainID, + exportOutputs, + e2e.WithDefaultContext(), + e2e.WithSuggestedGasPrice(ethClient), + ) + require.NoError(err) + }) + + ginkgo.By("importing AVAX from the C-Chain to the P-Chain", func() { + _, err = pWallet.IssueImportTx( + cWallet.BlockchainID(), + &recipientOwner, + e2e.WithDefaultContext(), + ) + 
require.NoError(err) + }) + + ginkgo.By("checking that the recipient address has received imported funds on the P-Chain", func() { + balances, err := pWallet.Builder().GetBalance(common.WithCustomAddresses(set.Of( + recipientKey.Address(), + ))) + require.NoError(err) + require.Positive(balances[avaxAssetID]) + }) + + e2e.CheckBootstrapIsPossible(e2e.Env.GetNetwork()) + }) +}) diff --git a/avalanchego/tests/e2e/describe.go b/avalanchego/tests/e2e/describe.go deleted file mode 100644 index b3a849a0..00000000 --- a/avalanchego/tests/e2e/describe.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package e2e - -import ( - ginkgo "github.com/onsi/ginkgo/v2" -) - -// DescribeXChain annotates the tests for X-Chain. -// Can run with any type of cluster (e.g., local, fuji, mainnet). -func DescribeXChain(text string, body func()) bool { - return ginkgo.Describe("[X-Chain] "+text, body) -} - -// DescribePChain annotates the tests for P-Chain. -// Can run with any type of cluster (e.g., local, fuji, mainnet). -func DescribePChain(text string, body func()) bool { - return ginkgo.Describe("[P-Chain] "+text, body) -} diff --git a/avalanchego/tests/e2e/e2e.go b/avalanchego/tests/e2e/e2e.go deleted file mode 100644 index d9f3425d..00000000 --- a/avalanchego/tests/e2e/e2e.go +++ /dev/null @@ -1,343 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -// e2e implements the e2e tests. 
-package e2e - -import ( - "context" - "errors" - "fmt" - "os" - "strings" - "sync" - "time" - - "github.com/onsi/gomega" - - runner_sdk "github.com/ava-labs/avalanche-network-runner-sdk" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/tests" - "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" - "github.com/ava-labs/avalanchego/vms/secp256k1fx" -) - -type ClusterType byte - -const ( - Unknown ClusterType = iota - StandAlone - PreExisting - - // Enough for primary.NewWallet to fetch initial UTXOs. - DefaultWalletCreationTimeout = 5 * time.Second - - // Defines default tx confirmation timeout. - // Enough for test/custom networks. - DefaultConfirmTxTimeout = 20 * time.Second - - DefaultShutdownTimeout = 2 * time.Minute -) - -// Env is the global struct containing all we need to test -var ( - Env = &TestEnvironment{ - testEnvironmentConfig: &testEnvironmentConfig{ - clusterType: Unknown, - }, - } - - errGRPCAndURIsSpecified = errors.New("either network-runner-grpc-endpoint or uris should be specified, not both") - errNoKeyFile = errors.New("test keys file not provided") - errUnknownClusterType = errors.New("unhandled cluster type") - errNotNetworkRunnerCLI = errors.New("not network-runner cli") -) - -type testEnvironmentConfig struct { - clusterType ClusterType - logLevel string - networkRunnerGRPCEndpoint string - avalancheGoExecPath string - avalancheGoLogLevel string - testKeysFile string - - // we snapshot initial state, right after starting cluster - // to be able to reset state if needed and isolate tests - snapshotName string -} - -type TestEnvironment struct { - *testEnvironmentConfig - - runnerMu sync.RWMutex - runnerCli runner_sdk.Client - runnerGRPCEp string - - urisMu sync.RWMutex - uris []string - - testKeysMu sync.RWMutex - testKeys []*secp256k1.PrivateKey - - snapMu sync.RWMutex - snapped bool -} - -// should be called only once -// must be called before StartCluster -// Note that either networkRunnerGRPCEp or uris 
must be specified -func (te *TestEnvironment) ConfigCluster( - logLevel string, - networkRunnerGRPCEp string, - avalancheGoExecPath string, - avalancheGoLogLevel string, - uris string, - testKeysFile string, -) error { - if avalancheGoExecPath != "" { - if _, err := os.Stat(avalancheGoExecPath); err != nil { - return fmt.Errorf("could not find avalanchego binary: %w", err) - } - } - - te.testKeysFile = testKeysFile - te.snapshotName = "ginkgo" + time.Now().String() - switch { - case networkRunnerGRPCEp != "" && len(uris) == 0: - te.clusterType = StandAlone - te.logLevel = logLevel - te.networkRunnerGRPCEndpoint = networkRunnerGRPCEp - te.avalancheGoExecPath = avalancheGoExecPath - te.avalancheGoLogLevel = avalancheGoLogLevel - - err := te.setRunnerClient(te.logLevel, te.networkRunnerGRPCEndpoint) - if err != nil { - return fmt.Errorf("could not setup network-runner client: %w", err) - } - - ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second) - presp, err := te.GetRunnerClient().Ping(ctx) - cancel() - if err != nil { - return fmt.Errorf("could not ping network-runner client: %w", err) - } - tests.Outf("{{green}}network-runner running in PID %d{{/}}\n", presp.Pid) - - // URIs will be set upon cluster start here - return nil - - case networkRunnerGRPCEp == "" && len(uris) != 0: - te.clusterType = PreExisting - uriSlice := strings.Split(uris, ",") - te.setURIs(uriSlice) - tests.Outf("{{green}}URIs:{{/}} %q\n", uriSlice) - return nil - - default: - return errGRPCAndURIsSpecified - } -} - -func (te *TestEnvironment) LoadKeys() error { - // load test keys - if len(te.testKeysFile) == 0 { - return errNoKeyFile - } - testKeys, err := tests.LoadHexTestKeys(te.testKeysFile) - if err != nil { - return fmt.Errorf("failed loading test keys: %w", err) - } - te.setTestKeys(testKeys) - return nil -} - -func (te *TestEnvironment) StartCluster() error { - switch te.clusterType { - case StandAlone: - tests.Outf("{{magenta}}starting network-runner with %q{{/}}\n", 
te.avalancheGoExecPath) - ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second) - resp, err := te.GetRunnerClient().Start(ctx, te.avalancheGoExecPath, - runner_sdk.WithNumNodes(5), - runner_sdk.WithGlobalNodeConfig(fmt.Sprintf(`{"log-level":"%s"}`, te.avalancheGoLogLevel)), - ) - cancel() - if err != nil { - return fmt.Errorf("could not start network-runner: %w", err) - } - tests.Outf("{{green}}successfully started network-runner: {{/}} %+v\n", resp.ClusterInfo.NodeNames) - - // start is async, so wait some time for cluster health - time.Sleep(time.Minute) - - ctx, cancel = context.WithTimeout(context.Background(), 2*time.Minute) - _, err = te.GetRunnerClient().Health(ctx) - cancel() - if err != nil { - return fmt.Errorf("could not check health network-runner: %w", err) - } - - return te.refreshURIs() - - case PreExisting: - return nil // nothing to do, really - - default: - return errUnknownClusterType - } -} - -func (te *TestEnvironment) refreshURIs() error { - ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) - uriSlice, err := te.GetRunnerClient().URIs(ctx) - cancel() - if err != nil { - return fmt.Errorf("could not retrieve URIs: %w", err) - } - te.setURIs(uriSlice) - tests.Outf("{{green}}URIs:{{/}} %q\n", uriSlice) - return nil -} - -func (te *TestEnvironment) setRunnerClient(logLevel string, gRPCEp string) error { - te.runnerMu.Lock() - defer te.runnerMu.Unlock() - - cli, err := runner_sdk.New(runner_sdk.Config{ - LogLevel: logLevel, - Endpoint: gRPCEp, - DialTimeout: 10 * time.Second, - }) - if err != nil { - return err - } - if te.runnerCli != nil { - te.runnerCli.Close() - } - te.runnerCli = cli - te.runnerGRPCEp = gRPCEp - return err -} - -func (te *TestEnvironment) GetRunnerClient() (cli runner_sdk.Client) { - te.runnerMu.RLock() - cli = te.runnerCli - te.runnerMu.RUnlock() - return cli -} - -func (te *TestEnvironment) closeRunnerClient() (err error) { - te.runnerMu.Lock() - err = te.runnerCli.Close() - 
te.runnerMu.Unlock() - return err -} - -func (te *TestEnvironment) GetRunnerGRPCEndpoint() (ep string) { - te.runnerMu.RLock() - ep = te.runnerGRPCEp - te.runnerMu.RUnlock() - return ep -} - -func (te *TestEnvironment) setURIs(us []string) { - te.urisMu.Lock() - te.uris = us - te.urisMu.Unlock() -} - -func (te *TestEnvironment) GetURIs() []string { - te.urisMu.RLock() - us := te.uris - te.urisMu.RUnlock() - return us -} - -func (te *TestEnvironment) setTestKeys(ks []*secp256k1.PrivateKey) { - te.testKeysMu.Lock() - te.testKeys = ks - te.testKeysMu.Unlock() -} - -func (te *TestEnvironment) GetTestKeys() ([]*secp256k1.PrivateKey, []ids.ShortID, *secp256k1fx.Keychain) { - te.testKeysMu.RLock() - testKeys := te.testKeys - te.testKeysMu.RUnlock() - testKeyAddrs := make([]ids.ShortID, len(testKeys)) - for i := range testKeyAddrs { - testKeyAddrs[i] = testKeys[i].PublicKey().Address() - } - keyChain := secp256k1fx.NewKeychain(testKeys...) - return testKeys, testKeyAddrs, keyChain -} - -func (te *TestEnvironment) ShutdownCluster() error { - if te.GetRunnerGRPCEndpoint() == "" { - // we connected directly to existing cluster - // nothing to shutdown - return nil - } - - runnerCli := te.GetRunnerClient() - if runnerCli == nil { - return errNotNetworkRunnerCLI - } - - tests.Outf("{{red}}shutting down network-runner cluster{{/}}\n") - ctx, cancel := context.WithTimeout(context.Background(), DefaultShutdownTimeout) - _, err := runnerCli.Stop(ctx) - cancel() - if err != nil { - return err - } - - tests.Outf("{{red}}shutting down network-runner client{{/}}\n") - return te.closeRunnerClient() -} - -func (te *TestEnvironment) SnapInitialState() error { - te.snapMu.RLock() - defer te.snapMu.RUnlock() - - if te.snapped { - return nil // initial state snapshot already captured - } - - ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second) - _, err := te.runnerCli.SaveSnapshot(ctx, te.snapshotName) - cancel() - if err != nil { - return err - } - te.snapped = true - 
return nil -} - -func (te *TestEnvironment) RestoreInitialState(switchOffNetworkFirst bool) error { - te.snapMu.Lock() - defer te.snapMu.Unlock() - - if switchOffNetworkFirst { - ctx, cancel := context.WithTimeout(context.Background(), DefaultShutdownTimeout) - _, err := te.GetRunnerClient().Stop(ctx) - cancel() - gomega.Expect(err).Should(gomega.BeNil()) - } - - ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second) - _, err := te.GetRunnerClient().LoadSnapshot(ctx, te.snapshotName) - cancel() - if err != nil { - return err - } - - // make sure cluster goes back to health before moving on - ctx, cancel = context.WithTimeout(context.Background(), DefaultShutdownTimeout) - _, err = te.GetRunnerClient().Health(ctx) - cancel() - if err != nil { - return fmt.Errorf("could not check health network-runner: %w", err) - } - - return te.refreshURIs() -} diff --git a/avalanchego/tests/e2e/e2e_test.go b/avalanchego/tests/e2e/e2e_test.go index 2530c2cc..3f526d84 100644 --- a/avalanchego/tests/e2e/e2e_test.go +++ b/avalanchego/tests/e2e/e2e_test.go @@ -1,25 +1,25 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package e2e_test import ( - "flag" "testing" - ginkgo "github.com/onsi/ginkgo/v2" - "github.com/onsi/gomega" - "github.com/ava-labs/avalanchego/tests/e2e" - // ensure test packages are scanned by ginkgo _ "github.com/ava-labs/avalanchego/tests/e2e/banff" + _ "github.com/ava-labs/avalanchego/tests/e2e/c" + _ "github.com/ava-labs/avalanchego/tests/e2e/faultinjection" _ "github.com/ava-labs/avalanchego/tests/e2e/p" - _ "github.com/ava-labs/avalanchego/tests/e2e/ping" - _ "github.com/ava-labs/avalanchego/tests/e2e/static-handlers" + _ "github.com/ava-labs/avalanchego/tests/e2e/x" _ "github.com/ava-labs/avalanchego/tests/e2e/x/transfer" - _ "github.com/ava-labs/avalanchego/tests/e2e/x/whitelist-vtx" + + "github.com/ava-labs/avalanchego/tests/fixture/e2e" + "github.com/ava-labs/avalanchego/tests/fixture/tmpnet" + + ginkgo "github.com/onsi/ginkgo/v2" ) func TestE2E(t *testing.T) { @@ -27,92 +27,18 @@ func TestE2E(t *testing.T) { ginkgo.RunSpecs(t, "e2e test suites") } -var ( - // helpers to parse test flags - logLevel string - - networkRunnerGRPCEp string - networkRunnerAvalancheGoExecPath string - networkRunnerAvalancheGoLogLevel string - - uris string - - testKeysFile string -) +var flagVars *e2e.FlagVars func init() { - flag.StringVar( - &logLevel, - "log-level", - "info", - "log level", - ) - - flag.StringVar( - &networkRunnerGRPCEp, - "network-runner-grpc-endpoint", - "", - "[optional] gRPC server endpoint for network-runner (only required for local network-runner tests)", - ) - flag.StringVar( - &networkRunnerAvalancheGoExecPath, - "network-runner-avalanchego-path", - "", - "[optional] avalanchego executable path (only required for local network-runner tests)", - ) - flag.StringVar( - &networkRunnerAvalancheGoLogLevel, - "network-runner-avalanchego-log-level", - "INFO", - "[optional] avalanchego log-level (only required for local network-runner tests)", - ) - - // e.g., custom network HTTP RPC endpoints - flag.StringVar( - &uris, - "uris", - "", - "HTTP RPC endpoint 
URIs for avalanche node (comma-separated, required to run against existing cluster)", - ) - - // file that contains a list of new-line separated secp256k1 private keys - flag.StringVar( - &testKeysFile, - "test-keys-file", - "", - "file that contains a list of new-line separated hex-encoded secp256k1 private keys (assume test keys are pre-funded, for test networks)", - ) + flagVars = e2e.RegisterFlags() } -var _ = ginkgo.BeforeSuite(func() { - err := e2e.Env.ConfigCluster( - logLevel, - networkRunnerGRPCEp, - networkRunnerAvalancheGoExecPath, - networkRunnerAvalancheGoLogLevel, - uris, - testKeysFile, - ) - gomega.Expect(err).Should(gomega.BeNil()) - - // check cluster can be started - err = e2e.Env.StartCluster() - gomega.Expect(err).Should(gomega.BeNil()) - - // load keys - err = e2e.Env.LoadKeys() - gomega.Expect(err).Should(gomega.BeNil()) - - // take initial snapshot. cluster will be switched off - err = e2e.Env.SnapInitialState() - gomega.Expect(err).Should(gomega.BeNil()) - - // restart cluster - err = e2e.Env.RestoreInitialState(false /*switchOffNetworkFirst*/) - gomega.Expect(err).Should(gomega.BeNil()) -}) +var _ = ginkgo.SynchronizedBeforeSuite(func() []byte { + // Run only once in the first ginkgo process + return e2e.NewTestEnvironment(flagVars, &tmpnet.Network{}).Marshal() +}, func(envBytes []byte) { + // Run in every ginkgo process -var _ = ginkgo.AfterSuite(func() { - err := e2e.Env.ShutdownCluster() - gomega.Expect(err).Should(gomega.BeNil()) + // Initialize the local test environment from the global state + e2e.InitSharedTestEnvironment(envBytes) }) diff --git a/avalanchego/tests/e2e/faultinjection/duplicate_node_id.go b/avalanchego/tests/e2e/faultinjection/duplicate_node_id.go new file mode 100644 index 00000000..288583c7 --- /dev/null +++ b/avalanchego/tests/e2e/faultinjection/duplicate_node_id.go @@ -0,0 +1,94 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package faultinjection + +import ( + "context" + "fmt" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/api/info" + "github.com/ava-labs/avalanchego/config" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/tests/fixture/e2e" + "github.com/ava-labs/avalanchego/tests/fixture/tmpnet" + "github.com/ava-labs/avalanchego/utils/set" + + ginkgo "github.com/onsi/ginkgo/v2" +) + +var _ = ginkgo.Describe("Duplicate node handling", func() { + require := require.New(ginkgo.GinkgoT()) + + ginkgo.It("should ensure that a given Node ID (i.e. staking keypair) can be used at most once on a network", func() { + network := e2e.Env.GetNetwork() + + ginkgo.By("creating new node") + node1 := e2e.AddEphemeralNode(network, tmpnet.FlagsMap{}) + e2e.WaitForHealthy(node1) + + ginkgo.By("checking that the new node is connected to its peers") + checkConnectedPeers(network.Nodes, node1) + + ginkgo.By("creating a second new node with the same staking keypair as the first new node") + node1Flags := node1.Flags + node2Flags := tmpnet.FlagsMap{ + config.StakingTLSKeyContentKey: node1Flags[config.StakingTLSKeyContentKey], + config.StakingCertContentKey: node1Flags[config.StakingCertContentKey], + // Construct a unique data dir to ensure the two nodes' data will be stored + // separately. Usually the dir name is the node ID but in this one case the nodes have + // the same node ID. 
+ config.DataDirKey: fmt.Sprintf("%s-second", node1Flags[config.DataDirKey]), + } + node2 := e2e.AddEphemeralNode(network, node2Flags) + + ginkgo.By("checking that the second new node fails to become healthy before timeout") + err := tmpnet.WaitForHealthy(e2e.DefaultContext(), node2) + require.ErrorIs(err, context.DeadlineExceeded) + + ginkgo.By("stopping the first new node") + require.NoError(node1.Stop(e2e.DefaultContext())) + + ginkgo.By("checking that the second new node becomes healthy within timeout") + e2e.WaitForHealthy(node2) + + ginkgo.By("checking that the second new node is connected to its peers") + checkConnectedPeers(network.Nodes, node2) + + // A bootstrap check was already performed by the second node. + }) +}) + +// Check that a new node is connected to existing nodes and vice versa +func checkConnectedPeers(existingNodes []*tmpnet.Node, newNode *tmpnet.Node) { + require := require.New(ginkgo.GinkgoT()) + + // Collect the node ids of the new node's peers + infoClient := info.NewClient(newNode.URI) + peers, err := infoClient.Peers(e2e.DefaultContext()) + require.NoError(err) + peerIDs := set.NewSet[ids.NodeID](len(existingNodes)) + for _, peer := range peers { + peerIDs.Add(peer.ID) + } + + for _, existingNode := range existingNodes { + // Check that the existing node is a peer of the new node + require.True(peerIDs.Contains(existingNode.NodeID)) + + // Check that the new node is a peer + infoClient := info.NewClient(existingNode.URI) + peers, err := infoClient.Peers(e2e.DefaultContext()) + require.NoError(err) + isPeer := false + for _, peer := range peers { + if peer.ID == newNode.NodeID { + isPeer = true + break + } + } + require.True(isPeer) + } +} diff --git a/avalanchego/tests/e2e/ignore.go b/avalanchego/tests/e2e/ignore.go new file mode 100644 index 00000000..ddf89c5d --- /dev/null +++ b/avalanchego/tests/e2e/ignore.go @@ -0,0 +1,13 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. 
+// See the file LICENSE for licensing terms. + +package e2e + +// This file is required by ginkgo to accurately report compilation errors in test packages. Without +// it, the following error will mask the actual errors: +// +// ``` +// Failed to compile e2e: +// +// github.com/ava-labs/avalanchego/tests/e2e: no non-test Go files in /path/to/avalanchego/tests/e2e +// ``` diff --git a/avalanchego/tests/e2e/p/interchain_workflow.go b/avalanchego/tests/e2e/p/interchain_workflow.go new file mode 100644 index 00000000..a9c70bea --- /dev/null +++ b/avalanchego/tests/e2e/p/interchain_workflow.go @@ -0,0 +1,214 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package p + +import ( + "math/big" + "time" + + "github.com/ava-labs/coreth/plugin/evm" + "github.com/spf13/cast" + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/api/info" + "github.com/ava-labs/avalanchego/config" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/tests/fixture/e2e" + "github.com/ava-labs/avalanchego/tests/fixture/tmpnet" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/utils/units" + "github.com/ava-labs/avalanchego/vms/components/avax" + "github.com/ava-labs/avalanchego/vms/platformvm/reward" + "github.com/ava-labs/avalanchego/vms/platformvm/txs" + "github.com/ava-labs/avalanchego/vms/secp256k1fx" + "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" + + ginkgo "github.com/onsi/ginkgo/v2" +) + +var _ = e2e.DescribePChain("[Interchain Workflow]", ginkgo.Label(e2e.UsesCChainLabel), func() { + require := require.New(ginkgo.GinkgoT()) + + const ( + transferAmount = 10 * units.Avax + weight = 2_000 * units.Avax // Used for both validation and delegation + ) + + ginkgo.It("should ensure that funds can be transferred from the P-Chain 
to the X-Chain and the C-Chain", func() { + network := e2e.Env.GetNetwork() + + ginkgo.By("checking that the network has a compatible minimum stake duration", func() { + minStakeDuration := cast.ToDuration(network.DefaultFlags[config.MinStakeDurationKey]) + require.Equal(tmpnet.DefaultMinStakeDuration, minStakeDuration) + }) + + ginkgo.By("creating wallet with a funded key to send from and recipient key to deliver to") + recipientKey, err := secp256k1.NewPrivateKey() + require.NoError(err) + keychain := e2e.Env.NewKeychain(1) + keychain.Add(recipientKey) + nodeURI := e2e.Env.GetRandomNodeURI() + baseWallet := e2e.NewWallet(keychain, nodeURI) + xWallet := baseWallet.X() + cWallet := baseWallet.C() + pWallet := baseWallet.P() + + ginkgo.By("defining common configuration") + recipientEthAddress := evm.GetEthAddress(recipientKey) + avaxAssetID := xWallet.AVAXAssetID() + // Use the same owner for sending to X-Chain and importing funds to P-Chain + recipientOwner := secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + recipientKey.Address(), + }, + } + // Use the same outputs for both X-Chain and C-Chain exports + exportOutputs := []*avax.TransferableOutput{ + { + Asset: avax.Asset{ + ID: avaxAssetID, + }, + Out: &secp256k1fx.TransferOutput{ + Amt: transferAmount, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + keychain.Keys[0].Address(), + }, + }, + }, + }, + } + + ginkgo.By("adding new node and waiting for it to report healthy") + node := e2e.AddEphemeralNode(network, tmpnet.FlagsMap{}) + e2e.WaitForHealthy(node) + + ginkgo.By("retrieving new node's id and pop") + infoClient := info.NewClient(node.URI) + nodeID, nodePOP, err := infoClient.GetNodeID(e2e.DefaultContext()) + require.NoError(err) + + // Adding a validator should not break interchain transfer. 
+ endTime := time.Now().Add(30 * time.Second) + ginkgo.By("adding the new node as a validator", func() { + rewardKey, err := secp256k1.NewPrivateKey() + require.NoError(err) + + const ( + delegationPercent = 0.10 // 10% + delegationShare = reward.PercentDenominator * delegationPercent + ) + + _, err = pWallet.IssueAddPermissionlessValidatorTx( + &txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: nodeID, + End: uint64(endTime.Unix()), + Wght: weight, + }, + Subnet: constants.PrimaryNetworkID, + }, + nodePOP, + pWallet.AVAXAssetID(), + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{rewardKey.Address()}, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{rewardKey.Address()}, + }, + delegationShare, + e2e.WithDefaultContext(), + ) + require.NoError(err) + }) + + // Adding a delegator should not break interchain transfer. + ginkgo.By("adding a delegator to the new node", func() { + rewardKey, err := secp256k1.NewPrivateKey() + require.NoError(err) + + _, err = pWallet.IssueAddPermissionlessDelegatorTx( + &txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: nodeID, + End: uint64(endTime.Unix()), + Wght: weight, + }, + Subnet: constants.PrimaryNetworkID, + }, + pWallet.AVAXAssetID(), + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{rewardKey.Address()}, + }, + e2e.WithDefaultContext(), + ) + require.NoError(err) + }) + + ginkgo.By("exporting AVAX from the P-Chain to the X-Chain", func() { + _, err := pWallet.IssueExportTx( + xWallet.BlockchainID(), + exportOutputs, + e2e.WithDefaultContext(), + ) + require.NoError(err) + }) + + ginkgo.By("importing AVAX from the P-Chain to the X-Chain", func() { + _, err := xWallet.IssueImportTx( + constants.PlatformChainID, + &recipientOwner, + e2e.WithDefaultContext(), + ) + require.NoError(err) + }) + + ginkgo.By("checking that the recipient address has received imported funds on the X-Chain", func() { + balances, err := 
xWallet.Builder().GetFTBalance(common.WithCustomAddresses(set.Of( + recipientKey.Address(), + ))) + require.NoError(err) + require.Positive(balances[avaxAssetID]) + }) + + ginkgo.By("exporting AVAX from the P-Chain to the C-Chain", func() { + _, err := pWallet.IssueExportTx( + cWallet.BlockchainID(), + exportOutputs, + e2e.WithDefaultContext(), + ) + require.NoError(err) + }) + + ginkgo.By("initializing a new eth client") + ethClient := e2e.NewEthClient(nodeURI) + + ginkgo.By("importing AVAX from the P-Chain to the C-Chain", func() { + _, err := cWallet.IssueImportTx( + constants.PlatformChainID, + recipientEthAddress, + e2e.WithDefaultContext(), + e2e.WithSuggestedGasPrice(ethClient), + ) + require.NoError(err) + }) + + ginkgo.By("checking that the recipient address has received imported funds on the C-Chain") + balance, err := ethClient.BalanceAt(e2e.DefaultContext(), recipientEthAddress, nil) + require.NoError(err) + require.Positive(balance.Cmp(big.NewInt(0))) + + ginkgo.By("stopping validator node to free up resources for a bootstrap check") + require.NoError(node.Stop(e2e.DefaultContext())) + + e2e.CheckBootstrapIsPossible(network) + }) +}) diff --git a/avalanchego/tests/e2e/p/permissionless_subnets.go b/avalanchego/tests/e2e/p/permissionless_subnets.go index bcd6ac64..eb0a6e12 100644 --- a/avalanchego/tests/e2e/p/permissionless_subnets.go +++ b/avalanchego/tests/e2e/p/permissionless_subnets.go @@ -1,104 +1,73 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package p import ( - "context" "fmt" "time" - ginkgo "github.com/onsi/ginkgo/v2" - - "github.com/onsi/gomega" + "github.com/stretchr/testify/require" - "github.com/ava-labs/avalanchego/genesis" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/choices" - "github.com/ava-labs/avalanchego/tests" - "github.com/ava-labs/avalanchego/tests/e2e" + "github.com/ava-labs/avalanchego/tests/fixture/e2e" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/units" - "github.com/ava-labs/avalanchego/vms/avm" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/verify" "github.com/ava-labs/avalanchego/vms/platformvm" "github.com/ava-labs/avalanchego/vms/platformvm/reward" "github.com/ava-labs/avalanchego/vms/platformvm/signer" - "github.com/ava-labs/avalanchego/vms/platformvm/status" "github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/ava-labs/avalanchego/vms/secp256k1fx" - "github.com/ava-labs/avalanchego/wallet/subnet/primary" - "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" + + ginkgo "github.com/onsi/ginkgo/v2" ) var _ = e2e.DescribePChain("[Permissionless Subnets]", func() { + require := require.New(ginkgo.GinkgoT()) + ginkgo.It("subnets operations", - // use this for filtering tests by labels - // ref. 
https://onsi.github.io/ginkgo/#spec-labels - ginkgo.Label( - "require-network-runner", - "xp", - "permissionless-subnets", - ), func() { - ginkgo.By("reload initial snapshot for test independence", func() { - err := e2e.Env.RestoreInitialState(true /*switchOffNetworkFirst*/) - gomega.Expect(err).Should(gomega.BeNil()) - }) + nodeURI := e2e.Env.GetRandomNodeURI() - rpcEps := e2e.Env.GetURIs() - gomega.Expect(rpcEps).ShouldNot(gomega.BeEmpty()) - nodeURI := rpcEps[0] - - tests.Outf("{{blue}} setting up keys {{/}}\n") - testKey := genesis.EWOQKey - keyChain := secp256k1fx.NewKeychain(testKey) - - var baseWallet primary.Wallet - ginkgo.By("setup wallet", func() { - var err error - ctx, cancel := context.WithTimeout(context.Background(), e2e.DefaultWalletCreationTimeout) - baseWallet, err = primary.NewWalletFromURI(ctx, nodeURI, keyChain) - cancel() - gomega.Expect(err).Should(gomega.BeNil()) - }) + keychain := e2e.Env.NewKeychain(1) + baseWallet := e2e.NewWallet(keychain, nodeURI) pWallet := baseWallet.P() - pChainClient := platformvm.NewClient(nodeURI) xWallet := baseWallet.X() - xChainClient := avm.NewClient(nodeURI, xWallet.BlockchainID().String()) xChainID := xWallet.BlockchainID() + var validatorID ids.NodeID + ginkgo.By("retrieving the node ID of a primary network validator", func() { + pChainClient := platformvm.NewClient(nodeURI.URI) + validatorIDs, err := pChainClient.SampleValidators(e2e.DefaultContext(), constants.PrimaryNetworkID, 1) + require.NoError(err) + validatorID = validatorIDs[0] + }) + owner := &secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{ - testKey.PublicKey().Address(), + keychain.Keys[0].Address(), }, } var subnetID ids.ID ginkgo.By("create a permissioned subnet", func() { - var err error - ctx, cancel := context.WithTimeout(context.Background(), e2e.DefaultConfirmTxTimeout) - subnetID, err = pWallet.IssueCreateSubnetTx( + subnetTx, err := pWallet.IssueCreateSubnetTx( owner, - common.WithContext(ctx), + e2e.WithDefaultContext(), 
) - cancel() - gomega.Expect(subnetID, err).Should(gomega.Not(gomega.Equal(constants.PrimaryNetworkID))) - ctx, cancel = context.WithTimeout(context.Background(), e2e.DefaultConfirmTxTimeout) - txStatus, err := pChainClient.GetTxStatus(ctx, subnetID) - cancel() - gomega.Expect(txStatus.Status, err).To(gomega.Equal(status.Committed)) + subnetID = subnetTx.ID() + require.NoError(err) + require.NotEqual(subnetID, constants.PrimaryNetworkID) }) var subnetAssetID ids.ID ginkgo.By("create a custom asset for the permissionless subnet", func() { - var err error - ctx, cancel := context.WithTimeout(context.Background(), e2e.DefaultWalletCreationTimeout) - subnetAssetID, err = xWallet.IssueCreateAssetTx( + subnetAssetTx, err := xWallet.IssueCreateAssetTx( "RnM", "RNM", 9, @@ -110,20 +79,14 @@ var _ = e2e.DescribePChain("[Permissionless Subnets]", func() { }, }, }, - common.WithContext(ctx), + e2e.WithDefaultContext(), ) - cancel() - gomega.Expect(err).Should(gomega.BeNil()) - - ctx, cancel = context.WithTimeout(context.Background(), e2e.DefaultConfirmTxTimeout) - txStatus, err := xChainClient.GetTxStatus(ctx, subnetAssetID) - cancel() - gomega.Expect(txStatus, err).To(gomega.Equal(choices.Accepted)) + require.NoError(err) + subnetAssetID = subnetAssetTx.ID() }) ginkgo.By(fmt.Sprintf("Send 100 MegaAvax of asset %s to the P-chain", subnetAssetID), func() { - ctx, cancel := context.WithTimeout(context.Background(), e2e.DefaultWalletCreationTimeout) - exportTxID, err := xWallet.IssueExportTx( + _, err := xWallet.IssueExportTx( constants.PlatformChainID, []*avax.TransferableOutput{ { @@ -136,36 +99,22 @@ var _ = e2e.DescribePChain("[Permissionless Subnets]", func() { }, }, }, - common.WithContext(ctx), + e2e.WithDefaultContext(), ) - cancel() - gomega.Expect(err).Should(gomega.BeNil()) - - ctx, cancel = context.WithTimeout(context.Background(), e2e.DefaultConfirmTxTimeout) - txStatus, err := xChainClient.GetTxStatus(ctx, exportTxID) - cancel() - gomega.Expect(txStatus, 
err).To(gomega.Equal(choices.Accepted)) + require.NoError(err) }) ginkgo.By(fmt.Sprintf("Import the 100 MegaAvax of asset %s from the X-chain into the P-chain", subnetAssetID), func() { - ctx, cancel := context.WithTimeout(context.Background(), e2e.DefaultWalletCreationTimeout) - importTxID, err := pWallet.IssueImportTx( + _, err := pWallet.IssueImportTx( xChainID, owner, - common.WithContext(ctx), + e2e.WithDefaultContext(), ) - cancel() - gomega.Expect(err).Should(gomega.BeNil()) - - ctx, cancel = context.WithTimeout(context.Background(), e2e.DefaultConfirmTxTimeout) - txStatus, err := pChainClient.GetTxStatus(ctx, importTxID) - cancel() - gomega.Expect(txStatus.Status, err).To(gomega.Equal(status.Committed)) + require.NoError(err) }) ginkgo.By("make subnet permissionless", func() { - ctx, cancel := context.WithTimeout(context.Background(), e2e.DefaultConfirmTxTimeout) - transformSubnetTxID, err := pWallet.IssueTransformSubnetTx( + _, err := pWallet.IssueTransformSubnetTx( subnetID, subnetAssetID, 50*units.MegaAvax, @@ -180,26 +129,18 @@ var _ = e2e.DescribePChain("[Permissionless Subnets]", func() { 1, 5, .80*reward.PercentDenominator, - common.WithContext(ctx), + e2e.WithDefaultContext(), ) - cancel() - gomega.Expect(err).Should(gomega.BeNil()) - - ctx, cancel = context.WithTimeout(context.Background(), e2e.DefaultConfirmTxTimeout) - txStatus, err := pChainClient.GetTxStatus(ctx, transformSubnetTxID) - cancel() - gomega.Expect(txStatus.Status, err).To(gomega.Equal(status.Committed)) + require.NoError(err) }) - validatorStartTime := time.Now().Add(time.Minute) + endTime := time.Now().Add(time.Minute) ginkgo.By("add permissionless validator", func() { - ctx, cancel := context.WithTimeout(context.Background(), e2e.DefaultConfirmTxTimeout) - addSubnetValidatorTxID, err := pWallet.IssueAddPermissionlessValidatorTx( + _, err := pWallet.IssueAddPermissionlessValidatorTx( &txs.SubnetValidator{ Validator: txs.Validator{ - NodeID: 
genesis.LocalConfig.InitialStakers[0].NodeID, - Start: uint64(validatorStartTime.Unix()), - End: uint64(validatorStartTime.Add(5 * time.Second).Unix()), + NodeID: validatorID, + End: uint64(endTime.Unix()), Wght: 25 * units.MegaAvax, }, Subnet: subnetID, @@ -209,41 +150,26 @@ var _ = e2e.DescribePChain("[Permissionless Subnets]", func() { &secp256k1fx.OutputOwners{}, &secp256k1fx.OutputOwners{}, reward.PercentDenominator, - common.WithContext(ctx), + e2e.WithDefaultContext(), ) - cancel() - gomega.Expect(err).Should(gomega.BeNil()) - - ctx, cancel = context.WithTimeout(context.Background(), e2e.DefaultConfirmTxTimeout) - txStatus, err := pChainClient.GetTxStatus(ctx, addSubnetValidatorTxID) - cancel() - gomega.Expect(txStatus.Status, err).To(gomega.Equal(status.Committed)) + require.NoError(err) }) - delegatorStartTime := validatorStartTime ginkgo.By("add permissionless delegator", func() { - ctx, cancel := context.WithTimeout(context.Background(), e2e.DefaultConfirmTxTimeout) - addSubnetDelegatorTxID, err := pWallet.IssueAddPermissionlessDelegatorTx( + _, err := pWallet.IssueAddPermissionlessDelegatorTx( &txs.SubnetValidator{ Validator: txs.Validator{ - NodeID: genesis.LocalConfig.InitialStakers[0].NodeID, - Start: uint64(delegatorStartTime.Unix()), - End: uint64(delegatorStartTime.Add(5 * time.Second).Unix()), + NodeID: validatorID, + End: uint64(endTime.Unix()), Wght: 25 * units.MegaAvax, }, Subnet: subnetID, }, subnetAssetID, &secp256k1fx.OutputOwners{}, - common.WithContext(ctx), + e2e.WithDefaultContext(), ) - cancel() - gomega.Expect(err).Should(gomega.BeNil()) - - ctx, cancel = context.WithTimeout(context.Background(), e2e.DefaultConfirmTxTimeout) - txStatus, err := pChainClient.GetTxStatus(ctx, addSubnetDelegatorTxID) - cancel() - gomega.Expect(txStatus.Status, err).To(gomega.Equal(status.Committed)) + require.NoError(err) }) }) }) diff --git a/avalanchego/tests/e2e/p/staking_rewards.go b/avalanchego/tests/e2e/p/staking_rewards.go new file mode 100644 
index 00000000..436d8967 --- /dev/null +++ b/avalanchego/tests/e2e/p/staking_rewards.go @@ -0,0 +1,307 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package p + +import ( + "time" + + "github.com/mitchellh/mapstructure" + "github.com/spf13/cast" + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/api/admin" + "github.com/ava-labs/avalanchego/api/info" + "github.com/ava-labs/avalanchego/config" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/tests" + "github.com/ava-labs/avalanchego/tests/fixture/e2e" + "github.com/ava-labs/avalanchego/tests/fixture/tmpnet" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + "github.com/ava-labs/avalanchego/utils/units" + "github.com/ava-labs/avalanchego/vms/platformvm" + "github.com/ava-labs/avalanchego/vms/platformvm/reward" + "github.com/ava-labs/avalanchego/vms/platformvm/txs" + "github.com/ava-labs/avalanchego/vms/secp256k1fx" + + ginkgo "github.com/onsi/ginkgo/v2" +) + +const ( + targetDelegationPeriod = 15 * time.Second + targetValidationPeriod = 30 * time.Second +) + +var _ = ginkgo.Describe("[Staking Rewards]", func() { + require := require.New(ginkgo.GinkgoT()) + + ginkgo.It("should ensure that validator node uptime determines whether a staking reward is issued", func() { + network := e2e.Env.GetNetwork() + + ginkgo.By("checking that the network has a compatible minimum stake duration", func() { + minStakeDuration := cast.ToDuration(network.DefaultFlags[config.MinStakeDurationKey]) + require.Equal(tmpnet.DefaultMinStakeDuration, minStakeDuration) + }) + + ginkgo.By("adding alpha node, whose uptime should result in a staking reward") + alphaNode := e2e.AddEphemeralNode(network, tmpnet.FlagsMap{}) + ginkgo.By("adding beta node, whose uptime should not result in a staking reward") + betaNode := e2e.AddEphemeralNode(network, tmpnet.FlagsMap{}) + 
+ // Wait to check health until both nodes have started to minimize the duration + // required for both nodes to report healthy. + ginkgo.By("waiting until alpha node is healthy") + e2e.WaitForHealthy(alphaNode) + ginkgo.By("waiting until beta node is healthy") + e2e.WaitForHealthy(betaNode) + + ginkgo.By("retrieving alpha node id and pop") + alphaInfoClient := info.NewClient(alphaNode.URI) + alphaNodeID, alphaPOP, err := alphaInfoClient.GetNodeID(e2e.DefaultContext()) + require.NoError(err) + + ginkgo.By("retrieving beta node id and pop") + betaInfoClient := info.NewClient(betaNode.URI) + betaNodeID, betaPOP, err := betaInfoClient.GetNodeID(e2e.DefaultContext()) + require.NoError(err) + + ginkgo.By("generating reward keys") + + alphaValidationRewardKey, err := secp256k1.NewPrivateKey() + require.NoError(err) + alphaDelegationRewardKey, err := secp256k1.NewPrivateKey() + require.NoError(err) + + betaValidationRewardKey, err := secp256k1.NewPrivateKey() + require.NoError(err) + betaDelegationRewardKey, err := secp256k1.NewPrivateKey() + require.NoError(err) + + gammaDelegationRewardKey, err := secp256k1.NewPrivateKey() + require.NoError(err) + + deltaDelegationRewardKey, err := secp256k1.NewPrivateKey() + require.NoError(err) + + rewardKeys := []*secp256k1.PrivateKey{ + alphaValidationRewardKey, + alphaDelegationRewardKey, + betaValidationRewardKey, + betaDelegationRewardKey, + gammaDelegationRewardKey, + deltaDelegationRewardKey, + } + + ginkgo.By("creating keychain and P-Chain wallet") + keychain := secp256k1fx.NewKeychain(rewardKeys...) 
+ fundedKey := e2e.Env.AllocatePreFundedKey() + keychain.Add(fundedKey) + nodeURI := tmpnet.NodeURI{ + NodeID: alphaNodeID, + URI: alphaNode.URI, + } + baseWallet := e2e.NewWallet(keychain, nodeURI) + pWallet := baseWallet.P() + + const ( + delegationPercent = 0.10 // 10% + delegationShare = reward.PercentDenominator * delegationPercent + weight = 2_000 * units.Avax + ) + + pvmClient := platformvm.NewClient(alphaNode.URI) + + ginkgo.By("retrieving supply before inserting validators") + supplyAtValidatorsStart, _, err := pvmClient.GetCurrentSupply(e2e.DefaultContext(), constants.PrimaryNetworkID) + require.NoError(err) + + alphaValidatorsEndTime := time.Now().Add(targetValidationPeriod) + tests.Outf("alpha node validation period ending at: %v\n", alphaValidatorsEndTime) + + ginkgo.By("adding alpha node as a validator", func() { + _, err := pWallet.IssueAddPermissionlessValidatorTx( + &txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: alphaNodeID, + End: uint64(alphaValidatorsEndTime.Unix()), + Wght: weight, + }, + Subnet: constants.PrimaryNetworkID, + }, + alphaPOP, + pWallet.AVAXAssetID(), + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{alphaValidationRewardKey.Address()}, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{alphaDelegationRewardKey.Address()}, + }, + delegationShare, + e2e.WithDefaultContext(), + ) + require.NoError(err) + }) + + betaValidatorEndTime := time.Now().Add(targetValidationPeriod) + tests.Outf("beta node validation period ending at: %v\n", betaValidatorEndTime) + + ginkgo.By("adding beta node as a validator", func() { + _, err := pWallet.IssueAddPermissionlessValidatorTx( + &txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: betaNodeID, + End: uint64(betaValidatorEndTime.Unix()), + Wght: weight, + }, + Subnet: constants.PrimaryNetworkID, + }, + betaPOP, + pWallet.AVAXAssetID(), + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{betaValidationRewardKey.Address()}, 
+ }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{betaDelegationRewardKey.Address()}, + }, + delegationShare, + e2e.WithDefaultContext(), + ) + require.NoError(err) + }) + + ginkgo.By("retrieving supply before inserting delegators") + supplyAtDelegatorsStart, _, err := pvmClient.GetCurrentSupply(e2e.DefaultContext(), constants.PrimaryNetworkID) + require.NoError(err) + + gammaDelegatorEndTime := time.Now().Add(targetDelegationPeriod) + tests.Outf("gamma delegation period ending at: %v\n", gammaDelegatorEndTime) + + ginkgo.By("adding gamma as delegator to the alpha node", func() { + _, err := pWallet.IssueAddPermissionlessDelegatorTx( + &txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: alphaNodeID, + End: uint64(gammaDelegatorEndTime.Unix()), + Wght: weight, + }, + Subnet: constants.PrimaryNetworkID, + }, + pWallet.AVAXAssetID(), + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{gammaDelegationRewardKey.Address()}, + }, + e2e.WithDefaultContext(), + ) + require.NoError(err) + }) + + deltaDelegatorEndTime := time.Now().Add(targetDelegationPeriod) + tests.Outf("delta delegation period ending at: %v\n", deltaDelegatorEndTime) + + ginkgo.By("adding delta as delegator to the beta node", func() { + _, err := pWallet.IssueAddPermissionlessDelegatorTx( + &txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: betaNodeID, + End: uint64(deltaDelegatorEndTime.Unix()), + Wght: weight, + }, + Subnet: constants.PrimaryNetworkID, + }, + pWallet.AVAXAssetID(), + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{deltaDelegationRewardKey.Address()}, + }, + e2e.WithDefaultContext(), + ) + require.NoError(err) + }) + + ginkgo.By("stopping beta node to prevent it and its delegator from receiving a validation reward") + require.NoError(betaNode.Stop(e2e.DefaultContext())) + + ginkgo.By("retrieving staking periods from the chain") + data, err := pvmClient.GetCurrentValidators(e2e.DefaultContext(), 
constants.PlatformChainID, []ids.NodeID{alphaNodeID}) + require.NoError(err) + require.Len(data, 1) + actualAlphaValidationPeriod := time.Duration(data[0].EndTime-data[0].StartTime) * time.Second + delegatorData := data[0].Delegators[0] + actualGammaDelegationPeriod := time.Duration(delegatorData.EndTime-delegatorData.StartTime) * time.Second + + ginkgo.By("waiting until all validation periods are over") + // The beta validator was the last added and so has the latest end time. The + // delegation periods are shorter than the validation periods. + time.Sleep(time.Until(betaValidatorEndTime)) + + ginkgo.By("waiting until the alpha and beta nodes are no longer validators") + e2e.Eventually(func() bool { + validators, err := pvmClient.GetCurrentValidators(e2e.DefaultContext(), constants.PrimaryNetworkID, nil) + require.NoError(err) + for _, validator := range validators { + if validator.NodeID == alphaNodeID || validator.NodeID == betaNodeID { + return false + } + } + return true + }, e2e.DefaultTimeout, e2e.DefaultPollingInterval, "nodes failed to stop validating before timeout ") + + ginkgo.By("retrieving reward configuration for the network") + // TODO(marun) Enable GetConfig to return *node.Config + // directly. Currently, due to a circular dependency issue, a + // map-based equivalent is used for which manual unmarshaling + // is required. 
+ adminClient := admin.NewClient(e2e.Env.GetRandomNodeURI().URI) + rawNodeConfigMap, err := adminClient.GetConfig(e2e.DefaultContext()) + require.NoError(err) + nodeConfigMap, ok := rawNodeConfigMap.(map[string]interface{}) + require.True(ok) + stakingConfigMap, ok := nodeConfigMap["stakingConfig"].(map[string]interface{}) + require.True(ok) + rawRewardConfig := stakingConfigMap["rewardConfig"] + rewardConfig := reward.Config{} + require.NoError(mapstructure.Decode(rawRewardConfig, &rewardConfig)) + + ginkgo.By("retrieving reward address balances") + rewardBalances := make(map[ids.ShortID]uint64, len(rewardKeys)) + for _, rewardKey := range rewardKeys { + keychain := secp256k1fx.NewKeychain(rewardKey) + baseWallet := e2e.NewWallet(keychain, nodeURI) + pWallet := baseWallet.P() + balances, err := pWallet.Builder().GetBalance() + require.NoError(err) + rewardBalances[rewardKey.Address()] = balances[pWallet.AVAXAssetID()] + } + require.Len(rewardBalances, len(rewardKeys)) + + ginkgo.By("determining expected validation and delegation rewards") + calculator := reward.NewCalculator(rewardConfig) + expectedValidationReward := calculator.Calculate(actualAlphaValidationPeriod, weight, supplyAtValidatorsStart) + potentialDelegationReward := calculator.Calculate(actualGammaDelegationPeriod, weight, supplyAtDelegatorsStart) + expectedDelegationFee, expectedDelegatorReward := reward.Split(potentialDelegationReward, delegationShare) + + ginkgo.By("checking expected rewards against actual rewards") + expectedRewardBalances := map[ids.ShortID]uint64{ + alphaValidationRewardKey.Address(): expectedValidationReward, + alphaDelegationRewardKey.Address(): expectedDelegationFee, + betaValidationRewardKey.Address(): 0, // Validator didn't meet uptime requirement + betaDelegationRewardKey.Address(): 0, // Validator didn't meet uptime requirement + gammaDelegationRewardKey.Address(): expectedDelegatorReward, + deltaDelegationRewardKey.Address(): 0, // Validator didn't meet uptime 
requirement + } + for address := range expectedRewardBalances { + require.Equal(expectedRewardBalances[address], rewardBalances[address]) + } + + ginkgo.By("stopping alpha to free up resources for a bootstrap check") + require.NoError(alphaNode.Stop(e2e.DefaultContext())) + + e2e.CheckBootstrapIsPossible(network) + }) +}) diff --git a/avalanchego/tests/e2e/p/validator_sets.go b/avalanchego/tests/e2e/p/validator_sets.go new file mode 100644 index 00000000..36072e32 --- /dev/null +++ b/avalanchego/tests/e2e/p/validator_sets.go @@ -0,0 +1,113 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package p + +import ( + "fmt" + "time" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/genesis" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/tests" + "github.com/ava-labs/avalanchego/tests/fixture/e2e" + "github.com/ava-labs/avalanchego/tests/fixture/tmpnet" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + "github.com/ava-labs/avalanchego/vms/platformvm" + "github.com/ava-labs/avalanchego/vms/platformvm/txs" + "github.com/ava-labs/avalanchego/vms/secp256k1fx" + + ginkgo "github.com/onsi/ginkgo/v2" +) + +var _ = e2e.DescribePChain("[Validator Sets]", func() { + require := require.New(ginkgo.GinkgoT()) + + ginkgo.It("should be identical for every height for all nodes in the network", func() { + network := e2e.Env.GetNetwork() + + ginkgo.By("creating wallet with a funded key to source delegated funds from") + keychain := e2e.Env.NewKeychain(1) + nodeURI := e2e.Env.GetRandomNodeURI() + baseWallet := e2e.NewWallet(keychain, nodeURI) + pWallet := baseWallet.P() + + const delegatorCount = 15 + ginkgo.By(fmt.Sprintf("adding %d delegators", delegatorCount), func() { + rewardKey, err := secp256k1.NewPrivateKey() + require.NoError(err) + avaxAssetID := 
pWallet.AVAXAssetID() + startTime := time.Now().Add(tmpnet.DefaultValidatorStartTimeDiff) + endTime := startTime.Add(time.Second * 360) + // This is the default flag value for MinDelegatorStake. + weight := genesis.LocalParams.StakingConfig.MinDelegatorStake + + for i := 0; i < delegatorCount; i++ { + _, err = pWallet.IssueAddPermissionlessDelegatorTx( + &txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: nodeURI.NodeID, + Start: uint64(startTime.Unix()), + End: uint64(endTime.Unix()), + Wght: weight, + }, + Subnet: constants.PrimaryNetworkID, + }, + avaxAssetID, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{rewardKey.Address()}, + }, + e2e.WithDefaultContext(), + ) + require.NoError(err) + } + }) + + ginkgo.By("getting the current P-Chain height from the wallet") + currentPChainHeight, err := platformvm.NewClient(nodeURI.URI).GetHeight(e2e.DefaultContext()) + require.NoError(err) + + ginkgo.By("checking that validator sets are equal across all heights for all nodes", func() { + pvmClients := make([]platformvm.Client, len(e2e.Env.URIs)) + for i, nodeURI := range e2e.Env.URIs { + pvmClients[i] = platformvm.NewClient(nodeURI.URI) + // Ensure that the height of the target node is at least the expected height + e2e.Eventually( + func() bool { + pChainHeight, err := pvmClients[i].GetHeight(e2e.DefaultContext()) + require.NoError(err) + return pChainHeight >= currentPChainHeight + }, + e2e.DefaultTimeout, + e2e.DefaultPollingInterval, + fmt.Sprintf("failed to see expected height %d for %s before timeout", currentPChainHeight, nodeURI.NodeID), + ) + } + + for height := uint64(0); height <= currentPChainHeight; height++ { + tests.Outf(" checked validator sets for height %d\n", height) + var observedValidatorSet map[ids.NodeID]*validators.GetValidatorOutput + for _, pvmClient := range pvmClients { + validatorSet, err := pvmClient.GetValidatorsAt( + e2e.DefaultContext(), + constants.PrimaryNetworkID, + height, + ) + require.NoError(err) + if 
observedValidatorSet == nil { + observedValidatorSet = validatorSet + continue + } + require.Equal(observedValidatorSet, validatorSet) + } + } + }) + + e2e.CheckBootstrapIsPossible(network) + }) +}) diff --git a/avalanchego/tests/e2e/p/workflow.go b/avalanchego/tests/e2e/p/workflow.go index 957124a4..1819df44 100644 --- a/avalanchego/tests/e2e/p/workflow.go +++ b/avalanchego/tests/e2e/p/workflow.go @@ -1,32 +1,28 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package p import ( - "context" - "errors" "time" - ginkgo "github.com/onsi/ginkgo/v2" - - "github.com/onsi/gomega" + "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/api/info" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/tests" - "github.com/ava-labs/avalanchego/tests/e2e" + "github.com/ava-labs/avalanchego/tests/fixture/e2e" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/units" - "github.com/ava-labs/avalanchego/vms/avm" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm" - "github.com/ava-labs/avalanchego/vms/platformvm/status" + "github.com/ava-labs/avalanchego/vms/platformvm/signer" "github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/ava-labs/avalanchego/vms/secp256k1fx" - "github.com/ava-labs/avalanchego/wallet/subnet/primary" - "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" + + ginkgo "github.com/onsi/ginkgo/v2" ) // PChainWorkflow is an integration test for normal P-Chain operations @@ -36,72 +32,57 @@ import ( // - Checks the expected value of the funding address var _ = e2e.DescribePChain("[Workflow]", func() { + require := require.New(ginkgo.GinkgoT()) + 
ginkgo.It("P-chain main operations", - // use this for filtering tests by labels - // ref. https://onsi.github.io/ginkgo/#spec-labels - ginkgo.Label( - "require-network-runner", - "xp", - "workflow", - ), - ginkgo.FlakeAttempts(2), func() { - rpcEps := e2e.Env.GetURIs() - gomega.Expect(rpcEps).ShouldNot(gomega.BeEmpty()) - nodeURI := rpcEps[0] - - tests.Outf("{{blue}} setting up keys {{/}}\n") - _, testKeyAddrs, keyChain := e2e.Env.GetTestKeys() - - tests.Outf("{{blue}} setting up wallet {{/}}\n") - ctx, cancel := context.WithTimeout(context.Background(), e2e.DefaultWalletCreationTimeout) - baseWallet, err := primary.NewWalletFromURI(ctx, nodeURI, keyChain) - cancel() - gomega.Expect(err).Should(gomega.BeNil()) + nodeURI := e2e.Env.GetRandomNodeURI() + keychain := e2e.Env.NewKeychain(2) + baseWallet := e2e.NewWallet(keychain, nodeURI) pWallet := baseWallet.P() avaxAssetID := baseWallet.P().AVAXAssetID() xWallet := baseWallet.X() - pChainClient := platformvm.NewClient(nodeURI) - xChainClient := avm.NewClient(nodeURI, xWallet.BlockchainID().String()) + pChainClient := platformvm.NewClient(nodeURI.URI) tests.Outf("{{blue}} fetching minimal stake amounts {{/}}\n") - ctx, cancel = context.WithTimeout(context.Background(), e2e.DefaultWalletCreationTimeout) - minValStake, minDelStake, err := pChainClient.GetMinStake(ctx, constants.PlatformChainID) - cancel() - gomega.Expect(err).Should(gomega.BeNil()) + minValStake, minDelStake, err := pChainClient.GetMinStake(e2e.DefaultContext(), constants.PlatformChainID) + require.NoError(err) tests.Outf("{{green}} minimal validator stake: %d {{/}}\n", minValStake) tests.Outf("{{green}} minimal delegator stake: %d {{/}}\n", minDelStake) tests.Outf("{{blue}} fetching tx fee {{/}}\n") - infoClient := info.NewClient(nodeURI) - ctx, cancel = context.WithTimeout(context.Background(), e2e.DefaultWalletCreationTimeout) - fees, err := infoClient.GetTxFee(ctx) - cancel() - gomega.Expect(err).Should(gomega.BeNil()) + infoClient := 
info.NewClient(nodeURI.URI) + fees, err := infoClient.GetTxFee(e2e.DefaultContext()) + require.NoError(err) txFees := uint64(fees.TxFee) tests.Outf("{{green}} txFee: %d {{/}}\n", txFees) // amount to transfer from P to X chain toTransfer := 1 * units.Avax - pShortAddr := testKeyAddrs[0] - xTargetAddr := testKeyAddrs[1] + pShortAddr := keychain.Keys[0].Address() + xTargetAddr := keychain.Keys[1].Address() ginkgo.By("check selected keys have sufficient funds", func() { pBalances, err := pWallet.Builder().GetBalance() pBalance := pBalances[avaxAssetID] minBalance := minValStake + txFees + minDelStake + txFees + toTransfer + txFees - gomega.Expect(pBalance, err).To(gomega.BeNumerically(">=", minBalance)) + require.NoError(err) + require.GreaterOrEqual(pBalance, minBalance) }) - // create validator data - validatorStartTimeDiff := 30 * time.Second - vdrStartTime := time.Now().Add(validatorStartTimeDiff) - - vdr := &txs.Validator{ - NodeID: ids.GenerateTestNodeID(), - Start: uint64(vdrStartTime.Unix()), - End: uint64(vdrStartTime.Add(72 * time.Hour).Unix()), - Wght: minValStake, + + // Use a random node ID to ensure that repeated test runs + // will succeed against a network that persists across runs. 
+ validatorID, err := ids.ToNodeID(utils.RandomBytes(ids.NodeIDLen)) + require.NoError(err) + + vdr := &txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: validatorID, + End: uint64(time.Now().Add(72 * time.Hour).Unix()), + Wght: minValStake, + }, + Subnet: constants.PrimaryNetworkID, } rewardOwner := &secp256k1fx.OutputOwners{ Threshold: 1, @@ -109,47 +90,41 @@ var _ = e2e.DescribePChain("[Workflow]", func() { } shares := uint32(20000) // TODO: retrieve programmatically + sk, err := bls.NewSecretKey() + require.NoError(err) + pop := signer.NewProofOfPossession(sk) + ginkgo.By("issue add validator tx", func() { - ctx, cancel := context.WithTimeout(context.Background(), e2e.DefaultConfirmTxTimeout) - addValidatorTxID, err := pWallet.IssueAddValidatorTx( + _, err := pWallet.IssueAddPermissionlessValidatorTx( vdr, + pop, + avaxAssetID, + rewardOwner, rewardOwner, shares, - common.WithContext(ctx), + e2e.WithDefaultContext(), ) - cancel() - gomega.Expect(err).Should(gomega.BeNil()) - - ctx, cancel = context.WithTimeout(context.Background(), e2e.DefaultConfirmTxTimeout) - txStatus, err := pChainClient.GetTxStatus(ctx, addValidatorTxID) - cancel() - gomega.Expect(txStatus.Status, err).To(gomega.Equal(status.Committed)) + require.NoError(err) }) ginkgo.By("issue add delegator tx", func() { - ctx, cancel := context.WithTimeout(context.Background(), e2e.DefaultConfirmTxTimeout) - addDelegatorTxID, err := pWallet.IssueAddDelegatorTx( + _, err := pWallet.IssueAddPermissionlessDelegatorTx( vdr, + avaxAssetID, rewardOwner, - common.WithContext(ctx), + e2e.WithDefaultContext(), ) - cancel() - gomega.Expect(err).Should(gomega.BeNil()) - - ctx, cancel = context.WithTimeout(context.Background(), e2e.DefaultConfirmTxTimeout) - txStatus, err := pChainClient.GetTxStatus(ctx, addDelegatorTxID) - cancel() - gomega.Expect(txStatus.Status, err).To(gomega.Equal(status.Committed)) + require.NoError(err) }) // retrieve initial balances pBalances, err := pWallet.Builder().GetBalance() 
- gomega.Expect(err).Should(gomega.BeNil()) + require.NoError(err) pStartBalance := pBalances[avaxAssetID] tests.Outf("{{blue}} P-chain balance before P->X export: %d {{/}}\n", pStartBalance) xBalances, err := xWallet.Builder().GetFTBalance() - gomega.Expect(err).Should(gomega.BeNil()) + require.NoError(err) xStartBalance := xBalances[avaxAssetID] tests.Outf("{{blue}} X-chain balance before P->X export: %d {{/}}\n", xStartBalance) @@ -165,8 +140,7 @@ var _ = e2e.DescribePChain("[Workflow]", func() { } ginkgo.By("export avax from P to X chain", func() { - ctx, cancel := context.WithTimeout(context.Background(), e2e.DefaultConfirmTxTimeout) - exportTxID, err := pWallet.IssueExportTx( + _, err := pWallet.IssueExportTx( xWallet.BlockchainID(), []*avax.TransferableOutput{ { @@ -176,59 +150,46 @@ var _ = e2e.DescribePChain("[Workflow]", func() { Out: output, }, }, - common.WithContext(ctx), + e2e.WithDefaultContext(), ) - cancel() - gomega.Expect(err).Should(gomega.BeNil()) - - ctx, cancel = context.WithTimeout(context.Background(), e2e.DefaultConfirmTxTimeout) - txStatus, err := pChainClient.GetTxStatus(ctx, exportTxID) - cancel() - gomega.Expect(txStatus.Status, err).To(gomega.Equal(status.Committed)) + require.NoError(err) }) // check balances post export pBalances, err = pWallet.Builder().GetBalance() - gomega.Expect(err).Should(gomega.BeNil()) + require.NoError(err) pPreImportBalance := pBalances[avaxAssetID] tests.Outf("{{blue}} P-chain balance after P->X export: %d {{/}}\n", pPreImportBalance) xBalances, err = xWallet.Builder().GetFTBalance() - gomega.Expect(err).Should(gomega.BeNil()) + require.NoError(err) xPreImportBalance := xBalances[avaxAssetID] tests.Outf("{{blue}} X-chain balance after P->X export: %d {{/}}\n", xPreImportBalance) - gomega.Expect(xPreImportBalance).To(gomega.Equal(xStartBalance)) // import not performed yet - gomega.Expect(pPreImportBalance).To(gomega.Equal(pStartBalance - toTransfer - txFees)) + require.Equal(xPreImportBalance, 
xStartBalance) // import not performed yet + require.Equal(pPreImportBalance, pStartBalance-toTransfer-txFees) ginkgo.By("import avax from P into X chain", func() { - ctx, cancel := context.WithTimeout(context.Background(), e2e.DefaultConfirmTxTimeout) - importTxID, err := xWallet.IssueImportTx( + _, err := xWallet.IssueImportTx( constants.PlatformChainID, &outputOwner, - common.WithContext(ctx), + e2e.WithDefaultContext(), ) - cancel() - gomega.Expect(err).Should(gomega.BeNil(), "is context.DeadlineExceeded: %v", errors.Is(err, context.DeadlineExceeded)) - - ctx, cancel = context.WithTimeout(context.Background(), e2e.DefaultConfirmTxTimeout) - txStatus, err := xChainClient.GetTxStatus(ctx, importTxID) - cancel() - gomega.Expect(txStatus, err).To(gomega.Equal(choices.Accepted)) + require.NoError(err) }) // check balances post import pBalances, err = pWallet.Builder().GetBalance() - gomega.Expect(err).Should(gomega.BeNil()) + require.NoError(err) pFinalBalance := pBalances[avaxAssetID] tests.Outf("{{blue}} P-chain balance after P->X import: %d {{/}}\n", pFinalBalance) xBalances, err = xWallet.Builder().GetFTBalance() - gomega.Expect(err).Should(gomega.BeNil()) + require.NoError(err) xFinalBalance := xBalances[avaxAssetID] tests.Outf("{{blue}} X-chain balance after P->X import: %d {{/}}\n", xFinalBalance) - gomega.Expect(xFinalBalance).To(gomega.Equal(xPreImportBalance + toTransfer - txFees)) // import not performed yet - gomega.Expect(pFinalBalance).To(gomega.Equal(pPreImportBalance)) + require.Equal(xFinalBalance, xPreImportBalance+toTransfer-txFees) // import not performed yet + require.Equal(pFinalBalance, pPreImportBalance) }) }) diff --git a/avalanchego/tests/e2e/ping/suites.go b/avalanchego/tests/e2e/ping/suites.go deleted file mode 100644 index 74d51d2a..00000000 --- a/avalanchego/tests/e2e/ping/suites.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -// Implements ping tests, requires network-runner cluster. -package ping - -import ( - "context" - "time" - - "github.com/ava-labs/avalanchego/tests/e2e" - - ginkgo "github.com/onsi/ginkgo/v2" - "github.com/onsi/gomega" -) - -var _ = ginkgo.Describe("[Ping]", func() { - ginkgo.It("can ping network-runner RPC server", - // use this for filtering tests by labels - // ref. https://onsi.github.io/ginkgo/#spec-labels - ginkgo.Label( - "require-network-runner", - "ping", - ), - func() { - if e2e.Env.GetRunnerGRPCEndpoint() == "" { - ginkgo.Skip("no local network-runner, skipping") - } - - runnerCli := e2e.Env.GetRunnerClient() - gomega.Expect(runnerCli).ShouldNot(gomega.BeNil()) - - ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second) - _, err := runnerCli.Ping(ctx) - cancel() - gomega.Expect(err).Should(gomega.BeNil()) - }) -}) diff --git a/avalanchego/tests/e2e/static-handlers/suites.go b/avalanchego/tests/e2e/static-handlers/suites.go deleted file mode 100644 index 18b6c367..00000000 --- a/avalanchego/tests/e2e/static-handlers/suites.go +++ /dev/null @@ -1,197 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -// Implements static handlers tests for avm and platformvm -package statichandlers - -import ( - "context" - "time" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/tests/e2e" - "github.com/ava-labs/avalanchego/utils/cb58" - "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" - "github.com/ava-labs/avalanchego/utils/formatting" - "github.com/ava-labs/avalanchego/utils/formatting/address" - "github.com/ava-labs/avalanchego/utils/json" - "github.com/ava-labs/avalanchego/utils/units" - "github.com/ava-labs/avalanchego/vms/avm" - "github.com/ava-labs/avalanchego/vms/platformvm/api" - "github.com/ava-labs/avalanchego/vms/platformvm/reward" - - ginkgo "github.com/onsi/ginkgo/v2" - "github.com/onsi/gomega" -) - -var _ = ginkgo.Describe("[StaticHandlers]", func() { - ginkgo.It("can make calls to avm static api", - // use this for filtering tests by labels - // ref. https://onsi.github.io/ginkgo/#spec-labels - ginkgo.Label( - "require-network-runner", - "static-handlers", - ), - func() { - addrMap := map[string]string{} - for _, addrStr := range []string{ - "A9bTQjfYGBFK3JPRJqF2eh3JYL7cHocvy", - "6mxBGnjGDCKgkVe7yfrmvMA7xE7qCv3vv", - "6ncQ19Q2U4MamkCYzshhD8XFjfwAWFzTa", - "Jz9ayEDt7dx9hDx45aXALujWmL9ZUuqe7", - } { - addr, err := ids.ShortFromString(addrStr) - gomega.Expect(err).Should(gomega.BeNil()) - addrMap[addrStr], err = address.FormatBech32(constants.NetworkIDToHRP[constants.LocalID], addr[:]) - gomega.Expect(err).Should(gomega.BeNil()) - } - avmArgs := avm.BuildGenesisArgs{ - Encoding: formatting.Hex, - GenesisData: map[string]avm.AssetDefinition{ - "asset1": { - Name: "myFixedCapAsset", - Symbol: "MFCA", - Denomination: 8, - InitialState: map[string][]interface{}{ - "fixedCap": { - avm.Holder{ - Amount: 100000, - Address: addrMap["A9bTQjfYGBFK3JPRJqF2eh3JYL7cHocvy"], - }, - avm.Holder{ - Amount: 100000, - Address: addrMap["6mxBGnjGDCKgkVe7yfrmvMA7xE7qCv3vv"], - }, - avm.Holder{ - 
Amount: json.Uint64(50000), - Address: addrMap["6ncQ19Q2U4MamkCYzshhD8XFjfwAWFzTa"], - }, - avm.Holder{ - Amount: json.Uint64(50000), - Address: addrMap["Jz9ayEDt7dx9hDx45aXALujWmL9ZUuqe7"], - }, - }, - }, - }, - "asset2": { - Name: "myVarCapAsset", - Symbol: "MVCA", - InitialState: map[string][]interface{}{ - "variableCap": { - avm.Owners{ - Threshold: 1, - Minters: []string{ - addrMap["A9bTQjfYGBFK3JPRJqF2eh3JYL7cHocvy"], - addrMap["6mxBGnjGDCKgkVe7yfrmvMA7xE7qCv3vv"], - }, - }, - avm.Owners{ - Threshold: 2, - Minters: []string{ - addrMap["6ncQ19Q2U4MamkCYzshhD8XFjfwAWFzTa"], - addrMap["Jz9ayEDt7dx9hDx45aXALujWmL9ZUuqe7"], - }, - }, - }, - }, - }, - "asset3": { - Name: "myOtherVarCapAsset", - InitialState: map[string][]interface{}{ - "variableCap": { - avm.Owners{ - Threshold: 1, - Minters: []string{ - addrMap["A9bTQjfYGBFK3JPRJqF2eh3JYL7cHocvy"], - }, - }, - }, - }, - }, - }, - } - uris := e2e.Env.GetURIs() - gomega.Expect(uris).ShouldNot(gomega.BeEmpty()) - staticClient := avm.NewStaticClient(uris[0]) - ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second) - resp, err := staticClient.BuildGenesis(ctx, &avmArgs) - cancel() - gomega.Expect(err).Should(gomega.BeNil()) - 
gomega.Expect(resp.Bytes).Should(gomega.Equal("0x0000000000030006617373657431000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f6d794669786564436170417373657400044d4643410800000001000000000000000400000007000000000000c350000000000000000000000001000000013f78e510df62bc48b0829ec06d6a6b98062d695300000007000000000000c35000000000000000000000000100000001c54903de5177a16f7811771ef2f4659d9e8646710000000700000000000186a0000000000000000000000001000000013f58fda2e9ea8d9e4b181832a07b26dae286f2cb0000000700000000000186a000000000000000000000000100000001645938bb7ae2193270e6ffef009e3664d11e07c10006617373657432000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000d6d79566172436170417373657400044d5643410000000001000000000000000200000006000000000000000000000001000000023f58fda2e9ea8d9e4b181832a07b26dae286f2cb645938bb7ae2193270e6ffef009e3664d11e07c100000006000000000000000000000001000000023f78e510df62bc48b0829ec06d6a6b98062d6953c54903de5177a16f7811771ef2f4659d9e864671000661737365743300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000126d794f7468657256617243617041737365740000000000000100000000000000010000000600000000000000000000000100000001645938bb7ae2193270e6ffef009e3664d11e07c1279fa028")) - }) - - ginkgo.It("can make calls to platformvm static api", func() { - keys := []*secp256k1.PrivateKey{} - factory := secp256k1.Factory{} - for _, key := range []string{ - "24jUJ9vZexUM6expyMcT48LBx27k1m7xpraoV62oSQAHdziao5", - "2MMvUMsxx6zsHSNXJdFD8yc5XkancvwyKPwpw4xUK3TCGDuNBY", - "cxb7KpGWhDMALTjNNSJ7UQkkomPesyWAPUaWRGdyeBNzR6f35", - "ewoqjP7PxY4yr3iLTpLisriqt94hdyDFNgchSxGGztUrTXtNN", - "2RWLv6YVEXDiWLpaCbXhhqxtLbnFaKQsWPSSMSPhpWo47uJAeV", - } { - privKeyBytes, err := cb58.Decode(key) - gomega.Expect(err).Should(gomega.BeNil()) - pk, err := factory.ToPrivateKey(privKeyBytes) - gomega.Expect(err).Should(gomega.BeNil()) - keys = append(keys, pk) - } - - 
genesisUTXOs := make([]api.UTXO, len(keys)) - hrp := constants.NetworkIDToHRP[constants.UnitTestID] - for i, key := range keys { - id := key.PublicKey().Address() - addr, err := address.FormatBech32(hrp, id.Bytes()) - gomega.Expect(err).Should(gomega.BeNil()) - genesisUTXOs[i] = api.UTXO{ - Amount: json.Uint64(50000 * units.MilliAvax), - Address: addr, - } - } - - genesisValidators := make([]api.PermissionlessValidator, len(keys)) - for i, key := range keys { - id := key.PublicKey().Address() - addr, err := address.FormatBech32(hrp, id.Bytes()) - gomega.Expect(err).Should(gomega.BeNil()) - genesisValidators[i] = api.PermissionlessValidator{ - Staker: api.Staker{ - StartTime: json.Uint64(time.Date(1997, 1, 1, 0, 0, 0, 0, time.UTC).Unix()), - EndTime: json.Uint64(time.Date(1997, 1, 30, 0, 0, 0, 0, time.UTC).Unix()), - NodeID: ids.NodeID(id), - }, - RewardOwner: &api.Owner{ - Threshold: 1, - Addresses: []string{addr}, - }, - Staked: []api.UTXO{{ - Amount: json.Uint64(10000), - Address: addr, - }}, - DelegationFee: reward.PercentDenominator, - } - } - - buildGenesisArgs := api.BuildGenesisArgs{ - NetworkID: json.Uint32(constants.UnitTestID), - AvaxAssetID: ids.ID{'a', 'v', 'a', 'x'}, - UTXOs: genesisUTXOs, - Validators: genesisValidators, - Chains: nil, - Time: json.Uint64(time.Date(1997, 1, 1, 0, 0, 0, 0, time.UTC).Unix()), - InitialSupply: json.Uint64(360 * units.MegaAvax), - Encoding: formatting.Hex, - } - - uris := e2e.Env.GetURIs() - gomega.Expect(uris).ShouldNot(gomega.BeEmpty()) - - staticClient := api.NewStaticClient(uris[0]) - ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second) - resp, err := staticClient.BuildGenesis(ctx, &buildGenesisArgs) - cancel() - gomega.Expect(err).Should(gomega.BeNil()) - 
gomega.Expect(resp.Bytes).Should(gomega.Equal("0x0000000000050000000000000000000000000000000000000000000000000000000000000000000000006176617800000000000000000000000000000000000000000000000000000000000000070000000ba43b740000000000000000000000000100000001fceda8f90fcb5d30614b99d79fc4baa293077626000000000000000000000000000000000000000000000000000000000000000000000000000000016176617800000000000000000000000000000000000000000000000000000000000000070000000ba43b7400000000000000000000000001000000016ead693c17abb1be422bb50b30b9711ff98d667e000000000000000000000000000000000000000000000000000000000000000000000000000000026176617800000000000000000000000000000000000000000000000000000000000000070000000ba43b740000000000000000000000000100000001f2420846876e69f473dda256172967e992f0ee31000000000000000000000000000000000000000000000000000000000000000000000000000000036176617800000000000000000000000000000000000000000000000000000000000000070000000ba43b7400000000000000000000000001000000013cb7d3842e8cee6a0ebd09f1fe884f6861e1b29c000000000000000000000000000000000000000000000000000000000000000000000000000000046176617800000000000000000000000000000000000000000000000000000000000000070000000ba43b74000000000000000000000000010000000187c4ec0736fdad03fd9ec8c3ba609de958601a7b00000000000000050000000c0000000a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000fceda8f90fcb5d30614b99d79fc4baa2930776260000000032c9a9000000000032efe480000000000000271000000001617661780000000000000000000000000000000000000000000000000000000000000007000000000000271000000000000000000000000100000001fceda8f90fcb5d30614b99d79fc4baa2930776260000000b00000000000000000000000100000001fceda8f90fcb5d30614b99d79fc4baa29307762600000000000000000000000c0000000a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000006ead693c17abb1be422bb50b30b9711ff98d667e0000000032c9a9000000000032efe480000000000000271000000001617661780000000000000000000000000000000000000000000000000000000000000007000
0000000002710000000000000000000000001000000016ead693c17abb1be422bb50b30b9711ff98d667e0000000b000000000000000000000001000000016ead693c17abb1be422bb50b30b9711ff98d667e00000000000000000000000c0000000a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f2420846876e69f473dda256172967e992f0ee310000000032c9a9000000000032efe480000000000000271000000001617661780000000000000000000000000000000000000000000000000000000000000007000000000000271000000000000000000000000100000001f2420846876e69f473dda256172967e992f0ee310000000b00000000000000000000000100000001f2420846876e69f473dda256172967e992f0ee3100000000000000000000000c0000000a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000003cb7d3842e8cee6a0ebd09f1fe884f6861e1b29c0000000032c9a9000000000032efe4800000000000002710000000016176617800000000000000000000000000000000000000000000000000000000000000070000000000002710000000000000000000000001000000013cb7d3842e8cee6a0ebd09f1fe884f6861e1b29c0000000b000000000000000000000001000000013cb7d3842e8cee6a0ebd09f1fe884f6861e1b29c00000000000000000000000c0000000a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000087c4ec0736fdad03fd9ec8c3ba609de958601a7b0000000032c9a9000000000032efe48000000000000027100000000161766178000000000000000000000000000000000000000000000000000000000000000700000000000027100000000000000000000000010000000187c4ec0736fdad03fd9ec8c3ba609de958601a7b0000000b0000000000000000000000010000000187c4ec0736fdad03fd9ec8c3ba609de958601a7b0000000000000000000000000000000032c9a90004fefa17b724000000008e96cbef")) - }) -}) diff --git a/avalanchego/tests/e2e/x/interchain_workflow.go b/avalanchego/tests/e2e/x/interchain_workflow.go new file mode 100644 index 00000000..ce13cf8a --- /dev/null +++ b/avalanchego/tests/e2e/x/interchain_workflow.go @@ -0,0 +1,152 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package x + +import ( + "math/big" + + "github.com/ava-labs/coreth/plugin/evm" + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/tests/fixture/e2e" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/utils/units" + "github.com/ava-labs/avalanchego/vms/components/avax" + "github.com/ava-labs/avalanchego/vms/secp256k1fx" + "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" + + ginkgo "github.com/onsi/ginkgo/v2" +) + +var _ = e2e.DescribeXChain("[Interchain Workflow]", ginkgo.Label(e2e.UsesCChainLabel), func() { + require := require.New(ginkgo.GinkgoT()) + + const transferAmount = 10 * units.Avax + + ginkgo.It("should ensure that funds can be transferred from the X-Chain to the C-Chain and the P-Chain", func() { + nodeURI := e2e.Env.GetRandomNodeURI() + + ginkgo.By("creating wallet with a funded key to send from and recipient key to deliver to") + recipientKey, err := secp256k1.NewPrivateKey() + require.NoError(err) + keychain := e2e.Env.NewKeychain(1) + keychain.Add(recipientKey) + baseWallet := e2e.NewWallet(keychain, nodeURI) + xWallet := baseWallet.X() + cWallet := baseWallet.C() + pWallet := baseWallet.P() + + ginkgo.By("defining common configuration") + recipientEthAddress := evm.GetEthAddress(recipientKey) + avaxAssetID := xWallet.AVAXAssetID() + // Use the same owner for sending to X-Chain and importing funds to P-Chain + recipientOwner := secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + recipientKey.Address(), + }, + } + // Use the same outputs for both C-Chain and P-Chain exports + exportOutputs := []*avax.TransferableOutput{ + { + Asset: avax.Asset{ + ID: avaxAssetID, + }, + Out: &secp256k1fx.TransferOutput{ + Amt: transferAmount, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + 
keychain.Keys[0].Address(), + }, + }, + }, + }, + } + + ginkgo.By("sending funds from one address to another on the X-Chain", func() { + _, err = xWallet.IssueBaseTx( + []*avax.TransferableOutput{{ + Asset: avax.Asset{ + ID: avaxAssetID, + }, + Out: &secp256k1fx.TransferOutput{ + Amt: transferAmount, + OutputOwners: recipientOwner, + }, + }}, + e2e.WithDefaultContext(), + ) + require.NoError(err) + }) + + ginkgo.By("checking that the X-Chain recipient address has received the sent funds", func() { + balances, err := xWallet.Builder().GetFTBalance(common.WithCustomAddresses(set.Of( + recipientKey.Address(), + ))) + require.NoError(err) + require.Positive(balances[avaxAssetID]) + }) + + ginkgo.By("exporting AVAX from the X-Chain to the C-Chain", func() { + _, err := xWallet.IssueExportTx( + cWallet.BlockchainID(), + exportOutputs, + e2e.WithDefaultContext(), + ) + require.NoError(err) + }) + + ginkgo.By("initializing a new eth client") + ethClient := e2e.NewEthClient(nodeURI) + + ginkgo.By("importing AVAX from the X-Chain to the C-Chain", func() { + _, err := cWallet.IssueImportTx( + xWallet.BlockchainID(), + recipientEthAddress, + e2e.WithDefaultContext(), + e2e.WithSuggestedGasPrice(ethClient), + ) + require.NoError(err) + }) + + ginkgo.By("checking that the recipient address has received imported funds on the C-Chain") + e2e.Eventually(func() bool { + balance, err := ethClient.BalanceAt(e2e.DefaultContext(), recipientEthAddress, nil) + require.NoError(err) + return balance.Cmp(big.NewInt(0)) > 0 + }, e2e.DefaultTimeout, e2e.DefaultPollingInterval, "failed to see recipient address funded before timeout") + + ginkgo.By("exporting AVAX from the X-Chain to the P-Chain", func() { + _, err := xWallet.IssueExportTx( + constants.PlatformChainID, + exportOutputs, + e2e.WithDefaultContext(), + ) + require.NoError(err) + }) + + ginkgo.By("importing AVAX from the X-Chain to the P-Chain", func() { + _, err := pWallet.IssueImportTx( + xWallet.BlockchainID(), + &recipientOwner, 
+ e2e.WithDefaultContext(), + ) + require.NoError(err) + }) + + ginkgo.By("checking that the recipient address has received imported funds on the P-Chain", func() { + balances, err := pWallet.Builder().GetBalance(common.WithCustomAddresses(set.Of( + recipientKey.Address(), + ))) + require.NoError(err) + require.Positive(balances[avaxAssetID]) + }) + + e2e.CheckBootstrapIsPossible(e2e.Env.GetNetwork()) + }) +}) diff --git a/avalanchego/tests/e2e/x/transfer/virtuous.go b/avalanchego/tests/e2e/x/transfer/virtuous.go index f162276a..4736ba93 100644 --- a/avalanchego/tests/e2e/x/transfer/virtuous.go +++ b/avalanchego/tests/e2e/x/transfer/virtuous.go @@ -1,19 +1,20 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. // Implements X-chain transfer tests. package transfer import ( - "context" "fmt" "math/rand" "time" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/tests" - "github.com/ava-labs/avalanchego/tests/e2e" + "github.com/ava-labs/avalanchego/tests/fixture/e2e" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/vms/avm" "github.com/ava-labs/avalanchego/vms/components/avax" @@ -22,41 +23,57 @@ import ( "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" ginkgo "github.com/onsi/ginkgo/v2" - "github.com/onsi/gomega" ) const ( - metricVtxProcessing = "avalanche_X_avalanche_vtx_processing" - metricVtxAccepted = "avalanche_X_avalanche_vtx_accepted_count" - metricVtxRejected = "avalanche_X_avalanche_vtx_rejected_count" + totalRounds = 50 + + metricBlksProcessing = "avalanche_X_blks_processing" + metricBlksAccepted = "avalanche_X_blks_accepted_count" ) -const totalRounds = 50 +// This test requires that the network not have ongoing blocks and +// cannot reliably be run in parallel. 
+var _ = e2e.DescribeXChainSerial("[Virtuous Transfer Tx AVAX]", func() { + require := require.New(ginkgo.GinkgoT()) -var _ = e2e.DescribeXChain("[Virtuous Transfer Tx AVAX]", func() { ginkgo.It("can issue a virtuous transfer tx for AVAX asset", - // use this for filtering tests by labels - // ref. https://onsi.github.io/ginkgo/#spec-labels - ginkgo.Label( - "require-network-runner", - "x", - "virtuous-transfer-tx-avax", - ), func() { - rpcEps := e2e.Env.GetURIs() - gomega.Expect(rpcEps).ShouldNot(gomega.BeEmpty()) + rpcEps := make([]string, len(e2e.Env.URIs)) + for i, nodeURI := range e2e.Env.URIs { + rpcEps[i] = nodeURI.URI + } + + // Waiting for ongoing blocks to have completed before starting this + // test avoids the case of a previous test having initiated block + // processing but not having completed it. + e2e.Eventually(func() bool { + allNodeMetrics, err := tests.GetNodesMetrics(rpcEps, metricBlksProcessing) + require.NoError(err) + for _, metrics := range allNodeMetrics { + if metrics[metricBlksProcessing] > 0 { + return false + } + } + return true + }, + e2e.DefaultTimeout, + e2e.DefaultPollingInterval, + "The cluster is generating ongoing blocks. Is this test being run in parallel?", + ) allMetrics := []string{ - metricVtxProcessing, - metricVtxAccepted, - metricVtxRejected, + metricBlksProcessing, + metricBlksAccepted, } + // Ensure the same set of 10 keys is used for all tests + // by retrieving them outside of runFunc. + testKeys := e2e.Env.AllocatePreFundedKeys(10) + runFunc := func(round int) { tests.Outf("{{green}}\n\n\n\n\n\n---\n[ROUND #%02d]:{{/}}\n", round) - testKeys, _, _ := e2e.Env.GetTestKeys() - needPermute := round > 3 if needPermute { rand.Seed(time.Now().UnixNano()) @@ -64,19 +81,9 @@ var _ = e2e.DescribeXChain("[Virtuous Transfer Tx AVAX]", func() { testKeys[i], testKeys[j] = testKeys[j], testKeys[i] }) } - keyChain := secp256k1fx.NewKeychain(testKeys...) 
- - var baseWallet primary.Wallet - var err error - ginkgo.By("setting up a base wallet", func() { - walletURI := rpcEps[0] - - // 5-second is enough to fetch initial UTXOs for test cluster in "primary.NewWallet" - ctx, cancel := context.WithTimeout(context.Background(), e2e.DefaultWalletCreationTimeout) - baseWallet, err = primary.NewWalletFromURI(ctx, walletURI, keyChain) - cancel() - gomega.Expect(err).Should(gomega.BeNil()) - }) + + keychain := secp256k1fx.NewKeychain(testKeys...) + baseWallet := e2e.NewWallet(keychain, e2e.Env.GetRandomNodeURI()) avaxAssetID := baseWallet.X().AVAXAssetID() wallets := make([]primary.Wallet, len(testKeys)) @@ -86,33 +93,22 @@ var _ = e2e.DescribeXChain("[Virtuous Transfer Tx AVAX]", func() { wallets[i] = primary.NewWalletWithOptions( baseWallet, - common.WithCustomAddresses(set.Set[ids.ShortID]{ - testKeys[i].PublicKey().Address(): struct{}{}, - }), + common.WithCustomAddresses(set.Of( + testKeys[i].PublicKey().Address(), + )), ) } - // URI -> "metric name" -> "metric value" - metricsBeforeTx := make(map[string]map[string]float64) - for _, u := range rpcEps { - ep := u + "/ext/metrics" - - mm, err := tests.GetMetricsValue(ep, allMetrics...) - gomega.Expect(err).Should(gomega.BeNil()) - tests.Outf("{{green}}metrics at %q:{{/}} %v\n", ep, mm) - - if mm[metricVtxProcessing] > 0 { - tests.Outf("{{red}}{{bold}}%q already has processing vtx!!!{{/}}\n", u) - ginkgo.Skip("the cluster has already ongoing vtx txs thus skipping to prevent conflicts...") - } - - metricsBeforeTx[u] = mm + metricsBeforeTx, err := tests.GetNodesMetrics(rpcEps, allMetrics...) 
+ require.NoError(err) + for _, uri := range rpcEps { + tests.Outf("{{green}}metrics at %q:{{/}} %v\n", uri, metricsBeforeTx[uri]) } testBalances := make([]uint64, 0) for i, w := range wallets { balances, err := w.X().Builder().GetFTBalance() - gomega.Expect(err).Should(gomega.BeNil()) + require.NoError(err) bal := balances[avaxAssetID] testBalances = append(testBalances, bal) @@ -130,9 +126,7 @@ var _ = e2e.DescribeXChain("[Virtuous Transfer Tx AVAX]", func() { break } } - if fromIdx < 0 { - gomega.Expect(fromIdx).Should(gomega.BeNumerically(">", 0), "no address found with non-zero balance") - } + require.GreaterOrEqual(fromIdx, 0, "no address found with non-zero balance") toIdx := -1 for i := range testBalances { @@ -156,8 +150,7 @@ var _ = e2e.DescribeXChain("[Virtuous Transfer Tx AVAX]", func() { receiverNewBal := receiverOrigBal + amountToTransfer ginkgo.By("X-Chain transfer with wrong amount must fail", func() { - ctx, cancel := context.WithTimeout(context.Background(), e2e.DefaultConfirmTxTimeout) - _, err = wallets[fromIdx].X().IssueBaseTx( + _, err := wallets[fromIdx].X().IssueBaseTx( []*avax.TransferableOutput{{ Asset: avax.Asset{ ID: avaxAssetID, @@ -170,10 +163,9 @@ var _ = e2e.DescribeXChain("[Virtuous Transfer Tx AVAX]", func() { }, }, }}, - common.WithContext(ctx), + e2e.WithDefaultContext(), ) - cancel() - gomega.Expect(err.Error()).Should(gomega.ContainSubstring("insufficient funds")) + require.Contains(err.Error(), "insufficient funds") }) fmt.Printf(`=== @@ -199,8 +191,7 @@ RECEIVER NEW BALANCE (AFTER) : %21d AVAX receiverNewBal, ) - ctx, cancel := context.WithTimeout(context.Background(), e2e.DefaultConfirmTxTimeout) - txID, err := wallets[fromIdx].X().IssueBaseTx( + tx, err := wallets[fromIdx].X().IssueBaseTx( []*avax.TransferableOutput{{ Asset: avax.Asset{ ID: avaxAssetID, @@ -213,55 +204,48 @@ RECEIVER NEW BALANCE (AFTER) : %21d AVAX }, }, }}, - common.WithContext(ctx), + e2e.WithDefaultContext(), ) - cancel() - 
gomega.Expect(err).Should(gomega.BeNil()) + require.NoError(err) balances, err := wallets[fromIdx].X().Builder().GetFTBalance() - gomega.Expect(err).Should(gomega.BeNil()) + require.NoError(err) senderCurBalX := balances[avaxAssetID] tests.Outf("{{green}}first wallet balance:{{/}} %d\n", senderCurBalX) balances, err = wallets[toIdx].X().Builder().GetFTBalance() - gomega.Expect(err).Should(gomega.BeNil()) + require.NoError(err) receiverCurBalX := balances[avaxAssetID] tests.Outf("{{green}}second wallet balance:{{/}} %d\n", receiverCurBalX) - gomega.Expect(senderCurBalX).Should(gomega.Equal(senderNewBal)) - gomega.Expect(receiverCurBalX).Should(gomega.Equal(receiverNewBal)) + require.Equal(senderCurBalX, senderNewBal) + require.Equal(receiverCurBalX, receiverNewBal) + txID := tx.ID() for _, u := range rpcEps { xc := avm.NewClient(u, "X") - ctx, cancel := context.WithTimeout(context.Background(), e2e.DefaultConfirmTxTimeout) - status, err := xc.ConfirmTx(ctx, txID, 2*time.Second) - cancel() - gomega.Expect(err).Should(gomega.BeNil()) - gomega.Expect(status).Should(gomega.Equal(choices.Accepted)) + status, err := xc.ConfirmTx(e2e.DefaultContext(), txID, 2*time.Second) + require.NoError(err) + require.Equal(choices.Accepted, status) } for _, u := range rpcEps { xc := avm.NewClient(u, "X") - ctx, cancel := context.WithTimeout(context.Background(), e2e.DefaultConfirmTxTimeout) - status, err := xc.ConfirmTx(ctx, txID, 2*time.Second) - cancel() - gomega.Expect(err).Should(gomega.BeNil()) - gomega.Expect(status).Should(gomega.Equal(choices.Accepted)) + status, err := xc.ConfirmTx(e2e.DefaultContext(), txID, 2*time.Second) + require.NoError(err) + require.Equal(choices.Accepted, status) - ep := u + "/ext/metrics" - mm, err := tests.GetMetricsValue(ep, allMetrics...) - gomega.Expect(err).Should(gomega.BeNil()) + mm, err := tests.GetNodeMetrics(u, allMetrics...) 
+ require.NoError(err) prev := metricsBeforeTx[u] - // +0 since X-chain tx must have been processed and accepted by now - gomega.Expect(mm[metricVtxProcessing]).Should(gomega.Equal(prev[metricVtxProcessing])) + // +0 since X-chain tx must have been processed and accepted + // by now + require.Equal(mm[metricBlksProcessing], prev[metricBlksProcessing]) // +1 since X-chain tx must have been accepted by now - gomega.Expect(mm[metricVtxAccepted]).Should(gomega.Equal(prev[metricVtxAccepted] + 1)) - - // +0 since virtuous X-chain tx must not be rejected - gomega.Expect(mm[metricVtxRejected]).Should(gomega.Equal(prev[metricVtxRejected])) + require.Equal(mm[metricBlksAccepted], prev[metricBlksAccepted]+1) metricsBeforeTx[u] = mm } diff --git a/avalanchego/tests/e2e/x/whitelist-vtx/suites.go b/avalanchego/tests/e2e/x/whitelist-vtx/suites.go deleted file mode 100644 index 75015762..00000000 --- a/avalanchego/tests/e2e/x/whitelist-vtx/suites.go +++ /dev/null @@ -1,276 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -// Implements X-Chain whitelist vtx (stop vertex) tests. 
-package whitelistvtx - -import ( - "context" - "time" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/tests" - "github.com/ava-labs/avalanchego/tests/e2e" - "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/set" - "github.com/ava-labs/avalanchego/vms/avm" - "github.com/ava-labs/avalanchego/vms/components/avax" - "github.com/ava-labs/avalanchego/vms/secp256k1fx" - "github.com/ava-labs/avalanchego/wallet/subnet/primary" - "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" - - ginkgo "github.com/onsi/ginkgo/v2" - "github.com/onsi/gomega" -) - -const ( - metricVtxIssueSuccess = "avalanche_X_avalanche_whitelist_vtx_issue_success" - metricVtxIssueFailure = "avalanche_X_avalanche_whitelist_vtx_issue_failure" - metricTxProcessing = "avalanche_X_avalanche_whitelist_tx_processing" - metricTxAccepted = "avalanche_X_avalanche_whitelist_tx_accepted_count" - metricTxRejected = "avalanche_X_avalanche_whitelist_tx_rejected_count" - metricTxPollsAccepted = "avalanche_X_avalanche_whitelist_tx_polls_accepted_count" - metricTxPollsRejected = "avalanche_X_avalanche_whitelist_tx_polls_rejected_count" -) - -var _ = e2e.DescribeXChain("[WhitelistTx]", func() { - ginkgo.It("can issue whitelist vtx", - // use this for filtering tests by labels - // ref. 
https://onsi.github.io/ginkgo/#spec-labels - ginkgo.Label( - "require-network-runner", - "x", - "whitelist-tx", - ), - func() { - uris := e2e.Env.GetURIs() - gomega.Expect(uris).ShouldNot(gomega.BeEmpty()) - - testKeys, testKeyAddrs, keyChain := e2e.Env.GetTestKeys() - var baseWallet primary.Wallet - ginkgo.By("collect whitelist vtx metrics", func() { - walletURI := uris[0] - - // 5-second is enough to fetch initial UTXOs for test cluster in "primary.NewWallet" - ctx, cancel := context.WithTimeout(context.Background(), e2e.DefaultWalletCreationTimeout) - var err error - baseWallet, err = primary.NewWalletFromURI(ctx, walletURI, keyChain) - cancel() - gomega.Expect(err).Should(gomega.BeNil()) - - if baseWallet.P().NetworkID() == constants.MainnetID { - ginkgo.Skip("skipping tests (mainnet)") - } - }) - avaxAssetID := baseWallet.X().AVAXAssetID() - wallets := make([]primary.Wallet, len(testKeys)) - for i := range wallets { - wallets[i] = primary.NewWalletWithOptions( - baseWallet, - common.WithCustomAddresses(set.Set[ids.ShortID]{ - testKeys[i].PublicKey().Address(): struct{}{}, - }), - ) - } - - allMetrics := []string{ - metricVtxIssueSuccess, - metricVtxIssueFailure, - metricTxProcessing, - metricTxAccepted, - metricTxRejected, - metricTxPollsAccepted, - metricTxPollsRejected, - } - - // URI -> "metric name" -> "metric value" - curMetrics := make(map[string]map[string]float64) - ginkgo.By("collect whitelist vtx metrics", func() { - for _, u := range uris { - ep := u + "/ext/metrics" - - mm, err := tests.GetMetricsValue(ep, allMetrics...) 
- gomega.Expect(err).Should(gomega.BeNil()) - tests.Outf("{{green}}metrics at %q:{{/}} %v\n", ep, mm) - - if mm[metricTxAccepted] > 0 { - tests.Outf("{{red}}{{bold}}%q already has whitelist vtx!!!{{/}}\n", u) - ginkgo.Skip("the cluster has already accepted whitelist vtx thus skipping") - } - - curMetrics[u] = mm - } - }) - - ginkgo.By("issue regular, virtuous X-Chain tx, before whitelist vtx, should succeed", func() { - balances, err := wallets[0].X().Builder().GetFTBalance() - gomega.Expect(err).Should(gomega.BeNil()) - key1PrevBalX := balances[avaxAssetID] - tests.Outf("{{green}}first wallet balance:{{/}} %d\n", key1PrevBalX) - - balances, err = wallets[1].X().Builder().GetFTBalance() - gomega.Expect(err).Should(gomega.BeNil()) - - key2PrevBalX := balances[avaxAssetID] - tests.Outf("{{green}}second wallet balance:{{/}} %d\n", key2PrevBalX) - - transferAmount := key1PrevBalX / 10 - gomega.Expect(transferAmount).Should(gomega.BeNumerically(">", 0.0), "not enough balance in the test wallet") - tests.Outf("{{green}}amount to transfer:{{/}} %d\n", transferAmount) - - tests.Outf("{{blue}}issuing regular, virtuous transaction at %q{{/}}\n", uris[0]) - ctx, cancel := context.WithTimeout(context.Background(), e2e.DefaultConfirmTxTimeout) - _, err = wallets[0].X().IssueBaseTx( - []*avax.TransferableOutput{{ - Asset: avax.Asset{ - ID: avaxAssetID, - }, - Out: &secp256k1fx.TransferOutput{ - Amt: transferAmount, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{testKeyAddrs[1]}, - }, - }, - }}, - common.WithContext(ctx), - ) - cancel() - gomega.Expect(err).Should(gomega.BeNil()) - - time.Sleep(3 * time.Second) - - balances, err = wallets[0].X().Builder().GetFTBalance() - gomega.Expect(err).Should(gomega.BeNil()) - key1CurBalX := balances[avaxAssetID] - tests.Outf("{{green}}first wallet balance:{{/}} %d\n", key1CurBalX) - - balances, err = wallets[1].X().Builder().GetFTBalance() - gomega.Expect(err).Should(gomega.BeNil()) - key2CurBalX := 
balances[avaxAssetID] - tests.Outf("{{green}}second wallet balance:{{/}} %d\n", key2CurBalX) - - gomega.Expect(key1CurBalX).Should(gomega.Equal(key1PrevBalX - transferAmount - baseWallet.X().BaseTxFee())) - gomega.Expect(key2CurBalX).Should(gomega.Equal(key2PrevBalX + transferAmount)) - }) - - // issue a whitelist vtx to the first node - // to trigger "Notify(common.StopVertex)", "t.issueStopVtx()", and "handleAsyncMsg" - // this is the very first whitelist vtx issue request - // SO THIS SHOULD SUCCEED WITH NO ERROR - ginkgo.By("issue whitelist vtx to the first node", func() { - tests.Outf("{{blue}}{{bold}}issuing whitelist vtx at URI %q at the very first time{{/}}\n", uris[0]) - client := avm.NewClient(uris[0], "X") - ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second) - err := client.IssueStopVertex(ctx) - cancel() - gomega.Expect(err).Should(gomega.BeNil()) - tests.Outf("{{blue}}issued whitelist vtx at %q{{/}}\n", uris[0]) - }) - - ginkgo.By("accept the whitelist vtx in all nodes", func() { - tests.Outf("{{blue}}waiting before checking the status of whitelist vtx{{/}}\n") - time.Sleep(5 * time.Second) // should NOT take too long for all nodes to accept whitelist vtx - - for _, u := range uris { - ep := u + "/ext/metrics" - mm, err := tests.GetMetricsValue(ep, allMetrics...) 
- gomega.Expect(err).Should(gomega.BeNil()) - - prev := curMetrics[u] - - // +1 since the local node engine issues a new whitelist vtx - gomega.Expect(mm[metricVtxIssueSuccess]).Should(gomega.Equal(prev[metricVtxIssueSuccess] + 1)) - - // +0 since no node ever failed to issue a whitelist vtx - gomega.Expect(mm[metricVtxIssueFailure]).Should(gomega.Equal(prev[metricVtxIssueFailure])) - - // +0 since the local node snowstorm successfully issued the whitelist tx or received from the first node, and accepted - gomega.Expect(mm[metricTxProcessing]).Should(gomega.Equal(prev[metricTxProcessing])) - - // +1 since the local node snowstorm successfully accepted the whitelist tx or received from the first node - gomega.Expect(mm[metricTxAccepted]).Should(gomega.Equal(prev[metricTxAccepted] + 1)) - gomega.Expect(mm[metricTxPollsAccepted]).Should(gomega.Equal(prev[metricTxPollsAccepted] + 1)) - - // +0 since no node ever rejected a whitelist tx - gomega.Expect(mm[metricTxRejected]).Should(gomega.Equal(prev[metricTxRejected])) - gomega.Expect(mm[metricTxPollsRejected]).Should(gomega.Equal(prev[metricTxPollsRejected])) - - curMetrics[u] = mm - } - }) - - // to trigger "Notify(common.StopVertex)" and "t.issueStopVtx()", or "Put" - // this is the second, conflicting whitelist vtx issue request - // SO THIS MUST FAIL WITH ERROR IN ALL NODES - ginkgo.By("whitelist vtx can't be issued twice in all nodes", func() { - for _, u := range uris { - tests.Outf("{{red}}issuing second whitelist vtx to URI %q{{/}}\n", u) - client := avm.NewClient(u, "X") - ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second) - err := client.IssueStopVertex(ctx) - cancel() - gomega.Expect(err).Should(gomega.BeNil()) // issue itself is asynchronous, so the internal error is not exposed! - - // the local node should see updates on the metrics - time.Sleep(3 * time.Second) - - ep := u + "/ext/metrics" - mm, err := tests.GetMetricsValue(ep, allMetrics...) 
- gomega.Expect(err).Should(gomega.BeNil()) - - prev := curMetrics[u] - - // +0 since no node should ever successfully issue another whitelist vtx - gomega.Expect(mm[metricVtxIssueSuccess]).Should(gomega.Equal(prev[metricVtxIssueSuccess])) - - // +0 since the local node engine should have dropped the conflicting whitelist vtx issue request - gomega.Expect(mm[metricVtxIssueFailure]).Should(gomega.Equal(prev[metricVtxIssueFailure])) - - // +0 since the local node snowstorm successfully issued the whitelist tx "before", and no whitelist tx is being processed - gomega.Expect(mm[metricTxProcessing]).Should(gomega.Equal(prev[metricTxProcessing])) - - // +0 since the local node snowstorm successfully accepted the whitelist tx "before" - gomega.Expect(mm[metricTxAccepted]).Should(gomega.Equal(prev[metricTxAccepted])) - gomega.Expect(mm[metricTxPollsAccepted]).Should(gomega.Equal(prev[metricTxPollsAccepted])) - - // +0 since the local node snowstorm never rejected a whitelist tx - gomega.Expect(mm[metricTxRejected]).Should(gomega.Equal(prev[metricTxRejected])) - gomega.Expect(mm[metricTxPollsRejected]).Should(gomega.Equal(prev[metricTxPollsRejected])) - - curMetrics[u] = mm - } - }) - - ginkgo.By("issue regular, virtuous X-Chain tx, after whitelist vtx, should pass", func() { - balances, err := wallets[0].X().Builder().GetFTBalance() - gomega.Expect(err).Should(gomega.BeNil()) - - avaxAssetID := baseWallet.X().AVAXAssetID() - key1PrevBalX := balances[avaxAssetID] - tests.Outf("{{green}}first wallet balance:{{/}} %d\n", key1PrevBalX) - - transferAmount := key1PrevBalX / 10 - gomega.Expect(transferAmount).Should(gomega.BeNumerically(">", 0.0), "not enough balance in the test wallet") - tests.Outf("{{blue}}issuing regular, virtuous transaction at %q{{/}}\n", uris[0]) - ctx, cancel := context.WithTimeout(context.Background(), e2e.DefaultConfirmTxTimeout) - _, err = wallets[0].X().IssueBaseTx( - []*avax.TransferableOutput{{ - Asset: avax.Asset{ - ID: avaxAssetID, - }, - Out: 
&secp256k1fx.TransferOutput{ - Amt: transferAmount, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{testKeyAddrs[1]}, - }, - }, - }}, - common.WithContext(ctx), - ) - cancel() - gomega.Expect(err).Should(gomega.BeNil()) - }) - }) -}) diff --git a/avalanchego/tests/fixture/e2e/describe.go b/avalanchego/tests/fixture/e2e/describe.go new file mode 100644 index 00000000..28101177 --- /dev/null +++ b/avalanchego/tests/fixture/e2e/describe.go @@ -0,0 +1,41 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package e2e + +import ( + ginkgo "github.com/onsi/ginkgo/v2" +) + +const ( + // For label usage in ginkgo invocation, see: https://onsi.github.io/ginkgo/#spec-labels + + // Label for filtering a test that is not primarily a C-Chain test + // but nonetheless uses the C-Chain. Intended to support + // execution of all C-Chain tests by the coreth repo in an e2e job. + UsesCChainLabel = "uses-c" +) + +// DescribeXChain annotates the tests for X-Chain. +func DescribeXChain(text string, args ...interface{}) bool { + args = append(args, ginkgo.Label("x")) + return ginkgo.Describe("[X-Chain] "+text, args...) +} + +// DescribeXChainSerial annotates serial tests for X-Chain. +func DescribeXChainSerial(text string, args ...interface{}) bool { + args = append(args, ginkgo.Serial) + return DescribeXChain(text, args...) +} + +// DescribePChain annotates the tests for P-Chain. +func DescribePChain(text string, args ...interface{}) bool { + args = append(args, ginkgo.Label("p")) + return ginkgo.Describe("[P-Chain] "+text, args...) +} + +// DescribeCChain annotates the tests for C-Chain. +func DescribeCChain(text string, args ...interface{}) bool { + args = append(args, ginkgo.Label("c")) + return ginkgo.Describe("[C-Chain] "+text, args...) 
+} diff --git a/avalanchego/tests/fixture/e2e/env.go b/avalanchego/tests/fixture/e2e/env.go new file mode 100644 index 00000000..a5fe08d4 --- /dev/null +++ b/avalanchego/tests/fixture/e2e/env.go @@ -0,0 +1,184 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package e2e + +import ( + "encoding/json" + "math/rand" + "os" + "path/filepath" + "time" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/api/info" + "github.com/ava-labs/avalanchego/config" + "github.com/ava-labs/avalanchego/tests" + "github.com/ava-labs/avalanchego/tests/fixture" + "github.com/ava-labs/avalanchego/tests/fixture/tmpnet" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + "github.com/ava-labs/avalanchego/utils/perms" + "github.com/ava-labs/avalanchego/vms/secp256k1fx" + + ginkgo "github.com/onsi/ginkgo/v2" +) + +// Env is used to access shared test fixture. Intended to be +// initialized from SynchronizedBeforeSuite. +var Env *TestEnvironment + +func InitSharedTestEnvironment(envBytes []byte) { + require := require.New(ginkgo.GinkgoT()) + require.Nil(Env, "env already initialized") + Env = &TestEnvironment{} + require.NoError(json.Unmarshal(envBytes, Env)) + Env.require = require +} + +type TestEnvironment struct { + // The directory where the test network configuration is stored + NetworkDir string + // URIs used to access the API endpoints of nodes of the network + URIs []tmpnet.NodeURI + // The URI used to access the http server that allocates test data + TestDataServerURI string + + require *require.Assertions +} + +func (te *TestEnvironment) Marshal() []byte { + bytes, err := json.Marshal(te) + require.NoError(ginkgo.GinkgoT(), err) + return bytes +} + +// Initialize a new test environment with a shared network (either pre-existing or newly created). 
+func NewTestEnvironment(flagVars *FlagVars, desiredNetwork *tmpnet.Network) *TestEnvironment { + require := require.New(ginkgo.GinkgoT()) + + networkDir := flagVars.NetworkDir() + + // Load or create a test network + var network *tmpnet.Network + if len(networkDir) > 0 { + var err error + network, err = tmpnet.ReadNetwork(networkDir) + require.NoError(err) + tests.Outf("{{yellow}}Using an existing network configured at %s{{/}}\n", network.Dir) + + // Set the desired subnet configuration to ensure subsequent creation. + for _, subnet := range desiredNetwork.Subnets { + if existing := network.GetSubnet(subnet.Name); existing != nil { + // Already present + continue + } + network.Subnets = append(network.Subnets, subnet) + } + } else { + network = desiredNetwork + StartNetwork(network, DefaultNetworkDir, flagVars.AvalancheGoExecPath(), flagVars.PluginDir()) + } + + // A new network will always need subnet creation and an existing + // network will also need subnets to be created the first time it + // is used. + require.NoError(network.CreateSubnets(DefaultContext(), ginkgo.GinkgoWriter)) + + // Wait for chains to have bootstrapped on all nodes + Eventually(func() bool { + for _, subnet := range network.Subnets { + for _, validatorID := range subnet.ValidatorIDs { + uri, err := network.GetURIForNodeID(validatorID) + require.NoError(err) + infoClient := info.NewClient(uri) + for _, chain := range subnet.Chains { + isBootstrapped, err := infoClient.IsBootstrapped(DefaultContext(), chain.ChainID.String()) + // Ignore errors since a chain id that is not yet known will result in a recoverable error. 
+ if err != nil || !isBootstrapped { + return false + } + } + } + } + return true + }, DefaultTimeout, DefaultPollingInterval, "failed to see all chains bootstrap before timeout") + + uris := network.GetNodeURIs() + require.NotEmpty(uris, "network contains no nodes") + tests.Outf("{{green}}network URIs: {{/}} %+v\n", uris) + + testDataServerURI, err := fixture.ServeTestData(fixture.TestData{ + PreFundedKeys: network.PreFundedKeys, + }) + tests.Outf("{{green}}test data server URI: {{/}} %+v\n", testDataServerURI) + require.NoError(err) + + return &TestEnvironment{ + NetworkDir: network.Dir, + URIs: uris, + TestDataServerURI: testDataServerURI, + require: require, + } +} + +// Retrieve a random URI to naively attempt to spread API load across +// nodes. +func (te *TestEnvironment) GetRandomNodeURI() tmpnet.NodeURI { + r := rand.New(rand.NewSource(time.Now().Unix())) //#nosec G404 + nodeURI := te.URIs[r.Intn(len(te.URIs))] + tests.Outf("{{blue}} targeting node %s with URI: %s{{/}}\n", nodeURI.NodeID, nodeURI.URI) + return nodeURI +} + +// Retrieve the network to target for testing. +func (te *TestEnvironment) GetNetwork() *tmpnet.Network { + network, err := tmpnet.ReadNetwork(te.NetworkDir) + te.require.NoError(err) + return network +} + +// Retrieve the specified number of funded keys allocated for the caller's exclusive use. +func (te *TestEnvironment) AllocatePreFundedKeys(count int) []*secp256k1.PrivateKey { + keys, err := fixture.AllocatePreFundedKeys(te.TestDataServerURI, count) + te.require.NoError(err) + tests.Outf("{{blue}} allocated pre-funded key(s): %+v{{/}}\n", keys) + return keys +} + +// Retrieve a funded key allocated for the caller's exclusive use. +func (te *TestEnvironment) AllocatePreFundedKey() *secp256k1.PrivateKey { + return te.AllocatePreFundedKeys(1)[0] +} + +// Create a new keychain with the specified number of test keys. 
+func (te *TestEnvironment) NewKeychain(count int) *secp256k1fx.Keychain { + keys := te.AllocatePreFundedKeys(count) + return secp256k1fx.NewKeychain(keys...) +} + +// Create a new private network that is not shared with other tests. +func (te *TestEnvironment) NewPrivateNetwork() *tmpnet.Network { + // Load the shared network to retrieve its path and exec path + sharedNetwork, err := tmpnet.ReadNetwork(te.NetworkDir) + te.require.NoError(err) + + network := &tmpnet.Network{} + + // The private networks dir is under the shared network dir to ensure it + // will be included in the artifact uploaded in CI. + privateNetworksDir := filepath.Join(sharedNetwork.Dir, PrivateNetworksDirName) + te.require.NoError(os.MkdirAll(privateNetworksDir, perms.ReadWriteExecute)) + + pluginDir, err := sharedNetwork.DefaultFlags.GetStringVal(config.PluginDirKey) + te.require.NoError(err) + + StartNetwork( + network, + privateNetworksDir, + sharedNetwork.DefaultRuntimeConfig.AvalancheGoPath, + pluginDir, + ) + + return network +} diff --git a/avalanchego/tests/fixture/e2e/flags.go b/avalanchego/tests/fixture/e2e/flags.go new file mode 100644 index 00000000..2a00df97 --- /dev/null +++ b/avalanchego/tests/fixture/e2e/flags.go @@ -0,0 +1,71 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package e2e + +import ( + "flag" + "fmt" + "os" + + "github.com/ava-labs/avalanchego/tests/fixture/tmpnet" +) + +type FlagVars struct { + avalancheGoExecPath string + pluginDir string + networkDir string + useExistingNetwork bool +} + +func (v *FlagVars) AvalancheGoExecPath() string { + return v.avalancheGoExecPath +} + +func (v *FlagVars) PluginDir() string { + return v.pluginDir +} + +func (v *FlagVars) NetworkDir() string { + if !v.useExistingNetwork { + return "" + } + if len(v.networkDir) > 0 { + return v.networkDir + } + return os.Getenv(tmpnet.NetworkDirEnvName) +} + +func (v *FlagVars) UseExistingNetwork() bool { + return v.useExistingNetwork +} + +func RegisterFlags() *FlagVars { + vars := FlagVars{} + flag.StringVar( + &vars.avalancheGoExecPath, + "avalanchego-path", + os.Getenv(tmpnet.AvalancheGoPathEnvName), + fmt.Sprintf("avalanchego executable path (required if not using an existing network). Also possible to configure via the %s env variable.", tmpnet.AvalancheGoPathEnvName), + ) + flag.StringVar( + &vars.pluginDir, + "plugin-dir", + os.ExpandEnv("$HOME/.avalanchego/plugins"), + "[optional] the dir containing VM plugins.", + ) + flag.StringVar( + &vars.networkDir, + "network-dir", + "", + fmt.Sprintf("[optional] the dir containing the configuration of an existing network to target for testing. Will only be used if --use-existing-network is specified. Also possible to configure via the %s env variable.", tmpnet.NetworkDirEnvName), + ) + flag.BoolVar( + &vars.useExistingNetwork, + "use-existing-network", + false, + "[optional] whether to target the existing network identified by --network-dir.", + ) + + return &vars +} diff --git a/avalanchego/tests/fixture/e2e/helpers.go b/avalanchego/tests/fixture/e2e/helpers.go new file mode 100644 index 00000000..c88f3cac --- /dev/null +++ b/avalanchego/tests/fixture/e2e/helpers.go @@ -0,0 +1,242 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package e2e + +import ( + "context" + "errors" + "fmt" + "math/big" + "os" + "strings" + "time" + + "github.com/ava-labs/coreth/core/types" + "github.com/ava-labs/coreth/ethclient" + "github.com/ava-labs/coreth/interfaces" + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/tests" + "github.com/ava-labs/avalanchego/tests/fixture/tmpnet" + "github.com/ava-labs/avalanchego/vms/secp256k1fx" + "github.com/ava-labs/avalanchego/wallet/subnet/primary" + "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" + + ginkgo "github.com/onsi/ginkgo/v2" +) + +const ( + // A long default timeout used to timeout failed operations but + // unlikely to induce flaking due to unexpected resource + // contention. + DefaultTimeout = 2 * time.Minute + + DefaultPollingInterval = tmpnet.DefaultPollingInterval + + // Setting this env will disable post-test bootstrap + // checks. Useful for speeding up iteration during test + // development. + SkipBootstrapChecksEnvName = "E2E_SKIP_BOOTSTRAP_CHECKS" + + DefaultValidatorStartTimeDiff = tmpnet.DefaultValidatorStartTimeDiff + + DefaultGasLimit = uint64(21000) // Standard gas limit + + // An empty string prompts the use of the default path which ensures a + // predictable target for github's upload-artifact action. + DefaultNetworkDir = "" + + // Directory used to store private networks (specific to a single test) + // under the shared network dir. + PrivateNetworksDirName = "private_networks" +) + +// Create a new wallet for the provided keychain against the specified node URI. 
+func NewWallet(keychain *secp256k1fx.Keychain, nodeURI tmpnet.NodeURI) primary.Wallet { + tests.Outf("{{blue}} initializing a new wallet for node %s with URI: %s {{/}}\n", nodeURI.NodeID, nodeURI.URI) + baseWallet, err := primary.MakeWallet(DefaultContext(), &primary.WalletConfig{ + URI: nodeURI.URI, + AVAXKeychain: keychain, + EthKeychain: keychain, + }) + require.NoError(ginkgo.GinkgoT(), err) + return primary.NewWalletWithOptions( + baseWallet, + common.WithPostIssuanceFunc( + func(id ids.ID) { + tests.Outf(" issued transaction with ID: %s\n", id) + }, + ), + ) +} + +// Create a new eth client targeting the specified node URI. +func NewEthClient(nodeURI tmpnet.NodeURI) ethclient.Client { + tests.Outf("{{blue}} initializing a new eth client for node %s with URI: %s {{/}}\n", nodeURI.NodeID, nodeURI.URI) + nodeAddress := strings.Split(nodeURI.URI, "//")[1] + uri := fmt.Sprintf("ws://%s/ext/bc/C/ws", nodeAddress) + client, err := ethclient.Dial(uri) + require.NoError(ginkgo.GinkgoT(), err) + return client +} + +// Helper simplifying use of a timed context by canceling the context on ginkgo teardown. +func ContextWithTimeout(duration time.Duration) context.Context { + ctx, cancel := context.WithTimeout(context.Background(), duration) + ginkgo.DeferCleanup(cancel) + return ctx +} + +// Helper simplifying use of a timed context configured with the default timeout. +func DefaultContext() context.Context { + return ContextWithTimeout(DefaultTimeout) +} + +// Helper simplifying use via an option of a timed context configured with the default timeout. +func WithDefaultContext() common.Option { + return common.WithContext(DefaultContext()) +} + +// Re-implementation of testify/require.Eventually that is compatible with ginkgo. testify's +// version calls the condition function with a goroutine and ginkgo assertions don't work +// properly in goroutines. 
+func Eventually(condition func() bool, waitFor time.Duration, tick time.Duration, msg string) { + ticker := time.NewTicker(tick) + defer ticker.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), waitFor) + defer cancel() + for !condition() { + select { + case <-ctx.Done(): + require.Fail(ginkgo.GinkgoT(), msg) + case <-ticker.C: + } + } +} + +// Adds an ephemeral node intended to be used by a single test. +func AddEphemeralNode(network *tmpnet.Network, flags tmpnet.FlagsMap) *tmpnet.Node { + require := require.New(ginkgo.GinkgoT()) + + node, err := network.AddEphemeralNode(DefaultContext(), ginkgo.GinkgoWriter, flags) + require.NoError(err) + + ginkgo.DeferCleanup(func() { + tests.Outf("shutting down ephemeral node %q\n", node.NodeID) + ctx, cancel := context.WithTimeout(context.Background(), DefaultTimeout) + defer cancel() + require.NoError(node.Stop(ctx)) + }) + return node +} + +// Wait for the given node to report healthy. +func WaitForHealthy(node *tmpnet.Node) { + // Need to use explicit context (vs DefaultContext()) to support use with DeferCleanup + ctx, cancel := context.WithTimeout(context.Background(), DefaultTimeout) + defer cancel() + require.NoError(ginkgo.GinkgoT(), tmpnet.WaitForHealthy(ctx, node)) +} + +// Sends an eth transaction, waits for the transaction receipt to be issued +// and checks that the receipt indicates success. 
+func SendEthTransaction(ethClient ethclient.Client, signedTx *types.Transaction) *types.Receipt { + require := require.New(ginkgo.GinkgoT()) + + txID := signedTx.Hash() + tests.Outf(" sending eth transaction with ID: %s\n", txID) + + require.NoError(ethClient.SendTransaction(DefaultContext(), signedTx)) + + // Wait for the receipt + var receipt *types.Receipt + Eventually(func() bool { + var err error + receipt, err = ethClient.TransactionReceipt(DefaultContext(), txID) + if errors.Is(err, interfaces.NotFound) { + return false // Transaction is still pending + } + require.NoError(err) + return true + }, DefaultTimeout, DefaultPollingInterval, "failed to see transaction acceptance before timeout") + + require.Equal(types.ReceiptStatusSuccessful, receipt.Status) + return receipt +} + +// Determines the suggested gas price for the configured client that will +// maximize the chances of transaction acceptance. +func SuggestGasPrice(ethClient ethclient.Client) *big.Int { + gasPrice, err := ethClient.SuggestGasPrice(DefaultContext()) + require.NoError(ginkgo.GinkgoT(), err) + // Double the suggested gas price to maximize the chances of + // acceptance. Maybe this can be revisited pending resolution of + // https://github.com/ava-labs/coreth/issues/314. + gasPrice.Add(gasPrice, gasPrice) + return gasPrice +} + +// Helper simplifying use via an option of a gas price appropriate for testing. +func WithSuggestedGasPrice(ethClient ethclient.Client) common.Option { + baseFee := SuggestGasPrice(ethClient) + return common.WithBaseFee(baseFee) +} + +// Verify that a new node can bootstrap into the network. This function is safe to call +// from `Teardown` by virtue of not depending on ginkgo.DeferCleanup. 
+func CheckBootstrapIsPossible(network *tmpnet.Network) { + require := require.New(ginkgo.GinkgoT()) + + if len(os.Getenv(SkipBootstrapChecksEnvName)) > 0 { + tests.Outf("{{yellow}}Skipping bootstrap check due to the %s env var being set", SkipBootstrapChecksEnvName) + return + } + ginkgo.By("checking if bootstrap is possible with the current network state") + + ctx, cancel := context.WithTimeout(context.Background(), DefaultTimeout) + defer cancel() + + node, err := network.AddEphemeralNode(ctx, ginkgo.GinkgoWriter, tmpnet.FlagsMap{}) + // AddEphemeralNode will initiate node stop if an error is encountered during start, + // so no further cleanup effort is required if an error is seen here. + require.NoError(err) + + // Ensure the node is always stopped at the end of the check + defer func() { + ctx, cancel = context.WithTimeout(context.Background(), DefaultTimeout) + defer cancel() + require.NoError(node.Stop(ctx)) + }() + + // Check that the node becomes healthy within timeout + require.NoError(tmpnet.WaitForHealthy(ctx, node)) +} + +// Start a temporary network with the provided avalanchego binary. 
+func StartNetwork(network *tmpnet.Network, rootNetworkDir string, avalancheGoExecPath string, pluginDir string) { + require := require.New(ginkgo.GinkgoT()) + + require.NoError( + tmpnet.StartNewNetwork( + DefaultContext(), + ginkgo.GinkgoWriter, + network, + rootNetworkDir, + avalancheGoExecPath, + pluginDir, + tmpnet.DefaultNodeCount, + ), + ) + + ginkgo.DeferCleanup(func() { + tests.Outf("Shutting down network\n") + ctx, cancel := context.WithTimeout(context.Background(), DefaultTimeout) + defer cancel() + require.NoError(network.Stop(ctx)) + }) + + tests.Outf("{{green}}Successfully started network{{/}}\n") +} diff --git a/avalanchego/tests/fixture/test_data_server.go b/avalanchego/tests/fixture/test_data_server.go new file mode 100644 index 00000000..b79dcc2b --- /dev/null +++ b/avalanchego/tests/fixture/test_data_server.go @@ -0,0 +1,167 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package fixture + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net" + "net/http" + "net/url" + "strconv" + "strings" + "sync" + "time" + + "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" +) + +const ( + allocateKeysPath = "/allocateKeys" + keyCountParameterName = "count" + requestedKeyCountExceedsAvailable = "requested key count exceeds available allocation" +) + +var ( + errRequestedKeyCountExceedsAvailable = errors.New(requestedKeyCountExceedsAvailable) + errInvalidKeyCount = errors.New("key count must be greater than zero") +) + +type TestData struct { + PreFundedKeys []*secp256k1.PrivateKey +} + +// http server allocating resources to tests potentially executing in parallel +type testDataServer struct { + // Synchronizes access to test data + lock sync.Mutex + TestData +} + +// Type used to marshal/unmarshal a set of test keys for transmission over http. 
+type keysDocument struct { + Keys []*secp256k1.PrivateKey `json:"keys"` +} + +func (s *testDataServer) allocateKeys(w http.ResponseWriter, r *http.Request) { + // Attempt to parse the count parameter + rawKeyCount := r.URL.Query().Get(keyCountParameterName) + if len(rawKeyCount) == 0 { + msg := fmt.Sprintf("missing %q parameter", keyCountParameterName) + http.Error(w, msg, http.StatusBadRequest) + return + } + keyCount, err := strconv.Atoi(rawKeyCount) + if err != nil { + msg := fmt.Sprintf("unable to parse %q parameter: %v", keyCountParameterName, err) + http.Error(w, msg, http.StatusBadRequest) + return + } + + // Ensure a key will be allocated at most once + s.lock.Lock() + defer s.lock.Unlock() + + // Only fulfill requests for available keys + if keyCount > len(s.PreFundedKeys) { + http.Error(w, requestedKeyCountExceedsAvailable, http.StatusInternalServerError) + return + } + + // Allocate the requested number of keys + remainingKeys := len(s.PreFundedKeys) - keyCount + allocatedKeys := s.PreFundedKeys[remainingKeys:] + + keysDoc := &keysDocument{ + Keys: allocatedKeys, + } + if err := json.NewEncoder(w).Encode(keysDoc); err != nil { + msg := fmt.Sprintf("failed to encode test keys: %v", err) + http.Error(w, msg, http.StatusInternalServerError) + return + } + + // Forget the allocated keys + utils.ZeroSlice(allocatedKeys) + s.PreFundedKeys = s.PreFundedKeys[:remainingKeys] +} + +// Serve test data via http to ensure allocation is synchronized even when +// ginkgo specs are executing in parallel. Returns the URI to access the server. 
+func ServeTestData(testData TestData) (string, error) { + // Listen on a dynamic port to avoid conflicting with other applications + listener, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + return "", fmt.Errorf("failed to initialize listener for test data server: %w", err) + } + address := fmt.Sprintf("http://%s", listener.Addr()) + + s := &testDataServer{ + TestData: testData, + } + mux := http.NewServeMux() + mux.HandleFunc(allocateKeysPath, s.allocateKeys) + + httpServer := &http.Server{ + Handler: mux, + ReadHeaderTimeout: 3 * time.Second, + } + + go func() { + // Serve always returns a non-nil error and closes l. + if err := httpServer.Serve(listener); err != http.ErrServerClosed { + panic(fmt.Sprintf("unexpected error closing test data server: %v", err)) + } + }() + + return address, nil +} + +// Retrieve the specified number of pre-funded test keys from the provided URI. A given +// key is allocated at most once during the life of the test data server. +func AllocatePreFundedKeys(baseURI string, count int) ([]*secp256k1.PrivateKey, error) { + if count <= 0 { + return nil, errInvalidKeyCount + } + + uri, err := url.Parse(baseURI) + if err != nil { + return nil, fmt.Errorf("failed to parse uri: %w", err) + } + uri.RawQuery = url.Values{ + keyCountParameterName: {strconv.Itoa(count)}, + }.Encode() + uri.Path = allocateKeysPath + req, err := http.NewRequestWithContext(context.Background(), http.MethodGet, uri.String(), nil) + if err != nil { + return nil, fmt.Errorf("failed to construct request: %w", err) + } + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return nil, fmt.Errorf("failed to request pre-funded keys: %w", err) + } + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("failed to read response for pre-funded keys: %w", err) + } + if resp.StatusCode != http.StatusOK { + if strings.TrimSpace(string(body)) == requestedKeyCountExceedsAvailable { + return nil, 
errRequestedKeyCountExceedsAvailable + } + return nil, fmt.Errorf("test data server returned unexpected status code %d: %v", resp.StatusCode, body) + } + + keysDoc := &keysDocument{} + if err := json.Unmarshal(body, keysDoc); err != nil { + return nil, fmt.Errorf("failed to unmarshal pre-funded keys: %w", err) + } + return keysDoc.Keys, nil +} diff --git a/avalanchego/tests/fixture/test_data_server_test.go b/avalanchego/tests/fixture/test_data_server_test.go new file mode 100644 index 00000000..7191ae75 --- /dev/null +++ b/avalanchego/tests/fixture/test_data_server_test.go @@ -0,0 +1,76 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package fixture + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" +) + +// Check that funded test keys can be served from an http server to +// ensure at-most-once allocation when tests are executed in parallel. 
+func TestAllocatePreFundedKeys(t *testing.T) { + keys := make([]*secp256k1.PrivateKey, 5) + for i := range keys { + key, err := secp256k1.NewPrivateKey() + require.NoError(t, err) + keys[i] = key + } + + uri, err := ServeTestData(TestData{ + PreFundedKeys: keys, + }) + require.NoError(t, err) + + testCases := []struct { + name string + count int + expectedAddresses []ids.ShortID + expectedError error + }{ + { + name: "single key", + count: 1, + expectedAddresses: []ids.ShortID{ + keys[4].Address(), + }, + expectedError: nil, + }, + { + name: "multiple keys", + count: 4, + expectedAddresses: []ids.ShortID{ + keys[0].Address(), + keys[1].Address(), + keys[2].Address(), + keys[3].Address(), + }, + expectedError: nil, + }, + { + name: "insufficient keys available", + count: 1, + expectedAddresses: []ids.ShortID{}, + expectedError: errRequestedKeyCountExceedsAvailable, + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + require := require.New(t) + + keys, err := AllocatePreFundedKeys(uri, tc.count) + require.ErrorIs(err, tc.expectedError) + + addresses := make([]ids.ShortID, len(keys)) + for i, key := range keys { + addresses[i] = key.Address() + } + require.Equal(tc.expectedAddresses, addresses) + }) + } +} diff --git a/avalanchego/tests/fixture/tmpnet/README.md b/avalanchego/tests/fixture/tmpnet/README.md new file mode 100644 index 00000000..909a29c6 --- /dev/null +++ b/avalanchego/tests/fixture/tmpnet/README.md @@ -0,0 +1,231 @@ +# tmpnet - temporary network orchestration + +This package implements a simple orchestrator for the avalanchego +nodes of a temporary network. Configuration is stored on disk, and +nodes run as independent processes whose process details are also +written to disk. Using the filesystem to store configuration and +process details allows for the `tmpnetctl` cli and e2e test fixture to +orchestrate the same temporary networks without the use of an rpc daemon. + +## What's in a name? 
+ +The name of this package was originally `testnet` and its cli was +`testnetctl`. This name was chosen in ignorance that `testnet` +commonly refers to a persistent blockchain network used for testing. + +To avoid confusion, the name was changed to `tmpnet` and its cli +`tmpnetctl`. `tmpnet` is short for `temporary network` since the +networks it deploys are likely to live for a limited duration in +support of the development and testing of avalanchego and its related +repositories. + +## Package details + +The functionality in this package is grouped by logical purpose into +the following non-test files: + +| Filename | Types | Purpose | +|:------------------|:------------|:-----------------------------------------------| +| defaults.go | | Defines common default configuration | +| flags.go | FlagsMap | Simplifies configuration of avalanchego flags | +| genesis.go | | Creates test genesis | +| network.go | Network | Orchestrates and configures temporary networks | +| network_config.go | Network | Reads and writes network configuration | +| node.go | Node | Orchestrates and configures nodes | +| node_config.go | Node | Reads and writes node configuration | +| node_process.go | NodeProcess | Orchestrates node processes | +| subnet.go | Subnet | Orchestrates subnets | +| utils.go | | Defines shared utility functions | + +## Usage + +### Via tmpnetctl + +A temporary network can be managed by the `tmpnetctl` cli tool: + +```bash +# From the root of the avalanchego repo + +# Build the tmpnetctl binary +$ ./scripts/build_tmpnetctl.sh + +# Start a new network +$ ./build/tmpnetctl start-network --avalanchego-path=/path/to/avalanchego +... 
+Started network 1000 @ /home/me/.tmpnet/networks/1000 + +Configure tmpnetctl to target this network by default with one of the following statements: + - source /home/me/.tmpnet/networks/1000/network.env + - export TMPNET_NETWORK_DIR=/home/me/.tmpnet/networks/1000 + - export TMPNET_NETWORK_DIR=/home/me/.tmpnet/networks/latest + +# Stop the network +$ ./build/tmpnetctl stop-network --network-dir=/path/to/network +``` + +Note the export of the path ending in `latest`. This is a symlink that +is set to the last network created by `tmpnetctl start-network`. Setting +the `TMPNET_NETWORK_DIR` env var to this symlink ensures that +`tmpnetctl` commands and e2e execution with +`--use-existing-network` will target the most recently deployed temporary +network. + +### Via code + +A temporary network can be managed in code: + +```golang +network := &tmpnet.Network{ // Configure non-default values for the new network + DefaultFlags: tmpnet.FlagsMap{ + config.LogLevelKey: "INFO", // Change one of the network's defaults + }, + Subnets: []*tmpnet.Subnet{ // Subnets to create on the new network once it is running + { + Name: "xsvm-a", // User-defined name used to reference subnet in code and on disk + Chains: []*tmpnet.Chain{ + { + VMName: "xsvm", // Name of the VM the chain will run, will be used to derive the name of the VM binary + Genesis: , // Genesis bytes used to initialize the custom chain + PreFundedKey: , // (Optional) A private key that is funded in the genesis bytes + }, + }, + }, + }, +} + +_ := tmpnet.StartNewNetwork( // Start the network + ctx, // Context used to limit duration of waiting for network health + ginkgo.GinkgoWriter, // Writer to report progress of initialization + network, + "", // Empty string uses the default network path (~/tmpnet/networks) + "/path/to/avalanchego", // The path to the binary that nodes will execute + "/path/to/plugins", // The path nodes will use for plugin binaries (suggested value ~/.avalanchego/plugins) + 5, // Number of initial 
validating nodes +) + +uris := network.GetNodeURIs() + +// Use URIs to interact with the network + +// Stop all nodes in the network +network.Stop(context.Background()) +``` + +## Networking configuration + +By default, nodes in a temporary network will be started with staking and +API ports set to `0` to ensure that ports will be dynamically +chosen. The tmpnet fixture discovers the ports used by a given node +by reading the `[base-data-dir]/process.json` file written by +avalanchego on node start. The use of dynamic ports supports testing +with many temporary networks without having to manually select compatible +port ranges. + +## Configuration on disk + +A temporary network relies on configuration written to disk in the following structure: + +``` +HOME +└── .tmpnet // Root path for the temporary network fixture + └── networks // Default parent directory for temporary networks + └── 1000 // The networkID is used to name the network dir and starts at 1000 + ├── NodeID-37E8UK3x2YFsHE3RdALmfWcppcZ1eTuj9 // The ID of a node is the name of its data dir + │ ├── chainData + │ │ └── ... + │ ├── config.json // Node runtime configuration + │ ├── db + │ │ └── ... + │ ├── flags.json // Node flags + │ ├── logs + │ │ └── ... + │ ├── plugins + │ │ └── ... 
+    │   └── process.json                           // Node process details (PID, API URI, staking address)
+    ├── chains
+    │   ├── C
+    │   │   └── config.json                        // C-Chain config for all nodes
+    │   └── raZ51bwfepaSaZ1MNSRNYNs3ZPfj...U7pa3
+    │       └── config.json                        // Custom chain configuration for all nodes
+    ├── config.json                                // Common configuration (including defaults and pre-funded keys)
+    ├── genesis.json                               // Genesis for all nodes
+    ├── network.env                                // Sets network dir env var to simplify network usage
+    └── subnets                                    // Parent directory for subnet definitions
+        ├─ subnet-a.json                           // Configuration for subnet-a and its chain(s)
+        └─ subnet-b.json                           // Configuration for subnet-b and its chain(s)
+```
+
+### Common networking configuration
+
+Network configuration such as default flags (e.g. `--log-level=`),
+runtime defaults (e.g. avalanchego path) and pre-funded private keys
+are stored at `[network-dir]/config.json`. A given default will only
+be applied to a new node on its addition to the network if the node
+does not explicitly set a given value.
+
+### Genesis
+
+The genesis file is stored at `[network-dir]/genesis.json` and
+referenced by default by all nodes in the network. The genesis file
+content will be generated with reasonable defaults if not
+supplied. Each node in the network can override the default by setting
+an explicit value for `--genesis-file` or `--genesis-file-content`.
+
+### Chain configuration
+
+The chain configuration for a temporary network is stored at
+`[network-dir]/chains/[chain alias or ID]/config.json` and referenced
+by all nodes in the network. The C-Chain config will be generated with
+reasonable defaults if not supplied. X-Chain and P-Chain will use
+implicit defaults. The configuration for custom chains can be provided
+with subnet configuration and will be written to the appropriate path.
+
+Each node in the network can override network-level chain
+configuration by setting `--chain-config-dir` to an explicit value and
+ensuring that configuration files for all chains exist at
+`[custom-chain-config-dir]/[chain alias or ID]/config.json`.
+
+### Network env
+
+A shell script that sets the `TMPNET_NETWORK_DIR` env var to the
+path of the network is stored at `[network-dir]/network.env`. Sourcing
+this file (i.e. `source network.env`) in a shell will configure ginkgo
+e2e and the `tmpnetctl` cli to target the network path specified in
+the env var.
+
+Set `TMPNET_ROOT_DIR` to specify the root directory in which to create
+the configuration directory of new networks
+(e.g. `$TMPNET_ROOT_DIR/[network-dir]`). The default root directory is
+`~/.tmpnet/networks`. Configuring the root directory is only relevant
+when creating new networks as the path of existing networks will
+already have been set.
+
+### Node configuration
+
+The data dir for a node is set by default to
+`[network-path]/[node-id]`. A node can be configured to use a
+non-default path by explicitly setting the `--data-dir`
+flag.
+
+#### Runtime config
+
+The details required to configure a node's execution are written to
+`[network-path]/[node-id]/config.json`. This file contains the
+runtime-specific details like the path of the avalanchego binary to
+start the node with.
+
+#### Flags
+
+All flags used to configure a node are written to
+`[network-path]/[node-id]/flags.json` so that a node can be
+configured with only a single argument:
+`--config-file=/path/to/flags.json`. This simplifies node launch and
+ensures all parameters used to launch a node can be modified by
+editing the config file.
+
+#### Process details
+
+The process details of a node are written by avalanchego to
+`[base-data-dir]/process.json`. The file contains the PID of the node
+process, the URI of the node's API, and the address other nodes can
+use to bootstrap themselves (aka staking address).
diff --git a/avalanchego/tests/fixture/tmpnet/cmd/main.go b/avalanchego/tests/fixture/tmpnet/cmd/main.go new file mode 100644 index 00000000..dd59c300 --- /dev/null +++ b/avalanchego/tests/fixture/tmpnet/cmd/main.go @@ -0,0 +1,148 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package main + +import ( + "context" + "errors" + "fmt" + "io/fs" + "os" + "path/filepath" + "time" + + "github.com/spf13/cobra" + + "github.com/ava-labs/avalanchego/tests/fixture/tmpnet" + "github.com/ava-labs/avalanchego/version" +) + +const cliVersion = "0.0.1" + +var ( + errAvalancheGoRequired = fmt.Errorf("--avalanchego-path or %s are required", tmpnet.AvalancheGoPathEnvName) + errNetworkDirRequired = fmt.Errorf("--network-dir or %s are required", tmpnet.NetworkDirEnvName) +) + +func main() { + var networkDir string + rootCmd := &cobra.Command{ + Use: "tmpnetctl", + Short: "tmpnetctl commands", + } + rootCmd.PersistentFlags().StringVar(&networkDir, "network-dir", os.Getenv(tmpnet.NetworkDirEnvName), "The path to the configuration directory of a temporary network") + + versionCmd := &cobra.Command{ + Use: "version", + Short: "Print version details", + RunE: func(*cobra.Command, []string) error { + msg := cliVersion + if len(version.GitCommit) > 0 { + msg += ", commit=" + version.GitCommit + } + fmt.Fprintf(os.Stdout, msg+"\n") + return nil + }, + } + rootCmd.AddCommand(versionCmd) + + var ( + rootDir string + avalancheGoPath string + pluginDir string + nodeCount uint8 + ) + startNetworkCmd := &cobra.Command{ + Use: "start-network", + Short: "Start a new temporary network", + RunE: func(*cobra.Command, []string) error { + if len(avalancheGoPath) == 0 { + return errAvalancheGoRequired + } + + // Root dir will be defaulted on start if not provided + + network := &tmpnet.Network{} + + // Extreme upper bound, should never take this long + networkStartTimeout := 2 * time.Minute + + ctx, cancel := 
context.WithTimeout(context.Background(), networkStartTimeout) + defer cancel() + err := tmpnet.StartNewNetwork( + ctx, + os.Stdout, + network, + rootDir, + avalancheGoPath, + pluginDir, + int(nodeCount), + ) + if err != nil { + return err + } + + // Symlink the new network to the 'latest' network to simplify usage + networkRootDir := filepath.Dir(network.Dir) + networkDirName := filepath.Base(network.Dir) + latestSymlinkPath := filepath.Join(networkRootDir, "latest") + if err := os.Remove(latestSymlinkPath); err != nil && !errors.Is(err, fs.ErrNotExist) { + return err + } + if err := os.Symlink(networkDirName, latestSymlinkPath); err != nil { + return err + } + + fmt.Fprintf(os.Stdout, "\nConfigure tmpnetctl to target this network by default with one of the following statements:\n") + fmt.Fprintf(os.Stdout, " - source %s\n", network.EnvFilePath()) + fmt.Fprintf(os.Stdout, " - %s\n", network.EnvFileContents()) + fmt.Fprintf(os.Stdout, " - export %s=%s\n", tmpnet.NetworkDirEnvName, latestSymlinkPath) + + return nil + }, + } + startNetworkCmd.PersistentFlags().StringVar(&rootDir, "root-dir", os.Getenv(tmpnet.RootDirEnvName), "The path to the root directory for temporary networks") + startNetworkCmd.PersistentFlags().StringVar(&avalancheGoPath, "avalanchego-path", os.Getenv(tmpnet.AvalancheGoPathEnvName), "The path to an avalanchego binary") + startNetworkCmd.PersistentFlags().StringVar(&pluginDir, "plugin-dir", os.ExpandEnv("$HOME/.avalanchego/plugins"), "[optional] the dir containing VM plugins") + startNetworkCmd.PersistentFlags().Uint8Var(&nodeCount, "node-count", tmpnet.DefaultNodeCount, "Number of nodes the network should initially consist of") + rootCmd.AddCommand(startNetworkCmd) + + stopNetworkCmd := &cobra.Command{ + Use: "stop-network", + Short: "Stop a temporary network", + RunE: func(*cobra.Command, []string) error { + if len(networkDir) == 0 { + return errNetworkDirRequired + } + ctx, cancel := context.WithTimeout(context.Background(), 
tmpnet.DefaultNetworkTimeout) + defer cancel() + if err := tmpnet.StopNetwork(ctx, networkDir); err != nil { + return err + } + fmt.Fprintf(os.Stdout, "Stopped network configured at: %s\n", networkDir) + return nil + }, + } + rootCmd.AddCommand(stopNetworkCmd) + + restartNetworkCmd := &cobra.Command{ + Use: "restart-network", + Short: "Restart a temporary network", + RunE: func(*cobra.Command, []string) error { + if len(networkDir) == 0 { + return errNetworkDirRequired + } + ctx, cancel := context.WithTimeout(context.Background(), tmpnet.DefaultNetworkTimeout) + defer cancel() + return tmpnet.RestartNetwork(ctx, os.Stdout, networkDir) + }, + } + rootCmd.AddCommand(restartNetworkCmd) + + if err := rootCmd.Execute(); err != nil { + fmt.Fprintf(os.Stderr, "tmpnetctl failed: %v\n", err) + os.Exit(1) + } + os.Exit(0) +} diff --git a/avalanchego/tests/fixture/tmpnet/defaults.go b/avalanchego/tests/fixture/tmpnet/defaults.go new file mode 100644 index 00000000..2b88ef49 --- /dev/null +++ b/avalanchego/tests/fixture/tmpnet/defaults.go @@ -0,0 +1,67 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package tmpnet + +import ( + "time" + + "github.com/ava-labs/avalanchego/config" + "github.com/ava-labs/avalanchego/vms/platformvm/txs/executor" +) + +const ( + // Interval appropriate for network operations that should be + // retried periodically but not too often. + DefaultPollingInterval = 500 * time.Millisecond + + // Validator start time must be a minimum of SyncBound from the + // current time for validator addition to succeed, and adding 20 + // seconds provides a buffer in case of any delay in processing. 
+ DefaultValidatorStartTimeDiff = executor.SyncBound + 20*time.Second + + DefaultNetworkTimeout = 2 * time.Minute + + // Minimum required to ensure connectivity-based health checks will pass + DefaultNodeCount = 2 + + // Arbitrary number of pre-funded keys to create by default + DefaultPreFundedKeyCount = 50 + + // A short minimum stake duration enables testing of staking logic. + DefaultMinStakeDuration = time.Second + + defaultConfigFilename = "config.json" +) + +// A set of flags appropriate for testing. +func DefaultFlags() FlagsMap { + // Supply only non-default configuration to ensure that default values will be used. + return FlagsMap{ + config.NetworkPeerListGossipFreqKey: "250ms", + config.NetworkMaxReconnectDelayKey: "1s", + config.PublicIPKey: "127.0.0.1", + config.HTTPHostKey: "127.0.0.1", + config.StakingHostKey: "127.0.0.1", + config.HealthCheckFreqKey: "2s", + config.AdminAPIEnabledKey: true, + config.IpcAPIEnabledKey: true, + config.IndexEnabledKey: true, + config.LogDisplayLevelKey: "INFO", + config.LogLevelKey: "DEBUG", + config.MinStakeDurationKey: DefaultMinStakeDuration.String(), + } +} + +// A set of chain configurations appropriate for testing. +func DefaultChainConfigs() map[string]FlagsMap { + return map[string]FlagsMap{ + // Supply only non-default configuration to ensure that default + // values will be used. Available C-Chain configuration options are + // defined in the `github.com/ava-labs/coreth/evm` package. + "C": { + "warp-api-enabled": true, + "log-level": "trace", + }, + } +} diff --git a/avalanchego/tests/fixture/tmpnet/flags.go b/avalanchego/tests/fixture/tmpnet/flags.go new file mode 100644 index 00000000..3084982e --- /dev/null +++ b/avalanchego/tests/fixture/tmpnet/flags.go @@ -0,0 +1,69 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package tmpnet + +import ( + "encoding/json" + "fmt" + "os" + + "github.com/spf13/cast" + + "github.com/ava-labs/avalanchego/utils/perms" +) + +// Defines a mapping of flag keys to values intended to be supplied to +// an invocation of an AvalancheGo node. +type FlagsMap map[string]interface{} + +// Utility function simplifying construction of a FlagsMap from a file. +func ReadFlagsMap(path string, description string) (*FlagsMap, error) { + bytes, err := os.ReadFile(path) + if err != nil { + return nil, fmt.Errorf("failed to read %s: %w", description, err) + } + flagsMap := &FlagsMap{} + if err := json.Unmarshal(bytes, flagsMap); err != nil { + return nil, fmt.Errorf("failed to unmarshal %s: %w", description, err) + } + return flagsMap, nil +} + +// SetDefaults ensures the effectiveness of flag overrides by only +// setting values supplied in the defaults map that are not already +// explicitly set. +func (f FlagsMap) SetDefaults(defaults FlagsMap) { + for key, value := range defaults { + if _, ok := f[key]; !ok { + f[key] = value + } + } +} + +// GetStringVal simplifies retrieving a map value as a string. +func (f FlagsMap) GetStringVal(key string) (string, error) { + rawVal, ok := f[key] + if !ok { + return "", nil + } + + val, err := cast.ToStringE(rawVal) + if err != nil { + return "", fmt.Errorf("failed to cast value for %q: %w", key, err) + } + return val, nil +} + +// Write simplifies writing a FlagsMap to the provided path. The +// description is used in error messages. 
+func (f FlagsMap) Write(path string, description string) error { + bytes, err := DefaultJSONMarshal(f) + if err != nil { + return fmt.Errorf("failed to marshal %s: %w", description, err) + } + if err := os.WriteFile(path, bytes, perms.ReadWrite); err != nil { + return fmt.Errorf("failed to write %s: %w", description, err) + } + return nil +} diff --git a/avalanchego/tests/fixture/tmpnet/genesis.go b/avalanchego/tests/fixture/tmpnet/genesis.go new file mode 100644 index 00000000..53cdca89 --- /dev/null +++ b/avalanchego/tests/fixture/tmpnet/genesis.go @@ -0,0 +1,191 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package tmpnet + +import ( + "encoding/json" + "errors" + "fmt" + "math/big" + "time" + + "github.com/ava-labs/coreth/core" + "github.com/ava-labs/coreth/params" + "github.com/ava-labs/coreth/plugin/evm" + + "github.com/ava-labs/avalanchego/genesis" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + "github.com/ava-labs/avalanchego/utils/formatting/address" + "github.com/ava-labs/avalanchego/utils/units" + "github.com/ava-labs/avalanchego/vms/platformvm/reward" +) + +const ( + defaultGasLimit = uint64(100_000_000) // Gas limit is arbitrary + + // Arbitrarily large amount of AVAX to fund keys on the X-Chain for testing + defaultFundedKeyXChainAmount = 30 * units.MegaAvax +) + +var ( + // Arbitrarily large amount of AVAX (10^12) to fund keys on the C-Chain for testing + defaultFundedKeyCChainAmount = new(big.Int).Exp(big.NewInt(10), big.NewInt(30), nil) + + errNoKeysForGenesis = errors.New("no keys to fund for genesis") + errInvalidNetworkIDForGenesis = errors.New("network ID can't be mainnet, testnet or local network ID for genesis") + errMissingStakersForGenesis = errors.New("no stakers provided for genesis") +) + +// Helper type to simplify configuring X-Chain genesis balances +type 
XChainBalanceMap map[ids.ShortID]uint64 + +// Create a genesis struct valid for bootstrapping a test +// network. Note that many of the genesis fields (e.g. reward +// addresses) are randomly generated or hard-coded. +func NewTestGenesis( + networkID uint32, + nodes []*Node, + keysToFund []*secp256k1.PrivateKey, +) (*genesis.UnparsedConfig, error) { + // Validate inputs + switch networkID { + case constants.MainnetID, constants.LocalID: + return nil, errInvalidNetworkIDForGenesis + } + if len(nodes) == 0 { + return nil, errMissingStakersForGenesis + } + if len(keysToFund) == 0 { + return nil, errNoKeysForGenesis + } + + initialStakers, err := stakersForNodes(networkID, nodes) + if err != nil { + return nil, fmt.Errorf("failed to configure stakers for nodes: %w", err) + } + + // Address that controls stake doesn't matter -- generate it randomly + stakeAddress, err := address.Format( + "X", + constants.GetHRP(networkID), + ids.GenerateTestShortID().Bytes(), + ) + if err != nil { + return nil, fmt.Errorf("failed to format stake address: %w", err) + } + + // Ensure the total stake allows a MegaAvax per staker + totalStake := uint64(len(initialStakers)) * units.MegaAvax + + // The eth address is only needed to link pre-mainnet assets. Until that capability + // becomes necessary for testing, use a bogus address. 
+ // + // Reference: https://github.com/ava-labs/avalanchego/issues/1365#issuecomment-1511508767 + ethAddress := "0x0000000000000000000000000000000000000000" + + now := time.Now() + + config := &genesis.UnparsedConfig{ + NetworkID: networkID, + Allocations: []genesis.UnparsedAllocation{ + { + ETHAddr: ethAddress, + AVAXAddr: stakeAddress, + InitialAmount: 0, + UnlockSchedule: []genesis.LockedAmount{ // Provides stake to validators + { + Amount: totalStake, + Locktime: uint64(now.Add(7 * 24 * time.Hour).Unix()), // 1 Week + }, + }, + }, + }, + StartTime: uint64(now.Unix()), + InitialStakedFunds: []string{stakeAddress}, + InitialStakeDuration: 365 * 24 * 60 * 60, // 1 year + InitialStakeDurationOffset: 90 * 60, // 90 minutes + Message: "hello avalanche!", + InitialStakers: initialStakers, + } + + // Ensure pre-funded keys have arbitrary large balances on both chains to support testing + xChainBalances := make(XChainBalanceMap, len(keysToFund)) + cChainBalances := make(core.GenesisAlloc, len(keysToFund)) + for _, key := range keysToFund { + xChainBalances[key.Address()] = defaultFundedKeyXChainAmount + cChainBalances[evm.GetEthAddress(key)] = core.GenesisAccount{ + Balance: defaultFundedKeyCChainAmount, + } + } + + // Set X-Chain balances + for xChainAddress, balance := range xChainBalances { + avaxAddr, err := address.Format("X", constants.GetHRP(networkID), xChainAddress[:]) + if err != nil { + return nil, fmt.Errorf("failed to format X-Chain address: %w", err) + } + config.Allocations = append( + config.Allocations, + genesis.UnparsedAllocation{ + ETHAddr: ethAddress, + AVAXAddr: avaxAddr, + InitialAmount: balance, + UnlockSchedule: []genesis.LockedAmount{ + { + Amount: 20 * units.MegaAvax, + }, + { + Amount: totalStake, + Locktime: uint64(now.Add(7 * 24 * time.Hour).Unix()), // 1 Week + }, + }, + }, + ) + } + + // Define C-Chain genesis + cChainGenesis := &core.Genesis{ + Config: params.AvalancheLocalChainConfig, + Difficulty: big.NewInt(0), // Difficulty is a 
mandatory field + GasLimit: defaultGasLimit, + Alloc: cChainBalances, + } + cChainGenesisBytes, err := json.Marshal(cChainGenesis) + if err != nil { + return nil, fmt.Errorf("failed to marshal C-Chain genesis: %w", err) + } + config.CChainGenesis = string(cChainGenesisBytes) + + return config, nil +} + +// Returns staker configuration for the given set of nodes. +func stakersForNodes(networkID uint32, nodes []*Node) ([]genesis.UnparsedStaker, error) { + // Give staking rewards for initial validators to a random address. Any testing of staking rewards + // will be easier to perform with nodes other than the initial validators since the timing of + // staking can be more easily controlled. + rewardAddr, err := address.Format("X", constants.GetHRP(networkID), ids.GenerateTestShortID().Bytes()) + if err != nil { + return nil, fmt.Errorf("failed to format reward address: %w", err) + } + + // Configure provided nodes as initial stakers + initialStakers := make([]genesis.UnparsedStaker, len(nodes)) + for i, node := range nodes { + pop, err := node.GetProofOfPossession() + if err != nil { + return nil, fmt.Errorf("failed to derive proof of possession for node %s: %w", node.NodeID, err) + } + initialStakers[i] = genesis.UnparsedStaker{ + NodeID: node.NodeID, + RewardAddress: rewardAddr, + DelegationFee: .01 * reward.PercentDenominator, + Signer: pop, + } + } + + return initialStakers, nil +} diff --git a/avalanchego/tests/fixture/tmpnet/network.go b/avalanchego/tests/fixture/tmpnet/network.go new file mode 100644 index 00000000..28626af3 --- /dev/null +++ b/avalanchego/tests/fixture/tmpnet/network.go @@ -0,0 +1,708 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package tmpnet + +import ( + "context" + "encoding/hex" + "errors" + "fmt" + "io" + "io/fs" + "os" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/ava-labs/avalanchego/config" + "github.com/ava-labs/avalanchego/genesis" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + "github.com/ava-labs/avalanchego/utils/perms" + "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/vms/platformvm" +) + +// The Network type is defined in this file (orchestration) and +// network_config.go (reading/writing configuration). + +const ( + // Constants defining the names of shell variables whose value can + // configure network orchestration. + NetworkDirEnvName = "TMPNET_NETWORK_DIR" + RootDirEnvName = "TMPNET_ROOT_DIR" + + // This interval was chosen to avoid spamming node APIs during + // startup, as smaller intervals (e.g. 50ms) seemed to noticeably + // increase the time for a network's nodes to be seen as healthy. + networkHealthCheckInterval = 200 * time.Millisecond + + // eth address: 0x8db97C7cEcE249c2b98bDC0226Cc4C2A57BF52FC + HardHatKeyStr = "56289e99c94b6912bfc12adc093c9b51124f0dc54ac7a766b2bc5ccf558d8027" +) + +// HardhatKey is a legacy used for hardhat testing in subnet-evm +// TODO(marun) Remove when no longer needed. 
+var HardhatKey *secp256k1.PrivateKey + +func init() { + hardhatKeyBytes, err := hex.DecodeString(HardHatKeyStr) + if err != nil { + panic(err) + } + HardhatKey, err = secp256k1.ToPrivateKey(hardhatKeyBytes) + if err != nil { + panic(err) + } +} + +// Collects the configuration for running a temporary avalanchego network +type Network struct { + // Path where network configuration and data is stored + Dir string + + // Configuration common across nodes + Genesis *genesis.UnparsedConfig + ChainConfigs map[string]FlagsMap + + // Default configuration to use when creating new nodes + DefaultFlags FlagsMap + DefaultRuntimeConfig NodeRuntimeConfig + + // Keys pre-funded in the genesis on both the X-Chain and the C-Chain + PreFundedKeys []*secp256k1.PrivateKey + + // Nodes that constitute the network + Nodes []*Node + + // Subnets that have been enabled on the network + Subnets []*Subnet +} + +// Ensure a real and absolute network dir so that node +// configuration that embeds the network path will continue to +// work regardless of symlink and working directory changes. +func toCanonicalDir(dir string) (string, error) { + absDir, err := filepath.Abs(dir) + if err != nil { + return "", err + } + return filepath.EvalSymlinks(absDir) +} + +func StartNewNetwork( + ctx context.Context, + w io.Writer, + network *Network, + rootNetworkDir string, + avalancheGoExecPath string, + pluginDir string, + nodeCount int, +) error { + if err := network.EnsureDefaultConfig(w, avalancheGoExecPath, pluginDir, nodeCount); err != nil { + return err + } + if err := network.Create(rootNetworkDir); err != nil { + return err + } + return network.Start(ctx, w) +} + +// Stops the nodes of the network configured in the provided directory. +func StopNetwork(ctx context.Context, dir string) error { + network, err := ReadNetwork(dir) + if err != nil { + return err + } + return network.Stop(ctx) +} + +// Restarts the nodes of the network configured in the provided directory. 
+func RestartNetwork(ctx context.Context, w io.Writer, dir string) error { + network, err := ReadNetwork(dir) + if err != nil { + return err + } + return network.Restart(ctx, w) +} + +// Reads a network from the provided directory. +func ReadNetwork(dir string) (*Network, error) { + canonicalDir, err := toCanonicalDir(dir) + if err != nil { + return nil, err + } + network := &Network{ + Dir: canonicalDir, + } + if err := network.Read(); err != nil { + return nil, fmt.Errorf("failed to read network: %w", err) + } + return network, nil +} + +// Initializes a new network with default configuration. +func (n *Network) EnsureDefaultConfig(w io.Writer, avalancheGoPath string, pluginDir string, nodeCount int) error { + if _, err := fmt.Fprintf(w, "Preparing configuration for new network with %s\n", avalancheGoPath); err != nil { + return err + } + + // Ensure default flags + if n.DefaultFlags == nil { + n.DefaultFlags = FlagsMap{} + } + n.DefaultFlags.SetDefaults(DefaultFlags()) + + // Only configure the plugin dir with a non-empty value to ensure + // the use of the default value (`[datadir]/plugins`) when + // no plugin dir is configured. 
+ if len(pluginDir) > 0 { + if _, ok := n.DefaultFlags[config.PluginDirKey]; !ok { + n.DefaultFlags[config.PluginDirKey] = pluginDir + } + } + + // Ensure pre-funded keys + if len(n.PreFundedKeys) == 0 { + keys, err := NewPrivateKeys(DefaultPreFundedKeyCount) + if err != nil { + return err + } + n.PreFundedKeys = keys + } + + // Ensure primary chains are configured + if n.ChainConfigs == nil { + n.ChainConfigs = map[string]FlagsMap{} + } + defaultChainConfigs := DefaultChainConfigs() + for alias, chainConfig := range defaultChainConfigs { + if _, ok := n.ChainConfigs[alias]; !ok { + n.ChainConfigs[alias] = FlagsMap{} + } + n.ChainConfigs[alias].SetDefaults(chainConfig) + } + + // Ensure runtime is configured + if len(n.DefaultRuntimeConfig.AvalancheGoPath) == 0 { + n.DefaultRuntimeConfig.AvalancheGoPath = avalancheGoPath + } + + // Ensure nodes are created + if len(n.Nodes) == 0 { + n.Nodes = NewNodes(nodeCount) + } + + // Ensure nodes are configured + for i := range n.Nodes { + if err := n.EnsureNodeConfig(n.Nodes[i]); err != nil { + return err + } + } + + return nil +} + +// Creates the network on disk, choosing its network id and generating its genesis in the process. 
+func (n *Network) Create(rootDir string) error { + if len(rootDir) == 0 { + // Use the default root dir + var err error + rootDir, err = getDefaultRootDir() + if err != nil { + return err + } + } + + // Ensure creation of the root dir + if err := os.MkdirAll(rootDir, perms.ReadWriteExecute); err != nil { + return fmt.Errorf("failed to create root network dir: %w", err) + } + + // Determine the network path and ID + var ( + networkDir string + networkID uint32 + ) + if n.Genesis != nil && n.Genesis.NetworkID > 0 { + // Use the network ID defined in the provided genesis + networkID = n.Genesis.NetworkID + } + if networkID > 0 { + // Use a directory with a random suffix + var err error + networkDir, err = os.MkdirTemp(rootDir, fmt.Sprintf("%d.", n.Genesis.NetworkID)) + if err != nil { + return fmt.Errorf("failed to create network dir: %w", err) + } + } else { + // Find the next available network ID based on the contents of the root dir + var err error + networkID, networkDir, err = findNextNetworkID(rootDir) + if err != nil { + return err + } + } + canonicalDir, err := toCanonicalDir(networkDir) + if err != nil { + return err + } + n.Dir = canonicalDir + + pluginDir, err := n.DefaultFlags.GetStringVal(config.PluginDirKey) + if err != nil { + return err + } + if len(pluginDir) > 0 { + // Ensure the existence of the plugin directory or nodes won't be able to start. + if err := os.MkdirAll(pluginDir, perms.ReadWriteExecute); err != nil { + return fmt.Errorf("failed to create plugin dir: %w", err) + } + } + + if n.Genesis == nil { + // Pre-fund known legacy keys to support ad-hoc testing. Usage of a legacy key will + // require knowing the key beforehand rather than retrieving it from the set of pre-funded + // keys exposed by a network. Since allocation will not be exclusive, a test using a + // legacy key is unlikely to be a good candidate for parallel execution. 
+ keysToFund := []*secp256k1.PrivateKey{ + genesis.VMRQKey, + genesis.EWOQKey, + HardhatKey, + } + keysToFund = append(keysToFund, n.PreFundedKeys...) + + genesis, err := NewTestGenesis(networkID, n.Nodes, keysToFund) + if err != nil { + return err + } + n.Genesis = genesis + } + + for _, node := range n.Nodes { + // Ensure the node is configured for use with the network and + // knows where to write its configuration. + if err := n.EnsureNodeConfig(node); err != nil { + return err + } + } + + // Ensure configuration on disk is current + return n.Write() + } + + // Starts all nodes in the network + func (n *Network) Start(ctx context.Context, w io.Writer) error { + if _, err := fmt.Fprintf(w, "Starting network %d @ %s\n", n.Genesis.NetworkID, n.Dir); err != nil { + return err + } + + // Configure the networking for each node and start + for _, node := range n.Nodes { + if err := n.StartNode(ctx, w, node); err != nil { + return err + } + } + + if _, err := fmt.Fprintf(w, "Waiting for all nodes to report healthy...\n\n"); err != nil { + return err + } + if err := n.WaitForHealthy(ctx, w); err != nil { + return err + } + if _, err := fmt.Fprintf(w, "\nStarted network %d @ %s\n", n.Genesis.NetworkID, n.Dir); err != nil { + return err + } + + return nil + } + + func (n *Network) AddEphemeralNode(ctx context.Context, w io.Writer, flags FlagsMap) (*Node, error) { + node := NewNode("") + node.Flags = flags + node.IsEphemeral = true + if err := n.StartNode(ctx, w, node); err != nil { + return nil, err + } + return node, nil + } + + // Starts the provided node after configuring it for the network. 
+func (n *Network) StartNode(ctx context.Context, w io.Writer, node *Node) error { + if err := n.EnsureNodeConfig(node); err != nil { + return err + } + + bootstrapIPs, bootstrapIDs, err := n.getBootstrapIPsAndIDs(node) + if err != nil { + return err + } + node.SetNetworkingConfig(bootstrapIDs, bootstrapIPs) + + if err := node.Write(); err != nil { + return err + } + + if err := node.Start(w); err != nil { + // Attempt to stop an unhealthy node to provide some assurance to the caller + // that an error condition will not result in a lingering process. + err = errors.Join(err, node.Stop(ctx)) + return err + } + + return nil +} + +// Waits until all nodes in the network are healthy. +func (n *Network) WaitForHealthy(ctx context.Context, w io.Writer) error { + ticker := time.NewTicker(networkHealthCheckInterval) + defer ticker.Stop() + + healthyNodes := set.NewSet[ids.NodeID](len(n.Nodes)) + for healthyNodes.Len() < len(n.Nodes) { + for _, node := range n.Nodes { + if healthyNodes.Contains(node.NodeID) { + continue + } + + healthy, err := node.IsHealthy(ctx) + if err != nil && !errors.Is(err, ErrNotRunning) { + return err + } + if !healthy { + continue + } + + healthyNodes.Add(node.NodeID) + if _, err := fmt.Fprintf(w, "%s is healthy @ %s\n", node.NodeID, node.URI); err != nil { + return err + } + } + + select { + case <-ctx.Done(): + return fmt.Errorf("failed to see all nodes healthy before timeout: %w", ctx.Err()) + case <-ticker.C: + } + } + return nil +} + +// Stops all nodes in the network. 
+func (n *Network) Stop(ctx context.Context) error { + // Target all nodes, including the ephemeral ones + nodes, err := ReadNodes(n.Dir, true /* includeEphemeral */) + if err != nil { + return err + } + + var errs []error + + // Initiate stop on all nodes + for _, node := range nodes { + if err := node.InitiateStop(ctx); err != nil { + errs = append(errs, fmt.Errorf("failed to stop node %s: %w", node.NodeID, err)) + } + } + + // Wait for stop to complete on all nodes + for _, node := range nodes { + if err := node.WaitForStopped(ctx); err != nil { + errs = append(errs, fmt.Errorf("failed to wait for node %s to stop: %w", node.NodeID, err)) + } + } + + if len(errs) > 0 { + return fmt.Errorf("failed to stop network:\n%w", errors.Join(errs...)) + } + return nil +} + +// Restarts all non-ephemeral nodes in the network. +func (n *Network) Restart(ctx context.Context, w io.Writer) error { + if _, err := fmt.Fprintf(w, " restarting network\n"); err != nil { + return err + } + for _, node := range n.Nodes { + if err := node.Stop(ctx); err != nil { + return fmt.Errorf("failed to stop node %s: %w", node.NodeID, err) + } + if err := n.StartNode(ctx, w, node); err != nil { + return fmt.Errorf("failed to start node %s: %w", node.NodeID, err) + } + if _, err := fmt.Fprintf(w, " waiting for node %s to report healthy\n", node.NodeID); err != nil { + return err + } + if err := WaitForHealthy(ctx, node); err != nil { + return err + } + } + return nil +} + +// Ensures the provided node has the configuration it needs to start. If the data dir is not +// set, it will be defaulted to [nodeParentDir]/[node ID]. For a not-yet-created network, +// no action will be taken. 
+// TODO(marun) Reword or refactor to account for the differing behavior pre- vs post-start +func (n *Network) EnsureNodeConfig(node *Node) error { + flags := node.Flags + + // Set the network name if available + if n.Genesis != nil && n.Genesis.NetworkID > 0 { + // Convert the network id to a string to ensure consistency in JSON round-tripping. + flags[config.NetworkNameKey] = strconv.FormatUint(uint64(n.Genesis.NetworkID), 10) + } + + if err := node.EnsureKeys(); err != nil { + return err + } + + flags.SetDefaults(n.DefaultFlags) + + // Set fields including the network path + if len(n.Dir) > 0 { + node.Flags.SetDefaults(FlagsMap{ + config.GenesisFileKey: n.getGenesisPath(), + config.ChainConfigDirKey: n.getChainConfigDir(), + }) + + // Ensure the node's data dir is configured + dataDir := node.getDataDir() + if len(dataDir) == 0 { + // NodeID will have been set by EnsureKeys + dataDir = filepath.Join(n.Dir, node.NodeID.String()) + flags[config.DataDirKey] = dataDir + } + } + + // Ensure the node runtime is configured + if node.RuntimeConfig == nil { + node.RuntimeConfig = &NodeRuntimeConfig{ + AvalancheGoPath: n.DefaultRuntimeConfig.AvalancheGoPath, + } + } + + // Ensure available subnets are tracked + subnetIDs := make([]string, 0, len(n.Subnets)) + for _, subnet := range n.Subnets { + if subnet.SubnetID == ids.Empty { + continue + } + subnetIDs = append(subnetIDs, subnet.SubnetID.String()) + } + flags[config.TrackSubnetsKey] = strings.Join(subnetIDs, ",") + + return nil +} + +func (n *Network) GetSubnet(name string) *Subnet { + for _, subnet := range n.Subnets { + if subnet.Name == name { + return subnet + } + } + return nil +} + +// Ensure that each subnet on the network is created and that it is validated by all non-ephemeral nodes. 
+func (n *Network) CreateSubnets(ctx context.Context, w io.Writer) error { + createdSubnets := make([]*Subnet, 0, len(n.Subnets)) + for _, subnet := range n.Subnets { + if _, err := fmt.Fprintf(w, "Creating subnet %q\n", subnet.Name); err != nil { + return err + } + if subnet.SubnetID != ids.Empty { + // The subnet already exists + continue + } + + if subnet.OwningKey == nil { + // Allocate a pre-funded key and remove it from the network so it won't be used for + // other purposes + if len(n.PreFundedKeys) == 0 { + return fmt.Errorf("no pre-funded keys available to create subnet %q", subnet.Name) + } + subnet.OwningKey = n.PreFundedKeys[len(n.PreFundedKeys)-1] + n.PreFundedKeys = n.PreFundedKeys[:len(n.PreFundedKeys)-1] + } + + // Create the subnet on the network + if err := subnet.Create(ctx, n.Nodes[0].URI); err != nil { + return err + } + + if _, err := fmt.Fprintf(w, " created subnet %q as %q\n", subnet.Name, subnet.SubnetID); err != nil { + return err + } + + // Persist the subnet configuration + if err := subnet.Write(n.getSubnetDir(), n.getChainConfigDir()); err != nil { + return err + } + + if _, err := fmt.Fprintf(w, " wrote configuration for subnet %q\n", subnet.Name); err != nil { + return err + } + + createdSubnets = append(createdSubnets, subnet) + } + + if len(createdSubnets) == 0 { + return nil + } + + // Ensure the in-memory subnet state + n.Subnets = append(n.Subnets, createdSubnets...) + + // Ensure the pre-funded key changes are persisted to disk + if err := n.Write(); err != nil { + return err + } + + // Reconfigure nodes for the new subnets + if _, err := fmt.Fprintf(w, "Configured nodes to track new subnet(s). 
Restart is required.\n"); err != nil { + return err + } + for _, node := range n.Nodes { + if err := n.EnsureNodeConfig(node); err != nil { + return err + } + } + // Restart nodes to allow new configuration to take effect + // TODO(marun) Only restart the validator nodes of newly-created subnets + if err := n.Restart(ctx, w); err != nil { + return err + } + + // Add each node as a subnet validator + for _, subnet := range createdSubnets { + if _, err := fmt.Fprintf(w, "Adding validators for subnet %q\n", subnet.Name); err != nil { + return err + } + if err := subnet.AddValidators(ctx, w, n.Nodes); err != nil { + return err + } + } + + // Wait for nodes to become subnet validators + pChainClient := platformvm.NewClient(n.Nodes[0].URI) + restartRequired := false + for _, subnet := range createdSubnets { + if err := waitForActiveValidators(ctx, w, pChainClient, subnet); err != nil { + return err + } + + // It should now be safe to create chains for the subnet + if err := subnet.CreateChains(ctx, w, n.Nodes[0].URI); err != nil { + return err + } + + // Persist the chain configuration + if err := subnet.Write(n.getSubnetDir(), n.getChainConfigDir()); err != nil { + return err + } + if _, err := fmt.Fprintf(w, " wrote chain configuration for subnet %q\n", subnet.Name); err != nil { + return err + } + + // If one or more of the subnets chains have explicit configuration, the + // subnet's validator nodes will need to be restarted for those nodes to read + // the newly written chain configuration and apply it to the chain(s). 
+ if subnet.HasChainConfig() { + restartRequired = true + } + } + + if !restartRequired { + return nil + } + + // Restart nodes to allow configuration for the new chains to take effect + // TODO(marun) Only restart the validator nodes of subnets that have chains that need configuring + return n.Restart(ctx, w) +} + +func (n *Network) GetURIForNodeID(nodeID ids.NodeID) (string, error) { + for _, node := range n.Nodes { + if node.NodeID == nodeID { + return node.URI, nil + } + } + return "", fmt.Errorf("%s is not known to the network", nodeID) +} + +func (n *Network) GetNodeURIs() []NodeURI { + return GetNodeURIs(n.Nodes) +} + +// Retrieves bootstrap IPs and IDs for all nodes except the skipped one (this supports +// collecting the bootstrap details for restarting a node). +func (n *Network) getBootstrapIPsAndIDs(skippedNode *Node) ([]string, []string, error) { + // Collect staking addresses of non-ephemeral nodes for use in bootstrapping a node + nodes, err := ReadNodes(n.Dir, false /* includeEphemeral */) + if err != nil { + return nil, nil, fmt.Errorf("failed to read network's nodes: %w", err) + } + var ( + bootstrapIPs = make([]string, 0, len(nodes)) + bootstrapIDs = make([]string, 0, len(nodes)) + ) + for _, node := range nodes { + if skippedNode != nil && node.NodeID == skippedNode.NodeID { + continue + } + + if len(node.StakingAddress) == 0 { + // Node is not running + continue + } + + bootstrapIPs = append(bootstrapIPs, node.StakingAddress) + bootstrapIDs = append(bootstrapIDs, node.NodeID.String()) + } + + return bootstrapIPs, bootstrapIDs, nil +} + +// Retrieves the default root dir for storing networks and their +// configuration. +func getDefaultRootDir() (string, error) { + homeDir, err := os.UserHomeDir() + if err != nil { + return "", err + } + return filepath.Join(homeDir, ".tmpnet", "networks"), nil +} + +// Finds the next available network ID by attempting to create a +// directory numbered from 1000 until creation succeeds. 
Returns the +// network id and the full path of the created directory. +func findNextNetworkID(rootDir string) (uint32, string, error) { + var ( + networkID uint32 = 1000 + dirPath string + ) + for { + _, reserved := constants.NetworkIDToNetworkName[networkID] + if reserved { + networkID++ + continue + } + + dirPath = filepath.Join(rootDir, strconv.FormatUint(uint64(networkID), 10)) + err := os.Mkdir(dirPath, perms.ReadWriteExecute) + if err == nil { + return networkID, dirPath, nil + } + + if !errors.Is(err, fs.ErrExist) { + return 0, "", fmt.Errorf("failed to create network directory: %w", err) + } + + // Directory already exists, keep iterating + networkID++ + } +} diff --git a/avalanchego/tests/fixture/tmpnet/network_config.go b/avalanchego/tests/fixture/tmpnet/network_config.go new file mode 100644 index 00000000..1ae4e967 --- /dev/null +++ b/avalanchego/tests/fixture/tmpnet/network_config.go @@ -0,0 +1,236 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package tmpnet + +import ( + "encoding/json" + "errors" + "fmt" + "os" + "path/filepath" + + "github.com/ava-labs/avalanchego/genesis" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + "github.com/ava-labs/avalanchego/utils/perms" +) + +// The Network type is defined in this file (reading/writing configuration) and network.go +// (orchestration). + +var errMissingNetworkDir = errors.New("failed to write network: missing network directory") + +// Read network and node configuration from disk. +func (n *Network) Read() error { + if err := n.readNetwork(); err != nil { + return err + } + if err := n.readNodes(); err != nil { + return err + } + return n.readSubnets() +} + +// Write network configuration to disk. 
+func (n *Network) Write() error { + if len(n.Dir) == 0 { + return errMissingNetworkDir + } + if err := n.writeGenesis(); err != nil { + return err + } + if err := n.writeChainConfigs(); err != nil { + return err + } + if err := n.writeNetworkConfig(); err != nil { + return err + } + if err := n.writeEnvFile(); err != nil { + return err + } + return n.writeNodes() +} + +// Read network configuration from disk. +func (n *Network) readNetwork() error { + if err := n.readGenesis(); err != nil { + return err + } + if err := n.readChainConfigs(); err != nil { + return err + } + return n.readConfig() +} + +// Read the non-ephemeral nodes associated with the network from disk. +func (n *Network) readNodes() error { + nodes, err := ReadNodes(n.Dir, false /* includeEphemeral */) + if err != nil { + return err + } + n.Nodes = nodes + return nil +} + +func (n *Network) writeNodes() error { + for _, node := range n.Nodes { + if err := node.Write(); err != nil { + return err + } + } + return nil +} + +func (n *Network) getGenesisPath() string { + return filepath.Join(n.Dir, "genesis.json") +} + +func (n *Network) readGenesis() error { + bytes, err := os.ReadFile(n.getGenesisPath()) + if err != nil { + return fmt.Errorf("failed to read genesis: %w", err) + } + genesis := genesis.UnparsedConfig{} + if err := json.Unmarshal(bytes, &genesis); err != nil { + return fmt.Errorf("failed to unmarshal genesis: %w", err) + } + n.Genesis = &genesis + return nil +} + +func (n *Network) writeGenesis() error { + bytes, err := DefaultJSONMarshal(n.Genesis) + if err != nil { + return fmt.Errorf("failed to marshal genesis: %w", err) + } + if err := os.WriteFile(n.getGenesisPath(), bytes, perms.ReadWrite); err != nil { + return fmt.Errorf("failed to write genesis: %w", err) + } + return nil +} + +func (n *Network) getChainConfigDir() string { + return filepath.Join(n.Dir, "chains") +} + +func (n *Network) readChainConfigs() error { + baseChainConfigDir := n.getChainConfigDir() + entries, err := 
os.ReadDir(baseChainConfigDir) + if err != nil { + return fmt.Errorf("failed to read chain config dir: %w", err) + } + + // Clear the map of data that may end up stale (e.g. if a given + // chain is in the map but no longer exists on disk) + n.ChainConfigs = map[string]FlagsMap{} + + for _, entry := range entries { + if !entry.IsDir() { + // Chain config files are expected to be nested under a + // directory with the name of the chain alias. + continue + } + chainAlias := entry.Name() + configPath := filepath.Join(baseChainConfigDir, chainAlias, defaultConfigFilename) + if _, err := os.Stat(configPath); os.IsNotExist(err) { + // No config file present + continue + } + chainConfig, err := ReadFlagsMap(configPath, chainAlias+" chain config") + if err != nil { + return err + } + n.ChainConfigs[chainAlias] = *chainConfig + } + + return nil +} + +func (n *Network) writeChainConfigs() error { + baseChainConfigDir := n.getChainConfigDir() + + for chainAlias, chainConfig := range n.ChainConfigs { + // Create the directory + chainConfigDir := filepath.Join(baseChainConfigDir, chainAlias) + if err := os.MkdirAll(chainConfigDir, perms.ReadWriteExecute); err != nil { + return fmt.Errorf("failed to create %s chain config dir: %w", chainAlias, err) + } + + // Write the file + path := filepath.Join(chainConfigDir, defaultConfigFilename) + if err := chainConfig.Write(path, chainAlias+" chain config"); err != nil { + return err + } + } + + // TODO(marun) Ensure the removal of chain aliases that aren't present in the map + + return nil +} + +func (n *Network) getConfigPath() string { + return filepath.Join(n.Dir, defaultConfigFilename) +} + +func (n *Network) readConfig() error { + bytes, err := os.ReadFile(n.getConfigPath()) + if err != nil { + return fmt.Errorf("failed to read network config: %w", err) + } + if err := json.Unmarshal(bytes, n); err != nil { + return fmt.Errorf("failed to unmarshal network config: %w", err) + } + return nil +} + +// The subset of network fields to 
store in the network config file. +type serializedNetworkConfig struct { + DefaultFlags FlagsMap + DefaultRuntimeConfig NodeRuntimeConfig + PreFundedKeys []*secp256k1.PrivateKey +} + +func (n *Network) writeNetworkConfig() error { + config := &serializedNetworkConfig{ + DefaultFlags: n.DefaultFlags, + DefaultRuntimeConfig: n.DefaultRuntimeConfig, + PreFundedKeys: n.PreFundedKeys, + } + bytes, err := DefaultJSONMarshal(config) + if err != nil { + return fmt.Errorf("failed to marshal network config: %w", err) + } + if err := os.WriteFile(n.getConfigPath(), bytes, perms.ReadWrite); err != nil { + return fmt.Errorf("failed to write network config: %w", err) + } + return nil +} + +func (n *Network) EnvFilePath() string { + return filepath.Join(n.Dir, "network.env") +} + +func (n *Network) EnvFileContents() string { + return fmt.Sprintf("export %s=%s", NetworkDirEnvName, n.Dir) +} + +// Write an env file that sets the network dir env when sourced. +func (n *Network) writeEnvFile() error { + if err := os.WriteFile(n.EnvFilePath(), []byte(n.EnvFileContents()), perms.ReadWrite); err != nil { + return fmt.Errorf("failed to write network env file: %w", err) + } + return nil +} + +func (n *Network) getSubnetDir() string { + return filepath.Join(n.Dir, defaultSubnetDirName) +} + +func (n *Network) readSubnets() error { + subnets, err := readSubnets(n.getSubnetDir()) + if err != nil { + return err + } + n.Subnets = subnets + return nil +} diff --git a/avalanchego/tests/fixture/tmpnet/network_test.go b/avalanchego/tests/fixture/tmpnet/network_test.go new file mode 100644 index 00000000..c04c497c --- /dev/null +++ b/avalanchego/tests/fixture/tmpnet/network_test.go @@ -0,0 +1,32 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package tmpnet + +import ( + "bytes" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestNetworkSerialization(t *testing.T) { + require := require.New(t) + + tmpDir := t.TempDir() + + network := &Network{} + require.NoError(network.EnsureDefaultConfig(&bytes.Buffer{}, "/path/to/avalanche/go", "", 1)) + require.NoError(network.Create(tmpDir)) + // Ensure node runtime is initialized + require.NoError(network.readNodes()) + + loadedNetwork, err := ReadNetwork(network.Dir) + require.NoError(err) + for _, key := range loadedNetwork.PreFundedKeys { + // Address() enables comparison with the original network by + // ensuring full population of a key's in-memory representation. + _ = key.Address() + } + require.Equal(network, loadedNetwork) +} diff --git a/avalanchego/tests/fixture/tmpnet/node.go b/avalanchego/tests/fixture/tmpnet/node.go new file mode 100644 index 00000000..59025b64 --- /dev/null +++ b/avalanchego/tests/fixture/tmpnet/node.go @@ -0,0 +1,337 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package tmpnet + +import ( + "context" + "encoding/base64" + "errors" + "fmt" + "io" + "net/http" + "os" + "path/filepath" + "strings" + "time" + + "github.com/spf13/cast" + + "github.com/ava-labs/avalanchego/config" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/staking" + "github.com/ava-labs/avalanchego/utils/crypto/bls" + "github.com/ava-labs/avalanchego/vms/platformvm/signer" +) + +// The Node type is defined in this file (node.go - orchestration) and +// node_config.go (reading/writing configuration). 
+ +const ( + defaultNodeTickerInterval = 50 * time.Millisecond +) + +var ( + errMissingTLSKeyForNodeID = fmt.Errorf("failed to ensure node ID: missing value for %q", config.StakingTLSKeyContentKey) + errMissingCertForNodeID = fmt.Errorf("failed to ensure node ID: missing value for %q", config.StakingCertContentKey) + errInvalidKeypair = fmt.Errorf("%q and %q must be provided together or not at all", config.StakingTLSKeyContentKey, config.StakingCertContentKey) +) + +// NodeRuntime defines the methods required to support running a node. +type NodeRuntime interface { + readState() error + Start(w io.Writer) error + InitiateStop() error + WaitForStopped(ctx context.Context) error + IsHealthy(ctx context.Context) (bool, error) +} + +// Configuration required to configure a node runtime. +type NodeRuntimeConfig struct { + AvalancheGoPath string +} + +// Node supports configuring and running a node participating in a temporary network. +type Node struct { + // Set by EnsureNodeID which is also called when the node is read. + NodeID ids.NodeID + + // Flags that will be supplied to the node at startup + Flags FlagsMap + + // An ephemeral node is not expected to be a persistent member of the network and + // should therefore not be used as for bootstrapping purposes. + IsEphemeral bool + + // The configuration used to initialize the node runtime. + RuntimeConfig *NodeRuntimeConfig + + // Runtime state, intended to be set by NodeRuntime + URI string + StakingAddress string + + // Initialized on demand + runtime NodeRuntime +} + +// Initializes a new node with only the data dir set +func NewNode(dataDir string) *Node { + return &Node{ + Flags: FlagsMap{ + config.DataDirKey: dataDir, + }, + } +} + +// Initializes the specified number of nodes. +func NewNodes(count int) []*Node { + nodes := make([]*Node, count) + for i := range nodes { + nodes[i] = NewNode("") + } + return nodes +} + +// Reads a node's configuration from the specified directory. 
+func ReadNode(dataDir string) (*Node, error) { + node := NewNode(dataDir) + return node, node.Read() +} + +// Reads nodes from the specified network directory. +func ReadNodes(networkDir string, includeEphemeral bool) ([]*Node, error) { + nodes := []*Node{} + + // Node configuration is stored in child directories + entries, err := os.ReadDir(networkDir) + if err != nil { + return nil, fmt.Errorf("failed to read dir: %w", err) + } + for _, entry := range entries { + if !entry.IsDir() { + continue + } + + nodeDir := filepath.Join(networkDir, entry.Name()) + node, err := ReadNode(nodeDir) + if errors.Is(err, os.ErrNotExist) { + // If no config file exists, assume this is not the path of a node + continue + } else if err != nil { + return nil, err + } + + if !includeEphemeral && node.IsEphemeral { + continue + } + + nodes = append(nodes, node) + } + + return nodes, nil +} + +// Retrieves the runtime for the node. +func (n *Node) getRuntime() NodeRuntime { + if n.runtime == nil { + n.runtime = &NodeProcess{ + node: n, + } + } + return n.runtime +} + +// Runtime methods + +func (n *Node) IsHealthy(ctx context.Context) (bool, error) { + return n.getRuntime().IsHealthy(ctx) +} + +func (n *Node) Start(w io.Writer) error { + return n.getRuntime().Start(w) +} + +func (n *Node) InitiateStop(ctx context.Context) error { + if err := n.SaveMetricsSnapshot(ctx); err != nil { + return err + } + return n.getRuntime().InitiateStop() +} + +func (n *Node) WaitForStopped(ctx context.Context) error { + return n.getRuntime().WaitForStopped(ctx) +} + +func (n *Node) readState() error { + return n.getRuntime().readState() +} + +func (n *Node) getDataDir() string { + return cast.ToString(n.Flags[config.DataDirKey]) +} + +// Writes the current state of the metrics endpoint to disk +func (n *Node) SaveMetricsSnapshot(ctx context.Context) error { + if len(n.URI) == 0 { + // No URI to request metrics from + return nil + } + uri := n.URI + "/ext/metrics" + req, err := 
http.NewRequestWithContext(ctx, http.MethodGet, uri, nil) + if err != nil { + return err + } + resp, err := http.DefaultClient.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + body, err := io.ReadAll(resp.Body) + if err != nil { + return err + } + return n.writeMetricsSnapshot(body) +} + +// Initiates node shutdown and waits for the node to stop. +func (n *Node) Stop(ctx context.Context) error { + if err := n.InitiateStop(ctx); err != nil { + return err + } + return n.WaitForStopped(ctx) +} + +// Sets networking configuration for the node. +// Convenience method for setting networking flags. +func (n *Node) SetNetworkingConfig(bootstrapIDs []string, bootstrapIPs []string) { + var ( + // Use dynamic port allocation. + httpPort uint16 = 0 + stakingPort uint16 = 0 + ) + n.Flags[config.HTTPPortKey] = httpPort + n.Flags[config.StakingPortKey] = stakingPort + n.Flags[config.BootstrapIDsKey] = strings.Join(bootstrapIDs, ",") + n.Flags[config.BootstrapIPsKey] = strings.Join(bootstrapIPs, ",") +} + +// Ensures staking and signing keys are generated if not already present and +// that the node ID (derived from the staking keypair) is set. +func (n *Node) EnsureKeys() error { + if err := n.EnsureBLSSigningKey(); err != nil { + return err + } + if err := n.EnsureStakingKeypair(); err != nil { + return err + } + return n.EnsureNodeID() +} + +// Ensures a BLS signing key is generated if not already present. 
+func (n *Node) EnsureBLSSigningKey() error { + // Attempt to retrieve an existing key + existingKey, err := n.Flags.GetStringVal(config.StakingSignerKeyContentKey) + if err != nil { + return err + } + if len(existingKey) > 0 { + // Nothing to do + return nil + } + + // Generate a new signing key + newKey, err := bls.NewSecretKey() + if err != nil { + return fmt.Errorf("failed to generate staking signer key: %w", err) + } + n.Flags[config.StakingSignerKeyContentKey] = base64.StdEncoding.EncodeToString(bls.SerializeSecretKey(newKey)) + return nil +} + +// Ensures a staking keypair is generated if not already present. +func (n *Node) EnsureStakingKeypair() error { + keyKey := config.StakingTLSKeyContentKey + certKey := config.StakingCertContentKey + + key, err := n.Flags.GetStringVal(keyKey) + if err != nil { + return err + } + + cert, err := n.Flags.GetStringVal(certKey) + if err != nil { + return err + } + + if len(key) == 0 && len(cert) == 0 { + // Generate new keypair + tlsCertBytes, tlsKeyBytes, err := staking.NewCertAndKeyBytes() + if err != nil { + return fmt.Errorf("failed to generate staking keypair: %w", err) + } + n.Flags[keyKey] = base64.StdEncoding.EncodeToString(tlsKeyBytes) + n.Flags[certKey] = base64.StdEncoding.EncodeToString(tlsCertBytes) + } else if len(key) == 0 || len(cert) == 0 { + // Only one of key and cert was provided + return errInvalidKeypair + } + + return nil +} + +// Derives the nodes proof-of-possession. Requires the node to have a +// BLS signing key. +func (n *Node) GetProofOfPossession() (*signer.ProofOfPossession, error) { + signingKey, err := n.Flags.GetStringVal(config.StakingSignerKeyContentKey) + if err != nil { + return nil, err + } + signingKeyBytes, err := base64.StdEncoding.DecodeString(signingKey) + if err != nil { + return nil, err + } + secretKey, err := bls.SecretKeyFromBytes(signingKeyBytes) + if err != nil { + return nil, err + } + return signer.NewProofOfPossession(secretKey), nil +} + +// Derives the node ID. 
Requires that a tls keypair is present. +func (n *Node) EnsureNodeID() error { + keyKey := config.StakingTLSKeyContentKey + certKey := config.StakingCertContentKey + + key, err := n.Flags.GetStringVal(keyKey) + if err != nil { + return err + } + if len(key) == 0 { + return errMissingTLSKeyForNodeID + } + keyBytes, err := base64.StdEncoding.DecodeString(key) + if err != nil { + return fmt.Errorf("failed to ensure node ID: failed to base64 decode value for %q: %w", keyKey, err) + } + + cert, err := n.Flags.GetStringVal(certKey) + if err != nil { + return err + } + if len(cert) == 0 { + return errMissingCertForNodeID + } + certBytes, err := base64.StdEncoding.DecodeString(cert) + if err != nil { + return fmt.Errorf("failed to ensure node ID: failed to base64 decode value for %q: %w", certKey, err) + } + + tlsCert, err := staking.LoadTLSCertFromBytes(keyBytes, certBytes) + if err != nil { + return fmt.Errorf("failed to ensure node ID: failed to load tls cert: %w", err) + } + stakingCert := staking.CertificateFromX509(tlsCert.Leaf) + n.NodeID = ids.NodeIDFromCert(stakingCert) + + return nil +} diff --git a/avalanchego/tests/fixture/tmpnet/node_config.go b/avalanchego/tests/fixture/tmpnet/node_config.go new file mode 100644 index 00000000..3ebbc01b --- /dev/null +++ b/avalanchego/tests/fixture/tmpnet/node_config.go @@ -0,0 +1,114 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package tmpnet + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" + "strings" + "time" + + "github.com/ava-labs/avalanchego/utils/perms" +) + +// The Node type is defined in this file node_config.go +// (reading/writing configuration) and node.go (orchestration). 
+ +func (n *Node) getFlagsPath() string { + return filepath.Join(n.getDataDir(), "flags.json") +} + +func (n *Node) readFlags() error { + bytes, err := os.ReadFile(n.getFlagsPath()) + if err != nil { + return fmt.Errorf("failed to read node flags: %w", err) + } + flags := FlagsMap{} + if err := json.Unmarshal(bytes, &flags); err != nil { + return fmt.Errorf("failed to unmarshal node flags: %w", err) + } + n.Flags = flags + return n.EnsureNodeID() +} + +func (n *Node) writeFlags() error { + bytes, err := DefaultJSONMarshal(n.Flags) + if err != nil { + return fmt.Errorf("failed to marshal node flags: %w", err) + } + if err := os.WriteFile(n.getFlagsPath(), bytes, perms.ReadWrite); err != nil { + return fmt.Errorf("failed to write node flags: %w", err) + } + return nil +} + +func (n *Node) getConfigPath() string { + return filepath.Join(n.getDataDir(), defaultConfigFilename) +} + +func (n *Node) readConfig() error { + bytes, err := os.ReadFile(n.getConfigPath()) + if err != nil { + return fmt.Errorf("failed to read node config: %w", err) + } + if err := json.Unmarshal(bytes, n); err != nil { + return fmt.Errorf("failed to unmarshal node config: %w", err) + } + return nil +} + +type serializedNodeConfig struct { + IsEphemeral bool + RuntimeConfig *NodeRuntimeConfig +} + +func (n *Node) writeConfig() error { + config := serializedNodeConfig{ + IsEphemeral: n.IsEphemeral, + RuntimeConfig: n.RuntimeConfig, + } + bytes, err := DefaultJSONMarshal(config) + if err != nil { + return fmt.Errorf("failed to marshal node config: %w", err) + } + if err := os.WriteFile(n.getConfigPath(), bytes, perms.ReadWrite); err != nil { + return fmt.Errorf("failed to write node config: %w", err) + } + return nil +} + +func (n *Node) Read() error { + if err := n.readFlags(); err != nil { + return err + } + if err := n.readConfig(); err != nil { + return err + } + return n.readState() +} + +func (n *Node) Write() error { + if err := os.MkdirAll(n.getDataDir(), perms.ReadWriteExecute); err != nil 
{ + return fmt.Errorf("failed to create node dir: %w", err) + } + + if err := n.writeFlags(); err != nil { + return err + } + return n.writeConfig() +} + +func (n *Node) writeMetricsSnapshot(data []byte) error { + metricsDir := filepath.Join(n.getDataDir(), "metrics") + if err := os.MkdirAll(metricsDir, perms.ReadWriteExecute); err != nil { + return fmt.Errorf("failed to create metrics dir: %w", err) + } + // Derive a filesystem-compatible filename from the current timestamp + ts := time.Now().UTC().Format(time.RFC3339) + ts = strings.ReplaceAll(strings.ReplaceAll(ts, ":", ""), "-", "") + metricsPath := filepath.Join(metricsDir, ts) + return os.WriteFile(metricsPath, data, perms.ReadWrite) +} diff --git a/avalanchego/tests/fixture/tmpnet/node_process.go b/avalanchego/tests/fixture/tmpnet/node_process.go new file mode 100644 index 00000000..c2e2e331 --- /dev/null +++ b/avalanchego/tests/fixture/tmpnet/node_process.go @@ -0,0 +1,258 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package tmpnet + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "io/fs" + "net" + "os" + "os/exec" + "path/filepath" + "syscall" + "time" + + "github.com/ava-labs/avalanchego/api/health" + "github.com/ava-labs/avalanchego/config" + "github.com/ava-labs/avalanchego/node" +) + +const ( + AvalancheGoPathEnvName = "AVALANCHEGO_PATH" + + defaultNodeInitTimeout = 10 * time.Second +) + +var errNodeAlreadyRunning = errors.New("failed to start node: node is already running") + +func checkNodeHealth(ctx context.Context, uri string) (bool, error) { + // Check that the node is reporting healthy + health, err := health.NewClient(uri).Health(ctx, nil) + if err == nil { + return health.Healthy, nil + } + + switch t := err.(type) { + case *net.OpError: + if t.Op == "read" { + // Connection refused - potentially recoverable + return false, nil + } + case syscall.Errno: + if t == syscall.ECONNREFUSED { + // Connection refused - potentially recoverable + return false, nil + } + } + // Assume all other errors are not recoverable + return false, fmt.Errorf("failed to query node health: %w", err) +} + +// Defines local-specific node configuration. Supports setting default +// and node-specific values. 
+type NodeProcess struct { + node *Node + + // PID of the node process + pid int +} + +func (p *NodeProcess) setProcessContext(processContext node.NodeProcessContext) { + p.pid = processContext.PID + p.node.URI = processContext.URI + p.node.StakingAddress = processContext.StakingAddress +} + +func (p *NodeProcess) readState() error { + path := p.getProcessContextPath() + if _, err := os.Stat(path); errors.Is(err, fs.ErrNotExist) { + // The absence of the process context file indicates the node is not running + p.setProcessContext(node.NodeProcessContext{}) + return nil + } + + bytes, err := os.ReadFile(path) + if err != nil { + return fmt.Errorf("failed to read node process context: %w", err) + } + processContext := node.NodeProcessContext{} + if err := json.Unmarshal(bytes, &processContext); err != nil { + return fmt.Errorf("failed to unmarshal node process context: %w", err) + } + p.setProcessContext(processContext) + return nil +} + +// Start waits for the process context to be written which +// indicates that the node will be accepting connections on +// its staking port. The network will start faster with this +// synchronization due to the avoidance of exponential backoff +// if a node tries to connect to a beacon that is not ready. +func (p *NodeProcess) Start(w io.Writer) error { + // Avoid attempting to start an already running node. + proc, err := p.getProcess() + if err != nil { + return fmt.Errorf("failed to retrieve existing process: %w", err) + } + if proc != nil { + return errNodeAlreadyRunning + } + + // Ensure a stale process context file is removed so that the + // creation of a new file can indicate node start. 
+ if err := os.Remove(p.getProcessContextPath()); err != nil && !errors.Is(err, fs.ErrNotExist) { + return fmt.Errorf("failed to remove stale process context file: %w", err) + } + + cmd := exec.Command(p.node.RuntimeConfig.AvalancheGoPath, "--config-file", p.node.getFlagsPath()) // #nosec G204 + if err := cmd.Start(); err != nil { + return err + } + + // Determine appropriate level of node description detail + dataDir := p.node.getDataDir() + nodeDescription := fmt.Sprintf("node %q", p.node.NodeID) + if p.node.IsEphemeral { + nodeDescription = "ephemeral " + nodeDescription + } + nonDefaultNodeDir := filepath.Base(dataDir) != p.node.NodeID.String() + if nonDefaultNodeDir { + // Only include the data dir if its base is not the default (the node ID) + nodeDescription = fmt.Sprintf("%s with path: %s", nodeDescription, dataDir) + } + + go func() { + if err := cmd.Wait(); err != nil { + if err.Error() != "signal: killed" { + _, _ = fmt.Fprintf(w, "%s finished with error: %v\n", nodeDescription, err) + } + } + _, _ = fmt.Fprintf(w, "%s exited\n", nodeDescription) + }() + + // A node writes a process context file on start. If the file is not + // found in a reasonable amount of time, the node is unlikely to have + // started successfully. + if err := p.waitForProcessContext(context.Background()); err != nil { + return fmt.Errorf("failed to start local node: %w", err) + } + + _, err = fmt.Fprintf(w, "Started %s\n", nodeDescription) + return err +} + +// Signals the node process to stop. +func (p *NodeProcess) InitiateStop() error { + proc, err := p.getProcess() + if err != nil { + return fmt.Errorf("failed to retrieve process to stop: %w", err) + } + if proc == nil { + // Already stopped + return nil + } + if err := proc.Signal(syscall.SIGTERM); err != nil { + return fmt.Errorf("failed to send SIGTERM to pid %d: %w", p.pid, err) + } + return nil +} + +// Waits for the node process to stop. 
+func (p *NodeProcess) WaitForStopped(ctx context.Context) error { + ticker := time.NewTicker(defaultNodeTickerInterval) + defer ticker.Stop() + for { + proc, err := p.getProcess() + if err != nil { + return fmt.Errorf("failed to retrieve process: %w", err) + } + if proc == nil { + return nil + } + + select { + case <-ctx.Done(): + return fmt.Errorf("failed to see node process stop %q before timeout: %w", p.node.NodeID, ctx.Err()) + case <-ticker.C: + } + } +} + +func (p *NodeProcess) IsHealthy(ctx context.Context) (bool, error) { + // Check that the node process is running as a precondition for + // checking health. getProcess will also ensure that the node's + // API URI is current. + proc, err := p.getProcess() + if err != nil { + return false, fmt.Errorf("failed to determine process status: %w", err) + } + if proc == nil { + return false, ErrNotRunning + } + + return checkNodeHealth(ctx, p.node.URI) +} + +func (p *NodeProcess) getProcessContextPath() string { + return filepath.Join(p.node.getDataDir(), config.DefaultProcessContextFilename) +} + +func (p *NodeProcess) waitForProcessContext(ctx context.Context) error { + ticker := time.NewTicker(defaultNodeTickerInterval) + defer ticker.Stop() + + ctx, cancel := context.WithTimeout(ctx, defaultNodeInitTimeout) + defer cancel() + for len(p.node.URI) == 0 { + err := p.readState() + if err != nil { + return fmt.Errorf("failed to read process context for node %q: %w", p.node.NodeID, err) + } + + select { + case <-ctx.Done(): + return fmt.Errorf("failed to load process context for node %q before timeout: %w", p.node.NodeID, ctx.Err()) + case <-ticker.C: + } + } + return nil +} + +// Retrieve the node process if it is running. As part of determining +// process liveness, the node's process context will be refreshed if +// live or cleared if not running. +func (p *NodeProcess) getProcess() (*os.Process, error) { + // Read the process context to ensure freshness. 
The node may have + // stopped or been restarted since last read. + if err := p.readState(); err != nil { + return nil, fmt.Errorf("failed to read process context: %w", err) + } + + if p.pid == 0 { + // Process is not running + return nil, nil + } + + proc, err := os.FindProcess(p.pid) + if err != nil { + return nil, fmt.Errorf("failed to find process: %w", err) + } + + // Sending 0 will not actually send a signal but will perform + // error checking. + err = proc.Signal(syscall.Signal(0)) + if err == nil { + // Process is running + return proc, nil + } + if errors.Is(err, os.ErrProcessDone) { + // Process is not running + return nil, nil + } + return nil, fmt.Errorf("failed to determine process status: %w", err) +} diff --git a/avalanchego/tests/fixture/tmpnet/subnet.go b/avalanchego/tests/fixture/tmpnet/subnet.go new file mode 100644 index 00000000..41058d93 --- /dev/null +++ b/avalanchego/tests/fixture/tmpnet/subnet.go @@ -0,0 +1,346 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package tmpnet + +import ( + "context" + "encoding/json" + "fmt" + "io" + "os" + "path/filepath" + "time" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + "github.com/ava-labs/avalanchego/utils/perms" + "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/utils/units" + "github.com/ava-labs/avalanchego/vms/platformvm" + "github.com/ava-labs/avalanchego/vms/platformvm/txs" + "github.com/ava-labs/avalanchego/vms/secp256k1fx" + "github.com/ava-labs/avalanchego/wallet/subnet/primary" + "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" +) + +const defaultSubnetDirName = "subnets" + +type Chain struct { + // Set statically + VMID ids.ID + Config string + Genesis []byte + + // Set at runtime + ChainID ids.ID + PreFundedKey *secp256k1.PrivateKey +} + +// Write the chain configuration to the specified directory. +func (c *Chain) WriteConfig(chainDir string) error { + if len(c.Config) == 0 { + return nil + } + + chainConfigDir := filepath.Join(chainDir, c.ChainID.String()) + if err := os.MkdirAll(chainConfigDir, perms.ReadWriteExecute); err != nil { + return fmt.Errorf("failed to create chain config dir: %w", err) + } + + path := filepath.Join(chainConfigDir, defaultConfigFilename) + if err := os.WriteFile(path, []byte(c.Config), perms.ReadWrite); err != nil { + return fmt.Errorf("failed to write chain config: %w", err) + } + + return nil +} + +type Subnet struct { + // A unique string that can be used to refer to the subnet across different temporary + // networks (since the SubnetID will be different every time the subnet is created) + Name string + + // The ID of the transaction that created the subnet + SubnetID ids.ID + + // The private key that owns the subnet + OwningKey *secp256k1.PrivateKey + + // IDs of the nodes responsible for validating the subnet + ValidatorIDs []ids.NodeID + + Chains []*Chain +} + +// Retrieves a 
wallet configured for use with the subnet +func (s *Subnet) GetWallet(ctx context.Context, uri string) (primary.Wallet, error) { + keychain := secp256k1fx.NewKeychain(s.OwningKey) + + // Only fetch the subnet transaction if a subnet ID is present. This won't be true when + // the wallet is first used to create the subnet. + txIDs := set.Set[ids.ID]{} + if s.SubnetID != ids.Empty { + txIDs.Add(s.SubnetID) + } + + return primary.MakeWallet(ctx, &primary.WalletConfig{ + URI: uri, + AVAXKeychain: keychain, + EthKeychain: keychain, + PChainTxsToFetch: txIDs, + }) +} + +// Issues the subnet creation transaction and retains the result. The URI of a node is +// required to issue the transaction. +func (s *Subnet) Create(ctx context.Context, uri string) error { + wallet, err := s.GetWallet(ctx, uri) + if err != nil { + return err + } + pWallet := wallet.P() + + subnetTx, err := pWallet.IssueCreateSubnetTx( + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + s.OwningKey.Address(), + }, + }, + common.WithContext(ctx), + ) + if err != nil { + return fmt.Errorf("failed to create subnet %s: %w", s.Name, err) + } + s.SubnetID = subnetTx.ID() + + return nil +} + +func (s *Subnet) CreateChains(ctx context.Context, w io.Writer, uri string) error { + wallet, err := s.GetWallet(ctx, uri) + if err != nil { + return err + } + pWallet := wallet.P() + + if _, err := fmt.Fprintf(w, "Creating chains for subnet %q\n", s.Name); err != nil { + return err + } + + for _, chain := range s.Chains { + createChainTx, err := pWallet.IssueCreateChainTx( + s.SubnetID, + chain.Genesis, + chain.VMID, + nil, + "", + common.WithContext(ctx), + ) + if err != nil { + return fmt.Errorf("failed to create chain: %w", err) + } + chain.ChainID = createChainTx.ID() + + if _, err := fmt.Fprintf(w, " created chain %q for VM %q on subnet %q\n", chain.ChainID, chain.VMID, s.Name); err != nil { + return err + } + } + return nil +} + +// Add validators to the subnet +func (s *Subnet) AddValidators(ctx 
context.Context, w io.Writer, nodes []*Node) error { + apiURI := nodes[0].URI + + wallet, err := s.GetWallet(ctx, apiURI) + if err != nil { + return err + } + pWallet := wallet.P() + + // Collect the end times for current validators to reuse for subnet validators + pvmClient := platformvm.NewClient(apiURI) + validators, err := pvmClient.GetCurrentValidators(ctx, constants.PrimaryNetworkID, nil) + if err != nil { + return err + } + endTimes := make(map[ids.NodeID]uint64) + for _, validator := range validators { + endTimes[validator.NodeID] = validator.EndTime + } + + startTime := time.Now().Add(DefaultValidatorStartTimeDiff) + for _, node := range nodes { + endTime, ok := endTimes[node.NodeID] + if !ok { + return fmt.Errorf("failed to find end time for %s", node.NodeID) + } + + _, err := pWallet.IssueAddSubnetValidatorTx( + &txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: node.NodeID, + Start: uint64(startTime.Unix()), + End: endTime, + Wght: units.Schmeckle, + }, + Subnet: s.SubnetID, + }, + common.WithContext(ctx), + ) + if err != nil { + return err + } + + if _, err := fmt.Fprintf(w, " added %s as validator for subnet `%s`\n", node.NodeID, s.Name); err != nil { + return err + } + + s.ValidatorIDs = append(s.ValidatorIDs, node.NodeID) + } + + return nil +} + +// Write the subnet configuration to disk +func (s *Subnet) Write(subnetDir string, chainDir string) error { + if err := os.MkdirAll(subnetDir, perms.ReadWriteExecute); err != nil { + return fmt.Errorf("failed to create subnet dir: %w", err) + } + path := filepath.Join(subnetDir, s.Name+".json") + + // Since subnets are expected to be serialized for the first time + // without their chains having been created (i.e. chains will have + // empty IDs), use the absence of chain IDs as a prompt for a + // subnet name uniquness check. 
+ if len(s.Chains) > 0 && s.Chains[0].ChainID == ids.Empty { + _, err := os.Stat(path) + if err != nil && !os.IsNotExist(err) { + return err + } + if err == nil { + return fmt.Errorf("a subnet with name %s already exists", s.Name) + } + } + + bytes, err := DefaultJSONMarshal(s) + if err != nil { + return fmt.Errorf("failed to marshal subnet %s: %w", s.Name, err) + } + if err := os.WriteFile(path, bytes, perms.ReadWrite); err != nil { + return fmt.Errorf("failed to write subnet %s: %w", s.Name, err) + } + + for _, chain := range s.Chains { + if err := chain.WriteConfig(chainDir); err != nil { + return err + } + } + + return nil +} + +// HasChainConfig indicates whether at least one of the subnet's +// chains have explicit configuration. This can be used to determine +// whether validator restart is required after chain creation to +// ensure that chains are configured correctly. +func (s *Subnet) HasChainConfig() bool { + for _, chain := range s.Chains { + if len(chain.Config) > 0 { + return true + } + } + return false +} + +func waitForActiveValidators( + ctx context.Context, + w io.Writer, + pChainClient platformvm.Client, + subnet *Subnet, +) error { + ticker := time.NewTicker(DefaultPollingInterval) + defer ticker.Stop() + + if _, err := fmt.Fprintf(w, "Waiting for validators of subnet %q to become active\n", subnet.Name); err != nil { + return err + } + + if _, err := fmt.Fprintf(w, " "); err != nil { + return err + } + + for { + if _, err := fmt.Fprintf(w, "."); err != nil { + return err + } + validators, err := pChainClient.GetCurrentValidators(ctx, subnet.SubnetID, nil) + if err != nil { + return err + } + validatorSet := set.NewSet[ids.NodeID](len(validators)) + for _, validator := range validators { + validatorSet.Add(validator.NodeID) + } + allActive := true + for _, validatorID := range subnet.ValidatorIDs { + if !validatorSet.Contains(validatorID) { + allActive = false + } + } + if allActive { + if _, err := fmt.Fprintf(w, "\n saw the expected active 
validators of subnet %q\n", subnet.Name); err != nil { + return err + } + return nil + } + + select { + case <-ctx.Done(): + return fmt.Errorf("failed to see the expected active validators of subnet %q before timeout", subnet.Name) + case <-ticker.C: + } + } +} + +// Reads subnets from [network dir]/subnets/[subnet name].json +func readSubnets(subnetDir string) ([]*Subnet, error) { + if _, err := os.Stat(subnetDir); os.IsNotExist(err) { + return nil, nil + } else if err != nil { + return nil, err + } + + entries, err := os.ReadDir(subnetDir) + if err != nil { + return nil, fmt.Errorf("failed to read subnet dir: %w", err) + } + + subnets := []*Subnet{} + for _, entry := range entries { + if entry.IsDir() { + // Looking only for files + continue + } + if filepath.Ext(entry.Name()) != ".json" { + // Subnet files should have a .json extension + continue + } + + subnetPath := filepath.Join(subnetDir, entry.Name()) + bytes, err := os.ReadFile(subnetPath) + if err != nil { + return nil, fmt.Errorf("failed to read subnet file %s: %w", subnetPath, err) + } + subnet := &Subnet{} + if err := json.Unmarshal(bytes, subnet); err != nil { + return nil, fmt.Errorf("failed to unmarshal subnet from %s: %w", subnetPath, err) + } + subnets = append(subnets, subnet) + } + + return subnets, nil +} diff --git a/avalanchego/tests/fixture/tmpnet/utils.go b/avalanchego/tests/fixture/tmpnet/utils.go new file mode 100644 index 00000000..b363bdec --- /dev/null +++ b/avalanchego/tests/fixture/tmpnet/utils.go @@ -0,0 +1,89 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package tmpnet + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "time" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" +) + +const ( + DefaultNodeTickerInterval = 50 * time.Millisecond +) + +var ErrNotRunning = errors.New("not running") + +// WaitForHealthy blocks until Node.IsHealthy returns true or an error (including context timeout) is observed. +func WaitForHealthy(ctx context.Context, node *Node) error { + if _, ok := ctx.Deadline(); !ok { + return fmt.Errorf("unable to wait for health for node %q with a context without a deadline", node.NodeID) + } + ticker := time.NewTicker(DefaultNodeTickerInterval) + defer ticker.Stop() + + for { + healthy, err := node.IsHealthy(ctx) + if err != nil && !errors.Is(err, ErrNotRunning) { + return fmt.Errorf("failed to wait for health of node %q: %w", node.NodeID, err) + } + if healthy { + return nil + } + + select { + case <-ctx.Done(): + return fmt.Errorf("failed to wait for health of node %q before timeout: %w", node.NodeID, ctx.Err()) + case <-ticker.C: + } + } +} + +// NodeURI associates a node ID with its API URI. +type NodeURI struct { + NodeID ids.NodeID + URI string +} + +func GetNodeURIs(nodes []*Node) []NodeURI { + uris := make([]NodeURI, 0, len(nodes)) + for _, node := range nodes { + if node.IsEphemeral { + // Avoid returning URIs for nodes whose lifespan is indeterminate + continue + } + // Only append URIs that are not empty. A node may have an + // empty URI if it is not currently running. + if len(node.URI) > 0 { + uris = append(uris, NodeURI{ + NodeID: node.NodeID, + URI: node.URI, + }) + } + } + return uris +} + +// Marshal to json with default prefix and indent. 
+func DefaultJSONMarshal(v interface{}) ([]byte, error) { + return json.MarshalIndent(v, "", " ") +} + +// Helper simplifying creation of a set of private keys +func NewPrivateKeys(keyCount int) ([]*secp256k1.PrivateKey, error) { + keys := make([]*secp256k1.PrivateKey, 0, keyCount) + for i := 0; i < keyCount; i++ { + key, err := secp256k1.NewPrivateKey() + if err != nil { + return nil, fmt.Errorf("failed to generate private key: %w", err) + } + keys = append(keys, key) + } + return keys, nil +} diff --git a/avalanchego/tests/http.go b/avalanchego/tests/http.go index b4cb3204..073b6d2d 100644 --- a/avalanchego/tests/http.go +++ b/avalanchego/tests/http.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package tests @@ -13,6 +13,31 @@ import ( "strings" ) +// "metric name" -> "metric value" +type NodeMetrics map[string]float64 + +// URI -> "metric name" -> "metric value" +type NodesMetrics map[string]NodeMetrics + +// GetNodeMetrics retrieves the specified metrics for the provided node URI. +func GetNodeMetrics(nodeURI string, metricNames ...string) (NodeMetrics, error) { + uri := nodeURI + "/ext/metrics" + return GetMetricsValue(uri, metricNames...) +} + +// GetNodesMetrics retrieves the specified metrics for the provided node URIs. +func GetNodesMetrics(nodeURIs []string, metricNames ...string) (NodesMetrics, error) { + metrics := make(NodesMetrics, len(nodeURIs)) + for _, u := range nodeURIs { + var err error + metrics[u], err = GetNodeMetrics(u, metricNames...) 
+ if err != nil { + return nil, fmt.Errorf("failed to retrieve metrics for %s: %w", u, err) + } + } + return metrics, nil +} + func GetMetricsValue(url string, metrics ...string) (map[string]float64, error) { lines, err := getHTTPLines(url) if err != nil { @@ -48,7 +73,7 @@ func GetMetricsValue(url string, metrics ...string) (map[string]float64, error) } func getHTTPLines(url string) ([]string, error) { - req, err := http.NewRequestWithContext(context.TODO(), "GET", url, nil) + req, err := http.NewRequestWithContext(context.TODO(), http.MethodGet, url, nil) if err != nil { return nil, err } diff --git a/avalanchego/tests/keys.go b/avalanchego/tests/keys.go deleted file mode 100644 index 9b5945bb..00000000 --- a/avalanchego/tests/keys.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package tests - -import ( - "bufio" - "encoding/hex" - "os" - "strings" - - "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" -) - -// Loads a list of secp256k1 hex-encoded private keys from the file, new-line separated. 
-func LoadHexTestKeys(filePath string) (keys []*secp256k1.PrivateKey, err error) { - f, err := os.Open(filePath) - if err != nil { - return nil, err - } - defer f.Close() - - scanner := bufio.NewScanner(f) - for scanner.Scan() { - s := scanner.Text() - k, err := decodeHexPrivateKey(s) - if err != nil { - return nil, err - } - keys = append(keys, k) - } - - if err := scanner.Err(); err != nil { - return nil, err - } - return keys, nil -} - -var keyFactory = new(secp256k1.Factory) - -func decodeHexPrivateKey(enc string) (*secp256k1.PrivateKey, error) { - rawPk := strings.Replace(enc, secp256k1.PrivateKeyPrefix, "", 1) - skBytes, err := hex.DecodeString(rawPk) - if err != nil { - return nil, err - } - return keyFactory.ToPrivateKey(skBytes) -} diff --git a/avalanchego/tests/keys_test.go b/avalanchego/tests/keys_test.go deleted file mode 100644 index a3a31b47..00000000 --- a/avalanchego/tests/keys_test.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package tests - -import ( - "testing" - - "github.com/stretchr/testify/require" - - "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" -) - -func TestLoadTestKeys(t *testing.T) { - keys, err := LoadHexTestKeys("test.insecure.secp256k1.keys") - require.NoError(t, err) - for i, k := range keys { - curAddr := encodeShortAddr(k) - t.Logf("[%d] loaded %v", i, curAddr) - } -} - -func encodeShortAddr(pk *secp256k1.PrivateKey) string { - return pk.PublicKey().Address().String() -} diff --git a/avalanchego/tests/test.insecure.secp256k1.keys b/avalanchego/tests/test.insecure.secp256k1.keys deleted file mode 100644 index 4a128df4..00000000 --- a/avalanchego/tests/test.insecure.secp256k1.keys +++ /dev/null @@ -1,10 +0,0 @@ -56289e99c94b6912bfc12adc093c9b51124f0dc54ac7a766b2bc5ccf558d8027 -e73b5812225f2e1c62de93fb6ec35a9338882991577f9a6d5651dce61cecd852 -3a94aab8123f3be575ea9679f893da5182e8b707e26f06159c264399113aef2a -6d7b68fca444069f3e65644848b215f1ecd4a90de8403734866dbb6af1c8957d -c4c55bfa3b5fd618fbd63e8cd62a8e0277f6e008cf76472e0a00941c6d326b46 -3c53c620aeb35bc15146b84688a5f478aaa1528e41c8ef11014b50ef4b110870 -1b63a1eb7537baac2eef64d111caa99af41e7ce9b0ce9d067276af3fa9e8a777 -bf1ebb0dcbc9f92c34a1beea6950c291d5eef8cc724477b43e3c4bca69af50aa -1fd0cdc3f62d6854af1397a14521efd4c073f35e003901312d7fa6bcd5c68c79 -1834abcea6a56a4d7f1e2c3ad13a8b762a7c79ad5819fa30f31488943e82626a \ No newline at end of file diff --git a/avalanchego/tests/upgrade/upgrade_test.go b/avalanchego/tests/upgrade/upgrade_test.go index 2a14a4b1..131d3d53 100644 --- a/avalanchego/tests/upgrade/upgrade_test.go +++ b/avalanchego/tests/upgrade/upgrade_test.go @@ -1,22 +1,19 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -// Runs upgrade tests. 
-package upgrade_test +package upgrade import ( - "context" "flag" "fmt" - "os" "testing" - "time" "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" + "github.com/stretchr/testify/require" - runner_sdk "github.com/ava-labs/avalanche-network-runner-sdk" - "github.com/ava-labs/avalanchego/tests" + "github.com/ava-labs/avalanchego/tests/fixture/e2e" + "github.com/ava-labs/avalanchego/tests/fixture/tmpnet" ) func TestUpgrade(t *testing.T) { @@ -25,121 +22,45 @@ func TestUpgrade(t *testing.T) { } var ( - logLevel string - networkRunnerGRPCEp string - networkRunnerAvalancheGoExecPath string - networkRunnerAvalancheGoExecPathToUpgrade string - networkRunnerAvalancheGoLogLevel string + avalancheGoExecPath string + avalancheGoExecPathToUpgradeTo string ) func init() { flag.StringVar( - &logLevel, - "log-level", - "info", - "log level", - ) - flag.StringVar( - &networkRunnerGRPCEp, - "network-runner-grpc-endpoint", - "", - "gRPC server endpoint for network-runner", - ) - flag.StringVar( - &networkRunnerAvalancheGoExecPath, - "network-runner-avalanchego-path", + &avalancheGoExecPath, + "avalanchego-path", "", "avalanchego executable path", ) flag.StringVar( - &networkRunnerAvalancheGoExecPathToUpgrade, - "network-runner-avalanchego-path-to-upgrade", + &avalancheGoExecPathToUpgradeTo, + "avalanchego-path-to-upgrade-to", "", - "avalanchego executable path (to upgrade to, only required for upgrade tests with local network-runner)", - ) - flag.StringVar( - &networkRunnerAvalancheGoLogLevel, - "network-runner-avalanchego-log-level", - "INFO", - "avalanchego log-level", + "avalanchego executable path to upgrade to", ) } -var runnerCli runner_sdk.Client - -var _ = ginkgo.BeforeSuite(func() { - _, err := os.Stat(networkRunnerAvalancheGoExecPath) - gomega.Expect(err).Should(gomega.BeNil()) - - _, err = os.Stat(networkRunnerAvalancheGoExecPathToUpgrade) - gomega.Expect(err).Should(gomega.BeNil()) - - runnerCli, err = runner_sdk.New(runner_sdk.Config{ - LogLevel: logLevel, - 
Endpoint: networkRunnerGRPCEp, - DialTimeout: 10 * time.Second, - }) - gomega.Expect(err).Should(gomega.BeNil()) - - ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second) - presp, err := runnerCli.Ping(ctx) - cancel() - gomega.Expect(err).Should(gomega.BeNil()) - tests.Outf("{{green}}network-runner running in PID %d{{/}}\n", presp.Pid) - - tests.Outf("{{magenta}}starting network-runner with %q{{/}}\n", networkRunnerAvalancheGoExecPath) - ctx, cancel = context.WithTimeout(context.Background(), 15*time.Second) - resp, err := runnerCli.Start(ctx, networkRunnerAvalancheGoExecPath, - runner_sdk.WithNumNodes(5), - runner_sdk.WithGlobalNodeConfig(fmt.Sprintf(`{"log-level":"%s"}`, networkRunnerAvalancheGoLogLevel)), - ) - cancel() - gomega.Expect(err).Should(gomega.BeNil()) - tests.Outf("{{green}}successfully started network-runner: {{/}} %+v\n", resp.ClusterInfo.NodeNames) - - // start is async, so wait some time for cluster health - time.Sleep(time.Minute) - - ctx, cancel = context.WithTimeout(context.Background(), 2*time.Minute) - _, err = runnerCli.Health(ctx) - cancel() - gomega.Expect(err).Should(gomega.BeNil()) -}) - -var _ = ginkgo.AfterSuite(func() { - tests.Outf("{{red}}shutting down network-runner cluster{{/}}\n") - ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) - _, err := runnerCli.Stop(ctx) - cancel() - gomega.Expect(err).Should(gomega.BeNil()) - - tests.Outf("{{red}}shutting down network-runner client{{/}}\n") - err = runnerCli.Close() - gomega.Expect(err).Should(gomega.BeNil()) -}) - var _ = ginkgo.Describe("[Upgrade]", func() { + require := require.New(ginkgo.GinkgoT()) + ginkgo.It("can upgrade versions", func() { - tests.Outf("{{magenta}}starting upgrade tests %q{{/}}\n", networkRunnerAvalancheGoExecPathToUpgrade) - ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second) - sresp, err := runnerCli.Status(ctx) - cancel() - gomega.Expect(err).Should(gomega.BeNil()) + network := &tmpnet.Network{} + 
e2e.StartNetwork(network, e2e.DefaultNetworkDir, avalancheGoExecPath, "" /* pluginDir */) + + ginkgo.By(fmt.Sprintf("restarting all nodes with %q binary", avalancheGoExecPathToUpgradeTo)) + for _, node := range network.Nodes { + ginkgo.By(fmt.Sprintf("restarting node %q with %q binary", node.NodeID, avalancheGoExecPathToUpgradeTo)) + require.NoError(node.Stop(e2e.DefaultContext())) - for _, name := range sresp.ClusterInfo.NodeNames { - tests.Outf("{{magenta}}restarting the node %q{{/}} with %q\n", name, networkRunnerAvalancheGoExecPathToUpgrade) - ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) - resp, err := runnerCli.RestartNode(ctx, name, runner_sdk.WithExecPath(networkRunnerAvalancheGoExecPathToUpgrade)) - cancel() - gomega.Expect(err).Should(gomega.BeNil()) + node.RuntimeConfig.AvalancheGoPath = avalancheGoExecPathToUpgradeTo - time.Sleep(20 * time.Second) + require.NoError(network.StartNode(e2e.DefaultContext(), ginkgo.GinkgoWriter, node)) - ctx, cancel = context.WithTimeout(context.Background(), 2*time.Minute) - _, err = runnerCli.Health(ctx) - cancel() - gomega.Expect(err).Should(gomega.BeNil()) - tests.Outf("{{green}}successfully upgraded %q to %q{{/}} (current info: %+v)\n", name, networkRunnerAvalancheGoExecPathToUpgrade, resp.ClusterInfo.NodeInfos) + ginkgo.By(fmt.Sprintf("waiting for node %q to report healthy after restart", node.NodeID)) + e2e.WaitForHealthy(node) } + + e2e.CheckBootstrapIsPossible(network) }) }) diff --git a/avalanchego/trace/exporter.go b/avalanchego/trace/exporter.go index 25220097..4cca5fe3 100644 --- a/avalanchego/trace/exporter.go +++ b/avalanchego/trace/exporter.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package trace diff --git a/avalanchego/trace/exporter_type.go b/avalanchego/trace/exporter_type.go index 52d0124f..206731ac 100644 --- a/avalanchego/trace/exporter_type.go +++ b/avalanchego/trace/exporter_type.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package trace diff --git a/avalanchego/trace/noop.go b/avalanchego/trace/noop.go index 789a249b..8c2a63a9 100644 --- a/avalanchego/trace/noop.go +++ b/avalanchego/trace/noop.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package trace @@ -7,9 +7,13 @@ import ( "context" "go.opentelemetry.io/otel/trace" + + "github.com/ava-labs/avalanchego/utils/constants" ) -var _ Tracer = (*noOpTracer)(nil) +var Noop Tracer = noOpTracer{ + t: trace.NewNoopTracerProvider().Tracer(constants.AppName), +} // noOpTracer is an implementation of trace.Tracer that does nothing. type noOpTracer struct { @@ -17,7 +21,7 @@ type noOpTracer struct { } func (n noOpTracer) Start(ctx context.Context, spanName string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { - return n.t.Start(ctx, spanName, opts...) + return n.t.Start(ctx, spanName, opts...) //nolint:spancheck } func (noOpTracer) Close() error { diff --git a/avalanchego/trace/tracer.go b/avalanchego/trace/tracer.go index 6def495a..1c8d40e8 100644 --- a/avalanchego/trace/tracer.go +++ b/avalanchego/trace/tracer.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package trace @@ -14,9 +14,6 @@ import ( sdktrace "go.opentelemetry.io/otel/sdk/trace" semconv "go.opentelemetry.io/otel/semconv/v1.4.0" - - "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/version" ) const ( @@ -36,6 +33,9 @@ type Config struct { // If >= 1 always samples. // If <= 0 never samples. TraceSampleRate float64 `json:"traceSampleRate"` + + AppName string `json:"appName"` + Version string `json:"version"` } type Tracer interface { @@ -57,9 +57,7 @@ func (t *tracer) Close() error { func New(config Config) (Tracer, error) { if !config.Enabled { - return &noOpTracer{ - t: trace.NewNoopTracerProvider().Tracer(constants.AppName), - }, nil + return Noop, nil } exporter, err := newExporter(config.ExporterConfig) @@ -70,15 +68,15 @@ func New(config Config) (Tracer, error) { tracerProviderOpts := []sdktrace.TracerProviderOption{ sdktrace.WithBatcher(exporter, sdktrace.WithExportTimeout(tracerExportTimeout)), sdktrace.WithResource(resource.NewWithAttributes(semconv.SchemaURL, - attribute.Stringer("version", version.Current), - semconv.ServiceNameKey.String(constants.AppName), + attribute.String("version", config.Version), + semconv.ServiceNameKey.String(config.AppName), )), sdktrace.WithSampler(sdktrace.TraceIDRatioBased(config.TraceSampleRate)), } tracerProvider := sdktrace.NewTracerProvider(tracerProviderOpts...) return &tracer{ - Tracer: tracerProvider.Tracer(constants.AppName), + Tracer: tracerProvider.Tracer(config.AppName), tp: tracerProvider, }, nil } diff --git a/avalanchego/utils/atomic.go b/avalanchego/utils/atomic.go index d72dd4d4..3bb125ee 100644 --- a/avalanchego/utils/atomic.go +++ b/avalanchego/utils/atomic.go @@ -1,11 +1,9 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package utils -import ( - "sync" -) +import "sync" type Atomic[T any] struct { lock sync.RWMutex diff --git a/avalanchego/utils/atomic_test.go b/avalanchego/utils/atomic_test.go index 1af2ba49..3fa74063 100644 --- a/avalanchego/utils/atomic_test.go +++ b/avalanchego/utils/atomic_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package utils diff --git a/avalanchego/utils/bag/bag.go b/avalanchego/utils/bag/bag.go index cf889fa5..a9af1acb 100644 --- a/avalanchego/utils/bag/bag.go +++ b/avalanchego/utils/bag/bag.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package bag @@ -9,6 +9,7 @@ import ( "golang.org/x/exp/maps" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/set" ) @@ -23,6 +24,13 @@ type Bag[T comparable] struct { metThreshold set.Set[T] } +// Of returns a Bag initialized with [elts] +func Of[T comparable](elts ...T) Bag[T] { + var b Bag[T] + b.Add(elts...) + return b +} + func (b *Bag[T]) init() { if b.counts == nil { b.counts = make(map[T]int, minBagSize) @@ -93,8 +101,6 @@ func (b *Bag[T]) Equals(other Bag[T]) bool { // Mode returns the most common element in the bag and the count of that element. // If there's a tie, any of the tied element may be returned. -// TODO for Stephen: Does the above violate an assumption made by Snowball? -// If the bag is empty, the zero value and 0 are returned. 
func (b *Bag[T]) Mode() (T, int) { var ( mode T @@ -154,10 +160,10 @@ func (b *Bag[T]) Remove(elt T) { b.size -= count } -func (b *Bag[_]) PrefixedString(prefix string) string { +func (b *Bag[T]) PrefixedString(prefix string) string { sb := strings.Builder{} - sb.WriteString(fmt.Sprintf("Bag: (Size = %d)", b.Len())) + sb.WriteString(fmt.Sprintf("Bag[%T]: (Size = %d)", utils.Zero[T](), b.Len())) for elt, count := range b.counts { sb.WriteString(fmt.Sprintf("\n%s %v: %d", prefix, elt, count)) } diff --git a/avalanchego/utils/bag/bag_benchmark_test.go b/avalanchego/utils/bag/bag_benchmark_test.go index 833ce755..e17b27b8 100644 --- a/avalanchego/utils/bag/bag_benchmark_test.go +++ b/avalanchego/utils/bag/bag_benchmark_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package bag diff --git a/avalanchego/utils/bag/bag_test.go b/avalanchego/utils/bag/bag_test.go index 6a9dece6..3b6e0faa 100644 --- a/avalanchego/utils/bag/bag_test.go +++ b/avalanchego/utils/bag/bag_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package bag @@ -9,6 +9,55 @@ import ( "github.com/stretchr/testify/require" ) +func TestBagOf(t *testing.T) { + tests := []struct { + name string + elements []int + expectedCounts map[int]int + }{ + { + name: "nil", + elements: nil, + expectedCounts: map[int]int{}, + }, + { + name: "empty", + elements: []int{}, + expectedCounts: map[int]int{}, + }, + { + name: "unique elements", + elements: []int{1, 2, 3}, + expectedCounts: map[int]int{ + 1: 1, + 2: 1, + 3: 1, + }, + }, + { + name: "duplicate elements", + elements: []int{1, 2, 3, 1, 2, 3}, + expectedCounts: map[int]int{ + 1: 2, + 2: 2, + 3: 2, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + + b := Of(tt.elements...) + + require.Equal(len(tt.elements), b.Len()) + for entry, count := range tt.expectedCounts { + require.Equal(count, b.Count(entry)) + } + }) + } +} + func TestBagAdd(t *testing.T) { require := require.New(t) @@ -17,19 +66,19 @@ func TestBagAdd(t *testing.T) { bag := Bag[int]{} - require.Equal(0, bag.Count(elt0)) - require.Equal(0, bag.Count(elt1)) - require.Equal(0, bag.Len()) - require.Len(bag.List(), 0) + require.Zero(bag.Count(elt0)) + require.Zero(bag.Count(elt1)) + require.Zero(bag.Len()) + require.Empty(bag.List()) mode, freq := bag.Mode() require.Equal(elt0, mode) - require.Equal(0, freq) - require.Len(bag.Threshold(), 0) + require.Zero(freq) + require.Empty(bag.Threshold()) bag.Add(elt0) require.Equal(1, bag.Count(elt0)) - require.Equal(0, bag.Count(elt1)) + require.Zero(bag.Count(elt1)) require.Equal(1, bag.Len()) require.Len(bag.List(), 1) mode, freq = bag.Mode() @@ -40,7 +89,7 @@ func TestBagAdd(t *testing.T) { bag.Add(elt0) require.Equal(2, bag.Count(elt0)) - require.Equal(0, bag.Count(elt1)) + require.Zero(bag.Count(elt1)) require.Equal(2, bag.Len()) require.Len(bag.List(), 1) mode, freq = bag.Mode() @@ -113,7 +162,7 @@ func TestBagFilter(t *testing.T) { even := bag.Filter(filterFunc) require.Equal(1, even.Count(elt0)) - 
require.Equal(0, even.Count(elt1)) + require.Zero(even.Count(elt1)) require.Equal(5, even.Count(elt2)) } @@ -138,11 +187,11 @@ func TestBagSplit(t *testing.T) { odds := bags[1] require.Equal(1, evens.Count(elt0)) - require.Equal(0, evens.Count(elt1)) + require.Zero(evens.Count(elt1)) require.Equal(5, evens.Count(elt2)) - require.Equal(0, odds.Count(elt0)) + require.Zero(odds.Count(elt0)) require.Equal(3, odds.Count(elt1)) - require.Equal(0, odds.Count(elt2)) + require.Zero(odds.Count(elt2)) } func TestBagString(t *testing.T) { @@ -152,8 +201,8 @@ func TestBagString(t *testing.T) { bag.AddCount(elt0, 1337) - expected := "Bag: (Size = 1337)\n" + - " 123: 1337" + expected := `Bag[int]: (Size = 1337) + 123: 1337` require.Equal(t, expected, bag.String()) } @@ -168,7 +217,7 @@ func TestBagRemove(t *testing.T) { bag := Bag[int]{} bag.Remove(elt0) - require.Equal(0, bag.Len()) + require.Zero(bag.Len()) bag.AddCount(elt0, 3) bag.AddCount(elt1, 2) @@ -181,7 +230,7 @@ func TestBagRemove(t *testing.T) { bag.Remove(elt0) - require.Equal(0, bag.Count(elt0)) + require.Zero(bag.Count(elt0)) require.Equal(2, bag.Count(elt1)) require.Equal(1, bag.Count(elt2)) require.Equal(3, bag.Len()) @@ -191,8 +240,8 @@ func TestBagRemove(t *testing.T) { require.Equal(2, freq) bag.Remove(elt1) - require.Equal(0, bag.Count(elt0)) - require.Equal(0, bag.Count(elt1)) + require.Zero(bag.Count(elt0)) + require.Zero(bag.Count(elt1)) require.Equal(1, bag.Count(elt2)) require.Equal(1, bag.Len()) require.Len(bag.counts, 1) diff --git a/avalanchego/utils/bag/unique_bag.go b/avalanchego/utils/bag/unique_bag.go index debad45b..d695b799 100644 --- a/avalanchego/utils/bag/unique_bag.go +++ b/avalanchego/utils/bag/unique_bag.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package bag @@ -9,6 +9,7 @@ import ( "golang.org/x/exp/maps" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/set" ) @@ -93,10 +94,10 @@ func (b *UniqueBag[T]) Bag(threshold int) Bag[T] { return bag } -func (b *UniqueBag[_]) PrefixedString(prefix string) string { +func (b *UniqueBag[T]) PrefixedString(prefix string) string { sb := strings.Builder{} - sb.WriteString(fmt.Sprintf("UniqueBag: (Size = %d)", len(*b))) + sb.WriteString(fmt.Sprintf("UniqueBag[%T]: (Size = %d)", utils.Zero[T](), len(*b))) for key, set := range *b { sb.WriteString(fmt.Sprintf("\n%s %v: %s", prefix, key, set)) } @@ -110,5 +111,5 @@ func (b *UniqueBag[_]) String() string { // Removes all key --> bitset pairs. func (b *UniqueBag[_]) Clear() { - maps.Clear(*b) + clear(*b) } diff --git a/avalanchego/utils/bag/unique_bag_test.go b/avalanchego/utils/bag/unique_bag_test.go index e1920a21..1562b5c9 100644 --- a/avalanchego/utils/bag/unique_bag_test.go +++ b/avalanchego/utils/bag/unique_bag_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package bag @@ -96,8 +96,8 @@ func TestUniqueBagClear(t *testing.T) { require.Empty(b.List()) bs := b.GetSet(elt1) - require.Equal(0, bs.Len()) + require.Zero(bs.Len()) bs = b.GetSet(elt2) - require.Equal(0, bs.Len()) + require.Zero(bs.Len()) } diff --git a/avalanchego/utils/beacon/beacon.go b/avalanchego/utils/beacon/beacon.go index 47e41032..38ac6df5 100644 --- a/avalanchego/utils/beacon/beacon.go +++ b/avalanchego/utils/beacon/beacon.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package beacon diff --git a/avalanchego/utils/beacon/set.go b/avalanchego/utils/beacon/set.go index 243f8399..8b6970b5 100644 --- a/avalanchego/utils/beacon/set.go +++ b/avalanchego/utils/beacon/set.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package beacon diff --git a/avalanchego/utils/beacon/set_test.go b/avalanchego/utils/beacon/set_test.go index 4e8ada45..976d0582 100644 --- a/avalanchego/utils/beacon/set_test.go +++ b/avalanchego/utils/beacon/set_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package beacon @@ -16,9 +16,9 @@ import ( func TestSet(t *testing.T) { require := require.New(t) - id0 := ids.NodeID{0} - id1 := ids.NodeID{1} - id2 := ids.NodeID{2} + id0 := ids.BuildTestNodeID([]byte{0}) + id1 := ids.BuildTestNodeID([]byte{1}) + id2 := ids.BuildTestNodeID([]byte{2}) ip0 := ips.IPPort{ IP: net.IPv4zero, @@ -39,70 +39,44 @@ func TestSet(t *testing.T) { s := NewSet() - idsArg := s.IDsArg() - require.Equal("", idsArg) - ipsArg := s.IPsArg() - require.Equal("", ipsArg) - len := s.Len() - require.Equal(0, len) + require.Equal("", s.IDsArg()) + require.Equal("", s.IPsArg()) + require.Zero(s.Len()) - err := s.Add(b0) - require.NoError(err) + require.NoError(s.Add(b0)) - idsArg = s.IDsArg() - require.Equal("NodeID-111111111111111111116DBWJs", idsArg) - ipsArg = s.IPsArg() - require.Equal("0.0.0.0:0", ipsArg) - len = s.Len() - require.Equal(1, len) + require.Equal("NodeID-111111111111111111116DBWJs", s.IDsArg()) + require.Equal("0.0.0.0:0", s.IPsArg()) + require.Equal(1, s.Len()) - err = s.Add(b0) + err := s.Add(b0) require.ErrorIs(err, errDuplicateID) - idsArg = s.IDsArg() - require.Equal("NodeID-111111111111111111116DBWJs", idsArg) - ipsArg = s.IPsArg() - 
require.Equal("0.0.0.0:0", ipsArg) - len = s.Len() - require.Equal(1, len) - - err = s.Add(b1) - require.NoError(err) - - idsArg = s.IDsArg() - require.Equal("NodeID-111111111111111111116DBWJs,NodeID-6HgC8KRBEhXYbF4riJyJFLSHt37UNuRt", idsArg) - ipsArg = s.IPsArg() - require.Equal("0.0.0.0:0,0.0.0.0:1", ipsArg) - len = s.Len() - require.Equal(2, len) - - err = s.Add(b2) - require.NoError(err) - - idsArg = s.IDsArg() - require.Equal("NodeID-111111111111111111116DBWJs,NodeID-6HgC8KRBEhXYbF4riJyJFLSHt37UNuRt,NodeID-BaMPFdqMUQ46BV8iRcwbVfsam55kMqcp", idsArg) - ipsArg = s.IPsArg() - require.Equal("0.0.0.0:0,0.0.0.0:1,0.0.0.0:2", ipsArg) - len = s.Len() - require.Equal(3, len) - - err = s.RemoveByID(b0.ID()) - require.NoError(err) - - idsArg = s.IDsArg() - require.Equal("NodeID-BaMPFdqMUQ46BV8iRcwbVfsam55kMqcp,NodeID-6HgC8KRBEhXYbF4riJyJFLSHt37UNuRt", idsArg) - ipsArg = s.IPsArg() - require.Equal("0.0.0.0:2,0.0.0.0:1", ipsArg) - len = s.Len() - require.Equal(2, len) - - err = s.RemoveByIP(b1.IP()) - require.NoError(err) - - idsArg = s.IDsArg() - require.Equal("NodeID-BaMPFdqMUQ46BV8iRcwbVfsam55kMqcp", idsArg) - ipsArg = s.IPsArg() - require.Equal("0.0.0.0:2", ipsArg) - len = s.Len() - require.Equal(1, len) + require.Equal("NodeID-111111111111111111116DBWJs", s.IDsArg()) + require.Equal("0.0.0.0:0", s.IPsArg()) + require.Equal(1, s.Len()) + + require.NoError(s.Add(b1)) + + require.Equal("NodeID-111111111111111111116DBWJs,NodeID-6HgC8KRBEhXYbF4riJyJFLSHt37UNuRt", s.IDsArg()) + require.Equal("0.0.0.0:0,0.0.0.0:1", s.IPsArg()) + require.Equal(2, s.Len()) + + require.NoError(s.Add(b2)) + + require.Equal("NodeID-111111111111111111116DBWJs,NodeID-6HgC8KRBEhXYbF4riJyJFLSHt37UNuRt,NodeID-BaMPFdqMUQ46BV8iRcwbVfsam55kMqcp", s.IDsArg()) + require.Equal("0.0.0.0:0,0.0.0.0:1,0.0.0.0:2", s.IPsArg()) + require.Equal(3, s.Len()) + + require.NoError(s.RemoveByID(b0.ID())) + + require.Equal("NodeID-BaMPFdqMUQ46BV8iRcwbVfsam55kMqcp,NodeID-6HgC8KRBEhXYbF4riJyJFLSHt37UNuRt", s.IDsArg()) + 
require.Equal("0.0.0.0:2,0.0.0.0:1", s.IPsArg()) + require.Equal(2, s.Len()) + + require.NoError(s.RemoveByIP(b1.IP())) + + require.Equal("NodeID-BaMPFdqMUQ46BV8iRcwbVfsam55kMqcp", s.IDsArg()) + require.Equal("0.0.0.0:2", s.IPsArg()) + require.Equal(1, s.Len()) } diff --git a/avalanchego/utils/bimap/bimap.go b/avalanchego/utils/bimap/bimap.go new file mode 100644 index 00000000..bde60d97 --- /dev/null +++ b/avalanchego/utils/bimap/bimap.go @@ -0,0 +1,154 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package bimap + +import ( + "bytes" + "encoding/json" + "errors" + + "golang.org/x/exp/maps" + + "github.com/ava-labs/avalanchego/utils" +) + +var ( + _ json.Marshaler = (*BiMap[int, int])(nil) + _ json.Unmarshaler = (*BiMap[int, int])(nil) + + nullBytes = []byte("null") + errNotBijective = errors.New("map not bijective") +) + +type Entry[K, V any] struct { + Key K + Value V +} + +// BiMap is a bi-directional map. +type BiMap[K, V comparable] struct { + keyToValue map[K]V + valueToKey map[V]K +} + +// New creates a new empty bimap. +func New[K, V comparable]() *BiMap[K, V] { + return &BiMap[K, V]{ + keyToValue: make(map[K]V), + valueToKey: make(map[V]K), + } +} + +// Put the key value pair into the map. If either [key] or [val] was previously +// in the map, the previous entries will be removed and returned. +// +// Note: Unlike normal maps, it's possible that Put removes 0, 1, or 2 existing +// entries to ensure that mappings are one-to-one. 
+func (m *BiMap[K, V]) Put(key K, val V) []Entry[K, V] { + var removed []Entry[K, V] + oldVal, oldValDeleted := m.DeleteKey(key) + if oldValDeleted { + removed = append(removed, Entry[K, V]{ + Key: key, + Value: oldVal, + }) + } + oldKey, oldKeyDeleted := m.DeleteValue(val) + if oldKeyDeleted { + removed = append(removed, Entry[K, V]{ + Key: oldKey, + Value: val, + }) + } + m.keyToValue[key] = val + m.valueToKey[val] = key + return removed +} + +// GetKey that maps to the provided value. +func (m *BiMap[K, V]) GetKey(val V) (K, bool) { + key, ok := m.valueToKey[val] + return key, ok +} + +// GetValue that is mapped to the provided key. +func (m *BiMap[K, V]) GetValue(key K) (V, bool) { + val, ok := m.keyToValue[key] + return val, ok +} + +// HasKey returns true if [key] is in the map. +func (m *BiMap[K, _]) HasKey(key K) bool { + _, ok := m.keyToValue[key] + return ok +} + +// HasValue returns true if [val] is in the map. +func (m *BiMap[_, V]) HasValue(val V) bool { + _, ok := m.valueToKey[val] + return ok +} + +// DeleteKey removes [key] from the map and returns the value it mapped to. +func (m *BiMap[K, V]) DeleteKey(key K) (V, bool) { + val, ok := m.keyToValue[key] + if !ok { + return utils.Zero[V](), false + } + delete(m.keyToValue, key) + delete(m.valueToKey, val) + return val, true +} + +// DeleteValue removes [val] from the map and returns the key that mapped to it. +func (m *BiMap[K, V]) DeleteValue(val V) (K, bool) { + key, ok := m.valueToKey[val] + if !ok { + return utils.Zero[K](), false + } + delete(m.keyToValue, key) + delete(m.valueToKey, val) + return key, true +} + +// Keys returns the keys of the map. The keys will be in an indeterminate order. +func (m *BiMap[K, _]) Keys() []K { + return maps.Keys(m.keyToValue) +} + +// Values returns the values of the map. The values will be in an indeterminate +// order. +func (m *BiMap[_, V]) Values() []V { + return maps.Values(m.keyToValue) +} + +// Len return the number of entries in this map. 
+func (m *BiMap[K, V]) Len() int { + return len(m.keyToValue) +} + +func (m *BiMap[K, V]) MarshalJSON() ([]byte, error) { + return json.Marshal(m.keyToValue) +} + +func (m *BiMap[K, V]) UnmarshalJSON(b []byte) error { + if bytes.Equal(b, nullBytes) { + return nil + } + var keyToValue map[K]V + if err := json.Unmarshal(b, &keyToValue); err != nil { + return err + } + valueToKey := make(map[V]K, len(keyToValue)) + for k, v := range keyToValue { + valueToKey[v] = k + } + if len(keyToValue) != len(valueToKey) { + return errNotBijective + } + + m.keyToValue = keyToValue + m.valueToKey = valueToKey + return nil +} diff --git a/avalanchego/utils/bimap/bimap_test.go b/avalanchego/utils/bimap/bimap_test.go new file mode 100644 index 00000000..1792bec9 --- /dev/null +++ b/avalanchego/utils/bimap/bimap_test.go @@ -0,0 +1,366 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package bimap + +import ( + "encoding/json" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestBiMapPut(t *testing.T) { + tests := []struct { + name string + state *BiMap[int, int] + key int + value int + expectedRemoved []Entry[int, int] + expectedState *BiMap[int, int] + }{ + { + name: "none removed", + state: New[int, int](), + key: 1, + value: 2, + expectedRemoved: nil, + expectedState: &BiMap[int, int]{ + keyToValue: map[int]int{ + 1: 2, + }, + valueToKey: map[int]int{ + 2: 1, + }, + }, + }, + { + name: "key removed", + state: &BiMap[int, int]{ + keyToValue: map[int]int{ + 1: 2, + }, + valueToKey: map[int]int{ + 2: 1, + }, + }, + key: 1, + value: 3, + expectedRemoved: []Entry[int, int]{ + { + Key: 1, + Value: 2, + }, + }, + expectedState: &BiMap[int, int]{ + keyToValue: map[int]int{ + 1: 3, + }, + valueToKey: map[int]int{ + 3: 1, + }, + }, + }, + { + name: "value removed", + state: &BiMap[int, int]{ + keyToValue: map[int]int{ + 1: 2, + }, + valueToKey: map[int]int{ + 2: 1, + }, + }, + key: 3, + value: 2, + 
expectedRemoved: []Entry[int, int]{ + { + Key: 1, + Value: 2, + }, + }, + expectedState: &BiMap[int, int]{ + keyToValue: map[int]int{ + 3: 2, + }, + valueToKey: map[int]int{ + 2: 3, + }, + }, + }, + { + name: "key and value removed", + state: &BiMap[int, int]{ + keyToValue: map[int]int{ + 1: 2, + 3: 4, + }, + valueToKey: map[int]int{ + 2: 1, + 4: 3, + }, + }, + key: 1, + value: 4, + expectedRemoved: []Entry[int, int]{ + { + Key: 1, + Value: 2, + }, + { + Key: 3, + Value: 4, + }, + }, + expectedState: &BiMap[int, int]{ + keyToValue: map[int]int{ + 1: 4, + }, + valueToKey: map[int]int{ + 4: 1, + }, + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + removed := test.state.Put(test.key, test.value) + require.Equal(test.expectedRemoved, removed) + require.Equal(test.expectedState, test.state) + }) + } +} + +func TestBiMapHasValueAndGetKey(t *testing.T) { + m := New[int, int]() + require.Empty(t, m.Put(1, 2)) + + tests := []struct { + name string + value int + expectedKey int + expectedExists bool + }{ + { + name: "fetch unknown", + value: 3, + expectedKey: 0, + expectedExists: false, + }, + { + name: "fetch known value", + value: 2, + expectedKey: 1, + expectedExists: true, + }, + { + name: "fetch known key", + value: 1, + expectedKey: 0, + expectedExists: false, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + exists := m.HasValue(test.value) + require.Equal(test.expectedExists, exists) + + key, exists := m.GetKey(test.value) + require.Equal(test.expectedKey, key) + require.Equal(test.expectedExists, exists) + }) + } +} + +func TestBiMapHasKeyAndGetValue(t *testing.T) { + m := New[int, int]() + require.Empty(t, m.Put(1, 2)) + + tests := []struct { + name string + key int + expectedValue int + expectedExists bool + }{ + { + name: "fetch unknown", + key: 3, + expectedValue: 0, + expectedExists: false, + }, + { + name: "fetch known key", + key: 
1, + expectedValue: 2, + expectedExists: true, + }, + { + name: "fetch known value", + key: 2, + expectedValue: 0, + expectedExists: false, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + exists := m.HasKey(test.key) + require.Equal(test.expectedExists, exists) + + value, exists := m.GetValue(test.key) + require.Equal(test.expectedValue, value) + require.Equal(test.expectedExists, exists) + }) + } +} + +func TestBiMapDeleteKey(t *testing.T) { + tests := []struct { + name string + state *BiMap[int, int] + key int + expectedValue int + expectedRemoved bool + expectedState *BiMap[int, int] + }{ + { + name: "none removed", + state: New[int, int](), + key: 1, + expectedValue: 0, + expectedRemoved: false, + expectedState: New[int, int](), + }, + { + name: "key removed", + state: &BiMap[int, int]{ + keyToValue: map[int]int{ + 1: 2, + }, + valueToKey: map[int]int{ + 2: 1, + }, + }, + key: 1, + expectedValue: 2, + expectedRemoved: true, + expectedState: New[int, int](), + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + value, removed := test.state.DeleteKey(test.key) + require.Equal(test.expectedValue, value) + require.Equal(test.expectedRemoved, removed) + require.Equal(test.expectedState, test.state) + }) + } +} + +func TestBiMapDeleteValue(t *testing.T) { + tests := []struct { + name string + state *BiMap[int, int] + value int + expectedKey int + expectedRemoved bool + expectedState *BiMap[int, int] + }{ + { + name: "none removed", + state: New[int, int](), + value: 1, + expectedKey: 0, + expectedRemoved: false, + expectedState: New[int, int](), + }, + { + name: "key removed", + state: &BiMap[int, int]{ + keyToValue: map[int]int{ + 1: 2, + }, + valueToKey: map[int]int{ + 2: 1, + }, + }, + value: 2, + expectedKey: 1, + expectedRemoved: true, + expectedState: New[int, int](), + }, + } + for _, test := range tests { + t.Run(test.name, func(t 
*testing.T) { + require := require.New(t) + + key, removed := test.state.DeleteValue(test.value) + require.Equal(test.expectedKey, key) + require.Equal(test.expectedRemoved, removed) + require.Equal(test.expectedState, test.state) + }) + } +} + +func TestBiMapLenAndLists(t *testing.T) { + require := require.New(t) + + m := New[int, int]() + require.Zero(m.Len()) + require.Empty(m.Keys()) + require.Empty(m.Values()) + + m.Put(1, 2) + require.Equal(1, m.Len()) + require.ElementsMatch([]int{1}, m.Keys()) + require.ElementsMatch([]int{2}, m.Values()) + + m.Put(2, 3) + require.Equal(2, m.Len()) + require.ElementsMatch([]int{1, 2}, m.Keys()) + require.ElementsMatch([]int{2, 3}, m.Values()) + + m.Put(1, 3) + require.Equal(1, m.Len()) + require.ElementsMatch([]int{1}, m.Keys()) + require.ElementsMatch([]int{3}, m.Values()) + + m.DeleteKey(1) + require.Zero(m.Len()) + require.Empty(m.Keys()) + require.Empty(m.Values()) +} + +func TestBiMapJSON(t *testing.T) { + require := require.New(t) + + expectedMap := New[int, int]() + expectedMap.Put(1, 2) + expectedMap.Put(2, 3) + + jsonBytes, err := json.Marshal(expectedMap) + require.NoError(err) + + expectedJSONBytes := []byte(`{"1":2,"2":3}`) + require.Equal(expectedJSONBytes, jsonBytes) + + var unmarshalledMap BiMap[int, int] + require.NoError(json.Unmarshal(jsonBytes, &unmarshalledMap)) + require.Equal(expectedMap, &unmarshalledMap) +} + +func TestBiMapInvalidJSON(t *testing.T) { + require := require.New(t) + + invalidJSONBytes := []byte(`{"1":2,"2":2}`) + var unmarshalledMap BiMap[int, int] + err := json.Unmarshal(invalidJSONBytes, &unmarshalledMap) + require.ErrorIs(err, errNotBijective) +} diff --git a/avalanchego/utils/bloom/bloom_filter.go b/avalanchego/utils/bloom/bloom_filter.go deleted file mode 100644 index 498c57d3..00000000 --- a/avalanchego/utils/bloom/bloom_filter.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package bloom - -import ( - "errors" - "sync" - - "github.com/spaolacci/murmur3" - - streakKnife "github.com/holiman/bloomfilter/v2" -) - -var errMaxBytes = errors.New("too large") - -type Filter interface { - // Add adds to filter, assumed thread safe - Add(...[]byte) - - // Check checks filter, assumed thread safe - Check([]byte) bool -} - -func New(maxN uint64, p float64, maxBytes uint64) (Filter, error) { - neededBytes := bytesSteakKnifeFilter(maxN, p) - if neededBytes > maxBytes { - return nil, errMaxBytes - } - return newSteakKnifeFilter(maxN, p) -} - -type steakKnifeFilter struct { - lock sync.RWMutex - filter *streakKnife.Filter -} - -func bytesSteakKnifeFilter(maxN uint64, p float64) uint64 { - m := streakKnife.OptimalM(maxN, p) - k := streakKnife.OptimalK(m, maxN) - - // This is pulled from bloomFilter.newBits and bloomfilter.newRandKeys. The - // calculation is the size of the bitset which would be created from this - // filter. - mSize := (m + 63) / 64 - totalSize := mSize + k - - return totalSize * 8 // 8 == sizeof(uint64)) -} - -func newSteakKnifeFilter(maxN uint64, p float64) (Filter, error) { - m := streakKnife.OptimalM(maxN, p) - k := streakKnife.OptimalK(m, maxN) - - filter, err := streakKnife.New(m, k) - return &steakKnifeFilter{filter: filter}, err -} - -func (f *steakKnifeFilter) Add(bl ...[]byte) { - f.lock.Lock() - defer f.lock.Unlock() - - for _, b := range bl { - h := murmur3.New64() - _, _ = h.Write(b) - f.filter.Add(h) - } -} - -func (f *steakKnifeFilter) Check(b []byte) bool { - f.lock.RLock() - defer f.lock.RUnlock() - - h := murmur3.New64() - _, _ = h.Write(b) - return f.filter.Contains(h) -} diff --git a/avalanchego/utils/bloom/filter.go b/avalanchego/utils/bloom/filter.go new file mode 100644 index 00000000..7a0e3026 --- /dev/null +++ b/avalanchego/utils/bloom/filter.go @@ -0,0 +1,147 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package bloom + +import ( + "crypto/rand" + "encoding/binary" + "errors" + "fmt" + "math/bits" + "sync" +) + +const ( + minHashes = 1 + maxHashes = 16 // Supports a false positive probability of 2^-16 when using optimal size values + minEntries = 1 + + bitsPerByte = 8 + bytesPerUint64 = 8 + hashRotation = 17 +) + +var ( + errInvalidNumHashes = errors.New("invalid num hashes") + errTooFewHashes = errors.New("too few hashes") + errTooManyHashes = errors.New("too many hashes") + errTooFewEntries = errors.New("too few entries") +) + +type Filter struct { + // numBits is always equal to [bitsPerByte * len(entries)] + numBits uint64 + + lock sync.RWMutex + hashSeeds []uint64 + entries []byte + count int +} + +// New creates a new Filter with the specified number of hashes and bytes for +// entries. The returned bloom filter is safe for concurrent usage. +func New(numHashes, numEntries int) (*Filter, error) { + if numEntries < minEntries { + return nil, errTooFewEntries + } + + hashSeeds, err := newHashSeeds(numHashes) + if err != nil { + return nil, err + } + + return &Filter{ + numBits: uint64(numEntries * bitsPerByte), + hashSeeds: hashSeeds, + entries: make([]byte, numEntries), + count: 0, + }, nil +} + +func (f *Filter) Add(hash uint64) { + f.lock.Lock() + defer f.lock.Unlock() + + _ = 1 % f.numBits // hint to the compiler that numBits is not 0 + for _, seed := range f.hashSeeds { + hash = bits.RotateLeft64(hash, hashRotation) ^ seed + index := hash % f.numBits + byteIndex := index / bitsPerByte + bitIndex := index % bitsPerByte + f.entries[byteIndex] |= 1 << bitIndex + } + f.count++ +} + +// Count returns the number of elements that have been added to the bloom +// filter. 
+func (f *Filter) Count() int { + f.lock.RLock() + defer f.lock.RUnlock() + + return f.count +} + +func (f *Filter) Contains(hash uint64) bool { + f.lock.RLock() + defer f.lock.RUnlock() + + return contains(f.hashSeeds, f.entries, hash) +} + +func (f *Filter) Marshal() []byte { + f.lock.RLock() + defer f.lock.RUnlock() + + return marshal(f.hashSeeds, f.entries) +} + +func newHashSeeds(count int) ([]uint64, error) { + switch { + case count < minHashes: + return nil, fmt.Errorf("%w: %d < %d", errTooFewHashes, count, minHashes) + case count > maxHashes: + return nil, fmt.Errorf("%w: %d > %d", errTooManyHashes, count, maxHashes) + } + + bytes := make([]byte, count*bytesPerUint64) + if _, err := rand.Reader.Read(bytes); err != nil { + return nil, err + } + + seeds := make([]uint64, count) + for i := range seeds { + seeds[i] = binary.BigEndian.Uint64(bytes[i*bytesPerUint64:]) + } + return seeds, nil +} + +func contains(hashSeeds []uint64, entries []byte, hash uint64) bool { + var ( + numBits = bitsPerByte * uint64(len(entries)) + _ = 1 % numBits // hint to the compiler that numBits is not 0 + accumulator byte = 1 + ) + for seedIndex := 0; seedIndex < len(hashSeeds) && accumulator != 0; seedIndex++ { + hash = bits.RotateLeft64(hash, hashRotation) ^ hashSeeds[seedIndex] + index := hash % numBits + byteIndex := index / bitsPerByte + bitIndex := index % bitsPerByte + accumulator &= entries[byteIndex] >> bitIndex + } + return accumulator != 0 +} + +func marshal(hashSeeds []uint64, entries []byte) []byte { + numHashes := len(hashSeeds) + entriesOffset := 1 + numHashes*bytesPerUint64 + + bytes := make([]byte, entriesOffset+len(entries)) + bytes[0] = byte(numHashes) + for i, seed := range hashSeeds { + binary.BigEndian.PutUint64(bytes[1+i*bytesPerUint64:], seed) + } + copy(bytes[entriesOffset:], entries) + return bytes +} diff --git a/avalanchego/utils/bloom/filter_test.go b/avalanchego/utils/bloom/filter_test.go new file mode 100644 index 00000000..856797f8 --- /dev/null +++ 
b/avalanchego/utils/bloom/filter_test.go @@ -0,0 +1,96 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package bloom + +import ( + "math/rand" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/utils/units" +) + +func TestNewErrors(t *testing.T) { + tests := []struct { + numHashes int + numEntries int + err error + }{ + { + numHashes: 0, + numEntries: 1, + err: errTooFewHashes, + }, + { + numHashes: 17, + numEntries: 1, + err: errTooManyHashes, + }, + { + numHashes: 8, + numEntries: 0, + err: errTooFewEntries, + }, + } + for _, test := range tests { + t.Run(test.err.Error(), func(t *testing.T) { + _, err := New(test.numHashes, test.numEntries) + require.ErrorIs(t, err, test.err) + }) + } +} + +func TestNormalUsage(t *testing.T) { + require := require.New(t) + + toAdd := make([]uint64, 1024) + for i := range toAdd { + toAdd[i] = rand.Uint64() //#nosec G404 + } + + initialNumHashes, initialNumBytes := OptimalParameters(1024, 0.01) + filter, err := New(initialNumHashes, initialNumBytes) + require.NoError(err) + + for i, elem := range toAdd { + filter.Add(elem) + for _, elem := range toAdd[:i] { + require.True(filter.Contains(elem)) + } + } + + require.Equal(len(toAdd), filter.Count()) + + filterBytes := filter.Marshal() + parsedFilter, err := Parse(filterBytes) + require.NoError(err) + + for _, elem := range toAdd { + require.True(parsedFilter.Contains(elem)) + } + + parsedFilterBytes := parsedFilter.Marshal() + require.Equal(filterBytes, parsedFilterBytes) +} + +func BenchmarkAdd(b *testing.B) { + f, err := New(8, 16*units.KiB) + require.NoError(b, err) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + f.Add(1) + } +} + +func BenchmarkMarshal(b *testing.B) { + f, err := New(OptimalParameters(10_000, .01)) + require.NoError(b, err) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + f.Marshal() + } +} diff --git a/avalanchego/utils/bloom/hasher.go 
b/avalanchego/utils/bloom/hasher.go new file mode 100644 index 00000000..d5e3f5a6 --- /dev/null +++ b/avalanchego/utils/bloom/hasher.go @@ -0,0 +1,31 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package bloom + +import ( + "crypto/sha256" + "encoding/binary" +) + +func Add(f *Filter, key, salt []byte) { + f.Add(Hash(key, salt)) +} + +func Contains(c Checker, key, salt []byte) bool { + return c.Contains(Hash(key, salt)) +} + +type Checker interface { + Contains(hash uint64) bool +} + +func Hash(key, salt []byte) uint64 { + hash := sha256.New() + // sha256.Write never returns errors + _, _ = hash.Write(key) + _, _ = hash.Write(salt) + + output := make([]byte, 0, sha256.Size) + return binary.BigEndian.Uint64(hash.Sum(output)) +} diff --git a/avalanchego/utils/bloom/hasher_test.go b/avalanchego/utils/bloom/hasher_test.go new file mode 100644 index 00000000..b262f1db --- /dev/null +++ b/avalanchego/utils/bloom/hasher_test.go @@ -0,0 +1,34 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package bloom + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/units" +) + +func TestCollisionResistance(t *testing.T) { + require := require.New(t) + + f, err := New(8, 16*units.KiB) + require.NoError(err) + + Add(f, []byte("hello world?"), []byte("so salty")) + collision := Contains(f, []byte("hello world!"), []byte("so salty")) + require.False(collision) +} + +func BenchmarkHash(b *testing.B) { + key := ids.GenerateTestID() + salt := ids.GenerateTestID() + + b.ResetTimer() + for i := 0; i < b.N; i++ { + Hash(key[:], salt[:]) + } +} diff --git a/avalanchego/utils/bloom/metrics.go b/avalanchego/utils/bloom/metrics.go new file mode 100644 index 00000000..7e33edc5 --- /dev/null +++ b/avalanchego/utils/bloom/metrics.go @@ -0,0 +1,70 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package bloom + +import ( + "github.com/prometheus/client_golang/prometheus" + + "github.com/ava-labs/avalanchego/utils" +) + +// Metrics is a collection of commonly useful metrics when using a long-lived +// bloom filter. 
+type Metrics struct { + Count prometheus.Gauge + NumHashes prometheus.Gauge + NumEntries prometheus.Gauge + MaxCount prometheus.Gauge + ResetCount prometheus.Counter +} + +func NewMetrics( + namespace string, + registerer prometheus.Registerer, +) (*Metrics, error) { + m := &Metrics{ + Count: prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Name: "count", + Help: "Number of additions that have been performed to the bloom", + }), + NumHashes: prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Name: "hashes", + Help: "Number of hashes in the bloom", + }), + NumEntries: prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Name: "entries", + Help: "Number of bytes allocated to slots in the bloom", + }), + MaxCount: prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Name: "max_count", + Help: "Maximum number of additions that should be performed to the bloom before resetting", + }), + ResetCount: prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Name: "reset_count", + Help: "Number times the bloom has been reset", + }), + } + err := utils.Err( + registerer.Register(m.Count), + registerer.Register(m.NumHashes), + registerer.Register(m.NumEntries), + registerer.Register(m.MaxCount), + registerer.Register(m.ResetCount), + ) + return m, err +} + +// Reset the metrics to align with the provided bloom filter and max count. +func (m *Metrics) Reset(newFilter *Filter, maxCount int) { + m.Count.Set(float64(newFilter.Count())) + m.NumHashes.Set(float64(len(newFilter.hashSeeds))) + m.NumEntries.Set(float64(len(newFilter.entries))) + m.MaxCount.Set(float64(maxCount)) + m.ResetCount.Inc() +} diff --git a/avalanchego/utils/bloom/optimal.go b/avalanchego/utils/bloom/optimal.go new file mode 100644 index 00000000..e5ec228d --- /dev/null +++ b/avalanchego/utils/bloom/optimal.go @@ -0,0 +1,111 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. 
+// See the file LICENSE for licensing terms. + +package bloom + +import "math" + +const ln2Squared = math.Ln2 * math.Ln2 + +// OptimalParameters calculates the optimal [numHashes] and [numEntries] that +// should be allocated for a bloom filter which will contain [count] and target +// [falsePositiveProbability]. +func OptimalParameters(count int, falsePositiveProbability float64) (int, int) { + numEntries := OptimalEntries(count, falsePositiveProbability) + numHashes := OptimalHashes(numEntries, count) + return numHashes, numEntries +} + +// OptimalHashes calculates the number of hashes which will minimize the false +// positive probability of a bloom filter with [numEntries] after [count] +// additions. +// +// It is guaranteed to return a value in the range [minHashes, maxHashes]. +// +// ref: https://en.wikipedia.org/wiki/Bloom_filter +func OptimalHashes(numEntries, count int) int { + switch { + case numEntries < minEntries: + return minHashes + case count <= 0: + return maxHashes + } + + numHashes := math.Ceil(float64(numEntries) * bitsPerByte * math.Ln2 / float64(count)) + // Converting a floating-point value to an int produces an undefined value + // if the floating-point value cannot be represented as an int. To avoid + // this undefined behavior, we explicitly check against MaxInt here. + // + // ref: https://go.dev/ref/spec#Conversions + if numHashes >= maxHashes { + return maxHashes + } + return max(int(numHashes), minHashes) +} + +// OptimalEntries calculates the optimal number of entries to use when creating +// a new Bloom filter when targeting a size of [count] with +// [falsePositiveProbability] assuming that the optimal number of hashes is +// used. +// +// It is guaranteed to return a value in the range [minEntries, MaxInt].
+// +// ref: https://en.wikipedia.org/wiki/Bloom_filter +func OptimalEntries(count int, falsePositiveProbability float64) int { + switch { + case count <= 0: + return minEntries + case falsePositiveProbability >= 1: + return minEntries + case falsePositiveProbability <= 0: + return math.MaxInt + } + + entriesInBits := -float64(count) * math.Log(falsePositiveProbability) / ln2Squared + entries := (entriesInBits + bitsPerByte - 1) / bitsPerByte + // Converting a floating-point value to an int produces an undefined value + // if the floating-point value cannot be represented as an int. To avoid + // this undefined behavior, we explicitly check against MaxInt here. + // + // ref: https://go.dev/ref/spec#Conversions + if entries >= math.MaxInt { + return math.MaxInt + } + return max(int(entries), minEntries) +} + +// EstimateCount estimates the number of additions a bloom filter with +// [numHashes] and [numEntries] must have to reach [falsePositiveProbability]. +// This is derived by inverting a lower-bound on the probability of false +// positives. For values where numBits >> numHashes, the predicted probability +// is fairly accurate. +// +// It is guaranteed to return a value in the range [0, MaxInt]. +// +// ref: https://tsapps.nist.gov/publication/get_pdf.cfm?pub_id=903775 +func EstimateCount(numHashes, numEntries int, falsePositiveProbability float64) int { + switch { + case numHashes < minHashes: + return 0 + case numEntries < minEntries: + return 0 + case falsePositiveProbability <= 0: + return 0 + case falsePositiveProbability >= 1: + return math.MaxInt + } + + invNumHashes := 1 / float64(numHashes) + numBits := float64(numEntries * 8) + exp := 1 - math.Pow(falsePositiveProbability, invNumHashes) + count := math.Ceil(-math.Log(exp) * numBits * invNumHashes) + // Converting a floating-point value to an int produces an undefined value + // if the floating-point value cannot be represented as an int.
To avoid + // this undefined behavior, we explicitly check against MaxInt here. + // + // ref: https://go.dev/ref/spec#Conversions + if count >= math.MaxInt { + return math.MaxInt + } + return int(count) +} diff --git a/avalanchego/utils/bloom/optimal_test.go b/avalanchego/utils/bloom/optimal_test.go new file mode 100644 index 00000000..b52356d5 --- /dev/null +++ b/avalanchego/utils/bloom/optimal_test.go @@ -0,0 +1,203 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package bloom + +import ( + "fmt" + "math" + "testing" + + "github.com/stretchr/testify/require" +) + +const largestFloat64LessThan1 float64 = 1 - 1e-16 + +func TestOptimalHashes(t *testing.T) { + tests := []struct { + numEntries int + count int + expectedHashes int + }{ + { // invalid params + numEntries: 0, + count: 1024, + expectedHashes: minHashes, + }, + { // invalid params + numEntries: 1024, + count: 0, + expectedHashes: maxHashes, + }, + { + numEntries: math.MaxInt, + count: 1, + expectedHashes: maxHashes, + }, + { + numEntries: 1, + count: math.MaxInt, + expectedHashes: minHashes, + }, + { + numEntries: 1024, + count: 1024, + expectedHashes: 6, + }, + } + for _, test := range tests { + t.Run(fmt.Sprintf("%d_%d", test.numEntries, test.count), func(t *testing.T) { + hashes := OptimalHashes(test.numEntries, test.count) + require.Equal(t, test.expectedHashes, hashes) + }) + } +} + +func TestOptimalEntries(t *testing.T) { + tests := []struct { + count int + falsePositiveProbability float64 + expectedEntries int + }{ + { // invalid params + count: 0, + falsePositiveProbability: .5, + expectedEntries: minEntries, + }, + { // invalid params + count: 1, + falsePositiveProbability: 0, + expectedEntries: math.MaxInt, + }, + { // invalid params + count: 1, + falsePositiveProbability: 1, + expectedEntries: minEntries, + }, + { + count: math.MaxInt, + falsePositiveProbability: math.SmallestNonzeroFloat64, + expectedEntries: math.MaxInt, + 
}, + { + count: 1024, + falsePositiveProbability: largestFloat64LessThan1, + expectedEntries: minEntries, + }, + { + count: 1024, + falsePositiveProbability: .01, + expectedEntries: 1227, + }, + } + for _, test := range tests { + t.Run(fmt.Sprintf("%d_%f", test.count, test.falsePositiveProbability), func(t *testing.T) { + entries := OptimalEntries(test.count, test.falsePositiveProbability) + require.Equal(t, test.expectedEntries, entries) + }) + } +} + +func TestEstimateEntries(t *testing.T) { + tests := []struct { + numHashes int + numEntries int + falsePositiveProbability float64 + expectedEntries int + }{ + { // invalid params + numHashes: 0, + numEntries: 2_048, + falsePositiveProbability: .5, + expectedEntries: 0, + }, + { // invalid params + numHashes: 1, + numEntries: 0, + falsePositiveProbability: .5, + expectedEntries: 0, + }, + { // invalid params + numHashes: 1, + numEntries: 1, + falsePositiveProbability: 2, + expectedEntries: math.MaxInt, + }, + { // invalid params + numHashes: 1, + numEntries: 1, + falsePositiveProbability: -1, + expectedEntries: 0, + }, + { + numHashes: 8, + numEntries: 2_048, + falsePositiveProbability: 0, + expectedEntries: 0, + }, + { // params from OptimalParameters(10_000, .01) + numHashes: 7, + numEntries: 11_982, + falsePositiveProbability: .01, + expectedEntries: 9_993, + }, + { // params from OptimalParameters(100_000, .001) + numHashes: 10, + numEntries: 179_720, + falsePositiveProbability: .001, + expectedEntries: 100_000, + }, + { // params from OptimalParameters(10_000, .01) + numHashes: 7, + numEntries: 11_982, + falsePositiveProbability: .05, + expectedEntries: 14_449, + }, + { // params from OptimalParameters(10_000, .01) + numHashes: 7, + numEntries: 11_982, + falsePositiveProbability: 1, + expectedEntries: math.MaxInt, + }, + { // params from OptimalParameters(10_000, .01) + numHashes: 7, + numEntries: 11_982, + falsePositiveProbability: math.SmallestNonzeroFloat64, + expectedEntries: 0, + }, + { // params from 
OptimalParameters(10_000, .01) + numHashes: 7, + numEntries: 11_982, + falsePositiveProbability: largestFloat64LessThan1, + expectedEntries: math.MaxInt, + }, + } + for _, test := range tests { + t.Run(fmt.Sprintf("%d_%d_%f", test.numHashes, test.numEntries, test.falsePositiveProbability), func(t *testing.T) { + entries := EstimateCount(test.numHashes, test.numEntries, test.falsePositiveProbability) + require.Equal(t, test.expectedEntries, entries) + }) + } +} + +func FuzzOptimalHashes(f *testing.F) { + f.Fuzz(func(t *testing.T, numEntries, count int) { + hashes := OptimalHashes(numEntries, count) + require.GreaterOrEqual(t, hashes, minHashes) + require.LessOrEqual(t, hashes, maxHashes) + }) +} + +func FuzzOptimalEntries(f *testing.F) { + f.Fuzz(func(t *testing.T, count int, falsePositiveProbability float64) { + entries := OptimalEntries(count, falsePositiveProbability) + require.GreaterOrEqual(t, entries, minEntries) + }) +} + +func FuzzEstimateEntries(f *testing.F) { + f.Fuzz(func(t *testing.T, numHashes, numEntries int, falsePositiveProbability float64) { + entries := EstimateCount(numHashes, numEntries, falsePositiveProbability) + require.GreaterOrEqual(t, entries, 0) + }) +} diff --git a/avalanchego/utils/bloom/read_filter.go b/avalanchego/utils/bloom/read_filter.go new file mode 100644 index 00000000..075d77ed --- /dev/null +++ b/avalanchego/utils/bloom/read_filter.go @@ -0,0 +1,65 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package bloom + +import ( + "encoding/binary" + "fmt" +) + +var ( + EmptyFilter = &ReadFilter{ + hashSeeds: make([]uint64, minHashes), + entries: make([]byte, minEntries), + } + FullFilter = &ReadFilter{ + hashSeeds: make([]uint64, minHashes), + entries: make([]byte, minEntries), + } +) + +func init() { + for i := range FullFilter.entries { + FullFilter.entries[i] = 0xFF + } +} + +type ReadFilter struct { + hashSeeds []uint64 + entries []byte +} + +// Parse [bytes] into a read-only bloom filter. +func Parse(bytes []byte) (*ReadFilter, error) { + if len(bytes) == 0 { + return nil, errInvalidNumHashes + } + numHashes := bytes[0] + entriesOffset := 1 + int(numHashes)*bytesPerUint64 + switch { + case numHashes < minHashes: + return nil, fmt.Errorf("%w: %d < %d", errTooFewHashes, numHashes, minHashes) + case numHashes > maxHashes: + return nil, fmt.Errorf("%w: %d > %d", errTooManyHashes, numHashes, maxHashes) + case len(bytes) < entriesOffset+minEntries: // numEntries = len(bytes) - entriesOffset + return nil, errTooFewEntries + } + + f := &ReadFilter{ + hashSeeds: make([]uint64, numHashes), + entries: bytes[entriesOffset:], + } + for i := range f.hashSeeds { + f.hashSeeds[i] = binary.BigEndian.Uint64(bytes[1+i*bytesPerUint64:]) + } + return f, nil +} + +func (f *ReadFilter) Contains(hash uint64) bool { + return contains(f.hashSeeds, f.entries, hash) +} + +func (f *ReadFilter) Marshal() []byte { + return marshal(f.hashSeeds, f.entries) +} diff --git a/avalanchego/utils/bloom/read_filter_test.go b/avalanchego/utils/bloom/read_filter_test.go new file mode 100644 index 00000000..8ea83db0 --- /dev/null +++ b/avalanchego/utils/bloom/read_filter_test.go @@ -0,0 +1,112 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package bloom + +import ( + "math" + "testing" + + "github.com/stretchr/testify/require" +) + +func NewMaliciousFilter(numHashes, numEntries int) *Filter { + f := &Filter{ + numBits: uint64(numEntries * bitsPerByte), + hashSeeds: make([]uint64, numHashes), + entries: make([]byte, numEntries), + count: 0, + } + for i := range f.entries { + f.entries[i] = math.MaxUint8 + } + return f +} + +func TestParseErrors(t *testing.T) { + tests := []struct { + bytes []byte + err error + }{ + { + bytes: nil, + err: errInvalidNumHashes, + }, + { + bytes: NewMaliciousFilter(0, 1).Marshal(), + err: errTooFewHashes, + }, + { + bytes: NewMaliciousFilter(17, 1).Marshal(), + err: errTooManyHashes, + }, + { + bytes: NewMaliciousFilter(1, 0).Marshal(), + err: errTooFewEntries, + }, + { + bytes: []byte{ + 0x01, // num hashes = 1 + }, + err: errTooFewEntries, + }, + } + for _, test := range tests { + t.Run(test.err.Error(), func(t *testing.T) { + _, err := Parse(test.bytes) + require.ErrorIs(t, err, test.err) + }) + } +} + +func BenchmarkParse(b *testing.B) { + f, err := New(OptimalParameters(10_000, .01)) + require.NoError(b, err) + bytes := f.Marshal() + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _ = Parse(bytes) + } +} + +func BenchmarkContains(b *testing.B) { + f := NewMaliciousFilter(maxHashes, 1) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + f.Contains(1) + } +} + +func FuzzParseThenMarshal(f *testing.F) { + f.Fuzz(func(t *testing.T, bytes []byte) { + f, err := Parse(bytes) + if err != nil { + return + } + + marshalledBytes := marshal(f.hashSeeds, f.entries) + require.Equal(t, bytes, marshalledBytes) + }) +} + +func FuzzMarshalThenParse(f *testing.F) { + f.Fuzz(func(t *testing.T, numHashes int, entries []byte) { + require := require.New(t) + + hashSeeds, err := newHashSeeds(numHashes) + if err != nil { + return + } + if len(entries) < minEntries { + return + } + + marshalledBytes := marshal(hashSeeds, entries) + rf, err := Parse(marshalledBytes) + require.NoError(err) 
+ require.Equal(hashSeeds, rf.hashSeeds) + require.Equal(entries, rf.entries) + }) +} diff --git a/avalanchego/utils/buffer/bounded_nonblocking_queue.go b/avalanchego/utils/buffer/bounded_nonblocking_queue.go index 0b5d5f94..f8b0030e 100644 --- a/avalanchego/utils/buffer/bounded_nonblocking_queue.go +++ b/avalanchego/utils/buffer/bounded_nonblocking_queue.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package buffer diff --git a/avalanchego/utils/buffer/bounded_nonblocking_queue_test.go b/avalanchego/utils/buffer/bounded_nonblocking_queue_test.go index 402a089b..323ea925 100644 --- a/avalanchego/utils/buffer/bounded_nonblocking_queue_test.go +++ b/avalanchego/utils/buffer/bounded_nonblocking_queue_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package buffer @@ -14,7 +14,7 @@ func TestNewBoundedQueue(t *testing.T) { // Case: maxSize < 1 _, err := NewBoundedQueue[bool](0, nil) - require.Error(err) + require.ErrorIs(err, errInvalidMaxSize) // Case: maxSize == 1 and nil onEvict b, err := NewBoundedQueue[bool](1, nil) @@ -36,7 +36,7 @@ func TestBoundedQueue(t *testing.T) { b, err := NewBoundedQueue(maxSize, onEvict) require.NoError(err) - require.Equal(0, b.Len()) + require.Zero(b.Len()) // Fill the queue for i := 0; i < maxSize; i++ { @@ -44,7 +44,7 @@ func TestBoundedQueue(t *testing.T) { require.Equal(i+1, b.Len()) got, ok := b.Peek() require.True(ok) - require.Equal(0, got) + require.Zero(got) got, ok = b.Index(i) require.True(ok) require.Equal(i, got) @@ -71,7 +71,7 @@ func TestBoundedQueue(t *testing.T) { require.False(ok) _, ok = b.Index(0) require.False(ok) - require.Equal(0, b.Len()) + require.Zero(b.Len()) require.Empty(b.List()) // Fill the queue again @@ -131,7 +131,7 @@ func TestBoundedQueue(t *testing.T) { // Queue is empty require.Empty(b.List()) - require.Equal(0, b.Len()) + require.Zero(b.Len()) require.Equal([]int{0, 1, 2}, evicted) _, ok = b.Pop() require.False(ok) diff --git a/avalanchego/utils/buffer/unbounded_blocking_deque.go b/avalanchego/utils/buffer/unbounded_blocking_deque.go index 078d8d90..a6c7fb66 100644 --- a/avalanchego/utils/buffer/unbounded_blocking_deque.go +++ b/avalanchego/utils/buffer/unbounded_blocking_deque.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package buffer diff --git a/avalanchego/utils/buffer/unbounded_blocking_deque_test.go b/avalanchego/utils/buffer/unbounded_blocking_deque_test.go index 054d3a2e..1f22db99 100644 --- a/avalanchego/utils/buffer/unbounded_blocking_deque_test.go +++ b/avalanchego/utils/buffer/unbounded_blocking_deque_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. 
All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package buffer diff --git a/avalanchego/utils/buffer/unbounded_deque.go b/avalanchego/utils/buffer/unbounded_deque.go index 336f0869..873f33f1 100644 --- a/avalanchego/utils/buffer/unbounded_deque.go +++ b/avalanchego/utils/buffer/unbounded_deque.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package buffer diff --git a/avalanchego/utils/buffer/unbounded_deque_test.go b/avalanchego/utils/buffer/unbounded_deque_test.go index dcbfbe1c..dfdac4a5 100644 --- a/avalanchego/utils/buffer/unbounded_deque_test.go +++ b/avalanchego/utils/buffer/unbounded_deque_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package buffer @@ -13,11 +13,11 @@ func TestUnboundedDeque_InitialCapGreaterThanMin(t *testing.T) { require := require.New(t) bIntf := NewUnboundedDeque[int](10) - b, ok := bIntf.(*unboundedSliceDeque[int]) - require.True(ok) + require.IsType(&unboundedSliceDeque[int]{}, bIntf) + b := bIntf.(*unboundedSliceDeque[int]) require.Empty(b.List()) - require.Equal(0, b.Len()) - _, ok = b.Index(0) + require.Zero(b.Len()) + _, ok := b.Index(0) require.False(ok) b.PushLeft(1) @@ -30,7 +30,7 @@ func TestUnboundedDeque_InitialCapGreaterThanMin(t *testing.T) { require.False(ok) got, ok = b.PopLeft() - require.Equal(0, b.Len()) + require.Zero(b.Len()) require.True(ok) require.Equal(1, got) _, ok = b.Index(0) @@ -44,7 +44,7 @@ func TestUnboundedDeque_InitialCapGreaterThanMin(t *testing.T) { require.Equal(1, got) got, ok = b.PopRight() - require.Equal(0, b.Len()) + require.Zero(b.Len()) require.True(ok) require.Equal(1, got) require.Empty(b.List()) @@ -59,7 +59,7 @@ func TestUnboundedDeque_InitialCapGreaterThanMin(t *testing.T) { require.Equal(1, got) got, ok = b.PopRight() - require.Equal(0, b.Len()) + require.Zero(b.Len()) require.True(ok) require.Equal(1, got) require.Empty(b.List()) @@ -74,7 +74,7 @@ func TestUnboundedDeque_InitialCapGreaterThanMin(t *testing.T) { require.Equal(1, got) got, ok = b.PopLeft() - require.Equal(0, b.Len()) + require.Zero(b.Len()) require.True(ok) require.Equal(1, got) require.Empty(b.List()) @@ -102,7 +102,7 @@ func TestUnboundedDeque_InitialCapGreaterThanMin(t *testing.T) { require.Equal(1, got) got, ok = b.PopLeft() - require.Equal(0, b.Len()) + require.Zero(b.Len()) require.True(ok) require.Equal(1, got) require.Empty(b.List()) @@ -130,7 +130,7 @@ func TestUnboundedDeque_InitialCapGreaterThanMin(t *testing.T) { require.Equal(1, got) got, ok = b.PopRight() - require.Equal(0, b.Len()) + require.Zero(b.Len()) require.True(ok) require.Equal(1, got) require.Empty(b.List()) @@ -158,7 +158,7 @@ func TestUnboundedDeque_InitialCapGreaterThanMin(t 
*testing.T) { require.Equal(2, got) got, ok = b.PopLeft() - require.Equal(0, b.Len()) + require.Zero(b.Len()) require.True(ok) require.Equal(2, got) require.Empty(b.List()) @@ -186,7 +186,7 @@ func TestUnboundedDeque_InitialCapGreaterThanMin(t *testing.T) { require.Equal(2, got) got, ok = b.PopLeft() - require.Equal(0, b.Len()) + require.Zero(b.Len()) require.True(ok) require.Equal(2, got) require.Empty(b.List()) @@ -214,7 +214,7 @@ func TestUnboundedDeque_InitialCapGreaterThanMin(t *testing.T) { require.Equal(2, got) got, ok = b.PopRight() - require.Equal(0, b.Len()) + require.Zero(b.Len()) require.True(ok) require.Equal(2, got) require.Empty(b.List()) @@ -233,16 +233,16 @@ func TestUnboundedSliceDequePushLeftPopLeft(t *testing.T) { // Starts empty. bIntf := NewUnboundedDeque[int](2) - b, ok := bIntf.(*unboundedSliceDeque[int]) - require.True(ok) - require.Equal(0, bIntf.Len()) - require.Equal(2, len(b.data)) - require.Equal(0, b.left) + require.IsType(&unboundedSliceDeque[int]{}, bIntf) + b := bIntf.(*unboundedSliceDeque[int]) + require.Zero(bIntf.Len()) + require.Len(b.data, 2) + require.Zero(b.left) require.Equal(1, b.right) require.Empty(b.List()) // slice is [EMPTY] - _, ok = b.PopLeft() + _, ok := b.PopLeft() require.False(ok) _, ok = b.PeekLeft() require.False(ok) @@ -251,7 +251,7 @@ func TestUnboundedSliceDequePushLeftPopLeft(t *testing.T) { b.PushLeft(1) // slice is [1,EMPTY] require.Equal(1, b.Len()) - require.Equal(2, len(b.data)) + require.Len(b.data, 2) require.Equal(1, b.left) require.Equal(1, b.right) require.Equal([]int{1}, b.List()) @@ -267,7 +267,7 @@ func TestUnboundedSliceDequePushLeftPopLeft(t *testing.T) { // This causes a resize b.PushLeft(2) // slice is [2,1,EMPTY,EMPTY] require.Equal(2, b.Len()) - require.Equal(4, len(b.data)) + require.Len(b.data, 4) require.Equal(3, b.left) require.Equal(2, b.right) require.Equal([]int{2, 1}, b.List()) @@ -289,7 +289,7 @@ func TestUnboundedSliceDequePushLeftPopLeft(t *testing.T) { // Tests left moving 
left with no wrap around. b.PushLeft(3) // slice is [2,1,EMPTY,3] require.Equal(3, b.Len()) - require.Equal(4, len(b.data)) + require.Len(b.data, 4) require.Equal(2, b.left) require.Equal(2, b.right) require.Equal([]int{3, 2, 1}, b.List()) @@ -318,7 +318,7 @@ func TestUnboundedSliceDequePushLeftPopLeft(t *testing.T) { require.True(ok) require.Equal(3, got) require.Equal(2, b.Len()) - require.Equal(4, len(b.data)) + require.Len(b.data, 4) require.Equal(3, b.left) require.Equal(2, b.right) require.Equal([]int{2, 1}, b.List()) @@ -342,8 +342,8 @@ func TestUnboundedSliceDequePushLeftPopLeft(t *testing.T) { require.True(ok) require.Equal(2, got) require.Equal(1, b.Len()) - require.Equal(4, len(b.data)) - require.Equal(0, b.left) + require.Len(b.data, 4) + require.Zero(b.left) require.Equal(2, b.right) require.Equal([]int{1}, b.List()) got, ok = b.Index(0) @@ -361,7 +361,7 @@ func TestUnboundedSliceDequePushLeftPopLeft(t *testing.T) { // Test left wrapping around to the right side. b.PushLeft(2) // slice is [2,1,EMPTY,EMPTY] require.Equal(2, b.Len()) - require.Equal(4, len(b.data)) + require.Len(b.data, 4) require.Equal(3, b.left) require.Equal(2, b.right) require.Equal([]int{2, 1}, b.List()) @@ -384,8 +384,8 @@ func TestUnboundedSliceDequePushLeftPopLeft(t *testing.T) { require.True(ok) require.Equal(2, got) require.Equal(1, b.Len()) - require.Equal(4, len(b.data)) - require.Equal(0, b.left) + require.Len(b.data, 4) + require.Zero(b.left) require.Equal(2, b.right) require.Equal([]int{1}, b.List()) got, ok = b.Index(0) @@ -395,8 +395,8 @@ func TestUnboundedSliceDequePushLeftPopLeft(t *testing.T) { got, ok = b.PopLeft() // slice is [EMPTY,EMPTY,EMPTY,EMPTY] require.True(ok) require.Equal(1, got) - require.Equal(0, b.Len()) - require.Equal(4, len(b.data)) + require.Zero(b.Len()) + require.Len(b.data, 4) require.Equal(1, b.left) require.Equal(2, b.right) require.Empty(b.List()) @@ -416,16 +416,16 @@ func TestUnboundedSliceDequePushRightPopRight(t *testing.T) { // Starts 
empty. bIntf := NewUnboundedDeque[int](2) - b, ok := bIntf.(*unboundedSliceDeque[int]) - require.True(ok) - require.Equal(0, bIntf.Len()) - require.Equal(2, len(b.data)) - require.Equal(0, b.left) + require.IsType(&unboundedSliceDeque[int]{}, bIntf) + b := bIntf.(*unboundedSliceDeque[int]) + require.Zero(bIntf.Len()) + require.Len(b.data, 2) + require.Zero(b.left) require.Equal(1, b.right) require.Empty(b.List()) // slice is [EMPTY] - _, ok = b.PopRight() + _, ok := b.PopRight() require.False(ok) _, ok = b.PeekLeft() require.False(ok) @@ -434,9 +434,9 @@ func TestUnboundedSliceDequePushRightPopRight(t *testing.T) { b.PushRight(1) // slice is [1,EMPTY] require.Equal(1, b.Len()) - require.Equal(2, len(b.data)) - require.Equal(0, b.left) - require.Equal(0, b.right) + require.Len(b.data, 2) + require.Zero(b.left) + require.Zero(b.right) require.Equal([]int{1}, b.List()) got, ok := b.Index(0) require.True(ok) @@ -453,7 +453,7 @@ func TestUnboundedSliceDequePushRightPopRight(t *testing.T) { // This causes a resize b.PushRight(2) // slice is [1,2,EMPTY,EMPTY] require.Equal(2, b.Len()) - require.Equal(4, len(b.data)) + require.Len(b.data, 4) require.Equal(3, b.left) require.Equal(2, b.right) require.Equal([]int{1, 2}, b.List()) @@ -475,7 +475,7 @@ func TestUnboundedSliceDequePushRightPopRight(t *testing.T) { // Tests right moving right with no wrap around b.PushRight(3) // slice is [1,2,3,EMPTY] require.Equal(3, b.Len()) - require.Equal(4, len(b.data)) + require.Len(b.data, 4) require.Equal(3, b.left) require.Equal(3, b.right) require.Equal([]int{1, 2, 3}, b.List()) @@ -502,7 +502,7 @@ func TestUnboundedSliceDequePushRightPopRight(t *testing.T) { require.True(ok) require.Equal(3, got) require.Equal(2, b.Len()) - require.Equal(4, len(b.data)) + require.Len(b.data, 4) require.Equal(3, b.left) require.Equal(2, b.right) require.Equal([]int{1, 2}, b.List()) @@ -527,7 +527,7 @@ func TestUnboundedSliceDequePushRightPopRight(t *testing.T) { require.True(ok) require.Equal(2, got) 
require.Equal(1, b.Len()) - require.Equal(4, len(b.data)) + require.Len(b.data, 4) require.Equal(3, b.left) require.Equal(1, b.right) require.Equal([]int{1}, b.List()) @@ -548,12 +548,12 @@ func TestUnboundedSliceDequePushRightPopRight(t *testing.T) { got, ok = b.PopRight() // slice is [EMPTY,EMPTY,EMPTY,EMPTY] require.True(ok) require.Equal(1, got) - require.Equal(0, b.Len()) - require.Equal(4, len(b.data)) + require.Zero(b.Len()) + require.Len(b.data, 4) require.Equal(3, b.left) - require.Equal(0, b.right) + require.Zero(b.right) require.Empty(b.List()) - require.Equal(0, b.Len()) + require.Zero(b.Len()) _, ok = b.Index(0) require.False(ok) @@ -566,9 +566,9 @@ func TestUnboundedSliceDequePushRightPopRight(t *testing.T) { b.PushLeft(1) // slice is [EMPTY,EMPTY,EMPTY,1] require.Equal(1, b.Len()) - require.Equal(4, len(b.data)) + require.Len(b.data, 4) require.Equal(2, b.left) - require.Equal(0, b.right) + require.Zero(b.right) require.Equal([]int{1}, b.List()) got, ok = b.Index(0) require.True(ok) @@ -586,12 +586,12 @@ func TestUnboundedSliceDequePushRightPopRight(t *testing.T) { got, ok = b.PopRight() // slice is [EMPTY,EMPTY,EMPTY,EMPTY] require.True(ok) require.Equal(1, got) - require.Equal(0, b.Len()) - require.Equal(4, len(b.data)) + require.Zero(b.Len()) + require.Len(b.data, 4) require.Equal(2, b.left) require.Equal(3, b.right) require.Empty(b.List()) - require.Equal(0, b.Len()) + require.Zero(b.Len()) _, ok = b.Index(0) require.False(ok) @@ -604,9 +604,9 @@ func TestUnboundedSliceDequePushRightPopRight(t *testing.T) { // Tests right wrapping around to the left b.PushRight(2) // slice is [EMPTY,EMPTY,EMPTY,2] require.Equal(1, b.Len()) - require.Equal(4, len(b.data)) + require.Len(b.data, 4) require.Equal(2, b.left) - require.Equal(0, b.right) + require.Zero(b.right) require.Equal([]int{2}, b.List()) got, ok = b.Index(0) require.True(ok) @@ -648,7 +648,7 @@ func FuzzUnboundedSliceDeque(f *testing.F) { } list := b.List() - require.Equal(len(input), len(list)) 
+ require.Len(list, len(input)) for i, n := range input { require.Equal(n, list[i]) } diff --git a/avalanchego/utils/bytes.go b/avalanchego/utils/bytes.go index 31baeeb1..a32f353c 100644 --- a/avalanchego/utils/bytes.go +++ b/avalanchego/utils/bytes.go @@ -1,11 +1,9 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package utils -import ( - "crypto/rand" -) +import "crypto/rand" // RandomBytes returns a slice of n random bytes // Intended for use in testing diff --git a/avalanchego/utils/cb58/cb58.go b/avalanchego/utils/cb58/cb58.go index 23cf9c49..27d8265c 100644 --- a/avalanchego/utils/cb58/cb58.go +++ b/avalanchego/utils/cb58/cb58.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package cb58 @@ -6,6 +6,7 @@ package cb58 import ( "bytes" "errors" + "fmt" "math" "github.com/mr-tron/base58/base58" @@ -13,14 +14,13 @@ import ( "github.com/ava-labs/avalanchego/utils/hashing" ) -const ( - checksumLen = 4 -) +const checksumLen = 4 var ( + ErrBase58Decoding = errors.New("base58 decoding error") + ErrMissingChecksum = errors.New("input string is smaller than the checksum size") + ErrBadChecksum = errors.New("invalid input checksum") errEncodingOverFlow = errors.New("encoding overflow") - errMissingChecksum = errors.New("input string is smaller than the checksum size") - errBadChecksum = errors.New("invalid input checksum") ) // Encode [bytes] to a string using cb58 format. 
@@ -41,16 +41,16 @@ func Encode(bytes []byte) (string, error) { func Decode(str string) ([]byte, error) { decodedBytes, err := base58.Decode(str) if err != nil { - return nil, err + return nil, fmt.Errorf("%w: %w", ErrBase58Decoding, err) } if len(decodedBytes) < checksumLen { - return nil, errMissingChecksum + return nil, ErrMissingChecksum } // Verify the checksum rawBytes := decodedBytes[:len(decodedBytes)-checksumLen] checksum := decodedBytes[len(decodedBytes)-checksumLen:] if !bytes.Equal(checksum, hashing.Checksum(rawBytes, checksumLen)) { - return nil, errBadChecksum + return nil, ErrBadChecksum } return rawBytes, nil } diff --git a/avalanchego/utils/cb58/cb58_test.go b/avalanchego/utils/cb58/cb58_test.go index 59710b62..9d28c6f9 100644 --- a/avalanchego/utils/cb58/cb58_test.go +++ b/avalanchego/utils/cb58/cb58_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package cb58 @@ -12,6 +12,8 @@ import ( // Test encoding bytes to a string and decoding back to bytes func TestEncodeDecode(t *testing.T) { + require := require.New(t) + type test struct { bytes []byte str string @@ -44,20 +46,14 @@ func TestEncodeDecode(t *testing.T) { for _, test := range tests { // Encode the bytes strResult, err := Encode(test.bytes) - if err != nil { - t.Fatal(err) - } + require.NoError(err) // Make sure the string repr. 
is what we expected - require.Equal(t, test.str, strResult) + require.Equal(test.str, strResult) // Decode the string bytesResult, err := Decode(strResult) - if err != nil { - t.Fatal(err) - } + require.NoError(err) // Make sure we got the same bytes back - if !bytes.Equal(test.bytes, bytesResult) { - t.Fatal("bytes not symmetric") - } + require.True(bytes.Equal(test.bytes, bytesResult)) } } diff --git a/avalanchego/utils/compare/compare.go b/avalanchego/utils/compare/compare.go deleted file mode 100644 index 13ec52f3..00000000 --- a/avalanchego/utils/compare/compare.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package compare - -// Returns true iff the slices have the same elements, regardless of order. -func UnsortedEquals[T comparable](a, b []T) bool { - if len(a) != len(b) { - return false - } - m := make(map[T]int, len(a)) - for _, v := range a { - m[v]++ - } - for _, v := range b { - switch count := m[v]; count { - case 0: - // There were more instances of [v] in [b] than [a]. - return false - case 1: - delete(m, v) - default: - m[v] = count - 1 - } - } - return len(m) == 0 -} diff --git a/avalanchego/utils/compare/compare_test.go b/avalanchego/utils/compare/compare_test.go deleted file mode 100644 index e46bc838..00000000 --- a/avalanchego/utils/compare/compare_test.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package compare - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func TestUnsortedEquals(t *testing.T) { - require := require.New(t) - - require.True(UnsortedEquals([]int{}, []int{})) - require.True(UnsortedEquals(nil, []int{})) - require.True(UnsortedEquals([]int{}, nil)) - require.False(UnsortedEquals([]int{1}, nil)) - require.False(UnsortedEquals(nil, []int{1})) - require.True(UnsortedEquals([]int{1}, []int{1})) - require.False(UnsortedEquals([]int{1, 2}, []int{})) - require.False(UnsortedEquals([]int{1, 2}, []int{1})) - require.False(UnsortedEquals([]int{1}, []int{1, 2})) - require.True(UnsortedEquals([]int{2, 1}, []int{1, 2})) - require.True(UnsortedEquals([]int{1, 2}, []int{2, 1})) -} diff --git a/avalanchego/utils/compression/compressor.go b/avalanchego/utils/compression/compressor.go index f0848357..c8624f9b 100644 --- a/avalanchego/utils/compression/compressor.go +++ b/avalanchego/utils/compression/compressor.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package compression diff --git a/avalanchego/utils/compression/compressor_test.go b/avalanchego/utils/compression/compressor_test.go index 341fb4a2..fa8554a8 100644 --- a/avalanchego/utils/compression/compressor_test.go +++ b/avalanchego/utils/compression/compressor_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package compression @@ -9,10 +9,10 @@ import ( "runtime" "testing" - _ "embed" - "github.com/stretchr/testify/require" + _ "embed" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/units" ) @@ -48,8 +48,10 @@ func TestDecompressZipBombs(t *testing.T) { newCompressorFunc := newCompressorFuncs[compressionType] t.Run(compressionType.String(), func(t *testing.T) { + require := require.New(t) + compressor, err := newCompressorFunc(maxMessageSize) - require.NoError(t, err) + require.NoError(err) var ( beforeDecompressionStats runtime.MemStats @@ -59,12 +61,12 @@ func TestDecompressZipBombs(t *testing.T) { _, err = compressor.Decompress(zipBomb) runtime.ReadMemStats(&afterDecompressionStats) - require.ErrorIs(t, err, ErrDecompressedMsgTooLarge) + require.ErrorIs(err, ErrDecompressedMsgTooLarge) // Make sure that we didn't allocate significantly more memory than // the max message size. bytesAllocatedDuringDecompression := afterDecompressionStats.TotalAlloc - beforeDecompressionStats.TotalAlloc - require.Less(t, bytesAllocatedDuringDecompression, uint64(10*maxMessageSize)) + require.Less(bytesAllocatedDuringDecompression, uint64(10*maxMessageSize)) }) } } @@ -72,38 +74,40 @@ func TestDecompressZipBombs(t *testing.T) { func TestCompressDecompress(t *testing.T) { for compressionType, newCompressorFunc := range newCompressorFuncs { t.Run(compressionType.String(), func(t *testing.T) { + require := require.New(t) + data := utils.RandomBytes(4096) data2 := utils.RandomBytes(4096) compressor, err := newCompressorFunc(maxMessageSize) - require.NoError(t, err) + require.NoError(err) dataCompressed, err := compressor.Compress(data) - require.NoError(t, err) + require.NoError(err) data2Compressed, err := compressor.Compress(data2) - require.NoError(t, err) + require.NoError(err) dataDecompressed, err := compressor.Decompress(dataCompressed) - require.NoError(t, err) - require.EqualValues(t, data, dataDecompressed) + require.NoError(err) + 
require.Equal(data, dataDecompressed) data2Decompressed, err := compressor.Decompress(data2Compressed) - require.NoError(t, err) - require.EqualValues(t, data2, data2Decompressed) + require.NoError(err) + require.Equal(data2, data2Decompressed) dataDecompressed, err = compressor.Decompress(dataCompressed) - require.NoError(t, err) - require.EqualValues(t, data, dataDecompressed) + require.NoError(err) + require.Equal(data, dataDecompressed) maxMessage := utils.RandomBytes(maxMessageSize) maxMessageCompressed, err := compressor.Compress(maxMessage) - require.NoError(t, err) + require.NoError(err) maxMessageDecompressed, err := compressor.Decompress(maxMessageCompressed) - require.NoError(t, err) + require.NoError(err) - require.EqualValues(t, maxMessage, maxMessageDecompressed) + require.Equal(maxMessage, maxMessageDecompressed) }) } } @@ -114,21 +118,23 @@ func TestSizeLimiting(t *testing.T) { continue } t.Run(compressionType.String(), func(t *testing.T) { + require := require.New(t) + compressor, err := compressorFunc(maxMessageSize) - require.NoError(t, err) + require.NoError(err) data := make([]byte, maxMessageSize+1) _, err = compressor.Compress(data) // should be too large - require.Error(t, err) + require.ErrorIs(err, ErrMsgTooLarge) compressor2, err := compressorFunc(2 * maxMessageSize) - require.NoError(t, err) + require.NoError(err) dataCompressed, err := compressor2.Compress(data) - require.NoError(t, err) + require.NoError(err) _, err = compressor.Decompress(dataCompressed) // should be too large - require.Error(t, err) + require.ErrorIs(err, ErrDecompressedMsgTooLarge) }) } } @@ -142,9 +148,8 @@ func TestNewCompressorWithInvalidLimit(t *testing.T) { continue } t.Run(compressionType.String(), func(t *testing.T) { - require := require.New(t) _, err := compressorFunc(math.MaxInt64) - require.ErrorIs(err, ErrInvalidMaxSizeCompressor) + require.ErrorIs(t, err, ErrInvalidMaxSizeCompressor) }) } } @@ -170,7 +175,7 @@ func fuzzHelper(f *testing.F, 
compressionType Type) { compressor, err = NewZstdCompressor(maxMessageSize) require.NoError(f, err) default: - f.Fatal("Unknown compression type") + require.FailNow(f, "Unknown compression type") } f.Fuzz(func(t *testing.T, data []byte) { @@ -178,7 +183,7 @@ func fuzzHelper(f *testing.F, compressionType Type) { if len(data) > maxMessageSize { _, err := compressor.Compress(data) - require.Error(err) + require.ErrorIs(err, ErrMsgTooLarge) } compressed, err := compressor.Compress(data) @@ -205,12 +210,14 @@ func BenchmarkCompress(b *testing.B) { } for _, size := range sizes { b.Run(fmt.Sprintf("%s_%d", compressionType, size), func(b *testing.B) { + require := require.New(b) + bytes := utils.RandomBytes(size) compressor, err := newCompressorFunc(maxMessageSize) - require.NoError(b, err) + require.NoError(err) for n := 0; n < b.N; n++ { _, err := compressor.Compress(bytes) - require.NoError(b, err) + require.NoError(err) } }) } @@ -231,16 +238,18 @@ func BenchmarkDecompress(b *testing.B) { } for _, size := range sizes { b.Run(fmt.Sprintf("%s_%d", compressionType, size), func(b *testing.B) { + require := require.New(b) + bytes := utils.RandomBytes(size) compressor, err := newCompressorFunc(maxMessageSize) - require.NoError(b, err) + require.NoError(err) compressedBytes, err := compressor.Compress(bytes) - require.NoError(b, err) + require.NoError(err) for n := 0; n < b.N; n++ { _, err := compressor.Decompress(compressedBytes) - require.NoError(b, err) + require.NoError(err) } }) } diff --git a/avalanchego/utils/compression/gzip_compressor.go b/avalanchego/utils/compression/gzip_compressor.go index a17c46f6..da0b941a 100644 --- a/avalanchego/utils/compression/gzip_compressor.go +++ b/avalanchego/utils/compression/gzip_compressor.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package compression @@ -21,6 +21,7 @@ var ( ErrMsgTooLarge = errors.New("msg too large to be compressed") ) +// TODO: Remove once v1.11.x is out. type gzipCompressor struct { maxSize int64 gzipWriterPool sync.Pool diff --git a/avalanchego/utils/compression/no_compressor.go b/avalanchego/utils/compression/no_compressor.go index 1eb4237d..3c444c71 100644 --- a/avalanchego/utils/compression/no_compressor.go +++ b/avalanchego/utils/compression/no_compressor.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package compression diff --git a/avalanchego/utils/compression/no_compressor_test.go b/avalanchego/utils/compression/no_compressor_test.go index 3535dc55..3b99a101 100644 --- a/avalanchego/utils/compression/no_compressor_test.go +++ b/avalanchego/utils/compression/no_compressor_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package compression @@ -10,13 +10,15 @@ import ( ) func TestNoCompressor(t *testing.T) { + require := require.New(t) + data := []byte{1, 2, 3} compressor := NewNoCompressor() compressedBytes, err := compressor.Compress(data) - require.NoError(t, err) - require.EqualValues(t, data, compressedBytes) + require.NoError(err) + require.Equal(data, compressedBytes) decompressedBytes, err := compressor.Decompress(compressedBytes) - require.NoError(t, err) - require.EqualValues(t, data, decompressedBytes) + require.NoError(err) + require.Equal(data, decompressedBytes) } diff --git a/avalanchego/utils/compression/type.go b/avalanchego/utils/compression/type.go index fd58a21f..09b4d64c 100644 --- a/avalanchego/utils/compression/type.go +++ b/avalanchego/utils/compression/type.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
+// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package compression @@ -14,7 +14,7 @@ type Type byte const ( TypeNone Type = iota + 1 - TypeGzip + TypeGzip // Remove once v1.11.x is out. TypeZstd ) diff --git a/avalanchego/utils/compression/type_test.go b/avalanchego/utils/compression/type_test.go index aa9a47f3..eacad3ba 100644 --- a/avalanchego/utils/compression/type_test.go +++ b/avalanchego/utils/compression/type_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package compression @@ -10,15 +10,17 @@ import ( ) func TestTypeString(t *testing.T) { + require := require.New(t) + for _, compressionType := range []Type{TypeNone, TypeGzip, TypeZstd} { s := compressionType.String() parsedType, err := TypeFromString(s) - require.NoError(t, err) - require.Equal(t, compressionType, parsedType) + require.NoError(err) + require.Equal(compressionType, parsedType) } _, err := TypeFromString("unknown") - require.Error(t, err) + require.ErrorIs(err, errUnknownCompressionType) } func TestTypeMarshalJSON(t *testing.T) { @@ -48,9 +50,11 @@ func TestTypeMarshalJSON(t *testing.T) { for _, tt := range tests { t.Run(tt.Type.String(), func(t *testing.T) { + require := require.New(t) + b, err := tt.Type.MarshalJSON() - require.NoError(t, err) - require.Equal(t, tt.expected, string(b)) + require.NoError(err) + require.Equal(tt.expected, string(b)) }) } } diff --git a/avalanchego/utils/compression/zstd_compressor.go b/avalanchego/utils/compression/zstd_compressor.go index eafc1071..b374fa85 100644 --- a/avalanchego/utils/compression/zstd_compressor.go +++ b/avalanchego/utils/compression/zstd_compressor.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. package compression diff --git a/avalanchego/utils/constants/acps.go b/avalanchego/utils/constants/acps.go new file mode 100644 index 00000000..5392b218 --- /dev/null +++ b/avalanchego/utils/constants/acps.go @@ -0,0 +1,33 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package constants + +import "github.com/ava-labs/avalanchego/utils/set" + +var ( + // CurrentACPs is the set of ACPs that are currently, at the time of + // release, marked as implementable and not activated. + // + // See: https://github.com/orgs/avalanche-foundation/projects/1 + CurrentACPs = set.Of[uint32]( + 23, // https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/23-p-chain-native-transfers.md + 24, // https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/24-shanghai-eips.md + 25, // https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/25-vm-application-errors.md + 30, // https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/30-avalanche-warp-x-evm.md + 31, // https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/31-enable-subnet-ownership-transfer.md + 41, // https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/41-remove-pending-stakers.md + 62, // https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/62-disable-addvalidatortx-and-adddelegatortx.md + ) + + // ScheduledACPs are the ACPs incuded into the next upgrade. 
+ ScheduledACPs = set.Of[uint32]( + 23, // https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/23-p-chain-native-transfers.md + 24, // https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/24-shanghai-eips.md + 25, // https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/25-vm-application-errors.md + 30, // https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/30-avalanche-warp-x-evm.md + 31, // https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/31-enable-subnet-ownership-transfer.md + 41, // https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/41-remove-pending-stakers.md + 62, // https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/62-disable-addvalidatortx-and-adddelegatortx.md + ) +) diff --git a/avalanchego/utils/constants/aliases.go b/avalanchego/utils/constants/aliases.go index dd94bd36..dd838824 100644 --- a/avalanchego/utils/constants/aliases.go +++ b/avalanchego/utils/constants/aliases.go @@ -1,12 +1,7 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package constants -const ( - // ChainAliasPrefix denotes a prefix for an alias that belongs to a blockchain ID. - ChainAliasPrefix string = "bc" - - // VMAliasPrefix denotes a prefix for an alias that belongs to a VM ID. - VMAliasPrefix string = "vm" -) +// ChainAliasPrefix denotes a prefix for an alias that belongs to a blockchain ID. +const ChainAliasPrefix string = "bc" diff --git a/avalanchego/utils/constants/application.go b/avalanchego/utils/constants/application.go index 117b85d9..4e59fd37 100644 --- a/avalanchego/utils/constants/application.go +++ b/avalanchego/utils/constants/application.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package constants diff --git a/avalanchego/utils/constants/memory.go b/avalanchego/utils/constants/memory.go new file mode 100644 index 00000000..cca6ee7a --- /dev/null +++ b/avalanchego/utils/constants/memory.go @@ -0,0 +1,8 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package constants + +// PointerOverhead is used to approximate the memory footprint from allocating a +// pointer. +const PointerOverhead = 8 diff --git a/avalanchego/utils/constants/network_ids.go b/avalanchego/utils/constants/network_ids.go index 4b5ef1cd..ec235c66 100644 --- a/avalanchego/utils/constants/network_ids.go +++ b/avalanchego/utils/constants/network_ids.go @@ -1,14 +1,16 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package constants import ( + "errors" "fmt" "strconv" "strings" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/set" ) // Const variables to be exported @@ -22,7 +24,6 @@ const ( LocalID uint32 = 12345 FlareID uint32 = 14 CostwoID uint32 = 114 - StagingID uint32 = 161 LocalFlareID uint32 = 162 SongbirdID uint32 = 5 CostonID uint32 = 7 @@ -35,7 +36,6 @@ const ( LocalName = "local" FlareName = "flare" CostwoName = "costwo" - StagingName = "staging" LocalFlareName = "localflare" SongbirdName = "songbird" CostonName = "coston" @@ -49,7 +49,6 @@ const ( FallbackHRP = "custom" FlareHRP = "flare" CostwoHRP = "costwo" - StagingHRP = "staging" LocalFlareHRP = "localflare" SongbirdHRP = "songbird" CostonHRP = "coston" @@ -69,7 +68,6 @@ var ( LocalID: LocalName, FlareID: FlareName, CostwoID: CostwoName, - StagingID: StagingName, LocalFlareID: LocalFlareName, SongbirdID: SongbirdName, CostonID: CostonName, @@ -83,7 +81,6 @@ var ( LocalName: LocalID, FlareName: FlareID, CostwoName: CostwoID, - StagingName: StagingID, LocalFlareName: LocalFlareID, 
SongbirdName: SongbirdID, CostonName: CostonID, @@ -98,7 +95,6 @@ var ( LocalID: LocalHRP, FlareID: FlareHRP, CostwoID: CostwoHRP, - StagingID: StagingHRP, LocalFlareID: LocalFlareHRP, SongbirdID: SongbirdHRP, CostonID: CostonHRP, @@ -112,13 +108,15 @@ var ( LocalHRP: LocalID, FlareHRP: FlareID, CostwoHRP: CostwoID, - StagingHRP: StagingID, LocalFlareHRP: LocalFlareID, SongbirdHRP: SongbirdID, CostonHRP: CostonID, } + ProductionNetworkIDs = set.Of(FlareID, SongbirdID, CostwoID, CostonID) ValidNetworkPrefix = "network-" + + ErrParseNetworkName = errors.New("failed to parse network name") ) // GetHRP returns the Human-Readable-Part of bech32 addresses for a networkID @@ -151,13 +149,13 @@ func NetworkID(networkName string) (uint32, error) { } id, err := strconv.ParseUint(idStr, 10, 32) if err != nil { - return 0, fmt.Errorf("failed to parse %q as a network name", networkName) + return 0, fmt.Errorf("%w: %q", ErrParseNetworkName, networkName) } return uint32(id), nil } func IsFlareNetworkID(networkID uint32) bool { - return networkID == FlareID || networkID == CostwoID || networkID == StagingID || networkID == LocalFlareID + return networkID == FlareID || networkID == CostwoID || networkID == LocalFlareID } func IsSgbNetworkID(networkID uint32) bool { diff --git a/avalanchego/utils/constants/network_ids_test.go b/avalanchego/utils/constants/network_ids_test.go index 13eb5bdd..9ca8d231 100644 --- a/avalanchego/utils/constants/network_ids_test.go +++ b/avalanchego/utils/constants/network_ids_test.go @@ -1,10 +1,12 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package constants import ( "testing" + + "github.com/stretchr/testify/require" ) func TestGetHRP(t *testing.T) { @@ -40,10 +42,6 @@ func TestGetHRP(t *testing.T) { id: CostwoID, hrp: CostwoHRP, }, - { - id: StagingID, - hrp: StagingHRP, - }, { id: LocalFlareID, hrp: LocalFlareHRP, @@ -51,10 +49,7 @@ func TestGetHRP(t *testing.T) { } for _, test := range tests { t.Run(test.hrp, func(t *testing.T) { - if hrp := GetHRP(test.id); hrp != test.hrp { - t.Fatalf("GetHRP(%d) returned %q but expected %q", - test.id, hrp, test.hrp) - } + require.Equal(t, test.hrp, GetHRP(test.id)) }) } } @@ -92,10 +87,6 @@ func TestNetworkName(t *testing.T) { id: CostwoID, name: CostwoName, }, - { - id: StagingID, - name: StagingName, - }, { id: LocalFlareID, name: LocalFlareName, @@ -103,19 +94,16 @@ func TestNetworkName(t *testing.T) { } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - if name := NetworkName(test.id); name != test.name { - t.Fatalf("NetworkName(%d) returned %q but expected %q", - test.id, name, test.name) - } + require.Equal(t, test.name, NetworkName(test.id)) }) } } func TestNetworkID(t *testing.T) { tests := []struct { - name string - id uint32 - shouldErr bool + name string + id uint32 + expectedErr error }{ { name: MainnetName, @@ -141,10 +129,6 @@ func TestNetworkID(t *testing.T) { name: CostwoName, id: CostwoID, }, - { - name: StagingName, - id: StagingID, - }, { name: LocalFlareName, id: LocalFlareID, @@ -158,30 +142,25 @@ func TestNetworkID(t *testing.T) { id: 4294967295, }, { - name: "networ-4294967295", - shouldErr: true, + name: "networ-4294967295", + expectedErr: ErrParseNetworkName, }, { - name: "network-4294967295123123", - shouldErr: true, + name: "network-4294967295123123", + expectedErr: ErrParseNetworkName, }, { - name: "4294967295123123", - shouldErr: true, + name: "4294967295123123", + expectedErr: ErrParseNetworkName, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { + require := require.New(t) + id, err := 
NetworkID(test.name) - if err == nil && test.shouldErr { - t.Fatalf("NetworkID(%q) returned %d but should have errored", test.name, test.id) - } - if err != nil && !test.shouldErr { - t.Fatalf("NetworkID(%q) unexpectedly errored with: %s", test.name, err) - } - if id != test.id { - t.Fatalf("NetworkID(%q) returned %d but expected %d", test.name, id, test.id) - } + require.ErrorIs(err, test.expectedErr) + require.Equal(test.id, id) }) } } diff --git a/avalanchego/utils/constants/networking.go b/avalanchego/utils/constants/networking.go index 7290718c..7a4ea89b 100644 --- a/avalanchego/utils/constants/networking.go +++ b/avalanchego/utils/constants/networking.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package constants @@ -27,16 +27,13 @@ const ( MaxContainersLen = int(4 * DefaultMaxMessageSize / 5) - // MinConnectedStakeBuffer is the safety buffer for calculation of MinConnectedStake. - // This increases the required stake percentage above alpha/k. 
Must be [0-1] - // 0 means MinConnectedStake = alpha/k, 1 means MinConnectedStake = 1 (fully connected) - MinConnectedStakeBuffer = .2 - DefaultNetworkPeerListNumValidatorIPs = 15 DefaultNetworkPeerListValidatorGossipSize = 20 DefaultNetworkPeerListNonValidatorGossipSize = 0 DefaultNetworkPeerListPeersGossipSize = 10 DefaultNetworkPeerListGossipFreq = time.Minute + DefaultNetworkPeerListPullGossipFreq = 2 * time.Second + DefaultNetworkPeerListBloomResetFreq = time.Minute // Inbound Connection Throttling DefaultInboundConnUpgradeThrottlerCooldown = 10 * time.Second @@ -55,10 +52,8 @@ const ( DefaultNetworkTimeoutCoefficient = 2 DefaultNetworkReadHandshakeTimeout = 15 * time.Second - DefaultNetworkCompressionEnabled = true // TODO remove when NetworkCompressionEnabledKey is removed - DefaultNetworkCompressionType = compression.TypeGzip + DefaultNetworkCompressionType = compression.TypeZstd DefaultNetworkMaxClockDifference = time.Minute - DefaultNetworkAllowPrivateIPs = true DefaultNetworkRequireValidatorToConnect = false DefaultNetworkPeerReadBufferSize = 8 * units.KiB DefaultNetworkPeerWriteBufferSize = 8 * units.KiB @@ -78,12 +73,12 @@ const ( DefaultBenchlistMinFailingDuration = 2*time.Minute + 30*time.Second // Router - DefaultConsensusGossipFrequency = 10 * time.Second DefaultConsensusAppConcurrency = 2 - DefaultConsensusShutdownTimeout = 30 * time.Second + DefaultConsensusShutdownTimeout = time.Minute + DefaultFrontierPollFrequency = 100 * time.Millisecond DefaultConsensusGossipAcceptedFrontierValidatorSize = 0 DefaultConsensusGossipAcceptedFrontierNonValidatorSize = 0 - DefaultConsensusGossipAcceptedFrontierPeerSize = 15 + DefaultConsensusGossipAcceptedFrontierPeerSize = 1 DefaultConsensusGossipOnAcceptValidatorSize = 0 DefaultConsensusGossipOnAcceptNonValidatorSize = 0 DefaultConsensusGossipOnAcceptPeerSize = 10 @@ -100,6 +95,7 @@ const ( DefaultInboundThrottlerBandwidthMaxBurstSize = DefaultMaxMessageSize DefaultInboundThrottlerCPUMaxRecheckDelay = 5 * 
time.Second DefaultInboundThrottlerDiskMaxRecheckDelay = 5 * time.Second + MinInboundThrottlerMaxRecheckDelay = time.Millisecond // Outbound Throttling DefaultOutboundThrottlerAtLargeAllocSize = 32 * units.MiB diff --git a/avalanchego/utils/constants/vm_ids.go b/avalanchego/utils/constants/vm_ids.go index 4fb887c4..9fda498f 100644 --- a/avalanchego/utils/constants/vm_ids.go +++ b/avalanchego/utils/constants/vm_ids.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package constants diff --git a/avalanchego/utils/context.go b/avalanchego/utils/context.go deleted file mode 100644 index 9ff30018..00000000 --- a/avalanchego/utils/context.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package utils - -import ( - "context" - "time" -) - -type detachedContext struct { - ctx context.Context -} - -func Detach(ctx context.Context) context.Context { - return &detachedContext{ - ctx: ctx, - } -} - -func (*detachedContext) Deadline() (time.Time, bool) { - return time.Time{}, false -} - -func (*detachedContext) Done() <-chan struct{} { - return nil -} - -func (*detachedContext) Err() error { - return nil -} - -func (c *detachedContext) Value(key any) any { - return c.ctx.Value(key) -} diff --git a/avalanchego/utils/crypto/bls/bls_benchmark_test.go b/avalanchego/utils/crypto/bls/bls_benchmark_test.go index a4503260..b9648b43 100644 --- a/avalanchego/utils/crypto/bls/bls_benchmark_test.go +++ b/avalanchego/utils/crypto/bls/bls_benchmark_test.go @@ -1,10 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package bls import ( - "fmt" + "strconv" "testing" "github.com/stretchr/testify/require" @@ -28,12 +28,10 @@ var sizes = []int{ } func BenchmarkSign(b *testing.B) { - require := require.New(b) - privateKey, err := NewSecretKey() - require.NoError(err) + require.NoError(b, err) for _, messageSize := range sizes { - b.Run(fmt.Sprintf("%d", messageSize), func(b *testing.B) { + b.Run(strconv.Itoa(messageSize), func(b *testing.B) { message := utils.RandomBytes(messageSize) b.ResetTimer() @@ -46,21 +44,19 @@ func BenchmarkSign(b *testing.B) { } func BenchmarkVerify(b *testing.B) { - require := require.New(b) - privateKey, err := NewSecretKey() - require.NoError(err) + require.NoError(b, err) publicKey := PublicFromSecretKey(privateKey) for _, messageSize := range sizes { - b.Run(fmt.Sprintf("%d", messageSize), func(b *testing.B) { + b.Run(strconv.Itoa(messageSize), func(b *testing.B) { message := utils.RandomBytes(messageSize) signature := Sign(privateKey, message) b.ResetTimer() for n := 0; n < b.N; n++ { - require.True(Verify(publicKey, signature, message)) + require.True(b, Verify(publicKey, signature, message)) } }) } @@ -76,12 +72,10 @@ func BenchmarkAggregatePublicKeys(b *testing.B) { } for _, size := range sizes { - b.Run(fmt.Sprintf("%d", size), func(b *testing.B) { - require := require.New(b) - + b.Run(strconv.Itoa(size), func(b *testing.B) { for n := 0; n < b.N; n++ { _, err := AggregatePublicKeys(keys[:size]) - require.NoError(err) + require.NoError(b, err) } }) } diff --git a/avalanchego/utils/crypto/bls/bls_test.go b/avalanchego/utils/crypto/bls/bls_test.go index f3bb0500..e8a4a45b 100644 --- a/avalanchego/utils/crypto/bls/bls_test.go +++ b/avalanchego/utils/crypto/bls/bls_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package bls diff --git a/avalanchego/utils/crypto/bls/public.go b/avalanchego/utils/crypto/bls/public.go index f17d6127..2c3cca7a 100644 --- a/avalanchego/utils/crypto/bls/public.go +++ b/avalanchego/utils/crypto/bls/public.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package bls @@ -13,7 +13,7 @@ const PublicKeyLen = blst.BLST_P1_COMPRESS_BYTES var ( ErrNoPublicKeys = errors.New("no public keys") - errFailedPublicKeyDecompress = errors.New("couldn't decompress public key") + ErrFailedPublicKeyDecompress = errors.New("couldn't decompress public key") errInvalidPublicKey = errors.New("invalid public key") errFailedPublicKeyAggregation = errors.New("couldn't aggregate public keys") ) @@ -33,7 +33,7 @@ func PublicKeyToBytes(pk *PublicKey) []byte { func PublicKeyFromBytes(pkBytes []byte) (*PublicKey, error) { pk := new(PublicKey).Uncompress(pkBytes) if pk == nil { - return nil, errFailedPublicKeyDecompress + return nil, ErrFailedPublicKeyDecompress } if !pk.KeyValidate() { return nil, errInvalidPublicKey @@ -70,3 +70,11 @@ func Verify(pk *PublicKey, sig *Signature, msg []byte) bool { func VerifyProofOfPossession(pk *PublicKey, sig *Signature, msg []byte) bool { return sig.Verify(false, pk, false, msg, ciphersuiteProofOfPossession) } + +func DeserializePublicKey(pkBytes []byte) *PublicKey { + return new(PublicKey).Deserialize(pkBytes) +} + +func SerializePublicKey(key *PublicKey) []byte { + return key.Serialize() +} diff --git a/avalanchego/utils/crypto/bls/public_test.go b/avalanchego/utils/crypto/bls/public_test.go index 02300295..4465b014 100644 --- a/avalanchego/utils/crypto/bls/public_test.go +++ b/avalanchego/utils/crypto/bls/public_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. package bls @@ -16,7 +16,7 @@ func TestPublicKeyFromBytesWrongSize(t *testing.T) { pkBytes := utils.RandomBytes(PublicKeyLen + 1) _, err := PublicKeyFromBytes(pkBytes) - require.ErrorIs(err, errFailedPublicKeyDecompress) + require.ErrorIs(err, ErrFailedPublicKeyDecompress) } func TestPublicKeyBytes(t *testing.T) { diff --git a/avalanchego/utils/crypto/bls/secret.go b/avalanchego/utils/crypto/bls/secret.go index a37961eb..049938bd 100644 --- a/avalanchego/utils/crypto/bls/secret.go +++ b/avalanchego/utils/crypto/bls/secret.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package bls @@ -17,7 +17,7 @@ var ( errFailedSecretKeyDeserialize = errors.New("couldn't deserialize secret key") // The ciphersuite is more commonly known as G2ProofOfPossession. - // There are two digests to ensure that that message space for normal + // There are two digests to ensure that message space for normal // signatures and the proof of possession are distinct. ciphersuiteSignature = []byte("BLS_SIG_BLS12381G2_XMD:SHA-256_SSWU_RO_POP_") ciphersuiteProofOfPossession = []byte("BLS_POP_BLS12381G2_XMD:SHA-256_SSWU_RO_POP_") @@ -71,3 +71,11 @@ func Sign(sk *SecretKey, msg []byte) *Signature { func SignProofOfPossession(sk *SecretKey, msg []byte) *Signature { return new(Signature).Sign(sk, msg, ciphersuiteProofOfPossession) } + +func DeserializeSecretKey(pkBytes []byte) *SecretKey { + return new(SecretKey).Deserialize(pkBytes) +} + +func SerializeSecretKey(key *SecretKey) []byte { + return key.Serialize() +} diff --git a/avalanchego/utils/crypto/bls/secret_test.go b/avalanchego/utils/crypto/bls/secret_test.go index c01540ac..d3d46e1a 100644 --- a/avalanchego/utils/crypto/bls/secret_test.go +++ b/avalanchego/utils/crypto/bls/secret_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. 
All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package bls diff --git a/avalanchego/utils/crypto/bls/signature.go b/avalanchego/utils/crypto/bls/signature.go index 7ee03aef..0d0d029b 100644 --- a/avalanchego/utils/crypto/bls/signature.go +++ b/avalanchego/utils/crypto/bls/signature.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package bls @@ -12,7 +12,7 @@ import ( const SignatureLen = blst.BLST_P2_COMPRESS_BYTES var ( - errFailedSignatureDecompress = errors.New("couldn't decompress signature") + ErrFailedSignatureDecompress = errors.New("couldn't decompress signature") errInvalidSignature = errors.New("invalid signature") errNoSignatures = errors.New("no signatures") errFailedSignatureAggregation = errors.New("couldn't aggregate signatures") @@ -33,7 +33,7 @@ func SignatureToBytes(sig *Signature) []byte { func SignatureFromBytes(sigBytes []byte) (*Signature, error) { sig := new(Signature).Uncompress(sigBytes) if sig == nil { - return nil, errFailedSignatureDecompress + return nil, ErrFailedSignatureDecompress } if !sig.SigValidate(false) { return nil, errInvalidSignature diff --git a/avalanchego/utils/crypto/bls/signature_test.go b/avalanchego/utils/crypto/bls/signature_test.go index caf613fc..3d43282c 100644 --- a/avalanchego/utils/crypto/bls/signature_test.go +++ b/avalanchego/utils/crypto/bls/signature_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package bls diff --git a/avalanchego/utils/crypto/keychain/keychain.go b/avalanchego/utils/crypto/keychain/keychain.go index bd8a4c79..47d39b59 100644 --- a/avalanchego/utils/crypto/keychain/keychain.go +++ b/avalanchego/utils/crypto/keychain/keychain.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package keychain @@ -89,8 +89,7 @@ func NewLedgerKeychainFromIndices(l Ledger, indices []uint32) (Keychain, error) ) } - addrsSet := set.NewSet[ids.ShortID](len(addrs)) - addrsSet.Add(addrs...) + addrsSet := set.Of(addrs...) addrToIdx := map[ids.ShortID]uint32{} for i := range indices { diff --git a/avalanchego/utils/crypto/keychain/keychain_test.go b/avalanchego/utils/crypto/keychain/keychain_test.go index e260e87c..f0ffba7f 100644 --- a/avalanchego/utils/crypto/keychain/keychain_test.go +++ b/avalanchego/utils/crypto/keychain/keychain_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package keychain @@ -7,9 +7,8 @@ import ( "errors" "testing" - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" "github.com/ava-labs/avalanchego/ids" ) @@ -19,14 +18,13 @@ var errTest = errors.New("test") func TestNewLedgerKeychain(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() addr := ids.GenerateTestShortID() // user request invalid number of addresses to derive ledger := NewMockLedger(ctrl) _, err := NewLedgerKeychain(ledger, 0) - require.Equal(err, ErrInvalidNumAddrsToDerive) + require.ErrorIs(err, ErrInvalidNumAddrsToDerive) // ledger does not return expected number of derived addresses ledger = NewMockLedger(ctrl) @@ -38,7 +36,7 @@ func TestNewLedgerKeychain(t *testing.T) { ledger = NewMockLedger(ctrl) ledger.EXPECT().Addresses([]uint32{0}).Return([]ids.ShortID{addr}, errTest).Times(1) _, err = NewLedgerKeychain(ledger, 1) - require.Equal(err, errTest) + require.ErrorIs(err, errTest) // good path ledger = NewMockLedger(ctrl) @@ -50,7 +48,6 @@ func TestNewLedgerKeychain(t *testing.T) { func TestLedgerKeychain_Addresses(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() addr1 := ids.GenerateTestShortID() addr2 := ids.GenerateTestShortID() @@ -82,7 +79,6 @@ func TestLedgerKeychain_Addresses(t *testing.T) { func TestLedgerKeychain_Get(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() addr1 := ids.GenerateTestShortID() addr2 := ids.GenerateTestShortID() @@ -126,7 +122,6 @@ func TestLedgerKeychain_Get(t *testing.T) { func TestLedgerSigner_SignHash(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() addr1 := ids.GenerateTestShortID() addr2 := ids.GenerateTestShortID() @@ -160,7 +155,7 @@ func TestLedgerSigner_SignHash(t *testing.T) { require.True(b) _, err = s.SignHash(toSign) - require.Equal(err, errTest) + require.ErrorIs(err, 
errTest) // good path 1 addr ledger = NewMockLedger(ctrl) @@ -210,7 +205,6 @@ func TestLedgerSigner_SignHash(t *testing.T) { func TestNewLedgerKeychainFromIndices(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() addr := ids.GenerateTestShortID() _ = addr @@ -218,7 +212,7 @@ func TestNewLedgerKeychainFromIndices(t *testing.T) { // user request invalid number of indices ledger := NewMockLedger(ctrl) _, err := NewLedgerKeychainFromIndices(ledger, []uint32{}) - require.Equal(err, ErrInvalidIndicesLength) + require.ErrorIs(err, ErrInvalidIndicesLength) // ledger does not return expected number of derived addresses ledger = NewMockLedger(ctrl) @@ -230,7 +224,7 @@ func TestNewLedgerKeychainFromIndices(t *testing.T) { ledger = NewMockLedger(ctrl) ledger.EXPECT().Addresses([]uint32{0}).Return([]ids.ShortID{addr}, errTest).Times(1) _, err = NewLedgerKeychainFromIndices(ledger, []uint32{0}) - require.Equal(err, errTest) + require.ErrorIs(err, errTest) // good path ledger = NewMockLedger(ctrl) @@ -242,7 +236,6 @@ func TestNewLedgerKeychainFromIndices(t *testing.T) { func TestLedgerKeychainFromIndices_Addresses(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() addr1 := ids.GenerateTestShortID() addr2 := ids.GenerateTestShortID() @@ -302,7 +295,6 @@ func TestLedgerKeychainFromIndices_Addresses(t *testing.T) { func TestLedgerKeychainFromIndices_Get(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() addr1 := ids.GenerateTestShortID() addr2 := ids.GenerateTestShortID() @@ -348,7 +340,6 @@ func TestLedgerKeychainFromIndices_Get(t *testing.T) { func TestLedgerSignerFromIndices_SignHash(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() addr1 := ids.GenerateTestShortID() addr2 := ids.GenerateTestShortID() @@ -382,7 +373,7 @@ func TestLedgerSignerFromIndices_SignHash(t *testing.T) { require.True(b) _, err 
= s.SignHash(toSign) - require.Equal(err, errTest) + require.ErrorIs(err, errTest) // good path 1 addr ledger = NewMockLedger(ctrl) diff --git a/avalanchego/utils/crypto/keychain/ledger.go b/avalanchego/utils/crypto/keychain/ledger.go index d709ed19..955eb448 100644 --- a/avalanchego/utils/crypto/keychain/ledger.go +++ b/avalanchego/utils/crypto/keychain/ledger.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package keychain diff --git a/avalanchego/utils/crypto/keychain/mock_ledger.go b/avalanchego/utils/crypto/keychain/mock_ledger.go index c6d0ead8..b082631c 100644 --- a/avalanchego/utils/crypto/keychain/mock_ledger.go +++ b/avalanchego/utils/crypto/keychain/mock_ledger.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/utils/crypto/keychain (interfaces: Ledger) +// +// Generated by this command: +// +// mockgen -package=keychain -destination=utils/crypto/keychain/mock_ledger.go github.com/ava-labs/avalanchego/utils/crypto/keychain Ledger +// // Package keychain is a generated GoMock package. package keychain @@ -12,7 +14,7 @@ import ( ids "github.com/ava-labs/avalanchego/ids" version "github.com/ava-labs/avalanchego/version" - gomock "github.com/golang/mock/gomock" + gomock "go.uber.org/mock/gomock" ) // MockLedger is a mock of Ledger interface. @@ -48,7 +50,7 @@ func (m *MockLedger) Address(arg0 string, arg1 uint32) (ids.ShortID, error) { } // Address indicates an expected call of Address. 
-func (mr *MockLedgerMockRecorder) Address(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockLedgerMockRecorder) Address(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Address", reflect.TypeOf((*MockLedger)(nil).Address), arg0, arg1) } @@ -63,7 +65,7 @@ func (m *MockLedger) Addresses(arg0 []uint32) ([]ids.ShortID, error) { } // Addresses indicates an expected call of Addresses. -func (mr *MockLedgerMockRecorder) Addresses(arg0 interface{}) *gomock.Call { +func (mr *MockLedgerMockRecorder) Addresses(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Addresses", reflect.TypeOf((*MockLedger)(nil).Addresses), arg0) } @@ -92,7 +94,7 @@ func (m *MockLedger) Sign(arg0 []byte, arg1 []uint32) ([][]byte, error) { } // Sign indicates an expected call of Sign. -func (mr *MockLedgerMockRecorder) Sign(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockLedgerMockRecorder) Sign(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Sign", reflect.TypeOf((*MockLedger)(nil).Sign), arg0, arg1) } @@ -107,7 +109,7 @@ func (m *MockLedger) SignHash(arg0 []byte, arg1 []uint32) ([][]byte, error) { } // SignHash indicates an expected call of SignHash. -func (mr *MockLedgerMockRecorder) SignHash(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockLedgerMockRecorder) SignHash(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SignHash", reflect.TypeOf((*MockLedger)(nil).SignHash), arg0, arg1) } diff --git a/avalanchego/utils/crypto/ledger/ledger.go b/avalanchego/utils/crypto/ledger/ledger.go index 37de44fe..5c6cee16 100644 --- a/avalanchego/utils/crypto/ledger/ledger.go +++ b/avalanchego/utils/crypto/ledger/ledger.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. 
All rights reserved. // See the file LICENSE for licensing terms. package ledger @@ -6,16 +6,17 @@ package ledger import ( "fmt" - ledger "github.com/ava-labs/ledger-avalanche/go" - "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/crypto/keychain" "github.com/ava-labs/avalanchego/utils/hashing" "github.com/ava-labs/avalanchego/version" + + ledger "github.com/ava-labs/ledger-avalanche/go" + bip32 "github.com/tyler-smith/go-bip32" ) const ( - rootPath = "m/44'/9000'/0'" + rootPath = "m/44'/9000'/0'" // BIP44: m / purpose' / coin_type' / account' ledgerBufferLimit = 8192 ledgerPathSize = 9 ) @@ -26,6 +27,7 @@ var _ keychain.Ledger = (*Ledger)(nil) // provides Avalanche-specific access. type Ledger struct { device *ledger.LedgerAvalanche + epk *bip32.Key } func New() (keychain.Ledger, error) { @@ -40,21 +42,37 @@ func addressPath(index uint32) string { } func (l *Ledger) Address(hrp string, addressIndex uint32) (ids.ShortID, error) { - _, hash, err := l.device.GetPubKey(addressPath(addressIndex), true, hrp, "") + resp, err := l.device.GetPubKey(addressPath(addressIndex), true, hrp, "") if err != nil { return ids.ShortEmpty, err } - return ids.ToShortID(hash) + return ids.ToShortID(resp.Hash) } func (l *Ledger) Addresses(addressIndices []uint32) ([]ids.ShortID, error) { + if l.epk == nil { + pk, chainCode, err := l.device.GetExtPubKey(rootPath, false, "", "") + if err != nil { + return nil, err + } + l.epk = &bip32.Key{ + Key: pk, + ChainCode: chainCode, + } + } + // derivation path rootPath/0 (BIP44 change level, when set to 0, known as external chain) + externalChain, err := l.epk.NewChildKey(0) + if err != nil { + return nil, err + } addresses := make([]ids.ShortID, len(addressIndices)) - for i, v := range addressIndices { - _, hash, err := l.device.GetPubKey(addressPath(v), false, "", "") + for i, addressIndex := range addressIndices { + // derivation path rootPath/0/v (BIP44 address index level) + address, err := 
externalChain.NewChildKey(addressIndex) if err != nil { return nil, err } - copy(addresses[i][:], hash) + copy(addresses[i][:], hashing.PubkeyBytesToAddress(address.Key)) } return addresses, nil } diff --git a/avalanchego/utils/crypto/ledger/ledger_test.go b/avalanchego/utils/crypto/ledger/ledger_test.go index 1ab163c9..160b2636 100644 --- a/avalanchego/utils/crypto/ledger/ledger_test.go +++ b/avalanchego/utils/crypto/ledger/ledger_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package ledger @@ -18,8 +18,6 @@ const ( hrp = "fuji" ) -var factory secp256k1.Factory - // TestLedger will be skipped if a ledger is not connected. func TestLedger(t *testing.T) { require := require.New(t) @@ -66,7 +64,7 @@ func TestLedger(t *testing.T) { for i, addrIndex := range indices { sig := sigs[i] - pk, err := factory.RecoverHashPublicKey(rawHash, sig) + pk, err := secp256k1.RecoverPublicKeyFromHash(rawHash, sig) require.NoError(err) require.Equal(addresses[addrIndex], pk.Address()) } diff --git a/avalanchego/utils/crypto/secp256k1/rfc6979_test.go b/avalanchego/utils/crypto/secp256k1/rfc6979_test.go index d4c0a9c4..7efc019a 100644 --- a/avalanchego/utils/crypto/secp256k1/rfc6979_test.go +++ b/avalanchego/utils/crypto/secp256k1/rfc6979_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package secp256k1 @@ -58,7 +58,6 @@ type test struct { } func TestRFC6979Compliance(t *testing.T) { - f := Factory{} for i, tt := range rfc6979Tests { t.Run(fmt.Sprintf("test %d", i), func(t *testing.T) { require := require.New(t) @@ -66,7 +65,7 @@ func TestRFC6979Compliance(t *testing.T) { skBytes, err := hex.DecodeString(tt.skHex) require.NoError(err) - sk, err := f.ToPrivateKey(skBytes) + sk, err := ToPrivateKey(skBytes) require.NoError(err) msgBytes := []byte(tt.msg) diff --git a/avalanchego/utils/crypto/secp256k1/secp256k1.go b/avalanchego/utils/crypto/secp256k1/secp256k1.go index 80de06c0..7dbbf9f8 100644 --- a/avalanchego/utils/crypto/secp256k1/secp256k1.go +++ b/avalanchego/utils/crypto/secp256k1/secp256k1.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package secp256k1 @@ -8,16 +8,15 @@ import ( "fmt" "strings" - stdecdsa "crypto/ecdsa" - "github.com/decred/dcrd/dcrec/secp256k1/v4/ecdsa" - secp256k1 "github.com/decred/dcrd/dcrec/secp256k1/v4" - "github.com/ava-labs/avalanchego/cache" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/cb58" "github.com/ava-labs/avalanchego/utils/hashing" + + stdecdsa "crypto/ecdsa" + secp256k1 "github.com/decred/dcrd/dcrec/secp256k1/v4" ) const ( @@ -44,6 +43,7 @@ const ( ) var ( + ErrInvalidSig = errors.New("invalid signature") errCompressed = errors.New("wasn't expecting a compressed key") errMissingQuotes = errors.New("first and last characters should be quotes") errMissingKeyPrefix = fmt.Errorf("private key missing %s prefix", PrivateKeyPrefix) @@ -53,16 +53,12 @@ var ( errMutatedSig = errors.New("signature was mutated from its original format") ) -type Factory struct { - Cache cache.LRU[ids.ID, *PublicKey] -} - -func (*Factory) NewPrivateKey() (*PrivateKey, error) { +func NewPrivateKey() (*PrivateKey, error) { k, err := 
secp256k1.GeneratePrivateKey() return &PrivateKey{sk: k}, err } -func (*Factory) ToPublicKey(b []byte) (*PublicKey, error) { +func ToPublicKey(b []byte) (*PublicKey, error) { if len(b) != PublicKeyLen { return nil, errInvalidPublicKeyLength } @@ -74,7 +70,7 @@ func (*Factory) ToPublicKey(b []byte) (*PublicKey, error) { }, err } -func (*Factory) ToPrivateKey(b []byte) (*PrivateKey, error) { +func ToPrivateKey(b []byte) (*PrivateKey, error) { if len(b) != PrivateKeyLen { return nil, errInvalidPrivateKeyLength } @@ -84,19 +80,11 @@ func (*Factory) ToPrivateKey(b []byte) (*PrivateKey, error) { }, nil } -func (f *Factory) RecoverPublicKey(msg, sig []byte) (*PublicKey, error) { - return f.RecoverHashPublicKey(hashing.ComputeHash256(msg), sig) +func RecoverPublicKey(msg, sig []byte) (*PublicKey, error) { + return RecoverPublicKeyFromHash(hashing.ComputeHash256(msg), sig) } -func (f *Factory) RecoverHashPublicKey(hash, sig []byte) (*PublicKey, error) { - cacheBytes := make([]byte, len(hash)+len(sig)) - copy(cacheBytes, hash) - copy(cacheBytes[len(hash):], sig) - id := hashing.ComputeHash256Array(cacheBytes) - if cachedPublicKey, ok := f.Cache.Get(id); ok { - return cachedPublicKey, nil - } - +func RecoverPublicKeyFromHash(hash, sig []byte) (*PublicKey, error) { if err := verifySECP256K1RSignatureFormat(sig); err != nil { return nil, err } @@ -108,16 +96,40 @@ func (f *Factory) RecoverHashPublicKey(hash, sig []byte) (*PublicKey, error) { rawPubkey, compressed, err := ecdsa.RecoverCompact(sig, hash) if err != nil { - return nil, err + return nil, ErrInvalidSig } if compressed { return nil, errCompressed } - pubkey := &PublicKey{pk: rawPubkey} - f.Cache.Put(id, pubkey) - return pubkey, nil + return &PublicKey{pk: rawPubkey}, nil +} + +type RecoverCache struct { + cache.LRU[ids.ID, *PublicKey] +} + +func (r *RecoverCache) RecoverPublicKey(msg, sig []byte) (*PublicKey, error) { + return r.RecoverPublicKeyFromHash(hashing.ComputeHash256(msg), sig) +} + +func (r *RecoverCache) 
RecoverPublicKeyFromHash(hash, sig []byte) (*PublicKey, error) { + cacheBytes := make([]byte, len(hash)+len(sig)) + copy(cacheBytes, hash) + copy(cacheBytes[len(hash):], sig) + id := hashing.ComputeHash256Array(cacheBytes) + if cachedPublicKey, ok := r.Get(id); ok { + return cachedPublicKey, nil + } + + pubKey, err := RecoverPublicKeyFromHash(hash, sig) + if err != nil { + return nil, err + } + + r.Put(id, pubKey) + return pubKey, nil } type PublicKey struct { @@ -131,8 +143,7 @@ func (k *PublicKey) Verify(msg, sig []byte) bool { } func (k *PublicKey) VerifyHash(hash, sig []byte) bool { - factory := Factory{} - pk, err := factory.RecoverHashPublicKey(hash, sig) + pk, err := RecoverPublicKeyFromHash(hash, sig) if err != nil { return false } @@ -208,7 +219,7 @@ func (k *PrivateKey) String() string { } func (k *PrivateKey) MarshalJSON() ([]byte, error) { - return []byte("\"" + k.String() + "\""), nil + return []byte(`"` + k.String() + `"`), nil } func (k *PrivateKey) MarshalText() ([]byte, error) { diff --git a/avalanchego/utils/crypto/secp256k1/secp256k1_benchmark_test.go b/avalanchego/utils/crypto/secp256k1/secp256k1_benchmark_test.go index b7f105b0..ca4f98e3 100644 --- a/avalanchego/utils/crypto/secp256k1/secp256k1_benchmark_test.go +++ b/avalanchego/utils/crypto/secp256k1/secp256k1_benchmark_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package secp256k1 @@ -15,9 +15,7 @@ import ( func BenchmarkVerify(b *testing.B) { require := require.New(b) - f := &Factory{} - - privateKey, err := f.NewPrivateKey() + privateKey, err := NewPrivateKey() require.NoError(err) message := utils.RandomBytes(512) diff --git a/avalanchego/utils/crypto/secp256k1/secp256k1_test.go b/avalanchego/utils/crypto/secp256k1/secp256k1_test.go index e8abab31..8418a239 100644 --- a/avalanchego/utils/crypto/secp256k1/secp256k1_test.go +++ b/avalanchego/utils/crypto/secp256k1/secp256k1_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package secp256k1 @@ -8,18 +8,18 @@ import ( "github.com/stretchr/testify/require" - secp256k1 "github.com/decred/dcrd/dcrec/secp256k1/v4" - "github.com/ava-labs/avalanchego/cache" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/cb58" "github.com/ava-labs/avalanchego/utils/hashing" + + secp256k1 "github.com/decred/dcrd/dcrec/secp256k1/v4" ) func TestRecover(t *testing.T) { require := require.New(t) - f := Factory{} - key, err := f.NewPrivateKey() + key, err := NewPrivateKey() require.NoError(err) msg := []byte{1, 2, 3} @@ -27,38 +27,40 @@ func TestRecover(t *testing.T) { require.NoError(err) pub := key.PublicKey() - pubRec, err := f.RecoverPublicKey(msg, sig) + pubRec, err := RecoverPublicKey(msg, sig) require.NoError(err) require.Equal(pub, pubRec) + + require.True(pub.Verify(msg, sig)) } func TestCachedRecover(t *testing.T) { require := require.New(t) - f := Factory{Cache: cache.LRU[ids.ID, *PublicKey]{Size: 1}} - key, err := f.NewPrivateKey() + key, err := NewPrivateKey() require.NoError(err) msg := []byte{1, 2, 3} sig, err := key.Sign(msg) require.NoError(err) - pub1, err := f.RecoverPublicKey(msg, sig) + r := RecoverCache{LRU: cache.LRU[ids.ID, *PublicKey]{Size: 1}} + pub1, err := r.RecoverPublicKey(msg, sig) 
require.NoError(err) - pub2, err := f.RecoverPublicKey(msg, sig) + pub2, err := r.RecoverPublicKey(msg, sig) require.NoError(err) - require.Equal(pub1, pub2) + require.Equal(key.PublicKey(), pub1) + require.Equal(key.PublicKey(), pub2) } func TestExtensive(t *testing.T) { require := require.New(t) - f := Factory{} hash := hashing.ComputeHash256([]byte{1, 2, 3}) for i := 0; i < 1000; i++ { - key, err := f.NewPrivateKey() + key, err := NewPrivateKey() require.NoError(err) _, err = key.SignHash(hash) @@ -69,13 +71,12 @@ func TestExtensive(t *testing.T) { func TestGenRecreate(t *testing.T) { require := require.New(t) - f := Factory{} for i := 0; i < 1000; i++ { - sk, err := f.NewPrivateKey() + sk, err := NewPrivateKey() require.NoError(err) skBytes := sk.Bytes() - recoveredSk, err := f.ToPrivateKey(skBytes) + recoveredSk, err := ToPrivateKey(skBytes) require.NoError(err) require.Equal(sk.PublicKey(), recoveredSk.PublicKey()) @@ -85,8 +86,7 @@ func TestGenRecreate(t *testing.T) { func TestVerifyMutatedSignature(t *testing.T) { require := require.New(t) - f := Factory{} - sk, err := f.NewPrivateKey() + sk, err := NewPrivateKey() require.NoError(err) msg := []byte{'h', 'e', 'l', 'l', 'o'} @@ -99,23 +99,21 @@ func TestVerifyMutatedSignature(t *testing.T) { newSBytes := s.Bytes() copy(sig[32:], newSBytes[:]) - _, err = f.RecoverPublicKey(msg, sig) - require.Error(err) + _, err = RecoverPublicKey(msg, sig) + require.ErrorIs(err, errMutatedSig) } func TestPrivateKeySECP256K1RUnmarshalJSON(t *testing.T) { require := require.New(t) - f := Factory{} - key, err := f.NewPrivateKey() + key, err := NewPrivateKey() require.NoError(err) keyJSON, err := key.MarshalJSON() require.NoError(err) key2 := PrivateKey{} - err = key2.UnmarshalJSON(keyJSON) - require.NoError(err) + require.NoError(key2.UnmarshalJSON(keyJSON)) require.Equal(key.PublicKey(), key2.PublicKey()) } @@ -123,30 +121,47 @@ func TestPrivateKeySECP256K1RUnmarshalJSONError(t *testing.T) { tests := []struct { label string in 
[]byte + err error }{ { - "too short", - []byte(`"`), + label: "too short", + in: []byte(`"`), + err: errMissingQuotes, + }, + { + label: "missing start quote", + in: []byte(`PrivateKey-ewoqjP7PxY4yr3iLTpLisriqt94hdyDFNgchSxGGztUrTXtNN"`), + err: errMissingQuotes, + }, + { + label: "missing end quote", + in: []byte(`"PrivateKey-ewoqjP7PxY4yr3iLTpLisriqt94hdyDFNgchSxGGztUrTXtNN`), + err: errMissingQuotes, }, { - "missing start quote", - []byte(`PrivateKey-ewoqjP7PxY4yr3iLTpLisriqt94hdyDFNgchSxGGztUrTXtNN"`), + label: "incorrect prefix", + in: []byte(`"PrivateKfy-ewoqjP7PxY4yr3iLTpLisriqt94hdyDFNgchSxGGztUrTXtNN"`), + err: errMissingKeyPrefix, }, { - "missing end quote", - []byte(`"PrivateKey-ewoqjP7PxY4yr3iLTpLisriqt94hdyDFNgchSxGGztUrTXtNN`), + label: `"PrivateKey-"`, + in: []byte(`"PrivateKey-"`), + err: cb58.ErrBase58Decoding, }, { - "incorrect prefix", - []byte(`"PrivateKfy-ewoqjP7PxY4yr3iLTpLisriqt94hdyDFNgchSxGGztUrTXtNN"`), + label: `"PrivateKey-1"`, + in: []byte(`"PrivateKey-1"`), + err: cb58.ErrMissingChecksum, }, { - `"PrivateKey-"`, - []byte(`"PrivateKey-"`), + label: `"PrivateKey-1"`, + in: []byte(`"PrivateKey-1"`), + err: cb58.ErrMissingChecksum, }, { - `"PrivateKey-1"`, - []byte(`"PrivateKey-1"`), + label: `"PrivateKey-1"`, + in: []byte(`"PrivateKey-45PJLL"`), + err: errInvalidPrivateKeyLength, }, } for _, tt := range tests { @@ -155,7 +170,7 @@ func TestPrivateKeySECP256K1RUnmarshalJSONError(t *testing.T) { foo := PrivateKey{} err := foo.UnmarshalJSON(tt.in) - require.Error(err) + require.ErrorIs(err, tt.err) }) } } @@ -221,3 +236,71 @@ func TestSigning(t *testing.T) { }) } } + +func TestExportedMethods(t *testing.T) { + require := require.New(t) + + key := TestKeys()[0] + + pubKey := key.PublicKey() + require.Equal("111111111111111111116DBWJs", pubKey.addr.String()) + require.Equal("Q4MzFZZDPHRPAHFeDs3NiyyaZDvxHKivf", pubKey.Address().String()) + require.Equal("Q4MzFZZDPHRPAHFeDs3NiyyaZDvxHKivf", pubKey.addr.String()) + 
require.Equal("Q4MzFZZDPHRPAHFeDs3NiyyaZDvxHKivf", key.Address().String()) + + expectedPubKeyBytes := []byte{ + 0x03, 0x73, 0x93, 0x53, 0x47, 0x88, 0x44, 0x78, + 0xe4, 0x94, 0x5c, 0xd0, 0xfd, 0x94, 0x8e, 0xcf, + 0x08, 0x8b, 0x94, 0xdf, 0xc9, 0x20, 0x74, 0xf0, + 0xfb, 0x03, 0xda, 0x6f, 0x4d, 0xbc, 0x94, 0x35, + 0x7d, + } + require.Equal(expectedPubKeyBytes, pubKey.bytes) + + expectedPubKey, err := ToPublicKey(expectedPubKeyBytes) + require.NoError(err) + require.Equal(expectedPubKey.Address(), pubKey.Address()) + require.Equal(expectedPubKeyBytes, expectedPubKey.Bytes()) + + expectedECDSAParams := struct { + X []byte + Y []byte + }{ + []byte{ + 0x73, 0x93, 0x53, 0x47, 0x88, 0x44, 0x78, 0xe4, + 0x94, 0x5c, 0xd0, 0xfd, 0x94, 0x8e, 0xcf, 0x08, + 0x8b, 0x94, 0xdf, 0xc9, 0x20, 0x74, 0xf0, 0xfb, + 0x03, 0xda, 0x6f, 0x4d, 0xbc, 0x94, 0x35, 0x7d, + }, + []byte{ + 0x78, 0xe7, 0x39, 0x45, 0x6c, 0x3b, 0xdb, 0x9e, + 0xe9, 0xb2, 0xa9, 0xf2, 0x84, 0xfa, 0x64, 0x32, + 0xd8, 0x4e, 0xf0, 0xfa, 0x3f, 0x82, 0xf5, 0x56, + 0x10, 0x40, 0x71, 0x7f, 0x1f, 0x5e, 0x8e, 0x27, + }, + } + require.Equal(expectedECDSAParams.X, pubKey.ToECDSA().X.Bytes()) + require.Equal(expectedECDSAParams.Y, pubKey.ToECDSA().Y.Bytes()) + + require.Equal(expectedECDSAParams.X, key.ToECDSA().X.Bytes()) + require.Equal(expectedECDSAParams.Y, key.ToECDSA().Y.Bytes()) +} + +func FuzzVerifySignature(f *testing.F) { + f.Fuzz(func(t *testing.T, data []byte) { + require := require.New(t) + + privateKey, err := NewPrivateKey() + require.NoError(err) + + publicKey := privateKey.PublicKey() + + sig, err := privateKey.Sign(data) + require.NoError(err) + + recoveredPublicKey, err := RecoverPublicKey(data, sig) + require.NoError(err) + + require.Equal(publicKey, recoveredPublicKey) + }) +} diff --git a/avalanchego/utils/crypto/secp256k1/test_keys.go b/avalanchego/utils/crypto/secp256k1/test_keys.go index ccd85522..4ceb5674 100644 --- a/avalanchego/utils/crypto/secp256k1/test_keys.go +++ 
b/avalanchego/utils/crypto/secp256k1/test_keys.go @@ -1,11 +1,9 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package secp256k1 -import ( - "github.com/ava-labs/avalanchego/utils/cb58" -) +import "github.com/ava-labs/avalanchego/utils/cb58" func TestKeys() []*PrivateKey { var ( @@ -16,8 +14,7 @@ func TestKeys() []*PrivateKey { "ewoqjP7PxY4yr3iLTpLisriqt94hdyDFNgchSxGGztUrTXtNN", "2RWLv6YVEXDiWLpaCbXhhqxtLbnFaKQsWPSSMSPhpWo47uJAeV", } - keys = make([]*PrivateKey, len(keyStrings)) - factory = Factory{} + keys = make([]*PrivateKey, len(keyStrings)) ) for i, key := range keyStrings { @@ -26,7 +23,7 @@ func TestKeys() []*PrivateKey { panic(err) } - keys[i], err = factory.ToPrivateKey(privKeyBytes) + keys[i], err = ToPrivateKey(privKeyBytes) if err != nil { panic(err) } diff --git a/avalanchego/utils/dynamicip/ifconfig_resolver.go b/avalanchego/utils/dynamicip/ifconfig_resolver.go index 24423814..36c8d5ad 100644 --- a/avalanchego/utils/dynamicip/ifconfig_resolver.go +++ b/avalanchego/utils/dynamicip/ifconfig_resolver.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package dynamicip @@ -20,7 +20,7 @@ type ifConfigResolver struct { } func (r *ifConfigResolver) Resolve(ctx context.Context) (net.IP, error) { - req, err := http.NewRequestWithContext(ctx, "GET", r.url, nil) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, r.url, nil) if err != nil { return nil, err } diff --git a/avalanchego/utils/dynamicip/no_updater.go b/avalanchego/utils/dynamicip/no_updater.go index 5c9e38bd..e3e7c615 100644 --- a/avalanchego/utils/dynamicip/no_updater.go +++ b/avalanchego/utils/dynamicip/no_updater.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
+// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package dynamicip diff --git a/avalanchego/utils/dynamicip/opendns_resolver.go b/avalanchego/utils/dynamicip/opendns_resolver.go index 3bda76c4..5c39c955 100644 --- a/avalanchego/utils/dynamicip/opendns_resolver.go +++ b/avalanchego/utils/dynamicip/opendns_resolver.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package dynamicip diff --git a/avalanchego/utils/dynamicip/resolver.go b/avalanchego/utils/dynamicip/resolver.go index df797e20..45ad3778 100644 --- a/avalanchego/utils/dynamicip/resolver.go +++ b/avalanchego/utils/dynamicip/resolver.go @@ -1,10 +1,11 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package dynamicip import ( "context" + "errors" "fmt" "net" "strings" @@ -23,6 +24,8 @@ const ( IFConfigMeName = "ifconfigme" ) +var errUnknownResolver = errors.New("unknown resolver") + // Resolver resolves our public IP type Resolver interface { // Resolve and return our public IP. @@ -43,6 +46,6 @@ func NewResolver(resolverName string) (Resolver, error) { case IFConfigMeName: return &ifConfigResolver{url: ifConfigMeURL}, nil default: - return nil, fmt.Errorf("got unknown resolver: %s", resolverName) + return nil, fmt.Errorf("%w: %s", errUnknownResolver, resolverName) } } diff --git a/avalanchego/utils/dynamicip/resolver_test.go b/avalanchego/utils/dynamicip/resolver_test.go index 7606bdd8..6af72a98 100644 --- a/avalanchego/utils/dynamicip/resolver_test.go +++ b/avalanchego/utils/dynamicip/resolver_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. package dynamicip @@ -12,44 +12,40 @@ import ( func TestNewResolver(t *testing.T) { type test struct { - service string - validService bool + service string + err error } tests := []test{ { - service: OpenDNSName, - validService: true, + service: OpenDNSName, + err: nil, }, { - service: IFConfigName, - validService: true, + service: IFConfigName, + err: nil, }, { - service: IFConfigCoName, - validService: true, + service: IFConfigCoName, + err: nil, }, { - service: IFConfigMeName, - validService: true, + service: IFConfigMeName, + err: nil, }, { - service: strings.ToUpper(IFConfigMeName), - validService: true, + service: strings.ToUpper(IFConfigMeName), + err: nil, }, { - service: "not a valid resolution service name", - validService: false, + service: "not a valid resolution service name", + err: errUnknownResolver, }, } for _, tt := range tests { t.Run(tt.service, func(t *testing.T) { require := require.New(t) _, err := NewResolver(tt.service) - if tt.validService { - require.NoError(err) - } else { - require.Error(err) - } + require.ErrorIs(err, tt.err) }) } } diff --git a/avalanchego/utils/dynamicip/updater.go b/avalanchego/utils/dynamicip/updater.go index 87c99e6d..9a59c9fd 100644 --- a/avalanchego/utils/dynamicip/updater.go +++ b/avalanchego/utils/dynamicip/updater.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package dynamicip diff --git a/avalanchego/utils/dynamicip/updater_test.go b/avalanchego/utils/dynamicip/updater_test.go index c31031f9..66c9a21c 100644 --- a/avalanchego/utils/dynamicip/updater_test.go +++ b/avalanchego/utils/dynamicip/updater_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package dynamicip @@ -44,8 +44,8 @@ func TestNewUpdater(t *testing.T) { ) // Assert NewUpdater returns expected type - updater, ok := updaterIntf.(*updater) - require.True(ok) + require.IsType(&updater{}, updaterIntf) + updater := updaterIntf.(*updater) // Assert fields set require.Equal(dynamicIP, updater.dynamicIP) diff --git a/avalanchego/utils/error.go b/avalanchego/utils/error.go new file mode 100644 index 00000000..0a6a9f32 --- /dev/null +++ b/avalanchego/utils/error.go @@ -0,0 +1,13 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package utils + +func Err(errors ...error) error { + for _, err := range errors { + if err != nil { + return err + } + } + return nil +} diff --git a/avalanchego/utils/filesystem/io.go b/avalanchego/utils/filesystem/io.go index 939e635a..28a0c4aa 100644 --- a/avalanchego/utils/filesystem/io.go +++ b/avalanchego/utils/filesystem/io.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package filesystem diff --git a/avalanchego/utils/filesystem/mock_file.go b/avalanchego/utils/filesystem/mock_file.go index 6ebc1e68..7b133025 100644 --- a/avalanchego/utils/filesystem/mock_file.go +++ b/avalanchego/utils/filesystem/mock_file.go @@ -1,11 +1,9 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package filesystem -import ( - "io/fs" -) +import "io/fs" var _ fs.DirEntry = MockFile{} diff --git a/avalanchego/utils/filesystem/mock_io.go b/avalanchego/utils/filesystem/mock_io.go index aade6418..06b27dd1 100644 --- a/avalanchego/utils/filesystem/mock_io.go +++ b/avalanchego/utils/filesystem/mock_io.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
-// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/utils/filesystem (interfaces: Reader) +// +// Generated by this command: +// +// mockgen -package=filesystem -destination=utils/filesystem/mock_io.go github.com/ava-labs/avalanchego/utils/filesystem Reader +// // Package filesystem is a generated GoMock package. package filesystem @@ -11,7 +13,7 @@ import ( fs "io/fs" reflect "reflect" - gomock "github.com/golang/mock/gomock" + gomock "go.uber.org/mock/gomock" ) // MockReader is a mock of Reader interface. @@ -47,7 +49,7 @@ func (m *MockReader) ReadDir(arg0 string) ([]fs.DirEntry, error) { } // ReadDir indicates an expected call of ReadDir. -func (mr *MockReaderMockRecorder) ReadDir(arg0 interface{}) *gomock.Call { +func (mr *MockReaderMockRecorder) ReadDir(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReadDir", reflect.TypeOf((*MockReader)(nil).ReadDir), arg0) } diff --git a/avalanchego/utils/filesystem/rename.go b/avalanchego/utils/filesystem/rename.go index 3ab7c147..578c46fb 100644 --- a/avalanchego/utils/filesystem/rename.go +++ b/avalanchego/utils/filesystem/rename.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package filesystem diff --git a/avalanchego/utils/filesystem/rename_test.go b/avalanchego/utils/filesystem/rename_test.go index 12de4a67..53c8a503 100644 --- a/avalanchego/utils/filesystem/rename_test.go +++ b/avalanchego/utils/filesystem/rename_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package filesystem @@ -11,29 +11,31 @@ import ( ) func TestRenameIfExists(t *testing.T) { + require := require.New(t) + t.Parallel() f, err := os.CreateTemp(os.TempDir(), "test-rename") - require.NoError(t, err) + require.NoError(err) a := f.Name() b := a + ".2" - require.NoError(t, f.Close()) + require.NoError(f.Close()) // rename "a" to "b" renamed, err := RenameIfExists(a, b) - require.True(t, renamed) - require.NoError(t, err) + require.NoError(err) + require.True(renamed) // rename "b" to "a" renamed, err = RenameIfExists(b, a) - require.True(t, renamed) - require.NoError(t, err) + require.NoError(err) + require.True(renamed) // remove "a", but rename "a"->"b" should NOT error - require.NoError(t, os.RemoveAll(a)) + require.NoError(os.RemoveAll(a)) renamed, err = RenameIfExists(a, b) - require.False(t, renamed) - require.NoError(t, err) + require.NoError(err) + require.False(renamed) } diff --git a/avalanchego/utils/formatting/address/address.go b/avalanchego/utils/formatting/address/address.go index c0c6cc24..97d4e055 100644 --- a/avalanchego/utils/formatting/address/address.go +++ b/avalanchego/utils/formatting/address/address.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package address @@ -14,7 +14,7 @@ import ( const addressSep = "-" var ( - errNoSeparator = errors.New("no separator found in address") + ErrNoSeparator = errors.New("no separator found in address") errBits5To8 = errors.New("unable to convert address from 5-bit to 8-bit formatting") errBits8To5 = errors.New("unable to convert address from 8-bit to 5-bit formatting") ) @@ -25,7 +25,7 @@ var ( func Parse(addrStr string) (string, string, []byte, error) { addressParts := strings.SplitN(addrStr, addressSep, 2) if len(addressParts) < 2 { - return "", "", nil, errNoSeparator + return "", "", nil, ErrNoSeparator } chainID := addressParts[0] rawAddr := addressParts[1] diff --git a/avalanchego/utils/formatting/address/converter.go b/avalanchego/utils/formatting/address/converter.go index 63f96dd4..f043ab6a 100644 --- a/avalanchego/utils/formatting/address/converter.go +++ b/avalanchego/utils/formatting/address/converter.go @@ -1,11 +1,9 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package address -import ( - "github.com/ava-labs/avalanchego/ids" -) +import "github.com/ava-labs/avalanchego/ids" func ParseToID(addrStr string) (ids.ShortID, error) { _, _, addrBytes, err := Parse(addrStr) diff --git a/avalanchego/utils/formatting/encoding.go b/avalanchego/utils/formatting/encoding.go index 20ab4df3..742800fe 100644 --- a/avalanchego/utils/formatting/encoding.go +++ b/avalanchego/utils/formatting/encoding.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package formatting @@ -69,7 +69,7 @@ func (enc Encoding) MarshalJSON() ([]byte, error) { if !enc.valid() { return nil, errInvalidEncoding } - return []byte("\"" + enc.String() + "\""), nil + return []byte(`"` + enc.String() + `"`), nil } func (enc *Encoding) UnmarshalJSON(b []byte) error { diff --git a/avalanchego/utils/formatting/encoding_benchmark_test.go b/avalanchego/utils/formatting/encoding_benchmark_test.go index 83d9d9c9..87993341 100644 --- a/avalanchego/utils/formatting/encoding_benchmark_test.go +++ b/avalanchego/utils/formatting/encoding_benchmark_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package formatting @@ -8,6 +8,8 @@ import ( "math/rand" "testing" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/utils/units" ) @@ -58,9 +60,8 @@ func BenchmarkEncodings(b *testing.B) { _, _ = rand.Read(bytes) // #nosec G404 b.Run(fmt.Sprintf("%s-%d bytes", benchmark.encoding, benchmark.size), func(b *testing.B) { for n := 0; n < b.N; n++ { - if _, err := Encode(benchmark.encoding, bytes); err != nil { - b.Fatal(err) - } + _, err := Encode(benchmark.encoding, bytes) + require.NoError(b, err) } }) } diff --git a/avalanchego/utils/formatting/encoding_test.go b/avalanchego/utils/formatting/encoding_test.go index 72793477..ec759f33 100644 --- a/avalanchego/utils/formatting/encoding_test.go +++ b/avalanchego/utils/formatting/encoding_test.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package formatting import ( + "encoding/hex" "encoding/json" "testing" @@ -11,44 +12,40 @@ import ( ) func TestEncodingMarshalJSON(t *testing.T) { + require := require.New(t) + enc := Hex jsonBytes, err := enc.MarshalJSON() - if err != nil { - t.Fatal(err) - } - if string(jsonBytes) != `"hex"` { - t.Fatal("should be 'hex'") - } + require.NoError(err) + require.Equal(`"hex"`, string(jsonBytes)) } func TestEncodingUnmarshalJSON(t *testing.T) { + require := require.New(t) + jsonBytes := []byte(`"hex"`) var enc Encoding - if err := json.Unmarshal(jsonBytes, &enc); err != nil { - t.Fatal(err) - } - if enc != Hex { - t.Fatal("should be hex") - } + require.NoError(json.Unmarshal(jsonBytes, &enc)) + require.Equal(Hex, enc) + var serr *json.SyntaxError jsonBytes = []byte("") - if err := json.Unmarshal(jsonBytes, &enc); err == nil { - t.Fatal("should have erred due to invalid encoding") - } + require.ErrorAs(json.Unmarshal(jsonBytes, &enc), &serr) jsonBytes = []byte(`""`) - if err := json.Unmarshal(jsonBytes, &enc); err == nil { - t.Fatal("should have erred due to invalid encoding") - } + err := json.Unmarshal(jsonBytes, &enc) + require.ErrorIs(err, errInvalidEncoding) } func TestEncodingString(t *testing.T) { enc := Hex - require.Equal(t, enc.String(), "hex") + require.Equal(t, "hex", enc.String()) } // Test encoding bytes to a string and decoding back to bytes func TestEncodeDecode(t *testing.T) { + require := require.New(t) + type test struct { encoding Encoding bytes []byte @@ -82,44 +79,63 @@ func TestEncodeDecode(t *testing.T) { for _, test := range tests { // Encode the bytes strResult, err := Encode(test.encoding, test.bytes) - if err != nil { - t.Fatal(err) - } + require.NoError(err) // Make sure the string repr. 
is what we expected - require.Equal(t, test.str, strResult) + require.Equal(test.str, strResult) // Decode the string bytesResult, err := Decode(test.encoding, strResult) - if err != nil { - t.Fatal(err) - } + require.NoError(err) // Make sure we got the same bytes back - require.Equal(t, test.bytes, bytesResult) + require.Equal(test.bytes, bytesResult) } } // Test that encoding nil bytes works func TestEncodeNil(t *testing.T) { + require := require.New(t) + str, err := Encode(Hex, nil) - if err != nil { - t.Fatal(err) - } - require.Equal(t, "0x7852b855", str) + require.NoError(err) + require.Equal("0x7852b855", str) } func TestDecodeHexInvalid(t *testing.T) { - invalidHex := []string{"0", "x", "0xg", "0x0017afa0Zd", "0xafafafafaf"} - for _, str := range invalidHex { - _, err := Decode(Hex, str) - if err == nil { - t.Fatalf("should have failed to decode invalid hex '%s'", str) - } + tests := []struct { + inputStr string + expectedErr error + }{ + { + inputStr: "0", + expectedErr: errMissingHexPrefix, + }, + { + inputStr: "x", + expectedErr: errMissingHexPrefix, + }, + { + inputStr: "0xg", + expectedErr: hex.InvalidByteError('g'), + }, + { + inputStr: "0x0017afa0Zd", + expectedErr: hex.InvalidByteError('Z'), + }, + { + inputStr: "0xafafafafaf", + expectedErr: errBadChecksum, + }, + } + for _, test := range tests { + _, err := Decode(Hex, test.inputStr) + require.ErrorIs(t, err, test.expectedErr) } } func TestDecodeNil(t *testing.T) { - if result, err := Decode(Hex, ""); err != nil || len(result) != 0 { - t.Fatal("decoding the empty string should return an empty byte slice") - } + require := require.New(t) + result, err := Decode(Hex, "") + require.NoError(err) + require.Empty(result) } func FuzzEncodeDecode(f *testing.F) { diff --git a/avalanchego/utils/formatting/int_format.go b/avalanchego/utils/formatting/int_format.go index 6cd8c870..7c26655f 100644 --- a/avalanchego/utils/formatting/int_format.go +++ b/avalanchego/utils/formatting/int_format.go @@ -1,4 +1,4 @@ 
-// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package formatting diff --git a/avalanchego/utils/formatting/int_format_test.go b/avalanchego/utils/formatting/int_format_test.go index 8860df16..aa5dce1d 100644 --- a/avalanchego/utils/formatting/int_format_test.go +++ b/avalanchego/utils/formatting/int_format_test.go @@ -1,44 +1,27 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package formatting import ( "testing" + + "github.com/stretchr/testify/require" ) func TestIntFormat(t *testing.T) { - if format := IntFormat(0); format != "%01d" { - t.Fatalf("Wrong int format: %s", format) - } - if format := IntFormat(9); format != "%01d" { - t.Fatalf("Wrong int format: %s", format) - } - if format := IntFormat(10); format != "%02d" { - t.Fatalf("Wrong int format: %s", format) - } - if format := IntFormat(99); format != "%02d" { - t.Fatalf("Wrong int format: %s", format) - } - if format := IntFormat(100); format != "%03d" { - t.Fatalf("Wrong int format: %s", format) - } - if format := IntFormat(999); format != "%03d" { - t.Fatalf("Wrong int format: %s", format) - } - if format := IntFormat(1000); format != "%04d" { - t.Fatalf("Wrong int format: %s", format) - } - if format := IntFormat(9999); format != "%04d" { - t.Fatalf("Wrong int format: %s", format) - } - if format := IntFormat(10000); format != "%05d" { - t.Fatalf("Wrong int format: %s", format) - } - if format := IntFormat(99999); format != "%05d" { - t.Fatalf("Wrong int format: %s", format) - } - if format := IntFormat(100000); format != "%06d" { - t.Fatalf("Wrong int format: %s", format) - } + require := require.New(t) + + require.Equal("%01d", IntFormat(0)) + require.Equal("%01d", IntFormat(9)) + require.Equal("%02d", IntFormat(10)) + require.Equal("%02d", 
IntFormat(99)) + require.Equal("%03d", IntFormat(100)) + require.Equal("%03d", IntFormat(999)) + require.Equal("%04d", IntFormat(1000)) + require.Equal("%04d", IntFormat(9999)) + require.Equal("%05d", IntFormat(10000)) + require.Equal("%05d", IntFormat(99999)) + require.Equal("%06d", IntFormat(100000)) + require.Equal("%06d", IntFormat(999999)) } diff --git a/avalanchego/utils/formatting/prefixed_stringer.go b/avalanchego/utils/formatting/prefixed_stringer.go index 458c7bce..3c82cddd 100644 --- a/avalanchego/utils/formatting/prefixed_stringer.go +++ b/avalanchego/utils/formatting/prefixed_stringer.go @@ -1,11 +1,9 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package formatting -import ( - "fmt" -) +import "fmt" // PrefixedStringer extends a stringer that adds a prefix type PrefixedStringer interface { diff --git a/avalanchego/utils/hashing/consistent/hashable.go b/avalanchego/utils/hashing/consistent/hashable.go index df4a08d0..a51ce4df 100644 --- a/avalanchego/utils/hashing/consistent/hashable.go +++ b/avalanchego/utils/hashing/consistent/hashable.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package consistent diff --git a/avalanchego/utils/hashing/consistent/ring.go b/avalanchego/utils/hashing/consistent/ring.go index df25bd3d..c99dd276 100644 --- a/avalanchego/utils/hashing/consistent/ring.go +++ b/avalanchego/utils/hashing/consistent/ring.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package consistent @@ -7,8 +7,9 @@ import ( "errors" "sync" - "github.com/ava-labs/avalanchego/utils/hashing" "github.com/google/btree" + + "github.com/ava-labs/avalanchego/utils/hashing" ) var ( diff --git a/avalanchego/utils/hashing/consistent/ring_test.go b/avalanchego/utils/hashing/consistent/ring_test.go index d5b59c58..e2284836 100644 --- a/avalanchego/utils/hashing/consistent/ring_test.go +++ b/avalanchego/utils/hashing/consistent/ring_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package consistent @@ -6,9 +6,8 @@ package consistent import ( "testing" - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" "github.com/ava-labs/avalanchego/utils/hashing" ) @@ -177,11 +176,11 @@ func TestGetMapsToClockwiseNode(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - ring, hasher, ctrl := setupTest(t, 1) - defer ctrl.Finish() + require := require.New(t) + ring, hasher := setupTest(t, 1) // setup expected calls - calls := make([]*gomock.Call, len(test.ringNodes)+1) + calls := make([]any, len(test.ringNodes)+1) for i, key := range test.ringNodes { calls[i] = hasher.EXPECT().Hash(getHashKey(key.ConsistentHashKey(), 0)).Return(key.hash).Times(1) @@ -196,32 +195,27 @@ func TestGetMapsToClockwiseNode(t *testing.T) { } node, err := ring.Get(test.key) - require.Equal(t, test.expectedNode, node) - require.Nil(t, err) + require.NoError(err) + require.Equal(test.expectedNode, node) }) } } // Tests that if we have an empty ring, trying to call Get results in an error, as there is no node to route to. 
func TestGetOnEmptyRingReturnsError(t *testing.T) { - ring, _, ctrl := setupTest(t, 1) - defer ctrl.Finish() + ring, _ := setupTest(t, 1) foo := testKey{ key: "foo", hash: 0, } - - node, err := ring.Get(foo) - - require.Equal(t, nil, node) - require.Equal(t, errEmptyRing, err) + _, err := ring.Get(foo) + require.ErrorIs(t, err, errEmptyRing) } // Tests that trying to call Remove on a node that doesn't exist should return false. func TestRemoveNonExistentKeyReturnsFalse(t *testing.T) { - ring, hasher, ctrl := setupTest(t, 1) - defer ctrl.Finish() + ring, hasher := setupTest(t, 1) gomock.InOrder( hasher.EXPECT().Hash(getHashKey(node1.ConsistentHashKey(), 0)).Return(uint64(1)).Times(1), @@ -233,8 +227,7 @@ func TestRemoveNonExistentKeyReturnsFalse(t *testing.T) { // Tests that trying to call Remove on a node that doesn't exist should return true. func TestRemoveExistingKeyReturnsTrue(t *testing.T) { - ring, hasher, ctrl := setupTest(t, 1) - defer ctrl.Finish() + ring, hasher := setupTest(t, 1) gomock.InOrder( hasher.EXPECT().Hash(getHashKey(node1.ConsistentHashKey(), 0)).Return(uint64(1)).Times(1), @@ -259,8 +252,8 @@ func TestRemoveExistingKeyReturnsTrue(t *testing.T) { // Tests that if we have a collision, the node is replaced. func TestAddCollisionReplacement(t *testing.T) { - ring, hasher, ctrl := setupTest(t, 1) - defer ctrl.Finish() + require := require.New(t) + ring, hasher := setupTest(t, 1) foo := testKey{ key: "foo", @@ -283,15 +276,14 @@ func TestAddCollisionReplacement(t *testing.T) { ring.Add(node2) ringMember, err := ring.Get(foo) - - require.Equal(t, node2, ringMember) - require.Nil(t, err) + require.NoError(err) + require.Equal(node2, ringMember) } // Tests that virtual nodes are replicated on Add. 
func TestAddVirtualNodes(t *testing.T) { - ring, hasher, ctrl := setupTest(t, 3) - defer ctrl.Finish() + require := require.New(t) + ring, hasher := setupTest(t, 3) gomock.InOrder( // we should see 3 virtual nodes created (0, 1, 2) when we insert a node into the ring. @@ -331,31 +323,31 @@ func TestAddVirtualNodes(t *testing.T) { // Gets that should route to node-1 node, err := ring.Get(testKey{key: "foo1"}) - require.Equal(t, node1, node) - require.Nil(t, err) + require.NoError(err) + require.Equal(node1, node) node, err = ring.Get(testKey{key: "foo3"}) - require.Equal(t, node1, node) - require.Nil(t, err) + require.NoError(err) + require.Equal(node1, node) node, err = ring.Get(testKey{key: "foo5"}) - require.Equal(t, node1, node) - require.Nil(t, err) + require.NoError(err) + require.Equal(node1, node) // Gets that should route to node-2 node, err = ring.Get(testKey{key: "foo0"}) - require.Equal(t, node2, node) - require.Nil(t, err) + require.NoError(err) + require.Equal(node2, node) node, err = ring.Get(testKey{key: "foo2"}) - require.Equal(t, node2, node) - require.Nil(t, err) + require.NoError(err) + require.Equal(node2, node) node, err = ring.Get(testKey{key: "foo4"}) - require.Equal(t, node2, node) - require.Nil(t, err) + require.NoError(err) + require.Equal(node2, node) } // Tests that the node routed to changes if an Add results in a key shuffle. func TestGetShuffleOnAdd(t *testing.T) { - ring, hasher, ctrl := setupTest(t, 1) - defer ctrl.Finish() + require := require.New(t) + ring, hasher := setupTest(t, 1) foo := testKey{ key: "foo", @@ -381,9 +373,8 @@ func TestGetShuffleOnAdd(t *testing.T) { // Ring: // ... -> node-1 -> foo -> ... node, err := ring.Get(foo) - - require.Equal(t, node1, node) - require.Nil(t, err) + require.NoError(err) + require.Equal(node1, node) // Add node-2, which results in foo being shuffled from node-1 to node-2. // @@ -396,15 +387,14 @@ func TestGetShuffleOnAdd(t *testing.T) { // Ring: // ... -> node-1 -> foo -> node-2 -> ... 
node, err = ring.Get(foo) - - require.Equal(t, node2, node) - require.Nil(t, err) + require.NoError(err) + require.Equal(node2, node) } // Tests that we can iterate around the ring. func TestIteration(t *testing.T) { - ring, hasher, ctrl := setupTest(t, 1) - defer ctrl.Finish() + require := require.New(t) + ring, hasher := setupTest(t, 1) foo := testKey{ key: "foo", @@ -436,16 +426,16 @@ func TestIteration(t *testing.T) { // Ring: // ... -> foo -> node-1 -> node-2 -> ... node, err := ring.Get(foo) - require.Equal(t, node1, node) - require.Nil(t, err) + require.NoError(err) + require.Equal(node1, node) // iterate by re-using node-1 to get node-2 node, err = ring.Get(node) - require.Equal(t, node2, node) - require.Nil(t, err) + require.NoError(err) + require.Equal(node2, node) } -func setupTest(t *testing.T, virtualNodes int) (Ring, *hashing.MockHasher, *gomock.Controller) { +func setupTest(t *testing.T, virtualNodes int) (Ring, *hashing.MockHasher) { ctrl := gomock.NewController(t) hasher := hashing.NewMockHasher(ctrl) @@ -453,5 +443,5 @@ func setupTest(t *testing.T, virtualNodes int) (Ring, *hashing.MockHasher, *gomo VirtualNodes: virtualNodes, Hasher: hasher, Degree: 2, - }), hasher, ctrl + }), hasher } diff --git a/avalanchego/utils/hashing/hasher.go b/avalanchego/utils/hashing/hasher.go index 7519dfbb..be74c160 100644 --- a/avalanchego/utils/hashing/hasher.go +++ b/avalanchego/utils/hashing/hasher.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package hashing diff --git a/avalanchego/utils/hashing/hashing.go b/avalanchego/utils/hashing/hashing.go index a74ef8fe..0d09fd45 100644 --- a/avalanchego/utils/hashing/hashing.go +++ b/avalanchego/utils/hashing/hashing.go @@ -1,10 +1,11 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. package hashing import ( "crypto/sha256" + "errors" "fmt" "io" @@ -16,6 +17,8 @@ const ( AddrLen = ripemd160.Size ) +var ErrInvalidHashLen = errors.New("invalid hash length") + // Hash256 A 256 bit long hash value. type Hash256 = [HashLen]byte @@ -85,7 +88,7 @@ func Checksum(bytes []byte, length int) []byte { func ToHash256(bytes []byte) (Hash256, error) { hash := Hash256{} if bytesLen := len(bytes); bytesLen != HashLen { - return hash, fmt.Errorf("expected 32 bytes but got %d", bytesLen) + return hash, fmt.Errorf("%w: expected 32 bytes but got %d", ErrInvalidHashLen, bytesLen) } copy(hash[:], bytes) return hash, nil @@ -94,7 +97,7 @@ func ToHash256(bytes []byte) (Hash256, error) { func ToHash160(bytes []byte) (Hash160, error) { hash := Hash160{} if bytesLen := len(bytes); bytesLen != ripemd160.Size { - return hash, fmt.Errorf("expected 20 bytes but got %d", bytesLen) + return hash, fmt.Errorf("%w: expected 20 bytes but got %d", ErrInvalidHashLen, bytesLen) } copy(hash[:], bytes) return hash, nil diff --git a/avalanchego/utils/hashing/mock_hasher.go b/avalanchego/utils/hashing/mock_hasher.go index b903ba8f..c2d5ea4b 100644 --- a/avalanchego/utils/hashing/mock_hasher.go +++ b/avalanchego/utils/hashing/mock_hasher.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/utils/hashing (interfaces: Hasher) +// +// Generated by this command: +// +// mockgen -package=hashing -destination=utils/hashing/mock_hasher.go github.com/ava-labs/avalanchego/utils/hashing Hasher +// // Package hashing is a generated GoMock package. package hashing @@ -10,7 +12,7 @@ package hashing import ( reflect "reflect" - gomock "github.com/golang/mock/gomock" + gomock "go.uber.org/mock/gomock" ) // MockHasher is a mock of Hasher interface. 
@@ -45,7 +47,7 @@ func (m *MockHasher) Hash(arg0 []byte) uint64 { } // Hash indicates an expected call of Hash. -func (mr *MockHasherMockRecorder) Hash(arg0 interface{}) *gomock.Call { +func (mr *MockHasherMockRecorder) Hash(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Hash", reflect.TypeOf((*MockHasher)(nil).Hash), arg0) } diff --git a/avalanchego/utils/heap/map.go b/avalanchego/utils/heap/map.go new file mode 100644 index 00000000..1162e95f --- /dev/null +++ b/avalanchego/utils/heap/map.go @@ -0,0 +1,132 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package heap + +import ( + "container/heap" + + "github.com/ava-labs/avalanchego/utils" +) + +var _ heap.Interface = (*indexedQueue[int, int])(nil) + +func MapValues[K comparable, V any](m Map[K, V]) []V { + result := make([]V, 0, m.Len()) + for _, e := range m.queue.entries { + result = append(result, e.v) + } + return result +} + +// NewMap returns a heap without duplicates ordered by its values +func NewMap[K comparable, V any](less func(a, b V) bool) Map[K, V] { + return Map[K, V]{ + queue: &indexedQueue[K, V]{ + queue: queue[entry[K, V]]{ + less: func(a, b entry[K, V]) bool { + return less(a.v, b.v) + }, + }, + index: make(map[K]int), + }, + } +} + +type Map[K comparable, V any] struct { + queue *indexedQueue[K, V] +} + +// Push returns the evicted previous value if present +func (m *Map[K, V]) Push(k K, v V) (V, bool) { + if i, ok := m.queue.index[k]; ok { + prev := m.queue.entries[i] + m.queue.entries[i].v = v + heap.Fix(m.queue, i) + return prev.v, true + } + + heap.Push(m.queue, entry[K, V]{k: k, v: v}) + return utils.Zero[V](), false +} + +func (m *Map[K, V]) Pop() (K, V, bool) { + if m.Len() == 0 { + return utils.Zero[K](), utils.Zero[V](), false + } + + popped := heap.Pop(m.queue).(entry[K, V]) + return popped.k, popped.v, true +} + +func (m *Map[K, V]) Peek() (K, V, bool) { + 
if m.Len() == 0 { + return utils.Zero[K](), utils.Zero[V](), false + } + + entry := m.queue.entries[0] + return entry.k, entry.v, true +} + +func (m *Map[K, V]) Len() int { + return m.queue.Len() +} + +func (m *Map[K, V]) Remove(k K) (V, bool) { + if i, ok := m.queue.index[k]; ok { + removed := heap.Remove(m.queue, i).(entry[K, V]) + return removed.v, true + } + return utils.Zero[V](), false +} + +func (m *Map[K, V]) Contains(k K) bool { + _, ok := m.queue.index[k] + return ok +} + +func (m *Map[K, V]) Get(k K) (V, bool) { + if i, ok := m.queue.index[k]; ok { + got := m.queue.entries[i] + return got.v, true + } + return utils.Zero[V](), false +} + +func (m *Map[K, V]) Fix(k K) { + if i, ok := m.queue.index[k]; ok { + heap.Fix(m.queue, i) + } +} + +type indexedQueue[K comparable, V any] struct { + queue[entry[K, V]] + index map[K]int +} + +func (h *indexedQueue[K, V]) Swap(i, j int) { + h.entries[i], h.entries[j] = h.entries[j], h.entries[i] + h.index[h.entries[i].k], h.index[h.entries[j].k] = i, j +} + +func (h *indexedQueue[K, V]) Push(x any) { + entry := x.(entry[K, V]) + h.entries = append(h.entries, entry) + h.index[entry.k] = len(h.index) +} + +func (h *indexedQueue[K, V]) Pop() any { + end := len(h.entries) - 1 + + popped := h.entries[end] + h.entries[end] = entry[K, V]{} + h.entries = h.entries[:end] + + delete(h.index, popped.k) + return popped +} + +type entry[K any, V any] struct { + k K + v V +} diff --git a/avalanchego/utils/heap/map_test.go b/avalanchego/utils/heap/map_test.go new file mode 100644 index 00000000..64e3e4e2 --- /dev/null +++ b/avalanchego/utils/heap/map_test.go @@ -0,0 +1,96 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package heap + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestMap(t *testing.T) { + tests := []struct { + name string + setup func(h Map[string, int]) + expected []entry[string, int] + }{ + { + name: "only push", + setup: func(h Map[string, int]) { + h.Push("a", 1) + h.Push("b", 2) + h.Push("c", 3) + }, + expected: []entry[string, int]{ + {k: "a", v: 1}, + {k: "b", v: 2}, + {k: "c", v: 3}, + }, + }, + { + name: "out of order pushes", + setup: func(h Map[string, int]) { + h.Push("a", 1) + h.Push("e", 5) + h.Push("b", 2) + h.Push("d", 4) + h.Push("c", 3) + }, + expected: []entry[string, int]{ + {"a", 1}, + {"b", 2}, + {"c", 3}, + {"d", 4}, + {"e", 5}, + }, + }, + { + name: "push and pop", + setup: func(m Map[string, int]) { + m.Push("a", 1) + m.Push("e", 5) + m.Push("b", 2) + m.Push("d", 4) + m.Push("c", 3) + m.Pop() + m.Pop() + m.Pop() + }, + expected: []entry[string, int]{ + {"d", 4}, + {"e", 5}, + }, + }, + { + name: "duplicate key is overridden", + setup: func(h Map[string, int]) { + h.Push("a", 1) + h.Push("a", 2) + }, + expected: []entry[string, int]{ + {k: "a", v: 2}, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + + h := NewMap[string, int](func(a, b int) bool { + return a < b + }) + + tt.setup(h) + + require.Equal(len(tt.expected), h.Len()) + for _, expected := range tt.expected { + k, v, ok := h.Pop() + require.True(ok) + require.Equal(expected.k, k) + require.Equal(expected.v, v) + } + }) + } +} diff --git a/avalanchego/utils/heap/queue.go b/avalanchego/utils/heap/queue.go new file mode 100644 index 00000000..62687635 --- /dev/null +++ b/avalanchego/utils/heap/queue.go @@ -0,0 +1,94 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package heap + +import ( + "container/heap" + + "github.com/ava-labs/avalanchego/utils" +) + +var _ heap.Interface = (*queue[int])(nil) + +// NewQueue returns an empty heap. See QueueOf for more. +func NewQueue[T any](less func(a, b T) bool) Queue[T] { + return QueueOf(less) +} + +// QueueOf returns a heap containing entries ordered by less. +func QueueOf[T any](less func(a, b T) bool, entries ...T) Queue[T] { + q := Queue[T]{ + queue: &queue[T]{ + entries: make([]T, len(entries)), + less: less, + }, + } + + copy(q.queue.entries, entries) + heap.Init(q.queue) + return q +} + +type Queue[T any] struct { + queue *queue[T] +} + +func (q *Queue[T]) Len() int { + return len(q.queue.entries) +} + +func (q *Queue[T]) Push(t T) { + heap.Push(q.queue, t) +} + +func (q *Queue[T]) Pop() (T, bool) { + if q.Len() == 0 { + return utils.Zero[T](), false + } + + return heap.Pop(q.queue).(T), true +} + +func (q *Queue[T]) Peek() (T, bool) { + if q.Len() == 0 { + return utils.Zero[T](), false + } + + return q.queue.entries[0], true +} + +func (q *Queue[T]) Fix(i int) { + heap.Fix(q.queue, i) +} + +type queue[T any] struct { + entries []T + less func(a, b T) bool +} + +func (q *queue[T]) Len() int { + return len(q.entries) +} + +func (q *queue[T]) Less(i, j int) bool { + return q.less(q.entries[i], q.entries[j]) +} + +func (q *queue[T]) Swap(i, j int) { + q.entries[i], q.entries[j] = q.entries[j], q.entries[i] +} + +func (q *queue[T]) Push(e any) { + q.entries = append(q.entries, e.(T)) +} + +func (q *queue[T]) Pop() any { + end := len(q.entries) - 1 + + popped := q.entries[end] + q.entries[end] = utils.Zero[T]() + q.entries = q.entries[:end] + + return popped +} diff --git a/avalanchego/utils/heap/queue_test.go b/avalanchego/utils/heap/queue_test.go new file mode 100644 index 00000000..66e34171 --- /dev/null +++ b/avalanchego/utils/heap/queue_test.go @@ -0,0 +1,72 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package heap + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestHeap(t *testing.T) { + tests := []struct { + name string + setup func(h Queue[int]) + expected []int + }{ + { + name: "only push", + setup: func(h Queue[int]) { + h.Push(1) + h.Push(2) + h.Push(3) + }, + expected: []int{1, 2, 3}, + }, + { + name: "out of order pushes", + setup: func(h Queue[int]) { + h.Push(1) + h.Push(5) + h.Push(2) + h.Push(4) + h.Push(3) + }, + expected: []int{1, 2, 3, 4, 5}, + }, + { + name: "push and pop", + setup: func(h Queue[int]) { + h.Push(1) + h.Push(5) + h.Push(2) + h.Push(4) + h.Push(3) + h.Pop() + h.Pop() + h.Pop() + }, + expected: []int{4, 5}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + + h := NewQueue[int](func(a, b int) bool { + return a < b + }) + + tt.setup(h) + + require.Equal(len(tt.expected), h.Len()) + for _, expected := range tt.expected { + got, ok := h.Pop() + require.True(ok) + require.Equal(expected, got) + } + }) + } +} diff --git a/avalanchego/utils/heap/set.go b/avalanchego/utils/heap/set.go new file mode 100644 index 00000000..e1865f1e --- /dev/null +++ b/avalanchego/utils/heap/set.go @@ -0,0 +1,48 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package heap + +// NewSet returns a heap without duplicates ordered by its values +func NewSet[T comparable](less func(a, b T) bool) Set[T] { + return Set[T]{ + set: NewMap[T, T](less), + } +} + +type Set[T comparable] struct { + set Map[T, T] +} + +// Push returns if the entry was added +func (s Set[T]) Push(t T) bool { + _, hadValue := s.set.Push(t, t) + return !hadValue +} + +func (s Set[T]) Pop() (T, bool) { + pop, _, ok := s.set.Pop() + return pop, ok +} + +func (s Set[T]) Peek() (T, bool) { + peek, _, ok := s.set.Peek() + return peek, ok +} + +func (s Set[T]) Len() int { + return s.set.Len() +} + +func (s Set[T]) Remove(t T) bool { + _, existed := s.set.Remove(t) + return existed +} + +func (s Set[T]) Fix(t T) { + s.set.Fix(t) +} + +func (s Set[T]) Contains(t T) bool { + return s.set.Contains(t) +} diff --git a/avalanchego/utils/heap/set_test.go b/avalanchego/utils/heap/set_test.go new file mode 100644 index 00000000..d4752261 --- /dev/null +++ b/avalanchego/utils/heap/set_test.go @@ -0,0 +1,72 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package heap + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestSet(t *testing.T) { + tests := []struct { + name string + setup func(h Set[int]) + expected []int + }{ + { + name: "only push", + setup: func(h Set[int]) { + h.Push(1) + h.Push(2) + h.Push(3) + }, + expected: []int{1, 2, 3}, + }, + { + name: "out of order pushes", + setup: func(h Set[int]) { + h.Push(1) + h.Push(5) + h.Push(2) + h.Push(4) + h.Push(3) + }, + expected: []int{1, 2, 3, 4, 5}, + }, + { + name: "push and pop", + setup: func(h Set[int]) { + h.Push(1) + h.Push(5) + h.Push(2) + h.Push(4) + h.Push(3) + h.Pop() + h.Pop() + h.Pop() + }, + expected: []int{4, 5}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + + h := NewSet[int](func(a, b int) bool { + return a < b + }) + + tt.setup(h) + + require.Equal(len(tt.expected), h.Len()) + for _, expected := range tt.expected { + got, ok := h.Pop() + require.True(ok) + require.Equal(expected, got) + } + }) + } +} diff --git a/avalanchego/utils/ips/claimed_ip_port.go b/avalanchego/utils/ips/claimed_ip_port.go index 94a5f693..2ef6c0a7 100644 --- a/avalanchego/utils/ips/claimed_ip_port.go +++ b/avalanchego/utils/ips/claimed_ip_port.go @@ -1,29 +1,26 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package ips import ( - "crypto/x509" - "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/staking" + "github.com/ava-labs/avalanchego/utils/hashing" + "github.com/ava-labs/avalanchego/utils/wrappers" ) -// Can't import these from wrappers package due to circular import. 
const ( - intLen = 4 - longLen = 8 - ipLen = 18 - idLen = 32 // Certificate length, signature length, IP, timestamp, tx ID - baseIPCertDescLen = 2*intLen + ipLen + longLen + idLen + baseIPCertDescLen = 2*wrappers.IntLen + IPPortLen + wrappers.LongLen + ids.IDLen + preimageLen = ids.IDLen + wrappers.LongLen ) // A self contained proof that a peer is claiming ownership of an IPPort at a // given time. type ClaimedIPPort struct { // The peer's certificate. - Cert *x509.Certificate + Cert *staking.Certificate // The peer's claimed IP and port. IPPort IPPort // The time the peer claimed to own this IP and port. @@ -33,12 +30,36 @@ type ClaimedIPPort struct { // actually claimed by the peer in question, and not by a malicious peer // trying to get us to dial bogus IPPorts. Signature []byte - // The txID that added this peer into the validator set - TxID ids.ID + // NodeID derived from the peer certificate. + NodeID ids.NodeID + // GossipID derived from the nodeID and timestamp. + GossipID ids.ID +} + +func NewClaimedIPPort( + cert *staking.Certificate, + ipPort IPPort, + timestamp uint64, + signature []byte, +) *ClaimedIPPort { + ip := &ClaimedIPPort{ + Cert: cert, + IPPort: ipPort, + Timestamp: timestamp, + Signature: signature, + NodeID: ids.NodeIDFromCert(cert), + } + + packer := wrappers.Packer{ + Bytes: make([]byte, preimageLen), + } + packer.PackFixedBytes(ip.NodeID[:]) + packer.PackLong(timestamp) + ip.GossipID = hashing.ComputeHash256Array(packer.Bytes) + return ip } -// Returns the length of the byte representation of this ClaimedIPPort. -func (i *ClaimedIPPort) BytesLen() int { - // See wrappers.PackPeerTrackInfo. +// Returns the approximate size of the binary representation of this ClaimedIPPort. 
+func (i *ClaimedIPPort) Size() int { return baseIPCertDescLen + len(i.Cert.Raw) + len(i.Signature) } diff --git a/avalanchego/utils/ips/dynamic_ip_port.go b/avalanchego/utils/ips/dynamic_ip_port.go index 3f30dc0a..0b83ab59 100644 --- a/avalanchego/utils/ips/dynamic_ip_port.go +++ b/avalanchego/utils/ips/dynamic_ip_port.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package ips diff --git a/avalanchego/utils/ips/ip_port.go b/avalanchego/utils/ips/ip_port.go index ba0e74af..eea20352 100644 --- a/avalanchego/utils/ips/ip_port.go +++ b/avalanchego/utils/ips/ip_port.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package ips @@ -12,7 +12,47 @@ import ( "github.com/ava-labs/avalanchego/utils/wrappers" ) -var errBadIP = errors.New("bad ip format") +const ( + IPPortLen = net.IPv6len + wrappers.ShortLen + nullStr = "null" +) + +var ( + errMissingQuotes = errors.New("first and last characters should be quotes") + errBadIP = errors.New("bad ip format") +) + +type IPDesc IPPort + +func (ipDesc IPDesc) String() string { + return IPPort(ipDesc).String() +} + +func (ipDesc IPDesc) MarshalJSON() ([]byte, error) { + return []byte(`"` + ipDesc.String() + `"`), nil +} + +func (ipDesc *IPDesc) UnmarshalJSON(b []byte) error { + str := string(b) + if str == nullStr { // If "null", do nothing + return nil + } else if len(str) < 2 { + return errMissingQuotes + } + + lastIndex := len(str) - 1 + if str[0] != '"' || str[lastIndex] != '"' { + return errMissingQuotes + } + + ipPort, err := ToIPPort(str[1:lastIndex]) + if err != nil { + return fmt.Errorf("couldn't decode to IPPort: %w", err) + } + *ipDesc = IPDesc(ipPort) + + return nil +} // An IP and a port. 
type IPPort struct { @@ -25,7 +65,7 @@ func (ipPort IPPort) Equal(other IPPort) bool { } func (ipPort IPPort) String() string { - return net.JoinHostPort(ipPort.IP.String(), fmt.Sprintf("%d", ipPort.Port)) + return net.JoinHostPort(ipPort.IP.String(), strconv.FormatUint(uint64(ipPort.Port), 10)) } // IsZero returns if the IP or port is zeroed out diff --git a/avalanchego/utils/ips/ip_test.go b/avalanchego/utils/ips/ip_test.go index 14853ea7..903f26a2 100644 --- a/avalanchego/utils/ips/ip_test.go +++ b/avalanchego/utils/ips/ip_test.go @@ -1,32 +1,39 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package ips import ( - "fmt" + "encoding/json" "net" + "strconv" "testing" + + "github.com/stretchr/testify/require" ) func TestIPPortEqual(t *testing.T) { tests := []struct { + ipPort string ipPort1 IPPort ipPort2 IPPort result bool }{ // Expected equal { + `"127.0.0.1:0"`, IPPort{net.ParseIP("127.0.0.1"), 0}, IPPort{net.ParseIP("127.0.0.1"), 0}, true, }, { + `"[::1]:0"`, IPPort{net.ParseIP("::1"), 0}, IPPort{net.ParseIP("::1"), 0}, true, }, { + `"127.0.0.1:0"`, IPPort{net.ParseIP("127.0.0.1"), 0}, IPPort{net.ParseIP("::ffff:127.0.0.1"), 0}, true, @@ -34,35 +41,37 @@ func TestIPPortEqual(t *testing.T) { // Expected unequal { + `"127.0.0.1:0"`, IPPort{net.ParseIP("127.0.0.1"), 0}, IPPort{net.ParseIP("1.2.3.4"), 0}, false, }, { + `"[::1]:0"`, IPPort{net.ParseIP("::1"), 0}, IPPort{net.ParseIP("2001::1"), 0}, false, }, { + `"127.0.0.1:0"`, IPPort{net.ParseIP("127.0.0.1"), 0}, IPPort{net.ParseIP("127.0.0.1"), 1}, false, }, } for i, tt := range tests { - t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { - if tt.ipPort1.IP == nil { - t.Error("ipPort1 nil") - } else if tt.ipPort2.IP == nil { - t.Error("ipPort2 nil") - } - result := tt.ipPort1.Equal(tt.ipPort2) - if result && result != tt.result { - t.Error("Expected IPPort to be equal, but they were 
not") - } - if !result && result != tt.result { - t.Error("Expected IPPort to be unequal, but they were equal") - } + t.Run(strconv.Itoa(i), func(t *testing.T) { + require := require.New(t) + + ipPort := IPDesc{} + require.NoError(ipPort.UnmarshalJSON([]byte(tt.ipPort))) + require.Equal(tt.ipPort1, IPPort(ipPort)) + + ipPortJSON, err := json.Marshal(ipPort) + require.NoError(err) + require.Equal(tt.ipPort, string(ipPortJSON)) + + require.Equal(tt.result, tt.ipPort1.Equal(tt.ipPort2)) }) } } @@ -79,37 +88,70 @@ func TestIPPortString(t *testing.T) { } for _, tt := range tests { t.Run(tt.result, func(t *testing.T) { - if result := tt.ipPort.String(); result != tt.result { - t.Errorf("Expected %q, got %q", tt.result, result) - } + require.Equal(t, tt.result, tt.ipPort.String()) }) } } func TestToIPPortError(t *testing.T) { tests := []struct { - in string - out IPPort + in string + out IPPort + expectedErr error }{ - {"", IPPort{}}, - {":", IPPort{}}, - {"abc:", IPPort{}}, - {":abc", IPPort{}}, - {"abc:abc", IPPort{}}, - {"127.0.0.1:", IPPort{}}, - {":1", IPPort{}}, - {"::1", IPPort{}}, - {"::1:42", IPPort{}}, + { + in: "", + out: IPPort{}, + expectedErr: errBadIP, + }, + { + in: ":", + out: IPPort{}, + expectedErr: strconv.ErrSyntax, + }, + { + in: "abc:", + out: IPPort{}, + expectedErr: strconv.ErrSyntax, + }, + { + in: ":abc", + out: IPPort{}, + expectedErr: strconv.ErrSyntax, + }, + { + in: "abc:abc", + out: IPPort{}, + expectedErr: strconv.ErrSyntax, + }, + { + in: "127.0.0.1:", + out: IPPort{}, + expectedErr: strconv.ErrSyntax, + }, + { + in: ":1", + out: IPPort{}, + expectedErr: errBadIP, + }, + { + in: "::1", + out: IPPort{}, + expectedErr: errBadIP, + }, + { + in: "::1:42", + out: IPPort{}, + expectedErr: errBadIP, + }, } for _, tt := range tests { t.Run(tt.in, func(t *testing.T) { + require := require.New(t) + result, err := ToIPPort(tt.in) - if err == nil { - t.Errorf("Unexpected success") - } - if !tt.out.Equal(result) { - t.Errorf("Expected %v, got %v", 
tt.out, result) - } + require.ErrorIs(err, tt.expectedErr) + require.Equal(tt.out, result) }) } } @@ -124,13 +166,11 @@ func TestToIPPort(t *testing.T) { } for _, tt := range tests { t.Run(tt.in, func(t *testing.T) { + require := require.New(t) + result, err := ToIPPort(tt.in) - if err != nil { - t.Errorf("Unexpected error %v", err) - } - if !tt.out.Equal(result) { - t.Errorf("Expected %#v, got %#v", tt.out, result) - } + require.NoError(err) + require.Equal(tt.out, result) }) } } diff --git a/avalanchego/utils/ips/lookup.go b/avalanchego/utils/ips/lookup.go index 8ae3de47..cdf9176f 100644 --- a/avalanchego/utils/ips/lookup.go +++ b/avalanchego/utils/ips/lookup.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package ips diff --git a/avalanchego/utils/ips/lookup_test.go b/avalanchego/utils/ips/lookup_test.go index 52c0e5ed..9fecccc5 100644 --- a/avalanchego/utils/ips/lookup_test.go +++ b/avalanchego/utils/ips/lookup_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package ips diff --git a/avalanchego/utils/json/codec.go b/avalanchego/utils/json/codec.go index 5871d67f..0bf51dce 100644 --- a/avalanchego/utils/json/codec.go +++ b/avalanchego/utils/json/codec.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package json diff --git a/avalanchego/utils/json/float32.go b/avalanchego/utils/json/float32.go index 70fcc7a0..ca35a760 100644 --- a/avalanchego/utils/json/float32.go +++ b/avalanchego/utils/json/float32.go @@ -1,16 +1,14 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. 
All rights reserved. // See the file LICENSE for licensing terms. package json -import ( - "strconv" -) +import "strconv" type Float32 float32 func (f Float32) MarshalJSON() ([]byte, error) { - return []byte("\"" + strconv.FormatFloat(float64(f), byte('f'), 4, 32) + "\""), nil + return []byte(`"` + strconv.FormatFloat(float64(f), byte('f'), 4, 32) + `"`), nil } func (f *Float32) UnmarshalJSON(b []byte) error { diff --git a/avalanchego/utils/json/float32_test.go b/avalanchego/utils/json/float32_test.go index a3a4fdc4..519ca7f4 100644 --- a/avalanchego/utils/json/float32_test.go +++ b/avalanchego/utils/json/float32_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package json @@ -6,9 +6,13 @@ package json import ( "fmt" "testing" + + "github.com/stretchr/testify/require" ) func TestFloat32(t *testing.T) { + require := require.New(t) + type test struct { f Float32 expectedStr string @@ -45,17 +49,11 @@ func TestFloat32(t *testing.T) { for _, tt := range tests { jsonBytes, err := tt.f.MarshalJSON() - if err != nil { - t.Fatalf("couldn't marshal %f: %s", float32(tt.f), err) - } else if string(jsonBytes) != fmt.Sprintf("\"%s\"", tt.expectedStr) { - t.Fatalf("expected %f to marshal to %s but got %s", tt.f, tt.expectedStr, string(jsonBytes)) - } + require.NoError(err) + require.Equal(fmt.Sprintf(`"%s"`, tt.expectedStr), string(jsonBytes)) var f Float32 - if err := f.UnmarshalJSON(jsonBytes); err != nil { - t.Fatalf("couldn't unmarshal %s to Float32: %s", string(jsonBytes), err) - } else if float32(f) != tt.expectedUnmarshalled { - t.Fatalf("expected %s to unmarshal to %f but got %f", string(jsonBytes), tt.expectedUnmarshalled, f) - } + require.NoError(f.UnmarshalJSON(jsonBytes)) + require.Equal(tt.expectedUnmarshalled, float32(f)) } } diff --git a/avalanchego/utils/json/float64.go b/avalanchego/utils/json/float64.go index 
25a9adec..80fb8ae7 100644 --- a/avalanchego/utils/json/float64.go +++ b/avalanchego/utils/json/float64.go @@ -1,16 +1,14 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package json -import ( - "strconv" -) +import "strconv" type Float64 float64 func (f Float64) MarshalJSON() ([]byte, error) { - return []byte("\"" + strconv.FormatFloat(float64(f), byte('f'), 4, 64) + "\""), nil + return []byte(`"` + strconv.FormatFloat(float64(f), byte('f'), 4, 64) + `"`), nil } func (f *Float64) UnmarshalJSON(b []byte) error { diff --git a/avalanchego/utils/json/uint16.go b/avalanchego/utils/json/uint16.go index b7b36e9a..03e0f133 100644 --- a/avalanchego/utils/json/uint16.go +++ b/avalanchego/utils/json/uint16.go @@ -1,16 +1,14 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package json -import ( - "strconv" -) +import "strconv" type Uint16 uint16 func (u Uint16) MarshalJSON() ([]byte, error) { - return []byte("\"" + strconv.FormatUint(uint64(u), 10) + "\""), nil + return []byte(`"` + strconv.FormatUint(uint64(u), 10) + `"`), nil } func (u *Uint16) UnmarshalJSON(b []byte) error { diff --git a/avalanchego/utils/json/uint32.go b/avalanchego/utils/json/uint32.go index 95267659..bae5b885 100644 --- a/avalanchego/utils/json/uint32.go +++ b/avalanchego/utils/json/uint32.go @@ -1,16 +1,14 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package json -import ( - "strconv" -) +import "strconv" type Uint32 uint32 func (u Uint32) MarshalJSON() ([]byte, error) { - return []byte("\"" + strconv.FormatUint(uint64(u), 10) + "\""), nil + return []byte(`"` + strconv.FormatUint(uint64(u), 10) + `"`), nil } func (u *Uint32) UnmarshalJSON(b []byte) error { diff --git a/avalanchego/utils/json/uint64.go b/avalanchego/utils/json/uint64.go index ba318903..60bc9988 100644 --- a/avalanchego/utils/json/uint64.go +++ b/avalanchego/utils/json/uint64.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package json @@ -8,7 +8,7 @@ import "strconv" type Uint64 uint64 func (u Uint64) MarshalJSON() ([]byte, error) { - return []byte("\"" + strconv.FormatUint(uint64(u), 10) + "\""), nil + return []byte(`"` + strconv.FormatUint(uint64(u), 10) + `"`), nil } func (u *Uint64) UnmarshalJSON(b []byte) error { diff --git a/avalanchego/utils/json/uint8.go b/avalanchego/utils/json/uint8.go index d0571a2c..da2ca527 100644 --- a/avalanchego/utils/json/uint8.go +++ b/avalanchego/utils/json/uint8.go @@ -1,16 +1,14 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package json -import ( - "strconv" -) +import "strconv" type Uint8 uint8 func (u Uint8) MarshalJSON() ([]byte, error) { - return []byte("\"" + strconv.FormatUint(uint64(u), 10) + "\""), nil + return []byte(`"` + strconv.FormatUint(uint64(u), 10) + `"`), nil } func (u *Uint8) UnmarshalJSON(b []byte) error { diff --git a/avalanchego/utils/linkedhashmap/iterator.go b/avalanchego/utils/linkedhashmap/iterator.go index 27c4427b..a2869aac 100644 --- a/avalanchego/utils/linkedhashmap/iterator.go +++ b/avalanchego/utils/linkedhashmap/iterator.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inte. 
All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package linkedhashmap @@ -59,8 +59,9 @@ func (it *iterator[K, V]) Next() bool { // It's important to ensure that [it.next] is not nil // by not deleting elements that have not yet been iterated // over from [it.lh] - it.key = it.next.Value.(keyValue[K, V]).key - it.value = it.next.Value.(keyValue[K, V]).value + kv := it.next.Value.(keyValue[K, V]) + it.key = kv.key + it.value = kv.value it.next = it.next.Next() // Next time, return next element it.exhausted = it.next == nil return true diff --git a/avalanchego/utils/linkedhashmap/linkedhashmap.go b/avalanchego/utils/linkedhashmap/linkedhashmap.go index e4c1b3f4..9ae5b83a 100644 --- a/avalanchego/utils/linkedhashmap/linkedhashmap.go +++ b/avalanchego/utils/linkedhashmap/linkedhashmap.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inte. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package linkedhashmap @@ -17,7 +17,7 @@ var _ LinkedHashmap[int, struct{}] = (*linkedHashmap[int, struct{}])(nil) type Hashmap[K, V any] interface { Put(key K, val V) Get(key K) (val V, exists bool) - Delete(key K) + Delete(key K) (deleted bool) Len() int } @@ -57,36 +57,36 @@ func (lh *linkedHashmap[K, V]) Put(key K, val V) { } func (lh *linkedHashmap[K, V]) Get(key K) (V, bool) { - lh.lock.Lock() - defer lh.lock.Unlock() + lh.lock.RLock() + defer lh.lock.RUnlock() return lh.get(key) } -func (lh *linkedHashmap[K, V]) Delete(key K) { +func (lh *linkedHashmap[K, V]) Delete(key K) bool { lh.lock.Lock() defer lh.lock.Unlock() - lh.delete(key) + return lh.delete(key) } func (lh *linkedHashmap[K, V]) Len() int { - lh.lock.Lock() - defer lh.lock.Unlock() + lh.lock.RLock() + defer lh.lock.RUnlock() return lh.len() } func (lh *linkedHashmap[K, V]) Oldest() (K, V, bool) { - lh.lock.Lock() - defer lh.lock.Unlock() + lh.lock.RLock() + defer lh.lock.RUnlock() return lh.oldest() } func (lh *linkedHashmap[K, V]) Newest() (K, V, bool) { - lh.lock.Lock() - defer lh.lock.Unlock() + lh.lock.RLock() + defer lh.lock.RUnlock() return lh.newest() } @@ -108,16 +108,19 @@ func (lh *linkedHashmap[K, V]) put(key K, value V) { func (lh *linkedHashmap[K, V]) get(key K) (V, bool) { if e, ok := lh.entryMap[key]; ok { - return e.Value.(keyValue[K, V]).value, true + kv := e.Value.(keyValue[K, V]) + return kv.value, true } return utils.Zero[V](), false } -func (lh *linkedHashmap[K, V]) delete(key K) { - if e, ok := lh.entryMap[key]; ok { +func (lh *linkedHashmap[K, V]) delete(key K) bool { + e, ok := lh.entryMap[key] + if ok { lh.entryList.Remove(e) delete(lh.entryMap, key) } + return ok } func (lh *linkedHashmap[K, V]) len() int { @@ -126,14 +129,16 @@ func (lh *linkedHashmap[K, V]) len() int { func (lh *linkedHashmap[K, V]) oldest() (K, V, bool) { if val := lh.entryList.Front(); val != nil { - return val.Value.(keyValue[K, V]).key, val.Value.(keyValue[K, V]).value, true + kv := 
val.Value.(keyValue[K, V]) + return kv.key, kv.value, true } return utils.Zero[K](), utils.Zero[V](), false } func (lh *linkedHashmap[K, V]) newest() (K, V, bool) { if val := lh.entryList.Back(); val != nil { - return val.Value.(keyValue[K, V]).key, val.Value.(keyValue[K, V]).value, true + kv := val.Value.(keyValue[K, V]) + return kv.key, kv.value, true } return utils.Zero[K](), utils.Zero[V](), false } diff --git a/avalanchego/utils/linkedhashmap/linkedhashmap_test.go b/avalanchego/utils/linkedhashmap/linkedhashmap_test.go index 4b251a0a..372bd24b 100644 --- a/avalanchego/utils/linkedhashmap/linkedhashmap_test.go +++ b/avalanchego/utils/linkedhashmap/linkedhashmap_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package linkedhashmap @@ -15,7 +15,7 @@ func TestLinkedHashmap(t *testing.T) { require := require.New(t) lh := New[ids.ID, int]() - require.Equal(0, lh.Len(), "a new hashmap should be empty") + require.Zero(lh.Len(), "a new hashmap should be empty") key0 := ids.GenerateTestID() _, exists := lh.Get(key0) @@ -32,17 +32,17 @@ func TestLinkedHashmap(t *testing.T) { val0, exists := lh.Get(key0) require.True(exists, "should have found the value") - require.Equal(0, val0, "wrong value") + require.Zero(val0, "wrong value") rkey0, val0, exists := lh.Oldest() require.True(exists, "should have found the value") require.Equal(key0, rkey0, "wrong key") - require.Equal(0, val0, "wrong value") + require.Zero(val0, "wrong value") rkey0, val0, exists = lh.Newest() require.True(exists, "should have found the value") require.Equal(key0, rkey0, "wrong key") - require.Equal(0, val0, "wrong value") + require.Zero(val0, "wrong value") key1 := ids.GenerateTestID() lh.Put(key1, 1) @@ -55,14 +55,14 @@ func TestLinkedHashmap(t *testing.T) { rkey0, val0, exists = lh.Oldest() require.True(exists, "should have found the value") 
require.Equal(key0, rkey0, "wrong key") - require.Equal(0, val0, "wrong value") + require.Zero(val0, "wrong value") rkey1, val1, exists := lh.Newest() require.True(exists, "should have found the value") require.Equal(key1, rkey1, "wrong key") require.Equal(1, val1, "wrong value") - lh.Delete(key0) + require.True(lh.Delete(key0)) require.Equal(1, lh.Len(), "wrong hashmap length") _, exists = lh.Get(key0) @@ -87,7 +87,7 @@ func TestLinkedHashmap(t *testing.T) { rkey0, val0, exists = lh.Oldest() require.True(exists, "should have found the value") require.Equal(key0, rkey0, "wrong key") - require.Equal(0, val0, "wrong value") + require.Zero(val0, "wrong value") rkey1, val1, exists = lh.Newest() require.True(exists, "should have found the value") @@ -108,7 +108,7 @@ func TestIterator(t *testing.T) { require.False(iter.Next()) require.False(iter.Next()) // Should be empty - require.EqualValues(ids.Empty, iter.Key()) + require.Equal(ids.Empty, iter.Key()) require.Zero(iter.Value()) } @@ -119,20 +119,20 @@ func TestIterator(t *testing.T) { require.NotNil(iter) lh.Put(id1, 1) require.True(iter.Next()) - require.EqualValues(id1, iter.Key()) - require.EqualValues(1, iter.Value()) + require.Equal(id1, iter.Key()) + require.Equal(1, iter.Value()) // Should be empty require.False(iter.Next()) // Re-assign id1 --> 10 lh.Put(id1, 10) iter = lh.NewIterator() // New iterator require.True(iter.Next()) - require.EqualValues(id1, iter.Key()) - require.EqualValues(10, iter.Value()) + require.Equal(id1, iter.Key()) + require.Equal(10, iter.Value()) // Should be empty require.False(iter.Next()) // Delete id1 - lh.Delete(id1) + require.True(lh.Delete(id1)) iter = lh.NewIterator() require.NotNil(iter) // Should immediately be exhausted @@ -148,14 +148,14 @@ func TestIterator(t *testing.T) { iter := lh.NewIterator() // Should give back all 3 elements require.True(iter.Next()) - require.EqualValues(id1, iter.Key()) - require.EqualValues(1, iter.Value()) + require.Equal(id1, iter.Key()) + 
require.Equal(1, iter.Value()) require.True(iter.Next()) - require.EqualValues(id2, iter.Key()) - require.EqualValues(2, iter.Value()) + require.Equal(id2, iter.Key()) + require.Equal(2, iter.Value()) require.True(iter.Next()) - require.EqualValues(id3, iter.Key()) - require.EqualValues(3, iter.Value()) + require.Equal(id3, iter.Key()) + require.Equal(3, iter.Value()) // Should be exhausted require.False(iter.Next()) } @@ -169,11 +169,11 @@ func TestIterator(t *testing.T) { iter := lh.NewIterator() require.True(iter.Next()) require.True(iter.Next()) - lh.Delete(id1) - lh.Delete(id2) + require.True(lh.Delete(id1)) + require.True(lh.Delete(id2)) require.True(iter.Next()) - require.EqualValues(id3, iter.Key()) - require.EqualValues(3, iter.Value()) + require.Equal(id3, iter.Key()) + require.Equal(3, iter.Value()) // Should be exhausted require.False(iter.Next()) } diff --git a/avalanchego/utils/logging/color.go b/avalanchego/utils/logging/color.go index 323d1d13..8fb7a8b6 100644 --- a/avalanchego/utils/logging/color.go +++ b/avalanchego/utils/logging/color.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package logging diff --git a/avalanchego/utils/logging/config.go b/avalanchego/utils/logging/config.go index baeb666d..06d7f8ca 100644 --- a/avalanchego/utils/logging/config.go +++ b/avalanchego/utils/logging/config.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package logging diff --git a/avalanchego/utils/logging/factory.go b/avalanchego/utils/logging/factory.go index b3426257..3fe547f8 100644 --- a/avalanchego/utils/logging/factory.go +++ b/avalanchego/utils/logging/factory.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
+// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package logging @@ -11,9 +11,7 @@ import ( "go.uber.org/zap" "go.uber.org/zap/zapcore" - "golang.org/x/exp/maps" - "gopkg.in/natefinch/lumberjack.v2" ) diff --git a/avalanchego/utils/logging/format.go b/avalanchego/utils/logging/format.go index 1e979f74..53313c3d 100644 --- a/avalanchego/utils/logging/format.go +++ b/avalanchego/utils/logging/format.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package logging @@ -9,7 +9,7 @@ import ( "strings" "go.uber.org/zap/zapcore" - "golang.org/x/crypto/ssh/terminal" + "golang.org/x/term" ) // Format modes available @@ -59,7 +59,7 @@ func ToFormat(h string, fd uintptr) (Format, error) { case "JSON": return JSON, nil case "AUTO": - if !terminal.IsTerminal(int(fd)) { + if !term.IsTerminal(int(fd)) { return Plain, nil } return Colors, nil diff --git a/avalanchego/utils/logging/level.go b/avalanchego/utils/logging/level.go index a7951c03..7c1696b3 100644 --- a/avalanchego/utils/logging/level.go +++ b/avalanchego/utils/logging/level.go @@ -1,10 +1,11 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package logging import ( "encoding/json" + "errors" "fmt" "strings" @@ -44,6 +45,8 @@ const ( unknownLowStr = "unkno" ) +var ErrUnknownLevel = errors.New("unknown log level") + // Inverse of Level.String() func ToLevel(l string) (Level, error) { switch strings.ToUpper(l) { @@ -64,7 +67,7 @@ func ToLevel(l string) (Level, error) { case verboStr: return Verbo, nil default: - return Off, fmt.Errorf("unknown log level: %q", l) + return Off, fmt.Errorf("%w: %q", ErrUnknownLevel, l) } } diff --git a/avalanchego/utils/logging/log.go b/avalanchego/utils/logging/log.go index bff8934e..b9fc8f79 100644 --- a/avalanchego/utils/logging/log.go +++ b/avalanchego/utils/logging/log.go @@ -1,10 +1,11 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package logging import ( "io" + "os" "go.uber.org/zap" "go.uber.org/zap/zapcore" @@ -67,10 +68,17 @@ func (l *log) Write(p []byte) (int, error) { // TODO: return errors here func (l *log) Stop() { for _, wc := range l.wrappedCores { - _ = wc.Writer.Close() + if wc.Writer != os.Stdout && wc.Writer != os.Stderr { + _ = wc.Writer.Close() + } } } +// Enabled returns true if the given level is at or above this level. +func (l *log) Enabled(lvl Level) bool { + return l.internalLogger.Level().Enabled(zapcore.Level(lvl)) +} + // Should only be called from [Level] functions. func (l *log) log(level Level, msg string, fields ...zap.Field) { if ce := l.internalLogger.Check(zapcore.Level(level), msg); ce != nil { diff --git a/avalanchego/utils/logging/log_test.go b/avalanchego/utils/logging/log_test.go index 4242ecab..cd7396b6 100644 --- a/avalanchego/utils/logging/log_test.go +++ b/avalanchego/utils/logging/log_test.go @@ -1,9 +1,13 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package logging -import "testing" +import ( + "testing" + + "github.com/stretchr/testify/require" +) func TestLog(t *testing.T) { log := NewLogger("", NewWrappedCore(Info, Discard, Plain.ConsoleEncoder())) @@ -17,7 +21,5 @@ func TestLog(t *testing.T) { } log.RecoverAndExit(panicFunc, exitFunc) - if !*recovered { - t.Fatalf("Exit function was never called") - } + require.True(t, *recovered) } diff --git a/avalanchego/utils/logging/logger.go b/avalanchego/utils/logging/logger.go index 79acb683..2ca95bff 100644 --- a/avalanchego/utils/logging/logger.go +++ b/avalanchego/utils/logging/logger.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package logging @@ -38,6 +38,8 @@ type Logger interface { // SetLevel that this logger should log to SetLevel(level Level) + // Enabled returns true if the given level is at or above this level. + Enabled(lvl Level) bool // Recovers a panic, logs the error, and rethrows the panic. StopOnPanic() diff --git a/avalanchego/utils/logging/mock_logger.go b/avalanchego/utils/logging/mock_logger.go deleted file mode 100644 index 19ade121..00000000 --- a/avalanchego/utils/logging/mock_logger.go +++ /dev/null @@ -1,232 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/ava-labs/avalanchego/utils/logging (interfaces: Logger) - -// Package logging is a generated GoMock package. -package logging - -import ( - reflect "reflect" - - gomock "github.com/golang/mock/gomock" - zapcore "go.uber.org/zap/zapcore" -) - -// MockLogger is a mock of Logger interface. -type MockLogger struct { - ctrl *gomock.Controller - recorder *MockLoggerMockRecorder -} - -// MockLoggerMockRecorder is the mock recorder for MockLogger. 
-type MockLoggerMockRecorder struct { - mock *MockLogger -} - -// NewMockLogger creates a new mock instance. -func NewMockLogger(ctrl *gomock.Controller) *MockLogger { - mock := &MockLogger{ctrl: ctrl} - mock.recorder = &MockLoggerMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockLogger) EXPECT() *MockLoggerMockRecorder { - return m.recorder -} - -// Debug mocks base method. -func (m *MockLogger) Debug(arg0 string, arg1 ...zapcore.Field) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0} - for _, a := range arg1 { - varargs = append(varargs, a) - } - m.ctrl.Call(m, "Debug", varargs...) -} - -// Debug indicates an expected call of Debug. -func (mr *MockLoggerMockRecorder) Debug(arg0 interface{}, arg1 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0}, arg1...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Debug", reflect.TypeOf((*MockLogger)(nil).Debug), varargs...) -} - -// Error mocks base method. -func (m *MockLogger) Error(arg0 string, arg1 ...zapcore.Field) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0} - for _, a := range arg1 { - varargs = append(varargs, a) - } - m.ctrl.Call(m, "Error", varargs...) -} - -// Error indicates an expected call of Error. -func (mr *MockLoggerMockRecorder) Error(arg0 interface{}, arg1 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0}, arg1...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Error", reflect.TypeOf((*MockLogger)(nil).Error), varargs...) -} - -// Fatal mocks base method. -func (m *MockLogger) Fatal(arg0 string, arg1 ...zapcore.Field) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0} - for _, a := range arg1 { - varargs = append(varargs, a) - } - m.ctrl.Call(m, "Fatal", varargs...) -} - -// Fatal indicates an expected call of Fatal. 
-func (mr *MockLoggerMockRecorder) Fatal(arg0 interface{}, arg1 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0}, arg1...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Fatal", reflect.TypeOf((*MockLogger)(nil).Fatal), varargs...) -} - -// Info mocks base method. -func (m *MockLogger) Info(arg0 string, arg1 ...zapcore.Field) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0} - for _, a := range arg1 { - varargs = append(varargs, a) - } - m.ctrl.Call(m, "Info", varargs...) -} - -// Info indicates an expected call of Info. -func (mr *MockLoggerMockRecorder) Info(arg0 interface{}, arg1 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0}, arg1...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Info", reflect.TypeOf((*MockLogger)(nil).Info), varargs...) -} - -// RecoverAndExit mocks base method. -func (m *MockLogger) RecoverAndExit(arg0, arg1 func()) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "RecoverAndExit", arg0, arg1) -} - -// RecoverAndExit indicates an expected call of RecoverAndExit. -func (mr *MockLoggerMockRecorder) RecoverAndExit(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecoverAndExit", reflect.TypeOf((*MockLogger)(nil).RecoverAndExit), arg0, arg1) -} - -// RecoverAndPanic mocks base method. -func (m *MockLogger) RecoverAndPanic(arg0 func()) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "RecoverAndPanic", arg0) -} - -// RecoverAndPanic indicates an expected call of RecoverAndPanic. -func (mr *MockLoggerMockRecorder) RecoverAndPanic(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecoverAndPanic", reflect.TypeOf((*MockLogger)(nil).RecoverAndPanic), arg0) -} - -// SetLevel mocks base method. 
-func (m *MockLogger) SetLevel(arg0 Level) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetLevel", arg0) -} - -// SetLevel indicates an expected call of SetLevel. -func (mr *MockLoggerMockRecorder) SetLevel(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetLevel", reflect.TypeOf((*MockLogger)(nil).SetLevel), arg0) -} - -// Stop mocks base method. -func (m *MockLogger) Stop() { - m.ctrl.T.Helper() - m.ctrl.Call(m, "Stop") -} - -// Stop indicates an expected call of Stop. -func (mr *MockLoggerMockRecorder) Stop() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stop", reflect.TypeOf((*MockLogger)(nil).Stop)) -} - -// StopOnPanic mocks base method. -func (m *MockLogger) StopOnPanic() { - m.ctrl.T.Helper() - m.ctrl.Call(m, "StopOnPanic") -} - -// StopOnPanic indicates an expected call of StopOnPanic. -func (mr *MockLoggerMockRecorder) StopOnPanic() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StopOnPanic", reflect.TypeOf((*MockLogger)(nil).StopOnPanic)) -} - -// Trace mocks base method. -func (m *MockLogger) Trace(arg0 string, arg1 ...zapcore.Field) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0} - for _, a := range arg1 { - varargs = append(varargs, a) - } - m.ctrl.Call(m, "Trace", varargs...) -} - -// Trace indicates an expected call of Trace. -func (mr *MockLoggerMockRecorder) Trace(arg0 interface{}, arg1 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0}, arg1...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Trace", reflect.TypeOf((*MockLogger)(nil).Trace), varargs...) -} - -// Verbo mocks base method. -func (m *MockLogger) Verbo(arg0 string, arg1 ...zapcore.Field) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0} - for _, a := range arg1 { - varargs = append(varargs, a) - } - m.ctrl.Call(m, "Verbo", varargs...) 
-} - -// Verbo indicates an expected call of Verbo. -func (mr *MockLoggerMockRecorder) Verbo(arg0 interface{}, arg1 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0}, arg1...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Verbo", reflect.TypeOf((*MockLogger)(nil).Verbo), varargs...) -} - -// Warn mocks base method. -func (m *MockLogger) Warn(arg0 string, arg1 ...zapcore.Field) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0} - for _, a := range arg1 { - varargs = append(varargs, a) - } - m.ctrl.Call(m, "Warn", varargs...) -} - -// Warn indicates an expected call of Warn. -func (mr *MockLoggerMockRecorder) Warn(arg0 interface{}, arg1 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0}, arg1...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Warn", reflect.TypeOf((*MockLogger)(nil).Warn), varargs...) -} - -// Write mocks base method. -func (m *MockLogger) Write(arg0 []byte) (int, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Write", arg0) - ret0, _ := ret[0].(int) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Write indicates an expected call of Write. -func (mr *MockLoggerMockRecorder) Write(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Write", reflect.TypeOf((*MockLogger)(nil).Write), arg0) -} diff --git a/avalanchego/utils/logging/sanitize.go b/avalanchego/utils/logging/sanitize.go index 05b24ff9..18fc4021 100644 --- a/avalanchego/utils/logging/sanitize.go +++ b/avalanchego/utils/logging/sanitize.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package logging @@ -12,7 +12,7 @@ import ( type sanitizedString string func (s sanitizedString) String() string { - return strings.ReplaceAll(string(s), "\n", "\\n") + return strings.ReplaceAll(string(s), "\n", `\n`) } // UserString constructs a field with the given key and the value stripped of @@ -29,7 +29,7 @@ func (s sanitizedStrings) String() string { if i != 0 { _, _ = strs.WriteString(", ") } - _, _ = strs.WriteString(strings.ReplaceAll(str, "\n", "\\n")) + _, _ = strs.WriteString(strings.ReplaceAll(str, "\n", `\n`)) } return strs.String() } diff --git a/avalanchego/utils/logging/test_log.go b/avalanchego/utils/logging/test_log.go index 126128f5..a8a85dc7 100644 --- a/avalanchego/utils/logging/test_log.go +++ b/avalanchego/utils/logging/test_log.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package logging @@ -38,6 +38,10 @@ func (NoLog) Verbo(string, ...zap.Field) {} func (NoLog) SetLevel(Level) {} +func (NoLog) Enabled(Level) bool { + return false +} + func (NoLog) StopOnPanic() {} func (NoLog) RecoverAndPanic(f func()) { diff --git a/avalanchego/utils/math/averager.go b/avalanchego/utils/math/averager.go index a926aba0..8573fbc8 100644 --- a/avalanchego/utils/math/averager.go +++ b/avalanchego/utils/math/averager.go @@ -1,11 +1,9 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package math -import ( - "time" -) +import "time" // Averager tracks a continuous time exponential moving average of the provided // values. 
diff --git a/avalanchego/utils/math/averager_heap.go b/avalanchego/utils/math/averager_heap.go index b09393b4..57d04678 100644 --- a/avalanchego/utils/math/averager_heap.go +++ b/avalanchego/utils/math/averager_heap.go @@ -1,19 +1,16 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package math import ( - "container/heap" - "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/heap" ) -var ( - _ AveragerHeap = averagerHeap{} - _ heap.Interface = (*averagerHeapBackend)(nil) -) +var _ AveragerHeap = (*averagerHeap)(nil) +// TODO replace this interface with utils/heap // AveragerHeap maintains a heap of the averagers. type AveragerHeap interface { // Add the average to the heap. If [nodeID] is already in the heap, the @@ -33,113 +30,36 @@ type AveragerHeap interface { Len() int } -type averagerHeapEntry struct { - nodeID ids.NodeID - averager Averager - index int -} - -type averagerHeapBackend struct { - isMaxHeap bool - nodeIDToEntry map[ids.NodeID]*averagerHeapEntry - entries []*averagerHeapEntry -} - type averagerHeap struct { - b *averagerHeapBackend -} - -// NewMinAveragerHeap returns a new empty min heap. The returned heap is not -// thread safe. -func NewMinAveragerHeap() AveragerHeap { - return averagerHeap{b: &averagerHeapBackend{ - nodeIDToEntry: make(map[ids.NodeID]*averagerHeapEntry), - }} + heap heap.Map[ids.NodeID, Averager] } // NewMaxAveragerHeap returns a new empty max heap. The returned heap is not // thread safe. 
func NewMaxAveragerHeap() AveragerHeap { - return averagerHeap{b: &averagerHeapBackend{ - isMaxHeap: true, - nodeIDToEntry: make(map[ids.NodeID]*averagerHeapEntry), - }} + return averagerHeap{ + heap: heap.NewMap[ids.NodeID, Averager](func(a, b Averager) bool { + return a.Read() > b.Read() + }), + } } func (h averagerHeap) Add(nodeID ids.NodeID, averager Averager) (Averager, bool) { - if e, exists := h.b.nodeIDToEntry[nodeID]; exists { - oldAverager := e.averager - e.averager = averager - heap.Fix(h.b, e.index) - return oldAverager, true - } - - heap.Push(h.b, &averagerHeapEntry{ - nodeID: nodeID, - averager: averager, - }) - return nil, false + return h.heap.Push(nodeID, averager) } func (h averagerHeap) Remove(nodeID ids.NodeID) (Averager, bool) { - e, exists := h.b.nodeIDToEntry[nodeID] - if !exists { - return nil, false - } - heap.Remove(h.b, e.index) - return e.averager, true + return h.heap.Remove(nodeID) } func (h averagerHeap) Pop() (ids.NodeID, Averager, bool) { - if len(h.b.entries) == 0 { - return ids.EmptyNodeID, nil, false - } - e := h.b.entries[0] - heap.Pop(h.b) - return e.nodeID, e.averager, true + return h.heap.Pop() } func (h averagerHeap) Peek() (ids.NodeID, Averager, bool) { - if len(h.b.entries) == 0 { - return ids.EmptyNodeID, nil, false - } - e := h.b.entries[0] - return e.nodeID, e.averager, true + return h.heap.Peek() } func (h averagerHeap) Len() int { - return len(h.b.entries) -} - -func (h *averagerHeapBackend) Len() int { - return len(h.entries) -} - -func (h *averagerHeapBackend) Less(i, j int) bool { - if h.isMaxHeap { - return h.entries[i].averager.Read() > h.entries[j].averager.Read() - } - return h.entries[i].averager.Read() < h.entries[j].averager.Read() -} - -func (h *averagerHeapBackend) Swap(i, j int) { - h.entries[i], h.entries[j] = h.entries[j], h.entries[i] - h.entries[i].index = i - h.entries[j].index = j -} - -func (h *averagerHeapBackend) Push(x interface{}) { - e := x.(*averagerHeapEntry) - e.index = len(h.entries) - 
h.nodeIDToEntry[e.nodeID] = e - h.entries = append(h.entries, e) -} - -func (h *averagerHeapBackend) Pop() interface{} { - newLen := len(h.entries) - 1 - e := h.entries[newLen] - h.entries[newLen] = nil - delete(h.nodeIDToEntry, e.nodeID) - h.entries = h.entries[:newLen] - return e + return h.heap.Len() } diff --git a/avalanchego/utils/math/averager_heap_test.go b/avalanchego/utils/math/averager_heap_test.go index a9796129..8df49537 100644 --- a/avalanchego/utils/math/averager_heap_test.go +++ b/avalanchego/utils/math/averager_heap_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package math @@ -13,26 +13,18 @@ import ( ) func TestAveragerHeap(t *testing.T) { - require := require.New(t) - n0 := ids.GenerateTestNodeID() n1 := ids.GenerateTestNodeID() n2 := ids.GenerateTestNodeID() tests := []struct { - h AveragerHeap - a []Averager + name string + h AveragerHeap + a []Averager }{ { - h: NewMinAveragerHeap(), - a: []Averager{ - NewAverager(0, time.Second, time.Now()), - NewAverager(1, time.Second, time.Now()), - NewAverager(2, time.Second, time.Now()), - }, - }, - { - h: NewMaxAveragerHeap(), + name: "max heap", + h: NewMaxAveragerHeap(), a: []Averager{ NewAverager(0, time.Second, time.Now()), NewAverager(-1, time.Second, time.Now()), @@ -42,67 +34,71 @@ func TestAveragerHeap(t *testing.T) { } for _, test := range tests { - _, _, ok := test.h.Pop() - require.False(ok) + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + _, _, ok := test.h.Pop() + require.False(ok) - _, _, ok = test.h.Peek() - require.False(ok) + _, _, ok = test.h.Peek() + require.False(ok) - l := test.h.Len() - require.Zero(l) + l := test.h.Len() + require.Zero(l) - _, ok = test.h.Add(n1, test.a[1]) - require.False(ok) + _, ok = test.h.Add(n1, test.a[1]) + require.False(ok) - n, a, ok := test.h.Peek() - require.True(ok) - 
require.Equal(n1, n) - require.Equal(test.a[1], a) + n, a, ok := test.h.Peek() + require.True(ok) + require.Equal(n1, n) + require.Equal(test.a[1], a) - l = test.h.Len() - require.Equal(1, l) + l = test.h.Len() + require.Equal(1, l) - a, ok = test.h.Add(n1, test.a[1]) - require.True(ok) - require.Equal(test.a[1], a) + a, ok = test.h.Add(n1, test.a[1]) + require.True(ok) + require.Equal(test.a[1], a) - l = test.h.Len() - require.Equal(1, l) + l = test.h.Len() + require.Equal(1, l) - _, ok = test.h.Add(n0, test.a[0]) - require.False(ok) + _, ok = test.h.Add(n0, test.a[0]) + require.False(ok) - _, ok = test.h.Add(n2, test.a[2]) - require.False(ok) + _, ok = test.h.Add(n2, test.a[2]) + require.False(ok) - n, a, ok = test.h.Pop() - require.True(ok) - require.Equal(n0, n) - require.Equal(test.a[0], a) + n, a, ok = test.h.Pop() + require.True(ok) + require.Equal(n0, n) + require.Equal(test.a[0], a) - l = test.h.Len() - require.Equal(2, l) + l = test.h.Len() + require.Equal(2, l) - a, ok = test.h.Remove(n1) - require.True(ok) - require.Equal(test.a[1], a) + a, ok = test.h.Remove(n1) + require.True(ok) + require.Equal(test.a[1], a) - l = test.h.Len() - require.Equal(1, l) + l = test.h.Len() + require.Equal(1, l) - _, ok = test.h.Remove(n1) - require.False(ok) + _, ok = test.h.Remove(n1) + require.False(ok) - l = test.h.Len() - require.Equal(1, l) + l = test.h.Len() + require.Equal(1, l) - a, ok = test.h.Add(n2, test.a[0]) - require.True(ok) - require.Equal(test.a[2], a) + a, ok = test.h.Add(n2, test.a[0]) + require.True(ok) + require.Equal(test.a[2], a) - n, a, ok = test.h.Pop() - require.True(ok) - require.Equal(n2, n) - require.Equal(test.a[0], a) + n, a, ok = test.h.Pop() + require.True(ok) + require.Equal(n2, n) + require.Equal(test.a[0], a) + }) } } diff --git a/avalanchego/utils/math/continuous_averager.go b/avalanchego/utils/math/continuous_averager.go index e60832f2..7bc89257 100644 --- a/avalanchego/utils/math/continuous_averager.go +++ 
b/avalanchego/utils/math/continuous_averager.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package math diff --git a/avalanchego/utils/math/continuous_averager_benchmark_test.go b/avalanchego/utils/math/continuous_averager_benchmark_test.go index 7a8d30a3..3ebee526 100644 --- a/avalanchego/utils/math/continuous_averager_benchmark_test.go +++ b/avalanchego/utils/math/continuous_averager_benchmark_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package math diff --git a/avalanchego/utils/math/continuous_averager_test.go b/avalanchego/utils/math/continuous_averager_test.go index e7595537..c169f390 100644 --- a/avalanchego/utils/math/continuous_averager_test.go +++ b/avalanchego/utils/math/continuous_averager_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package math @@ -11,42 +11,44 @@ import ( ) func TestAverager(t *testing.T) { + require := require.New(t) + halflife := time.Second currentTime := time.Now() a := NewSyncAverager(NewAverager(0, halflife, currentTime)) - expectedValue := float64(0) - require.Equal(t, expectedValue, a.Read()) + require.Zero(a.Read()) currentTime = currentTime.Add(halflife) a.Observe(1, currentTime) - expectedValue = 1.0 / 1.5 - require.Equal(t, expectedValue, a.Read()) + require.Equal(1.0/1.5, a.Read()) } func TestAveragerTimeTravel(t *testing.T) { + require := require.New(t) + halflife := time.Second currentTime := time.Now() a := NewSyncAverager(NewAverager(1, halflife, currentTime)) - expectedValue := float64(1) - require.Equal(t, expectedValue, a.Read()) + require.Equal(float64(1), a.Read()) currentTime = currentTime.Add(-halflife) a.Observe(0, currentTime) - expectedValue = 1.0 / 1.5 - require.Equal(t, expectedValue, a.Read()) + require.Equal(1.0/1.5, a.Read()) } func TestUninitializedAverager(t *testing.T) { + require := require.New(t) + halfLife := time.Second currentTime := time.Now() firstObservation := float64(10) a := NewUninitializedAverager(halfLife) - require.Equal(t, 0.0, a.Read()) + require.Zero(a.Read()) a.Observe(firstObservation, currentTime) - require.Equal(t, firstObservation, a.Read()) + require.Equal(firstObservation, a.Read()) } diff --git a/avalanchego/utils/math/meter/continuous_meter.go b/avalanchego/utils/math/meter/continuous_meter.go index 4bd3f000..378248a1 100644 --- a/avalanchego/utils/math/meter/continuous_meter.go +++ b/avalanchego/utils/math/meter/continuous_meter.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package meter diff --git a/avalanchego/utils/math/meter/factory.go b/avalanchego/utils/math/meter/factory.go index 49e4859e..a4d3722e 100644 --- a/avalanchego/utils/math/meter/factory.go +++ b/avalanchego/utils/math/meter/factory.go @@ -1,11 +1,9 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package meter -import ( - "time" -) +import "time" // Factory returns new meters. type Factory interface { diff --git a/avalanchego/utils/math/meter/meter.go b/avalanchego/utils/math/meter/meter.go index cfc5fcd8..e9ec6782 100644 --- a/avalanchego/utils/math/meter/meter.go +++ b/avalanchego/utils/math/meter/meter.go @@ -1,11 +1,9 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package meter -import ( - "time" -) +import "time" // Meter tracks a continuous exponential moving average of the % of time this // meter has been running. diff --git a/avalanchego/utils/math/meter/meter_benchmark_test.go b/avalanchego/utils/math/meter/meter_benchmark_test.go index 65f3dcfa..80ed1ad9 100644 --- a/avalanchego/utils/math/meter/meter_benchmark_test.go +++ b/avalanchego/utils/math/meter/meter_benchmark_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package meter diff --git a/avalanchego/utils/math/meter/meter_test.go b/avalanchego/utils/math/meter/meter_test.go index 2bf29185..ed9e8356 100644 --- a/avalanchego/utils/math/meter/meter_test.go +++ b/avalanchego/utils/math/meter/meter_test.go @@ -1,11 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package meter import ( "fmt" - "math" "testing" "time" @@ -54,116 +53,91 @@ func TestMeters(t *testing.T) { } func NewTest(t *testing.T, factory Factory) { - m := factory.New(halflife) - require.NotNil(t, m, "should have returned a valid interface") + require.NotNil(t, factory.New(halflife)) } func TimeTravelTest(t *testing.T, factory Factory) { + require := require.New(t) + m := factory.New(halflife) now := time.Date(1, 2, 3, 4, 5, 6, 7, time.UTC) m.Inc(now, 1) now = now.Add(halflife - 1) - epsilon := 0.0001 - if uptime := m.Read(now); math.Abs(uptime-.5) > epsilon { - t.Fatalf("Wrong uptime value. Expected %f got %f", .5, uptime) - } + delta := 0.0001 + require.InDelta(.5, m.Read(now), delta) m.Dec(now, 1) now = now.Add(-halflife) - if uptime := m.Read(now); math.Abs(uptime-.5) > epsilon { - t.Fatalf("Wrong uptime value. Expected %f got %f", .5, uptime) - } + require.InDelta(.5, m.Read(now), delta) m.Inc(now, 1) now = now.Add(halflife / 2) - if uptime := m.Read(now); math.Abs(uptime-.5) > epsilon { - t.Fatalf("Wrong uptime value. Expected %f got %f", .5, uptime) - } + require.InDelta(.5, m.Read(now), delta) } func StandardUsageTest(t *testing.T, factory Factory) { + require := require.New(t) + m := factory.New(halflife) now := time.Date(1, 2, 3, 4, 5, 6, 7, time.UTC) m.Inc(now, 1) now = now.Add(halflife - 1) - epsilon := 0.0001 - if uptime := m.Read(now); math.Abs(uptime-.5) > epsilon { - t.Fatalf("Wrong uptime value. Expected %f got %f", .5, uptime) - } + delta := 0.0001 + require.InDelta(.5, m.Read(now), delta) m.Inc(now, 1) - - if uptime := m.Read(now); math.Abs(uptime-.5) > epsilon { - t.Fatalf("Wrong uptime value. Expected %f got %f", .5, uptime) - } + require.InDelta(.5, m.Read(now), delta) m.Dec(now, 1) - - if uptime := m.Read(now); math.Abs(uptime-.5) > epsilon { - t.Fatalf("Wrong uptime value. 
Expected %f got %f", .5, uptime) - } + require.InDelta(.5, m.Read(now), delta) m.Dec(now, 1) - if uptime := m.Read(now); math.Abs(uptime-.5) > epsilon { - t.Fatalf("Wrong uptime value. Expected %f got %f", .5, uptime) - } + require.InDelta(.5, m.Read(now), delta) now = now.Add(halflife) - if uptime := m.Read(now); math.Abs(uptime-.25) > epsilon { - t.Fatalf("Wrong uptime value. Expected %f got %f", .25, uptime) - } + require.InDelta(.25, m.Read(now), delta) m.Inc(now, 1) now = now.Add(halflife) - if uptime := m.Read(now); math.Abs(uptime-.625) > epsilon { - t.Fatalf("Wrong uptime value. Expected %f got %f", .625, uptime) - } + require.InDelta(.625, m.Read(now), delta) now = now.Add(34 * halflife) - if uptime := m.Read(now); math.Abs(uptime-1) > epsilon { - t.Fatalf("Wrong uptime value. Expected %d got %f", 1, uptime) - } + require.InDelta(1, m.Read(now), delta) m.Dec(now, 1) now = now.Add(34 * halflife) - if uptime := m.Read(now); math.Abs(uptime-0) > epsilon { - t.Fatalf("Wrong uptime value. Expected %d got %f", 0, uptime) - } + require.InDelta(0, m.Read(now), delta) m.Inc(now, 1) now = now.Add(2 * halflife) - if uptime := m.Read(now); math.Abs(uptime-.75) > epsilon { - t.Fatalf("Wrong uptime value. Expected %f got %f", .75, uptime) - } + require.InDelta(.75, m.Read(now), delta) // Second start m.Inc(now, 1) now = now.Add(34 * halflife) - if uptime := m.Read(now); math.Abs(uptime-2) > epsilon { - t.Fatalf("Wrong uptime value. Expected %d got %f", 2, uptime) - } + require.InDelta(2, m.Read(now), delta) // Stop the second CPU m.Dec(now, 1) now = now.Add(34 * halflife) - if uptime := m.Read(now); math.Abs(uptime-1) > epsilon { - t.Fatalf("Wrong uptime value. 
Expected %d got %f", 1, uptime) - } + require.InDelta(1, m.Read(now), delta) } func TestTimeUntil(t *testing.T) { + require := require.New(t) + halflife := 5 * time.Second f := ContinuousFactory{} m := f.New(halflife) @@ -184,9 +158,9 @@ func TestTimeUntil(t *testing.T) { now = now.Add(timeUntilDesiredVal) actualVal := m.Read(now) // Make sure the actual/expected are close - require.InDelta(t, desiredVal, actualVal, .00001) + require.InDelta(desiredVal, actualVal, .00001) // Make sure TimeUntil returns the zero duration if // the value provided >= the current value - require.Zero(t, m.TimeUntil(now, actualVal)) - require.Zero(t, m.TimeUntil(now, actualVal+.1)) + require.Zero(m.TimeUntil(now, actualVal)) + require.Zero(m.TimeUntil(now, actualVal+.1)) } diff --git a/avalanchego/utils/math/safe_math.go b/avalanchego/utils/math/safe_math.go index 83454758..e975d0d3 100644 --- a/avalanchego/utils/math/safe_math.go +++ b/avalanchego/utils/math/safe_math.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package math @@ -17,24 +17,6 @@ var ( ErrUnderflow = errors.New("underflow") ) -func Max[T constraints.Ordered](max T, nums ...T) T { - for _, num := range nums { - if num > max { - max = num - } - } - return max -} - -func Min[T constraints.Ordered](min T, nums ...T) T { - for _, num := range nums { - if num < min { - min = num - } - } - return min -} - // Add64 returns: // 1) a + b // 2) If there is overflow, an error @@ -74,5 +56,5 @@ func Mul64(a, b uint64) (uint64, error) { } func AbsDiff[T constraints.Unsigned](a, b T) T { - return Max(a, b) - Min(a, b) + return max(a, b) - min(a, b) } diff --git a/avalanchego/utils/math/safe_math_test.go b/avalanchego/utils/math/safe_math_test.go index 5d9bb702..7bcd12a6 100644 --- a/avalanchego/utils/math/safe_math_test.go +++ b/avalanchego/utils/math/safe_math_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package math @@ -12,29 +12,6 @@ import ( const maxUint64 uint64 = math.MaxUint64 -func TestMax(t *testing.T) { - require := require.New(t) - - require.Equal(maxUint64, Max(0, maxUint64)) - require.Equal(maxUint64, Max(maxUint64, 0)) - require.Equal(1, Max(1, 0)) - require.Equal(1, Max(0, 1)) - require.Equal(0, Max(0, 0)) - require.Equal(2, Max(2, 2)) -} - -func TestMin(t *testing.T) { - require := require.New(t) - - require.Equal(uint64(0), Min(uint64(0), maxUint64)) - require.Equal(uint64(0), Min(maxUint64, uint64(0))) - require.Equal(0, Min(1, 0)) - require.Equal(0, Min(0, 1)) - require.Equal(0, Min(0, 0)) - require.Equal(2, Min(2, 2)) - require.Equal(1, Min(1, 2)) -} - func TestAdd64(t *testing.T) { require := require.New(t) @@ -69,11 +46,11 @@ func TestSub(t *testing.T) { got, err = Sub(uint64(2), uint64(2)) require.NoError(err) - require.Equal(uint64(0), got) + require.Zero(got) got, err = Sub(maxUint64, maxUint64) require.NoError(err) - require.Equal(uint64(0), got) + require.Zero(got) got, err = Sub(uint64(3), uint64(2)) require.NoError(err) @@ -91,11 +68,11 @@ func TestMul64(t *testing.T) { got, err := Mul64(0, maxUint64) require.NoError(err) - require.Equal(uint64(0), got) + require.Zero(got) got, err = Mul64(maxUint64, 0) require.NoError(err) - require.Equal(uint64(0), got) + require.Zero(got) got, err = Mul64(uint64(1), uint64(3)) require.NoError(err) @@ -111,7 +88,7 @@ func TestMul64(t *testing.T) { got, err = Mul64(maxUint64, 0) require.NoError(err) - require.Equal(uint64(0), got) + require.Zero(got) _, err = Mul64(maxUint64-1, 2) require.ErrorIs(err, ErrOverflow) @@ -124,6 +101,6 @@ func TestAbsDiff(t *testing.T) { require.Equal(maxUint64, AbsDiff(maxUint64, 0)) require.Equal(uint64(2), AbsDiff(uint64(3), uint64(1))) require.Equal(uint64(2), AbsDiff(uint64(1), uint64(3))) - require.Equal(uint64(0), AbsDiff(uint64(1), uint64(1))) - require.Equal(uint64(0), AbsDiff(uint64(0), uint64(0))) + require.Zero(AbsDiff(uint64(1), uint64(1))) + 
require.Zero(AbsDiff(uint64(0), uint64(0))) } diff --git a/avalanchego/utils/math/sync_averager.go b/avalanchego/utils/math/sync_averager.go index cbe8ba10..92210ab4 100644 --- a/avalanchego/utils/math/sync_averager.go +++ b/avalanchego/utils/math/sync_averager.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package math diff --git a/avalanchego/utils/maybe/maybe.go b/avalanchego/utils/maybe/maybe.go new file mode 100644 index 00000000..fd50b415 --- /dev/null +++ b/avalanchego/utils/maybe/maybe.go @@ -0,0 +1,76 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package maybe + +import "fmt" + +// Maybe T = Some T | Nothing. +// A data wrapper that allows values to be something [Some T] or nothing [Nothing]. +// Invariant: If [hasValue] is false, then [value] is the zero value of type T. +// Maybe is used to wrap types: +// * That can't be represented by nil. +// * That use nil as a valid value instead of an indicator of a missing value. +// For more info see https://en.wikipedia.org/wiki/Option_type +type Maybe[T any] struct { + hasValue bool + // If [hasValue] is false, [value] is the zero value of type T. + value T +} + +// Some returns a new Maybe[T] with the value val. +// If m.IsNothing(), returns the zero value of type T. +func Some[T any](val T) Maybe[T] { + return Maybe[T]{ + value: val, + hasValue: true, + } +} + +// Nothing returns a new Maybe[T] with no value. +func Nothing[T any]() Maybe[T] { + return Maybe[T]{} +} + +// IsNothing returns false iff [m] has a value. +func (m Maybe[T]) IsNothing() bool { + return !m.hasValue +} + +// HasValue returns true iff [m] has a value. +func (m Maybe[T]) HasValue() bool { + return m.hasValue +} + +// Value returns the value of [m]. 
+func (m Maybe[T]) Value() T { + return m.value +} + +func (m Maybe[T]) String() string { + if !m.hasValue { + return fmt.Sprintf("Nothing[%T]", m.value) + } + return fmt.Sprintf("Some[%T]{%v}", m.value, m.value) +} + +// Bind returns Nothing iff [m] is Nothing. +// Otherwise applies [f] to the value of [m] and returns the result as a Some. +func Bind[T, U any](m Maybe[T], f func(T) U) Maybe[U] { + if m.IsNothing() { + return Nothing[U]() + } + return Some(f(m.Value())) +} + +// Equal returns true if both m1 and m2 are nothing or have the same value according to [equalFunc]. +func Equal[T any](m1 Maybe[T], m2 Maybe[T], equalFunc func(T, T) bool) bool { + if m1.IsNothing() { + return m2.IsNothing() + } + + if m2.IsNothing() { + return false + } + return equalFunc(m1.Value(), m2.Value()) +} diff --git a/avalanchego/utils/maybe/maybe_test.go b/avalanchego/utils/maybe/maybe_test.go new file mode 100644 index 00000000..f2a88f66 --- /dev/null +++ b/avalanchego/utils/maybe/maybe_test.go @@ -0,0 +1,66 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package maybe + +import ( + "slices" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestMaybeClone(t *testing.T) { + require := require.New(t) + + // Case: Value is maybe + { + val := []byte{1, 2, 3} + originalVal := slices.Clone(val) + m := Some(val) + mClone := Bind(m, slices.Clone[[]byte]) + m.value[0] = 0 + require.NotEqual(mClone.value, m.value) + require.Equal(originalVal, mClone.value) + } + + // Case: Value is nothing + { + m := Nothing[[]byte]() + mClone := Bind(m, slices.Clone[[]byte]) + require.True(mClone.IsNothing()) + } +} + +func TestMaybeString(t *testing.T) { + require := require.New(t) + + // Case: Value is maybe + { + val := []int{1, 2, 3} + m := Some(val) + require.Equal("Some[[]int]{[1 2 3]}", m.String()) + } + + // Case: Value is nothing + { + m := Nothing[int]() + require.Equal("Nothing[int]", m.String()) + } +} + +func TestMaybeEquality(t *testing.T) { + require := require.New(t) + require.True(Equal(Nothing[int](), Nothing[int](), func(i int, i2 int) bool { + return i == i2 + })) + require.False(Equal(Nothing[int](), Some(1), func(i int, i2 int) bool { + return i == i2 + })) + require.False(Equal(Some(1), Nothing[int](), func(i int, i2 int) bool { + return i == i2 + })) + require.True(Equal(Some(1), Some(1), func(i int, i2 int) bool { + return i == i2 + })) +} diff --git a/avalanchego/utils/metric/api_interceptor.go b/avalanchego/utils/metric/api_interceptor.go index 57810fce..7d970b22 100644 --- a/avalanchego/utils/metric/api_interceptor.go +++ b/avalanchego/utils/metric/api_interceptor.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package metric @@ -9,10 +9,9 @@ import ( "time" "github.com/gorilla/rpc/v2" - "github.com/prometheus/client_golang/prometheus" - "github.com/ava-labs/avalanchego/utils/wrappers" + "github.com/ava-labs/avalanchego/utils" ) type APIInterceptor interface { @@ -55,8 +54,7 @@ func NewAPIInterceptor(namespace string, registerer prometheus.Registerer) (APII []string{"method"}, ) - errs := wrappers.Errs{} - errs.Add( + err := utils.Err( registerer.Register(requestDurationCount), registerer.Register(requestDurationSum), registerer.Register(requestErrors), @@ -65,7 +63,7 @@ func NewAPIInterceptor(namespace string, registerer prometheus.Registerer) (APII requestDurationCount: requestDurationCount, requestDurationSum: requestDurationSum, requestErrors: requestErrors, - }, errs.Err + }, err } func (*apiInterceptor) InterceptRequest(i *rpc.RequestInfo) *http.Request { diff --git a/avalanchego/utils/metric/averager.go b/avalanchego/utils/metric/averager.go index 9cc4588d..e63e0007 100644 --- a/avalanchego/utils/metric/averager.go +++ b/avalanchego/utils/metric/averager.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package metric import ( + "errors" "fmt" "github.com/prometheus/client_golang/prometheus" @@ -11,6 +12,8 @@ import ( "github.com/ava-labs/avalanchego/utils/wrappers" ) +var ErrFailedRegistering = errors.New("failed registering metric") + type Averager interface { Observe(float64) } @@ -30,20 +33,22 @@ func NewAveragerWithErrs(namespace, name, desc string, reg prometheus.Registerer a := averager{ count: prometheus.NewCounter(prometheus.CounterOpts{ Namespace: namespace, - Name: fmt.Sprintf("%s_count", name), - Help: fmt.Sprintf("Total # of observations of %s", desc), + Name: name + "_count", + Help: "Total # of observations of " + desc, }), sum: prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: namespace, - Name: fmt.Sprintf("%s_sum", name), - Help: fmt.Sprintf("Sum of %s", desc), + Name: name + "_sum", + Help: "Sum of " + desc, }), } - errs.Add( - reg.Register(a.count), - reg.Register(a.sum), - ) + if err := reg.Register(a.count); err != nil { + errs.Add(fmt.Errorf("%w: %w", ErrFailedRegistering, err)) + } + if err := reg.Register(a.sum); err != nil { + errs.Add(fmt.Errorf("%w: %w", ErrFailedRegistering, err)) + } return &a } diff --git a/avalanchego/utils/metric/namespace.go b/avalanchego/utils/metric/namespace.go new file mode 100644 index 00000000..4371bb1d --- /dev/null +++ b/avalanchego/utils/metric/namespace.go @@ -0,0 +1,17 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package metric + +import "strings" + +func AppendNamespace(prefix, suffix string) string { + switch { + case len(prefix) == 0: + return suffix + case len(suffix) == 0: + return prefix + default: + return strings.Join([]string{prefix, suffix}, "_") + } +} diff --git a/avalanchego/utils/metric/namespace_test.go b/avalanchego/utils/metric/namespace_test.go new file mode 100644 index 00000000..b1daf8ec --- /dev/null +++ b/avalanchego/utils/metric/namespace_test.go @@ -0,0 +1,46 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. 
All rights reserved. +// See the file LICENSE for licensing terms. + +package metric + +import ( + "strings" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestAppendNamespace(t *testing.T) { + tests := []struct { + prefix string + suffix string + expected string + }{ + { + prefix: "avalanchego", + suffix: "isgreat", + expected: "avalanchego_isgreat", + }, + { + prefix: "", + suffix: "sucks", + expected: "sucks", + }, + { + prefix: "sucks", + suffix: "", + expected: "sucks", + }, + { + prefix: "", + suffix: "", + expected: "", + }, + } + for _, test := range tests { + t.Run(strings.Join([]string{test.prefix, test.suffix}, "_"), func(t *testing.T) { + namespace := AppendNamespace(test.prefix, test.suffix) + require.Equal(t, test.expected, namespace) + }) + } +} diff --git a/avalanchego/utils/password/hash.go b/avalanchego/utils/password/hash.go index 19c6f731..fc304bd5 100644 --- a/avalanchego/utils/password/hash.go +++ b/avalanchego/utils/password/hash.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package password diff --git a/avalanchego/utils/password/hash_test.go b/avalanchego/utils/password/hash_test.go index b7d90e6b..07f56d03 100644 --- a/avalanchego/utils/password/hash_test.go +++ b/avalanchego/utils/password/hash_test.go @@ -1,24 +1,20 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package password import ( "testing" + + "github.com/stretchr/testify/require" ) func TestHash(t *testing.T) { + require := require.New(t) + h := Hash{} - if err := h.Set("heytherepal"); err != nil { - t.Fatal(err) - } - if !h.Check("heytherepal") { - t.Fatalf("Should have verified the password") - } - if h.Check("heytherepal!") { - t.Fatalf("Shouldn't have verified the password") - } - if h.Check("") { - t.Fatalf("Shouldn't have verified the password") - } + require.NoError(h.Set("heytherepal")) + require.True(h.Check("heytherepal")) + require.False(h.Check("heytherepal!")) + require.False(h.Check("")) } diff --git a/avalanchego/utils/password/password.go b/avalanchego/utils/password/password.go index af66b0c7..fa4d240e 100644 --- a/avalanchego/utils/password/password.go +++ b/avalanchego/utils/password/password.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package password @@ -51,9 +51,9 @@ const ( ) var ( - errEmptyPassword = errors.New("empty password") - errPassMaxLength = fmt.Errorf("password exceeds maximum length of %d chars", maxPassLen) - errWeakPassword = errors.New("password is too weak") + ErrEmptyPassword = errors.New("empty password") + ErrPassMaxLength = fmt.Errorf("password exceeds maximum length of %d chars", maxPassLen) + ErrWeakPassword = errors.New("password is too weak") ) // SufficientlyStrong returns true if [password] has strength greater than or @@ -70,11 +70,11 @@ func SufficientlyStrong(password string, minimumStrength Strength) bool { func IsValid(password string, minimumStrength Strength) error { switch { case len(password) == 0: - return errEmptyPassword + return ErrEmptyPassword case len(password) > maxPassLen: - return errPassMaxLength + return ErrPassMaxLength case !SufficientlyStrong(password, minimumStrength): - return errWeakPassword + return ErrWeakPassword default: return nil } diff --git a/avalanchego/utils/password/password_test.go b/avalanchego/utils/password/password_test.go index 26efb783..2c4f534c 100644 --- a/avalanchego/utils/password/password_test.go +++ b/avalanchego/utils/password/password_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package password @@ -6,6 +6,8 @@ package password import ( "fmt" "testing" + + "github.com/stretchr/testify/require" ) func TestSufficientlyStrong(t *testing.T) { @@ -32,64 +34,53 @@ func TestSufficientlyStrong(t *testing.T) { } for _, test := range tests { t.Run(fmt.Sprintf("%s-%d", test.password, test.expected), func(t *testing.T) { - if !SufficientlyStrong(test.password, test.expected) { - t.Fatalf("expected %q to be rated stronger", test.password) - } + require.True(t, SufficientlyStrong(test.password, test.expected)) }) } } func TestIsValid(t *testing.T) { tests := []struct { - password string - expected Strength - shouldErr bool + password string + expected Strength + expectedErr error }{ { - password: "", - expected: VeryWeak, - shouldErr: true, + password: "", + expected: VeryWeak, + expectedErr: ErrEmptyPassword, }, { - password: "a", - expected: VeryWeak, - shouldErr: false, + password: "a", + expected: VeryWeak, }, { - password: "password", - expected: VeryWeak, - shouldErr: false, + password: "password", + expected: VeryWeak, }, { - password: "thisisareallylongandpresumablyverystrongpassword", - expected: VeryStrong, - shouldErr: false, + password: "thisisareallylongandpresumablyverystrongpassword", + expected: VeryStrong, }, { - password: string(make([]byte, maxPassLen)), - expected: VeryWeak, - shouldErr: false, + password: string(make([]byte, maxPassLen)), + expected: VeryWeak, }, { - password: string(make([]byte, maxPassLen+1)), - expected: VeryWeak, - shouldErr: true, + password: string(make([]byte, maxPassLen+1)), + expected: VeryWeak, + expectedErr: ErrPassMaxLength, }, { - password: "password", - expected: Weak, - shouldErr: true, + password: "password", + expected: Weak, + expectedErr: ErrWeakPassword, }, } for _, test := range tests { t.Run(fmt.Sprintf("%s-%d", test.password, test.expected), func(t *testing.T) { err := IsValid(test.password, test.expected) - if err == nil && test.shouldErr { - t.Fatalf("expected %q to be invalid", 
test.password) - } - if err != nil && !test.shouldErr { - t.Fatalf("expected %q to be valid but returned %s", test.password, err) - } + require.ErrorIs(t, err, test.expectedErr) }) } } diff --git a/avalanchego/utils/perms/chmod.go b/avalanchego/utils/perms/chmod.go index 5b4ff4a3..a5a8710b 100644 --- a/avalanchego/utils/perms/chmod.go +++ b/avalanchego/utils/perms/chmod.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package perms diff --git a/avalanchego/utils/perms/create.go b/avalanchego/utils/perms/create.go index 8d91baea..123637e1 100644 --- a/avalanchego/utils/perms/create.go +++ b/avalanchego/utils/perms/create.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package perms diff --git a/avalanchego/utils/perms/perms.go b/avalanchego/utils/perms/perms.go index e89dcc94..0bb633d9 100644 --- a/avalanchego/utils/perms/perms.go +++ b/avalanchego/utils/perms/perms.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package perms diff --git a/avalanchego/utils/perms/write_file.go b/avalanchego/utils/perms/write_file.go index 9ce7f6bf..f716459a 100644 --- a/avalanchego/utils/perms/write_file.go +++ b/avalanchego/utils/perms/write_file.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package perms @@ -6,15 +6,18 @@ package perms import ( "errors" "os" + + "github.com/google/renameio/v2/maybe" ) // WriteFile writes [data] to [filename] and ensures that [filename] has [perm] -// permissions. 
+// permissions. Will write atomically on linux/macos and fall back to non-atomic +// ioutil.WriteFile on windows. func WriteFile(filename string, data []byte, perm os.FileMode) error { info, err := os.Stat(filename) if errors.Is(err, os.ErrNotExist) { // The file doesn't exist, so try to write it. - return os.WriteFile(filename, data, perm) + return maybe.WriteFile(filename, data, perm) } if err != nil { return err @@ -27,5 +30,5 @@ func WriteFile(filename string, data []byte, perm os.FileMode) error { } // The file has the right permissions, so truncate any data and write the // file. - return os.WriteFile(filename, data, perm) + return maybe.WriteFile(filename, data, perm) } diff --git a/avalanchego/utils/profiler/continuous.go b/avalanchego/utils/profiler/continuous.go index 548e8877..ee31af4d 100644 --- a/avalanchego/utils/profiler/continuous.go +++ b/avalanchego/utils/profiler/continuous.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package profiler @@ -37,7 +37,7 @@ type continuousProfiler struct { func NewContinuous(dir string, freq time.Duration, maxNumFiles int) ContinuousProfiler { return &continuousProfiler{ - profiler: new(dir), + profiler: newProfiler(dir), freq: freq, maxNumFiles: maxNumFiles, closer: make(chan struct{}), @@ -108,7 +108,7 @@ func rotate(name string, maxNumFiles int) error { return err } } - destFilename := fmt.Sprintf("%s.1", name) + destFilename := name + ".1" _, err := filesystem.RenameIfExists(name, destFilename) return err } diff --git a/avalanchego/utils/profiler/profiler.go b/avalanchego/utils/profiler/profiler.go index c35606e7..00a540cc 100644 --- a/avalanchego/utils/profiler/profiler.go +++ b/avalanchego/utils/profiler/profiler.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. package profiler @@ -23,6 +23,8 @@ const ( ) var ( + _ Profiler = (*profiler)(nil) + errCPUProfilerRunning = errors.New("cpu profiler already running") errCPUProfilerNotRunning = errors.New("cpu profiler doesn't exist") ) @@ -53,10 +55,10 @@ type profiler struct { } func New(dir string) Profiler { - return new(dir) + return newProfiler(dir) } -func new(dir string) *profiler { +func newProfiler(dir string) *profiler { return &profiler{ dir: dir, cpuProfileName: filepath.Join(dir, cpuProfileFile), diff --git a/avalanchego/utils/profiler/profiler_test.go b/avalanchego/utils/profiler/profiler_test.go index c2c89143..17ae695e 100644 --- a/avalanchego/utils/profiler/profiler_test.go +++ b/avalanchego/utils/profiler/profiler_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package profiler @@ -12,35 +12,33 @@ import ( ) func TestProfiler(t *testing.T) { + require := require.New(t) + dir := t.TempDir() p := New(dir) // Test Start and Stop CPU Profiler - err := p.StartCPUProfiler() - require.NoError(t, err) + require.NoError(p.StartCPUProfiler()) - err = p.StopCPUProfiler() - require.NoError(t, err) + require.NoError(p.StopCPUProfiler()) - _, err = os.Stat(filepath.Join(dir, cpuProfileFile)) - require.NoError(t, err) + _, err := os.Stat(filepath.Join(dir, cpuProfileFile)) + require.NoError(err) // Test Stop CPU Profiler without it running err = p.StopCPUProfiler() - require.Error(t, err) + require.ErrorIs(err, errCPUProfilerNotRunning) // Test Memory Profiler - err = p.MemoryProfile() - require.NoError(t, err) + require.NoError(p.MemoryProfile()) _, err = os.Stat(filepath.Join(dir, memProfileFile)) - require.NoError(t, err) + require.NoError(err) // Test Lock Profiler - err = p.LockProfile() - require.NoError(t, err) + require.NoError(p.LockProfile()) _, err = 
os.Stat(filepath.Join(dir, lockProfileFile)) - require.NoError(t, err) + require.NoError(err) } diff --git a/avalanchego/utils/resource/metrics.go b/avalanchego/utils/resource/metrics.go new file mode 100644 index 00000000..3ce87ade --- /dev/null +++ b/avalanchego/utils/resource/metrics.go @@ -0,0 +1,71 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package resource + +import ( + "github.com/prometheus/client_golang/prometheus" + + "github.com/ava-labs/avalanchego/utils" +) + +type metrics struct { + numCPUCycles *prometheus.GaugeVec + numDiskReads *prometheus.GaugeVec + numDiskReadBytes *prometheus.GaugeVec + numDiskWrites *prometheus.GaugeVec + numDiskWritesBytes *prometheus.GaugeVec +} + +func newMetrics(namespace string, registerer prometheus.Registerer) (*metrics, error) { + m := &metrics{ + numCPUCycles: prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: namespace, + Name: "num_cpu_cycles", + Help: "Total number of CPU cycles", + }, + []string{"processID"}, + ), + numDiskReads: prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: namespace, + Name: "num_disk_reads", + Help: "Total number of disk reads", + }, + []string{"processID"}, + ), + numDiskReadBytes: prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: namespace, + Name: "num_disk_read_bytes", + Help: "Total number of disk read bytes", + }, + []string{"processID"}, + ), + numDiskWrites: prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: namespace, + Name: "num_disk_writes", + Help: "Total number of disk writes", + }, + []string{"processID"}, + ), + numDiskWritesBytes: prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: namespace, + Name: "num_disk_write_bytes", + Help: "Total number of disk write bytes", + }, + []string{"processID"}, + ), + } + err := utils.Err( + registerer.Register(m.numCPUCycles), + registerer.Register(m.numDiskReads), + registerer.Register(m.numDiskReadBytes), + 
registerer.Register(m.numDiskWrites), + registerer.Register(m.numDiskWritesBytes), + ) + return m, err +} diff --git a/avalanchego/utils/resource/mock_user.go b/avalanchego/utils/resource/mock_user.go index 9b344ca1..d333f2c5 100644 --- a/avalanchego/utils/resource/mock_user.go +++ b/avalanchego/utils/resource/mock_user.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/utils/resource (interfaces: User) +// +// Generated by this command: +// +// mockgen -package=resource -destination=utils/resource/mock_user.go github.com/ava-labs/avalanchego/utils/resource User +// // Package resource is a generated GoMock package. package resource @@ -10,7 +12,7 @@ package resource import ( reflect "reflect" - gomock "github.com/golang/mock/gomock" + gomock "go.uber.org/mock/gomock" ) // MockUser is a mock of User interface. diff --git a/avalanchego/utils/resource/no_usage.go b/avalanchego/utils/resource/no_usage.go index 8a10d11c..baa42437 100644 --- a/avalanchego/utils/resource/no_usage.go +++ b/avalanchego/utils/resource/no_usage.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package resource diff --git a/avalanchego/utils/resource/usage.go b/avalanchego/utils/resource/usage.go index 2c83aa0d..32a9d196 100644 --- a/avalanchego/utils/resource/usage.go +++ b/avalanchego/utils/resource/usage.go @@ -1,15 +1,20 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package resource import ( "math" + "strconv" "sync" "time" + "github.com/prometheus/client_golang/prometheus" + "github.com/shirou/gopsutil/cpu" "github.com/shirou/gopsutil/process" + "go.uber.org/zap" + "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/storage" ) @@ -62,6 +67,9 @@ type Manager interface { } type manager struct { + log logging.Logger + processMetrics *metrics + processesLock sync.Mutex processes map[int]*proc @@ -78,14 +86,29 @@ type manager struct { onClose chan struct{} } -func NewManager(diskPath string, frequency, cpuHalflife, diskHalflife time.Duration) Manager { +func NewManager( + log logging.Logger, + diskPath string, + frequency, + cpuHalflife, + diskHalflife time.Duration, + metricsRegisterer prometheus.Registerer, +) (Manager, error) { + processMetrics, err := newMetrics("system_resources", metricsRegisterer) + if err != nil { + return nil, err + } + m := &manager{ + log: log, + processMetrics: processMetrics, processes: make(map[int]*proc), onClose: make(chan struct{}), availableDiskBytes: math.MaxUint64, } + go m.update(diskPath, frequency, cpuHalflife, diskHalflife) - return m + return m, nil } func (m *manager) CPUUsage() float64 { @@ -115,7 +138,10 @@ func (m *manager) TrackProcess(pid int) { return } - process := &proc{p: p} + process := &proc{ + p: p, + log: m.log, + } m.processesLock.Lock() m.processes[pid] = process @@ -149,6 +175,13 @@ func (m *manager) update(diskPath string, frequency, cpuHalflife, diskHalflife t currentScaledWriteUsage := newDiskWeight * currentWriteUsage availableBytes, getBytesErr := storage.AvailableBytes(diskPath) + if getBytesErr != nil { + m.log.Verbo("failed to lookup resource", + zap.String("resource", "system disk"), + zap.String("path", diskPath), + zap.Error(getBytesErr), + ) + } m.usageLock.Lock() m.cpuUsage = oldCPUWeight*m.cpuUsage + currentScaledCPUUsage @@ -187,21 +220,34 @@ func (m *manager) getActiveUsage(secondsSinceLastUpdate float64) (float64, float 
totalCPU += cpu totalRead += read totalWrite += write + + processIDStr := strconv.Itoa(int(p.p.Pid)) + m.processMetrics.numCPUCycles.WithLabelValues(processIDStr).Set(p.lastTotalCPU) + m.processMetrics.numDiskReads.WithLabelValues(processIDStr).Set(float64(p.numReads)) + m.processMetrics.numDiskReadBytes.WithLabelValues(processIDStr).Set(float64(p.lastReadBytes)) + m.processMetrics.numDiskWrites.WithLabelValues(processIDStr).Set(float64(p.numWrites)) + m.processMetrics.numDiskWritesBytes.WithLabelValues(processIDStr).Set(float64(p.lastWriteBytes)) } return totalCPU, totalRead, totalWrite } type proc struct { - p *process.Process + p *process.Process + log logging.Logger initialized bool // [lastTotalCPU] is the most recent measurement of total CPU usage. lastTotalCPU float64 + // [numReads] is the total number of disk reads performed. + numReads uint64 // [lastReadBytes] is the most recent measurement of total disk bytes read. lastReadBytes uint64 + + // [numWrites] is the total number of disk writes performed. + numWrites uint64 // [lastWriteBytes] is the most recent measurement of total disk bytes // written. lastWriteBytes uint64 @@ -212,12 +258,24 @@ func (p *proc) getActiveUsage(secondsSinceLastUpdate float64) (float64, float64, // assume that the utilization is 0. times, err := p.p.Times() if err != nil { - return 0, 0, 0 + p.log.Verbo("failed to lookup resource", + zap.String("resource", "process CPU"), + zap.Int32("pid", p.p.Pid), + zap.Error(err), + ) + times = &cpu.TimesStat{} } + // Note: IOCounters is not implemented on macos and therefore always returns + // an error on macos. 
io, err := p.p.IOCounters() if err != nil { - return 0, 0, 0 + p.log.Verbo("failed to lookup resource", + zap.String("resource", "process IO"), + zap.Int32("pid", p.p.Pid), + zap.Error(err), + ) + io = &process.IOCountersStat{} } var ( @@ -243,7 +301,9 @@ func (p *proc) getActiveUsage(secondsSinceLastUpdate float64) (float64, float64, p.initialized = true p.lastTotalCPU = totalCPU + p.numReads = io.ReadCount p.lastReadBytes = io.ReadBytes + p.numWrites = io.WriteCount p.lastWriteBytes = io.WriteBytes return cpu, read, write diff --git a/avalanchego/utils/resource/usage_test.go b/avalanchego/utils/resource/usage_test.go index 5c1df781..b0ee74ec 100644 --- a/avalanchego/utils/resource/usage_test.go +++ b/avalanchego/utils/resource/usage_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package resource diff --git a/avalanchego/utils/rpc/json.go b/avalanchego/utils/rpc/json.go index cf8819e7..9b87661e 100644 --- a/avalanchego/utils/rpc/json.go +++ b/avalanchego/utils/rpc/json.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package rpc @@ -31,7 +31,7 @@ func SendJSONRequest( request, err := http.NewRequestWithContext( ctx, - "POST", + http.MethodPost, uri.String(), bytes.NewBuffer(requestBodyBytes), ) diff --git a/avalanchego/utils/rpc/options.go b/avalanchego/utils/rpc/options.go index ce79bc25..79c32c72 100644 --- a/avalanchego/utils/rpc/options.go +++ b/avalanchego/utils/rpc/options.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package rpc diff --git a/avalanchego/utils/rpc/requester.go b/avalanchego/utils/rpc/requester.go index 49f3ffa0..6f2e312f 100644 --- a/avalanchego/utils/rpc/requester.go +++ b/avalanchego/utils/rpc/requester.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package rpc diff --git a/avalanchego/utils/sampler/rand.go b/avalanchego/utils/sampler/rand.go index 476d8475..ce62d4a9 100644 --- a/avalanchego/utils/sampler/rand.go +++ b/avalanchego/utils/sampler/rand.go @@ -1,61 +1,86 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package sampler import ( - "math/rand" + "math" "sync" "time" "gonum.org/v1/gonum/mathext/prng" ) -var ( - int63Mask uint64 = 1<<63 - 1 - globalRNG = newRNG() -) +var globalRNG = newRNG() -func newRNG() rng { - source := prng.NewMT19937() - source.Seed(uint64(time.Now().UnixNano())) +func newRNG() *rng { // We don't use a cryptographically secure source of randomness here, as // there's no need to ensure a truly random sampling. - return rand.New(&syncSource{rng: source}) // #nosec G404 + source := prng.NewMT19937() + source.Seed(uint64(time.Now().UnixNano())) + return &rng{rng: source} } -func Seed(seed int64) { - globalRNG.Seed(seed) +type rng struct { + lock sync.Mutex + rng Source } -type rng interface { - // Seed uses the provided seed value to initialize the generator to a - // deterministic state. - Seed(seed int64) - - // Int63n returns, as an int64, a non-negative pseudo-random number in - // [0,n). It panics if n <= 0. - Int63n(n int64) int64 +type Source interface { + // Uint64 returns a random number in [0, MaxUint64] and advances the + // generator's state. 
+ Uint64() uint64 } -type syncSource struct { - lock sync.Mutex - rng *prng.MT19937 -} +// Uint64Inclusive returns a pseudo-random number in [0,n]. +// +// Invariant: The result of this function is stored in chain state, so any +// modifications are considered breaking. +func (r *rng) Uint64Inclusive(n uint64) uint64 { + switch { + // n+1 is power of two, so we can just mask + // + // Note: This does work for MaxUint64 as overflow is explicitly part of the + // compiler specification: https://go.dev/ref/spec#Integer_overflow + case n&(n+1) == 0: + return r.uint64() & n + + // n is greater than MaxUint64/2 so we need to just iterate until we get a + // number in the requested range. + case n > math.MaxInt64: + v := r.uint64() + for v > n { + v = r.uint64() + } + return v -func (s *syncSource) Seed(seed int64) { - s.lock.Lock() - s.rng.Seed(uint64(seed)) - s.lock.Unlock() + // n is less than MaxUint64/2 so we generate a number in the range + // [0, k*(n+1)) where k is the largest integer such that k*(n+1) is less + // than or equal to MaxUint64/2. We can't easily find k such that k*(n+1) is + // less than or equal to MaxUint64 because the calculation would overflow. + // + // ref: https://github.com/golang/go/blob/ce10e9d84574112b224eae88dc4e0f43710808de/src/math/rand/rand.go#L127-L132 + default: + max := (1 << 63) - 1 - (1<<63)%(n+1) + v := r.uint63() + for v > max { + v = r.uint63() + } + return v % (n + 1) + } } -func (s *syncSource) Int63() int64 { - return int64(s.Uint64() & int63Mask) +// uint63 returns a random number in [0, MaxInt64] +func (r *rng) uint63() uint64 { + return r.uint64() & math.MaxInt64 } -func (s *syncSource) Uint64() uint64 { - s.lock.Lock() - n := s.rng.Uint64() - s.lock.Unlock() +// uint64 returns a random number in [0, MaxUint64] +func (r *rng) uint64() uint64 { + // Note: We must grab a write lock here because rng.Uint64 internally + // modifies state. 
+ r.lock.Lock() + n := r.rng.Uint64() + r.lock.Unlock() return n } diff --git a/avalanchego/utils/sampler/rand_test.go b/avalanchego/utils/sampler/rand_test.go new file mode 100644 index 00000000..d44f6c2d --- /dev/null +++ b/avalanchego/utils/sampler/rand_test.go @@ -0,0 +1,228 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package sampler + +import ( + "math" + "math/rand" + "strconv" + "testing" + + "github.com/stretchr/testify/require" + "github.com/thepudds/fzgen/fuzzer" + "gonum.org/v1/gonum/mathext/prng" +) + +type testSource struct { + onInvalid func() + nums []uint64 +} + +func (s *testSource) Seed(uint64) { + s.onInvalid() +} + +func (s *testSource) Uint64() uint64 { + if len(s.nums) == 0 { + s.onInvalid() + } + num := s.nums[0] + s.nums = s.nums[1:] + return num +} + +type testSTDSource struct { + onInvalid func() + nums []uint64 +} + +func (s *testSTDSource) Seed(int64) { + s.onInvalid() +} + +func (s *testSTDSource) Int63() int64 { + return int64(s.Uint64() & (1<<63 - 1)) +} + +func (s *testSTDSource) Uint64() uint64 { + if len(s.nums) == 0 { + s.onInvalid() + } + num := s.nums[0] + s.nums = s.nums[1:] + return num +} + +func TestRNG(t *testing.T) { + tests := []struct { + max uint64 + nums []uint64 + expected uint64 + }{ + { + max: math.MaxUint64, + nums: []uint64{ + 0x01, + }, + expected: 0x01, + }, + { + max: math.MaxUint64, + nums: []uint64{ + 0x0102030405060708, + }, + expected: 0x0102030405060708, + }, + { + max: math.MaxUint64, + nums: []uint64{ + 0xF102030405060708, + }, + expected: 0xF102030405060708, + }, + { + max: math.MaxInt64, + nums: []uint64{ + 0x01, + }, + expected: 0x01, + }, + { + max: math.MaxInt64, + nums: []uint64{ + 0x0102030405060708, + }, + expected: 0x0102030405060708, + }, + { + max: math.MaxInt64, + nums: []uint64{ + 0x8102030405060708, + }, + expected: 0x0102030405060708, + }, + { + max: 15, + nums: []uint64{ + 0x810203040506071a, + }, + expected: 0x0a, + 
}, + { + max: math.MaxInt64 + 1, + nums: []uint64{ + math.MaxInt64 + 1, + }, + expected: math.MaxInt64 + 1, + }, + { + max: math.MaxInt64 + 1, + nums: []uint64{ + math.MaxInt64 + 2, + 0, + }, + expected: 0, + }, + { + max: math.MaxInt64 + 1, + nums: []uint64{ + math.MaxInt64 + 2, + 0x0102030405060708, + }, + expected: 0x0102030405060708, + }, + { + max: 2, + nums: []uint64{ + math.MaxInt64 - 2, + }, + expected: 0x02, + }, + { + max: 2, + nums: []uint64{ + math.MaxInt64 - 1, + 0x01, + }, + expected: 0x01, + }, + } + for i, test := range tests { + t.Run(strconv.Itoa(i), func(t *testing.T) { + require := require.New(t) + + source := &testSource{ + onInvalid: t.FailNow, + nums: test.nums, + } + r := &rng{rng: source} + val := r.Uint64Inclusive(test.max) + require.Equal(test.expected, val) + require.Empty(source.nums) + + if test.max >= math.MaxInt64 { + return + } + + stdSource := &testSTDSource{ + onInvalid: t.FailNow, + nums: test.nums, + } + mathRNG := rand.New(stdSource) //#nosec G404 + stdVal := mathRNG.Int63n(int64(test.max + 1)) + require.Equal(test.expected, uint64(stdVal)) + require.Empty(source.nums) + }) + } +} + +func FuzzRNG(f *testing.F) { + f.Fuzz(func(t *testing.T, data []byte) { + require := require.New(t) + + var ( + max uint64 + sourceNums []uint64 + ) + fz := fuzzer.NewFuzzer(data) + fz.Fill(&max, &sourceNums) + if max >= math.MaxInt64 { + t.SkipNow() + } + + source := &testSource{ + onInvalid: t.SkipNow, + nums: sourceNums, + } + r := &rng{rng: source} + val := r.Uint64Inclusive(max) + + stdSource := &testSTDSource{ + onInvalid: t.SkipNow, + nums: sourceNums, + } + mathRNG := rand.New(stdSource) //#nosec G404 + stdVal := mathRNG.Int63n(int64(max + 1)) + require.Equal(val, uint64(stdVal)) + require.Len(stdSource.nums, len(source.nums)) + }) +} + +func BenchmarkSeed32(b *testing.B) { + source := prng.NewMT19937() + + b.ResetTimer() + for i := 0; i < b.N; i++ { + source.Seed(0) + } +} + +func BenchmarkSeed64(b *testing.B) { + source := 
prng.NewMT19937_64() + + b.ResetTimer() + for i := 0; i < b.N; i++ { + source.Seed(0) + } +} diff --git a/avalanchego/utils/sampler/uniform.go b/avalanchego/utils/sampler/uniform.go index 03f9068c..5ae9a21d 100644 --- a/avalanchego/utils/sampler/uniform.go +++ b/avalanchego/utils/sampler/uniform.go @@ -1,21 +1,32 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package sampler // Uniform samples values without replacement in the provided range type Uniform interface { - Initialize(sampleRange uint64) error + Initialize(sampleRange uint64) + // Sample returns length numbers in the range [0,sampleRange). If there + // aren't enough numbers in the range, an error is returned. If length is + // negative the implementation may panic. Sample(length int) ([]uint64, error) - Seed(int64) - ClearSeed() - Reset() Next() (uint64, error) } // NewUniform returns a new sampler func NewUniform() Uniform { - return &uniformReplacer{} + return &uniformReplacer{ + rng: globalRNG, + } +} + +// NewDeterministicUniform returns a new sampler +func NewDeterministicUniform(source Source) Uniform { + return &uniformReplacer{ + rng: &rng{ + rng: source, + }, + } } diff --git a/avalanchego/utils/sampler/uniform_benchmark_test.go b/avalanchego/utils/sampler/uniform_benchmark_test.go index d0fa5650..915fe45c 100644 --- a/avalanchego/utils/sampler/uniform_benchmark_test.go +++ b/avalanchego/utils/sampler/uniform_benchmark_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package sampler @@ -27,10 +27,7 @@ func BenchmarkAllUniform(b *testing.B) { } func UniformBenchmark(b *testing.B, s Uniform, size uint64, toSample int) { - err := s.Initialize(size) - if err != nil { - b.Fatal(err) - } + s.Initialize(size) b.ResetTimer() for i := 0; i < b.N; i++ { diff --git a/avalanchego/utils/sampler/uniform_best.go b/avalanchego/utils/sampler/uniform_best.go index e7ac3df4..21f7870d 100644 --- a/avalanchego/utils/sampler/uniform_best.go +++ b/avalanchego/utils/sampler/uniform_best.go @@ -1,21 +1,16 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package sampler import ( - "errors" "math" "time" "github.com/ava-labs/avalanchego/utils/timer/mockable" ) -var ( - errNoValidUniformSamplers = errors.New("no valid uniform samplers found") - - _ Uniform = (*uniformBest)(nil) -) +var _ Uniform = (*uniformBest)(nil) // Sampling is performed by using another implementation of the Uniform // interface. 
@@ -34,15 +29,19 @@ type uniformBest struct { func NewBestUniform(expectedSampleSize int) Uniform { return &uniformBest{ samplers: []Uniform{ - &uniformReplacer{}, - &uniformResample{}, + &uniformReplacer{ + rng: globalRNG, + }, + &uniformResample{ + rng: globalRNG, + }, }, maxSampleSize: expectedSampleSize, benchmarkIterations: 100, } } -func (s *uniformBest) Initialize(length uint64) error { +func (s *uniformBest) Initialize(length uint64) { s.Uniform = nil bestDuration := time.Duration(math.MaxInt64) @@ -53,9 +52,7 @@ func (s *uniformBest) Initialize(length uint64) error { samplerLoop: for _, sampler := range s.samplers { - if err := sampler.Initialize(length); err != nil { - continue - } + sampler.Initialize(length) start := s.clock.Time() for i := 0; i < s.benchmarkIterations; i++ { @@ -71,9 +68,5 @@ samplerLoop: } } - if s.Uniform == nil { - return errNoValidUniformSamplers - } s.Uniform.Reset() - return nil } diff --git a/avalanchego/utils/sampler/uniform_replacer.go b/avalanchego/utils/sampler/uniform_replacer.go index 5be2c383..80666a23 100644 --- a/avalanchego/utils/sampler/uniform_replacer.go +++ b/avalanchego/utils/sampler/uniform_replacer.go @@ -1,14 +1,8 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package sampler -import ( - "math" - - "golang.org/x/exp/maps" -) - type defaultMap map[uint64]uint64 func (m defaultMap) get(key uint64, defaultVal uint64) uint64 { @@ -30,23 +24,16 @@ func (m defaultMap) get(key uint64, defaultVal uint64) uint64 { // // Sampling is performed in O(count) time and O(count) space. 
type uniformReplacer struct { - rng rng - seededRNG rng + rng *rng length uint64 drawn defaultMap drawsCount uint64 } -func (s *uniformReplacer) Initialize(length uint64) error { - if length > math.MaxInt64 { - return errOutOfRange - } - s.rng = globalRNG - s.seededRNG = newRNG() +func (s *uniformReplacer) Initialize(length uint64) { s.length = length s.drawn = make(defaultMap) s.drawsCount = 0 - return nil } func (s *uniformReplacer) Sample(count int) ([]uint64, error) { @@ -63,26 +50,17 @@ func (s *uniformReplacer) Sample(count int) ([]uint64, error) { return results, nil } -func (s *uniformReplacer) Seed(seed int64) { - s.rng = s.seededRNG - s.rng.Seed(seed) -} - -func (s *uniformReplacer) ClearSeed() { - s.rng = globalRNG -} - func (s *uniformReplacer) Reset() { - maps.Clear(s.drawn) + clear(s.drawn) s.drawsCount = 0 } func (s *uniformReplacer) Next() (uint64, error) { if s.drawsCount >= s.length { - return 0, errOutOfRange + return 0, ErrOutOfRange } - draw := uint64(s.rng.Int63n(int64(s.length-s.drawsCount))) + s.drawsCount + draw := s.rng.Uint64Inclusive(s.length-1-s.drawsCount) + s.drawsCount ret := s.drawn.get(draw, draw) s.drawn[draw] = s.drawn.get(s.drawsCount, s.drawsCount) s.drawsCount++ diff --git a/avalanchego/utils/sampler/uniform_resample.go b/avalanchego/utils/sampler/uniform_resample.go index d6404f67..b05ce62f 100644 --- a/avalanchego/utils/sampler/uniform_resample.go +++ b/avalanchego/utils/sampler/uniform_resample.go @@ -1,14 +1,8 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package sampler -import ( - "math" - - "github.com/ava-labs/avalanchego/utils/set" -) - // uniformResample allows for sampling over a uniform distribution without // replacement. // @@ -19,21 +13,14 @@ import ( // // Sampling is performed in O(count) time and O(count) space. 
type uniformResample struct { - rng rng - seededRNG rng - length uint64 - drawn set.Set[uint64] + rng *rng + length uint64 + drawn map[uint64]struct{} } -func (s *uniformResample) Initialize(length uint64) error { - if length > math.MaxInt64 { - return errOutOfRange - } - s.rng = globalRNG - s.seededRNG = newRNG() +func (s *uniformResample) Initialize(length uint64) { s.length = length - s.drawn.Clear() - return nil + s.drawn = make(map[uint64]struct{}) } func (s *uniformResample) Sample(count int) ([]uint64, error) { @@ -50,31 +37,22 @@ func (s *uniformResample) Sample(count int) ([]uint64, error) { return results, nil } -func (s *uniformResample) Seed(seed int64) { - s.rng = s.seededRNG - s.rng.Seed(seed) -} - -func (s *uniformResample) ClearSeed() { - s.rng = globalRNG -} - func (s *uniformResample) Reset() { - s.drawn.Clear() + clear(s.drawn) } func (s *uniformResample) Next() (uint64, error) { i := uint64(len(s.drawn)) if i >= s.length { - return 0, errOutOfRange + return 0, ErrOutOfRange } for { - draw := uint64(s.rng.Int63n(int64(s.length))) - if s.drawn.Contains(draw) { + draw := s.rng.Uint64Inclusive(s.length - 1) + if _, ok := s.drawn[draw]; ok { continue } - s.drawn.Add(draw) + s.drawn[draw] = struct{}{} return draw, nil } } diff --git a/avalanchego/utils/sampler/uniform_test.go b/avalanchego/utils/sampler/uniform_test.go index d431d0ee..eb9862e7 100644 --- a/avalanchego/utils/sampler/uniform_test.go +++ b/avalanchego/utils/sampler/uniform_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package sampler @@ -6,11 +6,10 @@ package sampler import ( "fmt" "math" + "slices" "testing" "github.com/stretchr/testify/require" - - "golang.org/x/exp/slices" ) var ( @@ -19,12 +18,16 @@ var ( sampler Uniform }{ { - name: "replacer", - sampler: &uniformReplacer{}, + name: "replacer", + sampler: &uniformReplacer{ + rng: globalRNG, + }, }, { - name: "resampler", - sampler: &uniformResample{}, + name: "resampler", + sampler: &uniformResample{ + rng: globalRNG, + }, }, { name: "best", @@ -36,8 +39,8 @@ var ( test func(*testing.T, Uniform) }{ { - name: "initialize overflow", - test: UniformInitializeOverflowTest, + name: "can sample large values", + test: UniformInitializeMaxUint64Test, }, { name: "out of range", @@ -76,132 +79,83 @@ func TestAllUniform(t *testing.T) { } } -func UniformInitializeOverflowTest(t *testing.T, s Uniform) { - err := s.Initialize(math.MaxUint64) - require.Error(t, err, "should have reported an overflow error") +func UniformInitializeMaxUint64Test(t *testing.T, s Uniform) { + s.Initialize(math.MaxUint64) + + for { + val, err := s.Next() + require.NoError(t, err) + + if val > math.MaxInt64 { + break + } + } } func UniformOutOfRangeTest(t *testing.T, s Uniform) { - err := s.Initialize(0) - require.NoError(t, err) + s.Initialize(0) - _, err = s.Sample(1) - require.Error(t, err, "should have reported an out of range error") + _, err := s.Sample(1) + require.ErrorIs(t, err, ErrOutOfRange) } func UniformEmptyTest(t *testing.T, s Uniform) { - err := s.Initialize(1) - require.NoError(t, err) + require := require.New(t) + + s.Initialize(1) val, err := s.Sample(0) - require.NoError(t, err) - require.Len(t, val, 0, "shouldn't have selected any element") + require.NoError(err) + require.Empty(val) } func UniformSingletonTest(t *testing.T, s Uniform) { - err := s.Initialize(1) - require.NoError(t, err) + require := require.New(t) + + s.Initialize(1) val, err := s.Sample(1) - require.NoError(t, err) - require.Equal(t, []uint64{0}, val, "should have 
selected the only element") + require.NoError(err) + require.Equal([]uint64{0}, val) } func UniformDistributionTest(t *testing.T, s Uniform) { - err := s.Initialize(3) - require.NoError(t, err) + require := require.New(t) + + s.Initialize(3) val, err := s.Sample(3) - require.NoError(t, err) + require.NoError(err) slices.Sort(val) - require.Equal( - t, - []uint64{0, 1, 2}, - val, - "should have selected the only element", - ) + require.Equal([]uint64{0, 1, 2}, val) } func UniformOverSampleTest(t *testing.T, s Uniform) { - err := s.Initialize(3) - require.NoError(t, err) + s.Initialize(3) - _, err = s.Sample(4) - require.Error(t, err, "should have returned an out of range error") + _, err := s.Sample(4) + require.ErrorIs(t, err, ErrOutOfRange) } func UniformLazilySample(t *testing.T, s Uniform) { - err := s.Initialize(3) - require.NoError(t, err) + require := require.New(t) + + s.Initialize(3) for j := 0; j < 2; j++ { sampled := map[uint64]bool{} for i := 0; i < 3; i++ { val, err := s.Next() - require.NoError(t, err) - require.False(t, sampled[val]) + require.NoError(err) + require.False(sampled[val]) sampled[val] = true } - _, err = s.Next() - require.Error(t, err, "should have returned an out of range error") + _, err := s.Next() + require.ErrorIs(err, ErrOutOfRange) s.Reset() } } - -func TestSeeding(t *testing.T) { - require := require.New(t) - - s1 := NewBestUniform(30) - s2 := NewBestUniform(30) - - err := s1.Initialize(50) - require.NoError(err) - - err = s2.Initialize(50) - require.NoError(err) - - s1.Seed(0) - - s1.Reset() - s1Val, err := s1.Next() - require.NoError(err) - - s2.Seed(1) - s2.Reset() - - s1.Seed(0) - v, err := s2.Next() - require.NoError(err) - require.NotEqualValues(s1Val, v) - - s1.ClearSeed() - - _, err = s1.Next() - require.NoError(err) -} - -func TestSeedingProducesTheSame(t *testing.T) { - require := require.New(t) - - s := NewBestUniform(30) - - err := s.Initialize(50) - require.NoError(err) - - s.Seed(0) - s.Reset() - - val0, err := 
s.Next() - require.NoError(err) - - s.Seed(0) - s.Reset() - - val1, err := s.Next() - require.NoError(err) - require.Equal(val0, val1) -} diff --git a/avalanchego/utils/sampler/weighted.go b/avalanchego/utils/sampler/weighted.go index 58998d47..2296da08 100644 --- a/avalanchego/utils/sampler/weighted.go +++ b/avalanchego/utils/sampler/weighted.go @@ -1,11 +1,11 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package sampler import "errors" -var errOutOfRange = errors.New("out of range") +var ErrOutOfRange = errors.New("out of range") // Weighted defines how to sample a specified valued based on a provided // weighted distribution diff --git a/avalanchego/utils/sampler/weighted_array.go b/avalanchego/utils/sampler/weighted_array.go index c6b411b8..bbbf9891 100644 --- a/avalanchego/utils/sampler/weighted_array.go +++ b/avalanchego/utils/sampler/weighted_array.go @@ -1,9 +1,11 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package sampler import ( + "cmp" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/math" ) @@ -19,8 +21,8 @@ type weightedArrayElement struct { } // Note that this sorts in order of decreasing weight. 
-func (e weightedArrayElement) Less(other weightedArrayElement) bool { - return e.cumulativeWeight > other.cumulativeWeight +func (e weightedArrayElement) Compare(other weightedArrayElement) int { + return cmp.Compare(other.cumulativeWeight, e.cumulativeWeight) } // Sampling is performed by executing a modified binary search over the provided @@ -81,7 +83,7 @@ func (s *weightedArray) Initialize(weights []uint64) error { func (s *weightedArray) Sample(value uint64) (int, error) { if len(s.arr) == 0 || s.arr[len(s.arr)-1].cumulativeWeight <= value { - return 0, errOutOfRange + return 0, ErrOutOfRange } minIndex := 0 maxIndex := len(s.arr) - 1 diff --git a/avalanchego/utils/sampler/weighted_array_test.go b/avalanchego/utils/sampler/weighted_array_test.go index e1058363..866a0c7d 100644 --- a/avalanchego/utils/sampler/weighted_array_test.go +++ b/avalanchego/utils/sampler/weighted_array_test.go @@ -1,27 +1,42 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package sampler import ( + "fmt" "testing" "github.com/stretchr/testify/require" ) -func TestWeightedArrayElementLess(t *testing.T) { - require := require.New(t) - - var elt1, elt2 weightedArrayElement - require.False(elt1.Less(elt2)) - require.False(elt2.Less(elt1)) - - elt1 = weightedArrayElement{ - cumulativeWeight: 1, +func TestWeightedArrayElementCompare(t *testing.T) { + tests := []struct { + a weightedArrayElement + b weightedArrayElement + expected int + }{ + { + a: weightedArrayElement{}, + b: weightedArrayElement{}, + expected: 0, + }, + { + a: weightedArrayElement{ + cumulativeWeight: 1, + }, + b: weightedArrayElement{ + cumulativeWeight: 2, + }, + expected: 1, + }, } - elt2 = weightedArrayElement{ - cumulativeWeight: 2, + for _, test := range tests { + t.Run(fmt.Sprintf("%d_%d_%d", test.a.cumulativeWeight, test.b.cumulativeWeight, test.expected), func(t *testing.T) { + require := require.New(t) + + require.Equal(test.expected, test.a.Compare(test.b)) + require.Equal(-test.expected, test.b.Compare(test.a)) + }) } - require.False(elt1.Less(elt2)) - require.True(elt2.Less(elt1)) } diff --git a/avalanchego/utils/sampler/weighted_benchmark_test.go b/avalanchego/utils/sampler/weighted_benchmark_test.go index f0bfb4e7..897e0019 100644 --- a/avalanchego/utils/sampler/weighted_benchmark_test.go +++ b/avalanchego/utils/sampler/weighted_benchmark_test.go @@ -1,10 +1,9 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package sampler import ( - "errors" "fmt" "math" "testing" @@ -12,8 +11,6 @@ import ( safemath "github.com/ava-labs/avalanchego/utils/math" ) -var errOverflow = errors.New("overflow error") - // BenchmarkAllWeightedSampling func BenchmarkAllWeightedSampling(b *testing.B) { pows := []float64{ @@ -97,9 +94,6 @@ func CalcWeightedPoW(exponent float64, size int) (uint64, []uint64, error) { } totalWeight = newWeight } - if totalWeight > math.MaxInt64 { - return 0, nil, errOverflow - } return totalWeight, weights, nil } @@ -119,14 +113,14 @@ func WeightedPowBenchmarkSampler( b.ResetTimer() for i := 0; i < b.N; i++ { - _, _ = s.Sample(uint64(globalRNG.Int63n(int64(totalWeight)))) + _, _ = s.Sample(globalRNG.Uint64Inclusive(totalWeight - 1)) } return true } func WeightedSingletonBenchmarkSampler(b *testing.B, s Weighted, size int) bool { weights := make([]uint64, size) - weights[0] = uint64(math.MaxInt64 - size + 1) + weights[0] = math.MaxUint64 - uint64(size-1) for i := 1; i < len(weights); i++ { weights[i] = 1 } @@ -138,7 +132,7 @@ func WeightedSingletonBenchmarkSampler(b *testing.B, s Weighted, size int) bool b.ResetTimer() for i := 0; i < b.N; i++ { - _, _ = s.Sample(uint64(globalRNG.Int63n(math.MaxInt64))) + _, _ = s.Sample(globalRNG.Uint64Inclusive(math.MaxUint64 - 1)) } return true } @@ -159,7 +153,7 @@ func WeightedPowBenchmarkInitializer( func WeightedSingletonBenchmarkInitializer(b *testing.B, s Weighted, size int) { weights := make([]uint64, size) - weights[0] = uint64(math.MaxInt64 - size + 1) + weights[0] = math.MaxUint64 - uint64(size-1) for i := 1; i < len(weights); i++ { weights[i] = 1 } diff --git a/avalanchego/utils/sampler/weighted_best.go b/avalanchego/utils/sampler/weighted_best.go index 473ded5f..59bf6001 100644 --- a/avalanchego/utils/sampler/weighted_best.go +++ b/avalanchego/utils/sampler/weighted_best.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. package sampler @@ -41,15 +41,11 @@ func (s *weightedBest) Initialize(weights []uint64) error { totalWeight = newWeight } - if totalWeight > math.MaxInt64 { - return errWeightsTooLarge - } - samples := []uint64(nil) if totalWeight > 0 { samples = make([]uint64, s.benchmarkIterations) for i := range samples { - samples[i] = uint64(globalRNG.Int63n(int64(totalWeight))) + samples[i] = globalRNG.Uint64Inclusive(totalWeight - 1) } } diff --git a/avalanchego/utils/sampler/weighted_heap.go b/avalanchego/utils/sampler/weighted_heap.go index 1457d665..f4002a85 100644 --- a/avalanchego/utils/sampler/weighted_heap.go +++ b/avalanchego/utils/sampler/weighted_heap.go @@ -1,9 +1,11 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package sampler import ( + "cmp" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/math" ) @@ -19,17 +21,16 @@ type weightedHeapElement struct { index int } -func (e weightedHeapElement) Less(other weightedHeapElement) bool { +// Compare the elements. Weight is in decreasing order. Index is in increasing +// order. +func (e weightedHeapElement) Compare(other weightedHeapElement) int { // By accounting for the initial index of the weights, this results in a // stable sort. We do this rather than using `sort.Stable` because of the // reported change in performance of the sort used. 
- if e.weight > other.weight { - return true - } - if e.weight < other.weight { - return false + if weightCmp := cmp.Compare(other.weight, e.weight); weightCmp != 0 { + return weightCmp } - return e.index < other.index + return cmp.Compare(e.index, other.index) } // Sampling is performed by executing a search over a tree of elements in the @@ -81,7 +82,7 @@ func (s *weightedHeap) Initialize(weights []uint64) error { func (s *weightedHeap) Sample(value uint64) (int, error) { if len(s.heap) == 0 || s.heap[0].cumulativeWeight <= value { - return 0, errOutOfRange + return 0, ErrOutOfRange } index := 0 diff --git a/avalanchego/utils/sampler/weighted_heap_test.go b/avalanchego/utils/sampler/weighted_heap_test.go index 098f431b..03aa94df 100644 --- a/avalanchego/utils/sampler/weighted_heap_test.go +++ b/avalanchego/utils/sampler/weighted_heap_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package sampler @@ -14,8 +14,7 @@ func TestWeightedHeapInitialize(t *testing.T) { h := weightedHeap{} - err := h.Initialize([]uint64{2, 2, 1, 3, 3, 1, 3}) - require.NoError(err) + require.NoError(h.Initialize([]uint64{2, 2, 1, 3, 3, 1, 3})) expectedOrdering := []int{3, 4, 6, 0, 1, 2, 5} for i, elem := range h.heap { @@ -24,58 +23,44 @@ func TestWeightedHeapInitialize(t *testing.T) { } } -func TestWeightedHeapElementLess(t *testing.T) { +func TestWeightedHeapElementCompare(t *testing.T) { type test struct { name string elt1 weightedHeapElement elt2 weightedHeapElement - expected bool + expected int } tests := []test{ { name: "all same", elt1: weightedHeapElement{}, elt2: weightedHeapElement{}, - expected: false, + expected: 0, }, { - name: "first lower weight", + name: "lower weight", elt1: weightedHeapElement{}, elt2: weightedHeapElement{ weight: 1, }, - expected: false, + expected: 1, }, { - name: "first higher weight", - elt1: weightedHeapElement{ - weight: 1, - }, - elt2: weightedHeapElement{}, - expected: true, - }, - { - name: "first higher index", + name: "higher index", elt1: weightedHeapElement{ index: 1, }, elt2: weightedHeapElement{}, - expected: false, - }, - { - name: "second higher index", - elt1: weightedHeapElement{}, - elt2: weightedHeapElement{ - index: 1, - }, - expected: true, + expected: 1, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { require := require.New(t) - require.Equal(tt.expected, tt.elt1.Less(tt.elt2)) + + require.Equal(tt.expected, tt.elt1.Compare(tt.elt2)) + require.Equal(-tt.expected, tt.elt2.Compare(tt.elt1)) }) } } diff --git a/avalanchego/utils/sampler/weighted_linear.go b/avalanchego/utils/sampler/weighted_linear.go index f58fa22a..d6f0c5d7 100644 --- a/avalanchego/utils/sampler/weighted_linear.go +++ b/avalanchego/utils/sampler/weighted_linear.go @@ -1,9 +1,11 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. package sampler import ( + "cmp" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/math" ) @@ -19,8 +21,8 @@ type weightedLinearElement struct { } // Note that this sorts in order of decreasing cumulative weight. -func (e weightedLinearElement) Less(other weightedLinearElement) bool { - return e.cumulativeWeight > other.cumulativeWeight +func (e weightedLinearElement) Compare(other weightedLinearElement) int { + return cmp.Compare(other.cumulativeWeight, e.cumulativeWeight) } // Sampling is performed by executing a linear search over the provided elements @@ -68,7 +70,7 @@ func (s *weightedLinear) Initialize(weights []uint64) error { func (s *weightedLinear) Sample(value uint64) (int, error) { if len(s.arr) == 0 || s.arr[len(s.arr)-1].cumulativeWeight <= value { - return 0, errOutOfRange + return 0, ErrOutOfRange } index := 0 diff --git a/avalanchego/utils/sampler/weighted_linear_test.go b/avalanchego/utils/sampler/weighted_linear_test.go index b3403501..dd86679d 100644 --- a/avalanchego/utils/sampler/weighted_linear_test.go +++ b/avalanchego/utils/sampler/weighted_linear_test.go @@ -1,27 +1,42 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package sampler import ( + "fmt" "testing" "github.com/stretchr/testify/require" ) -func TestWeightedLinearElementLess(t *testing.T) { - require := require.New(t) - - var elt1, elt2 weightedLinearElement - require.False(elt1.Less(elt2)) - require.False(elt2.Less(elt1)) - - elt1 = weightedLinearElement{ - cumulativeWeight: 1, +func TestWeightedLinearElementCompare(t *testing.T) { + tests := []struct { + a weightedLinearElement + b weightedLinearElement + expected int + }{ + { + a: weightedLinearElement{}, + b: weightedLinearElement{}, + expected: 0, + }, + { + a: weightedLinearElement{ + cumulativeWeight: 1, + }, + b: weightedLinearElement{ + cumulativeWeight: 2, + }, + expected: 1, + }, } - elt2 = weightedLinearElement{ - cumulativeWeight: 2, + for _, test := range tests { + t.Run(fmt.Sprintf("%d_%d_%d", test.a.cumulativeWeight, test.b.cumulativeWeight, test.expected), func(t *testing.T) { + require := require.New(t) + + require.Equal(test.expected, test.a.Compare(test.b)) + require.Equal(-test.expected, test.b.Compare(test.a)) + }) } - require.False(elt1.Less(elt2)) - require.True(elt2.Less(elt1)) } diff --git a/avalanchego/utils/sampler/weighted_test.go b/avalanchego/utils/sampler/weighted_test.go index cb22b431..ea08230d 100644 --- a/avalanchego/utils/sampler/weighted_test.go +++ b/avalanchego/utils/sampler/weighted_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package sampler @@ -9,6 +9,8 @@ import ( "testing" "github.com/stretchr/testify/require" + + safemath "github.com/ava-labs/avalanchego/utils/math" ) var ( @@ -87,44 +89,48 @@ func TestAllWeighted(t *testing.T) { func WeightedInitializeOverflowTest(t *testing.T, s Weighted) { err := s.Initialize([]uint64{1, math.MaxUint64}) - require.Error(t, err, "should have reported an overflow error") + require.ErrorIs(t, err, safemath.ErrOverflow) } func WeightedOutOfRangeTest(t *testing.T, s Weighted) { - err := s.Initialize([]uint64{1}) - require.NoError(t, err) + require := require.New(t) + + require.NoError(s.Initialize([]uint64{1})) - _, err = s.Sample(1) - require.Error(t, err, "should have reported an out of range error") + _, err := s.Sample(1) + require.ErrorIs(err, ErrOutOfRange) } func WeightedSingletonTest(t *testing.T, s Weighted) { - err := s.Initialize([]uint64{1}) - require.NoError(t, err) + require := require.New(t) + + require.NoError(s.Initialize([]uint64{1})) index, err := s.Sample(0) - require.NoError(t, err) - require.Equal(t, 0, index, "should have selected the first element") + require.NoError(err) + require.Zero(index) } func WeightedWithZeroTest(t *testing.T, s Weighted) { - err := s.Initialize([]uint64{0, 1}) - require.NoError(t, err) + require := require.New(t) + + require.NoError(s.Initialize([]uint64{0, 1})) index, err := s.Sample(0) - require.NoError(t, err) - require.Equal(t, 1, index, "should have selected the second element") + require.NoError(err) + require.Equal(1, index) } func WeightedDistributionTest(t *testing.T, s Weighted) { - err := s.Initialize([]uint64{1, 1, 2, 3, 4}) - require.NoError(t, err) + require := require.New(t) + + require.NoError(s.Initialize([]uint64{1, 1, 2, 3, 4})) counts := make([]int, 5) for i := uint64(0); i < 11; i++ { index, err := s.Sample(i) - require.NoError(t, err) + require.NoError(err) counts[index]++ } - require.Equal(t, []int{1, 1, 2, 3, 4}, counts, "wrong distribution returned") + require.Equal([]int{1, 1, 
2, 3, 4}, counts) } diff --git a/avalanchego/utils/sampler/weighted_uniform.go b/avalanchego/utils/sampler/weighted_uniform.go index 14620475..22dbb6b5 100644 --- a/avalanchego/utils/sampler/weighted_uniform.go +++ b/avalanchego/utils/sampler/weighted_uniform.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package sampler @@ -63,7 +63,7 @@ func (s *weightedUniform) Initialize(weights []uint64) error { func (s *weightedUniform) Sample(value uint64) (int, error) { if uint64(len(s.indices)) <= value { - return 0, errOutOfRange + return 0, ErrOutOfRange } return s.indices[int(value)], nil } diff --git a/avalanchego/utils/sampler/weighted_without_replacement.go b/avalanchego/utils/sampler/weighted_without_replacement.go index a6039a65..a6b61992 100644 --- a/avalanchego/utils/sampler/weighted_without_replacement.go +++ b/avalanchego/utils/sampler/weighted_without_replacement.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package sampler @@ -9,15 +9,12 @@ package sampler type WeightedWithoutReplacement interface { Initialize(weights []uint64) error Sample(count int) ([]int, error) - - Seed(int64) - ClearSeed() } // NewWeightedWithoutReplacement returns a new sampler -func NewDeterministicWeightedWithoutReplacement() WeightedWithoutReplacement { +func NewDeterministicWeightedWithoutReplacement(source Source) WeightedWithoutReplacement { return &weightedWithoutReplacementGeneric{ - u: NewUniform(), + u: NewDeterministicUniform(source), w: NewDeterministicWeighted(), } } diff --git a/avalanchego/utils/sampler/weighted_without_replacement_benchmark_test.go b/avalanchego/utils/sampler/weighted_without_replacement_benchmark_test.go index 3d9b0085..58becf9d 100644 --- a/avalanchego/utils/sampler/weighted_without_replacement_benchmark_test.go +++ b/avalanchego/utils/sampler/weighted_without_replacement_benchmark_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package sampler @@ -6,6 +6,8 @@ package sampler import ( "fmt" "testing" + + "github.com/stretchr/testify/require" ) // BenchmarkAllWeightedWithoutReplacement @@ -40,13 +42,11 @@ func WeightedWithoutReplacementPowBenchmark( size int, count int, ) { + require := require.New(b) + _, weights, err := CalcWeightedPoW(exponent, size) - if err != nil { - b.Fatal(err) - } - if err := s.Initialize(weights); err != nil { - b.Fatal(err) - } + require.NoError(err) + require.NoError(s.Initialize(weights)) b.ResetTimer() for i := 0; i < b.N; i++ { diff --git a/avalanchego/utils/sampler/weighted_without_replacement_generic.go b/avalanchego/utils/sampler/weighted_without_replacement_generic.go index 08731baf..c45d64d0 100644 --- a/avalanchego/utils/sampler/weighted_without_replacement_generic.go +++ b/avalanchego/utils/sampler/weighted_without_replacement_generic.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package sampler @@ -21,9 +21,7 @@ func (s *weightedWithoutReplacementGeneric) Initialize(weights []uint64) error { } totalWeight = newWeight } - if err := s.u.Initialize(totalWeight); err != nil { - return err - } + s.u.Initialize(totalWeight) return s.w.Initialize(weights) } @@ -43,11 +41,3 @@ func (s *weightedWithoutReplacementGeneric) Sample(count int) ([]int, error) { } return indices, nil } - -func (s *weightedWithoutReplacementGeneric) Seed(seed int64) { - s.u.Seed(seed) -} - -func (s *weightedWithoutReplacementGeneric) ClearSeed() { - s.u.ClearSeed() -} diff --git a/avalanchego/utils/sampler/weighted_without_replacement_test.go b/avalanchego/utils/sampler/weighted_without_replacement_test.go index 42b5ac2a..8d346914 100644 --- a/avalanchego/utils/sampler/weighted_without_replacement_test.go +++ b/avalanchego/utils/sampler/weighted_without_replacement_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. 
All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package sampler @@ -6,11 +6,12 @@ package sampler import ( "fmt" "math" + "slices" "testing" "github.com/stretchr/testify/require" - "golang.org/x/exp/slices" + safemath "github.com/ava-labs/avalanchego/utils/math" ) var ( @@ -21,7 +22,9 @@ var ( { name: "generic with replacer and best", sampler: &weightedWithoutReplacementGeneric{ - u: &uniformReplacer{}, + u: &uniformReplacer{ + rng: globalRNG, + }, w: &weightedBest{ samplers: []Weighted{ &weightedArray{}, @@ -85,88 +88,84 @@ func WeightedWithoutReplacementInitializeOverflowTest( s WeightedWithoutReplacement, ) { err := s.Initialize([]uint64{1, math.MaxUint64}) - require.Error(t, err, "should have reported an overflow error") + require.ErrorIs(t, err, safemath.ErrOverflow) } func WeightedWithoutReplacementOutOfRangeTest( t *testing.T, s WeightedWithoutReplacement, ) { - err := s.Initialize([]uint64{1}) - require.NoError(t, err) + require := require.New(t) + + require.NoError(s.Initialize([]uint64{1})) - _, err = s.Sample(2) - require.Error(t, err, "should have reported an out of range error") + _, err := s.Sample(2) + require.ErrorIs(err, ErrOutOfRange) } func WeightedWithoutReplacementEmptyWithoutWeightTest( t *testing.T, s WeightedWithoutReplacement, ) { - err := s.Initialize(nil) - require.NoError(t, err) + require := require.New(t) + + require.NoError(s.Initialize(nil)) indices, err := s.Sample(0) - require.NoError(t, err) - require.Len(t, indices, 0, "shouldn't have selected any elements") + require.NoError(err) + require.Empty(indices) } func WeightedWithoutReplacementEmptyTest( t *testing.T, s WeightedWithoutReplacement, ) { - err := s.Initialize([]uint64{1}) - require.NoError(t, err) + require := require.New(t) + + require.NoError(s.Initialize([]uint64{1})) indices, err := s.Sample(0) - require.NoError(t, err) - require.Len(t, indices, 0, "shouldn't have selected any 
elements") + require.NoError(err) + require.Empty(indices) } func WeightedWithoutReplacementSingletonTest( t *testing.T, s WeightedWithoutReplacement, ) { - err := s.Initialize([]uint64{1}) - require.NoError(t, err) + require := require.New(t) + + require.NoError(s.Initialize([]uint64{1})) indices, err := s.Sample(1) - require.NoError(t, err) - require.Equal(t, []int{0}, indices, "should have selected the first element") + require.NoError(err) + require.Equal([]int{0}, indices) } func WeightedWithoutReplacementWithZeroTest( t *testing.T, s WeightedWithoutReplacement, ) { - err := s.Initialize([]uint64{0, 1}) - require.NoError(t, err) + require := require.New(t) + + require.NoError(s.Initialize([]uint64{0, 1})) indices, err := s.Sample(1) - require.NoError(t, err) - require.Equal( - t, - []int{1}, - indices, - "should have selected the second element", - ) + require.NoError(err) + require.Equal([]int{1}, indices) } func WeightedWithoutReplacementDistributionTest( t *testing.T, s WeightedWithoutReplacement, ) { - err := s.Initialize([]uint64{1, 1, 2}) - require.NoError(t, err) + require := require.New(t) + + require.NoError(s.Initialize([]uint64{1, 1, 2})) indices, err := s.Sample(4) - require.NoError(t, err) + require.NoError(err) slices.Sort(indices) - require.Equal( - t, - []int{0, 1, 2, 2}, - indices, - "should have selected all the elements", - ) + require.Equal([]int{0, 1, 2, 2}, indices) } diff --git a/avalanchego/utils/set/bits.go b/avalanchego/utils/set/bits.go index bf7f5f7b..a6e74fb6 100644 --- a/avalanchego/utils/set/bits.go +++ b/avalanchego/utils/set/bits.go @@ -1,10 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package set import ( - "fmt" + "encoding/hex" "math/big" "math/bits" ) @@ -98,5 +98,5 @@ func BitsFromBytes(bytes []byte) Bits { // String returns the hex representation of this bitset func (b Bits) String() string { - return fmt.Sprintf("%x", b.bits.Bytes()) + return hex.EncodeToString(b.bits.Bytes()) } diff --git a/avalanchego/utils/set/bits_64.go b/avalanchego/utils/set/bits_64.go index eed00afd..d67c405a 100644 --- a/avalanchego/utils/set/bits_64.go +++ b/avalanchego/utils/set/bits_64.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package set diff --git a/avalanchego/utils/set/bits_64_test.go b/avalanchego/utils/set/bits_64_test.go index 3f62d1cc..b31bf997 100644 --- a/avalanchego/utils/set/bits_64_test.go +++ b/avalanchego/utils/set/bits_64_test.go @@ -1,154 +1,111 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package set -import "testing" +import ( + "testing" + + "github.com/stretchr/testify/require" +) func TestBits64(t *testing.T) { - var bs1 Bits64 + require := require.New(t) - if bs1.Len() != 0 { - t.Fatalf("Empty set's len should be 0") - } + var bs1 Bits64 + require.Empty(bs1) bs1.Add(5) - if bs1.Len() != 1 { - t.Fatalf("Wrong set length") - } else if !bs1.Contains(5) { - t.Fatalf("Set should contain element") - } + require.Equal(1, bs1.Len()) + require.True(bs1.Contains(5)) bs1.Add(10) - switch { - case bs1.Len() != 2: - t.Fatalf("Wrong set length") - case !bs1.Contains(5): - t.Fatalf("Set should contain element") - case !bs1.Contains(10): - t.Fatalf("Set should contain element") - } + require.Equal(2, bs1.Len()) + require.True(bs1.Contains(5)) + require.True(bs1.Contains(10)) bs1.Add(10) - switch { - case bs1.Len() != 2: - t.Fatalf("Wrong set length") - case !bs1.Contains(5): - t.Fatalf("Set should contain element") - case !bs1.Contains(10): - t.Fatalf("Set should contain element") - } + require.Equal(2, bs1.Len()) + require.True(bs1.Contains(5)) + require.True(bs1.Contains(10)) var bs2 Bits64 + require.Empty(bs2) bs2.Add(0) - if bs2.Len() != 1 { - t.Fatalf("Wrong set length") - } else if !bs2.Contains(0) { - t.Fatalf("Set should contain element") - } + require.Equal(1, bs2.Len()) + require.True(bs2.Contains(0)) bs2.Union(bs1) - switch { - case bs1.Len() != 2: - t.Fatalf("Wrong set length") - case !bs1.Contains(5): - t.Fatalf("Set should contain element") - case !bs1.Contains(10): - t.Fatalf("Set should contain element") - case bs2.Len() != 3: - t.Fatalf("Wrong set length") - case !bs2.Contains(0): - t.Fatalf("Set should contain element") - case !bs2.Contains(5): - t.Fatalf("Set should contain element") - case !bs2.Contains(10): - t.Fatalf("Set should contain element") - } + require.Equal(2, bs1.Len()) + require.True(bs1.Contains(5)) + require.True(bs1.Contains(10)) + require.Equal(3, bs2.Len()) + require.True(bs2.Contains(0)) + require.True(bs2.Contains(5)) + 
require.True(bs2.Contains(10)) bs1.Clear() - switch { - case bs1.Len() != 0: - t.Fatalf("Wrong set length") - case bs2.Len() != 3: - t.Fatalf("Wrong set length") - case !bs2.Contains(0): - t.Fatalf("Set should contain element") - case !bs2.Contains(5): - t.Fatalf("Set should contain element") - case !bs2.Contains(10): - t.Fatalf("Set should contain element") - } + require.Empty(bs1) + require.Equal(3, bs2.Len()) + require.True(bs2.Contains(0)) + require.True(bs2.Contains(5)) + require.True(bs2.Contains(10)) bs1.Add(63) - if bs1.Len() != 1 { - t.Fatalf("Wrong set length") - } else if !bs1.Contains(63) { - t.Fatalf("Set should contain element") - } + require.Equal(1, bs1.Len()) + require.True(bs1.Contains(63)) bs1.Add(1) - switch { - case bs1.Len() != 2: - t.Fatalf("Wrong set length") - case !bs1.Contains(1): - t.Fatalf("Set should contain element") - case !bs1.Contains(63): - t.Fatalf("Set should contain element") - } + require.Equal(2, bs1.Len()) + require.True(bs1.Contains(1)) + require.True(bs1.Contains(63)) bs1.Remove(63) - if bs1.Len() != 1 { - t.Fatalf("Wrong set length") - } else if !bs1.Contains(1) { - t.Fatalf("Set should contain element") - } + require.Equal(1, bs1.Len()) + require.True(bs1.Contains(1)) var bs3 Bits64 + require.Empty(bs3) bs3.Add(0) bs3.Add(2) bs3.Add(5) var bs4 Bits64 + require.Empty(bs4) bs4.Add(2) bs4.Add(5) bs3.Intersection(bs4) - switch { - case bs3.Len() != 2: - t.Fatalf("Wrong set length") - case !bs3.Contains(2): - t.Fatalf("Set should contain element") - case !bs3.Contains(5): - t.Fatalf("Set should contain element") - case bs4.Len() != 2: - t.Fatalf("Wrong set length") - } + require.Equal(2, bs3.Len()) + require.True(bs3.Contains(2)) + require.True(bs3.Contains(5)) + require.Equal(2, bs4.Len()) + require.True(bs4.Contains(2)) + require.True(bs4.Contains(5)) var bs5 Bits64 + require.Empty(bs5) bs5.Add(7) bs5.Add(11) bs5.Add(9) var bs6 Bits64 + require.Empty(bs6) bs6.Add(9) bs6.Add(11) bs5.Difference(bs6) - - switch { - case 
bs5.Len() != 1: - t.Fatalf("Wrong set length") - case !bs5.Contains(7): - t.Fatalf("Set should contain element") - case bs6.Len() != 2: - t.Fatalf("Wrong set length") - } + require.Equal(1, bs5.Len()) + require.True(bs5.Contains(7)) + require.Equal(2, bs6.Len()) + require.True(bs6.Contains(9)) + require.True(bs6.Contains(11)) } func TestBits64String(t *testing.T) { @@ -156,9 +113,5 @@ func TestBits64String(t *testing.T) { bs.Add(17) - expected := "0000000000020000" - - if bsString := bs.String(); bsString != expected { - t.Fatalf("BitSet.String returned %s expected %s", bsString, expected) - } + require.Equal(t, "0000000000020000", bs.String()) } diff --git a/avalanchego/utils/set/bits_test.go b/avalanchego/utils/set/bits_test.go index 5541395a..c4c838d5 100644 --- a/avalanchego/utils/set/bits_test.go +++ b/avalanchego/utils/set/bits_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package set @@ -471,8 +471,6 @@ func Test_Bits_Len(t *testing.T) { } func Test_Bits_Bytes(t *testing.T) { - require := require.New(t) - type test struct { name string elts []int @@ -495,6 +493,8 @@ func Test_Bits_Bytes(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + b := NewBits(tt.elts...) bytes := b.Bytes() fromBytes := BitsFromBytes(bytes) diff --git a/avalanchego/utils/set/sampleable_set.go b/avalanchego/utils/set/sampleable_set.go new file mode 100644 index 00000000..efb63c2d --- /dev/null +++ b/avalanchego/utils/set/sampleable_set.go @@ -0,0 +1,235 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+
+package set
+
+import (
+	"bytes"
+	"encoding/json"
+	"slices"
+
+	"github.com/ava-labs/avalanchego/utils"
+	"github.com/ava-labs/avalanchego/utils/sampler"
+	"github.com/ava-labs/avalanchego/utils/wrappers"
+
+	avajson "github.com/ava-labs/avalanchego/utils/json"
+)
+
+var _ json.Marshaler = (*SampleableSet[int])(nil)
+
+// SampleableSet is a set of elements that supports sampling.
+type SampleableSet[T comparable] struct {
+	// indices maps the element in the set to the index that it appears in
+	// elements.
+	indices  map[T]int
+	elements []T
+}
+
+// OfSampleable returns a Set initialized with [elts]
+func OfSampleable[T comparable](elts ...T) SampleableSet[T] {
+	s := NewSampleableSet[T](len(elts))
+	s.Add(elts...)
+	return s
+}
+
+// Return a new sampleable set with initial capacity [size].
+// More or less than [size] elements can be added to this set.
+// Using NewSampleableSet() rather than SampleableSet[T]{} is just an
+// optimization that can be used if you know how many elements will be put in
+// this set.
+func NewSampleableSet[T comparable](size int) SampleableSet[T] {
+	if size < 0 {
+		return SampleableSet[T]{}
+	}
+	return SampleableSet[T]{
+		indices:  make(map[T]int, size),
+		elements: make([]T, 0, size),
+	}
+}
+
+// Add all the elements to this set.
+// If the element is already in the set, nothing happens.
+func (s *SampleableSet[T]) Add(elements ...T) {
+	s.resize(2 * len(elements))
+	for _, e := range elements {
+		s.add(e)
+	}
+}
+
+// Union adds all the elements from the provided set to this set.
+func (s *SampleableSet[T]) Union(set SampleableSet[T]) {
+	s.resize(2 * set.Len())
+	for _, e := range set.elements {
+		s.add(e)
+	}
+}
+
+// Difference removes all the elements in [set] from [s].
+func (s *SampleableSet[T]) Difference(set SampleableSet[T]) {
+	for _, e := range set.elements {
+		s.remove(e)
+	}
+}
+
+// Contains returns true iff the set contains this element.
+func (s SampleableSet[T]) Contains(e T) bool { + _, contains := s.indices[e] + return contains +} + +// Overlaps returns true if the intersection of the set is non-empty +func (s SampleableSet[T]) Overlaps(big SampleableSet[T]) bool { + small := s + if small.Len() > big.Len() { + small, big = big, small + } + + for _, e := range small.elements { + if _, ok := big.indices[e]; ok { + return true + } + } + return false +} + +// Len returns the number of elements in this set. +func (s SampleableSet[_]) Len() int { + return len(s.elements) +} + +// Remove all the given elements from this set. +// If an element isn't in the set, it's ignored. +func (s *SampleableSet[T]) Remove(elements ...T) { + for _, e := range elements { + s.remove(e) + } +} + +// Clear empties this set +func (s *SampleableSet[T]) Clear() { + clear(s.indices) + for i := range s.elements { + s.elements[i] = utils.Zero[T]() + } + s.elements = s.elements[:0] +} + +// List converts this set into a list +func (s SampleableSet[T]) List() []T { + return slices.Clone(s.elements) +} + +// Equals returns true if the sets contain the same elements +func (s SampleableSet[T]) Equals(other SampleableSet[T]) bool { + if len(s.indices) != len(other.indices) { + return false + } + for k := range s.indices { + if _, ok := other.indices[k]; !ok { + return false + } + } + return true +} + +func (s SampleableSet[T]) Sample(numToSample int) []T { + if numToSample <= 0 { + return nil + } + + uniform := sampler.NewUniform() + uniform.Initialize(uint64(len(s.elements))) + indices, _ := uniform.Sample(min(len(s.elements), numToSample)) + elements := make([]T, len(indices)) + for i, index := range indices { + elements[i] = s.elements[index] + } + return elements +} + +func (s *SampleableSet[T]) UnmarshalJSON(b []byte) error { + str := string(b) + if str == avajson.Null { + return nil + } + var elements []T + if err := json.Unmarshal(b, &elements); err != nil { + return err + } + s.Clear() + s.Add(elements...) 
+ return nil +} + +func (s *SampleableSet[_]) MarshalJSON() ([]byte, error) { + var ( + elementBytes = make([][]byte, len(s.elements)) + err error + ) + for i, e := range s.elements { + elementBytes[i], err = json.Marshal(e) + if err != nil { + return nil, err + } + } + // Sort for determinism + slices.SortFunc(elementBytes, bytes.Compare) + + // Build the JSON + var ( + jsonBuf = bytes.Buffer{} + errs = wrappers.Errs{} + ) + _, err = jsonBuf.WriteString("[") + errs.Add(err) + for i, elt := range elementBytes { + _, err := jsonBuf.Write(elt) + errs.Add(err) + if i != len(elementBytes)-1 { + _, err := jsonBuf.WriteString(",") + errs.Add(err) + } + } + _, err = jsonBuf.WriteString("]") + errs.Add(err) + + return jsonBuf.Bytes(), errs.Err +} + +func (s *SampleableSet[T]) resize(size int) { + if s.elements == nil { + if minSetSize > size { + size = minSetSize + } + s.indices = make(map[T]int, size) + } +} + +func (s *SampleableSet[T]) add(e T) { + _, ok := s.indices[e] + if ok { + return + } + + s.indices[e] = len(s.elements) + s.elements = append(s.elements, e) +} + +func (s *SampleableSet[T]) remove(e T) { + indexToRemove, ok := s.indices[e] + if !ok { + return + } + + lastIndex := len(s.elements) - 1 + if indexToRemove != lastIndex { + lastElement := s.elements[lastIndex] + + s.indices[lastElement] = indexToRemove + s.elements[indexToRemove] = lastElement + } + + delete(s.indices, e) + s.elements[lastIndex] = utils.Zero[T]() + s.elements = s.elements[:lastIndex] +} diff --git a/avalanchego/utils/set/sampleable_set_test.go b/avalanchego/utils/set/sampleable_set_test.go new file mode 100644 index 00000000..31bd0771 --- /dev/null +++ b/avalanchego/utils/set/sampleable_set_test.go @@ -0,0 +1,174 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package set + +import ( + "encoding/json" + "fmt" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestSampleableSet(t *testing.T) { + require := require.New(t) + id1 := 1 + + s := SampleableSet[int]{} + + s.Add(id1) + require.True(s.Contains(id1)) + + s.Remove(id1) + require.False(s.Contains(id1)) + + s.Add(id1) + require.True(s.Contains(id1)) + require.Len(s.List(), 1) + require.Equal(id1, s.List()[0]) + + s.Clear() + require.False(s.Contains(id1)) + + s.Add(id1) + + s2 := SampleableSet[int]{} + + require.False(s.Overlaps(s2)) + + s2.Union(s) + require.True(s2.Contains(id1)) + require.True(s.Overlaps(s2)) + + s2.Difference(s) + require.False(s2.Contains(id1)) + require.False(s.Overlaps(s2)) +} + +func TestSampleableSetClear(t *testing.T) { + require := require.New(t) + + set := SampleableSet[int]{} + for i := 0; i < 25; i++ { + set.Add(i) + } + set.Clear() + require.Zero(set.Len()) + set.Add(1337) + require.Equal(1, set.Len()) +} + +func TestSampleableSetMarshalJSON(t *testing.T) { + require := require.New(t) + set := SampleableSet[int]{} + { + asJSON, err := set.MarshalJSON() + require.NoError(err) + require.Equal("[]", string(asJSON)) + } + id1, id2 := 1, 2 + id1JSON, err := json.Marshal(id1) + require.NoError(err) + id2JSON, err := json.Marshal(id2) + require.NoError(err) + set.Add(id1) + { + asJSON, err := set.MarshalJSON() + require.NoError(err) + require.Equal(fmt.Sprintf("[%s]", string(id1JSON)), string(asJSON)) + } + set.Add(id2) + { + asJSON, err := set.MarshalJSON() + require.NoError(err) + require.Equal(fmt.Sprintf("[%s,%s]", string(id1JSON), string(id2JSON)), string(asJSON)) + } +} + +func TestSampleableSetUnmarshalJSON(t *testing.T) { + require := require.New(t) + set := SampleableSet[int]{} + { + require.NoError(set.UnmarshalJSON([]byte("[]"))) + require.Zero(set.Len()) + } + id1, id2 := 1, 2 + id1JSON, err := json.Marshal(id1) + require.NoError(err) + id2JSON, err := json.Marshal(id2) + require.NoError(err) + { + 
require.NoError(set.UnmarshalJSON([]byte(fmt.Sprintf("[%s]", string(id1JSON))))) + require.Equal(1, set.Len()) + require.True(set.Contains(id1)) + } + { + require.NoError(set.UnmarshalJSON([]byte(fmt.Sprintf("[%s,%s]", string(id1JSON), string(id2JSON))))) + require.Equal(2, set.Len()) + require.True(set.Contains(id1)) + require.True(set.Contains(id2)) + } + { + require.NoError(set.UnmarshalJSON([]byte(fmt.Sprintf("[%d,%d,%d]", 3, 4, 5)))) + require.Equal(3, set.Len()) + require.True(set.Contains(3)) + require.True(set.Contains(4)) + require.True(set.Contains(5)) + } + { + require.NoError(set.UnmarshalJSON([]byte(fmt.Sprintf("[%d,%d,%d, %d]", 3, 4, 5, 3)))) + require.Equal(3, set.Len()) + require.True(set.Contains(3)) + require.True(set.Contains(4)) + require.True(set.Contains(5)) + } + { + set1 := SampleableSet[int]{} + set2 := SampleableSet[int]{} + require.NoError(set1.UnmarshalJSON([]byte(fmt.Sprintf("[%s,%s]", string(id1JSON), string(id2JSON))))) + require.NoError(set2.UnmarshalJSON([]byte(fmt.Sprintf("[%s,%s]", string(id2JSON), string(id1JSON))))) + require.True(set1.Equals(set2)) + } +} + +func TestOfSampleable(t *testing.T) { + tests := []struct { + name string + elements []int + expected []int + }{ + { + name: "nil", + elements: nil, + expected: []int{}, + }, + { + name: "empty", + elements: []int{}, + expected: []int{}, + }, + { + name: "unique elements", + elements: []int{1, 2, 3}, + expected: []int{1, 2, 3}, + }, + { + name: "duplicate elements", + elements: []int{1, 2, 3, 1, 2, 3}, + expected: []int{1, 2, 3}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + + s := OfSampleable(tt.elements...) 
+ + require.Equal(len(tt.expected), s.Len()) + for _, expected := range tt.expected { + require.True(s.Contains(expected)) + } + }) + } +} diff --git a/avalanchego/utils/set/set.go b/avalanchego/utils/set/set.go index 76b65e77..aa5e2e26 100644 --- a/avalanchego/utils/set/set.go +++ b/avalanchego/utils/set/set.go @@ -1,28 +1,36 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package set import ( "bytes" - - stdjson "encoding/json" + "encoding/json" + "slices" "golang.org/x/exp/maps" "github.com/ava-labs/avalanchego/utils" - "github.com/ava-labs/avalanchego/utils/json" "github.com/ava-labs/avalanchego/utils/wrappers" + + avajson "github.com/ava-labs/avalanchego/utils/json" ) // The minimum capacity of a set const minSetSize = 16 -var _ stdjson.Marshaler = (*Set[int])(nil) +var _ json.Marshaler = (*Set[int])(nil) // Set is a set of elements. type Set[T comparable] map[T]struct{} +// Of returns a Set initialized with [elts] +func Of[T comparable](elts ...T) Set[T] { + s := NewSet[T](len(elts)) + s.Add(elts...) + return s +} + // Return a new set with initial capacity [size]. // More or less than [size] elements can be added to this set. // Using NewSet() rather than Set[T]{} is just an optimization that can @@ -103,7 +111,7 @@ func (s *Set[T]) Remove(elts ...T) { // Clear empties this set func (s *Set[_]) Clear() { - maps.Clear(*s) + clear(*s) } // List converts this set into a list @@ -111,27 +119,6 @@ func (s Set[T]) List() []T { return maps.Keys(s) } -// CappedList returns a list of length at most [size]. -// Size should be >= 0. If size < 0, returns nil. 
-func (s Set[T]) CappedList(size int) []T { - if size < 0 { - return nil - } - if l := s.Len(); l < size { - size = l - } - i := 0 - elts := make([]T, size) - for elt := range s { - if i >= size { - break - } - elts[i] = elt - i++ - } - return elts -} - // Equals returns true if the sets contain the same elements func (s Set[T]) Equals(other Set[T]) bool { return maps.Equal(s, other) @@ -149,11 +136,11 @@ func (s *Set[T]) Pop() (T, bool) { func (s *Set[T]) UnmarshalJSON(b []byte) error { str := string(b) - if str == json.Null { + if str == avajson.Null { return nil } var elts []T - if err := stdjson.Unmarshal(b, &elts); err != nil { + if err := json.Unmarshal(b, &elts); err != nil { return err } s.Clear() @@ -161,21 +148,21 @@ func (s *Set[T]) UnmarshalJSON(b []byte) error { return nil } -func (s *Set[_]) MarshalJSON() ([]byte, error) { +func (s Set[_]) MarshalJSON() ([]byte, error) { var ( - eltBytes = make([][]byte, len(*s)) + eltBytes = make([][]byte, len(s)) i int err error ) - for elt := range *s { - eltBytes[i], err = stdjson.Marshal(elt) + for elt := range s { + eltBytes[i], err = json.Marshal(elt) if err != nil { return nil, err } i++ } // Sort for determinism - utils.SortBytes(eltBytes) + slices.SortFunc(eltBytes, bytes.Compare) // Build the JSON var ( @@ -198,7 +185,7 @@ func (s *Set[_]) MarshalJSON() ([]byte, error) { return jsonBuf.Bytes(), errs.Err } -// Returns an element. If the set is empty, returns false +// Returns a random element. If the set is empty, returns false func (s *Set[T]) Peek() (T, bool) { for elt := range *s { return elt, true diff --git a/avalanchego/utils/set/set_benchmark_test.go b/avalanchego/utils/set/set_benchmark_test.go index c762b72c..300b8c8c 100644 --- a/avalanchego/utils/set/set_benchmark_test.go +++ b/avalanchego/utils/set/set_benchmark_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. package set diff --git a/avalanchego/utils/set/set_test.go b/avalanchego/utils/set/set_test.go index 341d4661..3b0a7e18 100644 --- a/avalanchego/utils/set/set_test.go +++ b/avalanchego/utils/set/set_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package set @@ -26,7 +26,6 @@ func TestSet(t *testing.T) { s.Add(id1) require.True(s.Contains(id1)) require.Len(s.List(), 1) - require.Equal(len(s.List()), 1) require.Equal(id1, s.List()[0]) s.Clear() @@ -47,70 +46,86 @@ func TestSet(t *testing.T) { require.False(s.Overlaps(s2)) } -func TestSetCappedList(t *testing.T) { - require := require.New(t) - s := Set[int]{} - - id := 0 - - require.Len(s.CappedList(0), 0) - - s.Add(id) - - require.Len(s.CappedList(0), 0) - require.Len(s.CappedList(1), 1) - require.Equal(s.CappedList(1)[0], id) - require.Len(s.CappedList(2), 1) - require.Equal(s.CappedList(2)[0], id) +func TestOf(t *testing.T) { + tests := []struct { + name string + elements []int + expected []int + }{ + { + name: "nil", + elements: nil, + expected: []int{}, + }, + { + name: "empty", + elements: []int{}, + expected: []int{}, + }, + { + name: "unique elements", + elements: []int{1, 2, 3}, + expected: []int{1, 2, 3}, + }, + { + name: "duplicate elements", + elements: []int{1, 2, 3, 1, 2, 3}, + expected: []int{1, 2, 3}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) - id2 := 1 - s.Add(id2) + s := Of(tt.elements...) 
- require.Len(s.CappedList(0), 0) - require.Len(s.CappedList(1), 1) - require.Len(s.CappedList(2), 2) - require.Len(s.CappedList(3), 2) - gotList := s.CappedList(2) - require.Contains(gotList, id) - require.Contains(gotList, id2) - require.NotEqual(gotList[0], gotList[1]) + require.Len(s, len(tt.expected)) + for _, expected := range tt.expected { + require.True(s.Contains(expected)) + } + }) + } } func TestSetClear(t *testing.T) { + require := require.New(t) + set := Set[int]{} for i := 0; i < 25; i++ { set.Add(i) } set.Clear() - require.Len(t, set, 0) + require.Empty(set) set.Add(1337) - require.Len(t, set, 1) + require.Len(set, 1) } func TestSetPop(t *testing.T) { + require := require.New(t) + var s Set[int] _, ok := s.Pop() - require.False(t, ok) + require.False(ok) s = make(Set[int]) _, ok = s.Pop() - require.False(t, ok) + require.False(ok) id1, id2 := 0, 1 s.Add(id1, id2) got, ok := s.Pop() - require.True(t, ok) - require.True(t, got == id1 || got == id2) - require.EqualValues(t, 1, s.Len()) + require.True(ok) + require.True(got == id1 || got == id2) + require.Equal(1, s.Len()) got, ok = s.Pop() - require.True(t, ok) - require.True(t, got == id1 || got == id2) - require.EqualValues(t, 0, s.Len()) + require.True(ok) + require.True(got == id1 || got == id2) + require.Zero(s.Len()) _, ok = s.Pop() - require.False(t, ok) + require.False(ok) } func TestSetMarshalJSON(t *testing.T) { @@ -144,8 +159,7 @@ func TestSetUnmarshalJSON(t *testing.T) { require := require.New(t) set := Set[int]{} { - err := set.UnmarshalJSON([]byte("[]")) - require.NoError(err) + require.NoError(set.UnmarshalJSON([]byte("[]"))) require.Empty(set) } id1, id2 := 1, 2 @@ -154,29 +168,25 @@ func TestSetUnmarshalJSON(t *testing.T) { id2JSON, err := json.Marshal(id2) require.NoError(err) { - err := set.UnmarshalJSON([]byte(fmt.Sprintf("[%s]", string(id1JSON)))) - require.NoError(err) + require.NoError(set.UnmarshalJSON([]byte(fmt.Sprintf("[%s]", string(id1JSON))))) require.Len(set, 1) 
require.Contains(set, id1) } { - err := set.UnmarshalJSON([]byte(fmt.Sprintf("[%s,%s]", string(id1JSON), string(id2JSON)))) - require.NoError(err) + require.NoError(set.UnmarshalJSON([]byte(fmt.Sprintf("[%s,%s]", string(id1JSON), string(id2JSON))))) require.Len(set, 2) require.Contains(set, id1) require.Contains(set, id2) } { - err := set.UnmarshalJSON([]byte(fmt.Sprintf("[%d,%d,%d]", 3, 4, 5))) - require.NoError(err) + require.NoError(set.UnmarshalJSON([]byte(fmt.Sprintf("[%d,%d,%d]", 3, 4, 5)))) require.Len(set, 3) require.Contains(set, 3) require.Contains(set, 4) require.Contains(set, 5) } { - err := set.UnmarshalJSON([]byte(fmt.Sprintf("[%d,%d,%d, %d]", 3, 4, 5, 3))) - require.NoError(err) + require.NoError(set.UnmarshalJSON([]byte(fmt.Sprintf("[%d,%d,%d, %d]", 3, 4, 5, 3)))) require.Len(set, 3) require.Contains(set, 3) require.Contains(set, 4) @@ -185,10 +195,34 @@ func TestSetUnmarshalJSON(t *testing.T) { { set1 := Set[int]{} set2 := Set[int]{} - err := set1.UnmarshalJSON([]byte(fmt.Sprintf("[%s,%s]", string(id1JSON), string(id2JSON)))) + require.NoError(set1.UnmarshalJSON([]byte(fmt.Sprintf("[%s,%s]", string(id1JSON), string(id2JSON))))) + require.NoError(set2.UnmarshalJSON([]byte(fmt.Sprintf("[%s,%s]", string(id2JSON), string(id1JSON))))) + require.Equal(set1, set2) + } +} + +func TestSetReflectJSONMarshal(t *testing.T) { + require := require.New(t) + set := Set[int]{} + { + asJSON, err := json.Marshal(set) require.NoError(err) - err = set2.UnmarshalJSON([]byte(fmt.Sprintf("[%s,%s]", string(id2JSON), string(id1JSON)))) + require.Equal("[]", string(asJSON)) + } + id1JSON, err := json.Marshal(1) + require.NoError(err) + id2JSON, err := json.Marshal(2) + require.NoError(err) + set.Add(1) + { + asJSON, err := json.Marshal(set) require.NoError(err) - require.Equal(set1, set2) + require.Equal(fmt.Sprintf("[%s]", string(id1JSON)), string(asJSON)) + } + set.Add(2) + { + asJSON, err := json.Marshal(set) + require.NoError(err) + require.Equal(fmt.Sprintf("[%s,%s]", 
string(id1JSON), string(id2JSON)), string(asJSON)) } } diff --git a/avalanchego/utils/setmap/setmap.go b/avalanchego/utils/setmap/setmap.go new file mode 100644 index 00000000..a2924894 --- /dev/null +++ b/avalanchego/utils/setmap/setmap.go @@ -0,0 +1,138 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package setmap + +import ( + "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/set" +) + +type Entry[K any, V comparable] struct { + Key K + Set set.Set[V] +} + +// SetMap is a map to a set where all sets are non-overlapping. +type SetMap[K, V comparable] struct { + keyToSet map[K]set.Set[V] + valueToKey map[V]K +} + +// New creates a new empty setmap. +func New[K, V comparable]() *SetMap[K, V] { + return &SetMap[K, V]{ + keyToSet: make(map[K]set.Set[V]), + valueToKey: make(map[V]K), + } +} + +// Put the new entry into the map. Removes and returns: +// * The existing entry for [key]. +// * Existing entries where the set overlaps with the [set]. +func (m *SetMap[K, V]) Put(key K, set set.Set[V]) []Entry[K, V] { + removed := m.DeleteOverlapping(set) + if removedSet, ok := m.DeleteKey(key); ok { + removed = append(removed, Entry[K, V]{ + Key: key, + Set: removedSet, + }) + } + + m.keyToSet[key] = set + for val := range set { + m.valueToKey[val] = key + } + return removed +} + +// GetKey that maps to the provided value. +func (m *SetMap[K, V]) GetKey(val V) (K, bool) { + key, ok := m.valueToKey[val] + return key, ok +} + +// GetSet that is mapped to by the provided key. +func (m *SetMap[K, V]) GetSet(key K) (set.Set[V], bool) { + val, ok := m.keyToSet[key] + return val, ok +} + +// HasKey returns true if [key] is in the map. +func (m *SetMap[K, _]) HasKey(key K) bool { + _, ok := m.keyToSet[key] + return ok +} + +// HasValue returns true if [val] is in a set in the map. 
+func (m *SetMap[_, V]) HasValue(val V) bool { + _, ok := m.valueToKey[val] + return ok +} + +// HasOverlap returns true if [set] overlaps with any of the sets in the map. +func (m *SetMap[_, V]) HasOverlap(set set.Set[V]) bool { + if set.Len() < len(m.valueToKey) { + for val := range set { + if _, ok := m.valueToKey[val]; ok { + return true + } + } + } else { + for val := range m.valueToKey { + if set.Contains(val) { + return true + } + } + } + return false +} + +// DeleteKey removes [key] from the map and returns the set it mapped to. +func (m *SetMap[K, V]) DeleteKey(key K) (set.Set[V], bool) { + set, ok := m.keyToSet[key] + if !ok { + return nil, false + } + + delete(m.keyToSet, key) + for val := range set { + delete(m.valueToKey, val) + } + return set, true +} + +// DeleteValue removes and returns the entry that contained [val]. +func (m *SetMap[K, V]) DeleteValue(val V) (K, set.Set[V], bool) { + key, ok := m.valueToKey[val] + if !ok { + return utils.Zero[K](), nil, false + } + set, _ := m.DeleteKey(key) + return key, set, true +} + +// DeleteOverlapping removes and returns all the entries where the set overlaps +// with [set]. +func (m *SetMap[K, V]) DeleteOverlapping(set set.Set[V]) []Entry[K, V] { + var removed []Entry[K, V] + for val := range set { + if k, removedSet, ok := m.DeleteValue(val); ok { + removed = append(removed, Entry[K, V]{ + Key: k, + Set: removedSet, + }) + } + } + return removed +} + +// Len return the number of sets in the map. +func (m *SetMap[K, V]) Len() int { + return len(m.keyToSet) +} + +// LenValues return the total number of values across all sets in the map. +func (m *SetMap[K, V]) LenValues() int { + return len(m.valueToKey) +} diff --git a/avalanchego/utils/setmap/setmap_test.go b/avalanchego/utils/setmap/setmap_test.go new file mode 100644 index 00000000..f3e70985 --- /dev/null +++ b/avalanchego/utils/setmap/setmap_test.go @@ -0,0 +1,450 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. 
+// See the file LICENSE for licensing terms. + +package setmap + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/utils/set" +) + +func TestSetMapPut(t *testing.T) { + tests := []struct { + name string + state *SetMap[int, int] + key int + value set.Set[int] + expectedRemoved []Entry[int, int] + expectedState *SetMap[int, int] + }{ + { + name: "none removed", + state: New[int, int](), + key: 1, + value: set.Of(2), + expectedRemoved: nil, + expectedState: &SetMap[int, int]{ + keyToSet: map[int]set.Set[int]{ + 1: set.Of(2), + }, + valueToKey: map[int]int{ + 2: 1, + }, + }, + }, + { + name: "key removed", + state: &SetMap[int, int]{ + keyToSet: map[int]set.Set[int]{ + 1: set.Of(2), + }, + valueToKey: map[int]int{ + 2: 1, + }, + }, + key: 1, + value: set.Of(3), + expectedRemoved: []Entry[int, int]{ + { + Key: 1, + Set: set.Of(2), + }, + }, + expectedState: &SetMap[int, int]{ + keyToSet: map[int]set.Set[int]{ + 1: set.Of(3), + }, + valueToKey: map[int]int{ + 3: 1, + }, + }, + }, + { + name: "value removed", + state: &SetMap[int, int]{ + keyToSet: map[int]set.Set[int]{ + 1: set.Of(2), + }, + valueToKey: map[int]int{ + 2: 1, + }, + }, + key: 3, + value: set.Of(2), + expectedRemoved: []Entry[int, int]{ + { + Key: 1, + Set: set.Of(2), + }, + }, + expectedState: &SetMap[int, int]{ + keyToSet: map[int]set.Set[int]{ + 3: set.Of(2), + }, + valueToKey: map[int]int{ + 2: 3, + }, + }, + }, + { + name: "key and value removed", + state: &SetMap[int, int]{ + keyToSet: map[int]set.Set[int]{ + 1: set.Of(2), + 3: set.Of(4), + }, + valueToKey: map[int]int{ + 2: 1, + 4: 3, + }, + }, + key: 1, + value: set.Of(4), + expectedRemoved: []Entry[int, int]{ + { + Key: 1, + Set: set.Of(2), + }, + { + Key: 3, + Set: set.Of(4), + }, + }, + expectedState: &SetMap[int, int]{ + keyToSet: map[int]set.Set[int]{ + 1: set.Of(4), + }, + valueToKey: map[int]int{ + 4: 1, + }, + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) 
{ + require := require.New(t) + + removed := test.state.Put(test.key, test.value) + require.ElementsMatch(test.expectedRemoved, removed) + require.Equal(test.expectedState, test.state) + }) + } +} + +func TestSetMapHasValueAndGetKeyAndSetOverlaps(t *testing.T) { + m := New[int, int]() + require.Empty(t, m.Put(1, set.Of(2))) + + tests := []struct { + name string + value int + expectedKey int + expectedExists bool + }{ + { + name: "fetch unknown", + value: 3, + expectedKey: 0, + expectedExists: false, + }, + { + name: "fetch known value", + value: 2, + expectedKey: 1, + expectedExists: true, + }, + { + name: "fetch known key", + value: 1, + expectedKey: 0, + expectedExists: false, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + exists := m.HasValue(test.value) + require.Equal(test.expectedExists, exists) + + key, exists := m.GetKey(test.value) + require.Equal(test.expectedKey, key) + require.Equal(test.expectedExists, exists) + }) + } +} + +func TestSetMapHasOverlap(t *testing.T) { + m := New[int, int]() + require.Empty(t, m.Put(1, set.Of(2))) + require.Empty(t, m.Put(2, set.Of(3, 4))) + + tests := []struct { + name string + set set.Set[int] + expectedOverlaps bool + }{ + { + name: "small fetch unknown", + set: set.Of(5), + expectedOverlaps: false, + }, + { + name: "large fetch unknown", + set: set.Of(5, 6, 7, 8), + expectedOverlaps: false, + }, + { + name: "small fetch known", + set: set.Of(3), + expectedOverlaps: true, + }, + { + name: "large fetch known", + set: set.Of(3, 5, 6, 7, 8), + expectedOverlaps: true, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + overlaps := m.HasOverlap(test.set) + require.Equal(t, test.expectedOverlaps, overlaps) + }) + } +} + +func TestSetMapHasKeyAndGetSet(t *testing.T) { + m := New[int, int]() + require.Empty(t, m.Put(1, set.Of(2))) + + tests := []struct { + name string + key int + expectedValue set.Set[int] + expectedExists bool + }{ 
+ { + name: "fetch unknown", + key: 3, + expectedValue: nil, + expectedExists: false, + }, + { + name: "fetch known key", + key: 1, + expectedValue: set.Of(2), + expectedExists: true, + }, + { + name: "fetch known value", + key: 2, + expectedValue: nil, + expectedExists: false, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + exists := m.HasKey(test.key) + require.Equal(test.expectedExists, exists) + + value, exists := m.GetSet(test.key) + require.Equal(test.expectedValue, value) + require.Equal(test.expectedExists, exists) + }) + } +} + +func TestSetMapDeleteKey(t *testing.T) { + tests := []struct { + name string + state *SetMap[int, int] + key int + expectedValue set.Set[int] + expectedRemoved bool + expectedState *SetMap[int, int] + }{ + { + name: "none removed", + state: New[int, int](), + key: 1, + expectedValue: nil, + expectedRemoved: false, + expectedState: New[int, int](), + }, + { + name: "key removed", + state: &SetMap[int, int]{ + keyToSet: map[int]set.Set[int]{ + 1: set.Of(2), + }, + valueToKey: map[int]int{ + 2: 1, + }, + }, + key: 1, + expectedValue: set.Of(2), + expectedRemoved: true, + expectedState: New[int, int](), + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + value, removed := test.state.DeleteKey(test.key) + require.Equal(test.expectedValue, value) + require.Equal(test.expectedRemoved, removed) + require.Equal(test.expectedState, test.state) + }) + } +} + +func TestSetMapDeleteValue(t *testing.T) { + tests := []struct { + name string + state *SetMap[int, int] + value int + expectedKey int + expectedSet set.Set[int] + expectedRemoved bool + expectedState *SetMap[int, int] + }{ + { + name: "none removed", + state: New[int, int](), + value: 1, + expectedKey: 0, + expectedSet: nil, + expectedRemoved: false, + expectedState: New[int, int](), + }, + { + name: "key removed", + state: &SetMap[int, int]{ + keyToSet: 
map[int]set.Set[int]{ + 1: set.Of(2), + }, + valueToKey: map[int]int{ + 2: 1, + }, + }, + value: 2, + expectedKey: 1, + expectedSet: set.Of(2), + expectedRemoved: true, + expectedState: New[int, int](), + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + key, set, removed := test.state.DeleteValue(test.value) + require.Equal(test.expectedKey, key) + require.Equal(test.expectedSet, set) + require.Equal(test.expectedRemoved, removed) + require.Equal(test.expectedState, test.state) + }) + } +} + +func TestSetMapDeleteOverlapping(t *testing.T) { + tests := []struct { + name string + state *SetMap[int, int] + set set.Set[int] + expectedRemoved []Entry[int, int] + expectedState *SetMap[int, int] + }{ + { + name: "none removed", + state: New[int, int](), + set: set.Of(1), + expectedRemoved: nil, + expectedState: New[int, int](), + }, + { + name: "key removed", + state: &SetMap[int, int]{ + keyToSet: map[int]set.Set[int]{ + 1: set.Of(2), + }, + valueToKey: map[int]int{ + 2: 1, + }, + }, + set: set.Of(2), + expectedRemoved: []Entry[int, int]{ + { + Key: 1, + Set: set.Of(2), + }, + }, + expectedState: New[int, int](), + }, + { + name: "multiple keys removed", + state: &SetMap[int, int]{ + keyToSet: map[int]set.Set[int]{ + 1: set.Of(2, 3), + 2: set.Of(4), + }, + valueToKey: map[int]int{ + 2: 1, + 3: 1, + 4: 2, + }, + }, + set: set.Of(2, 4), + expectedRemoved: []Entry[int, int]{ + { + Key: 1, + Set: set.Of(2, 3), + }, + { + Key: 2, + Set: set.Of(4), + }, + }, + expectedState: New[int, int](), + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + removed := test.state.DeleteOverlapping(test.set) + require.ElementsMatch(test.expectedRemoved, removed) + require.Equal(test.expectedState, test.state) + }) + } +} + +func TestSetMapLen(t *testing.T) { + require := require.New(t) + + m := New[int, int]() + require.Zero(m.Len()) + require.Zero(m.LenValues()) + + 
m.Put(1, set.Of(2)) + require.Equal(1, m.Len()) + require.Equal(1, m.LenValues()) + + m.Put(2, set.Of(3, 4)) + require.Equal(2, m.Len()) + require.Equal(3, m.LenValues()) + + m.Put(1, set.Of(4, 5)) + require.Equal(1, m.Len()) + require.Equal(2, m.LenValues()) + + m.DeleteKey(1) + require.Zero(m.Len()) + require.Zero(m.LenValues()) +} diff --git a/avalanchego/utils/sorting.go b/avalanchego/utils/sorting.go index 6c3911f1..a9c900ab 100644 --- a/avalanchego/utils/sorting.go +++ b/avalanchego/utils/sorting.go @@ -1,53 +1,50 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package utils import ( "bytes" - "sort" - - "golang.org/x/exp/constraints" - "golang.org/x/exp/slices" + "cmp" + "slices" "github.com/ava-labs/avalanchego/utils/hashing" ) -// TODO can we handle sorting where the Less function relies on a codec? +// TODO can we handle sorting where the Compare function relies on a codec? type Sortable[T any] interface { - Less(T) bool + Compare(T) int } // Sorts the elements of [s]. func Sort[T Sortable[T]](s []T) { - slices.SortFunc(s, func(i, j T) bool { - return i.Less(j) - }) + slices.SortFunc(s, T.Compare) } // Sorts the elements of [s] based on their hashes. func SortByHash[T ~[]byte](s []T) { - slices.SortFunc(s, func(i, j T) bool { + slices.SortFunc(s, func(i, j T) int { iHash := hashing.ComputeHash256(i) jHash := hashing.ComputeHash256(j) - return bytes.Compare(iHash, jHash) == -1 + return bytes.Compare(iHash, jHash) }) } -// Sorts a 2D byte slice. -// Each byte slice is not sorted internally; the byte slices are sorted relative -// to one another. -func SortBytes[T ~[]byte](arr []T) { - slices.SortFunc(arr, func(i, j T) bool { - return bytes.Compare(i, j) == -1 - }) +// Returns true iff the elements in [s] are sorted. 
+func IsSortedBytes[T ~[]byte](s []T) bool { + for i := 0; i < len(s)-1; i++ { + if bytes.Compare(s[i], s[i+1]) == 1 { + return false + } + } + return true } // Returns true iff the elements in [s] are unique and sorted. -func IsSortedAndUniqueSortable[T Sortable[T]](s []T) bool { +func IsSortedAndUnique[T Sortable[T]](s []T) bool { for i := 0; i < len(s)-1; i++ { - if !s[i].Less(s[i+1]) { + if s[i].Compare(s[i+1]) >= 0 { return false } } @@ -55,7 +52,7 @@ func IsSortedAndUniqueSortable[T Sortable[T]](s []T) bool { } // Returns true iff the elements in [s] are unique and sorted. -func IsSortedAndUniqueOrdered[T constraints.Ordered](s []T) bool { +func IsSortedAndUniqueOrdered[T cmp.Ordered](s []T) bool { for i := 0; i < len(s)-1; i++ { if s[i] >= s[i+1] { return false @@ -80,29 +77,3 @@ func IsSortedAndUniqueByHash[T ~[]byte](s []T) bool { } return true } - -// Returns true iff the elements in [s] are unique. -func IsUnique[T comparable](elts []T) bool { - // Can't use set.Set because it'd be a circular import. - asMap := make(map[T]struct{}, len(elts)) - for _, elt := range elts { - if _, ok := asMap[elt]; ok { - return false - } - asMap[elt] = struct{}{} - } - return true -} - -// IsSortedAndUnique returns true if the elements in the data are unique and -// sorted. -// -// Deprecated: Use one of the other [IsSortedAndUnique...] functions instead. -func IsSortedAndUnique(data sort.Interface) bool { - for i := 0; i < data.Len()-1; i++ { - if !data.Less(i, i+1) { - return false - } - } - return true -} diff --git a/avalanchego/utils/sorting_test.go b/avalanchego/utils/sorting_test.go index 9de3dffd..247b1a39 100644 --- a/avalanchego/utils/sorting_test.go +++ b/avalanchego/utils/sorting_test.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package utils import ( + "cmp" "testing" "github.com/stretchr/testify/require" @@ -13,8 +14,8 @@ var _ Sortable[sortable] = sortable(0) type sortable int -func (s sortable) Less(other sortable) bool { - return s < other +func (s sortable) Compare(other sortable) int { + return cmp.Compare(s, other) } func TestSortSliceSortable(t *testing.T) { @@ -22,12 +23,12 @@ func TestSortSliceSortable(t *testing.T) { var s []sortable Sort(s) - require.True(IsSortedAndUniqueSortable(s)) - require.Equal(0, len(s)) + require.True(IsSortedAndUnique(s)) + require.Empty(s) s = []sortable{1} Sort(s) - require.True(IsSortedAndUniqueSortable(s)) + require.True(IsSortedAndUnique(s)) require.Equal([]sortable{1}, s) s = []sortable{1, 1} @@ -36,12 +37,12 @@ func TestSortSliceSortable(t *testing.T) { s = []sortable{1, 2} Sort(s) - require.True(IsSortedAndUniqueSortable(s)) + require.True(IsSortedAndUnique(s)) require.Equal([]sortable{1, 2}, s) s = []sortable{2, 1} Sort(s) - require.True(IsSortedAndUniqueSortable(s)) + require.True(IsSortedAndUnique(s)) require.Equal([]sortable{1, 2}, s) s = []sortable{1, 2, 1} @@ -61,53 +62,28 @@ func TestIsSortedAndUniqueSortable(t *testing.T) { require := require.New(t) var s []sortable - require.True(IsSortedAndUniqueSortable(s)) + require.True(IsSortedAndUnique(s)) s = []sortable{} - require.True(IsSortedAndUniqueSortable(s)) + require.True(IsSortedAndUnique(s)) s = []sortable{1} - require.True(IsSortedAndUniqueSortable(s)) + require.True(IsSortedAndUnique(s)) s = []sortable{1, 2} - require.True(IsSortedAndUniqueSortable(s)) + require.True(IsSortedAndUnique(s)) s = []sortable{1, 1} - require.False(IsSortedAndUniqueSortable(s)) + require.False(IsSortedAndUnique(s)) s = []sortable{2, 1} - require.False(IsSortedAndUniqueSortable(s)) + require.False(IsSortedAndUnique(s)) s = []sortable{1, 2, 1} - require.False(IsSortedAndUniqueSortable(s)) + require.False(IsSortedAndUnique(s)) s = []sortable{1, 2, 0} - require.False(IsSortedAndUniqueSortable(s)) -} - -func 
TestIsUnique(t *testing.T) { - require := require.New(t) - - var s []int - require.True(IsUnique(s)) - - s = []int{} - require.True(IsUnique(s)) - - s = []int{1} - require.True(IsUnique(s)) - - s = []int{1, 2} - require.True(IsUnique(s)) - - s = []int{1, 1} - require.False(IsUnique(s)) - - s = []int{2, 1} - require.True(IsUnique(s)) - - s = []int{1, 2, 1} - require.False(IsUnique(s)) + require.False(IsSortedAndUnique(s)) } func TestSortByHash(t *testing.T) { @@ -115,7 +91,7 @@ func TestSortByHash(t *testing.T) { s := [][]byte{} SortByHash(s) - require.Len(s, 0) + require.Empty(s) s = [][]byte{{1}} SortByHash(s) diff --git a/avalanchego/utils/stacktrace.go b/avalanchego/utils/stacktrace.go index d68ee4ea..d0e0de56 100644 --- a/avalanchego/utils/stacktrace.go +++ b/avalanchego/utils/stacktrace.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package utils diff --git a/avalanchego/utils/storage/storage_common.go b/avalanchego/utils/storage/storage_common.go index cf1fbd3b..6fa5692c 100644 --- a/avalanchego/utils/storage/storage_common.go +++ b/avalanchego/utils/storage/storage_common.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package storage diff --git a/avalanchego/utils/storage/storage_unix.go b/avalanchego/utils/storage/storage_unix.go index a5995b1d..247bc244 100644 --- a/avalanchego/utils/storage/storage_unix.go +++ b/avalanchego/utils/storage/storage_unix.go @@ -1,9 +1,9 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + //go:build !windows // +build !windows -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- package storage import "syscall" diff --git a/avalanchego/utils/storage/storage_windows.go b/avalanchego/utils/storage/storage_windows.go index 7879fd65..2514717c 100644 --- a/avalanchego/utils/storage/storage_windows.go +++ b/avalanchego/utils/storage/storage_windows.go @@ -1,9 +1,9 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + //go:build windows // +build windows -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - package storage import ( diff --git a/avalanchego/utils/timer/adaptive_timeout_manager.go b/avalanchego/utils/timer/adaptive_timeout_manager.go index 8bfab057..49376901 100644 --- a/avalanchego/utils/timer/adaptive_timeout_manager.go +++ b/avalanchego/utils/timer/adaptive_timeout_manager.go @@ -1,10 +1,9 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package timer import ( - "container/heap" "errors" "fmt" "sync" @@ -13,20 +12,22 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/heap" "github.com/ava-labs/avalanchego/utils/math" "github.com/ava-labs/avalanchego/utils/timer/mockable" - "github.com/ava-labs/avalanchego/utils/wrappers" ) var ( - errNonPositiveHalflife = errors.New("timeout halflife must be positive") + errNonPositiveHalflife = errors.New("timeout halflife must be positive") + errInitialTimeoutAboveMaximum = errors.New("initial timeout cannot be greater than maximum timeout") + errInitialTimeoutBelowMinimum = errors.New("initial timeout cannot be less than minimum timeout") + errTooSmallTimeoutCoefficient = errors.New("timeout coefficient must be >= 1") - _ heap.Interface = (*timeoutQueue)(nil) _ AdaptiveTimeoutManager = (*adaptiveTimeoutManager)(nil) ) type adaptiveTimeout struct { - index int // Index in the wait queue id ids.RequestID // Unique ID of this timeout handler func() // Function to execute if timed out duration time.Duration // How long this timeout was set for @@ -34,38 +35,6 @@ type adaptiveTimeout struct { measureLatency bool // Whether this request should impact latency } -type timeoutQueue []*adaptiveTimeout - -func (tq timeoutQueue) Len() int { - return len(tq) -} - -func (tq timeoutQueue) Less(i, j int) bool { - return tq[i].deadline.Before(tq[j].deadline) -} - -func (tq timeoutQueue) Swap(i, j int) { - tq[i], tq[j] = tq[j], tq[i] - tq[i].index = i - tq[j].index = j -} - -// Push adds an item to this priority queue. 
x must have type *adaptiveTimeout -func (tq *timeoutQueue) Push(x interface{}) { - item := x.(*adaptiveTimeout) - item.index = len(*tq) - *tq = append(*tq, item) -} - -// Pop returns the next item in this queue -func (tq *timeoutQueue) Pop() interface{} { - n := len(*tq) - item := (*tq)[n-1] - (*tq)[n-1] = nil // make sure the item is freed from memory - *tq = (*tq)[:n-1] - return item -} - // AdaptiveTimeoutConfig contains the parameters provided to the // adaptive timeout manager. type AdaptiveTimeoutConfig struct { @@ -117,8 +86,7 @@ type adaptiveTimeoutManager struct { minimumTimeout time.Duration maximumTimeout time.Duration currentTimeout time.Duration // Amount of time before a timeout - timeoutMap map[ids.RequestID]*adaptiveTimeout - timeoutQueue timeoutQueue + timeoutHeap heap.Map[ids.RequestID, *adaptiveTimeout] timer *Timer // Timer that will fire to clear the timeouts } @@ -129,11 +97,11 @@ func NewAdaptiveTimeoutManager( ) (AdaptiveTimeoutManager, error) { switch { case config.InitialTimeout > config.MaximumTimeout: - return nil, fmt.Errorf("initial timeout (%s) > maximum timeout (%s)", config.InitialTimeout, config.MaximumTimeout) + return nil, fmt.Errorf("%w: (%s) > (%s)", errInitialTimeoutAboveMaximum, config.InitialTimeout, config.MaximumTimeout) case config.InitialTimeout < config.MinimumTimeout: - return nil, fmt.Errorf("initial timeout (%s) < minimum timeout (%s)", config.InitialTimeout, config.MinimumTimeout) + return nil, fmt.Errorf("%w: (%s) < (%s)", errInitialTimeoutBelowMinimum, config.InitialTimeout, config.MinimumTimeout) case config.TimeoutCoefficient < 1: - return nil, fmt.Errorf("timeout coefficient must be >= 1 but got %f", config.TimeoutCoefficient) + return nil, fmt.Errorf("%w: %f", errTooSmallTimeoutCoefficient, config.TimeoutCoefficient) case config.TimeoutHalflife <= 0: return nil, errNonPositiveHalflife } @@ -163,19 +131,20 @@ func NewAdaptiveTimeoutManager( maximumTimeout: config.MaximumTimeout, currentTimeout: 
config.InitialTimeout, timeoutCoefficient: config.TimeoutCoefficient, - timeoutMap: make(map[ids.RequestID]*adaptiveTimeout), + timeoutHeap: heap.NewMap[ids.RequestID, *adaptiveTimeout](func(a, b *adaptiveTimeout) bool { + return a.deadline.Before(b.deadline) + }), } tm.timer = NewTimer(tm.timeout) tm.averager = math.NewAverager(float64(config.InitialTimeout), config.TimeoutHalflife, tm.clock.Time()) - errs := &wrappers.Errs{} - errs.Add( + err := utils.Err( metricsRegister.Register(tm.networkTimeoutMetric), metricsRegister.Register(tm.avgLatency), metricsRegister.Register(tm.numTimeouts), metricsRegister.Register(tm.numPendingTimeouts), ) - return tm, errs.Err + return tm, err } func (tm *adaptiveTimeoutManager) TimeoutDuration() time.Duration { @@ -212,9 +181,8 @@ func (tm *adaptiveTimeoutManager) put(id ids.RequestID, measureLatency bool, han deadline: now.Add(tm.currentTimeout), measureLatency: measureLatency, } - tm.timeoutMap[id] = timeout - tm.numPendingTimeouts.Set(float64(len(tm.timeoutMap))) - heap.Push(&tm.timeoutQueue, timeout) + tm.timeoutHeap.Push(id, timeout) + tm.numPendingTimeouts.Set(float64(tm.timeoutHeap.Len())) tm.setNextTimeoutTime() } @@ -228,24 +196,18 @@ func (tm *adaptiveTimeoutManager) Remove(id ids.RequestID) { // Assumes [tm.lock] is held func (tm *adaptiveTimeoutManager) remove(id ids.RequestID, now time.Time) { - timeout, exists := tm.timeoutMap[id] + // Observe the response time to update average network response time. + timeout, exists := tm.timeoutHeap.Remove(id) if !exists { return } - // Observe the response time to update average network response time. 
if timeout.measureLatency { timeoutRegisteredAt := timeout.deadline.Add(-1 * timeout.duration) latency := now.Sub(timeoutRegisteredAt) tm.observeLatencyAndUpdateTimeout(latency, now) } - - // Remove the timeout from the map - delete(tm.timeoutMap, id) - tm.numPendingTimeouts.Set(float64(len(tm.timeoutMap))) - - // Remove the timeout from the queue - heap.Remove(&tm.timeoutQueue, timeout.index) + tm.numPendingTimeouts.Set(float64(tm.timeoutHeap.Len())) } // Assumes [tm.lock] is not held. @@ -297,11 +259,10 @@ func (tm *adaptiveTimeoutManager) observeLatencyAndUpdateTimeout(latency time.Du // returns nil. // Assumes [tm.lock] is held func (tm *adaptiveTimeoutManager) getNextTimeoutHandler(now time.Time) func() { - if tm.timeoutQueue.Len() == 0 { + _, nextTimeout, ok := tm.timeoutHeap.Peek() + if !ok { return nil } - - nextTimeout := tm.timeoutQueue[0] if nextTimeout.deadline.After(now) { return nil } @@ -312,14 +273,14 @@ func (tm *adaptiveTimeoutManager) getNextTimeoutHandler(now time.Time) func() { // Calculate the time of the next timeout and set // the timer to fire at that time. func (tm *adaptiveTimeoutManager) setNextTimeoutTime() { - if tm.timeoutQueue.Len() == 0 { + _, nextTimeout, ok := tm.timeoutHeap.Peek() + if !ok { // There are no pending timeouts tm.timer.Cancel() return } now := tm.clock.Time() - nextTimeout := tm.timeoutQueue[0] timeToNextTimeout := nextTimeout.deadline.Sub(now) tm.timer.SetTimeoutIn(timeToNextTimeout) } diff --git a/avalanchego/utils/timer/adaptive_timeout_manager_test.go b/avalanchego/utils/timer/adaptive_timeout_manager_test.go index 72f1e0c2..5b725303 100644 --- a/avalanchego/utils/timer/adaptive_timeout_manager_test.go +++ b/avalanchego/utils/timer/adaptive_timeout_manager_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package timer @@ -9,7 +9,6 @@ import ( "time" "github.com/prometheus/client_golang/prometheus" - "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" @@ -18,11 +17,11 @@ import ( // Test that Initialize works func TestAdaptiveTimeoutManagerInit(t *testing.T) { type test struct { - config AdaptiveTimeoutConfig - shouldErrWith string + config AdaptiveTimeoutConfig + expectedErr error } - tests := []test{ + tests := []*test{ { config: AdaptiveTimeoutConfig{ InitialTimeout: time.Second, @@ -31,7 +30,7 @@ func TestAdaptiveTimeoutManagerInit(t *testing.T) { TimeoutCoefficient: 2, TimeoutHalflife: 5 * time.Minute, }, - shouldErrWith: "initial timeout < minimum timeout", + expectedErr: errInitialTimeoutBelowMinimum, }, { config: AdaptiveTimeoutConfig{ @@ -41,7 +40,7 @@ func TestAdaptiveTimeoutManagerInit(t *testing.T) { TimeoutCoefficient: 2, TimeoutHalflife: 5 * time.Minute, }, - shouldErrWith: "initial timeout > maximum timeout", + expectedErr: errInitialTimeoutAboveMaximum, }, { config: AdaptiveTimeoutConfig{ @@ -51,7 +50,7 @@ func TestAdaptiveTimeoutManagerInit(t *testing.T) { TimeoutCoefficient: 0.9, TimeoutHalflife: 5 * time.Minute, }, - shouldErrWith: "timeout coefficient < 1", + expectedErr: errTooSmallTimeoutCoefficient, }, { config: AdaptiveTimeoutConfig{ @@ -60,7 +59,7 @@ func TestAdaptiveTimeoutManagerInit(t *testing.T) { MaximumTimeout: 3 * time.Second, TimeoutCoefficient: 1, }, - shouldErrWith: "timeout halflife is 0", + expectedErr: errNonPositiveHalflife, }, { config: AdaptiveTimeoutConfig{ @@ -70,7 +69,7 @@ func TestAdaptiveTimeoutManagerInit(t *testing.T) { TimeoutCoefficient: 1, TimeoutHalflife: -1 * time.Second, }, - shouldErrWith: "timeout halflife is negative", + expectedErr: errNonPositiveHalflife, }, { config: AdaptiveTimeoutConfig{ @@ -85,11 +84,7 @@ func TestAdaptiveTimeoutManagerInit(t *testing.T) { for _, test := range tests { _, err := NewAdaptiveTimeoutManager(&test.config, "", prometheus.NewRegistry()) - if err != nil 
&& test.shouldErrWith == "" { - require.FailNow(t, "error from valid config", err) - } else if err == nil && test.shouldErrWith != "" { - require.FailNowf(t, "should have errored", test.shouldErrWith) - } + require.ErrorIs(t, err, test.expectedErr) } } @@ -105,9 +100,7 @@ func TestAdaptiveTimeoutManager(t *testing.T) { "", prometheus.NewRegistry(), ) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) go tm.Dispatch() var lock sync.Mutex diff --git a/avalanchego/utils/timer/eta.go b/avalanchego/utils/timer/eta.go index fe3b6add..6af353fd 100644 --- a/avalanchego/utils/timer/eta.go +++ b/avalanchego/utils/timer/eta.go @@ -1,12 +1,23 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package timer import ( + "encoding/binary" "time" ) +// ProgressFromHash returns the progress out of MaxUint64 assuming [b] is a key +// in a uniformly distributed sequence that is being iterated lexicographically. +func ProgressFromHash(b []byte) uint64 { + // binary.BigEndian.Uint64 will panic if the input length is less than 8, so + // pad 0s as needed. + var progress [8]byte + copy(progress[:], b) + return binary.BigEndian.Uint64(progress[:]) +} + // EstimateETA attempts to estimate the remaining time for a job to finish given // the [startTime] and it's current progress. func EstimateETA(startTime time.Time, progress, end uint64) time.Duration { diff --git a/avalanchego/utils/timer/meter.go b/avalanchego/utils/timer/meter.go index c78376e1..e0459e92 100644 --- a/avalanchego/utils/timer/meter.go +++ b/avalanchego/utils/timer/meter.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package timer diff --git a/avalanchego/utils/timer/mockable/clock.go b/avalanchego/utils/timer/mockable/clock.go index c331ff78..753da957 100644 --- a/avalanchego/utils/timer/mockable/clock.go +++ b/avalanchego/utils/timer/mockable/clock.go @@ -1,11 +1,9 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package mockable -import ( - "time" -) +import "time" // MaxTime was taken from https://stackoverflow.com/questions/25065055/what-is-the-maximum-time-time-in-go/32620397#32620397 var MaxTime = time.Unix(1<<63-62135596801, 0) // 0 is used because we drop the nano-seconds diff --git a/avalanchego/utils/timer/mockable/clock_test.go b/avalanchego/utils/timer/mockable/clock_test.go index b43da19c..a15e71ef 100644 --- a/avalanchego/utils/timer/mockable/clock_test.go +++ b/avalanchego/utils/timer/mockable/clock_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package mockable @@ -11,38 +11,34 @@ import ( ) func TestClockSet(t *testing.T) { + require := require.New(t) + clock := Clock{} - clock.Set(time.Unix(1000000, 0)) - if clock.faked == false { - t.Error("Fake time was set, but .faked flag was not set") - } - if !clock.Time().Equal(time.Unix(1000000, 0)) { - t.Error("Fake time was set, but not returned") - } + time := time.Unix(1000000, 0) + clock.Set(time) + require.True(clock.faked) + require.Equal(time, clock.Time()) } func TestClockSync(t *testing.T) { + require := require.New(t) + clock := Clock{true, time.Unix(0, 0)} clock.Sync() - if clock.faked == true { - t.Error("Clock was synced, but .faked flag was set") - } - if clock.Time().Equal(time.Unix(0, 0)) { - t.Error("Clock was synced, but returned a fake time") - } + require.False(clock.faked) + require.NotEqual(time.Unix(0, 0), clock.Time()) } func TestClockUnixTime(t *testing.T) { + require := require.New(t) + clock := Clock{true, time.Unix(123, 123)} - require.Zero(t, clock.UnixTime().Nanosecond()) - require.Equal(t, 123, clock.Time().Nanosecond()) + require.Zero(clock.UnixTime().Nanosecond()) + require.Equal(123, clock.Time().Nanosecond()) } func TestClockUnix(t *testing.T) { clock := Clock{true, time.Unix(-14159040, 0)} actual := clock.Unix() - if actual != 0 { - // We are Unix of 1970s, Moon landings are irrelevant - t.Errorf("Expected time prior to Unix epoch to be clamped to 0, got %d", actual) - } + require.Zero(t, actual) // time prior to Unix epoch should be clamped to 0 } diff --git a/avalanchego/utils/timer/staged_timer.go b/avalanchego/utils/timer/staged_timer.go deleted file mode 100644 index eec885ee..00000000 --- a/avalanchego/utils/timer/staged_timer.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package timer - -import "time" - -// NewStagedTimer returns a timer that will execute [f] -// when a timeout occurs and execute an additional timeout after -// the returned duration if [f] returns true and some duration. -// -// Deprecated: NewStagedTimer exists for historical compatibility -// and should not be used. -func NewStagedTimer(f func() (time.Duration, bool)) *Timer { - t := NewTimer(nil) - t.handler = func() { - delay, repeat := f() - if repeat { - t.SetTimeoutIn(delay) - } - } - return t -} diff --git a/avalanchego/utils/timer/staged_timer_test.go b/avalanchego/utils/timer/staged_timer_test.go deleted file mode 100644 index bd83ef20..00000000 --- a/avalanchego/utils/timer/staged_timer_test.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package timer - -import ( - "sync" - "testing" - "time" - - "github.com/stretchr/testify/require" -) - -func TestSingleStagedTimer(t *testing.T) { - wg := sync.WaitGroup{} - wg.Add(1) - ticks := 1 - i := 0 - timer := NewStagedTimer(func() (time.Duration, bool) { - defer wg.Done() - i++ - return 0, false - }) - go timer.Dispatch() - - timer.SetTimeoutIn(time.Millisecond) - wg.Wait() - require.Equal(t, i, ticks) -} - -func TestMultiStageTimer(t *testing.T) { - wg := sync.WaitGroup{} - ticks := 3 - wg.Add(ticks) - - i := 0 - timer := NewStagedTimer(func() (time.Duration, bool) { - defer wg.Done() - i++ - return time.Millisecond, i < ticks - }) - go timer.Dispatch() - - timer.SetTimeoutIn(time.Millisecond) - wg.Wait() - require.Equal(t, i, ticks) -} diff --git a/avalanchego/utils/timer/timer.go b/avalanchego/utils/timer/timer.go index 1b5914fe..b4d34534 100644 --- a/avalanchego/utils/timer/timer.go +++ b/avalanchego/utils/timer/timer.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. package timer diff --git a/avalanchego/utils/timer/timer_test.go b/avalanchego/utils/timer/timer_test.go index 228b19f2..83994f96 100644 --- a/avalanchego/utils/timer/timer_test.go +++ b/avalanchego/utils/timer/timer_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package timer diff --git a/avalanchego/utils/ulimit/ulimit_bsd.go b/avalanchego/utils/ulimit/ulimit_bsd.go index 191b7882..bb4c5e15 100644 --- a/avalanchego/utils/ulimit/ulimit_bsd.go +++ b/avalanchego/utils/ulimit/ulimit_bsd.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. //go:build freebsd @@ -10,6 +10,8 @@ import ( "fmt" "syscall" + "go.uber.org/zap" + "github.com/ava-labs/avalanchego/utils/logging" ) diff --git a/avalanchego/utils/ulimit/ulimit_darwin.go b/avalanchego/utils/ulimit/ulimit_darwin.go index 9eaab72b..224d8faf 100644 --- a/avalanchego/utils/ulimit/ulimit_darwin.go +++ b/avalanchego/utils/ulimit/ulimit_darwin.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. //go:build darwin diff --git a/avalanchego/utils/ulimit/ulimit_unix.go b/avalanchego/utils/ulimit/ulimit_unix.go index 898b361c..8b23ab70 100644 --- a/avalanchego/utils/ulimit/ulimit_unix.go +++ b/avalanchego/utils/ulimit/ulimit_unix.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
//go:build linux || netbsd || openbsd diff --git a/avalanchego/utils/ulimit/ulimit_windows.go b/avalanchego/utils/ulimit/ulimit_windows.go index 7646d6f1..82a88273 100644 --- a/avalanchego/utils/ulimit/ulimit_windows.go +++ b/avalanchego/utils/ulimit/ulimit_windows.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. //go:build windows diff --git a/avalanchego/utils/units/avax.go b/avalanchego/utils/units/avax.go index 341fd8be..bbd66492 100644 --- a/avalanchego/utils/units/avax.go +++ b/avalanchego/utils/units/avax.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package units diff --git a/avalanchego/utils/units/bytes.go b/avalanchego/utils/units/bytes.go index 93678e95..42d2526a 100644 --- a/avalanchego/utils/units/bytes.go +++ b/avalanchego/utils/units/bytes.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package units diff --git a/avalanchego/utils/window/window.go b/avalanchego/utils/window/window.go index da14e060..86dba5b7 100644 --- a/avalanchego/utils/window/window.go +++ b/avalanchego/utils/window/window.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package window @@ -28,9 +28,12 @@ type window[T any] struct { ttl time.Duration // max amount of elements allowed in the window maxSize int + // min amount of elements required in the window before allowing removal + // based on time + minSize int // mutex for synchronization - lock sync.Mutex + lock sync.RWMutex // elements in the window elements buffer.Deque[node[T]] } @@ -39,6 +42,7 @@ type window[T any] struct { type Config struct { Clock *mockable.Clock MaxSize int + MinSize int TTL time.Duration } @@ -48,6 +52,7 @@ func New[T any](config Config) Window[T] { clock: config.Clock, ttl: config.TTL, maxSize: config.MaxSize, + minSize: config.MinSize, elements: buffer.NewUnboundedDeque[node[T]](config.MaxSize + 1), } } @@ -58,24 +63,22 @@ func (w *window[T]) Add(value T) { w.lock.Lock() defer w.lock.Unlock() - w.removeStaleNodes() - if w.elements.Len() >= w.maxSize { - _, _ = w.elements.PopLeft() - } - // add the new block id w.elements.PushRight(node[T]{ value: value, entryTime: w.clock.Time(), }) + + w.removeStaleNodes() + if w.elements.Len() > w.maxSize { + _, _ = w.elements.PopLeft() + } } // Oldest returns the oldest element in the window. func (w *window[T]) Oldest() (T, bool) { - w.lock.Lock() - defer w.lock.Unlock() - - w.removeStaleNodes() + w.lock.RLock() + defer w.lock.RUnlock() oldest, ok := w.elements.PeekLeft() if !ok { @@ -86,10 +89,9 @@ func (w *window[T]) Oldest() (T, bool) { // Length returns the number of elements in the window. func (w *window[T]) Length() int { - w.lock.Lock() - defer w.lock.Unlock() + w.lock.RLock() + defer w.lock.RUnlock() - w.removeStaleNodes() return w.elements.Len() } @@ -98,9 +100,13 @@ func (w *window[T]) removeStaleNodes() { // If we're beyond the expiry threshold, removeStaleNodes this node from our // window. Nodes are guaranteed to be strictly increasing in entry time, // so we can break this loop once we find the first non-stale one. 
- for { + newest, ok := w.elements.PeekRight() + if !ok { + return + } + for w.elements.Len() > w.minSize { oldest, ok := w.elements.PeekLeft() - if !ok || w.clock.Time().Sub(oldest.entryTime) <= w.ttl { + if !ok || newest.entryTime.Sub(oldest.entryTime) <= w.ttl { return } _, _ = w.elements.PopLeft() diff --git a/avalanchego/utils/window/window_test.go b/avalanchego/utils/window/window_test.go index 8ca71567..332d20b3 100644 --- a/avalanchego/utils/window/window_test.go +++ b/avalanchego/utils/window/window_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package window @@ -42,9 +42,14 @@ func TestAdd(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + clock := &mockable.Clock{} + clock.Set(time.Now()) + window := New[int]( Config{ - Clock: &mockable.Clock{}, + Clock: clock, MaxSize: testMaxSize, TTL: testTTL, }, @@ -55,10 +60,10 @@ func TestAdd(t *testing.T) { window.Add(test.newlyAdded) - require.Equal(t, len(test.window)+1, window.Length()) + require.Equal(len(test.window)+1, window.Length()) oldest, ok := window.Oldest() - require.Equal(t, test.expectedOldest, oldest) - require.True(t, ok) + require.True(ok) + require.Equal(test.expectedOldest, oldest) }) } } @@ -66,16 +71,19 @@ func TestAdd(t *testing.T) { // TestTTLAdd tests the case where an element is stale in the window // and needs to be evicted on Add. 
func TestTTLAdd(t *testing.T) { - clock := mockable.Clock{} + require := require.New(t) + + clock := &mockable.Clock{} + start := time.Now() + clock.Set(start) + window := New[int]( Config{ - Clock: &clock, + Clock: clock, MaxSize: testMaxSize, TTL: testTTL, }, ) - epochStart := time.Unix(0, 0) - clock.Set(epochStart) // Now the window looks like this: // [1, 2, 3] @@ -83,47 +91,57 @@ func TestTTLAdd(t *testing.T) { window.Add(2) window.Add(3) - require.Equal(t, 3, window.Length()) + require.Equal(3, window.Length()) oldest, ok := window.Oldest() - require.Equal(t, 1, oldest) - require.True(t, ok) + require.True(ok) + require.Equal(1, oldest) // Now we're one second past the ttl of 10 seconds as defined in testTTL, // so all existing elements need to be evicted. - clock.Set(epochStart.Add(11 * time.Second)) + clock.Set(start.Add(testTTL + time.Second)) // Now the window should look like this: // [4] window.Add(4) - require.Equal(t, 1, window.Length()) + require.Equal(1, window.Length()) oldest, ok = window.Oldest() - require.Equal(t, 4, oldest) - require.True(t, ok) - // Now we're one second past the ttl of 10 seconds of when [4] was added, - // so all existing elements should be evicted. 
- clock.Set(epochStart.Add(22 * time.Second)) + require.True(ok) + require.Equal(4, oldest) + // Now we're one second before the ttl of 10 seconds of when [4] was added, + // no element should be evicted + // [4, 5] + clock.Set(start.Add(2 * testTTL)) + window.Add(5) + require.Equal(2, window.Length()) + oldest, ok = window.Oldest() + require.True(ok) + require.Equal(4, oldest) - // Now the window should look like this: - // [] - require.Equal(t, 0, window.Length()) + // Now the window is still containing 4: + // [4, 5] + // we only evict on Add method because the window is calculated in the last element added + require.Equal(2, window.Length()) oldest, ok = window.Oldest() - require.Equal(t, 0, oldest) - require.False(t, ok) + require.True(ok) + require.Equal(4, oldest) } -// TestTTLReadOnly tests that stale elements are still evicted on Length +// TestTTLReadOnly tests that elements are not evicted on Length func TestTTLLength(t *testing.T) { - clock := mockable.Clock{} + require := require.New(t) + + clock := &mockable.Clock{} + start := time.Now() + clock.Set(start) + window := New[int]( Config{ - Clock: &clock, + Clock: clock, MaxSize: testMaxSize, TTL: testTTL, }, ) - epochStart := time.Unix(0, 0) - clock.Set(epochStart) // Now the window looks like this: // [1, 2, 3] @@ -131,30 +149,33 @@ func TestTTLLength(t *testing.T) { window.Add(2) window.Add(3) - require.Equal(t, 3, window.Length()) + require.Equal(3, window.Length()) // Now we're one second past the ttl of 10 seconds as defined in testTTL, // so all existing elements need to be evicted. - clock.Set(epochStart.Add(11 * time.Second)) + clock.Set(start.Add(testTTL + time.Second)) // No more elements should be present in the window. 
- require.Equal(t, 0, window.Length()) + require.Equal(3, window.Length()) } -// TestTTLReadOnly tests that stale elements are still evicted on calling Oldest +// TestTTLReadOnly tests that stale elements are not evicted on calling Oldest func TestTTLOldest(t *testing.T) { - clock := mockable.Clock{} + require := require.New(t) + + clock := &mockable.Clock{} + start := time.Now() + clock.Set(start) + windowIntf := New[int]( Config{ - Clock: &clock, + Clock: clock, MaxSize: testMaxSize, TTL: testTTL, }, ) - window, ok := windowIntf.(*window[int]) - require.True(t, ok) - epochStart := time.Unix(0, 0) - clock.Set(epochStart) + require.IsType(&window[int]{}, windowIntf) + window := windowIntf.(*window[int]) // Now the window looks like this: // [1, 2, 3] @@ -163,26 +184,31 @@ func TestTTLOldest(t *testing.T) { window.Add(3) oldest, ok := window.Oldest() - require.Equal(t, 1, oldest) - require.True(t, ok) - require.Equal(t, 3, window.elements.Len()) + require.True(ok) + require.Equal(1, oldest) + require.Equal(3, window.elements.Len()) // Now we're one second past the ttl of 10 seconds as defined in testTTL, - // so all existing elements need to be evicted. - clock.Set(epochStart.Add(11 * time.Second)) + // so all existing elements shoud still exist. 
+ clock.Set(start.Add(testTTL + time.Second)) - // Now there shouldn't be any elements in the window + // Now there should be three elements in the window oldest, ok = window.Oldest() - require.Equal(t, 0, oldest) - require.False(t, ok) - require.Equal(t, 0, window.elements.Len()) + require.True(ok) + require.Equal(1, oldest) + require.Equal(3, window.elements.Len()) } // Tests that we bound the amount of elements in the window func TestMaxCapacity(t *testing.T) { + require := require.New(t) + + clock := &mockable.Clock{} + clock.Set(time.Now()) + window := New[int]( Config{ - Clock: &mockable.Clock{}, + Clock: clock, MaxSize: 3, TTL: testTTL, }, @@ -207,8 +233,45 @@ func TestMaxCapacity(t *testing.T) { // [4, 5, 6] window.Add(6) - require.Equal(t, 3, window.Length()) + require.Equal(3, window.Length()) + oldest, ok := window.Oldest() + require.True(ok) + require.Equal(4, oldest) +} + +// Tests that we do not evict past the minimum window size +func TestMinCapacity(t *testing.T) { + require := require.New(t) + + clock := &mockable.Clock{} + start := time.Now() + clock.Set(start) + + window := New[int]( + Config{ + Clock: clock, + MaxSize: 3, + MinSize: 2, + TTL: testTTL, + }, + ) + + // Now the window looks like this: + // [1, 2, 3] + window.Add(1) + window.Add(2) + window.Add(3) + + clock.Set(start.Add(testTTL + time.Second)) + + // All of [1, 2, 3] are past the ttl now, but we don't evict 3 because of + // the minimum length. + // Now the window should look like this: + // [3, 4] + window.Add(4) + + require.Equal(2, window.Length()) oldest, ok := window.Oldest() - require.Equal(t, 4, oldest) - require.True(t, ok) + require.True(ok) + require.Equal(3, oldest) } diff --git a/avalanchego/utils/wrappers/closers.go b/avalanchego/utils/wrappers/closers.go index d366e928..b16e4baa 100644 --- a/avalanchego/utils/wrappers/closers.go +++ b/avalanchego/utils/wrappers/closers.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
+// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package wrappers diff --git a/avalanchego/utils/wrappers/errors.go b/avalanchego/utils/wrappers/errors.go index dab20705..d887ffb4 100644 --- a/avalanchego/utils/wrappers/errors.go +++ b/avalanchego/utils/wrappers/errors.go @@ -1,14 +1,8 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package wrappers -import ( - "strings" -) - -var _ error = (*aggregate)(nil) - type Errs struct{ Err error } func (errs *Errs) Errored() bool { @@ -25,28 +19,3 @@ func (errs *Errs) Add(errors ...error) { } } } - -// NewAggregate returns an aggregate error from a list of errors -func NewAggregate(errs []error) error { - err := &aggregate{errs} - if len(err.Errors()) == 0 { - return nil - } - return err -} - -type aggregate struct{ errs []error } - -// Error returns the slice of errors with comma separated messsages wrapped in brackets -// [ error string 0 ], [ error string 1 ] ... -func (a *aggregate) Error() string { - errString := make([]string, len(a.errs)) - for i, err := range a.errs { - errString[i] = "[" + err.Error() + "]" - } - return strings.Join(errString, ",") -} - -func (a *aggregate) Errors() []error { - return a.errs -} diff --git a/avalanchego/utils/wrappers/packing.go b/avalanchego/utils/wrappers/packing.go index e6869b1b..8700ebe1 100644 --- a/avalanchego/utils/wrappers/packing.go +++ b/avalanchego/utils/wrappers/packing.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package wrappers @@ -22,8 +22,6 @@ const ( LongLen = 8 // BoolLen is the number of bytes per bool BoolLen = 1 - // IPLen is the number of bytes per IP - IPLen = 16 + ShortLen ) func StringLen(str string) int { @@ -33,11 +31,11 @@ func StringLen(str string) int { } var ( - errBadLength = errors.New("packer has insufficient length for input") - errNegativeOffset = errors.New("negative offset") - errInvalidInput = errors.New("input does not match expected format") - errBadBool = errors.New("unexpected value when unpacking bool") - errOversized = errors.New("size is larger than limit") + ErrInsufficientLength = errors.New("packer has insufficient length for input") + errNegativeOffset = errors.New("negative offset") + errInvalidInput = errors.New("input does not match expected format") + errBadBool = errors.New("unexpected value when unpacking bool") + errOversized = errors.New("size is larger than limit") ) // Packer packs and unpacks a byte array from/to standard values @@ -252,7 +250,7 @@ func (p *Packer) checkSpace(bytes int) { case bytes < 0: p.Add(errInvalidInput) case len(p.Bytes)-p.Offset < bytes: - p.Add(errBadLength) + p.Add(ErrInsufficientLength) } } @@ -266,7 +264,7 @@ func (p *Packer) expand(bytes int) { case neededSize <= len(p.Bytes): // Byte slice has sufficient length already return case neededSize > p.MaxSize: // Lengthening the byte slice would cause it to grow too large - p.Err = errBadLength + p.Err = ErrInsufficientLength return case neededSize <= cap(p.Bytes): // Byte slice has sufficient capacity to lengthen it without mem alloc p.Bytes = p.Bytes[:neededSize] diff --git a/avalanchego/utils/wrappers/packing_test.go b/avalanchego/utils/wrappers/packing_test.go index 1d674020..99f61fad 100644 --- a/avalanchego/utils/wrappers/packing_test.go +++ b/avalanchego/utils/wrappers/packing_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. package wrappers @@ -33,12 +33,12 @@ func TestPackerCheckSpace(t *testing.T) { p = Packer{Bytes: []byte{0x01}, Offset: 1} p.checkSpace(1) require.True(p.Errored()) - require.ErrorIs(p.Err, errBadLength) + require.ErrorIs(p.Err, ErrInsufficientLength) p = Packer{Bytes: []byte{0x01}, Offset: 2} p.checkSpace(0) require.True(p.Errored()) - require.ErrorIs(p.Err, errBadLength) + require.ErrorIs(p.Err, ErrInsufficientLength) } func TestPackerExpand(t *testing.T) { @@ -47,7 +47,7 @@ func TestPackerExpand(t *testing.T) { p := Packer{Bytes: []byte{0x01}, Offset: 2} p.expand(1) require.True(p.Errored()) - require.ErrorIs(p.Err, errBadLength) + require.ErrorIs(p.Err, ErrInsufficientLength) p = Packer{Bytes: []byte{0x01, 0x02, 0x03}, Offset: 0} p.expand(1) @@ -67,7 +67,7 @@ func TestPackerPackByte(t *testing.T) { p.PackByte(0x02) require.True(p.Errored()) - require.ErrorIs(p.Err, errBadLength) + require.ErrorIs(p.Err, ErrInsufficientLength) } func TestPackerUnpackByte(t *testing.T) { @@ -81,7 +81,7 @@ func TestPackerUnpackByte(t *testing.T) { require.Equal(uint8(ByteSentinel), p.UnpackByte()) require.True(p.Errored()) - require.ErrorIs(p.Err, errBadLength) + require.ErrorIs(p.Err, ErrInsufficientLength) } func TestPackerPackShort(t *testing.T) { @@ -105,7 +105,7 @@ func TestPackerUnpackShort(t *testing.T) { require.Equal(uint16(ShortSentinel), p.UnpackShort()) require.True(p.Errored()) - require.ErrorIs(p.Err, errBadLength) + require.ErrorIs(p.Err, ErrInsufficientLength) } func TestPackerPackInt(t *testing.T) { @@ -119,7 +119,7 @@ func TestPackerPackInt(t *testing.T) { p.PackInt(0x05060708) require.True(p.Errored()) - require.ErrorIs(p.Err, errBadLength) + require.ErrorIs(p.Err, ErrInsufficientLength) } func TestPackerUnpackInt(t *testing.T) { @@ -133,7 +133,7 @@ func TestPackerUnpackInt(t *testing.T) { require.Equal(uint32(IntSentinel), p.UnpackInt()) require.True(p.Errored()) - require.ErrorIs(p.Err, errBadLength) + 
require.ErrorIs(p.Err, ErrInsufficientLength) } func TestPackerPackLong(t *testing.T) { @@ -147,7 +147,7 @@ func TestPackerPackLong(t *testing.T) { p.PackLong(0x090a0b0c0d0e0f00) require.True(p.Errored()) - require.ErrorIs(p.Err, errBadLength) + require.ErrorIs(p.Err, ErrInsufficientLength) } func TestPackerUnpackLong(t *testing.T) { @@ -161,7 +161,7 @@ func TestPackerUnpackLong(t *testing.T) { require.Equal(uint64(LongSentinel), p.UnpackLong()) require.True(p.Errored()) - require.ErrorIs(p.Err, errBadLength) + require.ErrorIs(p.Err, ErrInsufficientLength) } func TestPackerPackFixedBytes(t *testing.T) { @@ -175,7 +175,7 @@ func TestPackerPackFixedBytes(t *testing.T) { p.PackFixedBytes([]byte("Avax")) require.True(p.Errored()) - require.ErrorIs(p.Err, errBadLength) + require.ErrorIs(p.Err, ErrInsufficientLength) } func TestPackerUnpackFixedBytes(t *testing.T) { @@ -189,7 +189,7 @@ func TestPackerUnpackFixedBytes(t *testing.T) { require.Nil(p.UnpackFixedBytes(4)) require.True(p.Errored()) - require.Error(p.Err, errBadLength) + require.ErrorIs(p.Err, ErrInsufficientLength) } func TestPackerPackBytes(t *testing.T) { @@ -203,7 +203,7 @@ func TestPackerPackBytes(t *testing.T) { p.PackBytes([]byte("Avax")) require.True(p.Errored()) - require.ErrorIs(p.Err, errBadLength) + require.ErrorIs(p.Err, ErrInsufficientLength) } func TestPackerUnpackBytes(t *testing.T) { @@ -217,7 +217,7 @@ func TestPackerUnpackBytes(t *testing.T) { require.Nil(p.UnpackBytes()) require.True(p.Errored()) - require.ErrorIs(p.Err, errBadLength) + require.ErrorIs(p.Err, ErrInsufficientLength) } func TestPackerUnpackLimitedBytes(t *testing.T) { @@ -231,7 +231,7 @@ func TestPackerUnpackLimitedBytes(t *testing.T) { require.Nil(p.UnpackLimitedBytes(10)) require.True(p.Errored()) - require.ErrorIs(p.Err, errBadLength) + require.ErrorIs(p.Err, ErrInsufficientLength) // Reset and don't allow enough bytes p = Packer{Bytes: p.Bytes} @@ -263,7 +263,7 @@ func TestPackerUnpackString(t *testing.T) { 
require.Equal("", p.UnpackStr()) require.True(p.Errored()) - require.ErrorIs(p.Err, errBadLength) + require.ErrorIs(p.Err, ErrInsufficientLength) } func TestPackerUnpackLimitedString(t *testing.T) { @@ -277,7 +277,7 @@ func TestPackerUnpackLimitedString(t *testing.T) { require.Equal("", p.UnpackLimitedStr(10)) require.True(p.Errored()) - require.ErrorIs(p.Err, errBadLength) + require.ErrorIs(p.Err, ErrInsufficientLength) // Reset and don't allow enough bytes p = Packer{Bytes: p.Bytes} @@ -301,7 +301,7 @@ func TestPacker(t *testing.T) { p.PackShort(1) require.True(p.Errored()) - require.ErrorIs(p.Err, errBadLength) + require.ErrorIs(p.Err, ErrInsufficientLength) p = Packer{Bytes: p.Bytes} require.Equal(uint16(17), p.UnpackShort()) @@ -340,7 +340,7 @@ func TestPackerPackBool(t *testing.T) { p.PackBool(false) require.True(p.Errored()) - require.ErrorIs(p.Err, errBadLength) + require.ErrorIs(p.Err, ErrInsufficientLength) } func TestPackerUnpackBool(t *testing.T) { @@ -348,17 +348,17 @@ func TestPackerUnpackBool(t *testing.T) { p := Packer{Bytes: []byte{0x01}, Offset: 0} - require.Equal(true, p.UnpackBool()) + require.True(p.UnpackBool()) require.False(p.Errored()) require.NoError(p.Err) require.Equal(BoolLen, p.Offset) require.Equal(BoolSentinel, p.UnpackBool()) require.True(p.Errored()) - require.ErrorIs(p.Err, errBadLength) + require.ErrorIs(p.Err, ErrInsufficientLength) p = Packer{Bytes: []byte{0x42}, Offset: 0} - require.Equal(false, p.UnpackBool()) + require.False(p.UnpackBool()) require.True(p.Errored()) require.ErrorIs(p.Err, errBadBool) } diff --git a/avalanchego/utils/zero.go b/avalanchego/utils/zero.go index c5ca3b9c..c691ed2e 100644 --- a/avalanchego/utils/zero.go +++ b/avalanchego/utils/zero.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package utils @@ -7,3 +7,13 @@ package utils func Zero[T any]() T { return *new(T) } + +// ZeroSlice sets all values of the provided slice to the type's zero value. +// +// This can be useful to ensure that the garbage collector doesn't hold +// references to values that are no longer desired. +func ZeroSlice[T any](s []T) { + for i := range s { + s[i] = *new(T) + } +} diff --git a/avalanchego/version/application.go b/avalanchego/version/application.go index 83613055..a69ce870 100644 --- a/avalanchego/version/application.go +++ b/avalanchego/version/application.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package version @@ -6,7 +6,7 @@ package version import ( "errors" "fmt" - "sync/atomic" + "sync" ) var ( @@ -16,29 +16,30 @@ var ( ) type Application struct { - Major int `json:"major" yaml:"major"` - Minor int `json:"minor" yaml:"minor"` - Patch int `json:"patch" yaml:"patch"` + Name string `json:"name" yaml:"name"` + Major int `json:"major" yaml:"major"` + Minor int `json:"minor" yaml:"minor"` + Patch int `json:"patch" yaml:"patch"` - str atomic.Value + makeStrOnce sync.Once + str string } // The only difference here between Application and Semantic is that Application // prepends "avalanche/" or "flare/" rather than "v". 
func (a *Application) String() string { - strIntf := a.str.Load() - if strIntf != nil { - return strIntf.(string) - } + a.makeStrOnce.Do(a.initString) + return a.str +} - str := fmt.Sprintf( - GetApplicationPrefix()+"/%d.%d.%d", +func (a *Application) initString() { + a.str = fmt.Sprintf( + "%s/%d.%d.%d", + a.Name, a.Major, a.Minor, a.Patch, ) - a.str.Store(str) - return str } func (a *Application) Compatible(o *Application) error { diff --git a/avalanchego/version/application_test.go b/avalanchego/version/application_test.go index 95757f30..2fdfd288 100644 --- a/avalanchego/version/application_test.go +++ b/avalanchego/version/application_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package version @@ -11,15 +11,18 @@ import ( ) func TestNewDefaultApplication(t *testing.T) { + require := require.New(t) + v := &Application{ + Name: GetApplicationPrefix(), Major: 1, Minor: 2, Patch: 3, } - require.Equal(t, "avalanche/1.2.3", v.String()) - require.NoError(t, v.Compatible(v)) - require.False(t, v.Before(v)) + require.Equal("avalanche/1.2.3", v.String()) + require.NoError(v.Compatible(v)) + require.False(v.Before(v)) } func TestComparingVersions(t *testing.T) { @@ -31,11 +34,13 @@ func TestComparingVersions(t *testing.T) { }{ { myVersion: &Application{ + Name: Client, Major: 1, Minor: 2, Patch: 3, }, peerVersion: &Application{ + Name: Client, Major: 1, Minor: 2, Patch: 3, @@ -45,11 +50,13 @@ func TestComparingVersions(t *testing.T) { }, { myVersion: &Application{ + Name: Client, Major: 1, Minor: 2, Patch: 4, }, peerVersion: &Application{ + Name: Client, Major: 1, Minor: 2, Patch: 3, @@ -59,11 +66,13 @@ func TestComparingVersions(t *testing.T) { }, { myVersion: &Application{ + Name: Client, Major: 1, Minor: 2, Patch: 3, }, peerVersion: &Application{ + Name: Client, Major: 1, Minor: 2, Patch: 4, @@ -73,11 +82,13 @@ func 
TestComparingVersions(t *testing.T) { }, { myVersion: &Application{ + Name: Client, Major: 1, Minor: 3, Patch: 3, }, peerVersion: &Application{ + Name: Client, Major: 1, Minor: 2, Patch: 3, @@ -87,11 +98,13 @@ func TestComparingVersions(t *testing.T) { }, { myVersion: &Application{ + Name: Client, Major: 1, Minor: 2, Patch: 3, }, peerVersion: &Application{ + Name: Client, Major: 1, Minor: 3, Patch: 3, @@ -101,11 +114,13 @@ func TestComparingVersions(t *testing.T) { }, { myVersion: &Application{ + Name: Client, Major: 2, Minor: 2, Patch: 3, }, peerVersion: &Application{ + Name: Client, Major: 1, Minor: 2, Patch: 3, @@ -115,11 +130,13 @@ func TestComparingVersions(t *testing.T) { }, { myVersion: &Application{ + Name: Client, Major: 1, Minor: 2, Patch: 3, }, peerVersion: &Application{ + Name: Client, Major: 2, Minor: 2, Patch: 3, @@ -130,19 +147,14 @@ func TestComparingVersions(t *testing.T) { } for _, test := range tests { t.Run(fmt.Sprintf("%s %s", test.myVersion, test.peerVersion), func(t *testing.T) { + require := require.New(t) err := test.myVersion.Compatible(test.peerVersion) - if test.compatible && err != nil { - t.Fatalf("Expected version to be compatible but returned: %s", - err) - } else if !test.compatible && err == nil { - t.Fatalf("Expected version to be incompatible but returned no error") - } - before := test.myVersion.Before(test.peerVersion) - if test.before && !before { - t.Fatalf("Expected version to be before the peer version but wasn't") - } else if !test.before && before { - t.Fatalf("Expected version not to be before the peer version but was") + if test.compatible { + require.NoError(err) + } else { + require.ErrorIs(err, errDifferentMajor) } + require.Equal(test.before, test.myVersion.Before(test.peerVersion)) }) } } diff --git a/avalanchego/version/compatibility.go b/avalanchego/version/compatibility.go index 6c3cae20..1cde1899 100644 --- a/avalanchego/version/compatibility.go +++ b/avalanchego/version/compatibility.go @@ -1,4 +1,4 @@ -// 
Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package version diff --git a/avalanchego/version/compatibility.json b/avalanchego/version/compatibility.json index e11fdd35..756a6a98 100644 --- a/avalanchego/version/compatibility.json +++ b/avalanchego/version/compatibility.json @@ -1,4 +1,38 @@ { + "33": [ + "v1.11.0" + ], + "31": [ + "v1.10.18", + "v1.10.19" + ], + "30": [ + "v1.10.15", + "v1.10.16", + "v1.10.17" + ], + "29": [ + "v1.10.13", + "v1.10.14" + ], + "28": [ + "v1.10.9", + "v1.10.10", + "v1.10.11", + "v1.10.12" + ], + "27": [ + "v1.10.5", + "v1.10.6", + "v1.10.7", + "v1.10.8" + ], + "26": [ + "v1.10.1", + "v1.10.2", + "v1.10.3", + "v1.10.4" + ], "25": [ "v1.10.0" ], diff --git a/avalanchego/version/compatibility_test.go b/avalanchego/version/compatibility_test.go index 014f28ad..21d4cbab 100644 --- a/avalanchego/version/compatibility_test.go +++ b/avalanchego/version/compatibility_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package version @@ -13,17 +13,20 @@ import ( func TestCompatibility(t *testing.T) { v := &Application{ + Name: Client, Major: 1, Minor: 4, Patch: 3, } minCompatable := &Application{ + Name: Client, Major: 1, Minor: 4, Patch: 0, } minCompatableTime := time.Unix(9000, 0) prevMinCompatable := &Application{ + Name: Client, Major: 1, Minor: 3, Patch: 0, @@ -38,74 +41,84 @@ func TestCompatibility(t *testing.T) { require.Equal(t, v, compatibility.Version()) tests := []struct { - peer *Application - time time.Time - compatible bool + peer *Application + time time.Time + expectedErr error }{ { peer: &Application{ + Name: GetApplicationPrefix(), Major: 1, Minor: 5, Patch: 0, }, - time: minCompatableTime, - compatible: true, + time: minCompatableTime, }, { peer: &Application{ + Name: Client, + Major: 1, + Minor: 5, + Patch: 0, + }, + time: minCompatableTime, + }, + { + peer: &Application{ + Name: Client, Major: 1, Minor: 3, Patch: 5, }, - time: time.Unix(8500, 0), - compatible: true, + time: time.Unix(8500, 0), }, { peer: &Application{ + Name: Client, Major: 0, Minor: 1, Patch: 0, }, - time: minCompatableTime, - compatible: false, + time: minCompatableTime, + expectedErr: errDifferentMajor, }, { peer: &Application{ + Name: Client, Major: 1, Minor: 3, Patch: 5, }, - time: minCompatableTime, - compatible: false, + time: minCompatableTime, + expectedErr: errIncompatible, }, { peer: &Application{ + Name: Client, Major: 1, Minor: 2, Patch: 5, }, - time: time.Unix(8500, 0), - compatible: false, + time: time.Unix(8500, 0), + expectedErr: errIncompatible, }, { peer: &Application{ + Name: Client, Major: 1, Minor: 1, Patch: 5, }, - time: time.Unix(7500, 0), - compatible: false, + time: time.Unix(7500, 0), + expectedErr: errIncompatible, }, } for _, test := range tests { peer := test.peer compatibility.clock.Set(test.time) t.Run(fmt.Sprintf("%s-%s", peer, test.time), func(t *testing.T) { - if err := compatibility.Compatible(peer); test.compatible && err != nil { - t.Fatalf("incorrectly 
marked %s as incompatible with %s", peer, err) - } else if !test.compatible && err == nil { - t.Fatalf("incorrectly marked %s as compatible", peer) - } + err := compatibility.Compatible(peer) + require.ErrorIs(t, err, test.expectedErr) }) } } diff --git a/avalanchego/version/constants.go b/avalanchego/version/constants.go index 111113ed..bfcf5dcb 100644 --- a/avalanchego/version/constants.go +++ b/avalanchego/version/constants.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package version @@ -9,54 +9,65 @@ import ( _ "embed" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/constants" ) -// RPCChainVMProtocol should be bumped anytime changes are made which require -// the plugin vm to upgrade to latest avalanchego release to be compatible. -const RPCChainVMProtocol uint = 25 +const ( + Client = "avalanchego" + // RPCChainVMProtocol should be bumped anytime changes are made which + // require the plugin vm to upgrade to latest avalanchego release to be + // compatible. 
+ RPCChainVMProtocol uint = 33 +) // These are globals that describe network upgrades and node versions var ( Current = &Semantic{ Major: 1, - Minor: 10, + Minor: 11, Patch: 0, } CurrentApp = &Application{ + Name: Client, Major: Current.Major, Minor: Current.Minor, Patch: Current.Patch, } MinimumCompatibleVersion = &Application{ + Name: Client, Major: 1, - Minor: 10, + Minor: 11, Patch: 0, } PrevMinimumCompatibleVersion = &Application{ + Name: Client, Major: 1, - Minor: 9, + Minor: 10, Patch: 0, } CurrentSgb = &Semantic{ Major: 0, - Minor: 8, + Minor: 9, Patch: 0, } CurrentSgbApp = &Application{ + Name: Client, Major: CurrentSgb.Major, Minor: CurrentSgb.Minor, Patch: CurrentSgb.Patch, } MinimumCompatibleSgbVersion = &Application{ + Name: Client, Major: 0, - Minor: 8, + Minor: 9, Patch: 0, } PrevMinimumCompatibleSgbVersion = &Application{ + Name: Client, Major: 0, - Minor: 7, + Minor: 8, Patch: 0, } @@ -81,78 +92,69 @@ var ( // by avalanchego, but is useful for downstream libraries. RPCChainVMProtocolCompatibility map[uint][]*Semantic + DefaultUpgradeTime = time.Date(2020, time.December, 5, 5, 0, 0, 0, time.UTC) + ApricotPhase3Times = map[uint32]time.Time{ - constants.MainnetID: time.Date(2021, time.August, 24, 14, 0, 0, 0, time.UTC), - constants.FlareID: time.Date(2022, time.June, 1, 0, 0, 0, 0, time.UTC), - constants.CostwoID: time.Date(2022, time.June, 1, 0, 0, 0, 0, time.UTC), - constants.StagingID: time.Date(2022, time.June, 1, 0, 0, 0, 0, time.UTC), - constants.LocalFlareID: time.Date(2022, time.June, 1, 0, 0, 0, 0, time.UTC), - constants.CostonID: time.Date(2022, time.February, 25, 14, 0, 0, 0, time.UTC), - constants.SongbirdID: time.Date(2022, time.March, 7, 14, 0, 0, 0, time.UTC), + constants.MainnetID: time.Date(2021, time.August, 24, 14, 0, 0, 0, time.UTC), + constants.CostonID: time.Date(2022, time.February, 25, 14, 0, 0, 0, time.UTC), + constants.SongbirdID: time.Date(2022, time.March, 7, 14, 0, 0, 0, time.UTC), } - ApricotPhase3DefaultTime = 
time.Date(2020, time.December, 5, 5, 0, 0, 0, time.UTC) ApricotPhase4Times = map[uint32]time.Time{ - constants.MainnetID: time.Date(2021, time.September, 22, 21, 0, 0, 0, time.UTC), - constants.FlareID: time.Date(2022, time.June, 1, 0, 0, 0, 0, time.UTC), - constants.CostwoID: time.Date(2022, time.June, 1, 0, 0, 0, 0, time.UTC), - constants.StagingID: time.Date(2022, time.June, 1, 0, 0, 0, 0, time.UTC), - constants.LocalFlareID: time.Date(2022, time.June, 1, 0, 0, 0, 0, time.UTC), - constants.CostonID: time.Date(2022, time.February, 25, 15, 0, 0, 0, time.UTC), - constants.SongbirdID: time.Date(2022, time.March, 7, 15, 0, 0, 0, time.UTC), - } - ApricotPhase4DefaultTime = time.Date(2020, time.December, 5, 5, 0, 0, 0, time.UTC) + constants.MainnetID: time.Date(2021, time.September, 22, 21, 0, 0, 0, time.UTC), + constants.CostonID: time.Date(2022, time.February, 25, 15, 0, 0, 0, time.UTC), + constants.SongbirdID: time.Date(2022, time.March, 7, 15, 0, 0, 0, time.UTC), + } + ApricotPhase4MinPChainHeight = map[uint32]uint64{ constants.MainnetID: 793005, } - ApricotPhase4DefaultMinPChainHeight uint64 ApricotPhase5Times = map[uint32]time.Time{ - constants.MainnetID: time.Date(2021, time.December, 2, 18, 0, 0, 0, time.UTC), - constants.FlareID: time.Date(2022, time.June, 1, 0, 0, 0, 0, time.UTC), - constants.CostwoID: time.Date(2022, time.June, 1, 0, 0, 0, 0, time.UTC), - constants.StagingID: time.Date(2022, time.June, 1, 0, 0, 0, 0, time.UTC), - constants.LocalFlareID: time.Date(2022, time.June, 1, 0, 0, 0, 0, time.UTC), - constants.CostonID: time.Date(2022, time.February, 25, 16, 0, 0, 0, time.UTC), - constants.SongbirdID: time.Date(2022, time.March, 7, 16, 0, 0, 0, time.UTC), + constants.MainnetID: time.Date(2021, time.December, 2, 18, 0, 0, 0, time.UTC), + constants.CostonID: time.Date(2022, time.February, 25, 16, 0, 0, 0, time.UTC), + constants.SongbirdID: time.Date(2022, time.March, 7, 16, 0, 0, 0, time.UTC), } - ApricotPhase5DefaultTime = time.Date(2020, 
time.December, 5, 5, 0, 0, 0, time.UTC) ApricotPhase6Times = map[uint32]time.Time{ - constants.MainnetID: time.Date(2022, time.September, 6, 20, 0, 0, 0, time.UTC), - constants.FlareID: time.Date(2024, time.December, 17, 13, 0, 0, 0, time.UTC), - constants.CostwoID: time.Date(2024, time.November, 26, 13, 0, 0, 0, time.UTC), - constants.StagingID: time.Date(2024, time.November, 5, 13, 0, 0, 0, time.UTC), - constants.LocalFlareID: time.Date(2024, time.November, 5, 13, 0, 0, 0, time.UTC), - constants.CostonID: time.Date(2025, time.January, 7, 13, 0, 0, 0, time.UTC), - constants.SongbirdID: time.Date(2025, time.January, 28, 13, 0, 0, 0, time.UTC), - constants.LocalID: time.Date(2024, time.November, 5, 13, 0, 0, 0, time.UTC), + constants.MainnetID: time.Date(2022, time.September, 6, 20, 0, 0, 0, time.UTC), + constants.FlareID: time.Date(2024, time.December, 17, 13, 0, 0, 0, time.UTC), + constants.CostwoID: time.Date(2024, time.November, 26, 13, 0, 0, 0, time.UTC), + constants.CostonID: time.Date(2025, time.January, 7, 13, 0, 0, 0, time.UTC), + constants.SongbirdID: time.Date(2025, time.January, 28, 13, 0, 0, 0, time.UTC), } - ApricotPhase6DefaultTime = time.Date(2020, time.December, 5, 5, 0, 0, 0, time.UTC) BanffTimes = map[uint32]time.Time{ - constants.MainnetID: time.Date(2022, time.October, 18, 16, 0, 0, 0, time.UTC), - constants.FlareID: time.Date(2024, time.December, 17, 15, 0, 0, 0, time.UTC), - constants.CostwoID: time.Date(2024, time.November, 26, 15, 0, 0, 0, time.UTC), - constants.StagingID: time.Date(2024, time.November, 5, 15, 0, 0, 0, time.UTC), - constants.LocalFlareID: time.Date(2024, time.May, 29, 9, 15, 0, 0, time.UTC), - constants.CostonID: time.Date(2025, time.January, 7, 15, 0, 0, 0, time.UTC), - constants.SongbirdID: time.Date(2025, time.January, 28, 15, 0, 0, 0, time.UTC), - constants.LocalID: time.Date(2024, time.November, 5, 15, 0, 0, 0, time.UTC), + constants.MainnetID: time.Date(2022, time.October, 18, 16, 0, 0, 0, time.UTC), + 
constants.FlareID: time.Date(2024, time.December, 17, 15, 0, 0, 0, time.UTC), + constants.CostwoID: time.Date(2024, time.November, 26, 15, 0, 0, 0, time.UTC), + constants.CostonID: time.Date(2025, time.January, 7, 15, 0, 0, 0, time.UTC), + constants.SongbirdID: time.Date(2025, time.January, 28, 15, 0, 0, 0, time.UTC), } - BanffDefaultTime = time.Date(2020, time.December, 5, 5, 0, 0, 0, time.UTC) CortinaTimes = map[uint32]time.Time{ - constants.MainnetID: time.Date(10000, time.December, 1, 0, 0, 0, 0, time.UTC), - constants.FlareID: time.Date(2025, time.May, 13, 12, 0, 0, 0, time.UTC), - constants.CostwoID: time.Date(2025, time.April, 8, 12, 0, 0, 0, time.UTC), - constants.StagingID: time.Date(10000, time.December, 1, 0, 0, 0, 0, time.UTC), - constants.LocalFlareID: time.Date(10000, time.December, 1, 0, 0, 0, 0, time.UTC), - constants.CostonID: time.Date(2025, time.March, 27, 13, 0, 0, 0, time.UTC), - constants.SongbirdID: time.Date(2025, time.May, 6, 12, 0, 0, 0, time.UTC), - constants.LocalID: time.Date(10000, time.December, 1, 0, 0, 0, 0, time.UTC), - } - CortinaDefaultTime = time.Date(2020, time.December, 5, 5, 0, 0, 0, time.UTC) + constants.MainnetID: time.Date(2023, time.April, 25, 15, 0, 0, 0, time.UTC), + constants.FlareID: time.Date(2025, time.May, 13, 12, 0, 0, 0, time.UTC), + constants.CostwoID: time.Date(2025, time.April, 8, 12, 0, 0, 0, time.UTC), + constants.CostonID: time.Date(2025, time.March, 27, 13, 0, 0, 0, time.UTC), + constants.SongbirdID: time.Date(2025, time.May, 6, 12, 0, 0, 0, time.UTC), + } + CortinaXChainStopVertexID = map[uint32]ids.ID{ + // The mainnet stop vertex is well known. It can be verified on any + // fully synced node by looking at the parentID of the genesis block. 
+ // + // Ref: https://subnets.avax.network/x-chain/block/0 + constants.MainnetID: ids.FromStringOrPanic("jrGWDh5Po9FMj54depyunNixpia5PN4aAYxfmNzU8n752Rjga"), + } + + DurangoTimes = map[uint32]time.Time{ + constants.MainnetID: time.Date(2024, time.March, 6, 16, 0, 0, 0, time.UTC), + constants.FlareID: time.Date(2025, time.August, 5, 12, 0, 0, 0, time.UTC), + constants.CostwoID: time.Date(2025, time.June, 24, 12, 0, 0, 0, time.UTC), + constants.CostonID: time.Date(2025, time.July, 1, 12, 0, 0, 0, time.UTC), + constants.SongbirdID: time.Date(2025, time.July, 22, 12, 0, 0, 0, time.UTC), + constants.LocalID: time.Date(10000, time.December, 1, 0, 0, 0, 0, time.UTC), + } ) func init() { @@ -180,49 +182,49 @@ func GetApricotPhase3Time(networkID uint32) time.Time { if upgradeTime, exists := ApricotPhase3Times[networkID]; exists { return upgradeTime } - return ApricotPhase3DefaultTime + return DefaultUpgradeTime } func GetApricotPhase4Time(networkID uint32) time.Time { if upgradeTime, exists := ApricotPhase4Times[networkID]; exists { return upgradeTime } - return ApricotPhase4DefaultTime -} - -func GetApricotPhase4MinPChainHeight(networkID uint32) uint64 { - if minHeight, exists := ApricotPhase4MinPChainHeight[networkID]; exists { - return minHeight - } - return ApricotPhase4DefaultMinPChainHeight + return DefaultUpgradeTime } func GetApricotPhase5Time(networkID uint32) time.Time { if upgradeTime, exists := ApricotPhase5Times[networkID]; exists { return upgradeTime } - return ApricotPhase5DefaultTime + return DefaultUpgradeTime } func GetApricotPhase6Time(networkID uint32) time.Time { if upgradeTime, exists := ApricotPhase6Times[networkID]; exists { return upgradeTime } - return ApricotPhase6DefaultTime + return DefaultUpgradeTime } func GetBanffTime(networkID uint32) time.Time { if upgradeTime, exists := BanffTimes[networkID]; exists { return upgradeTime } - return BanffDefaultTime + return DefaultUpgradeTime } func GetCortinaTime(networkID uint32) time.Time { if 
upgradeTime, exists := CortinaTimes[networkID]; exists { return upgradeTime } - return CortinaDefaultTime + return DefaultUpgradeTime +} + +func GetDurangoTime(networkID uint32) time.Time { + if upgradeTime, exists := DurangoTimes[networkID]; exists { + return upgradeTime + } + return DefaultUpgradeTime } func GetCompatibility(networkID uint32) Compatibility { @@ -230,14 +232,14 @@ func GetCompatibility(networkID uint32) Compatibility { return NewCompatibility( CurrentSgbApp, MinimumCompatibleSgbVersion, - GetCortinaTime(networkID), + GetDurangoTime(networkID), PrevMinimumCompatibleSgbVersion, ) } return NewCompatibility( CurrentApp, MinimumCompatibleVersion, - GetCortinaTime(networkID), + GetDurangoTime(networkID), PrevMinimumCompatibleVersion, ) } diff --git a/avalanchego/version/constants_test.go b/avalanchego/version/constants_test.go index 5e409dd9..aea7ef49 100644 --- a/avalanchego/version/constants_test.go +++ b/avalanchego/version/constants_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package version diff --git a/avalanchego/version/parser.go b/avalanchego/version/parser.go index 6520ca9a..415f8085 100644 --- a/avalanchego/version/parser.go +++ b/avalanchego/version/parser.go @@ -1,17 +1,24 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package version import ( + "errors" "fmt" "strconv" "strings" ) +var ( + errMissingVersionPrefix = errors.New("missing required version prefix") + errMissingApplicationPrefix = errors.New("missing required application prefix") + errMissingVersions = errors.New("missing version numbers") +) + func Parse(s string) (*Semantic, error) { if !strings.HasPrefix(s, "v") { - return nil, fmt.Errorf("version string %q missing required prefix", s) + return nil, fmt.Errorf("%w: %q", errMissingVersionPrefix, s) } s = s[1:] @@ -27,20 +34,21 @@ func Parse(s string) (*Semantic, error) { }, nil } -func ParseApplication(s string) (*Application, error) { - prefix := GetApplicationPrefix() - - if !strings.HasPrefix(s, prefix+"/") { - return nil, fmt.Errorf("application string %q missing required prefix", s) +// TODO: Remove after v1.11.x is activated +func ParseLegacyApplication(s string) (*Application, error) { + prefix := GetApplicationPrefix() + "/" + if !strings.HasPrefix(s, prefix) { + return nil, fmt.Errorf("%w: %q", errMissingApplicationPrefix, s) } - s = s[(len(prefix) + 1):] + s = s[len(prefix):] major, minor, patch, err := parseVersions(s) if err != nil { return nil, err } return &Application{ + Name: Client, // Convert the legacy name to the current client name Major: major, Minor: minor, Patch: patch, @@ -49,8 +57,8 @@ func ParseApplication(s string) (*Application, error) { func parseVersions(s string) (int, int, int, error) { splitVersion := strings.SplitN(s, ".", 3) - if len(splitVersion) != 3 { - return 0, 0, 0, fmt.Errorf("failed to parse %s as a version", s) + if numSeperators := len(splitVersion); numSeperators != 3 { + return 0, 0, 0, fmt.Errorf("%w: expected 3 only got %d", errMissingVersions, numSeperators) } major, err := strconv.Atoi(splitVersion[0]) diff --git a/avalanchego/version/parser_test.go b/avalanchego/version/parser_test.go index 16c435ac..2914de5b 100644 --- a/avalanchego/version/parser_test.go +++ b/avalanchego/version/parser_test.go @@ -1,9 +1,10 @@ 
-// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package version import ( + "strconv" "testing" "github.com/ava-labs/avalanchego/utils/constants" @@ -20,42 +21,103 @@ func TestParse(t *testing.T) { require.Equal(t, 2, v.Minor) require.Equal(t, 3, v.Patch) - badVersions := []string{ - "", - "1.2.3", - "vz.2.3", - "v1.z.3", - "v1.2.z", + tests := []struct { + version string + expectedErr error + }{ + { + version: "", + expectedErr: errMissingVersionPrefix, + }, + { + version: "1.2.3", + expectedErr: errMissingVersionPrefix, + }, + { + version: "z1.2.3", + expectedErr: errMissingVersionPrefix, + }, + { + version: "v1.2", + expectedErr: errMissingVersions, + }, + { + version: "vz.2.3", + expectedErr: strconv.ErrSyntax, + }, + { + version: "v1.z.3", + expectedErr: strconv.ErrSyntax, + }, + { + version: "v1.2.z", + expectedErr: strconv.ErrSyntax, + }, + { + version: "v1.2.3.4", + expectedErr: strconv.ErrSyntax, + }, } - for _, badVersion := range badVersions { - _, err := Parse(badVersion) - require.Error(t, err) + for _, test := range tests { + t.Run(test.version, func(t *testing.T) { + _, err := Parse(test.version) + require.ErrorIs(t, err, test.expectedErr) + }) } } -func TestParseApplication(t *testing.T) { +func TestParseLegacyApplication(t *testing.T) { InitApplicationPrefix(constants.FlareID) - v, err := ParseApplication("avalanche/1.2.3") + v, err := ParseLegacyApplication("avalanche/1.2.3") require.NoError(t, err) require.NotNil(t, v) - require.Equal(t, "avalanche/1.2.3", v.String()) + require.Equal(t, "avalanchego/1.2.3", v.String()) + require.Equal(t, "avalanchego", v.Name) require.Equal(t, 1, v.Major) require.Equal(t, 2, v.Minor) require.Equal(t, 3, v.Patch) require.NoError(t, v.Compatible(v)) require.False(t, v.Before(v)) - badVersions := []string{ - "", - "avalanche/", - "avalanche/z.0.0", - "avalanche/0.z.0", - "avalanche/0.0.z", + 
tests := []struct { + version string + expectedErr error + }{ + { + version: "", + expectedErr: errMissingApplicationPrefix, + }, + { + version: "avalanchego/v1.2.3", + expectedErr: errMissingApplicationPrefix, + }, + { + version: "avalanche/", + expectedErr: errMissingVersions, + }, + { + version: "avalanche/z.0.0", + expectedErr: strconv.ErrSyntax, + }, + { + version: "avalanche/0.z.0", + expectedErr: strconv.ErrSyntax, + }, + { + version: "avalanche/0.0.z", + expectedErr: strconv.ErrSyntax, + }, + { + version: "avalanche/0.0.0.0", + expectedErr: strconv.ErrSyntax, + }, } - for _, badVersion := range badVersions { - _, err := ParseApplication(badVersion) - require.Error(t, err) + for _, test := range tests { + t.Run(test.version, func(t *testing.T) { + _, err := ParseLegacyApplication(test.version) + require.ErrorIs(t, err, test.expectedErr) + }) } } diff --git a/avalanchego/version/string.go b/avalanchego/version/string.go index 5978181b..9abe555b 100644 --- a/avalanchego/version/string.go +++ b/avalanchego/version/string.go @@ -1,10 +1,12 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package version import ( "fmt" + "runtime" + "strings" ) var ( @@ -26,6 +28,13 @@ func init() { format += ", commit=%s" args = append(args, GitCommit) } + + // add golang version + goVersion := runtime.Version() + goVersionNumber := strings.TrimPrefix(goVersion, "go") + format += ", go=%s" + args = append(args, goVersionNumber) + format += "]\n" String = fmt.Sprintf(format, args...) } diff --git a/avalanchego/version/version.go b/avalanchego/version/version.go index 81acdc42..b8fe119b 100644 --- a/avalanchego/version/version.go +++ b/avalanchego/version/version.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package version @@ -28,7 +28,7 @@ type Semantic struct { } // The only difference here between Semantic and Application is that Semantic -// prepends "v" rather than "avalanche/". +// prepends "v" rather than the client name. func (s *Semantic) String() string { strIntf := s.str.Load() if strIntf != nil { diff --git a/avalanchego/version/version_test.go b/avalanchego/version/version_test.go index d66c1212..69c494c8 100644 --- a/avalanchego/version/version_test.go +++ b/avalanchego/version/version_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package version diff --git a/avalanchego/vms/avm/blocks/block.go b/avalanchego/vms/avm/block/block.go similarity index 90% rename from avalanchego/vms/avm/blocks/block.go rename to avalanchego/vms/avm/block/block.go index 6ab2af6b..376062c0 100644 --- a/avalanchego/vms/avm/blocks/block.go +++ b/avalanchego/vms/avm/block/block.go @@ -1,7 +1,7 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package blocks +package block import ( "time" diff --git a/avalanchego/vms/avm/blocks/block_test.go b/avalanchego/vms/avm/block/block_test.go similarity index 85% rename from avalanchego/vms/avm/blocks/block_test.go rename to avalanchego/vms/avm/block/block_test.go index 88ccce3a..3b12838b 100644 --- a/avalanchego/vms/avm/blocks/block_test.go +++ b/avalanchego/vms/avm/block/block_test.go @@ -1,7 +1,7 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
-package blocks +package block import ( "testing" @@ -25,11 +25,28 @@ var ( assetID = ids.GenerateTestID() ) +func TestInvalidBlock(t *testing.T) { + require := require.New(t) + + parser, err := NewParser( + time.Time{}, + time.Time{}, + []fxs.Fx{ + &secp256k1fx.Fx{}, + }, + ) + require.NoError(err) + + _, err = parser.ParseBlock(nil) + require.ErrorIs(err, codec.ErrCantUnpackVersion) +} + func TestStandardBlocks(t *testing.T) { // check standard block can be built and parsed require := require.New(t) parser, err := NewParser( + time.Time{}, time.Time{}, []fxs.Fx{ &secp256k1fx.Fx{}, @@ -58,8 +75,8 @@ func TestStandardBlocks(t *testing.T) { require.Equal(standardBlk.Bytes(), parsed.Bytes()) require.Equal(standardBlk.Timestamp(), parsed.Timestamp()) - parsedStandardBlk, ok := parsed.(*StandardBlock) - require.True(ok) + require.IsType(&StandardBlock{}, parsed) + parsedStandardBlk := parsed.(*StandardBlock) require.Equal(txs, parsedStandardBlk.Txs()) require.Equal(parsed.Txs(), parsedStandardBlk.Txs()) diff --git a/avalanchego/vms/avm/blocks/builder/builder.go b/avalanchego/vms/avm/block/builder/builder.go similarity index 82% rename from avalanchego/vms/avm/blocks/builder/builder.go rename to avalanchego/vms/avm/block/builder/builder.go index 77b7d258..80e73481 100644 --- a/avalanchego/vms/avm/blocks/builder/builder.go +++ b/avalanchego/vms/avm/block/builder/builder.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package builder @@ -12,12 +12,12 @@ import ( "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/utils/units" - "github.com/ava-labs/avalanchego/vms/avm/blocks" - "github.com/ava-labs/avalanchego/vms/avm/states" + "github.com/ava-labs/avalanchego/vms/avm/block" + "github.com/ava-labs/avalanchego/vms/avm/state" "github.com/ava-labs/avalanchego/vms/avm/txs" "github.com/ava-labs/avalanchego/vms/avm/txs/mempool" - blockexecutor "github.com/ava-labs/avalanchego/vms/avm/blocks/executor" + blockexecutor "github.com/ava-labs/avalanchego/vms/avm/block/executor" txexecutor "github.com/ava-labs/avalanchego/vms/avm/txs/executor" ) @@ -82,7 +82,7 @@ func (b *builder) BuildBlock(context.Context) (snowman.Block, error) { nextTimestamp = preferredTimestamp } - stateDiff, err := states.NewDiff(preferredID, b.manager) + stateDiff, err := state.NewDiff(preferredID, b.manager) if err != nil { return nil, err } @@ -93,15 +93,19 @@ func (b *builder) BuildBlock(context.Context) (snowman.Block, error) { remainingSize = targetBlockSize ) for { - tx := b.mempool.Peek(remainingSize) - if tx == nil { + tx, exists := b.mempool.Peek() + // Invariant: [mempool.MaxTxSize] < [targetBlockSize]. This guarantees + // that we will only stop building a block once there are no + // transactions in the mempool or the block is at least + // [targetBlockSize - mempool.MaxTxSize] bytes full. + if !exists || len(tx.Bytes()) > remainingSize { break } - b.mempool.Remove([]*txs.Tx{tx}) + b.mempool.Remove(tx) // Invariant: [tx] has already been syntactically verified. 
- txDiff, err := wrapState(stateDiff) + txDiff, err := state.NewDiffOn(stateDiff) if err != nil { return nil, err } @@ -153,7 +157,7 @@ func (b *builder) BuildBlock(context.Context) (snowman.Block, error) { return nil, ErrNoTransactions } - statelessBlk, err := blocks.NewStandardBlock( + statelessBlk, err := block.NewStandardBlock( preferredID, nextHeight, nextTimestamp, @@ -166,17 +170,3 @@ func (b *builder) BuildBlock(context.Context) (snowman.Block, error) { return b.manager.NewBlock(statelessBlk), nil } - -type stateGetter struct { - state states.Chain -} - -func (s stateGetter) GetState(ids.ID) (states.Chain, bool) { - return s.state, true -} - -func wrapState(parentState states.Chain) (states.Diff, error) { - return states.NewDiff(ids.Empty, stateGetter{ - state: parentState, - }) -} diff --git a/avalanchego/vms/avm/blocks/builder/builder_test.go b/avalanchego/vms/avm/block/builder/builder_test.go similarity index 86% rename from avalanchego/vms/avm/blocks/builder/builder_test.go rename to avalanchego/vms/avm/block/builder/builder_test.go index 058d82b1..716f63bc 100644 --- a/avalanchego/vms/avm/blocks/builder/builder_test.go +++ b/avalanchego/vms/avm/block/builder/builder_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package builder @@ -9,14 +9,12 @@ import ( "testing" "time" - "github.com/golang/mock/gomock" - "github.com/prometheus/client_golang/prometheus" - "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" "github.com/ava-labs/avalanchego/codec" - "github.com/ava-labs/avalanchego/database/manager" + "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" @@ -26,20 +24,21 @@ import ( "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/timer/mockable" - "github.com/ava-labs/avalanchego/version" - "github.com/ava-labs/avalanchego/vms/avm/blocks" + "github.com/ava-labs/avalanchego/vms/avm/block" "github.com/ava-labs/avalanchego/vms/avm/fxs" "github.com/ava-labs/avalanchego/vms/avm/metrics" - "github.com/ava-labs/avalanchego/vms/avm/states" + "github.com/ava-labs/avalanchego/vms/avm/state" "github.com/ava-labs/avalanchego/vms/avm/txs" "github.com/ava-labs/avalanchego/vms/avm/txs/mempool" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/secp256k1fx" - blkexecutor "github.com/ava-labs/avalanchego/vms/avm/blocks/executor" + blkexecutor "github.com/ava-labs/avalanchego/vms/avm/block/executor" txexecutor "github.com/ava-labs/avalanchego/vms/avm/txs/executor" ) +const trackChecksums = false + var ( errTest = errors.New("test error") chainID = ids.GenerateTestID() @@ -84,7 +83,7 @@ func TestBuilderBuildBlock(t *testing.T) { preferredID := ids.GenerateTestID() preferredHeight := uint64(1337) preferredTimestamp := time.Now() - preferredBlock := blocks.NewMockBlock(ctrl) + preferredBlock := block.NewMockBlock(ctrl) preferredBlock.EXPECT().Height().Return(preferredHeight) preferredBlock.EXPECT().Timestamp().Return(preferredTimestamp) @@ -107,7 +106,7 @@ func TestBuilderBuildBlock(t *testing.T) { mempool, ) }, - 
expectedErr: states.ErrMissingParentState, + expectedErr: state.ErrMissingParentState, }, { name: "tx fails semantic verification", @@ -115,11 +114,11 @@ func TestBuilderBuildBlock(t *testing.T) { preferredID := ids.GenerateTestID() preferredHeight := uint64(1337) preferredTimestamp := time.Now() - preferredBlock := blocks.NewMockBlock(ctrl) + preferredBlock := block.NewMockBlock(ctrl) preferredBlock.EXPECT().Height().Return(preferredHeight) preferredBlock.EXPECT().Timestamp().Return(preferredTimestamp) - preferredState := states.NewMockChain(ctrl) + preferredState := state.NewMockChain(ctrl) preferredState.EXPECT().GetLastAccepted().Return(preferredID) preferredState.EXPECT().GetTimestamp().Return(preferredTimestamp) @@ -133,11 +132,11 @@ func TestBuilderBuildBlock(t *testing.T) { tx := &txs.Tx{Unsigned: unsignedTx} mempool := mempool.NewMockMempool(ctrl) - mempool.EXPECT().Peek(gomock.Any()).Return(tx) + mempool.EXPECT().Peek().Return(tx, true) mempool.EXPECT().Remove([]*txs.Tx{tx}) mempool.EXPECT().MarkDropped(tx.ID(), errTest) // Second loop iteration - mempool.EXPECT().Peek(gomock.Any()).Return(nil) + mempool.EXPECT().Peek().Return(nil, false) mempool.EXPECT().RequestBuildBlock() return New( @@ -159,11 +158,11 @@ func TestBuilderBuildBlock(t *testing.T) { preferredID := ids.GenerateTestID() preferredHeight := uint64(1337) preferredTimestamp := time.Now() - preferredBlock := blocks.NewMockBlock(ctrl) + preferredBlock := block.NewMockBlock(ctrl) preferredBlock.EXPECT().Height().Return(preferredHeight) preferredBlock.EXPECT().Timestamp().Return(preferredTimestamp) - preferredState := states.NewMockChain(ctrl) + preferredState := state.NewMockChain(ctrl) preferredState.EXPECT().GetLastAccepted().Return(preferredID) preferredState.EXPECT().GetTimestamp().Return(preferredTimestamp) @@ -178,11 +177,11 @@ func TestBuilderBuildBlock(t *testing.T) { tx := &txs.Tx{Unsigned: unsignedTx} mempool := mempool.NewMockMempool(ctrl) - 
mempool.EXPECT().Peek(gomock.Any()).Return(tx) + mempool.EXPECT().Peek().Return(tx, true) mempool.EXPECT().Remove([]*txs.Tx{tx}) mempool.EXPECT().MarkDropped(tx.ID(), errTest) // Second loop iteration - mempool.EXPECT().Peek(gomock.Any()).Return(nil) + mempool.EXPECT().Peek().Return(nil, false) mempool.EXPECT().RequestBuildBlock() return New( @@ -204,11 +203,11 @@ func TestBuilderBuildBlock(t *testing.T) { preferredID := ids.GenerateTestID() preferredHeight := uint64(1337) preferredTimestamp := time.Now() - preferredBlock := blocks.NewMockBlock(ctrl) + preferredBlock := block.NewMockBlock(ctrl) preferredBlock.EXPECT().Height().Return(preferredHeight) preferredBlock.EXPECT().Timestamp().Return(preferredTimestamp) - preferredState := states.NewMockChain(ctrl) + preferredState := state.NewMockChain(ctrl) preferredState.EXPECT().GetLastAccepted().Return(preferredID) preferredState.EXPECT().GetTimestamp().Return(preferredTimestamp) @@ -224,11 +223,11 @@ func TestBuilderBuildBlock(t *testing.T) { tx := &txs.Tx{Unsigned: unsignedTx} mempool := mempool.NewMockMempool(ctrl) - mempool.EXPECT().Peek(gomock.Any()).Return(tx) + mempool.EXPECT().Peek().Return(tx, true) mempool.EXPECT().Remove([]*txs.Tx{tx}) mempool.EXPECT().MarkDropped(tx.ID(), errTest) // Second loop iteration - mempool.EXPECT().Peek(gomock.Any()).Return(nil) + mempool.EXPECT().Peek().Return(nil, false) mempool.EXPECT().RequestBuildBlock() return New( @@ -250,11 +249,11 @@ func TestBuilderBuildBlock(t *testing.T) { preferredID := ids.GenerateTestID() preferredHeight := uint64(1337) preferredTimestamp := time.Now() - preferredBlock := blocks.NewMockBlock(ctrl) + preferredBlock := block.NewMockBlock(ctrl) preferredBlock.EXPECT().Height().Return(preferredHeight) preferredBlock.EXPECT().Timestamp().Return(preferredTimestamp) - preferredState := states.NewMockChain(ctrl) + preferredState := state.NewMockChain(ctrl) preferredState.EXPECT().GetLastAccepted().Return(preferredID) 
preferredState.EXPECT().GetTimestamp().Return(preferredTimestamp) @@ -265,8 +264,8 @@ func TestBuilderBuildBlock(t *testing.T) { unsignedTx1.EXPECT().Visit(gomock.Any()).Return(nil) // Pass semantic verification unsignedTx1.EXPECT().Visit(gomock.Any()).DoAndReturn( // Pass execution func(visitor txs.Visitor) error { - executor, ok := visitor.(*txexecutor.Executor) - require.True(t, ok) + require.IsType(t, &txexecutor.Executor{}, visitor) + executor := visitor.(*txexecutor.Executor) executor.Inputs.Add(inputID) return nil }, @@ -282,8 +281,8 @@ func TestBuilderBuildBlock(t *testing.T) { unsignedTx2.EXPECT().Visit(gomock.Any()).Return(nil) // Pass semantic verification unsignedTx2.EXPECT().Visit(gomock.Any()).DoAndReturn( // Pass execution func(visitor txs.Visitor) error { - executor, ok := visitor.(*txexecutor.Executor) - require.True(t, ok) + require.IsType(t, &txexecutor.Executor{}, visitor) + executor := visitor.(*txexecutor.Executor) executor.Inputs.Add(inputID) return nil }, @@ -298,7 +297,7 @@ func TestBuilderBuildBlock(t *testing.T) { // Assert created block has one tx, tx1, // and other fields are set correctly. 
manager.EXPECT().NewBlock(gomock.Any()).DoAndReturn( - func(block *blocks.StandardBlock) snowman.Block { + func(block *block.StandardBlock) snowman.Block { require.Len(t, block.Transactions, 1) require.Equal(t, tx1, block.Transactions[0]) require.Equal(t, preferredHeight+1, block.Height()) @@ -308,14 +307,14 @@ func TestBuilderBuildBlock(t *testing.T) { ) mempool := mempool.NewMockMempool(ctrl) - mempool.EXPECT().Peek(targetBlockSize).Return(tx1) + mempool.EXPECT().Peek().Return(tx1, true) mempool.EXPECT().Remove([]*txs.Tx{tx1}) // Second loop iteration - mempool.EXPECT().Peek(targetBlockSize - len(tx1Bytes)).Return(tx2) + mempool.EXPECT().Peek().Return(tx2, true) mempool.EXPECT().Remove([]*txs.Tx{tx2}) mempool.EXPECT().MarkDropped(tx2.ID(), blkexecutor.ErrConflictingBlockTxs) // Third loop iteration - mempool.EXPECT().Peek(targetBlockSize - len(tx1Bytes)).Return(nil) + mempool.EXPECT().Peek().Return(nil, false) mempool.EXPECT().RequestBuildBlock() // To marshal the tx/block @@ -343,7 +342,7 @@ func TestBuilderBuildBlock(t *testing.T) { preferredID := ids.GenerateTestID() preferredHeight := uint64(1337) preferredTimestamp := time.Now() - preferredBlock := blocks.NewMockBlock(ctrl) + preferredBlock := block.NewMockBlock(ctrl) preferredBlock.EXPECT().Height().Return(preferredHeight) preferredBlock.EXPECT().Timestamp().Return(preferredTimestamp) @@ -352,7 +351,7 @@ func TestBuilderBuildBlock(t *testing.T) { clock := &mockable.Clock{} clock.Set(preferredTimestamp.Add(-2 * time.Second)) - preferredState := states.NewMockChain(ctrl) + preferredState := state.NewMockChain(ctrl) preferredState.EXPECT().GetLastAccepted().Return(preferredID) preferredState.EXPECT().GetTimestamp().Return(preferredTimestamp) @@ -363,7 +362,7 @@ func TestBuilderBuildBlock(t *testing.T) { manager.EXPECT().VerifyUniqueInputs(preferredID, gomock.Any()).Return(nil) // Assert that the created block has the right timestamp manager.EXPECT().NewBlock(gomock.Any()).DoAndReturn( - func(block 
*blocks.StandardBlock) snowman.Block { + func(block *block.StandardBlock) snowman.Block { require.Equal(t, preferredTimestamp.Unix(), block.Timestamp().Unix()) return nil }, @@ -374,8 +373,8 @@ func TestBuilderBuildBlock(t *testing.T) { unsignedTx.EXPECT().Visit(gomock.Any()).Return(nil) // Pass semantic verification unsignedTx.EXPECT().Visit(gomock.Any()).DoAndReturn( // Pass execution func(visitor txs.Visitor) error { - executor, ok := visitor.(*txexecutor.Executor) - require.True(t, ok) + require.IsType(t, &txexecutor.Executor{}, visitor) + executor := visitor.(*txexecutor.Executor) executor.Inputs.Add(inputID) return nil }, @@ -384,10 +383,10 @@ func TestBuilderBuildBlock(t *testing.T) { tx := &txs.Tx{Unsigned: unsignedTx} mempool := mempool.NewMockMempool(ctrl) - mempool.EXPECT().Peek(gomock.Any()).Return(tx) + mempool.EXPECT().Peek().Return(tx, true) mempool.EXPECT().Remove([]*txs.Tx{tx}) // Second loop iteration - mempool.EXPECT().Peek(gomock.Any()).Return(nil) + mempool.EXPECT().Peek().Return(nil, false) mempool.EXPECT().RequestBuildBlock() // To marshal the tx/block @@ -417,7 +416,7 @@ func TestBuilderBuildBlock(t *testing.T) { // preferred block's timestamp is after the time reported by clock now := time.Now() preferredTimestamp := now.Add(-2 * time.Second) - preferredBlock := blocks.NewMockBlock(ctrl) + preferredBlock := block.NewMockBlock(ctrl) preferredBlock.EXPECT().Height().Return(preferredHeight) preferredBlock.EXPECT().Timestamp().Return(preferredTimestamp) @@ -426,7 +425,7 @@ func TestBuilderBuildBlock(t *testing.T) { clock := &mockable.Clock{} clock.Set(now) - preferredState := states.NewMockChain(ctrl) + preferredState := state.NewMockChain(ctrl) preferredState.EXPECT().GetLastAccepted().Return(preferredID) preferredState.EXPECT().GetTimestamp().Return(preferredTimestamp) @@ -437,7 +436,7 @@ func TestBuilderBuildBlock(t *testing.T) { manager.EXPECT().VerifyUniqueInputs(preferredID, gomock.Any()).Return(nil) // Assert that the created block has 
the right timestamp manager.EXPECT().NewBlock(gomock.Any()).DoAndReturn( - func(block *blocks.StandardBlock) snowman.Block { + func(block *block.StandardBlock) snowman.Block { require.Equal(t, now.Unix(), block.Timestamp().Unix()) return nil }, @@ -448,8 +447,8 @@ func TestBuilderBuildBlock(t *testing.T) { unsignedTx.EXPECT().Visit(gomock.Any()).Return(nil) // Pass semantic verification unsignedTx.EXPECT().Visit(gomock.Any()).DoAndReturn( // Pass execution func(visitor txs.Visitor) error { - executor, ok := visitor.(*txexecutor.Executor) - require.True(t, ok) + require.IsType(t, &txexecutor.Executor{}, visitor) + executor := visitor.(*txexecutor.Executor) executor.Inputs.Add(inputID) return nil }, @@ -458,10 +457,10 @@ func TestBuilderBuildBlock(t *testing.T) { tx := &txs.Tx{Unsigned: unsignedTx} mempool := mempool.NewMockMempool(ctrl) - mempool.EXPECT().Peek(gomock.Any()).Return(tx) + mempool.EXPECT().Peek().Return(tx, true) mempool.EXPECT().Remove([]*txs.Tx{tx}) // Second loop iteration - mempool.EXPECT().Peek(gomock.Any()).Return(nil) + mempool.EXPECT().Peek().Return(nil, false) mempool.EXPECT().RequestBuildBlock() // To marshal the tx/block @@ -487,13 +486,11 @@ func TestBuilderBuildBlock(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() builder := tt.builderFunc(ctrl) _, err := builder.BuildBlock(context.Background()) - require.ErrorIs(err, tt.expectedErr) + require.ErrorIs(t, err, tt.expectedErr) }) } } @@ -510,16 +507,13 @@ func TestBlockBuilderAddLocalTx(t *testing.T) { // add a tx to the mempool tx := transactions[0] txID := tx.ID() - err = mempool.Add(tx) - require.NoError(err) - - has := mempool.Has(txID) - require.True(has) + require.NoError(mempool.Add(tx)) - ctrl := gomock.NewController(t) - defer ctrl.Finish() + _, ok := mempool.Get(txID) + require.True(ok) - parser, err := blocks.NewParser( + parser, err := block.NewParser( + time.Time{}, 
time.Time{}, []fxs.Fx{ &secp256k1fx.Fx{}, @@ -534,10 +528,9 @@ func TestBlockBuilderAddLocalTx(t *testing.T) { Codec: parser.Codec(), } - baseDBManager := manager.NewMemDB(version.Semantic1_0_0) - baseDB := versiondb.New(baseDBManager.Current().Database) + baseDB := versiondb.New(memdb.New()) - state, err := states.New(baseDB, parser, registerer) + state, err := state.New(baseDB, parser, registerer, trackChecksums) require.NoError(err) clk := &mockable.Clock{} @@ -548,7 +541,7 @@ func TestBlockBuilderAddLocalTx(t *testing.T) { cm := parser.Codec() txs, err := createParentTxs(cm) require.NoError(err) - parentBlk, err := blocks.NewStandardBlock(parentID, 0, parentTimestamp, txs, cm) + parentBlk, err := block.NewStandardBlock(parentID, 0, parentTimestamp, txs, cm) require.NoError(err) state.AddBlock(parentBlk) state.SetLastAccepted(parentBlk.ID()) @@ -597,7 +590,7 @@ func createTxs() []*txs.Tx { }}, Creds: []*fxs.FxCredential{ { - Verifiable: &secp256k1fx.Credential{}, + Credential: &secp256k1fx.Credential{}, }, }, }} diff --git a/avalanchego/vms/avm/blocks/executor/block.go b/avalanchego/vms/avm/block/executor/block.go similarity index 92% rename from avalanchego/vms/avm/blocks/executor/block.go rename to avalanchego/vms/avm/block/executor/block.go index b2743abe..8663f27b 100644 --- a/avalanchego/vms/avm/blocks/executor/block.go +++ b/avalanchego/vms/avm/block/executor/block.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package executor @@ -16,8 +16,8 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowman" - "github.com/ava-labs/avalanchego/vms/avm/blocks" - "github.com/ava-labs/avalanchego/vms/avm/states" + "github.com/ava-labs/avalanchego/vms/avm/block" + "github.com/ava-labs/avalanchego/vms/avm/state" "github.com/ava-labs/avalanchego/vms/avm/txs/executor" ) @@ -37,7 +37,7 @@ var ( // Exported for testing in avm package. type Block struct { - blocks.Block + block.Block manager *manager rejected bool } @@ -106,7 +106,7 @@ func (b *Block) Verify(context.Context) error { ) } - stateDiff, err := states.NewDiff(parentID, b.manager) + stateDiff, err := state.NewDiff(parentID, b.manager) if err != nil { return err } @@ -200,7 +200,7 @@ func (b *Block) Verify(context.Context) error { stateDiff.AddBlock(b.Block) b.manager.blkIDToState[blkID] = blockState - b.manager.mempool.Remove(txs) + b.manager.mempool.Remove(txs...) return nil } @@ -208,13 +208,6 @@ func (b *Block) Accept(context.Context) error { blkID := b.ID() defer b.manager.free(blkID) - b.manager.backend.Ctx.Log.Debug( - "accepting block", - zap.Stringer("blkID", blkID), - zap.Uint64("height", b.Height()), - zap.Stringer("parentID", b.Parent()), - ) - txs := b.Txs() for _, tx := range txs { if err := b.manager.onAccept(tx); err != nil { @@ -227,7 +220,7 @@ func (b *Block) Accept(context.Context) error { } b.manager.lastAccepted = blkID - b.manager.mempool.Remove(txs) + b.manager.mempool.Remove(txs...) 
blkState, ok := b.manager.blkIDToState[blkID] if !ok { @@ -251,7 +244,21 @@ func (b *Block) Accept(context.Context) error { if err := b.manager.backend.Ctx.SharedMemory.Apply(blkState.atomicRequests, batch); err != nil { return fmt.Errorf("failed to apply state diff to shared memory: %w", err) } - return b.manager.metrics.MarkBlockAccepted(b) + + if err := b.manager.metrics.MarkBlockAccepted(b); err != nil { + return err + } + + txChecksum, utxoChecksum := b.manager.state.Checksums() + b.manager.backend.Ctx.Log.Trace( + "accepted block", + zap.Stringer("blkID", blkID), + zap.Uint64("height", b.Height()), + zap.Stringer("parentID", b.Parent()), + zap.Stringer("txChecksum", txChecksum), + zap.Stringer("utxoChecksum", utxoChecksum), + ) + return nil } func (b *Block) Reject(context.Context) error { @@ -283,6 +290,10 @@ func (b *Block) Reject(context.Context) error { } } + // If we added transactions to the mempool, we should be willing to build a + // block. + b.manager.mempool.RequestBuildBlock() + b.rejected = true return nil } diff --git a/avalanchego/vms/avm/blocks/executor/block_test.go b/avalanchego/vms/avm/block/executor/block_test.go similarity index 88% rename from avalanchego/vms/avm/blocks/executor/block_test.go rename to avalanchego/vms/avm/block/executor/block_test.go index 781ede46..988f02be 100644 --- a/avalanchego/vms/avm/blocks/executor/block_test.go +++ b/avalanchego/vms/avm/block/executor/block_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package executor @@ -9,9 +9,8 @@ import ( "testing" "time" - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/database" @@ -22,9 +21,9 @@ import ( "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/timer/mockable" - "github.com/ava-labs/avalanchego/vms/avm/blocks" + "github.com/ava-labs/avalanchego/vms/avm/block" "github.com/ava-labs/avalanchego/vms/avm/metrics" - "github.com/ava-labs/avalanchego/vms/avm/states" + "github.com/ava-labs/avalanchego/vms/avm/state" "github.com/ava-labs/avalanchego/vms/avm/txs" "github.com/ava-labs/avalanchego/vms/avm/txs/executor" "github.com/ava-labs/avalanchego/vms/avm/txs/mempool" @@ -41,7 +40,7 @@ func TestBlockVerify(t *testing.T) { { name: "block already verified", blockFunc: func(ctrl *gomock.Controller) *Block { - mockBlock := blocks.NewMockBlock(ctrl) + mockBlock := block.NewMockBlock(ctrl) mockBlock.EXPECT().ID().Return(ids.Empty).AnyTimes() b := &Block{ Block: mockBlock, @@ -59,7 +58,7 @@ func TestBlockVerify(t *testing.T) { { name: "block timestamp too far in the future", blockFunc: func(ctrl *gomock.Controller) *Block { - mockBlock := blocks.NewMockBlock(ctrl) + mockBlock := block.NewMockBlock(ctrl) mockBlock.EXPECT().ID().Return(ids.Empty).AnyTimes() mockBlock.EXPECT().MerkleRoot().Return(ids.GenerateTestID()).AnyTimes() return &Block{ @@ -72,7 +71,7 @@ func TestBlockVerify(t *testing.T) { { name: "block timestamp too far in the future", blockFunc: func(ctrl *gomock.Controller) *Block { - mockBlock := blocks.NewMockBlock(ctrl) + mockBlock := block.NewMockBlock(ctrl) mockBlock.EXPECT().ID().Return(ids.Empty).AnyTimes() mockBlock.EXPECT().MerkleRoot().Return(ids.Empty).AnyTimes() now := time.Now() @@ -92,7 +91,7 @@ func TestBlockVerify(t *testing.T) { { name: "block contains no transactions", blockFunc: func(ctrl 
*gomock.Controller) *Block { - mockBlock := blocks.NewMockBlock(ctrl) + mockBlock := block.NewMockBlock(ctrl) mockBlock.EXPECT().ID().Return(ids.Empty).AnyTimes() mockBlock.EXPECT().MerkleRoot().Return(ids.Empty).AnyTimes() mockBlock.EXPECT().Timestamp().Return(time.Now()).AnyTimes() @@ -110,7 +109,7 @@ func TestBlockVerify(t *testing.T) { { name: "block transaction fails verification", blockFunc: func(ctrl *gomock.Controller) *Block { - mockBlock := blocks.NewMockBlock(ctrl) + mockBlock := block.NewMockBlock(ctrl) mockBlock.EXPECT().ID().Return(ids.Empty).AnyTimes() mockBlock.EXPECT().MerkleRoot().Return(ids.Empty).AnyTimes() mockBlock.EXPECT().Timestamp().Return(time.Now()).AnyTimes() @@ -138,7 +137,7 @@ func TestBlockVerify(t *testing.T) { { name: "parent doesn't exist", blockFunc: func(ctrl *gomock.Controller) *Block { - mockBlock := blocks.NewMockBlock(ctrl) + mockBlock := block.NewMockBlock(ctrl) mockBlock.EXPECT().ID().Return(ids.Empty).AnyTimes() mockBlock.EXPECT().MerkleRoot().Return(ids.Empty).AnyTimes() mockBlock.EXPECT().Timestamp().Return(time.Now()).AnyTimes() @@ -153,7 +152,7 @@ func TestBlockVerify(t *testing.T) { parentID := ids.GenerateTestID() mockBlock.EXPECT().Parent().Return(parentID).AnyTimes() - mockState := states.NewMockState(ctrl) + mockState := state.NewMockState(ctrl) mockState.EXPECT().GetBlock(parentID).Return(nil, errTest) return &Block{ Block: mockBlock, @@ -169,7 +168,7 @@ func TestBlockVerify(t *testing.T) { { name: "block height isn't parent height + 1", blockFunc: func(ctrl *gomock.Controller) *Block { - mockBlock := blocks.NewMockBlock(ctrl) + mockBlock := block.NewMockBlock(ctrl) mockBlock.EXPECT().ID().Return(ids.Empty).AnyTimes() mockBlock.EXPECT().MerkleRoot().Return(ids.Empty).AnyTimes() mockBlock.EXPECT().Timestamp().Return(time.Now()).AnyTimes() @@ -186,8 +185,8 @@ func TestBlockVerify(t *testing.T) { parentID := ids.GenerateTestID() mockBlock.EXPECT().Parent().Return(parentID).AnyTimes() - mockState := 
states.NewMockState(ctrl) - mockParentBlock := blocks.NewMockBlock(ctrl) + mockState := state.NewMockState(ctrl) + mockParentBlock := block.NewMockBlock(ctrl) mockParentBlock.EXPECT().Height().Return(blockHeight) // Should be blockHeight - 1 mockState.EXPECT().GetBlock(parentID).Return(mockParentBlock, nil) @@ -205,7 +204,7 @@ func TestBlockVerify(t *testing.T) { { name: "block timestamp before parent timestamp", blockFunc: func(ctrl *gomock.Controller) *Block { - mockBlock := blocks.NewMockBlock(ctrl) + mockBlock := block.NewMockBlock(ctrl) mockBlock.EXPECT().ID().Return(ids.Empty).AnyTimes() mockBlock.EXPECT().MerkleRoot().Return(ids.Empty).AnyTimes() blockTimestamp := time.Now() @@ -223,10 +222,10 @@ func TestBlockVerify(t *testing.T) { parentID := ids.GenerateTestID() mockBlock.EXPECT().Parent().Return(parentID).AnyTimes() - mockParentBlock := blocks.NewMockBlock(ctrl) + mockParentBlock := block.NewMockBlock(ctrl) mockParentBlock.EXPECT().Height().Return(blockHeight - 1) - mockParentState := states.NewMockDiff(ctrl) + mockParentState := state.NewMockDiff(ctrl) mockParentState.EXPECT().GetLastAccepted().Return(parentID) mockParentState.EXPECT().GetTimestamp().Return(blockTimestamp.Add(1)) @@ -249,7 +248,7 @@ func TestBlockVerify(t *testing.T) { { name: "tx fails semantic verification", blockFunc: func(ctrl *gomock.Controller) *Block { - mockBlock := blocks.NewMockBlock(ctrl) + mockBlock := block.NewMockBlock(ctrl) mockBlock.EXPECT().ID().Return(ids.Empty).AnyTimes() mockBlock.EXPECT().MerkleRoot().Return(ids.Empty).AnyTimes() blockTimestamp := time.Now() @@ -268,10 +267,10 @@ func TestBlockVerify(t *testing.T) { parentID := ids.GenerateTestID() mockBlock.EXPECT().Parent().Return(parentID).AnyTimes() - mockParentBlock := blocks.NewMockBlock(ctrl) + mockParentBlock := block.NewMockBlock(ctrl) mockParentBlock.EXPECT().Height().Return(blockHeight - 1) - mockParentState := states.NewMockDiff(ctrl) + mockParentState := state.NewMockDiff(ctrl) 
mockParentState.EXPECT().GetLastAccepted().Return(parentID) mockParentState.EXPECT().GetTimestamp().Return(blockTimestamp) @@ -298,7 +297,7 @@ func TestBlockVerify(t *testing.T) { { name: "tx fails execution", blockFunc: func(ctrl *gomock.Controller) *Block { - mockBlock := blocks.NewMockBlock(ctrl) + mockBlock := block.NewMockBlock(ctrl) mockBlock.EXPECT().ID().Return(ids.Empty).AnyTimes() mockBlock.EXPECT().MerkleRoot().Return(ids.Empty).AnyTimes() blockTimestamp := time.Now() @@ -318,10 +317,10 @@ func TestBlockVerify(t *testing.T) { parentID := ids.GenerateTestID() mockBlock.EXPECT().Parent().Return(parentID).AnyTimes() - mockParentBlock := blocks.NewMockBlock(ctrl) + mockParentBlock := block.NewMockBlock(ctrl) mockParentBlock.EXPECT().Height().Return(blockHeight - 1) - mockParentState := states.NewMockDiff(ctrl) + mockParentState := state.NewMockDiff(ctrl) mockParentState.EXPECT().GetLastAccepted().Return(parentID) mockParentState.EXPECT().GetTimestamp().Return(blockTimestamp) @@ -349,7 +348,7 @@ func TestBlockVerify(t *testing.T) { { name: "tx imported inputs overlap", blockFunc: func(ctrl *gomock.Controller) *Block { - mockBlock := blocks.NewMockBlock(ctrl) + mockBlock := block.NewMockBlock(ctrl) mockBlock.EXPECT().ID().Return(ids.Empty).AnyTimes() mockBlock.EXPECT().MerkleRoot().Return(ids.Empty).AnyTimes() blockTimestamp := time.Now() @@ -396,10 +395,10 @@ func TestBlockVerify(t *testing.T) { parentID := ids.GenerateTestID() mockBlock.EXPECT().Parent().Return(parentID).AnyTimes() - mockParentBlock := blocks.NewMockBlock(ctrl) + mockParentBlock := block.NewMockBlock(ctrl) mockParentBlock.EXPECT().Height().Return(blockHeight - 1) - mockParentState := states.NewMockDiff(ctrl) + mockParentState := state.NewMockDiff(ctrl) mockParentState.EXPECT().GetLastAccepted().Return(parentID) mockParentState.EXPECT().GetTimestamp().Return(blockTimestamp) @@ -427,7 +426,7 @@ func TestBlockVerify(t *testing.T) { { name: "tx input overlaps with other tx", blockFunc: func(ctrl 
*gomock.Controller) *Block { - mockBlock := blocks.NewMockBlock(ctrl) + mockBlock := block.NewMockBlock(ctrl) mockBlock.EXPECT().ID().Return(ids.Empty).AnyTimes() mockBlock.EXPECT().MerkleRoot().Return(ids.Empty).AnyTimes() blockTimestamp := time.Now() @@ -458,10 +457,10 @@ func TestBlockVerify(t *testing.T) { parentID := ids.GenerateTestID() mockBlock.EXPECT().Parent().Return(parentID).AnyTimes() - mockParentBlock := blocks.NewMockBlock(ctrl) + mockParentBlock := block.NewMockBlock(ctrl) mockParentBlock.EXPECT().Height().Return(blockHeight - 1) - mockParentState := states.NewMockDiff(ctrl) + mockParentState := state.NewMockDiff(ctrl) mockParentState.EXPECT().GetLastAccepted().Return(parentID) mockParentState.EXPECT().GetTimestamp().Return(blockTimestamp) @@ -473,7 +472,7 @@ func TestBlockVerify(t *testing.T) { parentID: { onAcceptState: mockParentState, statelessBlock: mockParentBlock, - importedInputs: set.Set[ids.ID]{inputID: struct{}{}}, + importedInputs: set.Of(inputID), }, }, clk: &mockable.Clock{}, @@ -486,7 +485,7 @@ func TestBlockVerify(t *testing.T) { { name: "happy path", blockFunc: func(ctrl *gomock.Controller) *Block { - mockBlock := blocks.NewMockBlock(ctrl) + mockBlock := block.NewMockBlock(ctrl) mockBlock.EXPECT().ID().Return(ids.Empty).AnyTimes() mockBlock.EXPECT().MerkleRoot().Return(ids.Empty).AnyTimes() blockTimestamp := time.Now() @@ -506,10 +505,10 @@ func TestBlockVerify(t *testing.T) { parentID := ids.GenerateTestID() mockBlock.EXPECT().Parent().Return(parentID).AnyTimes() - mockParentBlock := blocks.NewMockBlock(ctrl) + mockParentBlock := block.NewMockBlock(ctrl) mockParentBlock.EXPECT().Height().Return(blockHeight - 1) - mockParentState := states.NewMockDiff(ctrl) + mockParentState := state.NewMockDiff(ctrl) mockParentState.EXPECT().GetLastAccepted().Return(parentID) mockParentState.EXPECT().GetTimestamp().Return(blockTimestamp) @@ -561,7 +560,6 @@ func TestBlockVerify(t *testing.T) { t.Run(tt.name, func(t *testing.T) { require := 
require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() b := tt.blockFunc(ctrl) err := b.Verify(context.Background()) @@ -583,10 +581,8 @@ func TestBlockAccept(t *testing.T) { { name: "block not found", blockFunc: func(ctrl *gomock.Controller) *Block { - mockBlock := blocks.NewMockBlock(ctrl) + mockBlock := block.NewMockBlock(ctrl) mockBlock.EXPECT().ID().Return(ids.GenerateTestID()).AnyTimes() - mockBlock.EXPECT().Height().Return(uint64(0)).AnyTimes() - mockBlock.EXPECT().Parent().Return(ids.GenerateTestID()).AnyTimes() mockBlock.EXPECT().Txs().Return([]*txs.Tx{}).AnyTimes() mempool := mempool.NewMockMempool(ctrl) @@ -612,20 +608,18 @@ func TestBlockAccept(t *testing.T) { name: "can't get commit batch", blockFunc: func(ctrl *gomock.Controller) *Block { blockID := ids.GenerateTestID() - mockBlock := blocks.NewMockBlock(ctrl) + mockBlock := block.NewMockBlock(ctrl) mockBlock.EXPECT().ID().Return(blockID).AnyTimes() - mockBlock.EXPECT().Height().Return(uint64(0)).AnyTimes() - mockBlock.EXPECT().Parent().Return(ids.GenerateTestID()).AnyTimes() mockBlock.EXPECT().Txs().Return([]*txs.Tx{}).AnyTimes() mempool := mempool.NewMockMempool(ctrl) mempool.EXPECT().Remove(gomock.Any()).AnyTimes() - mockManagerState := states.NewMockState(ctrl) + mockManagerState := state.NewMockState(ctrl) mockManagerState.EXPECT().CommitBatch().Return(nil, errTest) mockManagerState.EXPECT().Abort() - mockOnAcceptState := states.NewMockDiff(ctrl) + mockOnAcceptState := state.NewMockDiff(ctrl) mockOnAcceptState.EXPECT().Apply(mockManagerState) return &Block{ @@ -652,16 +646,14 @@ func TestBlockAccept(t *testing.T) { name: "can't apply shared memory", blockFunc: func(ctrl *gomock.Controller) *Block { blockID := ids.GenerateTestID() - mockBlock := blocks.NewMockBlock(ctrl) + mockBlock := block.NewMockBlock(ctrl) mockBlock.EXPECT().ID().Return(blockID).AnyTimes() - mockBlock.EXPECT().Height().Return(uint64(0)).AnyTimes() - 
mockBlock.EXPECT().Parent().Return(ids.GenerateTestID()).AnyTimes() mockBlock.EXPECT().Txs().Return([]*txs.Tx{}).AnyTimes() mempool := mempool.NewMockMempool(ctrl) mempool.EXPECT().Remove(gomock.Any()).AnyTimes() - mockManagerState := states.NewMockState(ctrl) + mockManagerState := state.NewMockState(ctrl) // Note the returned batch is nil but not used // because we mock the call to shared memory mockManagerState.EXPECT().CommitBatch().Return(nil, nil) @@ -670,7 +662,7 @@ func TestBlockAccept(t *testing.T) { mockSharedMemory := atomic.NewMockSharedMemory(ctrl) mockSharedMemory.EXPECT().Apply(gomock.Any(), gomock.Any()).Return(errTest) - mockOnAcceptState := states.NewMockDiff(ctrl) + mockOnAcceptState := state.NewMockDiff(ctrl) mockOnAcceptState.EXPECT().Apply(mockManagerState) return &Block{ @@ -698,16 +690,14 @@ func TestBlockAccept(t *testing.T) { name: "failed to apply metrics", blockFunc: func(ctrl *gomock.Controller) *Block { blockID := ids.GenerateTestID() - mockBlock := blocks.NewMockBlock(ctrl) + mockBlock := block.NewMockBlock(ctrl) mockBlock.EXPECT().ID().Return(blockID).AnyTimes() - mockBlock.EXPECT().Height().Return(uint64(0)).AnyTimes() - mockBlock.EXPECT().Parent().Return(ids.GenerateTestID()).AnyTimes() mockBlock.EXPECT().Txs().Return([]*txs.Tx{}).AnyTimes() mempool := mempool.NewMockMempool(ctrl) mempool.EXPECT().Remove(gomock.Any()).AnyTimes() - mockManagerState := states.NewMockState(ctrl) + mockManagerState := state.NewMockState(ctrl) // Note the returned batch is nil but not used // because we mock the call to shared memory mockManagerState.EXPECT().CommitBatch().Return(nil, nil) @@ -716,7 +706,7 @@ func TestBlockAccept(t *testing.T) { mockSharedMemory := atomic.NewMockSharedMemory(ctrl) mockSharedMemory.EXPECT().Apply(gomock.Any(), gomock.Any()).Return(nil) - mockOnAcceptState := states.NewMockDiff(ctrl) + mockOnAcceptState := state.NewMockDiff(ctrl) mockOnAcceptState.EXPECT().Apply(mockManagerState) metrics := metrics.NewMockMetrics(ctrl) @@ 
-748,7 +738,7 @@ func TestBlockAccept(t *testing.T) { name: "no error", blockFunc: func(ctrl *gomock.Controller) *Block { blockID := ids.GenerateTestID() - mockBlock := blocks.NewMockBlock(ctrl) + mockBlock := block.NewMockBlock(ctrl) mockBlock.EXPECT().ID().Return(blockID).AnyTimes() mockBlock.EXPECT().Height().Return(uint64(0)).AnyTimes() mockBlock.EXPECT().Parent().Return(ids.GenerateTestID()).AnyTimes() @@ -757,16 +747,17 @@ func TestBlockAccept(t *testing.T) { mempool := mempool.NewMockMempool(ctrl) mempool.EXPECT().Remove(gomock.Any()).AnyTimes() - mockManagerState := states.NewMockState(ctrl) + mockManagerState := state.NewMockState(ctrl) // Note the returned batch is nil but not used // because we mock the call to shared memory mockManagerState.EXPECT().CommitBatch().Return(nil, nil) mockManagerState.EXPECT().Abort() + mockManagerState.EXPECT().Checksums().Return(ids.Empty, ids.Empty) mockSharedMemory := atomic.NewMockSharedMemory(ctrl) mockSharedMemory.EXPECT().Apply(gomock.Any(), gomock.Any()).Return(nil) - mockOnAcceptState := states.NewMockDiff(ctrl) + mockOnAcceptState := state.NewMockDiff(ctrl) mockOnAcceptState.EXPECT().Apply(mockManagerState) metrics := metrics.NewMockMetrics(ctrl) @@ -799,7 +790,6 @@ func TestBlockAccept(t *testing.T) { t.Run(tt.name, func(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() b := tt.blockFunc(ctrl) err := b.Accept(context.Background()) @@ -823,7 +813,7 @@ func TestBlockReject(t *testing.T) { name: "one tx passes verification; one fails syntactic verification; one fails semantic verification; one fails execution", blockFunc: func(ctrl *gomock.Controller) *Block { blockID := ids.GenerateTestID() - mockBlock := blocks.NewMockBlock(ctrl) + mockBlock := block.NewMockBlock(ctrl) mockBlock.EXPECT().ID().Return(blockID).AnyTimes() mockBlock.EXPECT().Height().Return(uint64(0)).AnyTimes() mockBlock.EXPECT().Parent().Return(ids.GenerateTestID()).AnyTimes() @@ -866,28 +856,27 @@ func 
TestBlockReject(t *testing.T) { mempool := mempool.NewMockMempool(ctrl) mempool.EXPECT().Add(validTx).Return(nil) // Only add the one that passes verification + mempool.EXPECT().RequestBuildBlock() - preferredID := ids.GenerateTestID() - mockPreferredState := states.NewMockDiff(ctrl) - mockPreferredState.EXPECT().GetLastAccepted().Return(ids.GenerateTestID()).AnyTimes() - mockPreferredState.EXPECT().GetTimestamp().Return(time.Now()).AnyTimes() + lastAcceptedID := ids.GenerateTestID() + mockState := state.NewMockState(ctrl) + mockState.EXPECT().GetLastAccepted().Return(lastAcceptedID).AnyTimes() + mockState.EXPECT().GetTimestamp().Return(time.Now()).AnyTimes() return &Block{ Block: mockBlock, manager: &manager{ - preferred: preferredID, - mempool: mempool, - metrics: metrics.NewMockMetrics(ctrl), + lastAccepted: lastAcceptedID, + mempool: mempool, + metrics: metrics.NewMockMetrics(ctrl), backend: &executor.Backend{ Bootstrapped: true, Ctx: &snow.Context{ Log: logging.NoLog{}, }, }, + state: mockState, blkIDToState: map[ids.ID]*blockState{ - preferredID: { - onAcceptState: mockPreferredState, - }, blockID: {}, }, }, @@ -898,7 +887,7 @@ func TestBlockReject(t *testing.T) { name: "all txs valid", blockFunc: func(ctrl *gomock.Controller) *Block { blockID := ids.GenerateTestID() - mockBlock := blocks.NewMockBlock(ctrl) + mockBlock := block.NewMockBlock(ctrl) mockBlock.EXPECT().ID().Return(blockID).AnyTimes() mockBlock.EXPECT().Height().Return(uint64(0)).AnyTimes() mockBlock.EXPECT().Parent().Return(ids.GenerateTestID()).AnyTimes() @@ -925,28 +914,27 @@ func TestBlockReject(t *testing.T) { mempool := mempool.NewMockMempool(ctrl) mempool.EXPECT().Add(tx1).Return(nil) mempool.EXPECT().Add(tx2).Return(nil) + mempool.EXPECT().RequestBuildBlock() - preferredID := ids.GenerateTestID() - mockPreferredState := states.NewMockDiff(ctrl) - mockPreferredState.EXPECT().GetLastAccepted().Return(ids.GenerateTestID()).AnyTimes() - 
mockPreferredState.EXPECT().GetTimestamp().Return(time.Now()).AnyTimes() + lastAcceptedID := ids.GenerateTestID() + mockState := state.NewMockState(ctrl) + mockState.EXPECT().GetLastAccepted().Return(lastAcceptedID).AnyTimes() + mockState.EXPECT().GetTimestamp().Return(time.Now()).AnyTimes() return &Block{ Block: mockBlock, manager: &manager{ - preferred: preferredID, - mempool: mempool, - metrics: metrics.NewMockMetrics(ctrl), + lastAccepted: lastAcceptedID, + mempool: mempool, + metrics: metrics.NewMockMetrics(ctrl), backend: &executor.Backend{ Bootstrapped: true, Ctx: &snow.Context{ Log: logging.NoLog{}, }, }, + state: mockState, blkIDToState: map[ids.ID]*blockState{ - preferredID: { - onAcceptState: mockPreferredState, - }, blockID: {}, }, }, @@ -958,11 +946,9 @@ func TestBlockReject(t *testing.T) { t.Run(tt.name, func(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() b := tt.blockFunc(ctrl) - err := b.Reject(context.Background()) - require.NoError(err) + require.NoError(b.Reject(context.Background())) require.True(b.rejected) _, ok := b.manager.blkIDToState[b.ID()] require.False(ok) @@ -979,7 +965,7 @@ func TestBlockStatus(t *testing.T) { tests := []test{ { name: "block is rejected", - blockFunc: func(ctrl *gomock.Controller) *Block { + blockFunc: func(*gomock.Controller) *Block { return &Block{ rejected: true, } @@ -990,7 +976,7 @@ func TestBlockStatus(t *testing.T) { name: "block is last accepted", blockFunc: func(ctrl *gomock.Controller) *Block { blockID := ids.GenerateTestID() - mockBlock := blocks.NewMockBlock(ctrl) + mockBlock := block.NewMockBlock(ctrl) mockBlock.EXPECT().ID().Return(blockID).AnyTimes() return &Block{ Block: mockBlock, @@ -1005,7 +991,7 @@ func TestBlockStatus(t *testing.T) { name: "block is processing", blockFunc: func(ctrl *gomock.Controller) *Block { blockID := ids.GenerateTestID() - mockBlock := blocks.NewMockBlock(ctrl) + mockBlock := block.NewMockBlock(ctrl) 
mockBlock.EXPECT().ID().Return(blockID).AnyTimes() return &Block{ Block: mockBlock, @@ -1022,10 +1008,10 @@ func TestBlockStatus(t *testing.T) { name: "block is accepted but not last accepted", blockFunc: func(ctrl *gomock.Controller) *Block { blockID := ids.GenerateTestID() - mockBlock := blocks.NewMockBlock(ctrl) + mockBlock := block.NewMockBlock(ctrl) mockBlock.EXPECT().ID().Return(blockID).AnyTimes() - mockState := states.NewMockState(ctrl) + mockState := state.NewMockState(ctrl) mockState.EXPECT().GetBlock(blockID).Return(nil, nil) return &Block{ @@ -1042,10 +1028,10 @@ func TestBlockStatus(t *testing.T) { name: "block is unknown", blockFunc: func(ctrl *gomock.Controller) *Block { blockID := ids.GenerateTestID() - mockBlock := blocks.NewMockBlock(ctrl) + mockBlock := block.NewMockBlock(ctrl) mockBlock.EXPECT().ID().Return(blockID).AnyTimes() - mockState := states.NewMockState(ctrl) + mockState := state.NewMockState(ctrl) mockState.EXPECT().GetBlock(blockID).Return(nil, database.ErrNotFound) return &Block{ @@ -1063,7 +1049,6 @@ func TestBlockStatus(t *testing.T) { t.Run(tt.name, func(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() b := tt.blockFunc(ctrl) require.Equal(tt.expected, b.Status()) diff --git a/avalanchego/vms/avm/blocks/executor/manager.go b/avalanchego/vms/avm/block/executor/manager.go similarity index 82% rename from avalanchego/vms/avm/blocks/executor/manager.go rename to avalanchego/vms/avm/block/executor/manager.go index 0a233b6a..9822743b 100644 --- a/avalanchego/vms/avm/blocks/executor/manager.go +++ b/avalanchego/vms/avm/block/executor/manager.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package executor @@ -11,9 +11,9 @@ import ( "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/timer/mockable" - "github.com/ava-labs/avalanchego/vms/avm/blocks" + "github.com/ava-labs/avalanchego/vms/avm/block" "github.com/ava-labs/avalanchego/vms/avm/metrics" - "github.com/ava-labs/avalanchego/vms/avm/states" + "github.com/ava-labs/avalanchego/vms/avm/state" "github.com/ava-labs/avalanchego/vms/avm/txs" "github.com/ava-labs/avalanchego/vms/avm/txs/executor" "github.com/ava-labs/avalanchego/vms/avm/txs/mempool" @@ -27,7 +27,7 @@ var ( ) type Manager interface { - states.Versions + state.Versions // Returns the ID of the most recently accepted block. LastAccepted() ids.ID @@ -36,22 +36,22 @@ type Manager interface { Preferred() ids.ID GetBlock(blkID ids.ID) (snowman.Block, error) - GetStatelessBlock(blkID ids.ID) (blocks.Block, error) - NewBlock(blocks.Block) snowman.Block + GetStatelessBlock(blkID ids.ID) (block.Block, error) + NewBlock(block.Block) snowman.Block - // VerifyTx verifies that the transaction can be issued based on the - // currently preferred state. + // VerifyTx verifies that the transaction can be issued based on the currently + // preferred state. This should *not* be used to verify transactions in a block. VerifyTx(tx *txs.Tx) error - // VerifyUniqueInputs verifies that the inputs are not duplicated in the - // provided blk or any of its ancestors pinned in memory. + // VerifyUniqueInputs returns nil iff no blocks in the inclusive + // ancestry of [blkID] consume an input in [inputs]. 
VerifyUniqueInputs(blkID ids.ID, inputs set.Set[ids.ID]) error } func NewManager( mempool mempool.Mempool, metrics metrics.Metrics, - state states.State, + state state.State, backend *executor.Backend, clk *mockable.Clock, onAccept func(*txs.Tx) error, @@ -72,7 +72,7 @@ func NewManager( type manager struct { backend *executor.Backend - state states.State + state state.State metrics metrics.Metrics mempool mempool.Mempool clk *mockable.Clock @@ -92,13 +92,13 @@ type manager struct { } type blockState struct { - statelessBlock blocks.Block - onAcceptState states.Diff + statelessBlock block.Block + onAcceptState state.Diff importedInputs set.Set[ids.ID] atomicRequests map[ids.ID]*atomic.Requests } -func (m *manager) GetState(blkID ids.ID) (states.Chain, bool) { +func (m *manager) GetState(blkID ids.ID) (state.Chain, bool) { // If the block is in the map, it is processing. if state, ok := m.blkIDToState[blkID]; ok { return state.onAcceptState, true @@ -126,7 +126,7 @@ func (m *manager) GetBlock(blkID ids.ID) (snowman.Block, error) { return m.NewBlock(blk), nil } -func (m *manager) GetStatelessBlock(blkID ids.ID) (blocks.Block, error) { +func (m *manager) GetStatelessBlock(blkID ids.ID) (block.Block, error) { // See if the block is in memory. 
if blkState, ok := m.blkIDToState[blkID]; ok { return blkState.statelessBlock, nil @@ -135,7 +135,7 @@ func (m *manager) GetStatelessBlock(blkID ids.ID) (blocks.Block, error) { return m.state.GetBlock(blkID) } -func (m *manager) NewBlock(blk blocks.Block) snowman.Block { +func (m *manager) NewBlock(blk block.Block) snowman.Block { return &Block{ Block: blk, manager: m, @@ -155,7 +155,7 @@ func (m *manager) VerifyTx(tx *txs.Tx) error { return err } - stateDiff, err := states.NewDiff(m.preferred, m) + stateDiff, err := state.NewDiff(m.lastAccepted, m) if err != nil { return err } @@ -174,12 +174,7 @@ func (m *manager) VerifyTx(tx *txs.Tx) error { State: stateDiff, Tx: tx, } - err = tx.Unsigned.Visit(executor) - if err != nil { - return err - } - - return m.VerifyUniqueInputs(m.preferred, executor.Inputs) + return tx.Unsigned.Visit(executor) } func (m *manager) VerifyUniqueInputs(blkID ids.ID, inputs set.Set[ids.ID]) error { diff --git a/avalanchego/vms/avm/blocks/executor/manager_test.go b/avalanchego/vms/avm/block/executor/manager_test.go similarity index 67% rename from avalanchego/vms/avm/blocks/executor/manager_test.go rename to avalanchego/vms/avm/block/executor/manager_test.go index f5c43d1f..275bbabc 100644 --- a/avalanchego/vms/avm/blocks/executor/manager_test.go +++ b/avalanchego/vms/avm/block/executor/manager_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package executor @@ -8,14 +8,13 @@ import ( "testing" "time" - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/set" - "github.com/ava-labs/avalanchego/vms/avm/blocks" - "github.com/ava-labs/avalanchego/vms/avm/states" + "github.com/ava-labs/avalanchego/vms/avm/block" + "github.com/ava-labs/avalanchego/vms/avm/state" "github.com/ava-labs/avalanchego/vms/avm/txs" "github.com/ava-labs/avalanchego/vms/avm/txs/executor" ) @@ -30,9 +29,8 @@ var ( func TestManagerGetStatelessBlock(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() - state := states.NewMockState(ctrl) + state := state.NewMockState(ctrl) m := &manager{ state: state, blkIDToState: map[ids.ID]*blockState{}, @@ -40,7 +38,7 @@ func TestManagerGetStatelessBlock(t *testing.T) { // Case: block is in memory { - statelessBlk := blocks.NewMockBlock(ctrl) + statelessBlk := block.NewMockBlock(ctrl) blkID := ids.GenerateTestID() blk := &blockState{ statelessBlock: statelessBlk, @@ -54,7 +52,7 @@ func TestManagerGetStatelessBlock(t *testing.T) { // Case: block isn't in memory { blkID := ids.GenerateTestID() - blk := blocks.NewMockBlock(ctrl) + blk := block.NewMockBlock(ctrl) state.EXPECT().GetBlock(blkID).Return(blk, nil) gotBlk, err := m.GetStatelessBlock(blkID) require.NoError(err) @@ -73,18 +71,17 @@ func TestManagerGetStatelessBlock(t *testing.T) { func TestManagerGetState(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() - state := states.NewMockState(ctrl) + s := state.NewMockState(ctrl) m := &manager{ - state: state, + state: s, blkIDToState: map[ids.ID]*blockState{}, lastAccepted: ids.GenerateTestID(), } // Case: Block is in memory { - diff := states.NewMockDiff(ctrl) + diff := state.NewMockDiff(ctrl) blkID := ids.GenerateTestID() m.blkIDToState[blkID] = &blockState{ onAcceptState: diff, @@ -99,14 
+96,14 @@ func TestManagerGetState(t *testing.T) { blkID := ids.GenerateTestID() gotState, ok := m.GetState(blkID) require.False(ok) - require.Equal(state, gotState) + require.Equal(s, gotState) } // Case: Block isn't in memory; block is last accepted { gotState, ok := m.GetState(m.lastAccepted) require.True(ok) - require.Equal(state, gotState) + require.Equal(s, gotState) } } @@ -118,14 +115,13 @@ func TestManagerVerifyTx(t *testing.T) { expectedErr error } - inputID := ids.GenerateTestID() tests := []test{ { name: "not bootstrapped", txF: func(*gomock.Controller) *txs.Tx { return &txs.Tx{} }, - managerF: func(ctrl *gomock.Controller) *manager { + managerF: func(*gomock.Controller) *manager { return &manager{ backend: &executor.Backend{}, } @@ -163,11 +159,11 @@ func TestManagerVerifyTx(t *testing.T) { } }, managerF: func(ctrl *gomock.Controller) *manager { - preferred := ids.GenerateTestID() + lastAcceptedID := ids.GenerateTestID() // These values don't matter for this test - state := states.NewMockState(ctrl) - state.EXPECT().GetLastAccepted().Return(preferred) + state := state.NewMockState(ctrl) + state.EXPECT().GetLastAccepted().Return(lastAcceptedID) state.EXPECT().GetTimestamp().Return(time.Time{}) return &manager{ @@ -175,8 +171,7 @@ func TestManagerVerifyTx(t *testing.T) { Bootstrapped: true, }, state: state, - lastAccepted: preferred, - preferred: preferred, + lastAccepted: lastAcceptedID, } }, expectedErr: errTestSemanticVerifyFail, @@ -196,11 +191,11 @@ func TestManagerVerifyTx(t *testing.T) { } }, managerF: func(ctrl *gomock.Controller) *manager { - preferred := ids.GenerateTestID() + lastAcceptedID := ids.GenerateTestID() // These values don't matter for this test - state := states.NewMockState(ctrl) - state.EXPECT().GetLastAccepted().Return(preferred) + state := state.NewMockState(ctrl) + state.EXPECT().GetLastAccepted().Return(lastAcceptedID) state.EXPECT().GetTimestamp().Return(time.Time{}) return &manager{ @@ -208,57 +203,10 @@ func 
TestManagerVerifyTx(t *testing.T) { Bootstrapped: true, }, state: state, - lastAccepted: preferred, - preferred: preferred, - } - }, - expectedErr: errTestExecutionFail, - }, - { - name: "non-unique inputs", - txF: func(ctrl *gomock.Controller) *txs.Tx { - unsigned := txs.NewMockUnsignedTx(ctrl) - // Syntactic verification passes - unsigned.EXPECT().Visit(gomock.Any()).Return(nil) - // Semantic verification passes - unsigned.EXPECT().Visit(gomock.Any()).Return(nil) - // Execution passes - unsigned.EXPECT().Visit(gomock.Any()).DoAndReturn(func(e *executor.Executor) error { - e.Inputs.Add(inputID) - return nil - }) - return &txs.Tx{ - Unsigned: unsigned, - } - }, - managerF: func(ctrl *gomock.Controller) *manager { - lastAcceptedID := ids.GenerateTestID() - - preferredID := ids.GenerateTestID() - preferred := blocks.NewMockBlock(ctrl) - preferred.EXPECT().Parent().Return(lastAcceptedID).AnyTimes() - - // These values don't matter for this test - diffState := states.NewMockDiff(ctrl) - diffState.EXPECT().GetLastAccepted().Return(preferredID) - diffState.EXPECT().GetTimestamp().Return(time.Time{}) - - return &manager{ - backend: &executor.Backend{ - Bootstrapped: true, - }, - blkIDToState: map[ids.ID]*blockState{ - preferredID: { - statelessBlock: preferred, - onAcceptState: diffState, - importedInputs: set.Set[ids.ID]{inputID: struct{}{}}, - }, - }, lastAccepted: lastAcceptedID, - preferred: preferredID, } }, - expectedErr: ErrConflictingParentTxs, + expectedErr: errTestExecutionFail, }, { name: "happy path", @@ -275,11 +223,11 @@ func TestManagerVerifyTx(t *testing.T) { } }, managerF: func(ctrl *gomock.Controller) *manager { - preferred := ids.GenerateTestID() + lastAcceptedID := ids.GenerateTestID() // These values don't matter for this test - state := states.NewMockState(ctrl) - state.EXPECT().GetLastAccepted().Return(preferred) + state := state.NewMockState(ctrl) + state.EXPECT().GetLastAccepted().Return(lastAcceptedID) 
state.EXPECT().GetTimestamp().Return(time.Time{}) return &manager{ @@ -287,8 +235,7 @@ func TestManagerVerifyTx(t *testing.T) { Bootstrapped: true, }, state: state, - lastAccepted: preferred, - preferred: preferred, + lastAccepted: lastAcceptedID, } }, expectedErr: nil, @@ -299,7 +246,6 @@ func TestManagerVerifyTx(t *testing.T) { t.Run(test.name, func(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() m := test.managerF(ctrl) tx := test.txF(ctrl) @@ -312,18 +258,16 @@ func TestManagerVerifyTx(t *testing.T) { func TestVerifyUniqueInputs(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() // Case: No inputs { m := &manager{} - err := m.VerifyUniqueInputs(ids.GenerateTestID(), set.Set[ids.ID]{}) - require.NoError(err) + require.NoError(m.VerifyUniqueInputs(ids.GenerateTestID(), set.Set[ids.ID]{})) } // blk0 is blk1's parent blk0ID, blk1ID := ids.GenerateTestID(), ids.GenerateTestID() - blk0, blk1 := blocks.NewMockBlock(ctrl), blocks.NewMockBlock(ctrl) + blk0, blk1 := block.NewMockBlock(ctrl), block.NewMockBlock(ctrl) blk1.EXPECT().Parent().Return(blk0ID).AnyTimes() blk0.EXPECT().Parent().Return(ids.Empty).AnyTimes() // blk0's parent is accepted @@ -332,18 +276,17 @@ func TestVerifyUniqueInputs(t *testing.T) { blkIDToState: map[ids.ID]*blockState{ blk0ID: { statelessBlock: blk0, - importedInputs: set.Set[ids.ID]{inputID: struct{}{}}, + importedInputs: set.Of(inputID), }, blk1ID: { statelessBlock: blk1, - importedInputs: set.Set[ids.ID]{ids.GenerateTestID(): struct{}{}}, + importedInputs: set.Of(ids.GenerateTestID()), }, }, } // [blk1]'s parent, [blk0], has [inputID] as an input - err := m.VerifyUniqueInputs(blk1ID, set.Set[ids.ID]{inputID: struct{}{}}) + err := m.VerifyUniqueInputs(blk1ID, set.Of(inputID)) require.ErrorIs(err, ErrConflictingParentTxs) - err = m.VerifyUniqueInputs(blk1ID, set.Set[ids.ID]{ids.GenerateTestID(): struct{}{}}) - require.NoError(err) + 
require.NoError(m.VerifyUniqueInputs(blk1ID, set.Of(ids.GenerateTestID()))) } diff --git a/avalanchego/vms/avm/blocks/executor/mock_manager.go b/avalanchego/vms/avm/block/executor/mock_manager.go similarity index 66% rename from avalanchego/vms/avm/blocks/executor/mock_manager.go rename to avalanchego/vms/avm/block/executor/mock_manager.go index a727d06b..a882ec51 100644 --- a/avalanchego/vms/avm/blocks/executor/mock_manager.go +++ b/avalanchego/vms/avm/block/executor/mock_manager.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. -// Source: github.com/ava-labs/avalanchego/vms/avm/blocks/executor (interfaces: Manager) +// Source: vms/avm/block/executor/manager.go +// +// Generated by this command: +// +// mockgen -source=vms/avm/block/executor/manager.go -destination=vms/avm/block/executor/mock_manager.go -package=executor -exclude_interfaces= +// // Package executor is a generated GoMock package. package executor @@ -13,10 +15,10 @@ import ( ids "github.com/ava-labs/avalanchego/ids" snowman "github.com/ava-labs/avalanchego/snow/consensus/snowman" set "github.com/ava-labs/avalanchego/utils/set" - blocks "github.com/ava-labs/avalanchego/vms/avm/blocks" - states "github.com/ava-labs/avalanchego/vms/avm/states" + block "github.com/ava-labs/avalanchego/vms/avm/block" + state "github.com/ava-labs/avalanchego/vms/avm/state" txs "github.com/ava-labs/avalanchego/vms/avm/txs" - gomock "github.com/golang/mock/gomock" + gomock "go.uber.org/mock/gomock" ) // MockManager is a mock of Manager interface. @@ -43,48 +45,48 @@ func (m *MockManager) EXPECT() *MockManagerMockRecorder { } // GetBlock mocks base method. 
-func (m *MockManager) GetBlock(arg0 ids.ID) (snowman.Block, error) { +func (m *MockManager) GetBlock(blkID ids.ID) (snowman.Block, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBlock", arg0) + ret := m.ctrl.Call(m, "GetBlock", blkID) ret0, _ := ret[0].(snowman.Block) ret1, _ := ret[1].(error) return ret0, ret1 } // GetBlock indicates an expected call of GetBlock. -func (mr *MockManagerMockRecorder) GetBlock(arg0 interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) GetBlock(blkID any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlock", reflect.TypeOf((*MockManager)(nil).GetBlock), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlock", reflect.TypeOf((*MockManager)(nil).GetBlock), blkID) } // GetState mocks base method. -func (m *MockManager) GetState(arg0 ids.ID) (states.Chain, bool) { +func (m *MockManager) GetState(blkID ids.ID) (state.Chain, bool) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetState", arg0) - ret0, _ := ret[0].(states.Chain) + ret := m.ctrl.Call(m, "GetState", blkID) + ret0, _ := ret[0].(state.Chain) ret1, _ := ret[1].(bool) return ret0, ret1 } // GetState indicates an expected call of GetState. -func (mr *MockManagerMockRecorder) GetState(arg0 interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) GetState(blkID any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetState", reflect.TypeOf((*MockManager)(nil).GetState), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetState", reflect.TypeOf((*MockManager)(nil).GetState), blkID) } // GetStatelessBlock mocks base method. 
-func (m *MockManager) GetStatelessBlock(arg0 ids.ID) (blocks.Block, error) { +func (m *MockManager) GetStatelessBlock(blkID ids.ID) (block.Block, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetStatelessBlock", arg0) - ret0, _ := ret[0].(blocks.Block) + ret := m.ctrl.Call(m, "GetStatelessBlock", blkID) + ret0, _ := ret[0].(block.Block) ret1, _ := ret[1].(error) return ret0, ret1 } // GetStatelessBlock indicates an expected call of GetStatelessBlock. -func (mr *MockManagerMockRecorder) GetStatelessBlock(arg0 interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) GetStatelessBlock(blkID any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetStatelessBlock", reflect.TypeOf((*MockManager)(nil).GetStatelessBlock), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetStatelessBlock", reflect.TypeOf((*MockManager)(nil).GetStatelessBlock), blkID) } // LastAccepted mocks base method. @@ -102,7 +104,7 @@ func (mr *MockManagerMockRecorder) LastAccepted() *gomock.Call { } // NewBlock mocks base method. -func (m *MockManager) NewBlock(arg0 blocks.Block) snowman.Block { +func (m *MockManager) NewBlock(arg0 block.Block) snowman.Block { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "NewBlock", arg0) ret0, _ := ret[0].(snowman.Block) @@ -110,7 +112,7 @@ func (m *MockManager) NewBlock(arg0 blocks.Block) snowman.Block { } // NewBlock indicates an expected call of NewBlock. -func (mr *MockManagerMockRecorder) NewBlock(arg0 interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) NewBlock(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewBlock", reflect.TypeOf((*MockManager)(nil).NewBlock), arg0) } @@ -130,41 +132,41 @@ func (mr *MockManagerMockRecorder) Preferred() *gomock.Call { } // SetPreference mocks base method. 
-func (m *MockManager) SetPreference(arg0 ids.ID) { +func (m *MockManager) SetPreference(blkID ids.ID) { m.ctrl.T.Helper() - m.ctrl.Call(m, "SetPreference", arg0) + m.ctrl.Call(m, "SetPreference", blkID) } // SetPreference indicates an expected call of SetPreference. -func (mr *MockManagerMockRecorder) SetPreference(arg0 interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) SetPreference(blkID any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetPreference", reflect.TypeOf((*MockManager)(nil).SetPreference), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetPreference", reflect.TypeOf((*MockManager)(nil).SetPreference), blkID) } // VerifyTx mocks base method. -func (m *MockManager) VerifyTx(arg0 *txs.Tx) error { +func (m *MockManager) VerifyTx(tx *txs.Tx) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "VerifyTx", arg0) + ret := m.ctrl.Call(m, "VerifyTx", tx) ret0, _ := ret[0].(error) return ret0 } // VerifyTx indicates an expected call of VerifyTx. -func (mr *MockManagerMockRecorder) VerifyTx(arg0 interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) VerifyTx(tx any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VerifyTx", reflect.TypeOf((*MockManager)(nil).VerifyTx), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VerifyTx", reflect.TypeOf((*MockManager)(nil).VerifyTx), tx) } // VerifyUniqueInputs mocks base method. -func (m *MockManager) VerifyUniqueInputs(arg0 ids.ID, arg1 set.Set[ids.ID]) error { +func (m *MockManager) VerifyUniqueInputs(blkID ids.ID, inputs set.Set[ids.ID]) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "VerifyUniqueInputs", arg0, arg1) + ret := m.ctrl.Call(m, "VerifyUniqueInputs", blkID, inputs) ret0, _ := ret[0].(error) return ret0 } // VerifyUniqueInputs indicates an expected call of VerifyUniqueInputs. 
-func (mr *MockManagerMockRecorder) VerifyUniqueInputs(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) VerifyUniqueInputs(blkID, inputs any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VerifyUniqueInputs", reflect.TypeOf((*MockManager)(nil).VerifyUniqueInputs), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VerifyUniqueInputs", reflect.TypeOf((*MockManager)(nil).VerifyUniqueInputs), blkID, inputs) } diff --git a/avalanchego/vms/avm/blocks/mock_block.go b/avalanchego/vms/avm/block/mock_block.go similarity index 90% rename from avalanchego/vms/avm/blocks/mock_block.go rename to avalanchego/vms/avm/block/mock_block.go index 83537a9a..bc332e88 100644 --- a/avalanchego/vms/avm/blocks/mock_block.go +++ b/avalanchego/vms/avm/block/mock_block.go @@ -1,11 +1,13 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. -// Source: github.com/ava-labs/avalanchego/vms/avm/blocks (interfaces: Block) +// Source: github.com/ava-labs/avalanchego/vms/avm/block (interfaces: Block) +// +// Generated by this command: +// +// mockgen -package=block -destination=vms/avm/block/mock_block.go github.com/ava-labs/avalanchego/vms/avm/block Block +// -// Package blocks is a generated GoMock package. -package blocks +// Package block is a generated GoMock package. +package block import ( reflect "reflect" @@ -15,7 +17,7 @@ import ( ids "github.com/ava-labs/avalanchego/ids" snow "github.com/ava-labs/avalanchego/snow" txs "github.com/ava-labs/avalanchego/vms/avm/txs" - gomock "github.com/golang/mock/gomock" + gomock "go.uber.org/mock/gomock" ) // MockBlock is a mock of Block interface. @@ -90,7 +92,7 @@ func (m *MockBlock) InitCtx(arg0 *snow.Context) { } // InitCtx indicates an expected call of InitCtx. 
-func (mr *MockBlockMockRecorder) InitCtx(arg0 interface{}) *gomock.Call { +func (mr *MockBlockMockRecorder) InitCtx(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InitCtx", reflect.TypeOf((*MockBlock)(nil).InitCtx), arg0) } @@ -160,7 +162,7 @@ func (m *MockBlock) initialize(arg0 []byte, arg1 codec.Manager) error { } // initialize indicates an expected call of initialize. -func (mr *MockBlockMockRecorder) initialize(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockBlockMockRecorder) initialize(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "initialize", reflect.TypeOf((*MockBlock)(nil).initialize), arg0, arg1) } diff --git a/avalanchego/vms/avm/blocks/parser.go b/avalanchego/vms/avm/block/parser.go similarity index 60% rename from avalanchego/vms/avm/blocks/parser.go rename to avalanchego/vms/avm/block/parser.go index b2df91e7..c4ad364c 100644 --- a/avalanchego/vms/avm/blocks/parser.go +++ b/avalanchego/vms/avm/block/parser.go @@ -1,17 +1,16 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
-package blocks +package block import ( - "fmt" "reflect" "time" "github.com/ava-labs/avalanchego/codec" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/timer/mockable" - "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/vms/avm/fxs" "github.com/ava-labs/avalanchego/vms/avm/txs" ) @@ -26,55 +25,51 @@ type Parser interface { ParseBlock(bytes []byte) (Block, error) ParseGenesisBlock(bytes []byte) (Block, error) - - InitializeBlock(block Block) error - InitializeGenesisBlock(block Block) error } type parser struct { txs.Parser } -func NewParser(cortinaTime time.Time, fxs []fxs.Fx) (Parser, error) { - p, err := txs.NewParser(cortinaTime, fxs) +func NewParser(cortinaTime time.Time, durangoTime time.Time, fxs []fxs.Fx) (Parser, error) { + p, err := txs.NewParser(cortinaTime, durangoTime, fxs) if err != nil { return nil, err } c := p.CodecRegistry() gc := p.GenesisCodecRegistry() - errs := wrappers.Errs{} - errs.Add( + err = utils.Err( c.RegisterType(&StandardBlock{}), gc.RegisterType(&StandardBlock{}), ) return &parser{ Parser: p, - }, errs.Err + }, err } func NewCustomParser( cortinaTime time.Time, + durangoTime time.Time, typeToFxIndex map[reflect.Type]int, clock *mockable.Clock, log logging.Logger, fxs []fxs.Fx, ) (Parser, error) { - p, err := txs.NewCustomParser(cortinaTime, typeToFxIndex, clock, log, fxs) + p, err := txs.NewCustomParser(cortinaTime, durangoTime, typeToFxIndex, clock, log, fxs) if err != nil { return nil, err } c := p.CodecRegistry() gc := p.GenesisCodecRegistry() - errs := wrappers.Errs{} - errs.Add( + err = utils.Err( c.RegisterType(&StandardBlock{}), gc.RegisterType(&StandardBlock{}), ) return &parser{ Parser: p, - }, errs.Err + }, err } func (p *parser) ParseBlock(bytes []byte) (Block, error) { @@ -92,21 +87,3 @@ func parse(cm codec.Manager, bytes []byte) (Block, error) { } return blk, blk.initialize(bytes, cm) } - -func (p *parser) 
InitializeBlock(block Block) error { - return initialize(block, p.Codec()) -} - -func (p *parser) InitializeGenesisBlock(block Block) error { - return initialize(block, p.GenesisCodec()) -} - -func initialize(blk Block, cm codec.Manager) error { - // We serialize this block as a pointer so that it can be deserialized into - // a Block - bytes, err := cm.Marshal(CodecVersion, &blk) - if err != nil { - return fmt.Errorf("couldn't marshal block: %w", err) - } - return blk.initialize(bytes, cm) -} diff --git a/avalanchego/vms/avm/blocks/standard_block.go b/avalanchego/vms/avm/block/standard_block.go similarity index 77% rename from avalanchego/vms/avm/blocks/standard_block.go rename to avalanchego/vms/avm/block/standard_block.go index 2383a24a..614c7bdc 100644 --- a/avalanchego/vms/avm/blocks/standard_block.go +++ b/avalanchego/vms/avm/block/standard_block.go @@ -1,7 +1,7 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package blocks +package block import ( "fmt" @@ -26,12 +26,12 @@ type StandardBlock struct { // List of transactions contained in this block. 
Transactions []*txs.Tx `serialize:"true" json:"txs"` - id ids.ID - bytes []byte + BlockID ids.ID `json:"id"` + bytes []byte } func (b *StandardBlock) initialize(bytes []byte, cm codec.Manager) error { - b.id = hashing.ComputeHash256Array(bytes) + b.BlockID = hashing.ComputeHash256Array(bytes) b.bytes = bytes for _, tx := range b.Transactions { if err := tx.Initialize(cm); err != nil { @@ -48,7 +48,7 @@ func (b *StandardBlock) InitCtx(ctx *snow.Context) { } func (b *StandardBlock) ID() ids.ID { - return b.id + return b.BlockID } func (b *StandardBlock) Parent() ids.ID { @@ -88,5 +88,16 @@ func NewStandardBlock( Time: uint64(timestamp.Unix()), Transactions: txs, } - return blk, initialize(blk, cm) + + // We serialize this block as a pointer so that it can be deserialized into + // a Block + var blkIntf Block = blk + bytes, err := cm.Marshal(CodecVersion, &blkIntf) + if err != nil { + return nil, fmt.Errorf("couldn't marshal block: %w", err) + } + + blk.BlockID = hashing.ComputeHash256Array(bytes) + blk.bytes = bytes + return blk, nil } diff --git a/avalanchego/vms/avm/chain_state.go b/avalanchego/vms/avm/chain_state.go deleted file mode 100644 index 62e80527..00000000 --- a/avalanchego/vms/avm/chain_state.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package avm - -import ( - "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/choices" - "github.com/ava-labs/avalanchego/vms/avm/states" - "github.com/ava-labs/avalanchego/vms/avm/txs" -) - -var _ states.Chain = (*chainState)(nil) - -// chainState wraps the disk state and filters non-accepted transactions from -// being returned in GetTx. 
-type chainState struct { - states.State -} - -func (s *chainState) GetTx(txID ids.ID) (*txs.Tx, error) { - tx, err := s.State.GetTx(txID) - if err != nil { - return nil, err - } - - // Before the linearization, transactions were persisted before they were - // marked as Accepted. However, this function aims to only return accepted - // transactions. - status, err := s.State.GetStatus(txID) - if err == database.ErrNotFound { - // If the status wasn't persisted, then the transaction was written - // after the linearization, and is accepted. - return tx, nil - } - if err != nil { - return nil, err - } - - // If the status was persisted, then the transaction was written before the - // linearization. If it wasn't marked as accepted, then we treat it as if it - // doesn't exist. - if status != choices.Accepted { - return nil, database.ErrNotFound - } - return tx, nil -} diff --git a/avalanchego/vms/avm/client.go b/avalanchego/vms/avm/client.go index 33d3b21f..63df6543 100644 --- a/avalanchego/vms/avm/client.go +++ b/avalanchego/vms/avm/client.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avm @@ -15,9 +15,8 @@ import ( "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/formatting" "github.com/ava-labs/avalanchego/utils/formatting/address" + "github.com/ava-labs/avalanchego/utils/json" "github.com/ava-labs/avalanchego/utils/rpc" - - cjson "github.com/ava-labs/avalanchego/utils/json" ) var _ Client = (*client)(nil) @@ -44,8 +43,6 @@ type Client interface { ConfirmTx(ctx context.Context, txID ids.ID, freq time.Duration, options ...rpc.Option) (choices.Status, error) // GetTx returns the byte representation of [txID] GetTx(ctx context.Context, txID ids.ID, options ...rpc.Option) ([]byte, error) - // IssueStopVertex issues a stop vertex. 
- IssueStopVertex(ctx context.Context, options ...rpc.Option) error // GetUTXOs returns the byte representation of the UTXOs controlled by [addrs] GetUTXOs( ctx context.Context, @@ -246,20 +243,18 @@ func (c *client) GetBlock(ctx context.Context, blkID ids.ID, options ...rpc.Opti if err != nil { return nil, err } - return formatting.Decode(res.Encoding, res.Block) } func (c *client) GetBlockByHeight(ctx context.Context, height uint64, options ...rpc.Option) ([]byte, error) { res := &api.FormattedBlock{} err := c.requester.SendRequest(ctx, "avm.getBlockByHeight", &api.GetBlockByHeightArgs{ - Height: height, + Height: json.Uint64(height), Encoding: formatting.HexNC, }, res, options...) if err != nil { return nil, err } - return formatting.Decode(res.Encoding, res.Block) } @@ -282,10 +277,6 @@ func (c *client) IssueTx(ctx context.Context, txBytes []byte, options ...rpc.Opt return res.TxID, err } -func (c *client) IssueStopVertex(ctx context.Context, options ...rpc.Option) error { - return c.requester.SendRequest(ctx, "avm.issueStopVertex", &struct{}{}, &struct{}{}, options...) 
-} - func (c *client) GetTxStatus(ctx context.Context, txID ids.ID, options ...rpc.Option) (choices.Status, error) { res := &GetTxStatusReply{} err := c.requester.SendRequest(ctx, "avm.getTxStatus", &api.JSONTxID{ @@ -323,12 +314,7 @@ func (c *client) GetTx(ctx context.Context, txID ids.ID, options ...rpc.Option) if err != nil { return nil, err } - - txBytes, err := formatting.Decode(res.Encoding, res.Tx) - if err != nil { - return nil, err - } - return txBytes, nil + return formatting.Decode(res.Encoding, res.Tx) } func (c *client) GetUTXOs( @@ -355,7 +341,7 @@ func (c *client) GetAtomicUTXOs( err := c.requester.SendRequest(ctx, "avm.getUTXOs", &api.GetUTXOsArgs{ Addresses: ids.ShortIDsToStrings(addrs), SourceChain: sourceChain, - Limit: cjson.Uint32(limit), + Limit: json.Uint32(limit), StartIndex: api.Index{ Address: startAddress.String(), UTXO: startUTXOID.String(), @@ -448,14 +434,14 @@ func (c *client) CreateAsset( holders := make([]*Holder, len(clientHolders)) for i, clientHolder := range clientHolders { holders[i] = &Holder{ - Amount: cjson.Uint64(clientHolder.Amount), + Amount: json.Uint64(clientHolder.Amount), Address: clientHolder.Address.String(), } } minters := make([]Owners, len(clientMinters)) for i, clientMinter := range clientMinters { minters[i] = Owners{ - Threshold: cjson.Uint32(clientMinter.Threshold), + Threshold: json.Uint32(clientMinter.Threshold), Minters: ids.ShortIDsToStrings(clientMinter.Minters), } } @@ -489,7 +475,7 @@ func (c *client) CreateFixedCapAsset( holders := make([]*Holder, len(clientHolders)) for i, clientHolder := range clientHolders { holders[i] = &Holder{ - Amount: cjson.Uint64(clientHolder.Amount), + Amount: json.Uint64(clientHolder.Amount), Address: clientHolder.Address.String(), } } @@ -522,7 +508,7 @@ func (c *client) CreateVariableCapAsset( minters := make([]Owners, len(clientMinters)) for i, clientMinter := range clientMinters { minters[i] = Owners{ - Threshold: cjson.Uint32(clientMinter.Threshold), + Threshold: 
json.Uint32(clientMinter.Threshold), Minters: ids.ShortIDsToStrings(clientMinter.Minters), } } @@ -554,7 +540,7 @@ func (c *client) CreateNFTAsset( minters := make([]Owners, len(clientMinters)) for i, clientMinter := range clientMinters { minters[i] = Owners{ - Threshold: cjson.Uint32(clientMinter.Threshold), + Threshold: json.Uint32(clientMinter.Threshold), Minters: ids.ShortIDsToStrings(clientMinter.Minters), } } @@ -629,7 +615,7 @@ func (c *client) Send( JSONChangeAddr: api.JSONChangeAddr{ChangeAddr: changeAddr.String()}, }, SendOutput: SendOutput{ - Amount: cjson.Uint64(amount), + Amount: json.Uint64(amount), AssetID: assetID, To: to.String(), }, @@ -651,7 +637,7 @@ func (c *client) SendMultiple( outputs := make([]SendOutput, len(clientOutputs)) for i, clientOutput := range clientOutputs { outputs[i] = SendOutput{ - Amount: cjson.Uint64(clientOutput.Amount), + Amount: json.Uint64(clientOutput.Amount), AssetID: clientOutput.AssetID, To: clientOutput.To.String(), } @@ -685,7 +671,7 @@ func (c *client) Mint( JSONFromAddrs: api.JSONFromAddrs{From: ids.ShortIDsToStrings(from)}, JSONChangeAddr: api.JSONChangeAddr{ChangeAddr: changeAddr.String()}, }, - Amount: cjson.Uint64(amount), + Amount: json.Uint64(amount), AssetID: assetID, To: to.String(), }, res, options...) @@ -710,7 +696,7 @@ func (c *client) SendNFT( JSONChangeAddr: api.JSONChangeAddr{ChangeAddr: changeAddr.String()}, }, AssetID: assetID, - GroupID: cjson.Uint32(groupID), + GroupID: json.Uint32(groupID), To: to.String(), }, res, options...) 
return res.TxID, err @@ -773,7 +759,7 @@ func (c *client) Export( JSONFromAddrs: api.JSONFromAddrs{From: ids.ShortIDsToStrings(from)}, JSONChangeAddr: api.JSONChangeAddr{ChangeAddr: changeAddr.String()}, }, - Amount: cjson.Uint64(amount), + Amount: json.Uint64(amount), TargetChain: targetChain, To: to.String(), AssetID: assetID, diff --git a/avalanchego/vms/avm/client_test.go b/avalanchego/vms/avm/client_test.go index e8013b15..23dd74ce 100644 --- a/avalanchego/vms/avm/client_test.go +++ b/avalanchego/vms/avm/client_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avm @@ -27,7 +27,7 @@ func (mc *mockClient) SendRequest( _ interface{}, _ ...rpc.Option, ) error { - mc.require.Equal(inData, mc.expectedInData) + mc.require.Equal(mc.expectedInData, inData) return nil } diff --git a/avalanchego/vms/avm/config.go b/avalanchego/vms/avm/config.go new file mode 100644 index 00000000..f7661bbe --- /dev/null +++ b/avalanchego/vms/avm/config.go @@ -0,0 +1,34 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package avm + +import ( + "encoding/json" + + "github.com/ava-labs/avalanchego/vms/avm/network" +) + +var DefaultConfig = Config{ + Network: network.DefaultConfig, + IndexTransactions: false, + IndexAllowIncomplete: false, + ChecksumsEnabled: false, +} + +type Config struct { + Network network.Config `json:"network"` + IndexTransactions bool `json:"index-transactions"` + IndexAllowIncomplete bool `json:"index-allow-incomplete"` + ChecksumsEnabled bool `json:"checksums-enabled"` +} + +func ParseConfig(configBytes []byte) (Config, error) { + if len(configBytes) == 0 { + return DefaultConfig, nil + } + + config := DefaultConfig + err := json.Unmarshal(configBytes, &config) + return config, err +} diff --git a/avalanchego/vms/avm/config/config.go b/avalanchego/vms/avm/config/config.go index 045b4474..df6e4f7d 100644 --- a/avalanchego/vms/avm/config/config.go +++ b/avalanchego/vms/avm/config/config.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package config +import "time" + // Struct collecting all the foundational parameters of the AVM type Config struct { // Fee that is burned by every non-asset creating transaction @@ -10,4 +12,7 @@ type Config struct { // Fee that must be burned by every asset creating transaction CreateAssetTxFee uint64 + + // Time of the Durango network upgrade + DurangoTime time.Time } diff --git a/avalanchego/vms/avm/config_test.go b/avalanchego/vms/avm/config_test.go new file mode 100644 index 00000000..27481d78 --- /dev/null +++ b/avalanchego/vms/avm/config_test.go @@ -0,0 +1,67 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package avm + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/vms/avm/network" +) + +func TestParseConfig(t *testing.T) { + tests := []struct { + name string + configBytes []byte + expectedConfig Config + }{ + { + name: "unspecified config", + configBytes: nil, + expectedConfig: DefaultConfig, + }, + { + name: "manually specified checksums enabled", + configBytes: []byte(`{"checksums-enabled":true}`), + expectedConfig: Config{ + Network: network.DefaultConfig, + IndexTransactions: DefaultConfig.IndexTransactions, + IndexAllowIncomplete: DefaultConfig.IndexAllowIncomplete, + ChecksumsEnabled: true, + }, + }, + { + name: "manually specified checksums enabled", + configBytes: []byte(`{"network":{"max-validator-set-staleness":1}}`), + expectedConfig: Config{ + Network: network.Config{ + MaxValidatorSetStaleness: time.Nanosecond, + TargetGossipSize: network.DefaultConfig.TargetGossipSize, + PullGossipPollSize: network.DefaultConfig.PullGossipPollSize, + PullGossipFrequency: network.DefaultConfig.PullGossipFrequency, + PullGossipThrottlingPeriod: network.DefaultConfig.PullGossipThrottlingPeriod, + PullGossipThrottlingLimit: network.DefaultConfig.PullGossipThrottlingLimit, + ExpectedBloomFilterElements: network.DefaultConfig.ExpectedBloomFilterElements, + ExpectedBloomFilterFalsePositiveProbability: network.DefaultConfig.ExpectedBloomFilterFalsePositiveProbability, + MaxBloomFilterFalsePositiveProbability: network.DefaultConfig.MaxBloomFilterFalsePositiveProbability, + LegacyPushGossipCacheSize: network.DefaultConfig.LegacyPushGossipCacheSize, + }, + IndexTransactions: DefaultConfig.IndexTransactions, + IndexAllowIncomplete: DefaultConfig.IndexAllowIncomplete, + ChecksumsEnabled: DefaultConfig.ChecksumsEnabled, + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + config, err := ParseConfig(test.configBytes) + require.NoError(err) + 
require.Equal(test.expectedConfig, config) + }) + } +} diff --git a/avalanchego/vms/avm/dag_state.go b/avalanchego/vms/avm/dag_state.go deleted file mode 100644 index 927a8559..00000000 --- a/avalanchego/vms/avm/dag_state.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package avm - -import ( - "go.uber.org/zap" - - "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/vms/avm/states" - "github.com/ava-labs/avalanchego/vms/avm/txs" - "github.com/ava-labs/avalanchego/vms/components/avax" -) - -var _ states.Chain = (*dagState)(nil) - -type dagState struct { - states.Chain - vm *VM -} - -func (s *dagState) GetUTXOFromID(utxoID *avax.UTXOID) (*avax.UTXO, error) { - inputID := utxoID.InputID() - utxo, err := s.GetUTXO(inputID) - if err == nil { - // If the UTXO exists in the base state, then we can immediately return - // it. - return utxo, nil - } - if err != database.ErrNotFound { - s.vm.ctx.Log.Error("fetching UTXO returned unexpected error", - zap.Stringer("txID", utxoID.TxID), - zap.Uint32("index", utxoID.OutputIndex), - zap.Stringer("utxoID", inputID), - zap.Error(err), - ) - return nil, err - } - - // The UTXO doesn't exist in the base state, so we need to check if the UTXO - // could exist from a currently processing tx. - inputTxID, inputIndex := utxoID.InputSource() - parent := UniqueTx{ - vm: s.vm, - txID: inputTxID, - } - - // If the parent doesn't exist or is otherwise invalid, then this UTXO isn't - // available. - if err := parent.verifyWithoutCacheWrites(); err != nil { - return nil, database.ErrNotFound - } - - // If the parent was accepted, the UTXO should have been in the base state. - // This means the UTXO was already consumed by a conflicting tx. 
- if status := parent.Status(); status.Decided() { - return nil, database.ErrNotFound - } - - parentUTXOs := parent.UTXOs() - - // At this point we have only verified the TxID portion of [utxoID] as being - // potentially valid. It is still possible that a user specified an invalid - // index. So, we must bounds check the parents UTXOs. - // - // Invariant: len(parentUTXOs) <= MaxInt32. This guarantees that casting - // inputIndex to an int, even on 32-bit architectures, will not overflow. - if uint32(len(parentUTXOs)) <= inputIndex { - return nil, database.ErrNotFound - } - return parentUTXOs[int(inputIndex)], nil -} - -func (s *dagState) GetTx(txID ids.ID) (*txs.Tx, error) { - tx := &UniqueTx{ - vm: s.vm, - txID: txID, - } - if status := tx.Status(); !status.Fetched() { - return nil, database.ErrNotFound - } - return tx.Tx, nil -} diff --git a/avalanchego/vms/avm/environment_test.go b/avalanchego/vms/avm/environment_test.go new file mode 100644 index 00000000..47a8068c --- /dev/null +++ b/avalanchego/vms/avm/environment_test.go @@ -0,0 +1,525 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package avm + +import ( + "context" + "encoding/json" + "math/rand" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/api/keystore" + "github.com/ava-labs/avalanchego/chains/atomic" + "github.com/ava-labs/avalanchego/database/memdb" + "github.com/ava-labs/avalanchego/database/prefixdb" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/snow/snowtest" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + "github.com/ava-labs/avalanchego/utils/formatting" + "github.com/ava-labs/avalanchego/utils/formatting/address" + "github.com/ava-labs/avalanchego/utils/linkedhashmap" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/sampler" + "github.com/ava-labs/avalanchego/vms/avm/block/executor" + "github.com/ava-labs/avalanchego/vms/avm/config" + "github.com/ava-labs/avalanchego/vms/avm/fxs" + "github.com/ava-labs/avalanchego/vms/avm/txs" + "github.com/ava-labs/avalanchego/vms/components/avax" + "github.com/ava-labs/avalanchego/vms/nftfx" + "github.com/ava-labs/avalanchego/vms/secp256k1fx" + + avajson "github.com/ava-labs/avalanchego/utils/json" + keystoreutils "github.com/ava-labs/avalanchego/vms/components/keystore" +) + +const ( + testTxFee uint64 = 1000 + startBalance uint64 = 50000 + + username = "bobby" + password = "StrnasfqewiurPasswdn56d" //#nosec G101 + feeAssetName = "TEST" + otherAssetName = "OTHER" +) + +var ( + testChangeAddr = ids.GenerateTestShortID() + testCases = []struct { + name string + avaxAsset bool + }{ + { + name: "genesis asset is AVAX", + avaxAsset: true, + }, + { + name: "genesis asset is TEST", + avaxAsset: false, + }, + } + + assetID = ids.ID{1, 2, 3} + + keys = secp256k1.TestKeys()[:3] // TODO: Remove [:3] + addrs []ids.ShortID // addrs[i] corresponds to keys[i] +) + +func 
init() { + addrs = make([]ids.ShortID, len(keys)) + for i, key := range keys { + addrs[i] = key.Address() + } +} + +type user struct { + username string + password string + initialKeys []*secp256k1.PrivateKey +} + +type envConfig struct { + isCustomFeeAsset bool + keystoreUsers []*user + vmStaticConfig *config.Config + vmDynamicConfig *Config + additionalFxs []*common.Fx + notLinearized bool + notBootstrapped bool +} + +type environment struct { + genesisBytes []byte + genesisTx *txs.Tx + sharedMemory *atomic.Memory + issuer chan common.Message + vm *VM + service *Service + walletService *WalletService +} + +// setup the testing environment +func setup(tb testing.TB, c *envConfig) *environment { + require := require.New(tb) + + var ( + genesisArgs *BuildGenesisArgs + assetName = "AVAX" + ) + if c.isCustomFeeAsset { + genesisArgs = makeCustomAssetGenesis(tb) + assetName = feeAssetName + } else { + genesisArgs = makeDefaultGenesis(tb) + } + + genesisBytes := buildGenesisTestWithArgs(tb, genesisArgs) + + ctx := snowtest.Context(tb, snowtest.XChainID) + + baseDB := memdb.New() + m := atomic.NewMemory(prefixdb.New([]byte{0}, baseDB)) + ctx.SharedMemory = m.NewSharedMemory(ctx.ChainID) + + // NB: this lock is intentionally left locked when this function returns. + // The caller of this function is responsible for unlocking. 
+ ctx.Lock.Lock() + + userKeystore := keystore.New(logging.NoLog{}, memdb.New()) + ctx.Keystore = userKeystore.NewBlockchainKeyStore(ctx.ChainID) + + for _, user := range c.keystoreUsers { + require.NoError(userKeystore.CreateUser(user.username, user.password)) + + // Import the initially funded private keys + keystoreUser, err := keystoreutils.NewUserFromKeystore(ctx.Keystore, user.username, user.password) + require.NoError(err) + + require.NoError(keystoreUser.PutKeys(user.initialKeys...)) + require.NoError(keystoreUser.Close()) + } + + vmStaticConfig := config.Config{ + TxFee: testTxFee, + CreateAssetTxFee: testTxFee, + } + if c.vmStaticConfig != nil { + vmStaticConfig = *c.vmStaticConfig + } + + vm := &VM{ + Config: vmStaticConfig, + } + + vmDynamicConfig := DefaultConfig + vmDynamicConfig.IndexTransactions = true + if c.vmDynamicConfig != nil { + vmDynamicConfig = *c.vmDynamicConfig + } + configBytes, err := json.Marshal(vmDynamicConfig) + require.NoError(err) + + require.NoError(vm.Initialize( + context.Background(), + ctx, + prefixdb.New([]byte{1}, baseDB), + genesisBytes, + nil, + configBytes, + nil, + append( + []*common.Fx{ + { + ID: secp256k1fx.ID, + Fx: &secp256k1fx.Fx{}, + }, + { + ID: nftfx.ID, + Fx: &nftfx.Fx{}, + }, + }, + c.additionalFxs..., + ), + &common.SenderTest{}, + )) + + stopVertexID := ids.GenerateTestID() + issuer := make(chan common.Message, 1) + + env := &environment{ + genesisBytes: genesisBytes, + genesisTx: getCreateTxFromGenesisTest(tb, genesisBytes, assetName), + sharedMemory: m, + issuer: issuer, + vm: vm, + service: &Service{ + vm: vm, + }, + walletService: &WalletService{ + vm: vm, + pendingTxs: linkedhashmap.New[ids.ID, *txs.Tx](), + }, + } + + require.NoError(vm.SetState(context.Background(), snow.Bootstrapping)) + if c.notLinearized { + return env + } + + require.NoError(vm.Linearize(context.Background(), stopVertexID, issuer)) + if c.notBootstrapped { + return env + } + + require.NoError(vm.SetState(context.Background(), 
snow.NormalOp)) + return env +} + +// Returns: +// +// 1. tx in genesis that creates asset +// 2. the index of the output +func getCreateTxFromGenesisTest(tb testing.TB, genesisBytes []byte, assetName string) *txs.Tx { + require := require.New(tb) + + parser, err := txs.NewParser( + time.Time{}, + time.Time{}, + []fxs.Fx{ + &secp256k1fx.Fx{}, + }, + ) + require.NoError(err) + + cm := parser.GenesisCodec() + genesis := Genesis{} + _, err = cm.Unmarshal(genesisBytes, &genesis) + require.NoError(err) + require.NotEmpty(genesis.Txs) + + var assetTx *GenesisAsset + for _, tx := range genesis.Txs { + if tx.Name == assetName { + assetTx = tx + break + } + } + require.NotNil(assetTx) + + tx := &txs.Tx{ + Unsigned: &assetTx.CreateAssetTx, + } + require.NoError(tx.Initialize(parser.GenesisCodec())) + return tx +} + +// buildGenesisTest is the common Genesis builder for most tests +func buildGenesisTest(tb testing.TB) []byte { + defaultArgs := makeDefaultGenesis(tb) + return buildGenesisTestWithArgs(tb, defaultArgs) +} + +// buildGenesisTestWithArgs allows building the genesis while injecting different starting points (args) +func buildGenesisTestWithArgs(tb testing.TB, args *BuildGenesisArgs) []byte { + require := require.New(tb) + + ss := CreateStaticService() + + reply := BuildGenesisReply{} + require.NoError(ss.BuildGenesis(nil, args, &reply)) + + b, err := formatting.Decode(reply.Encoding, reply.Bytes) + require.NoError(err) + return b +} + +func newTx(tb testing.TB, genesisBytes []byte, chainID ids.ID, parser txs.Parser, assetName string) *txs.Tx { + require := require.New(tb) + + createTx := getCreateTxFromGenesisTest(tb, genesisBytes, assetName) + tx := &txs.Tx{Unsigned: &txs.BaseTx{ + BaseTx: avax.BaseTx{ + NetworkID: constants.UnitTestID, + BlockchainID: chainID, + Ins: []*avax.TransferableInput{{ + UTXOID: avax.UTXOID{ + TxID: createTx.ID(), + OutputIndex: 2, + }, + Asset: avax.Asset{ID: createTx.ID()}, + In: &secp256k1fx.TransferInput{ + Amt: startBalance, + 
Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, + }, + }, + }, + }}, + }, + }} + require.NoError(tx.SignSECP256K1Fx(parser.Codec(), [][]*secp256k1.PrivateKey{{keys[0]}})) + return tx +} + +// Sample from a set of addresses and return them raw and formatted as strings. +// The size of the sample is between 1 and len(addrs) +// If len(addrs) == 0, returns nil +func sampleAddrs(tb testing.TB, addressFormatter avax.AddressManager, addrs []ids.ShortID) ([]ids.ShortID, []string) { + require := require.New(tb) + + sampledAddrs := []ids.ShortID{} + sampledAddrsStr := []string{} + + sampler := sampler.NewUniform() + sampler.Initialize(uint64(len(addrs))) + + numAddrs := 1 + rand.Intn(len(addrs)) // #nosec G404 + indices, err := sampler.Sample(numAddrs) + require.NoError(err) + for _, index := range indices { + addr := addrs[index] + addrStr, err := addressFormatter.FormatLocalAddress(addr) + require.NoError(err) + + sampledAddrs = append(sampledAddrs, addr) + sampledAddrsStr = append(sampledAddrsStr, addrStr) + } + return sampledAddrs, sampledAddrsStr +} + +func makeDefaultGenesis(tb testing.TB) *BuildGenesisArgs { + require := require.New(tb) + + addr0Str, err := address.FormatBech32(constants.UnitTestHRP, addrs[0].Bytes()) + require.NoError(err) + + addr1Str, err := address.FormatBech32(constants.UnitTestHRP, addrs[1].Bytes()) + require.NoError(err) + + addr2Str, err := address.FormatBech32(constants.UnitTestHRP, addrs[2].Bytes()) + require.NoError(err) + + return &BuildGenesisArgs{ + Encoding: formatting.Hex, + GenesisData: map[string]AssetDefinition{ + "asset1": { + Name: "AVAX", + Symbol: "SYMB", + InitialState: map[string][]interface{}{ + "fixedCap": { + Holder{ + Amount: avajson.Uint64(startBalance), + Address: addr0Str, + }, + Holder{ + Amount: avajson.Uint64(startBalance), + Address: addr1Str, + }, + Holder{ + Amount: avajson.Uint64(startBalance), + Address: addr2Str, + }, + }, + }, + }, + "asset2": { + Name: "myVarCapAsset", + Symbol: "MVCA", + 
InitialState: map[string][]interface{}{ + "variableCap": { + Owners{ + Threshold: 1, + Minters: []string{ + addr0Str, + addr1Str, + }, + }, + Owners{ + Threshold: 2, + Minters: []string{ + addr0Str, + addr1Str, + addr2Str, + }, + }, + }, + }, + }, + "asset3": { + Name: "myOtherVarCapAsset", + InitialState: map[string][]interface{}{ + "variableCap": { + Owners{ + Threshold: 1, + Minters: []string{ + addr0Str, + }, + }, + }, + }, + }, + "asset4": { + Name: "myFixedCapAsset", + InitialState: map[string][]interface{}{ + "fixedCap": { + Holder{ + Amount: avajson.Uint64(startBalance), + Address: addr0Str, + }, + Holder{ + Amount: avajson.Uint64(startBalance), + Address: addr1Str, + }, + }, + }, + }, + }, + } +} + +func makeCustomAssetGenesis(tb testing.TB) *BuildGenesisArgs { + require := require.New(tb) + + addr0Str, err := address.FormatBech32(constants.UnitTestHRP, addrs[0].Bytes()) + require.NoError(err) + + addr1Str, err := address.FormatBech32(constants.UnitTestHRP, addrs[1].Bytes()) + require.NoError(err) + + addr2Str, err := address.FormatBech32(constants.UnitTestHRP, addrs[2].Bytes()) + require.NoError(err) + + return &BuildGenesisArgs{ + Encoding: formatting.Hex, + GenesisData: map[string]AssetDefinition{ + "asset1": { + Name: feeAssetName, + Symbol: "TST", + InitialState: map[string][]interface{}{ + "fixedCap": { + Holder{ + Amount: avajson.Uint64(startBalance), + Address: addr0Str, + }, + Holder{ + Amount: avajson.Uint64(startBalance), + Address: addr1Str, + }, + Holder{ + Amount: avajson.Uint64(startBalance), + Address: addr2Str, + }, + }, + }, + }, + "asset2": { + Name: otherAssetName, + Symbol: "OTH", + InitialState: map[string][]interface{}{ + "fixedCap": { + Holder{ + Amount: avajson.Uint64(startBalance), + Address: addr0Str, + }, + Holder{ + Amount: avajson.Uint64(startBalance), + Address: addr1Str, + }, + Holder{ + Amount: avajson.Uint64(startBalance), + Address: addr2Str, + }, + }, + }, + }, + }, + } +} + +// issueAndAccept expects the context lock 
not to be held +func issueAndAccept( + require *require.Assertions, + vm *VM, + issuer <-chan common.Message, + tx *txs.Tx, +) { + txID, err := vm.issueTx(tx) + require.NoError(err) + require.Equal(tx.ID(), txID) + + buildAndAccept(require, vm, issuer, txID) +} + +// buildAndAccept expects the context lock not to be held +func buildAndAccept( + require *require.Assertions, + vm *VM, + issuer <-chan common.Message, + txID ids.ID, +) { + require.Equal(common.PendingTxs, <-issuer) + + vm.ctx.Lock.Lock() + defer vm.ctx.Lock.Unlock() + + blkIntf, err := vm.BuildBlock(context.Background()) + require.NoError(err) + require.IsType(&executor.Block{}, blkIntf) + + blk := blkIntf.(*executor.Block) + txs := blk.Txs() + require.Len(txs, 1) + + issuedTx := txs[0] + require.Equal(txID, issuedTx.ID()) + require.NoError(blk.Verify(context.Background())) + require.NoError(vm.SetPreference(context.Background(), blk.ID())) + require.NoError(blk.Accept(context.Background())) +} diff --git a/avalanchego/vms/avm/factory.go b/avalanchego/vms/avm/factory.go index 1e2c6f68..ee71cac0 100644 --- a/avalanchego/vms/avm/factory.go +++ b/avalanchego/vms/avm/factory.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avm diff --git a/avalanchego/vms/avm/fx_test.go b/avalanchego/vms/avm/fx_test.go index ebb525a2..7cea92cf 100644 --- a/avalanchego/vms/avm/fx_test.go +++ b/avalanchego/vms/avm/fx_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package avm @@ -6,6 +6,8 @@ package avm import ( "errors" "testing" + + "github.com/stretchr/testify/require" ) var ( @@ -48,7 +50,7 @@ func (fx *FxTest) Initialize(vm interface{}) error { return nil } if fx.T != nil { - fx.T.Fatal(errCalledInitialize) + require.FailNow(fx.T, errCalledInitialize.Error()) } return errCalledInitialize } @@ -61,7 +63,7 @@ func (fx *FxTest) Bootstrapping() error { return nil } if fx.T != nil { - fx.T.Fatal(errCalledBootstrapping) + require.FailNow(fx.T, errCalledBootstrapping.Error()) } return errCalledBootstrapping } @@ -74,7 +76,7 @@ func (fx *FxTest) Bootstrapped() error { return nil } if fx.T != nil { - fx.T.Fatal(errCalledBootstrapped) + require.FailNow(fx.T, errCalledBootstrapped.Error()) } return errCalledBootstrapped } @@ -87,7 +89,7 @@ func (fx *FxTest) VerifyTransfer(tx, in, cred, utxo interface{}) error { return nil } if fx.T != nil { - fx.T.Fatal(errCalledVerifyTransfer) + require.FailNow(fx.T, errCalledVerifyTransfer.Error()) } return errCalledVerifyTransfer } @@ -100,7 +102,7 @@ func (fx *FxTest) VerifyOperation(tx, op, cred interface{}, utxos []interface{}) return nil } if fx.T != nil { - fx.T.Fatal(errCalledVerifyOperation) + require.FailNow(fx.T, errCalledVerifyOperation.Error()) } return errCalledVerifyOperation } diff --git a/avalanchego/vms/avm/fxs/fx.go b/avalanchego/vms/avm/fxs/fx.go index e16ad4a8..2749ee45 100644 --- a/avalanchego/vms/avm/fxs/fx.go +++ b/avalanchego/vms/avm/fxs/fx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package fxs @@ -14,9 +14,10 @@ import ( ) var ( - _ Fx = (*secp256k1fx.Fx)(nil) - _ Fx = (*nftfx.Fx)(nil) - _ Fx = (*propertyfx.Fx)(nil) + _ Fx = (*secp256k1fx.Fx)(nil) + _ Fx = (*nftfx.Fx)(nil) + _ Fx = (*propertyfx.Fx)(nil) + _ verify.Verifiable = (*FxCredential)(nil) ) type ParsedFx struct { @@ -58,6 +59,10 @@ type FxOperation interface { } type FxCredential struct { - FxID ids.ID `serialize:"false" json:"fxID"` - verify.Verifiable `serialize:"true" json:"credential"` + FxID ids.ID `serialize:"false" json:"fxID"` + Credential verify.Verifiable `serialize:"true" json:"credential"` +} + +func (f *FxCredential) Verify() error { + return f.Credential.Verify() } diff --git a/avalanchego/vms/avm/genesis.go b/avalanchego/vms/avm/genesis.go index 506d2465..a3ca5ca2 100644 --- a/avalanchego/vms/avm/genesis.go +++ b/avalanchego/vms/avm/genesis.go @@ -1,9 +1,11 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avm import ( + "cmp" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/vms/avm/txs" ) @@ -19,6 +21,6 @@ type GenesisAsset struct { txs.CreateAssetTx `serialize:"true"` } -func (g *GenesisAsset) Less(other *GenesisAsset) bool { - return g.Alias < other.Alias +func (g *GenesisAsset) Compare(other *GenesisAsset) int { + return cmp.Compare(g.Alias, other.Alias) } diff --git a/avalanchego/vms/avm/genesis_test.go b/avalanchego/vms/avm/genesis_test.go index 10c7aac4..2e5da96f 100644 --- a/avalanchego/vms/avm/genesis_test.go +++ b/avalanchego/vms/avm/genesis_test.go @@ -1,27 +1,42 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package avm import ( + "fmt" "testing" "github.com/stretchr/testify/require" ) -func TestGenesisAssetLess(t *testing.T) { - require := require.New(t) - - var g1, g2 GenesisAsset - require.False(g1.Less(&g2)) - require.False(g2.Less(&g1)) - - g1 = GenesisAsset{ - Alias: "a", +func TestGenesisAssetCompare(t *testing.T) { + tests := []struct { + a *GenesisAsset + b *GenesisAsset + expected int + }{ + { + a: &GenesisAsset{}, + b: &GenesisAsset{}, + expected: 0, + }, + { + a: &GenesisAsset{ + Alias: "a", + }, + b: &GenesisAsset{ + Alias: "aa", + }, + expected: -1, + }, } - g2 = GenesisAsset{ - Alias: "aa", + for _, test := range tests { + t.Run(fmt.Sprintf("%s_%s_%d", test.a.Alias, test.b.Alias, test.expected), func(t *testing.T) { + require := require.New(t) + + require.Equal(test.expected, test.a.Compare(test.b)) + require.Equal(-test.expected, test.b.Compare(test.a)) + }) } - require.True(g1.Less(&g2)) - require.False(g2.Less(&g1)) } diff --git a/avalanchego/vms/avm/health.go b/avalanchego/vms/avm/health.go index 725418b1..6cb2e14b 100644 --- a/avalanchego/vms/avm/health.go +++ b/avalanchego/vms/avm/health.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avm diff --git a/avalanchego/vms/avm/index_test.go b/avalanchego/vms/avm/index_test.go index 8a2b681c..d5978e0e 100644 --- a/avalanchego/vms/avm/index_test.go +++ b/avalanchego/vms/avm/index_test.go @@ -1,385 +1,173 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package avm import ( "context" - "encoding/binary" - "encoding/json" - "fmt" "testing" "github.com/prometheus/client_golang/prometheus" - "github.com/stretchr/testify/require" - "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/database/manager" "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" - "github.com/ava-labs/avalanchego/utils/wrappers" - "github.com/ava-labs/avalanchego/version" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/vms/avm/config" "github.com/ava-labs/avalanchego/vms/avm/txs" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/index" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) -var indexEnabledAvmConfig = Config{ - IndexTransactions: true, -} - func TestIndexTransaction_Ordered(t *testing.T) { - genesisBytes := BuildGenesisTest(t) - issuer := make(chan common.Message, 1) - baseDBManager := manager.NewMemDB(version.Semantic1_0_0) - ctx := NewContext(t) - genesisTx := GetAVAXTxFromGenesisTest(genesisBytes, t) - avaxID := genesisTx.ID() - vm := setupTestVM(t, ctx, baseDBManager, genesisBytes, issuer, indexEnabledAvmConfig) + require := require.New(t) + + env := setup(t, &envConfig{ + vmStaticConfig: &config.Config{}, + }) defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } - ctx.Lock.Unlock() + require.NoError(env.vm.Shutdown(context.Background())) + env.vm.ctx.Lock.Unlock() }() key := keys[0] addr := key.PublicKey().Address() + txAssetID := avax.Asset{ID: 
env.genesisTx.ID()} - var uniqueTxs []*UniqueTx - txAssetID := avax.Asset{ID: avaxID} - - ctx.Lock.Lock() + var txs []*txs.Tx for i := 0; i < 5; i++ { - // create utxoID and assetIDs + // make utxo utxoID := avax.UTXOID{ TxID: ids.GenerateTestID(), } + utxo := buildUTXO(utxoID, txAssetID, addr) + env.vm.state.AddUTXO(utxo) - // build the transaction - tx := buildTX(utxoID, txAssetID, addr) + // make transaction + tx := buildTX(env.vm.ctx.XChainID, utxoID, txAssetID, addr) + require.NoError(tx.SignSECP256K1Fx(env.vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}})) - // sign the transaction - if err := signTX(vm.parser.Codec(), tx, key); err != nil { - t.Fatal(err) - } + env.vm.ctx.Lock.Unlock() - // Provide the platform UTXO - utxo := buildPlatformUTXO(utxoID, txAssetID, addr) + issueAndAccept(require, env.vm, env.issuer, tx) - // save utxo to state - vm.state.AddUTXO(utxo) + env.vm.ctx.Lock.Lock() - // issue transaction - if _, err := vm.IssueTx(tx.Bytes()); err != nil { - t.Fatalf("should have issued the transaction correctly but erred: %s", err) - } - - ctx.Lock.Unlock() - - msg := <-issuer - if msg != common.PendingTxs { - t.Fatalf("Wrong message") - } - - ctx.Lock.Lock() - - // get pending transactions - txs := vm.PendingTxs(context.Background()) - if len(txs) != 1 { - t.Fatalf("Should have returned %d tx(s)", 1) - } - - parsedTx := txs[0] - uniqueParsedTX := parsedTx.(*UniqueTx) - uniqueTxs = append(uniqueTxs, uniqueParsedTX) - - var inputUTXOs []*avax.UTXO - for _, utxoID := range uniqueParsedTX.InputUTXOs() { - utxo, err := vm.dagState.GetUTXOFromID(utxoID) - if err != nil { - t.Fatal(err) - } - - inputUTXOs = append(inputUTXOs, utxo) - } - - // index the transaction - err := vm.addressTxsIndexer.Accept(uniqueParsedTX.ID(), inputUTXOs, uniqueParsedTX.UTXOs()) - require.NoError(t, err) + txs = append(txs, tx) } - // ensure length is 5 - require.Len(t, uniqueTxs, 5) - // for each *UniqueTx check its indexed at right index - for i, tx := range uniqueTxs { - 
assertIndexedTX(t, vm.db, uint64(i), addr, txAssetID.ID, tx.ID()) + // for each tx check its indexed at right index + for i, tx := range txs { + assertIndexedTX(t, env.vm.db, uint64(i), addr, txAssetID.ID, tx.ID()) } - - assertLatestIdx(t, vm.db, addr, txAssetID.ID, 5) + assertLatestIdx(t, env.vm.db, addr, txAssetID.ID, 5) } func TestIndexTransaction_MultipleTransactions(t *testing.T) { - genesisBytes := BuildGenesisTest(t) - issuer := make(chan common.Message, 1) - baseDBManager := manager.NewMemDB(version.Semantic1_0_0) - ctx := NewContext(t) - genesisTx := GetAVAXTxFromGenesisTest(genesisBytes, t) - - avaxID := genesisTx.ID() - vm := setupTestVM(t, ctx, baseDBManager, genesisBytes, issuer, indexEnabledAvmConfig) + require := require.New(t) + + env := setup(t, &envConfig{ + vmStaticConfig: &config.Config{}, + }) defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } - ctx.Lock.Unlock() + require.NoError(env.vm.Shutdown(context.Background())) + env.vm.ctx.Lock.Unlock() }() - addressTxMap := map[ids.ShortID]*UniqueTx{} - txAssetID := avax.Asset{ID: avaxID} + addressTxMap := map[ids.ShortID]*txs.Tx{} + txAssetID := avax.Asset{ID: env.genesisTx.ID()} - ctx.Lock.Lock() for _, key := range keys { addr := key.PublicKey().Address() - // create utxoID and assetIDs + + // make utxo utxoID := avax.UTXOID{ TxID: ids.GenerateTestID(), } + utxo := buildUTXO(utxoID, txAssetID, addr) + env.vm.state.AddUTXO(utxo) - // build the transaction - tx := buildTX(utxoID, txAssetID, addr) - - // sign the transaction - if err := signTX(vm.parser.Codec(), tx, key); err != nil { - t.Fatal(err) - } + // make transaction + tx := buildTX(env.vm.ctx.XChainID, utxoID, txAssetID, addr) + require.NoError(tx.SignSECP256K1Fx(env.vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}})) - // Provide the platform UTXO - utxo := buildPlatformUTXO(utxoID, txAssetID, addr) - - // save utxo to state - vm.state.AddUTXO(utxo) + env.vm.ctx.Lock.Unlock() // issue transaction 
- if _, err := vm.IssueTx(tx.Bytes()); err != nil { - t.Fatalf("should have issued the transaction correctly but erred: %s", err) - } + issueAndAccept(require, env.vm, env.issuer, tx) - ctx.Lock.Unlock() + env.vm.ctx.Lock.Lock() - msg := <-issuer - if msg != common.PendingTxs { - t.Fatalf("Wrong message") - } - - ctx.Lock.Lock() - - // get pending transactions - txs := vm.PendingTxs(context.Background()) - if len(txs) != 1 { - t.Fatalf("Should have returned %d tx(s)", 1) - } - - parsedTx := txs[0] - uniqueParsedTX := parsedTx.(*UniqueTx) - addressTxMap[addr] = uniqueParsedTX - - var inputUTXOs []*avax.UTXO - for _, utxoID := range uniqueParsedTX.InputUTXOs() { - utxo, err := vm.dagState.GetUTXOFromID(utxoID) - if err != nil { - t.Fatal(err) - } - - inputUTXOs = append(inputUTXOs, utxo) - } - - // index the transaction - err := vm.addressTxsIndexer.Accept(uniqueParsedTX.ID(), inputUTXOs, uniqueParsedTX.UTXOs()) - require.NoError(t, err) + addressTxMap[addr] = tx } // ensure length is same as keys length - require.Len(t, addressTxMap, len(keys)) + require.Len(addressTxMap, len(keys)) // for each *UniqueTx check its indexed at right index for the right address - for key, tx := range addressTxMap { - assertIndexedTX(t, vm.db, uint64(0), key, txAssetID.ID, tx.ID()) - assertLatestIdx(t, vm.db, key, txAssetID.ID, 1) + for addr, tx := range addressTxMap { + assertIndexedTX(t, env.vm.db, 0, addr, txAssetID.ID, tx.ID()) + assertLatestIdx(t, env.vm.db, addr, txAssetID.ID, 1) } } func TestIndexTransaction_MultipleAddresses(t *testing.T) { - genesisBytes := BuildGenesisTest(t) - issuer := make(chan common.Message, 1) - baseDBManager := manager.NewMemDB(version.Semantic1_0_0) - ctx := NewContext(t) - genesisTx := GetAVAXTxFromGenesisTest(genesisBytes, t) - - avaxID := genesisTx.ID() - vm := setupTestVM(t, ctx, baseDBManager, genesisBytes, issuer, indexEnabledAvmConfig) + require := require.New(t) + + env := setup(t, &envConfig{ + vmStaticConfig: &config.Config{}, + }) defer 
func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } - ctx.Lock.Unlock() + require.NoError(env.vm.Shutdown(context.Background())) + env.vm.ctx.Lock.Unlock() }() - txAssetID := avax.Asset{ID: avaxID} addrs := make([]ids.ShortID, len(keys)) - for _, key := range keys { - addrs = append(addrs, key.PublicKey().Address()) + for i, key := range keys { + addrs[i] = key.PublicKey().Address() } + utils.Sort(addrs) - ctx.Lock.Lock() + txAssetID := avax.Asset{ID: env.genesisTx.ID()} key := keys[0] addr := key.PublicKey().Address() - // create utxoID and assetIDs + + // make utxo utxoID := avax.UTXOID{ TxID: ids.GenerateTestID(), } + utxo := buildUTXO(utxoID, txAssetID, addr) + env.vm.state.AddUTXO(utxo) - // build the transaction - tx := buildTX(utxoID, txAssetID, addrs...) - - // sign the transaction - if err := signTX(vm.parser.Codec(), tx, key); err != nil { - t.Fatal(err) - } - - // Provide the platform UTXO - utxo := buildPlatformUTXO(utxoID, txAssetID, addr) - - // save utxo to state - vm.state.AddUTXO(utxo) - - var inputUTXOs []*avax.UTXO //nolint:prealloc - for _, utxoID := range tx.Unsigned.InputUTXOs() { - utxo, err := vm.dagState.GetUTXOFromID(utxoID) - if err != nil { - t.Fatal(err) - } - - inputUTXOs = append(inputUTXOs, utxo) - } - - // index the transaction - err := vm.addressTxsIndexer.Accept(tx.ID(), inputUTXOs, tx.UTXOs()) - require.NoError(t, err) - require.NoError(t, err) - - assertIndexedTX(t, vm.db, uint64(0), addr, txAssetID.ID, tx.ID()) - assertLatestIdx(t, vm.db, addr, txAssetID.ID, 1) -} - -func TestIndexTransaction_UnorderedWrites(t *testing.T) { - genesisBytes := BuildGenesisTest(t) - issuer := make(chan common.Message, 1) - baseDBManager := manager.NewMemDB(version.Semantic1_0_0) - ctx := NewContext(t) - genesisTx := GetAVAXTxFromGenesisTest(genesisBytes, t) - avaxID := genesisTx.ID() - vm := setupTestVM(t, ctx, baseDBManager, genesisBytes, issuer, indexEnabledAvmConfig) - defer func() { - if err := 
vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } - ctx.Lock.Unlock() - }() - - addressTxMap := map[ids.ShortID]*UniqueTx{} - txAssetID := avax.Asset{ID: avaxID} + // make transaction + tx := buildTX(env.vm.ctx.XChainID, utxoID, txAssetID, addrs...) + require.NoError(tx.SignSECP256K1Fx(env.vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}})) - ctx.Lock.Lock() - for _, key := range keys { - addr := key.PublicKey().Address() - // create utxoID and assetIDs - utxoID := avax.UTXOID{ - TxID: ids.GenerateTestID(), - } - - // build the transaction - tx := buildTX(utxoID, txAssetID, addr) - - // sign the transaction - if err := signTX(vm.parser.Codec(), tx, key); err != nil { - t.Fatal(err) - } + env.vm.ctx.Lock.Unlock() - // Provide the platform UTXO - utxo := buildPlatformUTXO(utxoID, txAssetID, addr) - - // save utxo to state - vm.state.AddUTXO(utxo) - - // issue transaction - if _, err := vm.IssueTx(tx.Bytes()); err != nil { - t.Fatalf("should have issued the transaction correctly but erred: %s", err) - } - - ctx.Lock.Unlock() - - msg := <-issuer - if msg != common.PendingTxs { - t.Fatalf("Wrong message") - } + issueAndAccept(require, env.vm, env.issuer, tx) - ctx.Lock.Lock() + env.vm.ctx.Lock.Lock() - // get pending transactions - txs := vm.PendingTxs(context.Background()) - if len(txs) != 1 { - t.Fatalf("Should have returned %d tx(s)", 1) - } - - parsedTx := txs[0] - uniqueParsedTX := parsedTx.(*UniqueTx) - addressTxMap[addr] = uniqueParsedTX - - var inputUTXOs []*avax.UTXO - for _, utxoID := range uniqueParsedTX.InputUTXOs() { - utxo, err := vm.dagState.GetUTXOFromID(utxoID) - if err != nil { - t.Fatal(err) - } - - inputUTXOs = append(inputUTXOs, utxo) - } - - // index the transaction, NOT calling Accept(ids.ID) method - err := vm.addressTxsIndexer.Accept(uniqueParsedTX.ID(), inputUTXOs, uniqueParsedTX.UTXOs()) - require.NoError(t, err) - } - - // ensure length is same as keys length - require.Len(t, addressTxMap, len(keys)) - - // for each 
*UniqueTx check its indexed at right index for the right address - for key, tx := range addressTxMap { - assertIndexedTX(t, vm.db, uint64(0), key, txAssetID.ID, tx.ID()) - assertLatestIdx(t, vm.db, key, txAssetID.ID, 1) - } + assertIndexedTX(t, env.vm.db, 0, addr, txAssetID.ID, tx.ID()) + assertLatestIdx(t, env.vm.db, addr, txAssetID.ID, 1) } func TestIndexer_Read(t *testing.T) { - // setup vm, db etc - _, vm, _, _, _ := setup(t, true) + require := require.New(t) + env := setup(t, &envConfig{}) defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } - vm.ctx.Lock.Unlock() + require.NoError(env.vm.Shutdown(context.Background())) + env.vm.ctx.Lock.Unlock() }() // generate test address and asset IDs @@ -387,85 +175,85 @@ func TestIndexer_Read(t *testing.T) { addr := ids.GenerateTestShortID() // setup some fake txs under the above generated address and asset IDs - testTxCount := 25 - testTxs := setupTestTxsInDB(t, vm.db, addr, assetID, testTxCount) - require.Len(t, testTxs, 25) + testTxs := initTestTxIndex(t, env.vm.db, addr, assetID, 25) + require.Len(testTxs, 25) // read the pages, 5 items at a time - var cursor uint64 - var pageSize uint64 = 5 + var ( + cursor uint64 + pageSize uint64 = 5 + ) for cursor < 25 { - txIDs, err := vm.addressTxsIndexer.Read(addr[:], assetID, cursor, pageSize) - require.NoError(t, err) - require.Len(t, txIDs, 5) - require.Equal(t, txIDs, testTxs[cursor:cursor+pageSize]) + txIDs, err := env.vm.addressTxsIndexer.Read(addr[:], assetID, cursor, pageSize) + require.NoError(err) + require.Len(txIDs, 5) + require.Equal(txIDs, testTxs[cursor:cursor+pageSize]) cursor += pageSize } } func TestIndexingNewInitWithIndexingEnabled(t *testing.T) { - baseDBManager := manager.NewMemDB(version.Semantic1_0_0) - ctx := NewContext(t) + require := require.New(t) - db := baseDBManager.NewPrefixDBManager([]byte{1}).Current().Database + db := memdb.New() // start with indexing enabled - _, err := index.NewIndexer(db, ctx.Log, 
"", prometheus.NewRegistry(), true) - require.NoError(t, err) + _, err := index.NewIndexer(db, logging.NoWarn{}, "", prometheus.NewRegistry(), true) + require.NoError(err) // now disable indexing with allow-incomplete set to false _, err = index.NewNoIndexer(db, false) - require.Error(t, err) + require.ErrorIs(err, index.ErrCausesIncompleteIndex) // now disable indexing with allow-incomplete set to true _, err = index.NewNoIndexer(db, true) - require.NoError(t, err) + require.NoError(err) } func TestIndexingNewInitWithIndexingDisabled(t *testing.T) { - ctx := NewContext(t) + require := require.New(t) + db := memdb.New() // disable indexing with allow-incomplete set to false _, err := index.NewNoIndexer(db, false) - require.NoError(t, err) + require.NoError(err) // It's not OK to have an incomplete index when allowIncompleteIndices is false - _, err = index.NewIndexer(db, ctx.Log, "", prometheus.NewRegistry(), false) - require.Error(t, err) + _, err = index.NewIndexer(db, logging.NoWarn{}, "", prometheus.NewRegistry(), false) + require.ErrorIs(err, index.ErrIndexingRequiredFromGenesis) // It's OK to have an incomplete index when allowIncompleteIndices is true - _, err = index.NewIndexer(db, ctx.Log, "", prometheus.NewRegistry(), true) - require.NoError(t, err) + _, err = index.NewIndexer(db, logging.NoWarn{}, "", prometheus.NewRegistry(), true) + require.NoError(err) // It's OK to have an incomplete index when indexing currently disabled _, err = index.NewNoIndexer(db, false) - require.NoError(t, err) + require.NoError(err) // It's OK to have an incomplete index when allowIncompleteIndices is true _, err = index.NewNoIndexer(db, true) - require.NoError(t, err) + require.NoError(err) } func TestIndexingAllowIncomplete(t *testing.T) { - baseDBManager := manager.NewMemDB(version.Semantic1_0_0) - ctx := NewContext(t) + require := require.New(t) - prefixDB := baseDBManager.NewPrefixDBManager([]byte{1}).Current().Database - db := versiondb.New(prefixDB) + baseDB := 
memdb.New() + db := versiondb.New(baseDB) // disabled indexer will persist idxEnabled as false _, err := index.NewNoIndexer(db, false) - require.NoError(t, err) + require.NoError(err) - // we initialise with indexing enabled now and allow incomplete indexing as false - _, err = index.NewIndexer(db, ctx.Log, "", prometheus.NewRegistry(), false) + // we initialize with indexing enabled now and allow incomplete indexing as false + _, err = index.NewIndexer(db, logging.NoWarn{}, "", prometheus.NewRegistry(), false) // we should get error because: // - indexing was disabled previously // - node now is asked to enable indexing with allow incomplete set to false - require.Error(t, err) + require.ErrorIs(err, index.ErrIndexingRequiredFromGenesis) } -func buildPlatformUTXO(utxoID avax.UTXOID, txAssetID avax.Asset, addr ids.ShortID) *avax.UTXO { +func buildUTXO(utxoID avax.UTXOID, txAssetID avax.Asset, addr ids.ShortID) *avax.UTXO { return &avax.UTXO{ UTXOID: utxoID, Asset: txAssetID, @@ -479,11 +267,7 @@ func buildPlatformUTXO(utxoID avax.UTXOID, txAssetID avax.Asset, addr ids.ShortI } } -func signTX(codec codec.Manager, tx *txs.Tx, key *secp256k1.PrivateKey) error { - return tx.SignSECP256K1Fx(codec, [][]*secp256k1.PrivateKey{{key}}) -} - -func buildTX(utxoID avax.UTXOID, txAssetID avax.Asset, address ...ids.ShortID) *txs.Tx { +func buildTX(chainID ids.ID, utxoID avax.UTXOID, txAssetID avax.Asset, address ...ids.ShortID) *txs.Tx { return &txs.Tx{Unsigned: &txs.BaseTx{ BaseTx: avax.BaseTx{ NetworkID: constants.UnitTestID, @@ -510,79 +294,28 @@ func buildTX(utxoID avax.UTXOID, txAssetID avax.Asset, address ...ids.ShortID) * }} } -func setupTestVM(t *testing.T, ctx *snow.Context, baseDBManager manager.Manager, genesisBytes []byte, issuer chan common.Message, config Config) *VM { - vm := &VM{} - avmConfigBytes, err := json.Marshal(config) - require.NoError(t, err) - appSender := &common.SenderTest{T: t} - - err = vm.Initialize( - context.Background(), - ctx, - 
baseDBManager.NewPrefixDBManager([]byte{1}), - genesisBytes, - nil, - avmConfigBytes, - issuer, - []*common.Fx{{ - ID: ids.Empty, - Fx: &secp256k1fx.Fx{}, - }}, - appSender, - ) - if err != nil { - t.Fatal(err) - } - - vm.batchTimeout = 0 - - if err := vm.SetState(context.Background(), snow.Bootstrapping); err != nil { - t.Fatal(err) - } - - if err := vm.SetState(context.Background(), snow.NormalOp); err != nil { - t.Fatal(err) - } - return vm -} - func assertLatestIdx(t *testing.T, db database.Database, sourceAddress ids.ShortID, assetID ids.ID, expectedIdx uint64) { + require := require.New(t) + addressDB := prefixdb.New(sourceAddress[:], db) assetDB := prefixdb.New(assetID[:], addressDB) - expectedIdxBytes := make([]byte, wrappers.LongLen) - binary.BigEndian.PutUint64(expectedIdxBytes, expectedIdx) - + expectedIdxBytes := database.PackUInt64(expectedIdx) idxBytes, err := assetDB.Get([]byte("idx")) - require.NoError(t, err) - - require.EqualValues(t, expectedIdxBytes, idxBytes) + require.NoError(err) + require.Equal(expectedIdxBytes, idxBytes) } -func checkIndexedTX(db database.Database, index uint64, sourceAddress ids.ShortID, assetID ids.ID, transactionID ids.ID) error { +func assertIndexedTX(t *testing.T, db database.Database, index uint64, sourceAddress ids.ShortID, assetID ids.ID, transactionID ids.ID) { + require := require.New(t) + addressDB := prefixdb.New(sourceAddress[:], db) assetDB := prefixdb.New(assetID[:], addressDB) - idxBytes := make([]byte, wrappers.LongLen) - binary.BigEndian.PutUint64(idxBytes, index) - tx1Bytes, err := assetDB.Get(idxBytes) - if err != nil { - return err - } - - var txID ids.ID - copy(txID[:], tx1Bytes) - - if txID != transactionID { - return fmt.Errorf("txID %s not same as %s", txID, transactionID) - } - return nil -} - -func assertIndexedTX(t *testing.T, db database.Database, index uint64, sourceAddress ids.ShortID, assetID ids.ID, transactionID ids.ID) { - if err := checkIndexedTX(db, index, sourceAddress, assetID, 
transactionID); err != nil { - t.Fatal(err) - } + idxBytes := database.PackUInt64(index) + txID, err := database.GetID(assetDB, idxBytes) + require.NoError(err) + require.Equal(transactionID, txID) } // Sets up test tx IDs in DB in the following structure for the indexer to pick @@ -593,30 +326,27 @@ func assertIndexedTX(t *testing.T, db database.Database, index uint64, sourceAdd // - "idx": 2 // - 0: txID1 // - 1: txID1 -func setupTestTxsInDB(t *testing.T, db *versiondb.Database, address ids.ShortID, assetID ids.ID, txCount int) []ids.ID { - var testTxs []ids.ID +func initTestTxIndex(t *testing.T, db *versiondb.Database, address ids.ShortID, assetID ids.ID, txCount int) []ids.ID { + require := require.New(t) + + testTxs := make([]ids.ID, txCount) for i := 0; i < txCount; i++ { - testTxs = append(testTxs, ids.GenerateTestID()) + testTxs[i] = ids.GenerateTestID() } addressPrefixDB := prefixdb.New(address[:], db) assetPrefixDB := prefixdb.New(assetID[:], addressPrefixDB) - var idx uint64 - idxBytes := make([]byte, 8) - binary.BigEndian.PutUint64(idxBytes, idx) - for _, txID := range testTxs { + + for i, txID := range testTxs { + idxBytes := database.PackUInt64(uint64(i)) txID := txID - err := assetPrefixDB.Put(idxBytes, txID[:]) - require.NoError(t, err) - idx++ - binary.BigEndian.PutUint64(idxBytes, idx) + require.NoError(assetPrefixDB.Put(idxBytes, txID[:])) } _, err := db.CommitBatch() - require.NoError(t, err) + require.NoError(err) - err = assetPrefixDB.Put([]byte("idx"), idxBytes) - require.NoError(t, err) - err = db.Commit() - require.NoError(t, err) + idxBytes := database.PackUInt64(uint64(len(testTxs))) + require.NoError(assetPrefixDB.Put([]byte("idx"), idxBytes)) + require.NoError(db.Commit()) return testTxs } diff --git a/avalanchego/vms/avm/metrics/metrics.go b/avalanchego/vms/avm/metrics/metrics.go index 233dd65a..9e4053e1 100644 --- a/avalanchego/vms/avm/metrics/metrics.go +++ b/avalanchego/vms/avm/metrics/metrics.go @@ -1,4 +1,4 @@ -// Copyright (C) 
2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package metrics @@ -8,7 +8,7 @@ import ( "github.com/ava-labs/avalanchego/utils/metric" "github.com/ava-labs/avalanchego/utils/wrappers" - "github.com/ava-labs/avalanchego/vms/avm/blocks" + "github.com/ava-labs/avalanchego/vms/avm/block" "github.com/ava-labs/avalanchego/vms/avm/txs" ) @@ -23,7 +23,7 @@ type Metrics interface { // MarkBlockAccepted updates all metrics relating to the acceptance of a // block, including the underlying acceptance of the contained transactions. - MarkBlockAccepted(b blocks.Block) error + MarkBlockAccepted(b block.Block) error // MarkTxAccepted updates all metrics relating to the acceptance of a // transaction. // @@ -53,7 +53,7 @@ func (m *metrics) IncTxRefreshMisses() { m.numTxRefreshMisses.Inc() } -func (m *metrics) MarkBlockAccepted(b blocks.Block) error { +func (m *metrics) MarkBlockAccepted(b block.Block) error { for _, tx := range b.Txs() { if err := tx.Unsigned.Visit(m.txMetrics); err != nil { return err diff --git a/avalanchego/vms/avm/metrics/mock_metrics.go b/avalanchego/vms/avm/metrics/mock_metrics.go index d002c1dc..2ae4a078 100644 --- a/avalanchego/vms/avm/metrics/mock_metrics.go +++ b/avalanchego/vms/avm/metrics/mock_metrics.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/vms/avm/metrics (interfaces: Metrics) +// +// Generated by this command: +// +// mockgen -package=metrics -destination=vms/avm/metrics/mock_metrics.go github.com/ava-labs/avalanchego/vms/avm/metrics Metrics +// // Package metrics is a generated GoMock package. 
package metrics @@ -11,10 +13,10 @@ import ( http "net/http" reflect "reflect" - blocks "github.com/ava-labs/avalanchego/vms/avm/blocks" + block "github.com/ava-labs/avalanchego/vms/avm/block" txs "github.com/ava-labs/avalanchego/vms/avm/txs" - gomock "github.com/golang/mock/gomock" rpc "github.com/gorilla/rpc/v2" + gomock "go.uber.org/mock/gomock" ) // MockMetrics is a mock of Metrics interface. @@ -47,7 +49,7 @@ func (m *MockMetrics) AfterRequest(arg0 *rpc.RequestInfo) { } // AfterRequest indicates an expected call of AfterRequest. -func (mr *MockMetricsMockRecorder) AfterRequest(arg0 interface{}) *gomock.Call { +func (mr *MockMetricsMockRecorder) AfterRequest(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AfterRequest", reflect.TypeOf((*MockMetrics)(nil).AfterRequest), arg0) } @@ -97,13 +99,13 @@ func (m *MockMetrics) InterceptRequest(arg0 *rpc.RequestInfo) *http.Request { } // InterceptRequest indicates an expected call of InterceptRequest. -func (mr *MockMetricsMockRecorder) InterceptRequest(arg0 interface{}) *gomock.Call { +func (mr *MockMetricsMockRecorder) InterceptRequest(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InterceptRequest", reflect.TypeOf((*MockMetrics)(nil).InterceptRequest), arg0) } // MarkBlockAccepted mocks base method. -func (m *MockMetrics) MarkBlockAccepted(arg0 blocks.Block) error { +func (m *MockMetrics) MarkBlockAccepted(arg0 block.Block) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MarkBlockAccepted", arg0) ret0, _ := ret[0].(error) @@ -111,7 +113,7 @@ func (m *MockMetrics) MarkBlockAccepted(arg0 blocks.Block) error { } // MarkBlockAccepted indicates an expected call of MarkBlockAccepted. 
-func (mr *MockMetricsMockRecorder) MarkBlockAccepted(arg0 interface{}) *gomock.Call { +func (mr *MockMetricsMockRecorder) MarkBlockAccepted(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarkBlockAccepted", reflect.TypeOf((*MockMetrics)(nil).MarkBlockAccepted), arg0) } @@ -125,7 +127,7 @@ func (m *MockMetrics) MarkTxAccepted(arg0 *txs.Tx) error { } // MarkTxAccepted indicates an expected call of MarkTxAccepted. -func (mr *MockMetricsMockRecorder) MarkTxAccepted(arg0 interface{}) *gomock.Call { +func (mr *MockMetricsMockRecorder) MarkTxAccepted(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarkTxAccepted", reflect.TypeOf((*MockMetrics)(nil).MarkTxAccepted), arg0) } diff --git a/avalanchego/vms/avm/metrics/tx_metrics.go b/avalanchego/vms/avm/metrics/tx_metrics.go index 217eeb18..0e5cd184 100644 --- a/avalanchego/vms/avm/metrics/tx_metrics.go +++ b/avalanchego/vms/avm/metrics/tx_metrics.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package metrics @@ -45,7 +45,7 @@ func newTxMetric( ) prometheus.Counter { txMetric := prometheus.NewCounter(prometheus.CounterOpts{ Namespace: namespace, - Name: fmt.Sprintf("%s_txs_accepted", txName), + Name: txName + "_txs_accepted", Help: fmt.Sprintf("Number of %s transactions accepted", txName), }) errs.Add(registerer.Register(txMetric)) diff --git a/avalanchego/vms/avm/network/atomic.go b/avalanchego/vms/avm/network/atomic.go index c6b011da..0774ed36 100644 --- a/avalanchego/vms/avm/network/atomic.go +++ b/avalanchego/vms/avm/network/atomic.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package network @@ -51,12 +51,14 @@ func (a *atomic) CrossChainAppRequestFailed( ctx context.Context, chainID ids.ID, requestID uint32, + appErr *common.AppError, ) error { h := a.handler.Get() return h.CrossChainAppRequestFailed( ctx, chainID, requestID, + appErr, ) } @@ -96,12 +98,14 @@ func (a *atomic) AppRequestFailed( ctx context.Context, nodeID ids.NodeID, requestID uint32, + appErr *common.AppError, ) error { h := a.handler.Get() return h.AppRequestFailed( ctx, nodeID, requestID, + appErr, ) } diff --git a/avalanchego/vms/avm/network/config.go b/avalanchego/vms/avm/network/config.go new file mode 100644 index 00000000..8536504d --- /dev/null +++ b/avalanchego/vms/avm/network/config.go @@ -0,0 +1,66 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package network + +import ( + "time" + + "github.com/ava-labs/avalanchego/utils/units" +) + +var DefaultConfig = Config{ + MaxValidatorSetStaleness: time.Minute, + TargetGossipSize: 20 * units.KiB, + PullGossipPollSize: 1, + PullGossipFrequency: 1500 * time.Millisecond, + PullGossipThrottlingPeriod: 10 * time.Second, + PullGossipThrottlingLimit: 2, + ExpectedBloomFilterElements: 8 * 1024, + ExpectedBloomFilterFalsePositiveProbability: .01, + MaxBloomFilterFalsePositiveProbability: .05, + LegacyPushGossipCacheSize: 512, +} + +type Config struct { + // MaxValidatorSetStaleness limits how old of a validator set the network + // will use for peer sampling and rate limiting. + MaxValidatorSetStaleness time.Duration `json:"max-validator-set-staleness"` + // TargetGossipSize is the number of bytes that will be attempted to be + // sent when pushing transactions and when responded to transaction pull + // requests. + TargetGossipSize int `json:"target-gossip-size"` + // PullGossipPollSize is the number of validators to sample when performing + // a round of pull gossip. 
+ PullGossipPollSize int `json:"pull-gossip-poll-size"` + // PullGossipFrequency is how frequently rounds of pull gossip are + // performed. + PullGossipFrequency time.Duration `json:"pull-gossip-frequency"` + // PullGossipThrottlingPeriod is how large of a window the throttler should + // use. + PullGossipThrottlingPeriod time.Duration `json:"pull-gossip-throttling-period"` + // PullGossipThrottlingLimit is the number of pull querys that are allowed + // by a validator in every throttling window. + PullGossipThrottlingLimit int `json:"pull-gossip-throttling-limit"` + // ExpectedBloomFilterElements is the number of elements to expect when + // creating a new bloom filter. The larger this number is, the larger the + // bloom filter will be. + ExpectedBloomFilterElements int `json:"expected-bloom-filter-elements"` + // ExpectedBloomFilterFalsePositiveProbability is the expected probability + // of a false positive after having inserted ExpectedBloomFilterElements + // into a bloom filter. The smaller this number is, the larger the bloom + // filter will be. + ExpectedBloomFilterFalsePositiveProbability float64 `json:"expected-bloom-filter-false-positive-probability"` + // MaxBloomFilterFalsePositiveProbability is used to determine when the + // bloom filter should be refreshed. Once the expected probability of a + // false positive exceeds this value, the bloom filter will be regenerated. + // The smaller this number is, the more frequently that the bloom filter + // will be regenerated. + MaxBloomFilterFalsePositiveProbability float64 `json:"max-bloom-filter-false-positive-probability"` + // LegacyPushGossipCacheSize tracks the most recently received transactions + // and ensures to only gossip them once. + // + // Deprecated: The legacy push gossip mechanism is deprecated in favor of + // the p2p SDK's push gossip mechanism. 
+ LegacyPushGossipCacheSize int `json:"legacy-push-gossip-cache-size"` +} diff --git a/avalanchego/vms/avm/network/gossip.go b/avalanchego/vms/avm/network/gossip.go new file mode 100644 index 00000000..0876f122 --- /dev/null +++ b/avalanchego/vms/avm/network/gossip.go @@ -0,0 +1,161 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package network + +import ( + "context" + "fmt" + "sync" + "time" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/network/p2p" + "github.com/ava-labs/avalanchego/network/p2p/gossip" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/vms/avm/txs" + "github.com/ava-labs/avalanchego/vms/avm/txs/mempool" +) + +var ( + _ p2p.Handler = (*txGossipHandler)(nil) + _ gossip.Set[*txs.Tx] = (*gossipMempool)(nil) + _ gossip.Marshaller[*txs.Tx] = (*txParser)(nil) +) + +// bloomChurnMultiplier is the number used to multiply the size of the mempool +// to determine how large of a bloom filter to create. 
+const bloomChurnMultiplier = 3 + +// txGossipHandler is the handler called when serving gossip messages +type txGossipHandler struct { + p2p.NoOpHandler + appGossipHandler p2p.Handler + appRequestHandler p2p.Handler +} + +func (t txGossipHandler) AppGossip( + ctx context.Context, + nodeID ids.NodeID, + gossipBytes []byte, +) { + t.appGossipHandler.AppGossip(ctx, nodeID, gossipBytes) +} + +func (t txGossipHandler) AppRequest( + ctx context.Context, + nodeID ids.NodeID, + deadline time.Time, + requestBytes []byte, +) ([]byte, error) { + return t.appRequestHandler.AppRequest(ctx, nodeID, deadline, requestBytes) +} + +type txParser struct { + parser txs.Parser +} + +func (*txParser) MarshalGossip(tx *txs.Tx) ([]byte, error) { + return tx.Bytes(), nil +} + +func (g *txParser) UnmarshalGossip(bytes []byte) (*txs.Tx, error) { + return g.parser.ParseTx(bytes) +} + +func newGossipMempool( + mempool mempool.Mempool, + registerer prometheus.Registerer, + log logging.Logger, + txVerifier TxVerifier, + parser txs.Parser, + minTargetElements int, + targetFalsePositiveProbability, + resetFalsePositiveProbability float64, +) (*gossipMempool, error) { + bloom, err := gossip.NewBloomFilter(registerer, "mempool_bloom_filter", minTargetElements, targetFalsePositiveProbability, resetFalsePositiveProbability) + return &gossipMempool{ + Mempool: mempool, + log: log, + txVerifier: txVerifier, + parser: parser, + bloom: bloom, + }, err +} + +type gossipMempool struct { + mempool.Mempool + log logging.Logger + txVerifier TxVerifier + parser txs.Parser + + lock sync.RWMutex + bloom *gossip.BloomFilter +} + +// Add is called by the p2p SDK when handling transactions that were pushed to +// us and when handling transactions that were pulled from a peer. If this +// returns a nil error while handling push gossip, the p2p SDK will queue the +// transaction to push gossip as well. 
+func (g *gossipMempool) Add(tx *txs.Tx) error { + txID := tx.ID() + if _, ok := g.Mempool.Get(txID); ok { + return fmt.Errorf("attempted to issue %w: %s ", mempool.ErrDuplicateTx, txID) + } + + if reason := g.Mempool.GetDropReason(txID); reason != nil { + // If the tx is being dropped - just ignore it + // + // TODO: Should we allow re-verification of the transaction even if it + // failed previously? + return reason + } + + // Verify the tx at the currently preferred state + if err := g.txVerifier.VerifyTx(tx); err != nil { + g.Mempool.MarkDropped(txID, err) + return err + } + + return g.AddVerified(tx) +} + +func (g *gossipMempool) AddVerified(tx *txs.Tx) error { + if err := g.Mempool.Add(tx); err != nil { + g.Mempool.MarkDropped(tx.ID(), err) + return err + } + + g.lock.Lock() + defer g.lock.Unlock() + + g.bloom.Add(tx) + reset, err := gossip.ResetBloomFilterIfNeeded(g.bloom, g.Mempool.Len()*bloomChurnMultiplier) + if err != nil { + return err + } + + if reset { + g.log.Debug("resetting bloom filter") + g.Mempool.Iterate(func(tx *txs.Tx) bool { + g.bloom.Add(tx) + return true + }) + } + + g.Mempool.RequestBuildBlock() + return nil +} + +func (g *gossipMempool) Iterate(f func(*txs.Tx) bool) { + g.Mempool.Iterate(f) +} + +func (g *gossipMempool) GetFilter() (bloom []byte, salt []byte) { + g.lock.RLock() + defer g.lock.RUnlock() + + return g.bloom.Marshal() +} diff --git a/avalanchego/vms/avm/network/gossip_test.go b/avalanchego/vms/avm/network/gossip_test.go new file mode 100644 index 00000000..8681af3b --- /dev/null +++ b/avalanchego/vms/avm/network/gossip_test.go @@ -0,0 +1,134 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package network + +import ( + "testing" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/vms/avm/fxs" + "github.com/ava-labs/avalanchego/vms/avm/txs" + "github.com/ava-labs/avalanchego/vms/avm/txs/mempool" + "github.com/ava-labs/avalanchego/vms/components/avax" + "github.com/ava-labs/avalanchego/vms/secp256k1fx" +) + +var _ TxVerifier = (*testVerifier)(nil) + +type testVerifier struct { + err error +} + +func (v testVerifier) VerifyTx(*txs.Tx) error { + return v.err +} + +func TestMarshaller(t *testing.T) { + require := require.New(t) + + parser, err := txs.NewParser( + time.Time{}, + time.Time{}, + []fxs.Fx{ + &secp256k1fx.Fx{}, + }, + ) + require.NoError(err) + + marhsaller := txParser{ + parser: parser, + } + + want := &txs.Tx{Unsigned: &txs.BaseTx{}} + require.NoError(want.Initialize(parser.Codec())) + + bytes, err := marhsaller.MarshalGossip(want) + require.NoError(err) + + got, err := marhsaller.UnmarshalGossip(bytes) + require.NoError(err) + require.Equal(want.GossipID(), got.GossipID()) +} + +func TestGossipMempoolAdd(t *testing.T) { + require := require.New(t) + + metrics := prometheus.NewRegistry() + toEngine := make(chan common.Message, 1) + + baseMempool, err := mempool.New("", metrics, toEngine) + require.NoError(err) + + parser, err := txs.NewParser(time.Time{}, time.Time{}, nil) + require.NoError(err) + + mempool, err := newGossipMempool( + baseMempool, + metrics, + logging.NoLog{}, + testVerifier{}, + parser, + DefaultConfig.ExpectedBloomFilterElements, + DefaultConfig.ExpectedBloomFilterFalsePositiveProbability, + DefaultConfig.MaxBloomFilterFalsePositiveProbability, + ) + require.NoError(err) + + tx := &txs.Tx{ + Unsigned: &txs.BaseTx{ + BaseTx: avax.BaseTx{ + Ins: []*avax.TransferableInput{}, + }, + }, + TxID: 
ids.GenerateTestID(), + } + + require.NoError(mempool.Add(tx)) + require.True(mempool.bloom.Has(tx)) +} + +func TestGossipMempoolAddVerified(t *testing.T) { + require := require.New(t) + + metrics := prometheus.NewRegistry() + toEngine := make(chan common.Message, 1) + + baseMempool, err := mempool.New("", metrics, toEngine) + require.NoError(err) + + parser, err := txs.NewParser(time.Time{}, time.Time{}, nil) + require.NoError(err) + + mempool, err := newGossipMempool( + baseMempool, + metrics, + logging.NoLog{}, + testVerifier{ + err: errTest, // We shouldn't be attempting to verify the tx in this flow + }, + parser, + DefaultConfig.ExpectedBloomFilterElements, + DefaultConfig.ExpectedBloomFilterFalsePositiveProbability, + DefaultConfig.MaxBloomFilterFalsePositiveProbability, + ) + require.NoError(err) + + tx := &txs.Tx{ + Unsigned: &txs.BaseTx{ + BaseTx: avax.BaseTx{ + Ins: []*avax.TransferableInput{}, + }, + }, + TxID: ids.GenerateTestID(), + } + + require.NoError(mempool.AddVerified(tx)) + require.True(mempool.bloom.Has(tx)) +} diff --git a/avalanchego/vms/avm/network/network.go b/avalanchego/vms/avm/network/network.go index 6e83d97e..9cad3cb9 100644 --- a/avalanchego/vms/avm/network/network.go +++ b/avalanchego/vms/avm/network/network.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package network @@ -6,43 +6,40 @@ package network import ( "context" "sync" + "time" + "github.com/prometheus/client_golang/prometheus" "go.uber.org/zap" "github.com/ava-labs/avalanchego/cache" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/network/p2p" + "github.com/ava-labs/avalanchego/network/p2p/gossip" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/engine/common" - "github.com/ava-labs/avalanchego/vms/avm/blocks/executor" + "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/vms/avm/txs" "github.com/ava-labs/avalanchego/vms/avm/txs/mempool" "github.com/ava-labs/avalanchego/vms/components/message" ) -// We allow [recentTxsCacheSize] to be fairly large because we only store hashes -// in the cache, not entire transactions. -const recentTxsCacheSize = 512 +const txGossipHandlerID = 0 -var _ Network = (*network)(nil) - -type Network interface { - common.AppHandler +var ( + _ common.AppHandler = (*Network)(nil) + _ validators.Connector = (*Network)(nil) +) - // IssueTx verifies the transaction at the currently preferred state, adds - // it to the mempool, and gossips it to the network. - // - // Invariant: Assumes the context lock is held. 
- IssueTx(context.Context, *txs.Tx) error -} +type Network struct { + *p2p.Network -type network struct { - // We embed a noop handler for all unhandled messages - common.AppHandler + txPushGossiper gossip.Accumulator[*txs.Tx] + txPullGossiper gossip.Gossiper + txPullGossipFrequency time.Duration ctx *snow.Context parser txs.Parser - manager executor.Manager - mempool mempool.Mempool + mempool *gossipMempool appSender common.AppSender // gossip related attributes @@ -53,26 +50,128 @@ type network struct { func New( ctx *snow.Context, parser txs.Parser, - manager executor.Manager, + txVerifier TxVerifier, mempool mempool.Mempool, appSender common.AppSender, -) Network { - return &network{ - AppHandler: common.NewNoOpAppHandler(ctx.Log), + registerer prometheus.Registerer, + config Config, +) (*Network, error) { + p2pNetwork, err := p2p.NewNetwork(ctx.Log, appSender, registerer, "p2p") + if err != nil { + return nil, err + } + + marshaller := &txParser{ + parser: parser, + } + validators := p2p.NewValidators( + p2pNetwork.Peers, + ctx.Log, + ctx.SubnetID, + ctx.ValidatorState, + config.MaxValidatorSetStaleness, + ) + txGossipClient := p2pNetwork.NewClient( + txGossipHandlerID, + p2p.WithValidatorSampling(validators), + ) + txGossipMetrics, err := gossip.NewMetrics(registerer, "tx") + if err != nil { + return nil, err + } + + txPushGossiper := gossip.NewPushGossiper[*txs.Tx]( + marshaller, + txGossipClient, + txGossipMetrics, + config.TargetGossipSize, + ) + + gossipMempool, err := newGossipMempool( + mempool, + registerer, + ctx.Log, + txVerifier, + parser, + config.ExpectedBloomFilterElements, + config.ExpectedBloomFilterFalsePositiveProbability, + config.MaxBloomFilterFalsePositiveProbability, + ) + if err != nil { + return nil, err + } - ctx: ctx, - parser: parser, - manager: manager, - mempool: mempool, - appSender: appSender, + var txPullGossiper gossip.Gossiper + txPullGossiper = gossip.NewPullGossiper[*txs.Tx]( + ctx.Log, + marshaller, + gossipMempool, + 
txGossipClient, + txGossipMetrics, + config.PullGossipPollSize, + ) + + // Gossip requests are only served if a node is a validator + txPullGossiper = gossip.ValidatorGossiper{ + Gossiper: txPullGossiper, + NodeID: ctx.NodeID, + Validators: validators, + } + + handler := gossip.NewHandler[*txs.Tx]( + ctx.Log, + marshaller, + txPushGossiper, + gossipMempool, + txGossipMetrics, + config.TargetGossipSize, + ) + + validatorHandler := p2p.NewValidatorHandler( + p2p.NewThrottlerHandler( + handler, + p2p.NewSlidingWindowThrottler( + config.PullGossipThrottlingPeriod, + config.PullGossipThrottlingLimit, + ), + ctx.Log, + ), + validators, + ctx.Log, + ) + + // We allow pushing txs between all peers, but only serve gossip requests + // from validators + txGossipHandler := txGossipHandler{ + appGossipHandler: handler, + appRequestHandler: validatorHandler, + } + + if err := p2pNetwork.AddHandler(txGossipHandlerID, txGossipHandler); err != nil { + return nil, err + } + + return &Network{ + Network: p2pNetwork, + txPushGossiper: txPushGossiper, + txPullGossiper: txPullGossiper, + txPullGossipFrequency: config.PullGossipFrequency, + ctx: ctx, + parser: parser, + mempool: gossipMempool, + appSender: appSender, recentTxs: &cache.LRU[ids.ID, struct{}]{ - Size: recentTxsCacheSize, + Size: config.LegacyPushGossipCacheSize, }, - } + }, nil } -func (n *network) AppGossip(ctx context.Context, nodeID ids.NodeID, msgBytes []byte) error { +func (n *Network) Gossip(ctx context.Context) { + gossip.Every(ctx, n.ctx.Log, n.txPullGossiper, n.txPullGossipFrequency) +} + +func (n *Network) AppGossip(ctx context.Context, nodeID ids.NodeID, msgBytes []byte) error { n.ctx.Log.Debug("called AppGossip message handler", zap.Stringer("nodeID", nodeID), zap.Int("messageLen", len(msgBytes)), @@ -80,10 +179,11 @@ func (n *network) AppGossip(ctx context.Context, nodeID ids.NodeID, msgBytes []b msgIntf, err := message.Parse(msgBytes) if err != nil { - n.ctx.Log.Debug("dropping AppGossip message", + 
n.ctx.Log.Debug("forwarding AppGossip message to SDK network", zap.String("reason", "failed to parse message"), ) - return nil + + return n.Network.AppGossip(ctx, nodeID, msgBytes) } msg, ok := msgIntf.(*message.Tx) @@ -104,81 +204,75 @@ func (n *network) AppGossip(ctx context.Context, nodeID ids.NodeID, msgBytes []b return nil } - // We need to grab the context lock here to avoid racy behavior with - // transaction verification + mempool modifications. - n.ctx.Lock.Lock() - err = n.issueTx(tx) - n.ctx.Lock.Unlock() - if err == nil { + if err := n.mempool.Add(tx); err == nil { txID := tx.ID() - n.gossipTx(ctx, txID, msgBytes) + n.txPushGossiper.Add(tx) + if err := n.txPushGossiper.Gossip(ctx); err != nil { + n.ctx.Log.Error("failed to gossip tx", + zap.Stringer("txID", tx.ID()), + zap.Error(err), + ) + } + n.gossipTxMessage(ctx, txID, msgBytes) } return nil } -func (n *network) IssueTx(ctx context.Context, tx *txs.Tx) error { - if err := n.issueTx(tx); err != nil { +// IssueTx attempts to add a tx to the mempool, after verifying it. If the tx is +// added to the mempool, it will attempt to push gossip the tx to random peers +// in the network using both the legacy and p2p SDK. +// +// If the tx is already in the mempool, mempool.ErrDuplicateTx will be +// returned. +// If the tx is not added to the mempool, an error will be returned. +func (n *Network) IssueTx(ctx context.Context, tx *txs.Tx) error { + if err := n.mempool.Add(tx); err != nil { return err } + return n.gossipTx(ctx, tx) +} - txBytes := tx.Bytes() - msg := &message.Tx{ - Tx: txBytes, - } - msgBytes, err := message.Build(msg) - if err != nil { +// IssueVerifiedTx attempts to add a tx to the mempool, without first verifying +// it. If the tx is added to the mempool, it will attempt to push gossip the tx +// to random peers in the network using both the legacy and p2p SDK. +// +// If the tx is already in the mempool, mempool.ErrDuplicateTx will be +// returned. 
+// If the tx is not added to the mempool, an error will be returned. +func (n *Network) IssueVerifiedTx(ctx context.Context, tx *txs.Tx) error { + if err := n.mempool.AddVerified(tx); err != nil { return err } - - txID := tx.ID() - n.gossipTx(ctx, txID, msgBytes) - return nil + return n.gossipTx(ctx, tx) } -// returns nil if the tx is in the mempool -func (n *network) issueTx(tx *txs.Tx) error { - txID := tx.ID() - if n.mempool.Has(txID) { - // The tx is already in the mempool - return nil - } - - if reason := n.mempool.GetDropReason(txID); reason != nil { - // If the tx is being dropped - just ignore it - // - // TODO: Should we allow re-verification of the transaction even if it - // failed previously? - return reason - } - - // Verify the tx at the currently preferred state - if err := n.manager.VerifyTx(tx); err != nil { - n.ctx.Log.Debug("tx failed verification", - zap.Stringer("txID", txID), +// gossipTx pushes the tx to peers using both the legacy and p2p SDK. +func (n *Network) gossipTx(ctx context.Context, tx *txs.Tx) error { + n.txPushGossiper.Add(tx) + if err := n.txPushGossiper.Gossip(ctx); err != nil { + n.ctx.Log.Error("failed to gossip tx", + zap.Stringer("txID", tx.ID()), zap.Error(err), ) - - n.mempool.MarkDropped(txID, err) - return err } - if err := n.mempool.Add(tx); err != nil { - n.ctx.Log.Debug("tx failed to be added to the mempool", - zap.Stringer("txID", txID), - zap.Error(err), - ) - - n.mempool.MarkDropped(txID, err) + txBytes := tx.Bytes() + msg := &message.Tx{ + Tx: txBytes, + } + msgBytes, err := message.Build(msg) + if err != nil { return err } - n.mempool.RequestBuildBlock() + txID := tx.ID() + n.gossipTxMessage(ctx, txID, msgBytes) return nil } -func (n *network) gossipTx(ctx context.Context, txID ids.ID, msgBytes []byte) { - // This lock is just to ensure there isn't racy behavior between checking if - // the tx was gossiped and marking the tx as gossiped. +// gossipTxMessage pushes the tx message to peers using the legacy format. 
+// If the tx was recently gossiped, this function does nothing. +func (n *Network) gossipTxMessage(ctx context.Context, txID ids.ID, msgBytes []byte) { n.recentTxsLock.Lock() _, has := n.recentTxs.Get(txID) n.recentTxs.Put(txID, struct{}{}) diff --git a/avalanchego/vms/avm/network/network_test.go b/avalanchego/vms/avm/network/network_test.go index 873c3d2e..acef8737 100644 --- a/avalanchego/vms/avm/network/network_test.go +++ b/avalanchego/vms/avm/network/network_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package network @@ -9,15 +9,15 @@ import ( "testing" "time" - "github.com/golang/mock/gomock" - + "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/vms/avm/blocks/executor" + "github.com/ava-labs/avalanchego/vms/avm/block/executor" "github.com/ava-labs/avalanchego/vms/avm/fxs" "github.com/ava-labs/avalanchego/vms/avm/txs" "github.com/ava-labs/avalanchego/vms/avm/txs/mempool" @@ -28,7 +28,22 @@ import ( "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) -var errTest = errors.New("test error") +var ( + testConfig = Config{ + MaxValidatorSetStaleness: time.Second, + TargetGossipSize: 1, + PullGossipPollSize: 1, + PullGossipFrequency: time.Second, + PullGossipThrottlingPeriod: time.Second, + PullGossipThrottlingLimit: 1, + ExpectedBloomFilterElements: 10, + ExpectedBloomFilterFalsePositiveProbability: .1, + MaxBloomFilterFalsePositiveProbability: .5, + LegacyPushGossipCacheSize: 512, + } + + errTest = errors.New("test error") +) func TestNetworkAppGossip(t *testing.T) { testTx := &txs.Tx{ @@ -43,40 +58,31 @@ func TestNetworkAppGossip(t 
*testing.T) { } parser, err := txs.NewParser( + time.Time{}, time.Time{}, []fxs.Fx{ &secp256k1fx.Fx{}, }, ) - require.NoError(t, err) - require.NoError(t, parser.InitializeTx(testTx)) + require.NoError(t, testTx.Initialize(parser.Codec())) type test struct { - name string - msgBytesFunc func() []byte - mempoolFunc func(*gomock.Controller) mempool.Mempool - appSenderFunc func(*gomock.Controller) common.AppSender + name string + msgBytesFunc func() []byte + mempoolFunc func(*gomock.Controller) mempool.Mempool + txVerifierFunc func(*gomock.Controller) TxVerifier + appSenderFunc func(*gomock.Controller) common.AppSender } tests := []test{ { - // Shouldn't attempt to issue or gossip the tx name: "invalid message bytes", msgBytesFunc: func() []byte { return []byte{0x00} }, - mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { - // Unused in this test - return nil - }, - appSenderFunc: func(ctrl *gomock.Controller) common.AppSender { - // Unused in this test - return nil - }, }, { - // Shouldn't attempt to issue or gossip the tx name: "invalid tx bytes", msgBytesFunc: func() []byte { msg := message.Tx{ @@ -86,18 +92,42 @@ func TestNetworkAppGossip(t *testing.T) { require.NoError(t, err) return msgBytes }, + }, + { + name: "tx already in mempool", + msgBytesFunc: func() []byte { + msg := message.Tx{ + Tx: testTx.Bytes(), + } + msgBytes, err := message.Build(&msg) + require.NoError(t, err) + return msgBytes + }, mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { - // Unused in this test - return mempool.NewMockMempool(ctrl) + mempool := mempool.NewMockMempool(ctrl) + mempool.EXPECT().Get(gomock.Any()).Return(testTx, true) + return mempool }, - appSenderFunc: func(ctrl *gomock.Controller) common.AppSender { - // Unused in this test - return common.NewMockSender(ctrl) + }, + { + name: "tx previously dropped", + msgBytesFunc: func() []byte { + msg := message.Tx{ + Tx: testTx.Bytes(), + } + msgBytes, err := message.Build(&msg) + require.NoError(t, err) + 
return msgBytes + }, + mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { + mempool := mempool.NewMockMempool(ctrl) + mempool.EXPECT().Get(gomock.Any()).Return(nil, false) + mempool.EXPECT().GetDropReason(gomock.Any()).Return(errTest) + return mempool }, }, { - // Issue returns nil because mempool has tx. We should gossip the tx. - name: "issuance succeeds", + name: "transaction invalid", msgBytesFunc: func() []byte { msg := message.Tx{ Tx: testTx.Bytes(), @@ -108,18 +138,19 @@ func TestNetworkAppGossip(t *testing.T) { }, mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { mempool := mempool.NewMockMempool(ctrl) - mempool.EXPECT().Has(gomock.Any()).Return(true) + mempool.EXPECT().Get(gomock.Any()).Return(nil, false) + mempool.EXPECT().GetDropReason(gomock.Any()).Return(nil) + mempool.EXPECT().MarkDropped(gomock.Any(), gomock.Any()) return mempool }, - appSenderFunc: func(ctrl *gomock.Controller) common.AppSender { - appSender := common.NewMockSender(ctrl) - appSender.EXPECT().SendAppGossip(gomock.Any(), gomock.Any()) - return appSender + txVerifierFunc: func(ctrl *gomock.Controller) TxVerifier { + txVerifier := executor.NewMockManager(ctrl) + txVerifier.EXPECT().VerifyTx(gomock.Any()).Return(errTest) + return txVerifier }, }, { - // Issue returns error because tx was dropped. We shouldn't gossip the tx. 
- name: "issuance fails", + name: "happy path", msgBytesFunc: func() []byte { msg := message.Tx{ Tx: testTx.Bytes(), @@ -130,13 +161,22 @@ func TestNetworkAppGossip(t *testing.T) { }, mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { mempool := mempool.NewMockMempool(ctrl) - mempool.EXPECT().Has(gomock.Any()).Return(false) - mempool.EXPECT().GetDropReason(gomock.Any()).Return(errTest) + mempool.EXPECT().Get(gomock.Any()).Return(nil, false) + mempool.EXPECT().GetDropReason(gomock.Any()).Return(nil) + mempool.EXPECT().Add(gomock.Any()).Return(nil) + mempool.EXPECT().Len().Return(0) + mempool.EXPECT().RequestBuildBlock() return mempool }, + txVerifierFunc: func(ctrl *gomock.Controller) TxVerifier { + txVerifier := executor.NewMockManager(ctrl) + txVerifier.EXPECT().VerifyTx(gomock.Any()).Return(nil) + return txVerifier + }, appSenderFunc: func(ctrl *gomock.Controller) common.AppSender { - // Unused in this test - return common.NewMockSender(ctrl) + appSender := common.NewMockSender(ctrl) + appSender.EXPECT().SendAppGossip(gomock.Any(), gomock.Any()).Return(nil).Times(2) + return appSender }, }, } @@ -145,9 +185,9 @@ func TestNetworkAppGossip(t *testing.T) { t.Run(tt.name, func(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() parser, err := txs.NewParser( + time.Time{}, time.Time{}, []fxs.Fx{ &secp256k1fx.Fx{}, @@ -157,28 +197,51 @@ func TestNetworkAppGossip(t *testing.T) { ) require.NoError(err) - n := New( + mempoolFunc := func(ctrl *gomock.Controller) mempool.Mempool { + return mempool.NewMockMempool(ctrl) + } + if tt.mempoolFunc != nil { + mempoolFunc = tt.mempoolFunc + } + + txVerifierFunc := func(ctrl *gomock.Controller) TxVerifier { + return executor.NewMockManager(ctrl) + } + if tt.txVerifierFunc != nil { + txVerifierFunc = tt.txVerifierFunc + } + + appSenderFunc := func(ctrl *gomock.Controller) common.AppSender { + return common.NewMockSender(ctrl) + } + if tt.appSenderFunc != nil { + appSenderFunc = 
tt.appSenderFunc + } + + n, err := New( &snow.Context{ Log: logging.NoLog{}, }, parser, - executor.NewMockManager(ctrl), // Manager is unused in this test - tt.mempoolFunc(ctrl), - tt.appSenderFunc(ctrl), + txVerifierFunc(ctrl), + mempoolFunc(ctrl), + appSenderFunc(ctrl), + prometheus.NewRegistry(), + testConfig, ) - err = n.AppGossip(context.Background(), ids.GenerateTestNodeID(), tt.msgBytesFunc()) require.NoError(err) + require.NoError(n.AppGossip(context.Background(), ids.GenerateTestNodeID(), tt.msgBytesFunc())) }) } } func TestNetworkIssueTx(t *testing.T) { type test struct { - name string - mempoolFunc func(*gomock.Controller) mempool.Mempool - managerFunc func(*gomock.Controller) executor.Manager - appSenderFunc func(*gomock.Controller) common.AppSender - expectedErr error + name string + mempoolFunc func(*gomock.Controller) mempool.Mempool + txVerifierFunc func(*gomock.Controller) TxVerifier + appSenderFunc func(*gomock.Controller) common.AppSender + expectedErr error } tests := []test{ @@ -186,56 +249,34 @@ func TestNetworkIssueTx(t *testing.T) { name: "mempool has transaction", mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { mempool := mempool.NewMockMempool(ctrl) - mempool.EXPECT().Has(gomock.Any()).Return(true) + mempool.EXPECT().Get(gomock.Any()).Return(nil, true) return mempool }, - managerFunc: func(ctrl *gomock.Controller) executor.Manager { - // Unused in this test - return executor.NewMockManager(ctrl) - }, - appSenderFunc: func(ctrl *gomock.Controller) common.AppSender { - // Should gossip the tx - appSender := common.NewMockSender(ctrl) - appSender.EXPECT().SendAppGossip(gomock.Any(), gomock.Any()).Return(nil) - return appSender - }, - expectedErr: nil, + expectedErr: mempool.ErrDuplicateTx, }, { name: "transaction marked as dropped in mempool", mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { mempool := mempool.NewMockMempool(ctrl) - mempool.EXPECT().Has(gomock.Any()).Return(false) + 
mempool.EXPECT().Get(gomock.Any()).Return(nil, false) mempool.EXPECT().GetDropReason(gomock.Any()).Return(errTest) return mempool }, - managerFunc: func(ctrl *gomock.Controller) executor.Manager { - // Unused in this test - return executor.NewMockManager(ctrl) - }, - appSenderFunc: func(ctrl *gomock.Controller) common.AppSender { - // Shouldn't gossip the tx - return common.NewMockSender(ctrl) - }, expectedErr: errTest, }, { name: "transaction invalid", mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { mempool := mempool.NewMockMempool(ctrl) - mempool.EXPECT().Has(gomock.Any()).Return(false) + mempool.EXPECT().Get(gomock.Any()).Return(nil, false) mempool.EXPECT().GetDropReason(gomock.Any()).Return(nil) mempool.EXPECT().MarkDropped(gomock.Any(), gomock.Any()) return mempool }, - managerFunc: func(ctrl *gomock.Controller) executor.Manager { - manager := executor.NewMockManager(ctrl) - manager.EXPECT().VerifyTx(gomock.Any()).Return(errTest) - return manager - }, - appSenderFunc: func(ctrl *gomock.Controller) common.AppSender { - // Shouldn't gossip the tx - return common.NewMockSender(ctrl) + txVerifierFunc: func(ctrl *gomock.Controller) TxVerifier { + txVerifier := executor.NewMockManager(ctrl) + txVerifier.EXPECT().VerifyTx(gomock.Any()).Return(errTest) + return txVerifier }, expectedErr: errTest, }, @@ -243,20 +284,16 @@ func TestNetworkIssueTx(t *testing.T) { name: "can't add transaction to mempool", mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { mempool := mempool.NewMockMempool(ctrl) - mempool.EXPECT().Has(gomock.Any()).Return(false) + mempool.EXPECT().Get(gomock.Any()).Return(nil, false) mempool.EXPECT().GetDropReason(gomock.Any()).Return(nil) mempool.EXPECT().Add(gomock.Any()).Return(errTest) mempool.EXPECT().MarkDropped(gomock.Any(), gomock.Any()) return mempool }, - managerFunc: func(ctrl *gomock.Controller) executor.Manager { - manager := executor.NewMockManager(ctrl) - manager.EXPECT().VerifyTx(gomock.Any()).Return(nil) - return 
manager - }, - appSenderFunc: func(ctrl *gomock.Controller) common.AppSender { - // Shouldn't gossip the tx - return common.NewMockSender(ctrl) + txVerifierFunc: func(ctrl *gomock.Controller) TxVerifier { + txVerifier := executor.NewMockManager(ctrl) + txVerifier.EXPECT().VerifyTx(gomock.Any()).Return(nil) + return txVerifier }, expectedErr: errTest, }, @@ -264,21 +301,21 @@ func TestNetworkIssueTx(t *testing.T) { name: "happy path", mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { mempool := mempool.NewMockMempool(ctrl) - mempool.EXPECT().Has(gomock.Any()).Return(false) + mempool.EXPECT().Get(gomock.Any()).Return(nil, false) mempool.EXPECT().GetDropReason(gomock.Any()).Return(nil) mempool.EXPECT().Add(gomock.Any()).Return(nil) + mempool.EXPECT().Len().Return(0) mempool.EXPECT().RequestBuildBlock() return mempool }, - managerFunc: func(ctrl *gomock.Controller) executor.Manager { - manager := executor.NewMockManager(ctrl) - manager.EXPECT().VerifyTx(gomock.Any()).Return(nil) - return manager + txVerifierFunc: func(ctrl *gomock.Controller) TxVerifier { + txVerifier := executor.NewMockManager(ctrl) + txVerifier.EXPECT().VerifyTx(gomock.Any()).Return(nil) + return txVerifier }, appSenderFunc: func(ctrl *gomock.Controller) common.AppSender { - // Should gossip the tx appSender := common.NewMockSender(ctrl) - appSender.EXPECT().SendAppGossip(gomock.Any(), gomock.Any()).Return(nil) + appSender.EXPECT().SendAppGossip(gomock.Any(), gomock.Any()).Return(nil).Times(2) return appSender }, expectedErr: nil, @@ -289,9 +326,9 @@ func TestNetworkIssueTx(t *testing.T) { t.Run(tt.name, func(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() parser, err := txs.NewParser( + time.Time{}, time.Time{}, []fxs.Fx{ &secp256k1fx.Fx{}, @@ -301,27 +338,136 @@ func TestNetworkIssueTx(t *testing.T) { ) require.NoError(err) - n := New( + mempoolFunc := func(ctrl *gomock.Controller) mempool.Mempool { + return mempool.NewMockMempool(ctrl) + } 
+ if tt.mempoolFunc != nil { + mempoolFunc = tt.mempoolFunc + } + + txVerifierFunc := func(ctrl *gomock.Controller) TxVerifier { + return executor.NewMockManager(ctrl) + } + if tt.txVerifierFunc != nil { + txVerifierFunc = tt.txVerifierFunc + } + + appSenderFunc := func(ctrl *gomock.Controller) common.AppSender { + return common.NewMockSender(ctrl) + } + if tt.appSenderFunc != nil { + appSenderFunc = tt.appSenderFunc + } + + n, err := New( &snow.Context{ Log: logging.NoLog{}, }, parser, - tt.managerFunc(ctrl), - tt.mempoolFunc(ctrl), - tt.appSenderFunc(ctrl), + txVerifierFunc(ctrl), + mempoolFunc(ctrl), + appSenderFunc(ctrl), + prometheus.NewRegistry(), + testConfig, ) + require.NoError(err) err = n.IssueTx(context.Background(), &txs.Tx{}) require.ErrorIs(err, tt.expectedErr) }) } } +func TestNetworkIssueVerifiedTx(t *testing.T) { + type test struct { + name string + mempoolFunc func(*gomock.Controller) mempool.Mempool + appSenderFunc func(*gomock.Controller) common.AppSender + expectedErr error + } + + tests := []test{ + { + name: "can't add transaction to mempool", + mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { + mempool := mempool.NewMockMempool(ctrl) + mempool.EXPECT().Add(gomock.Any()).Return(errTest) + mempool.EXPECT().MarkDropped(gomock.Any(), gomock.Any()) + return mempool + }, + expectedErr: errTest, + }, + { + name: "happy path", + mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { + mempool := mempool.NewMockMempool(ctrl) + mempool.EXPECT().Add(gomock.Any()).Return(nil) + mempool.EXPECT().Len().Return(0) + mempool.EXPECT().RequestBuildBlock() + return mempool + }, + appSenderFunc: func(ctrl *gomock.Controller) common.AppSender { + appSender := common.NewMockSender(ctrl) + appSender.EXPECT().SendAppGossip(gomock.Any(), gomock.Any()).Return(nil).Times(2) + return appSender + }, + expectedErr: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + ctrl := 
gomock.NewController(t) + + parser, err := txs.NewParser( + time.Time{}, + time.Time{}, + []fxs.Fx{ + &secp256k1fx.Fx{}, + &nftfx.Fx{}, + &propertyfx.Fx{}, + }, + ) + require.NoError(err) + + mempoolFunc := func(ctrl *gomock.Controller) mempool.Mempool { + return mempool.NewMockMempool(ctrl) + } + if tt.mempoolFunc != nil { + mempoolFunc = tt.mempoolFunc + } + + appSenderFunc := func(ctrl *gomock.Controller) common.AppSender { + return common.NewMockSender(ctrl) + } + if tt.appSenderFunc != nil { + appSenderFunc = tt.appSenderFunc + } + + n, err := New( + &snow.Context{ + Log: logging.NoLog{}, + }, + parser, + executor.NewMockManager(ctrl), // Should never verify a tx + mempoolFunc(ctrl), + appSenderFunc(ctrl), + prometheus.NewRegistry(), + testConfig, + ) + require.NoError(err) + err = n.IssueVerifiedTx(context.Background(), &txs.Tx{}) + require.ErrorIs(err, tt.expectedErr) + }) + } +} + func TestNetworkGossipTx(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() parser, err := txs.NewParser( + time.Time{}, time.Time{}, []fxs.Fx{ &secp256k1fx.Fx{}, @@ -331,7 +477,7 @@ func TestNetworkGossipTx(t *testing.T) { appSender := common.NewMockSender(ctrl) - nIntf := New( + n, err := New( &snow.Context{ Log: logging.NoLog{}, }, @@ -339,19 +485,20 @@ func TestNetworkGossipTx(t *testing.T) { executor.NewMockManager(ctrl), mempool.NewMockMempool(ctrl), appSender, + prometheus.NewRegistry(), + testConfig, ) - n, ok := nIntf.(*network) - require.True(ok) + require.NoError(err) // Case: Tx was recently gossiped txID := ids.GenerateTestID() n.recentTxs.Put(txID, struct{}{}) - n.gossipTx(context.Background(), txID, []byte{}) + n.gossipTxMessage(context.Background(), txID, []byte{}) // Didn't make a call to SendAppGossip // Case: Tx was not recently gossiped msgBytes := []byte{1, 2, 3} appSender.EXPECT().SendAppGossip(gomock.Any(), msgBytes).Return(nil) - n.gossipTx(context.Background(), ids.GenerateTestID(), msgBytes) + 
n.gossipTxMessage(context.Background(), ids.GenerateTestID(), msgBytes) // Did make a call to SendAppGossip } diff --git a/avalanchego/vms/avm/network/tx_verifier.go b/avalanchego/vms/avm/network/tx_verifier.go new file mode 100644 index 00000000..09f86928 --- /dev/null +++ b/avalanchego/vms/avm/network/tx_verifier.go @@ -0,0 +1,36 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package network + +import ( + "sync" + + "github.com/ava-labs/avalanchego/vms/avm/txs" +) + +var _ TxVerifier = (*LockedTxVerifier)(nil) + +type TxVerifier interface { + // VerifyTx verifies that the transaction should be issued into the mempool. + VerifyTx(tx *txs.Tx) error +} + +type LockedTxVerifier struct { + lock sync.Locker + txVerifier TxVerifier +} + +func (l *LockedTxVerifier) VerifyTx(tx *txs.Tx) error { + l.lock.Lock() + defer l.lock.Unlock() + + return l.txVerifier.VerifyTx(tx) +} + +func NewLockedTxVerifier(lock sync.Locker, txVerifier TxVerifier) *LockedTxVerifier { + return &LockedTxVerifier{ + lock: lock, + txVerifier: txVerifier, + } +} diff --git a/avalanchego/vms/avm/pubsub_filterer.go b/avalanchego/vms/avm/pubsub_filterer.go index 24297034..caf0ba34 100644 --- a/avalanchego/vms/avm/pubsub_filterer.go +++ b/avalanchego/vms/avm/pubsub_filterer.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avm diff --git a/avalanchego/vms/avm/pubsub_filterer_test.go b/avalanchego/vms/avm/pubsub_filterer_test.go index 9d5fb5c2..0059b221 100644 --- a/avalanchego/vms/avm/pubsub_filterer_test.go +++ b/avalanchego/vms/avm/pubsub_filterer_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package avm @@ -42,8 +42,7 @@ func TestFilter(t *testing.T) { addrBytes := addrID[:] fp := pubsub.NewFilterParam() - err := fp.Add(addrBytes) - require.NoError(err) + require.NoError(fp.Add(addrBytes)) parser := NewPubSubFilterer(&tx) fr, _ := parser.Filter([]pubsub.Filter{&mockFilter{addr: addrBytes}}) diff --git a/avalanchego/vms/avm/service.go b/avalanchego/vms/avm/service.go index 499b9aff..4dcc210d 100644 --- a/avalanchego/vms/avm/service.go +++ b/avalanchego/vms/avm/service.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avm import ( + "encoding/json" "errors" "fmt" "math" @@ -18,7 +19,6 @@ import ( "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/formatting" - "github.com/ava-labs/avalanchego/utils/json" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/vms/avm/txs" @@ -28,6 +28,7 @@ import ( "github.com/ava-labs/avalanchego/vms/nftfx" "github.com/ava-labs/avalanchego/vms/secp256k1fx" + avajson "github.com/ava-labs/avalanchego/utils/json" safemath "github.com/ava-labs/avalanchego/utils/math" ) @@ -70,6 +71,9 @@ func (s *Service) GetBlock(_ *http.Request, args *api.GetBlockArgs, reply *api.G zap.Stringer("encoding", args.Encoding), ) + s.vm.ctx.Lock.Lock() + defer s.vm.ctx.Lock.Unlock() + if s.vm.chainManager == nil { return errNotLinearized } @@ -79,6 +83,7 @@ func (s *Service) GetBlock(_ *http.Request, args *api.GetBlockArgs, reply *api.G } reply.Encoding = args.Encoding + var result any if args.Encoding == formatting.JSON { block.InitCtx(s.vm.ctx) for _, tx := range block.Txs() { @@ -92,16 +97,16 @@ func (s *Service) GetBlock(_ *http.Request, args *api.GetBlockArgs, reply *api.G return err } } - reply.Block = block - return nil - } - - 
reply.Block, err = formatting.Encode(args.Encoding, block.Bytes()) - if err != nil { - return fmt.Errorf("couldn't encode block %s as string: %w", args.BlockID, err) + result = block + } else { + result, err = formatting.Encode(args.Encoding, block.Bytes()) + if err != nil { + return fmt.Errorf("couldn't encode block %s as string: %w", args.BlockID, err) + } } - return nil + reply.Block, err = json.Marshal(result) + return err } // GetBlockByHeight returns the block at the given height. @@ -109,15 +114,18 @@ func (s *Service) GetBlockByHeight(_ *http.Request, args *api.GetBlockByHeightAr s.vm.ctx.Log.Debug("API called", zap.String("service", "avm"), zap.String("method", "getBlockByHeight"), - zap.Uint64("height", args.Height), + zap.Uint64("height", uint64(args.Height)), ) + s.vm.ctx.Lock.Lock() + defer s.vm.ctx.Lock.Unlock() + if s.vm.chainManager == nil { return errNotLinearized } reply.Encoding = args.Encoding - blockID, err := s.vm.state.GetBlockID(args.Height) + blockID, err := s.vm.state.GetBlockIDAtHeight(uint64(args.Height)) if err != nil { return fmt.Errorf("couldn't get block at height %d: %w", args.Height, err) } @@ -130,6 +138,7 @@ func (s *Service) GetBlockByHeight(_ *http.Request, args *api.GetBlockByHeightAr return fmt.Errorf("couldn't get block with id %s: %w", blockID, err) } + var result any if args.Encoding == formatting.JSON { block.InitCtx(s.vm.ctx) for _, tx := range block.Txs() { @@ -143,16 +152,16 @@ func (s *Service) GetBlockByHeight(_ *http.Request, args *api.GetBlockByHeightAr return err } } - reply.Block = block - return nil - } - - reply.Block, err = formatting.Encode(args.Encoding, block.Bytes()) - if err != nil { - return fmt.Errorf("couldn't encode block %s as string: %w", blockID, err) + result = block + } else { + result, err = formatting.Encode(args.Encoding, block.Bytes()) + if err != nil { + return fmt.Errorf("couldn't encode block %s as string: %w", blockID, err) + } } - return nil + reply.Block, err = json.Marshal(result) + 
return err } // GetHeight returns the height of the last accepted block. @@ -162,6 +171,9 @@ func (s *Service) GetHeight(_ *http.Request, _ *struct{}, reply *api.GetHeightRe zap.String("method", "getHeight"), ) + s.vm.ctx.Lock.Lock() + defer s.vm.ctx.Lock.Unlock() + if s.vm.chainManager == nil { return errNotLinearized } @@ -176,7 +188,7 @@ func (s *Service) GetHeight(_ *http.Request, _ *struct{}, reply *api.GetHeightRe return fmt.Errorf("couldn't get block with id %s: %w", blockID, err) } - reply.Height = json.Uint64(block.Height()) + reply.Height = avajson.Uint64(block.Height()) return nil } @@ -192,18 +204,17 @@ func (s *Service) IssueTx(_ *http.Request, args *api.FormattedTx, reply *api.JSO if err != nil { return fmt.Errorf("problem decoding transaction: %w", err) } - txID, err := s.vm.IssueTx(txBytes) + + tx, err := s.vm.parser.ParseTx(txBytes) if err != nil { + s.vm.ctx.Log.Debug("failed to parse tx", + zap.Error(err), + ) return err } - reply.TxID = txID - return nil -} - -// TODO: After the chain is linearized, remove this. 
-func (s *Service) IssueStopVertex(_ *http.Request, _, _ *struct{}) error { - return s.vm.issueStopVertex() + reply.TxID, err = s.vm.issueTx(tx) + return err } // GetTxStatusReply defines the GetTxStatus replies returned from the API @@ -214,9 +225,9 @@ type GetTxStatusReply struct { type GetAddressTxsArgs struct { api.JSONAddress // Cursor used as a page index / offset - Cursor json.Uint64 `json:"cursor"` + Cursor avajson.Uint64 `json:"cursor"` // PageSize num of items per page - PageSize json.Uint64 `json:"pageSize"` + PageSize avajson.Uint64 `json:"pageSize"` // AssetID defaulted to AVAX if omitted or left blank AssetID string `json:"assetID"` } @@ -224,7 +235,7 @@ type GetAddressTxsArgs struct { type GetAddressTxsReply struct { TxIDs []ids.ID `json:"txIDs"` // Cursor used as a page index / offset - Cursor json.Uint64 `json:"cursor"` + Cursor avajson.Uint64 `json:"cursor"` } // GetAddressTxs returns list of transactions for a given address @@ -264,6 +275,9 @@ func (s *Service) GetAddressTxs(_ *http.Request, args *GetAddressTxsArgs, reply zap.Uint64("pageSize", pageSize), ) + s.vm.ctx.Lock.Lock() + defer s.vm.ctx.Lock.Unlock() + // Read transactions from the indexer reply.TxIDs, err = s.vm.addressTxsIndexer.Read(address[:], assetID, cursor, pageSize) if err != nil { @@ -278,7 +292,7 @@ func (s *Service) GetAddressTxs(_ *http.Request, args *GetAddressTxsArgs, reply // To get the next set of tx IDs, the user should provide this cursor. // e.g. if they provided cursor 5, and read 6 tx IDs, they should start // next time from index (cursor) 11. 
- reply.Cursor = json.Uint64(cursor + uint64(len(reply.TxIDs))) + reply.Cursor = avajson.Uint64(cursor + uint64(len(reply.TxIDs))) return nil } @@ -297,10 +311,10 @@ func (s *Service) GetTxStatus(_ *http.Request, args *api.JSONTxID, reply *GetTxS return errNilTxID } - chainState := &chainState{ - State: s.vm.state, - } - _, err := chainState.GetTx(args.TxID) + s.vm.ctx.Lock.Lock() + defer s.vm.ctx.Lock.Unlock() + + _, err := s.vm.state.GetTx(args.TxID) switch err { case nil: reply.Status = choices.Accepted @@ -324,30 +338,33 @@ func (s *Service) GetTx(_ *http.Request, args *api.GetTxArgs, reply *api.GetTxRe return errNilTxID } - chainState := &chainState{ - State: s.vm.state, - } - tx, err := chainState.GetTx(args.TxID) + s.vm.ctx.Lock.Lock() + defer s.vm.ctx.Lock.Unlock() + + tx, err := s.vm.state.GetTx(args.TxID) if err != nil { return err } - reply.Encoding = args.Encoding + + var result any if args.Encoding == formatting.JSON { - reply.Tx = tx - return tx.Unsigned.Visit(&txInit{ + err = tx.Unsigned.Visit(&txInit{ tx: tx, ctx: s.vm.ctx, typeToFxIndex: s.vm.typeToFxIndex, fxs: s.vm.fxs, }) + result = tx + } else { + result, err = formatting.Encode(args.Encoding, tx.Bytes()) } - - reply.Tx, err = formatting.Encode(args.Encoding, tx.Bytes()) if err != nil { - return fmt.Errorf("couldn't encode tx as string: %w", err) + return err } - return nil + + reply.Tx, err = json.Marshal(result) + return err } // GetUTXOs gets all utxos for passed in addresses @@ -403,6 +420,10 @@ func (s *Service) GetUTXOs(_ *http.Request, args *api.GetUTXOsArgs, reply *api.G if limit <= 0 || int(maxPageSize) < limit { limit = int(maxPageSize) } + + s.vm.ctx.Lock.Lock() + defer s.vm.ctx.Lock.Unlock() + if sourceChain == s.vm.ctx.ChainID { utxos, endAddr, endUTXOID, err = avax.GetPaginatedUTXOs( s.vm.state, @@ -444,7 +465,7 @@ func (s *Service) GetUTXOs(_ *http.Request, args *api.GetUTXOsArgs, reply *api.G reply.EndIndex.Address = endAddress reply.EndIndex.UTXO = endUTXOID.String() - 
reply.NumFetched = json.Uint64(len(utxos)) + reply.NumFetched = avajson.Uint64(len(utxos)) reply.Encoding = args.Encoding return nil } @@ -457,9 +478,9 @@ type GetAssetDescriptionArgs struct { // GetAssetDescriptionReply defines the GetAssetDescription replies returned from the API type GetAssetDescriptionReply struct { FormattedAssetID - Name string `json:"name"` - Symbol string `json:"symbol"` - Denomination json.Uint8 `json:"denomination"` + Name string `json:"name"` + Symbol string `json:"symbol"` + Denomination avajson.Uint8 `json:"denomination"` } // GetAssetDescription creates an empty account with the name passed in @@ -475,10 +496,10 @@ func (s *Service) GetAssetDescription(_ *http.Request, args *GetAssetDescription return err } - chainState := &chainState{ - State: s.vm.state, - } - tx, err := chainState.GetTx(assetID) + s.vm.ctx.Lock.Lock() + defer s.vm.ctx.Lock.Unlock() + + tx, err := s.vm.state.GetTx(assetID) if err != nil { return err } @@ -490,7 +511,7 @@ func (s *Service) GetAssetDescription(_ *http.Request, args *GetAssetDescription reply.AssetID = assetID reply.Name = createAssetTx.Name reply.Symbol = createAssetTx.Symbol - reply.Denomination = json.Uint8(createAssetTx.Denomination) + reply.Denomination = avajson.Uint8(createAssetTx.Denomination) return nil } @@ -504,8 +525,8 @@ type GetBalanceArgs struct { // GetBalanceReply defines the GetBalance replies returned from the API type GetBalanceReply struct { - Balance json.Uint64 `json:"balance"` - UTXOIDs []avax.UTXOID `json:"utxoIDs"` + Balance avajson.Uint64 `json:"balance"` + UTXOIDs []avax.UTXOID `json:"utxoIDs"` } // GetBalance returns the balance of an asset held by an address. 
@@ -531,8 +552,10 @@ func (s *Service) GetBalance(_ *http.Request, args *GetBalanceArgs, reply *GetBa return err } - addrSet := set.Set[ids.ShortID]{} - addrSet.Add(addr) + addrSet := set.Of(addr) + + s.vm.ctx.Lock.Lock() + defer s.vm.ctx.Lock.Unlock() utxos, err := avax.GetAllUTXOs(s.vm.state, addrSet) if err != nil { @@ -558,7 +581,7 @@ func (s *Service) GetBalance(_ *http.Request, args *GetBalanceArgs, reply *GetBa if err != nil { return err } - reply.Balance = json.Uint64(amt) + reply.Balance = avajson.Uint64(amt) reply.UTXOIDs = append(reply.UTXOIDs, utxo.UTXOID) } @@ -566,8 +589,8 @@ func (s *Service) GetBalance(_ *http.Request, args *GetBalanceArgs, reply *GetBa } type Balance struct { - AssetID string `json:"asset"` - Balance json.Uint64 `json:"balance"` + AssetID string `json:"asset"` + Balance avajson.Uint64 `json:"balance"` } type GetAllBalancesArgs struct { @@ -599,8 +622,10 @@ func (s *Service) GetAllBalances(_ *http.Request, args *GetAllBalancesArgs, repl if err != nil { return fmt.Errorf("problem parsing address '%s': %w", args.Address, err) } - addrSet := set.Set[ids.ShortID]{} - addrSet.Add(address) + addrSet := set.Of(address) + + s.vm.ctx.Lock.Lock() + defer s.vm.ctx.Lock.Unlock() utxos, err := avax.GetAllUTXOs(s.vm.state, addrSet) if err != nil { @@ -637,7 +662,7 @@ func (s *Service) GetAllBalances(_ *http.Request, args *GetAllBalancesArgs, repl alias := s.vm.PrimaryAliasOrDefault(assetID) reply.Balances[i] = Balance{ AssetID: alias, - Balance: json.Uint64(balances[assetID]), + Balance: avajson.Uint64(balances[assetID]), } i++ } @@ -647,14 +672,14 @@ func (s *Service) GetAllBalances(_ *http.Request, args *GetAllBalancesArgs, repl // Holder describes how much an address owns of an asset type Holder struct { - Amount json.Uint64 `json:"amount"` - Address string `json:"address"` + Amount avajson.Uint64 `json:"amount"` + Address string `json:"address"` } // Owners describes who can perform an action type Owners struct { - Threshold json.Uint32 
`json:"threshold"` - Minters []string `json:"minters"` + Threshold avajson.Uint32 `json:"threshold"` + Minters []string `json:"minters"` } // CreateAssetArgs are arguments for passing into CreateAsset @@ -684,29 +709,48 @@ func (s *Service) CreateAsset(_ *http.Request, args *CreateAssetArgs, reply *Ass zap.Int("numMinters", len(args.MinterSets)), ) + tx, changeAddr, err := s.buildCreateAssetTx(args) + if err != nil { + return err + } + + assetID, err := s.vm.issueTx(tx) + if err != nil { + return fmt.Errorf("problem issuing transaction: %w", err) + } + + reply.AssetID = assetID + reply.ChangeAddr, err = s.vm.FormatLocalAddress(changeAddr) + return err +} + +func (s *Service) buildCreateAssetTx(args *CreateAssetArgs) (*txs.Tx, ids.ShortID, error) { if len(args.InitialHolders) == 0 && len(args.MinterSets) == 0 { - return errNoHoldersOrMinters + return nil, ids.ShortEmpty, errNoHoldersOrMinters } // Parse the from addresses fromAddrs, err := avax.ParseServiceAddresses(s.vm, args.From) if err != nil { - return err + return nil, ids.ShortEmpty, err } + s.vm.ctx.Lock.Lock() + defer s.vm.ctx.Lock.Unlock() + // Get the UTXOs/keys for the from addresses utxos, kc, err := s.vm.LoadUser(args.Username, args.Password, fromAddrs) if err != nil { - return err + return nil, ids.ShortEmpty, err } // Parse the change address. 
if len(kc.Keys) == 0 { - return errNoKeys + return nil, ids.ShortEmpty, errNoKeys } changeAddr, err := s.vm.selectChangeAddr(kc.Keys[0].PublicKey().Address(), args.ChangeAddr) if err != nil { - return err + return nil, ids.ShortEmpty, err } amountsSpent, ins, keys, err := s.vm.Spend( @@ -717,7 +761,7 @@ func (s *Service) CreateAsset(_ *http.Request, args *CreateAssetArgs, reply *Ass }, ) if err != nil { - return err + return nil, ids.ShortEmpty, err } outs := []*avax.TransferableOutput{} @@ -742,7 +786,7 @@ func (s *Service) CreateAsset(_ *http.Request, args *CreateAssetArgs, reply *Ass for _, holder := range args.InitialHolders { addr, err := avax.ParseServiceAddress(s.vm, holder.Address) if err != nil { - return err + return nil, ids.ShortEmpty, err } initialState.Outs = append(initialState.Outs, &secp256k1fx.TransferOutput{ Amt: uint64(holder.Amount), @@ -761,15 +805,17 @@ func (s *Service) CreateAsset(_ *http.Request, args *CreateAssetArgs, reply *Ass } minterAddrsSet, err := avax.ParseServiceAddresses(s.vm, owner.Minters) if err != nil { - return err + return nil, ids.ShortEmpty, err } minter.Addrs = minterAddrsSet.List() utils.Sort(minter.Addrs) initialState.Outs = append(initialState.Outs, minter) } - initialState.Sort(s.vm.parser.Codec()) - tx := txs.Tx{Unsigned: &txs.CreateAssetTx{ + codec := s.vm.parser.Codec() + initialState.Sort(codec) + + tx := &txs.Tx{Unsigned: &txs.CreateAssetTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: s.vm.ctx.NetworkID, BlockchainID: s.vm.ctx.ChainID, @@ -781,18 +827,7 @@ func (s *Service) CreateAsset(_ *http.Request, args *CreateAssetArgs, reply *Ass Denomination: args.Denomination, States: []*txs.InitialState{initialState}, }} - if err := tx.SignSECP256K1Fx(s.vm.parser.Codec(), keys); err != nil { - return err - } - - assetID, err := s.vm.IssueTx(tx.Bytes()) - if err != nil { - return fmt.Errorf("problem issuing transaction: %w", err) - } - - reply.AssetID = assetID - reply.ChangeAddr, err = 
s.vm.FormatLocalAddress(changeAddr) - return err + return tx, changeAddr, tx.SignSECP256K1Fx(codec, keys) } // CreateFixedCapAsset returns ID of the newly created asset @@ -839,29 +874,48 @@ func (s *Service) CreateNFTAsset(_ *http.Request, args *CreateNFTAssetArgs, repl zap.Int("numMinters", len(args.MinterSets)), ) + tx, changeAddr, err := s.buildCreateNFTAsset(args) + if err != nil { + return err + } + + assetID, err := s.vm.issueTx(tx) + if err != nil { + return fmt.Errorf("problem issuing transaction: %w", err) + } + + reply.AssetID = assetID + reply.ChangeAddr, err = s.vm.FormatLocalAddress(changeAddr) + return err +} + +func (s *Service) buildCreateNFTAsset(args *CreateNFTAssetArgs) (*txs.Tx, ids.ShortID, error) { if len(args.MinterSets) == 0 { - return errNoMinters + return nil, ids.ShortEmpty, errNoMinters } // Parse the from addresses fromAddrs, err := avax.ParseServiceAddresses(s.vm, args.From) if err != nil { - return err + return nil, ids.ShortEmpty, err } + s.vm.ctx.Lock.Lock() + defer s.vm.ctx.Lock.Unlock() + // Get the UTXOs/keys for the from addresses utxos, kc, err := s.vm.LoadUser(args.Username, args.Password, fromAddrs) if err != nil { - return err + return nil, ids.ShortEmpty, err } // Parse the change address. 
if len(kc.Keys) == 0 { - return errNoKeys + return nil, ids.ShortEmpty, errNoKeys } changeAddr, err := s.vm.selectChangeAddr(kc.Keys[0].PublicKey().Address(), args.ChangeAddr) if err != nil { - return err + return nil, ids.ShortEmpty, err } amountsSpent, ins, keys, err := s.vm.Spend( @@ -872,7 +926,7 @@ func (s *Service) CreateNFTAsset(_ *http.Request, args *CreateNFTAssetArgs, repl }, ) if err != nil { - return err + return nil, ids.ShortEmpty, err } outs := []*avax.TransferableOutput{} @@ -903,15 +957,17 @@ func (s *Service) CreateNFTAsset(_ *http.Request, args *CreateNFTAssetArgs, repl } minterAddrsSet, err := avax.ParseServiceAddresses(s.vm, owner.Minters) if err != nil { - return err + return nil, ids.ShortEmpty, err } minter.Addrs = minterAddrsSet.List() utils.Sort(minter.Addrs) initialState.Outs = append(initialState.Outs, minter) } - initialState.Sort(s.vm.parser.Codec()) - tx := txs.Tx{Unsigned: &txs.CreateAssetTx{ + codec := s.vm.parser.Codec() + initialState.Sort(codec) + + tx := &txs.Tx{Unsigned: &txs.CreateAssetTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: s.vm.ctx.NetworkID, BlockchainID: s.vm.ctx.ChainID, @@ -923,18 +979,7 @@ func (s *Service) CreateNFTAsset(_ *http.Request, args *CreateNFTAssetArgs, repl Denomination: 0, // NFTs are non-fungible States: []*txs.InitialState{initialState}, }} - if err := tx.SignSECP256K1Fx(s.vm.parser.Codec(), keys); err != nil { - return err - } - - assetID, err := s.vm.IssueTx(tx.Bytes()) - if err != nil { - return fmt.Errorf("problem issuing transaction: %w", err) - } - - reply.AssetID = assetID - reply.ChangeAddr, err = s.vm.FormatLocalAddress(changeAddr) - return err + return tx, changeAddr, tx.SignSECP256K1Fx(codec, keys) } // CreateAddress creates an address for the user [args.Username] @@ -945,6 +990,9 @@ func (s *Service) CreateAddress(_ *http.Request, args *api.UserPass, reply *api. 
logging.UserString("username", args.Username), ) + s.vm.ctx.Lock.Lock() + defer s.vm.ctx.Lock.Unlock() + user, err := keystore.NewUserFromKeystore(s.vm.ctx.Keystore, args.Username, args.Password) if err != nil { return err @@ -974,6 +1022,9 @@ func (s *Service) ListAddresses(_ *http.Request, args *api.UserPass, response *a logging.UserString("username", args.Username), ) + s.vm.ctx.Lock.Lock() + defer s.vm.ctx.Lock.Unlock() + user, err := keystore.NewUserFromKeystore(s.vm.ctx.Keystore, args.Username, args.Password) if err != nil { return err @@ -1026,6 +1077,9 @@ func (s *Service) ExportKey(_ *http.Request, args *ExportKeyArgs, reply *ExportK return fmt.Errorf("problem parsing address %q: %w", args.Address, err) } + s.vm.ctx.Lock.Lock() + defer s.vm.ctx.Lock.Unlock() + user, err := keystore.NewUserFromKeystore(s.vm.ctx.Keystore, args.Username, args.Password) if err != nil { return err @@ -1065,6 +1119,9 @@ func (s *Service) ImportKey(_ *http.Request, args *ImportKeyArgs, reply *api.JSO return errMissingPrivateKey } + s.vm.ctx.Lock.Lock() + defer s.vm.ctx.Lock.Unlock() + user, err := keystore.NewUserFromKeystore(s.vm.ctx.Keystore, args.Username, args.Password) if err != nil { return err @@ -1087,7 +1144,7 @@ func (s *Service) ImportKey(_ *http.Request, args *ImportKeyArgs, reply *api.JSO // SendOutput specifies that [Amount] of asset [AssetID] be sent to [To] type SendOutput struct { // The amount of funds to send - Amount json.Uint64 `json:"amount"` + Amount avajson.Uint64 `json:"amount"` // ID of the asset being sent AssetID string `json:"assetID"` @@ -1137,33 +1194,52 @@ func (s *Service) SendMultiple(_ *http.Request, args *SendMultipleArgs, reply *a logging.UserString("username", args.Username), ) + tx, changeAddr, err := s.buildSendMultiple(args) + if err != nil { + return err + } + + txID, err := s.vm.issueTx(tx) + if err != nil { + return fmt.Errorf("problem issuing transaction: %w", err) + } + + reply.TxID = txID + reply.ChangeAddr, err = 
s.vm.FormatLocalAddress(changeAddr) + return err +} + +func (s *Service) buildSendMultiple(args *SendMultipleArgs) (*txs.Tx, ids.ShortID, error) { // Validate the memo field memoBytes := []byte(args.Memo) if l := len(memoBytes); l > avax.MaxMemoSize { - return fmt.Errorf("max memo length is %d but provided memo field is length %d", avax.MaxMemoSize, l) + return nil, ids.ShortEmpty, fmt.Errorf("max memo length is %d but provided memo field is length %d", avax.MaxMemoSize, l) } else if len(args.Outputs) == 0 { - return errNoOutputs + return nil, ids.ShortEmpty, errNoOutputs } // Parse the from addresses fromAddrs, err := avax.ParseServiceAddresses(s.vm, args.From) if err != nil { - return err + return nil, ids.ShortEmpty, err } + s.vm.ctx.Lock.Lock() + defer s.vm.ctx.Lock.Unlock() + // Load user's UTXOs/keys utxos, kc, err := s.vm.LoadUser(args.Username, args.Password, fromAddrs) if err != nil { - return err + return nil, ids.ShortEmpty, err } // Parse the change address. if len(kc.Keys) == 0 { - return errNoKeys + return nil, ids.ShortEmpty, errNoKeys } changeAddr, err := s.vm.selectChangeAddr(kc.Keys[0].PublicKey().Address(), args.ChangeAddr) if err != nil { - return err + return nil, ids.ShortEmpty, err } // Calculate required input amounts and create the desired outputs @@ -1175,27 +1251,27 @@ func (s *Service) SendMultiple(_ *http.Request, args *SendMultipleArgs, reply *a outs := []*avax.TransferableOutput{} for _, output := range args.Outputs { if output.Amount == 0 { - return errZeroAmount + return nil, ids.ShortEmpty, errZeroAmount } assetID, ok := assetIDs[output.AssetID] // Asset ID of next output if !ok { assetID, err = s.vm.lookupAssetID(output.AssetID) if err != nil { - return fmt.Errorf("couldn't find asset %s", output.AssetID) + return nil, ids.ShortEmpty, fmt.Errorf("couldn't find asset %s", output.AssetID) } assetIDs[output.AssetID] = assetID } currentAmount := amounts[assetID] newAmount, err := safemath.Add64(currentAmount, uint64(output.Amount)) if 
err != nil { - return fmt.Errorf("problem calculating required spend amount: %w", err) + return nil, ids.ShortEmpty, fmt.Errorf("problem calculating required spend amount: %w", err) } amounts[assetID] = newAmount // Parse the to address to, err := avax.ParseServiceAddress(s.vm, output.To) if err != nil { - return fmt.Errorf("problem parsing to address %q: %w", output.To, err) + return nil, ids.ShortEmpty, fmt.Errorf("problem parsing to address %q: %w", output.To, err) } // Create the Output @@ -1219,7 +1295,7 @@ func (s *Service) SendMultiple(_ *http.Request, args *SendMultipleArgs, reply *a amountWithFee, err := safemath.Add64(amounts[s.vm.feeAssetID], s.vm.TxFee) if err != nil { - return fmt.Errorf("problem calculating required spend amount: %w", err) + return nil, ids.ShortEmpty, fmt.Errorf("problem calculating required spend amount: %w", err) } amountsWithFee[s.vm.feeAssetID] = amountWithFee @@ -1229,7 +1305,7 @@ func (s *Service) SendMultiple(_ *http.Request, args *SendMultipleArgs, reply *a amountsWithFee, ) if err != nil { - return err + return nil, ids.ShortEmpty, err } // Add the required change outputs @@ -1250,20 +1326,42 @@ func (s *Service) SendMultiple(_ *http.Request, args *SendMultipleArgs, reply *a }) } } - avax.SortTransferableOutputs(outs, s.vm.parser.Codec()) - tx := txs.Tx{Unsigned: &txs.BaseTx{BaseTx: avax.BaseTx{ + codec := s.vm.parser.Codec() + avax.SortTransferableOutputs(outs, codec) + + tx := &txs.Tx{Unsigned: &txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: s.vm.ctx.NetworkID, BlockchainID: s.vm.ctx.ChainID, Outs: outs, Ins: ins, Memo: memoBytes, }}} - if err := tx.SignSECP256K1Fx(s.vm.parser.Codec(), keys); err != nil { + return tx, changeAddr, tx.SignSECP256K1Fx(codec, keys) +} + +// MintArgs are arguments for passing into Mint requests +type MintArgs struct { + api.JSONSpendHeader // User, password, from addrs, change addr + Amount avajson.Uint64 `json:"amount"` + AssetID string `json:"assetID"` + To string `json:"to"` +} + +// Mint issues 
a transaction that mints more of the asset +func (s *Service) Mint(_ *http.Request, args *MintArgs, reply *api.JSONTxIDChangeAddr) error { + s.vm.ctx.Log.Warn("deprecated API called", + zap.String("service", "avm"), + zap.String("method", "mint"), + logging.UserString("username", args.Username), + ) + + tx, changeAddr, err := s.buildMint(args) + if err != nil { return err } - txID, err := s.vm.IssueTx(tx.Bytes()) + txID, err := s.vm.issueTx(tx) if err != nil { return fmt.Errorf("problem issuing transaction: %w", err) } @@ -1273,16 +1371,7 @@ func (s *Service) SendMultiple(_ *http.Request, args *SendMultipleArgs, reply *a return err } -// MintArgs are arguments for passing into Mint requests -type MintArgs struct { - api.JSONSpendHeader // User, password, from addrs, change addr - Amount json.Uint64 `json:"amount"` - AssetID string `json:"assetID"` - To string `json:"to"` -} - -// Mint issues a transaction that mints more of the asset -func (s *Service) Mint(_ *http.Request, args *MintArgs, reply *api.JSONTxIDChangeAddr) error { +func (s *Service) buildMint(args *MintArgs) (*txs.Tx, ids.ShortID, error) { s.vm.ctx.Log.Warn("deprecated API called", zap.String("service", "avm"), zap.String("method", "mint"), @@ -1290,38 +1379,41 @@ func (s *Service) Mint(_ *http.Request, args *MintArgs, reply *api.JSONTxIDChang ) if args.Amount == 0 { - return errInvalidMintAmount + return nil, ids.ShortEmpty, errInvalidMintAmount } assetID, err := s.vm.lookupAssetID(args.AssetID) if err != nil { - return err + return nil, ids.ShortEmpty, err } to, err := avax.ParseServiceAddress(s.vm, args.To) if err != nil { - return fmt.Errorf("problem parsing to address %q: %w", args.To, err) + return nil, ids.ShortEmpty, fmt.Errorf("problem parsing to address %q: %w", args.To, err) } // Parse the from addresses fromAddrs, err := avax.ParseServiceAddresses(s.vm, args.From) if err != nil { - return err + return nil, ids.ShortEmpty, err } + s.vm.ctx.Lock.Lock() + defer s.vm.ctx.Lock.Unlock() + // Get 
the UTXOs/keys for the from addresses feeUTXOs, feeKc, err := s.vm.LoadUser(args.Username, args.Password, fromAddrs) if err != nil { - return err + return nil, ids.ShortEmpty, err } // Parse the change address. if len(feeKc.Keys) == 0 { - return errNoKeys + return nil, ids.ShortEmpty, errNoKeys } changeAddr, err := s.vm.selectChangeAddr(feeKc.Keys[0].PublicKey().Address(), args.ChangeAddr) if err != nil { - return err + return nil, ids.ShortEmpty, err } amountsSpent, ins, keys, err := s.vm.Spend( @@ -1332,7 +1424,7 @@ func (s *Service) Mint(_ *http.Request, args *MintArgs, reply *api.JSONTxIDChang }, ) if err != nil { - return err + return nil, ids.ShortEmpty, err } outs := []*avax.TransferableOutput{} @@ -1353,7 +1445,7 @@ func (s *Service) Mint(_ *http.Request, args *MintArgs, reply *api.JSONTxIDChang // Get all UTXOs/keys for the user utxos, kc, err := s.vm.LoadUser(args.Username, args.Password, nil) if err != nil { - return err + return nil, ids.ShortEmpty, err } ops, opKeys, err := s.vm.Mint( @@ -1365,11 +1457,11 @@ func (s *Service) Mint(_ *http.Request, args *MintArgs, reply *api.JSONTxIDChang to, ) if err != nil { - return err + return nil, ids.ShortEmpty, err } keys = append(keys, opKeys...) 
- tx := txs.Tx{Unsigned: &txs.OperationTx{ + tx := &txs.Tx{Unsigned: &txs.OperationTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: s.vm.ctx.NetworkID, BlockchainID: s.vm.ctx.ChainID, @@ -1378,26 +1470,15 @@ func (s *Service) Mint(_ *http.Request, args *MintArgs, reply *api.JSONTxIDChang }}, Ops: ops, }} - if err := tx.SignSECP256K1Fx(s.vm.parser.Codec(), keys); err != nil { - return err - } - - txID, err := s.vm.IssueTx(tx.Bytes()) - if err != nil { - return fmt.Errorf("problem issuing transaction: %w", err) - } - - reply.TxID = txID - reply.ChangeAddr, err = s.vm.FormatLocalAddress(changeAddr) - return err + return tx, changeAddr, tx.SignSECP256K1Fx(s.vm.parser.Codec(), keys) } // SendNFTArgs are arguments for passing into SendNFT requests type SendNFTArgs struct { - api.JSONSpendHeader // User, password, from addrs, change addr - AssetID string `json:"assetID"` - GroupID json.Uint32 `json:"groupID"` - To string `json:"to"` + api.JSONSpendHeader // User, password, from addrs, change addr + AssetID string `json:"assetID"` + GroupID avajson.Uint32 `json:"groupID"` + To string `json:"to"` } // SendNFT sends an NFT @@ -1408,37 +1489,56 @@ func (s *Service) SendNFT(_ *http.Request, args *SendNFTArgs, reply *api.JSONTxI logging.UserString("username", args.Username), ) + tx, changeAddr, err := s.buildSendNFT(args) + if err != nil { + return err + } + + txID, err := s.vm.issueTx(tx) + if err != nil { + return fmt.Errorf("problem issuing transaction: %w", err) + } + + reply.TxID = txID + reply.ChangeAddr, err = s.vm.FormatLocalAddress(changeAddr) + return err +} + +func (s *Service) buildSendNFT(args *SendNFTArgs) (*txs.Tx, ids.ShortID, error) { // Parse the asset ID assetID, err := s.vm.lookupAssetID(args.AssetID) if err != nil { - return err + return nil, ids.ShortEmpty, err } // Parse the to address to, err := avax.ParseServiceAddress(s.vm, args.To) if err != nil { - return fmt.Errorf("problem parsing to address %q: %w", args.To, err) + return nil, 
ids.ShortEmpty, fmt.Errorf("problem parsing to address %q: %w", args.To, err) } // Parse the from addresses fromAddrs, err := avax.ParseServiceAddresses(s.vm, args.From) if err != nil { - return err + return nil, ids.ShortEmpty, err } + s.vm.ctx.Lock.Lock() + defer s.vm.ctx.Lock.Unlock() + // Get the UTXOs/keys for the from addresses utxos, kc, err := s.vm.LoadUser(args.Username, args.Password, fromAddrs) if err != nil { - return err + return nil, ids.ShortEmpty, err } // Parse the change address. if len(kc.Keys) == 0 { - return errNoKeys + return nil, ids.ShortEmpty, errNoKeys } changeAddr, err := s.vm.selectChangeAddr(kc.Keys[0].PublicKey().Address(), args.ChangeAddr) if err != nil { - return err + return nil, ids.ShortEmpty, err } amountsSpent, ins, secpKeys, err := s.vm.Spend( @@ -1449,7 +1549,7 @@ func (s *Service) SendNFT(_ *http.Request, args *SendNFTArgs, reply *api.JSONTxI }, ) if err != nil { - return err + return nil, ids.ShortEmpty, err } outs := []*avax.TransferableOutput{} @@ -1475,10 +1575,10 @@ func (s *Service) SendNFT(_ *http.Request, args *SendNFTArgs, reply *api.JSONTxI to, ) if err != nil { - return err + return nil, ids.ShortEmpty, err } - tx := txs.Tx{Unsigned: &txs.OperationTx{ + tx := &txs.Tx{Unsigned: &txs.OperationTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: s.vm.ctx.NetworkID, BlockchainID: s.vm.ctx.ChainID, @@ -1487,21 +1587,12 @@ func (s *Service) SendNFT(_ *http.Request, args *SendNFTArgs, reply *api.JSONTxI }}, Ops: ops, }} - if err := tx.SignSECP256K1Fx(s.vm.parser.Codec(), secpKeys); err != nil { - return err - } - if err := tx.SignNFTFx(s.vm.parser.Codec(), nftKeys); err != nil { - return err - } - txID, err := s.vm.IssueTx(tx.Bytes()) - if err != nil { - return fmt.Errorf("problem issuing transaction: %w", err) + codec := s.vm.parser.Codec() + if err := tx.SignSECP256K1Fx(codec, secpKeys); err != nil { + return nil, ids.ShortEmpty, err } - - reply.TxID = txID - reply.ChangeAddr, err = s.vm.FormatLocalAddress(changeAddr) 
- return err + return tx, changeAddr, tx.SignNFTFx(codec, nftKeys) } // MintNFTArgs are arguments for passing into MintNFT requests @@ -1521,40 +1612,59 @@ func (s *Service) MintNFT(_ *http.Request, args *MintNFTArgs, reply *api.JSONTxI logging.UserString("username", args.Username), ) - assetID, err := s.vm.lookupAssetID(args.AssetID) + tx, changeAddr, err := s.buildMintNFT(args) if err != nil { return err } + txID, err := s.vm.issueTx(tx) + if err != nil { + return fmt.Errorf("problem issuing transaction: %w", err) + } + + reply.TxID = txID + reply.ChangeAddr, err = s.vm.FormatLocalAddress(changeAddr) + return err +} + +func (s *Service) buildMintNFT(args *MintNFTArgs) (*txs.Tx, ids.ShortID, error) { + assetID, err := s.vm.lookupAssetID(args.AssetID) + if err != nil { + return nil, ids.ShortEmpty, err + } + to, err := avax.ParseServiceAddress(s.vm, args.To) if err != nil { - return fmt.Errorf("problem parsing to address %q: %w", args.To, err) + return nil, ids.ShortEmpty, fmt.Errorf("problem parsing to address %q: %w", args.To, err) } payloadBytes, err := formatting.Decode(args.Encoding, args.Payload) if err != nil { - return fmt.Errorf("problem decoding payload bytes: %w", err) + return nil, ids.ShortEmpty, fmt.Errorf("problem decoding payload bytes: %w", err) } // Parse the from addresses fromAddrs, err := avax.ParseServiceAddresses(s.vm, args.From) if err != nil { - return err + return nil, ids.ShortEmpty, err } + s.vm.ctx.Lock.Lock() + defer s.vm.ctx.Lock.Unlock() + // Get the UTXOs/keys for the from addresses feeUTXOs, feeKc, err := s.vm.LoadUser(args.Username, args.Password, fromAddrs) if err != nil { - return err + return nil, ids.ShortEmpty, err } // Parse the change address. 
if len(feeKc.Keys) == 0 { - return errNoKeys + return nil, ids.ShortEmpty, errNoKeys } changeAddr, err := s.vm.selectChangeAddr(feeKc.Keys[0].PublicKey().Address(), args.ChangeAddr) if err != nil { - return err + return nil, ids.ShortEmpty, err } amountsSpent, ins, secpKeys, err := s.vm.Spend( @@ -1565,7 +1675,7 @@ func (s *Service) MintNFT(_ *http.Request, args *MintNFTArgs, reply *api.JSONTxI }, ) if err != nil { - return err + return nil, ids.ShortEmpty, err } outs := []*avax.TransferableOutput{} @@ -1586,7 +1696,7 @@ func (s *Service) MintNFT(_ *http.Request, args *MintNFTArgs, reply *api.JSONTxI // Get all UTXOs/keys utxos, kc, err := s.vm.LoadUser(args.Username, args.Password, nil) if err != nil { - return err + return nil, ids.ShortEmpty, err } ops, nftKeys, err := s.vm.MintNFT( @@ -1597,10 +1707,10 @@ func (s *Service) MintNFT(_ *http.Request, args *MintNFTArgs, reply *api.JSONTxI to, ) if err != nil { - return err + return nil, ids.ShortEmpty, err } - tx := txs.Tx{Unsigned: &txs.OperationTx{ + tx := &txs.Tx{Unsigned: &txs.OperationTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: s.vm.ctx.NetworkID, BlockchainID: s.vm.ctx.ChainID, @@ -1609,21 +1719,12 @@ func (s *Service) MintNFT(_ *http.Request, args *MintNFTArgs, reply *api.JSONTxI }}, Ops: ops, }} - if err := tx.SignSECP256K1Fx(s.vm.parser.Codec(), secpKeys); err != nil { - return err - } - if err := tx.SignNFTFx(s.vm.parser.Codec(), nftKeys); err != nil { - return err - } - txID, err := s.vm.IssueTx(tx.Bytes()) - if err != nil { - return fmt.Errorf("problem issuing transaction: %w", err) + codec := s.vm.parser.Codec() + if err := tx.SignSECP256K1Fx(codec, secpKeys); err != nil { + return nil, ids.ShortEmpty, err } - - reply.TxID = txID - reply.ChangeAddr, err = s.vm.FormatLocalAddress(changeAddr) - return err + return tx, changeAddr, tx.SignNFTFx(codec, nftKeys) } // ImportArgs are arguments for passing into Import requests @@ -1648,29 +1749,47 @@ func (s *Service) Import(_ *http.Request, args 
*ImportArgs, reply *api.JSONTxID) logging.UserString("username", args.Username), ) + tx, err := s.buildImport(args) + if err != nil { + return err + } + + txID, err := s.vm.issueTx(tx) + if err != nil { + return fmt.Errorf("problem issuing transaction: %w", err) + } + + reply.TxID = txID + return nil +} + +func (s *Service) buildImport(args *ImportArgs) (*txs.Tx, error) { chainID, err := s.vm.ctx.BCLookup.Lookup(args.SourceChain) if err != nil { - return fmt.Errorf("problem parsing chainID %q: %w", args.SourceChain, err) + return nil, fmt.Errorf("problem parsing chainID %q: %w", args.SourceChain, err) } to, err := avax.ParseServiceAddress(s.vm, args.To) if err != nil { - return fmt.Errorf("problem parsing to address %q: %w", args.To, err) + return nil, fmt.Errorf("problem parsing to address %q: %w", args.To, err) } + s.vm.ctx.Lock.Lock() + defer s.vm.ctx.Lock.Unlock() + utxos, kc, err := s.vm.LoadUser(args.Username, args.Password, nil) if err != nil { - return err + return nil, err } atomicUTXOs, _, _, err := s.vm.GetAtomicUTXOs(chainID, kc.Addrs, ids.ShortEmpty, ids.Empty, int(maxPageSize)) if err != nil { - return fmt.Errorf("problem retrieving user's atomic UTXOs: %w", err) + return nil, fmt.Errorf("problem retrieving user's atomic UTXOs: %w", err) } amountsSpent, importInputs, importKeys, err := s.vm.SpendAll(atomicUTXOs, kc) if err != nil { - return err + return nil, err } ins := []*avax.TransferableInput{} @@ -1686,12 +1805,12 @@ func (s *Service) Import(_ *http.Request, args *ImportArgs, reply *api.JSONTxID) }, ) if err != nil { - return err + return nil, err } for asset, amount := range localAmountsSpent { newAmount, err := safemath.Add64(amountsSpent[asset], amount) if err != nil { - return fmt.Errorf("problem calculating required spend amount: %w", err) + return nil, fmt.Errorf("problem calculating required spend amount: %w", err) } amountsSpent[asset] = newAmount } @@ -1721,7 +1840,7 @@ func (s *Service) Import(_ *http.Request, args *ImportArgs, reply 
*api.JSONTxID) } avax.SortTransferableOutputs(outs, s.vm.parser.Codec()) - tx := txs.Tx{Unsigned: &txs.ImportTx{ + tx := &txs.Tx{Unsigned: &txs.ImportTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: s.vm.ctx.NetworkID, BlockchainID: s.vm.ctx.ChainID, @@ -1731,17 +1850,7 @@ func (s *Service) Import(_ *http.Request, args *ImportArgs, reply *api.JSONTxID) SourceChain: chainID, ImportedIns: importInputs, }} - if err := tx.SignSECP256K1Fx(s.vm.parser.Codec(), keys); err != nil { - return err - } - - txID, err := s.vm.IssueTx(tx.Bytes()) - if err != nil { - return fmt.Errorf("problem issuing transaction: %w", err) - } - - reply.TxID = txID - return nil + return tx, tx.SignSECP256K1Fx(s.vm.parser.Codec(), keys) } // ExportArgs are arguments for passing into ExportAVA requests @@ -1749,7 +1858,7 @@ type ExportArgs struct { // User, password, from addrs, change addr api.JSONSpendHeader // Amount of nAVAX to send - Amount json.Uint64 `json:"amount"` + Amount avajson.Uint64 `json:"amount"` // Chain the funds are going to. Optional. Used if To address does not include the chainID. 
TargetChain string `json:"targetChain"` @@ -1771,10 +1880,26 @@ func (s *Service) Export(_ *http.Request, args *ExportArgs, reply *api.JSONTxIDC logging.UserString("username", args.Username), ) + tx, changeAddr, err := s.buildExport(args) + if err != nil { + return err + } + + txID, err := s.vm.issueTx(tx) + if err != nil { + return fmt.Errorf("problem issuing transaction: %w", err) + } + + reply.TxID = txID + reply.ChangeAddr, err = s.vm.FormatLocalAddress(changeAddr) + return err +} + +func (s *Service) buildExport(args *ExportArgs) (*txs.Tx, ids.ShortID, error) { // Parse the asset ID assetID, err := s.vm.lookupAssetID(args.AssetID) if err != nil { - return err + return nil, ids.ShortEmpty, err } // Get the chainID and parse the to address @@ -1782,44 +1907,47 @@ func (s *Service) Export(_ *http.Request, args *ExportArgs, reply *api.JSONTxIDC if err != nil { chainID, err = s.vm.ctx.BCLookup.Lookup(args.TargetChain) if err != nil { - return err + return nil, ids.ShortEmpty, err } to, err = ids.ShortFromString(args.To) if err != nil { - return err + return nil, ids.ShortEmpty, err } } if args.Amount == 0 { - return errZeroAmount + return nil, ids.ShortEmpty, errZeroAmount } // Parse the from addresses fromAddrs, err := avax.ParseServiceAddresses(s.vm, args.From) if err != nil { - return err + return nil, ids.ShortEmpty, err } + s.vm.ctx.Lock.Lock() + defer s.vm.ctx.Lock.Unlock() + // Get the UTXOs/keys for the from addresses utxos, kc, err := s.vm.LoadUser(args.Username, args.Password, fromAddrs) if err != nil { - return err + return nil, ids.ShortEmpty, err } // Parse the change address. 
if len(kc.Keys) == 0 { - return errNoKeys + return nil, ids.ShortEmpty, errNoKeys } changeAddr, err := s.vm.selectChangeAddr(kc.Keys[0].PublicKey().Address(), args.ChangeAddr) if err != nil { - return err + return nil, ids.ShortEmpty, err } amounts := map[ids.ID]uint64{} if assetID == s.vm.feeAssetID { amountWithFee, err := safemath.Add64(uint64(args.Amount), s.vm.TxFee) if err != nil { - return fmt.Errorf("problem calculating required spend amount: %w", err) + return nil, ids.ShortEmpty, fmt.Errorf("problem calculating required spend amount: %w", err) } amounts[s.vm.feeAssetID] = amountWithFee } else { @@ -1829,7 +1957,7 @@ func (s *Service) Export(_ *http.Request, args *ExportArgs, reply *api.JSONTxIDC amountsSpent, ins, keys, err := s.vm.Spend(utxos, kc, amounts) if err != nil { - return err + return nil, ids.ShortEmpty, err } exportOuts := []*avax.TransferableOutput{{ @@ -1861,9 +1989,11 @@ func (s *Service) Export(_ *http.Request, args *ExportArgs, reply *api.JSONTxIDC }) } } - avax.SortTransferableOutputs(outs, s.vm.parser.Codec()) - tx := txs.Tx{Unsigned: &txs.ExportTx{ + codec := s.vm.parser.Codec() + avax.SortTransferableOutputs(outs, codec) + + tx := &txs.Tx{Unsigned: &txs.ExportTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: s.vm.ctx.NetworkID, BlockchainID: s.vm.ctx.ChainID, @@ -1873,16 +2003,5 @@ func (s *Service) Export(_ *http.Request, args *ExportArgs, reply *api.JSONTxIDC DestinationChain: chainID, ExportedOuts: exportOuts, }} - if err := tx.SignSECP256K1Fx(s.vm.parser.Codec(), keys); err != nil { - return err - } - - txID, err := s.vm.IssueTx(tx.Bytes()) - if err != nil { - return fmt.Errorf("problem issuing transaction: %w", err) - } - - reply.TxID = txID - reply.ChangeAddr, err = s.vm.FormatLocalAddress(changeAddr) - return err + return tx, changeAddr, tx.SignSECP256K1Fx(codec, keys) } diff --git a/avalanchego/vms/avm/service_test.go b/avalanchego/vms/avm/service_test.go index d6b0b81e..9d187987 100644 --- 
a/avalanchego/vms/avm/service_test.go +++ b/avalanchego/vms/avm/service_test.go @@ -1,29 +1,24 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avm import ( - "bytes" "context" - "fmt" - "math/rand" + "encoding/json" + "strings" "testing" "time" - stdjson "encoding/json" - - "github.com/golang/mock/gomock" - + "github.com/btcsuite/btcd/btcutil/bech32" "github.com/prometheus/client_golang/prometheus" - "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" "github.com/ava-labs/avalanchego/api" "github.com/ava-labs/avalanchego/chains/atomic" + "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/database/manager" - "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/choices" @@ -32,258 +27,97 @@ import ( "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/formatting" "github.com/ava-labs/avalanchego/utils/formatting/address" - "github.com/ava-labs/avalanchego/utils/json" "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/utils/sampler" - "github.com/ava-labs/avalanchego/version" - "github.com/ava-labs/avalanchego/vms/avm/blocks" - "github.com/ava-labs/avalanchego/vms/avm/blocks/executor" - "github.com/ava-labs/avalanchego/vms/avm/states" + "github.com/ava-labs/avalanchego/vms/avm/block" + "github.com/ava-labs/avalanchego/vms/avm/block/executor" + "github.com/ava-labs/avalanchego/vms/avm/config" + "github.com/ava-labs/avalanchego/vms/avm/state" "github.com/ava-labs/avalanchego/vms/avm/txs" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/index" - "github.com/ava-labs/avalanchego/vms/components/keystore" 
"github.com/ava-labs/avalanchego/vms/components/verify" "github.com/ava-labs/avalanchego/vms/nftfx" "github.com/ava-labs/avalanchego/vms/propertyfx" "github.com/ava-labs/avalanchego/vms/secp256k1fx" -) - -var testChangeAddr = ids.GenerateTestShortID() - -var testCases = []struct { - name string - avaxAsset bool -}{ - {"genesis asset is AVAX", true}, - {"genesis asset is TEST", false}, -} - -// Returns: -// 1) genesis bytes of vm -// 2) the VM -// 3) The service that wraps the VM -// 4) atomic memory to use in tests -func setup(t *testing.T, isAVAXAsset bool) ([]byte, *VM, *Service, *atomic.Memory, *txs.Tx) { - var genesisBytes []byte - var vm *VM - var m *atomic.Memory - var genesisTx *txs.Tx - if isAVAXAsset { - genesisBytes, _, vm, m = GenesisVM(t) - genesisTx = GetAVAXTxFromGenesisTest(genesisBytes, t) - } else { - genesisBytes, _, vm, m = setupTxFeeAssets(t) - genesisTx = GetCreateTxFromGenesisTest(t, genesisBytes, feeAssetName) - } - s := &Service{vm: vm} - return genesisBytes, vm, s, m, genesisTx -} - -// Returns: -// 1) genesis bytes of vm -// 2) the VM -// 3) The service that wraps the VM -// 4) Issuer channel -// 5) atomic memory to use in tests -func setupWithIssuer(t *testing.T, isAVAXAsset bool) ([]byte, *VM, *Service, chan common.Message) { - var genesisBytes []byte - var vm *VM - var issuer chan common.Message - if isAVAXAsset { - genesisBytes, issuer, vm, _ = GenesisVM(t) - } else { - genesisBytes, issuer, vm, _ = setupTxFeeAssets(t) - } - s := &Service{vm: vm} - return genesisBytes, vm, s, issuer -} - -// Returns: -// 1) genesis bytes of vm -// 2) the VM -// 3) The service that wraps the VM -// 4) atomic memory to use in tests -func setupWithKeys(t *testing.T, isAVAXAsset bool) ([]byte, *VM, *Service, *atomic.Memory, *txs.Tx) { - genesisBytes, vm, s, m, tx := setup(t, isAVAXAsset) - - // Import the initially funded private keys - user, err := keystore.NewUserFromKeystore(s.vm.ctx.Keystore, username, password) - if err != nil { - t.Fatal(err) - } - - 
if err := user.PutKeys(keys...); err != nil { - t.Fatalf("Failed to set key for user: %s", err) - } - - if err := user.Close(); err != nil { - t.Fatal(err) - } - return genesisBytes, vm, s, m, tx -} - -// Sample from a set of addresses and return them raw and formatted as strings. -// The size of the sample is between 1 and len(addrs) -// If len(addrs) == 0, returns nil -func sampleAddrs(t *testing.T, vm *VM, addrs []ids.ShortID) ([]ids.ShortID, []string) { - sampledAddrs := []ids.ShortID{} - sampledAddrsStr := []string{} - - sampler := sampler.NewUniform() - if err := sampler.Initialize(uint64(len(addrs))); err != nil { - t.Fatal(err) - } - - numAddrs := 1 + rand.Intn(len(addrs)) // #nosec G404 - indices, err := sampler.Sample(numAddrs) - if err != nil { - t.Fatal(err) - } - for _, index := range indices { - addr := addrs[index] - addrStr, err := vm.FormatLocalAddress(addr) - if err != nil { - t.Fatal(err) - } - - sampledAddrs = append(sampledAddrs, addr) - sampledAddrsStr = append(sampledAddrsStr, addrStr) - } - return sampledAddrs, sampledAddrsStr -} -// Returns error if [numTxFees] tx fees was not deducted from the addresses in [fromAddrs] -// relative to their starting balance -func verifyTxFeeDeducted(t *testing.T, s *Service, fromAddrs []ids.ShortID, numTxFees int) error { - totalTxFee := uint64(numTxFees) * s.vm.TxFee - fromAddrsStartBalance := startBalance * uint64(len(fromAddrs)) - - // Key: Address - // Value: AVAX balance - balances := map[ids.ShortID]uint64{} - - for _, addr := range addrs { // get balances for all addresses - addrStr, err := s.vm.FormatLocalAddress(addr) - if err != nil { - t.Fatal(err) - } - reply := &GetBalanceReply{} - err = s.GetBalance(nil, - &GetBalanceArgs{ - Address: addrStr, - AssetID: s.vm.feeAssetID.String(), - }, - reply, - ) - if err != nil { - return fmt.Errorf("couldn't get balance of %s: %w", addr, err) - } - balances[addr] = uint64(reply.Balance) - } + avajson "github.com/ava-labs/avalanchego/utils/json" +) - 
fromAddrsTotalBalance := uint64(0) - for _, addr := range fromAddrs { - fromAddrsTotalBalance += balances[addr] - } +func TestServiceIssueTx(t *testing.T) { + require := require.New(t) - if fromAddrsTotalBalance != fromAddrsStartBalance-totalTxFee { - return fmt.Errorf("expected fromAddrs to have %d balance but have %d", - fromAddrsStartBalance-totalTxFee, - fromAddrsTotalBalance, - ) - } - return nil -} + env := setup(t, &envConfig{}) + env.vm.ctx.Lock.Unlock() -func TestServiceIssueTx(t *testing.T) { - genesisBytes, vm, s, _, _ := setup(t, true) defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } - vm.ctx.Lock.Unlock() + env.vm.ctx.Lock.Lock() + require.NoError(env.vm.Shutdown(context.Background())) + env.vm.ctx.Lock.Unlock() }() txArgs := &api.FormattedTx{} txReply := &api.JSONTxID{} - err := s.IssueTx(nil, txArgs, txReply) - if err == nil { - t.Fatal("Expected empty transaction to return an error") - } - tx := NewTx(t, genesisBytes, vm) + err := env.service.IssueTx(nil, txArgs, txReply) + require.ErrorIs(err, codec.ErrCantUnpackVersion) + + tx := newTx(t, env.genesisBytes, env.vm.ctx.ChainID, env.vm.parser, "AVAX") txArgs.Tx, err = formatting.Encode(formatting.Hex, tx.Bytes()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) txArgs.Encoding = formatting.Hex txReply = &api.JSONTxID{} - if err := s.IssueTx(nil, txArgs, txReply); err != nil { - t.Fatal(err) - } - if txReply.TxID != tx.ID() { - t.Fatalf("Expected %q, got %q", txReply.TxID, tx.ID()) - } + require.NoError(env.service.IssueTx(nil, txArgs, txReply)) + require.Equal(tx.ID(), txReply.TxID) } func TestServiceGetTxStatus(t *testing.T) { require := require.New(t) - genesisBytes, vm, s, issuer := setupWithIssuer(t, true) - ctx := vm.ctx + env := setup(t, &envConfig{}) + env.vm.ctx.Lock.Unlock() + defer func() { - require.NoError(vm.Shutdown(context.Background())) - ctx.Lock.Unlock() + env.vm.ctx.Lock.Lock() + 
require.NoError(env.vm.Shutdown(context.Background())) + env.vm.ctx.Lock.Unlock() }() statusArgs := &api.JSONTxID{} statusReply := &GetTxStatusReply{} - err := s.GetTxStatus(nil, statusArgs, statusReply) + err := env.service.GetTxStatus(nil, statusArgs, statusReply) require.ErrorIs(err, errNilTxID) - newTx := newAvaxBaseTxWithOutputs(t, genesisBytes, vm) + newTx := newAvaxBaseTxWithOutputs(t, env.genesisBytes, env.vm.ctx.ChainID, env.vm.TxFee, env.vm.parser) txID := newTx.ID() statusArgs = &api.JSONTxID{ TxID: txID, } statusReply = &GetTxStatusReply{} - require.NoError(s.GetTxStatus(nil, statusArgs, statusReply)) + require.NoError(env.service.GetTxStatus(nil, statusArgs, statusReply)) require.Equal(choices.Unknown, statusReply.Status) - _, err = vm.IssueTx(newTx.Bytes()) - require.NoError(err) - ctx.Lock.Unlock() - - msg := <-issuer - require.Equal(common.PendingTxs, msg) - ctx.Lock.Lock() - - txs := vm.PendingTxs(context.Background()) - require.Len(txs, 1) - require.NoError(txs[0].Accept(context.Background())) + issueAndAccept(require, env.vm, env.issuer, newTx) statusReply = &GetTxStatusReply{} - require.NoError(s.GetTxStatus(nil, statusArgs, statusReply)) + require.NoError(env.service.GetTxStatus(nil, statusArgs, statusReply)) require.Equal(choices.Accepted, statusReply.Status) } // Test the GetBalance method when argument Strict is true func TestServiceGetBalanceStrict(t *testing.T) { - _, vm, s, _, _ := setup(t, true) + require := require.New(t) + + env := setup(t, &envConfig{}) defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } - vm.ctx.Lock.Unlock() + env.vm.ctx.Lock.Lock() + require.NoError(env.vm.Shutdown(context.Background())) + env.vm.ctx.Lock.Unlock() }() assetID := ids.GenerateTestID() addr := ids.GenerateTestShortID() - addrStr, err := vm.FormatLocalAddress(addr) - if err != nil { - t.Fatal(err) - } + addrStr, err := env.vm.FormatLocalAddress(addr) + require.NoError(err) // A UTXO with a 2 out of 2 multisig 
// where one of the addresses is [addr] @@ -302,8 +136,10 @@ func TestServiceGetBalanceStrict(t *testing.T) { }, } // Insert the UTXO - vm.state.AddUTXO(twoOfTwoUTXO) - require.NoError(t, vm.state.Commit()) + env.vm.state.AddUTXO(twoOfTwoUTXO) + require.NoError(env.vm.state.Commit()) + + env.vm.ctx.Lock.Unlock() // Check the balance with IncludePartial set to true balanceArgs := &GetBalanceArgs{ @@ -312,11 +148,10 @@ func TestServiceGetBalanceStrict(t *testing.T) { IncludePartial: true, } balanceReply := &GetBalanceReply{} - err = s.GetBalance(nil, balanceArgs, balanceReply) - require.NoError(t, err) + require.NoError(env.service.GetBalance(nil, balanceArgs, balanceReply)) // The balance should include the UTXO since it is partly owned by [addr] - require.Equal(t, uint64(1337), uint64(balanceReply.Balance)) - require.Len(t, balanceReply.UTXOIDs, 1, "should have only returned 1 utxoID") + require.Equal(uint64(1337), uint64(balanceReply.Balance)) + require.Len(balanceReply.UTXOIDs, 1) // Check the balance with IncludePartial set to false balanceArgs = &GetBalanceArgs{ @@ -324,11 +159,12 @@ func TestServiceGetBalanceStrict(t *testing.T) { AssetID: assetID.String(), } balanceReply = &GetBalanceReply{} - err = s.GetBalance(nil, balanceArgs, balanceReply) - require.NoError(t, err) + require.NoError(env.service.GetBalance(nil, balanceArgs, balanceReply)) // The balance should not include the UTXO since it is only partly owned by [addr] - require.Equal(t, uint64(0), uint64(balanceReply.Balance)) - require.Len(t, balanceReply.UTXOIDs, 0, "should have returned 0 utxoIDs") + require.Zero(balanceReply.Balance) + require.Empty(balanceReply.UTXOIDs) + + env.vm.ctx.Lock.Lock() // A UTXO with a 1 out of 2 multisig // where one of the addresses is [addr] @@ -347,8 +183,10 @@ func TestServiceGetBalanceStrict(t *testing.T) { }, } // Insert the UTXO - vm.state.AddUTXO(oneOfTwoUTXO) - require.NoError(t, vm.state.Commit()) + env.vm.state.AddUTXO(oneOfTwoUTXO) + 
require.NoError(env.vm.state.Commit()) + + env.vm.ctx.Lock.Unlock() // Check the balance with IncludePartial set to true balanceArgs = &GetBalanceArgs{ @@ -357,11 +195,10 @@ func TestServiceGetBalanceStrict(t *testing.T) { IncludePartial: true, } balanceReply = &GetBalanceReply{} - err = s.GetBalance(nil, balanceArgs, balanceReply) - require.NoError(t, err) + require.NoError(env.service.GetBalance(nil, balanceArgs, balanceReply)) // The balance should include the UTXO since it is partly owned by [addr] - require.Equal(t, uint64(1337+1337), uint64(balanceReply.Balance)) - require.Len(t, balanceReply.UTXOIDs, 2, "should have only returned 2 utxoIDs") + require.Equal(uint64(1337+1337), uint64(balanceReply.Balance)) + require.Len(balanceReply.UTXOIDs, 2) // Check the balance with IncludePartial set to false balanceArgs = &GetBalanceArgs{ @@ -369,15 +206,16 @@ func TestServiceGetBalanceStrict(t *testing.T) { AssetID: assetID.String(), } balanceReply = &GetBalanceReply{} - err = s.GetBalance(nil, balanceArgs, balanceReply) - require.NoError(t, err) + require.NoError(env.service.GetBalance(nil, balanceArgs, balanceReply)) // The balance should not include the UTXO since it is only partly owned by [addr] - require.Equal(t, uint64(0), uint64(balanceReply.Balance)) - require.Len(t, balanceReply.UTXOIDs, 0, "should have returned 0 utxoIDs") + require.Zero(balanceReply.Balance) + require.Empty(balanceReply.UTXOIDs) + + env.vm.ctx.Lock.Lock() // A UTXO with a 1 out of 1 multisig // but with a locktime in the future - now := vm.clock.Time() + now := env.vm.clock.Time() futureUTXO := &avax.UTXO{ UTXOID: avax.UTXOID{ TxID: ids.GenerateTestID(), @@ -394,8 +232,10 @@ func TestServiceGetBalanceStrict(t *testing.T) { }, } // Insert the UTXO - vm.state.AddUTXO(futureUTXO) - require.NoError(t, vm.state.Commit()) + env.vm.state.AddUTXO(futureUTXO) + require.NoError(env.vm.state.Commit()) + + env.vm.ctx.Lock.Unlock() // Check the balance with IncludePartial set to true balanceArgs = 
&GetBalanceArgs{ @@ -404,11 +244,10 @@ func TestServiceGetBalanceStrict(t *testing.T) { IncludePartial: true, } balanceReply = &GetBalanceReply{} - err = s.GetBalance(nil, balanceArgs, balanceReply) - require.NoError(t, err) + require.NoError(env.service.GetBalance(nil, balanceArgs, balanceReply)) // The balance should include the UTXO since it is partly owned by [addr] - require.Equal(t, uint64(1337*3), uint64(balanceReply.Balance)) - require.Len(t, balanceReply.UTXOIDs, 3, "should have returned 3 utxoIDs") + require.Equal(uint64(1337*3), uint64(balanceReply.Balance)) + require.Len(balanceReply.UTXOIDs, 3) // Check the balance with IncludePartial set to false balanceArgs = &GetBalanceArgs{ @@ -416,34 +255,33 @@ func TestServiceGetBalanceStrict(t *testing.T) { AssetID: assetID.String(), } balanceReply = &GetBalanceReply{} - err = s.GetBalance(nil, balanceArgs, balanceReply) - require.NoError(t, err) + require.NoError(env.service.GetBalance(nil, balanceArgs, balanceReply)) // The balance should not include the UTXO since it is only partly owned by [addr] - require.Equal(t, uint64(0), uint64(balanceReply.Balance)) - require.Len(t, balanceReply.UTXOIDs, 0, "should have returned 0 utxoIDs") + require.Zero(balanceReply.Balance) + require.Empty(balanceReply.UTXOIDs) } func TestServiceGetTxs(t *testing.T) { - _, vm, s, _, _ := setup(t, true) + require := require.New(t) + env := setup(t, &envConfig{}) var err error - vm.addressTxsIndexer, err = index.NewIndexer(vm.db, vm.ctx.Log, "", prometheus.NewRegistry(), false) - require.NoError(t, err) + env.vm.addressTxsIndexer, err = index.NewIndexer(env.vm.db, env.vm.ctx.Log, "", prometheus.NewRegistry(), false) + require.NoError(err) defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } - vm.ctx.Lock.Unlock() + env.vm.ctx.Lock.Lock() + require.NoError(env.vm.Shutdown(context.Background())) + env.vm.ctx.Lock.Unlock() }() assetID := ids.GenerateTestID() addr := ids.GenerateTestShortID() - 
addrStr, err := vm.FormatLocalAddress(addr) - if err != nil { - t.Fatal(err) - } + addrStr, err := env.vm.FormatLocalAddress(addr) + require.NoError(err) testTxCount := 25 - testTxs := setupTestTxsInDB(t, vm.db, addr, assetID, testTxCount) + testTxs := initTestTxIndex(t, env.vm.db, addr, assetID, testTxCount) + + env.vm.ctx.Lock.Unlock() // get the first page getTxsArgs := &GetAddressTxsArgs{ @@ -452,35 +290,32 @@ func TestServiceGetTxs(t *testing.T) { AssetID: assetID.String(), } getTxsReply := &GetAddressTxsReply{} - err = s.GetAddressTxs(nil, getTxsArgs, getTxsReply) - require.NoError(t, err) - require.Len(t, getTxsReply.TxIDs, 10) - require.Equal(t, getTxsReply.TxIDs, testTxs[:10]) + require.NoError(env.service.GetAddressTxs(nil, getTxsArgs, getTxsReply)) + require.Len(getTxsReply.TxIDs, 10) + require.Equal(testTxs[:10], getTxsReply.TxIDs) // get the second page getTxsArgs.Cursor = getTxsReply.Cursor getTxsReply = &GetAddressTxsReply{} - err = s.GetAddressTxs(nil, getTxsArgs, getTxsReply) - require.NoError(t, err) - require.Len(t, getTxsReply.TxIDs, 10) - require.Equal(t, getTxsReply.TxIDs, testTxs[10:20]) + require.NoError(env.service.GetAddressTxs(nil, getTxsArgs, getTxsReply)) + require.Len(getTxsReply.TxIDs, 10) + require.Equal(testTxs[10:20], getTxsReply.TxIDs) } func TestServiceGetAllBalances(t *testing.T) { - _, vm, s, _, _ := setup(t, true) + require := require.New(t) + + env := setup(t, &envConfig{}) defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } - vm.ctx.Lock.Unlock() + env.vm.ctx.Lock.Lock() + require.NoError(env.vm.Shutdown(context.Background())) + env.vm.ctx.Lock.Unlock() }() assetID := ids.GenerateTestID() addr := ids.GenerateTestShortID() - addrStr, err := vm.FormatLocalAddress(addr) - if err != nil { - t.Fatal(err) - } + addrStr, err := env.vm.FormatLocalAddress(addr) + require.NoError(err) // A UTXO with a 2 out of 2 multisig // where one of the addresses is [addr] twoOfTwoUTXO := &avax.UTXO{ @@ 
-498,8 +333,10 @@ func TestServiceGetAllBalances(t *testing.T) { }, } // Insert the UTXO - vm.state.AddUTXO(twoOfTwoUTXO) - require.NoError(t, vm.state.Commit()) + env.vm.state.AddUTXO(twoOfTwoUTXO) + require.NoError(env.vm.state.Commit()) + + env.vm.ctx.Lock.Unlock() // Check the balance with IncludePartial set to true balanceArgs := &GetAllBalancesArgs{ @@ -507,21 +344,21 @@ func TestServiceGetAllBalances(t *testing.T) { IncludePartial: true, } reply := &GetAllBalancesReply{} - err = s.GetAllBalances(nil, balanceArgs, reply) - require.NoError(t, err) + require.NoError(env.service.GetAllBalances(nil, balanceArgs, reply)) // The balance should include the UTXO since it is partly owned by [addr] - require.Len(t, reply.Balances, 1) - require.Equal(t, assetID.String(), reply.Balances[0].AssetID) - require.Equal(t, uint64(1337), uint64(reply.Balances[0].Balance)) + require.Len(reply.Balances, 1) + require.Equal(assetID.String(), reply.Balances[0].AssetID) + require.Equal(uint64(1337), uint64(reply.Balances[0].Balance)) // Check the balance with IncludePartial set to false balanceArgs = &GetAllBalancesArgs{ JSONAddress: api.JSONAddress{Address: addrStr}, } reply = &GetAllBalancesReply{} - err = s.GetAllBalances(nil, balanceArgs, reply) - require.NoError(t, err) - require.Len(t, reply.Balances, 0) + require.NoError(env.service.GetAllBalances(nil, balanceArgs, reply)) + require.Empty(reply.Balances) + + env.vm.ctx.Lock.Lock() // A UTXO with a 1 out of 2 multisig // where one of the addresses is [addr] @@ -540,8 +377,10 @@ func TestServiceGetAllBalances(t *testing.T) { }, } // Insert the UTXO - vm.state.AddUTXO(oneOfTwoUTXO) - require.NoError(t, vm.state.Commit()) + env.vm.state.AddUTXO(oneOfTwoUTXO) + require.NoError(env.vm.state.Commit()) + + env.vm.ctx.Lock.Unlock() // Check the balance with IncludePartial set to true balanceArgs = &GetAllBalancesArgs{ @@ -549,26 +388,26 @@ func TestServiceGetAllBalances(t *testing.T) { IncludePartial: true, } reply = 
&GetAllBalancesReply{} - err = s.GetAllBalances(nil, balanceArgs, reply) - require.NoError(t, err) + require.NoError(env.service.GetAllBalances(nil, balanceArgs, reply)) // The balance should include the UTXO since it is partly owned by [addr] - require.Len(t, reply.Balances, 1) - require.Equal(t, assetID.String(), reply.Balances[0].AssetID) - require.Equal(t, uint64(1337*2), uint64(reply.Balances[0].Balance)) + require.Len(reply.Balances, 1) + require.Equal(assetID.String(), reply.Balances[0].AssetID) + require.Equal(uint64(1337*2), uint64(reply.Balances[0].Balance)) // Check the balance with IncludePartial set to false balanceArgs = &GetAllBalancesArgs{ JSONAddress: api.JSONAddress{Address: addrStr}, } reply = &GetAllBalancesReply{} - err = s.GetAllBalances(nil, balanceArgs, reply) - require.NoError(t, err) + require.NoError(env.service.GetAllBalances(nil, balanceArgs, reply)) // The balance should not include the UTXO since it is only partly owned by [addr] - require.Len(t, reply.Balances, 0) + require.Empty(reply.Balances) + + env.vm.ctx.Lock.Lock() // A UTXO with a 1 out of 1 multisig // but with a locktime in the future - now := vm.clock.Time() + now := env.vm.clock.Time() futureUTXO := &avax.UTXO{ UTXOID: avax.UTXOID{ TxID: ids.GenerateTestID(), @@ -585,8 +424,10 @@ func TestServiceGetAllBalances(t *testing.T) { }, } // Insert the UTXO - vm.state.AddUTXO(futureUTXO) - require.NoError(t, vm.state.Commit()) + env.vm.state.AddUTXO(futureUTXO) + require.NoError(env.vm.state.Commit()) + + env.vm.ctx.Lock.Unlock() // Check the balance with IncludePartial set to true balanceArgs = &GetAllBalancesArgs{ @@ -594,22 +435,22 @@ func TestServiceGetAllBalances(t *testing.T) { IncludePartial: true, } reply = &GetAllBalancesReply{} - err = s.GetAllBalances(nil, balanceArgs, reply) - require.NoError(t, err) + require.NoError(env.service.GetAllBalances(nil, balanceArgs, reply)) // The balance should include the UTXO since it is partly owned by [addr] // The balance should 
include the UTXO since it is partly owned by [addr] - require.Len(t, reply.Balances, 1) - require.Equal(t, assetID.String(), reply.Balances[0].AssetID) - require.Equal(t, uint64(1337*3), uint64(reply.Balances[0].Balance)) + require.Len(reply.Balances, 1) + require.Equal(assetID.String(), reply.Balances[0].AssetID) + require.Equal(uint64(1337*3), uint64(reply.Balances[0].Balance)) // Check the balance with IncludePartial set to false balanceArgs = &GetAllBalancesArgs{ JSONAddress: api.JSONAddress{Address: addrStr}, } reply = &GetAllBalancesReply{} - err = s.GetAllBalances(nil, balanceArgs, reply) - require.NoError(t, err) + require.NoError(env.service.GetAllBalances(nil, balanceArgs, reply)) // The balance should not include the UTXO since it is only partly owned by [addr] - require.Len(t, reply.Balances, 0) + require.Empty(reply.Balances) + + env.vm.ctx.Lock.Lock() // A UTXO for a different asset otherAssetID := ids.GenerateTestID() @@ -628,8 +469,10 @@ func TestServiceGetAllBalances(t *testing.T) { }, } // Insert the UTXO - vm.state.AddUTXO(otherAssetUTXO) - require.NoError(t, vm.state.Commit()) + env.vm.state.AddUTXO(otherAssetUTXO) + require.NoError(env.vm.state.Commit()) + + env.vm.ctx.Lock.Unlock() // Check the balance with IncludePartial set to true balanceArgs = &GetAllBalancesArgs{ @@ -637,808 +480,1087 @@ func TestServiceGetAllBalances(t *testing.T) { IncludePartial: true, } reply = &GetAllBalancesReply{} - err = s.GetAllBalances(nil, balanceArgs, reply) - require.NoError(t, err) + require.NoError(env.service.GetAllBalances(nil, balanceArgs, reply)) // The balance should include the UTXO since it is partly owned by [addr] - require.Len(t, reply.Balances, 2) + require.Len(reply.Balances, 2) gotAssetIDs := []string{reply.Balances[0].AssetID, reply.Balances[1].AssetID} - require.Contains(t, gotAssetIDs, assetID.String()) - require.Contains(t, gotAssetIDs, otherAssetID.String()) + require.Contains(gotAssetIDs, assetID.String()) + require.Contains(gotAssetIDs, 
otherAssetID.String()) gotBalances := []uint64{uint64(reply.Balances[0].Balance), uint64(reply.Balances[1].Balance)} - require.Contains(t, gotBalances, uint64(1337)) - require.Contains(t, gotBalances, uint64(1337*3)) + require.Contains(gotBalances, uint64(1337)) + require.Contains(gotBalances, uint64(1337*3)) // Check the balance with IncludePartial set to false balanceArgs = &GetAllBalancesArgs{ JSONAddress: api.JSONAddress{Address: addrStr}, } reply = &GetAllBalancesReply{} - err = s.GetAllBalances(nil, balanceArgs, reply) - require.NoError(t, err) + require.NoError(env.service.GetAllBalances(nil, balanceArgs, reply)) // The balance should include the UTXO since it is partly owned by [addr] - require.Len(t, reply.Balances, 0) + require.Empty(reply.Balances) } func TestServiceGetTx(t *testing.T) { - _, vm, s, _, genesisTx := setup(t, true) + require := require.New(t) + + env := setup(t, &envConfig{}) + env.vm.ctx.Lock.Unlock() + defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } - vm.ctx.Lock.Unlock() + env.vm.ctx.Lock.Lock() + require.NoError(env.vm.Shutdown(context.Background())) + env.vm.ctx.Lock.Unlock() }() - txID := genesisTx.ID() + txID := env.genesisTx.ID() reply := api.GetTxReply{} - err := s.GetTx(nil, &api.GetTxArgs{ - TxID: txID, - }, &reply) - require.NoError(t, err) - if err != nil { - t.Fatal(err) - } - txBytes, err := formatting.Decode(reply.Encoding, reply.Tx.(string)) - if err != nil { - t.Fatal(err) - } - require.Equal(t, genesisTx.Bytes(), txBytes, "Wrong tx returned from service.GetTx") + require.NoError(env.service.GetTx(nil, &api.GetTxArgs{ + TxID: txID, + Encoding: formatting.Hex, + }, &reply)) + + var txStr string + require.NoError(json.Unmarshal(reply.Tx, &txStr)) + + txBytes, err := formatting.Decode(reply.Encoding, txStr) + require.NoError(err) + require.Equal(env.genesisTx.Bytes(), txBytes) } func TestServiceGetTxJSON_BaseTx(t *testing.T) { require := require.New(t) - genesisBytes, vm, s, 
issuer := setupWithIssuer(t, true) - ctx := vm.ctx + env := setup(t, &envConfig{}) + env.vm.ctx.Lock.Unlock() defer func() { - require.NoError(vm.Shutdown(context.Background())) - ctx.Lock.Unlock() + env.vm.ctx.Lock.Lock() + require.NoError(env.vm.Shutdown(context.Background())) + env.vm.ctx.Lock.Unlock() }() - newTx := newAvaxBaseTxWithOutputs(t, genesisBytes, vm) + newTx := newAvaxBaseTxWithOutputs(t, env.genesisBytes, env.vm.ctx.ChainID, env.vm.TxFee, env.vm.parser) + issueAndAccept(require, env.vm, env.issuer, newTx) + + reply := api.GetTxReply{} + require.NoError(env.service.GetTx(nil, &api.GetTxArgs{ + TxID: newTx.ID(), + Encoding: formatting.JSON, + }, &reply)) + + require.Equal(formatting.JSON, reply.Encoding) - txID, err := vm.IssueTx(newTx.Bytes()) + replyTxBytes, err := json.MarshalIndent(reply.Tx, "", "\t") require.NoError(err) - require.Equal(newTx.ID(), txID) - ctx.Lock.Unlock() - msg := <-issuer - require.Equal(common.PendingTxs, msg) - ctx.Lock.Lock() + expectedReplyTxString := `{ + "unsignedTx": { + "networkID": 10, + "blockchainID": "PLACEHOLDER_BLOCKCHAIN_ID", + "outputs": [ + { + "assetID": "2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "output": { + "addresses": [ + "X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e" + ], + "amount": 49000, + "locktime": 0, + "threshold": 1 + } + } + ], + "inputs": [ + { + "txID": "2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ", + "outputIndex": 2, + "assetID": "2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "input": { + "amount": 50000, + "signatureIndices": [ + 0 + ] + } + } + ], + "memo": "0x0102030405060708" + }, + "credentials": [ + { + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "credential": { + "signatures": [ + "PLACEHOLDER_SIGNATURE" + ] + } + } + ], + "id": "PLACEHOLDER_TX_ID" +}` - txs := vm.PendingTxs(context.Background()) - 
require.Len(txs, 1) - require.NoError(txs[0].Accept(context.Background())) + expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_TX_ID", newTx.ID().String(), 1) + expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_BLOCKCHAIN_ID", newTx.Unsigned.(*txs.BaseTx).BlockchainID.String(), 1) - reply := api.GetTxReply{} - err = s.GetTx(nil, &api.GetTxArgs{ - TxID: txID, - Encoding: formatting.JSON, - }, &reply) + sigStr, err := formatting.Encode(formatting.HexNC, newTx.Creds[0].Credential.(*secp256k1fx.Credential).Sigs[0][:]) require.NoError(err) - require.Equal(reply.Encoding, formatting.JSON) - jsonTxBytes, err := stdjson.Marshal(reply.Tx) - require.NoError(err) - jsonString := string(jsonTxBytes) - // fxID in the VM is really set to 11111111111111111111111111111111LpoYY for [secp256k1fx.TransferOutput] - require.Contains(jsonString, "\"memo\":\"0x0102030405060708\"") - require.Contains(jsonString, "\"inputs\":[{\"txID\":\"2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ\",\"outputIndex\":2,\"assetID\":\"2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ\",\"fxID\":\"11111111111111111111111111111111LpoYY\",\"input\":{\"amount\":50000,\"signatureIndices\":[0]}}]") - require.Contains(jsonString, "\"outputs\":[{\"assetID\":\"2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ\",\"fxID\":\"11111111111111111111111111111111LpoYY\",\"output\":{\"addresses\":[\"X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e\"],\"amount\":49000,\"locktime\":0,\"threshold\":1}}]") + expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_SIGNATURE", sigStr, 1) + + require.Equal(expectedReplyTxString, string(replyTxBytes)) } func TestServiceGetTxJSON_ExportTx(t *testing.T) { require := require.New(t) - genesisBytes, vm, s, issuer := setupWithIssuer(t, true) - ctx := vm.ctx + env := setup(t, &envConfig{}) + env.vm.ctx.Lock.Unlock() defer func() { - require.NoError(vm.Shutdown(context.Background())) - ctx.Lock.Unlock() + 
env.vm.ctx.Lock.Lock() + require.NoError(env.vm.Shutdown(context.Background())) + env.vm.ctx.Lock.Unlock() }() - newTx := newAvaxExportTxWithOutputs(t, genesisBytes, vm) + newTx := newAvaxExportTxWithOutputs(t, env.genesisBytes, env.vm.ctx.ChainID, env.vm.TxFee, env.vm.parser) + issueAndAccept(require, env.vm, env.issuer, newTx) - txID, err := vm.IssueTx(newTx.Bytes()) + reply := api.GetTxReply{} + require.NoError(env.service.GetTx(nil, &api.GetTxArgs{ + TxID: newTx.ID(), + Encoding: formatting.JSON, + }, &reply)) + + require.Equal(formatting.JSON, reply.Encoding) + replyTxBytes, err := json.MarshalIndent(reply.Tx, "", "\t") require.NoError(err) - require.Equal(newTx.ID(), txID) - ctx.Lock.Unlock() - msg := <-issuer - require.Equal(common.PendingTxs, msg) + expectedReplyTxString := `{ + "unsignedTx": { + "networkID": 10, + "blockchainID": "PLACEHOLDER_BLOCKCHAIN_ID", + "outputs": null, + "inputs": [ + { + "txID": "2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ", + "outputIndex": 2, + "assetID": "2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "input": { + "amount": 50000, + "signatureIndices": [ + 0 + ] + } + } + ], + "memo": "0x", + "destinationChain": "11111111111111111111111111111111LpoYY", + "exportedOutputs": [ + { + "assetID": "2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "output": { + "addresses": [ + "X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e" + ], + "amount": 49000, + "locktime": 0, + "threshold": 1 + } + } + ] + }, + "credentials": [ + { + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "credential": { + "signatures": [ + "PLACEHOLDER_SIGNATURE" + ] + } + } + ], + "id": "PLACEHOLDER_TX_ID" +}` - ctx.Lock.Lock() - txs := vm.PendingTxs(context.Background()) - require.Len(txs, 1) - require.NoError(txs[0].Accept(context.Background())) + expectedReplyTxString = 
strings.Replace(expectedReplyTxString, "PLACEHOLDER_TX_ID", newTx.ID().String(), 1) + expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_BLOCKCHAIN_ID", newTx.Unsigned.(*txs.ExportTx).BlockchainID.String(), 1) - reply := api.GetTxReply{} - err = s.GetTx(nil, &api.GetTxArgs{ - TxID: txID, - Encoding: formatting.JSON, - }, &reply) + sigStr, err := formatting.Encode(formatting.HexNC, newTx.Creds[0].Credential.(*secp256k1fx.Credential).Sigs[0][:]) require.NoError(err) - require.Equal(reply.Encoding, formatting.JSON) - jsonTxBytes, err := stdjson.Marshal(reply.Tx) - require.NoError(err) - jsonString := string(jsonTxBytes) - // fxID in the VM is really set to 11111111111111111111111111111111LpoYY for [secp256k1fx.TransferOutput] - require.Contains(jsonString, "\"inputs\":[{\"txID\":\"2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ\",\"outputIndex\":2,\"assetID\":\"2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ\",\"fxID\":\"11111111111111111111111111111111LpoYY\",\"input\":{\"amount\":50000,\"signatureIndices\":[0]}}]") - require.Contains(jsonString, "\"exportedOutputs\":[{\"assetID\":\"2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ\",\"fxID\":\"11111111111111111111111111111111LpoYY\",\"output\":{\"addresses\":[\"X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e\"],\"amount\":49000,\"locktime\":0,\"threshold\":1}}]}") + expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_SIGNATURE", sigStr, 1) + + require.Equal(expectedReplyTxString, string(replyTxBytes)) } func TestServiceGetTxJSON_CreateAssetTx(t *testing.T) { require := require.New(t) - vm := &VM{} - ctx := NewContext(t) - ctx.Lock.Lock() + env := setup(t, &envConfig{ + vmStaticConfig: &config.Config{}, + additionalFxs: []*common.Fx{{ + ID: propertyfx.ID, + Fx: &propertyfx.Fx{}, + }}, + }) + env.vm.ctx.Lock.Unlock() defer func() { - require.NoError(vm.Shutdown(context.Background())) - ctx.Lock.Unlock() + env.vm.ctx.Lock.Lock() + 
require.NoError(env.vm.Shutdown(context.Background())) + env.vm.ctx.Lock.Unlock() }() - baseDBManager := manager.NewMemDB(version.Semantic1_0_0) - m := atomic.NewMemory(prefixdb.New([]byte{0}, baseDBManager.Current().Database)) - ctx.SharedMemory = m.NewSharedMemory(ctx.ChainID) - - genesisBytes := BuildGenesisTest(t) - issuer := make(chan common.Message, 1) - err := vm.Initialize( - context.Background(), - ctx, - baseDBManager, - genesisBytes, - nil, - nil, - issuer, - []*common.Fx{ + createAssetTx := newAvaxCreateAssetTxWithOutputs(t, env.vm.ctx.ChainID, env.vm.parser) + issueAndAccept(require, env.vm, env.issuer, createAssetTx) + + reply := api.GetTxReply{} + require.NoError(env.service.GetTx(nil, &api.GetTxArgs{ + TxID: createAssetTx.ID(), + Encoding: formatting.JSON, + }, &reply)) + + require.Equal(formatting.JSON, reply.Encoding) + + replyTxBytes, err := json.MarshalIndent(reply.Tx, "", "\t") + require.NoError(err) + + expectedReplyTxString := `{ + "unsignedTx": { + "networkID": 10, + "blockchainID": "PLACEHOLDER_BLOCKCHAIN_ID", + "outputs": null, + "inputs": null, + "memo": "0x", + "name": "Team Rocket", + "symbol": "TR", + "denomination": 0, + "initialStates": [ { - ID: ids.Empty.Prefix(0), - Fx: &secp256k1fx.Fx{}, + "fxIndex": 0, + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "outputs": [ + { + "addresses": [ + "X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e" + ], + "locktime": 0, + "threshold": 1 + }, + { + "addresses": [ + "X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e" + ], + "locktime": 0, + "threshold": 1 + } + ] }, { - ID: ids.Empty.Prefix(1), - Fx: &nftfx.Fx{}, + "fxIndex": 1, + "fxID": "qd2U4HDWUvMrVUeTcCHp6xH3Qpnn1XbU5MDdnBoiifFqvgXwT", + "outputs": [ + { + "addresses": [ + "X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e" + ], + "groupID": 1, + "locktime": 0, + "threshold": 1 + }, + { + "addresses": [ + "X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e" + ], + "groupID": 2, + "locktime": 0, + "threshold": 1 + } + ] }, { - 
ID: ids.Empty.Prefix(2), - Fx: &propertyfx.Fx{}, - }, - }, - &common.SenderTest{T: t}, - ) - require.NoError(err) - vm.batchTimeout = 0 - - require.NoError(vm.SetState(context.Background(), snow.Bootstrapping)) - require.NoError(vm.SetState(context.Background(), snow.NormalOp)) - - createAssetTx := newAvaxCreateAssetTxWithOutputs(t, vm) - txID, err := vm.IssueTx(createAssetTx.Bytes()) - require.NoError(err) - require.Equal(createAssetTx.ID(), txID) - ctx.Lock.Unlock() - - msg := <-issuer - require.Equal(common.PendingTxs, msg) - ctx.Lock.Lock() - - txs := vm.PendingTxs(context.Background()) - require.Len(txs, 1) - require.NoError(txs[0].Accept(context.Background())) - - reply := api.GetTxReply{} - s := &Service{vm: vm} - err = s.GetTx(nil, &api.GetTxArgs{ - TxID: txID, - Encoding: formatting.JSON, - }, &reply) - require.NoError(err) + "fxIndex": 2, + "fxID": "rXJsCSEYXg2TehWxCEEGj6JU2PWKTkd6cBdNLjoe2SpsKD9cy", + "outputs": [ + { + "addresses": [ + "X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e" + ], + "locktime": 0, + "threshold": 1 + }, + { + "addresses": [ + "X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e" + ], + "locktime": 0, + "threshold": 1 + } + ] + } + ] + }, + "credentials": null, + "id": "PLACEHOLDER_TX_ID" +}` - require.Equal(reply.Encoding, formatting.JSON) - jsonTxBytes, err := stdjson.Marshal(reply.Tx) - require.NoError(err) - jsonString := string(jsonTxBytes) + expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_TX_ID", createAssetTx.ID().String(), 1) + expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_BLOCKCHAIN_ID", createAssetTx.Unsigned.(*txs.CreateAssetTx).BlockchainID.String(), 1) - // contains the address in the right format - require.Contains(jsonString, 
"\"outputs\":[{\"addresses\":[\"X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e\"],\"groupID\":1,\"locktime\":0,\"threshold\":1},{\"addresses\":[\"X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e\"],\"groupID\":2,\"locktime\":0,\"threshold\":1}]}") - require.Contains(jsonString, "\"initialStates\":[{\"fxIndex\":0,\"fxID\":\"LUC1cmcxnfNR9LdkACS2ccGKLEK7SYqB4gLLTycQfg1koyfSq\",\"outputs\":[{\"addresses\":[\"X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e\"],\"locktime\":0,\"threshold\":1},{\"addresses\":[\"X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e\"],\"locktime\":0,\"threshold\":1}]},{\"fxIndex\":1,\"fxID\":\"TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES\",\"outputs\":[{\"addresses\":[\"X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e\"],\"groupID\":1,\"locktime\":0,\"threshold\":1},{\"addresses\":[\"X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e\"],\"groupID\":2,\"locktime\":0,\"threshold\":1}]},{\"fxIndex\":2,\"fxID\":\"2mcwQKiD8VEspmMJpL1dc7okQQ5dDVAWeCBZ7FWBFAbxpv3t7w\",\"outputs\":[{\"addresses\":[\"X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e\"],\"locktime\":0,\"threshold\":1},{\"addresses\":[\"X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e\"],\"locktime\":0,\"threshold\":1}]}]},\"credentials\":[]}") + require.Equal(expectedReplyTxString, string(replyTxBytes)) } func TestServiceGetTxJSON_OperationTxWithNftxMintOp(t *testing.T) { require := require.New(t) - vm := &VM{} - ctx := NewContext(t) - ctx.Lock.Lock() + env := setup(t, &envConfig{ + vmStaticConfig: &config.Config{}, + additionalFxs: []*common.Fx{{ + ID: propertyfx.ID, + Fx: &propertyfx.Fx{}, + }}, + }) + env.vm.ctx.Lock.Unlock() defer func() { - require.NoError(vm.Shutdown(context.Background())) - ctx.Lock.Unlock() + env.vm.ctx.Lock.Lock() + require.NoError(env.vm.Shutdown(context.Background())) + env.vm.ctx.Lock.Unlock() }() - baseDBManager := manager.NewMemDB(version.Semantic1_0_0) - m := atomic.NewMemory(prefixdb.New([]byte{0}, baseDBManager.Current().Database)) - ctx.SharedMemory 
= m.NewSharedMemory(ctx.ChainID) - - genesisBytes := BuildGenesisTest(t) - issuer := make(chan common.Message, 1) - err := vm.Initialize( - context.Background(), - ctx, - baseDBManager, - genesisBytes, - nil, - nil, - issuer, - []*common.Fx{ - { - ID: ids.Empty.Prefix(0), - Fx: &secp256k1fx.Fx{}, - }, - { - ID: ids.Empty.Prefix(1), - Fx: &nftfx.Fx{}, - }, - { - ID: ids.Empty.Prefix(2), - Fx: &propertyfx.Fx{}, - }, - }, - &common.SenderTest{T: t}, - ) - require.NoError(err) - vm.batchTimeout = 0 + key := keys[0] + createAssetTx := newAvaxCreateAssetTxWithOutputs(t, env.vm.ctx.ChainID, env.vm.parser) + issueAndAccept(require, env.vm, env.issuer, createAssetTx) - require.NoError(vm.SetState(context.Background(), snow.Bootstrapping)) - require.NoError(vm.SetState(context.Background(), snow.NormalOp)) + mintNFTTx := buildOperationTxWithOp(env.vm.ctx.ChainID, buildNFTxMintOp(createAssetTx, key, 2, 1)) + require.NoError(mintNFTTx.SignNFTFx(env.vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}})) + issueAndAccept(require, env.vm, env.issuer, mintNFTTx) - key := keys[0] - createAssetTx := newAvaxCreateAssetTxWithOutputs(t, vm) - _, err = vm.IssueTx(createAssetTx.Bytes()) - require.NoError(err) + reply := api.GetTxReply{} + require.NoError(env.service.GetTx(nil, &api.GetTxArgs{ + TxID: mintNFTTx.ID(), + Encoding: formatting.JSON, + }, &reply)) - mintNFTTx := buildOperationTxWithOp(buildNFTxMintOp(createAssetTx, key, 2, 1)) - err = mintNFTTx.SignNFTFx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}}) - require.NoError(err) + require.Equal(formatting.JSON, reply.Encoding) - txID, err := vm.IssueTx(mintNFTTx.Bytes()) + replyTxBytes, err := json.MarshalIndent(reply.Tx, "", "\t") require.NoError(err) - require.Equal(mintNFTTx.ID(), txID) - ctx.Lock.Unlock() - msg := <-issuer - require.Equal(common.PendingTxs, msg) - ctx.Lock.Lock() + expectedReplyTxString := `{ + "unsignedTx": { + "networkID": 10, + "blockchainID": "PLACEHOLDER_BLOCKCHAIN_ID", + "outputs": null, + "inputs": 
null, + "memo": "0x", + "operations": [ + { + "assetID": "PLACEHOLDER_CREATE_ASSET_TX_ID", + "inputIDs": [ + { + "txID": "PLACEHOLDER_CREATE_ASSET_TX_ID", + "outputIndex": 2 + } + ], + "fxID": "qd2U4HDWUvMrVUeTcCHp6xH3Qpnn1XbU5MDdnBoiifFqvgXwT", + "operation": { + "mintInput": { + "signatureIndices": [ + 0 + ] + }, + "groupID": 1, + "payload": "0x68656c6c6f", + "outputs": [ + { + "addresses": [ + "X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e" + ], + "locktime": 0, + "threshold": 1 + } + ] + } + } + ] + }, + "credentials": [ + { + "fxID": "qd2U4HDWUvMrVUeTcCHp6xH3Qpnn1XbU5MDdnBoiifFqvgXwT", + "credential": { + "signatures": [ + "PLACEHOLDER_SIGNATURE" + ] + } + } + ], + "id": "PLACEHOLDER_TX_ID" +}` - txs := vm.PendingTxs(context.Background()) - require.Len(txs, 2) - require.NoError(txs[0].Accept(context.Background())) - require.NoError(txs[1].Accept(context.Background())) + expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_CREATE_ASSET_TX_ID", createAssetTx.ID().String(), 2) + expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_TX_ID", mintNFTTx.ID().String(), 1) + expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_BLOCKCHAIN_ID", mintNFTTx.Unsigned.(*txs.OperationTx).BlockchainID.String(), 1) - reply := api.GetTxReply{} - s := &Service{vm: vm} - err = s.GetTx(nil, &api.GetTxArgs{ - TxID: txID, - Encoding: formatting.JSON, - }, &reply) + sigStr, err := formatting.Encode(formatting.HexNC, mintNFTTx.Creds[0].Credential.(*nftfx.Credential).Sigs[0][:]) require.NoError(err) - require.Equal(reply.Encoding, formatting.JSON) - jsonTxBytes, err := stdjson.Marshal(reply.Tx) - require.NoError(err) - jsonString := string(jsonTxBytes) - // assert memo and payload are in hex - require.Contains(jsonString, "\"memo\":\"0x\"") - require.Contains(jsonString, "\"payload\":\"0x68656c6c6f\"") - // contains the address in the right format - require.Contains(jsonString, 
"\"outputs\":[{\"addresses\":[\"X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e\"]") - // contains the fxID - require.Contains(jsonString, "\"operations\":[{\"assetID\":\"2MDgrsBHMRsEPa4D4NA1Bo1pjkVLUK173S3dd9BgT2nCJNiDuS\",\"inputIDs\":[{\"txID\":\"2MDgrsBHMRsEPa4D4NA1Bo1pjkVLUK173S3dd9BgT2nCJNiDuS\",\"outputIndex\":2}],\"fxID\":\"TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES\"") - require.Contains(jsonString, "\"credentials\":[{\"fxID\":\"TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES\",\"credential\":{\"signatures\":[\"0x571f18cfdb254263ab6b987f742409bd5403eafe08b4dbc297c5cd8d1c85eb8812e4541e11d3dc692cd14b5f4bccc1835ec001df6d8935ce881caf97017c2a4801\"]}}]") + expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_SIGNATURE", sigStr, 1) + + require.Equal(expectedReplyTxString, string(replyTxBytes)) } func TestServiceGetTxJSON_OperationTxWithMultipleNftxMintOp(t *testing.T) { require := require.New(t) - vm := &VM{} - ctx := NewContext(t) - ctx.Lock.Lock() + env := setup(t, &envConfig{ + vmStaticConfig: &config.Config{}, + additionalFxs: []*common.Fx{{ + ID: propertyfx.ID, + Fx: &propertyfx.Fx{}, + }}, + }) + env.vm.ctx.Lock.Unlock() defer func() { - require.NoError(vm.Shutdown(context.Background())) - ctx.Lock.Unlock() + env.vm.ctx.Lock.Lock() + require.NoError(env.vm.Shutdown(context.Background())) + env.vm.ctx.Lock.Unlock() }() - baseDBManager := manager.NewMemDB(version.Semantic1_0_0) - m := atomic.NewMemory(prefixdb.New([]byte{0}, baseDBManager.Current().Database)) - ctx.SharedMemory = m.NewSharedMemory(ctx.ChainID) - - genesisBytes := BuildGenesisTest(t) - issuer := make(chan common.Message, 1) - err := vm.Initialize( - context.Background(), - ctx, - baseDBManager, - genesisBytes, - nil, - nil, - issuer, - []*common.Fx{ - { - ID: ids.Empty.Prefix(0), - Fx: &secp256k1fx.Fx{}, - }, - { - ID: ids.Empty.Prefix(1), - Fx: &nftfx.Fx{}, - }, - { - ID: ids.Empty.Prefix(2), - Fx: &propertyfx.Fx{}, - }, - }, - &common.SenderTest{T: t}, - ) - 
require.NoError(err) - vm.batchTimeout = 0 - - require.NoError(vm.SetState(context.Background(), snow.Bootstrapping)) - require.NoError(vm.SetState(context.Background(), snow.NormalOp)) - key := keys[0] - createAssetTx := newAvaxCreateAssetTxWithOutputs(t, vm) - _, err = vm.IssueTx(createAssetTx.Bytes()) - require.NoError(err) + createAssetTx := newAvaxCreateAssetTxWithOutputs(t, env.vm.ctx.ChainID, env.vm.parser) + issueAndAccept(require, env.vm, env.issuer, createAssetTx) mintOp1 := buildNFTxMintOp(createAssetTx, key, 2, 1) mintOp2 := buildNFTxMintOp(createAssetTx, key, 3, 2) - mintNFTTx := buildOperationTxWithOp(mintOp1, mintOp2) + mintNFTTx := buildOperationTxWithOp(env.vm.ctx.ChainID, mintOp1, mintOp2) - err = mintNFTTx.SignNFTFx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}, {key}}) - require.NoError(err) - - txID, err := vm.IssueTx(mintNFTTx.Bytes()) - require.NoError(err) - require.Equal(mintNFTTx.ID(), txID) - ctx.Lock.Unlock() - - msg := <-issuer - require.Equal(common.PendingTxs, msg) - ctx.Lock.Lock() - - txs := vm.PendingTxs(context.Background()) - require.Len(txs, 2) - require.NoError(txs[0].Accept(context.Background())) - require.NoError(txs[1].Accept(context.Background())) + require.NoError(mintNFTTx.SignNFTFx(env.vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}, {key}})) + issueAndAccept(require, env.vm, env.issuer, mintNFTTx) reply := api.GetTxReply{} - s := &Service{vm: vm} - err = s.GetTx(nil, &api.GetTxArgs{ - TxID: txID, + require.NoError(env.service.GetTx(nil, &api.GetTxArgs{ + TxID: mintNFTTx.ID(), Encoding: formatting.JSON, - }, &reply) + }, &reply)) + + require.Equal(formatting.JSON, reply.Encoding) + + replyTxBytes, err := json.MarshalIndent(reply.Tx, "", "\t") require.NoError(err) - require.Equal(reply.Encoding, formatting.JSON) - jsonTxBytes, err := stdjson.Marshal(reply.Tx) + expectedReplyTxString := `{ + "unsignedTx": { + "networkID": 10, + "blockchainID": "PLACEHOLDER_BLOCKCHAIN_ID", + "outputs": null, + "inputs": null, + 
"memo": "0x", + "operations": [ + { + "assetID": "PLACEHOLDER_CREATE_ASSET_TX_ID", + "inputIDs": [ + { + "txID": "PLACEHOLDER_CREATE_ASSET_TX_ID", + "outputIndex": 2 + } + ], + "fxID": "qd2U4HDWUvMrVUeTcCHp6xH3Qpnn1XbU5MDdnBoiifFqvgXwT", + "operation": { + "mintInput": { + "signatureIndices": [ + 0 + ] + }, + "groupID": 1, + "payload": "0x68656c6c6f", + "outputs": [ + { + "addresses": [ + "X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e" + ], + "locktime": 0, + "threshold": 1 + } + ] + } + }, + { + "assetID": "PLACEHOLDER_CREATE_ASSET_TX_ID", + "inputIDs": [ + { + "txID": "PLACEHOLDER_CREATE_ASSET_TX_ID", + "outputIndex": 3 + } + ], + "fxID": "qd2U4HDWUvMrVUeTcCHp6xH3Qpnn1XbU5MDdnBoiifFqvgXwT", + "operation": { + "mintInput": { + "signatureIndices": [ + 0 + ] + }, + "groupID": 2, + "payload": "0x68656c6c6f", + "outputs": [ + { + "addresses": [ + "X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e" + ], + "locktime": 0, + "threshold": 1 + } + ] + } + } + ] + }, + "credentials": [ + { + "fxID": "qd2U4HDWUvMrVUeTcCHp6xH3Qpnn1XbU5MDdnBoiifFqvgXwT", + "credential": { + "signatures": [ + "PLACEHOLDER_SIGNATURE" + ] + } + }, + { + "fxID": "qd2U4HDWUvMrVUeTcCHp6xH3Qpnn1XbU5MDdnBoiifFqvgXwT", + "credential": { + "signatures": [ + "PLACEHOLDER_SIGNATURE" + ] + } + } + ], + "id": "PLACEHOLDER_TX_ID" +}` + + expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_CREATE_ASSET_TX_ID", createAssetTx.ID().String(), 4) + expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_TX_ID", mintNFTTx.ID().String(), 1) + expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_BLOCKCHAIN_ID", mintNFTTx.Unsigned.(*txs.OperationTx).BlockchainID.String(), 1) + + sigStr, err := formatting.Encode(formatting.HexNC, mintNFTTx.Creds[0].Credential.(*nftfx.Credential).Sigs[0][:]) require.NoError(err) - jsonString := string(jsonTxBytes) - // contains the address in the right format - require.Contains(jsonString, 
"\"outputs\":[{\"addresses\":[\"X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e\"]") + expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_SIGNATURE", sigStr, 2) - // contains the fxID - require.Contains(jsonString, "\"operations\":[{\"assetID\":\"2MDgrsBHMRsEPa4D4NA1Bo1pjkVLUK173S3dd9BgT2nCJNiDuS\",\"inputIDs\":[{\"txID\":\"2MDgrsBHMRsEPa4D4NA1Bo1pjkVLUK173S3dd9BgT2nCJNiDuS\",\"outputIndex\":2}],\"fxID\":\"TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES\"") - require.Contains(jsonString, "\"credentials\":[{\"fxID\":\"TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES\",\"credential\":{\"signatures\":[\"0x2400cf2cf978697b3484d5340609b524eb9dfa401e5b2bd5d1bc6cee2a6b1ae41926550f00ae0651c312c35e225cb3f39b506d96c5170fb38a820dcfed11ccd801\"]}},{\"fxID\":\"TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES\",\"credential\":{\"signatures\":[\"0x2400cf2cf978697b3484d5340609b524eb9dfa401e5b2bd5d1bc6cee2a6b1ae41926550f00ae0651c312c35e225cb3f39b506d96c5170fb38a820dcfed11ccd801\"]}}]") + require.Equal(expectedReplyTxString, string(replyTxBytes)) } func TestServiceGetTxJSON_OperationTxWithSecpMintOp(t *testing.T) { require := require.New(t) - vm := &VM{} - ctx := NewContext(t) - ctx.Lock.Lock() + env := setup(t, &envConfig{ + vmStaticConfig: &config.Config{}, + additionalFxs: []*common.Fx{{ + ID: propertyfx.ID, + Fx: &propertyfx.Fx{}, + }}, + }) + env.vm.ctx.Lock.Unlock() defer func() { - require.NoError(vm.Shutdown(context.Background())) - ctx.Lock.Unlock() + env.vm.ctx.Lock.Lock() + require.NoError(env.vm.Shutdown(context.Background())) + env.vm.ctx.Lock.Unlock() }() - baseDBManager := manager.NewMemDB(version.Semantic1_0_0) - m := atomic.NewMemory(prefixdb.New([]byte{0}, baseDBManager.Current().Database)) - ctx.SharedMemory = m.NewSharedMemory(ctx.ChainID) - - genesisBytes := BuildGenesisTest(t) - issuer := make(chan common.Message, 1) - err := vm.Initialize( - context.Background(), - ctx, - baseDBManager, - genesisBytes, - nil, - nil, - issuer, - 
[]*common.Fx{ - { - ID: ids.Empty.Prefix(0), - Fx: &secp256k1fx.Fx{}, - }, - { - ID: ids.Empty.Prefix(1), - Fx: &nftfx.Fx{}, - }, - { - ID: ids.Empty.Prefix(2), - Fx: &propertyfx.Fx{}, - }, - }, - &common.SenderTest{T: t}, - ) - require.NoError(err) - vm.batchTimeout = 0 + key := keys[0] + createAssetTx := newAvaxCreateAssetTxWithOutputs(t, env.vm.ctx.ChainID, env.vm.parser) + issueAndAccept(require, env.vm, env.issuer, createAssetTx) - require.NoError(vm.SetState(context.Background(), snow.Bootstrapping)) - require.NoError(vm.SetState(context.Background(), snow.NormalOp)) + mintSecpOpTx := buildOperationTxWithOp(env.vm.ctx.ChainID, buildSecpMintOp(createAssetTx, key, 0)) + require.NoError(mintSecpOpTx.SignSECP256K1Fx(env.vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}})) + issueAndAccept(require, env.vm, env.issuer, mintSecpOpTx) - key := keys[0] - createAssetTx := newAvaxCreateAssetTxWithOutputs(t, vm) - _, err = vm.IssueTx(createAssetTx.Bytes()) - require.NoError(err) + reply := api.GetTxReply{} + require.NoError(env.service.GetTx(nil, &api.GetTxArgs{ + TxID: mintSecpOpTx.ID(), + Encoding: formatting.JSON, + }, &reply)) - mintSecpOpTx := buildOperationTxWithOp(buildSecpMintOp(createAssetTx, key, 0)) - err = mintSecpOpTx.SignSECP256K1Fx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}}) - require.NoError(err) + require.Equal(formatting.JSON, reply.Encoding) - txID, err := vm.IssueTx(mintSecpOpTx.Bytes()) + replyTxBytes, err := json.MarshalIndent(reply.Tx, "", "\t") require.NoError(err) - require.Equal(mintSecpOpTx.ID(), txID) - ctx.Lock.Unlock() - - msg := <-issuer - require.Equal(common.PendingTxs, msg) - ctx.Lock.Lock() - txs := vm.PendingTxs(context.Background()) - require.Len(txs, 2) - require.NoError(txs[0].Accept(context.Background())) - require.NoError(txs[1].Accept(context.Background())) + expectedReplyTxString := `{ + "unsignedTx": { + "networkID": 10, + "blockchainID": "PLACEHOLDER_BLOCKCHAIN_ID", + "outputs": null, + "inputs": null, + "memo": 
"0x", + "operations": [ + { + "assetID": "PLACEHOLDER_CREATE_ASSET_TX_ID", + "inputIDs": [ + { + "txID": "PLACEHOLDER_CREATE_ASSET_TX_ID", + "outputIndex": 0 + } + ], + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "operation": { + "mintInput": { + "signatureIndices": [ + 0 + ] + }, + "mintOutput": { + "addresses": [ + "X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e" + ], + "locktime": 0, + "threshold": 1 + }, + "transferOutput": { + "addresses": [ + "X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e" + ], + "amount": 1, + "locktime": 0, + "threshold": 1 + } + } + } + ] + }, + "credentials": [ + { + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "credential": { + "signatures": [ + "PLACEHOLDER_SIGNATURE" + ] + } + } + ], + "id": "PLACEHOLDER_TX_ID" +}` - reply := api.GetTxReply{} - s := &Service{vm: vm} - err = s.GetTx(nil, &api.GetTxArgs{ - TxID: txID, - Encoding: formatting.JSON, - }, &reply) - require.NoError(err) + expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_CREATE_ASSET_TX_ID", createAssetTx.ID().String(), 2) + expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_TX_ID", mintSecpOpTx.ID().String(), 1) + expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_BLOCKCHAIN_ID", mintSecpOpTx.Unsigned.(*txs.OperationTx).BlockchainID.String(), 1) - require.Equal(reply.Encoding, formatting.JSON) - jsonTxBytes, err := stdjson.Marshal(reply.Tx) + sigStr, err := formatting.Encode(formatting.HexNC, mintSecpOpTx.Creds[0].Credential.(*secp256k1fx.Credential).Sigs[0][:]) require.NoError(err) - jsonString := string(jsonTxBytes) - // ensure memo is in hex - require.Contains(jsonString, "\"memo\":\"0x\"") - // contains the address in the right format - require.Contains(jsonString, "\"mintOutput\":{\"addresses\":[\"X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e\"]") - require.Contains(jsonString, 
"\"transferOutput\":{\"addresses\":[\"X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e\"],\"amount\":1,\"locktime\":0,\"threshold\":1}}}]}") + expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_SIGNATURE", sigStr, 1) - // contains the fxID - require.Contains(jsonString, "\"operations\":[{\"assetID\":\"2MDgrsBHMRsEPa4D4NA1Bo1pjkVLUK173S3dd9BgT2nCJNiDuS\",\"inputIDs\":[{\"txID\":\"2MDgrsBHMRsEPa4D4NA1Bo1pjkVLUK173S3dd9BgT2nCJNiDuS\",\"outputIndex\":0}],\"fxID\":\"LUC1cmcxnfNR9LdkACS2ccGKLEK7SYqB4gLLTycQfg1koyfSq\"") - require.Contains(jsonString, "\"credentials\":[{\"fxID\":\"LUC1cmcxnfNR9LdkACS2ccGKLEK7SYqB4gLLTycQfg1koyfSq\",\"credential\":{\"signatures\":[\"0x6d7406d5e1bdb1d80de542e276e2d162b0497d0df1170bec72b14d40e84ecf7929cb571211d60149404413a9342fdfa0a2b5d07b48e6f3eaea1e2f9f183b480500\"]}}]") + require.Equal(expectedReplyTxString, string(replyTxBytes)) } func TestServiceGetTxJSON_OperationTxWithMultipleSecpMintOp(t *testing.T) { require := require.New(t) - vm := &VM{} - ctx := NewContext(t) - ctx.Lock.Lock() + env := setup(t, &envConfig{ + vmStaticConfig: &config.Config{}, + additionalFxs: []*common.Fx{{ + ID: propertyfx.ID, + Fx: &propertyfx.Fx{}, + }}, + }) + env.vm.ctx.Lock.Unlock() defer func() { - require.NoError(vm.Shutdown(context.Background())) - ctx.Lock.Unlock() + env.vm.ctx.Lock.Lock() + require.NoError(env.vm.Shutdown(context.Background())) + env.vm.ctx.Lock.Unlock() }() - baseDBManager := manager.NewMemDB(version.Semantic1_0_0) - m := atomic.NewMemory(prefixdb.New([]byte{0}, baseDBManager.Current().Database)) - ctx.SharedMemory = m.NewSharedMemory(ctx.ChainID) - - genesisBytes := BuildGenesisTest(t) - issuer := make(chan common.Message, 1) - err := vm.Initialize( - context.Background(), - ctx, - baseDBManager, - genesisBytes, - nil, - nil, - issuer, - []*common.Fx{ - { - ID: ids.Empty.Prefix(0), - Fx: &secp256k1fx.Fx{}, - }, - { - ID: ids.Empty.Prefix(1), - Fx: &nftfx.Fx{}, - }, - { - ID: ids.Empty.Prefix(2), - Fx: 
&propertyfx.Fx{}, - }, - }, - &common.SenderTest{T: t}, - ) - require.NoError(err) - vm.batchTimeout = 0 - - require.NoError(vm.SetState(context.Background(), snow.Bootstrapping)) - require.NoError(vm.SetState(context.Background(), snow.NormalOp)) - key := keys[0] - createAssetTx := newAvaxCreateAssetTxWithOutputs(t, vm) - _, err = vm.IssueTx(createAssetTx.Bytes()) - require.NoError(err) + createAssetTx := newAvaxCreateAssetTxWithOutputs(t, env.vm.ctx.ChainID, env.vm.parser) + issueAndAccept(require, env.vm, env.issuer, createAssetTx) op1 := buildSecpMintOp(createAssetTx, key, 0) op2 := buildSecpMintOp(createAssetTx, key, 1) - mintSecpOpTx := buildOperationTxWithOp(op1, op2) + mintSecpOpTx := buildOperationTxWithOp(env.vm.ctx.ChainID, op1, op2) - err = mintSecpOpTx.SignSECP256K1Fx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}, {key}}) - require.NoError(err) + require.NoError(mintSecpOpTx.SignSECP256K1Fx(env.vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}, {key}})) + issueAndAccept(require, env.vm, env.issuer, mintSecpOpTx) - txID, err := vm.IssueTx(mintSecpOpTx.Bytes()) - require.NoError(err) - require.Equal(mintSecpOpTx.ID(), txID) - ctx.Lock.Unlock() + reply := api.GetTxReply{} + require.NoError(env.service.GetTx(nil, &api.GetTxArgs{ + TxID: mintSecpOpTx.ID(), + Encoding: formatting.JSON, + }, &reply)) - msg := <-issuer - require.Equal(common.PendingTxs, msg) - ctx.Lock.Lock() + require.Equal(formatting.JSON, reply.Encoding) + + replyTxBytes, err := json.MarshalIndent(reply.Tx, "", "\t") + require.NoError(err) - txs := vm.PendingTxs(context.Background()) - require.Len(txs, 2) - require.NoError(txs[0].Accept(context.Background())) - require.NoError(txs[1].Accept(context.Background())) + expectedReplyTxString := `{ + "unsignedTx": { + "networkID": 10, + "blockchainID": "PLACEHOLDER_BLOCKCHAIN_ID", + "outputs": null, + "inputs": null, + "memo": "0x", + "operations": [ + { + "assetID": "PLACEHOLDER_CREATE_ASSET_TX_ID", + "inputIDs": [ + { + "txID": 
"PLACEHOLDER_CREATE_ASSET_TX_ID", + "outputIndex": 0 + } + ], + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "operation": { + "mintInput": { + "signatureIndices": [ + 0 + ] + }, + "mintOutput": { + "addresses": [ + "X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e" + ], + "locktime": 0, + "threshold": 1 + }, + "transferOutput": { + "addresses": [ + "X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e" + ], + "amount": 1, + "locktime": 0, + "threshold": 1 + } + } + }, + { + "assetID": "PLACEHOLDER_CREATE_ASSET_TX_ID", + "inputIDs": [ + { + "txID": "PLACEHOLDER_CREATE_ASSET_TX_ID", + "outputIndex": 1 + } + ], + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "operation": { + "mintInput": { + "signatureIndices": [ + 0 + ] + }, + "mintOutput": { + "addresses": [ + "X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e" + ], + "locktime": 0, + "threshold": 1 + }, + "transferOutput": { + "addresses": [ + "X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e" + ], + "amount": 1, + "locktime": 0, + "threshold": 1 + } + } + } + ] + }, + "credentials": [ + { + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "credential": { + "signatures": [ + "PLACEHOLDER_SIGNATURE" + ] + } + }, + { + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "credential": { + "signatures": [ + "PLACEHOLDER_SIGNATURE" + ] + } + } + ], + "id": "PLACEHOLDER_TX_ID" +}` - reply := api.GetTxReply{} - s := &Service{vm: vm} - err = s.GetTx(nil, &api.GetTxArgs{ - TxID: txID, - Encoding: formatting.JSON, - }, &reply) - require.NoError(err) + expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_CREATE_ASSET_TX_ID", createAssetTx.ID().String(), 4) + expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_TX_ID", mintSecpOpTx.ID().String(), 1) + expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_BLOCKCHAIN_ID", mintSecpOpTx.Unsigned.(*txs.OperationTx).BlockchainID.String(), 1) - 
require.Equal(reply.Encoding, formatting.JSON) - jsonTxBytes, err := stdjson.Marshal(reply.Tx) + sigStr, err := formatting.Encode(formatting.HexNC, mintSecpOpTx.Creds[0].Credential.(*secp256k1fx.Credential).Sigs[0][:]) require.NoError(err) - jsonString := string(jsonTxBytes) - // contains the address in the right format - require.Contains(jsonString, "\"mintOutput\":{\"addresses\":[\"X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e\"]") - require.Contains(jsonString, "\"transferOutput\":{\"addresses\":[\"X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e\"],\"amount\":1,\"locktime\":0,\"threshold\":1}}}") + expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_SIGNATURE", sigStr, 2) - // contains the fxID - require.Contains(jsonString, "\"assetID\":\"2MDgrsBHMRsEPa4D4NA1Bo1pjkVLUK173S3dd9BgT2nCJNiDuS\",\"inputIDs\":[{\"txID\":\"2MDgrsBHMRsEPa4D4NA1Bo1pjkVLUK173S3dd9BgT2nCJNiDuS\",\"outputIndex\":1}],\"fxID\":\"LUC1cmcxnfNR9LdkACS2ccGKLEK7SYqB4gLLTycQfg1koyfSq\"") - require.Contains(jsonString, "\"credentials\":[{\"fxID\":\"LUC1cmcxnfNR9LdkACS2ccGKLEK7SYqB4gLLTycQfg1koyfSq\",\"credential\":{\"signatures\":[\"0xcc650f48341601c348d8634e8d207e07ea7b4ee4fbdeed3055fa1f1e4f4e27556d25056447a3bd5d949e5f1cbb0155bb20216ac3a4055356e3c82dca74323e7401\"]}},{\"fxID\":\"LUC1cmcxnfNR9LdkACS2ccGKLEK7SYqB4gLLTycQfg1koyfSq\",\"credential\":{\"signatures\":[\"0xcc650f48341601c348d8634e8d207e07ea7b4ee4fbdeed3055fa1f1e4f4e27556d25056447a3bd5d949e5f1cbb0155bb20216ac3a4055356e3c82dca74323e7401\"]}}]") + require.Equal(expectedReplyTxString, string(replyTxBytes)) } func TestServiceGetTxJSON_OperationTxWithPropertyFxMintOp(t *testing.T) { require := require.New(t) - vm := &VM{} - ctx := NewContext(t) - ctx.Lock.Lock() + env := setup(t, &envConfig{ + vmStaticConfig: &config.Config{}, + additionalFxs: []*common.Fx{{ + ID: propertyfx.ID, + Fx: &propertyfx.Fx{}, + }}, + }) + env.vm.ctx.Lock.Unlock() defer func() { - require.NoError(vm.Shutdown(context.Background())) - 
ctx.Lock.Unlock() + env.vm.ctx.Lock.Lock() + require.NoError(env.vm.Shutdown(context.Background())) + env.vm.ctx.Lock.Unlock() }() - baseDBManager := manager.NewMemDB(version.Semantic1_0_0) - m := atomic.NewMemory(prefixdb.New([]byte{0}, baseDBManager.Current().Database)) - ctx.SharedMemory = m.NewSharedMemory(ctx.ChainID) - - genesisBytes := BuildGenesisTest(t) - issuer := make(chan common.Message, 1) - err := vm.Initialize( - context.Background(), - ctx, - baseDBManager, - genesisBytes, - nil, - nil, - issuer, - []*common.Fx{ - { - ID: ids.Empty.Prefix(0), - Fx: &secp256k1fx.Fx{}, - }, - { - ID: ids.Empty.Prefix(1), - Fx: &nftfx.Fx{}, - }, - { - ID: ids.Empty.Prefix(2), - Fx: &propertyfx.Fx{}, - }, - }, - &common.SenderTest{T: t}, - ) - require.NoError(err) - vm.batchTimeout = 0 + key := keys[0] + createAssetTx := newAvaxCreateAssetTxWithOutputs(t, env.vm.ctx.ChainID, env.vm.parser) + issueAndAccept(require, env.vm, env.issuer, createAssetTx) - require.NoError(vm.SetState(context.Background(), snow.Bootstrapping)) - require.NoError(vm.SetState(context.Background(), snow.NormalOp)) + mintPropertyFxOpTx := buildOperationTxWithOp(env.vm.ctx.ChainID, buildPropertyFxMintOp(createAssetTx, key, 4)) + require.NoError(mintPropertyFxOpTx.SignPropertyFx(env.vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}})) + issueAndAccept(require, env.vm, env.issuer, mintPropertyFxOpTx) - key := keys[0] - createAssetTx := newAvaxCreateAssetTxWithOutputs(t, vm) - _, err = vm.IssueTx(createAssetTx.Bytes()) - require.NoError(err) + reply := api.GetTxReply{} + require.NoError(env.service.GetTx(nil, &api.GetTxArgs{ + TxID: mintPropertyFxOpTx.ID(), + Encoding: formatting.JSON, + }, &reply)) - mintPropertyFxOpTx := buildOperationTxWithOp(buildPropertyFxMintOp(createAssetTx, key, 4)) - err = mintPropertyFxOpTx.SignPropertyFx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}}) - require.NoError(err) + require.Equal(formatting.JSON, reply.Encoding) - txID, err := 
vm.IssueTx(mintPropertyFxOpTx.Bytes()) + replyTxBytes, err := json.MarshalIndent(reply.Tx, "", "\t") require.NoError(err) - require.Equal(mintPropertyFxOpTx.ID(), txID) - ctx.Lock.Unlock() - - msg := <-issuer - require.Equal(common.PendingTxs, msg) - ctx.Lock.Lock() - txs := vm.PendingTxs(context.Background()) - require.Len(txs, 2) - require.NoError(txs[0].Accept(context.Background())) - require.NoError(txs[1].Accept(context.Background())) + expectedReplyTxString := `{ + "unsignedTx": { + "networkID": 10, + "blockchainID": "PLACEHOLDER_BLOCKCHAIN_ID", + "outputs": null, + "inputs": null, + "memo": "0x", + "operations": [ + { + "assetID": "PLACEHOLDER_CREATE_ASSET_TX_ID", + "inputIDs": [ + { + "txID": "PLACEHOLDER_CREATE_ASSET_TX_ID", + "outputIndex": 4 + } + ], + "fxID": "rXJsCSEYXg2TehWxCEEGj6JU2PWKTkd6cBdNLjoe2SpsKD9cy", + "operation": { + "mintInput": { + "signatureIndices": [ + 0 + ] + }, + "mintOutput": { + "addresses": [ + "X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e" + ], + "locktime": 0, + "threshold": 1 + }, + "ownedOutput": { + "addresses": [], + "locktime": 0, + "threshold": 0 + } + } + } + ] + }, + "credentials": [ + { + "fxID": "rXJsCSEYXg2TehWxCEEGj6JU2PWKTkd6cBdNLjoe2SpsKD9cy", + "credential": { + "signatures": [ + "PLACEHOLDER_SIGNATURE" + ] + } + } + ], + "id": "PLACEHOLDER_TX_ID" +}` - reply := api.GetTxReply{} - s := &Service{vm: vm} - err = s.GetTx(nil, &api.GetTxArgs{ - TxID: txID, - Encoding: formatting.JSON, - }, &reply) - require.NoError(err) + expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_CREATE_ASSET_TX_ID", createAssetTx.ID().String(), 2) + expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_TX_ID", mintPropertyFxOpTx.ID().String(), 1) + expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_BLOCKCHAIN_ID", mintPropertyFxOpTx.Unsigned.(*txs.OperationTx).BlockchainID.String(), 1) - require.Equal(reply.Encoding, formatting.JSON) - jsonTxBytes, err := 
stdjson.Marshal(reply.Tx) + sigStr, err := formatting.Encode(formatting.HexNC, mintPropertyFxOpTx.Creds[0].Credential.(*propertyfx.Credential).Sigs[0][:]) require.NoError(err) - jsonString := string(jsonTxBytes) - // ensure memo is in hex - require.Contains(jsonString, "\"memo\":\"0x\"") - // contains the address in the right format - require.Contains(jsonString, "\"mintOutput\":{\"addresses\":[\"X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e\"]") + expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_SIGNATURE", sigStr, 1) - // contains the fxID - require.Contains(jsonString, "\"assetID\":\"2MDgrsBHMRsEPa4D4NA1Bo1pjkVLUK173S3dd9BgT2nCJNiDuS\",\"inputIDs\":[{\"txID\":\"2MDgrsBHMRsEPa4D4NA1Bo1pjkVLUK173S3dd9BgT2nCJNiDuS\",\"outputIndex\":4}],\"fxID\":\"2mcwQKiD8VEspmMJpL1dc7okQQ5dDVAWeCBZ7FWBFAbxpv3t7w\"") - require.Contains(jsonString, "\"credentials\":[{\"fxID\":\"2mcwQKiD8VEspmMJpL1dc7okQQ5dDVAWeCBZ7FWBFAbxpv3t7w\",\"credential\":{\"signatures\":[\"0xa3a00a03d3f1551ff696d6c0abdde73ae7002cd6dcce1c37d720de3b7ed80757411c9698cd9681a0fa55ca685904ca87056a3b8abc858a8ac08f45483b32a80201\"]}}]") + require.Equal(expectedReplyTxString, string(replyTxBytes)) } func TestServiceGetTxJSON_OperationTxWithPropertyFxMintOpMultiple(t *testing.T) { require := require.New(t) - vm := &VM{} - ctx := NewContext(t) - ctx.Lock.Lock() + env := setup(t, &envConfig{ + vmStaticConfig: &config.Config{}, + additionalFxs: []*common.Fx{{ + ID: propertyfx.ID, + Fx: &propertyfx.Fx{}, + }}, + }) + env.vm.ctx.Lock.Unlock() defer func() { - require.NoError(vm.Shutdown(context.Background())) - ctx.Lock.Unlock() + env.vm.ctx.Lock.Lock() + require.NoError(env.vm.Shutdown(context.Background())) + env.vm.ctx.Lock.Unlock() }() - baseDBManager := manager.NewMemDB(version.Semantic1_0_0) - m := atomic.NewMemory(prefixdb.New([]byte{0}, baseDBManager.Current().Database)) - ctx.SharedMemory = m.NewSharedMemory(ctx.ChainID) - - genesisBytes := BuildGenesisTest(t) - issuer := make(chan 
common.Message, 1) - err := vm.Initialize( - context.Background(), - ctx, - baseDBManager, - genesisBytes, - nil, - nil, - issuer, - []*common.Fx{ - { - ID: ids.Empty.Prefix(0), - Fx: &secp256k1fx.Fx{}, - }, - { - ID: ids.Empty.Prefix(1), - Fx: &nftfx.Fx{}, - }, - { - ID: ids.Empty.Prefix(2), - Fx: &propertyfx.Fx{}, - }, - }, - &common.SenderTest{T: t}, - ) - require.NoError(err) - vm.batchTimeout = 0 - - require.NoError(vm.SetState(context.Background(), snow.Bootstrapping)) - require.NoError(vm.SetState(context.Background(), snow.NormalOp)) - key := keys[0] - createAssetTx := newAvaxCreateAssetTxWithOutputs(t, vm) - _, err = vm.IssueTx(createAssetTx.Bytes()) - require.NoError(err) + createAssetTx := newAvaxCreateAssetTxWithOutputs(t, env.vm.ctx.ChainID, env.vm.parser) + issueAndAccept(require, env.vm, env.issuer, createAssetTx) op1 := buildPropertyFxMintOp(createAssetTx, key, 4) op2 := buildPropertyFxMintOp(createAssetTx, key, 5) - mintPropertyFxOpTx := buildOperationTxWithOp(op1, op2) - - err = mintPropertyFxOpTx.SignPropertyFx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}, {key}}) - require.NoError(err) - - txID, err := vm.IssueTx(mintPropertyFxOpTx.Bytes()) - require.NoError(err) - require.Equal(mintPropertyFxOpTx.ID(), txID) - ctx.Lock.Unlock() - - msg := <-issuer - require.Equal(common.PendingTxs, msg) - ctx.Lock.Lock() + mintPropertyFxOpTx := buildOperationTxWithOp(env.vm.ctx.ChainID, op1, op2) - txs := vm.PendingTxs(context.Background()) - require.Len(txs, 2) - require.NoError(txs[0].Accept(context.Background())) - require.NoError(txs[1].Accept(context.Background())) + require.NoError(mintPropertyFxOpTx.SignPropertyFx(env.vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}, {key}})) + issueAndAccept(require, env.vm, env.issuer, mintPropertyFxOpTx) reply := api.GetTxReply{} - s := &Service{vm: vm} - err = s.GetTx(nil, &api.GetTxArgs{ - TxID: txID, + require.NoError(env.service.GetTx(nil, &api.GetTxArgs{ + TxID: mintPropertyFxOpTx.ID(), Encoding: 
formatting.JSON, - }, &reply) + }, &reply)) + + require.Equal(formatting.JSON, reply.Encoding) + + replyTxBytes, err := json.MarshalIndent(reply.Tx, "", "\t") require.NoError(err) - require.Equal(reply.Encoding, formatting.JSON) - jsonTxBytes, err := stdjson.Marshal(reply.Tx) + expectedReplyTxString := `{ + "unsignedTx": { + "networkID": 10, + "blockchainID": "PLACEHOLDER_BLOCKCHAIN_ID", + "outputs": null, + "inputs": null, + "memo": "0x", + "operations": [ + { + "assetID": "PLACEHOLDER_CREATE_ASSET_TX_ID", + "inputIDs": [ + { + "txID": "PLACEHOLDER_CREATE_ASSET_TX_ID", + "outputIndex": 4 + } + ], + "fxID": "rXJsCSEYXg2TehWxCEEGj6JU2PWKTkd6cBdNLjoe2SpsKD9cy", + "operation": { + "mintInput": { + "signatureIndices": [ + 0 + ] + }, + "mintOutput": { + "addresses": [ + "X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e" + ], + "locktime": 0, + "threshold": 1 + }, + "ownedOutput": { + "addresses": [], + "locktime": 0, + "threshold": 0 + } + } + }, + { + "assetID": "PLACEHOLDER_CREATE_ASSET_TX_ID", + "inputIDs": [ + { + "txID": "PLACEHOLDER_CREATE_ASSET_TX_ID", + "outputIndex": 5 + } + ], + "fxID": "rXJsCSEYXg2TehWxCEEGj6JU2PWKTkd6cBdNLjoe2SpsKD9cy", + "operation": { + "mintInput": { + "signatureIndices": [ + 0 + ] + }, + "mintOutput": { + "addresses": [ + "X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e" + ], + "locktime": 0, + "threshold": 1 + }, + "ownedOutput": { + "addresses": [], + "locktime": 0, + "threshold": 0 + } + } + } + ] + }, + "credentials": [ + { + "fxID": "rXJsCSEYXg2TehWxCEEGj6JU2PWKTkd6cBdNLjoe2SpsKD9cy", + "credential": { + "signatures": [ + "PLACEHOLDER_SIGNATURE" + ] + } + }, + { + "fxID": "rXJsCSEYXg2TehWxCEEGj6JU2PWKTkd6cBdNLjoe2SpsKD9cy", + "credential": { + "signatures": [ + "PLACEHOLDER_SIGNATURE" + ] + } + } + ], + "id": "PLACEHOLDER_TX_ID" +}` + + expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_CREATE_ASSET_TX_ID", createAssetTx.ID().String(), 4) + expectedReplyTxString = strings.Replace(expectedReplyTxString, 
"PLACEHOLDER_TX_ID", mintPropertyFxOpTx.ID().String(), 1) + expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_BLOCKCHAIN_ID", mintPropertyFxOpTx.Unsigned.(*txs.OperationTx).BlockchainID.String(), 1) + + sigStr, err := formatting.Encode(formatting.HexNC, mintPropertyFxOpTx.Creds[0].Credential.(*propertyfx.Credential).Sigs[0][:]) require.NoError(err) - jsonString := string(jsonTxBytes) - // contains the address in the right format - require.Contains(jsonString, "\"mintOutput\":{\"addresses\":[\"X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e\"]") + expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_SIGNATURE", sigStr, 2) - // contains the fxID - require.Contains(jsonString, "\"operations\":[{\"assetID\":\"2MDgrsBHMRsEPa4D4NA1Bo1pjkVLUK173S3dd9BgT2nCJNiDuS\",\"inputIDs\":[{\"txID\":\"2MDgrsBHMRsEPa4D4NA1Bo1pjkVLUK173S3dd9BgT2nCJNiDuS\",\"outputIndex\":4}],\"fxID\":\"2mcwQKiD8VEspmMJpL1dc7okQQ5dDVAWeCBZ7FWBFAbxpv3t7w\"") - require.Contains(jsonString, "\"credentials\":[{\"fxID\":\"2mcwQKiD8VEspmMJpL1dc7okQQ5dDVAWeCBZ7FWBFAbxpv3t7w\",\"credential\":{\"signatures\":[\"0x25b7ca14df108d4a32877bda4f10d84eda6d653c620f4c8d124265bdcf0ac91f45712b58b33f4b62a19698325a3c89adff214b77f772d9f311742860039abb5601\"]}},{\"fxID\":\"2mcwQKiD8VEspmMJpL1dc7okQQ5dDVAWeCBZ7FWBFAbxpv3t7w\",\"credential\":{\"signatures\":[\"0x25b7ca14df108d4a32877bda4f10d84eda6d653c620f4c8d124265bdcf0ac91f45712b58b33f4b62a19698325a3c89adff214b77f772d9f311742860039abb5601\"]}}]") + require.Equal(expectedReplyTxString, string(replyTxBytes)) } -func newAvaxBaseTxWithOutputs(t *testing.T, genesisBytes []byte, vm *VM) *txs.Tx { - avaxTx := GetAVAXTxFromGenesisTest(genesisBytes, t) +func newAvaxBaseTxWithOutputs(t *testing.T, genesisBytes []byte, chainID ids.ID, fee uint64, parser txs.Parser) *txs.Tx { + avaxTx := getCreateTxFromGenesisTest(t, genesisBytes, "AVAX") key := keys[0] - tx := buildBaseTx(avaxTx, vm, key) - if err := tx.SignSECP256K1Fx(vm.parser.Codec(), 
[][]*secp256k1.PrivateKey{{key}}); err != nil { - t.Fatal(err) - } + tx := buildBaseTx(avaxTx, chainID, fee, key) + require.NoError(t, tx.SignSECP256K1Fx(parser.Codec(), [][]*secp256k1.PrivateKey{{key}})) return tx } -func newAvaxExportTxWithOutputs(t *testing.T, genesisBytes []byte, vm *VM) *txs.Tx { - avaxTx := GetAVAXTxFromGenesisTest(genesisBytes, t) +func newAvaxExportTxWithOutputs(t *testing.T, genesisBytes []byte, chainID ids.ID, fee uint64, parser txs.Parser) *txs.Tx { + avaxTx := getCreateTxFromGenesisTest(t, genesisBytes, "AVAX") key := keys[0] - tx := buildExportTx(avaxTx, vm, key) - if err := tx.SignSECP256K1Fx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}}); err != nil { - t.Fatal(err) - } + tx := buildExportTx(avaxTx, chainID, fee, key) + require.NoError(t, tx.SignSECP256K1Fx(parser.Codec(), [][]*secp256k1.PrivateKey{{key}})) return tx } -func newAvaxCreateAssetTxWithOutputs(t *testing.T, vm *VM) *txs.Tx { +func newAvaxCreateAssetTxWithOutputs(t *testing.T, chainID ids.ID, parser txs.Parser) *txs.Tx { key := keys[0] - tx := buildCreateAssetTx(key) - if err := vm.parser.InitializeTx(tx); err != nil { - t.Fatal(err) - } + tx := buildCreateAssetTx(chainID, key) + require.NoError(t, tx.Initialize(parser.Codec())) return tx } -func buildBaseTx(avaxTx *txs.Tx, vm *VM, key *secp256k1.PrivateKey) *txs.Tx { +func buildBaseTx(avaxTx *txs.Tx, chainID ids.ID, fee uint64, key *secp256k1.PrivateKey) *txs.Tx { return &txs.Tx{Unsigned: &txs.BaseTx{ BaseTx: avax.BaseTx{ NetworkID: constants.UnitTestID, @@ -1462,7 +1584,7 @@ func buildBaseTx(avaxTx *txs.Tx, vm *VM, key *secp256k1.PrivateKey) *txs.Tx { Outs: []*avax.TransferableOutput{{ Asset: avax.Asset{ID: avaxTx.ID()}, Out: &secp256k1fx.TransferOutput{ - Amt: startBalance - vm.TxFee, + Amt: startBalance - fee, OutputOwners: secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{key.PublicKey().Address()}, @@ -1473,7 +1595,7 @@ func buildBaseTx(avaxTx *txs.Tx, vm *VM, key *secp256k1.PrivateKey) *txs.Tx { }} 
} -func buildExportTx(avaxTx *txs.Tx, vm *VM, key *secp256k1.PrivateKey) *txs.Tx { +func buildExportTx(avaxTx *txs.Tx, chainID ids.ID, fee uint64, key *secp256k1.PrivateKey) *txs.Tx { return &txs.Tx{Unsigned: &txs.ExportTx{ BaseTx: txs.BaseTx{ BaseTx: avax.BaseTx{ @@ -1496,7 +1618,7 @@ func buildExportTx(avaxTx *txs.Tx, vm *VM, key *secp256k1.PrivateKey) *txs.Tx { ExportedOuts: []*avax.TransferableOutput{{ Asset: avax.Asset{ID: avaxTx.ID()}, Out: &secp256k1fx.TransferOutput{ - Amt: startBalance - vm.TxFee, + Amt: startBalance - fee, OutputOwners: secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{key.PublicKey().Address()}, @@ -1506,7 +1628,7 @@ func buildExportTx(avaxTx *txs.Tx, vm *VM, key *secp256k1.PrivateKey) *txs.Tx { }} } -func buildCreateAssetTx(key *secp256k1.PrivateKey) *txs.Tx { +func buildCreateAssetTx(chainID ids.ID, key *secp256k1.PrivateKey) *txs.Tx { return &txs.Tx{Unsigned: &txs.CreateAssetTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: constants.UnitTestID, @@ -1645,7 +1767,7 @@ func buildSecpMintOp(createAssetTx *txs.Tx, key *secp256k1.PrivateKey, outputInd } } -func buildOperationTxWithOp(op ...*txs.Operation) *txs.Tx { +func buildOperationTxWithOp(chainID ids.ID, op ...*txs.Operation) *txs.Tx { return &txs.Tx{Unsigned: &txs.OperationTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: constants.UnitTestID, @@ -1656,40 +1778,45 @@ func buildOperationTxWithOp(op ...*txs.Operation) *txs.Tx { } func TestServiceGetNilTx(t *testing.T) { - _, vm, s, _, _ := setup(t, true) + require := require.New(t) + + env := setup(t, &envConfig{}) + env.vm.ctx.Lock.Unlock() + defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } - vm.ctx.Lock.Unlock() + env.vm.ctx.Lock.Lock() + require.NoError(env.vm.Shutdown(context.Background())) + env.vm.ctx.Lock.Unlock() }() reply := api.GetTxReply{} - err := s.GetTx(nil, &api.GetTxArgs{}, &reply) - require.Error(t, err, "Nil TxID should have returned an error") + err := 
env.service.GetTx(nil, &api.GetTxArgs{}, &reply) + require.ErrorIs(err, errNilTxID) } func TestServiceGetUnknownTx(t *testing.T) { - _, vm, s, _, _ := setup(t, true) + require := require.New(t) + + env := setup(t, &envConfig{}) + env.vm.ctx.Lock.Unlock() + defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } - vm.ctx.Lock.Unlock() + env.vm.ctx.Lock.Lock() + require.NoError(env.vm.Shutdown(context.Background())) + env.vm.ctx.Lock.Unlock() }() reply := api.GetTxReply{} - err := s.GetTx(nil, &api.GetTxArgs{TxID: ids.Empty}, &reply) - require.Error(t, err, "Unknown TxID should have returned an error") + err := env.service.GetTx(nil, &api.GetTxArgs{TxID: ids.GenerateTestID()}, &reply) + require.ErrorIs(err, database.ErrNotFound) } func TestServiceGetUTXOs(t *testing.T) { - _, vm, s, m, _ := setup(t, true) + env := setup(t, &envConfig{}) defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } - vm.ctx.Lock.Unlock() + env.vm.ctx.Lock.Lock() + require.NoError(t, env.vm.Shutdown(context.Background())) + env.vm.ctx.Lock.Unlock() }() rawAddr := ids.GenerateTestShortID() @@ -1702,7 +1829,7 @@ func TestServiceGetUTXOs(t *testing.T) { UTXOID: avax.UTXOID{ TxID: ids.GenerateTestID(), }, - Asset: avax.Asset{ID: vm.ctx.AVAXAssetID}, + Asset: avax.Asset{ID: env.vm.ctx.AVAXAssetID}, Out: &secp256k1fx.TransferOutput{ Amt: 1, OutputOwners: secp256k1fx.OutputOwners{ @@ -1711,20 +1838,20 @@ func TestServiceGetUTXOs(t *testing.T) { }, }, } - vm.state.AddUTXO(utxo) + env.vm.state.AddUTXO(utxo) } - require.NoError(t, vm.state.Commit()) + require.NoError(t, env.vm.state.Commit()) - sm := m.NewSharedMemory(constants.PlatformChainID) + sm := env.sharedMemory.NewSharedMemory(constants.PlatformChainID) elems := make([]*atomic.Element, numUTXOs) - codec := vm.parser.Codec() + codec := env.vm.parser.Codec() for i := range elems { utxo := &avax.UTXO{ UTXOID: avax.UTXOID{ TxID: ids.GenerateTestID(), }, - Asset: 
avax.Asset{ID: vm.ctx.AVAXAssetID}, + Asset: avax.Asset{ID: env.vm.ctx.AVAXAssetID}, Out: &secp256k1fx.TransferOutput{ Amt: 1, OutputOwners: secp256k1fx.OutputOwners{ @@ -1735,9 +1862,7 @@ func TestServiceGetUTXOs(t *testing.T) { } utxoBytes, err := codec.Marshal(txs.CodecVersion, utxo) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) utxoID := utxo.InputID() elems[i] = &atomic.Element{ Key: utxoID[:], @@ -1748,87 +1873,83 @@ func TestServiceGetUTXOs(t *testing.T) { } } - if err := sm.Apply(map[ids.ID]*atomic.Requests{vm.ctx.ChainID: {PutRequests: elems}}); err != nil { - t.Fatal(err) - } + require.NoError(t, sm.Apply(map[ids.ID]*atomic.Requests{ + env.vm.ctx.ChainID: { + PutRequests: elems, + }, + })) - hrp := constants.GetHRP(vm.ctx.NetworkID) - xAddr, err := vm.FormatLocalAddress(rawAddr) - if err != nil { - t.Fatal(err) - } - pAddr, err := vm.FormatAddress(constants.PlatformChainID, rawAddr) - if err != nil { - t.Fatal(err) - } + hrp := constants.GetHRP(env.vm.ctx.NetworkID) + xAddr, err := env.vm.FormatLocalAddress(rawAddr) + require.NoError(t, err) + pAddr, err := env.vm.FormatAddress(constants.PlatformChainID, rawAddr) + require.NoError(t, err) unknownChainAddr, err := address.Format("R", hrp, rawAddr.Bytes()) - if err != nil { - t.Fatal(err) - } - xEmptyAddr, err := vm.FormatLocalAddress(rawEmptyAddr) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) + xEmptyAddr, err := env.vm.FormatLocalAddress(rawEmptyAddr) + require.NoError(t, err) + + env.vm.ctx.Lock.Unlock() tests := []struct { - label string - count int - shouldErr bool - args *api.GetUTXOsArgs + label string + count int + expectedErr error + args *api.GetUTXOsArgs }{ { - label: "invalid address: ''", - shouldErr: true, + label: "invalid address: ''", + expectedErr: address.ErrNoSeparator, args: &api.GetUTXOsArgs{ Addresses: []string{""}, }, }, { - label: "invalid address: '-'", - shouldErr: true, + label: "invalid address: '-'", + expectedErr: bech32.ErrInvalidLength(0), 
args: &api.GetUTXOsArgs{ Addresses: []string{"-"}, }, }, { - label: "invalid address: 'foo'", - shouldErr: true, + label: "invalid address: 'foo'", + expectedErr: address.ErrNoSeparator, args: &api.GetUTXOsArgs{ Addresses: []string{"foo"}, }, }, { - label: "invalid address: 'foo-bar'", - shouldErr: true, + label: "invalid address: 'foo-bar'", + expectedErr: bech32.ErrInvalidLength(3), args: &api.GetUTXOsArgs{ Addresses: []string{"foo-bar"}, }, }, { - label: "invalid address: ''", - shouldErr: true, + label: "invalid address: ''", + expectedErr: address.ErrNoSeparator, args: &api.GetUTXOsArgs{ - Addresses: []string{vm.ctx.ChainID.String()}, + Addresses: []string{env.vm.ctx.ChainID.String()}, }, }, { - label: "invalid address: '-'", - shouldErr: true, + label: "invalid address: '-'", + expectedErr: bech32.ErrInvalidLength(0), args: &api.GetUTXOsArgs{ - Addresses: []string{fmt.Sprintf("%s-", vm.ctx.ChainID.String())}, + Addresses: []string{env.vm.ctx.ChainID.String() + "-"}, }, }, { - label: "invalid address: '-'", - shouldErr: true, + label: "invalid address: '-'", + expectedErr: ids.ErrNoIDWithAlias, args: &api.GetUTXOsArgs{ Addresses: []string{unknownChainAddr}, }, }, { - label: "no addresses", - shouldErr: true, - args: &api.GetUTXOsArgs{}, + label: "no addresses", + expectedErr: errNoAddresses, + args: &api.GetUTXOsArgs{}, }, { label: "get all X-chain UTXOs", @@ -1856,7 +1977,7 @@ func TestServiceGetUTXOs(t *testing.T) { Addresses: []string{ xAddr, }, - Limit: json.Uint32(numUTXOs + 1), + Limit: avajson.Uint32(numUTXOs + 1), }, }, { @@ -1889,9 +2010,9 @@ func TestServiceGetUTXOs(t *testing.T) { }, }, { - label: "invalid source chain ID", - shouldErr: true, - count: numUTXOs, + label: "invalid source chain ID", + expectedErr: ids.ErrNoIDWithAlias, + count: numUTXOs, args: &api.GetUTXOsArgs{ Addresses: []string{ xAddr, @@ -1910,8 +2031,8 @@ func TestServiceGetUTXOs(t *testing.T) { }, }, { - label: "get UTXOs from multiple chains", - shouldErr: true, + label: "get 
UTXOs from multiple chains", + expectedErr: avax.ErrMismatchedChainIDs, args: &api.GetUTXOsArgs{ Addresses: []string{ xAddr, @@ -1920,8 +2041,8 @@ func TestServiceGetUTXOs(t *testing.T) { }, }, { - label: "get UTXOs for an address on a different chain", - shouldErr: true, + label: "get UTXOs for an address on a different chain", + expectedErr: avax.ErrMismatchedChainIDs, args: &api.GetUTXOsArgs{ Addresses: []string{ pAddr, @@ -1931,104 +2052,96 @@ func TestServiceGetUTXOs(t *testing.T) { } for _, test := range tests { t.Run(test.label, func(t *testing.T) { + require := require.New(t) reply := &api.GetUTXOsReply{} - err := s.GetUTXOs(nil, test.args, reply) - if err != nil { - if !test.shouldErr { - t.Fatal(err) - } + err := env.service.GetUTXOs(nil, test.args, reply) + require.ErrorIs(err, test.expectedErr) + if test.expectedErr != nil { return } - if test.shouldErr { - t.Fatal("should have erred") - } - if test.count != len(reply.UTXOs) { - t.Fatalf("Expected %d utxos, got %d", test.count, len(reply.UTXOs)) - } + require.Len(reply.UTXOs, test.count) }) } } func TestGetAssetDescription(t *testing.T) { - _, vm, s, _, genesisTx := setup(t, true) + require := require.New(t) + + env := setup(t, &envConfig{}) + env.vm.ctx.Lock.Unlock() + defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } - vm.ctx.Lock.Unlock() + env.vm.ctx.Lock.Lock() + require.NoError(env.vm.Shutdown(context.Background())) + env.vm.ctx.Lock.Unlock() }() - avaxAssetID := genesisTx.ID() + avaxAssetID := env.genesisTx.ID() reply := GetAssetDescriptionReply{} - err := s.GetAssetDescription(nil, &GetAssetDescriptionArgs{ + require.NoError(env.service.GetAssetDescription(nil, &GetAssetDescriptionArgs{ AssetID: avaxAssetID.String(), - }, &reply) - if err != nil { - t.Fatal(err) - } + }, &reply)) - if reply.Name != "AVAX" { - t.Fatalf("Wrong name returned from GetAssetDescription %s", reply.Name) - } - if reply.Symbol != "SYMB" { - t.Fatalf("Wrong name returned from 
GetAssetDescription %s", reply.Symbol) - } + require.Equal("AVAX", reply.Name) + require.Equal("SYMB", reply.Symbol) } func TestGetBalance(t *testing.T) { - _, vm, s, _, genesisTx := setup(t, true) + require := require.New(t) + + env := setup(t, &envConfig{}) + env.vm.ctx.Lock.Unlock() + defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } - vm.ctx.Lock.Unlock() + env.vm.ctx.Lock.Lock() + require.NoError(env.vm.Shutdown(context.Background())) + env.vm.ctx.Lock.Unlock() }() - avaxAssetID := genesisTx.ID() + avaxAssetID := env.genesisTx.ID() reply := GetBalanceReply{} - addrStr, err := vm.FormatLocalAddress(keys[0].PublicKey().Address()) - if err != nil { - t.Fatal(err) - } - err = s.GetBalance(nil, &GetBalanceArgs{ + addrStr, err := env.vm.FormatLocalAddress(keys[0].PublicKey().Address()) + require.NoError(err) + require.NoError(env.service.GetBalance(nil, &GetBalanceArgs{ Address: addrStr, AssetID: avaxAssetID.String(), - }, &reply) - if err != nil { - t.Fatal(err) - } + }, &reply)) - if uint64(reply.Balance) != startBalance { - t.Fatalf("Wrong balance returned from GetBalance %d", reply.Balance) - } + require.Equal(startBalance, uint64(reply.Balance)) } func TestCreateFixedCapAsset(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - _, vm, s, _, _ := setupWithKeys(t, tc.avaxAsset) + require := require.New(t) + + env := setup(t, &envConfig{ + isCustomFeeAsset: !tc.avaxAsset, + keystoreUsers: []*user{{ + username: username, + password: password, + initialKeys: keys, + }}, + }) + env.vm.ctx.Lock.Unlock() + defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } - vm.ctx.Lock.Unlock() + env.vm.ctx.Lock.Lock() + require.NoError(env.vm.Shutdown(context.Background())) + env.vm.ctx.Lock.Unlock() }() reply := AssetIDChangeAddr{} - addrStr, err := vm.FormatLocalAddress(keys[0].PublicKey().Address()) - if err != nil { - t.Fatal(err) - } + addrStr, err := 
env.vm.FormatLocalAddress(keys[0].PublicKey().Address()) + require.NoError(err) - changeAddrStr, err := vm.FormatLocalAddress(testChangeAddr) - if err != nil { - t.Fatal(err) - } - _, fromAddrsStr := sampleAddrs(t, vm, addrs) + changeAddrStr, err := env.vm.FormatLocalAddress(testChangeAddr) + require.NoError(err) + _, fromAddrsStr := sampleAddrs(t, env.vm.AddressManager, addrs) - err = s.CreateFixedCapAsset(nil, &CreateAssetArgs{ + require.NoError(env.service.CreateFixedCapAsset(nil, &CreateAssetArgs{ JSONSpendHeader: api.JSONSpendHeader{ UserPass: api.UserPass{ Username: username, @@ -2044,12 +2157,8 @@ func TestCreateFixedCapAsset(t *testing.T) { Amount: 123456789, Address: addrStr, }}, - }, &reply) - if err != nil { - t.Fatal(err) - } else if reply.ChangeAddr != changeAddrStr { - t.Fatalf("expected change address %s but got %s", changeAddrStr, reply.ChangeAddr) - } + }, &reply)) + require.Equal(changeAddrStr, reply.ChangeAddr) }) } } @@ -2057,26 +2166,31 @@ func TestCreateFixedCapAsset(t *testing.T) { func TestCreateVariableCapAsset(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - _, vm, s, _, _ := setupWithKeys(t, tc.avaxAsset) + require := require.New(t) + + env := setup(t, &envConfig{ + isCustomFeeAsset: !tc.avaxAsset, + keystoreUsers: []*user{{ + username: username, + password: password, + initialKeys: keys, + }}, + }) + env.vm.ctx.Lock.Unlock() + defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } - vm.ctx.Lock.Unlock() + env.vm.ctx.Lock.Lock() + require.NoError(env.vm.Shutdown(context.Background())) + env.vm.ctx.Lock.Unlock() }() reply := AssetIDChangeAddr{} - minterAddrStr, err := vm.FormatLocalAddress(keys[0].PublicKey().Address()) - if err != nil { - t.Fatal(err) - } - _, fromAddrsStr := sampleAddrs(t, vm, addrs) + minterAddrStr, err := env.vm.FormatLocalAddress(keys[0].PublicKey().Address()) + require.NoError(err) + _, fromAddrsStr := sampleAddrs(t, env.vm.AddressManager, 
addrs) changeAddrStr := fromAddrsStr[0] - if err != nil { - t.Fatal(err) - } - err = s.CreateVariableCapAsset(nil, &CreateAssetArgs{ + require.NoError(env.service.CreateVariableCapAsset(nil, &CreateAssetArgs{ JSONSpendHeader: api.JSONSpendHeader{ UserPass: api.UserPass{ Username: username, @@ -2095,23 +2209,10 @@ func TestCreateVariableCapAsset(t *testing.T) { }, }, }, - }, &reply) - if err != nil { - t.Fatal(err) - } else if reply.ChangeAddr != changeAddrStr { - t.Fatalf("expected change address %s but got %s", changeAddrStr, reply.ChangeAddr) - } + }, &reply)) + require.Equal(changeAddrStr, reply.ChangeAddr) - createAssetTx := UniqueTx{ - vm: vm, - txID: reply.AssetID, - } - if status := createAssetTx.Status(); status != choices.Processing { - t.Fatalf("CreateVariableCapAssetTx status should have been Processing, but was %s", status) - } - if err := createAssetTx.Accept(context.Background()); err != nil { - t.Fatalf("Failed to accept CreateVariableCapAssetTx due to: %s", err) - } + buildAndAccept(require, env.vm, env.issuer, reply.AssetID) createdAssetID := reply.AssetID.String() // Test minting of the created variable cap asset @@ -2128,23 +2229,10 @@ func TestCreateVariableCapAsset(t *testing.T) { To: minterAddrStr, // Send newly minted tokens to this address } mintReply := &api.JSONTxIDChangeAddr{} - if err := s.Mint(nil, mintArgs, mintReply); err != nil { - t.Fatalf("Failed to mint variable cap asset due to: %s", err) - } else if mintReply.ChangeAddr != changeAddrStr { - t.Fatalf("expected change address %s but got %s", changeAddrStr, mintReply.ChangeAddr) - } - - mintTx := UniqueTx{ - vm: vm, - txID: mintReply.TxID, - } + require.NoError(env.service.Mint(nil, mintArgs, mintReply)) + require.Equal(changeAddrStr, mintReply.ChangeAddr) - if status := mintTx.Status(); status != choices.Processing { - t.Fatalf("MintTx status should have been Processing, but was %s", status) - } - if err := mintTx.Accept(context.Background()); err != nil { - t.Fatalf("Failed to 
accept MintTx due to: %s", err) - } + buildAndAccept(require, env.vm, env.issuer, mintReply.TxID) sendArgs := &SendArgs{ JSONSpendHeader: api.JSONSpendHeader{ @@ -2162,11 +2250,8 @@ func TestCreateVariableCapAsset(t *testing.T) { }, } sendReply := &api.JSONTxIDChangeAddr{} - if err := s.Send(nil, sendArgs, sendReply); err != nil { - t.Fatalf("Failed to send newly minted variable cap asset due to: %s", err) - } else if sendReply.ChangeAddr != changeAddrStr { - t.Fatalf("expected change address to be %s but got %s", changeAddrStr, sendReply.ChangeAddr) - } + require.NoError(env.service.Send(nil, sendArgs, sendReply)) + require.Equal(changeAddrStr, sendReply.ChangeAddr) }) } } @@ -2174,21 +2259,29 @@ func TestCreateVariableCapAsset(t *testing.T) { func TestNFTWorkflow(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - _, vm, s, _, _ := setupWithKeys(t, tc.avaxAsset) + require := require.New(t) + + env := setup(t, &envConfig{ + isCustomFeeAsset: !tc.avaxAsset, + keystoreUsers: []*user{{ + username: username, + password: password, + initialKeys: keys, + }}, + }) + env.vm.ctx.Lock.Unlock() + defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } - vm.ctx.Lock.Unlock() + env.vm.ctx.Lock.Lock() + require.NoError(env.vm.Shutdown(context.Background())) + env.vm.ctx.Lock.Unlock() }() - fromAddrs, fromAddrsStr := sampleAddrs(t, vm, addrs) + fromAddrs, fromAddrsStr := sampleAddrs(t, env.vm.AddressManager, addrs) // Test minting of the created variable cap asset - addrStr, err := vm.FormatLocalAddress(keys[0].PublicKey().Address()) - if err != nil { - t.Fatal(err) - } + addrStr, err := env.vm.FormatLocalAddress(keys[0].PublicKey().Address()) + require.NoError(err) createArgs := &CreateNFTAssetArgs{ JSONSpendHeader: api.JSONSpendHeader{ @@ -2211,31 +2304,41 @@ func TestNFTWorkflow(t *testing.T) { }, } createReply := &AssetIDChangeAddr{} - if err := s.CreateNFTAsset(nil, createArgs, createReply); err != nil { 
- t.Fatalf("Failed to mint variable cap asset due to: %s", err) - } else if createReply.ChangeAddr != fromAddrsStr[0] { - t.Fatalf("expected change address to be %s but got %s", fromAddrsStr[0], createReply.ChangeAddr) - } + require.NoError(env.service.CreateNFTAsset(nil, createArgs, createReply)) + require.Equal(fromAddrsStr[0], createReply.ChangeAddr) - assetID := createReply.AssetID - createNFTTx := UniqueTx{ - vm: vm, - txID: createReply.AssetID, - } - // Accept the transaction so that we can Mint NFTs for the test - if createNFTTx.Status() != choices.Processing { - t.Fatalf("CreateNFTTx should have been processing after creating the NFT") + buildAndAccept(require, env.vm, env.issuer, createReply.AssetID) + + // Key: Address + // Value: AVAX balance + balances := map[ids.ShortID]uint64{} + for _, addr := range addrs { // get balances for all addresses + addrStr, err := env.vm.FormatLocalAddress(addr) + require.NoError(err) + + reply := &GetBalanceReply{} + require.NoError(env.service.GetBalance(nil, + &GetBalanceArgs{ + Address: addrStr, + AssetID: env.vm.feeAssetID.String(), + }, + reply, + )) + + balances[addr] = uint64(reply.Balance) } - if err := createNFTTx.Accept(context.Background()); err != nil { - t.Fatalf("Failed to accept CreateNFT transaction: %s", err) - } else if err := verifyTxFeeDeducted(t, s, fromAddrs, 1); err != nil { - t.Fatal(err) + + fromAddrsTotalBalance := uint64(0) + for _, addr := range fromAddrs { + fromAddrsTotalBalance += balances[addr] } + fromAddrsStartBalance := startBalance * uint64(len(fromAddrs)) + require.Equal(fromAddrsStartBalance-env.vm.TxFee, fromAddrsTotalBalance) + + assetID := createReply.AssetID payload, err := formatting.Encode(formatting.Hex, []byte{1, 2, 3, 4, 5}) - if err != nil { - t.Fatal(err) - } + require.NoError(err) mintArgs := &MintNFTArgs{ JSONSpendHeader: api.JSONSpendHeader{ UserPass: api.UserPass{ @@ -2252,24 +2355,11 @@ func TestNFTWorkflow(t *testing.T) { } mintReply := &api.JSONTxIDChangeAddr{} - if 
err := s.MintNFT(nil, mintArgs, mintReply); err != nil { - t.Fatalf("MintNFT returned an error: %s", err) - } else if createReply.ChangeAddr != fromAddrsStr[0] { - t.Fatalf("expected change address to be %s but got %s", fromAddrsStr[0], mintReply.ChangeAddr) - } - - mintNFTTx := UniqueTx{ - vm: vm, - txID: mintReply.TxID, - } - if mintNFTTx.Status() != choices.Processing { - t.Fatal("MintNFTTx should have been processing after minting the NFT") - } + require.NoError(env.service.MintNFT(nil, mintArgs, mintReply)) + require.Equal(fromAddrsStr[0], createReply.ChangeAddr) // Accept the transaction so that we can send the newly minted NFT - if err := mintNFTTx.Accept(context.Background()); err != nil { - t.Fatalf("Failed to accept MintNFTTx: %s", err) - } + buildAndAccept(require, env.vm, env.issuer, mintReply.TxID) sendArgs := &SendNFTArgs{ JSONSpendHeader: api.JSONSpendHeader{ @@ -2285,29 +2375,31 @@ func TestNFTWorkflow(t *testing.T) { To: addrStr, } sendReply := &api.JSONTxIDChangeAddr{} - if err := s.SendNFT(nil, sendArgs, sendReply); err != nil { - t.Fatalf("Failed to send NFT due to: %s", err) - } else if sendReply.ChangeAddr != fromAddrsStr[0] { - t.Fatalf("expected change address to be %s but got %s", fromAddrsStr[0], sendReply.ChangeAddr) - } + require.NoError(env.service.SendNFT(nil, sendArgs, sendReply)) + require.Equal(fromAddrsStr[0], sendReply.ChangeAddr) }) } } func TestImportExportKey(t *testing.T) { - _, vm, s, _, _ := setup(t, true) + require := require.New(t) + + env := setup(t, &envConfig{ + keystoreUsers: []*user{{ + username: username, + password: password, + }}, + }) + env.vm.ctx.Lock.Unlock() + defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } - vm.ctx.Lock.Unlock() + env.vm.ctx.Lock.Lock() + require.NoError(env.vm.Shutdown(context.Background())) + env.vm.ctx.Lock.Unlock() }() - factory := secp256k1.Factory{} - sk, err := factory.NewPrivateKey() - if err != nil { - t.Fatalf("problem generating private 
key: %s", err) - } + sk, err := secp256k1.NewPrivateKey() + require.NoError(err) importArgs := &ImportKeyArgs{ UserPass: api.UserPass{ @@ -2317,14 +2409,10 @@ func TestImportExportKey(t *testing.T) { PrivateKey: sk, } importReply := &api.JSONAddress{} - if err := s.ImportKey(nil, importArgs, importReply); err != nil { - t.Fatal(err) - } + require.NoError(env.service.ImportKey(nil, importArgs, importReply)) - addrStr, err := vm.FormatLocalAddress(sk.PublicKey().Address()) - if err != nil { - t.Fatal(err) - } + addrStr, err := env.vm.FormatLocalAddress(sk.PublicKey().Address()) + require.NoError(err) exportArgs := &ExportKeyArgs{ UserPass: api.UserPass{ Username: username, @@ -2333,30 +2421,29 @@ func TestImportExportKey(t *testing.T) { Address: addrStr, } exportReply := &ExportKeyReply{} - if err := s.ExportKey(nil, exportArgs, exportReply); err != nil { - t.Fatal(err) - } - - if !bytes.Equal(sk.Bytes(), exportReply.PrivateKey.Bytes()) { - t.Fatal("Unexpected key was found in ExportKeyReply") - } + require.NoError(env.service.ExportKey(nil, exportArgs, exportReply)) + require.Equal(sk.Bytes(), exportReply.PrivateKey.Bytes()) } func TestImportAVMKeyNoDuplicates(t *testing.T) { - _, vm, s, _, _ := setup(t, true) - ctx := vm.ctx + require := require.New(t) + + env := setup(t, &envConfig{ + keystoreUsers: []*user{{ + username: username, + password: password, + }}, + }) + env.vm.ctx.Lock.Unlock() + defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } - ctx.Lock.Unlock() + env.vm.ctx.Lock.Lock() + require.NoError(env.vm.Shutdown(context.Background())) + env.vm.ctx.Lock.Unlock() }() - factory := secp256k1.Factory{} - sk, err := factory.NewPrivateKey() - if err != nil { - t.Fatalf("problem generating private key: %s", err) - } + sk, err := secp256k1.NewPrivateKey() + require.NoError(err) args := ImportKeyArgs{ UserPass: api.UserPass{ Username: username, @@ -2365,67 +2452,55 @@ func TestImportAVMKeyNoDuplicates(t *testing.T) { 
PrivateKey: sk, } reply := api.JSONAddress{} - if err := s.ImportKey(nil, &args, &reply); err != nil { - t.Fatal(err) - } + require.NoError(env.service.ImportKey(nil, &args, &reply)) - expectedAddress, err := vm.FormatLocalAddress(sk.PublicKey().Address()) - if err != nil { - t.Fatal(err) - } + expectedAddress, err := env.vm.FormatLocalAddress(sk.PublicKey().Address()) + require.NoError(err) - if reply.Address != expectedAddress { - t.Fatalf("Reply address: %s did not match expected address: %s", reply.Address, expectedAddress) - } + require.Equal(expectedAddress, reply.Address) reply2 := api.JSONAddress{} - if err := s.ImportKey(nil, &args, &reply2); err != nil { - t.Fatal(err) - } + require.NoError(env.service.ImportKey(nil, &args, &reply2)) - if reply2.Address != expectedAddress { - t.Fatalf("Reply address: %s did not match expected address: %s", reply2.Address, expectedAddress) - } + require.Equal(expectedAddress, reply2.Address) addrsArgs := api.UserPass{ Username: username, Password: password, } addrsReply := api.JSONAddresses{} - if err := s.ListAddresses(nil, &addrsArgs, &addrsReply); err != nil { - t.Fatal(err) - } - - if len(addrsReply.Addresses) != 1 { - t.Fatal("Importing the same key twice created duplicate addresses") - } + require.NoError(env.service.ListAddresses(nil, &addrsArgs, &addrsReply)) - if addrsReply.Addresses[0] != expectedAddress { - t.Fatal("List addresses returned an incorrect address") - } + require.Len(addrsReply.Addresses, 1) + require.Equal(expectedAddress, addrsReply.Addresses[0]) } func TestSend(t *testing.T) { - _, vm, s, _, genesisTx := setupWithKeys(t, true) + require := require.New(t) + + env := setup(t, &envConfig{ + keystoreUsers: []*user{{ + username: username, + password: password, + initialKeys: keys, + }}, + }) + env.vm.ctx.Lock.Unlock() + defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } - vm.ctx.Lock.Unlock() + env.vm.ctx.Lock.Lock() + 
require.NoError(env.vm.Shutdown(context.Background())) + env.vm.ctx.Lock.Unlock() }() - assetID := genesisTx.ID() + assetID := env.genesisTx.ID() addr := keys[0].PublicKey().Address() - addrStr, err := vm.FormatLocalAddress(addr) - if err != nil { - t.Fatal(err) - } - changeAddrStr, err := vm.FormatLocalAddress(testChangeAddr) - if err != nil { - t.Fatal(err) - } - _, fromAddrsStr := sampleAddrs(t, vm, addrs) + addrStr, err := env.vm.FormatLocalAddress(addr) + require.NoError(err) + changeAddrStr, err := env.vm.FormatLocalAddress(testChangeAddr) + require.NoError(err) + _, fromAddrsStr := sampleAddrs(t, env.vm.AddressManager, addrs) args := &SendArgs{ JSONSpendHeader: api.JSONSpendHeader{ @@ -2443,46 +2518,41 @@ func TestSend(t *testing.T) { }, } reply := &api.JSONTxIDChangeAddr{} - vm.timer.Cancel() - if err := s.Send(nil, args, reply); err != nil { - t.Fatalf("Failed to send transaction: %s", err) - } else if reply.ChangeAddr != changeAddrStr { - t.Fatalf("expected change address to be %s but got %s", changeAddrStr, reply.ChangeAddr) - } + require.NoError(env.service.Send(nil, args, reply)) + require.Equal(changeAddrStr, reply.ChangeAddr) - pendingTxs := vm.txs - if len(pendingTxs) != 1 { - t.Fatalf("Expected to find 1 pending tx after send, but found %d", len(pendingTxs)) - } - - if reply.TxID != pendingTxs[0].ID() { - t.Fatal("Transaction ID returned by Send does not match the transaction found in vm's pending transactions") - } + buildAndAccept(require, env.vm, env.issuer, reply.TxID) } func TestSendMultiple(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - _, vm, s, _, genesisTx := setupWithKeys(t, tc.avaxAsset) + require := require.New(t) + + env := setup(t, &envConfig{ + isCustomFeeAsset: !tc.avaxAsset, + keystoreUsers: []*user{{ + username: username, + password: password, + initialKeys: keys, + }}, + }) + env.vm.ctx.Lock.Unlock() + defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - 
} - vm.ctx.Lock.Unlock() + env.vm.ctx.Lock.Lock() + require.NoError(env.vm.Shutdown(context.Background())) + env.vm.ctx.Lock.Unlock() }() - assetID := genesisTx.ID() + assetID := env.genesisTx.ID() addr := keys[0].PublicKey().Address() - addrStr, err := vm.FormatLocalAddress(addr) - if err != nil { - t.Fatal(err) - } - changeAddrStr, err := vm.FormatLocalAddress(testChangeAddr) - if err != nil { - t.Fatal(err) - } - _, fromAddrsStr := sampleAddrs(t, vm, addrs) + addrStr, err := env.vm.FormatLocalAddress(addr) + require.NoError(err) + changeAddrStr, err := env.vm.FormatLocalAddress(testChangeAddr) + require.NoError(err) + _, fromAddrsStr := sampleAddrs(t, env.vm.AddressManager, addrs) args := &SendMultipleArgs{ JSONSpendHeader: api.JSONSpendHeader{ @@ -2507,36 +2577,29 @@ func TestSendMultiple(t *testing.T) { }, } reply := &api.JSONTxIDChangeAddr{} - vm.timer.Cancel() - if err := s.SendMultiple(nil, args, reply); err != nil { - t.Fatalf("Failed to send transaction: %s", err) - } else if reply.ChangeAddr != changeAddrStr { - t.Fatalf("expected change address to be %s but got %s", changeAddrStr, reply.ChangeAddr) - } - - pendingTxs := vm.txs - if len(pendingTxs) != 1 { - t.Fatalf("Expected to find 1 pending tx after send, but found %d", len(pendingTxs)) - } - - if reply.TxID != pendingTxs[0].ID() { - t.Fatal("Transaction ID returned by SendMultiple does not match the transaction found in vm's pending transactions") - } + require.NoError(env.service.SendMultiple(nil, args, reply)) + require.Equal(changeAddrStr, reply.ChangeAddr) - if _, err := vm.GetTx(context.Background(), reply.TxID); err != nil { - t.Fatalf("Failed to retrieve created transaction: %s", err) - } + buildAndAccept(require, env.vm, env.issuer, reply.TxID) }) } } func TestCreateAndListAddresses(t *testing.T) { - _, vm, s, _, _ := setup(t, true) + require := require.New(t) + + env := setup(t, &envConfig{ + keystoreUsers: []*user{{ + username: username, + password: password, + }}, + }) + 
env.vm.ctx.Lock.Unlock() + defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } - vm.ctx.Lock.Unlock() + env.vm.ctx.Lock.Lock() + require.NoError(env.vm.Shutdown(context.Background())) + env.vm.ctx.Lock.Unlock() }() createArgs := &api.UserPass{ @@ -2545,9 +2608,7 @@ func TestCreateAndListAddresses(t *testing.T) { } createReply := &api.JSONAddress{} - if err := s.CreateAddress(nil, createArgs, createReply); err != nil { - t.Fatalf("Failed to create address: %s", err) - } + require.NoError(env.service.CreateAddress(nil, createArgs, createReply)) newAddr := createReply.Address @@ -2557,29 +2618,30 @@ func TestCreateAndListAddresses(t *testing.T) { } listReply := &api.JSONAddresses{} - if err := s.ListAddresses(nil, listArgs, listReply); err != nil { - t.Fatalf("Failed to list addresses: %s", err) - } - - for _, addr := range listReply.Addresses { - if addr == newAddr { - return - } - } - t.Fatalf("Failed to find newly created address among %d addresses", len(listReply.Addresses)) + require.NoError(env.service.ListAddresses(nil, listArgs, listReply)) + require.Contains(listReply.Addresses, newAddr) } func TestImport(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - _, vm, s, m, genesisTx := setupWithKeys(t, tc.avaxAsset) + require := require.New(t) + + env := setup(t, &envConfig{ + isCustomFeeAsset: !tc.avaxAsset, + keystoreUsers: []*user{{ + username: username, + password: password, + initialKeys: keys, + }}, + }) + defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } - vm.ctx.Lock.Unlock() + env.vm.ctx.Lock.Lock() + require.NoError(env.vm.Shutdown(context.Background())) + env.vm.ctx.Lock.Unlock() }() - assetID := genesisTx.ID() + assetID := env.genesisTx.ID() addr0 := keys[0].PublicKey().Address() utxo := &avax.UTXO{ @@ -2593,27 +2655,27 @@ func TestImport(t *testing.T) { }, }, } - utxoBytes, err := vm.parser.Codec().Marshal(txs.CodecVersion, utxo) - 
if err != nil { - t.Fatal(err) - } + utxoBytes, err := env.vm.parser.Codec().Marshal(txs.CodecVersion, utxo) + require.NoError(err) - peerSharedMemory := m.NewSharedMemory(constants.PlatformChainID) + peerSharedMemory := env.sharedMemory.NewSharedMemory(constants.PlatformChainID) utxoID := utxo.InputID() - if err := peerSharedMemory.Apply(map[ids.ID]*atomic.Requests{vm.ctx.ChainID: {PutRequests: []*atomic.Element{{ - Key: utxoID[:], - Value: utxoBytes, - Traits: [][]byte{ - addr0.Bytes(), + require.NoError(peerSharedMemory.Apply(map[ids.ID]*atomic.Requests{ + env.vm.ctx.ChainID: { + PutRequests: []*atomic.Element{{ + Key: utxoID[:], + Value: utxoBytes, + Traits: [][]byte{ + addr0.Bytes(), + }, + }}, }, - }}}}); err != nil { - t.Fatal(err) - } + })) - addrStr, err := vm.FormatLocalAddress(keys[0].PublicKey().Address()) - if err != nil { - t.Fatal(err) - } + env.vm.ctx.Lock.Unlock() + + addrStr, err := env.vm.FormatLocalAddress(keys[0].PublicKey().Address()) + require.NoError(err) args := &ImportArgs{ UserPass: api.UserPass{ Username: username, @@ -2623,23 +2685,19 @@ func TestImport(t *testing.T) { To: addrStr, } reply := &api.JSONTxID{} - if err := s.Import(nil, args, reply); err != nil { - t.Fatalf("Failed to import AVAX due to %s", err) - } + require.NoError(env.service.Import(nil, args, reply)) }) } } func TestServiceGetBlock(t *testing.T) { - require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() blockID := ids.GenerateTestID() type test struct { name string - serviceAndExpectedBlockFunc func(ctrl *gomock.Controller) (*Service, interface{}) + serviceAndExpectedBlockFunc func(t *testing.T, ctrl *gomock.Controller) (*Service, interface{}) encoding formatting.Encoding expectedErr error } @@ -2647,7 +2705,7 @@ func TestServiceGetBlock(t *testing.T) { tests := []test{ { name: "chain not linearized", - serviceAndExpectedBlockFunc: func(ctrl *gomock.Controller) (*Service, interface{}) { + serviceAndExpectedBlockFunc: func(*testing.T, 
*gomock.Controller) (*Service, interface{}) { return &Service{ vm: &VM{ ctx: &snow.Context{ @@ -2661,7 +2719,7 @@ func TestServiceGetBlock(t *testing.T) { }, { name: "block not found", - serviceAndExpectedBlockFunc: func(ctrl *gomock.Controller) (*Service, interface{}) { + serviceAndExpectedBlockFunc: func(_ *testing.T, ctrl *gomock.Controller) (*Service, interface{}) { manager := executor.NewMockManager(ctrl) manager.EXPECT().GetStatelessBlock(blockID).Return(nil, database.ErrNotFound) return &Service{ @@ -2678,8 +2736,8 @@ func TestServiceGetBlock(t *testing.T) { }, { name: "JSON format", - serviceAndExpectedBlockFunc: func(ctrl *gomock.Controller) (*Service, interface{}) { - block := blocks.NewMockBlock(ctrl) + serviceAndExpectedBlockFunc: func(_ *testing.T, ctrl *gomock.Controller) (*Service, interface{}) { + block := block.NewMockBlock(ctrl) block.EXPECT().InitCtx(gomock.Any()) block.EXPECT().Txs().Return(nil) @@ -2699,13 +2757,13 @@ func TestServiceGetBlock(t *testing.T) { }, { name: "hex format", - serviceAndExpectedBlockFunc: func(ctrl *gomock.Controller) (*Service, interface{}) { - block := blocks.NewMockBlock(ctrl) + serviceAndExpectedBlockFunc: func(t *testing.T, ctrl *gomock.Controller) (*Service, interface{}) { + block := block.NewMockBlock(ctrl) blockBytes := []byte("hi mom") block.EXPECT().Bytes().Return(blockBytes) expected, err := formatting.Encode(formatting.Hex, blockBytes) - require.NoError(err) + require.NoError(t, err) manager := executor.NewMockManager(ctrl) manager.EXPECT().GetStatelessBlock(blockID).Return(block, nil) @@ -2723,13 +2781,13 @@ func TestServiceGetBlock(t *testing.T) { }, { name: "hexc format", - serviceAndExpectedBlockFunc: func(ctrl *gomock.Controller) (*Service, interface{}) { - block := blocks.NewMockBlock(ctrl) + serviceAndExpectedBlockFunc: func(t *testing.T, ctrl *gomock.Controller) (*Service, interface{}) { + block := block.NewMockBlock(ctrl) blockBytes := []byte("hi mom") block.EXPECT().Bytes().Return(blockBytes) 
expected, err := formatting.Encode(formatting.HexC, blockBytes) - require.NoError(err) + require.NoError(t, err) manager := executor.NewMockManager(ctrl) manager.EXPECT().GetStatelessBlock(blockID).Return(block, nil) @@ -2747,13 +2805,13 @@ func TestServiceGetBlock(t *testing.T) { }, { name: "hexnc format", - serviceAndExpectedBlockFunc: func(ctrl *gomock.Controller) (*Service, interface{}) { - block := blocks.NewMockBlock(ctrl) + serviceAndExpectedBlockFunc: func(t *testing.T, ctrl *gomock.Controller) (*Service, interface{}) { + block := block.NewMockBlock(ctrl) blockBytes := []byte("hi mom") block.EXPECT().Bytes().Return(blockBytes) expected, err := formatting.Encode(formatting.HexNC, blockBytes) - require.NoError(err) + require.NoError(t, err) manager := executor.NewMockManager(ctrl) manager.EXPECT().GetStatelessBlock(blockID).Return(block, nil) @@ -2773,7 +2831,9 @@ func TestServiceGetBlock(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - service, expected := tt.serviceAndExpectedBlockFunc(ctrl) + require := require.New(t) + + service, expected := tt.serviceAndExpectedBlockFunc(t, ctrl) args := &api.GetBlockArgs{ BlockID: blockID, @@ -2782,25 +2842,28 @@ func TestServiceGetBlock(t *testing.T) { reply := &api.GetBlockResponse{} err := service.GetBlock(nil, args, reply) require.ErrorIs(err, tt.expectedErr) - if err == nil { - require.Equal(tt.encoding, reply.Encoding) - require.Equal(expected, reply.Block) + if tt.expectedErr != nil { + return } + require.Equal(tt.encoding, reply.Encoding) + + expectedJSON, err := json.Marshal(expected) + require.NoError(err) + + require.Equal(json.RawMessage(expectedJSON), reply.Block) }) } } func TestServiceGetBlockByHeight(t *testing.T) { - require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() blockID := ids.GenerateTestID() blockHeight := uint64(1337) type test struct { name string - serviceAndExpectedBlockFunc func(ctrl *gomock.Controller) (*Service, interface{}) + 
serviceAndExpectedBlockFunc func(t *testing.T, ctrl *gomock.Controller) (*Service, interface{}) encoding formatting.Encoding expectedErr error } @@ -2808,7 +2871,7 @@ func TestServiceGetBlockByHeight(t *testing.T) { tests := []test{ { name: "chain not linearized", - serviceAndExpectedBlockFunc: func(ctrl *gomock.Controller) (*Service, interface{}) { + serviceAndExpectedBlockFunc: func(*testing.T, *gomock.Controller) (*Service, interface{}) { return &Service{ vm: &VM{ ctx: &snow.Context{ @@ -2822,9 +2885,9 @@ func TestServiceGetBlockByHeight(t *testing.T) { }, { name: "block height not found", - serviceAndExpectedBlockFunc: func(ctrl *gomock.Controller) (*Service, interface{}) { - state := states.NewMockState(ctrl) - state.EXPECT().GetBlockID(blockHeight).Return(ids.Empty, database.ErrNotFound) + serviceAndExpectedBlockFunc: func(_ *testing.T, ctrl *gomock.Controller) (*Service, interface{}) { + state := state.NewMockState(ctrl) + state.EXPECT().GetBlockIDAtHeight(blockHeight).Return(ids.Empty, database.ErrNotFound) manager := executor.NewMockManager(ctrl) return &Service{ @@ -2842,9 +2905,9 @@ func TestServiceGetBlockByHeight(t *testing.T) { }, { name: "block not found", - serviceAndExpectedBlockFunc: func(ctrl *gomock.Controller) (*Service, interface{}) { - state := states.NewMockState(ctrl) - state.EXPECT().GetBlockID(blockHeight).Return(blockID, nil) + serviceAndExpectedBlockFunc: func(_ *testing.T, ctrl *gomock.Controller) (*Service, interface{}) { + state := state.NewMockState(ctrl) + state.EXPECT().GetBlockIDAtHeight(blockHeight).Return(blockID, nil) manager := executor.NewMockManager(ctrl) manager.EXPECT().GetStatelessBlock(blockID).Return(nil, database.ErrNotFound) @@ -2863,13 +2926,13 @@ func TestServiceGetBlockByHeight(t *testing.T) { }, { name: "JSON format", - serviceAndExpectedBlockFunc: func(ctrl *gomock.Controller) (*Service, interface{}) { - block := blocks.NewMockBlock(ctrl) + serviceAndExpectedBlockFunc: func(_ *testing.T, ctrl *gomock.Controller) 
(*Service, interface{}) { + block := block.NewMockBlock(ctrl) block.EXPECT().InitCtx(gomock.Any()) block.EXPECT().Txs().Return(nil) - state := states.NewMockState(ctrl) - state.EXPECT().GetBlockID(blockHeight).Return(blockID, nil) + state := state.NewMockState(ctrl) + state.EXPECT().GetBlockIDAtHeight(blockHeight).Return(blockID, nil) manager := executor.NewMockManager(ctrl) manager.EXPECT().GetStatelessBlock(blockID).Return(block, nil) @@ -2888,16 +2951,16 @@ func TestServiceGetBlockByHeight(t *testing.T) { }, { name: "hex format", - serviceAndExpectedBlockFunc: func(ctrl *gomock.Controller) (*Service, interface{}) { - block := blocks.NewMockBlock(ctrl) + serviceAndExpectedBlockFunc: func(t *testing.T, ctrl *gomock.Controller) (*Service, interface{}) { + block := block.NewMockBlock(ctrl) blockBytes := []byte("hi mom") block.EXPECT().Bytes().Return(blockBytes) - state := states.NewMockState(ctrl) - state.EXPECT().GetBlockID(blockHeight).Return(blockID, nil) + state := state.NewMockState(ctrl) + state.EXPECT().GetBlockIDAtHeight(blockHeight).Return(blockID, nil) expected, err := formatting.Encode(formatting.Hex, blockBytes) - require.NoError(err) + require.NoError(t, err) manager := executor.NewMockManager(ctrl) manager.EXPECT().GetStatelessBlock(blockID).Return(block, nil) @@ -2916,16 +2979,16 @@ func TestServiceGetBlockByHeight(t *testing.T) { }, { name: "hexc format", - serviceAndExpectedBlockFunc: func(ctrl *gomock.Controller) (*Service, interface{}) { - block := blocks.NewMockBlock(ctrl) + serviceAndExpectedBlockFunc: func(t *testing.T, ctrl *gomock.Controller) (*Service, interface{}) { + block := block.NewMockBlock(ctrl) blockBytes := []byte("hi mom") block.EXPECT().Bytes().Return(blockBytes) - state := states.NewMockState(ctrl) - state.EXPECT().GetBlockID(blockHeight).Return(blockID, nil) + state := state.NewMockState(ctrl) + state.EXPECT().GetBlockIDAtHeight(blockHeight).Return(blockID, nil) expected, err := formatting.Encode(formatting.HexC, blockBytes) - 
require.NoError(err) + require.NoError(t, err) manager := executor.NewMockManager(ctrl) manager.EXPECT().GetStatelessBlock(blockID).Return(block, nil) @@ -2944,16 +3007,16 @@ func TestServiceGetBlockByHeight(t *testing.T) { }, { name: "hexnc format", - serviceAndExpectedBlockFunc: func(ctrl *gomock.Controller) (*Service, interface{}) { - block := blocks.NewMockBlock(ctrl) + serviceAndExpectedBlockFunc: func(t *testing.T, ctrl *gomock.Controller) (*Service, interface{}) { + block := block.NewMockBlock(ctrl) blockBytes := []byte("hi mom") block.EXPECT().Bytes().Return(blockBytes) - state := states.NewMockState(ctrl) - state.EXPECT().GetBlockID(blockHeight).Return(blockID, nil) + state := state.NewMockState(ctrl) + state.EXPECT().GetBlockIDAtHeight(blockHeight).Return(blockID, nil) expected, err := formatting.Encode(formatting.HexNC, blockBytes) - require.NoError(err) + require.NoError(t, err) manager := executor.NewMockManager(ctrl) manager.EXPECT().GetStatelessBlock(blockID).Return(block, nil) @@ -2974,27 +3037,32 @@ func TestServiceGetBlockByHeight(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - service, expected := tt.serviceAndExpectedBlockFunc(ctrl) + require := require.New(t) + + service, expected := tt.serviceAndExpectedBlockFunc(t, ctrl) args := &api.GetBlockByHeightArgs{ - Height: blockHeight, + Height: avajson.Uint64(blockHeight), Encoding: tt.encoding, } reply := &api.GetBlockResponse{} err := service.GetBlockByHeight(nil, args, reply) require.ErrorIs(err, tt.expectedErr) - if err == nil { - require.Equal(tt.encoding, reply.Encoding) - require.Equal(expected, reply.Block) + if tt.expectedErr != nil { + return } + require.Equal(tt.encoding, reply.Encoding) + + expectedJSON, err := json.Marshal(expected) + require.NoError(err) + + require.Equal(json.RawMessage(expectedJSON), reply.Block) }) } } func TestServiceGetHeight(t *testing.T) { - require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() blockID 
:= ids.GenerateTestID() blockHeight := uint64(1337) @@ -3008,7 +3076,7 @@ func TestServiceGetHeight(t *testing.T) { tests := []test{ { name: "chain not linearized", - serviceFunc: func(ctrl *gomock.Controller) *Service { + serviceFunc: func(*gomock.Controller) *Service { return &Service{ vm: &VM{ ctx: &snow.Context{ @@ -3022,7 +3090,7 @@ func TestServiceGetHeight(t *testing.T) { { name: "block not found", serviceFunc: func(ctrl *gomock.Controller) *Service { - state := states.NewMockState(ctrl) + state := state.NewMockState(ctrl) state.EXPECT().GetLastAccepted().Return(blockID) manager := executor.NewMockManager(ctrl) @@ -3042,10 +3110,10 @@ func TestServiceGetHeight(t *testing.T) { { name: "happy path", serviceFunc: func(ctrl *gomock.Controller) *Service { - state := states.NewMockState(ctrl) + state := state.NewMockState(ctrl) state.EXPECT().GetLastAccepted().Return(blockID) - block := blocks.NewMockBlock(ctrl) + block := block.NewMockBlock(ctrl) block.EXPECT().Height().Return(blockHeight) manager := executor.NewMockManager(ctrl) @@ -3066,14 +3134,16 @@ func TestServiceGetHeight(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + require := require.New(t) service := tt.serviceFunc(ctrl) reply := &api.GetHeightResponse{} err := service.GetHeight(nil, nil, reply) require.ErrorIs(err, tt.expectedErr) - if err == nil { - require.Equal(json.Uint64(blockHeight), reply.Height) + if tt.expectedErr != nil { + return } + require.Equal(avajson.Uint64(blockHeight), reply.Height) }) } } diff --git a/avalanchego/vms/avm/states/diff.go b/avalanchego/vms/avm/state/diff.go similarity index 78% rename from avalanchego/vms/avm/states/diff.go rename to avalanchego/vms/avm/state/diff.go index 5b77c0a7..83e9c5e0 100644 --- a/avalanchego/vms/avm/states/diff.go +++ b/avalanchego/vms/avm/state/diff.go @@ -1,7 +1,7 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. -package states +package state import ( "errors" @@ -10,13 +10,14 @@ import ( "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/vms/avm/blocks" + "github.com/ava-labs/avalanchego/vms/avm/block" "github.com/ava-labs/avalanchego/vms/avm/txs" "github.com/ava-labs/avalanchego/vms/components/avax" ) var ( - _ Diff = (*diff)(nil) + _ Diff = (*diff)(nil) + _ Versions = stateGetter{} ErrMissingParentState = errors.New("missing parent state") ) @@ -33,9 +34,9 @@ type diff struct { // map of modified UTXOID -> *UTXO if the UTXO is nil, it has been removed modifiedUTXOs map[ids.ID]*avax.UTXO - addedTxs map[ids.ID]*txs.Tx // map of txID -> tx - addedBlockIDs map[uint64]ids.ID // map of height -> blockID - addedBlocks map[ids.ID]blocks.Block // map of blockID -> block + addedTxs map[ids.ID]*txs.Tx // map of txID -> tx + addedBlockIDs map[uint64]ids.ID // map of height -> blockID + addedBlocks map[ids.ID]block.Block // map of blockID -> block lastAccepted ids.ID timestamp time.Time @@ -55,12 +56,26 @@ func NewDiff( modifiedUTXOs: make(map[ids.ID]*avax.UTXO), addedTxs: make(map[ids.ID]*txs.Tx), addedBlockIDs: make(map[uint64]ids.ID), - addedBlocks: make(map[ids.ID]blocks.Block), + addedBlocks: make(map[ids.ID]block.Block), lastAccepted: parentState.GetLastAccepted(), timestamp: parentState.GetTimestamp(), }, nil } +type stateGetter struct { + state Chain +} + +func (s stateGetter) GetState(ids.ID) (Chain, bool) { + return s.state, true +} + +func NewDiffOn(parentState Chain) (Diff, error) { + return NewDiff(ids.Empty, stateGetter{ + state: parentState, + }) +} + func (d *diff) GetUTXO(utxoID ids.ID) (*avax.UTXO, error) { if utxo, modified := d.modifiedUTXOs[utxoID]; modified { if utxo == nil { @@ -76,10 +91,6 @@ func (d *diff) GetUTXO(utxoID ids.ID) (*avax.UTXO, error) { return parentState.GetUTXO(utxoID) } -func (d *diff) GetUTXOFromID(utxoID *avax.UTXOID) (*avax.UTXO, 
error) { - return d.GetUTXO(utxoID.InputID()) -} - func (d *diff) AddUTXO(utxo *avax.UTXO) { d.modifiedUTXOs[utxo.InputID()] = utxo } @@ -104,7 +115,7 @@ func (d *diff) AddTx(tx *txs.Tx) { d.addedTxs[tx.ID()] = tx } -func (d *diff) GetBlockID(height uint64) (ids.ID, error) { +func (d *diff) GetBlockIDAtHeight(height uint64) (ids.ID, error) { if blkID, exists := d.addedBlockIDs[height]; exists { return blkID, nil } @@ -113,10 +124,10 @@ func (d *diff) GetBlockID(height uint64) (ids.ID, error) { if !ok { return ids.Empty, fmt.Errorf("%w: %s", ErrMissingParentState, d.parentID) } - return parentState.GetBlockID(height) + return parentState.GetBlockIDAtHeight(height) } -func (d *diff) GetBlock(blkID ids.ID) (blocks.Block, error) { +func (d *diff) GetBlock(blkID ids.ID) (block.Block, error) { if blk, exists := d.addedBlocks[blkID]; exists { return blk, nil } @@ -128,7 +139,7 @@ func (d *diff) GetBlock(blkID ids.ID) (blocks.Block, error) { return parentState.GetBlock(blkID) } -func (d *diff) AddBlock(blk blocks.Block) { +func (d *diff) AddBlock(blk block.Block) { blkID := blk.ID() d.addedBlockIDs[blk.Height()] = blkID d.addedBlocks[blkID] = blk diff --git a/avalanchego/vms/avm/states/mock_states.go b/avalanchego/vms/avm/state/mock_state.go similarity index 74% rename from avalanchego/vms/avm/states/mock_states.go rename to avalanchego/vms/avm/state/mock_state.go index a5b5f6ea..cb5138c9 100644 --- a/avalanchego/vms/avm/states/mock_states.go +++ b/avalanchego/vms/avm/state/mock_state.go @@ -1,23 +1,26 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. 
-// Source: github.com/ava-labs/avalanchego/vms/avm/states (interfaces: Chain,State,Diff) +// Source: github.com/ava-labs/avalanchego/vms/avm/state (interfaces: Chain,State,Diff) +// +// Generated by this command: +// +// mockgen -package=state -destination=vms/avm/state/mock_state.go github.com/ava-labs/avalanchego/vms/avm/state Chain,State,Diff +// -// Package states is a generated GoMock package. -package states +// Package state is a generated GoMock package. +package state import ( reflect "reflect" + sync "sync" time "time" database "github.com/ava-labs/avalanchego/database" ids "github.com/ava-labs/avalanchego/ids" - choices "github.com/ava-labs/avalanchego/snow/choices" - blocks "github.com/ava-labs/avalanchego/vms/avm/blocks" + logging "github.com/ava-labs/avalanchego/utils/logging" + block "github.com/ava-labs/avalanchego/vms/avm/block" txs "github.com/ava-labs/avalanchego/vms/avm/txs" avax "github.com/ava-labs/avalanchego/vms/components/avax" - gomock "github.com/golang/mock/gomock" + gomock "go.uber.org/mock/gomock" ) // MockChain is a mock of Chain interface. @@ -44,13 +47,13 @@ func (m *MockChain) EXPECT() *MockChainMockRecorder { } // AddBlock mocks base method. -func (m *MockChain) AddBlock(arg0 blocks.Block) { +func (m *MockChain) AddBlock(arg0 block.Block) { m.ctrl.T.Helper() m.ctrl.Call(m, "AddBlock", arg0) } // AddBlock indicates an expected call of AddBlock. -func (mr *MockChainMockRecorder) AddBlock(arg0 interface{}) *gomock.Call { +func (mr *MockChainMockRecorder) AddBlock(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddBlock", reflect.TypeOf((*MockChain)(nil).AddBlock), arg0) } @@ -62,7 +65,7 @@ func (m *MockChain) AddTx(arg0 *txs.Tx) { } // AddTx indicates an expected call of AddTx. 
-func (mr *MockChainMockRecorder) AddTx(arg0 interface{}) *gomock.Call { +func (mr *MockChainMockRecorder) AddTx(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddTx", reflect.TypeOf((*MockChain)(nil).AddTx), arg0) } @@ -74,7 +77,7 @@ func (m *MockChain) AddUTXO(arg0 *avax.UTXO) { } // AddUTXO indicates an expected call of AddUTXO. -func (mr *MockChainMockRecorder) AddUTXO(arg0 interface{}) *gomock.Call { +func (mr *MockChainMockRecorder) AddUTXO(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddUTXO", reflect.TypeOf((*MockChain)(nil).AddUTXO), arg0) } @@ -86,39 +89,39 @@ func (m *MockChain) DeleteUTXO(arg0 ids.ID) { } // DeleteUTXO indicates an expected call of DeleteUTXO. -func (mr *MockChainMockRecorder) DeleteUTXO(arg0 interface{}) *gomock.Call { +func (mr *MockChainMockRecorder) DeleteUTXO(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteUTXO", reflect.TypeOf((*MockChain)(nil).DeleteUTXO), arg0) } // GetBlock mocks base method. -func (m *MockChain) GetBlock(arg0 ids.ID) (blocks.Block, error) { +func (m *MockChain) GetBlock(arg0 ids.ID) (block.Block, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetBlock", arg0) - ret0, _ := ret[0].(blocks.Block) + ret0, _ := ret[0].(block.Block) ret1, _ := ret[1].(error) return ret0, ret1 } // GetBlock indicates an expected call of GetBlock. -func (mr *MockChainMockRecorder) GetBlock(arg0 interface{}) *gomock.Call { +func (mr *MockChainMockRecorder) GetBlock(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlock", reflect.TypeOf((*MockChain)(nil).GetBlock), arg0) } -// GetBlockID mocks base method. -func (m *MockChain) GetBlockID(arg0 uint64) (ids.ID, error) { +// GetBlockIDAtHeight mocks base method. 
+func (m *MockChain) GetBlockIDAtHeight(arg0 uint64) (ids.ID, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBlockID", arg0) + ret := m.ctrl.Call(m, "GetBlockIDAtHeight", arg0) ret0, _ := ret[0].(ids.ID) ret1, _ := ret[1].(error) return ret0, ret1 } -// GetBlockID indicates an expected call of GetBlockID. -func (mr *MockChainMockRecorder) GetBlockID(arg0 interface{}) *gomock.Call { +// GetBlockIDAtHeight indicates an expected call of GetBlockIDAtHeight. +func (mr *MockChainMockRecorder) GetBlockIDAtHeight(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlockID", reflect.TypeOf((*MockChain)(nil).GetBlockID), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlockIDAtHeight", reflect.TypeOf((*MockChain)(nil).GetBlockIDAtHeight), arg0) } // GetLastAccepted mocks base method. @@ -159,7 +162,7 @@ func (m *MockChain) GetTx(arg0 ids.ID) (*txs.Tx, error) { } // GetTx indicates an expected call of GetTx. -func (mr *MockChainMockRecorder) GetTx(arg0 interface{}) *gomock.Call { +func (mr *MockChainMockRecorder) GetTx(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTx", reflect.TypeOf((*MockChain)(nil).GetTx), arg0) } @@ -174,26 +177,11 @@ func (m *MockChain) GetUTXO(arg0 ids.ID) (*avax.UTXO, error) { } // GetUTXO indicates an expected call of GetUTXO. -func (mr *MockChainMockRecorder) GetUTXO(arg0 interface{}) *gomock.Call { +func (mr *MockChainMockRecorder) GetUTXO(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUTXO", reflect.TypeOf((*MockChain)(nil).GetUTXO), arg0) } -// GetUTXOFromID mocks base method. 
-func (m *MockChain) GetUTXOFromID(arg0 *avax.UTXOID) (*avax.UTXO, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetUTXOFromID", arg0) - ret0, _ := ret[0].(*avax.UTXO) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetUTXOFromID indicates an expected call of GetUTXOFromID. -func (mr *MockChainMockRecorder) GetUTXOFromID(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUTXOFromID", reflect.TypeOf((*MockChain)(nil).GetUTXOFromID), arg0) -} - // SetLastAccepted mocks base method. func (m *MockChain) SetLastAccepted(arg0 ids.ID) { m.ctrl.T.Helper() @@ -201,7 +189,7 @@ func (m *MockChain) SetLastAccepted(arg0 ids.ID) { } // SetLastAccepted indicates an expected call of SetLastAccepted. -func (mr *MockChainMockRecorder) SetLastAccepted(arg0 interface{}) *gomock.Call { +func (mr *MockChainMockRecorder) SetLastAccepted(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetLastAccepted", reflect.TypeOf((*MockChain)(nil).SetLastAccepted), arg0) } @@ -213,7 +201,7 @@ func (m *MockChain) SetTimestamp(arg0 time.Time) { } // SetTimestamp indicates an expected call of SetTimestamp. -func (mr *MockChainMockRecorder) SetTimestamp(arg0 interface{}) *gomock.Call { +func (mr *MockChainMockRecorder) SetTimestamp(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetTimestamp", reflect.TypeOf((*MockChain)(nil).SetTimestamp), arg0) } @@ -254,29 +242,17 @@ func (mr *MockStateMockRecorder) Abort() *gomock.Call { } // AddBlock mocks base method. -func (m *MockState) AddBlock(arg0 blocks.Block) { +func (m *MockState) AddBlock(arg0 block.Block) { m.ctrl.T.Helper() m.ctrl.Call(m, "AddBlock", arg0) } // AddBlock indicates an expected call of AddBlock. 
-func (mr *MockStateMockRecorder) AddBlock(arg0 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) AddBlock(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddBlock", reflect.TypeOf((*MockState)(nil).AddBlock), arg0) } -// AddStatus mocks base method. -func (m *MockState) AddStatus(arg0 ids.ID, arg1 choices.Status) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "AddStatus", arg0, arg1) -} - -// AddStatus indicates an expected call of AddStatus. -func (mr *MockStateMockRecorder) AddStatus(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddStatus", reflect.TypeOf((*MockState)(nil).AddStatus), arg0, arg1) -} - // AddTx mocks base method. func (m *MockState) AddTx(arg0 *txs.Tx) { m.ctrl.T.Helper() @@ -284,7 +260,7 @@ func (m *MockState) AddTx(arg0 *txs.Tx) { } // AddTx indicates an expected call of AddTx. -func (mr *MockStateMockRecorder) AddTx(arg0 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) AddTx(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddTx", reflect.TypeOf((*MockState)(nil).AddTx), arg0) } @@ -296,11 +272,26 @@ func (m *MockState) AddUTXO(arg0 *avax.UTXO) { } // AddUTXO indicates an expected call of AddUTXO. -func (mr *MockStateMockRecorder) AddUTXO(arg0 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) AddUTXO(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddUTXO", reflect.TypeOf((*MockState)(nil).AddUTXO), arg0) } +// Checksums mocks base method. +func (m *MockState) Checksums() (ids.ID, ids.ID) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Checksums") + ret0, _ := ret[0].(ids.ID) + ret1, _ := ret[1].(ids.ID) + return ret0, ret1 +} + +// Checksums indicates an expected call of Checksums. 
+func (mr *MockStateMockRecorder) Checksums() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Checksums", reflect.TypeOf((*MockState)(nil).Checksums)) +} + // Close mocks base method. func (m *MockState) Close() error { m.ctrl.T.Helper() @@ -351,39 +342,39 @@ func (m *MockState) DeleteUTXO(arg0 ids.ID) { } // DeleteUTXO indicates an expected call of DeleteUTXO. -func (mr *MockStateMockRecorder) DeleteUTXO(arg0 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) DeleteUTXO(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteUTXO", reflect.TypeOf((*MockState)(nil).DeleteUTXO), arg0) } // GetBlock mocks base method. -func (m *MockState) GetBlock(arg0 ids.ID) (blocks.Block, error) { +func (m *MockState) GetBlock(arg0 ids.ID) (block.Block, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetBlock", arg0) - ret0, _ := ret[0].(blocks.Block) + ret0, _ := ret[0].(block.Block) ret1, _ := ret[1].(error) return ret0, ret1 } // GetBlock indicates an expected call of GetBlock. -func (mr *MockStateMockRecorder) GetBlock(arg0 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) GetBlock(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlock", reflect.TypeOf((*MockState)(nil).GetBlock), arg0) } -// GetBlockID mocks base method. -func (m *MockState) GetBlockID(arg0 uint64) (ids.ID, error) { +// GetBlockIDAtHeight mocks base method. +func (m *MockState) GetBlockIDAtHeight(arg0 uint64) (ids.ID, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBlockID", arg0) + ret := m.ctrl.Call(m, "GetBlockIDAtHeight", arg0) ret0, _ := ret[0].(ids.ID) ret1, _ := ret[1].(error) return ret0, ret1 } -// GetBlockID indicates an expected call of GetBlockID. -func (mr *MockStateMockRecorder) GetBlockID(arg0 interface{}) *gomock.Call { +// GetBlockIDAtHeight indicates an expected call of GetBlockIDAtHeight. 
+func (mr *MockStateMockRecorder) GetBlockIDAtHeight(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlockID", reflect.TypeOf((*MockState)(nil).GetBlockID), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlockIDAtHeight", reflect.TypeOf((*MockState)(nil).GetBlockIDAtHeight), arg0) } // GetLastAccepted mocks base method. @@ -400,21 +391,6 @@ func (mr *MockStateMockRecorder) GetLastAccepted() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLastAccepted", reflect.TypeOf((*MockState)(nil).GetLastAccepted)) } -// GetStatus mocks base method. -func (m *MockState) GetStatus(arg0 ids.ID) (choices.Status, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetStatus", arg0) - ret0, _ := ret[0].(choices.Status) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetStatus indicates an expected call of GetStatus. -func (mr *MockStateMockRecorder) GetStatus(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetStatus", reflect.TypeOf((*MockState)(nil).GetStatus), arg0) -} - // GetTimestamp mocks base method. func (m *MockState) GetTimestamp() time.Time { m.ctrl.T.Helper() @@ -439,7 +415,7 @@ func (m *MockState) GetTx(arg0 ids.ID) (*txs.Tx, error) { } // GetTx indicates an expected call of GetTx. -func (mr *MockStateMockRecorder) GetTx(arg0 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) GetTx(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTx", reflect.TypeOf((*MockState)(nil).GetTx), arg0) } @@ -454,26 +430,11 @@ func (m *MockState) GetUTXO(arg0 ids.ID) (*avax.UTXO, error) { } // GetUTXO indicates an expected call of GetUTXO. 
-func (mr *MockStateMockRecorder) GetUTXO(arg0 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) GetUTXO(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUTXO", reflect.TypeOf((*MockState)(nil).GetUTXO), arg0) } -// GetUTXOFromID mocks base method. -func (m *MockState) GetUTXOFromID(arg0 *avax.UTXOID) (*avax.UTXO, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetUTXOFromID", arg0) - ret0, _ := ret[0].(*avax.UTXO) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetUTXOFromID indicates an expected call of GetUTXOFromID. -func (mr *MockStateMockRecorder) GetUTXOFromID(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUTXOFromID", reflect.TypeOf((*MockState)(nil).GetUTXOFromID), arg0) -} - // InitializeChainState mocks base method. func (m *MockState) InitializeChainState(arg0 ids.ID, arg1 time.Time) error { m.ctrl.T.Helper() @@ -483,7 +444,7 @@ func (m *MockState) InitializeChainState(arg0 ids.ID, arg1 time.Time) error { } // InitializeChainState indicates an expected call of InitializeChainState. -func (mr *MockStateMockRecorder) InitializeChainState(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) InitializeChainState(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InitializeChainState", reflect.TypeOf((*MockState)(nil).InitializeChainState), arg0, arg1) } @@ -503,6 +464,20 @@ func (mr *MockStateMockRecorder) IsInitialized() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsInitialized", reflect.TypeOf((*MockState)(nil).IsInitialized)) } +// Prune mocks base method. +func (m *MockState) Prune(arg0 sync.Locker, arg1 logging.Logger) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Prune", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// Prune indicates an expected call of Prune. 
+func (mr *MockStateMockRecorder) Prune(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Prune", reflect.TypeOf((*MockState)(nil).Prune), arg0, arg1) +} + // SetInitialized mocks base method. func (m *MockState) SetInitialized() error { m.ctrl.T.Helper() @@ -524,7 +499,7 @@ func (m *MockState) SetLastAccepted(arg0 ids.ID) { } // SetLastAccepted indicates an expected call of SetLastAccepted. -func (mr *MockStateMockRecorder) SetLastAccepted(arg0 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) SetLastAccepted(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetLastAccepted", reflect.TypeOf((*MockState)(nil).SetLastAccepted), arg0) } @@ -536,7 +511,7 @@ func (m *MockState) SetTimestamp(arg0 time.Time) { } // SetTimestamp indicates an expected call of SetTimestamp. -func (mr *MockStateMockRecorder) SetTimestamp(arg0 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) SetTimestamp(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetTimestamp", reflect.TypeOf((*MockState)(nil).SetTimestamp), arg0) } @@ -551,7 +526,7 @@ func (m *MockState) UTXOIDs(arg0 []byte, arg1 ids.ID, arg2 int) ([]ids.ID, error } // UTXOIDs indicates an expected call of UTXOIDs. -func (mr *MockStateMockRecorder) UTXOIDs(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) UTXOIDs(arg0, arg1, arg2 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UTXOIDs", reflect.TypeOf((*MockState)(nil).UTXOIDs), arg0, arg1, arg2) } @@ -580,13 +555,13 @@ func (m *MockDiff) EXPECT() *MockDiffMockRecorder { } // AddBlock mocks base method. -func (m *MockDiff) AddBlock(arg0 blocks.Block) { +func (m *MockDiff) AddBlock(arg0 block.Block) { m.ctrl.T.Helper() m.ctrl.Call(m, "AddBlock", arg0) } // AddBlock indicates an expected call of AddBlock. 
-func (mr *MockDiffMockRecorder) AddBlock(arg0 interface{}) *gomock.Call { +func (mr *MockDiffMockRecorder) AddBlock(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddBlock", reflect.TypeOf((*MockDiff)(nil).AddBlock), arg0) } @@ -598,7 +573,7 @@ func (m *MockDiff) AddTx(arg0 *txs.Tx) { } // AddTx indicates an expected call of AddTx. -func (mr *MockDiffMockRecorder) AddTx(arg0 interface{}) *gomock.Call { +func (mr *MockDiffMockRecorder) AddTx(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddTx", reflect.TypeOf((*MockDiff)(nil).AddTx), arg0) } @@ -610,7 +585,7 @@ func (m *MockDiff) AddUTXO(arg0 *avax.UTXO) { } // AddUTXO indicates an expected call of AddUTXO. -func (mr *MockDiffMockRecorder) AddUTXO(arg0 interface{}) *gomock.Call { +func (mr *MockDiffMockRecorder) AddUTXO(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddUTXO", reflect.TypeOf((*MockDiff)(nil).AddUTXO), arg0) } @@ -622,7 +597,7 @@ func (m *MockDiff) Apply(arg0 Chain) { } // Apply indicates an expected call of Apply. -func (mr *MockDiffMockRecorder) Apply(arg0 interface{}) *gomock.Call { +func (mr *MockDiffMockRecorder) Apply(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Apply", reflect.TypeOf((*MockDiff)(nil).Apply), arg0) } @@ -634,39 +609,39 @@ func (m *MockDiff) DeleteUTXO(arg0 ids.ID) { } // DeleteUTXO indicates an expected call of DeleteUTXO. -func (mr *MockDiffMockRecorder) DeleteUTXO(arg0 interface{}) *gomock.Call { +func (mr *MockDiffMockRecorder) DeleteUTXO(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteUTXO", reflect.TypeOf((*MockDiff)(nil).DeleteUTXO), arg0) } // GetBlock mocks base method. 
-func (m *MockDiff) GetBlock(arg0 ids.ID) (blocks.Block, error) { +func (m *MockDiff) GetBlock(arg0 ids.ID) (block.Block, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetBlock", arg0) - ret0, _ := ret[0].(blocks.Block) + ret0, _ := ret[0].(block.Block) ret1, _ := ret[1].(error) return ret0, ret1 } // GetBlock indicates an expected call of GetBlock. -func (mr *MockDiffMockRecorder) GetBlock(arg0 interface{}) *gomock.Call { +func (mr *MockDiffMockRecorder) GetBlock(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlock", reflect.TypeOf((*MockDiff)(nil).GetBlock), arg0) } -// GetBlockID mocks base method. -func (m *MockDiff) GetBlockID(arg0 uint64) (ids.ID, error) { +// GetBlockIDAtHeight mocks base method. +func (m *MockDiff) GetBlockIDAtHeight(arg0 uint64) (ids.ID, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBlockID", arg0) + ret := m.ctrl.Call(m, "GetBlockIDAtHeight", arg0) ret0, _ := ret[0].(ids.ID) ret1, _ := ret[1].(error) return ret0, ret1 } -// GetBlockID indicates an expected call of GetBlockID. -func (mr *MockDiffMockRecorder) GetBlockID(arg0 interface{}) *gomock.Call { +// GetBlockIDAtHeight indicates an expected call of GetBlockIDAtHeight. +func (mr *MockDiffMockRecorder) GetBlockIDAtHeight(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlockID", reflect.TypeOf((*MockDiff)(nil).GetBlockID), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlockIDAtHeight", reflect.TypeOf((*MockDiff)(nil).GetBlockIDAtHeight), arg0) } // GetLastAccepted mocks base method. @@ -707,7 +682,7 @@ func (m *MockDiff) GetTx(arg0 ids.ID) (*txs.Tx, error) { } // GetTx indicates an expected call of GetTx. 
-func (mr *MockDiffMockRecorder) GetTx(arg0 interface{}) *gomock.Call { +func (mr *MockDiffMockRecorder) GetTx(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTx", reflect.TypeOf((*MockDiff)(nil).GetTx), arg0) } @@ -722,26 +697,11 @@ func (m *MockDiff) GetUTXO(arg0 ids.ID) (*avax.UTXO, error) { } // GetUTXO indicates an expected call of GetUTXO. -func (mr *MockDiffMockRecorder) GetUTXO(arg0 interface{}) *gomock.Call { +func (mr *MockDiffMockRecorder) GetUTXO(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUTXO", reflect.TypeOf((*MockDiff)(nil).GetUTXO), arg0) } -// GetUTXOFromID mocks base method. -func (m *MockDiff) GetUTXOFromID(arg0 *avax.UTXOID) (*avax.UTXO, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetUTXOFromID", arg0) - ret0, _ := ret[0].(*avax.UTXO) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetUTXOFromID indicates an expected call of GetUTXOFromID. -func (mr *MockDiffMockRecorder) GetUTXOFromID(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUTXOFromID", reflect.TypeOf((*MockDiff)(nil).GetUTXOFromID), arg0) -} - // SetLastAccepted mocks base method. func (m *MockDiff) SetLastAccepted(arg0 ids.ID) { m.ctrl.T.Helper() @@ -749,7 +709,7 @@ func (m *MockDiff) SetLastAccepted(arg0 ids.ID) { } // SetLastAccepted indicates an expected call of SetLastAccepted. -func (mr *MockDiffMockRecorder) SetLastAccepted(arg0 interface{}) *gomock.Call { +func (mr *MockDiffMockRecorder) SetLastAccepted(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetLastAccepted", reflect.TypeOf((*MockDiff)(nil).SetLastAccepted), arg0) } @@ -761,7 +721,7 @@ func (m *MockDiff) SetTimestamp(arg0 time.Time) { } // SetTimestamp indicates an expected call of SetTimestamp. 
-func (mr *MockDiffMockRecorder) SetTimestamp(arg0 interface{}) *gomock.Call { +func (mr *MockDiffMockRecorder) SetTimestamp(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetTimestamp", reflect.TypeOf((*MockDiff)(nil).SetTimestamp), arg0) } diff --git a/avalanchego/vms/avm/states/state.go b/avalanchego/vms/avm/state/state.go similarity index 50% rename from avalanchego/vms/avm/states/state.go rename to avalanchego/vms/avm/state/state.go index f6394e73..297a7e76 100644 --- a/avalanchego/vms/avm/states/state.go +++ b/avalanchego/vms/avm/state/state.go @@ -1,13 +1,18 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package states +package state import ( + "bytes" + "errors" "fmt" + "math" + "sync" "time" "github.com/prometheus/client_golang/prometheus" + "go.uber.org/zap" "github.com/ava-labs/avalanchego/cache" "github.com/ava-labs/avalanchego/cache/metercacher" @@ -16,8 +21,10 @@ import ( "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/choices" - "github.com/ava-labs/avalanchego/utils/wrappers" - "github.com/ava-labs/avalanchego/vms/avm/blocks" + "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/timer" + "github.com/ava-labs/avalanchego/vms/avm/block" "github.com/ava-labs/avalanchego/vms/avm/txs" "github.com/ava-labs/avalanchego/vms/components/avax" ) @@ -27,6 +34,11 @@ const ( txCacheSize = 8192 blockIDCacheSize = 8192 blockCacheSize = 2048 + + pruneCommitLimit = 1024 + pruneCommitSleepMultiplier = 5 + pruneCommitSleepCap = 10 * time.Second + pruneUpdateFrequency = 30 * time.Second ) var ( @@ -41,18 +53,17 @@ var ( timestampKey = []byte{0x01} lastAcceptedKey = []byte{0x02} + errStatusWithoutTx = errors.New("unexpected 
status without transactions") + _ State = (*state)(nil) ) type ReadOnlyChain interface { avax.UTXOGetter - // TODO: Remove GetUTXOFromID after the DAG linearization - GetUTXOFromID(utxoID *avax.UTXOID) (*avax.UTXO, error) - GetTx(txID ids.ID) (*txs.Tx, error) - GetBlockID(height uint64) (ids.ID, error) - GetBlock(blkID ids.ID) (blocks.Block, error) + GetBlockIDAtHeight(height uint64) (ids.ID, error) + GetBlock(blkID ids.ID) (block.Block, error) GetLastAccepted() ids.ID GetTimestamp() time.Time } @@ -63,7 +74,7 @@ type Chain interface { avax.UTXODeleter AddTx(tx *txs.Tx) - AddBlock(block blocks.Block) + AddBlock(block block.Block) SetLastAccepted(blkID ids.ID) SetTimestamp(t time.Time) } @@ -85,12 +96,6 @@ type State interface { // called during startup. InitializeChainState(stopVertexID ids.ID, genesisTimestamp time.Time) error - // TODO: deprecate statuses. We should only persist accepted state - // Status returns a status from storage. - GetStatus(id ids.ID) (choices.Status, error) - // AddStatus saves a status in storage. - AddStatus(id ids.ID, status choices.Status) - // Discard uncommitted changes to the database. Abort() @@ -101,6 +106,22 @@ type State interface { // pending changes to the base database. CommitBatch() (database.Batch, error) + // Asynchronously removes unneeded state from disk. + // + // Specifically, this removes: + // - All transaction statuses + // - All non-accepted transactions + // - All UTXOs that were consumed by accepted transactions + // + // [lock] is the AVM's context lock and is assumed to be unlocked when this + // method is called. + // + // TODO: remove after v1.11.x is activated + Prune(lock sync.Locker, log logging.Logger) error + + // Checksums returns the current TxChecksum and UTXOChecksum. 
+ Checksums() (txChecksum ids.ID, utxoChecksum ids.ID) + Close() error } @@ -122,16 +143,16 @@ type State interface { * '-- lastAcceptedKey -> lastAccepted */ type state struct { - parser blocks.Parser + parser block.Parser db *versiondb.Database modifiedUTXOs map[ids.ID]*avax.UTXO // map of modified UTXOID -> *UTXO if the UTXO is nil, it has been removed utxoDB database.Database utxoState avax.UTXOState - addedStatuses map[ids.ID]choices.Status - statusCache cache.Cacher[ids.ID, *choices.Status] // cache of id -> choices.Status. If the entry is nil, it is not in the database - statusDB database.Database + statusesPruned bool + statusCache cache.Cacher[ids.ID, *choices.Status] // cache of id -> choices.Status. If the entry is nil, it is not in the database + statusDB database.Database addedTxs map[ids.ID]*txs.Tx // map of txID -> *txs.Tx txCache cache.Cacher[ids.ID, *txs.Tx] // cache of txID -> *txs.Tx. If the entry is nil, it is not in the database @@ -141,20 +162,24 @@ type state struct { blockIDCache cache.Cacher[uint64, ids.ID] // cache of height -> blockID. If the entry is ids.Empty, it is not in the database blockIDDB database.Database - addedBlocks map[ids.ID]blocks.Block // map of blockID -> Block - blockCache cache.Cacher[ids.ID, blocks.Block] // cache of blockID -> Block. If the entry is nil, it is not in the database + addedBlocks map[ids.ID]block.Block // map of blockID -> Block + blockCache cache.Cacher[ids.ID, block.Block] // cache of blockID -> Block. If the entry is nil, it is not in the database blockDB database.Database // [lastAccepted] is the most recently accepted block. 
lastAccepted, persistedLastAccepted ids.ID timestamp, persistedTimestamp time.Time singletonDB database.Database + + trackChecksum bool + txChecksum ids.ID } func New( db *versiondb.Database, - parser blocks.Parser, + parser block.Parser, metrics prometheus.Registerer, + trackChecksums bool, ) (State, error) { utxoDB := prefixdb.New(utxoPrefix, db) statusDB := prefixdb.New(statusPrefix, db) @@ -190,17 +215,21 @@ func New( return nil, err } - blockCache, err := metercacher.New[ids.ID, blocks.Block]( + blockCache, err := metercacher.New[ids.ID, block.Block]( "block_cache", metrics, - &cache.LRU[ids.ID, blocks.Block]{Size: blockCacheSize}, + &cache.LRU[ids.ID, block.Block]{Size: blockCacheSize}, ) if err != nil { return nil, err } - utxoState, err := avax.NewMeteredUTXOState(utxoDB, parser.Codec(), metrics) - return &state{ + utxoState, err := avax.NewMeteredUTXOState(utxoDB, parser.Codec(), metrics, trackChecksums) + if err != nil { + return nil, err + } + + s := &state{ parser: parser, db: db, @@ -208,9 +237,8 @@ func New( utxoDB: utxoDB, utxoState: utxoState, - addedStatuses: make(map[ids.ID]choices.Status), - statusCache: statusCache, - statusDB: statusDB, + statusCache: statusCache, + statusDB: statusDB, addedTxs: make(map[ids.ID]*txs.Tx), txCache: txCache, @@ -220,12 +248,15 @@ func New( blockIDCache: blockIDCache, blockIDDB: blockIDDB, - addedBlocks: make(map[ids.ID]blocks.Block), + addedBlocks: make(map[ids.ID]block.Block), blockCache: blockCache, blockDB: blockDB, singletonDB: singletonDB, - }, err + + trackChecksum: trackChecksums, + } + return s, s.initTxChecksum() } func (s *state) GetUTXO(utxoID ids.ID) (*avax.UTXO, error) { @@ -238,10 +269,6 @@ func (s *state) GetUTXO(utxoID ids.ID) (*avax.UTXO, error) { return s.utxoState.GetUTXO(utxoID) } -func (s *state) GetUTXOFromID(utxoID *avax.UTXOID) (*avax.UTXO, error) { - return s.GetUTXO(utxoID.InputID()) -} - func (s *state) UTXOIDs(addr []byte, start ids.ID, limit int) ([]ids.ID, error) { return 
s.utxoState.UTXOIDs(addr, start, limit) } @@ -254,7 +281,69 @@ func (s *state) DeleteUTXO(utxoID ids.ID) { s.modifiedUTXOs[utxoID] = nil } +// TODO: After v1.11.x has activated we can rename [getTx] to [GetTx] and delete +// [getStatus]. func (s *state) GetTx(txID ids.ID) (*txs.Tx, error) { + tx, err := s.getTx(txID) + if err != nil { + return nil, err + } + + // Before the linearization, transactions were persisted before they were + // marked as Accepted. However, this function aims to only return accepted + // transactions. + status, err := s.getStatus(txID) + if err == database.ErrNotFound { + // If the status wasn't persisted, then the transaction was written + // after the linearization, and is accepted. + return tx, nil + } + if err != nil { + return nil, err + } + + // If the status was persisted, then the transaction was written before the + // linearization. If it wasn't marked as accepted, then we treat it as if it + // doesn't exist. + if status != choices.Accepted { + return nil, database.ErrNotFound + } + return tx, nil +} + +func (s *state) getStatus(id ids.ID) (choices.Status, error) { + if s.statusesPruned { + return choices.Unknown, database.ErrNotFound + } + + if _, ok := s.addedTxs[id]; ok { + return choices.Unknown, database.ErrNotFound + } + if status, found := s.statusCache.Get(id); found { + if status == nil { + return choices.Unknown, database.ErrNotFound + } + return *status, nil + } + + val, err := database.GetUInt32(s.statusDB, id[:]) + if err == database.ErrNotFound { + s.statusCache.Put(id, nil) + return choices.Unknown, database.ErrNotFound + } + if err != nil { + return choices.Unknown, err + } + + status := choices.Status(val) + if err := status.Valid(); err != nil { + return choices.Unknown, err + } + s.statusCache.Put(id, &status) + return status, nil +} + +func (s *state) getTx(txID ids.ID) (*txs.Tx, error) { if tx, exists := s.addedTxs[txID]; exists { return tx, nil } @@ -285,10 +374,12 @@ func (s *state) GetTx(txID ids.ID) 
(*txs.Tx, error) { } func (s *state) AddTx(tx *txs.Tx) { - s.addedTxs[tx.ID()] = tx + txID := tx.ID() + s.updateTxChecksum(txID) + s.addedTxs[txID] = tx } -func (s *state) GetBlockID(height uint64) (ids.ID, error) { +func (s *state) GetBlockIDAtHeight(height uint64) (ids.ID, error) { if blkID, exists := s.addedBlockIDs[height]; exists { return blkID, nil } @@ -315,7 +406,7 @@ func (s *state) GetBlockID(height uint64) (ids.ID, error) { return blkID, nil } -func (s *state) GetBlock(blkID ids.ID) (blocks.Block, error) { +func (s *state) GetBlock(blkID ids.ID) (block.Block, error) { if blk, exists := s.addedBlocks[blkID]; exists { return blk, nil } @@ -345,7 +436,7 @@ func (s *state) GetBlock(blkID ids.ID) (blocks.Block, error) { return blk, nil } -func (s *state) AddBlock(block blocks.Block) { +func (s *state) AddBlock(block block.Block) { blkID := block.ID() s.addedBlockIDs[block.Height()] = blkID s.addedBlocks[blkID] = block @@ -366,7 +457,7 @@ func (s *state) InitializeChainState(stopVertexID ids.ID, genesisTimestamp time. 
} func (s *state) initializeChainState(stopVertexID ids.ID, genesisTimestamp time.Time) error { - genesis, err := blocks.NewStandardBlock( + genesis, err := block.NewStandardBlock( stopVertexID, 0, genesisTimestamp, @@ -407,41 +498,6 @@ func (s *state) SetTimestamp(t time.Time) { s.timestamp = t } -// TODO: remove status support -func (s *state) GetStatus(id ids.ID) (choices.Status, error) { - if status, exists := s.addedStatuses[id]; exists { - return status, nil - } - if status, found := s.statusCache.Get(id); found { - if status == nil { - return choices.Unknown, database.ErrNotFound - } - return *status, nil - } - - val, err := database.GetUInt32(s.statusDB, id[:]) - if err == database.ErrNotFound { - s.statusCache.Put(id, nil) - return choices.Unknown, database.ErrNotFound - } - if err != nil { - return choices.Unknown, err - } - - status := choices.Status(val) - if err := status.Valid(); err != nil { - return choices.Unknown, err - } - - s.statusCache.Put(id, &status) - return status, nil -} - -// TODO: remove status support -func (s *state) AddStatus(id ids.ID, status choices.Status) { - s.addedStatuses[id] = status -} - func (s *state) Commit() error { defer s.Abort() batch, err := s.CommitBatch() @@ -463,8 +519,7 @@ func (s *state) CommitBatch() (database.Batch, error) { } func (s *state) Close() error { - errs := wrappers.Errs{} - errs.Add( + return utils.Err( s.utxoDB.Close(), s.statusDB.Close(), s.txDB.Close(), @@ -473,20 +528,16 @@ func (s *state) Close() error { s.singletonDB.Close(), s.db.Close(), ) - return errs.Err } func (s *state) write() error { - errs := wrappers.Errs{} - errs.Add( + return utils.Err( s.writeUTXOs(), s.writeTxs(), s.writeBlockIDs(), s.writeBlocks(), s.writeMetadata(), - s.writeStatuses(), ) - return errs.Err } func (s *state) writeUTXOs() error { @@ -513,9 +564,13 @@ func (s *state) writeTxs() error { delete(s.addedTxs, txID) s.txCache.Put(txID, tx) + s.statusCache.Put(txID, nil) if err := s.txDB.Put(txID[:], txBytes); err != 
nil { return fmt.Errorf("failed to add tx: %w", err) } + if err := s.statusDB.Delete(txID[:]); err != nil { + return fmt.Errorf("failed to delete status: %w", err) + } } return nil } @@ -563,16 +618,280 @@ func (s *state) writeMetadata() error { return nil } -func (s *state) writeStatuses() error { - for id, status := range s.addedStatuses { - id := id - status := status +func (s *state) Prune(lock sync.Locker, log logging.Logger) error { + lock.Lock() + // It is possible that more txs are added after grabbing this iterator. No + // new txs will write a status, so we don't need to check those txs. + statusIter := s.statusDB.NewIterator() + // Releasing is done using a closure to ensure that updating statusIter will + // result in having the most recent iterator released when executing the + // deferred function. + defer func() { + statusIter.Release() + }() + + if !statusIter.Next() { + // If there are no statuses on disk, pruning was previously run and + // finished. + lock.Unlock() - delete(s.addedStatuses, id) - s.statusCache.Put(id, &status) - if err := database.PutUInt32(s.statusDB, id[:], uint32(status)); err != nil { - return fmt.Errorf("failed to add status: %w", err) + log.Info("state already pruned") + + return statusIter.Error() + } + + startTxIDBytes := statusIter.Key() + txIter := s.txDB.NewIteratorWithStart(startTxIDBytes) + // Releasing is done using a closure to ensure that updating statusIter will + // result in having the most recent iterator released when executing the + // deferred function. + defer func() { + txIter.Release() + }() + + // While we are pruning the disk, we disable caching of the data we are + // modifying. Caching is re-enabled when pruning finishes. + // + // Note: If an unexpected error occurs the caches are never re-enabled. + // That's fine as the node is going to be in an unhealthy state regardless. 
+ oldTxCache := s.txCache + s.statusCache = &cache.Empty[ids.ID, *choices.Status]{} + s.txCache = &cache.Empty[ids.ID, *txs.Tx]{} + lock.Unlock() + + startTime := time.Now() + lastCommit := startTime + lastUpdate := startTime + startProgress := timer.ProgressFromHash(startTxIDBytes) + + startStatusBytes := statusIter.Value() + if err := s.cleanupTx(lock, startTxIDBytes, startStatusBytes, txIter); err != nil { + return err + } + + numPruned := 1 + for statusIter.Next() { + txIDBytes := statusIter.Key() + statusBytes := statusIter.Value() + if err := s.cleanupTx(lock, txIDBytes, statusBytes, txIter); err != nil { + return err + } + + numPruned++ + + if numPruned%pruneCommitLimit == 0 { + // We must hold the lock during committing to make sure we don't + // attempt to commit to disk while a block is concurrently being + // accepted. + lock.Lock() + err := utils.Err( + s.Commit(), + statusIter.Error(), + txIter.Error(), + ) + lock.Unlock() + if err != nil { + return err + } + + // We release the iterators here to allow the underlying database to + // clean up deleted state. + statusIter.Release() + txIter.Release() + + now := time.Now() + if now.Sub(lastUpdate) > pruneUpdateFrequency { + lastUpdate = now + + progress := timer.ProgressFromHash(txIDBytes) + eta := timer.EstimateETA( + startTime, + progress-startProgress, + math.MaxUint64-startProgress, + ) + log.Info("committing state pruning", + zap.Int("numPruned", numPruned), + zap.Duration("eta", eta), + ) + } + + // We take the minimum here because it's possible that the node is + // currently bootstrapping. This would mean that grabbing the lock + // could take an extremely long period of time; which we should not + // delay processing for. + pruneDuration := now.Sub(lastCommit) + sleepDuration := min( + pruneCommitSleepMultiplier*pruneDuration, + pruneCommitSleepCap, + ) + time.Sleep(sleepDuration) + + // Make sure not to include the sleep duration into the next prune + // duration. 
+ lastCommit = time.Now() + + // We shouldn't need to grab the lock here, but doing so ensures + // that we see a consistent view across both the statusDB and the + // txDB. + lock.Lock() + statusIter = s.statusDB.NewIteratorWithStart(txIDBytes) + txIter = s.txDB.NewIteratorWithStart(txIDBytes) + lock.Unlock() } } - return nil + + lock.Lock() + defer lock.Unlock() + + err := utils.Err( + s.Commit(), + statusIter.Error(), + txIter.Error(), + ) + + // Make sure we flush the original cache before re-enabling it to prevent + // surfacing any stale data. + oldTxCache.Flush() + s.statusesPruned = true + s.txCache = oldTxCache + + log.Info("finished state pruning", + zap.Int("numPruned", numPruned), + zap.Duration("duration", time.Since(startTime)), + ) + + return err +} + +// Assumes [lock] is unlocked. +func (s *state) cleanupTx(lock sync.Locker, txIDBytes []byte, statusBytes []byte, txIter database.Iterator) error { + // After the linearization, we write txs to disk without statuses to mark + // them as accepted. This means that there may be more txs than statuses and + // we need to skip over them. + // + // Note: We do not need to remove UTXOs consumed after the linearization, as + // those UTXOs are guaranteed to have already been deleted. + if err := skipTo(txIter, txIDBytes); err != nil { + return err + } + // txIter.Key() is now `txIDBytes` + + statusInt, err := database.ParseUInt32(statusBytes) + if err != nil { + return err + } + status := choices.Status(statusInt) + + if status == choices.Accepted { + txBytes := txIter.Value() + tx, err := s.parser.ParseGenesisTx(txBytes) + if err != nil { + return err + } + + utxos := tx.Unsigned.InputUTXOs() + + // Locking is done here to make sure that any concurrent verification is + // performed with a valid view of the state. + lock.Lock() + defer lock.Unlock() + + // Remove all the UTXOs consumed by the accepted tx. 
Technically we only + // need to remove UTXOs consumed by operations, but it's easy to just + // remove all of them. + for _, UTXO := range utxos { + if err := s.utxoState.DeleteUTXO(UTXO.InputID()); err != nil { + return err + } + } + } else { + lock.Lock() + defer lock.Unlock() + + // This tx wasn't accepted, so we can remove it entirely from disk. + if err := s.txDB.Delete(txIDBytes); err != nil { + return err + } + } + // By removing the status, we will treat the tx as accepted if it is still + // on disk. + return s.statusDB.Delete(txIDBytes) +} + +// skipTo advances [iter] until its key is equal to [targetKey]. If [iter] does +// not contain [targetKey] an error will be returned. +// +// Note: [iter.Next()] will always be called at least once. +func skipTo(iter database.Iterator, targetKey []byte) error { + for { + if !iter.Next() { + return fmt.Errorf("%w: 0x%x", database.ErrNotFound, targetKey) + } + key := iter.Key() + switch bytes.Compare(targetKey, key) { + case -1: + return fmt.Errorf("%w: 0x%x", database.ErrNotFound, targetKey) + case 0: + return nil + } + } +} + +func (s *state) Checksums() (ids.ID, ids.ID) { + return s.txChecksum, s.utxoState.Checksum() +} + +func (s *state) initTxChecksum() error { + if !s.trackChecksum { + return nil + } + + txIt := s.txDB.NewIterator() + defer txIt.Release() + statusIt := s.statusDB.NewIterator() + defer statusIt.Release() + + statusHasNext := statusIt.Next() + for txIt.Next() { + txIDBytes := txIt.Key() + if statusHasNext { // if status was exhausted, everything is accepted + statusIDBytes := statusIt.Key() + if bytes.Equal(txIDBytes, statusIDBytes) { // if the status key doesn't match this was marked as accepted + statusInt, err := database.ParseUInt32(statusIt.Value()) + if err != nil { + return err + } + + statusHasNext = statusIt.Next() // we processed the txID, so move on to the next status + + if choices.Status(statusInt) != choices.Accepted { // the status isn't accepted, so we skip the txID + continue + } 
+ } + } + + txID, err := ids.ToID(txIDBytes) + if err != nil { + return err + } + + s.updateTxChecksum(txID) + } + + if statusHasNext { + return errStatusWithoutTx + } + + return utils.Err( + txIt.Error(), + statusIt.Error(), + ) +} + +func (s *state) updateTxChecksum(modifiedID ids.ID) { + if !s.trackChecksum { + return + } + + s.txChecksum = s.txChecksum.XOR(modifiedID) } diff --git a/avalanchego/vms/avm/states/state_test.go b/avalanchego/vms/avm/state/state_test.go similarity index 80% rename from avalanchego/vms/avm/states/state_test.go rename to avalanchego/vms/avm/state/state_test.go index f9c1e760..70c9006f 100644 --- a/avalanchego/vms/avm/states/state_test.go +++ b/avalanchego/vms/avm/state/state_test.go @@ -1,14 +1,13 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package states +package state import ( "testing" "time" "github.com/prometheus/client_golang/prometheus" - "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/database" @@ -16,27 +15,30 @@ import ( "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/version" - "github.com/ava-labs/avalanchego/vms/avm/blocks" + "github.com/ava-labs/avalanchego/vms/avm/block" "github.com/ava-labs/avalanchego/vms/avm/fxs" "github.com/ava-labs/avalanchego/vms/avm/txs" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) +const trackChecksums = false + var ( - parser blocks.Parser + parser block.Parser populatedUTXO *avax.UTXO populatedUTXOID ids.ID populatedTx *txs.Tx populatedTxID ids.ID - populatedBlk blocks.Block + populatedBlk block.Block populatedBlkHeight uint64 populatedBlkID ids.ID ) func init() { var err error - parser, err = blocks.NewParser( + parser, err = block.NewParser( + time.Time{}, time.Time{}, []fxs.Fx{ &secp256k1fx.Fx{}, @@ 
-62,13 +64,13 @@ func init() { populatedTx = &txs.Tx{Unsigned: &txs.BaseTx{BaseTx: avax.BaseTx{ BlockchainID: ids.GenerateTestID(), }}} - err = parser.InitializeTx(populatedTx) + err = populatedTx.Initialize(parser.Codec()) if err != nil { panic(err) } populatedTxID = populatedTx.ID() - populatedBlk, err = blocks.NewStandardBlock( + populatedBlk, err = block.NewStandardBlock( ids.GenerateTestID(), 1, time.Now(), @@ -98,18 +100,20 @@ func (v *versions) GetState(blkID ids.ID) (Chain, bool) { } func TestState(t *testing.T) { + require := require.New(t) + db := memdb.New() vdb := versiondb.New(db) - s, err := New(vdb, parser, prometheus.NewRegistry()) - require.NoError(t, err) + s, err := New(vdb, parser, prometheus.NewRegistry(), trackChecksums) + require.NoError(err) s.AddUTXO(populatedUTXO) s.AddTx(populatedTx) s.AddBlock(populatedBlk) - require.NoError(t, s.Commit()) + require.NoError(s.Commit()) - s, err = New(vdb, parser, prometheus.NewRegistry()) - require.NoError(t, err) + s, err = New(vdb, parser, prometheus.NewRegistry(), trackChecksums) + require.NoError(err) ChainUTXOTest(t, s) ChainTxTest(t, s) @@ -117,15 +121,17 @@ func TestState(t *testing.T) { } func TestDiff(t *testing.T) { + require := require.New(t) + db := memdb.New() vdb := versiondb.New(db) - s, err := New(vdb, parser, prometheus.NewRegistry()) - require.NoError(t, err) + s, err := New(vdb, parser, prometheus.NewRegistry(), trackChecksums) + require.NoError(err) s.AddUTXO(populatedUTXO) s.AddTx(populatedTx) s.AddBlock(populatedBlk) - require.NoError(t, s.Commit()) + require.NoError(s.Commit()) parentID := ids.GenerateTestID() d, err := NewDiff(parentID, &versions{ @@ -133,7 +139,7 @@ func TestDiff(t *testing.T) { parentID: s, }, }) - require.NoError(t, err) + require.NoError(err) ChainUTXOTest(t, d) ChainTxTest(t, d) @@ -194,7 +200,7 @@ func ChainTxTest(t *testing.T, c Chain) { tx := &txs.Tx{Unsigned: &txs.BaseTx{BaseTx: avax.BaseTx{ BlockchainID: ids.GenerateTestID(), }}} - 
require.NoError(parser.InitializeTx(tx)) + require.NoError(tx.Initialize(parser.Codec())) txID := tx.ID() _, err = c.GetTx(txID) @@ -214,7 +220,7 @@ func ChainTxTest(t *testing.T, c Chain) { func ChainBlockTest(t *testing.T, c Chain) { require := require.New(t) - fetchedBlkID, err := c.GetBlockID(populatedBlkHeight) + fetchedBlkID, err := c.GetBlockIDAtHeight(populatedBlkHeight) require.NoError(err) require.Equal(populatedBlkID, fetchedBlkID) @@ -223,7 +229,7 @@ func ChainBlockTest(t *testing.T, c Chain) { require.Equal(populatedBlk.ID(), fetchedBlk.ID()) // Pull again for the cached path - fetchedBlkID, err = c.GetBlockID(populatedBlkHeight) + fetchedBlkID, err = c.GetBlockIDAtHeight(populatedBlkHeight) require.NoError(err) require.Equal(populatedBlkID, fetchedBlkID) @@ -231,7 +237,7 @@ func ChainBlockTest(t *testing.T, c Chain) { require.NoError(err) require.Equal(populatedBlk.ID(), fetchedBlk.ID()) - blk, err := blocks.NewStandardBlock( + blk, err := block.NewStandardBlock( ids.GenerateTestID(), 10, time.Now(), @@ -250,14 +256,14 @@ func ChainBlockTest(t *testing.T, c Chain) { blkID := blk.ID() blkHeight := blk.Height() - _, err = c.GetBlockID(blkHeight) + _, err = c.GetBlockIDAtHeight(blkHeight) require.ErrorIs(err, database.ErrNotFound) _, err = c.GetBlock(blkID) require.ErrorIs(err, database.ErrNotFound) // Pull again for the cached path - _, err = c.GetBlockID(blkHeight) + _, err = c.GetBlockIDAtHeight(blkHeight) require.ErrorIs(err, database.ErrNotFound) _, err = c.GetBlock(blkID) @@ -265,7 +271,7 @@ func ChainBlockTest(t *testing.T, c Chain) { c.AddBlock(blk) - fetchedBlkID, err = c.GetBlockID(blkHeight) + fetchedBlkID, err = c.GetBlockIDAtHeight(blkHeight) require.NoError(err) require.Equal(blkID, fetchedBlkID) @@ -279,13 +285,12 @@ func TestInitializeChainState(t *testing.T) { db := memdb.New() vdb := versiondb.New(db) - s, err := New(vdb, parser, prometheus.NewRegistry()) + s, err := New(vdb, parser, prometheus.NewRegistry(), trackChecksums) 
require.NoError(err) stopVertexID := ids.GenerateTestID() - genesisTimestamp := version.CortinaDefaultTime - err = s.InitializeChainState(stopVertexID, genesisTimestamp) - require.NoError(err) + genesisTimestamp := version.DefaultUpgradeTime + require.NoError(s.InitializeChainState(stopVertexID, genesisTimestamp)) lastAcceptedID := s.GetLastAccepted() genesis, err := s.GetBlock(lastAcceptedID) @@ -293,7 +298,7 @@ func TestInitializeChainState(t *testing.T) { require.Equal(stopVertexID, genesis.Parent()) require.Equal(genesisTimestamp.UnixNano(), genesis.Timestamp().UnixNano()) - childBlock, err := blocks.NewStandardBlock( + childBlock, err := block.NewStandardBlock( genesis.ID(), genesis.Height()+1, genesisTimestamp, @@ -304,11 +309,9 @@ func TestInitializeChainState(t *testing.T) { s.AddBlock(childBlock) s.SetLastAccepted(childBlock.ID()) - err = s.Commit() - require.NoError(err) + require.NoError(s.Commit()) - err = s.InitializeChainState(stopVertexID, genesisTimestamp) - require.NoError(err) + require.NoError(s.InitializeChainState(stopVertexID, genesisTimestamp)) lastAcceptedID = s.GetLastAccepted() lastAccepted, err := s.GetBlock(lastAcceptedID) diff --git a/avalanchego/vms/avm/states/versions.go b/avalanchego/vms/avm/state/versions.go similarity index 65% rename from avalanchego/vms/avm/states/versions.go rename to avalanchego/vms/avm/state/versions.go index 581ec3b3..6afb0fe8 100644 --- a/avalanchego/vms/avm/states/versions.go +++ b/avalanchego/vms/avm/state/versions.go @@ -1,11 +1,9 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package states +package state -import ( - "github.com/ava-labs/avalanchego/ids" -) +import "github.com/ava-labs/avalanchego/ids" type Versions interface { // GetState returns the state of the chain after [blkID] has been accepted. 
diff --git a/avalanchego/vms/avm/state_test.go b/avalanchego/vms/avm/state_test.go index 32a3874f..b17604b2 100644 --- a/avalanchego/vms/avm/state_test.go +++ b/avalanchego/vms/avm/state_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avm @@ -11,7 +11,6 @@ import ( "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" @@ -22,42 +21,37 @@ import ( ) func TestSetsAndGets(t *testing.T) { - _, _, vm, _ := GenesisVMWithArgs( - t, - []*common.Fx{{ + require := require.New(t) + + env := setup(t, &envConfig{ + additionalFxs: []*common.Fx{{ ID: ids.GenerateTestID(), Fx: &FxTest{ InitializeF: func(vmIntf interface{}) error { vm := vmIntf.(secp256k1fx.VM) - return vm.CodecRegistry().RegisterType(&avax.TestVerifiable{}) + return vm.CodecRegistry().RegisterType(&avax.TestState{}) }, }, }}, - nil, - ) - ctx := vm.ctx + }) defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } - ctx.Lock.Unlock() + require.NoError(env.vm.Shutdown(context.Background())) + env.vm.ctx.Lock.Unlock() }() - state := vm.state - utxo := &avax.UTXO{ UTXOID: avax.UTXOID{ TxID: ids.Empty, OutputIndex: 1, }, Asset: avax.Asset{ID: ids.Empty}, - Out: &avax.TestVerifiable{}, + Out: &avax.TestState{}, } utxoID := utxo.InputID() tx := &txs.Tx{Unsigned: &txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: constants.UnitTestID, - BlockchainID: chainID, + BlockchainID: env.vm.ctx.XChainID, Ins: []*avax.TransferableInput{{ UTXOID: avax.UTXOID{ TxID: ids.Empty, @@ -74,81 +68,57 @@ func TestSetsAndGets(t *testing.T) { }, }}, }}} - if err := tx.SignSECP256K1Fx(vm.parser.Codec(), 
[][]*secp256k1.PrivateKey{{keys[0]}}); err != nil { - t.Fatal(err) - } + require.NoError(tx.SignSECP256K1Fx(env.vm.parser.Codec(), [][]*secp256k1.PrivateKey{{keys[0]}})) txID := tx.ID() - state.AddUTXO(utxo) - state.AddTx(tx) - state.AddStatus(txID, choices.Accepted) + env.vm.state.AddUTXO(utxo) + env.vm.state.AddTx(tx) - resultUTXO, err := state.GetUTXO(utxoID) - if err != nil { - t.Fatal(err) - } - resultTx, err := state.GetTx(txID) - if err != nil { - t.Fatal(err) - } - resultStatus, err := state.GetStatus(txID) - if err != nil { - t.Fatal(err) - } + resultUTXO, err := env.vm.state.GetUTXO(utxoID) + require.NoError(err) + resultTx, err := env.vm.state.GetTx(txID) + require.NoError(err) - if resultUTXO.OutputIndex != 1 { - t.Fatalf("Wrong UTXO returned") - } - if resultTx.ID() != tx.ID() { - t.Fatalf("Wrong Tx returned") - } - if resultStatus != choices.Accepted { - t.Fatalf("Wrong Status returned") - } + require.Equal(uint32(1), resultUTXO.OutputIndex) + require.Equal(tx.ID(), resultTx.ID()) } func TestFundingNoAddresses(t *testing.T) { - _, _, vm, _ := GenesisVMWithArgs( - t, - []*common.Fx{{ + env := setup(t, &envConfig{ + additionalFxs: []*common.Fx{{ ID: ids.GenerateTestID(), Fx: &FxTest{ InitializeF: func(vmIntf interface{}) error { vm := vmIntf.(secp256k1fx.VM) - return vm.CodecRegistry().RegisterType(&avax.TestVerifiable{}) + return vm.CodecRegistry().RegisterType(&avax.TestState{}) }, }, }}, - nil, - ) - ctx := vm.ctx + }) defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } - ctx.Lock.Unlock() + require.NoError(t, env.vm.Shutdown(context.Background())) + env.vm.ctx.Lock.Unlock() }() - state := vm.state - utxo := &avax.UTXO{ UTXOID: avax.UTXOID{ TxID: ids.Empty, OutputIndex: 1, }, Asset: avax.Asset{ID: ids.Empty}, - Out: &avax.TestVerifiable{}, + Out: &avax.TestState{}, } - state.AddUTXO(utxo) - state.DeleteUTXO(utxo.InputID()) + env.vm.state.AddUTXO(utxo) + env.vm.state.DeleteUTXO(utxo.InputID()) } func 
TestFundingAddresses(t *testing.T) { - _, _, vm, _ := GenesisVMWithArgs( - t, - []*common.Fx{{ + require := require.New(t) + + env := setup(t, &envConfig{ + additionalFxs: []*common.Fx{{ ID: ids.GenerateTestID(), Fx: &FxTest{ InitializeF: func(vmIntf interface{}) error { @@ -157,18 +127,12 @@ func TestFundingAddresses(t *testing.T) { }, }, }}, - nil, - ) - ctx := vm.ctx + }) defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } - ctx.Lock.Unlock() + require.NoError(env.vm.Shutdown(context.Background())) + env.vm.ctx.Lock.Unlock() }() - state := vm.state - utxo := &avax.UTXO{ UTXOID: avax.UTXOID{ TxID: ids.Empty, @@ -180,18 +144,18 @@ func TestFundingAddresses(t *testing.T) { }, } - state.AddUTXO(utxo) - require.NoError(t, state.Commit()) + env.vm.state.AddUTXO(utxo) + require.NoError(env.vm.state.Commit()) - utxos, err := state.UTXOIDs([]byte{0}, ids.Empty, math.MaxInt32) - require.NoError(t, err) - require.Len(t, utxos, 1) - require.Equal(t, utxo.InputID(), utxos[0]) + utxos, err := env.vm.state.UTXOIDs([]byte{0}, ids.Empty, math.MaxInt32) + require.NoError(err) + require.Len(utxos, 1) + require.Equal(utxo.InputID(), utxos[0]) - state.DeleteUTXO(utxo.InputID()) - require.NoError(t, state.Commit()) + env.vm.state.DeleteUTXO(utxo.InputID()) + require.NoError(env.vm.state.Commit()) - utxos, err = state.UTXOIDs([]byte{0}, ids.Empty, math.MaxInt32) - require.NoError(t, err) - require.Empty(t, utxos) + utxos, err = env.vm.state.UTXOIDs([]byte{0}, ids.Empty, math.MaxInt32) + require.NoError(err) + require.Empty(utxos) } diff --git a/avalanchego/vms/avm/static_client.go b/avalanchego/vms/avm/static_client.go deleted file mode 100644 index 78014785..00000000 --- a/avalanchego/vms/avm/static_client.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package avm - -import ( - "context" - - "github.com/ava-labs/avalanchego/utils/rpc" -) - -var _ StaticClient = (*staticClient)(nil) - -// StaticClient for interacting with the AVM static api -type StaticClient interface { - BuildGenesis(ctx context.Context, args *BuildGenesisArgs, options ...rpc.Option) (*BuildGenesisReply, error) -} - -// staticClient is an implementation of an AVM client for interacting with the -// avm static api -type staticClient struct { - requester rpc.EndpointRequester -} - -// NewClient returns an AVM client for interacting with the avm static api -func NewStaticClient(uri string) StaticClient { - return &staticClient{requester: rpc.NewEndpointRequester( - uri + "/ext/vm/avm", - )} -} - -func (c *staticClient) BuildGenesis(ctx context.Context, args *BuildGenesisArgs, options ...rpc.Option) (resp *BuildGenesisReply, err error) { - resp = &BuildGenesisReply{} - err = c.requester.SendRequest(ctx, "avm.buildGenesis", args, resp, options...) - return resp, err -} diff --git a/avalanchego/vms/avm/static_service.go b/avalanchego/vms/avm/static_service.go index 979fa2f8..00738013 100644 --- a/avalanchego/vms/avm/static_service.go +++ b/avalanchego/vms/avm/static_service.go @@ -1,21 +1,19 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package avm import ( + "encoding/json" "errors" "fmt" "net/http" "time" - stdjson "encoding/json" - "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/formatting" "github.com/ava-labs/avalanchego/utils/formatting/address" - "github.com/ava-labs/avalanchego/utils/json" "github.com/ava-labs/avalanchego/vms/avm/fxs" "github.com/ava-labs/avalanchego/vms/avm/txs" "github.com/ava-labs/avalanchego/vms/components/avax" @@ -23,6 +21,8 @@ import ( "github.com/ava-labs/avalanchego/vms/nftfx" "github.com/ava-labs/avalanchego/vms/propertyfx" "github.com/ava-labs/avalanchego/vms/secp256k1fx" + + avajson "github.com/ava-labs/avalanchego/utils/json" ) var ( @@ -56,7 +56,7 @@ func CreateStaticService() *StaticService { // BuildGenesisArgs are arguments for BuildGenesis type BuildGenesisArgs struct { - NetworkID json.Uint32 `json:"networkID"` + NetworkID avajson.Uint32 `json:"networkID"` GenesisData map[string]AssetDefinition `json:"genesisData"` Encoding formatting.Encoding `json:"encoding"` } @@ -64,7 +64,7 @@ type BuildGenesisArgs struct { type AssetDefinition struct { Name string `json:"name"` Symbol string `json:"symbol"` - Denomination json.Uint8 `json:"denomination"` + Denomination avajson.Uint8 `json:"denomination"` InitialState map[string][]interface{} `json:"initialState"` Memo string `json:"memo"` } @@ -79,6 +79,7 @@ type BuildGenesisReply struct { // referenced in the UTXO. 
func (*StaticService) BuildGenesis(_ *http.Request, args *BuildGenesisArgs, reply *BuildGenesisReply) error { parser, err := txs.NewParser( + time.Time{}, time.Time{}, []fxs.Fx{ &secp256k1fx.Fx{}, @@ -118,12 +119,12 @@ func (*StaticService) BuildGenesis(_ *http.Request, args *BuildGenesisArgs, repl switch assetType { case "fixedCap": for _, state := range initialStates { - b, err := stdjson.Marshal(state) + b, err := json.Marshal(state) if err != nil { return fmt.Errorf("problem marshaling state: %w", err) } holder := Holder{} - if err := stdjson.Unmarshal(b, &holder); err != nil { + if err := json.Unmarshal(b, &holder); err != nil { return fmt.Errorf("problem unmarshaling holder: %w", err) } _, addrbuff, err := address.ParseBech32(holder.Address) @@ -144,12 +145,12 @@ func (*StaticService) BuildGenesis(_ *http.Request, args *BuildGenesisArgs, repl } case "variableCap": for _, state := range initialStates { - b, err := stdjson.Marshal(state) + b, err := json.Marshal(state) if err != nil { return fmt.Errorf("problem marshaling state: %w", err) } owners := Owners{} - if err := stdjson.Unmarshal(b, &owners); err != nil { + if err := json.Unmarshal(b, &owners); err != nil { return fmt.Errorf("problem unmarshaling Owners: %w", err) } diff --git a/avalanchego/vms/avm/static_service_test.go b/avalanchego/vms/avm/static_service_test.go deleted file mode 100644 index ed2b7e43..00000000 --- a/avalanchego/vms/avm/static_service_test.go +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package avm - -import ( - "testing" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/formatting" - "github.com/ava-labs/avalanchego/utils/formatting/address" - "github.com/ava-labs/avalanchego/utils/json" -) - -var addrStrArray = []string{ - "A9bTQjfYGBFK3JPRJqF2eh3JYL7cHocvy", - "6mxBGnjGDCKgkVe7yfrmvMA7xE7qCv3vv", - "6ncQ19Q2U4MamkCYzshhD8XFjfwAWFzTa", - "Jz9ayEDt7dx9hDx45aXALujWmL9ZUuqe7", -} - -var testHRP = constants.NetworkIDToHRP[constants.UnitTestID] - -func TestBuildGenesis(t *testing.T) { - ss := CreateStaticService() - addrMap := map[string]string{} - for _, addrStr := range addrStrArray { - addr, err := ids.ShortFromString(addrStr) - if err != nil { - t.Fatal(err) - } - addrMap[addrStr], err = address.FormatBech32(testHRP, addr[:]) - if err != nil { - t.Fatal(err) - } - } - args := BuildGenesisArgs{ - Encoding: formatting.Hex, - GenesisData: map[string]AssetDefinition{ - "asset1": { - Name: "myFixedCapAsset", - Symbol: "MFCA", - Denomination: 8, - InitialState: map[string][]interface{}{ - "fixedCap": { - Holder{ - Amount: 100000, - Address: addrMap["A9bTQjfYGBFK3JPRJqF2eh3JYL7cHocvy"], - }, - Holder{ - Amount: 100000, - Address: addrMap["6mxBGnjGDCKgkVe7yfrmvMA7xE7qCv3vv"], - }, - Holder{ - Amount: json.Uint64(startBalance), - Address: addrMap["6ncQ19Q2U4MamkCYzshhD8XFjfwAWFzTa"], - }, - Holder{ - Amount: json.Uint64(startBalance), - Address: addrMap["Jz9ayEDt7dx9hDx45aXALujWmL9ZUuqe7"], - }, - }, - }, - }, - "asset2": { - Name: "myVarCapAsset", - Symbol: "MVCA", - InitialState: map[string][]interface{}{ - "variableCap": { - Owners{ - Threshold: 1, - Minters: []string{ - addrMap["A9bTQjfYGBFK3JPRJqF2eh3JYL7cHocvy"], - addrMap["6mxBGnjGDCKgkVe7yfrmvMA7xE7qCv3vv"], - }, - }, - Owners{ - Threshold: 2, - Minters: []string{ - addrMap["6ncQ19Q2U4MamkCYzshhD8XFjfwAWFzTa"], - addrMap["Jz9ayEDt7dx9hDx45aXALujWmL9ZUuqe7"], - }, - }, - }, - }, - }, - "asset3": { - Name: 
"myOtherVarCapAsset", - InitialState: map[string][]interface{}{ - "variableCap": { - Owners{ - Threshold: 1, - Minters: []string{ - addrMap["A9bTQjfYGBFK3JPRJqF2eh3JYL7cHocvy"], - }, - }, - }, - }, - }, - }, - } - reply := BuildGenesisReply{} - err := ss.BuildGenesis(nil, &args, &reply) - if err != nil { - t.Fatal(err) - } -} diff --git a/avalanchego/vms/avm/tx.go b/avalanchego/vms/avm/tx.go new file mode 100644 index 00000000..13064a59 --- /dev/null +++ b/avalanchego/vms/avm/tx.go @@ -0,0 +1,133 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package avm + +import ( + "context" + "errors" + "fmt" + + "go.uber.org/zap" + + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/choices" + "github.com/ava-labs/avalanchego/snow/consensus/snowstorm" + "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/vms/avm/txs" + "github.com/ava-labs/avalanchego/vms/avm/txs/executor" +) + +var ( + _ snowstorm.Tx = (*Tx)(nil) + + errTxNotProcessing = errors.New("transaction is not processing") + errUnexpectedReject = errors.New("attempting to reject transaction") +) + +type Tx struct { + vm *VM + tx *txs.Tx +} + +func (tx *Tx) ID() ids.ID { + return tx.tx.ID() +} + +func (tx *Tx) Accept(context.Context) error { + if s := tx.Status(); s != choices.Processing { + return fmt.Errorf("%w: %s", errTxNotProcessing, s) + } + + if err := tx.vm.onAccept(tx.tx); err != nil { + return err + } + + executor := &executor.Executor{ + Codec: tx.vm.txBackend.Codec, + State: tx.vm.state, + Tx: tx.tx, + } + err := tx.tx.Unsigned.Visit(executor) + if err != nil { + return fmt.Errorf("error staging accepted state changes: %w", err) + } + + tx.vm.state.AddTx(tx.tx) + + commitBatch, err := tx.vm.state.CommitBatch() + if err != nil { + txID := tx.tx.ID() + return fmt.Errorf("couldn't create commitBatch while processing tx %s: %w", txID, err) + } 
+ + defer tx.vm.state.Abort() + err = tx.vm.ctx.SharedMemory.Apply( + executor.AtomicRequests, + commitBatch, + ) + if err != nil { + txID := tx.tx.ID() + return fmt.Errorf("error committing accepted state changes while processing tx %s: %w", txID, err) + } + + return tx.vm.metrics.MarkTxAccepted(tx.tx) +} + +func (*Tx) Reject(context.Context) error { + return errUnexpectedReject +} + +func (tx *Tx) Status() choices.Status { + txID := tx.tx.ID() + _, err := tx.vm.state.GetTx(txID) + switch err { + case nil: + return choices.Accepted + case database.ErrNotFound: + return choices.Processing + default: + tx.vm.ctx.Log.Error("failed looking up tx status", + zap.Stringer("txID", txID), + zap.Error(err), + ) + return choices.Processing + } +} + +func (tx *Tx) MissingDependencies() (set.Set[ids.ID], error) { + txIDs := set.Set[ids.ID]{} + for _, in := range tx.tx.Unsigned.InputUTXOs() { + if in.Symbolic() { + continue + } + txID, _ := in.InputSource() + + _, err := tx.vm.state.GetTx(txID) + switch err { + case nil: + // Tx was already accepted + case database.ErrNotFound: + txIDs.Add(txID) + default: + return nil, err + } + } + return txIDs, nil +} + +func (tx *Tx) Bytes() []byte { + return tx.tx.Bytes() +} + +func (tx *Tx) Verify(context.Context) error { + if s := tx.Status(); s != choices.Processing { + return fmt.Errorf("%w: %s", errTxNotProcessing, s) + } + return tx.tx.Unsigned.Visit(&executor.SemanticVerifier{ + Backend: tx.vm.txBackend, + State: tx.vm.state, + Tx: tx.tx, + }) +} diff --git a/avalanchego/vms/avm/tx_init.go b/avalanchego/vms/avm/tx_init.go index 2b016bdd..00112bf6 100644 --- a/avalanchego/vms/avm/tx_init.go +++ b/avalanchego/vms/avm/tx_init.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package avm @@ -44,7 +44,7 @@ func (t *txInit) init() error { t.tx.Unsigned.InitCtx(t.ctx) for _, cred := range t.tx.Creds { - fx, err := t.getParsedFx(cred.Verifiable) + fx, err := t.getParsedFx(cred.Credential) if err != nil { return err } diff --git a/avalanchego/vms/avm/txs/base_tx.go b/avalanchego/vms/avm/txs/base_tx.go index 617769d3..5cc0d222 100644 --- a/avalanchego/vms/avm/txs/base_tx.go +++ b/avalanchego/vms/avm/txs/base_tx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs diff --git a/avalanchego/vms/avm/txs/base_tx_test.go b/avalanchego/vms/avm/txs/base_tx_test.go index 99aeb43e..49aa3109 100644 --- a/avalanchego/vms/avm/txs/base_tx_test.go +++ b/avalanchego/vms/avm/txs/base_tx_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package txs @@ -127,6 +127,7 @@ func TestBaseTxSerialization(t *testing.T) { }}} parser, err := NewParser( + time.Time{}, time.Time{}, []fxs.Fx{ &secp256k1fx.Fx{}, @@ -134,8 +135,8 @@ func TestBaseTxSerialization(t *testing.T) { ) require.NoError(err) - require.NoError(parser.InitializeTx(tx)) - require.Equal(tx.ID().String(), "zeqT8FTnRAxes7QQQYkaWhNkHavd9d6aCdH8TQu2Mx5KEydEz") + require.NoError(tx.Initialize(parser.Codec())) + require.Equal("zeqT8FTnRAxes7QQQYkaWhNkHavd9d6aCdH8TQu2Mx5KEydEz", tx.ID().String()) result := tx.Bytes() require.Equal(expected, result) @@ -190,15 +191,14 @@ func TestBaseTxSerialization(t *testing.T) { 0xc8, 0x06, 0xd7, 0x43, 0x00, } - err = tx.SignSECP256K1Fx( + require.NoError(tx.SignSECP256K1Fx( parser.Codec(), [][]*secp256k1.PrivateKey{ {keys[0], keys[0]}, {keys[0], keys[0]}, }, - ) - require.NoError(err) - require.Equal(tx.ID().String(), "QnTUuie2qe6BKyYrC2jqd73bJ828QNhYnZbdA2HWsnVRPjBfV") + )) + require.Equal("QnTUuie2qe6BKyYrC2jqd73bJ828QNhYnZbdA2HWsnVRPjBfV", tx.ID().String()) // there are two credentials expected[len(expected)-1] = 0x02 @@ -208,46 +208,6 @@ func TestBaseTxSerialization(t *testing.T) { require.Equal(expected, result) } -func TestBaseTxGetters(t *testing.T) { - require := require.New(t) - - tx := &BaseTx{BaseTx: avax.BaseTx{ - NetworkID: constants.UnitTestID, - BlockchainID: chainID, - Outs: []*avax.TransferableOutput{{ - Asset: avax.Asset{ID: assetID}, - Out: &secp256k1fx.TransferOutput{ - Amt: 12345, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, - }, - }}, - Ins: []*avax.TransferableInput{{ - UTXOID: avax.UTXOID{ - TxID: ids.GenerateTestID(), - OutputIndex: 1, - }, - Asset: avax.Asset{ID: assetID}, - In: &secp256k1fx.TransferInput{ - Amt: 54321, - Input: secp256k1fx.Input{ - SigIndices: []uint32{2}, - }, - }, - }}, - }} - - assets := tx.AssetIDs() - require.Len(assets, 1) - require.Contains(assets, assetID) - - consumedAssets := 
tx.ConsumedAssetIDs() - require.Len(consumedAssets, 1) - require.Contains(consumedAssets, assetID) -} - func TestBaseTxNotState(t *testing.T) { require := require.New(t) diff --git a/avalanchego/vms/avm/txs/codec.go b/avalanchego/vms/avm/txs/codec.go index cacf8a76..22b72f5b 100644 --- a/avalanchego/vms/avm/txs/codec.go +++ b/avalanchego/vms/avm/txs/codec.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs diff --git a/avalanchego/vms/avm/txs/create_asset_tx.go b/avalanchego/vms/avm/txs/create_asset_tx.go index 4a80d018..818bea5b 100644 --- a/avalanchego/vms/avm/txs/create_asset_tx.go +++ b/avalanchego/vms/avm/txs/create_asset_tx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs diff --git a/avalanchego/vms/avm/txs/create_asset_tx_test.go b/avalanchego/vms/avm/txs/create_asset_tx_test.go index 57b0b2ca..f5f8c3c4 100644 --- a/avalanchego/vms/avm/txs/create_asset_tx_test.go +++ b/avalanchego/vms/avm/txs/create_asset_tx_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package txs @@ -195,6 +195,7 @@ func TestCreateAssetTxSerialization(t *testing.T) { }} parser, err := NewParser( + time.Time{}, time.Time{}, []fxs.Fx{ &secp256k1fx.Fx{}, @@ -202,7 +203,7 @@ func TestCreateAssetTxSerialization(t *testing.T) { ) require.NoError(err) - require.NoError(parser.InitializeTx(tx)) + require.NoError(tx.Initialize(parser.Codec())) result := tx.Bytes() require.Equal(expected, result) @@ -367,13 +368,14 @@ func TestCreateAssetTxSerializationAgain(t *testing.T) { } parser, err := NewParser( + time.Time{}, time.Time{}, []fxs.Fx{ &secp256k1fx.Fx{}, }, ) require.NoError(err) - require.NoError(parser.InitializeTx(tx)) + require.NoError(tx.Initialize(parser.Codec())) result := tx.Bytes() require.Equal(expected, result) diff --git a/avalanchego/vms/avm/txs/executor/backend.go b/avalanchego/vms/avm/txs/executor/backend.go index fbf4a756..fdb02042 100644 --- a/avalanchego/vms/avm/txs/executor/backend.go +++ b/avalanchego/vms/avm/txs/executor/backend.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package executor diff --git a/avalanchego/vms/avm/txs/executor/executor.go b/avalanchego/vms/avm/txs/executor/executor.go index 040b1d9c..2e7db565 100644 --- a/avalanchego/vms/avm/txs/executor/executor.go +++ b/avalanchego/vms/avm/txs/executor/executor.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package executor @@ -10,7 +10,7 @@ import ( "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/set" - "github.com/ava-labs/avalanchego/vms/avm/states" + "github.com/ava-labs/avalanchego/vms/avm/state" "github.com/ava-labs/avalanchego/vms/avm/txs" "github.com/ava-labs/avalanchego/vms/components/avax" ) @@ -19,7 +19,7 @@ var _ txs.Visitor = (*Executor)(nil) type Executor struct { Codec codec.Manager - State states.Chain // state will be modified + State state.Chain // state will be modified Tx *txs.Tx Inputs set.Set[ids.ID] // imported inputs AtomicRequests map[ids.ID]*atomic.Requests // may be nil diff --git a/avalanchego/vms/avm/txs/executor/executor_test.go b/avalanchego/vms/avm/txs/executor/executor_test.go new file mode 100644 index 00000000..3823fcc3 --- /dev/null +++ b/avalanchego/vms/avm/txs/executor/executor_test.go @@ -0,0 +1,464 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package executor + +import ( + "testing" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/database/memdb" + "github.com/ava-labs/avalanchego/database/versiondb" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + "github.com/ava-labs/avalanchego/utils/units" + "github.com/ava-labs/avalanchego/vms/avm/block" + "github.com/ava-labs/avalanchego/vms/avm/fxs" + "github.com/ava-labs/avalanchego/vms/avm/state" + "github.com/ava-labs/avalanchego/vms/avm/txs" + "github.com/ava-labs/avalanchego/vms/components/avax" + "github.com/ava-labs/avalanchego/vms/components/verify" + "github.com/ava-labs/avalanchego/vms/secp256k1fx" +) + +const trackChecksums = false + +var ( + chainID = ids.ID{5, 4, 3, 2, 1} + assetID = ids.ID{1, 2, 3} +) + +func TestBaseTxExecutor(t *testing.T) { + require := require.New(t) + + secpFx := &secp256k1fx.Fx{} + parser, err := block.NewParser( + time.Time{}, + time.Time{}, + []fxs.Fx{secpFx}, + ) + require.NoError(err) + codec := parser.Codec() + + db := memdb.New() + vdb := versiondb.New(db) + registerer := prometheus.NewRegistry() + state, err := state.New(vdb, parser, registerer, trackChecksums) + require.NoError(err) + + utxoID := avax.UTXOID{ + TxID: ids.GenerateTestID(), + OutputIndex: 1, + } + + addr := keys[0].Address() + utxo := &avax.UTXO{ + UTXOID: utxoID, + Asset: avax.Asset{ID: assetID}, + Out: &secp256k1fx.TransferOutput{ + Amt: 20 * units.KiloAvax, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + addr, + }, + }, + }, + } + + // Populate the UTXO that we will be consuming + state.AddUTXO(utxo) + require.NoError(state.Commit()) + + baseTx := &txs.Tx{Unsigned: &txs.BaseTx{BaseTx: avax.BaseTx{ + NetworkID: constants.UnitTestID, + BlockchainID: chainID, + Ins: 
[]*avax.TransferableInput{{ + UTXOID: utxoID, + Asset: avax.Asset{ID: assetID}, + In: &secp256k1fx.TransferInput{ + Amt: 20 * units.KiloAvax, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, + }, + }, + }, + }}, + Outs: []*avax.TransferableOutput{{ + Asset: avax.Asset{ID: assetID}, + Out: &secp256k1fx.TransferOutput{ + Amt: 10 * units.KiloAvax, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{addr}, + }, + }, + }}, + }}} + require.NoError(baseTx.SignSECP256K1Fx(codec, [][]*secp256k1.PrivateKey{{keys[0]}})) + + executor := &Executor{ + Codec: codec, + State: state, + Tx: baseTx, + } + + // Execute baseTx + require.NoError(baseTx.Unsigned.Visit(executor)) + + // Verify the consumed UTXO was removed from the state + _, err = executor.State.GetUTXO(utxoID.InputID()) + require.ErrorIs(err, database.ErrNotFound) + + // Verify the produced UTXO was added to the state + expectedOutputUTXO := &avax.UTXO{ + UTXOID: avax.UTXOID{ + TxID: baseTx.TxID, + OutputIndex: 0, + }, + Asset: avax.Asset{ + ID: assetID, + }, + Out: &secp256k1fx.TransferOutput{ + Amt: 10 * units.KiloAvax, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{addr}, + }, + }, + } + expectedOutputUTXOID := expectedOutputUTXO.InputID() + outputUTXO, err := executor.State.GetUTXO(expectedOutputUTXOID) + require.NoError(err) + + outputUTXOID := outputUTXO.InputID() + require.Equal(expectedOutputUTXOID, outputUTXOID) + require.Equal(expectedOutputUTXO, outputUTXO) +} + +func TestCreateAssetTxExecutor(t *testing.T) { + require := require.New(t) + + secpFx := &secp256k1fx.Fx{} + parser, err := block.NewParser( + time.Time{}, + time.Time{}, + []fxs.Fx{secpFx}, + ) + require.NoError(err) + codec := parser.Codec() + + db := memdb.New() + vdb := versiondb.New(db) + registerer := prometheus.NewRegistry() + state, err := state.New(vdb, parser, registerer, trackChecksums) + require.NoError(err) + + utxoID := avax.UTXOID{ + TxID: ids.GenerateTestID(), + 
OutputIndex: 1, + } + + addr := keys[0].Address() + utxo := &avax.UTXO{ + UTXOID: utxoID, + Asset: avax.Asset{ID: assetID}, + Out: &secp256k1fx.TransferOutput{ + Amt: 20 * units.KiloAvax, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + addr, + }, + }, + }, + } + + // Populate the UTXO that we will be consuming + state.AddUTXO(utxo) + require.NoError(state.Commit()) + + createAssetTx := &txs.Tx{Unsigned: &txs.CreateAssetTx{ + BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ + NetworkID: constants.UnitTestID, + BlockchainID: chainID, + Ins: []*avax.TransferableInput{{ + UTXOID: utxoID, + Asset: avax.Asset{ID: assetID}, + In: &secp256k1fx.TransferInput{ + Amt: 20 * units.KiloAvax, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, + }, + }, + }, + }}, + Outs: []*avax.TransferableOutput{{ + Asset: avax.Asset{ID: assetID}, + Out: &secp256k1fx.TransferOutput{ + Amt: 10 * units.KiloAvax, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{addr}, + }, + }, + }}, + }}, + Name: "name", + Symbol: "symb", + Denomination: 0, + States: []*txs.InitialState{ + { + FxIndex: 0, + Outs: []verify.State{ + &secp256k1fx.MintOutput{ + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{addr}, + }, + }, + }, + }, + }, + }} + require.NoError(createAssetTx.SignSECP256K1Fx(codec, [][]*secp256k1.PrivateKey{{keys[0]}})) + + executor := &Executor{ + Codec: codec, + State: state, + Tx: createAssetTx, + } + + // Execute createAssetTx + require.NoError(createAssetTx.Unsigned.Visit(executor)) + + // Verify the consumed UTXO was removed from the state + _, err = executor.State.GetUTXO(utxoID.InputID()) + require.ErrorIs(err, database.ErrNotFound) + + // Verify the produced UTXOs were added to the state + txID := createAssetTx.ID() + expectedOutputUTXOs := []*avax.UTXO{ + { + UTXOID: avax.UTXOID{ + TxID: txID, + OutputIndex: 0, + }, + Asset: avax.Asset{ + ID: assetID, + }, + Out: &secp256k1fx.TransferOutput{ + 
Amt: 10 * units.KiloAvax, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{addr}, + }, + }, + }, + { + UTXOID: avax.UTXOID{ + TxID: txID, + OutputIndex: 1, + }, + Asset: avax.Asset{ + ID: txID, + }, + Out: &secp256k1fx.MintOutput{ + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{addr}, + }, + }, + }, + } + for _, expectedOutputUTXO := range expectedOutputUTXOs { + expectedOutputUTXOID := expectedOutputUTXO.InputID() + outputUTXO, err := executor.State.GetUTXO(expectedOutputUTXOID) + require.NoError(err) + + outputUTXOID := outputUTXO.InputID() + require.Equal(expectedOutputUTXOID, outputUTXOID) + require.Equal(expectedOutputUTXO, outputUTXO) + } +} + +func TestOperationTxExecutor(t *testing.T) { + require := require.New(t) + + secpFx := &secp256k1fx.Fx{} + parser, err := block.NewParser( + time.Time{}, + time.Time{}, + []fxs.Fx{secpFx}, + ) + require.NoError(err) + codec := parser.Codec() + + db := memdb.New() + vdb := versiondb.New(db) + registerer := prometheus.NewRegistry() + state, err := state.New(vdb, parser, registerer, trackChecksums) + require.NoError(err) + + outputOwners := secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + keys[0].Address(), + }, + } + + utxoID := avax.UTXOID{ + TxID: ids.GenerateTestID(), + OutputIndex: 1, + } + utxo := &avax.UTXO{ + UTXOID: utxoID, + Asset: avax.Asset{ID: assetID}, + Out: &secp256k1fx.TransferOutput{ + Amt: 20 * units.KiloAvax, + OutputOwners: outputOwners, + }, + } + + opUTXOID := avax.UTXOID{ + TxID: ids.GenerateTestID(), + OutputIndex: 1, + } + opUTXO := &avax.UTXO{ + UTXOID: opUTXOID, + Asset: avax.Asset{ID: assetID}, + Out: &secp256k1fx.MintOutput{ + OutputOwners: outputOwners, + }, + } + + // Populate the UTXOs that we will be consuming + state.AddUTXO(utxo) + state.AddUTXO(opUTXO) + require.NoError(state.Commit()) + + operationTx := &txs.Tx{Unsigned: &txs.OperationTx{ + BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ + NetworkID: 
constants.UnitTestID, + BlockchainID: chainID, + Ins: []*avax.TransferableInput{{ + UTXOID: utxoID, + Asset: avax.Asset{ID: assetID}, + In: &secp256k1fx.TransferInput{ + Amt: 20 * units.KiloAvax, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, + }, + }, + }, + }}, + Outs: []*avax.TransferableOutput{{ + Asset: avax.Asset{ID: assetID}, + Out: &secp256k1fx.TransferOutput{ + Amt: 10 * units.KiloAvax, + OutputOwners: outputOwners, + }, + }}, + }}, + Ops: []*txs.Operation{{ + Asset: avax.Asset{ID: assetID}, + UTXOIDs: []*avax.UTXOID{ + &opUTXOID, + }, + Op: &secp256k1fx.MintOperation{ + MintInput: secp256k1fx.Input{ + SigIndices: []uint32{0}, + }, + MintOutput: secp256k1fx.MintOutput{ + OutputOwners: outputOwners, + }, + TransferOutput: secp256k1fx.TransferOutput{ + Amt: 12345, + OutputOwners: outputOwners, + }, + }, + }}, + }} + require.NoError(operationTx.SignSECP256K1Fx( + codec, + [][]*secp256k1.PrivateKey{ + {keys[0]}, + {keys[0]}, + }, + )) + + executor := &Executor{ + Codec: codec, + State: state, + Tx: operationTx, + } + + // Execute operationTx + require.NoError(operationTx.Unsigned.Visit(executor)) + + // Verify the consumed UTXOs were removed from the state + _, err = executor.State.GetUTXO(utxo.InputID()) + require.ErrorIs(err, database.ErrNotFound) + _, err = executor.State.GetUTXO(opUTXO.InputID()) + require.ErrorIs(err, database.ErrNotFound) + + // Verify the produced UTXOs were added to the state + txID := operationTx.ID() + expectedOutputUTXOs := []*avax.UTXO{ + { + UTXOID: avax.UTXOID{ + TxID: txID, + OutputIndex: 0, + }, + Asset: avax.Asset{ + ID: assetID, + }, + Out: &secp256k1fx.TransferOutput{ + Amt: 10 * units.KiloAvax, + OutputOwners: outputOwners, + }, + }, + { + UTXOID: avax.UTXOID{ + TxID: txID, + OutputIndex: 1, + }, + Asset: avax.Asset{ + ID: assetID, + }, + Out: &secp256k1fx.MintOutput{ + OutputOwners: outputOwners, + }, + }, + { + UTXOID: avax.UTXOID{ + TxID: txID, + OutputIndex: 2, + }, + Asset: avax.Asset{ + ID: assetID, + }, + 
Out: &secp256k1fx.TransferOutput{ + Amt: 12345, + OutputOwners: outputOwners, + }, + }, + } + for _, expectedOutputUTXO := range expectedOutputUTXOs { + expectedOutputUTXOID := expectedOutputUTXO.InputID() + outputUTXO, err := executor.State.GetUTXO(expectedOutputUTXOID) + require.NoError(err) + + outputUTXOID := outputUTXO.InputID() + require.Equal(expectedOutputUTXOID, outputUTXOID) + require.Equal(expectedOutputUTXO, outputUTXO) + } +} diff --git a/avalanchego/vms/avm/txs/executor/semantic_verifier.go b/avalanchego/vms/avm/txs/executor/semantic_verifier.go index 6b91d2de..efa4d4cd 100644 --- a/avalanchego/vms/avm/txs/executor/semantic_verifier.go +++ b/avalanchego/vms/avm/txs/executor/semantic_verifier.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package executor @@ -10,7 +10,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/version" - "github.com/ava-labs/avalanchego/vms/avm/states" + "github.com/ava-labs/avalanchego/vms/avm/state" "github.com/ava-labs/avalanchego/vms/avm/txs" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/verify" @@ -27,7 +27,7 @@ var ( type SemanticVerifier struct { *Backend - State states.ReadOnlyChain + State state.ReadOnlyChain Tx *txs.Tx } @@ -35,7 +35,7 @@ func (v *SemanticVerifier) BaseTx(tx *txs.BaseTx) error { for i, in := range tx.Ins { // Note: Verification of the length of [t.tx.Creds] happens during // syntactic verification, which happens before semantic verification. 
- cred := v.Tx.Creds[i].Verifiable + cred := v.Tx.Creds[i].Credential if err := v.verifyTransfer(tx, in, cred); err != nil { return err } @@ -73,7 +73,7 @@ func (v *SemanticVerifier) OperationTx(tx *txs.OperationTx) error { for i, op := range tx.Ops { // Note: Verification of the length of [t.tx.Creds] happens during // syntactic verification, which happens before semantic verification. - cred := v.Tx.Creds[i+offset].Verifiable + cred := v.Tx.Creds[i+offset].Credential if err := v.verifyOperation(tx, op, cred); err != nil { return err } @@ -114,7 +114,7 @@ func (v *SemanticVerifier) ImportTx(tx *txs.ImportTx) error { // Note: Verification of the length of [t.tx.Creds] happens during // syntactic verification, which happens before semantic verification. - cred := v.Tx.Creds[i+offset].Verifiable + cred := v.Tx.Creds[i+offset].Credential if err := v.verifyTransferOfUTXO(tx, in, cred, &utxo); err != nil { return err } @@ -152,7 +152,7 @@ func (v *SemanticVerifier) verifyTransfer( in *avax.TransferableInput, cred verify.Verifiable, ) error { - utxo, err := v.State.GetUTXOFromID(&in.UTXOID) + utxo, err := v.State.GetUTXO(in.UTXOID.InputID()) if err != nil { return err } @@ -195,7 +195,7 @@ func (v *SemanticVerifier) verifyOperation( utxos = make([]interface{}, numUTXOs) ) for i, utxoID := range op.UTXOIDs { - utxo, err := v.State.GetUTXOFromID(utxoID) + utxo, err := v.State.GetUTXO(utxoID.InputID()) if err != nil { return err } diff --git a/avalanchego/vms/avm/txs/executor/semantic_verifier_test.go b/avalanchego/vms/avm/txs/executor/semantic_verifier_test.go index da2b13f4..3a809038 100644 --- a/avalanchego/vms/avm/txs/executor/semantic_verifier_test.go +++ b/avalanchego/vms/avm/txs/executor/semantic_verifier_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package executor @@ -8,18 +8,22 @@ import ( "testing" "time" - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" + "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/database/memdb" + "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/vms/avm/fxs" - "github.com/ava-labs/avalanchego/vms/avm/states" + "github.com/ava-labs/avalanchego/vms/avm/state" "github.com/ava-labs/avalanchego/vms/avm/txs" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/verify" @@ -27,11 +31,12 @@ import ( ) func TestSemanticVerifierBaseTx(t *testing.T) { - ctx := newContext(t) + ctx := snowtest.Context(t, snowtest.XChainID) typeToFxIndex := make(map[reflect.Type]int) secpFx := &secp256k1fx.Fx{} parser, err := txs.NewCustomParser( + time.Time{}, time.Time{}, typeToFxIndex, new(mockable.Clock), @@ -115,16 +120,16 @@ func TestSemanticVerifierBaseTx(t *testing.T) { tests := []struct { name string - stateFunc func(*gomock.Controller) states.Chain + stateFunc func(*gomock.Controller) state.Chain txFunc func(*require.Assertions) *txs.Tx err error }{ { name: "valid", - stateFunc: func(ctrl *gomock.Controller) states.Chain { - state := states.NewMockChain(ctrl) + stateFunc: func(ctrl *gomock.Controller) state.Chain { + state := state.NewMockChain(ctrl) - state.EXPECT().GetUTXOFromID(&utxoID).Return(&utxo, nil) + state.EXPECT().GetUTXO(utxoID.InputID()).Return(&utxo, nil) state.EXPECT().GetTx(asset.ID).Return(&createAssetTx, nil) return state @@ -133,26 
+138,25 @@ func TestSemanticVerifierBaseTx(t *testing.T) { tx := &txs.Tx{ Unsigned: &baseTx, } - err := tx.SignSECP256K1Fx( + require.NoError(tx.SignSECP256K1Fx( codec, [][]*secp256k1.PrivateKey{ {keys[0]}, }, - ) - require.NoError(err) + )) return tx }, err: nil, }, { name: "assetID mismatch", - stateFunc: func(ctrl *gomock.Controller) states.Chain { - state := states.NewMockChain(ctrl) + stateFunc: func(ctrl *gomock.Controller) state.Chain { + state := state.NewMockChain(ctrl) utxo := utxo utxo.Asset.ID = ids.GenerateTestID() - state.EXPECT().GetUTXOFromID(&utxoID).Return(&utxo, nil) + state.EXPECT().GetUTXO(utxoID.InputID()).Return(&utxo, nil) return state }, @@ -160,21 +164,20 @@ func TestSemanticVerifierBaseTx(t *testing.T) { tx := &txs.Tx{ Unsigned: &baseTx, } - err := tx.SignSECP256K1Fx( + require.NoError(tx.SignSECP256K1Fx( codec, [][]*secp256k1.PrivateKey{ {keys[0]}, }, - ) - require.NoError(err) + )) return tx }, err: errAssetIDMismatch, }, { name: "not allowed input feature extension", - stateFunc: func(ctrl *gomock.Controller) states.Chain { - state := states.NewMockChain(ctrl) + stateFunc: func(ctrl *gomock.Controller) state.Chain { + state := state.NewMockChain(ctrl) unsignedCreateAssetTx := unsignedCreateAssetTx unsignedCreateAssetTx.States = nil @@ -183,7 +186,7 @@ func TestSemanticVerifierBaseTx(t *testing.T) { Unsigned: &unsignedCreateAssetTx, } - state.EXPECT().GetUTXOFromID(&utxoID).Return(&utxo, nil) + state.EXPECT().GetUTXO(utxoID.InputID()).Return(&utxo, nil) state.EXPECT().GetTx(asset.ID).Return(&createAssetTx, nil) state.EXPECT().GetTimestamp().Return(time.Time{}) @@ -193,23 +196,22 @@ func TestSemanticVerifierBaseTx(t *testing.T) { tx := &txs.Tx{ Unsigned: &baseTx, } - err := tx.SignSECP256K1Fx( + require.NoError(tx.SignSECP256K1Fx( codec, [][]*secp256k1.PrivateKey{ {keys[0]}, }, - ) - require.NoError(err) + )) return tx }, err: errIncompatibleFx, }, { name: "invalid signature", - stateFunc: func(ctrl *gomock.Controller) states.Chain { - 
state := states.NewMockChain(ctrl) + stateFunc: func(ctrl *gomock.Controller) state.Chain { + state := state.NewMockChain(ctrl) - state.EXPECT().GetUTXOFromID(&utxoID).Return(&utxo, nil) + state.EXPECT().GetUTXO(utxoID.InputID()).Return(&utxo, nil) state.EXPECT().GetTx(asset.ID).Return(&createAssetTx, nil) return state @@ -218,23 +220,22 @@ func TestSemanticVerifierBaseTx(t *testing.T) { tx := &txs.Tx{ Unsigned: &baseTx, } - err := tx.SignSECP256K1Fx( + require.NoError(tx.SignSECP256K1Fx( codec, [][]*secp256k1.PrivateKey{ {keys[1]}, }, - ) - require.NoError(err) + )) return tx }, err: secp256k1fx.ErrWrongSig, }, { name: "missing UTXO", - stateFunc: func(ctrl *gomock.Controller) states.Chain { - state := states.NewMockChain(ctrl) + stateFunc: func(ctrl *gomock.Controller) state.Chain { + state := state.NewMockChain(ctrl) - state.EXPECT().GetUTXOFromID(&utxoID).Return(nil, database.ErrNotFound) + state.EXPECT().GetUTXO(utxoID.InputID()).Return(nil, database.ErrNotFound) return state }, @@ -242,21 +243,20 @@ func TestSemanticVerifierBaseTx(t *testing.T) { tx := &txs.Tx{ Unsigned: &baseTx, } - err := tx.SignSECP256K1Fx( + require.NoError(tx.SignSECP256K1Fx( codec, [][]*secp256k1.PrivateKey{ {keys[0]}, }, - ) - require.NoError(err) + )) return tx }, err: database.ErrNotFound, }, { name: "invalid UTXO amount", - stateFunc: func(ctrl *gomock.Controller) states.Chain { - state := states.NewMockChain(ctrl) + stateFunc: func(ctrl *gomock.Controller) state.Chain { + state := state.NewMockChain(ctrl) output := output output.Amt-- @@ -264,7 +264,7 @@ func TestSemanticVerifierBaseTx(t *testing.T) { utxo := utxo utxo.Out = &output - state.EXPECT().GetUTXOFromID(&utxoID).Return(&utxo, nil) + state.EXPECT().GetUTXO(utxoID.InputID()).Return(&utxo, nil) state.EXPECT().GetTx(asset.ID).Return(&createAssetTx, nil) return state @@ -273,21 +273,20 @@ func TestSemanticVerifierBaseTx(t *testing.T) { tx := &txs.Tx{ Unsigned: &baseTx, } - err := tx.SignSECP256K1Fx( + 
require.NoError(tx.SignSECP256K1Fx( codec, [][]*secp256k1.PrivateKey{ {keys[0]}, }, - ) - require.NoError(err) + )) return tx }, err: secp256k1fx.ErrMismatchedAmounts, }, { name: "not allowed output feature extension", - stateFunc: func(ctrl *gomock.Controller) states.Chain { - state := states.NewMockChain(ctrl) + stateFunc: func(ctrl *gomock.Controller) state.Chain { + state := state.NewMockChain(ctrl) unsignedCreateAssetTx := unsignedCreateAssetTx unsignedCreateAssetTx.States = nil @@ -313,21 +312,20 @@ func TestSemanticVerifierBaseTx(t *testing.T) { tx := &txs.Tx{ Unsigned: &baseTx, } - err := tx.SignSECP256K1Fx( + require.NoError(tx.SignSECP256K1Fx( codec, [][]*secp256k1.PrivateKey{}, - ) - require.NoError(err) + )) return tx }, err: errIncompatibleFx, }, { name: "unknown asset", - stateFunc: func(ctrl *gomock.Controller) states.Chain { - state := states.NewMockChain(ctrl) + stateFunc: func(ctrl *gomock.Controller) state.Chain { + state := state.NewMockChain(ctrl) - state.EXPECT().GetUTXOFromID(&utxoID).Return(&utxo, nil) + state.EXPECT().GetUTXO(utxoID.InputID()).Return(&utxo, nil) state.EXPECT().GetTx(asset.ID).Return(nil, database.ErrNotFound) return state @@ -336,27 +334,26 @@ func TestSemanticVerifierBaseTx(t *testing.T) { tx := &txs.Tx{ Unsigned: &baseTx, } - err := tx.SignSECP256K1Fx( + require.NoError(tx.SignSECP256K1Fx( codec, [][]*secp256k1.PrivateKey{ {keys[0]}, }, - ) - require.NoError(err) + )) return tx }, err: database.ErrNotFound, }, { name: "not an asset", - stateFunc: func(ctrl *gomock.Controller) states.Chain { - state := states.NewMockChain(ctrl) + stateFunc: func(ctrl *gomock.Controller) state.Chain { + state := state.NewMockChain(ctrl) tx := txs.Tx{ Unsigned: &baseTx, } - state.EXPECT().GetUTXOFromID(&utxoID).Return(&utxo, nil) + state.EXPECT().GetUTXO(utxoID.InputID()).Return(&utxo, nil) state.EXPECT().GetTx(asset.ID).Return(&tx, nil) return state @@ -365,13 +362,12 @@ func TestSemanticVerifierBaseTx(t *testing.T) { tx := &txs.Tx{ 
Unsigned: &baseTx, } - err := tx.SignSECP256K1Fx( + require.NoError(tx.SignSECP256K1Fx( codec, [][]*secp256k1.PrivateKey{ {keys[0]}, }, - ) - require.NoError(err) + )) return tx }, err: errNotAnAsset, @@ -381,7 +377,6 @@ func TestSemanticVerifierBaseTx(t *testing.T) { t.Run(test.name, func(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() state := test.stateFunc(ctrl) tx := test.txFunc(require) @@ -397,18 +392,12 @@ func TestSemanticVerifierBaseTx(t *testing.T) { } func TestSemanticVerifierExportTx(t *testing.T) { - ctx := newContext(t) - - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - validatorState := validators.NewMockState(ctrl) - validatorState.EXPECT().GetSubnetID(gomock.Any(), ctx.CChainID).AnyTimes().Return(ctx.SubnetID, nil) - ctx.ValidatorState = validatorState + ctx := snowtest.Context(t, snowtest.XChainID) typeToFxIndex := make(map[reflect.Type]int) secpFx := &secp256k1fx.Fx{} parser, err := txs.NewCustomParser( + time.Time{}, time.Time{}, typeToFxIndex, new(mockable.Clock), @@ -496,16 +485,16 @@ func TestSemanticVerifierExportTx(t *testing.T) { tests := []struct { name string - stateFunc func(*gomock.Controller) states.Chain + stateFunc func(*gomock.Controller) state.Chain txFunc func(*require.Assertions) *txs.Tx err error }{ { name: "valid", - stateFunc: func(ctrl *gomock.Controller) states.Chain { - state := states.NewMockChain(ctrl) + stateFunc: func(ctrl *gomock.Controller) state.Chain { + state := state.NewMockChain(ctrl) - state.EXPECT().GetUTXOFromID(&utxoID).Return(&utxo, nil) + state.EXPECT().GetUTXO(utxoID.InputID()).Return(&utxo, nil) state.EXPECT().GetTx(asset.ID).Return(&createAssetTx, nil) return state @@ -514,26 +503,25 @@ func TestSemanticVerifierExportTx(t *testing.T) { tx := &txs.Tx{ Unsigned: &exportTx, } - err := tx.SignSECP256K1Fx( + require.NoError(tx.SignSECP256K1Fx( codec, [][]*secp256k1.PrivateKey{ {keys[0]}, }, - ) - require.NoError(err) + )) return tx }, err: nil, }, { 
name: "assetID mismatch", - stateFunc: func(ctrl *gomock.Controller) states.Chain { - state := states.NewMockChain(ctrl) + stateFunc: func(ctrl *gomock.Controller) state.Chain { + state := state.NewMockChain(ctrl) utxo := utxo utxo.Asset.ID = ids.GenerateTestID() - state.EXPECT().GetUTXOFromID(&utxoID).Return(&utxo, nil) + state.EXPECT().GetUTXO(utxoID.InputID()).Return(&utxo, nil) return state }, @@ -541,21 +529,20 @@ func TestSemanticVerifierExportTx(t *testing.T) { tx := &txs.Tx{ Unsigned: &exportTx, } - err := tx.SignSECP256K1Fx( + require.NoError(tx.SignSECP256K1Fx( codec, [][]*secp256k1.PrivateKey{ {keys[0]}, }, - ) - require.NoError(err) + )) return tx }, err: errAssetIDMismatch, }, { name: "not allowed input feature extension", - stateFunc: func(ctrl *gomock.Controller) states.Chain { - state := states.NewMockChain(ctrl) + stateFunc: func(ctrl *gomock.Controller) state.Chain { + state := state.NewMockChain(ctrl) unsignedCreateAssetTx := unsignedCreateAssetTx unsignedCreateAssetTx.States = nil @@ -564,7 +551,7 @@ func TestSemanticVerifierExportTx(t *testing.T) { Unsigned: &unsignedCreateAssetTx, } - state.EXPECT().GetUTXOFromID(&utxoID).Return(&utxo, nil) + state.EXPECT().GetUTXO(utxoID.InputID()).Return(&utxo, nil) state.EXPECT().GetTx(asset.ID).Return(&createAssetTx, nil) state.EXPECT().GetTimestamp().Return(time.Time{}) @@ -574,23 +561,22 @@ func TestSemanticVerifierExportTx(t *testing.T) { tx := &txs.Tx{ Unsigned: &exportTx, } - err := tx.SignSECP256K1Fx( + require.NoError(tx.SignSECP256K1Fx( codec, [][]*secp256k1.PrivateKey{ {keys[0]}, }, - ) - require.NoError(err) + )) return tx }, err: errIncompatibleFx, }, { name: "invalid signature", - stateFunc: func(ctrl *gomock.Controller) states.Chain { - state := states.NewMockChain(ctrl) + stateFunc: func(ctrl *gomock.Controller) state.Chain { + state := state.NewMockChain(ctrl) - state.EXPECT().GetUTXOFromID(&utxoID).Return(&utxo, nil) + state.EXPECT().GetUTXO(utxoID.InputID()).Return(&utxo, nil) 
state.EXPECT().GetTx(asset.ID).Return(&createAssetTx, nil) return state @@ -599,23 +585,22 @@ func TestSemanticVerifierExportTx(t *testing.T) { tx := &txs.Tx{ Unsigned: &exportTx, } - err := tx.SignSECP256K1Fx( + require.NoError(tx.SignSECP256K1Fx( codec, [][]*secp256k1.PrivateKey{ {keys[1]}, }, - ) - require.NoError(err) + )) return tx }, err: secp256k1fx.ErrWrongSig, }, { name: "missing UTXO", - stateFunc: func(ctrl *gomock.Controller) states.Chain { - state := states.NewMockChain(ctrl) + stateFunc: func(ctrl *gomock.Controller) state.Chain { + state := state.NewMockChain(ctrl) - state.EXPECT().GetUTXOFromID(&utxoID).Return(nil, database.ErrNotFound) + state.EXPECT().GetUTXO(utxoID.InputID()).Return(nil, database.ErrNotFound) return state }, @@ -623,21 +608,20 @@ func TestSemanticVerifierExportTx(t *testing.T) { tx := &txs.Tx{ Unsigned: &exportTx, } - err := tx.SignSECP256K1Fx( + require.NoError(tx.SignSECP256K1Fx( codec, [][]*secp256k1.PrivateKey{ {keys[0]}, }, - ) - require.NoError(err) + )) return tx }, err: database.ErrNotFound, }, { name: "invalid UTXO amount", - stateFunc: func(ctrl *gomock.Controller) states.Chain { - state := states.NewMockChain(ctrl) + stateFunc: func(ctrl *gomock.Controller) state.Chain { + state := state.NewMockChain(ctrl) output := output output.Amt-- @@ -645,7 +629,7 @@ func TestSemanticVerifierExportTx(t *testing.T) { utxo := utxo utxo.Out = &output - state.EXPECT().GetUTXOFromID(&utxoID).Return(&utxo, nil) + state.EXPECT().GetUTXO(utxoID.InputID()).Return(&utxo, nil) state.EXPECT().GetTx(asset.ID).Return(&createAssetTx, nil) return state @@ -654,21 +638,20 @@ func TestSemanticVerifierExportTx(t *testing.T) { tx := &txs.Tx{ Unsigned: &exportTx, } - err := tx.SignSECP256K1Fx( + require.NoError(tx.SignSECP256K1Fx( codec, [][]*secp256k1.PrivateKey{ {keys[0]}, }, - ) - require.NoError(err) + )) return tx }, err: secp256k1fx.ErrMismatchedAmounts, }, { name: "not allowed output feature extension", - stateFunc: func(ctrl 
*gomock.Controller) states.Chain { - state := states.NewMockChain(ctrl) + stateFunc: func(ctrl *gomock.Controller) state.Chain { + state := state.NewMockChain(ctrl) unsignedCreateAssetTx := unsignedCreateAssetTx unsignedCreateAssetTx.States = nil @@ -694,21 +677,20 @@ func TestSemanticVerifierExportTx(t *testing.T) { tx := &txs.Tx{ Unsigned: &exportTx, } - err := tx.SignSECP256K1Fx( + require.NoError(tx.SignSECP256K1Fx( codec, [][]*secp256k1.PrivateKey{}, - ) - require.NoError(err) + )) return tx }, err: errIncompatibleFx, }, { name: "unknown asset", - stateFunc: func(ctrl *gomock.Controller) states.Chain { - state := states.NewMockChain(ctrl) + stateFunc: func(ctrl *gomock.Controller) state.Chain { + state := state.NewMockChain(ctrl) - state.EXPECT().GetUTXOFromID(&utxoID).Return(&utxo, nil) + state.EXPECT().GetUTXO(utxoID.InputID()).Return(&utxo, nil) state.EXPECT().GetTx(asset.ID).Return(nil, database.ErrNotFound) return state @@ -717,27 +699,26 @@ func TestSemanticVerifierExportTx(t *testing.T) { tx := &txs.Tx{ Unsigned: &exportTx, } - err := tx.SignSECP256K1Fx( + require.NoError(tx.SignSECP256K1Fx( codec, [][]*secp256k1.PrivateKey{ {keys[0]}, }, - ) - require.NoError(err) + )) return tx }, err: database.ErrNotFound, }, { name: "not an asset", - stateFunc: func(ctrl *gomock.Controller) states.Chain { - state := states.NewMockChain(ctrl) + stateFunc: func(ctrl *gomock.Controller) state.Chain { + state := state.NewMockChain(ctrl) tx := txs.Tx{ Unsigned: &baseTx, } - state.EXPECT().GetUTXOFromID(&utxoID).Return(&utxo, nil) + state.EXPECT().GetUTXO(utxoID.InputID()).Return(&utxo, nil) state.EXPECT().GetTx(asset.ID).Return(&tx, nil) return state @@ -746,13 +727,12 @@ func TestSemanticVerifierExportTx(t *testing.T) { tx := &txs.Tx{ Unsigned: &exportTx, } - err := tx.SignSECP256K1Fx( + require.NoError(tx.SignSECP256K1Fx( codec, [][]*secp256k1.PrivateKey{ {keys[0]}, }, - ) - require.NoError(err) + )) return tx }, err: errNotAnAsset, @@ -762,7 +742,6 @@ func 
TestSemanticVerifierExportTx(t *testing.T) { t.Run(test.name, func(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() state := test.stateFunc(ctrl) tx := test.txFunc(require) @@ -779,11 +758,9 @@ func TestSemanticVerifierExportTx(t *testing.T) { func TestSemanticVerifierExportTxDifferentSubnet(t *testing.T) { require := require.New(t) - - ctx := newContext(t) - ctrl := gomock.NewController(t) - defer ctrl.Finish() + + ctx := snowtest.Context(t, snowtest.XChainID) validatorState := validators.NewMockState(ctrl) validatorState.EXPECT().GetSubnetID(gomock.Any(), ctx.CChainID).AnyTimes().Return(ids.GenerateTestID(), nil) @@ -792,6 +769,7 @@ func TestSemanticVerifierExportTxDifferentSubnet(t *testing.T) { typeToFxIndex := make(map[reflect.Type]int) secpFx := &secp256k1fx.Fx{} parser, err := txs.NewCustomParser( + time.Time{}, time.Time{}, typeToFxIndex, new(mockable.Clock), @@ -877,21 +855,20 @@ func TestSemanticVerifierExportTxDifferentSubnet(t *testing.T) { Unsigned: &unsignedCreateAssetTx, } - state := states.NewMockChain(ctrl) + state := state.NewMockChain(ctrl) - state.EXPECT().GetUTXOFromID(&utxoID).Return(&utxo, nil) + state.EXPECT().GetUTXO(utxoID.InputID()).Return(&utxo, nil) state.EXPECT().GetTx(asset.ID).Return(&createAssetTx, nil) tx := &txs.Tx{ Unsigned: &exportTx, } - err = tx.SignSECP256K1Fx( + require.NoError(tx.SignSECP256K1Fx( codec, [][]*secp256k1.PrivateKey{ {keys[0]}, }, - ) - require.NoError(err) + )) err = tx.Unsigned.Visit(&SemanticVerifier{ Backend: backend, @@ -900,3 +877,261 @@ func TestSemanticVerifierExportTxDifferentSubnet(t *testing.T) { }) require.ErrorIs(err, verify.ErrMismatchedSubnetIDs) } + +func TestSemanticVerifierImportTx(t *testing.T) { + ctx := snowtest.Context(t, snowtest.XChainID) + + m := atomic.NewMemory(prefixdb.New([]byte{0}, memdb.New())) + ctx.SharedMemory = m.NewSharedMemory(ctx.ChainID) + + typeToFxIndex := make(map[reflect.Type]int) + fx := &secp256k1fx.Fx{} + parser, err := 
txs.NewCustomParser( + time.Time{}, + time.Time{}, + typeToFxIndex, + new(mockable.Clock), + logging.NoWarn{}, + []fxs.Fx{ + fx, + }, + ) + require.NoError(t, err) + + codec := parser.Codec() + utxoID := avax.UTXOID{ + TxID: ids.GenerateTestID(), + OutputIndex: 2, + } + + asset := avax.Asset{ + ID: ids.GenerateTestID(), + } + outputOwners := secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + keys[0].Address(), + }, + } + baseTx := txs.BaseTx{ + BaseTx: avax.BaseTx{ + NetworkID: constants.UnitTestID, + BlockchainID: ctx.ChainID, + Outs: []*avax.TransferableOutput{{ + Asset: asset, + Out: &secp256k1fx.TransferOutput{ + Amt: 1000, + OutputOwners: outputOwners, + }, + }}, + }, + } + input := avax.TransferableInput{ + UTXOID: utxoID, + Asset: asset, + In: &secp256k1fx.TransferInput{ + Amt: 12345, + Input: secp256k1fx.Input{ + SigIndices: []uint32{0}, + }, + }, + } + unsignedImportTx := txs.ImportTx{ + BaseTx: baseTx, + SourceChain: ctx.CChainID, + ImportedIns: []*avax.TransferableInput{ + &input, + }, + } + importTx := &txs.Tx{ + Unsigned: &unsignedImportTx, + } + require.NoError(t, importTx.SignSECP256K1Fx( + codec, + [][]*secp256k1.PrivateKey{ + {keys[0]}, + }, + )) + + backend := &Backend{ + Ctx: ctx, + Config: &feeConfig, + Fxs: []*fxs.ParsedFx{ + { + ID: secp256k1fx.ID, + Fx: fx, + }, + }, + TypeToFxIndex: typeToFxIndex, + Codec: codec, + FeeAssetID: ids.GenerateTestID(), + Bootstrapped: true, + } + require.NoError(t, fx.Bootstrapped()) + + output := secp256k1fx.TransferOutput{ + Amt: 12345, + OutputOwners: outputOwners, + } + utxo := avax.UTXO{ + UTXOID: utxoID, + Asset: asset, + Out: &output, + } + utxoBytes, err := codec.Marshal(txs.CodecVersion, utxo) + require.NoError(t, err) + + peerSharedMemory := m.NewSharedMemory(ctx.CChainID) + inputID := utxo.InputID() + require.NoError(t, peerSharedMemory.Apply(map[ids.ID]*atomic.Requests{ctx.ChainID: {PutRequests: []*atomic.Element{{ + Key: inputID[:], + Value: utxoBytes, + Traits: [][]byte{ + 
keys[0].PublicKey().Address().Bytes(), + }, + }}}})) + + unsignedCreateAssetTx := txs.CreateAssetTx{ + States: []*txs.InitialState{{ + FxIndex: 0, + }}, + } + createAssetTx := txs.Tx{ + Unsigned: &unsignedCreateAssetTx, + } + tests := []struct { + name string + stateFunc func(*gomock.Controller) state.Chain + txFunc func(*require.Assertions) *txs.Tx + expectedErr error + }{ + { + name: "valid", + stateFunc: func(ctrl *gomock.Controller) state.Chain { + state := state.NewMockChain(ctrl) + state.EXPECT().GetUTXO(utxoID.InputID()).Return(&utxo, nil).AnyTimes() + state.EXPECT().GetTx(asset.ID).Return(&createAssetTx, nil).AnyTimes() + return state + }, + txFunc: func(*require.Assertions) *txs.Tx { + return importTx + }, + expectedErr: nil, + }, + { + name: "not allowed input feature extension", + stateFunc: func(ctrl *gomock.Controller) state.Chain { + state := state.NewMockChain(ctrl) + unsignedCreateAssetTx := unsignedCreateAssetTx + unsignedCreateAssetTx.States = nil + createAssetTx := txs.Tx{ + Unsigned: &unsignedCreateAssetTx, + } + state.EXPECT().GetUTXO(utxoID.InputID()).Return(&utxo, nil).AnyTimes() + state.EXPECT().GetTx(asset.ID).Return(&createAssetTx, nil).AnyTimes() + state.EXPECT().GetTimestamp().Return(time.Time{}) + return state + }, + txFunc: func(*require.Assertions) *txs.Tx { + return importTx + }, + expectedErr: errIncompatibleFx, + }, + { + name: "invalid signature", + stateFunc: func(ctrl *gomock.Controller) state.Chain { + state := state.NewMockChain(ctrl) + state.EXPECT().GetUTXO(utxoID.InputID()).Return(&utxo, nil).AnyTimes() + state.EXPECT().GetTx(asset.ID).Return(&createAssetTx, nil).AnyTimes() + return state + }, + txFunc: func(require *require.Assertions) *txs.Tx { + tx := &txs.Tx{ + Unsigned: &unsignedImportTx, + } + require.NoError(tx.SignSECP256K1Fx( + codec, + [][]*secp256k1.PrivateKey{ + {keys[1]}, + }, + )) + return tx + }, + expectedErr: secp256k1fx.ErrWrongSig, + }, + { + name: "not allowed output feature extension", + stateFunc: 
func(ctrl *gomock.Controller) state.Chain { + state := state.NewMockChain(ctrl) + unsignedCreateAssetTx := unsignedCreateAssetTx + unsignedCreateAssetTx.States = nil + createAssetTx := txs.Tx{ + Unsigned: &unsignedCreateAssetTx, + } + state.EXPECT().GetTx(asset.ID).Return(&createAssetTx, nil).AnyTimes() + state.EXPECT().GetTimestamp().Return(time.Time{}) + return state + }, + txFunc: func(require *require.Assertions) *txs.Tx { + importTx := unsignedImportTx + importTx.Ins = nil + importTx.ImportedIns = []*avax.TransferableInput{ + &input, + } + tx := &txs.Tx{ + Unsigned: &importTx, + } + require.NoError(tx.SignSECP256K1Fx( + codec, + nil, + )) + return tx + }, + expectedErr: errIncompatibleFx, + }, + { + name: "unknown asset", + stateFunc: func(ctrl *gomock.Controller) state.Chain { + state := state.NewMockChain(ctrl) + state.EXPECT().GetUTXO(utxoID.InputID()).Return(&utxo, nil).AnyTimes() + state.EXPECT().GetTx(asset.ID).Return(nil, database.ErrNotFound) + return state + }, + txFunc: func(*require.Assertions) *txs.Tx { + return importTx + }, + expectedErr: database.ErrNotFound, + }, + { + name: "not an asset", + stateFunc: func(ctrl *gomock.Controller) state.Chain { + state := state.NewMockChain(ctrl) + tx := txs.Tx{ + Unsigned: &baseTx, + } + state.EXPECT().GetUTXO(utxoID.InputID()).Return(&utxo, nil).AnyTimes() + state.EXPECT().GetTx(asset.ID).Return(&tx, nil) + return state + }, + txFunc: func(*require.Assertions) *txs.Tx { + return importTx + }, + expectedErr: errNotAnAsset, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) + + state := test.stateFunc(ctrl) + tx := test.txFunc(require) + err := tx.Unsigned.Visit(&SemanticVerifier{ + Backend: backend, + State: state, + Tx: tx, + }) + require.ErrorIs(err, test.expectedErr) + }) + } +} diff --git a/avalanchego/vms/avm/txs/executor/syntactic_verifier.go b/avalanchego/vms/avm/txs/executor/syntactic_verifier.go index 
b7023887..81a2f2a7 100644 --- a/avalanchego/vms/avm/txs/executor/syntactic_verifier.go +++ b/avalanchego/vms/avm/txs/executor/syntactic_verifier.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package executor @@ -134,7 +134,7 @@ func (v *SyntacticVerifier) CreateAssetTx(tx *txs.CreateAssetTx) error { return err } } - if !utils.IsSortedAndUniqueSortable(tx.States) { + if !utils.IsSortedAndUnique(tx.States) { return errInitialStatesNotSortedUnique } diff --git a/avalanchego/vms/avm/txs/executor/syntactic_verifier_test.go b/avalanchego/vms/avm/txs/executor/syntactic_verifier_test.go index dc70321e..ff70a908 100644 --- a/avalanchego/vms/avm/txs/executor/syntactic_verifier_test.go +++ b/avalanchego/vms/avm/txs/executor/syntactic_verifier_test.go @@ -1,28 +1,28 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package executor import ( + "math" "strings" "testing" "time" - stdmath "math" - "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" - "github.com/ava-labs/avalanchego/utils/math" "github.com/ava-labs/avalanchego/vms/avm/config" "github.com/ava-labs/avalanchego/vms/avm/fxs" "github.com/ava-labs/avalanchego/vms/avm/txs" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/verify" "github.com/ava-labs/avalanchego/vms/secp256k1fx" + + safemath "github.com/ava-labs/avalanchego/utils/math" ) var ( @@ -33,28 +33,12 @@ var ( } ) -func newContext(t testing.TB) *snow.Context { - require := require.New(t) - - ctx := snow.DefaultContextTest() - ctx.NetworkID = constants.UnitTestID - ctx.ChainID = ids.GenerateTestID() - ctx.XChainID = ctx.ChainID - ctx.CChainID = ids.GenerateTestID() - - aliaser := ctx.BCLookup.(ids.Aliaser) - require.NoError(aliaser.Alias(ctx.XChainID, "X")) - require.NoError(aliaser.Alias(ctx.XChainID, ctx.XChainID.String())) - require.NoError(aliaser.Alias(constants.PlatformChainID, "P")) - require.NoError(aliaser.Alias(constants.PlatformChainID, constants.PlatformChainID.String())) - return ctx -} - func TestSyntacticVerifierBaseTx(t *testing.T) { - ctx := newContext(t) + ctx := snowtest.Context(t, snowtest.XChainID) fx := &secp256k1fx.Fx{} parser, err := txs.NewParser( + time.Time{}, time.Time{}, []fxs.Fx{ fx, @@ -106,7 +90,7 @@ func TestSyntacticVerifierBaseTx(t *testing.T) { }, } cred := fxs.FxCredential{ - Verifiable: &secp256k1fx.Credential{}, + Credential: &secp256k1fx.Credential{}, } creds := []*fxs.FxCredential{ &cred, @@ -278,7 +262,7 @@ func TestSyntacticVerifierBaseTx(t *testing.T) { input1 := input input1.UTXOID.OutputIndex++ input1.In = &secp256k1fx.TransferInput{ - Amt: 
stdmath.MaxUint64, + Amt: math.MaxUint64, Input: inputSigners, } @@ -296,7 +280,7 @@ func TestSyntacticVerifierBaseTx(t *testing.T) { }, } }, - err: math.ErrOverflow, + err: safemath.ErrOverflow, }, { name: "output overflow", @@ -309,7 +293,7 @@ func TestSyntacticVerifierBaseTx(t *testing.T) { output1 := output output1.Out = &secp256k1fx.TransferOutput{ - Amt: stdmath.MaxUint64, + Amt: math.MaxUint64, OutputOwners: outputOwners, } @@ -326,7 +310,7 @@ func TestSyntacticVerifierBaseTx(t *testing.T) { Creds: creds, } }, - err: math.ErrOverflow, + err: safemath.ErrOverflow, }, { name: "insufficient funds", @@ -354,7 +338,7 @@ func TestSyntacticVerifierBaseTx(t *testing.T) { return &txs.Tx{ Unsigned: &txs.BaseTx{BaseTx: baseTx}, Creds: []*fxs.FxCredential{{ - Verifiable: (*secp256k1fx.Credential)(nil), + Credential: (*secp256k1fx.Credential)(nil), }}, } }, @@ -412,24 +396,23 @@ func TestSyntacticVerifierBaseTx(t *testing.T) { } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - require := require.New(t) - tx := test.txFunc() verifier := &SyntacticVerifier{ Backend: backend, Tx: tx, } err := tx.Unsigned.Visit(verifier) - require.ErrorIs(err, test.err) + require.ErrorIs(t, err, test.err) }) } } func TestSyntacticVerifierCreateAssetTx(t *testing.T) { - ctx := newContext(t) + ctx := snowtest.Context(t, snowtest.XChainID) fx := &secp256k1fx.Fx{} parser, err := txs.NewParser( + time.Time{}, time.Time{}, []fxs.Fx{ fx, @@ -496,7 +479,7 @@ func TestSyntacticVerifierCreateAssetTx(t *testing.T) { }, } cred := fxs.FxCredential{ - Verifiable: &secp256k1fx.Credential{}, + Credential: &secp256k1fx.Credential{}, } creds := []*fxs.FxCredential{ &cred, @@ -776,7 +759,7 @@ func TestSyntacticVerifierCreateAssetTx(t *testing.T) { input1 := input input1.UTXOID.OutputIndex++ input1.In = &secp256k1fx.TransferInput{ - Amt: stdmath.MaxUint64, + Amt: math.MaxUint64, Input: inputSigners, } @@ -794,7 +777,7 @@ func TestSyntacticVerifierCreateAssetTx(t *testing.T) { }, } }, - err: 
math.ErrOverflow, + err: safemath.ErrOverflow, }, { name: "output overflow", @@ -807,7 +790,7 @@ func TestSyntacticVerifierCreateAssetTx(t *testing.T) { output1 := output output1.Out = &secp256k1fx.TransferOutput{ - Amt: stdmath.MaxUint64, + Amt: math.MaxUint64, OutputOwners: outputOwners, } @@ -824,7 +807,7 @@ func TestSyntacticVerifierCreateAssetTx(t *testing.T) { Creds: creds, } }, - err: math.ErrOverflow, + err: safemath.ErrOverflow, }, { name: "insufficient funds", @@ -966,7 +949,7 @@ func TestSyntacticVerifierCreateAssetTx(t *testing.T) { return &txs.Tx{ Unsigned: &tx, Creds: []*fxs.FxCredential{{ - Verifiable: (*secp256k1fx.Credential)(nil), + Credential: (*secp256k1fx.Credential)(nil), }}, } }, @@ -1024,24 +1007,23 @@ func TestSyntacticVerifierCreateAssetTx(t *testing.T) { } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - require := require.New(t) - tx := test.txFunc() verifier := &SyntacticVerifier{ Backend: backend, Tx: tx, } err := tx.Unsigned.Visit(verifier) - require.ErrorIs(err, test.err) + require.ErrorIs(t, err, test.err) }) } } func TestSyntacticVerifierOperationTx(t *testing.T) { - ctx := newContext(t) + ctx := snowtest.Context(t, snowtest.XChainID) fx := &secp256k1fx.Fx{} parser, err := txs.NewParser( + time.Time{}, time.Time{}, []fxs.Fx{ fx, @@ -1115,7 +1097,7 @@ func TestSyntacticVerifierOperationTx(t *testing.T) { }, } cred := fxs.FxCredential{ - Verifiable: &secp256k1fx.Credential{}, + Credential: &secp256k1fx.Credential{}, } creds := []*fxs.FxCredential{ &cred, @@ -1300,7 +1282,7 @@ func TestSyntacticVerifierOperationTx(t *testing.T) { input1 := input input1.UTXOID.OutputIndex++ input1.In = &secp256k1fx.TransferInput{ - Amt: stdmath.MaxUint64, + Amt: math.MaxUint64, Input: inputSigners, } @@ -1318,14 +1300,14 @@ func TestSyntacticVerifierOperationTx(t *testing.T) { }, } }, - err: math.ErrOverflow, + err: safemath.ErrOverflow, }, { name: "output overflow", txFunc: func() *txs.Tx { output := output output.Out = 
&secp256k1fx.TransferOutput{ - Amt: stdmath.MaxUint64, + Amt: math.MaxUint64, OutputOwners: outputOwners, } @@ -1341,7 +1323,7 @@ func TestSyntacticVerifierOperationTx(t *testing.T) { Creds: creds, } }, - err: math.ErrOverflow, + err: safemath.ErrOverflow, }, { name: "insufficient funds", @@ -1458,7 +1440,7 @@ func TestSyntacticVerifierOperationTx(t *testing.T) { return &txs.Tx{ Unsigned: &tx, Creds: []*fxs.FxCredential{{ - Verifiable: (*secp256k1fx.Credential)(nil), + Credential: (*secp256k1fx.Credential)(nil), }}, } }, @@ -1516,24 +1498,23 @@ func TestSyntacticVerifierOperationTx(t *testing.T) { } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - require := require.New(t) - tx := test.txFunc() verifier := &SyntacticVerifier{ Backend: backend, Tx: tx, } err := tx.Unsigned.Visit(verifier) - require.ErrorIs(err, test.err) + require.ErrorIs(t, err, test.err) }) } } func TestSyntacticVerifierImportTx(t *testing.T) { - ctx := newContext(t) + ctx := snowtest.Context(t, snowtest.XChainID) fx := &secp256k1fx.Fx{} parser, err := txs.NewParser( + time.Time{}, time.Time{}, []fxs.Fx{ fx, @@ -1589,7 +1570,7 @@ func TestSyntacticVerifierImportTx(t *testing.T) { }, } cred := fxs.FxCredential{ - Verifiable: &secp256k1fx.Credential{}, + Credential: &secp256k1fx.Credential{}, } creds := []*fxs.FxCredential{ &cred, @@ -1792,7 +1773,7 @@ func TestSyntacticVerifierImportTx(t *testing.T) { input1 := input input1.UTXOID.OutputIndex++ input1.In = &secp256k1fx.TransferInput{ - Amt: stdmath.MaxUint64, + Amt: math.MaxUint64, Input: inputSigners, } @@ -1810,14 +1791,14 @@ func TestSyntacticVerifierImportTx(t *testing.T) { }, } }, - err: math.ErrOverflow, + err: safemath.ErrOverflow, }, { name: "output overflow", txFunc: func() *txs.Tx { output := output output.Out = &secp256k1fx.TransferOutput{ - Amt: stdmath.MaxUint64, + Amt: math.MaxUint64, OutputOwners: outputOwners, } @@ -1833,7 +1814,7 @@ func TestSyntacticVerifierImportTx(t *testing.T) { Creds: creds, } }, - err: 
math.ErrOverflow, + err: safemath.ErrOverflow, }, { name: "insufficient funds", @@ -1861,7 +1842,7 @@ func TestSyntacticVerifierImportTx(t *testing.T) { return &txs.Tx{ Unsigned: &tx, Creds: []*fxs.FxCredential{{ - Verifiable: (*secp256k1fx.Credential)(nil), + Credential: (*secp256k1fx.Credential)(nil), }}, } }, @@ -1919,24 +1900,23 @@ func TestSyntacticVerifierImportTx(t *testing.T) { } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - require := require.New(t) - tx := test.txFunc() verifier := &SyntacticVerifier{ Backend: backend, Tx: tx, } err := tx.Unsigned.Visit(verifier) - require.ErrorIs(err, test.err) + require.ErrorIs(t, err, test.err) }) } } func TestSyntacticVerifierExportTx(t *testing.T) { - ctx := newContext(t) + ctx := snowtest.Context(t, snowtest.XChainID) fx := &secp256k1fx.Fx{} parser, err := txs.NewParser( + time.Time{}, time.Time{}, []fxs.Fx{ fx, @@ -1992,7 +1972,7 @@ func TestSyntacticVerifierExportTx(t *testing.T) { }, } cred := fxs.FxCredential{ - Verifiable: &secp256k1fx.Credential{}, + Credential: &secp256k1fx.Credential{}, } creds := []*fxs.FxCredential{ &cred, @@ -2207,7 +2187,7 @@ func TestSyntacticVerifierExportTx(t *testing.T) { input1 := input input1.UTXOID.OutputIndex++ input1.In = &secp256k1fx.TransferInput{ - Amt: stdmath.MaxUint64, + Amt: math.MaxUint64, Input: inputSigners, } @@ -2225,14 +2205,14 @@ func TestSyntacticVerifierExportTx(t *testing.T) { }, } }, - err: math.ErrOverflow, + err: safemath.ErrOverflow, }, { name: "output overflow", txFunc: func() *txs.Tx { output := output output.Out = &secp256k1fx.TransferOutput{ - Amt: stdmath.MaxUint64, + Amt: math.MaxUint64, OutputOwners: outputOwners, } @@ -2248,7 +2228,7 @@ func TestSyntacticVerifierExportTx(t *testing.T) { Creds: creds, } }, - err: math.ErrOverflow, + err: safemath.ErrOverflow, }, { name: "insufficient funds", @@ -2276,7 +2256,7 @@ func TestSyntacticVerifierExportTx(t *testing.T) { return &txs.Tx{ Unsigned: &tx, Creds: []*fxs.FxCredential{{ - 
Verifiable: (*secp256k1fx.Credential)(nil), + Credential: (*secp256k1fx.Credential)(nil), }}, } }, @@ -2334,15 +2314,13 @@ func TestSyntacticVerifierExportTx(t *testing.T) { } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - require := require.New(t) - tx := test.txFunc() verifier := &SyntacticVerifier{ Backend: backend, Tx: tx, } err := tx.Unsigned.Visit(verifier) - require.ErrorIs(err, test.err) + require.ErrorIs(t, err, test.err) }) } } diff --git a/avalanchego/vms/avm/txs/export_tx.go b/avalanchego/vms/avm/txs/export_tx.go index aec13141..e0be4536 100644 --- a/avalanchego/vms/avm/txs/export_tx.go +++ b/avalanchego/vms/avm/txs/export_tx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs diff --git a/avalanchego/vms/avm/txs/export_tx_test.go b/avalanchego/vms/avm/txs/export_tx_test.go index 840d3236..95899773 100644 --- a/avalanchego/vms/avm/txs/export_tx_test.go +++ b/avalanchego/vms/avm/txs/export_tx_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package txs @@ -110,6 +110,7 @@ func TestExportTxSerialization(t *testing.T) { }} parser, err := NewParser( + time.Time{}, time.Time{}, []fxs.Fx{ &secp256k1fx.Fx{}, @@ -117,8 +118,8 @@ func TestExportTxSerialization(t *testing.T) { ) require.NoError(err) - require.NoError(parser.InitializeTx(tx)) - require.Equal(tx.ID().String(), "2PKJE4TrKYpgynBFCpNPpV3GHK7d9QTgrL5mpYG6abHKDvNBG3") + require.NoError(tx.Initialize(parser.Codec())) + require.Equal("2PKJE4TrKYpgynBFCpNPpV3GHK7d9QTgrL5mpYG6abHKDvNBG3", tx.ID().String()) result := tx.Bytes() require.Equal(expected, result) @@ -172,15 +173,14 @@ func TestExportTxSerialization(t *testing.T) { 0x8f, 0xe0, 0x2a, 0xf3, 0xcc, 0x31, 0x32, 0xef, 0xfe, 0x7d, 0x3d, 0x9f, 0x14, 0x94, 0x01, } - err = tx.SignSECP256K1Fx( + require.NoError(tx.SignSECP256K1Fx( parser.Codec(), [][]*secp256k1.PrivateKey{ {keys[0], keys[0]}, {keys[0], keys[0]}, }, - ) - require.NoError(err) - require.Equal(tx.ID().String(), "2oG52e7Cb7XF1yUzv3pRFndAypgbpswWRcSAKD5SH5VgaiTm5D") + )) + require.Equal("2oG52e7Cb7XF1yUzv3pRFndAypgbpswWRcSAKD5SH5VgaiTm5D", tx.ID().String()) // there are two credentials expected[len(expected)-1] = 0x02 diff --git a/avalanchego/vms/avm/txs/import_tx.go b/avalanchego/vms/avm/txs/import_tx.go index 5076aa1f..5ef8929f 100644 --- a/avalanchego/vms/avm/txs/import_tx.go +++ b/avalanchego/vms/avm/txs/import_tx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package txs @@ -44,24 +44,6 @@ func (t *ImportTx) InputIDs() set.Set[ids.ID] { return inputs } -// ConsumedAssetIDs returns the IDs of the assets this transaction consumes -func (t *ImportTx) ConsumedAssetIDs() set.Set[ids.ID] { - assets := t.BaseTx.AssetIDs() - for _, in := range t.ImportedIns { - assets.Add(in.AssetID()) - } - return assets -} - -// AssetIDs returns the IDs of the assets this transaction depends on -func (t *ImportTx) AssetIDs() set.Set[ids.ID] { - assets := t.BaseTx.AssetIDs() - for _, in := range t.ImportedIns { - assets.Add(in.AssetID()) - } - return assets -} - // NumCredentials returns the number of expected credentials func (t *ImportTx) NumCredentials() int { return t.BaseTx.NumCredentials() + len(t.ImportedIns) diff --git a/avalanchego/vms/avm/txs/import_tx_test.go b/avalanchego/vms/avm/txs/import_tx_test.go index 106fe951..eba4731d 100644 --- a/avalanchego/vms/avm/txs/import_tx_test.go +++ b/avalanchego/vms/avm/txs/import_tx_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package txs @@ -110,6 +110,7 @@ func TestImportTxSerialization(t *testing.T) { }} parser, err := NewParser( + time.Time{}, time.Time{}, []fxs.Fx{ &secp256k1fx.Fx{}, @@ -117,8 +118,8 @@ func TestImportTxSerialization(t *testing.T) { ) require.NoError(err) - require.NoError(parser.InitializeTx(tx)) - require.Equal(tx.ID().String(), "9wdPb5rsThXYLX4WxkNeyYrNMfDE5cuWLgifSjxKiA2dCmgCZ") + require.NoError(tx.Initialize(parser.Codec())) + require.Equal("9wdPb5rsThXYLX4WxkNeyYrNMfDE5cuWLgifSjxKiA2dCmgCZ", tx.ID().String()) result := tx.Bytes() require.Equal(expected, result) @@ -172,15 +173,14 @@ func TestImportTxSerialization(t *testing.T) { 0x1f, 0x49, 0x9b, 0x0a, 0x4f, 0xbf, 0x95, 0xfc, 0x31, 0x39, 0x46, 0x4e, 0xa1, 0xaf, 0x00, } - err = tx.SignSECP256K1Fx( + require.NoError(tx.SignSECP256K1Fx( parser.Codec(), [][]*secp256k1.PrivateKey{ {keys[0], keys[0]}, {keys[0], keys[0]}, }, - ) - require.NoError(err) - require.Equal(tx.ID().String(), "pCW7sVBytzdZ1WrqzGY1DvA2S9UaMr72xpUMxVyx1QHBARNYx") + )) + require.Equal("pCW7sVBytzdZ1WrqzGY1DvA2S9UaMr72xpUMxVyx1QHBARNYx", tx.ID().String()) // there are two credentials expected[len(expected)-1] = 0x02 diff --git a/avalanchego/vms/avm/txs/initial_state.go b/avalanchego/vms/avm/txs/initial_state.go index ae20e6f1..63321e56 100644 --- a/avalanchego/vms/avm/txs/initial_state.go +++ b/avalanchego/vms/avm/txs/initial_state.go @@ -1,10 +1,11 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package txs import ( "bytes" + "cmp" "errors" "sort" @@ -25,9 +26,9 @@ var ( ) type InitialState struct { - FxIndex uint32 `serialize:"true" json:"fxIndex"` + FxIndex uint32 `serialize:"true" json:"fxIndex"` FxID ids.ID `serialize:"false" json:"fxID"` - Outs []verify.State `serialize:"true" json:"outputs"` + Outs []verify.State `serialize:"true" json:"outputs"` } func (is *InitialState) InitCtx(ctx *snow.Context) { @@ -59,8 +60,8 @@ func (is *InitialState) Verify(c codec.Manager, numFxs int) error { return nil } -func (is *InitialState) Less(other *InitialState) bool { - return is.FxIndex < other.FxIndex +func (is *InitialState) Compare(other *InitialState) int { + return cmp.Compare(is.FxIndex, other.FxIndex) } func (is *InitialState) Sort(c codec.Manager) { diff --git a/avalanchego/vms/avm/txs/initial_state_test.go b/avalanchego/vms/avm/txs/initial_state_test.go index 1d294870..5f61deb3 100644 --- a/avalanchego/vms/avm/txs/initial_state_test.go +++ b/avalanchego/vms/avm/txs/initial_state_test.go @@ -1,12 +1,13 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package txs import ( - "bytes" "errors" + "fmt" "testing" + "time" "github.com/stretchr/testify/require" @@ -21,14 +22,12 @@ import ( var errTest = errors.New("non-nil error") func TestInitialStateVerifySerialization(t *testing.T) { - c := linearcodec.NewDefault() - if err := c.RegisterType(&secp256k1fx.TransferOutput{}); err != nil { - t.Fatal(err) - } + require := require.New(t) + + c := linearcodec.NewDefault(time.Time{}) + require.NoError(c.RegisterType(&secp256k1fx.TransferOutput{})) m := codec.NewDefaultManager() - if err := m.RegisterCodec(CodecVersion, c); err != nil { - t.Fatal(err) - } + require.NoError(m.RegisterCodec(CodecVersion, c)) expected := []byte{ // Codec version: @@ -75,93 +74,78 @@ func TestInitialStateVerifySerialization(t *testing.T) { } isBytes, err := m.Marshal(CodecVersion, is) - if err != nil { - t.Fatal(err) - } - if !bytes.Equal(isBytes, expected) { - t.Fatalf("Expected:\n0x%x\nResult:\n0x%x", - expected, - isBytes, - ) - } + require.NoError(err) + require.Equal(expected, isBytes) } func TestInitialStateVerifyNil(t *testing.T) { - c := linearcodec.NewDefault() + require := require.New(t) + + c := linearcodec.NewDefault(time.Time{}) m := codec.NewDefaultManager() - if err := m.RegisterCodec(CodecVersion, c); err != nil { - t.Fatal(err) - } + require.NoError(m.RegisterCodec(CodecVersion, c)) numFxs := 1 is := (*InitialState)(nil) - if err := is.Verify(m, numFxs); err == nil { - t.Fatalf("Should have erred due to nil initial state") - } + err := is.Verify(m, numFxs) + require.ErrorIs(err, ErrNilInitialState) } func TestInitialStateVerifyUnknownFxID(t *testing.T) { - c := linearcodec.NewDefault() + require := require.New(t) + + c := linearcodec.NewDefault(time.Time{}) m := codec.NewDefaultManager() - if err := m.RegisterCodec(CodecVersion, c); err != nil { - t.Fatal(err) - } + require.NoError(m.RegisterCodec(CodecVersion, c)) numFxs := 1 is := InitialState{ FxIndex: 1, } - if err := is.Verify(m, numFxs); err == nil { - t.Fatalf("Should 
have erred due to unknown FxIndex") - } + err := is.Verify(m, numFxs) + require.ErrorIs(err, ErrUnknownFx) } func TestInitialStateVerifyNilOutput(t *testing.T) { - c := linearcodec.NewDefault() + require := require.New(t) + + c := linearcodec.NewDefault(time.Time{}) m := codec.NewDefaultManager() - if err := m.RegisterCodec(CodecVersion, c); err != nil { - t.Fatal(err) - } + require.NoError(m.RegisterCodec(CodecVersion, c)) numFxs := 1 is := InitialState{ FxIndex: 0, Outs: []verify.State{nil}, } - if err := is.Verify(m, numFxs); err == nil { - t.Fatalf("Should have erred due to a nil output") - } + err := is.Verify(m, numFxs) + require.ErrorIs(err, ErrNilFxOutput) } func TestInitialStateVerifyInvalidOutput(t *testing.T) { - c := linearcodec.NewDefault() - if err := c.RegisterType(&avax.TestVerifiable{}); err != nil { - t.Fatal(err) - } + require := require.New(t) + + c := linearcodec.NewDefault(time.Time{}) + require.NoError(c.RegisterType(&avax.TestState{})) m := codec.NewDefaultManager() - if err := m.RegisterCodec(CodecVersion, c); err != nil { - t.Fatal(err) - } + require.NoError(m.RegisterCodec(CodecVersion, c)) numFxs := 1 is := InitialState{ FxIndex: 0, - Outs: []verify.State{&avax.TestVerifiable{Err: errTest}}, - } - if err := is.Verify(m, numFxs); err == nil { - t.Fatalf("Should have erred due to an invalid output") + Outs: []verify.State{&avax.TestState{Err: errTest}}, } + err := is.Verify(m, numFxs) + require.ErrorIs(err, errTest) } func TestInitialStateVerifyUnsortedOutputs(t *testing.T) { - c := linearcodec.NewDefault() - if err := c.RegisterType(&avax.TestTransferable{}); err != nil { - t.Fatal(err) - } + require := require.New(t) + + c := linearcodec.NewDefault(time.Time{}) + require.NoError(c.RegisterType(&avax.TestTransferable{})) m := codec.NewDefaultManager() - if err := m.RegisterCodec(CodecVersion, c); err != nil { - t.Fatal(err) - } + require.NoError(m.RegisterCodec(CodecVersion, c)) numFxs := 1 is := InitialState{ @@ -171,25 +155,37 @@ func 
TestInitialStateVerifyUnsortedOutputs(t *testing.T) { &avax.TestTransferable{Val: 0}, }, } - if err := is.Verify(m, numFxs); err == nil { - t.Fatalf("Should have erred due to unsorted outputs") - } - + err := is.Verify(m, numFxs) + require.ErrorIs(err, ErrOutputsNotSorted) is.Sort(m) - - if err := is.Verify(m, numFxs); err != nil { - t.Fatal(err) - } + require.NoError(is.Verify(m, numFxs)) } -func TestInitialStateLess(t *testing.T) { - require := require.New(t) - - var is1, is2 InitialState - require.False(is1.Less(&is2)) - require.False(is2.Less(&is1)) +func TestInitialStateCompare(t *testing.T) { + tests := []struct { + a *InitialState + b *InitialState + expected int + }{ + { + a: &InitialState{}, + b: &InitialState{}, + expected: 0, + }, + { + a: &InitialState{ + FxIndex: 1, + }, + b: &InitialState{}, + expected: 1, + }, + } + for _, test := range tests { + t.Run(fmt.Sprintf("%d_%d_%d", test.a.FxIndex, test.b.FxIndex, test.expected), func(t *testing.T) { + require := require.New(t) - is1.FxIndex = 1 - require.False(is1.Less(&is2)) - require.True(is2.Less(&is1)) + require.Equal(test.expected, test.a.Compare(test.b)) + require.Equal(-test.expected, test.b.Compare(test.a)) + }) + } } diff --git a/avalanchego/vms/avm/txs/mempool/mempool.go b/avalanchego/vms/avm/txs/mempool/mempool.go index a04f1061..4ac275a2 100644 --- a/avalanchego/vms/avm/txs/mempool/mempool.go +++ b/avalanchego/vms/avm/txs/mempool/mempool.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package mempool @@ -6,14 +6,16 @@ package mempool import ( "errors" "fmt" + "sync" "github.com/prometheus/client_golang/prometheus" "github.com/ava-labs/avalanchego/cache" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/linkedhashmap" - "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/utils/setmap" "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/vms/avm/txs" ) @@ -26,31 +28,32 @@ const ( // droppedTxIDsCacheSize is the maximum number of dropped txIDs to cache droppedTxIDsCacheSize = 64 - initialConsumedUTXOsSize = 512 - // maxMempoolSize is the maximum number of bytes allowed in the mempool maxMempoolSize = 64 * units.MiB ) var ( - _ Mempool = &mempool{} + _ Mempool = (*mempool)(nil) - errDuplicateTx = errors.New("duplicate tx") - errTxTooLarge = errors.New("tx too large") - errMempoolFull = errors.New("mempool is full") - errConflictsWithOtherTx = errors.New("tx conflicts with other tx") + ErrDuplicateTx = errors.New("duplicate tx") + ErrTxTooLarge = errors.New("tx too large") + ErrMempoolFull = errors.New("mempool is full") + ErrConflictsWithOtherTx = errors.New("tx conflicts with other tx") ) // Mempool contains transactions that have not yet been put into a block. type Mempool interface { Add(tx *txs.Tx) error - Has(txID ids.ID) bool - Get(txID ids.ID) *txs.Tx - Remove(txs []*txs.Tx) + Get(txID ids.ID) (*txs.Tx, bool) + // Remove [txs] and any conflicts of [txs] from the mempool. + Remove(txs ...*txs.Tx) + + // Peek returns the oldest tx in the mempool. + Peek() (tx *txs.Tx, exists bool) - // Peek returns the next first tx that was added to the mempool whose size - // is less than or equal to maxTxSize. - Peek(maxTxSize int) *txs.Tx + // Iterate over transactions from oldest to newest until the function + // returns false or there are no more transactions. 
+ Iterate(f func(tx *txs.Tx) bool) // RequestBuildBlock notifies the consensus engine that a block should be // built if there is at least one transaction in the mempool. @@ -60,22 +63,22 @@ type Mempool interface { // unissued. This allows previously dropped txs to be possibly reissued. MarkDropped(txID ids.ID, reason error) GetDropReason(txID ids.ID) error + + // Len returns the number of txs in the mempool. + Len() int } type mempool struct { - bytesAvailableMetric prometheus.Gauge - bytesAvailable int - - unissuedTxs linkedhashmap.LinkedHashmap[ids.ID, *txs.Tx] - numTxs prometheus.Gauge + lock sync.RWMutex + unissuedTxs linkedhashmap.LinkedHashmap[ids.ID, *txs.Tx] + consumedUTXOs *setmap.SetMap[ids.ID, ids.ID] // TxID -> Consumed UTXOs + bytesAvailable int + droppedTxIDs *cache.LRU[ids.ID, error] // TxID -> Verification error toEngine chan<- common.Message - // Key: Tx ID - // Value: Verification error - droppedTxIDs *cache.LRU[ids.ID, error] - - consumedUTXOs set.Set[ids.ID] + numTxs prometheus.Gauge + bytesAvailableMetric prometheus.Gauge } func New( @@ -83,47 +86,46 @@ func New( registerer prometheus.Registerer, toEngine chan<- common.Message, ) (Mempool, error) { - bytesAvailableMetric := prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "bytes_available", - Help: "Number of bytes of space currently available in the mempool", - }) - if err := registerer.Register(bytesAvailableMetric); err != nil { - return nil, err + m := &mempool{ + unissuedTxs: linkedhashmap.New[ids.ID, *txs.Tx](), + consumedUTXOs: setmap.New[ids.ID, ids.ID](), + bytesAvailable: maxMempoolSize, + droppedTxIDs: &cache.LRU[ids.ID, error]{Size: droppedTxIDsCacheSize}, + toEngine: toEngine, + numTxs: prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Name: "count", + Help: "Number of transactions in the mempool", + }), + bytesAvailableMetric: prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Name: "bytes_available", + Help: "Number of 
bytes of space currently available in the mempool", + }), } + m.bytesAvailableMetric.Set(maxMempoolSize) - numTxsMetric := prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "count", - Help: "Number of transactions in the mempool", - }) - if err := registerer.Register(numTxsMetric); err != nil { - return nil, err - } - - bytesAvailableMetric.Set(maxMempoolSize) - return &mempool{ - bytesAvailableMetric: bytesAvailableMetric, - bytesAvailable: maxMempoolSize, - unissuedTxs: linkedhashmap.New[ids.ID, *txs.Tx](), - numTxs: numTxsMetric, - toEngine: toEngine, - droppedTxIDs: &cache.LRU[ids.ID, error]{Size: droppedTxIDsCacheSize}, - consumedUTXOs: set.NewSet[ids.ID](initialConsumedUTXOsSize), - }, nil + err := utils.Err( + registerer.Register(m.numTxs), + registerer.Register(m.bytesAvailableMetric), + ) + return m, err } func (m *mempool) Add(tx *txs.Tx) error { - // Note: a previously dropped tx can be re-added txID := tx.ID() - if m.Has(txID) { - return fmt.Errorf("%w: %s", errDuplicateTx, txID) + + m.lock.Lock() + defer m.lock.Unlock() + + if _, ok := m.unissuedTxs.Get(txID); ok { + return fmt.Errorf("%w: %s", ErrDuplicateTx, txID) } txSize := len(tx.Bytes()) if txSize > MaxTxSize { return fmt.Errorf("%w: %s size (%d) > max size (%d)", - errTxTooLarge, + ErrTxTooLarge, txID, txSize, MaxTxSize, @@ -131,7 +133,7 @@ func (m *mempool) Add(tx *txs.Tx) error { } if txSize > m.bytesAvailable { return fmt.Errorf("%w: %s size (%d) > available space (%d)", - errMempoolFull, + ErrMempoolFull, txID, txSize, m.bytesAvailable, @@ -139,8 +141,8 @@ func (m *mempool) Add(tx *txs.Tx) error { } inputs := tx.Unsigned.InputIDs() - if m.consumedUTXOs.Overlaps(inputs) { - return fmt.Errorf("%w: %s", errConflictsWithOtherTx, txID) + if m.consumedUTXOs.HasOverlap(inputs) { + return fmt.Errorf("%w: %s", ErrConflictsWithOtherTx, txID) } m.bytesAvailable -= txSize @@ -150,52 +152,58 @@ func (m *mempool) Add(tx *txs.Tx) error { m.numTxs.Inc() // Mark these UTXOs as 
consumed in the mempool - m.consumedUTXOs.Union(inputs) + m.consumedUTXOs.Put(txID, inputs) - // An explicitly added tx must not be marked as dropped. + // An added tx must not be marked as dropped. m.droppedTxIDs.Evict(txID) return nil } -func (m *mempool) Has(txID ids.ID) bool { - return m.Get(txID) != nil +func (m *mempool) Get(txID ids.ID) (*txs.Tx, bool) { + tx, ok := m.unissuedTxs.Get(txID) + return tx, ok } -func (m *mempool) Get(txID ids.ID) *txs.Tx { - unissuedTxs, _ := m.unissuedTxs.Get(txID) - return unissuedTxs -} +func (m *mempool) Remove(txs ...*txs.Tx) { + m.lock.Lock() + defer m.lock.Unlock() -func (m *mempool) Remove(txsToRemove []*txs.Tx) { - for _, tx := range txsToRemove { + for _, tx := range txs { txID := tx.ID() - if _, ok := m.unissuedTxs.Get(txID); !ok { - // If tx isn't in the mempool, there is nothing to do. + // If the transaction is in the mempool, remove it. + if _, ok := m.consumedUTXOs.DeleteKey(txID); ok { + m.unissuedTxs.Delete(txID) + m.bytesAvailable += len(tx.Bytes()) continue } - txBytes := tx.Bytes() - m.bytesAvailable += len(txBytes) - m.bytesAvailableMetric.Set(float64(m.bytesAvailable)) - - m.unissuedTxs.Delete(txID) - m.numTxs.Dec() - + // If the transaction isn't in the mempool, remove any conflicts it has. 
inputs := tx.Unsigned.InputIDs() - m.consumedUTXOs.Difference(inputs) + for _, removed := range m.consumedUTXOs.DeleteOverlapping(inputs) { + tx, _ := m.unissuedTxs.Get(removed.Key) + m.unissuedTxs.Delete(removed.Key) + m.bytesAvailable += len(tx.Bytes()) + } } + m.bytesAvailableMetric.Set(float64(m.bytesAvailable)) + m.numTxs.Set(float64(m.unissuedTxs.Len())) +} + +func (m *mempool) Peek() (*txs.Tx, bool) { + _, tx, exists := m.unissuedTxs.Oldest() + return tx, exists } -func (m *mempool) Peek(maxTxSize int) *txs.Tx { - txIter := m.unissuedTxs.NewIterator() - for txIter.Next() { - tx := txIter.Value() - txSize := len(tx.Bytes()) - if txSize <= maxTxSize { - return tx +func (m *mempool) Iterate(f func(*txs.Tx) bool) { + m.lock.RLock() + defer m.lock.RUnlock() + + it := m.unissuedTxs.NewIterator() + for it.Next() { + if !f(it.Value()) { + return } } - return nil } func (m *mempool) RequestBuildBlock() { @@ -210,6 +218,17 @@ func (m *mempool) RequestBuildBlock() { } func (m *mempool) MarkDropped(txID ids.ID, reason error) { + if errors.Is(reason, ErrMempoolFull) { + return + } + + m.lock.RLock() + defer m.lock.RUnlock() + + if _, ok := m.unissuedTxs.Get(txID); ok { + return + } + m.droppedTxIDs.Put(txID, reason) } @@ -217,3 +236,10 @@ func (m *mempool) GetDropReason(txID ids.ID) error { err, _ := m.droppedTxIDs.Get(txID) return err } + +func (m *mempool) Len() int { + m.lock.RLock() + defer m.lock.RUnlock() + + return m.unissuedTxs.Len() +} diff --git a/avalanchego/vms/avm/txs/mempool/mempool_test.go b/avalanchego/vms/avm/txs/mempool/mempool_test.go index d02abc48..410d2b76 100644 --- a/avalanchego/vms/avm/txs/mempool/mempool_test.go +++ b/avalanchego/vms/avm/txs/mempool/mempool_test.go @@ -1,173 +1,318 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package mempool import ( + "errors" "testing" "github.com/prometheus/client_golang/prometheus" - "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/utils" - "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/vms/avm/txs" "github.com/ava-labs/avalanchego/vms/components/avax" - "github.com/ava-labs/avalanchego/vms/components/verify" - "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) -var ( - keys = secp256k1.TestKeys() - chainID = ids.ID{5, 4, 3, 2, 1} - assetID = ids.ID{1, 2, 3} -) +func TestAdd(t *testing.T) { + tx0 := newTx(0, 32) + + tests := []struct { + name string + initialTxs []*txs.Tx + tx *txs.Tx + err error + dropReason error + }{ + { + name: "successfully add tx", + initialTxs: nil, + tx: tx0, + err: nil, + dropReason: nil, + }, + { + name: "attempt adding duplicate tx", + initialTxs: []*txs.Tx{tx0}, + tx: tx0, + err: ErrDuplicateTx, + dropReason: nil, + }, + { + name: "attempt adding too large tx", + initialTxs: nil, + tx: newTx(0, MaxTxSize+1), + err: ErrTxTooLarge, + dropReason: ErrTxTooLarge, + }, + { + name: "attempt adding tx when full", + initialTxs: newTxs(maxMempoolSize/MaxTxSize, MaxTxSize), + tx: newTx(maxMempoolSize/MaxTxSize, MaxTxSize), + err: ErrMempoolFull, + dropReason: nil, + }, + { + name: "attempt adding conflicting tx", + initialTxs: []*txs.Tx{tx0}, + tx: newTx(0, 32), + err: ErrConflictsWithOtherTx, + dropReason: ErrConflictsWithOtherTx, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + mempool, err := New( + "mempool", + prometheus.NewRegistry(), + nil, + ) + require.NoError(err) + + for _, tx := range test.initialTxs { + require.NoError(mempool.Add(tx)) + } + + err = mempool.Add(test.tx) + require.ErrorIs(err, test.err) + + txID := test.tx.ID() -// shows that valid tx 
is not added to mempool if this would exceed its maximum -// size -func TestBlockBuilderMaxMempoolSizeHandling(t *testing.T) { + if err != nil { + mempool.MarkDropped(txID, err) + } + + err = mempool.GetDropReason(txID) + require.ErrorIs(err, test.dropReason) + }) + } +} + +func TestGet(t *testing.T) { require := require.New(t) - registerer := prometheus.NewRegistry() - mempoolIntf, err := New("mempool", registerer, nil) + mempool, err := New( + "mempool", + prometheus.NewRegistry(), + nil, + ) require.NoError(err) - mempool := mempoolIntf.(*mempool) + tx := newTx(0, 32) + txID := tx.ID() - testTxs := createTestTxs(2) - tx := testTxs[0] + _, exists := mempool.Get(txID) + require.False(exists) - // shortcut to simulated almost filled mempool - mempool.bytesAvailable = len(tx.Bytes()) - 1 + require.NoError(mempool.Add(tx)) - err = mempool.Add(tx) - require.ErrorIs(err, errMempoolFull) + returned, exists := mempool.Get(txID) + require.True(exists) + require.Equal(tx, returned) - // shortcut to simulated almost filled mempool - mempool.bytesAvailable = len(tx.Bytes()) + mempool.Remove(tx) + + _, exists = mempool.Get(txID) + require.False(exists) +} + +func TestPeek(t *testing.T) { + require := require.New(t) - err = mempool.Add(tx) + mempool, err := New( + "mempool", + prometheus.NewRegistry(), + nil, + ) require.NoError(err) + + _, exists := mempool.Peek() + require.False(exists) + + tx0 := newTx(0, 32) + tx1 := newTx(1, 32) + + require.NoError(mempool.Add(tx0)) + require.NoError(mempool.Add(tx1)) + + tx, exists := mempool.Peek() + require.True(exists) + require.Equal(tx, tx0) + + mempool.Remove(tx0) + + tx, exists = mempool.Peek() + require.True(exists) + require.Equal(tx, tx1) + + mempool.Remove(tx0) + + tx, exists = mempool.Peek() + require.True(exists) + require.Equal(tx, tx1) + + mempool.Remove(tx1) + + _, exists = mempool.Peek() + require.False(exists) } -func TestTxsInMempool(t *testing.T) { +func TestRemoveConflict(t *testing.T) { require := require.New(t) - 
registerer := prometheus.NewRegistry() - toEngine := make(chan common.Message, 100) - mempool, err := New("mempool", registerer, toEngine) + mempool, err := New( + "mempool", + prometheus.NewRegistry(), + nil, + ) require.NoError(err) - testTxs := createTestTxs(2) + tx := newTx(0, 32) + txConflict := newTx(0, 32) - mempool.RequestBuildBlock() - select { - case <-toEngine: - t.Fatalf("should not have sent message to engine") - default: + require.NoError(mempool.Add(tx)) + + returnedTx, exists := mempool.Peek() + require.True(exists) + require.Equal(returnedTx, tx) + + mempool.Remove(txConflict) + + _, exists = mempool.Peek() + require.False(exists) +} + +func TestIterate(t *testing.T) { + require := require.New(t) + + mempool, err := New( + "mempool", + prometheus.NewRegistry(), + nil, + ) + require.NoError(err) + + var ( + iteratedTxs []*txs.Tx + maxLen = 2 + ) + addTxs := func(tx *txs.Tx) bool { + iteratedTxs = append(iteratedTxs, tx) + return len(iteratedTxs) < maxLen } + mempool.Iterate(addTxs) + require.Empty(iteratedTxs) - for _, tx := range testTxs { - txID := tx.ID() - // tx not already there - require.False(mempool.Has(txID)) + tx0 := newTx(0, 32) + require.NoError(mempool.Add(tx0)) - // we can insert - require.NoError(mempool.Add(tx)) + mempool.Iterate(addTxs) + require.Equal([]*txs.Tx{tx0}, iteratedTxs) - // we can get it - require.True(mempool.Has(txID)) + tx1 := newTx(1, 32) + require.NoError(mempool.Add(tx1)) - retrieved := mempool.Get(txID) - require.True(retrieved != nil) - require.Equal(tx, retrieved) + iteratedTxs = nil + mempool.Iterate(addTxs) + require.Equal([]*txs.Tx{tx0, tx1}, iteratedTxs) - // tx exists in mempool - require.True(mempool.Has(txID)) + tx2 := newTx(2, 32) + require.NoError(mempool.Add(tx2)) - // once removed it cannot be there - mempool.Remove([]*txs.Tx{tx}) + iteratedTxs = nil + mempool.Iterate(addTxs) + require.Equal([]*txs.Tx{tx0, tx1}, iteratedTxs) - require.False(mempool.Has(txID)) - require.Nil(mempool.Get(txID)) + 
mempool.Remove(tx0, tx2) - // we can reinsert it again to grow the mempool - require.NoError(mempool.Add(tx)) - } + iteratedTxs = nil + mempool.Iterate(addTxs) + require.Equal([]*txs.Tx{tx1}, iteratedTxs) +} + +func TestRequestBuildBlock(t *testing.T) { + require := require.New(t) + + toEngine := make(chan common.Message, 1) + mempool, err := New( + "mempool", + prometheus.NewRegistry(), + toEngine, + ) + require.NoError(err) mempool.RequestBuildBlock() select { case <-toEngine: + require.FailNow("should not have sent message to engine") default: - t.Fatalf("should have sent message to engine") } - mempool.Remove(testTxs) + tx := newTx(0, 32) + require.NoError(mempool.Add(tx)) mempool.RequestBuildBlock() + mempool.RequestBuildBlock() // Must not deadlock select { case <-toEngine: - t.Fatalf("should not have sent message to engine") + default: + require.FailNow("should have sent message to engine") + } + select { + case <-toEngine: + require.FailNow("should have only sent one message to engine") default: } } -func createTestTxs(count int) []*txs.Tx { - testTxs := make([]*txs.Tx, 0, count) - addr := keys[0].PublicKey().Address() - for i := uint32(0); i < uint32(count); i++ { - tx := &txs.Tx{Unsigned: &txs.CreateAssetTx{ - BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: constants.UnitTestID, - BlockchainID: chainID, - Ins: []*avax.TransferableInput{{ - UTXOID: avax.UTXOID{ - TxID: ids.ID{'t', 'x', 'I', 'D'}, - OutputIndex: i, - }, - Asset: avax.Asset{ID: assetID}, - In: &secp256k1fx.TransferInput{ - Amt: 54321, - Input: secp256k1fx.Input{ - SigIndices: []uint32{i}, - }, - }, - }}, - Outs: []*avax.TransferableOutput{{ - Asset: avax.Asset{ID: assetID}, - Out: &secp256k1fx.TransferOutput{ - Amt: 12345, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{addr}, - }, - }, - }}, - }}, - Name: "NormalName", - Symbol: "TICK", - Denomination: byte(2), - States: []*txs.InitialState{ - { - FxIndex: 0, - Outs: []verify.State{ - 
&secp256k1fx.TransferOutput{ - Amt: 12345, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{addr}, - }, - }, - }, - }, - }, - }} - tx.SetBytes(utils.RandomBytes(16), utils.RandomBytes(16)) - testTxs = append(testTxs, tx) +func TestDropped(t *testing.T) { + require := require.New(t) + + mempool, err := New( + "mempool", + prometheus.NewRegistry(), + nil, + ) + require.NoError(err) + + tx := newTx(0, 32) + txID := tx.ID() + testErr := errors.New("test") + + mempool.MarkDropped(txID, testErr) + + err = mempool.GetDropReason(txID) + require.ErrorIs(err, testErr) + + require.NoError(mempool.Add(tx)) + require.NoError(mempool.GetDropReason(txID)) + + mempool.MarkDropped(txID, testErr) + require.NoError(mempool.GetDropReason(txID)) +} + +func newTxs(num int, size int) []*txs.Tx { + txs := make([]*txs.Tx, num) + for i := range txs { + txs[i] = newTx(uint32(i), size) } - return testTxs + return txs +} + +func newTx(index uint32, size int) *txs.Tx { + tx := &txs.Tx{Unsigned: &txs.BaseTx{BaseTx: avax.BaseTx{ + Ins: []*avax.TransferableInput{{ + UTXOID: avax.UTXOID{ + TxID: ids.ID{'t', 'x', 'I', 'D'}, + OutputIndex: index, + }, + }}, + }}} + tx.SetBytes(utils.RandomBytes(size), utils.RandomBytes(size)) + return tx } diff --git a/avalanchego/vms/avm/txs/mempool/mock_mempool.go b/avalanchego/vms/avm/txs/mempool/mock_mempool.go index 215e97f1..69860c38 100644 --- a/avalanchego/vms/avm/txs/mempool/mock_mempool.go +++ b/avalanchego/vms/avm/txs/mempool/mock_mempool.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/vms/avm/txs/mempool (interfaces: Mempool) +// +// Generated by this command: +// +// mockgen -package=mempool -destination=vms/avm/txs/mempool/mock_mempool.go github.com/ava-labs/avalanchego/vms/avm/txs/mempool Mempool +// // Package mempool is a generated GoMock package. 
package mempool @@ -12,7 +14,7 @@ import ( ids "github.com/ava-labs/avalanchego/ids" txs "github.com/ava-labs/avalanchego/vms/avm/txs" - gomock "github.com/golang/mock/gomock" + gomock "go.uber.org/mock/gomock" ) // MockMempool is a mock of Mempool interface. @@ -47,21 +49,22 @@ func (m *MockMempool) Add(arg0 *txs.Tx) error { } // Add indicates an expected call of Add. -func (mr *MockMempoolMockRecorder) Add(arg0 interface{}) *gomock.Call { +func (mr *MockMempoolMockRecorder) Add(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Add", reflect.TypeOf((*MockMempool)(nil).Add), arg0) } // Get mocks base method. -func (m *MockMempool) Get(arg0 ids.ID) *txs.Tx { +func (m *MockMempool) Get(arg0 ids.ID) (*txs.Tx, bool) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Get", arg0) ret0, _ := ret[0].(*txs.Tx) - return ret0 + ret1, _ := ret[1].(bool) + return ret0, ret1 } // Get indicates an expected call of Get. -func (mr *MockMempoolMockRecorder) Get(arg0 interface{}) *gomock.Call { +func (mr *MockMempoolMockRecorder) Get(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockMempool)(nil).Get), arg0) } @@ -75,23 +78,35 @@ func (m *MockMempool) GetDropReason(arg0 ids.ID) error { } // GetDropReason indicates an expected call of GetDropReason. -func (mr *MockMempoolMockRecorder) GetDropReason(arg0 interface{}) *gomock.Call { +func (mr *MockMempoolMockRecorder) GetDropReason(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDropReason", reflect.TypeOf((*MockMempool)(nil).GetDropReason), arg0) } -// Has mocks base method. -func (m *MockMempool) Has(arg0 ids.ID) bool { +// Iterate mocks base method. 
+func (m *MockMempool) Iterate(arg0 func(*txs.Tx) bool) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Has", arg0) - ret0, _ := ret[0].(bool) + m.ctrl.Call(m, "Iterate", arg0) +} + +// Iterate indicates an expected call of Iterate. +func (mr *MockMempoolMockRecorder) Iterate(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Iterate", reflect.TypeOf((*MockMempool)(nil).Iterate), arg0) +} + +// Len mocks base method. +func (m *MockMempool) Len() int { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Len") + ret0, _ := ret[0].(int) return ret0 } -// Has indicates an expected call of Has. -func (mr *MockMempoolMockRecorder) Has(arg0 interface{}) *gomock.Call { +// Len indicates an expected call of Len. +func (mr *MockMempoolMockRecorder) Len() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Has", reflect.TypeOf((*MockMempool)(nil).Has), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Len", reflect.TypeOf((*MockMempool)(nil).Len)) } // MarkDropped mocks base method. @@ -101,35 +116,40 @@ func (m *MockMempool) MarkDropped(arg0 ids.ID, arg1 error) { } // MarkDropped indicates an expected call of MarkDropped. -func (mr *MockMempoolMockRecorder) MarkDropped(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockMempoolMockRecorder) MarkDropped(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarkDropped", reflect.TypeOf((*MockMempool)(nil).MarkDropped), arg0, arg1) } // Peek mocks base method. -func (m *MockMempool) Peek(arg0 int) *txs.Tx { +func (m *MockMempool) Peek() (*txs.Tx, bool) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Peek", arg0) + ret := m.ctrl.Call(m, "Peek") ret0, _ := ret[0].(*txs.Tx) - return ret0 + ret1, _ := ret[1].(bool) + return ret0, ret1 } // Peek indicates an expected call of Peek. 
-func (mr *MockMempoolMockRecorder) Peek(arg0 interface{}) *gomock.Call { +func (mr *MockMempoolMockRecorder) Peek() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Peek", reflect.TypeOf((*MockMempool)(nil).Peek), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Peek", reflect.TypeOf((*MockMempool)(nil).Peek)) } // Remove mocks base method. -func (m *MockMempool) Remove(arg0 []*txs.Tx) { +func (m *MockMempool) Remove(arg0 ...*txs.Tx) { m.ctrl.T.Helper() - m.ctrl.Call(m, "Remove", arg0) + varargs := []any{} + for _, a := range arg0 { + varargs = append(varargs, a) + } + m.ctrl.Call(m, "Remove", varargs...) } // Remove indicates an expected call of Remove. -func (mr *MockMempoolMockRecorder) Remove(arg0 interface{}) *gomock.Call { +func (mr *MockMempoolMockRecorder) Remove(arg0 ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Remove", reflect.TypeOf((*MockMempool)(nil).Remove), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Remove", reflect.TypeOf((*MockMempool)(nil).Remove), arg0...) } // RequestBuildBlock mocks base method. diff --git a/avalanchego/vms/avm/txs/mock_unsigned_tx.go b/avalanchego/vms/avm/txs/mock_unsigned_tx.go index 25a7a8c4..25bc9d50 100644 --- a/avalanchego/vms/avm/txs/mock_unsigned_tx.go +++ b/avalanchego/vms/avm/txs/mock_unsigned_tx.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. -// Source: github.com/ava-labs/avalanchego/vms/avm/txs (interfaces: UnsignedTx) +// Source: vms/avm/txs/tx.go +// +// Generated by this command: +// +// mockgen -source=vms/avm/txs/tx.go -destination=vms/avm/txs/mock_unsigned_tx.go -package=txs -exclude_interfaces= +// // Package txs is a generated GoMock package. 
package txs @@ -14,7 +16,7 @@ import ( snow "github.com/ava-labs/avalanchego/snow" set "github.com/ava-labs/avalanchego/utils/set" avax "github.com/ava-labs/avalanchego/vms/components/avax" - gomock "github.com/golang/mock/gomock" + gomock "go.uber.org/mock/gomock" ) // MockUnsignedTx is a mock of UnsignedTx interface. @@ -40,20 +42,6 @@ func (m *MockUnsignedTx) EXPECT() *MockUnsignedTxMockRecorder { return m.recorder } -// AssetIDs mocks base method. -func (m *MockUnsignedTx) AssetIDs() set.Set[ids.ID] { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AssetIDs") - ret0, _ := ret[0].(set.Set[ids.ID]) - return ret0 -} - -// AssetIDs indicates an expected call of AssetIDs. -func (mr *MockUnsignedTxMockRecorder) AssetIDs() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AssetIDs", reflect.TypeOf((*MockUnsignedTx)(nil).AssetIDs)) -} - // Bytes mocks base method. func (m *MockUnsignedTx) Bytes() []byte { m.ctrl.T.Helper() @@ -68,30 +56,16 @@ func (mr *MockUnsignedTxMockRecorder) Bytes() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Bytes", reflect.TypeOf((*MockUnsignedTx)(nil).Bytes)) } -// ConsumedAssetIDs mocks base method. -func (m *MockUnsignedTx) ConsumedAssetIDs() set.Set[ids.ID] { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ConsumedAssetIDs") - ret0, _ := ret[0].(set.Set[ids.ID]) - return ret0 -} - -// ConsumedAssetIDs indicates an expected call of ConsumedAssetIDs. -func (mr *MockUnsignedTxMockRecorder) ConsumedAssetIDs() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ConsumedAssetIDs", reflect.TypeOf((*MockUnsignedTx)(nil).ConsumedAssetIDs)) -} - // InitCtx mocks base method. -func (m *MockUnsignedTx) InitCtx(arg0 *snow.Context) { +func (m *MockUnsignedTx) InitCtx(ctx *snow.Context) { m.ctrl.T.Helper() - m.ctrl.Call(m, "InitCtx", arg0) + m.ctrl.Call(m, "InitCtx", ctx) } // InitCtx indicates an expected call of InitCtx. 
-func (mr *MockUnsignedTxMockRecorder) InitCtx(arg0 interface{}) *gomock.Call { +func (mr *MockUnsignedTxMockRecorder) InitCtx(ctx any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InitCtx", reflect.TypeOf((*MockUnsignedTx)(nil).InitCtx), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InitCtx", reflect.TypeOf((*MockUnsignedTx)(nil).InitCtx), ctx) } // InputIDs mocks base method. @@ -137,27 +111,27 @@ func (mr *MockUnsignedTxMockRecorder) NumCredentials() *gomock.Call { } // SetBytes mocks base method. -func (m *MockUnsignedTx) SetBytes(arg0 []byte) { +func (m *MockUnsignedTx) SetBytes(unsignedBytes []byte) { m.ctrl.T.Helper() - m.ctrl.Call(m, "SetBytes", arg0) + m.ctrl.Call(m, "SetBytes", unsignedBytes) } // SetBytes indicates an expected call of SetBytes. -func (mr *MockUnsignedTxMockRecorder) SetBytes(arg0 interface{}) *gomock.Call { +func (mr *MockUnsignedTxMockRecorder) SetBytes(unsignedBytes any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetBytes", reflect.TypeOf((*MockUnsignedTx)(nil).SetBytes), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetBytes", reflect.TypeOf((*MockUnsignedTx)(nil).SetBytes), unsignedBytes) } // Visit mocks base method. -func (m *MockUnsignedTx) Visit(arg0 Visitor) error { +func (m *MockUnsignedTx) Visit(visitor Visitor) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Visit", arg0) + ret := m.ctrl.Call(m, "Visit", visitor) ret0, _ := ret[0].(error) return ret0 } // Visit indicates an expected call of Visit. 
-func (mr *MockUnsignedTxMockRecorder) Visit(arg0 interface{}) *gomock.Call { +func (mr *MockUnsignedTxMockRecorder) Visit(visitor any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Visit", reflect.TypeOf((*MockUnsignedTx)(nil).Visit), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Visit", reflect.TypeOf((*MockUnsignedTx)(nil).Visit), visitor) } diff --git a/avalanchego/vms/avm/txs/operation.go b/avalanchego/vms/avm/txs/operation.go index 5978ab47..d37b1629 100644 --- a/avalanchego/vms/avm/txs/operation.go +++ b/avalanchego/vms/avm/txs/operation.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs @@ -25,9 +25,9 @@ var ( type Operation struct { avax.Asset `serialize:"true"` - UTXOIDs []*avax.UTXOID `serialize:"true" json:"inputIDs"` + UTXOIDs []*avax.UTXOID `serialize:"true" json:"inputIDs"` FxID ids.ID `serialize:"false" json:"fxID"` - Op fxs.FxOperation `serialize:"true" json:"operation"` + Op fxs.FxOperation `serialize:"true" json:"operation"` } func (op *Operation) Verify() error { @@ -36,48 +36,54 @@ func (op *Operation) Verify() error { return ErrNilOperation case op.Op == nil: return ErrNilFxOperation - case !utils.IsSortedAndUniqueSortable(op.UTXOIDs): + case !utils.IsSortedAndUnique(op.UTXOIDs): return ErrNotSortedAndUniqueUTXOIDs default: return verify.All(&op.Asset, op.Op) } } -type innerSortOperation struct { - ops []*Operation +type operationAndCodec struct { + op *Operation codec codec.Manager } -func (ops *innerSortOperation) Less(i, j int) bool { - iOp := ops.ops[i] - jOp := ops.ops[j] - - iBytes, err := ops.codec.Marshal(CodecVersion, iOp) +func (o *operationAndCodec) Compare(other *operationAndCodec) int { + oBytes, err := o.codec.Marshal(CodecVersion, o.op) if err != nil { - return false + return 0 } - jBytes, err := 
ops.codec.Marshal(CodecVersion, jOp) + otherBytes, err := o.codec.Marshal(CodecVersion, other.op) if err != nil { - return false + return 0 } - return bytes.Compare(iBytes, jBytes) == -1 -} - -func (ops *innerSortOperation) Len() int { - return len(ops.ops) -} - -func (ops *innerSortOperation) Swap(i, j int) { - o := ops.ops - o[j], o[i] = o[i], o[j] + return bytes.Compare(oBytes, otherBytes) } func SortOperations(ops []*Operation, c codec.Manager) { - sort.Sort(&innerSortOperation{ops: ops, codec: c}) + sortableOps := make([]*operationAndCodec, len(ops)) + for i, op := range ops { + sortableOps[i] = &operationAndCodec{ + op: op, + codec: c, + } + } + + utils.Sort(sortableOps) + for i, sortableOp := range sortableOps { + ops[i] = sortableOp.op + } } func IsSortedAndUniqueOperations(ops []*Operation, c codec.Manager) bool { - return utils.IsSortedAndUnique(&innerSortOperation{ops: ops, codec: c}) + sortableOps := make([]*operationAndCodec, len(ops)) + for i, op := range ops { + sortableOps[i] = &operationAndCodec{ + op: op, + codec: c, + } + } + return utils.IsSortedAndUnique(sortableOps) } type innerSortOperationsWithSigners struct { diff --git a/avalanchego/vms/avm/txs/operation_test.go b/avalanchego/vms/avm/txs/operation_test.go index 55a06268..3ca4676e 100644 --- a/avalanchego/vms/avm/txs/operation_test.go +++ b/avalanchego/vms/avm/txs/operation_test.go @@ -1,10 +1,13 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package txs import ( "testing" + "time" + + "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" @@ -28,18 +31,16 @@ func (o *testOperable) Outs() []verify.State { func TestOperationVerifyNil(t *testing.T) { op := (*Operation)(nil) - if err := op.Verify(); err == nil { - t.Fatalf("Should have erred due to nil operation") - } + err := op.Verify() + require.ErrorIs(t, err, ErrNilOperation) } func TestOperationVerifyEmpty(t *testing.T) { op := &Operation{ Asset: avax.Asset{ID: ids.Empty}, } - if err := op.Verify(); err == nil { - t.Fatalf("Should have erred due to empty operation") - } + err := op.Verify() + require.ErrorIs(t, err, ErrNilFxOperation) } func TestOperationVerifyUTXOIDsNotSorted(t *testing.T) { @@ -57,9 +58,8 @@ func TestOperationVerifyUTXOIDsNotSorted(t *testing.T) { }, Op: &testOperable{}, } - if err := op.Verify(); err == nil { - t.Fatalf("Should have erred due to unsorted utxoIDs") - } + err := op.Verify() + require.ErrorIs(t, err, ErrNotSortedAndUniqueUTXOIDs) } func TestOperationVerify(t *testing.T) { @@ -74,21 +74,17 @@ func TestOperationVerify(t *testing.T) { }, Op: &testOperable{}, } - if err := op.Verify(); err != nil { - t.Fatal(err) - } + require.NoError(t, op.Verify()) } func TestOperationSorting(t *testing.T) { - c := linearcodec.NewDefault() - if err := c.RegisterType(&testOperable{}); err != nil { - t.Fatal(err) - } + require := require.New(t) + + c := linearcodec.NewDefault(time.Time{}) + require.NoError(c.RegisterType(&testOperable{})) m := codec.NewDefaultManager() - if err := m.RegisterCodec(CodecVersion, c); err != nil { - t.Fatal(err) - } + require.NoError(m.RegisterCodec(CodecVersion, c)) ops := []*Operation{ { @@ -112,13 +108,9 @@ func TestOperationSorting(t *testing.T) { Op: &testOperable{}, }, } - if IsSortedAndUniqueOperations(ops, m) { - t.Fatalf("Shouldn't be sorted") - } + require.False(IsSortedAndUniqueOperations(ops, m)) SortOperations(ops, m) - 
if !IsSortedAndUniqueOperations(ops, m) { - t.Fatalf("Should be sorted") - } + require.True(IsSortedAndUniqueOperations(ops, m)) ops = append(ops, &Operation{ Asset: avax.Asset{ID: ids.Empty}, UTXOIDs: []*avax.UTXOID{ @@ -129,14 +121,11 @@ func TestOperationSorting(t *testing.T) { }, Op: &testOperable{}, }) - if IsSortedAndUniqueOperations(ops, m) { - t.Fatalf("Shouldn't be unique") - } + require.False(IsSortedAndUniqueOperations(ops, m)) } func TestOperationTxNotState(t *testing.T) { intf := interface{}(&OperationTx{}) - if _, ok := intf.(verify.State); ok { - t.Fatalf("shouldn't be marked as state") - } + _, ok := intf.(verify.State) + require.False(t, ok) } diff --git a/avalanchego/vms/avm/txs/operation_tx.go b/avalanchego/vms/avm/txs/operation_tx.go index 503ce742..8a1b261c 100644 --- a/avalanchego/vms/avm/txs/operation_tx.go +++ b/avalanchego/vms/avm/txs/operation_tx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package txs @@ -54,26 +54,6 @@ func (t *OperationTx) InputIDs() set.Set[ids.ID] { return inputs } -// ConsumedAssetIDs returns the IDs of the assets this transaction consumes -func (t *OperationTx) ConsumedAssetIDs() set.Set[ids.ID] { - assets := t.AssetIDs() - for _, op := range t.Ops { - if len(op.UTXOIDs) > 0 { - assets.Add(op.AssetID()) - } - } - return assets -} - -// AssetIDs returns the IDs of the assets this transaction depends on -func (t *OperationTx) AssetIDs() set.Set[ids.ID] { - assets := t.BaseTx.AssetIDs() - for _, op := range t.Ops { - assets.Add(op.AssetID()) - } - return assets -} - // NumCredentials returns the number of expected credentials func (t *OperationTx) NumCredentials() int { return t.BaseTx.NumCredentials() + len(t.Ops) diff --git a/avalanchego/vms/avm/txs/parser.go b/avalanchego/vms/avm/txs/parser.go index f2b06cce..710215ad 100644 --- a/avalanchego/vms/avm/txs/parser.go +++ b/avalanchego/vms/avm/txs/parser.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package txs @@ -11,10 +11,9 @@ import ( "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" - "github.com/ava-labs/avalanchego/codec/reflectcodec" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/timer/mockable" - "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/vms/avm/fxs" ) @@ -32,9 +31,6 @@ type Parser interface { ParseTx(bytes []byte) (*Tx, error) ParseGenesisTx(bytes []byte) (*Tx, error) - - InitializeTx(tx *Tx) error - InitializeGenesisTx(tx *Tx) error } type parser struct { @@ -44,9 +40,10 @@ type parser struct { gc linearcodec.Codec } -func NewParser(cortinaTime time.Time, fxs []fxs.Fx) (Parser, error) { +func NewParser(cortinaTime time.Time, durangoTime time.Time, fxs []fxs.Fx) (Parser, error) { return NewCustomParser( cortinaTime, + durangoTime, make(map[reflect.Type]int), &mockable.Clock{}, logging.NoLog{}, @@ -56,19 +53,19 @@ func NewParser(cortinaTime time.Time, fxs []fxs.Fx) (Parser, error) { func NewCustomParser( cortinaTime time.Time, + durangoTime time.Time, typeToFxIndex map[reflect.Type]int, clock *mockable.Clock, log logging.Logger, fxs []fxs.Fx, ) (Parser, error) { - gc := linearcodec.New([]string{reflectcodec.DefaultTagName}, 1<<20) - c := linearcodec.NewDefault() + gc := linearcodec.NewDefault(time.Time{}) + c := linearcodec.NewDefault(durangoTime) gcm := codec.NewManager(math.MaxInt32) cm := codec.NewDefaultManager() - errs := wrappers.Errs{} - errs.Add( + err := utils.Err( c.RegisterType(&BaseTx{}), c.RegisterType(&CreateAssetTx{}), c.RegisterType(&OperationTx{}), @@ -83,8 +80,8 @@ func NewCustomParser( gc.RegisterType(&ExportTx{}), gcm.RegisterCodec(CodecVersion, gc), ) - if errs.Errored() { - return nil, errs.Err + if err != nil { + return nil, err } vm := &fxVM{ @@ -135,14 +132,6 @@ func (p *parser) ParseGenesisTx(bytes []byte) (*Tx, error) { return parse(p.gcm, bytes) } -func (p 
*parser) InitializeTx(tx *Tx) error { - return initializeTx(p.cm, tx) -} - -func (p *parser) InitializeGenesisTx(tx *Tx) error { - return initializeTx(p.gcm, tx) -} - func parse(cm codec.Manager, signedBytes []byte) (*Tx, error) { tx := &Tx{} parsedVersion, err := cm.Unmarshal(signedBytes, tx) @@ -162,19 +151,3 @@ func parse(cm codec.Manager, signedBytes []byte) (*Tx, error) { tx.SetBytes(unsignedBytes, signedBytes) return tx, nil } - -func initializeTx(cm codec.Manager, tx *Tx) error { - signedBytes, err := cm.Marshal(CodecVersion, tx) - if err != nil { - return fmt.Errorf("problem creating transaction: %w", err) - } - - unsignedBytesLen, err := cm.Size(CodecVersion, &tx.Unsigned) - if err != nil { - return fmt.Errorf("couldn't calculate UnsignedTx marshal length: %w", err) - } - - unsignedBytes := signedBytes[:unsignedBytesLen] - tx.SetBytes(unsignedBytes, signedBytes) - return nil -} diff --git a/avalanchego/vms/avm/txs/tx.go b/avalanchego/vms/avm/txs/tx.go index 309d5974..42e845b0 100644 --- a/avalanchego/vms/avm/txs/tx.go +++ b/avalanchego/vms/avm/txs/tx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package txs @@ -8,6 +8,7 @@ import ( "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/network/p2p/gossip" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/hashing" @@ -19,6 +20,8 @@ import ( "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) +var _ gossip.Gossipable = (*Tx)(nil) + type UnsignedTx interface { snow.ContextInitializable @@ -27,9 +30,6 @@ type UnsignedTx interface { InputIDs() set.Set[ids.ID] - ConsumedAssetIDs() set.Set[ids.ID] - AssetIDs() set.Set[ids.ID] - NumCredentials() int // TODO: deprecate after x-chain linearization InputUTXOs() []*avax.UTXOID @@ -47,7 +47,7 @@ type Tx struct { Unsigned UnsignedTx `serialize:"true" json:"unsignedTx"` Creds []*fxs.FxCredential `serialize:"true" json:"credentials"` // The credentials of this transaction - id ids.ID + TxID ids.ID `json:"id"` bytes []byte } @@ -68,14 +68,19 @@ func (t *Tx) Initialize(c codec.Manager) error { } func (t *Tx) SetBytes(unsignedBytes, signedBytes []byte) { - t.id = hashing.ComputeHash256Array(signedBytes) + t.TxID = hashing.ComputeHash256Array(signedBytes) t.bytes = signedBytes t.Unsigned.SetBytes(unsignedBytes) } // ID returns the unique ID of this tx func (t *Tx) ID() ids.ID { - return t.id + return t.TxID +} + +// GossipID returns the unique ID that this tx should use for mempool gossip +func (t *Tx) GossipID() ids.ID { + return t.TxID } // Bytes returns the binary representation of this tx @@ -110,7 +115,7 @@ func (t *Tx) SignSECP256K1Fx(c codec.Manager, signers [][]*secp256k1.PrivateKey) } copy(cred.Sigs[i][:], sig) } - t.Creds = append(t.Creds, &fxs.FxCredential{Verifiable: cred}) + t.Creds = append(t.Creds, &fxs.FxCredential{Credential: cred}) } signedBytes, err := c.Marshal(CodecVersion, t) @@ -139,7 +144,7 @@ func (t *Tx) SignPropertyFx(c codec.Manager, signers [][]*secp256k1.PrivateKey) } copy(cred.Sigs[i][:], sig) } - t.Creds 
= append(t.Creds, &fxs.FxCredential{Verifiable: cred}) + t.Creds = append(t.Creds, &fxs.FxCredential{Credential: cred}) } signedBytes, err := c.Marshal(CodecVersion, t) @@ -168,7 +173,7 @@ func (t *Tx) SignNFTFx(c codec.Manager, signers [][]*secp256k1.PrivateKey) error } copy(cred.Sigs[i][:], sig) } - t.Creds = append(t.Creds, &fxs.FxCredential{Verifiable: cred}) + t.Creds = append(t.Creds, &fxs.FxCredential{Credential: cred}) } signedBytes, err := c.Marshal(CodecVersion, t) diff --git a/avalanchego/vms/avm/txs/visitor.go b/avalanchego/vms/avm/txs/visitor.go index 8de00c1b..31eccb67 100644 --- a/avalanchego/vms/avm/txs/visitor.go +++ b/avalanchego/vms/avm/txs/visitor.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs diff --git a/avalanchego/vms/avm/unique_tx.go b/avalanchego/vms/avm/unique_tx.go deleted file mode 100644 index 6a57a87d..00000000 --- a/avalanchego/vms/avm/unique_tx.go +++ /dev/null @@ -1,340 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package avm - -import ( - "context" - "errors" - "fmt" - - "go.uber.org/zap" - - "github.com/ava-labs/avalanchego/cache" - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/choices" - "github.com/ava-labs/avalanchego/snow/consensus/snowstorm" - "github.com/ava-labs/avalanchego/utils/set" - "github.com/ava-labs/avalanchego/vms/avm/txs" - "github.com/ava-labs/avalanchego/vms/avm/txs/executor" - "github.com/ava-labs/avalanchego/vms/components/avax" -) - -var ( - errMissingUTXO = errors.New("missing utxo") - errUnknownTx = errors.New("transaction is unknown") - errRejectedTx = errors.New("transaction is rejected") -) - -var ( - _ snowstorm.Tx = (*UniqueTx)(nil) - _ cache.Evictable[ids.ID] = (*UniqueTx)(nil) -) - -// UniqueTx provides a de-duplication service for txs. 
This only provides a -// performance boost -type UniqueTx struct { - *TxCachedState - - vm *VM - txID ids.ID -} - -type TxCachedState struct { - *txs.Tx - - unique, verifiedTx, verifiedState bool - validity error - - inputs []ids.ID - inputUTXOs []*avax.UTXOID - utxos []*avax.UTXO - deps []snowstorm.Tx - - status choices.Status -} - -func (tx *UniqueTx) refresh() { - tx.vm.metrics.IncTxRefreshes() - - if tx.TxCachedState == nil { - tx.TxCachedState = &TxCachedState{} - } - if tx.unique { - return - } - unique := tx.vm.DeduplicateTx(tx) - prevTx := tx.Tx - if unique == tx { - tx.vm.metrics.IncTxRefreshMisses() - - // If no one was in the cache, make sure that there wasn't an - // intermediate object whose state I must reflect - if status, err := tx.vm.state.GetStatus(tx.ID()); err == nil { - tx.status = status - } - tx.unique = true - } else { - tx.vm.metrics.IncTxRefreshHits() - - // If someone is in the cache, they must be up to date - - // This ensures that every unique tx object points to the same tx state - tx.TxCachedState = unique.TxCachedState - } - - if tx.Tx != nil { - return - } - - if prevTx == nil { - if innerTx, err := tx.vm.state.GetTx(tx.ID()); err == nil { - tx.Tx = innerTx - } - } else { - tx.Tx = prevTx - } -} - -// Evict is called when this UniqueTx will no longer be returned from a cache -// lookup -func (tx *UniqueTx) Evict() { - // Lock is already held here - tx.unique = false - tx.deps = nil -} - -func (tx *UniqueTx) setStatus(status choices.Status) { - tx.refresh() - if tx.status != status { - tx.status = status - tx.vm.state.AddStatus(tx.ID(), status) - } -} - -// ID returns the wrapped txID -func (tx *UniqueTx) ID() ids.ID { - return tx.txID -} - -func (tx *UniqueTx) Key() ids.ID { - return tx.txID -} - -// Accept is called when the transaction was finalized as accepted by consensus -func (tx *UniqueTx) Accept(context.Context) error { - if s := tx.Status(); s != choices.Processing { - return fmt.Errorf("transaction has invalid status: %s", 
s) - } - - if err := tx.vm.onAccept(tx.Tx); err != nil { - return err - } - - executor := &executor.Executor{ - Codec: tx.vm.txBackend.Codec, - State: tx.vm.state, - Tx: tx.Tx, - } - err := tx.Tx.Unsigned.Visit(executor) - if err != nil { - return fmt.Errorf("error staging accepted state changes: %w", err) - } - - tx.setStatus(choices.Accepted) - - commitBatch, err := tx.vm.state.CommitBatch() - if err != nil { - txID := tx.ID() - return fmt.Errorf("couldn't create commitBatch while processing tx %s: %w", txID, err) - } - - defer tx.vm.state.Abort() - err = tx.vm.ctx.SharedMemory.Apply( - executor.AtomicRequests, - commitBatch, - ) - if err != nil { - txID := tx.ID() - return fmt.Errorf("error committing accepted state changes while processing tx %s: %w", txID, err) - } - - tx.deps = nil // Needed to prevent a memory leak - return tx.vm.metrics.MarkTxAccepted(tx.Tx) -} - -// Reject is called when the transaction was finalized as rejected by consensus -func (tx *UniqueTx) Reject(context.Context) error { - tx.setStatus(choices.Rejected) - - txID := tx.ID() - tx.vm.ctx.Log.Debug("rejecting tx", - zap.Stringer("txID", txID), - ) - - if err := tx.vm.state.Commit(); err != nil { - tx.vm.ctx.Log.Error("failed to commit reject", - zap.Stringer("txID", tx.txID), - zap.Error(err), - ) - return err - } - - tx.vm.walletService.decided(txID) - - tx.deps = nil // Needed to prevent a memory leak - return nil -} - -// Status returns the current status of this transaction -func (tx *UniqueTx) Status() choices.Status { - tx.refresh() - return tx.status -} - -// Dependencies returns the set of transactions this transaction builds on -func (tx *UniqueTx) Dependencies() ([]snowstorm.Tx, error) { - tx.refresh() - if tx.Tx == nil || len(tx.deps) != 0 { - return tx.deps, nil - } - - txIDs := set.Set[ids.ID]{} - for _, in := range tx.InputUTXOs() { - if in.Symbolic() { - continue - } - txID, _ := in.InputSource() - if txIDs.Contains(txID) { - continue - } - txIDs.Add(txID) - tx.deps = 
append(tx.deps, &UniqueTx{ - vm: tx.vm, - txID: txID, - }) - } - consumedIDs := tx.Tx.Unsigned.ConsumedAssetIDs() - for assetID := range tx.Tx.Unsigned.AssetIDs() { - if consumedIDs.Contains(assetID) || txIDs.Contains(assetID) { - continue - } - txIDs.Add(assetID) - tx.deps = append(tx.deps, &UniqueTx{ - vm: tx.vm, - txID: assetID, - }) - } - return tx.deps, nil -} - -// InputIDs returns the set of utxoIDs this transaction consumes -func (tx *UniqueTx) InputIDs() []ids.ID { - tx.refresh() - if tx.Tx == nil || len(tx.inputs) != 0 { - return tx.inputs - } - - inputUTXOs := tx.InputUTXOs() - tx.inputs = make([]ids.ID, len(inputUTXOs)) - for i, utxo := range inputUTXOs { - tx.inputs[i] = utxo.InputID() - } - return tx.inputs -} - -// Whitelist is not supported by this transaction type, so [false] is returned. -func (*UniqueTx) HasWhitelist() bool { - return false -} - -// Whitelist is not supported by this transaction type, so [false] is returned. -func (*UniqueTx) Whitelist(context.Context) (set.Set[ids.ID], error) { - return nil, nil -} - -// InputUTXOs returns the utxos that will be consumed on tx acceptance -func (tx *UniqueTx) InputUTXOs() []*avax.UTXOID { - tx.refresh() - if tx.Tx == nil || len(tx.inputUTXOs) != 0 { - return tx.inputUTXOs - } - tx.inputUTXOs = tx.Tx.Unsigned.InputUTXOs() - return tx.inputUTXOs -} - -// UTXOs returns the utxos that will be added to the UTXO set on tx acceptance -func (tx *UniqueTx) UTXOs() []*avax.UTXO { - tx.refresh() - if tx.Tx == nil || len(tx.utxos) != 0 { - return tx.utxos - } - tx.utxos = tx.Tx.UTXOs() - return tx.utxos -} - -// Bytes returns the binary representation of this transaction -func (tx *UniqueTx) Bytes() []byte { - tx.refresh() - return tx.Tx.Bytes() -} - -func (tx *UniqueTx) verifyWithoutCacheWrites() error { - switch status := tx.Status(); status { - case choices.Unknown: - return errUnknownTx - case choices.Accepted: - return nil - case choices.Rejected: - return errRejectedTx - default: - return 
tx.SemanticVerify() - } -} - -// Verify the validity of this transaction -func (tx *UniqueTx) Verify(context.Context) error { - if err := tx.verifyWithoutCacheWrites(); err != nil { - return err - } - - tx.verifiedState = true - return nil -} - -// SyntacticVerify verifies that this transaction is well formed -func (tx *UniqueTx) SyntacticVerify() error { - tx.refresh() - - if tx.Tx == nil { - return errUnknownTx - } - - if tx.verifiedTx { - return tx.validity - } - - tx.verifiedTx = true - tx.validity = tx.Tx.Unsigned.Visit(&executor.SyntacticVerifier{ - Backend: tx.vm.txBackend, - Tx: tx.Tx, - }) - return tx.validity -} - -// SemanticVerify the validity of this transaction -func (tx *UniqueTx) SemanticVerify() error { - if err := tx.SyntacticVerify(); err != nil { - return err - } - - if tx.validity != nil || tx.verifiedState { - return tx.validity - } - - return tx.Unsigned.Visit(&executor.SemanticVerifier{ - Backend: tx.vm.txBackend, - State: tx.vm.dagState, - Tx: tx.Tx, - }) -} diff --git a/avalanchego/vms/avm/utxo/spender.go b/avalanchego/vms/avm/utxo/spender.go index ed57549d..02ade0d9 100644 --- a/avalanchego/vms/avm/utxo/spender.go +++ b/avalanchego/vms/avm/utxo/spender.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package utxo diff --git a/avalanchego/vms/avm/vm.go b/avalanchego/vms/avm/vm.go index 7d692acd..9a8a8b8d 100644 --- a/avalanchego/vms/avm/vm.go +++ b/avalanchego/vms/avm/vm.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package avm @@ -7,41 +7,35 @@ import ( "context" "errors" "fmt" + "net/http" "reflect" - "time" - - stdjson "encoding/json" + "sync" "github.com/gorilla/rpc/v2" - "github.com/prometheus/client_golang/prometheus" - "go.uber.org/zap" "github.com/ava-labs/avalanchego/cache" "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/database/manager" "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/pubsub" "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/consensus/snowstorm" "github.com/ava-labs/avalanchego/snow/engine/avalanche/vertex" "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/json" "github.com/ava-labs/avalanchego/utils/linkedhashmap" "github.com/ava-labs/avalanchego/utils/set" - "github.com/ava-labs/avalanchego/utils/timer" "github.com/ava-labs/avalanchego/utils/timer/mockable" - "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/version" - "github.com/ava-labs/avalanchego/vms/avm/blocks" + "github.com/ava-labs/avalanchego/vms/avm/block" "github.com/ava-labs/avalanchego/vms/avm/config" "github.com/ava-labs/avalanchego/vms/avm/metrics" "github.com/ava-labs/avalanchego/vms/avm/network" - "github.com/ava-labs/avalanchego/vms/avm/states" + "github.com/ava-labs/avalanchego/vms/avm/state" "github.com/ava-labs/avalanchego/vms/avm/txs" "github.com/ava-labs/avalanchego/vms/avm/txs/mempool" "github.com/ava-labs/avalanchego/vms/avm/utxo" @@ -50,24 +44,18 @@ import ( "github.com/ava-labs/avalanchego/vms/components/keystore" "github.com/ava-labs/avalanchego/vms/secp256k1fx" - blockbuilder "github.com/ava-labs/avalanchego/vms/avm/blocks/builder" - blockexecutor "github.com/ava-labs/avalanchego/vms/avm/blocks/executor" + blockbuilder 
"github.com/ava-labs/avalanchego/vms/avm/block/builder" + blockexecutor "github.com/ava-labs/avalanchego/vms/avm/block/executor" extensions "github.com/ava-labs/avalanchego/vms/avm/fxs" txexecutor "github.com/ava-labs/avalanchego/vms/avm/txs/executor" ) -const ( - batchTimeout = time.Second - batchSize = 30 - assetToFxCacheSize = 1024 - txDeduplicatorSize = 8192 -) +const assetToFxCacheSize = 1024 var ( errIncompatibleFx = errors.New("incompatible feature extension") errUnknownFx = errors.New("unknown feature extension") errGenesisAssetMustHaveState = errors.New("genesis asset must have non-empty state") - errBootstrapping = errors.New("chain is currently bootstrapping") _ vertex.LinearizableVMWithEngine = (*VM)(nil) ) @@ -92,14 +80,16 @@ type VM struct { registerer prometheus.Registerer - parser blocks.Parser + connectedPeers map[ids.NodeID]*version.Application + + parser block.Parser pubsub *pubsub.Server appSender common.AppSender // State management - state states.State + state state.State // Set to true once this VM is marked as `Bootstrapped` by the engine bootstrapped bool @@ -110,12 +100,6 @@ type VM struct { // Asset ID --> Bit set with fx IDs the asset supports assetToFxCache *cache.LRU[ids.ID, set.Bits64] - // Transaction issuing - timer *timer.Timer - batchTimeout time.Duration - txs []snowstorm.Tx - toEngine chan<- common.Message - baseDB database.Database db *versiondb.Database @@ -126,23 +110,39 @@ type VM struct { addressTxsIndexer index.AddressTxsIndexer - uniqueTxs cache.Deduplicator[ids.ID, *UniqueTx] - txBackend *txexecutor.Backend - dagState *dagState + // Cancelled on shutdown + onShutdownCtx context.Context + // Call [onShutdownCtxCancel] to cancel [onShutdownCtx] during Shutdown() + onShutdownCtxCancel context.CancelFunc + awaitShutdown sync.WaitGroup + + networkConfig network.Config // These values are only initialized after the chain has been linearized. 
blockbuilder.Builder chainManager blockexecutor.Manager - network network.Network + network *network.Network } -func (*VM) Connected(context.Context, ids.NodeID, *version.Application) error { - return nil +func (vm *VM) Connected(ctx context.Context, nodeID ids.NodeID, version *version.Application) error { + // If the chain isn't linearized yet, we must track the peers externally + // until the network is initialized. + if vm.network == nil { + vm.connectedPeers[nodeID] = version + return nil + } + return vm.network.Connected(ctx, nodeID, version) } -func (*VM) Disconnected(context.Context, ids.NodeID) error { - return nil +func (vm *VM) Disconnected(ctx context.Context, nodeID ids.NodeID) error { + // If the chain isn't linearized yet, we must track the peers externally + // until the network is initialized. + if vm.network == nil { + delete(vm.connectedPeers, nodeID) + return nil + } + return vm.network.Disconnected(ctx, nodeID) } /* @@ -151,34 +151,27 @@ func (*VM) Disconnected(context.Context, ids.NodeID) error { ****************************************************************************** */ -type Config struct { - IndexTransactions bool `json:"index-transactions"` - IndexAllowIncomplete bool `json:"index-allow-incomplete"` -} - func (vm *VM) Initialize( _ context.Context, ctx *snow.Context, - dbManager manager.Manager, + db database.Database, genesisBytes []byte, _ []byte, configBytes []byte, - toEngine chan<- common.Message, + _ chan<- common.Message, fxs []*common.Fx, appSender common.AppSender, ) error { noopMessageHandler := common.NewNoOpAppHandler(ctx.Log) vm.Atomic = network.NewAtomic(noopMessageHandler) - avmConfig := Config{} - if len(configBytes) > 0 { - if err := stdjson.Unmarshal(configBytes, &avmConfig); err != nil { - return err - } - ctx.Log.Info("VM config initialized", - zap.Reflect("config", avmConfig), - ) + avmConfig, err := ParseConfig(configBytes) + if err != nil { + return err } + ctx.Log.Info("VM config initialized", + 
zap.Reflect("config", avmConfig), + ) registerer := prometheus.NewRegistry() if err := ctx.Metrics.Register(registerer); err != nil { @@ -186,8 +179,9 @@ func (vm *VM) Initialize( } vm.registerer = registerer + vm.connectedPeers = make(map[ids.NodeID]*version.Application) + // Initialize metrics as soon as possible - var err error vm.metrics, err = metrics.New("", registerer) if err != nil { return fmt.Errorf("failed to initialize metrics: %w", err) @@ -196,9 +190,7 @@ func (vm *VM) Initialize( vm.AddressManager = avax.NewAddressManager(ctx) vm.Aliaser = ids.NewAliaser() - db := dbManager.Current().Database vm.ctx = ctx - vm.toEngine = toEngine vm.appSender = appSender vm.baseDB = db vm.db = versiondb.New(db) @@ -224,8 +216,9 @@ func (vm *VM) Initialize( } vm.typeToFxIndex = map[reflect.Type]int{} - vm.parser, err = blocks.NewCustomParser( + vm.parser, err = block.NewCustomParser( version.GetCortinaTime(ctx.NetworkID), + vm.DurangoTime, vm.typeToFxIndex, &vm.clock, ctx.Log, @@ -239,7 +232,12 @@ func (vm *VM) Initialize( vm.AtomicUTXOManager = avax.NewAtomicUTXOManager(ctx.SharedMemory, codec) vm.Spender = utxo.NewSpender(&vm.clock, codec) - state, err := states.New(vm.db, vm.parser, vm.registerer) + state, err := state.New( + vm.db, + vm.parser, + vm.registerer, + avmConfig.ChecksumsEnabled, + ) if err != nil { return err } @@ -250,18 +248,6 @@ func (vm *VM) Initialize( return err } - vm.timer = timer.NewTimer(func() { - ctx.Lock.Lock() - defer ctx.Lock.Unlock() - - vm.FlushTxs() - }) - go ctx.Log.RecoverAndPanic(vm.timer.Dispatch) - vm.batchTimeout = batchTimeout - - vm.uniqueTxs = &cache.EvictableLRU[ids.ID, *UniqueTx]{ - Size: txDeduplicatorSize, - } vm.walletService.vm = vm vm.walletService.pendingTxs = linkedhashmap.New[ids.ID, *txs.Tx]() @@ -289,11 +275,9 @@ func (vm *VM) Initialize( FeeAssetID: vm.feeAssetID, Bootstrapped: false, } - vm.dagState = &dagState{ - Chain: vm.state, - vm: vm, - } + vm.onShutdownCtx, vm.onShutdownCtxCancel = 
context.WithCancel(context.Background()) + vm.networkConfig = avmConfig.Network return vm.state.Commit() } @@ -316,19 +300,6 @@ func (vm *VM) onNormalOperationsStarted() error { } } - txID, err := ids.FromString("2JPwx3rbUy877CWYhtXpfPVS5tD8KfnbiF5pxMRu6jCaq5dnME") - if err != nil { - return err - } - utxoID := avax.UTXOID{ - TxID: txID, - OutputIndex: 192, - } - vm.state.DeleteUTXO(utxoID.InputID()) - if err := vm.state.Commit(); err != nil { - return err - } - vm.bootstrapped = true return nil } @@ -345,29 +316,24 @@ func (vm *VM) SetState(_ context.Context, state snow.State) error { } func (vm *VM) Shutdown(context.Context) error { - if vm.timer == nil { + if vm.state == nil { return nil } - // There is a potential deadlock if the timer is about to execute a timeout. - // So, the lock must be released before stopping the timer. - vm.ctx.Lock.Unlock() - vm.timer.Stop() - vm.ctx.Lock.Lock() + vm.onShutdownCtxCancel() + vm.awaitShutdown.Wait() - errs := wrappers.Errs{} - errs.Add( + return utils.Err( vm.state.Close(), vm.baseDB.Close(), ) - return errs.Err } func (*VM) Version(context.Context) (string, error) { return version.Current.String(), nil } -func (vm *VM) CreateHandlers(context.Context) (map[string]*common.HTTPHandler, error) { +func (vm *VM) CreateHandlers(context.Context) (map[string]http.Handler, error) { codec := json.NewCodec() rpcServer := rpc.NewServer() @@ -388,26 +354,13 @@ func (vm *VM) CreateHandlers(context.Context) (map[string]*common.HTTPHandler, e // name this service "wallet" err := walletServer.RegisterService(&vm.walletService, "wallet") - return map[string]*common.HTTPHandler{ - "": {Handler: rpcServer}, - "/wallet": {Handler: walletServer}, - "/events": {LockOptions: common.NoLock, Handler: vm.pubsub}, + return map[string]http.Handler{ + "": rpcServer, + "/wallet": walletServer, + "/events": vm.pubsub, }, err } -func (*VM) CreateStaticHandlers(context.Context) (map[string]*common.HTTPHandler, error) { - newServer := rpc.NewServer() - 
codec := json.NewCodec() - newServer.RegisterCodec(codec, "application/json") - newServer.RegisterCodec(codec, "application/json;charset=UTF-8") - - // name this service "avm" - staticService := CreateStaticService() - return map[string]*common.HTTPHandler{ - "": {LockOptions: common.WriteLock, Handler: newServer}, - }, newServer.RegisterService(staticService, "avm") -} - /* ****************************************************************************** ********************************** Chain VM ********************************** @@ -435,13 +388,21 @@ func (vm *VM) LastAccepted(context.Context) (ids.ID, error) { return vm.chainManager.LastAccepted(), nil } +func (vm *VM) GetBlockIDAtHeight(_ context.Context, height uint64) (ids.ID, error) { + return vm.state.GetBlockIDAtHeight(height) +} + +func (*VM) VerifyHeightIndex(context.Context) error { + return nil +} + /* ****************************************************************************** *********************************** DAG VM *********************************** ****************************************************************************** */ -func (vm *VM) Linearize(_ context.Context, stopVertexID ids.ID, toEngine chan<- common.Message) error { +func (vm *VM) Linearize(ctx context.Context, stopVertexID ids.ID, toEngine chan<- common.Message) error { time := version.GetCortinaTime(vm.ctx.NetworkID) err := vm.state.InitializeChainState(stopVertexID, time) if err != nil { @@ -456,9 +417,7 @@ func (vm *VM) Linearize(_ context.Context, stopVertexID ids.ID, toEngine chan<- vm.chainManager = blockexecutor.NewManager( mempool, vm.metrics, - &chainState{ - State: vm.state, - }, + vm.state, vm.txBackend, &vm.clock, vm.onAccept, @@ -471,118 +430,99 @@ func (vm *VM) Linearize(_ context.Context, stopVertexID ids.ID, toEngine chan<- mempool, ) - vm.network = network.New( + // Invariant: The context lock is not held when calling network.IssueTx. 
+ vm.network, err = network.New( vm.ctx, vm.parser, - vm.chainManager, + network.NewLockedTxVerifier( + &vm.ctx.Lock, + vm.chainManager, + ), mempool, vm.appSender, + vm.registerer, + vm.networkConfig, ) + if err != nil { + return fmt.Errorf("failed to initialize network: %w", err) + } + + // Notify the network of our current peers + for nodeID, version := range vm.connectedPeers { + if err := vm.network.Connected(ctx, nodeID, version); err != nil { + return err + } + } + vm.connectedPeers = nil // Note: It's important only to switch the networking stack after the full // chainVM has been initialized. Traffic will immediately start being // handled asynchronously. vm.Atomic.Set(vm.network) - return nil -} - -func (vm *VM) PendingTxs(context.Context) []snowstorm.Tx { - vm.timer.Cancel() - txs := vm.txs - vm.txs = nil - return txs -} - -func (vm *VM) ParseTx(_ context.Context, b []byte) (snowstorm.Tx, error) { - return vm.parseTx(b) -} + vm.awaitShutdown.Add(1) + go func() { + defer vm.awaitShutdown.Done() -func (vm *VM) GetTx(_ context.Context, txID ids.ID) (snowstorm.Tx, error) { - tx := &UniqueTx{ - vm: vm, - txID: txID, - } - // Verify must be called in the case the that tx was flushed from the unique - // cache. - return tx, tx.verifyWithoutCacheWrites() -} + // Invariant: Gossip must never grab the context lock. + vm.network.Gossip(vm.onShutdownCtx) + }() -/* - ****************************************************************************** - ********************************** JSON API ********************************** - ****************************************************************************** - */ - -// IssueTx attempts to send a transaction to consensus. -// If onDecide is specified, the function will be called when the transaction is -// either accepted or rejected with the appropriate status. This function will -// go out of scope when the transaction is removed from memory. 
-func (vm *VM) IssueTx(b []byte) (ids.ID, error) { - if !vm.bootstrapped { - return ids.ID{}, errBootstrapping - } - - // If the chain has been linearized, issue the tx to the network. - if vm.Builder != nil { - tx, err := vm.parser.ParseTx(b) + go func() { + err := vm.state.Prune(&vm.ctx.Lock, vm.ctx.Log) if err != nil { - vm.ctx.Log.Debug("failed to parse tx", + vm.ctx.Log.Warn("state pruning failed", zap.Error(err), ) - return ids.ID{}, err + return } + vm.ctx.Log.Info("state pruning finished") + }() - err = vm.network.IssueTx(context.TODO(), tx) - if err != nil { - vm.ctx.Log.Debug("failed to add tx to mempool", - zap.Error(err), - ) - return ids.ID{}, err - } + return nil +} - return tx.ID(), nil +func (vm *VM) ParseTx(_ context.Context, bytes []byte) (snowstorm.Tx, error) { + tx, err := vm.parser.ParseTx(bytes) + if err != nil { + return nil, err } - // TODO: After the chain is linearized, remove the following code. - tx, err := vm.parseTx(b) + err = tx.Unsigned.Visit(&txexecutor.SyntacticVerifier{ + Backend: vm.txBackend, + Tx: tx, + }) if err != nil { - return ids.ID{}, err - } - if err := tx.verifyWithoutCacheWrites(); err != nil { - return ids.ID{}, err + return nil, err } - vm.issueTx(tx) - return tx.ID(), nil -} -// TODO: After the chain is linearized, remove this. 
-func (vm *VM) issueStopVertex() error { - select { - case vm.toEngine <- common.StopVertex: - default: - vm.ctx.Log.Debug("dropping common.StopVertex message to engine due to contention") - } - return nil + return &Tx{ + vm: vm, + tx: tx, + }, nil } /* ****************************************************************************** - ********************************** Timer API ********************************* + ********************************** JSON API ********************************** ****************************************************************************** */ -// FlushTxs into consensus -func (vm *VM) FlushTxs() { - vm.timer.Cancel() - if len(vm.txs) != 0 { - select { - case vm.toEngine <- common.PendingTxs: - default: - vm.ctx.Log.Debug("dropping message to engine due to contention") - vm.timer.SetTimeoutIn(vm.batchTimeout) - } +// issueTx attempts to send a transaction to consensus. +// +// Invariant: The context lock is not held +// Invariant: This function is only called after Linearize has been called. 
+func (vm *VM) issueTx(tx *txs.Tx) (ids.ID, error) { + txID := tx.ID() + err := vm.network.IssueTx(context.TODO(), tx) + if err != nil && !errors.Is(err, mempool.ErrDuplicateTx) { + vm.ctx.Log.Debug("failed to add tx to mempool", + zap.Stringer("txID", txID), + zap.Error(err), + ) + return txID, err } + return txID, nil } /* @@ -614,7 +554,7 @@ func (vm *VM) initGenesis(genesisBytes []byte) error { tx := &txs.Tx{ Unsigned: &genesisTx.CreateAssetTx, } - if err := vm.parser.InitializeGenesisTx(tx); err != nil { + if err := tx.Initialize(genesisCodec); err != nil { return err } @@ -648,48 +588,11 @@ func (vm *VM) initState(tx *txs.Tx) { zap.Stringer("txID", txID), ) vm.state.AddTx(tx) - vm.state.AddStatus(txID, choices.Accepted) for _, utxo := range tx.UTXOs() { vm.state.AddUTXO(utxo) } } -func (vm *VM) parseTx(bytes []byte) (*UniqueTx, error) { - rawTx, err := vm.parser.ParseTx(bytes) - if err != nil { - return nil, err - } - - tx := &UniqueTx{ - TxCachedState: &TxCachedState{ - Tx: rawTx, - }, - vm: vm, - txID: rawTx.ID(), - } - if err := tx.SyntacticVerify(); err != nil { - return nil, err - } - - if tx.Status() == choices.Unknown { - vm.state.AddTx(tx.Tx) - tx.setStatus(choices.Processing) - return tx, vm.state.Commit() - } - - return tx, nil -} - -func (vm *VM) issueTx(tx snowstorm.Tx) { - vm.txs = append(vm.txs, tx) - switch { - case len(vm.txs) == batchSize: - vm.FlushTxs() - case len(vm.txs) == 1: - vm.timer.SetTimeoutIn(vm.batchTimeout) - } -} - // LoadUser returns: // 1) The UTXOs that reference one or more addresses controlled by the given user // 2) A keychain that contains this user's keys @@ -765,7 +668,7 @@ func (vm *VM) onAccept(tx *txs.Tx) error { continue } - utxo, err := vm.state.GetUTXOFromID(utxoID) + utxo, err := vm.state.GetUTXO(utxoID.InputID()) if err == database.ErrNotFound { vm.ctx.Log.Debug("dropping utxo from index", zap.Stringer("txID", txID), @@ -792,8 +695,3 @@ func (vm *VM) onAccept(tx *txs.Tx) error { vm.walletService.decided(txID) 
return nil } - -// UniqueTx de-duplicates the transaction. -func (vm *VM) DeduplicateTx(tx *UniqueTx) *UniqueTx { - return vm.uniqueTxs.Deduplicate(tx) -} diff --git a/avalanchego/vms/avm/vm_benchmark_test.go b/avalanchego/vms/avm/vm_benchmark_test.go index 09dfff61..713f809f 100644 --- a/avalanchego/vms/avm/vm_benchmark_test.go +++ b/avalanchego/vms/avm/vm_benchmark_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avm @@ -20,25 +20,24 @@ import ( func BenchmarkLoadUser(b *testing.B) { runLoadUserBenchmark := func(b *testing.B, numKeys int) { - // This will segfault instead of failing gracefully if there's an error - _, _, vm, _ := GenesisVM(nil) - ctx := vm.ctx + require := require.New(b) + + env := setup(b, &envConfig{ + keystoreUsers: []*user{{ + username: username, + password: password, + }}, + }) defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - b.Fatal(err) - } - ctx.Lock.Unlock() + require.NoError(env.vm.Shutdown(context.Background())) + env.vm.ctx.Lock.Unlock() }() - user, err := keystore.NewUserFromKeystore(vm.ctx.Keystore, username, password) - if err != nil { - b.Fatalf("Failed to get user keystore db: %s", err) - } + user, err := keystore.NewUserFromKeystore(env.vm.ctx.Keystore, username, password) + require.NoError(err) keys, err := keystore.NewKeys(user, numKeys) - if err != nil { - b.Fatalf("problem generating private key: %s", err) - } + require.NoError(err) b.ResetTimer() @@ -47,19 +46,16 @@ func BenchmarkLoadUser(b *testing.B) { addrIndex := n % numKeys fromAddrs.Clear() fromAddrs.Add(keys[addrIndex].PublicKey().Address()) - if _, _, err := vm.LoadUser(username, password, fromAddrs); err != nil { - b.Fatalf("Failed to load user: %s", err) - } + _, _, err := env.vm.LoadUser(username, password, fromAddrs) + require.NoError(err) } b.StopTimer() - if err := 
user.Close(); err != nil { - b.Fatal(err) - } + require.NoError(user.Close()) } - benchmarkSize := []int{10, 100, 1000, 10000} + benchmarkSize := []int{10, 100, 1000, 5000} for _, numKeys := range benchmarkSize { b.Run(fmt.Sprintf("NumKeys=%d", numKeys), func(b *testing.B) { runLoadUserBenchmark(b, numKeys) @@ -69,13 +65,12 @@ func BenchmarkLoadUser(b *testing.B) { // GetAllUTXOsBenchmark is a helper func to benchmark the GetAllUTXOs depending on the size func GetAllUTXOsBenchmark(b *testing.B, utxoCount int) { - _, _, vm, _ := GenesisVM(b) - ctx := vm.ctx + require := require.New(b) + + env := setup(b, &envConfig{}) defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - b.Fatal(err) - } - ctx.Lock.Unlock() + require.NoError(env.vm.Shutdown(context.Background())) + env.vm.ctx.Lock.Unlock() }() addr := ids.GenerateTestShortID() @@ -87,7 +82,7 @@ func GetAllUTXOsBenchmark(b *testing.B, utxoCount int) { TxID: ids.GenerateTestID(), OutputIndex: rand.Uint32(), }, - Asset: avax.Asset{ID: ids.ID{'y', 'e', 'e', 't'}}, + Asset: avax.Asset{ID: env.vm.ctx.AVAXAssetID}, Out: &secp256k1fx.TransferOutput{ Amt: 100000, OutputOwners: secp256k1fx.OutputOwners{ @@ -98,20 +93,19 @@ func GetAllUTXOsBenchmark(b *testing.B, utxoCount int) { }, } - vm.state.AddUTXO(utxo) + env.vm.state.AddUTXO(utxo) } - require.NoError(b, vm.state.Commit()) + require.NoError(env.vm.state.Commit()) - addrsSet := set.Set[ids.ShortID]{} - addrsSet.Add(addr) + addrsSet := set.Of(addr) b.ResetTimer() for i := 0; i < b.N; i++ { // Fetch all UTXOs older version - notPaginatedUTXOs, err := avax.GetAllUTXOs(vm.state, addrsSet) - require.NoError(b, err) - require.Len(b, notPaginatedUTXOs, utxoCount) + notPaginatedUTXOs, err := avax.GetAllUTXOs(env.vm.state, addrsSet) + require.NoError(err) + require.Len(notPaginatedUTXOs, utxoCount) } } @@ -120,9 +114,18 @@ func BenchmarkGetUTXOs(b *testing.B) { name string utxoCount int }{ - {"100", 100}, - {"10k", 10000}, - {"100k", 100000}, + { + name: 
"100", + utxoCount: 100, + }, + { + name: "10k", + utxoCount: 10_000, + }, + { + name: "100k", + utxoCount: 100_000, + }, } for _, count := range tests { diff --git a/avalanchego/vms/avm/vm_regression_test.go b/avalanchego/vms/avm/vm_regression_test.go index de148c05..c6ac40df 100644 --- a/avalanchego/vms/avm/vm_regression_test.go +++ b/avalanchego/vms/avm/vm_regression_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avm @@ -9,13 +9,10 @@ import ( "github.com/stretchr/testify/require" - "github.com/ava-labs/avalanchego/database/manager" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" - "github.com/ava-labs/avalanchego/version" + "github.com/ava-labs/avalanchego/vms/avm/config" "github.com/ava-labs/avalanchego/vms/avm/txs" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/verify" @@ -25,46 +22,21 @@ import ( func TestVerifyFxUsage(t *testing.T) { require := require.New(t) - vm := &VM{} - ctx := NewContext(t) - ctx.Lock.Lock() + + env := setup(t, &envConfig{ + vmStaticConfig: &config.Config{}, + }) + env.vm.ctx.Lock.Unlock() defer func() { - require.NoError(vm.Shutdown(context.Background())) - ctx.Lock.Unlock() + env.vm.ctx.Lock.Lock() + require.NoError(env.vm.Shutdown(context.Background())) + env.vm.ctx.Lock.Unlock() }() - genesisBytes := BuildGenesisTest(t) - issuer := make(chan common.Message, 1) - err := vm.Initialize( - context.Background(), - ctx, - manager.NewMemDB(version.Semantic1_0_0), - genesisBytes, - nil, - nil, - issuer, - []*common.Fx{ - { - ID: ids.Empty.Prefix(0), - Fx: &secp256k1fx.Fx{}, - }, - { - ID: ids.Empty.Prefix(1), - Fx: &nftfx.Fx{}, - }, - }, - 
nil, - ) - require.NoError(err) - vm.batchTimeout = 0 - - require.NoError(vm.SetState(context.Background(), snow.Bootstrapping)) - require.NoError(vm.SetState(context.Background(), snow.NormalOp)) - createAssetTx := &txs.Tx{Unsigned: &txs.CreateAssetTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: constants.UnitTestID, - BlockchainID: chainID, + BlockchainID: env.vm.ctx.XChainID, }}, Name: "Team Rocket", Symbol: "TR", @@ -96,15 +68,13 @@ func TestVerifyFxUsage(t *testing.T) { }, }, }} - require.NoError(vm.parser.InitializeTx(createAssetTx)) - - _, err = vm.IssueTx(createAssetTx.Bytes()) - require.NoError(err) + require.NoError(createAssetTx.Initialize(env.vm.parser.Codec())) + issueAndAccept(require, env.vm, env.issuer, createAssetTx) mintNFTTx := &txs.Tx{Unsigned: &txs.OperationTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: constants.UnitTestID, - BlockchainID: chainID, + BlockchainID: env.vm.ctx.XChainID, }}, Ops: []*txs.Operation{{ Asset: avax.Asset{ID: createAssetTx.ID()}, @@ -122,14 +92,12 @@ func TestVerifyFxUsage(t *testing.T) { }, }}, }} - require.NoError(mintNFTTx.SignNFTFx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{keys[0]}})) - - _, err = vm.IssueTx(mintNFTTx.Bytes()) - require.NoError(err) + require.NoError(mintNFTTx.SignNFTFx(env.vm.parser.Codec(), [][]*secp256k1.PrivateKey{{keys[0]}})) + issueAndAccept(require, env.vm, env.issuer, mintNFTTx) spendTx := &txs.Tx{Unsigned: &txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: constants.UnitTestID, - BlockchainID: chainID, + BlockchainID: env.vm.ctx.XChainID, Ins: []*avax.TransferableInput{{ UTXOID: avax.UTXOID{ TxID: createAssetTx.ID(), @@ -144,8 +112,6 @@ func TestVerifyFxUsage(t *testing.T) { }, }}, }}} - require.NoError(spendTx.SignSECP256K1Fx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{keys[0]}})) - - _, err = vm.IssueTx(spendTx.Bytes()) - require.NoError(err) + require.NoError(spendTx.SignSECP256K1Fx(env.vm.parser.Codec(), [][]*secp256k1.PrivateKey{{keys[0]}})) + issueAndAccept(require, 
env.vm, env.issuer, spendTx) } diff --git a/avalanchego/vms/avm/vm_test.go b/avalanchego/vms/avm/vm_test.go index 49b11ca6..d8aeaf3b 100644 --- a/avalanchego/vms/avm/vm_test.go +++ b/avalanchego/vms/avm/vm_test.go @@ -1,45 +1,26 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avm import ( - "bytes" "context" - "errors" "math" "testing" - "time" - - stdjson "encoding/json" - - "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" - "github.com/ava-labs/avalanchego/api/keystore" "github.com/ava-labs/avalanchego/chains/atomic" + "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/database/manager" "github.com/ava-labs/avalanchego/database/memdb" - "github.com/ava-labs/avalanchego/database/prefixdb" - "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/engine/common" - "github.com/ava-labs/avalanchego/snow/validators" - "github.com/ava-labs/avalanchego/utils/cb58" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" - "github.com/ava-labs/avalanchego/utils/formatting" - "github.com/ava-labs/avalanchego/utils/formatting/address" - "github.com/ava-labs/avalanchego/utils/json" - "github.com/ava-labs/avalanchego/utils/wrappers" - "github.com/ava-labs/avalanchego/version" "github.com/ava-labs/avalanchego/vms/avm/config" "github.com/ava-labs/avalanchego/vms/avm/fxs" - "github.com/ava-labs/avalanchego/vms/avm/metrics" - "github.com/ava-labs/avalanchego/vms/avm/states" "github.com/ava-labs/avalanchego/vms/avm/txs" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/verify" @@ 
-48,501 +29,76 @@ import ( "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) -var ( - chainID = ids.ID{5, 4, 3, 2, 1} - testTxFee = uint64(1000) - startBalance = uint64(50000) - - keys []*secp256k1.PrivateKey - addrs []ids.ShortID // addrs[i] corresponds to keys[i] - - assetID = ids.ID{1, 2, 3} - username = "bobby" - password = "StrnasfqewiurPasswdn56d" // #nosec G101 - feeAssetName = "TEST" - otherAssetName = "OTHER" - - errMissing = errors.New("missing") -) - -func init() { - factory := secp256k1.Factory{} - - for _, key := range []string{ - "24jUJ9vZexUM6expyMcT48LBx27k1m7xpraoV62oSQAHdziao5", - "2MMvUMsxx6zsHSNXJdFD8yc5XkancvwyKPwpw4xUK3TCGDuNBY", - "cxb7KpGWhDMALTjNNSJ7UQkkomPesyWAPUaWRGdyeBNzR6f35", - } { - keyBytes, _ := cb58.Decode(key) - pk, _ := factory.ToPrivateKey(keyBytes) - keys = append(keys, pk) - addrs = append(addrs, pk.PublicKey().Address()) - } -} - -func NewContext(tb testing.TB) *snow.Context { - genesisBytes := BuildGenesisTest(tb) - - tx := GetAVAXTxFromGenesisTest(genesisBytes, tb) - - ctx := snow.DefaultContextTest() - ctx.NetworkID = constants.UnitTestID - ctx.ChainID = chainID - ctx.AVAXAssetID = tx.ID() - ctx.XChainID = ids.Empty.Prefix(0) - ctx.CChainID = ids.Empty.Prefix(1) - aliaser := ctx.BCLookup.(ids.Aliaser) - - errs := wrappers.Errs{} - errs.Add( - aliaser.Alias(chainID, "X"), - aliaser.Alias(chainID, chainID.String()), - aliaser.Alias(constants.PlatformChainID, "P"), - aliaser.Alias(constants.PlatformChainID, constants.PlatformChainID.String()), - ) - if errs.Errored() { - tb.Fatal(errs.Err) - } - - ctx.ValidatorState = &validators.TestState{ - GetSubnetIDF: func(_ context.Context, chainID ids.ID) (ids.ID, error) { - subnetID, ok := map[ids.ID]ids.ID{ - constants.PlatformChainID: ctx.SubnetID, - chainID: ctx.SubnetID, - }[chainID] - if !ok { - return ids.Empty, errMissing - } - return subnetID, nil - }, - } - return ctx -} - -// Returns: -// -// 1. tx in genesis that creates asset -// 2. 
the index of the output -func GetCreateTxFromGenesisTest(tb testing.TB, genesisBytes []byte, assetName string) *txs.Tx { - parser, err := txs.NewParser( - time.Time{}, - []fxs.Fx{ - &secp256k1fx.Fx{}, - }, - ) - if err != nil { - tb.Fatal(err) - } - - cm := parser.GenesisCodec() - genesis := Genesis{} - if _, err := cm.Unmarshal(genesisBytes, &genesis); err != nil { - tb.Fatal(err) - } - - if len(genesis.Txs) == 0 { - tb.Fatal("genesis tx didn't have any txs") - } - - var assetTx *GenesisAsset - for _, tx := range genesis.Txs { - if tx.Name == assetName { - assetTx = tx - break - } - } - if assetTx == nil { - tb.Fatal("there is no create tx") - return nil - } - - tx := &txs.Tx{ - Unsigned: &assetTx.CreateAssetTx, - } - if err := parser.InitializeGenesisTx(tx); err != nil { - tb.Fatal(err) - } - return tx -} - -func GetAVAXTxFromGenesisTest(genesisBytes []byte, tb testing.TB) *txs.Tx { - return GetCreateTxFromGenesisTest(tb, genesisBytes, "AVAX") -} - -// BuildGenesisTest is the common Genesis builder for most tests -func BuildGenesisTest(tb testing.TB) []byte { - addr0Str, _ := address.FormatBech32(testHRP, addrs[0].Bytes()) - addr1Str, _ := address.FormatBech32(testHRP, addrs[1].Bytes()) - addr2Str, _ := address.FormatBech32(testHRP, addrs[2].Bytes()) - - defaultArgs := &BuildGenesisArgs{ - Encoding: formatting.Hex, - GenesisData: map[string]AssetDefinition{ - "asset1": { - Name: "AVAX", - Symbol: "SYMB", - InitialState: map[string][]interface{}{ - "fixedCap": { - Holder{ - Amount: json.Uint64(startBalance), - Address: addr0Str, - }, - Holder{ - Amount: json.Uint64(startBalance), - Address: addr1Str, - }, - Holder{ - Amount: json.Uint64(startBalance), - Address: addr2Str, - }, - }, - }, - }, - "asset2": { - Name: "myVarCapAsset", - Symbol: "MVCA", - InitialState: map[string][]interface{}{ - "variableCap": { - Owners{ - Threshold: 1, - Minters: []string{ - addr0Str, - addr1Str, - }, - }, - Owners{ - Threshold: 2, - Minters: []string{ - addr0Str, - addr1Str, - 
addr2Str, - }, - }, - }, - }, - }, - "asset3": { - Name: "myOtherVarCapAsset", - InitialState: map[string][]interface{}{ - "variableCap": { - Owners{ - Threshold: 1, - Minters: []string{ - addr0Str, - }, - }, - }, - }, - }, - "asset4": { - Name: "myFixedCapAsset", - InitialState: map[string][]interface{}{ - "fixedCap": { - Holder{ - Amount: json.Uint64(startBalance), - Address: addr0Str, - }, - Holder{ - Amount: json.Uint64(startBalance), - Address: addr1Str, - }, - }, - }, - }, - }, - } - - return BuildGenesisTestWithArgs(tb, defaultArgs) -} - -// BuildGenesisTestWithArgs allows building the genesis while injecting different starting points (args) -func BuildGenesisTestWithArgs(tb testing.TB, args *BuildGenesisArgs) []byte { - ss := CreateStaticService() - - reply := BuildGenesisReply{} - err := ss.BuildGenesis(nil, args, &reply) - if err != nil { - tb.Fatal(err) - } - - b, err := formatting.Decode(reply.Encoding, reply.Bytes) - if err != nil { - tb.Fatal(err) - } - - return b -} - -func GenesisVM(tb testing.TB) ([]byte, chan common.Message, *VM, *atomic.Memory) { - return GenesisVMWithArgs(tb, nil, nil) -} - -func GenesisVMWithArgs(tb testing.TB, additionalFxs []*common.Fx, args *BuildGenesisArgs) ([]byte, chan common.Message, *VM, *atomic.Memory) { - var genesisBytes []byte - - if args != nil { - genesisBytes = BuildGenesisTestWithArgs(tb, args) - } else { - genesisBytes = BuildGenesisTest(tb) - } - - ctx := NewContext(tb) - - baseDBManager := manager.NewMemDB(version.Semantic1_0_0) - - m := atomic.NewMemory(prefixdb.New([]byte{0}, baseDBManager.Current().Database)) - ctx.SharedMemory = m.NewSharedMemory(ctx.ChainID) - - // NB: this lock is intentionally left locked when this function returns. - // The caller of this function is responsible for unlocking. 
- ctx.Lock.Lock() - - userKeystore, err := keystore.CreateTestKeystore() - if err != nil { - tb.Fatal(err) - } - if err := userKeystore.CreateUser(username, password); err != nil { - tb.Fatal(err) - } - ctx.Keystore = userKeystore.NewBlockchainKeyStore(ctx.ChainID) - - issuer := make(chan common.Message, 1) - vm := &VM{Config: config.Config{ - TxFee: testTxFee, - CreateAssetTxFee: testTxFee, - }} - configBytes, err := stdjson.Marshal(Config{IndexTransactions: true}) - if err != nil { - tb.Fatal("should not have caused error in creating avm config bytes") - } - err = vm.Initialize( - context.Background(), - ctx, - baseDBManager.NewPrefixDBManager([]byte{1}), - genesisBytes, - nil, - configBytes, - issuer, - append( - []*common.Fx{ - { - ID: ids.Empty, - Fx: &secp256k1fx.Fx{}, - }, - { - ID: nftfx.ID, - Fx: &nftfx.Fx{}, - }, - }, - additionalFxs..., - ), - nil, - ) - if err != nil { - tb.Fatal(err) - } - vm.batchTimeout = 0 - - if err := vm.SetState(context.Background(), snow.Bootstrapping); err != nil { - tb.Fatal(err) - } - - if err := vm.SetState(context.Background(), snow.NormalOp); err != nil { - tb.Fatal(err) - } - - return genesisBytes, issuer, vm, m -} - -func NewTx(t *testing.T, genesisBytes []byte, vm *VM) *txs.Tx { - return NewTxWithAsset(t, genesisBytes, vm, "AVAX") -} - -func NewTxWithAsset(t *testing.T, genesisBytes []byte, vm *VM, assetName string) *txs.Tx { - createTx := GetCreateTxFromGenesisTest(t, genesisBytes, assetName) - - newTx := &txs.Tx{Unsigned: &txs.BaseTx{ - BaseTx: avax.BaseTx{ - NetworkID: constants.UnitTestID, - BlockchainID: chainID, - Ins: []*avax.TransferableInput{{ - UTXOID: avax.UTXOID{ - TxID: createTx.ID(), - OutputIndex: 2, - }, - Asset: avax.Asset{ID: createTx.ID()}, - In: &secp256k1fx.TransferInput{ - Amt: startBalance, - Input: secp256k1fx.Input{ - SigIndices: []uint32{ - 0, - }, - }, - }, - }}, - }, - }} - if err := newTx.SignSECP256K1Fx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{keys[0]}}); err != nil { - t.Fatal(err) - 
} - return newTx -} - -func setupIssueTx(t testing.TB) (chan common.Message, *VM, *snow.Context, []*txs.Tx) { - genesisBytes, issuer, vm, _ := GenesisVM(t) - ctx := vm.ctx - - avaxTx := GetAVAXTxFromGenesisTest(genesisBytes, t) - key := keys[0] - firstTx := &txs.Tx{Unsigned: &txs.BaseTx{ - BaseTx: avax.BaseTx{ - NetworkID: constants.UnitTestID, - BlockchainID: chainID, - Ins: []*avax.TransferableInput{{ - UTXOID: avax.UTXOID{ - TxID: avaxTx.ID(), - OutputIndex: 2, - }, - Asset: avax.Asset{ID: avaxTx.ID()}, - In: &secp256k1fx.TransferInput{ - Amt: startBalance, - Input: secp256k1fx.Input{ - SigIndices: []uint32{ - 0, - }, - }, - }, - }}, - Outs: []*avax.TransferableOutput{{ - Asset: avax.Asset{ID: avaxTx.ID()}, - Out: &secp256k1fx.TransferOutput{ - Amt: startBalance - vm.TxFee, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{key.PublicKey().Address()}, - }, - }, - }}, - }, - }} - if err := firstTx.SignSECP256K1Fx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}}); err != nil { - t.Fatal(err) - } - - secondTx := &txs.Tx{Unsigned: &txs.BaseTx{ - BaseTx: avax.BaseTx{ - NetworkID: constants.UnitTestID, - BlockchainID: chainID, - Ins: []*avax.TransferableInput{{ - UTXOID: avax.UTXOID{ - TxID: avaxTx.ID(), - OutputIndex: 2, - }, - Asset: avax.Asset{ID: avaxTx.ID()}, - In: &secp256k1fx.TransferInput{ - Amt: startBalance, - Input: secp256k1fx.Input{ - SigIndices: []uint32{ - 0, - }, - }, - }, - }}, - Outs: []*avax.TransferableOutput{{ - Asset: avax.Asset{ID: avaxTx.ID()}, - Out: &secp256k1fx.TransferOutput{ - Amt: 1, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{key.PublicKey().Address()}, - }, - }, - }}, - }, - }} - if err := secondTx.SignSECP256K1Fx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}}); err != nil { - t.Fatal(err) - } - return issuer, vm, ctx, []*txs.Tx{avaxTx, firstTx, secondTx} -} - func TestInvalidGenesis(t *testing.T) { + require := require.New(t) + vm := &VM{} - ctx := NewContext(t) 
+ ctx := snowtest.Context(t, snowtest.XChainID) ctx.Lock.Lock() - defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } - ctx.Lock.Unlock() - }() + defer ctx.Lock.Unlock() err := vm.Initialize( context.Background(), - ctx, // context - manager.NewMemDB(version.Semantic1_0_0), // dbManager - nil, // genesisState - nil, // upgradeBytes - nil, // configBytes - make(chan common.Message, 1), // engineMessenger - nil, // fxs - nil, // AppSender + ctx, // context + memdb.New(), // database + nil, // genesisState + nil, // upgradeBytes + nil, // configBytes + make(chan common.Message, 1), // engineMessenger + nil, // fxs + nil, // AppSender ) - if err == nil { - t.Fatalf("Should have erred due to an invalid genesis") - } + require.ErrorIs(err, codec.ErrCantUnpackVersion) } func TestInvalidFx(t *testing.T) { + require := require.New(t) + vm := &VM{} - ctx := NewContext(t) + ctx := snowtest.Context(t, snowtest.XChainID) ctx.Lock.Lock() defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(vm.Shutdown(context.Background())) ctx.Lock.Unlock() }() - genesisBytes := BuildGenesisTest(t) + genesisBytes := buildGenesisTest(t) err := vm.Initialize( context.Background(), - ctx, // context - manager.NewMemDB(version.Semantic1_0_0), // dbManager - genesisBytes, // genesisState - nil, // upgradeBytes - nil, // configBytes - make(chan common.Message, 1), // engineMessenger + ctx, // context + memdb.New(), // database + genesisBytes, // genesisState + nil, // upgradeBytes + nil, // configBytes + make(chan common.Message, 1), // engineMessenger []*common.Fx{ // fxs nil, }, nil, ) - if err == nil { - t.Fatalf("Should have erred due to an invalid interface") - } + require.ErrorIs(err, errIncompatibleFx) } func TestFxInitializationFailure(t *testing.T) { + require := require.New(t) + vm := &VM{} - ctx := NewContext(t) + ctx := snowtest.Context(t, snowtest.XChainID) ctx.Lock.Lock() defer func() { 
- if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(vm.Shutdown(context.Background())) ctx.Lock.Unlock() }() - genesisBytes := BuildGenesisTest(t) + genesisBytes := buildGenesisTest(t) err := vm.Initialize( context.Background(), - ctx, // context - manager.NewMemDB(version.Semantic1_0_0), // dbManager - genesisBytes, // genesisState - nil, // upgradeBytes - nil, // configBytes - make(chan common.Message, 1), // engineMessenger + ctx, // context + memdb.New(), // database + genesisBytes, // genesisState + nil, // upgradeBytes + nil, // configBytes + make(chan common.Message, 1), // engineMessenger []*common.Fx{{ // fxs ID: ids.Empty, Fx: &FxTest{ @@ -553,132 +109,42 @@ func TestFxInitializationFailure(t *testing.T) { }}, nil, ) - if err == nil { - t.Fatalf("Should have erred due to an invalid fx initialization") - } + require.ErrorIs(err, errUnknownFx) } func TestIssueTx(t *testing.T) { - genesisBytes, issuer, vm, _ := GenesisVM(t) - ctx := vm.ctx - defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } - ctx.Lock.Unlock() - }() - - newTx := NewTx(t, genesisBytes, vm) - - txID, err := vm.IssueTx(newTx.Bytes()) - if err != nil { - t.Fatal(err) - } - if txID != newTx.ID() { - t.Fatalf("Issue Tx returned wrong TxID") - } - ctx.Lock.Unlock() - - msg := <-issuer - if msg != common.PendingTxs { - t.Fatalf("Wrong message") - } - ctx.Lock.Lock() - - txs := vm.PendingTxs(context.Background()) - if len(txs) != 1 { - t.Fatalf("Should have returned %d tx(s)", 1) - } -} + require := require.New(t) -// Test issuing a transaction that consumes a currently pending UTXO. The -// transaction should be issued successfully. 
-func TestIssueDependentTx(t *testing.T) { - issuer, vm, ctx, txs := setupIssueTx(t) + env := setup(t, &envConfig{}) + env.vm.ctx.Lock.Unlock() defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } - ctx.Lock.Unlock() + env.vm.ctx.Lock.Lock() + require.NoError(env.vm.Shutdown(context.Background())) + env.vm.ctx.Lock.Unlock() }() - firstTx := txs[1] - secondTx := txs[2] - - if _, err := vm.IssueTx(firstTx.Bytes()); err != nil { - t.Fatal(err) - } - - if _, err := vm.IssueTx(secondTx.Bytes()); err != nil { - t.Fatal(err) - } - ctx.Lock.Unlock() - - msg := <-issuer - if msg != common.PendingTxs { - t.Fatalf("Wrong message") - } - ctx.Lock.Lock() - - pendingTxs := vm.PendingTxs(context.Background()) - if len(pendingTxs) != 2 { - t.Fatalf("Should have returned %d tx(s)", 2) - } + tx := newTx(t, env.genesisBytes, env.vm.ctx.ChainID, env.vm.parser, "AVAX") + issueAndAccept(require, env.vm, env.issuer, tx) } // Test issuing a transaction that creates an NFT family func TestIssueNFT(t *testing.T) { - vm := &VM{} - ctx := NewContext(t) - ctx.Lock.Lock() + require := require.New(t) + + env := setup(t, &envConfig{ + vmStaticConfig: &config.Config{}, + }) + env.vm.ctx.Lock.Unlock() defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } - ctx.Lock.Unlock() + env.vm.ctx.Lock.Lock() + require.NoError(env.vm.Shutdown(context.Background())) + env.vm.ctx.Lock.Unlock() }() - genesisBytes := BuildGenesisTest(t) - issuer := make(chan common.Message, 1) - err := vm.Initialize( - context.Background(), - ctx, - manager.NewMemDB(version.Semantic1_0_0), - genesisBytes, - nil, - nil, - issuer, - []*common.Fx{ - { - ID: ids.Empty.Prefix(0), - Fx: &secp256k1fx.Fx{}, - }, - { - ID: ids.Empty.Prefix(1), - Fx: &nftfx.Fx{}, - }, - }, - nil, - ) - if err != nil { - t.Fatal(err) - } - vm.batchTimeout = 0 - - err = vm.SetState(context.Background(), snow.Bootstrapping) - if err != nil { - t.Fatal(err) - } - - err = 
vm.SetState(context.Background(), snow.NormalOp) - if err != nil { - t.Fatal(err) - } - createAssetTx := &txs.Tx{Unsigned: &txs.CreateAssetTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: constants.UnitTestID, - BlockchainID: chainID, + BlockchainID: env.vm.ctx.XChainID, }}, Name: "Team Rocket", Symbol: "TR", @@ -703,18 +169,13 @@ func TestIssueNFT(t *testing.T) { }, }}, }} - if err := vm.parser.InitializeTx(createAssetTx); err != nil { - t.Fatal(err) - } - - if _, err := vm.IssueTx(createAssetTx.Bytes()); err != nil { - t.Fatal(err) - } + require.NoError(createAssetTx.Initialize(env.vm.parser.Codec())) + issueAndAccept(require, env.vm, env.issuer, createAssetTx) mintNFTTx := &txs.Tx{Unsigned: &txs.OperationTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: constants.UnitTestID, - BlockchainID: chainID, + BlockchainID: env.vm.ctx.XChainID, }}, Ops: []*txs.Operation{{ Asset: avax.Asset{ID: createAssetTx.ID()}, @@ -732,19 +193,14 @@ func TestIssueNFT(t *testing.T) { }, }}, }} - if err := mintNFTTx.SignNFTFx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{keys[0]}}); err != nil { - t.Fatal(err) - } - - if _, err := vm.IssueTx(mintNFTTx.Bytes()); err != nil { - t.Fatal(err) - } + require.NoError(mintNFTTx.SignNFTFx(env.vm.parser.Codec(), [][]*secp256k1.PrivateKey{{keys[0]}})) + issueAndAccept(require, env.vm, env.issuer, mintNFTTx) transferNFTTx := &txs.Tx{ Unsigned: &txs.OperationTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: constants.UnitTestID, - BlockchainID: chainID, + BlockchainID: env.vm.ctx.XChainID, }}, Ops: []*txs.Operation{{ Asset: avax.Asset{ID: createAssetTx.ID()}, @@ -763,75 +219,37 @@ func TestIssueNFT(t *testing.T) { }}, }, Creds: []*fxs.FxCredential{ - {Verifiable: &nftfx.Credential{}}, + { + Credential: &nftfx.Credential{}, + }, }, } - if err := vm.parser.InitializeTx(transferNFTTx); err != nil { - t.Fatal(err) - } - - if _, err := vm.IssueTx(transferNFTTx.Bytes()); err != nil { - t.Fatal(err) - } + 
require.NoError(transferNFTTx.Initialize(env.vm.parser.Codec())) + issueAndAccept(require, env.vm, env.issuer, transferNFTTx) } // Test issuing a transaction that creates an Property family func TestIssueProperty(t *testing.T) { - vm := &VM{} - ctx := NewContext(t) - ctx.Lock.Lock() + require := require.New(t) + + env := setup(t, &envConfig{ + vmStaticConfig: &config.Config{}, + additionalFxs: []*common.Fx{{ + ID: propertyfx.ID, + Fx: &propertyfx.Fx{}, + }}, + }) + env.vm.ctx.Lock.Unlock() defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } - ctx.Lock.Unlock() + env.vm.ctx.Lock.Lock() + require.NoError(env.vm.Shutdown(context.Background())) + env.vm.ctx.Lock.Unlock() }() - genesisBytes := BuildGenesisTest(t) - issuer := make(chan common.Message, 1) - err := vm.Initialize( - context.Background(), - ctx, - manager.NewMemDB(version.Semantic1_0_0), - genesisBytes, - nil, - nil, - issuer, - []*common.Fx{ - { - ID: ids.Empty.Prefix(0), - Fx: &secp256k1fx.Fx{}, - }, - { - ID: ids.Empty.Prefix(1), - Fx: &nftfx.Fx{}, - }, - { - ID: ids.Empty.Prefix(2), - Fx: &propertyfx.Fx{}, - }, - }, - nil, - ) - if err != nil { - t.Fatal(err) - } - vm.batchTimeout = 0 - - err = vm.SetState(context.Background(), snow.Bootstrapping) - if err != nil { - t.Fatal(err) - } - - err = vm.SetState(context.Background(), snow.NormalOp) - if err != nil { - t.Fatal(err) - } - createAssetTx := &txs.Tx{Unsigned: &txs.CreateAssetTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: constants.UnitTestID, - BlockchainID: chainID, + BlockchainID: env.vm.ctx.XChainID, }}, Name: "Team Rocket", Symbol: "TR", @@ -848,18 +266,13 @@ func TestIssueProperty(t *testing.T) { }, }}, }} - if err := vm.parser.InitializeTx(createAssetTx); err != nil { - t.Fatal(err) - } - - if _, err := vm.IssueTx(createAssetTx.Bytes()); err != nil { - t.Fatal(err) - } + require.NoError(createAssetTx.Initialize(env.vm.parser.Codec())) + issueAndAccept(require, env.vm, env.issuer, createAssetTx) 
mintPropertyTx := &txs.Tx{Unsigned: &txs.OperationTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: constants.UnitTestID, - BlockchainID: chainID, + BlockchainID: env.vm.ctx.XChainID, }}, Ops: []*txs.Operation{{ Asset: avax.Asset{ID: createAssetTx.ID()}, @@ -882,22 +295,16 @@ func TestIssueProperty(t *testing.T) { }}, }} - codec := vm.parser.Codec() - err = mintPropertyTx.SignPropertyFx(codec, [][]*secp256k1.PrivateKey{ + codec := env.vm.parser.Codec() + require.NoError(mintPropertyTx.SignPropertyFx(codec, [][]*secp256k1.PrivateKey{ {keys[0]}, - }) - if err != nil { - t.Fatal(err) - } - - if _, err := vm.IssueTx(mintPropertyTx.Bytes()); err != nil { - t.Fatal(err) - } + })) + issueAndAccept(require, env.vm, env.issuer, mintPropertyTx) burnPropertyTx := &txs.Tx{Unsigned: &txs.OperationTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: constants.UnitTestID, - BlockchainID: chainID, + BlockchainID: env.vm.ctx.XChainID, }}, Ops: []*txs.Operation{{ Asset: avax.Asset{ID: createAssetTx.ID()}, @@ -909,118 +316,51 @@ func TestIssueProperty(t *testing.T) { }}, }} - err = burnPropertyTx.SignPropertyFx(codec, [][]*secp256k1.PrivateKey{ + require.NoError(burnPropertyTx.SignPropertyFx(codec, [][]*secp256k1.PrivateKey{ {}, - }) - if err != nil { - t.Fatal(err) - } - - if _, err := vm.IssueTx(burnPropertyTx.Bytes()); err != nil { - t.Fatal(err) - } -} - -func setupTxFeeAssets(t *testing.T) ([]byte, chan common.Message, *VM, *atomic.Memory) { - addr0Str, _ := address.FormatBech32(testHRP, addrs[0].Bytes()) - addr1Str, _ := address.FormatBech32(testHRP, addrs[1].Bytes()) - addr2Str, _ := address.FormatBech32(testHRP, addrs[2].Bytes()) - assetAlias := "asset1" - customArgs := &BuildGenesisArgs{ - Encoding: formatting.Hex, - GenesisData: map[string]AssetDefinition{ - assetAlias: { - Name: feeAssetName, - Symbol: "TST", - InitialState: map[string][]interface{}{ - "fixedCap": { - Holder{ - Amount: json.Uint64(startBalance), - Address: addr0Str, - }, - Holder{ - Amount: 
json.Uint64(startBalance), - Address: addr1Str, - }, - Holder{ - Amount: json.Uint64(startBalance), - Address: addr2Str, - }, - }, - }, - }, - "asset2": { - Name: otherAssetName, - Symbol: "OTH", - InitialState: map[string][]interface{}{ - "fixedCap": { - Holder{ - Amount: json.Uint64(startBalance), - Address: addr0Str, - }, - Holder{ - Amount: json.Uint64(startBalance), - Address: addr1Str, - }, - Holder{ - Amount: json.Uint64(startBalance), - Address: addr2Str, - }, - }, - }, - }, - }, - } - genesisBytes, issuer, vm, m := GenesisVMWithArgs(t, nil, customArgs) - expectedID, err := vm.Aliaser.Lookup(assetAlias) - require.NoError(t, err) - require.Equal(t, expectedID, vm.feeAssetID) - return genesisBytes, issuer, vm, m + })) + issueAndAccept(require, env.vm, env.issuer, burnPropertyTx) } func TestIssueTxWithFeeAsset(t *testing.T) { - genesisBytes, issuer, vm, _ := setupTxFeeAssets(t) - ctx := vm.ctx + require := require.New(t) + + env := setup(t, &envConfig{ + isCustomFeeAsset: true, + }) + env.vm.ctx.Lock.Unlock() defer func() { - err := vm.Shutdown(context.Background()) - require.NoError(t, err) - ctx.Lock.Unlock() + env.vm.ctx.Lock.Lock() + require.NoError(env.vm.Shutdown(context.Background())) + env.vm.ctx.Lock.Unlock() }() - // send first asset - newTx := NewTxWithAsset(t, genesisBytes, vm, feeAssetName) - - txID, err := vm.IssueTx(newTx.Bytes()) - require.NoError(t, err) - require.Equal(t, txID, newTx.ID()) - - ctx.Lock.Unlock() - msg := <-issuer - require.Equal(t, msg, common.PendingTxs) - - ctx.Lock.Lock() - txs := vm.PendingTxs(context.Background()) - require.Len(t, txs, 1) - t.Log(txs) + // send first asset + tx := newTx(t, env.genesisBytes, env.vm.ctx.ChainID, env.vm.parser, feeAssetName) + issueAndAccept(require, env.vm, env.issuer, tx) } func TestIssueTxWithAnotherAsset(t *testing.T) { - genesisBytes, issuer, vm, _ := setupTxFeeAssets(t) - ctx := vm.ctx + require := require.New(t) + + env := setup(t, &envConfig{ + isCustomFeeAsset: true, + }) + 
env.vm.ctx.Lock.Unlock() defer func() { - err := vm.Shutdown(context.Background()) - require.NoError(t, err) - ctx.Lock.Unlock() + env.vm.ctx.Lock.Lock() + require.NoError(env.vm.Shutdown(context.Background())) + env.vm.ctx.Lock.Unlock() }() // send second asset - feeAssetCreateTx := GetCreateTxFromGenesisTest(t, genesisBytes, feeAssetName) - createTx := GetCreateTxFromGenesisTest(t, genesisBytes, otherAssetName) + feeAssetCreateTx := getCreateTxFromGenesisTest(t, env.genesisBytes, feeAssetName) + createTx := getCreateTxFromGenesisTest(t, env.genesisBytes, otherAssetName) - newTx := &txs.Tx{Unsigned: &txs.BaseTx{ + tx := &txs.Tx{Unsigned: &txs.BaseTx{ BaseTx: avax.BaseTx{ NetworkID: constants.UnitTestID, - BlockchainID: chainID, + BlockchainID: env.vm.ctx.XChainID, Ins: []*avax.TransferableInput{ // fee asset { @@ -1057,488 +397,142 @@ func TestIssueTxWithAnotherAsset(t *testing.T) { }, }, }} - if err := newTx.SignSECP256K1Fx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{keys[0]}, {keys[0]}}); err != nil { - t.Fatal(err) - } - - txID, err := vm.IssueTx(newTx.Bytes()) - require.NoError(t, err) - require.Equal(t, txID, newTx.ID()) - - ctx.Lock.Unlock() + require.NoError(tx.SignSECP256K1Fx(env.vm.parser.Codec(), [][]*secp256k1.PrivateKey{{keys[0]}, {keys[0]}})) - msg := <-issuer - require.Equal(t, msg, common.PendingTxs) - - ctx.Lock.Lock() - txs := vm.PendingTxs(context.Background()) - require.Len(t, txs, 1) + issueAndAccept(require, env.vm, env.issuer, tx) } func TestVMFormat(t *testing.T) { - _, _, vm, _ := GenesisVM(t) - defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } - vm.ctx.Lock.Unlock() - }() - - tests := []struct { - in ids.ShortID - expected string - }{ - {ids.ShortEmpty, "X-testing1qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqtu2yas"}, - } - for _, test := range tests { - t.Run(test.in.String(), func(t *testing.T) { - addrStr, err := vm.FormatLocalAddress(test.in) - if err != nil { - t.Error(err) - } - if test.expected != 
addrStr { - t.Errorf("Expected %q, got %q", test.expected, addrStr) - } - }) - } -} - -func TestTxCached(t *testing.T) { - genesisBytes, _, vm, _ := GenesisVM(t) - ctx := vm.ctx - defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } - ctx.Lock.Unlock() - }() - - newTx := NewTx(t, genesisBytes, vm) - txBytes := newTx.Bytes() - - _, err := vm.ParseTx(context.Background(), txBytes) - require.NoError(t, err) - - registerer := prometheus.NewRegistry() - - vm.metrics, err = metrics.New("", registerer) - require.NoError(t, err) - - db := memdb.New() - vdb := versiondb.New(db) - vm.state, err = states.New(vdb, vm.parser, registerer) - require.NoError(t, err) - - _, err = vm.ParseTx(context.Background(), txBytes) - require.NoError(t, err) - - count, err := database.Count(vdb) - require.NoError(t, err) - require.Zero(t, count) -} - -func TestTxNotCached(t *testing.T) { - genesisBytes, _, vm, _ := GenesisVM(t) - ctx := vm.ctx - defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } - ctx.Lock.Unlock() - }() - - newTx := NewTx(t, genesisBytes, vm) - txBytes := newTx.Bytes() - - _, err := vm.ParseTx(context.Background(), txBytes) - require.NoError(t, err) - - registerer := prometheus.NewRegistry() - require.NoError(t, err) - - vm.metrics, err = metrics.New("", registerer) - require.NoError(t, err) - - db := memdb.New() - vdb := versiondb.New(db) - vm.state, err = states.New(vdb, vm.parser, registerer) - require.NoError(t, err) - - vm.uniqueTxs.Flush() - - _, err = vm.ParseTx(context.Background(), txBytes) - require.NoError(t, err) - - count, err := database.Count(vdb) - require.NoError(t, err) - require.NotZero(t, count) -} - -func TestTxVerifyAfterIssueTx(t *testing.T) { - issuer, vm, ctx, issueTxs := setupIssueTx(t) - defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } - ctx.Lock.Unlock() - }() - firstTx := issueTxs[1] - secondTx := issueTxs[2] - 
parsedSecondTx, err := vm.ParseTx(context.Background(), secondTx.Bytes()) - if err != nil { - t.Fatal(err) - } - if err := parsedSecondTx.Verify(context.Background()); err != nil { - t.Fatal(err) - } - if _, err := vm.IssueTx(firstTx.Bytes()); err != nil { - t.Fatal(err) - } - if err := parsedSecondTx.Accept(context.Background()); err != nil { - t.Fatal(err) - } - ctx.Lock.Unlock() - - msg := <-issuer - if msg != common.PendingTxs { - t.Fatalf("Wrong message") - } - ctx.Lock.Lock() - - txs := vm.PendingTxs(context.Background()) - if len(txs) != 1 { - t.Fatalf("Should have returned %d tx(s)", 1) - } - parsedFirstTx := txs[0] - - if err := parsedFirstTx.Verify(context.Background()); err == nil { - t.Fatalf("Should have erred due to a missing UTXO") - } -} - -func TestTxVerifyAfterGet(t *testing.T) { - _, vm, ctx, issueTxs := setupIssueTx(t) - defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } - ctx.Lock.Unlock() - }() - firstTx := issueTxs[1] - secondTx := issueTxs[2] - - parsedSecondTx, err := vm.ParseTx(context.Background(), secondTx.Bytes()) - if err != nil { - t.Fatal(err) - } - if err := parsedSecondTx.Verify(context.Background()); err != nil { - t.Fatal(err) - } - if _, err := vm.IssueTx(firstTx.Bytes()); err != nil { - t.Fatal(err) - } - parsedFirstTx, err := vm.GetTx(context.Background(), firstTx.ID()) - if err != nil { - t.Fatal(err) - } - if err := parsedSecondTx.Accept(context.Background()); err != nil { - t.Fatal(err) - } - if err := parsedFirstTx.Verify(context.Background()); err == nil { - t.Fatalf("Should have erred due to a missing UTXO") - } -} - -func TestTxVerifyAfterVerifyAncestorTx(t *testing.T) { - _, vm, ctx, issueTxs := setupIssueTx(t) + env := setup(t, &envConfig{}) defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } - ctx.Lock.Unlock() + require.NoError(t, env.vm.Shutdown(context.Background())) + env.vm.ctx.Lock.Unlock() }() - avaxTx := issueTxs[0] - firstTx 
:= issueTxs[1] - secondTx := issueTxs[2] - key := keys[0] - firstTxDescendant := &txs.Tx{Unsigned: &txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: constants.UnitTestID, - BlockchainID: chainID, - Ins: []*avax.TransferableInput{{ - UTXOID: avax.UTXOID{ - TxID: firstTx.ID(), - OutputIndex: 0, - }, - Asset: avax.Asset{ID: avaxTx.ID()}, - In: &secp256k1fx.TransferInput{ - Amt: startBalance - vm.TxFee, - Input: secp256k1fx.Input{ - SigIndices: []uint32{ - 0, - }, - }, - }, - }}, - Outs: []*avax.TransferableOutput{{ - Asset: avax.Asset{ID: avaxTx.ID()}, - Out: &secp256k1fx.TransferOutput{ - Amt: startBalance - 2*vm.TxFee, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{key.PublicKey().Address()}, - }, - }, - }}, - }}} - if err := firstTxDescendant.SignSECP256K1Fx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}}); err != nil { - t.Fatal(err) - } - parsedSecondTx, err := vm.ParseTx(context.Background(), secondTx.Bytes()) - if err != nil { - t.Fatal(err) - } - if err := parsedSecondTx.Verify(context.Background()); err != nil { - t.Fatal(err) - } - if _, err := vm.IssueTx(firstTx.Bytes()); err != nil { - t.Fatal(err) - } - if _, err := vm.IssueTx(firstTxDescendant.Bytes()); err != nil { - t.Fatal(err) - } - parsedFirstTx, err := vm.GetTx(context.Background(), firstTx.ID()) - if err != nil { - t.Fatal(err) - } - if err := parsedSecondTx.Accept(context.Background()); err != nil { - t.Fatal(err) + tests := []struct { + in ids.ShortID + expected string + }{ + { + in: ids.ShortEmpty, + expected: "X-testing1qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqtu2yas", + }, } - if err := parsedFirstTx.Verify(context.Background()); err == nil { - t.Fatalf("Should have erred due to a missing UTXO") + for _, test := range tests { + t.Run(test.in.String(), func(t *testing.T) { + require := require.New(t) + addrStr, err := env.vm.FormatLocalAddress(test.in) + require.NoError(err) + require.Equal(test.expected, addrStr) + }) } } -func TestImportTxSerialization(t *testing.T) 
{ - _, vm, _, _ := setupIssueTx(t) - expected := []byte{ - // Codec version - 0x00, 0x00, - // txID: - 0x00, 0x00, 0x00, 0x03, - // networkID: - 0x00, 0x00, 0x00, 0x02, - // blockchainID: - 0xff, 0xff, 0xff, 0xff, 0xee, 0xee, 0xee, 0xee, - 0xdd, 0xdd, 0xdd, 0xdd, 0xcc, 0xcc, 0xcc, 0xcc, - 0xbb, 0xbb, 0xbb, 0xbb, 0xaa, 0xaa, 0xaa, 0xaa, - 0x99, 0x99, 0x99, 0x99, 0x88, 0x88, 0x88, 0x88, - // number of base outs: - 0x00, 0x00, 0x00, 0x00, - // number of base inputs: - 0x00, 0x00, 0x00, 0x00, - // Memo length: - 0x00, 0x00, 0x00, 0x04, - // Memo: - 0x00, 0x01, 0x02, 0x03, - // Source Chain ID: - 0x1f, 0x8f, 0x9f, 0x0f, 0x1e, 0x8e, 0x9e, 0x0e, - 0x2d, 0x7d, 0xad, 0xfd, 0x2c, 0x7c, 0xac, 0xfc, - 0x3b, 0x6b, 0xbb, 0xeb, 0x3a, 0x6a, 0xba, 0xea, - 0x49, 0x59, 0xc9, 0xd9, 0x48, 0x58, 0xc8, 0xd8, - // number of inputs: - 0x00, 0x00, 0x00, 0x01, - // utxoID: - 0x0f, 0x2f, 0x4f, 0x6f, 0x8e, 0xae, 0xce, 0xee, - 0x0d, 0x2d, 0x4d, 0x6d, 0x8c, 0xac, 0xcc, 0xec, - 0x0b, 0x2b, 0x4b, 0x6b, 0x8a, 0xaa, 0xca, 0xea, - 0x09, 0x29, 0x49, 0x69, 0x88, 0xa8, 0xc8, 0xe8, - // output index - 0x00, 0x00, 0x00, 0x00, - // assetID: - 0x1f, 0x3f, 0x5f, 0x7f, 0x9e, 0xbe, 0xde, 0xfe, - 0x1d, 0x3d, 0x5d, 0x7d, 0x9c, 0xbc, 0xdc, 0xfc, - 0x1b, 0x3b, 0x5b, 0x7b, 0x9a, 0xba, 0xda, 0xfa, - 0x19, 0x39, 0x59, 0x79, 0x98, 0xb8, 0xd8, 0xf8, - // input: - // input ID: - 0x00, 0x00, 0x00, 0x05, - // amount: - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xe8, - // num sig indices: - 0x00, 0x00, 0x00, 0x01, - // sig index[0]: - 0x00, 0x00, 0x00, 0x00, - // number of credentials: - 0x00, 0x00, 0x00, 0x00, - } +func TestTxAcceptAfterParseTx(t *testing.T) { + require := require.New(t) - tx := &txs.Tx{Unsigned: &txs.ImportTx{ - BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: 2, - BlockchainID: ids.ID{ - 0xff, 0xff, 0xff, 0xff, 0xee, 0xee, 0xee, 0xee, - 0xdd, 0xdd, 0xdd, 0xdd, 0xcc, 0xcc, 0xcc, 0xcc, - 0xbb, 0xbb, 0xbb, 0xbb, 0xaa, 0xaa, 0xaa, 0xaa, - 0x99, 0x99, 0x99, 0x99, 0x88, 0x88, 0x88, 0x88, - }, - Memo: 
[]byte{0x00, 0x01, 0x02, 0x03}, - }}, - SourceChain: ids.ID{ - 0x1f, 0x8f, 0x9f, 0x0f, 0x1e, 0x8e, 0x9e, 0x0e, - 0x2d, 0x7d, 0xad, 0xfd, 0x2c, 0x7c, 0xac, 0xfc, - 0x3b, 0x6b, 0xbb, 0xeb, 0x3a, 0x6a, 0xba, 0xea, - 0x49, 0x59, 0xc9, 0xd9, 0x48, 0x58, 0xc8, 0xd8, - }, - ImportedIns: []*avax.TransferableInput{{ - UTXOID: avax.UTXOID{TxID: ids.ID{ - 0x0f, 0x2f, 0x4f, 0x6f, 0x8e, 0xae, 0xce, 0xee, - 0x0d, 0x2d, 0x4d, 0x6d, 0x8c, 0xac, 0xcc, 0xec, - 0x0b, 0x2b, 0x4b, 0x6b, 0x8a, 0xaa, 0xca, 0xea, - 0x09, 0x29, 0x49, 0x69, 0x88, 0xa8, 0xc8, 0xe8, + env := setup(t, &envConfig{ + notLinearized: true, + }) + defer func() { + require.NoError(env.vm.Shutdown(context.Background())) + env.vm.ctx.Lock.Unlock() + }() + + key := keys[0] + firstTx := &txs.Tx{Unsigned: &txs.BaseTx{ + BaseTx: avax.BaseTx{ + NetworkID: constants.UnitTestID, + BlockchainID: env.vm.ctx.XChainID, + Ins: []*avax.TransferableInput{{ + UTXOID: avax.UTXOID{ + TxID: env.genesisTx.ID(), + OutputIndex: 2, + }, + Asset: avax.Asset{ID: env.genesisTx.ID()}, + In: &secp256k1fx.TransferInput{ + Amt: startBalance, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, + }, + }, + }, }}, - Asset: avax.Asset{ID: ids.ID{ - 0x1f, 0x3f, 0x5f, 0x7f, 0x9e, 0xbe, 0xde, 0xfe, - 0x1d, 0x3d, 0x5d, 0x7d, 0x9c, 0xbc, 0xdc, 0xfc, - 0x1b, 0x3b, 0x5b, 0x7b, 0x9a, 0xba, 0xda, 0xfa, - 0x19, 0x39, 0x59, 0x79, 0x98, 0xb8, 0xd8, 0xf8, + Outs: []*avax.TransferableOutput{{ + Asset: avax.Asset{ID: env.genesisTx.ID()}, + Out: &secp256k1fx.TransferOutput{ + Amt: startBalance - env.vm.TxFee, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{key.PublicKey().Address()}, + }, + }, }}, - In: &secp256k1fx.TransferInput{ - Amt: 1000, - Input: secp256k1fx.Input{SigIndices: []uint32{0}}, - }, - }}, + }, }} + require.NoError(firstTx.SignSECP256K1Fx(env.vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}})) - if err := vm.parser.InitializeTx(tx); err != nil { - t.Fatal(err) - } - require.Equal(t, tx.ID().String(), 
"9wdPb5rsThXYLX4WxkNeyYrNMfDE5cuWLgifSjxKiA2dCmgCZ") - result := tx.Bytes() - if !bytes.Equal(expected, result) { - t.Fatalf("\nExpected: 0x%x\nResult: 0x%x", expected, result) - } - - credBytes := []byte{ - // type id - 0x00, 0x00, 0x00, 0x09, - - // there are two signers (thus two signatures) - 0x00, 0x00, 0x00, 0x02, - - // 65 bytes - 0x8c, 0xc7, 0xdc, 0x8c, 0x11, 0xd3, 0x75, 0x9e, 0x16, 0xa5, - 0x9f, 0xd2, 0x9c, 0x64, 0xd7, 0x1f, 0x9b, 0xad, 0x1a, 0x62, - 0x33, 0x98, 0xc7, 0xaf, 0x67, 0x02, 0xc5, 0xe0, 0x75, 0x8e, - 0x62, 0xcf, 0x15, 0x6d, 0x99, 0xf5, 0x4e, 0x71, 0xb8, 0xf4, - 0x8b, 0x5b, 0xbf, 0x0c, 0x59, 0x62, 0x79, 0x34, 0x97, 0x1a, - 0x1f, 0x49, 0x9b, 0x0a, 0x4f, 0xbf, 0x95, 0xfc, 0x31, 0x39, - 0x46, 0x4e, 0xa1, 0xaf, 0x00, - - // 65 bytes - 0x8c, 0xc7, 0xdc, 0x8c, 0x11, 0xd3, 0x75, 0x9e, 0x16, 0xa5, - 0x9f, 0xd2, 0x9c, 0x64, 0xd7, 0x1f, 0x9b, 0xad, 0x1a, 0x62, - 0x33, 0x98, 0xc7, 0xaf, 0x67, 0x02, 0xc5, 0xe0, 0x75, 0x8e, - 0x62, 0xcf, 0x15, 0x6d, 0x99, 0xf5, 0x4e, 0x71, 0xb8, 0xf4, - 0x8b, 0x5b, 0xbf, 0x0c, 0x59, 0x62, 0x79, 0x34, 0x97, 0x1a, - 0x1f, 0x49, 0x9b, 0x0a, 0x4f, 0xbf, 0x95, 0xfc, 0x31, 0x39, - 0x46, 0x4e, 0xa1, 0xaf, 0x00, - - // type id - 0x00, 0x00, 0x00, 0x09, - - // there are two signers (thus two signatures) - 0x00, 0x00, 0x00, 0x02, - - // 65 bytes - 0x8c, 0xc7, 0xdc, 0x8c, 0x11, 0xd3, 0x75, 0x9e, 0x16, 0xa5, - 0x9f, 0xd2, 0x9c, 0x64, 0xd7, 0x1f, 0x9b, 0xad, 0x1a, 0x62, - 0x33, 0x98, 0xc7, 0xaf, 0x67, 0x02, 0xc5, 0xe0, 0x75, 0x8e, - 0x62, 0xcf, 0x15, 0x6d, 0x99, 0xf5, 0x4e, 0x71, 0xb8, 0xf4, - 0x8b, 0x5b, 0xbf, 0x0c, 0x59, 0x62, 0x79, 0x34, 0x97, 0x1a, - 0x1f, 0x49, 0x9b, 0x0a, 0x4f, 0xbf, 0x95, 0xfc, 0x31, 0x39, - 0x46, 0x4e, 0xa1, 0xaf, 0x00, - - // 65 bytes - 0x8c, 0xc7, 0xdc, 0x8c, 0x11, 0xd3, 0x75, 0x9e, 0x16, 0xa5, - 0x9f, 0xd2, 0x9c, 0x64, 0xd7, 0x1f, 0x9b, 0xad, 0x1a, 0x62, - 0x33, 0x98, 0xc7, 0xaf, 0x67, 0x02, 0xc5, 0xe0, 0x75, 0x8e, - 0x62, 0xcf, 0x15, 0x6d, 0x99, 0xf5, 0x4e, 0x71, 0xb8, 0xf4, - 0x8b, 0x5b, 0xbf, 0x0c, 0x59, 
0x62, 0x79, 0x34, 0x97, 0x1a, - 0x1f, 0x49, 0x9b, 0x0a, 0x4f, 0xbf, 0x95, 0xfc, 0x31, 0x39, - 0x46, 0x4e, 0xa1, 0xaf, 0x00, - } - if err := tx.SignSECP256K1Fx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{keys[0], keys[0]}, {keys[0], keys[0]}}); err != nil { - t.Fatal(err) - } - require.Equal(t, tx.ID().String(), "pCW7sVBytzdZ1WrqzGY1DvA2S9UaMr72xpUMxVyx1QHBARNYx") - result = tx.Bytes() - - // there are two credentials - expected[len(expected)-1] = 0x02 - expected = append(expected, credBytes...) - if !bytes.Equal(expected, result) { - t.Fatalf("\nExpected: 0x%x\nResult: 0x%x", expected, result) - } -} - -// Test issuing an import transaction. -func TestIssueImportTx(t *testing.T) { - genesisBytes := BuildGenesisTest(t) + secondTx := &txs.Tx{Unsigned: &txs.BaseTx{ + BaseTx: avax.BaseTx{ + NetworkID: constants.UnitTestID, + BlockchainID: env.vm.ctx.XChainID, + Ins: []*avax.TransferableInput{{ + UTXOID: avax.UTXOID{ + TxID: firstTx.ID(), + OutputIndex: 0, + }, + Asset: avax.Asset{ID: env.genesisTx.ID()}, + In: &secp256k1fx.TransferInput{ + Amt: startBalance - env.vm.TxFee, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, + }, + }, + }, + }}, + }, + }} + require.NoError(secondTx.SignSECP256K1Fx(env.vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}})) - issuer := make(chan common.Message, 1) - baseDBManager := manager.NewMemDB(version.Semantic1_0_0) + parsedFirstTx, err := env.vm.ParseTx(context.Background(), firstTx.Bytes()) + require.NoError(err) - m := atomic.NewMemory(prefixdb.New([]byte{0}, baseDBManager.Current().Database)) + require.NoError(parsedFirstTx.Verify(context.Background())) + require.NoError(parsedFirstTx.Accept(context.Background())) - ctx := NewContext(t) - ctx.SharedMemory = m.NewSharedMemory(chainID) - peerSharedMemory := m.NewSharedMemory(constants.PlatformChainID) + parsedSecondTx, err := env.vm.ParseTx(context.Background(), secondTx.Bytes()) + require.NoError(err) - genesisTx := GetAVAXTxFromGenesisTest(genesisBytes, t) + 
require.NoError(parsedSecondTx.Verify(context.Background())) + require.NoError(parsedSecondTx.Accept(context.Background())) - avaxID := genesisTx.ID() - platformID := ids.Empty.Prefix(0) + _, err = env.vm.state.GetTx(firstTx.ID()) + require.NoError(err) - ctx.Lock.Lock() + _, err = env.vm.state.GetTx(secondTx.ID()) + require.NoError(err) +} - avmConfig := Config{ - IndexTransactions: true, - } +// Test issuing an import transaction. +func TestIssueImportTx(t *testing.T) { + require := require.New(t) - avmConfigBytes, err := stdjson.Marshal(avmConfig) - require.NoError(t, err) - vm := &VM{} - err = vm.Initialize( - context.Background(), - ctx, - baseDBManager.NewPrefixDBManager([]byte{1}), - genesisBytes, - nil, - avmConfigBytes, - issuer, - []*common.Fx{{ - ID: ids.Empty, - Fx: &secp256k1fx.Fx{}, - }}, - nil, - ) - if err != nil { - t.Fatal(err) - } - vm.batchTimeout = 0 + env := setup(t, &envConfig{ + vmStaticConfig: &config.Config{}, + }) + defer func() { + require.NoError(env.vm.Shutdown(context.Background())) + env.vm.ctx.Lock.Unlock() + }() - if err := vm.SetState(context.Background(), snow.Bootstrapping); err != nil { - t.Fatal(err) - } + peerSharedMemory := env.sharedMemory.NewSharedMemory(constants.PlatformChainID) - err = vm.SetState(context.Background(), snow.NormalOp) - if err != nil { - t.Fatal(err) - } + genesisTx := getCreateTxFromGenesisTest(t, env.genesisBytes, "AVAX") + avaxID := genesisTx.ID() key := keys[0] - utxoID := avax.UTXOID{ TxID: ids.ID{ 0x0f, 0x2f, 0x4f, 0x6f, 0x8e, 0xae, 0xce, 0xee, @@ -1552,7 +546,7 @@ func TestIssueImportTx(t *testing.T) { tx := &txs.Tx{Unsigned: &txs.ImportTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: constants.UnitTestID, - BlockchainID: chainID, + BlockchainID: env.vm.ctx.XChainID, Outs: []*avax.TransferableOutput{{ Asset: txAssetID, Out: &secp256k1fx.TransferOutput{ @@ -1576,16 +570,9 @@ func TestIssueImportTx(t *testing.T) { }, }}, }} - if err := tx.SignSECP256K1Fx(vm.parser.Codec(), 
[][]*secp256k1.PrivateKey{{key}}); err != nil { - t.Fatal(err) - } - - if _, err := vm.IssueTx(tx.Bytes()); err == nil { - t.Fatal(err) - } + require.NoError(tx.SignSECP256K1Fx(env.vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}})) // Provide the platform UTXO: - utxo := &avax.UTXO{ UTXOID: utxoID, Asset: txAssetID, @@ -1598,118 +585,53 @@ func TestIssueImportTx(t *testing.T) { }, } - utxoBytes, err := vm.parser.Codec().Marshal(txs.CodecVersion, utxo) - if err != nil { - t.Fatal(err) - } + utxoBytes, err := env.vm.parser.Codec().Marshal(txs.CodecVersion, utxo) + require.NoError(err) inputID := utxo.InputID() - - if err := peerSharedMemory.Apply(map[ids.ID]*atomic.Requests{vm.ctx.ChainID: {PutRequests: []*atomic.Element{{ - Key: inputID[:], - Value: utxoBytes, - Traits: [][]byte{ - key.PublicKey().Address().Bytes(), + require.NoError(peerSharedMemory.Apply(map[ids.ID]*atomic.Requests{ + env.vm.ctx.ChainID: { + PutRequests: []*atomic.Element{{ + Key: inputID[:], + Value: utxoBytes, + Traits: [][]byte{ + key.PublicKey().Address().Bytes(), + }, + }}, }, - }}}}); err != nil { - t.Fatal(err) - } - - if _, err := vm.IssueTx(tx.Bytes()); err != nil { - t.Fatalf("should have issued the transaction correctly but erred: %s", err) - } - ctx.Lock.Unlock() - - msg := <-issuer - if msg != common.PendingTxs { - t.Fatalf("Wrong message") - } - - ctx.Lock.Lock() - defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } - ctx.Lock.Unlock() - }() + })) - txs := vm.PendingTxs(context.Background()) - if len(txs) != 1 { - t.Fatalf("Should have returned %d tx(s)", 1) - } + env.vm.ctx.Lock.Unlock() - parsedTx := txs[0] - if err := parsedTx.Verify(context.Background()); err != nil { - t.Fatal("Failed verify", err) - } + issueAndAccept(require, env.vm, env.issuer, tx) - if err := parsedTx.Accept(context.Background()); err != nil { - t.Fatal(err) - } + env.vm.ctx.Lock.Lock() - assertIndexedTX(t, vm.db, 0, key.PublicKey().Address(), txAssetID.AssetID(), 
parsedTx.ID()) - assertLatestIdx(t, vm.db, key.PublicKey().Address(), avaxID, 1) + assertIndexedTX(t, env.vm.db, 0, key.PublicKey().Address(), txAssetID.AssetID(), tx.ID()) + assertLatestIdx(t, env.vm.db, key.PublicKey().Address(), avaxID, 1) id := utxoID.InputID() - if _, err := vm.ctx.SharedMemory.Get(platformID, [][]byte{id[:]}); err == nil { - t.Fatalf("shouldn't have been able to read the utxo") - } + _, err = env.vm.ctx.SharedMemory.Get(constants.PlatformChainID, [][]byte{id[:]}) + require.ErrorIs(err, database.ErrNotFound) } // Test force accepting an import transaction. func TestForceAcceptImportTx(t *testing.T) { - genesisBytes := BuildGenesisTest(t) - - issuer := make(chan common.Message, 1) - baseDBManager := manager.NewMemDB(version.Semantic1_0_0) - - m := atomic.NewMemory(prefixdb.New([]byte{0}, baseDBManager.Current().Database)) + require := require.New(t) - ctx := NewContext(t) - ctx.SharedMemory = m.NewSharedMemory(chainID) - - platformID := ids.Empty.Prefix(0) - - vm := &VM{} - ctx.Lock.Lock() + env := setup(t, &envConfig{ + vmStaticConfig: &config.Config{}, + notLinearized: true, + }) defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } - ctx.Lock.Unlock() + require.NoError(env.vm.Shutdown(context.Background())) + env.vm.ctx.Lock.Unlock() }() - err := vm.Initialize( - context.Background(), - ctx, - baseDBManager.NewPrefixDBManager([]byte{1}), - genesisBytes, - nil, - nil, - issuer, - []*common.Fx{{ - ID: ids.Empty, - Fx: &secp256k1fx.Fx{}, - }}, - nil, - ) - if err != nil { - t.Fatal(err) - } - vm.batchTimeout = 0 - - if err := vm.SetState(context.Background(), snow.Bootstrapping); err != nil { - t.Fatal(err) - } - err = vm.SetState(context.Background(), snow.NormalOp) - if err != nil { - t.Fatal(err) - } + genesisTx := getCreateTxFromGenesisTest(t, env.genesisBytes, "AVAX") + avaxID := genesisTx.ID() key := keys[0] - - genesisTx := GetAVAXTxFromGenesisTest(genesisBytes, t) - utxoID := avax.UTXOID{ TxID: 
ids.ID{ 0x0f, 0x2f, 0x4f, 0x6f, 0x8e, 0xae, 0xce, 0xee, @@ -1719,101 +641,76 @@ func TestForceAcceptImportTx(t *testing.T) { }, } + txAssetID := avax.Asset{ID: avaxID} tx := &txs.Tx{Unsigned: &txs.ImportTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: constants.UnitTestID, - BlockchainID: chainID, + BlockchainID: env.vm.ctx.XChainID, + Outs: []*avax.TransferableOutput{{ + Asset: txAssetID, + Out: &secp256k1fx.TransferOutput{ + Amt: 1000, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }, + }, + }}, }}, SourceChain: constants.PlatformChainID, ImportedIns: []*avax.TransferableInput{{ UTXOID: utxoID, - Asset: avax.Asset{ID: genesisTx.ID()}, + Asset: txAssetID, In: &secp256k1fx.TransferInput{ - Amt: 1000, - Input: secp256k1fx.Input{SigIndices: []uint32{0}}, + Amt: 1010, + Input: secp256k1fx.Input{ + SigIndices: []uint32{0}, + }, }, }}, }} + require.NoError(tx.SignSECP256K1Fx(env.vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}})) - if err := tx.SignSECP256K1Fx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}}); err != nil { - t.Fatal(err) - } - - parsedTx, err := vm.ParseTx(context.Background(), tx.Bytes()) - if err != nil { - t.Fatal(err) - } + parsedTx, err := env.vm.ParseTx(context.Background(), tx.Bytes()) + require.NoError(err) - if err := parsedTx.Verify(context.Background()); err == nil { - t.Fatalf("Should have failed verification") - } + require.NoError(parsedTx.Verify(context.Background())) + require.NoError(parsedTx.Accept(context.Background())) - if err := parsedTx.Accept(context.Background()); err != nil { - t.Fatal(err) - } + assertIndexedTX(t, env.vm.db, 0, key.PublicKey().Address(), txAssetID.AssetID(), tx.ID()) + assertLatestIdx(t, env.vm.db, key.PublicKey().Address(), avaxID, 1) id := utxoID.InputID() - if _, err := vm.ctx.SharedMemory.Get(platformID, [][]byte{id[:]}); err == nil { - t.Fatalf("shouldn't have been able to read the utxo") - } + _, err = 
env.vm.ctx.SharedMemory.Get(constants.PlatformChainID, [][]byte{id[:]}) + require.ErrorIs(err, database.ErrNotFound) } func TestImportTxNotState(t *testing.T) { + require := require.New(t) + intf := interface{}(&txs.ImportTx{}) - if _, ok := intf.(verify.State); ok { - t.Fatalf("shouldn't be marked as state") - } + _, ok := intf.(verify.State) + require.False(ok) } -// Test issuing an import transaction. +// Test issuing an export transaction. func TestIssueExportTx(t *testing.T) { - genesisBytes := BuildGenesisTest(t) - - issuer := make(chan common.Message, 1) - baseDBManager := manager.NewMemDB(version.Semantic1_0_0) - - m := atomic.NewMemory(prefixdb.New([]byte{0}, baseDBManager.Current().Database)) + require := require.New(t) - ctx := NewContext(t) - ctx.SharedMemory = m.NewSharedMemory(chainID) - - genesisTx := GetAVAXTxFromGenesisTest(genesisBytes, t) + env := setup(t, &envConfig{}) + defer func() { + require.NoError(env.vm.Shutdown(context.Background())) + env.vm.ctx.Lock.Unlock() + }() + genesisTx := getCreateTxFromGenesisTest(t, env.genesisBytes, "AVAX") avaxID := genesisTx.ID() - ctx.Lock.Lock() - vm := &VM{} - if err := vm.Initialize( - context.Background(), - ctx, - baseDBManager.NewPrefixDBManager([]byte{1}), - genesisBytes, - nil, - nil, - issuer, []*common.Fx{{ - ID: ids.Empty, - Fx: &secp256k1fx.Fx{}, - }}, - nil, - ); err != nil { - t.Fatal(err) - } - vm.batchTimeout = 0 - - if err := vm.SetState(context.Background(), snow.Bootstrapping); err != nil { - t.Fatal(err) - } - - if err := vm.SetState(context.Background(), snow.NormalOp); err != nil { - t.Fatal(err) - } - key := keys[0] - tx := &txs.Tx{Unsigned: &txs.ExportTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: constants.UnitTestID, - BlockchainID: chainID, + BlockchainID: env.vm.ctx.XChainID, Ins: []*avax.TransferableInput{{ UTXOID: avax.UTXOID{ TxID: avaxID, @@ -1830,7 +727,7 @@ func TestIssueExportTx(t *testing.T) { ExportedOuts: []*avax.TransferableOutput{{ Asset: avax.Asset{ID: 
avaxID}, Out: &secp256k1fx.TransferOutput{ - Amt: startBalance - vm.TxFee, + Amt: startBalance - env.vm.TxFee, OutputOwners: secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{key.PublicKey().Address()}, @@ -1838,44 +735,11 @@ func TestIssueExportTx(t *testing.T) { }, }}, }} - if err := tx.SignSECP256K1Fx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}}); err != nil { - t.Fatal(err) - } - - if _, err := vm.IssueTx(tx.Bytes()); err != nil { - t.Fatal(err) - } - - ctx.Lock.Unlock() - - msg := <-issuer - if msg != common.PendingTxs { - t.Fatalf("Wrong message") - } - - ctx.Lock.Lock() - defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } - ctx.Lock.Unlock() - }() - - txs := vm.PendingTxs(context.Background()) - if len(txs) != 1 { - t.Fatalf("Should have returned %d tx(s)", 1) - } - - parsedTx := txs[0] - if err := parsedTx.Verify(context.Background()); err != nil { - t.Fatal(err) - } else if err := parsedTx.Accept(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(tx.SignSECP256K1Fx(env.vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}})) - peerSharedMemory := m.NewSharedMemory(constants.PlatformChainID) + peerSharedMemory := env.sharedMemory.NewSharedMemory(constants.PlatformChainID) utxoBytes, _, _, err := peerSharedMemory.Indexed( - vm.ctx.ChainID, + env.vm.ctx.ChainID, [][]byte{ key.PublicKey().Address().Bytes(), }, @@ -1883,73 +747,46 @@ func TestIssueExportTx(t *testing.T) { nil, math.MaxInt32, ) - if err != nil { - t.Fatal(err) - } - if len(utxoBytes) != 1 { - t.Fatalf("wrong number of utxos %d", len(utxoBytes)) - } -} + require.NoError(err) + require.Empty(utxoBytes) -func TestClearForceAcceptedExportTx(t *testing.T) { - genesisBytes := BuildGenesisTest(t) - - issuer := make(chan common.Message, 1) - baseDBManager := manager.NewMemDB(version.Semantic1_0_0) - - m := atomic.NewMemory(prefixdb.New([]byte{0}, baseDBManager.Current().Database)) - - ctx := NewContext(t) - ctx.SharedMemory 
= m.NewSharedMemory(chainID) + env.vm.ctx.Lock.Unlock() - genesisTx := GetAVAXTxFromGenesisTest(genesisBytes, t) + issueAndAccept(require, env.vm, env.issuer, tx) - avaxID := genesisTx.ID() - platformID := ids.Empty.Prefix(0) - - ctx.Lock.Lock() + env.vm.ctx.Lock.Lock() - avmConfig := Config{ - IndexTransactions: true, - } - avmConfigBytes, err := stdjson.Marshal(avmConfig) - require.NoError(t, err) - vm := &VM{} - err = vm.Initialize( - context.Background(), - ctx, - baseDBManager.NewPrefixDBManager([]byte{1}), - genesisBytes, + utxoBytes, _, _, err = peerSharedMemory.Indexed( + env.vm.ctx.ChainID, + [][]byte{ + key.PublicKey().Address().Bytes(), + }, nil, - avmConfigBytes, - issuer, - []*common.Fx{{ - ID: ids.Empty, - Fx: &secp256k1fx.Fx{}, - }}, nil, + math.MaxInt32, ) - if err != nil { - t.Fatal(err) - } - vm.batchTimeout = 0 + require.NoError(err) + require.Len(utxoBytes, 1) +} - if err := vm.SetState(context.Background(), snow.Bootstrapping); err != nil { - t.Fatal(err) - } +func TestClearForceAcceptedExportTx(t *testing.T) { + require := require.New(t) - err = vm.SetState(context.Background(), snow.NormalOp) - if err != nil { - t.Fatal(err) - } + env := setup(t, &envConfig{}) + defer func() { + require.NoError(env.vm.Shutdown(context.Background())) + env.vm.ctx.Lock.Unlock() + }() - key := keys[0] + genesisTx := getCreateTxFromGenesisTest(t, env.genesisBytes, "AVAX") + avaxID := genesisTx.ID() + key := keys[0] assetID := avax.Asset{ID: avaxID} tx := &txs.Tx{Unsigned: &txs.ExportTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: constants.UnitTestID, - BlockchainID: chainID, + BlockchainID: env.vm.ctx.XChainID, Ins: []*avax.TransferableInput{{ UTXOID: avax.UTXOID{ TxID: avaxID, @@ -1966,7 +803,7 @@ func TestClearForceAcceptedExportTx(t *testing.T) { ExportedOuts: []*avax.TransferableOutput{{ Asset: assetID, Out: &secp256k1fx.TransferOutput{ - Amt: startBalance - vm.TxFee, + Amt: startBalance - env.vm.TxFee, OutputOwners: secp256k1fx.OutputOwners{ 
Threshold: 1, Addrs: []ids.ShortID{key.PublicKey().Address()}, @@ -1974,38 +811,7 @@ func TestClearForceAcceptedExportTx(t *testing.T) { }, }}, }} - if err := tx.SignSECP256K1Fx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}}); err != nil { - t.Fatal(err) - } - - if _, err := vm.IssueTx(tx.Bytes()); err != nil { - t.Fatal(err) - } - - ctx.Lock.Unlock() - - msg := <-issuer - if msg != common.PendingTxs { - t.Fatalf("Wrong message") - } - - ctx.Lock.Lock() - defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } - ctx.Lock.Unlock() - }() - - txs := vm.PendingTxs(context.Background()) - if len(txs) != 1 { - t.Fatalf("Should have returned %d tx(s)", 1) - } - - parsedTx := txs[0] - if err := parsedTx.Verify(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(tx.SignSECP256K1Fx(env.vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}})) utxo := avax.UTXOID{ TxID: tx.ID(), @@ -2013,19 +819,25 @@ func TestClearForceAcceptedExportTx(t *testing.T) { } utxoID := utxo.InputID() - peerSharedMemory := m.NewSharedMemory(platformID) - if err := peerSharedMemory.Apply(map[ids.ID]*atomic.Requests{vm.ctx.ChainID: {RemoveRequests: [][]byte{utxoID[:]}}}); err != nil { - t.Fatal(err) - } + peerSharedMemory := env.sharedMemory.NewSharedMemory(constants.PlatformChainID) + require.NoError(peerSharedMemory.Apply(map[ids.ID]*atomic.Requests{ + env.vm.ctx.ChainID: { + RemoveRequests: [][]byte{utxoID[:]}, + }, + })) - if err := parsedTx.Accept(context.Background()); err != nil { - t.Fatal(err) - } + _, err := peerSharedMemory.Get(env.vm.ctx.ChainID, [][]byte{utxoID[:]}) + require.ErrorIs(err, database.ErrNotFound) - assertIndexedTX(t, vm.db, 0, key.PublicKey().Address(), assetID.AssetID(), parsedTx.ID()) - assertLatestIdx(t, vm.db, key.PublicKey().Address(), assetID.AssetID(), 1) + env.vm.ctx.Lock.Unlock() - if _, err := peerSharedMemory.Get(vm.ctx.ChainID, [][]byte{utxoID[:]}); err == nil { - t.Fatalf("should have failed to read 
the utxo") - } + issueAndAccept(require, env.vm, env.issuer, tx) + + env.vm.ctx.Lock.Lock() + + _, err = peerSharedMemory.Get(env.vm.ctx.ChainID, [][]byte{utxoID[:]}) + require.ErrorIs(err, database.ErrNotFound) + + assertIndexedTX(t, env.vm.db, 0, key.PublicKey().Address(), assetID.AssetID(), tx.ID()) + assertLatestIdx(t, env.vm.db, key.PublicKey().Address(), assetID.AssetID(), 1) } diff --git a/avalanchego/vms/avm/wallet_client.go b/avalanchego/vms/avm/wallet_client.go index c74918e6..69bdc06f 100644 --- a/avalanchego/vms/avm/wallet_client.go +++ b/avalanchego/vms/avm/wallet_client.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avm diff --git a/avalanchego/vms/avm/wallet_service.go b/avalanchego/vms/avm/wallet_service.go index d470cae6..321bf9e5 100644 --- a/avalanchego/vms/avm/wallet_service.go +++ b/avalanchego/vms/avm/wallet_service.go @@ -1,14 +1,15 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package avm import ( + "context" + "errors" "fmt" "net/http" "go.uber.org/zap" - "golang.org/x/exp/maps" "github.com/ava-labs/avalanchego/api" @@ -18,34 +19,83 @@ import ( "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/math" "github.com/ava-labs/avalanchego/vms/avm/txs" + "github.com/ava-labs/avalanchego/vms/avm/txs/mempool" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) +var errMissingUTXO = errors.New("missing utxo") + type WalletService struct { vm *VM pendingTxs linkedhashmap.LinkedHashmap[ids.ID, *txs.Tx] } func (w *WalletService) decided(txID ids.ID) { - w.pendingTxs.Delete(txID) -} + if !w.pendingTxs.Delete(txID) { + return + } -func (w *WalletService) issue(txBytes []byte) (ids.ID, error) { - tx, err := w.vm.parser.ParseTx(txBytes) - if err != nil { - return ids.ID{}, err + w.vm.ctx.Log.Info("tx decided over wallet API", + zap.Stringer("txID", txID), + ) + for { + txID, tx, ok := w.pendingTxs.Oldest() + if !ok { + return + } + + err := w.vm.network.IssueVerifiedTx(context.TODO(), tx) + if err == nil { + w.vm.ctx.Log.Info("issued tx to mempool over wallet API", + zap.Stringer("txID", txID), + ) + return + } + if errors.Is(err, mempool.ErrDuplicateTx) { + return + } + + w.pendingTxs.Delete(txID) + w.vm.ctx.Log.Warn("dropping tx issued over wallet API", + zap.Stringer("txID", txID), + zap.Error(err), + ) } +} - txID, err := w.vm.IssueTx(txBytes) - if err != nil { - return ids.ID{}, err +func (w *WalletService) issue(tx *txs.Tx) (ids.ID, error) { + txID := tx.ID() + w.vm.ctx.Log.Info("issuing tx over wallet API", + zap.Stringer("txID", txID), + ) + + if _, ok := w.pendingTxs.Get(txID); ok { + w.vm.ctx.Log.Warn("issuing duplicate tx over wallet API", + zap.Stringer("txID", txID), + ) + return txID, nil } - if _, ok := w.pendingTxs.Get(txID); !ok { - w.pendingTxs.Put(txID, tx) + if w.pendingTxs.Len() == 0 { + if err := w.vm.network.IssueVerifiedTx(context.TODO(), 
tx); err == nil { + w.vm.ctx.Log.Info("issued tx to mempool over wallet API", + zap.Stringer("txID", txID), + ) + } else if !errors.Is(err, mempool.ErrDuplicateTx) { + w.vm.ctx.Log.Warn("failed to issue tx over wallet API", + zap.Stringer("txID", txID), + zap.Error(err), + ) + return ids.Empty, err + } + } else { + w.vm.ctx.Log.Info("enqueueing tx over wallet API", + zap.Stringer("txID", txID), + ) } + w.pendingTxs.Put(txID, tx) return txID, nil } @@ -90,7 +140,16 @@ func (w *WalletService) IssueTx(_ *http.Request, args *api.FormattedTx, reply *a if err != nil { return fmt.Errorf("problem decoding transaction: %w", err) } - txID, err := w.issue(txBytes) + + tx, err := w.vm.parser.ParseTx(txBytes) + if err != nil { + return err + } + + w.vm.ctx.Lock.Lock() + defer w.vm.ctx.Lock.Unlock() + + txID, err := w.issue(tx) reply.TxID = txID return err } @@ -128,6 +187,9 @@ func (w *WalletService) SendMultiple(_ *http.Request, args *SendMultipleArgs, re return fmt.Errorf("couldn't parse 'From' addresses: %w", err) } + w.vm.ctx.Lock.Lock() + defer w.vm.ctx.Lock.Unlock() + // Load user's UTXOs/keys utxos, kc, err := w.vm.LoadUser(args.Username, args.Password, fromAddrs) if err != nil { @@ -233,7 +295,7 @@ func (w *WalletService) SendMultiple(_ *http.Request, args *SendMultipleArgs, re codec := w.vm.parser.Codec() avax.SortTransferableOutputs(outs, codec) - tx := txs.Tx{Unsigned: &txs.BaseTx{BaseTx: avax.BaseTx{ + tx := &txs.Tx{Unsigned: &txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: w.vm.ctx.NetworkID, BlockchainID: w.vm.ctx.ChainID, Outs: outs, @@ -244,7 +306,7 @@ func (w *WalletService) SendMultiple(_ *http.Request, args *SendMultipleArgs, re return err } - txID, err := w.issue(tx.Bytes()) + txID, err := w.issue(tx) if err != nil { return fmt.Errorf("problem issuing transaction: %w", err) } diff --git a/avalanchego/vms/avm/wallet_service_test.go b/avalanchego/vms/avm/wallet_service_test.go index 23713ff9..7ffdccda 100644 --- a/avalanchego/vms/avm/wallet_service_test.go +++ 
b/avalanchego/vms/avm/wallet_service_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avm @@ -7,86 +7,39 @@ import ( "context" "testing" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/api" - "github.com/ava-labs/avalanchego/chains/atomic" - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/linkedhashmap" - "github.com/ava-labs/avalanchego/vms/avm/txs" - "github.com/ava-labs/avalanchego/vms/components/keystore" ) -// Returns: -// 1) genesis bytes of vm -// 2) the VM -// 3) The wallet service that wraps the VM -// 4) atomic memory to use in tests -func setupWS(t *testing.T, isAVAXAsset bool) ([]byte, *VM, *WalletService, *atomic.Memory, *txs.Tx) { - var genesisBytes []byte - var vm *VM - var m *atomic.Memory - var genesisTx *txs.Tx - if isAVAXAsset { - genesisBytes, _, vm, m = GenesisVM(t) - genesisTx = GetAVAXTxFromGenesisTest(genesisBytes, t) - } else { - genesisBytes, _, vm, m = setupTxFeeAssets(t) - genesisTx = GetCreateTxFromGenesisTest(t, genesisBytes, feeAssetName) - } - - ws := &WalletService{ - vm: vm, - pendingTxs: linkedhashmap.New[ids.ID, *txs.Tx](), - } - return genesisBytes, vm, ws, m, genesisTx -} - -// Returns: -// 1) genesis bytes of vm -// 2) the VM -// 3) The wallet service that wraps the VM -// 4) atomic memory to use in tests -func setupWSWithKeys(t *testing.T, isAVAXAsset bool) ([]byte, *VM, *WalletService, *atomic.Memory, *txs.Tx) { - genesisBytes, vm, ws, m, tx := setupWS(t, isAVAXAsset) - - // Import the initially funded private keys - user, err := keystore.NewUserFromKeystore(ws.vm.ctx.Keystore, username, password) - if err != nil { - t.Fatal(err) - } - - if err := user.PutKeys(keys...); err != nil { - t.Fatalf("Failed to set key for user: %s", err) - } - - if err := user.Close(); err != nil { - t.Fatal(err) - } - return 
genesisBytes, vm, ws, m, tx -} - func TestWalletService_SendMultiple(t *testing.T) { + require := require.New(t) + for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - _, vm, ws, _, genesisTx := setupWSWithKeys(t, tc.avaxAsset) + env := setup(t, &envConfig{ + isCustomFeeAsset: !tc.avaxAsset, + keystoreUsers: []*user{{ + username: username, + password: password, + initialKeys: keys, + }}, + }) + env.vm.ctx.Lock.Unlock() + defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } - vm.ctx.Lock.Unlock() + require.NoError(env.vm.Shutdown(context.Background())) + env.vm.ctx.Lock.Unlock() }() - assetID := genesisTx.ID() + assetID := env.genesisTx.ID() addr := keys[0].PublicKey().Address() - addrStr, err := vm.FormatLocalAddress(addr) - if err != nil { - t.Fatal(err) - } - changeAddrStr, err := vm.FormatLocalAddress(testChangeAddr) - if err != nil { - t.Fatal(err) - } - _, fromAddrsStr := sampleAddrs(t, vm, addrs) + addrStr, err := env.vm.FormatLocalAddress(addr) + require.NoError(err) + changeAddrStr, err := env.vm.FormatLocalAddress(testChangeAddr) + require.NoError(err) + _, fromAddrsStr := sampleAddrs(t, env.vm.AddressManager, addrs) args := &SendMultipleArgs{ JSONSpendHeader: api.JSONSpendHeader{ @@ -111,25 +64,15 @@ func TestWalletService_SendMultiple(t *testing.T) { }, } reply := &api.JSONTxIDChangeAddr{} - vm.timer.Cancel() - if err := ws.SendMultiple(nil, args, reply); err != nil { - t.Fatalf("Failed to send transaction: %s", err) - } else if reply.ChangeAddr != changeAddrStr { - t.Fatalf("expected change address to be %s but got %s", changeAddrStr, reply.ChangeAddr) - } + require.NoError(env.walletService.SendMultiple(nil, args, reply)) + require.Equal(changeAddrStr, reply.ChangeAddr) - pendingTxs := vm.txs - if len(pendingTxs) != 1 { - t.Fatalf("Expected to find 1 pending tx after send, but found %d", len(pendingTxs)) - } + buildAndAccept(require, env.vm, env.issuer, reply.TxID) - if reply.TxID != 
pendingTxs[0].ID() { - t.Fatal("Transaction ID returned by SendMultiple does not match the transaction found in vm's pending transactions") - } + env.vm.ctx.Lock.Lock() - if _, err := vm.GetTx(context.Background(), reply.TxID); err != nil { - t.Fatalf("Failed to retrieve created transaction: %s", err) - } + _, err = env.vm.state.GetTx(reply.TxID) + require.NoError(err) }) } } diff --git a/avalanchego/vms/components/avax/addresses.go b/avalanchego/vms/components/avax/addresses.go index 400000f2..a1567f75 100644 --- a/avalanchego/vms/components/avax/addresses.go +++ b/avalanchego/vms/components/avax/addresses.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avax import ( + "errors" "fmt" "github.com/ava-labs/avalanchego/ids" @@ -13,7 +14,11 @@ import ( "github.com/ava-labs/avalanchego/utils/set" ) -var _ AddressManager = (*addressManager)(nil) +var ( + _ AddressManager = (*addressManager)(nil) + + ErrMismatchedChainIDs = errors.New("mismatched chainIDs") +) type AddressManager interface { // ParseLocalAddress takes in an address for this chain and produces the ID @@ -49,7 +54,8 @@ func (a *addressManager) ParseLocalAddress(addrStr string) (ids.ShortID, error) } if chainID != a.ctx.ChainID { return ids.ShortID{}, fmt.Errorf( - "expected chainID to be %q but was %q", + "%w: expected %q but got %q", + ErrMismatchedChainIDs, a.ctx.ChainID, chainID, ) diff --git a/avalanchego/vms/components/avax/asset.go b/avalanchego/vms/components/avax/asset.go index 90a3eef6..bc165b4d 100644 --- a/avalanchego/vms/components/avax/asset.go +++ b/avalanchego/vms/components/avax/asset.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package avax diff --git a/avalanchego/vms/components/avax/asset_test.go b/avalanchego/vms/components/avax/asset_test.go index b1744f62..ad7628ce 100644 --- a/avalanchego/vms/components/avax/asset_test.go +++ b/avalanchego/vms/components/avax/asset_test.go @@ -1,10 +1,13 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avax import ( "testing" + "time" + + "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" @@ -13,24 +16,22 @@ import ( func TestAssetVerifyNil(t *testing.T) { id := (*Asset)(nil) - if err := id.Verify(); err == nil { - t.Fatalf("Should have errored due to nil AssetID") - } + err := id.Verify() + require.ErrorIs(t, err, errNilAssetID) } func TestAssetVerifyEmpty(t *testing.T) { id := Asset{} - if err := id.Verify(); err == nil { - t.Fatalf("Should have errored due to empty AssetID") - } + err := id.Verify() + require.ErrorIs(t, err, errEmptyAssetID) } func TestAssetID(t *testing.T) { - c := linearcodec.NewDefault() + require := require.New(t) + + c := linearcodec.NewDefault(time.Time{}) manager := codec.NewDefaultManager() - if err := manager.RegisterCodec(codecVersion, c); err != nil { - t.Fatal(err) - } + require.NoError(manager.RegisterCodec(codecVersion, c)) id := Asset{ ID: ids.ID{ @@ -41,25 +42,16 @@ func TestAssetID(t *testing.T) { }, } - if err := id.Verify(); err != nil { - t.Fatal(err) - } + require.NoError(id.Verify()) bytes, err := manager.Marshal(codecVersion, &id) - if err != nil { - t.Fatal(err) - } + require.NoError(err) newID := Asset{} - if _, err := manager.Unmarshal(bytes, &newID); err != nil { - t.Fatal(err) - } + _, err = manager.Unmarshal(bytes, &newID) + require.NoError(err) - if err := newID.Verify(); err != nil { - t.Fatal(err) - } + require.NoError(newID.Verify()) - if id.AssetID() != newID.AssetID() { - 
t.Fatalf("Parsing returned the wrong Asset ID") - } + require.Equal(id.AssetID(), newID.AssetID()) } diff --git a/avalanchego/vms/components/avax/atomic_utxos.go b/avalanchego/vms/components/avax/atomic_utxos.go index 20b22420..3ac9c166 100644 --- a/avalanchego/vms/components/avax/atomic_utxos.go +++ b/avalanchego/vms/components/avax/atomic_utxos.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avax diff --git a/avalanchego/vms/components/avax/base_tx.go b/avalanchego/vms/components/avax/base_tx.go index d176d7f0..5afed5f3 100644 --- a/avalanchego/vms/components/avax/base_tx.go +++ b/avalanchego/vms/components/avax/base_tx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avax @@ -9,7 +9,6 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/vms/types" ) @@ -41,20 +40,6 @@ func (t *BaseTx) InputUTXOs() []*UTXOID { return utxos } -// ConsumedAssetIDs returns the IDs of the assets this transaction consumes -func (t *BaseTx) ConsumedAssetIDs() set.Set[ids.ID] { - assets := set.Set[ids.ID]{} - for _, in := range t.Ins { - assets.Add(in.AssetID()) - } - return assets -} - -// AssetIDs returns the IDs of the assets this transaction depends on -func (t *BaseTx) AssetIDs() set.Set[ids.ID] { - return t.ConsumedAssetIDs() -} - // NumCredentials returns the number of expected credentials func (t *BaseTx) NumCredentials() int { return len(t.Ins) @@ -80,3 +65,21 @@ func (t *BaseTx) Verify(ctx *snow.Context) error { return nil } } + +func VerifyMemoFieldLength(memo types.JSONByteSlice, isDurangoActive bool) error { + if !isDurangoActive { + // SyntacticVerify 
validates this field pre-Durango + return nil + } + + if len(memo) != 0 { + return fmt.Errorf( + "%w: %d > %d", + ErrMemoTooLarge, + len(memo), + 0, + ) + } + + return nil +} diff --git a/avalanchego/vms/components/avax/flow_checker.go b/avalanchego/vms/components/avax/flow_checker.go index b0ed8c86..e02aee71 100644 --- a/avalanchego/vms/components/avax/flow_checker.go +++ b/avalanchego/vms/components/avax/flow_checker.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avax diff --git a/avalanchego/vms/components/avax/metadata.go b/avalanchego/vms/components/avax/metadata.go index f03389c4..16304841 100644 --- a/avalanchego/vms/components/avax/metadata.go +++ b/avalanchego/vms/components/avax/metadata.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avax diff --git a/avalanchego/vms/components/avax/metadata_test.go b/avalanchego/vms/components/avax/metadata_test.go index 4d14cb6e..9569e3e3 100644 --- a/avalanchego/vms/components/avax/metadata_test.go +++ b/avalanchego/vms/components/avax/metadata_test.go @@ -1,22 +1,22 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package avax import ( "testing" + + "github.com/stretchr/testify/require" ) func TestMetaDataVerifyNil(t *testing.T) { md := (*Metadata)(nil) - if err := md.Verify(); err == nil { - t.Fatalf("Should have errored due to nil metadata") - } + err := md.Verify() + require.ErrorIs(t, err, errNilMetadata) } func TestMetaDataVerifyUninitialized(t *testing.T) { md := &Metadata{} - if err := md.Verify(); err == nil { - t.Fatalf("Should have errored due to uninitialized metadata") - } + err := md.Verify() + require.ErrorIs(t, err, errMetadataNotInitialize) } diff --git a/avalanchego/vms/components/avax/mock_transferable_in.go b/avalanchego/vms/components/avax/mock_transferable_in.go index 5d1e6fc7..b4db8993 100644 --- a/avalanchego/vms/components/avax/mock_transferable_in.go +++ b/avalanchego/vms/components/avax/mock_transferable_in.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/vms/components/avax (interfaces: TransferableIn) +// +// Generated by this command: +// +// mockgen -package=avax -destination=vms/components/avax/mock_transferable_in.go github.com/ava-labs/avalanchego/vms/components/avax TransferableIn +// // Package avax is a generated GoMock package. package avax @@ -11,7 +13,7 @@ import ( reflect "reflect" snow "github.com/ava-labs/avalanchego/snow" - gomock "github.com/golang/mock/gomock" + gomock "go.uber.org/mock/gomock" ) // MockTransferableIn is a mock of TransferableIn interface. @@ -73,7 +75,7 @@ func (m *MockTransferableIn) InitCtx(arg0 *snow.Context) { } // InitCtx indicates an expected call of InitCtx. 
-func (mr *MockTransferableInMockRecorder) InitCtx(arg0 interface{}) *gomock.Call { +func (mr *MockTransferableInMockRecorder) InitCtx(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InitCtx", reflect.TypeOf((*MockTransferableIn)(nil).InitCtx), arg0) } diff --git a/avalanchego/vms/components/avax/mock_transferable_out.go b/avalanchego/vms/components/avax/mock_transferable_out.go index bb3ad3ae..b518b863 100644 --- a/avalanchego/vms/components/avax/mock_transferable_out.go +++ b/avalanchego/vms/components/avax/mock_transferable_out.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/vms/components/avax (interfaces: TransferableOut) +// +// Generated by this command: +// +// mockgen -package=avax -destination=vms/components/avax/mock_transferable_out.go github.com/ava-labs/avalanchego/vms/components/avax TransferableOut +// // Package avax is a generated GoMock package. package avax @@ -11,11 +13,14 @@ import ( reflect "reflect" snow "github.com/ava-labs/avalanchego/snow" - gomock "github.com/golang/mock/gomock" + verify "github.com/ava-labs/avalanchego/vms/components/verify" + gomock "go.uber.org/mock/gomock" ) // MockTransferableOut is a mock of TransferableOut interface. type MockTransferableOut struct { + verify.IsState + ctrl *gomock.Controller recorder *MockTransferableOutMockRecorder } @@ -58,7 +63,7 @@ func (m *MockTransferableOut) InitCtx(arg0 *snow.Context) { } // InitCtx indicates an expected call of InitCtx. 
-func (mr *MockTransferableOutMockRecorder) InitCtx(arg0 interface{}) *gomock.Call { +func (mr *MockTransferableOutMockRecorder) InitCtx(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InitCtx", reflect.TypeOf((*MockTransferableOut)(nil).InitCtx), arg0) } @@ -77,16 +82,14 @@ func (mr *MockTransferableOutMockRecorder) Verify() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Verify", reflect.TypeOf((*MockTransferableOut)(nil).Verify)) } -// VerifyState mocks base method. -func (m *MockTransferableOut) VerifyState() error { +// isState mocks base method. +func (m *MockTransferableOut) isState() { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "VerifyState") - ret0, _ := ret[0].(error) - return ret0 + m.ctrl.Call(m, "isState") } -// VerifyState indicates an expected call of VerifyState. -func (mr *MockTransferableOutMockRecorder) VerifyState() *gomock.Call { +// isState indicates an expected call of isState. +func (mr *MockTransferableOutMockRecorder) isState() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VerifyState", reflect.TypeOf((*MockTransferableOut)(nil).VerifyState)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "isState", reflect.TypeOf((*MockTransferableOut)(nil).isState)) } diff --git a/avalanchego/vms/components/avax/state.go b/avalanchego/vms/components/avax/state.go index ab0d42ab..1a7616b2 100644 --- a/avalanchego/vms/components/avax/state.go +++ b/avalanchego/vms/components/avax/state.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package avax diff --git a/avalanchego/vms/components/avax/test_verifiable.go b/avalanchego/vms/components/avax/test_verifiable.go index 0e9eb877..8649b01e 100644 --- a/avalanchego/vms/components/avax/test_verifiable.go +++ b/avalanchego/vms/components/avax/test_verifiable.go @@ -1,24 +1,33 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avax -import "github.com/ava-labs/avalanchego/snow" +import ( + "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/vms/components/verify" +) -type TestVerifiable struct{ Err error } +var ( + _ verify.State = (*TestState)(nil) + _ TransferableOut = (*TestTransferable)(nil) + _ Addressable = (*TestAddressable)(nil) +) -func (*TestVerifiable) InitCtx(*snow.Context) {} +type TestState struct { + verify.IsState `json:"-"` -func (v *TestVerifiable) Verify() error { - return v.Err + Err error } -func (v *TestVerifiable) VerifyState() error { +func (*TestState) InitCtx(*snow.Context) {} + +func (v *TestState) Verify() error { return v.Err } type TestTransferable struct { - TestVerifiable + TestState Val uint64 `serialize:"true"` } diff --git a/avalanchego/vms/components/avax/transferables.go b/avalanchego/vms/components/avax/transferables.go index fee08be1..18e3cf77 100644 --- a/avalanchego/vms/components/avax/transferables.go +++ b/avalanchego/vms/components/avax/transferables.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package avax @@ -65,7 +65,7 @@ type TransferableOutput struct { Asset `serialize:"true"` // FxID has serialize false because we don't want this to be encoded in bytes FxID ids.ID `serialize:"false" json:"fxID"` - Out TransferableOut `serialize:"true" json:"output"` + Out TransferableOut `serialize:"true" json:"output"` } func (out *TransferableOutput) InitCtx(ctx *snow.Context) { @@ -142,7 +142,7 @@ type TransferableInput struct { Asset `serialize:"true"` // FxID has serialize false because we don't want this to be encoded in bytes FxID ids.ID `serialize:"false" json:"fxID"` - In TransferableIn `serialize:"true" json:"input"` + In TransferableIn `serialize:"true" json:"input"` } // Input returns the feature extension input that this Input is using. @@ -161,8 +161,8 @@ func (in *TransferableInput) Verify() error { } } -func (in *TransferableInput) Less(other *TransferableInput) bool { - return in.UTXOID.Less(&other.UTXOID) +func (in *TransferableInput) Compare(other *TransferableInput) int { + return in.UTXOID.Compare(&other.UTXOID) } type innerSortTransferableInputsWithSigners struct { @@ -233,7 +233,7 @@ func VerifyTx( } fc.Consume(in.AssetID(), in.Input().Amount()) } - if !utils.IsSortedAndUniqueSortable(ins) { + if !utils.IsSortedAndUnique(ins) { return ErrInputsNotSortedUnique } } diff --git a/avalanchego/vms/components/avax/transferables_test.go b/avalanchego/vms/components/avax/transferables_test.go index 589fb0e0..755a0124 100644 --- a/avalanchego/vms/components/avax/transferables_test.go +++ b/avalanchego/vms/components/avax/transferables_test.go @@ -1,11 +1,13 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package avax import ( - "bytes" "testing" + "time" + + "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" @@ -16,41 +18,35 @@ import ( func TestTransferableOutputVerifyNil(t *testing.T) { to := (*TransferableOutput)(nil) - if err := to.Verify(); err == nil { - t.Fatalf("Should have errored due to nil transferable output") - } + err := to.Verify() + require.ErrorIs(t, err, ErrNilTransferableOutput) } func TestTransferableOutputVerifyNilFx(t *testing.T) { to := &TransferableOutput{Asset: Asset{ID: ids.Empty}} - if err := to.Verify(); err == nil { - t.Fatalf("Should have errored due to nil transferable fx output") - } + err := to.Verify() + require.ErrorIs(t, err, ErrNilTransferableFxOutput) } func TestTransferableOutputVerify(t *testing.T) { + require := require.New(t) + assetID := ids.GenerateTestID() to := &TransferableOutput{ Asset: Asset{ID: assetID}, Out: &TestTransferable{Val: 1}, } - if err := to.Verify(); err != nil { - t.Fatal(err) - } - if to.Output() != to.Out { - t.Fatalf("Should have returned the fx output") - } + require.NoError(to.Verify()) + require.Equal(to.Out, to.Output()) } func TestTransferableOutputSorting(t *testing.T) { - c := linearcodec.NewDefault() - if err := c.RegisterType(&TestTransferable{}); err != nil { - t.Fatal(err) - } + require := require.New(t) + + c := linearcodec.NewDefault(time.Time{}) + require.NoError(c.RegisterType(&TestTransferable{})) manager := codec.NewDefaultManager() - if err := manager.RegisterCodec(codecVersion, c); err != nil { - t.Fatal(err) - } + require.NoError(manager.RegisterCodec(codecVersion, c)) assetID1 := ids.ID{1} outs := []*TransferableOutput{ @@ -76,39 +72,23 @@ func TestTransferableOutputSorting(t *testing.T) { }, } - if IsSortedTransferableOutputs(outs, manager) { - t.Fatalf("Shouldn't be sorted") - } + require.False(IsSortedTransferableOutputs(outs, manager)) SortTransferableOutputs(outs, manager) - if 
!IsSortedTransferableOutputs(outs, manager) { - t.Fatalf("Should be sorted") - } - if result := outs[0].Out.(*TestTransferable).Val; result != 0 { - t.Fatalf("Val expected: %d ; result: %d", 0, result) - } - if result := outs[1].Out.(*TestTransferable).Val; result != 0 { - t.Fatalf("Val expected: %d ; result: %d", 0, result) - } - if result := outs[2].Out.(*TestTransferable).Val; result != 1 { - t.Fatalf("Val expected: %d ; result: %d", 0, result) - } - if result := outs[3].AssetID(); result != assetID1 { - t.Fatalf("Val expected: %s ; result: %s", assetID1, result) - } - if result := outs[4].AssetID(); result != assetID1 { - t.Fatalf("Val expected: %s ; result: %s", assetID1, result) - } + require.True(IsSortedTransferableOutputs(outs, manager)) + require.Zero(outs[0].Out.(*TestTransferable).Val) + require.Zero(outs[1].Out.(*TestTransferable).Val) + require.Equal(uint64(1), outs[2].Out.(*TestTransferable).Val) + require.Equal(assetID1, outs[3].AssetID()) + require.Equal(assetID1, outs[4].AssetID()) } func TestTransferableOutputSerialization(t *testing.T) { - c := linearcodec.NewDefault() - if err := c.RegisterType(&secp256k1fx.TransferOutput{}); err != nil { - t.Fatal(err) - } + require := require.New(t) + + c := linearcodec.NewDefault(time.Time{}) + require.NoError(c.RegisterType(&secp256k1fx.TransferOutput{})) manager := codec.NewDefaultManager() - if err := manager.RegisterCodec(codecVersion, c); err != nil { - t.Fatal(err) - } + require.NoError(manager.RegisterCodec(codecVersion, c)) expected := []byte{ // Codec version @@ -161,22 +141,14 @@ func TestTransferableOutputSerialization(t *testing.T) { } outBytes, err := manager.Marshal(codecVersion, out) - if err != nil { - t.Fatal(err) - } - if !bytes.Equal(outBytes, expected) { - t.Fatalf("Expected:\n0x%x\nResult:\n0x%x", - expected, - outBytes, - ) - } + require.NoError(err) + require.Equal(expected, outBytes) } func TestTransferableInputVerifyNil(t *testing.T) { ti := (*TransferableInput)(nil) - if err := 
ti.Verify(); err == nil { - t.Fatalf("Should have errored due to nil transferable input") - } + err := ti.Verify() + require.ErrorIs(t, err, ErrNilTransferableInput) } func TestTransferableInputVerifyNilFx(t *testing.T) { @@ -184,31 +156,28 @@ func TestTransferableInputVerifyNilFx(t *testing.T) { UTXOID: UTXOID{TxID: ids.Empty}, Asset: Asset{ID: ids.Empty}, } - if err := ti.Verify(); err == nil { - t.Fatalf("Should have errored due to nil transferable fx input") - } + err := ti.Verify() + require.ErrorIs(t, err, ErrNilTransferableFxInput) } func TestTransferableInputVerify(t *testing.T) { + require := require.New(t) + assetID := ids.GenerateTestID() ti := &TransferableInput{ UTXOID: UTXOID{TxID: assetID}, Asset: Asset{ID: assetID}, In: &TestTransferable{}, } - if err := ti.Verify(); err != nil { - t.Fatal(err) - } - if ti.Input() != ti.In { - t.Fatalf("Should have returned the fx input") - } + require.NoError(ti.Verify()) + require.Equal(ti.In, ti.Input()) } func TestTransferableInputSorting(t *testing.T) { - c := linearcodec.NewDefault() - if err := c.RegisterType(&TestTransferable{}); err != nil { - t.Fatal(err) - } + require := require.New(t) + + c := linearcodec.NewDefault(time.Time{}) + require.NoError(c.RegisterType(&TestTransferable{})) ins := []*TransferableInput{ { @@ -245,13 +214,9 @@ func TestTransferableInputSorting(t *testing.T) { }, } - if utils.IsSortedAndUniqueSortable(ins) { - t.Fatalf("Shouldn't be sorted") - } + require.False(utils.IsSortedAndUnique(ins)) utils.Sort(ins) - if !utils.IsSortedAndUniqueSortable(ins) { - t.Fatalf("Should be sorted") - } + require.True(utils.IsSortedAndUnique(ins)) ins = append(ins, &TransferableInput{ UTXOID: UTXOID{ @@ -262,20 +227,16 @@ func TestTransferableInputSorting(t *testing.T) { In: &TestTransferable{}, }) - if utils.IsSortedAndUniqueSortable(ins) { - t.Fatalf("Shouldn't be unique") - } + require.False(utils.IsSortedAndUnique(ins)) } func TestTransferableInputSerialization(t *testing.T) { - c := 
linearcodec.NewDefault() - if err := c.RegisterType(&secp256k1fx.TransferInput{}); err != nil { - t.Fatal(err) - } + require := require.New(t) + + c := linearcodec.NewDefault(time.Time{}) + require.NoError(c.RegisterType(&secp256k1fx.TransferInput{})) manager := codec.NewDefaultManager() - if err := manager.RegisterCodec(codecVersion, c); err != nil { - t.Fatal(err) - } + require.NoError(manager.RegisterCodec(codecVersion, c)) expected := []byte{ // Codec version @@ -325,13 +286,6 @@ func TestTransferableInputSerialization(t *testing.T) { } inBytes, err := manager.Marshal(codecVersion, in) - if err != nil { - t.Fatal(err) - } - if !bytes.Equal(inBytes, expected) { - t.Fatalf("Expected:\n0x%x\nResult:\n0x%x", - expected, - inBytes, - ) - } + require.NoError(err) + require.Equal(expected, inBytes) } diff --git a/avalanchego/vms/components/avax/utxo.go b/avalanchego/vms/components/avax/utxo.go index afea6891..a11c94af 100644 --- a/avalanchego/vms/components/avax/utxo.go +++ b/avalanchego/vms/components/avax/utxo.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avax diff --git a/avalanchego/vms/components/avax/utxo_fetching.go b/avalanchego/vms/components/avax/utxo_fetching.go index 18525131..c5170f1d 100644 --- a/avalanchego/vms/components/avax/utxo_fetching.go +++ b/avalanchego/vms/components/avax/utxo_fetching.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package avax diff --git a/avalanchego/vms/components/avax/utxo_fetching_test.go b/avalanchego/vms/components/avax/utxo_fetching_test.go index 2af04f53..e36545c1 100644 --- a/avalanchego/vms/components/avax/utxo_fetching_test.go +++ b/avalanchego/vms/components/avax/utxo_fetching_test.go @@ -1,10 +1,11 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avax import ( "testing" + "time" "github.com/stretchr/testify/require" @@ -13,7 +14,6 @@ import ( "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/set" - "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) @@ -23,8 +23,7 @@ func TestFetchUTXOs(t *testing.T) { txID := ids.GenerateTestID() assetID := ids.GenerateTestID() addr := ids.GenerateTestShortID() - addrs := set.Set[ids.ShortID]{} - addrs.Add(addr) + addrs := set.Of(addr) utxo := &UTXO{ UTXOID: UTXOID{ TxID: txID, @@ -41,22 +40,18 @@ func TestFetchUTXOs(t *testing.T) { }, } - c := linearcodec.NewDefault() + c := linearcodec.NewDefault(time.Time{}) manager := codec.NewDefaultManager() - errs := wrappers.Errs{} - errs.Add( - c.RegisterType(&secp256k1fx.TransferOutput{}), - manager.RegisterCodec(codecVersion, c), - ) - require.NoError(errs.Err) + require.NoError(c.RegisterType(&secp256k1fx.TransferOutput{})) + require.NoError(manager.RegisterCodec(codecVersion, c)) db := memdb.New() - s := NewUTXOState(db, manager) - - err := s.PutUTXO(utxo) + s, err := NewUTXOState(db, manager, trackChecksum) require.NoError(err) + require.NoError(s.PutUTXO(utxo)) + utxos, err := GetAllUTXOs(s, addrs) require.NoError(err) require.Len(utxos, 1) @@ -64,7 +59,7 @@ func TestFetchUTXOs(t *testing.T) { balance, err := GetBalance(s, addrs) require.NoError(err) - require.EqualValues(12345, balance) + require.Equal(uint64(12345), 
balance) } // TestGetPaginatedUTXOs tests @@ -76,21 +71,17 @@ func TestGetPaginatedUTXOs(t *testing.T) { addr0 := ids.GenerateTestShortID() addr1 := ids.GenerateTestShortID() addr2 := ids.GenerateTestShortID() - addrs := set.Set[ids.ShortID]{} - addrs.Add(addr0, addr1) + addrs := set.Of(addr0, addr1) - c := linearcodec.NewDefault() + c := linearcodec.NewDefault(time.Time{}) manager := codec.NewDefaultManager() - errs := wrappers.Errs{} - errs.Add( - c.RegisterType(&secp256k1fx.TransferOutput{}), - manager.RegisterCodec(codecVersion, c), - ) - require.NoError(errs.Err) + require.NoError(c.RegisterType(&secp256k1fx.TransferOutput{})) + require.NoError(manager.RegisterCodec(codecVersion, c)) db := memdb.New() - s := NewUTXOState(db, manager) + s, err := NewUTXOState(db, manager, trackChecksum) + require.NoError(err) // Create 1000 UTXOs each on addr0, addr1, and addr2. for i := 0; i < 1000; i++ { @@ -111,8 +102,7 @@ func TestGetPaginatedUTXOs(t *testing.T) { }, }, } - err := s.PutUTXO(utxo0) - require.NoError(err) + require.NoError(s.PutUTXO(utxo0)) utxo1 := &UTXO{ UTXOID: UTXOID{ @@ -129,8 +119,7 @@ func TestGetPaginatedUTXOs(t *testing.T) { }, }, } - err = s.PutUTXO(utxo1) - require.NoError(err) + require.NoError(s.PutUTXO(utxo1)) utxo2 := &UTXO{ UTXOID: UTXOID{ @@ -147,39 +136,26 @@ func TestGetPaginatedUTXOs(t *testing.T) { }, }, } - err = s.PutUTXO(utxo2) - require.NoError(err) + require.NoError(s.PutUTXO(utxo2)) } var ( fetchedUTXOs []*UTXO - err error + lastAddr = ids.ShortEmpty + lastIdx = ids.Empty + totalUTXOs []*UTXO ) - - lastAddr := ids.ShortEmpty - lastIdx := ids.Empty - - var totalUTXOs []*UTXO for i := 0; i <= 10; i++ { fetchedUTXOs, lastAddr, lastIdx, err = GetPaginatedUTXOs(s, addrs, lastAddr, lastIdx, 512) - if err != nil { - t.Fatal(err) - } + require.NoError(err) totalUTXOs = append(totalUTXOs, fetchedUTXOs...) } - if len(totalUTXOs) != 2000 { - t.Fatalf("Wrong number of utxos. Should have paginated through all. 
Expected (%d) returned (%d)", 2000, len(totalUTXOs)) - } + require.Len(totalUTXOs, 2000) // Fetch all UTXOs notPaginatedUTXOs, err := GetAllUTXOs(s, addrs) - if err != nil { - t.Fatal(err) - } - - if len(notPaginatedUTXOs) != len(totalUTXOs) { - t.Fatalf("Wrong number of utxos. Expected (%d) returned (%d)", len(totalUTXOs), len(notPaginatedUTXOs)) - } + require.NoError(err) + require.Len(notPaginatedUTXOs, len(totalUTXOs)) } diff --git a/avalanchego/vms/components/avax/utxo_handler.go b/avalanchego/vms/components/avax/utxo_handler.go index 782d8592..c6e705af 100644 --- a/avalanchego/vms/components/avax/utxo_handler.go +++ b/avalanchego/vms/components/avax/utxo_handler.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avax diff --git a/avalanchego/vms/components/avax/utxo_id.go b/avalanchego/vms/components/avax/utxo_id.go index 57a4c3c9..5e8cf871 100644 --- a/avalanchego/vms/components/avax/utxo_id.go +++ b/avalanchego/vms/components/avax/utxo_id.go @@ -1,10 +1,11 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package avax import ( "bytes" + "cmp" "errors" "fmt" "strconv" @@ -68,12 +69,12 @@ func UTXOIDFromString(s string) (*UTXOID, error) { txID, err := ids.FromString(ss[0]) if err != nil { - return nil, fmt.Errorf("%w: %v", errFailedDecodingUTXOIDTxID, err) + return nil, fmt.Errorf("%w: %w", errFailedDecodingUTXOIDTxID, err) } idx, err := strconv.ParseUint(ss[1], 10, 32) if err != nil { - return nil, fmt.Errorf("%w: %v", errFailedDecodingUTXOIDIndex, err) + return nil, fmt.Errorf("%w: %w", errFailedDecodingUTXOIDIndex, err) } return &UTXOID{ @@ -91,16 +92,11 @@ func (utxo *UTXOID) Verify() error { } } -func (utxo *UTXOID) Less(other *UTXOID) bool { +func (utxo *UTXOID) Compare(other *UTXOID) int { utxoID, utxoIndex := utxo.InputSource() otherID, otherIndex := other.InputSource() - - switch bytes.Compare(utxoID[:], otherID[:]) { - case -1: - return true - case 0: - return utxoIndex < otherIndex - default: - return false + if txIDComp := bytes.Compare(utxoID[:], otherID[:]); txIDComp != 0 { + return txIDComp } + return cmp.Compare(utxoIndex, otherIndex) } diff --git a/avalanchego/vms/components/avax/utxo_id_test.go b/avalanchego/vms/components/avax/utxo_id_test.go index a35ac023..fed21d5c 100644 --- a/avalanchego/vms/components/avax/utxo_id_test.go +++ b/avalanchego/vms/components/avax/utxo_id_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package avax @@ -6,6 +6,7 @@ package avax import ( "math" "testing" + "time" "github.com/stretchr/testify/require" @@ -16,18 +17,16 @@ import ( func TestUTXOIDVerifyNil(t *testing.T) { utxoID := (*UTXOID)(nil) - - if err := utxoID.Verify(); err == nil { - t.Fatalf("Should have errored due to a nil utxo ID") - } + err := utxoID.Verify() + require.ErrorIs(t, err, errNilUTXOID) } func TestUTXOID(t *testing.T) { - c := linearcodec.NewDefault() + require := require.New(t) + + c := linearcodec.NewDefault(time.Time{}) manager := codec.NewDefaultManager() - if err := manager.RegisterCodec(codecVersion, c); err != nil { - t.Fatal(err) - } + require.NoError(manager.RegisterCodec(codecVersion, c)) utxoID := UTXOID{ TxID: ids.ID{ @@ -39,80 +38,56 @@ func TestUTXOID(t *testing.T) { OutputIndex: 0x20212223, } - if err := utxoID.Verify(); err != nil { - t.Fatal(err) - } + require.NoError(utxoID.Verify()) bytes, err := manager.Marshal(codecVersion, &utxoID) - if err != nil { - t.Fatal(err) - } + require.NoError(err) newUTXOID := UTXOID{} - if _, err := manager.Unmarshal(bytes, &newUTXOID); err != nil { - t.Fatal(err) - } - - if err := newUTXOID.Verify(); err != nil { - t.Fatal(err) - } + _, err = manager.Unmarshal(bytes, &newUTXOID) + require.NoError(err) - if utxoID.InputID() != newUTXOID.InputID() { - t.Fatalf("Parsing returned the wrong UTXO ID") - } + require.NoError(newUTXOID.Verify()) + require.Equal(utxoID.InputID(), newUTXOID.InputID()) } -func TestUTXOIDLess(t *testing.T) { +func TestUTXOIDCompare(t *testing.T) { type test struct { name string id1 UTXOID id2 UTXOID - expected bool + expected int } - tests := []test{ + tests := []*test{ { name: "same", id1: UTXOID{}, id2: UTXOID{}, - expected: false, + expected: 0, }, { - name: "first id smaller", + name: "id smaller", id1: UTXOID{}, id2: UTXOID{ TxID: ids.ID{1}, }, - expected: true, - }, - { - name: "first id larger", - id1: UTXOID{ - TxID: ids.ID{1}, - }, - id2: UTXOID{}, - expected: false, + expected: -1, }, { - name: 
"first index smaller", + name: "index smaller", id1: UTXOID{}, id2: UTXOID{ OutputIndex: 1, }, - expected: true, - }, - { - name: "first index larger", - id1: UTXOID{ - OutputIndex: 1, - }, - id2: UTXOID{}, - expected: false, + expected: -1, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { require := require.New(t) - require.Equal(tt.expected, tt.id1.Less(&tt.id2)) + + require.Equal(tt.expected, tt.id1.Compare(&tt.id2)) + require.Equal(-tt.expected, tt.id2.Compare(&tt.id1)) }) } } @@ -204,12 +179,12 @@ func TestUTXOIDFromString(t *testing.T) { retrievedUTXOID, err := UTXOIDFromString(test.expectedStr) require.ErrorIs(err, test.parseErr) - - if err == nil { - require.Equal(test.utxoID.InputID(), retrievedUTXOID.InputID()) - require.Equal(test.utxoID, retrievedUTXOID) - require.Equal(test.utxoID.String(), retrievedUTXOID.String()) + if test.parseErr != nil { + return } + require.Equal(test.utxoID.InputID(), retrievedUTXOID.InputID()) + require.Equal(test.utxoID, retrievedUTXOID) + require.Equal(test.utxoID.String(), retrievedUTXOID.String()) }) } } diff --git a/avalanchego/vms/components/avax/utxo_state.go b/avalanchego/vms/components/avax/utxo_state.go index beb1846e..9bc648a6 100644 --- a/avalanchego/vms/components/avax/utxo_state.go +++ b/avalanchego/vms/components/avax/utxo_state.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avax @@ -30,6 +30,9 @@ var ( type UTXOState interface { UTXOReader UTXOWriter + + // Checksum returns the current UTXOChecksum. + Checksum() ids.ID } // UTXOReader is a thin wrapper around a database to provide fetching of UTXOs. 
@@ -76,10 +79,17 @@ type utxoState struct { indexDB database.Database indexCache cache.Cacher[string, linkeddb.LinkedDB] + + trackChecksum bool + checksum ids.ID } -func NewUTXOState(db database.Database, codec codec.Manager) UTXOState { - return &utxoState{ +func NewUTXOState( + db database.Database, + codec codec.Manager, + trackChecksum bool, +) (UTXOState, error) { + s := &utxoState{ codec: codec, utxoCache: &cache.LRU[ids.ID, *UTXO]{Size: utxoCacheSize}, @@ -87,10 +97,18 @@ func NewUTXOState(db database.Database, codec codec.Manager) UTXOState { indexDB: prefixdb.New(indexPrefix, db), indexCache: &cache.LRU[string, linkeddb.LinkedDB]{Size: indexCacheSize}, + + trackChecksum: trackChecksum, } + return s, s.initChecksum() } -func NewMeteredUTXOState(db database.Database, codec codec.Manager, metrics prometheus.Registerer) (UTXOState, error) { +func NewMeteredUTXOState( + db database.Database, + codec codec.Manager, + metrics prometheus.Registerer, + trackChecksum bool, +) (UTXOState, error) { utxoCache, err := metercacher.New[ids.ID, *UTXO]( "utxo_cache", metrics, @@ -107,7 +125,11 @@ func NewMeteredUTXOState(db database.Database, codec codec.Manager, metrics prom Size: indexCacheSize, }, ) - return &utxoState{ + if err != nil { + return nil, err + } + + s := &utxoState{ codec: codec, utxoCache: utxoCache, @@ -115,7 +137,10 @@ func NewMeteredUTXOState(db database.Database, codec codec.Manager, metrics prom indexDB: prefixdb.New(indexPrefix, db), indexCache: indexCache, - }, err + + trackChecksum: trackChecksum, + } + return s, s.initChecksum() } func (s *utxoState) GetUTXO(utxoID ids.ID) (*UTXO, error) { @@ -152,6 +177,8 @@ func (s *utxoState) PutUTXO(utxo *UTXO) error { } utxoID := utxo.InputID() + s.updateChecksum(utxoID) + s.utxoCache.Put(utxoID, utxo) if err := s.utxoDB.Put(utxoID[:], utxoBytes); err != nil { return err @@ -181,6 +208,8 @@ func (s *utxoState) DeleteUTXO(utxoID ids.ID) error { return err } + s.updateChecksum(utxoID) + s.utxoCache.Put(utxoID, 
nil) if err := s.utxoDB.Delete(utxoID[:]); err != nil { return err @@ -222,6 +251,10 @@ func (s *utxoState) UTXOIDs(addr []byte, start ids.ID, limit int) ([]ids.ID, err return utxoIDs, iter.Error() } +func (s *utxoState) Checksum() ids.ID { + return s.checksum +} + func (s *utxoState) getIndexDB(addr []byte) linkeddb.LinkedDB { addrStr := string(addr) if indexList, exists := s.indexCache.Get(addrStr); exists { @@ -233,3 +266,29 @@ func (s *utxoState) getIndexDB(addr []byte) linkeddb.LinkedDB { s.indexCache.Put(addrStr, indexList) return indexList } + +func (s *utxoState) initChecksum() error { + if !s.trackChecksum { + return nil + } + + it := s.utxoDB.NewIterator() + defer it.Release() + + for it.Next() { + utxoID, err := ids.ToID(it.Key()) + if err != nil { + return err + } + s.updateChecksum(utxoID) + } + return it.Error() +} + +func (s *utxoState) updateChecksum(modifiedID ids.ID) { + if !s.trackChecksum { + return + } + + s.checksum = s.checksum.XOR(modifiedID) +} diff --git a/avalanchego/vms/components/avax/utxo_state_test.go b/avalanchego/vms/components/avax/utxo_state_test.go index 993fab76..fa4c530e 100644 --- a/avalanchego/vms/components/avax/utxo_state_test.go +++ b/avalanchego/vms/components/avax/utxo_state_test.go @@ -1,10 +1,11 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package avax import ( "testing" + "time" "github.com/stretchr/testify/require" @@ -13,10 +14,11 @@ import ( "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) +const trackChecksum = false + func TestUTXOState(t *testing.T) { require := require.New(t) @@ -40,34 +42,29 @@ func TestUTXOState(t *testing.T) { } utxoID := utxo.InputID() - c := linearcodec.NewDefault() + c := linearcodec.NewDefault(time.Time{}) manager := codec.NewDefaultManager() - errs := wrappers.Errs{} - errs.Add( - c.RegisterType(&secp256k1fx.MintOutput{}), - c.RegisterType(&secp256k1fx.TransferOutput{}), - c.RegisterType(&secp256k1fx.Input{}), - c.RegisterType(&secp256k1fx.TransferInput{}), - c.RegisterType(&secp256k1fx.Credential{}), - manager.RegisterCodec(codecVersion, c), - ) - require.NoError(errs.Err) + require.NoError(c.RegisterType(&secp256k1fx.MintOutput{})) + require.NoError(c.RegisterType(&secp256k1fx.TransferOutput{})) + require.NoError(c.RegisterType(&secp256k1fx.Input{})) + require.NoError(c.RegisterType(&secp256k1fx.TransferInput{})) + require.NoError(c.RegisterType(&secp256k1fx.Credential{})) + require.NoError(manager.RegisterCodec(codecVersion, c)) db := memdb.New() - s := NewUTXOState(db, manager) + s, err := NewUTXOState(db, manager, trackChecksum) + require.NoError(err) - _, err := s.GetUTXO(utxoID) + _, err = s.GetUTXO(utxoID) require.Equal(database.ErrNotFound, err) _, err = s.GetUTXO(utxoID) require.Equal(database.ErrNotFound, err) - err = s.DeleteUTXO(utxoID) - require.NoError(err) + require.NoError(s.DeleteUTXO(utxoID)) - err = s.PutUTXO(utxo) - require.NoError(err) + require.NoError(s.PutUTXO(utxo)) utxoIDs, err := s.UTXOIDs(addr[:], ids.Empty, 5) require.NoError(err) @@ -77,16 +74,15 @@ func TestUTXOState(t *testing.T) { require.NoError(err) require.Equal(utxo, readUTXO) - err = 
s.DeleteUTXO(utxoID) - require.NoError(err) + require.NoError(s.DeleteUTXO(utxoID)) _, err = s.GetUTXO(utxoID) require.Equal(database.ErrNotFound, err) - err = s.PutUTXO(utxo) - require.NoError(err) + require.NoError(s.PutUTXO(utxo)) - s = NewUTXOState(db, manager) + s, err = NewUTXOState(db, manager, trackChecksum) + require.NoError(err) readUTXO, err = s.GetUTXO(utxoID) require.NoError(err) diff --git a/avalanchego/vms/components/avax/utxo_test.go b/avalanchego/vms/components/avax/utxo_test.go index a872e673..a79c8fcb 100644 --- a/avalanchego/vms/components/avax/utxo_test.go +++ b/avalanchego/vms/components/avax/utxo_test.go @@ -1,25 +1,24 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avax import ( - "bytes" "testing" + "time" + + "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) func TestUTXOVerifyNil(t *testing.T) { utxo := (*UTXO)(nil) - - if err := utxo.Verify(); err == nil { - t.Fatalf("Should have errored due to a nil utxo") - } + err := utxo.Verify() + require.ErrorIs(t, err, errNilUTXO) } func TestUTXOVerifyEmpty(t *testing.T) { @@ -27,28 +26,22 @@ func TestUTXOVerifyEmpty(t *testing.T) { UTXOID: UTXOID{TxID: ids.Empty}, Asset: Asset{ID: ids.Empty}, } - - if err := utxo.Verify(); err == nil { - t.Fatalf("Should have errored due to an empty utxo") - } + err := utxo.Verify() + require.ErrorIs(t, err, errEmptyUTXO) } func TestUTXOSerialize(t *testing.T) { - c := linearcodec.NewDefault() + require := require.New(t) + + c := linearcodec.NewDefault(time.Time{}) manager := codec.NewDefaultManager() - errs := wrappers.Errs{} - errs.Add( - c.RegisterType(&secp256k1fx.MintOutput{}), - 
c.RegisterType(&secp256k1fx.TransferOutput{}), - c.RegisterType(&secp256k1fx.Input{}), - c.RegisterType(&secp256k1fx.TransferInput{}), - c.RegisterType(&secp256k1fx.Credential{}), - manager.RegisterCodec(codecVersion, c), - ) - if errs.Errored() { - t.Fatal(errs.Err) - } + require.NoError(c.RegisterType(&secp256k1fx.MintOutput{})) + require.NoError(c.RegisterType(&secp256k1fx.TransferOutput{})) + require.NoError(c.RegisterType(&secp256k1fx.Input{})) + require.NoError(c.RegisterType(&secp256k1fx.TransferInput{})) + require.NoError(c.RegisterType(&secp256k1fx.Credential{})) + require.NoError(manager.RegisterCodec(codecVersion, c)) expected := []byte{ // Codec version @@ -116,13 +109,6 @@ func TestUTXOSerialize(t *testing.T) { } utxoBytes, err := manager.Marshal(codecVersion, utxo) - if err != nil { - t.Fatal(err) - } - if !bytes.Equal(utxoBytes, expected) { - t.Fatalf("Expected:\n0x%x\nResult:\n0x%x", - expected, - utxoBytes, - ) - } + require.NoError(err) + require.Equal(expected, utxoBytes) } diff --git a/avalanchego/vms/components/chain/block.go b/avalanchego/vms/components/chain/block.go index d03659ed..3966dd20 100644 --- a/avalanchego/vms/components/chain/block.go +++ b/avalanchego/vms/components/chain/block.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package chain diff --git a/avalanchego/vms/components/chain/state.go b/avalanchego/vms/components/chain/state.go index fe499194..6ada30e7 100644 --- a/avalanchego/vms/components/chain/state.go +++ b/avalanchego/vms/components/chain/state.go @@ -1,10 +1,11 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package chain import ( "context" + "errors" "fmt" "github.com/prometheus/client_golang/prometheus" @@ -16,8 +17,17 @@ import ( "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" + "github.com/ava-labs/avalanchego/utils/constants" ) +func cachedBlockSize(_ ids.ID, bw *BlockWrapper) int { + return ids.IDLen + len(bw.Bytes()) + 2*constants.PointerOverhead +} + +func cachedBlockBytesSize(blockBytes string, _ ids.ID) int { + return len(blockBytes) + ids.IDLen +} + // State implements an efficient caching layer used to wrap a VM // implementation. type State struct { @@ -137,11 +147,20 @@ func (s *State) initialize(config *Config) { func NewState(config *Config) *State { c := &State{ - verifiedBlocks: make(map[ids.ID]*BlockWrapper), - decidedBlocks: &cache.LRU[ids.ID, *BlockWrapper]{Size: config.DecidedCacheSize}, - missingBlocks: &cache.LRU[ids.ID, struct{}]{Size: config.MissingCacheSize}, - unverifiedBlocks: &cache.LRU[ids.ID, *BlockWrapper]{Size: config.UnverifiedCacheSize}, - bytesToIDCache: &cache.LRU[string, ids.ID]{Size: config.BytesToIDCacheSize}, + verifiedBlocks: make(map[ids.ID]*BlockWrapper), + decidedBlocks: cache.NewSizedLRU[ids.ID, *BlockWrapper]( + config.DecidedCacheSize, + cachedBlockSize, + ), + missingBlocks: &cache.LRU[ids.ID, struct{}]{Size: config.MissingCacheSize}, + unverifiedBlocks: cache.NewSizedLRU[ids.ID, *BlockWrapper]( + config.UnverifiedCacheSize, + cachedBlockSize, + ), + bytesToIDCache: cache.NewSizedLRU[string, ids.ID]( + config.BytesToIDCacheSize, + cachedBlockBytesSize, + ), } c.initialize(config) return c @@ -154,7 +173,10 @@ func NewMeteredState( decidedCache, err := metercacher.New[ids.ID, *BlockWrapper]( "decided_cache", registerer, - &cache.LRU[ids.ID, *BlockWrapper]{Size: config.DecidedCacheSize}, + cache.NewSizedLRU[ids.ID, *BlockWrapper]( + config.DecidedCacheSize, + cachedBlockSize, + ), ) if err != nil { return 
nil, err @@ -170,7 +192,10 @@ func NewMeteredState( unverifiedCache, err := metercacher.New[ids.ID, *BlockWrapper]( "unverified_cache", registerer, - &cache.LRU[ids.ID, *BlockWrapper]{Size: config.UnverifiedCacheSize}, + cache.NewSizedLRU[ids.ID, *BlockWrapper]( + config.UnverifiedCacheSize, + cachedBlockSize, + ), ) if err != nil { return nil, err @@ -178,7 +203,10 @@ func NewMeteredState( bytesToIDCache, err := metercacher.New[string, ids.ID]( "bytes_to_id_cache", registerer, - &cache.LRU[string, ids.ID]{Size: config.BytesToIDCacheSize}, + cache.NewSizedLRU[string, ids.ID]( + config.BytesToIDCacheSize, + cachedBlockBytesSize, + ), ) if err != nil { return nil, err @@ -194,14 +222,17 @@ func NewMeteredState( return c, nil } -// SetLastAcceptedBlock sets the last accepted block to [lastAcceptedBlock]. This should be called -// with an internal block - not a wrapped block returned from state. +var errSetAcceptedWithProcessing = errors.New("cannot set last accepted block with blocks processing") + +// SetLastAcceptedBlock sets the last accepted block to [lastAcceptedBlock]. +// This should be called with an internal block - not a wrapped block returned +// from state. // -// This also flushes [lastAcceptedBlock] from missingBlocks and unverifiedBlocks to -// ensure that their contents stay valid. +// This also flushes [lastAcceptedBlock] from missingBlocks and unverifiedBlocks +// to ensure that their contents stay valid. 
func (s *State) SetLastAcceptedBlock(lastAcceptedBlock snowman.Block) error { if len(s.verifiedBlocks) != 0 { - return fmt.Errorf("cannot set chain state last accepted block with non-zero number of verified blocks in processing: %d", len(s.verifiedBlocks)) + return fmt.Errorf("%w: %d", errSetAcceptedWithProcessing, len(s.verifiedBlocks)) } // [lastAcceptedBlock] is no longer missing or unverified, so we evict it from the corresponding diff --git a/avalanchego/vms/components/chain/state_test.go b/avalanchego/vms/components/chain/state_test.go index 448fcb44..8bdda596 100644 --- a/avalanchego/vms/components/chain/state_test.go +++ b/avalanchego/vms/components/chain/state_test.go @@ -1,17 +1,15 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package chain import ( - "bytes" "context" "errors" "fmt" "testing" "github.com/prometheus/client_golang/prometheus" - "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/database" @@ -19,15 +17,17 @@ import ( "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/utils/hashing" + "github.com/ava-labs/avalanchego/utils/metric" ) var ( _ Block = (*TestBlock)(nil) - errCantBuildBlock = errors.New("can't build new block") - errVerify = errors.New("verify failed") - errAccept = errors.New("accept failed") - errReject = errors.New("reject failed") + errCantBuildBlock = errors.New("can't build new block") + errVerify = errors.New("verify failed") + errAccept = errors.New("accept failed") + errReject = errors.New("reject failed") + errUnexpectedBlockBytes = errors.New("unexpected block bytes") ) type TestBlock struct { @@ -80,9 +80,7 @@ func createInternalBlockFuncs(t *testing.T, blks []*TestBlock) ( for _, blk := range blks { blkMap[blk.ID()] = blk blkBytes := blk.Bytes() - if len(blkBytes) != 1 { - 
t.Fatalf("Expected block bytes to be length 1, but found %d", len(blkBytes)) - } + require.Len(t, blkBytes, 1) blkByteMap[blkBytes[0]] = blk } @@ -102,7 +100,7 @@ func createInternalBlockFuncs(t *testing.T, blks []*TestBlock) ( blk, ok := blkByteMap[b[0]] if !ok { - return nil, fmt.Errorf("parsed unexpected block with bytes %x", b) + return nil, fmt.Errorf("%w: %x", errUnexpectedBlockBytes, b) } if blk.Status() == choices.Unknown { blk.SetStatus(choices.Processing) @@ -135,80 +133,49 @@ func cantBuildBlock(context.Context) (snowman.Block, error) { // checkProcessingBlock checks that [blk] is of the correct type and is // correctly uniquified when calling GetBlock and ParseBlock. func checkProcessingBlock(t *testing.T, s *State, blk snowman.Block) { - if _, ok := blk.(*BlockWrapper); !ok { - t.Fatalf("Expected block to be of type (*BlockWrapper)") - } + require := require.New(t) + + require.IsType(&BlockWrapper{}, blk) parsedBlk, err := s.ParseBlock(context.Background(), blk.Bytes()) - if err != nil { - t.Fatalf("Failed to parse verified block due to %s", err) - } - if parsedBlk.ID() != blk.ID() { - t.Fatalf("Expected parsed block to have the same ID as the requested block") - } - if !bytes.Equal(parsedBlk.Bytes(), blk.Bytes()) { - t.Fatalf("Expected parsed block to have the same bytes as the requested block") - } - if status := parsedBlk.Status(); status != choices.Processing { - t.Fatalf("Expected parsed block to have status Processing, but found %s", status) - } - if parsedBlk != blk { - t.Fatalf("Expected parsed block to return a uniquified block") - } + require.NoError(err) + require.Equal(blk.ID(), parsedBlk.ID()) + require.Equal(blk.Bytes(), parsedBlk.Bytes()) + require.Equal(choices.Processing, parsedBlk.Status()) + require.Equal(blk, parsedBlk) getBlk, err := s.GetBlock(context.Background(), blk.ID()) - if err != nil { - t.Fatalf("Unexpected error during GetBlock for processing block %s", err) - } - if getBlk != parsedBlk { - t.Fatalf("Expected GetBlock to 
return the same unique block as ParseBlock") - } + require.NoError(err) + require.Equal(parsedBlk, getBlk) } // checkDecidedBlock asserts that [blk] is returned with the correct status by ParseBlock // and GetBlock. func checkDecidedBlock(t *testing.T, s *State, blk snowman.Block, expectedStatus choices.Status, cached bool) { - if _, ok := blk.(*BlockWrapper); !ok { - t.Fatalf("Expected block to be of type (*BlockWrapper)") - } + require := require.New(t) + + require.IsType(&BlockWrapper{}, blk) parsedBlk, err := s.ParseBlock(context.Background(), blk.Bytes()) - if err != nil { - t.Fatalf("Unexpected error parsing decided block %s", err) - } - if parsedBlk.ID() != blk.ID() { - t.Fatalf("ParseBlock returned block with unexpected ID %s, expected %s", parsedBlk.ID(), blk.ID()) - } - if !bytes.Equal(parsedBlk.Bytes(), blk.Bytes()) { - t.Fatalf("Expected parsed block to have the same bytes as the requested block") - } - if status := parsedBlk.Status(); status != expectedStatus { - t.Fatalf("Expected parsed block to have status %s, but found %s", expectedStatus, status) - } + require.NoError(err) + require.Equal(blk.ID(), parsedBlk.ID()) + require.Equal(blk.Bytes(), parsedBlk.Bytes()) + require.Equal(expectedStatus, parsedBlk.Status()) + // If the block should be in the cache, assert that the returned block is identical to [blk] - if cached && parsedBlk != blk { - t.Fatalf("Expected parsed block to have been cached, but retrieved non-unique decided block") + if cached { + require.Equal(blk, parsedBlk) } getBlk, err := s.GetBlock(context.Background(), blk.ID()) - if err != nil { - t.Fatalf("Unexpected error during GetBlock for decided block %s", err) - } - if getBlk.ID() != blk.ID() { - t.Fatalf("GetBlock returned block with unexpected ID %s, expected %s", getBlk.ID(), blk.ID()) - } - if !bytes.Equal(getBlk.Bytes(), blk.Bytes()) { - t.Fatalf("Expected block from GetBlock to have the same bytes as the requested block") - } - if status := getBlk.Status(); status != 
expectedStatus { - t.Fatalf("Expected block from GetBlock to have status %s, but found %s", expectedStatus, status) - } + require.NoError(err) + require.Equal(blk.ID(), getBlk.ID()) + require.Equal(blk.Bytes(), getBlk.Bytes()) + require.Equal(expectedStatus, getBlk.Status()) // Since ParseBlock should have triggered a cache hit, assert that the block is identical // to the parsed block. - if getBlk != parsedBlk { - t.Fatalf("Expected block returned by GetBlock to have been cached, but retrieved non-unique decided block") - } + require.Equal(parsedBlk, getBlk) } func checkAcceptedBlock(t *testing.T, s *State, blk snowman.Block, cached bool) { @@ -220,6 +187,8 @@ func checkRejectedBlock(t *testing.T, s *State, blk snowman.Block, cached bool) } func TestState(t *testing.T) { + require := require.New(t) + testBlks := NewTestBlocks(3) genesisBlock := testBlks[0] genesisBlock.SetStatus(choices.Accepted) @@ -256,97 +225,54 @@ func TestState(t *testing.T) { }) lastAccepted, err := chainState.LastAccepted(context.Background()) - if err != nil { - t.Fatal(err) - } - if lastAccepted != genesisBlock.ID() { - t.Fatal("Expected last accepted block to be the genesis block") - } + require.NoError(err) + require.Equal(genesisBlock.ID(), lastAccepted) wrappedGenesisBlk, err := chainState.GetBlock(context.Background(), genesisBlock.ID()) - if err != nil { - t.Fatalf("Failed to get genesis block due to: %s", err) - } + require.NoError(err) // Check that a cache miss on a block is handled correctly - if _, err := chainState.GetBlock(context.Background(), blk1.ID()); err == nil { - t.Fatal("expected GetBlock to return an error for blk1 before it's been parsed") - } - if _, err := chainState.GetBlock(context.Background(), blk1.ID()); err == nil { - t.Fatal("expected GetBlock to return an error for blk1 before it's been parsed") - } + _, err = chainState.GetBlock(context.Background(), blk1.ID()) + require.ErrorIs(err, database.ErrNotFound) // Parse and verify blk1 and blk2 parsedBlk1, err 
:= chainState.ParseBlock(context.Background(), blk1.Bytes()) - if err != nil { - t.Fatal("Failed to parse blk1 due to: %w", err) - } - if err := parsedBlk1.Verify(context.Background()); err != nil { - t.Fatal("Parsed blk1 failed verification unexpectedly due to %w", err) - } + require.NoError(err) + require.NoError(parsedBlk1.Verify(context.Background())) + parsedBlk2, err := chainState.ParseBlock(context.Background(), blk2.Bytes()) - if err != nil { - t.Fatalf("Failed to parse blk2 due to: %s", err) - } - if err := parsedBlk2.Verify(context.Background()); err != nil { - t.Fatalf("Parsed blk2 failed verification unexpectedly due to %s", err) - } + require.NoError(err) + require.NoError(parsedBlk2.Verify(context.Background())) // Check that the verified blocks have been placed in the processing map - if numProcessing := len(chainState.verifiedBlocks); numProcessing != 2 { - t.Fatalf("Expected chain state to have 2 processing blocks, but found: %d", numProcessing) - } + require.Len(chainState.verifiedBlocks, 2) parsedBlk3, err := chainState.ParseBlock(context.Background(), blk3.Bytes()) - if err != nil { - t.Fatalf("Failed to parse blk3 due to %s", err) - } + require.NoError(err) getBlk3, err := chainState.GetBlock(context.Background(), blk3.ID()) - if err != nil { - t.Fatalf("Failed to get blk3 due to %s", err) - } - require.Equal(t, parsedBlk3.ID(), getBlk3.ID(), "State GetBlock returned the wrong block") + require.NoError(err) + require.Equal(parsedBlk3.ID(), getBlk3.ID(), "State GetBlock returned the wrong block") // Check that parsing blk3 does not add it to processing blocks since it has // not been verified. 
- if numProcessing := len(chainState.verifiedBlocks); numProcessing != 2 { - t.Fatalf("Expected State to have 2 processing blocks, but found: %d", numProcessing) - } + require.Len(chainState.verifiedBlocks, 2) - if err := parsedBlk3.Verify(context.Background()); err != nil { - t.Fatalf("Parsed blk3 failed verification unexpectedly due to %s", err) - } + require.NoError(parsedBlk3.Verify(context.Background())) // Check that blk3 has been added to processing blocks. - if numProcessing := len(chainState.verifiedBlocks); numProcessing != 3 { - t.Fatalf("Expected chain state to have 3 processing blocks, but found: %d", numProcessing) - } + require.Len(chainState.verifiedBlocks, 3) // Decide the blocks and ensure they are removed from the processing blocks map - if err := parsedBlk1.Accept(context.Background()); err != nil { - t.Fatal(err) - } - if err := parsedBlk2.Accept(context.Background()); err != nil { - t.Fatal(err) - } - if err := parsedBlk3.Reject(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(parsedBlk1.Accept(context.Background())) + require.NoError(parsedBlk2.Accept(context.Background())) + require.NoError(parsedBlk3.Reject(context.Background())) - if numProcessing := len(chainState.verifiedBlocks); numProcessing != 0 { - t.Fatalf("Expected chain state to have 0 processing blocks, but found: %d", numProcessing) - } + require.Empty(chainState.verifiedBlocks) // Check that the last accepted block was updated correctly lastAcceptedID, err := chainState.LastAccepted(context.Background()) - if err != nil { - t.Fatal(err) - } - if lastAcceptedID != blk2.ID() { - t.Fatal("Expected last accepted block to be blk2") - } - if lastAcceptedID := chainState.LastAcceptedBlock().ID(); lastAcceptedID != blk2.ID() { - t.Fatal("Expected last accepted block to be blk2") - } + require.NoError(err) + require.Equal(blk2.ID(), lastAcceptedID) + require.Equal(blk2.ID(), chainState.LastAcceptedBlock().ID()) // Flush the caches to ensure decided blocks are 
handled correctly on cache misses. chainState.Flush() @@ -357,6 +283,8 @@ func TestState(t *testing.T) { } func TestBuildBlock(t *testing.T) { + require := require.New(t) + testBlks := NewTestBlocks(2) genesisBlock := testBlks[0] genesisBlock.SetStatus(choices.Accepted) @@ -382,26 +310,22 @@ func TestBuildBlock(t *testing.T) { }) builtBlk, err := chainState.BuildBlock(context.Background()) - if err != nil { - t.Fatal(err) - } - require.Len(t, chainState.verifiedBlocks, 0) + require.NoError(err) + require.Empty(chainState.verifiedBlocks) - if err := builtBlk.Verify(context.Background()); err != nil { - t.Fatalf("Built block failed verification due to %s", err) - } - require.Len(t, chainState.verifiedBlocks, 1) + require.NoError(builtBlk.Verify(context.Background())) + require.Len(chainState.verifiedBlocks, 1) checkProcessingBlock(t, chainState, builtBlk) - if err := builtBlk.Accept(context.Background()); err != nil { - t.Fatalf("Unexpected error while accepting built block %s", err) - } + require.NoError(builtBlk.Accept(context.Background())) checkAcceptedBlock(t, chainState, builtBlk, true) } func TestStateDecideBlock(t *testing.T) { + require := require.New(t) + testBlks := NewTestBlocks(4) genesisBlock := testBlks[0] genesisBlock.SetStatus(choices.Accepted) @@ -426,50 +350,38 @@ func TestStateDecideBlock(t *testing.T) { // Parse badVerifyBlk (which should fail verification) badBlk, err := chainState.ParseBlock(context.Background(), badVerifyBlk.Bytes()) - if err != nil { - t.Fatal(err) - } - if err := badBlk.Verify(context.Background()); err == nil { - t.Fatal("Bad block should have failed verification") - } + require.NoError(err) + err = badBlk.Verify(context.Background()) + require.ErrorIs(err, errVerify) // Ensure a block that fails verification is not marked as processing - require.Len(t, chainState.verifiedBlocks, 0) + require.Empty(chainState.verifiedBlocks) // Ensure that an error during block acceptance is propagated correctly badBlk, err = 
chainState.ParseBlock(context.Background(), badAcceptBlk.Bytes()) - if err != nil { - t.Fatal(err) - } - if err := badBlk.Verify(context.Background()); err != nil { - t.Fatal(err) - } - require.Len(t, chainState.verifiedBlocks, 1) + require.NoError(err) + require.NoError(badBlk.Verify(context.Background())) + require.Len(chainState.verifiedBlocks, 1) - if err := badBlk.Accept(context.Background()); err == nil { - t.Fatal("Block should have errored on Accept") - } + err = badBlk.Accept(context.Background()) + require.ErrorIs(err, errAccept) // Ensure that an error during block reject is propagated correctly badBlk, err = chainState.ParseBlock(context.Background(), badRejectBlk.Bytes()) - if err != nil { - t.Fatal(err) - } - if err := badBlk.Verify(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(err) + require.NoError(badBlk.Verify(context.Background())) // Note: an error during block Accept/Reject is fatal, so it is undefined whether // the block that failed on Accept should be removed from processing or not. We allow // either case here to make this test more flexible. 
- if numProcessing := len(chainState.verifiedBlocks); numProcessing > 2 || numProcessing == 0 { - t.Fatalf("Expected number of processing blocks to be either 1 or 2, but found %d", numProcessing) - } + numProcessing := len(chainState.verifiedBlocks) + require.Contains([]int{1, 2}, numProcessing) - if err := badBlk.Reject(context.Background()); err == nil { - t.Fatal("Block should have errored on Reject") - } + err = badBlk.Reject(context.Background()) + require.ErrorIs(err, errReject) } func TestStateParent(t *testing.T) { + require := require.New(t) + testBlks := NewTestBlocks(3) genesisBlock := testBlks[0] genesisBlock.SetStatus(choices.Accepted) @@ -490,37 +402,29 @@ func TestStateParent(t *testing.T) { }) parsedBlk2, err := chainState.ParseBlock(context.Background(), blk2.Bytes()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) missingBlk1ID := parsedBlk2.Parent() - if _, err := chainState.GetBlock(context.Background(), missingBlk1ID); err == nil { - t.Fatalf("Expected parent of blk2 to be not found") - } + _, err = chainState.GetBlock(context.Background(), missingBlk1ID) + require.ErrorIs(err, database.ErrNotFound) parsedBlk1, err := chainState.ParseBlock(context.Background(), blk1.Bytes()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) genesisBlkParentID := parsedBlk1.Parent() genesisBlkParent, err := chainState.GetBlock(context.Background(), genesisBlkParentID) - if err != nil { - t.Fatal(err) - } + require.NoError(err) checkAcceptedBlock(t, chainState, genesisBlkParent, true) parentBlk1ID := parsedBlk2.Parent() parentBlk1, err := chainState.GetBlock(context.Background(), parentBlk1ID) - if err != nil { - t.Fatal(err) - } + require.NoError(err) checkProcessingBlock(t, chainState, parentBlk1) } func TestGetBlockInternal(t *testing.T) { + require := require.New(t) testBlks := NewTestBlocks(1) genesisBlock := testBlks[0] genesisBlock.SetStatus(choices.Accepted) @@ -539,27 +443,19 @@ func TestGetBlockInternal(t *testing.T) { }) 
genesisBlockInternal := chainState.LastAcceptedBlockInternal() - if _, ok := genesisBlockInternal.(*TestBlock); !ok { - t.Fatalf("Expected LastAcceptedBlockInternal to return a block of type *snowman.TestBlock, but found %T", genesisBlockInternal) - } - if genesisBlockInternal.ID() != genesisBlock.ID() { - t.Fatalf("Expected LastAcceptedBlockInternal to be blk %s, but found %s", genesisBlock.ID(), genesisBlockInternal.ID()) - } + require.IsType(&TestBlock{}, genesisBlockInternal) + require.Equal(genesisBlock.ID(), genesisBlockInternal.ID()) blk, err := chainState.GetBlockInternal(context.Background(), genesisBlock.ID()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) - if _, ok := blk.(*TestBlock); !ok { - t.Fatalf("Expected retrieved block to return a block of type *snowman.TestBlock, but found %T", blk) - } - if blk.ID() != genesisBlock.ID() { - t.Fatalf("Expected GetBlock to be blk %s, but found %s", genesisBlock.ID(), blk.ID()) - } + require.IsType(&TestBlock{}, blk) + require.Equal(genesisBlock.ID(), blk.ID()) } func TestGetBlockError(t *testing.T) { + require := require.New(t) + testBlks := NewTestBlocks(2) genesisBlock := testBlks[0] genesisBlock.SetStatus(choices.Accepted) @@ -586,20 +482,14 @@ func TestGetBlockError(t *testing.T) { }) _, err := chainState.GetBlock(context.Background(), blk1.ID()) - if err == nil { - t.Fatal("Expected GetBlock to return an error for unknown block") - } + require.ErrorIs(err, database.ErrNotFound) // Update the status to Processing, so that it will be returned by the internal get block // function. 
blk1.SetStatus(choices.Processing) blk, err := chainState.GetBlock(context.Background(), blk1.ID()) - if err != nil { - t.Fatal(err) - } - if blk.ID() != blk1.ID() { - t.Fatalf("Expected GetBlock to retrieve %s, but found %s", blk1.ID(), blk.ID()) - } + require.NoError(err) + require.Equal(blk1.ID(), blk.ID()) checkProcessingBlock(t, chainState, blk) } @@ -621,10 +511,8 @@ func TestParseBlockError(t *testing.T) { GetBlockIDAtHeight: getCanonicalBlockID, }) - blk, err := chainState.ParseBlock(context.Background(), []byte{255}) - if err == nil { - t.Fatalf("Expected ParseBlock to return an error parsing an invalid block but found block of type %T", blk) - } + _, err := chainState.ParseBlock(context.Background(), []byte{255}) + require.ErrorIs(t, err, errUnexpectedBlockBytes) } func TestBuildBlockError(t *testing.T) { @@ -645,13 +533,13 @@ func TestBuildBlockError(t *testing.T) { GetBlockIDAtHeight: getCanonicalBlockID, }) - blk, err := chainState.BuildBlock(context.Background()) - if err == nil { - t.Fatalf("Expected BuildBlock to return an error but found block of type %T", blk) - } + _, err := chainState.BuildBlock(context.Background()) + require.ErrorIs(t, err, errCantBuildBlock) } func TestMeteredCache(t *testing.T) { + require := require.New(t) + registry := prometheus.NewRegistry() testBlks := NewTestBlocks(1) @@ -671,17 +559,15 @@ func TestMeteredCache(t *testing.T) { GetBlockIDAtHeight: getCanonicalBlockID, } _, err := NewMeteredState(registry, config) - if err != nil { - t.Fatal(err) - } + require.NoError(err) _, err = NewMeteredState(registry, config) - if err == nil { - t.Fatal("Expected creating a second NewMeteredState to error due to a registry conflict") - } + require.ErrorIs(err, metric.ErrFailedRegistering) } // Test the bytesToIDCache func TestStateBytesToIDCache(t *testing.T) { + require := require.New(t) + testBlks := NewTestBlocks(3) genesisBlock := testBlks[0] genesisBlock.SetStatus(choices.Accepted) @@ -690,7 +576,7 @@ func 
TestStateBytesToIDCache(t *testing.T) { getBlock, parseBlock, getCanonicalBlockID := createInternalBlockFuncs(t, testBlks) buildBlock := func(context.Context) (snowman.Block, error) { - t.Fatal("shouldn't have been called") + require.FailNow("shouldn't have been called") return nil, nil } @@ -698,7 +584,7 @@ func TestStateBytesToIDCache(t *testing.T) { DecidedCacheSize: 0, MissingCacheSize: 0, UnverifiedCacheSize: 0, - BytesToIDCacheSize: 1, + BytesToIDCacheSize: 1 + ids.IDLen, // Size of one block LastAcceptedBlock: genesisBlock, GetBlock: getBlock, UnmarshalBlock: parseBlock, @@ -708,32 +594,34 @@ func TestStateBytesToIDCache(t *testing.T) { // Shouldn't have blk1 ID to start with _, err := chainState.GetBlock(context.Background(), blk1.ID()) - require.Error(t, err) + require.ErrorIs(err, database.ErrNotFound) _, ok := chainState.bytesToIDCache.Get(string(blk1.Bytes())) - require.False(t, ok) + require.False(ok) // Parse blk1 from bytes _, err = chainState.ParseBlock(context.Background(), blk1.Bytes()) - require.NoError(t, err) + require.NoError(err) // blk1 should be in cache now _, ok = chainState.bytesToIDCache.Get(string(blk1.Bytes())) - require.True(t, ok) + require.True(ok) // Parse another block _, err = chainState.ParseBlock(context.Background(), blk2.Bytes()) - require.NoError(t, err) + require.NoError(err) // Should have bumped blk1 from cache _, ok = chainState.bytesToIDCache.Get(string(blk2.Bytes())) - require.True(t, ok) + require.True(ok) _, ok = chainState.bytesToIDCache.Get(string(blk1.Bytes())) - require.False(t, ok) + require.False(ok) } // TestSetLastAcceptedBlock ensures chainState's last accepted block // can be updated by calling [SetLastAcceptedBlock]. 
func TestSetLastAcceptedBlock(t *testing.T) { + require := require.New(t) + testBlks := NewTestBlocks(1) genesisBlock := testBlks[0] genesisBlock.SetStatus(choices.Accepted) @@ -775,54 +663,32 @@ func TestSetLastAcceptedBlock(t *testing.T) { GetBlockIDAtHeight: getCanonicalBlockID, }) lastAcceptedID, err := chainState.LastAccepted(context.Background()) - if err != nil { - t.Fatal(err) - } - if lastAcceptedID != genesisBlock.ID() { - t.Fatal("Expected last accepted block to be the genesis block") - } + require.NoError(err) + require.Equal(genesisBlock.ID(), lastAcceptedID) // call SetLastAcceptedBlock for postSetBlk1 - if err := chainState.SetLastAcceptedBlock(postSetBlk1); err != nil { - t.Fatal(err) - } + require.NoError(chainState.SetLastAcceptedBlock(postSetBlk1)) lastAcceptedID, err = chainState.LastAccepted(context.Background()) - if err != nil { - t.Fatal(err) - } - if lastAcceptedID != postSetBlk1.ID() { - t.Fatal("Expected last accepted block to be postSetBlk1") - } - if lastAcceptedID := chainState.LastAcceptedBlock().ID(); lastAcceptedID != postSetBlk1.ID() { - t.Fatal("Expected last accepted block to be postSetBlk1") - } + require.NoError(err) + require.Equal(postSetBlk1.ID(), lastAcceptedID) + require.Equal(postSetBlk1.ID(), chainState.LastAcceptedBlock().ID()) // ensure further blocks can be accepted parsedpostSetBlk2, err := chainState.ParseBlock(context.Background(), postSetBlk2.Bytes()) - if err != nil { - t.Fatal("Failed to parse postSetBlk2 due to: %w", err) - } - if err := parsedpostSetBlk2.Verify(context.Background()); err != nil { - t.Fatal("Parsed postSetBlk2 failed verification unexpectedly due to %w", err) - } - if err := parsedpostSetBlk2.Accept(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(err) + require.NoError(parsedpostSetBlk2.Verify(context.Background())) + require.NoError(parsedpostSetBlk2.Accept(context.Background())) lastAcceptedID, err = chainState.LastAccepted(context.Background()) - if err != nil { - 
t.Fatal(err) - } - if lastAcceptedID != postSetBlk2.ID() { - t.Fatal("Expected last accepted block to be postSetBlk2") - } - if lastAcceptedID := chainState.LastAcceptedBlock().ID(); lastAcceptedID != postSetBlk2.ID() { - t.Fatal("Expected last accepted block to be postSetBlk2") - } + require.NoError(err) + require.Equal(postSetBlk2.ID(), lastAcceptedID) + require.Equal(postSetBlk2.ID(), chainState.LastAcceptedBlock().ID()) checkAcceptedBlock(t, chainState, parsedpostSetBlk2, false) } func TestSetLastAcceptedBlockWithProcessingBlocksErrors(t *testing.T) { + require := require.New(t) + testBlks := NewTestBlocks(5) genesisBlock := testBlks[0] genesisBlock.SetStatus(choices.Accepted) @@ -849,22 +715,21 @@ func TestSetLastAcceptedBlockWithProcessingBlocksErrors(t *testing.T) { }) builtBlk, err := chainState.BuildBlock(context.Background()) - if err != nil { - t.Fatal(err) - } - require.Len(t, chainState.verifiedBlocks, 0) + require.NoError(err) + require.Empty(chainState.verifiedBlocks) - if err := builtBlk.Verify(context.Background()); err != nil { - t.Fatalf("Built block failed verification due to %s", err) - } - require.Len(t, chainState.verifiedBlocks, 1) + require.NoError(builtBlk.Verify(context.Background())) + require.Len(chainState.verifiedBlocks, 1) checkProcessingBlock(t, chainState, builtBlk) - require.Error(t, chainState.SetLastAcceptedBlock(resetBlk), "should have errored resetting chain state with processing block") + err = chainState.SetLastAcceptedBlock(resetBlk) + require.ErrorIs(err, errSetAcceptedWithProcessing) } func TestStateParseTransitivelyAcceptedBlock(t *testing.T) { + require := require.New(t) + testBlks := NewTestBlocks(3) genesisBlock := testBlks[0] genesisBlock.SetStatus(choices.Accepted) @@ -886,16 +751,13 @@ func TestStateParseTransitivelyAcceptedBlock(t *testing.T) { }) parsedBlk1, err := chainState.ParseBlock(context.Background(), blk1.Bytes()) - if err != nil { - t.Fatalf("Failed to parse blk1 due to: %s", err) - } - - if 
blk1.Height() != parsedBlk1.Height() { - t.Fatalf("Parsed blk1 reported incorrect height. Expected %d got %d", blk1.Height(), parsedBlk1.Height()) - } + require.NoError(err) + require.Equal(blk1.Height(), parsedBlk1.Height()) } func TestIsProcessing(t *testing.T) { + require := require.New(t) + testBlks := NewTestBlocks(2) genesisBlock := testBlks[0] genesisBlock.SetStatus(choices.Accepted) @@ -916,22 +778,20 @@ func TestIsProcessing(t *testing.T) { // Parse blk1 parsedBlk1, err := chainState.ParseBlock(context.Background(), blk1.Bytes()) - require.NoError(t, err) + require.NoError(err) // Check that it is not processing in consensus - require.False(t, chainState.IsProcessing(parsedBlk1.ID())) + require.False(chainState.IsProcessing(parsedBlk1.ID())) // Verify blk1 - err = parsedBlk1.Verify(context.Background()) - require.NoError(t, err) + require.NoError(parsedBlk1.Verify(context.Background())) // Check that it is processing in consensus - require.True(t, chainState.IsProcessing(parsedBlk1.ID())) + require.True(chainState.IsProcessing(parsedBlk1.ID())) // Accept blk1 - err = parsedBlk1.Accept(context.Background()) - require.NoError(t, err) + require.NoError(parsedBlk1.Accept(context.Background())) // Check that it is no longer processing in consensus - require.False(t, chainState.IsProcessing(parsedBlk1.ID())) + require.False(chainState.IsProcessing(parsedBlk1.ID())) } diff --git a/avalanchego/vms/components/index/index.go b/avalanchego/vms/components/index/index.go index 0597f836..e27c1dff 100644 --- a/avalanchego/vms/components/index/index.go +++ b/avalanchego/vms/components/index/index.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package index @@ -10,7 +10,6 @@ import ( "fmt" "github.com/prometheus/client_golang/prometheus" - "go.uber.org/zap" "github.com/ava-labs/avalanchego/database" @@ -23,10 +22,11 @@ import ( ) var ( - idxKey = []byte("idx") - idxCompleteKey = []byte("complete") - errIndexingRequiredFromGenesis = errors.New("running would create incomplete index. Allow incomplete indices or re-sync from genesis with indexing enabled") - errCausesIncompleteIndex = errors.New("running would create incomplete index. Allow incomplete indices or enable indexing") + ErrIndexingRequiredFromGenesis = errors.New("running would create incomplete index. Allow incomplete indices or re-sync from genesis with indexing enabled") + ErrCausesIncompleteIndex = errors.New("running would create incomplete index. Allow incomplete indices or enable indexing") + + idxKey = []byte("idx") + idxCompleteKey = []byte("complete") _ AddressTxsIndexer = (*indexer)(nil) _ AddressTxsIndexer = (*noIndexer)(nil) @@ -174,7 +174,7 @@ func (i *indexer) Accept(txID ids.ID, inputUTXOs []*avax.UTXO, outputUTXOs []*av // Read returns IDs of transactions that changed [address]'s balance of [assetID], // starting at [cursor], in order of transaction acceptance. e.g. if [cursor] == 1, does -// not return the first transaction that changed the balance. (This is for for pagination.) +// not return the first transaction that changed the balance. (This is for pagination.) // Returns at most [pageSize] elements. // See AddressTxsIndexer func (i *indexer) Read(address []byte, assetID ids.ID, cursor, pageSize uint64) ([]ids.ID, error) { @@ -229,7 +229,7 @@ func checkIndexStatus(db database.KeyValueReaderWriter, enableIndexing, allowInc if !idxComplete && enableIndexing && !allowIncomplete { // In a previous run, we did not index so it's incomplete. // indexing was disabled before but now we want to index. 
- return errIndexingRequiredFromGenesis + return ErrIndexingRequiredFromGenesis } else if !idxComplete { // either indexing is disabled, or incomplete indices are ok, so we don't care that index is incomplete return nil @@ -237,7 +237,7 @@ func checkIndexStatus(db database.KeyValueReaderWriter, enableIndexing, allowInc // the index is complete if !enableIndexing && !allowIncomplete { // indexing is disabled this run - return errCausesIncompleteIndex + return ErrCausesIncompleteIndex } else if !enableIndexing { // running without indexing makes it incomplete return database.PutBool(db, idxCompleteKey, false) diff --git a/avalanchego/vms/components/index/metrics.go b/avalanchego/vms/components/index/metrics.go index 181f9528..8531de69 100644 --- a/avalanchego/vms/components/index/metrics.go +++ b/avalanchego/vms/components/index/metrics.go @@ -1,11 +1,9 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package index -import ( - "github.com/prometheus/client_golang/prometheus" -) +import "github.com/prometheus/client_golang/prometheus" type metrics struct { numTxsIndexed prometheus.Counter diff --git a/avalanchego/vms/components/keystore/codec.go b/avalanchego/vms/components/keystore/codec.go index 6e547c9e..15576b73 100644 --- a/avalanchego/vms/components/keystore/codec.go +++ b/avalanchego/vms/components/keystore/codec.go @@ -1,39 +1,35 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package keystore import ( "math" + "time" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" - "github.com/ava-labs/avalanchego/utils/wrappers" + "github.com/ava-labs/avalanchego/utils" ) -const ( - // CodecVersion is the current default codec version - CodecVersion = 0 -) +const CodecVersion = 0 -// Codecs do serialization and deserialization var ( Codec codec.Manager LegacyCodec codec.Manager ) func init() { - c := linearcodec.NewDefault() + c := linearcodec.NewDefault(time.Time{}) Codec = codec.NewDefaultManager() - lc := linearcodec.NewCustomMaxLength(math.MaxUint32) + lc := linearcodec.NewDefault(time.Time{}) LegacyCodec = codec.NewManager(math.MaxInt32) - errs := wrappers.Errs{} - errs.Add( + err := utils.Err( Codec.RegisterCodec(CodecVersion, c), LegacyCodec.RegisterCodec(CodecVersion, lc), ) - if errs.Errored() { - panic(errs.Err) + if err != nil { + panic(err) } } diff --git a/avalanchego/vms/components/keystore/user.go b/avalanchego/vms/components/keystore/user.go index 17c95c94..20749e5b 100644 --- a/avalanchego/vms/components/keystore/user.go +++ b/avalanchego/vms/components/keystore/user.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package keystore @@ -43,8 +43,7 @@ type User interface { } type user struct { - factory secp256k1.Factory - db *encdb.Database + db *encdb.Database } // NewUserFromKeystore tracks a keystore user from the provided keystore @@ -125,7 +124,7 @@ func (u *user) GetKey(address ids.ShortID) (*secp256k1.PrivateKey, error) { if err != nil { return nil, err } - return u.factory.ToPrivateKey(bytes) + return secp256k1.ToPrivateKey(bytes) } func (u *user) Close() error { @@ -143,11 +142,9 @@ func NewKey(u User) (*secp256k1.PrivateKey, error) { // Create and store [numKeys] new keys that will be controlled by this user. 
func NewKeys(u User, numKeys int) ([]*secp256k1.PrivateKey, error) { - factory := secp256k1.Factory{} - keys := make([]*secp256k1.PrivateKey, numKeys) for i := range keys { - sk, err := factory.NewPrivateKey() + sk, err := secp256k1.NewPrivateKey() if err != nil { return nil, err } diff --git a/avalanchego/vms/components/keystore/user_test.go b/avalanchego/vms/components/keystore/user_test.go index 38c40b78..66e331c2 100644 --- a/avalanchego/vms/components/keystore/user_test.go +++ b/avalanchego/vms/components/keystore/user_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package keystore @@ -8,6 +8,7 @@ import ( "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/encdb" "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" @@ -23,26 +24,24 @@ func TestUserClosedDB(t *testing.T) { db, err := encdb.New([]byte(testPassword), memdb.New()) require.NoError(err) - err = db.Close() - require.NoError(err) + require.NoError(db.Close()) u := NewUserFromDB(db) _, err = u.GetAddresses() - require.Error(err, "closed db should have caused an error") + require.ErrorIs(err, database.ErrClosed) _, err = u.GetKey(ids.ShortEmpty) - require.Error(err, "closed db should have caused an error") + require.ErrorIs(err, database.ErrClosed) _, err = GetKeychain(u, nil) - require.Error(err, "closed db should have caused an error") + require.ErrorIs(err, database.ErrClosed) - factory := secp256k1.Factory{} - sk, err := factory.NewPrivateKey() + sk, err := secp256k1.NewPrivateKey() require.NoError(err) err = u.PutKeys(sk) - require.Error(err, "closed db should have caused an error") + require.ErrorIs(err, database.ErrClosed) } func TestUser(t *testing.T) { @@ -57,16 +56,13 @@ func TestUser(t *testing.T) { require.NoError(err) 
require.Empty(addresses, "new user shouldn't have address") - factory := secp256k1.Factory{} - sk, err := factory.NewPrivateKey() + sk, err := secp256k1.NewPrivateKey() require.NoError(err) - err = u.PutKeys(sk) - require.NoError(err) + require.NoError(u.PutKeys(sk)) // Putting the same key multiple times should be a noop - err = u.PutKeys(sk) - require.NoError(err) + require.NoError(u.PutKeys(sk)) addr := sk.PublicKey().Address() diff --git a/avalanchego/vms/components/message/codec.go b/avalanchego/vms/components/message/codec.go index d41de9b2..5614125b 100644 --- a/avalanchego/vms/components/message/codec.go +++ b/avalanchego/vms/components/message/codec.go @@ -1,34 +1,34 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package message import ( + "time" + "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/units" - "github.com/ava-labs/avalanchego/utils/wrappers" ) const ( - codecVersion = 0 + CodecVersion = 0 + maxMessageSize = 512 * units.KiB - maxSliceLen = maxMessageSize ) -// Codec does serialization and deserialization -var c codec.Manager +var Codec codec.Manager func init() { - c = codec.NewManager(maxMessageSize) - lc := linearcodec.NewCustomMaxLength(maxSliceLen) + Codec = codec.NewManager(maxMessageSize) + lc := linearcodec.NewDefault(time.Time{}) - errs := wrappers.Errs{} - errs.Add( + err := utils.Err( lc.RegisterType(&Tx{}), - c.RegisterCodec(codecVersion, lc), + Codec.RegisterCodec(CodecVersion, lc), ) - if errs.Errored() { - panic(errs.Err) + if err != nil { + panic(err) } } diff --git a/avalanchego/vms/components/message/handler.go b/avalanchego/vms/components/message/handler.go index afe12351..2af2f55a 100644 --- a/avalanchego/vms/components/message/handler.go +++ 
b/avalanchego/vms/components/message/handler.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package message diff --git a/avalanchego/vms/components/message/handler_test.go b/avalanchego/vms/components/message/handler_test.go index cd6c5173..bc234283 100644 --- a/avalanchego/vms/components/message/handler_test.go +++ b/avalanchego/vms/components/message/handler_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package message @@ -27,8 +27,7 @@ func TestHandleTx(t *testing.T) { handler := CounterHandler{} msg := Tx{} - err := msg.Handle(&handler, ids.EmptyNodeID, 0) - require.NoError(err) + require.NoError(msg.Handle(&handler, ids.EmptyNodeID, 0)) require.Equal(1, handler.Tx) } @@ -37,6 +36,5 @@ func TestNoopHandler(t *testing.T) { Log: logging.NoLog{}, } - err := handler.HandleTx(ids.EmptyNodeID, 0, nil) - require.NoError(t, err) + require.NoError(t, handler.HandleTx(ids.EmptyNodeID, 0, nil)) } diff --git a/avalanchego/vms/components/message/message.go b/avalanchego/vms/components/message/message.go index 02e05401..a33d4104 100644 --- a/avalanchego/vms/components/message/message.go +++ b/avalanchego/vms/components/message/message.go @@ -1,18 +1,24 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package message import ( "errors" + "fmt" + + "google.golang.org/protobuf/proto" "github.com/ava-labs/avalanchego/ids" + + pb "github.com/ava-labs/avalanchego/proto/pb/message" ) var ( _ Message = (*Tx)(nil) ErrUnexpectedCodecVersion = errors.New("unexpected codec version") + errUnknownMessageType = errors.New("unknown message type") ) type Message interface { @@ -39,20 +45,40 @@ func (m *message) Bytes() []byte { } func Parse(bytes []byte) (Message, error) { - var msg Message - version, err := c.Unmarshal(bytes, &msg) - if err != nil { - return nil, err - } - if version != codecVersion { - return nil, ErrUnexpectedCodecVersion + var ( + msg Message + protoMsg pb.Message + ) + + if err := proto.Unmarshal(bytes, &protoMsg); err == nil { + // This message was encoded with proto. + switch m := protoMsg.GetMessage().(type) { + case *pb.Message_Tx: + msg = &Tx{ + Tx: m.Tx.Tx, + } + default: + return nil, fmt.Errorf("%w: %T", errUnknownMessageType, protoMsg.GetMessage()) + } + } else { + // This message wasn't encoded with proto. + // It must have been encoded with avalanchego's codec. + // TODO remove else statement remove once all nodes support proto encoding. + // i.e. when all nodes are on v1.11.0 or later. + version, err := Codec.Unmarshal(bytes, &msg) + if err != nil { + return nil, err + } + if version != CodecVersion { + return nil, ErrUnexpectedCodecVersion + } } msg.initialize(bytes) return msg, nil } func Build(msg Message) ([]byte, error) { - bytes, err := c.Marshal(codecVersion, &msg) + bytes, err := Codec.Marshal(CodecVersion, &msg) msg.initialize(bytes) return bytes, err } diff --git a/avalanchego/vms/components/message/message_test.go b/avalanchego/vms/components/message/message_test.go index dbaf1543..946241de 100644 --- a/avalanchego/vms/components/message/message_test.go +++ b/avalanchego/vms/components/message/message_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. 
All rights reserved. // See the file LICENSE for licensing terms. package message @@ -7,13 +7,43 @@ import ( "testing" "github.com/stretchr/testify/require" + "google.golang.org/protobuf/proto" - "github.com/ava-labs/avalanchego/utils" - "github.com/ava-labs/avalanchego/utils/units" + "github.com/ava-labs/avalanchego/codec" + + pb "github.com/ava-labs/avalanchego/proto/pb/message" ) func TestParseGibberish(t *testing.T) { - randomBytes := utils.RandomBytes(256 * units.KiB) + randomBytes := []byte{0, 1, 2, 3, 4, 5} _, err := Parse(randomBytes) - require.Error(t, err) + require.ErrorIs(t, err, codec.ErrUnknownVersion) +} + +func TestParseProto(t *testing.T) { + require := require.New(t) + + txBytes := []byte{'y', 'e', 'e', 't'} + protoMsg := pb.Message{ + Message: &pb.Message_Tx{ + Tx: &pb.Tx{ + Tx: txBytes, + }, + }, + } + msgBytes, err := proto.Marshal(&protoMsg) + require.NoError(err) + + parsedMsgIntf, err := Parse(msgBytes) + require.NoError(err) + + require.IsType(&Tx{}, parsedMsgIntf) + parsedMsg := parsedMsgIntf.(*Tx) + + require.Equal(txBytes, parsedMsg.Tx) + + // Parse invalid message + _, err = Parse([]byte{1, 3, 3, 7}) + // Can't parse as proto so it falls back to using avalanchego's codec + require.ErrorIs(err, codec.ErrUnknownVersion) } diff --git a/avalanchego/vms/components/message/tx.go b/avalanchego/vms/components/message/tx.go index c930d3f9..4eced181 100644 --- a/avalanchego/vms/components/message/tx.go +++ b/avalanchego/vms/components/message/tx.go @@ -1,11 +1,9 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package message -import ( - "github.com/ava-labs/avalanchego/ids" -) +import "github.com/ava-labs/avalanchego/ids" var _ Message = (*Tx)(nil) diff --git a/avalanchego/vms/components/message/tx_test.go b/avalanchego/vms/components/message/tx_test.go index 58a06e1b..8c52828b 100644 --- a/avalanchego/vms/components/message/tx_test.go +++ b/avalanchego/vms/components/message/tx_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package message @@ -27,8 +27,8 @@ func TestTx(t *testing.T) { require.NoError(err) require.Equal(builtMsgBytes, parsedMsgIntf.Bytes()) - parsedMsg, ok := parsedMsgIntf.(*Tx) - require.True(ok) + require.IsType(&Tx{}, parsedMsgIntf) + parsedMsg := parsedMsgIntf.(*Tx) require.Equal(tx, parsedMsg.Tx) } diff --git a/avalanchego/vms/components/verify/mock_verifiable.go b/avalanchego/vms/components/verify/mock_verifiable.go index 531b6ea3..fe0e5770 100644 --- a/avalanchego/vms/components/verify/mock_verifiable.go +++ b/avalanchego/vms/components/verify/mock_verifiable.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/vms/components/verify (interfaces: Verifiable) +// +// Generated by this command: +// +// mockgen -package=verify -destination=vms/components/verify/mock_verifiable.go github.com/ava-labs/avalanchego/vms/components/verify Verifiable +// // Package verify is a generated GoMock package. package verify @@ -10,7 +12,7 @@ package verify import ( reflect "reflect" - gomock "github.com/golang/mock/gomock" + gomock "go.uber.org/mock/gomock" ) // MockVerifiable is a mock of Verifiable interface. 
diff --git a/avalanchego/vms/components/verify/subnet.go b/avalanchego/vms/components/verify/subnet.go index a1030164..ba4e65ee 100644 --- a/avalanchego/vms/components/verify/subnet.go +++ b/avalanchego/vms/components/verify/subnet.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package verify diff --git a/avalanchego/vms/components/verify/subnet_test.go b/avalanchego/vms/components/verify/subnet_test.go index a159d226..1e5bee1c 100644 --- a/avalanchego/vms/components/verify/subnet_test.go +++ b/avalanchego/vms/components/verify/subnet_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package verify @@ -8,9 +8,8 @@ import ( "errors" "testing" - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" @@ -90,8 +89,6 @@ func TestSameSubnet(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { ctrl := gomock.NewController(t) - defer ctrl.Finish() - ctx := test.ctxF(ctrl) result := SameSubnet(context.Background(), ctx, test.chainID) diff --git a/avalanchego/vms/components/verify/verification.go b/avalanchego/vms/components/verify/verification.go index b615d70b..b712b730 100644 --- a/avalanchego/vms/components/verify/verification.go +++ b/avalanchego/vms/components/verify/verification.go @@ -1,20 +1,26 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package verify import "github.com/ava-labs/avalanchego/snow" -// Verifiable can be verified type Verifiable interface { Verify() error } -// State that can be verified type State interface { snow.ContextInitializable Verifiable - VerifyState() error + IsState +} + +type IsState interface { + isState() +} + +type IsNotState interface { + isState() error } // All returns nil if all the verifiables were verified with no errors diff --git a/avalanchego/vms/components/verify/verification_test.go b/avalanchego/vms/components/verify/verification_test.go index fe854e16..57f3b856 100644 --- a/avalanchego/vms/components/verify/verification_test.go +++ b/avalanchego/vms/components/verify/verification_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package verify @@ -6,6 +6,8 @@ package verify import ( "errors" "testing" + + "github.com/stretchr/testify/require" ) var errTest = errors.New("non-nil error") @@ -17,13 +19,10 @@ func (v testVerifiable) Verify() error { } func TestAllNil(t *testing.T) { - err := All( + require.NoError(t, All( testVerifiable{}, testVerifiable{}, - ) - if err != nil { - t.Fatal(err) - } + )) } func TestAllError(t *testing.T) { @@ -31,7 +30,5 @@ func TestAllError(t *testing.T) { testVerifiable{}, testVerifiable{err: errTest}, ) - if err == nil { - t.Fatalf("Should have returned an error") - } + require.ErrorIs(t, err, errTest) } diff --git a/avalanchego/vms/example/xsvm/README.md b/avalanchego/vms/example/xsvm/README.md new file mode 100644 index 00000000..53548228 --- /dev/null +++ b/avalanchego/vms/example/xsvm/README.md @@ -0,0 +1,369 @@ +# Cross Subnet Virtual Machine (XSVM) + +Cross Subnet Asset Transfers README Overview + +[Background](#avalanche-subnets-and-custom-vms) + +[Introduction](#introduction) + +[Usage](#how-it-works) + +[Running](#running-the-vm) + 
+[Demo](#cross-subnet-transaction-example) + +## Avalanche Subnets and Custom VMs + +Avalanche is a network composed of multiple sub-networks (called [subnets][Subnet]) that each contain any number of blockchains. Each blockchain is an instance of a [Virtual Machine (VM)](https://docs.avax.network/learn/platform-overview#virtual-machines), much like an object in an object-oriented language is an instance of a class. That is, the VM defines the behavior of the blockchain where it is instantiated. For example, [Coreth (EVM)][Coreth] is a VM that is instantiated by the [C-Chain]. Likewise, one could deploy another instance of the EVM as their own blockchain (to take this to its logical conclusion). + +## Introduction + +Just as [Coreth] powers the [C-Chain], XSVM can be used to power its own blockchain in an Avalanche [Subnet]. Instead of providing a place to execute Solidity smart contracts, however, XSVM enables asset transfers for assets originating on its own chain or other XSVM chains on other subnets. + +## How it Works + +XSVM utilizes AvalancheGo's [teleporter] package to create and authenticate Subnet Messages. + +### Transfer + +If you want to send an asset to someone, you can use a `tx.Transfer` to send to any address. + +### Export + +If you want to send this chain's native asset to a different subnet, you can use a `tx.Export` to send to any address on a destination chain. You may also use a `tx.Export` to return the destination chain's native asset. + +### Import + +To receive assets from another chain's `tx.Export`, you must issue a `tx.Import`. Note that, similarly to a bridge, the security of the other chain's native asset is tied to the other chain. The security of all other assets on this chain are unrelated to the other chain. + +### Fees + +Currently there are no fees enforced in the XSVM. 
+ +### xsvm + +#### Install + +```bash +git clone https://github.com/ava-labs/avalanchego.git; +cd avalanchego; +go install -v ./vms/example/xsvm/cmd/xsvm; +``` + +#### Usage + +``` +Runs an XSVM plugin + +Usage: + xsvm [flags] + xsvm [command] + +Available Commands: + account Displays the state of the requested account + chain Manages XS chains + completion Generate the autocompletion script for the specified shell + help Help about any command + issue Issues transactions + version Prints out the version + +Flags: + -h, --help help for xsvm + +Use "xsvm [command] --help" for more information about a command. +``` + +### [Golang SDK](https://github.com/ava-labs/avalanchego/blob/master/vms/example/xsvm/client/client.go) + +```golang +// Client defines xsvm client operations. +type Client interface { + Network( + ctx context.Context, + options ...rpc.Option, + ) (uint32, ids.ID, ids.ID, error) + Genesis( + ctx context.Context, + options ...rpc.Option, + ) (*genesis.Genesis, error) + Nonce( + ctx context.Context, + address ids.ShortID, + options ...rpc.Option, + ) (uint64, error) + Balance( + ctx context.Context, + address ids.ShortID, + assetID ids.ID, + options ...rpc.Option, + ) (uint64, error) + Loan( + ctx context.Context, + chainID ids.ID, + options ...rpc.Option, + ) (uint64, error) + IssueTx( + ctx context.Context, + tx *tx.Tx, + options ...rpc.Option, + ) (ids.ID, error) + LastAccepted( + ctx context.Context, + options ...rpc.Option, + ) (ids.ID, *block.Stateless, error) + Block( + ctx context.Context, + blkID ids.ID, + options ...rpc.Option, + (*block.Stateless, error) + Message( + ctx context.Context, + txID ids.ID, + options ...rpc.Option, + ) (*teleporter.UnsignedMessage, []byte, error) +} +``` + +### Public Endpoints + +#### xsvm.network + +``` +<<< POST +{ + "jsonrpc": "2.0", + "method": "xsvm.network", + "params":{}, + "id": 1 +} +>>> {"networkID":, "subnetID":, "chainID":} +``` + +For example: + +```bash +curl --location --request POST 
'http://34.235.54.228:9650/ext/bc/28iioW2fYMBnKv24VG5nw9ifY2PsFuwuhxhyzxZB5MmxDd3rnT' \ +--header 'Content-Type: application/json' \ +--data-raw '{ + "jsonrpc": "2.0", + "method": "xsvm.network", + "params":{}, + "id": 1 +}' +``` + +> `{"jsonrpc":"2.0","result":{"networkID":1000000,"subnetID":"2gToFoYXURMQ6y4ZApFuRZN1HurGcDkwmtvkcMHNHcYarvsJN1","chainID":"28iioW2fYMBnKv24VG5nw9ifY2PsFuwuhxhyzxZB5MmxDd3rnT"},"id":1}` + +#### xsvm.genesis + +``` +<<< POST +{ + "jsonrpc": "2.0", + "method": "xsvm.genesis", + "params":{}, + "id": 1 +} +>>> {"genesis":} +``` + +#### xsvm.nonce + +``` +<<< POST +{ + "jsonrpc": "2.0", + "method": "xsvm.nonce", + "params":{ + "address": + }, + "id": 1 +} +>>> {"nonce":} +``` + +#### xsvm.balance + +``` +<<< POST +{ + "jsonrpc": "2.0", + "method": "xsvm.balance", + "params":{ + "address":, + "assetID": + }, + "id": 1 +} +>>> {"balance":} +``` + +#### xsvm.loan + +``` +<<< POST +{ + "jsonrpc": "2.0", + "method": "xsvm.loan", + "params":{ + "chainID": + }, + "id": 1 +} +>>> {"amount":} +``` + +#### xsvm.issueTx + +``` +<<< POST +{ + "jsonrpc": "2.0", + "method": "xsvm.issueTx", + "params":{ + "tx": + }, + "id": 1 +} +>>> {"txID":} +``` + +#### xsvm.lastAccepted + +``` +<<< POST +{ + "jsonrpc": "2.0", + "method": "xsvm.lastAccepted", + "params":{}, + "id": 1 +} +>>> {"blockID":, "block":} +``` + +#### xsvm.block + +``` +<<< POST +{ + "jsonrpc": "2.0", + "method": "xsvm.block", + "params":{ + "blockID": + }, + "id": 1 +} +>>> {"block":} +``` + +#### xsvm.message + +``` +<<< POST +{ + "jsonrpc": "2.0", + "method": "xsvm.message", + "params":{ + "txID": + }, + "id": 1 +} +>>> {"message":, "signature":} +``` + +## Running the VM + +To build the VM, run `./scripts/build_xsvm.sh`. + +### Deploying Your Own Network + +Anyone can deploy their own instance of the XSVM as a subnet on Avalanche. All you need to do is compile it, create a genesis, and send a few txs to the +P-Chain. 
+ +You can do this by following the [subnet tutorial] or by using the [subnet-cli]. + +[teleporter]: https://github.com/ava-labs/avalanchego/tree/master/vms/platformvm/teleporter +[subnet tutorial]: https://docs.avax.network/build/tutorials/platform/subnets/create-a-subnet +[subnet-cli]: https://github.com/ava-labs/subnet-cli +[Coreth]: https://github.com/ava-labs/coreth +[C-Chain]: https://docs.avax.network/learn/platform-overview/#contract-chain-c-chain +[Subnet]: https://docs.avax.network/learn/platform-overview/#subnets + +## Cross Subnet Transaction Example + +The following example shows how to interact with the XSVM to send and receive native assets across subnets. + +### Overview of Steps + +1. Create & deploy Subnet A +2. Create & deploy Subnet B +3. Issue an **export** Tx on Subnet A +4. Issue an **import** Tx on Subnet B +5. Confirm Txs processed correctly + +> **Note:** This demo requires [avalanche-cli](https://github.com/ava-labs/avalanche-cli) version > 1.0.5, [xsvm](https://github.com/ava-labs/xsvm) version > 1.0.2 and [avalanche-network-runner](https://github.com/ava-labs/avalanche-network-runner) v1.3.5. + +### Create and Deploy Subnet A, Subnet B + +Using the avalanche-cli, this step deploys two subnets running the XSVM. Subnet A will act as the sender in this demo, and Subnet B will act as the receiver. + +Steps + +Build the [XSVM](https://github.com/ava-labs/xsvm) + +### Create a genesis file + +```bash +xsvm chain genesis --encoding binary > xsvm.genesis +``` + +### Create Subnet A and Subnet B + +```bash +avalanche subnet create subnetA --custom --genesis --vm +avalanche subnet create subnetB --custom --genesis --vm +``` + +### Deploy Subnet A and Subnet B + +```bash +avalanche subnet deploy subnetA --local +avalanche subnet deploy subnetB --local +``` + +### Issue Export Tx from Subnet A + +The SubnetID and ChainIDs are stored in the sidecar.json files in your avalanche-cli directory. 
Typically this is located at $HOME/.avalanche/subnets/ + +```bash +xsvm issue export --source-chain-id --amount --destination-chain-id +``` + +Save the TxID printed out by running the export command. + +### Issue Import Tx from Subnet B + +> Note: The import tx requires **snowman++** consensus to be activated on the importing chain. A chain requires ~3 blocks to be produced for snowman++ to start. +> Run `xsvm issue transfer --chain-id --amount 1000` to issue simple Txs on SubnetB + +```bash +xsvm issue import --source-chain-id --destination-chain-id --tx-id --source-uris +``` + +> The can be found by running `avalanche network status`. The default URIs are +"http://localhost:9650,http://localhost:9652,http://localhost:9654,http://localhost:9656,http://localhost:9658" + +**Account Values** +To check proper execution, use the `xsvm account` command to check balances. + +Verify the balance on SubnetA decreased by your export amount using + +```bash +xsvm account --chain-id +``` + +Now verify chain A's assets were successfully imported to SubnetB + +```bash +xsvm account --chain-id --asset-id +``` diff --git a/avalanchego/vms/example/xsvm/api/client.go b/avalanchego/vms/example/xsvm/api/client.go new file mode 100644 index 00000000..d9a6a711 --- /dev/null +++ b/avalanchego/vms/example/xsvm/api/client.go @@ -0,0 +1,243 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package api + +import ( + "context" + "fmt" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/rpc" + "github.com/ava-labs/avalanchego/vms/example/xsvm/block" + "github.com/ava-labs/avalanchego/vms/example/xsvm/genesis" + "github.com/ava-labs/avalanchego/vms/example/xsvm/tx" + "github.com/ava-labs/avalanchego/vms/platformvm/warp" +) + +// Client defines the xsvm API client. 
+type Client interface { + Network( + ctx context.Context, + options ...rpc.Option, + ) (uint32, ids.ID, ids.ID, error) + Genesis( + ctx context.Context, + options ...rpc.Option, + ) (*genesis.Genesis, error) + Nonce( + ctx context.Context, + address ids.ShortID, + options ...rpc.Option, + ) (uint64, error) + Balance( + ctx context.Context, + address ids.ShortID, + assetID ids.ID, + options ...rpc.Option, + ) (uint64, error) + Loan( + ctx context.Context, + chainID ids.ID, + options ...rpc.Option, + ) (uint64, error) + IssueTx( + ctx context.Context, + tx *tx.Tx, + options ...rpc.Option, + ) (ids.ID, error) + LastAccepted( + ctx context.Context, + options ...rpc.Option, + ) (ids.ID, *block.Stateless, error) + Block( + ctx context.Context, + blkID ids.ID, + options ...rpc.Option, + ) (*block.Stateless, error) + Message( + ctx context.Context, + txID ids.ID, + options ...rpc.Option, + ) (*warp.UnsignedMessage, []byte, error) +} + +func NewClient(uri, chain string) Client { + path := fmt.Sprintf( + "%s/ext/%s/%s", + uri, + constants.ChainAliasPrefix, + chain, + ) + return &client{ + req: rpc.NewEndpointRequester(path), + } +} + +type client struct { + req rpc.EndpointRequester +} + +func (c *client) Network( + ctx context.Context, + options ...rpc.Option, +) (uint32, ids.ID, ids.ID, error) { + resp := new(NetworkReply) + err := c.req.SendRequest( + ctx, + "xsvm.network", + nil, + resp, + options..., + ) + return resp.NetworkID, resp.SubnetID, resp.ChainID, err +} + +func (c *client) Genesis( + ctx context.Context, + options ...rpc.Option, +) (*genesis.Genesis, error) { + resp := new(GenesisReply) + err := c.req.SendRequest( + ctx, + "xsvm.genesis", + nil, + resp, + options..., + ) + return resp.Genesis, err +} + +func (c *client) Nonce( + ctx context.Context, + address ids.ShortID, + options ...rpc.Option, +) (uint64, error) { + resp := new(NonceReply) + err := c.req.SendRequest( + ctx, + "xsvm.nonce", + &NonceArgs{ + Address: address, + }, + resp, + options..., + ) + 
return resp.Nonce, err +} + +func (c *client) Balance( + ctx context.Context, + address ids.ShortID, + assetID ids.ID, + options ...rpc.Option, +) (uint64, error) { + resp := new(BalanceReply) + err := c.req.SendRequest( + ctx, + "xsvm.balance", + &BalanceArgs{ + Address: address, + AssetID: assetID, + }, + resp, + options..., + ) + return resp.Balance, err +} + +func (c *client) Loan( + ctx context.Context, + chainID ids.ID, + options ...rpc.Option, +) (uint64, error) { + resp := new(LoanReply) + err := c.req.SendRequest( + ctx, + "xsvm.loan", + &LoanArgs{ + ChainID: chainID, + }, + resp, + options..., + ) + return resp.Amount, err +} + +func (c *client) IssueTx( + ctx context.Context, + newTx *tx.Tx, + options ...rpc.Option, +) (ids.ID, error) { + txBytes, err := tx.Codec.Marshal(tx.CodecVersion, newTx) + if err != nil { + return ids.Empty, err + } + + resp := new(IssueTxReply) + err = c.req.SendRequest( + ctx, + "xsvm.issueTx", + &IssueTxArgs{ + Tx: txBytes, + }, + resp, + options..., + ) + return resp.TxID, err +} + +func (c *client) LastAccepted( + ctx context.Context, + options ...rpc.Option, +) (ids.ID, *block.Stateless, error) { + resp := new(LastAcceptedReply) + err := c.req.SendRequest( + ctx, + "xsvm.lastAccepted", + nil, + resp, + options..., + ) + return resp.BlockID, resp.Block, err +} + +func (c *client) Block( + ctx context.Context, + blkID ids.ID, + options ...rpc.Option, +) (*block.Stateless, error) { + resp := new(BlockReply) + err := c.req.SendRequest( + ctx, + "xsvm.lastAccepted", + &BlockArgs{ + BlockID: blkID, + }, + resp, + options..., + ) + return resp.Block, err +} + +func (c *client) Message( + ctx context.Context, + txID ids.ID, + options ...rpc.Option, +) (*warp.UnsignedMessage, []byte, error) { + resp := new(MessageReply) + err := c.req.SendRequest( + ctx, + "xsvm.message", + &MessageArgs{ + TxID: txID, + }, + resp, + options..., + ) + if err != nil { + return nil, nil, err + } + return resp.Message, resp.Signature, 
resp.Message.Initialize() +} diff --git a/avalanchego/vms/example/xsvm/api/server.go b/avalanchego/vms/example/xsvm/api/server.go new file mode 100644 index 00000000..dd2545e8 --- /dev/null +++ b/avalanchego/vms/example/xsvm/api/server.go @@ -0,0 +1,204 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package api + +import ( + "net/http" + + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/vms/example/xsvm/block" + "github.com/ava-labs/avalanchego/vms/example/xsvm/builder" + "github.com/ava-labs/avalanchego/vms/example/xsvm/chain" + "github.com/ava-labs/avalanchego/vms/example/xsvm/genesis" + "github.com/ava-labs/avalanchego/vms/example/xsvm/state" + "github.com/ava-labs/avalanchego/vms/example/xsvm/tx" + "github.com/ava-labs/avalanchego/vms/platformvm/warp" +) + +// Server defines the xsvm API server. +type Server interface { + Network(r *http.Request, args *struct{}, reply *NetworkReply) error + Genesis(r *http.Request, args *struct{}, reply *GenesisReply) error + Nonce(r *http.Request, args *NonceArgs, reply *NonceReply) error + Balance(r *http.Request, args *BalanceArgs, reply *BalanceReply) error + Loan(r *http.Request, args *LoanArgs, reply *LoanReply) error + IssueTx(r *http.Request, args *IssueTxArgs, reply *IssueTxReply) error + LastAccepted(r *http.Request, args *struct{}, reply *LastAcceptedReply) error + Block(r *http.Request, args *BlockArgs, reply *BlockReply) error + Message(r *http.Request, args *MessageArgs, reply *MessageReply) error +} + +func NewServer( + ctx *snow.Context, + genesis *genesis.Genesis, + state database.KeyValueReader, + chain chain.Chain, + builder builder.Builder, +) Server { + return &server{ + ctx: ctx, + genesis: genesis, + state: state, + chain: chain, + builder: builder, + } +} + +type server struct { + ctx *snow.Context + genesis *genesis.Genesis + 
state database.KeyValueReader + chain chain.Chain + builder builder.Builder +} + +type NetworkReply struct { + NetworkID uint32 `json:"networkID"` + SubnetID ids.ID `json:"subnetID"` + ChainID ids.ID `json:"chainID"` +} + +func (s *server) Network(_ *http.Request, _ *struct{}, reply *NetworkReply) error { + reply.NetworkID = s.ctx.NetworkID + reply.SubnetID = s.ctx.SubnetID + reply.ChainID = s.ctx.ChainID + return nil +} + +type GenesisReply struct { + Genesis *genesis.Genesis `json:"genesis"` +} + +func (s *server) Genesis(_ *http.Request, _ *struct{}, reply *GenesisReply) error { + reply.Genesis = s.genesis + return nil +} + +type NonceArgs struct { + Address ids.ShortID `json:"address"` +} + +type NonceReply struct { + Nonce uint64 `json:"nonce"` +} + +func (s *server) Nonce(_ *http.Request, args *NonceArgs, reply *NonceReply) error { + nonce, err := state.GetNonce(s.state, args.Address) + reply.Nonce = nonce + return err +} + +type BalanceArgs struct { + Address ids.ShortID `json:"address"` + AssetID ids.ID `json:"assetID"` +} + +type BalanceReply struct { + Balance uint64 `json:"balance"` +} + +func (s *server) Balance(_ *http.Request, args *BalanceArgs, reply *BalanceReply) error { + balance, err := state.GetBalance(s.state, args.Address, args.AssetID) + reply.Balance = balance + return err +} + +type LoanArgs struct { + ChainID ids.ID `json:"chainID"` +} + +type LoanReply struct { + Amount uint64 `json:"amount"` +} + +func (s *server) Loan(_ *http.Request, args *LoanArgs, reply *LoanReply) error { + amount, err := state.GetLoan(s.state, args.ChainID) + reply.Amount = amount + return err +} + +type IssueTxArgs struct { + Tx []byte `json:"tx"` +} + +type IssueTxReply struct { + TxID ids.ID `json:"txID"` +} + +func (s *server) IssueTx(r *http.Request, args *IssueTxArgs, reply *IssueTxReply) error { + newTx, err := tx.Parse(args.Tx) + if err != nil { + return err + } + + ctx := r.Context() + s.ctx.Lock.Lock() + err = s.builder.AddTx(ctx, newTx) + 
s.ctx.Lock.Unlock() + if err != nil { + return err + } + + txID, err := newTx.ID() + reply.TxID = txID + return err +} + +type LastAcceptedReply struct { + BlockID ids.ID `json:"blockID"` + Block *block.Stateless `json:"block"` +} + +func (s *server) LastAccepted(_ *http.Request, _ *struct{}, reply *LastAcceptedReply) error { + s.ctx.Lock.RLock() + reply.BlockID = s.chain.LastAccepted() + s.ctx.Lock.RUnlock() + blkBytes, err := state.GetBlock(s.state, reply.BlockID) + if err != nil { + return err + } + + reply.Block, err = block.Parse(blkBytes) + return err +} + +type BlockArgs struct { + BlockID ids.ID `json:"blockID"` +} + +type BlockReply struct { + Block *block.Stateless `json:"block"` +} + +func (s *server) Block(_ *http.Request, args *BlockArgs, reply *BlockReply) error { + blkBytes, err := state.GetBlock(s.state, args.BlockID) + if err != nil { + return err + } + + reply.Block, err = block.Parse(blkBytes) + return err +} + +type MessageArgs struct { + TxID ids.ID `json:"txID"` +} + +type MessageReply struct { + Message *warp.UnsignedMessage `json:"message"` + Signature []byte `json:"signature"` +} + +func (s *server) Message(_ *http.Request, args *MessageArgs, reply *MessageReply) error { + message, err := state.GetMessage(s.state, args.TxID) + if err != nil { + return err + } + + reply.Message = message + reply.Signature, err = s.ctx.WarpSigner.Sign(message) + return err +} diff --git a/avalanchego/vms/example/xsvm/block/block.go b/avalanchego/vms/example/xsvm/block/block.go new file mode 100644 index 00000000..ab6b41d7 --- /dev/null +++ b/avalanchego/vms/example/xsvm/block/block.go @@ -0,0 +1,37 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package block + +import ( + "time" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/hashing" + "github.com/ava-labs/avalanchego/vms/example/xsvm/tx" +) + +// Stateless blocks are blocks as they are marshalled/unmarshalled and sent over +// the p2p network. The stateful blocks which can be executed are built from +// Stateless blocks. +type Stateless struct { + ParentID ids.ID `serialize:"true" json:"parentID"` + Timestamp int64 `serialize:"true" json:"timestamp"` + Height uint64 `serialize:"true" json:"height"` + Txs []*tx.Tx `serialize:"true" json:"txs"` +} + +func (b *Stateless) Time() time.Time { + return time.Unix(b.Timestamp, 0) +} + +func (b *Stateless) ID() (ids.ID, error) { + bytes, err := Codec.Marshal(CodecVersion, b) + return hashing.ComputeHash256Array(bytes), err +} + +func Parse(bytes []byte) (*Stateless, error) { + blk := &Stateless{} + _, err := Codec.Unmarshal(bytes, blk) + return blk, err +} diff --git a/avalanchego/vms/example/xsvm/block/codec.go b/avalanchego/vms/example/xsvm/block/codec.go new file mode 100644 index 00000000..b4e5c811 --- /dev/null +++ b/avalanchego/vms/example/xsvm/block/codec.go @@ -0,0 +1,10 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package block + +import "github.com/ava-labs/avalanchego/vms/example/xsvm/tx" + +const CodecVersion = tx.CodecVersion + +var Codec = tx.Codec diff --git a/avalanchego/vms/example/xsvm/builder/builder.go b/avalanchego/vms/example/xsvm/builder/builder.go new file mode 100644 index 00000000..231679f5 --- /dev/null +++ b/avalanchego/vms/example/xsvm/builder/builder.go @@ -0,0 +1,139 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package builder + +import ( + "context" + "time" + + "github.com/ava-labs/avalanchego/database/versiondb" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/utils/linkedhashmap" + "github.com/ava-labs/avalanchego/vms/example/xsvm/chain" + "github.com/ava-labs/avalanchego/vms/example/xsvm/execute" + "github.com/ava-labs/avalanchego/vms/example/xsvm/tx" + + smblock "github.com/ava-labs/avalanchego/snow/engine/snowman/block" + xsblock "github.com/ava-labs/avalanchego/vms/example/xsvm/block" +) + +const MaxTxsPerBlock = 10 + +var _ Builder = (*builder)(nil) + +type Builder interface { + SetPreference(preferred ids.ID) + AddTx(ctx context.Context, tx *tx.Tx) error + BuildBlock(ctx context.Context, blockContext *smblock.Context) (chain.Block, error) +} + +type builder struct { + chainContext *snow.Context + engineChan chan<- common.Message + chain chain.Chain + + pendingTxs linkedhashmap.LinkedHashmap[ids.ID, *tx.Tx] + preference ids.ID +} + +func New(chainContext *snow.Context, engineChan chan<- common.Message, chain chain.Chain) Builder { + return &builder{ + chainContext: chainContext, + engineChan: engineChan, + chain: chain, + + pendingTxs: linkedhashmap.New[ids.ID, *tx.Tx](), + preference: chain.LastAccepted(), + } +} + +func (b *builder) SetPreference(preferred ids.ID) { + b.preference = preferred +} + +func (b *builder) AddTx(_ context.Context, newTx *tx.Tx) error { + // TODO: verify [tx] against the currently preferred state + txID, err := newTx.ID() + if err != nil { + return err + } + b.pendingTxs.Put(txID, newTx) + select { + case b.engineChan <- common.PendingTxs: + default: + } + return nil +} + +func (b *builder) BuildBlock(ctx context.Context, blockContext *smblock.Context) (chain.Block, error) { + preferredBlk, err := b.chain.GetBlock(b.preference) + if err != nil { + return nil, err + } + + preferredState, err := 
preferredBlk.State() + if err != nil { + return nil, err + } + + defer func() { + if b.pendingTxs.Len() == 0 { + return + } + select { + case b.engineChan <- common.PendingTxs: + default: + } + }() + + parentTimestamp := preferredBlk.Timestamp() + timestamp := time.Now().Truncate(time.Second) + if timestamp.Before(parentTimestamp) { + timestamp = parentTimestamp + } + + wipBlock := xsblock.Stateless{ + ParentID: b.preference, + Timestamp: timestamp.Unix(), + Height: preferredBlk.Height() + 1, + } + + currentState := versiondb.New(preferredState) + for len(wipBlock.Txs) < MaxTxsPerBlock { + txID, currentTx, exists := b.pendingTxs.Oldest() + if !exists { + break + } + b.pendingTxs.Delete(txID) + + sender, err := currentTx.SenderID() + if err != nil { + // This tx was invalid, drop it and continue block building + continue + } + + txState := versiondb.New(currentState) + txExecutor := execute.Tx{ + Context: ctx, + ChainContext: b.chainContext, + Database: txState, + BlockContext: blockContext, + TxID: txID, + Sender: sender, + // TODO: populate fees + } + if err := currentTx.Unsigned.Visit(&txExecutor); err != nil { + // This tx was invalid, drop it and continue block building + continue + } + if err := txState.Commit(); err != nil { + return nil, err + } + + wipBlock.Txs = append(wipBlock.Txs, currentTx) + } + return b.chain.NewBlock(&wipBlock) +} diff --git a/avalanchego/vms/example/xsvm/chain/block.go b/avalanchego/vms/example/xsvm/chain/block.go new file mode 100644 index 00000000..8ab761d5 --- /dev/null +++ b/avalanchego/vms/example/xsvm/chain/block.go @@ -0,0 +1,219 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+
+package chain
+
+import (
+	"context"
+	"errors"
+	"time"
+
+	"github.com/ava-labs/avalanchego/database"
+	"github.com/ava-labs/avalanchego/database/versiondb"
+	"github.com/ava-labs/avalanchego/ids"
+	"github.com/ava-labs/avalanchego/snow"
+	"github.com/ava-labs/avalanchego/snow/choices"
+	"github.com/ava-labs/avalanchego/snow/consensus/snowman"
+	"github.com/ava-labs/avalanchego/utils/set"
+	"github.com/ava-labs/avalanchego/vms/example/xsvm/execute"
+	"github.com/ava-labs/avalanchego/vms/example/xsvm/state"
+
+	smblock "github.com/ava-labs/avalanchego/snow/engine/snowman/block"
+	xsblock "github.com/ava-labs/avalanchego/vms/example/xsvm/block"
+)
+
+const maxClockSkew = 10 * time.Second
+
+var (
+	_ Block = (*block)(nil)
+
+	errMissingParent         = errors.New("missing parent block")
+	errMissingChild          = errors.New("missing child block")
+	errParentNotVerified     = errors.New("parent block has not been verified")
+	errMissingState          = errors.New("missing state")
+	errFutureTimestamp       = errors.New("future timestamp")
+	errTimestampBeforeParent = errors.New("timestamp before parent")
+	errWrongHeight           = errors.New("wrong height")
+)
+
+type Block interface {
+	snowman.Block
+	smblock.WithVerifyContext
+
+	// State intends to return the new chain state following this block's
+	// acceptance. The new chain state is built (but not persisted) following a
+	// block's verification to allow block's descendants verification before
+	// being accepted.
+	State() (database.Database, error)
+}
+
+type block struct {
+	*xsblock.Stateless
+
+	chain *chain
+
+	id     ids.ID
+	status choices.Status
+	bytes  []byte
+
+	state               *versiondb.Database
+	verifiedChildrenIDs set.Set[ids.ID]
+}
+
+func (b *block) ID() ids.ID {
+	return b.id
+}
+
+func (b *block) Status() choices.Status {
+	if !b.status.Decided() {
+		b.status = b.calculateStatus()
+	}
+	return b.status
+}
+
+func (b *block) Parent() ids.ID {
+	return b.ParentID
+}
+
+func (b *block) Bytes() []byte {
+	return b.bytes
+}
+
+func (b *block) Height() uint64 {
+	return b.Stateless.Height
+}
+
+func (b *block) Timestamp() time.Time {
+	return b.Time()
+}
+
+func (b *block) Verify(ctx context.Context) error {
+	return b.VerifyWithContext(ctx, nil)
+}
+
+func (b *block) Accept(context.Context) error {
+	if err := b.state.Commit(); err != nil {
+		return err
+	}
+
+	// Following this block's acceptance, make sure that its direct children
+	// point to the base state, which now also contains this block's changes.
+	for childID := range b.verifiedChildrenIDs {
+		child, exists := b.chain.verifiedBlocks[childID]
+		if !exists {
+			return errMissingChild
+		}
+		if err := child.state.SetDatabase(b.chain.acceptedState); err != nil {
+			return err
+		}
+	}
+
+	b.status = choices.Accepted
+	b.chain.lastAccepted = b.id
+	delete(b.chain.verifiedBlocks, b.ParentID)
+	return nil
+}
+
+func (b *block) Reject(context.Context) error {
+	b.status = choices.Rejected
+	delete(b.chain.verifiedBlocks, b.id)
+
+	// TODO: push transactions back into the mempool
+	return nil
+}
+
+func (b *block) ShouldVerifyWithContext(context.Context) (bool, error) {
+	return execute.ExpectsContext(b.Stateless)
+}
+
+func (b *block) VerifyWithContext(ctx context.Context, blockContext *smblock.Context) error {
+	timestamp := b.Time()
+	if time.Until(timestamp) > maxClockSkew {
+		return errFutureTimestamp
+	}
+
+	// parent block must be verified or accepted
+	parent, exists := b.chain.verifiedBlocks[b.ParentID]
+	if !exists {
+		return errMissingParent
+	}
+
+	if b.Stateless.Height != parent.Stateless.Height+1 {
+		return errWrongHeight
+	}
+
+	parentTimestamp := parent.Time()
+	if timestamp.Before(parentTimestamp) {
+		return errTimestampBeforeParent
+	}
+
+	parentState, err := parent.State()
+	if err != nil {
+		return err
+	}
+
+	// This block's state is a versionDB built on top of its parent state. This
+	// block's changes are pushed atomically to the parent state when accepted.
+	blkState := versiondb.New(parentState)
+	err = execute.Block(
+		ctx,
+		b.chain.chainContext,
+		blkState,
+		b.chain.chainState == snow.Bootstrapping,
+		blockContext,
+		b.Stateless,
+	)
+	if err != nil {
+		return err
+	}
+
+	// Make sure to only save the state the first time we verify this block.
+	if b.state == nil {
+		b.state = blkState
+		parent.verifiedChildrenIDs.Add(b.id)
+		b.chain.verifiedBlocks[b.id] = b
+	}
+
+	return nil
+}
+
+func (b *block) State() (database.Database, error) {
+	if b.id == b.chain.lastAccepted {
+		return b.chain.acceptedState, nil
+	}
+
+	// States of accepted blocks other than the lastAccepted are undefined.
+	if b.Status() == choices.Accepted {
+		return nil, errMissingState
+	}
+
+	// We should not be calling State on an unverified block.
+	if b.state == nil {
+		return nil, errParentNotVerified
+	}
+
+	return b.state, nil
+}
+
+func (b *block) calculateStatus() choices.Status {
+	if b.chain.lastAccepted == b.id {
+		return choices.Accepted
+	}
+	if _, ok := b.chain.verifiedBlocks[b.id]; ok {
+		return choices.Processing
+	}
+
+	_, err := state.GetBlock(b.chain.acceptedState, b.id)
+	switch {
+	case err == nil:
+		return choices.Accepted
+
+	case errors.Is(err, database.ErrNotFound):
+		// This block hasn't been verified yet.
+		return choices.Processing
+
+	default:
+		// TODO: correctly report this error to the consensus engine.
+		return choices.Processing
+	}
+}
diff --git a/avalanchego/vms/example/xsvm/chain/chain.go b/avalanchego/vms/example/xsvm/chain/chain.go
new file mode 100644
index 00000000..7fc60261
--- /dev/null
+++ b/avalanchego/vms/example/xsvm/chain/chain.go
@@ -0,0 +1,117 @@
+// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved.
+// See the file LICENSE for licensing terms.
+ +package chain + +import ( + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/snow/choices" + "github.com/ava-labs/avalanchego/vms/example/xsvm/state" + + xsblock "github.com/ava-labs/avalanchego/vms/example/xsvm/block" +) + +var _ Chain = (*chain)(nil) + +type Chain interface { + LastAccepted() ids.ID + SetChainState(state snow.State) + GetBlock(blkID ids.ID) (Block, error) + + // Creates a fully verifiable and executable block, which can be processed + // by the consensus engine, from a stateless block. + NewBlock(blk *xsblock.Stateless) (Block, error) +} + +type chain struct { + chainContext *snow.Context + acceptedState database.Database + + // chain state as driven by the consensus engine + chainState snow.State + + lastAccepted ids.ID + verifiedBlocks map[ids.ID]*block +} + +func New(ctx *snow.Context, db database.Database) (Chain, error) { + // Load the last accepted block data. For a newly created VM, this will be + // the genesis. It is assumed the genesis was processed and stored + // previously during VM initialization. 
+ lastAcceptedID, err := state.GetLastAccepted(db) + if err != nil { + return nil, err + } + + c := &chain{ + chainContext: ctx, + acceptedState: db, + lastAccepted: lastAcceptedID, + } + + lastAccepted, err := c.getBlock(lastAcceptedID) + c.verifiedBlocks = map[ids.ID]*block{ + lastAcceptedID: lastAccepted, + } + return c, err +} + +func (c *chain) LastAccepted() ids.ID { + return c.lastAccepted +} + +func (c *chain) SetChainState(state snow.State) { + c.chainState = state +} + +func (c *chain) GetBlock(blkID ids.ID) (Block, error) { + return c.getBlock(blkID) +} + +func (c *chain) NewBlock(blk *xsblock.Stateless) (Block, error) { + blkID, err := blk.ID() + if err != nil { + return nil, err + } + + if blk, exists := c.verifiedBlocks[blkID]; exists { + return blk, nil + } + + blkBytes, err := xsblock.Codec.Marshal(xsblock.CodecVersion, blk) + if err != nil { + return nil, err + } + + return &block{ + Stateless: blk, + chain: c, + id: blkID, + bytes: blkBytes, + }, nil +} + +func (c *chain) getBlock(blkID ids.ID) (*block, error) { + if blk, exists := c.verifiedBlocks[blkID]; exists { + return blk, nil + } + + blkBytes, err := state.GetBlock(c.acceptedState, blkID) + if err != nil { + return nil, err + } + + stateless, err := xsblock.Parse(blkBytes) + if err != nil { + return nil, err + } + return &block{ + Stateless: stateless, + chain: c, + id: blkID, + status: choices.Accepted, + bytes: blkBytes, + }, nil +} diff --git a/avalanchego/vms/example/xsvm/cmd/account/cmd.go b/avalanchego/vms/example/xsvm/cmd/account/cmd.go new file mode 100644 index 00000000..cea0b7b6 --- /dev/null +++ b/avalanchego/vms/example/xsvm/cmd/account/cmd.go @@ -0,0 +1,47 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+
+package account
+
+import (
+	"log"
+
+	"github.com/spf13/cobra"
+
+	"github.com/ava-labs/avalanchego/vms/example/xsvm/api"
+)
+
+func Command() *cobra.Command {
+	c := &cobra.Command{
+		Use:   "account",
+		Short: "Displays the state of the requested account",
+		RunE:  accountFunc,
+	}
+	flags := c.Flags()
+	AddFlags(flags)
+	return c
+}
+
+func accountFunc(c *cobra.Command, args []string) error {
+	flags := c.Flags()
+	config, err := ParseFlags(flags, args)
+	if err != nil {
+		return err
+	}
+
+	ctx := c.Context()
+
+	client := api.NewClient(config.URI, config.ChainID)
+
+	nonce, err := client.Nonce(ctx, config.Address)
+	if err != nil {
+		return err
+	}
+
+	balance, err := client.Balance(ctx, config.Address, config.AssetID)
+	if err != nil {
+		return err
+	}
+	log.Printf("%s has %d of %s with nonce %d\n", config.Address, balance, config.AssetID, nonce)
+	return nil
+}
diff --git a/avalanchego/vms/example/xsvm/cmd/account/flags.go b/avalanchego/vms/example/xsvm/cmd/account/flags.go
new file mode 100644
index 00000000..3a9588ab
--- /dev/null
+++ b/avalanchego/vms/example/xsvm/cmd/account/flags.go
@@ -0,0 +1,80 @@
+// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved.
+// See the file LICENSE for licensing terms.
+
+package account
+
+import (
+	"github.com/spf13/pflag"
+
+	"github.com/ava-labs/avalanchego/genesis"
+	"github.com/ava-labs/avalanchego/ids"
+	"github.com/ava-labs/avalanchego/wallet/subnet/primary"
+)
+
+const (
+	URIKey     = "uri"
+	ChainIDKey = "chain-id"
+	AddressKey = "address"
+	AssetIDKey = "asset-id"
+)
+
+func AddFlags(flags *pflag.FlagSet) {
+	flags.String(URIKey, primary.LocalAPIURI, "API URI to use to fetch the account state")
+	flags.String(ChainIDKey, "", "Chain to fetch the account state on")
+	flags.String(AddressKey, genesis.EWOQKey.Address().String(), "Address of the account to fetch")
+	flags.String(AssetIDKey, "[chain-id]", "Asset balance to fetch")
+}
+
+type Config struct {
+	URI     string
+	ChainID string
+	Address ids.ShortID
+	AssetID ids.ID
+}
+
+func ParseFlags(flags *pflag.FlagSet, args []string) (*Config, error) {
+	// Parse exactly once; re-parsing the same FlagSet is redundant.
+	if err := flags.Parse(args); err != nil {
+		return nil, err
+	}
+
+	uri, err := flags.GetString(URIKey)
+	if err != nil {
+		return nil, err
+	}
+
+	chainID, err := flags.GetString(ChainIDKey)
+	if err != nil {
+		return nil, err
+	}
+
+	addrStr, err := flags.GetString(AddressKey)
+	if err != nil {
+		return nil, err
+	}
+
+	addr, err := ids.ShortFromString(addrStr)
+	if err != nil {
+		return nil, err
+	}
+
+	assetIDStr := chainID
+	if flags.Changed(AssetIDKey) {
+		assetIDStr, err = flags.GetString(AssetIDKey)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	assetID, err := ids.FromString(assetIDStr)
+	if err != nil {
+		return nil, err
+	}
+
+	return &Config{
+		URI:     uri,
+		ChainID: chainID,
+		Address: addr,
+		AssetID: assetID,
+	}, nil
+}
diff --git a/avalanchego/vms/example/xsvm/cmd/chain/cmd.go b/avalanchego/vms/example/xsvm/cmd/chain/cmd.go
new file mode 100644
index 00000000..679bdea0
--- /dev/null
+++ b/avalanchego/vms/example/xsvm/cmd/chain/cmd.go
@@ -0,0 +1,23 @@
+// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved.
+// See the file LICENSE for licensing terms. + +package chain + +import ( + "github.com/spf13/cobra" + + "github.com/ava-labs/avalanchego/vms/example/xsvm/cmd/chain/create" + "github.com/ava-labs/avalanchego/vms/example/xsvm/cmd/chain/genesis" +) + +func Command() *cobra.Command { + c := &cobra.Command{ + Use: "chain", + Short: "Manages XS chains", + } + c.AddCommand( + create.Command(), + genesis.Command(), + ) + return c +} diff --git a/avalanchego/vms/example/xsvm/cmd/chain/create/cmd.go b/avalanchego/vms/example/xsvm/cmd/chain/create/cmd.go new file mode 100644 index 00000000..984ff45d --- /dev/null +++ b/avalanchego/vms/example/xsvm/cmd/chain/create/cmd.go @@ -0,0 +1,85 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package create + +import ( + "log" + "time" + + "github.com/spf13/cobra" + + "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/vms/example/xsvm" + "github.com/ava-labs/avalanchego/vms/example/xsvm/genesis" + "github.com/ava-labs/avalanchego/vms/secp256k1fx" + "github.com/ava-labs/avalanchego/wallet/subnet/primary" + "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" +) + +func Command() *cobra.Command { + c := &cobra.Command{ + Use: "create", + Short: "Creates a new chain", + RunE: createFunc, + } + flags := c.Flags() + AddFlags(flags) + return c +} + +func createFunc(c *cobra.Command, args []string) error { + flags := c.Flags() + config, err := ParseFlags(flags, args) + if err != nil { + return err + } + + ctx := c.Context() + kc := secp256k1fx.NewKeychain(config.PrivateKey) + + // NewWalletFromURI fetches the available UTXOs owned by [kc] on the network + // that [uri] is hosting. 
+	walletSyncStartTime := time.Now()
+	wallet, err := primary.MakeWallet(ctx, &primary.WalletConfig{
+		URI:              config.URI,
+		AVAXKeychain:     kc,
+		EthKeychain:      kc,
+		PChainTxsToFetch: set.Of(config.SubnetID),
+	})
+	if err != nil {
+		return err
+	}
+	log.Printf("synced wallet in %s\n", time.Since(walletSyncStartTime))
+
+	// Get the P-chain wallet
+	pWallet := wallet.P()
+
+	genesisBytes, err := genesis.Codec.Marshal(genesis.CodecVersion, &genesis.Genesis{
+		Timestamp: 0,
+		Allocations: []genesis.Allocation{
+			{
+				Address: config.Address,
+				Balance: config.Balance,
+			},
+		},
+	})
+	if err != nil {
+		return err
+	}
+
+	createChainStartTime := time.Now()
+	createChainTxID, err := pWallet.IssueCreateChainTx(
+		config.SubnetID,
+		genesisBytes,
+		xsvm.ID,
+		nil,
+		config.Name,
+		common.WithContext(ctx),
+	)
+	if err != nil {
+		return err
+	}
+	log.Printf("created chain %s in %s\n", createChainTxID, time.Since(createChainStartTime))
+	return nil
+}
diff --git a/avalanchego/vms/example/xsvm/cmd/chain/create/flags.go b/avalanchego/vms/example/xsvm/cmd/chain/create/flags.go
new file mode 100644
index 00000000..d3e55465
--- /dev/null
+++ b/avalanchego/vms/example/xsvm/cmd/chain/create/flags.go
@@ -0,0 +1,104 @@
+// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved.
+// See the file LICENSE for licensing terms.
+
+package create
+
+import (
+	"math"
+
+	"github.com/spf13/pflag"
+
+	"github.com/ava-labs/avalanchego/genesis"
+	"github.com/ava-labs/avalanchego/ids"
+	"github.com/ava-labs/avalanchego/utils/crypto/secp256k1"
+	"github.com/ava-labs/avalanchego/wallet/subnet/primary"
+)
+
+const (
+	URIKey        = "uri"
+	SubnetIDKey   = "subnet-id"
+	AddressKey    = "address"
+	BalanceKey    = "balance"
+	NameKey       = "name"
+	PrivateKeyKey = "private-key"
+)
+
+func AddFlags(flags *pflag.FlagSet) {
+	flags.String(URIKey, primary.LocalAPIURI, "API URI to use to issue the chain creation transaction")
+	flags.String(SubnetIDKey, "", "Subnet to create the chain under")
+	flags.String(AddressKey, genesis.EWOQKey.Address().String(), "Address to fund in the genesis")
+	flags.Uint64(BalanceKey, math.MaxUint64, "Amount to provide the funded address in the genesis")
+	flags.String(NameKey, "xs", "Name of the chain to create")
+	flags.String(PrivateKeyKey, genesis.EWOQKeyFormattedStr, "Private key to use when creating the new chain")
+}
+
+type Config struct {
+	URI        string
+	SubnetID   ids.ID
+	Address    ids.ShortID
+	Balance    uint64
+	Name       string
+	PrivateKey *secp256k1.PrivateKey
+}
+
+func ParseFlags(flags *pflag.FlagSet, args []string) (*Config, error) {
+	// Parse exactly once; re-parsing the same FlagSet is redundant.
+	if err := flags.Parse(args); err != nil {
+		return nil, err
+	}
+
+	uri, err := flags.GetString(URIKey)
+	if err != nil {
+		return nil, err
+	}
+
+	subnetIDStr, err := flags.GetString(SubnetIDKey)
+	if err != nil {
+		return nil, err
+	}
+
+	subnetID, err := ids.FromString(subnetIDStr)
+	if err != nil {
+		return nil, err
+	}
+
+	addrStr, err := flags.GetString(AddressKey)
+	if err != nil {
+		return nil, err
+	}
+
+	addr, err := ids.ShortFromString(addrStr)
+	if err != nil {
+		return nil, err
+	}
+
+	balance, err := flags.GetUint64(BalanceKey)
+	if err != nil {
+		return nil, err
+	}
+
+	name, err := flags.GetString(NameKey)
+	if err != nil {
+		return nil, err
+	}
+
+	skStr, err := flags.GetString(PrivateKeyKey)
+	if err != nil {
+		return nil, err
+	}
+
+	var sk secp256k1.PrivateKey
+	err = sk.UnmarshalText([]byte(`"` + skStr + `"`))
+	if err != nil {
+		return nil, err
+	}
+
+	return &Config{
+		URI:        uri,
+		SubnetID:   subnetID,
+		Address:    addr,
+		Balance:    balance,
+		Name:       name,
+		PrivateKey: &sk,
+	}, nil
+}
diff --git a/avalanchego/vms/example/xsvm/cmd/chain/genesis/cmd.go b/avalanchego/vms/example/xsvm/cmd/chain/genesis/cmd.go
new file mode 100644
index 00000000..be839fce
--- /dev/null
+++ b/avalanchego/vms/example/xsvm/cmd/chain/genesis/cmd.go
@@ -0,0 +1,56 @@
+// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved.
+// See the file LICENSE for licensing terms.
+
+package genesis
+
+import (
+	"errors"
+	"fmt"
+	"os"
+
+	"github.com/spf13/cobra"
+
+	"github.com/ava-labs/avalanchego/utils/formatting"
+	"github.com/ava-labs/avalanchego/vms/example/xsvm/genesis"
+)
+
+var errUnknownEncoding = errors.New("unknown encoding")
+
+func Command() *cobra.Command {
+	c := &cobra.Command{
+		Use:   "genesis",
+		Short: "Creates a chain's genesis and prints it to stdout",
+		RunE:  genesisFunc,
+	}
+	flags := c.Flags()
+	AddFlags(flags)
+	return c
+}
+
+func genesisFunc(c *cobra.Command, args []string) error {
+	flags := c.Flags()
+	config, err := ParseFlags(flags, args)
+	if err != nil {
+		return err
+	}
+
+	genesisBytes, err := genesis.Codec.Marshal(genesis.CodecVersion, config.Genesis)
+	if err != nil {
+		return err
+	}
+
+	switch config.Encoding {
+	case binaryEncoding:
+		_, err = os.Stdout.Write(genesisBytes)
+		return err
+	case hexEncoding:
+		encoded, err := formatting.Encode(formatting.Hex, genesisBytes)
+		if err != nil {
+			return err
+		}
+		_, err = fmt.Println(encoded)
+		return err
+	default:
+		return fmt.Errorf("%w: %q", errUnknownEncoding, config.Encoding)
+	}
+}
diff --git a/avalanchego/vms/example/xsvm/cmd/chain/genesis/flags.go b/avalanchego/vms/example/xsvm/cmd/chain/genesis/flags.go
new file mode 100644
index 00000000..0bacf0ed
--- 
/dev/null +++ b/avalanchego/vms/example/xsvm/cmd/chain/genesis/flags.go @@ -0,0 +1,83 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package genesis + +import ( + "fmt" + "math" + "time" + + "github.com/spf13/pflag" + + "github.com/ava-labs/avalanchego/genesis" + "github.com/ava-labs/avalanchego/ids" + + xsgenesis "github.com/ava-labs/avalanchego/vms/example/xsvm/genesis" +) + +const ( + TimeKey = "time" + AddressKey = "address" + BalanceKey = "balance" + EncodingKey = "encoding" + + binaryEncoding = "binary" + hexEncoding = "hex" +) + +func AddFlags(flags *pflag.FlagSet) { + flags.Int64(TimeKey, time.Now().Unix(), "Unix timestamp to include in the genesis") + flags.String(AddressKey, genesis.EWOQKey.Address().String(), "Address to fund in the genesis") + flags.Uint64(BalanceKey, math.MaxUint64, "Amount to provide the funded address in the genesis") + flags.String(EncodingKey, hexEncoding, fmt.Sprintf("Encoding to use for the genesis. 
Available values: %s or %s", hexEncoding, binaryEncoding)) +} + +type Config struct { + Genesis *xsgenesis.Genesis + Encoding string +} + +func ParseFlags(flags *pflag.FlagSet, args []string) (*Config, error) { + if err := flags.Parse(args); err != nil { + return nil, err + } + + timestamp, err := flags.GetInt64(TimeKey) + if err != nil { + return nil, err + } + + addrStr, err := flags.GetString(AddressKey) + if err != nil { + return nil, err + } + + addr, err := ids.ShortFromString(addrStr) + if err != nil { + return nil, err + } + + balance, err := flags.GetUint64(BalanceKey) + if err != nil { + return nil, err + } + + encoding, err := flags.GetString(EncodingKey) + if err != nil { + return nil, err + } + + return &Config{ + Genesis: &xsgenesis.Genesis{ + Timestamp: timestamp, + Allocations: []xsgenesis.Allocation{ + { + Address: addr, + Balance: balance, + }, + }, + }, + Encoding: encoding, + }, nil +} diff --git a/avalanchego/vms/example/xsvm/cmd/issue/cmd.go b/avalanchego/vms/example/xsvm/cmd/issue/cmd.go new file mode 100644 index 00000000..12c156d0 --- /dev/null +++ b/avalanchego/vms/example/xsvm/cmd/issue/cmd.go @@ -0,0 +1,25 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package issue + +import ( + "github.com/spf13/cobra" + + "github.com/ava-labs/avalanchego/vms/example/xsvm/cmd/issue/export" + "github.com/ava-labs/avalanchego/vms/example/xsvm/cmd/issue/importtx" + "github.com/ava-labs/avalanchego/vms/example/xsvm/cmd/issue/transfer" +) + +func Command() *cobra.Command { + c := &cobra.Command{ + Use: "issue", + Short: "Issues transactions", + } + c.AddCommand( + transfer.Command(), + export.Command(), + importtx.Command(), + ) + return c +} diff --git a/avalanchego/vms/example/xsvm/cmd/issue/export/cmd.go b/avalanchego/vms/example/xsvm/cmd/issue/export/cmd.go new file mode 100644 index 00000000..efde4799 --- /dev/null +++ b/avalanchego/vms/example/xsvm/cmd/issue/export/cmd.go @@ -0,0 +1,70 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package export + +import ( + "encoding/json" + "log" + "time" + + "github.com/spf13/cobra" + + "github.com/ava-labs/avalanchego/vms/example/xsvm/api" + "github.com/ava-labs/avalanchego/vms/example/xsvm/tx" +) + +func Command() *cobra.Command { + c := &cobra.Command{ + Use: "export", + Short: "Issues an export transaction", + RunE: exportFunc, + } + flags := c.Flags() + AddFlags(flags) + return c +} + +func exportFunc(c *cobra.Command, args []string) error { + flags := c.Flags() + config, err := ParseFlags(flags, args) + if err != nil { + return err + } + + ctx := c.Context() + + client := api.NewClient(config.URI, config.SourceChainID.String()) + + nonce, err := client.Nonce(ctx, config.PrivateKey.Address()) + if err != nil { + return err + } + + utx := &tx.Export{ + ChainID: config.SourceChainID, + Nonce: nonce, + MaxFee: config.MaxFee, + PeerChainID: config.DestinationChainID, + IsReturn: config.IsReturn, + Amount: config.Amount, + To: config.To, + } + stx, err := tx.Sign(utx, config.PrivateKey) + if err != nil { + return err + } + + txJSON, err := json.MarshalIndent(stx, "", " ") + if err != nil { + return err + } + + 
issueTxStartTime := time.Now() + txID, err := client.IssueTx(ctx, stx) + if err != nil { + return err + } + log.Printf("issued tx %s in %s\n%s\n", txID, time.Since(issueTxStartTime), string(txJSON)) + return nil +} diff --git a/avalanchego/vms/example/xsvm/cmd/issue/export/flags.go b/avalanchego/vms/example/xsvm/cmd/issue/export/flags.go new file mode 100644 index 00000000..6d7f4e49 --- /dev/null +++ b/avalanchego/vms/example/xsvm/cmd/issue/export/flags.go @@ -0,0 +1,125 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package export + +import ( + "github.com/spf13/pflag" + + "github.com/ava-labs/avalanchego/genesis" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + "github.com/ava-labs/avalanchego/utils/units" + "github.com/ava-labs/avalanchego/wallet/subnet/primary" +) + +const ( + URIKey = "uri" + SourceChainIDKey = "source-chain-id" + DestinationChainIDKey = "destination-chain-id" + MaxFeeKey = "max-fee" + IsReturnKey = "is-return" + AmountKey = "amount" + ToKey = "to" + PrivateKeyKey = "private-key" +) + +func AddFlags(flags *pflag.FlagSet) { + flags.String(URIKey, primary.LocalAPIURI, "API URI to use during issuance") + flags.String(SourceChainIDKey, "", "Chain to issue the transaction on") + flags.String(DestinationChainIDKey, "", "Chain to send the asset to") + flags.Uint64(MaxFeeKey, 0, "Maximum fee to spend") + flags.Bool(IsReturnKey, false, "Mark this transaction as returning funds") + flags.Uint64(AmountKey, units.Schmeckle, "Amount to send") + flags.String(ToKey, genesis.EWOQKey.Address().String(), "Destination address") + flags.String(PrivateKeyKey, genesis.EWOQKeyFormattedStr, "Private key to sign the transaction") +} + +type Config struct { + URI string + SourceChainID ids.ID + DestinationChainID ids.ID + MaxFee uint64 + IsReturn bool + Amount uint64 + To ids.ShortID + PrivateKey *secp256k1.PrivateKey +} + +func ParseFlags(flags 
*pflag.FlagSet, args []string) (*Config, error) { + if err := flags.Parse(args); err != nil { + return nil, err + } + + uri, err := flags.GetString(URIKey) + if err != nil { + return nil, err + } + + sourceChainIDStr, err := flags.GetString(SourceChainIDKey) + if err != nil { + return nil, err + } + + sourceChainID, err := ids.FromString(sourceChainIDStr) + if err != nil { + return nil, err + } + + destinationChainIDStr, err := flags.GetString(DestinationChainIDKey) + if err != nil { + return nil, err + } + + destinationChainID, err := ids.FromString(destinationChainIDStr) + if err != nil { + return nil, err + } + + maxFee, err := flags.GetUint64(MaxFeeKey) + if err != nil { + return nil, err + } + + isReturn, err := flags.GetBool(IsReturnKey) + if err != nil { + return nil, err + } + + amount, err := flags.GetUint64(AmountKey) + if err != nil { + return nil, err + } + + toStr, err := flags.GetString(ToKey) + if err != nil { + return nil, err + } + + to, err := ids.ShortFromString(toStr) + if err != nil { + return nil, err + } + + skStr, err := flags.GetString(PrivateKeyKey) + if err != nil { + return nil, err + } + + var sk secp256k1.PrivateKey + err = sk.UnmarshalText([]byte(`"` + skStr + `"`)) + if err != nil { + return nil, err + } + + return &Config{ + URI: uri, + SourceChainID: sourceChainID, + DestinationChainID: destinationChainID, + MaxFee: maxFee, + IsReturn: isReturn, + Amount: amount, + To: to, + PrivateKey: &sk, + }, nil +} diff --git a/avalanchego/vms/example/xsvm/cmd/issue/importtx/cmd.go b/avalanchego/vms/example/xsvm/cmd/issue/importtx/cmd.go new file mode 100644 index 00000000..5bf10421 --- /dev/null +++ b/avalanchego/vms/example/xsvm/cmd/issue/importtx/cmd.go @@ -0,0 +1,135 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package importtx + +import ( + "encoding/json" + "fmt" + "log" + "time" + + "github.com/spf13/cobra" + + "github.com/ava-labs/avalanchego/api/info" + "github.com/ava-labs/avalanchego/utils/crypto/bls" + "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/vms/example/xsvm/api" + "github.com/ava-labs/avalanchego/vms/example/xsvm/tx" + "github.com/ava-labs/avalanchego/vms/platformvm/warp" +) + +func Command() *cobra.Command { + c := &cobra.Command{ + Use: "import", + Short: "Issues an import transaction", + RunE: importFunc, + } + flags := c.Flags() + AddFlags(flags) + return c +} + +func importFunc(c *cobra.Command, args []string) error { + flags := c.Flags() + config, err := ParseFlags(flags, args) + if err != nil { + return err + } + + ctx := c.Context() + + var ( + // Note: here we assume the unsigned message is correct from the last + // URI in sourceURIs. In practice this shouldn't be done. + unsignedMessage *warp.UnsignedMessage + // Note: assumes that sourceURIs are all of the validators of the subnet + // and that they do not share public keys. + signatures = make([]*bls.Signature, len(config.SourceURIs)) + ) + for i, uri := range config.SourceURIs { + xsClient := api.NewClient(uri, config.SourceChainID) + + fetchStartTime := time.Now() + var rawSignature []byte + unsignedMessage, rawSignature, err = xsClient.Message(ctx, config.TxID) + if err != nil { + return fmt.Errorf("failed to fetch BLS signature from %s with: %w", uri, err) + } + + sig, err := bls.SignatureFromBytes(rawSignature) + if err != nil { + return fmt.Errorf("failed to parse BLS signature from %s with: %w", uri, err) + } + + // Note: the public key should not be fetched from the node in practice. + // The public key should be fetched from the P-chain directly. 
+ infoClient := info.NewClient(uri) + _, nodePOP, err := infoClient.GetNodeID(ctx) + if err != nil { + return fmt.Errorf("failed to fetch BLS public key from %s with: %w", uri, err) + } + + pk := nodePOP.Key() + if !bls.Verify(pk, sig, unsignedMessage.Bytes()) { + return fmt.Errorf("failed to verify BLS signature against public key from %s", uri) + } + + log.Printf("fetched BLS signature from %s in %s\n", uri, time.Since(fetchStartTime)) + signatures[i] = sig + } + + signers := set.NewBits() + for i := range signatures { + signers.Add(i) + } + signature := &warp.BitSetSignature{ + Signers: signers.Bytes(), + } + + aggSignature, err := bls.AggregateSignatures(signatures) + if err != nil { + return err + } + + aggSignatureBytes := bls.SignatureToBytes(aggSignature) + copy(signature.Signature[:], aggSignatureBytes) + + message, err := warp.NewMessage( + unsignedMessage, + signature, + ) + if err != nil { + return err + } + + client := api.NewClient(config.URI, config.DestinationChainID) + + nonce, err := client.Nonce(ctx, config.PrivateKey.Address()) + if err != nil { + return err + } + + utx := &tx.Import{ + Nonce: nonce, + MaxFee: config.MaxFee, + Message: message.Bytes(), + } + stx, err := tx.Sign(utx, config.PrivateKey) + if err != nil { + return err + } + + txJSON, err := json.MarshalIndent(stx, "", " ") + if err != nil { + return err + } + + issueTxStartTime := time.Now() + txID, err := client.IssueTx(ctx, stx) + if err != nil { + return err + } + log.Printf("issued tx %s in %s\n%s\n", txID, time.Since(issueTxStartTime), string(txJSON)) + return nil +} diff --git a/avalanchego/vms/example/xsvm/cmd/issue/importtx/flags.go b/avalanchego/vms/example/xsvm/cmd/issue/importtx/flags.go new file mode 100644 index 00000000..15b96877 --- /dev/null +++ b/avalanchego/vms/example/xsvm/cmd/issue/importtx/flags.go @@ -0,0 +1,105 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package importtx + +import ( + "github.com/spf13/pflag" + + "github.com/ava-labs/avalanchego/genesis" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + "github.com/ava-labs/avalanchego/wallet/subnet/primary" +) + +const ( + URIKey = "uri" + SourceURIsKey = "source-uris" + SourceChainIDKey = "source-chain-id" + DestinationChainIDKey = "destination-chain-id" + TxIDKey = "tx-id" + MaxFeeKey = "max-fee" + PrivateKeyKey = "private-key" +) + +func AddFlags(flags *pflag.FlagSet) { + flags.String(URIKey, primary.LocalAPIURI, "API URI to use during issuance") + flags.StringSlice(SourceURIsKey, []string{primary.LocalAPIURI}, "API URIs to use during the fetching of signatures") + flags.String(SourceChainIDKey, "", "Chain the export transaction was issued on") + flags.String(DestinationChainIDKey, "", "Chain to send the asset to") + flags.String(TxIDKey, "", "ID of the export transaction") + flags.Uint64(MaxFeeKey, 0, "Maximum fee to spend") + flags.String(PrivateKeyKey, genesis.EWOQKeyFormattedStr, "Private key to sign the transaction") +} + +type Config struct { + URI string + SourceURIs []string + SourceChainID string + DestinationChainID string + TxID ids.ID + MaxFee uint64 + PrivateKey *secp256k1.PrivateKey +} + +func ParseFlags(flags *pflag.FlagSet, args []string) (*Config, error) { + if err := flags.Parse(args); err != nil { + return nil, err + } + + uri, err := flags.GetString(URIKey) + if err != nil { + return nil, err + } + + sourceURIs, err := flags.GetStringSlice(SourceURIsKey) + if err != nil { + return nil, err + } + + sourceChainID, err := flags.GetString(SourceChainIDKey) + if err != nil { + return nil, err + } + + destinationChainID, err := flags.GetString(DestinationChainIDKey) + if err != nil { + return nil, err + } + + txIDStr, err := flags.GetString(TxIDKey) + if err != nil { + return nil, err + } + + txID, err := ids.FromString(txIDStr) + if err != nil { + return nil, err + } + + maxFee, err := 
flags.GetUint64(MaxFeeKey) + if err != nil { + return nil, err + } + + skStr, err := flags.GetString(PrivateKeyKey) + if err != nil { + return nil, err + } + + var sk secp256k1.PrivateKey + err = sk.UnmarshalText([]byte(`"` + skStr + `"`)) + if err != nil { + return nil, err + } + + return &Config{ + URI: uri, + SourceURIs: sourceURIs, + SourceChainID: sourceChainID, + DestinationChainID: destinationChainID, + TxID: txID, + MaxFee: maxFee, + PrivateKey: &sk, + }, nil +} diff --git a/avalanchego/vms/example/xsvm/cmd/issue/transfer/cmd.go b/avalanchego/vms/example/xsvm/cmd/issue/transfer/cmd.go new file mode 100644 index 00000000..86c47032 --- /dev/null +++ b/avalanchego/vms/example/xsvm/cmd/issue/transfer/cmd.go @@ -0,0 +1,69 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package transfer + +import ( + "encoding/json" + "log" + "time" + + "github.com/spf13/cobra" + + "github.com/ava-labs/avalanchego/vms/example/xsvm/api" + "github.com/ava-labs/avalanchego/vms/example/xsvm/tx" +) + +func Command() *cobra.Command { + c := &cobra.Command{ + Use: "transfer", + Short: "Issues a transfer transaction", + RunE: transferFunc, + } + flags := c.Flags() + AddFlags(flags) + return c +} + +func transferFunc(c *cobra.Command, args []string) error { + flags := c.Flags() + config, err := ParseFlags(flags, args) + if err != nil { + return err + } + + ctx := c.Context() + + client := api.NewClient(config.URI, config.ChainID.String()) + + nonce, err := client.Nonce(ctx, config.PrivateKey.Address()) + if err != nil { + return err + } + + utx := &tx.Transfer{ + ChainID: config.ChainID, + Nonce: nonce, + MaxFee: config.MaxFee, + AssetID: config.AssetID, + Amount: config.Amount, + To: config.To, + } + stx, err := tx.Sign(utx, config.PrivateKey) + if err != nil { + return err + } + + txJSON, err := json.MarshalIndent(stx, "", " ") + if err != nil { + return err + } + + issueTxStartTime := time.Now() + txID, err := 
client.IssueTx(ctx, stx) + if err != nil { + return err + } + log.Printf("issued tx %s in %s\n%s\n", txID, time.Since(issueTxStartTime), string(txJSON)) + return nil +} diff --git a/avalanchego/vms/example/xsvm/cmd/issue/transfer/flags.go b/avalanchego/vms/example/xsvm/cmd/issue/transfer/flags.go new file mode 100644 index 00000000..043c0724 --- /dev/null +++ b/avalanchego/vms/example/xsvm/cmd/issue/transfer/flags.go @@ -0,0 +1,119 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package transfer + +import ( + "github.com/spf13/pflag" + + "github.com/ava-labs/avalanchego/genesis" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + "github.com/ava-labs/avalanchego/utils/units" + "github.com/ava-labs/avalanchego/wallet/subnet/primary" +) + +const ( + URIKey = "uri" + ChainIDKey = "chain-id" + MaxFeeKey = "max-fee" + AssetIDKey = "asset-id" + AmountKey = "amount" + ToKey = "to" + PrivateKeyKey = "private-key" +) + +func AddFlags(flags *pflag.FlagSet) { + flags.String(URIKey, primary.LocalAPIURI, "API URI to use during issuance") + flags.String(ChainIDKey, "", "Chain to issue the transaction on") + flags.Uint64(MaxFeeKey, 0, "Maximum fee to spend") + flags.String(AssetIDKey, "[chain-id]", "Asset to send") + flags.Uint64(AmountKey, units.Schmeckle, "Amount to send") + flags.String(ToKey, genesis.EWOQKey.Address().String(), "Destination address") + flags.String(PrivateKeyKey, genesis.EWOQKeyFormattedStr, "Private key to sign the transaction") +} + +type Config struct { + URI string + ChainID ids.ID + MaxFee uint64 + AssetID ids.ID + Amount uint64 + To ids.ShortID + PrivateKey *secp256k1.PrivateKey +} + +func ParseFlags(flags *pflag.FlagSet, args []string) (*Config, error) { + if err := flags.Parse(args); err != nil { + return nil, err + } + + uri, err := flags.GetString(URIKey) + if err != nil { + return nil, err + } + + chainIDStr, err := 
flags.GetString(ChainIDKey) + if err != nil { + return nil, err + } + + chainID, err := ids.FromString(chainIDStr) + if err != nil { + return nil, err + } + + maxFee, err := flags.GetUint64(MaxFeeKey) + if err != nil { + return nil, err + } + + assetID := chainID + if flags.Changed(AssetIDKey) { + assetIDStr, err := flags.GetString(AssetIDKey) + if err != nil { + return nil, err + } + + assetID, err = ids.FromString(assetIDStr) + if err != nil { + return nil, err + } + } + + amount, err := flags.GetUint64(AmountKey) + if err != nil { + return nil, err + } + + toStr, err := flags.GetString(ToKey) + if err != nil { + return nil, err + } + + to, err := ids.ShortFromString(toStr) + if err != nil { + return nil, err + } + + skStr, err := flags.GetString(PrivateKeyKey) + if err != nil { + return nil, err + } + + var sk secp256k1.PrivateKey + err = sk.UnmarshalText([]byte(`"` + skStr + `"`)) + if err != nil { + return nil, err + } + + return &Config{ + URI: uri, + ChainID: chainID, + MaxFee: maxFee, + AssetID: assetID, + Amount: amount, + To: to, + PrivateKey: &sk, + }, nil +} diff --git a/avalanchego/vms/example/xsvm/cmd/run/cmd.go b/avalanchego/vms/example/xsvm/cmd/run/cmd.go new file mode 100644 index 00000000..eace7e85 --- /dev/null +++ b/avalanchego/vms/example/xsvm/cmd/run/cmd.go @@ -0,0 +1,25 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package run + +import ( + "context" + + "github.com/spf13/cobra" + + "github.com/ava-labs/avalanchego/vms/example/xsvm" + "github.com/ava-labs/avalanchego/vms/rpcchainvm" +) + +func Command() *cobra.Command { + return &cobra.Command{ + Use: "xsvm", + Short: "Runs an XSVM plugin", + RunE: runFunc, + } +} + +func runFunc(*cobra.Command, []string) error { + return rpcchainvm.Serve(context.Background(), &xsvm.VM{}) +} diff --git a/avalanchego/vms/example/xsvm/cmd/version/cmd.go b/avalanchego/vms/example/xsvm/cmd/version/cmd.go new file mode 100644 index 00000000..1c956c6a --- /dev/null +++ b/avalanchego/vms/example/xsvm/cmd/version/cmd.go @@ -0,0 +1,38 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package version + +import ( + "fmt" + + "github.com/spf13/cobra" + + "github.com/ava-labs/avalanchego/version" + "github.com/ava-labs/avalanchego/vms/example/xsvm" +) + +const format = `%s: + VMID: %s + Version: %s + Plugin Version: %d +` + +func Command() *cobra.Command { + return &cobra.Command{ + Use: "version", + Short: "Prints out the version", + RunE: versionFunc, + } +} + +func versionFunc(*cobra.Command, []string) error { + fmt.Printf( + format, + xsvm.Name, + xsvm.ID, + xsvm.Version, + version.RPCChainVMProtocol, + ) + return nil +} diff --git a/avalanchego/vms/example/xsvm/cmd/xsvm/main.go b/avalanchego/vms/example/xsvm/cmd/xsvm/main.go new file mode 100644 index 00000000..c6961a8c --- /dev/null +++ b/avalanchego/vms/example/xsvm/cmd/xsvm/main.go @@ -0,0 +1,37 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package main + +import ( + "context" + "fmt" + "os" + + "github.com/spf13/cobra" + + "github.com/ava-labs/avalanchego/vms/example/xsvm/cmd/account" + "github.com/ava-labs/avalanchego/vms/example/xsvm/cmd/chain" + "github.com/ava-labs/avalanchego/vms/example/xsvm/cmd/issue" + "github.com/ava-labs/avalanchego/vms/example/xsvm/cmd/run" + "github.com/ava-labs/avalanchego/vms/example/xsvm/cmd/version" +) + +func init() { + cobra.EnablePrefixMatching = true +} + +func main() { + cmd := run.Command() + cmd.AddCommand( + account.Command(), + chain.Command(), + issue.Command(), + version.Command(), + ) + ctx := context.Background() + if err := cmd.ExecuteContext(ctx); err != nil { + fmt.Fprintf(os.Stderr, "command failed %v\n", err) + os.Exit(1) + } +} diff --git a/avalanchego/vms/example/xsvm/constants.go b/avalanchego/vms/example/xsvm/constants.go new file mode 100644 index 00000000..eb219921 --- /dev/null +++ b/avalanchego/vms/example/xsvm/constants.go @@ -0,0 +1,21 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package xsvm + +import ( + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/version" +) + +const Name = "xsvm" + +var ( + ID = ids.ID{'x', 's', 'v', 'm'} + + Version = &version.Semantic{ + Major: 1, + Minor: 0, + Patch: 4, + } +) diff --git a/avalanchego/vms/example/xsvm/execute/block.go b/avalanchego/vms/example/xsvm/execute/block.go new file mode 100644 index 00000000..b2938a58 --- /dev/null +++ b/avalanchego/vms/example/xsvm/execute/block.go @@ -0,0 +1,71 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package execute + +import ( + "context" + "errors" + + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/vms/example/xsvm/state" + + smblock "github.com/ava-labs/avalanchego/snow/engine/snowman/block" + xsblock "github.com/ava-labs/avalanchego/vms/example/xsvm/block" +) + +var errNoTxs = errors.New("no transactions") + +func Block( + ctx context.Context, + chainContext *snow.Context, + db database.KeyValueReaderWriterDeleter, + skipVerify bool, + blockContext *smblock.Context, + blk *xsblock.Stateless, +) error { + if len(blk.Txs) == 0 { + return errNoTxs + } + + for _, currentTx := range blk.Txs { + txID, err := currentTx.ID() + if err != nil { + return err + } + sender, err := currentTx.SenderID() + if err != nil { + return err + } + txExecutor := Tx{ + Context: ctx, + ChainContext: chainContext, + Database: db, + SkipVerify: skipVerify, + BlockContext: blockContext, + TxID: txID, + Sender: sender, + // TODO: populate fees + } + if err := currentTx.Unsigned.Visit(&txExecutor); err != nil { + return err + } + } + + blkID, err := blk.ID() + if err != nil { + return err + } + + if err := state.SetLastAccepted(db, blkID); err != nil { + return err + } + + blkBytes, err := xsblock.Codec.Marshal(xsblock.CodecVersion, blk) + if err != nil { + return err + } + + return state.AddBlock(db, blk.Height, blkID, blkBytes) +} diff --git a/avalanchego/vms/example/xsvm/execute/expects_context.go b/avalanchego/vms/example/xsvm/execute/expects_context.go new file mode 100644 index 00000000..da21b520 --- /dev/null +++ b/avalanchego/vms/example/xsvm/execute/expects_context.go @@ -0,0 +1,38 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package execute + +import ( + "github.com/ava-labs/avalanchego/vms/example/xsvm/block" + "github.com/ava-labs/avalanchego/vms/example/xsvm/tx" +) + +var _ tx.Visitor = (*TxExpectsContext)(nil) + +func ExpectsContext(blk *block.Stateless) (bool, error) { + t := TxExpectsContext{} + for _, tx := range blk.Txs { + if err := tx.Unsigned.Visit(&t); err != nil { + return false, err + } + } + return t.Result, nil +} + +type TxExpectsContext struct { + Result bool +} + +func (*TxExpectsContext) Transfer(*tx.Transfer) error { + return nil +} + +func (*TxExpectsContext) Export(*tx.Export) error { + return nil +} + +func (t *TxExpectsContext) Import(*tx.Import) error { + t.Result = true + return nil +} diff --git a/avalanchego/vms/example/xsvm/execute/genesis.go b/avalanchego/vms/example/xsvm/execute/genesis.go new file mode 100644 index 00000000..889432d3 --- /dev/null +++ b/avalanchego/vms/example/xsvm/execute/genesis.go @@ -0,0 +1,51 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package execute + +import ( + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/vms/example/xsvm/block" + "github.com/ava-labs/avalanchego/vms/example/xsvm/genesis" + "github.com/ava-labs/avalanchego/vms/example/xsvm/state" +) + +func Genesis(db database.KeyValueReaderWriterDeleter, chainID ids.ID, g *genesis.Genesis) error { + isInitialized, err := state.IsInitialized(db) + if err != nil { + return err + } + if isInitialized { + return nil + } + + blk, err := genesis.Block(g) + if err != nil { + return err + } + + for _, allocation := range g.Allocations { + if err := state.SetBalance(db, allocation.Address, chainID, allocation.Balance); err != nil { + return err + } + } + + blkID, err := blk.ID() + if err != nil { + return err + } + + blkBytes, err := block.Codec.Marshal(block.CodecVersion, blk) + if err != nil { + return err + } + + if err := state.AddBlock(db, blk.Height, blkID, blkBytes); err != nil { + return err + } + if err := state.SetLastAccepted(db, blkID); err != nil { + return err + } + return state.SetInitialized(db) +} diff --git a/avalanchego/vms/example/xsvm/execute/tx.go b/avalanchego/vms/example/xsvm/execute/tx.go new file mode 100644 index 00000000..f3f6ad50 --- /dev/null +++ b/avalanchego/vms/example/xsvm/execute/tx.go @@ -0,0 +1,177 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package execute + +import ( + "context" + "errors" + + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/snow/engine/snowman/block" + "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/hashing" + "github.com/ava-labs/avalanchego/utils/wrappers" + "github.com/ava-labs/avalanchego/vms/example/xsvm/state" + "github.com/ava-labs/avalanchego/vms/example/xsvm/tx" + "github.com/ava-labs/avalanchego/vms/platformvm/warp" +) + +const ( + QuorumNumerator = 2 + QuorumDenominator = 3 +) + +var ( + _ tx.Visitor = (*Tx)(nil) + + errFeeTooHigh = errors.New("fee too high") + errWrongChainID = errors.New("wrong chainID") + errMissingBlockContext = errors.New("missing block context") + errDuplicateImport = errors.New("duplicate import") +) + +type Tx struct { + Context context.Context + ChainContext *snow.Context + Database database.KeyValueReaderWriterDeleter + + SkipVerify bool + BlockContext *block.Context + + TxID ids.ID + Sender ids.ShortID + TransferFee uint64 + ExportFee uint64 + ImportFee uint64 +} + +func (t *Tx) Transfer(tf *tx.Transfer) error { + if tf.MaxFee < t.TransferFee { + return errFeeTooHigh + } + if tf.ChainID != t.ChainContext.ChainID { + return errWrongChainID + } + + return utils.Err( + state.IncrementNonce(t.Database, t.Sender, tf.Nonce), + state.DecreaseBalance(t.Database, t.Sender, tf.ChainID, t.TransferFee), + state.DecreaseBalance(t.Database, t.Sender, tf.AssetID, tf.Amount), + state.IncreaseBalance(t.Database, tf.To, tf.AssetID, tf.Amount), + ) +} + +func (t *Tx) Export(e *tx.Export) error { + if e.MaxFee < t.ExportFee { + return errFeeTooHigh + } + if e.ChainID != t.ChainContext.ChainID { + return errWrongChainID + } + + payload, err := tx.NewPayload( + t.Sender, + e.Nonce, + e.IsReturn, + e.Amount, + e.To, + ) + if err != nil { + return err + } + + message, err := warp.NewUnsignedMessage( + 
t.ChainContext.NetworkID, + e.ChainID, + payload.Bytes(), + ) + if err != nil { + return err + } + + var errs wrappers.Errs + errs.Add( + state.IncrementNonce(t.Database, t.Sender, e.Nonce), + state.DecreaseBalance(t.Database, t.Sender, e.ChainID, t.ExportFee), + ) + + if e.IsReturn { + errs.Add( + state.DecreaseBalance(t.Database, t.Sender, e.PeerChainID, e.Amount), + ) + } else { + errs.Add( + state.DecreaseBalance(t.Database, t.Sender, e.ChainID, e.Amount), + state.IncreaseLoan(t.Database, e.PeerChainID, e.Amount), + ) + } + + errs.Add( + state.SetMessage(t.Database, t.TxID, message), + ) + return errs.Err +} + +func (t *Tx) Import(i *tx.Import) error { + if i.MaxFee < t.ImportFee { + return errFeeTooHigh + } + if t.BlockContext == nil { + return errMissingBlockContext + } + + message, err := warp.ParseMessage(i.Message) + if err != nil { + return err + } + + var errs wrappers.Errs + errs.Add( + state.IncrementNonce(t.Database, t.Sender, i.Nonce), + state.DecreaseBalance(t.Database, t.Sender, t.ChainContext.ChainID, t.ImportFee), + ) + + payload, err := tx.ParsePayload(message.Payload) + if err != nil { + return err + } + + if payload.IsReturn { + errs.Add( + state.IncreaseBalance(t.Database, payload.To, t.ChainContext.ChainID, payload.Amount), + state.DecreaseLoan(t.Database, message.SourceChainID, payload.Amount), + ) + } else { + errs.Add( + state.IncreaseBalance(t.Database, payload.To, message.SourceChainID, payload.Amount), + ) + } + + var loanID ids.ID = hashing.ComputeHash256Array(message.UnsignedMessage.Bytes()) + hasLoanID, err := state.HasLoanID(t.Database, message.SourceChainID, loanID) + if hasLoanID { + return errDuplicateImport + } + + errs.Add( + err, + state.AddLoanID(t.Database, message.SourceChainID, loanID), + ) + + if t.SkipVerify || errs.Errored() { + return errs.Err + } + + return message.Signature.Verify( + t.Context, + &message.UnsignedMessage, + t.ChainContext.NetworkID, + t.ChainContext.ValidatorState, + t.BlockContext.PChainHeight, + 
QuorumNumerator, + QuorumDenominator, + ) +} diff --git a/avalanchego/vms/example/xsvm/factory.go b/avalanchego/vms/example/xsvm/factory.go new file mode 100644 index 00000000..99d33b82 --- /dev/null +++ b/avalanchego/vms/example/xsvm/factory.go @@ -0,0 +1,17 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package xsvm + +import ( + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/vms" +) + +var _ vms.Factory = (*Factory)(nil) + +type Factory struct{} + +func (*Factory) New(logging.Logger) (interface{}, error) { + return &VM{}, nil +} diff --git a/avalanchego/vms/example/xsvm/genesis/codec.go b/avalanchego/vms/example/xsvm/genesis/codec.go new file mode 100644 index 00000000..c0851ccc --- /dev/null +++ b/avalanchego/vms/example/xsvm/genesis/codec.go @@ -0,0 +1,10 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package genesis + +import "github.com/ava-labs/avalanchego/vms/example/xsvm/block" + +const CodecVersion = block.CodecVersion + +var Codec = block.Codec diff --git a/avalanchego/vms/example/xsvm/genesis/genesis.go b/avalanchego/vms/example/xsvm/genesis/genesis.go new file mode 100644 index 00000000..0fb420f3 --- /dev/null +++ b/avalanchego/vms/example/xsvm/genesis/genesis.go @@ -0,0 +1,37 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package genesis + +import ( + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/hashing" + "github.com/ava-labs/avalanchego/vms/example/xsvm/block" +) + +type Genesis struct { + Timestamp int64 `serialize:"true" json:"timestamp"` + Allocations []Allocation `serialize:"true" json:"allocations"` +} + +type Allocation struct { + Address ids.ShortID `serialize:"true" json:"address"` + Balance uint64 `serialize:"true" json:"balance"` +} + +func Parse(bytes []byte) (*Genesis, error) { + genesis := &Genesis{} + _, err := Codec.Unmarshal(bytes, genesis) + return genesis, err +} + +func Block(genesis *Genesis) (*block.Stateless, error) { + bytes, err := Codec.Marshal(CodecVersion, genesis) + if err != nil { + return nil, err + } + return &block.Stateless{ + ParentID: hashing.ComputeHash256Array(bytes), + Timestamp: genesis.Timestamp, + }, nil +} diff --git a/avalanchego/vms/example/xsvm/genesis/genesis_test.go b/avalanchego/vms/example/xsvm/genesis/genesis_test.go new file mode 100644 index 00000000..ba050d12 --- /dev/null +++ b/avalanchego/vms/example/xsvm/genesis/genesis_test.go @@ -0,0 +1,35 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package genesis + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" +) + +func TestGenesis(t *testing.T) { + require := require.New(t) + + id, err := ids.ShortFromString("6Y3kysjF9jnHnYkdS9yGAuoHyae2eNmeV") + require.NoError(err) + id2, err := ids.ShortFromString("LeKrndtsMxcLMzHz3w4uo1XtLDpfi66c") + require.NoError(err) + + genesis := Genesis{ + Timestamp: 123, + Allocations: []Allocation{ + {Address: id, Balance: 1000000000}, + {Address: id2, Balance: 3000000000}, + }, + } + bytes, err := Codec.Marshal(CodecVersion, genesis) + require.NoError(err) + + parsed, err := Parse(bytes) + require.NoError(err) + require.Equal(genesis, *parsed) +} diff --git a/avalanchego/vms/example/xsvm/state/keys.go b/avalanchego/vms/example/xsvm/state/keys.go new file mode 100644 index 00000000..e0df1e36 --- /dev/null +++ b/avalanchego/vms/example/xsvm/state/keys.go @@ -0,0 +1,25 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package state + +var ( + initializedKey = []byte{} + blockPrefix = []byte{0x00} + addressPrefix = []byte{0x01} + chainPrefix = []byte{0x02} + messagePrefix = []byte{0x03} +) + +func Flatten[T any](slices ...[]T) []T { + var size int + for _, slice := range slices { + size += len(slice) + } + + result := make([]T, 0, size) + for _, slice := range slices { + result = append(result, slice...) + } + return result +} diff --git a/avalanchego/vms/example/xsvm/state/storage.go b/avalanchego/vms/example/xsvm/state/storage.go new file mode 100644 index 00000000..48234e96 --- /dev/null +++ b/avalanchego/vms/example/xsvm/state/storage.go @@ -0,0 +1,210 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package state + +import ( + "errors" + + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/math" + "github.com/ava-labs/avalanchego/vms/platformvm/warp" +) + +var ( + errWrongNonce = errors.New("wrong nonce") + errInsufficientBalance = errors.New("insufficient balance") +) + +/* + * VMDB + * |-- initializedKey -> nil + * |-. blocks + * | |-- lastAcceptedKey -> blockID + * | |-- height -> blockID + * | '-- blockID -> block bytes + * |-. addresses + * | '-- addressID -> nonce + * | '-- addressID + chainID -> balance + * |-. chains + * | |-- chainID -> balance + * | '-- chainID + loanID -> nil + * '-. message + * '-- txID -> message bytes + */ + +// Chain state + +func IsInitialized(db database.KeyValueReader) (bool, error) { + return db.Has(initializedKey) +} + +func SetInitialized(db database.KeyValueWriter) error { + return db.Put(initializedKey, nil) +} + +// Block state + +func GetLastAccepted(db database.KeyValueReader) (ids.ID, error) { + return database.GetID(db, blockPrefix) +} + +func SetLastAccepted(db database.KeyValueWriter, blkID ids.ID) error { + return database.PutID(db, blockPrefix, blkID) +} + +func GetBlockIDByHeight(db database.KeyValueReader, height uint64) (ids.ID, error) { + key := Flatten(blockPrefix, database.PackUInt64(height)) + return database.GetID(db, key) +} + +func GetBlock(db database.KeyValueReader, blkID ids.ID) ([]byte, error) { + key := Flatten(blockPrefix, blkID[:]) + return db.Get(key) +} + +func AddBlock(db database.KeyValueWriter, height uint64, blkID ids.ID, blk []byte) error { + heightToIDKey := Flatten(blockPrefix, database.PackUInt64(height)) + if err := database.PutID(db, heightToIDKey, blkID); err != nil { + return err + } + idToBlockKey := Flatten(blockPrefix, blkID[:]) + return db.Put(idToBlockKey, blk) +} + +// Address state + +func GetNonce(db database.KeyValueReader, address ids.ShortID) (uint64, error) { + key := Flatten(addressPrefix, 
address[:]) + nonce, err := database.GetUInt64(db, key) + if errors.Is(err, database.ErrNotFound) { + return 0, nil + } + return nonce, err +} + +func SetNonce(db database.KeyValueWriter, address ids.ShortID, nonce uint64) error { + key := Flatten(addressPrefix, address[:]) + return database.PutUInt64(db, key, nonce) +} + +func IncrementNonce(db database.KeyValueReaderWriter, address ids.ShortID, nonce uint64) error { + expectedNonce, err := GetNonce(db, address) + if err != nil { + return err + } + if nonce != expectedNonce { + return errWrongNonce + } + return SetNonce(db, address, nonce+1) +} + +func GetBalance(db database.KeyValueReader, address ids.ShortID, chainID ids.ID) (uint64, error) { + key := Flatten(addressPrefix, address[:], chainID[:]) + balance, err := database.GetUInt64(db, key) + if errors.Is(err, database.ErrNotFound) { + return 0, nil + } + return balance, err +} + +func SetBalance(db database.KeyValueWriterDeleter, address ids.ShortID, chainID ids.ID, balance uint64) error { + key := Flatten(addressPrefix, address[:], chainID[:]) + if balance == 0 { + return db.Delete(key) + } + return database.PutUInt64(db, key, balance) +} + +func DecreaseBalance(db database.KeyValueReaderWriterDeleter, address ids.ShortID, chainID ids.ID, amount uint64) error { + balance, err := GetBalance(db, address, chainID) + if err != nil { + return err + } + if balance < amount { + return errInsufficientBalance + } + return SetBalance(db, address, chainID, balance-amount) +} + +func IncreaseBalance(db database.KeyValueReaderWriterDeleter, address ids.ShortID, chainID ids.ID, amount uint64) error { + balance, err := GetBalance(db, address, chainID) + if err != nil { + return err + } + balance, err = math.Add64(balance, amount) + if err != nil { + return err + } + return SetBalance(db, address, chainID, balance) +} + +// Chain state + +func HasLoanID(db database.KeyValueReader, chainID ids.ID, loanID ids.ID) (bool, error) { + key := Flatten(chainPrefix, chainID[:], 
loanID[:]) + return db.Has(key) +} + +func AddLoanID(db database.KeyValueWriter, chainID ids.ID, loanID ids.ID) error { + key := Flatten(chainPrefix, chainID[:], loanID[:]) + return db.Put(key, nil) +} + +func GetLoan(db database.KeyValueReader, chainID ids.ID) (uint64, error) { + key := Flatten(chainPrefix, chainID[:]) + balance, err := database.GetUInt64(db, key) + if errors.Is(err, database.ErrNotFound) { + return 0, nil + } + return balance, err +} + +func SetLoan(db database.KeyValueWriterDeleter, chainID ids.ID, balance uint64) error { + key := Flatten(chainPrefix, chainID[:]) + if balance == 0 { + return db.Delete(key) + } + return database.PutUInt64(db, key, balance) +} + +func DecreaseLoan(db database.KeyValueReaderWriterDeleter, chainID ids.ID, amount uint64) error { + balance, err := GetLoan(db, chainID) + if err != nil { + return err + } + if balance < amount { + return errInsufficientBalance + } + return SetLoan(db, chainID, balance-amount) +} + +func IncreaseLoan(db database.KeyValueReaderWriterDeleter, chainID ids.ID, amount uint64) error { + balance, err := GetLoan(db, chainID) + if err != nil { + return err + } + balance, err = math.Add64(balance, amount) + if err != nil { + return err + } + return SetLoan(db, chainID, balance) +} + +// Message state + +func GetMessage(db database.KeyValueReader, txID ids.ID) (*warp.UnsignedMessage, error) { + key := Flatten(messagePrefix, txID[:]) + bytes, err := db.Get(key) + if err != nil { + return nil, err + } + return warp.ParseUnsignedMessage(bytes) +} + +func SetMessage(db database.KeyValueWriter, txID ids.ID, message *warp.UnsignedMessage) error { + key := Flatten(messagePrefix, txID[:]) + bytes := message.Bytes() + return db.Put(key, bytes) +} diff --git a/avalanchego/vms/example/xsvm/tx/codec.go b/avalanchego/vms/example/xsvm/tx/codec.go new file mode 100644 index 00000000..f61c7bf1 --- /dev/null +++ b/avalanchego/vms/example/xsvm/tx/codec.go @@ -0,0 +1,32 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. 
All rights reserved. +// See the file LICENSE for licensing terms. + +package tx + +import ( + "math" + "time" + + "github.com/ava-labs/avalanchego/codec" + "github.com/ava-labs/avalanchego/codec/linearcodec" + "github.com/ava-labs/avalanchego/utils" +) + +const CodecVersion = 0 + +var Codec codec.Manager + +func init() { + c := linearcodec.NewDefault(time.Time{}) + Codec = codec.NewManager(math.MaxInt32) + + err := utils.Err( + c.RegisterType(&Transfer{}), + c.RegisterType(&Export{}), + c.RegisterType(&Import{}), + Codec.RegisterCodec(CodecVersion, c), + ) + if err != nil { + panic(err) + } +} diff --git a/avalanchego/vms/example/xsvm/tx/export.go b/avalanchego/vms/example/xsvm/tx/export.go new file mode 100644 index 00000000..d8de16a6 --- /dev/null +++ b/avalanchego/vms/example/xsvm/tx/export.go @@ -0,0 +1,24 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package tx + +import "github.com/ava-labs/avalanchego/ids" + +var _ Unsigned = (*Export)(nil) + +type Export struct { + // ChainID provides cross chain replay protection + ChainID ids.ID `serialize:"true" json:"chainID"` + // Nonce provides internal chain replay protection + Nonce uint64 `serialize:"true" json:"nonce"` + MaxFee uint64 `serialize:"true" json:"maxFee"` + PeerChainID ids.ID `serialize:"true" json:"peerChainID"` + IsReturn bool `serialize:"true" json:"isReturn"` + Amount uint64 `serialize:"true" json:"amount"` + To ids.ShortID `serialize:"true" json:"to"` +} + +func (e *Export) Visit(v Visitor) error { + return v.Export(e) +} diff --git a/avalanchego/vms/example/xsvm/tx/import.go b/avalanchego/vms/example/xsvm/tx/import.go new file mode 100644 index 00000000..ff98b0a0 --- /dev/null +++ b/avalanchego/vms/example/xsvm/tx/import.go @@ -0,0 +1,18 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package tx + +var _ Unsigned = (*Import)(nil) + +type Import struct { + // Nonce provides internal chain replay protection + Nonce uint64 `serialize:"true" json:"nonce"` + MaxFee uint64 `serialize:"true" json:"maxFee"` + // Message includes the chainIDs to provide cross chain replay protection + Message []byte `serialize:"true" json:"message"` +} + +func (i *Import) Visit(v Visitor) error { + return v.Import(i) +} diff --git a/avalanchego/vms/example/xsvm/tx/payload.go b/avalanchego/vms/example/xsvm/tx/payload.go new file mode 100644 index 00000000..eecc2f08 --- /dev/null +++ b/avalanchego/vms/example/xsvm/tx/payload.go @@ -0,0 +1,48 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package tx + +import "github.com/ava-labs/avalanchego/ids" + +type Payload struct { + // Sender + Nonce provides replay protection + Sender ids.ShortID `serialize:"true" json:"sender"` + Nonce uint64 `serialize:"true" json:"nonce"` + IsReturn bool `serialize:"true" json:"isReturn"` + Amount uint64 `serialize:"true" json:"amount"` + To ids.ShortID `serialize:"true" json:"to"` + + bytes []byte +} + +func (p *Payload) Bytes() []byte { + return p.bytes +} + +func NewPayload( + sender ids.ShortID, + nonce uint64, + isReturn bool, + amount uint64, + to ids.ShortID, +) (*Payload, error) { + p := &Payload{ + Sender: sender, + Nonce: nonce, + IsReturn: isReturn, + Amount: amount, + To: to, + } + bytes, err := Codec.Marshal(CodecVersion, p) + p.bytes = bytes + return p, err +} + +func ParsePayload(bytes []byte) (*Payload, error) { + p := &Payload{ + bytes: bytes, + } + _, err := Codec.Unmarshal(bytes, p) + return p, err +} diff --git a/avalanchego/vms/example/xsvm/tx/transfer.go b/avalanchego/vms/example/xsvm/tx/transfer.go new file mode 100644 index 00000000..a3d29c14 --- /dev/null +++ b/avalanchego/vms/example/xsvm/tx/transfer.go @@ -0,0 +1,23 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. 
+// See the file LICENSE for licensing terms. + +package tx + +import "github.com/ava-labs/avalanchego/ids" + +var _ Unsigned = (*Transfer)(nil) + +type Transfer struct { + // ChainID provides cross chain replay protection + ChainID ids.ID `serialize:"true" json:"chainID"` + // Nonce provides internal chain replay protection + Nonce uint64 `serialize:"true" json:"nonce"` + MaxFee uint64 `serialize:"true" json:"maxFee"` + AssetID ids.ID `serialize:"true" json:"assetID"` + Amount uint64 `serialize:"true" json:"amount"` + To ids.ShortID `serialize:"true" json:"to"` +} + +func (t *Transfer) Visit(v Visitor) error { + return v.Transfer(t) +} diff --git a/avalanchego/vms/example/xsvm/tx/tx.go b/avalanchego/vms/example/xsvm/tx/tx.go new file mode 100644 index 00000000..8b05d537 --- /dev/null +++ b/avalanchego/vms/example/xsvm/tx/tx.go @@ -0,0 +1,64 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package tx + +import ( + "github.com/ava-labs/avalanchego/cache" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + "github.com/ava-labs/avalanchego/utils/hashing" +) + +var secpCache = secp256k1.RecoverCache{ + LRU: cache.LRU[ids.ID, *secp256k1.PublicKey]{ + Size: 2048, + }, +} + +type Tx struct { + Unsigned `serialize:"true" json:"unsigned"` + Signature [secp256k1.SignatureLen]byte `serialize:"true" json:"signature"` +} + +func Parse(bytes []byte) (*Tx, error) { + tx := &Tx{} + _, err := Codec.Unmarshal(bytes, tx) + return tx, err +} + +func Sign(utx Unsigned, key *secp256k1.PrivateKey) (*Tx, error) { + unsignedBytes, err := Codec.Marshal(CodecVersion, &utx) + if err != nil { + return nil, err + } + + sig, err := key.Sign(unsignedBytes) + if err != nil { + return nil, err + } + + tx := &Tx{ + Unsigned: utx, + } + copy(tx.Signature[:], sig) + return tx, nil +} + +func (tx *Tx) ID() (ids.ID, error) { + bytes, err := Codec.Marshal(CodecVersion, tx) + return 
hashing.ComputeHash256Array(bytes), err +} + +func (tx *Tx) SenderID() (ids.ShortID, error) { + unsignedBytes, err := Codec.Marshal(CodecVersion, &tx.Unsigned) + if err != nil { + return ids.ShortEmpty, err + } + + pk, err := secpCache.RecoverPublicKey(unsignedBytes, tx.Signature[:]) + if err != nil { + return ids.ShortEmpty, err + } + return pk.Address(), nil +} diff --git a/avalanchego/vms/example/xsvm/tx/unsigned.go b/avalanchego/vms/example/xsvm/tx/unsigned.go new file mode 100644 index 00000000..11061141 --- /dev/null +++ b/avalanchego/vms/example/xsvm/tx/unsigned.go @@ -0,0 +1,8 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package tx + +type Unsigned interface { + Visit(Visitor) error +} diff --git a/avalanchego/vms/example/xsvm/tx/visitor.go b/avalanchego/vms/example/xsvm/tx/visitor.go new file mode 100644 index 00000000..045b0324 --- /dev/null +++ b/avalanchego/vms/example/xsvm/tx/visitor.go @@ -0,0 +1,10 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package tx + +type Visitor interface { + Transfer(*Transfer) error + Export(*Export) error + Import(*Import) error +} diff --git a/avalanchego/vms/example/xsvm/vm.go b/avalanchego/vms/example/xsvm/vm.go new file mode 100644 index 00000000..38f25393 --- /dev/null +++ b/avalanchego/vms/example/xsvm/vm.go @@ -0,0 +1,177 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package xsvm + +import ( + "context" + "fmt" + "net/http" + + "github.com/gorilla/rpc/v2" + "go.uber.org/zap" + + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/database/versiondb" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/snow/consensus/snowman" + "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/utils/json" + "github.com/ava-labs/avalanchego/version" + "github.com/ava-labs/avalanchego/vms/example/xsvm/api" + "github.com/ava-labs/avalanchego/vms/example/xsvm/builder" + "github.com/ava-labs/avalanchego/vms/example/xsvm/chain" + "github.com/ava-labs/avalanchego/vms/example/xsvm/execute" + "github.com/ava-labs/avalanchego/vms/example/xsvm/genesis" + "github.com/ava-labs/avalanchego/vms/example/xsvm/state" + + smblock "github.com/ava-labs/avalanchego/snow/engine/snowman/block" + xsblock "github.com/ava-labs/avalanchego/vms/example/xsvm/block" +) + +var ( + _ smblock.ChainVM = (*VM)(nil) + _ smblock.BuildBlockWithContextChainVM = (*VM)(nil) +) + +type VM struct { + common.AppHandler + + chainContext *snow.Context + db database.Database + genesis *genesis.Genesis + engineChan chan<- common.Message + + chain chain.Chain + builder builder.Builder +} + +func (vm *VM) Initialize( + _ context.Context, + chainContext *snow.Context, + db database.Database, + genesisBytes []byte, + _ []byte, + _ []byte, + engineChan chan<- common.Message, + _ []*common.Fx, + _ common.AppSender, +) error { + vm.AppHandler = common.NewNoOpAppHandler(chainContext.Log) + + chainContext.Log.Info("initializing xsvm", + zap.Stringer("version", Version), + ) + + vm.chainContext = chainContext + vm.db = db + g, err := genesis.Parse(genesisBytes) + if err != nil { + return fmt.Errorf("failed to parse genesis bytes: %w", err) + } + + vdb := versiondb.New(vm.db) + if err := execute.Genesis(vdb, chainContext.ChainID, g); err != nil { + return fmt.Errorf("failed to 
initialize genesis state: %w", err) + } + if err := vdb.Commit(); err != nil { + return err + } + + vm.genesis = g + vm.engineChan = engineChan + + vm.chain, err = chain.New(chainContext, vm.db) + if err != nil { + return fmt.Errorf("failed to initialize chain manager: %w", err) + } + + vm.builder = builder.New(chainContext, engineChan, vm.chain) + + chainContext.Log.Info("initialized xsvm", + zap.Stringer("lastAcceptedID", vm.chain.LastAccepted()), + ) + return nil +} + +func (vm *VM) SetState(_ context.Context, state snow.State) error { + vm.chain.SetChainState(state) + return nil +} + +func (vm *VM) Shutdown(context.Context) error { + if vm.chainContext == nil { + return nil + } + return vm.db.Close() +} + +func (*VM) Version(context.Context) (string, error) { + return Version.String(), nil +} + +func (vm *VM) CreateHandlers(context.Context) (map[string]http.Handler, error) { + server := rpc.NewServer() + server.RegisterCodec(json.NewCodec(), "application/json") + server.RegisterCodec(json.NewCodec(), "application/json;charset=UTF-8") + api := api.NewServer( + vm.chainContext, + vm.genesis, + vm.db, + vm.chain, + vm.builder, + ) + return map[string]http.Handler{ + "": server, + }, server.RegisterService(api, Name) +} + +func (*VM) HealthCheck(context.Context) (interface{}, error) { + return http.StatusOK, nil +} + +func (*VM) Connected(context.Context, ids.NodeID, *version.Application) error { + return nil +} + +func (*VM) Disconnected(context.Context, ids.NodeID) error { + return nil +} + +func (vm *VM) GetBlock(_ context.Context, blkID ids.ID) (snowman.Block, error) { + return vm.chain.GetBlock(blkID) +} + +func (vm *VM) ParseBlock(_ context.Context, blkBytes []byte) (snowman.Block, error) { + blk, err := xsblock.Parse(blkBytes) + if err != nil { + return nil, err + } + return vm.chain.NewBlock(blk) +} + +func (vm *VM) BuildBlock(ctx context.Context) (snowman.Block, error) { + return vm.builder.BuildBlock(ctx, nil) +} + +func (vm *VM) SetPreference(_ 
context.Context, preferred ids.ID) error { + vm.builder.SetPreference(preferred) + return nil +} + +func (vm *VM) LastAccepted(context.Context) (ids.ID, error) { + return vm.chain.LastAccepted(), nil +} + +func (vm *VM) BuildBlockWithContext(ctx context.Context, blockContext *smblock.Context) (snowman.Block, error) { + return vm.builder.BuildBlock(ctx, blockContext) +} + +func (*VM) VerifyHeightIndex(context.Context) error { + return nil +} + +func (vm *VM) GetBlockIDAtHeight(_ context.Context, height uint64) (ids.ID, error) { + return state.GetBlockIDByHeight(vm.db, height) +} diff --git a/avalanchego/vms/fx/factory.go b/avalanchego/vms/fx/factory.go new file mode 100644 index 00000000..a2c957a5 --- /dev/null +++ b/avalanchego/vms/fx/factory.go @@ -0,0 +1,9 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package fx + +// Factory returns an instance of a feature extension +type Factory interface { + New() any +} diff --git a/avalanchego/vms/manager.go b/avalanchego/vms/manager.go index d1041d3a..f4ae49e3 100644 --- a/avalanchego/vms/manager.go +++ b/avalanchego/vms/manager.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package vms diff --git a/avalanchego/vms/metervm/batched_vm.go b/avalanchego/vms/metervm/batched_vm.go index dad17637..7b06f098 100644 --- a/avalanchego/vms/metervm/batched_vm.go +++ b/avalanchego/vms/metervm/batched_vm.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package metervm diff --git a/avalanchego/vms/metervm/block.go b/avalanchego/vms/metervm/block.go index 17ffffd5..10d44d2c 100644 --- a/avalanchego/vms/metervm/block.go +++ b/avalanchego/vms/metervm/block.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package metervm diff --git a/avalanchego/vms/metervm/block_metrics.go b/avalanchego/vms/metervm/block_metrics.go index 10233b72..160d0eee 100644 --- a/avalanchego/vms/metervm/block_metrics.go +++ b/avalanchego/vms/metervm/block_metrics.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package metervm @@ -23,6 +23,9 @@ type blockMetrics struct { verifyErr, accept, reject, + // Height metrics + verifyHeightIndex, + getBlockIDAtHeight, // Block verification with context metrics shouldVerifyWithContext, verifyWithContext, @@ -33,9 +36,6 @@ type blockMetrics struct { // Batched metrics getAncestors, batchedParseBlock, - // Height metrics - verifyHeightIndex, - getBlockIDAtHeight, // State sync metrics stateSyncEnabled, getOngoingSyncStateSummary, @@ -49,7 +49,6 @@ type blockMetrics struct { func (m *blockMetrics) Initialize( supportsBlockBuildingWithContext bool, supportsBatchedFetching bool, - supportsHeightIndexing bool, supportsStateSync bool, namespace string, reg prometheus.Registerer, @@ -70,6 +69,8 @@ func (m *blockMetrics) Initialize( m.shouldVerifyWithContext = newAverager(namespace, "should_verify_with_context", reg, &errs) m.verifyWithContext = newAverager(namespace, "verify_with_context", reg, &errs) m.verifyWithContextErr = newAverager(namespace, "verify_with_context_err", reg, &errs) + m.verifyHeightIndex = newAverager(namespace, "verify_height_index", reg, &errs) + m.getBlockIDAtHeight = newAverager(namespace, 
"get_block_id_at_height", reg, &errs) if supportsBlockBuildingWithContext { m.buildBlockWithContext = newAverager(namespace, "build_block_with_context", reg, &errs) @@ -79,10 +80,6 @@ func (m *blockMetrics) Initialize( m.getAncestors = newAverager(namespace, "get_ancestors", reg, &errs) m.batchedParseBlock = newAverager(namespace, "batched_parse_block", reg, &errs) } - if supportsHeightIndexing { - m.verifyHeightIndex = newAverager(namespace, "verify_height_index", reg, &errs) - m.getBlockIDAtHeight = newAverager(namespace, "get_block_id_at_height", reg, &errs) - } if supportsStateSync { m.stateSyncEnabled = newAverager(namespace, "state_sync_enabled", reg, &errs) m.getOngoingSyncStateSummary = newAverager(namespace, "get_ongoing_state_sync_summary", reg, &errs) diff --git a/avalanchego/vms/metervm/block_vm.go b/avalanchego/vms/metervm/block_vm.go index 62f2b4f8..73e94918 100644 --- a/avalanchego/vms/metervm/block_vm.go +++ b/avalanchego/vms/metervm/block_vm.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package metervm @@ -9,7 +9,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/ava-labs/avalanchego/api/metrics" - "github.com/ava-labs/avalanchego/database/manager" + "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/consensus/snowman" @@ -22,7 +22,6 @@ var ( _ block.ChainVM = (*blockVM)(nil) _ block.BuildBlockWithContextChainVM = (*blockVM)(nil) _ block.BatchedChainVM = (*blockVM)(nil) - _ block.HeightIndexedChainVM = (*blockVM)(nil) _ block.StateSyncableVM = (*blockVM)(nil) ) @@ -30,7 +29,6 @@ type blockVM struct { block.ChainVM buildBlockVM block.BuildBlockWithContextChainVM batchedVM block.BatchedChainVM - hVM block.HeightIndexedChainVM ssVM block.StateSyncableVM blockMetrics @@ -40,13 +38,11 @@ type blockVM struct { func NewBlockVM(vm block.ChainVM) block.ChainVM { buildBlockVM, _ := vm.(block.BuildBlockWithContextChainVM) batchedVM, _ := vm.(block.BatchedChainVM) - hVM, _ := vm.(block.HeightIndexedChainVM) ssVM, _ := vm.(block.StateSyncableVM) return &blockVM{ ChainVM: vm, buildBlockVM: buildBlockVM, batchedVM: batchedVM, - hVM: hVM, ssVM: ssVM, } } @@ -54,7 +50,7 @@ func NewBlockVM(vm block.ChainVM) block.ChainVM { func (vm *blockVM) Initialize( ctx context.Context, chainCtx *snow.Context, - db manager.Manager, + db database.Database, genesisBytes, upgradeBytes, configBytes []byte, @@ -66,7 +62,6 @@ func (vm *blockVM) Initialize( err := vm.blockMetrics.Initialize( vm.buildBlockVM != nil, vm.batchedVM != nil, - vm.hVM != nil, vm.ssVM != nil, "", registerer, @@ -154,3 +149,19 @@ func (vm *blockVM) LastAccepted(ctx context.Context) (ids.ID, error) { vm.blockMetrics.lastAccepted.Observe(float64(end.Sub(start))) return lastAcceptedID, err } + +func (vm *blockVM) VerifyHeightIndex(ctx context.Context) error { + start := vm.clock.Time() + err := vm.ChainVM.VerifyHeightIndex(ctx) + end := vm.clock.Time() + 
vm.blockMetrics.verifyHeightIndex.Observe(float64(end.Sub(start))) + return err +} + +func (vm *blockVM) GetBlockIDAtHeight(ctx context.Context, height uint64) (ids.ID, error) { + start := vm.clock.Time() + blockID, err := vm.ChainVM.GetBlockIDAtHeight(ctx, height) + end := vm.clock.Time() + vm.blockMetrics.getBlockIDAtHeight.Observe(float64(end.Sub(start))) + return blockID, err +} diff --git a/avalanchego/vms/metervm/build_block_with_context_vm.go b/avalanchego/vms/metervm/build_block_with_context_vm.go index 141d68e0..012237ee 100644 --- a/avalanchego/vms/metervm/build_block_with_context_vm.go +++ b/avalanchego/vms/metervm/build_block_with_context_vm.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package metervm diff --git a/avalanchego/vms/metervm/height_indexed_vm.go b/avalanchego/vms/metervm/height_indexed_vm.go deleted file mode 100644 index f13c337c..00000000 --- a/avalanchego/vms/metervm/height_indexed_vm.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package metervm - -import ( - "context" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/engine/snowman/block" -) - -func (vm *blockVM) VerifyHeightIndex(ctx context.Context) error { - if vm.hVM == nil { - return block.ErrHeightIndexedVMNotImplemented - } - - start := vm.clock.Time() - err := vm.hVM.VerifyHeightIndex(ctx) - end := vm.clock.Time() - vm.blockMetrics.verifyHeightIndex.Observe(float64(end.Sub(start))) - return err -} - -func (vm *blockVM) GetBlockIDAtHeight(ctx context.Context, height uint64) (ids.ID, error) { - if vm.hVM == nil { - return ids.Empty, block.ErrHeightIndexedVMNotImplemented - } - - start := vm.clock.Time() - blockID, err := vm.hVM.GetBlockIDAtHeight(ctx, height) - end := vm.clock.Time() - vm.blockMetrics.getBlockIDAtHeight.Observe(float64(end.Sub(start))) - return blockID, err -} diff --git a/avalanchego/vms/metervm/metrics.go b/avalanchego/vms/metervm/metrics.go index eb2c2b40..d4c9304e 100644 --- a/avalanchego/vms/metervm/metrics.go +++ b/avalanchego/vms/metervm/metrics.go @@ -1,11 +1,9 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package metervm import ( - "fmt" - "github.com/prometheus/client_golang/prometheus" "github.com/ava-labs/avalanchego/utils/metric" @@ -16,7 +14,7 @@ func newAverager(namespace, name string, reg prometheus.Registerer, errs *wrappe return metric.NewAveragerWithErrs( namespace, name, - fmt.Sprintf("time (in ns) of a %s", name), + "time (in ns) of a "+name, reg, errs, ) diff --git a/avalanchego/vms/metervm/state_syncable_vm.go b/avalanchego/vms/metervm/state_syncable_vm.go index bcb27d68..42b5efa8 100644 --- a/avalanchego/vms/metervm/state_syncable_vm.go +++ b/avalanchego/vms/metervm/state_syncable_vm.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. package metervm diff --git a/avalanchego/vms/metervm/vertex_metrics.go b/avalanchego/vms/metervm/vertex_metrics.go index e377dee2..67caa50b 100644 --- a/avalanchego/vms/metervm/vertex_metrics.go +++ b/avalanchego/vms/metervm/vertex_metrics.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package metervm @@ -11,11 +11,8 @@ import ( ) type vertexMetrics struct { - pending, parse, parseErr, - get, - getErr, verify, verifyErr, accept, @@ -27,11 +24,8 @@ func (m *vertexMetrics) Initialize( reg prometheus.Registerer, ) error { errs := wrappers.Errs{} - m.pending = newAverager(namespace, "pending_txs", reg, &errs) m.parse = newAverager(namespace, "parse_tx", reg, &errs) m.parseErr = newAverager(namespace, "parse_tx_err", reg, &errs) - m.get = newAverager(namespace, "get_tx", reg, &errs) - m.getErr = newAverager(namespace, "get_tx_err", reg, &errs) m.verify = newAverager(namespace, "verify_tx", reg, &errs) m.verifyErr = newAverager(namespace, "verify_tx_err", reg, &errs) m.accept = newAverager(namespace, "accept", reg, &errs) diff --git a/avalanchego/vms/metervm/vertex_vm.go b/avalanchego/vms/metervm/vertex_vm.go index 08268beb..8992b486 100644 --- a/avalanchego/vms/metervm/vertex_vm.go +++ b/avalanchego/vms/metervm/vertex_vm.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package metervm @@ -9,8 +9,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/ava-labs/avalanchego/api/metrics" - "github.com/ava-labs/avalanchego/database/manager" - "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/consensus/snowstorm" "github.com/ava-labs/avalanchego/snow/engine/avalanche/vertex" @@ -38,7 +37,7 @@ type vertexVM struct { func (vm *vertexVM) Initialize( ctx context.Context, chainCtx *snow.Context, - db manager.Manager, + db database.Database, genesisBytes, upgradeBytes, configBytes []byte, @@ -77,14 +76,6 @@ func (vm *vertexVM) Initialize( ) } -func (vm *vertexVM) PendingTxs(ctx context.Context) []snowstorm.Tx { - start := vm.clock.Time() - txs := vm.LinearizableVMWithEngine.PendingTxs(ctx) - end := vm.clock.Time() - vm.vertexMetrics.pending.Observe(float64(end.Sub(start))) - return txs -} - func (vm *vertexVM) ParseTx(ctx context.Context, b []byte) (snowstorm.Tx, error) { start := vm.clock.Time() tx, err := vm.LinearizableVMWithEngine.ParseTx(ctx, b) @@ -101,22 +92,6 @@ func (vm *vertexVM) ParseTx(ctx context.Context, b []byte) (snowstorm.Tx, error) }, nil } -func (vm *vertexVM) GetTx(ctx context.Context, txID ids.ID) (snowstorm.Tx, error) { - start := vm.clock.Time() - tx, err := vm.LinearizableVMWithEngine.GetTx(ctx, txID) - end := vm.clock.Time() - duration := float64(end.Sub(start)) - if err != nil { - vm.vertexMetrics.getErr.Observe(duration) - return nil, err - } - vm.vertexMetrics.get.Observe(duration) - return &meterTx{ - Tx: tx, - vm: vm, - }, nil -} - type meterTx struct { snowstorm.Tx diff --git a/avalanchego/vms/mock_manager.go b/avalanchego/vms/mock_manager.go index 021ca596..cea232ba 100644 --- a/avalanchego/vms/mock_manager.go +++ b/avalanchego/vms/mock_manager.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/vms (interfaces: Factory,Manager) +// +// Generated by this command: +// +// mockgen -package=vms -destination=vms/mock_manager.go github.com/ava-labs/avalanchego/vms Factory,Manager +// // Package vms is a generated GoMock package. package vms @@ -13,7 +15,7 @@ import ( ids "github.com/ava-labs/avalanchego/ids" logging "github.com/ava-labs/avalanchego/utils/logging" - gomock "github.com/golang/mock/gomock" + gomock "go.uber.org/mock/gomock" ) // MockFactory is a mock of Factory interface. @@ -40,16 +42,16 @@ func (m *MockFactory) EXPECT() *MockFactoryMockRecorder { } // New mocks base method. -func (m *MockFactory) New(arg0 logging.Logger) (interface{}, error) { +func (m *MockFactory) New(arg0 logging.Logger) (any, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "New", arg0) - ret0, _ := ret[0].(interface{}) + ret0, _ := ret[0].(any) ret1, _ := ret[1].(error) return ret0, ret1 } // New indicates an expected call of New. -func (mr *MockFactoryMockRecorder) New(arg0 interface{}) *gomock.Call { +func (mr *MockFactoryMockRecorder) New(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "New", reflect.TypeOf((*MockFactory)(nil).New), arg0) } @@ -86,7 +88,7 @@ func (m *MockManager) Alias(arg0 ids.ID, arg1 string) error { } // Alias indicates an expected call of Alias. -func (mr *MockManagerMockRecorder) Alias(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) Alias(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Alias", reflect.TypeOf((*MockManager)(nil).Alias), arg0, arg1) } @@ -101,7 +103,7 @@ func (m *MockManager) Aliases(arg0 ids.ID) ([]string, error) { } // Aliases indicates an expected call of Aliases. 
-func (mr *MockManagerMockRecorder) Aliases(arg0 interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) Aliases(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Aliases", reflect.TypeOf((*MockManager)(nil).Aliases), arg0) } @@ -116,7 +118,7 @@ func (m *MockManager) GetFactory(arg0 ids.ID) (Factory, error) { } // GetFactory indicates an expected call of GetFactory. -func (mr *MockManagerMockRecorder) GetFactory(arg0 interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) GetFactory(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetFactory", reflect.TypeOf((*MockManager)(nil).GetFactory), arg0) } @@ -146,7 +148,7 @@ func (m *MockManager) Lookup(arg0 string) (ids.ID, error) { } // Lookup indicates an expected call of Lookup. -func (mr *MockManagerMockRecorder) Lookup(arg0 interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) Lookup(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Lookup", reflect.TypeOf((*MockManager)(nil).Lookup), arg0) } @@ -161,7 +163,7 @@ func (m *MockManager) PrimaryAlias(arg0 ids.ID) (string, error) { } // PrimaryAlias indicates an expected call of PrimaryAlias. -func (mr *MockManagerMockRecorder) PrimaryAlias(arg0 interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) PrimaryAlias(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PrimaryAlias", reflect.TypeOf((*MockManager)(nil).PrimaryAlias), arg0) } @@ -175,7 +177,7 @@ func (m *MockManager) PrimaryAliasOrDefault(arg0 ids.ID) string { } // PrimaryAliasOrDefault indicates an expected call of PrimaryAliasOrDefault. 
-func (mr *MockManagerMockRecorder) PrimaryAliasOrDefault(arg0 interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) PrimaryAliasOrDefault(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PrimaryAliasOrDefault", reflect.TypeOf((*MockManager)(nil).PrimaryAliasOrDefault), arg0) } @@ -189,7 +191,7 @@ func (m *MockManager) RegisterFactory(arg0 context.Context, arg1 ids.ID, arg2 Fa } // RegisterFactory indicates an expected call of RegisterFactory. -func (mr *MockManagerMockRecorder) RegisterFactory(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) RegisterFactory(arg0, arg1, arg2 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterFactory", reflect.TypeOf((*MockManager)(nil).RegisterFactory), arg0, arg1, arg2) } @@ -201,7 +203,7 @@ func (m *MockManager) RemoveAliases(arg0 ids.ID) { } // RemoveAliases indicates an expected call of RemoveAliases. -func (mr *MockManagerMockRecorder) RemoveAliases(arg0 interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) RemoveAliases(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveAliases", reflect.TypeOf((*MockManager)(nil).RemoveAliases), arg0) } diff --git a/avalanchego/vms/nftfx/credential.go b/avalanchego/vms/nftfx/credential.go index 56f3ec0b..a8970b85 100644 --- a/avalanchego/vms/nftfx/credential.go +++ b/avalanchego/vms/nftfx/credential.go @@ -1,11 +1,9 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package nftfx -import ( - "github.com/ava-labs/avalanchego/vms/secp256k1fx" -) +import "github.com/ava-labs/avalanchego/vms/secp256k1fx" type Credential struct { secp256k1fx.Credential `serialize:"true"` diff --git a/avalanchego/vms/nftfx/credential_test.go b/avalanchego/vms/nftfx/credential_test.go index e27d441b..0f05af26 100644 --- a/avalanchego/vms/nftfx/credential_test.go +++ b/avalanchego/vms/nftfx/credential_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package nftfx @@ -6,12 +6,13 @@ package nftfx import ( "testing" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/vms/components/verify" ) func TestCredentialState(t *testing.T) { intf := interface{}(&Credential{}) - if _, ok := intf.(verify.State); ok { - t.Fatalf("shouldn't be marked as state") - } + _, ok := intf.(verify.State) + require.False(t, ok) } diff --git a/avalanchego/vms/nftfx/factory.go b/avalanchego/vms/nftfx/factory.go index e52d629f..c8be0366 100644 --- a/avalanchego/vms/nftfx/factory.go +++ b/avalanchego/vms/nftfx/factory.go @@ -1,16 +1,17 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package nftfx import ( "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/vms" + "github.com/ava-labs/avalanchego/vms/fx" ) +const Name = "nftfx" + var ( - _ vms.Factory = (*Factory)(nil) + _ fx.Factory = (*Factory)(nil) // ID that this Fx uses when labeled ID = ids.ID{'n', 'f', 't', 'f', 'x'} @@ -18,6 +19,6 @@ var ( type Factory struct{} -func (*Factory) New(logging.Logger) (interface{}, error) { - return &Fx{}, nil +func (*Factory) New() any { + return &Fx{} } diff --git a/avalanchego/vms/nftfx/factory_test.go b/avalanchego/vms/nftfx/factory_test.go index 83aa31d1..6b5ecafb 100644 --- a/avalanchego/vms/nftfx/factory_test.go +++ b/avalanchego/vms/nftfx/factory_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package nftfx @@ -6,14 +6,12 @@ package nftfx import ( "testing" - "github.com/ava-labs/avalanchego/utils/logging" + "github.com/stretchr/testify/require" ) func TestFactory(t *testing.T) { + require := require.New(t) + factory := Factory{} - if fx, err := factory.New(logging.NoLog{}); err != nil { - t.Fatal(err) - } else if fx == nil { - t.Fatalf("Factory.New returned nil") - } + require.Equal(&Fx{}, factory.New()) } diff --git a/avalanchego/vms/nftfx/fx.go b/avalanchego/vms/nftfx/fx.go index d11e47e4..66ea9460 100644 --- a/avalanchego/vms/nftfx/fx.go +++ b/avalanchego/vms/nftfx/fx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package nftfx @@ -7,7 +7,7 @@ import ( "bytes" "errors" - "github.com/ava-labs/avalanchego/utils/wrappers" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/vms/components/verify" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) @@ -34,15 +34,13 @@ func (fx *Fx) Initialize(vmIntf interface{}) error { log.Debug("initializing nft fx") c := fx.VM.CodecRegistry() - errs := wrappers.Errs{} - errs.Add( + return utils.Err( c.RegisterType(&MintOutput{}), c.RegisterType(&TransferOutput{}), c.RegisterType(&MintOperation{}), c.RegisterType(&TransferOperation{}), c.RegisterType(&Credential{}), ) - return errs.Err } func (fx *Fx) VerifyOperation(txIntf, opIntf, credIntf interface{}, utxosIntf []interface{}) error { diff --git a/avalanchego/vms/nftfx/fx_test.go b/avalanchego/vms/nftfx/fx_test.go index cb464dd1..1ed3426f 100644 --- a/avalanchego/vms/nftfx/fx_test.go +++ b/avalanchego/vms/nftfx/fx_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package nftfx @@ -7,6 +7,8 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/codec/linearcodec" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" @@ -37,36 +39,31 @@ var ( func TestFxInitialize(t *testing.T) { vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } fx := Fx{} - err := fx.Initialize(&vm) - if err != nil { - t.Fatal(err) - } + require.NoError(t, fx.Initialize(&vm)) } func TestFxInitializeInvalid(t *testing.T) { fx := Fx{} err := fx.Initialize(nil) - if err == nil { - t.Fatalf("Should have returned an error") - } + require.ErrorIs(t, err, secp256k1fx.ErrWrongVMType) } func TestFxVerifyMintOperation(t *testing.T) { + require := require.New(t) + vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) vm.Clk.Set(date) fx := Fx{} - if err := fx.Initialize(&vm); err != nil { - t.Fatal(err) - } + require.NoError(fx.Initialize(&vm)) tx := &secp256k1fx.TestTx{ UnsignedBytes: txBytes, } @@ -88,23 +85,21 @@ func TestFxVerifyMintOperation(t *testing.T) { } utxos := []interface{}{utxo} - if err := fx.VerifyOperation(tx, op, cred, utxos); err != nil { - t.Fatal(err) - } + require.NoError(fx.VerifyOperation(tx, op, cred, utxos)) } func TestFxVerifyMintOperationWrongTx(t *testing.T) { + require := require.New(t) + vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) vm.Clk.Set(date) fx := Fx{} - if err := fx.Initialize(&vm); err != nil { - t.Fatal(err) - } + require.NoError(fx.Initialize(&vm)) cred := &Credential{Credential: secp256k1fx.Credential{ Sigs: [][secp256k1.SignatureLen]byte{ sigBytes, @@ -123,23 +118,22 
@@ func TestFxVerifyMintOperationWrongTx(t *testing.T) { } utxos := []interface{}{utxo} - if err := fx.VerifyOperation(nil, op, cred, utxos); err == nil { - t.Fatalf("VerifyOperation should have errored due to an invalid tx") - } + err := fx.VerifyOperation(nil, op, cred, utxos) + require.ErrorIs(err, errWrongTxType) } func TestFxVerifyMintOperationWrongNumberUTXOs(t *testing.T) { + require := require.New(t) + vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) vm.Clk.Set(date) fx := Fx{} - if err := fx.Initialize(&vm); err != nil { - t.Fatal(err) - } + require.NoError(fx.Initialize(&vm)) tx := &secp256k1fx.TestTx{ UnsignedBytes: txBytes, } @@ -155,23 +149,22 @@ func TestFxVerifyMintOperationWrongNumberUTXOs(t *testing.T) { } utxos := []interface{}{} - if err := fx.VerifyOperation(tx, op, cred, utxos); err == nil { - t.Fatalf("VerifyOperation should have errored due to not enough utxos") - } + err := fx.VerifyOperation(tx, op, cred, utxos) + require.ErrorIs(err, errWrongNumberOfUTXOs) } func TestFxVerifyMintOperationWrongCredential(t *testing.T) { + require := require.New(t) + vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) vm.Clk.Set(date) fx := Fx{} - if err := fx.Initialize(&vm); err != nil { - t.Fatal(err) - } + require.NoError(fx.Initialize(&vm)) tx := &secp256k1fx.TestTx{ UnsignedBytes: txBytes, } @@ -188,23 +181,22 @@ func TestFxVerifyMintOperationWrongCredential(t *testing.T) { } utxos := []interface{}{utxo} - if err := fx.VerifyOperation(tx, op, nil, utxos); err == nil { - t.Fatalf("VerifyOperation should have errored due to a bad credential") - } + err := fx.VerifyOperation(tx, op, nil, utxos) + require.ErrorIs(err, errWrongCredentialType) } func 
TestFxVerifyMintOperationInvalidUTXO(t *testing.T) { + require := require.New(t) + vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) vm.Clk.Set(date) fx := Fx{} - if err := fx.Initialize(&vm); err != nil { - t.Fatal(err) - } + require.NoError(fx.Initialize(&vm)) tx := &secp256k1fx.TestTx{ UnsignedBytes: txBytes, } @@ -220,23 +212,22 @@ func TestFxVerifyMintOperationInvalidUTXO(t *testing.T) { } utxos := []interface{}{nil} - if err := fx.VerifyOperation(tx, op, cred, utxos); err == nil { - t.Fatalf("VerifyOperation should have errored due to an invalid utxo") - } + err := fx.VerifyOperation(tx, op, cred, utxos) + require.ErrorIs(err, errWrongUTXOType) } func TestFxVerifyMintOperationFailingVerification(t *testing.T) { + require := require.New(t) + vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) vm.Clk.Set(date) fx := Fx{} - if err := fx.Initialize(&vm); err != nil { - t.Fatal(err) - } + require.NoError(fx.Initialize(&vm)) tx := &secp256k1fx.TestTx{ UnsignedBytes: txBytes, } @@ -259,23 +250,22 @@ func TestFxVerifyMintOperationFailingVerification(t *testing.T) { } utxos := []interface{}{utxo} - if err := fx.VerifyOperation(tx, op, cred, utxos); err == nil { - t.Fatalf("VerifyOperation should have errored due to an invalid utxo output") - } + err := fx.VerifyOperation(tx, op, cred, utxos) + require.ErrorIs(err, secp256k1fx.ErrAddrsNotSortedUnique) } func TestFxVerifyMintOperationInvalidGroupID(t *testing.T) { + require := require.New(t) + vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) vm.Clk.Set(date) fx := Fx{} - if err := 
fx.Initialize(&vm); err != nil { - t.Fatal(err) - } + require.NoError(fx.Initialize(&vm)) tx := &secp256k1fx.TestTx{ UnsignedBytes: txBytes, } @@ -298,23 +288,22 @@ func TestFxVerifyMintOperationInvalidGroupID(t *testing.T) { } utxos := []interface{}{utxo} - if err := fx.VerifyOperation(tx, op, cred, utxos); err == nil { - t.Fatalf("VerifyOperation should have errored due to an invalid Group ID") - } + err := fx.VerifyOperation(tx, op, cred, utxos) + require.ErrorIs(err, errWrongUniqueID) } func TestFxVerifyTransferOperation(t *testing.T) { + require := require.New(t) + vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) vm.Clk.Set(date) fx := Fx{} - if err := fx.Initialize(&vm); err != nil { - t.Fatal(err) - } + require.NoError(fx.Initialize(&vm)) tx := &secp256k1fx.TestTx{ UnsignedBytes: txBytes, } @@ -350,23 +339,21 @@ func TestFxVerifyTransferOperation(t *testing.T) { } utxos := []interface{}{utxo} - if err := fx.VerifyOperation(tx, op, cred, utxos); err != nil { - t.Fatal(err) - } + require.NoError(fx.VerifyOperation(tx, op, cred, utxos)) } func TestFxVerifyTransferOperationWrongUTXO(t *testing.T) { + require := require.New(t) + vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) vm.Clk.Set(date) fx := Fx{} - if err := fx.Initialize(&vm); err != nil { - t.Fatal(err) - } + require.NoError(fx.Initialize(&vm)) tx := &secp256k1fx.TestTx{ UnsignedBytes: txBytes, } @@ -392,23 +379,22 @@ func TestFxVerifyTransferOperationWrongUTXO(t *testing.T) { } utxos := []interface{}{nil} - if err := fx.VerifyOperation(tx, op, cred, utxos); err == nil { - t.Fatalf("VerifyOperation should have errored due to an invalid utxo") - } + err := fx.VerifyOperation(tx, op, cred, utxos) + 
require.ErrorIs(err, errWrongUTXOType) } func TestFxVerifyTransferOperationFailedVerify(t *testing.T) { + require := require.New(t) + vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) vm.Clk.Set(date) fx := Fx{} - if err := fx.Initialize(&vm); err != nil { - t.Fatal(err) - } + require.NoError(fx.Initialize(&vm)) tx := &secp256k1fx.TestTx{ UnsignedBytes: txBytes, } @@ -441,23 +427,22 @@ func TestFxVerifyTransferOperationFailedVerify(t *testing.T) { } utxos := []interface{}{utxo} - if err := fx.VerifyOperation(tx, op, cred, utxos); err == nil { - t.Fatalf("VerifyOperation should have errored due to an invalid utxo output") - } + err := fx.VerifyOperation(tx, op, cred, utxos) + require.ErrorIs(err, secp256k1fx.ErrOutputUnspendable) } func TestFxVerifyTransferOperationWrongGroupID(t *testing.T) { + require := require.New(t) + vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) vm.Clk.Set(date) fx := Fx{} - if err := fx.Initialize(&vm); err != nil { - t.Fatal(err) - } + require.NoError(fx.Initialize(&vm)) tx := &secp256k1fx.TestTx{ UnsignedBytes: txBytes, } @@ -493,23 +478,22 @@ func TestFxVerifyTransferOperationWrongGroupID(t *testing.T) { } utxos := []interface{}{utxo} - if err := fx.VerifyOperation(tx, op, cred, utxos); err == nil { - t.Fatalf("VerifyOperation should have errored due to a wrong unique id") - } + err := fx.VerifyOperation(tx, op, cred, utxos) + require.ErrorIs(err, errWrongUniqueID) } func TestFxVerifyTransferOperationWrongBytes(t *testing.T) { + require := require.New(t) + vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, 
time.UTC) vm.Clk.Set(date) fx := Fx{} - if err := fx.Initialize(&vm); err != nil { - t.Fatal(err) - } + require.NoError(fx.Initialize(&vm)) tx := &secp256k1fx.TestTx{ UnsignedBytes: txBytes, } @@ -545,23 +529,22 @@ func TestFxVerifyTransferOperationWrongBytes(t *testing.T) { } utxos := []interface{}{utxo} - if err := fx.VerifyOperation(tx, op, cred, utxos); err == nil { - t.Fatalf("VerifyOperation should have errored due to the wrong hash being produced") - } + err := fx.VerifyOperation(tx, op, cred, utxos) + require.ErrorIs(err, errWrongBytes) } func TestFxVerifyTransferOperationTooSoon(t *testing.T) { + require := require.New(t) + vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) vm.Clk.Set(date) fx := Fx{} - if err := fx.Initialize(&vm); err != nil { - t.Fatal(err) - } + require.NoError(fx.Initialize(&vm)) tx := &secp256k1fx.TestTx{ UnsignedBytes: txBytes, } @@ -598,23 +581,22 @@ func TestFxVerifyTransferOperationTooSoon(t *testing.T) { } utxos := []interface{}{utxo} - if err := fx.VerifyOperation(tx, op, cred, utxos); err == nil { - t.Fatalf("Should have errored due to locktime") - } + err := fx.VerifyOperation(tx, op, cred, utxos) + require.ErrorIs(err, secp256k1fx.ErrTimelocked) } func TestFxVerifyOperationUnknownOperation(t *testing.T) { + require := require.New(t) + vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) vm.Clk.Set(date) fx := Fx{} - if err := fx.Initialize(&vm); err != nil { - t.Fatal(err) - } + require.NoError(fx.Initialize(&vm)) tx := &secp256k1fx.TestTx{ UnsignedBytes: txBytes, } @@ -635,24 +617,22 @@ func TestFxVerifyOperationUnknownOperation(t *testing.T) { } utxos := []interface{}{utxo} - if err := fx.VerifyOperation(tx, nil, cred, utxos); err == 
nil { - t.Fatalf("VerifyOperation should have errored due to an unknown operation") - } + err := fx.VerifyOperation(tx, nil, cred, utxos) + require.ErrorIs(err, errWrongOperationType) } func TestFxVerifyTransfer(t *testing.T) { + require := require.New(t) + vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) vm.Clk.Set(date) fx := Fx{} - if err := fx.Initialize(&vm); err != nil { - t.Fatal(err) - } - if err := fx.VerifyTransfer(nil, nil, nil, nil); err == nil { - t.Fatalf("this Fx doesn't support transfers") - } + require.NoError(fx.Initialize(&vm)) + err := fx.VerifyTransfer(nil, nil, nil, nil) + require.ErrorIs(err, errCantTransfer) } diff --git a/avalanchego/vms/nftfx/mint_operation.go b/avalanchego/vms/nftfx/mint_operation.go index 2d1c5bbb..227c7a6d 100644 --- a/avalanchego/vms/nftfx/mint_operation.go +++ b/avalanchego/vms/nftfx/mint_operation.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package nftfx @@ -6,11 +6,10 @@ package nftfx import ( "errors" - "github.com/ava-labs/avalanchego/vms/types" - "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/vms/components/verify" "github.com/ava-labs/avalanchego/vms/secp256k1fx" + "github.com/ava-labs/avalanchego/vms/types" ) var errNilMintOperation = errors.New("nil mint operation") diff --git a/avalanchego/vms/nftfx/mint_operation_test.go b/avalanchego/vms/nftfx/mint_operation_test.go index d462885e..ff397e9a 100644 --- a/avalanchego/vms/nftfx/mint_operation_test.go +++ b/avalanchego/vms/nftfx/mint_operation_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package nftfx @@ -6,24 +6,24 @@ package nftfx import ( "testing" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/vms/components/verify" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) func TestMintOperationVerifyNil(t *testing.T) { op := (*MintOperation)(nil) - if err := op.Verify(); err == nil { - t.Fatalf("nil operation should have failed verification") - } + err := op.Verify() + require.ErrorIs(t, err, errNilMintOperation) } func TestMintOperationVerifyTooLargePayload(t *testing.T) { op := MintOperation{ Payload: make([]byte, MaxPayloadSize+1), } - if err := op.Verify(); err == nil { - t.Fatalf("operation should have failed verification") - } + err := op.Verify() + require.ErrorIs(t, err, errPayloadTooLarge) } func TestMintOperationVerifyInvalidOutput(t *testing.T) { @@ -32,23 +32,19 @@ func TestMintOperationVerifyInvalidOutput(t *testing.T) { Threshold: 1, }}, } - if err := op.Verify(); err == nil { - t.Fatalf("operation should have failed verification") - } + err := op.Verify() + require.ErrorIs(t, err, secp256k1fx.ErrOutputUnspendable) } func TestMintOperationOuts(t *testing.T) { op := MintOperation{ Outputs: []*secp256k1fx.OutputOwners{{}}, } - if outs := op.Outs(); len(outs) != 1 { - t.Fatalf("Wrong number of outputs returned") - } + require.Len(t, op.Outs(), 1) } func TestMintOperationState(t *testing.T) { intf := interface{}(&MintOperation{}) - if _, ok := intf.(verify.State); ok { - t.Fatalf("shouldn't be marked as state") - } + _, ok := intf.(verify.State) + require.False(t, ok) } diff --git a/avalanchego/vms/nftfx/mint_output.go b/avalanchego/vms/nftfx/mint_output.go index fe91a1b4..e3a97437 100644 --- a/avalanchego/vms/nftfx/mint_output.go +++ b/avalanchego/vms/nftfx/mint_output.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package nftfx @@ -6,10 +6,15 @@ package nftfx import ( "encoding/json" + "github.com/ava-labs/avalanchego/vms/components/verify" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) +var _ verify.State = (*MintOutput)(nil) + type MintOutput struct { + verify.IsState `json:"-"` + GroupID uint32 `serialize:"true" json:"groupID"` secp256k1fx.OutputOwners `serialize:"true"` } diff --git a/avalanchego/vms/nftfx/mint_output_test.go b/avalanchego/vms/nftfx/mint_output_test.go index c33ede0e..9589fc17 100644 --- a/avalanchego/vms/nftfx/mint_output_test.go +++ b/avalanchego/vms/nftfx/mint_output_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package nftfx @@ -6,12 +6,13 @@ package nftfx import ( "testing" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/vms/components/verify" ) func TestMintOutputState(t *testing.T) { intf := interface{}(&MintOutput{}) - if _, ok := intf.(verify.State); !ok { - t.Fatalf("should be marked as state") - } + _, ok := intf.(verify.State) + require.True(t, ok) } diff --git a/avalanchego/vms/nftfx/transfer_operation.go b/avalanchego/vms/nftfx/transfer_operation.go index 010d4389..014cd900 100644 --- a/avalanchego/vms/nftfx/transfer_operation.go +++ b/avalanchego/vms/nftfx/transfer_operation.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package nftfx diff --git a/avalanchego/vms/nftfx/transfer_operation_test.go b/avalanchego/vms/nftfx/transfer_operation_test.go index 15d39501..b8892aec 100644 --- a/avalanchego/vms/nftfx/transfer_operation_test.go +++ b/avalanchego/vms/nftfx/transfer_operation_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. 
All rights reserved. // See the file LICENSE for licensing terms. package nftfx @@ -6,38 +6,35 @@ package nftfx import ( "testing" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/vms/components/verify" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) func TestTransferOperationVerifyNil(t *testing.T) { op := (*TransferOperation)(nil) - if err := op.Verify(); err == nil { - t.Fatalf("nil operation should have failed verification") - } + err := op.Verify() + require.ErrorIs(t, err, errNilTransferOperation) } func TestTransferOperationInvalid(t *testing.T) { op := TransferOperation{Input: secp256k1fx.Input{ SigIndices: []uint32{1, 0}, }} - if err := op.Verify(); err == nil { - t.Fatalf("operation should have failed verification") - } + err := op.Verify() + require.ErrorIs(t, err, secp256k1fx.ErrInputIndicesNotSortedUnique) } func TestTransferOperationOuts(t *testing.T) { op := TransferOperation{ Output: TransferOutput{}, } - if outs := op.Outs(); len(outs) != 1 { - t.Fatalf("Wrong number of outputs returned") - } + require.Len(t, op.Outs(), 1) } func TestTransferOperationState(t *testing.T) { intf := interface{}(&TransferOperation{}) - if _, ok := intf.(verify.State); ok { - t.Fatalf("shouldn't be marked as state") - } + _, ok := intf.(verify.State) + require.False(t, ok) } diff --git a/avalanchego/vms/nftfx/transfer_output.go b/avalanchego/vms/nftfx/transfer_output.go index 2d4182ac..ed10762f 100644 --- a/avalanchego/vms/nftfx/transfer_output.go +++ b/avalanchego/vms/nftfx/transfer_output.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package nftfx @@ -7,11 +7,10 @@ import ( "encoding/json" "errors" - "github.com/ava-labs/avalanchego/vms/types" - "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/vms/components/verify" "github.com/ava-labs/avalanchego/vms/secp256k1fx" + "github.com/ava-labs/avalanchego/vms/types" ) const ( @@ -26,6 +25,8 @@ var ( ) type TransferOutput struct { + verify.IsState `json:"-"` + GroupID uint32 `serialize:"true" json:"groupID"` Payload types.JSONByteSlice `serialize:"true" json:"payload"` secp256k1fx.OutputOwners `serialize:"true"` @@ -55,7 +56,3 @@ func (out *TransferOutput) Verify() error { return out.OutputOwners.Verify() } } - -func (out *TransferOutput) VerifyState() error { - return out.Verify() -} diff --git a/avalanchego/vms/nftfx/transfer_output_test.go b/avalanchego/vms/nftfx/transfer_output_test.go index a95a7467..0effa6c3 100644 --- a/avalanchego/vms/nftfx/transfer_output_test.go +++ b/avalanchego/vms/nftfx/transfer_output_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package nftfx @@ -6,6 +6,8 @@ package nftfx import ( "testing" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/vms/components/verify" "github.com/ava-labs/avalanchego/vms/secp256k1fx" @@ -13,18 +15,16 @@ import ( func TestTransferOutputVerifyNil(t *testing.T) { to := (*TransferOutput)(nil) - if err := to.Verify(); err == nil { - t.Fatalf("TransferOutput.Verify should have errored on nil") - } + err := to.Verify() + require.ErrorIs(t, err, errNilTransferOutput) } func TestTransferOutputLargePayload(t *testing.T) { to := TransferOutput{ Payload: make([]byte, MaxPayloadSize+1), } - if err := to.Verify(); err == nil { - t.Fatalf("TransferOutput.Verify should have errored on too large of a payload") - } + err := to.Verify() + require.ErrorIs(t, err, errPayloadTooLarge) } func TestTransferOutputInvalidSecp256k1Output(t *testing.T) { @@ -36,14 +36,12 @@ func TestTransferOutputInvalidSecp256k1Output(t *testing.T) { }, }, } - if err := to.Verify(); err == nil { - t.Fatalf("TransferOutput.Verify should have errored on too large of a payload") - } + err := to.Verify() + require.ErrorIs(t, err, secp256k1fx.ErrOutputUnoptimized) } func TestTransferOutputState(t *testing.T) { intf := interface{}(&TransferOutput{}) - if _, ok := intf.(verify.State); !ok { - t.Fatalf("should be marked as state") - } + _, ok := intf.(verify.State) + require.True(t, ok) } diff --git a/avalanchego/vms/platformvm/api/static_client.go b/avalanchego/vms/platformvm/api/static_client.go deleted file mode 100644 index 9dea3666..00000000 --- a/avalanchego/vms/platformvm/api/static_client.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package api - -import ( - "context" - - "github.com/ava-labs/avalanchego/utils/rpc" -) - -var _ StaticClient = (*staticClient)(nil) - -// StaticClient for interacting with the platformvm static api -type StaticClient interface { - BuildGenesis( - ctx context.Context, - args *BuildGenesisArgs, - options ...rpc.Option, - ) (*BuildGenesisReply, error) -} - -// staticClient is an implementation of a platformvm client for interacting with -// the platformvm static api -type staticClient struct { - requester rpc.EndpointRequester -} - -// NewClient returns a platformvm client for interacting with the platformvm static api -func NewStaticClient(uri string) StaticClient { - return &staticClient{requester: rpc.NewEndpointRequester( - uri + "/ext/vm/platform", - )} -} - -func (c *staticClient) BuildGenesis( - ctx context.Context, - args *BuildGenesisArgs, - options ...rpc.Option, -) (resp *BuildGenesisReply, err error) { - resp = &BuildGenesisReply{} - err = c.requester.SendRequest(ctx, "platform.buildGenesis", args, resp, options...) - return resp, err -} diff --git a/avalanchego/vms/platformvm/api/static_service.go b/avalanchego/vms/platformvm/api/static_service.go index 9067d6b8..7b3b5232 100644 --- a/avalanchego/vms/platformvm/api/static_service.go +++ b/avalanchego/vms/platformvm/api/static_service.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package api import ( + "cmp" "errors" "fmt" "net/http" @@ -30,9 +31,10 @@ import ( // state of the network. 
var ( - errUTXOHasNoValue = errors.New("genesis UTXO has no value") - errValidatorAddsNoValue = errors.New("validator would have already unstaked") - errStakeOverflow = errors.New("validator stake exceeds limit") + errUTXOHasNoValue = errors.New("genesis UTXO has no value") + errValidatorHasNoWeight = errors.New("validator has not weight") + errValidatorAlreadyExited = errors.New("validator would have already unstaked") + errStakeOverflow = errors.New("validator stake exceeds limit") _ utils.Sortable[UTXO] = UTXO{} ) @@ -49,30 +51,25 @@ type UTXO struct { } // TODO can we define this on *UTXO? -func (utxo UTXO) Less(other UTXO) bool { - if utxo.Locktime < other.Locktime { - return true - } else if utxo.Locktime > other.Locktime { - return false +func (utxo UTXO) Compare(other UTXO) int { + if locktimeCmp := cmp.Compare(utxo.Locktime, other.Locktime); locktimeCmp != 0 { + return locktimeCmp } - - if utxo.Amount < other.Amount { - return true - } else if utxo.Amount > other.Amount { - return false + if amountCmp := cmp.Compare(utxo.Amount, other.Amount); amountCmp != 0 { + return amountCmp } utxoAddr, err := bech32ToID(utxo.Address) if err != nil { - return false + return 0 } otherAddr, err := bech32ToID(other.Address) if err != nil { - return false + return 0 } - return utxoAddr.Less(otherAddr) + return utxoAddr.Compare(otherAddr) } // TODO: Refactor APIStaker, APIValidators and merge them together for @@ -97,6 +94,9 @@ type Staker struct { StakeAmount *json.Uint64 `json:"stakeAmount,omitempty"` } +// GenesisValidator should to be used for genesis validators only. +type GenesisValidator Staker + // Owner is the repr. of a reward owner sent over APIs. type Owner struct { Locktime json.Uint64 `json:"locktime"` @@ -131,6 +131,16 @@ type PermissionlessValidator struct { Delegators *[]PrimaryDelegator `json:"delegators,omitempty"` } +// GenesisPermissionlessValidator should to be used for genesis validators only. 
+type GenesisPermissionlessValidator struct { + GenesisValidator + RewardOwner *Owner `json:"rewardOwner,omitempty"` + DelegationFee json.Float32 `json:"delegationFee"` + ExactDelegationFee *json.Uint32 `json:"exactDelegationFee,omitempty"` + Staked []UTXO `json:"staked,omitempty"` + Signer *signer.ProofOfPossession `json:"signer,omitempty"` +} + // PermissionedValidator is the repr. of a permissioned validator sent over APIs. type PermissionedValidator struct { Staker @@ -169,15 +179,15 @@ type Chain struct { // [Chains] are the chains that exist at genesis. // [Time] is the Platform Chain's time at network genesis. type BuildGenesisArgs struct { - AvaxAssetID ids.ID `json:"avaxAssetID"` - NetworkID json.Uint32 `json:"networkID"` - UTXOs []UTXO `json:"utxos"` - Validators []PermissionlessValidator `json:"validators"` - Chains []Chain `json:"chains"` - Time json.Uint64 `json:"time"` - InitialSupply json.Uint64 `json:"initialSupply"` - Message string `json:"message"` - Encoding formatting.Encoding `json:"encoding"` + AvaxAssetID ids.ID `json:"avaxAssetID"` + NetworkID json.Uint32 `json:"networkID"` + UTXOs []UTXO `json:"utxos"` + Validators []GenesisPermissionlessValidator `json:"validators"` + Chains []Chain `json:"chains"` + Time json.Uint64 `json:"time"` + InitialSupply json.Uint64 `json:"initialSupply"` + Message string `json:"message"` + Encoding formatting.Encoding `json:"encoding"` } // BuildGenesisReply is the reply from BuildGenesis @@ -278,10 +288,10 @@ func (*StaticService) BuildGenesis(_ *http.Request, args *BuildGenesisArgs, repl } if weight == 0 { - return errValidatorAddsNoValue + return errValidatorHasNoWeight } if uint64(vdr.EndTime) <= uint64(args.Time) { - return errValidatorAddsNoValue + return errValidatorAlreadyExited } owner := &secp256k1fx.OutputOwners{ @@ -302,21 +312,39 @@ func (*StaticService) BuildGenesis(_ *http.Request, args *BuildGenesisArgs, repl delegationFee = uint32(*vdr.ExactDelegationFee) } - tx := &txs.Tx{Unsigned: 
&txs.AddValidatorTx{ - BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ + var ( + baseTx = txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: uint32(args.NetworkID), BlockchainID: ids.Empty, - }}, - Validator: txs.Validator{ + }} + validator = txs.Validator{ NodeID: vdr.NodeID, Start: uint64(args.Time), End: uint64(vdr.EndTime), Wght: weight, - }, - StakeOuts: stake, - RewardsOwner: owner, - DelegationShares: delegationFee, - }} + } + tx *txs.Tx + ) + if vdr.Signer == nil { + tx = &txs.Tx{Unsigned: &txs.AddValidatorTx{ + BaseTx: baseTx, + Validator: validator, + StakeOuts: stake, + RewardsOwner: owner, + DelegationShares: delegationFee, + }} + } else { + tx = &txs.Tx{Unsigned: &txs.AddPermissionlessValidatorTx{ + BaseTx: baseTx, + Validator: validator, + Signer: vdr.Signer, + StakeOuts: stake, + ValidatorRewardsOwner: owner, + DelegatorRewardsOwner: owner, + DelegationShares: delegationFee, + }} + } + if err := tx.Initialize(txs.GenesisCodec); err != nil { return err } @@ -363,7 +391,7 @@ func (*StaticService) BuildGenesis(_ *http.Request, args *BuildGenesisArgs, repl } // Marshal genesis to bytes - bytes, err := genesis.Codec.Marshal(genesis.Version, g) + bytes, err := genesis.Codec.Marshal(genesis.CodecVersion, g) if err != nil { return fmt.Errorf("couldn't marshal genesis: %w", err) } diff --git a/avalanchego/vms/platformvm/api/static_service_test.go b/avalanchego/vms/platformvm/api/static_service_test.go index e11c1a29..a0e62fa9 100644 --- a/avalanchego/vms/platformvm/api/static_service_test.go +++ b/avalanchego/vms/platformvm/api/static_service_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package api @@ -16,13 +16,10 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/genesis" ) -const testNetworkID = 10 // To be used in tests - func TestBuildGenesisInvalidUTXOBalance(t *testing.T) { require := require.New(t) - nodeID := ids.NodeID{1, 2, 3} - hrp := constants.NetworkIDToHRP[testNetworkID] - addr, err := address.FormatBech32(hrp, nodeID.Bytes()) + nodeID := ids.BuildTestNodeID([]byte{1, 2, 3}) + addr, err := address.FormatBech32(constants.UnitTestHRP, nodeID.Bytes()) require.NoError(err) utxo := UTXO{ @@ -30,8 +27,8 @@ func TestBuildGenesisInvalidUTXOBalance(t *testing.T) { Amount: 0, } weight := json.Uint64(987654321) - validator := PermissionlessValidator{ - Staker: Staker{ + validator := GenesisPermissionlessValidator{ + GenesisValidator: GenesisValidator{ EndTime: 15, Weight: weight, NodeID: nodeID, @@ -50,7 +47,7 @@ func TestBuildGenesisInvalidUTXOBalance(t *testing.T) { UTXOs: []UTXO{ utxo, }, - Validators: []PermissionlessValidator{ + Validators: []GenesisPermissionlessValidator{ validator, }, Time: 5, @@ -59,14 +56,14 @@ func TestBuildGenesisInvalidUTXOBalance(t *testing.T) { reply := BuildGenesisReply{} ss := StaticService{} - require.Error(ss.BuildGenesis(nil, &args, &reply), "should have errored due to an invalid balance") + err = ss.BuildGenesis(nil, &args, &reply) + require.ErrorIs(err, errUTXOHasNoValue) } -func TestBuildGenesisInvalidAmount(t *testing.T) { +func TestBuildGenesisInvalidStakeWeight(t *testing.T) { require := require.New(t) - nodeID := ids.NodeID{1, 2, 3} - hrp := constants.NetworkIDToHRP[testNetworkID] - addr, err := address.FormatBech32(hrp, nodeID.Bytes()) + nodeID := ids.BuildTestNodeID([]byte{1, 2, 3}) + addr, err := address.FormatBech32(constants.UnitTestHRP, nodeID.Bytes()) require.NoError(err) utxo := UTXO{ @@ -74,8 +71,8 @@ func TestBuildGenesisInvalidAmount(t *testing.T) { Amount: 123456789, } weight := json.Uint64(0) - validator := PermissionlessValidator{ - Staker: Staker{ + validator := 
GenesisPermissionlessValidator{ + GenesisValidator: GenesisValidator{ StartTime: 0, EndTime: 15, NodeID: nodeID, @@ -94,7 +91,7 @@ func TestBuildGenesisInvalidAmount(t *testing.T) { UTXOs: []UTXO{ utxo, }, - Validators: []PermissionlessValidator{ + Validators: []GenesisPermissionlessValidator{ validator, }, Time: 5, @@ -103,14 +100,14 @@ func TestBuildGenesisInvalidAmount(t *testing.T) { reply := BuildGenesisReply{} ss := StaticService{} - require.Error(ss.BuildGenesis(nil, &args, &reply), "should have errored due to an invalid amount") + err = ss.BuildGenesis(nil, &args, &reply) + require.ErrorIs(err, errValidatorHasNoWeight) } func TestBuildGenesisInvalidEndtime(t *testing.T) { require := require.New(t) - nodeID := ids.NodeID{1, 2, 3} - hrp := constants.NetworkIDToHRP[testNetworkID] - addr, err := address.FormatBech32(hrp, nodeID.Bytes()) + nodeID := ids.BuildTestNodeID([]byte{1, 2, 3}) + addr, err := address.FormatBech32(constants.UnitTestHRP, nodeID.Bytes()) require.NoError(err) utxo := UTXO{ @@ -119,8 +116,8 @@ func TestBuildGenesisInvalidEndtime(t *testing.T) { } weight := json.Uint64(987654321) - validator := PermissionlessValidator{ - Staker: Staker{ + validator := GenesisPermissionlessValidator{ + GenesisValidator: GenesisValidator{ StartTime: 0, EndTime: 5, NodeID: nodeID, @@ -139,7 +136,7 @@ func TestBuildGenesisInvalidEndtime(t *testing.T) { UTXOs: []UTXO{ utxo, }, - Validators: []PermissionlessValidator{ + Validators: []GenesisPermissionlessValidator{ validator, }, Time: 5, @@ -148,14 +145,14 @@ func TestBuildGenesisInvalidEndtime(t *testing.T) { reply := BuildGenesisReply{} ss := StaticService{} - require.Error(ss.BuildGenesis(nil, &args, &reply), "should have errored due to an invalid end time") + err = ss.BuildGenesis(nil, &args, &reply) + require.ErrorIs(err, errValidatorAlreadyExited) } func TestBuildGenesisReturnsSortedValidators(t *testing.T) { require := require.New(t) - nodeID := ids.NodeID{1} - hrp := constants.NetworkIDToHRP[testNetworkID] - 
addr, err := address.FormatBech32(hrp, nodeID.Bytes()) + nodeID := ids.BuildTestNodeID([]byte{1}) + addr, err := address.FormatBech32(constants.UnitTestHRP, nodeID.Bytes()) require.NoError(err) utxo := UTXO{ @@ -164,8 +161,8 @@ func TestBuildGenesisReturnsSortedValidators(t *testing.T) { } weight := json.Uint64(987654321) - validator1 := PermissionlessValidator{ - Staker: Staker{ + validator1 := GenesisPermissionlessValidator{ + GenesisValidator: GenesisValidator{ StartTime: 0, EndTime: 20, NodeID: nodeID, @@ -180,8 +177,8 @@ func TestBuildGenesisReturnsSortedValidators(t *testing.T) { }}, } - validator2 := PermissionlessValidator{ - Staker: Staker{ + validator2 := GenesisPermissionlessValidator{ + GenesisValidator: GenesisValidator{ StartTime: 3, EndTime: 15, NodeID: nodeID, @@ -196,8 +193,8 @@ func TestBuildGenesisReturnsSortedValidators(t *testing.T) { }}, } - validator3 := PermissionlessValidator{ - Staker: Staker{ + validator3 := GenesisPermissionlessValidator{ + GenesisValidator: GenesisValidator{ StartTime: 1, EndTime: 10, NodeID: nodeID, @@ -217,7 +214,7 @@ func TestBuildGenesisReturnsSortedValidators(t *testing.T) { UTXOs: []UTXO{ utxo, }, - Validators: []PermissionlessValidator{ + Validators: []GenesisPermissionlessValidator{ validator1, validator2, validator3, @@ -240,89 +237,63 @@ func TestBuildGenesisReturnsSortedValidators(t *testing.T) { require.Len(validators, 3) } -func TestUTXOLess(t *testing.T) { +func TestUTXOCompare(t *testing.T) { var ( smallerAddr = ids.ShortID{} largerAddr = ids.ShortID{1} ) smallerAddrStr, err := address.FormatBech32("avax", smallerAddr[:]) - if err != nil { - panic(err) - } + require.NoError(t, err) largerAddrStr, err := address.FormatBech32("avax", largerAddr[:]) - if err != nil { - panic(err) - } + require.NoError(t, err) + type test struct { name string utxo1 UTXO utxo2 UTXO - expected bool + expected int } tests := []test{ { name: "both empty", utxo1: UTXO{}, utxo2: UTXO{}, - expected: false, + expected: 0, }, { - 
name: "first locktime smaller", + name: "locktime smaller", utxo1: UTXO{}, utxo2: UTXO{ Locktime: 1, }, - expected: true, + expected: -1, }, { - name: "first locktime larger", - utxo1: UTXO{ - Locktime: 1, - }, - utxo2: UTXO{}, - expected: false, - }, - { - name: "first amount smaller", + name: "amount smaller", utxo1: UTXO{}, utxo2: UTXO{ Amount: 1, }, - expected: true, + expected: -1, }, { - name: "first amount larger", - utxo1: UTXO{ - Amount: 1, - }, - utxo2: UTXO{}, - expected: false, - }, - { - name: "first address smaller", + name: "address smaller", utxo1: UTXO{ Address: smallerAddrStr, }, utxo2: UTXO{ Address: largerAddrStr, }, - expected: true, - }, - { - name: "first address larger", - utxo1: UTXO{ - Address: largerAddrStr, - }, - utxo2: UTXO{ - Address: smallerAddrStr, - }, - expected: false, + expected: -1, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - require.Equal(t, tt.expected, tt.utxo1.Less(tt.utxo2)) + require := require.New(t) + + require.Equal(tt.expected, tt.utxo1.Compare(tt.utxo2)) + require.Equal(-tt.expected, tt.utxo2.Compare(tt.utxo1)) }) } } diff --git a/avalanchego/vms/platformvm/blocks/abort_block.go b/avalanchego/vms/platformvm/block/abort_block.go similarity index 90% rename from avalanchego/vms/platformvm/blocks/abort_block.go rename to avalanchego/vms/platformvm/block/abort_block.go index 2f5928f9..ace8087f 100644 --- a/avalanchego/vms/platformvm/blocks/abort_block.go +++ b/avalanchego/vms/platformvm/block/abort_block.go @@ -1,7 +1,7 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
-package blocks +package block import ( "time" @@ -43,7 +43,7 @@ func NewBanffAbortBlock( }, }, } - return blk, initialize(blk) + return blk, initialize(blk, &blk.CommonBlock) } type ApricotAbortBlock struct { @@ -78,5 +78,5 @@ func NewApricotAbortBlock( Hght: height, }, } - return blk, initialize(blk) + return blk, initialize(blk, &blk.CommonBlock) } diff --git a/avalanchego/vms/platformvm/blocks/abort_block_test.go b/avalanchego/vms/platformvm/block/abort_block_test.go similarity index 92% rename from avalanchego/vms/platformvm/blocks/abort_block_test.go rename to avalanchego/vms/platformvm/block/abort_block_test.go index d85cf12b..a6517cef 100644 --- a/avalanchego/vms/platformvm/blocks/abort_block_test.go +++ b/avalanchego/vms/platformvm/block/abort_block_test.go @@ -1,7 +1,7 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package blocks +package block import ( "testing" diff --git a/avalanchego/vms/platformvm/blocks/atomic_block.go b/avalanchego/vms/platformvm/block/atomic_block.go similarity index 90% rename from avalanchego/vms/platformvm/blocks/atomic_block.go rename to avalanchego/vms/platformvm/block/atomic_block.go index 94424439..35deda80 100644 --- a/avalanchego/vms/platformvm/blocks/atomic_block.go +++ b/avalanchego/vms/platformvm/block/atomic_block.go @@ -1,7 +1,7 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
-package blocks +package block import ( "fmt" @@ -52,5 +52,5 @@ func NewApricotAtomicBlock( }, Tx: tx, } - return blk, initialize(blk) + return blk, initialize(blk, &blk.CommonBlock) } diff --git a/avalanchego/vms/platformvm/blocks/atomic_block_test.go b/avalanchego/vms/platformvm/block/atomic_block_test.go similarity index 93% rename from avalanchego/vms/platformvm/blocks/atomic_block_test.go rename to avalanchego/vms/platformvm/block/atomic_block_test.go index 7e1b9f09..d8131018 100644 --- a/avalanchego/vms/platformvm/blocks/atomic_block_test.go +++ b/avalanchego/vms/platformvm/block/atomic_block_test.go @@ -1,7 +1,7 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package blocks +package block import ( "testing" diff --git a/avalanchego/vms/platformvm/blocks/block.go b/avalanchego/vms/platformvm/block/block.go similarity index 80% rename from avalanchego/vms/platformvm/blocks/block.go rename to avalanchego/vms/platformvm/block/block.go index 273f379a..30be125b 100644 --- a/avalanchego/vms/platformvm/blocks/block.go +++ b/avalanchego/vms/platformvm/block/block.go @@ -1,7 +1,7 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
-package blocks +package block import ( "fmt" @@ -36,12 +36,14 @@ type BanffBlock interface { Timestamp() time.Time } -func initialize(blk Block) error { +func initialize(blk Block, commonBlk *CommonBlock) error { // We serialize this block as a pointer so that it can be deserialized into // a Block - bytes, err := Codec.Marshal(Version, &blk) + bytes, err := Codec.Marshal(CodecVersion, &blk) if err != nil { return fmt.Errorf("couldn't marshal block: %w", err) } - return blk.initialize(bytes) + + commonBlk.initialize(bytes) + return nil } diff --git a/avalanchego/vms/platformvm/block/builder/builder.go b/avalanchego/vms/platformvm/block/builder/builder.go new file mode 100644 index 00000000..77f39fbd --- /dev/null +++ b/avalanchego/vms/platformvm/block/builder/builder.go @@ -0,0 +1,455 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package builder + +import ( + "context" + "errors" + "fmt" + "sync" + "time" + + "go.uber.org/zap" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/snow/consensus/snowman" + "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/utils/timer/mockable" + "github.com/ava-labs/avalanchego/utils/units" + "github.com/ava-labs/avalanchego/vms/platformvm/block" + "github.com/ava-labs/avalanchego/vms/platformvm/state" + "github.com/ava-labs/avalanchego/vms/platformvm/status" + "github.com/ava-labs/avalanchego/vms/platformvm/txs" + "github.com/ava-labs/avalanchego/vms/platformvm/txs/mempool" + + blockexecutor "github.com/ava-labs/avalanchego/vms/platformvm/block/executor" + txexecutor "github.com/ava-labs/avalanchego/vms/platformvm/txs/executor" +) + +// targetBlockSize is maximum number of transaction bytes to place into a +// StandardBlock +const targetBlockSize = 128 * units.KiB + +var ( + _ Builder = (*builder)(nil) + + ErrEndOfTime = errors.New("program time is suspiciously far in 
the future") + ErrNoPendingBlocks = errors.New("no pending blocks") + errMissingPreferredState = errors.New("missing preferred block state") + errCalculatingNextStakerTime = errors.New("failed calculating next staker time") +) + +type Builder interface { + mempool.Mempool + + // StartBlockTimer starts to issue block creation requests to advance the + // chain timestamp. + StartBlockTimer() + + // ResetBlockTimer forces the block timer to recalculate when it should + // advance the chain timestamp. + ResetBlockTimer() + + // ShutdownBlockTimer stops block creation requests to advance the chain + // timestamp. + // + // Invariant: Assumes the context lock is held when calling. + ShutdownBlockTimer() + + // BuildBlock can be called to attempt to create a new block + BuildBlock(context.Context) (snowman.Block, error) + + // PackBlockTxs returns an array of txs that can fit into a valid block of + // size [targetBlockSize]. The returned txs are all verified against the + // preferred state. + // + // Note: This function does not call the consensus engine. + PackBlockTxs(targetBlockSize int) ([]*txs.Tx, error) +} + +// builder implements a simple builder to convert txs into valid blocks +type builder struct { + mempool.Mempool + + txExecutorBackend *txexecutor.Backend + blkManager blockexecutor.Manager + + // resetTimer is used to signal that the block builder timer should update + // when it will trigger building of a block. + resetTimer chan struct{} + closed chan struct{} + closeOnce sync.Once +} + +func New( + mempool mempool.Mempool, + txExecutorBackend *txexecutor.Backend, + blkManager blockexecutor.Manager, +) Builder { + return &builder{ + Mempool: mempool, + txExecutorBackend: txExecutorBackend, + blkManager: blkManager, + resetTimer: make(chan struct{}, 1), + closed: make(chan struct{}), + } +} + +func (b *builder) StartBlockTimer() { + go func() { + timer := time.NewTimer(0) + defer timer.Stop() + + for { + // Invariant: The [timer] is not stopped. 
+ select { + case <-timer.C: + case <-b.resetTimer: + if !timer.Stop() { + <-timer.C + } + case <-b.closed: + return + } + + // Note: Because the context lock is not held here, it is possible + // that [ShutdownBlockTimer] is called concurrently with this + // execution. + for { + duration, err := b.durationToSleep() + if err != nil { + b.txExecutorBackend.Ctx.Log.Error("block builder encountered a fatal error", + zap.Error(err), + ) + return + } + + if duration > 0 { + timer.Reset(duration) + break + } + + // Block needs to be issued to advance time. + b.Mempool.RequestBuildBlock(true /*=emptyBlockPermitted*/) + + // Invariant: ResetBlockTimer is guaranteed to be called after + // [durationToSleep] returns a value <= 0. This is because we + // are guaranteed to attempt to build block. After building a + // valid block, the chain will have its preference updated which + // may change the duration to sleep and trigger a timer reset. + select { + case <-b.resetTimer: + case <-b.closed: + return + } + } + } + }() +} + +func (b *builder) durationToSleep() (time.Duration, error) { + // Grabbing the lock here enforces that this function is not called mid-way + // through modifying of the state. + b.txExecutorBackend.Ctx.Lock.Lock() + defer b.txExecutorBackend.Ctx.Lock.Unlock() + + // If [ShutdownBlockTimer] was called, we want to exit the block timer + // goroutine. We check this with the context lock held because + // [ShutdownBlockTimer] is expected to only be called with the context lock + // held. 
+ select { + case <-b.closed: + return 0, nil + default: + } + + preferredID := b.blkManager.Preferred() + preferredState, ok := b.blkManager.GetState(preferredID) + if !ok { + return 0, fmt.Errorf("%w: %s", errMissingPreferredState, preferredID) + } + + nextStakerChangeTime, err := txexecutor.GetNextStakerChangeTime(preferredState) + if err != nil { + return 0, fmt.Errorf("%w of %s: %w", errCalculatingNextStakerTime, preferredID, err) + } + + now := b.txExecutorBackend.Clk.Time() + return nextStakerChangeTime.Sub(now), nil +} + +func (b *builder) ResetBlockTimer() { + // Ensure that the timer will be reset at least once. + select { + case b.resetTimer <- struct{}{}: + default: + } +} + +func (b *builder) ShutdownBlockTimer() { + b.closeOnce.Do(func() { + close(b.closed) + }) +} + +// BuildBlock builds a block to be added to consensus. +// This method removes the transactions from the returned +// blocks from the mempool. +func (b *builder) BuildBlock(context.Context) (snowman.Block, error) { + // If there are still transactions in the mempool, then we need to + // re-trigger block building. + defer b.Mempool.RequestBuildBlock(false /*=emptyBlockPermitted*/) + + b.txExecutorBackend.Ctx.Log.Debug("starting to attempt to build a block") + + // Get the block to build on top of and retrieve the new block's context. 
+ preferredID := b.blkManager.Preferred() + preferred, err := b.blkManager.GetBlock(preferredID) + if err != nil { + return nil, err + } + nextHeight := preferred.Height() + 1 + preferredState, ok := b.blkManager.GetState(preferredID) + if !ok { + return nil, fmt.Errorf("%w: %s", state.ErrMissingParentState, preferredID) + } + + timestamp, timeWasCapped, err := txexecutor.NextBlockTime(preferredState, b.txExecutorBackend.Clk) + if err != nil { + return nil, fmt.Errorf("could not calculate next staker change time: %w", err) + } + + statelessBlk, err := buildBlock( + b, + preferredID, + nextHeight, + timestamp, + timeWasCapped, + preferredState, + ) + if err != nil { + return nil, err + } + + return b.blkManager.NewBlock(statelessBlk), nil +} + +func (b *builder) PackBlockTxs(targetBlockSize int) ([]*txs.Tx, error) { + preferredID := b.blkManager.Preferred() + preferredState, ok := b.blkManager.GetState(preferredID) + if !ok { + return nil, fmt.Errorf("%w: %s", errMissingPreferredState, preferredID) + } + + return packBlockTxs( + preferredID, + preferredState, + b.Mempool, + b.txExecutorBackend, + b.blkManager, + b.txExecutorBackend.Clk.Time(), + targetBlockSize, + ) +} + +// [timestamp] is min(max(now, parent timestamp), next staker change time) +func buildBlock( + builder *builder, + parentID ids.ID, + height uint64, + timestamp time.Time, + forceAdvanceTime bool, + parentState state.Chain, +) (block.Block, error) { + // Try rewarding stakers whose staking period ends at the new chain time. + // This is done first to prioritize advancing the timestamp as quickly as + // possible. 
+ stakerTxID, shouldReward, err := getNextStakerToReward(timestamp, parentState) + if err != nil { + return nil, fmt.Errorf("could not find next staker to reward: %w", err) + } + if shouldReward { + rewardValidatorTx, err := NewRewardValidatorTx(builder.txExecutorBackend.Ctx, stakerTxID) + if err != nil { + return nil, fmt.Errorf("could not build tx to reward staker: %w", err) + } + + var blockTxs []*txs.Tx + // TODO: Cleanup post-Durango + if builder.txExecutorBackend.Config.IsDurangoActivated(timestamp) { + blockTxs, err = packBlockTxs( + parentID, + parentState, + builder.Mempool, + builder.txExecutorBackend, + builder.blkManager, + timestamp, + targetBlockSize, + ) + if err != nil { + return nil, fmt.Errorf("failed to pack block txs: %w", err) + } + } + + return block.NewBanffProposalBlock( + timestamp, + parentID, + height, + rewardValidatorTx, + blockTxs, + ) + } + + blockTxs, err := packBlockTxs( + parentID, + parentState, + builder.Mempool, + builder.txExecutorBackend, + builder.blkManager, + timestamp, + targetBlockSize, + ) + if err != nil { + return nil, fmt.Errorf("failed to pack block txs: %w", err) + } + + // If there is no reason to build a block, don't. + if len(blockTxs) == 0 && !forceAdvanceTime { + builder.txExecutorBackend.Ctx.Log.Debug("no pending txs to issue into a block") + return nil, ErrNoPendingBlocks + } + + // Issue a block with as many transactions as possible. 
+ return block.NewBanffStandardBlock( + timestamp, + parentID, + height, + blockTxs, + ) +} + +func packBlockTxs( + parentID ids.ID, + parentState state.Chain, + mempool mempool.Mempool, + backend *txexecutor.Backend, + manager blockexecutor.Manager, + timestamp time.Time, + remainingSize int, +) ([]*txs.Tx, error) { + stateDiff, err := state.NewDiffOn(parentState) + if err != nil { + return nil, err + } + + if _, err := txexecutor.AdvanceTimeTo(backend, stateDiff, timestamp); err != nil { + return nil, err + } + + var ( + blockTxs []*txs.Tx + inputs set.Set[ids.ID] + ) + + for { + tx, exists := mempool.Peek() + if !exists { + break + } + txSize := len(tx.Bytes()) + if txSize > remainingSize { + break + } + mempool.Remove(tx) + + // Invariant: [tx] has already been syntactically verified. + + txDiff, err := state.NewDiffOn(stateDiff) + if err != nil { + return nil, err + } + + executor := &txexecutor.StandardTxExecutor{ + Backend: backend, + State: txDiff, + Tx: tx, + } + + err = tx.Unsigned.Visit(executor) + if err != nil { + txID := tx.ID() + mempool.MarkDropped(txID, err) + continue + } + + if inputs.Overlaps(executor.Inputs) { + txID := tx.ID() + mempool.MarkDropped(txID, blockexecutor.ErrConflictingBlockTxs) + continue + } + err = manager.VerifyUniqueInputs(parentID, executor.Inputs) + if err != nil { + txID := tx.ID() + mempool.MarkDropped(txID, err) + continue + } + inputs.Union(executor.Inputs) + + txDiff.AddTx(tx, status.Committed) + err = txDiff.Apply(stateDiff) + if err != nil { + return nil, err + } + + remainingSize -= txSize + blockTxs = append(blockTxs, tx) + } + + return blockTxs, nil +} + +// getNextStakerToReward returns the next staker txID to remove from the staking +// set with a RewardValidatorTx rather than an AdvanceTimeTx. [chainTimestamp] +// is the timestamp of the chain at the time this validator would be getting +// removed and is used to calculate [shouldReward]. 
+// Returns: +// - [txID] of the next staker to reward +// - [shouldReward] if the txID exists and is ready to be rewarded +// - [err] if something bad happened +func getNextStakerToReward( + chainTimestamp time.Time, + preferredState state.Chain, +) (ids.ID, bool, error) { + if !chainTimestamp.Before(mockable.MaxTime) { + return ids.Empty, false, ErrEndOfTime + } + + currentStakerIterator, err := preferredState.GetCurrentStakerIterator() + if err != nil { + return ids.Empty, false, err + } + defer currentStakerIterator.Release() + + for currentStakerIterator.Next() { + currentStaker := currentStakerIterator.Value() + priority := currentStaker.Priority + // If the staker is a permissionless staker (not a permissioned subnet + // validator), it's the next staker we will want to remove with a + // RewardValidatorTx rather than an AdvanceTimeTx. + if priority != txs.SubnetPermissionedValidatorCurrentPriority { + return currentStaker.TxID, chainTimestamp.Equal(currentStaker.EndTime), nil + } + } + return ids.Empty, false, nil +} + +func NewRewardValidatorTx(ctx *snow.Context, txID ids.ID) (*txs.Tx, error) { + utx := &txs.RewardValidatorTx{TxID: txID} + tx, err := txs.NewSigned(utx, txs.Codec, nil) + if err != nil { + return nil, err + } + return tx, tx.SyntacticVerify(ctx) +} diff --git a/avalanchego/vms/platformvm/block/builder/builder_test.go b/avalanchego/vms/platformvm/block/builder/builder_test.go new file mode 100644 index 00000000..96fa8e06 --- /dev/null +++ b/avalanchego/vms/platformvm/block/builder/builder_test.go @@ -0,0 +1,710 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package builder + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/consensus/snowman" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/crypto/bls" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + "github.com/ava-labs/avalanchego/utils/timer/mockable" + "github.com/ava-labs/avalanchego/utils/units" + "github.com/ava-labs/avalanchego/vms/platformvm/block" + "github.com/ava-labs/avalanchego/vms/platformvm/reward" + "github.com/ava-labs/avalanchego/vms/platformvm/signer" + "github.com/ava-labs/avalanchego/vms/platformvm/state" + "github.com/ava-labs/avalanchego/vms/platformvm/txs" + + blockexecutor "github.com/ava-labs/avalanchego/vms/platformvm/block/executor" + txexecutor "github.com/ava-labs/avalanchego/vms/platformvm/txs/executor" +) + +func TestBuildBlockBasic(t *testing.T) { + require := require.New(t) + + env := newEnvironment(t, latestFork) + env.ctx.Lock.Lock() + defer env.ctx.Lock.Unlock() + + // Create a valid transaction + tx, err := env.txBuilder.NewCreateChainTx( + testSubnet1.ID(), + nil, + constants.AVMID, + nil, + "chain name", + []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, + ids.ShortEmpty, + nil, + ) + require.NoError(err) + txID := tx.ID() + + // Issue the transaction + env.ctx.Lock.Unlock() + require.NoError(env.network.IssueTx(context.Background(), tx)) + env.ctx.Lock.Lock() + _, ok := env.mempool.Get(txID) + require.True(ok) + + // [BuildBlock] should build a block with the transaction + blkIntf, err := env.Builder.BuildBlock(context.Background()) + require.NoError(err) + + require.IsType(&blockexecutor.Block{}, blkIntf) + blk := blkIntf.(*blockexecutor.Block) + require.Len(blk.Txs(), 1) + require.Equal(txID, blk.Txs()[0].ID()) + + // Mempool should not contain the transaction or have marked it as 
dropped + _, ok = env.mempool.Get(txID) + require.False(ok) + require.NoError(env.mempool.GetDropReason(txID)) +} + +func TestBuildBlockDoesNotBuildWithEmptyMempool(t *testing.T) { + require := require.New(t) + + env := newEnvironment(t, latestFork) + env.ctx.Lock.Lock() + defer env.ctx.Lock.Unlock() + + tx, exists := env.mempool.Peek() + require.False(exists) + require.Nil(tx) + + // [BuildBlock] should not build an empty block + blk, err := env.Builder.BuildBlock(context.Background()) + require.ErrorIs(err, ErrNoPendingBlocks) + require.Nil(blk) +} + +func TestBuildBlockShouldReward(t *testing.T) { + require := require.New(t) + + env := newEnvironment(t, latestFork) + env.ctx.Lock.Lock() + defer env.ctx.Lock.Unlock() + + var ( + now = env.backend.Clk.Time() + nodeID = ids.GenerateTestNodeID() + + defaultValidatorStake = 100 * units.MilliAvax + validatorStartTime = now.Add(2 * txexecutor.SyncBound) + validatorEndTime = validatorStartTime.Add(360 * 24 * time.Hour) + ) + + sk, err := bls.NewSecretKey() + require.NoError(err) + + // Create a valid [AddPermissionlessValidatorTx] + tx, err := env.txBuilder.NewAddPermissionlessValidatorTx( + defaultValidatorStake, + uint64(validatorStartTime.Unix()), + uint64(validatorEndTime.Unix()), + nodeID, + signer.NewProofOfPossession(sk), + preFundedKeys[0].PublicKey().Address(), + reward.PercentDenominator, + []*secp256k1.PrivateKey{preFundedKeys[0]}, + preFundedKeys[0].PublicKey().Address(), + nil, + ) + require.NoError(err) + txID := tx.ID() + + // Issue the transaction + env.ctx.Lock.Unlock() + require.NoError(env.network.IssueTx(context.Background(), tx)) + env.ctx.Lock.Lock() + _, ok := env.mempool.Get(txID) + require.True(ok) + + // Build and accept a block with the tx + blk, err := env.Builder.BuildBlock(context.Background()) + require.NoError(err) + require.IsType(&block.BanffStandardBlock{}, blk.(*blockexecutor.Block).Block) + require.Equal([]*txs.Tx{tx}, blk.(*blockexecutor.Block).Block.Txs()) + 
require.NoError(blk.Verify(context.Background())) + require.NoError(blk.Accept(context.Background())) + require.True(env.blkManager.SetPreference(blk.ID())) + + // Validator should now be current + staker, err := env.state.GetCurrentValidator(constants.PrimaryNetworkID, nodeID) + require.NoError(err) + require.Equal(txID, staker.TxID) + + // Should be rewarded at the end of staking period + env.backend.Clk.Set(validatorEndTime) + + for { + iter, err := env.state.GetCurrentStakerIterator() + require.NoError(err) + require.True(iter.Next()) + staker := iter.Value() + iter.Release() + + // Check that the right block was built + blk, err := env.Builder.BuildBlock(context.Background()) + require.NoError(err) + require.NoError(blk.Verify(context.Background())) + require.IsType(&block.BanffProposalBlock{}, blk.(*blockexecutor.Block).Block) + + expectedTx, err := NewRewardValidatorTx(env.ctx, staker.TxID) + require.NoError(err) + require.Equal([]*txs.Tx{expectedTx}, blk.(*blockexecutor.Block).Block.Txs()) + + // Commit the [ProposalBlock] with a [CommitBlock] + proposalBlk, ok := blk.(snowman.OracleBlock) + require.True(ok) + options, err := proposalBlk.Options(context.Background()) + require.NoError(err) + + commit := options[0].(*blockexecutor.Block) + require.IsType(&block.BanffCommitBlock{}, commit.Block) + + require.NoError(blk.Accept(context.Background())) + require.NoError(commit.Verify(context.Background())) + require.NoError(commit.Accept(context.Background())) + require.True(env.blkManager.SetPreference(commit.ID())) + + // Stop rewarding once our staker is rewarded + if staker.TxID == txID { + break + } + } + + // Staking rewards should have been issued + rewardUTXOs, err := env.state.GetRewardUTXOs(txID) + require.NoError(err) + require.Empty(rewardUTXOs) // No rewards on Flare +} + +func TestBuildBlockAdvanceTime(t *testing.T) { + require := require.New(t) + + env := newEnvironment(t, latestFork) + env.ctx.Lock.Lock() + defer env.ctx.Lock.Unlock() + + var ( + 
now = env.backend.Clk.Time() + nextTime = now.Add(2 * txexecutor.SyncBound) + ) + + // Add a staker to [env.state] + env.state.PutCurrentValidator(&state.Staker{ + NextTime: nextTime, + Priority: txs.PrimaryNetworkValidatorCurrentPriority, + }) + + // Advance wall clock to [nextTime] + env.backend.Clk.Set(nextTime) + + // [BuildBlock] should build a block advancing the time to [NextTime] + blkIntf, err := env.Builder.BuildBlock(context.Background()) + require.NoError(err) + + require.IsType(&blockexecutor.Block{}, blkIntf) + blk := blkIntf.(*blockexecutor.Block) + require.Empty(blk.Txs()) + require.IsType(&block.BanffStandardBlock{}, blk.Block) + standardBlk := blk.Block.(*block.BanffStandardBlock) + require.Equal(nextTime.Unix(), standardBlk.Timestamp().Unix()) +} + +func TestBuildBlockForceAdvanceTime(t *testing.T) { + require := require.New(t) + + env := newEnvironment(t, latestFork) + env.ctx.Lock.Lock() + defer env.ctx.Lock.Unlock() + + // Create a valid transaction + tx, err := env.txBuilder.NewCreateChainTx( + testSubnet1.ID(), + nil, + constants.AVMID, + nil, + "chain name", + []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, + ids.ShortEmpty, + nil, + ) + require.NoError(err) + txID := tx.ID() + + // Issue the transaction + env.ctx.Lock.Unlock() + require.NoError(env.network.IssueTx(context.Background(), tx)) + env.ctx.Lock.Lock() + _, ok := env.mempool.Get(txID) + require.True(ok) + + var ( + now = env.backend.Clk.Time() + nextTime = now.Add(2 * txexecutor.SyncBound) + ) + + // Add a staker to [env.state] + env.state.PutCurrentValidator(&state.Staker{ + NextTime: nextTime, + Priority: txs.PrimaryNetworkValidatorCurrentPriority, + }) + + // Advance wall clock to [nextTime] + [txexecutor.SyncBound] + env.backend.Clk.Set(nextTime.Add(txexecutor.SyncBound)) + + // [BuildBlock] should build a block advancing the time to [nextTime], + // not the current wall clock. 
+ blkIntf, err := env.Builder.BuildBlock(context.Background()) + require.NoError(err) + + require.IsType(&blockexecutor.Block{}, blkIntf) + blk := blkIntf.(*blockexecutor.Block) + require.Equal([]*txs.Tx{tx}, blk.Txs()) + require.IsType(&block.BanffStandardBlock{}, blk.Block) + standardBlk := blk.Block.(*block.BanffStandardBlock) + require.Equal(nextTime.Unix(), standardBlk.Timestamp().Unix()) +} + +func TestBuildBlockDropExpiredStakerTxs(t *testing.T) { + require := require.New(t) + + env := newEnvironment(t, latestFork) + env.ctx.Lock.Lock() + defer env.ctx.Lock.Unlock() + + // The [StartTime] in a staker tx is only validated pre-Durango. + // TODO: Delete this test post-Durango activation. + env.config.DurangoTime = mockable.MaxTime + + var ( + now = env.backend.Clk.Time() + defaultValidatorStake = 100 * units.MilliAvax + + // Add a validator with StartTime in the future within [MaxFutureStartTime] + validatorStartTime = now.Add(txexecutor.MaxFutureStartTime - 1*time.Second) + validatorEndTime = validatorStartTime.Add(360 * 24 * time.Hour) + ) + + tx1, err := env.txBuilder.NewAddValidatorTx( + defaultValidatorStake, + uint64(validatorStartTime.Unix()), + uint64(validatorEndTime.Unix()), + ids.GenerateTestNodeID(), + preFundedKeys[0].PublicKey().Address(), + reward.PercentDenominator, + []*secp256k1.PrivateKey{preFundedKeys[0]}, + preFundedKeys[0].PublicKey().Address(), + nil, + ) + require.NoError(err) + require.NoError(env.mempool.Add(tx1)) + tx1ID := tx1.ID() + _, ok := env.mempool.Get(tx1ID) + require.True(ok) + + // Add a validator with StartTime before current chain time + validator2StartTime := now.Add(-5 * time.Second) + validator2EndTime := validator2StartTime.Add(360 * 24 * time.Hour) + + tx2, err := env.txBuilder.NewAddValidatorTx( + defaultValidatorStake, + uint64(validator2StartTime.Unix()), + uint64(validator2EndTime.Unix()), + ids.GenerateTestNodeID(), + preFundedKeys[1].PublicKey().Address(), + reward.PercentDenominator, + 
[]*secp256k1.PrivateKey{preFundedKeys[1]}, + preFundedKeys[1].PublicKey().Address(), + nil, + ) + require.NoError(err) + require.NoError(env.mempool.Add(tx2)) + tx2ID := tx2.ID() + _, ok = env.mempool.Get(tx2ID) + require.True(ok) + + // Add a validator with StartTime in the future past [MaxFutureStartTime] + validator3StartTime := now.Add(txexecutor.MaxFutureStartTime + 5*time.Second) + validator3EndTime := validator2StartTime.Add(360 * 24 * time.Hour) + + tx3, err := env.txBuilder.NewAddValidatorTx( + defaultValidatorStake, + uint64(validator3StartTime.Unix()), + uint64(validator3EndTime.Unix()), + ids.GenerateTestNodeID(), + preFundedKeys[2].PublicKey().Address(), + reward.PercentDenominator, + []*secp256k1.PrivateKey{preFundedKeys[2]}, + preFundedKeys[2].PublicKey().Address(), + nil, + ) + require.NoError(err) + require.NoError(env.mempool.Add(tx3)) + tx3ID := tx3.ID() + _, ok = env.mempool.Get(tx3ID) + require.True(ok) + + // Only tx1 should be in a built block + blkIntf, err := env.Builder.BuildBlock(context.Background()) + require.NoError(err) + + require.IsType(&blockexecutor.Block{}, blkIntf) + blk := blkIntf.(*blockexecutor.Block) + require.Len(blk.Txs(), 1) + require.Equal(tx1ID, blk.Txs()[0].ID()) + + // Mempool should have none of the txs + _, ok = env.mempool.Get(tx1ID) + require.False(ok) + _, ok = env.mempool.Get(tx2ID) + require.False(ok) + _, ok = env.mempool.Get(tx3ID) + require.False(ok) + + // Only tx2 and tx3 should be dropped + require.NoError(env.mempool.GetDropReason(tx1ID)) + + tx2DropReason := env.mempool.GetDropReason(tx2ID) + require.ErrorIs(tx2DropReason, txexecutor.ErrTimestampNotBeforeStartTime) + + tx3DropReason := env.mempool.GetDropReason(tx3ID) + require.ErrorIs(tx3DropReason, txexecutor.ErrFutureStakeTime) +} + +func TestBuildBlockInvalidStakingDurations(t *testing.T) { + require := require.New(t) + + env := newEnvironment(t, latestFork) + env.ctx.Lock.Lock() + defer env.ctx.Lock.Unlock() + + // Post-Durango, [StartTime] is no 
longer validated. Staking durations are + // based on the current chain timestamp and must be validated. + env.config.DurangoTime = time.Time{} + + var ( + now = env.backend.Clk.Time() + defaultValidatorStake = 100 * units.MilliAvax + + // Add a validator ending in [MaxStakeDuration] + validatorEndTime = now.Add(env.config.MaxStakeDuration) + ) + + sk, err := bls.NewSecretKey() + require.NoError(err) + + tx1, err := env.txBuilder.NewAddPermissionlessValidatorTx( + defaultValidatorStake, + uint64(now.Unix()), + uint64(validatorEndTime.Unix()), + ids.GenerateTestNodeID(), + signer.NewProofOfPossession(sk), + preFundedKeys[0].PublicKey().Address(), + reward.PercentDenominator, + []*secp256k1.PrivateKey{preFundedKeys[0]}, + preFundedKeys[0].PublicKey().Address(), + nil, + ) + require.NoError(err) + require.NoError(env.mempool.Add(tx1)) + tx1ID := tx1.ID() + _, ok := env.mempool.Get(tx1ID) + require.True(ok) + + // Add a validator ending past [MaxStakeDuration] + validator2EndTime := now.Add(env.config.MaxStakeDuration + time.Second) + + sk, err = bls.NewSecretKey() + require.NoError(err) + + tx2, err := env.txBuilder.NewAddPermissionlessValidatorTx( + defaultValidatorStake, + uint64(now.Unix()), + uint64(validator2EndTime.Unix()), + ids.GenerateTestNodeID(), + signer.NewProofOfPossession(sk), + preFundedKeys[2].PublicKey().Address(), + reward.PercentDenominator, + []*secp256k1.PrivateKey{preFundedKeys[2]}, + preFundedKeys[2].PublicKey().Address(), + nil, + ) + require.NoError(err) + require.NoError(env.mempool.Add(tx2)) + tx2ID := tx2.ID() + _, ok = env.mempool.Get(tx2ID) + require.True(ok) + + // Only tx1 should be in a built block since [MaxStakeDuration] is satisfied. 
+ blkIntf, err := env.Builder.BuildBlock(context.Background()) + require.NoError(err) + + require.IsType(&blockexecutor.Block{}, blkIntf) + blk := blkIntf.(*blockexecutor.Block) + require.Len(blk.Txs(), 1) + require.Equal(tx1ID, blk.Txs()[0].ID()) + + // Mempool should have none of the txs + _, ok = env.mempool.Get(tx1ID) + require.False(ok) + _, ok = env.mempool.Get(tx2ID) + require.False(ok) + + // Only tx2 should be dropped + require.NoError(env.mempool.GetDropReason(tx1ID)) + + tx2DropReason := env.mempool.GetDropReason(tx2ID) + require.ErrorIs(tx2DropReason, txexecutor.ErrStakeTooLong) +} + +func TestPreviouslyDroppedTxsCannotBeReAddedToMempool(t *testing.T) { + require := require.New(t) + + env := newEnvironment(t, latestFork) + env.ctx.Lock.Lock() + defer env.ctx.Lock.Unlock() + + // Create a valid transaction + tx, err := env.txBuilder.NewCreateChainTx( + testSubnet1.ID(), + nil, + constants.AVMID, + nil, + "chain name", + []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, + ids.ShortEmpty, + nil, + ) + require.NoError(err) + txID := tx.ID() + + // Transaction should not be marked as dropped before being added to the + // mempool + require.NoError(env.mempool.GetDropReason(txID)) + + // Mark the transaction as dropped + errTestingDropped := errors.New("testing dropped") + env.mempool.MarkDropped(txID, errTestingDropped) + err = env.mempool.GetDropReason(txID) + require.ErrorIs(err, errTestingDropped) + + // Issue the transaction + env.ctx.Lock.Unlock() + err = env.network.IssueTx(context.Background(), tx) + require.ErrorIs(err, errTestingDropped) + env.ctx.Lock.Lock() + _, ok := env.mempool.Get(txID) + require.False(ok) + + // When issued again, the mempool should still be marked as dropped + err = env.mempool.GetDropReason(txID) + require.ErrorIs(err, errTestingDropped) +} + +func TestNoErrorOnUnexpectedSetPreferenceDuringBootstrapping(t *testing.T) { + require := require.New(t) + + env := newEnvironment(t, latestFork) + 
env.ctx.Lock.Lock() + defer env.ctx.Lock.Unlock() + + env.isBootstrapped.Set(false) + + require.True(env.blkManager.SetPreference(ids.GenerateTestID())) // should not panic +} + +func TestGetNextStakerToReward(t *testing.T) { + var ( + now = time.Now() + txID = ids.GenerateTestID() + ) + + type test struct { + name string + timestamp time.Time + stateF func(*gomock.Controller) state.Chain + expectedTxID ids.ID + expectedShouldReward bool + expectedErr error + } + + tests := []test{ + { + name: "end of time", + timestamp: mockable.MaxTime, + stateF: func(ctrl *gomock.Controller) state.Chain { + return state.NewMockChain(ctrl) + }, + expectedErr: ErrEndOfTime, + }, + { + name: "no stakers", + timestamp: now, + stateF: func(ctrl *gomock.Controller) state.Chain { + currentStakerIter := state.NewMockStakerIterator(ctrl) + currentStakerIter.EXPECT().Next().Return(false) + currentStakerIter.EXPECT().Release() + + s := state.NewMockChain(ctrl) + s.EXPECT().GetCurrentStakerIterator().Return(currentStakerIter, nil) + + return s + }, + }, + { + name: "expired subnet validator/delegator", + timestamp: now, + stateF: func(ctrl *gomock.Controller) state.Chain { + currentStakerIter := state.NewMockStakerIterator(ctrl) + + currentStakerIter.EXPECT().Next().Return(true) + currentStakerIter.EXPECT().Value().Return(&state.Staker{ + Priority: txs.SubnetPermissionedValidatorCurrentPriority, + EndTime: now, + }) + currentStakerIter.EXPECT().Next().Return(true) + currentStakerIter.EXPECT().Value().Return(&state.Staker{ + TxID: txID, + Priority: txs.SubnetPermissionlessDelegatorCurrentPriority, + EndTime: now, + }) + currentStakerIter.EXPECT().Release() + + s := state.NewMockChain(ctrl) + s.EXPECT().GetCurrentStakerIterator().Return(currentStakerIter, nil) + + return s + }, + expectedTxID: txID, + expectedShouldReward: true, + }, + { + name: "expired primary network validator after subnet expired subnet validator", + timestamp: now, + stateF: func(ctrl *gomock.Controller) state.Chain { + 
currentStakerIter := state.NewMockStakerIterator(ctrl) + + currentStakerIter.EXPECT().Next().Return(true) + currentStakerIter.EXPECT().Value().Return(&state.Staker{ + Priority: txs.SubnetPermissionedValidatorCurrentPriority, + EndTime: now, + }) + currentStakerIter.EXPECT().Next().Return(true) + currentStakerIter.EXPECT().Value().Return(&state.Staker{ + TxID: txID, + Priority: txs.PrimaryNetworkValidatorCurrentPriority, + EndTime: now, + }) + currentStakerIter.EXPECT().Release() + + s := state.NewMockChain(ctrl) + s.EXPECT().GetCurrentStakerIterator().Return(currentStakerIter, nil) + + return s + }, + expectedTxID: txID, + expectedShouldReward: true, + }, + { + name: "expired primary network delegator after subnet expired subnet validator", + timestamp: now, + stateF: func(ctrl *gomock.Controller) state.Chain { + currentStakerIter := state.NewMockStakerIterator(ctrl) + + currentStakerIter.EXPECT().Next().Return(true) + currentStakerIter.EXPECT().Value().Return(&state.Staker{ + Priority: txs.SubnetPermissionedValidatorCurrentPriority, + EndTime: now, + }) + currentStakerIter.EXPECT().Next().Return(true) + currentStakerIter.EXPECT().Value().Return(&state.Staker{ + TxID: txID, + Priority: txs.PrimaryNetworkDelegatorCurrentPriority, + EndTime: now, + }) + currentStakerIter.EXPECT().Release() + + s := state.NewMockChain(ctrl) + s.EXPECT().GetCurrentStakerIterator().Return(currentStakerIter, nil) + + return s + }, + expectedTxID: txID, + expectedShouldReward: true, + }, + { + name: "non-expired primary network delegator", + timestamp: now, + stateF: func(ctrl *gomock.Controller) state.Chain { + currentStakerIter := state.NewMockStakerIterator(ctrl) + + currentStakerIter.EXPECT().Next().Return(true) + currentStakerIter.EXPECT().Value().Return(&state.Staker{ + TxID: txID, + Priority: txs.PrimaryNetworkDelegatorCurrentPriority, + EndTime: now.Add(time.Second), + }) + currentStakerIter.EXPECT().Release() + + s := state.NewMockChain(ctrl) + 
s.EXPECT().GetCurrentStakerIterator().Return(currentStakerIter, nil) + + return s + }, + expectedTxID: txID, + expectedShouldReward: false, + }, + { + name: "non-expired primary network validator", + timestamp: now, + stateF: func(ctrl *gomock.Controller) state.Chain { + currentStakerIter := state.NewMockStakerIterator(ctrl) + + currentStakerIter.EXPECT().Next().Return(true) + currentStakerIter.EXPECT().Value().Return(&state.Staker{ + TxID: txID, + Priority: txs.PrimaryNetworkValidatorCurrentPriority, + EndTime: now.Add(time.Second), + }) + currentStakerIter.EXPECT().Release() + + s := state.NewMockChain(ctrl) + s.EXPECT().GetCurrentStakerIterator().Return(currentStakerIter, nil) + + return s + }, + expectedTxID: txID, + expectedShouldReward: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) + + state := tt.stateF(ctrl) + txID, shouldReward, err := getNextStakerToReward(tt.timestamp, state) + require.ErrorIs(err, tt.expectedErr) + if tt.expectedErr != nil { + return + } + require.Equal(tt.expectedTxID, txID) + require.Equal(tt.expectedShouldReward, shouldReward) + }) + } +} diff --git a/avalanchego/vms/platformvm/blocks/builder/helpers_test.go b/avalanchego/vms/platformvm/block/builder/helpers_test.go similarity index 61% rename from avalanchego/vms/platformvm/blocks/builder/helpers_test.go rename to avalanchego/vms/platformvm/block/builder/helpers_test.go index 73ac66ed..906ef93a 100644 --- a/avalanchego/vms/platformvm/blocks/builder/helpers_test.go +++ b/avalanchego/vms/platformvm/block/builder/helpers_test.go @@ -1,28 +1,29 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package builder import ( "context" - "errors" "fmt" "testing" "time" "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/chains" "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/database/manager" + "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/snow/uptime" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils" @@ -34,14 +35,12 @@ import ( "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/utils/units" - "github.com/ava-labs/avalanchego/utils/window" - "github.com/ava-labs/avalanchego/utils/wrappers" - "github.com/ava-labs/avalanchego/version" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/api" "github.com/ava-labs/avalanchego/vms/platformvm/config" "github.com/ava-labs/avalanchego/vms/platformvm/fx" "github.com/ava-labs/avalanchego/vms/platformvm/metrics" + "github.com/ava-labs/avalanchego/vms/platformvm/network" "github.com/ava-labs/avalanchego/vms/platformvm/reward" "github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/ava-labs/avalanchego/vms/platformvm/status" @@ -50,16 +49,23 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/utxo" "github.com/ava-labs/avalanchego/vms/secp256k1fx" - blockexecutor "github.com/ava-labs/avalanchego/vms/platformvm/blocks/executor" + blockexecutor 
"github.com/ava-labs/avalanchego/vms/platformvm/block/executor" txbuilder "github.com/ava-labs/avalanchego/vms/platformvm/txs/builder" txexecutor "github.com/ava-labs/avalanchego/vms/platformvm/txs/executor" + pvalidators "github.com/ava-labs/avalanchego/vms/platformvm/validators" ) const ( - testNetworkID = 10 // To be used in tests - defaultWeight = 10000 - maxRecentlyAcceptedWindowSize = 256 - recentlyAcceptedWindowTTL = 5 * time.Minute + defaultWeight = 10000 + trackChecksum = false + + apricotPhase3 fork = iota + apricotPhase5 + banff + cortina + durango + + latestFork = durango ) var ( @@ -71,18 +77,24 @@ var ( defaultMinValidatorStake = 5 * units.MilliAvax defaultBalance = 100 * defaultMinValidatorStake preFundedKeys = secp256k1.TestKeys() - avaxAssetID = ids.ID{'y', 'e', 'e', 't'} defaultTxFee = uint64(100) - xChainID = ids.Empty.Prefix(0) - cChainID = ids.Empty.Prefix(1) testSubnet1 *txs.Tx testSubnet1ControlKeys = preFundedKeys[0:3] - errMissingPrimaryValidators = errors.New("missing primary validator set") - errMissing = errors.New("missing") + // Node IDs of genesis validators. 
Initialized in init function + genesisNodeIDs []ids.NodeID ) +func init() { + genesisNodeIDs = make([]ids.NodeID, len(preFundedKeys)) + for i := range preFundedKeys { + genesisNodeIDs[i] = ids.GenerateTestNodeID() + } +} + +type fork uint8 + type mutableSharedMemory struct { atomic.SharedMemory } @@ -91,6 +103,7 @@ type environment struct { Builder blkManager blockexecutor.Manager mempool mempool.Mempool + network network.Network sender *common.SenderTest isBootstrapped *utils.Atomic[bool] @@ -108,28 +121,36 @@ type environment struct { backend txexecutor.Backend } -func newEnvironment(t *testing.T) *environment { +func newEnvironment(t *testing.T, f fork) *environment { //nolint:unparam + require := require.New(t) + res := &environment{ isBootstrapped: &utils.Atomic[bool]{}, - config: defaultConfig(), + config: defaultConfig(t, f), clk: defaultClock(), } res.isBootstrapped.Set(true) - baseDBManager := manager.NewMemDB(version.Semantic1_0_0) - res.baseDB = versiondb.New(baseDBManager.Current().Database) - res.ctx, res.msm = defaultCtx(res.baseDB) + res.baseDB = versiondb.New(memdb.New()) + atomicDB := prefixdb.New([]byte{1}, res.baseDB) + m := atomic.NewMemory(atomicDB) + + res.ctx = snowtest.Context(t, snowtest.PChainID) + res.msm = &mutableSharedMemory{ + SharedMemory: m.NewSharedMemory(res.ctx.ChainID), + } + res.ctx.SharedMemory = res.msm res.ctx.Lock.Lock() defer res.ctx.Lock.Unlock() - res.fx = defaultFx(res.clk, res.ctx.Log, res.isBootstrapped.Get()) + res.fx = defaultFx(t, res.clk, res.ctx.Log, res.isBootstrapped.Get()) rewardsCalc := reward.NewCalculator(res.config.RewardConfig) - res.state = defaultState(res.config, res.ctx, res.baseDB, rewardsCalc) + res.state = defaultState(t, res.config, res.ctx, res.baseDB, rewardsCalc) res.atomicUTXOs = avax.NewAtomicUTXOManager(res.ctx.SharedMemory, txs.Codec) - res.uptimes = uptime.NewManager(res.state) + res.uptimes = uptime.NewManager(res.state, res.clk) res.utxosHandler = utxo.NewHandler(res.ctx, res.clk, 
res.fx) res.txBuilder = txbuilder.New( @@ -155,48 +176,74 @@ func newEnvironment(t *testing.T) *environment { } registerer := prometheus.NewRegistry() - window := window.New[ids.ID]( - window.Config{ - Clock: res.clk, - MaxSize: maxRecentlyAcceptedWindowSize, - TTL: recentlyAcceptedWindowTTL, - }, - ) res.sender = &common.SenderTest{T: t} - - metrics, err := metrics.New("", registerer, res.config.TrackedSubnets) - if err != nil { - panic(fmt.Errorf("failed to create metrics: %w", err)) + res.sender.SendAppGossipF = func(context.Context, []byte) error { + return nil } - res.mempool, err = mempool.NewMempool("mempool", registerer, res) - if err != nil { - panic(fmt.Errorf("failed to create mempool: %w", err)) - } + metrics, err := metrics.New("", registerer) + require.NoError(err) + + res.mempool, err = mempool.New("mempool", registerer, nil) + require.NoError(err) + res.blkManager = blockexecutor.NewManager( res.mempool, metrics, res.state, &res.backend, - window, + pvalidators.TestManager, ) + txVerifier := network.NewLockedTxVerifier(&res.ctx.Lock, res.blkManager) + res.network, err = network.New( + res.backend.Ctx.Log, + res.backend.Ctx.NodeID, + res.backend.Ctx.SubnetID, + res.backend.Ctx.ValidatorState, + txVerifier, + res.mempool, + res.backend.Config.PartialSyncPrimaryNetwork, + res.sender, + registerer, + network.DefaultConfig, + ) + require.NoError(err) + res.Builder = New( res.mempool, - res.txBuilder, &res.backend, res.blkManager, - nil, // toEngine, - res.sender, ) + res.Builder.StartBlockTimer() + + res.blkManager.SetPreference(genesisID) + addSubnet(t, res) + + t.Cleanup(func() { + res.ctx.Lock.Lock() + defer res.ctx.Lock.Unlock() + + res.Builder.ShutdownBlockTimer() + + if res.isBootstrapped.Get() { + validatorIDs := res.config.Validators.GetValidatorIDs(constants.PrimaryNetworkID) - res.Builder.SetPreference(genesisID) - addSubnet(res) + require.NoError(res.uptimes.StopTracking(validatorIDs, constants.PrimaryNetworkID)) + + 
require.NoError(res.state.Commit()) + } + + require.NoError(res.state.Close()) + require.NoError(res.baseDB.Close()) + }) return res } -func addSubnet(env *environment) { +func addSubnet(t *testing.T, env *environment) { + require := require.New(t) + // Create a subnet var err error testSubnet1, err = env.txBuilder.NewCreateSubnetTx( @@ -208,108 +255,87 @@ func addSubnet(env *environment) { }, []*secp256k1.PrivateKey{preFundedKeys[0]}, preFundedKeys[0].PublicKey().Address(), + nil, ) - if err != nil { - panic(err) - } + require.NoError(err) // store it genesisID := env.state.GetLastAccepted() stateDiff, err := state.NewDiff(genesisID, env.blkManager) - if err != nil { - panic(err) - } + require.NoError(err) executor := txexecutor.StandardTxExecutor{ Backend: &env.backend, State: stateDiff, Tx: testSubnet1, } - err = testSubnet1.Unsigned.Visit(&executor) - if err != nil { - panic(err) - } + require.NoError(testSubnet1.Unsigned.Visit(&executor)) stateDiff.AddTx(testSubnet1, status.Committed) - if err := stateDiff.Apply(env.state); err != nil { - panic(err) - } + require.NoError(stateDiff.Apply(env.state)) } func defaultState( + t *testing.T, cfg *config.Config, ctx *snow.Context, db database.Database, rewards reward.Calculator, ) state.State { - genesisBytes := buildGenesisTest(ctx) + require := require.New(t) + + execCfg, _ := config.GetExecutionConfig([]byte(`{}`)) + genesisBytes := buildGenesisTest(t, ctx) state, err := state.New( db, genesisBytes, prometheus.NewRegistry(), cfg, + execCfg, ctx, metrics.Noop, rewards, - &utils.Atomic[bool]{}, ) - if err != nil { - panic(err) - } + require.NoError(err) // persist and reload to init a bunch of in-memory stuff state.SetHeight(0) - if err := state.Commit(); err != nil { - panic(err) - } - state.SetHeight( /*height*/ 0) - if err := state.Commit(); err != nil { - panic(err) - } - + require.NoError(state.Commit()) return state } -func defaultCtx(db database.Database) (*snow.Context, *mutableSharedMemory) { - ctx := 
snow.DefaultContextTest() - ctx.NetworkID = 10 - ctx.XChainID = xChainID - ctx.CChainID = cChainID - ctx.AVAXAssetID = avaxAssetID - - atomicDB := prefixdb.New([]byte{1}, db) - m := atomic.NewMemory(atomicDB) +func defaultConfig(t *testing.T, f fork) *config.Config { + var ( + apricotPhase3Time = mockable.MaxTime + apricotPhase5Time = mockable.MaxTime + banffTime = mockable.MaxTime + cortinaTime = mockable.MaxTime + durangoTime = mockable.MaxTime + ) - msm := &mutableSharedMemory{ - SharedMemory: m.NewSharedMemory(ctx.ChainID), + switch f { + case durango: + durangoTime = time.Time{} // neglecting fork ordering for this package's tests + fallthrough + case cortina: + cortinaTime = time.Time{} // neglecting fork ordering for this package's tests + fallthrough + case banff: + banffTime = time.Time{} // neglecting fork ordering for this package's tests + fallthrough + case apricotPhase5: + apricotPhase5Time = defaultValidateEndTime + fallthrough + case apricotPhase3: + apricotPhase3Time = defaultValidateEndTime + default: + require.NoError(t, fmt.Errorf("unhandled fork %d", f)) } - ctx.SharedMemory = msm - - ctx.ValidatorState = &validators.TestState{ - GetSubnetIDF: func(_ context.Context, chainID ids.ID) (ids.ID, error) { - subnetID, ok := map[ids.ID]ids.ID{ - constants.PlatformChainID: constants.PrimaryNetworkID, - xChainID: constants.PrimaryNetworkID, - cChainID: constants.PrimaryNetworkID, - }[chainID] - if !ok { - return ids.Empty, errMissing - } - return subnetID, nil - }, - } - - return ctx, msm -} -func defaultConfig() *config.Config { - vdrs := validators.NewManager() - primaryVdrs := validators.NewSet() - _ = vdrs.Add(constants.PrimaryNetworkID, primaryVdrs) return &config.Config{ Chains: chains.TestManager, UptimeLockedCalculator: uptime.NewLockedCalculator(), - Validators: vdrs, + Validators: validators.NewManager(), TxFee: defaultTxFee, CreateSubnetTxFee: 100 * defaultTxFee, CreateBlockchainTxFee: 100 * defaultTxFee, @@ -324,17 +350,19 @@ func 
defaultConfig() *config.Config { MintingPeriod: 365 * 24 * time.Hour, SupplyCap: 720 * units.MegaAvax, }, - ApricotPhase3Time: defaultValidateEndTime, - ApricotPhase5Time: defaultValidateEndTime, - BanffTime: time.Time{}, // neglecting fork ordering this for package tests + ApricotPhase3Time: apricotPhase3Time, + ApricotPhase5Time: apricotPhase5Time, + BanffTime: banffTime, + CortinaTime: cortinaTime, + DurangoTime: durangoTime, } } func defaultClock() *mockable.Clock { // set time after Banff fork (and before default nextStakerTime) - clk := mockable.Clock{} + clk := &mockable.Clock{} clk.Set(defaultGenesisTime) - return &clk + return clk } type fxVMInt struct { @@ -359,48 +387,42 @@ func (fvi *fxVMInt) EthVerificationEnabled() bool { return false } -func defaultFx(clk *mockable.Clock, log logging.Logger, isBootstrapped bool) fx.Fx { +func defaultFx(t *testing.T, clk *mockable.Clock, log logging.Logger, isBootstrapped bool) fx.Fx { + require := require.New(t) + fxVMInt := &fxVMInt{ - registry: linearcodec.NewDefault(), + registry: linearcodec.NewDefault(time.Time{}), clk: clk, log: log, } res := &secp256k1fx.Fx{} - if err := res.Initialize(fxVMInt); err != nil { - panic(err) - } + require.NoError(res.Initialize(fxVMInt)) if isBootstrapped { - if err := res.Bootstrapped(); err != nil { - panic(err) - } + require.NoError(res.Bootstrapped()) } return res } -func buildGenesisTest(ctx *snow.Context) []byte { +func buildGenesisTest(t *testing.T, ctx *snow.Context) []byte { + require := require.New(t) + genesisUTXOs := make([]api.UTXO, len(preFundedKeys)) - hrp := constants.NetworkIDToHRP[testNetworkID] for i, key := range preFundedKeys { id := key.PublicKey().Address() - addr, err := address.FormatBech32(hrp, id.Bytes()) - if err != nil { - panic(err) - } + addr, err := address.FormatBech32(constants.UnitTestHRP, id.Bytes()) + require.NoError(err) genesisUTXOs[i] = api.UTXO{ Amount: json.Uint64(defaultBalance), Address: addr, } } - genesisValidators := 
make([]api.PermissionlessValidator, len(preFundedKeys)) - for i, key := range preFundedKeys { - nodeID := ids.NodeID(key.PublicKey().Address()) - addr, err := address.FormatBech32(hrp, nodeID.Bytes()) - if err != nil { - panic(err) - } - genesisValidators[i] = api.PermissionlessValidator{ - Staker: api.Staker{ + genesisValidators := make([]api.GenesisPermissionlessValidator, len(genesisNodeIDs)) + for i, nodeID := range genesisNodeIDs { + addr, err := address.FormatBech32(constants.UnitTestHRP, nodeID.Bytes()) + require.NoError(err) + genesisValidators[i] = api.GenesisPermissionlessValidator{ + GenesisValidator: api.GenesisValidator{ StartTime: json.Uint64(defaultValidateStartTime.Unix()), EndTime: json.Uint64(defaultValidateEndTime.Unix()), NodeID: nodeID, @@ -418,7 +440,7 @@ func buildGenesisTest(ctx *snow.Context) []byte { } buildGenesisArgs := api.BuildGenesisArgs{ - NetworkID: json.Uint32(testNetworkID), + NetworkID: json.Uint32(constants.UnitTestID), AvaxAssetID: ctx.AVAXAssetID, UTXOs: genesisUTXOs, Validators: genesisValidators, @@ -430,43 +452,10 @@ func buildGenesisTest(ctx *snow.Context) []byte { buildGenesisResponse := api.BuildGenesisReply{} platformvmSS := api.StaticService{} - if err := platformvmSS.BuildGenesis(nil, &buildGenesisArgs, &buildGenesisResponse); err != nil { - panic(fmt.Errorf("problem while building platform chain's genesis state: %w", err)) - } + require.NoError(platformvmSS.BuildGenesis(nil, &buildGenesisArgs, &buildGenesisResponse)) genesisBytes, err := formatting.Decode(buildGenesisResponse.Encoding, buildGenesisResponse.Bytes) - if err != nil { - panic(err) - } + require.NoError(err) return genesisBytes } - -func shutdownEnvironment(env *environment) error { - if env.isBootstrapped.Get() { - primaryValidatorSet, exist := env.config.Validators.Get(constants.PrimaryNetworkID) - if !exist { - return errMissingPrimaryValidators - } - primaryValidators := primaryValidatorSet.List() - - validatorIDs := make([]ids.NodeID, 
len(primaryValidators)) - for i, vdr := range primaryValidators { - validatorIDs[i] = vdr.NodeID - } - - if err := env.uptimes.StopTracking(validatorIDs, constants.PrimaryNetworkID); err != nil { - return err - } - if err := env.state.Commit(); err != nil { - return err - } - } - - errs := wrappers.Errs{} - errs.Add( - env.state.Close(), - env.baseDB.Close(), - ) - return errs.Err -} diff --git a/avalanchego/vms/platformvm/block/builder/main_test.go b/avalanchego/vms/platformvm/block/builder/main_test.go new file mode 100644 index 00000000..31149bfb --- /dev/null +++ b/avalanchego/vms/platformvm/block/builder/main_test.go @@ -0,0 +1,14 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package builder + +import ( + "testing" + + "go.uber.org/goleak" +) + +func TestMain(m *testing.M) { + goleak.VerifyTestMain(m) +} diff --git a/avalanchego/vms/platformvm/blocks/builder/standard_block_test.go b/avalanchego/vms/platformvm/block/builder/standard_block_test.go similarity index 84% rename from avalanchego/vms/platformvm/blocks/builder/standard_block_test.go rename to avalanchego/vms/platformvm/block/builder/standard_block_test.go index 8fa9c716..fa1a07fb 100644 --- a/avalanchego/vms/platformvm/blocks/builder/standard_block_test.go +++ b/avalanchego/vms/platformvm/block/builder/standard_block_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package builder @@ -22,11 +22,9 @@ import ( func TestAtomicTxImports(t *testing.T) { require := require.New(t) - env := newEnvironment(t) + env := newEnvironment(t, latestFork) env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - }() + defer env.ctx.Lock.Unlock() utxoID := avax.UTXOID{ TxID: ids.Empty.Prefix(1), @@ -41,7 +39,7 @@ func TestAtomicTxImports(t *testing.T) { peerSharedMemory := m.NewSharedMemory(env.ctx.XChainID) utxo := &avax.UTXO{ UTXOID: utxoID, - Asset: avax.Asset{ID: avaxAssetID}, + Asset: avax.Asset{ID: env.ctx.AVAXAssetID}, Out: &secp256k1fx.TransferOutput{ Amt: amount, OutputOwners: secp256k1fx.OutputOwners{ @@ -50,11 +48,11 @@ func TestAtomicTxImports(t *testing.T) { }, }, } - utxoBytes, err := txs.Codec.Marshal(txs.Version, utxo) + utxoBytes, err := txs.Codec.Marshal(txs.CodecVersion, utxo) require.NoError(err) inputID := utxo.InputID() - err = peerSharedMemory.Apply(map[ids.ID]*atomic.Requests{ + require.NoError(peerSharedMemory.Apply(map[ids.ID]*atomic.Requests{ env.ctx.ChainID: {PutRequests: []*atomic.Element{{ Key: inputID[:], Value: utxoBytes, @@ -62,14 +60,14 @@ func TestAtomicTxImports(t *testing.T) { recipientKey.PublicKey().Address().Bytes(), }, }}}, - }) - require.NoError(err) + })) tx, err := env.txBuilder.NewImportTx( env.ctx.XChainID, recipientKey.PublicKey().Address(), []*secp256k1.PrivateKey{recipientKey}, ids.ShortEmpty, // change addr + nil, ) require.NoError(err) @@ -82,5 +80,5 @@ func TestAtomicTxImports(t *testing.T) { _, txStatus, err := env.state.GetTx(tx.ID()) require.NoError(err) // Ensure transaction is in the committed state - require.Equal(txStatus, status.Committed) + require.Equal(status.Committed, txStatus) } diff --git a/avalanchego/vms/platformvm/blocks/codec.go b/avalanchego/vms/platformvm/block/codec.go similarity index 54% rename from avalanchego/vms/platformvm/blocks/codec.go rename to avalanchego/vms/platformvm/block/codec.go index ac0f42da..33babbaf 100644 --- 
a/avalanchego/vms/platformvm/blocks/codec.go +++ b/avalanchego/vms/platformvm/block/codec.go @@ -1,49 +1,67 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package blocks +package block import ( "math" + "time" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/vms/platformvm/txs" ) -// Version is the current default codec version -const Version = txs.Version +const CodecVersion = txs.CodecVersion -// GenesisCode allows blocks of larger than usual size to be parsed. -// While this gives flexibility in accommodating large genesis blocks -// it must not be used to parse new, unverified blocks which instead -// must be processed by Codec var ( - Codec codec.Manager + // GenesisCodec allows blocks of larger than usual size to be parsed. 
+ // While this gives flexibility in accommodating large genesis blocks + // it must not be used to parse new, unverified blocks which instead + // must be processed by Codec GenesisCodec codec.Manager + + Codec codec.Manager ) -func init() { - c := linearcodec.NewDefault() - Codec = codec.NewDefaultManager() - gc := linearcodec.NewCustomMaxLength(math.MaxInt32) - GenesisCodec = codec.NewManager(math.MaxInt32) +// TODO: Remove after v1.11.x has activated +// +// Invariant: InitCodec, Codec, and GenesisCodec must not be accessed +// concurrently +func InitCodec(durangoTime time.Time) error { + c := linearcodec.NewDefault(durangoTime) + gc := linearcodec.NewDefault(time.Time{}) errs := wrappers.Errs{} - for _, c := range []codec.Registry{c, gc} { + for _, c := range []linearcodec.Codec{c, gc} { errs.Add( RegisterApricotBlockTypes(c), txs.RegisterUnsignedTxsTypes(c), RegisterBanffBlockTypes(c), + txs.RegisterDUnsignedTxsTypes(c), ) } + + newCodec := codec.NewDefaultManager() + newGenesisCodec := codec.NewManager(math.MaxInt32) errs.Add( - Codec.RegisterCodec(Version, c), - GenesisCodec.RegisterCodec(Version, gc), + newCodec.RegisterCodec(CodecVersion, c), + newGenesisCodec.RegisterCodec(CodecVersion, gc), ) if errs.Errored() { - panic(errs.Err) + return errs.Err + } + + Codec = newCodec + GenesisCodec = newGenesisCodec + return nil +} + +func init() { + if err := InitCodec(time.Time{}); err != nil { + panic(err) } } @@ -52,24 +70,20 @@ func init() { // subpackage-level codecs were introduced, each handling serialization of // specific types. 
func RegisterApricotBlockTypes(targetCodec codec.Registry) error { - errs := wrappers.Errs{} - errs.Add( + return utils.Err( targetCodec.RegisterType(&ApricotProposalBlock{}), targetCodec.RegisterType(&ApricotAbortBlock{}), targetCodec.RegisterType(&ApricotCommitBlock{}), targetCodec.RegisterType(&ApricotStandardBlock{}), targetCodec.RegisterType(&ApricotAtomicBlock{}), ) - return errs.Err } func RegisterBanffBlockTypes(targetCodec codec.Registry) error { - errs := wrappers.Errs{} - errs.Add( + return utils.Err( targetCodec.RegisterType(&BanffProposalBlock{}), targetCodec.RegisterType(&BanffAbortBlock{}), targetCodec.RegisterType(&BanffCommitBlock{}), targetCodec.RegisterType(&BanffStandardBlock{}), ) - return errs.Err } diff --git a/avalanchego/vms/platformvm/blocks/commit_block.go b/avalanchego/vms/platformvm/block/commit_block.go similarity index 89% rename from avalanchego/vms/platformvm/blocks/commit_block.go rename to avalanchego/vms/platformvm/block/commit_block.go index 5247c45d..ac6dbb1e 100644 --- a/avalanchego/vms/platformvm/blocks/commit_block.go +++ b/avalanchego/vms/platformvm/block/commit_block.go @@ -1,7 +1,7 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
-package blocks +package block import ( "time" @@ -43,7 +43,7 @@ func NewBanffCommitBlock( }, }, } - return blk, initialize(blk) + return blk, initialize(blk, &blk.CommonBlock) } type ApricotCommitBlock struct { @@ -75,5 +75,5 @@ func NewApricotCommitBlock( Hght: height, }, } - return blk, initialize(blk) + return blk, initialize(blk, &blk.CommonBlock) } diff --git a/avalanchego/vms/platformvm/blocks/commit_block_test.go b/avalanchego/vms/platformvm/block/commit_block_test.go similarity index 92% rename from avalanchego/vms/platformvm/blocks/commit_block_test.go rename to avalanchego/vms/platformvm/block/commit_block_test.go index 24023c40..f89489d5 100644 --- a/avalanchego/vms/platformvm/blocks/commit_block_test.go +++ b/avalanchego/vms/platformvm/block/commit_block_test.go @@ -1,7 +1,7 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package blocks +package block import ( "testing" diff --git a/avalanchego/vms/platformvm/blocks/common_block.go b/avalanchego/vms/platformvm/block/common_block.go similarity index 79% rename from avalanchego/vms/platformvm/blocks/common_block.go rename to avalanchego/vms/platformvm/block/common_block.go index 1a48e6c7..f4b46b81 100644 --- a/avalanchego/vms/platformvm/blocks/common_block.go +++ b/avalanchego/vms/platformvm/block/common_block.go @@ -1,7 +1,7 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package blocks +package block import ( "github.com/ava-labs/avalanchego/ids" @@ -16,17 +16,17 @@ type CommonBlock struct { // This block's height. The genesis block is at height 0. 
Hght uint64 `serialize:"true" json:"height"` - id ids.ID - bytes []byte + BlockID ids.ID `json:"id"` + bytes []byte } func (b *CommonBlock) initialize(bytes []byte) { - b.id = hashing.ComputeHash256Array(bytes) + b.BlockID = hashing.ComputeHash256Array(bytes) b.bytes = bytes } func (b *CommonBlock) ID() ids.ID { - return b.id + return b.BlockID } func (b *CommonBlock) Parent() ids.ID { diff --git a/avalanchego/vms/platformvm/blocks/executor/README.md b/avalanchego/vms/platformvm/block/executor/README.md similarity index 100% rename from avalanchego/vms/platformvm/blocks/executor/README.md rename to avalanchego/vms/platformvm/block/executor/README.md diff --git a/avalanchego/vms/platformvm/blocks/executor/acceptor.go b/avalanchego/vms/platformvm/block/executor/acceptor.go similarity index 50% rename from avalanchego/vms/platformvm/blocks/executor/acceptor.go rename to avalanchego/vms/platformvm/block/executor/acceptor.go index 5aaa1d82..cc2bcef0 100644 --- a/avalanchego/vms/platformvm/blocks/executor/acceptor.go +++ b/avalanchego/vms/platformvm/block/executor/acceptor.go @@ -1,151 +1,82 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package executor import ( + "errors" "fmt" "go.uber.org/zap" - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/utils" - "github.com/ava-labs/avalanchego/utils/window" - "github.com/ava-labs/avalanchego/vms/platformvm/blocks" + "github.com/ava-labs/avalanchego/vms/platformvm/block" "github.com/ava-labs/avalanchego/vms/platformvm/metrics" "github.com/ava-labs/avalanchego/vms/platformvm/state" + "github.com/ava-labs/avalanchego/vms/platformvm/validators" ) -var _ blocks.Visitor = (*acceptor)(nil) +var ( + _ block.Visitor = (*acceptor)(nil) + + errMissingBlockState = errors.New("missing state of block") +) // acceptor handles the logic for accepting a block. // All errors returned by this struct are fatal and should result in the chain // being shutdown. type acceptor struct { *backend - metrics metrics.Metrics - recentlyAccepted window.Window[ids.ID] - bootstrapped *utils.Atomic[bool] + metrics metrics.Metrics + validators validators.Manager + bootstrapped *utils.Atomic[bool] } -func (a *acceptor) BanffAbortBlock(b *blocks.BanffAbortBlock) error { - a.ctx.Log.Debug( - "accepting block", - zap.String("blockType", "banff abort"), - zap.Stringer("blkID", b.ID()), - zap.Uint64("height", b.Height()), - zap.Stringer("parentID", b.Parent()), - ) - - return a.abortBlock(b) +func (a *acceptor) BanffAbortBlock(b *block.BanffAbortBlock) error { + return a.optionBlock(b, "banff abort") } -func (a *acceptor) BanffCommitBlock(b *blocks.BanffCommitBlock) error { - a.ctx.Log.Debug( - "accepting block", - zap.String("blockType", "banff commit"), - zap.Stringer("blkID", b.ID()), - zap.Uint64("height", b.Height()), - zap.Stringer("parentID", b.Parent()), - ) - - return a.commitBlock(b) +func (a *acceptor) BanffCommitBlock(b *block.BanffCommitBlock) error { + return a.optionBlock(b, "banff commit") } -func (a *acceptor) BanffProposalBlock(b *blocks.BanffProposalBlock) error { - a.ctx.Log.Debug( - "accepting block", - 
zap.String("blockType", "banff proposal"), - zap.Stringer("blkID", b.ID()), - zap.Uint64("height", b.Height()), - zap.Stringer("parentID", b.Parent()), - ) - - a.proposalBlock(b) +func (a *acceptor) BanffProposalBlock(b *block.BanffProposalBlock) error { + a.proposalBlock(b, "banff proposal") return nil } -func (a *acceptor) BanffStandardBlock(b *blocks.BanffStandardBlock) error { - a.ctx.Log.Debug( - "accepting block", - zap.String("blockType", "banff standard"), - zap.Stringer("blkID", b.ID()), - zap.Uint64("height", b.Height()), - zap.Stringer("parentID", b.Parent()), - ) - - return a.standardBlock(b) +func (a *acceptor) BanffStandardBlock(b *block.BanffStandardBlock) error { + return a.standardBlock(b, "banff standard") } -func (a *acceptor) ApricotAbortBlock(b *blocks.ApricotAbortBlock) error { - a.ctx.Log.Debug( - "accepting block", - zap.String("blockType", "apricot abort"), - zap.Stringer("blkID", b.ID()), - zap.Uint64("height", b.Height()), - zap.Stringer("parentID", b.Parent()), - ) - - return a.abortBlock(b) +func (a *acceptor) ApricotAbortBlock(b *block.ApricotAbortBlock) error { + return a.optionBlock(b, "apricot abort") } -func (a *acceptor) ApricotCommitBlock(b *blocks.ApricotCommitBlock) error { - a.ctx.Log.Debug( - "accepting block", - zap.String("blockType", "apricot commit"), - zap.Stringer("blkID", b.ID()), - zap.Uint64("height", b.Height()), - zap.Stringer("parentID", b.Parent()), - ) - - return a.commitBlock(b) +func (a *acceptor) ApricotCommitBlock(b *block.ApricotCommitBlock) error { + return a.optionBlock(b, "apricot commit") } -func (a *acceptor) ApricotProposalBlock(b *blocks.ApricotProposalBlock) error { - a.ctx.Log.Debug( - "accepting block", - zap.String("blockType", "apricot proposal"), - zap.Stringer("blkID", b.ID()), - zap.Uint64("height", b.Height()), - zap.Stringer("parentID", b.Parent()), - ) - - a.proposalBlock(b) +func (a *acceptor) ApricotProposalBlock(b *block.ApricotProposalBlock) error { + a.proposalBlock(b, "apricot 
proposal") return nil } -func (a *acceptor) ApricotStandardBlock(b *blocks.ApricotStandardBlock) error { - a.ctx.Log.Debug( - "accepting block", - zap.String("blockType", "apricot standard"), - zap.Stringer("blkID", b.ID()), - zap.Uint64("height", b.Height()), - zap.Stringer("parentID", b.Parent()), - ) - - return a.standardBlock(b) +func (a *acceptor) ApricotStandardBlock(b *block.ApricotStandardBlock) error { + return a.standardBlock(b, "apricot standard") } -func (a *acceptor) ApricotAtomicBlock(b *blocks.ApricotAtomicBlock) error { +func (a *acceptor) ApricotAtomicBlock(b *block.ApricotAtomicBlock) error { blkID := b.ID() defer a.free(blkID) - a.ctx.Log.Debug( - "accepting block", - zap.String("blockType", "apricot atomic"), - zap.Stringer("blkID", blkID), - zap.Uint64("height", b.Height()), - zap.Stringer("parentID", b.Parent()), - ) - if err := a.commonAccept(b); err != nil { return err } blkState, ok := a.blkIDToState[blkID] if !ok { - return fmt.Errorf("couldn't find state of block %s", blkID) + return fmt.Errorf("%w %s", errMissingBlockState, blkID) } // Update the state to reflect the changes made in [onAcceptState]. 
@@ -172,49 +103,27 @@ func (a *acceptor) ApricotAtomicBlock(b *blocks.ApricotAtomicBlock) error { err, ) } - return nil -} - -func (a *acceptor) abortBlock(b blocks.Block) error { - parentID := b.Parent() - parentState, ok := a.blkIDToState[parentID] - if !ok { - return fmt.Errorf("%w: %s", state.ErrMissingParentState, parentID) - } - if a.bootstrapped.Get() { - if parentState.initiallyPreferCommit { - a.metrics.MarkOptionVoteLost() - } else { - a.metrics.MarkOptionVoteWon() - } - } + a.ctx.Log.Trace( + "accepted block", + zap.String("blockType", "apricot atomic"), + zap.Stringer("blkID", blkID), + zap.Uint64("height", b.Height()), + zap.Stringer("parentID", b.Parent()), + zap.Stringer("utxoChecksum", a.state.Checksum()), + ) - return a.optionBlock(b, parentState.statelessBlock) + return nil } -func (a *acceptor) commitBlock(b blocks.Block) error { +func (a *acceptor) optionBlock(b block.Block, blockType string) error { parentID := b.Parent() parentState, ok := a.blkIDToState[parentID] if !ok { return fmt.Errorf("%w: %s", state.ErrMissingParentState, parentID) } - if a.bootstrapped.Get() { - if parentState.initiallyPreferCommit { - a.metrics.MarkOptionVoteWon() - } else { - a.metrics.MarkOptionVoteLost() - } - } - - return a.optionBlock(b, parentState.statelessBlock) -} - -func (a *acceptor) optionBlock(b, parent blocks.Block) error { blkID := b.ID() - parentID := parent.ID() - defer func() { // Note: we assume this block's sibling doesn't // need the parent's state when it's rejected. @@ -223,7 +132,7 @@ func (a *acceptor) optionBlock(b, parent blocks.Block) error { }() // Note that the parent must be accepted first. 
- if err := a.commonAccept(parent); err != nil { + if err := a.commonAccept(parentState.statelessBlock); err != nil { return err } @@ -231,17 +140,52 @@ func (a *acceptor) optionBlock(b, parent blocks.Block) error { return err } + if parentState.onDecisionState != nil { + if err := parentState.onDecisionState.Apply(a.state); err != nil { + return err + } + } + blkState, ok := a.blkIDToState[blkID] if !ok { - return fmt.Errorf("couldn't find state of block %s", blkID) + return fmt.Errorf("%w %s", errMissingBlockState, blkID) } if err := blkState.onAcceptState.Apply(a.state); err != nil { return err } - return a.state.Commit() + + defer a.state.Abort() + batch, err := a.state.CommitBatch() + if err != nil { + return fmt.Errorf( + "failed to commit VM's database for block %s: %w", + blkID, + err, + ) + } + + // Note that this method writes [batch] to the database. + if err := a.ctx.SharedMemory.Apply(parentState.atomicRequests, batch); err != nil { + return fmt.Errorf("failed to apply vm's state to shared memory: %w", err) + } + + if onAcceptFunc := parentState.onAcceptFunc; onAcceptFunc != nil { + onAcceptFunc() + } + + a.ctx.Log.Trace( + "accepted block", + zap.String("blockType", blockType), + zap.Stringer("blkID", blkID), + zap.Uint64("height", b.Height()), + zap.Stringer("parentID", parentID), + zap.Stringer("utxoChecksum", a.state.Checksum()), + ) + + return nil } -func (a *acceptor) proposalBlock(b blocks.Block) { +func (a *acceptor) proposalBlock(b block.Block, blockType string) { // Note that: // // * We don't free the proposal block in this method. @@ -258,10 +202,20 @@ func (a *acceptor) proposalBlock(b blocks.Block) { // (The VM's Shutdown method commits the database.) 
// The snowman.Engine requires that the last committed block is a decision block - a.backend.lastAccepted = b.ID() + blkID := b.ID() + a.backend.lastAccepted = blkID + + a.ctx.Log.Trace( + "accepted block", + zap.String("blockType", blockType), + zap.Stringer("blkID", blkID), + zap.Uint64("height", b.Height()), + zap.Stringer("parentID", b.Parent()), + zap.Stringer("utxoChecksum", a.state.Checksum()), + ) } -func (a *acceptor) standardBlock(b blocks.Block) error { +func (a *acceptor) standardBlock(b block.Block, blockType string) error { blkID := b.ID() defer a.free(blkID) @@ -271,7 +225,7 @@ func (a *acceptor) standardBlock(b blocks.Block) error { blkState, ok := a.blkIDToState[blkID] if !ok { - return fmt.Errorf("couldn't find state of block %s", blkID) + return fmt.Errorf("%w %s", errMissingBlockState, blkID) } // Update the state to reflect the changes made in [onAcceptState]. @@ -297,10 +251,20 @@ func (a *acceptor) standardBlock(b blocks.Block) error { if onAcceptFunc := blkState.onAcceptFunc; onAcceptFunc != nil { onAcceptFunc() } + + a.ctx.Log.Trace( + "accepted block", + zap.String("blockType", blockType), + zap.Stringer("blkID", blkID), + zap.Uint64("height", b.Height()), + zap.Stringer("parentID", b.Parent()), + zap.Stringer("utxoChecksum", a.state.Checksum()), + ) + return nil } -func (a *acceptor) commonAccept(b blocks.Block) error { +func (a *acceptor) commonAccept(b block.Block) error { blkID := b.ID() if err := a.metrics.MarkAccepted(b); err != nil { @@ -310,7 +274,7 @@ func (a *acceptor) commonAccept(b blocks.Block) error { a.backend.lastAccepted = blkID a.state.SetLastAccepted(blkID) a.state.SetHeight(b.Height()) - a.state.AddStatelessBlock(b, choices.Accepted) - a.recentlyAccepted.Add(blkID) + a.state.AddStatelessBlock(b) + a.validators.OnAcceptedBlockID(blkID) return nil } diff --git a/avalanchego/vms/platformvm/blocks/executor/acceptor_test.go b/avalanchego/vms/platformvm/block/executor/acceptor_test.go similarity index 63% rename from 
avalanchego/vms/platformvm/blocks/executor/acceptor_test.go rename to avalanchego/vms/platformvm/block/executor/acceptor_test.go index 583e3ee6..00b75b3f 100644 --- a/avalanchego/vms/platformvm/blocks/executor/acceptor_test.go +++ b/avalanchego/vms/platformvm/block/executor/acceptor_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package executor @@ -7,35 +7,32 @@ import ( "testing" "time" - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/timer/mockable" - "github.com/ava-labs/avalanchego/utils/window" "github.com/ava-labs/avalanchego/vms/components/verify" - "github.com/ava-labs/avalanchego/vms/platformvm/blocks" + "github.com/ava-labs/avalanchego/vms/platformvm/block" "github.com/ava-labs/avalanchego/vms/platformvm/metrics" "github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/ava-labs/avalanchego/vms/platformvm/txs" + "github.com/ava-labs/avalanchego/vms/platformvm/validators" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) func TestAcceptorVisitProposalBlock(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() lastAcceptedID := ids.GenerateTestID() - blk, err := blocks.NewApricotProposalBlock( + blk, err := block.NewApricotProposalBlock( lastAcceptedID, 1, &txs.Tx{ @@ -51,6 +48,8 @@ func TestAcceptorVisitProposalBlock(t *testing.T) { blkID := blk.ID() s := state.NewMockState(ctrl) + s.EXPECT().Checksum().Return(ids.Empty).Times(1) + acceptor := &acceptor{ backend: 
&backend{ ctx: &snow.Context{ @@ -61,12 +60,11 @@ func TestAcceptorVisitProposalBlock(t *testing.T) { }, state: s, }, - metrics: metrics.Noop, - recentlyAccepted: nil, + metrics: metrics.Noop, + validators: validators.TestManager, } - err = acceptor.ApricotProposalBlock(blk) - require.NoError(err) + require.NoError(acceptor.ApricotProposalBlock(blk)) require.Equal(blkID, acceptor.backend.lastAccepted) @@ -82,7 +80,6 @@ func TestAcceptorVisitProposalBlock(t *testing.T) { func TestAcceptorVisitAtomicBlock(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() s := state.NewMockState(ctrl) sharedMemory := atomic.NewMockSharedMemory(ctrl) @@ -98,15 +95,11 @@ func TestAcceptorVisitAtomicBlock(t *testing.T) { SharedMemory: sharedMemory, }, }, - metrics: metrics.Noop, - recentlyAccepted: window.New[ids.ID](window.Config{ - Clock: &mockable.Clock{}, - MaxSize: 1, - TTL: time.Hour, - }), + metrics: metrics.Noop, + validators: validators.TestManager, } - blk, err := blocks.NewApricotAtomicBlock( + blk, err := block.NewApricotAtomicBlock( parentID, 1, &txs.Tx{ @@ -123,15 +116,15 @@ func TestAcceptorVisitAtomicBlock(t *testing.T) { // We should error after [commonAccept] is called. s.EXPECT().SetLastAccepted(blk.ID()).Times(1) s.EXPECT().SetHeight(blk.Height()).Times(1) - s.EXPECT().AddStatelessBlock(blk, choices.Accepted).Times(1) + s.EXPECT().AddStatelessBlock(blk).Times(1) err = acceptor.ApricotAtomicBlock(blk) - require.Error(err, "should fail because the block isn't in the state map") + require.ErrorIs(err, errMissingBlockState) // Set [blk]'s state in the map as though it had been verified. 
onAcceptState := state.NewMockDiff(ctrl) childID := ids.GenerateTestID() - atomicRequests := map[ids.ID]*atomic.Requests{ids.GenerateTestID(): nil} + atomicRequests := make(map[ids.ID]*atomic.Requests) acceptor.backend.blkIDToState[blk.ID()] = &blockState{ onAcceptState: onAcceptState, atomicRequests: atomicRequests, @@ -152,21 +145,20 @@ func TestAcceptorVisitAtomicBlock(t *testing.T) { // Set expected calls on dependencies. s.EXPECT().SetLastAccepted(blk.ID()).Times(1) s.EXPECT().SetHeight(blk.Height()).Times(1) - s.EXPECT().AddStatelessBlock(blk, choices.Accepted).Times(1) + s.EXPECT().AddStatelessBlock(blk).Times(1) batch := database.NewMockBatch(ctrl) s.EXPECT().CommitBatch().Return(batch, nil).Times(1) s.EXPECT().Abort().Times(1) onAcceptState.EXPECT().Apply(s).Times(1) sharedMemory.EXPECT().Apply(atomicRequests, batch).Return(nil).Times(1) + s.EXPECT().Checksum().Return(ids.Empty).Times(1) - err = acceptor.ApricotAtomicBlock(blk) - require.NoError(err) + require.NoError(acceptor.ApricotAtomicBlock(blk)) } func TestAcceptorVisitStandardBlock(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() s := state.NewMockState(ctrl) sharedMemory := atomic.NewMockSharedMemory(ctrl) @@ -183,15 +175,11 @@ func TestAcceptorVisitStandardBlock(t *testing.T) { SharedMemory: sharedMemory, }, }, - metrics: metrics.Noop, - recentlyAccepted: window.New[ids.ID](window.Config{ - Clock: clk, - MaxSize: 1, - TTL: time.Hour, - }), + metrics: metrics.Noop, + validators: validators.TestManager, } - blk, err := blocks.NewBanffStandardBlock( + blk, err := block.NewBanffStandardBlock( clk.Time(), parentID, 1, @@ -211,24 +199,23 @@ func TestAcceptorVisitStandardBlock(t *testing.T) { // We should error after [commonAccept] is called. 
s.EXPECT().SetLastAccepted(blk.ID()).Times(1) s.EXPECT().SetHeight(blk.Height()).Times(1) - s.EXPECT().AddStatelessBlock(blk, choices.Accepted).Times(1) + s.EXPECT().AddStatelessBlock(blk).Times(1) err = acceptor.BanffStandardBlock(blk) - require.Error(err, "should fail because the block isn't in the state map") + require.ErrorIs(err, errMissingBlockState) // Set [blk]'s state in the map as though it had been verified. onAcceptState := state.NewMockDiff(ctrl) childID := ids.GenerateTestID() - atomicRequests := map[ids.ID]*atomic.Requests{ids.GenerateTestID(): nil} + atomicRequests := make(map[ids.ID]*atomic.Requests) calledOnAcceptFunc := false acceptor.backend.blkIDToState[blk.ID()] = &blockState{ - onAcceptState: onAcceptState, - atomicRequests: atomicRequests, - standardBlockState: standardBlockState{ - onAcceptFunc: func() { - calledOnAcceptFunc = true - }, + onAcceptState: onAcceptState, + onAcceptFunc: func() { + calledOnAcceptFunc = true }, + + atomicRequests: atomicRequests, } // Give [blk] a child. childOnAcceptState := state.NewMockDiff(ctrl) @@ -246,15 +233,15 @@ func TestAcceptorVisitStandardBlock(t *testing.T) { // Set expected calls on dependencies. 
s.EXPECT().SetLastAccepted(blk.ID()).Times(1) s.EXPECT().SetHeight(blk.Height()).Times(1) - s.EXPECT().AddStatelessBlock(blk, choices.Accepted).Times(1) + s.EXPECT().AddStatelessBlock(blk).Times(1) batch := database.NewMockBatch(ctrl) s.EXPECT().CommitBatch().Return(batch, nil).Times(1) s.EXPECT().Abort().Times(1) onAcceptState.EXPECT().Apply(s).Times(1) sharedMemory.EXPECT().Apply(atomicRequests, batch).Return(nil).Times(1) + s.EXPECT().Checksum().Return(ids.Empty).Times(1) - err = acceptor.BanffStandardBlock(blk) - require.NoError(err) + require.NoError(acceptor.BanffStandardBlock(blk)) require.True(calledOnAcceptFunc) require.Equal(blk.ID(), acceptor.backend.lastAccepted) } @@ -262,7 +249,6 @@ func TestAcceptorVisitStandardBlock(t *testing.T) { func TestAcceptorVisitCommitBlock(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() s := state.NewMockState(ctrl) sharedMemory := atomic.NewMockSharedMemory(ctrl) @@ -278,81 +264,101 @@ func TestAcceptorVisitCommitBlock(t *testing.T) { SharedMemory: sharedMemory, }, }, - metrics: metrics.Noop, - recentlyAccepted: window.New[ids.ID](window.Config{ - Clock: &mockable.Clock{}, - MaxSize: 1, - TTL: time.Hour, - }), + metrics: metrics.Noop, + validators: validators.TestManager, bootstrapped: &utils.Atomic[bool]{}, } - blk, err := blocks.NewApricotCommitBlock(parentID, 1 /*height*/) + blk, err := block.NewApricotCommitBlock(parentID, 1 /*height*/) require.NoError(err) - blkID := blk.ID() err = acceptor.ApricotCommitBlock(blk) - require.Error(err, "should fail because the block isn't in the state map") + require.ErrorIs(err, state.ErrMissingParentState) - // Set [blk]'s state in the map as though it had been verified. - onAcceptState := state.NewMockDiff(ctrl) - childID := ids.GenerateTestID() - acceptor.backend.blkIDToState[blkID] = &blockState{ - onAcceptState: onAcceptState, - } - // Give [blk] a child. 
- childOnAcceptState := state.NewMockDiff(ctrl) - childOnAbortState := state.NewMockDiff(ctrl) - childOnCommitState := state.NewMockDiff(ctrl) - childState := &blockState{ - onAcceptState: childOnAcceptState, - proposalBlockState: proposalBlockState{ - onAbortState: childOnAbortState, - onCommitState: childOnCommitState, - }, - } - acceptor.backend.blkIDToState[childID] = childState // Set [blk]'s parent in the state map. parentOnAcceptState := state.NewMockDiff(ctrl) parentOnAbortState := state.NewMockDiff(ctrl) parentOnCommitState := state.NewMockDiff(ctrl) - parentStatelessBlk := blocks.NewMockBlock(ctrl) + parentStatelessBlk := block.NewMockBlock(ctrl) + calledOnAcceptFunc := false + atomicRequests := make(map[ids.ID]*atomic.Requests) parentState := &blockState{ - statelessBlock: parentStatelessBlk, - onAcceptState: parentOnAcceptState, proposalBlockState: proposalBlockState{ onAbortState: parentOnAbortState, onCommitState: parentOnCommitState, }, + statelessBlock: parentStatelessBlk, + + onAcceptState: parentOnAcceptState, + onAcceptFunc: func() { + calledOnAcceptFunc = true + }, + + atomicRequests: atomicRequests, } acceptor.backend.blkIDToState[parentID] = parentState + blkID := blk.ID() // Set expected calls on dependencies. // Make sure the parent is accepted first. 
gomock.InOrder( - parentStatelessBlk.EXPECT().ID().Return(parentID).Times(2), + parentStatelessBlk.EXPECT().ID().Return(parentID).Times(1), s.EXPECT().SetLastAccepted(parentID).Times(1), parentStatelessBlk.EXPECT().Height().Return(blk.Height()-1).Times(1), s.EXPECT().SetHeight(blk.Height()-1).Times(1), - s.EXPECT().AddStatelessBlock(parentState.statelessBlock, choices.Accepted).Times(1), + s.EXPECT().AddStatelessBlock(parentState.statelessBlock).Times(1), s.EXPECT().SetLastAccepted(blkID).Times(1), s.EXPECT().SetHeight(blk.Height()).Times(1), - s.EXPECT().AddStatelessBlock(blk, choices.Accepted).Times(1), - - onAcceptState.EXPECT().Apply(s).Times(1), - s.EXPECT().Commit().Return(nil).Times(1), + s.EXPECT().AddStatelessBlock(blk).Times(1), ) err = acceptor.ApricotCommitBlock(blk) - require.NoError(err) + require.ErrorIs(err, errMissingBlockState) + + parentOnCommitState.EXPECT().GetTimestamp().Return(time.Unix(0, 0)) + + // Set [blk]'s state in the map as though it had been verified. + acceptor.backend.blkIDToState[parentID] = parentState + acceptor.backend.blkIDToState[blkID] = &blockState{ + onAcceptState: parentState.onCommitState, + onAcceptFunc: parentState.onAcceptFunc, + + inputs: parentState.inputs, + timestamp: parentOnCommitState.GetTimestamp(), + atomicRequests: parentState.atomicRequests, + } + + batch := database.NewMockBatch(ctrl) + + // Set expected calls on dependencies. + // Make sure the parent is accepted first. 
+ gomock.InOrder( + parentStatelessBlk.EXPECT().ID().Return(parentID).Times(1), + s.EXPECT().SetLastAccepted(parentID).Times(1), + parentStatelessBlk.EXPECT().Height().Return(blk.Height()-1).Times(1), + s.EXPECT().SetHeight(blk.Height()-1).Times(1), + s.EXPECT().AddStatelessBlock(parentState.statelessBlock).Times(1), + + s.EXPECT().SetLastAccepted(blkID).Times(1), + s.EXPECT().SetHeight(blk.Height()).Times(1), + s.EXPECT().AddStatelessBlock(blk).Times(1), + + parentOnCommitState.EXPECT().Apply(s).Times(1), + s.EXPECT().CommitBatch().Return(batch, nil).Times(1), + sharedMemory.EXPECT().Apply(atomicRequests, batch).Return(nil).Times(1), + s.EXPECT().Checksum().Return(ids.Empty).Times(1), + s.EXPECT().Abort().Times(1), + ) + + require.NoError(acceptor.ApricotCommitBlock(blk)) + require.True(calledOnAcceptFunc) require.Equal(blk.ID(), acceptor.backend.lastAccepted) } func TestAcceptorVisitAbortBlock(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() s := state.NewMockState(ctrl) sharedMemory := atomic.NewMockSharedMemory(ctrl) @@ -368,73 +374,94 @@ func TestAcceptorVisitAbortBlock(t *testing.T) { SharedMemory: sharedMemory, }, }, - metrics: metrics.Noop, - recentlyAccepted: window.New[ids.ID](window.Config{ - Clock: &mockable.Clock{}, - MaxSize: 1, - TTL: time.Hour, - }), + metrics: metrics.Noop, + validators: validators.TestManager, bootstrapped: &utils.Atomic[bool]{}, } - blk, err := blocks.NewApricotAbortBlock(parentID, 1 /*height*/) + blk, err := block.NewApricotAbortBlock(parentID, 1 /*height*/) require.NoError(err) - blkID := blk.ID() err = acceptor.ApricotAbortBlock(blk) - require.Error(err, "should fail because the block isn't in the state map") + require.ErrorIs(err, state.ErrMissingParentState) - // Set [blk]'s state in the map as though it had been verified. 
- onAcceptState := state.NewMockDiff(ctrl) - childID := ids.GenerateTestID() - acceptor.backend.blkIDToState[blkID] = &blockState{ - onAcceptState: onAcceptState, - } - // Give [blk] a child. - childOnAcceptState := state.NewMockDiff(ctrl) - childOnAbortState := state.NewMockDiff(ctrl) - childOnCommitState := state.NewMockDiff(ctrl) - childState := &blockState{ - onAcceptState: childOnAcceptState, - proposalBlockState: proposalBlockState{ - onAbortState: childOnAbortState, - onCommitState: childOnCommitState, - }, - } - acceptor.backend.blkIDToState[childID] = childState // Set [blk]'s parent in the state map. parentOnAcceptState := state.NewMockDiff(ctrl) parentOnAbortState := state.NewMockDiff(ctrl) parentOnCommitState := state.NewMockDiff(ctrl) - parentStatelessBlk := blocks.NewMockBlock(ctrl) + parentStatelessBlk := block.NewMockBlock(ctrl) + calledOnAcceptFunc := false + atomicRequests := make(map[ids.ID]*atomic.Requests) parentState := &blockState{ - statelessBlock: parentStatelessBlk, - onAcceptState: parentOnAcceptState, proposalBlockState: proposalBlockState{ onAbortState: parentOnAbortState, onCommitState: parentOnCommitState, }, + statelessBlock: parentStatelessBlk, + + onAcceptState: parentOnAcceptState, + onAcceptFunc: func() { + calledOnAcceptFunc = true + }, + + atomicRequests: atomicRequests, } acceptor.backend.blkIDToState[parentID] = parentState + blkID := blk.ID() // Set expected calls on dependencies. // Make sure the parent is accepted first. 
gomock.InOrder( - parentStatelessBlk.EXPECT().ID().Return(parentID).Times(2), + parentStatelessBlk.EXPECT().ID().Return(parentID).Times(1), s.EXPECT().SetLastAccepted(parentID).Times(1), parentStatelessBlk.EXPECT().Height().Return(blk.Height()-1).Times(1), s.EXPECT().SetHeight(blk.Height()-1).Times(1), - s.EXPECT().AddStatelessBlock(parentState.statelessBlock, choices.Accepted).Times(1), + s.EXPECT().AddStatelessBlock(parentState.statelessBlock).Times(1), s.EXPECT().SetLastAccepted(blkID).Times(1), s.EXPECT().SetHeight(blk.Height()).Times(1), - s.EXPECT().AddStatelessBlock(blk, choices.Accepted).Times(1), - - onAcceptState.EXPECT().Apply(s).Times(1), - s.EXPECT().Commit().Return(nil).Times(1), + s.EXPECT().AddStatelessBlock(blk).Times(1), ) err = acceptor.ApricotAbortBlock(blk) - require.NoError(err) + require.ErrorIs(err, errMissingBlockState) + + parentOnAbortState.EXPECT().GetTimestamp().Return(time.Unix(0, 0)) + + // Set [blk]'s state in the map as though it had been verified. + acceptor.backend.blkIDToState[parentID] = parentState + acceptor.backend.blkIDToState[blkID] = &blockState{ + onAcceptState: parentState.onAbortState, + onAcceptFunc: parentState.onAcceptFunc, + + inputs: parentState.inputs, + timestamp: parentOnAbortState.GetTimestamp(), + atomicRequests: parentState.atomicRequests, + } + + batch := database.NewMockBatch(ctrl) + + // Set expected calls on dependencies. + // Make sure the parent is accepted first. 
+ gomock.InOrder( + parentStatelessBlk.EXPECT().ID().Return(parentID).Times(1), + s.EXPECT().SetLastAccepted(parentID).Times(1), + parentStatelessBlk.EXPECT().Height().Return(blk.Height()-1).Times(1), + s.EXPECT().SetHeight(blk.Height()-1).Times(1), + s.EXPECT().AddStatelessBlock(parentState.statelessBlock).Times(1), + + s.EXPECT().SetLastAccepted(blkID).Times(1), + s.EXPECT().SetHeight(blk.Height()).Times(1), + s.EXPECT().AddStatelessBlock(blk).Times(1), + + parentOnAbortState.EXPECT().Apply(s).Times(1), + s.EXPECT().CommitBatch().Return(batch, nil).Times(1), + sharedMemory.EXPECT().Apply(atomicRequests, batch).Return(nil).Times(1), + s.EXPECT().Checksum().Return(ids.Empty).Times(1), + s.EXPECT().Abort().Times(1), + ) + + require.NoError(acceptor.ApricotAbortBlock(blk)) + require.True(calledOnAcceptFunc) require.Equal(blk.ID(), acceptor.backend.lastAccepted) } diff --git a/avalanchego/vms/platformvm/blocks/executor/backend.go b/avalanchego/vms/platformvm/block/executor/backend.go similarity index 73% rename from avalanchego/vms/platformvm/blocks/executor/backend.go rename to avalanchego/vms/platformvm/block/executor/backend.go index 56619f3e..c4e56545 100644 --- a/avalanchego/vms/platformvm/blocks/executor/backend.go +++ b/avalanchego/vms/platformvm/block/executor/backend.go @@ -1,18 +1,22 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package executor import ( + "errors" "time" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/vms/platformvm/blocks" + "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/vms/platformvm/block" "github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/ava-labs/avalanchego/vms/platformvm/txs/mempool" ) +var errConflictingParentTxs = errors.New("block contains a transaction that conflicts with a transaction in a parent block") + // Shared fields used by visitors. type backend struct { mempool.Mempool @@ -63,14 +67,14 @@ func (b *backend) getOnCommitState(blkID ids.ID) (state.Diff, bool) { return state.onCommitState, true } -func (b *backend) GetBlock(blkID ids.ID) (blocks.Block, error) { +func (b *backend) GetBlock(blkID ids.ID) (block.Block, error) { // See if the block is in memory. if blk, ok := b.blkIDToState[blkID]; ok { return blk.statelessBlock, nil } + // The block isn't in memory. Check the database. - statelessBlk, _, err := b.state.GetStatelessBlock(blkID) - return statelessBlk, err + return b.state.GetStatelessBlock(blkID) } func (b *backend) LastAccepted() ids.ID { @@ -95,3 +99,28 @@ func (b *backend) getTimestamp(blkID ids.ID) time.Time { // so we just return the chain time. return b.state.GetTimestamp() } + +// verifyUniqueInputs returns nil iff no blocks in the inclusive +// ancestry of [blkID] consume an input in [inputs]. +func (b *backend) verifyUniqueInputs(blkID ids.ID, inputs set.Set[ids.ID]) error { + if inputs.Len() == 0 { + return nil + } + + // Check for conflicts in ancestors. + for { + state, ok := b.blkIDToState[blkID] + if !ok { + // The parent state isn't pinned in memory. + // This means the parent must be accepted already. 
+ return nil + } + + if state.inputs.Overlaps(inputs) { + return errConflictingParentTxs + } + + blk := state.statelessBlock + blkID = blk.Parent() + } +} diff --git a/avalanchego/vms/platformvm/blocks/executor/backend_test.go b/avalanchego/vms/platformvm/block/executor/backend_test.go similarity index 85% rename from avalanchego/vms/platformvm/blocks/executor/backend_test.go rename to avalanchego/vms/platformvm/block/executor/backend_test.go index 63d30873..a37823da 100644 --- a/avalanchego/vms/platformvm/blocks/executor/backend_test.go +++ b/avalanchego/vms/platformvm/block/executor/backend_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package executor @@ -7,21 +7,18 @@ import ( "testing" "time" - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/choices" - "github.com/ava-labs/avalanchego/vms/platformvm/blocks" + "github.com/ava-labs/avalanchego/vms/platformvm/block" "github.com/ava-labs/avalanchego/vms/platformvm/state" ) func TestGetState(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() var ( mockState = state.NewMockState(ctrl) @@ -72,11 +69,10 @@ func TestGetState(t *testing.T) { func TestBackendGetBlock(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() var ( blkID1 = ids.GenerateTestID() - statelessBlk = blocks.NewMockBlock(ctrl) + statelessBlk = block.NewMockBlock(ctrl) state = state.NewMockState(ctrl) b = &backend{ state: state, @@ -98,7 +94,7 @@ func TestBackendGetBlock(t *testing.T) { { // Case: block isn't in the map or database. 
blkID := ids.GenerateTestID() - state.EXPECT().GetStatelessBlock(blkID).Return(nil, choices.Unknown, database.ErrNotFound) + state.EXPECT().GetStatelessBlock(blkID).Return(nil, database.ErrNotFound) _, err := b.GetBlock(blkID) require.Equal(database.ErrNotFound, err) } @@ -106,7 +102,7 @@ func TestBackendGetBlock(t *testing.T) { { // Case: block isn't in the map and is in database. blkID := ids.GenerateTestID() - state.EXPECT().GetStatelessBlock(blkID).Return(statelessBlk, choices.Accepted, nil) + state.EXPECT().GetStatelessBlock(blkID).Return(statelessBlk, nil) gotBlk, err := b.GetBlock(blkID) require.NoError(err) require.Equal(statelessBlk, gotBlk) @@ -124,7 +120,7 @@ func TestGetTimestamp(t *testing.T) { tests := []test{ { name: "block is in map", - backendF: func(ctrl *gomock.Controller) *backend { + backendF: func(*gomock.Controller) *backend { return &backend{ blkIDToState: map[ids.ID]*blockState{ blkID: { @@ -151,7 +147,6 @@ func TestGetTimestamp(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { ctrl := gomock.NewController(t) - defer ctrl.Finish() backend := tt.backendF(ctrl) gotTimestamp := backend.getTimestamp(blkID) diff --git a/avalanchego/vms/platformvm/blocks/executor/block.go b/avalanchego/vms/platformvm/block/executor/block.go similarity index 75% rename from avalanchego/vms/platformvm/blocks/executor/block.go rename to avalanchego/vms/platformvm/block/executor/block.go index ce6d1f06..5cd5a02f 100644 --- a/avalanchego/vms/platformvm/blocks/executor/block.go +++ b/avalanchego/vms/platformvm/block/executor/block.go @@ -1,11 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package executor import ( "context" - "fmt" "time" "go.uber.org/zap" @@ -13,7 +12,7 @@ import ( "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowman" - "github.com/ava-labs/avalanchego/vms/platformvm/blocks" + "github.com/ava-labs/avalanchego/vms/platformvm/block" ) var ( @@ -23,7 +22,7 @@ var ( // Exported for testing in platformvm package. type Block struct { - blocks.Block + block.Block manager *manager } @@ -58,10 +57,10 @@ func (b *Block) Status() choices.Status { return choices.Processing } // Block isn't in memory. Check in the database. - _, status, err := b.manager.state.GetStatelessBlock(blkID) + _, err := b.manager.state.GetStatelessBlock(blkID) switch err { case nil: - return status + return choices.Accepted case database.ErrNotFound: // choices.Unknown means we don't have the bytes of the block. @@ -83,22 +82,18 @@ func (b *Block) Timestamp() time.Time { } func (b *Block) Options(context.Context) ([2]snowman.Block, error) { - options := options{} + options := options{ + log: b.manager.ctx.Log, + primaryUptimePercentage: b.manager.txExecutorBackend.Config.UptimePercentage, + uptimes: b.manager.txExecutorBackend.Uptimes, + state: b.manager.backend.state, + } if err := b.Block.Visit(&options); err != nil { return [2]snowman.Block{}, err } - commitBlock := b.manager.NewBlock(options.commitBlock) - abortBlock := b.manager.NewBlock(options.abortBlock) - - blkID := b.ID() - blkState, ok := b.manager.blkIDToState[blkID] - if !ok { - return [2]snowman.Block{}, fmt.Errorf("block %s state not found", blkID) - } - - if blkState.initiallyPreferCommit { - return [2]snowman.Block{commitBlock, abortBlock}, nil - } - return [2]snowman.Block{abortBlock, commitBlock}, nil + return [2]snowman.Block{ + b.manager.NewBlock(options.preferredBlock), + b.manager.NewBlock(options.alternateBlock), + }, nil } diff --git a/avalanchego/vms/platformvm/blocks/executor/block_state.go 
b/avalanchego/vms/platformvm/block/executor/block_state.go similarity index 57% rename from avalanchego/vms/platformvm/blocks/executor/block_state.go rename to avalanchego/vms/platformvm/block/executor/block_state.go index ced2560b..9d6b377c 100644 --- a/avalanchego/vms/platformvm/blocks/executor/block_state.go +++ b/avalanchego/vms/platformvm/block/executor/block_state.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package executor @@ -9,29 +9,26 @@ import ( "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/set" - "github.com/ava-labs/avalanchego/vms/platformvm/blocks" + "github.com/ava-labs/avalanchego/vms/platformvm/block" "github.com/ava-labs/avalanchego/vms/platformvm/state" ) -type standardBlockState struct { - onAcceptFunc func() - inputs set.Set[ids.ID] -} - type proposalBlockState struct { - initiallyPreferCommit bool - onCommitState state.Diff - onAbortState state.Diff + onDecisionState state.Diff + onCommitState state.Diff + onAbortState state.Diff } // The state of a block. // Note that not all fields will be set for a given block. type blockState struct { - standardBlockState proposalBlockState - statelessBlock blocks.Block - onAcceptState state.Diff + statelessBlock block.Block + + onAcceptState state.Diff + onAcceptFunc func() + inputs set.Set[ids.ID] timestamp time.Time atomicRequests map[ids.ID]*atomic.Requests } diff --git a/avalanchego/vms/platformvm/block/executor/block_test.go b/avalanchego/vms/platformvm/block/executor/block_test.go new file mode 100644 index 00000000..1215f962 --- /dev/null +++ b/avalanchego/vms/platformvm/block/executor/block_test.go @@ -0,0 +1,619 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package executor + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" + + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/choices" + "github.com/ava-labs/avalanchego/snow/snowtest" + "github.com/ava-labs/avalanchego/snow/uptime" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/vms/platformvm/block" + "github.com/ava-labs/avalanchego/vms/platformvm/config" + "github.com/ava-labs/avalanchego/vms/platformvm/reward" + "github.com/ava-labs/avalanchego/vms/platformvm/state" + "github.com/ava-labs/avalanchego/vms/platformvm/status" + "github.com/ava-labs/avalanchego/vms/platformvm/txs" + "github.com/ava-labs/avalanchego/vms/platformvm/txs/executor" +) + +func TestStatus(t *testing.T) { + type test struct { + name string + blockF func(*gomock.Controller) *Block + expectedStatus choices.Status + } + + tests := []test{ + { + name: "last accepted", + blockF: func(ctrl *gomock.Controller) *Block { + blkID := ids.GenerateTestID() + statelessBlk := block.NewMockBlock(ctrl) + statelessBlk.EXPECT().ID().Return(blkID) + + manager := &manager{ + backend: &backend{ + lastAccepted: blkID, + }, + } + + return &Block{ + Block: statelessBlk, + manager: manager, + } + }, + expectedStatus: choices.Accepted, + }, + { + name: "processing", + blockF: func(ctrl *gomock.Controller) *Block { + blkID := ids.GenerateTestID() + statelessBlk := block.NewMockBlock(ctrl) + statelessBlk.EXPECT().ID().Return(blkID) + + manager := &manager{ + backend: &backend{ + blkIDToState: map[ids.ID]*blockState{ + blkID: {}, + }, + }, + } + return &Block{ + Block: statelessBlk, + manager: manager, + } + }, + expectedStatus: choices.Processing, + }, + { + name: "in database", + blockF: func(ctrl *gomock.Controller) *Block { + blkID := ids.GenerateTestID() + statelessBlk := block.NewMockBlock(ctrl) + statelessBlk.EXPECT().ID().Return(blkID) + 
+ state := state.NewMockState(ctrl) + state.EXPECT().GetStatelessBlock(blkID).Return(statelessBlk, nil) + + manager := &manager{ + backend: &backend{ + state: state, + }, + } + return &Block{ + Block: statelessBlk, + manager: manager, + } + }, + expectedStatus: choices.Accepted, + }, + { + name: "not in map or database", + blockF: func(ctrl *gomock.Controller) *Block { + blkID := ids.GenerateTestID() + statelessBlk := block.NewMockBlock(ctrl) + statelessBlk.EXPECT().ID().Return(blkID) + + state := state.NewMockState(ctrl) + state.EXPECT().GetStatelessBlock(blkID).Return(nil, database.ErrNotFound) + + manager := &manager{ + backend: &backend{ + state: state, + }, + } + return &Block{ + Block: statelessBlk, + manager: manager, + } + }, + expectedStatus: choices.Processing, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctrl := gomock.NewController(t) + + blk := tt.blockF(ctrl) + require.Equal(t, tt.expectedStatus, blk.Status()) + }) + } +} + +func TestBlockOptions(t *testing.T) { + type test struct { + name string + blkF func(*gomock.Controller) *Block + expectedPreferenceType block.Block + } + + tests := []test{ + { + name: "apricot proposal block; commit preferred", + blkF: func(ctrl *gomock.Controller) *Block { + state := state.NewMockState(ctrl) + + uptimes := uptime.NewMockCalculator(ctrl) + + manager := &manager{ + backend: &backend{ + state: state, + ctx: snowtest.Context(t, snowtest.PChainID), + }, + txExecutorBackend: &executor.Backend{ + Config: &config.Config{ + UptimePercentage: 0, + }, + Uptimes: uptimes, + }, + } + + return &Block{ + Block: &block.ApricotProposalBlock{}, + manager: manager, + } + }, + expectedPreferenceType: &block.ApricotCommitBlock{}, + }, + { + name: "banff proposal block; invalid proposal tx", + blkF: func(ctrl *gomock.Controller) *Block { + state := state.NewMockState(ctrl) + + uptimes := uptime.NewMockCalculator(ctrl) + + manager := &manager{ + backend: &backend{ + state: state, + ctx: 
snowtest.Context(t, snowtest.PChainID), + }, + txExecutorBackend: &executor.Backend{ + Config: &config.Config{ + UptimePercentage: 0, + }, + Uptimes: uptimes, + }, + } + + return &Block{ + Block: &block.BanffProposalBlock{ + ApricotProposalBlock: block.ApricotProposalBlock{ + Tx: &txs.Tx{ + Unsigned: &txs.CreateChainTx{}, + }, + }, + }, + manager: manager, + } + }, + expectedPreferenceType: &block.BanffCommitBlock{}, + }, + { + name: "banff proposal block; missing tx", + blkF: func(ctrl *gomock.Controller) *Block { + stakerTxID := ids.GenerateTestID() + + state := state.NewMockState(ctrl) + state.EXPECT().GetTx(stakerTxID).Return(nil, status.Unknown, database.ErrNotFound) + + uptimes := uptime.NewMockCalculator(ctrl) + + manager := &manager{ + backend: &backend{ + state: state, + ctx: snowtest.Context(t, snowtest.PChainID), + }, + txExecutorBackend: &executor.Backend{ + Config: &config.Config{ + UptimePercentage: 0, + }, + Uptimes: uptimes, + }, + } + + return &Block{ + Block: &block.BanffProposalBlock{ + ApricotProposalBlock: block.ApricotProposalBlock{ + Tx: &txs.Tx{ + Unsigned: &txs.RewardValidatorTx{ + TxID: stakerTxID, + }, + }, + }, + }, + manager: manager, + } + }, + expectedPreferenceType: &block.BanffCommitBlock{}, + }, + { + name: "banff proposal block; error fetching staker tx", + blkF: func(ctrl *gomock.Controller) *Block { + stakerTxID := ids.GenerateTestID() + + state := state.NewMockState(ctrl) + state.EXPECT().GetTx(stakerTxID).Return(nil, status.Unknown, database.ErrClosed) + + uptimes := uptime.NewMockCalculator(ctrl) + + manager := &manager{ + backend: &backend{ + state: state, + ctx: snowtest.Context(t, snowtest.PChainID), + }, + txExecutorBackend: &executor.Backend{ + Config: &config.Config{ + UptimePercentage: 0, + }, + Uptimes: uptimes, + }, + } + + return &Block{ + Block: &block.BanffProposalBlock{ + ApricotProposalBlock: block.ApricotProposalBlock{ + Tx: &txs.Tx{ + Unsigned: &txs.RewardValidatorTx{ + TxID: stakerTxID, + }, + }, + }, + }, + 
manager: manager, + } + }, + expectedPreferenceType: &block.BanffCommitBlock{}, + }, + { + name: "banff proposal block; unexpected staker tx type", + blkF: func(ctrl *gomock.Controller) *Block { + stakerTxID := ids.GenerateTestID() + stakerTx := &txs.Tx{ + Unsigned: &txs.CreateChainTx{}, + } + + state := state.NewMockState(ctrl) + state.EXPECT().GetTx(stakerTxID).Return(stakerTx, status.Committed, nil) + + uptimes := uptime.NewMockCalculator(ctrl) + + manager := &manager{ + backend: &backend{ + state: state, + ctx: snowtest.Context(t, snowtest.PChainID), + }, + txExecutorBackend: &executor.Backend{ + Config: &config.Config{ + UptimePercentage: 0, + }, + Uptimes: uptimes, + }, + } + + return &Block{ + Block: &block.BanffProposalBlock{ + ApricotProposalBlock: block.ApricotProposalBlock{ + Tx: &txs.Tx{ + Unsigned: &txs.RewardValidatorTx{ + TxID: stakerTxID, + }, + }, + }, + }, + manager: manager, + } + }, + expectedPreferenceType: &block.BanffCommitBlock{}, + }, + { + name: "banff proposal block; missing primary network validator", + blkF: func(ctrl *gomock.Controller) *Block { + var ( + stakerTxID = ids.GenerateTestID() + nodeID = ids.GenerateTestNodeID() + subnetID = ids.GenerateTestID() + stakerTx = &txs.Tx{ + Unsigned: &txs.AddPermissionlessValidatorTx{ + Validator: txs.Validator{ + NodeID: nodeID, + }, + Subnet: subnetID, + }, + } + ) + + state := state.NewMockState(ctrl) + state.EXPECT().GetTx(stakerTxID).Return(stakerTx, status.Committed, nil) + state.EXPECT().GetCurrentValidator(constants.PrimaryNetworkID, nodeID).Return(nil, database.ErrNotFound) + + uptimes := uptime.NewMockCalculator(ctrl) + + manager := &manager{ + backend: &backend{ + state: state, + ctx: snowtest.Context(t, snowtest.PChainID), + }, + txExecutorBackend: &executor.Backend{ + Config: &config.Config{ + UptimePercentage: 0, + }, + Uptimes: uptimes, + }, + } + + return &Block{ + Block: &block.BanffProposalBlock{ + ApricotProposalBlock: block.ApricotProposalBlock{ + Tx: &txs.Tx{ + Unsigned: 
&txs.RewardValidatorTx{ + TxID: stakerTxID, + }, + }, + }, + }, + manager: manager, + } + }, + expectedPreferenceType: &block.BanffCommitBlock{}, + }, + { + name: "banff proposal block; failed calculating primary network uptime", + blkF: func(ctrl *gomock.Controller) *Block { + var ( + stakerTxID = ids.GenerateTestID() + nodeID = ids.GenerateTestNodeID() + subnetID = constants.PrimaryNetworkID + stakerTx = &txs.Tx{ + Unsigned: &txs.AddPermissionlessValidatorTx{ + Validator: txs.Validator{ + NodeID: nodeID, + }, + Subnet: subnetID, + }, + } + primaryNetworkValidatorStartTime = time.Now() + staker = &state.Staker{ + StartTime: primaryNetworkValidatorStartTime, + } + ) + + state := state.NewMockState(ctrl) + state.EXPECT().GetTx(stakerTxID).Return(stakerTx, status.Committed, nil) + state.EXPECT().GetCurrentValidator(constants.PrimaryNetworkID, nodeID).Return(staker, nil) + + uptimes := uptime.NewMockCalculator(ctrl) + uptimes.EXPECT().CalculateUptimePercentFrom(nodeID, constants.PrimaryNetworkID, primaryNetworkValidatorStartTime).Return(0.0, database.ErrNotFound) + + manager := &manager{ + backend: &backend{ + state: state, + ctx: snowtest.Context(t, snowtest.PChainID), + }, + txExecutorBackend: &executor.Backend{ + Config: &config.Config{ + UptimePercentage: 0, + }, + Uptimes: uptimes, + }, + } + + return &Block{ + Block: &block.BanffProposalBlock{ + ApricotProposalBlock: block.ApricotProposalBlock{ + Tx: &txs.Tx{ + Unsigned: &txs.RewardValidatorTx{ + TxID: stakerTxID, + }, + }, + }, + }, + manager: manager, + } + }, + expectedPreferenceType: &block.BanffCommitBlock{}, + }, + { + name: "banff proposal block; failed fetching subnet transformation", + blkF: func(ctrl *gomock.Controller) *Block { + var ( + stakerTxID = ids.GenerateTestID() + nodeID = ids.GenerateTestNodeID() + subnetID = ids.GenerateTestID() + stakerTx = &txs.Tx{ + Unsigned: &txs.AddPermissionlessValidatorTx{ + Validator: txs.Validator{ + NodeID: nodeID, + }, + Subnet: subnetID, + }, + } + 
primaryNetworkValidatorStartTime = time.Now() + staker = &state.Staker{ + StartTime: primaryNetworkValidatorStartTime, + } + ) + + state := state.NewMockState(ctrl) + state.EXPECT().GetTx(stakerTxID).Return(stakerTx, status.Committed, nil) + state.EXPECT().GetCurrentValidator(constants.PrimaryNetworkID, nodeID).Return(staker, nil) + state.EXPECT().GetSubnetTransformation(subnetID).Return(nil, database.ErrNotFound) + + uptimes := uptime.NewMockCalculator(ctrl) + + manager := &manager{ + backend: &backend{ + state: state, + ctx: snowtest.Context(t, snowtest.PChainID), + }, + txExecutorBackend: &executor.Backend{ + Config: &config.Config{ + UptimePercentage: 0, + }, + Uptimes: uptimes, + }, + } + + return &Block{ + Block: &block.BanffProposalBlock{ + ApricotProposalBlock: block.ApricotProposalBlock{ + Tx: &txs.Tx{ + Unsigned: &txs.RewardValidatorTx{ + TxID: stakerTxID, + }, + }, + }, + }, + manager: manager, + } + }, + expectedPreferenceType: &block.BanffCommitBlock{}, + }, + { + name: "banff proposal block; prefers commit", + blkF: func(ctrl *gomock.Controller) *Block { + var ( + stakerTxID = ids.GenerateTestID() + nodeID = ids.GenerateTestNodeID() + subnetID = ids.GenerateTestID() + stakerTx = &txs.Tx{ + Unsigned: &txs.AddPermissionlessValidatorTx{ + Validator: txs.Validator{ + NodeID: nodeID, + }, + Subnet: subnetID, + }, + } + primaryNetworkValidatorStartTime = time.Now() + staker = &state.Staker{ + StartTime: primaryNetworkValidatorStartTime, + } + transformSubnetTx = &txs.Tx{ + Unsigned: &txs.TransformSubnetTx{ + UptimeRequirement: .2 * reward.PercentDenominator, + }, + } + ) + + state := state.NewMockState(ctrl) + state.EXPECT().GetTx(stakerTxID).Return(stakerTx, status.Committed, nil) + state.EXPECT().GetCurrentValidator(constants.PrimaryNetworkID, nodeID).Return(staker, nil) + state.EXPECT().GetSubnetTransformation(subnetID).Return(transformSubnetTx, nil) + + uptimes := uptime.NewMockCalculator(ctrl) + uptimes.EXPECT().CalculateUptimePercentFrom(nodeID, 
constants.PrimaryNetworkID, primaryNetworkValidatorStartTime).Return(.5, nil) + + manager := &manager{ + backend: &backend{ + state: state, + ctx: snowtest.Context(t, snowtest.PChainID), + }, + txExecutorBackend: &executor.Backend{ + Config: &config.Config{ + UptimePercentage: .8, + }, + Uptimes: uptimes, + }, + } + + return &Block{ + Block: &block.BanffProposalBlock{ + ApricotProposalBlock: block.ApricotProposalBlock{ + Tx: &txs.Tx{ + Unsigned: &txs.RewardValidatorTx{ + TxID: stakerTxID, + }, + }, + }, + }, + manager: manager, + } + }, + expectedPreferenceType: &block.BanffCommitBlock{}, + }, + { + name: "banff proposal block; prefers abort", + blkF: func(ctrl *gomock.Controller) *Block { + var ( + stakerTxID = ids.GenerateTestID() + nodeID = ids.GenerateTestNodeID() + subnetID = ids.GenerateTestID() + stakerTx = &txs.Tx{ + Unsigned: &txs.AddPermissionlessValidatorTx{ + Validator: txs.Validator{ + NodeID: nodeID, + }, + Subnet: subnetID, + }, + } + primaryNetworkValidatorStartTime = time.Now() + staker = &state.Staker{ + StartTime: primaryNetworkValidatorStartTime, + } + transformSubnetTx = &txs.Tx{ + Unsigned: &txs.TransformSubnetTx{ + UptimeRequirement: .6 * reward.PercentDenominator, + }, + } + ) + + state := state.NewMockState(ctrl) + state.EXPECT().GetTx(stakerTxID).Return(stakerTx, status.Committed, nil) + state.EXPECT().GetCurrentValidator(constants.PrimaryNetworkID, nodeID).Return(staker, nil) + state.EXPECT().GetSubnetTransformation(subnetID).Return(transformSubnetTx, nil) + + uptimes := uptime.NewMockCalculator(ctrl) + uptimes.EXPECT().CalculateUptimePercentFrom(nodeID, constants.PrimaryNetworkID, primaryNetworkValidatorStartTime).Return(.5, nil) + + manager := &manager{ + backend: &backend{ + state: state, + ctx: snowtest.Context(t, snowtest.PChainID), + }, + txExecutorBackend: &executor.Backend{ + Config: &config.Config{ + UptimePercentage: .8, + }, + Uptimes: uptimes, + }, + } + + return &Block{ + Block: &block.BanffProposalBlock{ + 
ApricotProposalBlock: block.ApricotProposalBlock{ + Tx: &txs.Tx{ + Unsigned: &txs.RewardValidatorTx{ + TxID: stakerTxID, + }, + }, + }, + }, + manager: manager, + } + }, + expectedPreferenceType: &block.BanffAbortBlock{}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctrl := gomock.NewController(t) + require := require.New(t) + + blk := tt.blkF(ctrl) + options, err := blk.Options(context.Background()) + require.NoError(err) + require.IsType(tt.expectedPreferenceType, options[0].(*Block).Block) + }) + } +} diff --git a/avalanchego/vms/platformvm/blocks/executor/helpers_test.go b/avalanchego/vms/platformvm/block/executor/helpers_test.go similarity index 73% rename from avalanchego/vms/platformvm/blocks/executor/helpers_test.go rename to avalanchego/vms/platformvm/block/executor/helpers_test.go index b6497760..5e99575d 100644 --- a/avalanchego/vms/platformvm/blocks/executor/helpers_test.go +++ b/avalanchego/vms/platformvm/block/executor/helpers_test.go @@ -1,29 +1,29 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package executor import ( - "context" - "errors" "fmt" "testing" "time" - "github.com/golang/mock/gomock" - "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" "github.com/ava-labs/avalanchego/chains" "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/snow/uptime" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils" @@ -35,9 +35,6 @@ import ( "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/utils/units" - "github.com/ava-labs/avalanchego/utils/window" - "github.com/ava-labs/avalanchego/utils/wrappers" - "github.com/ava-labs/avalanchego/version" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/api" "github.com/ava-labs/avalanchego/vms/platformvm/config" @@ -52,23 +49,25 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/utxo" "github.com/ava-labs/avalanchego/vms/secp256k1fx" - db_manager "github.com/ava-labs/avalanchego/database/manager" p_tx_builder "github.com/ava-labs/avalanchego/vms/platformvm/txs/builder" + pvalidators "github.com/ava-labs/avalanchego/vms/platformvm/validators" ) const ( pending stakerStatus = iota current - testNetworkID = 10 // To be used in tests - defaultWeight = 10000 - maxRecentlyAcceptedWindowSize = 256 - recentlyAcceptedWindowTTL = 5 * time.Minute + defaultWeight = 10000 + trackChecksum = false + + 
apricotPhase3 fork = iota + apricotPhase5 + banff + cortina + durango ) var ( - _ mempool.BlockTimer = (*environment)(nil) - defaultMinStakingDuration = 24 * time.Hour defaultMaxStakingDuration = 365 * 24 * time.Hour defaultGenesisTime = time.Date(1997, 1, 1, 0, 0, 0, 0, time.UTC) @@ -79,18 +78,25 @@ var ( preFundedKeys = secp256k1.TestKeys() avaxAssetID = ids.ID{'y', 'e', 'e', 't'} defaultTxFee = uint64(100) - xChainID = ids.Empty.Prefix(0) - cChainID = ids.Empty.Prefix(1) genesisBlkID ids.ID testSubnet1 *txs.Tx - errMissingPrimaryValidators = errors.New("missing primary validator set") - errMissing = errors.New("missing") + // Node IDs of genesis validators. Initialized in init function + genesisNodeIDs []ids.NodeID ) +func init() { + genesisNodeIDs = make([]ids.NodeID, len(preFundedKeys)) + for i := range preFundedKeys { + genesisNodeIDs[i] = ids.GenerateTestNodeID() + } +} + type stakerStatus uint +type fork uint8 + type staker struct { nodeID ids.NodeID rewardAddress ids.ShortID @@ -126,21 +132,22 @@ type environment struct { backend *executor.Backend } -func (*environment) ResetBlockTimer() { - // dummy call, do nothing for now -} - -func newEnvironment(t *testing.T, ctrl *gomock.Controller) *environment { +func newEnvironment(t *testing.T, ctrl *gomock.Controller, f fork) *environment { res := &environment{ isBootstrapped: &utils.Atomic[bool]{}, - config: defaultConfig(), + config: defaultConfig(t, f), clk: defaultClock(), } res.isBootstrapped.Set(true) - baseDBManager := db_manager.NewMemDB(version.Semantic1_0_0) - res.baseDB = versiondb.New(baseDBManager.Current().Database) - res.ctx = defaultCtx(res.baseDB) + res.baseDB = versiondb.New(memdb.New()) + atomicDB := prefixdb.New([]byte{1}, res.baseDB) + m := atomic.NewMemory(atomicDB) + + res.ctx = snowtest.Context(t, snowtest.PChainID) + res.ctx.AVAXAssetID = avaxAssetID + res.ctx.SharedMemory = m.NewSharedMemory(res.ctx.ChainID) + res.fx = defaultFx(res.clk, res.ctx.Log, res.isBootstrapped.Get()) 
rewardsCalc := reward.NewCalculator(res.config.RewardConfig) @@ -148,7 +155,7 @@ func newEnvironment(t *testing.T, ctrl *gomock.Controller) *environment { if ctrl == nil { res.state = defaultState(res.config, res.ctx, res.baseDB, rewardsCalc) - res.uptimes = uptime.NewManager(res.state) + res.uptimes = uptime.NewManager(res.state, res.clk) res.utxosHandler = utxo.NewHandler(res.ctx, res.clk, res.fx) res.txBuilder = p_tx_builder.New( res.ctx, @@ -162,7 +169,7 @@ func newEnvironment(t *testing.T, ctrl *gomock.Controller) *environment { } else { genesisBlkID = ids.GenerateTestID() res.mockedState = state.NewMockState(ctrl) - res.uptimes = uptime.NewManager(res.mockedState) + res.uptimes = uptime.NewManager(res.mockedState, res.clk) res.utxosHandler = utxo.NewHandler(res.ctx, res.clk, res.fx) res.txBuilder = p_tx_builder.New( res.ctx, @@ -190,19 +197,12 @@ func newEnvironment(t *testing.T, ctrl *gomock.Controller) *environment { } registerer := prometheus.NewRegistry() - window := window.New[ids.ID]( - window.Config{ - Clock: res.clk, - MaxSize: maxRecentlyAcceptedWindowSize, - TTL: recentlyAcceptedWindowTTL, - }, - ) res.sender = &common.SenderTest{T: t} metrics := metrics.Noop var err error - res.mempool, err = mempool.NewMempool("mempool", registerer, res) + res.mempool, err = mempool.New("mempool", registerer, nil) if err != nil { panic(fmt.Errorf("failed to create mempool: %w", err)) } @@ -213,7 +213,7 @@ func newEnvironment(t *testing.T, ctrl *gomock.Controller) *environment { metrics, res.state, res.backend, - window, + pvalidators.TestManager, ) addSubnet(res) } else { @@ -222,12 +222,37 @@ func newEnvironment(t *testing.T, ctrl *gomock.Controller) *environment { metrics, res.mockedState, res.backend, - window, + pvalidators.TestManager, ) // we do not add any subnet to state, since we can mock // whatever we need } + t.Cleanup(func() { + res.ctx.Lock.Lock() + defer res.ctx.Lock.Unlock() + + if res.mockedState != nil { + // state is mocked, nothing to do here + 
return + } + + require := require.New(t) + + if res.isBootstrapped.Get() { + validatorIDs := res.config.Validators.GetValidatorIDs(constants.PrimaryNetworkID) + + require.NoError(res.uptimes.StopTracking(validatorIDs, constants.PrimaryNetworkID)) + require.NoError(res.state.Commit()) + } + + if res.state != nil { + require.NoError(res.state.Close()) + } + + require.NoError(res.baseDB.Close()) + }) + return res } @@ -243,6 +268,7 @@ func addSubnet(env *environment) { }, []*secp256k1.PrivateKey{preFundedKeys[0]}, preFundedKeys[0].PublicKey().Address(), + nil, ) if err != nil { panic(err) @@ -278,15 +304,16 @@ func defaultState( rewards reward.Calculator, ) state.State { genesisBytes := buildGenesisTest(ctx) + execCfg, _ := config.GetExecutionConfig([]byte(`{}`)) state, err := state.New( db, genesisBytes, prometheus.NewRegistry(), cfg, + execCfg, ctx, metrics.Noop, rewards, - &utils.Atomic[bool]{}, ) if err != nil { panic(err) @@ -297,51 +324,42 @@ func defaultState( if err := state.Commit(); err != nil { panic(err) } - state.SetHeight( /*height*/ 0) - if err := state.Commit(); err != nil { - panic(err) - } genesisBlkID = state.GetLastAccepted() return state } -func defaultCtx(db database.Database) *snow.Context { - ctx := snow.DefaultContextTest() - ctx.NetworkID = 10 - ctx.XChainID = xChainID - ctx.CChainID = cChainID - ctx.AVAXAssetID = avaxAssetID - - atomicDB := prefixdb.New([]byte{1}, db) - m := atomic.NewMemory(atomicDB) +func defaultConfig(t *testing.T, f fork) *config.Config { + var ( + apricotPhase3Time = mockable.MaxTime + apricotPhase5Time = mockable.MaxTime + banffTime = mockable.MaxTime + cortinaTime = mockable.MaxTime + durangoTime = mockable.MaxTime + ) - ctx.SharedMemory = m.NewSharedMemory(ctx.ChainID) - - ctx.ValidatorState = &validators.TestState{ - GetSubnetIDF: func(_ context.Context, chainID ids.ID) (ids.ID, error) { - subnetID, ok := map[ids.ID]ids.ID{ - constants.PlatformChainID: constants.PrimaryNetworkID, - xChainID: 
constants.PrimaryNetworkID, - cChainID: constants.PrimaryNetworkID, - }[chainID] - if !ok { - return ids.Empty, errMissing - } - return subnetID, nil - }, + switch f { + case durango: + durangoTime = time.Time{} // neglecting fork ordering for this package's tests + fallthrough + case cortina: + cortinaTime = time.Time{} // neglecting fork ordering for this package's tests + fallthrough + case banff: + banffTime = time.Time{} // neglecting fork ordering for this package's tests + fallthrough + case apricotPhase5: + apricotPhase5Time = defaultValidateEndTime + fallthrough + case apricotPhase3: + apricotPhase3Time = defaultValidateEndTime + default: + require.NoError(t, fmt.Errorf("unhandled fork %d", f)) } - return ctx -} - -func defaultConfig() *config.Config { - vdrs := validators.NewManager() - primaryVdrs := validators.NewSet() - _ = vdrs.Add(constants.PrimaryNetworkID, primaryVdrs) return &config.Config{ Chains: chains.TestManager, UptimeLockedCalculator: uptime.NewLockedCalculator(), - Validators: vdrs, + Validators: validators.NewManager(), TxFee: defaultTxFee, CreateSubnetTxFee: 100 * defaultTxFee, CreateBlockchainTxFee: 100 * defaultTxFee, @@ -356,16 +374,18 @@ func defaultConfig() *config.Config { MintingPeriod: 365 * 24 * time.Hour, SupplyCap: 720 * units.MegaAvax, }, - ApricotPhase3Time: defaultValidateEndTime, - ApricotPhase5Time: defaultValidateEndTime, - BanffTime: mockable.MaxTime, + ApricotPhase3Time: apricotPhase3Time, + ApricotPhase5Time: apricotPhase5Time, + BanffTime: banffTime, + CortinaTime: cortinaTime, + DurangoTime: durangoTime, } } func defaultClock() *mockable.Clock { - clk := mockable.Clock{} + clk := &mockable.Clock{} clk.Set(defaultGenesisTime) - return &clk + return clk } type fxVMInt struct { @@ -392,7 +412,7 @@ func (fvi *fxVMInt) EthVerificationEnabled() bool { func defaultFx(clk *mockable.Clock, log logging.Logger, isBootstrapped bool) fx.Fx { fxVMInt := &fxVMInt{ - registry: linearcodec.NewDefault(), + registry: 
linearcodec.NewDefault(time.Time{}), clk: clk, log: log, } @@ -410,10 +430,9 @@ func defaultFx(clk *mockable.Clock, log logging.Logger, isBootstrapped bool) fx. func buildGenesisTest(ctx *snow.Context) []byte { genesisUTXOs := make([]api.UTXO, len(preFundedKeys)) - hrp := constants.NetworkIDToHRP[testNetworkID] for i, key := range preFundedKeys { id := key.PublicKey().Address() - addr, err := address.FormatBech32(hrp, id.Bytes()) + addr, err := address.FormatBech32(constants.UnitTestHRP, id.Bytes()) if err != nil { panic(err) } @@ -423,15 +442,14 @@ func buildGenesisTest(ctx *snow.Context) []byte { } } - genesisValidators := make([]api.PermissionlessValidator, len(preFundedKeys)) - for i, key := range preFundedKeys { - nodeID := ids.NodeID(key.PublicKey().Address()) - addr, err := address.FormatBech32(hrp, nodeID.Bytes()) + genesisValidators := make([]api.GenesisPermissionlessValidator, len(genesisNodeIDs)) + for i, nodeID := range genesisNodeIDs { + addr, err := address.FormatBech32(constants.UnitTestHRP, nodeID.Bytes()) if err != nil { panic(err) } - genesisValidators[i] = api.PermissionlessValidator{ - Staker: api.Staker{ + genesisValidators[i] = api.GenesisPermissionlessValidator{ + GenesisValidator: api.GenesisValidator{ StartTime: json.Uint64(defaultValidateStartTime.Unix()), EndTime: json.Uint64(defaultValidateEndTime.Unix()), NodeID: nodeID, @@ -449,7 +467,7 @@ func buildGenesisTest(ctx *snow.Context) []byte { } buildGenesisArgs := api.BuildGenesisArgs{ - NetworkID: json.Uint32(testNetworkID), + NetworkID: json.Uint32(constants.UnitTestID), AvaxAssetID: ctx.AVAXAssetID, UTXOs: genesisUTXOs, Validators: genesisValidators, @@ -473,40 +491,6 @@ func buildGenesisTest(ctx *snow.Context) []byte { return genesisBytes } -func shutdownEnvironment(t *environment) error { - if t.mockedState != nil { - // state is mocked, nothing to do here - return nil - } - - if t.isBootstrapped.Get() { - primaryValidatorSet, exist := 
t.config.Validators.Get(constants.PrimaryNetworkID) - if !exist { - return errMissingPrimaryValidators - } - primaryValidators := primaryValidatorSet.List() - - validatorIDs := make([]ids.NodeID, len(primaryValidators)) - for i, vdr := range primaryValidators { - validatorIDs[i] = vdr.NodeID - } - - if err := t.uptimes.StopTracking(validatorIDs, constants.PrimaryNetworkID); err != nil { - return err - } - if err := t.state.Commit(); err != nil { - return err - } - } - - errs := wrappers.Errs{} - if t.state != nil { - errs.Add(t.state.Close()) - } - errs.Add(t.baseDB.Close()) - return errs.Err -} - func addPendingValidator( env *environment, startTime time.Time, @@ -524,6 +508,7 @@ func addPendingValidator( reward.PercentDenominator, keys, ids.ShortEmpty, + nil, ) if err != nil { return nil, err diff --git a/avalanchego/vms/platformvm/block/executor/manager.go b/avalanchego/vms/platformvm/block/executor/manager.go new file mode 100644 index 00000000..27d35a76 --- /dev/null +++ b/avalanchego/vms/platformvm/block/executor/manager.go @@ -0,0 +1,162 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package executor + +import ( + "errors" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/consensus/snowman" + "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/vms/platformvm/block" + "github.com/ava-labs/avalanchego/vms/platformvm/metrics" + "github.com/ava-labs/avalanchego/vms/platformvm/state" + "github.com/ava-labs/avalanchego/vms/platformvm/txs" + "github.com/ava-labs/avalanchego/vms/platformvm/txs/executor" + "github.com/ava-labs/avalanchego/vms/platformvm/txs/mempool" + "github.com/ava-labs/avalanchego/vms/platformvm/validators" +) + +var ( + _ Manager = (*manager)(nil) + + ErrChainNotSynced = errors.New("chain not synced") +) + +type Manager interface { + state.Versions + + // Returns the ID of the most recently accepted block. 
+ LastAccepted() ids.ID + + SetPreference(blkID ids.ID) (updated bool) + Preferred() ids.ID + + GetBlock(blkID ids.ID) (snowman.Block, error) + GetStatelessBlock(blkID ids.ID) (block.Block, error) + NewBlock(block.Block) snowman.Block + + // VerifyTx verifies that the transaction can be issued based on the currently + // preferred state. This should *not* be used to verify transactions in a block. + VerifyTx(tx *txs.Tx) error + + // VerifyUniqueInputs verifies that the inputs are not duplicated in the + // provided blk or any of its ancestors pinned in memory. + VerifyUniqueInputs(blkID ids.ID, inputs set.Set[ids.ID]) error +} + +func NewManager( + mempool mempool.Mempool, + metrics metrics.Metrics, + s state.State, + txExecutorBackend *executor.Backend, + validatorManager validators.Manager, +) Manager { + lastAccepted := s.GetLastAccepted() + backend := &backend{ + Mempool: mempool, + lastAccepted: lastAccepted, + state: s, + ctx: txExecutorBackend.Ctx, + blkIDToState: map[ids.ID]*blockState{}, + } + + return &manager{ + backend: backend, + verifier: &verifier{ + backend: backend, + txExecutorBackend: txExecutorBackend, + }, + acceptor: &acceptor{ + backend: backend, + metrics: metrics, + validators: validatorManager, + bootstrapped: txExecutorBackend.Bootstrapped, + }, + rejector: &rejector{ + backend: backend, + addTxsToMempool: !txExecutorBackend.Config.PartialSyncPrimaryNetwork, + }, + preferred: lastAccepted, + txExecutorBackend: txExecutorBackend, + } +} + +type manager struct { + *backend + verifier block.Visitor + acceptor block.Visitor + rejector block.Visitor + + preferred ids.ID + txExecutorBackend *executor.Backend +} + +func (m *manager) GetBlock(blkID ids.ID) (snowman.Block, error) { + blk, err := m.backend.GetBlock(blkID) + if err != nil { + return nil, err + } + return m.NewBlock(blk), nil +} + +func (m *manager) GetStatelessBlock(blkID ids.ID) (block.Block, error) { + return m.backend.GetBlock(blkID) +} + +func (m *manager) NewBlock(blk 
block.Block) snowman.Block { + return &Block{ + manager: m, + Block: blk, + } +} + +func (m *manager) SetPreference(blkID ids.ID) bool { + updated := m.preferred != blkID + m.preferred = blkID + return updated +} + +func (m *manager) Preferred() ids.ID { + return m.preferred +} + +func (m *manager) VerifyTx(tx *txs.Tx) error { + if !m.txExecutorBackend.Bootstrapped.Get() { + return ErrChainNotSynced + } + + stateDiff, err := state.NewDiff(m.preferred, m) + if err != nil { + return err + } + + nextBlkTime, _, err := executor.NextBlockTime(stateDiff, m.txExecutorBackend.Clk) + if err != nil { + return err + } + + _, err = executor.AdvanceTimeTo(m.txExecutorBackend, stateDiff, nextBlkTime) + if err != nil { + return err + } + + err = tx.Unsigned.Visit(&executor.StandardTxExecutor{ + Backend: m.txExecutorBackend, + State: stateDiff, + Tx: tx, + }) + // We ignore [errFutureStakeTime] here because the time will be advanced + // when this transaction is issued. + // + // TODO: Remove this check post-Durango. + if errors.Is(err, executor.ErrFutureStakeTime) { + return nil + } + return err +} + +func (m *manager) VerifyUniqueInputs(blkID ids.ID, inputs set.Set[ids.ID]) error { + return m.backend.verifyUniqueInputs(blkID, inputs) +} diff --git a/avalanchego/vms/platformvm/blocks/executor/manager_test.go b/avalanchego/vms/platformvm/block/executor/manager_test.go similarity index 64% rename from avalanchego/vms/platformvm/blocks/executor/manager_test.go rename to avalanchego/vms/platformvm/block/executor/manager_test.go index fb15dd3c..48197261 100644 --- a/avalanchego/vms/platformvm/blocks/executor/manager_test.go +++ b/avalanchego/vms/platformvm/block/executor/manager_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package executor @@ -6,23 +6,20 @@ package executor import ( "testing" - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/choices" - "github.com/ava-labs/avalanchego/vms/platformvm/blocks" + "github.com/ava-labs/avalanchego/vms/platformvm/block" "github.com/ava-labs/avalanchego/vms/platformvm/state" ) func TestGetBlock(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() - statelessBlk, err := blocks.NewApricotCommitBlock(ids.GenerateTestID() /*parent*/, 2 /*height*/) + statelessBlk, err := block.NewApricotCommitBlock(ids.GenerateTestID() /*parent*/, 2 /*height*/) require.NoError(err) state := state.NewMockState(ctrl) manager := &manager{ @@ -34,18 +31,18 @@ func TestGetBlock(t *testing.T) { { // Case: block isn't in memory or database - state.EXPECT().GetStatelessBlock(statelessBlk.ID()).Return(nil, choices.Unknown, database.ErrNotFound).Times(1) + state.EXPECT().GetStatelessBlock(statelessBlk.ID()).Return(nil, database.ErrNotFound).Times(1) _, err := manager.GetBlock(statelessBlk.ID()) - require.Error(err) + require.ErrorIs(err, database.ErrNotFound) } { // Case: block isn't in memory but is in database. 
- state.EXPECT().GetStatelessBlock(statelessBlk.ID()).Return(statelessBlk, choices.Accepted, nil).Times(1) + state.EXPECT().GetStatelessBlock(statelessBlk.ID()).Return(statelessBlk, nil).Times(1) gotBlk, err := manager.GetBlock(statelessBlk.ID()) require.NoError(err) require.Equal(statelessBlk.ID(), gotBlk.ID()) - innerBlk, ok := gotBlk.(*Block) - require.True(ok) + require.IsType(&Block{}, gotBlk) + innerBlk := gotBlk.(*Block) require.Equal(statelessBlk, innerBlk.Block) require.Equal(manager, innerBlk.manager) } @@ -57,8 +54,8 @@ func TestGetBlock(t *testing.T) { gotBlk, err := manager.GetBlock(statelessBlk.ID()) require.NoError(err) require.Equal(statelessBlk.ID(), gotBlk.ID()) - innerBlk, ok := gotBlk.(*Block) - require.True(ok) + require.IsType(&Block{}, gotBlk) + innerBlk := gotBlk.(*Block) require.Equal(statelessBlk, innerBlk.Block) require.Equal(manager, innerBlk.manager) } @@ -74,3 +71,18 @@ func TestManagerLastAccepted(t *testing.T) { require.Equal(t, lastAcceptedID, manager.LastAccepted()) } + +func TestManagerSetPreference(t *testing.T) { + require := require.New(t) + + initialPreference := ids.GenerateTestID() + manager := &manager{ + preferred: initialPreference, + } + require.False(manager.SetPreference(initialPreference)) + + newPreference := ids.GenerateTestID() + require.True(manager.SetPreference(newPreference)) + require.False(manager.SetPreference(newPreference)) + require.True(manager.SetPreference(initialPreference)) +} diff --git a/avalanchego/vms/platformvm/block/executor/mock_manager.go b/avalanchego/vms/platformvm/block/executor/mock_manager.go new file mode 100644 index 00000000..5e822238 --- /dev/null +++ b/avalanchego/vms/platformvm/block/executor/mock_manager.go @@ -0,0 +1,174 @@ +// Code generated by MockGen. DO NOT EDIT. 
+// Source: vms/platformvm/block/executor/manager.go +// +// Generated by this command: +// +// mockgen -source=vms/platformvm/block/executor/manager.go -destination=vms/platformvm/block/executor/mock_manager.go -package=executor -exclude_interfaces= +// + +// Package executor is a generated GoMock package. +package executor + +import ( + reflect "reflect" + + ids "github.com/ava-labs/avalanchego/ids" + snowman "github.com/ava-labs/avalanchego/snow/consensus/snowman" + set "github.com/ava-labs/avalanchego/utils/set" + block "github.com/ava-labs/avalanchego/vms/platformvm/block" + state "github.com/ava-labs/avalanchego/vms/platformvm/state" + txs "github.com/ava-labs/avalanchego/vms/platformvm/txs" + gomock "go.uber.org/mock/gomock" +) + +// MockManager is a mock of Manager interface. +type MockManager struct { + ctrl *gomock.Controller + recorder *MockManagerMockRecorder +} + +// MockManagerMockRecorder is the mock recorder for MockManager. +type MockManagerMockRecorder struct { + mock *MockManager +} + +// NewMockManager creates a new mock instance. +func NewMockManager(ctrl *gomock.Controller) *MockManager { + mock := &MockManager{ctrl: ctrl} + mock.recorder = &MockManagerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockManager) EXPECT() *MockManagerMockRecorder { + return m.recorder +} + +// GetBlock mocks base method. +func (m *MockManager) GetBlock(blkID ids.ID) (snowman.Block, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBlock", blkID) + ret0, _ := ret[0].(snowman.Block) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBlock indicates an expected call of GetBlock. +func (mr *MockManagerMockRecorder) GetBlock(blkID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlock", reflect.TypeOf((*MockManager)(nil).GetBlock), blkID) +} + +// GetState mocks base method. 
+func (m *MockManager) GetState(blkID ids.ID) (state.Chain, bool) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetState", blkID) + ret0, _ := ret[0].(state.Chain) + ret1, _ := ret[1].(bool) + return ret0, ret1 +} + +// GetState indicates an expected call of GetState. +func (mr *MockManagerMockRecorder) GetState(blkID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetState", reflect.TypeOf((*MockManager)(nil).GetState), blkID) +} + +// GetStatelessBlock mocks base method. +func (m *MockManager) GetStatelessBlock(blkID ids.ID) (block.Block, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetStatelessBlock", blkID) + ret0, _ := ret[0].(block.Block) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetStatelessBlock indicates an expected call of GetStatelessBlock. +func (mr *MockManagerMockRecorder) GetStatelessBlock(blkID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetStatelessBlock", reflect.TypeOf((*MockManager)(nil).GetStatelessBlock), blkID) +} + +// LastAccepted mocks base method. +func (m *MockManager) LastAccepted() ids.ID { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "LastAccepted") + ret0, _ := ret[0].(ids.ID) + return ret0 +} + +// LastAccepted indicates an expected call of LastAccepted. +func (mr *MockManagerMockRecorder) LastAccepted() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LastAccepted", reflect.TypeOf((*MockManager)(nil).LastAccepted)) +} + +// NewBlock mocks base method. +func (m *MockManager) NewBlock(arg0 block.Block) snowman.Block { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NewBlock", arg0) + ret0, _ := ret[0].(snowman.Block) + return ret0 +} + +// NewBlock indicates an expected call of NewBlock. 
+func (mr *MockManagerMockRecorder) NewBlock(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewBlock", reflect.TypeOf((*MockManager)(nil).NewBlock), arg0) +} + +// Preferred mocks base method. +func (m *MockManager) Preferred() ids.ID { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Preferred") + ret0, _ := ret[0].(ids.ID) + return ret0 +} + +// Preferred indicates an expected call of Preferred. +func (mr *MockManagerMockRecorder) Preferred() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Preferred", reflect.TypeOf((*MockManager)(nil).Preferred)) +} + +// SetPreference mocks base method. +func (m *MockManager) SetPreference(blkID ids.ID) bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetPreference", blkID) + ret0, _ := ret[0].(bool) + return ret0 +} + +// SetPreference indicates an expected call of SetPreference. +func (mr *MockManagerMockRecorder) SetPreference(blkID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetPreference", reflect.TypeOf((*MockManager)(nil).SetPreference), blkID) +} + +// VerifyTx mocks base method. +func (m *MockManager) VerifyTx(tx *txs.Tx) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "VerifyTx", tx) + ret0, _ := ret[0].(error) + return ret0 +} + +// VerifyTx indicates an expected call of VerifyTx. +func (mr *MockManagerMockRecorder) VerifyTx(tx any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VerifyTx", reflect.TypeOf((*MockManager)(nil).VerifyTx), tx) +} + +// VerifyUniqueInputs mocks base method. +func (m *MockManager) VerifyUniqueInputs(blkID ids.ID, inputs set.Set[ids.ID]) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "VerifyUniqueInputs", blkID, inputs) + ret0, _ := ret[0].(error) + return ret0 +} + +// VerifyUniqueInputs indicates an expected call of VerifyUniqueInputs. 
+func (mr *MockManagerMockRecorder) VerifyUniqueInputs(blkID, inputs any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VerifyUniqueInputs", reflect.TypeOf((*MockManager)(nil).VerifyUniqueInputs), blkID, inputs) +} diff --git a/avalanchego/vms/platformvm/block/executor/options.go b/avalanchego/vms/platformvm/block/executor/options.go new file mode 100644 index 00000000..f2071c8e --- /dev/null +++ b/avalanchego/vms/platformvm/block/executor/options.go @@ -0,0 +1,189 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package executor + +import ( + "errors" + "fmt" + + "go.uber.org/zap" + + "github.com/ava-labs/avalanchego/snow/consensus/snowman" + "github.com/ava-labs/avalanchego/snow/uptime" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/vms/platformvm/block" + "github.com/ava-labs/avalanchego/vms/platformvm/reward" + "github.com/ava-labs/avalanchego/vms/platformvm/state" + "github.com/ava-labs/avalanchego/vms/platformvm/txs" + "github.com/ava-labs/avalanchego/vms/platformvm/txs/executor" +) + +var ( + _ block.Visitor = (*options)(nil) + + errUnexpectedProposalTxType = errors.New("unexpected proposal transaction type") + errFailedFetchingStakerTx = errors.New("failed fetching staker transaction") + errUnexpectedStakerTxType = errors.New("unexpected staker transaction type") + errFailedFetchingPrimaryStaker = errors.New("failed fetching primary staker") + errFailedFetchingSubnetTransformation = errors.New("failed fetching subnet transformation") + errFailedCalculatingUptime = errors.New("failed calculating uptime") +) + +// options supports build new option blocks +type options struct { + // inputs populated before calling this struct's methods: + log logging.Logger + primaryUptimePercentage float64 + uptimes uptime.Calculator + state state.Chain + + // outputs 
populated by this struct's methods: + preferredBlock block.Block + alternateBlock block.Block +} + +func (*options) BanffAbortBlock(*block.BanffAbortBlock) error { + return snowman.ErrNotOracle +} + +func (*options) BanffCommitBlock(*block.BanffCommitBlock) error { + return snowman.ErrNotOracle +} + +func (o *options) BanffProposalBlock(b *block.BanffProposalBlock) error { + timestamp := b.Timestamp() + blkID := b.ID() + nextHeight := b.Height() + 1 + + commitBlock, err := block.NewBanffCommitBlock(timestamp, blkID, nextHeight) + if err != nil { + return fmt.Errorf( + "failed to create commit block: %w", + err, + ) + } + + abortBlock, err := block.NewBanffAbortBlock(timestamp, blkID, nextHeight) + if err != nil { + return fmt.Errorf( + "failed to create abort block: %w", + err, + ) + } + + prefersCommit, err := o.prefersCommit(b.Tx) + if err != nil { + o.log.Debug("falling back to prefer commit", + zap.Error(err), + ) + // We fall back to commit here to err on the side of over-rewarding + // rather than under-rewarding. + // + // Invariant: We must not return the error here, because the error would + // be treated as fatal. Errors can occur here due to a malicious block + // proposer or even in unusual virtuous cases. 
+ prefersCommit = true + } + + if prefersCommit { + o.preferredBlock = commitBlock + o.alternateBlock = abortBlock + } else { + o.preferredBlock = abortBlock + o.alternateBlock = commitBlock + } + return nil +} + +func (*options) BanffStandardBlock(*block.BanffStandardBlock) error { + return snowman.ErrNotOracle +} + +func (*options) ApricotAbortBlock(*block.ApricotAbortBlock) error { + return snowman.ErrNotOracle +} + +func (*options) ApricotCommitBlock(*block.ApricotCommitBlock) error { + return snowman.ErrNotOracle +} + +func (o *options) ApricotProposalBlock(b *block.ApricotProposalBlock) error { + blkID := b.ID() + nextHeight := b.Height() + 1 + + var err error + o.preferredBlock, err = block.NewApricotCommitBlock(blkID, nextHeight) + if err != nil { + return fmt.Errorf( + "failed to create commit block: %w", + err, + ) + } + + o.alternateBlock, err = block.NewApricotAbortBlock(blkID, nextHeight) + if err != nil { + return fmt.Errorf( + "failed to create abort block: %w", + err, + ) + } + return nil +} + +func (*options) ApricotStandardBlock(*block.ApricotStandardBlock) error { + return snowman.ErrNotOracle +} + +func (*options) ApricotAtomicBlock(*block.ApricotAtomicBlock) error { + return snowman.ErrNotOracle +} + +func (o *options) prefersCommit(tx *txs.Tx) (bool, error) { + unsignedTx, ok := tx.Unsigned.(*txs.RewardValidatorTx) + if !ok { + return false, fmt.Errorf("%w: %T", errUnexpectedProposalTxType, tx.Unsigned) + } + + stakerTx, _, err := o.state.GetTx(unsignedTx.TxID) + if err != nil { + return false, fmt.Errorf("%w: %w", errFailedFetchingStakerTx, err) + } + + staker, ok := stakerTx.Unsigned.(txs.Staker) + if !ok { + return false, fmt.Errorf("%w: %T", errUnexpectedStakerTxType, stakerTx.Unsigned) + } + + nodeID := staker.NodeID() + primaryNetworkValidator, err := o.state.GetCurrentValidator( + constants.PrimaryNetworkID, + nodeID, + ) + if err != nil { + return false, fmt.Errorf("%w: %w", errFailedFetchingPrimaryStaker, err) + } + + 
expectedUptimePercentage := o.primaryUptimePercentage + if subnetID := staker.SubnetID(); subnetID != constants.PrimaryNetworkID { + transformSubnet, err := executor.GetTransformSubnetTx(o.state, subnetID) + if err != nil { + return false, fmt.Errorf("%w: %w", errFailedFetchingSubnetTransformation, err) + } + + expectedUptimePercentage = float64(transformSubnet.UptimeRequirement) / reward.PercentDenominator + } + + // TODO: calculate subnet uptimes + uptime, err := o.uptimes.CalculateUptimePercentFrom( + nodeID, + constants.PrimaryNetworkID, + primaryNetworkValidator.StartTime, + ) + if err != nil { + return false, fmt.Errorf("%w: %w", errFailedCalculatingUptime, err) + } + + return uptime >= expectedUptimePercentage, nil +} diff --git a/avalanchego/vms/platformvm/blocks/executor/options_test.go b/avalanchego/vms/platformvm/block/executor/options_test.go similarity index 53% rename from avalanchego/vms/platformvm/blocks/executor/options_test.go rename to avalanchego/vms/platformvm/block/executor/options_test.go index 66a7b382..54bef779 100644 --- a/avalanchego/vms/platformvm/blocks/executor/options_test.go +++ b/avalanchego/vms/platformvm/block/executor/options_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package executor @@ -10,18 +10,18 @@ import ( "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/snow/consensus/snowman" - "github.com/ava-labs/avalanchego/vms/platformvm/blocks" + "github.com/ava-labs/avalanchego/vms/platformvm/block" ) func TestOptionsUnexpectedBlockType(t *testing.T) { - tests := []blocks.Block{ - &blocks.BanffAbortBlock{}, - &blocks.BanffCommitBlock{}, - &blocks.BanffStandardBlock{}, - &blocks.ApricotAbortBlock{}, - &blocks.ApricotCommitBlock{}, - &blocks.ApricotStandardBlock{}, - &blocks.ApricotAtomicBlock{}, + tests := []block.Block{ + &block.BanffAbortBlock{}, + &block.BanffCommitBlock{}, + &block.BanffStandardBlock{}, + &block.ApricotAbortBlock{}, + &block.ApricotCommitBlock{}, + &block.ApricotStandardBlock{}, + &block.ApricotAtomicBlock{}, } for _, blk := range tests { diff --git a/avalanchego/vms/platformvm/blocks/executor/proposal_block_test.go b/avalanchego/vms/platformvm/block/executor/proposal_block_test.go similarity index 73% rename from avalanchego/vms/platformvm/blocks/executor/proposal_block_test.go rename to avalanchego/vms/platformvm/block/executor/proposal_block_test.go index 3d1569ad..eb1df5fd 100644 --- a/avalanchego/vms/platformvm/blocks/executor/proposal_block_test.go +++ b/avalanchego/vms/platformvm/block/executor/proposal_block_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package executor @@ -9,20 +9,20 @@ import ( "testing" "time" - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowman" - "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/vms/components/avax" - "github.com/ava-labs/avalanchego/vms/platformvm/blocks" + "github.com/ava-labs/avalanchego/vms/platformvm/block" "github.com/ava-labs/avalanchego/vms/platformvm/reward" + "github.com/ava-labs/avalanchego/vms/platformvm/signer" "github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/ava-labs/avalanchego/vms/platformvm/status" "github.com/ava-labs/avalanchego/vms/platformvm/txs" @@ -33,17 +33,13 @@ import ( func TestApricotProposalBlockTimeVerification(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() - env := newEnvironment(t, ctrl) - defer func() { - require.NoError(shutdownEnvironment(env)) - }() + env := newEnvironment(t, ctrl, apricotPhase5) // create apricotParentBlk. 
It's a standard one for simplicity parentHeight := uint64(2022) - apricotParentBlk, err := blocks.NewApricotStandardBlock( + apricotParentBlk, err := block.NewApricotStandardBlock( ids.Empty, // does not matter parentHeight, nil, // txs do not matter in this test @@ -97,67 +93,57 @@ func TestApricotProposalBlockTimeVerification(t *testing.T) { NodeID: utx.NodeID(), SubnetID: utx.SubnetID(), StartTime: utx.StartTime(), + NextTime: chainTime, EndTime: chainTime, }).Times(2) currentStakersIt.EXPECT().Release() onParentAccept.EXPECT().GetCurrentStakerIterator().Return(currentStakersIt, nil) - onParentAccept.EXPECT().GetCurrentValidator(utx.SubnetID(), utx.NodeID()).Return(&state.Staker{ - TxID: addValTx.ID(), - NodeID: utx.NodeID(), - SubnetID: utx.SubnetID(), - StartTime: utx.StartTime(), - EndTime: chainTime, - }, nil) onParentAccept.EXPECT().GetTx(addValTx.ID()).Return(addValTx, status.Committed, nil) onParentAccept.EXPECT().GetCurrentSupply(constants.PrimaryNetworkID).Return(uint64(1000), nil).AnyTimes() onParentAccept.EXPECT().GetDelegateeReward(constants.PrimaryNetworkID, utx.NodeID()).Return(uint64(0), nil).AnyTimes() env.mockedState.EXPECT().GetUptime(gomock.Any(), constants.PrimaryNetworkID).Return( - time.Duration(1000), /*upDuration*/ - time.Time{}, /*lastUpdated*/ - nil, /*err*/ + time.Microsecond, /*upDuration*/ + time.Time{}, /*lastUpdated*/ + nil, /*err*/ ).AnyTimes() // wrong height - statelessProposalBlock, err := blocks.NewApricotProposalBlock( + statelessProposalBlock, err := block.NewApricotProposalBlock( parentID, parentHeight, blkTx, ) require.NoError(err) - block := env.blkManager.NewBlock(statelessProposalBlock) - require.Error(block.Verify(context.Background())) + proposalBlock := env.blkManager.NewBlock(statelessProposalBlock) + + err = proposalBlock.Verify(context.Background()) + require.ErrorIs(err, errIncorrectBlockHeight) // valid - statelessProposalBlock, err = blocks.NewApricotProposalBlock( + statelessProposalBlock, err = 
block.NewApricotProposalBlock( parentID, parentHeight+1, blkTx, ) require.NoError(err) - block = env.blkManager.NewBlock(statelessProposalBlock) - require.NoError(block.Verify(context.Background())) + proposalBlock = env.blkManager.NewBlock(statelessProposalBlock) + require.NoError(proposalBlock.Verify(context.Background())) } func TestBanffProposalBlockTimeVerification(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() - env := newEnvironment(t, ctrl) - defer func() { - require.NoError(shutdownEnvironment(env)) - }() - env.clk.Set(defaultGenesisTime) - env.config.BanffTime = time.Time{} // activate Banff + env := newEnvironment(t, ctrl, banff) // create parentBlock. It's a standard one for simplicity parentTime := defaultGenesisTime parentHeight := uint64(2022) - banffParentBlk, err := blocks.NewApricotStandardBlock( + banffParentBlk, err := block.NewApricotStandardBlock( genesisBlkID, // does not matter parentHeight, nil, // txs do not matter in this test @@ -181,11 +167,11 @@ func TestBanffProposalBlockTimeVerification(t *testing.T) { env.blkManager.(*manager).lastAccepted = parentID env.mockedState.EXPECT().GetLastAccepted().Return(parentID).AnyTimes() env.mockedState.EXPECT().GetStatelessBlock(gomock.Any()).DoAndReturn( - func(blockID ids.ID) (blocks.Block, choices.Status, error) { + func(blockID ids.ID) (block.Block, error) { if blockID == parentID { - return banffParentBlk, choices.Accepted, nil + return banffParentBlk, nil } - return nil, choices.Rejected, database.ErrNotFound + return nil, database.ErrNotFound }).AnyTimes() // setup state to validate proposal block transaction @@ -210,13 +196,6 @@ func TestBanffProposalBlockTimeVerification(t *testing.T) { require.NoError(nextStakerTx.Initialize(txs.Codec)) nextStakerTxID := nextStakerTx.ID() - onParentAccept.EXPECT().GetCurrentValidator(unsignedNextStakerTx.SubnetID(), unsignedNextStakerTx.NodeID()).Return(&state.Staker{ - TxID: nextStakerTxID, - NodeID: 
unsignedNextStakerTx.NodeID(), - SubnetID: unsignedNextStakerTx.SubnetID(), - StartTime: unsignedNextStakerTx.StartTime(), - EndTime: chainTime, - }, nil) onParentAccept.EXPECT().GetTx(nextStakerTxID).Return(nextStakerTx, status.Processing, nil) currentStakersIt := state.NewMockStakerIterator(ctrl) @@ -238,9 +217,9 @@ func TestBanffProposalBlockTimeVerification(t *testing.T) { onParentAccept.EXPECT().GetPendingStakerIterator().Return(pendingStakersIt, nil).AnyTimes() env.mockedState.EXPECT().GetUptime(gomock.Any(), gomock.Any()).Return( - time.Duration(1000), /*upDuration*/ - time.Time{}, /*lastUpdated*/ - nil, /*err*/ + time.Microsecond, /*upDuration*/ + time.Time{}, /*lastUpdated*/ + nil, /*err*/ ).AnyTimes() // create proposal tx to be included in the proposal block @@ -253,21 +232,23 @@ func TestBanffProposalBlockTimeVerification(t *testing.T) { { // wrong height - statelessProposalBlock, err := blocks.NewBanffProposalBlock( + statelessProposalBlock, err := block.NewBanffProposalBlock( parentTime.Add(time.Second), parentID, banffParentBlk.Height(), blkTx, + []*txs.Tx{}, ) require.NoError(err) block := env.blkManager.NewBlock(statelessProposalBlock) - require.Error(block.Verify(context.Background())) + err = block.Verify(context.Background()) + require.ErrorIs(err, errIncorrectBlockHeight) } { - // wrong version - statelessProposalBlock, err := blocks.NewApricotProposalBlock( + // wrong block version + statelessProposalBlock, err := block.NewApricotProposalBlock( parentID, banffParentBlk.Height()+1, blkTx, @@ -275,51 +256,60 @@ func TestBanffProposalBlockTimeVerification(t *testing.T) { require.NoError(err) block := env.blkManager.NewBlock(statelessProposalBlock) - require.Error(block.Verify(context.Background())) + err = block.Verify(context.Background()) + require.ErrorIs(err, errApricotBlockIssuedAfterFork) } { // wrong timestamp, earlier than parent - statelessProposalBlock, err := blocks.NewBanffProposalBlock( + statelessProposalBlock, err := 
block.NewBanffProposalBlock( parentTime.Add(-1*time.Second), parentID, banffParentBlk.Height()+1, blkTx, + []*txs.Tx{}, ) require.NoError(err) block := env.blkManager.NewBlock(statelessProposalBlock) - require.Error(block.Verify(context.Background())) + err = block.Verify(context.Background()) + require.ErrorIs(err, errChildBlockEarlierThanParent) } { // wrong timestamp, violated synchrony bound - beyondSyncBoundTimeStamp := env.clk.Time().Add(executor.SyncBound).Add(time.Second) - statelessProposalBlock, err := blocks.NewBanffProposalBlock( - beyondSyncBoundTimeStamp, + initClkTime := env.clk.Time() + env.clk.Set(parentTime.Add(-executor.SyncBound)) + statelessProposalBlock, err := block.NewBanffProposalBlock( + parentTime.Add(time.Second), parentID, banffParentBlk.Height()+1, blkTx, + []*txs.Tx{}, ) require.NoError(err) block := env.blkManager.NewBlock(statelessProposalBlock) - require.Error(block.Verify(context.Background())) + err = block.Verify(context.Background()) + require.ErrorIs(err, executor.ErrChildBlockBeyondSyncBound) + env.clk.Set(initClkTime) } { // wrong timestamp, skipped staker set change event skippedStakerEventTimeStamp := nextStakerTime.Add(time.Second) - statelessProposalBlock, err := blocks.NewBanffProposalBlock( + statelessProposalBlock, err := block.NewBanffProposalBlock( skippedStakerEventTimeStamp, parentID, banffParentBlk.Height()+1, blkTx, + []*txs.Tx{}, ) require.NoError(err) block := env.blkManager.NewBlock(statelessProposalBlock) - require.Error(block.Verify(context.Background())) + err = block.Verify(context.Background()) + require.ErrorIs(err, executor.ErrChildBlockAfterStakerChangeTime) } { @@ -330,40 +320,45 @@ func TestBanffProposalBlockTimeVerification(t *testing.T) { }, } require.NoError(invalidTx.Initialize(txs.Codec)) - statelessProposalBlock, err := blocks.NewBanffProposalBlock( + statelessProposalBlock, err := block.NewBanffProposalBlock( parentTime.Add(time.Second), parentID, banffParentBlk.Height()+1, invalidTx, + 
[]*txs.Tx{}, ) require.NoError(err) block := env.blkManager.NewBlock(statelessProposalBlock) - require.Error(block.Verify(context.Background())) + err = block.Verify(context.Background()) + require.ErrorIs(err, executor.ErrAdvanceTimeTxIssuedAfterBanff) } { // include too many transactions - statelessProposalBlock, err := blocks.NewBanffProposalBlock( + statelessProposalBlock, err := block.NewBanffProposalBlock( nextStakerTime, parentID, banffParentBlk.Height()+1, blkTx, + []*txs.Tx{}, ) require.NoError(err) statelessProposalBlock.Transactions = []*txs.Tx{blkTx} block := env.blkManager.NewBlock(statelessProposalBlock) - require.ErrorIs(block.Verify(context.Background()), errBanffProposalBlockWithMultipleTransactions) + err = block.Verify(context.Background()) + require.ErrorIs(err, errBanffProposalBlockWithMultipleTransactions) } { // valid - statelessProposalBlock, err := blocks.NewBanffProposalBlock( + statelessProposalBlock, err := block.NewBanffProposalBlock( nextStakerTime, parentID, banffParentBlk.Height()+1, blkTx, + []*txs.Tx{}, ) require.NoError(err) @@ -383,57 +378,52 @@ func TestBanffProposalBlockUpdateStakers(t *testing.T) { // Staker5: |--------------------| // Staker0 it's here just to allow to issue a proposal block with the chosen endTime. - staker0RewardAddress := ids.GenerateTestShortID() + + // In this test multiple stakers may join and leave the staker set at the same time. + // The order in which they do it is asserted; the order may depend on the staker.TxID, + // which in turns depend on every feature of the transaction creating the staker. + // So in this test we avoid ids.GenerateTestNodeID, in favour of ids.BuildTestNodeID + // so that TxID does not depend on the order we run tests. 
staker0 := staker{ - nodeID: ids.NodeID(staker0RewardAddress), - rewardAddress: staker0RewardAddress, + nodeID: ids.BuildTestNodeID([]byte{0xf0}), + rewardAddress: ids.ShortID{0xf0}, startTime: defaultGenesisTime, endTime: time.Time{}, // actual endTime depends on specific test } - staker1RewardAddress := ids.GenerateTestShortID() staker1 := staker{ - nodeID: ids.NodeID(staker1RewardAddress), - rewardAddress: staker1RewardAddress, + nodeID: ids.BuildTestNodeID([]byte{0xf1}), + rewardAddress: ids.ShortID{0xf1}, startTime: defaultGenesisTime.Add(1 * time.Minute), endTime: defaultGenesisTime.Add(10 * defaultMinStakingDuration).Add(1 * time.Minute), } - - staker2RewardAddress := ids.GenerateTestShortID() staker2 := staker{ - nodeID: ids.NodeID(staker2RewardAddress), - rewardAddress: staker2RewardAddress, + nodeID: ids.BuildTestNodeID([]byte{0xf2}), + rewardAddress: ids.ShortID{0xf2}, startTime: staker1.startTime.Add(1 * time.Minute), endTime: staker1.startTime.Add(1 * time.Minute).Add(defaultMinStakingDuration), } - - staker3RewardAddress := ids.GenerateTestShortID() staker3 := staker{ - nodeID: ids.NodeID(staker3RewardAddress), - rewardAddress: staker3RewardAddress, + nodeID: ids.BuildTestNodeID([]byte{0xf3}), + rewardAddress: ids.ShortID{0xf3}, startTime: staker2.startTime.Add(1 * time.Minute), endTime: staker2.endTime.Add(1 * time.Minute), } - staker3Sub := staker{ - nodeID: staker3.nodeID, - rewardAddress: staker3.rewardAddress, + nodeID: ids.BuildTestNodeID([]byte{0xf3}), + rewardAddress: ids.ShortID{0xff}, startTime: staker3.startTime.Add(1 * time.Minute), endTime: staker3.endTime.Add(-1 * time.Minute), } - - staker4RewardAddress := ids.GenerateTestShortID() staker4 := staker{ - nodeID: ids.NodeID(staker4RewardAddress), - rewardAddress: staker4RewardAddress, + nodeID: ids.BuildTestNodeID([]byte{0xf4}), + rewardAddress: ids.ShortID{0xf4}, startTime: staker3.startTime, endTime: staker3.endTime, } - - staker5RewardAddress := ids.GenerateTestShortID() staker5 := 
staker{ - nodeID: ids.NodeID(staker5RewardAddress), - rewardAddress: staker5RewardAddress, + nodeID: ids.BuildTestNodeID([]byte{0xf5}), + rewardAddress: ids.ShortID{0xf5}, startTime: staker2.endTime, endTime: staker2.endTime.Add(defaultMinStakingDuration), } @@ -532,15 +522,19 @@ func TestBanffProposalBlockUpdateStakers(t *testing.T) { }, }, { - description: "advance time to staker5 end", + description: "advance time to staker5 start", stakers: []staker{staker1, staker2, staker3, staker4, staker5}, advanceTimeTo: []time.Time{staker1.startTime, staker2.startTime, staker3.startTime, staker5.startTime}, expectedStakers: map[ids.NodeID]stakerStatus{ staker1.nodeID: current, - // given its txID, staker2 will be - // rewarded and moved out of current stakers set - // staker2.nodeID: current, + // Staker2's end time matches staker5's start time, so typically + // the block builder would produce a ProposalBlock to remove + // staker2 when advancing the time. However, this test injects + // staker0 into the staker set artificially to advance the time. + // This means that staker2 is not removed by the ProposalBlock + // when advancing the time. 
+ staker2.nodeID: current, staker3.nodeID: current, staker4.nodeID: current, staker5.nodeID: current, @@ -551,16 +545,10 @@ func TestBanffProposalBlockUpdateStakers(t *testing.T) { for _, test := range tests { t.Run(test.description, func(t *testing.T) { require := require.New(t) - env := newEnvironment(t, nil) - defer func() { - require.NoError(shutdownEnvironment(env)) - }() - - env.config.BanffTime = time.Time{} // activate Banff + env := newEnvironment(t, nil, banff) subnetID := testSubnet1.ID() env.config.TrackedSubnets.Add(subnetID) - env.config.Validators.Add(subnetID, validators.NewSet()) for _, staker := range test.stakers { tx, err := env.txBuilder.NewAddValidatorTx( @@ -572,6 +560,7 @@ func TestBanffProposalBlockUpdateStakers(t *testing.T) { reward.PercentDenominator, []*secp256k1.PrivateKey{preFundedKeys[0]}, ids.ShortEmpty, + nil, ) require.NoError(err) @@ -595,6 +584,7 @@ func TestBanffProposalBlockUpdateStakers(t *testing.T) { subnetID, // Subnet ID []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, ids.ShortEmpty, + nil, ) require.NoError(err) @@ -624,13 +614,16 @@ func TestBanffProposalBlockUpdateStakers(t *testing.T) { reward.PercentDenominator, []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, ids.ShortEmpty, + nil, ) require.NoError(err) // store Staker0 to state + addValTx := addStaker0.Unsigned.(*txs.AddValidatorTx) staker0, err := state.NewCurrentStaker( addStaker0.ID(), - addStaker0.Unsigned.(*txs.AddValidatorTx), + addValTx, + addValTx.StartTime(), 0, ) require.NoError(err) @@ -649,13 +642,14 @@ func TestBanffProposalBlockUpdateStakers(t *testing.T) { // build proposal block moving ahead chain time // as well as rewarding staker0 preferredID := env.state.GetLastAccepted() - parentBlk, _, err := env.state.GetStatelessBlock(preferredID) + parentBlk, err := env.state.GetStatelessBlock(preferredID) require.NoError(err) - statelessProposalBlock, err := blocks.NewBanffProposalBlock( + statelessProposalBlock, err := 
block.NewBanffProposalBlock( newTime, parentBlk.ID(), parentBlk.Height()+1, s0RewardTx, + []*txs.Tx{}, ) require.NoError(err) @@ -677,20 +671,24 @@ func TestBanffProposalBlockUpdateStakers(t *testing.T) { case pending: _, err := env.state.GetPendingValidator(constants.PrimaryNetworkID, stakerNodeID) require.NoError(err) - require.False(validators.Contains(env.config.Validators, constants.PrimaryNetworkID, stakerNodeID)) + _, ok := env.config.Validators.GetValidator(constants.PrimaryNetworkID, stakerNodeID) + require.False(ok) case current: _, err := env.state.GetCurrentValidator(constants.PrimaryNetworkID, stakerNodeID) require.NoError(err) - require.True(validators.Contains(env.config.Validators, constants.PrimaryNetworkID, stakerNodeID)) + _, ok := env.config.Validators.GetValidator(constants.PrimaryNetworkID, stakerNodeID) + require.True(ok) } } for stakerNodeID, status := range test.expectedSubnetStakers { switch status { case pending: - require.False(validators.Contains(env.config.Validators, subnetID, stakerNodeID)) + _, ok := env.config.Validators.GetValidator(subnetID, stakerNodeID) + require.False(ok) case current: - require.True(validators.Contains(env.config.Validators, subnetID, stakerNodeID)) + _, ok := env.config.Validators.GetValidator(subnetID, stakerNodeID) + require.True(ok) } } }) @@ -699,19 +697,13 @@ func TestBanffProposalBlockUpdateStakers(t *testing.T) { func TestBanffProposalBlockRemoveSubnetValidator(t *testing.T) { require := require.New(t) - env := newEnvironment(t, nil) - defer func() { - require.NoError(shutdownEnvironment(env)) - }() - env.config.BanffTime = time.Time{} // activate Banff + env := newEnvironment(t, nil, banff) subnetID := testSubnet1.ID() env.config.TrackedSubnets.Add(subnetID) - env.config.Validators.Add(subnetID, validators.NewSet()) // Add a subnet validator to the staker set - subnetValidatorNodeID := ids.NodeID(preFundedKeys[0].PublicKey().Address()) - // Starts after the corre + subnetValidatorNodeID := 
genesisNodeIDs[0] subnetVdr1StartTime := defaultValidateStartTime subnetVdr1EndTime := defaultValidateStartTime.Add(defaultMinStakingDuration) tx, err := env.txBuilder.NewAddSubnetValidatorTx( @@ -722,12 +714,15 @@ func TestBanffProposalBlockRemoveSubnetValidator(t *testing.T) { subnetID, // Subnet ID []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, ids.ShortEmpty, + nil, ) require.NoError(err) + addSubnetValTx := tx.Unsigned.(*txs.AddSubnetValidatorTx) staker, err := state.NewCurrentStaker( tx.ID(), - tx.Unsigned.(*txs.AddSubnetValidatorTx), + addSubnetValTx, + addSubnetValTx.StartTime(), 0, ) require.NoError(err) @@ -739,7 +734,7 @@ func TestBanffProposalBlockRemoveSubnetValidator(t *testing.T) { // The above validator is now part of the staking set // Queue a staker that joins the staker set after the above validator leaves - subnetVdr2NodeID := ids.NodeID(preFundedKeys[1].PublicKey().Address()) + subnetVdr2NodeID := genesisNodeIDs[1] tx, err = env.txBuilder.NewAddSubnetValidatorTx( 1, // Weight uint64(subnetVdr1EndTime.Add(time.Second).Unix()), // Start time @@ -748,6 +743,7 @@ func TestBanffProposalBlockRemoveSubnetValidator(t *testing.T) { subnetID, // Subnet ID []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, ids.ShortEmpty, + nil, ) require.NoError(err) @@ -779,13 +775,16 @@ func TestBanffProposalBlockRemoveSubnetValidator(t *testing.T) { reward.PercentDenominator, []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, ids.ShortEmpty, + nil, ) require.NoError(err) // store Staker0 to state + addValTx := addStaker0.Unsigned.(*txs.AddValidatorTx) staker, err = state.NewCurrentStaker( addStaker0.ID(), - addStaker0.Unsigned.(*txs.AddValidatorTx), + addValTx, + addValTx.StartTime(), 0, ) require.NoError(err) @@ -804,13 +803,14 @@ func TestBanffProposalBlockRemoveSubnetValidator(t *testing.T) { // build proposal block moving ahead chain time preferredID := env.state.GetLastAccepted() - parentBlk, _, err := 
env.state.GetStatelessBlock(preferredID) + parentBlk, err := env.state.GetStatelessBlock(preferredID) require.NoError(err) - statelessProposalBlock, err := blocks.NewBanffProposalBlock( + statelessProposalBlock, err := block.NewBanffProposalBlock( subnetVdr1EndTime, parentBlk.ID(), parentBlk.Height()+1, s0RewardTx, + []*txs.Tx{}, ) require.NoError(err) propBlk := env.blkManager.NewBlock(statelessProposalBlock) @@ -829,29 +829,25 @@ func TestBanffProposalBlockRemoveSubnetValidator(t *testing.T) { // Check VM Validators are removed successfully require.NoError(propBlk.Accept(context.Background())) require.NoError(commitBlk.Accept(context.Background())) - require.False(validators.Contains(env.config.Validators, subnetID, subnetVdr2NodeID)) - require.False(validators.Contains(env.config.Validators, subnetID, subnetValidatorNodeID)) + _, ok := env.config.Validators.GetValidator(subnetID, subnetVdr2NodeID) + require.False(ok) + _, ok = env.config.Validators.GetValidator(subnetID, subnetValidatorNodeID) + require.False(ok) } func TestBanffProposalBlockTrackedSubnet(t *testing.T) { for _, tracked := range []bool{true, false} { - t.Run(fmt.Sprintf("tracked %t", tracked), func(ts *testing.T) { + t.Run(fmt.Sprintf("tracked %t", tracked), func(t *testing.T) { require := require.New(t) - env := newEnvironment(t, nil) - defer func() { - require.NoError(shutdownEnvironment(env)) - }() - env.config.BanffTime = time.Time{} // activate Banff + env := newEnvironment(t, nil, banff) subnetID := testSubnet1.ID() if tracked { env.config.TrackedSubnets.Add(subnetID) - env.config.Validators.Add(subnetID, validators.NewSet()) } // Add a subnet validator to the staker set - subnetValidatorNodeID := ids.NodeID(preFundedKeys[0].PublicKey().Address()) - + subnetValidatorNodeID := genesisNodeIDs[0] subnetVdr1StartTime := defaultGenesisTime.Add(1 * time.Minute) subnetVdr1EndTime := defaultGenesisTime.Add(10 * defaultMinStakingDuration).Add(1 * time.Minute) tx, err := 
env.txBuilder.NewAddSubnetValidatorTx( @@ -862,6 +858,7 @@ func TestBanffProposalBlockTrackedSubnet(t *testing.T) { subnetID, // Subnet ID []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, ids.ShortEmpty, + nil, ) require.NoError(err) @@ -891,13 +888,16 @@ func TestBanffProposalBlockTrackedSubnet(t *testing.T) { reward.PercentDenominator, []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, ids.ShortEmpty, + nil, ) require.NoError(err) // store Staker0 to state + addValTx := addStaker0.Unsigned.(*txs.AddValidatorTx) staker, err = state.NewCurrentStaker( addStaker0.ID(), - addStaker0.Unsigned.(*txs.AddValidatorTx), + addValTx, + addValTx.StartTime(), 0, ) require.NoError(err) @@ -916,13 +916,14 @@ func TestBanffProposalBlockTrackedSubnet(t *testing.T) { // build proposal block moving ahead chain time preferredID := env.state.GetLastAccepted() - parentBlk, _, err := env.state.GetStatelessBlock(preferredID) + parentBlk, err := env.state.GetStatelessBlock(preferredID) require.NoError(err) - statelessProposalBlock, err := blocks.NewBanffProposalBlock( + statelessProposalBlock, err := block.NewBanffProposalBlock( subnetVdr1StartTime, parentBlk.ID(), parentBlk.Height()+1, s0RewardTx, + []*txs.Tx{}, ) require.NoError(err) propBlk := env.blkManager.NewBlock(statelessProposalBlock) @@ -934,18 +935,15 @@ func TestBanffProposalBlockTrackedSubnet(t *testing.T) { require.NoError(propBlk.Accept(context.Background())) require.NoError(commitBlk.Accept(context.Background())) - require.Equal(tracked, validators.Contains(env.config.Validators, subnetID, subnetValidatorNodeID)) + _, ok := env.config.Validators.GetValidator(subnetID, subnetValidatorNodeID) + require.True(ok) }) } } func TestBanffProposalBlockDelegatorStakerWeight(t *testing.T) { require := require.New(t) - env := newEnvironment(t, nil) - defer func() { - require.NoError(shutdownEnvironment(env)) - }() - env.config.BanffTime = time.Time{} // activate Banff + env := newEnvironment(t, nil, banff) // 
Case: Timestamp is after next validator start time // Add a pending validator @@ -976,13 +974,16 @@ func TestBanffProposalBlockDelegatorStakerWeight(t *testing.T) { reward.PercentDenominator, []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, ids.ShortEmpty, + nil, ) require.NoError(err) // store Staker0 to state + addValTx := addStaker0.Unsigned.(*txs.AddValidatorTx) staker, err := state.NewCurrentStaker( addStaker0.ID(), - addStaker0.Unsigned.(*txs.AddValidatorTx), + addValTx, + addValTx.StartTime(), 0, ) require.NoError(err) @@ -1001,13 +1002,14 @@ func TestBanffProposalBlockDelegatorStakerWeight(t *testing.T) { // build proposal block moving ahead chain time preferredID := env.state.GetLastAccepted() - parentBlk, _, err := env.state.GetStatelessBlock(preferredID) + parentBlk, err := env.state.GetStatelessBlock(preferredID) require.NoError(err) - statelessProposalBlock, err := blocks.NewBanffProposalBlock( + statelessProposalBlock, err := block.NewBanffProposalBlock( pendingValidatorStartTime, parentBlk.ID(), parentBlk.Height()+1, s0RewardTx, + []*txs.Tx{}, ) require.NoError(err) propBlk := env.blkManager.NewBlock(statelessProposalBlock) @@ -1022,9 +1024,7 @@ func TestBanffProposalBlockDelegatorStakerWeight(t *testing.T) { require.NoError(commitBlk.Accept(context.Background())) // Test validator weight before delegation - primarySet, ok := env.config.Validators.Get(constants.PrimaryNetworkID) - require.True(ok) - vdrWeight := primarySet.GetWeight(nodeID) + vdrWeight := env.config.Validators.GetWeight(constants.PrimaryNetworkID, nodeID) require.Equal(env.config.MinValidatorStake, vdrWeight) // Add delegator @@ -1043,6 +1043,7 @@ func TestBanffProposalBlockDelegatorStakerWeight(t *testing.T) { preFundedKeys[4], }, ids.ShortEmpty, + nil, ) require.NoError(err) @@ -1069,13 +1070,16 @@ func TestBanffProposalBlockDelegatorStakerWeight(t *testing.T) { reward.PercentDenominator, []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, ids.ShortEmpty, + 
nil, ) require.NoError(err) // store Staker0 to state + addValTx = addStaker0.Unsigned.(*txs.AddValidatorTx) staker, err = state.NewCurrentStaker( addStaker0.ID(), - addStaker0.Unsigned.(*txs.AddValidatorTx), + addValTx, + addValTx.StartTime(), 0, ) require.NoError(err) @@ -1094,13 +1098,14 @@ func TestBanffProposalBlockDelegatorStakerWeight(t *testing.T) { // Advance Time preferredID = env.state.GetLastAccepted() - parentBlk, _, err = env.state.GetStatelessBlock(preferredID) + parentBlk, err = env.state.GetStatelessBlock(preferredID) require.NoError(err) - statelessProposalBlock, err = blocks.NewBanffProposalBlock( + statelessProposalBlock, err = block.NewBanffProposalBlock( pendingDelegatorStartTime, parentBlk.ID(), parentBlk.Height()+1, s0RewardTx, + []*txs.Tx{}, ) require.NoError(err) @@ -1116,26 +1121,21 @@ func TestBanffProposalBlockDelegatorStakerWeight(t *testing.T) { require.NoError(commitBlk.Accept(context.Background())) // Test validator weight after delegation - vdrWeight = primarySet.GetWeight(nodeID) + vdrWeight = env.config.Validators.GetWeight(constants.PrimaryNetworkID, nodeID) require.Equal(env.config.MinDelegatorStake+env.config.MinValidatorStake, vdrWeight) } func TestBanffProposalBlockDelegatorStakers(t *testing.T) { require := require.New(t) - env := newEnvironment(t, nil) - defer func() { - require.NoError(shutdownEnvironment(env)) - }() - env.config.BanffTime = time.Time{} // activate Banff + env := newEnvironment(t, nil, banff) // Case: Timestamp is after next validator start time // Add a pending validator pendingValidatorStartTime := defaultGenesisTime.Add(1 * time.Second) pendingValidatorEndTime := pendingValidatorStartTime.Add(defaultMinStakingDuration) - factory := secp256k1.Factory{} - nodeIDKey, _ := factory.NewPrivateKey() + nodeIDKey, _ := secp256k1.NewPrivateKey() rewardAddress := nodeIDKey.PublicKey().Address() - nodeID := ids.NodeID(rewardAddress) + nodeID := ids.BuildTestNodeID(rewardAddress[:]) _, err := addPendingValidator( 
env, @@ -1160,13 +1160,16 @@ func TestBanffProposalBlockDelegatorStakers(t *testing.T) { reward.PercentDenominator, []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, ids.ShortEmpty, + nil, ) require.NoError(err) // store Staker0 to state + addValTx := addStaker0.Unsigned.(*txs.AddValidatorTx) staker, err := state.NewCurrentStaker( addStaker0.ID(), - addStaker0.Unsigned.(*txs.AddValidatorTx), + addValTx, + addValTx.StartTime(), 0, ) require.NoError(err) @@ -1185,13 +1188,14 @@ func TestBanffProposalBlockDelegatorStakers(t *testing.T) { // build proposal block moving ahead chain time preferredID := env.state.GetLastAccepted() - parentBlk, _, err := env.state.GetStatelessBlock(preferredID) + parentBlk, err := env.state.GetStatelessBlock(preferredID) require.NoError(err) - statelessProposalBlock, err := blocks.NewBanffProposalBlock( + statelessProposalBlock, err := block.NewBanffProposalBlock( pendingValidatorStartTime, parentBlk.ID(), parentBlk.Height()+1, s0RewardTx, + []*txs.Tx{}, ) require.NoError(err) propBlk := env.blkManager.NewBlock(statelessProposalBlock) @@ -1206,9 +1210,7 @@ func TestBanffProposalBlockDelegatorStakers(t *testing.T) { require.NoError(commitBlk.Accept(context.Background())) // Test validator weight before delegation - primarySet, ok := env.config.Validators.Get(constants.PrimaryNetworkID) - require.True(ok) - vdrWeight := primarySet.GetWeight(nodeID) + vdrWeight := env.config.Validators.GetWeight(constants.PrimaryNetworkID, nodeID) require.Equal(env.config.MinValidatorStake, vdrWeight) // Add delegator @@ -1226,6 +1228,7 @@ func TestBanffProposalBlockDelegatorStakers(t *testing.T) { preFundedKeys[4], }, ids.ShortEmpty, + nil, ) require.NoError(err) @@ -1252,13 +1255,16 @@ func TestBanffProposalBlockDelegatorStakers(t *testing.T) { reward.PercentDenominator, []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, ids.ShortEmpty, + nil, ) require.NoError(err) // store Staker0 to state + addValTx = 
addStaker0.Unsigned.(*txs.AddValidatorTx) staker, err = state.NewCurrentStaker( addStaker0.ID(), - addStaker0.Unsigned.(*txs.AddValidatorTx), + addValTx, + addValTx.StartTime(), 0, ) require.NoError(err) @@ -1277,13 +1283,14 @@ func TestBanffProposalBlockDelegatorStakers(t *testing.T) { // Advance Time preferredID = env.state.GetLastAccepted() - parentBlk, _, err = env.state.GetStatelessBlock(preferredID) + parentBlk, err = env.state.GetStatelessBlock(preferredID) require.NoError(err) - statelessProposalBlock, err = blocks.NewBanffProposalBlock( + statelessProposalBlock, err = block.NewBanffProposalBlock( pendingDelegatorStartTime, parentBlk.ID(), parentBlk.Height()+1, s0RewardTx, + []*txs.Tx{}, ) require.NoError(err) propBlk = env.blkManager.NewBlock(statelessProposalBlock) @@ -1298,6 +1305,162 @@ func TestBanffProposalBlockDelegatorStakers(t *testing.T) { require.NoError(commitBlk.Accept(context.Background())) // Test validator weight after delegation - vdrWeight = primarySet.GetWeight(nodeID) + vdrWeight = env.config.Validators.GetWeight(constants.PrimaryNetworkID, nodeID) require.Equal(env.config.MinDelegatorStake+env.config.MinValidatorStake, vdrWeight) } + +func TestAddValidatorProposalBlock(t *testing.T) { + require := require.New(t) + env := newEnvironment(t, nil, durango) + + now := env.clk.Time() + + // Create validator tx + var ( + validatorStartTime = now.Add(2 * executor.SyncBound) + validatorEndTime = validatorStartTime.Add(env.config.MinStakeDuration) + nodeID = ids.GenerateTestNodeID() + ) + + sk, err := bls.NewSecretKey() + require.NoError(err) + + addValidatorTx, err := env.txBuilder.NewAddPermissionlessValidatorTx( + env.config.MinValidatorStake, + uint64(validatorStartTime.Unix()), + uint64(validatorEndTime.Unix()), + nodeID, + signer.NewProofOfPossession(sk), + preFundedKeys[0].PublicKey().Address(), + 10000, + []*secp256k1.PrivateKey{ + preFundedKeys[0], + preFundedKeys[1], + preFundedKeys[4], + }, + ids.ShortEmpty, + nil, + ) + 
require.NoError(err) + + // Add validator through a [StandardBlock] + preferredID := env.blkManager.Preferred() + preferred, err := env.blkManager.GetStatelessBlock(preferredID) + require.NoError(err) + + statelessBlk, err := block.NewBanffStandardBlock( + now.Add(executor.SyncBound), + preferredID, + preferred.Height()+1, + []*txs.Tx{addValidatorTx}, + ) + require.NoError(err) + blk := env.blkManager.NewBlock(statelessBlk) + require.NoError(blk.Verify(context.Background())) + require.NoError(blk.Accept(context.Background())) + require.True(env.blkManager.SetPreference(statelessBlk.ID())) + + // Should be current + staker, err := env.state.GetCurrentValidator(constants.PrimaryNetworkID, nodeID) + require.NoError(err) + require.NotNil(staker) + + // Advance time until next staker change time is [validatorEndTime] + for { + nextStakerChangeTime, err := executor.GetNextStakerChangeTime(env.state) + require.NoError(err) + if nextStakerChangeTime.Equal(validatorEndTime) { + break + } + + preferredID = env.blkManager.Preferred() + preferred, err = env.blkManager.GetStatelessBlock(preferredID) + require.NoError(err) + + statelessBlk, err = block.NewBanffStandardBlock( + nextStakerChangeTime, + preferredID, + preferred.Height()+1, + nil, + ) + require.NoError(err) + blk = env.blkManager.NewBlock(statelessBlk) + require.NoError(blk.Verify(context.Background())) + require.NoError(blk.Accept(context.Background())) + require.True(env.blkManager.SetPreference(statelessBlk.ID())) + } + + env.clk.Set(validatorEndTime) + now = env.clk.Time() + + // Create another validator tx + validatorStartTime = now.Add(2 * executor.SyncBound) + validatorEndTime = validatorStartTime.Add(env.config.MinStakeDuration) + nodeID = ids.GenerateTestNodeID() + + sk, err = bls.NewSecretKey() + require.NoError(err) + + addValidatorTx2, err := env.txBuilder.NewAddPermissionlessValidatorTx( + env.config.MinValidatorStake, + uint64(validatorStartTime.Unix()), + uint64(validatorEndTime.Unix()), + nodeID, + 
signer.NewProofOfPossession(sk), + preFundedKeys[0].PublicKey().Address(), + 10000, + []*secp256k1.PrivateKey{ + preFundedKeys[0], + preFundedKeys[1], + preFundedKeys[4], + }, + ids.ShortEmpty, + nil, + ) + require.NoError(err) + + // Add validator through a [ProposalBlock] and reward the last one + preferredID = env.blkManager.Preferred() + preferred, err = env.blkManager.GetStatelessBlock(preferredID) + require.NoError(err) + + rewardValidatorTx, err := newRewardValidatorTx(t, addValidatorTx.ID()) + require.NoError(err) + + statelessProposalBlk, err := block.NewBanffProposalBlock( + now, + preferredID, + preferred.Height()+1, + rewardValidatorTx, + []*txs.Tx{addValidatorTx2}, + ) + require.NoError(err) + blk = env.blkManager.NewBlock(statelessProposalBlk) + require.NoError(blk.Verify(context.Background())) + + options, err := blk.(snowman.OracleBlock).Options(context.Background()) + require.NoError(err) + commitBlk := options[0] + require.NoError(commitBlk.Verify(context.Background())) + + require.NoError(blk.Accept(context.Background())) + require.NoError(commitBlk.Accept(context.Background())) + + // Should be current + staker, err = env.state.GetCurrentValidator(constants.PrimaryNetworkID, nodeID) + require.NoError(err) + require.NotNil(staker) + + rewardUTXOs, err := env.state.GetRewardUTXOs(addValidatorTx.ID()) + require.NoError(err) + require.Empty(rewardUTXOs) // No rewards on Flare +} + +func newRewardValidatorTx(t testing.TB, txID ids.ID) (*txs.Tx, error) { + utx := &txs.RewardValidatorTx{TxID: txID} + tx, err := txs.NewSigned(utx, txs.Codec, nil) + if err != nil { + return nil, err + } + return tx, tx.SyntacticVerify(snowtest.Context(t, snowtest.PChainID)) +} diff --git a/avalanchego/vms/platformvm/blocks/executor/rejector.go b/avalanchego/vms/platformvm/block/executor/rejector.go similarity index 53% rename from avalanchego/vms/platformvm/blocks/executor/rejector.go rename to avalanchego/vms/platformvm/block/executor/rejector.go index 
6c039b29..b5dde1f6 100644 --- a/avalanchego/vms/platformvm/blocks/executor/rejector.go +++ b/avalanchego/vms/platformvm/block/executor/rejector.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package executor @@ -6,56 +6,56 @@ package executor import ( "go.uber.org/zap" - "github.com/ava-labs/avalanchego/snow/choices" - "github.com/ava-labs/avalanchego/vms/platformvm/blocks" + "github.com/ava-labs/avalanchego/vms/platformvm/block" ) -var _ blocks.Visitor = (*rejector)(nil) +var _ block.Visitor = (*rejector)(nil) // rejector handles the logic for rejecting a block. // All errors returned by this struct are fatal and should result in the chain // being shutdown. type rejector struct { *backend + addTxsToMempool bool } -func (r *rejector) BanffAbortBlock(b *blocks.BanffAbortBlock) error { +func (r *rejector) BanffAbortBlock(b *block.BanffAbortBlock) error { return r.rejectBlock(b, "banff abort") } -func (r *rejector) BanffCommitBlock(b *blocks.BanffCommitBlock) error { +func (r *rejector) BanffCommitBlock(b *block.BanffCommitBlock) error { return r.rejectBlock(b, "banff commit") } -func (r *rejector) BanffProposalBlock(b *blocks.BanffProposalBlock) error { +func (r *rejector) BanffProposalBlock(b *block.BanffProposalBlock) error { return r.rejectBlock(b, "banff proposal") } -func (r *rejector) BanffStandardBlock(b *blocks.BanffStandardBlock) error { +func (r *rejector) BanffStandardBlock(b *block.BanffStandardBlock) error { return r.rejectBlock(b, "banff standard") } -func (r *rejector) ApricotAbortBlock(b *blocks.ApricotAbortBlock) error { +func (r *rejector) ApricotAbortBlock(b *block.ApricotAbortBlock) error { return r.rejectBlock(b, "apricot abort") } -func (r *rejector) ApricotCommitBlock(b *blocks.ApricotCommitBlock) error { +func (r *rejector) ApricotCommitBlock(b *block.ApricotCommitBlock) error { return 
r.rejectBlock(b, "apricot commit") } -func (r *rejector) ApricotProposalBlock(b *blocks.ApricotProposalBlock) error { +func (r *rejector) ApricotProposalBlock(b *block.ApricotProposalBlock) error { return r.rejectBlock(b, "apricot proposal") } -func (r *rejector) ApricotStandardBlock(b *blocks.ApricotStandardBlock) error { +func (r *rejector) ApricotStandardBlock(b *block.ApricotStandardBlock) error { return r.rejectBlock(b, "apricot standard") } -func (r *rejector) ApricotAtomicBlock(b *blocks.ApricotAtomicBlock) error { +func (r *rejector) ApricotAtomicBlock(b *block.ApricotAtomicBlock) error { return r.rejectBlock(b, "apricot atomic") } -func (r *rejector) rejectBlock(b blocks.Block, blockType string) error { +func (r *rejector) rejectBlock(b block.Block, blockType string) error { blkID := b.ID() defer r.free(blkID) @@ -67,6 +67,10 @@ func (r *rejector) rejectBlock(b blocks.Block, blockType string) error { zap.Stringer("parentID", b.Parent()), ) + if !r.addTxsToMempool { + return nil + } + for _, tx := range b.Txs() { if err := r.Mempool.Add(tx); err != nil { r.ctx.Log.Debug( @@ -78,6 +82,7 @@ func (r *rejector) rejectBlock(b blocks.Block, blockType string) error { } } - r.state.AddStatelessBlock(b, choices.Rejected) - return r.state.Commit() + r.Mempool.RequestBuildBlock(false /*=emptyBlockPermitted*/) + + return nil } diff --git a/avalanchego/vms/platformvm/blocks/executor/rejector_test.go b/avalanchego/vms/platformvm/block/executor/rejector_test.go similarity index 61% rename from avalanchego/vms/platformvm/blocks/executor/rejector_test.go rename to avalanchego/vms/platformvm/block/executor/rejector_test.go index 3c909e6b..41881d27 100644 --- a/avalanchego/vms/platformvm/blocks/executor/rejector_test.go +++ b/avalanchego/vms/platformvm/block/executor/rejector_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package executor @@ -7,16 +7,14 @@ import ( "testing" "time" - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/vms/components/verify" - "github.com/ava-labs/avalanchego/vms/platformvm/blocks" + "github.com/ava-labs/avalanchego/vms/platformvm/block" "github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/ava-labs/avalanchego/vms/platformvm/txs/mempool" @@ -26,15 +24,15 @@ import ( func TestRejectBlock(t *testing.T) { type test struct { name string - newBlockFunc func() (blocks.Block, error) - rejectFunc func(*rejector, blocks.Block) error + newBlockFunc func() (block.Block, error) + rejectFunc func(*rejector, block.Block) error } tests := []test{ { name: "proposal block", - newBlockFunc: func() (blocks.Block, error) { - return blocks.NewBanffProposalBlock( + newBlockFunc: func() (block.Block, error) { + return block.NewBanffProposalBlock( time.Now(), ids.GenerateTestID(), 1, @@ -45,16 +43,17 @@ func TestRejectBlock(t *testing.T) { }, Creds: []verify.Verifiable{}, }, + []*txs.Tx{}, ) }, - rejectFunc: func(r *rejector, b blocks.Block) error { - return r.BanffProposalBlock(b.(*blocks.BanffProposalBlock)) + rejectFunc: func(r *rejector, b block.Block) error { + return r.BanffProposalBlock(b.(*block.BanffProposalBlock)) }, }, { name: "atomic block", - newBlockFunc: func() (blocks.Block, error) { - return blocks.NewApricotAtomicBlock( + newBlockFunc: func() (block.Block, error) { + return block.NewApricotAtomicBlock( ids.GenerateTestID(), 1, &txs.Tx{ @@ -66,14 +65,14 @@ func TestRejectBlock(t *testing.T) { }, ) }, - rejectFunc: func(r *rejector, b blocks.Block) error { - return r.ApricotAtomicBlock(b.(*blocks.ApricotAtomicBlock)) + rejectFunc: func(r 
*rejector, b block.Block) error { + return r.ApricotAtomicBlock(b.(*block.ApricotAtomicBlock)) }, }, { name: "standard block", - newBlockFunc: func() (blocks.Block, error) { - return blocks.NewBanffStandardBlock( + newBlockFunc: func() (block.Block, error) { + return block.NewBanffStandardBlock( time.Now(), ids.GenerateTestID(), 1, @@ -88,26 +87,26 @@ func TestRejectBlock(t *testing.T) { }, ) }, - rejectFunc: func(r *rejector, b blocks.Block) error { - return r.BanffStandardBlock(b.(*blocks.BanffStandardBlock)) + rejectFunc: func(r *rejector, b block.Block) error { + return r.BanffStandardBlock(b.(*block.BanffStandardBlock)) }, }, { name: "commit", - newBlockFunc: func() (blocks.Block, error) { - return blocks.NewBanffCommitBlock(time.Now(), ids.GenerateTestID() /*parent*/, 1 /*height*/) + newBlockFunc: func() (block.Block, error) { + return block.NewBanffCommitBlock(time.Now(), ids.GenerateTestID() /*parent*/, 1 /*height*/) }, - rejectFunc: func(r *rejector, blk blocks.Block) error { - return r.BanffCommitBlock(blk.(*blocks.BanffCommitBlock)) + rejectFunc: func(r *rejector, blk block.Block) error { + return r.BanffCommitBlock(blk.(*block.BanffCommitBlock)) }, }, { name: "abort", - newBlockFunc: func() (blocks.Block, error) { - return blocks.NewBanffAbortBlock(time.Now(), ids.GenerateTestID() /*parent*/, 1 /*height*/) + newBlockFunc: func() (block.Block, error) { + return block.NewBanffAbortBlock(time.Now(), ids.GenerateTestID() /*parent*/, 1 /*height*/) }, - rejectFunc: func(r *rejector, blk blocks.Block) error { - return r.BanffAbortBlock(blk.(*blocks.BanffAbortBlock)) + rejectFunc: func(r *rejector, blk block.Block) error { + return r.BanffAbortBlock(blk.(*block.BanffAbortBlock)) }, }, } @@ -116,7 +115,6 @@ func TestRejectBlock(t *testing.T) { t.Run(tt.name, func(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() blk, err := tt.newBlockFunc() require.NoError(err) @@ -136,19 +134,17 @@ func TestRejectBlock(t 
*testing.T) { Mempool: mempool, state: state, }, + addTxsToMempool: true, } // Set expected calls on dependencies. for _, tx := range blk.Txs() { mempool.EXPECT().Add(tx).Return(nil).Times(1) } - gomock.InOrder( - state.EXPECT().AddStatelessBlock(blk, choices.Rejected).Times(1), - state.EXPECT().Commit().Return(nil).Times(1), - ) - err = tt.rejectFunc(rejector, blk) - require.NoError(err) + mempool.EXPECT().RequestBuildBlock(false).Times(1) + + require.NoError(tt.rejectFunc(rejector, blk)) // Make sure block and its parent are removed from the state map. require.NotContains(rejector.blkIDToState, blk.ID()) }) diff --git a/avalanchego/vms/platformvm/blocks/executor/standard_block_test.go b/avalanchego/vms/platformvm/block/executor/standard_block_test.go similarity index 78% rename from avalanchego/vms/platformvm/blocks/executor/standard_block_test.go rename to avalanchego/vms/platformvm/block/executor/standard_block_test.go index 433983c9..4b1c0c73 100644 --- a/avalanchego/vms/platformvm/blocks/executor/standard_block_test.go +++ b/avalanchego/vms/platformvm/block/executor/standard_block_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package executor @@ -9,40 +9,33 @@ import ( "testing" "time" - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/vms/components/avax" - "github.com/ava-labs/avalanchego/vms/platformvm/blocks" + "github.com/ava-labs/avalanchego/vms/platformvm/block" "github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/ava-labs/avalanchego/vms/platformvm/status" "github.com/ava-labs/avalanchego/vms/platformvm/txs" + "github.com/ava-labs/avalanchego/vms/platformvm/txs/executor" "github.com/ava-labs/avalanchego/vms/secp256k1fx" - - txexecutor "github.com/ava-labs/avalanchego/vms/platformvm/txs/executor" ) func TestApricotStandardBlockTimeVerification(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() - env := newEnvironment(t, ctrl) - defer func() { - require.NoError(shutdownEnvironment(env)) - }() + env := newEnvironment(t, ctrl, apricotPhase5) // setup and store parent block // it's a standard block for simplicity parentHeight := uint64(2022) - apricotParentBlk, err := blocks.NewApricotStandardBlock( + apricotParentBlk, err := block.NewApricotStandardBlock( ids.Empty, // does not matter parentHeight, nil, // txs do not matter in this test @@ -64,45 +57,41 @@ func TestApricotStandardBlockTimeVerification(t *testing.T) { onParentAccept.EXPECT().GetTimestamp().Return(chainTime).AnyTimes() // wrong height - apricotChildBlk, err := blocks.NewApricotStandardBlock( + apricotChildBlk, err := block.NewApricotStandardBlock( apricotParentBlk.ID(), apricotParentBlk.Height(), nil, // txs nulled to simplify test ) require.NoError(err) - block := env.blkManager.NewBlock(apricotChildBlk) - 
require.Error(block.Verify(context.Background())) + blk := env.blkManager.NewBlock(apricotChildBlk) + err = blk.Verify(context.Background()) + require.ErrorIs(err, errIncorrectBlockHeight) // valid height - apricotChildBlk, err = blocks.NewApricotStandardBlock( + apricotChildBlk, err = block.NewApricotStandardBlock( apricotParentBlk.ID(), apricotParentBlk.Height()+1, nil, // txs nulled to simplify test ) require.NoError(err) - block = env.blkManager.NewBlock(apricotChildBlk) - require.NoError(block.Verify(context.Background())) + blk = env.blkManager.NewBlock(apricotChildBlk) + require.NoError(blk.Verify(context.Background())) } func TestBanffStandardBlockTimeVerification(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() - env := newEnvironment(t, ctrl) - defer func() { - require.NoError(shutdownEnvironment(env)) - }() + env := newEnvironment(t, ctrl, banff) now := env.clk.Time() env.clk.Set(now) - env.config.BanffTime = time.Time{} // activate Banff // setup and store parent block // it's a standard block for simplicity parentTime := now parentHeight := uint64(2022) - banffParentBlk, err := blocks.NewBanffStandardBlock( + banffParentBlk, err := block.NewBanffStandardBlock( parentTime, ids.Empty, // does not matter parentHeight, @@ -123,7 +112,7 @@ func TestBanffStandardBlockTimeVerification(t *testing.T) { env.mockedState.EXPECT().GetLastAccepted().Return(parentID).AnyTimes() env.mockedState.EXPECT().GetTimestamp().Return(chainTime).AnyTimes() - nextStakerTime := chainTime.Add(txexecutor.SyncBound).Add(-1 * time.Second) + nextStakerTime := chainTime.Add(executor.SyncBound).Add(-1 * time.Second) // store just once current staker to mark next staker time. 
currentStakerIt := state.NewMockStakerIterator(ctrl) @@ -180,20 +169,21 @@ func TestBanffStandardBlockTimeVerification(t *testing.T) { { // wrong version - banffChildBlk, err := blocks.NewApricotStandardBlock( + banffChildBlk, err := block.NewApricotStandardBlock( banffParentBlk.ID(), banffParentBlk.Height()+1, []*txs.Tx{tx}, ) require.NoError(err) block := env.blkManager.NewBlock(banffChildBlk) - require.Error(block.Verify(context.Background())) + err = block.Verify(context.Background()) + require.ErrorIs(err, errApricotBlockIssuedAfterFork) } { // wrong height childTimestamp := parentTime.Add(time.Second) - banffChildBlk, err := blocks.NewBanffStandardBlock( + banffChildBlk, err := block.NewBanffStandardBlock( childTimestamp, banffParentBlk.ID(), banffParentBlk.Height(), @@ -201,13 +191,14 @@ func TestBanffStandardBlockTimeVerification(t *testing.T) { ) require.NoError(err) block := env.blkManager.NewBlock(banffChildBlk) - require.Error(block.Verify(context.Background())) + err = block.Verify(context.Background()) + require.ErrorIs(err, errIncorrectBlockHeight) } { // wrong timestamp, earlier than parent childTimestamp := parentTime.Add(-1 * time.Second) - banffChildBlk, err := blocks.NewBanffStandardBlock( + banffChildBlk, err := block.NewBanffStandardBlock( childTimestamp, banffParentBlk.ID(), banffParentBlk.Height()+1, @@ -215,27 +206,31 @@ func TestBanffStandardBlockTimeVerification(t *testing.T) { ) require.NoError(err) block := env.blkManager.NewBlock(banffChildBlk) - require.Error(block.Verify(context.Background())) + err = block.Verify(context.Background()) + require.ErrorIs(err, errChildBlockEarlierThanParent) } { // wrong timestamp, violated synchrony bound - childTimestamp := parentTime.Add(txexecutor.SyncBound).Add(time.Second) - banffChildBlk, err := blocks.NewBanffStandardBlock( - childTimestamp, + initClkTime := env.clk.Time() + env.clk.Set(parentTime.Add(-executor.SyncBound)) + banffChildBlk, err := block.NewBanffStandardBlock( + 
parentTime.Add(time.Second), banffParentBlk.ID(), banffParentBlk.Height()+1, []*txs.Tx{tx}, ) require.NoError(err) block := env.blkManager.NewBlock(banffChildBlk) - require.Error(block.Verify(context.Background())) + err = block.Verify(context.Background()) + require.ErrorIs(err, executor.ErrChildBlockBeyondSyncBound) + env.clk.Set(initClkTime) } { // wrong timestamp, skipped staker set change event childTimestamp := nextStakerTime.Add(time.Second) - banffChildBlk, err := blocks.NewBanffStandardBlock( + banffChildBlk, err := block.NewBanffStandardBlock( childTimestamp, banffParentBlk.ID(), banffParentBlk.Height()+1, @@ -243,13 +238,14 @@ func TestBanffStandardBlockTimeVerification(t *testing.T) { ) require.NoError(err) block := env.blkManager.NewBlock(banffChildBlk) - require.Error(block.Verify(context.Background())) + err = block.Verify(context.Background()) + require.ErrorIs(err, executor.ErrChildBlockAfterStakerChangeTime) } { // no state changes childTimestamp := parentTime - banffChildBlk, err := blocks.NewBanffStandardBlock( + banffChildBlk, err := block.NewBanffStandardBlock( childTimestamp, banffParentBlk.ID(), banffParentBlk.Height()+1, @@ -257,13 +253,14 @@ func TestBanffStandardBlockTimeVerification(t *testing.T) { ) require.NoError(err) block := env.blkManager.NewBlock(banffChildBlk) - require.ErrorIs(block.Verify(context.Background()), errBanffStandardBlockWithoutChanges) + err = block.Verify(context.Background()) + require.ErrorIs(err, errBanffStandardBlockWithoutChanges) } { // valid block, same timestamp as parent block childTimestamp := parentTime - banffChildBlk, err := blocks.NewBanffStandardBlock( + banffChildBlk, err := block.NewBanffStandardBlock( childTimestamp, banffParentBlk.ID(), banffParentBlk.Height()+1, @@ -277,7 +274,7 @@ func TestBanffStandardBlockTimeVerification(t *testing.T) { { // valid childTimestamp := nextStakerTime - banffChildBlk, err := blocks.NewBanffStandardBlock( + banffChildBlk, err := block.NewBanffStandardBlock( 
childTimestamp, banffParentBlk.ID(), banffParentBlk.Height()+1, @@ -292,11 +289,7 @@ func TestBanffStandardBlockTimeVerification(t *testing.T) { func TestBanffStandardBlockUpdatePrimaryNetworkStakers(t *testing.T) { require := require.New(t) - env := newEnvironment(t, nil) - defer func() { - require.NoError(shutdownEnvironment(env)) - }() - env.config.BanffTime = time.Time{} // activate Banff + env := newEnvironment(t, nil, banff) // Case: Timestamp is after next validator start time // Add a pending validator @@ -316,9 +309,9 @@ func TestBanffStandardBlockUpdatePrimaryNetworkStakers(t *testing.T) { // build standard block moving ahead chain time preferredID := env.state.GetLastAccepted() - parentBlk, _, err := env.state.GetStatelessBlock(preferredID) + parentBlk, err := env.state.GetStatelessBlock(preferredID) require.NoError(err) - statelessStandardBlock, err := blocks.NewBanffStandardBlock( + statelessStandardBlock, err := block.NewBanffStandardBlock( pendingValidatorStartTime, parentBlk.ID(), parentBlk.Height()+1, @@ -343,7 +336,8 @@ func TestBanffStandardBlockUpdatePrimaryNetworkStakers(t *testing.T) { // Test VM validators require.NoError(block.Accept(context.Background())) - require.True(validators.Contains(env.config.Validators, constants.PrimaryNetworkID, nodeID)) + _, ok := env.config.Validators.GetValidator(constants.PrimaryNetworkID, nodeID) + require.True(ok) } // Ensure semantic verification updates the current and pending staker sets correctly. @@ -357,39 +351,45 @@ func TestBanffStandardBlockUpdateStakers(t *testing.T) { // Staker3sub: |----------------| // Staker4: |------------------------| // Staker5: |--------------------| + + // In this test multiple stakers may join and leave the staker set at the same time. + // The order in which they do it is asserted; the order may depend on the staker.TxID, + // which in turns depend on every feature of the transaction creating the staker. 
+ // So in this test we avoid ids.GenerateTestNodeID, in favour of ids.BuildTestNodeID + // so that TxID does not depend on the order we run tests. staker1 := staker{ - nodeID: ids.GenerateTestNodeID(), - rewardAddress: ids.GenerateTestShortID(), + nodeID: ids.BuildTestNodeID([]byte{0xf1}), + rewardAddress: ids.ShortID{0xf1}, startTime: defaultGenesisTime.Add(1 * time.Minute), endTime: defaultGenesisTime.Add(10 * defaultMinStakingDuration).Add(1 * time.Minute), } staker2 := staker{ - nodeID: ids.GenerateTestNodeID(), - rewardAddress: ids.GenerateTestShortID(), + nodeID: ids.BuildTestNodeID([]byte{0xf2}), + rewardAddress: ids.ShortID{0xf2}, startTime: staker1.startTime.Add(1 * time.Minute), endTime: staker1.startTime.Add(1 * time.Minute).Add(defaultMinStakingDuration), } staker3 := staker{ - nodeID: ids.GenerateTestNodeID(), - rewardAddress: ids.GenerateTestShortID(), + nodeID: ids.BuildTestNodeID([]byte{0xf3}), + rewardAddress: ids.ShortID{0xf3}, startTime: staker2.startTime.Add(1 * time.Minute), endTime: staker2.endTime.Add(1 * time.Minute), } staker3Sub := staker{ - nodeID: staker3.nodeID, - rewardAddress: ids.GenerateTestShortID(), + nodeID: ids.BuildTestNodeID([]byte{0xf3}), + rewardAddress: ids.ShortID{0xff}, startTime: staker3.startTime.Add(1 * time.Minute), endTime: staker3.endTime.Add(-1 * time.Minute), } staker4 := staker{ - nodeID: ids.GenerateTestNodeID(), - rewardAddress: ids.GenerateTestShortID(), + nodeID: ids.BuildTestNodeID([]byte{0xf4}), + rewardAddress: ids.ShortID{0xf4}, startTime: staker3.startTime, endTime: staker3.endTime, } staker5 := staker{ - nodeID: ids.GenerateTestNodeID(), - rewardAddress: ids.GenerateTestShortID(), + nodeID: ids.BuildTestNodeID([]byte{0xf5}), + rewardAddress: ids.ShortID{0xf5}, startTime: staker2.endTime, endTime: staker2.endTime.Add(defaultMinStakingDuration), } @@ -468,11 +468,17 @@ func TestBanffStandardBlockUpdateStakers(t *testing.T) { }, }, { - description: "advance time to staker5 end", + description: "advance 
time to staker5 start", stakers: []staker{staker1, staker2, staker3, staker4, staker5}, advanceTimeTo: []time.Time{staker1.startTime, staker2.startTime, staker3.startTime, staker5.startTime}, expectedStakers: map[ids.NodeID]stakerStatus{ staker1.nodeID: current, + + // Staker2's end time matches staker5's start time, so typically + // the block builder would produce a ProposalBlock to remove + // staker2 when advancing the time. However, it is valid to only + // advance the time with a StandardBlock and not remove staker2, + // which is what this test does. staker2.nodeID: current, staker3.nodeID: current, staker4.nodeID: current, @@ -484,15 +490,10 @@ func TestBanffStandardBlockUpdateStakers(t *testing.T) { for _, test := range tests { t.Run(test.description, func(t *testing.T) { require := require.New(t) - env := newEnvironment(t, nil) - defer func() { - require.NoError(shutdownEnvironment(env)) - }() - env.config.BanffTime = time.Time{} // activate Banff + env := newEnvironment(t, nil, banff) subnetID := testSubnet1.ID() env.config.TrackedSubnets.Add(subnetID) - env.config.Validators.Add(subnetID, validators.NewSet()) for _, staker := range test.stakers { _, err := addPendingValidator( @@ -515,6 +516,7 @@ func TestBanffStandardBlockUpdateStakers(t *testing.T) { subnetID, // Subnet ID []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, ids.ShortEmpty, + nil, ) require.NoError(err) @@ -535,9 +537,9 @@ func TestBanffStandardBlockUpdateStakers(t *testing.T) { // build standard block moving ahead chain time preferredID := env.state.GetLastAccepted() - parentBlk, _, err := env.state.GetStatelessBlock(preferredID) + parentBlk, err := env.state.GetStatelessBlock(preferredID) require.NoError(err) - statelessStandardBlock, err := blocks.NewBanffStandardBlock( + statelessStandardBlock, err := block.NewBanffStandardBlock( newTime, parentBlk.ID(), parentBlk.Height()+1, @@ -557,20 +559,24 @@ func TestBanffStandardBlockUpdateStakers(t *testing.T) { case pending: _, 
err := env.state.GetPendingValidator(constants.PrimaryNetworkID, stakerNodeID) require.NoError(err) - require.False(validators.Contains(env.config.Validators, constants.PrimaryNetworkID, stakerNodeID)) + _, ok := env.config.Validators.GetValidator(constants.PrimaryNetworkID, stakerNodeID) + require.False(ok) case current: _, err := env.state.GetCurrentValidator(constants.PrimaryNetworkID, stakerNodeID) require.NoError(err) - require.True(validators.Contains(env.config.Validators, constants.PrimaryNetworkID, stakerNodeID)) + _, ok := env.config.Validators.GetValidator(constants.PrimaryNetworkID, stakerNodeID) + require.True(ok) } } for stakerNodeID, status := range test.expectedSubnetStakers { switch status { case pending: - require.False(validators.Contains(env.config.Validators, subnetID, stakerNodeID)) + _, ok := env.config.Validators.GetValidator(subnetID, stakerNodeID) + require.False(ok) case current: - require.True(validators.Contains(env.config.Validators, subnetID, stakerNodeID)) + _, ok := env.config.Validators.GetValidator(subnetID, stakerNodeID) + require.True(ok) } } }) @@ -583,19 +589,13 @@ func TestBanffStandardBlockUpdateStakers(t *testing.T) { // is after the new timestamp func TestBanffStandardBlockRemoveSubnetValidator(t *testing.T) { require := require.New(t) - env := newEnvironment(t, nil) - defer func() { - require.NoError(shutdownEnvironment(env)) - }() - env.config.BanffTime = time.Time{} // activate Banff + env := newEnvironment(t, nil, banff) subnetID := testSubnet1.ID() env.config.TrackedSubnets.Add(subnetID) - env.config.Validators.Add(subnetID, validators.NewSet()) // Add a subnet validator to the staker set - subnetValidatorNodeID := ids.NodeID(preFundedKeys[0].PublicKey().Address()) - // Starts after the corre + subnetValidatorNodeID := genesisNodeIDs[0] subnetVdr1StartTime := defaultValidateStartTime subnetVdr1EndTime := defaultValidateStartTime.Add(defaultMinStakingDuration) tx, err := env.txBuilder.NewAddSubnetValidatorTx( @@ 
-606,12 +606,15 @@ func TestBanffStandardBlockRemoveSubnetValidator(t *testing.T) { subnetID, // Subnet ID []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, ids.ShortEmpty, + nil, ) require.NoError(err) + addSubnetValTx := tx.Unsigned.(*txs.AddSubnetValidatorTx) staker, err := state.NewCurrentStaker( tx.ID(), - tx.Unsigned.(*txs.AddSubnetValidatorTx), + addSubnetValTx, + addSubnetValTx.StartTime(), 0, ) require.NoError(err) @@ -623,7 +626,7 @@ func TestBanffStandardBlockRemoveSubnetValidator(t *testing.T) { // The above validator is now part of the staking set // Queue a staker that joins the staker set after the above validator leaves - subnetVdr2NodeID := ids.NodeID(preFundedKeys[1].PublicKey().Address()) + subnetVdr2NodeID := genesisNodeIDs[1] tx, err = env.txBuilder.NewAddSubnetValidatorTx( 1, // Weight uint64(subnetVdr1EndTime.Add(time.Second).Unix()), // Start time @@ -632,6 +635,7 @@ func TestBanffStandardBlockRemoveSubnetValidator(t *testing.T) { subnetID, // Subnet ID []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, ids.ShortEmpty, + nil, ) require.NoError(err) @@ -651,9 +655,9 @@ func TestBanffStandardBlockRemoveSubnetValidator(t *testing.T) { env.clk.Set(subnetVdr1EndTime) // build standard block moving ahead chain time preferredID := env.state.GetLastAccepted() - parentBlk, _, err := env.state.GetStatelessBlock(preferredID) + parentBlk, err := env.state.GetStatelessBlock(preferredID) require.NoError(err) - statelessStandardBlock, err := blocks.NewBanffStandardBlock( + statelessStandardBlock, err := block.NewBanffStandardBlock( subnetVdr1EndTime, parentBlk.ID(), parentBlk.Height()+1, @@ -672,29 +676,25 @@ func TestBanffStandardBlockRemoveSubnetValidator(t *testing.T) { // Check VM Validators are removed successfully require.NoError(block.Accept(context.Background())) - require.False(validators.Contains(env.config.Validators, subnetID, subnetVdr2NodeID)) - require.False(validators.Contains(env.config.Validators, subnetID, 
subnetValidatorNodeID)) + _, ok := env.config.Validators.GetValidator(subnetID, subnetVdr2NodeID) + require.False(ok) + _, ok = env.config.Validators.GetValidator(subnetID, subnetValidatorNodeID) + require.False(ok) } func TestBanffStandardBlockTrackedSubnet(t *testing.T) { for _, tracked := range []bool{true, false} { t.Run(fmt.Sprintf("tracked %t", tracked), func(t *testing.T) { require := require.New(t) - env := newEnvironment(t, nil) - defer func() { - require.NoError(shutdownEnvironment(env)) - }() - env.config.BanffTime = time.Time{} // activate Banff + env := newEnvironment(t, nil, banff) subnetID := testSubnet1.ID() if tracked { env.config.TrackedSubnets.Add(subnetID) - env.config.Validators.Add(subnetID, validators.NewSet()) } // Add a subnet validator to the staker set - subnetValidatorNodeID := ids.NodeID(preFundedKeys[0].PublicKey().Address()) - + subnetValidatorNodeID := genesisNodeIDs[0] subnetVdr1StartTime := defaultGenesisTime.Add(1 * time.Minute) subnetVdr1EndTime := defaultGenesisTime.Add(10 * defaultMinStakingDuration).Add(1 * time.Minute) tx, err := env.txBuilder.NewAddSubnetValidatorTx( @@ -705,6 +705,7 @@ func TestBanffStandardBlockTrackedSubnet(t *testing.T) { subnetID, // Subnet ID []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, ids.ShortEmpty, + nil, ) require.NoError(err) @@ -723,9 +724,9 @@ func TestBanffStandardBlockTrackedSubnet(t *testing.T) { // build standard block moving ahead chain time preferredID := env.state.GetLastAccepted() - parentBlk, _, err := env.state.GetStatelessBlock(preferredID) + parentBlk, err := env.state.GetStatelessBlock(preferredID) require.NoError(err) - statelessStandardBlock, err := blocks.NewBanffStandardBlock( + statelessStandardBlock, err := block.NewBanffStandardBlock( subnetVdr1StartTime, parentBlk.ID(), parentBlk.Height()+1, @@ -737,18 +738,15 @@ func TestBanffStandardBlockTrackedSubnet(t *testing.T) { // update staker set require.NoError(block.Verify(context.Background())) 
require.NoError(block.Accept(context.Background())) - require.Equal(tracked, validators.Contains(env.config.Validators, subnetID, subnetValidatorNodeID)) + _, ok := env.config.Validators.GetValidator(subnetID, subnetValidatorNodeID) + require.True(ok) }) } } func TestBanffStandardBlockDelegatorStakerWeight(t *testing.T) { require := require.New(t) - env := newEnvironment(t, nil) - defer func() { - require.NoError(shutdownEnvironment(env)) - }() - env.config.BanffTime = time.Time{} // activate Banff + env := newEnvironment(t, nil, banff) // Case: Timestamp is after next validator start time // Add a pending validator @@ -768,24 +766,22 @@ func TestBanffStandardBlockDelegatorStakerWeight(t *testing.T) { // build standard block moving ahead chain time preferredID := env.state.GetLastAccepted() - parentBlk, _, err := env.state.GetStatelessBlock(preferredID) + parentBlk, err := env.state.GetStatelessBlock(preferredID) require.NoError(err) - statelessStandardBlock, err := blocks.NewBanffStandardBlock( + statelessStandardBlock, err := block.NewBanffStandardBlock( pendingValidatorStartTime, parentBlk.ID(), parentBlk.Height()+1, nil, // txs nulled to simplify test ) require.NoError(err) - block := env.blkManager.NewBlock(statelessStandardBlock) - require.NoError(block.Verify(context.Background())) - require.NoError(block.Accept(context.Background())) + blk := env.blkManager.NewBlock(statelessStandardBlock) + require.NoError(blk.Verify(context.Background())) + require.NoError(blk.Accept(context.Background())) require.NoError(env.state.Commit()) // Test validator weight before delegation - primarySet, ok := env.config.Validators.Get(constants.PrimaryNetworkID) - require.True(ok) - vdrWeight := primarySet.GetWeight(nodeID) + vdrWeight := env.config.Validators.GetWeight(constants.PrimaryNetworkID, nodeID) require.Equal(env.config.MinValidatorStake, vdrWeight) // Add delegator @@ -804,6 +800,7 @@ func TestBanffStandardBlockDelegatorStakerWeight(t *testing.T) { preFundedKeys[4], 
}, ids.ShortEmpty, + nil, ) require.NoError(err) @@ -820,21 +817,21 @@ func TestBanffStandardBlockDelegatorStakerWeight(t *testing.T) { // Advance Time preferredID = env.state.GetLastAccepted() - parentBlk, _, err = env.state.GetStatelessBlock(preferredID) + parentBlk, err = env.state.GetStatelessBlock(preferredID) require.NoError(err) - statelessStandardBlock, err = blocks.NewBanffStandardBlock( + statelessStandardBlock, err = block.NewBanffStandardBlock( pendingDelegatorStartTime, parentBlk.ID(), parentBlk.Height()+1, nil, // txs nulled to simplify test ) require.NoError(err) - block = env.blkManager.NewBlock(statelessStandardBlock) - require.NoError(block.Verify(context.Background())) - require.NoError(block.Accept(context.Background())) + blk = env.blkManager.NewBlock(statelessStandardBlock) + require.NoError(blk.Verify(context.Background())) + require.NoError(blk.Accept(context.Background())) require.NoError(env.state.Commit()) // Test validator weight after delegation - vdrWeight = primarySet.GetWeight(nodeID) + vdrWeight = env.config.Validators.GetWeight(constants.PrimaryNetworkID, nodeID) require.Equal(env.config.MinDelegatorStake+env.config.MinValidatorStake, vdrWeight) } diff --git a/avalanchego/vms/platformvm/blocks/executor/verifier.go b/avalanchego/vms/platformvm/block/executor/verifier.go similarity index 66% rename from avalanchego/vms/platformvm/blocks/executor/verifier.go rename to avalanchego/vms/platformvm/block/executor/verifier.go index 9f72b509..b35d2ecd 100644 --- a/avalanchego/vms/platformvm/blocks/executor/verifier.go +++ b/avalanchego/vms/platformvm/block/executor/verifier.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package executor @@ -10,7 +10,7 @@ import ( "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/set" - "github.com/ava-labs/avalanchego/vms/platformvm/blocks" + "github.com/ava-labs/avalanchego/vms/platformvm/block" "github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/ava-labs/avalanchego/vms/platformvm/status" "github.com/ava-labs/avalanchego/vms/platformvm/txs" @@ -18,14 +18,15 @@ import ( ) var ( - _ blocks.Visitor = (*verifier)(nil) + _ block.Visitor = (*verifier)(nil) + + ErrConflictingBlockTxs = errors.New("block contains conflicting transactions") errApricotBlockIssuedAfterFork = errors.New("apricot block issued after fork") errBanffProposalBlockWithMultipleTransactions = errors.New("BanffProposalBlock contains multiple transactions") errBanffStandardBlockWithoutChanges = errors.New("BanffStandardBlock performs no state changes") + errIncorrectBlockHeight = errors.New("incorrect block height") errChildBlockEarlierThanParent = errors.New("proposed timestamp before current chain time") - errConflictingBatchTxs = errors.New("block contains conflicting transactions") - errConflictingParentTxs = errors.New("block contains a transaction that conflicts with a transaction in a parent block") errOptionBlockTimestampNotMatchingParent = errors.New("option block proposed timestamp not matching parent block one") ) @@ -35,22 +36,23 @@ type verifier struct { txExecutorBackend *executor.Backend } -func (v *verifier) BanffAbortBlock(b *blocks.BanffAbortBlock) error { +func (v *verifier) BanffAbortBlock(b *block.BanffAbortBlock) error { if err := v.banffOptionBlock(b); err != nil { return err } return v.abortBlock(b) } -func (v *verifier) BanffCommitBlock(b *blocks.BanffCommitBlock) error { +func (v *verifier) BanffCommitBlock(b *block.BanffCommitBlock) error { if err := v.banffOptionBlock(b); err != nil { return err } return v.commitBlock(b) } -func (v *verifier) BanffProposalBlock(b 
*blocks.BanffProposalBlock) error { - if len(b.Transactions) != 0 { +func (v *verifier) BanffProposalBlock(b *block.BanffProposalBlock) error { + nextChainTime := b.Timestamp() + if !v.txExecutorBackend.Config.IsDurangoActivated(nextChainTime) && len(b.Transactions) != 0 { return errBanffProposalBlockWithMultipleTransactions } @@ -59,36 +61,43 @@ func (v *verifier) BanffProposalBlock(b *blocks.BanffProposalBlock) error { } parentID := b.Parent() - onCommitState, err := state.NewDiff(parentID, v.backend) + onDecisionState, err := state.NewDiff(parentID, v.backend) if err != nil { return err } - onAbortState, err := state.NewDiff(parentID, v.backend) - if err != nil { + + // Advance the time to [nextChainTime]. + if _, err := executor.AdvanceTimeTo(v.txExecutorBackend, onDecisionState, nextChainTime); err != nil { return err } - // Apply the changes, if any, from advancing the chain time. - nextChainTime := b.Timestamp() - changes, err := executor.AdvanceTimeTo( - v.txExecutorBackend, - onCommitState, - nextChainTime, - ) + inputs, atomicRequests, onAcceptFunc, err := v.processStandardTxs(b.Transactions, onDecisionState, b.Parent()) if err != nil { return err } - onCommitState.SetTimestamp(nextChainTime) - changes.Apply(onCommitState) + onCommitState, err := state.NewDiffOn(onDecisionState) + if err != nil { + return err + } - onAbortState.SetTimestamp(nextChainTime) - changes.Apply(onAbortState) + onAbortState, err := state.NewDiffOn(onDecisionState) + if err != nil { + return err + } - return v.proposalBlock(&b.ApricotProposalBlock, onCommitState, onAbortState) + return v.proposalBlock( + &b.ApricotProposalBlock, + onDecisionState, + onCommitState, + onAbortState, + inputs, + atomicRequests, + onAcceptFunc, + ) } -func (v *verifier) BanffStandardBlock(b *blocks.BanffStandardBlock) error { +func (v *verifier) BanffStandardBlock(b *block.BanffStandardBlock) error { if err := v.banffNonOptionBlock(b); err != nil { return err } @@ -99,12 +108,11 @@ func (v *verifier) 
BanffStandardBlock(b *blocks.BanffStandardBlock) error { return err } - // Apply the changes, if any, from advancing the chain time. - nextChainTime := b.Timestamp() - changes, err := executor.AdvanceTimeTo( + // Advance the time to [b.Timestamp()]. + changed, err := executor.AdvanceTimeTo( v.txExecutorBackend, onAcceptState, - nextChainTime, + b.Timestamp(), ) if err != nil { return err @@ -112,31 +120,28 @@ func (v *verifier) BanffStandardBlock(b *blocks.BanffStandardBlock) error { // If this block doesn't perform any changes, then it should never have been // issued. - if changes.Len() == 0 && len(b.Transactions) == 0 { + if !changed && len(b.Transactions) == 0 { return errBanffStandardBlockWithoutChanges } - onAcceptState.SetTimestamp(nextChainTime) - changes.Apply(onAcceptState) - return v.standardBlock(&b.ApricotStandardBlock, onAcceptState) } -func (v *verifier) ApricotAbortBlock(b *blocks.ApricotAbortBlock) error { +func (v *verifier) ApricotAbortBlock(b *block.ApricotAbortBlock) error { if err := v.apricotCommonBlock(b); err != nil { return err } return v.abortBlock(b) } -func (v *verifier) ApricotCommitBlock(b *blocks.ApricotCommitBlock) error { +func (v *verifier) ApricotCommitBlock(b *block.ApricotCommitBlock) error { if err := v.apricotCommonBlock(b); err != nil { return err } return v.commitBlock(b) } -func (v *verifier) ApricotProposalBlock(b *blocks.ApricotProposalBlock) error { +func (v *verifier) ApricotProposalBlock(b *block.ApricotProposalBlock) error { if err := v.apricotCommonBlock(b); err != nil { return err } @@ -151,10 +156,10 @@ func (v *verifier) ApricotProposalBlock(b *blocks.ApricotProposalBlock) error { return err } - return v.proposalBlock(b, onCommitState, onAbortState) + return v.proposalBlock(b, nil, onCommitState, onAbortState, nil, nil, nil) } -func (v *verifier) ApricotStandardBlock(b *blocks.ApricotStandardBlock) error { +func (v *verifier) ApricotStandardBlock(b *block.ApricotStandardBlock) error { if err := 
v.apricotCommonBlock(b); err != nil { return err } @@ -168,7 +173,7 @@ func (v *verifier) ApricotStandardBlock(b *blocks.ApricotStandardBlock) error { return v.standardBlock(b, onAcceptState) } -func (v *verifier) ApricotAtomicBlock(b *blocks.ApricotAtomicBlock) error { +func (v *verifier) ApricotAtomicBlock(b *block.ApricotAtomicBlock) error { // We call [commonBlock] here rather than [apricotCommonBlock] because below // this check we perform the more strict check that ApricotPhase5 isn't // activated. @@ -202,26 +207,26 @@ func (v *verifier) ApricotAtomicBlock(b *blocks.ApricotAtomicBlock) error { atomicExecutor.OnAccept.AddTx(b.Tx, status.Committed) - if err := v.verifyUniqueInputs(b, atomicExecutor.Inputs); err != nil { + if err := v.verifyUniqueInputs(parentID, atomicExecutor.Inputs); err != nil { return err } + v.Mempool.Remove(b.Tx) + blkID := b.ID() v.blkIDToState[blkID] = &blockState{ - standardBlockState: standardBlockState{ - inputs: atomicExecutor.Inputs, - }, statelessBlock: b, - onAcceptState: atomicExecutor.OnAccept, + + onAcceptState: atomicExecutor.OnAccept, + + inputs: atomicExecutor.Inputs, timestamp: atomicExecutor.OnAccept.GetTimestamp(), atomicRequests: atomicExecutor.AtomicRequests, } - - v.Mempool.Remove([]*txs.Tx{b.Tx}) return nil } -func (v *verifier) banffOptionBlock(b blocks.BanffBlock) error { +func (v *verifier) banffOptionBlock(b block.BanffBlock) error { if err := v.commonBlock(b); err != nil { return err } @@ -244,7 +249,7 @@ func (v *verifier) banffOptionBlock(b blocks.BanffBlock) error { return nil } -func (v *verifier) banffNonOptionBlock(b blocks.BanffBlock) error { +func (v *verifier) banffNonOptionBlock(b block.BanffBlock) error { if err := v.commonBlock(b); err != nil { return err } @@ -279,7 +284,7 @@ func (v *verifier) banffNonOptionBlock(b blocks.BanffBlock) error { ) } -func (v *verifier) apricotCommonBlock(b blocks.Block) error { +func (v *verifier) apricotCommonBlock(b block.Block) error { // We can use the parent 
timestamp here, because we are guaranteed that the // parent was verified. Apricot blocks only update the timestamp with // AdvanceTimeTxs. This means that this block's timestamp will be equal to @@ -296,7 +301,7 @@ func (v *verifier) apricotCommonBlock(b blocks.Block) error { return v.commonBlock(b) } -func (v *verifier) commonBlock(b blocks.Block) error { +func (v *verifier) commonBlock(b block.Block) error { parentID := b.Parent() parent, err := v.GetBlock(parentID) if err != nil { @@ -307,7 +312,8 @@ func (v *verifier) commonBlock(b blocks.Block) error { height := b.Height() if expectedHeight != height { return fmt.Errorf( - "expected block to have height %d, but found %d", + "%w expected %d, but found %d", + errIncorrectBlockHeight, expectedHeight, height, ) @@ -316,9 +322,9 @@ func (v *verifier) commonBlock(b blocks.Block) error { } // abortBlock populates the state of this block if [nil] is returned -func (v *verifier) abortBlock(b blocks.Block) error { +func (v *verifier) abortBlock(b block.Block) error { parentID := b.Parent() - onAcceptState, ok := v.getOnAbortState(parentID) + onAbortState, ok := v.getOnAbortState(parentID) if !ok { return fmt.Errorf("%w: %s", state.ErrMissingParentState, parentID) } @@ -326,16 +332,16 @@ func (v *verifier) abortBlock(b blocks.Block) error { blkID := b.ID() v.blkIDToState[blkID] = &blockState{ statelessBlock: b, - onAcceptState: onAcceptState, - timestamp: onAcceptState.GetTimestamp(), + onAcceptState: onAbortState, + timestamp: onAbortState.GetTimestamp(), } return nil } // commitBlock populates the state of this block if [nil] is returned -func (v *verifier) commitBlock(b blocks.Block) error { +func (v *verifier) commitBlock(b block.Block) error { parentID := b.Parent() - onAcceptState, ok := v.getOnCommitState(parentID) + onCommitState, ok := v.getOnCommitState(parentID) if !ok { return fmt.Errorf("%w: %s", state.ErrMissingParentState, parentID) } @@ -343,17 +349,21 @@ func (v *verifier) commitBlock(b blocks.Block) 
error { blkID := b.ID() v.blkIDToState[blkID] = &blockState{ statelessBlock: b, - onAcceptState: onAcceptState, - timestamp: onAcceptState.GetTimestamp(), + onAcceptState: onCommitState, + timestamp: onCommitState.GetTimestamp(), } return nil } // proposalBlock populates the state of this block if [nil] is returned func (v *verifier) proposalBlock( - b *blocks.ApricotProposalBlock, + b *block.ApricotProposalBlock, + onDecisionState state.Diff, onCommitState state.Diff, onAbortState state.Diff, + inputs set.Set[ids.ID], + atomicRequests map[ids.ID]*atomic.Requests, + onAcceptFunc func(), ) error { txExecutor := executor.ProposalTxExecutor{ OnCommitState: onCommitState, @@ -371,66 +381,96 @@ func (v *verifier) proposalBlock( onCommitState.AddTx(b.Tx, status.Committed) onAbortState.AddTx(b.Tx, status.Aborted) + v.Mempool.Remove(b.Tx) + blkID := b.ID() v.blkIDToState[blkID] = &blockState{ proposalBlockState: proposalBlockState{ - onCommitState: onCommitState, - onAbortState: onAbortState, - initiallyPreferCommit: txExecutor.PrefersCommit, + onDecisionState: onDecisionState, + onCommitState: onCommitState, + onAbortState: onAbortState, }, + statelessBlock: b, + + onAcceptFunc: onAcceptFunc, + + inputs: inputs, // It is safe to use [b.onAbortState] here because the timestamp will // never be modified by an Apricot Abort block and the timestamp will // always be the same as the Banff Proposal Block. 
- timestamp: onAbortState.GetTimestamp(), + timestamp: onAbortState.GetTimestamp(), + atomicRequests: atomicRequests, } - - v.Mempool.Remove([]*txs.Tx{b.Tx}) return nil } // standardBlock populates the state of this block if [nil] is returned func (v *verifier) standardBlock( - b *blocks.ApricotStandardBlock, + b *block.ApricotStandardBlock, onAcceptState state.Diff, ) error { - blkState := &blockState{ + inputs, atomicRequests, onAcceptFunc, err := v.processStandardTxs(b.Transactions, onAcceptState, b.Parent()) + if err != nil { + return err + } + + v.Mempool.Remove(b.Transactions...) + + blkID := b.ID() + v.blkIDToState[blkID] = &blockState{ statelessBlock: b, - onAcceptState: onAcceptState, + + onAcceptState: onAcceptState, + onAcceptFunc: onAcceptFunc, + timestamp: onAcceptState.GetTimestamp(), - atomicRequests: make(map[ids.ID]*atomic.Requests), + inputs: inputs, + atomicRequests: atomicRequests, } + return nil +} - // Finally we process the transactions - funcs := make([]func(), 0, len(b.Transactions)) - for _, tx := range b.Transactions { +func (v *verifier) processStandardTxs(txs []*txs.Tx, state state.Diff, parentID ids.ID) ( + set.Set[ids.ID], + map[ids.ID]*atomic.Requests, + func(), + error, +) { + var ( + onAcceptFunc func() + inputs set.Set[ids.ID] + funcs = make([]func(), 0, len(txs)) + atomicRequests = make(map[ids.ID]*atomic.Requests) + ) + for _, tx := range txs { txExecutor := executor.StandardTxExecutor{ Backend: v.txExecutorBackend, - State: onAcceptState, + State: state, Tx: tx, } if err := tx.Unsigned.Visit(&txExecutor); err != nil { txID := tx.ID() v.MarkDropped(txID, err) // cache tx as dropped - return err + return nil, nil, nil, err } // ensure it doesn't overlap with current input batch - if blkState.inputs.Overlaps(txExecutor.Inputs) { - return errConflictingBatchTxs + if inputs.Overlaps(txExecutor.Inputs) { + return nil, nil, nil, ErrConflictingBlockTxs } // Add UTXOs to batch - blkState.inputs.Union(txExecutor.Inputs) + 
inputs.Union(txExecutor.Inputs) - onAcceptState.AddTx(tx, status.Committed) + state.AddTx(tx, status.Committed) if txExecutor.OnAccept != nil { funcs = append(funcs, txExecutor.OnAccept) } for chainID, txRequests := range txExecutor.AtomicRequests { // Add/merge in the atomic requests represented by [tx] - chainRequests, exists := blkState.atomicRequests[chainID] + chainRequests, exists := atomicRequests[chainID] if !exists { - blkState.atomicRequests[chainID] = txRequests + atomicRequests[chainID] = txRequests continue } @@ -439,48 +479,19 @@ func (v *verifier) standardBlock( } } - if err := v.verifyUniqueInputs(b, blkState.inputs); err != nil { - return err + if err := v.verifyUniqueInputs(parentID, inputs); err != nil { + return nil, nil, nil, err } if numFuncs := len(funcs); numFuncs == 1 { - blkState.onAcceptFunc = funcs[0] + onAcceptFunc = funcs[0] } else if numFuncs > 1 { - blkState.onAcceptFunc = func() { + onAcceptFunc = func() { for _, f := range funcs { f() } } } - blkID := b.ID() - v.blkIDToState[blkID] = blkState - - v.Mempool.Remove(b.Transactions) - return nil -} - -// verifyUniqueInputs verifies that the inputs of the given block are not -// duplicated in any of the parent blocks pinned in memory. -func (v *verifier) verifyUniqueInputs(block blocks.Block, inputs set.Set[ids.ID]) error { - if inputs.Len() == 0 { - return nil - } - - // Check for conflicts in ancestors. - for { - parentID := block.Parent() - parentState, ok := v.blkIDToState[parentID] - if !ok { - // The parent state isn't pinned in memory. - // This means the parent must be accepted already. 
- return nil - } - - if parentState.inputs.Overlaps(inputs) { - return errConflictingParentTxs - } - - block = parentState.statelessBlock - } + return inputs, atomicRequests, onAcceptFunc, nil } diff --git a/avalanchego/vms/platformvm/blocks/executor/verifier_test.go b/avalanchego/vms/platformvm/block/executor/verifier_test.go similarity index 86% rename from avalanchego/vms/platformvm/blocks/executor/verifier_test.go rename to avalanchego/vms/platformvm/block/executor/verifier_test.go index ee224f0c..ba24fb2f 100644 --- a/avalanchego/vms/platformvm/blocks/executor/verifier_test.go +++ b/avalanchego/vms/platformvm/block/executor/verifier_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package executor @@ -8,20 +8,18 @@ import ( "testing" "time" - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/vms/components/verify" - "github.com/ava-labs/avalanchego/vms/platformvm/blocks" + "github.com/ava-labs/avalanchego/vms/platformvm/block" "github.com/ava-labs/avalanchego/vms/platformvm/config" "github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/ava-labs/avalanchego/vms/platformvm/status" @@ -33,12 +31,11 @@ import ( func TestVerifierVisitProposalBlock(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() s := state.NewMockState(ctrl) mempool := mempool.NewMockMempool(ctrl) parentID := ids.GenerateTestID() - parentStatelessBlk := 
blocks.NewMockBlock(ctrl) + parentStatelessBlk := block.NewMockBlock(ctrl) parentOnAcceptState := state.NewMockDiff(ctrl) timestamp := time.Now() // One call for each of onCommitState and onAbortState. @@ -80,7 +77,7 @@ func TestVerifierVisitProposalBlock(t *testing.T) { // Serialize this block with a dummy tx // and replace it after creation with the mock tx. // TODO allow serialization of mock txs. - apricotBlk, err := blocks.NewApricotProposalBlock( + apricotBlk, err := block.NewApricotProposalBlock( parentID, 2, &txs.Tx{ @@ -98,8 +95,7 @@ func TestVerifierVisitProposalBlock(t *testing.T) { // Visit the block blk := manager.NewBlock(apricotBlk) - err = blk.Verify(context.Background()) - require.NoError(err) + require.NoError(blk.Verify(context.Background())) require.Contains(verifier.backend.blkIDToState, apricotBlk.ID()) gotBlkState := verifier.backend.blkIDToState[apricotBlk.ID()] require.Equal(apricotBlk, gotBlkState.statelessBlock) @@ -115,20 +111,18 @@ func TestVerifierVisitProposalBlock(t *testing.T) { require.Equal(status.Aborted, gotStatus) // Visiting again should return nil without using dependencies. - err = blk.Verify(context.Background()) - require.NoError(err) + require.NoError(blk.Verify(context.Background())) } func TestVerifierVisitAtomicBlock(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() // Create mocked dependencies. 
s := state.NewMockState(ctrl) mempool := mempool.NewMockMempool(ctrl) parentID := ids.GenerateTestID() - parentStatelessBlk := blocks.NewMockBlock(ctrl) + parentStatelessBlk := block.NewMockBlock(ctrl) grandparentID := ids.GenerateTestID() parentState := state.NewMockDiff(ctrl) @@ -162,7 +156,7 @@ func TestVerifierVisitAtomicBlock(t *testing.T) { onAccept := state.NewMockDiff(ctrl) blkTx := txs.NewMockUnsignedTx(ctrl) - inputs := set.Set[ids.ID]{ids.GenerateTestID(): struct{}{}} + inputs := set.Of(ids.GenerateTestID()) blkTx.EXPECT().Visit(gomock.AssignableToTypeOf(&executor.AtomicTxExecutor{})).DoAndReturn( func(e *executor.AtomicTxExecutor) error { e.OnAccept = onAccept @@ -175,7 +169,7 @@ func TestVerifierVisitAtomicBlock(t *testing.T) { // Serialize this block with a dummy tx and replace it after creation with // the mock tx. // TODO allow serialization of mock txs. - apricotBlk, err := blocks.NewApricotAtomicBlock( + apricotBlk, err := block.NewApricotAtomicBlock( parentID, 2, &txs.Tx{ @@ -195,8 +189,7 @@ func TestVerifierVisitAtomicBlock(t *testing.T) { onAccept.EXPECT().GetTimestamp().Return(timestamp).Times(1) blk := manager.NewBlock(apricotBlk) - err = blk.Verify(context.Background()) - require.NoError(err) + require.NoError(blk.Verify(context.Background())) require.Contains(verifier.backend.blkIDToState, apricotBlk.ID()) gotBlkState := verifier.backend.blkIDToState[apricotBlk.ID()] @@ -206,20 +199,18 @@ func TestVerifierVisitAtomicBlock(t *testing.T) { require.Equal(timestamp, gotBlkState.timestamp) // Visiting again should return nil without using dependencies. - err = blk.Verify(context.Background()) - require.NoError(err) + require.NoError(blk.Verify(context.Background())) } func TestVerifierVisitStandardBlock(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() // Create mocked dependencies. 
s := state.NewMockState(ctrl) mempool := mempool.NewMockMempool(ctrl) parentID := ids.GenerateTestID() - parentStatelessBlk := blocks.NewMockBlock(ctrl) + parentStatelessBlk := block.NewMockBlock(ctrl) parentState := state.NewMockDiff(ctrl) backend := &backend{ @@ -277,7 +268,7 @@ func TestVerifierVisitStandardBlock(t *testing.T) { // Serialize this block with a dummy tx // and replace it after creation with the mock tx. // TODO allow serialization of mock txs. - apricotBlk, err := blocks.NewApricotStandardBlock( + apricotBlk, err := block.NewApricotStandardBlock( parentID, 2, /*height*/ []*txs.Tx{ @@ -297,8 +288,7 @@ func TestVerifierVisitStandardBlock(t *testing.T) { mempool.EXPECT().Remove(apricotBlk.Txs()).Times(1) blk := manager.NewBlock(apricotBlk) - err = blk.Verify(context.Background()) - require.NoError(err) + require.NoError(blk.Verify(context.Background())) // Assert expected state. require.Contains(verifier.backend.blkIDToState, apricotBlk.ID()) @@ -308,20 +298,19 @@ func TestVerifierVisitStandardBlock(t *testing.T) { require.Equal(timestamp, gotBlkState.timestamp) // Visiting again should return nil without using dependencies. - err = blk.Verify(context.Background()) - require.NoError(err) + require.NoError(blk.Verify(context.Background())) } func TestVerifierVisitCommitBlock(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() // Create mocked dependencies. 
s := state.NewMockState(ctrl) mempool := mempool.NewMockMempool(ctrl) parentID := ids.GenerateTestID() - parentStatelessBlk := blocks.NewMockBlock(ctrl) + parentStatelessBlk := block.NewMockBlock(ctrl) + parentOnDecisionState := state.NewMockDiff(ctrl) parentOnCommitState := state.NewMockDiff(ctrl) parentOnAbortState := state.NewMockDiff(ctrl) @@ -330,10 +319,10 @@ func TestVerifierVisitCommitBlock(t *testing.T) { parentID: { statelessBlock: parentStatelessBlk, proposalBlockState: proposalBlockState{ - onCommitState: parentOnCommitState, - onAbortState: parentOnAbortState, + onDecisionState: parentOnDecisionState, + onCommitState: parentOnCommitState, + onAbortState: parentOnAbortState, }, - standardBlockState: standardBlockState{}, }, }, Mempool: mempool, @@ -356,7 +345,7 @@ func TestVerifierVisitCommitBlock(t *testing.T) { verifier: verifier, } - apricotBlk, err := blocks.NewApricotCommitBlock( + apricotBlk, err := block.NewApricotCommitBlock( parentID, 2, ) @@ -371,8 +360,7 @@ func TestVerifierVisitCommitBlock(t *testing.T) { // Verify the block. blk := manager.NewBlock(apricotBlk) - err = blk.Verify(context.Background()) - require.NoError(err) + require.NoError(blk.Verify(context.Background())) // Assert expected state. require.Contains(verifier.backend.blkIDToState, apricotBlk.ID()) @@ -381,20 +369,19 @@ func TestVerifierVisitCommitBlock(t *testing.T) { require.Equal(timestamp, gotBlkState.timestamp) // Visiting again should return nil without using dependencies. - err = blk.Verify(context.Background()) - require.NoError(err) + require.NoError(blk.Verify(context.Background())) } func TestVerifierVisitAbortBlock(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() // Create mocked dependencies. 
s := state.NewMockState(ctrl) mempool := mempool.NewMockMempool(ctrl) parentID := ids.GenerateTestID() - parentStatelessBlk := blocks.NewMockBlock(ctrl) + parentStatelessBlk := block.NewMockBlock(ctrl) + parentOnDecisionState := state.NewMockDiff(ctrl) parentOnCommitState := state.NewMockDiff(ctrl) parentOnAbortState := state.NewMockDiff(ctrl) @@ -403,10 +390,10 @@ func TestVerifierVisitAbortBlock(t *testing.T) { parentID: { statelessBlock: parentStatelessBlk, proposalBlockState: proposalBlockState{ - onCommitState: parentOnCommitState, - onAbortState: parentOnAbortState, + onDecisionState: parentOnDecisionState, + onCommitState: parentOnCommitState, + onAbortState: parentOnAbortState, }, - standardBlockState: standardBlockState{}, }, }, Mempool: mempool, @@ -429,7 +416,7 @@ func TestVerifierVisitAbortBlock(t *testing.T) { verifier: verifier, } - apricotBlk, err := blocks.NewApricotAbortBlock( + apricotBlk, err := block.NewApricotAbortBlock( parentID, 2, ) @@ -444,8 +431,7 @@ func TestVerifierVisitAbortBlock(t *testing.T) { // Verify the block. blk := manager.NewBlock(apricotBlk) - err = blk.Verify(context.Background()) - require.NoError(err) + require.NoError(blk.Verify(context.Background())) // Assert expected state. require.Contains(verifier.backend.blkIDToState, apricotBlk.ID()) @@ -454,15 +440,13 @@ func TestVerifierVisitAbortBlock(t *testing.T) { require.Equal(timestamp, gotBlkState.timestamp) // Visiting again should return nil without using dependencies. - err = blk.Verify(context.Background()) - require.NoError(err) + require.NoError(blk.Verify(context.Background())) } // Assert that a block with an unverified parent fails verification. func TestVerifyUnverifiedParent(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() // Create mocked dependencies. 
s := state.NewMockState(ctrl) @@ -487,21 +471,20 @@ func TestVerifyUnverifiedParent(t *testing.T) { backend: backend, } - blk, err := blocks.NewApricotAbortBlock(parentID /*not in memory or persisted state*/, 2 /*height*/) + blk, err := block.NewApricotAbortBlock(parentID /*not in memory or persisted state*/, 2 /*height*/) require.NoError(err) // Set expectations for dependencies. s.EXPECT().GetTimestamp().Return(time.Now()).Times(1) - s.EXPECT().GetStatelessBlock(parentID).Return(nil, choices.Unknown, database.ErrNotFound).Times(1) + s.EXPECT().GetStatelessBlock(parentID).Return(nil, database.ErrNotFound).Times(1) // Verify the block. err = blk.Visit(verifier) - require.Error(err) + require.ErrorIs(err, database.ErrNotFound) } func TestBanffAbortBlockTimestampChecks(t *testing.T) { ctrl := gomock.NewController(t) - defer ctrl.Finish() now := defaultGenesisTime.Add(time.Hour) @@ -539,7 +522,7 @@ func TestBanffAbortBlockTimestampChecks(t *testing.T) { s := state.NewMockState(ctrl) mempool := mempool.NewMockMempool(ctrl) parentID := ids.GenerateTestID() - parentStatelessBlk := blocks.NewMockBlock(ctrl) + parentStatelessBlk := block.NewMockBlock(ctrl) parentHeight := uint64(1) backend := &backend{ @@ -562,14 +545,16 @@ func TestBanffAbortBlockTimestampChecks(t *testing.T) { // build and verify child block childHeight := parentHeight + 1 - statelessAbortBlk, err := blocks.NewBanffAbortBlock(test.childTime, parentID, childHeight) + statelessAbortBlk, err := block.NewBanffAbortBlock(test.childTime, parentID, childHeight) require.NoError(err) // setup parent state parentTime := defaultGenesisTime - s.EXPECT().GetLastAccepted().Return(parentID).Times(2) - s.EXPECT().GetTimestamp().Return(parentTime).Times(2) + s.EXPECT().GetLastAccepted().Return(parentID).Times(3) + s.EXPECT().GetTimestamp().Return(parentTime).Times(3) + onDecisionState, err := state.NewDiff(parentID, backend) + require.NoError(err) onCommitState, err := state.NewDiff(parentID, backend) 
require.NoError(err) onAbortState, err := state.NewDiff(parentID, backend) @@ -578,8 +563,9 @@ func TestBanffAbortBlockTimestampChecks(t *testing.T) { timestamp: test.parentTime, statelessBlock: parentStatelessBlk, proposalBlockState: proposalBlockState{ - onCommitState: onCommitState, - onAbortState: onAbortState, + onDecisionState: onDecisionState, + onCommitState: onCommitState, + onAbortState: onAbortState, }, } @@ -595,7 +581,6 @@ func TestBanffAbortBlockTimestampChecks(t *testing.T) { // TODO combine with TestApricotCommitBlockTimestampChecks func TestBanffCommitBlockTimestampChecks(t *testing.T) { ctrl := gomock.NewController(t) - defer ctrl.Finish() now := defaultGenesisTime.Add(time.Hour) @@ -633,7 +618,7 @@ func TestBanffCommitBlockTimestampChecks(t *testing.T) { s := state.NewMockState(ctrl) mempool := mempool.NewMockMempool(ctrl) parentID := ids.GenerateTestID() - parentStatelessBlk := blocks.NewMockBlock(ctrl) + parentStatelessBlk := block.NewMockBlock(ctrl) parentHeight := uint64(1) backend := &backend{ @@ -656,14 +641,16 @@ func TestBanffCommitBlockTimestampChecks(t *testing.T) { // build and verify child block childHeight := parentHeight + 1 - statelessCommitBlk, err := blocks.NewBanffCommitBlock(test.childTime, parentID, childHeight) + statelessCommitBlk, err := block.NewBanffCommitBlock(test.childTime, parentID, childHeight) require.NoError(err) // setup parent state parentTime := defaultGenesisTime - s.EXPECT().GetLastAccepted().Return(parentID).Times(2) - s.EXPECT().GetTimestamp().Return(parentTime).Times(2) + s.EXPECT().GetLastAccepted().Return(parentID).Times(3) + s.EXPECT().GetTimestamp().Return(parentTime).Times(3) + onDecisionState, err := state.NewDiff(parentID, backend) + require.NoError(err) onCommitState, err := state.NewDiff(parentID, backend) require.NoError(err) onAbortState, err := state.NewDiff(parentID, backend) @@ -672,8 +659,9 @@ func TestBanffCommitBlockTimestampChecks(t *testing.T) { timestamp: test.parentTime, statelessBlock: 
parentStatelessBlk, proposalBlockState: proposalBlockState{ - onCommitState: onCommitState, - onAbortState: onAbortState, + onDecisionState: onDecisionState, + onCommitState: onCommitState, + onAbortState: onAbortState, }, } @@ -689,30 +677,25 @@ func TestBanffCommitBlockTimestampChecks(t *testing.T) { func TestVerifierVisitStandardBlockWithDuplicateInputs(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() // Create mocked dependencies. s := state.NewMockState(ctrl) mempool := mempool.NewMockMempool(ctrl) grandParentID := ids.GenerateTestID() - grandParentStatelessBlk := blocks.NewMockBlock(ctrl) + grandParentStatelessBlk := block.NewMockBlock(ctrl) grandParentState := state.NewMockDiff(ctrl) parentID := ids.GenerateTestID() - parentStatelessBlk := blocks.NewMockBlock(ctrl) + parentStatelessBlk := block.NewMockBlock(ctrl) parentState := state.NewMockDiff(ctrl) - atomicInputs := set.Set[ids.ID]{ - ids.GenerateTestID(): struct{}{}, - } + atomicInputs := set.Of(ids.GenerateTestID()) backend := &backend{ blkIDToState: map[ids.ID]*blockState{ grandParentID: { - standardBlockState: standardBlockState{ - inputs: atomicInputs, - }, statelessBlock: grandParentStatelessBlk, onAcceptState: grandParentState, + inputs: atomicInputs, }, parentID: { statelessBlock: parentStatelessBlk, @@ -763,7 +746,7 @@ func TestVerifierVisitStandardBlockWithDuplicateInputs(t *testing.T) { // Serialize this block with a dummy tx // and replace it after creation with the mock tx. // TODO allow serialization of mock txs. - blk, err := blocks.NewApricotStandardBlock( + blk, err := block.NewApricotStandardBlock( parentID, 2, []*txs.Tx{ @@ -789,13 +772,12 @@ func TestVerifierVisitStandardBlockWithDuplicateInputs(t *testing.T) { func TestVerifierVisitApricotStandardBlockWithProposalBlockParent(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() // Create mocked dependencies. 
s := state.NewMockState(ctrl) mempool := mempool.NewMockMempool(ctrl) parentID := ids.GenerateTestID() - parentStatelessBlk := blocks.NewMockBlock(ctrl) + parentStatelessBlk := block.NewMockBlock(ctrl) parentOnCommitState := state.NewMockDiff(ctrl) parentOnAbortState := state.NewMockDiff(ctrl) @@ -807,7 +789,6 @@ func TestVerifierVisitApricotStandardBlockWithProposalBlockParent(t *testing.T) onCommitState: parentOnCommitState, onAbortState: parentOnAbortState, }, - standardBlockState: standardBlockState{}, }, }, Mempool: mempool, @@ -826,7 +807,7 @@ func TestVerifierVisitApricotStandardBlockWithProposalBlockParent(t *testing.T) backend: backend, } - blk, err := blocks.NewApricotStandardBlock( + blk, err := block.NewApricotStandardBlock( parentID, 2, []*txs.Tx{ @@ -847,13 +828,12 @@ func TestVerifierVisitApricotStandardBlockWithProposalBlockParent(t *testing.T) func TestVerifierVisitBanffStandardBlockWithProposalBlockParent(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() // Create mocked dependencies. 
s := state.NewMockState(ctrl) mempool := mempool.NewMockMempool(ctrl) parentID := ids.GenerateTestID() - parentStatelessBlk := blocks.NewMockBlock(ctrl) + parentStatelessBlk := block.NewMockBlock(ctrl) parentTime := time.Now() parentOnCommitState := state.NewMockDiff(ctrl) parentOnAbortState := state.NewMockDiff(ctrl) @@ -866,7 +846,6 @@ func TestVerifierVisitBanffStandardBlockWithProposalBlockParent(t *testing.T) { onCommitState: parentOnCommitState, onAbortState: parentOnAbortState, }, - standardBlockState: standardBlockState{}, }, }, Mempool: mempool, @@ -885,7 +864,7 @@ func TestVerifierVisitBanffStandardBlockWithProposalBlockParent(t *testing.T) { backend: backend, } - blk, err := blocks.NewBanffStandardBlock( + blk, err := block.NewBanffStandardBlock( parentTime.Add(time.Second), parentID, 2, @@ -907,12 +886,11 @@ func TestVerifierVisitBanffStandardBlockWithProposalBlockParent(t *testing.T) { func TestVerifierVisitApricotCommitBlockUnexpectedParentState(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() // Create mocked dependencies. s := state.NewMockState(ctrl) parentID := ids.GenerateTestID() - parentStatelessBlk := blocks.NewMockBlock(ctrl) + parentStatelessBlk := block.NewMockBlock(ctrl) verifier := &verifier{ txExecutorBackend: &executor.Backend{ Config: &config.Config{ @@ -933,7 +911,7 @@ func TestVerifierVisitApricotCommitBlockUnexpectedParentState(t *testing.T) { }, } - blk, err := blocks.NewApricotCommitBlock( + blk, err := block.NewApricotCommitBlock( parentID, 2, ) @@ -950,12 +928,11 @@ func TestVerifierVisitApricotCommitBlockUnexpectedParentState(t *testing.T) { func TestVerifierVisitBanffCommitBlockUnexpectedParentState(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() // Create mocked dependencies. 
s := state.NewMockState(ctrl) parentID := ids.GenerateTestID() - parentStatelessBlk := blocks.NewMockBlock(ctrl) + parentStatelessBlk := block.NewMockBlock(ctrl) timestamp := time.Unix(12345, 0) verifier := &verifier{ txExecutorBackend: &executor.Backend{ @@ -978,7 +955,7 @@ func TestVerifierVisitBanffCommitBlockUnexpectedParentState(t *testing.T) { }, } - blk, err := blocks.NewBanffCommitBlock( + blk, err := block.NewBanffCommitBlock( timestamp, parentID, 2, @@ -996,12 +973,11 @@ func TestVerifierVisitBanffCommitBlockUnexpectedParentState(t *testing.T) { func TestVerifierVisitApricotAbortBlockUnexpectedParentState(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() // Create mocked dependencies. s := state.NewMockState(ctrl) parentID := ids.GenerateTestID() - parentStatelessBlk := blocks.NewMockBlock(ctrl) + parentStatelessBlk := block.NewMockBlock(ctrl) verifier := &verifier{ txExecutorBackend: &executor.Backend{ Config: &config.Config{ @@ -1022,7 +998,7 @@ func TestVerifierVisitApricotAbortBlockUnexpectedParentState(t *testing.T) { }, } - blk, err := blocks.NewApricotAbortBlock( + blk, err := block.NewApricotAbortBlock( parentID, 2, ) @@ -1039,12 +1015,11 @@ func TestVerifierVisitApricotAbortBlockUnexpectedParentState(t *testing.T) { func TestVerifierVisitBanffAbortBlockUnexpectedParentState(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() // Create mocked dependencies. 
s := state.NewMockState(ctrl) parentID := ids.GenerateTestID() - parentStatelessBlk := blocks.NewMockBlock(ctrl) + parentStatelessBlk := block.NewMockBlock(ctrl) timestamp := time.Unix(12345, 0) verifier := &verifier{ txExecutorBackend: &executor.Backend{ @@ -1067,7 +1042,7 @@ func TestVerifierVisitBanffAbortBlockUnexpectedParentState(t *testing.T) { }, } - blk, err := blocks.NewBanffAbortBlock( + blk, err := block.NewBanffAbortBlock( timestamp, parentID, 2, diff --git a/avalanchego/vms/platformvm/blocks/mock_block.go b/avalanchego/vms/platformvm/block/mock_block.go similarity index 87% rename from avalanchego/vms/platformvm/blocks/mock_block.go rename to avalanchego/vms/platformvm/block/mock_block.go index 8bc912e6..7bd28119 100644 --- a/avalanchego/vms/platformvm/blocks/mock_block.go +++ b/avalanchego/vms/platformvm/block/mock_block.go @@ -1,11 +1,13 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. -// Source: github.com/ava-labs/avalanchego/vms/platformvm/blocks (interfaces: Block) +// Source: github.com/ava-labs/avalanchego/vms/platformvm/block (interfaces: Block) +// +// Generated by this command: +// +// mockgen -package=block -destination=vms/platformvm/block/mock_block.go github.com/ava-labs/avalanchego/vms/platformvm/block Block +// -// Package blocks is a generated GoMock package. -package blocks +// Package block is a generated GoMock package. +package block import ( reflect "reflect" @@ -13,7 +15,7 @@ import ( ids "github.com/ava-labs/avalanchego/ids" snow "github.com/ava-labs/avalanchego/snow" txs "github.com/ava-labs/avalanchego/vms/platformvm/txs" - gomock "github.com/golang/mock/gomock" + gomock "go.uber.org/mock/gomock" ) // MockBlock is a mock of Block interface. @@ -88,7 +90,7 @@ func (m *MockBlock) InitCtx(arg0 *snow.Context) { } // InitCtx indicates an expected call of InitCtx. 
-func (mr *MockBlockMockRecorder) InitCtx(arg0 interface{}) *gomock.Call { +func (mr *MockBlockMockRecorder) InitCtx(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InitCtx", reflect.TypeOf((*MockBlock)(nil).InitCtx), arg0) } @@ -130,7 +132,7 @@ func (m *MockBlock) Visit(arg0 Visitor) error { } // Visit indicates an expected call of Visit. -func (mr *MockBlockMockRecorder) Visit(arg0 interface{}) *gomock.Call { +func (mr *MockBlockMockRecorder) Visit(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Visit", reflect.TypeOf((*MockBlock)(nil).Visit), arg0) } @@ -144,7 +146,7 @@ func (m *MockBlock) initialize(arg0 []byte) error { } // initialize indicates an expected call of initialize. -func (mr *MockBlockMockRecorder) initialize(arg0 interface{}) *gomock.Call { +func (mr *MockBlockMockRecorder) initialize(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "initialize", reflect.TypeOf((*MockBlock)(nil).initialize), arg0) } diff --git a/avalanchego/vms/platformvm/blocks/parse.go b/avalanchego/vms/platformvm/block/parse.go similarity index 62% rename from avalanchego/vms/platformvm/blocks/parse.go rename to avalanchego/vms/platformvm/block/parse.go index 27d83773..e6679079 100644 --- a/avalanchego/vms/platformvm/blocks/parse.go +++ b/avalanchego/vms/platformvm/block/parse.go @@ -1,11 +1,9 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
-package blocks +package block -import ( - "github.com/ava-labs/avalanchego/codec" -) +import "github.com/ava-labs/avalanchego/codec" func Parse(c codec.Manager, b []byte) (Block, error) { var blk Block diff --git a/avalanchego/vms/platformvm/blocks/parse_test.go b/avalanchego/vms/platformvm/block/parse_test.go similarity index 79% rename from avalanchego/vms/platformvm/blocks/parse_test.go rename to avalanchego/vms/platformvm/block/parse_test.go index 9ab17af7..8bebb507 100644 --- a/avalanchego/vms/platformvm/blocks/parse_test.go +++ b/avalanchego/vms/platformvm/block/parse_test.go @@ -1,7 +1,7 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package blocks +package block import ( "testing" @@ -25,12 +25,12 @@ func TestStandardBlocks(t *testing.T) { blkTimestamp := time.Now() parentID := ids.ID{'p', 'a', 'r', 'e', 'n', 't', 'I', 'D'} height := uint64(2022) - txs, err := testDecisionTxs() + decisionTxs, err := testDecisionTxs() require.NoError(err) for _, cdc := range []codec.Manager{Codec, GenesisCodec} { // build block - apricotStandardBlk, err := NewApricotStandardBlock(parentID, height, txs) + apricotStandardBlk, err := NewApricotStandardBlock(parentID, height, decisionTxs) require.NoError(err) // parse block @@ -43,12 +43,11 @@ func TestStandardBlocks(t *testing.T) { require.Equal(apricotStandardBlk.Parent(), parsed.Parent()) require.Equal(apricotStandardBlk.Height(), parsed.Height()) - _, ok := parsed.(*ApricotStandardBlock) - require.True(ok) - require.Equal(txs, parsed.Txs()) + require.IsType(&ApricotStandardBlock{}, parsed) + require.Equal(decisionTxs, parsed.Txs()) // check that banff standard block can be built and parsed - banffStandardBlk, err := NewBanffStandardBlock(blkTimestamp, parentID, height, txs) + banffStandardBlk, err := NewBanffStandardBlock(blkTimestamp, parentID, height, decisionTxs) require.NoError(err) // 
parse block @@ -60,9 +59,9 @@ func TestStandardBlocks(t *testing.T) { require.Equal(banffStandardBlk.Bytes(), parsed.Bytes()) require.Equal(banffStandardBlk.Parent(), parsed.Parent()) require.Equal(banffStandardBlk.Height(), parsed.Height()) - parsedBanffStandardBlk, ok := parsed.(*BanffStandardBlock) - require.True(ok) - require.Equal(txs, parsedBanffStandardBlk.Txs()) + require.IsType(&BanffStandardBlock{}, parsed) + parsedBanffStandardBlk := parsed.(*BanffStandardBlock) + require.Equal(decisionTxs, parsedBanffStandardBlk.Txs()) // timestamp check for banff blocks only require.Equal(banffStandardBlk.Timestamp(), parsedBanffStandardBlk.Timestamp()) @@ -78,7 +77,9 @@ func TestProposalBlocks(t *testing.T) { blkTimestamp := time.Now() parentID := ids.ID{'p', 'a', 'r', 'e', 'n', 't', 'I', 'D'} height := uint64(2022) - tx, err := testProposalTx() + proposalTx, err := testProposalTx() + require.NoError(err) + decisionTxs, err := testDecisionTxs() require.NoError(err) for _, cdc := range []codec.Manager{Codec, GenesisCodec} { @@ -86,7 +87,7 @@ func TestProposalBlocks(t *testing.T) { apricotProposalBlk, err := NewApricotProposalBlock( parentID, height, - tx, + proposalTx, ) require.NoError(err) @@ -100,16 +101,17 @@ func TestProposalBlocks(t *testing.T) { require.Equal(apricotProposalBlk.Parent(), parsed.Parent()) require.Equal(apricotProposalBlk.Height(), parsed.Height()) - parsedApricotProposalBlk, ok := parsed.(*ApricotProposalBlock) - require.True(ok) - require.Equal([]*txs.Tx{tx}, parsedApricotProposalBlk.Txs()) + require.IsType(&ApricotProposalBlock{}, parsed) + parsedApricotProposalBlk := parsed.(*ApricotProposalBlock) + require.Equal([]*txs.Tx{proposalTx}, parsedApricotProposalBlk.Txs()) // check that banff proposal block can be built and parsed banffProposalBlk, err := NewBanffProposalBlock( blkTimestamp, parentID, height, - tx, + proposalTx, + []*txs.Tx{}, ) require.NoError(err) @@ -120,17 +122,47 @@ func TestProposalBlocks(t *testing.T) { // compare content 
require.Equal(banffProposalBlk.ID(), parsed.ID()) require.Equal(banffProposalBlk.Bytes(), parsed.Bytes()) - require.Equal(banffProposalBlk.Parent(), banffProposalBlk.Parent()) + require.Equal(banffProposalBlk.Parent(), parsed.Parent()) require.Equal(banffProposalBlk.Height(), parsed.Height()) - parsedBanffProposalBlk, ok := parsed.(*BanffProposalBlock) - require.True(ok) - require.Equal([]*txs.Tx{tx}, parsedBanffProposalBlk.Txs()) + require.IsType(&BanffProposalBlock{}, parsed) + parsedBanffProposalBlk := parsed.(*BanffProposalBlock) + require.Equal([]*txs.Tx{proposalTx}, parsedBanffProposalBlk.Txs()) // timestamp check for banff blocks only require.Equal(banffProposalBlk.Timestamp(), parsedBanffProposalBlk.Timestamp()) // backward compatibility check require.Equal(parsedApricotProposalBlk.Txs(), parsedBanffProposalBlk.Txs()) + + // check that banff proposal block with decisionTxs can be built and parsed + banffProposalBlkWithDecisionTxs, err := NewBanffProposalBlock( + blkTimestamp, + parentID, + height, + proposalTx, + decisionTxs, + ) + require.NoError(err) + + // parse block + parsed, err = Parse(cdc, banffProposalBlkWithDecisionTxs.Bytes()) + require.NoError(err) + + // compare content + require.Equal(banffProposalBlkWithDecisionTxs.ID(), parsed.ID()) + require.Equal(banffProposalBlkWithDecisionTxs.Bytes(), parsed.Bytes()) + require.Equal(banffProposalBlkWithDecisionTxs.Parent(), parsed.Parent()) + require.Equal(banffProposalBlkWithDecisionTxs.Height(), parsed.Height()) + require.IsType(&BanffProposalBlock{}, parsed) + parsedBanffProposalBlkWithDecisionTxs := parsed.(*BanffProposalBlock) + + l := len(decisionTxs) + expectedTxs := make([]*txs.Tx, l+1) + copy(expectedTxs, decisionTxs) + expectedTxs[l] = proposalTx + require.Equal(expectedTxs, parsedBanffProposalBlkWithDecisionTxs.Txs()) + + require.Equal(banffProposalBlkWithDecisionTxs.Timestamp(), parsedBanffProposalBlkWithDecisionTxs.Timestamp()) } } @@ -167,12 +199,12 @@ func TestCommitBlock(t *testing.T) { 
// compare content require.Equal(banffCommitBlk.ID(), parsed.ID()) require.Equal(banffCommitBlk.Bytes(), parsed.Bytes()) - require.Equal(banffCommitBlk.Parent(), banffCommitBlk.Parent()) + require.Equal(banffCommitBlk.Parent(), parsed.Parent()) require.Equal(banffCommitBlk.Height(), parsed.Height()) // timestamp check for banff blocks only - parsedBanffCommitBlk, ok := parsed.(*BanffCommitBlock) - require.True(ok) + require.IsType(&BanffCommitBlock{}, parsed) + parsedBanffCommitBlk := parsed.(*BanffCommitBlock) require.Equal(banffCommitBlk.Timestamp(), parsedBanffCommitBlk.Timestamp()) } } @@ -210,12 +242,12 @@ func TestAbortBlock(t *testing.T) { // compare content require.Equal(banffAbortBlk.ID(), parsed.ID()) require.Equal(banffAbortBlk.Bytes(), parsed.Bytes()) - require.Equal(banffAbortBlk.Parent(), banffAbortBlk.Parent()) + require.Equal(banffAbortBlk.Parent(), parsed.Parent()) require.Equal(banffAbortBlk.Height(), parsed.Height()) // timestamp check for banff blocks only - parsedBanffAbortBlk, ok := parsed.(*BanffAbortBlock) - require.True(ok) + require.IsType(&BanffAbortBlock{}, parsed) + parsedBanffAbortBlk := parsed.(*BanffAbortBlock) require.Equal(banffAbortBlk.Timestamp(), parsedBanffAbortBlk.Timestamp()) } } @@ -225,7 +257,7 @@ func TestAtomicBlock(t *testing.T) { require := require.New(t) parentID := ids.ID{'p', 'a', 'r', 'e', 'n', 't', 'I', 'D'} height := uint64(2022) - tx, err := testAtomicTx() + atomicTx, err := testAtomicTx() require.NoError(err) for _, cdc := range []codec.Manager{Codec, GenesisCodec} { @@ -233,7 +265,7 @@ func TestAtomicBlock(t *testing.T) { atomicBlk, err := NewApricotAtomicBlock( parentID, height, - tx, + atomicTx, ) require.NoError(err) @@ -247,9 +279,9 @@ func TestAtomicBlock(t *testing.T) { require.Equal(atomicBlk.Parent(), parsed.Parent()) require.Equal(atomicBlk.Height(), parsed.Height()) - parsedAtomicBlk, ok := parsed.(*ApricotAtomicBlock) - require.True(ok) - require.Equal([]*txs.Tx{tx}, parsedAtomicBlk.Txs()) + 
require.IsType(&ApricotAtomicBlock{}, parsed) + parsedAtomicBlk := parsed.(*ApricotAtomicBlock) + require.Equal([]*txs.Tx{atomicTx}, parsedAtomicBlk.Txs()) } } diff --git a/avalanchego/vms/platformvm/blocks/proposal_block.go b/avalanchego/vms/platformvm/block/proposal_block.go similarity index 69% rename from avalanchego/vms/platformvm/blocks/proposal_block.go rename to avalanchego/vms/platformvm/block/proposal_block.go index 11e9c22a..4160db57 100644 --- a/avalanchego/vms/platformvm/blocks/proposal_block.go +++ b/avalanchego/vms/platformvm/block/proposal_block.go @@ -1,7 +1,7 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package blocks +package block import ( "fmt" @@ -18,16 +18,23 @@ var ( ) type BanffProposalBlock struct { - Time uint64 `serialize:"true" json:"time"` - // Transactions is currently unused. This is populated so that introducing - // them in the future will not require a codec change. - // - // TODO: when Transactions is used, we must correctly verify and apply their - // changes. 
- Transactions []*txs.Tx `serialize:"true" json:"-"` + Time uint64 `serialize:"true" json:"time"` + Transactions []*txs.Tx `serialize:"true" json:"txs"` ApricotProposalBlock `serialize:"true"` } +func (b *BanffProposalBlock) initialize(bytes []byte) error { + if err := b.ApricotProposalBlock.initialize(bytes); err != nil { + return err + } + for _, tx := range b.Transactions { + if err := tx.Initialize(txs.Codec); err != nil { + return fmt.Errorf("failed to initialize tx: %w", err) + } + } + return nil +} + func (b *BanffProposalBlock) InitCtx(ctx *snow.Context) { for _, tx := range b.Transactions { tx.Unsigned.InitCtx(ctx) @@ -39,6 +46,14 @@ func (b *BanffProposalBlock) Timestamp() time.Time { return time.Unix(int64(b.Time), 0) } +func (b *BanffProposalBlock) Txs() []*txs.Tx { + l := len(b.Transactions) + txs := make([]*txs.Tx, l+1) + copy(txs, b.Transactions) + txs[l] = b.Tx + return txs +} + func (b *BanffProposalBlock) Visit(v Visitor) error { return v.BanffProposalBlock(b) } @@ -47,19 +62,21 @@ func NewBanffProposalBlock( timestamp time.Time, parentID ids.ID, height uint64, - tx *txs.Tx, + proposalTx *txs.Tx, + decisionTxs []*txs.Tx, ) (*BanffProposalBlock, error) { blk := &BanffProposalBlock{ - Time: uint64(timestamp.Unix()), + Transactions: decisionTxs, + Time: uint64(timestamp.Unix()), ApricotProposalBlock: ApricotProposalBlock{ CommonBlock: CommonBlock{ PrntID: parentID, Hght: height, }, - Tx: tx, + Tx: proposalTx, }, } - return blk, initialize(blk) + return blk, initialize(blk, &blk.CommonBlock) } type ApricotProposalBlock struct { @@ -102,5 +119,5 @@ func NewApricotProposalBlock( }, Tx: tx, } - return blk, initialize(blk) + return blk, initialize(blk, &blk.CommonBlock) } diff --git a/avalanchego/vms/platformvm/block/proposal_block_test.go b/avalanchego/vms/platformvm/block/proposal_block_test.go new file mode 100644 index 00000000..bc596bd7 --- /dev/null +++ b/avalanchego/vms/platformvm/block/proposal_block_test.go @@ -0,0 +1,108 @@ +// Copyright (C) 
2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package block + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/vms/platformvm/txs" +) + +func TestNewBanffProposalBlock(t *testing.T) { + timestamp := time.Now().Truncate(time.Second) + parentID := ids.GenerateTestID() + height := uint64(1337) + proposalTx, err := testProposalTx() + require.NoError(t, err) + decisionTxs, err := testDecisionTxs() + require.NoError(t, err) + + type test struct { + name string + proposalTx *txs.Tx + decisionTxs []*txs.Tx + } + + tests := []test{ + { + name: "no decision txs", + proposalTx: proposalTx, + decisionTxs: []*txs.Tx{}, + }, + { + name: "decision txs", + proposalTx: proposalTx, + decisionTxs: decisionTxs, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + blk, err := NewBanffProposalBlock( + timestamp, + parentID, + height, + test.proposalTx, + test.decisionTxs, + ) + require.NoError(err) + + require.NotEmpty(blk.Bytes()) + require.Equal(parentID, blk.Parent()) + require.Equal(height, blk.Height()) + require.Equal(timestamp, blk.Timestamp()) + + l := len(test.decisionTxs) + expectedTxs := make([]*txs.Tx, l+1) + copy(expectedTxs, test.decisionTxs) + expectedTxs[l] = test.proposalTx + + blkTxs := blk.Txs() + require.Equal(expectedTxs, blkTxs) + for i, blkTx := range blkTxs { + expectedTx := expectedTxs[i] + require.NotEmpty(blkTx.Bytes()) + require.NotEqual(ids.Empty, blkTx.ID()) + require.Equal(expectedTx.Bytes(), blkTx.Bytes()) + } + }) + } +} + +func TestNewApricotProposalBlock(t *testing.T) { + require := require.New(t) + + parentID := ids.GenerateTestID() + height := uint64(1337) + proposalTx, err := testProposalTx() + require.NoError(err) + + blk, err := NewApricotProposalBlock( + parentID, + height, + proposalTx, + ) + require.NoError(err) + + 
require.NotEmpty(blk.Bytes()) + require.Equal(parentID, blk.Parent()) + require.Equal(height, blk.Height()) + + expectedTxs := []*txs.Tx{proposalTx} + + blkTxs := blk.Txs() + require.Equal(expectedTxs, blkTxs) + for i, blkTx := range blkTxs { + expectedTx := expectedTxs[i] + require.NotEmpty(blkTx.Bytes()) + require.NotEqual(ids.Empty, blkTx.ID()) + require.Equal(expectedTx.Bytes(), blkTx.Bytes()) + } +} diff --git a/avalanchego/vms/platformvm/block/serialization_test.go b/avalanchego/vms/platformvm/block/serialization_test.go new file mode 100644 index 00000000..8e2002c3 --- /dev/null +++ b/avalanchego/vms/platformvm/block/serialization_test.go @@ -0,0 +1,226 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package block + +import ( + "encoding/json" + "fmt" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/vms/components/avax" + "github.com/ava-labs/avalanchego/vms/platformvm/txs" +) + +func TestBanffBlockSerialization(t *testing.T) { + type test struct { + block BanffBlock + bytes []byte + } + + tests := []test{ + { + block: &BanffProposalBlock{ + ApricotProposalBlock: ApricotProposalBlock{ + Tx: &txs.Tx{ + Unsigned: &txs.AdvanceTimeTx{}, + }, + }, + }, + bytes: []byte{ + // Codec version + 0x00, 0x00, + // Type ID + 0x00, 0x00, 0x00, 0x1d, + // Rest + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + }, + }, + { + block: &BanffCommitBlock{ + ApricotCommitBlock: ApricotCommitBlock{}, + }, + 
bytes: []byte{ + // Codec version + 0x00, 0x00, + // Type ID + 0x00, 0x00, 0x00, 0x1f, + // Rest + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }, + }, + { + block: &BanffAbortBlock{ + ApricotAbortBlock: ApricotAbortBlock{}, + }, + bytes: []byte{ + // Codec version + 0x00, 0x00, + // Type ID + 0x00, 0x00, 0x00, 0x1e, + // Rest + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }, + }, + { + block: &BanffStandardBlock{ + ApricotStandardBlock: ApricotStandardBlock{ + Transactions: []*txs.Tx{}, + }, + }, + bytes: []byte{ + // Codec version + 0x00, 0x00, + // Type ID + 0x00, 0x00, 0x00, 0x20, + // Rest + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + }, + }, + } + + for _, test := range tests { + testName := fmt.Sprintf("%T", test.block) + block := test.block + t.Run(testName, func(t *testing.T) { + require := require.New(t) + + got, err := Codec.Marshal(CodecVersion, &block) + require.NoError(err) + require.Equal(test.bytes, got) + }) + } +} + +func TestBanffProposalBlockJSON(t *testing.T) { + require := require.New(t) + + simpleBanffProposalBlock := &BanffProposalBlock{ + Time: 123456, + ApricotProposalBlock: ApricotProposalBlock{ + CommonBlock: CommonBlock{ + PrntID: ids.ID{'p', 'a', 'r', 'e', 
'n', 't', 'I', 'D'}, + Hght: 1337, + BlockID: ids.ID{'b', 'l', 'o', 'c', 'k', 'I', 'D'}, + }, + Tx: &txs.Tx{ + Unsigned: &txs.AdvanceTimeTx{ + Time: 123457, + }, + }, + }, + } + + simpleBanffProposalBlockBytes, err := json.MarshalIndent(simpleBanffProposalBlock, "", "\t") + require.NoError(err) + + require.Equal(`{ + "time": 123456, + "txs": null, + "parentID": "rVcYrvnGXdoJBeYQRm5ZNaCGHeVyqcHHJu8Yd89kJcef6V5Eg", + "height": 1337, + "id": "kM6h4d2UKYEDzQXm7KNqyeBJLjhb42J24m4L4WACB5didf3pk", + "tx": { + "unsignedTx": { + "time": 123457 + }, + "credentials": null, + "id": "11111111111111111111111111111111LpoYY" + } +}`, string(simpleBanffProposalBlockBytes)) + + complexBanffProposalBlock := simpleBanffProposalBlock + complexBanffProposalBlock.Transactions = []*txs.Tx{ + { + Unsigned: &txs.BaseTx{ + BaseTx: avax.BaseTx{ + NetworkID: constants.MainnetID, + BlockchainID: constants.PlatformChainID, + Outs: []*avax.TransferableOutput{}, + Ins: []*avax.TransferableInput{}, + Memo: []byte("KilroyWasHere"), + }, + }, + }, + { + Unsigned: &txs.BaseTx{ + BaseTx: avax.BaseTx{ + NetworkID: constants.MainnetID, + BlockchainID: constants.PlatformChainID, + Outs: []*avax.TransferableOutput{}, + Ins: []*avax.TransferableInput{}, + Memo: []byte("KilroyWasHere2"), + }, + }, + }, + } + + complexBanffProposalBlockBytes, err := json.MarshalIndent(complexBanffProposalBlock, "", "\t") + require.NoError(err) + + require.Equal(`{ + "time": 123456, + "txs": [ + { + "unsignedTx": { + "networkID": 1, + "blockchainID": "11111111111111111111111111111111LpoYY", + "outputs": [], + "inputs": [], + "memo": "0x4b696c726f7957617348657265" + }, + "credentials": null, + "id": "11111111111111111111111111111111LpoYY" + }, + { + "unsignedTx": { + "networkID": 1, + "blockchainID": "11111111111111111111111111111111LpoYY", + "outputs": [], + "inputs": [], + "memo": "0x4b696c726f795761734865726532" + }, + "credentials": null, + "id": "11111111111111111111111111111111LpoYY" + } + ], + "parentID": 
"rVcYrvnGXdoJBeYQRm5ZNaCGHeVyqcHHJu8Yd89kJcef6V5Eg", + "height": 1337, + "id": "kM6h4d2UKYEDzQXm7KNqyeBJLjhb42J24m4L4WACB5didf3pk", + "tx": { + "unsignedTx": { + "time": 123457 + }, + "credentials": null, + "id": "11111111111111111111111111111111LpoYY" + } +}`, string(complexBanffProposalBlockBytes)) +} diff --git a/avalanchego/vms/platformvm/blocks/standard_block.go b/avalanchego/vms/platformvm/block/standard_block.go similarity index 90% rename from avalanchego/vms/platformvm/blocks/standard_block.go rename to avalanchego/vms/platformvm/block/standard_block.go index 72684c82..c7d35b12 100644 --- a/avalanchego/vms/platformvm/blocks/standard_block.go +++ b/avalanchego/vms/platformvm/block/standard_block.go @@ -1,7 +1,7 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package blocks +package block import ( "fmt" @@ -46,7 +46,7 @@ func NewBanffStandardBlock( Transactions: txs, }, } - return blk, initialize(blk) + return blk, initialize(blk, &blk.CommonBlock) } type ApricotStandardBlock struct { @@ -58,7 +58,7 @@ func (b *ApricotStandardBlock) initialize(bytes []byte) error { b.CommonBlock.initialize(bytes) for _, tx := range b.Transactions { if err := tx.Initialize(txs.Codec); err != nil { - return fmt.Errorf("failed to sign block: %w", err) + return fmt.Errorf("failed to initialize tx: %w", err) } } return nil @@ -93,5 +93,5 @@ func NewApricotStandardBlock( }, Transactions: txs, } - return blk, initialize(blk) + return blk, initialize(blk, &blk.CommonBlock) } diff --git a/avalanchego/vms/platformvm/blocks/standard_block_test.go b/avalanchego/vms/platformvm/block/standard_block_test.go similarity index 96% rename from avalanchego/vms/platformvm/blocks/standard_block_test.go rename to avalanchego/vms/platformvm/block/standard_block_test.go index b5c5c065..4162aadb 100644 --- a/avalanchego/vms/platformvm/blocks/standard_block_test.go +++ 
b/avalanchego/vms/platformvm/block/standard_block_test.go @@ -1,7 +1,7 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package blocks +package block import ( "testing" diff --git a/avalanchego/vms/platformvm/blocks/visitor.go b/avalanchego/vms/platformvm/block/visitor.go similarity index 86% rename from avalanchego/vms/platformvm/blocks/visitor.go rename to avalanchego/vms/platformvm/block/visitor.go index 929c615b..6c27b538 100644 --- a/avalanchego/vms/platformvm/blocks/visitor.go +++ b/avalanchego/vms/platformvm/block/visitor.go @@ -1,7 +1,7 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package blocks +package block type Visitor interface { BanffAbortBlock(*BanffAbortBlock) error diff --git a/avalanchego/vms/platformvm/blocks/builder/builder.go b/avalanchego/vms/platformvm/blocks/builder/builder.go deleted file mode 100644 index 947fdf26..00000000 --- a/avalanchego/vms/platformvm/blocks/builder/builder.go +++ /dev/null @@ -1,417 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package builder - -import ( - "context" - "errors" - "fmt" - "time" - - "go.uber.org/zap" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/consensus/snowman" - "github.com/ava-labs/avalanchego/snow/engine/common" - "github.com/ava-labs/avalanchego/utils/timer" - "github.com/ava-labs/avalanchego/utils/timer/mockable" - "github.com/ava-labs/avalanchego/utils/units" - "github.com/ava-labs/avalanchego/vms/platformvm/blocks" - "github.com/ava-labs/avalanchego/vms/platformvm/state" - "github.com/ava-labs/avalanchego/vms/platformvm/txs" - "github.com/ava-labs/avalanchego/vms/platformvm/txs/mempool" - - blockexecutor "github.com/ava-labs/avalanchego/vms/platformvm/blocks/executor" - txbuilder "github.com/ava-labs/avalanchego/vms/platformvm/txs/builder" - txexecutor "github.com/ava-labs/avalanchego/vms/platformvm/txs/executor" -) - -// targetBlockSize is maximum number of transaction bytes to place into a -// StandardBlock -const targetBlockSize = 128 * units.KiB - -var ( - _ Builder = (*builder)(nil) - - errEndOfTime = errors.New("program time is suspiciously far in the future") - errNoPendingBlocks = errors.New("no pending blocks") - errChainNotSynced = errors.New("chain not synced") -) - -type Builder interface { - mempool.Mempool - mempool.BlockTimer - Network - - // set preferred block on top of which we'll build next - SetPreference(blockID ids.ID) - - // get preferred block on top of which we'll build next - Preferred() (snowman.Block, error) - - // AddUnverifiedTx verifier the tx before adding it to mempool - AddUnverifiedTx(tx *txs.Tx) error - - // BuildBlock is called on timer clock to attempt to create - // next block - BuildBlock(context.Context) (snowman.Block, error) - - // Shutdown cleanly shuts Builder down - Shutdown() -} - -// builder implements a simple builder to convert txs into valid blocks -type builder struct { - mempool.Mempool - Network - - txBuilder txbuilder.Builder - txExecutorBackend *txexecutor.Backend - 
blkManager blockexecutor.Manager - - // ID of the preferred block to build on top of - preferredBlockID ids.ID - - // channel to send messages to the consensus engine - toEngine chan<- common.Message - - // This timer goes off when it is time for the next validator to add/leave - // the validator set. When it goes off ResetTimer() is called, potentially - // triggering creation of a new block. - timer *timer.Timer -} - -func New( - mempool mempool.Mempool, - txBuilder txbuilder.Builder, - txExecutorBackend *txexecutor.Backend, - blkManager blockexecutor.Manager, - toEngine chan<- common.Message, - appSender common.AppSender, -) Builder { - builder := &builder{ - Mempool: mempool, - txBuilder: txBuilder, - txExecutorBackend: txExecutorBackend, - blkManager: blkManager, - toEngine: toEngine, - } - - builder.timer = timer.NewTimer(builder.setNextBuildBlockTime) - - builder.Network = NewNetwork( - txExecutorBackend.Ctx, - builder, - appSender, - ) - - go txExecutorBackend.Ctx.Log.RecoverAndPanic(builder.timer.Dispatch) - return builder -} - -func (b *builder) SetPreference(blockID ids.ID) { - if blockID == b.preferredBlockID { - // If the preference didn't change, then this is a noop - return - } - b.preferredBlockID = blockID - b.ResetBlockTimer() -} - -func (b *builder) Preferred() (snowman.Block, error) { - return b.blkManager.GetBlock(b.preferredBlockID) -} - -// AddUnverifiedTx verifies a transaction and attempts to add it to the mempool -func (b *builder) AddUnverifiedTx(tx *txs.Tx) error { - if !b.txExecutorBackend.Bootstrapped.Get() { - return errChainNotSynced - } - - txID := tx.ID() - if b.Mempool.Has(txID) { - // If the transaction is already in the mempool - then it looks the same - // as if it was successfully added - return nil - } - - verifier := txexecutor.MempoolTxVerifier{ - Backend: b.txExecutorBackend, - ParentID: b.preferredBlockID, // We want to build off of the preferred block - StateVersions: b.blkManager, - Tx: tx, - } - if err := 
tx.Unsigned.Visit(&verifier); err != nil { - b.MarkDropped(txID, err) - return err - } - - if err := b.Mempool.Add(tx); err != nil { - return err - } - return b.GossipTx(tx) -} - -// BuildBlock builds a block to be added to consensus. -// This method removes the transactions from the returned -// blocks from the mempool. -func (b *builder) BuildBlock(context.Context) (snowman.Block, error) { - b.Mempool.DisableAdding() - defer func() { - b.Mempool.EnableAdding() - b.ResetBlockTimer() - }() - - ctx := b.txExecutorBackend.Ctx - ctx.Log.Debug("starting to attempt to build a block") - - statelessBlk, err := b.buildBlock() - if err != nil { - return nil, err - } - - // Remove selected txs from mempool now that we are returning the block to - // the consensus engine. - txs := statelessBlk.Txs() - b.Mempool.Remove(txs) - return b.blkManager.NewBlock(statelessBlk), nil -} - -// Returns the block we want to build and issue. -// Only modifies state to remove expired proposal txs. -func (b *builder) buildBlock() (blocks.Block, error) { - // Get the block to build on top of and retrieve the new block's context. - preferred, err := b.Preferred() - if err != nil { - return nil, err - } - preferredID := preferred.ID() - nextHeight := preferred.Height() + 1 - preferredState, ok := b.blkManager.GetState(preferredID) - if !ok { - return nil, fmt.Errorf("%w: %s", state.ErrMissingParentState, preferredID) - } - - timestamp := b.txExecutorBackend.Clk.Time() - if parentTime := preferred.Timestamp(); parentTime.After(timestamp) { - timestamp = parentTime - } - // [timestamp] = max(now, parentTime) - - nextStakerChangeTime, err := txexecutor.GetNextStakerChangeTime(preferredState) - if err != nil { - return nil, fmt.Errorf("could not calculate next staker change time: %w", err) - } - - // timeWasCapped means that [timestamp] was reduced to - // [nextStakerChangeTime]. It is used as a flag for [buildApricotBlock] to - // be willing to issue an advanceTimeTx. 
It is also used as a flag for - // [buildBanffBlock] to force the issuance of an empty block to advance - // the time forward; if there are no available transactions. - timeWasCapped := !timestamp.Before(nextStakerChangeTime) - if timeWasCapped { - timestamp = nextStakerChangeTime - } - // [timestamp] = min(max(now, parentTime), nextStakerChangeTime) - - return buildBlock( - b, - preferredID, - nextHeight, - timestamp, - timeWasCapped, - preferredState, - ) -} - -func (b *builder) Shutdown() { - // There is a potential deadlock if the timer is about to execute a timeout. - // So, the lock must be released before stopping the timer. - ctx := b.txExecutorBackend.Ctx - ctx.Lock.Unlock() - b.timer.Stop() - ctx.Lock.Lock() -} - -func (b *builder) ResetBlockTimer() { - // Next time the context lock is released, we can attempt to reset the block - // timer. - b.timer.SetTimeoutIn(0) -} - -// dropExpiredStakerTxs drops add validator/delegator transactions in the -// mempool whose start time is not sufficiently far in the future -// (i.e. within local time plus [MaxFutureStartFrom]). -func (b *builder) dropExpiredStakerTxs(timestamp time.Time) { - minStartTime := timestamp.Add(txexecutor.SyncBound) - for b.Mempool.HasStakerTx() { - tx := b.Mempool.PeekStakerTx() - startTime := tx.Unsigned.(txs.Staker).StartTime() - if !startTime.Before(minStartTime) { - // The next proposal tx in the mempool starts sufficiently far in - // the future. 
- return - } - - txID := tx.ID() - err := fmt.Errorf( - "synchrony bound (%s) is later than staker start time (%s)", - minStartTime, - startTime, - ) - - b.Mempool.Remove([]*txs.Tx{tx}) - b.Mempool.MarkDropped(txID, err) // cache tx as dropped - b.txExecutorBackend.Ctx.Log.Debug("dropping tx", - zap.Stringer("txID", txID), - zap.Error(err), - ) - } -} - -func (b *builder) setNextBuildBlockTime() { - ctx := b.txExecutorBackend.Ctx - - // Grabbing the lock here enforces that this function is not called mid-way - // through modifying of the state. - ctx.Lock.Lock() - defer ctx.Lock.Unlock() - - if !b.txExecutorBackend.Bootstrapped.Get() { - ctx.Log.Verbo("skipping block timer reset", - zap.String("reason", "not bootstrapped"), - ) - return - } - - if _, err := b.buildBlock(); err == nil { - // We can build a block now - b.notifyBlockReady() - return - } - - // Wake up when it's time to add/remove the next validator/delegator - preferredState, ok := b.blkManager.GetState(b.preferredBlockID) - if !ok { - // The preferred block should always be a decision block - ctx.Log.Error("couldn't get preferred block state", - zap.Stringer("preferredID", b.preferredBlockID), - zap.Stringer("lastAcceptedID", b.blkManager.LastAccepted()), - ) - return - } - - nextStakerChangeTime, err := txexecutor.GetNextStakerChangeTime(preferredState) - if err != nil { - ctx.Log.Error("couldn't get next staker change time", - zap.Stringer("preferredID", b.preferredBlockID), - zap.Stringer("lastAcceptedID", b.blkManager.LastAccepted()), - zap.Error(err), - ) - return - } - - now := b.txExecutorBackend.Clk.Time() - waitTime := nextStakerChangeTime.Sub(now) - ctx.Log.Debug("setting next scheduled event", - zap.Time("nextEventTime", nextStakerChangeTime), - zap.Duration("timeUntil", waitTime), - ) - - // Wake up when it's time to add/remove the next validator - b.timer.SetTimeoutIn(waitTime) -} - -// notifyBlockReady tells the consensus engine that a new block is ready to be -// created -func (b 
*builder) notifyBlockReady() { - select { - case b.toEngine <- common.PendingTxs: - default: - b.txExecutorBackend.Ctx.Log.Debug("dropping message to consensus engine") - } -} - -// [timestamp] is min(max(now, parent timestamp), next staker change time) -func buildBlock( - builder *builder, - parentID ids.ID, - height uint64, - timestamp time.Time, - forceAdvanceTime bool, - parentState state.Chain, -) (blocks.Block, error) { - // Try rewarding stakers whose staking period ends at the new chain time. - // This is done first to prioritize advancing the timestamp as quickly as - // possible. - stakerTxID, shouldReward, err := getNextStakerToReward(timestamp, parentState) - if err != nil { - return nil, fmt.Errorf("could not find next staker to reward: %w", err) - } - if shouldReward { - rewardValidatorTx, err := builder.txBuilder.NewRewardValidatorTx(stakerTxID) - if err != nil { - return nil, fmt.Errorf("could not build tx to reward staker: %w", err) - } - - return blocks.NewBanffProposalBlock( - timestamp, - parentID, - height, - rewardValidatorTx, - ) - } - - // Clean out the mempool's transactions with invalid timestamps. - builder.dropExpiredStakerTxs(timestamp) - - // If there is no reason to build a block, don't. - if !builder.Mempool.HasTxs() && !forceAdvanceTime { - builder.txExecutorBackend.Ctx.Log.Debug("no pending txs to issue into a block") - return nil, errNoPendingBlocks - } - - // Issue a block with as many transactions as possible. - return blocks.NewBanffStandardBlock( - timestamp, - parentID, - height, - builder.Mempool.PeekTxs(targetBlockSize), - ) -} - -// getNextStakerToReward returns the next staker txID to remove from the staking -// set with a RewardValidatorTx rather than an AdvanceTimeTx. [chainTimestamp] -// is the timestamp of the chain at the time this validator would be getting -// removed and is used to calculate [shouldReward]. 
-// Returns: -// - [txID] of the next staker to reward -// - [shouldReward] if the txID exists and is ready to be rewarded -// - [err] if something bad happened -func getNextStakerToReward( - chainTimestamp time.Time, - preferredState state.Chain, -) (ids.ID, bool, error) { - if !chainTimestamp.Before(mockable.MaxTime) { - return ids.Empty, false, errEndOfTime - } - - currentStakerIterator, err := preferredState.GetCurrentStakerIterator() - if err != nil { - return ids.Empty, false, err - } - defer currentStakerIterator.Release() - - for currentStakerIterator.Next() { - currentStaker := currentStakerIterator.Value() - priority := currentStaker.Priority - // If the staker is a permissionless staker (not a permissioned subnet - // validator), it's the next staker we will want to remove with a - // RewardValidatorTx rather than an AdvanceTimeTx. - if priority != txs.SubnetPermissionedValidatorCurrentPriority { - return currentStaker.TxID, chainTimestamp.Equal(currentStaker.EndTime), nil - } - } - return ids.Empty, false, nil -} diff --git a/avalanchego/vms/platformvm/blocks/builder/builder_test.go b/avalanchego/vms/platformvm/blocks/builder/builder_test.go deleted file mode 100644 index 8f8954a9..00000000 --- a/avalanchego/vms/platformvm/blocks/builder/builder_test.go +++ /dev/null @@ -1,685 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package builder - -import ( - "context" - "errors" - "testing" - "time" - - "github.com/golang/mock/gomock" - - "github.com/stretchr/testify/require" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" - "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/utils/timer/mockable" - "github.com/ava-labs/avalanchego/vms/components/avax" - "github.com/ava-labs/avalanchego/vms/components/verify" - "github.com/ava-labs/avalanchego/vms/platformvm/blocks" - "github.com/ava-labs/avalanchego/vms/platformvm/state" - "github.com/ava-labs/avalanchego/vms/platformvm/txs" - "github.com/ava-labs/avalanchego/vms/platformvm/txs/mempool" - "github.com/ava-labs/avalanchego/vms/secp256k1fx" - - blockexecutor "github.com/ava-labs/avalanchego/vms/platformvm/blocks/executor" - txbuilder "github.com/ava-labs/avalanchego/vms/platformvm/txs/builder" - txexecutor "github.com/ava-labs/avalanchego/vms/platformvm/txs/executor" -) - -var errTestingDropped = errors.New("testing dropped") - -// shows that a locally generated CreateChainTx can be added to mempool and then -// removed by inclusion in a block -func TestBlockBuilderAddLocalTx(t *testing.T) { - require := require.New(t) - - env := newEnvironment(t) - env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - }() - - // add a tx to it - tx := getValidTx(env.txBuilder, t) - txID := tx.ID() - - env.sender.SendAppGossipF = func(context.Context, []byte) error { - return nil - } - err := env.Builder.AddUnverifiedTx(tx) - require.NoError(err) - - has := env.mempool.Has(txID) - require.True(has) - - // show that build block include that tx and removes it from mempool - blkIntf, err := env.Builder.BuildBlock(context.Background()) - require.NoError(err) - - blk, ok := blkIntf.(*blockexecutor.Block) - require.True(ok) - require.Len(blk.Txs(), 1) - require.Equal(txID, blk.Txs()[0].ID()) - - has = 
env.mempool.Has(txID) - require.False(has) -} - -func TestPreviouslyDroppedTxsCanBeReAddedToMempool(t *testing.T) { - require := require.New(t) - - env := newEnvironment(t) - env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - }() - - // create candidate tx - tx := getValidTx(env.txBuilder, t) - txID := tx.ID() - - // A tx simply added to mempool is obviously not marked as dropped - require.NoError(env.mempool.Add(tx)) - require.True(env.mempool.Has(txID)) - reason := env.mempool.GetDropReason(txID) - require.NoError(reason) - - // When a tx is marked as dropped, it is still available to allow re-issuance - env.mempool.MarkDropped(txID, errTestingDropped) - require.True(env.mempool.Has(txID)) // still available - reason = env.mempool.GetDropReason(txID) - require.ErrorIs(reason, errTestingDropped) - - // A previously dropped tx, popped then re-added to mempool, - // is not dropped anymore - env.mempool.Remove([]*txs.Tx{tx}) - require.NoError(env.mempool.Add(tx)) - - require.True(env.mempool.Has(txID)) - reason = env.mempool.GetDropReason(txID) - require.NoError(reason) -} - -func TestNoErrorOnUnexpectedSetPreferenceDuringBootstrapping(t *testing.T) { - env := newEnvironment(t) - env.ctx.Lock.Lock() - env.isBootstrapped.Set(false) - env.ctx.Log = logging.NoWarn{} - defer func() { - require.NoError(t, shutdownEnvironment(env)) - }() - - env.Builder.SetPreference(ids.GenerateTestID()) // should not panic -} - -func TestGetNextStakerToReward(t *testing.T) { - type test struct { - name string - timestamp time.Time - stateF func(*gomock.Controller) state.Chain - expectedTxID ids.ID - expectedShouldReward bool - expectedErr error - } - - var ( - now = time.Now() - txID = ids.GenerateTestID() - ) - tests := []test{ - { - name: "end of time", - timestamp: mockable.MaxTime, - stateF: func(ctrl *gomock.Controller) state.Chain { - return state.NewMockChain(ctrl) - }, - expectedErr: errEndOfTime, - }, - { - name: "no stakers", - timestamp: now, - 
stateF: func(ctrl *gomock.Controller) state.Chain { - currentStakerIter := state.NewMockStakerIterator(ctrl) - currentStakerIter.EXPECT().Next().Return(false) - currentStakerIter.EXPECT().Release() - - s := state.NewMockChain(ctrl) - s.EXPECT().GetCurrentStakerIterator().Return(currentStakerIter, nil) - - return s - }, - }, - { - name: "expired subnet validator/delegator", - timestamp: now, - stateF: func(ctrl *gomock.Controller) state.Chain { - currentStakerIter := state.NewMockStakerIterator(ctrl) - - currentStakerIter.EXPECT().Next().Return(true) - currentStakerIter.EXPECT().Value().Return(&state.Staker{ - Priority: txs.SubnetPermissionedValidatorCurrentPriority, - EndTime: now, - }) - currentStakerIter.EXPECT().Next().Return(true) - currentStakerIter.EXPECT().Value().Return(&state.Staker{ - TxID: txID, - Priority: txs.SubnetPermissionlessDelegatorCurrentPriority, - EndTime: now, - }) - currentStakerIter.EXPECT().Release() - - s := state.NewMockChain(ctrl) - s.EXPECT().GetCurrentStakerIterator().Return(currentStakerIter, nil) - - return s - }, - expectedTxID: txID, - expectedShouldReward: true, - }, - { - name: "expired primary network validator after subnet expired subnet validator", - timestamp: now, - stateF: func(ctrl *gomock.Controller) state.Chain { - currentStakerIter := state.NewMockStakerIterator(ctrl) - - currentStakerIter.EXPECT().Next().Return(true) - currentStakerIter.EXPECT().Value().Return(&state.Staker{ - Priority: txs.SubnetPermissionedValidatorCurrentPriority, - EndTime: now, - }) - currentStakerIter.EXPECT().Next().Return(true) - currentStakerIter.EXPECT().Value().Return(&state.Staker{ - TxID: txID, - Priority: txs.PrimaryNetworkValidatorCurrentPriority, - EndTime: now, - }) - currentStakerIter.EXPECT().Release() - - s := state.NewMockChain(ctrl) - s.EXPECT().GetCurrentStakerIterator().Return(currentStakerIter, nil) - - return s - }, - expectedTxID: txID, - expectedShouldReward: true, - }, - { - name: "expired primary network delegator after 
subnet expired subnet validator", - timestamp: now, - stateF: func(ctrl *gomock.Controller) state.Chain { - currentStakerIter := state.NewMockStakerIterator(ctrl) - - currentStakerIter.EXPECT().Next().Return(true) - currentStakerIter.EXPECT().Value().Return(&state.Staker{ - Priority: txs.SubnetPermissionedValidatorCurrentPriority, - EndTime: now, - }) - currentStakerIter.EXPECT().Next().Return(true) - currentStakerIter.EXPECT().Value().Return(&state.Staker{ - TxID: txID, - Priority: txs.PrimaryNetworkDelegatorCurrentPriority, - EndTime: now, - }) - currentStakerIter.EXPECT().Release() - - s := state.NewMockChain(ctrl) - s.EXPECT().GetCurrentStakerIterator().Return(currentStakerIter, nil) - - return s - }, - expectedTxID: txID, - expectedShouldReward: true, - }, - { - name: "non-expired primary network delegator", - timestamp: now, - stateF: func(ctrl *gomock.Controller) state.Chain { - currentStakerIter := state.NewMockStakerIterator(ctrl) - - currentStakerIter.EXPECT().Next().Return(true) - currentStakerIter.EXPECT().Value().Return(&state.Staker{ - TxID: txID, - Priority: txs.PrimaryNetworkDelegatorCurrentPriority, - EndTime: now.Add(time.Second), - }) - currentStakerIter.EXPECT().Release() - - s := state.NewMockChain(ctrl) - s.EXPECT().GetCurrentStakerIterator().Return(currentStakerIter, nil) - - return s - }, - expectedTxID: txID, - expectedShouldReward: false, - }, - { - name: "non-expired primary network validator", - timestamp: now, - stateF: func(ctrl *gomock.Controller) state.Chain { - currentStakerIter := state.NewMockStakerIterator(ctrl) - - currentStakerIter.EXPECT().Next().Return(true) - currentStakerIter.EXPECT().Value().Return(&state.Staker{ - TxID: txID, - Priority: txs.PrimaryNetworkValidatorCurrentPriority, - EndTime: now.Add(time.Second), - }) - currentStakerIter.EXPECT().Release() - - s := state.NewMockChain(ctrl) - s.EXPECT().GetCurrentStakerIterator().Return(currentStakerIter, nil) - - return s - }, - expectedTxID: txID, - expectedShouldReward: 
false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - require := require.New(t) - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - state := tt.stateF(ctrl) - txID, shouldReward, err := getNextStakerToReward(tt.timestamp, state) - if tt.expectedErr != nil { - require.Equal(tt.expectedErr, err) - return - } - require.NoError(err) - require.Equal(tt.expectedTxID, txID) - require.Equal(tt.expectedShouldReward, shouldReward) - }) - } -} - -func TestBuildBlock(t *testing.T) { - var ( - parentID = ids.GenerateTestID() - height = uint64(1337) - output = &avax.TransferableOutput{ - Asset: avax.Asset{ID: ids.GenerateTestID()}, - Out: &secp256k1fx.TransferOutput{ - OutputOwners: secp256k1fx.OutputOwners{ - Addrs: []ids.ShortID{ids.GenerateTestShortID()}, - }, - }, - } - now = time.Now() - parentTimestamp = now.Add(-2 * time.Second) - transactions = []*txs.Tx{{ - Unsigned: &txs.AddValidatorTx{ - BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - Ins: []*avax.TransferableInput{{ - Asset: avax.Asset{ID: ids.GenerateTestID()}, - In: &secp256k1fx.TransferInput{ - Input: secp256k1fx.Input{ - SigIndices: []uint32{0}, - }, - }, - }}, - Outs: []*avax.TransferableOutput{output}, - }}, - Validator: txs.Validator{ - // Shouldn't be dropped - Start: uint64(now.Add(2 * txexecutor.SyncBound).Unix()), - }, - StakeOuts: []*avax.TransferableOutput{output}, - RewardsOwner: &secp256k1fx.OutputOwners{ - Addrs: []ids.ShortID{ids.GenerateTestShortID()}, - }, - }, - Creds: []verify.Verifiable{ - &secp256k1fx.Credential{ - Sigs: [][secp256k1.SignatureLen]byte{{1, 3, 3, 7}}, - }, - }, - }} - stakerTxID = ids.GenerateTestID() - ) - - type test struct { - name string - builderF func(*gomock.Controller) *builder - timestamp time.Time - forceAdvanceTime bool - parentStateF func(*gomock.Controller) state.Chain - expectedBlkF func(*require.Assertions) blocks.Block - expectedErr error - } - - tests := []test{ - { - name: "should reward", - builderF: func(ctrl 
*gomock.Controller) *builder { - mempool := mempool.NewMockMempool(ctrl) - - // The tx builder should be asked to build a reward tx - txBuilder := txbuilder.NewMockBuilder(ctrl) - txBuilder.EXPECT().NewRewardValidatorTx(stakerTxID).Return(transactions[0], nil) - - return &builder{ - Mempool: mempool, - txBuilder: txBuilder, - } - }, - timestamp: parentTimestamp, - forceAdvanceTime: false, - parentStateF: func(ctrl *gomock.Controller) state.Chain { - s := state.NewMockChain(ctrl) - - // add current validator that ends at [parentTimestamp] - // i.e. it should be rewarded - currentStakerIter := state.NewMockStakerIterator(ctrl) - currentStakerIter.EXPECT().Next().Return(true) - currentStakerIter.EXPECT().Value().Return(&state.Staker{ - TxID: stakerTxID, - Priority: txs.PrimaryNetworkDelegatorCurrentPriority, - EndTime: parentTimestamp, - }) - currentStakerIter.EXPECT().Release() - - s.EXPECT().GetCurrentStakerIterator().Return(currentStakerIter, nil) - return s - }, - expectedBlkF: func(require *require.Assertions) blocks.Block { - expectedBlk, err := blocks.NewBanffProposalBlock( - parentTimestamp, - parentID, - height, - transactions[0], - ) - require.NoError(err) - return expectedBlk - }, - expectedErr: nil, - }, - { - name: "has decision txs", - builderF: func(ctrl *gomock.Controller) *builder { - mempool := mempool.NewMockMempool(ctrl) - - // There are txs. - mempool.EXPECT().HasStakerTx().Return(false) - mempool.EXPECT().HasTxs().Return(true) - mempool.EXPECT().PeekTxs(targetBlockSize).Return(transactions) - return &builder{ - Mempool: mempool, - } - }, - timestamp: parentTimestamp, - forceAdvanceTime: false, - parentStateF: func(ctrl *gomock.Controller) state.Chain { - s := state.NewMockChain(ctrl) - - // Handle calls in [getNextStakerToReward] - // and [GetNextStakerChangeTime]. - // Next validator change time is in the future. 
- currentStakerIter := state.NewMockStakerIterator(ctrl) - gomock.InOrder( - // expect calls from [getNextStakerToReward] - currentStakerIter.EXPECT().Next().Return(true), - currentStakerIter.EXPECT().Value().Return(&state.Staker{ - NextTime: now.Add(time.Second), - Priority: txs.PrimaryNetworkDelegatorCurrentPriority, - }), - currentStakerIter.EXPECT().Release(), - ) - - s.EXPECT().GetCurrentStakerIterator().Return(currentStakerIter, nil).Times(1) - return s - }, - expectedBlkF: func(require *require.Assertions) blocks.Block { - expectedBlk, err := blocks.NewBanffStandardBlock( - parentTimestamp, - parentID, - height, - transactions, - ) - require.NoError(err) - return expectedBlk - }, - expectedErr: nil, - }, - { - name: "no stakers tx", - builderF: func(ctrl *gomock.Controller) *builder { - mempool := mempool.NewMockMempool(ctrl) - - // There are no txs. - mempool.EXPECT().HasStakerTx().Return(false) - mempool.EXPECT().HasTxs().Return(false) - - clk := &mockable.Clock{} - clk.Set(now) - return &builder{ - Mempool: mempool, - txExecutorBackend: &txexecutor.Backend{ - Ctx: &snow.Context{ - Log: logging.NoLog{}, - }, - Clk: clk, - }, - } - }, - timestamp: parentTimestamp, - forceAdvanceTime: false, - parentStateF: func(ctrl *gomock.Controller) state.Chain { - s := state.NewMockChain(ctrl) - - // Handle calls in [getNextStakerToReward] - // and [GetNextStakerChangeTime]. - // Next validator change time is in the future. 
- currentStakerIter := state.NewMockStakerIterator(ctrl) - gomock.InOrder( - // expect calls from [getNextStakerToReward] - currentStakerIter.EXPECT().Next().Return(true), - currentStakerIter.EXPECT().Value().Return(&state.Staker{ - NextTime: now.Add(time.Second), - Priority: txs.PrimaryNetworkDelegatorCurrentPriority, - }), - currentStakerIter.EXPECT().Release(), - ) - - s.EXPECT().GetCurrentStakerIterator().Return(currentStakerIter, nil).Times(1) - return s - }, - expectedBlkF: func(*require.Assertions) blocks.Block { - return nil - }, - expectedErr: errNoPendingBlocks, - }, - { - name: "should advance time", - builderF: func(ctrl *gomock.Controller) *builder { - mempool := mempool.NewMockMempool(ctrl) - - // There are no txs. - mempool.EXPECT().HasStakerTx().Return(false) - mempool.EXPECT().HasTxs().Return(false) - mempool.EXPECT().PeekTxs(targetBlockSize).Return(nil) - - clk := &mockable.Clock{} - clk.Set(now) - return &builder{ - Mempool: mempool, - txExecutorBackend: &txexecutor.Backend{ - Clk: clk, - }, - } - }, - timestamp: now.Add(-1 * time.Second), - forceAdvanceTime: true, - parentStateF: func(ctrl *gomock.Controller) state.Chain { - s := state.NewMockChain(ctrl) - - // add current validator that ends at [now] - 1 second. - // That is, it ends in the past but after the current chain time. - // Handle calls in [getNextStakerToReward] - // and [GetNextStakerChangeTime] - // when determining whether to issue a reward tx. 
- currentStakerIter := state.NewMockStakerIterator(ctrl) - gomock.InOrder( - // expect calls from [getNextStakerToReward] - currentStakerIter.EXPECT().Next().Return(true), - currentStakerIter.EXPECT().Value().Return(&state.Staker{ - NextTime: now.Add(-1 * time.Second), - Priority: txs.PrimaryNetworkDelegatorCurrentPriority, - }), - currentStakerIter.EXPECT().Release(), - ) - - s.EXPECT().GetCurrentStakerIterator().Return(currentStakerIter, nil).Times(1) - return s - }, - expectedBlkF: func(require *require.Assertions) blocks.Block { - expectedBlk, err := blocks.NewBanffStandardBlock( - now.Add(-1*time.Second), // note the advanced time - parentID, - height, - nil, // empty block to advance time - ) - require.NoError(err) - return expectedBlk - }, - expectedErr: nil, - }, - { - name: "has a staker tx no force", - builderF: func(ctrl *gomock.Controller) *builder { - mempool := mempool.NewMockMempool(ctrl) - - // There is a tx. - mempool.EXPECT().HasStakerTx().Return(false) - mempool.EXPECT().HasTxs().Return(true) - mempool.EXPECT().PeekTxs(targetBlockSize).Return([]*txs.Tx{transactions[0]}) - - clk := &mockable.Clock{} - clk.Set(now) - return &builder{ - Mempool: mempool, - txExecutorBackend: &txexecutor.Backend{ - Clk: clk, - }, - } - }, - timestamp: parentTimestamp, - forceAdvanceTime: false, - parentStateF: func(ctrl *gomock.Controller) state.Chain { - s := state.NewMockChain(ctrl) - - // Handle calls in [getNextStakerToReward] - // and [GetNextStakerChangeTime]. - // Next validator change time is in the future. 
- currentStakerIter := state.NewMockStakerIterator(ctrl) - gomock.InOrder( - // expect calls from [getNextStakerToReward] - currentStakerIter.EXPECT().Next().Return(true), - currentStakerIter.EXPECT().Value().Return(&state.Staker{ - NextTime: now.Add(time.Second), - Priority: txs.PrimaryNetworkDelegatorCurrentPriority, - }), - currentStakerIter.EXPECT().Release(), - ) - - s.EXPECT().GetCurrentStakerIterator().Return(currentStakerIter, nil).Times(1) - return s - }, - expectedBlkF: func(require *require.Assertions) blocks.Block { - expectedBlk, err := blocks.NewBanffStandardBlock( - parentTimestamp, - parentID, - height, - []*txs.Tx{transactions[0]}, - ) - require.NoError(err) - return expectedBlk - }, - expectedErr: nil, - }, - { - name: "has a staker tx with force", - builderF: func(ctrl *gomock.Controller) *builder { - mempool := mempool.NewMockMempool(ctrl) - - // There are no decision txs - // There is a staker tx. - mempool.EXPECT().HasStakerTx().Return(false) - mempool.EXPECT().HasTxs().Return(true) - mempool.EXPECT().PeekTxs(targetBlockSize).Return([]*txs.Tx{transactions[0]}) - - clk := &mockable.Clock{} - clk.Set(now) - return &builder{ - Mempool: mempool, - txExecutorBackend: &txexecutor.Backend{ - Clk: clk, - }, - } - }, - timestamp: parentTimestamp, - forceAdvanceTime: true, - parentStateF: func(ctrl *gomock.Controller) state.Chain { - s := state.NewMockChain(ctrl) - - // Handle calls in [getNextStakerToReward] - // and [GetNextStakerChangeTime]. - // Next validator change time is in the future. 
- currentStakerIter := state.NewMockStakerIterator(ctrl) - gomock.InOrder( - // expect calls from [getNextStakerToReward] - currentStakerIter.EXPECT().Next().Return(true), - currentStakerIter.EXPECT().Value().Return(&state.Staker{ - NextTime: now.Add(time.Second), - Priority: txs.PrimaryNetworkDelegatorCurrentPriority, - }), - currentStakerIter.EXPECT().Release(), - ) - - s.EXPECT().GetCurrentStakerIterator().Return(currentStakerIter, nil).Times(1) - return s - }, - expectedBlkF: func(require *require.Assertions) blocks.Block { - expectedBlk, err := blocks.NewBanffStandardBlock( - parentTimestamp, - parentID, - height, - []*txs.Tx{transactions[0]}, - ) - require.NoError(err) - return expectedBlk - }, - expectedErr: nil, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - require := require.New(t) - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - gotBlk, err := buildBlock( - tt.builderF(ctrl), - parentID, - height, - tt.timestamp, - tt.forceAdvanceTime, - tt.parentStateF(ctrl), - ) - if tt.expectedErr != nil { - require.ErrorIs(err, tt.expectedErr) - return - } - require.NoError(err) - require.EqualValues(tt.expectedBlkF(require), gotBlk) - }) - } -} diff --git a/avalanchego/vms/platformvm/blocks/builder/network.go b/avalanchego/vms/platformvm/blocks/builder/network.go deleted file mode 100644 index e1768a13..00000000 --- a/avalanchego/vms/platformvm/blocks/builder/network.go +++ /dev/null @@ -1,168 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -// TODO: consider moving the network implementation to a separate package - -package builder - -import ( - "context" - "fmt" - "time" - - "go.uber.org/zap" - - "github.com/ava-labs/avalanchego/cache" - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/snow/engine/common" - "github.com/ava-labs/avalanchego/vms/components/message" - "github.com/ava-labs/avalanchego/vms/platformvm/txs" -) - -const ( - // We allow [recentCacheSize] to be fairly large because we only store hashes - // in the cache, not entire transactions. - recentCacheSize = 512 -) - -var _ Network = (*network)(nil) - -type Network interface { - common.AppHandler - - // GossipTx gossips the transaction to some of the connected peers - GossipTx(tx *txs.Tx) error -} - -type network struct { - ctx *snow.Context - blkBuilder Builder - - // gossip related attributes - appSender common.AppSender - recentTxs *cache.LRU[ids.ID, struct{}] -} - -func NewNetwork( - ctx *snow.Context, - blkBuilder *builder, - appSender common.AppSender, -) Network { - return &network{ - ctx: ctx, - blkBuilder: blkBuilder, - appSender: appSender, - recentTxs: &cache.LRU[ids.ID, struct{}]{Size: recentCacheSize}, - } -} - -func (*network) CrossChainAppRequestFailed(context.Context, ids.ID, uint32) error { - // This VM currently only supports gossiping of txs, so there are no - // requests. - return nil -} - -func (*network) CrossChainAppRequest(context.Context, ids.ID, uint32, time.Time, []byte) error { - // This VM currently only supports gossiping of txs, so there are no - // requests. - return nil -} - -func (*network) CrossChainAppResponse(context.Context, ids.ID, uint32, []byte) error { - // This VM currently only supports gossiping of txs, so there are no - // requests. - return nil -} - -func (*network) AppRequestFailed(context.Context, ids.NodeID, uint32) error { - // This VM currently only supports gossiping of txs, so there are no - // requests. 
- return nil -} - -func (*network) AppRequest(context.Context, ids.NodeID, uint32, time.Time, []byte) error { - // This VM currently only supports gossiping of txs, so there are no - // requests. - return nil -} - -func (*network) AppResponse(context.Context, ids.NodeID, uint32, []byte) error { - // This VM currently only supports gossiping of txs, so there are no - // requests. - return nil -} - -func (n *network) AppGossip(_ context.Context, nodeID ids.NodeID, msgBytes []byte) error { - n.ctx.Log.Debug("called AppGossip message handler", - zap.Stringer("nodeID", nodeID), - zap.Int("messageLen", len(msgBytes)), - ) - - msgIntf, err := message.Parse(msgBytes) - if err != nil { - n.ctx.Log.Debug("dropping AppGossip message", - zap.String("reason", "failed to parse message"), - ) - return nil - } - - msg, ok := msgIntf.(*message.Tx) - if !ok { - n.ctx.Log.Debug("dropping unexpected message", - zap.Stringer("nodeID", nodeID), - ) - return nil - } - - tx, err := txs.Parse(txs.Codec, msg.Tx) - if err != nil { - n.ctx.Log.Verbo("received invalid tx", - zap.Stringer("nodeID", nodeID), - zap.Binary("tx", msg.Tx), - zap.Error(err), - ) - return nil - } - - txID := tx.ID() - - // We need to grab the context lock here to avoid racy behavior with - // transaction verification + mempool modifications. - n.ctx.Lock.Lock() - defer n.ctx.Lock.Unlock() - - if reason := n.blkBuilder.GetDropReason(txID); reason != nil { - // If the tx is being dropped - just ignore it - return nil - } - - // add to mempool - if err := n.blkBuilder.AddUnverifiedTx(tx); err != nil { - n.ctx.Log.Debug("tx failed verification", - zap.Stringer("nodeID", nodeID), - zap.Error(err), - ) - } - return nil -} - -func (n *network) GossipTx(tx *txs.Tx) error { - txID := tx.ID() - // Don't gossip a transaction if it has been recently gossiped. 
- if _, has := n.recentTxs.Get(txID); has { - return nil - } - n.recentTxs.Put(txID, struct{}{}) - - n.ctx.Log.Debug("gossiping tx", - zap.Stringer("txID", txID), - ) - - msg := &message.Tx{Tx: tx.Bytes()} - msgBytes, err := message.Build(msg) - if err != nil { - return fmt.Errorf("GossipTx: failed to build Tx message: %w", err) - } - return n.appSender.SendAppGossip(context.TODO(), msgBytes) -} diff --git a/avalanchego/vms/platformvm/blocks/builder/network_test.go b/avalanchego/vms/platformvm/blocks/builder/network_test.go deleted file mode 100644 index f29f8061..00000000 --- a/avalanchego/vms/platformvm/blocks/builder/network_test.go +++ /dev/null @@ -1,151 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package builder - -import ( - "context" - "testing" - - "github.com/stretchr/testify/require" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" - "github.com/ava-labs/avalanchego/vms/components/message" - "github.com/ava-labs/avalanchego/vms/platformvm/txs" - - txbuilder "github.com/ava-labs/avalanchego/vms/platformvm/txs/builder" -) - -func getValidTx(txBuilder txbuilder.Builder, t *testing.T) *txs.Tx { - tx, err := txBuilder.NewCreateChainTx( - testSubnet1.ID(), - nil, - constants.AVMID, - nil, - "chain name", - []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, - ids.ShortEmpty, - ) - require.NoError(t, err) - return tx -} - -// show that a tx learned from gossip is validated and added to mempool -func TestMempoolValidGossipedTxIsAddedToMempool(t *testing.T) { - require := require.New(t) - - env := newEnvironment(t) - env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - }() - - var gossipedBytes []byte - env.sender.SendAppGossipF = func(_ context.Context, b []byte) error { - gossipedBytes = b - return nil - } - - nodeID := 
ids.GenerateTestNodeID() - - // create a tx - tx := getValidTx(env.txBuilder, t) - txID := tx.ID() - - msg := message.Tx{Tx: tx.Bytes()} - msgBytes, err := message.Build(&msg) - require.NoError(err) - // Free lock because [AppGossip] waits for the context lock - env.ctx.Lock.Unlock() - // show that unknown tx is added to mempool - err = env.AppGossip(context.Background(), nodeID, msgBytes) - require.NoError(err) - require.True(env.Builder.Has(txID)) - // Grab lock back - env.ctx.Lock.Lock() - - // and gossiped if it has just been discovered - require.True(gossipedBytes != nil) - - // show gossiped bytes can be decoded to the original tx - replyIntf, err := message.Parse(gossipedBytes) - require.NoError(err) - - reply := replyIntf.(*message.Tx) - retrivedTx, err := txs.Parse(txs.Codec, reply.Tx) - require.NoError(err) - - require.Equal(txID, retrivedTx.ID()) -} - -// show that txs already marked as invalid are not re-requested on gossiping -func TestMempoolInvalidGossipedTxIsNotAddedToMempool(t *testing.T) { - require := require.New(t) - - env := newEnvironment(t) - env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - }() - - // create a tx and mark as invalid - tx := getValidTx(env.txBuilder, t) - txID := tx.ID() - env.Builder.MarkDropped(txID, errTestingDropped) - - // show that the invalid tx is not requested - nodeID := ids.GenerateTestNodeID() - msg := message.Tx{Tx: tx.Bytes()} - msgBytes, err := message.Build(&msg) - require.NoError(err) - env.ctx.Lock.Unlock() - err = env.AppGossip(context.Background(), nodeID, msgBytes) - env.ctx.Lock.Lock() - require.NoError(err) - require.False(env.Builder.Has(txID)) -} - -// show that locally generated txs are gossiped -func TestMempoolNewLocaTxIsGossiped(t *testing.T) { - require := require.New(t) - - env := newEnvironment(t) - env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - }() - - var gossipedBytes []byte - env.sender.SendAppGossipF = func(_ 
context.Context, b []byte) error { - gossipedBytes = b - return nil - } - - // add a tx to the mempool and show it gets gossiped - tx := getValidTx(env.txBuilder, t) - txID := tx.ID() - - err := env.Builder.AddUnverifiedTx(tx) - require.NoError(err) - require.True(gossipedBytes != nil) - - // show gossiped bytes can be decoded to the original tx - replyIntf, err := message.Parse(gossipedBytes) - require.NoError(err) - - reply := replyIntf.(*message.Tx) - retrivedTx, err := txs.Parse(txs.Codec, reply.Tx) - require.NoError(err) - - require.Equal(txID, retrivedTx.ID()) - - // show that transaction is not re-gossiped is recently added to mempool - gossipedBytes = nil - env.Builder.Remove([]*txs.Tx{tx}) - err = env.Builder.Add(tx) - require.NoError(err) - - require.True(gossipedBytes == nil) -} diff --git a/avalanchego/vms/platformvm/blocks/executor/block_test.go b/avalanchego/vms/platformvm/blocks/executor/block_test.go deleted file mode 100644 index d27e7e3d..00000000 --- a/avalanchego/vms/platformvm/blocks/executor/block_test.go +++ /dev/null @@ -1,256 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package executor - -import ( - "context" - "testing" - - "github.com/golang/mock/gomock" - - "github.com/stretchr/testify/require" - - "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/choices" - "github.com/ava-labs/avalanchego/snow/consensus/snowman" - "github.com/ava-labs/avalanchego/vms/platformvm/blocks" - "github.com/ava-labs/avalanchego/vms/platformvm/state" -) - -func TestStatus(t *testing.T) { - type test struct { - name string - blockF func(*gomock.Controller) *Block - expectedStatus choices.Status - } - - tests := []test{ - { - name: "last accepted", - blockF: func(ctrl *gomock.Controller) *Block { - blkID := ids.GenerateTestID() - statelessBlk := blocks.NewMockBlock(ctrl) - statelessBlk.EXPECT().ID().Return(blkID) - - manager := &manager{ - backend: &backend{ - lastAccepted: blkID, - }, - } - - return &Block{ - Block: statelessBlk, - manager: manager, - } - }, - expectedStatus: choices.Accepted, - }, - { - name: "processing", - blockF: func(ctrl *gomock.Controller) *Block { - blkID := ids.GenerateTestID() - statelessBlk := blocks.NewMockBlock(ctrl) - statelessBlk.EXPECT().ID().Return(blkID) - - manager := &manager{ - backend: &backend{ - blkIDToState: map[ids.ID]*blockState{ - blkID: {}, - }, - }, - } - return &Block{ - Block: statelessBlk, - manager: manager, - } - }, - expectedStatus: choices.Processing, - }, - { - name: "in database", - blockF: func(ctrl *gomock.Controller) *Block { - blkID := ids.GenerateTestID() - statelessBlk := blocks.NewMockBlock(ctrl) - statelessBlk.EXPECT().ID().Return(blkID) - - state := state.NewMockState(ctrl) - state.EXPECT().GetStatelessBlock(blkID).Return(statelessBlk, choices.Accepted, nil) - - manager := &manager{ - backend: &backend{ - state: state, - }, - } - return &Block{ - Block: statelessBlk, - manager: manager, - } - }, - expectedStatus: choices.Accepted, - }, - { - name: "not in map or database", - blockF: func(ctrl *gomock.Controller) 
*Block { - blkID := ids.GenerateTestID() - statelessBlk := blocks.NewMockBlock(ctrl) - statelessBlk.EXPECT().ID().Return(blkID) - - state := state.NewMockState(ctrl) - state.EXPECT().GetStatelessBlock(blkID).Return(nil, choices.Unknown, database.ErrNotFound) - - manager := &manager{ - backend: &backend{ - state: state, - }, - } - return &Block{ - Block: statelessBlk, - manager: manager, - } - }, - expectedStatus: choices.Processing, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - blk := tt.blockF(ctrl) - require.Equal(t, tt.expectedStatus, blk.Status()) - }) - } -} - -func TestBlockOptions(t *testing.T) { - type test struct { - name string - blkF func() *Block - expectedPreferenceType blocks.Block - expectedErr error - } - - tests := []test{ - { - name: "apricot proposal block; commit preferred", - blkF: func() *Block { - innerBlk := &blocks.ApricotProposalBlock{} - blkID := innerBlk.ID() - - manager := &manager{ - backend: &backend{ - blkIDToState: map[ids.ID]*blockState{ - blkID: { - proposalBlockState: proposalBlockState{ - initiallyPreferCommit: true, - }, - }, - }, - }, - } - - return &Block{ - Block: innerBlk, - manager: manager, - } - }, - expectedPreferenceType: &blocks.ApricotCommitBlock{}, - }, - { - name: "apricot proposal block; abort preferred", - blkF: func() *Block { - innerBlk := &blocks.ApricotProposalBlock{} - blkID := innerBlk.ID() - - manager := &manager{ - backend: &backend{ - blkIDToState: map[ids.ID]*blockState{ - blkID: {}, - }, - }, - } - - return &Block{ - Block: innerBlk, - manager: manager, - } - }, - expectedPreferenceType: &blocks.ApricotAbortBlock{}, - }, - { - name: "banff proposal block; commit preferred", - blkF: func() *Block { - innerBlk := &blocks.BanffProposalBlock{} - blkID := innerBlk.ID() - - manager := &manager{ - backend: &backend{ - blkIDToState: map[ids.ID]*blockState{ - blkID: { - proposalBlockState: proposalBlockState{ - 
initiallyPreferCommit: true, - }, - }, - }, - }, - } - - return &Block{ - Block: innerBlk, - manager: manager, - } - }, - expectedPreferenceType: &blocks.BanffCommitBlock{}, - }, - { - name: "banff proposal block; abort preferred", - blkF: func() *Block { - innerBlk := &blocks.BanffProposalBlock{} - blkID := innerBlk.ID() - - manager := &manager{ - backend: &backend{ - blkIDToState: map[ids.ID]*blockState{ - blkID: {}, - }, - }, - } - - return &Block{ - Block: innerBlk, - manager: manager, - } - }, - expectedPreferenceType: &blocks.BanffAbortBlock{}, - }, - { - name: "non oracle block", - blkF: func() *Block { - return &Block{ - Block: &blocks.BanffStandardBlock{}, - manager: &manager{}, - } - }, - expectedErr: snowman.ErrNotOracle, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - require := require.New(t) - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - blk := tt.blkF() - options, err := blk.Options(context.Background()) - if tt.expectedErr != nil { - require.ErrorIs(err, tt.expectedErr) - return - } - require.IsType(tt.expectedPreferenceType, options[0].(*Block).Block) - }) - } -} diff --git a/avalanchego/vms/platformvm/blocks/executor/manager.go b/avalanchego/vms/platformvm/blocks/executor/manager.go deleted file mode 100644 index b552bbc1..00000000 --- a/avalanchego/vms/platformvm/blocks/executor/manager.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package executor - -import ( - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/consensus/snowman" - "github.com/ava-labs/avalanchego/utils/window" - "github.com/ava-labs/avalanchego/vms/platformvm/blocks" - "github.com/ava-labs/avalanchego/vms/platformvm/metrics" - "github.com/ava-labs/avalanchego/vms/platformvm/state" - "github.com/ava-labs/avalanchego/vms/platformvm/txs/executor" - "github.com/ava-labs/avalanchego/vms/platformvm/txs/mempool" -) - -var _ Manager = (*manager)(nil) - -type Manager interface { - state.Versions - - // Returns the ID of the most recently accepted block. - LastAccepted() ids.ID - GetBlock(blkID ids.ID) (snowman.Block, error) - GetStatelessBlock(blkID ids.ID) (blocks.Block, error) - NewBlock(blocks.Block) snowman.Block -} - -func NewManager( - mempool mempool.Mempool, - metrics metrics.Metrics, - s state.State, - txExecutorBackend *executor.Backend, - recentlyAccepted window.Window[ids.ID], -) Manager { - backend := &backend{ - Mempool: mempool, - lastAccepted: s.GetLastAccepted(), - state: s, - ctx: txExecutorBackend.Ctx, - blkIDToState: map[ids.ID]*blockState{}, - } - - return &manager{ - backend: backend, - verifier: &verifier{ - backend: backend, - txExecutorBackend: txExecutorBackend, - }, - acceptor: &acceptor{ - backend: backend, - metrics: metrics, - recentlyAccepted: recentlyAccepted, - bootstrapped: txExecutorBackend.Bootstrapped, - }, - rejector: &rejector{backend: backend}, - } -} - -type manager struct { - *backend - verifier blocks.Visitor - acceptor blocks.Visitor - rejector blocks.Visitor -} - -func (m *manager) GetBlock(blkID ids.ID) (snowman.Block, error) { - blk, err := m.backend.GetBlock(blkID) - if err != nil { - return nil, err - } - return m.NewBlock(blk), nil -} - -func (m *manager) GetStatelessBlock(blkID ids.ID) (blocks.Block, error) { - return m.backend.GetBlock(blkID) -} - -func (m *manager) NewBlock(blk blocks.Block) snowman.Block { - return &Block{ - manager: m, - Block: blk, 
- } -} diff --git a/avalanchego/vms/platformvm/blocks/executor/mock_manager.go b/avalanchego/vms/platformvm/blocks/executor/mock_manager.go deleted file mode 100644 index 64be8c70..00000000 --- a/avalanchego/vms/platformvm/blocks/executor/mock_manager.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/ava-labs/avalanchego/vms/platformvm/blocks/executor (interfaces: Manager) - -// Package executor is a generated GoMock package. -package executor - -import ( - reflect "reflect" - - ids "github.com/ava-labs/avalanchego/ids" - snowman "github.com/ava-labs/avalanchego/snow/consensus/snowman" - blocks "github.com/ava-labs/avalanchego/vms/platformvm/blocks" - state "github.com/ava-labs/avalanchego/vms/platformvm/state" - gomock "github.com/golang/mock/gomock" -) - -// MockManager is a mock of Manager interface. -type MockManager struct { - ctrl *gomock.Controller - recorder *MockManagerMockRecorder -} - -// MockManagerMockRecorder is the mock recorder for MockManager. -type MockManagerMockRecorder struct { - mock *MockManager -} - -// NewMockManager creates a new mock instance. -func NewMockManager(ctrl *gomock.Controller) *MockManager { - mock := &MockManager{ctrl: ctrl} - mock.recorder = &MockManagerMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockManager) EXPECT() *MockManagerMockRecorder { - return m.recorder -} - -// GetBlock mocks base method. -func (m *MockManager) GetBlock(arg0 ids.ID) (snowman.Block, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBlock", arg0) - ret0, _ := ret[0].(snowman.Block) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBlock indicates an expected call of GetBlock. 
-func (mr *MockManagerMockRecorder) GetBlock(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlock", reflect.TypeOf((*MockManager)(nil).GetBlock), arg0) -} - -// GetState mocks base method. -func (m *MockManager) GetState(arg0 ids.ID) (state.Chain, bool) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetState", arg0) - ret0, _ := ret[0].(state.Chain) - ret1, _ := ret[1].(bool) - return ret0, ret1 -} - -// GetState indicates an expected call of GetState. -func (mr *MockManagerMockRecorder) GetState(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetState", reflect.TypeOf((*MockManager)(nil).GetState), arg0) -} - -// GetStatelessBlock mocks base method. -func (m *MockManager) GetStatelessBlock(arg0 ids.ID) (blocks.Block, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetStatelessBlock", arg0) - ret0, _ := ret[0].(blocks.Block) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetStatelessBlock indicates an expected call of GetStatelessBlock. -func (mr *MockManagerMockRecorder) GetStatelessBlock(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetStatelessBlock", reflect.TypeOf((*MockManager)(nil).GetStatelessBlock), arg0) -} - -// LastAccepted mocks base method. -func (m *MockManager) LastAccepted() ids.ID { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "LastAccepted") - ret0, _ := ret[0].(ids.ID) - return ret0 -} - -// LastAccepted indicates an expected call of LastAccepted. -func (mr *MockManagerMockRecorder) LastAccepted() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LastAccepted", reflect.TypeOf((*MockManager)(nil).LastAccepted)) -} - -// NewBlock mocks base method. 
-func (m *MockManager) NewBlock(arg0 blocks.Block) snowman.Block { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NewBlock", arg0) - ret0, _ := ret[0].(snowman.Block) - return ret0 -} - -// NewBlock indicates an expected call of NewBlock. -func (mr *MockManagerMockRecorder) NewBlock(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewBlock", reflect.TypeOf((*MockManager)(nil).NewBlock), arg0) -} diff --git a/avalanchego/vms/platformvm/blocks/executor/options.go b/avalanchego/vms/platformvm/blocks/executor/options.go deleted file mode 100644 index 8c0bf3af..00000000 --- a/avalanchego/vms/platformvm/blocks/executor/options.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package executor - -import ( - "fmt" - - "github.com/ava-labs/avalanchego/snow/consensus/snowman" - "github.com/ava-labs/avalanchego/vms/platformvm/blocks" -) - -var _ blocks.Visitor = (*verifier)(nil) - -// options supports build new option blocks -type options struct { - // outputs populated by this struct's methods: - commitBlock blocks.Block - abortBlock blocks.Block -} - -func (*options) BanffAbortBlock(*blocks.BanffAbortBlock) error { - return snowman.ErrNotOracle -} - -func (*options) BanffCommitBlock(*blocks.BanffCommitBlock) error { - return snowman.ErrNotOracle -} - -func (o *options) BanffProposalBlock(b *blocks.BanffProposalBlock) error { - timestamp := b.Timestamp() - blkID := b.ID() - nextHeight := b.Height() + 1 - - var err error - o.commitBlock, err = blocks.NewBanffCommitBlock(timestamp, blkID, nextHeight) - if err != nil { - return fmt.Errorf( - "failed to create commit block: %w", - err, - ) - } - - o.abortBlock, err = blocks.NewBanffAbortBlock(timestamp, blkID, nextHeight) - if err != nil { - return fmt.Errorf( - "failed to create abort block: %w", - err, - ) - } - return nil -} - -func (*options) 
BanffStandardBlock(*blocks.BanffStandardBlock) error { - return snowman.ErrNotOracle -} - -func (*options) ApricotAbortBlock(*blocks.ApricotAbortBlock) error { - return snowman.ErrNotOracle -} - -func (*options) ApricotCommitBlock(*blocks.ApricotCommitBlock) error { - return snowman.ErrNotOracle -} - -func (o *options) ApricotProposalBlock(b *blocks.ApricotProposalBlock) error { - blkID := b.ID() - nextHeight := b.Height() + 1 - - var err error - o.commitBlock, err = blocks.NewApricotCommitBlock(blkID, nextHeight) - if err != nil { - return fmt.Errorf( - "failed to create commit block: %w", - err, - ) - } - - o.abortBlock, err = blocks.NewApricotAbortBlock(blkID, nextHeight) - if err != nil { - return fmt.Errorf( - "failed to create abort block: %w", - err, - ) - } - return nil -} - -func (*options) ApricotStandardBlock(*blocks.ApricotStandardBlock) error { - return snowman.ErrNotOracle -} - -func (*options) ApricotAtomicBlock(*blocks.ApricotAtomicBlock) error { - return snowman.ErrNotOracle -} diff --git a/avalanchego/vms/platformvm/blocks/proposal_block_test.go b/avalanchego/vms/platformvm/blocks/proposal_block_test.go deleted file mode 100644 index 50affc5f..00000000 --- a/avalanchego/vms/platformvm/blocks/proposal_block_test.go +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package blocks - -import ( - "testing" - "time" - - "github.com/stretchr/testify/require" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/vms/components/avax" - "github.com/ava-labs/avalanchego/vms/components/verify" - "github.com/ava-labs/avalanchego/vms/platformvm/txs" - "github.com/ava-labs/avalanchego/vms/secp256k1fx" -) - -func TestNewBanffProposalBlock(t *testing.T) { - require := require.New(t) - - timestamp := time.Now().Truncate(time.Second) - parentID := ids.GenerateTestID() - height := uint64(1337) - - tx := &txs.Tx{ - Unsigned: &txs.AddValidatorTx{ - BaseTx: txs.BaseTx{ - BaseTx: avax.BaseTx{ - Ins: []*avax.TransferableInput{}, - Outs: []*avax.TransferableOutput{}, - }, - }, - StakeOuts: []*avax.TransferableOutput{}, - Validator: txs.Validator{}, - RewardsOwner: &secp256k1fx.OutputOwners{ - Addrs: []ids.ShortID{}, - }, - }, - Creds: []verify.Verifiable{}, - } - require.NoError(tx.Initialize(txs.Codec)) - - blk, err := NewBanffProposalBlock( - timestamp, - parentID, - height, - tx, - ) - require.NoError(err) - - // Make sure the block and tx are initialized - require.NotEmpty(blk.Bytes()) - require.NotEmpty(blk.Tx.Bytes()) - require.NotEqual(ids.Empty, blk.Tx.ID()) - require.Equal(tx.Bytes(), blk.Tx.Bytes()) - require.Equal(timestamp, blk.Timestamp()) - require.Equal(parentID, blk.Parent()) - require.Equal(height, blk.Height()) -} - -func TestNewApricotProposalBlock(t *testing.T) { - require := require.New(t) - - parentID := ids.GenerateTestID() - height := uint64(1337) - - tx := &txs.Tx{ - Unsigned: &txs.AddValidatorTx{ - BaseTx: txs.BaseTx{ - BaseTx: avax.BaseTx{ - Ins: []*avax.TransferableInput{}, - Outs: []*avax.TransferableOutput{}, - }, - }, - StakeOuts: []*avax.TransferableOutput{}, - Validator: txs.Validator{}, - RewardsOwner: &secp256k1fx.OutputOwners{ - Addrs: []ids.ShortID{}, - }, - }, - Creds: []verify.Verifiable{}, - } - require.NoError(tx.Initialize(txs.Codec)) - - blk, err := NewApricotProposalBlock( - 
parentID, - height, - tx, - ) - require.NoError(err) - - // Make sure the block and tx are initialized - require.NotEmpty(blk.Bytes()) - require.NotEmpty(blk.Tx.Bytes()) - require.NotEqual(ids.Empty, blk.Tx.ID()) - require.Equal(tx.Bytes(), blk.Tx.Bytes()) - require.Equal(parentID, blk.Parent()) - require.Equal(height, blk.Height()) -} diff --git a/avalanchego/vms/platformvm/client.go b/avalanchego/vms/platformvm/client.go index 8a0cf008..962492ed 100644 --- a/avalanchego/vms/platformvm/client.go +++ b/avalanchego/vms/platformvm/client.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package platformvm @@ -9,6 +9,7 @@ import ( "github.com/ava-labs/avalanchego/api" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/formatting" @@ -66,6 +67,8 @@ type Client interface { startUTXOID ids.ID, options ...rpc.Option, ) ([][]byte, ids.ShortID, ids.ID, error) + // GetSubnet returns information about the specified subnet + GetSubnet(ctx context.Context, subnetID ids.ID, options ...rpc.Option) (GetSubnetClientResponse, error) // GetSubnets returns information about the specified subnets // // Deprecated: Subnets should be fetched from a dedicated indexer. 
@@ -77,8 +80,8 @@ type Client interface { GetCurrentValidators(ctx context.Context, subnetID ids.ID, nodeIDs []ids.NodeID, options ...rpc.Option) ([]ClientPermissionlessValidator, error) // GetPendingValidators returns the list of pending validators for subnet with ID [subnetID] GetPendingValidators(ctx context.Context, subnetID ids.ID, nodeIDs []ids.NodeID, options ...rpc.Option) ([]interface{}, []interface{}, error) - // GetCurrentSupply returns an upper bound on the supply of AVAX in the system - GetCurrentSupply(ctx context.Context, subnetID ids.ID, options ...rpc.Option) (uint64, error) + // GetCurrentSupply returns an upper bound on the supply of AVAX in the system along with the P-chain height + GetCurrentSupply(ctx context.Context, subnetID ids.ID, options ...rpc.Option) (uint64, uint64, error) // SampleValidators returns the nodeIDs of a sample of [sampleSize] validators from the current validator set for subnet with ID [subnetID] SampleValidators(ctx context.Context, subnetID ids.ID, sampleSize uint16, options ...rpc.Option) ([]ids.NodeID, error) // AddValidator issues a transaction to add a validator to the primary network @@ -220,7 +223,12 @@ type Client interface { // // Deprecated: Stake should be calculated using GetTx, GetCurrentValidators, // and GetPendingValidators. 
- GetStake(ctx context.Context, addrs []ids.ShortID, options ...rpc.Option) (map[ids.ID]uint64, [][]byte, error) + GetStake( + ctx context.Context, + addrs []ids.ShortID, + validatorsOnly bool, + options ...rpc.Option, + ) (map[ids.ID]uint64, [][]byte, error) // GetMinStake returns the minimum staking amount in nAVAX for validators // and delegators respectively GetMinStake(ctx context.Context, subnetID ids.ID, options ...rpc.Option) (uint64, uint64, error) @@ -245,11 +253,18 @@ type Client interface { GetRewardUTXOs(context.Context, *api.GetTxArgs, ...rpc.Option) ([][]byte, error) // GetTimestamp returns the current chain timestamp GetTimestamp(ctx context.Context, options ...rpc.Option) (time.Time, error) - // GetValidatorsAt returns the weights of the validator set of a provided subnet - // at the specified height. - GetValidatorsAt(ctx context.Context, subnetID ids.ID, height uint64, options ...rpc.Option) (map[ids.NodeID]uint64, error) + // GetValidatorsAt returns the weights of the validator set of a provided + // subnet at the specified height. + GetValidatorsAt( + ctx context.Context, + subnetID ids.ID, + height uint64, + options ...rpc.Option, + ) (map[ids.NodeID]*validators.GetValidatorOutput, error) // GetBlock returns the block with the given id. GetBlock(ctx context.Context, blockID ids.ID, options ...rpc.Option) ([]byte, error) + // GetBlockByHeight returns the block at the given [height]. 
+ GetBlockByHeight(ctx context.Context, height uint64, options ...rpc.Option) ([]byte, error) } // Client implementation for interacting with the P Chain endpoint @@ -368,6 +383,40 @@ func (c *client) GetAtomicUTXOs( return utxos, endAddr, endUTXOID, err } +// GetSubnetClientResponse is the response from calling GetSubnet on the client +type GetSubnetClientResponse struct { + // whether it is permissioned or not + IsPermissioned bool + // subnet auth information for a permissioned subnet + ControlKeys []ids.ShortID + Threshold uint32 + Locktime uint64 + // subnet transformation tx ID for a permissionless subnet + SubnetTransformationTxID ids.ID +} + +func (c *client) GetSubnet(ctx context.Context, subnetID ids.ID, options ...rpc.Option) (GetSubnetClientResponse, error) { + res := &GetSubnetResponse{} + err := c.requester.SendRequest(ctx, "platform.getSubnet", &GetSubnetArgs{ + SubnetID: subnetID, + }, res, options...) + if err != nil { + return GetSubnetClientResponse{}, err + } + controlKeys, err := address.ParseToIDs(res.ControlKeys) + if err != nil { + return GetSubnetClientResponse{}, err + } + + return GetSubnetClientResponse{ + IsPermissioned: res.IsPermissioned, + ControlKeys: controlKeys, + Threshold: uint32(res.Threshold), + Locktime: uint64(res.Locktime), + SubnetTransformationTxID: res.SubnetTransformationTxID, + }, nil +} + // ClientSubnet is a representation of a subnet used in client methods type ClientSubnet struct { // ID of the subnet @@ -442,12 +491,12 @@ func (c *client) GetPendingValidators( return res.Validators, res.Delegators, err } -func (c *client) GetCurrentSupply(ctx context.Context, subnetID ids.ID, options ...rpc.Option) (uint64, error) { +func (c *client) GetCurrentSupply(ctx context.Context, subnetID ids.ID, options ...rpc.Option) (uint64, uint64, error) { res := &GetCurrentSupplyReply{} err := c.requester.SendRequest(ctx, "platform.getCurrentSupply", &GetCurrentSupplyArgs{ SubnetID: subnetID, }, res, options...) 
- return uint64(res.Supply), err + return uint64(res.Supply), uint64(res.Height), err } func (c *client) SampleValidators(ctx context.Context, subnetID ids.ID, sampleSize uint16, options ...rpc.Option) ([]ids.NodeID, error) { @@ -718,7 +767,7 @@ func (c *client) GetTx(ctx context.Context, txID ids.ID, options ...rpc.Option) } func (c *client) GetTxStatus(ctx context.Context, txID ids.ID, options ...rpc.Option) (*GetTxStatusResponse, error) { - res := new(GetTxStatusResponse) + res := &GetTxStatusResponse{} err := c.requester.SendRequest( ctx, "platform.getTxStatus", @@ -752,13 +801,19 @@ func (c *client) AwaitTxDecided(ctx context.Context, txID ids.ID, freq time.Dura } } -func (c *client) GetStake(ctx context.Context, addrs []ids.ShortID, options ...rpc.Option) (map[ids.ID]uint64, [][]byte, error) { - res := new(GetStakeReply) +func (c *client) GetStake( + ctx context.Context, + addrs []ids.ShortID, + validatorsOnly bool, + options ...rpc.Option, +) (map[ids.ID]uint64, [][]byte, error) { + res := &GetStakeReply{} err := c.requester.SendRequest(ctx, "platform.getStake", &GetStakeArgs{ JSONAddresses: api.JSONAddresses{ Addresses: ids.ShortIDsToStrings(addrs), }, - Encoding: formatting.Hex, + ValidatorsOnly: validatorsOnly, + Encoding: formatting.Hex, }, res, options...) if err != nil { return nil, nil, err @@ -781,7 +836,7 @@ func (c *client) GetStake(ctx context.Context, addrs []ids.ShortID, options ...r } func (c *client) GetMinStake(ctx context.Context, subnetID ids.ID, options ...rpc.Option) (uint64, uint64, error) { - res := new(GetMinStakeReply) + res := &GetMinStakeReply{} err := c.requester.SendRequest(ctx, "platform.getMinStake", &GetMinStakeArgs{ SubnetID: subnetID, }, res, options...) 
@@ -789,7 +844,7 @@ func (c *client) GetMinStake(ctx context.Context, subnetID ids.ID, options ...rp } func (c *client) GetTotalStake(ctx context.Context, subnetID ids.ID, options ...rpc.Option) (uint64, error) { - res := new(GetTotalStakeReply) + res := &GetTotalStakeReply{} err := c.requester.SendRequest(ctx, "platform.getTotalStake", &GetTotalStakeArgs{ SubnetID: subnetID, }, res, options...) @@ -803,7 +858,7 @@ func (c *client) GetTotalStake(ctx context.Context, subnetID ids.ID, options ... } func (c *client) GetMaxStakeAmount(ctx context.Context, subnetID ids.ID, nodeID ids.NodeID, startTime, endTime uint64, options ...rpc.Option) (uint64, error) { - res := new(GetMaxStakeAmountReply) + res := &GetMaxStakeAmountReply{} err := c.requester.SendRequest(ctx, "platform.getMaxStakeAmount", &GetMaxStakeAmountArgs{ SubnetID: subnetID, NodeID: nodeID, @@ -836,7 +891,12 @@ func (c *client) GetTimestamp(ctx context.Context, options ...rpc.Option) (time. return res.Timestamp, err } -func (c *client) GetValidatorsAt(ctx context.Context, subnetID ids.ID, height uint64, options ...rpc.Option) (map[ids.NodeID]uint64, error) { +func (c *client) GetValidatorsAt( + ctx context.Context, + subnetID ids.ID, + height uint64, + options ...rpc.Option, +) (map[ids.NodeID]*validators.GetValidatorOutput, error) { res := &GetValidatorsAtReply{} err := c.requester.SendRequest(ctx, "platform.getValidatorsAt", &GetValidatorsAtArgs{ SubnetID: subnetID, @@ -846,13 +906,24 @@ func (c *client) GetValidatorsAt(ctx context.Context, subnetID ids.ID, height ui } func (c *client) GetBlock(ctx context.Context, blockID ids.ID, options ...rpc.Option) ([]byte, error) { - response := &api.FormattedBlock{} + res := &api.FormattedBlock{} if err := c.requester.SendRequest(ctx, "platform.getBlock", &api.GetBlockArgs{ BlockID: blockID, Encoding: formatting.Hex, - }, response, options...); err != nil { + }, res, options...); err != nil { return nil, err } + return formatting.Decode(res.Encoding, res.Block) +} - 
return formatting.Decode(response.Encoding, response.Block) +func (c *client) GetBlockByHeight(ctx context.Context, height uint64, options ...rpc.Option) ([]byte, error) { + res := &api.FormattedBlock{} + err := c.requester.SendRequest(ctx, "platform.getBlockByHeight", &api.GetBlockByHeightArgs{ + Height: json.Uint64(height), + Encoding: formatting.HexNC, + }, res, options...) + if err != nil { + return nil, err + } + return formatting.Decode(res.Encoding, res.Block) } diff --git a/avalanchego/vms/platformvm/client_permissionless_validator.go b/avalanchego/vms/platformvm/client_permissionless_validator.go index c9baac85..3974f770 100644 --- a/avalanchego/vms/platformvm/client_permissionless_validator.go +++ b/avalanchego/vms/platformvm/client_permissionless_validator.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package platformvm diff --git a/avalanchego/vms/platformvm/config/config.go b/avalanchego/vms/platformvm/config/config.go index 02b3e6f3..50628c42 100644 --- a/avalanchego/vms/platformvm/config/config.go +++ b/avalanchego/vms/platformvm/config/config.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package config @@ -33,7 +33,10 @@ type Config struct { UptimeLockedCalculator uptime.LockedCalculator // True if the node is being run with staking enabled - StakingEnabled bool + SybilProtectionEnabled bool + + // If true, only the P-chain will be instantiated on the primary network. 
+ PartialSyncPrimaryNetwork bool // Set of subnets that this node is validating TrackedSubnets set.Set[ids.ID] @@ -101,14 +104,8 @@ type Config struct { // Time of the Cortina network upgrade CortinaTime time.Time - // Subnet ID --> Minimum portion of the subnet's stake this node must be - // connected to in order to report healthy. - // [constants.PrimaryNetworkID] is always a key in this map. - // If a subnet is in this map, but it isn't tracked, its corresponding value - // isn't used. - // If a subnet is tracked but not in this map, we use the value for the - // Primary Network. - MinPercentConnectedStakeHealthy map[ids.ID]float64 + // Time of the Durango network upgrade + DurangoTime time.Time // UseCurrentHeight forces [GetMinimumHeight] to return the current height // of the P-Chain instead of the oldest block in the [recentlyAccepted] @@ -132,6 +129,14 @@ func (c *Config) IsBanffActivated(timestamp time.Time) bool { return !timestamp.Before(c.BanffTime) } +func (c *Config) IsCortinaActivated(timestamp time.Time) bool { + return !timestamp.Before(c.CortinaTime) +} + +func (c *Config) IsDurangoActivated(timestamp time.Time) bool { + return !timestamp.Before(c.DurangoTime) +} + func (c *Config) GetCreateBlockchainTxFee(timestamp time.Time) uint64 { if c.IsApricotPhase3Activated(timestamp) { return c.CreateBlockchainTxFee @@ -149,7 +154,7 @@ func (c *Config) GetCreateSubnetTxFee(timestamp time.Time) uint64 { // Create the blockchain described in [tx], but only if this node is a member of // the subnet that validates the chain func (c *Config) CreateChain(chainID ids.ID, tx *txs.CreateChainTx) { - if c.StakingEnabled && // Staking is enabled, so nodes might not validate all chains + if c.SybilProtectionEnabled && // Sybil protection is enabled, so nodes might not validate all chains constants.PrimaryNetworkID != tx.SubnetID && // All nodes must validate the primary network !c.TrackedSubnets.Contains(tx.SubnetID) { // This node doesn't validate this blockchain 
return diff --git a/avalanchego/vms/platformvm/config/execution_config.go b/avalanchego/vms/platformvm/config/execution_config.go new file mode 100644 index 00000000..e182758e --- /dev/null +++ b/avalanchego/vms/platformvm/config/execution_config.go @@ -0,0 +1,55 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package config + +import ( + "encoding/json" + "time" + + "github.com/ava-labs/avalanchego/utils/units" + "github.com/ava-labs/avalanchego/vms/platformvm/network" +) + +var DefaultExecutionConfig = ExecutionConfig{ + Network: network.DefaultConfig, + BlockCacheSize: 64 * units.MiB, + TxCacheSize: 128 * units.MiB, + TransformedSubnetTxCacheSize: 4 * units.MiB, + RewardUTXOsCacheSize: 2048, + ChainCacheSize: 2048, + ChainDBCacheSize: 2048, + BlockIDCacheSize: 8192, + FxOwnerCacheSize: 4 * units.MiB, + ChecksumsEnabled: false, + MempoolPruneFrequency: 30 * time.Minute, +} + +// ExecutionConfig provides execution parameters of PlatformVM +type ExecutionConfig struct { + Network network.Config `json:"network"` + BlockCacheSize int `json:"block-cache-size"` + TxCacheSize int `json:"tx-cache-size"` + TransformedSubnetTxCacheSize int `json:"transformed-subnet-tx-cache-size"` + RewardUTXOsCacheSize int `json:"reward-utxos-cache-size"` + ChainCacheSize int `json:"chain-cache-size"` + ChainDBCacheSize int `json:"chain-db-cache-size"` + BlockIDCacheSize int `json:"block-id-cache-size"` + FxOwnerCacheSize int `json:"fx-owner-cache-size"` + ChecksumsEnabled bool `json:"checksums-enabled"` + MempoolPruneFrequency time.Duration `json:"mempool-prune-frequency"` +} + +// GetExecutionConfig returns an ExecutionConfig +// input is unmarshalled into an ExecutionConfig previously +// initialized with default values +func GetExecutionConfig(b []byte) (*ExecutionConfig, error) { + ec := DefaultExecutionConfig + + // if bytes are empty keep default values + if len(b) == 0 { + return &ec, nil + } + + return &ec, 
json.Unmarshal(b, &ec) +} diff --git a/avalanchego/vms/platformvm/config/execution_config_test.go b/avalanchego/vms/platformvm/config/execution_config_test.go new file mode 100644 index 00000000..89fd5cd5 --- /dev/null +++ b/avalanchego/vms/platformvm/config/execution_config_test.go @@ -0,0 +1,145 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package config + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/vms/platformvm/network" +) + +func TestExecutionConfigUnmarshal(t *testing.T) { + t.Run("default values from empty json", func(t *testing.T) { + require := require.New(t) + b := []byte(`{}`) + ec, err := GetExecutionConfig(b) + require.NoError(err) + require.Equal(&DefaultExecutionConfig, ec) + }) + + t.Run("default values from empty bytes", func(t *testing.T) { + require := require.New(t) + b := []byte(``) + ec, err := GetExecutionConfig(b) + require.NoError(err) + require.Equal(&DefaultExecutionConfig, ec) + }) + + t.Run("mix default and extracted values from json", func(t *testing.T) { + require := require.New(t) + b := []byte(`{"block-cache-size":1}`) + ec, err := GetExecutionConfig(b) + require.NoError(err) + expected := DefaultExecutionConfig + expected.BlockCacheSize = 1 + require.Equal(&expected, ec) + }) + + t.Run("all values extracted from json", func(t *testing.T) { + require := require.New(t) + b := []byte(`{ + "network": { + "max-validator-set-staleness": 1, + "target-gossip-size": 2, + "pull-gossip-poll-size": 3, + "pull-gossip-frequency": 4, + "pull-gossip-throttling-period": 5, + "pull-gossip-throttling-limit": 6, + "expected-bloom-filter-elements":7, + "expected-bloom-filter-false-positive-probability": 8, + "max-bloom-filter-false-positive-probability": 9, + "legacy-push-gossip-cache-size": 10 + }, + "block-cache-size": 1, + "tx-cache-size": 2, + "transformed-subnet-tx-cache-size": 3, + "reward-utxos-cache-size": 
5, + "chain-cache-size": 6, + "chain-db-cache-size": 7, + "block-id-cache-size": 8, + "fx-owner-cache-size": 9, + "checksums-enabled": true, + "mempool-prune-frequency": 60000000000 + }`) + ec, err := GetExecutionConfig(b) + require.NoError(err) + expected := &ExecutionConfig{ + Network: network.Config{ + MaxValidatorSetStaleness: 1, + TargetGossipSize: 2, + PullGossipPollSize: 3, + PullGossipFrequency: 4, + PullGossipThrottlingPeriod: 5, + PullGossipThrottlingLimit: 6, + ExpectedBloomFilterElements: 7, + ExpectedBloomFilterFalsePositiveProbability: 8, + MaxBloomFilterFalsePositiveProbability: 9, + LegacyPushGossipCacheSize: 10, + }, + BlockCacheSize: 1, + TxCacheSize: 2, + TransformedSubnetTxCacheSize: 3, + RewardUTXOsCacheSize: 5, + ChainCacheSize: 6, + ChainDBCacheSize: 7, + BlockIDCacheSize: 8, + FxOwnerCacheSize: 9, + ChecksumsEnabled: true, + MempoolPruneFrequency: time.Minute, + } + require.Equal(expected, ec) + }) + + t.Run("default values applied correctly", func(t *testing.T) { + require := require.New(t) + b := []byte(`{ + "network": { + "max-validator-set-staleness": 1, + "target-gossip-size": 2, + "pull-gossip-poll-size": 3, + "pull-gossip-frequency": 4, + "pull-gossip-throttling-period": 5 + }, + "block-cache-size": 1, + "tx-cache-size": 2, + "transformed-subnet-tx-cache-size": 3, + "reward-utxos-cache-size": 5, + "chain-cache-size": 6, + "chain-db-cache-size": 7, + "block-id-cache-size": 8, + "fx-owner-cache-size": 9, + "checksums-enabled": true + }`) + ec, err := GetExecutionConfig(b) + require.NoError(err) + expected := &ExecutionConfig{ + Network: network.Config{ + MaxValidatorSetStaleness: 1, + TargetGossipSize: 2, + PullGossipPollSize: 3, + PullGossipFrequency: 4, + PullGossipThrottlingPeriod: 5, + PullGossipThrottlingLimit: DefaultExecutionConfig.Network.PullGossipThrottlingLimit, + ExpectedBloomFilterElements: DefaultExecutionConfig.Network.ExpectedBloomFilterElements, + ExpectedBloomFilterFalsePositiveProbability: 
DefaultExecutionConfig.Network.ExpectedBloomFilterFalsePositiveProbability, + MaxBloomFilterFalsePositiveProbability: DefaultExecutionConfig.Network.MaxBloomFilterFalsePositiveProbability, + LegacyPushGossipCacheSize: DefaultExecutionConfig.Network.LegacyPushGossipCacheSize, + }, + BlockCacheSize: 1, + TxCacheSize: 2, + TransformedSubnetTxCacheSize: 3, + RewardUTXOsCacheSize: 5, + ChainCacheSize: 6, + ChainDBCacheSize: 7, + BlockIDCacheSize: 8, + FxOwnerCacheSize: 9, + ChecksumsEnabled: true, + MempoolPruneFrequency: 30 * time.Minute, + } + require.Equal(expected, ec) + }) +} diff --git a/avalanchego/vms/platformvm/docs/block_formation_logic.md b/avalanchego/vms/platformvm/docs/block_formation_logic.md index a9502afc..4f049c51 100644 --- a/avalanchego/vms/platformvm/docs/block_formation_logic.md +++ b/avalanchego/vms/platformvm/docs/block_formation_logic.md @@ -46,7 +46,7 @@ While the above steps could be executed in any order, we pick decisions transact Once all possibilities of create a block not advancing chain time are exhausted, we attempt to build a block which _may_ advance chain time as follows: -1. If the local clock's time is greater than or equal to the earliest change-event timestamp of the staker set, an advance time transaction is issued into a Proposal Block to move current chain time to the earliest change timestamp of the staker set. Upon this Proposal Block's acceptance, chain time will be move ahead and all scheduled changes (e.g. promoting a staker from pending to current) will be carried out. +1. If the local clock's time is greater than or equal to the earliest change-event timestamp of the staker set, an advance time transaction is issued into a Proposal Block to move current chain time to the earliest change timestamp of the staker set. Upon this Proposal Block's acceptance, chain time will be moved ahead and all scheduled changes (e.g. promoting a staker from pending to current) will be carried out. 2. 
If the mempool contains any proposal transactions, the mempool proposal transaction with the earliest start time is selected and included into a Proposal Block[^1]. A mempool proposal transaction as is won't change the current chain time[^2]. However there is an edge case to consider: on low activity chains (e.g. Fuji P-chain) chain time may fall significantly behind the local clock. If a proposal transaction is finally issued, its start time is likely to be quite far in the future relative to the current chain time. This would cause the proposal transaction to be considered invalid and rejected, since a staker added by a proposal transaction's start time must be at most 366 hours (two weeks) after current chain time. To avoid this edge case on low-activity chains, an advance time transaction is issued first to move chain time to the local clock's time. As soon as chain time is advanced, the mempool proposal transaction will be issued and accepted. Note that the order in which these steps are executed matters. A block updating chain time would be deemed invalid if it would advance time beyond the staker set's next change event, skipping the associated changes. The order above ensures this never happens because it checks first if chain time should be moved to the time of the next staker set change. It can also be verified by inspection that the timestamp selected for the advance time transactions always respect the synchrony bound. diff --git a/avalanchego/vms/platformvm/docs/validators_versioning.md b/avalanchego/vms/platformvm/docs/validators_versioning.md new file mode 100644 index 00000000..7db716d1 --- /dev/null +++ b/avalanchego/vms/platformvm/docs/validators_versioning.md @@ -0,0 +1,113 @@ +# Validators versioning + +One of the main responsibilities of the P-chain is to register and expose the validator set of any Subnet at every height. 
+ +This information helps Subnets to bootstrap securely, downloading information from active validators only; moreover it supports validated cross-chain communication via Warp. + +In this brief document we dive into the technicalities of how `platformVM` tracks and versions the validator set of any Subnet. + +## The tracked content + +The entry point to retrieve validator information at a given height is the `GetValidatorSet` method in the `validators` package. Here is its signature: + +```golang +GetValidatorSet(ctx context.Context, height uint64, subnetID ids.ID) (map[ids.NodeID]*GetValidatorOutput, error) +``` + +`GetValidatorSet` lets any VM specify a Subnet and a height and returns the data of all Subnet validators active at the requested height, and only those. + +Validator data are collected in a struct named `validators.GetValidatorOutput` which holds for each active validator, its `NodeID`, its `Weight` and its `BLS Public Key` if it was registered. + +Note that a validator `Weight` is not just its stake; it's the aggregate value of the validator's own stake and all of its delegators' stake. A validator's `Weight` gauges how relevant its preference should be in consensus or Warp operations. + +We will see in the next section how the P-chain keeps track of this information over time as the validator set changes. + +## Validator diffs content + +Every new block accepted by the P-chain can potentially alter the validator set of any Subnet, including the primary one. New validators may be added; some of them may have reached their end of life and are therefore removed. Moreover a validator can register itself again once its staking time is done, possibly with a `Weight` and a `BLS Public key` different from the previous staking period. + +Whenever the block at height `H` adds or removes a validator, the P-chain does, among others, the following operations: + +1. it updates the current validator set to add the new validator or remove it if expired; +2. 
it explicitly records the validator set diffs with respect to the validator set at height `H-1`. + +These diffs are key to rebuilding the validator set at a given past height. In this section we illustrate their content. In the next ones, we'll see how the diffs are stored and used. + +The validators diffs track changes in a validator's `Weight` and `BLS Public key`. Along with the `NodeID` this is the data exposed by the `GetValidatorSet` method. + +Note that `Weight` and `BLS Public key` behave differently throughout the validator's lifetime: + +1. `BLS Public key` cannot change through a validator's lifetime. It can only change when a validator is added/re-added and removed. +2. `Weight` can change throughout a validator's lifetime by the creation and removal of its delegators as well as by validator's own creation and removal. + +Here is a scheme of what `Weight` and `BLS Public key` diff content we record upon relevant scenarios: + +| | Weight Diff (forward looking) | BLS Key Diff (backward looking) | +|--------------------|---------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------| +| Validator creation | record ```golang state.ValidatorWeightDiff{ Decrease: false, Weight: validator.Weight, }``` | record an empty byte slice if validator.BlsKey is specified; otherwise record nothing | +| Delegator creation | record ```golang state.ValidatorWeightDiff{ Decrease: false, Weight: validator.Weight, }``` | No entry is recorded | +| Delegator removal | record ```golang state.ValidatorWeightDiff{ Decrease: true, Weight: validator.Weight, }``` | No entry is recorded | +| Validator removal | record ```golang state.ValidatorWeightDiff{ Decrease: true, Weight: validator.Weight, }``` | record validator.BlsKey if it is specified; otherwise record nothing | + +Note that `Weight` diffs are encoded as `state.ValidatorWeightDiff` and are 
*forward-looking*: a diff recorded at height `H` stores the change that transforms validator weight at height `H-1` into validator weight at height `H`. + +In contrast, `BLS Public Key` diffs are *backward-looking*: a diff recorded at height `H` stores the change that transforms validator `BLS Public Key` at height `H` into validator `BLS Public key` at height `H-1`. + +Finally, if no changes are made to the validator set no diff entry is recorded. This implies that a validator `Weight` or `BLS Public Key` diff may not be stored for every height `H`. + +## Validator diffs layout + +Validator diffs layout is optimized to support iteration. Validator sets are rebuilt by accumulating `Weight` and `BLS Public Key` diffs from the top-most height down to the requested height. So validator diffs are stored so that it's fast to iterate them in this order. + +`Weight` diffs are stored as a contiguous block of key-value pairs as follows: + +| Key | Value | +|------------------------------------|--------------------------------------| +| SubnetID + Reverse_Height + NodeID | serialized state.ValidatorWeightDiff | + +Note that: + +1. `Weight` diffs related to a Subnet are stored contiguously. +2. Diff height is serialized as `Reverse_Height`. It is stored with big endian format and has its bits flipped too. Big endianness ensures that heights are stored in order, bit flipping ensures that the top-most height is always the first. +3. `NodeID` is part of the key and `state.ValidatorWeightDiff` is part of the value. + +`BLS Public Key` diffs are stored as follows: + +| Key | Value | +|------------------------------------|-------------------------------| +| SubnetID + Reverse_Height + NodeID | validator.BlsKey bytes or nil | + +Note that: + +1. `BLS Public Key` diffs have the same keys as `Weight` diffs. This implies that the same ordering is guaranteed. +2. Value is either validator `BLS Public Key` bytes or an empty byte slice, as illustrated in the previous section. 
+ +## Validators diff usage in rebuilding validators state + +Now let's see how diffs are used to rebuild the validator set at a given height. The procedure varies slightly between Primary Network and Subnet validator, so we'll describe them separately. +We assume that the reader knows that, as of the Cortina fork, every Subnet validator must also be a Primary Network validator. + +### Primary network validator set rebuild + +Say the P-Chain's current height is `T` and we want to retrieve the Primary Network validators at height `H < T`. We proceed as follows: + +1. We retrieve the Primary Network validator set at current height `T`. This is the base state on top of which diffs will be applied. +2. We apply weight diffs first. Specifically: + - `Weight` diff iteration starts from the top-most height smaller or equal to `T`. Remember that entry heights do not need to be contiguous, so the iteration starts from the highest height smaller or equal to `T`, in case `T` does not have a diff entry. + - Since `Weight` diffs are forward-looking, each diff is applied in reverse. A validator's weight is decreased if `state.ValidatorWeightDiff.Decrease` is `false` and it is increased if it is `true`. + - We take care of adding or removing a validator from the base set based on its weight. Whenever a validator weight, following diff application, becomes zero, we drop it; conversely whenever we encounter a diff increasing weight for a currently-non-existing validator, we add the validator to the base set. + - The iteration stops at the first height smaller or equal to `H+1`. Note that a `Weight` diff stored at height `K` holds the content to turn validator state at height `K-1` into validator state at height `K`. So to get validator state at height `K` we must apply diff content at height `K+1`. +3. Once all `Weight` diffs have been applied, the resulting validator set will contain all Primary Network validators active at height `H` and only those. 
We still need to compute the correct `BLS Public Keys` registered at height `H` for these validators, as each validator may have restaked between height `H` and `T`. They may have a different (or no) `BLS Public Key` at either height. We solve this by applying `BLS Public Key` diffs to the validator set: + - Once again we iterate `BLS Public Key` diffs from the top-most height smaller or equal to `T` till the first height smaller or equal to `H+1`. + - Since `BLS Public Key` diffs are *backward-looking*, we simply nil the BLS key when diff is nil and we restore the BLS Key when it is specified in the diff. + +### Subnet validator set rebuild + +Let's first see why Subnet validators need to be handled differently. As of `Cortina` fork, we allow `BLS Public Key` registration only for Primary network validators. A given `NodeID` may be both a Primary Network validator and a Subnet validator, but it'll register its `BLS Public Key` only when it registers as Primary Network validator. Despite this, we want to provide a validator `BLS Public Key` when `validators.GetValidatorOutput` is called. So we need to fetch it from the Primary Network validator set. + +Say P-chain current height is `T` and we want to retrieve Subnet validators at height `H < T`. We proceed as follows: + +1. We retrieve both Subnet and Primary Network validator set at current height `T`, +2. We apply `Weight` diff on top of the Subnet validator set, exactly as described in the previous section, +3. Before applying `BLS Public Key` diffs, we retrieve `BLS Public Key` from the current Primary Network validator set for each of the current Subnet validators. This ensures the `BLS Public Key`s are duly initialized before applying the diffs, +4. Finally we apply the `BLS Public Key` diffs exactly as described in the previous section. 
diff --git a/avalanchego/vms/platformvm/factory.go b/avalanchego/vms/platformvm/factory.go index 5673bebe..834c9c8f 100644 --- a/avalanchego/vms/platformvm/factory.go +++ b/avalanchego/vms/platformvm/factory.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package platformvm diff --git a/avalanchego/vms/platformvm/fx/fx.go b/avalanchego/vms/platformvm/fx/fx.go index 8bb95a2e..4f6eceea 100644 --- a/avalanchego/vms/platformvm/fx/fx.go +++ b/avalanchego/vms/platformvm/fx/fx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package fx @@ -9,7 +9,11 @@ import ( "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) -var _ Fx = (*secp256k1fx.Fx)(nil) +var ( + _ Fx = (*secp256k1fx.Fx)(nil) + _ Owner = (*secp256k1fx.OutputOwners)(nil) + _ Owned = (*secp256k1fx.TransferOutput)(nil) +) // Fx is the interface a feature extension must implement to support the // Platform Chain. @@ -40,6 +44,8 @@ type Fx interface { } type Owner interface { + verify.IsNotState + verify.Verifiable snow.ContextInitializable } diff --git a/avalanchego/vms/platformvm/fx/mock_fx.go b/avalanchego/vms/platformvm/fx/mock_fx.go index 78a2d4e6..6878c124 100644 --- a/avalanchego/vms/platformvm/fx/mock_fx.go +++ b/avalanchego/vms/platformvm/fx/mock_fx.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/vms/platformvm/fx (interfaces: Fx,Owner) +// +// Generated by this command: +// +// mockgen -package=fx -destination=vms/platformvm/fx/mock_fx.go github.com/ava-labs/avalanchego/vms/platformvm/fx Fx,Owner +// // Package fx is a generated GoMock package. 
package fx @@ -11,7 +13,8 @@ import ( reflect "reflect" snow "github.com/ava-labs/avalanchego/snow" - gomock "github.com/golang/mock/gomock" + verify "github.com/ava-labs/avalanchego/vms/components/verify" + gomock "go.uber.org/mock/gomock" ) // MockFx is a mock of Fx interface. @@ -66,22 +69,22 @@ func (mr *MockFxMockRecorder) Bootstrapping() *gomock.Call { } // CreateOutput mocks base method. -func (m *MockFx) CreateOutput(arg0 uint64, arg1 interface{}) (interface{}, error) { +func (m *MockFx) CreateOutput(arg0 uint64, arg1 any) (any, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "CreateOutput", arg0, arg1) - ret0, _ := ret[0].(interface{}) + ret0, _ := ret[0].(any) ret1, _ := ret[1].(error) return ret0, ret1 } // CreateOutput indicates an expected call of CreateOutput. -func (mr *MockFxMockRecorder) CreateOutput(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockFxMockRecorder) CreateOutput(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOutput", reflect.TypeOf((*MockFx)(nil).CreateOutput), arg0, arg1) } // Initialize mocks base method. -func (m *MockFx) Initialize(arg0 interface{}) error { +func (m *MockFx) Initialize(arg0 any) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Initialize", arg0) ret0, _ := ret[0].(error) @@ -89,13 +92,13 @@ func (m *MockFx) Initialize(arg0 interface{}) error { } // Initialize indicates an expected call of Initialize. -func (mr *MockFxMockRecorder) Initialize(arg0 interface{}) *gomock.Call { +func (mr *MockFxMockRecorder) Initialize(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Initialize", reflect.TypeOf((*MockFx)(nil).Initialize), arg0) } // VerifyPermission mocks base method. 
-func (m *MockFx) VerifyPermission(arg0, arg1, arg2, arg3 interface{}) error { +func (m *MockFx) VerifyPermission(arg0, arg1, arg2, arg3 any) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "VerifyPermission", arg0, arg1, arg2, arg3) ret0, _ := ret[0].(error) @@ -103,13 +106,13 @@ func (m *MockFx) VerifyPermission(arg0, arg1, arg2, arg3 interface{}) error { } // VerifyPermission indicates an expected call of VerifyPermission. -func (mr *MockFxMockRecorder) VerifyPermission(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +func (mr *MockFxMockRecorder) VerifyPermission(arg0, arg1, arg2, arg3 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VerifyPermission", reflect.TypeOf((*MockFx)(nil).VerifyPermission), arg0, arg1, arg2, arg3) } // VerifyTransfer mocks base method. -func (m *MockFx) VerifyTransfer(arg0, arg1, arg2, arg3 interface{}) error { +func (m *MockFx) VerifyTransfer(arg0, arg1, arg2, arg3 any) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "VerifyTransfer", arg0, arg1, arg2, arg3) ret0, _ := ret[0].(error) @@ -117,13 +120,15 @@ func (m *MockFx) VerifyTransfer(arg0, arg1, arg2, arg3 interface{}) error { } // VerifyTransfer indicates an expected call of VerifyTransfer. -func (mr *MockFxMockRecorder) VerifyTransfer(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +func (mr *MockFxMockRecorder) VerifyTransfer(arg0, arg1, arg2, arg3 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VerifyTransfer", reflect.TypeOf((*MockFx)(nil).VerifyTransfer), arg0, arg1, arg2, arg3) } // MockOwner is a mock of Owner interface. type MockOwner struct { + verify.IsNotState + ctrl *gomock.Controller recorder *MockOwnerMockRecorder } @@ -152,7 +157,7 @@ func (m *MockOwner) InitCtx(arg0 *snow.Context) { } // InitCtx indicates an expected call of InitCtx. 
-func (mr *MockOwnerMockRecorder) InitCtx(arg0 interface{}) *gomock.Call { +func (mr *MockOwnerMockRecorder) InitCtx(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InitCtx", reflect.TypeOf((*MockOwner)(nil).InitCtx), arg0) } @@ -170,3 +175,17 @@ func (mr *MockOwnerMockRecorder) Verify() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Verify", reflect.TypeOf((*MockOwner)(nil).Verify)) } + +// isState mocks base method. +func (m *MockOwner) isState() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "isState") + ret0, _ := ret[0].(error) + return ret0 +} + +// isState indicates an expected call of isState. +func (mr *MockOwnerMockRecorder) isState() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "isState", reflect.TypeOf((*MockOwner)(nil).isState)) +} diff --git a/avalanchego/vms/platformvm/genesis/codec.go b/avalanchego/vms/platformvm/genesis/codec.go index 29f19bc8..b18c40d6 100644 --- a/avalanchego/vms/platformvm/genesis/codec.go +++ b/avalanchego/vms/platformvm/genesis/codec.go @@ -1,12 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package genesis -import ( - "github.com/ava-labs/avalanchego/vms/platformvm/blocks" -) +import "github.com/ava-labs/avalanchego/vms/platformvm/block" -const Version = blocks.Version +const CodecVersion = block.CodecVersion -var Codec = blocks.GenesisCodec +var Codec = block.GenesisCodec diff --git a/avalanchego/vms/platformvm/genesis/genesis.go b/avalanchego/vms/platformvm/genesis/genesis.go index 6a62f978..795e73cb 100644 --- a/avalanchego/vms/platformvm/genesis/genesis.go +++ b/avalanchego/vms/platformvm/genesis/genesis.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. 
All rights reserved. // See the file LICENSE for licensing terms. package genesis @@ -41,32 +41,3 @@ func Parse(genesisBytes []byte) (*Genesis, error) { } return gen, nil } - -// State represents the genesis state of the platform chain -type State struct { - UTXOs []*avax.UTXO - Validators []*txs.Tx - Chains []*txs.Tx - Timestamp uint64 - InitialSupply uint64 -} - -func ParseState(genesisBytes []byte) (*State, error) { - genesis, err := Parse(genesisBytes) - if err != nil { - return nil, err - } - - utxos := make([]*avax.UTXO, 0, len(genesis.UTXOs)) - for _, utxo := range genesis.UTXOs { - utxos = append(utxos, &utxo.UTXO) - } - - return &State{ - UTXOs: utxos, - Validators: genesis.Validators, - Chains: genesis.Chains, - Timestamp: genesis.Timestamp, - InitialSupply: genesis.InitialSupply, - }, nil -} diff --git a/avalanchego/vms/platformvm/health.go b/avalanchego/vms/platformvm/health.go index 38073674..86c80b80 100644 --- a/avalanchego/vms/platformvm/health.go +++ b/avalanchego/vms/platformvm/health.go @@ -1,36 +1,18 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package platformvm import ( "context" - "errors" "fmt" - "strings" "time" - "go.uber.org/zap" - "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/utils/constants" ) -const fallbackMinPercentConnected = 0.8 - -var errNotEnoughStake = errors.New("not connected to enough stake") - func (vm *VM) HealthCheck(context.Context) (interface{}, error) { - // Returns nil if this node is connected to > alpha percent of the Primary Network's stake - primaryPercentConnected, err := vm.getPercentConnected(constants.PrimaryNetworkID) - if err != nil { - return nil, fmt.Errorf("couldn't get percent connected: %w", err) - } - vm.metrics.SetPercentConnected(primaryPercentConnected) - details := map[string]float64{ - "primary-percentConnected": primaryPercentConnected, - } - localPrimaryValidator, err := vm.state.GetCurrentValidator( constants.PrimaryNetworkID, vm.ctx.NodeID, @@ -44,42 +26,7 @@ func (vm *VM) HealthCheck(context.Context) (interface{}, error) { return nil, fmt.Errorf("couldn't get current local validator: %w", err) } - primaryMinPercentConnected, ok := vm.MinPercentConnectedStakeHealthy[constants.PrimaryNetworkID] - if !ok { - // This should never happen according to the comment for - // [MinPercentConnectedStakeHealthy] but we include it here to avoid the - // situation where a regression causes the key to be missing so that we - // don't accidentally set [primaryMinPercentConnected] to 0. 
- vm.ctx.Log.Warn("primary network min connected stake not given", - zap.Float64("fallback value", fallbackMinPercentConnected), - ) - primaryMinPercentConnected = fallbackMinPercentConnected - } - - var errorReasons []string - if primaryPercentConnected < primaryMinPercentConnected { - errorReasons = append(errorReasons, - fmt.Sprintf("connected to %f%% of primary network stake; should be connected to at least %f%%", - primaryPercentConnected*100, - primaryMinPercentConnected*100, - ), - ) - } - for subnetID := range vm.TrackedSubnets { - percentConnected, err := vm.getPercentConnected(subnetID) - if err != nil { - return nil, fmt.Errorf("couldn't get percent connected for %q: %w", subnetID, err) - } - minPercentConnected, ok := vm.MinPercentConnectedStakeHealthy[subnetID] - if !ok { - minPercentConnected = primaryMinPercentConnected - } - - vm.metrics.SetSubnetPercentConnected(subnetID, percentConnected) - key := fmt.Sprintf("%s-percentConnected", subnetID) - details[key] = percentConnected - localSubnetValidator, err := vm.state.GetCurrentValidator( subnetID, vm.ctx.NodeID, @@ -92,23 +39,6 @@ func (vm *VM) HealthCheck(context.Context) (interface{}, error) { default: return nil, fmt.Errorf("couldn't get current subnet validator of %q: %w", subnetID, err) } - - if percentConnected < minPercentConnected { - errorReasons = append(errorReasons, - fmt.Sprintf("connected to %f%% of %q weight; should be connected to at least %f%%", - percentConnected*100, - subnetID, - minPercentConnected*100, - ), - ) - } } - - if len(errorReasons) == 0 || !vm.StakingEnabled { - return details, nil - } - return details, fmt.Errorf("platform layer is unhealthy err: %w, details: %s", - errNotEnoughStake, - strings.Join(errorReasons, ", "), - ) + return nil, nil } diff --git a/avalanchego/vms/platformvm/health_test.go b/avalanchego/vms/platformvm/health_test.go deleted file mode 100644 index 7a7d67b4..00000000 --- a/avalanchego/vms/platformvm/health_test.go +++ /dev/null @@ -1,110 +0,0 @@ 
-// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package platformvm - -import ( - "context" - "fmt" - "testing" - - "github.com/stretchr/testify/require" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/validators" - "github.com/ava-labs/avalanchego/version" -) - -const defaultMinConnectedStake = 0.8 - -func TestHealthCheckPrimaryNetwork(t *testing.T) { - require := require.New(t) - - vm, _, _ := defaultVM() - vm.ctx.Lock.Lock() - - defer func() { - require.NoError(vm.Shutdown(context.Background())) - vm.ctx.Lock.Unlock() - }() - genesisState, _ := defaultGenesis() - for index, validator := range genesisState.Validators { - err := vm.Connected(context.Background(), validator.NodeID, version.CurrentApp) - require.NoError(err) - details, err := vm.HealthCheck(context.Background()) - if float64((index+1)*20) >= defaultMinConnectedStake*100 { - require.NoError(err) - } else { - require.Contains(details, "primary-percentConnected") - require.ErrorIs(err, errNotEnoughStake) - } - } -} - -func TestHealthCheckSubnet(t *testing.T) { - tests := map[string]struct { - minStake float64 - useDefault bool - }{ - "default min stake": { - useDefault: true, - minStake: 0, - }, - "custom min stake": { - minStake: 0.40, - }, - } - - for name, test := range tests { - t.Run(name, func(t *testing.T) { - require := require.New(t) - - vm, _, _ := defaultVM() - vm.ctx.Lock.Lock() - defer func() { - require.NoError(vm.Shutdown(context.Background())) - vm.ctx.Lock.Unlock() - }() - - subnetID := ids.GenerateTestID() - subnetVdrs := validators.NewSet() - vm.TrackedSubnets.Add(subnetID) - testVdrCount := 4 - for i := 0; i < testVdrCount; i++ { - subnetVal := ids.GenerateTestNodeID() - err := subnetVdrs.Add(subnetVal, nil, ids.Empty, 100) - require.NoError(err) - } - ok := vm.Validators.Add(subnetID, subnetVdrs) - require.True(ok) - - // connect to all primary network validators first - genesisState, 
_ := defaultGenesis() - for _, validator := range genesisState.Validators { - err := vm.Connected(context.Background(), validator.NodeID, version.CurrentApp) - require.NoError(err) - } - var expectedMinStake float64 - if test.useDefault { - expectedMinStake = defaultMinConnectedStake - } else { - expectedMinStake = test.minStake - vm.MinPercentConnectedStakeHealthy = map[ids.ID]float64{ - subnetID: expectedMinStake, - } - } - for index, vdr := range subnetVdrs.List() { - err := vm.ConnectedSubnet(context.Background(), vdr.NodeID, subnetID) - require.NoError(err) - details, err := vm.HealthCheck(context.Background()) - connectedPerc := float64((index + 1) * (100 / testVdrCount)) - if connectedPerc >= expectedMinStake*100 { - require.NoError(err) - } else { - require.Contains(details, fmt.Sprintf("%s-percentConnected", subnetID)) - require.ErrorIs(err, errNotEnoughStake) - } - } - }) - } -} diff --git a/avalanchego/vms/platformvm/main_test.go b/avalanchego/vms/platformvm/main_test.go new file mode 100644 index 00000000..d353d316 --- /dev/null +++ b/avalanchego/vms/platformvm/main_test.go @@ -0,0 +1,14 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package platformvm + +import ( + "testing" + + "go.uber.org/goleak" +) + +func TestMain(m *testing.M) { + goleak.VerifyTestMain(m) +} diff --git a/avalanchego/vms/platformvm/metrics/block_metrics.go b/avalanchego/vms/platformvm/metrics/block_metrics.go index 97156672..09239d8d 100644 --- a/avalanchego/vms/platformvm/metrics/block_metrics.go +++ b/avalanchego/vms/platformvm/metrics/block_metrics.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package metrics @@ -9,10 +9,10 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/ava-labs/avalanchego/utils/wrappers" - "github.com/ava-labs/avalanchego/vms/platformvm/blocks" + "github.com/ava-labs/avalanchego/vms/platformvm/block" ) -var _ blocks.Visitor = (*blockMetrics)(nil) +var _ block.Visitor = (*blockMetrics)(nil) type blockMetrics struct { txMetrics *txMetrics @@ -49,24 +49,24 @@ func newBlockMetric( ) prometheus.Counter { blockMetric := prometheus.NewCounter(prometheus.CounterOpts{ Namespace: namespace, - Name: fmt.Sprintf("%s_blks_accepted", blockName), + Name: blockName + "_blks_accepted", Help: fmt.Sprintf("Number of %s blocks accepted", blockName), }) errs.Add(registerer.Register(blockMetric)) return blockMetric } -func (m *blockMetrics) BanffAbortBlock(*blocks.BanffAbortBlock) error { +func (m *blockMetrics) BanffAbortBlock(*block.BanffAbortBlock) error { m.numAbortBlocks.Inc() return nil } -func (m *blockMetrics) BanffCommitBlock(*blocks.BanffCommitBlock) error { +func (m *blockMetrics) BanffCommitBlock(*block.BanffCommitBlock) error { m.numCommitBlocks.Inc() return nil } -func (m *blockMetrics) BanffProposalBlock(b *blocks.BanffProposalBlock) error { +func (m *blockMetrics) BanffProposalBlock(b *block.BanffProposalBlock) error { m.numProposalBlocks.Inc() for _, tx := range b.Transactions { if err := tx.Unsigned.Visit(m.txMetrics); err != nil { @@ -76,7 +76,7 @@ func (m *blockMetrics) BanffProposalBlock(b *blocks.BanffProposalBlock) error { return b.Tx.Unsigned.Visit(m.txMetrics) } -func (m *blockMetrics) BanffStandardBlock(b *blocks.BanffStandardBlock) error { +func (m *blockMetrics) BanffStandardBlock(b *block.BanffStandardBlock) error { m.numStandardBlocks.Inc() for _, tx := range b.Transactions { if err := tx.Unsigned.Visit(m.txMetrics); err != nil { @@ -86,22 +86,22 @@ func (m *blockMetrics) BanffStandardBlock(b *blocks.BanffStandardBlock) error { return nil } -func (m *blockMetrics) 
ApricotAbortBlock(*blocks.ApricotAbortBlock) error { +func (m *blockMetrics) ApricotAbortBlock(*block.ApricotAbortBlock) error { m.numAbortBlocks.Inc() return nil } -func (m *blockMetrics) ApricotCommitBlock(*blocks.ApricotCommitBlock) error { +func (m *blockMetrics) ApricotCommitBlock(*block.ApricotCommitBlock) error { m.numCommitBlocks.Inc() return nil } -func (m *blockMetrics) ApricotProposalBlock(b *blocks.ApricotProposalBlock) error { +func (m *blockMetrics) ApricotProposalBlock(b *block.ApricotProposalBlock) error { m.numProposalBlocks.Inc() return b.Tx.Unsigned.Visit(m.txMetrics) } -func (m *blockMetrics) ApricotStandardBlock(b *blocks.ApricotStandardBlock) error { +func (m *blockMetrics) ApricotStandardBlock(b *block.ApricotStandardBlock) error { m.numStandardBlocks.Inc() for _, tx := range b.Transactions { if err := tx.Unsigned.Visit(m.txMetrics); err != nil { @@ -111,7 +111,7 @@ func (m *blockMetrics) ApricotStandardBlock(b *blocks.ApricotStandardBlock) erro return nil } -func (m *blockMetrics) ApricotAtomicBlock(b *blocks.ApricotAtomicBlock) error { +func (m *blockMetrics) ApricotAtomicBlock(b *block.ApricotAtomicBlock) error { m.numAtomicBlocks.Inc() return b.Tx.Unsigned.Visit(m.txMetrics) } diff --git a/avalanchego/vms/platformvm/metrics/metrics.go b/avalanchego/vms/platformvm/metrics/metrics.go index b1ff4af8..98b611a0 100644 --- a/avalanchego/vms/platformvm/metrics/metrics.go +++ b/avalanchego/vms/platformvm/metrics/metrics.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package metrics @@ -10,9 +10,8 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/metric" - "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/wrappers" - "github.com/ava-labs/avalanchego/vms/platformvm/blocks" + "github.com/ava-labs/avalanchego/vms/platformvm/block" ) var _ Metrics = (*metrics)(nil) @@ -20,12 +19,8 @@ var _ Metrics = (*metrics)(nil) type Metrics interface { metric.APIInterceptor - // Mark that an option vote that we initially preferred was accepted. - MarkOptionVoteWon() - // Mark that an option vote that we initially preferred was rejected. - MarkOptionVoteLost() // Mark that the given block was accepted. - MarkAccepted(blocks.Block) error + MarkAccepted(block.Block) error // Mark that a validator set was created. IncValidatorSetsCreated() // Mark that a validator set was cached. @@ -43,35 +38,15 @@ type Metrics interface { SetTimeUntilUnstake(time.Duration) // Mark when this node will unstake from a subnet. SetTimeUntilSubnetUnstake(subnetID ids.ID, timeUntilUnstake time.Duration) - // Mark that this node is connected to this percent of a subnet's stake. - SetSubnetPercentConnected(subnetID ids.ID, percent float64) - // Mark that this node is connected to this percent of the Primary Network's - // stake. 
- SetPercentConnected(percent float64) } func New( namespace string, registerer prometheus.Registerer, - trackedSubnets set.Set[ids.ID], ) (Metrics, error) { blockMetrics, err := newBlockMetrics(namespace, registerer) m := &metrics{ blockMetrics: blockMetrics, - - percentConnected: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "percent_connected", - Help: "Percent of connected stake", - }), - subnetPercentConnected: prometheus.NewGaugeVec( - prometheus.GaugeOpts{ - Namespace: namespace, - Name: "percent_connected_subnet", - Help: "Percent of connected subnet weight", - }, - []string{"subnetID"}, - ), timeUntilUnstake: prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: namespace, Name: "time_until_unstake", @@ -96,17 +71,6 @@ func New( Help: "Amount (in nAVAX) of AVAX staked on the Primary Network", }), - numVotesWon: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "votes_won", - Help: "Total number of votes this node has won", - }), - numVotesLost: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "votes_lost", - Help: "Total number of votes this node has lost", - }), - validatorSetsCached: prometheus.NewCounter(prometheus.CounterOpts{ Namespace: namespace, Name: "validator_sets_cached", @@ -131,31 +95,20 @@ func New( errs := wrappers.Errs{Err: err} apiRequestMetrics, err := metric.NewAPIInterceptor(namespace, registerer) + errs.Add(err) m.APIInterceptor = apiRequestMetrics errs.Add( - err, - - registerer.Register(m.percentConnected), - registerer.Register(m.subnetPercentConnected), registerer.Register(m.timeUntilUnstake), registerer.Register(m.timeUntilSubnetUnstake), registerer.Register(m.localStake), registerer.Register(m.totalStake), - registerer.Register(m.numVotesWon), - registerer.Register(m.numVotesLost), - registerer.Register(m.validatorSetsCreated), registerer.Register(m.validatorSetsCached), registerer.Register(m.validatorSetsHeightDiff), 
registerer.Register(m.validatorSetsDuration), ) - // init subnet tracker metrics with tracked subnets - for subnetID := range trackedSubnets { - // initialize to 0 - m.subnetPercentConnected.WithLabelValues(subnetID.String()).Set(0) - } return m, errs.Err } @@ -164,30 +117,18 @@ type metrics struct { blockMetrics *blockMetrics - percentConnected prometheus.Gauge - subnetPercentConnected *prometheus.GaugeVec timeUntilUnstake prometheus.Gauge timeUntilSubnetUnstake *prometheus.GaugeVec localStake prometheus.Gauge totalStake prometheus.Gauge - numVotesWon, numVotesLost prometheus.Counter - validatorSetsCached prometheus.Counter validatorSetsCreated prometheus.Counter validatorSetsHeightDiff prometheus.Gauge validatorSetsDuration prometheus.Gauge } -func (m *metrics) MarkOptionVoteWon() { - m.numVotesWon.Inc() -} - -func (m *metrics) MarkOptionVoteLost() { - m.numVotesLost.Inc() -} - -func (m *metrics) MarkAccepted(b blocks.Block) error { +func (m *metrics) MarkAccepted(b block.Block) error { return b.Visit(m.blockMetrics) } @@ -222,11 +163,3 @@ func (m *metrics) SetTimeUntilUnstake(timeUntilUnstake time.Duration) { func (m *metrics) SetTimeUntilSubnetUnstake(subnetID ids.ID, timeUntilUnstake time.Duration) { m.timeUntilSubnetUnstake.WithLabelValues(subnetID.String()).Set(float64(timeUntilUnstake)) } - -func (m *metrics) SetSubnetPercentConnected(subnetID ids.ID, percent float64) { - m.subnetPercentConnected.WithLabelValues(subnetID.String()).Set(percent) -} - -func (m *metrics) SetPercentConnected(percent float64) { - m.percentConnected.Set(percent) -} diff --git a/avalanchego/vms/platformvm/metrics/no_op.go b/avalanchego/vms/platformvm/metrics/no_op.go index d5948348..770e30c9 100644 --- a/avalanchego/vms/platformvm/metrics/no_op.go +++ b/avalanchego/vms/platformvm/metrics/no_op.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. package metrics @@ -10,7 +10,7 @@ import ( "github.com/gorilla/rpc/v2" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/vms/platformvm/blocks" + "github.com/ava-labs/avalanchego/vms/platformvm/block" ) var Noop Metrics = noopMetrics{} @@ -21,7 +21,7 @@ func (noopMetrics) MarkOptionVoteWon() {} func (noopMetrics) MarkOptionVoteLost() {} -func (noopMetrics) MarkAccepted(blocks.Block) error { +func (noopMetrics) MarkAccepted(block.Block) error { return nil } diff --git a/avalanchego/vms/platformvm/metrics/tx_metrics.go b/avalanchego/vms/platformvm/metrics/tx_metrics.go index 118f1156..70b03276 100644 --- a/avalanchego/vms/platformvm/metrics/tx_metrics.go +++ b/avalanchego/vms/platformvm/metrics/tx_metrics.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package metrics @@ -27,7 +27,9 @@ type txMetrics struct { numRemoveSubnetValidatorTxs, numTransformSubnetTxs, numAddPermissionlessValidatorTxs, - numAddPermissionlessDelegatorTxs prometheus.Counter + numAddPermissionlessDelegatorTxs, + numTransferSubnetOwnershipTxs, + numBaseTxs prometheus.Counter } func newTxMetrics( @@ -49,6 +51,8 @@ func newTxMetrics( numTransformSubnetTxs: newTxMetric(namespace, "transform_subnet", registerer, &errs), numAddPermissionlessValidatorTxs: newTxMetric(namespace, "add_permissionless_validator", registerer, &errs), numAddPermissionlessDelegatorTxs: newTxMetric(namespace, "add_permissionless_delegator", registerer, &errs), + numTransferSubnetOwnershipTxs: newTxMetric(namespace, "transfer_subnet_ownership", registerer, &errs), + numBaseTxs: newTxMetric(namespace, "base", registerer, &errs), } return m, errs.Err } @@ -61,7 +65,7 @@ func newTxMetric( ) prometheus.Counter { txMetric := prometheus.NewCounter(prometheus.CounterOpts{ Namespace: namespace, - Name: 
fmt.Sprintf("%s_txs_accepted", txName), + Name: txName + "_txs_accepted", Help: fmt.Sprintf("Number of %s transactions accepted", txName), }) errs.Add(registerer.Register(txMetric)) @@ -132,3 +136,13 @@ func (m *txMetrics) AddPermissionlessDelegatorTx(*txs.AddPermissionlessDelegator m.numAddPermissionlessDelegatorTxs.Inc() return nil } + +func (m *txMetrics) TransferSubnetOwnershipTx(*txs.TransferSubnetOwnershipTx) error { + m.numTransferSubnetOwnershipTxs.Inc() + return nil +} + +func (m *txMetrics) BaseTx(*txs.BaseTx) error { + m.numBaseTxs.Inc() + return nil +} diff --git a/avalanchego/vms/platformvm/network/config.go b/avalanchego/vms/platformvm/network/config.go new file mode 100644 index 00000000..8536504d --- /dev/null +++ b/avalanchego/vms/platformvm/network/config.go @@ -0,0 +1,66 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package network + +import ( + "time" + + "github.com/ava-labs/avalanchego/utils/units" +) + +var DefaultConfig = Config{ + MaxValidatorSetStaleness: time.Minute, + TargetGossipSize: 20 * units.KiB, + PullGossipPollSize: 1, + PullGossipFrequency: 1500 * time.Millisecond, + PullGossipThrottlingPeriod: 10 * time.Second, + PullGossipThrottlingLimit: 2, + ExpectedBloomFilterElements: 8 * 1024, + ExpectedBloomFilterFalsePositiveProbability: .01, + MaxBloomFilterFalsePositiveProbability: .05, + LegacyPushGossipCacheSize: 512, +} + +type Config struct { + // MaxValidatorSetStaleness limits how old of a validator set the network + // will use for peer sampling and rate limiting. + MaxValidatorSetStaleness time.Duration `json:"max-validator-set-staleness"` + // TargetGossipSize is the number of bytes that will be attempted to be + // sent when pushing transactions and when responded to transaction pull + // requests. + TargetGossipSize int `json:"target-gossip-size"` + // PullGossipPollSize is the number of validators to sample when performing + // a round of pull gossip. 
+ PullGossipPollSize int `json:"pull-gossip-poll-size"` + // PullGossipFrequency is how frequently rounds of pull gossip are + // performed. + PullGossipFrequency time.Duration `json:"pull-gossip-frequency"` + // PullGossipThrottlingPeriod is how large of a window the throttler should + // use. + PullGossipThrottlingPeriod time.Duration `json:"pull-gossip-throttling-period"` + // PullGossipThrottlingLimit is the number of pull querys that are allowed + // by a validator in every throttling window. + PullGossipThrottlingLimit int `json:"pull-gossip-throttling-limit"` + // ExpectedBloomFilterElements is the number of elements to expect when + // creating a new bloom filter. The larger this number is, the larger the + // bloom filter will be. + ExpectedBloomFilterElements int `json:"expected-bloom-filter-elements"` + // ExpectedBloomFilterFalsePositiveProbability is the expected probability + // of a false positive after having inserted ExpectedBloomFilterElements + // into a bloom filter. The smaller this number is, the larger the bloom + // filter will be. + ExpectedBloomFilterFalsePositiveProbability float64 `json:"expected-bloom-filter-false-positive-probability"` + // MaxBloomFilterFalsePositiveProbability is used to determine when the + // bloom filter should be refreshed. Once the expected probability of a + // false positive exceeds this value, the bloom filter will be regenerated. + // The smaller this number is, the more frequently that the bloom filter + // will be regenerated. + MaxBloomFilterFalsePositiveProbability float64 `json:"max-bloom-filter-false-positive-probability"` + // LegacyPushGossipCacheSize tracks the most recently received transactions + // and ensures to only gossip them once. + // + // Deprecated: The legacy push gossip mechanism is deprecated in favor of + // the p2p SDK's push gossip mechanism. 
+ LegacyPushGossipCacheSize int `json:"legacy-push-gossip-cache-size"` +} diff --git a/avalanchego/vms/platformvm/network/gossip.go b/avalanchego/vms/platformvm/network/gossip.go new file mode 100644 index 00000000..5259a80e --- /dev/null +++ b/avalanchego/vms/platformvm/network/gossip.go @@ -0,0 +1,143 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package network + +import ( + "context" + "fmt" + "sync" + "time" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/network/p2p" + "github.com/ava-labs/avalanchego/network/p2p/gossip" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/vms/platformvm/txs" + "github.com/ava-labs/avalanchego/vms/platformvm/txs/mempool" +) + +var ( + _ p2p.Handler = (*txGossipHandler)(nil) + _ gossip.Marshaller[*txs.Tx] = (*txMarshaller)(nil) + _ gossip.Gossipable = (*txs.Tx)(nil) +) + +// bloomChurnMultiplier is the number used to multiply the size of the mempool +// to determine how large of a bloom filter to create. 
+const bloomChurnMultiplier = 3 + +// txGossipHandler is the handler called when serving gossip messages +type txGossipHandler struct { + p2p.NoOpHandler + appGossipHandler p2p.Handler + appRequestHandler p2p.Handler +} + +func (t txGossipHandler) AppGossip( + ctx context.Context, + nodeID ids.NodeID, + gossipBytes []byte, +) { + t.appGossipHandler.AppGossip(ctx, nodeID, gossipBytes) +} + +func (t txGossipHandler) AppRequest( + ctx context.Context, + nodeID ids.NodeID, + deadline time.Time, + requestBytes []byte, +) ([]byte, error) { + return t.appRequestHandler.AppRequest(ctx, nodeID, deadline, requestBytes) +} + +type txMarshaller struct{} + +func (txMarshaller) MarshalGossip(tx *txs.Tx) ([]byte, error) { + return tx.Bytes(), nil +} + +func (txMarshaller) UnmarshalGossip(bytes []byte) (*txs.Tx, error) { + return txs.Parse(txs.Codec, bytes) +} + +func newGossipMempool( + mempool mempool.Mempool, + registerer prometheus.Registerer, + log logging.Logger, + txVerifier TxVerifier, + minTargetElements int, + targetFalsePositiveProbability, + resetFalsePositiveProbability float64, +) (*gossipMempool, error) { + bloom, err := gossip.NewBloomFilter(registerer, "mempool_bloom_filter", minTargetElements, targetFalsePositiveProbability, resetFalsePositiveProbability) + return &gossipMempool{ + Mempool: mempool, + log: log, + txVerifier: txVerifier, + bloom: bloom, + }, err +} + +type gossipMempool struct { + mempool.Mempool + log logging.Logger + txVerifier TxVerifier + + lock sync.RWMutex + bloom *gossip.BloomFilter +} + +func (g *gossipMempool) Add(tx *txs.Tx) error { + txID := tx.ID() + if _, ok := g.Mempool.Get(txID); ok { + return fmt.Errorf("tx %s dropped: %w", txID, mempool.ErrDuplicateTx) + } + + if reason := g.Mempool.GetDropReason(txID); reason != nil { + // If the tx is being dropped - just ignore it + // + // TODO: Should we allow re-verification of the transaction even if it + // failed previously? 
+ return reason + } + + if err := g.txVerifier.VerifyTx(tx); err != nil { + g.Mempool.MarkDropped(txID, err) + return err + } + + if err := g.Mempool.Add(tx); err != nil { + g.Mempool.MarkDropped(txID, err) + return err + } + + g.lock.Lock() + defer g.lock.Unlock() + + g.bloom.Add(tx) + reset, err := gossip.ResetBloomFilterIfNeeded(g.bloom, g.Mempool.Len()*bloomChurnMultiplier) + if err != nil { + return err + } + + if reset { + g.log.Debug("resetting bloom filter") + g.Mempool.Iterate(func(tx *txs.Tx) bool { + g.bloom.Add(tx) + return true + }) + } + + g.Mempool.RequestBuildBlock(false) + return nil +} + +func (g *gossipMempool) GetFilter() (bloom []byte, salt []byte) { + g.lock.RLock() + defer g.lock.RUnlock() + + return g.bloom.Marshal() +} diff --git a/avalanchego/vms/platformvm/network/gossip_test.go b/avalanchego/vms/platformvm/network/gossip_test.go new file mode 100644 index 00000000..47f0602c --- /dev/null +++ b/avalanchego/vms/platformvm/network/gossip_test.go @@ -0,0 +1,152 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package network + +import ( + "errors" + "testing" + + "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/vms/platformvm/txs" + "github.com/ava-labs/avalanchego/vms/platformvm/txs/mempool" +) + +var errFoo = errors.New("foo") + +// Add should error if verification errors +func TestGossipMempoolAddVerificationError(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) + + txID := ids.GenerateTestID() + tx := &txs.Tx{ + TxID: txID, + } + + mempool := mempool.NewMockMempool(ctrl) + txVerifier := testTxVerifier{err: errFoo} + + mempool.EXPECT().Get(txID).Return(nil, false) + mempool.EXPECT().GetDropReason(txID).Return(nil) + mempool.EXPECT().MarkDropped(txID, errFoo) + + gossipMempool, err := newGossipMempool( + mempool, + prometheus.NewRegistry(), + logging.NoLog{}, + txVerifier, + testConfig.ExpectedBloomFilterElements, + testConfig.ExpectedBloomFilterFalsePositiveProbability, + testConfig.MaxBloomFilterFalsePositiveProbability, + ) + require.NoError(err) + + err = gossipMempool.Add(tx) + require.ErrorIs(err, errFoo) + require.False(gossipMempool.bloom.Has(tx)) +} + +// Add should error if adding to the mempool errors +func TestGossipMempoolAddError(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) + + txID := ids.GenerateTestID() + tx := &txs.Tx{ + TxID: txID, + } + + txVerifier := testTxVerifier{} + mempool := mempool.NewMockMempool(ctrl) + + mempool.EXPECT().Get(txID).Return(nil, false) + mempool.EXPECT().GetDropReason(txID).Return(nil) + mempool.EXPECT().Add(tx).Return(errFoo) + mempool.EXPECT().MarkDropped(txID, errFoo).AnyTimes() + + gossipMempool, err := newGossipMempool( + mempool, + prometheus.NewRegistry(), + logging.NoLog{}, + txVerifier, + testConfig.ExpectedBloomFilterElements, + 
testConfig.ExpectedBloomFilterFalsePositiveProbability, + testConfig.MaxBloomFilterFalsePositiveProbability, + ) + require.NoError(err) + + err = gossipMempool.Add(tx) + require.ErrorIs(err, errFoo) + require.False(gossipMempool.bloom.Has(tx)) +} + +// Adding a duplicate to the mempool should return an error +func TestMempoolDuplicate(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) + + testMempool := mempool.NewMockMempool(ctrl) + txVerifier := testTxVerifier{} + + txID := ids.GenerateTestID() + tx := &txs.Tx{ + TxID: txID, + } + + testMempool.EXPECT().Get(txID).Return(tx, true) + + gossipMempool, err := newGossipMempool( + testMempool, + prometheus.NewRegistry(), + logging.NoLog{}, + txVerifier, + testConfig.ExpectedBloomFilterElements, + testConfig.ExpectedBloomFilterFalsePositiveProbability, + testConfig.MaxBloomFilterFalsePositiveProbability, + ) + require.NoError(err) + + err = gossipMempool.Add(tx) + require.ErrorIs(err, mempool.ErrDuplicateTx) + require.False(gossipMempool.bloom.Has(tx)) +} + +// Adding a tx to the mempool should add it to the bloom filter +func TestGossipAddBloomFilter(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) + + txID := ids.GenerateTestID() + tx := &txs.Tx{ + TxID: txID, + } + + txVerifier := testTxVerifier{} + mempool := mempool.NewMockMempool(ctrl) + + mempool.EXPECT().Get(txID).Return(nil, false) + mempool.EXPECT().GetDropReason(txID).Return(nil) + mempool.EXPECT().Add(tx).Return(nil) + mempool.EXPECT().Len().Return(0) + mempool.EXPECT().RequestBuildBlock(false) + + gossipMempool, err := newGossipMempool( + mempool, + prometheus.NewRegistry(), + logging.NoLog{}, + txVerifier, + testConfig.ExpectedBloomFilterElements, + testConfig.ExpectedBloomFilterFalsePositiveProbability, + testConfig.MaxBloomFilterFalsePositiveProbability, + ) + require.NoError(err) + + require.NoError(gossipMempool.Add(tx)) + require.True(gossipMempool.bloom.Has(tx)) +} diff --git 
a/avalanchego/vms/platformvm/network/main_test.go b/avalanchego/vms/platformvm/network/main_test.go new file mode 100644 index 00000000..ed2cfd9e --- /dev/null +++ b/avalanchego/vms/platformvm/network/main_test.go @@ -0,0 +1,14 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package network + +import ( + "testing" + + "go.uber.org/goleak" +) + +func TestMain(m *testing.M) { + goleak.VerifyTestMain(m) +} diff --git a/avalanchego/vms/platformvm/network/network.go b/avalanchego/vms/platformvm/network/network.go new file mode 100644 index 00000000..af51c475 --- /dev/null +++ b/avalanchego/vms/platformvm/network/network.go @@ -0,0 +1,295 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package network + +import ( + "context" + "sync" + "time" + + "github.com/prometheus/client_golang/prometheus" + "go.uber.org/zap" + + "github.com/ava-labs/avalanchego/cache" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/network/p2p" + "github.com/ava-labs/avalanchego/network/p2p/gossip" + "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/vms/components/message" + "github.com/ava-labs/avalanchego/vms/platformvm/txs" + "github.com/ava-labs/avalanchego/vms/platformvm/txs/mempool" +) + +const TxGossipHandlerID = 0 + +type Network interface { + common.AppHandler + + // Gossip starts gossiping transactions and blocks until it completes. + Gossip(ctx context.Context) + // IssueTx verifies the transaction at the currently preferred state, adds + // it to the mempool, and gossips it to the network. 
+ IssueTx(context.Context, *txs.Tx) error +} + +type network struct { + *p2p.Network + + log logging.Logger + txVerifier TxVerifier + mempool *gossipMempool + partialSyncPrimaryNetwork bool + appSender common.AppSender + + txPushGossiper gossip.Accumulator[*txs.Tx] + txPullGossiper gossip.Gossiper + txGossipFrequency time.Duration + + // gossip related attributes + recentTxsLock sync.Mutex + recentTxs *cache.LRU[ids.ID, struct{}] +} + +func New( + log logging.Logger, + nodeID ids.NodeID, + subnetID ids.ID, + vdrs validators.State, + txVerifier TxVerifier, + mempool mempool.Mempool, + partialSyncPrimaryNetwork bool, + appSender common.AppSender, + registerer prometheus.Registerer, + config Config, +) (Network, error) { + p2pNetwork, err := p2p.NewNetwork(log, appSender, registerer, "p2p") + if err != nil { + return nil, err + } + + marshaller := txMarshaller{} + validators := p2p.NewValidators( + p2pNetwork.Peers, + log, + subnetID, + vdrs, + config.MaxValidatorSetStaleness, + ) + txGossipClient := p2pNetwork.NewClient( + TxGossipHandlerID, + p2p.WithValidatorSampling(validators), + ) + txGossipMetrics, err := gossip.NewMetrics(registerer, "tx") + if err != nil { + return nil, err + } + + txPushGossiper := gossip.NewPushGossiper[*txs.Tx]( + marshaller, + txGossipClient, + txGossipMetrics, + config.TargetGossipSize, + ) + + gossipMempool, err := newGossipMempool( + mempool, + registerer, + log, + txVerifier, + config.ExpectedBloomFilterElements, + config.ExpectedBloomFilterFalsePositiveProbability, + config.MaxBloomFilterFalsePositiveProbability, + ) + if err != nil { + return nil, err + } + + var txPullGossiper gossip.Gossiper + txPullGossiper = gossip.NewPullGossiper[*txs.Tx]( + log, + marshaller, + gossipMempool, + txGossipClient, + txGossipMetrics, + config.PullGossipPollSize, + ) + + // Gossip requests are only served if a node is a validator + txPullGossiper = gossip.ValidatorGossiper{ + Gossiper: txPullGossiper, + NodeID: nodeID, + Validators: validators, + } 
+ + handler := gossip.NewHandler[*txs.Tx]( + log, + marshaller, + txPushGossiper, + gossipMempool, + txGossipMetrics, + config.TargetGossipSize, + ) + + validatorHandler := p2p.NewValidatorHandler( + p2p.NewThrottlerHandler( + handler, + p2p.NewSlidingWindowThrottler( + config.PullGossipThrottlingPeriod, + config.PullGossipThrottlingLimit, + ), + log, + ), + validators, + log, + ) + + // We allow pushing txs between all peers, but only serve gossip requests + // from validators + txGossipHandler := txGossipHandler{ + appGossipHandler: handler, + appRequestHandler: validatorHandler, + } + + if err := p2pNetwork.AddHandler(TxGossipHandlerID, txGossipHandler); err != nil { + return nil, err + } + + return &network{ + Network: p2pNetwork, + log: log, + txVerifier: txVerifier, + mempool: gossipMempool, + partialSyncPrimaryNetwork: partialSyncPrimaryNetwork, + appSender: appSender, + txPushGossiper: txPushGossiper, + txPullGossiper: txPullGossiper, + txGossipFrequency: config.PullGossipFrequency, + recentTxs: &cache.LRU[ids.ID, struct{}]{Size: config.LegacyPushGossipCacheSize}, + }, nil +} + +func (n *network) Gossip(ctx context.Context) { + // If the node is running partial sync, we should not perform any pull + // gossip. 
+ if n.partialSyncPrimaryNetwork { + return + } + + gossip.Every(ctx, n.log, n.txPullGossiper, n.txGossipFrequency) +} + +func (n *network) AppGossip(ctx context.Context, nodeID ids.NodeID, msgBytes []byte) error { + n.log.Debug("called AppGossip message handler", + zap.Stringer("nodeID", nodeID), + zap.Int("messageLen", len(msgBytes)), + ) + + if n.partialSyncPrimaryNetwork { + n.log.Debug("dropping AppGossip message", + zap.String("reason", "primary network is not being fully synced"), + ) + return nil + } + + msgIntf, err := message.Parse(msgBytes) + if err != nil { + n.log.Debug("forwarding AppGossip to p2p network", + zap.String("reason", "failed to parse message"), + ) + + return n.Network.AppGossip(ctx, nodeID, msgBytes) + } + + msg, ok := msgIntf.(*message.Tx) + if !ok { + n.log.Debug("dropping unexpected message", + zap.Stringer("nodeID", nodeID), + ) + return nil + } + + tx, err := txs.Parse(txs.Codec, msg.Tx) + if err != nil { + n.log.Verbo("received invalid tx", + zap.Stringer("nodeID", nodeID), + zap.Binary("tx", msg.Tx), + zap.Error(err), + ) + return nil + } + txID := tx.ID() + + if err := n.issueTx(tx); err == nil { + n.legacyGossipTx(ctx, txID, msgBytes) + + n.txPushGossiper.Add(tx) + return n.txPushGossiper.Gossip(ctx) + } + return nil +} + +func (n *network) IssueTx(ctx context.Context, tx *txs.Tx) error { + if err := n.issueTx(tx); err != nil { + return err + } + + txBytes := tx.Bytes() + msg := &message.Tx{ + Tx: txBytes, + } + msgBytes, err := message.Build(msg) + if err != nil { + return err + } + + txID := tx.ID() + n.legacyGossipTx(ctx, txID, msgBytes) + n.txPushGossiper.Add(tx) + return n.txPushGossiper.Gossip(ctx) +} + +// returns nil if the tx is in the mempool +func (n *network) issueTx(tx *txs.Tx) error { + // If we are partially syncing the Primary Network, we should not be + // maintaining the transaction mempool locally. 
+ if n.partialSyncPrimaryNetwork { + return nil + } + + if err := n.mempool.Add(tx); err != nil { + n.log.Debug("tx failed to be added to the mempool", + zap.Stringer("txID", tx.ID()), + zap.Error(err), + ) + + return err + } + + return nil +} + +func (n *network) legacyGossipTx(ctx context.Context, txID ids.ID, msgBytes []byte) { + n.recentTxsLock.Lock() + _, has := n.recentTxs.Get(txID) + n.recentTxs.Put(txID, struct{}{}) + n.recentTxsLock.Unlock() + + // Don't gossip a transaction if it has been recently gossiped. + if has { + return + } + + n.log.Debug("gossiping tx", + zap.Stringer("txID", txID), + ) + + if err := n.appSender.SendAppGossip(ctx, msgBytes); err != nil { + n.log.Error("failed to gossip tx", + zap.Stringer("txID", txID), + zap.Error(err), + ) + } +} diff --git a/avalanchego/vms/platformvm/network/network_test.go b/avalanchego/vms/platformvm/network/network_test.go new file mode 100644 index 00000000..56957b00 --- /dev/null +++ b/avalanchego/vms/platformvm/network/network_test.go @@ -0,0 +1,372 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package network + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/snow/snowtest" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/vms/components/avax" + "github.com/ava-labs/avalanchego/vms/components/message" + "github.com/ava-labs/avalanchego/vms/platformvm/txs" + "github.com/ava-labs/avalanchego/vms/platformvm/txs/mempool" +) + +var ( + errTest = errors.New("test error") + + testConfig = Config{ + MaxValidatorSetStaleness: time.Second, + TargetGossipSize: 1, + PullGossipPollSize: 1, + PullGossipFrequency: time.Second, + PullGossipThrottlingPeriod: time.Second, + PullGossipThrottlingLimit: 1, + ExpectedBloomFilterElements: 10, + ExpectedBloomFilterFalsePositiveProbability: .1, + MaxBloomFilterFalsePositiveProbability: .5, + LegacyPushGossipCacheSize: 512, + } +) + +var _ TxVerifier = (*testTxVerifier)(nil) + +type testTxVerifier struct { + err error +} + +func (t testTxVerifier) VerifyTx(*txs.Tx) error { + return t.err +} + +func TestNetworkAppGossip(t *testing.T) { + testTx := &txs.Tx{ + Unsigned: &txs.BaseTx{ + BaseTx: avax.BaseTx{ + NetworkID: 1, + BlockchainID: ids.GenerateTestID(), + Ins: []*avax.TransferableInput{}, + Outs: []*avax.TransferableOutput{}, + }, + }, + } + require.NoError(t, testTx.Initialize(txs.Codec)) + + type test struct { + name string + msgBytesFunc func() []byte + mempoolFunc func(*gomock.Controller) mempool.Mempool + partialSyncPrimaryNetwork bool + appSenderFunc func(*gomock.Controller) common.AppSender + } + + tests := []test{ + { + // Shouldn't attempt to issue or gossip the tx + name: "invalid message bytes", + msgBytesFunc: func() []byte { + return []byte{0x00} + }, + mempoolFunc: func(*gomock.Controller) mempool.Mempool { + return nil + 
}, + appSenderFunc: func(*gomock.Controller) common.AppSender { + return nil + }, + }, + { + // Shouldn't attempt to issue or gossip the tx + name: "invalid tx bytes", + msgBytesFunc: func() []byte { + msg := message.Tx{ + Tx: []byte{0x00}, + } + msgBytes, err := message.Build(&msg) + require.NoError(t, err) + return msgBytes + }, + mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { + return mempool.NewMockMempool(ctrl) + }, + appSenderFunc: func(ctrl *gomock.Controller) common.AppSender { + return common.NewMockSender(ctrl) + }, + }, + { + name: "issuance succeeds", + msgBytesFunc: func() []byte { + msg := message.Tx{ + Tx: testTx.Bytes(), + } + msgBytes, err := message.Build(&msg) + require.NoError(t, err) + return msgBytes + }, + mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { + mempool := mempool.NewMockMempool(ctrl) + mempool.EXPECT().Get(gomock.Any()).Return(nil, false) + mempool.EXPECT().GetDropReason(gomock.Any()).Return(nil) + mempool.EXPECT().Add(gomock.Any()).Return(nil) + mempool.EXPECT().Len().Return(0) + mempool.EXPECT().RequestBuildBlock(false) + return mempool + }, + appSenderFunc: func(ctrl *gomock.Controller) common.AppSender { + // we should gossip the tx twice because sdk and legacy gossip + // currently runs together + appSender := common.NewMockSender(ctrl) + appSender.EXPECT().SendAppGossip(gomock.Any(), gomock.Any()).Times(2) + return appSender + }, + }, + { + // Issue returns error because tx was dropped. We shouldn't gossip the tx. 
+ name: "issuance fails", + msgBytesFunc: func() []byte { + msg := message.Tx{ + Tx: testTx.Bytes(), + } + msgBytes, err := message.Build(&msg) + require.NoError(t, err) + return msgBytes + }, + mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { + mempool := mempool.NewMockMempool(ctrl) + mempool.EXPECT().Get(gomock.Any()).Return(nil, false) + mempool.EXPECT().GetDropReason(gomock.Any()).Return(errTest) + return mempool + }, + appSenderFunc: func(ctrl *gomock.Controller) common.AppSender { + return common.NewMockSender(ctrl) + }, + }, + { + name: "should AppGossip if primary network is not being fully synced", + msgBytesFunc: func() []byte { + msg := message.Tx{ + Tx: testTx.Bytes(), + } + msgBytes, err := message.Build(&msg) + require.NoError(t, err) + return msgBytes + }, + mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { + mempool := mempool.NewMockMempool(ctrl) + // mempool.EXPECT().Has(gomock.Any()).Return(true) + return mempool + }, + partialSyncPrimaryNetwork: true, + appSenderFunc: func(ctrl *gomock.Controller) common.AppSender { + appSender := common.NewMockSender(ctrl) + // appSender.EXPECT().SendAppGossip(gomock.Any(), gomock.Any()) + return appSender + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + ctx := context.Background() + ctrl := gomock.NewController(t) + + snowCtx := snowtest.Context(t, ids.Empty) + n, err := New( + logging.NoLog{}, + ids.EmptyNodeID, + ids.Empty, + snowCtx.ValidatorState, + testTxVerifier{}, + tt.mempoolFunc(ctrl), + tt.partialSyncPrimaryNetwork, + tt.appSenderFunc(ctrl), + prometheus.NewRegistry(), + DefaultConfig, + ) + require.NoError(err) + + require.NoError(n.AppGossip(ctx, ids.GenerateTestNodeID(), tt.msgBytesFunc())) + }) + } +} + +func TestNetworkIssueTx(t *testing.T) { + tx := &txs.Tx{} + + type test struct { + name string + mempoolFunc func(*gomock.Controller) mempool.Mempool + txVerifier testTxVerifier + partialSyncPrimaryNetwork 
bool + appSenderFunc func(*gomock.Controller) common.AppSender + expectedErr error + } + + tests := []test{ + { + name: "mempool has transaction", + mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { + mempool := mempool.NewMockMempool(ctrl) + mempool.EXPECT().Get(gomock.Any()).Return(tx, true) + return mempool + }, + appSenderFunc: func(ctrl *gomock.Controller) common.AppSender { + return common.NewMockSender(ctrl) + }, + expectedErr: mempool.ErrDuplicateTx, + }, + { + name: "transaction marked as dropped in mempool", + mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { + mempool := mempool.NewMockMempool(ctrl) + mempool.EXPECT().Get(gomock.Any()).Return(nil, false) + mempool.EXPECT().GetDropReason(gomock.Any()).Return(errTest) + return mempool + }, + appSenderFunc: func(ctrl *gomock.Controller) common.AppSender { + // Shouldn't gossip the tx + return common.NewMockSender(ctrl) + }, + expectedErr: errTest, + }, + { + name: "transaction invalid", + mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { + mempool := mempool.NewMockMempool(ctrl) + mempool.EXPECT().Get(gomock.Any()).Return(nil, false) + mempool.EXPECT().GetDropReason(gomock.Any()).Return(nil) + mempool.EXPECT().MarkDropped(gomock.Any(), gomock.Any()) + return mempool + }, + txVerifier: testTxVerifier{err: errTest}, + appSenderFunc: func(ctrl *gomock.Controller) common.AppSender { + // Shouldn't gossip the tx + return common.NewMockSender(ctrl) + }, + expectedErr: errTest, + }, + { + name: "can't add transaction to mempool", + mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { + mempool := mempool.NewMockMempool(ctrl) + mempool.EXPECT().Get(gomock.Any()).Return(nil, false) + mempool.EXPECT().GetDropReason(gomock.Any()).Return(nil) + mempool.EXPECT().Add(gomock.Any()).Return(errTest) + mempool.EXPECT().MarkDropped(gomock.Any(), gomock.Any()) + return mempool + }, + appSenderFunc: func(ctrl *gomock.Controller) common.AppSender { + // Shouldn't gossip the tx + return 
common.NewMockSender(ctrl) + }, + expectedErr: errTest, + }, + { + name: "AppGossip tx but do not add to mempool if primary network is not being fully synced", + mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { + return mempool.NewMockMempool(ctrl) + }, + partialSyncPrimaryNetwork: true, + appSenderFunc: func(ctrl *gomock.Controller) common.AppSender { + // we should gossip the tx twice because sdk and legacy gossip + // currently runs together + appSender := common.NewMockSender(ctrl) + appSender.EXPECT().SendAppGossip(gomock.Any(), gomock.Any()).Return(nil).Times(2) + return appSender + }, + expectedErr: nil, + }, + { + name: "happy path", + mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { + mempool := mempool.NewMockMempool(ctrl) + mempool.EXPECT().Get(gomock.Any()).Return(nil, false) + mempool.EXPECT().GetDropReason(gomock.Any()).Return(nil) + mempool.EXPECT().Add(gomock.Any()).Return(nil) + mempool.EXPECT().Len().Return(0) + mempool.EXPECT().RequestBuildBlock(false) + return mempool + }, + appSenderFunc: func(ctrl *gomock.Controller) common.AppSender { + // we should gossip the tx twice because sdk and legacy gossip + // currently runs together + appSender := common.NewMockSender(ctrl) + appSender.EXPECT().SendAppGossip(gomock.Any(), gomock.Any()).Return(nil).Times(2) + return appSender + }, + expectedErr: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) + + snowCtx := snowtest.Context(t, ids.Empty) + n, err := New( + snowCtx.Log, + snowCtx.NodeID, + snowCtx.SubnetID, + snowCtx.ValidatorState, + tt.txVerifier, + tt.mempoolFunc(ctrl), + tt.partialSyncPrimaryNetwork, + tt.appSenderFunc(ctrl), + prometheus.NewRegistry(), + testConfig, + ) + require.NoError(err) + + err = n.IssueTx(context.Background(), tx) + require.ErrorIs(err, tt.expectedErr) + }) + } +} + +func TestNetworkGossipTx(t *testing.T) { + require := require.New(t) + ctrl := 
gomock.NewController(t) + + appSender := common.NewMockSender(ctrl) + + snowCtx := snowtest.Context(t, ids.Empty) + nIntf, err := New( + snowCtx.Log, + snowCtx.NodeID, + snowCtx.SubnetID, + snowCtx.ValidatorState, + testTxVerifier{}, + mempool.NewMockMempool(ctrl), + false, + appSender, + prometheus.NewRegistry(), + testConfig, + ) + require.NoError(err) + require.IsType(&network{}, nIntf) + n := nIntf.(*network) + + // Case: Tx was recently gossiped + txID := ids.GenerateTestID() + n.recentTxs.Put(txID, struct{}{}) + n.legacyGossipTx(context.Background(), txID, []byte{}) + // Didn't make a call to SendAppGossip + + // Case: Tx was not recently gossiped + msgBytes := []byte{1, 2, 3} + appSender.EXPECT().SendAppGossip(gomock.Any(), msgBytes).Return(nil) + n.legacyGossipTx(context.Background(), ids.GenerateTestID(), msgBytes) + // Did make a call to SendAppGossip +} diff --git a/avalanchego/vms/platformvm/network/tx_verifier.go b/avalanchego/vms/platformvm/network/tx_verifier.go new file mode 100644 index 00000000..ee76c8b0 --- /dev/null +++ b/avalanchego/vms/platformvm/network/tx_verifier.go @@ -0,0 +1,36 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package network + +import ( + "sync" + + "github.com/ava-labs/avalanchego/vms/platformvm/txs" +) + +var _ TxVerifier = (*LockedTxVerifier)(nil) + +type TxVerifier interface { + // VerifyTx verifies that the transaction should be issued into the mempool. 
+ VerifyTx(tx *txs.Tx) error +} + +type LockedTxVerifier struct { + lock sync.Locker + txVerifier TxVerifier +} + +func (l *LockedTxVerifier) VerifyTx(tx *txs.Tx) error { + l.lock.Lock() + defer l.lock.Unlock() + + return l.txVerifier.VerifyTx(tx) +} + +func NewLockedTxVerifier(lock sync.Locker, txVerifier TxVerifier) *LockedTxVerifier { + return &LockedTxVerifier{ + lock: lock, + txVerifier: txVerifier, + } +} diff --git a/avalanchego/vms/platformvm/reward/calculator.go b/avalanchego/vms/platformvm/reward/calculator.go index 5903969a..7729a9d6 100644 --- a/avalanchego/vms/platformvm/reward/calculator.go +++ b/avalanchego/vms/platformvm/reward/calculator.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package reward @@ -6,6 +6,8 @@ package reward import ( "math/big" "time" + + "github.com/ava-labs/avalanchego/utils/math" ) var _ Calculator = (*calculator)(nil) @@ -34,3 +36,19 @@ func NewCalculator(c Config) Calculator { func (c *calculator) Calculate(stakedDuration time.Duration, stakedAmount, currentSupply uint64) uint64 { return uint64(0) } + +// Split [totalAmount] into [totalAmount * shares percentage] and the remainder. 
+// +// Invariant: [shares] <= [PercentDenominator] +func Split(totalAmount uint64, shares uint32) (uint64, uint64) { + remainderShares := PercentDenominator - uint64(shares) + remainderAmount := remainderShares * (totalAmount / PercentDenominator) + + // Delay rounding as long as possible for small numbers + if optimisticReward, err := math.Mul64(remainderShares, totalAmount); err == nil { + remainderAmount = optimisticReward / PercentDenominator + } + + amountFromShares := totalAmount - remainderAmount + return amountFromShares, remainderAmount +} diff --git a/avalanchego/vms/platformvm/reward/calculator_test.go b/avalanchego/vms/platformvm/reward/calculator_test.go index f3b4e363..c4b72422 100644 --- a/avalanchego/vms/platformvm/reward/calculator_test.go +++ b/avalanchego/vms/platformvm/reward/calculator_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package reward diff --git a/avalanchego/vms/platformvm/reward/config.go b/avalanchego/vms/platformvm/reward/config.go index 17a0a0d0..ccabc398 100644 --- a/avalanchego/vms/platformvm/reward/config.go +++ b/avalanchego/vms/platformvm/reward/config.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package reward diff --git a/avalanchego/vms/platformvm/service.go b/avalanchego/vms/platformvm/service.go index ba05b304..0712e1ff 100644 --- a/avalanchego/vms/platformvm/service.go +++ b/avalanchego/vms/platformvm/service.go @@ -1,36 +1,34 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package platformvm import ( "context" + "encoding/json" "errors" "fmt" + "maps" + "math" "net/http" "os" "strings" "time" - stdmath "math" - "go.uber.org/zap" - "golang.org/x/exp/maps" - "github.com/ava-labs/avalanchego/api" "github.com/ava-labs/avalanchego/cache" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/formatting" - "github.com/ava-labs/avalanchego/utils/json" "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/utils/math" "github.com/ava-labs/avalanchego/utils/set" - "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/keystore" "github.com/ava-labs/avalanchego/vms/platformvm/fx" @@ -44,6 +42,8 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/txs/executor" "github.com/ava-labs/avalanchego/vms/secp256k1fx" + avajson "github.com/ava-labs/avalanchego/utils/json" + safemath "github.com/ava-labs/avalanchego/utils/math" platformapi "github.com/ava-labs/avalanchego/vms/platformvm/api" ) @@ -64,22 +64,23 @@ const ( ) var ( - errMissingDecisionBlock = errors.New("should have a decision block within the past two blocks") - errNoSubnetID = errors.New("argument 'subnetID' not provided") - errNoRewardAddress = errors.New("argument 'rewardAddress' not provided") - errInvalidDelegationRate = errors.New("argument 'delegationFeeRate' must be between 0 and 100, inclusive") - errNoAddresses = errors.New("no addresses provided") - errNoKeys = errors.New("user has no keys or funds") - errStartTimeTooSoon = fmt.Errorf("start time must be at least %s in the future", minAddStakerDelay) - errStartTimeTooLate = 
errors.New("start time is too far in the future") - errNamedSubnetCantBePrimary = errors.New("subnet validator attempts to validate primary network") - errNoAmount = errors.New("argument 'amount' must be > 0") - errMissingName = errors.New("argument 'name' not given") - errMissingVMID = errors.New("argument 'vmID' not given") - errMissingBlockchainID = errors.New("argument 'blockchainID' not given") - errMissingPrivateKey = errors.New("argument 'privateKey' not given") - errStartAfterEndTime = errors.New("start time must be before end time") - errStartTimeInThePast = errors.New("start time in the past") + errMissingDecisionBlock = errors.New("should have a decision block within the past two blocks") + errNoSubnetID = errors.New("argument 'subnetID' not provided") + errPrimaryNetworkIsNotASubnet = errors.New("the primary network isn't a subnet") + errNoRewardAddress = errors.New("argument 'rewardAddress' not provided") + errInvalidDelegationRate = errors.New("argument 'delegationFeeRate' must be between 0 and 100, inclusive") + errNoAddresses = errors.New("no addresses provided") + errNoKeys = errors.New("user has no keys or funds") + errStartTimeTooSoon = fmt.Errorf("start time must be at least %s in the future", minAddStakerDelay) + errStartTimeTooLate = errors.New("start time is too far in the future") + errNamedSubnetCantBePrimary = errors.New("subnet validator attempts to validate primary network") + errNoAmount = errors.New("argument 'amount' must be > 0") + errMissingName = errors.New("argument 'name' not given") + errMissingVMID = errors.New("argument 'vmID' not given") + errMissingBlockchainID = errors.New("argument 'blockchainID' not given") + errMissingPrivateKey = errors.New("argument 'privateKey' not given") + errStartAfterEndTime = errors.New("start time must be before end time") + errStartTimeInThePast = errors.New("start time in the past") completeGetValidators = false ) @@ -113,17 +114,13 @@ func (s *Service) GetHeight(r *http.Request, _ *struct{}, 
response *api.GetHeigh zap.String("method", "getHeight"), ) + s.vm.ctx.Lock.Lock() + defer s.vm.ctx.Lock.Unlock() + ctx := r.Context() - lastAcceptedID, err := s.vm.LastAccepted(ctx) - if err != nil { - return fmt.Errorf("couldn't get last accepted block ID: %w", err) - } - lastAccepted, err := s.vm.GetBlock(ctx, lastAcceptedID) - if err != nil { - return fmt.Errorf("couldn't get last accepted block: %w", err) - } - response.Height = json.Uint64(lastAccepted.Height()) - return nil + height, err := s.vm.GetCurrentHeight(ctx) + response.Height = avajson.Uint64(height) + return err } // ExportKeyArgs are arguments for ExportKey @@ -151,6 +148,9 @@ func (s *Service) ExportKey(_ *http.Request, args *ExportKeyArgs, reply *ExportK return fmt.Errorf("couldn't parse %s to address: %w", args.Address, err) } + s.vm.ctx.Lock.Lock() + defer s.vm.ctx.Lock.Unlock() + user, err := keystore.NewUserFromKeystore(s.vm.ctx.Keystore, args.Username, args.Password) if err != nil { return err @@ -190,6 +190,9 @@ func (s *Service) ImportKey(_ *http.Request, args *ImportKeyArgs, reply *api.JSO return fmt.Errorf("problem formatting address: %w", err) } + s.vm.ctx.Lock.Lock() + defer s.vm.ctx.Lock.Unlock() + user, err := keystore.NewUserFromKeystore(s.vm.ctx.Keystore, args.Username, args.Password) if err != nil { return err @@ -216,15 +219,15 @@ type GetBalanceRequest struct { // compatibility. 
type GetBalanceResponse struct { // Balance, in nAVAX, of the address - Balance json.Uint64 `json:"balance"` - Unlocked json.Uint64 `json:"unlocked"` - LockedStakeable json.Uint64 `json:"lockedStakeable"` - LockedNotStakeable json.Uint64 `json:"lockedNotStakeable"` - Balances map[ids.ID]json.Uint64 `json:"balances"` - Unlockeds map[ids.ID]json.Uint64 `json:"unlockeds"` - LockedStakeables map[ids.ID]json.Uint64 `json:"lockedStakeables"` - LockedNotStakeables map[ids.ID]json.Uint64 `json:"lockedNotStakeables"` - UTXOIDs []*avax.UTXOID `json:"utxoIDs"` + Balance avajson.Uint64 `json:"balance"` + Unlocked avajson.Uint64 `json:"unlocked"` + LockedStakeable avajson.Uint64 `json:"lockedStakeable"` + LockedNotStakeable avajson.Uint64 `json:"lockedNotStakeable"` + Balances map[ids.ID]avajson.Uint64 `json:"balances"` + Unlockeds map[ids.ID]avajson.Uint64 `json:"unlockeds"` + LockedStakeables map[ids.ID]avajson.Uint64 `json:"lockedStakeables"` + LockedNotStakeables map[ids.ID]avajson.Uint64 `json:"lockedNotStakeables"` + UTXOIDs []*avax.UTXOID `json:"utxoIDs"` } // GetBalance gets the balance of an address @@ -235,12 +238,14 @@ func (s *Service) GetBalance(_ *http.Request, args *GetBalanceRequest, response logging.UserStrings("addresses", args.Addresses), ) - // Parse to address addrs, err := avax.ParseServiceAddresses(s.addrManager, args.Addresses) if err != nil { return err } + s.vm.ctx.Lock.Lock() + defer s.vm.ctx.Lock.Unlock() + utxos, err := avax.GetAllUTXOs(s.vm.state, addrs) if err != nil { return fmt.Errorf("couldn't get UTXO set of %v: %w", args.Addresses, err) @@ -258,16 +263,16 @@ utxoFor: switch out := utxo.Out.(type) { case *secp256k1fx.TransferOutput: if out.Locktime <= currentTime { - newBalance, err := math.Add64(unlockeds[assetID], out.Amount()) + newBalance, err := safemath.Add64(unlockeds[assetID], out.Amount()) if err != nil { - unlockeds[assetID] = stdmath.MaxUint64 + unlockeds[assetID] = math.MaxUint64 } else { unlockeds[assetID] = newBalance } } else { 
- newBalance, err := math.Add64(lockedNotStakeables[assetID], out.Amount()) + newBalance, err := safemath.Add64(lockedNotStakeables[assetID], out.Amount()) if err != nil { - lockedNotStakeables[assetID] = stdmath.MaxUint64 + lockedNotStakeables[assetID] = math.MaxUint64 } else { lockedNotStakeables[assetID] = newBalance } @@ -281,23 +286,23 @@ utxoFor: ) continue utxoFor case innerOut.Locktime > currentTime: - newBalance, err := math.Add64(lockedNotStakeables[assetID], out.Amount()) + newBalance, err := safemath.Add64(lockedNotStakeables[assetID], out.Amount()) if err != nil { - lockedNotStakeables[assetID] = stdmath.MaxUint64 + lockedNotStakeables[assetID] = math.MaxUint64 } else { lockedNotStakeables[assetID] = newBalance } case out.Locktime <= currentTime: - newBalance, err := math.Add64(unlockeds[assetID], out.Amount()) + newBalance, err := safemath.Add64(unlockeds[assetID], out.Amount()) if err != nil { - unlockeds[assetID] = stdmath.MaxUint64 + unlockeds[assetID] = math.MaxUint64 } else { unlockeds[assetID] = newBalance } default: - newBalance, err := math.Add64(lockedStakeables[assetID], out.Amount()) + newBalance, err := safemath.Add64(lockedStakeables[assetID], out.Amount()) if err != nil { - lockedStakeables[assetID] = stdmath.MaxUint64 + lockedStakeables[assetID] = math.MaxUint64 } else { lockedStakeables[assetID] = newBalance } @@ -311,17 +316,17 @@ utxoFor: balances := maps.Clone(lockedStakeables) for assetID, amount := range lockedNotStakeables { - newBalance, err := math.Add64(balances[assetID], amount) + newBalance, err := safemath.Add64(balances[assetID], amount) if err != nil { - balances[assetID] = stdmath.MaxUint64 + balances[assetID] = math.MaxUint64 } else { balances[assetID] = newBalance } } for assetID, amount := range unlockeds { - newBalance, err := math.Add64(balances[assetID], amount) + newBalance, err := safemath.Add64(balances[assetID], amount) if err != nil { - balances[assetID] = stdmath.MaxUint64 + balances[assetID] = math.MaxUint64 
} else { balances[assetID] = newBalance } @@ -338,10 +343,10 @@ utxoFor: return nil } -func newJSONBalanceMap(balanceMap map[ids.ID]uint64) map[ids.ID]json.Uint64 { - jsonBalanceMap := make(map[ids.ID]json.Uint64, len(balanceMap)) +func newJSONBalanceMap(balanceMap map[ids.ID]uint64) map[ids.ID]avajson.Uint64 { + jsonBalanceMap := make(map[ids.ID]avajson.Uint64, len(balanceMap)) for assetID, amount := range balanceMap { - jsonBalanceMap[assetID] = json.Uint64(amount) + jsonBalanceMap[assetID] = avajson.Uint64(amount) } return jsonBalanceMap } @@ -355,6 +360,9 @@ func (s *Service) CreateAddress(_ *http.Request, args *api.UserPass, response *a logging.UserString("username", args.Username), ) + s.vm.ctx.Lock.Lock() + defer s.vm.ctx.Lock.Unlock() + user, err := keystore.NewUserFromKeystore(s.vm.ctx.Keystore, args.Username, args.Password) if err != nil { return err @@ -381,6 +389,9 @@ func (s *Service) ListAddresses(_ *http.Request, args *api.UserPass, response *a logging.UserString("username", args.Username), ) + s.vm.ctx.Lock.Lock() + defer s.vm.ctx.Lock.Unlock() + user, err := keystore.NewUserFromKeystore(s.vm.ctx.Keystore, args.Username, args.Password) if err != nil { return err @@ -460,6 +471,10 @@ func (s *Service) GetUTXOs(_ *http.Request, args *api.GetUTXOsArgs, response *ap if limit <= 0 || builder.MaxPageSize < limit { limit = builder.MaxPageSize } + + s.vm.ctx.Lock.Lock() + defer s.vm.ctx.Lock.Unlock() + if sourceChain == s.vm.ctx.ChainID { utxos, endAddr, endUTXOID, err = avax.GetPaginatedUTXOs( s.vm.state, @@ -483,13 +498,13 @@ func (s *Service) GetUTXOs(_ *http.Request, args *api.GetUTXOsArgs, response *ap response.UTXOs = make([]string, len(utxos)) for i, utxo := range utxos { - bytes, err := txs.Codec.Marshal(txs.Version, utxo) + bytes, err := txs.Codec.Marshal(txs.CodecVersion, utxo) if err != nil { return fmt.Errorf("couldn't serialize UTXO %q: %w", utxo.InputID(), err) } response.UTXOs[i], err = formatting.Encode(args.Encoding, bytes) if err != nil { 
- return fmt.Errorf("couldn't encode UTXO %s as string: %w", utxo.InputID(), err) + return fmt.Errorf("couldn't encode UTXO %s as %s: %w", utxo.InputID(), args.Encoding, err) } } @@ -500,11 +515,78 @@ func (s *Service) GetUTXOs(_ *http.Request, args *api.GetUTXOsArgs, response *ap response.EndIndex.Address = endAddress response.EndIndex.UTXO = endUTXOID.String() - response.NumFetched = json.Uint64(len(utxos)) + response.NumFetched = avajson.Uint64(len(utxos)) response.Encoding = args.Encoding return nil } +// GetSubnetArgs are the arguments to GetSubnet +type GetSubnetArgs struct { + // ID of the subnet to retrieve information about + SubnetID ids.ID `json:"subnetID"` +} + +// GetSubnetResponse is the response from calling GetSubnet +type GetSubnetResponse struct { + // whether it is permissioned or not + IsPermissioned bool `json:"isPermissioned"` + // subnet auth information for a permissioned subnet + ControlKeys []string `json:"controlKeys"` + Threshold avajson.Uint32 `json:"threshold"` + Locktime avajson.Uint64 `json:"locktime"` + // subnet transformation tx ID for a permissionless subnet + SubnetTransformationTxID ids.ID `json:"subnetTransformationTxID"` +} + +func (s *Service) GetSubnet(_ *http.Request, args *GetSubnetArgs, response *GetSubnetResponse) error { + s.vm.ctx.Log.Debug("API called", + zap.String("service", "platform"), + zap.String("method", "getSubnet"), + zap.Stringer("subnetID", args.SubnetID), + ) + + if args.SubnetID == constants.PrimaryNetworkID { + return errPrimaryNetworkIsNotASubnet + } + + s.vm.ctx.Lock.Lock() + defer s.vm.ctx.Lock.Unlock() + + subnetOwner, err := s.vm.state.GetSubnetOwner(args.SubnetID) + if err != nil { + return err + } + owner, ok := subnetOwner.(*secp256k1fx.OutputOwners) + if !ok { + return fmt.Errorf("expected *secp256k1fx.OutputOwners but got %T", subnetOwner) + } + controlAddrs := make([]string, len(owner.Addrs)) + for i, controlKeyID := range owner.Addrs { + addr, err := 
s.addrManager.FormatLocalAddress(controlKeyID) + if err != nil { + return fmt.Errorf("problem formatting address: %w", err) + } + controlAddrs[i] = addr + } + + response.ControlKeys = controlAddrs + response.Threshold = avajson.Uint32(owner.Threshold) + response.Locktime = avajson.Uint64(owner.Locktime) + + switch subnetTransformationTx, err := s.vm.state.GetSubnetTransformation(args.SubnetID); err { + case nil: + response.IsPermissioned = false + response.SubnetTransformationTxID = subnetTransformationTx.ID() + case database.ErrNotFound: + response.IsPermissioned = true + response.SubnetTransformationTxID = ids.Empty + default: + return err + } + + return nil +} + /* ****************************************************** ******************* Get Subnets ********************** @@ -519,11 +601,11 @@ type APISubnet struct { // Each element of [ControlKeys] the address of a public key. // A transaction to add a validator to this subnet requires // signatures from [Threshold] of these keys to be valid. 
- ControlKeys []string `json:"controlKeys"` - Threshold json.Uint32 `json:"threshold"` + ControlKeys []string `json:"controlKeys"` + Threshold avajson.Uint32 `json:"threshold"` } -// GetSubnetsArgs are the arguments to GetSubnet +// GetSubnetsArgs are the arguments to GetSubnets type GetSubnetsArgs struct { // IDs of the subnets to retrieve information about // If omitted, gets all subnets @@ -545,6 +627,9 @@ func (s *Service) GetSubnets(_ *http.Request, args *GetSubnetsArgs, response *Ge zap.String("method", "getSubnets"), ) + s.vm.ctx.Lock.Lock() + defer s.vm.ctx.Lock.Unlock() + getAll := len(args.IDs) == 0 if getAll { subnets, err := s.vm.state.GetSubnets() // all subnets @@ -559,7 +644,7 @@ func (s *Service) GetSubnets(_ *http.Request, args *GetSubnetsArgs, response *Ge response.Subnets[i] = APISubnet{ ID: subnetID, ControlKeys: []string{}, - Threshold: json.Uint32(0), + Threshold: avajson.Uint32(0), } continue } @@ -577,14 +662,14 @@ func (s *Service) GetSubnets(_ *http.Request, args *GetSubnetsArgs, response *Ge response.Subnets[i] = APISubnet{ ID: subnetID, ControlKeys: controlAddrs, - Threshold: json.Uint32(owner.Threshold), + Threshold: avajson.Uint32(owner.Threshold), } } // Include primary network response.Subnets[len(subnets)] = APISubnet{ ID: constants.PrimaryNetworkID, ControlKeys: []string{}, - Threshold: json.Uint32(0), + Threshold: avajson.Uint32(0), } return nil } @@ -601,7 +686,7 @@ func (s *Service) GetSubnets(_ *http.Request, args *GetSubnetsArgs, response *Ge APISubnet{ ID: constants.PrimaryNetworkID, ControlKeys: []string{}, - Threshold: json.Uint32(0), + Threshold: avajson.Uint32(0), }, ) continue @@ -611,12 +696,12 @@ func (s *Service) GetSubnets(_ *http.Request, args *GetSubnetsArgs, response *Ge response.Subnets = append(response.Subnets, APISubnet{ ID: subnetID, ControlKeys: []string{}, - Threshold: json.Uint32(0), + Threshold: avajson.Uint32(0), }) continue } - subnetTx, _, err := s.vm.state.GetTx(subnetID) + subnetOwner, err := 
s.vm.state.GetSubnetOwner(subnetID) if err == database.ErrNotFound { continue } @@ -624,13 +709,9 @@ func (s *Service) GetSubnets(_ *http.Request, args *GetSubnetsArgs, response *Ge return err } - subnet, ok := subnetTx.Unsigned.(*txs.CreateSubnetTx) - if !ok { - return fmt.Errorf("expected tx type *txs.CreateSubnetTx but got %T", subnetTx.Unsigned) - } - owner, ok := subnet.Owner.(*secp256k1fx.OutputOwners) + owner, ok := subnetOwner.(*secp256k1fx.OutputOwners) if !ok { - return fmt.Errorf("expected *secp256k1fx.OutputOwners but got %T", subnet.Owner) + return fmt.Errorf("expected *secp256k1fx.OutputOwners but got %T", subnetOwner) } controlAddrs := make([]string, len(owner.Addrs)) @@ -645,7 +726,7 @@ func (s *Service) GetSubnets(_ *http.Request, args *GetSubnetsArgs, response *Ge response.Subnets = append(response.Subnets, APISubnet{ ID: subnetID, ControlKeys: controlAddrs, - Threshold: json.Uint32(owner.Threshold), + Threshold: avajson.Uint32(owner.Threshold), }) } return nil @@ -674,6 +755,9 @@ func (s *Service) GetStakingAssetID(_ *http.Request, args *GetStakingAssetIDArgs return nil } + s.vm.ctx.Lock.Lock() + defer s.vm.ctx.Lock.Unlock() + transformSubnetIntf, err := s.vm.state.GetSubnetTransformation(args.SubnetID) if err != nil { return fmt.Errorf( @@ -775,8 +859,10 @@ func (s *Service) GetCurrentValidators(_ *http.Request, args *GetCurrentValidato vdrToDelegators := map[ids.NodeID][]platformapi.PrimaryDelegator{} // Create set of nodeIDs - nodeIDs := set.Set[ids.NodeID]{} - nodeIDs.Add(args.NodeIDs...) + nodeIDs := set.Of(args.NodeIDs...) 
+ + s.vm.ctx.Lock.Lock() + defer s.vm.ctx.Lock.Unlock() numNodeIDs := nodeIDs.Len() targetStakers := make([]*state.Staker, 0, numNodeIDs) @@ -822,22 +908,22 @@ func (s *Service) GetCurrentValidators(_ *http.Request, args *GetCurrentValidato for _, currentStaker := range targetStakers { nodeID := currentStaker.NodeID - weight := json.Uint64(currentStaker.Weight) + weight := avajson.Uint64(currentStaker.Weight) apiStaker := platformapi.Staker{ TxID: currentStaker.TxID, - StartTime: json.Uint64(currentStaker.StartTime.Unix()), - EndTime: json.Uint64(currentStaker.EndTime.Unix()), + StartTime: avajson.Uint64(currentStaker.StartTime.Unix()), + EndTime: avajson.Uint64(currentStaker.EndTime.Unix()), Weight: weight, StakeAmount: &weight, NodeID: nodeID, } - potentialReward := json.Uint64(currentStaker.PotentialReward) + potentialReward := avajson.Uint64(currentStaker.PotentialReward) delegateeReward, err := s.vm.state.GetDelegateeReward(currentStaker.SubnetID, currentStaker.NodeID) if err != nil { return err } - jsonDelegateeReward := json.Uint64(delegateeReward) + jsonDelegateeReward := avajson.Uint64(delegateeReward) switch currentStaker.Priority { case txs.PrimaryNetworkValidatorCurrentPriority, txs.SubnetPermissionlessValidatorCurrentPriority: @@ -847,7 +933,7 @@ func (s *Service) GetCurrentValidators(_ *http.Request, args *GetCurrentValidato } shares := attr.shares - delegationFee := json.Float32(100 * float32(shares) / float32(reward.PercentDenominator)) + delegationFee := avajson.Float32(100 * float32(shares) / float32(reward.PercentDenominator)) uptime, err := s.getAPIUptime(currentStaker) if err != nil { @@ -942,8 +1028,8 @@ func (s *Service) GetCurrentValidators(_ *http.Request, args *GetCurrentValidato // always return a non-nil value. 
delegators = []platformapi.PrimaryDelegator{} } - delegatorCount := json.Uint64(len(delegators)) - delegatorWeight := json.Uint64(0) + delegatorCount := avajson.Uint64(len(delegators)) + delegatorWeight := avajson.Uint64(0) for _, d := range delegators { delegatorWeight += d.Weight } @@ -990,8 +1076,10 @@ func (s *Service) GetPendingValidators(_ *http.Request, args *GetPendingValidato reply.Delegators = []interface{}{} // Create set of nodeIDs - nodeIDs := set.Set[ids.NodeID]{} - nodeIDs.Add(args.NodeIDs...) + nodeIDs := set.Of(args.NodeIDs...) + + s.vm.ctx.Lock.Lock() + defer s.vm.ctx.Lock.Unlock() numNodeIDs := nodeIDs.Len() targetStakers := make([]*state.Staker, 0, numNodeIDs) @@ -1035,12 +1123,12 @@ func (s *Service) GetPendingValidators(_ *http.Request, args *GetPendingValidato for _, pendingStaker := range targetStakers { nodeID := pendingStaker.NodeID - weight := json.Uint64(pendingStaker.Weight) + weight := avajson.Uint64(pendingStaker.Weight) apiStaker := platformapi.Staker{ TxID: pendingStaker.TxID, NodeID: nodeID, - StartTime: json.Uint64(pendingStaker.StartTime.Unix()), - EndTime: json.Uint64(pendingStaker.EndTime.Unix()), + StartTime: avajson.Uint64(pendingStaker.StartTime.Unix()), + EndTime: avajson.Uint64(pendingStaker.EndTime.Unix()), Weight: weight, StakeAmount: &weight, } @@ -1053,7 +1141,7 @@ func (s *Service) GetPendingValidators(_ *http.Request, args *GetPendingValidato } shares := attr.shares - delegationFee := json.Float32(100 * float32(shares) / float32(reward.PercentDenominator)) + delegationFee := avajson.Float32(100 * float32(shares) / float32(reward.PercentDenominator)) connected := s.vm.uptimeManager.IsConnected(nodeID, args.SubnetID) vdr := platformapi.PermissionlessValidator{ @@ -1088,25 +1176,40 @@ type GetCurrentSupplyArgs struct { // GetCurrentSupplyReply are the results from calling GetCurrentSupply type GetCurrentSupplyReply struct { - Supply json.Uint64 `json:"supply"` + Supply avajson.Uint64 `json:"supply"` + Height 
avajson.Uint64 `json:"height"` } // GetCurrentSupply returns an upper bound on the supply of AVAX in the system -func (s *Service) GetCurrentSupply(_ *http.Request, args *GetCurrentSupplyArgs, reply *GetCurrentSupplyReply) error { +func (s *Service) GetCurrentSupply(r *http.Request, args *GetCurrentSupplyArgs, reply *GetCurrentSupplyReply) error { s.vm.ctx.Log.Debug("API called", zap.String("service", "platform"), zap.String("method", "getCurrentSupply"), ) + s.vm.ctx.Lock.Lock() + defer s.vm.ctx.Lock.Unlock() + supply, err := s.vm.state.GetCurrentSupply(args.SubnetID) - reply.Supply = json.Uint64(supply) - return err + if err != nil { + return fmt.Errorf("fetching current supply failed: %w", err) + } + reply.Supply = avajson.Uint64(supply) + + ctx := r.Context() + height, err := s.vm.GetCurrentHeight(ctx) + if err != nil { + return fmt.Errorf("fetching current height failed: %w", err) + } + reply.Height = avajson.Uint64(height) + + return nil } // SampleValidatorsArgs are the arguments for calling SampleValidators type SampleValidatorsArgs struct { // Number of validators in the sample - Size json.Uint16 `json:"size"` + Size avajson.Uint16 `json:"size"` // ID of subnet to sample validators from // If omitted, defaults to the primary network @@ -1126,17 +1229,9 @@ func (s *Service) SampleValidators(_ *http.Request, args *SampleValidatorsArgs, zap.Uint16("size", uint16(args.Size)), ) - validators, ok := s.vm.Validators.Get(args.SubnetID) - if !ok { - return fmt.Errorf( - "couldn't get validators of subnet %q. 
Is it being validated?", - args.SubnetID, - ) - } - - sample, err := validators.Sample(int(args.Size)) + sample, err := s.vm.Validators.Sample(args.SubnetID, int(args.Size)) if err != nil { - return fmt.Errorf("sampling errored with %w", err) + return fmt.Errorf("sampling %s errored with %w", args.SubnetID, err) } if sample == nil { @@ -1160,23 +1255,38 @@ type AddValidatorArgs struct { api.JSONSpendHeader platformapi.Staker // The address the staking reward, if applicable, will go to - RewardAddress string `json:"rewardAddress"` - DelegationFeeRate json.Float32 `json:"delegationFeeRate"` + RewardAddress string `json:"rewardAddress"` + DelegationFeeRate avajson.Float32 `json:"delegationFeeRate"` } // AddValidator creates and signs and issues a transaction to add a validator to // the primary network -func (s *Service) AddValidator(_ *http.Request, args *AddValidatorArgs, reply *api.JSONTxIDChangeAddr) error { +func (s *Service) AddValidator(req *http.Request, args *AddValidatorArgs, reply *api.JSONTxIDChangeAddr) error { s.vm.ctx.Log.Warn("deprecated API called", zap.String("service", "platform"), zap.String("method", "addValidator"), ) + tx, changeAddr, err := s.buildAddValidatorTx(args) + if err != nil { + return fmt.Errorf("couldn't create tx: %w", err) + } + + reply.TxID = tx.ID() + reply.ChangeAddr, err = s.addrManager.FormatLocalAddress(changeAddr) + if err != nil { + return fmt.Errorf("couldn't format address: %w", err) + } + + return s.vm.issueTx(req.Context(), tx) +} + +func (s *Service) buildAddValidatorTx(args *AddValidatorArgs) (*txs.Tx, ids.ShortID, error) { now := s.vm.clock.Time() minAddStakerTime := now.Add(minAddStakerDelay) - minAddStakerUnix := json.Uint64(minAddStakerTime.Unix()) + minAddStakerUnix := avajson.Uint64(minAddStakerTime.Unix()) maxAddStakerTime := now.Add(executor.MaxFutureStartTime) - maxAddStakerUnix := json.Uint64(maxAddStakerTime.Unix()) + maxAddStakerUnix := avajson.Uint64(maxAddStakerTime.Unix()) if args.StartTime == 0 { 
args.StartTime = minAddStakerUnix @@ -1184,13 +1294,13 @@ func (s *Service) AddValidator(_ *http.Request, args *AddValidatorArgs, reply *a switch { case args.RewardAddress == "": - return errNoRewardAddress + return nil, ids.ShortEmpty, errNoRewardAddress case args.StartTime < minAddStakerUnix: - return errStartTimeTooSoon + return nil, ids.ShortEmpty, errStartTimeTooSoon case args.StartTime > maxAddStakerUnix: - return errStartTimeTooLate + return nil, ids.ShortEmpty, errStartTimeTooLate case args.DelegationFeeRate < 0 || args.DelegationFeeRate > 100: - return errInvalidDelegationRate + return nil, ids.ShortEmpty, errInvalidDelegationRate } // Parse the node ID @@ -1204,36 +1314,39 @@ func (s *Service) AddValidator(_ *http.Request, args *AddValidatorArgs, reply *a // Parse the from addresses fromAddrs, err := avax.ParseServiceAddresses(s.addrManager, args.From) if err != nil { - return err + return nil, ids.ShortEmpty, err } // Parse the reward address rewardAddress, err := avax.ParseServiceAddress(s.addrManager, args.RewardAddress) if err != nil { - return fmt.Errorf("problem while parsing reward address: %w", err) + return nil, ids.ShortEmpty, fmt.Errorf("problem while parsing reward address: %w", err) } + s.vm.ctx.Lock.Lock() + defer s.vm.ctx.Lock.Unlock() + user, err := keystore.NewUserFromKeystore(s.vm.ctx.Keystore, args.Username, args.Password) if err != nil { - return err + return nil, ids.ShortEmpty, err } defer user.Close() // Get the user's keys privKeys, err := keystore.GetKeychain(user, fromAddrs) if err != nil { - return fmt.Errorf("couldn't get addresses controlled by the user: %w", err) + return nil, ids.ShortEmpty, fmt.Errorf("couldn't get addresses controlled by the user: %w", err) } // Parse the change address. 
if len(privKeys.Keys) == 0 { - return errNoKeys + return nil, ids.ShortEmpty, errNoKeys } changeAddr := privKeys.Keys[0].PublicKey().Address() // By default, use a key controlled by the user if args.ChangeAddr != "" { changeAddr, err = avax.ParseServiceAddress(s.addrManager, args.ChangeAddr) if err != nil { - return fmt.Errorf("couldn't parse changeAddr: %w", err) + return nil, ids.ShortEmpty, fmt.Errorf("couldn't parse changeAddr: %w", err) } } @@ -1252,21 +1365,13 @@ func (s *Service) AddValidator(_ *http.Request, args *AddValidatorArgs, reply *a uint32(10000*args.DelegationFeeRate), // Shares privKeys.Keys, // Keys providing the staked tokens changeAddr, + nil, ) if err != nil { - return fmt.Errorf("couldn't create tx: %w", err) + return nil, ids.ShortEmpty, err } - reply.TxID = tx.ID() - reply.ChangeAddr, err = s.addrManager.FormatLocalAddress(changeAddr) - - errs := wrappers.Errs{} - errs.Add( - err, - s.vm.Builder.AddUnverifiedTx(tx), - user.Close(), - ) - return errs.Err + return tx, changeAddr, user.Close() } // AddDelegatorArgs are the arguments to AddDelegator @@ -1279,17 +1384,32 @@ type AddDelegatorArgs struct { // AddDelegator creates and signs and issues a transaction to add a delegator to // the primary network -func (s *Service) AddDelegator(_ *http.Request, args *AddDelegatorArgs, reply *api.JSONTxIDChangeAddr) error { +func (s *Service) AddDelegator(req *http.Request, args *AddDelegatorArgs, reply *api.JSONTxIDChangeAddr) error { s.vm.ctx.Log.Warn("deprecated API called", zap.String("service", "platform"), zap.String("method", "addDelegator"), ) + tx, changeAddr, err := s.buildAddDelegatorTx(args) + if err != nil { + return fmt.Errorf("couldn't create tx: %w", err) + } + + reply.TxID = tx.ID() + reply.ChangeAddr, err = s.addrManager.FormatLocalAddress(changeAddr) + if err != nil { + return fmt.Errorf("couldn't format address: %w", err) + } + + return s.vm.issueTx(req.Context(), tx) +} + +func (s *Service) buildAddDelegatorTx(args 
*AddDelegatorArgs) (*txs.Tx, ids.ShortID, error) { now := s.vm.clock.Time() minAddStakerTime := now.Add(minAddStakerDelay) - minAddStakerUnix := json.Uint64(minAddStakerTime.Unix()) + minAddStakerUnix := avajson.Uint64(minAddStakerTime.Unix()) maxAddStakerTime := now.Add(executor.MaxFutureStartTime) - maxAddStakerUnix := json.Uint64(maxAddStakerTime.Unix()) + maxAddStakerUnix := avajson.Uint64(maxAddStakerTime.Unix()) if args.StartTime == 0 { args.StartTime = minAddStakerUnix @@ -1297,11 +1417,11 @@ func (s *Service) AddDelegator(_ *http.Request, args *AddDelegatorArgs, reply *a switch { case args.RewardAddress == "": - return errNoRewardAddress + return nil, ids.ShortEmpty, errNoRewardAddress case args.StartTime < minAddStakerUnix: - return errStartTimeTooSoon + return nil, ids.ShortEmpty, errStartTimeTooSoon case args.StartTime > maxAddStakerUnix: - return errStartTimeTooLate + return nil, ids.ShortEmpty, errStartTimeTooLate } var nodeID ids.NodeID @@ -1314,36 +1434,39 @@ func (s *Service) AddDelegator(_ *http.Request, args *AddDelegatorArgs, reply *a // Parse the reward address rewardAddress, err := avax.ParseServiceAddress(s.addrManager, args.RewardAddress) if err != nil { - return fmt.Errorf("problem parsing 'rewardAddress': %w", err) + return nil, ids.ShortEmpty, fmt.Errorf("problem parsing 'rewardAddress': %w", err) } // Parse the from addresses fromAddrs, err := avax.ParseServiceAddresses(s.addrManager, args.From) if err != nil { - return err + return nil, ids.ShortEmpty, err } + s.vm.ctx.Lock.Lock() + defer s.vm.ctx.Lock.Unlock() + user, err := keystore.NewUserFromKeystore(s.vm.ctx.Keystore, args.Username, args.Password) if err != nil { - return err + return nil, ids.ShortEmpty, err } defer user.Close() privKeys, err := keystore.GetKeychain(user, fromAddrs) if err != nil { - return fmt.Errorf("couldn't get addresses controlled by the user: %w", err) + return nil, ids.ShortEmpty, fmt.Errorf("couldn't get addresses controlled by the user: %w", err) } // 
Parse the change address. Assumes that if the user has no keys, // this operation will fail so the change address can be anything. if len(privKeys.Keys) == 0 { - return errNoKeys + return nil, ids.ShortEmpty, errNoKeys } changeAddr := privKeys.Keys[0].PublicKey().Address() // By default, use a key controlled by the user if args.ChangeAddr != "" { changeAddr, err = avax.ParseServiceAddress(s.addrManager, args.ChangeAddr) if err != nil { - return fmt.Errorf("couldn't parse changeAddr: %w", err) + return nil, ids.ShortEmpty, fmt.Errorf("couldn't parse changeAddr: %w", err) } } @@ -1361,21 +1484,13 @@ func (s *Service) AddDelegator(_ *http.Request, args *AddDelegatorArgs, reply *a rewardAddress, // Reward Address privKeys.Keys, // Private keys changeAddr, // Change address + nil, // Memo ) if err != nil { - return fmt.Errorf("couldn't create tx: %w", err) + return nil, ids.ShortEmpty, err } - reply.TxID = tx.ID() - reply.ChangeAddr, err = s.addrManager.FormatLocalAddress(changeAddr) - - errs := wrappers.Errs{} - errs.Add( - err, - s.vm.Builder.AddUnverifiedTx(tx), - user.Close(), - ) - return errs.Err + return tx, changeAddr, user.Close() } // AddSubnetValidatorArgs are the arguments to AddSubnetValidator @@ -1389,17 +1504,32 @@ type AddSubnetValidatorArgs struct { // AddSubnetValidator creates and signs and issues a transaction to add a // validator to a subnet other than the primary network -func (s *Service) AddSubnetValidator(_ *http.Request, args *AddSubnetValidatorArgs, response *api.JSONTxIDChangeAddr) error { +func (s *Service) AddSubnetValidator(req *http.Request, args *AddSubnetValidatorArgs, response *api.JSONTxIDChangeAddr) error { s.vm.ctx.Log.Warn("deprecated API called", zap.String("service", "platform"), zap.String("method", "addSubnetValidator"), ) + tx, changeAddr, err := s.buildAddSubnetValidatorTx(args) + if err != nil { + return fmt.Errorf("couldn't create tx: %w", err) + } + + response.TxID = tx.ID() + response.ChangeAddr, err = 
s.addrManager.FormatLocalAddress(changeAddr) + if err != nil { + return fmt.Errorf("couldn't format address: %w", err) + } + + return s.vm.issueTx(req.Context(), tx) +} + +func (s *Service) buildAddSubnetValidatorTx(args *AddSubnetValidatorArgs) (*txs.Tx, ids.ShortID, error) { now := s.vm.clock.Time() minAddStakerTime := now.Add(minAddStakerDelay) - minAddStakerUnix := json.Uint64(minAddStakerTime.Unix()) + minAddStakerUnix := avajson.Uint64(minAddStakerTime.Unix()) maxAddStakerTime := now.Add(executor.MaxFutureStartTime) - maxAddStakerUnix := json.Uint64(maxAddStakerTime.Unix()) + maxAddStakerUnix := avajson.Uint64(maxAddStakerTime.Unix()) if args.StartTime == 0 { args.StartTime = minAddStakerUnix @@ -1407,48 +1537,51 @@ func (s *Service) AddSubnetValidator(_ *http.Request, args *AddSubnetValidatorAr switch { case args.SubnetID == "": - return errNoSubnetID + return nil, ids.ShortEmpty, errNoSubnetID case args.StartTime < minAddStakerUnix: - return errStartTimeTooSoon + return nil, ids.ShortEmpty, errStartTimeTooSoon case args.StartTime > maxAddStakerUnix: - return errStartTimeTooLate + return nil, ids.ShortEmpty, errStartTimeTooLate } // Parse the subnet ID subnetID, err := ids.FromString(args.SubnetID) if err != nil { - return fmt.Errorf("problem parsing subnetID %q: %w", args.SubnetID, err) + return nil, ids.ShortEmpty, fmt.Errorf("problem parsing subnetID %q: %w", args.SubnetID, err) } if subnetID == constants.PrimaryNetworkID { - return errNamedSubnetCantBePrimary + return nil, ids.ShortEmpty, errNamedSubnetCantBePrimary } // Parse the from addresses fromAddrs, err := avax.ParseServiceAddresses(s.addrManager, args.From) if err != nil { - return err + return nil, ids.ShortEmpty, err } + s.vm.ctx.Lock.Lock() + defer s.vm.ctx.Lock.Unlock() + user, err := keystore.NewUserFromKeystore(s.vm.ctx.Keystore, args.Username, args.Password) if err != nil { - return err + return nil, ids.ShortEmpty, err } defer user.Close() keys, err := keystore.GetKeychain(user, 
fromAddrs) if err != nil { - return fmt.Errorf("couldn't get addresses controlled by the user: %w", err) + return nil, ids.ShortEmpty, fmt.Errorf("couldn't get addresses controlled by the user: %w", err) } // Parse the change address. if len(keys.Keys) == 0 { - return errNoKeys + return nil, ids.ShortEmpty, errNoKeys } changeAddr := keys.Keys[0].PublicKey().Address() // By default, use a key controlled by the user if args.ChangeAddr != "" { changeAddr, err = avax.ParseServiceAddress(s.addrManager, args.ChangeAddr) if err != nil { - return fmt.Errorf("couldn't parse changeAddr: %w", err) + return nil, ids.ShortEmpty, fmt.Errorf("couldn't parse changeAddr: %w", err) } } @@ -1466,21 +1599,13 @@ func (s *Service) AddSubnetValidator(_ *http.Request, args *AddSubnetValidatorAr subnetID, // Subnet ID keys.Keys, changeAddr, + nil, ) if err != nil { - return fmt.Errorf("couldn't create tx: %w", err) + return nil, ids.ShortEmpty, err } - response.TxID = tx.ID() - response.ChangeAddr, err = s.addrManager.FormatLocalAddress(changeAddr) - - errs := wrappers.Errs{} - errs.Add( - err, - s.vm.Builder.AddUnverifiedTx(tx), - user.Close(), - ) - return errs.Err + return tx, changeAddr, user.Close() } // CreateSubnetArgs are the arguments to CreateSubnet @@ -1493,45 +1618,63 @@ type CreateSubnetArgs struct { // CreateSubnet creates and signs and issues a transaction to create a new // subnet -func (s *Service) CreateSubnet(_ *http.Request, args *CreateSubnetArgs, response *api.JSONTxIDChangeAddr) error { +func (s *Service) CreateSubnet(req *http.Request, args *CreateSubnetArgs, response *api.JSONTxIDChangeAddr) error { s.vm.ctx.Log.Warn("deprecated API called", zap.String("service", "platform"), zap.String("method", "createSubnet"), ) + tx, changeAddr, err := s.buildCreateSubnetTx(args) + if err != nil { + return fmt.Errorf("couldn't create tx: %w", err) + } + + response.TxID = tx.ID() + response.ChangeAddr, err = s.addrManager.FormatLocalAddress(changeAddr) + if err != nil { + return 
fmt.Errorf("couldn't format address: %w", err) + } + + return s.vm.issueTx(req.Context(), tx) +} + +func (s *Service) buildCreateSubnetTx(args *CreateSubnetArgs) (*txs.Tx, ids.ShortID, error) { // Parse the control keys controlKeys, err := avax.ParseServiceAddresses(s.addrManager, args.ControlKeys) if err != nil { - return err + return nil, ids.ShortEmpty, err } // Parse the from addresses fromAddrs, err := avax.ParseServiceAddresses(s.addrManager, args.From) if err != nil { - return err + return nil, ids.ShortEmpty, err } + s.vm.ctx.Lock.Lock() + defer s.vm.ctx.Lock.Unlock() + user, err := keystore.NewUserFromKeystore(s.vm.ctx.Keystore, args.Username, args.Password) if err != nil { - return err + return nil, ids.ShortEmpty, err } defer user.Close() privKeys, err := keystore.GetKeychain(user, fromAddrs) if err != nil { - return fmt.Errorf("couldn't get addresses controlled by the user: %w", err) + return nil, ids.ShortEmpty, fmt.Errorf("couldn't get addresses controlled by the user: %w", err) } // Parse the change address. Assumes that if the user has no keys, // this operation will fail so the change address can be anything. 
if len(privKeys.Keys) == 0 { - return errNoKeys + return nil, ids.ShortEmpty, errNoKeys } changeAddr := privKeys.Keys[0].PublicKey().Address() // By default, use a key controlled by the user if args.ChangeAddr != "" { changeAddr, err = avax.ParseServiceAddress(s.addrManager, args.ChangeAddr) if err != nil { - return fmt.Errorf("couldn't parse changeAddr: %w", err) + return nil, ids.ShortEmpty, fmt.Errorf("couldn't parse changeAddr: %w", err) } } @@ -1541,21 +1684,13 @@ func (s *Service) CreateSubnet(_ *http.Request, args *CreateSubnetArgs, response controlKeys.List(), // Control Addresses privKeys.Keys, // Private keys changeAddr, + nil, ) if err != nil { - return fmt.Errorf("couldn't create tx: %w", err) + return nil, ids.ShortEmpty, err } - response.TxID = tx.ID() - response.ChangeAddr, err = s.addrManager.FormatLocalAddress(changeAddr) - - errs := wrappers.Errs{} - errs.Add( - err, - s.vm.Builder.AddUnverifiedTx(tx), - user.Close(), - ) - return errs.Err + return tx, changeAddr, user.Close() } // ExportAVAXArgs are the arguments to ExportAVAX @@ -1564,7 +1699,7 @@ type ExportAVAXArgs struct { api.JSONSpendHeader // Amount of AVAX to send - Amount json.Uint64 `json:"amount"` + Amount avajson.Uint64 `json:"amount"` // Chain the funds are going to. Optional. Used if To address does not include the chainID. 
TargetChain string `json:"targetChain"` @@ -1576,14 +1711,29 @@ type ExportAVAXArgs struct { // ExportAVAX exports AVAX from the P-Chain to the X-Chain // It must be imported on the X-Chain to complete the transfer -func (s *Service) ExportAVAX(_ *http.Request, args *ExportAVAXArgs, response *api.JSONTxIDChangeAddr) error { +func (s *Service) ExportAVAX(req *http.Request, args *ExportAVAXArgs, response *api.JSONTxIDChangeAddr) error { s.vm.ctx.Log.Warn("deprecated API called", zap.String("service", "platform"), zap.String("method", "exportAVAX"), ) + tx, changeAddr, err := s.buildExportAVAX(args) + if err != nil { + return fmt.Errorf("couldn't create tx: %w", err) + } + + response.TxID = tx.ID() + response.ChangeAddr, err = s.addrManager.FormatLocalAddress(changeAddr) + if err != nil { + return fmt.Errorf("couldn't format address: %w", err) + } + + return s.vm.issueTx(req.Context(), tx) +} + +func (s *Service) buildExportAVAX(args *ExportAVAXArgs) (*txs.Tx, ids.ShortID, error) { if args.Amount == 0 { - return errNoAmount + return nil, ids.ShortEmpty, errNoAmount } // Get the chainID and parse the to address @@ -1591,41 +1741,44 @@ func (s *Service) ExportAVAX(_ *http.Request, args *ExportAVAXArgs, response *ap if err != nil { chainID, err = s.vm.ctx.BCLookup.Lookup(args.TargetChain) if err != nil { - return err + return nil, ids.ShortEmpty, err } to, err = ids.ShortFromString(args.To) if err != nil { - return err + return nil, ids.ShortEmpty, err } } // Parse the from addresses fromAddrs, err := avax.ParseServiceAddresses(s.addrManager, args.From) if err != nil { - return err + return nil, ids.ShortEmpty, err } + s.vm.ctx.Lock.Lock() + defer s.vm.ctx.Lock.Unlock() + user, err := keystore.NewUserFromKeystore(s.vm.ctx.Keystore, args.Username, args.Password) if err != nil { - return err + return nil, ids.ShortEmpty, err } defer user.Close() privKeys, err := keystore.GetKeychain(user, fromAddrs) if err != nil { - return fmt.Errorf("couldn't get addresses controlled by 
the user: %w", err) + return nil, ids.ShortEmpty, fmt.Errorf("couldn't get addresses controlled by the user: %w", err) } // Parse the change address. Assumes that if the user has no keys, // this operation will fail so the change address can be anything. if len(privKeys.Keys) == 0 { - return errNoKeys + return nil, ids.ShortEmpty, errNoKeys } changeAddr := privKeys.Keys[0].PublicKey().Address() // By default, use a key controlled by the user if args.ChangeAddr != "" { changeAddr, err = avax.ParseServiceAddress(s.addrManager, args.ChangeAddr) if err != nil { - return fmt.Errorf("couldn't parse changeAddr: %w", err) + return nil, ids.ShortEmpty, fmt.Errorf("couldn't parse changeAddr: %w", err) } } @@ -1636,21 +1789,13 @@ func (s *Service) ExportAVAX(_ *http.Request, args *ExportAVAXArgs, response *ap to, // Address privKeys.Keys, // Private keys changeAddr, // Change address + nil, ) if err != nil { - return fmt.Errorf("couldn't create tx: %w", err) + return nil, ids.ShortEmpty, err } - response.TxID = tx.ID() - response.ChangeAddr, err = s.addrManager.FormatLocalAddress(changeAddr) - - errs := wrappers.Errs{} - errs.Add( - err, - s.vm.Builder.AddUnverifiedTx(tx), - user.Close(), - ) - return errs.Err + return tx, changeAddr, user.Close() } // ImportAVAXArgs are the arguments to ImportAVAX @@ -1667,51 +1812,69 @@ type ImportAVAXArgs struct { // ImportAVAX issues a transaction to import AVAX from the X-chain. The AVAX // must have already been exported from the X-Chain. 
-func (s *Service) ImportAVAX(_ *http.Request, args *ImportAVAXArgs, response *api.JSONTxIDChangeAddr) error { +func (s *Service) ImportAVAX(req *http.Request, args *ImportAVAXArgs, response *api.JSONTxIDChangeAddr) error { s.vm.ctx.Log.Warn("deprecated API called", zap.String("service", "platform"), zap.String("method", "importAVAX"), ) + tx, changeAddr, err := s.buildImportAVAXTx(args) + if err != nil { + return fmt.Errorf("couldn't create tx: %w", err) + } + + response.TxID = tx.ID() + response.ChangeAddr, err = s.addrManager.FormatLocalAddress(changeAddr) + if err != nil { + return fmt.Errorf("problem formatting address: %w", err) + } + + return s.vm.issueTx(req.Context(), tx) +} + +func (s *Service) buildImportAVAXTx(args *ImportAVAXArgs) (*txs.Tx, ids.ShortID, error) { // Parse the sourceCHain chainID, err := s.vm.ctx.BCLookup.Lookup(args.SourceChain) if err != nil { - return fmt.Errorf("problem parsing chainID %q: %w", args.SourceChain, err) + return nil, ids.ShortEmpty, fmt.Errorf("problem parsing chainID %q: %w", args.SourceChain, err) } // Parse the to address to, err := avax.ParseServiceAddress(s.addrManager, args.To) if err != nil { // Parse address - return fmt.Errorf("couldn't parse argument 'to' to an address: %w", err) + return nil, ids.ShortEmpty, fmt.Errorf("couldn't parse argument 'to' to an address: %w", err) } // Parse the from addresses fromAddrs, err := avax.ParseServiceAddresses(s.addrManager, args.From) if err != nil { - return err + return nil, ids.ShortEmpty, err } + s.vm.ctx.Lock.Lock() + defer s.vm.ctx.Lock.Unlock() + user, err := keystore.NewUserFromKeystore(s.vm.ctx.Keystore, args.Username, args.Password) if err != nil { - return err + return nil, ids.ShortEmpty, err } defer user.Close() privKeys, err := keystore.GetKeychain(user, fromAddrs) if err != nil { // Get keys - return fmt.Errorf("couldn't get keys controlled by the user: %w", err) + return nil, ids.ShortEmpty, fmt.Errorf("couldn't get keys controlled by the user: %w", err) } 
// Parse the change address. Assumes that if the user has no keys, // this operation will fail so the change address can be anything. if len(privKeys.Keys) == 0 { - return errNoKeys + return nil, ids.ShortEmpty, errNoKeys } changeAddr := privKeys.Keys[0].PublicKey().Address() // By default, use a key controlled by the user if args.ChangeAddr != "" { changeAddr, err = avax.ParseServiceAddress(s.addrManager, args.ChangeAddr) if err != nil { - return fmt.Errorf("couldn't parse changeAddr: %w", err) + return nil, ids.ShortEmpty, fmt.Errorf("couldn't parse changeAddr: %w", err) } } @@ -1720,21 +1883,13 @@ func (s *Service) ImportAVAX(_ *http.Request, args *ImportAVAXArgs, response *ap to, privKeys.Keys, changeAddr, + nil, ) if err != nil { - return err + return nil, ids.ShortEmpty, err } - response.TxID = tx.ID() - response.ChangeAddr, err = s.addrManager.FormatLocalAddress(changeAddr) - - errs := wrappers.Errs{} - errs.Add( - err, - s.vm.Builder.AddUnverifiedTx(tx), - user.Close(), - ) - return errs.Err + return tx, changeAddr, user.Close() } /* @@ -1762,76 +1917,93 @@ type CreateBlockchainArgs struct { } // CreateBlockchain issues a transaction to create a new blockchain -func (s *Service) CreateBlockchain(_ *http.Request, args *CreateBlockchainArgs, response *api.JSONTxIDChangeAddr) error { +func (s *Service) CreateBlockchain(req *http.Request, args *CreateBlockchainArgs, response *api.JSONTxIDChangeAddr) error { s.vm.ctx.Log.Warn("deprecated API called", zap.String("service", "platform"), zap.String("method", "createBlockchain"), ) + tx, changeAddr, err := s.buildCreateBlockchainTx(args) + if err != nil { + return fmt.Errorf("couldn't create tx: %w", err) + } + + response.TxID = tx.ID() + response.ChangeAddr, err = s.addrManager.FormatLocalAddress(changeAddr) + if err != nil { + return fmt.Errorf("problem formatting address: %w", err) + } + + return s.vm.issueTx(req.Context(), tx) +} + +func (s *Service) buildCreateBlockchainTx(args *CreateBlockchainArgs) (*txs.Tx, 
ids.ShortID, error) { switch { case args.Name == "": - return errMissingName + return nil, ids.ShortEmpty, errMissingName case args.VMID == "": - return errMissingVMID + return nil, ids.ShortEmpty, errMissingVMID } genesisBytes, err := formatting.Decode(args.Encoding, args.GenesisData) if err != nil { - return fmt.Errorf("problem parsing genesis data: %w", err) + return nil, ids.ShortEmpty, fmt.Errorf("problem parsing genesis data: %w", err) } vmID, err := s.vm.Chains.LookupVM(args.VMID) if err != nil { - return fmt.Errorf("no VM with ID '%s' found", args.VMID) + return nil, ids.ShortEmpty, fmt.Errorf("no VM with ID '%s' found", args.VMID) } fxIDs := []ids.ID(nil) for _, fxIDStr := range args.FxIDs { fxID, err := s.vm.Chains.LookupVM(fxIDStr) if err != nil { - return fmt.Errorf("no FX with ID '%s' found", fxIDStr) + return nil, ids.ShortEmpty, fmt.Errorf("no FX with ID '%s' found", fxIDStr) } fxIDs = append(fxIDs, fxID) } // If creating AVM instance, use secp256k1fx // TODO: Document FXs and have user specify them in API call - fxIDsSet := set.Set[ids.ID]{} - fxIDsSet.Add(fxIDs...) + fxIDsSet := set.Of(fxIDs...) 
if vmID == constants.AVMID && !fxIDsSet.Contains(secp256k1fx.ID) { fxIDs = append(fxIDs, secp256k1fx.ID) } if args.SubnetID == constants.PrimaryNetworkID { - return txs.ErrCantValidatePrimaryNetwork + return nil, ids.ShortEmpty, txs.ErrCantValidatePrimaryNetwork } // Parse the from addresses fromAddrs, err := avax.ParseServiceAddresses(s.addrManager, args.From) if err != nil { - return err + return nil, ids.ShortEmpty, err } + s.vm.ctx.Lock.Lock() + defer s.vm.ctx.Lock.Unlock() + user, err := keystore.NewUserFromKeystore(s.vm.ctx.Keystore, args.Username, args.Password) if err != nil { - return err + return nil, ids.ShortEmpty, err } defer user.Close() keys, err := keystore.GetKeychain(user, fromAddrs) if err != nil { - return fmt.Errorf("couldn't get addresses controlled by the user: %w", err) + return nil, ids.ShortEmpty, fmt.Errorf("couldn't get addresses controlled by the user: %w", err) } // Parse the change address. Assumes that if the user has no keys, // this operation will fail so the change address can be anything. 
if len(keys.Keys) == 0 { - return errNoKeys + return nil, ids.ShortEmpty, errNoKeys } changeAddr := keys.Keys[0].PublicKey().Address() // By default, use a key controlled by the user if args.ChangeAddr != "" { changeAddr, err = avax.ParseServiceAddress(s.addrManager, args.ChangeAddr) if err != nil { - return fmt.Errorf("couldn't parse changeAddr: %w", err) + return nil, ids.ShortEmpty, fmt.Errorf("couldn't parse changeAddr: %w", err) } } @@ -1844,21 +2016,13 @@ func (s *Service) CreateBlockchain(_ *http.Request, args *CreateBlockchainArgs, args.Name, keys.Keys, changeAddr, // Change address + nil, ) if err != nil { - return fmt.Errorf("couldn't create tx: %w", err) + return nil, ids.ShortEmpty, err } - response.TxID = tx.ID() - response.ChangeAddr, err = s.addrManager.FormatLocalAddress(changeAddr) - - errs := wrappers.Errs{} - errs.Add( - err, - s.vm.Builder.AddUnverifiedTx(tx), - user.Close(), - ) - return errs.Err + return tx, changeAddr, user.Close() } // GetBlockchainStatusArgs is the arguments for calling GetBlockchainStatus @@ -1884,6 +2048,9 @@ func (s *Service) GetBlockchainStatus(r *http.Request, args *GetBlockchainStatus return errMissingBlockchainID } + s.vm.ctx.Lock.Lock() + defer s.vm.ctx.Lock.Unlock() + // if its aliased then vm created this chain. 
if aliasedID, err := s.vm.Chains.Lookup(args.BlockchainID); err == nil { if s.nodeValidates(aliasedID) { @@ -1915,11 +2082,8 @@ func (s *Service) GetBlockchainStatus(r *http.Request, args *GetBlockchainStatus return nil } - preferredBlk, err := s.vm.Preferred() - if err != nil { - return fmt.Errorf("could not retrieve preferred block, err %w", err) - } - preferred, err := s.chainExists(ctx, preferredBlk.ID(), blockchainID) + preferredBlkID := s.vm.manager.Preferred() + preferred, err := s.chainExists(ctx, preferredBlkID, blockchainID) if err != nil { return fmt.Errorf("problem looking up blockchain: %w", err) } @@ -1942,12 +2106,8 @@ func (s *Service) nodeValidates(blockchainID ids.ID) bool { return false } - validators, ok := s.vm.Validators.Get(chain.SubnetID) - if !ok { - return false - } - - return validators.Contains(s.vm.ctx.NodeID) + _, isValidator := s.vm.Validators.GetValidator(chain.SubnetID, s.vm.ctx.NodeID) + return isValidator } func (s *Service) chainExists(ctx context.Context, blockID ids.ID, chainID ids.ID) (bool, error) { @@ -1993,6 +2153,9 @@ func (s *Service) ValidatedBy(r *http.Request, args *ValidatedByArgs, response * zap.String("method", "validatedBy"), ) + s.vm.ctx.Lock.Lock() + defer s.vm.ctx.Lock.Unlock() + var err error ctx := r.Context() response.SubnetID, err = s.vm.GetSubnetID(ctx, args.BlockchainID) @@ -2016,6 +2179,9 @@ func (s *Service) Validates(_ *http.Request, args *ValidatesArgs, response *Vali zap.String("method", "validates"), ) + s.vm.ctx.Lock.Lock() + defer s.vm.ctx.Lock.Unlock() + if args.SubnetID != constants.PrimaryNetworkID { subnetTx, _, err := s.vm.state.GetTx(args.SubnetID) if err != nil { @@ -2072,6 +2238,9 @@ func (s *Service) GetBlockchains(_ *http.Request, _ *struct{}, response *GetBloc zap.String("method", "getBlockchains"), ) + s.vm.ctx.Lock.Lock() + defer s.vm.ctx.Lock.Unlock() + subnets, err := s.vm.state.GetSubnets() if err != nil { return fmt.Errorf("couldn't retrieve subnets: %w", err) @@ -2125,8 +2294,7 @@ 
func (s *Service) GetBlockchains(_ *http.Request, _ *struct{}, response *GetBloc return nil } -// IssueTx issues a tx -func (s *Service) IssueTx(_ *http.Request, args *api.FormattedTx, response *api.JSONTxID) error { +func (s *Service) IssueTx(req *http.Request, args *api.FormattedTx, response *api.JSONTxID) error { s.vm.ctx.Log.Debug("API called", zap.String("service", "platform"), zap.String("method", "issueTx"), @@ -2140,7 +2308,8 @@ func (s *Service) IssueTx(_ *http.Request, args *api.FormattedTx, response *api. if err != nil { return fmt.Errorf("couldn't parse tx: %w", err) } - if err := s.vm.Builder.AddUnverifiedTx(tx); err != nil { + + if err := s.vm.issueTx(req.Context(), tx); err != nil { return fmt.Errorf("couldn't issue tx: %w", err) } @@ -2148,31 +2317,34 @@ func (s *Service) IssueTx(_ *http.Request, args *api.FormattedTx, response *api. return nil } -// GetTx gets a tx func (s *Service) GetTx(_ *http.Request, args *api.GetTxArgs, response *api.GetTxReply) error { s.vm.ctx.Log.Debug("API called", zap.String("service", "platform"), zap.String("method", "getTx"), ) + s.vm.ctx.Lock.Lock() + defer s.vm.ctx.Lock.Unlock() + tx, _, err := s.vm.state.GetTx(args.TxID) if err != nil { return fmt.Errorf("couldn't get tx: %w", err) } - txBytes := tx.Bytes() response.Encoding = args.Encoding + var result any if args.Encoding == formatting.JSON { tx.Unsigned.InitCtx(s.vm.ctx) - response.Tx = tx - return nil + result = tx + } else { + result, err = formatting.Encode(args.Encoding, tx.Bytes()) + if err != nil { + return fmt.Errorf("couldn't encode tx as %s: %w", args.Encoding, err) + } } - response.Tx, err = formatting.Encode(args.Encoding, txBytes) - if err != nil { - return fmt.Errorf("couldn't encode tx as a string: %w", err) - } - return nil + response.Tx, err = json.Marshal(result) + return err } type GetTxStatusArgs struct { @@ -2193,6 +2365,9 @@ func (s *Service) GetTxStatus(_ *http.Request, args *GetTxStatusArgs, response * zap.String("method", "getTxStatus"), 
) + s.vm.ctx.Lock.Lock() + defer s.vm.ctx.Lock.Unlock() + _, txStatus, err := s.vm.state.GetTx(args.TxID) if err == nil { // Found the status. Report it. response.Status = txStatus @@ -2204,12 +2379,7 @@ func (s *Service) GetTxStatus(_ *http.Request, args *GetTxStatusArgs, response * // The status of this transaction is not in the database - check if the tx // is in the preferred block's db. If so, return that it's processing. - prefBlk, err := s.vm.Preferred() - if err != nil { - return err - } - - preferredID := prefBlk.ID() + preferredID := s.vm.manager.Preferred() onAccept, ok := s.vm.manager.GetState(preferredID) if !ok { return fmt.Errorf("could not retrieve state for block %s", preferredID) @@ -2225,7 +2395,7 @@ func (s *Service) GetTxStatus(_ *http.Request, args *GetTxStatusArgs, response * return err } - if s.vm.Builder.Has(args.TxID) { + if _, ok := s.vm.Builder.Get(args.TxID); ok { // Found the tx in the mempool. Report tx is processing. response.Status = status.Processing return nil @@ -2248,13 +2418,14 @@ func (s *Service) GetTxStatus(_ *http.Request, args *GetTxStatusArgs, response * type GetStakeArgs struct { api.JSONAddresses - Encoding formatting.Encoding `json:"encoding"` + ValidatorsOnly bool `json:"validatorsOnly"` + Encoding formatting.Encoding `json:"encoding"` } // GetStakeReply is the response from calling GetStake. 
type GetStakeReply struct { - Staked json.Uint64 `json:"staked"` - Stakeds map[ids.ID]json.Uint64 `json:"stakeds"` + Staked avajson.Uint64 `json:"staked"` + Stakeds map[ids.ID]avajson.Uint64 `json:"stakeds"` // String representation of staked outputs // Each is of type avax.TransferableOutput Outputs []string `json:"stakedOutputs"` @@ -2285,6 +2456,9 @@ func (s *Service) GetStake(_ *http.Request, args *GetStakeArgs, response *GetSta return err } + s.vm.ctx.Lock.Lock() + defer s.vm.ctx.Lock.Unlock() + currentStakerIterator, err := s.vm.state.GetCurrentStakerIterator() if err != nil { return err @@ -2298,6 +2472,10 @@ func (s *Service) GetStake(_ *http.Request, args *GetStakeArgs, response *GetSta for currentStakerIterator.Next() { // Iterates over current stakers staker := currentStakerIterator.Value() + if args.ValidatorsOnly && !staker.Priority.IsValidator() { + continue + } + tx, _, err := s.vm.state.GetTx(staker.TxID) if err != nil { return err @@ -2315,6 +2493,10 @@ func (s *Service) GetStake(_ *http.Request, args *GetStakeArgs, response *GetSta for pendingStakerIterator.Next() { // Iterates over pending stakers staker := pendingStakerIterator.Value() + if args.ValidatorsOnly && !staker.Priority.IsValidator() { + continue + } + tx, _, err := s.vm.state.GetTx(staker.TxID) if err != nil { return err @@ -2327,13 +2509,13 @@ func (s *Service) GetStake(_ *http.Request, args *GetStakeArgs, response *GetSta response.Staked = response.Stakeds[s.vm.ctx.AVAXAssetID] response.Outputs = make([]string, len(stakedOuts)) for i, output := range stakedOuts { - bytes, err := txs.Codec.Marshal(txs.Version, output) + bytes, err := txs.Codec.Marshal(txs.CodecVersion, output) if err != nil { return fmt.Errorf("couldn't serialize output %s: %w", output.ID, err) } response.Outputs[i], err = formatting.Encode(args.Encoding, bytes) if err != nil { - return fmt.Errorf("couldn't encode output %s as string: %w", output.ID, err) + return fmt.Errorf("couldn't encode output %s as %s: %w", 
output.ID, args.Encoding, err) } } response.Encoding = args.Encoding @@ -2349,9 +2531,9 @@ type GetMinStakeArgs struct { // GetMinStakeReply is the response from calling GetMinStake. type GetMinStakeReply struct { // The minimum amount of tokens one must bond to be a validator - MinValidatorStake json.Uint64 `json:"minValidatorStake"` + MinValidatorStake avajson.Uint64 `json:"minValidatorStake"` // Minimum stake, in nAVAX, that can be delegated on the primary network - MinDelegatorStake json.Uint64 `json:"minDelegatorStake"` + MinDelegatorStake avajson.Uint64 `json:"minDelegatorStake"` } // GetMinStake returns the minimum staking amount in nAVAX. @@ -2364,11 +2546,14 @@ func (s *Service) GetMinStake(_ *http.Request, args *GetMinStakeArgs, reply *Get if args.SubnetID == constants.PrimaryNetworkID { timestamp := s.vm.state.GetTimestamp() minValidatorStake, _, minDelegatorStake, _, _, _, _, _, _, _ := executor.GetCurrentInflationSettings(timestamp, s.vm.ctx.NetworkID, &s.vm.Config) - reply.MinValidatorStake = json.Uint64(minValidatorStake) - reply.MinDelegatorStake = json.Uint64(minDelegatorStake) + reply.MinValidatorStake = avajson.Uint64(minValidatorStake) + reply.MinDelegatorStake = avajson.Uint64(minDelegatorStake) return nil } + s.vm.ctx.Lock.Lock() + defer s.vm.ctx.Lock.Unlock() + transformSubnetIntf, err := s.vm.state.GetSubnetTransformation(args.SubnetID) if err != nil { return fmt.Errorf( @@ -2385,8 +2570,8 @@ func (s *Service) GetMinStake(_ *http.Request, args *GetMinStakeArgs, reply *Get ) } - reply.MinValidatorStake = json.Uint64(transformSubnet.MinValidatorStake) - reply.MinDelegatorStake = json.Uint64(transformSubnet.MinDelegatorStake) + reply.MinValidatorStake = avajson.Uint64(transformSubnet.MinValidatorStake) + reply.MinDelegatorStake = avajson.Uint64(transformSubnet.MinDelegatorStake) return nil } @@ -2401,9 +2586,9 @@ type GetTotalStakeArgs struct { // GetTotalStakeReply is the response from calling GetTotalStake. 
type GetTotalStakeReply struct { // Deprecated: Use Weight instead. - Stake json.Uint64 `json:"stake"` + Stake avajson.Uint64 `json:"stake"` - Weight json.Uint64 `json:"weight"` + Weight avajson.Uint64 `json:"weight"` } // GetTotalStake returns the total amount staked on the Primary Network @@ -2413,11 +2598,11 @@ func (s *Service) GetTotalStake(_ *http.Request, args *GetTotalStakeArgs, reply zap.String("method", "getTotalStake"), ) - vdrs, ok := s.vm.Validators.Get(args.SubnetID) - if !ok { - return errMissingValidatorSet + totalWeight, err := s.vm.Validators.TotalWeight(args.SubnetID) + if err != nil { + return fmt.Errorf("couldn't get total weight: %w", err) } - weight := json.Uint64(vdrs.Weight()) + weight := avajson.Uint64(totalWeight) reply.Weight = weight reply.Stake = weight return nil @@ -2425,15 +2610,15 @@ func (s *Service) GetTotalStake(_ *http.Request, args *GetTotalStakeArgs, reply // GetMaxStakeAmountArgs is the request for calling GetMaxStakeAmount. type GetMaxStakeAmountArgs struct { - SubnetID ids.ID `json:"subnetID"` - NodeID ids.NodeID `json:"nodeID"` - StartTime json.Uint64 `json:"startTime"` - EndTime json.Uint64 `json:"endTime"` + SubnetID ids.ID `json:"subnetID"` + NodeID ids.NodeID `json:"nodeID"` + StartTime avajson.Uint64 `json:"startTime"` + EndTime avajson.Uint64 `json:"endTime"` } // GetMaxStakeAmountReply is the response from calling GetMaxStakeAmount. 
type GetMaxStakeAmountReply struct { - Amount json.Uint64 `json:"amount"` + Amount avajson.Uint64 `json:"amount"` } // GetMaxStakeAmount returns the maximum amount of nAVAX staking to the named @@ -2450,6 +2635,10 @@ func (s *Service) GetMaxStakeAmount(_ *http.Request, args *GetMaxStakeAmountArgs if startTime.After(endTime) { return errStartAfterEndTime } + + s.vm.ctx.Lock.Lock() + defer s.vm.ctx.Lock.Unlock() + now := s.vm.state.GetTimestamp() if startTime.Before(now) { return errStartTimeInThePast @@ -2471,14 +2660,14 @@ func (s *Service) GetMaxStakeAmount(_ *http.Request, args *GetMaxStakeAmountArgs } maxStakeAmount, err := executor.GetMaxWeight(s.vm.state, staker, startTime, endTime) - reply.Amount = json.Uint64(maxStakeAmount) + reply.Amount = avajson.Uint64(maxStakeAmount) return err } // GetRewardUTXOsReply defines the GetRewardUTXOs replies returned from the API type GetRewardUTXOsReply struct { // Number of UTXOs returned - NumFetched json.Uint64 `json:"numFetched"` + NumFetched avajson.Uint64 `json:"numFetched"` // The UTXOs UTXOs []string `json:"utxos"` // Encoding specifies the encoding format the UTXOs are returned in @@ -2493,22 +2682,25 @@ func (s *Service) GetRewardUTXOs(_ *http.Request, args *api.GetTxArgs, reply *Ge zap.String("method", "getRewardUTXOs"), ) + s.vm.ctx.Lock.Lock() + defer s.vm.ctx.Lock.Unlock() + utxos, err := s.vm.state.GetRewardUTXOs(args.TxID) if err != nil { return fmt.Errorf("couldn't get reward UTXOs: %w", err) } - reply.NumFetched = json.Uint64(len(utxos)) + reply.NumFetched = avajson.Uint64(len(utxos)) reply.UTXOs = make([]string, len(utxos)) for i, utxo := range utxos { - utxoBytes, err := txs.GenesisCodec.Marshal(txs.Version, utxo) + utxoBytes, err := txs.GenesisCodec.Marshal(txs.CodecVersion, utxo) if err != nil { - return fmt.Errorf("failed to encode UTXO to bytes: %w", err) + return fmt.Errorf("couldn't encode UTXO to bytes: %w", err) } utxoStr, err := formatting.Encode(args.Encoding, utxoBytes) if err != nil { - 
return fmt.Errorf("couldn't encode utxo as a string: %w", err) + return fmt.Errorf("couldn't encode utxo as %s: %w", args.Encoding, err) } reply.UTXOs[i] = utxoStr } @@ -2529,21 +2721,81 @@ func (s *Service) GetTimestamp(_ *http.Request, _ *struct{}, reply *GetTimestamp zap.String("method", "getTimestamp"), ) + s.vm.ctx.Lock.Lock() + defer s.vm.ctx.Lock.Unlock() + reply.Timestamp = s.vm.state.GetTimestamp() return nil } // GetValidatorsAtArgs is the response from GetValidatorsAt type GetValidatorsAtArgs struct { - Height json.Uint64 `json:"height"` - SubnetID ids.ID `json:"subnetID"` + Height avajson.Uint64 `json:"height"` + SubnetID ids.ID `json:"subnetID"` +} + +type jsonGetValidatorOutput struct { + PublicKey *string `json:"publicKey"` + Weight avajson.Uint64 `json:"weight"` +} + +func (v *GetValidatorsAtReply) MarshalJSON() ([]byte, error) { + m := make(map[ids.NodeID]*jsonGetValidatorOutput, len(v.Validators)) + for _, vdr := range v.Validators { + vdrJSON := &jsonGetValidatorOutput{ + Weight: avajson.Uint64(vdr.Weight), + } + + if vdr.PublicKey != nil { + pk, err := formatting.Encode(formatting.HexNC, bls.PublicKeyToBytes(vdr.PublicKey)) + if err != nil { + return nil, err + } + vdrJSON.PublicKey = &pk + } + + m[vdr.NodeID] = vdrJSON + } + return json.Marshal(m) +} + +func (v *GetValidatorsAtReply) UnmarshalJSON(b []byte) error { + var m map[ids.NodeID]*jsonGetValidatorOutput + if err := json.Unmarshal(b, &m); err != nil { + return err + } + + if m == nil { + v.Validators = nil + return nil + } + + v.Validators = make(map[ids.NodeID]*validators.GetValidatorOutput, len(m)) + for nodeID, vdrJSON := range m { + vdr := &validators.GetValidatorOutput{ + NodeID: nodeID, + Weight: uint64(vdrJSON.Weight), + } + + if vdrJSON.PublicKey != nil { + pkBytes, err := formatting.Decode(formatting.HexNC, *vdrJSON.PublicKey) + if err != nil { + return err + } + vdr.PublicKey, err = bls.PublicKeyFromBytes(pkBytes) + if err != nil { + return err + } + } + + v.Validators[nodeID] 
= vdr + } + return nil } // GetValidatorsAtReply is the response from GetValidatorsAt type GetValidatorsAtReply struct { - // TODO should we change this to map[ids.NodeID]*validators.Validator? - // We'd have to add a MarshalJSON method to validators.Validator. - Validators map[ids.NodeID]uint64 `json:"validators"` + Validators map[ids.NodeID]*validators.GetValidatorOutput } // GetValidatorsAt returns the weights of the validator set of a provided subnet @@ -2557,16 +2809,15 @@ func (s *Service) GetValidatorsAt(r *http.Request, args *GetValidatorsAtArgs, re zap.Stringer("subnetID", args.SubnetID), ) + s.vm.ctx.Lock.Lock() + defer s.vm.ctx.Lock.Unlock() + ctx := r.Context() var err error - vdrs, err := s.vm.GetValidatorSet(ctx, height, args.SubnetID) + reply.Validators, err = s.vm.GetValidatorSet(ctx, height, args.SubnetID) if err != nil { return fmt.Errorf("failed to get validator set: %w", err) } - reply.Validators = make(map[ids.NodeID]uint64, len(vdrs)) - for _, vdr := range vdrs { - reply.Validators[vdr.NodeID] = vdr.Weight - } return nil } @@ -2578,27 +2829,73 @@ func (s *Service) GetBlock(_ *http.Request, args *api.GetBlockArgs, response *ap zap.Stringer("encoding", args.Encoding), ) + s.vm.ctx.Lock.Lock() + defer s.vm.ctx.Lock.Unlock() + block, err := s.vm.manager.GetStatelessBlock(args.BlockID) if err != nil { return fmt.Errorf("couldn't get block with id %s: %w", args.BlockID, err) } response.Encoding = args.Encoding + var result any if args.Encoding == formatting.JSON { block.InitCtx(s.vm.ctx) - response.Block = block - return nil + result = block + } else { + result, err = formatting.Encode(args.Encoding, block.Bytes()) + if err != nil { + return fmt.Errorf("couldn't encode block %s as %s: %w", args.BlockID, args.Encoding, err) + } + } + + response.Block, err = json.Marshal(result) + return err +} + +// GetBlockByHeight returns the block at the given height. 
+func (s *Service) GetBlockByHeight(_ *http.Request, args *api.GetBlockByHeightArgs, response *api.GetBlockResponse) error { + s.vm.ctx.Log.Debug("API called", + zap.String("service", "platform"), + zap.String("method", "getBlockByHeight"), + zap.Uint64("height", uint64(args.Height)), + zap.Stringer("encoding", args.Encoding), + ) + + s.vm.ctx.Lock.Lock() + defer s.vm.ctx.Lock.Unlock() + + blockID, err := s.vm.state.GetBlockIDAtHeight(uint64(args.Height)) + if err != nil { + return fmt.Errorf("couldn't get block at height %d: %w", args.Height, err) } - response.Block, err = formatting.Encode(args.Encoding, block.Bytes()) + block, err := s.vm.manager.GetStatelessBlock(blockID) if err != nil { - return fmt.Errorf("couldn't encode block %s as string: %w", args.BlockID, err) + s.vm.ctx.Log.Error("couldn't get accepted block", + zap.Stringer("blkID", blockID), + zap.Error(err), + ) + return fmt.Errorf("couldn't get block with id %s: %w", blockID, err) } + response.Encoding = args.Encoding - return nil + var result any + if args.Encoding == formatting.JSON { + block.InitCtx(s.vm.ctx) + result = block + } else { + result, err = formatting.Encode(args.Encoding, block.Bytes()) + if err != nil { + return fmt.Errorf("couldn't encode block %s as %s: %w", blockID, args.Encoding, err) + } + } + + response.Block, err = json.Marshal(result) + return err } -func (s *Service) getAPIUptime(staker *state.Staker) (*json.Float32, error) { +func (s *Service) getAPIUptime(staker *state.Staker) (*avajson.Float32, error) { // Only report uptimes that we have been actively tracking. 
if constants.PrimaryNetworkID != staker.SubnetID && !s.vm.TrackedSubnets.Contains(staker.SubnetID) { return nil, nil @@ -2610,14 +2907,15 @@ func (s *Service) getAPIUptime(staker *state.Staker) (*json.Float32, error) { } // Transform this to a percentage (0-100) to make it consistent // with observedUptime in info.peers API - uptime := json.Float32(rawUptime * 100) + uptime := avajson.Float32(rawUptime * 100) return &uptime, nil } func (s *Service) getAPIOwner(owner *secp256k1fx.OutputOwners) (*platformapi.Owner, error) { apiOwner := &platformapi.Owner{ - Locktime: json.Uint64(owner.Locktime), - Threshold: json.Uint32(owner.Threshold), + Locktime: avajson.Uint64(owner.Locktime), + Threshold: avajson.Uint32(owner.Threshold), + Addresses: make([]string, 0, len(owner.Addrs)), } for _, addr := range owner.Addrs { addrStr, err := s.addrManager.FormatLocalAddress(addr) @@ -2667,9 +2965,9 @@ func getStakeHelper(tx *txs.Tx, addrs set.Set[ids.ShortID], totalAmountStaked ma } assetID := output.AssetID() - newAmount, err := math.Add64(totalAmountStaked[assetID], secpOut.Amt) + newAmount, err := safemath.Add64(totalAmountStaked[assetID], secpOut.Amt) if err != nil { - newAmount = stdmath.MaxUint64 + newAmount = math.MaxUint64 } totalAmountStaked[assetID] = newAmount diff --git a/avalanchego/vms/platformvm/service_test.go b/avalanchego/vms/platformvm/service_test.go index 12cd7546..25ce69bc 100644 --- a/avalanchego/vms/platformvm/service_test.go +++ b/avalanchego/vms/platformvm/service_test.go @@ -1,44 +1,50 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package platformvm import ( "context" + "encoding/json" "errors" "fmt" + "math" "math/rand" "testing" "time" - stdjson "encoding/json" - "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" "github.com/ava-labs/avalanchego/api" "github.com/ava-labs/avalanchego/api/keystore" "github.com/ava-labs/avalanchego/cache" "github.com/ava-labs/avalanchego/chains/atomic" - "github.com/ava-labs/avalanchego/database/manager" + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/consensus/snowman" + "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/formatting" - "github.com/ava-labs/avalanchego/utils/json" "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/version" "github.com/ava-labs/avalanchego/vms/components/avax" - "github.com/ava-labs/avalanchego/vms/platformvm/blocks" + "github.com/ava-labs/avalanchego/vms/platformvm/block" + "github.com/ava-labs/avalanchego/vms/platformvm/block/builder" + "github.com/ava-labs/avalanchego/vms/platformvm/signer" "github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/ava-labs/avalanchego/vms/platformvm/status" "github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/ava-labs/avalanchego/vms/secp256k1fx" + avajson "github.com/ava-labs/avalanchego/utils/json" vmkeystore "github.com/ava-labs/avalanchego/vms/components/keystore" pchainapi "github.com/ava-labs/avalanchego/vms/platformvm/api" - blockexecutor "github.com/ava-labs/avalanchego/vms/platformvm/blocks/executor" + blockexecutor "github.com/ava-labs/avalanchego/vms/platformvm/block/executor" txexecutor 
"github.com/ava-labs/avalanchego/vms/platformvm/txs/executor" ) @@ -67,12 +73,11 @@ var ( ) func defaultService(t *testing.T) (*Service, *mutableSharedMemory) { - vm, _, mutableSharedMemory := defaultVM() + vm, _, mutableSharedMemory := defaultVM(t, latestFork) vm.ctx.Lock.Lock() defer vm.ctx.Lock.Unlock() - ks := keystore.New(logging.NoLog{}, manager.NewMemDB(version.Semantic1_0_0)) - err := ks.CreateUser(testUsername, testPassword) - require.NoError(t, err) + ks := keystore.New(logging.NoLog{}, memdb.New()) + require.NoError(t, ks.CreateUser(testUsername, testPassword)) vm.ctx.Keystore = ks.NewBlockchainKeyStore(vm.ctx.ChainID) return &Service{ @@ -86,55 +91,51 @@ func defaultService(t *testing.T) (*Service, *mutableSharedMemory) { // Give user [testUsername] control of [testPrivateKey] and keys[0] (which is funded) func defaultAddress(t *testing.T, service *Service) { + require := require.New(t) + service.vm.ctx.Lock.Lock() defer service.vm.ctx.Lock.Unlock() user, err := vmkeystore.NewUserFromKeystore(service.vm.ctx.Keystore, testUsername, testPassword) - require.NoError(t, err) + require.NoError(err) - pk, err := testKeyFactory.ToPrivateKey(testPrivateKey) - require.NoError(t, err) + pk, err := secp256k1.ToPrivateKey(testPrivateKey) + require.NoError(err) - err = user.PutKeys(pk, keys[0]) - require.NoError(t, err) + require.NoError(user.PutKeys(pk, keys[0])) } func TestAddValidator(t *testing.T) { + require := require.New(t) + expectedJSONString := `{"username":"","password":"","from":null,"changeAddr":"","txID":"11111111111111111111111111111111LpoYY","startTime":"0","endTime":"0","weight":"0","nodeID":"NodeID-111111111111111111116DBWJs","rewardAddress":"","delegationFeeRate":"0.0000"}` args := AddValidatorArgs{} - bytes, err := stdjson.Marshal(&args) - require.NoError(t, err) - require.Equal(t, expectedJSONString, string(bytes)) + bytes, err := json.Marshal(&args) + require.NoError(err) + require.Equal(expectedJSONString, string(bytes)) } func 
TestCreateBlockchainArgsParsing(t *testing.T) { + require := require.New(t) + jsonString := `{"vmID":"lol","fxIDs":["secp256k1"], "name":"awesome", "username":"bob loblaw", "password":"yeet", "genesisData":"SkB92YpWm4Q2iPnLGCuDPZPgUQMxajqQQuz91oi3xD984f8r"}` args := CreateBlockchainArgs{} - err := stdjson.Unmarshal([]byte(jsonString), &args) - require.NoError(t, err) + require.NoError(json.Unmarshal([]byte(jsonString), &args)) - _, err = stdjson.Marshal(args.GenesisData) - require.NoError(t, err) + _, err := json.Marshal(args.GenesisData) + require.NoError(err) } func TestExportKey(t *testing.T) { require := require.New(t) jsonString := `{"username":"ScoobyUser","password":"ShaggyPassword1Zoinks!","address":"` + testAddress + `"}` args := ExportKeyArgs{} - err := stdjson.Unmarshal([]byte(jsonString), &args) - require.NoError(err) + require.NoError(json.Unmarshal([]byte(jsonString), &args)) service, _ := defaultService(t) defaultAddress(t, service) - service.vm.ctx.Lock.Lock() - defer func() { - err := service.vm.Shutdown(context.Background()) - require.NoError(err) - service.vm.ctx.Lock.Unlock() - }() reply := ExportKeyReply{} - err = service.ExportKey(nil, &args, &reply) - require.NoError(err) + require.NoError(service.ExportKey(nil, &args, &reply)) require.Equal(testPrivateKey, reply.PrivateKey.Bytes()) } @@ -143,20 +144,12 @@ func TestImportKey(t *testing.T) { require := require.New(t) jsonString := `{"username":"ScoobyUser","password":"ShaggyPassword1Zoinks!","privateKey":"PrivateKey-ewoqjP7PxY4yr3iLTpLisriqt94hdyDFNgchSxGGztUrTXtNN"}` args := ImportKeyArgs{} - err := stdjson.Unmarshal([]byte(jsonString), &args) - require.NoError(err) + require.NoError(json.Unmarshal([]byte(jsonString), &args)) service, _ := defaultService(t) - service.vm.ctx.Lock.Lock() - defer func() { - err := service.vm.Shutdown(context.Background()) - require.NoError(err) - service.vm.ctx.Lock.Unlock() - }() reply := api.JSONAddress{} - err = service.ImportKey(nil, &args, &reply) - 
require.NoError(err) + require.NoError(service.ImportKey(nil, &args, &reply)) require.Equal(testAddress, reply.Address) } @@ -166,20 +159,14 @@ func TestGetTxStatus(t *testing.T) { service, mutableSharedMemory := defaultService(t) defaultAddress(t, service) service.vm.ctx.Lock.Lock() - defer func() { - err := service.vm.Shutdown(context.Background()) - require.NoError(err) - service.vm.ctx.Lock.Unlock() - }() - factory := secp256k1.Factory{} - recipientKey, err := factory.NewPrivateKey() + recipientKey, err := secp256k1.NewPrivateKey() require.NoError(err) - m := atomic.NewMemory(prefixdb.New([]byte{}, service.vm.dbManager.Current().Database)) + m := atomic.NewMemory(prefixdb.New([]byte{}, service.vm.db)) sm := m.NewSharedMemory(service.vm.ctx.ChainID) - peerSharedMemory := m.NewSharedMemory(xChainID) + peerSharedMemory := m.NewSharedMemory(service.vm.ctx.XChainID) // #nosec G404 utxo := &avax.UTXO{ @@ -187,7 +174,7 @@ func TestGetTxStatus(t *testing.T) { TxID: ids.GenerateTestID(), OutputIndex: rand.Uint32(), }, - Asset: avax.Asset{ID: avaxAssetID}, + Asset: avax.Asset{ID: service.vm.ctx.AVAXAssetID}, Out: &secp256k1fx.TransferOutput{ Amt: 1234567, OutputOwners: secp256k1fx.OutputOwners{ @@ -197,11 +184,11 @@ func TestGetTxStatus(t *testing.T) { }, }, } - utxoBytes, err := txs.Codec.Marshal(txs.Version, utxo) + utxoBytes, err := txs.Codec.Marshal(txs.CodecVersion, utxo) require.NoError(err) inputID := utxo.InputID() - err = peerSharedMemory.Apply(map[ids.ID]*atomic.Requests{ + require.NoError(peerSharedMemory.Apply(map[ids.ID]*atomic.Requests{ service.vm.ctx.ChainID: { PutRequests: []*atomic.Element{ { @@ -213,48 +200,45 @@ func TestGetTxStatus(t *testing.T) { }, }, }, - }) - require.NoError(err) + })) - oldSharedMemory := mutableSharedMemory.SharedMemory mutableSharedMemory.SharedMemory = sm - tx, err := service.vm.txBuilder.NewImportTx(xChainID, ids.ShortEmpty, []*secp256k1.PrivateKey{recipientKey}, ids.ShortEmpty) + tx, err := service.vm.txBuilder.NewImportTx( 
+ service.vm.ctx.XChainID, + ids.ShortEmpty, + []*secp256k1.PrivateKey{recipientKey}, + ids.ShortEmpty, + nil, + ) require.NoError(err) - mutableSharedMemory.SharedMemory = oldSharedMemory + service.vm.ctx.Lock.Unlock() var ( arg = &GetTxStatusArgs{TxID: tx.ID()} resp GetTxStatusResponse ) - err = service.GetTxStatus(nil, arg, &resp) - require.NoError(err) + require.NoError(service.GetTxStatus(nil, arg, &resp)) require.Equal(status.Unknown, resp.Status) require.Zero(resp.Reason) // put the chain in existing chain list - err = service.vm.Builder.AddUnverifiedTx(tx) - require.Error(err) - - mutableSharedMemory.SharedMemory = sm - - err = service.vm.Builder.AddUnverifiedTx(tx) - require.NoError(err) + require.NoError(service.vm.Network.IssueTx(context.Background(), tx)) + service.vm.ctx.Lock.Lock() block, err := service.vm.BuildBlock(context.Background()) require.NoError(err) blk := block.(*blockexecutor.Block) - err = blk.Verify(context.Background()) - require.NoError(err) + require.NoError(blk.Verify(context.Background())) - err = blk.Accept(context.Background()) - require.NoError(err) + require.NoError(blk.Accept(context.Background())) + + service.vm.ctx.Lock.Unlock() resp = GetTxStatusResponse{} // reset - err = service.GetTxStatus(nil, arg, &resp) - require.NoError(err) + require.NoError(service.GetTxStatus(nil, arg, &resp)) require.Equal(status.Committed, resp.Status) require.Zero(resp.Reason) } @@ -272,27 +256,33 @@ func TestGetTx(t *testing.T) { func(service *Service) (*txs.Tx, error) { return service.vm.txBuilder.NewCreateChainTx( // Test GetTx works for standard blocks testSubnet1.ID(), - nil, + []byte{}, constants.AVMID, - nil, + []ids.ID{}, "chain name", []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, keys[0].PublicKey().Address(), // change addr + nil, ) }, }, { "proposal block", func(service *Service) (*txs.Tx, error) { - return service.vm.txBuilder.NewAddValidatorTx( // Test GetTx works for proposal blocks + sk, err := 
bls.NewSecretKey() + require.NoError(t, err) + + return service.vm.txBuilder.NewAddPermissionlessValidatorTx( // Test GetTx works for proposal blocks service.vm.MinValidatorStake, uint64(service.vm.clock.Time().Add(txexecutor.SyncBound).Unix()), uint64(service.vm.clock.Time().Add(txexecutor.SyncBound).Add(defaultMinStakingDuration).Unix()), ids.GenerateTestNodeID(), + signer.NewProofOfPossession(sk), ids.GenerateTestShortID(), 0, []*secp256k1.PrivateKey{keys[0]}, keys[0].PublicKey().Address(), // change addr + nil, ) }, }, @@ -305,6 +295,7 @@ func TestGetTx(t *testing.T) { ids.GenerateTestShortID(), []*secp256k1.PrivateKey{keys[0]}, keys[0].PublicKey().Address(), // change addr + nil, ) }, }, @@ -325,93 +316,88 @@ func TestGetTx(t *testing.T) { tx, err := test.createTx(service) require.NoError(err) + service.vm.ctx.Lock.Unlock() + arg := &api.GetTxArgs{ TxID: tx.ID(), Encoding: encoding, } var response api.GetTxReply err = service.GetTx(nil, arg, &response) - require.Error(err) + require.ErrorIs(err, database.ErrNotFound) // We haven't issued the tx yet - err = service.vm.Builder.AddUnverifiedTx(tx) - require.NoError(err) + require.NoError(service.vm.Network.IssueTx(context.Background(), tx)) + service.vm.ctx.Lock.Lock() - block, err := service.vm.BuildBlock(context.Background()) + blk, err := service.vm.BuildBlock(context.Background()) require.NoError(err) - err = block.Verify(context.Background()) - require.NoError(err) + require.NoError(blk.Verify(context.Background())) - err = block.Accept(context.Background()) - require.NoError(err) + require.NoError(blk.Accept(context.Background())) - if blk, ok := block.(snowman.OracleBlock); ok { // For proposal blocks, commit them + if blk, ok := blk.(snowman.OracleBlock); ok { // For proposal blocks, commit them options, err := blk.Options(context.Background()) if !errors.Is(err, snowman.ErrNotOracle) { require.NoError(err) commit := options[0].(*blockexecutor.Block) - _, ok := commit.Block.(*blocks.BanffCommitBlock) - 
require.True(ok) - - err := commit.Verify(context.Background()) - require.NoError(err) - - err = commit.Accept(context.Background()) - require.NoError(err) + require.IsType(&block.BanffCommitBlock{}, commit.Block) + require.NoError(commit.Verify(context.Background())) + require.NoError(commit.Accept(context.Background())) } } - err = service.GetTx(nil, arg, &response) - require.NoError(err) + service.vm.ctx.Lock.Unlock() + + require.NoError(service.GetTx(nil, arg, &response)) switch encoding { case formatting.Hex: // we're always guaranteed a string for hex encodings. - responseTxBytes, err := formatting.Decode(response.Encoding, response.Tx.(string)) + var txStr string + require.NoError(json.Unmarshal(response.Tx, &txStr)) + responseTxBytes, err := formatting.Decode(response.Encoding, txStr) require.NoError(err) require.Equal(tx.Bytes(), responseTxBytes) case formatting.JSON: - require.Equal(tx, response.Tx) + tx.Unsigned.InitCtx(service.vm.ctx) + expectedTxJSON, err := json.Marshal(tx) + require.NoError(err) + require.Equal(expectedTxJSON, []byte(response.Tx)) } - - err = service.vm.Shutdown(context.Background()) - require.NoError(err) - service.vm.ctx.Lock.Unlock() }) } } } -// Test method GetBalance func TestGetBalance(t *testing.T) { require := require.New(t) service, _ := defaultService(t) defaultAddress(t, service) - service.vm.ctx.Lock.Lock() - defer func() { - err := service.vm.Shutdown(context.Background()) - require.NoError(err) - service.vm.ctx.Lock.Unlock() - }() // Ensure GetStake is correct for each of the genesis validators - genesis, _ := defaultGenesis() - for _, utxo := range genesis.UTXOs { + genesis, _ := defaultGenesis(t, service.vm.ctx.AVAXAssetID) + for idx, utxo := range genesis.UTXOs { request := GetBalanceRequest{ Addresses: []string{ - fmt.Sprintf("P-%s", utxo.Address), + "P-" + utxo.Address, }, } reply := GetBalanceResponse{} require.NoError(service.GetBalance(nil, &request, &reply)) - - require.Equal(json.Uint64(defaultBalance), 
reply.Balance) - require.Equal(json.Uint64(defaultBalance), reply.Unlocked) - require.Equal(json.Uint64(0), reply.LockedStakeable) - require.Equal(json.Uint64(0), reply.LockedNotStakeable) + balance := defaultBalance + if idx == 0 { + // we use the first key to fund a subnet creation in [defaultGenesis]. + // As such we need to account for the subnet creation fee + balance = defaultBalance - service.vm.Config.GetCreateSubnetTxFee(service.vm.clock.Time()) + } + require.Equal(avajson.Uint64(balance), reply.Balance) + require.Equal(avajson.Uint64(balance), reply.Unlocked) + require.Equal(avajson.Uint64(0), reply.LockedStakeable) + require.Equal(avajson.Uint64(0), reply.LockedNotStakeable) } } @@ -419,28 +405,23 @@ func TestGetStake(t *testing.T) { require := require.New(t) service, _ := defaultService(t) defaultAddress(t, service) - service.vm.ctx.Lock.Lock() - defer func() { - require.NoError(service.vm.Shutdown(context.Background())) - service.vm.ctx.Lock.Unlock() - }() // Ensure GetStake is correct for each of the genesis validators - genesis, _ := defaultGenesis() + genesis, _ := defaultGenesis(t, service.vm.ctx.AVAXAssetID) addrsStrs := []string{} for i, validator := range genesis.Validators { - addr := fmt.Sprintf("P-%s", validator.RewardOwner.Addresses[0]) + addr := "P-" + validator.RewardOwner.Addresses[0] addrsStrs = append(addrsStrs, addr) args := GetStakeArgs{ - api.JSONAddresses{ + JSONAddresses: api.JSONAddresses{ Addresses: []string{addr}, }, - formatting.Hex, + Encoding: formatting.Hex, } response := GetStakeReply{} require.NoError(service.GetStake(nil, &args, &response)) - require.EqualValues(uint64(defaultWeight), uint64(response.Staked)) + require.Equal(defaultWeight, uint64(response.Staked)) require.Len(response.Outputs, 1) // Unmarshal into an output @@ -452,8 +433,8 @@ func TestGetStake(t *testing.T) { require.NoError(err) out := output.Out.(*secp256k1fx.TransferOutput) - require.EqualValues(defaultWeight, out.Amount()) - require.EqualValues(1, 
out.Threshold) + require.Equal(defaultWeight, out.Amount()) + require.Equal(uint32(1), out.Threshold) require.Len(out.Addrs, 1) require.Equal(keys[i].PublicKey().Address(), out.Addrs[0]) require.Zero(out.Locktime) @@ -461,14 +442,14 @@ func TestGetStake(t *testing.T) { // Make sure this works for multiple addresses args := GetStakeArgs{ - api.JSONAddresses{ + JSONAddresses: api.JSONAddresses{ Addresses: addrsStrs, }, - formatting.Hex, + Encoding: formatting.Hex, } response := GetStakeReply{} require.NoError(service.GetStake(nil, &args, &response)) - require.EqualValues(len(genesis.Validators)*defaultWeight, response.Staked) + require.Equal(len(genesis.Validators)*int(defaultWeight), int(response.Staked)) require.Len(response.Outputs, len(genesis.Validators)) for _, outputStr := range response.Outputs { @@ -480,32 +461,38 @@ func TestGetStake(t *testing.T) { require.NoError(err) out := output.Out.(*secp256k1fx.TransferOutput) - require.EqualValues(defaultWeight, out.Amount()) - require.EqualValues(1, out.Threshold) + require.Equal(defaultWeight, out.Amount()) + require.Equal(uint32(1), out.Threshold) require.Zero(out.Locktime) require.Len(out.Addrs, 1) } - oldStake := uint64(defaultWeight) + oldStake := defaultWeight + + service.vm.ctx.Lock.Lock() // Add a delegator stakeAmount := service.vm.MinDelegatorStake + 12345 - delegatorNodeID := ids.NodeID(keys[0].PublicKey().Address()) - delegatorEndTime := uint64(defaultGenesisTime.Add(defaultMinStakingDuration).Unix()) + delegatorNodeID := genesisNodeIDs[0] + delegatorStartTime := defaultValidateStartTime + delegatorEndTime := defaultGenesisTime.Add(defaultMinStakingDuration) tx, err := service.vm.txBuilder.NewAddDelegatorTx( stakeAmount, - uint64(defaultGenesisTime.Unix()), - delegatorEndTime, + uint64(delegatorStartTime.Unix()), + uint64(delegatorEndTime.Unix()), delegatorNodeID, ids.GenerateTestShortID(), []*secp256k1.PrivateKey{keys[0]}, keys[0].PublicKey().Address(), // change addr + nil, ) require.NoError(err) + 
addDelTx := tx.Unsigned.(*txs.AddDelegatorTx) staker, err := state.NewCurrentStaker( tx.ID(), - tx.Unsigned.(*txs.AddDelegatorTx), + addDelTx, + delegatorStartTime, 0, ) require.NoError(err) @@ -514,11 +501,13 @@ func TestGetStake(t *testing.T) { service.vm.state.AddTx(tx, status.Committed) require.NoError(service.vm.state.Commit()) + service.vm.ctx.Lock.Unlock() + // Make sure the delegator addr has the right stake (old stake + stakeAmount) addr, _ := service.addrManager.FormatLocalAddress(keys[0].PublicKey().Address()) args.Addresses = []string{addr} require.NoError(service.GetStake(nil, &args, &response)) - require.EqualValues(oldStake+stakeAmount, uint64(response.Staked)) + require.Equal(oldStake+stakeAmount, uint64(response.Staked)) require.Len(response.Outputs, 2) // Unmarshal into transferable outputs @@ -531,10 +520,12 @@ func TestGetStake(t *testing.T) { } // Make sure the stake amount is as expected - require.EqualValues(stakeAmount+oldStake, outputs[0].Out.Amount()+outputs[1].Out.Amount()) + require.Equal(stakeAmount+oldStake, outputs[0].Out.Amount()+outputs[1].Out.Amount()) oldStake = uint64(response.Staked) + service.vm.ctx.Lock.Lock() + // Make sure this works for pending stakers // Add a pending staker stakeAmount = service.vm.MinValidatorStake + 54321 @@ -549,6 +540,7 @@ func TestGetStake(t *testing.T) { 0, []*secp256k1.PrivateKey{keys[0]}, keys[0].PublicKey().Address(), // change addr + nil, ) require.NoError(err) @@ -562,9 +554,11 @@ func TestGetStake(t *testing.T) { service.vm.state.AddTx(tx, status.Committed) require.NoError(service.vm.state.Commit()) + service.vm.ctx.Lock.Unlock() + // Make sure the delegator has the right stake (old stake + stakeAmount) require.NoError(service.GetStake(nil, &args, &response)) - require.EqualValues(oldStake+stakeAmount, response.Staked) + require.Equal(oldStake+stakeAmount, uint64(response.Staked)) require.Len(response.Outputs, 3) // Unmarshal @@ -577,30 +571,22 @@ func TestGetStake(t *testing.T) { } // Make 
sure the stake amount is as expected - require.EqualValues(stakeAmount+oldStake, outputs[0].Out.Amount()+outputs[1].Out.Amount()+outputs[2].Out.Amount()) + require.Equal(stakeAmount+oldStake, outputs[0].Out.Amount()+outputs[1].Out.Amount()+outputs[2].Out.Amount()) } -// Test method GetCurrentValidators func TestGetCurrentValidators(t *testing.T) { require := require.New(t) service, _ := defaultService(t) defaultAddress(t, service) - service.vm.ctx.Lock.Lock() - defer func() { - err := service.vm.Shutdown(context.Background()) - require.NoError(err) - service.vm.ctx.Lock.Unlock() - }() - genesis, _ := defaultGenesis() + genesis, _ := defaultGenesis(t, service.vm.ctx.AVAXAssetID) // Call getValidators args := GetCurrentValidatorsArgs{SubnetID: constants.PrimaryNetworkID} response := GetCurrentValidatorsReply{} - err := service.GetCurrentValidators(nil, &args, &response) - require.NoError(err) - require.Equal(len(genesis.Validators), len(response.Validators)) + require.NoError(service.GetCurrentValidators(nil, &args, &response)) + require.Len(response.Validators, len(genesis.Validators)) for _, vdr := range genesis.Validators { found := false @@ -619,38 +605,43 @@ func TestGetCurrentValidators(t *testing.T) { // Add a delegator stakeAmount := service.vm.MinDelegatorStake + 12345 - validatorNodeID := ids.NodeID(keys[1].PublicKey().Address()) - delegatorStartTime := uint64(defaultValidateStartTime.Unix()) - delegatorEndTime := uint64(defaultValidateStartTime.Add(defaultMinStakingDuration).Unix()) + validatorNodeID := genesisNodeIDs[1] + delegatorStartTime := defaultValidateStartTime + delegatorEndTime := delegatorStartTime.Add(defaultMinStakingDuration) + + service.vm.ctx.Lock.Lock() delTx, err := service.vm.txBuilder.NewAddDelegatorTx( stakeAmount, - delegatorStartTime, - delegatorEndTime, + uint64(delegatorStartTime.Unix()), + uint64(delegatorEndTime.Unix()), validatorNodeID, ids.GenerateTestShortID(), []*secp256k1.PrivateKey{keys[0]}, keys[0].PublicKey().Address(), 
// change addr + nil, ) require.NoError(err) + addDelTx := delTx.Unsigned.(*txs.AddDelegatorTx) staker, err := state.NewCurrentStaker( delTx.ID(), - delTx.Unsigned.(*txs.AddDelegatorTx), + addDelTx, + delegatorStartTime, 0, ) require.NoError(err) service.vm.state.PutCurrentDelegator(staker) service.vm.state.AddTx(delTx, status.Committed) - err = service.vm.state.Commit() - require.NoError(err) + require.NoError(service.vm.state.Commit()) + + service.vm.ctx.Lock.Unlock() // Call getCurrentValidators args = GetCurrentValidatorsArgs{SubnetID: constants.PrimaryNetworkID} - err = service.GetCurrentValidators(nil, &args, &response) - require.NoError(err) - require.Equal(len(genesis.Validators), len(response.Validators)) + require.NoError(service.GetCurrentValidators(nil, &args, &response)) + require.Len(response.Validators, len(genesis.Validators)) // Make sure the delegator is there found := false @@ -668,61 +659,64 @@ func TestGetCurrentValidators(t *testing.T) { NodeIDs: []ids.NodeID{vdr.NodeID}, } innerResponse := GetCurrentValidatorsReply{} - err = service.GetCurrentValidators(nil, &innerArgs, &innerResponse) - require.NoError(err) + require.NoError(service.GetCurrentValidators(nil, &innerArgs, &innerResponse)) require.Len(innerResponse.Validators, 1) innerVdr := innerResponse.Validators[0].(pchainapi.PermissionlessValidator) require.Equal(vdr.NodeID, innerVdr.NodeID) require.NotNil(innerVdr.Delegators) - require.Equal(1, len(*innerVdr.Delegators)) + require.Len(*innerVdr.Delegators, 1) delegator := (*innerVdr.Delegators)[0] require.Equal(delegator.NodeID, innerVdr.NodeID) - require.Equal(uint64(delegator.StartTime), delegatorStartTime) - require.Equal(uint64(delegator.EndTime), delegatorEndTime) + require.Equal(int64(delegator.StartTime), delegatorStartTime.Unix()) + require.Equal(int64(delegator.EndTime), delegatorEndTime.Unix()) require.Equal(uint64(delegator.Weight), stakeAmount) } require.True(found) + service.vm.ctx.Lock.Lock() + // Reward the delegator - tx, 
err := service.vm.txBuilder.NewRewardValidatorTx(delTx.ID()) + tx, err := builder.NewRewardValidatorTx(service.vm.ctx, delTx.ID()) require.NoError(err) service.vm.state.AddTx(tx, status.Committed) service.vm.state.DeleteCurrentDelegator(staker) require.NoError(service.vm.state.SetDelegateeReward(staker.SubnetID, staker.NodeID, 100000)) require.NoError(service.vm.state.Commit()) + service.vm.ctx.Lock.Unlock() + // Call getValidators response = GetCurrentValidatorsReply{} require.NoError(service.GetCurrentValidators(nil, &args, &response)) - require.Equal(len(genesis.Validators), len(response.Validators)) + require.Len(response.Validators, len(genesis.Validators)) - for i := 0; i < len(response.Validators); i++ { - vdr := response.Validators[i].(pchainapi.PermissionlessValidator) - if vdr.NodeID != validatorNodeID { + for _, vdr := range response.Validators { + castVdr := vdr.(pchainapi.PermissionlessValidator) + if castVdr.NodeID != validatorNodeID { continue } - require.Equal(uint64(100000), uint64(*vdr.AccruedDelegateeReward)) + require.Equal(uint64(100000), uint64(*castVdr.AccruedDelegateeReward)) } } func TestGetTimestamp(t *testing.T) { require := require.New(t) service, _ := defaultService(t) - service.vm.ctx.Lock.Lock() - defer func() { - require.NoError(service.vm.Shutdown(context.Background())) - service.vm.ctx.Lock.Unlock() - }() reply := GetTimestampReply{} require.NoError(service.GetTimestamp(nil, nil, &reply)) + + service.vm.ctx.Lock.Lock() + require.Equal(service.vm.state.GetTimestamp(), reply.Timestamp) newTimestamp := reply.Timestamp.Add(time.Second) service.vm.state.SetTimestamp(newTimestamp) + service.vm.ctx.Lock.Unlock() + require.NoError(service.GetTimestamp(nil, nil, &reply)) require.Equal(newTimestamp, reply.Timestamp) } @@ -747,26 +741,27 @@ func TestGetBlock(t *testing.T) { require := require.New(t) service, _ := defaultService(t) service.vm.ctx.Lock.Lock() - defer service.vm.ctx.Lock.Unlock() service.vm.Config.CreateAssetTxFee = 100 * 
defaultTxFee // Make a block an accept it, then check we can get it. tx, err := service.vm.txBuilder.NewCreateChainTx( // Test GetTx works for standard blocks testSubnet1.ID(), - nil, + []byte{}, constants.AVMID, - nil, + []ids.ID{}, "chain name", []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, keys[0].PublicKey().Address(), // change addr + nil, ) require.NoError(err) - preferred, err := service.vm.Builder.Preferred() + preferredID := service.vm.manager.Preferred() + preferred, err := service.vm.manager.GetBlock(preferredID) require.NoError(err) - statelessBlock, err := blocks.NewBanffStandardBlock( + statelessBlock, err := block.NewBanffStandardBlock( preferred.Timestamp(), preferred.ID(), preferred.Height()+1, @@ -774,31 +769,259 @@ func TestGetBlock(t *testing.T) { ) require.NoError(err) - block := service.vm.manager.NewBlock(statelessBlock) + blk := service.vm.manager.NewBlock(statelessBlock) + + require.NoError(blk.Verify(context.Background())) + require.NoError(blk.Accept(context.Background())) - require.NoError(block.Verify(context.Background())) - require.NoError(block.Accept(context.Background())) + service.vm.ctx.Lock.Unlock() args := api.GetBlockArgs{ - BlockID: block.ID(), + BlockID: blk.ID(), Encoding: test.encoding, } response := api.GetBlockResponse{} - err = service.GetBlock(nil, &args, &response) - require.NoError(err) + require.NoError(service.GetBlock(nil, &args, &response)) switch { case test.encoding == formatting.JSON: - require.Equal(statelessBlock, response.Block) - - _, err = stdjson.Marshal(response) + statelessBlock.InitCtx(service.vm.ctx) + expectedBlockJSON, err := json.Marshal(statelessBlock) require.NoError(err) + require.Equal(expectedBlockJSON, []byte(response.Block)) default: - decoded, _ := formatting.Decode(response.Encoding, response.Block.(string)) - require.Equal(block.Bytes(), decoded) + var blockStr string + require.NoError(json.Unmarshal(response.Block, &blockStr)) + responseBlockBytes, err 
:= formatting.Decode(response.Encoding, blockStr) + require.NoError(err) + require.Equal(blk.Bytes(), responseBlockBytes) } require.Equal(test.encoding, response.Encoding) }) } } + +func TestGetValidatorsAtReplyMarshalling(t *testing.T) { + require := require.New(t) + + reply := &GetValidatorsAtReply{ + Validators: make(map[ids.NodeID]*validators.GetValidatorOutput), + } + + { + reply.Validators[ids.EmptyNodeID] = &validators.GetValidatorOutput{ + NodeID: ids.EmptyNodeID, + PublicKey: nil, + Weight: 0, + } + } + { + nodeID := ids.GenerateTestNodeID() + sk, err := bls.NewSecretKey() + require.NoError(err) + reply.Validators[nodeID] = &validators.GetValidatorOutput{ + NodeID: nodeID, + PublicKey: bls.PublicFromSecretKey(sk), + Weight: math.MaxUint64, + } + } + + replyJSON, err := reply.MarshalJSON() + require.NoError(err) + + var parsedReply GetValidatorsAtReply + require.NoError(parsedReply.UnmarshalJSON(replyJSON)) + require.Equal(reply, &parsedReply) +} + +func TestServiceGetBlockByHeight(t *testing.T) { + ctrl := gomock.NewController(t) + + blockID := ids.GenerateTestID() + blockHeight := uint64(1337) + + type test struct { + name string + serviceAndExpectedBlockFunc func(t *testing.T, ctrl *gomock.Controller) (*Service, interface{}) + encoding formatting.Encoding + expectedErr error + } + + tests := []test{ + { + name: "block height not found", + serviceAndExpectedBlockFunc: func(_ *testing.T, ctrl *gomock.Controller) (*Service, interface{}) { + state := state.NewMockState(ctrl) + state.EXPECT().GetBlockIDAtHeight(blockHeight).Return(ids.Empty, database.ErrNotFound) + + manager := blockexecutor.NewMockManager(ctrl) + return &Service{ + vm: &VM{ + state: state, + manager: manager, + ctx: &snow.Context{ + Log: logging.NoLog{}, + }, + }, + }, nil + }, + encoding: formatting.Hex, + expectedErr: database.ErrNotFound, + }, + { + name: "block not found", + serviceAndExpectedBlockFunc: func(_ *testing.T, ctrl *gomock.Controller) (*Service, interface{}) { + state := 
state.NewMockState(ctrl) + state.EXPECT().GetBlockIDAtHeight(blockHeight).Return(blockID, nil) + + manager := blockexecutor.NewMockManager(ctrl) + manager.EXPECT().GetStatelessBlock(blockID).Return(nil, database.ErrNotFound) + return &Service{ + vm: &VM{ + state: state, + manager: manager, + ctx: &snow.Context{ + Log: logging.NoLog{}, + }, + }, + }, nil + }, + encoding: formatting.Hex, + expectedErr: database.ErrNotFound, + }, + { + name: "JSON format", + serviceAndExpectedBlockFunc: func(_ *testing.T, ctrl *gomock.Controller) (*Service, interface{}) { + block := block.NewMockBlock(ctrl) + block.EXPECT().InitCtx(gomock.Any()) + + state := state.NewMockState(ctrl) + state.EXPECT().GetBlockIDAtHeight(blockHeight).Return(blockID, nil) + + manager := blockexecutor.NewMockManager(ctrl) + manager.EXPECT().GetStatelessBlock(blockID).Return(block, nil) + return &Service{ + vm: &VM{ + state: state, + manager: manager, + ctx: &snow.Context{ + Log: logging.NoLog{}, + }, + }, + }, block + }, + encoding: formatting.JSON, + expectedErr: nil, + }, + { + name: "hex format", + serviceAndExpectedBlockFunc: func(t *testing.T, ctrl *gomock.Controller) (*Service, interface{}) { + block := block.NewMockBlock(ctrl) + blockBytes := []byte("hi mom") + block.EXPECT().Bytes().Return(blockBytes) + + state := state.NewMockState(ctrl) + state.EXPECT().GetBlockIDAtHeight(blockHeight).Return(blockID, nil) + + expected, err := formatting.Encode(formatting.Hex, blockBytes) + require.NoError(t, err) + + manager := blockexecutor.NewMockManager(ctrl) + manager.EXPECT().GetStatelessBlock(blockID).Return(block, nil) + return &Service{ + vm: &VM{ + state: state, + manager: manager, + ctx: &snow.Context{ + Log: logging.NoLog{}, + }, + }, + }, expected + }, + encoding: formatting.Hex, + expectedErr: nil, + }, + { + name: "hexc format", + serviceAndExpectedBlockFunc: func(t *testing.T, ctrl *gomock.Controller) (*Service, interface{}) { + block := block.NewMockBlock(ctrl) + blockBytes := []byte("hi mom") + 
block.EXPECT().Bytes().Return(blockBytes) + + state := state.NewMockState(ctrl) + state.EXPECT().GetBlockIDAtHeight(blockHeight).Return(blockID, nil) + + expected, err := formatting.Encode(formatting.HexC, blockBytes) + require.NoError(t, err) + + manager := blockexecutor.NewMockManager(ctrl) + manager.EXPECT().GetStatelessBlock(blockID).Return(block, nil) + return &Service{ + vm: &VM{ + state: state, + manager: manager, + ctx: &snow.Context{ + Log: logging.NoLog{}, + }, + }, + }, expected + }, + encoding: formatting.HexC, + expectedErr: nil, + }, + { + name: "hexnc format", + serviceAndExpectedBlockFunc: func(t *testing.T, ctrl *gomock.Controller) (*Service, interface{}) { + block := block.NewMockBlock(ctrl) + blockBytes := []byte("hi mom") + block.EXPECT().Bytes().Return(blockBytes) + + state := state.NewMockState(ctrl) + state.EXPECT().GetBlockIDAtHeight(blockHeight).Return(blockID, nil) + + expected, err := formatting.Encode(formatting.HexNC, blockBytes) + require.NoError(t, err) + + manager := blockexecutor.NewMockManager(ctrl) + manager.EXPECT().GetStatelessBlock(blockID).Return(block, nil) + return &Service{ + vm: &VM{ + state: state, + manager: manager, + ctx: &snow.Context{ + Log: logging.NoLog{}, + }, + }, + }, expected + }, + encoding: formatting.HexNC, + expectedErr: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + + service, expected := tt.serviceAndExpectedBlockFunc(t, ctrl) + + args := &api.GetBlockByHeightArgs{ + Height: avajson.Uint64(blockHeight), + Encoding: tt.encoding, + } + reply := &api.GetBlockResponse{} + err := service.GetBlockByHeight(nil, args, reply) + require.ErrorIs(err, tt.expectedErr) + if tt.expectedErr != nil { + return + } + require.Equal(tt.encoding, reply.Encoding) + + expectedJSON, err := json.Marshal(expected) + require.NoError(err) + + require.Equal(json.RawMessage(expectedJSON), reply.Block) + }) + } +} diff --git a/avalanchego/vms/platformvm/signer/empty.go 
b/avalanchego/vms/platformvm/signer/empty.go index 21bfbcab..21412ae6 100644 --- a/avalanchego/vms/platformvm/signer/empty.go +++ b/avalanchego/vms/platformvm/signer/empty.go @@ -1,11 +1,9 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package signer -import ( - "github.com/ava-labs/avalanchego/utils/crypto/bls" -) +import "github.com/ava-labs/avalanchego/utils/crypto/bls" var _ Signer = (*Empty)(nil) diff --git a/avalanchego/vms/platformvm/signer/empty_test.go b/avalanchego/vms/platformvm/signer/empty_test.go index e6a6307b..9fe949f4 100644 --- a/avalanchego/vms/platformvm/signer/empty_test.go +++ b/avalanchego/vms/platformvm/signer/empty_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package signer diff --git a/avalanchego/vms/platformvm/signer/proof_of_possession.go b/avalanchego/vms/platformvm/signer/proof_of_possession.go index 35ddcb32..8b32975b 100644 --- a/avalanchego/vms/platformvm/signer/proof_of_possession.go +++ b/avalanchego/vms/platformvm/signer/proof_of_possession.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package signer diff --git a/avalanchego/vms/platformvm/signer/proof_of_possession_test.go b/avalanchego/vms/platformvm/signer/proof_of_possession_test.go index c29ac1ad..9f4f3fee 100644 --- a/avalanchego/vms/platformvm/signer/proof_of_possession_test.go +++ b/avalanchego/vms/platformvm/signer/proof_of_possession_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package signer @@ -22,17 +22,20 @@ func TestProofOfPossession(t *testing.T) { blsPOP, err = newProofOfPossession() require.NoError(err) blsPOP.ProofOfPossession = [bls.SignatureLen]byte{} - require.Error(blsPOP.Verify()) + err = blsPOP.Verify() + require.ErrorIs(err, bls.ErrFailedSignatureDecompress) blsPOP, err = newProofOfPossession() require.NoError(err) blsPOP.PublicKey = [bls.PublicKeyLen]byte{} - require.Error(blsPOP.Verify()) + err = blsPOP.Verify() + require.ErrorIs(err, bls.ErrFailedPublicKeyDecompress) newBLSPOP, err := newProofOfPossession() require.NoError(err) newBLSPOP.ProofOfPossession = blsPOP.ProofOfPossession - require.ErrorIs(newBLSPOP.Verify(), errInvalidProofOfPossession) + err = newBLSPOP.Verify() + require.ErrorIs(err, errInvalidProofOfPossession) } func TestNewProofOfPossessionDeterministic(t *testing.T) { diff --git a/avalanchego/vms/platformvm/signer/signer.go b/avalanchego/vms/platformvm/signer/signer.go index 7269ad19..31bf212d 100644 --- a/avalanchego/vms/platformvm/signer/signer.go +++ b/avalanchego/vms/platformvm/signer/signer.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package signer diff --git a/avalanchego/vms/platformvm/stakeable/stakeable_lock.go b/avalanchego/vms/platformvm/stakeable/stakeable_lock.go index 5c09cbfd..58149266 100644 --- a/avalanchego/vms/platformvm/stakeable/stakeable_lock.go +++ b/avalanchego/vms/platformvm/stakeable/stakeable_lock.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package stakeable diff --git a/avalanchego/vms/platformvm/stakeable/stakeable_lock_test.go b/avalanchego/vms/platformvm/stakeable/stakeable_lock_test.go index 5a6cfce5..352137c0 100644 --- a/avalanchego/vms/platformvm/stakeable/stakeable_lock_test.go +++ b/avalanchego/vms/platformvm/stakeable/stakeable_lock_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package stakeable @@ -7,9 +7,8 @@ import ( "errors" "testing" - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" "github.com/ava-labs/avalanchego/vms/components/avax" ) @@ -36,7 +35,7 @@ func TestLockOutVerify(t *testing.T) { { name: "invalid locktime", locktime: 0, - transferableOutF: func(ctrl *gomock.Controller) avax.TransferableOut { + transferableOutF: func(*gomock.Controller) avax.TransferableOut { return nil }, expectedErr: errInvalidLocktime, @@ -44,7 +43,7 @@ func TestLockOutVerify(t *testing.T) { { name: "nested", locktime: 1, - transferableOutF: func(ctrl *gomock.Controller) avax.TransferableOut { + transferableOutF: func(*gomock.Controller) avax.TransferableOut { return &LockOut{} }, expectedErr: errNestedStakeableLocks, @@ -64,7 +63,6 @@ func TestLockOutVerify(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { ctrl := gomock.NewController(t) - defer ctrl.Finish() lockOut := &LockOut{ Locktime: tt.locktime, @@ -95,7 +93,7 @@ func TestLockInVerify(t *testing.T) { { name: "invalid locktime", locktime: 0, - transferableInF: func(ctrl *gomock.Controller) avax.TransferableIn { + transferableInF: func(*gomock.Controller) avax.TransferableIn { return nil }, expectedErr: errInvalidLocktime, @@ -103,7 +101,7 @@ func TestLockInVerify(t *testing.T) { { name: "nested", locktime: 1, - transferableInF: func(ctrl *gomock.Controller) avax.TransferableIn { + transferableInF: 
func(*gomock.Controller) avax.TransferableIn { return &LockIn{} }, expectedErr: errNestedStakeableLocks, @@ -123,7 +121,6 @@ func TestLockInVerify(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { ctrl := gomock.NewController(t) - defer ctrl.Finish() lockOut := &LockIn{ Locktime: tt.locktime, diff --git a/avalanchego/vms/platformvm/state/diff.go b/avalanchego/vms/platformvm/state/diff.go index 6efdd9e2..568cb370 100644 --- a/avalanchego/vms/platformvm/state/diff.go +++ b/avalanchego/vms/platformvm/state/diff.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package state @@ -11,12 +11,14 @@ import ( "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/vms/components/avax" + "github.com/ava-labs/avalanchego/vms/platformvm/fx" "github.com/ava-labs/avalanchego/vms/platformvm/status" "github.com/ava-labs/avalanchego/vms/platformvm/txs" ) var ( - _ Diff = (*diff)(nil) + _ Diff = (*diff)(nil) + _ Versions = stateGetter{} ErrMissingParentState = errors.New("missing parent state") ) @@ -24,7 +26,7 @@ var ( type Diff interface { Chain - Apply(State) error + Apply(Chain) error } type diff struct { @@ -44,12 +46,12 @@ type diff struct { pendingStakerDiffs diffStakers addedSubnets []*txs.Tx + // Subnet ID --> Owner of the subnet + subnetOwners map[ids.ID]fx.Owner // Subnet ID --> Tx that transforms the subnet transformedSubnets map[ids.ID]*txs.Tx - cachedSubnets []*txs.Tx - addedChains map[ids.ID][]*txs.Tx - cachedChains map[ids.ID][]*txs.Tx + addedChains map[ids.ID][]*txs.Tx addedRewardUTXOs map[ids.ID][]*avax.UTXO @@ -72,6 +74,7 @@ func NewDiff( parentID: parentID, stateVersions: stateVersions, timestamp: parentState.GetTimestamp(), + subnetOwners: make(map[ids.ID]fx.Owner), }, nil } @@ -79,6 +82,20 @@ func (d *diff) GetNetworkID() uint32 { 
return d.networkID } +type stateGetter struct { + state Chain +} + +func (s stateGetter) GetState(ids.ID) (Chain, bool) { + return s.state, true +} + +func NewDiffOn(parentState Chain) (Diff, error) { + return NewDiff(ids.Empty, stateGetter{ + state: parentState, + }) +} + func (d *diff) GetTimestamp() time.Time { return d.timestamp } @@ -262,41 +279,26 @@ func (d *diff) GetPendingStakerIterator() (StakerIterator, error) { return d.pendingStakerDiffs.GetStakerIterator(parentIterator), nil } -func (d *diff) GetSubnets() ([]*txs.Tx, error) { - if len(d.addedSubnets) == 0 { - parentState, ok := d.stateVersions.GetState(d.parentID) - if !ok { - return nil, fmt.Errorf("%w: %s", ErrMissingParentState, d.parentID) - } - return parentState.GetSubnets() - } +func (d *diff) AddSubnet(createSubnetTx *txs.Tx) { + d.addedSubnets = append(d.addedSubnets, createSubnetTx) +} - if len(d.cachedSubnets) != 0 { - return d.cachedSubnets, nil +func (d *diff) GetSubnetOwner(subnetID ids.ID) (fx.Owner, error) { + owner, exists := d.subnetOwners[subnetID] + if exists { + return owner, nil } + // If the subnet owner was not assigned in this diff, ask the parent state. 
parentState, ok := d.stateVersions.GetState(d.parentID) if !ok { - return nil, fmt.Errorf("%w: %s", ErrMissingParentState, d.parentID) - } - subnets, err := parentState.GetSubnets() - if err != nil { - return nil, err - } - newSubnets := make([]*txs.Tx, len(subnets)+len(d.addedSubnets)) - copy(newSubnets, subnets) - for i, subnet := range d.addedSubnets { - newSubnets[i+len(subnets)] = subnet + return nil, ErrMissingParentState } - d.cachedSubnets = newSubnets - return newSubnets, nil + return parentState.GetSubnetOwner(subnetID) } -func (d *diff) AddSubnet(createSubnetTx *txs.Tx) { - d.addedSubnets = append(d.addedSubnets, createSubnetTx) - if d.cachedSubnets != nil { - d.cachedSubnets = append(d.cachedSubnets, createSubnetTx) - } +func (d *diff) SetSubnetOwner(subnetID ids.ID, owner fx.Owner) { + d.subnetOwners[subnetID] = owner } func (d *diff) GetSubnetTransformation(subnetID ids.ID) (*txs.Tx, error) { @@ -324,48 +326,6 @@ func (d *diff) AddSubnetTransformation(transformSubnetTxIntf *txs.Tx) { } } -func (d *diff) GetChains(subnetID ids.ID) ([]*txs.Tx, error) { - addedChains := d.addedChains[subnetID] - if len(addedChains) == 0 { - // No chains have been added to this subnet - parentState, ok := d.stateVersions.GetState(d.parentID) - if !ok { - return nil, fmt.Errorf("%w: %s", ErrMissingParentState, d.parentID) - } - return parentState.GetChains(subnetID) - } - - // There have been chains added to the requested subnet - - if d.cachedChains == nil { - // This is the first time we are going to be caching the subnet chains - d.cachedChains = make(map[ids.ID][]*txs.Tx) - } - - cachedChains, cached := d.cachedChains[subnetID] - if cached { - return cachedChains, nil - } - - // This chain wasn't cached yet - parentState, ok := d.stateVersions.GetState(d.parentID) - if !ok { - return nil, fmt.Errorf("%w: %s", ErrMissingParentState, d.parentID) - } - chains, err := parentState.GetChains(subnetID) - if err != nil { - return nil, err - } - - newChains := make([]*txs.Tx, 
len(chains)+len(addedChains)) - copy(newChains, chains) - for i, chain := range addedChains { - newChains[i+len(chains)] = chain - } - d.cachedChains[subnetID] = newChains - return newChains, nil -} - func (d *diff) AddChain(createChainTx *txs.Tx) { tx := createChainTx.Unsigned.(*txs.CreateChainTx) if d.addedChains == nil { @@ -375,12 +335,6 @@ func (d *diff) AddChain(createChainTx *txs.Tx) { } else { d.addedChains[tx.SubnetID] = append(d.addedChains[tx.SubnetID], createChainTx) } - - cachedChains, cached := d.cachedChains[tx.SubnetID] - if !cached { - return - } - d.cachedChains[tx.SubnetID] = append(cachedChains, createChainTx) } func (d *diff) GetTx(txID ids.ID) (*txs.Tx, status.Status, error) { @@ -410,18 +364,6 @@ func (d *diff) AddTx(tx *txs.Tx, status status.Status) { } } -func (d *diff) GetRewardUTXOs(txID ids.ID) ([]*avax.UTXO, error) { - if utxos, exists := d.addedRewardUTXOs[txID]; exists { - return utxos, nil - } - - parentState, ok := d.stateVersions.GetState(d.parentID) - if !ok { - return nil, fmt.Errorf("%w: %s", ErrMissingParentState, d.parentID) - } - return parentState.GetRewardUTXOs(txID) -} - func (d *diff) AddRewardUTXO(txID ids.ID, utxo *avax.UTXO) { if d.addedRewardUTXOs == nil { d.addedRewardUTXOs = make(map[ids.ID][]*avax.UTXO) @@ -464,7 +406,7 @@ func (d *diff) DeleteUTXO(utxoID ids.ID) { } } -func (d *diff) Apply(baseState State) error { +func (d *diff) Apply(baseState Chain) error { baseState.SetTimestamp(d.timestamp) for subnetID, supply := range d.currentSupply { baseState.SetCurrentSupply(subnetID, supply) @@ -542,5 +484,8 @@ func (d *diff) Apply(baseState State) error { baseState.DeleteUTXO(utxoID) } } + for subnetID, owner := range d.subnetOwners { + baseState.SetSubnetOwner(subnetID, owner) + } return nil } diff --git a/avalanchego/vms/platformvm/state/diff_test.go b/avalanchego/vms/platformvm/state/diff_test.go index 40bfee20..87fd5971 100644 --- a/avalanchego/vms/platformvm/state/diff_test.go +++ 
b/avalanchego/vms/platformvm/state/diff_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package state @@ -7,22 +7,21 @@ import ( "testing" "time" - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/vms/components/avax" + "github.com/ava-labs/avalanchego/vms/platformvm/fx" "github.com/ava-labs/avalanchego/vms/platformvm/status" "github.com/ava-labs/avalanchego/vms/platformvm/txs" ) func TestDiffMissingState(t *testing.T) { ctrl := gomock.NewController(t) - defer ctrl.Finish() versions := NewMockVersions(ctrl) @@ -36,10 +35,9 @@ func TestDiffMissingState(t *testing.T) { func TestDiffCreation(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() lastAcceptedID := ids.GenerateTestID() - state, _ := newInitializedState(require) + state := newInitializedState(require) versions := NewMockVersions(ctrl) versions.EXPECT().GetState(lastAcceptedID).AnyTimes().Return(state, true) @@ -51,10 +49,9 @@ func TestDiffCreation(t *testing.T) { func TestDiffCurrentSupply(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() lastAcceptedID := ids.GenerateTestID() - state, _ := newInitializedState(require) + state := newInitializedState(require) versions := NewMockVersions(ctrl) versions.EXPECT().GetState(lastAcceptedID).AnyTimes().Return(state, true) @@ -79,7 +76,6 @@ func TestDiffCurrentSupply(t *testing.T) { func TestDiffCurrentValidator(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() lastAcceptedID := ids.GenerateTestID() state := NewMockState(ctrl) @@ 
-117,7 +113,6 @@ func TestDiffCurrentValidator(t *testing.T) { func TestDiffPendingValidator(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() lastAcceptedID := ids.GenerateTestID() state := NewMockState(ctrl) @@ -155,7 +150,6 @@ func TestDiffPendingValidator(t *testing.T) { func TestDiffCurrentDelegator(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() currentDelegator := &Staker{ TxID: ids.GenerateTestID(), @@ -205,7 +199,6 @@ func TestDiffCurrentDelegator(t *testing.T) { func TestDiffPendingDelegator(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() pendingDelegator := &Staker{ TxID: ids.GenerateTestID(), @@ -255,78 +248,103 @@ func TestDiffPendingDelegator(t *testing.T) { func TestDiffSubnet(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() - state := NewMockState(ctrl) - // Called in NewDiff - state.EXPECT().GetTimestamp().Return(time.Now()).Times(1) + state := newInitializedState(require) + + // Initialize parent with one subnet + parentStateCreateSubnetTx := &txs.Tx{ + Unsigned: &txs.CreateSubnetTx{ + Owner: fx.NewMockOwner(ctrl), + }, + } + state.AddSubnet(parentStateCreateSubnetTx) + + // Verify parent returns one subnet + subnets, err := state.GetSubnets() + require.NoError(err) + require.Equal([]*txs.Tx{ + parentStateCreateSubnetTx, + }, subnets) states := NewMockVersions(ctrl) lastAcceptedID := ids.GenerateTestID() states.EXPECT().GetState(lastAcceptedID).Return(state, true).AnyTimes() - d, err := NewDiff(lastAcceptedID, states) + diff, err := NewDiff(lastAcceptedID, states) require.NoError(err) // Put a subnet - createSubnetTx := &txs.Tx{} - d.AddSubnet(createSubnetTx) - - // Assert that we get the subnet back - // [state] returns 1 subnet. 
- parentStateCreateSubnetTx := &txs.Tx{} - state.EXPECT().GetSubnets().Return([]*txs.Tx{parentStateCreateSubnetTx}, nil).Times(1) - gotSubnets, err := d.GetSubnets() - require.NoError(err) - require.Len(gotSubnets, 2) - require.Equal(gotSubnets[0], parentStateCreateSubnetTx) - require.Equal(gotSubnets[1], createSubnetTx) + createSubnetTx := &txs.Tx{ + Unsigned: &txs.CreateSubnetTx{ + Owner: fx.NewMockOwner(ctrl), + }, + } + diff.AddSubnet(createSubnetTx) + + // Apply diff to parent state + require.NoError(diff.Apply(state)) + + // Verify parent now returns two subnets + subnets, err = state.GetSubnets() + require.NoError(err) + require.Equal([]*txs.Tx{ + parentStateCreateSubnetTx, + createSubnetTx, + }, subnets) } func TestDiffChain(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() - state := NewMockState(ctrl) - // Called in NewDiff - state.EXPECT().GetTimestamp().Return(time.Now()).Times(1) + state := newInitializedState(require) + subnetID := ids.GenerateTestID() + + // Initialize parent with one chain + parentStateCreateChainTx := &txs.Tx{ + Unsigned: &txs.CreateChainTx{ + SubnetID: subnetID, + }, + } + state.AddChain(parentStateCreateChainTx) + + // Verify parent returns one chain + chains, err := state.GetChains(subnetID) + require.NoError(err) + require.Equal([]*txs.Tx{ + parentStateCreateChainTx, + }, chains) states := NewMockVersions(ctrl) lastAcceptedID := ids.GenerateTestID() states.EXPECT().GetState(lastAcceptedID).Return(state, true).AnyTimes() - d, err := NewDiff(lastAcceptedID, states) + diff, err := NewDiff(lastAcceptedID, states) require.NoError(err) // Put a chain - subnetID := ids.GenerateTestID() createChainTx := &txs.Tx{ Unsigned: &txs.CreateChainTx{ - SubnetID: subnetID, + SubnetID: subnetID, // note this is the same subnet as [parentStateCreateChainTx] }, } - d.AddChain(createChainTx) + diff.AddChain(createChainTx) - // Assert that we get the chain back - // [state] returns 1 chain. 
- parentStateCreateChainTx := &txs.Tx{ - Unsigned: &txs.CreateChainTx{ - SubnetID: subnetID, // note this is the same subnet as [createChainTx] - }, - } - state.EXPECT().GetChains(subnetID).Return([]*txs.Tx{parentStateCreateChainTx}, nil).Times(1) - gotChains, err := d.GetChains(subnetID) + // Apply diff to parent state + require.NoError(diff.Apply(state)) + + // Verify parent now returns two chains + chains, err = state.GetChains(subnetID) require.NoError(err) - require.Len(gotChains, 2) - require.Equal(parentStateCreateChainTx, gotChains[0]) - require.Equal(createChainTx, gotChains[1]) + require.Equal([]*txs.Tx{ + parentStateCreateChainTx, + createChainTx, + }, chains) } func TestDiffTx(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() state := NewMockState(ctrl) // Called in NewDiff @@ -377,53 +395,52 @@ func TestDiffTx(t *testing.T) { func TestDiffRewardUTXO(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() - state := NewMockState(ctrl) - // Called in NewDiff - state.EXPECT().GetTimestamp().Return(time.Now()).Times(1) + state := newInitializedState(require) + + txID := ids.GenerateTestID() + + // Initialize parent with one reward UTXO + parentRewardUTXO := &avax.UTXO{ + UTXOID: avax.UTXOID{TxID: txID}, + } + state.AddRewardUTXO(txID, parentRewardUTXO) + + // Verify parent returns the reward UTXO + rewardUTXOs, err := state.GetRewardUTXOs(txID) + require.NoError(err) + require.Equal([]*avax.UTXO{ + parentRewardUTXO, + }, rewardUTXOs) states := NewMockVersions(ctrl) lastAcceptedID := ids.GenerateTestID() states.EXPECT().GetState(lastAcceptedID).Return(state, true).AnyTimes() - d, err := NewDiff(lastAcceptedID, states) + diff, err := NewDiff(lastAcceptedID, states) require.NoError(err) // Put a reward UTXO - txID := ids.GenerateTestID() rewardUTXO := &avax.UTXO{ UTXOID: avax.UTXOID{TxID: txID}, } - d.AddRewardUTXO(txID, rewardUTXO) + diff.AddRewardUTXO(txID, rewardUTXO) - { 
- // Assert that we get the UTXO back - gotRewardUTXOs, err := d.GetRewardUTXOs(txID) - require.NoError(err) - require.Len(gotRewardUTXOs, 1) - require.Equal(rewardUTXO, gotRewardUTXOs[0]) - } + // Apply diff to parent state + require.NoError(diff.Apply(state)) - { - // Assert that we can get a UTXO from the parent state - // [state] returns 1 UTXO. - txID2 := ids.GenerateTestID() - parentRewardUTXO := &avax.UTXO{ - UTXOID: avax.UTXOID{TxID: txID2}, - } - state.EXPECT().GetRewardUTXOs(txID2).Return([]*avax.UTXO{parentRewardUTXO}, nil).Times(1) - gotParentRewardUTXOs, err := d.GetRewardUTXOs(txID2) - require.NoError(err) - require.Len(gotParentRewardUTXOs, 1) - require.Equal(parentRewardUTXO, gotParentRewardUTXOs[0]) - } + // Verify parent now returns two reward UTXOs + rewardUTXOs, err = state.GetRewardUTXOs(txID) + require.NoError(err) + require.Equal([]*avax.UTXO{ + parentRewardUTXO, + rewardUTXO, + }, rewardUTXOs) } func TestDiffUTXO(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() state := NewMockState(ctrl) // Called in NewDiff @@ -467,52 +484,192 @@ func TestDiffUTXO(t *testing.T) { // Make sure it's gone _, err = d.GetUTXO(utxo.InputID()) - require.Error(err) + require.ErrorIs(err, database.ErrNotFound) } } func assertChainsEqual(t *testing.T, expected, actual Chain) { + require := require.New(t) + t.Helper() expectedCurrentStakerIterator, expectedErr := expected.GetCurrentStakerIterator() actualCurrentStakerIterator, actualErr := actual.GetCurrentStakerIterator() - require.Equal(t, expectedErr, actualErr) + require.Equal(expectedErr, actualErr) if expectedErr == nil { assertIteratorsEqual(t, expectedCurrentStakerIterator, actualCurrentStakerIterator) } expectedPendingStakerIterator, expectedErr := expected.GetPendingStakerIterator() actualPendingStakerIterator, actualErr := actual.GetPendingStakerIterator() - require.Equal(t, expectedErr, actualErr) + require.Equal(expectedErr, actualErr) if expectedErr == nil 
{ assertIteratorsEqual(t, expectedPendingStakerIterator, actualPendingStakerIterator) } - require.Equal(t, expected.GetTimestamp(), actual.GetTimestamp()) + require.Equal(expected.GetTimestamp(), actual.GetTimestamp()) expectedCurrentSupply, err := expected.GetCurrentSupply(constants.PrimaryNetworkID) - require.NoError(t, err) + require.NoError(err) actualCurrentSupply, err := actual.GetCurrentSupply(constants.PrimaryNetworkID) - require.NoError(t, err) + require.NoError(err) - require.Equal(t, expectedCurrentSupply, actualCurrentSupply) + require.Equal(expectedCurrentSupply, actualCurrentSupply) +} - expectedSubnets, expectedErr := expected.GetSubnets() - actualSubnets, actualErr := actual.GetSubnets() - require.Equal(t, expectedErr, actualErr) - if expectedErr == nil { - require.Equal(t, expectedSubnets, actualSubnets) +func TestDiffSubnetOwner(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) - for _, subnet := range expectedSubnets { - subnetID := subnet.ID() + state := newInitializedState(require) - expectedChains, expectedErr := expected.GetChains(subnetID) - actualChains, actualErr := actual.GetChains(subnetID) - require.Equal(t, expectedErr, actualErr) - if expectedErr == nil { - require.Equal(t, expectedChains, actualChains) - } + states := NewMockVersions(ctrl) + lastAcceptedID := ids.GenerateTestID() + states.EXPECT().GetState(lastAcceptedID).Return(state, true).AnyTimes() + + var ( + owner1 = fx.NewMockOwner(ctrl) + owner2 = fx.NewMockOwner(ctrl) + + createSubnetTx = &txs.Tx{ + Unsigned: &txs.CreateSubnetTx{ + BaseTx: txs.BaseTx{}, + Owner: owner1, + }, } - } + + subnetID = createSubnetTx.ID() + ) + + // Create subnet on base state + owner, err := state.GetSubnetOwner(subnetID) + require.ErrorIs(err, database.ErrNotFound) + require.Nil(owner) + + state.AddSubnet(createSubnetTx) + state.SetSubnetOwner(subnetID, owner1) + + owner, err = state.GetSubnetOwner(subnetID) + require.NoError(err) + require.Equal(owner1, owner) + + // 
Create diff and verify that subnet owner returns correctly + d, err := NewDiff(lastAcceptedID, states) + require.NoError(err) + + owner, err = d.GetSubnetOwner(subnetID) + require.NoError(err) + require.Equal(owner1, owner) + + // Transferring subnet ownership on diff should be reflected on diff not state + d.SetSubnetOwner(subnetID, owner2) + owner, err = d.GetSubnetOwner(subnetID) + require.NoError(err) + require.Equal(owner2, owner) + + owner, err = state.GetSubnetOwner(subnetID) + require.NoError(err) + require.Equal(owner1, owner) + + // State should reflect new subnet owner after diff is applied. + require.NoError(d.Apply(state)) + + owner, err = state.GetSubnetOwner(subnetID) + require.NoError(err) + require.Equal(owner2, owner) +} + +func TestDiffStacking(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) + + state := newInitializedState(require) + + states := NewMockVersions(ctrl) + lastAcceptedID := ids.GenerateTestID() + states.EXPECT().GetState(lastAcceptedID).Return(state, true).AnyTimes() + + var ( + owner1 = fx.NewMockOwner(ctrl) + owner2 = fx.NewMockOwner(ctrl) + owner3 = fx.NewMockOwner(ctrl) + + createSubnetTx = &txs.Tx{ + Unsigned: &txs.CreateSubnetTx{ + BaseTx: txs.BaseTx{}, + Owner: owner1, + }, + } + + subnetID = createSubnetTx.ID() + ) + + // Create subnet on base state + owner, err := state.GetSubnetOwner(subnetID) + require.ErrorIs(err, database.ErrNotFound) + require.Nil(owner) + + state.AddSubnet(createSubnetTx) + state.SetSubnetOwner(subnetID, owner1) + + owner, err = state.GetSubnetOwner(subnetID) + require.NoError(err) + require.Equal(owner1, owner) + + // Create first diff and verify that subnet owner returns correctly + statesDiff, err := NewDiff(lastAcceptedID, states) + require.NoError(err) + + owner, err = statesDiff.GetSubnetOwner(subnetID) + require.NoError(err) + require.Equal(owner1, owner) + + // Transferring subnet ownership on first diff should be reflected on first diff not state + 
statesDiff.SetSubnetOwner(subnetID, owner2) + owner, err = statesDiff.GetSubnetOwner(subnetID) + require.NoError(err) + require.Equal(owner2, owner) + + owner, err = state.GetSubnetOwner(subnetID) + require.NoError(err) + require.Equal(owner1, owner) + + // Create a second diff on first diff and verify that subnet owner returns correctly + stackedDiff, err := NewDiffOn(statesDiff) + require.NoError(err) + owner, err = stackedDiff.GetSubnetOwner(subnetID) + require.NoError(err) + require.Equal(owner2, owner) + + // Transfer ownership on stacked diff and verify it is only reflected on stacked diff + stackedDiff.SetSubnetOwner(subnetID, owner3) + owner, err = stackedDiff.GetSubnetOwner(subnetID) + require.NoError(err) + require.Equal(owner3, owner) + + owner, err = statesDiff.GetSubnetOwner(subnetID) + require.NoError(err) + require.Equal(owner2, owner) + + owner, err = state.GetSubnetOwner(subnetID) + require.NoError(err) + require.Equal(owner1, owner) + + // Applying both diffs successively should work as expected. + require.NoError(stackedDiff.Apply(statesDiff)) + + owner, err = statesDiff.GetSubnetOwner(subnetID) + require.NoError(err) + require.Equal(owner3, owner) + + owner, err = state.GetSubnetOwner(subnetID) + require.NoError(err) + require.Equal(owner1, owner) + + require.NoError(statesDiff.Apply(state)) + + owner, err = state.GetSubnetOwner(subnetID) + require.NoError(err) + require.Equal(owner3, owner) } diff --git a/avalanchego/vms/platformvm/state/disk_staker_diff_iterator.go b/avalanchego/vms/platformvm/state/disk_staker_diff_iterator.go new file mode 100644 index 00000000..1c6e8833 --- /dev/null +++ b/avalanchego/vms/platformvm/state/disk_staker_diff_iterator.go @@ -0,0 +1,97 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package state + +import ( + "encoding/binary" + "fmt" + + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/ids" +) + +const ( + // startDiffKey = [subnetID] + [inverseHeight] + startDiffKeyLength = ids.IDLen + database.Uint64Size + // diffKey = [subnetID] + [inverseHeight] + [nodeID] + diffKeyLength = startDiffKeyLength + ids.NodeIDLen + // diffKeyNodeIDOffset = [subnetIDLen] + [inverseHeightLen] + diffKeyNodeIDOffset = ids.IDLen + database.Uint64Size + + // weightValue = [isNegative] + [weight] + weightValueLength = database.BoolSize + database.Uint64Size +) + +var ( + errUnexpectedDiffKeyLength = fmt.Errorf("expected diff key length %d", diffKeyLength) + errUnexpectedWeightValueLength = fmt.Errorf("expected weight value length %d", weightValueLength) +) + +// marshalStartDiffKey is used to determine the starting key when iterating. +// +// Invariant: the result is a prefix of [marshalDiffKey] when called with the +// same arguments. +func marshalStartDiffKey(subnetID ids.ID, height uint64) []byte { + key := make([]byte, startDiffKeyLength) + copy(key, subnetID[:]) + packIterableHeight(key[ids.IDLen:], height) + return key +} + +func marshalDiffKey(subnetID ids.ID, height uint64, nodeID ids.NodeID) []byte { + key := make([]byte, diffKeyLength) + copy(key, subnetID[:]) + packIterableHeight(key[ids.IDLen:], height) + copy(key[diffKeyNodeIDOffset:], nodeID.Bytes()) + return key +} + +func unmarshalDiffKey(key []byte) (ids.ID, uint64, ids.NodeID, error) { + if len(key) != diffKeyLength { + return ids.Empty, 0, ids.EmptyNodeID, errUnexpectedDiffKeyLength + } + var ( + subnetID ids.ID + nodeID ids.NodeID + ) + copy(subnetID[:], key) + height := unpackIterableHeight(key[ids.IDLen:]) + copy(nodeID[:], key[diffKeyNodeIDOffset:]) + return subnetID, height, nodeID, nil +} + +func marshalWeightDiff(diff *ValidatorWeightDiff) []byte { + value := make([]byte, weightValueLength) + if diff.Decrease { + value[0] = database.BoolTrue + } + 
 binary.BigEndian.PutUint64(value[database.BoolSize:], diff.Amount) + return value +} + +func unmarshalWeightDiff(value []byte) (*ValidatorWeightDiff, error) { + if len(value) != weightValueLength { + return nil, errUnexpectedWeightValueLength + } + return &ValidatorWeightDiff{ + Decrease: value[0] == database.BoolTrue, + Amount: binary.BigEndian.Uint64(value[database.BoolSize:]), + }, nil +} + +// Note: [height] is encoded as a bit flipped big endian number so that +// iterating lexicographically results in iterating in decreasing heights. +// +// Invariant: [key] has sufficient length +func packIterableHeight(key []byte, height uint64) { + binary.BigEndian.PutUint64(key, ^height) +} + +// Because we bit flip the height when constructing the key, we must remember to +// bit flip again here. +// +// Invariant: [key] has sufficient length +func unpackIterableHeight(key []byte) uint64 { + return ^binary.BigEndian.Uint64(key) +} diff --git a/avalanchego/vms/platformvm/state/disk_staker_diff_iterator_test.go b/avalanchego/vms/platformvm/state/disk_staker_diff_iterator_test.go new file mode 100644 index 00000000..abdc1c7c --- /dev/null +++ b/avalanchego/vms/platformvm/state/disk_staker_diff_iterator_test.go @@ -0,0 +1,109 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package state + +import ( + "testing" + + "github.com/stretchr/testify/require" + "github.com/thepudds/fzgen/fuzzer" + + "github.com/ava-labs/avalanchego/database/memdb" + "github.com/ava-labs/avalanchego/ids" +) + +func FuzzMarshalDiffKey(f *testing.F) { + f.Fuzz(func(t *testing.T, data []byte) { + require := require.New(t) + + var ( + subnetID ids.ID + height uint64 + nodeID ids.NodeID + ) + fz := fuzzer.NewFuzzer(data) + fz.Fill(&subnetID, &height, &nodeID) + + key := marshalDiffKey(subnetID, height, nodeID) + parsedSubnetID, parsedHeight, parsedNodeID, err := unmarshalDiffKey(key) + require.NoError(err) + require.Equal(subnetID, parsedSubnetID) + require.Equal(height, parsedHeight) + require.Equal(nodeID, parsedNodeID) + }) +} + +func FuzzUnmarshalDiffKey(f *testing.F) { + f.Fuzz(func(t *testing.T, key []byte) { + require := require.New(t) + + subnetID, height, nodeID, err := unmarshalDiffKey(key) + if err != nil { + require.ErrorIs(err, errUnexpectedDiffKeyLength) + return + } + + formattedKey := marshalDiffKey(subnetID, height, nodeID) + require.Equal(key, formattedKey) + }) +} + +func TestDiffIteration(t *testing.T) { + require := require.New(t) + + db := memdb.New() + + subnetID0 := ids.GenerateTestID() + subnetID1 := ids.GenerateTestID() + + nodeID0 := ids.BuildTestNodeID([]byte{0x00}) + nodeID1 := ids.BuildTestNodeID([]byte{0x01}) + + subnetID0Height0NodeID0 := marshalDiffKey(subnetID0, 0, nodeID0) + subnetID0Height1NodeID0 := marshalDiffKey(subnetID0, 1, nodeID0) + subnetID0Height1NodeID1 := marshalDiffKey(subnetID0, 1, nodeID1) + + subnetID1Height0NodeID0 := marshalDiffKey(subnetID1, 0, nodeID0) + subnetID1Height1NodeID0 := marshalDiffKey(subnetID1, 1, nodeID0) + subnetID1Height1NodeID1 := marshalDiffKey(subnetID1, 1, nodeID1) + + require.NoError(db.Put(subnetID0Height0NodeID0, nil)) + require.NoError(db.Put(subnetID0Height1NodeID0, nil)) + require.NoError(db.Put(subnetID0Height1NodeID1, nil)) + require.NoError(db.Put(subnetID1Height0NodeID0, nil)) + 
require.NoError(db.Put(subnetID1Height1NodeID0, nil)) + require.NoError(db.Put(subnetID1Height1NodeID1, nil)) + + { + it := db.NewIteratorWithStartAndPrefix(marshalStartDiffKey(subnetID0, 0), subnetID0[:]) + defer it.Release() + + expectedKeys := [][]byte{ + subnetID0Height0NodeID0, + } + for _, expectedKey := range expectedKeys { + require.True(it.Next()) + require.Equal(expectedKey, it.Key()) + } + require.False(it.Next()) + require.NoError(it.Error()) + } + + { + it := db.NewIteratorWithStartAndPrefix(marshalStartDiffKey(subnetID0, 1), subnetID0[:]) + defer it.Release() + + expectedKeys := [][]byte{ + subnetID0Height1NodeID0, + subnetID0Height1NodeID1, + subnetID0Height0NodeID0, + } + for _, expectedKey := range expectedKeys { + require.True(it.Next()) + require.Equal(expectedKey, it.Key()) + } + require.False(it.Next()) + require.NoError(it.Error()) + } +} diff --git a/avalanchego/vms/platformvm/state/empty_iterator.go b/avalanchego/vms/platformvm/state/empty_iterator.go index 69766c19..3ec5f04f 100644 --- a/avalanchego/vms/platformvm/state/empty_iterator.go +++ b/avalanchego/vms/platformvm/state/empty_iterator.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package state diff --git a/avalanchego/vms/platformvm/state/empty_iterator_test.go b/avalanchego/vms/platformvm/state/empty_iterator_test.go index b5bb43d1..19cd4f06 100644 --- a/avalanchego/vms/platformvm/state/empty_iterator_test.go +++ b/avalanchego/vms/platformvm/state/empty_iterator_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package state diff --git a/avalanchego/vms/platformvm/state/masked_iterator.go b/avalanchego/vms/platformvm/state/masked_iterator.go index 5621205c..9ceee971 100644 --- a/avalanchego/vms/platformvm/state/masked_iterator.go +++ b/avalanchego/vms/platformvm/state/masked_iterator.go @@ -1,11 +1,9 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package state -import ( - "github.com/ava-labs/avalanchego/ids" -) +import "github.com/ava-labs/avalanchego/ids" var _ StakerIterator = (*maskedIterator)(nil) diff --git a/avalanchego/vms/platformvm/state/masked_iterator_test.go b/avalanchego/vms/platformvm/state/masked_iterator_test.go index 8ba719d3..ccc37d6f 100644 --- a/avalanchego/vms/platformvm/state/masked_iterator_test.go +++ b/avalanchego/vms/platformvm/state/masked_iterator_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package state diff --git a/avalanchego/vms/platformvm/state/merged_iterator.go b/avalanchego/vms/platformvm/state/merged_iterator.go index 0b91b867..059001b3 100644 --- a/avalanchego/vms/platformvm/state/merged_iterator.go +++ b/avalanchego/vms/platformvm/state/merged_iterator.go @@ -1,22 +1,17 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package state -import ( - "container/heap" -) +import "github.com/ava-labs/avalanchego/utils/heap" -var ( - _ StakerIterator = (*mergedIterator)(nil) - _ heap.Interface = (*mergedIterator)(nil) -) +var _ StakerIterator = (*mergedIterator)(nil) type mergedIterator struct { initialized bool // heap only contains iterators that have been initialized and are not // exhausted. 
- heap []StakerIterator + heap heap.Queue[StakerIterator] } // Returns an iterator that returns all of the elements of [stakers] in order. @@ -38,15 +33,19 @@ func NewMergedIterator(stakers ...StakerIterator) StakerIterator { } it := &mergedIterator{ - heap: stakers, + heap: heap.QueueOf( + func(a, b StakerIterator) bool { + return a.Value().Less(b.Value()) + }, + stakers..., + ), } - heap.Init(it) return it } func (it *mergedIterator) Next() bool { - if len(it.heap) == 0 { + if it.heap.Len() == 0 { return false } @@ -59,54 +58,31 @@ func (it *mergedIterator) Next() bool { } // Update the heap root. - current := it.heap[0] + current, _ := it.heap.Peek() if current.Next() { // Calling Next() above modifies [current] so we fix the heap. - heap.Fix(it, 0) + it.heap.Fix(0) return true } // The old root is exhausted. Remove it from the heap. current.Release() - heap.Pop(it) - return len(it.heap) > 0 + it.heap.Pop() + return it.heap.Len() > 0 } func (it *mergedIterator) Value() *Staker { - return it.heap[0].Value() + peek, _ := it.heap.Peek() + return peek.Value() } -// When Release() returns, Release() has been called on each element of -// [stakers]. func (it *mergedIterator) Release() { - for _, it := range it.heap { - it.Release() + for it.heap.Len() > 0 { + removed, _ := it.heap.Pop() + removed.Release() } - it.heap = nil } -// Returns the number of sub-iterators in [it]. func (it *mergedIterator) Len() int { - return len(it.heap) -} - -func (it *mergedIterator) Less(i, j int) bool { - return it.heap[i].Value().Less(it.heap[j].Value()) -} - -func (it *mergedIterator) Swap(i, j int) { - it.heap[j], it.heap[i] = it.heap[i], it.heap[j] -} - -// Push is never actually used - but we need it to implement heap.Interface. 
-func (it *mergedIterator) Push(value interface{}) { - it.heap = append(it.heap, value.(StakerIterator)) -} - -func (it *mergedIterator) Pop() interface{} { - newLength := len(it.heap) - 1 - value := it.heap[newLength] - it.heap[newLength] = nil - it.heap = it.heap[:newLength] - return value + return it.heap.Len() } diff --git a/avalanchego/vms/platformvm/state/merged_iterator_test.go b/avalanchego/vms/platformvm/state/merged_iterator_test.go index c85b3594..e6cd5245 100644 --- a/avalanchego/vms/platformvm/state/merged_iterator_test.go +++ b/avalanchego/vms/platformvm/state/merged_iterator_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package state diff --git a/avalanchego/vms/platformvm/state/metadata_codec.go b/avalanchego/vms/platformvm/state/metadata_codec.go new file mode 100644 index 00000000..65832ed7 --- /dev/null +++ b/avalanchego/vms/platformvm/state/metadata_codec.go @@ -0,0 +1,37 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package state + +import ( + "math" + "time" + + "github.com/ava-labs/avalanchego/codec" + "github.com/ava-labs/avalanchego/codec/linearcodec" + "github.com/ava-labs/avalanchego/utils" +) + +const ( + CodecVersion0Tag = "v0" + CodecVersion0 uint16 = 0 + + CodecVersion1Tag = "v1" + CodecVersion1 uint16 = 1 +) + +var MetadataCodec codec.Manager + +func init() { + c0 := linearcodec.New(time.Time{}, []string{CodecVersion0Tag}, math.MaxInt32) + c1 := linearcodec.New(time.Time{}, []string{CodecVersion0Tag, CodecVersion1Tag}, math.MaxInt32) + MetadataCodec = codec.NewManager(math.MaxInt32) + + err := utils.Err( + MetadataCodec.RegisterCodec(CodecVersion0, c0), + MetadataCodec.RegisterCodec(CodecVersion1, c1), + ) + if err != nil { + panic(err) + } +} diff --git a/avalanchego/vms/platformvm/state/metadata_delegator.go b/avalanchego/vms/platformvm/state/metadata_delegator.go new file mode 100644 index 00000000..06099d81 --- /dev/null +++ b/avalanchego/vms/platformvm/state/metadata_delegator.go @@ -0,0 +1,44 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package state + +import ( + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/ids" +) + +type delegatorMetadata struct { + PotentialReward uint64 `v1:"true"` + StakerStartTime uint64 `v1:"true"` + + txID ids.ID +} + +func parseDelegatorMetadata(bytes []byte, metadata *delegatorMetadata) error { + var err error + switch len(bytes) { + case database.Uint64Size: + // only potential reward was stored + metadata.PotentialReward, err = database.ParseUInt64(bytes) + default: + _, err = MetadataCodec.Unmarshal(bytes, metadata) + } + return err +} + +func writeDelegatorMetadata(db database.KeyValueWriter, metadata *delegatorMetadata, codecVersion uint16) error { + // The "0" codec is skipped for [delegatorMetadata]. This is to ensure the + // [validatorMetadata] codec version is the same as the [delegatorMetadata] + // codec version. 
+ // + // TODO: Cleanup post-Durango activation. + if codecVersion == 0 { + return database.PutUInt64(db, metadata.txID[:], metadata.PotentialReward) + } + metadataBytes, err := MetadataCodec.Marshal(codecVersion, metadata) + if err != nil { + return err + } + return db.Put(metadata.txID[:], metadataBytes) +} diff --git a/avalanchego/vms/platformvm/state/metadata_delegator_test.go b/avalanchego/vms/platformvm/state/metadata_delegator_test.go new file mode 100644 index 00000000..9c9d6c1c --- /dev/null +++ b/avalanchego/vms/platformvm/state/metadata_delegator_test.go @@ -0,0 +1,141 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package state + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/codec" + "github.com/ava-labs/avalanchego/database/memdb" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/wrappers" +) + +func TestParseDelegatorMetadata(t *testing.T) { + type test struct { + name string + bytes []byte + expected *delegatorMetadata + expectedErr error + } + tests := []test{ + { + name: "potential reward only no codec", + bytes: []byte{ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7b, + }, + expected: &delegatorMetadata{ + PotentialReward: 123, + StakerStartTime: 0, + }, + expectedErr: nil, + }, + { + name: "potential reward + staker start time with codec v1", + bytes: []byte{ + // codec version + 0x00, 0x01, + // potential reward + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7b, + // staker start time + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xc8, + }, + expected: &delegatorMetadata{ + PotentialReward: 123, + StakerStartTime: 456, + }, + expectedErr: nil, + }, + { + name: "invalid codec version", + bytes: []byte{ + // codec version + 0x00, 0x02, + // potential reward + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7b, + // staker start time + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xc8, + }, + expected: 
nil, + expectedErr: codec.ErrUnknownVersion, + }, + { + name: "short byte len", + bytes: []byte{ + // codec version + 0x00, 0x01, + // potential reward + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7b, + // staker start time + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }, + expected: nil, + expectedErr: wrappers.ErrInsufficientLength, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + var metadata delegatorMetadata + err := parseDelegatorMetadata(tt.bytes, &metadata) + require.ErrorIs(err, tt.expectedErr) + if tt.expectedErr != nil { + return + } + require.Equal(tt.expected, &metadata) + }) + } +} + +func TestWriteDelegatorMetadata(t *testing.T) { + type test struct { + name string + version uint16 + metadata *delegatorMetadata + expected []byte + } + tests := []test{ + { + name: CodecVersion0Tag, + version: CodecVersion0, + metadata: &delegatorMetadata{ + PotentialReward: 123, + StakerStartTime: 456, + }, + expected: []byte{ + // potential reward + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7b, + }, + }, + { + name: CodecVersion1Tag, + version: CodecVersion1, + metadata: &delegatorMetadata{ + PotentialReward: 123, + StakerStartTime: 456, + }, + expected: []byte{ + // codec version + 0x00, 0x01, + // potential reward + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7b, + // staker start time + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xc8, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + db := memdb.New() + tt.metadata.txID = ids.GenerateTestID() + require.NoError(writeDelegatorMetadata(db, tt.metadata, tt.version)) + bytes, err := db.Get(tt.metadata.txID[:]) + require.NoError(err) + require.Equal(tt.expected, bytes) + }) + } +} diff --git a/avalanchego/vms/platformvm/state/validator_metadata.go b/avalanchego/vms/platformvm/state/metadata_validator.go similarity index 89% rename from avalanchego/vms/platformvm/state/validator_metadata.go rename to 
avalanchego/vms/platformvm/state/metadata_validator.go index a14b9331..0c725368 100644 --- a/avalanchego/vms/platformvm/state/validator_metadata.go +++ b/avalanchego/vms/platformvm/state/metadata_validator.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package state @@ -11,8 +11,6 @@ import ( "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/wrappers" - "github.com/ava-labs/avalanchego/vms/platformvm/genesis" - "github.com/ava-labs/avalanchego/vms/platformvm/txs" ) // preDelegateeRewardSize is the size of codec marshalling @@ -24,16 +22,17 @@ const preDelegateeRewardSize = wrappers.ShortLen + 3*wrappers.LongLen var _ validatorState = (*metadata)(nil) type preDelegateeRewardMetadata struct { - UpDuration time.Duration `serialize:"true"` - LastUpdated uint64 `serialize:"true"` // Unix time in seconds - PotentialReward uint64 `serialize:"true"` + UpDuration time.Duration `v0:"true"` + LastUpdated uint64 `v0:"true"` // Unix time in seconds + PotentialReward uint64 `v0:"true"` } type validatorMetadata struct { - UpDuration time.Duration `serialize:"true"` - LastUpdated uint64 `serialize:"true"` // Unix time in seconds - PotentialReward uint64 `serialize:"true"` - PotentialDelegateeReward uint64 `serialize:"true"` + UpDuration time.Duration `v0:"true"` + LastUpdated uint64 `v0:"true"` // Unix time in seconds + PotentialReward uint64 `v0:"true"` + PotentialDelegateeReward uint64 `v0:"true"` + StakerStartTime uint64 ` v1:"true"` txID ids.ID lastUpdated time.Time @@ -60,7 +59,7 @@ func parseValidatorMetadata(bytes []byte, metadata *validatorMetadata) error { // potential reward and uptime was stored but potential delegatee reward // was not tmp := preDelegateeRewardMetadata{} - if _, err := txs.Codec.Unmarshal(bytes, &tmp); err != nil { + if _, err 
:= MetadataCodec.Unmarshal(bytes, &tmp); err != nil { return err } @@ -69,7 +68,7 @@ func parseValidatorMetadata(bytes []byte, metadata *validatorMetadata) error { metadata.PotentialReward = tmp.PotentialReward default: // everything was stored - if _, err := txs.Codec.Unmarshal(bytes, metadata); err != nil { + if _, err := MetadataCodec.Unmarshal(bytes, metadata); err != nil { return err } } @@ -132,6 +131,7 @@ type validatorState interface { WriteValidatorMetadata( dbPrimary database.KeyValueWriter, dbSubnet database.KeyValueWriter, + codecVersion uint16, ) error } @@ -232,13 +232,14 @@ func (m *metadata) DeleteValidatorMetadata(vdrID ids.NodeID, subnetID ids.ID) { func (m *metadata) WriteValidatorMetadata( dbPrimary database.KeyValueWriter, dbSubnet database.KeyValueWriter, + codecVersion uint16, ) error { for vdrID, updatedSubnets := range m.updatedMetadata { for subnetID := range updatedSubnets { metadata := m.metadata[vdrID][subnetID] metadata.LastUpdated = uint64(metadata.lastUpdated.Unix()) - metadataBytes, err := genesis.Codec.Marshal(txs.Version, metadata) + metadataBytes, err := MetadataCodec.Marshal(codecVersion, metadata) if err != nil { return err } diff --git a/avalanchego/vms/platformvm/state/validator_metadata_test.go b/avalanchego/vms/platformvm/state/metadata_validator_test.go similarity index 86% rename from avalanchego/vms/platformvm/state/validator_metadata_test.go rename to avalanchego/vms/platformvm/state/metadata_validator_test.go index 15fc983a..3a041a26 100644 --- a/avalanchego/vms/platformvm/state/validator_metadata_test.go +++ b/avalanchego/vms/platformvm/state/metadata_validator_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package state @@ -9,9 +9,11 @@ import ( "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/wrappers" ) func TestValidatorUptimes(t *testing.T) { @@ -44,8 +46,7 @@ func TestValidatorUptimes(t *testing.T) { // set uptime newUpDuration := testMetadata.UpDuration + 1 newLastUpdated := testMetadata.lastUpdated.Add(time.Hour) - err = state.SetUptime(nodeID, subnetID, newUpDuration, newLastUpdated) - require.NoError(err) + require.NoError(state.SetUptime(nodeID, subnetID, newUpDuration, newLastUpdated)) // get new uptime upDuration, lastUpdated, err = state.GetUptime(nodeID, subnetID) @@ -80,9 +81,9 @@ func TestWriteValidatorMetadata(t *testing.T) { primaryDB := memdb.New() subnetDB := memdb.New() + // write empty uptimes - err := state.WriteValidatorMetadata(primaryDB, subnetDB) - require.NoError(err) + require.NoError(state.WriteValidatorMetadata(primaryDB, subnetDB, CodecVersion1)) // load uptime nodeID := ids.GenerateTestNodeID() @@ -96,8 +97,7 @@ func TestWriteValidatorMetadata(t *testing.T) { state.LoadValidatorMetadata(nodeID, subnetID, testUptimeReward) // write state, should not reflect to DB yet - err = state.WriteValidatorMetadata(primaryDB, subnetDB) - require.NoError(err) + require.NoError(state.WriteValidatorMetadata(primaryDB, subnetDB, CodecVersion1)) require.False(primaryDB.Has(testUptimeReward.txID[:])) require.False(subnetDB.Has(testUptimeReward.txID[:])) @@ -110,12 +110,10 @@ func TestWriteValidatorMetadata(t *testing.T) { // update uptimes newUpDuration := testUptimeReward.UpDuration + 1 newLastUpdated := testUptimeReward.lastUpdated.Add(time.Hour) - err = state.SetUptime(nodeID, subnetID, newUpDuration, newLastUpdated) - require.NoError(err) + require.NoError(state.SetUptime(nodeID, subnetID, newUpDuration, newLastUpdated)) // write uptimes, should 
reflect to subnet DB - err = state.WriteValidatorMetadata(primaryDB, subnetDB) - require.NoError(err) + require.NoError(state.WriteValidatorMetadata(primaryDB, subnetDB, CodecVersion1)) require.False(primaryDB.Has(testUptimeReward.txID[:])) require.True(subnetDB.Has(testUptimeReward.txID[:])) } @@ -147,8 +145,7 @@ func TestValidatorDelegateeRewards(t *testing.T) { // set delegatee reward newDelegateeReward := testMetadata.PotentialDelegateeReward + 100000 - err = state.SetDelegateeReward(subnetID, nodeID, newDelegateeReward) - require.NoError(err) + require.NoError(state.SetDelegateeReward(subnetID, nodeID, newDelegateeReward)) // get new delegatee reward delegateeReward, err = state.GetDelegateeReward(subnetID, nodeID) @@ -176,10 +173,10 @@ func TestValidatorDelegateeRewards(t *testing.T) { func TestParseValidatorMetadata(t *testing.T) { type test struct { - name string - bytes []byte - expected *validatorMetadata - shouldErr bool + name string + bytes []byte + expected *validatorMetadata + expectedErr error } tests := []test{ { @@ -188,7 +185,7 @@ func TestParseValidatorMetadata(t *testing.T) { expected: &validatorMetadata{ lastUpdated: time.Unix(0, 0), }, - shouldErr: false, + expectedErr: nil, }, { name: "nil", @@ -196,7 +193,7 @@ func TestParseValidatorMetadata(t *testing.T) { expected: &validatorMetadata{ lastUpdated: time.Unix(0, 0), }, - shouldErr: false, + expectedErr: nil, }, { name: "potential reward only", @@ -207,7 +204,7 @@ func TestParseValidatorMetadata(t *testing.T) { PotentialReward: 100000, lastUpdated: time.Unix(0, 0), }, - shouldErr: false, + expectedErr: nil, }, { name: "uptime + potential reward", @@ -222,12 +219,12 @@ func TestParseValidatorMetadata(t *testing.T) { 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x86, 0xA0, }, expected: &validatorMetadata{ - UpDuration: time.Duration(6000000), + UpDuration: 6000000, LastUpdated: 900000, PotentialReward: 100000, lastUpdated: time.Unix(900000, 0), }, - shouldErr: false, + expectedErr: nil, }, { name: 
"uptime + potential reward + potential delegatee reward", @@ -244,19 +241,19 @@ func TestParseValidatorMetadata(t *testing.T) { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4E, 0x20, }, expected: &validatorMetadata{ - UpDuration: time.Duration(6000000), + UpDuration: 6000000, LastUpdated: 900000, PotentialReward: 100000, PotentialDelegateeReward: 20000, lastUpdated: time.Unix(900000, 0), }, - shouldErr: false, + expectedErr: nil, }, { name: "invalid codec version", bytes: []byte{ // codec version - 0x00, 0x01, + 0x00, 0x02, // up duration 0x00, 0x00, 0x00, 0x00, 0x00, 0x5B, 0x8D, 0x80, // last updated @@ -266,8 +263,8 @@ func TestParseValidatorMetadata(t *testing.T) { // potential delegatee reward 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4E, 0x20, }, - expected: nil, - shouldErr: true, + expected: nil, + expectedErr: codec.ErrUnknownVersion, }, { name: "short byte len", @@ -283,8 +280,8 @@ func TestParseValidatorMetadata(t *testing.T) { // potential delegatee reward 0x00, 0x00, 0x00, 0x00, 0x4E, 0x20, }, - expected: nil, - shouldErr: true, + expected: nil, + expectedErr: wrappers.ErrInsufficientLength, }, } for _, tt := range tests { @@ -292,12 +289,11 @@ func TestParseValidatorMetadata(t *testing.T) { require := require.New(t) var metadata validatorMetadata err := parseValidatorMetadata(tt.bytes, &metadata) - if tt.shouldErr { - require.Error(err) - } else { - require.NoError(err) - require.Equal(tt.expected, &metadata) + require.ErrorIs(err, tt.expectedErr) + if tt.expectedErr != nil { + return } + require.Equal(tt.expected, &metadata) }) } } diff --git a/avalanchego/vms/platformvm/state/mock_chain.go b/avalanchego/vms/platformvm/state/mock_chain.go deleted file mode 100644 index 76cffe5b..00000000 --- a/avalanchego/vms/platformvm/state/mock_chain.go +++ /dev/null @@ -1,485 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -// Code generated by MockGen. DO NOT EDIT. 
-// Source: github.com/ava-labs/avalanchego/vms/platformvm/state (interfaces: Chain) - -// Package state is a generated GoMock package. -package state - -import ( - reflect "reflect" - time "time" - - ids "github.com/ava-labs/avalanchego/ids" - avax "github.com/ava-labs/avalanchego/vms/components/avax" - status "github.com/ava-labs/avalanchego/vms/platformvm/status" - txs "github.com/ava-labs/avalanchego/vms/platformvm/txs" - gomock "github.com/golang/mock/gomock" -) - -// MockChain is a mock of Chain interface. -type MockChain struct { - ctrl *gomock.Controller - recorder *MockChainMockRecorder -} - -// MockChainMockRecorder is the mock recorder for MockChain. -type MockChainMockRecorder struct { - mock *MockChain -} - -// NewMockChain creates a new mock instance. -func NewMockChain(ctrl *gomock.Controller) *MockChain { - mock := &MockChain{ctrl: ctrl} - mock.recorder = &MockChainMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockChain) EXPECT() *MockChainMockRecorder { - return m.recorder -} - -// AddChain mocks base method. -func (m *MockChain) AddChain(arg0 *txs.Tx) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "AddChain", arg0) -} - -// AddChain indicates an expected call of AddChain. -func (mr *MockChainMockRecorder) AddChain(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddChain", reflect.TypeOf((*MockChain)(nil).AddChain), arg0) -} - -// AddRewardUTXO mocks base method. -func (m *MockChain) AddRewardUTXO(arg0 ids.ID, arg1 *avax.UTXO) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "AddRewardUTXO", arg0, arg1) -} - -// AddRewardUTXO indicates an expected call of AddRewardUTXO. 
-func (mr *MockChainMockRecorder) AddRewardUTXO(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddRewardUTXO", reflect.TypeOf((*MockChain)(nil).AddRewardUTXO), arg0, arg1) -} - -// AddSubnet mocks base method. -func (m *MockChain) AddSubnet(arg0 *txs.Tx) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "AddSubnet", arg0) -} - -// AddSubnet indicates an expected call of AddSubnet. -func (mr *MockChainMockRecorder) AddSubnet(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddSubnet", reflect.TypeOf((*MockChain)(nil).AddSubnet), arg0) -} - -// AddSubnetTransformation mocks base method. -func (m *MockChain) AddSubnetTransformation(arg0 *txs.Tx) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "AddSubnetTransformation", arg0) -} - -// AddSubnetTransformation indicates an expected call of AddSubnetTransformation. -func (mr *MockChainMockRecorder) AddSubnetTransformation(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddSubnetTransformation", reflect.TypeOf((*MockChain)(nil).AddSubnetTransformation), arg0) -} - -// AddTx mocks base method. -func (m *MockChain) AddTx(arg0 *txs.Tx, arg1 status.Status) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "AddTx", arg0, arg1) -} - -// AddTx indicates an expected call of AddTx. -func (mr *MockChainMockRecorder) AddTx(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddTx", reflect.TypeOf((*MockChain)(nil).AddTx), arg0, arg1) -} - -// AddUTXO mocks base method. -func (m *MockChain) AddUTXO(arg0 *avax.UTXO) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "AddUTXO", arg0) -} - -// AddUTXO indicates an expected call of AddUTXO. 
-func (mr *MockChainMockRecorder) AddUTXO(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddUTXO", reflect.TypeOf((*MockChain)(nil).AddUTXO), arg0) -} - -// DeleteCurrentDelegator mocks base method. -func (m *MockChain) DeleteCurrentDelegator(arg0 *Staker) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "DeleteCurrentDelegator", arg0) -} - -// DeleteCurrentDelegator indicates an expected call of DeleteCurrentDelegator. -func (mr *MockChainMockRecorder) DeleteCurrentDelegator(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteCurrentDelegator", reflect.TypeOf((*MockChain)(nil).DeleteCurrentDelegator), arg0) -} - -// DeleteCurrentValidator mocks base method. -func (m *MockChain) DeleteCurrentValidator(arg0 *Staker) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "DeleteCurrentValidator", arg0) -} - -// DeleteCurrentValidator indicates an expected call of DeleteCurrentValidator. -func (mr *MockChainMockRecorder) DeleteCurrentValidator(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteCurrentValidator", reflect.TypeOf((*MockChain)(nil).DeleteCurrentValidator), arg0) -} - -// DeletePendingDelegator mocks base method. -func (m *MockChain) DeletePendingDelegator(arg0 *Staker) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "DeletePendingDelegator", arg0) -} - -// DeletePendingDelegator indicates an expected call of DeletePendingDelegator. -func (mr *MockChainMockRecorder) DeletePendingDelegator(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeletePendingDelegator", reflect.TypeOf((*MockChain)(nil).DeletePendingDelegator), arg0) -} - -// DeletePendingValidator mocks base method. 
-func (m *MockChain) DeletePendingValidator(arg0 *Staker) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "DeletePendingValidator", arg0) -} - -// DeletePendingValidator indicates an expected call of DeletePendingValidator. -func (mr *MockChainMockRecorder) DeletePendingValidator(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeletePendingValidator", reflect.TypeOf((*MockChain)(nil).DeletePendingValidator), arg0) -} - -// DeleteUTXO mocks base method. -func (m *MockChain) DeleteUTXO(arg0 ids.ID) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "DeleteUTXO", arg0) -} - -// DeleteUTXO indicates an expected call of DeleteUTXO. -func (mr *MockChainMockRecorder) DeleteUTXO(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteUTXO", reflect.TypeOf((*MockChain)(nil).DeleteUTXO), arg0) -} - -// GetChains mocks base method. -func (m *MockChain) GetChains(arg0 ids.ID) ([]*txs.Tx, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetChains", arg0) - ret0, _ := ret[0].([]*txs.Tx) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetChains indicates an expected call of GetChains. -func (mr *MockChainMockRecorder) GetChains(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetChains", reflect.TypeOf((*MockChain)(nil).GetChains), arg0) -} - -// GetCurrentDelegatorIterator mocks base method. -func (m *MockChain) GetCurrentDelegatorIterator(arg0 ids.ID, arg1 ids.NodeID) (StakerIterator, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetCurrentDelegatorIterator", arg0, arg1) - ret0, _ := ret[0].(StakerIterator) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetCurrentDelegatorIterator indicates an expected call of GetCurrentDelegatorIterator. 
-func (mr *MockChainMockRecorder) GetCurrentDelegatorIterator(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentDelegatorIterator", reflect.TypeOf((*MockChain)(nil).GetCurrentDelegatorIterator), arg0, arg1) -} - -// GetCurrentStakerIterator mocks base method. -func (m *MockChain) GetCurrentStakerIterator() (StakerIterator, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetCurrentStakerIterator") - ret0, _ := ret[0].(StakerIterator) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetCurrentStakerIterator indicates an expected call of GetCurrentStakerIterator. -func (mr *MockChainMockRecorder) GetCurrentStakerIterator() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentStakerIterator", reflect.TypeOf((*MockChain)(nil).GetCurrentStakerIterator)) -} - -// GetCurrentSupply mocks base method. -func (m *MockChain) GetCurrentSupply(arg0 ids.ID) (uint64, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetCurrentSupply", arg0) - ret0, _ := ret[0].(uint64) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetCurrentSupply indicates an expected call of GetCurrentSupply. -func (mr *MockChainMockRecorder) GetCurrentSupply(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentSupply", reflect.TypeOf((*MockChain)(nil).GetCurrentSupply), arg0) -} - -// GetCurrentValidator mocks base method. -func (m *MockChain) GetCurrentValidator(arg0 ids.ID, arg1 ids.NodeID) (*Staker, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetCurrentValidator", arg0, arg1) - ret0, _ := ret[0].(*Staker) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetCurrentValidator indicates an expected call of GetCurrentValidator. 
-func (mr *MockChainMockRecorder) GetCurrentValidator(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentValidator", reflect.TypeOf((*MockChain)(nil).GetCurrentValidator), arg0, arg1) -} - -// GetDelegateeReward mocks base method. -func (m *MockChain) GetDelegateeReward(arg0 ids.ID, arg1 ids.NodeID) (uint64, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetDelegateeReward", arg0, arg1) - ret0, _ := ret[0].(uint64) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetDelegateeReward indicates an expected call of GetDelegateeReward. -func (mr *MockChainMockRecorder) GetDelegateeReward(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDelegateeReward", reflect.TypeOf((*MockChain)(nil).GetDelegateeReward), arg0, arg1) -} - -// GetPendingDelegatorIterator mocks base method. -func (m *MockChain) GetPendingDelegatorIterator(arg0 ids.ID, arg1 ids.NodeID) (StakerIterator, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetPendingDelegatorIterator", arg0, arg1) - ret0, _ := ret[0].(StakerIterator) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetPendingDelegatorIterator indicates an expected call of GetPendingDelegatorIterator. -func (mr *MockChainMockRecorder) GetPendingDelegatorIterator(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPendingDelegatorIterator", reflect.TypeOf((*MockChain)(nil).GetPendingDelegatorIterator), arg0, arg1) -} - -// GetPendingStakerIterator mocks base method. -func (m *MockChain) GetPendingStakerIterator() (StakerIterator, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetPendingStakerIterator") - ret0, _ := ret[0].(StakerIterator) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetPendingStakerIterator indicates an expected call of GetPendingStakerIterator. 
-func (mr *MockChainMockRecorder) GetPendingStakerIterator() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPendingStakerIterator", reflect.TypeOf((*MockChain)(nil).GetPendingStakerIterator)) -} - -// GetPendingValidator mocks base method. -func (m *MockChain) GetPendingValidator(arg0 ids.ID, arg1 ids.NodeID) (*Staker, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetPendingValidator", arg0, arg1) - ret0, _ := ret[0].(*Staker) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetPendingValidator indicates an expected call of GetPendingValidator. -func (mr *MockChainMockRecorder) GetPendingValidator(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPendingValidator", reflect.TypeOf((*MockChain)(nil).GetPendingValidator), arg0, arg1) -} - -// GetRewardUTXOs mocks base method. -func (m *MockChain) GetRewardUTXOs(arg0 ids.ID) ([]*avax.UTXO, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetRewardUTXOs", arg0) - ret0, _ := ret[0].([]*avax.UTXO) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetRewardUTXOs indicates an expected call of GetRewardUTXOs. -func (mr *MockChainMockRecorder) GetRewardUTXOs(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRewardUTXOs", reflect.TypeOf((*MockChain)(nil).GetRewardUTXOs), arg0) -} - -// GetSubnetTransformation mocks base method. -func (m *MockChain) GetSubnetTransformation(arg0 ids.ID) (*txs.Tx, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetSubnetTransformation", arg0) - ret0, _ := ret[0].(*txs.Tx) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetSubnetTransformation indicates an expected call of GetSubnetTransformation. 
-func (mr *MockChainMockRecorder) GetSubnetTransformation(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSubnetTransformation", reflect.TypeOf((*MockChain)(nil).GetSubnetTransformation), arg0) -} - -// GetSubnets mocks base method. -func (m *MockChain) GetSubnets() ([]*txs.Tx, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetSubnets") - ret0, _ := ret[0].([]*txs.Tx) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetSubnets indicates an expected call of GetSubnets. -func (mr *MockChainMockRecorder) GetSubnets() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSubnets", reflect.TypeOf((*MockChain)(nil).GetSubnets)) -} - -// GetTimestamp mocks base method. -func (m *MockChain) GetTimestamp() time.Time { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetTimestamp") - ret0, _ := ret[0].(time.Time) - return ret0 -} - -// GetTimestamp indicates an expected call of GetTimestamp. -func (mr *MockChainMockRecorder) GetTimestamp() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTimestamp", reflect.TypeOf((*MockChain)(nil).GetTimestamp)) -} - -// GetTx mocks base method. -func (m *MockChain) GetTx(arg0 ids.ID) (*txs.Tx, status.Status, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetTx", arg0) - ret0, _ := ret[0].(*txs.Tx) - ret1, _ := ret[1].(status.Status) - ret2, _ := ret[2].(error) - return ret0, ret1, ret2 -} - -// GetTx indicates an expected call of GetTx. -func (mr *MockChainMockRecorder) GetTx(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTx", reflect.TypeOf((*MockChain)(nil).GetTx), arg0) -} - -// GetUTXO mocks base method. 
-func (m *MockChain) GetUTXO(arg0 ids.ID) (*avax.UTXO, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetUTXO", arg0) - ret0, _ := ret[0].(*avax.UTXO) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetUTXO indicates an expected call of GetUTXO. -func (mr *MockChainMockRecorder) GetUTXO(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUTXO", reflect.TypeOf((*MockChain)(nil).GetUTXO), arg0) -} - -// PutCurrentDelegator mocks base method. -func (m *MockChain) PutCurrentDelegator(arg0 *Staker) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "PutCurrentDelegator", arg0) -} - -// PutCurrentDelegator indicates an expected call of PutCurrentDelegator. -func (mr *MockChainMockRecorder) PutCurrentDelegator(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutCurrentDelegator", reflect.TypeOf((*MockChain)(nil).PutCurrentDelegator), arg0) -} - -// PutCurrentValidator mocks base method. -func (m *MockChain) PutCurrentValidator(arg0 *Staker) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "PutCurrentValidator", arg0) -} - -// PutCurrentValidator indicates an expected call of PutCurrentValidator. -func (mr *MockChainMockRecorder) PutCurrentValidator(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutCurrentValidator", reflect.TypeOf((*MockChain)(nil).PutCurrentValidator), arg0) -} - -// PutPendingDelegator mocks base method. -func (m *MockChain) PutPendingDelegator(arg0 *Staker) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "PutPendingDelegator", arg0) -} - -// PutPendingDelegator indicates an expected call of PutPendingDelegator. 
-func (mr *MockChainMockRecorder) PutPendingDelegator(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutPendingDelegator", reflect.TypeOf((*MockChain)(nil).PutPendingDelegator), arg0) -} - -// PutPendingValidator mocks base method. -func (m *MockChain) PutPendingValidator(arg0 *Staker) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "PutPendingValidator", arg0) -} - -// PutPendingValidator indicates an expected call of PutPendingValidator. -func (mr *MockChainMockRecorder) PutPendingValidator(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutPendingValidator", reflect.TypeOf((*MockChain)(nil).PutPendingValidator), arg0) -} - -// SetCurrentSupply mocks base method. -func (m *MockChain) SetCurrentSupply(arg0 ids.ID, arg1 uint64) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetCurrentSupply", arg0, arg1) -} - -// SetCurrentSupply indicates an expected call of SetCurrentSupply. -func (mr *MockChainMockRecorder) SetCurrentSupply(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetCurrentSupply", reflect.TypeOf((*MockChain)(nil).SetCurrentSupply), arg0, arg1) -} - -// SetDelegateeReward mocks base method. -func (m *MockChain) SetDelegateeReward(arg0 ids.ID, arg1 ids.NodeID, arg2 uint64) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SetDelegateeReward", arg0, arg1, arg2) - ret0, _ := ret[0].(error) - return ret0 -} - -// SetDelegateeReward indicates an expected call of SetDelegateeReward. -func (mr *MockChainMockRecorder) SetDelegateeReward(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetDelegateeReward", reflect.TypeOf((*MockChain)(nil).SetDelegateeReward), arg0, arg1, arg2) -} - -// SetTimestamp mocks base method. 
-func (m *MockChain) SetTimestamp(arg0 time.Time) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetTimestamp", arg0) -} - -// SetTimestamp indicates an expected call of SetTimestamp. -func (mr *MockChainMockRecorder) SetTimestamp(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetTimestamp", reflect.TypeOf((*MockChain)(nil).SetTimestamp), arg0) -} diff --git a/avalanchego/vms/platformvm/state/mock_diff.go b/avalanchego/vms/platformvm/state/mock_diff.go deleted file mode 100644 index c4929833..00000000 --- a/avalanchego/vms/platformvm/state/mock_diff.go +++ /dev/null @@ -1,503 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/ava-labs/avalanchego/vms/platformvm/state (interfaces: Diff) - -// Package state is a generated GoMock package. -package state - -import ( - reflect "reflect" - time "time" - - ids "github.com/ava-labs/avalanchego/ids" - avax "github.com/ava-labs/avalanchego/vms/components/avax" - status "github.com/ava-labs/avalanchego/vms/platformvm/status" - txs "github.com/ava-labs/avalanchego/vms/platformvm/txs" - gomock "github.com/golang/mock/gomock" -) - -// MockDiff is a mock of Diff interface. -type MockDiff struct { - ctrl *gomock.Controller - recorder *MockDiffMockRecorder -} - -// MockDiffMockRecorder is the mock recorder for MockDiff. -type MockDiffMockRecorder struct { - mock *MockDiff -} - -// NewMockDiff creates a new mock instance. -func NewMockDiff(ctrl *gomock.Controller) *MockDiff { - mock := &MockDiff{ctrl: ctrl} - mock.recorder = &MockDiffMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockDiff) EXPECT() *MockDiffMockRecorder { - return m.recorder -} - -// AddChain mocks base method. 
-func (m *MockDiff) AddChain(arg0 *txs.Tx) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "AddChain", arg0) -} - -func (m *MockDiff) GetNetworkID() uint32 { - return 0 -} - -// AddChain indicates an expected call of AddChain. -func (mr *MockDiffMockRecorder) AddChain(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddChain", reflect.TypeOf((*MockDiff)(nil).AddChain), arg0) -} - -// AddRewardUTXO mocks base method. -func (m *MockDiff) AddRewardUTXO(arg0 ids.ID, arg1 *avax.UTXO) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "AddRewardUTXO", arg0, arg1) -} - -// AddRewardUTXO indicates an expected call of AddRewardUTXO. -func (mr *MockDiffMockRecorder) AddRewardUTXO(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddRewardUTXO", reflect.TypeOf((*MockDiff)(nil).AddRewardUTXO), arg0, arg1) -} - -// AddSubnet mocks base method. -func (m *MockDiff) AddSubnet(arg0 *txs.Tx) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "AddSubnet", arg0) -} - -// AddSubnet indicates an expected call of AddSubnet. -func (mr *MockDiffMockRecorder) AddSubnet(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddSubnet", reflect.TypeOf((*MockDiff)(nil).AddSubnet), arg0) -} - -// AddSubnetTransformation mocks base method. -func (m *MockDiff) AddSubnetTransformation(arg0 *txs.Tx) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "AddSubnetTransformation", arg0) -} - -// AddSubnetTransformation indicates an expected call of AddSubnetTransformation. -func (mr *MockDiffMockRecorder) AddSubnetTransformation(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddSubnetTransformation", reflect.TypeOf((*MockDiff)(nil).AddSubnetTransformation), arg0) -} - -// AddTx mocks base method. 
-func (m *MockDiff) AddTx(arg0 *txs.Tx, arg1 status.Status) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "AddTx", arg0, arg1) -} - -// AddTx indicates an expected call of AddTx. -func (mr *MockDiffMockRecorder) AddTx(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddTx", reflect.TypeOf((*MockDiff)(nil).AddTx), arg0, arg1) -} - -// AddUTXO mocks base method. -func (m *MockDiff) AddUTXO(arg0 *avax.UTXO) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "AddUTXO", arg0) -} - -// AddUTXO indicates an expected call of AddUTXO. -func (mr *MockDiffMockRecorder) AddUTXO(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddUTXO", reflect.TypeOf((*MockDiff)(nil).AddUTXO), arg0) -} - -// Apply mocks base method. -func (m *MockDiff) Apply(arg0 State) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Apply", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// Apply indicates an expected call of Apply. -func (mr *MockDiffMockRecorder) Apply(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Apply", reflect.TypeOf((*MockDiff)(nil).Apply), arg0) -} - -// DeleteCurrentDelegator mocks base method. -func (m *MockDiff) DeleteCurrentDelegator(arg0 *Staker) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "DeleteCurrentDelegator", arg0) -} - -// DeleteCurrentDelegator indicates an expected call of DeleteCurrentDelegator. -func (mr *MockDiffMockRecorder) DeleteCurrentDelegator(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteCurrentDelegator", reflect.TypeOf((*MockDiff)(nil).DeleteCurrentDelegator), arg0) -} - -// DeleteCurrentValidator mocks base method. 
-func (m *MockDiff) DeleteCurrentValidator(arg0 *Staker) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "DeleteCurrentValidator", arg0) -} - -// DeleteCurrentValidator indicates an expected call of DeleteCurrentValidator. -func (mr *MockDiffMockRecorder) DeleteCurrentValidator(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteCurrentValidator", reflect.TypeOf((*MockDiff)(nil).DeleteCurrentValidator), arg0) -} - -// DeletePendingDelegator mocks base method. -func (m *MockDiff) DeletePendingDelegator(arg0 *Staker) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "DeletePendingDelegator", arg0) -} - -// DeletePendingDelegator indicates an expected call of DeletePendingDelegator. -func (mr *MockDiffMockRecorder) DeletePendingDelegator(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeletePendingDelegator", reflect.TypeOf((*MockDiff)(nil).DeletePendingDelegator), arg0) -} - -// DeletePendingValidator mocks base method. -func (m *MockDiff) DeletePendingValidator(arg0 *Staker) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "DeletePendingValidator", arg0) -} - -// DeletePendingValidator indicates an expected call of DeletePendingValidator. -func (mr *MockDiffMockRecorder) DeletePendingValidator(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeletePendingValidator", reflect.TypeOf((*MockDiff)(nil).DeletePendingValidator), arg0) -} - -// DeleteUTXO mocks base method. -func (m *MockDiff) DeleteUTXO(arg0 ids.ID) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "DeleteUTXO", arg0) -} - -// DeleteUTXO indicates an expected call of DeleteUTXO. -func (mr *MockDiffMockRecorder) DeleteUTXO(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteUTXO", reflect.TypeOf((*MockDiff)(nil).DeleteUTXO), arg0) -} - -// GetChains mocks base method. 
-func (m *MockDiff) GetChains(arg0 ids.ID) ([]*txs.Tx, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetChains", arg0) - ret0, _ := ret[0].([]*txs.Tx) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetChains indicates an expected call of GetChains. -func (mr *MockDiffMockRecorder) GetChains(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetChains", reflect.TypeOf((*MockDiff)(nil).GetChains), arg0) -} - -// GetCurrentDelegatorIterator mocks base method. -func (m *MockDiff) GetCurrentDelegatorIterator(arg0 ids.ID, arg1 ids.NodeID) (StakerIterator, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetCurrentDelegatorIterator", arg0, arg1) - ret0, _ := ret[0].(StakerIterator) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetCurrentDelegatorIterator indicates an expected call of GetCurrentDelegatorIterator. -func (mr *MockDiffMockRecorder) GetCurrentDelegatorIterator(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentDelegatorIterator", reflect.TypeOf((*MockDiff)(nil).GetCurrentDelegatorIterator), arg0, arg1) -} - -// GetCurrentStakerIterator mocks base method. -func (m *MockDiff) GetCurrentStakerIterator() (StakerIterator, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetCurrentStakerIterator") - ret0, _ := ret[0].(StakerIterator) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetCurrentStakerIterator indicates an expected call of GetCurrentStakerIterator. -func (mr *MockDiffMockRecorder) GetCurrentStakerIterator() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentStakerIterator", reflect.TypeOf((*MockDiff)(nil).GetCurrentStakerIterator)) -} - -// GetCurrentSupply mocks base method. 
-func (m *MockDiff) GetCurrentSupply(arg0 ids.ID) (uint64, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetCurrentSupply", arg0) - ret0, _ := ret[0].(uint64) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetCurrentSupply indicates an expected call of GetCurrentSupply. -func (mr *MockDiffMockRecorder) GetCurrentSupply(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentSupply", reflect.TypeOf((*MockDiff)(nil).GetCurrentSupply), arg0) -} - -// GetCurrentValidator mocks base method. -func (m *MockDiff) GetCurrentValidator(arg0 ids.ID, arg1 ids.NodeID) (*Staker, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetCurrentValidator", arg0, arg1) - ret0, _ := ret[0].(*Staker) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetCurrentValidator indicates an expected call of GetCurrentValidator. -func (mr *MockDiffMockRecorder) GetCurrentValidator(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentValidator", reflect.TypeOf((*MockDiff)(nil).GetCurrentValidator), arg0, arg1) -} - -// GetDelegateeReward mocks base method. -func (m *MockDiff) GetDelegateeReward(arg0 ids.ID, arg1 ids.NodeID) (uint64, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetDelegateeReward", arg0, arg1) - ret0, _ := ret[0].(uint64) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetDelegateeReward indicates an expected call of GetDelegateeReward. -func (mr *MockDiffMockRecorder) GetDelegateeReward(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDelegateeReward", reflect.TypeOf((*MockDiff)(nil).GetDelegateeReward), arg0, arg1) -} - -// GetPendingDelegatorIterator mocks base method. 
-func (m *MockDiff) GetPendingDelegatorIterator(arg0 ids.ID, arg1 ids.NodeID) (StakerIterator, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetPendingDelegatorIterator", arg0, arg1) - ret0, _ := ret[0].(StakerIterator) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetPendingDelegatorIterator indicates an expected call of GetPendingDelegatorIterator. -func (mr *MockDiffMockRecorder) GetPendingDelegatorIterator(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPendingDelegatorIterator", reflect.TypeOf((*MockDiff)(nil).GetPendingDelegatorIterator), arg0, arg1) -} - -// GetPendingStakerIterator mocks base method. -func (m *MockDiff) GetPendingStakerIterator() (StakerIterator, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetPendingStakerIterator") - ret0, _ := ret[0].(StakerIterator) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetPendingStakerIterator indicates an expected call of GetPendingStakerIterator. -func (mr *MockDiffMockRecorder) GetPendingStakerIterator() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPendingStakerIterator", reflect.TypeOf((*MockDiff)(nil).GetPendingStakerIterator)) -} - -// GetPendingValidator mocks base method. -func (m *MockDiff) GetPendingValidator(arg0 ids.ID, arg1 ids.NodeID) (*Staker, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetPendingValidator", arg0, arg1) - ret0, _ := ret[0].(*Staker) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetPendingValidator indicates an expected call of GetPendingValidator. -func (mr *MockDiffMockRecorder) GetPendingValidator(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPendingValidator", reflect.TypeOf((*MockDiff)(nil).GetPendingValidator), arg0, arg1) -} - -// GetRewardUTXOs mocks base method. 
-func (m *MockDiff) GetRewardUTXOs(arg0 ids.ID) ([]*avax.UTXO, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetRewardUTXOs", arg0) - ret0, _ := ret[0].([]*avax.UTXO) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetRewardUTXOs indicates an expected call of GetRewardUTXOs. -func (mr *MockDiffMockRecorder) GetRewardUTXOs(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRewardUTXOs", reflect.TypeOf((*MockDiff)(nil).GetRewardUTXOs), arg0) -} - -// GetSubnetTransformation mocks base method. -func (m *MockDiff) GetSubnetTransformation(arg0 ids.ID) (*txs.Tx, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetSubnetTransformation", arg0) - ret0, _ := ret[0].(*txs.Tx) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetSubnetTransformation indicates an expected call of GetSubnetTransformation. -func (mr *MockDiffMockRecorder) GetSubnetTransformation(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSubnetTransformation", reflect.TypeOf((*MockDiff)(nil).GetSubnetTransformation), arg0) -} - -// GetSubnets mocks base method. -func (m *MockDiff) GetSubnets() ([]*txs.Tx, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetSubnets") - ret0, _ := ret[0].([]*txs.Tx) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetSubnets indicates an expected call of GetSubnets. -func (mr *MockDiffMockRecorder) GetSubnets() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSubnets", reflect.TypeOf((*MockDiff)(nil).GetSubnets)) -} - -// GetTimestamp mocks base method. -func (m *MockDiff) GetTimestamp() time.Time { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetTimestamp") - ret0, _ := ret[0].(time.Time) - return ret0 -} - -// GetTimestamp indicates an expected call of GetTimestamp. 
-func (mr *MockDiffMockRecorder) GetTimestamp() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTimestamp", reflect.TypeOf((*MockDiff)(nil).GetTimestamp)) -} - -// GetTx mocks base method. -func (m *MockDiff) GetTx(arg0 ids.ID) (*txs.Tx, status.Status, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetTx", arg0) - ret0, _ := ret[0].(*txs.Tx) - ret1, _ := ret[1].(status.Status) - ret2, _ := ret[2].(error) - return ret0, ret1, ret2 -} - -// GetTx indicates an expected call of GetTx. -func (mr *MockDiffMockRecorder) GetTx(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTx", reflect.TypeOf((*MockDiff)(nil).GetTx), arg0) -} - -// GetUTXO mocks base method. -func (m *MockDiff) GetUTXO(arg0 ids.ID) (*avax.UTXO, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetUTXO", arg0) - ret0, _ := ret[0].(*avax.UTXO) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetUTXO indicates an expected call of GetUTXO. -func (mr *MockDiffMockRecorder) GetUTXO(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUTXO", reflect.TypeOf((*MockDiff)(nil).GetUTXO), arg0) -} - -// PutCurrentDelegator mocks base method. -func (m *MockDiff) PutCurrentDelegator(arg0 *Staker) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "PutCurrentDelegator", arg0) -} - -// PutCurrentDelegator indicates an expected call of PutCurrentDelegator. -func (mr *MockDiffMockRecorder) PutCurrentDelegator(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutCurrentDelegator", reflect.TypeOf((*MockDiff)(nil).PutCurrentDelegator), arg0) -} - -// PutCurrentValidator mocks base method. 
-func (m *MockDiff) PutCurrentValidator(arg0 *Staker) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "PutCurrentValidator", arg0) -} - -// PutCurrentValidator indicates an expected call of PutCurrentValidator. -func (mr *MockDiffMockRecorder) PutCurrentValidator(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutCurrentValidator", reflect.TypeOf((*MockDiff)(nil).PutCurrentValidator), arg0) -} - -// PutPendingDelegator mocks base method. -func (m *MockDiff) PutPendingDelegator(arg0 *Staker) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "PutPendingDelegator", arg0) -} - -// PutPendingDelegator indicates an expected call of PutPendingDelegator. -func (mr *MockDiffMockRecorder) PutPendingDelegator(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutPendingDelegator", reflect.TypeOf((*MockDiff)(nil).PutPendingDelegator), arg0) -} - -// PutPendingValidator mocks base method. -func (m *MockDiff) PutPendingValidator(arg0 *Staker) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "PutPendingValidator", arg0) -} - -// PutPendingValidator indicates an expected call of PutPendingValidator. -func (mr *MockDiffMockRecorder) PutPendingValidator(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutPendingValidator", reflect.TypeOf((*MockDiff)(nil).PutPendingValidator), arg0) -} - -// SetCurrentSupply mocks base method. -func (m *MockDiff) SetCurrentSupply(arg0 ids.ID, arg1 uint64) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetCurrentSupply", arg0, arg1) -} - -// SetCurrentSupply indicates an expected call of SetCurrentSupply. 
-func (mr *MockDiffMockRecorder) SetCurrentSupply(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetCurrentSupply", reflect.TypeOf((*MockDiff)(nil).SetCurrentSupply), arg0, arg1) -} - -// SetDelegateeReward mocks base method. -func (m *MockDiff) SetDelegateeReward(arg0 ids.ID, arg1 ids.NodeID, arg2 uint64) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SetDelegateeReward", arg0, arg1, arg2) - ret0, _ := ret[0].(error) - return ret0 -} - -// SetDelegateeReward indicates an expected call of SetDelegateeReward. -func (mr *MockDiffMockRecorder) SetDelegateeReward(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetDelegateeReward", reflect.TypeOf((*MockDiff)(nil).SetDelegateeReward), arg0, arg1, arg2) -} - -// SetTimestamp mocks base method. -func (m *MockDiff) SetTimestamp(arg0 time.Time) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetTimestamp", arg0) -} - -// SetTimestamp indicates an expected call of SetTimestamp. -func (mr *MockDiffMockRecorder) SetTimestamp(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetTimestamp", reflect.TypeOf((*MockDiff)(nil).SetTimestamp), arg0) -} diff --git a/avalanchego/vms/platformvm/state/mock_staker_iterator.go b/avalanchego/vms/platformvm/state/mock_staker_iterator.go index 1c4812b5..62ba31d8 100644 --- a/avalanchego/vms/platformvm/state/mock_staker_iterator.go +++ b/avalanchego/vms/platformvm/state/mock_staker_iterator.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. 
// Source: github.com/ava-labs/avalanchego/vms/platformvm/state (interfaces: StakerIterator) +// +// Generated by this command: +// +// mockgen -package=state -destination=vms/platformvm/state/mock_staker_iterator.go github.com/ava-labs/avalanchego/vms/platformvm/state StakerIterator +// // Package state is a generated GoMock package. package state @@ -10,7 +12,7 @@ package state import ( reflect "reflect" - gomock "github.com/golang/mock/gomock" + gomock "go.uber.org/mock/gomock" ) // MockStakerIterator is a mock of StakerIterator interface. diff --git a/avalanchego/vms/platformvm/state/mock_state.go b/avalanchego/vms/platformvm/state/mock_state.go index 9a610397..2fcc1462 100644 --- a/avalanchego/vms/platformvm/state/mock_state.go +++ b/avalanchego/vms/platformvm/state/mock_state.go @@ -1,27 +1,945 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/ava-labs/avalanchego/vms/platformvm/state (interfaces: Chain,Diff,State,Versions) +// +// Generated by this command: +// +// mockgen -package=state -destination=vms/platformvm/state/mock_state.go github.com/ava-labs/avalanchego/vms/platformvm/state Chain,Diff,State,Versions +// + +// Package state is a generated GoMock package. 
+package state + +import ( + context "context" + reflect "reflect" + sync "sync" + time "time" + + database "github.com/ava-labs/avalanchego/database" + ids "github.com/ava-labs/avalanchego/ids" + validators "github.com/ava-labs/avalanchego/snow/validators" + logging "github.com/ava-labs/avalanchego/utils/logging" + avax "github.com/ava-labs/avalanchego/vms/components/avax" + block "github.com/ava-labs/avalanchego/vms/platformvm/block" + fx "github.com/ava-labs/avalanchego/vms/platformvm/fx" + status "github.com/ava-labs/avalanchego/vms/platformvm/status" + txs "github.com/ava-labs/avalanchego/vms/platformvm/txs" + gomock "go.uber.org/mock/gomock" +) + +// MockChain is a mock of Chain interface. +type MockChain struct { + ctrl *gomock.Controller + recorder *MockChainMockRecorder +} + +// MockChainMockRecorder is the mock recorder for MockChain. +type MockChainMockRecorder struct { + mock *MockChain +} + +// NewMockChain creates a new mock instance. +func NewMockChain(ctrl *gomock.Controller) *MockChain { + mock := &MockChain{ctrl: ctrl} + mock.recorder = &MockChainMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockChain) EXPECT() *MockChainMockRecorder { + return m.recorder +} + +// AddChain mocks base method. +func (m *MockChain) AddChain(arg0 *txs.Tx) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "AddChain", arg0) +} + +// AddChain indicates an expected call of AddChain. +func (mr *MockChainMockRecorder) AddChain(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddChain", reflect.TypeOf((*MockChain)(nil).AddChain), arg0) +} + +// AddRewardUTXO mocks base method. +func (m *MockChain) AddRewardUTXO(arg0 ids.ID, arg1 *avax.UTXO) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "AddRewardUTXO", arg0, arg1) +} + +// AddRewardUTXO indicates an expected call of AddRewardUTXO. 
+func (mr *MockChainMockRecorder) AddRewardUTXO(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddRewardUTXO", reflect.TypeOf((*MockChain)(nil).AddRewardUTXO), arg0, arg1) +} + +// AddSubnet mocks base method. +func (m *MockChain) AddSubnet(arg0 *txs.Tx) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "AddSubnet", arg0) +} + +// AddSubnet indicates an expected call of AddSubnet. +func (mr *MockChainMockRecorder) AddSubnet(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddSubnet", reflect.TypeOf((*MockChain)(nil).AddSubnet), arg0) +} + +// AddSubnetTransformation mocks base method. +func (m *MockChain) AddSubnetTransformation(arg0 *txs.Tx) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "AddSubnetTransformation", arg0) +} + +// AddSubnetTransformation indicates an expected call of AddSubnetTransformation. +func (mr *MockChainMockRecorder) AddSubnetTransformation(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddSubnetTransformation", reflect.TypeOf((*MockChain)(nil).AddSubnetTransformation), arg0) +} + +// AddTx mocks base method. +func (m *MockChain) AddTx(arg0 *txs.Tx, arg1 status.Status) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "AddTx", arg0, arg1) +} + +// AddTx indicates an expected call of AddTx. +func (mr *MockChainMockRecorder) AddTx(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddTx", reflect.TypeOf((*MockChain)(nil).AddTx), arg0, arg1) +} + +// AddUTXO mocks base method. +func (m *MockChain) AddUTXO(arg0 *avax.UTXO) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "AddUTXO", arg0) +} + +// AddUTXO indicates an expected call of AddUTXO. 
+func (mr *MockChainMockRecorder) AddUTXO(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddUTXO", reflect.TypeOf((*MockChain)(nil).AddUTXO), arg0) +} + +// DeleteCurrentDelegator mocks base method. +func (m *MockChain) DeleteCurrentDelegator(arg0 *Staker) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "DeleteCurrentDelegator", arg0) +} + +// DeleteCurrentDelegator indicates an expected call of DeleteCurrentDelegator. +func (mr *MockChainMockRecorder) DeleteCurrentDelegator(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteCurrentDelegator", reflect.TypeOf((*MockChain)(nil).DeleteCurrentDelegator), arg0) +} + +// DeleteCurrentValidator mocks base method. +func (m *MockChain) DeleteCurrentValidator(arg0 *Staker) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "DeleteCurrentValidator", arg0) +} + +// DeleteCurrentValidator indicates an expected call of DeleteCurrentValidator. +func (mr *MockChainMockRecorder) DeleteCurrentValidator(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteCurrentValidator", reflect.TypeOf((*MockChain)(nil).DeleteCurrentValidator), arg0) +} + +// DeletePendingDelegator mocks base method. +func (m *MockChain) DeletePendingDelegator(arg0 *Staker) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "DeletePendingDelegator", arg0) +} + +// DeletePendingDelegator indicates an expected call of DeletePendingDelegator. +func (mr *MockChainMockRecorder) DeletePendingDelegator(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeletePendingDelegator", reflect.TypeOf((*MockChain)(nil).DeletePendingDelegator), arg0) +} + +// DeletePendingValidator mocks base method. 
+func (m *MockChain) DeletePendingValidator(arg0 *Staker) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "DeletePendingValidator", arg0) +} + +// DeletePendingValidator indicates an expected call of DeletePendingValidator. +func (mr *MockChainMockRecorder) DeletePendingValidator(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeletePendingValidator", reflect.TypeOf((*MockChain)(nil).DeletePendingValidator), arg0) +} + +// DeleteUTXO mocks base method. +func (m *MockChain) DeleteUTXO(arg0 ids.ID) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "DeleteUTXO", arg0) +} + +// DeleteUTXO indicates an expected call of DeleteUTXO. +func (mr *MockChainMockRecorder) DeleteUTXO(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteUTXO", reflect.TypeOf((*MockChain)(nil).DeleteUTXO), arg0) +} + +// GetCurrentDelegatorIterator mocks base method. +func (m *MockChain) GetCurrentDelegatorIterator(arg0 ids.ID, arg1 ids.NodeID) (StakerIterator, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetCurrentDelegatorIterator", arg0, arg1) + ret0, _ := ret[0].(StakerIterator) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetCurrentDelegatorIterator indicates an expected call of GetCurrentDelegatorIterator. +func (mr *MockChainMockRecorder) GetCurrentDelegatorIterator(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentDelegatorIterator", reflect.TypeOf((*MockChain)(nil).GetCurrentDelegatorIterator), arg0, arg1) +} + +// GetCurrentStakerIterator mocks base method. +func (m *MockChain) GetCurrentStakerIterator() (StakerIterator, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetCurrentStakerIterator") + ret0, _ := ret[0].(StakerIterator) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetCurrentStakerIterator indicates an expected call of GetCurrentStakerIterator. 
+func (mr *MockChainMockRecorder) GetCurrentStakerIterator() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentStakerIterator", reflect.TypeOf((*MockChain)(nil).GetCurrentStakerIterator)) +} + +// GetCurrentSupply mocks base method. +func (m *MockChain) GetCurrentSupply(arg0 ids.ID) (uint64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetCurrentSupply", arg0) + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetCurrentSupply indicates an expected call of GetCurrentSupply. +func (mr *MockChainMockRecorder) GetCurrentSupply(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentSupply", reflect.TypeOf((*MockChain)(nil).GetCurrentSupply), arg0) +} + +// GetCurrentValidator mocks base method. +func (m *MockChain) GetCurrentValidator(arg0 ids.ID, arg1 ids.NodeID) (*Staker, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetCurrentValidator", arg0, arg1) + ret0, _ := ret[0].(*Staker) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetCurrentValidator indicates an expected call of GetCurrentValidator. +func (mr *MockChainMockRecorder) GetCurrentValidator(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentValidator", reflect.TypeOf((*MockChain)(nil).GetCurrentValidator), arg0, arg1) +} + +// GetDelegateeReward mocks base method. +func (m *MockChain) GetDelegateeReward(arg0 ids.ID, arg1 ids.NodeID) (uint64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetDelegateeReward", arg0, arg1) + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetDelegateeReward indicates an expected call of GetDelegateeReward. 
+func (mr *MockChainMockRecorder) GetDelegateeReward(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDelegateeReward", reflect.TypeOf((*MockChain)(nil).GetDelegateeReward), arg0, arg1) +} + +// GetPendingDelegatorIterator mocks base method. +func (m *MockChain) GetPendingDelegatorIterator(arg0 ids.ID, arg1 ids.NodeID) (StakerIterator, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPendingDelegatorIterator", arg0, arg1) + ret0, _ := ret[0].(StakerIterator) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPendingDelegatorIterator indicates an expected call of GetPendingDelegatorIterator. +func (mr *MockChainMockRecorder) GetPendingDelegatorIterator(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPendingDelegatorIterator", reflect.TypeOf((*MockChain)(nil).GetPendingDelegatorIterator), arg0, arg1) +} + +// GetPendingStakerIterator mocks base method. +func (m *MockChain) GetPendingStakerIterator() (StakerIterator, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPendingStakerIterator") + ret0, _ := ret[0].(StakerIterator) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPendingStakerIterator indicates an expected call of GetPendingStakerIterator. +func (mr *MockChainMockRecorder) GetPendingStakerIterator() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPendingStakerIterator", reflect.TypeOf((*MockChain)(nil).GetPendingStakerIterator)) +} + +// GetPendingValidator mocks base method. +func (m *MockChain) GetPendingValidator(arg0 ids.ID, arg1 ids.NodeID) (*Staker, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPendingValidator", arg0, arg1) + ret0, _ := ret[0].(*Staker) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPendingValidator indicates an expected call of GetPendingValidator. 
+func (mr *MockChainMockRecorder) GetPendingValidator(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPendingValidator", reflect.TypeOf((*MockChain)(nil).GetPendingValidator), arg0, arg1) +} + +// GetSubnetOwner mocks base method. +func (m *MockChain) GetSubnetOwner(arg0 ids.ID) (fx.Owner, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSubnetOwner", arg0) + ret0, _ := ret[0].(fx.Owner) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetSubnetOwner indicates an expected call of GetSubnetOwner. +func (mr *MockChainMockRecorder) GetSubnetOwner(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSubnetOwner", reflect.TypeOf((*MockChain)(nil).GetSubnetOwner), arg0) +} + +// GetSubnetTransformation mocks base method. +func (m *MockChain) GetSubnetTransformation(arg0 ids.ID) (*txs.Tx, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSubnetTransformation", arg0) + ret0, _ := ret[0].(*txs.Tx) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetSubnetTransformation indicates an expected call of GetSubnetTransformation. +func (mr *MockChainMockRecorder) GetSubnetTransformation(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSubnetTransformation", reflect.TypeOf((*MockChain)(nil).GetSubnetTransformation), arg0) +} + +// GetTimestamp mocks base method. +func (m *MockChain) GetTimestamp() time.Time { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetTimestamp") + ret0, _ := ret[0].(time.Time) + return ret0 +} + +// GetTimestamp indicates an expected call of GetTimestamp. +func (mr *MockChainMockRecorder) GetTimestamp() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTimestamp", reflect.TypeOf((*MockChain)(nil).GetTimestamp)) +} + +// GetTx mocks base method. 
+func (m *MockChain) GetTx(arg0 ids.ID) (*txs.Tx, status.Status, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetTx", arg0) + ret0, _ := ret[0].(*txs.Tx) + ret1, _ := ret[1].(status.Status) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// GetTx indicates an expected call of GetTx. +func (mr *MockChainMockRecorder) GetTx(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTx", reflect.TypeOf((*MockChain)(nil).GetTx), arg0) +} + +// GetUTXO mocks base method. +func (m *MockChain) GetUTXO(arg0 ids.ID) (*avax.UTXO, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetUTXO", arg0) + ret0, _ := ret[0].(*avax.UTXO) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetUTXO indicates an expected call of GetUTXO. +func (mr *MockChainMockRecorder) GetUTXO(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUTXO", reflect.TypeOf((*MockChain)(nil).GetUTXO), arg0) +} + +// PutCurrentDelegator mocks base method. +func (m *MockChain) PutCurrentDelegator(arg0 *Staker) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "PutCurrentDelegator", arg0) +} + +// PutCurrentDelegator indicates an expected call of PutCurrentDelegator. +func (mr *MockChainMockRecorder) PutCurrentDelegator(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutCurrentDelegator", reflect.TypeOf((*MockChain)(nil).PutCurrentDelegator), arg0) +} + +// PutCurrentValidator mocks base method. +func (m *MockChain) PutCurrentValidator(arg0 *Staker) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "PutCurrentValidator", arg0) +} + +// PutCurrentValidator indicates an expected call of PutCurrentValidator. 
+func (mr *MockChainMockRecorder) PutCurrentValidator(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutCurrentValidator", reflect.TypeOf((*MockChain)(nil).PutCurrentValidator), arg0) +} + +// PutPendingDelegator mocks base method. +func (m *MockChain) PutPendingDelegator(arg0 *Staker) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "PutPendingDelegator", arg0) +} + +// PutPendingDelegator indicates an expected call of PutPendingDelegator. +func (mr *MockChainMockRecorder) PutPendingDelegator(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutPendingDelegator", reflect.TypeOf((*MockChain)(nil).PutPendingDelegator), arg0) +} + +// PutPendingValidator mocks base method. +func (m *MockChain) PutPendingValidator(arg0 *Staker) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "PutPendingValidator", arg0) +} + +// PutPendingValidator indicates an expected call of PutPendingValidator. +func (mr *MockChainMockRecorder) PutPendingValidator(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutPendingValidator", reflect.TypeOf((*MockChain)(nil).PutPendingValidator), arg0) +} + +// SetCurrentSupply mocks base method. +func (m *MockChain) SetCurrentSupply(arg0 ids.ID, arg1 uint64) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetCurrentSupply", arg0, arg1) +} + +// SetCurrentSupply indicates an expected call of SetCurrentSupply. +func (mr *MockChainMockRecorder) SetCurrentSupply(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetCurrentSupply", reflect.TypeOf((*MockChain)(nil).SetCurrentSupply), arg0, arg1) +} + +// SetDelegateeReward mocks base method. 
+func (m *MockChain) SetDelegateeReward(arg0 ids.ID, arg1 ids.NodeID, arg2 uint64) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetDelegateeReward", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// SetDelegateeReward indicates an expected call of SetDelegateeReward. +func (mr *MockChainMockRecorder) SetDelegateeReward(arg0, arg1, arg2 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetDelegateeReward", reflect.TypeOf((*MockChain)(nil).SetDelegateeReward), arg0, arg1, arg2) +} + +// SetSubnetOwner mocks base method. +func (m *MockChain) SetSubnetOwner(arg0 ids.ID, arg1 fx.Owner) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetSubnetOwner", arg0, arg1) +} + +// SetSubnetOwner indicates an expected call of SetSubnetOwner. +func (mr *MockChainMockRecorder) SetSubnetOwner(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetSubnetOwner", reflect.TypeOf((*MockChain)(nil).SetSubnetOwner), arg0, arg1) +} + +// SetTimestamp mocks base method. +func (m *MockChain) SetTimestamp(arg0 time.Time) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetTimestamp", arg0) +} + +// SetTimestamp indicates an expected call of SetTimestamp. +func (mr *MockChainMockRecorder) SetTimestamp(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetTimestamp", reflect.TypeOf((*MockChain)(nil).SetTimestamp), arg0) +} + +// MockDiff is a mock of Diff interface. +type MockDiff struct { + ctrl *gomock.Controller + recorder *MockDiffMockRecorder +} + +// MockDiffMockRecorder is the mock recorder for MockDiff. +type MockDiffMockRecorder struct { + mock *MockDiff +} + +// NewMockDiff creates a new mock instance. 
+func NewMockDiff(ctrl *gomock.Controller) *MockDiff { + mock := &MockDiff{ctrl: ctrl} + mock.recorder = &MockDiffMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockDiff) EXPECT() *MockDiffMockRecorder { + return m.recorder +} + +// AddChain mocks base method. +func (m *MockDiff) AddChain(arg0 *txs.Tx) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "AddChain", arg0) +} + +func (m *MockDiff) GetNetworkID() uint32 { + return 0 +} + +// AddChain indicates an expected call of AddChain. +func (mr *MockDiffMockRecorder) AddChain(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddChain", reflect.TypeOf((*MockDiff)(nil).AddChain), arg0) +} + +// AddRewardUTXO mocks base method. +func (m *MockDiff) AddRewardUTXO(arg0 ids.ID, arg1 *avax.UTXO) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "AddRewardUTXO", arg0, arg1) +} + +// AddRewardUTXO indicates an expected call of AddRewardUTXO. +func (mr *MockDiffMockRecorder) AddRewardUTXO(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddRewardUTXO", reflect.TypeOf((*MockDiff)(nil).AddRewardUTXO), arg0, arg1) +} + +// AddSubnet mocks base method. +func (m *MockDiff) AddSubnet(arg0 *txs.Tx) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "AddSubnet", arg0) +} + +// AddSubnet indicates an expected call of AddSubnet. +func (mr *MockDiffMockRecorder) AddSubnet(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddSubnet", reflect.TypeOf((*MockDiff)(nil).AddSubnet), arg0) +} + +// AddSubnetTransformation mocks base method. +func (m *MockDiff) AddSubnetTransformation(arg0 *txs.Tx) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "AddSubnetTransformation", arg0) +} + +// AddSubnetTransformation indicates an expected call of AddSubnetTransformation. 
+func (mr *MockDiffMockRecorder) AddSubnetTransformation(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddSubnetTransformation", reflect.TypeOf((*MockDiff)(nil).AddSubnetTransformation), arg0) +} + +// AddTx mocks base method. +func (m *MockDiff) AddTx(arg0 *txs.Tx, arg1 status.Status) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "AddTx", arg0, arg1) +} + +// AddTx indicates an expected call of AddTx. +func (mr *MockDiffMockRecorder) AddTx(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddTx", reflect.TypeOf((*MockDiff)(nil).AddTx), arg0, arg1) +} + +// AddUTXO mocks base method. +func (m *MockDiff) AddUTXO(arg0 *avax.UTXO) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "AddUTXO", arg0) +} + +// AddUTXO indicates an expected call of AddUTXO. +func (mr *MockDiffMockRecorder) AddUTXO(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddUTXO", reflect.TypeOf((*MockDiff)(nil).AddUTXO), arg0) +} + +// Apply mocks base method. +func (m *MockDiff) Apply(arg0 Chain) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Apply", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// Apply indicates an expected call of Apply. +func (mr *MockDiffMockRecorder) Apply(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Apply", reflect.TypeOf((*MockDiff)(nil).Apply), arg0) +} + +// DeleteCurrentDelegator mocks base method. +func (m *MockDiff) DeleteCurrentDelegator(arg0 *Staker) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "DeleteCurrentDelegator", arg0) +} + +// DeleteCurrentDelegator indicates an expected call of DeleteCurrentDelegator. 
+func (mr *MockDiffMockRecorder) DeleteCurrentDelegator(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteCurrentDelegator", reflect.TypeOf((*MockDiff)(nil).DeleteCurrentDelegator), arg0) +} + +// DeleteCurrentValidator mocks base method. +func (m *MockDiff) DeleteCurrentValidator(arg0 *Staker) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "DeleteCurrentValidator", arg0) +} + +// DeleteCurrentValidator indicates an expected call of DeleteCurrentValidator. +func (mr *MockDiffMockRecorder) DeleteCurrentValidator(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteCurrentValidator", reflect.TypeOf((*MockDiff)(nil).DeleteCurrentValidator), arg0) +} + +// DeletePendingDelegator mocks base method. +func (m *MockDiff) DeletePendingDelegator(arg0 *Staker) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "DeletePendingDelegator", arg0) +} + +// DeletePendingDelegator indicates an expected call of DeletePendingDelegator. +func (mr *MockDiffMockRecorder) DeletePendingDelegator(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeletePendingDelegator", reflect.TypeOf((*MockDiff)(nil).DeletePendingDelegator), arg0) +} + +// DeletePendingValidator mocks base method. +func (m *MockDiff) DeletePendingValidator(arg0 *Staker) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "DeletePendingValidator", arg0) +} + +// DeletePendingValidator indicates an expected call of DeletePendingValidator. +func (mr *MockDiffMockRecorder) DeletePendingValidator(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeletePendingValidator", reflect.TypeOf((*MockDiff)(nil).DeletePendingValidator), arg0) +} + +// DeleteUTXO mocks base method. 
+func (m *MockDiff) DeleteUTXO(arg0 ids.ID) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "DeleteUTXO", arg0) +} + +// DeleteUTXO indicates an expected call of DeleteUTXO. +func (mr *MockDiffMockRecorder) DeleteUTXO(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteUTXO", reflect.TypeOf((*MockDiff)(nil).DeleteUTXO), arg0) +} + +// GetCurrentDelegatorIterator mocks base method. +func (m *MockDiff) GetCurrentDelegatorIterator(arg0 ids.ID, arg1 ids.NodeID) (StakerIterator, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetCurrentDelegatorIterator", arg0, arg1) + ret0, _ := ret[0].(StakerIterator) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetCurrentDelegatorIterator indicates an expected call of GetCurrentDelegatorIterator. +func (mr *MockDiffMockRecorder) GetCurrentDelegatorIterator(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentDelegatorIterator", reflect.TypeOf((*MockDiff)(nil).GetCurrentDelegatorIterator), arg0, arg1) +} + +// GetCurrentStakerIterator mocks base method. +func (m *MockDiff) GetCurrentStakerIterator() (StakerIterator, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetCurrentStakerIterator") + ret0, _ := ret[0].(StakerIterator) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetCurrentStakerIterator indicates an expected call of GetCurrentStakerIterator. +func (mr *MockDiffMockRecorder) GetCurrentStakerIterator() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentStakerIterator", reflect.TypeOf((*MockDiff)(nil).GetCurrentStakerIterator)) +} + +// GetCurrentSupply mocks base method. 
+func (m *MockDiff) GetCurrentSupply(arg0 ids.ID) (uint64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetCurrentSupply", arg0) + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetCurrentSupply indicates an expected call of GetCurrentSupply. +func (mr *MockDiffMockRecorder) GetCurrentSupply(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentSupply", reflect.TypeOf((*MockDiff)(nil).GetCurrentSupply), arg0) +} + +// GetCurrentValidator mocks base method. +func (m *MockDiff) GetCurrentValidator(arg0 ids.ID, arg1 ids.NodeID) (*Staker, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetCurrentValidator", arg0, arg1) + ret0, _ := ret[0].(*Staker) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetCurrentValidator indicates an expected call of GetCurrentValidator. +func (mr *MockDiffMockRecorder) GetCurrentValidator(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentValidator", reflect.TypeOf((*MockDiff)(nil).GetCurrentValidator), arg0, arg1) +} + +// GetDelegateeReward mocks base method. +func (m *MockDiff) GetDelegateeReward(arg0 ids.ID, arg1 ids.NodeID) (uint64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetDelegateeReward", arg0, arg1) + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetDelegateeReward indicates an expected call of GetDelegateeReward. +func (mr *MockDiffMockRecorder) GetDelegateeReward(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDelegateeReward", reflect.TypeOf((*MockDiff)(nil).GetDelegateeReward), arg0, arg1) +} + +// GetPendingDelegatorIterator mocks base method. 
+func (m *MockDiff) GetPendingDelegatorIterator(arg0 ids.ID, arg1 ids.NodeID) (StakerIterator, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPendingDelegatorIterator", arg0, arg1) + ret0, _ := ret[0].(StakerIterator) + ret1, _ := ret[1].(error) + return ret0, ret1 +} -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/ava-labs/avalanchego/vms/platformvm/state (interfaces: State) +// GetPendingDelegatorIterator indicates an expected call of GetPendingDelegatorIterator. +func (mr *MockDiffMockRecorder) GetPendingDelegatorIterator(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPendingDelegatorIterator", reflect.TypeOf((*MockDiff)(nil).GetPendingDelegatorIterator), arg0, arg1) +} -// Package state is a generated GoMock package. -package state +// GetPendingStakerIterator mocks base method. +func (m *MockDiff) GetPendingStakerIterator() (StakerIterator, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPendingStakerIterator") + ret0, _ := ret[0].(StakerIterator) + ret1, _ := ret[1].(error) + return ret0, ret1 +} -import ( - reflect "reflect" - time "time" +// GetPendingStakerIterator indicates an expected call of GetPendingStakerIterator. 
+func (mr *MockDiffMockRecorder) GetPendingStakerIterator() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPendingStakerIterator", reflect.TypeOf((*MockDiff)(nil).GetPendingStakerIterator)) +} - database "github.com/ava-labs/avalanchego/database" - ids "github.com/ava-labs/avalanchego/ids" - choices "github.com/ava-labs/avalanchego/snow/choices" - validators "github.com/ava-labs/avalanchego/snow/validators" - bls "github.com/ava-labs/avalanchego/utils/crypto/bls" - avax "github.com/ava-labs/avalanchego/vms/components/avax" - blocks "github.com/ava-labs/avalanchego/vms/platformvm/blocks" - status "github.com/ava-labs/avalanchego/vms/platformvm/status" - txs "github.com/ava-labs/avalanchego/vms/platformvm/txs" - gomock "github.com/golang/mock/gomock" -) +// GetPendingValidator mocks base method. +func (m *MockDiff) GetPendingValidator(arg0 ids.ID, arg1 ids.NodeID) (*Staker, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPendingValidator", arg0, arg1) + ret0, _ := ret[0].(*Staker) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPendingValidator indicates an expected call of GetPendingValidator. +func (mr *MockDiffMockRecorder) GetPendingValidator(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPendingValidator", reflect.TypeOf((*MockDiff)(nil).GetPendingValidator), arg0, arg1) +} + +// GetSubnetOwner mocks base method. +func (m *MockDiff) GetSubnetOwner(arg0 ids.ID) (fx.Owner, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSubnetOwner", arg0) + ret0, _ := ret[0].(fx.Owner) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetSubnetOwner indicates an expected call of GetSubnetOwner. 
+func (mr *MockDiffMockRecorder) GetSubnetOwner(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSubnetOwner", reflect.TypeOf((*MockDiff)(nil).GetSubnetOwner), arg0) +} + +// GetSubnetTransformation mocks base method. +func (m *MockDiff) GetSubnetTransformation(arg0 ids.ID) (*txs.Tx, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSubnetTransformation", arg0) + ret0, _ := ret[0].(*txs.Tx) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetSubnetTransformation indicates an expected call of GetSubnetTransformation. +func (mr *MockDiffMockRecorder) GetSubnetTransformation(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSubnetTransformation", reflect.TypeOf((*MockDiff)(nil).GetSubnetTransformation), arg0) +} + +// GetTimestamp mocks base method. +func (m *MockDiff) GetTimestamp() time.Time { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetTimestamp") + ret0, _ := ret[0].(time.Time) + return ret0 +} + +// GetTimestamp indicates an expected call of GetTimestamp. +func (mr *MockDiffMockRecorder) GetTimestamp() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTimestamp", reflect.TypeOf((*MockDiff)(nil).GetTimestamp)) +} + +// GetTx mocks base method. +func (m *MockDiff) GetTx(arg0 ids.ID) (*txs.Tx, status.Status, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetTx", arg0) + ret0, _ := ret[0].(*txs.Tx) + ret1, _ := ret[1].(status.Status) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// GetTx indicates an expected call of GetTx. +func (mr *MockDiffMockRecorder) GetTx(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTx", reflect.TypeOf((*MockDiff)(nil).GetTx), arg0) +} + +// GetUTXO mocks base method. 
+func (m *MockDiff) GetUTXO(arg0 ids.ID) (*avax.UTXO, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetUTXO", arg0) + ret0, _ := ret[0].(*avax.UTXO) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetUTXO indicates an expected call of GetUTXO. +func (mr *MockDiffMockRecorder) GetUTXO(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUTXO", reflect.TypeOf((*MockDiff)(nil).GetUTXO), arg0) +} + +// PutCurrentDelegator mocks base method. +func (m *MockDiff) PutCurrentDelegator(arg0 *Staker) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "PutCurrentDelegator", arg0) +} + +// PutCurrentDelegator indicates an expected call of PutCurrentDelegator. +func (mr *MockDiffMockRecorder) PutCurrentDelegator(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutCurrentDelegator", reflect.TypeOf((*MockDiff)(nil).PutCurrentDelegator), arg0) +} + +// PutCurrentValidator mocks base method. +func (m *MockDiff) PutCurrentValidator(arg0 *Staker) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "PutCurrentValidator", arg0) +} + +// PutCurrentValidator indicates an expected call of PutCurrentValidator. +func (mr *MockDiffMockRecorder) PutCurrentValidator(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutCurrentValidator", reflect.TypeOf((*MockDiff)(nil).PutCurrentValidator), arg0) +} + +// PutPendingDelegator mocks base method. +func (m *MockDiff) PutPendingDelegator(arg0 *Staker) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "PutPendingDelegator", arg0) +} + +// PutPendingDelegator indicates an expected call of PutPendingDelegator. +func (mr *MockDiffMockRecorder) PutPendingDelegator(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutPendingDelegator", reflect.TypeOf((*MockDiff)(nil).PutPendingDelegator), arg0) +} + +// PutPendingValidator mocks base method. 
+func (m *MockDiff) PutPendingValidator(arg0 *Staker) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "PutPendingValidator", arg0) +} + +// PutPendingValidator indicates an expected call of PutPendingValidator. +func (mr *MockDiffMockRecorder) PutPendingValidator(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutPendingValidator", reflect.TypeOf((*MockDiff)(nil).PutPendingValidator), arg0) +} + +// SetCurrentSupply mocks base method. +func (m *MockDiff) SetCurrentSupply(arg0 ids.ID, arg1 uint64) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetCurrentSupply", arg0, arg1) +} + +// SetCurrentSupply indicates an expected call of SetCurrentSupply. +func (mr *MockDiffMockRecorder) SetCurrentSupply(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetCurrentSupply", reflect.TypeOf((*MockDiff)(nil).SetCurrentSupply), arg0, arg1) +} + +// SetDelegateeReward mocks base method. +func (m *MockDiff) SetDelegateeReward(arg0 ids.ID, arg1 ids.NodeID, arg2 uint64) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetDelegateeReward", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// SetDelegateeReward indicates an expected call of SetDelegateeReward. +func (mr *MockDiffMockRecorder) SetDelegateeReward(arg0, arg1, arg2 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetDelegateeReward", reflect.TypeOf((*MockDiff)(nil).SetDelegateeReward), arg0, arg1, arg2) +} + +// SetSubnetOwner mocks base method. +func (m *MockDiff) SetSubnetOwner(arg0 ids.ID, arg1 fx.Owner) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetSubnetOwner", arg0, arg1) +} + +// SetSubnetOwner indicates an expected call of SetSubnetOwner. 
+func (mr *MockDiffMockRecorder) SetSubnetOwner(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetSubnetOwner", reflect.TypeOf((*MockDiff)(nil).SetSubnetOwner), arg0, arg1) +} + +// SetTimestamp mocks base method. +func (m *MockDiff) SetTimestamp(arg0 time.Time) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetTimestamp", arg0) +} + +// SetTimestamp indicates an expected call of SetTimestamp. +func (mr *MockDiffMockRecorder) SetTimestamp(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetTimestamp", reflect.TypeOf((*MockDiff)(nil).SetTimestamp), arg0) +} // MockState is a mock of State interface. type MockState struct { @@ -69,7 +987,7 @@ func (m *MockState) AddChain(arg0 *txs.Tx) { } // AddChain indicates an expected call of AddChain. -func (mr *MockStateMockRecorder) AddChain(arg0 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) AddChain(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddChain", reflect.TypeOf((*MockState)(nil).AddChain), arg0) } @@ -81,21 +999,21 @@ func (m *MockState) AddRewardUTXO(arg0 ids.ID, arg1 *avax.UTXO) { } // AddRewardUTXO indicates an expected call of AddRewardUTXO. -func (mr *MockStateMockRecorder) AddRewardUTXO(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) AddRewardUTXO(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddRewardUTXO", reflect.TypeOf((*MockState)(nil).AddRewardUTXO), arg0, arg1) } // AddStatelessBlock mocks base method. -func (m *MockState) AddStatelessBlock(arg0 blocks.Block, arg1 choices.Status) { +func (m *MockState) AddStatelessBlock(arg0 block.Block) { m.ctrl.T.Helper() - m.ctrl.Call(m, "AddStatelessBlock", arg0, arg1) + m.ctrl.Call(m, "AddStatelessBlock", arg0) } // AddStatelessBlock indicates an expected call of AddStatelessBlock. 
-func (mr *MockStateMockRecorder) AddStatelessBlock(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) AddStatelessBlock(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddStatelessBlock", reflect.TypeOf((*MockState)(nil).AddStatelessBlock), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddStatelessBlock", reflect.TypeOf((*MockState)(nil).AddStatelessBlock), arg0) } // AddSubnet mocks base method. @@ -105,7 +1023,7 @@ func (m *MockState) AddSubnet(arg0 *txs.Tx) { } // AddSubnet indicates an expected call of AddSubnet. -func (mr *MockStateMockRecorder) AddSubnet(arg0 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) AddSubnet(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddSubnet", reflect.TypeOf((*MockState)(nil).AddSubnet), arg0) } @@ -117,7 +1035,7 @@ func (m *MockState) AddSubnetTransformation(arg0 *txs.Tx) { } // AddSubnetTransformation indicates an expected call of AddSubnetTransformation. -func (mr *MockStateMockRecorder) AddSubnetTransformation(arg0 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) AddSubnetTransformation(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddSubnetTransformation", reflect.TypeOf((*MockState)(nil).AddSubnetTransformation), arg0) } @@ -129,7 +1047,7 @@ func (m *MockState) AddTx(arg0 *txs.Tx, arg1 status.Status) { } // AddTx indicates an expected call of AddTx. -func (mr *MockStateMockRecorder) AddTx(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) AddTx(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddTx", reflect.TypeOf((*MockState)(nil).AddTx), arg0, arg1) } @@ -141,11 +1059,53 @@ func (m *MockState) AddUTXO(arg0 *avax.UTXO) { } // AddUTXO indicates an expected call of AddUTXO. 
-func (mr *MockStateMockRecorder) AddUTXO(arg0 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) AddUTXO(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddUTXO", reflect.TypeOf((*MockState)(nil).AddUTXO), arg0) } +// ApplyValidatorPublicKeyDiffs mocks base method. +func (m *MockState) ApplyValidatorPublicKeyDiffs(arg0 context.Context, arg1 map[ids.NodeID]*validators.GetValidatorOutput, arg2, arg3 uint64) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ApplyValidatorPublicKeyDiffs", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(error) + return ret0 +} + +// ApplyValidatorPublicKeyDiffs indicates an expected call of ApplyValidatorPublicKeyDiffs. +func (mr *MockStateMockRecorder) ApplyValidatorPublicKeyDiffs(arg0, arg1, arg2, arg3 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ApplyValidatorPublicKeyDiffs", reflect.TypeOf((*MockState)(nil).ApplyValidatorPublicKeyDiffs), arg0, arg1, arg2, arg3) +} + +// ApplyValidatorWeightDiffs mocks base method. +func (m *MockState) ApplyValidatorWeightDiffs(arg0 context.Context, arg1 map[ids.NodeID]*validators.GetValidatorOutput, arg2, arg3 uint64, arg4 ids.ID) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ApplyValidatorWeightDiffs", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(error) + return ret0 +} + +// ApplyValidatorWeightDiffs indicates an expected call of ApplyValidatorWeightDiffs. +func (mr *MockStateMockRecorder) ApplyValidatorWeightDiffs(arg0, arg1, arg2, arg3, arg4 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ApplyValidatorWeightDiffs", reflect.TypeOf((*MockState)(nil).ApplyValidatorWeightDiffs), arg0, arg1, arg2, arg3, arg4) +} + +// Checksum mocks base method. 
+func (m *MockState) Checksum() ids.ID { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Checksum") + ret0, _ := ret[0].(ids.ID) + return ret0 +} + +// Checksum indicates an expected call of Checksum. +func (mr *MockStateMockRecorder) Checksum() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Checksum", reflect.TypeOf((*MockState)(nil).Checksum)) +} + // Close mocks base method. func (m *MockState) Close() error { m.ctrl.T.Helper() @@ -196,7 +1156,7 @@ func (m *MockState) DeleteCurrentDelegator(arg0 *Staker) { } // DeleteCurrentDelegator indicates an expected call of DeleteCurrentDelegator. -func (mr *MockStateMockRecorder) DeleteCurrentDelegator(arg0 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) DeleteCurrentDelegator(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteCurrentDelegator", reflect.TypeOf((*MockState)(nil).DeleteCurrentDelegator), arg0) } @@ -208,7 +1168,7 @@ func (m *MockState) DeleteCurrentValidator(arg0 *Staker) { } // DeleteCurrentValidator indicates an expected call of DeleteCurrentValidator. -func (mr *MockStateMockRecorder) DeleteCurrentValidator(arg0 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) DeleteCurrentValidator(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteCurrentValidator", reflect.TypeOf((*MockState)(nil).DeleteCurrentValidator), arg0) } @@ -220,7 +1180,7 @@ func (m *MockState) DeletePendingDelegator(arg0 *Staker) { } // DeletePendingDelegator indicates an expected call of DeletePendingDelegator. 
-func (mr *MockStateMockRecorder) DeletePendingDelegator(arg0 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) DeletePendingDelegator(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeletePendingDelegator", reflect.TypeOf((*MockState)(nil).DeletePendingDelegator), arg0) } @@ -232,7 +1192,7 @@ func (m *MockState) DeletePendingValidator(arg0 *Staker) { } // DeletePendingValidator indicates an expected call of DeletePendingValidator. -func (mr *MockStateMockRecorder) DeletePendingValidator(arg0 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) DeletePendingValidator(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeletePendingValidator", reflect.TypeOf((*MockState)(nil).DeletePendingValidator), arg0) } @@ -244,11 +1204,26 @@ func (m *MockState) DeleteUTXO(arg0 ids.ID) { } // DeleteUTXO indicates an expected call of DeleteUTXO. -func (mr *MockStateMockRecorder) DeleteUTXO(arg0 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) DeleteUTXO(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteUTXO", reflect.TypeOf((*MockState)(nil).DeleteUTXO), arg0) } +// GetBlockIDAtHeight mocks base method. +func (m *MockState) GetBlockIDAtHeight(arg0 uint64) (ids.ID, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBlockIDAtHeight", arg0) + ret0, _ := ret[0].(ids.ID) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBlockIDAtHeight indicates an expected call of GetBlockIDAtHeight. +func (mr *MockStateMockRecorder) GetBlockIDAtHeight(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlockIDAtHeight", reflect.TypeOf((*MockState)(nil).GetBlockIDAtHeight), arg0) +} + // GetChains mocks base method. 
func (m *MockState) GetChains(arg0 ids.ID) ([]*txs.Tx, error) { m.ctrl.T.Helper() @@ -259,7 +1234,7 @@ func (m *MockState) GetChains(arg0 ids.ID) ([]*txs.Tx, error) { } // GetChains indicates an expected call of GetChains. -func (mr *MockStateMockRecorder) GetChains(arg0 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) GetChains(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetChains", reflect.TypeOf((*MockState)(nil).GetChains), arg0) } @@ -274,7 +1249,7 @@ func (m *MockState) GetCurrentDelegatorIterator(arg0 ids.ID, arg1 ids.NodeID) (S } // GetCurrentDelegatorIterator indicates an expected call of GetCurrentDelegatorIterator. -func (mr *MockStateMockRecorder) GetCurrentDelegatorIterator(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) GetCurrentDelegatorIterator(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentDelegatorIterator", reflect.TypeOf((*MockState)(nil).GetCurrentDelegatorIterator), arg0, arg1) } @@ -304,7 +1279,7 @@ func (m *MockState) GetCurrentSupply(arg0 ids.ID) (uint64, error) { } // GetCurrentSupply indicates an expected call of GetCurrentSupply. -func (mr *MockStateMockRecorder) GetCurrentSupply(arg0 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) GetCurrentSupply(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentSupply", reflect.TypeOf((*MockState)(nil).GetCurrentSupply), arg0) } @@ -319,7 +1294,7 @@ func (m *MockState) GetCurrentValidator(arg0 ids.ID, arg1 ids.NodeID) (*Staker, } // GetCurrentValidator indicates an expected call of GetCurrentValidator. 
-func (mr *MockStateMockRecorder) GetCurrentValidator(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) GetCurrentValidator(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentValidator", reflect.TypeOf((*MockState)(nil).GetCurrentValidator), arg0, arg1) } @@ -334,7 +1309,7 @@ func (m *MockState) GetDelegateeReward(arg0 ids.ID, arg1 ids.NodeID) (uint64, er } // GetDelegateeReward indicates an expected call of GetDelegateeReward. -func (mr *MockStateMockRecorder) GetDelegateeReward(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) GetDelegateeReward(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDelegateeReward", reflect.TypeOf((*MockState)(nil).GetDelegateeReward), arg0, arg1) } @@ -377,7 +1352,7 @@ func (m *MockState) GetPendingDelegatorIterator(arg0 ids.ID, arg1 ids.NodeID) (S } // GetPendingDelegatorIterator indicates an expected call of GetPendingDelegatorIterator. -func (mr *MockStateMockRecorder) GetPendingDelegatorIterator(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) GetPendingDelegatorIterator(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPendingDelegatorIterator", reflect.TypeOf((*MockState)(nil).GetPendingDelegatorIterator), arg0, arg1) } @@ -407,7 +1382,7 @@ func (m *MockState) GetPendingValidator(arg0 ids.ID, arg1 ids.NodeID) (*Staker, } // GetPendingValidator indicates an expected call of GetPendingValidator. 
-func (mr *MockStateMockRecorder) GetPendingValidator(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) GetPendingValidator(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPendingValidator", reflect.TypeOf((*MockState)(nil).GetPendingValidator), arg0, arg1) } @@ -422,7 +1397,7 @@ func (m *MockState) GetRewardUTXOs(arg0 ids.ID) ([]*avax.UTXO, error) { } // GetRewardUTXOs indicates an expected call of GetRewardUTXOs. -func (mr *MockStateMockRecorder) GetRewardUTXOs(arg0 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) GetRewardUTXOs(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRewardUTXOs", reflect.TypeOf((*MockState)(nil).GetRewardUTXOs), arg0) } @@ -437,27 +1412,41 @@ func (m *MockState) GetStartTime(arg0 ids.NodeID, arg1 ids.ID) (time.Time, error } // GetStartTime indicates an expected call of GetStartTime. -func (mr *MockStateMockRecorder) GetStartTime(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) GetStartTime(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetStartTime", reflect.TypeOf((*MockState)(nil).GetStartTime), arg0, arg1) } // GetStatelessBlock mocks base method. -func (m *MockState) GetStatelessBlock(arg0 ids.ID) (blocks.Block, choices.Status, error) { +func (m *MockState) GetStatelessBlock(arg0 ids.ID) (block.Block, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetStatelessBlock", arg0) - ret0, _ := ret[0].(blocks.Block) - ret1, _ := ret[1].(choices.Status) - ret2, _ := ret[2].(error) - return ret0, ret1, ret2 + ret0, _ := ret[0].(block.Block) + ret1, _ := ret[1].(error) + return ret0, ret1 } // GetStatelessBlock indicates an expected call of GetStatelessBlock. 
-func (mr *MockStateMockRecorder) GetStatelessBlock(arg0 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) GetStatelessBlock(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetStatelessBlock", reflect.TypeOf((*MockState)(nil).GetStatelessBlock), arg0) } +// GetSubnetOwner mocks base method. +func (m *MockState) GetSubnetOwner(arg0 ids.ID) (fx.Owner, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSubnetOwner", arg0) + ret0, _ := ret[0].(fx.Owner) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetSubnetOwner indicates an expected call of GetSubnetOwner. +func (mr *MockStateMockRecorder) GetSubnetOwner(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSubnetOwner", reflect.TypeOf((*MockState)(nil).GetSubnetOwner), arg0) +} + // GetSubnetTransformation mocks base method. func (m *MockState) GetSubnetTransformation(arg0 ids.ID) (*txs.Tx, error) { m.ctrl.T.Helper() @@ -468,7 +1457,7 @@ func (m *MockState) GetSubnetTransformation(arg0 ids.ID) (*txs.Tx, error) { } // GetSubnetTransformation indicates an expected call of GetSubnetTransformation. -func (mr *MockStateMockRecorder) GetSubnetTransformation(arg0 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) GetSubnetTransformation(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSubnetTransformation", reflect.TypeOf((*MockState)(nil).GetSubnetTransformation), arg0) } @@ -513,7 +1502,7 @@ func (m *MockState) GetTx(arg0 ids.ID) (*txs.Tx, status.Status, error) { } // GetTx indicates an expected call of GetTx. 
-func (mr *MockStateMockRecorder) GetTx(arg0 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) GetTx(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTx", reflect.TypeOf((*MockState)(nil).GetTx), arg0) } @@ -528,7 +1517,7 @@ func (m *MockState) GetUTXO(arg0 ids.ID) (*avax.UTXO, error) { } // GetUTXO indicates an expected call of GetUTXO. -func (mr *MockStateMockRecorder) GetUTXO(arg0 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) GetUTXO(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUTXO", reflect.TypeOf((*MockState)(nil).GetUTXO), arg0) } @@ -544,39 +1533,23 @@ func (m *MockState) GetUptime(arg0 ids.NodeID, arg1 ids.ID) (time.Duration, time } // GetUptime indicates an expected call of GetUptime. -func (mr *MockStateMockRecorder) GetUptime(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) GetUptime(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUptime", reflect.TypeOf((*MockState)(nil).GetUptime), arg0, arg1) } -// GetValidatorPublicKeyDiffs mocks base method. -func (m *MockState) GetValidatorPublicKeyDiffs(arg0 uint64) (map[ids.NodeID]*bls.PublicKey, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetValidatorPublicKeyDiffs", arg0) - ret0, _ := ret[0].(map[ids.NodeID]*bls.PublicKey) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetValidatorPublicKeyDiffs indicates an expected call of GetValidatorPublicKeyDiffs. -func (mr *MockStateMockRecorder) GetValidatorPublicKeyDiffs(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetValidatorPublicKeyDiffs", reflect.TypeOf((*MockState)(nil).GetValidatorPublicKeyDiffs), arg0) -} - -// GetValidatorWeightDiffs mocks base method. 
-func (m *MockState) GetValidatorWeightDiffs(arg0 uint64, arg1 ids.ID) (map[ids.NodeID]*ValidatorWeightDiff, error) { +// PruneAndIndex mocks base method. +func (m *MockState) PruneAndIndex(arg0 sync.Locker, arg1 logging.Logger) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetValidatorWeightDiffs", arg0, arg1) - ret0, _ := ret[0].(map[ids.NodeID]*ValidatorWeightDiff) - ret1, _ := ret[1].(error) - return ret0, ret1 + ret := m.ctrl.Call(m, "PruneAndIndex", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 } -// GetValidatorWeightDiffs indicates an expected call of GetValidatorWeightDiffs. -func (mr *MockStateMockRecorder) GetValidatorWeightDiffs(arg0, arg1 interface{}) *gomock.Call { +// PruneAndIndex indicates an expected call of PruneAndIndex. +func (mr *MockStateMockRecorder) PruneAndIndex(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetValidatorWeightDiffs", reflect.TypeOf((*MockState)(nil).GetValidatorWeightDiffs), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PruneAndIndex", reflect.TypeOf((*MockState)(nil).PruneAndIndex), arg0, arg1) } // PutCurrentDelegator mocks base method. @@ -586,7 +1559,7 @@ func (m *MockState) PutCurrentDelegator(arg0 *Staker) { } // PutCurrentDelegator indicates an expected call of PutCurrentDelegator. -func (mr *MockStateMockRecorder) PutCurrentDelegator(arg0 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) PutCurrentDelegator(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutCurrentDelegator", reflect.TypeOf((*MockState)(nil).PutCurrentDelegator), arg0) } @@ -598,7 +1571,7 @@ func (m *MockState) PutCurrentValidator(arg0 *Staker) { } // PutCurrentValidator indicates an expected call of PutCurrentValidator. 
-func (mr *MockStateMockRecorder) PutCurrentValidator(arg0 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) PutCurrentValidator(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutCurrentValidator", reflect.TypeOf((*MockState)(nil).PutCurrentValidator), arg0) } @@ -610,7 +1583,7 @@ func (m *MockState) PutPendingDelegator(arg0 *Staker) { } // PutPendingDelegator indicates an expected call of PutPendingDelegator. -func (mr *MockStateMockRecorder) PutPendingDelegator(arg0 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) PutPendingDelegator(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutPendingDelegator", reflect.TypeOf((*MockState)(nil).PutPendingDelegator), arg0) } @@ -622,7 +1595,7 @@ func (m *MockState) PutPendingValidator(arg0 *Staker) { } // PutPendingValidator indicates an expected call of PutPendingValidator. -func (mr *MockStateMockRecorder) PutPendingValidator(arg0 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) PutPendingValidator(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutPendingValidator", reflect.TypeOf((*MockState)(nil).PutPendingValidator), arg0) } @@ -634,7 +1607,7 @@ func (m *MockState) SetCurrentSupply(arg0 ids.ID, arg1 uint64) { } // SetCurrentSupply indicates an expected call of SetCurrentSupply. -func (mr *MockStateMockRecorder) SetCurrentSupply(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) SetCurrentSupply(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetCurrentSupply", reflect.TypeOf((*MockState)(nil).SetCurrentSupply), arg0, arg1) } @@ -648,7 +1621,7 @@ func (m *MockState) SetDelegateeReward(arg0 ids.ID, arg1 ids.NodeID, arg2 uint64 } // SetDelegateeReward indicates an expected call of SetDelegateeReward. 
-func (mr *MockStateMockRecorder) SetDelegateeReward(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) SetDelegateeReward(arg0, arg1, arg2 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetDelegateeReward", reflect.TypeOf((*MockState)(nil).SetDelegateeReward), arg0, arg1, arg2) } @@ -660,7 +1633,7 @@ func (m *MockState) SetHeight(arg0 uint64) { } // SetHeight indicates an expected call of SetHeight. -func (mr *MockStateMockRecorder) SetHeight(arg0 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) SetHeight(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetHeight", reflect.TypeOf((*MockState)(nil).SetHeight), arg0) } @@ -672,11 +1645,23 @@ func (m *MockState) SetLastAccepted(arg0 ids.ID) { } // SetLastAccepted indicates an expected call of SetLastAccepted. -func (mr *MockStateMockRecorder) SetLastAccepted(arg0 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) SetLastAccepted(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetLastAccepted", reflect.TypeOf((*MockState)(nil).SetLastAccepted), arg0) } +// SetSubnetOwner mocks base method. +func (m *MockState) SetSubnetOwner(arg0 ids.ID, arg1 fx.Owner) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetSubnetOwner", arg0, arg1) +} + +// SetSubnetOwner indicates an expected call of SetSubnetOwner. +func (mr *MockStateMockRecorder) SetSubnetOwner(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetSubnetOwner", reflect.TypeOf((*MockState)(nil).SetSubnetOwner), arg0, arg1) +} + // SetTimestamp mocks base method. func (m *MockState) SetTimestamp(arg0 time.Time) { m.ctrl.T.Helper() @@ -684,7 +1669,7 @@ func (m *MockState) SetTimestamp(arg0 time.Time) { } // SetTimestamp indicates an expected call of SetTimestamp. 
-func (mr *MockStateMockRecorder) SetTimestamp(arg0 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) SetTimestamp(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetTimestamp", reflect.TypeOf((*MockState)(nil).SetTimestamp), arg0) } @@ -698,11 +1683,26 @@ func (m *MockState) SetUptime(arg0 ids.NodeID, arg1 ids.ID, arg2 time.Duration, } // SetUptime indicates an expected call of SetUptime. -func (mr *MockStateMockRecorder) SetUptime(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) SetUptime(arg0, arg1, arg2, arg3 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetUptime", reflect.TypeOf((*MockState)(nil).SetUptime), arg0, arg1, arg2, arg3) } +// ShouldPrune mocks base method. +func (m *MockState) ShouldPrune() (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ShouldPrune") + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ShouldPrune indicates an expected call of ShouldPrune. +func (mr *MockStateMockRecorder) ShouldPrune() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ShouldPrune", reflect.TypeOf((*MockState)(nil).ShouldPrune)) +} + // UTXOIDs mocks base method. func (m *MockState) UTXOIDs(arg0 []byte, arg1 ids.ID, arg2 int) ([]ids.ID, error) { m.ctrl.T.Helper() @@ -713,21 +1713,45 @@ func (m *MockState) UTXOIDs(arg0 []byte, arg1 ids.ID, arg2 int) ([]ids.ID, error } // UTXOIDs indicates an expected call of UTXOIDs. -func (mr *MockStateMockRecorder) UTXOIDs(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) UTXOIDs(arg0, arg1, arg2 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UTXOIDs", reflect.TypeOf((*MockState)(nil).UTXOIDs), arg0, arg1, arg2) } -// ValidatorSet mocks base method. 
-func (m *MockState) ValidatorSet(arg0 ids.ID, arg1 validators.Set) error { +// MockVersions is a mock of Versions interface. +type MockVersions struct { + ctrl *gomock.Controller + recorder *MockVersionsMockRecorder +} + +// MockVersionsMockRecorder is the mock recorder for MockVersions. +type MockVersionsMockRecorder struct { + mock *MockVersions +} + +// NewMockVersions creates a new mock instance. +func NewMockVersions(ctrl *gomock.Controller) *MockVersions { + mock := &MockVersions{ctrl: ctrl} + mock.recorder = &MockVersionsMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockVersions) EXPECT() *MockVersionsMockRecorder { + return m.recorder +} + +// GetState mocks base method. +func (m *MockVersions) GetState(arg0 ids.ID) (Chain, bool) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ValidatorSet", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 + ret := m.ctrl.Call(m, "GetState", arg0) + ret0, _ := ret[0].(Chain) + ret1, _ := ret[1].(bool) + return ret0, ret1 } -// ValidatorSet indicates an expected call of ValidatorSet. -func (mr *MockStateMockRecorder) ValidatorSet(arg0, arg1 interface{}) *gomock.Call { +// GetState indicates an expected call of GetState. +func (mr *MockVersionsMockRecorder) GetState(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidatorSet", reflect.TypeOf((*MockState)(nil).ValidatorSet), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetState", reflect.TypeOf((*MockVersions)(nil).GetState), arg0) } diff --git a/avalanchego/vms/platformvm/state/mock_versions.go b/avalanchego/vms/platformvm/state/mock_versions.go deleted file mode 100644 index e94aa499..00000000 --- a/avalanchego/vms/platformvm/state/mock_versions.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/ava-labs/avalanchego/vms/platformvm/state (interfaces: Versions) - -// Package state is a generated GoMock package. -package state - -import ( - reflect "reflect" - - ids "github.com/ava-labs/avalanchego/ids" - gomock "github.com/golang/mock/gomock" -) - -// MockVersions is a mock of Versions interface. -type MockVersions struct { - ctrl *gomock.Controller - recorder *MockVersionsMockRecorder -} - -// MockVersionsMockRecorder is the mock recorder for MockVersions. -type MockVersionsMockRecorder struct { - mock *MockVersions -} - -// NewMockVersions creates a new mock instance. -func NewMockVersions(ctrl *gomock.Controller) *MockVersions { - mock := &MockVersions{ctrl: ctrl} - mock.recorder = &MockVersionsMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockVersions) EXPECT() *MockVersionsMockRecorder { - return m.recorder -} - -// GetState mocks base method. -func (m *MockVersions) GetState(arg0 ids.ID) (Chain, bool) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetState", arg0) - ret0, _ := ret[0].(Chain) - ret1, _ := ret[1].(bool) - return ret0, ret1 -} - -// GetState indicates an expected call of GetState. -func (mr *MockVersionsMockRecorder) GetState(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetState", reflect.TypeOf((*MockVersions)(nil).GetState), arg0) -} diff --git a/avalanchego/vms/platformvm/state/slice_iterator_test.go b/avalanchego/vms/platformvm/state/slice_iterator_test.go index 96a686cd..408ffe83 100644 --- a/avalanchego/vms/platformvm/state/slice_iterator_test.go +++ b/avalanchego/vms/platformvm/state/slice_iterator_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package state diff --git a/avalanchego/vms/platformvm/state/staker.go b/avalanchego/vms/platformvm/state/staker.go index 37bc512e..a9ba5259 100644 --- a/avalanchego/vms/platformvm/state/staker.go +++ b/avalanchego/vms/platformvm/state/staker.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package state @@ -83,7 +83,12 @@ func (s *Staker) Less(than *Staker) bool { return bytes.Compare(s.TxID[:], than.TxID[:]) == -1 } -func NewCurrentStaker(txID ids.ID, staker txs.Staker, potentialReward uint64) (*Staker, error) { +func NewCurrentStaker( + txID ids.ID, + staker txs.Staker, + startTime time.Time, + potentialReward uint64, +) (*Staker, error) { publicKey, _, err := staker.PublicKey() if err != nil { return nil, err @@ -95,7 +100,7 @@ func NewCurrentStaker(txID ids.ID, staker txs.Staker, potentialReward uint64) (* PublicKey: publicKey, SubnetID: staker.SubnetID(), Weight: staker.Weight(), - StartTime: staker.StartTime(), + StartTime: startTime, EndTime: endTime, PotentialReward: potentialReward, NextTime: endTime, @@ -103,7 +108,7 @@ func NewCurrentStaker(txID ids.ID, staker txs.Staker, potentialReward uint64) (* }, nil } -func NewPendingStaker(txID ids.ID, staker txs.Staker) (*Staker, error) { +func NewPendingStaker(txID ids.ID, staker txs.ScheduledStaker) (*Staker, error) { publicKey, _, err := staker.PublicKey() if err != nil { return nil, err diff --git a/avalanchego/vms/platformvm/state/staker_diff_iterator.go b/avalanchego/vms/platformvm/state/staker_diff_iterator.go index e92f6307..d47ab49a 100644 --- a/avalanchego/vms/platformvm/state/staker_diff_iterator.go +++ b/avalanchego/vms/platformvm/state/staker_diff_iterator.go @@ -1,18 +1,16 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package state import ( - "container/heap" - + "github.com/ava-labs/avalanchego/utils/heap" "github.com/ava-labs/avalanchego/vms/platformvm/txs" ) var ( _ StakerDiffIterator = (*stakerDiffIterator)(nil) _ StakerIterator = (*mutableStakerIterator)(nil) - _ heap.Interface = (*mutableStakerIterator)(nil) ) // StakerDiffIterator is an iterator that iterates over the events that will be @@ -114,38 +112,40 @@ func (it *stakerDiffIterator) advancePending() { type mutableStakerIterator struct { iteratorExhausted bool iterator StakerIterator - heap []*Staker + heap heap.Queue[*Staker] } func newMutableStakerIterator(iterator StakerIterator) *mutableStakerIterator { return &mutableStakerIterator{ iteratorExhausted: !iterator.Next(), iterator: iterator, + heap: heap.NewQueue((*Staker).Less), } } // Add should not be called until after Next has been called at least once. func (it *mutableStakerIterator) Add(staker *Staker) { - heap.Push(it, staker) + it.heap.Push(staker) } func (it *mutableStakerIterator) Next() bool { // The only time the heap should be empty - is when the iterator is // exhausted or uninitialized. - if len(it.heap) > 0 { - heap.Pop(it) + if it.heap.Len() > 0 { + it.heap.Pop() } // If the iterator is exhausted, the only elements left to iterate over are // in the heap. if it.iteratorExhausted { - return len(it.heap) > 0 + return it.heap.Len() > 0 } // If the heap doesn't contain the next staker to return, we need to move // the next element from the iterator into the heap. 
nextIteratorStaker := it.iterator.Value() - if len(it.heap) == 0 || nextIteratorStaker.Less(it.heap[0]) { + peek, ok := it.heap.Peek() + if !ok || nextIteratorStaker.Less(peek) { it.Add(nextIteratorStaker) it.iteratorExhausted = !it.iterator.Next() } @@ -153,35 +153,12 @@ func (it *mutableStakerIterator) Next() bool { } func (it *mutableStakerIterator) Value() *Staker { - return it.heap[0] + peek, _ := it.heap.Peek() + return peek } func (it *mutableStakerIterator) Release() { it.iteratorExhausted = true it.iterator.Release() - it.heap = nil -} - -func (it *mutableStakerIterator) Len() int { - return len(it.heap) -} - -func (it *mutableStakerIterator) Less(i, j int) bool { - return it.heap[i].Less(it.heap[j]) -} - -func (it *mutableStakerIterator) Swap(i, j int) { - it.heap[j], it.heap[i] = it.heap[i], it.heap[j] -} - -func (it *mutableStakerIterator) Push(value interface{}) { - it.heap = append(it.heap, value.(*Staker)) -} - -func (it *mutableStakerIterator) Pop() interface{} { - newLength := len(it.heap) - 1 - value := it.heap[newLength] - it.heap[newLength] = nil - it.heap = it.heap[:newLength] - return value + it.heap = heap.NewQueue((*Staker).Less) } diff --git a/avalanchego/vms/platformvm/state/staker_diff_iterator_test.go b/avalanchego/vms/platformvm/state/staker_diff_iterator_test.go index c008b06f..468b8800 100644 --- a/avalanchego/vms/platformvm/state/staker_diff_iterator_test.go +++ b/avalanchego/vms/platformvm/state/staker_diff_iterator_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package state diff --git a/avalanchego/vms/platformvm/state/staker_status.go b/avalanchego/vms/platformvm/state/staker_status.go index b74064c4..0adc4624 100644 --- a/avalanchego/vms/platformvm/state/staker_status.go +++ b/avalanchego/vms/platformvm/state/staker_status.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package state diff --git a/avalanchego/vms/platformvm/state/staker_test.go b/avalanchego/vms/platformvm/state/staker_test.go index bb196e84..d1ac10f9 100644 --- a/avalanchego/vms/platformvm/state/staker_test.go +++ b/avalanchego/vms/platformvm/state/staker_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package state @@ -8,9 +8,8 @@ import ( "testing" "time" - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/crypto/bls" @@ -135,7 +134,6 @@ func TestStakerLess(t *testing.T) { func TestNewCurrentStaker(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() txID := ids.GenerateTestID() nodeID := ids.GenerateTestNodeID() @@ -145,20 +143,19 @@ func TestNewCurrentStaker(t *testing.T) { subnetID := ids.GenerateTestID() weight := uint64(12345) startTime := time.Now() - endTime := time.Now() + endTime := startTime.Add(time.Hour) potentialReward := uint64(54321) currentPriority := txs.SubnetPermissionedValidatorCurrentPriority stakerTx := txs.NewMockStaker(ctrl) + stakerTx.EXPECT().EndTime().Return(endTime) stakerTx.EXPECT().NodeID().Return(nodeID) stakerTx.EXPECT().PublicKey().Return(publicKey, true, nil) stakerTx.EXPECT().SubnetID().Return(subnetID) stakerTx.EXPECT().Weight().Return(weight) - 
stakerTx.EXPECT().StartTime().Return(startTime) - stakerTx.EXPECT().EndTime().Return(endTime) stakerTx.EXPECT().CurrentPriority().Return(currentPriority) - staker, err := NewCurrentStaker(txID, stakerTx, potentialReward) + staker, err := NewCurrentStaker(txID, stakerTx, startTime, potentialReward) require.NotNil(staker) require.NoError(err) require.Equal(txID, staker.TxID) @@ -174,14 +171,13 @@ func TestNewCurrentStaker(t *testing.T) { stakerTx.EXPECT().PublicKey().Return(nil, false, errCustom) - _, err = NewCurrentStaker(txID, stakerTx, potentialReward) + _, err = NewCurrentStaker(txID, stakerTx, startTime, potentialReward) require.ErrorIs(err, errCustom) } func TestNewPendingStaker(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() txID := ids.GenerateTestID() nodeID := ids.GenerateTestNodeID() @@ -194,7 +190,7 @@ func TestNewPendingStaker(t *testing.T) { endTime := time.Now() pendingPriority := txs.SubnetPermissionedValidatorPendingPriority - stakerTx := txs.NewMockStaker(ctrl) + stakerTx := txs.NewMockScheduledStaker(ctrl) stakerTx.EXPECT().NodeID().Return(nodeID) stakerTx.EXPECT().PublicKey().Return(publicKey, true, nil) stakerTx.EXPECT().SubnetID().Return(subnetID) diff --git a/avalanchego/vms/platformvm/state/stakers.go b/avalanchego/vms/platformvm/state/stakers.go index 5276ff4f..f787749f 100644 --- a/avalanchego/vms/platformvm/state/stakers.go +++ b/avalanchego/vms/platformvm/state/stakers.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package state diff --git a/avalanchego/vms/platformvm/state/stakers_test.go b/avalanchego/vms/platformvm/state/stakers_test.go index 6b8c85ea..5c6d9a8b 100644 --- a/avalanchego/vms/platformvm/state/stakers_test.go +++ b/avalanchego/vms/platformvm/state/stakers_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. 
All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package state @@ -235,17 +235,19 @@ func newTestStaker() *Staker { } func assertIteratorsEqual(t *testing.T, expected, actual StakerIterator) { + require := require.New(t) + t.Helper() for expected.Next() { - require.True(t, actual.Next()) + require.True(actual.Next()) expectedStaker := expected.Value() actualStaker := actual.Value() - require.Equal(t, expectedStaker, actualStaker) + require.Equal(expectedStaker, actualStaker) } - require.False(t, actual.Next()) + require.False(actual.Next()) expected.Release() actual.Release() diff --git a/avalanchego/vms/platformvm/state/state.go b/avalanchego/vms/platformvm/state/state.go index 22425aac..f7e8bc9d 100644 --- a/avalanchego/vms/platformvm/state/state.go +++ b/avalanchego/vms/platformvm/state/state.go @@ -1,16 +1,19 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package state import ( + "context" "errors" "fmt" + "math" + "sync" "time" "github.com/google/btree" - "github.com/prometheus/client_golang/prometheus" + "go.uber.org/zap" "github.com/ava-labs/avalanchego/cache" "github.com/ava-labs/avalanchego/cache/metercacher" @@ -27,58 +30,64 @@ import ( "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/hashing" - "github.com/ava-labs/avalanchego/utils/math" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/timer" "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/vms/components/avax" - "github.com/ava-labs/avalanchego/vms/platformvm/blocks" + "github.com/ava-labs/avalanchego/vms/platformvm/block" "github.com/ava-labs/avalanchego/vms/platformvm/config" + "github.com/ava-labs/avalanchego/vms/platformvm/fx" "github.com/ava-labs/avalanchego/vms/platformvm/genesis" "github.com/ava-labs/avalanchego/vms/platformvm/metrics" "github.com/ava-labs/avalanchego/vms/platformvm/reward" "github.com/ava-labs/avalanchego/vms/platformvm/status" "github.com/ava-labs/avalanchego/vms/platformvm/txs" + + safemath "github.com/ava-labs/avalanchego/utils/math" ) const ( - validatorDiffsCacheSize = 2048 - blockCacheSize = 2048 - txCacheSize = 2048 - rewardUTXOsCacheSize = 2048 - chainCacheSize = 2048 - chainDBCacheSize = 2048 + pruneCommitLimit = 1024 + pruneCommitSleepMultiplier = 5 + pruneCommitSleepCap = 10 * time.Second + pruneUpdateFrequency = 30 * time.Second ) var ( _ State = (*state)(nil) - ErrDelegatorSubset = errors.New("delegator's time range must be a subset of the validator's time range") - errMissingValidatorSet = errors.New("missing validator set") errValidatorSetAlreadyPopulated = errors.New("validator set already populated") - errDuplicateValidatorSet = errors.New("duplicate validator set") - - blockPrefix = []byte("block") - validatorsPrefix = []byte("validators") - 
currentPrefix = []byte("current") - pendingPrefix = []byte("pending") - validatorPrefix = []byte("validator") - delegatorPrefix = []byte("delegator") - subnetValidatorPrefix = []byte("subnetValidator") - subnetDelegatorPrefix = []byte("subnetDelegator") - validatorWeightDiffsPrefix = []byte("validatorDiffs") - validatorPublicKeyDiffsPrefix = []byte("publicKeyDiffs") - txPrefix = []byte("tx") - rewardUTXOsPrefix = []byte("rewardUTXOs") - utxoPrefix = []byte("utxo") - subnetPrefix = []byte("subnet") - transformedSubnetPrefix = []byte("transformedSubnet") - supplyPrefix = []byte("supply") - chainPrefix = []byte("chain") - singletonPrefix = []byte("singleton") - - timestampKey = []byte("timestamp") - currentSupplyKey = []byte("current supply") - lastAcceptedKey = []byte("last accepted") - initializedKey = []byte("initialized") + errIsNotSubnet = errors.New("is not a subnet") + + BlockIDPrefix = []byte("blockID") + BlockPrefix = []byte("block") + ValidatorsPrefix = []byte("validators") + CurrentPrefix = []byte("current") + PendingPrefix = []byte("pending") + ValidatorPrefix = []byte("validator") + DelegatorPrefix = []byte("delegator") + SubnetValidatorPrefix = []byte("subnetValidator") + SubnetDelegatorPrefix = []byte("subnetDelegator") + NestedValidatorWeightDiffsPrefix = []byte("validatorDiffs") + NestedValidatorPublicKeyDiffsPrefix = []byte("publicKeyDiffs") + FlatValidatorWeightDiffsPrefix = []byte("flatValidatorDiffs") + FlatValidatorPublicKeyDiffsPrefix = []byte("flatPublicKeyDiffs") + TxPrefix = []byte("tx") + RewardUTXOsPrefix = []byte("rewardUTXOs") + UTXOPrefix = []byte("utxo") + SubnetPrefix = []byte("subnet") + SubnetOwnerPrefix = []byte("subnetOwner") + TransformedSubnetPrefix = []byte("transformedSubnet") + SupplyPrefix = []byte("supply") + ChainPrefix = []byte("chain") + SingletonPrefix = []byte("singleton") + + TimestampKey = []byte("timestamp") + CurrentSupplyKey = []byte("current supply") + LastAcceptedKey = []byte("last accepted") + HeightsIndexedKey 
= []byte("heights indexed") + InitializedKey = []byte("initialized") + PrunedKey = []byte("pruned") ) // Chain collects all methods to manage the state of the chain for block @@ -97,16 +106,16 @@ type Chain interface { GetCurrentSupply(subnetID ids.ID) (uint64, error) SetCurrentSupply(subnetID ids.ID, cs uint64) - GetRewardUTXOs(txID ids.ID) ([]*avax.UTXO, error) AddRewardUTXO(txID ids.ID, utxo *avax.UTXO) - GetSubnets() ([]*txs.Tx, error) AddSubnet(createSubnetTx *txs.Tx) + GetSubnetOwner(subnetID ids.ID) (fx.Owner, error) + SetSubnetOwner(subnetID ids.ID, owner fx.Owner) + GetSubnetTransformation(subnetID ids.ID) (*txs.Tx, error) AddSubnetTransformation(transformSubnetTx *txs.Tx) - GetChains(subnetID ids.ID) ([]*txs.Tx, error) AddChain(createChainTx *txs.Tx) GetTx(txID ids.ID) (*txs.Tx, status.Status, error) @@ -121,24 +130,71 @@ type State interface { GetLastAccepted() ids.ID SetLastAccepted(blkID ids.ID) - GetStatelessBlock(blockID ids.ID) (blocks.Block, choices.Status, error) - AddStatelessBlock(block blocks.Block, status choices.Status) + GetStatelessBlock(blockID ids.ID) (block.Block, error) - // ValidatorSet adds all the validators and delegators of [subnetID] into - // [vdrs]. - ValidatorSet(subnetID ids.ID, vdrs validators.Set) error + // Invariant: [block] is an accepted block. + AddStatelessBlock(block block.Block) - GetValidatorWeightDiffs(height uint64, subnetID ids.ID) (map[ids.NodeID]*ValidatorWeightDiff, error) + GetBlockIDAtHeight(height uint64) (ids.ID, error) - // Returns a map of node ID --> BLS Public Key for all validators - // that left the Primary Network validator set. 
- GetValidatorPublicKeyDiffs(height uint64) (map[ids.NodeID]*bls.PublicKey, error) + GetRewardUTXOs(txID ids.ID) ([]*avax.UTXO, error) + GetSubnets() ([]*txs.Tx, error) + GetChains(subnetID ids.ID) ([]*txs.Tx, error) + + // ApplyValidatorWeightDiffs iterates from [startHeight] towards the genesis + // block until it has applied all of the diffs up to and including + // [endHeight]. Applying the diffs modifies [validators]. + // + // Invariant: If attempting to generate the validator set for + // [endHeight - 1], [validators] must initially contain the validator + // weights for [startHeight]. + // + // Note: Because this function iterates towards the genesis, [startHeight] + // will typically be greater than or equal to [endHeight]. If [startHeight] + // is less than [endHeight], no diffs will be applied. + ApplyValidatorWeightDiffs( + ctx context.Context, + validators map[ids.NodeID]*validators.GetValidatorOutput, + startHeight uint64, + endHeight uint64, + subnetID ids.ID, + ) error + + // ApplyValidatorPublicKeyDiffs iterates from [startHeight] towards the + // genesis block until it has applied all of the diffs up to and including + // [endHeight]. Applying the diffs modifies [validators]. + // + // Invariant: If attempting to generate the validator set for + // [endHeight - 1], [validators] must initially contain the validator + // weights for [startHeight]. + // + // Note: Because this function iterates towards the genesis, [startHeight] + // will typically be greater than or equal to [endHeight]. If [startHeight] + // is less than [endHeight], no diffs will be applied. + ApplyValidatorPublicKeyDiffs( + ctx context.Context, + validators map[ids.NodeID]*validators.GetValidatorOutput, + startHeight uint64, + endHeight uint64, + ) error SetHeight(height uint64) // Discard uncommitted changes to the database. Abort() + // Returns if the state should be pruned and indexed to remove rejected + // blocks and generate the block height index. 
+ // + // TODO: Remove after v1.11.x is activated + ShouldPrune() (bool, error) + + // Removes rejected blocks from disk and indexes accepted blocks by height. This + // function supports being (and is recommended to be) called asynchronously. + // + // TODO: Remove after v1.11.x is activated + PruneAndIndex(sync.Locker, logging.Logger) error + // Commit changes to the base database. Commit() error @@ -146,11 +202,14 @@ type State interface { // pending changes to the base database. CommitBatch() (database.Batch, error) + Checksum() ids.ID + Close() error } +// TODO: Remove after v1.11.x is activated type stateBlk struct { - Blk blocks.Block + Blk block.Block Bytes []byte `serialize:"true"` Status choices.Status `serialize:"true"` } @@ -184,14 +243,20 @@ type stateBlk struct { * | | '-. subnetDelegator * | | '-. list * | | '-- txID -> nil - * | |-. weight diffs + * | |-. nested weight diffs TODO: Remove once only the flat db is needed * | | '-. height+subnet * | | '-. list * | | '-- nodeID -> weightChange - * | '-. pub key diffs - * | '-. height - * | '-. list - * | '-- nodeID -> public key + * | |-. nested pub key diffs TODO: Remove once only the flat db is needed + * | | '-. height + * | | '-. list + * | | '-- nodeID -> compressed public key + * | |-. flat weight diffs + * | | '-- subnet+height+nodeID -> weightChange + * | '-. flat pub key diffs + * | '-- subnet+height+nodeID -> uncompressed public key or nil + * |-. blockIDs + * | '-- height -> blockID * |-. blocks * | '-- blockID -> block bytes * |-. txs @@ -205,24 +270,28 @@ type stateBlk struct { * |-. subnets * | '-. list * | '-- txID -> nil + * |-. subnetOwners + * | '-. subnetID -> owner * |-. chains * | '-. subnetID * | '-. list * | '-- txID -> nil * '-. 
singletons * |-- initializedKey -> nil + * |-- prunedKey -> nil * |-- timestampKey -> timestamp * |-- currentSupplyKey -> currentSupply - * '-- lastAcceptedKey -> lastAccepted + * |-- lastAcceptedKey -> lastAccepted + * '-- heightsIndexKey -> startIndexHeight + endIndexHeight */ type state struct { validatorState - cfg *config.Config - ctx *snow.Context - metrics metrics.Metrics - rewards reward.Calculator - bootstrapped *utils.Atomic[bool] + validators validators.Manager + ctx *snow.Context + cfg *config.Config + metrics metrics.Metrics + rewards reward.Calculator baseDB *versiondb.Database @@ -231,11 +300,13 @@ type state struct { currentHeight uint64 - addedBlocks map[ids.ID]stateBlk // map of blockID -> Block - // cache of blockID -> Block - // If the block isn't known, nil is cached. - blockCache cache.Cacher[ids.ID, *stateBlk] - blockDB database.Database + addedBlockIDs map[uint64]ids.ID // map of height -> blockID + blockIDCache cache.Cacher[uint64, ids.ID] // cache of height -> blockID. If the entry is ids.Empty, it is not in the database + blockIDDB database.Database + + addedBlocks map[ids.ID]block.Block // map of blockID -> Block + blockCache cache.Cacher[ids.ID, block.Block] // cache of blockID -> Block. 
If the entry is nil, it is not in the database + blockDB database.Database validatorsDB database.Database currentValidatorsDB database.Database @@ -257,11 +328,10 @@ type state struct { pendingSubnetDelegatorBaseDB database.Database pendingSubnetDelegatorList linkeddb.LinkedDB - validatorWeightDiffsCache cache.Cacher[string, map[ids.NodeID]*ValidatorWeightDiff] // cache of heightWithSubnet -> map[ids.NodeID]*ValidatorWeightDiff - validatorWeightDiffsDB database.Database - - validatorPublicKeyDiffsCache cache.Cacher[uint64, map[ids.NodeID]*bls.PublicKey] // cache of height -> map[ids.NodeID]*bls.PublicKey - validatorPublicKeyDiffsDB database.Database + nestedValidatorWeightDiffsDB database.Database + nestedValidatorPublicKeyDiffsDB database.Database + flatValidatorWeightDiffsDB database.Database + flatValidatorPublicKeyDiffsDB database.Database addedTxs map[ids.ID]*txAndStatus // map of txID -> {*txs.Tx, Status} txCache cache.Cacher[ids.ID, *txAndStatus] // txID -> {*txs.Tx, Status}. If the entry is nil, it isn't in the database @@ -280,6 +350,11 @@ type state struct { subnetBaseDB database.Database subnetDB linkeddb.LinkedDB + // Subnet ID --> Owner of the subnet + subnetOwners map[ids.ID]fx.Owner + subnetOwnerCache cache.Cacher[ids.ID, fxOwnerAndSize] // cache of subnetID -> owner if the entry is nil, it is not in the database + subnetOwnerDB database.Database + transformedSubnets map[ids.ID]*txs.Tx // map of subnetID -> transformSubnetTx transformedSubnetCache cache.Cacher[ids.ID, *txs.Tx] // cache of subnetID -> transformSubnetTx if the entry is nil, it is not in the database transformedSubnetDB database.Database @@ -298,9 +373,20 @@ type state struct { currentSupply, persistedCurrentSupply uint64 // [lastAccepted] is the most recently accepted block. 
lastAccepted, persistedLastAccepted ids.ID + indexedHeights *heightRange singletonDB database.Database } +// heightRange is used to track which heights are safe to use the native DB +// iterator for querying validator diffs. +// +// TODO: Remove once we are guaranteed nodes can not rollback to not support the +// new indexing mechanism. +type heightRange struct { + LowerBound uint64 `serialize:"true"` + UpperBound uint64 `serialize:"true"` +} + type ValidatorWeightDiff struct { Decrease bool `serialize:"true"` Amount uint64 `serialize:"true"` @@ -309,14 +395,14 @@ type ValidatorWeightDiff struct { func (v *ValidatorWeightDiff) Add(negative bool, amount uint64) error { if v.Decrease == negative { var err error - v.Amount, err = math.Add64(v.Amount, amount) + v.Amount, err = safemath.Add64(v.Amount, amount) return err } if v.Amount > amount { v.Amount -= amount } else { - v.Amount = math.AbsDiff(v.Amount, amount) + v.Amount = safemath.AbsDiff(v.Amount, amount) v.Decrease = negative } return nil @@ -337,24 +423,50 @@ type txAndStatus struct { status status.Status } +type fxOwnerAndSize struct { + owner fx.Owner + size int +} + +func txSize(_ ids.ID, tx *txs.Tx) int { + if tx == nil { + return ids.IDLen + constants.PointerOverhead + } + return ids.IDLen + len(tx.Bytes()) + constants.PointerOverhead +} + +func txAndStatusSize(_ ids.ID, t *txAndStatus) int { + if t == nil { + return ids.IDLen + constants.PointerOverhead + } + return ids.IDLen + len(t.tx.Bytes()) + wrappers.IntLen + 2*constants.PointerOverhead +} + +func blockSize(_ ids.ID, blk block.Block) int { + if blk == nil { + return ids.IDLen + constants.PointerOverhead + } + return ids.IDLen + len(blk.Bytes()) + constants.PointerOverhead +} + func New( db database.Database, genesisBytes []byte, metricsReg prometheus.Registerer, cfg *config.Config, + execCfg *config.ExecutionConfig, ctx *snow.Context, metrics metrics.Metrics, rewards reward.Calculator, - bootstrapped *utils.Atomic[bool], ) (State, error) { - s, err 
:= new( + s, err := newState( db, metrics, cfg, + execCfg, ctx, metricsReg, rewards, - bootstrapped, ) if err != nil { return nil, err @@ -367,22 +479,52 @@ func New( return nil, err } + // Before we start accepting new blocks, we check if the pruning process needs + // to be run. + // + // TODO: Cleanup after v1.11.x is activated + shouldPrune, err := s.ShouldPrune() + if err != nil { + return nil, err + } + if shouldPrune { + // If the pruned key is on disk, we must delete it to ensure our disk + // can't get into a partially pruned state if the node restarts mid-way + // through pruning. + if err := s.singletonDB.Delete(PrunedKey); err != nil { + return nil, fmt.Errorf("failed to remove prunedKey from singletonDB: %w", err) + } + + if err := s.Commit(); err != nil { + return nil, fmt.Errorf("failed to commit to baseDB: %w", err) + } + } + return s, nil } -func new( +func newState( db database.Database, metrics metrics.Metrics, cfg *config.Config, + execCfg *config.ExecutionConfig, ctx *snow.Context, metricsReg prometheus.Registerer, rewards reward.Calculator, - bootstrapped *utils.Atomic[bool], ) (*state, error) { - blockCache, err := metercacher.New[ids.ID, *stateBlk]( + blockIDCache, err := metercacher.New[uint64, ids.ID]( + "block_id_cache", + metricsReg, + &cache.LRU[uint64, ids.ID]{Size: execCfg.BlockIDCacheSize}, + ) + if err != nil { + return nil, err + } + + blockCache, err := metercacher.New[ids.ID, block.Block]( "block_cache", metricsReg, - &cache.LRU[ids.ID, *stateBlk]{Size: blockCacheSize}, + cache.NewSizedLRU[ids.ID, block.Block](execCfg.BlockCacheSize, blockSize), ) if err != nil { return nil, err @@ -390,71 +532,68 @@ func new( baseDB := versiondb.New(db) - validatorsDB := prefixdb.New(validatorsPrefix, baseDB) + validatorsDB := prefixdb.New(ValidatorsPrefix, baseDB) + + currentValidatorsDB := prefixdb.New(CurrentPrefix, validatorsDB) + currentValidatorBaseDB := prefixdb.New(ValidatorPrefix, currentValidatorsDB) + currentDelegatorBaseDB := 
prefixdb.New(DelegatorPrefix, currentValidatorsDB) + currentSubnetValidatorBaseDB := prefixdb.New(SubnetValidatorPrefix, currentValidatorsDB) + currentSubnetDelegatorBaseDB := prefixdb.New(SubnetDelegatorPrefix, currentValidatorsDB) - currentValidatorsDB := prefixdb.New(currentPrefix, validatorsDB) - currentValidatorBaseDB := prefixdb.New(validatorPrefix, currentValidatorsDB) - currentDelegatorBaseDB := prefixdb.New(delegatorPrefix, currentValidatorsDB) - currentSubnetValidatorBaseDB := prefixdb.New(subnetValidatorPrefix, currentValidatorsDB) - currentSubnetDelegatorBaseDB := prefixdb.New(subnetDelegatorPrefix, currentValidatorsDB) + pendingValidatorsDB := prefixdb.New(PendingPrefix, validatorsDB) + pendingValidatorBaseDB := prefixdb.New(ValidatorPrefix, pendingValidatorsDB) + pendingDelegatorBaseDB := prefixdb.New(DelegatorPrefix, pendingValidatorsDB) + pendingSubnetValidatorBaseDB := prefixdb.New(SubnetValidatorPrefix, pendingValidatorsDB) + pendingSubnetDelegatorBaseDB := prefixdb.New(SubnetDelegatorPrefix, pendingValidatorsDB) - pendingValidatorsDB := prefixdb.New(pendingPrefix, validatorsDB) - pendingValidatorBaseDB := prefixdb.New(validatorPrefix, pendingValidatorsDB) - pendingDelegatorBaseDB := prefixdb.New(delegatorPrefix, pendingValidatorsDB) - pendingSubnetValidatorBaseDB := prefixdb.New(subnetValidatorPrefix, pendingValidatorsDB) - pendingSubnetDelegatorBaseDB := prefixdb.New(subnetDelegatorPrefix, pendingValidatorsDB) + nestedValidatorWeightDiffsDB := prefixdb.New(NestedValidatorWeightDiffsPrefix, validatorsDB) + nestedValidatorPublicKeyDiffsDB := prefixdb.New(NestedValidatorPublicKeyDiffsPrefix, validatorsDB) + flatValidatorWeightDiffsDB := prefixdb.New(FlatValidatorWeightDiffsPrefix, validatorsDB) + flatValidatorPublicKeyDiffsDB := prefixdb.New(FlatValidatorPublicKeyDiffsPrefix, validatorsDB) - validatorWeightDiffsDB := prefixdb.New(validatorWeightDiffsPrefix, validatorsDB) - validatorWeightDiffsCache, err := metercacher.New[string, 
map[ids.NodeID]*ValidatorWeightDiff]( - "validator_weight_diffs_cache", + txCache, err := metercacher.New( + "tx_cache", metricsReg, - &cache.LRU[string, map[ids.NodeID]*ValidatorWeightDiff]{Size: validatorDiffsCacheSize}, + cache.NewSizedLRU[ids.ID, *txAndStatus](execCfg.TxCacheSize, txAndStatusSize), ) if err != nil { return nil, err } - validatorPublicKeyDiffsDB := prefixdb.New(validatorPublicKeyDiffsPrefix, validatorsDB) - validatorPublicKeyDiffsCache, err := metercacher.New[uint64, map[ids.NodeID]*bls.PublicKey]( - "validator_pub_key_diffs_cache", + rewardUTXODB := prefixdb.New(RewardUTXOsPrefix, baseDB) + rewardUTXOsCache, err := metercacher.New[ids.ID, []*avax.UTXO]( + "reward_utxos_cache", metricsReg, - &cache.LRU[uint64, map[ids.NodeID]*bls.PublicKey]{Size: validatorDiffsCacheSize}, + &cache.LRU[ids.ID, []*avax.UTXO]{Size: execCfg.RewardUTXOsCacheSize}, ) if err != nil { return nil, err } - txCache, err := metercacher.New[ids.ID, *txAndStatus]( - "tx_cache", - metricsReg, - &cache.LRU[ids.ID, *txAndStatus]{Size: txCacheSize}, - ) + utxoDB := prefixdb.New(UTXOPrefix, baseDB) + utxoState, err := avax.NewMeteredUTXOState(utxoDB, txs.GenesisCodec, metricsReg, execCfg.ChecksumsEnabled) if err != nil { return nil, err } - rewardUTXODB := prefixdb.New(rewardUTXOsPrefix, baseDB) - rewardUTXOsCache, err := metercacher.New[ids.ID, []*avax.UTXO]( - "reward_utxos_cache", + subnetBaseDB := prefixdb.New(SubnetPrefix, baseDB) + + subnetOwnerDB := prefixdb.New(SubnetOwnerPrefix, baseDB) + subnetOwnerCache, err := metercacher.New[ids.ID, fxOwnerAndSize]( + "subnet_owner_cache", metricsReg, - &cache.LRU[ids.ID, []*avax.UTXO]{Size: rewardUTXOsCacheSize}, + cache.NewSizedLRU[ids.ID, fxOwnerAndSize](execCfg.FxOwnerCacheSize, func(_ ids.ID, f fxOwnerAndSize) int { + return ids.IDLen + f.size + }), ) if err != nil { return nil, err } - utxoDB := prefixdb.New(utxoPrefix, baseDB) - utxoState, err := avax.NewMeteredUTXOState(utxoDB, txs.GenesisCodec, metricsReg) - if err != nil { - 
return nil, err - } - - subnetBaseDB := prefixdb.New(subnetPrefix, baseDB) - - transformedSubnetCache, err := metercacher.New[ids.ID, *txs.Tx]( + transformedSubnetCache, err := metercacher.New( "transformed_subnet_cache", metricsReg, - &cache.LRU[ids.ID, *txs.Tx]{Size: chainCacheSize}, + cache.NewSizedLRU[ids.ID, *txs.Tx](execCfg.TransformedSubnetTxCacheSize, txSize), ) if err != nil { return nil, err @@ -463,7 +602,7 @@ func new( supplyCache, err := metercacher.New[ids.ID, *uint64]( "supply_cache", metricsReg, - &cache.LRU[ids.ID, *uint64]{Size: chainCacheSize}, + &cache.LRU[ids.ID, *uint64]{Size: execCfg.ChainCacheSize}, ) if err != nil { return nil, err @@ -472,7 +611,7 @@ func new( chainCache, err := metercacher.New[ids.ID, []*txs.Tx]( "chain_cache", metricsReg, - &cache.LRU[ids.ID, []*txs.Tx]{Size: chainCacheSize}, + &cache.LRU[ids.ID, []*txs.Tx]{Size: execCfg.ChainCacheSize}, ) if err != nil { return nil, err @@ -481,7 +620,7 @@ func new( chainDBCache, err := metercacher.New[ids.ID, linkeddb.LinkedDB]( "chain_db_cache", metricsReg, - &cache.LRU[ids.ID, linkeddb.LinkedDB]{Size: chainDBCacheSize}, + &cache.LRU[ids.ID, linkeddb.LinkedDB]{Size: execCfg.ChainDBCacheSize}, ) if err != nil { return nil, err @@ -490,46 +629,50 @@ func new( return &state{ validatorState: newValidatorState(), - cfg: cfg, - ctx: ctx, - metrics: metrics, - rewards: rewards, - bootstrapped: bootstrapped, - baseDB: baseDB, + validators: cfg.Validators, + ctx: ctx, + cfg: cfg, + metrics: metrics, + rewards: rewards, + baseDB: baseDB, + + addedBlockIDs: make(map[uint64]ids.ID), + blockIDCache: blockIDCache, + blockIDDB: prefixdb.New(BlockIDPrefix, baseDB), - addedBlocks: make(map[ids.ID]stateBlk), + addedBlocks: make(map[ids.ID]block.Block), blockCache: blockCache, - blockDB: prefixdb.New(blockPrefix, baseDB), + blockDB: prefixdb.New(BlockPrefix, baseDB), currentStakers: newBaseStakers(), pendingStakers: newBaseStakers(), - validatorsDB: validatorsDB, - currentValidatorsDB: 
currentValidatorsDB, - currentValidatorBaseDB: currentValidatorBaseDB, - currentValidatorList: linkeddb.NewDefault(currentValidatorBaseDB), - currentDelegatorBaseDB: currentDelegatorBaseDB, - currentDelegatorList: linkeddb.NewDefault(currentDelegatorBaseDB), - currentSubnetValidatorBaseDB: currentSubnetValidatorBaseDB, - currentSubnetValidatorList: linkeddb.NewDefault(currentSubnetValidatorBaseDB), - currentSubnetDelegatorBaseDB: currentSubnetDelegatorBaseDB, - currentSubnetDelegatorList: linkeddb.NewDefault(currentSubnetDelegatorBaseDB), - pendingValidatorsDB: pendingValidatorsDB, - pendingValidatorBaseDB: pendingValidatorBaseDB, - pendingValidatorList: linkeddb.NewDefault(pendingValidatorBaseDB), - pendingDelegatorBaseDB: pendingDelegatorBaseDB, - pendingDelegatorList: linkeddb.NewDefault(pendingDelegatorBaseDB), - pendingSubnetValidatorBaseDB: pendingSubnetValidatorBaseDB, - pendingSubnetValidatorList: linkeddb.NewDefault(pendingSubnetValidatorBaseDB), - pendingSubnetDelegatorBaseDB: pendingSubnetDelegatorBaseDB, - pendingSubnetDelegatorList: linkeddb.NewDefault(pendingSubnetDelegatorBaseDB), - validatorWeightDiffsDB: validatorWeightDiffsDB, - validatorWeightDiffsCache: validatorWeightDiffsCache, - validatorPublicKeyDiffsCache: validatorPublicKeyDiffsCache, - validatorPublicKeyDiffsDB: validatorPublicKeyDiffsDB, + validatorsDB: validatorsDB, + currentValidatorsDB: currentValidatorsDB, + currentValidatorBaseDB: currentValidatorBaseDB, + currentValidatorList: linkeddb.NewDefault(currentValidatorBaseDB), + currentDelegatorBaseDB: currentDelegatorBaseDB, + currentDelegatorList: linkeddb.NewDefault(currentDelegatorBaseDB), + currentSubnetValidatorBaseDB: currentSubnetValidatorBaseDB, + currentSubnetValidatorList: linkeddb.NewDefault(currentSubnetValidatorBaseDB), + currentSubnetDelegatorBaseDB: currentSubnetDelegatorBaseDB, + currentSubnetDelegatorList: linkeddb.NewDefault(currentSubnetDelegatorBaseDB), + pendingValidatorsDB: pendingValidatorsDB, + 
pendingValidatorBaseDB: pendingValidatorBaseDB, + pendingValidatorList: linkeddb.NewDefault(pendingValidatorBaseDB), + pendingDelegatorBaseDB: pendingDelegatorBaseDB, + pendingDelegatorList: linkeddb.NewDefault(pendingDelegatorBaseDB), + pendingSubnetValidatorBaseDB: pendingSubnetValidatorBaseDB, + pendingSubnetValidatorList: linkeddb.NewDefault(pendingSubnetValidatorBaseDB), + pendingSubnetDelegatorBaseDB: pendingSubnetDelegatorBaseDB, + pendingSubnetDelegatorList: linkeddb.NewDefault(pendingSubnetDelegatorBaseDB), + nestedValidatorWeightDiffsDB: nestedValidatorWeightDiffsDB, + nestedValidatorPublicKeyDiffsDB: nestedValidatorPublicKeyDiffsDB, + flatValidatorWeightDiffsDB: flatValidatorWeightDiffsDB, + flatValidatorPublicKeyDiffsDB: flatValidatorPublicKeyDiffsDB, addedTxs: make(map[ids.ID]*txAndStatus), - txDB: prefixdb.New(txPrefix, baseDB), + txDB: prefixdb.New(TxPrefix, baseDB), txCache: txCache, addedRewardUTXOs: make(map[ids.ID][]*avax.UTXO), @@ -543,20 +686,24 @@ func new( subnetBaseDB: subnetBaseDB, subnetDB: linkeddb.NewDefault(subnetBaseDB), + subnetOwners: make(map[ids.ID]fx.Owner), + subnetOwnerDB: subnetOwnerDB, + subnetOwnerCache: subnetOwnerCache, + transformedSubnets: make(map[ids.ID]*txs.Tx), transformedSubnetCache: transformedSubnetCache, - transformedSubnetDB: prefixdb.New(transformedSubnetPrefix, baseDB), + transformedSubnetDB: prefixdb.New(TransformedSubnetPrefix, baseDB), modifiedSupplies: make(map[ids.ID]uint64), supplyCache: supplyCache, - supplyDB: prefixdb.New(supplyPrefix, baseDB), + supplyDB: prefixdb.New(SupplyPrefix, baseDB), addedChains: make(map[ids.ID][]*txs.Tx), - chainDB: prefixdb.New(chainPrefix, baseDB), + chainDB: prefixdb.New(ChainPrefix, baseDB), chainCache: chainCache, chainDBCache: chainDBCache, - singletonDB: prefixdb.New(singletonPrefix, baseDB), + singletonDB: prefixdb.New(SingletonPrefix, baseDB), }, nil } @@ -621,12 +768,43 @@ func (s *state) GetPendingStakerIterator() (StakerIterator, error) { } func (s *state) 
shouldInit() (bool, error) { - has, err := s.singletonDB.Has(initializedKey) + has, err := s.singletonDB.Has(InitializedKey) return !has, err } func (s *state) doneInit() error { - return s.singletonDB.Put(initializedKey, nil) + return s.singletonDB.Put(InitializedKey, nil) +} + +func (s *state) ShouldPrune() (bool, error) { + has, err := s.singletonDB.Has(PrunedKey) + if err != nil { + return true, err + } + + // If [prunedKey] is not in [singletonDB], [PruneAndIndex()] did not finish + // execution. + if !has { + return true, nil + } + + // To ensure the db was not modified since we last ran [PruneAndIndex()], we + // must verify that [s.lastAccepted] is height indexed. + blk, err := s.GetStatelessBlock(s.lastAccepted) + if err != nil { + return true, err + } + + _, err = s.GetBlockIDAtHeight(blk.Height()) + if err == database.ErrNotFound { + return true, nil + } + + return false, err +} + +func (s *state) donePrune() error { + return s.singletonDB.Put(PrunedKey, nil) } func (s *state) GetSubnets() ([]*txs.Tx, error) { @@ -665,6 +843,55 @@ func (s *state) AddSubnet(createSubnetTx *txs.Tx) { } } +func (s *state) GetSubnetOwner(subnetID ids.ID) (fx.Owner, error) { + if owner, exists := s.subnetOwners[subnetID]; exists { + return owner, nil + } + + if ownerAndSize, cached := s.subnetOwnerCache.Get(subnetID); cached { + if ownerAndSize.owner == nil { + return nil, database.ErrNotFound + } + return ownerAndSize.owner, nil + } + + ownerBytes, err := s.subnetOwnerDB.Get(subnetID[:]) + if err == nil { + var owner fx.Owner + if _, err := block.GenesisCodec.Unmarshal(ownerBytes, &owner); err != nil { + return nil, err + } + s.subnetOwnerCache.Put(subnetID, fxOwnerAndSize{ + owner: owner, + size: len(ownerBytes), + }) + return owner, nil + } + if err != database.ErrNotFound { + return nil, err + } + + subnetIntf, _, err := s.GetTx(subnetID) + if err != nil { + if err == database.ErrNotFound { + s.subnetOwnerCache.Put(subnetID, fxOwnerAndSize{}) + } + return nil, err + } + + 
subnet, ok := subnetIntf.Unsigned.(*txs.CreateSubnetTx) + if !ok { + return nil, fmt.Errorf("%q %w", subnetID, errIsNotSubnet) + } + + s.SetSubnetOwner(subnetID, subnet.Owner) + return subnet.Owner, nil +} + +func (s *state) SetSubnetOwner(subnetID ids.ID, owner fx.Owner) { + s.subnetOwners[subnetID] = owner +} + func (s *state) GetSubnetTransformation(subnetID ids.ID) (*txs.Tx, error) { if tx, exists := s.transformedSubnets[subnetID]; exists { return tx, nil @@ -910,117 +1137,221 @@ func (s *state) SetCurrentSupply(subnetID ids.ID, cs uint64) { } } -func (s *state) ValidatorSet(subnetID ids.ID, vdrs validators.Set) error { - for nodeID, validator := range s.currentStakers.validators[subnetID] { - staker := validator.validator - if err := vdrs.Add(nodeID, staker.PublicKey, staker.TxID, staker.Weight); err != nil { +func (s *state) ApplyValidatorWeightDiffs( + ctx context.Context, + validators map[ids.NodeID]*validators.GetValidatorOutput, + startHeight uint64, + endHeight uint64, + subnetID ids.ID, +) error { + diffIter := s.flatValidatorWeightDiffsDB.NewIteratorWithStartAndPrefix( + marshalStartDiffKey(subnetID, startHeight), + subnetID[:], + ) + defer diffIter.Release() + + prevHeight := startHeight + 1 + // TODO: Remove the index continuity checks once we are guaranteed nodes can + // not rollback to not support the new indexing mechanism. 
+ for diffIter.Next() && s.indexedHeights != nil && s.indexedHeights.LowerBound <= endHeight { + if err := ctx.Err(); err != nil { return err } - delegatorIterator := NewTreeIterator(validator.delegators) - for delegatorIterator.Next() { - staker := delegatorIterator.Value() - if err := vdrs.AddWeight(nodeID, staker.Weight); err != nil { - delegatorIterator.Release() - return err - } + _, parsedHeight, nodeID, err := unmarshalDiffKey(diffIter.Key()) + if err != nil { + return err } - delegatorIterator.Release() - } - return nil -} -func (s *state) GetValidatorWeightDiffs(height uint64, subnetID ids.ID) (map[ids.NodeID]*ValidatorWeightDiff, error) { - prefixStruct := heightWithSubnet{ - Height: height, - SubnetID: subnetID, - } - prefixBytes, err := blocks.GenesisCodec.Marshal(blocks.Version, prefixStruct) - if err != nil { - return nil, err - } - prefixStr := string(prefixBytes) + if parsedHeight > prevHeight { + s.ctx.Log.Error("unexpected parsed height", + zap.Stringer("subnetID", subnetID), + zap.Uint64("parsedHeight", parsedHeight), + zap.Stringer("nodeID", nodeID), + zap.Uint64("prevHeight", prevHeight), + zap.Uint64("startHeight", startHeight), + zap.Uint64("endHeight", endHeight), + ) + } - if weightDiffs, ok := s.validatorWeightDiffsCache.Get(prefixStr); ok { - return weightDiffs, nil - } + // If the parsedHeight is less than our target endHeight, then we have + // fully processed the diffs from startHeight through endHeight. 
+ if parsedHeight < endHeight { + return diffIter.Error() + } - rawDiffDB := prefixdb.New(prefixBytes, s.validatorWeightDiffsDB) - diffDB := linkeddb.NewDefault(rawDiffDB) - diffIter := diffDB.NewIterator() - defer diffIter.Release() + prevHeight = parsedHeight - weightDiffs := make(map[ids.NodeID]*ValidatorWeightDiff) - for diffIter.Next() { - nodeID, err := ids.ToNodeID(diffIter.Key()) + weightDiff, err := unmarshalWeightDiff(diffIter.Value()) if err != nil { - return nil, err + return err + } + + if err := applyWeightDiff(validators, nodeID, weightDiff); err != nil { + return err + } + } + if err := diffIter.Error(); err != nil { + return err + } + + // TODO: Remove this once it is assumed that all subnet validators have + // adopted the new indexing. + for height := prevHeight - 1; height >= endHeight; height-- { + if err := ctx.Err(); err != nil { + return err } - weightDiff := ValidatorWeightDiff{} - _, err = blocks.GenesisCodec.Unmarshal(diffIter.Value(), &weightDiff) + prefixStruct := heightWithSubnet{ + Height: height, + SubnetID: subnetID, + } + prefixBytes, err := block.GenesisCodec.Marshal(block.CodecVersion, prefixStruct) if err != nil { - return nil, err + return err } - weightDiffs[nodeID] = &weightDiff + rawDiffDB := prefixdb.New(prefixBytes, s.nestedValidatorWeightDiffsDB) + diffDB := linkeddb.NewDefault(rawDiffDB) + diffIter := diffDB.NewIterator() + defer diffIter.Release() + + for diffIter.Next() { + nodeID, err := ids.ToNodeID(diffIter.Key()) + if err != nil { + return err + } + + weightDiff := ValidatorWeightDiff{} + _, err = block.GenesisCodec.Unmarshal(diffIter.Value(), &weightDiff) + if err != nil { + return err + } + + if err := applyWeightDiff(validators, nodeID, &weightDiff); err != nil { + return err + } + } } - s.validatorWeightDiffsCache.Put(prefixStr, weightDiffs) - return weightDiffs, diffIter.Error() + return nil } -func (s *state) GetValidatorPublicKeyDiffs(height uint64) (map[ids.NodeID]*bls.PublicKey, error) { - if 
publicKeyDiffs, ok := s.validatorPublicKeyDiffsCache.Get(height); ok { - return publicKeyDiffs, nil +func applyWeightDiff( + vdrs map[ids.NodeID]*validators.GetValidatorOutput, + nodeID ids.NodeID, + weightDiff *ValidatorWeightDiff, +) error { + vdr, ok := vdrs[nodeID] + if !ok { + // This node isn't in the current validator set. + vdr = &validators.GetValidatorOutput{ + NodeID: nodeID, + } + vdrs[nodeID] = vdr } - heightBytes := database.PackUInt64(height) - rawDiffDB := prefixdb.New(heightBytes, s.validatorPublicKeyDiffsDB) - diffDB := linkeddb.NewDefault(rawDiffDB) - diffIter := diffDB.NewIterator() + // The weight of this node changed at this block. + var err error + if weightDiff.Decrease { + // The validator's weight was decreased at this block, so in the + // prior block it was higher. + vdr.Weight, err = safemath.Add64(vdr.Weight, weightDiff.Amount) + } else { + // The validator's weight was increased at this block, so in the + // prior block it was lower. + vdr.Weight, err = safemath.Sub(vdr.Weight, weightDiff.Amount) + } + if err != nil { + return err + } + + if vdr.Weight == 0 { + // The validator's weight was 0 before this block so they weren't in the + // validator set. 
+ delete(vdrs, nodeID) + } + return nil +} + +func (s *state) ApplyValidatorPublicKeyDiffs( + ctx context.Context, + validators map[ids.NodeID]*validators.GetValidatorOutput, + startHeight uint64, + endHeight uint64, +) error { + diffIter := s.flatValidatorPublicKeyDiffsDB.NewIteratorWithStartAndPrefix( + marshalStartDiffKey(constants.PrimaryNetworkID, startHeight), + constants.PrimaryNetworkID[:], + ) defer diffIter.Release() - pkDiffs := make(map[ids.NodeID]*bls.PublicKey) for diffIter.Next() { - nodeID, err := ids.ToNodeID(diffIter.Key()) + if err := ctx.Err(); err != nil { + return err + } + + _, parsedHeight, nodeID, err := unmarshalDiffKey(diffIter.Key()) if err != nil { - return nil, err + return err + } + // If the parsedHeight is less than our target endHeight, then we have + // fully processed the diffs from startHeight through endHeight. + if parsedHeight < endHeight { + break + } + + vdr, ok := validators[nodeID] + if !ok { + continue } pkBytes := diffIter.Value() - pk, err := bls.PublicKeyFromBytes(pkBytes) - if err != nil { - return nil, err + if len(pkBytes) == 0 { + vdr.PublicKey = nil + continue } - pkDiffs[nodeID] = pk + + vdr.PublicKey = bls.DeserializePublicKey(pkBytes) } - s.validatorPublicKeyDiffsCache.Put(height, pkDiffs) - return pkDiffs, diffIter.Error() + // Note: this does not fallback to the linkeddb index because the linkeddb + // index does not contain entries for when to remove the public key. + // + // Nodes may see inconsistent public keys for heights before the new public + // key index was populated. 
+ return diffIter.Error() } -func (s *state) syncGenesis(genesisBlk blocks.Block, genesis *genesis.State) error { +func (s *state) syncGenesis(genesisBlk block.Block, genesis *genesis.Genesis) error { genesisBlkID := genesisBlk.ID() s.SetLastAccepted(genesisBlkID) s.SetTimestamp(time.Unix(int64(genesis.Timestamp), 0)) s.SetCurrentSupply(constants.PrimaryNetworkID, genesis.InitialSupply) - s.AddStatelessBlock(genesisBlk, choices.Accepted) + s.AddStatelessBlock(genesisBlk) // Persist UTXOs that exist at genesis for _, utxo := range genesis.UTXOs { - s.AddUTXO(utxo) + avaxUTXO := utxo.UTXO + s.AddUTXO(&avaxUTXO) } // Persist primary network validator set at genesis for _, vdrTx := range genesis.Validators { - tx, ok := vdrTx.Unsigned.(*txs.AddValidatorTx) + // We expect genesis validator txs to be either AddValidatorTx or + // AddPermissionlessValidatorTx. + // + // TODO: Enforce stricter type check + validatorTx, ok := vdrTx.Unsigned.(txs.ScheduledStaker) if !ok { - return fmt.Errorf("expected tx type *txs.AddValidatorTx but got %T", vdrTx.Unsigned) + return fmt.Errorf("expected a scheduled staker but got %T", vdrTx.Unsigned) } - stakeAmount := tx.Validator.Wght - stakeDuration := tx.Validator.Duration() + stakeAmount := validatorTx.Weight() + // Note: We use [StartTime()] here because genesis transactions are + // guaranteed to be pre-Durango activation. 
+ startTime := validatorTx.StartTime() + stakeDuration := validatorTx.EndTime().Sub(startTime) currentSupply, err := s.GetCurrentSupply(constants.PrimaryNetworkID) if err != nil { return err @@ -1031,12 +1362,12 @@ func (s *state) syncGenesis(genesisBlk blocks.Block, genesis *genesis.State) err stakeAmount, currentSupply, ) - newCurrentSupply, err := math.Add64(currentSupply, potentialReward) + newCurrentSupply, err := safemath.Add64(currentSupply, potentialReward) if err != nil { return err } - staker, err := NewCurrentStaker(vdrTx.ID(), tx, potentialReward) + staker, err := NewCurrentStaker(vdrTx.ID(), validatorTx, startTime, potentialReward) if err != nil { return err } @@ -1070,37 +1401,62 @@ func (s *state) syncGenesis(genesisBlk blocks.Block, genesis *genesis.State) err // Load pulls data previously stored on disk that is expected to be in memory. func (s *state) load() error { - errs := wrappers.Errs{} - errs.Add( + return utils.Err( s.loadMetadata(), s.loadCurrentValidators(), s.loadPendingValidators(), s.initValidatorSets(), ) - return errs.Err } func (s *state) loadMetadata() error { - timestamp, err := database.GetTimestamp(s.singletonDB, timestampKey) + timestamp, err := database.GetTimestamp(s.singletonDB, TimestampKey) if err != nil { return err } s.persistedTimestamp = timestamp s.SetTimestamp(timestamp) - currentSupply, err := database.GetUInt64(s.singletonDB, currentSupplyKey) + currentSupply, err := database.GetUInt64(s.singletonDB, CurrentSupplyKey) if err != nil { return err } s.persistedCurrentSupply = currentSupply s.SetCurrentSupply(constants.PrimaryNetworkID, currentSupply) - lastAccepted, err := database.GetID(s.singletonDB, lastAcceptedKey) + lastAccepted, err := database.GetID(s.singletonDB, LastAcceptedKey) if err != nil { return err } s.persistedLastAccepted = lastAccepted s.lastAccepted = lastAccepted + + // Lookup the most recently indexed range on disk. 
If we haven't started + // indexing the weights, then we keep the indexed heights as nil. + indexedHeightsBytes, err := s.singletonDB.Get(HeightsIndexedKey) + if err == database.ErrNotFound { + return nil + } + if err != nil { + return err + } + + indexedHeights := &heightRange{} + _, err = block.GenesisCodec.Unmarshal(indexedHeightsBytes, indexedHeights) + if err != nil { + return err + } + + // If the indexed range is not up to date, then we will act as if the range + // doesn't exist. + lastAcceptedBlock, err := s.GetStatelessBlock(lastAccepted) + if err != nil { + return err + } + if indexedHeights.UpperBound != lastAcceptedBlock.Height() { + return nil + } + s.indexedHeights = indexedHeights return nil } @@ -1117,25 +1473,35 @@ func (s *state) loadCurrentValidators() error { } tx, _, err := s.GetTx(txID) if err != nil { - return err + return fmt.Errorf("failed loading validator transaction txID %s, %w", txID, err) + } + + stakerTx, ok := tx.Unsigned.(txs.Staker) + if !ok { + return fmt.Errorf("expected tx type txs.Staker but got %T", tx.Unsigned) } metadataBytes := validatorIt.Value() metadata := &validatorMetadata{ txID: txID, - // Note: we don't provide [LastUpdated] here because we expect it to + } + if scheduledStakerTx, ok := tx.Unsigned.(txs.ScheduledStaker); ok { + // Populate [StakerStartTime] using the tx as a default in the event + // it was added pre-durango and is not stored in the database. + // + // Note: We do not populate [LastUpdated] since it is expected to // always be present on disk. 
+ metadata.StakerStartTime = uint64(scheduledStakerTx.StartTime().Unix()) } if err := parseValidatorMetadata(metadataBytes, metadata); err != nil { return err } - stakerTx, ok := tx.Unsigned.(txs.Staker) - if !ok { - return fmt.Errorf("expected tx type txs.Staker but got %T", tx.Unsigned) - } - - staker, err := NewCurrentStaker(txID, stakerTx, metadata.PotentialReward) + staker, err := NewCurrentStaker( + txID, + stakerTx, + time.Unix(int64(metadata.StakerStartTime), 0), + metadata.PotentialReward) if err != nil { return err } @@ -1169,15 +1535,24 @@ func (s *state) loadCurrentValidators() error { metadataBytes := subnetValidatorIt.Value() metadata := &validatorMetadata{ txID: txID, - // use the start time as the fallback value - // in case it's not stored in the database - LastUpdated: uint64(stakerTx.StartTime().Unix()), + } + if scheduledStakerTx, ok := tx.Unsigned.(txs.ScheduledStaker); ok { + // Populate [StakerStartTime] and [LastUpdated] using the tx as a + // default in the event they are not stored in the database. 
+ startTime := uint64(scheduledStakerTx.StartTime().Unix()) + metadata.StakerStartTime = startTime + metadata.LastUpdated = startTime } if err := parseValidatorMetadata(metadataBytes, metadata); err != nil { return err } - staker, err := NewCurrentStaker(txID, stakerTx, metadata.PotentialReward) + staker, err := NewCurrentStaker( + txID, + stakerTx, + time.Unix(int64(metadata.StakerStartTime), 0), + metadata.PotentialReward, + ) if err != nil { return err } @@ -1207,18 +1582,32 @@ func (s *state) loadCurrentValidators() error { return err } - potentialRewardBytes := delegatorIt.Value() - potentialReward, err := database.ParseUInt64(potentialRewardBytes) - if err != nil { - return err - } - stakerTx, ok := tx.Unsigned.(txs.Staker) if !ok { return fmt.Errorf("expected tx type txs.Staker but got %T", tx.Unsigned) } - staker, err := NewCurrentStaker(txID, stakerTx, potentialReward) + metadataBytes := delegatorIt.Value() + metadata := &delegatorMetadata{ + txID: txID, + } + if scheduledStakerTx, ok := tx.Unsigned.(txs.ScheduledStaker); ok { + // Populate [StakerStartTime] using the tx as a default in the + // event it was added pre-durango and is not stored in the + // database. 
+ metadata.StakerStartTime = uint64(scheduledStakerTx.StartTime().Unix()) + } + err = parseDelegatorMetadata(metadataBytes, metadata) + if err != nil { + return err + } + + staker, err := NewCurrentStaker( + txID, + stakerTx, + time.Unix(int64(metadata.StakerStartTime), 0), + metadata.PotentialReward, + ) if err != nil { return err } @@ -1233,14 +1622,12 @@ func (s *state) loadCurrentValidators() error { } } - errs := wrappers.Errs{} - errs.Add( + return utils.Err( validatorIt.Error(), subnetValidatorIt.Error(), delegatorIt.Error(), subnetDelegatorIt.Error(), ) - return errs.Err } func (s *state) loadPendingValidators() error { @@ -1264,7 +1651,7 @@ func (s *state) loadPendingValidators() error { return err } - stakerTx, ok := tx.Unsigned.(txs.Staker) + stakerTx, ok := tx.Unsigned.(txs.ScheduledStaker) if !ok { return fmt.Errorf("expected tx type txs.Staker but got %T", tx.Unsigned) } @@ -1299,7 +1686,7 @@ func (s *state) loadPendingValidators() error { return err } - stakerTx, ok := tx.Unsigned.(txs.Staker) + stakerTx, ok := tx.Unsigned.(txs.ScheduledStaker) if !ok { return fmt.Errorf("expected tx type txs.Staker but got %T", tx.Unsigned) } @@ -1319,77 +1706,75 @@ func (s *state) loadPendingValidators() error { } } - errs := wrappers.Errs{} - errs.Add( + return utils.Err( validatorIt.Error(), subnetValidatorIt.Error(), delegatorIt.Error(), subnetDelegatorIt.Error(), ) - return errs.Err } // Invariant: initValidatorSets requires loadCurrentValidators to have already // been called. func (s *state) initValidatorSets() error { - primaryValidators, ok := s.cfg.Validators.Get(constants.PrimaryNetworkID) - if !ok { - return errMissingValidatorSet - } - if primaryValidators.Len() != 0 { - // Enforce the invariant that the validator set is empty here. 
- return errValidatorSetAlreadyPopulated - } - err := s.ValidatorSet(constants.PrimaryNetworkID, primaryValidators) - if err != nil { - return err - } - - vl := validators.NewLogger(s.ctx.Log, s.bootstrapped, constants.PrimaryNetworkID, s.ctx.NodeID) - primaryValidators.RegisterCallbackListener(vl) - - s.metrics.SetLocalStake(primaryValidators.GetWeight(s.ctx.NodeID)) - s.metrics.SetTotalStake(primaryValidators.Weight()) - - for subnetID := range s.cfg.TrackedSubnets { - subnetValidators := validators.NewSet() - err := s.ValidatorSet(subnetID, subnetValidators) - if err != nil { - return err + for subnetID, validators := range s.currentStakers.validators { + if s.validators.Count(subnetID) != 0 { + // Enforce the invariant that the validator set is empty here. + return fmt.Errorf("%w: %s", errValidatorSetAlreadyPopulated, subnetID) } - if !s.cfg.Validators.Add(subnetID, subnetValidators) { - return fmt.Errorf("%w: %s", errDuplicateValidatorSet, subnetID) + for nodeID, validator := range validators { + validatorStaker := validator.validator + if err := s.validators.AddStaker(subnetID, nodeID, validatorStaker.PublicKey, validatorStaker.TxID, validatorStaker.Weight); err != nil { + return err + } + + delegatorIterator := NewTreeIterator(validator.delegators) + for delegatorIterator.Next() { + delegatorStaker := delegatorIterator.Value() + if err := s.validators.AddWeight(subnetID, nodeID, delegatorStaker.Weight); err != nil { + delegatorIterator.Release() + return err + } + } + delegatorIterator.Release() } + } - vl := validators.NewLogger(s.ctx.Log, s.bootstrapped, subnetID, s.ctx.NodeID) - subnetValidators.RegisterCallbackListener(vl) + s.metrics.SetLocalStake(s.validators.GetWeight(constants.PrimaryNetworkID, s.ctx.NodeID)) + totalWeight, err := s.validators.TotalWeight(constants.PrimaryNetworkID) + if err != nil { + return fmt.Errorf("failed to get total weight of primary network validators: %w", err) } + s.metrics.SetTotalStake(totalWeight) return nil } func (s 
*state) write(updateValidators bool, height uint64) error { - errs := wrappers.Errs{} - errs.Add( + codecVersion := CodecVersion1 + if !s.cfg.IsDurangoActivated(s.GetTimestamp()) { + codecVersion = CodecVersion0 + } + + return utils.Err( s.writeBlocks(), - s.writeCurrentStakers(updateValidators, height), + s.writeCurrentStakers(updateValidators, height, codecVersion), s.writePendingStakers(), - s.WriteValidatorMetadata(s.currentValidatorList, s.currentSubnetValidatorList), // Must be called after writeCurrentStakers + s.WriteValidatorMetadata(s.currentValidatorList, s.currentSubnetValidatorList, codecVersion), // Must be called after writeCurrentStakers s.writeTXs(), s.writeRewardUTXOs(), s.writeUTXOs(), s.writeSubnets(), + s.writeSubnetOwners(), s.writeTransformedSubnets(), s.writeSubnetSupplies(), s.writeChains(), s.writeMetadata(), ) - return errs.Err } func (s *state) Close() error { - errs := wrappers.Errs{} - errs.Add( + return utils.Err( s.pendingSubnetValidatorBaseDB.Close(), s.pendingSubnetDelegatorBaseDB.Close(), s.pendingDelegatorBaseDB.Close(), @@ -1410,8 +1795,8 @@ func (s *state) Close() error { s.chainDB.Close(), s.singletonDB.Close(), s.blockDB.Close(), + s.blockIDDB.Close(), ) - return errs.Err } func (s *state) sync(genesis []byte) error { @@ -1448,16 +1833,16 @@ func (s *state) init(genesisBytes []byte) error { // genesisBlock.Accept() because then it'd look for genesisBlock's // non-existent parent) genesisID := hashing.ComputeHash256Array(genesisBytes) - genesisBlock, err := blocks.NewApricotCommitBlock(genesisID, 0 /*height*/) + genesisBlock, err := block.NewApricotCommitBlock(genesisID, 0 /*height*/) if err != nil { return err } - genesisState, err := genesis.ParseState(genesisBytes) + genesis, err := genesis.Parse(genesisBytes) if err != nil { return err } - if err := s.syncGenesis(genesisBlock, genesisState); err != nil { + if err := s.syncGenesis(genesisBlock, genesis); err != nil { return err } @@ -1468,15 +1853,23 @@ func (s *state) 
init(genesisBytes []byte) error { return s.Commit() } -func (s *state) AddStatelessBlock(block blocks.Block, status choices.Status) { - s.addedBlocks[block.ID()] = stateBlk{ - Blk: block, - Bytes: block.Bytes(), - Status: status, - } +func (s *state) AddStatelessBlock(block block.Block) { + blkID := block.ID() + s.addedBlockIDs[block.Height()] = blkID + s.addedBlocks[blkID] = block } func (s *state) SetHeight(height uint64) { + if s.indexedHeights == nil { + // If indexedHeights hasn't been created yet, then we are newly tracking + // the range. This means we should initialize the LowerBound to the + // current height. + s.indexedHeights = &heightRange{ + LowerBound: height, + } + } + + s.indexedHeights.UpperBound = height s.currentHeight = height } @@ -1493,6 +1886,10 @@ func (s *state) Abort() { s.baseDB.Abort() } +func (s *state) Checksum() ids.ID { + return s.utxoState.Checksum() +} + func (s *state) CommitBatch() (database.Batch, error) { // updateValidators is set to true here so that the validator manager is // kept up to date with the last accepted state. 
@@ -1503,67 +1900,96 @@ func (s *state) CommitBatch() (database.Batch, error) { } func (s *state) writeBlocks() error { - for blkID, stateBlk := range s.addedBlocks { - var ( - blkID = blkID - stBlk = stateBlk - ) + for blkID, blk := range s.addedBlocks { + blkID := blkID + blkBytes := blk.Bytes() + blkHeight := blk.Height() + heightKey := database.PackUInt64(blkHeight) - // Note: blocks to be stored are verified, so it's safe to marshal them with GenesisCodec - blockBytes, err := blocks.GenesisCodec.Marshal(blocks.Version, &stBlk) - if err != nil { - return fmt.Errorf("failed to marshal block %s to store: %w", blkID, err) + delete(s.addedBlockIDs, blkHeight) + s.blockIDCache.Put(blkHeight, blkID) + if err := database.PutID(s.blockIDDB, heightKey, blkID); err != nil { + return fmt.Errorf("failed to add blockID: %w", err) } delete(s.addedBlocks, blkID) - s.blockCache.Put(blkID, &stBlk) - if err := s.blockDB.Put(blkID[:], blockBytes); err != nil { + // Note: Evict is used rather than Put here because blk may end up + // referencing additional data (because of shared byte slices) that + // would not be properly accounted for in the cache sizing. 
+ s.blockCache.Evict(blkID) + if err := s.blockDB.Put(blkID[:], blkBytes); err != nil { return fmt.Errorf("failed to write block %s: %w", blkID, err) } } return nil } -func (s *state) GetStatelessBlock(blockID ids.ID) (blocks.Block, choices.Status, error) { - if blk, ok := s.addedBlocks[blockID]; ok { - return blk.Blk, blk.Status, nil +func (s *state) GetStatelessBlock(blockID ids.ID) (block.Block, error) { + if blk, exists := s.addedBlocks[blockID]; exists { + return blk, nil } - if blkState, ok := s.blockCache.Get(blockID); ok { - if blkState == nil { - return nil, choices.Processing, database.ErrNotFound + if blk, cached := s.blockCache.Get(blockID); cached { + if blk == nil { + return nil, database.ErrNotFound } - return blkState.Blk, blkState.Status, nil + + return blk, nil } blkBytes, err := s.blockDB.Get(blockID[:]) if err == database.ErrNotFound { s.blockCache.Put(blockID, nil) - return nil, choices.Processing, database.ErrNotFound // status does not matter here - } else if err != nil { - return nil, choices.Processing, err // status does not matter here + return nil, database.ErrNotFound + } + if err != nil { + return nil, err } - // Note: stored blocks are verified, so it's safe to unmarshal them with GenesisCodec - blkState := stateBlk{} - if _, err := blocks.GenesisCodec.Unmarshal(blkBytes, &blkState); err != nil { - return nil, choices.Processing, err // status does not matter here + blk, status, _, err := parseStoredBlock(blkBytes) + if err != nil { + return nil, err + } + + if status != choices.Accepted { + s.blockCache.Put(blockID, nil) + return nil, database.ErrNotFound + } + + s.blockCache.Put(blockID, blk) + return blk, nil +} + +func (s *state) GetBlockIDAtHeight(height uint64) (ids.ID, error) { + if blkID, exists := s.addedBlockIDs[height]; exists { + return blkID, nil + } + if blkID, cached := s.blockIDCache.Get(height); cached { + if blkID == ids.Empty { + return ids.Empty, database.ErrNotFound + } + + return blkID, nil } - blkState.Blk, err 
= blocks.Parse(blocks.GenesisCodec, blkState.Bytes) + heightKey := database.PackUInt64(height) + + blkID, err := database.GetID(s.blockIDDB, heightKey) + if err == database.ErrNotFound { + s.blockIDCache.Put(height, ids.Empty) + return ids.Empty, database.ErrNotFound + } if err != nil { - return nil, choices.Processing, err + return ids.Empty, err } - s.blockCache.Put(blockID, &blkState) - return blkState.Blk, blkState.Status, nil + s.blockIDCache.Put(height, blkID) + return blkID, nil } -func (s *state) writeCurrentStakers(updateValidators bool, height uint64) error { +func (s *state) writeCurrentStakers(updateValidators bool, height uint64, codecVersion uint16) error { heightBytes := database.PackUInt64(height) - rawPublicKeyDiffDB := prefixdb.New(heightBytes, s.validatorPublicKeyDiffsDB) - pkDiffDB := linkeddb.NewDefault(rawPublicKeyDiffDB) - // Node ID --> BLS public key of node before it left the validator set. - pkDiffs := make(map[ids.NodeID]*bls.PublicKey) + rawNestedPublicKeyDiffDB := prefixdb.New(heightBytes, s.nestedValidatorPublicKeyDiffsDB) + nestedPKDiffDB := linkeddb.NewDefault(rawNestedPublicKeyDiffDB) for subnetID, validatorDiffs := range s.currentStakers.validatorDiffs { delete(s.currentStakers.validatorDiffs, subnetID) @@ -1580,13 +2006,12 @@ func (s *state) writeCurrentStakers(updateValidators bool, height uint64) error Height: height, SubnetID: subnetID, } - prefixBytes, err := blocks.GenesisCodec.Marshal(blocks.Version, prefixStruct) + prefixBytes, err := block.GenesisCodec.Marshal(block.CodecVersion, prefixStruct) if err != nil { return fmt.Errorf("failed to create prefix bytes: %w", err) } - rawWeightDiffDB := prefixdb.New(prefixBytes, s.validatorWeightDiffsDB) - weightDiffDB := linkeddb.NewDefault(rawWeightDiffDB) - weightDiffs := make(map[ids.NodeID]*ValidatorWeightDiff) + rawNestedWeightDiffDB := prefixdb.New(prefixBytes, s.nestedValidatorWeightDiffsDB) + nestedWeightDiffDB := linkeddb.NewDefault(rawNestedWeightDiffDB) // Record the 
change in weight and/or public key for each validator. for nodeID, validatorDiff := range validatorDiffs { @@ -1601,21 +2026,38 @@ func (s *state) writeCurrentStakers(updateValidators bool, height uint64) error staker := validatorDiff.validator weightDiff.Amount = staker.Weight + // Invariant: Only the Primary Network contains non-nil public + // keys. + if staker.PublicKey != nil { + // Record that the public key for the validator is being + // added. This means the prior value for the public key was + // nil. + err := s.flatValidatorPublicKeyDiffsDB.Put( + marshalDiffKey(constants.PrimaryNetworkID, height, nodeID), + nil, + ) + if err != nil { + return err + } + } + // The validator is being added. // // Invariant: It's impossible for a delegator to have been // rewarded in the same block that the validator was added. + startTime := uint64(staker.StartTime.Unix()) metadata := &validatorMetadata{ txID: staker.TxID, lastUpdated: staker.StartTime, UpDuration: 0, - LastUpdated: uint64(staker.StartTime.Unix()), + LastUpdated: startTime, + StakerStartTime: startTime, PotentialReward: staker.PotentialReward, PotentialDelegateeReward: 0, } - metadataBytes, err := blocks.GenesisCodec.Marshal(blocks.Version, metadata) + metadataBytes, err := MetadataCodec.Marshal(codecVersion, metadata) if err != nil { return fmt.Errorf("failed to serialize current validator: %w", err) } @@ -1629,14 +2071,30 @@ func (s *state) writeCurrentStakers(updateValidators bool, height uint64) error staker := validatorDiff.validator weightDiff.Amount = staker.Weight - // Invariant: Only the Primary Network contains non-nil - // public keys. + // Invariant: Only the Primary Network contains non-nil public + // keys. if staker.PublicKey != nil { - // Record the public key of the validator being removed. - pkDiffs[nodeID] = staker.PublicKey + // Record that the public key for the validator is being + // removed. This means we must record the prior value of the + // public key. 
+ // + // Note: We store the uncompressed public key here as it is + // significantly more efficient to parse when applying + // diffs. + err := s.flatValidatorPublicKeyDiffsDB.Put( + marshalDiffKey(constants.PrimaryNetworkID, height, nodeID), + bls.SerializePublicKey(staker.PublicKey), + ) + if err != nil { + return err + } + // TODO: Remove this once we no longer support version + // rollbacks. + // + // Note: We store the compressed public key here. pkBytes := bls.PublicKeyToBytes(staker.PublicKey) - if err := pkDiffDB.Put(nodeID[:], pkBytes); err != nil { + if err := nestedPKDiffDB.Put(nodeID.Bytes(), pkBytes); err != nil { return err } } @@ -1652,6 +2110,7 @@ func (s *state) writeCurrentStakers(updateValidators bool, height uint64) error delegatorDB, weightDiff, validatorDiff, + codecVersion, ) if err != nil { return err @@ -1661,14 +2120,21 @@ func (s *state) writeCurrentStakers(updateValidators bool, height uint64) error // No weight change to record; go to next validator. continue } - weightDiffs[nodeID] = weightDiff - weightDiffBytes, err := blocks.GenesisCodec.Marshal(blocks.Version, weightDiff) + err = s.flatValidatorWeightDiffsDB.Put( + marshalDiffKey(subnetID, height, nodeID), + marshalWeightDiff(weightDiff), + ) + if err != nil { + return err + } + + // TODO: Remove this once we no longer support version rollbacks. + weightDiffBytes, err := block.GenesisCodec.Marshal(block.CodecVersion, weightDiff) if err != nil { return fmt.Errorf("failed to serialize validator weight diff: %w", err) } - - if err := weightDiffDB.Put(nodeID[:], weightDiffBytes); err != nil { + if err := nestedWeightDiffDB.Put(nodeID.Bytes(), weightDiffBytes); err != nil { return err } @@ -1677,18 +2143,12 @@ func (s *state) writeCurrentStakers(updateValidators bool, height uint64) error continue } - // We only track the current validator set of tracked subnets. 
- if subnetID != constants.PrimaryNetworkID && !s.cfg.TrackedSubnets.Contains(subnetID) { - continue - } - if weightDiff.Decrease { - err = validators.RemoveWeight(s.cfg.Validators, subnetID, nodeID, weightDiff.Amount) + err = s.validators.RemoveWeight(subnetID, nodeID, weightDiff.Amount) } else { if validatorDiff.validatorStatus == added { staker := validatorDiff.validator - err = validators.Add( - s.cfg.Validators, + err = s.validators.AddStaker( subnetID, nodeID, staker.PublicKey, @@ -1696,16 +2156,14 @@ func (s *state) writeCurrentStakers(updateValidators bool, height uint64) error weightDiff.Amount, ) } else { - err = validators.AddWeight(s.cfg.Validators, subnetID, nodeID, weightDiff.Amount) + err = s.validators.AddWeight(subnetID, nodeID, weightDiff.Amount) } } if err != nil { return fmt.Errorf("failed to update validator weight: %w", err) } } - s.validatorWeightDiffsCache.Put(string(prefixBytes), weightDiffs) } - s.validatorPublicKeyDiffsCache.Put(height, pkDiffs) // TODO: Move validator set management out of the state package // @@ -1713,12 +2171,14 @@ func (s *state) writeCurrentStakers(updateValidators bool, height uint64) error if !updateValidators { return nil } - primaryValidators, ok := s.cfg.Validators.Get(constants.PrimaryNetworkID) - if !ok { - return nil + + totalWeight, err := s.validators.TotalWeight(constants.PrimaryNetworkID) + if err != nil { + return fmt.Errorf("failed to get total weight of primary network: %w", err) } - s.metrics.SetLocalStake(primaryValidators.GetWeight(s.ctx.NodeID)) - s.metrics.SetTotalStake(primaryValidators.Weight()) + + s.metrics.SetLocalStake(s.validators.GetWeight(constants.PrimaryNetworkID, s.ctx.NodeID)) + s.metrics.SetTotalStake(totalWeight) return nil } @@ -1726,6 +2186,7 @@ func writeCurrentDelegatorDiff( currentDelegatorList linkeddb.LinkedDB, weightDiff *ValidatorWeightDiff, validatorDiff *diffValidator, + codecVersion uint16, ) error { addedDelegatorIterator := 
NewTreeIterator(validatorDiff.addedDelegators) defer addedDelegatorIterator.Release() @@ -1736,7 +2197,12 @@ func writeCurrentDelegatorDiff( return fmt.Errorf("failed to increase node weight diff: %w", err) } - if err := database.PutUInt64(currentDelegatorList, staker.TxID[:], staker.PotentialReward); err != nil { + metadata := &delegatorMetadata{ + txID: staker.TxID, + PotentialReward: staker.PotentialReward, + StakerStartTime: uint64(staker.StartTime.Unix()), + } + if err := writeDelegatorMetadata(currentDelegatorList, metadata, codecVersion); err != nil { return fmt.Errorf("failed to write current delegator to list: %w", err) } } @@ -1825,13 +2291,16 @@ func (s *state) writeTXs() error { // Note that we're serializing a [txBytesAndStatus] here, not a // *txs.Tx, so we don't use [txs.Codec]. - txBytes, err := txs.GenesisCodec.Marshal(txs.Version, &stx) + txBytes, err := txs.GenesisCodec.Marshal(txs.CodecVersion, &stx) if err != nil { return fmt.Errorf("failed to serialize tx: %w", err) } delete(s.addedTxs, txID) - s.txCache.Put(txID, txStatus) + // Note: Evict is used rather than Put here because stx may end up + // referencing additional data (because of shared byte slices) that + // would not be properly accounted for in the cache sizing. 
+ s.txCache.Evict(txID) if err := s.txDB.Put(txID[:], txBytes); err != nil { return fmt.Errorf("failed to add tx: %w", err) } @@ -1847,7 +2316,7 @@ func (s *state) writeRewardUTXOs() error { txDB := linkeddb.NewDefault(rawTxDB) for _, utxo := range utxos { - utxoBytes, err := txs.GenesisCodec.Marshal(txs.Version, utxo) + utxoBytes, err := txs.GenesisCodec.Marshal(txs.CodecVersion, utxo) if err != nil { return fmt.Errorf("failed to serialize reward UTXO: %w", err) } @@ -1889,12 +2358,38 @@ func (s *state) writeSubnets() error { return nil } +func (s *state) writeSubnetOwners() error { + for subnetID, owner := range s.subnetOwners { + subnetID := subnetID + owner := owner + delete(s.subnetOwners, subnetID) + + ownerBytes, err := block.GenesisCodec.Marshal(block.CodecVersion, &owner) + if err != nil { + return fmt.Errorf("failed to marshal subnet owner: %w", err) + } + + s.subnetOwnerCache.Put(subnetID, fxOwnerAndSize{ + owner: owner, + size: len(ownerBytes), + }) + + if err := s.subnetOwnerDB.Put(subnetID[:], ownerBytes); err != nil { + return fmt.Errorf("failed to write subnet owner: %w", err) + } + } + return nil +} + func (s *state) writeTransformedSubnets() error { for subnetID, tx := range s.transformedSubnets { txID := tx.ID() delete(s.transformedSubnets, subnetID) - s.transformedSubnetCache.Put(subnetID, tx) + // Note: Evict is used rather than Put here because tx may end up + // referencing additional data (because of shared byte slices) that + // would not be properly accounted for in the cache sizing. 
+ s.transformedSubnetCache.Evict(subnetID) if err := database.PutID(s.transformedSubnetDB, subnetID[:], txID); err != nil { return fmt.Errorf("failed to write transformed subnet: %w", err) } @@ -1931,22 +2426,217 @@ func (s *state) writeChains() error { func (s *state) writeMetadata() error { if !s.persistedTimestamp.Equal(s.timestamp) { - if err := database.PutTimestamp(s.singletonDB, timestampKey, s.timestamp); err != nil { + if err := database.PutTimestamp(s.singletonDB, TimestampKey, s.timestamp); err != nil { return fmt.Errorf("failed to write timestamp: %w", err) } s.persistedTimestamp = s.timestamp } if s.persistedCurrentSupply != s.currentSupply { - if err := database.PutUInt64(s.singletonDB, currentSupplyKey, s.currentSupply); err != nil { + if err := database.PutUInt64(s.singletonDB, CurrentSupplyKey, s.currentSupply); err != nil { return fmt.Errorf("failed to write current supply: %w", err) } s.persistedCurrentSupply = s.currentSupply } if s.persistedLastAccepted != s.lastAccepted { - if err := database.PutID(s.singletonDB, lastAcceptedKey, s.lastAccepted); err != nil { + if err := database.PutID(s.singletonDB, LastAcceptedKey, s.lastAccepted); err != nil { return fmt.Errorf("failed to write last accepted: %w", err) } s.persistedLastAccepted = s.lastAccepted } + + if s.indexedHeights != nil { + indexedHeightsBytes, err := block.GenesisCodec.Marshal(block.CodecVersion, s.indexedHeights) + if err != nil { + return err + } + if err := s.singletonDB.Put(HeightsIndexedKey, indexedHeightsBytes); err != nil { + return fmt.Errorf("failed to write indexed range: %w", err) + } + } + return nil } + +// Returns the block, status of the block, and whether it is a [stateBlk]. 
+// Invariant: blkBytes is safe to parse with blocks.GenesisCodec +// +// TODO: Remove after v1.11.x is activated +func parseStoredBlock(blkBytes []byte) (block.Block, choices.Status, bool, error) { + // Attempt to parse as blocks.Block + blk, err := block.Parse(block.GenesisCodec, blkBytes) + if err == nil { + return blk, choices.Accepted, false, nil + } + + // Fallback to [stateBlk] + blkState := stateBlk{} + if _, err := block.GenesisCodec.Unmarshal(blkBytes, &blkState); err != nil { + return nil, choices.Processing, false, err + } + + blkState.Blk, err = block.Parse(block.GenesisCodec, blkState.Bytes) + if err != nil { + return nil, choices.Processing, false, err + } + + return blkState.Blk, blkState.Status, true, nil +} + +func (s *state) PruneAndIndex(lock sync.Locker, log logging.Logger) error { + lock.Lock() + // It is possible that new blocks are added after grabbing this iterator. New + // blocks are guaranteed to be accepted and height-indexed, so we don't need to + // check them. + blockIterator := s.blockDB.NewIterator() + // Releasing is done using a closure to ensure that updating blockIterator will + // result in having the most recent iterator released when executing the + // deferred function. + defer func() { + blockIterator.Release() + }() + + // While we are pruning the disk, we disable caching of the data we are + // modifying. Caching is re-enabled when pruning finishes. + // + // Note: If an unexpected error occurs the caches are never re-enabled. + // That's fine as the node is going to be in an unhealthy state regardless. 
+ oldBlockIDCache := s.blockIDCache + s.blockIDCache = &cache.Empty[uint64, ids.ID]{} + lock.Unlock() + + log.Info("starting state pruning and indexing") + + var ( + startTime = time.Now() + lastCommit = startTime + lastUpdate = startTime + numPruned = 0 + numIndexed = 0 + ) + + for blockIterator.Next() { + blkBytes := blockIterator.Value() + + blk, status, isStateBlk, err := parseStoredBlock(blkBytes) + if err != nil { + return err + } + + if status != choices.Accepted { + // Remove non-accepted blocks from disk. + if err := s.blockDB.Delete(blockIterator.Key()); err != nil { + return fmt.Errorf("failed to delete block: %w", err) + } + + numPruned++ + + // We don't index the height of non-accepted blocks. + continue + } + + blkHeight := blk.Height() + blkID := blk.ID() + + // Populate the map of height -> blockID. + heightKey := database.PackUInt64(blkHeight) + if err := database.PutID(s.blockIDDB, heightKey, blkID); err != nil { + return fmt.Errorf("failed to add blockID: %w", err) + } + + // Since we only store accepted blocks on disk, we only need to store a map of + // ids.ID to Block. + if isStateBlk { + if err := s.blockDB.Put(blkID[:], blkBytes); err != nil { + return fmt.Errorf("failed to write block: %w", err) + } + } + + numIndexed++ + + if numIndexed%pruneCommitLimit == 0 { + // We must hold the lock during committing to make sure we don't + // attempt to commit to disk while a block is concurrently being + // accepted. + lock.Lock() + err := utils.Err( + s.Commit(), + blockIterator.Error(), + ) + lock.Unlock() + if err != nil { + return err + } + + // We release the iterator here to allow the underlying database to + // clean up deleted state. 
+ blockIterator.Release() + + now := time.Now() + if now.Sub(lastUpdate) > pruneUpdateFrequency { + lastUpdate = now + + progress := timer.ProgressFromHash(blkID[:]) + eta := timer.EstimateETA( + startTime, + progress, + math.MaxUint64, + ) + + log.Info("committing state pruning and indexing", + zap.Int("numPruned", numPruned), + zap.Int("numIndexed", numIndexed), + zap.Duration("eta", eta), + ) + } + + // We take the minimum here because it's possible that the node is + // currently bootstrapping. This would mean that grabbing the lock + // could take an extremely long period of time; which we should not + // delay processing for. + pruneDuration := now.Sub(lastCommit) + sleepDuration := min( + pruneCommitSleepMultiplier*pruneDuration, + pruneCommitSleepCap, + ) + time.Sleep(sleepDuration) + + // Make sure not to include the sleep duration into the next prune + // duration. + lastCommit = time.Now() + + blockIterator = s.blockDB.NewIteratorWithStart(blkID[:]) + } + } + + // Ensure we fully iterated over all blocks before writing that pruning has + // finished. + // + // Note: This is needed because a transient read error could cause the + // iterator to stop early. + if err := blockIterator.Error(); err != nil { + return err + } + + if err := s.donePrune(); err != nil { + return err + } + + // We must hold the lock during committing to make sure we don't + // attempt to commit to disk while a block is concurrently being + // accepted. + lock.Lock() + defer lock.Unlock() + + // Make sure we flush the original cache before re-enabling it to prevent + // surfacing any stale data. 
+ oldBlockIDCache.Flush() + s.blockIDCache = oldBlockIDCache + + log.Info("finished state pruning and indexing", + zap.Int("numPruned", numPruned), + zap.Int("numIndexed", numIndexed), + zap.Duration("duration", time.Since(startTime)), + ) + + return s.Commit() +} diff --git a/avalanchego/vms/platformvm/state/state_test.go b/avalanchego/vms/platformvm/state/state_test.go index ef8cdb63..01fae668 100644 --- a/avalanchego/vms/platformvm/state/state_test.go +++ b/avalanchego/vms/platformvm/state/state_test.go @@ -1,35 +1,43 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package state import ( + "context" + "fmt" "math" "testing" "time" "github.com/prometheus/client_golang/prometheus" - "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/validators" - "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/vms/components/avax" - "github.com/ava-labs/avalanchego/vms/platformvm/blocks" + "github.com/ava-labs/avalanchego/vms/platformvm/block" "github.com/ava-labs/avalanchego/vms/platformvm/config" + "github.com/ava-labs/avalanchego/vms/platformvm/fx" "github.com/ava-labs/avalanchego/vms/platformvm/genesis" "github.com/ava-labs/avalanchego/vms/platformvm/metrics" "github.com/ava-labs/avalanchego/vms/platformvm/reward" + "github.com/ava-labs/avalanchego/vms/platformvm/signer" + "github.com/ava-labs/avalanchego/vms/platformvm/status" 
"github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/ava-labs/avalanchego/vms/secp256k1fx" + "github.com/ava-labs/avalanchego/vms/types" + + safemath "github.com/ava-labs/avalanchego/utils/math" ) var ( @@ -43,23 +51,23 @@ func TestStateInitialization(t *testing.T) { require := require.New(t) s, db := newUninitializedState(require) - shouldInit, err := s.(*state).shouldInit() + shouldInit, err := s.shouldInit() require.NoError(err) require.True(shouldInit) - require.NoError(s.(*state).doneInit()) + require.NoError(s.doneInit()) require.NoError(s.Commit()) s = newStateFromDB(require, db) - shouldInit, err = s.(*state).shouldInit() + shouldInit, err = s.shouldInit() require.NoError(err) require.False(shouldInit) } func TestStateSyncGenesis(t *testing.T) { require := require.New(t) - state, _ := newInitializedState(require) + state := newInitializedState(require) staker, err := state.GetCurrentValidator(constants.PrimaryNetworkID, initialNodeID) require.NoError(err) @@ -82,347 +90,617 @@ func TestStateSyncGenesis(t *testing.T) { assertIteratorsEqual(t, EmptyIterator, delegatorIterator) } -func TestGetValidatorWeightDiffs(t *testing.T) { - require := require.New(t) - stateIntf, _ := newInitializedState(require) - state := stateIntf.(*state) +// Whenever we store a staker, a whole bunch a data structures are updated +// This test is meant to capture which updates are carried out +func TestPersistStakers(t *testing.T) { + tests := map[string]struct { + // Insert or delete a staker to state and store it + storeStaker func(*require.Assertions, ids.ID /*=subnetID*/, *state) *Staker - txID0 := ids.GenerateTestID() - txID1 := ids.GenerateTestID() - txID2 := ids.GenerateTestID() - txID3 := ids.GenerateTestID() + // Check that the staker is duly stored/removed in P-chain state + checkStakerInState func(*require.Assertions, *state, *Staker) - nodeID0 := ids.GenerateTestNodeID() + // Check whether validators are duly reported in the validator set, + // with the right 
weight and showing the BLS key + checkValidatorsSet func(*require.Assertions, *state, *Staker) - subnetID0 := ids.GenerateTestID() + // Check that node duly track stakers uptimes + checkValidatorUptimes func(*require.Assertions, *state, *Staker) - type stakerDiff struct { - validatorsToAdd []*Staker - delegatorsToAdd []*Staker - validatorsToRemove []*Staker - delegatorsToRemove []*Staker + // Check whether weight/bls keys diffs are duly stored + checkDiffs func(*require.Assertions, *state, *Staker, uint64) + }{ + "add current validator": { + storeStaker: func(r *require.Assertions, subnetID ids.ID, s *state) *Staker { + var ( + startTime = time.Now().Unix() + endTime = time.Now().Add(14 * 24 * time.Hour).Unix() - expectedValidatorWeightDiffs map[ids.ID]map[ids.NodeID]*ValidatorWeightDiff - } - stakerDiffs := []*stakerDiff{ - { - validatorsToAdd: []*Staker{ - { - TxID: txID0, - NodeID: nodeID0, - SubnetID: constants.PrimaryNetworkID, - Weight: 1, - }, + validatorsData = txs.Validator{ + NodeID: ids.GenerateTestNodeID(), + End: uint64(endTime), + Wght: 1234, + } + validatorReward uint64 = 5678 + ) + + utx := createPermissionlessValidatorTx(r, subnetID, validatorsData) + addPermValTx := &txs.Tx{Unsigned: utx} + r.NoError(addPermValTx.Initialize(txs.Codec)) + + staker, err := NewCurrentStaker( + addPermValTx.ID(), + utx, + time.Unix(startTime, 0), + validatorReward, + ) + r.NoError(err) + + s.PutCurrentValidator(staker) + s.AddTx(addPermValTx, status.Committed) // this is currently needed to reload the staker + r.NoError(s.Commit()) + return staker }, - expectedValidatorWeightDiffs: map[ids.ID]map[ids.NodeID]*ValidatorWeightDiff{ - constants.PrimaryNetworkID: { - nodeID0: { - Decrease: false, - Amount: 1, - }, - }, + checkStakerInState: func(r *require.Assertions, s *state, staker *Staker) { + retrievedStaker, err := s.GetCurrentValidator(staker.SubnetID, staker.NodeID) + r.NoError(err) + r.Equal(staker, retrievedStaker) }, - }, - { - validatorsToAdd: []*Staker{ - { - 
TxID: txID3, - NodeID: nodeID0, - SubnetID: subnetID0, - Weight: 10, - }, + checkValidatorsSet: func(r *require.Assertions, s *state, staker *Staker) { + valsMap := s.cfg.Validators.GetMap(staker.SubnetID) + r.Len(valsMap, 1) + valOut, found := valsMap[staker.NodeID] + r.True(found) + r.Equal(&validators.GetValidatorOutput{ + NodeID: staker.NodeID, + PublicKey: staker.PublicKey, + Weight: staker.Weight, + }, valOut) }, - delegatorsToAdd: []*Staker{ - { - TxID: txID1, - NodeID: nodeID0, - SubnetID: constants.PrimaryNetworkID, - Weight: 5, - }, + checkValidatorUptimes: func(r *require.Assertions, s *state, staker *Staker) { + upDuration, lastUpdated, err := s.GetUptime(staker.NodeID, staker.SubnetID) + r.NoError(err) + r.Equal(upDuration, time.Duration(0)) + r.Equal(lastUpdated, staker.StartTime) }, - expectedValidatorWeightDiffs: map[ids.ID]map[ids.NodeID]*ValidatorWeightDiff{ - constants.PrimaryNetworkID: { - nodeID0: { - Decrease: false, - Amount: 5, - }, - }, - subnetID0: { - nodeID0: { - Decrease: false, - Amount: 10, - }, - }, + checkDiffs: func(r *require.Assertions, s *state, staker *Staker, height uint64) { + weightDiffBytes, err := s.flatValidatorWeightDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) + r.NoError(err) + weightDiff, err := unmarshalWeightDiff(weightDiffBytes) + r.NoError(err) + r.Equal(&ValidatorWeightDiff{ + Decrease: false, + Amount: staker.Weight, + }, weightDiff) + + blsDiffBytes, err := s.flatValidatorPublicKeyDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) + if staker.SubnetID == constants.PrimaryNetworkID { + r.NoError(err) + r.Nil(blsDiffBytes) + } else { + r.ErrorIs(err, database.ErrNotFound) + } }, }, - { - delegatorsToAdd: []*Staker{ - { - TxID: txID2, - NodeID: nodeID0, - SubnetID: constants.PrimaryNetworkID, - Weight: 15, - }, + "add current delegator": { + storeStaker: func(r *require.Assertions, subnetID ids.ID, s *state) *Staker { + // insert the delegator and its validator + var ( + 
valStartTime = time.Now().Truncate(time.Second).Unix() + delStartTime = time.Unix(valStartTime, 0).Add(time.Hour).Unix() + delEndTime = time.Unix(delStartTime, 0).Add(30 * 24 * time.Hour).Unix() + valEndTime = time.Unix(valStartTime, 0).Add(365 * 24 * time.Hour).Unix() + + validatorsData = txs.Validator{ + NodeID: ids.GenerateTestNodeID(), + End: uint64(valEndTime), + Wght: 1234, + } + validatorReward uint64 = 5678 + + delegatorData = txs.Validator{ + NodeID: validatorsData.NodeID, + End: uint64(delEndTime), + Wght: validatorsData.Wght / 2, + } + delegatorReward uint64 = 5432 + ) + + utxVal := createPermissionlessValidatorTx(r, subnetID, validatorsData) + addPermValTx := &txs.Tx{Unsigned: utxVal} + r.NoError(addPermValTx.Initialize(txs.Codec)) + + val, err := NewCurrentStaker( + addPermValTx.ID(), + utxVal, + time.Unix(valStartTime, 0), + validatorReward, + ) + r.NoError(err) + + utxDel := createPermissionlessDelegatorTx(subnetID, delegatorData) + addPermDelTx := &txs.Tx{Unsigned: utxDel} + r.NoError(addPermDelTx.Initialize(txs.Codec)) + + del, err := NewCurrentStaker( + addPermDelTx.ID(), + utxDel, + time.Unix(delStartTime, 0), + delegatorReward, + ) + r.NoError(err) + + s.PutCurrentValidator(val) + s.AddTx(addPermValTx, status.Committed) // this is currently needed to reload the staker + r.NoError(s.Commit()) + + s.PutCurrentDelegator(del) + s.AddTx(addPermDelTx, status.Committed) // this is currently needed to reload the staker + r.NoError(s.Commit()) + return del }, - delegatorsToRemove: []*Staker{ - { - TxID: txID1, - NodeID: nodeID0, - SubnetID: constants.PrimaryNetworkID, - Weight: 5, - }, + checkStakerInState: func(r *require.Assertions, s *state, staker *Staker) { + delIt, err := s.GetCurrentDelegatorIterator(staker.SubnetID, staker.NodeID) + r.NoError(err) + r.True(delIt.Next()) + retrievedDelegator := delIt.Value() + r.False(delIt.Next()) + delIt.Release() + r.Equal(staker, retrievedDelegator) }, - expectedValidatorWeightDiffs: 
map[ids.ID]map[ids.NodeID]*ValidatorWeightDiff{ - constants.PrimaryNetworkID: { - nodeID0: { - Decrease: false, - Amount: 10, - }, - }, + checkValidatorsSet: func(r *require.Assertions, s *state, staker *Staker) { + val, err := s.GetCurrentValidator(staker.SubnetID, staker.NodeID) + r.NoError(err) + + valsMap := s.cfg.Validators.GetMap(staker.SubnetID) + r.Len(valsMap, 1) + valOut, found := valsMap[staker.NodeID] + r.True(found) + r.Equal(valOut.NodeID, staker.NodeID) + r.Equal(valOut.Weight, val.Weight+staker.Weight) + }, + checkValidatorUptimes: func(*require.Assertions, *state, *Staker) {}, + checkDiffs: func(r *require.Assertions, s *state, staker *Staker, height uint64) { + // validator's weight must increase of delegator's weight amount + weightDiffBytes, err := s.flatValidatorWeightDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) + r.NoError(err) + weightDiff, err := unmarshalWeightDiff(weightDiffBytes) + r.NoError(err) + r.Equal(&ValidatorWeightDiff{ + Decrease: false, + Amount: staker.Weight, + }, weightDiff) }, }, - { - validatorsToRemove: []*Staker{ - { - TxID: txID0, - NodeID: nodeID0, - SubnetID: constants.PrimaryNetworkID, - Weight: 1, - }, - { - TxID: txID3, - NodeID: nodeID0, - SubnetID: subnetID0, - Weight: 10, - }, + "add pending validator": { + storeStaker: func(r *require.Assertions, subnetID ids.ID, s *state) *Staker { + var ( + startTime = time.Now().Unix() + endTime = time.Now().Add(14 * 24 * time.Hour).Unix() + + validatorsData = txs.Validator{ + NodeID: ids.GenerateTestNodeID(), + Start: uint64(startTime), + End: uint64(endTime), + Wght: 1234, + } + ) + + utx := createPermissionlessValidatorTx(r, subnetID, validatorsData) + addPermValTx := &txs.Tx{Unsigned: utx} + r.NoError(addPermValTx.Initialize(txs.Codec)) + + staker, err := NewPendingStaker( + addPermValTx.ID(), + utx, + ) + r.NoError(err) + + s.PutPendingValidator(staker) + s.AddTx(addPermValTx, status.Committed) // this is currently needed to reload the staker + 
r.NoError(s.Commit()) + return staker }, - delegatorsToRemove: []*Staker{ - { - TxID: txID2, - NodeID: nodeID0, - SubnetID: constants.PrimaryNetworkID, - Weight: 15, - }, + checkStakerInState: func(r *require.Assertions, s *state, staker *Staker) { + retrievedStaker, err := s.GetPendingValidator(staker.SubnetID, staker.NodeID) + r.NoError(err) + r.Equal(staker, retrievedStaker) }, - expectedValidatorWeightDiffs: map[ids.ID]map[ids.NodeID]*ValidatorWeightDiff{ - constants.PrimaryNetworkID: { - nodeID0: { - Decrease: true, - Amount: 16, - }, - }, - subnetID0: { - nodeID0: { - Decrease: true, - Amount: 10, - }, - }, + checkValidatorsSet: func(r *require.Assertions, s *state, staker *Staker) { + // pending validators are not showed in validators set + valsMap := s.cfg.Validators.GetMap(staker.SubnetID) + r.Empty(valsMap) + }, + checkValidatorUptimes: func(r *require.Assertions, s *state, staker *Staker) { + // pending validators uptime is not tracked + _, _, err := s.GetUptime(staker.NodeID, staker.SubnetID) + r.ErrorIs(err, database.ErrNotFound) + }, + checkDiffs: func(r *require.Assertions, s *state, staker *Staker, height uint64) { + // pending validators weight diff and bls diffs are not stored + _, err := s.flatValidatorWeightDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) + r.ErrorIs(err, database.ErrNotFound) + + _, err = s.flatValidatorPublicKeyDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) + r.ErrorIs(err, database.ErrNotFound) }, }, - {}, - } + "add pending delegator": { + storeStaker: func(r *require.Assertions, subnetID ids.ID, s *state) *Staker { + // insert the delegator and its validator + var ( + valStartTime = time.Now().Truncate(time.Second).Unix() + delStartTime = time.Unix(valStartTime, 0).Add(time.Hour).Unix() + delEndTime = time.Unix(delStartTime, 0).Add(30 * 24 * time.Hour).Unix() + valEndTime = time.Unix(valStartTime, 0).Add(365 * 24 * time.Hour).Unix() - for i, stakerDiff := range stakerDiffs { - for _, 
validator := range stakerDiff.validatorsToAdd { - state.PutCurrentValidator(validator) - } - for _, delegator := range stakerDiff.delegatorsToAdd { - state.PutCurrentDelegator(delegator) - } - for _, validator := range stakerDiff.validatorsToRemove { - state.DeleteCurrentValidator(validator) - } - for _, delegator := range stakerDiff.delegatorsToRemove { - state.DeleteCurrentDelegator(delegator) - } - state.SetHeight(uint64(i + 1)) - require.NoError(state.Commit()) + validatorsData = txs.Validator{ + NodeID: ids.GenerateTestNodeID(), + Start: uint64(valStartTime), + End: uint64(valEndTime), + Wght: 1234, + } - // Calling write again should not change the state. - state.SetHeight(uint64(i + 1)) - require.NoError(state.Commit()) + delegatorData = txs.Validator{ + NodeID: validatorsData.NodeID, + Start: uint64(delStartTime), + End: uint64(delEndTime), + Wght: validatorsData.Wght / 2, + } + ) - for j, stakerDiff := range stakerDiffs[:i+1] { - for subnetID, expectedValidatorWeightDiffs := range stakerDiff.expectedValidatorWeightDiffs { - validatorWeightDiffs, err := state.GetValidatorWeightDiffs(uint64(j+1), subnetID) - require.NoError(err) - require.Equal(expectedValidatorWeightDiffs, validatorWeightDiffs) - } + utxVal := createPermissionlessValidatorTx(r, subnetID, validatorsData) + addPermValTx := &txs.Tx{Unsigned: utxVal} + r.NoError(addPermValTx.Initialize(txs.Codec)) - state.validatorWeightDiffsCache.Flush() - } - } -} + val, err := NewPendingStaker(addPermValTx.ID(), utxVal) + r.NoError(err) -func TestGetValidatorPublicKeyDiffs(t *testing.T) { - require := require.New(t) - stateIntf, _ := newInitializedState(require) - state := stateIntf.(*state) + utxDel := createPermissionlessDelegatorTx(subnetID, delegatorData) + addPermDelTx := &txs.Tx{Unsigned: utxDel} + r.NoError(addPermDelTx.Initialize(txs.Codec)) - var ( - numNodes = 6 - txIDs = make([]ids.ID, numNodes) - nodeIDs = make([]ids.NodeID, numNodes) - sks = make([]*bls.SecretKey, numNodes) - pks = 
make([]*bls.PublicKey, numNodes) - pkBytes = make([][]byte, numNodes) - err error - ) - for i := 0; i < numNodes; i++ { - txIDs[i] = ids.GenerateTestID() - nodeIDs[i] = ids.GenerateTestNodeID() - sks[i], err = bls.NewSecretKey() - require.NoError(err) - pks[i] = bls.PublicFromSecretKey(sks[i]) - pkBytes[i] = bls.PublicKeyToBytes(pks[i]) - } + del, err := NewPendingStaker(addPermDelTx.ID(), utxDel) + r.NoError(err) - type stakerDiff struct { - validatorsToAdd []*Staker - validatorsToRemove []*Staker - expectedPublicKeyDiffs map[ids.NodeID]*bls.PublicKey - } - stakerDiffs := []*stakerDiff{ - { - // Add two validators - validatorsToAdd: []*Staker{ - { - TxID: txIDs[0], - NodeID: nodeIDs[0], - Weight: 1, - PublicKey: pks[0], - }, - { - TxID: txIDs[1], - NodeID: nodeIDs[1], - Weight: 10, - PublicKey: pks[1], - }, + s.PutPendingValidator(val) + s.AddTx(addPermValTx, status.Committed) // this is currently needed to reload the staker + r.NoError(s.Commit()) + + s.PutPendingDelegator(del) + s.AddTx(addPermDelTx, status.Committed) // this is currently needed to reload the staker + r.NoError(s.Commit()) + + return del }, - expectedPublicKeyDiffs: map[ids.NodeID]*bls.PublicKey{}, - }, - { - // Remove a validator - validatorsToRemove: []*Staker{ - { - TxID: txIDs[0], - NodeID: nodeIDs[0], - Weight: 1, - PublicKey: pks[0], - }, + checkStakerInState: func(r *require.Assertions, s *state, staker *Staker) { + delIt, err := s.GetPendingDelegatorIterator(staker.SubnetID, staker.NodeID) + r.NoError(err) + r.True(delIt.Next()) + retrievedDelegator := delIt.Value() + r.False(delIt.Next()) + delIt.Release() + r.Equal(staker, retrievedDelegator) }, - expectedPublicKeyDiffs: map[ids.NodeID]*bls.PublicKey{ - nodeIDs[0]: pks[0], + checkValidatorsSet: func(r *require.Assertions, s *state, staker *Staker) { + valsMap := s.cfg.Validators.GetMap(staker.SubnetID) + r.Empty(valsMap) }, + checkValidatorUptimes: func(*require.Assertions, *state, *Staker) {}, + checkDiffs: func(*require.Assertions, 
*state, *Staker, uint64) {}, }, - { - // Add 2 validators and remove a validator - validatorsToAdd: []*Staker{ - { - TxID: txIDs[2], - NodeID: nodeIDs[2], - Weight: 10, - PublicKey: pks[2], - }, - { - TxID: txIDs[3], - NodeID: nodeIDs[3], - Weight: 10, - PublicKey: pks[3], - }, + "delete current validator": { + storeStaker: func(r *require.Assertions, subnetID ids.ID, s *state) *Staker { + // add them remove the validator + var ( + startTime = time.Now().Unix() + endTime = time.Now().Add(14 * 24 * time.Hour).Unix() + + validatorsData = txs.Validator{ + NodeID: ids.GenerateTestNodeID(), + End: uint64(endTime), + Wght: 1234, + } + validatorReward uint64 = 5678 + ) + + utx := createPermissionlessValidatorTx(r, subnetID, validatorsData) + addPermValTx := &txs.Tx{Unsigned: utx} + r.NoError(addPermValTx.Initialize(txs.Codec)) + + staker, err := NewCurrentStaker( + addPermValTx.ID(), + utx, + time.Unix(startTime, 0), + validatorReward, + ) + r.NoError(err) + + s.PutCurrentValidator(staker) + s.AddTx(addPermValTx, status.Committed) // this is currently needed to reload the staker + r.NoError(s.Commit()) + + s.DeleteCurrentValidator(staker) + r.NoError(s.Commit()) + return staker }, - validatorsToRemove: []*Staker{ - { - TxID: txIDs[1], - NodeID: nodeIDs[1], - Weight: 10, - PublicKey: pks[1], - }, + checkStakerInState: func(r *require.Assertions, s *state, staker *Staker) { + _, err := s.GetCurrentValidator(staker.SubnetID, staker.NodeID) + r.ErrorIs(err, database.ErrNotFound) }, - expectedPublicKeyDiffs: map[ids.NodeID]*bls.PublicKey{ - nodeIDs[1]: pks[1], + checkValidatorsSet: func(r *require.Assertions, s *state, staker *Staker) { + // deleted validators are not showed in the validators set anymore + valsMap := s.cfg.Validators.GetMap(staker.SubnetID) + r.Empty(valsMap) + }, + checkValidatorUptimes: func(r *require.Assertions, s *state, staker *Staker) { + // uptimes of delete validators are dropped + _, _, err := s.GetUptime(staker.NodeID, staker.SubnetID) + 
r.ErrorIs(err, database.ErrNotFound) + }, + checkDiffs: func(r *require.Assertions, s *state, staker *Staker, height uint64) { + weightDiffBytes, err := s.flatValidatorWeightDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) + r.NoError(err) + weightDiff, err := unmarshalWeightDiff(weightDiffBytes) + r.NoError(err) + r.Equal(&ValidatorWeightDiff{ + Decrease: true, + Amount: staker.Weight, + }, weightDiff) + + blsDiffBytes, err := s.flatValidatorPublicKeyDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) + if staker.SubnetID == constants.PrimaryNetworkID { + r.NoError(err) + r.Equal(bls.DeserializePublicKey(blsDiffBytes), staker.PublicKey) + } else { + r.ErrorIs(err, database.ErrNotFound) + } }, }, - { - // Remove 2 validators and add a validator - validatorsToAdd: []*Staker{ - { - TxID: txIDs[4], - NodeID: nodeIDs[4], - Weight: 10, - PublicKey: pks[4], - }, + "delete current delegator": { + storeStaker: func(r *require.Assertions, subnetID ids.ID, s *state) *Staker { + // insert validator and delegator, then remove the delegator + var ( + valStartTime = time.Now().Truncate(time.Second).Unix() + delStartTime = time.Unix(valStartTime, 0).Add(time.Hour).Unix() + delEndTime = time.Unix(delStartTime, 0).Add(30 * 24 * time.Hour).Unix() + valEndTime = time.Unix(valStartTime, 0).Add(365 * 24 * time.Hour).Unix() + + validatorsData = txs.Validator{ + NodeID: ids.GenerateTestNodeID(), + End: uint64(valEndTime), + Wght: 1234, + } + validatorReward uint64 = 5678 + + delegatorData = txs.Validator{ + NodeID: validatorsData.NodeID, + End: uint64(delEndTime), + Wght: validatorsData.Wght / 2, + } + delegatorReward uint64 = 5432 + ) + + utxVal := createPermissionlessValidatorTx(r, subnetID, validatorsData) + addPermValTx := &txs.Tx{Unsigned: utxVal} + r.NoError(addPermValTx.Initialize(txs.Codec)) + + val, err := NewCurrentStaker( + addPermValTx.ID(), + utxVal, + time.Unix(valStartTime, 0), + validatorReward, + ) + r.NoError(err) + + utxDel := 
createPermissionlessDelegatorTx(subnetID, delegatorData) + addPermDelTx := &txs.Tx{Unsigned: utxDel} + r.NoError(addPermDelTx.Initialize(txs.Codec)) + + del, err := NewCurrentStaker( + addPermDelTx.ID(), + utxDel, + time.Unix(delStartTime, 0), + delegatorReward, + ) + r.NoError(err) + + s.PutCurrentValidator(val) + s.AddTx(addPermValTx, status.Committed) // this is currently needed to reload the staker + + s.PutCurrentDelegator(del) + s.AddTx(addPermDelTx, status.Committed) // this is currently needed to reload the staker + r.NoError(s.Commit()) + + s.DeleteCurrentDelegator(del) + r.NoError(s.Commit()) + + return del }, - validatorsToRemove: []*Staker{ - { - TxID: txIDs[2], - NodeID: nodeIDs[2], - Weight: 10, - PublicKey: pks[2], - }, - { - TxID: txIDs[3], - NodeID: nodeIDs[3], - Weight: 10, - PublicKey: pks[3], - }, + checkStakerInState: func(r *require.Assertions, s *state, staker *Staker) { + delIt, err := s.GetCurrentDelegatorIterator(staker.SubnetID, staker.NodeID) + r.NoError(err) + r.False(delIt.Next()) + delIt.Release() }, - expectedPublicKeyDiffs: map[ids.NodeID]*bls.PublicKey{ - nodeIDs[2]: pks[2], - nodeIDs[3]: pks[3], + checkValidatorsSet: func(r *require.Assertions, s *state, staker *Staker) { + val, err := s.GetCurrentValidator(staker.SubnetID, staker.NodeID) + r.NoError(err) + + valsMap := s.cfg.Validators.GetMap(staker.SubnetID) + r.Len(valsMap, 1) + valOut, found := valsMap[staker.NodeID] + r.True(found) + r.Equal(valOut.NodeID, staker.NodeID) + r.Equal(valOut.Weight, val.Weight) + }, + checkValidatorUptimes: func(*require.Assertions, *state, *Staker) {}, + checkDiffs: func(r *require.Assertions, s *state, staker *Staker, height uint64) { + // validator's weight must decrease of delegator's weight amount + weightDiffBytes, err := s.flatValidatorWeightDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) + r.NoError(err) + weightDiff, err := unmarshalWeightDiff(weightDiffBytes) + r.NoError(err) + r.Equal(&ValidatorWeightDiff{ + 
Decrease: true, + Amount: staker.Weight, + }, weightDiff) }, }, - { - // Add a validator with no pub key - validatorsToAdd: []*Staker{ - { - TxID: txIDs[5], - NodeID: nodeIDs[5], - Weight: 10, - PublicKey: nil, - }, + "delete pending validator": { + storeStaker: func(r *require.Assertions, subnetID ids.ID, s *state) *Staker { + var ( + startTime = time.Now().Unix() + endTime = time.Now().Add(14 * 24 * time.Hour).Unix() + + validatorsData = txs.Validator{ + NodeID: ids.GenerateTestNodeID(), + Start: uint64(startTime), + End: uint64(endTime), + Wght: 1234, + } + ) + + utx := createPermissionlessValidatorTx(r, subnetID, validatorsData) + addPermValTx := &txs.Tx{Unsigned: utx} + r.NoError(addPermValTx.Initialize(txs.Codec)) + + staker, err := NewPendingStaker( + addPermValTx.ID(), + utx, + ) + r.NoError(err) + + s.PutPendingValidator(staker) + s.AddTx(addPermValTx, status.Committed) // this is currently needed to reload the staker + r.NoError(s.Commit()) + + s.DeletePendingValidator(staker) + r.NoError(s.Commit()) + + return staker + }, + checkStakerInState: func(r *require.Assertions, s *state, staker *Staker) { + _, err := s.GetPendingValidator(staker.SubnetID, staker.NodeID) + r.ErrorIs(err, database.ErrNotFound) + }, + checkValidatorsSet: func(r *require.Assertions, s *state, staker *Staker) { + valsMap := s.cfg.Validators.GetMap(staker.SubnetID) + r.Empty(valsMap) + }, + checkValidatorUptimes: func(r *require.Assertions, s *state, staker *Staker) { + _, _, err := s.GetUptime(staker.NodeID, staker.SubnetID) + r.ErrorIs(err, database.ErrNotFound) + }, + checkDiffs: func(r *require.Assertions, s *state, staker *Staker, height uint64) { + _, err := s.flatValidatorWeightDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) + r.ErrorIs(err, database.ErrNotFound) + + _, err = s.flatValidatorPublicKeyDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) + r.ErrorIs(err, database.ErrNotFound) }, - expectedPublicKeyDiffs: 
map[ids.NodeID]*bls.PublicKey{}, }, - { - // Remove a validator with no pub key - validatorsToRemove: []*Staker{ - { - TxID: txIDs[5], - NodeID: nodeIDs[5], - Weight: 10, - PublicKey: nil, - }, + "delete pending delegator": { + storeStaker: func(r *require.Assertions, subnetID ids.ID, s *state) *Staker { + // insert validator and delegator the remove the validator + var ( + valStartTime = time.Now().Truncate(time.Second).Unix() + delStartTime = time.Unix(valStartTime, 0).Add(time.Hour).Unix() + delEndTime = time.Unix(delStartTime, 0).Add(30 * 24 * time.Hour).Unix() + valEndTime = time.Unix(valStartTime, 0).Add(365 * 24 * time.Hour).Unix() + + validatorsData = txs.Validator{ + NodeID: ids.GenerateTestNodeID(), + Start: uint64(valStartTime), + End: uint64(valEndTime), + Wght: 1234, + } + + delegatorData = txs.Validator{ + NodeID: validatorsData.NodeID, + Start: uint64(delStartTime), + End: uint64(delEndTime), + Wght: validatorsData.Wght / 2, + } + ) + + utxVal := createPermissionlessValidatorTx(r, subnetID, validatorsData) + addPermValTx := &txs.Tx{Unsigned: utxVal} + r.NoError(addPermValTx.Initialize(txs.Codec)) + + val, err := NewPendingStaker(addPermValTx.ID(), utxVal) + r.NoError(err) + + utxDel := createPermissionlessDelegatorTx(subnetID, delegatorData) + addPermDelTx := &txs.Tx{Unsigned: utxDel} + r.NoError(addPermDelTx.Initialize(txs.Codec)) + + del, err := NewPendingStaker(addPermDelTx.ID(), utxDel) + r.NoError(err) + + s.PutPendingValidator(val) + s.AddTx(addPermValTx, status.Committed) // this is currently needed to reload the staker + + s.PutPendingDelegator(del) + s.AddTx(addPermDelTx, status.Committed) // this is currently needed to reload the staker + r.NoError(s.Commit()) + + s.DeletePendingDelegator(del) + r.NoError(s.Commit()) + return del + }, + checkStakerInState: func(r *require.Assertions, s *state, staker *Staker) { + delIt, err := s.GetPendingDelegatorIterator(staker.SubnetID, staker.NodeID) + r.NoError(err) + r.False(delIt.Next()) + 
delIt.Release() + }, + checkValidatorsSet: func(r *require.Assertions, s *state, staker *Staker) { + valsMap := s.cfg.Validators.GetMap(staker.SubnetID) + r.Empty(valsMap) }, - expectedPublicKeyDiffs: map[ids.NodeID]*bls.PublicKey{}, + checkValidatorUptimes: func(*require.Assertions, *state, *Staker) {}, + checkDiffs: func(*require.Assertions, *state, *Staker, uint64) {}, }, } - for i, stakerDiff := range stakerDiffs { - for _, validator := range stakerDiff.validatorsToAdd { - state.PutCurrentValidator(validator) - } - for _, validator := range stakerDiff.validatorsToRemove { - state.DeleteCurrentValidator(validator) - } - state.SetHeight(uint64(i + 1)) - require.NoError(state.Commit()) + subnetIDs := []ids.ID{constants.PrimaryNetworkID, ids.GenerateTestID()} + for _, subnetID := range subnetIDs { + for name, test := range tests { + t.Run(fmt.Sprintf("%s - subnetID %s", name, subnetID), func(t *testing.T) { + require := require.New(t) - // Calling write again should not change the state. 
- state.SetHeight(uint64(i + 1)) - require.NoError(state.Commit()) + state, db := newUninitializedState(require) - for j, stakerDiff := range stakerDiffs[:i+1] { - pkDiffs, err := state.GetValidatorPublicKeyDiffs(uint64(j + 1)) - require.NoError(err) - require.Equal(stakerDiff.expectedPublicKeyDiffs, pkDiffs) - state.validatorPublicKeyDiffsCache.Flush() + // create and store the staker + staker := test.storeStaker(require, subnetID, state) + + // check all relevant data are stored + test.checkStakerInState(require, state, staker) + test.checkValidatorsSet(require, state, staker) + test.checkValidatorUptimes(require, state, staker) + test.checkDiffs(require, state, staker, 0 /*height*/) + + // rebuild the state + rebuiltState := newStateFromDB(require, db) + + // load relevant quantities + require.NoError(rebuiltState.loadCurrentValidators()) + require.NoError(rebuiltState.loadPendingValidators()) + require.NoError(rebuiltState.initValidatorSets()) + + // check again that all relevant data are still available in rebuilt state + test.checkStakerInState(require, state, staker) + test.checkValidatorsSet(require, state, staker) + test.checkValidatorUptimes(require, state, staker) + test.checkDiffs(require, state, staker, 0 /*height*/) + }) } } } -func newInitializedState(require *require.Assertions) (State, database.Database) { - s, db := newUninitializedState(require) +func newInitializedState(require *require.Assertions) State { + s, _ := newUninitializedState(require) initialValidator := &txs.AddValidatorTx{ Validator: txs.Validator{ @@ -455,17 +733,20 @@ func newInitializedState(require *require.Assertions) (State, database.Database) require.NoError(initialChainTx.Initialize(txs.Codec)) genesisBlkID := ids.GenerateTestID() - genesisState := &genesis.State{ - UTXOs: []*avax.UTXO{ + genesisState := &genesis.Genesis{ + UTXOs: []*genesis.UTXO{ { - UTXOID: avax.UTXOID{ - TxID: initialTxID, - OutputIndex: 0, - }, - Asset: avax.Asset{ID: initialTxID}, - Out: 
&secp256k1fx.TransferOutput{ - Amt: units.Schmeckle, + UTXO: avax.UTXO{ + UTXOID: avax.UTXOID{ + TxID: initialTxID, + OutputIndex: 0, + }, + Asset: avax.Asset{ID: initialTxID}, + Out: &secp256k1fx.TransferOutput{ + Amt: units.Schmeckle, + }, }, + Message: nil, }, }, Validators: []*txs.Tx{ @@ -478,28 +759,27 @@ func newInitializedState(require *require.Assertions) (State, database.Database) InitialSupply: units.Schmeckle + units.Avax, } - genesisBlk, err := blocks.NewApricotCommitBlock(genesisBlkID, 0) + genesisBlk, err := block.NewApricotCommitBlock(genesisBlkID, 0) require.NoError(err) - require.NoError(s.(*state).syncGenesis(genesisBlk, genesisState)) + require.NoError(s.syncGenesis(genesisBlk, genesisState)) - return s, db + return s } -func newUninitializedState(require *require.Assertions) (State, database.Database) { +func newUninitializedState(require *require.Assertions) (*state, database.Database) { db := memdb.New() return newStateFromDB(require, db), db } -func newStateFromDB(require *require.Assertions, db database.Database) State { - vdrs := validators.NewManager() - primaryVdrs := validators.NewSet() - _ = vdrs.Add(constants.PrimaryNetworkID, primaryVdrs) - state, err := new( +func newStateFromDB(require *require.Assertions, db database.Database) *state { + execCfg, _ := config.GetExecutionConfig(nil) + state, err := newState( db, metrics.Noop, &config.Config{ - Validators: vdrs, + Validators: validators.NewManager(), }, + execCfg, &snow.Context{}, prometheus.NewRegistry(), reward.NewCalculator(reward.Config{ @@ -508,27 +788,156 @@ func newStateFromDB(require *require.Assertions, db database.Database) State { MintingPeriod: 365 * 24 * time.Hour, SupplyCap: 720 * units.MegaAvax, }), - &utils.Atomic[bool]{}, ) require.NoError(err) require.NotNil(state) return state } +func createPermissionlessValidatorTx(r *require.Assertions, subnetID ids.ID, validatorsData txs.Validator) *txs.AddPermissionlessValidatorTx { + var sig signer.Signer = &signer.Empty{} + 
if subnetID == constants.PrimaryNetworkID { + sk, err := bls.NewSecretKey() + r.NoError(err) + sig = signer.NewProofOfPossession(sk) + } + + return &txs.AddPermissionlessValidatorTx{ + BaseTx: txs.BaseTx{ + BaseTx: avax.BaseTx{ + NetworkID: constants.MainnetID, + BlockchainID: constants.PlatformChainID, + Outs: []*avax.TransferableOutput{}, + Ins: []*avax.TransferableInput{ + { + UTXOID: avax.UTXOID{ + TxID: ids.GenerateTestID(), + OutputIndex: 1, + }, + Asset: avax.Asset{ + ID: ids.GenerateTestID(), + }, + In: &secp256k1fx.TransferInput{ + Amt: 2 * units.KiloAvax, + Input: secp256k1fx.Input{ + SigIndices: []uint32{1}, + }, + }, + }, + }, + Memo: types.JSONByteSlice{}, + }, + }, + Validator: validatorsData, + Subnet: subnetID, + Signer: sig, + + StakeOuts: []*avax.TransferableOutput{ + { + Asset: avax.Asset{ + ID: ids.GenerateTestID(), + }, + Out: &secp256k1fx.TransferOutput{ + Amt: 2 * units.KiloAvax, + OutputOwners: secp256k1fx.OutputOwners{ + Locktime: 0, + Threshold: 1, + Addrs: []ids.ShortID{ + ids.GenerateTestShortID(), + }, + }, + }, + }, + }, + ValidatorRewardsOwner: &secp256k1fx.OutputOwners{ + Locktime: 0, + Threshold: 1, + Addrs: []ids.ShortID{ + ids.GenerateTestShortID(), + }, + }, + DelegatorRewardsOwner: &secp256k1fx.OutputOwners{ + Locktime: 0, + Threshold: 1, + Addrs: []ids.ShortID{ + ids.GenerateTestShortID(), + }, + }, + DelegationShares: reward.PercentDenominator, + } +} + +func createPermissionlessDelegatorTx(subnetID ids.ID, delegatorData txs.Validator) *txs.AddPermissionlessDelegatorTx { + return &txs.AddPermissionlessDelegatorTx{ + BaseTx: txs.BaseTx{ + BaseTx: avax.BaseTx{ + NetworkID: constants.MainnetID, + BlockchainID: constants.PlatformChainID, + Outs: []*avax.TransferableOutput{}, + Ins: []*avax.TransferableInput{ + { + UTXOID: avax.UTXOID{ + TxID: ids.GenerateTestID(), + OutputIndex: 1, + }, + Asset: avax.Asset{ + ID: ids.GenerateTestID(), + }, + In: &secp256k1fx.TransferInput{ + Amt: 2 * units.KiloAvax, + Input: secp256k1fx.Input{ + 
SigIndices: []uint32{1}, + }, + }, + }, + }, + Memo: types.JSONByteSlice{}, + }, + }, + Validator: delegatorData, + Subnet: subnetID, + + StakeOuts: []*avax.TransferableOutput{ + { + Asset: avax.Asset{ + ID: ids.GenerateTestID(), + }, + Out: &secp256k1fx.TransferOutput{ + Amt: 2 * units.KiloAvax, + OutputOwners: secp256k1fx.OutputOwners{ + Locktime: 0, + Threshold: 1, + Addrs: []ids.ShortID{ + ids.GenerateTestShortID(), + }, + }, + }, + }, + }, + DelegationRewardsOwner: &secp256k1fx.OutputOwners{ + Locktime: 0, + Threshold: 1, + Addrs: []ids.ShortID{ + ids.GenerateTestShortID(), + }, + }, + } +} + func TestValidatorWeightDiff(t *testing.T) { type test struct { - name string - ops []func(*ValidatorWeightDiff) error - shouldErr bool - expected ValidatorWeightDiff + name string + ops []func(*ValidatorWeightDiff) error + expected *ValidatorWeightDiff + expectedErr error } tests := []test{ { - name: "no ops", - ops: []func(*ValidatorWeightDiff) error{}, - shouldErr: false, - expected: ValidatorWeightDiff{}, + name: "no ops", + ops: []func(*ValidatorWeightDiff) error{}, + expected: &ValidatorWeightDiff{}, + expectedErr: nil, }, { name: "simple decrease", @@ -540,11 +949,11 @@ func TestValidatorWeightDiff(t *testing.T) { return d.Add(true, 1) }, }, - shouldErr: false, - expected: ValidatorWeightDiff{ + expected: &ValidatorWeightDiff{ Decrease: true, Amount: 2, }, + expectedErr: nil, }, { name: "decrease overflow", @@ -556,8 +965,8 @@ func TestValidatorWeightDiff(t *testing.T) { return d.Add(true, 1) }, }, - shouldErr: true, - expected: ValidatorWeightDiff{}, + expected: &ValidatorWeightDiff{}, + expectedErr: safemath.ErrOverflow, }, { name: "simple increase", @@ -569,11 +978,11 @@ func TestValidatorWeightDiff(t *testing.T) { return d.Add(false, 1) }, }, - shouldErr: false, - expected: ValidatorWeightDiff{ + expected: &ValidatorWeightDiff{ Decrease: false, Amount: 2, }, + expectedErr: nil, }, { name: "increase overflow", @@ -585,8 +994,8 @@ func TestValidatorWeightDiff(t 
*testing.T) { return d.Add(false, 1) }, }, - shouldErr: true, - expected: ValidatorWeightDiff{}, + expected: &ValidatorWeightDiff{}, + expectedErr: safemath.ErrOverflow, }, { name: "varied use", @@ -630,11 +1039,11 @@ func TestValidatorWeightDiff(t *testing.T) { return d.Add(true, 2) // Value -2 }, }, - shouldErr: false, - expected: ValidatorWeightDiff{ + expected: &ValidatorWeightDiff{ Decrease: true, Amount: 2, }, + expectedErr: nil, }, } @@ -646,22 +1055,21 @@ func TestValidatorWeightDiff(t *testing.T) { for _, op := range tt.ops { errs.Add(op(diff)) } - if tt.shouldErr { - require.Error(errs.Err) + require.ErrorIs(errs.Err, tt.expectedErr) + if tt.expectedErr != nil { return } - require.NoError(errs.Err) - require.Equal(tt.expected, *diff) + require.Equal(tt.expected, diff) }) } } // Tests PutCurrentValidator, DeleteCurrentValidator, GetCurrentValidator, -// GetValidatorWeightDiffs, GetValidatorPublicKeyDiffs +// ApplyValidatorWeightDiffs, ApplyValidatorPublicKeyDiffs func TestStateAddRemoveValidator(t *testing.T) { require := require.New(t) - state, _ := newInitializedState(require) + state := newInitializedState(require) var ( numNodes = 3 @@ -690,147 +1098,346 @@ func TestStateAddRemoveValidator(t *testing.T) { } type diff struct { - added []Staker - removed []Staker - expectedSubnetWeightDiff map[ids.NodeID]*ValidatorWeightDiff - expectedPrimaryNetworkWeightDiff map[ids.NodeID]*ValidatorWeightDiff - expectedPublicKeyDiff map[ids.NodeID]*bls.PublicKey + addedValidators []Staker + addedDelegators []Staker + removedDelegators []Staker + removedValidators []Staker + + expectedPrimaryValidatorSet map[ids.NodeID]*validators.GetValidatorOutput + expectedSubnetValidatorSet map[ids.NodeID]*validators.GetValidatorOutput } diffs := []diff{ + { + // Do nothing + expectedPrimaryValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{}, + expectedSubnetValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{}, + }, { // Add a subnet validator - added: 
[]Staker{stakers[0]}, - expectedPrimaryNetworkWeightDiff: map[ids.NodeID]*ValidatorWeightDiff{}, - expectedSubnetWeightDiff: map[ids.NodeID]*ValidatorWeightDiff{ + addedValidators: []Staker{stakers[0]}, + expectedPrimaryValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{}, + expectedSubnetValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{ stakers[0].NodeID: { - Decrease: false, - Amount: stakers[0].Weight, + NodeID: stakers[0].NodeID, + Weight: stakers[0].Weight, }, }, - // No diff because this is a subnet validator - expectedPublicKeyDiff: map[ids.NodeID]*bls.PublicKey{}, }, { // Remove a subnet validator - removed: []Staker{stakers[0]}, - expectedPrimaryNetworkWeightDiff: map[ids.NodeID]*ValidatorWeightDiff{}, - expectedSubnetWeightDiff: map[ids.NodeID]*ValidatorWeightDiff{ - stakers[0].NodeID: { - Decrease: true, - Amount: stakers[0].Weight, - }, - }, - // No diff because this is a subnet validator - expectedPublicKeyDiff: map[ids.NodeID]*bls.PublicKey{}, + removedValidators: []Staker{stakers[0]}, + expectedPrimaryValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{}, + expectedSubnetValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{}, }, { // Add a primary network validator - added: []Staker{stakers[1]}, - expectedPrimaryNetworkWeightDiff: map[ids.NodeID]*ValidatorWeightDiff{ + addedValidators: []Staker{stakers[1]}, + expectedPrimaryValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{ stakers[1].NodeID: { - Decrease: false, - Amount: stakers[1].Weight, + NodeID: stakers[1].NodeID, + PublicKey: stakers[1].PublicKey, + Weight: stakers[1].Weight, }, }, - expectedSubnetWeightDiff: map[ids.NodeID]*ValidatorWeightDiff{}, - expectedPublicKeyDiff: map[ids.NodeID]*bls.PublicKey{}, + expectedSubnetValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{}, }, - { // Remove a primary network validator - removed: []Staker{stakers[1]}, - expectedPrimaryNetworkWeightDiff: map[ids.NodeID]*ValidatorWeightDiff{ + { + // Do nothing 
+ expectedPrimaryValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{ stakers[1].NodeID: { - Decrease: true, - Amount: stakers[1].Weight, + NodeID: stakers[1].NodeID, + PublicKey: stakers[1].PublicKey, + Weight: stakers[1].Weight, }, }, - expectedSubnetWeightDiff: map[ids.NodeID]*ValidatorWeightDiff{}, - expectedPublicKeyDiff: map[ids.NodeID]*bls.PublicKey{ - stakers[1].NodeID: stakers[1].PublicKey, - }, + expectedSubnetValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{}, + }, + { // Remove a primary network validator + removedValidators: []Staker{stakers[1]}, + expectedPrimaryValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{}, + expectedSubnetValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{}, }, { // Add 2 subnet validators and a primary network validator - added: []Staker{stakers[0], stakers[1], stakers[2]}, - expectedPrimaryNetworkWeightDiff: map[ids.NodeID]*ValidatorWeightDiff{ + addedValidators: []Staker{stakers[0], stakers[1], stakers[2]}, + expectedPrimaryValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{ stakers[1].NodeID: { - Decrease: false, - Amount: stakers[1].Weight, + NodeID: stakers[1].NodeID, + PublicKey: stakers[1].PublicKey, + Weight: stakers[1].Weight, }, }, - expectedSubnetWeightDiff: map[ids.NodeID]*ValidatorWeightDiff{ + expectedSubnetValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{ stakers[0].NodeID: { - Decrease: false, - Amount: stakers[0].Weight, + NodeID: stakers[0].NodeID, + Weight: stakers[0].Weight, }, stakers[2].NodeID: { - Decrease: false, - Amount: stakers[2].Weight, + NodeID: stakers[2].NodeID, + Weight: stakers[2].Weight, }, }, - expectedPublicKeyDiff: map[ids.NodeID]*bls.PublicKey{}, }, { // Remove 2 subnet validators and a primary network validator. 
- removed: []Staker{stakers[0], stakers[1], stakers[2]}, - expectedPrimaryNetworkWeightDiff: map[ids.NodeID]*ValidatorWeightDiff{ - stakers[1].NodeID: { - Decrease: true, - Amount: stakers[1].Weight, - }, - }, - expectedSubnetWeightDiff: map[ids.NodeID]*ValidatorWeightDiff{ - stakers[0].NodeID: { - Decrease: true, - Amount: stakers[0].Weight, - }, - stakers[2].NodeID: { - Decrease: true, - Amount: stakers[2].Weight, - }, - }, - expectedPublicKeyDiff: map[ids.NodeID]*bls.PublicKey{ - stakers[1].NodeID: stakers[1].PublicKey, - }, + removedValidators: []Staker{stakers[0], stakers[1], stakers[2]}, + expectedPrimaryValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{}, + expectedSubnetValidatorSet: map[ids.NodeID]*validators.GetValidatorOutput{}, }, } - - for i, diff := range diffs { - for _, added := range diff.added { + for currentIndex, diff := range diffs { + for _, added := range diff.addedValidators { added := added state.PutCurrentValidator(&added) } - for _, removed := range diff.removed { + for _, added := range diff.addedDelegators { + added := added + state.PutCurrentDelegator(&added) + } + for _, removed := range diff.removedDelegators { + removed := removed + state.DeleteCurrentDelegator(&removed) + } + for _, removed := range diff.removedValidators { removed := removed state.DeleteCurrentValidator(&removed) } - newHeight := uint64(i + 1) - state.SetHeight(newHeight) + currentHeight := uint64(currentIndex + 1) + state.SetHeight(currentHeight) require.NoError(state.Commit()) - for _, added := range diff.added { + for _, added := range diff.addedValidators { gotValidator, err := state.GetCurrentValidator(added.SubnetID, added.NodeID) require.NoError(err) require.Equal(added, *gotValidator) } - for _, removed := range diff.removed { + for _, removed := range diff.removedValidators { _, err := state.GetCurrentValidator(removed.SubnetID, removed.NodeID) require.ErrorIs(err, database.ErrNotFound) } - // Assert that we get the expected weight diffs - 
gotSubnetWeightDiffs, err := state.GetValidatorWeightDiffs(newHeight, subnetID) + for i := 0; i < currentIndex; i++ { + prevDiff := diffs[i] + prevHeight := uint64(i + 1) + + primaryValidatorSet := copyValidatorSet(diff.expectedPrimaryValidatorSet) + require.NoError(state.ApplyValidatorWeightDiffs( + context.Background(), + primaryValidatorSet, + currentHeight, + prevHeight+1, + constants.PrimaryNetworkID, + )) + requireEqualWeightsValidatorSet(require, prevDiff.expectedPrimaryValidatorSet, primaryValidatorSet) + + require.NoError(state.ApplyValidatorPublicKeyDiffs( + context.Background(), + primaryValidatorSet, + currentHeight, + prevHeight+1, + )) + requireEqualPublicKeysValidatorSet(require, prevDiff.expectedPrimaryValidatorSet, primaryValidatorSet) + + subnetValidatorSet := copyValidatorSet(diff.expectedSubnetValidatorSet) + require.NoError(state.ApplyValidatorWeightDiffs( + context.Background(), + subnetValidatorSet, + currentHeight, + prevHeight+1, + subnetID, + )) + requireEqualWeightsValidatorSet(require, prevDiff.expectedSubnetValidatorSet, subnetValidatorSet) + } + } +} + +func copyValidatorSet( + input map[ids.NodeID]*validators.GetValidatorOutput, +) map[ids.NodeID]*validators.GetValidatorOutput { + result := make(map[ids.NodeID]*validators.GetValidatorOutput, len(input)) + for nodeID, vdr := range input { + vdrCopy := *vdr + result[nodeID] = &vdrCopy + } + return result +} + +func requireEqualWeightsValidatorSet( + require *require.Assertions, + expected map[ids.NodeID]*validators.GetValidatorOutput, + actual map[ids.NodeID]*validators.GetValidatorOutput, +) { + require.Len(actual, len(expected)) + for nodeID, expectedVdr := range expected { + require.Contains(actual, nodeID) + + actualVdr := actual[nodeID] + require.Equal(expectedVdr.NodeID, actualVdr.NodeID) + require.Equal(expectedVdr.Weight, actualVdr.Weight) + } +} + +func requireEqualPublicKeysValidatorSet( + require *require.Assertions, + expected map[ids.NodeID]*validators.GetValidatorOutput, + 
actual map[ids.NodeID]*validators.GetValidatorOutput, +) { + require.Len(actual, len(expected)) + for nodeID, expectedVdr := range expected { + require.Contains(actual, nodeID) + + actualVdr := actual[nodeID] + require.Equal(expectedVdr.NodeID, actualVdr.NodeID) + require.Equal(expectedVdr.PublicKey, actualVdr.PublicKey) + } +} + +func TestParsedStateBlock(t *testing.T) { + require := require.New(t) + + var blks []block.Block + + { + blk, err := block.NewApricotAbortBlock(ids.GenerateTestID(), 1000) + require.NoError(err) + blks = append(blks, blk) + } + + { + blk, err := block.NewApricotAtomicBlock(ids.GenerateTestID(), 1000, &txs.Tx{ + Unsigned: &txs.AdvanceTimeTx{ + Time: 1000, + }, + }) + require.NoError(err) + blks = append(blks, blk) + } + + { + blk, err := block.NewApricotCommitBlock(ids.GenerateTestID(), 1000) + require.NoError(err) + blks = append(blks, blk) + } + + { + tx := &txs.Tx{ + Unsigned: &txs.RewardValidatorTx{ + TxID: ids.GenerateTestID(), + }, + } + require.NoError(tx.Initialize(txs.Codec)) + blk, err := block.NewApricotProposalBlock(ids.GenerateTestID(), 1000, tx) + require.NoError(err) + blks = append(blks, blk) + } + + { + tx := &txs.Tx{ + Unsigned: &txs.RewardValidatorTx{ + TxID: ids.GenerateTestID(), + }, + } + require.NoError(tx.Initialize(txs.Codec)) + blk, err := block.NewApricotStandardBlock(ids.GenerateTestID(), 1000, []*txs.Tx{tx}) + require.NoError(err) + blks = append(blks, blk) + } + + { + blk, err := block.NewBanffAbortBlock(time.Now(), ids.GenerateTestID(), 1000) + require.NoError(err) + blks = append(blks, blk) + } + + { + blk, err := block.NewBanffCommitBlock(time.Now(), ids.GenerateTestID(), 1000) + require.NoError(err) + blks = append(blks, blk) + } + + { + tx := &txs.Tx{ + Unsigned: &txs.RewardValidatorTx{ + TxID: ids.GenerateTestID(), + }, + } + require.NoError(tx.Initialize(txs.Codec)) + + blk, err := block.NewBanffProposalBlock(time.Now(), ids.GenerateTestID(), 1000, tx, []*txs.Tx{}) + require.NoError(err) + blks = 
append(blks, blk) + } + + { + tx := &txs.Tx{ + Unsigned: &txs.RewardValidatorTx{ + TxID: ids.GenerateTestID(), + }, + } + require.NoError(tx.Initialize(txs.Codec)) + + blk, err := block.NewBanffStandardBlock(time.Now(), ids.GenerateTestID(), 1000, []*txs.Tx{tx}) + require.NoError(err) + blks = append(blks, blk) + } + + for _, blk := range blks { + stBlk := stateBlk{ + Blk: blk, + Bytes: blk.Bytes(), + Status: choices.Accepted, + } + + stBlkBytes, err := block.GenesisCodec.Marshal(block.CodecVersion, &stBlk) require.NoError(err) - require.Equal(diff.expectedSubnetWeightDiff, gotSubnetWeightDiffs) - gotWeightDiffs, err := state.GetValidatorWeightDiffs(newHeight, constants.PrimaryNetworkID) + gotBlk, _, isStateBlk, err := parseStoredBlock(stBlkBytes) require.NoError(err) - require.Equal(diff.expectedPrimaryNetworkWeightDiff, gotWeightDiffs) + require.True(isStateBlk) + require.Equal(blk.ID(), gotBlk.ID()) - // Assert that we get the expected public key diff - gotPublicKeyDiffs, err := state.GetValidatorPublicKeyDiffs(newHeight) + gotBlk, _, isStateBlk, err = parseStoredBlock(blk.Bytes()) require.NoError(err) - require.Equal(diff.expectedPublicKeyDiff, gotPublicKeyDiffs) + require.False(isStateBlk) + require.Equal(blk.ID(), gotBlk.ID()) } } + +func TestStateSubnetOwner(t *testing.T) { + require := require.New(t) + + state := newInitializedState(require) + ctrl := gomock.NewController(t) + + var ( + owner1 = fx.NewMockOwner(ctrl) + owner2 = fx.NewMockOwner(ctrl) + + createSubnetTx = &txs.Tx{ + Unsigned: &txs.CreateSubnetTx{ + BaseTx: txs.BaseTx{}, + Owner: owner1, + }, + } + + subnetID = createSubnetTx.ID() + ) + + owner, err := state.GetSubnetOwner(subnetID) + require.ErrorIs(err, database.ErrNotFound) + require.Nil(owner) + + state.AddSubnet(createSubnetTx) + state.SetSubnetOwner(subnetID, owner1) + + owner, err = state.GetSubnetOwner(subnetID) + require.NoError(err) + require.Equal(owner1, owner) + + state.SetSubnetOwner(subnetID, owner2) + owner, err = 
state.GetSubnetOwner(subnetID) + require.NoError(err) + require.Equal(owner2, owner) +} diff --git a/avalanchego/vms/platformvm/state/tree_iterator.go b/avalanchego/vms/platformvm/state/tree_iterator.go index a71b35e2..920bc137 100644 --- a/avalanchego/vms/platformvm/state/tree_iterator.go +++ b/avalanchego/vms/platformvm/state/tree_iterator.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package state diff --git a/avalanchego/vms/platformvm/state/tree_iterator_test.go b/avalanchego/vms/platformvm/state/tree_iterator_test.go index 57fa5727..ddb0e4e8 100644 --- a/avalanchego/vms/platformvm/state/tree_iterator_test.go +++ b/avalanchego/vms/platformvm/state/tree_iterator_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package state @@ -8,7 +8,6 @@ import ( "time" "github.com/google/btree" - "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" diff --git a/avalanchego/vms/platformvm/state/validator_uptimes.go b/avalanchego/vms/platformvm/state/validator_uptimes.go deleted file mode 100644 index 30be7243..00000000 --- a/avalanchego/vms/platformvm/state/validator_uptimes.go +++ /dev/null @@ -1,165 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package state - -import ( - "time" - - "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/set" - "github.com/ava-labs/avalanchego/vms/platformvm/genesis" - "github.com/ava-labs/avalanchego/vms/platformvm/txs" -) - -var _ validatorUptimes = (*uptimes)(nil) - -type uptimeAndReward struct { - UpDuration time.Duration `serialize:"true"` - LastUpdated uint64 `serialize:"true"` // Unix time in seconds - PotentialReward uint64 `serialize:"true"` - - txID ids.ID - lastUpdated time.Time -} - -type validatorUptimes interface { - // LoadUptime sets the uptime measurements of [vdrID] on [subnetID] to - // [uptime]. GetUptime and SetUptime will return an error if the [vdrID] and - // [subnetID] hasn't been loaded. This call will not result in a write to disk. - LoadUptime( - vdrID ids.NodeID, - subnetID ids.ID, - uptime *uptimeAndReward, - ) - - // GetUptime returns the current uptime measurements of [vdrID] on - // [subnetID]. - GetUptime( - vdrID ids.NodeID, - subnetID ids.ID, - ) (upDuration time.Duration, lastUpdated time.Time, err error) - - // SetUptime updates the uptime measurements of [vdrID] on [subnetID]. - // Unless these measurements are deleted first, the next call to - // WriteUptimes will write this update to disk. - SetUptime( - vdrID ids.NodeID, - subnetID ids.ID, - upDuration time.Duration, - lastUpdated time.Time, - ) error - - // DeleteUptime removes in-memory references to the uptimes measurements of - // [vdrID] on [subnetID]. If there were staged updates from a prior call to - // SetUptime, the updates will be dropped. This call will not result in a - // write to disk. - DeleteUptime(vdrID ids.NodeID, subnetID ids.ID) - - // WriteUptimes writes all staged updates from a prior call to SetUptime. 
- WriteUptimes( - dbPrimary database.KeyValueWriter, - dbSubnet database.KeyValueWriter, - ) error -} - -type uptimes struct { - uptimes map[ids.NodeID]map[ids.ID]*uptimeAndReward // vdrID -> subnetID -> uptimes - // updatedUptimes tracks the updates since the last call to WriteUptimes - updatedUptimes map[ids.NodeID]set.Set[ids.ID] // vdrID -> subnetIDs -} - -func newValidatorUptimes() validatorUptimes { - return &uptimes{ - uptimes: make(map[ids.NodeID]map[ids.ID]*uptimeAndReward), - updatedUptimes: make(map[ids.NodeID]set.Set[ids.ID]), - } -} - -func (u *uptimes) LoadUptime( - vdrID ids.NodeID, - subnetID ids.ID, - uptime *uptimeAndReward, -) { - subnetUptimes, ok := u.uptimes[vdrID] - if !ok { - subnetUptimes = make(map[ids.ID]*uptimeAndReward) - u.uptimes[vdrID] = subnetUptimes - } - subnetUptimes[subnetID] = uptime -} - -func (u *uptimes) GetUptime( - vdrID ids.NodeID, - subnetID ids.ID, -) (upDuration time.Duration, lastUpdated time.Time, err error) { - uptime, exists := u.uptimes[vdrID][subnetID] - if !exists { - return 0, time.Time{}, database.ErrNotFound - } - return uptime.UpDuration, uptime.lastUpdated, nil -} - -func (u *uptimes) SetUptime( - vdrID ids.NodeID, - subnetID ids.ID, - upDuration time.Duration, - lastUpdated time.Time, -) error { - uptime, exists := u.uptimes[vdrID][subnetID] - if !exists { - return database.ErrNotFound - } - uptime.UpDuration = upDuration - uptime.lastUpdated = lastUpdated - - updatedSubnetUptimes, ok := u.updatedUptimes[vdrID] - if !ok { - updatedSubnetUptimes = set.Set[ids.ID]{} - u.updatedUptimes[vdrID] = updatedSubnetUptimes - } - updatedSubnetUptimes.Add(subnetID) - return nil -} - -func (u *uptimes) DeleteUptime(vdrID ids.NodeID, subnetID ids.ID) { - subnetUptimes := u.uptimes[vdrID] - delete(subnetUptimes, subnetID) - if len(subnetUptimes) == 0 { - delete(u.uptimes, vdrID) - } - - subnetUpdatedUptimes := u.updatedUptimes[vdrID] - delete(subnetUpdatedUptimes, subnetID) - if len(subnetUpdatedUptimes) == 0 { - 
delete(u.updatedUptimes, vdrID) - } -} - -func (u *uptimes) WriteUptimes( - dbPrimary database.KeyValueWriter, - dbSubnet database.KeyValueWriter, -) error { - for vdrID, updatedSubnets := range u.updatedUptimes { - for subnetID := range updatedSubnets { - uptime := u.uptimes[vdrID][subnetID] - uptime.LastUpdated = uint64(uptime.lastUpdated.Unix()) - - uptimeBytes, err := genesis.Codec.Marshal(txs.Version, uptime) - if err != nil { - return err - } - db := dbSubnet - if subnetID == constants.PrimaryNetworkID { - db = dbPrimary - } - if err := db.Put(uptime.txID[:], uptimeBytes); err != nil { - return err - } - } - delete(u.updatedUptimes, vdrID) - } - return nil -} diff --git a/avalanchego/vms/platformvm/state/versions.go b/avalanchego/vms/platformvm/state/versions.go index dc2c3527..6afb0fe8 100644 --- a/avalanchego/vms/platformvm/state/versions.go +++ b/avalanchego/vms/platformvm/state/versions.go @@ -1,11 +1,9 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package state -import ( - "github.com/ava-labs/avalanchego/ids" -) +import "github.com/ava-labs/avalanchego/ids" type Versions interface { // GetState returns the state of the chain after [blkID] has been accepted. diff --git a/avalanchego/vms/platformvm/status/blockchain_status.go b/avalanchego/vms/platformvm/status/blockchain_status.go index 7866e0eb..5d427e5a 100644 --- a/avalanchego/vms/platformvm/status/blockchain_status.go +++ b/avalanchego/vms/platformvm/status/blockchain_status.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package status @@ -53,7 +53,7 @@ func (s *BlockchainStatus) UnmarshalJSON(b []byte) error { *s = Syncing case "null": default: - return errUnknownStatus + return errUnknownBlockchainStatus } return nil } diff --git a/avalanchego/vms/platformvm/status/blockchain_status_test.go b/avalanchego/vms/platformvm/status/blockchain_status_test.go index e4828e68..d0710d2f 100644 --- a/avalanchego/vms/platformvm/status/blockchain_status_test.go +++ b/avalanchego/vms/platformvm/status/blockchain_status_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package status @@ -26,28 +26,26 @@ func TestBlockchainStatusJSON(t *testing.T) { require.NoError(err) var parsedStatus BlockchainStatus - err = json.Unmarshal(statusJSON, &parsedStatus) - require.NoError(err) + require.NoError(json.Unmarshal(statusJSON, &parsedStatus)) require.Equal(status, parsedStatus) } { status := BlockchainStatus(math.MaxInt32) _, err := json.Marshal(status) - require.Error(err) + require.ErrorIs(err, errUnknownBlockchainStatus) } { status := Validating - err := json.Unmarshal([]byte("null"), &status) - require.NoError(err) + require.NoError(json.Unmarshal([]byte("null"), &status)) require.Equal(Validating, status) } { var status BlockchainStatus err := json.Unmarshal([]byte(`"not a status"`), &status) - require.Error(err) + require.ErrorIs(err, errUnknownBlockchainStatus) } } @@ -68,7 +66,7 @@ func TestBlockchainStatusVerify(t *testing.T) { badStatus := BlockchainStatus(math.MaxInt32) err := badStatus.Verify() - require.Error(err, "%s passed verification", badStatus) + require.ErrorIs(err, errUnknownBlockchainStatus) } func TestBlockchainStatusString(t *testing.T) { diff --git a/avalanchego/vms/platformvm/status/status.go b/avalanchego/vms/platformvm/status/status.go index a67fb6c3..2a674250 100644 --- a/avalanchego/vms/platformvm/status/status.go +++ 
b/avalanchego/vms/platformvm/status/status.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package status diff --git a/avalanchego/vms/platformvm/status/status_test.go b/avalanchego/vms/platformvm/status/status_test.go index a97552c1..cd6ed5f8 100644 --- a/avalanchego/vms/platformvm/status/status_test.go +++ b/avalanchego/vms/platformvm/status/status_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package status @@ -26,28 +26,26 @@ func TestStatusJSON(t *testing.T) { require.NoError(err) var parsedStatus Status - err = json.Unmarshal(statusJSON, &parsedStatus) - require.NoError(err) + require.NoError(json.Unmarshal(statusJSON, &parsedStatus)) require.Equal(status, parsedStatus) } { status := Status(math.MaxInt32) _, err := json.Marshal(status) - require.Error(err) + require.ErrorIs(err, errUnknownStatus) } { status := Committed - err := json.Unmarshal([]byte("null"), &status) - require.NoError(err) + require.NoError(json.Unmarshal([]byte("null"), &status)) require.Equal(Committed, status) } { var status Status err := json.Unmarshal([]byte(`"not a status"`), &status) - require.Error(err) + require.ErrorIs(err, errUnknownStatus) } } @@ -68,7 +66,7 @@ func TestStatusVerify(t *testing.T) { badStatus := Status(math.MaxInt32) err := badStatus.Verify() - require.Error(err, "%s passed verification", badStatus) + require.ErrorIs(err, errUnknownStatus) } func TestStatusString(t *testing.T) { diff --git a/avalanchego/vms/platformvm/txs/add_delegator_test.go b/avalanchego/vms/platformvm/txs/add_delegator_test.go index 45e4872b..ac3290fb 100644 --- a/avalanchego/vms/platformvm/txs/add_delegator_test.go +++ b/avalanchego/vms/platformvm/txs/add_delegator_test.go @@ -1,4 +1,4 @@ -// 
Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs @@ -10,7 +10,7 @@ import ( "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/vms/components/avax" @@ -23,8 +23,7 @@ var preFundedKeys = secp256k1.TestKeys() func TestAddDelegatorTxSyntacticVerify(t *testing.T) { require := require.New(t) clk := mockable.Clock{} - ctx := snow.DefaultContextTest() - ctx.AVAXAssetID = ids.GenerateTestID() + ctx := snowtest.Context(t, snowtest.PChainID) signers := [][]*secp256k1.PrivateKey{preFundedKeys} var ( @@ -34,10 +33,12 @@ func TestAddDelegatorTxSyntacticVerify(t *testing.T) { ) // Case : signed tx is nil - require.ErrorIs(stx.SyntacticVerify(ctx), ErrNilSignedTx) + err = stx.SyntacticVerify(ctx) + require.ErrorIs(err, ErrNilSignedTx) // Case : unsigned tx is nil - require.ErrorIs(addDelegatorTx.SyntacticVerify(ctx), ErrNilTx) + err = addDelegatorTx.SyntacticVerify(ctx) + require.ErrorIs(err, ErrNilTx) validatorWeight := uint64(2022) inputs := []*avax.TransferableInput{{ @@ -98,7 +99,8 @@ func TestAddDelegatorTxSyntacticVerify(t *testing.T) { // Case: signed tx not initialized stx = &Tx{Unsigned: addDelegatorTx} - require.ErrorIs(stx.SyntacticVerify(ctx), errSignedTxNotInitialized) + err = stx.SyntacticVerify(ctx) + require.ErrorIs(err, errSignedTxNotInitialized) // Case: valid tx stx, err = NewSigned(addDelegatorTx, Codec, signers) @@ -111,7 +113,7 @@ func TestAddDelegatorTxSyntacticVerify(t *testing.T) { stx, err = NewSigned(addDelegatorTx, Codec, signers) require.NoError(err) err = stx.SyntacticVerify(ctx) - require.Error(err) + require.ErrorIs(err, avax.ErrWrongNetworkID) 
addDelegatorTx.NetworkID-- // Case: delegator weight is not equal to total stake weight @@ -119,15 +121,15 @@ func TestAddDelegatorTxSyntacticVerify(t *testing.T) { addDelegatorTx.Wght = 2 * validatorWeight stx, err = NewSigned(addDelegatorTx, Codec, signers) require.NoError(err) - require.ErrorIs(stx.SyntacticVerify(ctx), errDelegatorWeightMismatch) + err = stx.SyntacticVerify(ctx) + require.ErrorIs(err, errDelegatorWeightMismatch) addDelegatorTx.Wght = validatorWeight } func TestAddDelegatorTxSyntacticVerifyNotAVAX(t *testing.T) { require := require.New(t) clk := mockable.Clock{} - ctx := snow.DefaultContextTest() - ctx.AVAXAssetID = ids.GenerateTestID() + ctx := snowtest.Context(t, snowtest.PChainID) signers := [][]*secp256k1.PrivateKey{preFundedKeys} var ( @@ -196,7 +198,9 @@ func TestAddDelegatorTxSyntacticVerifyNotAVAX(t *testing.T) { stx, err = NewSigned(addDelegatorTx, Codec, signers) require.NoError(err) - require.Error(stx.SyntacticVerify(ctx)) + + err = stx.SyntacticVerify(ctx) + require.ErrorIs(err, errStakeMustBeAVAX) } func TestAddDelegatorTxNotValidatorTx(t *testing.T) { diff --git a/avalanchego/vms/platformvm/txs/add_delegator_tx.go b/avalanchego/vms/platformvm/txs/add_delegator_tx.go index a44ddebb..3df97cf0 100644 --- a/avalanchego/vms/platformvm/txs/add_delegator_tx.go +++ b/avalanchego/vms/platformvm/txs/add_delegator_tx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package txs @@ -19,9 +19,11 @@ import ( ) var ( - _ DelegatorTx = (*AddDelegatorTx)(nil) + _ DelegatorTx = (*AddDelegatorTx)(nil) + _ ScheduledStaker = (*AddDelegatorTx)(nil) errDelegatorWeightMismatch = errors.New("delegator weight is not equal to total stake weight") + errStakeMustBeAVAX = errors.New("stake must be AVAX") ) // AddDelegatorTx is an unsigned addDelegatorTx @@ -105,7 +107,7 @@ func (tx *AddDelegatorTx) SyntacticVerify(ctx *snow.Context) error { assetID := out.AssetID() if assetID != ctx.AVAXAssetID { - return fmt.Errorf("stake output must be AVAX but is %q", assetID) + return fmt.Errorf("%w but is %q", errStakeMustBeAVAX, assetID) } } diff --git a/avalanchego/vms/platformvm/txs/add_permissionless_delegator_tx.go b/avalanchego/vms/platformvm/txs/add_permissionless_delegator_tx.go index 43db685d..9c29b973 100644 --- a/avalanchego/vms/platformvm/txs/add_permissionless_delegator_tx.go +++ b/avalanchego/vms/platformvm/txs/add_permissionless_delegator_tx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs @@ -17,7 +17,10 @@ import ( "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) -var _ DelegatorTx = (*AddPermissionlessDelegatorTx)(nil) +var ( + _ DelegatorTx = (*AddPermissionlessDelegatorTx)(nil) + _ ScheduledStaker = (*AddPermissionlessDelegatorTx)(nil) +) // AddPermissionlessDelegatorTx is an unsigned addPermissionlessDelegatorTx type AddPermissionlessDelegatorTx struct { diff --git a/avalanchego/vms/platformvm/txs/add_permissionless_delegator_tx_test.go b/avalanchego/vms/platformvm/txs/add_permissionless_delegator_tx_test.go index 2502cdea..d7483f5b 100644 --- a/avalanchego/vms/platformvm/txs/add_permissionless_delegator_tx_test.go +++ b/avalanchego/vms/platformvm/txs/add_permissionless_delegator_tx_test.go @@ -1,27 +1,1503 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
+// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs import ( + "encoding/json" "errors" "math" "testing" - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/fx" + "github.com/ava-labs/avalanchego/vms/platformvm/stakeable" "github.com/ava-labs/avalanchego/vms/secp256k1fx" + "github.com/ava-labs/avalanchego/vms/types" + + safemath "github.com/ava-labs/avalanchego/utils/math" ) var errCustom = errors.New("custom error") +func TestAddPermissionlessPrimaryDelegatorSerialization(t *testing.T) { + require := require.New(t) + + addr := ids.ShortID{ + 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, + } + + avaxAssetID, err := ids.FromString("FvwEAhmxKfeiG8SnEvq42hc6whRyY3EFYAvebMqDNDGCgxN5Z") + require.NoError(err) + + customAssetID := ids.ID{ + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + } + + txID := ids.ID{ + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + } + nodeID := ids.BuildTestNodeID([]byte{ + 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, + 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, + 0x11, 0x22, 0x33, 0x44, + }) + + simpleAddPrimaryTx := &AddPermissionlessDelegatorTx{ + BaseTx: BaseTx{ + BaseTx: avax.BaseTx{ + NetworkID: constants.MainnetID, + 
BlockchainID: constants.PlatformChainID, + Outs: []*avax.TransferableOutput{}, + Ins: []*avax.TransferableInput{ + { + UTXOID: avax.UTXOID{ + TxID: txID, + OutputIndex: 1, + }, + Asset: avax.Asset{ + ID: avaxAssetID, + }, + In: &secp256k1fx.TransferInput{ + Amt: 2 * units.KiloAvax, + Input: secp256k1fx.Input{ + SigIndices: []uint32{1}, + }, + }, + }, + }, + Memo: types.JSONByteSlice{}, + }, + }, + Validator: Validator{ + NodeID: nodeID, + Start: 12345, + End: 12345 + 200*24*60*60, + Wght: 2 * units.KiloAvax, + }, + Subnet: constants.PrimaryNetworkID, + StakeOuts: []*avax.TransferableOutput{ + { + Asset: avax.Asset{ + ID: avaxAssetID, + }, + Out: &secp256k1fx.TransferOutput{ + Amt: 2 * units.KiloAvax, + OutputOwners: secp256k1fx.OutputOwners{ + Locktime: 0, + Threshold: 1, + Addrs: []ids.ShortID{ + addr, + }, + }, + }, + }, + }, + DelegationRewardsOwner: &secp256k1fx.OutputOwners{ + Locktime: 0, + Threshold: 1, + Addrs: []ids.ShortID{ + addr, + }, + }, + } + avax.SortTransferableOutputs(simpleAddPrimaryTx.Outs, Codec) + avax.SortTransferableOutputs(simpleAddPrimaryTx.StakeOuts, Codec) + utils.Sort(simpleAddPrimaryTx.Ins) + require.NoError(simpleAddPrimaryTx.SyntacticVerify(&snow.Context{ + NetworkID: 1, + ChainID: constants.PlatformChainID, + AVAXAssetID: avaxAssetID, + })) + + expectedUnsignedSimpleAddPrimaryTxBytes := []byte{ + // Codec version + 0x00, 0x00, + // AddPermissionlessDelegatorTx type ID + 0x00, 0x00, 0x00, 0x1a, + // Mainnet network ID + 0x00, 0x00, 0x00, 0x01, + // P-chain blockchain ID + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Number of immediate outputs + 0x00, 0x00, 0x00, 0x00, + // Number of inputs + 0x00, 0x00, 0x00, 0x01, + // inputs[0] + // TxID + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 
0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + // Tx output index + 0x00, 0x00, 0x00, 0x01, + // Mainnet AVAX asset ID + 0x21, 0xe6, 0x73, 0x17, 0xcb, 0xc4, 0xbe, 0x2a, + 0xeb, 0x00, 0x67, 0x7a, 0xd6, 0x46, 0x27, 0x78, + 0xa8, 0xf5, 0x22, 0x74, 0xb9, 0xd6, 0x05, 0xdf, + 0x25, 0x91, 0xb2, 0x30, 0x27, 0xa8, 0x7d, 0xff, + // secp256k1fx transfer input type ID + 0x00, 0x00, 0x00, 0x05, + // Amount = 2k AVAX + 0x00, 0x00, 0x01, 0xd1, 0xa9, 0x4a, 0x20, 0x00, + // Number of input signature indices + 0x00, 0x00, 0x00, 0x01, + // signature index + 0x00, 0x00, 0x00, 0x01, + // memo length + 0x00, 0x00, 0x00, 0x00, + // NodeID + 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, + 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, + 0x11, 0x22, 0x33, 0x44, + // Start time + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x39, + // End time + 0x00, 0x00, 0x00, 0x00, 0x01, 0x07, 0xdc, 0x39, + // Stake weight + 0x00, 0x00, 0x01, 0xd1, 0xa9, 0x4a, 0x20, 0x00, + // Primary network subnetID + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Number of locked outputs + 0x00, 0x00, 0x00, 0x01, + // Mainnet AVAX asset ID + 0x21, 0xe6, 0x73, 0x17, 0xcb, 0xc4, 0xbe, 0x2a, + 0xeb, 0x00, 0x67, 0x7a, 0xd6, 0x46, 0x27, 0x78, + 0xa8, 0xf5, 0x22, 0x74, 0xb9, 0xd6, 0x05, 0xdf, + 0x25, 0x91, 0xb2, 0x30, 0x27, 0xa8, 0x7d, 0xff, + // secp256k1fx transferable output type ID + 0x00, 0x00, 0x00, 0x07, + // amount + 0x00, 0x00, 0x01, 0xd1, 0xa9, 0x4a, 0x20, 0x00, + // locktime + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // threshold + 0x00, 0x00, 0x00, 0x01, + // number of addresses + 0x00, 0x00, 0x00, 0x01, + // addresses[0] + 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, + // secp256k1fx owner type ID + 0x00, 0x00, 0x00, 0x0b, + // locktime + 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // threshold + 0x00, 0x00, 0x00, 0x01, + // number of addresses + 0x00, 0x00, 0x00, 0x01, + // addresses[0] + 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, + } + var unsignedSimpleAddPrimaryTx UnsignedTx = simpleAddPrimaryTx + unsignedSimpleAddPrimaryTxBytes, err := Codec.Marshal(CodecVersion, &unsignedSimpleAddPrimaryTx) + require.NoError(err) + require.Equal(expectedUnsignedSimpleAddPrimaryTxBytes, unsignedSimpleAddPrimaryTxBytes) + + complexAddPrimaryTx := &AddPermissionlessDelegatorTx{ + BaseTx: BaseTx{ + BaseTx: avax.BaseTx{ + NetworkID: constants.MainnetID, + BlockchainID: constants.PlatformChainID, + Outs: []*avax.TransferableOutput{ + { + Asset: avax.Asset{ + ID: avaxAssetID, + }, + Out: &secp256k1fx.TransferOutput{ + Amt: 1, + OutputOwners: secp256k1fx.OutputOwners{ + Locktime: 0, + Threshold: 1, + Addrs: []ids.ShortID{ + addr, + }, + }, + }, + }, + { + Asset: avax.Asset{ + ID: avaxAssetID, + }, + Out: &stakeable.LockOut{ + Locktime: 87654321, + TransferableOut: &secp256k1fx.TransferOutput{ + Amt: 1, + OutputOwners: secp256k1fx.OutputOwners{ + Locktime: 12345678, + Threshold: 0, + Addrs: []ids.ShortID{}, + }, + }, + }, + }, + { + Asset: avax.Asset{ + ID: customAssetID, + }, + Out: &stakeable.LockOut{ + Locktime: 876543210, + TransferableOut: &secp256k1fx.TransferOutput{ + Amt: 0xffffffffffffffff, + OutputOwners: secp256k1fx.OutputOwners{ + Locktime: 0, + Threshold: 1, + Addrs: []ids.ShortID{ + addr, + }, + }, + }, + }, + }, + }, + Ins: []*avax.TransferableInput{ + { + UTXOID: avax.UTXOID{ + TxID: txID, + OutputIndex: 1, + }, + Asset: avax.Asset{ + ID: avaxAssetID, + }, + In: &secp256k1fx.TransferInput{ + Amt: units.MegaAvax, + Input: secp256k1fx.Input{ + SigIndices: []uint32{2, 5}, + }, + }, + }, + { + UTXOID: avax.UTXOID{ + TxID: txID, + OutputIndex: 2, + }, + Asset: avax.Asset{ + ID: customAssetID, + }, + In: &stakeable.LockIn{ + Locktime: 
876543210, + TransferableIn: &secp256k1fx.TransferInput{ + Amt: 0xefffffffffffffff, + Input: secp256k1fx.Input{ + SigIndices: []uint32{0}, + }, + }, + }, + }, + { + UTXOID: avax.UTXOID{ + TxID: txID, + OutputIndex: 3, + }, + Asset: avax.Asset{ + ID: customAssetID, + }, + In: &secp256k1fx.TransferInput{ + Amt: 0x1000000000000000, + Input: secp256k1fx.Input{ + SigIndices: []uint32{}, + }, + }, + }, + }, + Memo: types.JSONByteSlice("😅\nwell that's\x01\x23\x45!"), + }, + }, + Validator: Validator{ + NodeID: nodeID, + Start: 12345, + End: 12345 + 200*24*60*60, + Wght: 5 * units.KiloAvax, + }, + Subnet: constants.PrimaryNetworkID, + StakeOuts: []*avax.TransferableOutput{ + { + Asset: avax.Asset{ + ID: avaxAssetID, + }, + Out: &secp256k1fx.TransferOutput{ + Amt: 2 * units.KiloAvax, + OutputOwners: secp256k1fx.OutputOwners{ + Locktime: 0, + Threshold: 1, + Addrs: []ids.ShortID{ + addr, + }, + }, + }, + }, + { + Asset: avax.Asset{ + ID: avaxAssetID, + }, + Out: &stakeable.LockOut{ + Locktime: 987654321, + TransferableOut: &secp256k1fx.TransferOutput{ + Amt: 3 * units.KiloAvax, + OutputOwners: secp256k1fx.OutputOwners{ + Locktime: 87654321, + Threshold: 0, + Addrs: []ids.ShortID{}, + }, + }, + }, + }, + }, + DelegationRewardsOwner: &secp256k1fx.OutputOwners{ + Locktime: 0, + Threshold: 0, + Addrs: []ids.ShortID{}, + }, + } + require.NoError(complexAddPrimaryTx.SyntacticVerify(&snow.Context{ + NetworkID: 1, + ChainID: constants.PlatformChainID, + AVAXAssetID: avaxAssetID, + })) + + expectedUnsignedComplexAddPrimaryTxBytes := []byte{ + // Codec version + 0x00, 0x00, + // AddPermissionlessDelegatorTx type ID + 0x00, 0x00, 0x00, 0x1a, + // Mainnet network ID + 0x00, 0x00, 0x00, 0x01, + // P-chain blockchain ID + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Number of immediate outputs + 0x00, 0x00, 0x00, 0x03, + // 
outputs[0] + // Mainnet AVAX asset ID + 0x21, 0xe6, 0x73, 0x17, 0xcb, 0xc4, 0xbe, 0x2a, + 0xeb, 0x00, 0x67, 0x7a, 0xd6, 0x46, 0x27, 0x78, + 0xa8, 0xf5, 0x22, 0x74, 0xb9, 0xd6, 0x05, 0xdf, + 0x25, 0x91, 0xb2, 0x30, 0x27, 0xa8, 0x7d, 0xff, + // secp256k1fx transfer output type ID + 0x00, 0x00, 0x00, 0x07, + // amount + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // locktime + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // threshold + 0x00, 0x00, 0x00, 0x01, + // number of addresses + 0x00, 0x00, 0x00, 0x01, + // addresses[0] + 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, + // outputs[1] + // Mainnet AVAX asset ID + 0x21, 0xe6, 0x73, 0x17, 0xcb, 0xc4, 0xbe, 0x2a, + 0xeb, 0x00, 0x67, 0x7a, 0xd6, 0x46, 0x27, 0x78, + 0xa8, 0xf5, 0x22, 0x74, 0xb9, 0xd6, 0x05, 0xdf, + 0x25, 0x91, 0xb2, 0x30, 0x27, 0xa8, 0x7d, 0xff, + // stakeable locked output type ID + 0x00, 0x00, 0x00, 0x16, + // locktime + 0x00, 0x00, 0x00, 0x00, 0x05, 0x39, 0x7f, 0xb1, + // secp256k1fx transfer output type ID + 0x00, 0x00, 0x00, 0x07, + // amount + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // locktime + 0x00, 0x00, 0x00, 0x00, 0x00, 0xbc, 0x61, 0x4e, + // threshold + 0x00, 0x00, 0x00, 0x00, + // number of addresses + 0x00, 0x00, 0x00, 0x00, + // outputs[2] + // custom asset ID + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + // stakeable locked output type ID + 0x00, 0x00, 0x00, 0x16, + // locktime + 0x00, 0x00, 0x00, 0x00, 0x34, 0x3e, 0xfc, 0xea, + // secp256k1fx transfer output type ID + 0x00, 0x00, 0x00, 0x07, + // amount + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + // locktime + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // threshold + 0x00, 0x00, 0x00, 0x01, + // number of addresses + 0x00, 0x00, 0x00, 0x01, + // address[0] + 0x44, 
0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, + // number of inputs + 0x00, 0x00, 0x00, 0x03, + // inputs[0] + // TxID + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + // Tx output index + 0x00, 0x00, 0x00, 0x01, + // Mainnet AVAX asset ID + 0x21, 0xe6, 0x73, 0x17, 0xcb, 0xc4, 0xbe, 0x2a, + 0xeb, 0x00, 0x67, 0x7a, 0xd6, 0x46, 0x27, 0x78, + 0xa8, 0xf5, 0x22, 0x74, 0xb9, 0xd6, 0x05, 0xdf, + 0x25, 0x91, 0xb2, 0x30, 0x27, 0xa8, 0x7d, 0xff, + // secp256k1fx transfer input type ID + 0x00, 0x00, 0x00, 0x05, + // amount + 0x00, 0x03, 0x8d, 0x7e, 0xa4, 0xc6, 0x80, 0x00, + // number of signature indices + 0x00, 0x00, 0x00, 0x02, + // first signature index + 0x00, 0x00, 0x00, 0x02, + // second signature index + 0x00, 0x00, 0x00, 0x05, + // inputs[1] + // TxID + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + // Tx output index + 0x00, 0x00, 0x00, 0x02, + // custom asset ID + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + // stakeable locked input type ID + 0x00, 0x00, 0x00, 0x15, + // locktime + 0x00, 0x00, 0x00, 0x00, 0x34, 0x3e, 0xfc, 0xea, + // secp256k1fx transfer input type ID + 0x00, 0x00, 0x00, 0x05, + // amount + 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + // number of signature indices + 0x00, 0x00, 0x00, 0x01, + // signature index + 0x00, 0x00, 0x00, 0x00, + // inputs[2] + // TxID + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 
0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + // Tx output index + 0x00, 0x00, 0x00, 0x03, + // custom asset ID + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + // secp256k1 transfer input type ID + 0x00, 0x00, 0x00, 0x05, + // amount + 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // number of signature indicies + 0x00, 0x00, 0x00, 0x00, + // memo length + 0x00, 0x00, 0x00, 0x14, + // memo + 0xf0, 0x9f, 0x98, 0x85, 0x0a, 0x77, 0x65, 0x6c, + 0x6c, 0x20, 0x74, 0x68, 0x61, 0x74, 0x27, 0x73, + 0x01, 0x23, 0x45, 0x21, + // nodeID + 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, + 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, + 0x11, 0x22, 0x33, 0x44, + // Start time + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x39, + // End time + 0x00, 0x00, 0x00, 0x00, 0x01, 0x07, 0xdc, 0x39, + // Stake weight + 0x00, 0x00, 0x04, 0x8c, 0x27, 0x39, 0x50, 0x00, + // Primary Network subnet ID + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // number of locked outputs + 0x00, 0x00, 0x00, 0x02, + // Mainnet AVAX asset ID + 0x21, 0xe6, 0x73, 0x17, 0xcb, 0xc4, 0xbe, 0x2a, + 0xeb, 0x00, 0x67, 0x7a, 0xd6, 0x46, 0x27, 0x78, + 0xa8, 0xf5, 0x22, 0x74, 0xb9, 0xd6, 0x05, 0xdf, + 0x25, 0x91, 0xb2, 0x30, 0x27, 0xa8, 0x7d, 0xff, + // secp256k1 transfer output type ID + 0x00, 0x00, 0x00, 0x07, + // amount + 0x00, 0x00, 0x01, 0xd1, 0xa9, 0x4a, 0x20, 0x00, + // locktime + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // threshold + 0x00, 0x00, 0x00, 0x01, + // number of addresses + 0x00, 0x00, 0x00, 0x01, + // addresses[0] + 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, + // Mainnet AVAX asset ID + 0x21, 0xe6, 0x73, 
0x17, 0xcb, 0xc4, 0xbe, 0x2a, + 0xeb, 0x00, 0x67, 0x7a, 0xd6, 0x46, 0x27, 0x78, + 0xa8, 0xf5, 0x22, 0x74, 0xb9, 0xd6, 0x05, 0xdf, + 0x25, 0x91, 0xb2, 0x30, 0x27, 0xa8, 0x7d, 0xff, + // stakeable locked output type ID + 0x00, 0x00, 0x00, 0x16, + // locktime + 0x00, 0x00, 0x00, 0x00, 0x3a, 0xde, 0x68, 0xb1, + // secp256k1 transfer output type ID + 0x00, 0x00, 0x00, 0x07, + // amount + 0x00, 0x00, 0x02, 0xba, 0x7d, 0xef, 0x30, 0x00, + // locktime + 0x00, 0x00, 0x00, 0x00, 0x05, 0x39, 0x7f, 0xb1, + // threshold + 0x00, 0x00, 0x00, 0x00, + // number of addresses + 0x00, 0x00, 0x00, 0x00, + // secp256k1 owner type ID + 0x00, 0x00, 0x00, 0x0b, + // locktime + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // threshold + 0x00, 0x00, 0x00, 0x00, + // number of addresses + 0x00, 0x00, 0x00, 0x00, + } + var unsignedComplexAddPrimaryTx UnsignedTx = complexAddPrimaryTx + unsignedComplexAddPrimaryTxBytes, err := Codec.Marshal(CodecVersion, &unsignedComplexAddPrimaryTx) + require.NoError(err) + require.Equal(expectedUnsignedComplexAddPrimaryTxBytes, unsignedComplexAddPrimaryTxBytes) + + aliaser := ids.NewAliaser() + require.NoError(aliaser.Alias(constants.PlatformChainID, "P")) + + unsignedComplexAddPrimaryTx.InitCtx(&snow.Context{ + NetworkID: 1, + ChainID: constants.PlatformChainID, + AVAXAssetID: avaxAssetID, + BCLookup: aliaser, + }) + + unsignedComplexAddPrimaryTxJSONBytes, err := json.MarshalIndent(unsignedComplexAddPrimaryTx, "", "\t") + require.NoError(err) + require.Equal(`{ + "networkID": 1, + "blockchainID": "11111111111111111111111111111111LpoYY", + "outputs": [ + { + "assetID": "FvwEAhmxKfeiG8SnEvq42hc6whRyY3EFYAvebMqDNDGCgxN5Z", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "output": { + "addresses": [ + "P-avax1g32kvaugnx4tk3z4vemc3xd2hdz92enh972wxr" + ], + "amount": 1, + "locktime": 0, + "threshold": 1 + } + }, + { + "assetID": "FvwEAhmxKfeiG8SnEvq42hc6whRyY3EFYAvebMqDNDGCgxN5Z", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", 
+ "output": { + "locktime": 87654321, + "output": { + "addresses": [], + "amount": 1, + "locktime": 12345678, + "threshold": 0 + } + } + }, + { + "assetID": "2Ab62uWwJw1T6VvmKD36ufsiuGZuX1pGykXAvPX1LtjTRHxwcc", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "output": { + "locktime": 876543210, + "output": { + "addresses": [ + "P-avax1g32kvaugnx4tk3z4vemc3xd2hdz92enh972wxr" + ], + "amount": 18446744073709551615, + "locktime": 0, + "threshold": 1 + } + } + } + ], + "inputs": [ + { + "txID": "2wiU5PnFTjTmoAXGZutHAsPF36qGGyLHYHj9G1Aucfmb3JFFGN", + "outputIndex": 1, + "assetID": "FvwEAhmxKfeiG8SnEvq42hc6whRyY3EFYAvebMqDNDGCgxN5Z", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "input": { + "amount": 1000000000000000, + "signatureIndices": [ + 2, + 5 + ] + } + }, + { + "txID": "2wiU5PnFTjTmoAXGZutHAsPF36qGGyLHYHj9G1Aucfmb3JFFGN", + "outputIndex": 2, + "assetID": "2Ab62uWwJw1T6VvmKD36ufsiuGZuX1pGykXAvPX1LtjTRHxwcc", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "input": { + "locktime": 876543210, + "input": { + "amount": 17293822569102704639, + "signatureIndices": [ + 0 + ] + } + } + }, + { + "txID": "2wiU5PnFTjTmoAXGZutHAsPF36qGGyLHYHj9G1Aucfmb3JFFGN", + "outputIndex": 3, + "assetID": "2Ab62uWwJw1T6VvmKD36ufsiuGZuX1pGykXAvPX1LtjTRHxwcc", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "input": { + "amount": 1152921504606846976, + "signatureIndices": [] + } + } + ], + "memo": "0xf09f98850a77656c6c2074686174277301234521", + "validator": { + "nodeID": "NodeID-2ZbTY9GatRTrfinAoYiYLcf6CvrPAUYgo", + "start": 12345, + "end": 17292345, + "weight": 5000000000000 + }, + "subnetID": "11111111111111111111111111111111LpoYY", + "stake": [ + { + "assetID": "FvwEAhmxKfeiG8SnEvq42hc6whRyY3EFYAvebMqDNDGCgxN5Z", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "output": { + "addresses": [ + "P-avax1g32kvaugnx4tk3z4vemc3xd2hdz92enh972wxr" + ], + "amount": 2000000000000, + "locktime": 0, + "threshold": 1 
+ } + }, + { + "assetID": "FvwEAhmxKfeiG8SnEvq42hc6whRyY3EFYAvebMqDNDGCgxN5Z", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "output": { + "locktime": 987654321, + "output": { + "addresses": [], + "amount": 3000000000000, + "locktime": 87654321, + "threshold": 0 + } + } + } + ], + "rewardsOwner": { + "addresses": [], + "locktime": 0, + "threshold": 0 + } +}`, string(unsignedComplexAddPrimaryTxJSONBytes)) +} + +func TestAddPermissionlessSubnetDelegatorSerialization(t *testing.T) { + require := require.New(t) + + addr := ids.ShortID{ + 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, + } + + avaxAssetID, err := ids.FromString("FvwEAhmxKfeiG8SnEvq42hc6whRyY3EFYAvebMqDNDGCgxN5Z") + require.NoError(err) + + customAssetID := ids.ID{ + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + } + + txID := ids.ID{ + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + } + nodeID := ids.BuildTestNodeID([]byte{ + 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, + 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, + 0x11, 0x22, 0x33, 0x44, + }) + subnetID := ids.ID{ + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, + 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, + 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, + } + + simpleAddSubnetTx := &AddPermissionlessDelegatorTx{ + BaseTx: BaseTx{ + BaseTx: avax.BaseTx{ + NetworkID: constants.MainnetID, + BlockchainID: constants.PlatformChainID, + Outs: []*avax.TransferableOutput{}, + Ins: []*avax.TransferableInput{ + { + UTXOID: avax.UTXOID{ + TxID: txID, + OutputIndex: 1, + }, + Asset: avax.Asset{ + ID: 
avaxAssetID, + }, + In: &secp256k1fx.TransferInput{ + Amt: units.MilliAvax, + Input: secp256k1fx.Input{ + SigIndices: []uint32{1}, + }, + }, + }, + { + UTXOID: avax.UTXOID{ + TxID: txID, + OutputIndex: 2, + }, + Asset: avax.Asset{ + ID: customAssetID, + }, + In: &secp256k1fx.TransferInput{ + Amt: 1, + Input: secp256k1fx.Input{ + SigIndices: []uint32{1}, + }, + }, + }, + }, + Memo: types.JSONByteSlice{}, + }, + }, + Validator: Validator{ + NodeID: nodeID, + Start: 12345, + End: 12346, + Wght: 1, + }, + Subnet: subnetID, + StakeOuts: []*avax.TransferableOutput{ + { + Asset: avax.Asset{ + ID: customAssetID, + }, + Out: &secp256k1fx.TransferOutput{ + Amt: 1, + OutputOwners: secp256k1fx.OutputOwners{ + Locktime: 0, + Threshold: 1, + Addrs: []ids.ShortID{ + addr, + }, + }, + }, + }, + }, + DelegationRewardsOwner: &secp256k1fx.OutputOwners{ + Locktime: 0, + Threshold: 1, + Addrs: []ids.ShortID{ + addr, + }, + }, + } + avax.SortTransferableOutputs(simpleAddSubnetTx.Outs, Codec) + avax.SortTransferableOutputs(simpleAddSubnetTx.StakeOuts, Codec) + utils.Sort(simpleAddSubnetTx.Ins) + require.NoError(simpleAddSubnetTx.SyntacticVerify(&snow.Context{ + NetworkID: 1, + ChainID: constants.PlatformChainID, + AVAXAssetID: avaxAssetID, + })) + + expectedUnsignedSimpleAddSubnetTxBytes := []byte{ + // Codec version + 0x00, 0x00, + // AddPermissionlessDelegationTx type ID + 0x00, 0x00, 0x00, 0x1a, + // Mainnet network ID + 0x00, 0x00, 0x00, 0x01, + // P-chain blockchain ID + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Number of immediate outputs + 0x00, 0x00, 0x00, 0x00, + // Number of inputs + 0x00, 0x00, 0x00, 0x02, + // inputs[0] + // TxID + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 
0x99, 0x88, + // Tx output index + 0x00, 0x00, 0x00, 0x01, + // Mainnet AVAX asset ID + 0x21, 0xe6, 0x73, 0x17, 0xcb, 0xc4, 0xbe, 0x2a, + 0xeb, 0x00, 0x67, 0x7a, 0xd6, 0x46, 0x27, 0x78, + 0xa8, 0xf5, 0x22, 0x74, 0xb9, 0xd6, 0x05, 0xdf, + 0x25, 0x91, 0xb2, 0x30, 0x27, 0xa8, 0x7d, 0xff, + // secp256k1fx transfer input type ID + 0x00, 0x00, 0x00, 0x05, + // Amount = 1 MilliAVAX + 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x42, 0x40, + // Number of input signature indices + 0x00, 0x00, 0x00, 0x01, + // signature index + 0x00, 0x00, 0x00, 0x01, + // inputs[1] + // TxID + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + // Tx output index + 0x00, 0x00, 0x00, 0x02, + // custom asset ID + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + // secp256k1fx transfer input type ID + 0x00, 0x00, 0x00, 0x05, + // Amount + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // Number of input signature indices + 0x00, 0x00, 0x00, 0x01, + // signature index + 0x00, 0x00, 0x00, 0x01, + // memo length + 0x00, 0x00, 0x00, 0x00, + // NodeID + 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, + 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, + 0x11, 0x22, 0x33, 0x44, + // Start time + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x39, + // End time + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x3a, + // Stake weight + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // SubnetID + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, + 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, + 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, + // Number of locked outputs + 0x00, 0x00, 0x00, 0x01, + // custom asset ID + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 
0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + // secp256k1fx transferable output type ID + 0x00, 0x00, 0x00, 0x07, + // amount + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // locktime + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // threshold + 0x00, 0x00, 0x00, 0x01, + // number of addresses + 0x00, 0x00, 0x00, 0x01, + // addresses[0] + 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, + // secp256k1fx owner type ID + 0x00, 0x00, 0x00, 0x0b, + // locktime + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // threshold + 0x00, 0x00, 0x00, 0x01, + // number of addresses + 0x00, 0x00, 0x00, 0x01, + // addresses[0] + 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, + } + var unsignedSimpleAddSubnetTx UnsignedTx = simpleAddSubnetTx + unsignedSimpleAddSubnetTxBytes, err := Codec.Marshal(CodecVersion, &unsignedSimpleAddSubnetTx) + require.NoError(err) + require.Equal(expectedUnsignedSimpleAddSubnetTxBytes, unsignedSimpleAddSubnetTxBytes) + + complexAddSubnetTx := &AddPermissionlessDelegatorTx{ + BaseTx: BaseTx{ + BaseTx: avax.BaseTx{ + NetworkID: constants.MainnetID, + BlockchainID: constants.PlatformChainID, + Outs: []*avax.TransferableOutput{ + { + Asset: avax.Asset{ + ID: avaxAssetID, + }, + Out: &secp256k1fx.TransferOutput{ + Amt: 1, + OutputOwners: secp256k1fx.OutputOwners{ + Locktime: 0, + Threshold: 1, + Addrs: []ids.ShortID{ + addr, + }, + }, + }, + }, + { + Asset: avax.Asset{ + ID: avaxAssetID, + }, + Out: &stakeable.LockOut{ + Locktime: 87654321, + TransferableOut: &secp256k1fx.TransferOutput{ + Amt: 1, + OutputOwners: secp256k1fx.OutputOwners{ + Locktime: 12345678, + Threshold: 0, + Addrs: []ids.ShortID{}, + }, + }, + }, + }, + { + Asset: avax.Asset{ + ID: customAssetID, + }, + Out: 
&stakeable.LockOut{ + Locktime: 876543210, + TransferableOut: &secp256k1fx.TransferOutput{ + Amt: 0xfffffffffffffff0, + OutputOwners: secp256k1fx.OutputOwners{ + Locktime: 0, + Threshold: 1, + Addrs: []ids.ShortID{ + addr, + }, + }, + }, + }, + }, + }, + Ins: []*avax.TransferableInput{ + { + UTXOID: avax.UTXOID{ + TxID: txID, + OutputIndex: 1, + }, + Asset: avax.Asset{ + ID: avaxAssetID, + }, + In: &secp256k1fx.TransferInput{ + Amt: units.MegaAvax, + Input: secp256k1fx.Input{ + SigIndices: []uint32{2, 5}, + }, + }, + }, + { + UTXOID: avax.UTXOID{ + TxID: txID, + OutputIndex: 2, + }, + Asset: avax.Asset{ + ID: customAssetID, + }, + In: &stakeable.LockIn{ + Locktime: 876543210, + TransferableIn: &secp256k1fx.TransferInput{ + Amt: 0xefffffffffffffff, + Input: secp256k1fx.Input{ + SigIndices: []uint32{0}, + }, + }, + }, + }, + { + UTXOID: avax.UTXOID{ + TxID: txID, + OutputIndex: 3, + }, + Asset: avax.Asset{ + ID: customAssetID, + }, + In: &secp256k1fx.TransferInput{ + Amt: 0x1000000000000000, + Input: secp256k1fx.Input{ + SigIndices: []uint32{}, + }, + }, + }, + }, + Memo: types.JSONByteSlice("😅\nwell that's\x01\x23\x45!"), + }, + }, + Validator: Validator{ + NodeID: nodeID, + Start: 12345, + End: 12345 + 1, + Wght: 9, + }, + Subnet: subnetID, + StakeOuts: []*avax.TransferableOutput{ + { + Asset: avax.Asset{ + ID: customAssetID, + }, + Out: &secp256k1fx.TransferOutput{ + Amt: 2, + OutputOwners: secp256k1fx.OutputOwners{ + Locktime: 0, + Threshold: 1, + Addrs: []ids.ShortID{ + addr, + }, + }, + }, + }, + { + Asset: avax.Asset{ + ID: customAssetID, + }, + Out: &stakeable.LockOut{ + Locktime: 987654321, + TransferableOut: &secp256k1fx.TransferOutput{ + Amt: 7, + OutputOwners: secp256k1fx.OutputOwners{ + Locktime: 87654321, + Threshold: 0, + Addrs: []ids.ShortID{}, + }, + }, + }, + }, + }, + DelegationRewardsOwner: &secp256k1fx.OutputOwners{ + Locktime: 0, + Threshold: 0, + Addrs: []ids.ShortID{}, + }, + } + 
require.NoError(complexAddSubnetTx.SyntacticVerify(&snow.Context{ + NetworkID: 1, + ChainID: constants.PlatformChainID, + AVAXAssetID: avaxAssetID, + })) + + expectedUnsignedComplexAddSubnetTxBytes := []byte{ + // Codec version + 0x00, 0x00, + // AddPermissionlessDelegatorTx type ID + 0x00, 0x00, 0x00, 0x1a, + // Mainnet network ID + 0x00, 0x00, 0x00, 0x01, + // P-chain blockchain ID + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Number of immediate outputs + 0x00, 0x00, 0x00, 0x03, + // outputs[0] + // Mainnet AVAX asset ID + 0x21, 0xe6, 0x73, 0x17, 0xcb, 0xc4, 0xbe, 0x2a, + 0xeb, 0x00, 0x67, 0x7a, 0xd6, 0x46, 0x27, 0x78, + 0xa8, 0xf5, 0x22, 0x74, 0xb9, 0xd6, 0x05, 0xdf, + 0x25, 0x91, 0xb2, 0x30, 0x27, 0xa8, 0x7d, 0xff, + // secp256k1fx transfer output type ID + 0x00, 0x00, 0x00, 0x07, + // amount + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // locktime + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // threshold + 0x00, 0x00, 0x00, 0x01, + // number of addresses + 0x00, 0x00, 0x00, 0x01, + // addresses[0] + 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, + // outputs[1] + // Mainnet AVAX asset ID + 0x21, 0xe6, 0x73, 0x17, 0xcb, 0xc4, 0xbe, 0x2a, + 0xeb, 0x00, 0x67, 0x7a, 0xd6, 0x46, 0x27, 0x78, + 0xa8, 0xf5, 0x22, 0x74, 0xb9, 0xd6, 0x05, 0xdf, + 0x25, 0x91, 0xb2, 0x30, 0x27, 0xa8, 0x7d, 0xff, + // stakeable locked output type ID + 0x00, 0x00, 0x00, 0x16, + // locktime + 0x00, 0x00, 0x00, 0x00, 0x05, 0x39, 0x7f, 0xb1, + // secp256k1fx transfer output type ID + 0x00, 0x00, 0x00, 0x07, + // amount + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // locktime + 0x00, 0x00, 0x00, 0x00, 0x00, 0xbc, 0x61, 0x4e, + // threshold + 0x00, 0x00, 0x00, 0x00, + // number of addresses + 0x00, 0x00, 0x00, 0x00, + // outputs[2] + // 
custom asset ID + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + // stakeable locked output type ID + 0x00, 0x00, 0x00, 0x16, + // locktime + 0x00, 0x00, 0x00, 0x00, 0x34, 0x3e, 0xfc, 0xea, + // secp256k1fx transfer output type ID + 0x00, 0x00, 0x00, 0x07, + // amount + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, + // locktime + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // threshold + 0x00, 0x00, 0x00, 0x01, + // number of addresses + 0x00, 0x00, 0x00, 0x01, + // address[0] + 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, + // number of inputs + 0x00, 0x00, 0x00, 0x03, + // inputs[0] + // TxID + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + // Tx output index + 0x00, 0x00, 0x00, 0x01, + // Mainnet AVAX asset ID + 0x21, 0xe6, 0x73, 0x17, 0xcb, 0xc4, 0xbe, 0x2a, + 0xeb, 0x00, 0x67, 0x7a, 0xd6, 0x46, 0x27, 0x78, + 0xa8, 0xf5, 0x22, 0x74, 0xb9, 0xd6, 0x05, 0xdf, + 0x25, 0x91, 0xb2, 0x30, 0x27, 0xa8, 0x7d, 0xff, + // secp256k1fx transfer input type ID + 0x00, 0x00, 0x00, 0x05, + // amount + 0x00, 0x03, 0x8d, 0x7e, 0xa4, 0xc6, 0x80, 0x00, + // number of signature indices + 0x00, 0x00, 0x00, 0x02, + // first signature index + 0x00, 0x00, 0x00, 0x02, + // second signature index + 0x00, 0x00, 0x00, 0x05, + // inputs[1] + // TxID + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + // Tx output index + 0x00, 0x00, 0x00, 0x02, + // custom asset ID + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 
0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + // stakeable locked input type ID + 0x00, 0x00, 0x00, 0x15, + // locktime + 0x00, 0x00, 0x00, 0x00, 0x34, 0x3e, 0xfc, 0xea, + // secp256k1fx transfer input type ID + 0x00, 0x00, 0x00, 0x05, + // amount + 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + // number of signature indices + 0x00, 0x00, 0x00, 0x01, + // signature index + 0x00, 0x00, 0x00, 0x00, + // inputs[2] + // TxID + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + // Tx output index + 0x00, 0x00, 0x00, 0x03, + // custom asset ID + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + // secp256k1 transfer input type ID + 0x00, 0x00, 0x00, 0x05, + // amount + 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // number of signature indicies + 0x00, 0x00, 0x00, 0x00, + // memo length + 0x00, 0x00, 0x00, 0x14, + // memo + 0xf0, 0x9f, 0x98, 0x85, 0x0a, 0x77, 0x65, 0x6c, + 0x6c, 0x20, 0x74, 0x68, 0x61, 0x74, 0x27, 0x73, + 0x01, 0x23, 0x45, 0x21, + // nodeID + 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, + 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, + 0x11, 0x22, 0x33, 0x44, + // Start time + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x39, + // End time + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x3a, + // Stake weight + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, + // subnetID + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, + 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, + 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, + // number of locked outputs + 0x00, 0x00, 0x00, 0x02, + // custom asset ID + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 
0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + // secp256k1 transfer output type ID + 0x00, 0x00, 0x00, 0x07, + // amount + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // locktime + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // threshold + 0x00, 0x00, 0x00, 0x01, + // number of addresses + 0x00, 0x00, 0x00, 0x01, + // addresses[0] + 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, + // custom asset ID + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + // stakeable locked output type ID + 0x00, 0x00, 0x00, 0x16, + // locktime + 0x00, 0x00, 0x00, 0x00, 0x3a, 0xde, 0x68, 0xb1, + // secp256k1 transfer output type ID + 0x00, 0x00, 0x00, 0x07, + // amount + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, + // locktime + 0x00, 0x00, 0x00, 0x00, 0x05, 0x39, 0x7f, 0xb1, + // threshold + 0x00, 0x00, 0x00, 0x00, + // number of addresses + 0x00, 0x00, 0x00, 0x00, + // secp256k1 owner type ID + 0x00, 0x00, 0x00, 0x0b, + // locktime + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // threshold + 0x00, 0x00, 0x00, 0x00, + // number of addresses + 0x00, 0x00, 0x00, 0x00, + } + var unsignedComplexAddSubnetTx UnsignedTx = complexAddSubnetTx + unsignedComplexAddSubnetTxBytes, err := Codec.Marshal(CodecVersion, &unsignedComplexAddSubnetTx) + require.NoError(err) + require.Equal(expectedUnsignedComplexAddSubnetTxBytes, unsignedComplexAddSubnetTxBytes) + + aliaser := ids.NewAliaser() + require.NoError(aliaser.Alias(constants.PlatformChainID, "P")) + + unsignedComplexAddSubnetTx.InitCtx(&snow.Context{ + NetworkID: 1, + ChainID: constants.PlatformChainID, + AVAXAssetID: avaxAssetID, + BCLookup: aliaser, + }) + + 
unsignedComplexAddSubnetTxJSONBytes, err := json.MarshalIndent(unsignedComplexAddSubnetTx, "", "\t") + require.NoError(err) + require.Equal(`{ + "networkID": 1, + "blockchainID": "11111111111111111111111111111111LpoYY", + "outputs": [ + { + "assetID": "FvwEAhmxKfeiG8SnEvq42hc6whRyY3EFYAvebMqDNDGCgxN5Z", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "output": { + "addresses": [ + "P-avax1g32kvaugnx4tk3z4vemc3xd2hdz92enh972wxr" + ], + "amount": 1, + "locktime": 0, + "threshold": 1 + } + }, + { + "assetID": "FvwEAhmxKfeiG8SnEvq42hc6whRyY3EFYAvebMqDNDGCgxN5Z", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "output": { + "locktime": 87654321, + "output": { + "addresses": [], + "amount": 1, + "locktime": 12345678, + "threshold": 0 + } + } + }, + { + "assetID": "2Ab62uWwJw1T6VvmKD36ufsiuGZuX1pGykXAvPX1LtjTRHxwcc", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "output": { + "locktime": 876543210, + "output": { + "addresses": [ + "P-avax1g32kvaugnx4tk3z4vemc3xd2hdz92enh972wxr" + ], + "amount": 18446744073709551600, + "locktime": 0, + "threshold": 1 + } + } + } + ], + "inputs": [ + { + "txID": "2wiU5PnFTjTmoAXGZutHAsPF36qGGyLHYHj9G1Aucfmb3JFFGN", + "outputIndex": 1, + "assetID": "FvwEAhmxKfeiG8SnEvq42hc6whRyY3EFYAvebMqDNDGCgxN5Z", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "input": { + "amount": 1000000000000000, + "signatureIndices": [ + 2, + 5 + ] + } + }, + { + "txID": "2wiU5PnFTjTmoAXGZutHAsPF36qGGyLHYHj9G1Aucfmb3JFFGN", + "outputIndex": 2, + "assetID": "2Ab62uWwJw1T6VvmKD36ufsiuGZuX1pGykXAvPX1LtjTRHxwcc", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "input": { + "locktime": 876543210, + "input": { + "amount": 17293822569102704639, + "signatureIndices": [ + 0 + ] + } + } + }, + { + "txID": "2wiU5PnFTjTmoAXGZutHAsPF36qGGyLHYHj9G1Aucfmb3JFFGN", + "outputIndex": 3, + "assetID": "2Ab62uWwJw1T6VvmKD36ufsiuGZuX1pGykXAvPX1LtjTRHxwcc", + "fxID": 
"spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "input": { + "amount": 1152921504606846976, + "signatureIndices": [] + } + } + ], + "memo": "0xf09f98850a77656c6c2074686174277301234521", + "validator": { + "nodeID": "NodeID-2ZbTY9GatRTrfinAoYiYLcf6CvrPAUYgo", + "start": 12345, + "end": 12346, + "weight": 9 + }, + "subnetID": "SkB92YpWm4UpburLz9tEKZw2i67H3FF6YkjaU4BkFUDTG9Xm", + "stake": [ + { + "assetID": "2Ab62uWwJw1T6VvmKD36ufsiuGZuX1pGykXAvPX1LtjTRHxwcc", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "output": { + "addresses": [ + "P-avax1g32kvaugnx4tk3z4vemc3xd2hdz92enh972wxr" + ], + "amount": 2, + "locktime": 0, + "threshold": 1 + } + }, + { + "assetID": "2Ab62uWwJw1T6VvmKD36ufsiuGZuX1pGykXAvPX1LtjTRHxwcc", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "output": { + "locktime": 987654321, + "output": { + "addresses": [], + "amount": 7, + "locktime": 87654321, + "threshold": 0 + } + } + } + ], + "rewardsOwner": { + "addresses": [], + "locktime": 0, + "threshold": 0 + } +}`, string(unsignedComplexAddSubnetTxJSONBytes)) +} + func TestAddPermissionlessDelegatorTxSyntacticVerify(t *testing.T) { type test struct { name string @@ -82,6 +1558,28 @@ func TestAddPermissionlessDelegatorTxSyntacticVerify(t *testing.T) { }, err: errNoStake, }, + { + name: "invalid BaseTx", + txFunc: func(*gomock.Controller) *AddPermissionlessDelegatorTx { + return &AddPermissionlessDelegatorTx{ + BaseTx: invalidBaseTx, + Validator: Validator{ + NodeID: ids.GenerateTestNodeID(), + }, + StakeOuts: []*avax.TransferableOutput{ + { + Asset: avax.Asset{ + ID: ids.GenerateTestID(), + }, + Out: &secp256k1fx.TransferOutput{ + Amt: 1, + }, + }, + }, + } + }, + err: avax.ErrWrongNetworkID, + }, { name: "invalid rewards owner", txFunc: func(ctrl *gomock.Controller) *AddPermissionlessDelegatorTx { @@ -204,6 +1702,42 @@ func TestAddPermissionlessDelegatorTxSyntacticVerify(t *testing.T) { }, err: errOutputsNotSorted, }, + { + name: "stake overflow", + txFunc: 
func(ctrl *gomock.Controller) *AddPermissionlessDelegatorTx { + rewardsOwner := fx.NewMockOwner(ctrl) + rewardsOwner.EXPECT().Verify().Return(nil).AnyTimes() + assetID := ids.GenerateTestID() + return &AddPermissionlessDelegatorTx{ + BaseTx: validBaseTx, + Validator: Validator{ + NodeID: ids.GenerateTestNodeID(), + Wght: 1, + }, + Subnet: ids.GenerateTestID(), + StakeOuts: []*avax.TransferableOutput{ + { + Asset: avax.Asset{ + ID: assetID, + }, + Out: &secp256k1fx.TransferOutput{ + Amt: math.MaxUint64, + }, + }, + { + Asset: avax.Asset{ + ID: assetID, + }, + Out: &secp256k1fx.TransferOutput{ + Amt: 2, + }, + }, + }, + DelegationRewardsOwner: rewardsOwner, + } + }, + err: safemath.ErrOverflow, + }, { name: "weight mismatch", txFunc: func(ctrl *gomock.Controller) *AddPermissionlessDelegatorTx { @@ -314,72 +1848,12 @@ func TestAddPermissionlessDelegatorTxSyntacticVerify(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { ctrl := gomock.NewController(t) - defer ctrl.Finish() tx := tt.txFunc(ctrl) err := tx.SyntacticVerify(ctx) require.ErrorIs(t, err, tt.err) }) } - - t.Run("invalid BaseTx", func(t *testing.T) { - tx := &AddPermissionlessDelegatorTx{ - BaseTx: invalidBaseTx, - Validator: Validator{ - NodeID: ids.GenerateTestNodeID(), - }, - StakeOuts: []*avax.TransferableOutput{ - { - Asset: avax.Asset{ - ID: ids.GenerateTestID(), - }, - Out: &secp256k1fx.TransferOutput{ - Amt: 1, - }, - }, - }, - } - err := tx.SyntacticVerify(ctx) - require.Error(t, err) - }) - - t.Run("stake overflow", func(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - rewardsOwner := fx.NewMockOwner(ctrl) - rewardsOwner.EXPECT().Verify().Return(nil).AnyTimes() - assetID := ids.GenerateTestID() - tx := &AddPermissionlessDelegatorTx{ - BaseTx: validBaseTx, - Validator: Validator{ - NodeID: ids.GenerateTestNodeID(), - Wght: 1, - }, - Subnet: ids.GenerateTestID(), - StakeOuts: []*avax.TransferableOutput{ - { - Asset: avax.Asset{ - ID: assetID, - }, 
- Out: &secp256k1fx.TransferOutput{ - Amt: math.MaxUint64, - }, - }, - { - Asset: avax.Asset{ - ID: assetID, - }, - Out: &secp256k1fx.TransferOutput{ - Amt: 2, - }, - }, - }, - DelegationRewardsOwner: rewardsOwner, - } - err := tx.SyntacticVerify(ctx) - require.Error(t, err) - }) } func TestAddPermissionlessDelegatorTxNotValidatorTx(t *testing.T) { diff --git a/avalanchego/vms/platformvm/txs/add_permissionless_validator_tx.go b/avalanchego/vms/platformvm/txs/add_permissionless_validator_tx.go index 8f313ae0..0f655c8d 100644 --- a/avalanchego/vms/platformvm/txs/add_permissionless_validator_tx.go +++ b/avalanchego/vms/platformvm/txs/add_permissionless_validator_tx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs @@ -21,7 +21,8 @@ import ( ) var ( - _ ValidatorTx = (*AddPermissionlessValidatorTx)(nil) + _ ValidatorTx = (*AddPermissionlessValidatorTx)(nil) + _ ScheduledStaker = (*AddPermissionlessDelegatorTx)(nil) errEmptyNodeID = errors.New("validator nodeID cannot be empty") errNoStake = errors.New("no stake") diff --git a/avalanchego/vms/platformvm/txs/add_permissionless_validator_tx_test.go b/avalanchego/vms/platformvm/txs/add_permissionless_validator_tx_test.go index 704a3d4f..96828c94 100644 --- a/avalanchego/vms/platformvm/txs/add_permissionless_validator_tx_test.go +++ b/avalanchego/vms/platformvm/txs/add_permissionless_validator_tx_test.go @@ -1,27 +1,1371 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package txs import ( + "encoding/hex" "math" "testing" - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/bls" + "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/fx" "github.com/ava-labs/avalanchego/vms/platformvm/reward" "github.com/ava-labs/avalanchego/vms/platformvm/signer" + "github.com/ava-labs/avalanchego/vms/platformvm/stakeable" "github.com/ava-labs/avalanchego/vms/secp256k1fx" + "github.com/ava-labs/avalanchego/vms/types" + + safemath "github.com/ava-labs/avalanchego/utils/math" ) +func TestAddPermissionlessPrimaryValidator(t *testing.T) { + require := require.New(t) + + addr := ids.ShortID{ + 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, + } + + skBytes, err := hex.DecodeString("6668fecd4595b81e4d568398c820bbf3f073cb222902279fa55ebb84764ed2e3") + require.NoError(err) + + sk, err := bls.SecretKeyFromBytes(skBytes) + require.NoError(err) + + avaxAssetID, err := ids.FromString("FvwEAhmxKfeiG8SnEvq42hc6whRyY3EFYAvebMqDNDGCgxN5Z") + require.NoError(err) + + customAssetID := ids.ID{ + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + } + + txID := ids.ID{ + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + } + nodeID := ids.BuildTestNodeID([]byte{ + 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, + 0x11, 0x22, 0x33, 0x44, 0x55, 
0x66, 0x77, 0x88, + 0x11, 0x22, 0x33, 0x44, + }) + + simpleAddPrimaryTx := &AddPermissionlessValidatorTx{ + BaseTx: BaseTx{ + BaseTx: avax.BaseTx{ + NetworkID: constants.MainnetID, + BlockchainID: constants.PlatformChainID, + Outs: []*avax.TransferableOutput{}, + Ins: []*avax.TransferableInput{ + { + UTXOID: avax.UTXOID{ + TxID: txID, + OutputIndex: 1, + }, + Asset: avax.Asset{ + ID: avaxAssetID, + }, + In: &secp256k1fx.TransferInput{ + Amt: 2 * units.KiloAvax, + Input: secp256k1fx.Input{ + SigIndices: []uint32{1}, + }, + }, + }, + }, + Memo: types.JSONByteSlice{}, + }, + }, + Validator: Validator{ + NodeID: nodeID, + Start: 12345, + End: 12345 + 200*24*60*60, + Wght: 2 * units.KiloAvax, + }, + Subnet: constants.PrimaryNetworkID, + Signer: signer.NewProofOfPossession(sk), + StakeOuts: []*avax.TransferableOutput{ + { + Asset: avax.Asset{ + ID: avaxAssetID, + }, + Out: &secp256k1fx.TransferOutput{ + Amt: 2 * units.KiloAvax, + OutputOwners: secp256k1fx.OutputOwners{ + Locktime: 0, + Threshold: 1, + Addrs: []ids.ShortID{ + addr, + }, + }, + }, + }, + }, + ValidatorRewardsOwner: &secp256k1fx.OutputOwners{ + Locktime: 0, + Threshold: 1, + Addrs: []ids.ShortID{ + addr, + }, + }, + DelegatorRewardsOwner: &secp256k1fx.OutputOwners{ + Locktime: 0, + Threshold: 1, + Addrs: []ids.ShortID{ + addr, + }, + }, + DelegationShares: reward.PercentDenominator, + } + avax.SortTransferableOutputs(simpleAddPrimaryTx.Outs, Codec) + avax.SortTransferableOutputs(simpleAddPrimaryTx.StakeOuts, Codec) + utils.Sort(simpleAddPrimaryTx.Ins) + require.NoError(simpleAddPrimaryTx.SyntacticVerify(&snow.Context{ + NetworkID: 1, + ChainID: constants.PlatformChainID, + AVAXAssetID: avaxAssetID, + })) + + expectedUnsignedSimpleAddPrimaryTxBytes := []byte{ + // Codec version + 0x00, 0x00, + // AddPermissionlessValidatorTx type ID + 0x00, 0x00, 0x00, 0x19, + // Mainnet network ID + 0x00, 0x00, 0x00, 0x01, + // P-chain blockchain ID + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Number of immediate outputs + 0x00, 0x00, 0x00, 0x00, + // Number of inputs + 0x00, 0x00, 0x00, 0x01, + // inputs[0] + // TxID + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + // Tx output index + 0x00, 0x00, 0x00, 0x01, + // Mainnet AVAX asset ID + 0x21, 0xe6, 0x73, 0x17, 0xcb, 0xc4, 0xbe, 0x2a, + 0xeb, 0x00, 0x67, 0x7a, 0xd6, 0x46, 0x27, 0x78, + 0xa8, 0xf5, 0x22, 0x74, 0xb9, 0xd6, 0x05, 0xdf, + 0x25, 0x91, 0xb2, 0x30, 0x27, 0xa8, 0x7d, 0xff, + // secp256k1fx transfer input type ID + 0x00, 0x00, 0x00, 0x05, + // Amount = 2k AVAX + 0x00, 0x00, 0x01, 0xd1, 0xa9, 0x4a, 0x20, 0x00, + // Number of input signature indices + 0x00, 0x00, 0x00, 0x01, + // signature index + 0x00, 0x00, 0x00, 0x01, + // memo length + 0x00, 0x00, 0x00, 0x00, + // NodeID + 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, + 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, + 0x11, 0x22, 0x33, 0x44, + // Start time + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x39, + // End time + 0x00, 0x00, 0x00, 0x00, 0x01, 0x07, 0xdc, 0x39, + // Stake weight + 0x00, 0x00, 0x01, 0xd1, 0xa9, 0x4a, 0x20, 0x00, + // Primary network subnetID + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // BLS PoP type ID + 0x00, 0x00, 0x00, 0x1c, + // BLS compressed public key + 0xaf, 0xf4, 0xac, 0xb4, 0xc5, 0x43, 0x9b, 0x5d, + 0x42, 0x6c, 0xad, 0xf9, 0xe9, 0x46, 0xd3, 0xa4, + 0x52, 0xf7, 0xde, 0x34, 0x14, 0xd1, 0xad, 0x27, + 0x33, 0x61, 0x33, 0x21, 0x1d, 0x8b, 0x90, 0xcf, + 0x49, 0xfb, 0x97, 0xee, 0xbc, 0xde, 0xee, 0xf7, + 0x14, 0xdc, 0x20, 0xf5, 0x4e, 0xd0, 0xd4, 0xd1, + // BLS compressed signature 
length + 0x8c, 0xfd, 0x79, 0x09, 0xd1, 0x53, 0xb9, 0x60, + 0x4b, 0x62, 0xb1, 0x43, 0xba, 0x36, 0x20, 0x7b, + 0xb7, 0xe6, 0x48, 0x67, 0x42, 0x44, 0x80, 0x20, + 0x2a, 0x67, 0xdc, 0x68, 0x76, 0x83, 0x46, 0xd9, + 0x5c, 0x90, 0x98, 0x3c, 0x2d, 0x27, 0x9c, 0x64, + 0xc4, 0x3c, 0x51, 0x13, 0x6b, 0x2a, 0x05, 0xe0, + 0x16, 0x02, 0xd5, 0x2a, 0xa6, 0x37, 0x6f, 0xda, + 0x17, 0xfa, 0x6e, 0x2a, 0x18, 0xa0, 0x83, 0xe4, + 0x9d, 0x9c, 0x45, 0x0e, 0xab, 0x7b, 0x89, 0xb1, + 0xd5, 0x55, 0x5d, 0xa5, 0xc4, 0x89, 0x87, 0x2e, + 0x02, 0xb7, 0xe5, 0x22, 0x7b, 0x77, 0x55, 0x0a, + 0xf1, 0x33, 0x0e, 0x5a, 0x71, 0xf8, 0xc3, 0x68, + // Number of locked outputs + 0x00, 0x00, 0x00, 0x01, + // Mainnet AVAX asset ID + 0x21, 0xe6, 0x73, 0x17, 0xcb, 0xc4, 0xbe, 0x2a, + 0xeb, 0x00, 0x67, 0x7a, 0xd6, 0x46, 0x27, 0x78, + 0xa8, 0xf5, 0x22, 0x74, 0xb9, 0xd6, 0x05, 0xdf, + 0x25, 0x91, 0xb2, 0x30, 0x27, 0xa8, 0x7d, 0xff, + // secp256k1fx transferable output type ID + 0x00, 0x00, 0x00, 0x07, + // amount + 0x00, 0x00, 0x01, 0xd1, 0xa9, 0x4a, 0x20, 0x00, + // locktime + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // threshold + 0x00, 0x00, 0x00, 0x01, + // number of addresses + 0x00, 0x00, 0x00, 0x01, + // addresses[0] + 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, + // secp256k1fx owner type ID + 0x00, 0x00, 0x00, 0x0b, + // locktime + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // threshold + 0x00, 0x00, 0x00, 0x01, + // number of addresses + 0x00, 0x00, 0x00, 0x01, + // addresses[0] + 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, + // secp256k1fx owner type ID + 0x00, 0x00, 0x00, 0x0b, + // locktime + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // threshold + 0x00, 0x00, 0x00, 0x01, + // number of addresses + 0x00, 0x00, 0x00, 0x01, + // addresses[0] + 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 
0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, + // delegation shares + 0x00, 0x0f, 0x42, 0x40, + } + var unsignedSimpleAddPrimaryTx UnsignedTx = simpleAddPrimaryTx + unsignedSimpleAddPrimaryTxBytes, err := Codec.Marshal(CodecVersion, &unsignedSimpleAddPrimaryTx) + require.NoError(err) + require.Equal(expectedUnsignedSimpleAddPrimaryTxBytes, unsignedSimpleAddPrimaryTxBytes) + + complexAddPrimaryTx := &AddPermissionlessValidatorTx{ + BaseTx: BaseTx{ + BaseTx: avax.BaseTx{ + NetworkID: constants.MainnetID, + BlockchainID: constants.PlatformChainID, + Outs: []*avax.TransferableOutput{ + { + Asset: avax.Asset{ + ID: avaxAssetID, + }, + Out: &secp256k1fx.TransferOutput{ + Amt: 1, + OutputOwners: secp256k1fx.OutputOwners{ + Locktime: 0, + Threshold: 1, + Addrs: []ids.ShortID{ + addr, + }, + }, + }, + }, + { + Asset: avax.Asset{ + ID: avaxAssetID, + }, + Out: &stakeable.LockOut{ + Locktime: 87654321, + TransferableOut: &secp256k1fx.TransferOutput{ + Amt: 1, + OutputOwners: secp256k1fx.OutputOwners{ + Locktime: 12345678, + Threshold: 0, + Addrs: []ids.ShortID{}, + }, + }, + }, + }, + { + Asset: avax.Asset{ + ID: customAssetID, + }, + Out: &stakeable.LockOut{ + Locktime: 876543210, + TransferableOut: &secp256k1fx.TransferOutput{ + Amt: 0xffffffffffffffff, + OutputOwners: secp256k1fx.OutputOwners{ + Locktime: 0, + Threshold: 1, + Addrs: []ids.ShortID{ + addr, + }, + }, + }, + }, + }, + }, + Ins: []*avax.TransferableInput{ + { + UTXOID: avax.UTXOID{ + TxID: txID, + OutputIndex: 1, + }, + Asset: avax.Asset{ + ID: avaxAssetID, + }, + In: &secp256k1fx.TransferInput{ + Amt: units.MegaAvax, + Input: secp256k1fx.Input{ + SigIndices: []uint32{2, 5}, + }, + }, + }, + { + UTXOID: avax.UTXOID{ + TxID: txID, + OutputIndex: 2, + }, + Asset: avax.Asset{ + ID: customAssetID, + }, + In: &stakeable.LockIn{ + Locktime: 876543210, + TransferableIn: &secp256k1fx.TransferInput{ + Amt: 0xefffffffffffffff, + Input: secp256k1fx.Input{ + SigIndices: []uint32{0}, + }, + }, + }, + }, + { + UTXOID: avax.UTXOID{ 
+ TxID: txID, + OutputIndex: 3, + }, + Asset: avax.Asset{ + ID: customAssetID, + }, + In: &secp256k1fx.TransferInput{ + Amt: 0x1000000000000000, + Input: secp256k1fx.Input{ + SigIndices: []uint32{}, + }, + }, + }, + }, + Memo: types.JSONByteSlice("😅\nwell that's\x01\x23\x45!"), + }, + }, + Validator: Validator{ + NodeID: nodeID, + Start: 12345, + End: 12345 + 200*24*60*60, + Wght: 5 * units.KiloAvax, + }, + Subnet: constants.PrimaryNetworkID, + Signer: signer.NewProofOfPossession(sk), + StakeOuts: []*avax.TransferableOutput{ + { + Asset: avax.Asset{ + ID: avaxAssetID, + }, + Out: &secp256k1fx.TransferOutput{ + Amt: 2 * units.KiloAvax, + OutputOwners: secp256k1fx.OutputOwners{ + Locktime: 0, + Threshold: 1, + Addrs: []ids.ShortID{ + addr, + }, + }, + }, + }, + { + Asset: avax.Asset{ + ID: avaxAssetID, + }, + Out: &stakeable.LockOut{ + Locktime: 987654321, + TransferableOut: &secp256k1fx.TransferOutput{ + Amt: 3 * units.KiloAvax, + OutputOwners: secp256k1fx.OutputOwners{ + Locktime: 87654321, + Threshold: 0, + Addrs: []ids.ShortID{}, + }, + }, + }, + }, + }, + ValidatorRewardsOwner: &secp256k1fx.OutputOwners{ + Locktime: 0, + Threshold: 1, + Addrs: []ids.ShortID{ + addr, + }, + }, + DelegatorRewardsOwner: &secp256k1fx.OutputOwners{ + Locktime: 0, + Threshold: 0, + Addrs: []ids.ShortID{}, + }, + DelegationShares: reward.PercentDenominator, + } + require.NoError(complexAddPrimaryTx.SyntacticVerify(&snow.Context{ + NetworkID: 1, + ChainID: constants.PlatformChainID, + AVAXAssetID: avaxAssetID, + })) + + expectedUnsignedComplexAddPrimaryTxBytes := []byte{ + // Codec version + 0x00, 0x00, + // AddPermissionlessValidatorTx type ID + 0x00, 0x00, 0x00, 0x19, + // Mainnet network ID + 0x00, 0x00, 0x00, 0x01, + // P-chain blockchain ID + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Number of immediate outputs + 0x00, 
0x00, 0x00, 0x03, + // outputs[0] + // Mainnet AVAX asset ID + 0x21, 0xe6, 0x73, 0x17, 0xcb, 0xc4, 0xbe, 0x2a, + 0xeb, 0x00, 0x67, 0x7a, 0xd6, 0x46, 0x27, 0x78, + 0xa8, 0xf5, 0x22, 0x74, 0xb9, 0xd6, 0x05, 0xdf, + 0x25, 0x91, 0xb2, 0x30, 0x27, 0xa8, 0x7d, 0xff, + // secp256k1fx transfer output type ID + 0x00, 0x00, 0x00, 0x07, + // amount + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // locktime + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // threshold + 0x00, 0x00, 0x00, 0x01, + // number of addresses + 0x00, 0x00, 0x00, 0x01, + // addresses[0] + 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, + // outputs[1] + // Mainnet AVAX asset ID + 0x21, 0xe6, 0x73, 0x17, 0xcb, 0xc4, 0xbe, 0x2a, + 0xeb, 0x00, 0x67, 0x7a, 0xd6, 0x46, 0x27, 0x78, + 0xa8, 0xf5, 0x22, 0x74, 0xb9, 0xd6, 0x05, 0xdf, + 0x25, 0x91, 0xb2, 0x30, 0x27, 0xa8, 0x7d, 0xff, + // stakeable locked output type ID + 0x00, 0x00, 0x00, 0x16, + // locktime + 0x00, 0x00, 0x00, 0x00, 0x05, 0x39, 0x7f, 0xb1, + // secp256k1fx transfer output type ID + 0x00, 0x00, 0x00, 0x07, + // amount + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // locktime + 0x00, 0x00, 0x00, 0x00, 0x00, 0xbc, 0x61, 0x4e, + // threshold + 0x00, 0x00, 0x00, 0x00, + // number of addresses + 0x00, 0x00, 0x00, 0x00, + // outputs[2] + // custom asset ID + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + // stakeable locked output type ID + 0x00, 0x00, 0x00, 0x16, + // locktime + 0x00, 0x00, 0x00, 0x00, 0x34, 0x3e, 0xfc, 0xea, + // secp256k1fx transfer output type ID + 0x00, 0x00, 0x00, 0x07, + // amount + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + // locktime + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // threshold + 0x00, 0x00, 0x00, 0x01, + // number of addresses + 0x00, 0x00, 0x00, 0x01, + // 
address[0] + 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, + // number of inputs + 0x00, 0x00, 0x00, 0x03, + // inputs[0] + // TxID + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + // Tx output index + 0x00, 0x00, 0x00, 0x01, + // Mainnet AVAX asset ID + 0x21, 0xe6, 0x73, 0x17, 0xcb, 0xc4, 0xbe, 0x2a, + 0xeb, 0x00, 0x67, 0x7a, 0xd6, 0x46, 0x27, 0x78, + 0xa8, 0xf5, 0x22, 0x74, 0xb9, 0xd6, 0x05, 0xdf, + 0x25, 0x91, 0xb2, 0x30, 0x27, 0xa8, 0x7d, 0xff, + // secp256k1fx transfer input type ID + 0x00, 0x00, 0x00, 0x05, + // amount + 0x00, 0x03, 0x8d, 0x7e, 0xa4, 0xc6, 0x80, 0x00, + // number of signature indices + 0x00, 0x00, 0x00, 0x02, + // first signature index + 0x00, 0x00, 0x00, 0x02, + // second signature index + 0x00, 0x00, 0x00, 0x05, + // inputs[1] + // TxID + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + // Tx output index + 0x00, 0x00, 0x00, 0x02, + // custom asset ID + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + // stakeable locked input type ID + 0x00, 0x00, 0x00, 0x15, + // locktime + 0x00, 0x00, 0x00, 0x00, 0x34, 0x3e, 0xfc, 0xea, + // secp256k1fx transfer input type ID + 0x00, 0x00, 0x00, 0x05, + // amount + 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + // number of signature indices + 0x00, 0x00, 0x00, 0x01, + // signature index + 0x00, 0x00, 0x00, 0x00, + // inputs[2] + // TxID + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 
0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + // Tx output index + 0x00, 0x00, 0x00, 0x03, + // custom asset ID + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + // secp256k1 transfer input type ID + 0x00, 0x00, 0x00, 0x05, + // amount + 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // number of signature indices + 0x00, 0x00, 0x00, 0x00, + // memo length + 0x00, 0x00, 0x00, 0x14, + // memo + 0xf0, 0x9f, 0x98, 0x85, 0x0a, 0x77, 0x65, 0x6c, + 0x6c, 0x20, 0x74, 0x68, 0x61, 0x74, 0x27, 0x73, + 0x01, 0x23, 0x45, 0x21, + // nodeID + 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, + 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, + 0x11, 0x22, 0x33, 0x44, + // Start time + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x39, + // End time + 0x00, 0x00, 0x00, 0x00, 0x01, 0x07, 0xdc, 0x39, + // Stake weight + 0x00, 0x00, 0x04, 0x8c, 0x27, 0x39, 0x50, 0x00, + // Primary Network subnet ID + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // BLS PoP type ID + 0x00, 0x00, 0x00, 0x1c, + // BLS compressed public key + 0xaf, 0xf4, 0xac, 0xb4, 0xc5, 0x43, 0x9b, 0x5d, + 0x42, 0x6c, 0xad, 0xf9, 0xe9, 0x46, 0xd3, 0xa4, + 0x52, 0xf7, 0xde, 0x34, 0x14, 0xd1, 0xad, 0x27, + 0x33, 0x61, 0x33, 0x21, 0x1d, 0x8b, 0x90, 0xcf, + 0x49, 0xfb, 0x97, 0xee, 0xbc, 0xde, 0xee, 0xf7, + 0x14, 0xdc, 0x20, 0xf5, 0x4e, 0xd0, 0xd4, 0xd1, + // BLS compressed signature + 0x8c, 0xfd, 0x79, 0x09, 0xd1, 0x53, 0xb9, 0x60, + 0x4b, 0x62, 0xb1, 0x43, 0xba, 0x36, 0x20, 0x7b, + 0xb7, 0xe6, 0x48, 0x67, 0x42, 0x44, 0x80, 0x20, + 0x2a, 0x67, 0xdc, 0x68, 0x76, 0x83, 0x46, 0xd9, + 0x5c, 0x90, 0x98, 0x3c, 0x2d, 0x27, 0x9c, 0x64, + 0xc4, 0x3c, 0x51, 0x13, 0x6b, 0x2a, 0x05, 0xe0, + 0x16, 0x02, 0xd5, 0x2a, 0xa6, 
0x37, 0x6f, 0xda, + 0x17, 0xfa, 0x6e, 0x2a, 0x18, 0xa0, 0x83, 0xe4, + 0x9d, 0x9c, 0x45, 0x0e, 0xab, 0x7b, 0x89, 0xb1, + 0xd5, 0x55, 0x5d, 0xa5, 0xc4, 0x89, 0x87, 0x2e, + 0x02, 0xb7, 0xe5, 0x22, 0x7b, 0x77, 0x55, 0x0a, + 0xf1, 0x33, 0x0e, 0x5a, 0x71, 0xf8, 0xc3, 0x68, + // number of locked outputs + 0x00, 0x00, 0x00, 0x02, + // Mainnet AVAX asset ID + 0x21, 0xe6, 0x73, 0x17, 0xcb, 0xc4, 0xbe, 0x2a, + 0xeb, 0x00, 0x67, 0x7a, 0xd6, 0x46, 0x27, 0x78, + 0xa8, 0xf5, 0x22, 0x74, 0xb9, 0xd6, 0x05, 0xdf, + 0x25, 0x91, 0xb2, 0x30, 0x27, 0xa8, 0x7d, 0xff, + // secp256k1 transfer output type ID + 0x00, 0x00, 0x00, 0x07, + // amount + 0x00, 0x00, 0x01, 0xd1, 0xa9, 0x4a, 0x20, 0x00, + // locktime + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // threshold + 0x00, 0x00, 0x00, 0x01, + // number of addresses + 0x00, 0x00, 0x00, 0x01, + // addresses[0] + 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, + // Mainnet AVAX asset ID + 0x21, 0xe6, 0x73, 0x17, 0xcb, 0xc4, 0xbe, 0x2a, + 0xeb, 0x00, 0x67, 0x7a, 0xd6, 0x46, 0x27, 0x78, + 0xa8, 0xf5, 0x22, 0x74, 0xb9, 0xd6, 0x05, 0xdf, + 0x25, 0x91, 0xb2, 0x30, 0x27, 0xa8, 0x7d, 0xff, + // stakeable locked output type ID + 0x00, 0x00, 0x00, 0x16, + // locktime + 0x00, 0x00, 0x00, 0x00, 0x3a, 0xde, 0x68, 0xb1, + // secp256k1 transfer output type ID + 0x00, 0x00, 0x00, 0x07, + // amount + 0x00, 0x00, 0x02, 0xba, 0x7d, 0xef, 0x30, 0x00, + // locktime + 0x00, 0x00, 0x00, 0x00, 0x05, 0x39, 0x7f, 0xb1, + // threshold + 0x00, 0x00, 0x00, 0x00, + // number of addresses + 0x00, 0x00, 0x00, 0x00, + // secp256k1 owner type ID + 0x00, 0x00, 0x00, 0x0b, + // locktime + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // threshold + 0x00, 0x00, 0x00, 0x01, + // number of addresses + 0x00, 0x00, 0x00, 0x01, + // addresses[0] + 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, + // secp256k1 owner type ID 
+ 0x00, 0x00, 0x00, 0x0b, + // locktime + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // threshold + 0x00, 0x00, 0x00, 0x00, + // number of addresses + 0x00, 0x00, 0x00, 0x00, + // delegation shares + 0x00, 0x0f, 0x42, 0x40, + } + var unsignedComplexAddPrimaryTx UnsignedTx = complexAddPrimaryTx + unsignedComplexAddPrimaryTxBytes, err := Codec.Marshal(CodecVersion, &unsignedComplexAddPrimaryTx) + require.NoError(err) + require.Equal(expectedUnsignedComplexAddPrimaryTxBytes, unsignedComplexAddPrimaryTxBytes) +} + +func TestAddPermissionlessSubnetValidator(t *testing.T) { + require := require.New(t) + + addr := ids.ShortID{ + 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, + } + + avaxAssetID, err := ids.FromString("FvwEAhmxKfeiG8SnEvq42hc6whRyY3EFYAvebMqDNDGCgxN5Z") + require.NoError(err) + + customAssetID := ids.ID{ + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + } + + txID := ids.ID{ + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + } + nodeID := ids.BuildTestNodeID([]byte{ + 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, + 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, + 0x11, 0x22, 0x33, 0x44, + }) + subnetID := ids.ID{ + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, + 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, + 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, + } + + simpleAddSubnetTx := &AddPermissionlessValidatorTx{ + BaseTx: BaseTx{ + BaseTx: avax.BaseTx{ + NetworkID: constants.MainnetID, + BlockchainID: constants.PlatformChainID, + Outs: []*avax.TransferableOutput{}, + Ins: []*avax.TransferableInput{ + { + UTXOID: 
avax.UTXOID{ + TxID: txID, + OutputIndex: 1, + }, + Asset: avax.Asset{ + ID: avaxAssetID, + }, + In: &secp256k1fx.TransferInput{ + Amt: units.MilliAvax, + Input: secp256k1fx.Input{ + SigIndices: []uint32{1}, + }, + }, + }, + { + UTXOID: avax.UTXOID{ + TxID: txID, + OutputIndex: 2, + }, + Asset: avax.Asset{ + ID: customAssetID, + }, + In: &secp256k1fx.TransferInput{ + Amt: 1, + Input: secp256k1fx.Input{ + SigIndices: []uint32{1}, + }, + }, + }, + }, + Memo: types.JSONByteSlice{}, + }, + }, + Validator: Validator{ + NodeID: nodeID, + Start: 12345, + End: 12346, + Wght: 1, + }, + Subnet: subnetID, + Signer: &signer.Empty{}, + StakeOuts: []*avax.TransferableOutput{ + { + Asset: avax.Asset{ + ID: customAssetID, + }, + Out: &secp256k1fx.TransferOutput{ + Amt: 1, + OutputOwners: secp256k1fx.OutputOwners{ + Locktime: 0, + Threshold: 1, + Addrs: []ids.ShortID{ + addr, + }, + }, + }, + }, + }, + ValidatorRewardsOwner: &secp256k1fx.OutputOwners{ + Locktime: 0, + Threshold: 1, + Addrs: []ids.ShortID{ + addr, + }, + }, + DelegatorRewardsOwner: &secp256k1fx.OutputOwners{ + Locktime: 0, + Threshold: 1, + Addrs: []ids.ShortID{ + addr, + }, + }, + DelegationShares: reward.PercentDenominator, + } + avax.SortTransferableOutputs(simpleAddSubnetTx.Outs, Codec) + avax.SortTransferableOutputs(simpleAddSubnetTx.StakeOuts, Codec) + utils.Sort(simpleAddSubnetTx.Ins) + require.NoError(simpleAddSubnetTx.SyntacticVerify(&snow.Context{ + NetworkID: 1, + ChainID: constants.PlatformChainID, + AVAXAssetID: avaxAssetID, + })) + + expectedUnsignedSimpleAddSubnetTxBytes := []byte{ + // Codec version + 0x00, 0x00, + // AddPermissionlessValidatorTx type ID + 0x00, 0x00, 0x00, 0x19, + // Mainnet network ID + 0x00, 0x00, 0x00, 0x01, + // P-chain blockchain ID + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Number of immediate outputs + 0x00, 0x00, 
0x00, 0x00, + // Number of inputs + 0x00, 0x00, 0x00, 0x02, + // inputs[0] + // TxID + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + // Tx output index + 0x00, 0x00, 0x00, 0x01, + // Mainnet AVAX asset ID + 0x21, 0xe6, 0x73, 0x17, 0xcb, 0xc4, 0xbe, 0x2a, + 0xeb, 0x00, 0x67, 0x7a, 0xd6, 0x46, 0x27, 0x78, + 0xa8, 0xf5, 0x22, 0x74, 0xb9, 0xd6, 0x05, 0xdf, + 0x25, 0x91, 0xb2, 0x30, 0x27, 0xa8, 0x7d, 0xff, + // secp256k1fx transfer input type ID + 0x00, 0x00, 0x00, 0x05, + // Amount = 1 MilliAVAX + 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x42, 0x40, + // Number of input signature indices + 0x00, 0x00, 0x00, 0x01, + // signature index + 0x00, 0x00, 0x00, 0x01, + // inputs[1] + // TxID + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + // Tx output index + 0x00, 0x00, 0x00, 0x02, + // custom asset ID + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + // secp256k1fx transfer input type ID + 0x00, 0x00, 0x00, 0x05, + // Amount + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // Number of input signature indices + 0x00, 0x00, 0x00, 0x01, + // signature index + 0x00, 0x00, 0x00, 0x01, + // memo length + 0x00, 0x00, 0x00, 0x00, + // NodeID + 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, + 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, + 0x11, 0x22, 0x33, 0x44, + // Start time + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x39, + // End time + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x3a, + // Stake weight + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // SubnetID + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + 0x11, 
0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, + 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, + 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, + // No signer type ID + 0x00, 0x00, 0x00, 0x1b, + // Number of locked outputs + 0x00, 0x00, 0x00, 0x01, + // custom asset ID + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + // secp256k1fx transferable output type ID + 0x00, 0x00, 0x00, 0x07, + // amount + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // locktime + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // threshold + 0x00, 0x00, 0x00, 0x01, + // number of addresses + 0x00, 0x00, 0x00, 0x01, + // addresses[0] + 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, + // secp256k1fx owner type ID + 0x00, 0x00, 0x00, 0x0b, + // locktime + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // threshold + 0x00, 0x00, 0x00, 0x01, + // number of addresses + 0x00, 0x00, 0x00, 0x01, + // addresses[0] + 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, + // secp256k1fx owner type ID + 0x00, 0x00, 0x00, 0x0b, + // locktime + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // threshold + 0x00, 0x00, 0x00, 0x01, + // number of addresses + 0x00, 0x00, 0x00, 0x01, + // addresses[0] + 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, + // delegation shares + 0x00, 0x0f, 0x42, 0x40, + } + var unsignedSimpleAddSubnetTx UnsignedTx = simpleAddSubnetTx + unsignedSimpleAddSubnetTxBytes, err := Codec.Marshal(CodecVersion, &unsignedSimpleAddSubnetTx) + require.NoError(err) + require.Equal(expectedUnsignedSimpleAddSubnetTxBytes, unsignedSimpleAddSubnetTxBytes) + + complexAddSubnetTx := &AddPermissionlessValidatorTx{ + 
BaseTx: BaseTx{ + BaseTx: avax.BaseTx{ + NetworkID: constants.MainnetID, + BlockchainID: constants.PlatformChainID, + Outs: []*avax.TransferableOutput{ + { + Asset: avax.Asset{ + ID: avaxAssetID, + }, + Out: &secp256k1fx.TransferOutput{ + Amt: 1, + OutputOwners: secp256k1fx.OutputOwners{ + Locktime: 0, + Threshold: 1, + Addrs: []ids.ShortID{ + addr, + }, + }, + }, + }, + { + Asset: avax.Asset{ + ID: avaxAssetID, + }, + Out: &stakeable.LockOut{ + Locktime: 87654321, + TransferableOut: &secp256k1fx.TransferOutput{ + Amt: 1, + OutputOwners: secp256k1fx.OutputOwners{ + Locktime: 12345678, + Threshold: 0, + Addrs: []ids.ShortID{}, + }, + }, + }, + }, + { + Asset: avax.Asset{ + ID: customAssetID, + }, + Out: &stakeable.LockOut{ + Locktime: 876543210, + TransferableOut: &secp256k1fx.TransferOutput{ + Amt: 0xfffffffffffffff0, + OutputOwners: secp256k1fx.OutputOwners{ + Locktime: 0, + Threshold: 1, + Addrs: []ids.ShortID{ + addr, + }, + }, + }, + }, + }, + }, + Ins: []*avax.TransferableInput{ + { + UTXOID: avax.UTXOID{ + TxID: txID, + OutputIndex: 1, + }, + Asset: avax.Asset{ + ID: avaxAssetID, + }, + In: &secp256k1fx.TransferInput{ + Amt: units.MegaAvax, + Input: secp256k1fx.Input{ + SigIndices: []uint32{2, 5}, + }, + }, + }, + { + UTXOID: avax.UTXOID{ + TxID: txID, + OutputIndex: 2, + }, + Asset: avax.Asset{ + ID: customAssetID, + }, + In: &stakeable.LockIn{ + Locktime: 876543210, + TransferableIn: &secp256k1fx.TransferInput{ + Amt: 0xefffffffffffffff, + Input: secp256k1fx.Input{ + SigIndices: []uint32{0}, + }, + }, + }, + }, + { + UTXOID: avax.UTXOID{ + TxID: txID, + OutputIndex: 3, + }, + Asset: avax.Asset{ + ID: customAssetID, + }, + In: &secp256k1fx.TransferInput{ + Amt: 0x1000000000000000, + Input: secp256k1fx.Input{ + SigIndices: []uint32{}, + }, + }, + }, + }, + Memo: types.JSONByteSlice("😅\nwell that's\x01\x23\x45!"), + }, + }, + Validator: Validator{ + NodeID: nodeID, + Start: 12345, + End: 12345 + 1, + Wght: 9, + }, + Subnet: subnetID, + Signer: &signer.Empty{}, 
+ StakeOuts: []*avax.TransferableOutput{ + { + Asset: avax.Asset{ + ID: customAssetID, + }, + Out: &secp256k1fx.TransferOutput{ + Amt: 2, + OutputOwners: secp256k1fx.OutputOwners{ + Locktime: 0, + Threshold: 1, + Addrs: []ids.ShortID{ + addr, + }, + }, + }, + }, + { + Asset: avax.Asset{ + ID: customAssetID, + }, + Out: &stakeable.LockOut{ + Locktime: 987654321, + TransferableOut: &secp256k1fx.TransferOutput{ + Amt: 7, + OutputOwners: secp256k1fx.OutputOwners{ + Locktime: 87654321, + Threshold: 0, + Addrs: []ids.ShortID{}, + }, + }, + }, + }, + }, + ValidatorRewardsOwner: &secp256k1fx.OutputOwners{ + Locktime: 0, + Threshold: 1, + Addrs: []ids.ShortID{ + addr, + }, + }, + DelegatorRewardsOwner: &secp256k1fx.OutputOwners{ + Locktime: 0, + Threshold: 0, + Addrs: []ids.ShortID{}, + }, + DelegationShares: reward.PercentDenominator, + } + require.NoError(complexAddSubnetTx.SyntacticVerify(&snow.Context{ + NetworkID: 1, + ChainID: constants.PlatformChainID, + AVAXAssetID: avaxAssetID, + })) + + expectedUnsignedComplexAddSubnetTxBytes := []byte{ + // Codec version + 0x00, 0x00, + // AddPermissionlessValidatorTx type ID + 0x00, 0x00, 0x00, 0x19, + // Mainnet network ID + 0x00, 0x00, 0x00, 0x01, + // P-chain blockchain ID + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Number of immediate outputs + 0x00, 0x00, 0x00, 0x03, + // outputs[0] + // Mainnet AVAX asset ID + 0x21, 0xe6, 0x73, 0x17, 0xcb, 0xc4, 0xbe, 0x2a, + 0xeb, 0x00, 0x67, 0x7a, 0xd6, 0x46, 0x27, 0x78, + 0xa8, 0xf5, 0x22, 0x74, 0xb9, 0xd6, 0x05, 0xdf, + 0x25, 0x91, 0xb2, 0x30, 0x27, 0xa8, 0x7d, 0xff, + // secp256k1fx transfer output type ID + 0x00, 0x00, 0x00, 0x07, + // amount + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // locktime + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // threshold + 0x00, 0x00, 0x00, 0x01, + // number of addresses + 
0x00, 0x00, 0x00, 0x01, + // addresses[0] + 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, + // outputs[1] + // Mainnet AVAX asset ID + 0x21, 0xe6, 0x73, 0x17, 0xcb, 0xc4, 0xbe, 0x2a, + 0xeb, 0x00, 0x67, 0x7a, 0xd6, 0x46, 0x27, 0x78, + 0xa8, 0xf5, 0x22, 0x74, 0xb9, 0xd6, 0x05, 0xdf, + 0x25, 0x91, 0xb2, 0x30, 0x27, 0xa8, 0x7d, 0xff, + // stakeable locked output type ID + 0x00, 0x00, 0x00, 0x16, + // locktime + 0x00, 0x00, 0x00, 0x00, 0x05, 0x39, 0x7f, 0xb1, + // secp256k1fx transfer output type ID + 0x00, 0x00, 0x00, 0x07, + // amount + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // locktime + 0x00, 0x00, 0x00, 0x00, 0x00, 0xbc, 0x61, 0x4e, + // threshold + 0x00, 0x00, 0x00, 0x00, + // number of addresses + 0x00, 0x00, 0x00, 0x00, + // outputs[2] + // custom asset ID + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + // stakeable locked output type ID + 0x00, 0x00, 0x00, 0x16, + // locktime + 0x00, 0x00, 0x00, 0x00, 0x34, 0x3e, 0xfc, 0xea, + // secp256k1fx transfer output type ID + 0x00, 0x00, 0x00, 0x07, + // amount + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, + // locktime + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // threshold + 0x00, 0x00, 0x00, 0x01, + // number of addresses + 0x00, 0x00, 0x00, 0x01, + // address[0] + 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, + // number of inputs + 0x00, 0x00, 0x00, 0x03, + // inputs[0] + // TxID + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + // Tx output index + 0x00, 0x00, 0x00, 0x01, + // Mainnet AVAX asset ID + 0x21, 0xe6, 0x73, 0x17, 0xcb, 0xc4, 
0xbe, 0x2a, + 0xeb, 0x00, 0x67, 0x7a, 0xd6, 0x46, 0x27, 0x78, + 0xa8, 0xf5, 0x22, 0x74, 0xb9, 0xd6, 0x05, 0xdf, + 0x25, 0x91, 0xb2, 0x30, 0x27, 0xa8, 0x7d, 0xff, + // secp256k1fx transfer input type ID + 0x00, 0x00, 0x00, 0x05, + // amount + 0x00, 0x03, 0x8d, 0x7e, 0xa4, 0xc6, 0x80, 0x00, + // number of signature indices + 0x00, 0x00, 0x00, 0x02, + // first signature index + 0x00, 0x00, 0x00, 0x02, + // second signature index + 0x00, 0x00, 0x00, 0x05, + // inputs[1] + // TxID + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + // Tx output index + 0x00, 0x00, 0x00, 0x02, + // custom asset ID + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + // stakeable locked input type ID + 0x00, 0x00, 0x00, 0x15, + // locktime + 0x00, 0x00, 0x00, 0x00, 0x34, 0x3e, 0xfc, 0xea, + // secp256k1fx transfer input type ID + 0x00, 0x00, 0x00, 0x05, + // amount + 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + // number of signature indices + 0x00, 0x00, 0x00, 0x01, + // signature index + 0x00, 0x00, 0x00, 0x00, + // inputs[2] + // TxID + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + // Tx output index + 0x00, 0x00, 0x00, 0x03, + // custom asset ID + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + // secp256k1 transfer input type ID + 0x00, 0x00, 0x00, 0x05, + // amount + 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // number of signature indices + 0x00, 0x00, 0x00, 0x00, + // memo 
length + 0x00, 0x00, 0x00, 0x14, + // memo + 0xf0, 0x9f, 0x98, 0x85, 0x0a, 0x77, 0x65, 0x6c, + 0x6c, 0x20, 0x74, 0x68, 0x61, 0x74, 0x27, 0x73, + 0x01, 0x23, 0x45, 0x21, + // nodeID + 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, + 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, + 0x11, 0x22, 0x33, 0x44, + // Start time + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x39, + // End time + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x3a, + // Stake weight + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, + // subnetID + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, + 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, + 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, + // Empty signer type ID + 0x00, 0x00, 0x00, 0x1b, + // number of locked outputs + 0x00, 0x00, 0x00, 0x02, + // custom asset ID + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + // secp256k1 transfer output type ID + 0x00, 0x00, 0x00, 0x07, + // amount + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // locktime + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // threshold + 0x00, 0x00, 0x00, 0x01, + // number of addresses + 0x00, 0x00, 0x00, 0x01, + // addresses[0] + 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, + // custom asset ID + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + // stakeable locked output type ID + 0x00, 0x00, 0x00, 0x16, + // locktime + 0x00, 0x00, 0x00, 0x00, 0x3a, 0xde, 0x68, 0xb1, + // secp256k1 transfer output type ID + 0x00, 0x00, 0x00, 0x07, + // amount + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, + // locktime + 0x00, 0x00, 0x00, 0x00, 0x05, 
0x39, 0x7f, 0xb1, + // threshold + 0x00, 0x00, 0x00, 0x00, + // number of addresses + 0x00, 0x00, 0x00, 0x00, + // secp256k1 owner type ID + 0x00, 0x00, 0x00, 0x0b, + // locktime + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // threshold + 0x00, 0x00, 0x00, 0x01, + // number of addresses + 0x00, 0x00, 0x00, 0x01, + // addresses[0] + 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, + // secp256k1 owner type ID + 0x00, 0x00, 0x00, 0x0b, + // locktime + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // threshold + 0x00, 0x00, 0x00, 0x00, + // number of addresses + 0x00, 0x00, 0x00, 0x00, + // delegation shares + 0x00, 0x0f, 0x42, 0x40, + } + var unsignedComplexAddSubnetTx UnsignedTx = complexAddSubnetTx + unsignedComplexAddSubnetTxBytes, err := Codec.Marshal(CodecVersion, &unsignedComplexAddSubnetTx) + require.NoError(err) + require.Equal(expectedUnsignedComplexAddSubnetTxBytes, unsignedComplexAddSubnetTxBytes) +} + func TestAddPermissionlessValidatorTxSyntacticVerify(t *testing.T) { type test struct { name string @@ -125,6 +1469,29 @@ func TestAddPermissionlessValidatorTxSyntacticVerify(t *testing.T) { }, err: errTooManyShares, }, + { + name: "invalid BaseTx", + txFunc: func(*gomock.Controller) *AddPermissionlessValidatorTx { + return &AddPermissionlessValidatorTx{ + BaseTx: invalidBaseTx, + Validator: Validator{ + NodeID: ids.GenerateTestNodeID(), + }, + StakeOuts: []*avax.TransferableOutput{ + { + Asset: avax.Asset{ + ID: ids.GenerateTestID(), + }, + Out: &secp256k1fx.TransferOutput{ + Amt: 1, + }, + }, + }, + DelegationShares: reward.PercentDenominator, + } + }, + err: avax.ErrWrongNetworkID, + }, { name: "invalid rewards owner", txFunc: func(ctrl *gomock.Controller) *AddPermissionlessValidatorTx { @@ -216,6 +1583,45 @@ func TestAddPermissionlessValidatorTxSyntacticVerify(t *testing.T) { }, err: errCustom, }, + { + name: "stake overflow", + txFunc: func(ctrl *gomock.Controller) 
*AddPermissionlessValidatorTx { + rewardsOwner := fx.NewMockOwner(ctrl) + rewardsOwner.EXPECT().Verify().Return(nil).AnyTimes() + assetID := ids.GenerateTestID() + return &AddPermissionlessValidatorTx{ + BaseTx: validBaseTx, + Validator: Validator{ + NodeID: ids.GenerateTestNodeID(), + Wght: 1, + }, + Subnet: ids.GenerateTestID(), + Signer: &signer.Empty{}, + StakeOuts: []*avax.TransferableOutput{ + { + Asset: avax.Asset{ + ID: assetID, + }, + Out: &secp256k1fx.TransferOutput{ + Amt: math.MaxUint64, + }, + }, + { + Asset: avax.Asset{ + ID: assetID, + }, + Out: &secp256k1fx.TransferOutput{ + Amt: 2, + }, + }, + }, + ValidatorRewardsOwner: rewardsOwner, + DelegatorRewardsOwner: rewardsOwner, + DelegationShares: reward.PercentDenominator, + } + }, + err: safemath.ErrOverflow, + }, { name: "multiple staked assets", txFunc: func(ctrl *gomock.Controller) *AddPermissionlessValidatorTx { @@ -415,76 +1821,12 @@ func TestAddPermissionlessValidatorTxSyntacticVerify(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { ctrl := gomock.NewController(t) - defer ctrl.Finish() tx := tt.txFunc(ctrl) err := tx.SyntacticVerify(ctx) require.ErrorIs(t, err, tt.err) }) } - - t.Run("invalid BaseTx", func(t *testing.T) { - tx := &AddPermissionlessValidatorTx{ - BaseTx: invalidBaseTx, - Validator: Validator{ - NodeID: ids.GenerateTestNodeID(), - }, - StakeOuts: []*avax.TransferableOutput{ - { - Asset: avax.Asset{ - ID: ids.GenerateTestID(), - }, - Out: &secp256k1fx.TransferOutput{ - Amt: 1, - }, - }, - }, - DelegationShares: reward.PercentDenominator, - } - err := tx.SyntacticVerify(ctx) - require.Error(t, err) - }) - - t.Run("stake overflow", func(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - rewardsOwner := fx.NewMockOwner(ctrl) - rewardsOwner.EXPECT().Verify().Return(nil).AnyTimes() - assetID := ids.GenerateTestID() - tx := &AddPermissionlessValidatorTx{ - BaseTx: validBaseTx, - Validator: Validator{ - NodeID: 
ids.GenerateTestNodeID(), - Wght: 1, - }, - Subnet: ids.GenerateTestID(), - Signer: &signer.Empty{}, - StakeOuts: []*avax.TransferableOutput{ - { - Asset: avax.Asset{ - ID: assetID, - }, - Out: &secp256k1fx.TransferOutput{ - Amt: math.MaxUint64, - }, - }, - { - Asset: avax.Asset{ - ID: assetID, - }, - Out: &secp256k1fx.TransferOutput{ - Amt: 2, - }, - }, - }, - ValidatorRewardsOwner: rewardsOwner, - DelegatorRewardsOwner: rewardsOwner, - DelegationShares: reward.PercentDenominator, - } - err := tx.SyntacticVerify(ctx) - require.Error(t, err) - }) } func TestAddPermissionlessValidatorTxNotDelegatorTx(t *testing.T) { diff --git a/avalanchego/vms/platformvm/txs/add_subnet_validator_test.go b/avalanchego/vms/platformvm/txs/add_subnet_validator_test.go index c25fd8df..8dc8d767 100644 --- a/avalanchego/vms/platformvm/txs/add_subnet_validator_test.go +++ b/avalanchego/vms/platformvm/txs/add_subnet_validator_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package txs @@ -10,7 +10,7 @@ import ( "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/timer/mockable" @@ -22,7 +22,7 @@ import ( func TestAddSubnetValidatorTxSyntacticVerify(t *testing.T) { require := require.New(t) clk := mockable.Clock{} - ctx := snow.DefaultContextTest() + ctx := snowtest.Context(t, snowtest.PChainID) signers := [][]*secp256k1.PrivateKey{preFundedKeys} var ( @@ -32,10 +32,12 @@ func TestAddSubnetValidatorTxSyntacticVerify(t *testing.T) { ) // Case : signed tx is nil - require.ErrorIs(stx.SyntacticVerify(ctx), ErrNilSignedTx) + err = stx.SyntacticVerify(ctx) + require.ErrorIs(err, ErrNilSignedTx) // Case : unsigned tx is nil - require.ErrorIs(addSubnetValidatorTx.SyntacticVerify(ctx), ErrNilTx) + err = addSubnetValidatorTx.SyntacticVerify(ctx) + require.ErrorIs(err, ErrNilTx) validatorWeight := uint64(2022) subnetID := ids.ID{'s', 'u', 'b', 'n', 'e', 't', 'I', 'D'} @@ -94,16 +96,16 @@ func TestAddSubnetValidatorTxSyntacticVerify(t *testing.T) { stx, err = NewSigned(addSubnetValidatorTx, Codec, signers) require.NoError(err) err = stx.SyntacticVerify(ctx) - require.Error(err) + require.ErrorIs(err, avax.ErrWrongNetworkID) addSubnetValidatorTx.NetworkID-- - // Case: Missing Subnet ID + // Case: Specifies primary network SubnetID addSubnetValidatorTx.SyntacticallyVerified = false addSubnetValidatorTx.Subnet = ids.Empty stx, err = NewSigned(addSubnetValidatorTx, Codec, signers) require.NoError(err) err = stx.SyntacticVerify(ctx) - require.Error(err) + require.ErrorIs(err, errAddPrimaryNetworkValidator) addSubnetValidatorTx.Subnet = subnetID // Case: No weight @@ -112,7 +114,7 @@ func TestAddSubnetValidatorTxSyntacticVerify(t *testing.T) { stx, err = NewSigned(addSubnetValidatorTx, Codec, 
signers) require.NoError(err) err = stx.SyntacticVerify(ctx) - require.Error(err) + require.ErrorIs(err, ErrWeightTooSmall) addSubnetValidatorTx.Wght = validatorWeight // Case: Subnet auth indices not unique @@ -123,7 +125,7 @@ func TestAddSubnetValidatorTxSyntacticVerify(t *testing.T) { stx, err = NewSigned(addSubnetValidatorTx, Codec, signers) require.NoError(err) err = stx.SyntacticVerify(ctx) - require.Error(err) + require.ErrorIs(err, secp256k1fx.ErrInputIndicesNotSortedUnique) *input = oldInput // Case: adding to Primary Network @@ -138,7 +140,7 @@ func TestAddSubnetValidatorTxSyntacticVerify(t *testing.T) { func TestAddSubnetValidatorMarshal(t *testing.T) { require := require.New(t) clk := mockable.Clock{} - ctx := snow.DefaultContextTest() + ctx := snowtest.Context(t, snowtest.PChainID) signers := [][]*secp256k1.PrivateKey{preFundedKeys} var ( @@ -199,7 +201,7 @@ func TestAddSubnetValidatorMarshal(t *testing.T) { require.NoError(err) require.NoError(stx.SyntacticVerify(ctx)) - txBytes, err := Codec.Marshal(Version, stx) + txBytes, err := Codec.Marshal(CodecVersion, stx) require.NoError(err) parsedTx, err := Parse(Codec, txBytes) diff --git a/avalanchego/vms/platformvm/txs/add_subnet_validator_tx.go b/avalanchego/vms/platformvm/txs/add_subnet_validator_tx.go index 0ac3474e..b6ce0d0f 100644 --- a/avalanchego/vms/platformvm/txs/add_subnet_validator_tx.go +++ b/avalanchego/vms/platformvm/txs/add_subnet_validator_tx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package txs @@ -14,7 +14,8 @@ import ( ) var ( - _ StakerTx = (*AddSubnetValidatorTx)(nil) + _ StakerTx = (*AddSubnetValidatorTx)(nil) + _ ScheduledStaker = (*AddSubnetValidatorTx)(nil) errAddPrimaryNetworkValidator = errors.New("can't add primary network validator with AddSubnetValidatorTx") ) diff --git a/avalanchego/vms/platformvm/txs/add_validator_test.go b/avalanchego/vms/platformvm/txs/add_validator_test.go index 78eda5c9..daf32f66 100644 --- a/avalanchego/vms/platformvm/txs/add_validator_test.go +++ b/avalanchego/vms/platformvm/txs/add_validator_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs @@ -10,7 +10,7 @@ import ( "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/vms/components/avax" @@ -22,8 +22,7 @@ import ( func TestAddValidatorTxSyntacticVerify(t *testing.T) { require := require.New(t) clk := mockable.Clock{} - ctx := snow.DefaultContextTest() - ctx.AVAXAssetID = ids.GenerateTestID() + ctx := snowtest.Context(t, snowtest.PChainID) signers := [][]*secp256k1.PrivateKey{preFundedKeys} var ( @@ -33,10 +32,12 @@ func TestAddValidatorTxSyntacticVerify(t *testing.T) { ) // Case : signed tx is nil - require.ErrorIs(stx.SyntacticVerify(ctx), ErrNilSignedTx) + err = stx.SyntacticVerify(ctx) + require.ErrorIs(err, ErrNilSignedTx) // Case : unsigned tx is nil - require.ErrorIs(addValidatorTx.SyntacticVerify(ctx), ErrNilTx) + err = addValidatorTx.SyntacticVerify(ctx) + require.ErrorIs(err, ErrNilTx) validatorWeight := uint64(2022) rewardAddress := preFundedKeys[0].PublicKey().Address() @@ -107,7 +108,7 @@ func TestAddValidatorTxSyntacticVerify(t 
*testing.T) { stx, err = NewSigned(addValidatorTx, Codec, signers) require.NoError(err) err = stx.SyntacticVerify(ctx) - require.Error(err) + require.ErrorIs(err, avax.ErrWrongNetworkID) addValidatorTx.NetworkID-- // Case: Stake owner has no addresses @@ -119,7 +120,7 @@ func TestAddValidatorTxSyntacticVerify(t *testing.T) { stx, err = NewSigned(addValidatorTx, Codec, signers) require.NoError(err) err = stx.SyntacticVerify(ctx) - require.Error(err) + require.ErrorIs(err, secp256k1fx.ErrOutputUnspendable) addValidatorTx.StakeOuts = stakes // Case: Rewards owner has no addresses @@ -128,7 +129,7 @@ func TestAddValidatorTxSyntacticVerify(t *testing.T) { stx, err = NewSigned(addValidatorTx, Codec, signers) require.NoError(err) err = stx.SyntacticVerify(ctx) - require.Error(err) + require.ErrorIs(err, secp256k1fx.ErrOutputUnspendable) addValidatorTx.RewardsOwner.(*secp256k1fx.OutputOwners).Addrs = []ids.ShortID{rewardAddress} // Case: Too many shares @@ -137,15 +138,14 @@ func TestAddValidatorTxSyntacticVerify(t *testing.T) { stx, err = NewSigned(addValidatorTx, Codec, signers) require.NoError(err) err = stx.SyntacticVerify(ctx) - require.Error(err) + require.ErrorIs(err, errTooManyShares) addValidatorTx.DelegationShares-- } func TestAddValidatorTxSyntacticVerifyNotAVAX(t *testing.T) { require := require.New(t) clk := mockable.Clock{} - ctx := snow.DefaultContextTest() - ctx.AVAXAssetID = ids.GenerateTestID() + ctx := snowtest.Context(t, snowtest.PChainID) signers := [][]*secp256k1.PrivateKey{preFundedKeys} var ( @@ -215,7 +215,9 @@ func TestAddValidatorTxSyntacticVerifyNotAVAX(t *testing.T) { stx, err = NewSigned(addValidatorTx, Codec, signers) require.NoError(err) - require.Error(stx.SyntacticVerify(ctx)) + + err = stx.SyntacticVerify(ctx) + require.ErrorIs(err, errStakeMustBeAVAX) } func TestAddValidatorTxNotDelegatorTx(t *testing.T) { diff --git a/avalanchego/vms/platformvm/txs/add_validator_tx.go b/avalanchego/vms/platformvm/txs/add_validator_tx.go index 
d7101c37..b6ab65b5 100644 --- a/avalanchego/vms/platformvm/txs/add_validator_tx.go +++ b/avalanchego/vms/platformvm/txs/add_validator_tx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs @@ -19,7 +19,8 @@ import ( ) var ( - _ ValidatorTx = (*AddValidatorTx)(nil) + _ ValidatorTx = (*AddValidatorTx)(nil) + _ ScheduledStaker = (*AddValidatorTx)(nil) errTooManyShares = fmt.Errorf("a staker can only require at most %d shares from delegators", reward.PercentDenominator) ) @@ -119,7 +120,7 @@ func (tx *AddValidatorTx) SyntacticVerify(ctx *snow.Context) error { assetID := out.AssetID() if assetID != ctx.AVAXAssetID { - return fmt.Errorf("stake output must be AVAX but is %q", assetID) + return fmt.Errorf("%w but is %q", errStakeMustBeAVAX, assetID) } } diff --git a/avalanchego/vms/platformvm/txs/advance_time_tx.go b/avalanchego/vms/platformvm/txs/advance_time_tx.go index fc889da9..80b277fc 100644 --- a/avalanchego/vms/platformvm/txs/advance_time_tx.go +++ b/avalanchego/vms/platformvm/txs/advance_time_tx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs diff --git a/avalanchego/vms/platformvm/txs/base_tx.go b/avalanchego/vms/platformvm/txs/base_tx.go index ad8ea2f7..8a0be1ed 100644 --- a/avalanchego/vms/platformvm/txs/base_tx.go +++ b/avalanchego/vms/platformvm/txs/base_tx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package txs @@ -16,6 +16,8 @@ import ( ) var ( + _ UnsignedTx = (*BaseTx)(nil) + ErrNilTx = errors.New("tx is nil") errOutputsNotSorted = errors.New("outputs not sorted") @@ -90,9 +92,13 @@ func (tx *BaseTx) SyntacticVerify(ctx *snow.Context) error { switch { case !avax.IsSortedTransferableOutputs(tx.Outs, Codec): return errOutputsNotSorted - case !utils.IsSortedAndUniqueSortable(tx.Ins): + case !utils.IsSortedAndUnique(tx.Ins): return errInputsNotSortedUnique default: return nil } } + +func (tx *BaseTx) Visit(visitor Visitor) error { + return visitor.BaseTx(tx) +} diff --git a/avalanchego/vms/platformvm/txs/base_tx_test.go b/avalanchego/vms/platformvm/txs/base_tx_test.go index 6e5b5ad7..14bfc7b2 100644 --- a/avalanchego/vms/platformvm/txs/base_tx_test.go +++ b/avalanchego/vms/platformvm/txs/base_tx_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package txs @@ -10,42 +10,443 @@ import ( "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/vms/components/avax" + "github.com/ava-labs/avalanchego/vms/platformvm/stakeable" + "github.com/ava-labs/avalanchego/vms/secp256k1fx" + "github.com/ava-labs/avalanchego/vms/types" ) -func TestBaseTxMarshalJSON(t *testing.T) { - blockchainID := ids.ID{1} - utxoTxID := ids.ID{2} - assetID := ids.ID{3} - fxID := ids.ID{4} - tx := &BaseTx{BaseTx: avax.BaseTx{ - BlockchainID: blockchainID, - NetworkID: 4, - Ins: []*avax.TransferableInput{ - { - FxID: fxID, - UTXOID: avax.UTXOID{TxID: utxoTxID, OutputIndex: 5}, - Asset: avax.Asset{ID: assetID}, - In: &avax.TestTransferable{Val: 100}, +func TestBaseTxSerialization(t *testing.T) { + require := require.New(t) + + addr := ids.ShortID{ + 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, + } + + avaxAssetID, err := ids.FromString("FvwEAhmxKfeiG8SnEvq42hc6whRyY3EFYAvebMqDNDGCgxN5Z") + require.NoError(err) + + customAssetID := ids.ID{ + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + } + + txID := ids.ID{ + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + } + + simpleBaseTx := &BaseTx{ + BaseTx: avax.BaseTx{ + NetworkID: constants.MainnetID, + BlockchainID: constants.PlatformChainID, + Outs: []*avax.TransferableOutput{}, + Ins: []*avax.TransferableInput{ + { + UTXOID: avax.UTXOID{ + TxID: txID, + OutputIndex: 1, + }, + Asset: 
avax.Asset{ + ID: avaxAssetID, + }, + In: &secp256k1fx.TransferInput{ + Amt: units.MilliAvax, + Input: secp256k1fx.Input{ + SigIndices: []uint32{5}, + }, + }, + }, }, + Memo: types.JSONByteSlice{}, }, - Outs: []*avax.TransferableOutput{ - { - FxID: fxID, - Asset: avax.Asset{ID: assetID}, - Out: &avax.TestTransferable{Val: 100}, + } + require.NoError(simpleBaseTx.SyntacticVerify(&snow.Context{ + NetworkID: 1, + ChainID: constants.PlatformChainID, + AVAXAssetID: avaxAssetID, + })) + + expectedUnsignedSimpleBaseTxBytes := []byte{ + // Codec version + 0x00, 0x00, + // BaseTx Type ID + 0x00, 0x00, 0x00, 0x22, + // Mainnet network ID + 0x00, 0x00, 0x00, 0x01, + // P-chain blockchain ID + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Number of outputs + 0x00, 0x00, 0x00, 0x00, + // Number of inputs + 0x00, 0x00, 0x00, 0x01, + // Inputs[0] + // TxID + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + // Tx output index + 0x00, 0x00, 0x00, 0x01, + // Mainnet AVAX assetID + 0x21, 0xe6, 0x73, 0x17, 0xcb, 0xc4, 0xbe, 0x2a, + 0xeb, 0x00, 0x67, 0x7a, 0xd6, 0x46, 0x27, 0x78, + 0xa8, 0xf5, 0x22, 0x74, 0xb9, 0xd6, 0x05, 0xdf, + 0x25, 0x91, 0xb2, 0x30, 0x27, 0xa8, 0x7d, 0xff, + // secp256k1fx transfer input type ID + 0x00, 0x00, 0x00, 0x05, + // input amount = 1 MilliAvax + 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x42, 0x40, + // number of signatures needed in input + 0x00, 0x00, 0x00, 0x01, + // index of signer + 0x00, 0x00, 0x00, 0x05, + // length of memo + 0x00, 0x00, 0x00, 0x00, + } + var unsignedSimpleBaseTx UnsignedTx = simpleBaseTx + unsignedSimpleBaseTxBytes, err := Codec.Marshal(CodecVersion, &unsignedSimpleBaseTx) + require.NoError(err) + 
require.Equal(expectedUnsignedSimpleBaseTxBytes, unsignedSimpleBaseTxBytes) + + complexBaseTx := &BaseTx{ + BaseTx: avax.BaseTx{ + NetworkID: constants.MainnetID, + BlockchainID: constants.PlatformChainID, + Outs: []*avax.TransferableOutput{ + { + Asset: avax.Asset{ + ID: avaxAssetID, + }, + Out: &stakeable.LockOut{ + Locktime: 87654321, + TransferableOut: &secp256k1fx.TransferOutput{ + Amt: 1, + OutputOwners: secp256k1fx.OutputOwners{ + Locktime: 12345678, + Threshold: 0, + Addrs: []ids.ShortID{}, + }, + }, + }, + }, + { + Asset: avax.Asset{ + ID: customAssetID, + }, + Out: &stakeable.LockOut{ + Locktime: 876543210, + TransferableOut: &secp256k1fx.TransferOutput{ + Amt: 0xffffffffffffffff, + OutputOwners: secp256k1fx.OutputOwners{ + Locktime: 0, + Threshold: 1, + Addrs: []ids.ShortID{ + addr, + }, + }, + }, + }, + }, + }, + Ins: []*avax.TransferableInput{ + { + UTXOID: avax.UTXOID{ + TxID: txID, + OutputIndex: 1, + }, + Asset: avax.Asset{ + ID: avaxAssetID, + }, + In: &secp256k1fx.TransferInput{ + Amt: units.Avax, + Input: secp256k1fx.Input{ + SigIndices: []uint32{2, 5}, + }, + }, + }, + { + UTXOID: avax.UTXOID{ + TxID: txID, + OutputIndex: 2, + }, + Asset: avax.Asset{ + ID: customAssetID, + }, + In: &stakeable.LockIn{ + Locktime: 876543210, + TransferableIn: &secp256k1fx.TransferInput{ + Amt: 0xefffffffffffffff, + Input: secp256k1fx.Input{ + SigIndices: []uint32{0}, + }, + }, + }, + }, + { + UTXOID: avax.UTXOID{ + TxID: txID, + OutputIndex: 3, + }, + Asset: avax.Asset{ + ID: customAssetID, + }, + In: &secp256k1fx.TransferInput{ + Amt: 0x1000000000000000, + Input: secp256k1fx.Input{ + SigIndices: []uint32{}, + }, + }, + }, }, + Memo: types.JSONByteSlice("😅\nwell that's\x01\x23\x45!"), }, - Memo: []byte{1, 2, 3}, - }} + } + avax.SortTransferableOutputs(complexBaseTx.Outs, Codec) + utils.Sort(complexBaseTx.Ins) + require.NoError(complexBaseTx.SyntacticVerify(&snow.Context{ + NetworkID: 1, + ChainID: constants.PlatformChainID, + AVAXAssetID: avaxAssetID, + })) + + 
expectedUnsignedComplexBaseTxBytes := []byte{ + // Codec version + 0x00, 0x00, + // BaseTx Type ID + 0x00, 0x00, 0x00, 0x22, + // Mainnet network ID + 0x00, 0x00, 0x00, 0x01, + // P-chain blockchain ID + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Number of outputs + 0x00, 0x00, 0x00, 0x02, + // Outputs[0] + // Mainnet AVAX assetID + 0x21, 0xe6, 0x73, 0x17, 0xcb, 0xc4, 0xbe, 0x2a, + 0xeb, 0x00, 0x67, 0x7a, 0xd6, 0x46, 0x27, 0x78, + 0xa8, 0xf5, 0x22, 0x74, 0xb9, 0xd6, 0x05, 0xdf, + 0x25, 0x91, 0xb2, 0x30, 0x27, 0xa8, 0x7d, 0xff, + // Stakeable locked output type ID + 0x00, 0x00, 0x00, 0x16, + // Locktime + 0x00, 0x00, 0x00, 0x00, 0x05, 0x39, 0x7f, 0xb1, + // secp256k1fx transfer output type ID + 0x00, 0x00, 0x00, 0x07, + // amount + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // secp256k1fx output locktime + 0x00, 0x00, 0x00, 0x00, 0x00, 0xbc, 0x61, 0x4e, + // threshold + 0x00, 0x00, 0x00, 0x00, + // number of addresses + 0x00, 0x00, 0x00, 0x00, + // Outputs[1] + // custom asset ID + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + // Stakeable locked output type ID + 0x00, 0x00, 0x00, 0x16, + // Locktime + 0x00, 0x00, 0x00, 0x00, 0x34, 0x3e, 0xfc, 0xea, + // secp256k1fx transfer output type ID + 0x00, 0x00, 0x00, 0x07, + // amount + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + // secp256k1fx output locktime + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // threshold + 0x00, 0x00, 0x00, 0x01, + // number of addresses + 0x00, 0x00, 0x00, 0x01, + // address[0] + 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, + // number of inputs + 0x00, 0x00, 0x00, 0x03, + // inputs[0] + 
// TxID + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + // Tx output index + 0x00, 0x00, 0x00, 0x01, + // Mainnet AVAX assetID + 0x21, 0xe6, 0x73, 0x17, 0xcb, 0xc4, 0xbe, 0x2a, + 0xeb, 0x00, 0x67, 0x7a, 0xd6, 0x46, 0x27, 0x78, + 0xa8, 0xf5, 0x22, 0x74, 0xb9, 0xd6, 0x05, 0xdf, + 0x25, 0x91, 0xb2, 0x30, 0x27, 0xa8, 0x7d, 0xff, + // secp256k1fx transfer input type ID + 0x00, 0x00, 0x00, 0x05, + // input amount = 1 Avax + 0x00, 0x00, 0x00, 0x00, 0x3b, 0x9a, 0xca, 0x00, + // number of signatures needed in input + 0x00, 0x00, 0x00, 0x02, + // index of first signer + 0x00, 0x00, 0x00, 0x02, + // index of second signer + 0x00, 0x00, 0x00, 0x05, + // inputs[1] + // TxID + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + // Tx output index + 0x00, 0x00, 0x00, 0x02, + // Custom asset ID + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + // Stakeable locked input type ID + 0x00, 0x00, 0x00, 0x15, + // Locktime + 0x00, 0x00, 0x00, 0x00, 0x34, 0x3e, 0xfc, 0xea, + // secp256k1fx transfer input type ID + 0x00, 0x00, 0x00, 0x05, + // input amount + 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + // number of signatures needed in input + 0x00, 0x00, 0x00, 0x01, + // index of signer + 0x00, 0x00, 0x00, 0x00, + // inputs[2] + // TxID + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + // Tx output index + 0x00, 0x00, 0x00, 0x03, + // custom asset ID + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 
0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + // secp256k1fx transfer input type ID + 0x00, 0x00, 0x00, 0x05, + // input amount + 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // number of signatures needed in input + 0x00, 0x00, 0x00, 0x00, + // length of memo + 0x00, 0x00, 0x00, 0x14, + // memo + 0xf0, 0x9f, 0x98, 0x85, 0x0a, 0x77, 0x65, 0x6c, + 0x6c, 0x20, 0x74, 0x68, 0x61, 0x74, 0x27, 0x73, + 0x01, 0x23, 0x45, 0x21, + } + var unsignedComplexBaseTx UnsignedTx = complexBaseTx + unsignedComplexBaseTxBytes, err := Codec.Marshal(CodecVersion, &unsignedComplexBaseTx) + require.NoError(err) + require.Equal(expectedUnsignedComplexBaseTxBytes, unsignedComplexBaseTxBytes) - txBytes, err := json.Marshal(tx) - require.NoError(t, err) + aliaser := ids.NewAliaser() + require.NoError(aliaser.Alias(constants.PlatformChainID, "P")) - asString := string(txBytes) + unsignedComplexBaseTx.InitCtx(&snow.Context{ + NetworkID: 1, + ChainID: constants.PlatformChainID, + AVAXAssetID: avaxAssetID, + BCLookup: aliaser, + }) - require.Contains(t, asString, `"networkID":4`) - require.Contains(t, asString, `"blockchainID":"SYXsAycDPUu4z2ZksJD5fh5nTDcH3vCFHnpcVye5XuJ2jArg"`) - require.Contains(t, asString, `"inputs":[{"txID":"t64jLxDRmxo8y48WjbRALPAZuSDZ6qPVaaeDzxHA4oSojhLt","outputIndex":5,"assetID":"2KdbbWvpeAShCx5hGbtdF15FMMepq9kajsNTqVvvEbhiCRSxU","fxID":"2mB8TguRrYvbGw7G2UBqKfmL8osS7CfmzAAHSzuZK8bwpRKdY","input":{"Err":null,"Val":100}}]`) - require.Contains(t, asString, `"outputs":[{"assetID":"2KdbbWvpeAShCx5hGbtdF15FMMepq9kajsNTqVvvEbhiCRSxU","fxID":"2mB8TguRrYvbGw7G2UBqKfmL8osS7CfmzAAHSzuZK8bwpRKdY","output":{"Err":null,"Val":100}}]`) + unsignedComplexBaseTxJSONBytes, err := json.MarshalIndent(unsignedComplexBaseTx, "", "\t") + require.NoError(err) + require.Equal(`{ + "networkID": 1, + "blockchainID": "11111111111111111111111111111111LpoYY", + "outputs": [ + { + 
"assetID": "FvwEAhmxKfeiG8SnEvq42hc6whRyY3EFYAvebMqDNDGCgxN5Z", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "output": { + "locktime": 87654321, + "output": { + "addresses": [], + "amount": 1, + "locktime": 12345678, + "threshold": 0 + } + } + }, + { + "assetID": "2Ab62uWwJw1T6VvmKD36ufsiuGZuX1pGykXAvPX1LtjTRHxwcc", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "output": { + "locktime": 876543210, + "output": { + "addresses": [ + "P-avax1g32kvaugnx4tk3z4vemc3xd2hdz92enh972wxr" + ], + "amount": 18446744073709551615, + "locktime": 0, + "threshold": 1 + } + } + } + ], + "inputs": [ + { + "txID": "2wiU5PnFTjTmoAXGZutHAsPF36qGGyLHYHj9G1Aucfmb3JFFGN", + "outputIndex": 1, + "assetID": "FvwEAhmxKfeiG8SnEvq42hc6whRyY3EFYAvebMqDNDGCgxN5Z", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "input": { + "amount": 1000000000, + "signatureIndices": [ + 2, + 5 + ] + } + }, + { + "txID": "2wiU5PnFTjTmoAXGZutHAsPF36qGGyLHYHj9G1Aucfmb3JFFGN", + "outputIndex": 2, + "assetID": "2Ab62uWwJw1T6VvmKD36ufsiuGZuX1pGykXAvPX1LtjTRHxwcc", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "input": { + "locktime": 876543210, + "input": { + "amount": 17293822569102704639, + "signatureIndices": [ + 0 + ] + } + } + }, + { + "txID": "2wiU5PnFTjTmoAXGZutHAsPF36qGGyLHYHj9G1Aucfmb3JFFGN", + "outputIndex": 3, + "assetID": "2Ab62uWwJw1T6VvmKD36ufsiuGZuX1pGykXAvPX1LtjTRHxwcc", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "input": { + "amount": 1152921504606846976, + "signatureIndices": [] + } + } + ], + "memo": "0xf09f98850a77656c6c2074686174277301234521" +}`, string(unsignedComplexBaseTxJSONBytes)) } diff --git a/avalanchego/vms/platformvm/txs/builder/builder.go b/avalanchego/vms/platformvm/txs/builder/builder.go index 19aa1e37..626edf6e 100644 --- a/avalanchego/vms/platformvm/txs/builder/builder.go +++ b/avalanchego/vms/platformvm/txs/builder/builder.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. 
All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package builder @@ -11,12 +11,14 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/math" "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/config" "github.com/ava-labs/avalanchego/vms/platformvm/fx" + "github.com/ava-labs/avalanchego/vms/platformvm/signer" "github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/ava-labs/avalanchego/vms/platformvm/utxo" @@ -29,7 +31,7 @@ const MaxPageSize = 1024 var ( _ Builder = (*builder)(nil) - errNoFunds = errors.New("no spendable funds were found") + ErrNoFunds = errors.New("no spendable funds were found") ) type Builder interface { @@ -48,6 +50,7 @@ type AtomicTxBuilder interface { to ids.ShortID, keys []*secp256k1.PrivateKey, changeAddr ids.ShortID, + memo []byte, ) (*txs.Tx, error) // amount: amount of tokens to export @@ -61,6 +64,7 @@ type AtomicTxBuilder interface { to ids.ShortID, keys []*secp256k1.PrivateKey, changeAddr ids.ShortID, + memo []byte, ) (*txs.Tx, error) } @@ -80,6 +84,7 @@ type DecisionTxBuilder interface { chainName string, keys []*secp256k1.PrivateKey, changeAddr ids.ShortID, + memo []byte, ) (*txs.Tx, error) // threshold: [threshold] of [ownerAddrs] needed to manage this subnet @@ -91,6 +96,39 @@ type DecisionTxBuilder interface { ownerAddrs []ids.ShortID, keys []*secp256k1.PrivateKey, changeAddr ids.ShortID, + memo []byte, + ) (*txs.Tx, error) + + NewTransformSubnetTx( + subnetID ids.ID, + assetID ids.ID, + initialSupply uint64, + maxSupply uint64, + minConsumptionRate uint64, + 
maxConsumptionRate uint64, + minValidatorStake uint64, + maxValidatorStake uint64, + minStakeDuration time.Duration, + maxStakeDuration time.Duration, + minDelegationFee uint32, + minDelegatorStake uint64, + maxValidatorWeightFactor byte, + uptimeRequirement uint32, + keys []*secp256k1.PrivateKey, + changeAddr ids.ShortID, + memo []byte, + ) (*txs.Tx, error) + + // amount: amount the sender is sending + // owner: recipient of the funds + // keys: keys to sign the tx and pay the amount + // changeAddr: address to send change to, if there is any + NewBaseTx( + amount uint64, + owner secp256k1fx.OutputOwners, + keys []*secp256k1.PrivateKey, + changeAddr ids.ShortID, + memo []byte, ) (*txs.Tx, error) } @@ -112,6 +150,29 @@ type ProposalTxBuilder interface { shares uint32, keys []*secp256k1.PrivateKey, changeAddr ids.ShortID, + memo []byte, + ) (*txs.Tx, error) + + // stakeAmount: amount the validator stakes + // startTime: unix time they start validating + // endTime: unix time they stop validating + // nodeID: ID of the node we want to validate with + // pop: the node proof of possession + // rewardAddress: address to send reward to, if applicable + // shares: 10,000 times percentage of reward taken from delegators + // keys: Keys providing the staked tokens + // changeAddr: Address to send change to, if there is any + NewAddPermissionlessValidatorTx( + stakeAmount, + startTime, + endTime uint64, + nodeID ids.NodeID, + pop *signer.ProofOfPossession, + rewardAddress ids.ShortID, + shares uint32, + keys []*secp256k1.PrivateKey, + changeAddr ids.ShortID, + memo []byte, ) (*txs.Tx, error) // stakeAmount: amount the delegator stakes @@ -129,6 +190,25 @@ type ProposalTxBuilder interface { rewardAddress ids.ShortID, keys []*secp256k1.PrivateKey, changeAddr ids.ShortID, + memo []byte, + ) (*txs.Tx, error) + + // stakeAmount: amount the delegator stakes + // startTime: unix time they start delegating + // endTime: unix time they stop delegating + // nodeID: ID of the node we 
are delegating to + // rewardAddress: address to send reward to, if applicable + // keys: keys providing the staked tokens + // changeAddr: address to send change to, if there is any + NewAddPermissionlessDelegatorTx( + stakeAmount, + startTime, + endTime uint64, + nodeID ids.NodeID, + rewardAddress ids.ShortID, + keys []*secp256k1.PrivateKey, + changeAddr ids.ShortID, + memo []byte, ) (*txs.Tx, error) // weight: sampling weight of the new validator @@ -146,6 +226,7 @@ type ProposalTxBuilder interface { subnetID ids.ID, keys []*secp256k1.PrivateKey, changeAddr ids.ShortID, + memo []byte, ) (*txs.Tx, error) // Creates a transaction that removes [nodeID] @@ -157,15 +238,22 @@ type ProposalTxBuilder interface { subnetID ids.ID, keys []*secp256k1.PrivateKey, changeAddr ids.ShortID, + memo []byte, ) (*txs.Tx, error) - // newAdvanceTimeTx creates a new tx that, if it is accepted and followed by a - // Commit block, will set the chain's timestamp to [timestamp]. - NewAdvanceTimeTx(timestamp time.Time) (*txs.Tx, error) - - // RewardStakerTx creates a new transaction that proposes to remove the staker - // [validatorID] from the default validator set. - NewRewardValidatorTx(txID ids.ID) (*txs.Tx, error) + // Creates a transaction that transfers ownership of [subnetID] + // threshold: [threshold] of [ownerAddrs] needed to manage this subnet + // ownerAddrs: control addresses for the new subnet + // keys: keys to use for modifying the subnet + // changeAddr: address to send change to, if there is any + NewTransferSubnetOwnershipTx( + subnetID ids.ID, + threshold uint32, + ownerAddrs []ids.ShortID, + keys []*secp256k1.PrivateKey, + changeAddr ids.ShortID, + memo []byte, + ) (*txs.Tx, error) } func New( @@ -204,6 +292,7 @@ func (b *builder) NewImportTx( to ids.ShortID, keys []*secp256k1.PrivateKey, changeAddr ids.ShortID, + memo []byte, ) (*txs.Tx, error) { kc := secp256k1fx.NewKeychain(keys...) 
@@ -241,7 +330,7 @@ func (b *builder) NewImportTx( avax.SortTransferableInputsWithSigners(importedInputs, signers) if len(importedAmounts) == 0 { - return nil, errNoFunds // No imported UTXOs were spendable + return nil, ErrNoFunds // No imported UTXOs were spendable } importedAVAX := importedAmounts[b.ctx.AVAXAssetID] @@ -286,6 +375,7 @@ func (b *builder) NewImportTx( BlockchainID: b.ctx.ChainID, Outs: outs, Ins: ins, + Memo: memo, }}, SourceChain: from, ImportedInputs: importedInputs, @@ -304,6 +394,7 @@ func (b *builder) NewExportTx( to ids.ShortID, keys []*secp256k1.PrivateKey, changeAddr ids.ShortID, + memo []byte, ) (*txs.Tx, error) { toBurn, err := math.Add64(amount, b.cfg.TxFee) if err != nil { @@ -321,6 +412,7 @@ func (b *builder) NewExportTx( BlockchainID: b.ctx.ChainID, Ins: ins, Outs: outs, // Non-exported outputs + Memo: memo, }}, DestinationChain: chainID, ExportedOutputs: []*avax.TransferableOutput{{ // Exported to X-Chain @@ -350,6 +442,7 @@ func (b *builder) NewCreateChainTx( chainName string, keys []*secp256k1.PrivateKey, changeAddr ids.ShortID, + memo []byte, ) (*txs.Tx, error) { timestamp := b.state.GetTimestamp() createBlockchainTxFee := b.cfg.GetCreateBlockchainTxFee(timestamp) @@ -374,6 +467,7 @@ func (b *builder) NewCreateChainTx( BlockchainID: b.ctx.ChainID, Ins: ins, Outs: outs, + Memo: memo, }}, SubnetID: subnetID, ChainName: chainName, @@ -394,6 +488,7 @@ func (b *builder) NewCreateSubnetTx( ownerAddrs []ids.ShortID, keys []*secp256k1.PrivateKey, changeAddr ids.ShortID, + memo []byte, ) (*txs.Tx, error) { timestamp := b.state.GetTimestamp() createSubnetTxFee := b.cfg.GetCreateSubnetTxFee(timestamp) @@ -412,6 +507,7 @@ func (b *builder) NewCreateSubnetTx( BlockchainID: b.ctx.ChainID, Ins: ins, Outs: outs, + Memo: memo, }}, Owner: &secp256k1fx.OutputOwners{ Threshold: threshold, @@ -425,6 +521,70 @@ func (b *builder) NewCreateSubnetTx( return tx, tx.SyntacticVerify(b.ctx) } +func (b *builder) NewTransformSubnetTx( + subnetID ids.ID, + 
assetID ids.ID, + initialSupply uint64, + maxSupply uint64, + minConsumptionRate uint64, + maxConsumptionRate uint64, + minValidatorStake uint64, + maxValidatorStake uint64, + minStakeDuration time.Duration, + maxStakeDuration time.Duration, + minDelegationFee uint32, + minDelegatorStake uint64, + maxValidatorWeightFactor byte, + uptimeRequirement uint32, + keys []*secp256k1.PrivateKey, + changeAddr ids.ShortID, + memo []byte, +) (*txs.Tx, error) { + ins, outs, _, signers, err := b.Spend(b.state, keys, 0, b.cfg.TransformSubnetTxFee, changeAddr) + if err != nil { + return nil, fmt.Errorf("couldn't generate tx inputs/outputs: %w", err) + } + + subnetAuth, subnetSigners, err := b.Authorize(b.state, subnetID, keys) + if err != nil { + return nil, fmt.Errorf("couldn't authorize tx's subnet restrictions: %w", err) + } + signers = append(signers, subnetSigners) + + utx := &txs.TransformSubnetTx{ + BaseTx: txs.BaseTx{ + BaseTx: avax.BaseTx{ + NetworkID: b.ctx.NetworkID, + BlockchainID: b.ctx.ChainID, + Ins: ins, + Outs: outs, + Memo: memo, + }, + }, + Subnet: subnetID, + AssetID: assetID, + InitialSupply: initialSupply, + MaximumSupply: maxSupply, + MinConsumptionRate: minConsumptionRate, + MaxConsumptionRate: maxConsumptionRate, + MinValidatorStake: minValidatorStake, + MaxValidatorStake: maxValidatorStake, + MinStakeDuration: uint32(minStakeDuration / time.Second), + MaxStakeDuration: uint32(maxStakeDuration / time.Second), + MinDelegationFee: minDelegationFee, + MinDelegatorStake: minDelegatorStake, + MaxValidatorWeightFactor: maxValidatorWeightFactor, + UptimeRequirement: uptimeRequirement, + SubnetAuth: subnetAuth, + } + + tx, err := txs.NewSigned(utx, txs.Codec, signers) + if err != nil { + return nil, err + } + return tx, tx.SyntacticVerify(b.ctx) +} + func (b *builder) NewAddValidatorTx( stakeAmount, startTime, @@ -434,6 +594,7 @@ func (b *builder) NewAddValidatorTx( shares uint32, keys []*secp256k1.PrivateKey, changeAddr ids.ShortID, + memo []byte, ) (*txs.Tx, 
error) { ins, unstakedOuts, stakedOuts, signers, err := b.Spend(b.state, keys, stakeAmount, b.cfg.AddPrimaryNetworkValidatorFee, changeAddr) if err != nil { @@ -446,6 +607,7 @@ func (b *builder) NewAddValidatorTx( BlockchainID: b.ctx.ChainID, Ins: ins, Outs: unstakedOuts, + Memo: memo, }}, Validator: txs.Validator{ NodeID: nodeID, @@ -468,6 +630,59 @@ func (b *builder) NewAddValidatorTx( return tx, tx.SyntacticVerify(b.ctx) } +func (b *builder) NewAddPermissionlessValidatorTx( + stakeAmount, + startTime, + endTime uint64, + nodeID ids.NodeID, + pop *signer.ProofOfPossession, + rewardAddress ids.ShortID, + shares uint32, + keys []*secp256k1.PrivateKey, + changeAddr ids.ShortID, + memo []byte, +) (*txs.Tx, error) { + ins, unstakedOuts, stakedOuts, signers, err := b.Spend(b.state, keys, stakeAmount, b.cfg.AddPrimaryNetworkValidatorFee, changeAddr) + if err != nil { + return nil, fmt.Errorf("couldn't generate tx inputs/outputs: %w", err) + } + // Create the tx + utx := &txs.AddPermissionlessValidatorTx{ + BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ + NetworkID: b.ctx.NetworkID, + BlockchainID: b.ctx.ChainID, + Ins: ins, + Outs: unstakedOuts, + Memo: memo, + }}, + Validator: txs.Validator{ + NodeID: nodeID, + Start: startTime, + End: endTime, + Wght: stakeAmount, + }, + Subnet: constants.PrimaryNetworkID, + Signer: pop, + StakeOuts: stakedOuts, + ValidatorRewardsOwner: &secp256k1fx.OutputOwners{ + Locktime: 0, + Threshold: 1, + Addrs: []ids.ShortID{rewardAddress}, + }, + DelegatorRewardsOwner: &secp256k1fx.OutputOwners{ + Locktime: 0, + Threshold: 1, + Addrs: []ids.ShortID{rewardAddress}, + }, + DelegationShares: shares, + } + tx, err := txs.NewSigned(utx, txs.Codec, signers) + if err != nil { + return nil, err + } + return tx, tx.SyntacticVerify(b.ctx) +} + func (b *builder) NewAddDelegatorTx( stakeAmount, startTime, @@ -476,6 +691,7 @@ func (b *builder) NewAddDelegatorTx( rewardAddress ids.ShortID, keys []*secp256k1.PrivateKey, changeAddr ids.ShortID, + memo []byte, ) 
(*txs.Tx, error) { ins, unlockedOuts, lockedOuts, signers, err := b.Spend(b.state, keys, stakeAmount, b.cfg.AddPrimaryNetworkDelegatorFee, changeAddr) if err != nil { @@ -488,6 +704,7 @@ func (b *builder) NewAddDelegatorTx( BlockchainID: b.ctx.ChainID, Ins: ins, Outs: unlockedOuts, + Memo: memo, }}, Validator: txs.Validator{ NodeID: nodeID, @@ -509,6 +726,50 @@ func (b *builder) NewAddDelegatorTx( return tx, tx.SyntacticVerify(b.ctx) } +func (b *builder) NewAddPermissionlessDelegatorTx( + stakeAmount, + startTime, + endTime uint64, + nodeID ids.NodeID, + rewardAddress ids.ShortID, + keys []*secp256k1.PrivateKey, + changeAddr ids.ShortID, + memo []byte, +) (*txs.Tx, error) { + ins, unlockedOuts, lockedOuts, signers, err := b.Spend(b.state, keys, stakeAmount, b.cfg.AddPrimaryNetworkDelegatorFee, changeAddr) + if err != nil { + return nil, fmt.Errorf("couldn't generate tx inputs/outputs: %w", err) + } + // Create the tx + utx := &txs.AddPermissionlessDelegatorTx{ + BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ + NetworkID: b.ctx.NetworkID, + BlockchainID: b.ctx.ChainID, + Ins: ins, + Outs: unlockedOuts, + Memo: memo, + }}, + Validator: txs.Validator{ + NodeID: nodeID, + Start: startTime, + End: endTime, + Wght: stakeAmount, + }, + Subnet: constants.PrimaryNetworkID, + StakeOuts: lockedOuts, + DelegationRewardsOwner: &secp256k1fx.OutputOwners{ + Locktime: 0, + Threshold: 1, + Addrs: []ids.ShortID{rewardAddress}, + }, + } + tx, err := txs.NewSigned(utx, txs.Codec, signers) + if err != nil { + return nil, err + } + return tx, tx.SyntacticVerify(b.ctx) +} + func (b *builder) NewAddSubnetValidatorTx( weight, startTime, @@ -517,6 +778,7 @@ func (b *builder) NewAddSubnetValidatorTx( subnetID ids.ID, keys []*secp256k1.PrivateKey, changeAddr ids.ShortID, + memo []byte, ) (*txs.Tx, error) { ins, outs, _, signers, err := b.Spend(b.state, keys, 0, b.cfg.TxFee, changeAddr) if err != nil { @@ -536,6 +798,7 @@ func (b *builder) NewAddSubnetValidatorTx( BlockchainID: b.ctx.ChainID, Ins: 
ins, Outs: outs, + Memo: memo, }}, SubnetValidator: txs.SubnetValidator{ Validator: txs.Validator{ @@ -560,6 +823,7 @@ func (b *builder) NewRemoveSubnetValidatorTx( subnetID ids.ID, keys []*secp256k1.PrivateKey, changeAddr ids.ShortID, + memo []byte, ) (*txs.Tx, error) { ins, outs, _, signers, err := b.Spend(b.state, keys, 0, b.cfg.TxFee, changeAddr) if err != nil { @@ -579,6 +843,7 @@ func (b *builder) NewRemoveSubnetValidatorTx( BlockchainID: b.ctx.ChainID, Ins: ins, Outs: outs, + Memo: memo, }}, Subnet: subnetID, NodeID: nodeID, @@ -591,21 +856,85 @@ func (b *builder) NewRemoveSubnetValidatorTx( return tx, tx.SyntacticVerify(b.ctx) } -func (b *builder) NewAdvanceTimeTx(timestamp time.Time) (*txs.Tx, error) { - utx := &txs.AdvanceTimeTx{Time: uint64(timestamp.Unix())} - tx, err := txs.NewSigned(utx, txs.Codec, nil) +func (b *builder) NewTransferSubnetOwnershipTx( + subnetID ids.ID, + threshold uint32, + ownerAddrs []ids.ShortID, + keys []*secp256k1.PrivateKey, + changeAddr ids.ShortID, + memo []byte, +) (*txs.Tx, error) { + ins, outs, _, signers, err := b.Spend(b.state, keys, 0, b.cfg.TxFee, changeAddr) + if err != nil { + return nil, fmt.Errorf("couldn't generate tx inputs/outputs: %w", err) + } + + subnetAuth, subnetSigners, err := b.Authorize(b.state, subnetID, keys) + if err != nil { + return nil, fmt.Errorf("couldn't authorize tx's subnet restrictions: %w", err) + } + signers = append(signers, subnetSigners) + + utx := &txs.TransferSubnetOwnershipTx{ + BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ + NetworkID: b.ctx.NetworkID, + BlockchainID: b.ctx.ChainID, + Ins: ins, + Outs: outs, + Memo: memo, + }}, + Subnet: subnetID, + SubnetAuth: subnetAuth, + Owner: &secp256k1fx.OutputOwners{ + Threshold: threshold, + Addrs: ownerAddrs, + }, + } + tx, err := txs.NewSigned(utx, txs.Codec, signers) if err != nil { return nil, err } return tx, tx.SyntacticVerify(b.ctx) } -func (b *builder) NewRewardValidatorTx(txID ids.ID) (*txs.Tx, error) { - utx := 
&txs.RewardValidatorTx{TxID: txID} - tx, err := txs.NewSigned(utx, txs.Codec, nil) +func (b *builder) NewBaseTx( + amount uint64, + owner secp256k1fx.OutputOwners, + keys []*secp256k1.PrivateKey, + changeAddr ids.ShortID, + memo []byte, +) (*txs.Tx, error) { + toBurn, err := math.Add64(amount, b.cfg.TxFee) if err != nil { - return nil, err + return nil, fmt.Errorf("amount (%d) + tx fee(%d) overflows", amount, b.cfg.TxFee) + } + ins, outs, _, signers, err := b.Spend(b.state, keys, 0, toBurn, changeAddr) + if err != nil { + return nil, fmt.Errorf("couldn't generate tx inputs/outputs: %w", err) } + outs = append(outs, &avax.TransferableOutput{ + Asset: avax.Asset{ID: b.ctx.AVAXAssetID}, + Out: &secp256k1fx.TransferOutput{ + Amt: amount, + OutputOwners: owner, + }, + }) + + avax.SortTransferableOutputs(outs, txs.Codec) + + utx := &txs.BaseTx{ + BaseTx: avax.BaseTx{ + NetworkID: b.ctx.NetworkID, + BlockchainID: b.ctx.ChainID, + Ins: ins, + Outs: outs, + Memo: memo, + }, + } + tx, err := txs.NewSigned(utx, txs.Codec, signers) + if err != nil { + return nil, err + } return tx, tx.SyntacticVerify(b.ctx) } diff --git a/avalanchego/vms/platformvm/txs/builder/mock_builder.go b/avalanchego/vms/platformvm/txs/builder/mock_builder.go deleted file mode 100644 index 1f7c4f3d..00000000 --- a/avalanchego/vms/platformvm/txs/builder/mock_builder.go +++ /dev/null @@ -1,191 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/ava-labs/avalanchego/vms/platformvm/txs/builder (interfaces: Builder) - -// Package builder is a generated GoMock package. 
-package builder - -import ( - reflect "reflect" - time "time" - - ids "github.com/ava-labs/avalanchego/ids" - secp256k1 "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" - txs "github.com/ava-labs/avalanchego/vms/platformvm/txs" - gomock "github.com/golang/mock/gomock" -) - -// MockBuilder is a mock of Builder interface. -type MockBuilder struct { - ctrl *gomock.Controller - recorder *MockBuilderMockRecorder -} - -// MockBuilderMockRecorder is the mock recorder for MockBuilder. -type MockBuilderMockRecorder struct { - mock *MockBuilder -} - -// NewMockBuilder creates a new mock instance. -func NewMockBuilder(ctrl *gomock.Controller) *MockBuilder { - mock := &MockBuilder{ctrl: ctrl} - mock.recorder = &MockBuilderMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockBuilder) EXPECT() *MockBuilderMockRecorder { - return m.recorder -} - -// NewAddDelegatorTx mocks base method. -func (m *MockBuilder) NewAddDelegatorTx(arg0, arg1, arg2 uint64, arg3 ids.NodeID, arg4 ids.ShortID, arg5 []*secp256k1.PrivateKey, arg6 ids.ShortID) (*txs.Tx, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NewAddDelegatorTx", arg0, arg1, arg2, arg3, arg4, arg5, arg6) - ret0, _ := ret[0].(*txs.Tx) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// NewAddDelegatorTx indicates an expected call of NewAddDelegatorTx. -func (mr *MockBuilderMockRecorder) NewAddDelegatorTx(arg0, arg1, arg2, arg3, arg4, arg5, arg6 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewAddDelegatorTx", reflect.TypeOf((*MockBuilder)(nil).NewAddDelegatorTx), arg0, arg1, arg2, arg3, arg4, arg5, arg6) -} - -// NewAddSubnetValidatorTx mocks base method. 
-func (m *MockBuilder) NewAddSubnetValidatorTx(arg0, arg1, arg2 uint64, arg3 ids.NodeID, arg4 ids.ID, arg5 []*secp256k1.PrivateKey, arg6 ids.ShortID) (*txs.Tx, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NewAddSubnetValidatorTx", arg0, arg1, arg2, arg3, arg4, arg5, arg6) - ret0, _ := ret[0].(*txs.Tx) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// NewAddSubnetValidatorTx indicates an expected call of NewAddSubnetValidatorTx. -func (mr *MockBuilderMockRecorder) NewAddSubnetValidatorTx(arg0, arg1, arg2, arg3, arg4, arg5, arg6 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewAddSubnetValidatorTx", reflect.TypeOf((*MockBuilder)(nil).NewAddSubnetValidatorTx), arg0, arg1, arg2, arg3, arg4, arg5, arg6) -} - -// NewAddValidatorTx mocks base method. -func (m *MockBuilder) NewAddValidatorTx(arg0, arg1, arg2 uint64, arg3 ids.NodeID, arg4 ids.ShortID, arg5 uint32, arg6 []*secp256k1.PrivateKey, arg7 ids.ShortID) (*txs.Tx, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NewAddValidatorTx", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) - ret0, _ := ret[0].(*txs.Tx) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// NewAddValidatorTx indicates an expected call of NewAddValidatorTx. -func (mr *MockBuilderMockRecorder) NewAddValidatorTx(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewAddValidatorTx", reflect.TypeOf((*MockBuilder)(nil).NewAddValidatorTx), arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) -} - -// NewAdvanceTimeTx mocks base method. -func (m *MockBuilder) NewAdvanceTimeTx(arg0 time.Time) (*txs.Tx, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NewAdvanceTimeTx", arg0) - ret0, _ := ret[0].(*txs.Tx) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// NewAdvanceTimeTx indicates an expected call of NewAdvanceTimeTx. 
-func (mr *MockBuilderMockRecorder) NewAdvanceTimeTx(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewAdvanceTimeTx", reflect.TypeOf((*MockBuilder)(nil).NewAdvanceTimeTx), arg0) -} - -// NewCreateChainTx mocks base method. -func (m *MockBuilder) NewCreateChainTx(arg0 ids.ID, arg1 []byte, arg2 ids.ID, arg3 []ids.ID, arg4 string, arg5 []*secp256k1.PrivateKey, arg6 ids.ShortID) (*txs.Tx, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NewCreateChainTx", arg0, arg1, arg2, arg3, arg4, arg5, arg6) - ret0, _ := ret[0].(*txs.Tx) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// NewCreateChainTx indicates an expected call of NewCreateChainTx. -func (mr *MockBuilderMockRecorder) NewCreateChainTx(arg0, arg1, arg2, arg3, arg4, arg5, arg6 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewCreateChainTx", reflect.TypeOf((*MockBuilder)(nil).NewCreateChainTx), arg0, arg1, arg2, arg3, arg4, arg5, arg6) -} - -// NewCreateSubnetTx mocks base method. -func (m *MockBuilder) NewCreateSubnetTx(arg0 uint32, arg1 []ids.ShortID, arg2 []*secp256k1.PrivateKey, arg3 ids.ShortID) (*txs.Tx, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NewCreateSubnetTx", arg0, arg1, arg2, arg3) - ret0, _ := ret[0].(*txs.Tx) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// NewCreateSubnetTx indicates an expected call of NewCreateSubnetTx. -func (mr *MockBuilderMockRecorder) NewCreateSubnetTx(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewCreateSubnetTx", reflect.TypeOf((*MockBuilder)(nil).NewCreateSubnetTx), arg0, arg1, arg2, arg3) -} - -// NewExportTx mocks base method. 
-func (m *MockBuilder) NewExportTx(arg0 uint64, arg1 ids.ID, arg2 ids.ShortID, arg3 []*secp256k1.PrivateKey, arg4 ids.ShortID) (*txs.Tx, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NewExportTx", arg0, arg1, arg2, arg3, arg4) - ret0, _ := ret[0].(*txs.Tx) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// NewExportTx indicates an expected call of NewExportTx. -func (mr *MockBuilderMockRecorder) NewExportTx(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewExportTx", reflect.TypeOf((*MockBuilder)(nil).NewExportTx), arg0, arg1, arg2, arg3, arg4) -} - -// NewImportTx mocks base method. -func (m *MockBuilder) NewImportTx(arg0 ids.ID, arg1 ids.ShortID, arg2 []*secp256k1.PrivateKey, arg3 ids.ShortID) (*txs.Tx, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NewImportTx", arg0, arg1, arg2, arg3) - ret0, _ := ret[0].(*txs.Tx) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// NewImportTx indicates an expected call of NewImportTx. -func (mr *MockBuilderMockRecorder) NewImportTx(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewImportTx", reflect.TypeOf((*MockBuilder)(nil).NewImportTx), arg0, arg1, arg2, arg3) -} - -// NewRemoveSubnetValidatorTx mocks base method. -func (m *MockBuilder) NewRemoveSubnetValidatorTx(arg0 ids.NodeID, arg1 ids.ID, arg2 []*secp256k1.PrivateKey, arg3 ids.ShortID) (*txs.Tx, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NewRemoveSubnetValidatorTx", arg0, arg1, arg2, arg3) - ret0, _ := ret[0].(*txs.Tx) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// NewRemoveSubnetValidatorTx indicates an expected call of NewRemoveSubnetValidatorTx. 
-func (mr *MockBuilderMockRecorder) NewRemoveSubnetValidatorTx(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewRemoveSubnetValidatorTx", reflect.TypeOf((*MockBuilder)(nil).NewRemoveSubnetValidatorTx), arg0, arg1, arg2, arg3) -} - -// NewRewardValidatorTx mocks base method. -func (m *MockBuilder) NewRewardValidatorTx(arg0 ids.ID) (*txs.Tx, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NewRewardValidatorTx", arg0) - ret0, _ := ret[0].(*txs.Tx) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// NewRewardValidatorTx indicates an expected call of NewRewardValidatorTx. -func (mr *MockBuilderMockRecorder) NewRewardValidatorTx(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewRewardValidatorTx", reflect.TypeOf((*MockBuilder)(nil).NewRewardValidatorTx), arg0) -} diff --git a/avalanchego/vms/platformvm/txs/codec.go b/avalanchego/vms/platformvm/txs/codec.go index d3667b94..36fe2e5a 100644 --- a/avalanchego/vms/platformvm/txs/codec.go +++ b/avalanchego/vms/platformvm/txs/codec.go @@ -1,21 +1,22 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package txs import ( "math" + "time" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/vms/platformvm/signer" "github.com/ava-labs/avalanchego/vms/platformvm/stakeable" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) -// Version is the current default codec version -const Version = 0 +const CodecVersion = 0 var ( Codec codec.Manager @@ -27,11 +28,13 @@ var ( GenesisCodec codec.Manager ) -func init() { - c := linearcodec.NewDefault() - Codec = codec.NewDefaultManager() - gc := linearcodec.NewCustomMaxLength(math.MaxInt32) - GenesisCodec = codec.NewManager(math.MaxInt32) +// TODO: Remove after v1.11.x has activated +// +// Invariant: InitCodec, Codec, and GenesisCodec must not be accessed +// concurrently +func InitCodec(durangoTime time.Time) error { + c := linearcodec.NewDefault(durangoTime) + gc := linearcodec.NewDefault(time.Time{}) errs := wrappers.Errs{} for _, c := range []linearcodec.Codec{c, gc} { @@ -41,31 +44,51 @@ func init() { c.SkipRegistrations(5) errs.Add(RegisterUnsignedTxsTypes(c)) + + c.SkipRegistrations(4) + + errs.Add(RegisterDUnsignedTxsTypes(c)) } + + newCodec := codec.NewDefaultManager() + newGenesisCodec := codec.NewManager(math.MaxInt32) errs.Add( - Codec.RegisterCodec(Version, c), - GenesisCodec.RegisterCodec(Version, gc), + newCodec.RegisterCodec(CodecVersion, c), + newGenesisCodec.RegisterCodec(CodecVersion, gc), ) if errs.Errored() { - panic(errs.Err) + return errs.Err + } + + Codec = newCodec + GenesisCodec = newGenesisCodec + return nil +} + +func init() { + if err := InitCodec(time.Time{}); err != nil { + panic(err) } } // RegisterUnsignedTxsTypes allows registering relevant type of unsigned package // in the right sequence. Following repackaging of platformvm package, a few -// subpackage-level codecs were introduced, each handling serialization of specific types. 
+// subpackage-level codecs were introduced, each handling serialization of +// specific types. +// // RegisterUnsignedTxsTypes is made exportable so to guarantee that other codecs // are coherent with components one. -func RegisterUnsignedTxsTypes(targetCodec codec.Registry) error { +func RegisterUnsignedTxsTypes(targetCodec linearcodec.Codec) error { errs := wrappers.Errs{} + + // The secp256k1fx is registered here because this is the same place it is + // registered in the AVM. This ensures that the typeIDs match up for utxos + // in shared memory. + errs.Add(targetCodec.RegisterType(&secp256k1fx.TransferInput{})) + targetCodec.SkipRegistrations(1) + errs.Add(targetCodec.RegisterType(&secp256k1fx.TransferOutput{})) + targetCodec.SkipRegistrations(1) errs.Add( - // The Fx is registered here because this is the same place it is - // registered in the AVM. This ensures that the typeIDs match up for - // utxos in shared memory. - targetCodec.RegisterType(&secp256k1fx.TransferInput{}), - targetCodec.RegisterType(&secp256k1fx.MintOutput{}), - targetCodec.RegisterType(&secp256k1fx.TransferOutput{}), - targetCodec.RegisterType(&secp256k1fx.MintOperation{}), targetCodec.RegisterType(&secp256k1fx.Credential{}), targetCodec.RegisterType(&secp256k1fx.Input{}), targetCodec.RegisterType(&secp256k1fx.OutputOwners{}), @@ -94,3 +117,10 @@ func RegisterUnsignedTxsTypes(targetCodec codec.Registry) error { ) return errs.Err } + +func RegisterDUnsignedTxsTypes(targetCodec linearcodec.Codec) error { + return utils.Err( + targetCodec.RegisterType(&TransferSubnetOwnershipTx{}), + targetCodec.RegisterType(&BaseTx{}), + ) +} diff --git a/avalanchego/vms/platformvm/txs/create_chain_test.go b/avalanchego/vms/platformvm/txs/create_chain_test.go index 60a01269..787aaa2a 100644 --- a/avalanchego/vms/platformvm/txs/create_chain_test.go +++ b/avalanchego/vms/platformvm/txs/create_chain_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
+// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs @@ -9,7 +9,7 @@ import ( "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/vms/components/avax" @@ -17,7 +17,7 @@ import ( ) func TestUnsignedCreateChainTxVerify(t *testing.T) { - ctx := snow.DefaultContextTest() + ctx := snowtest.Context(t, snowtest.PChainID) testSubnet1ID := ids.GenerateTestID() testSubnet1ControlKeys := []*secp256k1.PrivateKey{ preFundedKeys[0], @@ -26,7 +26,6 @@ func TestUnsignedCreateChainTxVerify(t *testing.T) { type test struct { description string - shouldErr bool subnetID ids.ID genesisData []byte vmID ids.ID @@ -34,12 +33,12 @@ func TestUnsignedCreateChainTxVerify(t *testing.T) { chainName string keys []*secp256k1.PrivateKey setup func(*CreateChainTx) *CreateChainTx + expectedErr error } tests := []test{ { description: "tx is nil", - shouldErr: true, subnetID: testSubnet1ID, genesisData: nil, vmID: constants.AVMID, @@ -49,10 +48,10 @@ func TestUnsignedCreateChainTxVerify(t *testing.T) { setup: func(*CreateChainTx) *CreateChainTx { return nil }, + expectedErr: ErrNilTx, }, { description: "vm ID is empty", - shouldErr: true, subnetID: testSubnet1ID, genesisData: nil, vmID: constants.AVMID, @@ -63,24 +62,10 @@ func TestUnsignedCreateChainTxVerify(t *testing.T) { tx.VMID = ids.ID{} return tx }, - }, - { - description: "subnet ID is empty", - shouldErr: true, - subnetID: testSubnet1ID, - genesisData: nil, - vmID: constants.AVMID, - fxIDs: nil, - chainName: "yeet", - keys: []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, - setup: func(tx *CreateChainTx) *CreateChainTx { - tx.SubnetID = ids.ID{} - return tx - }, + expectedErr: errInvalidVMID, 
}, { description: "subnet ID is platform chain's ID", - shouldErr: true, subnetID: testSubnet1ID, genesisData: nil, vmID: constants.AVMID, @@ -91,10 +76,10 @@ func TestUnsignedCreateChainTxVerify(t *testing.T) { tx.SubnetID = ctx.ChainID return tx }, + expectedErr: ErrCantValidatePrimaryNetwork, }, { description: "chain name is too long", - shouldErr: true, subnetID: testSubnet1ID, genesisData: nil, vmID: constants.AVMID, @@ -105,10 +90,10 @@ func TestUnsignedCreateChainTxVerify(t *testing.T) { tx.ChainName = string(make([]byte, MaxNameLen+1)) return tx }, + expectedErr: errNameTooLong, }, { description: "chain name has invalid character", - shouldErr: true, subnetID: testSubnet1ID, genesisData: nil, vmID: constants.AVMID, @@ -119,10 +104,10 @@ func TestUnsignedCreateChainTxVerify(t *testing.T) { tx.ChainName = "⌘" return tx }, + expectedErr: errIllegalNameCharacter, }, { description: "genesis data is too long", - shouldErr: true, subnetID: testSubnet1ID, genesisData: nil, vmID: constants.AVMID, @@ -133,62 +118,63 @@ func TestUnsignedCreateChainTxVerify(t *testing.T) { tx.GenesisData = make([]byte, MaxGenesisLen+1) return tx }, + expectedErr: errGenesisTooLong, }, } for _, test := range tests { - inputs := []*avax.TransferableInput{{ - UTXOID: avax.UTXOID{ - TxID: ids.ID{'t', 'x', 'I', 'D'}, - OutputIndex: 2, - }, - Asset: avax.Asset{ID: ids.ID{'a', 's', 's', 'e', 't'}}, - In: &secp256k1fx.TransferInput{ - Amt: uint64(5678), - Input: secp256k1fx.Input{SigIndices: []uint32{0}}, - }, - }} - outputs := []*avax.TransferableOutput{{ - Asset: avax.Asset{ID: ids.ID{'a', 's', 's', 'e', 't'}}, - Out: &secp256k1fx.TransferOutput{ - Amt: uint64(1234), - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{preFundedKeys[0].PublicKey().Address()}, + t.Run(test.description, func(t *testing.T) { + require := require.New(t) + + inputs := []*avax.TransferableInput{{ + UTXOID: avax.UTXOID{ + TxID: ids.ID{'t', 'x', 'I', 'D'}, + OutputIndex: 2, }, - }, - }} - 
subnetAuth := &secp256k1fx.Input{ - SigIndices: []uint32{0, 1}, - } + Asset: avax.Asset{ID: ids.ID{'a', 's', 's', 'e', 't'}}, + In: &secp256k1fx.TransferInput{ + Amt: uint64(5678), + Input: secp256k1fx.Input{SigIndices: []uint32{0}}, + }, + }} + outputs := []*avax.TransferableOutput{{ + Asset: avax.Asset{ID: ids.ID{'a', 's', 's', 'e', 't'}}, + Out: &secp256k1fx.TransferOutput{ + Amt: uint64(1234), + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{preFundedKeys[0].PublicKey().Address()}, + }, + }, + }} + subnetAuth := &secp256k1fx.Input{ + SigIndices: []uint32{0, 1}, + } - createChainTx := &CreateChainTx{ - BaseTx: BaseTx{BaseTx: avax.BaseTx{ - NetworkID: ctx.NetworkID, - BlockchainID: ctx.ChainID, - Ins: inputs, - Outs: outputs, - }}, - SubnetID: test.subnetID, - ChainName: test.chainName, - VMID: test.vmID, - FxIDs: test.fxIDs, - GenesisData: test.genesisData, - SubnetAuth: subnetAuth, - } + createChainTx := &CreateChainTx{ + BaseTx: BaseTx{BaseTx: avax.BaseTx{ + NetworkID: ctx.NetworkID, + BlockchainID: ctx.ChainID, + Ins: inputs, + Outs: outputs, + }}, + SubnetID: test.subnetID, + ChainName: test.chainName, + VMID: test.vmID, + FxIDs: test.fxIDs, + GenesisData: test.genesisData, + SubnetAuth: subnetAuth, + } - signers := [][]*secp256k1.PrivateKey{preFundedKeys} - stx, err := NewSigned(createChainTx, Codec, signers) - require.NoError(t, err) + signers := [][]*secp256k1.PrivateKey{preFundedKeys} + stx, err := NewSigned(createChainTx, Codec, signers) + require.NoError(err) - createChainTx.SyntacticallyVerified = false - stx.Unsigned = test.setup(createChainTx) + createChainTx.SyntacticallyVerified = false + stx.Unsigned = test.setup(createChainTx) - err = stx.SyntacticVerify(ctx) - if !test.shouldErr { - require.NoError(t, err) - } else { - require.Error(t, err) - } + err = stx.SyntacticVerify(ctx) + require.ErrorIs(err, test.expectedErr) + }) } } diff --git a/avalanchego/vms/platformvm/txs/create_chain_tx.go 
b/avalanchego/vms/platformvm/txs/create_chain_tx.go index b329279c..84a9c72f 100644 --- a/avalanchego/vms/platformvm/txs/create_chain_tx.go +++ b/avalanchego/vms/platformvm/txs/create_chain_tx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs @@ -62,7 +62,7 @@ func (tx *CreateChainTx) SyntacticVerify(ctx *snow.Context) error { return errNameTooLong case tx.VMID == ids.Empty: return errInvalidVMID - case !utils.IsSortedAndUniqueSortable(tx.FxIDs): + case !utils.IsSortedAndUnique(tx.FxIDs): return errFxIDsNotSortedAndUnique case len(tx.GenesisData) > MaxGenesisLen: return errGenesisTooLong diff --git a/avalanchego/vms/platformvm/txs/create_subnet_tx.go b/avalanchego/vms/platformvm/txs/create_subnet_tx.go index 02f41fae..e560c9dd 100644 --- a/avalanchego/vms/platformvm/txs/create_subnet_tx.go +++ b/avalanchego/vms/platformvm/txs/create_subnet_tx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs diff --git a/avalanchego/vms/platformvm/txs/executor/advance_time_test.go b/avalanchego/vms/platformvm/txs/executor/advance_time_test.go index 2bacbbab..d5edda11 100644 --- a/avalanchego/vms/platformvm/txs/executor/advance_time_test.go +++ b/avalanchego/vms/platformvm/txs/executor/advance_time_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package executor @@ -12,7 +12,7 @@ import ( "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/vms/platformvm/reward" @@ -21,26 +21,39 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/txs" ) +func newAdvanceTimeTx(t testing.TB, timestamp time.Time) (*txs.Tx, error) { + utx := &txs.AdvanceTimeTx{Time: uint64(timestamp.Unix())} + tx, err := txs.NewSigned(utx, txs.Codec, nil) + if err != nil { + return nil, err + } + return tx, tx.SyntacticVerify(snowtest.Context(t, snowtest.PChainID)) +} + // Ensure semantic verification updates the current and pending staker set // for the primary network func TestAdvanceTimeTxUpdatePrimaryNetworkStakers(t *testing.T) { require := require.New(t) - env := newEnvironment(false /*=postBanff*/, false /*=postCortina*/) + env := newEnvironment(t, apricotPhase5) env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - }() + defer env.ctx.Lock.Unlock() dummyHeight := uint64(1) // Case: Timestamp is after next validator start time // Add a pending validator - pendingValidatorStartTime := defaultGenesisTime.Add(1 * time.Second) + pendingValidatorStartTime := defaultValidateStartTime.Add(1 * time.Second) pendingValidatorEndTime := pendingValidatorStartTime.Add(defaultMinStakingDuration) nodeID := ids.GenerateTestNodeID() - addPendingValidatorTx, err := addPendingValidator(env, pendingValidatorStartTime, pendingValidatorEndTime, nodeID, []*secp256k1.PrivateKey{preFundedKeys[0]}) + addPendingValidatorTx, err := addPendingValidator( + env, + pendingValidatorStartTime, + pendingValidatorEndTime, + nodeID, + []*secp256k1.PrivateKey{preFundedKeys[0]}, + ) require.NoError(err) - tx, err := env.txBuilder.NewAdvanceTimeTx(pendingValidatorStartTime) 
+ tx, err := newAdvanceTimeTx(t, pendingValidatorStartTime) require.NoError(err) onCommitState, err := state.NewDiff(lastAcceptedID, env) @@ -77,18 +90,16 @@ func TestAdvanceTimeTxUpdatePrimaryNetworkStakers(t *testing.T) { env.state.SetHeight(dummyHeight) require.NoError(env.state.Commit()) - require.True(validators.Contains(env.config.Validators, constants.PrimaryNetworkID, nodeID)) + _, ok := env.config.Validators.GetValidator(constants.PrimaryNetworkID, nodeID) + require.True(ok) } // Ensure semantic verification fails when proposed timestamp is at or before current timestamp func TestAdvanceTimeTxTimestampTooEarly(t *testing.T) { require := require.New(t) - env := newEnvironment(false /*=postBanff*/, false /*=postCortina*/) - defer func() { - require.NoError(shutdownEnvironment(env)) - }() + env := newEnvironment(t, apricotPhase5) - tx, err := env.txBuilder.NewAdvanceTimeTx(defaultGenesisTime) + tx, err := newAdvanceTimeTx(t, env.state.GetTimestamp()) require.NoError(err) onCommitState, err := state.NewDiff(lastAcceptedID, env) @@ -104,25 +115,26 @@ func TestAdvanceTimeTxTimestampTooEarly(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - require.Error(err, "should've failed verification because proposed timestamp same as current timestamp") + require.ErrorIs(err, ErrChildBlockNotAfterParent) } // Ensure semantic verification fails when proposed timestamp is after next validator set change time func TestAdvanceTimeTxTimestampTooLate(t *testing.T) { require := require.New(t) - env := newEnvironment(false /*=postBanff*/, false /*=postCortina*/) + env := newEnvironment(t, apricotPhase5) env.ctx.Lock.Lock() + defer env.ctx.Lock.Unlock() // Case: Timestamp is after next validator start time // Add a pending validator - pendingValidatorStartTime := defaultGenesisTime.Add(1 * time.Second) + pendingValidatorStartTime := defaultValidateStartTime.Add(1 * time.Second) pendingValidatorEndTime := pendingValidatorStartTime.Add(defaultMinStakingDuration) nodeID 
:= ids.GenerateTestNodeID() _, err := addPendingValidator(env, pendingValidatorStartTime, pendingValidatorEndTime, nodeID, []*secp256k1.PrivateKey{preFundedKeys[0]}) require.NoError(err) { - tx, err := env.txBuilder.NewAdvanceTimeTx(pendingValidatorStartTime.Add(1 * time.Second)) + tx, err := newAdvanceTimeTx(t, pendingValidatorStartTime.Add(1*time.Second)) require.NoError(err) onCommitState, err := state.NewDiff(lastAcceptedID, env) @@ -138,25 +150,20 @@ func TestAdvanceTimeTxTimestampTooLate(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - require.Error(err, "should've failed verification because proposed timestamp is after pending validator start time") + require.ErrorIs(err, ErrChildBlockAfterStakerChangeTime) } - err = shutdownEnvironment(env) - require.NoError(err) - // Case: Timestamp is after next validator end time - env = newEnvironment(false /*=postBanff*/, false /*=postCortina*/) + env = newEnvironment(t, apricotPhase5) env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - }() + defer env.ctx.Lock.Unlock() // fast forward clock to 10 seconds before genesis validators stop validating env.clk.Set(defaultValidateEndTime.Add(-10 * time.Second)) { // Proposes advancing timestamp to 1 second after genesis validators stop validating - tx, err := env.txBuilder.NewAdvanceTimeTx(defaultValidateEndTime.Add(1 * time.Second)) + tx, err := newAdvanceTimeTx(t, defaultValidateEndTime.Add(1*time.Second)) require.NoError(err) onCommitState, err := state.NewDiff(lastAcceptedID, env) @@ -172,7 +179,7 @@ func TestAdvanceTimeTxTimestampTooLate(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - require.Error(err, "should've failed verification because proposed timestamp is after pending validator start time") + require.ErrorIs(err, ErrChildBlockAfterStakerChangeTime) } } @@ -208,8 +215,8 @@ func TestAdvanceTimeTxUpdateStakers(t *testing.T) { // Staker5: |--------------------| staker1 := staker{ nodeID: 
ids.GenerateTestNodeID(), - startTime: defaultGenesisTime.Add(1 * time.Minute), - endTime: defaultGenesisTime.Add(10 * defaultMinStakingDuration).Add(1 * time.Minute), + startTime: defaultValidateStartTime.Add(1 * time.Minute), + endTime: defaultValidateStartTime.Add(10 * defaultMinStakingDuration).Add(1 * time.Minute), } staker2 := staker{ nodeID: ids.GenerateTestNodeID(), @@ -347,17 +354,14 @@ func TestAdvanceTimeTxUpdateStakers(t *testing.T) { for _, test := range tests { t.Run(test.description, func(t *testing.T) { require := require.New(t) - env := newEnvironment(false /*=postBanff*/, false /*=postCortina*/) + env := newEnvironment(t, apricotPhase5) env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - }() + defer env.ctx.Lock.Unlock() dummyHeight := uint64(1) subnetID := testSubnet1.ID() env.config.TrackedSubnets.Add(subnetID) - env.config.Validators.Add(subnetID, validators.NewSet()) for _, staker := range test.stakers { _, err := addPendingValidator( @@ -379,6 +383,7 @@ func TestAdvanceTimeTxUpdateStakers(t *testing.T) { subnetID, // Subnet ID []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, ids.ShortEmpty, + nil, ) require.NoError(err) @@ -396,7 +401,7 @@ func TestAdvanceTimeTxUpdateStakers(t *testing.T) { for _, newTime := range test.advanceTimeTo { env.clk.Set(newTime) - tx, err := env.txBuilder.NewAdvanceTimeTx(newTime) + tx, err := newAdvanceTimeTx(t, newTime) require.NoError(err) onCommitState, err := state.NewDiff(lastAcceptedID, env) @@ -423,20 +428,24 @@ func TestAdvanceTimeTxUpdateStakers(t *testing.T) { case pending: _, err := env.state.GetPendingValidator(constants.PrimaryNetworkID, stakerNodeID) require.NoError(err) - require.False(validators.Contains(env.config.Validators, constants.PrimaryNetworkID, stakerNodeID)) + _, ok := env.config.Validators.GetValidator(constants.PrimaryNetworkID, stakerNodeID) + require.False(ok) case current: _, err := env.state.GetCurrentValidator(constants.PrimaryNetworkID, 
stakerNodeID) require.NoError(err) - require.True(validators.Contains(env.config.Validators, constants.PrimaryNetworkID, stakerNodeID)) + _, ok := env.config.Validators.GetValidator(constants.PrimaryNetworkID, stakerNodeID) + require.True(ok) } } for stakerNodeID, status := range test.expectedSubnetStakers { switch status { case pending: - require.False(validators.Contains(env.config.Validators, subnetID, stakerNodeID)) + _, ok := env.config.Validators.GetValidator(subnetID, stakerNodeID) + require.False(ok) case current: - require.True(validators.Contains(env.config.Validators, subnetID, stakerNodeID)) + _, ok := env.config.Validators.GetValidator(subnetID, stakerNodeID) + require.True(ok) } } }) @@ -449,20 +458,16 @@ func TestAdvanceTimeTxUpdateStakers(t *testing.T) { // is after the new timestamp func TestAdvanceTimeTxRemoveSubnetValidator(t *testing.T) { require := require.New(t) - env := newEnvironment(false /*=postBanff*/, false /*=postCortina*/) + env := newEnvironment(t, apricotPhase5) env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - }() + defer env.ctx.Lock.Unlock() subnetID := testSubnet1.ID() env.config.TrackedSubnets.Add(subnetID) - env.config.Validators.Add(subnetID, validators.NewSet()) dummyHeight := uint64(1) // Add a subnet validator to the staker set - subnetValidatorNodeID := ids.NodeID(preFundedKeys[0].PublicKey().Address()) - // Starts after the corre + subnetValidatorNodeID := genesisNodeIDs[0] subnetVdr1StartTime := defaultValidateStartTime subnetVdr1EndTime := defaultValidateStartTime.Add(defaultMinStakingDuration) tx, err := env.txBuilder.NewAddSubnetValidatorTx( @@ -473,12 +478,15 @@ func TestAdvanceTimeTxRemoveSubnetValidator(t *testing.T) { subnetID, // Subnet ID []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, ids.ShortEmpty, + nil, ) require.NoError(err) + addSubnetValTx := tx.Unsigned.(*txs.AddSubnetValidatorTx) staker, err := state.NewCurrentStaker( tx.ID(), - 
tx.Unsigned.(*txs.AddSubnetValidatorTx), + addSubnetValTx, + addSubnetValTx.StartTime(), 0, ) require.NoError(err) @@ -491,7 +499,7 @@ func TestAdvanceTimeTxRemoveSubnetValidator(t *testing.T) { // The above validator is now part of the staking set // Queue a staker that joins the staker set after the above validator leaves - subnetVdr2NodeID := ids.NodeID(preFundedKeys[1].PublicKey().Address()) + subnetVdr2NodeID := genesisNodeIDs[1] tx, err = env.txBuilder.NewAddSubnetValidatorTx( 1, // Weight uint64(subnetVdr1EndTime.Add(time.Second).Unix()), // Start time @@ -500,6 +508,7 @@ func TestAdvanceTimeTxRemoveSubnetValidator(t *testing.T) { subnetID, // Subnet ID []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, // Keys ids.ShortEmpty, // reward address + nil, ) require.NoError(err) @@ -518,7 +527,7 @@ func TestAdvanceTimeTxRemoveSubnetValidator(t *testing.T) { // Advance time to the first staker's end time. env.clk.Set(subnetVdr1EndTime) - tx, err = env.txBuilder.NewAdvanceTimeTx(subnetVdr1EndTime) + tx, err = newAdvanceTimeTx(t, subnetVdr1EndTime) require.NoError(err) onCommitState, err := state.NewDiff(lastAcceptedID, env) @@ -543,40 +552,40 @@ func TestAdvanceTimeTxRemoveSubnetValidator(t *testing.T) { env.state.SetHeight(dummyHeight) require.NoError(env.state.Commit()) - require.False(validators.Contains(env.config.Validators, subnetID, subnetVdr2NodeID)) - require.False(validators.Contains(env.config.Validators, subnetID, subnetValidatorNodeID)) + _, ok := env.config.Validators.GetValidator(subnetID, subnetVdr2NodeID) + require.False(ok) + _, ok = env.config.Validators.GetValidator(subnetID, subnetValidatorNodeID) + require.False(ok) } func TestTrackedSubnet(t *testing.T) { for _, tracked := range []bool{true, false} { t.Run(fmt.Sprintf("tracked %t", tracked), func(t *testing.T) { require := require.New(t) - env := newEnvironment(false /*=postBanff*/, false /*=postCortina*/) + env := newEnvironment(t, apricotPhase5) env.ctx.Lock.Lock() - defer func() 
{ - require.NoError(shutdownEnvironment(env)) - }() + defer env.ctx.Lock.Unlock() dummyHeight := uint64(1) subnetID := testSubnet1.ID() if tracked { env.config.TrackedSubnets.Add(subnetID) - env.config.Validators.Add(subnetID, validators.NewSet()) } // Add a subnet validator to the staker set - subnetValidatorNodeID := preFundedKeys[0].PublicKey().Address() + subnetValidatorNodeID := genesisNodeIDs[0] - subnetVdr1StartTime := defaultGenesisTime.Add(1 * time.Minute) - subnetVdr1EndTime := defaultGenesisTime.Add(10 * defaultMinStakingDuration).Add(1 * time.Minute) + subnetVdr1StartTime := defaultValidateStartTime.Add(1 * time.Minute) + subnetVdr1EndTime := defaultValidateStartTime.Add(10 * defaultMinStakingDuration).Add(1 * time.Minute) tx, err := env.txBuilder.NewAddSubnetValidatorTx( 1, // Weight uint64(subnetVdr1StartTime.Unix()), // Start time uint64(subnetVdr1EndTime.Unix()), // end time - ids.NodeID(subnetValidatorNodeID), // Node ID + subnetValidatorNodeID, // Node ID subnetID, // Subnet ID []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, ids.ShortEmpty, + nil, ) require.NoError(err) @@ -589,12 +598,11 @@ func TestTrackedSubnet(t *testing.T) { env.state.PutPendingValidator(staker) env.state.AddTx(tx, status.Committed) env.state.SetHeight(dummyHeight) - err = env.state.Commit() - require.NoError(err) + require.NoError(env.state.Commit()) // Advance time to the staker's start time. 
env.clk.Set(subnetVdr1StartTime) - tx, err = env.txBuilder.NewAdvanceTimeTx(subnetVdr1StartTime) + tx, err = newAdvanceTimeTx(t, subnetVdr1StartTime) require.NoError(err) onCommitState, err := state.NewDiff(lastAcceptedID, env) @@ -609,30 +617,28 @@ func TestTrackedSubnet(t *testing.T) { Backend: &env.backend, Tx: tx, } - err = tx.Unsigned.Visit(&executor) - require.NoError(err) + require.NoError(tx.Unsigned.Visit(&executor)) require.NoError(executor.OnCommitState.Apply(env.state)) env.state.SetHeight(dummyHeight) require.NoError(env.state.Commit()) - require.Equal(tracked, validators.Contains(env.config.Validators, subnetID, ids.NodeID(subnetValidatorNodeID))) + _, ok := env.config.Validators.GetValidator(subnetID, subnetValidatorNodeID) + require.True(ok) }) } } func TestAdvanceTimeTxDelegatorStakerWeight(t *testing.T) { require := require.New(t) - env := newEnvironment(false /*=postBanff*/, false /*=postCortina*/) + env := newEnvironment(t, apricotPhase5) env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - }() + defer env.ctx.Lock.Unlock() dummyHeight := uint64(1) // Case: Timestamp is after next validator start time // Add a pending validator - pendingValidatorStartTime := defaultGenesisTime.Add(1 * time.Second) + pendingValidatorStartTime := defaultValidateStartTime.Add(1 * time.Second) pendingValidatorEndTime := pendingValidatorStartTime.Add(defaultMaxStakingDuration) nodeID := ids.GenerateTestNodeID() _, err := addPendingValidator( @@ -644,7 +650,7 @@ func TestAdvanceTimeTxDelegatorStakerWeight(t *testing.T) { ) require.NoError(err) - tx, err := env.txBuilder.NewAdvanceTimeTx(pendingValidatorStartTime) + tx, err := newAdvanceTimeTx(t, pendingValidatorStartTime) require.NoError(err) onCommitState, err := state.NewDiff(lastAcceptedID, env) @@ -659,8 +665,7 @@ func TestAdvanceTimeTxDelegatorStakerWeight(t *testing.T) { Backend: &env.backend, Tx: tx, } - err = tx.Unsigned.Visit(&executor) - require.NoError(err) + 
require.NoError(tx.Unsigned.Visit(&executor)) require.NoError(executor.OnCommitState.Apply(env.state)) @@ -668,9 +673,7 @@ func TestAdvanceTimeTxDelegatorStakerWeight(t *testing.T) { require.NoError(env.state.Commit()) // Test validator weight before delegation - primarySet, ok := env.config.Validators.Get(constants.PrimaryNetworkID) - require.True(ok) - vdrWeight := primarySet.GetWeight(nodeID) + vdrWeight := env.config.Validators.GetWeight(constants.PrimaryNetworkID, nodeID) require.Equal(env.config.MinValidatorStake, vdrWeight) // Add delegator @@ -689,6 +692,7 @@ func TestAdvanceTimeTxDelegatorStakerWeight(t *testing.T) { preFundedKeys[4], }, ids.ShortEmpty, + nil, ) require.NoError(err) @@ -704,7 +708,7 @@ func TestAdvanceTimeTxDelegatorStakerWeight(t *testing.T) { require.NoError(env.state.Commit()) // Advance Time - tx, err = env.txBuilder.NewAdvanceTimeTx(pendingDelegatorStartTime) + tx, err = newAdvanceTimeTx(t, pendingDelegatorStartTime) require.NoError(err) onCommitState, err = state.NewDiff(lastAcceptedID, env) @@ -719,8 +723,7 @@ func TestAdvanceTimeTxDelegatorStakerWeight(t *testing.T) { Backend: &env.backend, Tx: tx, } - err = tx.Unsigned.Visit(&executor) - require.NoError(err) + require.NoError(tx.Unsigned.Visit(&executor)) require.NoError(executor.OnCommitState.Apply(env.state)) @@ -728,28 +731,26 @@ func TestAdvanceTimeTxDelegatorStakerWeight(t *testing.T) { require.NoError(env.state.Commit()) // Test validator weight after delegation - vdrWeight = primarySet.GetWeight(nodeID) + vdrWeight = env.config.Validators.GetWeight(constants.PrimaryNetworkID, nodeID) require.Equal(env.config.MinDelegatorStake+env.config.MinValidatorStake, vdrWeight) } func TestAdvanceTimeTxDelegatorStakers(t *testing.T) { require := require.New(t) - env := newEnvironment(false /*=postBanff*/, false /*=postCortina*/) + env := newEnvironment(t, apricotPhase5) env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - }() + defer env.ctx.Lock.Unlock() 
dummyHeight := uint64(1) // Case: Timestamp is after next validator start time // Add a pending validator - pendingValidatorStartTime := defaultGenesisTime.Add(1 * time.Second) + pendingValidatorStartTime := defaultValidateStartTime.Add(1 * time.Second) pendingValidatorEndTime := pendingValidatorStartTime.Add(defaultMinStakingDuration) nodeID := ids.GenerateTestNodeID() _, err := addPendingValidator(env, pendingValidatorStartTime, pendingValidatorEndTime, nodeID, []*secp256k1.PrivateKey{preFundedKeys[0]}) require.NoError(err) - tx, err := env.txBuilder.NewAdvanceTimeTx(pendingValidatorStartTime) + tx, err := newAdvanceTimeTx(t, pendingValidatorStartTime) require.NoError(err) onCommitState, err := state.NewDiff(lastAcceptedID, env) @@ -764,8 +765,7 @@ func TestAdvanceTimeTxDelegatorStakers(t *testing.T) { Backend: &env.backend, Tx: tx, } - err = tx.Unsigned.Visit(&executor) - require.NoError(err) + require.NoError(tx.Unsigned.Visit(&executor)) require.NoError(executor.OnCommitState.Apply(env.state)) @@ -773,9 +773,7 @@ func TestAdvanceTimeTxDelegatorStakers(t *testing.T) { require.NoError(env.state.Commit()) // Test validator weight before delegation - primarySet, ok := env.config.Validators.Get(constants.PrimaryNetworkID) - require.True(ok) - vdrWeight := primarySet.GetWeight(nodeID) + vdrWeight := env.config.Validators.GetWeight(constants.PrimaryNetworkID, nodeID) require.Equal(env.config.MinValidatorStake, vdrWeight) // Add delegator @@ -789,6 +787,7 @@ func TestAdvanceTimeTxDelegatorStakers(t *testing.T) { preFundedKeys[0].PublicKey().Address(), []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1], preFundedKeys[4]}, ids.ShortEmpty, + nil, ) require.NoError(err) @@ -804,7 +803,7 @@ func TestAdvanceTimeTxDelegatorStakers(t *testing.T) { require.NoError(env.state.Commit()) // Advance Time - tx, err = env.txBuilder.NewAdvanceTimeTx(pendingDelegatorStartTime) + tx, err = newAdvanceTimeTx(t, pendingDelegatorStartTime) require.NoError(err) onCommitState, err = 
state.NewDiff(lastAcceptedID, env) @@ -819,8 +818,7 @@ func TestAdvanceTimeTxDelegatorStakers(t *testing.T) { Backend: &env.backend, Tx: tx, } - err = tx.Unsigned.Visit(&executor) - require.NoError(err) + require.NoError(tx.Unsigned.Visit(&executor)) require.NoError(executor.OnCommitState.Apply(env.state)) @@ -828,54 +826,23 @@ func TestAdvanceTimeTxDelegatorStakers(t *testing.T) { require.NoError(env.state.Commit()) // Test validator weight after delegation - vdrWeight = primarySet.GetWeight(nodeID) + vdrWeight = env.config.Validators.GetWeight(constants.PrimaryNetworkID, nodeID) require.Equal(env.config.MinDelegatorStake+env.config.MinValidatorStake, vdrWeight) } -// Test method InitiallyPrefersCommit -func TestAdvanceTimeTxInitiallyPrefersCommit(t *testing.T) { - require := require.New(t) - env := newEnvironment(false /*=postBanff*/, false /*=postCortina*/) - env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - }() - env.clk.Set(defaultGenesisTime) // VM's clock reads the genesis time - - // Proposed advancing timestamp to 1 second after sync bound - tx, err := env.txBuilder.NewAdvanceTimeTx(defaultGenesisTime.Add(SyncBound)) - require.NoError(err) - - onCommitState, err := state.NewDiff(lastAcceptedID, env) - require.NoError(err) - - onAbortState, err := state.NewDiff(lastAcceptedID, env) - require.NoError(err) - - executor := ProposalTxExecutor{ - OnCommitState: onCommitState, - OnAbortState: onAbortState, - Backend: &env.backend, - Tx: tx, - } - err = tx.Unsigned.Visit(&executor) - require.NoError(err) - - require.True(executor.PrefersCommit, "should prefer to commit this tx because its proposed timestamp it's within sync bound") -} - func TestAdvanceTimeTxAfterBanff(t *testing.T) { require := require.New(t) - env := newEnvironment(false /*=postBanff*/, false /*=postCortina*/) + env := newEnvironment(t, durango) env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - }() + defer env.ctx.Lock.Unlock() 
env.clk.Set(defaultGenesisTime) // VM's clock reads the genesis time - env.config.BanffTime = defaultGenesisTime.Add(SyncBound) + upgradeTime := env.clk.Time().Add(SyncBound) + env.config.BanffTime = upgradeTime + env.config.CortinaTime = upgradeTime + env.config.DurangoTime = upgradeTime // Proposed advancing timestamp to the banff timestamp - tx, err := env.txBuilder.NewAdvanceTimeTx(defaultGenesisTime.Add(SyncBound)) + tx, err := newAdvanceTimeTx(t, upgradeTime) require.NoError(err) onCommitState, err := state.NewDiff(lastAcceptedID, env) @@ -891,22 +858,21 @@ func TestAdvanceTimeTxAfterBanff(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - require.ErrorIs(err, errAdvanceTimeTxIssuedAfterBanff) + require.ErrorIs(err, ErrAdvanceTimeTxIssuedAfterBanff) } // Ensure marshaling/unmarshaling works func TestAdvanceTimeTxUnmarshal(t *testing.T) { require := require.New(t) - env := newEnvironment(false /*=postBanff*/, false /*=postCortina*/) + env := newEnvironment(t, apricotPhase5) env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - }() + defer env.ctx.Lock.Unlock() - tx, err := env.txBuilder.NewAdvanceTimeTx(defaultGenesisTime) + chainTime := env.state.GetTimestamp() + tx, err := newAdvanceTimeTx(t, chainTime.Add(time.Second)) require.NoError(err) - bytes, err := txs.Codec.Marshal(txs.Version, tx) + bytes, err := txs.Codec.Marshal(txs.CodecVersion, tx) require.NoError(err) var unmarshaledTx txs.Tx @@ -931,10 +897,11 @@ func addPendingValidator( uint64(startTime.Unix()), uint64(endTime.Unix()), nodeID, - ids.ShortID(nodeID), + ids.GenerateTestShortID(), reward.PercentDenominator, keys, ids.ShortEmpty, + nil, ) if err != nil { return nil, err diff --git a/avalanchego/vms/platformvm/txs/executor/atomic_tx_executor.go b/avalanchego/vms/platformvm/txs/executor/atomic_tx_executor.go index 266ba4b2..2a35cb45 100644 --- a/avalanchego/vms/platformvm/txs/executor/atomic_tx_executor.go +++ 
b/avalanchego/vms/platformvm/txs/executor/atomic_tx_executor.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package executor @@ -29,47 +29,55 @@ type AtomicTxExecutor struct { } func (*AtomicTxExecutor) AddValidatorTx(*txs.AddValidatorTx) error { - return errWrongTxType + return ErrWrongTxType } func (*AtomicTxExecutor) AddSubnetValidatorTx(*txs.AddSubnetValidatorTx) error { - return errWrongTxType + return ErrWrongTxType } func (*AtomicTxExecutor) AddDelegatorTx(*txs.AddDelegatorTx) error { - return errWrongTxType + return ErrWrongTxType } func (*AtomicTxExecutor) CreateChainTx(*txs.CreateChainTx) error { - return errWrongTxType + return ErrWrongTxType } func (*AtomicTxExecutor) CreateSubnetTx(*txs.CreateSubnetTx) error { - return errWrongTxType + return ErrWrongTxType } func (*AtomicTxExecutor) AdvanceTimeTx(*txs.AdvanceTimeTx) error { - return errWrongTxType + return ErrWrongTxType } func (*AtomicTxExecutor) RewardValidatorTx(*txs.RewardValidatorTx) error { - return errWrongTxType + return ErrWrongTxType } func (*AtomicTxExecutor) RemoveSubnetValidatorTx(*txs.RemoveSubnetValidatorTx) error { - return errWrongTxType + return ErrWrongTxType } func (*AtomicTxExecutor) TransformSubnetTx(*txs.TransformSubnetTx) error { - return errWrongTxType + return ErrWrongTxType +} + +func (*AtomicTxExecutor) TransferSubnetOwnershipTx(*txs.TransferSubnetOwnershipTx) error { + return ErrWrongTxType } func (*AtomicTxExecutor) AddPermissionlessValidatorTx(*txs.AddPermissionlessValidatorTx) error { - return errWrongTxType + return ErrWrongTxType } func (*AtomicTxExecutor) AddPermissionlessDelegatorTx(*txs.AddPermissionlessDelegatorTx) error { - return errWrongTxType + return ErrWrongTxType +} + +func (*AtomicTxExecutor) BaseTx(*txs.BaseTx) error { + return ErrWrongTxType } func (e *AtomicTxExecutor) ImportTx(tx *txs.ImportTx) error { diff 
--git a/avalanchego/vms/platformvm/txs/executor/backend.go b/avalanchego/vms/platformvm/txs/executor/backend.go index f043521a..847aefc1 100644 --- a/avalanchego/vms/platformvm/txs/executor/backend.go +++ b/avalanchego/vms/platformvm/txs/executor/backend.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package executor @@ -20,7 +20,7 @@ type Backend struct { Clk *mockable.Clock Fx fx.Fx FlowChecker utxo.Verifier - Uptimes uptime.Manager + Uptimes uptime.Calculator Rewards reward.Calculator Bootstrapped *utils.Atomic[bool] } diff --git a/avalanchego/vms/platformvm/txs/executor/create_chain_test.go b/avalanchego/vms/platformvm/txs/executor/create_chain_test.go index f451c540..8209c975 100644 --- a/avalanchego/vms/platformvm/txs/executor/create_chain_test.go +++ b/avalanchego/vms/platformvm/txs/executor/create_chain_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package executor @@ -9,6 +9,7 @@ import ( "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" @@ -17,17 +18,16 @@ import ( "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/ava-labs/avalanchego/vms/platformvm/txs" + "github.com/ava-labs/avalanchego/vms/platformvm/utxo" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) // Ensure Execute fails when there are not enough control sigs func TestCreateChainTxInsufficientControlSigs(t *testing.T) { require := require.New(t) - env := newEnvironment(true /*=postBanff*/, false /*=postCortina*/) + env := newEnvironment(t, banff) env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - }() + defer env.ctx.Lock.Unlock() tx, err := env.txBuilder.NewCreateChainTx( testSubnet1.ID(), @@ -37,6 +37,7 @@ func TestCreateChainTxInsufficientControlSigs(t *testing.T) { "chain name", []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, ids.ShortEmpty, + nil, ) require.NoError(err) @@ -52,17 +53,15 @@ func TestCreateChainTxInsufficientControlSigs(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - require.Error(err, "should have erred because a sig is missing") + require.ErrorIs(err, errUnauthorizedSubnetModification) } // Ensure Execute fails when an incorrect control signature is given func TestCreateChainTxWrongControlSig(t *testing.T) { require := require.New(t) - env := newEnvironment(true /*=postBanff*/, false /*=postCortina*/) + env := newEnvironment(t, banff) env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - }() + defer env.ctx.Lock.Unlock() tx, err := env.txBuilder.NewCreateChainTx( testSubnet1.ID(), @@ -72,12 +71,12 @@ func TestCreateChainTxWrongControlSig(t *testing.T) { "chain name", 
[]*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ids.ShortEmpty, + nil, ) require.NoError(err) // Generate new, random key to sign tx with - factory := secp256k1.Factory{} - key, err := factory.NewPrivateKey() + key, err := secp256k1.NewPrivateKey() require.NoError(err) // Replace a valid signature with one from another key @@ -94,18 +93,16 @@ func TestCreateChainTxWrongControlSig(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - require.Error(err, "should have failed verification because a sig is invalid") + require.ErrorIs(err, errUnauthorizedSubnetModification) } // Ensure Execute fails when the Subnet the blockchain specifies as // its validator set doesn't exist func TestCreateChainTxNoSuchSubnet(t *testing.T) { require := require.New(t) - env := newEnvironment(true /*=postBanff*/, false /*=postCortina*/) + env := newEnvironment(t, banff) env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - }() + defer env.ctx.Lock.Unlock() tx, err := env.txBuilder.NewCreateChainTx( testSubnet1.ID(), @@ -115,6 +112,7 @@ func TestCreateChainTxNoSuchSubnet(t *testing.T) { "chain name", []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ids.ShortEmpty, + nil, ) require.NoError(err) @@ -129,17 +127,15 @@ func TestCreateChainTxNoSuchSubnet(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - require.Error(err, "should have failed because subnet doesn't exist") + require.ErrorIs(err, database.ErrNotFound) } // Ensure valid tx passes semanticVerify func TestCreateChainTxValid(t *testing.T) { require := require.New(t) - env := newEnvironment(true /*=postBanff*/, false /*=postCortina*/) + env := newEnvironment(t, banff) env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - }() + defer env.ctx.Lock.Unlock() tx, err := env.txBuilder.NewCreateChainTx( testSubnet1.ID(), @@ -149,6 +145,7 @@ func TestCreateChainTxValid(t *testing.T) { "chain name", 
[]*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ids.ShortEmpty, + nil, ) require.NoError(err) @@ -160,47 +157,43 @@ func TestCreateChainTxValid(t *testing.T) { State: stateDiff, Tx: tx, } - err = tx.Unsigned.Visit(&executor) - require.NoError(err) + require.NoError(tx.Unsigned.Visit(&executor)) } func TestCreateChainTxAP3FeeChange(t *testing.T) { ap3Time := defaultGenesisTime.Add(time.Hour) tests := []struct { - name string - time time.Time - fee uint64 - expectsError bool + name string + time time.Time + fee uint64 + expectedError error }{ { - name: "pre-fork - correctly priced", - time: defaultGenesisTime, - fee: 0, - expectsError: false, + name: "pre-fork - correctly priced", + time: defaultGenesisTime, + fee: 0, + expectedError: nil, }, { - name: "post-fork - incorrectly priced", - time: ap3Time, - fee: 100*defaultTxFee - 1*units.NanoAvax, - expectsError: true, + name: "post-fork - incorrectly priced", + time: ap3Time, + fee: 100*defaultTxFee - 1*units.NanoAvax, + expectedError: utxo.ErrInsufficientUnlockedFunds, }, { - name: "post-fork - correctly priced", - time: ap3Time, - fee: 100 * defaultTxFee, - expectsError: false, + name: "post-fork - correctly priced", + time: ap3Time, + fee: 100 * defaultTxFee, + expectedError: nil, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { require := require.New(t) - env := newEnvironment(true /*=postBanff*/, false /*=postCortina*/) + env := newEnvironment(t, banff) env.config.ApricotPhase3Time = ap3Time - defer func() { - require.NoError(shutdownEnvironment(env)) - }() ins, outs, _, signers, err := env.utxosHandler.Spend(env.state, preFundedKeys, 0, test.fee, ids.ShortEmpty) require.NoError(err) @@ -223,8 +216,7 @@ func TestCreateChainTxAP3FeeChange(t *testing.T) { SubnetAuth: subnetAuth, } tx := &txs.Tx{Unsigned: utx} - err = tx.Sign(txs.Codec, signers) - require.NoError(err) + require.NoError(tx.Sign(txs.Codec, signers)) stateDiff, err := state.NewDiff(lastAcceptedID, 
env) require.NoError(err) @@ -237,7 +229,7 @@ func TestCreateChainTxAP3FeeChange(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - require.Equal(test.expectsError, err != nil) + require.ErrorIs(err, test.expectedError) }) } } diff --git a/avalanchego/vms/platformvm/txs/executor/create_subnet_test.go b/avalanchego/vms/platformvm/txs/executor/create_subnet_test.go index 2446d841..259a5596 100644 --- a/avalanchego/vms/platformvm/txs/executor/create_subnet_test.go +++ b/avalanchego/vms/platformvm/txs/executor/create_subnet_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package executor @@ -14,46 +14,45 @@ import ( "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/ava-labs/avalanchego/vms/platformvm/txs" + "github.com/ava-labs/avalanchego/vms/platformvm/utxo" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) func TestCreateSubnetTxAP3FeeChange(t *testing.T) { ap3Time := defaultGenesisTime.Add(time.Hour) tests := []struct { - name string - time time.Time - fee uint64 - expectsError bool + name string + time time.Time + fee uint64 + expectedErr error }{ { - name: "pre-fork - correctly priced", - time: defaultGenesisTime, - fee: 0, - expectsError: false, + name: "pre-fork - correctly priced", + time: defaultGenesisTime, + fee: 0, + expectedErr: nil, }, { - name: "post-fork - incorrectly priced", - time: ap3Time, - fee: 100*defaultTxFee - 1*units.NanoAvax, - expectsError: true, + name: "post-fork - incorrectly priced", + time: ap3Time, + fee: 100*defaultTxFee - 1*units.NanoAvax, + expectedErr: utxo.ErrInsufficientUnlockedFunds, }, { - name: "post-fork - correctly priced", - time: ap3Time, - fee: 100 * defaultTxFee, - expectsError: false, + name: "post-fork - correctly priced", + time: ap3Time, + fee: 100 * defaultTxFee, + expectedErr: nil, 
}, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { require := require.New(t) - env := newEnvironment(false /*=postBanff*/, false /*=postCortina*/) + env := newEnvironment(t, apricotPhase3) env.config.ApricotPhase3Time = ap3Time env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - }() + defer env.ctx.Lock.Unlock() ins, outs, _, signers, err := env.utxosHandler.Spend(env.state, preFundedKeys, 0, test.fee, ids.ShortEmpty) require.NoError(err) @@ -82,7 +81,7 @@ func TestCreateSubnetTxAP3FeeChange(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - require.Equal(test.expectsError, err != nil) + require.ErrorIs(err, test.expectedErr) }) } } diff --git a/avalanchego/vms/platformvm/txs/executor/export_test.go b/avalanchego/vms/platformvm/txs/executor/export_test.go index ca10b3c1..0ee1966e 100644 --- a/avalanchego/vms/platformvm/txs/executor/export_test.go +++ b/avalanchego/vms/platformvm/txs/executor/export_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package executor @@ -7,8 +7,6 @@ import ( "testing" "time" - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" @@ -17,19 +15,15 @@ import ( ) func TestNewExportTx(t *testing.T) { - env := newEnvironment(true /*=postBanff*/, false /*=postCortina*/) + env := newEnvironment(t, banff) env.ctx.Lock.Lock() - defer func() { - require.NoError(t, shutdownEnvironment(env)) - }() + defer env.ctx.Lock.Unlock() type test struct { description string destinationChainID ids.ID sourceKeys []*secp256k1.PrivateKey timestamp time.Time - shouldErr bool - shouldVerify bool } sourceKey := preFundedKeys[0] @@ -37,19 +31,15 @@ func TestNewExportTx(t *testing.T) { tests := []test{ { description: "P->X export", - destinationChainID: xChainID, + destinationChainID: env.ctx.XChainID, sourceKeys: []*secp256k1.PrivateKey{sourceKey}, timestamp: defaultValidateStartTime, - shouldErr: false, - shouldVerify: true, }, { description: "P->C export", - destinationChainID: cChainID, + destinationChainID: env.ctx.CChainID, sourceKeys: []*secp256k1.PrivateKey{sourceKey}, timestamp: env.config.ApricotPhase5Time, - shouldErr: false, - shouldVerify: true, }, } @@ -57,8 +47,6 @@ func TestNewExportTx(t *testing.T) { for _, tt := range tests { t.Run(tt.description, func(t *testing.T) { require := require.New(t) - ctrl := gomock.NewController(t) - defer ctrl.Finish() tx, err := env.txBuilder.NewExportTx( defaultBalance-defaultTxFee, // Amount of tokens to export @@ -66,33 +54,21 @@ func TestNewExportTx(t *testing.T) { to, tt.sourceKeys, ids.ShortEmpty, // Change address + nil, ) - if tt.shouldErr { - require.Error(err) - return - } require.NoError(err) - fakedState, err := state.NewDiff(lastAcceptedID, env) + stateDiff, err := state.NewDiff(lastAcceptedID, env) require.NoError(err) - fakedState.SetTimestamp(tt.timestamp) + stateDiff.SetTimestamp(tt.timestamp) - fakedParent := ids.GenerateTestID() - env.SetState(fakedParent, fakedState) - - verifier := 
MempoolTxVerifier{ - Backend: &env.backend, - ParentID: fakedParent, - StateVersions: env, - Tx: tx, - } - err = tx.Unsigned.Visit(&verifier) - if tt.shouldVerify { - require.NoError(err) - } else { - require.Error(err) + verifier := StandardTxExecutor{ + Backend: &env.backend, + State: stateDiff, + Tx: tx, } + require.NoError(tx.Unsigned.Visit(&verifier)) }) } } diff --git a/avalanchego/vms/platformvm/txs/executor/helpers_test.go b/avalanchego/vms/platformvm/txs/executor/helpers_test.go index 6fc814d8..6ffd80f1 100644 --- a/avalanchego/vms/platformvm/txs/executor/helpers_test.go +++ b/avalanchego/vms/platformvm/txs/executor/helpers_test.go @@ -1,27 +1,27 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package executor import ( - "context" - "errors" "fmt" "math" + "testing" "time" "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/chains" "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/database/manager" - "github.com/ava-labs/avalanchego/database/prefixdb" + "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/snow/uptime" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils" @@ -33,8 +33,6 @@ import ( "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/utils/units" - "github.com/ava-labs/avalanchego/utils/wrappers" - "github.com/ava-labs/avalanchego/version" 
"github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/api" "github.com/ava-labs/avalanchego/vms/platformvm/config" @@ -50,8 +48,14 @@ import ( ) const ( - testNetworkID = 10 // To be used in tests - defaultWeight = 10000 + defaultWeight = 5 * units.MilliAvax + trackChecksum = false + + apricotPhase3 fork = iota + apricotPhase5 + banff + cortina + durango ) var ( @@ -59,26 +63,29 @@ var ( defaultMaxStakingDuration = 365 * 24 * time.Hour defaultGenesisTime = time.Date(1997, 1, 1, 0, 0, 0, 0, time.UTC) defaultValidateStartTime = defaultGenesisTime - defaultValidateEndTime = defaultValidateStartTime.Add(10 * defaultMinStakingDuration) + defaultValidateEndTime = defaultValidateStartTime.Add(20 * defaultMinStakingDuration) defaultMinValidatorStake = 5 * units.MilliAvax defaultBalance = 100 * defaultMinValidatorStake preFundedKeys = secp256k1.TestKeys() - avaxAssetID = ids.ID{'y', 'e', 'e', 't'} defaultTxFee = uint64(100) - xChainID = ids.Empty.Prefix(0) - cChainID = ids.Empty.Prefix(1) lastAcceptedID = ids.GenerateTestID() testSubnet1 *txs.Tx testSubnet1ControlKeys = preFundedKeys[0:3] - // Used to create and use keys. - testKeyfactory secp256k1.Factory - - errMissingPrimaryValidators = errors.New("missing primary validator set") - errMissing = errors.New("missing") + // Node IDs of genesis validators. 
Initialized in init function + genesisNodeIDs []ids.NodeID ) +func init() { + genesisNodeIDs = make([]ids.NodeID, len(preFundedKeys)) + for i := range preFundedKeys { + genesisNodeIDs[i] = ids.GenerateTestNodeID() + } +} + +type fork uint8 + type mutableSharedMemory struct { atomic.SharedMemory } @@ -112,30 +119,34 @@ func (e *environment) SetState(blkID ids.ID, chainState state.Chain) { e.states[blkID] = chainState } -func newEnvironment(postBanff, postCortina bool) *environment { +func newEnvironment(t *testing.T, f fork) *environment { var isBootstrapped utils.Atomic[bool] isBootstrapped.Set(true) - config := defaultConfig(postBanff, postCortina) - clk := defaultClock(postBanff || postCortina) + config := defaultConfig(t, f) + clk := defaultClock(f) - baseDBManager := manager.NewMemDB(version.CurrentDatabase) - baseDB := versiondb.New(baseDBManager.Current().Database) - ctx, msm := defaultCtx(baseDB) + baseDB := versiondb.New(memdb.New()) + ctx := snowtest.Context(t, snowtest.PChainID) + m := atomic.NewMemory(baseDB) + msm := &mutableSharedMemory{ + SharedMemory: m.NewSharedMemory(ctx.ChainID), + } + ctx.SharedMemory = msm - fx := defaultFx(&clk, ctx.Log, isBootstrapped.Get()) + fx := defaultFx(clk, ctx.Log, isBootstrapped.Get()) rewards := reward.NewCalculator(config.RewardConfig) - baseState := defaultState(&config, ctx, baseDB, rewards) + baseState := defaultState(config, ctx, baseDB, rewards) atomicUTXOs := avax.NewAtomicUTXOManager(ctx.SharedMemory, txs.Codec) - uptimes := uptime.NewManager(baseState) - utxoHandler := utxo.NewHandler(ctx, &clk, fx) + uptimes := uptime.NewManager(baseState, clk) + utxoHandler := utxo.NewHandler(ctx, clk, fx) txBuilder := builder.New( ctx, - &config, - &clk, + config, + clk, fx, baseState, atomicUTXOs, @@ -143,9 +154,9 @@ func newEnvironment(postBanff, postCortina bool) *environment { ) backend := Backend{ - Config: &config, + Config: config, Ctx: ctx, - Clk: &clk, + Clk: clk, Bootstrapped: &isBootstrapped, Fx: fx, 
FlowChecker: utxoHandler, @@ -155,8 +166,8 @@ func newEnvironment(postBanff, postCortina bool) *environment { env := &environment{ isBootstrapped: &isBootstrapped, - config: &config, - clk: &clk, + config: config, + clk: clk, baseDB: baseDB, ctx: ctx, msm: msm, @@ -170,15 +181,42 @@ func newEnvironment(postBanff, postCortina bool) *environment { backend: backend, } - addSubnet(env, txBuilder) + addSubnet(t, env, txBuilder) + + t.Cleanup(func() { + env.ctx.Lock.Lock() + defer env.ctx.Lock.Unlock() + + require := require.New(t) + + if env.isBootstrapped.Get() { + validatorIDs := env.config.Validators.GetValidatorIDs(constants.PrimaryNetworkID) + + require.NoError(env.uptimes.StopTracking(validatorIDs, constants.PrimaryNetworkID)) + + for subnetID := range env.config.TrackedSubnets { + validatorIDs := env.config.Validators.GetValidatorIDs(subnetID) + + require.NoError(env.uptimes.StopTracking(validatorIDs, subnetID)) + } + env.state.SetHeight(math.MaxUint64) + require.NoError(env.state.Commit()) + } + + require.NoError(env.state.Close()) + require.NoError(env.baseDB.Close()) + }) return env } func addSubnet( + t *testing.T, env *environment, txBuilder builder.Builder, ) { + require := require.New(t) + // Create a subnet var err error testSubnet1, err = txBuilder.NewCreateSubnetTx( @@ -190,31 +228,24 @@ func addSubnet( }, []*secp256k1.PrivateKey{preFundedKeys[0]}, preFundedKeys[0].PublicKey().Address(), + nil, ) - if err != nil { - panic(err) - } + require.NoError(err) // store it stateDiff, err := state.NewDiff(lastAcceptedID, env) - if err != nil { - panic(err) - } + require.NoError(err) executor := StandardTxExecutor{ Backend: &env.backend, State: stateDiff, Tx: testSubnet1, } - err = testSubnet1.Unsigned.Visit(&executor) - if err != nil { - panic(err) - } + require.NoError(testSubnet1.Unsigned.Visit(&executor)) stateDiff.AddTx(testSubnet1, status.Committed) - if err := stateDiff.Apply(env.state); err != nil { - panic(err) - } + 
require.NoError(stateDiff.Apply(env.state)) + require.NoError(env.state.Commit()) } func defaultState( @@ -224,15 +255,16 @@ func defaultState( rewards reward.Calculator, ) state.State { genesisBytes := buildGenesisTest(ctx) + execCfg, _ := config.GetExecutionConfig(nil) state, err := state.New( db, genesisBytes, prometheus.NewRegistry(), cfg, + execCfg, ctx, metrics.Noop, rewards, - &utils.Atomic[bool]{}, ) if err != nil { panic(err) @@ -243,63 +275,42 @@ func defaultState( if err := state.Commit(); err != nil { panic(err) } - state.SetHeight( /*height*/ 0) - if err := state.Commit(); err != nil { - panic(err) - } lastAcceptedID = state.GetLastAccepted() return state } -func defaultCtx(db database.Database) (*snow.Context, *mutableSharedMemory) { - ctx := snow.DefaultContextTest() - ctx.NetworkID = 10 - ctx.XChainID = xChainID - ctx.CChainID = cChainID - ctx.AVAXAssetID = avaxAssetID - - atomicDB := prefixdb.New([]byte{1}, db) - m := atomic.NewMemory(atomicDB) - - msm := &mutableSharedMemory{ - SharedMemory: m.NewSharedMemory(ctx.ChainID), - } - ctx.SharedMemory = msm - - ctx.ValidatorState = &validators.TestState{ - GetSubnetIDF: func(_ context.Context, chainID ids.ID) (ids.ID, error) { - subnetID, ok := map[ids.ID]ids.ID{ - constants.PlatformChainID: constants.PrimaryNetworkID, - xChainID: constants.PrimaryNetworkID, - cChainID: constants.PrimaryNetworkID, - }[chainID] - if !ok { - return ids.Empty, errMissing - } - return subnetID, nil - }, - } - - return ctx, msm -} +func defaultConfig(t *testing.T, f fork) *config.Config { + var ( + apricotPhase3Time = mockable.MaxTime + apricotPhase5Time = mockable.MaxTime + banffTime = mockable.MaxTime + cortinaTime = mockable.MaxTime + durangoTime = mockable.MaxTime + ) -func defaultConfig(postBanff, postCortina bool) config.Config { - banffTime := mockable.MaxTime - if postBanff { - banffTime = defaultValidateEndTime.Add(-2 * time.Second) - } - cortinaTime := mockable.MaxTime - if postCortina { + switch f { + case 
durango: + durangoTime = defaultValidateStartTime.Add(-2 * time.Second) + fallthrough + case cortina: cortinaTime = defaultValidateStartTime.Add(-2 * time.Second) + fallthrough + case banff: + banffTime = defaultValidateStartTime.Add(-2 * time.Second) + fallthrough + case apricotPhase5: + apricotPhase5Time = defaultValidateEndTime + fallthrough + case apricotPhase3: + apricotPhase3Time = defaultValidateEndTime + default: + require.NoError(t, fmt.Errorf("unhandled fork %d", f)) } - vdrs := validators.NewManager() - primaryVdrs := validators.NewSet() - _ = vdrs.Add(constants.PrimaryNetworkID, primaryVdrs) - return config.Config{ + return &config.Config{ Chains: chains.TestManager, UptimeLockedCalculator: uptime.NewLockedCalculator(), - Validators: vdrs, + Validators: validators.NewManager(), TxFee: defaultTxFee, CreateSubnetTxFee: 100 * defaultTxFee, CreateBlockchainTxFee: 100 * defaultTxFee, @@ -314,20 +325,21 @@ func defaultConfig(postBanff, postCortina bool) config.Config { MintingPeriod: 365 * 24 * time.Hour, SupplyCap: 720 * units.MegaAvax, }, - ApricotPhase3Time: defaultValidateEndTime, - ApricotPhase5Time: defaultValidateEndTime, + ApricotPhase3Time: apricotPhase3Time, + ApricotPhase5Time: apricotPhase5Time, BanffTime: banffTime, CortinaTime: cortinaTime, + DurangoTime: durangoTime, } } -func defaultClock(postFork bool) mockable.Clock { +func defaultClock(f fork) *mockable.Clock { now := defaultGenesisTime - if postFork { - // 1 second after Banff fork + if f >= banff { + // 1 second after active fork now = defaultValidateEndTime.Add(-2 * time.Second) } - clk := mockable.Clock{} + clk := &mockable.Clock{} clk.Set(now) return clk } @@ -353,7 +365,7 @@ func (fvi *fxVMInt) Logger() logging.Logger { func defaultFx(clk *mockable.Clock, log logging.Logger, isBootstrapped bool) fx.Fx { fxVMInt := &fxVMInt{ - registry: linearcodec.NewDefault(), + registry: linearcodec.NewDefault(time.Time{}), clk: clk, log: log, } @@ -371,10 +383,9 @@ func defaultFx(clk 
*mockable.Clock, log logging.Logger, isBootstrapped bool) fx. func buildGenesisTest(ctx *snow.Context) []byte { genesisUTXOs := make([]api.UTXO, len(preFundedKeys)) - hrp := constants.NetworkIDToHRP[testNetworkID] for i, key := range preFundedKeys { id := key.PublicKey().Address() - addr, err := address.FormatBech32(hrp, id.Bytes()) + addr, err := address.FormatBech32(constants.UnitTestHRP, id.Bytes()) if err != nil { panic(err) } @@ -384,15 +395,14 @@ func buildGenesisTest(ctx *snow.Context) []byte { } } - genesisValidators := make([]api.PermissionlessValidator, len(preFundedKeys)) - for i, key := range preFundedKeys { - nodeID := ids.NodeID(key.PublicKey().Address()) - addr, err := address.FormatBech32(hrp, nodeID.Bytes()) + genesisValidators := make([]api.GenesisPermissionlessValidator, len(genesisNodeIDs)) + for i, nodeID := range genesisNodeIDs { + addr, err := address.FormatBech32(constants.UnitTestHRP, nodeID.Bytes()) if err != nil { panic(err) } - genesisValidators[i] = api.PermissionlessValidator{ - Staker: api.Staker{ + genesisValidators[i] = api.GenesisPermissionlessValidator{ + GenesisValidator: api.GenesisValidator{ StartTime: json.Uint64(defaultValidateStartTime.Unix()), EndTime: json.Uint64(defaultValidateEndTime.Unix()), NodeID: nodeID, @@ -410,7 +420,7 @@ func buildGenesisTest(ctx *snow.Context) []byte { } buildGenesisArgs := api.BuildGenesisArgs{ - NetworkID: json.Uint32(testNetworkID), + NetworkID: json.Uint32(constants.UnitTestID), AvaxAssetID: ctx.AVAXAssetID, UTXOs: genesisUTXOs, Validators: genesisValidators, @@ -433,48 +443,3 @@ func buildGenesisTest(ctx *snow.Context) []byte { return genesisBytes } - -func shutdownEnvironment(env *environment) error { - if env.isBootstrapped.Get() { - primaryValidatorSet, exist := env.config.Validators.Get(constants.PrimaryNetworkID) - if !exist { - return errMissingPrimaryValidators - } - primaryValidators := primaryValidatorSet.List() - - validatorIDs := make([]ids.NodeID, len(primaryValidators)) - for i, 
vdr := range primaryValidators { - validatorIDs[i] = vdr.NodeID - } - if err := env.uptimes.StopTracking(validatorIDs, constants.PrimaryNetworkID); err != nil { - return err - } - - for subnetID := range env.config.TrackedSubnets { - vdrs, exist := env.config.Validators.Get(subnetID) - if !exist { - return nil - } - validators := vdrs.List() - - validatorIDs := make([]ids.NodeID, len(validators)) - for i, vdr := range validators { - validatorIDs[i] = vdr.NodeID - } - if err := env.uptimes.StopTracking(validatorIDs, subnetID); err != nil { - return err - } - } - env.state.SetHeight( /*height*/ math.MaxUint64) - if err := env.state.Commit(); err != nil { - return err - } - } - - errs := wrappers.Errs{} - errs.Add( - env.state.Close(), - env.baseDB.Close(), - ) - return errs.Err -} diff --git a/avalanchego/vms/platformvm/txs/executor/import_test.go b/avalanchego/vms/platformvm/txs/executor/import_test.go index bb2bdd15..bc52fabc 100644 --- a/avalanchego/vms/platformvm/txs/executor/import_test.go +++ b/avalanchego/vms/platformvm/txs/executor/import_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package executor @@ -17,14 +17,14 @@ import ( "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/ava-labs/avalanchego/vms/platformvm/txs" + "github.com/ava-labs/avalanchego/vms/platformvm/utxo" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) +var fundedSharedMemoryCalls byte + func TestNewImportTx(t *testing.T) { - env := newEnvironment(false /*=postBanff*/, false /*=postCortina*/) - defer func() { - require.NoError(t, shutdownEnvironment(env)) - }() + env := newEnvironment(t, apricotPhase5) type test struct { description string @@ -32,66 +32,12 @@ func TestNewImportTx(t *testing.T) { sharedMemory atomic.SharedMemory sourceKeys []*secp256k1.PrivateKey timestamp time.Time - shouldErr bool - shouldVerify bool + expectedErr error } - factory := secp256k1.Factory{} - sourceKey, err := factory.NewPrivateKey() + sourceKey, err := secp256k1.NewPrivateKey() require.NoError(t, err) - cnt := new(byte) - - // Returns a shared memory where GetDatabase returns a database - // where [recipientKey] has a balance of [amt] - fundedSharedMemory := func(peerChain ids.ID, assets map[ids.ID]uint64) atomic.SharedMemory { - *cnt++ - m := atomic.NewMemory(prefixdb.New([]byte{*cnt}, env.baseDB)) - - sm := m.NewSharedMemory(env.ctx.ChainID) - peerSharedMemory := m.NewSharedMemory(peerChain) - - for assetID, amt := range assets { - // #nosec G404 - utxo := &avax.UTXO{ - UTXOID: avax.UTXOID{ - TxID: ids.GenerateTestID(), - OutputIndex: rand.Uint32(), - }, - Asset: avax.Asset{ID: assetID}, - Out: &secp256k1fx.TransferOutput{ - Amt: amt, - OutputOwners: secp256k1fx.OutputOwners{ - Locktime: 0, - Addrs: []ids.ShortID{sourceKey.PublicKey().Address()}, - Threshold: 1, - }, - }, - } - utxoBytes, err := txs.Codec.Marshal(txs.Version, utxo) - require.NoError(t, err) - - inputID := utxo.InputID() - err = peerSharedMemory.Apply(map[ids.ID]*atomic.Requests{ - env.ctx.ChainID: { - PutRequests: []*atomic.Element{ - { - Key: 
inputID[:], - Value: utxoBytes, - Traits: [][]byte{ - sourceKey.PublicKey().Address().Bytes(), - }, - }, - }, - }, - }, - ) - require.NoError(t, err) - } - - return sm - } - customAssetID := ids.GenerateTestID() tests := []test{ @@ -99,55 +45,64 @@ func TestNewImportTx(t *testing.T) { description: "can't pay fee", sourceChainID: env.ctx.XChainID, sharedMemory: fundedSharedMemory( + t, + env, + sourceKey, env.ctx.XChainID, map[ids.ID]uint64{ env.ctx.AVAXAssetID: env.config.TxFee - 1, }, ), - sourceKeys: []*secp256k1.PrivateKey{sourceKey}, - shouldErr: true, + sourceKeys: []*secp256k1.PrivateKey{sourceKey}, + expectedErr: utxo.ErrInsufficientFunds, }, { description: "can barely pay fee", sourceChainID: env.ctx.XChainID, sharedMemory: fundedSharedMemory( + t, + env, + sourceKey, env.ctx.XChainID, map[ids.ID]uint64{ env.ctx.AVAXAssetID: env.config.TxFee, }, ), - sourceKeys: []*secp256k1.PrivateKey{sourceKey}, - shouldErr: false, - shouldVerify: true, + sourceKeys: []*secp256k1.PrivateKey{sourceKey}, + expectedErr: nil, }, { description: "attempting to import from C-chain", - sourceChainID: cChainID, + sourceChainID: env.ctx.CChainID, sharedMemory: fundedSharedMemory( - cChainID, + t, + env, + sourceKey, + env.ctx.CChainID, map[ids.ID]uint64{ env.ctx.AVAXAssetID: env.config.TxFee, }, ), - sourceKeys: []*secp256k1.PrivateKey{sourceKey}, - timestamp: env.config.ApricotPhase5Time, - shouldErr: false, - shouldVerify: true, + sourceKeys: []*secp256k1.PrivateKey{sourceKey}, + timestamp: env.config.ApricotPhase5Time, + expectedErr: nil, }, { description: "attempting to import non-avax from X-chain", sourceChainID: env.ctx.XChainID, sharedMemory: fundedSharedMemory( + t, + env, + sourceKey, env.ctx.XChainID, map[ids.ID]uint64{ env.ctx.AVAXAssetID: env.config.TxFee, customAssetID: 1, }, ), - sourceKeys: []*secp256k1.PrivateKey{sourceKey}, - timestamp: env.config.BanffTime, - shouldErr: false, - shouldVerify: true, + sourceKeys: []*secp256k1.PrivateKey{sourceKey}, + timestamp: 
env.config.ApricotPhase5Time, + expectedErr: nil, }, } @@ -162,16 +117,18 @@ func TestNewImportTx(t *testing.T) { to, tt.sourceKeys, ids.ShortEmpty, + nil, ) - if tt.shouldErr { - require.Error(err) + require.ErrorIs(err, tt.expectedErr) + if tt.expectedErr != nil { return } require.NoError(err) unsignedTx := tx.Unsigned.(*txs.ImportTx) require.NotEmpty(unsignedTx.ImportedInputs) - require.Equal(len(tx.Creds), len(unsignedTx.Ins)+len(unsignedTx.ImportedInputs), "should have the same number of credentials as inputs") + numInputs := len(unsignedTx.Ins) + len(unsignedTx.ImportedInputs) + require.Equal(len(tx.Creds), numInputs, "should have the same number of credentials as inputs") totalIn := uint64(0) for _, in := range unsignedTx.Ins { @@ -187,26 +144,71 @@ func TestNewImportTx(t *testing.T) { require.Equal(env.config.TxFee, totalIn-totalOut) - fakedState, err := state.NewDiff(lastAcceptedID, env) + stateDiff, err := state.NewDiff(lastAcceptedID, env) require.NoError(err) - fakedState.SetTimestamp(tt.timestamp) - - fakedParent := ids.GenerateTestID() - env.SetState(fakedParent, fakedState) + stateDiff.SetTimestamp(tt.timestamp) - verifier := MempoolTxVerifier{ - Backend: &env.backend, - ParentID: fakedParent, - StateVersions: env, - Tx: tx, - } - err = tx.Unsigned.Visit(&verifier) - if tt.shouldVerify { - require.NoError(err) - } else { - require.Error(err) + verifier := StandardTxExecutor{ + Backend: &env.backend, + State: stateDiff, + Tx: tx, } + require.NoError(tx.Unsigned.Visit(&verifier)) }) } } + +// Returns a shared memory where GetDatabase returns a database +// where [recipientKey] has a balance of [amt] +func fundedSharedMemory( + t *testing.T, + env *environment, + sourceKey *secp256k1.PrivateKey, + peerChain ids.ID, + assets map[ids.ID]uint64, +) atomic.SharedMemory { + fundedSharedMemoryCalls++ + m := atomic.NewMemory(prefixdb.New([]byte{fundedSharedMemoryCalls}, env.baseDB)) + + sm := m.NewSharedMemory(env.ctx.ChainID) + peerSharedMemory := 
m.NewSharedMemory(peerChain) + + for assetID, amt := range assets { + // #nosec G404 + utxo := &avax.UTXO{ + UTXOID: avax.UTXOID{ + TxID: ids.GenerateTestID(), + OutputIndex: rand.Uint32(), + }, + Asset: avax.Asset{ID: assetID}, + Out: &secp256k1fx.TransferOutput{ + Amt: amt, + OutputOwners: secp256k1fx.OutputOwners{ + Locktime: 0, + Addrs: []ids.ShortID{sourceKey.PublicKey().Address()}, + Threshold: 1, + }, + }, + } + utxoBytes, err := txs.Codec.Marshal(txs.CodecVersion, utxo) + require.NoError(t, err) + + inputID := utxo.InputID() + require.NoError(t, peerSharedMemory.Apply(map[ids.ID]*atomic.Requests{ + env.ctx.ChainID: { + PutRequests: []*atomic.Element{ + { + Key: inputID[:], + Value: utxoBytes, + Traits: [][]byte{ + sourceKey.PublicKey().Address().Bytes(), + }, + }, + }, + }, + })) + } + + return sm +} diff --git a/avalanchego/vms/platformvm/txs/executor/inflation_settings.go b/avalanchego/vms/platformvm/txs/executor/inflation_settings.go index 81855e3e..a469503d 100644 --- a/avalanchego/vms/platformvm/txs/executor/inflation_settings.go +++ b/avalanchego/vms/platformvm/txs/executor/inflation_settings.go @@ -13,7 +13,6 @@ var inflationSettingsVariants = utils.NewNetworkValue(getDefaultInflationSetting AddValue(constants.FlareID, getFlareInflationSettings). AddValue(constants.CostwoID, getCostwoInflationSettings). AddValue(constants.LocalFlareID, getLocalFlareInflationSettings). - AddValue(constants.StagingID, getStagingInflationSettings). AddValue(constants.SongbirdID, getSongbirdInflationSettings). AddValue(constants.CostonID, getCostonInflationSettings). 
AddValue(constants.LocalID, getLocalInflationSettings) @@ -40,13 +39,14 @@ func GetCurrentInflationSettings(currentTimestamp time.Time, networkID uint32, c func getCurrentValidatorRules(currentTimestamp time.Time, backend *Backend) *addValidatorRules { s := inflationSettingsVariants.GetValue(backend.Ctx.NetworkID)(currentTimestamp, backend.Config) return &addValidatorRules{ - assetID: backend.Ctx.AVAXAssetID, - minValidatorStake: s.MinValidatorStake, - maxValidatorStake: s.MaxValidatorStake, - minStakeDuration: s.MinStakeDuration, - maxStakeDuration: s.MaxStakeDuration, - minDelegationFee: s.MinDelegationFee, - minStakeStartTime: s.MinStakeStartTime, + assetID: backend.Ctx.AVAXAssetID, + minValidatorStake: s.MinValidatorStake, + maxValidatorStake: s.MaxValidatorStake, + minStakeDuration: s.MinStakeDuration, + maxStakeDuration: s.MaxStakeDuration, + minDelegationFee: s.MinDelegationFee, + minStakeStartTime: s.MinStakeStartTime, + minFutureStartTimeOffset: s.MinFutureStartTimeOffset, } } @@ -59,6 +59,7 @@ func getCurrentDelegatorRules(currentTimestamp time.Time, backend *Backend) *add minStakeDuration: s.MinDelegateDuration, maxStakeDuration: s.MaxStakeDuration, maxValidatorWeightFactor: byte(s.MaxValidatorWeightFactor), + minFutureStartTimeOffset: s.MinFutureStartTimeOffset, } } @@ -161,22 +162,6 @@ func getLocalFlareInflationSettings(currentTimestamp time.Time, _ *config.Config } } -func getStagingInflationSettings(_ time.Time, _ *config.Config) InflationSettings { - // Phase 1 - return InflationSettings{ - MinValidatorStake: 100 * units.KiloAvax, - MaxValidatorStake: 50 * units.MegaAvax, - MinDelegatorStake: 1 * units.KiloAvax, - MinDelegationFee: 0, - MinStakeDuration: 2 * 7 * 24 * time.Hour, - MinDelegateDuration: 2 * 7 * 24 * time.Hour, - MaxStakeDuration: 365 * 24 * time.Hour, - MinFutureStartTimeOffset: MaxFutureStartTime, - MaxValidatorWeightFactor: MaxValidatorWeightFactor, - MinStakeStartTime: time.Date(2023, time.May, 10, 15, 0, 0, 0, time.UTC), - } -} - 
func getSongbirdInflationSettings(currentTimestamp time.Time, config *config.Config) InflationSettings { switch { case currentTimestamp.Before(time.Date(2000, time.March, 1, 0, 0, 0, 0, time.UTC)): diff --git a/avalanchego/vms/platformvm/txs/executor/proposal_tx_executor.go b/avalanchego/vms/platformvm/txs/executor/proposal_tx_executor.go index af0122e8..568fba59 100644 --- a/avalanchego/vms/platformvm/txs/executor/proposal_tx_executor.go +++ b/avalanchego/vms/platformvm/txs/executor/proposal_tx_executor.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package executor @@ -10,7 +10,6 @@ import ( "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/math" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/verify" @@ -32,13 +31,15 @@ const ( var ( _ txs.Visitor = (*ProposalTxExecutor)(nil) - errChildBlockNotAfterParent = errors.New("proposed timestamp not after current chain time") - errInvalidState = errors.New("generated output isn't valid state") - errShouldBePermissionlessStaker = errors.New("expected permissionless staker") - errWrongTxType = errors.New("wrong transaction type") - errInvalidID = errors.New("invalid ID") - errProposedAddStakerTxAfterBanff = errors.New("staker transaction proposed after Banff") - errAdvanceTimeTxIssuedAfterBanff = errors.New("AdvanceTimeTx issued after Banff") + ErrRemoveStakerTooEarly = errors.New("attempting to remove staker before their end time") + ErrRemoveWrongStaker = errors.New("attempting to remove wrong staker") + ErrChildBlockNotAfterParent = errors.New("proposed timestamp not after current chain time") + ErrInvalidState = errors.New("generated output isn't valid state") + ErrShouldBePermissionlessStaker = 
errors.New("expected permissionless staker") + ErrWrongTxType = errors.New("wrong transaction type") + ErrInvalidID = errors.New("invalid ID") + ErrProposedAddStakerTxAfterBanff = errors.New("staker transaction proposed after Banff") + ErrAdvanceTimeTxIssuedAfterBanff = errors.New("AdvanceTimeTx issued after Banff") ) var ( @@ -59,44 +60,46 @@ type ProposalTxExecutor struct { // [OnAbortState] is modified by this struct's methods to // reflect changes made to the state if the proposal is aborted. OnAbortState state.Diff - - // outputs populated by this struct's methods: - // - // [PrefersCommit] is true iff this node initially prefers to - // commit this block transaction. - PrefersCommit bool } func (*ProposalTxExecutor) CreateChainTx(*txs.CreateChainTx) error { - return errWrongTxType + return ErrWrongTxType } func (*ProposalTxExecutor) CreateSubnetTx(*txs.CreateSubnetTx) error { - return errWrongTxType + return ErrWrongTxType } func (*ProposalTxExecutor) ImportTx(*txs.ImportTx) error { - return errWrongTxType + return ErrWrongTxType } func (*ProposalTxExecutor) ExportTx(*txs.ExportTx) error { - return errWrongTxType + return ErrWrongTxType } func (*ProposalTxExecutor) RemoveSubnetValidatorTx(*txs.RemoveSubnetValidatorTx) error { - return errWrongTxType + return ErrWrongTxType } func (*ProposalTxExecutor) TransformSubnetTx(*txs.TransformSubnetTx) error { - return errWrongTxType + return ErrWrongTxType } func (*ProposalTxExecutor) AddPermissionlessValidatorTx(*txs.AddPermissionlessValidatorTx) error { - return errWrongTxType + return ErrWrongTxType } func (*ProposalTxExecutor) AddPermissionlessDelegatorTx(*txs.AddPermissionlessDelegatorTx) error { - return errWrongTxType + return ErrWrongTxType +} + +func (*ProposalTxExecutor) TransferSubnetOwnershipTx(*txs.TransferSubnetOwnershipTx) error { + return ErrWrongTxType +} + +func (*ProposalTxExecutor) BaseTx(*txs.BaseTx) error { + return ErrWrongTxType } func (e *ProposalTxExecutor) AddValidatorTx(tx 
*txs.AddValidatorTx) error { @@ -107,7 +110,7 @@ func (e *ProposalTxExecutor) AddValidatorTx(tx *txs.AddValidatorTx) error { if e.Config.IsBanffActivated(currentTimestamp) { return fmt.Errorf( "%w: timestamp (%s) >= Banff fork time (%s)", - errProposedAddStakerTxAfterBanff, + ErrProposedAddStakerTxAfterBanff, currentTimestamp, e.Config.BanffTime, ) @@ -143,8 +146,6 @@ func (e *ProposalTxExecutor) AddValidatorTx(tx *txs.AddValidatorTx) error { avax.Consume(e.OnAbortState, tx.Ins) // Produce the UTXOs avax.Produce(e.OnAbortState, txID, onAbortOuts) - - e.PrefersCommit = tx.StartTime().After(e.Clk.Time()) return nil } @@ -156,7 +157,7 @@ func (e *ProposalTxExecutor) AddSubnetValidatorTx(tx *txs.AddSubnetValidatorTx) if e.Config.IsBanffActivated(currentTimestamp) { return fmt.Errorf( "%w: timestamp (%s) >= Banff fork time (%s)", - errProposedAddStakerTxAfterBanff, + ErrProposedAddStakerTxAfterBanff, currentTimestamp, e.Config.BanffTime, ) @@ -191,8 +192,6 @@ func (e *ProposalTxExecutor) AddSubnetValidatorTx(tx *txs.AddSubnetValidatorTx) avax.Consume(e.OnAbortState, tx.Ins) // Produce the UTXOs avax.Produce(e.OnAbortState, txID, tx.Outs) - - e.PrefersCommit = tx.StartTime().After(e.Clk.Time()) return nil } @@ -204,7 +203,7 @@ func (e *ProposalTxExecutor) AddDelegatorTx(tx *txs.AddDelegatorTx) error { if e.Config.IsBanffActivated(currentTimestamp) { return fmt.Errorf( "%w: timestamp (%s) >= Banff fork time (%s)", - errProposedAddStakerTxAfterBanff, + ErrProposedAddStakerTxAfterBanff, currentTimestamp, e.Config.BanffTime, ) @@ -240,8 +239,6 @@ func (e *ProposalTxExecutor) AddDelegatorTx(tx *txs.AddDelegatorTx) error { avax.Consume(e.OnAbortState, tx.Ins) // Produce the UTXOs avax.Produce(e.OnAbortState, txID, onAbortOuts) - - e.PrefersCommit = tx.StartTime().After(e.Clk.Time()) return nil } @@ -258,7 +255,7 @@ func (e *ProposalTxExecutor) AdvanceTimeTx(tx *txs.AdvanceTimeTx) error { if e.Config.IsBanffActivated(newChainTime) { return fmt.Errorf( "%w: proposed timestamp 
(%s) >= Banff fork time (%s)", - errAdvanceTimeTxIssuedAfterBanff, + ErrAdvanceTimeTxIssuedAfterBanff, newChainTime, e.Config.BanffTime, ) @@ -268,7 +265,7 @@ func (e *ProposalTxExecutor) AdvanceTimeTx(tx *txs.AdvanceTimeTx) error { if !newChainTime.After(parentChainTime) { return fmt.Errorf( "%w, proposed timestamp (%s), chain time (%s)", - errChildBlockNotAfterParent, + ErrChildBlockNotAfterParent, parentChainTime, parentChainTime, ) @@ -290,19 +287,9 @@ func (e *ProposalTxExecutor) AdvanceTimeTx(tx *txs.AdvanceTimeTx) error { return err } - changes, err := AdvanceTimeTo(e.Backend, e.OnCommitState, newChainTime) - if err != nil { - return err - } - - // Update the state if this tx is committed - e.OnCommitState.SetTimestamp(newChainTime) - changes.Apply(e.OnCommitState) - - e.PrefersCommit = !newChainTime.After(now.Add(SyncBound)) - // Note that state doesn't change if this proposal is aborted - return nil + _, err = AdvanceTimeTo(e.Backend, e.OnCommitState, newChainTime) + return err } func (e *ProposalTxExecutor) RewardValidatorTx(tx *txs.RewardValidatorTx) error { @@ -310,7 +297,7 @@ func (e *ProposalTxExecutor) RewardValidatorTx(tx *txs.RewardValidatorTx) error case tx == nil: return txs.ErrNilTx case tx.TxID == ids.Empty: - return errInvalidID + return ErrInvalidID case len(e.Tx.Creds) != 0: return errWrongNumberOfCredentials } @@ -322,499 +309,306 @@ func (e *ProposalTxExecutor) RewardValidatorTx(tx *txs.RewardValidatorTx) error if !currentStakerIterator.Next() { return fmt.Errorf("failed to get next staker to remove: %w", database.ErrNotFound) } - stakerToRemove := currentStakerIterator.Value() + stakerToReward := currentStakerIterator.Value() currentStakerIterator.Release() - if stakerToRemove.TxID != tx.TxID { + if stakerToReward.TxID != tx.TxID { return fmt.Errorf( - "attempting to remove TxID: %s. 
Should be removing %s", + "%w: %s != %s", + ErrRemoveWrongStaker, + stakerToReward.TxID, tx.TxID, - stakerToRemove.TxID, ) } // Verify that the chain's timestamp is the validator's end time currentChainTime := e.OnCommitState.GetTimestamp() - if !stakerToRemove.EndTime.Equal(currentChainTime) { + if !stakerToReward.EndTime.Equal(currentChainTime) { return fmt.Errorf( - "attempting to remove TxID: %s before their end time %s", + "%w: TxID = %s with %s < %s", + ErrRemoveStakerTooEarly, tx.TxID, - stakerToRemove.EndTime, + currentChainTime, + stakerToReward.EndTime, ) } - primaryNetworkValidator, err := e.OnCommitState.GetCurrentValidator( - constants.PrimaryNetworkID, - stakerToRemove.NodeID, - ) - if err != nil { - // This should never error because the staker set is in memory and - // primary network validators are removed last. - return err - } - - stakerTx, _, err := e.OnCommitState.GetTx(stakerToRemove.TxID) + stakerTx, _, err := e.OnCommitState.GetTx(stakerToReward.TxID) if err != nil { return fmt.Errorf("failed to get next removed staker tx: %w", err) } + // Invariant: A [txs.DelegatorTx] does not also implement the + // [txs.ValidatorTx] interface. switch uStakerTx := stakerTx.Unsigned.(type) { case txs.ValidatorTx: - e.OnCommitState.DeleteCurrentValidator(stakerToRemove) - e.OnAbortState.DeleteCurrentValidator(stakerToRemove) - - stake := uStakerTx.Stake() - outputs := uStakerTx.Outputs() - // Invariant: The staked asset must be equal to the reward asset. 
- stakeAsset := stake[0].Asset - - // Refund the stake here - for i, out := range stake { - utxo := &avax.UTXO{ - UTXOID: avax.UTXOID{ - TxID: tx.TxID, - OutputIndex: uint32(len(outputs) + i), - }, - Asset: out.Asset, - Out: out.Output(), - } - e.OnCommitState.AddUTXO(utxo) - e.OnAbortState.AddUTXO(utxo) - } - - offset := 0 - - // Provide the reward here - if stakerToRemove.PotentialReward > 0 { - validationRewardsOwner := uStakerTx.ValidationRewardsOwner() - outIntf, err := e.Fx.CreateOutput(stakerToRemove.PotentialReward, validationRewardsOwner) - if err != nil { - return fmt.Errorf("failed to create output: %w", err) - } - out, ok := outIntf.(verify.State) - if !ok { - return errInvalidState - } - - utxo := &avax.UTXO{ - UTXOID: avax.UTXOID{ - TxID: tx.TxID, - OutputIndex: uint32(len(outputs) + len(stake)), - }, - Asset: stakeAsset, - Out: out, - } - - e.OnCommitState.AddUTXO(utxo) - e.OnCommitState.AddRewardUTXO(tx.TxID, utxo) - - offset++ - } - - // Provide the accrued delegatee rewards from successful delegations here. 
- delegateeReward, err := e.OnCommitState.GetDelegateeReward( - stakerToRemove.SubnetID, - stakerToRemove.NodeID, - ) - if err != nil { - return fmt.Errorf("failed to fetch accrued delegatee rewards: %w", err) + if err := e.rewardValidatorTx(uStakerTx, stakerToReward); err != nil { + return err } - if delegateeReward > 0 { - delegationRewardsOwner := uStakerTx.DelegationRewardsOwner() - outIntf, err := e.Fx.CreateOutput(delegateeReward, delegationRewardsOwner) - if err != nil { - return fmt.Errorf("failed to create output: %w", err) - } - out, ok := outIntf.(verify.State) - if !ok { - return errInvalidState - } - - onCommitUtxo := &avax.UTXO{ - UTXOID: avax.UTXOID{ - TxID: tx.TxID, - OutputIndex: uint32(len(outputs) + len(stake) + offset), - }, - Asset: stakeAsset, - Out: out, - } - e.OnCommitState.AddUTXO(onCommitUtxo) - e.OnCommitState.AddRewardUTXO(tx.TxID, onCommitUtxo) - - onAbortUtxo := &avax.UTXO{ - UTXOID: avax.UTXOID{ - TxID: tx.TxID, - // Note: There is no [offset] if the RewardValidatorTx is - // aborted, because the validator reward is not awarded. - OutputIndex: uint32(len(outputs) + len(stake)), - }, - Asset: stakeAsset, - Out: out, - } - e.OnAbortState.AddUTXO(onAbortUtxo) - e.OnAbortState.AddRewardUTXO(tx.TxID, onAbortUtxo) - } - // Invariant: A [txs.DelegatorTx] does not also implement the - // [txs.ValidatorTx] interface. + // Handle staker lifecycle. 
+ e.OnCommitState.DeleteCurrentValidator(stakerToReward) + e.OnAbortState.DeleteCurrentValidator(stakerToReward) case txs.DelegatorTx: - e.OnCommitState.DeleteCurrentDelegator(stakerToRemove) - e.OnAbortState.DeleteCurrentDelegator(stakerToRemove) - - stake := uStakerTx.Stake() - outputs := uStakerTx.Outputs() - stakeAsset := stake[0].Asset - - // Refund the stake here - for i, out := range stake { - utxo := &avax.UTXO{ - UTXOID: avax.UTXOID{ - TxID: tx.TxID, - OutputIndex: uint32(len(outputs) + i), - }, - Asset: out.Asset, - Out: out.Output(), - } - e.OnCommitState.AddUTXO(utxo) - e.OnAbortState.AddUTXO(utxo) - } - - // We're removing a delegator, so we need to fetch the validator they - // are delegated to. - vdrStaker, err := e.OnCommitState.GetCurrentValidator( - stakerToRemove.SubnetID, - stakerToRemove.NodeID, - ) - if err != nil { - return fmt.Errorf( - "failed to get whether %s is a validator: %w", - stakerToRemove.NodeID, - err, - ) - } - - vdrTxIntf, _, err := e.OnCommitState.GetTx(vdrStaker.TxID) - if err != nil { - return fmt.Errorf( - "failed to get whether %s is a validator: %w", - stakerToRemove.NodeID, - err, - ) - } - - // Invariant: Delegators must only be able to reference validator - // transactions that implement [txs.ValidatorTx]. All - // validator transactions implement this interface except the - // AddSubnetValidatorTx. 
- vdrTx, ok := vdrTxIntf.Unsigned.(txs.ValidatorTx) - if !ok { - return errWrongTxType - } - - // Calculate split of reward between delegator/delegatee - // The delegator gives stake to the validatee - validatorShares := vdrTx.Shares() - delegatorShares := reward.PercentDenominator - uint64(validatorShares) // parentTx.Shares <= reward.PercentDenominator so no underflow - delegatorReward := delegatorShares * (stakerToRemove.PotentialReward / reward.PercentDenominator) // delegatorShares <= reward.PercentDenominator so no overflow - // Delay rounding as long as possible for small numbers - if optimisticReward, err := math.Mul64(delegatorShares, stakerToRemove.PotentialReward); err == nil { - delegatorReward = optimisticReward / reward.PercentDenominator - } - delegateeReward := stakerToRemove.PotentialReward - delegatorReward // delegatorReward <= reward so no underflow - - offset := 0 - - // Reward the delegator here - if delegatorReward > 0 { - rewardsOwner := uStakerTx.RewardsOwner() - outIntf, err := e.Fx.CreateOutput(delegatorReward, rewardsOwner) - if err != nil { - return fmt.Errorf("failed to create output: %w", err) - } - out, ok := outIntf.(verify.State) - if !ok { - return errInvalidState - } - utxo := &avax.UTXO{ - UTXOID: avax.UTXOID{ - TxID: tx.TxID, - OutputIndex: uint32(len(outputs) + len(stake)), - }, - Asset: stakeAsset, - Out: out, - } - - e.OnCommitState.AddUTXO(utxo) - e.OnCommitState.AddRewardUTXO(tx.TxID, utxo) - - offset++ + if err := e.rewardDelegatorTx(uStakerTx, stakerToReward); err != nil { + return err } - // Reward the delegatee here - if delegateeReward > 0 { - if vdrStaker.StartTime.After(e.Config.CortinaTime) { - previousDelegateeReward, err := e.OnCommitState.GetDelegateeReward( - vdrStaker.SubnetID, - vdrStaker.NodeID, - ) - if err != nil { - return fmt.Errorf("failed to get delegatee reward: %w", err) - } - - // Invariant: The rewards calculator can never return a - // [potentialReward] that would overflow the - // accumulated 
rewards. - newDelegateeReward := previousDelegateeReward + delegateeReward - - // For any validators starting after [CortinaTime], we defer rewarding the - // [delegateeReward] until their staking period is over. - err = e.OnCommitState.SetDelegateeReward( - vdrStaker.SubnetID, - vdrStaker.NodeID, - newDelegateeReward, - ) - if err != nil { - return fmt.Errorf("failed to update delegatee reward: %w", err) - } - } else { - // For any validators who started prior to [CortinaTime], we issue the - // [delegateeReward] immediately. - delegationRewardsOwner := vdrTx.DelegationRewardsOwner() - outIntf, err := e.Fx.CreateOutput(delegateeReward, delegationRewardsOwner) - if err != nil { - return fmt.Errorf("failed to create output: %w", err) - } - out, ok := outIntf.(verify.State) - if !ok { - return errInvalidState - } - utxo := &avax.UTXO{ - UTXOID: avax.UTXOID{ - TxID: tx.TxID, - OutputIndex: uint32(len(outputs) + len(stake) + offset), - }, - Asset: stakeAsset, - Out: out, - } - - e.OnCommitState.AddUTXO(utxo) - e.OnCommitState.AddRewardUTXO(tx.TxID, utxo) - } - } + // Handle staker lifecycle. + e.OnCommitState.DeleteCurrentDelegator(stakerToReward) + e.OnAbortState.DeleteCurrentDelegator(stakerToReward) default: // Invariant: Permissioned stakers are removed by the advancement of // time and the current chain timestamp is == this staker's // EndTime. This means only permissionless stakers should be // left in the staker set. - return errShouldBePermissionlessStaker + return ErrShouldBePermissionlessStaker } // If the reward is aborted, then the current supply should be decreased. 
- currentSupply, err := e.OnAbortState.GetCurrentSupply(stakerToRemove.SubnetID) + currentSupply, err := e.OnAbortState.GetCurrentSupply(stakerToReward.SubnetID) if err != nil { return err } - newSupply, err := math.Sub(currentSupply, stakerToRemove.PotentialReward) + newSupply, err := math.Sub(currentSupply, stakerToReward.PotentialReward) if err != nil { return err } - e.OnAbortState.SetCurrentSupply(stakerToRemove.SubnetID, newSupply) + e.OnAbortState.SetCurrentSupply(stakerToReward.SubnetID, newSupply) + return nil +} + +func (e *ProposalTxExecutor) rewardValidatorTx(uValidatorTx txs.ValidatorTx, validator *state.Staker) error { + var ( + txID = validator.TxID + stake = uValidatorTx.Stake() + outputs = uValidatorTx.Outputs() + // Invariant: The staked asset must be equal to the reward asset. + stakeAsset = stake[0].Asset + ) - var expectedUptimePercentage float64 - if stakerToRemove.SubnetID != constants.PrimaryNetworkID { - transformSubnetIntf, err := e.OnCommitState.GetSubnetTransformation(stakerToRemove.SubnetID) + // Refund the stake only when validator is about to leave + // the staking set + for i, out := range stake { + utxo := &avax.UTXO{ + UTXOID: avax.UTXOID{ + TxID: txID, + OutputIndex: uint32(len(outputs) + i), + }, + Asset: out.Asset, + Out: out.Output(), + } + e.OnCommitState.AddUTXO(utxo) + e.OnAbortState.AddUTXO(utxo) + } + + utxosOffset := 0 + + // Provide the reward here + reward := validator.PotentialReward + if reward > 0 { + validationRewardsOwner := uValidatorTx.ValidationRewardsOwner() + outIntf, err := e.Fx.CreateOutput(reward, validationRewardsOwner) if err != nil { - return err + return fmt.Errorf("failed to create output: %w", err) } - transformSubnet, ok := transformSubnetIntf.Unsigned.(*txs.TransformSubnetTx) + out, ok := outIntf.(verify.State) if !ok { - return errIsNotTransformSubnetTx + return ErrInvalidState } - expectedUptimePercentage = float64(transformSubnet.UptimeRequirement) / reward.PercentDenominator - } else { - 
expectedUptimePercentage = e.Config.UptimePercentage + utxo := &avax.UTXO{ + UTXOID: avax.UTXOID{ + TxID: txID, + OutputIndex: uint32(len(outputs) + len(stake)), + }, + Asset: stakeAsset, + Out: out, + } + e.OnCommitState.AddUTXO(utxo) + e.OnCommitState.AddRewardUTXO(txID, utxo) + + utxosOffset++ } - // TODO: calculate subnet uptimes - uptime, err := e.Uptimes.CalculateUptimePercentFrom( - primaryNetworkValidator.NodeID, - constants.PrimaryNetworkID, - primaryNetworkValidator.StartTime, + // Provide the accrued delegatee rewards from successful delegations here. + delegateeReward, err := e.OnCommitState.GetDelegateeReward( + validator.SubnetID, + validator.NodeID, ) if err != nil { - return fmt.Errorf("failed to calculate uptime: %w", err) + return fmt.Errorf("failed to fetch accrued delegatee rewards: %w", err) } - e.PrefersCommit = uptime >= expectedUptimePercentage - return nil -} - -// GetNextStakerChangeTime returns the next time a staker will be either added -// or removed to/from the current validator set. 
-func GetNextStakerChangeTime(state state.Chain) (time.Time, error) { - currentStakerIterator, err := state.GetCurrentStakerIterator() - if err != nil { - return time.Time{}, err + if delegateeReward == 0 { + return nil } - defer currentStakerIterator.Release() - pendingStakerIterator, err := state.GetPendingStakerIterator() + delegationRewardsOwner := uValidatorTx.DelegationRewardsOwner() + outIntf, err := e.Fx.CreateOutput(delegateeReward, delegationRewardsOwner) if err != nil { - return time.Time{}, err - } - defer pendingStakerIterator.Release() - - hasCurrentStaker := currentStakerIterator.Next() - hasPendingStaker := pendingStakerIterator.Next() - switch { - case hasCurrentStaker && hasPendingStaker: - nextCurrentTime := currentStakerIterator.Value().NextTime - nextPendingTime := pendingStakerIterator.Value().NextTime - if nextCurrentTime.Before(nextPendingTime) { - return nextCurrentTime, nil - } - return nextPendingTime, nil - case hasCurrentStaker: - return currentStakerIterator.Value().NextTime, nil - case hasPendingStaker: - return pendingStakerIterator.Value().NextTime, nil - default: - // Due to no initial stakers in genesis for Songbird networks - if state.GetNetworkID() == constants.SongbirdID || state.GetNetworkID() == constants.CostonID || state.GetNetworkID() == constants.LocalID { - return songbirdLatestStakingTime, nil - } - return time.Time{}, database.ErrNotFound - } + return fmt.Errorf("failed to create output: %w", err) + } + out, ok := outIntf.(verify.State) + if !ok { + return ErrInvalidState + } + + onCommitUtxo := &avax.UTXO{ + UTXOID: avax.UTXOID{ + TxID: txID, + OutputIndex: uint32(len(outputs) + len(stake) + utxosOffset), + }, + Asset: stakeAsset, + Out: out, + } + e.OnCommitState.AddUTXO(onCommitUtxo) + e.OnCommitState.AddRewardUTXO(txID, onCommitUtxo) + + // Note: There is no [offset] if the RewardValidatorTx is + // aborted, because the validator reward is not awarded. 
+ onAbortUtxo := &avax.UTXO{ + UTXOID: avax.UTXOID{ + TxID: txID, + OutputIndex: uint32(len(outputs) + len(stake)), + }, + Asset: stakeAsset, + Out: out, + } + e.OnAbortState.AddUTXO(onAbortUtxo) + e.OnAbortState.AddRewardUTXO(txID, onAbortUtxo) + return nil } -// GetValidator returns information about the given validator, which may be a -// current validator or pending validator. -func GetValidator(state state.Chain, subnetID ids.ID, nodeID ids.NodeID) (*state.Staker, error) { - validator, err := state.GetCurrentValidator(subnetID, nodeID) - if err == nil { - // This node is currently validating the subnet. - return validator, nil - } - if err != database.ErrNotFound { - // Unexpected error occurred. - return nil, err +func (e *ProposalTxExecutor) rewardDelegatorTx(uDelegatorTx txs.DelegatorTx, delegator *state.Staker) error { + var ( + txID = delegator.TxID + stake = uDelegatorTx.Stake() + outputs = uDelegatorTx.Outputs() + // Invariant: The staked asset must be equal to the reward asset. + stakeAsset = stake[0].Asset + ) + + // Refund the stake only when delegator is about to leave + // the staking set + for i, out := range stake { + utxo := &avax.UTXO{ + UTXOID: avax.UTXOID{ + TxID: txID, + OutputIndex: uint32(len(outputs) + i), + }, + Asset: out.Asset, + Out: out.Output(), + } + e.OnCommitState.AddUTXO(utxo) + e.OnAbortState.AddUTXO(utxo) } - return state.GetPendingValidator(subnetID, nodeID) -} -// canDelegate returns true if [delegator] can be added as a delegator of -// [validator]. 
-// -// A [delegator] can be added if: -// - [delegator]'s start time is not before [validator]'s start time -// - [delegator]'s end time is not after [validator]'s end time -// - the maximum total weight on [validator] will not exceed [weightLimit] -func canDelegate( - state state.Chain, - validator *state.Staker, - weightLimit uint64, - delegator *state.Staker, -) (bool, error) { - if delegator.StartTime.Before(validator.StartTime) { - return false, nil - } - if delegator.EndTime.After(validator.EndTime) { - return false, nil - } - - maxWeight, err := GetMaxWeight(state, validator, delegator.StartTime, delegator.EndTime) + // We're (possibly) rewarding a delegator, so we need to fetch + // the validator they are delegated to. + validator, err := e.OnCommitState.GetCurrentValidator(delegator.SubnetID, delegator.NodeID) if err != nil { - return false, err + return fmt.Errorf("failed to get whether %s is a validator: %w", delegator.NodeID, err) } - newMaxWeight, err := math.Add64(maxWeight, delegator.Weight) + + vdrTxIntf, _, err := e.OnCommitState.GetTx(validator.TxID) if err != nil { - return false, err + return fmt.Errorf("failed to get whether %s is a validator: %w", delegator.NodeID, err) } - return newMaxWeight <= weightLimit, nil -} -// GetMaxWeight returns the maximum total weight of the [validator], including -// its own weight, between [startTime] and [endTime]. -// The weight changes are applied in the order they will be applied as chain -// time advances. -// Invariant: -// - [validator.StartTime] <= [startTime] < [endTime] <= [validator.EndTime] -func GetMaxWeight( - chainState state.Chain, - validator *state.Staker, - startTime time.Time, - endTime time.Time, -) (uint64, error) { - currentDelegatorIterator, err := chainState.GetCurrentDelegatorIterator(validator.SubnetID, validator.NodeID) - if err != nil { - return 0, err + // Invariant: Delegators must only be able to reference validator + // transactions that implement [txs.ValidatorTx]. 
All + // validator transactions implement this interface except the + // AddSubnetValidatorTx. + vdrTx, ok := vdrTxIntf.Unsigned.(txs.ValidatorTx) + if !ok { + return ErrWrongTxType } - // TODO: We can optimize this by moving the current total weight to be - // stored in the validator state. - // - // Calculate the current total weight on this validator, including the - // weight of the actual validator and the sum of the weights of all of the - // currently active delegators. - currentWeight := validator.Weight - for currentDelegatorIterator.Next() { - currentDelegator := currentDelegatorIterator.Value() - - currentWeight, err = math.Add64(currentWeight, currentDelegator.Weight) + // Calculate split of reward between delegator/delegatee + delegateeReward, delegatorReward := reward.Split(delegator.PotentialReward, vdrTx.Shares()) + + utxosOffset := 0 + + // Reward the delegator here + reward := delegatorReward + if reward > 0 { + rewardsOwner := uDelegatorTx.RewardsOwner() + outIntf, err := e.Fx.CreateOutput(reward, rewardsOwner) if err != nil { - currentDelegatorIterator.Release() - return 0, err + return fmt.Errorf("failed to create output: %w", err) } + out, ok := outIntf.(verify.State) + if !ok { + return ErrInvalidState + } + utxo := &avax.UTXO{ + UTXOID: avax.UTXOID{ + TxID: txID, + OutputIndex: uint32(len(outputs) + len(stake)), + }, + Asset: stakeAsset, + Out: out, + } + + e.OnCommitState.AddUTXO(utxo) + e.OnCommitState.AddRewardUTXO(txID, utxo) + + utxosOffset++ } - currentDelegatorIterator.Release() - currentDelegatorIterator, err = chainState.GetCurrentDelegatorIterator(validator.SubnetID, validator.NodeID) - if err != nil { - return 0, err + if delegateeReward == 0 { + return nil } - pendingDelegatorIterator, err := chainState.GetPendingDelegatorIterator(validator.SubnetID, validator.NodeID) - if err != nil { - currentDelegatorIterator.Release() - return 0, err - } - delegatorChangesIterator := state.NewStakerDiffIterator(currentDelegatorIterator, 
pendingDelegatorIterator) - defer delegatorChangesIterator.Release() - - // Iterate over the future stake weight changes and calculate the maximum - // total weight on the validator, only including the points in the time - // range [startTime, endTime]. - var currentMax uint64 - for delegatorChangesIterator.Next() { - delegator, isAdded := delegatorChangesIterator.Value() - // [delegator.NextTime] > [endTime] - if delegator.NextTime.After(endTime) { - // This delegation change (and all following changes) occurs after - // [endTime]. Since we're calculating the max amount staked in - // [startTime, endTime], we can stop. - break - } - // [delegator.NextTime] >= [startTime] - if !delegator.NextTime.Before(startTime) { - // We have advanced time to be at the inside of the delegation - // window. Make sure that the max weight is updated accordingly. - currentMax = math.Max(currentMax, currentWeight) + // Reward the delegatee here + if e.Config.IsCortinaActivated(validator.StartTime) { + previousDelegateeReward, err := e.OnCommitState.GetDelegateeReward( + validator.SubnetID, + validator.NodeID, + ) + if err != nil { + return fmt.Errorf("failed to get delegatee reward: %w", err) } - var op func(uint64, uint64) (uint64, error) - if isAdded { - op = math.Add64 - } else { - op = math.Sub[uint64] + // Invariant: The rewards calculator can never return a + // [potentialReward] that would overflow the + // accumulated rewards. + newDelegateeReward := previousDelegateeReward + delegateeReward + + // For any validators starting after [CortinaTime], we defer rewarding the + // [reward] until their staking period is over. 
+ err = e.OnCommitState.SetDelegateeReward( + validator.SubnetID, + validator.NodeID, + newDelegateeReward, + ) + if err != nil { + return fmt.Errorf("failed to update delegatee reward: %w", err) } - currentWeight, err = op(currentWeight, delegator.Weight) + } else { + // For any validators who started prior to [CortinaTime], we issue the + // [delegateeReward] immediately. + delegationRewardsOwner := vdrTx.DelegationRewardsOwner() + outIntf, err := e.Fx.CreateOutput(delegateeReward, delegationRewardsOwner) if err != nil { - return 0, err + return fmt.Errorf("failed to create output: %w", err) + } + out, ok := outIntf.(verify.State) + if !ok { + return ErrInvalidState } + utxo := &avax.UTXO{ + UTXOID: avax.UTXOID{ + TxID: txID, + OutputIndex: uint32(len(outputs) + len(stake) + utxosOffset), + }, + Asset: stakeAsset, + Out: out, + } + + e.OnCommitState.AddUTXO(utxo) + e.OnCommitState.AddRewardUTXO(txID, utxo) } - // Because we assume [startTime] < [endTime], we have advanced time to - // be at the end of the delegation window. Make sure that the max weight is - // updated accordingly. - return math.Max(currentMax, currentWeight), nil + return nil } diff --git a/avalanchego/vms/platformvm/txs/executor/proposal_tx_executor_test.go b/avalanchego/vms/platformvm/txs/executor/proposal_tx_executor_test.go index 5ea50eb8..a6ecc21b 100644 --- a/avalanchego/vms/platformvm/txs/executor/proposal_tx_executor_test.go +++ b/avalanchego/vms/platformvm/txs/executor/proposal_tx_executor_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package executor @@ -11,6 +11,7 @@ import ( "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/hashing" @@ -24,7 +25,7 @@ import ( func TestProposalTxExecuteAddDelegator(t *testing.T) { dummyHeight := uint64(1) rewardAddress := preFundedKeys[0].PublicKey().Address() - nodeID := ids.NodeID(rewardAddress) + nodeID := genesisNodeIDs[0] newValidatorID := ids.GenerateTestNodeID() newValidatorStartTime := uint64(defaultValidateStartTime.Add(5 * time.Second).Unix()) @@ -42,12 +43,15 @@ func TestProposalTxExecuteAddDelegator(t *testing.T) { reward.PercentDenominator, // Shares []*secp256k1.PrivateKey{preFundedKeys[0]}, ids.ShortEmpty, + nil, ) require.NoError(t, err) + addValTx := tx.Unsigned.(*txs.AddValidatorTx) staker, err := state.NewCurrentStaker( tx.ID(), - tx.Unsigned.(*txs.AddValidatorTx), + addValTx, + addValTx.StartTime(), 0, ) require.NoError(t, err) @@ -55,8 +59,7 @@ func TestProposalTxExecuteAddDelegator(t *testing.T) { target.state.PutCurrentValidator(staker) target.state.AddTx(tx, status.Committed) target.state.SetHeight(dummyHeight) - err = target.state.Commit() - require.NoError(t, err) + require.NoError(t, target.state.Commit()) } // [addMaxStakeValidator] adds a new validator to the primary network's @@ -71,12 +74,15 @@ func TestProposalTxExecuteAddDelegator(t *testing.T) { reward.PercentDenominator, // Shared []*secp256k1.PrivateKey{preFundedKeys[0]}, ids.ShortEmpty, + nil, ) require.NoError(t, err) + addValTx := tx.Unsigned.(*txs.AddValidatorTx) staker, err := state.NewCurrentStaker( tx.ID(), - tx.Unsigned.(*txs.AddValidatorTx), + addValTx, + addValTx.StartTime(), 0, ) require.NoError(t, err) @@ -84,14 +90,14 @@ func TestProposalTxExecuteAddDelegator(t *testing.T) { target.state.PutCurrentValidator(staker) target.state.AddTx(tx, status.Committed) target.state.SetHeight(dummyHeight) - err = 
target.state.Commit() - require.NoError(t, err) + require.NoError(t, target.state.Commit()) } - dummyH := newEnvironment(false /*=postBanff*/, false /*=postCortina*/) + dummyH := newEnvironment(t, apricotPhase5) currentTimestamp := dummyH.state.GetTimestamp() type test struct { + description string stakeAmount uint64 startTime uint64 endTime uint64 @@ -100,48 +106,36 @@ func TestProposalTxExecuteAddDelegator(t *testing.T) { feeKeys []*secp256k1.PrivateKey setup func(*environment) AP3Time time.Time - shouldErr bool - description string + expectedErr error } tests := []test{ { + description: "validator stops validating earlier than delegator", stakeAmount: dummyH.config.MinDelegatorStake, - startTime: uint64(defaultValidateStartTime.Unix()), + startTime: uint64(defaultValidateStartTime.Unix()) + 1, endTime: uint64(defaultValidateEndTime.Unix()) + 1, nodeID: nodeID, rewardAddress: rewardAddress, feeKeys: []*secp256k1.PrivateKey{preFundedKeys[0]}, setup: nil, AP3Time: defaultGenesisTime, - shouldErr: true, - description: "validator stops validating primary network earlier than subnet", + expectedErr: ErrPeriodMismatch, }, { + description: fmt.Sprintf("delegator should not be added more than (%s) in the future", MaxFutureStartTime), stakeAmount: dummyH.config.MinDelegatorStake, startTime: uint64(currentTimestamp.Add(MaxFutureStartTime + time.Second).Unix()), - endTime: uint64(currentTimestamp.Add(MaxFutureStartTime * 2).Unix()), - nodeID: nodeID, - rewardAddress: rewardAddress, - feeKeys: []*secp256k1.PrivateKey{preFundedKeys[0]}, - setup: nil, - AP3Time: defaultGenesisTime, - shouldErr: true, - description: fmt.Sprintf("validator should not be added more than (%s) in the future", MaxFutureStartTime), - }, - { - stakeAmount: dummyH.config.MinDelegatorStake, - startTime: uint64(defaultValidateStartTime.Unix()), - endTime: uint64(defaultValidateEndTime.Unix()) + 1, + endTime: uint64(currentTimestamp.Add(MaxFutureStartTime + defaultMinStakingDuration + 
time.Second).Unix()), nodeID: nodeID, rewardAddress: rewardAddress, feeKeys: []*secp256k1.PrivateKey{preFundedKeys[0]}, setup: nil, AP3Time: defaultGenesisTime, - shouldErr: true, - description: "end time is after the primary network end time", + expectedErr: ErrFutureStakeTime, }, { + description: "validator not in the current or pending validator sets", stakeAmount: dummyH.config.MinDelegatorStake, startTime: uint64(defaultValidateStartTime.Add(5 * time.Second).Unix()), endTime: uint64(defaultValidateEndTime.Add(-5 * time.Second).Unix()), @@ -150,10 +144,10 @@ func TestProposalTxExecuteAddDelegator(t *testing.T) { feeKeys: []*secp256k1.PrivateKey{preFundedKeys[0]}, setup: nil, AP3Time: defaultGenesisTime, - shouldErr: true, - description: "validator not in the current or pending validator sets of the subnet", + expectedErr: database.ErrNotFound, }, { + description: "delegator starts before validator", stakeAmount: dummyH.config.MinDelegatorStake, startTime: newValidatorStartTime - 1, // start validating subnet before primary network endTime: newValidatorEndTime, @@ -162,10 +156,10 @@ func TestProposalTxExecuteAddDelegator(t *testing.T) { feeKeys: []*secp256k1.PrivateKey{preFundedKeys[0]}, setup: addMinStakeValidator, AP3Time: defaultGenesisTime, - shouldErr: true, - description: "validator starts validating subnet before primary network", + expectedErr: ErrPeriodMismatch, }, { + description: "delegator stops before validator", stakeAmount: dummyH.config.MinDelegatorStake, startTime: newValidatorStartTime, endTime: newValidatorEndTime + 1, // stop validating subnet after stopping validating primary network @@ -174,10 +168,10 @@ func TestProposalTxExecuteAddDelegator(t *testing.T) { feeKeys: []*secp256k1.PrivateKey{preFundedKeys[0]}, setup: addMinStakeValidator, AP3Time: defaultGenesisTime, - shouldErr: true, - description: "validator stops validating primary network before subnet", + expectedErr: ErrPeriodMismatch, }, { + description: "valid", stakeAmount: 
dummyH.config.MinDelegatorStake, startTime: newValidatorStartTime, // same start time as for primary network endTime: newValidatorEndTime, // same end time as for primary network @@ -186,10 +180,10 @@ func TestProposalTxExecuteAddDelegator(t *testing.T) { feeKeys: []*secp256k1.PrivateKey{preFundedKeys[0]}, setup: addMinStakeValidator, AP3Time: defaultGenesisTime, - shouldErr: false, - description: "valid", + expectedErr: nil, }, { + description: "starts delegating at current timestamp", stakeAmount: dummyH.config.MinDelegatorStake, // weight startTime: uint64(currentTimestamp.Unix()), // start time endTime: uint64(defaultValidateEndTime.Unix()), // end time @@ -198,16 +192,16 @@ func TestProposalTxExecuteAddDelegator(t *testing.T) { feeKeys: []*secp256k1.PrivateKey{preFundedKeys[0]}, // tx fee payer setup: nil, AP3Time: defaultGenesisTime, - shouldErr: true, - description: "starts validating at current timestamp", + expectedErr: ErrTimestampNotBeforeStartTime, }, { - stakeAmount: dummyH.config.MinDelegatorStake, // weight - startTime: uint64(defaultValidateStartTime.Unix()), // start time - endTime: uint64(defaultValidateEndTime.Unix()), // end time - nodeID: nodeID, // node ID - rewardAddress: rewardAddress, // Reward Address - feeKeys: []*secp256k1.PrivateKey{preFundedKeys[1]}, // tx fee payer + description: "tx fee paying key has no funds", + stakeAmount: dummyH.config.MinDelegatorStake, // weight + startTime: uint64(defaultValidateStartTime.Unix()) + 1, // start time + endTime: uint64(defaultValidateEndTime.Unix()), // end time + nodeID: nodeID, // node ID + rewardAddress: rewardAddress, // Reward Address + feeKeys: []*secp256k1.PrivateKey{preFundedKeys[1]}, // tx fee payer setup: func(target *environment) { // Remove all UTXOs owned by keys[1] utxoIDs, err := target.state.UTXOIDs( preFundedKeys[1].PublicKey().Address().Bytes(), @@ -219,14 +213,13 @@ func TestProposalTxExecuteAddDelegator(t *testing.T) { target.state.DeleteUTXO(utxoID) } 
target.state.SetHeight(dummyHeight) - err = target.state.Commit() - require.NoError(t, err) + require.NoError(t, target.state.Commit()) }, AP3Time: defaultGenesisTime, - shouldErr: true, - description: "tx fee paying key has no funds", + expectedErr: ErrFlowCheckFailed, }, { + description: "over delegation before AP3", stakeAmount: dummyH.config.MinDelegatorStake, startTime: newValidatorStartTime, // same start time as for primary network endTime: newValidatorEndTime, // same end time as for primary network @@ -235,10 +228,10 @@ func TestProposalTxExecuteAddDelegator(t *testing.T) { feeKeys: []*secp256k1.PrivateKey{preFundedKeys[0]}, setup: addMaxStakeValidator, AP3Time: defaultValidateEndTime, - shouldErr: false, - description: "over delegation before AP3", + expectedErr: nil, }, { + description: "over delegation after AP3", stakeAmount: dummyH.config.MinDelegatorStake, startTime: newValidatorStartTime, // same start time as for primary network endTime: newValidatorEndTime, // same end time as for primary network @@ -247,19 +240,15 @@ func TestProposalTxExecuteAddDelegator(t *testing.T) { feeKeys: []*secp256k1.PrivateKey{preFundedKeys[0]}, setup: addMaxStakeValidator, AP3Time: defaultGenesisTime, - shouldErr: true, - description: "over delegation after AP3", + expectedErr: ErrOverDelegated, }, } for _, tt := range tests { t.Run(tt.description, func(t *testing.T) { require := require.New(t) - freshTH := newEnvironment(false /*=postBanff*/, false /*=postCortina*/) + freshTH := newEnvironment(t, apricotPhase5) freshTH.config.ApricotPhase3Time = tt.AP3Time - defer func() { - require.NoError(shutdownEnvironment(freshTH)) - }() tx, err := freshTH.txBuilder.NewAddDelegatorTx( tt.stakeAmount, @@ -269,6 +258,7 @@ func TestProposalTxExecuteAddDelegator(t *testing.T) { tt.rewardAddress, tt.feeKeys, ids.ShortEmpty, + nil, ) require.NoError(err) @@ -289,37 +279,31 @@ func TestProposalTxExecuteAddDelegator(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - if 
tt.shouldErr { - require.Error(err) - } else { - require.NoError(err) - } + require.ErrorIs(err, tt.expectedErr) }) } } func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { require := require.New(t) - env := newEnvironment(false /*=postBanff*/, false /*=postCortina*/) + env := newEnvironment(t, apricotPhase5) env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - }() - - nodeID := preFundedKeys[0].PublicKey().Address() + defer env.ctx.Lock.Unlock() + nodeID := genesisNodeIDs[0] { // Case: Proposed validator currently validating primary network // but stops validating subnet after stops validating primary network // (note that keys[0] is a genesis validator) tx, err := env.txBuilder.NewAddSubnetValidatorTx( defaultWeight, - uint64(defaultValidateStartTime.Unix()), + uint64(defaultValidateStartTime.Unix())+1, uint64(defaultValidateEndTime.Unix())+1, - ids.NodeID(nodeID), + nodeID, testSubnet1.ID(), []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ids.ShortEmpty, // change addr + nil, ) require.NoError(err) @@ -336,7 +320,7 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - require.Error(err, "should have failed because validator stops validating primary network earlier than subnet") + require.ErrorIs(err, ErrPeriodMismatch) } { @@ -346,12 +330,13 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { // (note that keys[0] is a genesis validator) tx, err := env.txBuilder.NewAddSubnetValidatorTx( defaultWeight, - uint64(defaultValidateStartTime.Unix()+1), + uint64(defaultValidateStartTime.Unix())+1, uint64(defaultValidateEndTime.Unix()), - ids.NodeID(nodeID), + nodeID, testSubnet1.ID(), []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ids.ShortEmpty, // change addr + nil, ) require.NoError(err) @@ -367,17 +352,13 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { Backend: &env.backend, Tx: tx, 
} - err = tx.Unsigned.Visit(&executor) - require.NoError(err) + require.NoError(tx.Unsigned.Visit(&executor)) } // Add a validator to pending validator set of primary network - key, err := testKeyfactory.NewPrivateKey() - require.NoError(err) - pendingDSValidatorID := ids.NodeID(key.PublicKey().Address()) - - // starts validating primary network 10 seconds after genesis - dsStartTime := defaultGenesisTime.Add(10 * time.Second) + // Starts validating primary network 10 seconds after genesis + pendingDSValidatorID := ids.GenerateTestNodeID() + dsStartTime := defaultValidateStartTime.Add(10 * time.Second) dsEndTime := dsStartTime.Add(5 * defaultMinStakingDuration) addDSTx, err := env.txBuilder.NewAddValidatorTx( @@ -385,10 +366,11 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { uint64(dsStartTime.Unix()), // start time uint64(dsEndTime.Unix()), // end time pendingDSValidatorID, // node ID - nodeID, // reward address + ids.GenerateTestShortID(), // reward address reward.PercentDenominator, // shares []*secp256k1.PrivateKey{preFundedKeys[0]}, ids.ShortEmpty, + nil, ) require.NoError(err) @@ -402,6 +384,7 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { testSubnet1.ID(), []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ids.ShortEmpty, // change addr + nil, ) require.NoError(err) @@ -418,12 +401,14 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - require.Error(err, "should have failed because validator not in the current or pending validator sets of the primary network") + require.ErrorIs(err, ErrNotValidator) } + addValTx := addDSTx.Unsigned.(*txs.AddValidatorTx) staker, err := state.NewCurrentStaker( addDSTx.ID(), - addDSTx.Unsigned.(*txs.AddValidatorTx), + addValTx, + addValTx.StartTime(), 0, ) require.NoError(err) @@ -432,8 +417,7 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { env.state.AddTx(addDSTx, status.Committed) dummyHeight 
:= uint64(1) env.state.SetHeight(dummyHeight) - err = env.state.Commit() - require.NoError(err) + require.NoError(env.state.Commit()) // Node with ID key.PublicKey().Address() now a pending validator for primary network @@ -448,6 +432,7 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { testSubnet1.ID(), []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ids.ShortEmpty, // change addr + nil, ) require.NoError(err) @@ -464,7 +449,7 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - require.Error(err, "should have failed because validator starts validating primary network before starting to validate primary network") + require.ErrorIs(err, ErrPeriodMismatch) } { @@ -478,6 +463,7 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { testSubnet1.ID(), []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ids.ShortEmpty, // change addr + nil, ) require.NoError(err) @@ -494,7 +480,7 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - require.Error(err, "should have failed because validator stops validating primary network after stops validating primary network") + require.ErrorIs(err, ErrPeriodMismatch) } { @@ -508,6 +494,7 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { testSubnet1.ID(), []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ids.ShortEmpty, // change addr + nil, ) require.NoError(err) @@ -523,13 +510,12 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { Backend: &env.backend, Tx: tx, } - err = tx.Unsigned.Visit(&executor) - require.NoError(err) + require.NoError(tx.Unsigned.Visit(&executor)) } // Case: Proposed validator start validating at/before current timestamp // First, advance the timestamp - newTimestamp := defaultGenesisTime.Add(2 * time.Second) + newTimestamp := defaultValidateStartTime.Add(2 * 
time.Second) env.state.SetTimestamp(newTimestamp) { @@ -537,10 +523,11 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { defaultWeight, // weight uint64(newTimestamp.Unix()), // start time uint64(newTimestamp.Add(defaultMinStakingDuration).Unix()), // end time - ids.NodeID(nodeID), // node ID - testSubnet1.ID(), // subnet ID + nodeID, // node ID + testSubnet1.ID(), // subnet ID []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ids.ShortEmpty, // change addr + nil, ) require.NoError(err) @@ -557,11 +544,11 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - require.Error(err, "should have failed verification because starts validating at current timestamp") + require.ErrorIs(err, ErrTimestampNotBeforeStartTime) } // reset the timestamp - env.state.SetTimestamp(defaultGenesisTime) + env.state.SetTimestamp(defaultValidateStartTime) // Case: Proposed validator already validating the subnet // First, add validator as validator of subnet @@ -569,16 +556,19 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { defaultWeight, // weight uint64(defaultValidateStartTime.Unix()), // start time uint64(defaultValidateEndTime.Unix()), // end time - ids.NodeID(nodeID), // node ID + nodeID, // node ID testSubnet1.ID(), // subnet ID []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ids.ShortEmpty, + nil, ) require.NoError(err) + addSubnetValTx := subnetTx.Unsigned.(*txs.AddSubnetValidatorTx) staker, err = state.NewCurrentStaker( subnetTx.ID(), - subnetTx.Unsigned.(*txs.AddSubnetValidatorTx), + addSubnetValTx, + addSubnetValTx.StartTime(), 0, ) require.NoError(err) @@ -586,19 +576,19 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { env.state.PutCurrentValidator(staker) env.state.AddTx(subnetTx, status.Committed) env.state.SetHeight(dummyHeight) - err = env.state.Commit() - require.NoError(err) + require.NoError(env.state.Commit()) 
{ // Node with ID nodeIDKey.PublicKey().Address() now validating subnet with ID testSubnet1.ID duplicateSubnetTx, err := env.txBuilder.NewAddSubnetValidatorTx( - defaultWeight, // weight - uint64(defaultValidateStartTime.Unix()), // start time - uint64(defaultValidateEndTime.Unix()), // end time - ids.NodeID(nodeID), // node ID - testSubnet1.ID(), // subnet ID + defaultWeight, // weight + uint64(defaultValidateStartTime.Unix())+1, // start time + uint64(defaultValidateEndTime.Unix()), // end time + nodeID, // node ID + testSubnet1.ID(), // subnet ID []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ids.ShortEmpty, // change addr + nil, ) require.NoError(err) @@ -615,53 +605,24 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { Tx: duplicateSubnetTx, } err = duplicateSubnetTx.Unsigned.Visit(&executor) - require.Error(err, "should have failed verification because validator already validating the specified subnet") + require.ErrorIs(err, ErrDuplicateValidator) } env.state.DeleteCurrentValidator(staker) env.state.SetHeight(dummyHeight) - err = env.state.Commit() - require.NoError(err) - - { - // Case: Too many signatures - tx, err := env.txBuilder.NewAddSubnetValidatorTx( - defaultWeight, // weight - uint64(defaultGenesisTime.Unix()), // start time - uint64(defaultGenesisTime.Add(defaultMinStakingDuration).Unix())+1, // end time - ids.NodeID(nodeID), // node ID - testSubnet1.ID(), // subnet ID - []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1], testSubnet1ControlKeys[2]}, - ids.ShortEmpty, // change addr - ) - require.NoError(err) - - onCommitState, err := state.NewDiff(lastAcceptedID, env) - require.NoError(err) - - onAbortState, err := state.NewDiff(lastAcceptedID, env) - require.NoError(err) - - executor := ProposalTxExecutor{ - OnCommitState: onCommitState, - OnAbortState: onAbortState, - Backend: &env.backend, - Tx: tx, - } - err = tx.Unsigned.Visit(&executor) - require.Error(err, "should have 
failed verification because tx has 3 signatures but only 2 needed") - } + require.NoError(env.state.Commit()) { // Case: Too few signatures tx, err := env.txBuilder.NewAddSubnetValidatorTx( - defaultWeight, // weight - uint64(defaultGenesisTime.Unix()), // start time - uint64(defaultGenesisTime.Add(defaultMinStakingDuration).Unix()), // end time - ids.NodeID(nodeID), // node ID - testSubnet1.ID(), // subnet ID + defaultWeight, // weight + uint64(defaultValidateStartTime.Unix())+1, // start time + uint64(defaultValidateStartTime.Add(defaultMinStakingDuration).Unix())+1, // end time + nodeID, // node ID + testSubnet1.ID(), // subnet ID []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[2]}, ids.ShortEmpty, // change addr + nil, ) require.NoError(err) @@ -685,26 +646,27 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - require.Error(err, "should have failed verification because not enough control sigs") + require.ErrorIs(err, errUnauthorizedSubnetModification) } { // Case: Control Signature from invalid key (keys[3] is not a control key) tx, err := env.txBuilder.NewAddSubnetValidatorTx( - defaultWeight, // weight - uint64(defaultGenesisTime.Unix()), // start time - uint64(defaultGenesisTime.Add(defaultMinStakingDuration).Unix()), // end time - ids.NodeID(nodeID), // node ID - testSubnet1.ID(), // subnet ID + defaultWeight, // weight + uint64(defaultValidateStartTime.Unix())+1, // start time + uint64(defaultValidateStartTime.Add(defaultMinStakingDuration).Unix())+1, // end time + nodeID, // node ID + testSubnet1.ID(), // subnet ID []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], preFundedKeys[1]}, ids.ShortEmpty, // change addr + nil, ) require.NoError(err) // Replace a valid signature with one from keys[3] sig, err := preFundedKeys[3].SignHash(hashing.ComputeHash256(tx.Unsigned.Bytes())) require.NoError(err) - copy(tx.Creds[0].(*secp256k1fx.Credential).Sigs[0][:], sig) + 
copy(tx.Creds[1].(*secp256k1fx.Credential).Sigs[0][:], sig) onCommitState, err := state.NewDiff(lastAcceptedID, env) require.NoError(err) @@ -719,26 +681,29 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - require.Error(err, "should have failed verification because a control sig is invalid") + require.ErrorIs(err, errUnauthorizedSubnetModification) } { // Case: Proposed validator in pending validator set for subnet // First, add validator to pending validator set of subnet tx, err := env.txBuilder.NewAddSubnetValidatorTx( - defaultWeight, // weight - uint64(defaultGenesisTime.Unix())+1, // start time - uint64(defaultGenesisTime.Add(defaultMinStakingDuration).Unix())+1, // end time - ids.NodeID(nodeID), // node ID - testSubnet1.ID(), // subnet ID + defaultWeight, // weight + uint64(defaultValidateStartTime.Unix())+1, // start time + uint64(defaultValidateStartTime.Add(defaultMinStakingDuration).Unix())+1, // end time + nodeID, // node ID + testSubnet1.ID(), // subnet ID []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ids.ShortEmpty, // change addr + nil, ) require.NoError(err) + addSubnetValTx := subnetTx.Unsigned.(*txs.AddSubnetValidatorTx) staker, err = state.NewCurrentStaker( subnetTx.ID(), - subnetTx.Unsigned.(*txs.AddSubnetValidatorTx), + addSubnetValTx, + addSubnetValTx.StartTime(), 0, ) require.NoError(err) @@ -746,8 +711,7 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { env.state.PutCurrentValidator(staker) env.state.AddTx(tx, status.Committed) env.state.SetHeight(dummyHeight) - err = env.state.Commit() - require.NoError(err) + require.NoError(env.state.Commit()) onCommitState, err := state.NewDiff(lastAcceptedID, env) require.NoError(err) @@ -762,31 +726,31 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - require.Error(err, "should have failed verification because validator already in 
pending validator set of the specified subnet") + require.ErrorIs(err, ErrDuplicateValidator) } } func TestProposalTxExecuteAddValidator(t *testing.T) { require := require.New(t) - env := newEnvironment(false /*=postBanff*/, false /*=postCortina*/) + env := newEnvironment(t, apricotPhase5) env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - }() + defer env.ctx.Lock.Unlock() nodeID := ids.GenerateTestNodeID() + chainTime := env.state.GetTimestamp() { // Case: Validator's start time too early tx, err := env.txBuilder.NewAddValidatorTx( env.config.MinValidatorStake, - uint64(defaultValidateStartTime.Unix())-1, + uint64(chainTime.Unix()), uint64(defaultValidateEndTime.Unix()), nodeID, ids.ShortEmpty, reward.PercentDenominator, []*secp256k1.PrivateKey{preFundedKeys[0]}, ids.ShortEmpty, // change addr + nil, ) require.NoError(err) @@ -803,7 +767,7 @@ func TestProposalTxExecuteAddValidator(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - require.Error(err, "should've errored because start time too early") + require.ErrorIs(err, ErrTimestampNotBeforeStartTime) } { @@ -817,6 +781,7 @@ func TestProposalTxExecuteAddValidator(t *testing.T) { reward.PercentDenominator, []*secp256k1.PrivateKey{preFundedKeys[0]}, ids.ShortEmpty, // change addr + nil, ) require.NoError(err) @@ -833,20 +798,23 @@ func TestProposalTxExecuteAddValidator(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - require.Error(err, "should've errored because start time too far in the future") + require.ErrorIs(err, ErrFutureStakeTime) } { + nodeID := genesisNodeIDs[0] + // Case: Validator already validating primary network tx, err := env.txBuilder.NewAddValidatorTx( env.config.MinValidatorStake, - uint64(defaultValidateStartTime.Unix()), + uint64(defaultValidateStartTime.Unix())+1, uint64(defaultValidateEndTime.Unix()), nodeID, ids.ShortEmpty, reward.PercentDenominator, []*secp256k1.PrivateKey{preFundedKeys[0]}, ids.ShortEmpty, // change addr + nil, ) 
require.NoError(err) @@ -863,12 +831,12 @@ func TestProposalTxExecuteAddValidator(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - require.Error(err, "should've errored because validator already validating") + require.ErrorIs(err, ErrAlreadyValidator) } { // Case: Validator in pending validator set of primary network - startTime := defaultGenesisTime.Add(1 * time.Second) + startTime := defaultValidateStartTime.Add(1 * time.Second) tx, err := env.txBuilder.NewAddValidatorTx( env.config.MinValidatorStake, // stake amount uint64(startTime.Unix()), // start time @@ -878,22 +846,24 @@ func TestProposalTxExecuteAddValidator(t *testing.T) { reward.PercentDenominator, // shares []*secp256k1.PrivateKey{preFundedKeys[0]}, ids.ShortEmpty, // change addr + nil, ) require.NoError(err) + addValTx := tx.Unsigned.(*txs.AddValidatorTx) staker, err := state.NewCurrentStaker( tx.ID(), - tx.Unsigned.(*txs.AddValidatorTx), + addValTx, + addValTx.StartTime(), 0, ) require.NoError(err) - env.state.PutCurrentValidator(staker) + env.state.PutPendingValidator(staker) env.state.AddTx(tx, status.Committed) dummyHeight := uint64(1) env.state.SetHeight(dummyHeight) - err = env.state.Commit() - require.NoError(err) + require.NoError(env.state.Commit()) onCommitState, err := state.NewDiff(lastAcceptedID, env) require.NoError(err) @@ -908,20 +878,21 @@ func TestProposalTxExecuteAddValidator(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - require.Error(err, "should have failed because validator in pending validator set") + require.ErrorIs(err, ErrAlreadyValidator) } { // Case: Validator doesn't have enough tokens to cover stake amount tx, err := env.txBuilder.NewAddValidatorTx( // create the tx env.config.MinValidatorStake, - uint64(defaultValidateStartTime.Unix()), + uint64(defaultValidateStartTime.Unix())+1, uint64(defaultValidateEndTime.Unix()), - nodeID, + ids.GenerateTestNodeID(), ids.ShortEmpty, reward.PercentDenominator, []*secp256k1.PrivateKey{preFundedKeys[0]}, 
ids.ShortEmpty, // change addr + nil, ) require.NoError(err) @@ -946,6 +917,6 @@ func TestProposalTxExecuteAddValidator(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - require.Error(err, "should have failed because tx fee paying key has no funds") + require.ErrorIs(err, ErrFlowCheckFailed) } } diff --git a/avalanchego/vms/platformvm/txs/executor/reward_validator_test.go b/avalanchego/vms/platformvm/txs/executor/reward_validator_test.go index 24de2a0c..b3209ac0 100644 --- a/avalanchego/vms/platformvm/txs/executor/reward_validator_test.go +++ b/avalanchego/vms/platformvm/txs/executor/reward_validator_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package executor @@ -11,6 +11,7 @@ import ( "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/math" @@ -23,12 +24,18 @@ import ( "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) +func newRewardValidatorTx(t testing.TB, txID ids.ID) (*txs.Tx, error) { + utx := &txs.RewardValidatorTx{TxID: txID} + tx, err := txs.NewSigned(utx, txs.Codec, nil) + if err != nil { + return nil, err + } + return tx, tx.SyntacticVerify(snowtest.Context(t, snowtest.PChainID)) +} + func TestRewardValidatorTxExecuteOnCommit(t *testing.T) { require := require.New(t) - env := newEnvironment(false /*=postBanff*/, false /*=postCortina*/) - defer func() { - require.NoError(shutdownEnvironment(env)) - }() + env := newEnvironment(t, apricotPhase5) dummyHeight := uint64(1) currentStakerIterator, err := env.state.GetCurrentStakerIterator() @@ -43,7 +50,7 @@ func TestRewardValidatorTxExecuteOnCommit(t *testing.T) { stakerToRemoveTx := 
stakerToRemoveTxIntf.Unsigned.(*txs.AddValidatorTx) // Case 1: Chain timestamp is wrong - tx, err := env.txBuilder.NewRewardValidatorTx(stakerToRemove.TxID) + tx, err := newRewardValidatorTx(t, stakerToRemove.TxID) require.NoError(err) onCommitState, err := state.NewDiff(lastAcceptedID, env) @@ -58,13 +65,14 @@ func TestRewardValidatorTxExecuteOnCommit(t *testing.T) { Backend: &env.backend, Tx: tx, } - require.Error(tx.Unsigned.Visit(&txExecutor)) + err = tx.Unsigned.Visit(&txExecutor) + require.ErrorIs(err, ErrRemoveStakerTooEarly) // Advance chain timestamp to time that next validator leaves env.state.SetTimestamp(stakerToRemove.EndTime) // Case 2: Wrong validator - tx, err = env.txBuilder.NewRewardValidatorTx(ids.GenerateTestID()) + tx, err = newRewardValidatorTx(t, ids.GenerateTestID()) require.NoError(err) onCommitState, err = state.NewDiff(lastAcceptedID, env) @@ -79,10 +87,11 @@ func TestRewardValidatorTxExecuteOnCommit(t *testing.T) { Backend: &env.backend, Tx: tx, } - require.Error(tx.Unsigned.Visit(&txExecutor)) + err = tx.Unsigned.Visit(&txExecutor) + require.ErrorIs(err, ErrRemoveWrongStaker) // Case 3: Happy path - tx, err = env.txBuilder.NewRewardValidatorTx(stakerToRemove.TxID) + tx, err = newRewardValidatorTx(t, stakerToRemove.TxID) require.NoError(err) onCommitState, err = state.NewDiff(lastAcceptedID, env) @@ -126,10 +135,7 @@ func TestRewardValidatorTxExecuteOnCommit(t *testing.T) { func TestRewardValidatorTxExecuteOnAbort(t *testing.T) { require := require.New(t) - env := newEnvironment(false /*=postBanff*/, false /*=postCortina*/) - defer func() { - require.NoError(shutdownEnvironment(env)) - }() + env := newEnvironment(t, apricotPhase5) dummyHeight := uint64(1) currentStakerIterator, err := env.state.GetCurrentStakerIterator() @@ -144,7 +150,7 @@ func TestRewardValidatorTxExecuteOnAbort(t *testing.T) { stakerToRemoveTx := stakerToRemoveTxIntf.Unsigned.(*txs.AddValidatorTx) // Case 1: Chain timestamp is wrong - tx, err := 
env.txBuilder.NewRewardValidatorTx(stakerToRemove.TxID) + tx, err := newRewardValidatorTx(t, stakerToRemove.TxID) require.NoError(err) onCommitState, err := state.NewDiff(lastAcceptedID, env) @@ -159,13 +165,14 @@ func TestRewardValidatorTxExecuteOnAbort(t *testing.T) { Backend: &env.backend, Tx: tx, } - require.Error(tx.Unsigned.Visit(&txExecutor)) + err = tx.Unsigned.Visit(&txExecutor) + require.ErrorIs(err, ErrRemoveStakerTooEarly) // Advance chain timestamp to time that next validator leaves env.state.SetTimestamp(stakerToRemove.EndTime) // Case 2: Wrong validator - tx, err = env.txBuilder.NewRewardValidatorTx(ids.GenerateTestID()) + tx, err = newRewardValidatorTx(t, ids.GenerateTestID()) require.NoError(err) txExecutor = ProposalTxExecutor{ @@ -174,10 +181,11 @@ func TestRewardValidatorTxExecuteOnAbort(t *testing.T) { Backend: &env.backend, Tx: tx, } - require.Error(tx.Unsigned.Visit(&txExecutor)) + err = tx.Unsigned.Visit(&txExecutor) + require.ErrorIs(err, ErrRemoveWrongStaker) // Case 3: Happy path - tx, err = env.txBuilder.NewRewardValidatorTx(stakerToRemove.TxID) + tx, err = newRewardValidatorTx(t, stakerToRemove.TxID) require.NoError(err) onCommitState, err = state.NewDiff(lastAcceptedID, env) @@ -221,10 +229,7 @@ func TestRewardValidatorTxExecuteOnAbort(t *testing.T) { func TestRewardDelegatorTxExecuteOnCommitPreDelegateeDeferral(t *testing.T) { require := require.New(t) - env := newEnvironment(false /*=postBanff*/, false /*=postCortina*/) - defer func() { - require.NoError(shutdownEnvironment(env)) - }() + env := newEnvironment(t, apricotPhase5) dummyHeight := uint64(1) vdrRewardAddress := ids.GenerateTestShortID() @@ -243,6 +248,7 @@ func TestRewardDelegatorTxExecuteOnCommitPreDelegateeDeferral(t *testing.T) { reward.PercentDenominator/4, []*secp256k1.PrivateKey{preFundedKeys[0]}, ids.ShortEmpty, + nil, ) require.NoError(err) @@ -257,19 +263,24 @@ func TestRewardDelegatorTxExecuteOnCommitPreDelegateeDeferral(t *testing.T) { delRewardAddress, 
[]*secp256k1.PrivateKey{preFundedKeys[0]}, ids.ShortEmpty, // Change address + nil, ) require.NoError(err) + addValTx := vdrTx.Unsigned.(*txs.AddValidatorTx) vdrStaker, err := state.NewCurrentStaker( vdrTx.ID(), - vdrTx.Unsigned.(*txs.AddValidatorTx), + addValTx, + addValTx.StartTime(), 0, ) require.NoError(err) + addDelTx := delTx.Unsigned.(*txs.AddDelegatorTx) delStaker, err := state.NewCurrentStaker( delTx.ID(), - delTx.Unsigned.(*txs.AddDelegatorTx), + addDelTx, + addDelTx.StartTime(), 1000000, ) require.NoError(err) @@ -283,13 +294,10 @@ func TestRewardDelegatorTxExecuteOnCommitPreDelegateeDeferral(t *testing.T) { require.NoError(env.state.Commit()) // test validator stake - vdrSet, ok := env.config.Validators.Get(constants.PrimaryNetworkID) - require.True(ok) - - stake := vdrSet.GetWeight(vdrNodeID) + stake := env.config.Validators.GetWeight(constants.PrimaryNetworkID, vdrNodeID) require.Equal(env.config.MinValidatorStake+env.config.MinDelegatorStake, stake) - tx, err := env.txBuilder.NewRewardValidatorTx(delTx.ID()) + tx, err := newRewardValidatorTx(t, delTx.ID()) require.NoError(err) onCommitState, err := state.NewDiff(lastAcceptedID, env) @@ -304,13 +312,10 @@ func TestRewardDelegatorTxExecuteOnCommitPreDelegateeDeferral(t *testing.T) { Backend: &env.backend, Tx: tx, } - err = tx.Unsigned.Visit(&txExecutor) - require.NoError(err) + require.NoError(tx.Unsigned.Visit(&txExecutor)) - vdrDestSet := set.Set[ids.ShortID]{} - vdrDestSet.Add(vdrRewardAddress) - delDestSet := set.Set[ids.ShortID]{} - delDestSet.Add(delRewardAddress) + vdrDestSet := set.Of(vdrRewardAddress) + delDestSet := set.Of(delRewardAddress) expectedReward := uint64(1000000) @@ -341,15 +346,13 @@ func TestRewardDelegatorTxExecuteOnCommitPreDelegateeDeferral(t *testing.T) { require.Less(vdrReward, delReward, "the delegator's reward should be greater than the delegatee's because the delegatee's share is 25%") require.Equal(expectedReward, delReward+vdrReward, "expected total reward to be %d but 
is %d", expectedReward, delReward+vdrReward) - require.Equal(env.config.MinValidatorStake, vdrSet.GetWeight(vdrNodeID)) + stake = env.config.Validators.GetWeight(constants.PrimaryNetworkID, vdrNodeID) + require.Equal(env.config.MinValidatorStake, stake) } func TestRewardDelegatorTxExecuteOnCommitPostDelegateeDeferral(t *testing.T) { require := require.New(t) - env := newEnvironment(true /*=postBanff*/, true /*=postCortina*/) - defer func() { - require.NoError(shutdownEnvironment(env)) - }() + env := newEnvironment(t, cortina) dummyHeight := uint64(1) vdrRewardAddress := ids.GenerateTestShortID() @@ -368,6 +371,7 @@ func TestRewardDelegatorTxExecuteOnCommitPostDelegateeDeferral(t *testing.T) { reward.PercentDenominator/4, []*secp256k1.PrivateKey{preFundedKeys[0]}, ids.ShortEmpty, /*=changeAddr*/ + nil, ) require.NoError(err) @@ -382,21 +386,26 @@ func TestRewardDelegatorTxExecuteOnCommitPostDelegateeDeferral(t *testing.T) { delRewardAddress, []*secp256k1.PrivateKey{preFundedKeys[0]}, ids.ShortEmpty, /*=changeAddr*/ + nil, ) require.NoError(err) + addValTx := vdrTx.Unsigned.(*txs.AddValidatorTx) vdrRewardAmt := uint64(2000000) vdrStaker, err := state.NewCurrentStaker( vdrTx.ID(), - vdrTx.Unsigned.(*txs.AddValidatorTx), + addValTx, + time.Unix(int64(vdrStartTime), 0), vdrRewardAmt, ) require.NoError(err) + addDelTx := delTx.Unsigned.(*txs.AddDelegatorTx) delRewardAmt := uint64(1000000) delStaker, err := state.NewCurrentStaker( delTx.ID(), - delTx.Unsigned.(*txs.AddDelegatorTx), + addDelTx, + time.Unix(int64(delStartTime), 0), delRewardAmt, ) require.NoError(err) @@ -409,10 +418,8 @@ func TestRewardDelegatorTxExecuteOnCommitPostDelegateeDeferral(t *testing.T) { env.state.SetHeight(dummyHeight) require.NoError(env.state.Commit()) - vdrDestSet := set.Set[ids.ShortID]{} - vdrDestSet.Add(vdrRewardAddress) - delDestSet := set.Set[ids.ShortID]{} - delDestSet.Add(delRewardAddress) + vdrDestSet := set.Of(vdrRewardAddress) + delDestSet := set.Of(delRewardAddress) oldVdrBalance, 
err := avax.GetBalance(env.state, vdrDestSet) require.NoError(err) @@ -420,13 +427,10 @@ func TestRewardDelegatorTxExecuteOnCommitPostDelegateeDeferral(t *testing.T) { require.NoError(err) // test validator stake - vdrSet, ok := env.config.Validators.Get(constants.PrimaryNetworkID) - require.True(ok) - - stake := vdrSet.GetWeight(vdrNodeID) + stake := env.config.Validators.GetWeight(constants.PrimaryNetworkID, vdrNodeID) require.Equal(env.config.MinValidatorStake+env.config.MinDelegatorStake, stake) - tx, err := env.txBuilder.NewRewardValidatorTx(delTx.ID()) + tx, err := newRewardValidatorTx(t, delTx.ID()) require.NoError(err) // Create Delegator Diff @@ -442,8 +446,7 @@ func TestRewardDelegatorTxExecuteOnCommitPostDelegateeDeferral(t *testing.T) { Backend: &env.backend, Tx: tx, } - err = tx.Unsigned.Visit(&txExecutor) - require.NoError(err) + require.NoError(tx.Unsigned.Visit(&txExecutor)) // The delegator should be rewarded if the ProposalTx is committed. Since the // delegatee's share is 25%, we expect the delegator to receive 75% of the reward. 
@@ -457,8 +460,8 @@ func TestRewardDelegatorTxExecuteOnCommitPostDelegateeDeferral(t *testing.T) { utxo, err := onCommitState.GetUTXO(delRewardUTXOID.InputID()) require.NoError(err) - castUTXO, ok := utxo.Out.(*secp256k1fx.TransferOutput) - require.True(ok) + require.IsType(&secp256k1fx.TransferOutput{}, utxo.Out) + castUTXO := utxo.Out.(*secp256k1fx.TransferOutput) require.Equal(delRewardAmt*3/4, castUTXO.Amt, "expected delegator balance to increase by 3/4 of reward amount") require.True(delDestSet.Equals(castUTXO.AddressesSet()), "expected reward UTXO to be issued to delDestSet") @@ -475,7 +478,7 @@ func TestRewardDelegatorTxExecuteOnCommitPostDelegateeDeferral(t *testing.T) { env.state.SetHeight(dummyHeight) require.NoError(env.state.Commit()) - tx, err = env.txBuilder.NewRewardValidatorTx(vdrStaker.TxID) + tx, err = newRewardValidatorTx(t, vdrStaker.TxID) require.NoError(err) // Create Validator Diff @@ -505,8 +508,8 @@ func TestRewardDelegatorTxExecuteOnCommitPostDelegateeDeferral(t *testing.T) { utxo, err = onCommitState.GetUTXO(vdrRewardUTXOID.InputID()) require.NoError(err) - castUTXO, ok = utxo.Out.(*secp256k1fx.TransferOutput) - require.True(ok) + require.IsType(&secp256k1fx.TransferOutput{}, utxo.Out) + castUTXO = utxo.Out.(*secp256k1fx.TransferOutput) require.Equal(vdrRewardAmt, castUTXO.Amt, "expected validator to be rewarded") require.True(vdrDestSet.Equals(castUTXO.AddressesSet()), "expected reward UTXO to be issued to vdrDestSet") @@ -518,8 +521,8 @@ func TestRewardDelegatorTxExecuteOnCommitPostDelegateeDeferral(t *testing.T) { utxo, err = onCommitState.GetUTXO(onCommitVdrDelRewardUTXOID.InputID()) require.NoError(err) - castUTXO, ok = utxo.Out.(*secp256k1fx.TransferOutput) - require.True(ok) + require.IsType(&secp256k1fx.TransferOutput{}, utxo.Out) + castUTXO = utxo.Out.(*secp256k1fx.TransferOutput) require.Equal(delRewardAmt/4, castUTXO.Amt, "expected validator to be rewarded with accrued delegator rewards") 
require.True(vdrDestSet.Equals(castUTXO.AddressesSet()), "expected reward UTXO to be issued to vdrDestSet") @@ -531,8 +534,8 @@ func TestRewardDelegatorTxExecuteOnCommitPostDelegateeDeferral(t *testing.T) { utxo, err = onAbortState.GetUTXO(onAbortVdrDelRewardUTXOID.InputID()) require.NoError(err) - castUTXO, ok = utxo.Out.(*secp256k1fx.TransferOutput) - require.True(ok) + require.IsType(&secp256k1fx.TransferOutput{}, utxo.Out) + castUTXO = utxo.Out.(*secp256k1fx.TransferOutput) require.Equal(delRewardAmt/4, castUTXO.Amt, "expected validator to be rewarded with accrued delegator rewards") require.True(vdrDestSet.Equals(castUTXO.AddressesSet()), "expected reward UTXO to be issued to vdrDestSet") @@ -567,10 +570,7 @@ func TestRewardDelegatorTxExecuteOnCommitPostDelegateeDeferral(t *testing.T) { func TestRewardDelegatorTxAndValidatorTxExecuteOnCommitPostDelegateeDeferral(t *testing.T) { require := require.New(t) - env := newEnvironment(true /*=postBanff*/, true /*=postCortina*/) - defer func() { - require.NoError(shutdownEnvironment(env)) - }() + env := newEnvironment(t, cortina) dummyHeight := uint64(1) vdrRewardAddress := ids.GenerateTestShortID() @@ -589,6 +589,7 @@ func TestRewardDelegatorTxAndValidatorTxExecuteOnCommitPostDelegateeDeferral(t * reward.PercentDenominator/4, []*secp256k1.PrivateKey{preFundedKeys[0]}, ids.ShortEmpty, + nil, ) require.NoError(err) @@ -603,21 +604,26 @@ func TestRewardDelegatorTxAndValidatorTxExecuteOnCommitPostDelegateeDeferral(t * delRewardAddress, []*secp256k1.PrivateKey{preFundedKeys[0]}, ids.ShortEmpty, // Change address + nil, ) require.NoError(err) + addValTx := vdrTx.Unsigned.(*txs.AddValidatorTx) vdrRewardAmt := uint64(2000000) vdrStaker, err := state.NewCurrentStaker( vdrTx.ID(), - vdrTx.Unsigned.(*txs.AddValidatorTx), + addValTx, + addValTx.StartTime(), vdrRewardAmt, ) require.NoError(err) + addDelTx := delTx.Unsigned.(*txs.AddDelegatorTx) delRewardAmt := uint64(1000000) delStaker, err := state.NewCurrentStaker( delTx.ID(), - 
delTx.Unsigned.(*txs.AddDelegatorTx), + addDelTx, + time.Unix(int64(delStartTime), 0), delRewardAmt, ) require.NoError(err) @@ -630,17 +636,15 @@ func TestRewardDelegatorTxAndValidatorTxExecuteOnCommitPostDelegateeDeferral(t * env.state.SetHeight(dummyHeight) require.NoError(env.state.Commit()) - vdrDestSet := set.Set[ids.ShortID]{} - vdrDestSet.Add(vdrRewardAddress) - delDestSet := set.Set[ids.ShortID]{} - delDestSet.Add(delRewardAddress) + vdrDestSet := set.Of(vdrRewardAddress) + delDestSet := set.Of(delRewardAddress) oldVdrBalance, err := avax.GetBalance(env.state, vdrDestSet) require.NoError(err) oldDelBalance, err := avax.GetBalance(env.state, delDestSet) require.NoError(err) - tx, err := env.txBuilder.NewRewardValidatorTx(delTx.ID()) + tx, err := newRewardValidatorTx(t, delTx.ID()) require.NoError(err) // Create Delegator Diffs @@ -668,7 +672,7 @@ func TestRewardDelegatorTxAndValidatorTxExecuteOnCommitPostDelegateeDeferral(t * vdrOnAbortState, err := state.NewDiff(testID, env) require.NoError(err) - tx, err = env.txBuilder.NewRewardValidatorTx(vdrTx.ID()) + tx, err = newRewardValidatorTx(t, vdrTx.ID()) require.NoError(err) txExecutor = ProposalTxExecutor{ @@ -688,8 +692,8 @@ func TestRewardDelegatorTxAndValidatorTxExecuteOnCommitPostDelegateeDeferral(t * utxo, err := vdrOnAbortState.GetUTXO(onAbortVdrDelRewardUTXOID.InputID()) require.NoError(err) - castUTXO, ok := utxo.Out.(*secp256k1fx.TransferOutput) - require.True(ok) + require.IsType(&secp256k1fx.TransferOutput{}, utxo.Out) + castUTXO := utxo.Out.(*secp256k1fx.TransferOutput) require.Equal(delRewardAmt/4, castUTXO.Amt, "expected validator to be rewarded with accrued delegator rewards") require.True(vdrDestSet.Equals(castUTXO.AddressesSet()), "expected reward UTXO to be issued to vdrDestSet") @@ -727,10 +731,7 @@ func TestRewardDelegatorTxAndValidatorTxExecuteOnCommitPostDelegateeDeferral(t * func TestRewardDelegatorTxExecuteOnAbort(t *testing.T) { require := require.New(t) - env := newEnvironment(false 
/*=postBanff*/, false /*=postCortina*/) - defer func() { - require.NoError(shutdownEnvironment(env)) - }() + env := newEnvironment(t, apricotPhase5) dummyHeight := uint64(1) initialSupply, err := env.state.GetCurrentSupply(constants.PrimaryNetworkID) @@ -752,6 +753,7 @@ func TestRewardDelegatorTxExecuteOnAbort(t *testing.T) { reward.PercentDenominator/4, []*secp256k1.PrivateKey{preFundedKeys[0]}, ids.ShortEmpty, + nil, ) require.NoError(err) @@ -765,19 +767,24 @@ func TestRewardDelegatorTxExecuteOnAbort(t *testing.T) { delRewardAddress, []*secp256k1.PrivateKey{preFundedKeys[0]}, ids.ShortEmpty, + nil, ) require.NoError(err) + addValTx := vdrTx.Unsigned.(*txs.AddValidatorTx) vdrStaker, err := state.NewCurrentStaker( vdrTx.ID(), - vdrTx.Unsigned.(*txs.AddValidatorTx), + addValTx, + addValTx.StartTime(), 0, ) require.NoError(err) + addDelTx := delTx.Unsigned.(*txs.AddDelegatorTx) delStaker, err := state.NewCurrentStaker( delTx.ID(), - delTx.Unsigned.(*txs.AddDelegatorTx), + addDelTx, + addDelTx.StartTime(), 1000000, ) require.NoError(err) @@ -790,7 +797,7 @@ func TestRewardDelegatorTxExecuteOnAbort(t *testing.T) { env.state.SetHeight(dummyHeight) require.NoError(env.state.Commit()) - tx, err := env.txBuilder.NewRewardValidatorTx(delTx.ID()) + tx, err := newRewardValidatorTx(t, delTx.ID()) require.NoError(err) onCommitState, err := state.NewDiff(lastAcceptedID, env) @@ -805,13 +812,10 @@ func TestRewardDelegatorTxExecuteOnAbort(t *testing.T) { Backend: &env.backend, Tx: tx, } - err = tx.Unsigned.Visit(&txExecutor) - require.NoError(err) + require.NoError(tx.Unsigned.Visit(&txExecutor)) - vdrDestSet := set.Set[ids.ShortID]{} - vdrDestSet.Add(vdrRewardAddress) - delDestSet := set.Set[ids.ShortID]{} - delDestSet.Add(delRewardAddress) + vdrDestSet := set.Of(vdrRewardAddress) + delDestSet := set.Of(delRewardAddress) expectedReward := uint64(1000000) diff --git a/avalanchego/vms/platformvm/txs/executor/staker_tx_verification.go 
b/avalanchego/vms/platformvm/txs/executor/staker_tx_verification.go index 10cc0278..e95914cb 100644 --- a/avalanchego/vms/platformvm/txs/executor/staker_tx_verification.go +++ b/avalanchego/vms/platformvm/txs/executor/staker_tx_verification.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package executor @@ -6,39 +6,85 @@ package executor import ( "errors" "fmt" + "math" "time" - stdmath "math" - "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/math" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/ava-labs/avalanchego/vms/platformvm/txs" + + safemath "github.com/ava-labs/avalanchego/utils/math" ) var ( - errWeightTooSmall = errors.New("weight of this validator is too low") - errWeightTooLarge = errors.New("weight of this validator is too large") - errInsufficientDelegationFee = errors.New("staker charges an insufficient delegation fee") - errStakeTooShort = errors.New("staking period is too short") - errStakeTooLong = errors.New("staking period is too long") - errFlowCheckFailed = errors.New("flow check failed") - errFutureStakeTime = fmt.Errorf("staker is attempting to start staking more than %s ahead of the current chain time", MaxFutureStartTime) - errValidatorSubset = errors.New("all subnets' staking period must be a subset of the primary network") - errNotValidator = errors.New("isn't a current or pending validator") - errRemovePermissionlessValidator = errors.New("attempting to remove permissionless validator") - errStakeOverflow = errors.New("validator stake exceeds limit") - errOverDelegated = errors.New("validator would be over delegated") - errIsNotTransformSubnetTx = errors.New("is not a transform subnet tx") - 
errTimestampNotBeforeStartTime = errors.New("chain timestamp not before start time") - errDuplicateValidator = errors.New("duplicate validator") - errDelegateToPermissionedValidator = errors.New("delegation to permissioned validator") - errWrongStakedAssetID = errors.New("incorrect staked assetID") + ErrWeightTooSmall = errors.New("weight of this validator is too low") + ErrWeightTooLarge = errors.New("weight of this validator is too large") + ErrInsufficientDelegationFee = errors.New("staker charges an insufficient delegation fee") + ErrStakeTooShort = errors.New("staking period is too short") + ErrStakeTooLong = errors.New("staking period is too long") + ErrFlowCheckFailed = errors.New("flow check failed") + ErrFutureStakeTime = fmt.Errorf("staker is attempting to start staking more than %s ahead of the current chain time", MaxFutureStartTime) + ErrNotValidator = errors.New("isn't a current or pending validator") + ErrRemovePermissionlessValidator = errors.New("attempting to remove permissionless validator") + ErrStakeOverflow = errors.New("validator stake exceeds limit") + ErrPeriodMismatch = errors.New("proposed staking period is not inside dependant staking period") + ErrOverDelegated = errors.New("validator would be over delegated") + ErrIsNotTransformSubnetTx = errors.New("is not a transform subnet tx") + ErrTimestampNotBeforeStartTime = errors.New("chain timestamp not before start time") + ErrAlreadyValidator = errors.New("already a validator") + ErrDuplicateValidator = errors.New("duplicate validator") + ErrDelegateToPermissionedValidator = errors.New("delegation to permissioned validator") + ErrWrongStakedAssetID = errors.New("incorrect staked assetID") + ErrDurangoUpgradeNotActive = errors.New("attempting to use a Durango-upgrade feature prior to activation") + ErrAddValidatorTxPostDurango = errors.New("AddValidatorTx is not permitted post-Durango") + ErrAddDelegatorTxPostDurango = errors.New("AddDelegatorTx is not permitted post-Durango") ) +// 
verifySubnetValidatorPrimaryNetworkRequirements verifies the primary +// network requirements for [subnetValidator]. An error is returned if they +// are not fulfilled. +func verifySubnetValidatorPrimaryNetworkRequirements( + isDurangoActive bool, + chainState state.Chain, + subnetValidator txs.Validator, +) error { + primaryNetworkValidator, err := GetValidator(chainState, constants.PrimaryNetworkID, subnetValidator.NodeID) + if err == database.ErrNotFound { + return fmt.Errorf( + "%s %w of the primary network", + subnetValidator.NodeID, + ErrNotValidator, + ) + } + if err != nil { + return fmt.Errorf( + "failed to fetch the primary network validator for %s: %w", + subnetValidator.NodeID, + err, + ) + } + + // Ensure that the period this validator validates the specified subnet + // is a subset of the time they validate the primary network. + startTime := chainState.GetTimestamp() + if !isDurangoActive { + startTime = subnetValidator.StartTime() + } + if !txs.BoundedBy( + startTime, + subnetValidator.EndTime(), + primaryNetworkValidator.StartTime, + primaryNetworkValidator.EndTime, + ) { + return ErrPeriodMismatch + } + + return nil +} + // verifyAddValidatorTx carries out the validation for an AddValidatorTx. // It returns the tx outputs that should be returned if this validator is not // added to the staking set. 
@@ -51,35 +97,44 @@ func verifyAddValidatorTx( []*avax.TransferableOutput, error, ) { + currentTimestamp := chainState.GetTimestamp() + if backend.Config.IsDurangoActivated(currentTimestamp) { + return nil, ErrAddValidatorTxPostDurango + } + // Verify the tx is well-formed if err := sTx.SyntacticVerify(backend.Ctx); err != nil { return nil, err } - currentTimestamp := chainState.GetTimestamp() + if err := avax.VerifyMemoFieldLength(tx.Memo, false /*=isDurangoActive*/); err != nil { + return nil, err + } + minValidatorStake, maxValidatorStake, _, minDelegationFee, minStakeDuration, _, maxStakeDuration, minFutureStartTimeOffset, _, minStakeStartTime := GetCurrentInflationSettings(currentTimestamp, backend.Ctx.NetworkID, backend.Config) - duration := tx.Validator.Duration() + startTime := tx.StartTime() + duration := tx.EndTime().Sub(startTime) switch { case tx.Validator.Wght < minValidatorStake: // Ensure validator is staking at least the minimum amount - return nil, errWeightTooSmall + return nil, ErrWeightTooSmall case tx.Validator.Wght > maxValidatorStake: // Ensure validator isn't staking too much - return nil, errWeightTooLarge + return nil, ErrWeightTooLarge case tx.DelegationShares < minDelegationFee: // Ensure the validator fee is at least the minimum amount - return nil, errInsufficientDelegationFee + return nil, ErrInsufficientDelegationFee case duration < minStakeDuration: // Ensure staking length is not too short - return nil, errStakeTooShort + return nil, ErrStakeTooShort case duration > maxStakeDuration: // Ensure staking length is not too long - return nil, errStakeTooLong + return nil, ErrStakeTooLong } outs := make([]*avax.TransferableOutput, len(tx.Outs)+len(tx.StakeOuts)) @@ -90,15 +145,8 @@ func verifyAddValidatorTx( return outs, nil } - // Ensure the proposed validator starts after the current time - startTime := tx.StartTime() - if !currentTimestamp.Before(startTime) { - return nil, fmt.Errorf( - "%w: %s >= %s", - 
errTimestampNotBeforeStartTime, - currentTimestamp, - startTime, - ) + if err := verifyStakerStartTime(false /*=isDurangoActive*/, currentTimestamp, startTime); err != nil { + return nil, err } if !minStakeStartTime.Before(startTime) { @@ -112,8 +160,9 @@ func verifyAddValidatorTx( _, err := GetValidator(chainState, constants.PrimaryNetworkID, tx.Validator.NodeID) if err == nil { return nil, fmt.Errorf( - "attempted to issue duplicate validation for %s", + "%s is %w of the primary network", tx.Validator.NodeID, + ErrAlreadyValidator, ) } if err != database.ErrNotFound { @@ -135,24 +184,17 @@ func verifyAddValidatorTx( backend.Ctx.AVAXAssetID: backend.Config.AddPrimaryNetworkValidatorFee, }, ); err != nil { - return nil, fmt.Errorf("%w: %v", errFlowCheckFailed, err) + return nil, fmt.Errorf("%w: %w", ErrFlowCheckFailed, err) } - // Make sure the tx doesn't start too far in the future. This is done last - // to allow the verifier visitor to explicitly check for this error. - maxStartTime := currentTimestamp.Add(MaxFutureStartTime) - if startTime.After(maxStartTime) { - return nil, errFutureStakeTime - } - minStartTime := maxStartTime.Add(-minFutureStartTimeOffset) - if startTime.Before(minStartTime) { - return nil, fmt.Errorf( - "validator's start time (%s) at or before minStartTime (%s)", - startTime, - minStartTime, - ) + err = verifyMinFutureStartTimeOffset(currentTimestamp, startTime, minFutureStartTimeOffset) + if err != nil { + return nil, err } - return outs, nil + + // verifyStakerStartsSoon is checked last to allow + // the verifier visitor to explicitly check for this error. 
+ return outs, verifyStakerStartsSoon(false /*=isDurangoActive*/, currentTimestamp, startTime) } // verifyAddSubnetValidatorTx carries out the validation for an @@ -168,43 +210,50 @@ func verifyAddSubnetValidatorTx( return err } - // Flare does not allow creation of subnets - if constants.IsFlareNetworkID(backend.Ctx.NetworkID) || constants.IsSgbNetworkID(backend.Ctx.NetworkID) { - return errWrongTxType + var ( + currentTimestamp = chainState.GetTimestamp() + isDurangoActive = backend.Config.IsDurangoActivated(currentTimestamp) + ) + if err := avax.VerifyMemoFieldLength(tx.Memo, isDurangoActive); err != nil { + return err } - duration := tx.Validator.Duration() + // Flare does not allow creation of subnets before Durango + if !isDurangoActive && (constants.IsFlareNetworkID(backend.Ctx.NetworkID) || constants.IsSgbNetworkID(backend.Ctx.NetworkID)) { + return ErrWrongTxType + } + + startTime := currentTimestamp + if !isDurangoActive { + startTime = tx.StartTime() + } + duration := tx.EndTime().Sub(startTime) + switch { case duration < backend.Config.MinStakeDuration: // Ensure staking length is not too short - return errStakeTooShort + return ErrStakeTooShort case duration > backend.Config.MaxStakeDuration: // Ensure staking length is not too long - return errStakeTooLong + return ErrStakeTooLong } if !backend.Bootstrapped.Get() { return nil } - currentTimestamp := chainState.GetTimestamp() - // Ensure the proposed validator starts after the current timestamp - validatorStartTime := tx.StartTime() - if !currentTimestamp.Before(validatorStartTime) { - return fmt.Errorf( - "%w: %s >= %s", - errTimestampNotBeforeStartTime, - currentTimestamp, - validatorStartTime, - ) + if err := verifyStakerStartTime(isDurangoActive, currentTimestamp, startTime); err != nil { + return err } _, err := GetValidator(chainState, tx.SubnetValidator.Subnet, tx.Validator.NodeID) if err == nil { return fmt.Errorf( - "attempted to issue duplicate subnet validation for %s", + "attempted to issue 
%w for %s on subnet %s", + ErrDuplicateValidator, tx.Validator.NodeID, + tx.SubnetValidator.Subnet, ) } if err != database.ErrNotFound { @@ -215,19 +264,8 @@ func verifyAddSubnetValidatorTx( ) } - primaryNetworkValidator, err := GetValidator(chainState, constants.PrimaryNetworkID, tx.Validator.NodeID) - if err != nil { - return fmt.Errorf( - "failed to fetch the primary network validator for %s: %w", - tx.Validator.NodeID, - err, - ) - } - - // Ensure that the period this validator validates the specified subnet - // is a subset of the time they validate the primary network. - if !tx.Validator.BoundedBy(primaryNetworkValidator.StartTime, primaryNetworkValidator.EndTime) { - return errValidatorSubset + if err := verifySubnetValidatorPrimaryNetworkRequirements(isDurangoActive, chainState, tx.Validator); err != nil { + return err } baseTxCreds, err := verifyPoASubnetAuthorization(backend, chainState, sTx, tx.SubnetValidator.Subnet, tx.SubnetAuth) @@ -246,17 +284,12 @@ func verifyAddSubnetValidatorTx( backend.Ctx.AVAXAssetID: backend.Config.AddSubnetValidatorFee, }, ); err != nil { - return fmt.Errorf("%w: %v", errFlowCheckFailed, err) - } - - // Make sure the tx doesn't start too far in the future. This is done last - // to allow the verifier visitor to explicitly check for this error. - maxStartTime := currentTimestamp.Add(MaxFutureStartTime) - if validatorStartTime.After(maxStartTime) { - return errFutureStakeTime + return fmt.Errorf("%w: %w", ErrFlowCheckFailed, err) } - return nil + // verifyStakerStartsSoon is checked last to allow + // the verifier visitor to explicitly check for this error. + return verifyStakerStartsSoon(isDurangoActive, currentTimestamp, startTime) } // Returns the representation of [tx.NodeID] validating [tx.Subnet]. @@ -267,7 +300,7 @@ func verifyAddSubnetValidatorTx( // * [sTx]'s creds authorize it to spend the stated inputs. // * [sTx]'s creds authorize it to remove a validator from [tx.Subnet]. // * The flow checker passes. 
-func removeSubnetValidatorValidation( +func verifyRemoveSubnetValidatorTx( backend *Backend, chainState state.Chain, sTx *txs.Tx, @@ -278,6 +311,14 @@ func removeSubnetValidatorValidation( return nil, false, err } + var ( + currentTimestamp = chainState.GetTimestamp() + isDurangoActive = backend.Config.IsDurangoActivated(currentTimestamp) + ) + if err := avax.VerifyMemoFieldLength(tx.Memo, isDurangoActive); err != nil { + return nil, false, err + } + isCurrentValidator := true vdr, err := chainState.GetCurrentValidator(tx.Subnet, tx.NodeID) if err == database.ErrNotFound { @@ -287,17 +328,16 @@ func removeSubnetValidatorValidation( if err != nil { // It isn't a current or pending validator. return nil, false, fmt.Errorf( - "%s %w of %s: %v", + "%s %w of %s: %w", tx.NodeID, - errNotValidator, + ErrNotValidator, tx.Subnet, err, ) } - if vdr.Priority != txs.SubnetPermissionedValidatorCurrentPriority && - vdr.Priority != txs.SubnetPermissionedValidatorPendingPriority { - return nil, false, errRemovePermissionlessValidator + if !vdr.Priority.IsPermissionedValidator() { + return nil, false, ErrRemovePermissionlessValidator } if !backend.Bootstrapped.Get() { @@ -321,7 +361,7 @@ func removeSubnetValidatorValidation( backend.Ctx.AVAXAssetID: backend.Config.TxFee, }, ); err != nil { - return nil, false, fmt.Errorf("%w: %v", errFlowCheckFailed, err) + return nil, false, fmt.Errorf("%w: %w", ErrFlowCheckFailed, err) } return vdr, isCurrentValidator, nil @@ -339,27 +379,38 @@ func verifyAddDelegatorTx( []*avax.TransferableOutput, error, ) { + currentTimestamp := chainState.GetTimestamp() + if backend.Config.IsDurangoActivated(currentTimestamp) { + return nil, ErrAddDelegatorTxPostDurango + } + // Verify the tx is well-formed if err := sTx.SyntacticVerify(backend.Ctx); err != nil { return nil, err } - currentTimestamp := chainState.GetTimestamp() - _, maxValidatorStake, minDelegatorStake, _, _, minStakeDuration, maxStakeDuration, minFutureStartTimeOffset, 
maxValidatorWeightFactor, _ := GetCurrentInflationSettings(currentTimestamp, backend.Ctx.NetworkID, backend.Config) + if err := avax.VerifyMemoFieldLength(tx.Memo, false /*=isDurangoActive*/); err != nil { + return nil, err + } - duration := tx.Validator.Duration() + var ( + endTime = tx.EndTime() + startTime = tx.StartTime() + duration = endTime.Sub(startTime) + ) + _, maxValidatorStake, minDelegatorStake, _, _, minStakeDuration, maxStakeDuration, minFutureStartTimeOffset, maxValidatorWeightFactor, _ := GetCurrentInflationSettings(currentTimestamp, backend.Ctx.NetworkID, backend.Config) switch { case duration < minStakeDuration: // Ensure staking length is not too short - return nil, errStakeTooShort + return nil, ErrStakeTooShort case duration > maxStakeDuration: // Ensure staking length is not too long - return nil, errStakeTooLong + return nil, ErrStakeTooLong case tx.Validator.Wght < minDelegatorStake: // Ensure validator is staking at least the minimum amount - return nil, errWeightTooSmall + return nil, ErrWeightTooSmall } outs := make([]*avax.TransferableOutput, len(tx.Outs)+len(tx.StakeOuts)) @@ -370,15 +421,8 @@ func verifyAddDelegatorTx( return outs, nil } - // Ensure the proposed validator starts after the current timestamp - validatorStartTime := tx.StartTime() - if !currentTimestamp.Before(validatorStartTime) { - return nil, fmt.Errorf( - "%w: %s >= %s", - errTimestampNotBeforeStartTime, - currentTimestamp, - validatorStartTime, - ) + if err := verifyStakerStartTime(false /*=isDurangoActive*/, currentTimestamp, startTime); err != nil { + return nil, err } primaryNetworkValidator, err := GetValidator(chainState, constants.PrimaryNetworkID, tx.Validator.NodeID) @@ -390,27 +434,36 @@ func verifyAddDelegatorTx( ) } - maximumWeight, err := math.Mul64(maxValidatorWeightFactor, primaryNetworkValidator.Weight) + maximumWeight, err := safemath.Mul64(maxValidatorWeightFactor, primaryNetworkValidator.Weight) if err != nil { - return nil, errStakeOverflow + 
return nil, ErrStakeOverflow } if backend.Config.IsApricotPhase3Activated(currentTimestamp) { - maximumWeight = math.Min(maximumWeight, maxValidatorStake) + maximumWeight = min(maximumWeight, maxValidatorStake) } - txID := sTx.ID() - newStaker, err := state.NewPendingStaker(txID, tx) - if err != nil { - return nil, err + if !txs.BoundedBy( + startTime, + endTime, + primaryNetworkValidator.StartTime, + primaryNetworkValidator.EndTime, + ) { + return nil, ErrPeriodMismatch } - - canDelegate, err := canDelegate(chainState, primaryNetworkValidator, maximumWeight, newStaker) + overDelegated, err := overDelegated( + chainState, + primaryNetworkValidator, + maximumWeight, + tx.Validator.Wght, + startTime, + endTime, + ) if err != nil { return nil, err } - if !canDelegate { - return nil, errOverDelegated + if overDelegated { + return nil, ErrOverDelegated } // Verify the flowcheck @@ -424,24 +477,17 @@ func verifyAddDelegatorTx( backend.Ctx.AVAXAssetID: backend.Config.AddPrimaryNetworkDelegatorFee, }, ); err != nil { - return nil, fmt.Errorf("%w: %v", errFlowCheckFailed, err) + return nil, fmt.Errorf("%w: %w", ErrFlowCheckFailed, err) } - // Make sure the tx doesn't start too far in the future. This is done last - // to allow the verifier visitor to explicitly check for this error. - maxStartTime := currentTimestamp.Add(MaxFutureStartTime) - if validatorStartTime.After(maxStartTime) { - return nil, errFutureStakeTime - } - minStartTime := maxStartTime.Add(-minFutureStartTimeOffset) - if validatorStartTime.Before(minStartTime) { - return nil, fmt.Errorf( - "validator's start time (%s) at or before minStartTime (%s)", - validatorStartTime, - minStartTime, - ) + err = verifyMinFutureStartTimeOffset(currentTimestamp, startTime, minFutureStartTimeOffset) + if err != nil { + return nil, err } - return outs, nil + + // verifyStakerStartsSoon is checked last to allow + // the verifier visitor to explicitly check for this error. 
+ return outs, verifyStakerStartsSoon(false /*=isDurangoActive*/, currentTimestamp, startTime) } // verifyAddPermissionlessValidatorTx carries out the validation for an @@ -457,75 +503,72 @@ func verifyAddPermissionlessValidatorTx( return err } + var ( + currentTimestamp = chainState.GetTimestamp() + isDurangoActive = backend.Config.IsDurangoActivated(currentTimestamp) + ) + if err := avax.VerifyMemoFieldLength(tx.Memo, isDurangoActive); err != nil { + return err + } + if !backend.Bootstrapped.Get() { return nil } - currentTimestamp := chainState.GetTimestamp() if constants.IsFlareNetworkID(backend.Ctx.NetworkID) || constants.IsSgbNetworkID(backend.Ctx.NetworkID) { // Flare does not allow permissionless validator tx before Cortina if currentTimestamp.Before(backend.Config.CortinaTime) { - return errWrongTxType + return ErrWrongTxType } - // Flare does not allow creation of subnets - if tx.Subnet != constants.PrimaryNetworkID { - return errWrongTxType + // Flare does not allow creation of subnets before Durango + if !isDurangoActive && tx.Subnet != constants.PrimaryNetworkID { + return ErrWrongTxType } } - // Ensure the proposed validator starts after the current time - startTime := tx.StartTime() - if !currentTimestamp.Before(startTime) { - return fmt.Errorf( - "%w: %s >= %s", - errTimestampNotBeforeStartTime, - currentTimestamp, - startTime, - ) + startTime := currentTimestamp + if !isDurangoActive { + startTime = tx.StartTime() } + duration := tx.EndTime().Sub(startTime) - validatorRules, err := getValidatorRules(currentTimestamp, backend, chainState, tx.Subnet) - if err != nil { + if err := verifyStakerStartTime(isDurangoActive, currentTimestamp, startTime); err != nil { return err } - if !validatorRules.minStakeStartTime.Before(startTime) { - return fmt.Errorf( - "validator's start time (%s) at or before minStakeStartTime (%s)", - startTime, - validatorRules.minStakeStartTime, - ) + validatorRules, err := getValidatorRules(currentTimestamp, backend, chainState, 
tx.Subnet) + if err != nil { + return err } - duration := tx.Validator.Duration() stakedAssetID := tx.StakeOuts[0].AssetID() switch { case tx.Validator.Wght < validatorRules.minValidatorStake: // Ensure validator is staking at least the minimum amount - return errWeightTooSmall + return ErrWeightTooSmall case tx.Validator.Wght > validatorRules.maxValidatorStake: // Ensure validator isn't staking too much - return errWeightTooLarge + return ErrWeightTooLarge case tx.DelegationShares < validatorRules.minDelegationFee: // Ensure the validator fee is at least the minimum amount - return errInsufficientDelegationFee + return ErrInsufficientDelegationFee case duration < validatorRules.minStakeDuration: // Ensure staking length is not too short - return errStakeTooShort + return ErrStakeTooShort case duration > validatorRules.maxStakeDuration: // Ensure staking length is not too long - return errStakeTooLong + return ErrStakeTooLong case stakedAssetID != validatorRules.assetID: // Wrong assetID used return fmt.Errorf( "%w: %s != %s", - errWrongStakedAssetID, + ErrWrongStakedAssetID, validatorRules.assetID, stakedAssetID, ) @@ -535,7 +578,7 @@ func verifyAddPermissionlessValidatorTx( if err == nil { return fmt.Errorf( "%w: %s on %s", - errDuplicateValidator, + ErrDuplicateValidator, tx.Validator.NodeID, tx.Subnet, ) @@ -551,19 +594,8 @@ func verifyAddPermissionlessValidatorTx( var txFee uint64 if tx.Subnet != constants.PrimaryNetworkID { - primaryNetworkValidator, err := GetValidator(chainState, constants.PrimaryNetworkID, tx.Validator.NodeID) - if err != nil { - return fmt.Errorf( - "failed to fetch the primary network validator for %s: %w", - tx.Validator.NodeID, - err, - ) - } - - // Ensure that the period this validator validates the specified subnet - // is a subset of the time they validate the primary network. 
- if !tx.Validator.BoundedBy(primaryNetworkValidator.StartTime, primaryNetworkValidator.EndTime) { - return errValidatorSubset + if err := verifySubnetValidatorPrimaryNetworkRequirements(isDurangoActive, chainState, tx.Validator); err != nil { + return err } txFee = backend.Config.AddSubnetValidatorFee @@ -586,55 +618,12 @@ func verifyAddPermissionlessValidatorTx( backend.Ctx.AVAXAssetID: txFee, }, ); err != nil { - return fmt.Errorf("%w: %v", errFlowCheckFailed, err) - } - - // Make sure the tx doesn't start too far in the future. This is done last - // to allow the verifier visitor to explicitly check for this error. - maxStartTime := currentTimestamp.Add(MaxFutureStartTime) - if startTime.After(maxStartTime) { - return errFutureStakeTime + return fmt.Errorf("%w: %w", ErrFlowCheckFailed, err) } - return nil -} -type addValidatorRules struct { - assetID ids.ID - minValidatorStake uint64 - maxValidatorStake uint64 - minStakeDuration time.Duration - maxStakeDuration time.Duration - minDelegationFee uint32 - minStakeStartTime time.Time -} - -func getValidatorRules( - timestamp time.Time, - backend *Backend, - chainState state.Chain, - subnetID ids.ID, -) (*addValidatorRules, error) { - if subnetID == constants.PrimaryNetworkID { - return getCurrentValidatorRules(timestamp, backend), nil - } - - transformSubnetIntf, err := chainState.GetSubnetTransformation(subnetID) - if err != nil { - return nil, err - } - transformSubnet, ok := transformSubnetIntf.Unsigned.(*txs.TransformSubnetTx) - if !ok { - return nil, errIsNotTransformSubnetTx - } - - return &addValidatorRules{ - assetID: transformSubnet.AssetID, - minValidatorStake: transformSubnet.MinValidatorStake, - maxValidatorStake: transformSubnet.MaxValidatorStake, - minStakeDuration: time.Duration(transformSubnet.MinStakeDuration) * time.Second, - maxStakeDuration: time.Duration(transformSubnet.MaxStakeDuration) * time.Second, - minDelegationFee: transformSubnet.MinDelegationFee, - }, nil + // verifyStakerStartsSoon is 
checked last to allow + // the verifier visitor to explicitly check for this error. + return verifyStakerStartsSoon(isDurangoActive, currentTimestamp, startTime) } // verifyAddPermissionlessDelegatorTx carries out the validation for an @@ -650,24 +639,34 @@ func verifyAddPermissionlessDelegatorTx( return err } + var ( + currentTimestamp = chainState.GetTimestamp() + isDurangoActive = backend.Config.IsDurangoActivated(currentTimestamp) + ) + if err := avax.VerifyMemoFieldLength(tx.Memo, isDurangoActive); err != nil { + return err + } + if !backend.Bootstrapped.Get() { return nil } - currentTimestamp := chainState.GetTimestamp() // Flare does not allow permissionless delegator tx before Cortina if currentTimestamp.Before(backend.Config.CortinaTime) && (constants.IsFlareNetworkID(backend.Ctx.NetworkID) || constants.IsSgbNetworkID(backend.Ctx.NetworkID)) { - return errWrongTxType + return ErrWrongTxType } - // Ensure the proposed validator starts after the current timestamp - startTime := tx.StartTime() - if !currentTimestamp.Before(startTime) { - return fmt.Errorf( - "chain timestamp (%s) not before validator's start time (%s)", - currentTimestamp, - startTime, - ) + var ( + endTime = tx.EndTime() + startTime = currentTimestamp + ) + if !isDurangoActive { + startTime = tx.StartTime() + } + duration := endTime.Sub(startTime) + + if err := verifyStakerStartTime(isDurangoActive, currentTimestamp, startTime); err != nil { + return err } delegatorRules, err := getDelegatorRules(currentTimestamp, backend, chainState, tx.Subnet) @@ -675,26 +674,25 @@ func verifyAddPermissionlessDelegatorTx( return err } - duration := tx.Validator.Duration() stakedAssetID := tx.StakeOuts[0].AssetID() switch { case tx.Validator.Wght < delegatorRules.minDelegatorStake: // Ensure delegator is staking at least the minimum amount - return errWeightTooSmall + return ErrWeightTooSmall case duration < delegatorRules.minStakeDuration: // Ensure staking length is not too short - return errStakeTooShort 
+ return ErrStakeTooShort case duration > delegatorRules.maxStakeDuration: // Ensure staking length is not too long - return errStakeTooLong + return ErrStakeTooLong case stakedAssetID != delegatorRules.assetID: // Wrong assetID used return fmt.Errorf( "%w: %s != %s", - errWrongStakedAssetID, + ErrWrongStakedAssetID, delegatorRules.assetID, stakedAssetID, ) @@ -710,27 +708,36 @@ func verifyAddPermissionlessDelegatorTx( ) } - maximumWeight, err := math.Mul64( + maximumWeight, err := safemath.Mul64( uint64(delegatorRules.maxValidatorWeightFactor), validator.Weight, ) if err != nil { - maximumWeight = stdmath.MaxUint64 + maximumWeight = math.MaxUint64 } - maximumWeight = math.Min(maximumWeight, delegatorRules.maxValidatorStake) + maximumWeight = min(maximumWeight, delegatorRules.maxValidatorStake) - txID := sTx.ID() - newStaker, err := state.NewPendingStaker(txID, tx) - if err != nil { - return err + if !txs.BoundedBy( + startTime, + endTime, + validator.StartTime, + validator.EndTime, + ) { + return ErrPeriodMismatch } - - canDelegate, err := canDelegate(chainState, validator, maximumWeight, newStaker) + overDelegated, err := overDelegated( + chainState, + validator, + maximumWeight, + tx.Validator.Wght, + startTime, + endTime, + ) if err != nil { return err } - if !canDelegate { - return errOverDelegated + if overDelegated { + return ErrOverDelegated } outs := make([]*avax.TransferableOutput, len(tx.Outs)+len(tx.StakeOuts)) @@ -745,9 +752,8 @@ func verifyAddPermissionlessDelegatorTx( // AddSubnetValidatorTx. AddSubnetValidatorTx is the only // permissioned validator, so we verify this delegator is // pointing to a permissionless validator. 
- if validator.Priority == txs.SubnetPermissionedValidatorCurrentPriority || - validator.Priority == txs.SubnetPermissionedValidatorPendingPriority { - return errDelegateToPermissionedValidator + if validator.Priority.IsPermissionedValidator() { + return ErrDelegateToPermissionedValidator } txFee = backend.Config.AddSubnetDelegatorFee @@ -766,53 +772,108 @@ func verifyAddPermissionlessDelegatorTx( backend.Ctx.AVAXAssetID: txFee, }, ); err != nil { - return fmt.Errorf("%w: %v", errFlowCheckFailed, err) + return fmt.Errorf("%w: %w", ErrFlowCheckFailed, err) } - // Make sure the tx doesn't start too far in the future. This is done last - // to allow the verifier visitor to explicitly check for this error. - maxStartTime := currentTimestamp.Add(MaxFutureStartTime) - if startTime.After(maxStartTime) { - return errFutureStakeTime + // verifyStakerStartsSoon is checked last to allow + // the verifier visitor to explicitly check for this error. + return verifyStakerStartsSoon(isDurangoActive, currentTimestamp, startTime) +} + +// Returns an error if the given tx is invalid. +// The transaction is valid if: +// * [sTx]'s creds authorize it to spend the stated inputs. +// * [sTx]'s creds authorize it to transfer ownership of [tx.Subnet]. +// * The flow checker passes. +func verifyTransferSubnetOwnershipTx( + backend *Backend, + chainState state.Chain, + sTx *txs.Tx, + tx *txs.TransferSubnetOwnershipTx, +) error { + if !backend.Config.IsDurangoActivated(chainState.GetTimestamp()) { + return ErrDurangoUpgradeNotActive + } + + // Verify the tx is well-formed + if err := sTx.SyntacticVerify(backend.Ctx); err != nil { + return err + } + + if err := avax.VerifyMemoFieldLength(tx.Memo, true /*=isDurangoActive*/); err != nil { + return err + } + + if !backend.Bootstrapped.Get() { + // Not bootstrapped yet -- don't need to do full verification. 
+ return nil + } + + baseTxCreds, err := verifySubnetAuthorization(backend, chainState, sTx, tx.Subnet, tx.SubnetAuth) + if err != nil { + return err + } + + // Verify the flowcheck + if err := backend.FlowChecker.VerifySpend( + tx, + chainState, + tx.Ins, + tx.Outs, + baseTxCreds, + map[ids.ID]uint64{ + backend.Ctx.AVAXAssetID: backend.Config.TxFee, + }, + ); err != nil { + return fmt.Errorf("%w: %w", ErrFlowCheckFailed, err) } return nil } -type addDelegatorRules struct { - assetID ids.ID - minDelegatorStake uint64 - maxValidatorStake uint64 - minStakeDuration time.Duration - maxStakeDuration time.Duration - maxValidatorWeightFactor byte -} +// Ensure the proposed validator starts after the current time +func verifyStakerStartTime(isDurangoActive bool, chainTime, stakerTime time.Time) error { + // Pre Durango activation, start time must be after current chain time. + // Post Durango activation, start time is not validated + if isDurangoActive { + return nil + } -func getDelegatorRules( - timestamp time.Time, - backend *Backend, - chainState state.Chain, - subnetID ids.ID, -) (*addDelegatorRules, error) { - if subnetID == constants.PrimaryNetworkID { - return getCurrentDelegatorRules(timestamp, backend), nil + if !chainTime.Before(stakerTime) { + return fmt.Errorf( + "%w: %s >= %s", + ErrTimestampNotBeforeStartTime, + chainTime, + stakerTime, + ) } + return nil +} - transformSubnetIntf, err := chainState.GetSubnetTransformation(subnetID) - if err != nil { - return nil, err +// For legacy addValidator and addDelegator transactions +func verifyMinFutureStartTimeOffset(chainTime, stakerStartTime time.Time, minFutureStartTimeOffset time.Duration) error { + maxStartTime := chainTime.Add(MaxFutureStartTime) + minStartTime := maxStartTime.Add(-minFutureStartTimeOffset) + if stakerStartTime.Before(minStartTime) { + return fmt.Errorf( + "validator's start time (%s) at or before minStartTime (%s)", + stakerStartTime, + minStartTime, + ) } - transformSubnet, ok := 
transformSubnetIntf.Unsigned.(*txs.TransformSubnetTx) - if !ok { - return nil, errIsNotTransformSubnetTx + return nil +} + +func verifyStakerStartsSoon(isDurangoActive bool, chainTime, stakerStartTime time.Time) error { + if isDurangoActive { + return nil } - return &addDelegatorRules{ - assetID: transformSubnet.AssetID, - minDelegatorStake: transformSubnet.MinDelegatorStake, - maxValidatorStake: transformSubnet.MaxValidatorStake, - minStakeDuration: time.Duration(transformSubnet.MinStakeDuration) * time.Second, - maxStakeDuration: time.Duration(transformSubnet.MaxStakeDuration) * time.Second, - maxValidatorWeightFactor: transformSubnet.MaxValidatorWeightFactor, - }, nil + // Make sure the tx doesn't start too far in the future. This is done last + // to allow the verifier visitor to explicitly check for this error. + maxStartTime := chainTime.Add(MaxFutureStartTime) + if stakerStartTime.After(maxStartTime) { + return ErrFutureStakeTime + } + return nil } diff --git a/avalanchego/vms/platformvm/txs/executor/staker_tx_verification_helpers.go b/avalanchego/vms/platformvm/txs/executor/staker_tx_verification_helpers.go new file mode 100644 index 00000000..6a9592a3 --- /dev/null +++ b/avalanchego/vms/platformvm/txs/executor/staker_tx_verification_helpers.go @@ -0,0 +1,261 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package executor + +import ( + "time" + + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/math" + "github.com/ava-labs/avalanchego/vms/platformvm/state" + "github.com/ava-labs/avalanchego/vms/platformvm/txs" +) + +type addValidatorRules struct { + assetID ids.ID + minValidatorStake uint64 + maxValidatorStake uint64 + minStakeDuration time.Duration + maxStakeDuration time.Duration + minDelegationFee uint32 + minStakeStartTime time.Time + minFutureStartTimeOffset time.Duration // For legacy addValidator transactions +} + +func getValidatorRules( + timestamp time.Time, + backend *Backend, + chainState state.Chain, + subnetID ids.ID, +) (*addValidatorRules, error) { + if subnetID == constants.PrimaryNetworkID { + return getCurrentValidatorRules(timestamp, backend), nil + } + + transformSubnet, err := GetTransformSubnetTx(chainState, subnetID) + if err != nil { + return nil, err + } + + return &addValidatorRules{ + assetID: transformSubnet.AssetID, + minValidatorStake: transformSubnet.MinValidatorStake, + maxValidatorStake: transformSubnet.MaxValidatorStake, + minStakeDuration: time.Duration(transformSubnet.MinStakeDuration) * time.Second, + maxStakeDuration: time.Duration(transformSubnet.MaxStakeDuration) * time.Second, + minDelegationFee: transformSubnet.MinDelegationFee, + }, nil +} + +type addDelegatorRules struct { + assetID ids.ID + minDelegatorStake uint64 + maxValidatorStake uint64 + minStakeDuration time.Duration + maxStakeDuration time.Duration + maxValidatorWeightFactor byte + minFutureStartTimeOffset time.Duration // For legacy addDelegator transactions +} + +func getDelegatorRules( + timestamp time.Time, + backend *Backend, + chainState state.Chain, + subnetID ids.ID, +) (*addDelegatorRules, error) { + if subnetID == constants.PrimaryNetworkID { + return getCurrentDelegatorRules(timestamp, backend), nil + } + + transformSubnet, 
err := GetTransformSubnetTx(chainState, subnetID) + if err != nil { + return nil, err + } + + return &addDelegatorRules{ + assetID: transformSubnet.AssetID, + minDelegatorStake: transformSubnet.MinDelegatorStake, + maxValidatorStake: transformSubnet.MaxValidatorStake, + minStakeDuration: time.Duration(transformSubnet.MinStakeDuration) * time.Second, + maxStakeDuration: time.Duration(transformSubnet.MaxStakeDuration) * time.Second, + maxValidatorWeightFactor: transformSubnet.MaxValidatorWeightFactor, + }, nil +} + +// GetNextStakerChangeTime returns the next time a staker will be either added +// or removed to/from the current validator set. +func GetNextStakerChangeTime(state state.Chain) (time.Time, error) { + currentStakerIterator, err := state.GetCurrentStakerIterator() + if err != nil { + return time.Time{}, err + } + defer currentStakerIterator.Release() + + pendingStakerIterator, err := state.GetPendingStakerIterator() + if err != nil { + return time.Time{}, err + } + defer pendingStakerIterator.Release() + + hasCurrentStaker := currentStakerIterator.Next() + hasPendingStaker := pendingStakerIterator.Next() + switch { + case hasCurrentStaker && hasPendingStaker: + nextCurrentTime := currentStakerIterator.Value().NextTime + nextPendingTime := pendingStakerIterator.Value().NextTime + if nextCurrentTime.Before(nextPendingTime) { + return nextCurrentTime, nil + } + return nextPendingTime, nil + case hasCurrentStaker: + return currentStakerIterator.Value().NextTime, nil + case hasPendingStaker: + return pendingStakerIterator.Value().NextTime, nil + default: + // Due to no initial stakers in genesis for Songbird networks + if state.GetNetworkID() == constants.SongbirdID || state.GetNetworkID() == constants.CostonID || state.GetNetworkID() == constants.LocalID { + return songbirdLatestStakingTime, nil + } + return time.Time{}, database.ErrNotFound + } +} + +// GetValidator returns information about the given validator, which may be a +// current validator or pending 
validator. +func GetValidator(state state.Chain, subnetID ids.ID, nodeID ids.NodeID) (*state.Staker, error) { + validator, err := state.GetCurrentValidator(subnetID, nodeID) + if err == nil { + // This node is currently validating the subnet. + return validator, nil + } + if err != database.ErrNotFound { + // Unexpected error occurred. + return nil, err + } + return state.GetPendingValidator(subnetID, nodeID) +} + +// overDelegated returns true if [validator] will be overdelegated when adding [delegator]. +// +// A [validator] would become overdelegated if: +// - the maximum total weight on [validator] exceeds [weightLimit] +func overDelegated( + state state.Chain, + validator *state.Staker, + weightLimit uint64, + delegatorWeight uint64, + delegatorStartTime time.Time, + delegatorEndTime time.Time, +) (bool, error) { + maxWeight, err := GetMaxWeight(state, validator, delegatorStartTime, delegatorEndTime) + if err != nil { + return true, err + } + newMaxWeight, err := math.Add64(maxWeight, delegatorWeight) + if err != nil { + return true, err + } + return newMaxWeight > weightLimit, nil +} + +// GetMaxWeight returns the maximum total weight of the [validator], including +// its own weight, between [startTime] and [endTime]. +// The weight changes are applied in the order they will be applied as chain +// time advances. +// Invariant: +// - [validator.StartTime] <= [startTime] < [endTime] <= [validator.EndTime] +func GetMaxWeight( + chainState state.Chain, + validator *state.Staker, + startTime time.Time, + endTime time.Time, +) (uint64, error) { + currentDelegatorIterator, err := chainState.GetCurrentDelegatorIterator(validator.SubnetID, validator.NodeID) + if err != nil { + return 0, err + } + + // TODO: We can optimize this by moving the current total weight to be + // stored in the validator state. 
+ // + // Calculate the current total weight on this validator, including the + // weight of the actual validator and the sum of the weights of all of the + // currently active delegators. + currentWeight := validator.Weight + for currentDelegatorIterator.Next() { + currentDelegator := currentDelegatorIterator.Value() + + currentWeight, err = math.Add64(currentWeight, currentDelegator.Weight) + if err != nil { + currentDelegatorIterator.Release() + return 0, err + } + } + currentDelegatorIterator.Release() + + currentDelegatorIterator, err = chainState.GetCurrentDelegatorIterator(validator.SubnetID, validator.NodeID) + if err != nil { + return 0, err + } + pendingDelegatorIterator, err := chainState.GetPendingDelegatorIterator(validator.SubnetID, validator.NodeID) + if err != nil { + currentDelegatorIterator.Release() + return 0, err + } + delegatorChangesIterator := state.NewStakerDiffIterator(currentDelegatorIterator, pendingDelegatorIterator) + defer delegatorChangesIterator.Release() + + // Iterate over the future stake weight changes and calculate the maximum + // total weight on the validator, only including the points in the time + // range [startTime, endTime]. + var currentMax uint64 + for delegatorChangesIterator.Next() { + delegator, isAdded := delegatorChangesIterator.Value() + // [delegator.NextTime] > [endTime] + if delegator.NextTime.After(endTime) { + // This delegation change (and all following changes) occurs after + // [endTime]. Since we're calculating the max amount staked in + // [startTime, endTime], we can stop. + break + } + + // [delegator.NextTime] >= [startTime] + if !delegator.NextTime.Before(startTime) { + // We have advanced time to be at the inside of the delegation + // window. Make sure that the max weight is updated accordingly. 
+ currentMax = max(currentMax, currentWeight) + } + + var op func(uint64, uint64) (uint64, error) + if isAdded { + op = math.Add64 + } else { + op = math.Sub[uint64] + } + currentWeight, err = op(currentWeight, delegator.Weight) + if err != nil { + return 0, err + } + } + // Because we assume [startTime] < [endTime], we have advanced time to + // be at the end of the delegation window. Make sure that the max weight is + // updated accordingly. + return max(currentMax, currentWeight), nil +} + +func GetTransformSubnetTx(chain state.Chain, subnetID ids.ID) (*txs.TransformSubnetTx, error) { + transformSubnetIntf, err := chain.GetSubnetTransformation(subnetID) + if err != nil { + return nil, err + } + + transformSubnet, ok := transformSubnetIntf.Unsigned.(*txs.TransformSubnetTx) + if !ok { + return nil, ErrIsNotTransformSubnetTx + } + + return transformSubnet, nil +} diff --git a/avalanchego/vms/platformvm/txs/executor/staker_tx_verification_test.go b/avalanchego/vms/platformvm/txs/executor/staker_tx_verification_test.go index 4c9d32af..3d963d8d 100644 --- a/avalanchego/vms/platformvm/txs/executor/staker_tx_verification_test.go +++ b/avalanchego/vms/platformvm/txs/executor/staker_tx_verification_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package executor @@ -7,13 +7,13 @@ import ( "testing" "time" - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/timer/mockable" @@ -27,6 +27,8 @@ import ( ) func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { + ctx := snowtest.Context(t, snowtest.PChainID) + type test struct { name string backendF func(*gomock.Controller) *Backend @@ -37,6 +39,12 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { } var ( + // in the following tests we set the fork time for forks we want active + // to activeForkTime, which is ensured to be before any other time related + // quantity (based on now) + activeForkTime = time.Unix(0, 0) + now = time.Now().Truncate(time.Second) // after activeForkTime + subnetID = ids.GenerateTestID() customAssetID = ids.GenerateTestID() unsignedTransformTx = &txs.TransformSubnetTx{ @@ -52,21 +60,24 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { Creds: []verify.Verifiable{}, } // This tx already passed syntactic verification. 
+ startTime = now.Add(time.Second) + endTime = startTime.Add(time.Second * time.Duration(unsignedTransformTx.MinStakeDuration)) verifiedTx = txs.AddPermissionlessValidatorTx{ BaseTx: txs.BaseTx{ SyntacticallyVerified: true, BaseTx: avax.BaseTx{ - NetworkID: 1, - BlockchainID: ids.GenerateTestID(), + NetworkID: ctx.NetworkID, + BlockchainID: ctx.ChainID, Outs: []*avax.TransferableOutput{}, Ins: []*avax.TransferableInput{}, }, }, Validator: txs.Validator{ NodeID: ids.GenerateTestNodeID(), - Start: 1, - End: 1 + uint64(unsignedTransformTx.MinStakeDuration), - Wght: unsignedTransformTx.MinValidatorStake, + // Note: [Start] is not set here as it will be ignored + // Post-Durango in favor of the current chain time + End: uint64(endTime.Unix()), + Wght: unsignedTransformTx.MinValidatorStake, }, Subnet: subnetID, StakeOuts: []*avax.TransferableOutput{ @@ -98,7 +109,10 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { name: "fail syntactic verification", backendF: func(*gomock.Controller) *Backend { return &Backend{ - Ctx: snow.DefaultContextTest(), + Ctx: ctx, + Config: &config.Config{ + DurangoTime: activeForkTime, // activate latest fork + }, } }, stateF: func(*gomock.Controller) state.Chain { @@ -116,18 +130,23 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { name: "not bootstrapped", backendF: func(*gomock.Controller) *Backend { return &Backend{ - Ctx: snow.DefaultContextTest(), + Ctx: ctx, + Config: &config.Config{ + DurangoTime: activeForkTime, // activate latest fork + }, Bootstrapped: &utils.Atomic[bool]{}, } }, stateF: func(ctrl *gomock.Controller) state.Chain { - return nil + mockState := state.NewMockChain(ctrl) + mockState.EXPECT().GetTimestamp().Return(now) // chain time is after Durango fork activation since now.After(activeForkTime) + return mockState }, sTxF: func() *txs.Tx { return &verifiedSignedTx }, txF: func() *txs.AddPermissionlessValidatorTx { - return nil + return &txs.AddPermissionlessValidatorTx{} }, expectedErr: nil, }, 
@@ -137,7 +156,11 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { bootstrapped := &utils.Atomic[bool]{} bootstrapped.Set(true) return &Backend{ - Ctx: snow.DefaultContextTest(), + Ctx: ctx, + Config: &config.Config{ + CortinaTime: activeForkTime, + DurangoTime: mockable.MaxTime, + }, Bootstrapped: bootstrapped, } }, @@ -152,7 +175,7 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { txF: func() *txs.AddPermissionlessValidatorTx { return &verifiedTx }, - expectedErr: errTimestampNotBeforeStartTime, + expectedErr: ErrTimestampNotBeforeStartTime, }, { name: "weight too low", @@ -160,13 +183,16 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { bootstrapped := &utils.Atomic[bool]{} bootstrapped.Set(true) return &Backend{ - Ctx: snow.DefaultContextTest(), + Ctx: ctx, + Config: &config.Config{ + DurangoTime: activeForkTime, // activate latest fork + }, Bootstrapped: bootstrapped, } }, stateF: func(ctrl *gomock.Controller) state.Chain { state := state.NewMockChain(ctrl) - state.EXPECT().GetTimestamp().Return(time.Unix(0, 0)) + state.EXPECT().GetTimestamp().Return(now) // chain time is after latest fork activation since now.After(activeForkTime) state.EXPECT().GetSubnetTransformation(subnetID).Return(&transformTx, nil) return state }, @@ -178,7 +204,7 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { tx.Validator.Wght = unsignedTransformTx.MinValidatorStake - 1 return &tx }, - expectedErr: errWeightTooSmall, + expectedErr: ErrWeightTooSmall, }, { name: "weight too high", @@ -186,13 +212,16 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { bootstrapped := &utils.Atomic[bool]{} bootstrapped.Set(true) return &Backend{ - Ctx: snow.DefaultContextTest(), + Ctx: ctx, + Config: &config.Config{ + DurangoTime: activeForkTime, // activate latest fork + }, Bootstrapped: bootstrapped, } }, stateF: func(ctrl *gomock.Controller) state.Chain { state := state.NewMockChain(ctrl) - 
state.EXPECT().GetTimestamp().Return(time.Unix(0, 0)) + state.EXPECT().GetTimestamp().Return(now) // chain time is after latest fork activation since now.After(activeForkTime) state.EXPECT().GetSubnetTransformation(subnetID).Return(&transformTx, nil) return state }, @@ -204,7 +233,7 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { tx.Validator.Wght = unsignedTransformTx.MaxValidatorStake + 1 return &tx }, - expectedErr: errWeightTooLarge, + expectedErr: ErrWeightTooLarge, }, { name: "insufficient delegation fee", @@ -212,13 +241,16 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { bootstrapped := &utils.Atomic[bool]{} bootstrapped.Set(true) return &Backend{ - Ctx: snow.DefaultContextTest(), + Ctx: ctx, + Config: &config.Config{ + DurangoTime: activeForkTime, // activate latest fork + }, Bootstrapped: bootstrapped, } }, stateF: func(ctrl *gomock.Controller) state.Chain { state := state.NewMockChain(ctrl) - state.EXPECT().GetTimestamp().Return(time.Unix(0, 0)) + state.EXPECT().GetTimestamp().Return(now) // chain time is after latest fork activation since now.After(activeForkTime) state.EXPECT().GetSubnetTransformation(subnetID).Return(&transformTx, nil) return state }, @@ -231,7 +263,7 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { tx.DelegationShares = unsignedTransformTx.MinDelegationFee - 1 return &tx }, - expectedErr: errInsufficientDelegationFee, + expectedErr: ErrInsufficientDelegationFee, }, { name: "duration too short", @@ -239,13 +271,16 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { bootstrapped := &utils.Atomic[bool]{} bootstrapped.Set(true) return &Backend{ - Ctx: snow.DefaultContextTest(), + Ctx: ctx, + Config: &config.Config{ + DurangoTime: activeForkTime, // activate latest fork + }, Bootstrapped: bootstrapped, } }, stateF: func(ctrl *gomock.Controller) state.Chain { state := state.NewMockChain(ctrl) - state.EXPECT().GetTimestamp().Return(time.Unix(0, 0)) + 
state.EXPECT().GetTimestamp().Return(now) // chain time is after latest fork activation since now.After(activeForkTime) state.EXPECT().GetSubnetTransformation(subnetID).Return(&transformTx, nil) return state }, @@ -256,12 +291,12 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { tx := verifiedTx // Note that this copies [verifiedTx] tx.Validator.Wght = unsignedTransformTx.MaxValidatorStake tx.DelegationShares = unsignedTransformTx.MinDelegationFee + // Note the duration is 1 less than the minimum - tx.Validator.Start = 1 - tx.Validator.End = uint64(unsignedTransformTx.MinStakeDuration) + tx.Validator.End = tx.Validator.Start + uint64(unsignedTransformTx.MinStakeDuration) - 1 return &tx }, - expectedErr: errStakeTooShort, + expectedErr: ErrStakeTooShort, }, { name: "duration too long", @@ -269,13 +304,16 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { bootstrapped := &utils.Atomic[bool]{} bootstrapped.Set(true) return &Backend{ - Ctx: snow.DefaultContextTest(), + Ctx: ctx, + Config: &config.Config{ + DurangoTime: activeForkTime, // activate latest fork + }, Bootstrapped: bootstrapped, } }, stateF: func(ctrl *gomock.Controller) state.Chain { state := state.NewMockChain(ctrl) - state.EXPECT().GetTimestamp().Return(time.Unix(0, 0)) + state.EXPECT().GetTimestamp().Return(time.Unix(1, 0)) // chain time is after fork activation since time.Unix(1, 0).After(activeForkTime) state.EXPECT().GetSubnetTransformation(subnetID).Return(&transformTx, nil) return state }, @@ -286,12 +324,12 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { tx := verifiedTx // Note that this copies [verifiedTx] tx.Validator.Wght = unsignedTransformTx.MaxValidatorStake tx.DelegationShares = unsignedTransformTx.MinDelegationFee + // Note the duration is more than the maximum - tx.Validator.Start = 1 - tx.Validator.End = 2 + uint64(unsignedTransformTx.MaxStakeDuration) + tx.Validator.End = uint64(unsignedTransformTx.MaxStakeDuration) + 2 return &tx }, - 
expectedErr: errStakeTooLong, + expectedErr: ErrStakeTooLong, }, { name: "wrong assetID", @@ -299,15 +337,18 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { bootstrapped := &utils.Atomic[bool]{} bootstrapped.Set(true) return &Backend{ - Ctx: snow.DefaultContextTest(), + Ctx: ctx, + Config: &config.Config{ + DurangoTime: activeForkTime, // activate latest fork + }, Bootstrapped: bootstrapped, } }, stateF: func(ctrl *gomock.Controller) state.Chain { - state := state.NewMockChain(ctrl) - state.EXPECT().GetTimestamp().Return(time.Unix(0, 0)) - state.EXPECT().GetSubnetTransformation(subnetID).Return(&transformTx, nil) - return state + mockState := state.NewMockChain(ctrl) + mockState.EXPECT().GetTimestamp().Return(now) // chain time is after latest fork activation since now.After(activeForkTime) + mockState.EXPECT().GetSubnetTransformation(subnetID).Return(&transformTx, nil) + return mockState }, sTxF: func() *txs.Tx { return &verifiedSignedTx @@ -323,7 +364,7 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { } return &tx }, - expectedErr: errWrongStakedAssetID, + expectedErr: ErrWrongStakedAssetID, }, { name: "duplicate validator", @@ -331,17 +372,20 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { bootstrapped := &utils.Atomic[bool]{} bootstrapped.Set(true) return &Backend{ - Ctx: snow.DefaultContextTest(), + Ctx: ctx, + Config: &config.Config{ + DurangoTime: activeForkTime, // activate latest fork + }, Bootstrapped: bootstrapped, } }, stateF: func(ctrl *gomock.Controller) state.Chain { - state := state.NewMockChain(ctrl) - state.EXPECT().GetTimestamp().Return(time.Unix(0, 0)) - state.EXPECT().GetSubnetTransformation(subnetID).Return(&transformTx, nil) + mockState := state.NewMockChain(ctrl) + mockState.EXPECT().GetTimestamp().Return(now) // chain time is after latest fork activation since now.After(activeForkTime) + mockState.EXPECT().GetSubnetTransformation(subnetID).Return(&transformTx, nil) // State says validator exists - 
state.EXPECT().GetCurrentValidator(subnetID, verifiedTx.NodeID()).Return(nil, nil) - return state + mockState.EXPECT().GetCurrentValidator(subnetID, verifiedTx.NodeID()).Return(nil, nil) + return mockState }, sTxF: func() *txs.Tx { return &verifiedSignedTx @@ -349,7 +393,7 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { txF: func() *txs.AddPermissionlessValidatorTx { return &verifiedTx }, - expectedErr: errDuplicateValidator, + expectedErr: ErrDuplicateValidator, }, { name: "validator not subset of primary network validator", @@ -357,20 +401,22 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { bootstrapped := &utils.Atomic[bool]{} bootstrapped.Set(true) return &Backend{ - Ctx: snow.DefaultContextTest(), + Ctx: ctx, + Config: &config.Config{ + DurangoTime: activeForkTime, // activate latest fork + }, Bootstrapped: bootstrapped, } }, stateF: func(ctrl *gomock.Controller) state.Chain { mockState := state.NewMockChain(ctrl) - mockState.EXPECT().GetTimestamp().Return(time.Unix(0, 0)) + mockState.EXPECT().GetTimestamp().Return(now).Times(2) // chain time is after latest fork activation since now.After(activeForkTime) mockState.EXPECT().GetSubnetTransformation(subnetID).Return(&transformTx, nil) mockState.EXPECT().GetCurrentValidator(subnetID, verifiedTx.NodeID()).Return(nil, database.ErrNotFound) mockState.EXPECT().GetPendingValidator(subnetID, verifiedTx.NodeID()).Return(nil, database.ErrNotFound) // Validator time isn't subset of primary network validator time primaryNetworkVdr := &state.Staker{ - StartTime: verifiedTx.StartTime().Add(time.Second), - EndTime: verifiedTx.EndTime(), + EndTime: verifiedTx.EndTime().Add(-1 * time.Second), } mockState.EXPECT().GetCurrentValidator(constants.PrimaryNetworkID, verifiedTx.NodeID()).Return(primaryNetworkVdr, nil) return mockState @@ -381,7 +427,7 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { txF: func() *txs.AddPermissionlessValidatorTx { return &verifiedTx }, - expectedErr: 
errValidatorSubset, + expectedErr: ErrPeriodMismatch, }, { name: "flow check fails", @@ -397,26 +443,26 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { gomock.Any(), gomock.Any(), gomock.Any(), - ).Return(errFlowCheckFailed) + ).Return(ErrFlowCheckFailed) return &Backend{ FlowChecker: flowChecker, Config: &config.Config{ AddSubnetValidatorFee: 1, + DurangoTime: activeForkTime, // activate latest fork, }, - Ctx: snow.DefaultContextTest(), + Ctx: ctx, Bootstrapped: bootstrapped, } }, stateF: func(ctrl *gomock.Controller) state.Chain { mockState := state.NewMockChain(ctrl) - mockState.EXPECT().GetTimestamp().Return(time.Unix(0, 0)) + mockState.EXPECT().GetTimestamp().Return(now).Times(2) // chain time is after latest fork activation since now.After(activeForkTime) mockState.EXPECT().GetSubnetTransformation(subnetID).Return(&transformTx, nil) mockState.EXPECT().GetCurrentValidator(subnetID, verifiedTx.NodeID()).Return(nil, database.ErrNotFound) mockState.EXPECT().GetPendingValidator(subnetID, verifiedTx.NodeID()).Return(nil, database.ErrNotFound) primaryNetworkVdr := &state.Staker{ - StartTime: verifiedTx.StartTime(), - EndTime: verifiedTx.EndTime(), + EndTime: mockable.MaxTime, } mockState.EXPECT().GetCurrentValidator(constants.PrimaryNetworkID, verifiedTx.NodeID()).Return(primaryNetworkVdr, nil) return mockState @@ -427,7 +473,7 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { txF: func() *txs.AddPermissionlessValidatorTx { return &verifiedTx }, - expectedErr: errFlowCheckFailed, + expectedErr: ErrFlowCheckFailed, }, { name: "starts too far in the future", @@ -448,15 +494,17 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { return &Backend{ FlowChecker: flowChecker, Config: &config.Config{ + CortinaTime: activeForkTime, + DurangoTime: mockable.MaxTime, AddSubnetValidatorFee: 1, }, - Ctx: snow.DefaultContextTest(), + Ctx: ctx, Bootstrapped: bootstrapped, } }, stateF: func(ctrl *gomock.Controller) state.Chain { mockState := 
state.NewMockChain(ctrl) - mockState.EXPECT().GetTimestamp().Return(time.Unix(0, 0)) + mockState.EXPECT().GetTimestamp().Return(now).Times(2) // chain time is Cortina fork activation since now.After(activeForkTime) mockState.EXPECT().GetSubnetTransformation(subnetID).Return(&transformTx, nil) mockState.EXPECT().GetCurrentValidator(subnetID, verifiedTx.NodeID()).Return(nil, database.ErrNotFound) mockState.EXPECT().GetPendingValidator(subnetID, verifiedTx.NodeID()).Return(nil, database.ErrNotFound) @@ -473,11 +521,11 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { txF: func() *txs.AddPermissionlessValidatorTx { // Note this copies [verifiedTx] tx := verifiedTx - tx.Validator.Start = uint64(MaxFutureStartTime.Seconds()) + 1 + tx.Validator.Start = uint64(now.Add(MaxFutureStartTime).Add(time.Second).Unix()) tx.Validator.End = tx.Validator.Start + uint64(unsignedTransformTx.MinStakeDuration) return &tx }, - expectedErr: errFutureStakeTime, + expectedErr: ErrFutureStakeTime, }, { name: "success", @@ -499,20 +547,20 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { FlowChecker: flowChecker, Config: &config.Config{ AddSubnetValidatorFee: 1, + DurangoTime: activeForkTime, // activate latest fork, }, - Ctx: snow.DefaultContextTest(), + Ctx: ctx, Bootstrapped: bootstrapped, } }, stateF: func(ctrl *gomock.Controller) state.Chain { mockState := state.NewMockChain(ctrl) - mockState.EXPECT().GetTimestamp().Return(time.Unix(0, 0)) + mockState.EXPECT().GetTimestamp().Return(now).Times(2) // chain time is after Durango fork activation since now.After(activeForkTime) mockState.EXPECT().GetSubnetTransformation(subnetID).Return(&transformTx, nil) mockState.EXPECT().GetCurrentValidator(subnetID, verifiedTx.NodeID()).Return(nil, database.ErrNotFound) mockState.EXPECT().GetPendingValidator(subnetID, verifiedTx.NodeID()).Return(nil, database.ErrNotFound) primaryNetworkVdr := &state.Staker{ - StartTime: time.Unix(0, 0), - EndTime: mockable.MaxTime, + EndTime: 
mockable.MaxTime, } mockState.EXPECT().GetCurrentValidator(constants.PrimaryNetworkID, verifiedTx.NodeID()).Return(primaryNetworkVdr, nil) return mockState @@ -530,7 +578,6 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { ctrl := gomock.NewController(t) - defer ctrl.Finish() var ( backend = tt.backendF(ctrl) @@ -582,13 +629,14 @@ func TestGetValidatorRules(t *testing.T) { return nil }, expectedRules: &addValidatorRules{ - assetID: avaxAssetID, - minValidatorStake: config.MinValidatorStake, - maxValidatorStake: config.MaxValidatorStake, - minStakeDuration: config.MinStakeDuration, - maxStakeDuration: config.MaxStakeDuration, - minDelegationFee: config.MinDelegationFee, - minStakeStartTime: time.Date(1970, time.January, 1, 0, 0, 0, 0, time.UTC), + assetID: avaxAssetID, + minValidatorStake: config.MinValidatorStake, + maxValidatorStake: config.MaxValidatorStake, + minStakeDuration: config.MinStakeDuration, + maxStakeDuration: config.MaxStakeDuration, + minDelegationFee: config.MinDelegationFee, + minStakeStartTime: time.Date(1970, time.January, 1, 0, 0, 0, 0, time.UTC), + minFutureStartTimeOffset: MaxFutureStartTime, }, }, { @@ -616,7 +664,7 @@ func TestGetValidatorRules(t *testing.T) { return state }, expectedRules: &addValidatorRules{}, - expectedErr: errIsNotTransformSubnetTx, + expectedErr: ErrIsNotTransformSubnetTx, }, { name: "subnet", @@ -641,8 +689,8 @@ func TestGetValidatorRules(t *testing.T) { assetID: customAssetID, minValidatorStake: config.MinValidatorStake, maxValidatorStake: config.MaxValidatorStake, - minStakeDuration: time.Duration(1337) * time.Second, - maxStakeDuration: time.Duration(42) * time.Second, + minStakeDuration: 1337 * time.Second, + maxStakeDuration: 42 * time.Second, minDelegationFee: config.MinDelegationFee, }, expectedErr: nil, @@ -653,7 +701,6 @@ func TestGetValidatorRules(t *testing.T) { t.Run(tt.name, func(t *testing.T) { require := require.New(t) ctrl := 
gomock.NewController(t) - defer ctrl.Finish() chainState := tt.chainStateF(ctrl) rules, err := getValidatorRules(time.Time{}, tt.backend, chainState, tt.subnetID) @@ -707,6 +754,7 @@ func TestGetDelegatorRules(t *testing.T) { minStakeDuration: config.MinStakeDuration, maxStakeDuration: config.MaxStakeDuration, maxValidatorWeightFactor: MaxValidatorWeightFactor, + minFutureStartTimeOffset: MaxFutureStartTime, }, }, { @@ -734,7 +782,7 @@ func TestGetDelegatorRules(t *testing.T) { return state }, expectedRules: &addDelegatorRules{}, - expectedErr: errIsNotTransformSubnetTx, + expectedErr: ErrIsNotTransformSubnetTx, }, { name: "subnet", @@ -761,8 +809,8 @@ func TestGetDelegatorRules(t *testing.T) { assetID: customAssetID, minDelegatorStake: config.MinDelegatorStake, maxValidatorStake: config.MaxValidatorStake, - minStakeDuration: time.Duration(1337) * time.Second, - maxStakeDuration: time.Duration(42) * time.Second, + minStakeDuration: 1337 * time.Second, + maxStakeDuration: 42 * time.Second, maxValidatorWeightFactor: 21, }, expectedErr: nil, @@ -772,7 +820,6 @@ func TestGetDelegatorRules(t *testing.T) { t.Run(tt.name, func(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() chainState := tt.chainStateF(ctrl) rules, err := getDelegatorRules(time.Time{}, tt.backend, chainState, tt.subnetID) diff --git a/avalanchego/vms/platformvm/txs/executor/standard_tx_executor.go b/avalanchego/vms/platformvm/txs/executor/standard_tx_executor.go index b498b4bb..aa3ea9a2 100644 --- a/avalanchego/vms/platformvm/txs/executor/standard_tx_executor.go +++ b/avalanchego/vms/platformvm/txs/executor/standard_tx_executor.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package executor @@ -9,8 +9,11 @@ import ( "fmt" "time" + "go.uber.org/zap" + "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/verify" @@ -21,8 +24,9 @@ import ( var ( _ txs.Visitor = (*StandardTxExecutor)(nil) - errEmptyNodeID = errors.New("validator nodeID cannot be empty") - errMaxStakeDurationTooLarge = errors.New("max stake duration must be less than or equal to the global max stake duration") + errEmptyNodeID = errors.New("validator nodeID cannot be empty") + errMaxStakeDurationTooLarge = errors.New("max stake duration must be less than or equal to the global max stake duration") + errMissingStartTimePreDurango = errors.New("staker transactions must have a StartTime pre-Durango") ) type StandardTxExecutor struct { @@ -38,11 +42,11 @@ type StandardTxExecutor struct { } func (*StandardTxExecutor) AdvanceTimeTx(*txs.AdvanceTimeTx) error { - return errWrongTxType + return ErrWrongTxType } func (*StandardTxExecutor) RewardValidatorTx(*txs.RewardValidatorTx) error { - return errWrongTxType + return ErrWrongTxType } func (e *StandardTxExecutor) CreateChainTx(tx *txs.CreateChainTx) error { @@ -50,14 +54,21 @@ func (e *StandardTxExecutor) CreateChainTx(tx *txs.CreateChainTx) error { return err } + var ( + currentTimestamp = e.State.GetTimestamp() + isDurangoActive = e.Config.IsDurangoActivated(currentTimestamp) + ) + if err := avax.VerifyMemoFieldLength(tx.Memo, isDurangoActive); err != nil { + return err + } + baseTxCreds, err := verifyPoASubnetAuthorization(e.Backend, e.State, e.Tx, tx.SubnetID, tx.SubnetAuth) if err != nil { return err } // Verify the flowcheck - timestamp := e.State.GetTimestamp() - createBlockchainTxFee := e.Config.GetCreateBlockchainTxFee(timestamp) + createBlockchainTxFee := 
e.Config.GetCreateBlockchainTxFee(currentTimestamp) if err := e.FlowChecker.VerifySpend( tx, e.State, @@ -94,9 +105,16 @@ func (e *StandardTxExecutor) CreateSubnetTx(tx *txs.CreateSubnetTx) error { return err } + var ( + currentTimestamp = e.State.GetTimestamp() + isDurangoActive = e.Config.IsDurangoActivated(currentTimestamp) + ) + if err := avax.VerifyMemoFieldLength(tx.Memo, isDurangoActive); err != nil { + return err + } + // Verify the flowcheck - timestamp := e.State.GetTimestamp() - createSubnetTxFee := e.Config.GetCreateSubnetTxFee(timestamp) + createSubnetTxFee := e.Config.GetCreateSubnetTxFee(currentTimestamp) if err := e.FlowChecker.VerifySpend( tx, e.State, @@ -118,6 +136,7 @@ func (e *StandardTxExecutor) CreateSubnetTx(tx *txs.CreateSubnetTx) error { avax.Produce(e.State, txID, tx.Outs) // Add the new subnet to the database e.State.AddSubnet(e.Tx) + e.State.SetSubnetOwner(txID, tx.Owner) return nil } @@ -126,6 +145,14 @@ func (e *StandardTxExecutor) ImportTx(tx *txs.ImportTx) error { return err } + var ( + currentTimestamp = e.State.GetTimestamp() + isDurangoActive = e.Config.IsDurangoActivated(currentTimestamp) + ) + if err := avax.VerifyMemoFieldLength(tx.Memo, isDurangoActive); err != nil { + return err + } + e.Inputs = set.NewSet[ids.ID](len(tx.ImportedInputs)) utxoIDs := make([][]byte, len(tx.ImportedInputs)) for i, in := range tx.ImportedInputs { @@ -135,7 +162,9 @@ func (e *StandardTxExecutor) ImportTx(tx *txs.ImportTx) error { utxoIDs[i] = utxoID[:] } - if e.Bootstrapped.Get() { + // Skip verification of the shared memory inputs if the other primary + // network chains are not guaranteed to be up-to-date. 
+ if e.Bootstrapped.Get() && !e.Config.PartialSyncPrimaryNetwork { if err := verify.SameSubnet(context.TODO(), e.Ctx, tx.SourceChain); err != nil { return err } @@ -186,6 +215,9 @@ func (e *StandardTxExecutor) ImportTx(tx *txs.ImportTx) error { // Produce the UTXOS avax.Produce(e.State, txID, tx.Outs) + // Note: We apply atomic requests even if we are not verifying atomic + // requests to ensure the shared state will be correct if we later start + // verifying the requests. e.AtomicRequests = map[ids.ID]*atomic.Requests{ tx.SourceChain: { RemoveRequests: utxoIDs, @@ -199,6 +231,14 @@ func (e *StandardTxExecutor) ExportTx(tx *txs.ExportTx) error { return err } + var ( + currentTimestamp = e.State.GetTimestamp() + isDurangoActive = e.Config.IsDurangoActivated(currentTimestamp) + ) + if err := avax.VerifyMemoFieldLength(tx.Memo, isDurangoActive); err != nil { + return err + } + outs := make([]*avax.TransferableOutput, len(tx.Outs)+len(tx.ExportedOutputs)) copy(outs, tx.Outs) copy(outs[len(tx.Outs):], tx.ExportedOutputs) @@ -230,6 +270,9 @@ func (e *StandardTxExecutor) ExportTx(tx *txs.ExportTx) error { // Produce the UTXOS avax.Produce(e.State, txID, tx.Outs) + // Note: We apply atomic requests even if we are not verifying atomic + // requests to ensure the shared state will be correct if we later start + // verifying the requests. 
elems := make([]*atomic.Element, len(tx.ExportedOutputs)) for i, out := range tx.ExportedOutputs { utxo := &avax.UTXO{ @@ -241,7 +284,7 @@ func (e *StandardTxExecutor) ExportTx(tx *txs.ExportTx) error { Out: out.Out, } - utxoBytes, err := txs.Codec.Marshal(txs.Version, utxo) + utxoBytes, err := txs.Codec.Marshal(txs.CodecVersion, utxo) if err != nil { return fmt.Errorf("failed to marshal UTXO: %w", err) } @@ -278,16 +321,22 @@ func (e *StandardTxExecutor) AddValidatorTx(tx *txs.AddValidatorTx) error { return err } - txID := e.Tx.ID() - newStaker, err := state.NewPendingStaker(txID, tx) - if err != nil { + if err := e.putStaker(tx); err != nil { return err } - e.State.PutPendingValidator(newStaker) + txID := e.Tx.ID() avax.Consume(e.State, tx.Ins) avax.Produce(e.State, txID, tx.Outs) + if e.Config.PartialSyncPrimaryNetwork && tx.Validator.NodeID == e.Ctx.NodeID { + e.Ctx.Log.Warn("verified transaction that would cause this node to become unhealthy", + zap.String("reason", "primary network is not being fully synced"), + zap.Stringer("txID", txID), + zap.String("txType", "addValidator"), + zap.Stringer("nodeID", tx.Validator.NodeID), + ) + } return nil } @@ -301,16 +350,13 @@ func (e *StandardTxExecutor) AddSubnetValidatorTx(tx *txs.AddSubnetValidatorTx) return err } - txID := e.Tx.ID() - newStaker, err := state.NewPendingStaker(txID, tx) - if err != nil { + if err := e.putStaker(tx); err != nil { return err } - e.State.PutPendingValidator(newStaker) + txID := e.Tx.ID() avax.Consume(e.State, tx.Ins) avax.Produce(e.State, txID, tx.Outs) - return nil } @@ -324,26 +370,23 @@ func (e *StandardTxExecutor) AddDelegatorTx(tx *txs.AddDelegatorTx) error { return err } - txID := e.Tx.ID() - newStaker, err := state.NewPendingStaker(txID, tx) - if err != nil { + if err := e.putStaker(tx); err != nil { return err } - e.State.PutPendingDelegator(newStaker) + txID := e.Tx.ID() avax.Consume(e.State, tx.Ins) avax.Produce(e.State, txID, tx.Outs) - return nil } // Verifies a 
[*txs.RemoveSubnetValidatorTx] and, if it passes, executes it on -// [e.State]. For verification rules, see [removeSubnetValidatorValidation]. -// This transaction will result in [tx.NodeID] being removed as a validator of +// [e.State]. For verification rules, see [verifyRemoveSubnetValidatorTx]. This +// transaction will result in [tx.NodeID] being removed as a validator of // [tx.SubnetID]. // Note: [tx.NodeID] may be either a current or pending validator. func (e *StandardTxExecutor) RemoveSubnetValidatorTx(tx *txs.RemoveSubnetValidatorTx) error { - staker, isCurrentValidator, err := removeSubnetValidatorValidation( + staker, isCurrentValidator, err := verifyRemoveSubnetValidatorTx( e.Backend, e.State, e.Tx, @@ -373,6 +416,14 @@ func (e *StandardTxExecutor) TransformSubnetTx(tx *txs.TransformSubnetTx) error return err } + var ( + currentTimestamp = e.State.GetTimestamp() + isDurangoActive = e.Config.IsDurangoActivated(currentTimestamp) + ) + if err := avax.VerifyMemoFieldLength(tx.Memo, isDurangoActive); err != nil { + return err + } + // Note: math.MaxInt32 * time.Second < math.MaxInt64 - so this can never // overflow. 
if time.Duration(tx.MaxStakeDuration)*time.Second > e.Backend.Config.MaxStakeDuration { @@ -424,16 +475,25 @@ func (e *StandardTxExecutor) AddPermissionlessValidatorTx(tx *txs.AddPermissionl return err } - txID := e.Tx.ID() - newStaker, err := state.NewPendingStaker(txID, tx) - if err != nil { + if err := e.putStaker(tx); err != nil { return err } - e.State.PutPendingValidator(newStaker) + txID := e.Tx.ID() avax.Consume(e.State, tx.Ins) avax.Produce(e.State, txID, tx.Outs) + if e.Config.PartialSyncPrimaryNetwork && + tx.Subnet == constants.PrimaryNetworkID && + tx.Validator.NodeID == e.Ctx.NodeID { + e.Ctx.Log.Warn("verified transaction that would cause this node to become unhealthy", + zap.String("reason", "primary network is not being fully synced"), + zap.Stringer("txID", txID), + zap.String("txType", "addPermissionlessValidator"), + zap.Stringer("nodeID", tx.Validator.NodeID), + ) + } + return nil } @@ -447,15 +507,139 @@ func (e *StandardTxExecutor) AddPermissionlessDelegatorTx(tx *txs.AddPermissionl return err } + if err := e.putStaker(tx); err != nil { + return err + } + txID := e.Tx.ID() - newStaker, err := state.NewPendingStaker(txID, tx) + avax.Consume(e.State, tx.Ins) + avax.Produce(e.State, txID, tx.Outs) + return nil +} + +// Verifies a [*txs.TransferSubnetOwnershipTx] and, if it passes, executes it on +// [e.State]. For verification rules, see [verifyTransferSubnetOwnershipTx]. +// This transaction will result in the ownership of [tx.Subnet] being transferred +// to [tx.Owner]. 
+func (e *StandardTxExecutor) TransferSubnetOwnershipTx(tx *txs.TransferSubnetOwnershipTx) error { + err := verifyTransferSubnetOwnershipTx( + e.Backend, + e.State, + e.Tx, + tx, + ) if err != nil { return err } - e.State.PutPendingDelegator(newStaker) + e.State.SetSubnetOwner(tx.Subnet, tx.Owner) + + txID := e.Tx.ID() + avax.Consume(e.State, tx.Ins) + avax.Produce(e.State, txID, tx.Outs) + return nil +} + +func (e *StandardTxExecutor) BaseTx(tx *txs.BaseTx) error { + if !e.Backend.Config.IsDurangoActivated(e.State.GetTimestamp()) { + return ErrDurangoUpgradeNotActive + } + + // Verify the tx is well-formed + if err := e.Tx.SyntacticVerify(e.Ctx); err != nil { + return err + } + + if err := avax.VerifyMemoFieldLength(tx.Memo, true /*=isDurangoActive*/); err != nil { + return err + } + + // Verify the flowcheck + if err := e.FlowChecker.VerifySpend( + tx, + e.State, + tx.Ins, + tx.Outs, + e.Tx.Creds, + map[ids.ID]uint64{ + e.Ctx.AVAXAssetID: e.Config.TxFee, + }, + ); err != nil { + return err + } + + txID := e.Tx.ID() + // Consume the UTXOS avax.Consume(e.State, tx.Ins) + // Produce the UTXOS avax.Produce(e.State, txID, tx.Outs) + return nil +} + +// Creates the staker as defined in [stakerTx] and adds it to [e.State]. +func (e *StandardTxExecutor) putStaker(stakerTx txs.Staker) error { + var ( + chainTime = e.State.GetTimestamp() + txID = e.Tx.ID() + staker *state.Staker + err error + ) + + if !e.Config.IsDurangoActivated(chainTime) { + // Pre-Durango, stakers set a future [StartTime] and are added to the + // pending staker set. They are promoted to the current staker set once + // the chain time reaches [StartTime]. + scheduledStakerTx, ok := stakerTx.(txs.ScheduledStaker) + if !ok { + return fmt.Errorf("%w: %T", errMissingStartTimePreDurango, stakerTx) + } + staker, err = state.NewPendingStaker(txID, scheduledStakerTx) + } else { + // Only calculate the potentialReward for permissionless stakers. 
+ // Recall that we only need to check if this is a permissioned + // validator as there are no permissioned delegators + var potentialReward uint64 + if !stakerTx.CurrentPriority().IsPermissionedValidator() { + subnetID := stakerTx.SubnetID() + currentSupply, err := e.State.GetCurrentSupply(subnetID) + if err != nil { + return err + } + + rewards, err := GetRewardsCalculator(e.Backend, e.State, subnetID) + if err != nil { + return err + } + + // Post-Durango, stakers are immediately added to the current staker + // set. Their [StartTime] is the current chain time. + stakeDuration := stakerTx.EndTime().Sub(chainTime) + potentialReward = rewards.Calculate( + stakeDuration, + stakerTx.Weight(), + currentSupply, + ) + + e.State.SetCurrentSupply(subnetID, currentSupply+potentialReward) + } + staker, err = state.NewCurrentStaker(txID, stakerTx, chainTime, potentialReward) + } + if err != nil { + return err + } + + switch priority := staker.Priority; { + case priority.IsCurrentValidator(): + e.State.PutCurrentValidator(staker) + case priority.IsCurrentDelegator(): + e.State.PutCurrentDelegator(staker) + case priority.IsPendingValidator(): + e.State.PutPendingValidator(staker) + case priority.IsPendingDelegator(): + e.State.PutPendingDelegator(staker) + default: + return fmt.Errorf("staker %s, unexpected priority %d", staker.TxID, priority) + } return nil } diff --git a/avalanchego/vms/platformvm/txs/executor/standard_tx_executor_test.go b/avalanchego/vms/platformvm/txs/executor/standard_tx_executor_test.go index 5a2581e4..69ad018c 100644 --- a/avalanchego/vms/platformvm/txs/executor/standard_tx_executor_test.go +++ b/avalanchego/vms/platformvm/txs/executor/standard_tx_executor_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package executor @@ -10,22 +10,24 @@ import ( "testing" "time" - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/hashing" + "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/verify" "github.com/ava-labs/avalanchego/vms/platformvm/config" "github.com/ava-labs/avalanchego/vms/platformvm/fx" "github.com/ava-labs/avalanchego/vms/platformvm/reward" + "github.com/ava-labs/avalanchego/vms/platformvm/signer" "github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/ava-labs/avalanchego/vms/platformvm/status" "github.com/ava-labs/avalanchego/vms/platformvm/txs" @@ -41,14 +43,12 @@ var errTest = errors.New("non-nil error") func TestStandardTxExecutorAddValidatorTxEmptyID(t *testing.T) { require := require.New(t) - env := newEnvironment(false /*=postBanff*/, false /*=postCortina*/) + env := newEnvironment(t, apricotPhase5) env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - }() + defer env.ctx.Lock.Unlock() chainTime := env.state.GetTimestamp() - startTime := defaultGenesisTime.Add(1 * time.Second) + startTime := defaultValidateStartTime.Add(1 * time.Second) tests := []struct { banffTime time.Time @@ -80,6 +80,7 @@ func TestStandardTxExecutorAddValidatorTxEmptyID(t *testing.T) { reward.PercentDenominator, []*secp256k1.PrivateKey{preFundedKeys[0]}, ids.ShortEmpty, // change addr + nil, ) require.NoError(err) @@ -99,30 +100,33 @@ func TestStandardTxExecutorAddValidatorTxEmptyID(t *testing.T) { func TestStandardTxExecutorAddDelegator(t 
*testing.T) { dummyHeight := uint64(1) rewardAddress := preFundedKeys[0].PublicKey().Address() - nodeID := ids.NodeID(rewardAddress) + nodeID := genesisNodeIDs[0] newValidatorID := ids.GenerateTestNodeID() - newValidatorStartTime := uint64(defaultValidateStartTime.Add(5 * time.Second).Unix()) - newValidatorEndTime := uint64(defaultValidateEndTime.Add(-5 * time.Second).Unix()) + newValidatorStartTime := defaultValidateStartTime.Add(5 * time.Second) + newValidatorEndTime := defaultValidateEndTime.Add(-5 * time.Second) // [addMinStakeValidator] adds a new validator to the primary network's // pending validator set with the minimum staking amount addMinStakeValidator := func(target *environment) { tx, err := target.txBuilder.NewAddValidatorTx( - target.config.MinValidatorStake, // stake amount - newValidatorStartTime, // start time - newValidatorEndTime, // end time - newValidatorID, // node ID - rewardAddress, // Reward Address - reward.PercentDenominator, // Shares + target.config.MinValidatorStake, // stake amount + uint64(newValidatorStartTime.Unix()), // start time + uint64(newValidatorEndTime.Unix()), // end time + newValidatorID, // node ID + rewardAddress, // Reward Address + reward.PercentDenominator, // Shares []*secp256k1.PrivateKey{preFundedKeys[0]}, ids.ShortEmpty, + nil, ) require.NoError(t, err) + addValTx := tx.Unsigned.(*txs.AddValidatorTx) staker, err := state.NewCurrentStaker( tx.ID(), - tx.Unsigned.(*txs.AddValidatorTx), + addValTx, + newValidatorStartTime, 0, ) require.NoError(t, err) @@ -130,28 +134,30 @@ func TestStandardTxExecutorAddDelegator(t *testing.T) { target.state.PutCurrentValidator(staker) target.state.AddTx(tx, status.Committed) target.state.SetHeight(dummyHeight) - err = target.state.Commit() - require.NoError(t, err) + require.NoError(t, target.state.Commit()) } // [addMaxStakeValidator] adds a new validator to the primary network's // pending validator set with the maximum staking amount addMaxStakeValidator := func(target 
*environment) { tx, err := target.txBuilder.NewAddValidatorTx( - target.config.MaxValidatorStake, // stake amount - newValidatorStartTime, // start time - newValidatorEndTime, // end time - newValidatorID, // node ID - rewardAddress, // Reward Address - reward.PercentDenominator, // Shared + target.config.MaxValidatorStake, // stake amount + uint64(newValidatorStartTime.Unix()), // start time + uint64(newValidatorEndTime.Unix()), // end time + newValidatorID, // node ID + rewardAddress, // Reward Address + reward.PercentDenominator, // Shared []*secp256k1.PrivateKey{preFundedKeys[0]}, ids.ShortEmpty, + nil, ) require.NoError(t, err) + addValTx := tx.Unsigned.(*txs.AddValidatorTx) staker, err := state.NewCurrentStaker( tx.ID(), - tx.Unsigned.(*txs.AddValidatorTx), + addValTx, + newValidatorStartTime, 0, ) require.NoError(t, err) @@ -159,127 +165,115 @@ func TestStandardTxExecutorAddDelegator(t *testing.T) { target.state.PutCurrentValidator(staker) target.state.AddTx(tx, status.Committed) target.state.SetHeight(dummyHeight) - err = target.state.Commit() - require.NoError(t, err) + require.NoError(t, target.state.Commit()) } - dummyH := newEnvironment(false /*=postBanff*/, false /*=postCortina*/) + dummyH := newEnvironment(t, apricotPhase5) currentTimestamp := dummyH.state.GetTimestamp() type test struct { - stakeAmount uint64 - startTime uint64 - endTime uint64 - nodeID ids.NodeID - rewardAddress ids.ShortID - feeKeys []*secp256k1.PrivateKey - setup func(*environment) - AP3Time time.Time - shouldErr bool - description string + description string + stakeAmount uint64 + startTime time.Time + endTime time.Time + nodeID ids.NodeID + rewardAddress ids.ShortID + feeKeys []*secp256k1.PrivateKey + setup func(*environment) + AP3Time time.Time + expectedExecutionErr error } tests := []test{ { - stakeAmount: dummyH.config.MinDelegatorStake, - startTime: uint64(defaultValidateStartTime.Unix()), - endTime: uint64(defaultValidateEndTime.Unix()) + 1, - nodeID: nodeID, - 
rewardAddress: rewardAddress, - feeKeys: []*secp256k1.PrivateKey{preFundedKeys[0]}, - setup: nil, - AP3Time: defaultGenesisTime, - shouldErr: true, - description: "validator stops validating primary network earlier than subnet", - }, - { - stakeAmount: dummyH.config.MinDelegatorStake, - startTime: uint64(currentTimestamp.Add(MaxFutureStartTime + time.Second).Unix()), - endTime: uint64(currentTimestamp.Add(MaxFutureStartTime * 2).Unix()), - nodeID: nodeID, - rewardAddress: rewardAddress, - feeKeys: []*secp256k1.PrivateKey{preFundedKeys[0]}, - setup: nil, - AP3Time: defaultGenesisTime, - shouldErr: true, - description: fmt.Sprintf("validator should not be added more than (%s) in the future", MaxFutureStartTime), + description: "validator stops validating earlier than delegator", + stakeAmount: dummyH.config.MinDelegatorStake, + startTime: defaultValidateStartTime.Add(time.Second), + endTime: defaultValidateEndTime.Add(time.Second), + nodeID: nodeID, + rewardAddress: rewardAddress, + feeKeys: []*secp256k1.PrivateKey{preFundedKeys[0]}, + setup: nil, + AP3Time: defaultGenesisTime, + expectedExecutionErr: ErrPeriodMismatch, }, { - stakeAmount: dummyH.config.MinDelegatorStake, - startTime: uint64(defaultValidateStartTime.Unix()), - endTime: uint64(defaultValidateEndTime.Unix()) + 1, - nodeID: nodeID, - rewardAddress: rewardAddress, - feeKeys: []*secp256k1.PrivateKey{preFundedKeys[0]}, - setup: nil, - AP3Time: defaultGenesisTime, - shouldErr: true, - description: "end time is after the primary network end time", + description: fmt.Sprintf("delegator should not be added more than (%s) in the future", MaxFutureStartTime), + stakeAmount: dummyH.config.MinDelegatorStake, + startTime: currentTimestamp.Add(MaxFutureStartTime + time.Second), + endTime: currentTimestamp.Add(MaxFutureStartTime + defaultMinStakingDuration + time.Second), + nodeID: nodeID, + rewardAddress: rewardAddress, + feeKeys: []*secp256k1.PrivateKey{preFundedKeys[0]}, + setup: nil, + AP3Time: 
defaultGenesisTime, + expectedExecutionErr: ErrFutureStakeTime, }, { - stakeAmount: dummyH.config.MinDelegatorStake, - startTime: uint64(defaultValidateStartTime.Add(5 * time.Second).Unix()), - endTime: uint64(defaultValidateEndTime.Add(-5 * time.Second).Unix()), - nodeID: newValidatorID, - rewardAddress: rewardAddress, - feeKeys: []*secp256k1.PrivateKey{preFundedKeys[0]}, - setup: nil, - AP3Time: defaultGenesisTime, - shouldErr: true, - description: "validator not in the current or pending validator sets of the subnet", + description: "validator not in the current or pending validator sets", + stakeAmount: dummyH.config.MinDelegatorStake, + startTime: defaultValidateStartTime.Add(5 * time.Second), + endTime: defaultValidateEndTime.Add(-5 * time.Second), + nodeID: newValidatorID, + rewardAddress: rewardAddress, + feeKeys: []*secp256k1.PrivateKey{preFundedKeys[0]}, + setup: nil, + AP3Time: defaultGenesisTime, + expectedExecutionErr: database.ErrNotFound, }, { - stakeAmount: dummyH.config.MinDelegatorStake, - startTime: newValidatorStartTime - 1, // start validating subnet before primary network - endTime: newValidatorEndTime, - nodeID: newValidatorID, - rewardAddress: rewardAddress, - feeKeys: []*secp256k1.PrivateKey{preFundedKeys[0]}, - setup: addMinStakeValidator, - AP3Time: defaultGenesisTime, - shouldErr: true, - description: "validator starts validating subnet before primary network", + description: "delegator starts before validator", + stakeAmount: dummyH.config.MinDelegatorStake, + startTime: newValidatorStartTime.Add(-1 * time.Second), // start validating subnet before primary network + endTime: newValidatorEndTime, + nodeID: newValidatorID, + rewardAddress: rewardAddress, + feeKeys: []*secp256k1.PrivateKey{preFundedKeys[0]}, + setup: addMinStakeValidator, + AP3Time: defaultGenesisTime, + expectedExecutionErr: ErrPeriodMismatch, }, { - stakeAmount: dummyH.config.MinDelegatorStake, - startTime: newValidatorStartTime, - endTime: newValidatorEndTime + 1, // 
stop validating subnet after stopping validating primary network - nodeID: newValidatorID, - rewardAddress: rewardAddress, - feeKeys: []*secp256k1.PrivateKey{preFundedKeys[0]}, - setup: addMinStakeValidator, - AP3Time: defaultGenesisTime, - shouldErr: true, - description: "validator stops validating primary network before subnet", + description: "delegator stops before validator", + stakeAmount: dummyH.config.MinDelegatorStake, + startTime: newValidatorStartTime, + endTime: newValidatorEndTime.Add(time.Second), // stop validating subnet after stopping validating primary network + nodeID: newValidatorID, + rewardAddress: rewardAddress, + feeKeys: []*secp256k1.PrivateKey{preFundedKeys[0]}, + setup: addMinStakeValidator, + AP3Time: defaultGenesisTime, + expectedExecutionErr: ErrPeriodMismatch, }, { - stakeAmount: dummyH.config.MinDelegatorStake, - startTime: newValidatorStartTime, // same start time as for primary network - endTime: newValidatorEndTime, // same end time as for primary network - nodeID: newValidatorID, - rewardAddress: rewardAddress, - feeKeys: []*secp256k1.PrivateKey{preFundedKeys[0]}, - setup: addMinStakeValidator, - AP3Time: defaultGenesisTime, - shouldErr: false, - description: "valid", + description: "valid", + stakeAmount: dummyH.config.MinDelegatorStake, + startTime: newValidatorStartTime, // same start time as for primary network + endTime: newValidatorEndTime, // same end time as for primary network + nodeID: newValidatorID, + rewardAddress: rewardAddress, + feeKeys: []*secp256k1.PrivateKey{preFundedKeys[0]}, + setup: addMinStakeValidator, + AP3Time: defaultGenesisTime, + expectedExecutionErr: nil, }, { - stakeAmount: dummyH.config.MinDelegatorStake, // weight - startTime: uint64(currentTimestamp.Unix()), // start time - endTime: uint64(defaultValidateEndTime.Unix()), // end time - nodeID: nodeID, // node ID - rewardAddress: rewardAddress, // Reward Address - feeKeys: []*secp256k1.PrivateKey{preFundedKeys[0]}, // tx fee payer - setup: nil, - 
AP3Time: defaultGenesisTime, - shouldErr: true, - description: "starts validating at current timestamp", + description: "starts delegating at current timestamp", + stakeAmount: dummyH.config.MinDelegatorStake, // weight + startTime: currentTimestamp, // start time + endTime: defaultValidateEndTime, // end time + nodeID: nodeID, // node ID + rewardAddress: rewardAddress, // Reward Address + feeKeys: []*secp256k1.PrivateKey{preFundedKeys[0]}, // tx fee payer + setup: nil, + AP3Time: defaultGenesisTime, + expectedExecutionErr: ErrTimestampNotBeforeStartTime, }, { + description: "tx fee paying key has no funds", stakeAmount: dummyH.config.MinDelegatorStake, // weight - startTime: uint64(defaultValidateStartTime.Unix()), // start time - endTime: uint64(defaultValidateEndTime.Unix()), // end time + startTime: defaultValidateStartTime.Add(time.Second), // start time + endTime: defaultValidateEndTime, // end time nodeID: nodeID, // node ID rewardAddress: rewardAddress, // Reward Address feeKeys: []*secp256k1.PrivateKey{preFundedKeys[1]}, // tx fee payer @@ -294,56 +288,52 @@ func TestStandardTxExecutorAddDelegator(t *testing.T) { target.state.DeleteUTXO(utxoID) } target.state.SetHeight(dummyHeight) - err = target.state.Commit() - require.NoError(t, err) + require.NoError(t, target.state.Commit()) }, - AP3Time: defaultGenesisTime, - shouldErr: true, - description: "tx fee paying key has no funds", + AP3Time: defaultGenesisTime, + expectedExecutionErr: ErrFlowCheckFailed, }, { - stakeAmount: dummyH.config.MinDelegatorStake, - startTime: newValidatorStartTime, // same start time as for primary network - endTime: newValidatorEndTime, // same end time as for primary network - nodeID: newValidatorID, - rewardAddress: rewardAddress, - feeKeys: []*secp256k1.PrivateKey{preFundedKeys[0]}, - setup: addMaxStakeValidator, - AP3Time: defaultValidateEndTime, - shouldErr: false, - description: "over delegation before AP3", + description: "over delegation before AP3", + stakeAmount: 
dummyH.config.MinDelegatorStake, + startTime: newValidatorStartTime, // same start time as for primary network + endTime: newValidatorEndTime, // same end time as for primary network + nodeID: newValidatorID, + rewardAddress: rewardAddress, + feeKeys: []*secp256k1.PrivateKey{preFundedKeys[0]}, + setup: addMaxStakeValidator, + AP3Time: defaultValidateEndTime, + expectedExecutionErr: nil, }, { - stakeAmount: dummyH.config.MinDelegatorStake, - startTime: newValidatorStartTime, // same start time as for primary network - endTime: newValidatorEndTime, // same end time as for primary network - nodeID: newValidatorID, - rewardAddress: rewardAddress, - feeKeys: []*secp256k1.PrivateKey{preFundedKeys[0]}, - setup: addMaxStakeValidator, - AP3Time: defaultGenesisTime, - shouldErr: true, - description: "over delegation after AP3", + description: "over delegation after AP3", + stakeAmount: dummyH.config.MinDelegatorStake, + startTime: newValidatorStartTime, // same start time as for primary network + endTime: newValidatorEndTime, // same end time as for primary network + nodeID: newValidatorID, + rewardAddress: rewardAddress, + feeKeys: []*secp256k1.PrivateKey{preFundedKeys[0]}, + setup: addMaxStakeValidator, + AP3Time: defaultGenesisTime, + expectedExecutionErr: ErrOverDelegated, }, } for _, tt := range tests { t.Run(tt.description, func(t *testing.T) { require := require.New(t) - freshTH := newEnvironment(false /*=postBanff*/, false /*=postCortina*/) + freshTH := newEnvironment(t, apricotPhase5) freshTH.config.ApricotPhase3Time = tt.AP3Time - defer func() { - require.NoError(shutdownEnvironment(freshTH)) - }() tx, err := freshTH.txBuilder.NewAddDelegatorTx( tt.stakeAmount, - tt.startTime, - tt.endTime, + uint64(tt.startTime.Unix()), + uint64(tt.endTime.Unix()), tt.nodeID, tt.rewardAddress, tt.feeKeys, ids.ShortEmpty, + nil, ) require.NoError(err) @@ -362,51 +352,33 @@ func TestStandardTxExecutorAddDelegator(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - if 
tt.shouldErr { - require.Error(err) - } else { - require.NoError(err) - } - - mempoolExecutor := MempoolTxVerifier{ - Backend: &freshTH.backend, - ParentID: lastAcceptedID, - StateVersions: freshTH, - Tx: tx, - } - err = tx.Unsigned.Visit(&mempoolExecutor) - if tt.shouldErr { - require.Error(err) - } else { - require.NoError(err) - } + require.ErrorIs(err, tt.expectedExecutionErr) }) } } -func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { +func TestApricotStandardTxExecutorAddSubnetValidator(t *testing.T) { require := require.New(t) - env := newEnvironment(false /*=postBanff*/, false /*=postCortina*/) + env := newEnvironment(t, apricotPhase5) env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - }() + defer env.ctx.Lock.Unlock() - nodeID := preFundedKeys[0].PublicKey().Address() - env.config.BanffTime = env.state.GetTimestamp() + nodeID := genesisNodeIDs[0] { // Case: Proposed validator currently validating primary network // but stops validating subnet after stops validating primary network // (note that keys[0] is a genesis validator) + startTime := defaultValidateStartTime.Add(time.Second) tx, err := env.txBuilder.NewAddSubnetValidatorTx( defaultWeight, - uint64(defaultValidateStartTime.Unix()), + uint64(startTime.Unix()), uint64(defaultValidateEndTime.Unix())+1, - ids.NodeID(nodeID), + nodeID, testSubnet1.ID(), []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ids.ShortEmpty, // change addr + nil, ) require.NoError(err) @@ -419,7 +391,7 @@ func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - require.Error(err, "should have failed because validator stops validating primary network earlier than subnet") + require.ErrorIs(err, ErrPeriodMismatch) } { @@ -431,10 +403,11 @@ func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { defaultWeight, uint64(defaultValidateStartTime.Unix()+1), uint64(defaultValidateEndTime.Unix()), - 
ids.NodeID(nodeID), + nodeID, testSubnet1.ID(), []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ids.ShortEmpty, // change addr + nil, ) require.NoError(err) @@ -446,17 +419,12 @@ func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { State: onAcceptState, Tx: tx, } - err = tx.Unsigned.Visit(&executor) - require.NoError(err) + require.NoError(tx.Unsigned.Visit(&executor)) } // Add a validator to pending validator set of primary network - key, err := testKeyfactory.NewPrivateKey() - require.NoError(err) - - pendingDSValidatorID := ids.NodeID(key.PublicKey().Address()) - - // starts validating primary network 10 seconds after genesis + // Starts validating primary network 10 seconds after genesis + pendingDSValidatorID := ids.GenerateTestNodeID() dsStartTime := defaultGenesisTime.Add(10 * time.Second) dsEndTime := dsStartTime.Add(5 * defaultMinStakingDuration) @@ -465,10 +433,11 @@ func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { uint64(dsStartTime.Unix()), // start time uint64(dsEndTime.Unix()), // end time pendingDSValidatorID, // node ID - nodeID, // reward address + ids.GenerateTestShortID(), // reward address reward.PercentDenominator, // shares []*secp256k1.PrivateKey{preFundedKeys[0]}, ids.ShortEmpty, + nil, ) require.NoError(err) @@ -482,6 +451,7 @@ func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { testSubnet1.ID(), []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ids.ShortEmpty, // change addr + nil, ) require.NoError(err) @@ -494,12 +464,14 @@ func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - require.Error(err, "should have failed because validator not in the current or pending validator sets of the primary network") + require.ErrorIs(err, ErrNotValidator) } + addValTx := addDSTx.Unsigned.(*txs.AddValidatorTx) staker, err := state.NewCurrentStaker( addDSTx.ID(), - addDSTx.Unsigned.(*txs.AddValidatorTx), + 
addValTx, + dsStartTime, 0, ) require.NoError(err) @@ -508,8 +480,7 @@ func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { env.state.AddTx(addDSTx, status.Committed) dummyHeight := uint64(1) env.state.SetHeight(dummyHeight) - err = env.state.Commit() - require.NoError(err) + require.NoError(env.state.Commit()) // Node with ID key.PublicKey().Address() now a pending validator for primary network @@ -524,6 +495,7 @@ func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { testSubnet1.ID(), []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ids.ShortEmpty, // change addr + nil, ) require.NoError(err) @@ -536,7 +508,7 @@ func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - require.Error(err, "should have failed because validator starts validating primary network before starting to validate primary network") + require.ErrorIs(err, ErrPeriodMismatch) } { @@ -550,6 +522,7 @@ func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { testSubnet1.ID(), []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ids.ShortEmpty, // change addr + nil, ) require.NoError(err) @@ -562,7 +535,7 @@ func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - require.Error(err, "should have failed because validator stops validating primary network after stops validating primary network") + require.ErrorIs(err, ErrPeriodMismatch) } { @@ -576,6 +549,7 @@ func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { testSubnet1.ID(), []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ids.ShortEmpty, // change addr + nil, ) require.NoError(err) @@ -586,8 +560,7 @@ func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { State: onAcceptState, Tx: tx, } - err = tx.Unsigned.Visit(&executor) - require.NoError(err) + require.NoError(tx.Unsigned.Visit(&executor)) } // Case: Proposed validator 
start validating at/before current timestamp @@ -600,10 +573,11 @@ func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { defaultWeight, // weight uint64(newTimestamp.Unix()), // start time uint64(newTimestamp.Add(defaultMinStakingDuration).Unix()), // end time - ids.NodeID(nodeID), // node ID - testSubnet1.ID(), // subnet ID + nodeID, // node ID + testSubnet1.ID(), // subnet ID []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ids.ShortEmpty, // change addr + nil, ) require.NoError(err) @@ -616,7 +590,7 @@ func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - require.Error(err, "should have failed verification because starts validating at current timestamp") + require.ErrorIs(err, ErrTimestampNotBeforeStartTime) } // reset the timestamp @@ -628,16 +602,19 @@ func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { defaultWeight, // weight uint64(defaultValidateStartTime.Unix()), // start time uint64(defaultValidateEndTime.Unix()), // end time - ids.NodeID(nodeID), // node ID + nodeID, // node ID testSubnet1.ID(), // subnet ID []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ids.ShortEmpty, + nil, ) require.NoError(err) + addSubnetValTx := subnetTx.Unsigned.(*txs.AddSubnetValidatorTx) staker, err = state.NewCurrentStaker( subnetTx.ID(), - subnetTx.Unsigned.(*txs.AddSubnetValidatorTx), + addSubnetValTx, + defaultValidateStartTime, 0, ) require.NoError(err) @@ -645,19 +622,20 @@ func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { env.state.PutCurrentValidator(staker) env.state.AddTx(subnetTx, status.Committed) env.state.SetHeight(dummyHeight) - err = env.state.Commit() - require.NoError(err) + require.NoError(env.state.Commit()) { // Node with ID nodeIDKey.PublicKey().Address() now validating subnet with ID testSubnet1.ID + startTime := defaultValidateStartTime.Add(time.Second) duplicateSubnetTx, err := 
env.txBuilder.NewAddSubnetValidatorTx( - defaultWeight, // weight - uint64(defaultValidateStartTime.Unix()), // start time - uint64(defaultValidateEndTime.Unix()), // end time - ids.NodeID(nodeID), // node ID - testSubnet1.ID(), // subnet ID + defaultWeight, // weight + uint64(startTime.Unix()), // start time + uint64(defaultValidateEndTime.Unix()), // end time + nodeID, // node ID + testSubnet1.ID(), // subnet ID []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ids.ShortEmpty, // change addr + nil, ) require.NoError(err) @@ -670,27 +648,35 @@ func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { Tx: duplicateSubnetTx, } err = duplicateSubnetTx.Unsigned.Visit(&executor) - require.Error(err, "should have failed verification because validator already validating the specified subnet") + require.ErrorIs(err, ErrDuplicateValidator) } env.state.DeleteCurrentValidator(staker) env.state.SetHeight(dummyHeight) - err = env.state.Commit() - require.NoError(err) + require.NoError(env.state.Commit()) { - // Case: Too many signatures + // Case: Duplicate signatures + startTime := defaultValidateStartTime.Add(time.Second) tx, err := env.txBuilder.NewAddSubnetValidatorTx( - defaultWeight, // weight - uint64(defaultGenesisTime.Unix()), // start time - uint64(defaultGenesisTime.Add(defaultMinStakingDuration).Unix())+1, // end time - ids.NodeID(nodeID), // node ID - testSubnet1.ID(), // subnet ID + defaultWeight, // weight + uint64(startTime.Unix()), // start time + uint64(startTime.Add(defaultMinStakingDuration).Unix())+1, // end time + nodeID, // node ID + testSubnet1.ID(), // subnet ID []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1], testSubnet1ControlKeys[2]}, ids.ShortEmpty, // change addr + nil, ) require.NoError(err) + // Duplicate a signature + addSubnetValidatorTx := tx.Unsigned.(*txs.AddSubnetValidatorTx) + input := addSubnetValidatorTx.SubnetAuth.(*secp256k1fx.Input) + input.SigIndices = 
append(input.SigIndices, input.SigIndices[0]) + // This tx was syntactically verified when it was created...pretend it wasn't so we don't use cache + addSubnetValidatorTx.SyntacticallyVerified = false + onAcceptState, err := state.NewDiff(lastAcceptedID, env) require.NoError(err) @@ -700,19 +686,21 @@ func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - require.Error(err, "should have failed verification because tx has 3 signatures but only 2 needed") + require.ErrorIs(err, secp256k1fx.ErrInputIndicesNotSortedUnique) } { // Case: Too few signatures + startTime := defaultValidateStartTime.Add(time.Second) tx, err := env.txBuilder.NewAddSubnetValidatorTx( - defaultWeight, // weight - uint64(defaultGenesisTime.Unix()), // start time - uint64(defaultGenesisTime.Add(defaultMinStakingDuration).Unix()), // end time - ids.NodeID(nodeID), // node ID - testSubnet1.ID(), // subnet ID + defaultWeight, // weight + uint64(startTime.Unix()), // start time + uint64(startTime.Add(defaultMinStakingDuration).Unix()), // end time + nodeID, // node ID + testSubnet1.ID(), // subnet ID []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[2]}, ids.ShortEmpty, // change addr + nil, ) require.NoError(err) @@ -732,26 +720,28 @@ func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - require.Error(err, "should have failed verification because not enough control sigs") + require.ErrorIs(err, errUnauthorizedSubnetModification) } { // Case: Control Signature from invalid key (keys[3] is not a control key) + startTime := defaultValidateStartTime.Add(time.Second) tx, err := env.txBuilder.NewAddSubnetValidatorTx( - defaultWeight, // weight - uint64(defaultGenesisTime.Unix()), // start time - uint64(defaultGenesisTime.Add(defaultMinStakingDuration).Unix()), // end time - ids.NodeID(nodeID), // node ID - testSubnet1.ID(), // subnet ID + defaultWeight, // weight + 
uint64(startTime.Unix()), // start time + uint64(startTime.Add(defaultMinStakingDuration).Unix()), // end time + nodeID, // node ID + testSubnet1.ID(), // subnet ID []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], preFundedKeys[1]}, ids.ShortEmpty, // change addr + nil, ) require.NoError(err) // Replace a valid signature with one from keys[3] sig, err := preFundedKeys[3].SignHash(hashing.ComputeHash256(tx.Unsigned.Bytes())) require.NoError(err) - copy(tx.Creds[0].(*secp256k1fx.Credential).Sigs[0][:], sig) + copy(tx.Creds[1].(*secp256k1fx.Credential).Sigs[0][:], sig) onAcceptState, err := state.NewDiff(lastAcceptedID, env) require.NoError(err) @@ -762,26 +752,30 @@ func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - require.Error(err, "should have failed verification because a control sig is invalid") + require.ErrorIs(err, errUnauthorizedSubnetModification) } { // Case: Proposed validator in pending validator set for subnet // First, add validator to pending validator set of subnet + startTime := defaultValidateStartTime.Add(time.Second) tx, err := env.txBuilder.NewAddSubnetValidatorTx( - defaultWeight, // weight - uint64(defaultGenesisTime.Unix())+1, // start time - uint64(defaultGenesisTime.Add(defaultMinStakingDuration).Unix())+1, // end time - ids.NodeID(nodeID), // node ID - testSubnet1.ID(), // subnet ID + defaultWeight, // weight + uint64(startTime.Unix())+1, // start time + uint64(startTime.Add(defaultMinStakingDuration).Unix())+1, // end time + nodeID, // node ID + testSubnet1.ID(), // subnet ID []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ids.ShortEmpty, // change addr + nil, ) require.NoError(err) + addSubnetValTx := subnetTx.Unsigned.(*txs.AddSubnetValidatorTx) staker, err = state.NewCurrentStaker( subnetTx.ID(), - subnetTx.Unsigned.(*txs.AddSubnetValidatorTx), + addSubnetValTx, + defaultValidateStartTime, 0, ) require.NoError(err) @@ -789,8 +783,7 @@ func 
TestStandardTxExecutorAddSubnetValidator(t *testing.T) { env.state.PutCurrentValidator(staker) env.state.AddTx(tx, status.Committed) env.state.SetHeight(dummyHeight) - err = env.state.Commit() - require.NoError(err) + require.NoError(env.state.Commit()) onAcceptState, err := state.NewDiff(lastAcceptedID, env) require.NoError(err) @@ -801,22 +794,18 @@ func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - require.Error(err, "should have failed verification because validator already in pending validator set of the specified subnet") + require.ErrorIs(err, ErrDuplicateValidator) } } -func TestStandardTxExecutorAddValidator(t *testing.T) { +func TestBanffStandardTxExecutorAddValidator(t *testing.T) { require := require.New(t) - env := newEnvironment(false /*=postBanff*/, false /*=postCortina*/) + env := newEnvironment(t, banff) env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - }() + defer env.ctx.Lock.Unlock() nodeID := ids.GenerateTestNodeID() - env.config.BanffTime = env.state.GetTimestamp() - { // Case: Validator's start time too early tx, err := env.txBuilder.NewAddValidatorTx( @@ -828,6 +817,7 @@ func TestStandardTxExecutorAddValidator(t *testing.T) { reward.PercentDenominator, []*secp256k1.PrivateKey{preFundedKeys[0]}, ids.ShortEmpty, // change addr + nil, ) require.NoError(err) @@ -840,7 +830,7 @@ func TestStandardTxExecutorAddValidator(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - require.Error(err, "should've errored because start time too early") + require.ErrorIs(err, ErrTimestampNotBeforeStartTime) } { @@ -854,6 +844,7 @@ func TestStandardTxExecutorAddValidator(t *testing.T) { reward.PercentDenominator, []*secp256k1.PrivateKey{preFundedKeys[0]}, ids.ShortEmpty, // change addr + nil, ) require.NoError(err) @@ -866,38 +857,52 @@ func TestStandardTxExecutorAddValidator(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - require.Error(err, 
"should've errored because start time too far in the future") + require.ErrorIs(err, ErrFutureStakeTime) } { - // Case: Validator already validating primary network + // Case: Validator in current validator set of primary network + startTime := defaultValidateStartTime.Add(1 * time.Second) tx, err := env.txBuilder.NewAddValidatorTx( - env.config.MinValidatorStake, - uint64(defaultValidateStartTime.Unix()), - uint64(defaultValidateEndTime.Unix()), + env.config.MinValidatorStake, // stake amount + uint64(startTime.Unix()), // start time + uint64(startTime.Add(defaultMinStakingDuration).Unix()), // end time nodeID, ids.ShortEmpty, - reward.PercentDenominator, + reward.PercentDenominator, // shares []*secp256k1.PrivateKey{preFundedKeys[0]}, ids.ShortEmpty, // change addr + nil, + ) + require.NoError(err) + + addValTx := tx.Unsigned.(*txs.AddValidatorTx) + staker, err := state.NewCurrentStaker( + tx.ID(), + addValTx, + startTime, + 0, ) require.NoError(err) onAcceptState, err := state.NewDiff(lastAcceptedID, env) require.NoError(err) + onAcceptState.PutCurrentValidator(staker) + onAcceptState.AddTx(tx, status.Committed) + executor := StandardTxExecutor{ Backend: &env.backend, State: onAcceptState, Tx: tx, } err = tx.Unsigned.Visit(&executor) - require.Error(err, "should've errored because validator already validating") + require.ErrorIs(err, ErrAlreadyValidator) } { // Case: Validator in pending validator set of primary network - startTime := defaultGenesisTime.Add(1 * time.Second) + startTime := defaultValidateStartTime.Add(1 * time.Second) tx, err := env.txBuilder.NewAddValidatorTx( env.config.MinValidatorStake, // stake amount uint64(startTime.Unix()), // start time @@ -906,47 +911,45 @@ func TestStandardTxExecutorAddValidator(t *testing.T) { ids.ShortEmpty, reward.PercentDenominator, // shares []*secp256k1.PrivateKey{preFundedKeys[0]}, - ids.ShortEmpty, // change addr // key + ids.ShortEmpty, // change addr + nil, ) require.NoError(err) - staker, err := 
state.NewCurrentStaker( + staker, err := state.NewPendingStaker( tx.ID(), tx.Unsigned.(*txs.AddValidatorTx), - 0, ) require.NoError(err) - env.state.PutCurrentValidator(staker) - env.state.AddTx(tx, status.Committed) - dummyHeight := uint64(1) - env.state.SetHeight(dummyHeight) - err = env.state.Commit() - require.NoError(err) - onAcceptState, err := state.NewDiff(lastAcceptedID, env) require.NoError(err) + onAcceptState.PutPendingValidator(staker) + onAcceptState.AddTx(tx, status.Committed) + executor := StandardTxExecutor{ Backend: &env.backend, State: onAcceptState, Tx: tx, } err = tx.Unsigned.Visit(&executor) - require.Error(err, "should have failed because validator in pending validator set") + require.ErrorIs(err, ErrAlreadyValidator) } { // Case: Validator doesn't have enough tokens to cover stake amount + startTime := defaultValidateStartTime.Add(1 * time.Second) tx, err := env.txBuilder.NewAddValidatorTx( // create the tx env.config.MinValidatorStake, - uint64(defaultValidateStartTime.Unix()), - uint64(defaultValidateEndTime.Unix()), + uint64(startTime.Unix()), + uint64(startTime.Add(defaultMinStakingDuration).Unix()), nodeID, ids.ShortEmpty, reward.PercentDenominator, []*secp256k1.PrivateKey{preFundedKeys[0]}, ids.ShortEmpty, // change addr + nil, ) require.NoError(err) @@ -954,24 +957,469 @@ func TestStandardTxExecutorAddValidator(t *testing.T) { utxoIDs, err := env.state.UTXOIDs(preFundedKeys[0].PublicKey().Address().Bytes(), ids.Empty, math.MaxInt32) require.NoError(err) - for _, utxoID := range utxoIDs { - env.state.DeleteUTXO(utxoID) - } - onAcceptState, err := state.NewDiff(lastAcceptedID, env) require.NoError(err) + for _, utxoID := range utxoIDs { + onAcceptState.DeleteUTXO(utxoID) + } + executor := StandardTxExecutor{ Backend: &env.backend, State: onAcceptState, Tx: tx, } err = tx.Unsigned.Visit(&executor) - require.Error(err, "should have failed because tx fee paying key has no funds") + require.ErrorIs(err, ErrFlowCheckFailed) + } +} + +// 
Verifies that [AddValidatorTx] and [AddDelegatorTx] are disabled post-Durango +func TestDurangoDisabledTransactions(t *testing.T) { + type test struct { + name string + buildTx func(*environment) *txs.Tx + expectedErr error + } + + tests := []test{ + { + name: "AddValidatorTx", + buildTx: func(env *environment) *txs.Tx { + var ( + nodeID = ids.GenerateTestNodeID() + chainTime = env.state.GetTimestamp() + endTime = chainTime.Add(defaultMaxStakingDuration) + ) + + tx, err := env.txBuilder.NewAddValidatorTx( + defaultMinValidatorStake, + 0, // startTime + uint64(endTime.Unix()), + nodeID, + ids.ShortEmpty, // reward address, + reward.PercentDenominator, // shares + preFundedKeys, + ids.ShortEmpty, // change address + nil, // memo + ) + require.NoError(t, err) + + return tx + }, + expectedErr: ErrAddValidatorTxPostDurango, + }, + { + name: "AddDelegatorTx", + buildTx: func(env *environment) *txs.Tx { + var primaryValidator *state.Staker + it, err := env.state.GetCurrentStakerIterator() + require.NoError(t, err) + for it.Next() { + staker := it.Value() + if staker.Priority != txs.PrimaryNetworkValidatorCurrentPriority { + continue + } + primaryValidator = staker + break + } + it.Release() + + tx, err := env.txBuilder.NewAddDelegatorTx( + defaultMinValidatorStake, + 0, // startTime + uint64(primaryValidator.EndTime.Unix()), + primaryValidator.NodeID, + ids.ShortEmpty, // reward address, + preFundedKeys, + ids.ShortEmpty, // change address + nil, // memo + ) + require.NoError(t, err) + + return tx + }, + expectedErr: ErrAddDelegatorTxPostDurango, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + + env := newEnvironment(t, durango) + env.ctx.Lock.Lock() + defer env.ctx.Lock.Unlock() + + onAcceptState, err := state.NewDiff(env.state.GetLastAccepted(), env) + require.NoError(err) + + tx := tt.buildTx(env) + + err = tx.Unsigned.Visit(&StandardTxExecutor{ + Backend: &env.backend, + State: onAcceptState, + Tx: tx, + }) + 
require.ErrorIs(err, tt.expectedErr) + }) + } +} + +// Verifies that the Memo field is required to be empty post-Durango +func TestDurangoMemoField(t *testing.T) { + type test struct { + name string + setupTest func(env *environment, memoField []byte) (*txs.Tx, state.Diff) + } + + tests := []test{ + { + name: "AddSubnetValidatorTx", + setupTest: func(env *environment, memoField []byte) (*txs.Tx, state.Diff) { + var primaryValidator *state.Staker + it, err := env.state.GetCurrentStakerIterator() + require.NoError(t, err) + for it.Next() { + staker := it.Value() + if staker.Priority != txs.PrimaryNetworkValidatorCurrentPriority { + continue + } + primaryValidator = staker + break + } + it.Release() + + tx, err := env.txBuilder.NewAddSubnetValidatorTx( + defaultMinValidatorStake, + 0, // startTime + uint64(primaryValidator.EndTime.Unix()), + primaryValidator.NodeID, + testSubnet1.TxID, + preFundedKeys, + ids.ShortEmpty, + memoField, + ) + require.NoError(t, err) + + onAcceptState, err := state.NewDiff(env.state.GetLastAccepted(), env) + require.NoError(t, err) + return tx, onAcceptState + }, + }, + { + name: "CreateChainTx", + setupTest: func(env *environment, memoField []byte) (*txs.Tx, state.Diff) { + tx, err := env.txBuilder.NewCreateChainTx( + testSubnet1.TxID, + []byte{}, // genesisData + ids.GenerateTestID(), // vmID + []ids.ID{}, // fxIDs + "aaa", // chain name + preFundedKeys, + ids.ShortEmpty, + memoField, + ) + require.NoError(t, err) + + onAcceptState, err := state.NewDiff(env.state.GetLastAccepted(), env) + require.NoError(t, err) + + return tx, onAcceptState + }, + }, + { + name: "CreateSubnetTx", + setupTest: func(env *environment, memoField []byte) (*txs.Tx, state.Diff) { + tx, err := env.txBuilder.NewCreateSubnetTx( + 1, + []ids.ShortID{ids.GenerateTestShortID()}, + preFundedKeys, + ids.ShortEmpty, + memoField, + ) + require.NoError(t, err) + + onAcceptState, err := state.NewDiff(env.state.GetLastAccepted(), env) + require.NoError(t, err) + + return 
tx, onAcceptState + }, + }, + { + name: "ImportTx", + setupTest: func(env *environment, memoField []byte) (*txs.Tx, state.Diff) { + // Skip shared memory checks + env.backend.Bootstrapped.Set(false) + + var ( + sourceChain = env.ctx.XChainID + sourceKey = preFundedKeys[1] + sourceAmount = 10 * units.Avax + ) + + sharedMemory := fundedSharedMemory( + t, + env, + sourceKey, + sourceChain, + map[ids.ID]uint64{ + env.ctx.AVAXAssetID: sourceAmount, + }, + ) + env.msm.SharedMemory = sharedMemory + + tx, err := env.txBuilder.NewImportTx( + sourceChain, + sourceKey.PublicKey().Address(), + preFundedKeys, + ids.ShortEmpty, // change address + memoField, + ) + require.NoError(t, err) + + onAcceptState, err := state.NewDiff(env.state.GetLastAccepted(), env) + require.NoError(t, err) + + return tx, onAcceptState + }, + }, + { + name: "ExportTx", + setupTest: func(env *environment, memoField []byte) (*txs.Tx, state.Diff) { + tx, err := env.txBuilder.NewExportTx( + units.Avax, // amount + env.ctx.XChainID, // destination chain + ids.GenerateTestShortID(), // destination address + preFundedKeys, + ids.ShortEmpty, // change address + memoField, + ) + require.NoError(t, err) + + onAcceptState, err := state.NewDiff(env.state.GetLastAccepted(), env) + require.NoError(t, err) + + return tx, onAcceptState + }, + }, + { + name: "RemoveSubnetValidatorTx", + setupTest: func(env *environment, memoField []byte) (*txs.Tx, state.Diff) { + var primaryValidator *state.Staker + it, err := env.state.GetCurrentStakerIterator() + require.NoError(t, err) + for it.Next() { + staker := it.Value() + if staker.Priority != txs.PrimaryNetworkValidatorCurrentPriority { + continue + } + primaryValidator = staker + break + } + it.Release() + + endTime := primaryValidator.EndTime + subnetValTx, err := env.txBuilder.NewAddSubnetValidatorTx( + defaultWeight, + 0, + uint64(endTime.Unix()), + primaryValidator.NodeID, + testSubnet1.ID(), + []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], 
testSubnet1ControlKeys[1]}, + ids.ShortEmpty, + nil, + ) + require.NoError(t, err) + + onAcceptState, err := state.NewDiff(env.state.GetLastAccepted(), env) + require.NoError(t, err) + + require.NoError(t, subnetValTx.Unsigned.Visit(&StandardTxExecutor{ + Backend: &env.backend, + State: onAcceptState, + Tx: subnetValTx, + })) + + tx, err := env.txBuilder.NewRemoveSubnetValidatorTx( + primaryValidator.NodeID, + testSubnet1.ID(), + preFundedKeys, + ids.ShortEmpty, + memoField, + ) + require.NoError(t, err) + + return tx, onAcceptState + }, + }, + { + name: "TransformSubnetTx", + setupTest: func(env *environment, memoField []byte) (*txs.Tx, state.Diff) { + tx, err := env.txBuilder.NewTransformSubnetTx( + testSubnet1.TxID, // subnetID + ids.GenerateTestID(), // assetID + 10, // initial supply + 10, // max supply + 0, // min consumption rate + reward.PercentDenominator, // max consumption rate + 2, // min validator stake + 10, // max validator stake + time.Minute, // min stake duration + time.Hour, // max stake duration + 1, // min delegation fees + 10, // min delegator stake + 1, // max validator weight factor + 80, // uptime requirement + preFundedKeys, + ids.ShortEmpty, // change address + memoField, + ) + require.NoError(t, err) + + onAcceptState, err := state.NewDiff(env.state.GetLastAccepted(), env) + require.NoError(t, err) + + return tx, onAcceptState + }, + }, + { + name: "AddPermissionlessValidatorTx", + setupTest: func(env *environment, memoField []byte) (*txs.Tx, state.Diff) { + var ( + nodeID = ids.GenerateTestNodeID() + chainTime = env.state.GetTimestamp() + endTime = chainTime.Add(defaultMaxStakingDuration) + ) + sk, err := bls.NewSecretKey() + require.NoError(t, err) + + tx, err := env.txBuilder.NewAddPermissionlessValidatorTx( + env.config.MinValidatorStake, + 0, // start Time + uint64(endTime.Unix()), + nodeID, + signer.NewProofOfPossession(sk), + ids.ShortEmpty, // reward address + reward.PercentDenominator, // shares + preFundedKeys, + 
ids.ShortEmpty, // change address + memoField, + ) + require.NoError(t, err) + + onAcceptState, err := state.NewDiff(env.state.GetLastAccepted(), env) + require.NoError(t, err) + + return tx, onAcceptState + }, + }, + { + name: "AddPermissionlessDelegatorTx", + setupTest: func(env *environment, memoField []byte) (*txs.Tx, state.Diff) { + var primaryValidator *state.Staker + it, err := env.state.GetCurrentStakerIterator() + require.NoError(t, err) + for it.Next() { + staker := it.Value() + if staker.Priority != txs.PrimaryNetworkValidatorCurrentPriority { + continue + } + primaryValidator = staker + break + } + it.Release() + + tx, err := env.txBuilder.NewAddPermissionlessDelegatorTx( + defaultMinValidatorStake, + 0, // start Time + uint64(primaryValidator.EndTime.Unix()), + primaryValidator.NodeID, + ids.ShortEmpty, // reward address + preFundedKeys, + ids.ShortEmpty, // change address + memoField, + ) + require.NoError(t, err) + + onAcceptState, err := state.NewDiff(env.state.GetLastAccepted(), env) + require.NoError(t, err) + + return tx, onAcceptState + }, + }, + { + name: "TransferSubnetOwnershipTx", + setupTest: func(env *environment, memoField []byte) (*txs.Tx, state.Diff) { + tx, err := env.txBuilder.NewTransferSubnetOwnershipTx( + testSubnet1.TxID, + 1, + []ids.ShortID{ids.ShortEmpty}, + preFundedKeys, + ids.ShortEmpty, // change address + memoField, + ) + require.NoError(t, err) + + onAcceptState, err := state.NewDiff(env.state.GetLastAccepted(), env) + require.NoError(t, err) + + return tx, onAcceptState + }, + }, + { + name: "BaseTx", + setupTest: func(env *environment, memoField []byte) (*txs.Tx, state.Diff) { + tx, err := env.txBuilder.NewBaseTx( + 1, + secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ids.ShortEmpty}, + }, + preFundedKeys, + ids.ShortEmpty, + memoField, + ) + require.NoError(t, err) + + onAcceptState, err := state.NewDiff(env.state.GetLastAccepted(), env) + require.NoError(t, err) + + return tx, onAcceptState + }, + }, 
+ } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + + env := newEnvironment(t, durango) + env.ctx.Lock.Lock() + defer env.ctx.Lock.Unlock() + + // Populated memo field should error + tx, onAcceptState := tt.setupTest(env, []byte{'m', 'e', 'm', 'o'}) + err := tx.Unsigned.Visit(&StandardTxExecutor{ + Backend: &env.backend, + State: onAcceptState, + Tx: tx, + }) + require.ErrorIs(err, avax.ErrMemoTooLarge) + + // Empty memo field should not error + tx, onAcceptState = tt.setupTest(env, []byte{}) + require.NoError(tx.Unsigned.Visit(&StandardTxExecutor{ + Backend: &env.backend, + State: onAcceptState, + Tx: tx, + })) + }) } } // Returns a RemoveSubnetValidatorTx that passes syntactic verification. +// Memo field is empty as required post Durango activation func newRemoveSubnetValidatorTx(t *testing.T) (*txs.RemoveSubnetValidatorTx, *txs.Tx) { t.Helper() @@ -1014,7 +1462,6 @@ func newRemoveSubnetValidatorTx(t *testing.T) (*txs.RemoveSubnetValidatorTx, *tx }, }, }, - Memo: []byte("hi"), }, }, Subnet: ids.GenerateTestID(), @@ -1034,13 +1481,13 @@ func newRemoveSubnetValidatorTx(t *testing.T) (*txs.RemoveSubnetValidatorTx, *tx // mock implementations that can be used in tests // for verifying RemoveSubnetValidatorTx. 
type removeSubnetValidatorTxVerifyEnv struct { - banffTime time.Time - fx *fx.MockFx - flowChecker *utxo.MockVerifier - unsignedTx *txs.RemoveSubnetValidatorTx - tx *txs.Tx - state *state.MockDiff - staker *state.Staker + latestForkTime time.Time + fx *fx.MockFx + flowChecker *utxo.MockVerifier + unsignedTx *txs.RemoveSubnetValidatorTx + tx *txs.Tx + state *state.MockDiff + staker *state.Staker } // Returns mock implementations that can be used in tests @@ -1054,12 +1501,12 @@ func newValidRemoveSubnetValidatorTxVerifyEnv(t *testing.T, ctrl *gomock.Control unsignedTx, tx := newRemoveSubnetValidatorTx(t) mockState := state.NewMockDiff(ctrl) return removeSubnetValidatorTxVerifyEnv{ - banffTime: now, - fx: mockFx, - flowChecker: mockFlowChecker, - unsignedTx: unsignedTx, - tx: tx, - state: mockState, + latestForkTime: now, + fx: mockFx, + flowChecker: mockFlowChecker, + unsignedTx: unsignedTx, + tx: tx, + state: mockState, staker: &state.Staker{ TxID: ids.GenerateTestID(), NodeID: ids.GenerateTestNodeID(), @@ -1072,7 +1519,6 @@ func TestStandardExecutorRemoveSubnetValidatorTx(t *testing.T) { type test struct { name string newExecutor func(*gomock.Controller) (*txs.RemoveSubnetValidatorTx, *StandardTxExecutor) - shouldErr bool expectedErr error } @@ -1083,14 +1529,10 @@ func TestStandardExecutorRemoveSubnetValidatorTx(t *testing.T) { env := newValidRemoveSubnetValidatorTxVerifyEnv(t, ctrl) // Set dependency expectations. 
+ env.state.EXPECT().GetTimestamp().Return(env.latestForkTime).AnyTimes() env.state.EXPECT().GetCurrentValidator(env.unsignedTx.Subnet, env.unsignedTx.NodeID).Return(env.staker, nil).Times(1) subnetOwner := fx.NewMockOwner(ctrl) - subnetTx := &txs.Tx{ - Unsigned: &txs.CreateSubnetTx{ - Owner: subnetOwner, - }, - } - env.state.EXPECT().GetTx(env.unsignedTx.Subnet).Return(subnetTx, status.Committed, nil).Times(1) + env.state.EXPECT().GetSubnetOwner(env.unsignedTx.Subnet).Return(subnetOwner, nil).Times(1) env.fx.EXPECT().VerifyPermission(env.unsignedTx, env.unsignedTx.SubnetAuth, env.tx.Creds[len(env.tx.Creds)-1], subnetOwner).Return(nil).Times(1) env.flowChecker.EXPECT().VerifySpend( env.unsignedTx, env.state, env.unsignedTx.Ins, env.unsignedTx.Outs, env.tx.Creds[:len(env.tx.Creds)-1], gomock.Any(), @@ -1101,7 +1543,9 @@ func TestStandardExecutorRemoveSubnetValidatorTx(t *testing.T) { e := &StandardTxExecutor{ Backend: &Backend{ Config: &config.Config{ - BanffTime: env.banffTime, + BanffTime: env.latestForkTime, + CortinaTime: env.latestForkTime, + DurangoTime: env.latestForkTime, }, Bootstrapped: &utils.Atomic[bool]{}, Fx: env.fx, @@ -1114,7 +1558,7 @@ func TestStandardExecutorRemoveSubnetValidatorTx(t *testing.T) { e.Bootstrapped.Set(true) return env.unsignedTx, e }, - shouldErr: false, + expectedErr: nil, }, { name: "tx fails syntactic verification", @@ -1126,7 +1570,9 @@ func TestStandardExecutorRemoveSubnetValidatorTx(t *testing.T) { e := &StandardTxExecutor{ Backend: &Backend{ Config: &config.Config{ - BanffTime: env.banffTime, + BanffTime: env.latestForkTime, + CortinaTime: env.latestForkTime, + DurangoTime: env.latestForkTime, }, Bootstrapped: &utils.Atomic[bool]{}, Fx: env.fx, @@ -1139,19 +1585,22 @@ func TestStandardExecutorRemoveSubnetValidatorTx(t *testing.T) { e.Bootstrapped.Set(true) return env.unsignedTx, e }, - shouldErr: true, + expectedErr: txs.ErrRemovePrimaryNetworkValidator, }, { name: "node isn't a validator of the subnet", newExecutor: 
func(ctrl *gomock.Controller) (*txs.RemoveSubnetValidatorTx, *StandardTxExecutor) { env := newValidRemoveSubnetValidatorTxVerifyEnv(t, ctrl) env.state = state.NewMockDiff(ctrl) + env.state.EXPECT().GetTimestamp().Return(env.latestForkTime) env.state.EXPECT().GetCurrentValidator(env.unsignedTx.Subnet, env.unsignedTx.NodeID).Return(nil, database.ErrNotFound) env.state.EXPECT().GetPendingValidator(env.unsignedTx.Subnet, env.unsignedTx.NodeID).Return(nil, database.ErrNotFound) e := &StandardTxExecutor{ Backend: &Backend{ Config: &config.Config{ - BanffTime: env.banffTime, + BanffTime: env.latestForkTime, + CortinaTime: env.latestForkTime, + DurangoTime: env.latestForkTime, }, Bootstrapped: &utils.Atomic[bool]{}, Fx: env.fx, @@ -1164,8 +1613,7 @@ func TestStandardExecutorRemoveSubnetValidatorTx(t *testing.T) { e.Bootstrapped.Set(true) return env.unsignedTx, e }, - shouldErr: true, - expectedErr: errNotValidator, + expectedErr: ErrNotValidator, }, { name: "validator is permissionless", @@ -1176,11 +1624,14 @@ func TestStandardExecutorRemoveSubnetValidatorTx(t *testing.T) { staker.Priority = txs.SubnetPermissionlessValidatorCurrentPriority // Set dependency expectations. 
+ env.state.EXPECT().GetTimestamp().Return(env.latestForkTime) env.state.EXPECT().GetCurrentValidator(env.unsignedTx.Subnet, env.unsignedTx.NodeID).Return(&staker, nil).Times(1) e := &StandardTxExecutor{ Backend: &Backend{ Config: &config.Config{ - BanffTime: env.banffTime, + BanffTime: env.latestForkTime, + CortinaTime: env.latestForkTime, + DurangoTime: env.latestForkTime, }, Bootstrapped: &utils.Atomic[bool]{}, Fx: env.fx, @@ -1193,8 +1644,7 @@ func TestStandardExecutorRemoveSubnetValidatorTx(t *testing.T) { e.Bootstrapped.Set(true) return env.unsignedTx, e }, - shouldErr: true, - expectedErr: errRemovePermissionlessValidator, + expectedErr: ErrRemovePermissionlessValidator, }, { name: "tx has no credentials", @@ -1203,11 +1653,14 @@ func TestStandardExecutorRemoveSubnetValidatorTx(t *testing.T) { // Remove credentials env.tx.Creds = nil env.state = state.NewMockDiff(ctrl) + env.state.EXPECT().GetTimestamp().Return(env.latestForkTime) env.state.EXPECT().GetCurrentValidator(env.unsignedTx.Subnet, env.unsignedTx.NodeID).Return(env.staker, nil) e := &StandardTxExecutor{ Backend: &Backend{ Config: &config.Config{ - BanffTime: env.banffTime, + BanffTime: env.latestForkTime, + CortinaTime: env.latestForkTime, + DurangoTime: env.latestForkTime, }, Bootstrapped: &utils.Atomic[bool]{}, Fx: env.fx, @@ -1220,7 +1673,6 @@ func TestStandardExecutorRemoveSubnetValidatorTx(t *testing.T) { e.Bootstrapped.Set(true) return env.unsignedTx, e }, - shouldErr: true, expectedErr: errWrongNumberOfCredentials, }, { @@ -1228,12 +1680,15 @@ func TestStandardExecutorRemoveSubnetValidatorTx(t *testing.T) { newExecutor: func(ctrl *gomock.Controller) (*txs.RemoveSubnetValidatorTx, *StandardTxExecutor) { env := newValidRemoveSubnetValidatorTxVerifyEnv(t, ctrl) env.state = state.NewMockDiff(ctrl) + env.state.EXPECT().GetTimestamp().Return(env.latestForkTime) env.state.EXPECT().GetCurrentValidator(env.unsignedTx.Subnet, env.unsignedTx.NodeID).Return(env.staker, nil) - 
env.state.EXPECT().GetTx(env.unsignedTx.Subnet).Return(nil, status.Unknown, database.ErrNotFound) + env.state.EXPECT().GetSubnetOwner(env.unsignedTx.Subnet).Return(nil, database.ErrNotFound) e := &StandardTxExecutor{ Backend: &Backend{ Config: &config.Config{ - BanffTime: env.banffTime, + BanffTime: env.latestForkTime, + CortinaTime: env.latestForkTime, + DurangoTime: env.latestForkTime, }, Bootstrapped: &utils.Atomic[bool]{}, Fx: env.fx, @@ -1246,27 +1701,24 @@ func TestStandardExecutorRemoveSubnetValidatorTx(t *testing.T) { e.Bootstrapped.Set(true) return env.unsignedTx, e }, - shouldErr: true, - expectedErr: errCantFindSubnet, + expectedErr: database.ErrNotFound, }, { name: "no permission to remove validator", newExecutor: func(ctrl *gomock.Controller) (*txs.RemoveSubnetValidatorTx, *StandardTxExecutor) { env := newValidRemoveSubnetValidatorTxVerifyEnv(t, ctrl) env.state = state.NewMockDiff(ctrl) + env.state.EXPECT().GetTimestamp().Return(env.latestForkTime) env.state.EXPECT().GetCurrentValidator(env.unsignedTx.Subnet, env.unsignedTx.NodeID).Return(env.staker, nil) subnetOwner := fx.NewMockOwner(ctrl) - subnetTx := &txs.Tx{ - Unsigned: &txs.CreateSubnetTx{ - Owner: subnetOwner, - }, - } - env.state.EXPECT().GetTx(env.unsignedTx.Subnet).Return(subnetTx, status.Committed, nil) + env.state.EXPECT().GetSubnetOwner(env.unsignedTx.Subnet).Return(subnetOwner, nil) env.fx.EXPECT().VerifyPermission(gomock.Any(), env.unsignedTx.SubnetAuth, env.tx.Creds[len(env.tx.Creds)-1], subnetOwner).Return(errTest) e := &StandardTxExecutor{ Backend: &Backend{ Config: &config.Config{ - BanffTime: env.banffTime, + BanffTime: env.latestForkTime, + CortinaTime: env.latestForkTime, + DurangoTime: env.latestForkTime, }, Bootstrapped: &utils.Atomic[bool]{}, Fx: env.fx, @@ -1279,7 +1731,6 @@ func TestStandardExecutorRemoveSubnetValidatorTx(t *testing.T) { e.Bootstrapped.Set(true) return env.unsignedTx, e }, - shouldErr: true, expectedErr: errUnauthorizedSubnetModification, }, { @@ -1287,14 
+1738,10 @@ func TestStandardExecutorRemoveSubnetValidatorTx(t *testing.T) { newExecutor: func(ctrl *gomock.Controller) (*txs.RemoveSubnetValidatorTx, *StandardTxExecutor) { env := newValidRemoveSubnetValidatorTxVerifyEnv(t, ctrl) env.state = state.NewMockDiff(ctrl) + env.state.EXPECT().GetTimestamp().Return(env.latestForkTime) env.state.EXPECT().GetCurrentValidator(env.unsignedTx.Subnet, env.unsignedTx.NodeID).Return(env.staker, nil) subnetOwner := fx.NewMockOwner(ctrl) - subnetTx := &txs.Tx{ - Unsigned: &txs.CreateSubnetTx{ - Owner: subnetOwner, - }, - } - env.state.EXPECT().GetTx(env.unsignedTx.Subnet).Return(subnetTx, status.Committed, nil) + env.state.EXPECT().GetSubnetOwner(env.unsignedTx.Subnet).Return(subnetOwner, nil) env.fx.EXPECT().VerifyPermission(gomock.Any(), env.unsignedTx.SubnetAuth, env.tx.Creds[len(env.tx.Creds)-1], subnetOwner).Return(nil) env.flowChecker.EXPECT().VerifySpend( gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), @@ -1302,7 +1749,9 @@ func TestStandardExecutorRemoveSubnetValidatorTx(t *testing.T) { e := &StandardTxExecutor{ Backend: &Backend{ Config: &config.Config{ - BanffTime: env.banffTime, + BanffTime: env.latestForkTime, + CortinaTime: env.latestForkTime, + DurangoTime: env.latestForkTime, }, Bootstrapped: &utils.Atomic[bool]{}, Fx: env.fx, @@ -1315,8 +1764,7 @@ func TestStandardExecutorRemoveSubnetValidatorTx(t *testing.T) { e.Bootstrapped.Set(true) return env.unsignedTx, e }, - shouldErr: true, - expectedErr: errFlowCheckFailed, + expectedErr: ErrFlowCheckFailed, }, } @@ -1324,23 +1772,16 @@ func TestStandardExecutorRemoveSubnetValidatorTx(t *testing.T) { t.Run(tt.name, func(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() unsignedTx, executor := tt.newExecutor(ctrl) err := executor.RemoveSubnetValidatorTx(unsignedTx) - if tt.shouldErr { - require.Error(err) - if tt.expectedErr != nil { - require.ErrorIs(err, tt.expectedErr) - } - return - } - 
require.NoError(err) + require.ErrorIs(err, tt.expectedErr) }) } } // Returns a TransformSubnetTx that passes syntactic verification. +// Memo field is empty as required post Durango activation func newTransformSubnetTx(t *testing.T) (*txs.TransformSubnetTx, *txs.Tx) { t.Helper() @@ -1383,7 +1824,6 @@ func newTransformSubnetTx(t *testing.T) (*txs.TransformSubnetTx, *txs.Tx) { }, }, }, - Memo: []byte("hi"), }, }, Subnet: ids.GenerateTestID(), @@ -1415,13 +1855,13 @@ func newTransformSubnetTx(t *testing.T) (*txs.TransformSubnetTx, *txs.Tx) { // mock implementations that can be used in tests // for verifying TransformSubnetTx. type transformSubnetTxVerifyEnv struct { - banffTime time.Time - fx *fx.MockFx - flowChecker *utxo.MockVerifier - unsignedTx *txs.TransformSubnetTx - tx *txs.Tx - state *state.MockDiff - staker *state.Staker + latestForkTime time.Time + fx *fx.MockFx + flowChecker *utxo.MockVerifier + unsignedTx *txs.TransformSubnetTx + tx *txs.Tx + state *state.MockDiff + staker *state.Staker } // Returns mock implementations that can be used in tests @@ -1435,12 +1875,12 @@ func newValidTransformSubnetTxVerifyEnv(t *testing.T, ctrl *gomock.Controller) t unsignedTx, tx := newTransformSubnetTx(t) mockState := state.NewMockDiff(ctrl) return transformSubnetTxVerifyEnv{ - banffTime: now, - fx: mockFx, - flowChecker: mockFlowChecker, - unsignedTx: unsignedTx, - tx: tx, - state: mockState, + latestForkTime: now, + fx: mockFx, + flowChecker: mockFlowChecker, + unsignedTx: unsignedTx, + tx: tx, + state: mockState, staker: &state.Staker{ TxID: ids.GenerateTestID(), NodeID: ids.GenerateTestNodeID(), @@ -1466,7 +1906,9 @@ func TestStandardExecutorTransformSubnetTx(t *testing.T) { e := &StandardTxExecutor{ Backend: &Backend{ Config: &config.Config{ - BanffTime: env.banffTime, + BanffTime: env.latestForkTime, + CortinaTime: env.latestForkTime, + DurangoTime: env.latestForkTime, }, Bootstrapped: &utils.Atomic[bool]{}, Fx: env.fx, @@ -1487,10 +1929,13 @@ func 
TestStandardExecutorTransformSubnetTx(t *testing.T) { env := newValidTransformSubnetTxVerifyEnv(t, ctrl) env.unsignedTx.MaxStakeDuration = math.MaxUint32 env.state = state.NewMockDiff(ctrl) + env.state.EXPECT().GetTimestamp().Return(env.latestForkTime) e := &StandardTxExecutor{ Backend: &Backend{ Config: &config.Config{ - BanffTime: env.banffTime, + BanffTime: env.latestForkTime, + CortinaTime: env.latestForkTime, + DurangoTime: env.latestForkTime, }, Bootstrapped: &utils.Atomic[bool]{}, Fx: env.fx, @@ -1512,10 +1957,13 @@ func TestStandardExecutorTransformSubnetTx(t *testing.T) { // Remove credentials env.tx.Creds = nil env.state = state.NewMockDiff(ctrl) + env.state.EXPECT().GetTimestamp().Return(env.latestForkTime) e := &StandardTxExecutor{ Backend: &Backend{ Config: &config.Config{ - BanffTime: env.banffTime, + BanffTime: env.latestForkTime, + CortinaTime: env.latestForkTime, + DurangoTime: env.latestForkTime, MaxStakeDuration: math.MaxInt64, }, Bootstrapped: &utils.Atomic[bool]{}, @@ -1537,21 +1985,19 @@ func TestStandardExecutorTransformSubnetTx(t *testing.T) { env := newValidTransformSubnetTxVerifyEnv(t, ctrl) env.state = state.NewMockDiff(ctrl) subnetOwner := fx.NewMockOwner(ctrl) - subnetTx := &txs.Tx{ - Unsigned: &txs.CreateSubnetTx{ - Owner: subnetOwner, - }, - } - env.state.EXPECT().GetTx(env.unsignedTx.Subnet).Return(subnetTx, status.Committed, nil) + env.state.EXPECT().GetTimestamp().Return(env.latestForkTime) + env.state.EXPECT().GetSubnetOwner(env.unsignedTx.Subnet).Return(subnetOwner, nil) env.state.EXPECT().GetSubnetTransformation(env.unsignedTx.Subnet).Return(nil, database.ErrNotFound).Times(1) env.fx.EXPECT().VerifyPermission(gomock.Any(), env.unsignedTx.SubnetAuth, env.tx.Creds[len(env.tx.Creds)-1], subnetOwner).Return(nil) env.flowChecker.EXPECT().VerifySpend( gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), - ).Return(errFlowCheckFailed) + ).Return(ErrFlowCheckFailed) e := &StandardTxExecutor{ Backend: 
&Backend{ Config: &config.Config{ - BanffTime: env.banffTime, + BanffTime: env.latestForkTime, + CortinaTime: env.latestForkTime, + DurangoTime: env.latestForkTime, MaxStakeDuration: math.MaxInt64, }, Bootstrapped: &utils.Atomic[bool]{}, @@ -1565,7 +2011,7 @@ func TestStandardExecutorTransformSubnetTx(t *testing.T) { e.Bootstrapped.Set(true) return env.unsignedTx, e }, - err: errFlowCheckFailed, + err: ErrFlowCheckFailed, }, { name: "valid tx", @@ -1574,12 +2020,8 @@ func TestStandardExecutorTransformSubnetTx(t *testing.T) { // Set dependency expectations. subnetOwner := fx.NewMockOwner(ctrl) - subnetTx := &txs.Tx{ - Unsigned: &txs.CreateSubnetTx{ - Owner: subnetOwner, - }, - } - env.state.EXPECT().GetTx(env.unsignedTx.Subnet).Return(subnetTx, status.Committed, nil).Times(1) + env.state.EXPECT().GetTimestamp().Return(env.latestForkTime) + env.state.EXPECT().GetSubnetOwner(env.unsignedTx.Subnet).Return(subnetOwner, nil).Times(1) env.state.EXPECT().GetSubnetTransformation(env.unsignedTx.Subnet).Return(nil, database.ErrNotFound).Times(1) env.fx.EXPECT().VerifyPermission(env.unsignedTx, env.unsignedTx.SubnetAuth, env.tx.Creds[len(env.tx.Creds)-1], subnetOwner).Return(nil).Times(1) env.flowChecker.EXPECT().VerifySpend( @@ -1592,7 +2034,9 @@ func TestStandardExecutorTransformSubnetTx(t *testing.T) { e := &StandardTxExecutor{ Backend: &Backend{ Config: &config.Config{ - BanffTime: env.banffTime, + BanffTime: env.latestForkTime, + CortinaTime: env.latestForkTime, + DurangoTime: env.latestForkTime, MaxStakeDuration: math.MaxInt64, }, Bootstrapped: &utils.Atomic[bool]{}, @@ -1613,7 +2057,6 @@ func TestStandardExecutorTransformSubnetTx(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { ctrl := gomock.NewController(t) - defer ctrl.Finish() unsignedTx, executor := tt.newExecutor(ctrl) err := executor.TransformSubnetTx(unsignedTx) diff --git a/avalanchego/vms/platformvm/txs/executor/state_changes.go 
b/avalanchego/vms/platformvm/txs/executor/state_changes.go index ccf87bce..36981b09 100644 --- a/avalanchego/vms/platformvm/txs/executor/state_changes.go +++ b/avalanchego/vms/platformvm/txs/executor/state_changes.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package executor @@ -10,14 +10,15 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/vms/platformvm/reward" "github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/ava-labs/avalanchego/vms/platformvm/txs" ) var ( - errChildBlockAfterStakerChangeTime = errors.New("proposed timestamp later than next staker change time") - errChildBlockBeyondSyncBound = errors.New("proposed timestamp is too far in the future relative to local time") + ErrChildBlockAfterStakerChangeTime = errors.New("proposed timestamp later than next staker change time") + ErrChildBlockBeyondSyncBound = errors.New("proposed timestamp is too far in the future relative to local time") ) // VerifyNewChainTime returns nil if the [newChainTime] is a valid chain time @@ -38,7 +39,7 @@ func VerifyNewChainTime( if newChainTime.After(nextStakerChangeTime) { return fmt.Errorf( "%w, proposed timestamp (%s), next staker change time (%s)", - errChildBlockAfterStakerChangeTime, + ErrChildBlockAfterStakerChangeTime, newChainTime, nextStakerChangeTime, ) @@ -49,7 +50,7 @@ func VerifyNewChainTime( if newChainTime.After(maxNewChainTime) { return fmt.Errorf( "%w, proposed time (%s), local time (%s)", - errChildBlockBeyondSyncBound, + ErrChildBlockBeyondSyncBound, newChainTime, now, ) @@ -57,78 +58,59 @@ func VerifyNewChainTime( return nil } -type StateChanges interface { - Apply(onAccept state.Diff) - Len() int -} - -type stateChanges struct { - updatedSupplies 
map[ids.ID]uint64 - currentValidatorsToAdd []*state.Staker - currentDelegatorsToAdd []*state.Staker - pendingValidatorsToRemove []*state.Staker - pendingDelegatorsToRemove []*state.Staker - currentValidatorsToRemove []*state.Staker -} - -func (s *stateChanges) Apply(stateDiff state.Diff) { - for subnetID, supply := range s.updatedSupplies { - stateDiff.SetCurrentSupply(subnetID, supply) +func NextBlockTime(state state.Chain, clk *mockable.Clock) (time.Time, bool, error) { + var ( + timestamp = clk.Time() + parentTime = state.GetTimestamp() + ) + if parentTime.After(timestamp) { + timestamp = parentTime } + // [timestamp] = max(now, parentTime) - for _, currentValidatorToAdd := range s.currentValidatorsToAdd { - stateDiff.PutCurrentValidator(currentValidatorToAdd) - } - for _, pendingValidatorToRemove := range s.pendingValidatorsToRemove { - stateDiff.DeletePendingValidator(pendingValidatorToRemove) - } - for _, currentDelegatorToAdd := range s.currentDelegatorsToAdd { - stateDiff.PutCurrentDelegator(currentDelegatorToAdd) - } - for _, pendingDelegatorToRemove := range s.pendingDelegatorsToRemove { - stateDiff.DeletePendingDelegator(pendingDelegatorToRemove) - } - for _, currentValidatorToRemove := range s.currentValidatorsToRemove { - stateDiff.DeleteCurrentValidator(currentValidatorToRemove) + nextStakerChangeTime, err := GetNextStakerChangeTime(state) + if err != nil { + return time.Time{}, false, fmt.Errorf("failed getting next staker change time: %w", err) } -} -func (s *stateChanges) Len() int { - return len(s.currentValidatorsToAdd) + len(s.currentDelegatorsToAdd) + - len(s.pendingValidatorsToRemove) + len(s.pendingDelegatorsToRemove) + - len(s.currentValidatorsToRemove) + // timeWasCapped means that [timestamp] was reduced to [nextStakerChangeTime] + timeWasCapped := !timestamp.Before(nextStakerChangeTime) + if timeWasCapped { + timestamp = nextStakerChangeTime + } + // [timestamp] = min(max(now, parentTime), nextStakerChangeTime) + return timestamp, 
timeWasCapped, nil } -// AdvanceTimeTo does not modify [parentState]. -// Instead it returns all the StateChanges caused by advancing the chain time to -// the [newChainTime]. +// AdvanceTimeTo applies all state changes to [parentState] resulting from +// advancing the chain time to [newChainTime]. +// Returns true iff the validator set changed. func AdvanceTimeTo( backend *Backend, parentState state.Chain, newChainTime time.Time, -) (StateChanges, error) { - pendingStakerIterator, err := parentState.GetPendingStakerIterator() +) (bool, error) { + // We promote pending stakers to current stakers first and remove + // completed stakers from the current staker set. We assume that any + // promoted staker will not immediately be removed from the current staker + // set. This is guaranteed by the following invariants. + // + // Invariant: MinStakeDuration > 0 => guarantees [StartTime] != [EndTime] + // Invariant: [newChainTime] <= nextStakerChangeTime. + + changes, err := state.NewDiffOn(parentState) if err != nil { - return nil, err + return false, err } - defer pendingStakerIterator.Release() - changes := &stateChanges{ - updatedSupplies: make(map[ids.ID]uint64), + pendingStakerIterator, err := parentState.GetPendingStakerIterator() + if err != nil { + return false, err } + defer pendingStakerIterator.Release() - // Add to the staker set any pending stakers whose start time is at or - // before the new timestamp - - // Note: we process pending stakers ready to be promoted to current ones and - // then we process current stakers to be demoted out of stakers set. It is - // guaranteed that no promoted stakers would be demoted immediately. A - // failure of this invariant would cause a staker to be added to - // StateChanges and be persisted among current stakers even if it already - // expired. The following invariants ensure this does not happens: - // Invariant: minimum stake duration is > 0, so staker.StartTime != staker.EndTime. 
- // Invariant: [newChainTime] does not skip stakers set change times. - + var changed bool + // Promote any pending stakers to current if [StartTime] <= [newChainTime]. for pendingStakerIterator.Next() { stakerToRemove := pendingStakerIterator.Value() if stakerToRemove.StartTime.After(newChainTime) { @@ -140,22 +122,20 @@ func AdvanceTimeTo( stakerToAdd.Priority = txs.PendingToCurrentPriorities[stakerToRemove.Priority] if stakerToRemove.Priority == txs.SubnetPermissionedValidatorPendingPriority { - changes.currentValidatorsToAdd = append(changes.currentValidatorsToAdd, &stakerToAdd) - changes.pendingValidatorsToRemove = append(changes.pendingValidatorsToRemove, stakerToRemove) + changes.PutCurrentValidator(&stakerToAdd) + changes.DeletePendingValidator(stakerToRemove) + changed = true continue } - supply, ok := changes.updatedSupplies[stakerToRemove.SubnetID] - if !ok { - supply, err = parentState.GetCurrentSupply(stakerToRemove.SubnetID) - if err != nil { - return nil, err - } + supply, err := changes.GetCurrentSupply(stakerToRemove.SubnetID) + if err != nil { + return false, err } rewards, err := GetRewardsCalculator(backend, parentState, stakerToRemove.SubnetID) if err != nil { - return nil, err + return false, err } potentialReward := rewards.Calculate( @@ -167,25 +147,28 @@ func AdvanceTimeTo( // Invariant: [rewards.Calculate] can never return a [potentialReward] // such that [supply + potentialReward > maximumSupply]. 
- changes.updatedSupplies[stakerToRemove.SubnetID] = supply + potentialReward + changes.SetCurrentSupply(stakerToRemove.SubnetID, supply+potentialReward) switch stakerToRemove.Priority { case txs.PrimaryNetworkValidatorPendingPriority, txs.SubnetPermissionlessValidatorPendingPriority: - changes.currentValidatorsToAdd = append(changes.currentValidatorsToAdd, &stakerToAdd) - changes.pendingValidatorsToRemove = append(changes.pendingValidatorsToRemove, stakerToRemove) + changes.PutCurrentValidator(&stakerToAdd) + changes.DeletePendingValidator(stakerToRemove) case txs.PrimaryNetworkDelegatorApricotPendingPriority, txs.PrimaryNetworkDelegatorBanffPendingPriority, txs.SubnetPermissionlessDelegatorPendingPriority: - changes.currentDelegatorsToAdd = append(changes.currentDelegatorsToAdd, &stakerToAdd) - changes.pendingDelegatorsToRemove = append(changes.pendingDelegatorsToRemove, stakerToRemove) + changes.PutCurrentDelegator(&stakerToAdd) + changes.DeletePendingDelegator(stakerToRemove) default: - return nil, fmt.Errorf("expected staker priority got %d", stakerToRemove.Priority) + return false, fmt.Errorf("expected staker priority got %d", stakerToRemove.Priority) } + + changed = true } + // Remove any current stakers whose [EndTime] <= [newChainTime]. 
currentStakerIterator, err := parentState.GetCurrentStakerIterator() if err != nil { - return nil, err + return false, err } defer currentStakerIterator.Release() @@ -203,9 +186,16 @@ func AdvanceTimeTo( break } - changes.currentValidatorsToRemove = append(changes.currentValidatorsToRemove, stakerToRemove) + changes.DeleteCurrentValidator(stakerToRemove) + changed = true + } + + if err := changes.Apply(parentState); err != nil { + return false, err } - return changes, nil + + parentState.SetTimestamp(newChainTime) + return changed, nil } func GetRewardsCalculator( @@ -217,14 +207,10 @@ func GetRewardsCalculator( return backend.Rewards, nil } - transformSubnetIntf, err := parentState.GetSubnetTransformation(subnetID) + transformSubnet, err := GetTransformSubnetTx(parentState, subnetID) if err != nil { return nil, err } - transformSubnet, ok := transformSubnetIntf.Unsigned.(*txs.TransformSubnetTx) - if !ok { - return nil, errIsNotTransformSubnetTx - } return reward.NewCalculator(reward.Config{ MaxConsumptionRate: transformSubnet.MaxConsumptionRate, diff --git a/avalanchego/vms/platformvm/txs/executor/subnet_tx_verification.go b/avalanchego/vms/platformvm/txs/executor/subnet_tx_verification.go index defcc608..f1a75f6f 100644 --- a/avalanchego/vms/platformvm/txs/executor/subnet_tx_verification.go +++ b/avalanchego/vms/platformvm/txs/executor/subnet_tx_verification.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package executor @@ -16,8 +16,6 @@ import ( var ( errWrongNumberOfCredentials = errors.New("should have the same number of credentials as inputs") - errCantFindSubnet = errors.New("couldn't find subnet") - errIsNotSubnet = errors.New("is not a subnet") errIsImmutable = errors.New("is immutable") errUnauthorizedSubnetModification = errors.New("unauthorized subnet modification") ) @@ -67,23 +65,13 @@ func verifySubnetAuthorization( baseTxCredsLen := len(sTx.Creds) - 1 subnetCred := sTx.Creds[baseTxCredsLen] - subnetIntf, _, err := chainState.GetTx(subnetID) + subnetOwner, err := chainState.GetSubnetOwner(subnetID) if err != nil { - return nil, fmt.Errorf( - "%w %q: %v", - errCantFindSubnet, - subnetID, - err, - ) - } - - subnet, ok := subnetIntf.Unsigned.(*txs.CreateSubnetTx) - if !ok { - return nil, fmt.Errorf("%q %w", subnetID, errIsNotSubnet) + return nil, err } - if err := backend.Fx.VerifyPermission(sTx.Unsigned, subnetAuth, subnetCred, subnet.Owner); err != nil { - return nil, fmt.Errorf("%w: %v", errUnauthorizedSubnetModification, err) + if err := backend.Fx.VerifyPermission(sTx.Unsigned, subnetAuth, subnetCred, subnetOwner); err != nil { + return nil, fmt.Errorf("%w: %w", errUnauthorizedSubnetModification, err) } return sTx.Creds[:baseTxCredsLen], nil diff --git a/avalanchego/vms/platformvm/txs/executor/tx_mempool_verifier.go b/avalanchego/vms/platformvm/txs/executor/tx_mempool_verifier.go deleted file mode 100644 index 71a1a61e..00000000 --- a/avalanchego/vms/platformvm/txs/executor/tx_mempool_verifier.go +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package executor - -import ( - "errors" - "fmt" - "time" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/vms/platformvm/state" - "github.com/ava-labs/avalanchego/vms/platformvm/txs" -) - -var _ txs.Visitor = (*MempoolTxVerifier)(nil) - -type MempoolTxVerifier struct { - *Backend - ParentID ids.ID - StateVersions state.Versions - Tx *txs.Tx -} - -func (*MempoolTxVerifier) AdvanceTimeTx(*txs.AdvanceTimeTx) error { - return errWrongTxType -} - -func (*MempoolTxVerifier) RewardValidatorTx(*txs.RewardValidatorTx) error { - return errWrongTxType -} - -func (v *MempoolTxVerifier) AddValidatorTx(tx *txs.AddValidatorTx) error { - return v.standardTx(tx) -} - -func (v *MempoolTxVerifier) AddSubnetValidatorTx(tx *txs.AddSubnetValidatorTx) error { - return v.standardTx(tx) -} - -func (v *MempoolTxVerifier) AddDelegatorTx(tx *txs.AddDelegatorTx) error { - return v.standardTx(tx) -} - -func (v *MempoolTxVerifier) CreateChainTx(tx *txs.CreateChainTx) error { - return v.standardTx(tx) -} - -func (v *MempoolTxVerifier) CreateSubnetTx(tx *txs.CreateSubnetTx) error { - return v.standardTx(tx) -} - -func (v *MempoolTxVerifier) ImportTx(tx *txs.ImportTx) error { - return v.standardTx(tx) -} - -func (v *MempoolTxVerifier) ExportTx(tx *txs.ExportTx) error { - return v.standardTx(tx) -} - -func (v *MempoolTxVerifier) RemoveSubnetValidatorTx(tx *txs.RemoveSubnetValidatorTx) error { - return v.standardTx(tx) -} - -func (v *MempoolTxVerifier) TransformSubnetTx(tx *txs.TransformSubnetTx) error { - return v.standardTx(tx) -} - -func (v *MempoolTxVerifier) AddPermissionlessValidatorTx(tx *txs.AddPermissionlessValidatorTx) error { - return v.standardTx(tx) -} - -func (v *MempoolTxVerifier) AddPermissionlessDelegatorTx(tx *txs.AddPermissionlessDelegatorTx) error { - return v.standardTx(tx) -} - -func (v *MempoolTxVerifier) standardTx(tx txs.UnsignedTx) error { - baseState, err := v.standardBaseState() - if err != nil { - return err - } - - executor := 
StandardTxExecutor{ - Backend: v.Backend, - State: baseState, - Tx: v.Tx, - } - err = tx.Visit(&executor) - // We ignore [errFutureStakeTime] here because the time will be advanced - // when this transaction is issued. - if errors.Is(err, errFutureStakeTime) { - return nil - } - return err -} - -// Upon Banff activation, txs are not verified against current chain time -// but against the block timestamp. [baseTime] calculates -// the right timestamp to be used to mempool tx verification -func (v *MempoolTxVerifier) standardBaseState() (state.Diff, error) { - state, err := state.NewDiff(v.ParentID, v.StateVersions) - if err != nil { - return nil, err - } - - nextBlkTime, err := v.nextBlockTime(state) - if err != nil { - return nil, err - } - - if !v.Backend.Config.IsBanffActivated(nextBlkTime) { - // next tx would be included into an Apricot block - // so we verify it against current chain state - return state, nil - } - - // next tx would be included into a Banff block - // so we verify it against duly updated chain state - changes, err := AdvanceTimeTo(v.Backend, state, nextBlkTime) - if err != nil { - return nil, err - } - changes.Apply(state) - state.SetTimestamp(nextBlkTime) - - return state, nil -} - -func (v *MempoolTxVerifier) nextBlockTime(state state.Diff) (time.Time, error) { - var ( - parentTime = state.GetTimestamp() - nextBlkTime = v.Clk.Time() - ) - if parentTime.After(nextBlkTime) { - nextBlkTime = parentTime - } - nextStakerChangeTime, err := GetNextStakerChangeTime(state) - if err != nil { - return time.Time{}, fmt.Errorf("could not calculate next staker change time: %w", err) - } - if !nextBlkTime.Before(nextStakerChangeTime) { - nextBlkTime = nextStakerChangeTime - } - return nextBlkTime, nil -} diff --git a/avalanchego/vms/platformvm/txs/export_tx.go b/avalanchego/vms/platformvm/txs/export_tx.go index b124263a..19dc3a07 100644 --- a/avalanchego/vms/platformvm/txs/export_tx.go +++ b/avalanchego/vms/platformvm/txs/export_tx.go @@ -1,4 +1,4 @@ -// 
Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs diff --git a/avalanchego/vms/platformvm/txs/import_tx.go b/avalanchego/vms/platformvm/txs/import_tx.go index 70a79edf..563242da 100644 --- a/avalanchego/vms/platformvm/txs/import_tx.go +++ b/avalanchego/vms/platformvm/txs/import_tx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs @@ -78,7 +78,7 @@ func (tx *ImportTx) SyntacticVerify(ctx *snow.Context) error { return fmt.Errorf("input failed verification: %w", err) } } - if !utils.IsSortedAndUniqueSortable(tx.ImportedInputs) { + if !utils.IsSortedAndUnique(tx.ImportedInputs) { return errInputsNotSortedUnique } diff --git a/avalanchego/vms/platformvm/txs/mempool/issuer.go b/avalanchego/vms/platformvm/txs/mempool/issuer.go deleted file mode 100644 index aa5e5c70..00000000 --- a/avalanchego/vms/platformvm/txs/mempool/issuer.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package mempool - -import ( - "errors" - - "github.com/ava-labs/avalanchego/vms/platformvm/txs" -) - -var ( - _ txs.Visitor = (*issuer)(nil) - - errCantIssueAdvanceTimeTx = errors.New("can not issue an advance time tx") - errCantIssueRewardValidatorTx = errors.New("can not issue a reward validator tx") -) - -type issuer struct { - m *mempool - tx *txs.Tx -} - -func (*issuer) AdvanceTimeTx(*txs.AdvanceTimeTx) error { - return errCantIssueAdvanceTimeTx -} - -func (*issuer) RewardValidatorTx(*txs.RewardValidatorTx) error { - return errCantIssueRewardValidatorTx -} - -func (i *issuer) AddValidatorTx(*txs.AddValidatorTx) error { - i.m.addStakerTx(i.tx) - return nil -} - -func (i *issuer) AddSubnetValidatorTx(*txs.AddSubnetValidatorTx) error { - i.m.addStakerTx(i.tx) - return nil -} - -func (i *issuer) AddDelegatorTx(*txs.AddDelegatorTx) error { - i.m.addStakerTx(i.tx) - return nil -} - -func (i *issuer) RemoveSubnetValidatorTx(*txs.RemoveSubnetValidatorTx) error { - i.m.addDecisionTx(i.tx) - return nil -} - -func (i *issuer) CreateChainTx(*txs.CreateChainTx) error { - i.m.addDecisionTx(i.tx) - return nil -} - -func (i *issuer) CreateSubnetTx(*txs.CreateSubnetTx) error { - i.m.addDecisionTx(i.tx) - return nil -} - -func (i *issuer) ImportTx(*txs.ImportTx) error { - i.m.addDecisionTx(i.tx) - return nil -} - -func (i *issuer) ExportTx(*txs.ExportTx) error { - i.m.addDecisionTx(i.tx) - return nil -} - -func (i *issuer) TransformSubnetTx(*txs.TransformSubnetTx) error { - i.m.addDecisionTx(i.tx) - return nil -} - -func (i *issuer) AddPermissionlessValidatorTx(*txs.AddPermissionlessValidatorTx) error { - i.m.addStakerTx(i.tx) - return nil -} - -func (i *issuer) AddPermissionlessDelegatorTx(*txs.AddPermissionlessDelegatorTx) error { - i.m.addStakerTx(i.tx) - return nil -} diff --git a/avalanchego/vms/platformvm/txs/mempool/mempool.go b/avalanchego/vms/platformvm/txs/mempool/mempool.go index cec810fe..34ee9c28 100644 --- a/avalanchego/vms/platformvm/txs/mempool/mempool.go +++ 
b/avalanchego/vms/platformvm/txs/mempool/mempool.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package mempool @@ -6,27 +6,28 @@ package mempool import ( "errors" "fmt" + "sync" "github.com/prometheus/client_golang/prometheus" "github.com/ava-labs/avalanchego/cache" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/linkedhashmap" + "github.com/ava-labs/avalanchego/utils/setmap" "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/vms/platformvm/txs" - "github.com/ava-labs/avalanchego/vms/platformvm/txs/txheap" ) const ( - // targetTxSize is the maximum number of bytes a transaction can use to be + // MaxTxSize is the maximum number of bytes a transaction can use to be // allowed into the mempool. - targetTxSize = 64 * units.KiB + MaxTxSize = 64 * units.KiB // droppedTxIDsCacheSize is the maximum number of dropped txIDs to cache droppedTxIDsCacheSize = 64 - initialConsumedUTXOsSize = 512 - // maxMempoolSize is the maximum number of bytes allowed in the mempool maxMempoolSize = 64 * units.MiB ) @@ -34,248 +35,202 @@ const ( var ( _ Mempool = (*mempool)(nil) - errMempoolFull = errors.New("mempool is full") + ErrDuplicateTx = errors.New("duplicate tx") + ErrTxTooLarge = errors.New("tx too large") + ErrMempoolFull = errors.New("mempool is full") + ErrConflictsWithOtherTx = errors.New("tx conflicts with other tx") + ErrCantIssueAdvanceTimeTx = errors.New("can not issue an advance time tx") + ErrCantIssueRewardValidatorTx = errors.New("can not issue a reward validator tx") ) -type BlockTimer interface { - // ResetBlockTimer schedules a timer to notify the consensus engine once - // there is a block ready to be built. 
If a block is ready to be built when - // this function is called, the engine will be notified directly. - ResetBlockTimer() -} - type Mempool interface { - // we may want to be able to stop valid transactions - // from entering the mempool, e.g. during blocks creation - EnableAdding() - DisableAdding() - Add(tx *txs.Tx) error - Has(txID ids.ID) bool - Get(txID ids.ID) *txs.Tx - Remove(txs []*txs.Tx) - - // Following Banff activation, all mempool transactions, - // (both decision and staker) are included into Standard blocks. - // HasTxs allow to check for availability of any mempool transaction. - HasTxs() bool - // PeekTxs returns the next txs for Banff blocks - // up to maxTxsBytes without removing them from the mempool. - PeekTxs(maxTxsBytes int) []*txs.Tx - - HasStakerTx() bool - // PeekStakerTx returns the next stakerTx without removing it from mempool. - // It returns nil if !HasStakerTx(). - // It's guaranteed that the returned tx, if not nil, is a StakerTx. - PeekStakerTx() *txs.Tx - - // Note: dropped txs are added to droppedTxIDs but not - // not evicted from unissued decision/staker txs. - // This allows previously dropped txs to be possibly - // reissued. + Get(txID ids.ID) (*txs.Tx, bool) + // Remove [txs] and any conflicts of [txs] from the mempool. + Remove(txs ...*txs.Tx) + + // Peek returns the oldest tx in the mempool. + Peek() (tx *txs.Tx, exists bool) + + // Iterate iterates over the txs until f returns false + Iterate(f func(tx *txs.Tx) bool) + + // RequestBuildBlock notifies the consensus engine that a block should be + // built. If [emptyBlockPermitted] is true, the notification will be sent + // regardless of whether there are no transactions in the mempool. If not, + // a notification will only be sent if there is at least one transaction in + // the mempool. + RequestBuildBlock(emptyBlockPermitted bool) + + // Note: dropped txs are added to droppedTxIDs but are not evicted from + // unissued decision/staker txs. 
This allows previously dropped txs to be + // possibly reissued. MarkDropped(txID ids.ID, reason error) GetDropReason(txID ids.ID) error + + // Len returns the number of txs in the mempool. + Len() int } // Transactions from clients that have not yet been put into blocks and added to // consensus type mempool struct { - // If true, drop transactions added to the mempool via Add. - dropIncoming bool - - bytesAvailableMetric prometheus.Gauge - bytesAvailable int + lock sync.RWMutex + unissuedTxs linkedhashmap.LinkedHashmap[ids.ID, *txs.Tx] + consumedUTXOs *setmap.SetMap[ids.ID, ids.ID] // TxID -> Consumed UTXOs + bytesAvailable int + droppedTxIDs *cache.LRU[ids.ID, error] // TxID -> verification error - unissuedDecisionTxs txheap.Heap - unissuedStakerTxs txheap.Heap + toEngine chan<- common.Message - // Key: Tx ID - // Value: Verification error - droppedTxIDs *cache.LRU[ids.ID, error] - - consumedUTXOs set.Set[ids.ID] - - blkTimer BlockTimer + numTxs prometheus.Gauge + bytesAvailableMetric prometheus.Gauge } -func NewMempool( +func New( namespace string, registerer prometheus.Registerer, - blkTimer BlockTimer, + toEngine chan<- common.Message, ) (Mempool, error) { - bytesAvailableMetric := prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "bytes_available", - Help: "Number of bytes of space currently available in the mempool", - }) - if err := registerer.Register(bytesAvailableMetric); err != nil { - return nil, err + m := &mempool{ + unissuedTxs: linkedhashmap.New[ids.ID, *txs.Tx](), + consumedUTXOs: setmap.New[ids.ID, ids.ID](), + bytesAvailable: maxMempoolSize, + droppedTxIDs: &cache.LRU[ids.ID, error]{Size: droppedTxIDsCacheSize}, + toEngine: toEngine, + numTxs: prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Name: "txs", + Help: "Number of decision/staker transactions in the mempool", + }), + bytesAvailableMetric: prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Name: "bytes_available", + Help: 
"Number of bytes of space currently available in the mempool", + }), } + m.bytesAvailableMetric.Set(maxMempoolSize) - unissuedDecisionTxs, err := txheap.NewWithMetrics( - txheap.NewByAge(), - fmt.Sprintf("%s_decision_txs", namespace), - registerer, + err := utils.Err( + registerer.Register(m.numTxs), + registerer.Register(m.bytesAvailableMetric), ) - if err != nil { - return nil, err - } - - unissuedStakerTxs, err := txheap.NewWithMetrics( - txheap.NewByStartTime(), - fmt.Sprintf("%s_staker_txs", namespace), - registerer, - ) - if err != nil { - return nil, err - } - - bytesAvailableMetric.Set(maxMempoolSize) - return &mempool{ - bytesAvailableMetric: bytesAvailableMetric, - bytesAvailable: maxMempoolSize, - unissuedDecisionTxs: unissuedDecisionTxs, - unissuedStakerTxs: unissuedStakerTxs, - droppedTxIDs: &cache.LRU[ids.ID, error]{Size: droppedTxIDsCacheSize}, - consumedUTXOs: set.NewSet[ids.ID](initialConsumedUTXOsSize), - dropIncoming: false, // enable tx adding by default - blkTimer: blkTimer, - }, nil -} - -func (m *mempool) EnableAdding() { - m.dropIncoming = false -} - -func (m *mempool) DisableAdding() { - m.dropIncoming = true + return m, err } func (m *mempool) Add(tx *txs.Tx) error { - if m.dropIncoming { - return fmt.Errorf("tx %s not added because mempool is closed", tx.ID()) + m.lock.Lock() + defer m.lock.Unlock() + + switch tx.Unsigned.(type) { + case *txs.AdvanceTimeTx: + return ErrCantIssueAdvanceTimeTx + case *txs.RewardValidatorTx: + return ErrCantIssueRewardValidatorTx + default: } // Note: a previously dropped tx can be re-added txID := tx.ID() - if m.Has(txID) { - return fmt.Errorf("duplicate tx %s", txID) + if _, ok := m.unissuedTxs.Get(txID); ok { + return fmt.Errorf("%w: %s", ErrDuplicateTx, txID) } - txBytes := tx.Bytes() - if len(txBytes) > targetTxSize { - return fmt.Errorf("tx %s size (%d) > target size (%d)", txID, len(txBytes), targetTxSize) + txSize := len(tx.Bytes()) + if txSize > MaxTxSize { + return fmt.Errorf("%w: %s size (%d) > 
max size (%d)", + ErrTxTooLarge, + txID, + txSize, + MaxTxSize, + ) } - if len(txBytes) > m.bytesAvailable { - return fmt.Errorf("%w, tx %s size (%d) exceeds available space (%d)", - errMempoolFull, + if txSize > m.bytesAvailable { + return fmt.Errorf("%w: %s size (%d) > available space (%d)", + ErrMempoolFull, txID, - len(txBytes), + txSize, m.bytesAvailable, ) } inputs := tx.Unsigned.InputIDs() - if m.consumedUTXOs.Overlaps(inputs) { - return fmt.Errorf("tx %s conflicts with a transaction in the mempool", txID) + if m.consumedUTXOs.HasOverlap(inputs) { + return fmt.Errorf("%w: %s", ErrConflictsWithOtherTx, txID) } - if err := tx.Unsigned.Visit(&issuer{ - m: m, - tx: tx, - }); err != nil { - return err - } + m.unissuedTxs.Put(txID, tx) + m.numTxs.Inc() + m.bytesAvailable -= txSize + m.bytesAvailableMetric.Set(float64(m.bytesAvailable)) // Mark these UTXOs as consumed in the mempool - m.consumedUTXOs.Union(inputs) + m.consumedUTXOs.Put(txID, inputs) // An explicitly added tx must not be marked as dropped. m.droppedTxIDs.Evict(txID) - m.blkTimer.ResetBlockTimer() return nil } -func (m *mempool) Has(txID ids.ID) bool { - return m.Get(txID) != nil -} - -func (m *mempool) Get(txID ids.ID) *txs.Tx { - if tx := m.unissuedDecisionTxs.Get(txID); tx != nil { - return tx - } - return m.unissuedStakerTxs.Get(txID) -} - -func (m *mempool) Remove(txsToRemove []*txs.Tx) { - remover := &remover{ - m: m, - } - - for _, tx := range txsToRemove { - remover.tx = tx - _ = tx.Unsigned.Visit(remover) - } +func (m *mempool) Get(txID ids.ID) (*txs.Tx, bool) { + return m.unissuedTxs.Get(txID) } -func (m *mempool) HasTxs() bool { - return m.unissuedDecisionTxs.Len() > 0 || m.unissuedStakerTxs.Len() > 0 -} +func (m *mempool) Remove(txs ...*txs.Tx) { + m.lock.Lock() + defer m.lock.Unlock() -func (m *mempool) PeekTxs(maxTxsBytes int) []*txs.Tx { - txs := m.unissuedDecisionTxs.List() - txs = append(txs, m.unissuedStakerTxs.List()...) 
+ for _, tx := range txs { + txID := tx.ID() + // If the transaction is in the mempool, remove it. + if _, ok := m.consumedUTXOs.DeleteKey(txID); ok { + m.unissuedTxs.Delete(txID) + m.bytesAvailable += len(tx.Bytes()) + continue + } - size := 0 - for i, tx := range txs { - size += len(tx.Bytes()) - if size > maxTxsBytes { - return txs[:i] + // If the transaction isn't in the mempool, remove any conflicts it has. + inputs := tx.Unsigned.InputIDs() + for _, removed := range m.consumedUTXOs.DeleteOverlapping(inputs) { + tx, _ := m.unissuedTxs.Get(removed.Key) + m.unissuedTxs.Delete(removed.Key) + m.bytesAvailable += len(tx.Bytes()) } } - return txs + m.bytesAvailableMetric.Set(float64(m.bytesAvailable)) + m.numTxs.Set(float64(m.unissuedTxs.Len())) } -func (m *mempool) addDecisionTx(tx *txs.Tx) { - m.unissuedDecisionTxs.Add(tx) - m.register(tx) +func (m *mempool) Peek() (*txs.Tx, bool) { + _, tx, exists := m.unissuedTxs.Oldest() + return tx, exists } -func (m *mempool) addStakerTx(tx *txs.Tx) { - m.unissuedStakerTxs.Add(tx) - m.register(tx) -} +func (m *mempool) Iterate(f func(tx *txs.Tx) bool) { + m.lock.RLock() + defer m.lock.RUnlock() -func (m *mempool) HasStakerTx() bool { - return m.unissuedStakerTxs.Len() > 0 -} - -func (m *mempool) removeDecisionTxs(txs []*txs.Tx) { - for _, tx := range txs { - txID := tx.ID() - if m.unissuedDecisionTxs.Remove(txID) != nil { - m.deregister(tx) + itr := m.unissuedTxs.NewIterator() + for itr.Next() { + if !f(itr.Value()) { + return } } } -func (m *mempool) removeStakerTx(tx *txs.Tx) { - txID := tx.ID() - if m.unissuedStakerTxs.Remove(txID) != nil { - m.deregister(tx) +func (m *mempool) MarkDropped(txID ids.ID, reason error) { + if errors.Is(reason, ErrMempoolFull) { + return } -} -func (m *mempool) PeekStakerTx() *txs.Tx { - if m.unissuedStakerTxs.Len() == 0 { - return nil - } + m.lock.RLock() + defer m.lock.RUnlock() - return m.unissuedStakerTxs.Peek() -} + if _, ok := m.unissuedTxs.Get(txID); ok { + return + } -func (m *mempool) 
MarkDropped(txID ids.ID, reason error) { m.droppedTxIDs.Put(txID, reason) } @@ -284,17 +239,20 @@ func (m *mempool) GetDropReason(txID ids.ID) error { return err } -func (m *mempool) register(tx *txs.Tx) { - txBytes := tx.Bytes() - m.bytesAvailable -= len(txBytes) - m.bytesAvailableMetric.Set(float64(m.bytesAvailable)) +func (m *mempool) RequestBuildBlock(emptyBlockPermitted bool) { + if !emptyBlockPermitted && m.unissuedTxs.Len() == 0 { + return + } + + select { + case m.toEngine <- common.PendingTxs: + default: + } } -func (m *mempool) deregister(tx *txs.Tx) { - txBytes := tx.Bytes() - m.bytesAvailable += len(txBytes) - m.bytesAvailableMetric.Set(float64(m.bytesAvailable)) +func (m *mempool) Len() int { + m.lock.RLock() + defer m.lock.RUnlock() - inputs := tx.Unsigned.InputIDs() - m.consumedUTXOs.Difference(inputs) + return m.unissuedTxs.Len() } diff --git a/avalanchego/vms/platformvm/txs/mempool/mempool_test.go b/avalanchego/vms/platformvm/txs/mempool/mempool_test.go index ba25fac2..3fadca5f 100644 --- a/avalanchego/vms/platformvm/txs/mempool/mempool_test.go +++ b/avalanchego/vms/platformvm/txs/mempool/mempool_test.go @@ -1,32 +1,24 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package mempool import ( - "errors" - "math" "testing" "time" "github.com/prometheus/client_golang/prometheus" - "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" - "github.com/ava-labs/avalanchego/utils/timer/mockable" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) -var _ BlockTimer = (*noopBlkTimer)(nil) - -type noopBlkTimer struct{} - -func (*noopBlkTimer) ResetBlockTimer() {} - var preFundedKeys = secp256k1.TestKeys() // shows that valid tx is not added to mempool if this would exceed its maximum @@ -35,7 +27,7 @@ func TestBlockBuilderMaxMempoolSizeHandling(t *testing.T) { require := require.New(t) registerer := prometheus.NewRegistry() - mpool, err := NewMempool("mempool", registerer, &noopBlkTimer{}) + mpool, err := New("mempool", registerer, nil) require.NoError(err) decisionTxs, err := createTestDecisionTxs(1) @@ -46,7 +38,12 @@ func TestBlockBuilderMaxMempoolSizeHandling(t *testing.T) { mpool.(*mempool).bytesAvailable = len(tx.Bytes()) - 1 err = mpool.Add(tx) - require.True(errors.Is(err, errMempoolFull), err, "max mempool size breached") + require.ErrorIs(err, ErrMempoolFull) + + // tx should not be marked as dropped if the mempool is full + txID := tx.ID() + mpool.MarkDropped(txID, err) + require.NoError(mpool.GetDropReason(txID)) // shortcut to simulated almost filled mempool mpool.(*mempool).bytesAvailable = len(tx.Bytes()) @@ -59,48 +56,30 @@ func TestDecisionTxsInMempool(t *testing.T) { require := require.New(t) registerer := prometheus.NewRegistry() - mpool, err := NewMempool("mempool", registerer, &noopBlkTimer{}) + mpool, err := New("mempool", registerer, nil) require.NoError(err) decisionTxs, err := createTestDecisionTxs(2) require.NoError(err) - // txs 
must not already there before we start - require.False(mpool.HasTxs()) - for _, tx := range decisionTxs { // tx not already there - require.False(mpool.Has(tx.ID())) + _, ok := mpool.Get(tx.ID()) + require.False(ok) // we can insert require.NoError(mpool.Add(tx)) // we can get it - require.True(mpool.Has(tx.ID())) - - retrieved := mpool.Get(tx.ID()) - require.True(retrieved != nil) - require.Equal(tx, retrieved) - - // we can peek it - peeked := mpool.PeekTxs(math.MaxInt) - - // tx will be among those peeked, - // in NO PARTICULAR ORDER - found := false - for _, pk := range peeked { - if pk.ID() == tx.ID() { - found = true - break - } - } - require.True(found) + got, ok := mpool.Get(tx.ID()) + require.True(ok) + require.Equal(tx, got) // once removed it cannot be there - mpool.Remove([]*txs.Tx{tx}) + mpool.Remove(tx) - require.False(mpool.Has(tx.ID())) - require.Equal((*txs.Tx)(nil), mpool.Get(tx.ID())) + _, ok = mpool.Get(tx.ID()) + require.False(ok) // we can reinsert it again to grow the mempool require.NoError(mpool.Add(tx)) @@ -111,7 +90,7 @@ func TestProposalTxsInMempool(t *testing.T) { require := require.New(t) registerer := prometheus.NewRegistry() - mpool, err := NewMempool("mempool", registerer, &noopBlkTimer{}) + mpool, err := New("mempool", registerer, nil) require.NoError(err) // The proposal txs are ordered by decreasing start time. 
This means after @@ -120,52 +99,23 @@ func TestProposalTxsInMempool(t *testing.T) { proposalTxs, err := createTestProposalTxs(2) require.NoError(err) - // txs should not be already there - require.False(mpool.HasStakerTx()) - - for i, tx := range proposalTxs { - require.False(mpool.Has(tx.ID())) + for _, tx := range proposalTxs { + _, ok := mpool.Get(tx.ID()) + require.False(ok) // we can insert require.NoError(mpool.Add(tx)) // we can get it - require.True(mpool.HasStakerTx()) - require.True(mpool.Has(tx.ID())) - - retrieved := mpool.Get(tx.ID()) - require.True(retrieved != nil) - require.Equal(tx, retrieved) - - { - // we can peek it - peeked := mpool.PeekStakerTx() - require.True(peeked != nil) - require.Equal(tx, peeked) - } - - { - // we can peek it - peeked := mpool.PeekTxs(math.MaxInt) - require.Len(peeked, i+1) - - // tx will be among those peeked, - // in NO PARTICULAR ORDER - found := false - for _, pk := range peeked { - if pk.ID() == tx.ID() { - found = true - break - } - } - require.True(found) - } + got, ok := mpool.Get(tx.ID()) + require.Equal(tx, got) + require.True(ok) // once removed it cannot be there - mpool.Remove([]*txs.Tx{tx}) + mpool.Remove(tx) - require.False(mpool.Has(tx.ID())) - require.Equal((*txs.Tx)(nil), mpool.Get(tx.ID())) + _, ok = mpool.Get(tx.ID()) + require.False(ok) // we can reinsert it again to grow the mempool require.NoError(mpool.Add(tx)) @@ -220,20 +170,13 @@ func createTestDecisionTxs(count int) ([]*txs.Tx, error) { // Proposal txs are sorted by decreasing start time func createTestProposalTxs(count int) ([]*txs.Tx, error) { - var clk mockable.Clock + now := time.Now() proposalTxs := make([]*txs.Tx, 0, count) for i := 0; i < count; i++ { - utx := &txs.AddValidatorTx{ - BaseTx: txs.BaseTx{}, - Validator: txs.Validator{ - Start: uint64(clk.Time().Add(time.Duration(count-i) * time.Second).Unix()), - }, - StakeOuts: nil, - RewardsOwner: &secp256k1fx.OutputOwners{}, - DelegationShares: 100, - } - - tx, err := 
txs.NewSigned(utx, txs.Codec, nil) + tx, err := generateAddValidatorTx( + uint64(now.Add(time.Duration(count-i)*time.Second).Unix()), // startTime + 0, // endTime + ) if err != nil { return nil, err } @@ -241,3 +184,116 @@ func createTestProposalTxs(count int) ([]*txs.Tx, error) { } return proposalTxs, nil } + +func generateAddValidatorTx(startTime uint64, endTime uint64) (*txs.Tx, error) { + utx := &txs.AddValidatorTx{ + BaseTx: txs.BaseTx{}, + Validator: txs.Validator{ + NodeID: ids.GenerateTestNodeID(), + Start: startTime, + End: endTime, + }, + StakeOuts: nil, + RewardsOwner: &secp256k1fx.OutputOwners{}, + DelegationShares: 100, + } + + return txs.NewSigned(utx, txs.Codec, nil) +} + +func TestPeekTxs(t *testing.T) { + require := require.New(t) + + registerer := prometheus.NewRegistry() + toEngine := make(chan common.Message, 100) + mempool, err := New("mempool", registerer, toEngine) + require.NoError(err) + + testDecisionTxs, err := createTestDecisionTxs(1) + require.NoError(err) + testProposalTxs, err := createTestProposalTxs(1) + require.NoError(err) + + tx, exists := mempool.Peek() + require.False(exists) + require.Nil(tx) + + require.NoError(mempool.Add(testDecisionTxs[0])) + require.NoError(mempool.Add(testProposalTxs[0])) + + tx, exists = mempool.Peek() + require.True(exists) + require.Equal(tx, testDecisionTxs[0]) + require.NotEqual(tx, testProposalTxs[0]) + + mempool.Remove(testDecisionTxs[0]) + + tx, exists = mempool.Peek() + require.True(exists) + require.NotEqual(tx, testDecisionTxs[0]) + require.Equal(tx, testProposalTxs[0]) + + mempool.Remove(testProposalTxs[0]) + + tx, exists = mempool.Peek() + require.False(exists) + require.Nil(tx) +} + +func TestRemoveConflicts(t *testing.T) { + require := require.New(t) + + registerer := prometheus.NewRegistry() + toEngine := make(chan common.Message, 100) + mempool, err := New("mempool", registerer, toEngine) + require.NoError(err) + + txs, err := createTestDecisionTxs(1) + require.NoError(err) + 
conflictTxs, err := createTestDecisionTxs(1) + require.NoError(err) + + require.NoError(mempool.Add(txs[0])) + + tx, exists := mempool.Peek() + require.True(exists) + require.Equal(tx, txs[0]) + + mempool.Remove(conflictTxs[0]) + + _, exists = mempool.Peek() + require.False(exists) +} + +func TestIterate(t *testing.T) { + require := require.New(t) + + registerer := prometheus.NewRegistry() + toEngine := make(chan common.Message, 100) + mempool, err := New("mempool", registerer, toEngine) + require.NoError(err) + + testDecisionTxs, err := createTestDecisionTxs(1) + require.NoError(err) + decisionTx := testDecisionTxs[0] + + testProposalTxs, err := createTestProposalTxs(1) + require.NoError(err) + proposalTx := testProposalTxs[0] + + require.NoError(mempool.Add(decisionTx)) + require.NoError(mempool.Add(proposalTx)) + + expectedSet := set.Of( + decisionTx.ID(), + proposalTx.ID(), + ) + + set := set.NewSet[ids.ID](2) + mempool.Iterate(func(tx *txs.Tx) bool { + set.Add(tx.ID()) + return true + }) + + require.Equal(expectedSet, set) +} diff --git a/avalanchego/vms/platformvm/txs/mempool/mock_mempool.go b/avalanchego/vms/platformvm/txs/mempool/mock_mempool.go index b4ce5e4b..c47f42e9 100644 --- a/avalanchego/vms/platformvm/txs/mempool/mock_mempool.go +++ b/avalanchego/vms/platformvm/txs/mempool/mock_mempool.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/vms/platformvm/txs/mempool (interfaces: Mempool) +// +// Generated by this command: +// +// mockgen -package=mempool -destination=vms/platformvm/txs/mempool/mock_mempool.go github.com/ava-labs/avalanchego/vms/platformvm/txs/mempool Mempool +// // Package mempool is a generated GoMock package. 
package mempool @@ -12,7 +14,7 @@ import ( ids "github.com/ava-labs/avalanchego/ids" txs "github.com/ava-labs/avalanchego/vms/platformvm/txs" - gomock "github.com/golang/mock/gomock" + gomock "go.uber.org/mock/gomock" ) // MockMempool is a mock of Mempool interface. @@ -47,45 +49,22 @@ func (m *MockMempool) Add(arg0 *txs.Tx) error { } // Add indicates an expected call of Add. -func (mr *MockMempoolMockRecorder) Add(arg0 interface{}) *gomock.Call { +func (mr *MockMempoolMockRecorder) Add(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Add", reflect.TypeOf((*MockMempool)(nil).Add), arg0) } -// DisableAdding mocks base method. -func (m *MockMempool) DisableAdding() { - m.ctrl.T.Helper() - m.ctrl.Call(m, "DisableAdding") -} - -// DisableAdding indicates an expected call of DisableAdding. -func (mr *MockMempoolMockRecorder) DisableAdding() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DisableAdding", reflect.TypeOf((*MockMempool)(nil).DisableAdding)) -} - -// EnableAdding mocks base method. -func (m *MockMempool) EnableAdding() { - m.ctrl.T.Helper() - m.ctrl.Call(m, "EnableAdding") -} - -// EnableAdding indicates an expected call of EnableAdding. -func (mr *MockMempoolMockRecorder) EnableAdding() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnableAdding", reflect.TypeOf((*MockMempool)(nil).EnableAdding)) -} - // Get mocks base method. -func (m *MockMempool) Get(arg0 ids.ID) *txs.Tx { +func (m *MockMempool) Get(arg0 ids.ID) (*txs.Tx, bool) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Get", arg0) ret0, _ := ret[0].(*txs.Tx) - return ret0 + ret1, _ := ret[1].(bool) + return ret0, ret1 } // Get indicates an expected call of Get. 
-func (mr *MockMempoolMockRecorder) Get(arg0 interface{}) *gomock.Call { +func (mr *MockMempoolMockRecorder) Get(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockMempool)(nil).Get), arg0) } @@ -99,51 +78,35 @@ func (m *MockMempool) GetDropReason(arg0 ids.ID) error { } // GetDropReason indicates an expected call of GetDropReason. -func (mr *MockMempoolMockRecorder) GetDropReason(arg0 interface{}) *gomock.Call { +func (mr *MockMempoolMockRecorder) GetDropReason(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDropReason", reflect.TypeOf((*MockMempool)(nil).GetDropReason), arg0) } -// Has mocks base method. -func (m *MockMempool) Has(arg0 ids.ID) bool { +// Iterate mocks base method. +func (m *MockMempool) Iterate(arg0 func(*txs.Tx) bool) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Has", arg0) - ret0, _ := ret[0].(bool) - return ret0 -} - -// Has indicates an expected call of Has. -func (mr *MockMempoolMockRecorder) Has(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Has", reflect.TypeOf((*MockMempool)(nil).Has), arg0) -} - -// HasStakerTx mocks base method. -func (m *MockMempool) HasStakerTx() bool { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "HasStakerTx") - ret0, _ := ret[0].(bool) - return ret0 + m.ctrl.Call(m, "Iterate", arg0) } -// HasStakerTx indicates an expected call of HasStakerTx. -func (mr *MockMempoolMockRecorder) HasStakerTx() *gomock.Call { +// Iterate indicates an expected call of Iterate. +func (mr *MockMempoolMockRecorder) Iterate(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HasStakerTx", reflect.TypeOf((*MockMempool)(nil).HasStakerTx)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Iterate", reflect.TypeOf((*MockMempool)(nil).Iterate), arg0) } -// HasTxs mocks base method. 
-func (m *MockMempool) HasTxs() bool { +// Len mocks base method. +func (m *MockMempool) Len() int { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "HasTxs") - ret0, _ := ret[0].(bool) + ret := m.ctrl.Call(m, "Len") + ret0, _ := ret[0].(int) return ret0 } -// HasTxs indicates an expected call of HasTxs. -func (mr *MockMempoolMockRecorder) HasTxs() *gomock.Call { +// Len indicates an expected call of Len. +func (mr *MockMempoolMockRecorder) Len() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HasTxs", reflect.TypeOf((*MockMempool)(nil).HasTxs)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Len", reflect.TypeOf((*MockMempool)(nil).Len)) } // MarkDropped mocks base method. @@ -153,47 +116,50 @@ func (m *MockMempool) MarkDropped(arg0 ids.ID, arg1 error) { } // MarkDropped indicates an expected call of MarkDropped. -func (mr *MockMempoolMockRecorder) MarkDropped(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockMempoolMockRecorder) MarkDropped(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarkDropped", reflect.TypeOf((*MockMempool)(nil).MarkDropped), arg0, arg1) } -// PeekStakerTx mocks base method. -func (m *MockMempool) PeekStakerTx() *txs.Tx { +// Peek mocks base method. +func (m *MockMempool) Peek() (*txs.Tx, bool) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PeekStakerTx") + ret := m.ctrl.Call(m, "Peek") ret0, _ := ret[0].(*txs.Tx) - return ret0 + ret1, _ := ret[1].(bool) + return ret0, ret1 } -// PeekStakerTx indicates an expected call of PeekStakerTx. -func (mr *MockMempoolMockRecorder) PeekStakerTx() *gomock.Call { +// Peek indicates an expected call of Peek. 
+func (mr *MockMempoolMockRecorder) Peek() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PeekStakerTx", reflect.TypeOf((*MockMempool)(nil).PeekStakerTx)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Peek", reflect.TypeOf((*MockMempool)(nil).Peek)) } -// PeekTxs mocks base method. -func (m *MockMempool) PeekTxs(arg0 int) []*txs.Tx { +// Remove mocks base method. +func (m *MockMempool) Remove(arg0 ...*txs.Tx) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PeekTxs", arg0) - ret0, _ := ret[0].([]*txs.Tx) - return ret0 + varargs := []any{} + for _, a := range arg0 { + varargs = append(varargs, a) + } + m.ctrl.Call(m, "Remove", varargs...) } -// PeekTxs indicates an expected call of PeekTxs. -func (mr *MockMempoolMockRecorder) PeekTxs(arg0 interface{}) *gomock.Call { +// Remove indicates an expected call of Remove. +func (mr *MockMempoolMockRecorder) Remove(arg0 ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PeekTxs", reflect.TypeOf((*MockMempool)(nil).PeekTxs), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Remove", reflect.TypeOf((*MockMempool)(nil).Remove), arg0...) } -// Remove mocks base method. -func (m *MockMempool) Remove(arg0 []*txs.Tx) { +// RequestBuildBlock mocks base method. +func (m *MockMempool) RequestBuildBlock(arg0 bool) { m.ctrl.T.Helper() - m.ctrl.Call(m, "Remove", arg0) + m.ctrl.Call(m, "RequestBuildBlock", arg0) } -// Remove indicates an expected call of Remove. -func (mr *MockMempoolMockRecorder) Remove(arg0 interface{}) *gomock.Call { +// RequestBuildBlock indicates an expected call of RequestBuildBlock. 
+func (mr *MockMempoolMockRecorder) RequestBuildBlock(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Remove", reflect.TypeOf((*MockMempool)(nil).Remove), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RequestBuildBlock", reflect.TypeOf((*MockMempool)(nil).RequestBuildBlock), arg0) } diff --git a/avalanchego/vms/platformvm/txs/mempool/remover.go b/avalanchego/vms/platformvm/txs/mempool/remover.go deleted file mode 100644 index fcdeca38..00000000 --- a/avalanchego/vms/platformvm/txs/mempool/remover.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package mempool - -import "github.com/ava-labs/avalanchego/vms/platformvm/txs" - -var _ txs.Visitor = (*remover)(nil) - -type remover struct { - m *mempool - tx *txs.Tx -} - -func (r *remover) AddValidatorTx(*txs.AddValidatorTx) error { - r.m.removeStakerTx(r.tx) - return nil -} - -func (r *remover) AddSubnetValidatorTx(*txs.AddSubnetValidatorTx) error { - r.m.removeStakerTx(r.tx) - return nil -} - -func (r *remover) AddDelegatorTx(*txs.AddDelegatorTx) error { - r.m.removeStakerTx(r.tx) - return nil -} - -func (r *remover) RemoveSubnetValidatorTx(*txs.RemoveSubnetValidatorTx) error { - r.m.removeDecisionTxs([]*txs.Tx{r.tx}) - return nil -} - -func (r *remover) CreateChainTx(*txs.CreateChainTx) error { - r.m.removeDecisionTxs([]*txs.Tx{r.tx}) - return nil -} - -func (r *remover) CreateSubnetTx(*txs.CreateSubnetTx) error { - r.m.removeDecisionTxs([]*txs.Tx{r.tx}) - return nil -} - -func (r *remover) ImportTx(*txs.ImportTx) error { - r.m.removeDecisionTxs([]*txs.Tx{r.tx}) - return nil -} - -func (r *remover) ExportTx(*txs.ExportTx) error { - r.m.removeDecisionTxs([]*txs.Tx{r.tx}) - return nil -} - -func (r *remover) TransformSubnetTx(*txs.TransformSubnetTx) error { - r.m.removeDecisionTxs([]*txs.Tx{r.tx}) - return nil -} - -func (r *remover) 
AddPermissionlessValidatorTx(*txs.AddPermissionlessValidatorTx) error { - r.m.removeStakerTx(r.tx) - return nil -} - -func (r *remover) AddPermissionlessDelegatorTx(*txs.AddPermissionlessDelegatorTx) error { - r.m.removeStakerTx(r.tx) - return nil -} - -func (*remover) AdvanceTimeTx(*txs.AdvanceTimeTx) error { - // this tx is never in mempool - return nil -} - -func (*remover) RewardValidatorTx(*txs.RewardValidatorTx) error { - // this tx is never in mempool - return nil -} diff --git a/avalanchego/vms/platformvm/txs/mock_staker.go b/avalanchego/vms/platformvm/txs/mock_staker.go deleted file mode 100644 index 0fbaea26..00000000 --- a/avalanchego/vms/platformvm/txs/mock_staker.go +++ /dev/null @@ -1,154 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/ava-labs/avalanchego/vms/platformvm/txs (interfaces: Staker) - -// Package txs is a generated GoMock package. -package txs - -import ( - reflect "reflect" - time "time" - - ids "github.com/ava-labs/avalanchego/ids" - bls "github.com/ava-labs/avalanchego/utils/crypto/bls" - gomock "github.com/golang/mock/gomock" -) - -// MockStaker is a mock of Staker interface. -type MockStaker struct { - ctrl *gomock.Controller - recorder *MockStakerMockRecorder -} - -// MockStakerMockRecorder is the mock recorder for MockStaker. -type MockStakerMockRecorder struct { - mock *MockStaker -} - -// NewMockStaker creates a new mock instance. -func NewMockStaker(ctrl *gomock.Controller) *MockStaker { - mock := &MockStaker{ctrl: ctrl} - mock.recorder = &MockStakerMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockStaker) EXPECT() *MockStakerMockRecorder { - return m.recorder -} - -// CurrentPriority mocks base method. 
-func (m *MockStaker) CurrentPriority() Priority { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CurrentPriority") - ret0, _ := ret[0].(Priority) - return ret0 -} - -// CurrentPriority indicates an expected call of CurrentPriority. -func (mr *MockStakerMockRecorder) CurrentPriority() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CurrentPriority", reflect.TypeOf((*MockStaker)(nil).CurrentPriority)) -} - -// EndTime mocks base method. -func (m *MockStaker) EndTime() time.Time { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "EndTime") - ret0, _ := ret[0].(time.Time) - return ret0 -} - -// EndTime indicates an expected call of EndTime. -func (mr *MockStakerMockRecorder) EndTime() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EndTime", reflect.TypeOf((*MockStaker)(nil).EndTime)) -} - -// NodeID mocks base method. -func (m *MockStaker) NodeID() ids.NodeID { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NodeID") - ret0, _ := ret[0].(ids.NodeID) - return ret0 -} - -// NodeID indicates an expected call of NodeID. -func (mr *MockStakerMockRecorder) NodeID() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeID", reflect.TypeOf((*MockStaker)(nil).NodeID)) -} - -// PendingPriority mocks base method. -func (m *MockStaker) PendingPriority() Priority { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PendingPriority") - ret0, _ := ret[0].(Priority) - return ret0 -} - -// PendingPriority indicates an expected call of PendingPriority. -func (mr *MockStakerMockRecorder) PendingPriority() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PendingPriority", reflect.TypeOf((*MockStaker)(nil).PendingPriority)) -} - -// PublicKey mocks base method. 
-func (m *MockStaker) PublicKey() (*bls.PublicKey, bool, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PublicKey") - ret0, _ := ret[0].(*bls.PublicKey) - ret1, _ := ret[1].(bool) - ret2, _ := ret[2].(error) - return ret0, ret1, ret2 -} - -// PublicKey indicates an expected call of PublicKey. -func (mr *MockStakerMockRecorder) PublicKey() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PublicKey", reflect.TypeOf((*MockStaker)(nil).PublicKey)) -} - -// StartTime mocks base method. -func (m *MockStaker) StartTime() time.Time { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "StartTime") - ret0, _ := ret[0].(time.Time) - return ret0 -} - -// StartTime indicates an expected call of StartTime. -func (mr *MockStakerMockRecorder) StartTime() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StartTime", reflect.TypeOf((*MockStaker)(nil).StartTime)) -} - -// SubnetID mocks base method. -func (m *MockStaker) SubnetID() ids.ID { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SubnetID") - ret0, _ := ret[0].(ids.ID) - return ret0 -} - -// SubnetID indicates an expected call of SubnetID. -func (mr *MockStakerMockRecorder) SubnetID() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubnetID", reflect.TypeOf((*MockStaker)(nil).SubnetID)) -} - -// Weight mocks base method. -func (m *MockStaker) Weight() uint64 { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Weight") - ret0, _ := ret[0].(uint64) - return ret0 -} - -// Weight indicates an expected call of Weight. 
-func (mr *MockStakerMockRecorder) Weight() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Weight", reflect.TypeOf((*MockStaker)(nil).Weight)) -} diff --git a/avalanchego/vms/platformvm/txs/mock_staker_tx.go b/avalanchego/vms/platformvm/txs/mock_staker_tx.go new file mode 100644 index 00000000..2e01b15b --- /dev/null +++ b/avalanchego/vms/platformvm/txs/mock_staker_tx.go @@ -0,0 +1,265 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: vms/platformvm/txs/staker_tx.go +// +// Generated by this command: +// +// mockgen -source=vms/platformvm/txs/staker_tx.go -destination=vms/platformvm/txs/mock_staker_tx.go -package=txs -exclude_interfaces=ValidatorTx,DelegatorTx,StakerTx,PermissionlessStaker +// + +// Package txs is a generated GoMock package. +package txs + +import ( + reflect "reflect" + time "time" + + ids "github.com/ava-labs/avalanchego/ids" + bls "github.com/ava-labs/avalanchego/utils/crypto/bls" + gomock "go.uber.org/mock/gomock" +) + +// MockStaker is a mock of Staker interface. +type MockStaker struct { + ctrl *gomock.Controller + recorder *MockStakerMockRecorder +} + +// MockStakerMockRecorder is the mock recorder for MockStaker. +type MockStakerMockRecorder struct { + mock *MockStaker +} + +// NewMockStaker creates a new mock instance. +func NewMockStaker(ctrl *gomock.Controller) *MockStaker { + mock := &MockStaker{ctrl: ctrl} + mock.recorder = &MockStakerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockStaker) EXPECT() *MockStakerMockRecorder { + return m.recorder +} + +// CurrentPriority mocks base method. +func (m *MockStaker) CurrentPriority() Priority { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CurrentPriority") + ret0, _ := ret[0].(Priority) + return ret0 +} + +// CurrentPriority indicates an expected call of CurrentPriority. 
+func (mr *MockStakerMockRecorder) CurrentPriority() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CurrentPriority", reflect.TypeOf((*MockStaker)(nil).CurrentPriority)) +} + +// EndTime mocks base method. +func (m *MockStaker) EndTime() time.Time { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "EndTime") + ret0, _ := ret[0].(time.Time) + return ret0 +} + +// EndTime indicates an expected call of EndTime. +func (mr *MockStakerMockRecorder) EndTime() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EndTime", reflect.TypeOf((*MockStaker)(nil).EndTime)) +} + +// NodeID mocks base method. +func (m *MockStaker) NodeID() ids.NodeID { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NodeID") + ret0, _ := ret[0].(ids.NodeID) + return ret0 +} + +// NodeID indicates an expected call of NodeID. +func (mr *MockStakerMockRecorder) NodeID() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeID", reflect.TypeOf((*MockStaker)(nil).NodeID)) +} + +// PublicKey mocks base method. +func (m *MockStaker) PublicKey() (*bls.PublicKey, bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PublicKey") + ret0, _ := ret[0].(*bls.PublicKey) + ret1, _ := ret[1].(bool) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// PublicKey indicates an expected call of PublicKey. +func (mr *MockStakerMockRecorder) PublicKey() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PublicKey", reflect.TypeOf((*MockStaker)(nil).PublicKey)) +} + +// SubnetID mocks base method. +func (m *MockStaker) SubnetID() ids.ID { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SubnetID") + ret0, _ := ret[0].(ids.ID) + return ret0 +} + +// SubnetID indicates an expected call of SubnetID. 
+func (mr *MockStakerMockRecorder) SubnetID() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubnetID", reflect.TypeOf((*MockStaker)(nil).SubnetID)) +} + +// Weight mocks base method. +func (m *MockStaker) Weight() uint64 { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Weight") + ret0, _ := ret[0].(uint64) + return ret0 +} + +// Weight indicates an expected call of Weight. +func (mr *MockStakerMockRecorder) Weight() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Weight", reflect.TypeOf((*MockStaker)(nil).Weight)) +} + +// MockScheduledStaker is a mock of ScheduledStaker interface. +type MockScheduledStaker struct { + ctrl *gomock.Controller + recorder *MockScheduledStakerMockRecorder +} + +// MockScheduledStakerMockRecorder is the mock recorder for MockScheduledStaker. +type MockScheduledStakerMockRecorder struct { + mock *MockScheduledStaker +} + +// NewMockScheduledStaker creates a new mock instance. +func NewMockScheduledStaker(ctrl *gomock.Controller) *MockScheduledStaker { + mock := &MockScheduledStaker{ctrl: ctrl} + mock.recorder = &MockScheduledStakerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockScheduledStaker) EXPECT() *MockScheduledStakerMockRecorder { + return m.recorder +} + +// CurrentPriority mocks base method. +func (m *MockScheduledStaker) CurrentPriority() Priority { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CurrentPriority") + ret0, _ := ret[0].(Priority) + return ret0 +} + +// CurrentPriority indicates an expected call of CurrentPriority. +func (mr *MockScheduledStakerMockRecorder) CurrentPriority() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CurrentPriority", reflect.TypeOf((*MockScheduledStaker)(nil).CurrentPriority)) +} + +// EndTime mocks base method. 
+func (m *MockScheduledStaker) EndTime() time.Time { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "EndTime") + ret0, _ := ret[0].(time.Time) + return ret0 +} + +// EndTime indicates an expected call of EndTime. +func (mr *MockScheduledStakerMockRecorder) EndTime() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EndTime", reflect.TypeOf((*MockScheduledStaker)(nil).EndTime)) +} + +// NodeID mocks base method. +func (m *MockScheduledStaker) NodeID() ids.NodeID { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NodeID") + ret0, _ := ret[0].(ids.NodeID) + return ret0 +} + +// NodeID indicates an expected call of NodeID. +func (mr *MockScheduledStakerMockRecorder) NodeID() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeID", reflect.TypeOf((*MockScheduledStaker)(nil).NodeID)) +} + +// PendingPriority mocks base method. +func (m *MockScheduledStaker) PendingPriority() Priority { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PendingPriority") + ret0, _ := ret[0].(Priority) + return ret0 +} + +// PendingPriority indicates an expected call of PendingPriority. +func (mr *MockScheduledStakerMockRecorder) PendingPriority() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PendingPriority", reflect.TypeOf((*MockScheduledStaker)(nil).PendingPriority)) +} + +// PublicKey mocks base method. +func (m *MockScheduledStaker) PublicKey() (*bls.PublicKey, bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PublicKey") + ret0, _ := ret[0].(*bls.PublicKey) + ret1, _ := ret[1].(bool) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// PublicKey indicates an expected call of PublicKey. 
+func (mr *MockScheduledStakerMockRecorder) PublicKey() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PublicKey", reflect.TypeOf((*MockScheduledStaker)(nil).PublicKey)) +} + +// StartTime mocks base method. +func (m *MockScheduledStaker) StartTime() time.Time { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StartTime") + ret0, _ := ret[0].(time.Time) + return ret0 +} + +// StartTime indicates an expected call of StartTime. +func (mr *MockScheduledStakerMockRecorder) StartTime() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StartTime", reflect.TypeOf((*MockScheduledStaker)(nil).StartTime)) +} + +// SubnetID mocks base method. +func (m *MockScheduledStaker) SubnetID() ids.ID { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SubnetID") + ret0, _ := ret[0].(ids.ID) + return ret0 +} + +// SubnetID indicates an expected call of SubnetID. +func (mr *MockScheduledStakerMockRecorder) SubnetID() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubnetID", reflect.TypeOf((*MockScheduledStaker)(nil).SubnetID)) +} + +// Weight mocks base method. +func (m *MockScheduledStaker) Weight() uint64 { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Weight") + ret0, _ := ret[0].(uint64) + return ret0 +} + +// Weight indicates an expected call of Weight. +func (mr *MockScheduledStakerMockRecorder) Weight() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Weight", reflect.TypeOf((*MockScheduledStaker)(nil).Weight)) +} diff --git a/avalanchego/vms/platformvm/txs/mock_unsigned_tx.go b/avalanchego/vms/platformvm/txs/mock_unsigned_tx.go index 8d95b09b..f775c520 100644 --- a/avalanchego/vms/platformvm/txs/mock_unsigned_tx.go +++ b/avalanchego/vms/platformvm/txs/mock_unsigned_tx.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
-// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. -// Source: github.com/ava-labs/avalanchego/vms/platformvm/txs (interfaces: UnsignedTx) +// Source: vms/platformvm/txs/unsigned_tx.go +// +// Generated by this command: +// +// mockgen -source=vms/platformvm/txs/unsigned_tx.go -destination=vms/platformvm/txs/mock_unsigned_tx.go -package=txs -exclude_interfaces= +// // Package txs is a generated GoMock package. package txs @@ -14,7 +16,7 @@ import ( snow "github.com/ava-labs/avalanchego/snow" set "github.com/ava-labs/avalanchego/utils/set" avax "github.com/ava-labs/avalanchego/vms/components/avax" - gomock "github.com/golang/mock/gomock" + gomock "go.uber.org/mock/gomock" ) // MockUnsignedTx is a mock of UnsignedTx interface. @@ -55,15 +57,15 @@ func (mr *MockUnsignedTxMockRecorder) Bytes() *gomock.Call { } // InitCtx mocks base method. -func (m *MockUnsignedTx) InitCtx(arg0 *snow.Context) { +func (m *MockUnsignedTx) InitCtx(ctx *snow.Context) { m.ctrl.T.Helper() - m.ctrl.Call(m, "InitCtx", arg0) + m.ctrl.Call(m, "InitCtx", ctx) } // InitCtx indicates an expected call of InitCtx. -func (mr *MockUnsignedTxMockRecorder) InitCtx(arg0 interface{}) *gomock.Call { +func (mr *MockUnsignedTxMockRecorder) InitCtx(ctx any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InitCtx", reflect.TypeOf((*MockUnsignedTx)(nil).InitCtx), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InitCtx", reflect.TypeOf((*MockUnsignedTx)(nil).InitCtx), ctx) } // InputIDs mocks base method. @@ -95,41 +97,41 @@ func (mr *MockUnsignedTxMockRecorder) Outputs() *gomock.Call { } // SetBytes mocks base method. -func (m *MockUnsignedTx) SetBytes(arg0 []byte) { +func (m *MockUnsignedTx) SetBytes(unsignedBytes []byte) { m.ctrl.T.Helper() - m.ctrl.Call(m, "SetBytes", arg0) + m.ctrl.Call(m, "SetBytes", unsignedBytes) } // SetBytes indicates an expected call of SetBytes. 
-func (mr *MockUnsignedTxMockRecorder) SetBytes(arg0 interface{}) *gomock.Call { +func (mr *MockUnsignedTxMockRecorder) SetBytes(unsignedBytes any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetBytes", reflect.TypeOf((*MockUnsignedTx)(nil).SetBytes), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetBytes", reflect.TypeOf((*MockUnsignedTx)(nil).SetBytes), unsignedBytes) } // SyntacticVerify mocks base method. -func (m *MockUnsignedTx) SyntacticVerify(arg0 *snow.Context) error { +func (m *MockUnsignedTx) SyntacticVerify(ctx *snow.Context) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SyntacticVerify", arg0) + ret := m.ctrl.Call(m, "SyntacticVerify", ctx) ret0, _ := ret[0].(error) return ret0 } // SyntacticVerify indicates an expected call of SyntacticVerify. -func (mr *MockUnsignedTxMockRecorder) SyntacticVerify(arg0 interface{}) *gomock.Call { +func (mr *MockUnsignedTxMockRecorder) SyntacticVerify(ctx any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyntacticVerify", reflect.TypeOf((*MockUnsignedTx)(nil).SyntacticVerify), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyntacticVerify", reflect.TypeOf((*MockUnsignedTx)(nil).SyntacticVerify), ctx) } // Visit mocks base method. -func (m *MockUnsignedTx) Visit(arg0 Visitor) error { +func (m *MockUnsignedTx) Visit(visitor Visitor) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Visit", arg0) + ret := m.ctrl.Call(m, "Visit", visitor) ret0, _ := ret[0].(error) return ret0 } // Visit indicates an expected call of Visit. 
-func (mr *MockUnsignedTxMockRecorder) Visit(arg0 interface{}) *gomock.Call { +func (mr *MockUnsignedTxMockRecorder) Visit(visitor any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Visit", reflect.TypeOf((*MockUnsignedTx)(nil).Visit), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Visit", reflect.TypeOf((*MockUnsignedTx)(nil).Visit), visitor) } diff --git a/avalanchego/vms/platformvm/txs/priorities.go b/avalanchego/vms/platformvm/txs/priorities.go index fdd65d74..a324bdae 100644 --- a/avalanchego/vms/platformvm/txs/priorities.go +++ b/avalanchego/vms/platformvm/txs/priorities.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs @@ -44,3 +44,47 @@ var PendingToCurrentPriorities = []Priority{ } type Priority byte + +func (p Priority) IsCurrent() bool { + return p.IsCurrentValidator() || p.IsCurrentDelegator() +} + +func (p Priority) IsPending() bool { + return p.IsPendingValidator() || p.IsPendingDelegator() +} + +func (p Priority) IsValidator() bool { + return p.IsCurrentValidator() || p.IsPendingValidator() +} + +func (p Priority) IsPermissionedValidator() bool { + return p == SubnetPermissionedValidatorCurrentPriority || + p == SubnetPermissionedValidatorPendingPriority +} + +func (p Priority) IsDelegator() bool { + return p.IsCurrentDelegator() || p.IsPendingDelegator() +} + +func (p Priority) IsCurrentValidator() bool { + return p == PrimaryNetworkValidatorCurrentPriority || + p == SubnetPermissionedValidatorCurrentPriority || + p == SubnetPermissionlessValidatorCurrentPriority +} + +func (p Priority) IsCurrentDelegator() bool { + return p == PrimaryNetworkDelegatorCurrentPriority || + p == SubnetPermissionlessDelegatorCurrentPriority +} + +func (p Priority) IsPendingValidator() bool { + return p == PrimaryNetworkValidatorPendingPriority || + p 
== SubnetPermissionedValidatorPendingPriority || + p == SubnetPermissionlessValidatorPendingPriority +} + +func (p Priority) IsPendingDelegator() bool { + return p == PrimaryNetworkDelegatorBanffPendingPriority || + p == PrimaryNetworkDelegatorApricotPendingPriority || + p == SubnetPermissionlessDelegatorPendingPriority +} diff --git a/avalanchego/vms/platformvm/txs/priorities_test.go b/avalanchego/vms/platformvm/txs/priorities_test.go new file mode 100644 index 00000000..5e629a85 --- /dev/null +++ b/avalanchego/vms/platformvm/txs/priorities_test.go @@ -0,0 +1,524 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package txs + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestPriorityIsCurrent(t *testing.T) { + tests := []struct { + priority Priority + expected bool + }{ + { + priority: PrimaryNetworkDelegatorApricotPendingPriority, + expected: false, + }, + { + priority: PrimaryNetworkValidatorPendingPriority, + expected: false, + }, + { + priority: PrimaryNetworkDelegatorBanffPendingPriority, + expected: false, + }, + { + priority: SubnetPermissionlessValidatorPendingPriority, + expected: false, + }, + { + priority: SubnetPermissionlessDelegatorPendingPriority, + expected: false, + }, + { + priority: SubnetPermissionedValidatorPendingPriority, + expected: false, + }, + { + priority: SubnetPermissionedValidatorCurrentPriority, + expected: true, + }, + { + priority: SubnetPermissionlessDelegatorCurrentPriority, + expected: true, + }, + { + priority: SubnetPermissionlessValidatorCurrentPriority, + expected: true, + }, + { + priority: PrimaryNetworkDelegatorCurrentPriority, + expected: true, + }, + { + priority: PrimaryNetworkValidatorCurrentPriority, + expected: true, + }, + } + for _, test := range tests { + t.Run(fmt.Sprintf("%d", test.priority), func(t *testing.T) { + require.Equal(t, test.expected, test.priority.IsCurrent()) + }) + } +} + +func 
TestPriorityIsPending(t *testing.T) { + tests := []struct { + priority Priority + expected bool + }{ + { + priority: PrimaryNetworkDelegatorApricotPendingPriority, + expected: true, + }, + { + priority: PrimaryNetworkValidatorPendingPriority, + expected: true, + }, + { + priority: PrimaryNetworkDelegatorBanffPendingPriority, + expected: true, + }, + { + priority: SubnetPermissionlessValidatorPendingPriority, + expected: true, + }, + { + priority: SubnetPermissionlessDelegatorPendingPriority, + expected: true, + }, + { + priority: SubnetPermissionedValidatorPendingPriority, + expected: true, + }, + { + priority: SubnetPermissionedValidatorCurrentPriority, + expected: false, + }, + { + priority: SubnetPermissionlessDelegatorCurrentPriority, + expected: false, + }, + { + priority: SubnetPermissionlessValidatorCurrentPriority, + expected: false, + }, + { + priority: PrimaryNetworkDelegatorCurrentPriority, + expected: false, + }, + { + priority: PrimaryNetworkValidatorCurrentPriority, + expected: false, + }, + } + for _, test := range tests { + t.Run(fmt.Sprintf("%d", test.priority), func(t *testing.T) { + require.Equal(t, test.expected, test.priority.IsPending()) + }) + } +} + +func TestPriorityIsValidator(t *testing.T) { + tests := []struct { + priority Priority + expected bool + }{ + { + priority: PrimaryNetworkDelegatorApricotPendingPriority, + expected: false, + }, + { + priority: PrimaryNetworkValidatorPendingPriority, + expected: true, + }, + { + priority: PrimaryNetworkDelegatorBanffPendingPriority, + expected: false, + }, + { + priority: SubnetPermissionlessValidatorPendingPriority, + expected: true, + }, + { + priority: SubnetPermissionlessDelegatorPendingPriority, + expected: false, + }, + { + priority: SubnetPermissionedValidatorPendingPriority, + expected: true, + }, + { + priority: SubnetPermissionedValidatorCurrentPriority, + expected: true, + }, + { + priority: SubnetPermissionlessDelegatorCurrentPriority, + expected: false, + }, + { + priority: 
SubnetPermissionlessValidatorCurrentPriority, + expected: true, + }, + { + priority: PrimaryNetworkDelegatorCurrentPriority, + expected: false, + }, + { + priority: PrimaryNetworkValidatorCurrentPriority, + expected: true, + }, + } + for _, test := range tests { + t.Run(fmt.Sprintf("%d", test.priority), func(t *testing.T) { + require.Equal(t, test.expected, test.priority.IsValidator()) + }) + } +} + +func TestPriorityIsPermissionedValidator(t *testing.T) { + tests := []struct { + priority Priority + expected bool + }{ + { + priority: PrimaryNetworkDelegatorApricotPendingPriority, + expected: false, + }, + { + priority: PrimaryNetworkValidatorPendingPriority, + expected: false, + }, + { + priority: PrimaryNetworkDelegatorBanffPendingPriority, + expected: false, + }, + { + priority: SubnetPermissionlessValidatorPendingPriority, + expected: false, + }, + { + priority: SubnetPermissionlessDelegatorPendingPriority, + expected: false, + }, + { + priority: SubnetPermissionedValidatorPendingPriority, + expected: true, + }, + { + priority: SubnetPermissionedValidatorCurrentPriority, + expected: true, + }, + { + priority: SubnetPermissionlessDelegatorCurrentPriority, + expected: false, + }, + { + priority: SubnetPermissionlessValidatorCurrentPriority, + expected: false, + }, + { + priority: PrimaryNetworkDelegatorCurrentPriority, + expected: false, + }, + { + priority: PrimaryNetworkValidatorCurrentPriority, + expected: false, + }, + } + for _, test := range tests { + t.Run(fmt.Sprintf("%d", test.priority), func(t *testing.T) { + require.Equal(t, test.expected, test.priority.IsPermissionedValidator()) + }) + } +} + +func TestPriorityIsDelegator(t *testing.T) { + tests := []struct { + priority Priority + expected bool + }{ + { + priority: PrimaryNetworkDelegatorApricotPendingPriority, + expected: true, + }, + { + priority: PrimaryNetworkValidatorPendingPriority, + expected: false, + }, + { + priority: PrimaryNetworkDelegatorBanffPendingPriority, + expected: true, + }, + { + 
priority: SubnetPermissionlessValidatorPendingPriority, + expected: false, + }, + { + priority: SubnetPermissionlessDelegatorPendingPriority, + expected: true, + }, + { + priority: SubnetPermissionedValidatorPendingPriority, + expected: false, + }, + { + priority: SubnetPermissionedValidatorCurrentPriority, + expected: false, + }, + { + priority: SubnetPermissionlessDelegatorCurrentPriority, + expected: true, + }, + { + priority: SubnetPermissionlessValidatorCurrentPriority, + expected: false, + }, + { + priority: PrimaryNetworkDelegatorCurrentPriority, + expected: true, + }, + { + priority: PrimaryNetworkValidatorCurrentPriority, + expected: false, + }, + } + for _, test := range tests { + t.Run(fmt.Sprintf("%d", test.priority), func(t *testing.T) { + require.Equal(t, test.expected, test.priority.IsDelegator()) + }) + } +} + +func TestPriorityIsCurrentValidator(t *testing.T) { + tests := []struct { + priority Priority + expected bool + }{ + { + priority: PrimaryNetworkDelegatorApricotPendingPriority, + expected: false, + }, + { + priority: PrimaryNetworkValidatorPendingPriority, + expected: false, + }, + { + priority: PrimaryNetworkDelegatorBanffPendingPriority, + expected: false, + }, + { + priority: SubnetPermissionlessValidatorPendingPriority, + expected: false, + }, + { + priority: SubnetPermissionlessDelegatorPendingPriority, + expected: false, + }, + { + priority: SubnetPermissionedValidatorPendingPriority, + expected: false, + }, + { + priority: SubnetPermissionedValidatorCurrentPriority, + expected: true, + }, + { + priority: SubnetPermissionlessDelegatorCurrentPriority, + expected: false, + }, + { + priority: SubnetPermissionlessValidatorCurrentPriority, + expected: true, + }, + { + priority: PrimaryNetworkDelegatorCurrentPriority, + expected: false, + }, + { + priority: PrimaryNetworkValidatorCurrentPriority, + expected: true, + }, + } + for _, test := range tests { + t.Run(fmt.Sprintf("%d", test.priority), func(t *testing.T) { + require.Equal(t, 
test.expected, test.priority.IsCurrentValidator()) + }) + } +} + +func TestPriorityIsCurrentDelegator(t *testing.T) { + tests := []struct { + priority Priority + expected bool + }{ + { + priority: PrimaryNetworkDelegatorApricotPendingPriority, + expected: false, + }, + { + priority: PrimaryNetworkValidatorPendingPriority, + expected: false, + }, + { + priority: PrimaryNetworkDelegatorBanffPendingPriority, + expected: false, + }, + { + priority: SubnetPermissionlessValidatorPendingPriority, + expected: false, + }, + { + priority: SubnetPermissionlessDelegatorPendingPriority, + expected: false, + }, + { + priority: SubnetPermissionedValidatorPendingPriority, + expected: false, + }, + { + priority: SubnetPermissionedValidatorCurrentPriority, + expected: false, + }, + { + priority: SubnetPermissionlessDelegatorCurrentPriority, + expected: true, + }, + { + priority: SubnetPermissionlessValidatorCurrentPriority, + expected: false, + }, + { + priority: PrimaryNetworkDelegatorCurrentPriority, + expected: true, + }, + { + priority: PrimaryNetworkValidatorCurrentPriority, + expected: false, + }, + } + for _, test := range tests { + t.Run(fmt.Sprintf("%d", test.priority), func(t *testing.T) { + require.Equal(t, test.expected, test.priority.IsCurrentDelegator()) + }) + } +} + +func TestPriorityIsPendingValidator(t *testing.T) { + tests := []struct { + priority Priority + expected bool + }{ + { + priority: PrimaryNetworkDelegatorApricotPendingPriority, + expected: false, + }, + { + priority: PrimaryNetworkValidatorPendingPriority, + expected: true, + }, + { + priority: PrimaryNetworkDelegatorBanffPendingPriority, + expected: false, + }, + { + priority: SubnetPermissionlessValidatorPendingPriority, + expected: true, + }, + { + priority: SubnetPermissionlessDelegatorPendingPriority, + expected: false, + }, + { + priority: SubnetPermissionedValidatorPendingPriority, + expected: true, + }, + { + priority: SubnetPermissionedValidatorCurrentPriority, + expected: false, + }, + { + 
priority: SubnetPermissionlessDelegatorCurrentPriority, + expected: false, + }, + { + priority: SubnetPermissionlessValidatorCurrentPriority, + expected: false, + }, + { + priority: PrimaryNetworkDelegatorCurrentPriority, + expected: false, + }, + { + priority: PrimaryNetworkValidatorCurrentPriority, + expected: false, + }, + } + for _, test := range tests { + t.Run(fmt.Sprintf("%d", test.priority), func(t *testing.T) { + require.Equal(t, test.expected, test.priority.IsPendingValidator()) + }) + } +} + +func TestPriorityIsPendingDelegator(t *testing.T) { + tests := []struct { + priority Priority + expected bool + }{ + { + priority: PrimaryNetworkDelegatorApricotPendingPriority, + expected: true, + }, + { + priority: PrimaryNetworkValidatorPendingPriority, + expected: false, + }, + { + priority: PrimaryNetworkDelegatorBanffPendingPriority, + expected: true, + }, + { + priority: SubnetPermissionlessValidatorPendingPriority, + expected: false, + }, + { + priority: SubnetPermissionlessDelegatorPendingPriority, + expected: true, + }, + { + priority: SubnetPermissionedValidatorPendingPriority, + expected: false, + }, + { + priority: SubnetPermissionedValidatorCurrentPriority, + expected: false, + }, + { + priority: SubnetPermissionlessDelegatorCurrentPriority, + expected: false, + }, + { + priority: SubnetPermissionlessValidatorCurrentPriority, + expected: false, + }, + { + priority: PrimaryNetworkDelegatorCurrentPriority, + expected: false, + }, + { + priority: PrimaryNetworkValidatorCurrentPriority, + expected: false, + }, + } + for _, test := range tests { + t.Run(fmt.Sprintf("%d", test.priority), func(t *testing.T) { + require.Equal(t, test.expected, test.priority.IsPendingDelegator()) + }) + } +} diff --git a/avalanchego/vms/platformvm/txs/remove_subnet_validator_tx.go b/avalanchego/vms/platformvm/txs/remove_subnet_validator_tx.go index fc6126aa..ef55ccce 100644 --- a/avalanchego/vms/platformvm/txs/remove_subnet_validator_tx.go +++ 
b/avalanchego/vms/platformvm/txs/remove_subnet_validator_tx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs @@ -15,7 +15,7 @@ import ( var ( _ UnsignedTx = (*RemoveSubnetValidatorTx)(nil) - errRemovePrimaryNetworkValidator = errors.New("can't remove primary network validator with RemoveSubnetValidatorTx") + ErrRemovePrimaryNetworkValidator = errors.New("can't remove primary network validator with RemoveSubnetValidatorTx") ) // Removes a validator from a subnet. @@ -37,7 +37,7 @@ func (tx *RemoveSubnetValidatorTx) SyntacticVerify(ctx *snow.Context) error { // already passed syntactic verification return nil case tx.Subnet == constants.PrimaryNetworkID: - return errRemovePrimaryNetworkValidator + return ErrRemovePrimaryNetworkValidator } if err := tx.BaseTx.SyntacticVerify(ctx); err != nil { diff --git a/avalanchego/vms/platformvm/txs/remove_subnet_validator_tx_test.go b/avalanchego/vms/platformvm/txs/remove_subnet_validator_tx_test.go index 6ad782af..02439701 100644 --- a/avalanchego/vms/platformvm/txs/remove_subnet_validator_tx_test.go +++ b/avalanchego/vms/platformvm/txs/remove_subnet_validator_tx_test.go @@ -1,33 +1,524 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package txs import ( + "encoding/json" "errors" "testing" - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/verify" + "github.com/ava-labs/avalanchego/vms/platformvm/stakeable" + "github.com/ava-labs/avalanchego/vms/secp256k1fx" + "github.com/ava-labs/avalanchego/vms/types" ) var errInvalidSubnetAuth = errors.New("invalid subnet auth") +func TestRemoveSubnetValidatorTxSerialization(t *testing.T) { + require := require.New(t) + + addr := ids.ShortID{ + 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, + } + + avaxAssetID, err := ids.FromString("FvwEAhmxKfeiG8SnEvq42hc6whRyY3EFYAvebMqDNDGCgxN5Z") + require.NoError(err) + + customAssetID := ids.ID{ + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + } + + txID := ids.ID{ + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + } + nodeID := ids.BuildTestNodeID([]byte{ + 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, + 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, + 0x11, 0x22, 0x33, 0x44, + }) + subnetID := ids.ID{ + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, + 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, + 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, + } + + simpleRemoveValidatorTx := &RemoveSubnetValidatorTx{ + BaseTx: BaseTx{ 
+ BaseTx: avax.BaseTx{ + NetworkID: constants.MainnetID, + BlockchainID: constants.PlatformChainID, + Outs: []*avax.TransferableOutput{}, + Ins: []*avax.TransferableInput{ + { + UTXOID: avax.UTXOID{ + TxID: txID, + OutputIndex: 1, + }, + Asset: avax.Asset{ + ID: avaxAssetID, + }, + In: &secp256k1fx.TransferInput{ + Amt: units.MilliAvax, + Input: secp256k1fx.Input{ + SigIndices: []uint32{5}, + }, + }, + }, + }, + Memo: types.JSONByteSlice{}, + }, + }, + NodeID: nodeID, + Subnet: subnetID, + SubnetAuth: &secp256k1fx.Input{ + SigIndices: []uint32{3}, + }, + } + require.NoError(simpleRemoveValidatorTx.SyntacticVerify(&snow.Context{ + NetworkID: 1, + ChainID: constants.PlatformChainID, + AVAXAssetID: avaxAssetID, + })) + + expectedUnsignedSimpleRemoveValidatorTxBytes := []byte{ + // Codec version + 0x00, 0x00, + // RemoveSubnetValidatorTx Type ID + 0x00, 0x00, 0x00, 0x17, + // Mainnet network ID + 0x00, 0x00, 0x00, 0x01, + // P-chain blockchain ID + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Number of outputs + 0x00, 0x00, 0x00, 0x00, + // Number of inputs + 0x00, 0x00, 0x00, 0x01, + // Inputs[0] + // TxID + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + // Tx output index + 0x00, 0x00, 0x00, 0x01, + // Mainnet AVAX assetID + 0x21, 0xe6, 0x73, 0x17, 0xcb, 0xc4, 0xbe, 0x2a, + 0xeb, 0x00, 0x67, 0x7a, 0xd6, 0x46, 0x27, 0x78, + 0xa8, 0xf5, 0x22, 0x74, 0xb9, 0xd6, 0x05, 0xdf, + 0x25, 0x91, 0xb2, 0x30, 0x27, 0xa8, 0x7d, 0xff, + // secp256k1fx transfer input type ID + 0x00, 0x00, 0x00, 0x05, + // input amount = 1 MilliAvax + 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x42, 0x40, + // number of signatures needed in input + 0x00, 0x00, 0x00, 0x01, + // index of signer + 0x00, 
0x00, 0x00, 0x05, + // length of memo field + 0x00, 0x00, 0x00, 0x00, + // nodeID to remove + 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, + 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, + 0x11, 0x22, 0x33, 0x44, + // subnetID to remove from + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, + 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, + 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, + // secp256k1fx authorization type ID + 0x00, 0x00, 0x00, 0x0a, + // number of signatures needed in authorization + 0x00, 0x00, 0x00, 0x01, + // index of signer + 0x00, 0x00, 0x00, 0x03, + } + var unsignedSimpleRemoveValidatorTx UnsignedTx = simpleRemoveValidatorTx + unsignedSimpleRemoveValidatorTxBytes, err := Codec.Marshal(CodecVersion, &unsignedSimpleRemoveValidatorTx) + require.NoError(err) + require.Equal(expectedUnsignedSimpleRemoveValidatorTxBytes, unsignedSimpleRemoveValidatorTxBytes) + + complexRemoveValidatorTx := &RemoveSubnetValidatorTx{ + BaseTx: BaseTx{ + BaseTx: avax.BaseTx{ + NetworkID: constants.MainnetID, + BlockchainID: constants.PlatformChainID, + Outs: []*avax.TransferableOutput{ + { + Asset: avax.Asset{ + ID: avaxAssetID, + }, + Out: &stakeable.LockOut{ + Locktime: 87654321, + TransferableOut: &secp256k1fx.TransferOutput{ + Amt: 1, + OutputOwners: secp256k1fx.OutputOwners{ + Locktime: 12345678, + Threshold: 0, + Addrs: []ids.ShortID{}, + }, + }, + }, + }, + { + Asset: avax.Asset{ + ID: customAssetID, + }, + Out: &stakeable.LockOut{ + Locktime: 876543210, + TransferableOut: &secp256k1fx.TransferOutput{ + Amt: 0xffffffffffffffff, + OutputOwners: secp256k1fx.OutputOwners{ + Locktime: 0, + Threshold: 1, + Addrs: []ids.ShortID{ + addr, + }, + }, + }, + }, + }, + }, + Ins: []*avax.TransferableInput{ + { + UTXOID: avax.UTXOID{ + TxID: txID, + OutputIndex: 1, + }, + Asset: avax.Asset{ + ID: avaxAssetID, + }, + In: &secp256k1fx.TransferInput{ + Amt: units.Avax, + Input: secp256k1fx.Input{ + SigIndices: 
[]uint32{2, 5}, + }, + }, + }, + { + UTXOID: avax.UTXOID{ + TxID: txID, + OutputIndex: 2, + }, + Asset: avax.Asset{ + ID: customAssetID, + }, + In: &stakeable.LockIn{ + Locktime: 876543210, + TransferableIn: &secp256k1fx.TransferInput{ + Amt: 0xefffffffffffffff, + Input: secp256k1fx.Input{ + SigIndices: []uint32{0}, + }, + }, + }, + }, + { + UTXOID: avax.UTXOID{ + TxID: txID, + OutputIndex: 3, + }, + Asset: avax.Asset{ + ID: customAssetID, + }, + In: &secp256k1fx.TransferInput{ + Amt: 0x1000000000000000, + Input: secp256k1fx.Input{ + SigIndices: []uint32{}, + }, + }, + }, + }, + Memo: types.JSONByteSlice("😅\nwell that's\x01\x23\x45!"), + }, + }, + NodeID: nodeID, + Subnet: subnetID, + SubnetAuth: &secp256k1fx.Input{ + SigIndices: []uint32{}, + }, + } + avax.SortTransferableOutputs(complexRemoveValidatorTx.Outs, Codec) + utils.Sort(complexRemoveValidatorTx.Ins) + require.NoError(complexRemoveValidatorTx.SyntacticVerify(&snow.Context{ + NetworkID: 1, + ChainID: constants.PlatformChainID, + AVAXAssetID: avaxAssetID, + })) + + expectedUnsignedComplexRemoveValidatorTxBytes := []byte{ + // Codec version + 0x00, 0x00, + // RemoveSubnetValidatorTx Type ID + 0x00, 0x00, 0x00, 0x17, + // Mainnet network ID + 0x00, 0x00, 0x00, 0x01, + // P-chain blockchain ID + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Number of outputs + 0x00, 0x00, 0x00, 0x02, + // Outputs[0] + // Mainnet AVAX assetID + 0x21, 0xe6, 0x73, 0x17, 0xcb, 0xc4, 0xbe, 0x2a, + 0xeb, 0x00, 0x67, 0x7a, 0xd6, 0x46, 0x27, 0x78, + 0xa8, 0xf5, 0x22, 0x74, 0xb9, 0xd6, 0x05, 0xdf, + 0x25, 0x91, 0xb2, 0x30, 0x27, 0xa8, 0x7d, 0xff, + // Stakeable locked output type ID + 0x00, 0x00, 0x00, 0x16, + // Locktime + 0x00, 0x00, 0x00, 0x00, 0x05, 0x39, 0x7f, 0xb1, + // secp256k1fx transfer output type ID + 0x00, 0x00, 0x00, 0x07, + // amount + 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x01, + // secp256k1fx output locktime + 0x00, 0x00, 0x00, 0x00, 0x00, 0xbc, 0x61, 0x4e, + // threshold + 0x00, 0x00, 0x00, 0x00, + // number of addresses + 0x00, 0x00, 0x00, 0x00, + // Outputs[1] + // custom asset ID + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + // Stakeable locked output type ID + 0x00, 0x00, 0x00, 0x16, + // Locktime + 0x00, 0x00, 0x00, 0x00, 0x34, 0x3e, 0xfc, 0xea, + // secp256k1fx transfer output type ID + 0x00, 0x00, 0x00, 0x07, + // amount + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + // secp256k1fx output locktime + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // threshold + 0x00, 0x00, 0x00, 0x01, + // number of addresses + 0x00, 0x00, 0x00, 0x01, + // address[0] + 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, + // number of inputs + 0x00, 0x00, 0x00, 0x03, + // inputs[0] + // TxID + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + // Tx output index + 0x00, 0x00, 0x00, 0x01, + // Mainnet AVAX assetID + 0x21, 0xe6, 0x73, 0x17, 0xcb, 0xc4, 0xbe, 0x2a, + 0xeb, 0x00, 0x67, 0x7a, 0xd6, 0x46, 0x27, 0x78, + 0xa8, 0xf5, 0x22, 0x74, 0xb9, 0xd6, 0x05, 0xdf, + 0x25, 0x91, 0xb2, 0x30, 0x27, 0xa8, 0x7d, 0xff, + // secp256k1fx transfer input type ID + 0x00, 0x00, 0x00, 0x05, + // input amount = 1 Avax + 0x00, 0x00, 0x00, 0x00, 0x3b, 0x9a, 0xca, 0x00, + // number of signatures needed in input + 0x00, 0x00, 0x00, 0x02, + // index of first signer + 0x00, 0x00, 0x00, 0x02, + // index of second signer + 0x00, 0x00, 0x00, 0x05, + // inputs[1] + // TxID + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 
0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + // Tx output index + 0x00, 0x00, 0x00, 0x02, + // Custom asset ID + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + // Stakeable locked input type ID + 0x00, 0x00, 0x00, 0x15, + // Locktime + 0x00, 0x00, 0x00, 0x00, 0x34, 0x3e, 0xfc, 0xea, + // secp256k1fx transfer input type ID + 0x00, 0x00, 0x00, 0x05, + // input amount + 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + // number of signatures needed in input + 0x00, 0x00, 0x00, 0x01, + // index of signer + 0x00, 0x00, 0x00, 0x00, + // inputs[2] + // TxID + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + // Tx output index + 0x00, 0x00, 0x00, 0x03, + // custom asset ID + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + // secp256k1fx transfer input type ID + 0x00, 0x00, 0x00, 0x05, + // input amount + 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // number of signatures needed in input + 0x00, 0x00, 0x00, 0x00, + // length of memo + 0x00, 0x00, 0x00, 0x14, + // memo + 0xf0, 0x9f, 0x98, 0x85, 0x0a, 0x77, 0x65, 0x6c, + 0x6c, 0x20, 0x74, 0x68, 0x61, 0x74, 0x27, 0x73, + 0x01, 0x23, 0x45, 0x21, + // nodeID to remove + 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, + 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, + 0x11, 0x22, 0x33, 0x44, + // subnetID to remove from + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, + 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, + 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, + // 
secp256k1fx authorization type ID + 0x00, 0x00, 0x00, 0x0a, + // number of signatures needed in authorization + 0x00, 0x00, 0x00, 0x00, + } + var unsignedComplexRemoveValidatorTx UnsignedTx = complexRemoveValidatorTx + unsignedComplexRemoveValidatorTxBytes, err := Codec.Marshal(CodecVersion, &unsignedComplexRemoveValidatorTx) + require.NoError(err) + require.Equal(expectedUnsignedComplexRemoveValidatorTxBytes, unsignedComplexRemoveValidatorTxBytes) + + aliaser := ids.NewAliaser() + require.NoError(aliaser.Alias(constants.PlatformChainID, "P")) + + unsignedComplexRemoveValidatorTx.InitCtx(&snow.Context{ + NetworkID: 1, + ChainID: constants.PlatformChainID, + AVAXAssetID: avaxAssetID, + BCLookup: aliaser, + }) + + unsignedComplexRemoveValidatorTxJSONBytes, err := json.MarshalIndent(unsignedComplexRemoveValidatorTx, "", "\t") + require.NoError(err) + require.Equal(`{ + "networkID": 1, + "blockchainID": "11111111111111111111111111111111LpoYY", + "outputs": [ + { + "assetID": "FvwEAhmxKfeiG8SnEvq42hc6whRyY3EFYAvebMqDNDGCgxN5Z", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "output": { + "locktime": 87654321, + "output": { + "addresses": [], + "amount": 1, + "locktime": 12345678, + "threshold": 0 + } + } + }, + { + "assetID": "2Ab62uWwJw1T6VvmKD36ufsiuGZuX1pGykXAvPX1LtjTRHxwcc", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "output": { + "locktime": 876543210, + "output": { + "addresses": [ + "P-avax1g32kvaugnx4tk3z4vemc3xd2hdz92enh972wxr" + ], + "amount": 18446744073709551615, + "locktime": 0, + "threshold": 1 + } + } + } + ], + "inputs": [ + { + "txID": "2wiU5PnFTjTmoAXGZutHAsPF36qGGyLHYHj9G1Aucfmb3JFFGN", + "outputIndex": 1, + "assetID": "FvwEAhmxKfeiG8SnEvq42hc6whRyY3EFYAvebMqDNDGCgxN5Z", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "input": { + "amount": 1000000000, + "signatureIndices": [ + 2, + 5 + ] + } + }, + { + "txID": "2wiU5PnFTjTmoAXGZutHAsPF36qGGyLHYHj9G1Aucfmb3JFFGN", + "outputIndex": 2, + 
"assetID": "2Ab62uWwJw1T6VvmKD36ufsiuGZuX1pGykXAvPX1LtjTRHxwcc", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "input": { + "locktime": 876543210, + "input": { + "amount": 17293822569102704639, + "signatureIndices": [ + 0 + ] + } + } + }, + { + "txID": "2wiU5PnFTjTmoAXGZutHAsPF36qGGyLHYHj9G1Aucfmb3JFFGN", + "outputIndex": 3, + "assetID": "2Ab62uWwJw1T6VvmKD36ufsiuGZuX1pGykXAvPX1LtjTRHxwcc", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "input": { + "amount": 1152921504606846976, + "signatureIndices": [] + } + } + ], + "memo": "0xf09f98850a77656c6c2074686174277301234521", + "nodeID": "NodeID-2ZbTY9GatRTrfinAoYiYLcf6CvrPAUYgo", + "subnetID": "SkB92YpWm4UpburLz9tEKZw2i67H3FF6YkjaU4BkFUDTG9Xm", + "subnetAuthorization": { + "signatureIndices": [] + } +}`, string(unsignedComplexRemoveValidatorTxJSONBytes)) +} + func TestRemoveSubnetValidatorTxSyntacticVerify(t *testing.T) { type test struct { - name string - txFunc func(*gomock.Controller) *RemoveSubnetValidatorTx - shouldErr bool - // If [shouldErr] and [requireSpecificErr] != nil, - // require that the error we get is [requireSpecificErr]. - requireSpecificErr error + name string + txFunc func(*gomock.Controller) *RemoveSubnetValidatorTx + expectedErr error } var ( @@ -61,8 +552,6 @@ func TestRemoveSubnetValidatorTxSyntacticVerify(t *testing.T) { // A BaseTx that fails syntactic verification. invalidBaseTx := BaseTx{} - // Sanity check. 
- require.Error(t, invalidBaseTx.SyntacticVerify(ctx)) tests := []test{ { @@ -70,14 +559,14 @@ func TestRemoveSubnetValidatorTxSyntacticVerify(t *testing.T) { txFunc: func(*gomock.Controller) *RemoveSubnetValidatorTx { return nil }, - shouldErr: true, + expectedErr: ErrNilTx, }, { name: "already verified", txFunc: func(*gomock.Controller) *RemoveSubnetValidatorTx { return &RemoveSubnetValidatorTx{BaseTx: verifiedBaseTx} }, - shouldErr: false, + expectedErr: nil, }, { name: "invalid BaseTx", @@ -90,7 +579,7 @@ func TestRemoveSubnetValidatorTxSyntacticVerify(t *testing.T) { BaseTx: invalidBaseTx, } }, - shouldErr: true, + expectedErr: avax.ErrWrongNetworkID, }, { name: "invalid subnetID", @@ -102,8 +591,7 @@ func TestRemoveSubnetValidatorTxSyntacticVerify(t *testing.T) { Subnet: constants.PrimaryNetworkID, } }, - shouldErr: true, - requireSpecificErr: errRemovePrimaryNetworkValidator, + expectedErr: ErrRemovePrimaryNetworkValidator, }, { name: "invalid subnetAuth", @@ -120,8 +608,7 @@ func TestRemoveSubnetValidatorTxSyntacticVerify(t *testing.T) { SubnetAuth: invalidSubnetAuth, } }, - shouldErr: true, - requireSpecificErr: errInvalidSubnetAuth, + expectedErr: errInvalidSubnetAuth, }, { name: "passes verification", @@ -138,7 +625,7 @@ func TestRemoveSubnetValidatorTxSyntacticVerify(t *testing.T) { SubnetAuth: validSubnetAuth, } }, - shouldErr: false, + expectedErr: nil, }, } @@ -146,18 +633,13 @@ func TestRemoveSubnetValidatorTxSyntacticVerify(t *testing.T) { t.Run(tt.name, func(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() tx := tt.txFunc(ctrl) err := tx.SyntacticVerify(ctx) - if tt.shouldErr { - require.Error(err) - if tt.requireSpecificErr != nil { - require.ErrorIs(err, tt.requireSpecificErr) - } + require.ErrorIs(err, tt.expectedErr) + if tt.expectedErr != nil { return } - require.NoError(err) require.True(tx.SyntacticallyVerified) }) } diff --git a/avalanchego/vms/platformvm/txs/reward_validator_tx.go 
b/avalanchego/vms/platformvm/txs/reward_validator_tx.go index d4b579f1..85129af4 100644 --- a/avalanchego/vms/platformvm/txs/reward_validator_tx.go +++ b/avalanchego/vms/platformvm/txs/reward_validator_tx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs @@ -26,9 +26,6 @@ type RewardValidatorTx struct { // ID of the tx that created the delegator/validator being removed/rewarded TxID ids.ID `serialize:"true" json:"txID"` - // Marks if this validator should be rewarded according to this node. - ShouldPreferCommit bool `json:"-"` - unsignedBytes []byte // Unsigned byte representation of this data } diff --git a/avalanchego/vms/platformvm/txs/staker_tx.go b/avalanchego/vms/platformvm/txs/staker_tx.go index 049d3519..8adb1ac2 100644 --- a/avalanchego/vms/platformvm/txs/staker_tx.go +++ b/avalanchego/vms/platformvm/txs/staker_tx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs @@ -48,9 +48,13 @@ type Staker interface { // PublicKey returns the BLS public key registered by this transaction. If // there was no key registered by this transaction, it will return false. PublicKey() (*bls.PublicKey, bool, error) - StartTime() time.Time EndTime() time.Time Weight() uint64 - PendingPriority() Priority CurrentPriority() Priority } + +type ScheduledStaker interface { + Staker + StartTime() time.Time + PendingPriority() Priority +} diff --git a/avalanchego/vms/platformvm/txs/subnet_validator.go b/avalanchego/vms/platformvm/txs/subnet_validator.go index d9da9d31..a7c683f3 100644 --- a/avalanchego/vms/platformvm/txs/subnet_validator.go +++ b/avalanchego/vms/platformvm/txs/subnet_validator.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
+// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs diff --git a/avalanchego/vms/platformvm/txs/subnet_validator_test.go b/avalanchego/vms/platformvm/txs/subnet_validator_test.go index a38e2d8e..cdfbeaf1 100644 --- a/avalanchego/vms/platformvm/txs/subnet_validator_test.go +++ b/avalanchego/vms/platformvm/txs/subnet_validator_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs @@ -21,7 +21,8 @@ func TestSubnetValidatorVerifySubnetID(t *testing.T) { Subnet: constants.PrimaryNetworkID, } - require.ErrorIs(vdr.Verify(), errBadSubnetID) + err := vdr.Verify() + require.ErrorIs(err, errBadSubnetID) } // Happy path diff --git a/avalanchego/vms/platformvm/txs/transfer_subnet_ownership_tx.go b/avalanchego/vms/platformvm/txs/transfer_subnet_ownership_tx.go new file mode 100644 index 00000000..4fa28078 --- /dev/null +++ b/avalanchego/vms/platformvm/txs/transfer_subnet_ownership_tx.go @@ -0,0 +1,65 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package txs + +import ( + "errors" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/vms/components/verify" + "github.com/ava-labs/avalanchego/vms/platformvm/fx" +) + +var ( + _ UnsignedTx = (*TransferSubnetOwnershipTx)(nil) + + ErrTransferPermissionlessSubnet = errors.New("cannot transfer ownership of a permissionless subnet") +) + +type TransferSubnetOwnershipTx struct { + // Metadata, inputs and outputs + BaseTx `serialize:"true"` + // ID of the subnet this tx is modifying + Subnet ids.ID `serialize:"true" json:"subnetID"` + // Proves that the issuer has the right to remove the node from the subnet. 
+ SubnetAuth verify.Verifiable `serialize:"true" json:"subnetAuthorization"` + // Who is now authorized to manage this subnet + Owner fx.Owner `serialize:"true" json:"newOwner"` +} + +// InitCtx sets the FxID fields in the inputs and outputs of this +// [TransferSubnetOwnershipTx]. Also sets the [ctx] to the given [vm.ctx] so +// that the addresses can be json marshalled into human readable format +func (tx *TransferSubnetOwnershipTx) InitCtx(ctx *snow.Context) { + tx.BaseTx.InitCtx(ctx) + tx.Owner.InitCtx(ctx) +} + +func (tx *TransferSubnetOwnershipTx) SyntacticVerify(ctx *snow.Context) error { + switch { + case tx == nil: + return ErrNilTx + case tx.SyntacticallyVerified: + // already passed syntactic verification + return nil + case tx.Subnet == constants.PrimaryNetworkID: + return ErrTransferPermissionlessSubnet + } + + if err := tx.BaseTx.SyntacticVerify(ctx); err != nil { + return err + } + if err := verify.All(tx.SubnetAuth, tx.Owner); err != nil { + return err + } + + tx.SyntacticallyVerified = true + return nil +} + +func (tx *TransferSubnetOwnershipTx) Visit(visitor Visitor) error { + return visitor.TransferSubnetOwnershipTx(tx) +} diff --git a/avalanchego/vms/platformvm/txs/transfer_subnet_ownership_tx_test.go b/avalanchego/vms/platformvm/txs/transfer_subnet_ownership_tx_test.go new file mode 100644 index 00000000..ffac2653 --- /dev/null +++ b/avalanchego/vms/platformvm/txs/transfer_subnet_ownership_tx_test.go @@ -0,0 +1,668 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package txs + +import ( + "encoding/json" + "testing" + + "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/units" + "github.com/ava-labs/avalanchego/vms/components/avax" + "github.com/ava-labs/avalanchego/vms/components/verify" + "github.com/ava-labs/avalanchego/vms/platformvm/fx" + "github.com/ava-labs/avalanchego/vms/platformvm/stakeable" + "github.com/ava-labs/avalanchego/vms/secp256k1fx" + "github.com/ava-labs/avalanchego/vms/types" +) + +func TestTransferSubnetOwnershipTxSerialization(t *testing.T) { + require := require.New(t) + + addr := ids.ShortID{ + 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, + } + + avaxAssetID, err := ids.FromString("FvwEAhmxKfeiG8SnEvq42hc6whRyY3EFYAvebMqDNDGCgxN5Z") + require.NoError(err) + + customAssetID := ids.ID{ + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + } + + txID := ids.ID{ + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + } + subnetID := ids.ID{ + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, + 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, + 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, + } + + simpleTransferSubnetOwnershipTx := &TransferSubnetOwnershipTx{ + BaseTx: BaseTx{ + BaseTx: avax.BaseTx{ + NetworkID: constants.MainnetID, + BlockchainID: constants.PlatformChainID, + Outs: []*avax.TransferableOutput{}, + Ins: []*avax.TransferableInput{ + { + 
UTXOID: avax.UTXOID{ + TxID: txID, + OutputIndex: 1, + }, + Asset: avax.Asset{ + ID: avaxAssetID, + }, + In: &secp256k1fx.TransferInput{ + Amt: units.MilliAvax, + Input: secp256k1fx.Input{ + SigIndices: []uint32{5}, + }, + }, + }, + }, + Memo: types.JSONByteSlice{}, + }, + }, + Subnet: subnetID, + SubnetAuth: &secp256k1fx.Input{ + SigIndices: []uint32{3}, + }, + Owner: &secp256k1fx.OutputOwners{ + Locktime: 0, + Threshold: 1, + Addrs: []ids.ShortID{ + addr, + }, + }, + } + require.NoError(simpleTransferSubnetOwnershipTx.SyntacticVerify(&snow.Context{ + NetworkID: 1, + ChainID: constants.PlatformChainID, + AVAXAssetID: avaxAssetID, + })) + + expectedUnsignedSimpleTransferSubnetOwnershipTxBytes := []byte{ + // Codec version + 0x00, 0x00, + // TransferSubnetOwnershipTx Type ID + 0x00, 0x00, 0x00, 0x21, + // Mainnet network ID + 0x00, 0x00, 0x00, 0x01, + // P-chain blockchain ID + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Number of outputs + 0x00, 0x00, 0x00, 0x00, + // Number of inputs + 0x00, 0x00, 0x00, 0x01, + // Inputs[0] + // TxID + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + // Tx output index + 0x00, 0x00, 0x00, 0x01, + // Mainnet AVAX assetID + 0x21, 0xe6, 0x73, 0x17, 0xcb, 0xc4, 0xbe, 0x2a, + 0xeb, 0x00, 0x67, 0x7a, 0xd6, 0x46, 0x27, 0x78, + 0xa8, 0xf5, 0x22, 0x74, 0xb9, 0xd6, 0x05, 0xdf, + 0x25, 0x91, 0xb2, 0x30, 0x27, 0xa8, 0x7d, 0xff, + // secp256k1fx transfer input type ID + 0x00, 0x00, 0x00, 0x05, + // input amount = 1 MilliAvax + 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x42, 0x40, + // number of signatures needed in input + 0x00, 0x00, 0x00, 0x01, + // index of signer + 0x00, 0x00, 0x00, 0x05, + // length of memo + 0x00, 0x00, 0x00, 0x00, + // 
subnetID to modify + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, + 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, + 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, + // secp256k1fx authorization type ID + 0x00, 0x00, 0x00, 0x0a, + // number of signatures needed in authorization + 0x00, 0x00, 0x00, 0x01, + // index of signer + 0x00, 0x00, 0x00, 0x03, + // secp256k1fx output owners type ID + 0x00, 0x00, 0x00, 0x0b, + // locktime + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // threshold + 0x00, 0x00, 0x00, 0x01, + // number of addrs + 0x00, 0x00, 0x00, 0x01, + // Addrs[0] + 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, + } + var unsignedSimpleTransferSubnetOwnershipTx UnsignedTx = simpleTransferSubnetOwnershipTx + unsignedSimpleTransferSubnetOwnershipTxBytes, err := Codec.Marshal(CodecVersion, &unsignedSimpleTransferSubnetOwnershipTx) + require.NoError(err) + require.Equal(expectedUnsignedSimpleTransferSubnetOwnershipTxBytes, unsignedSimpleTransferSubnetOwnershipTxBytes) + + complexTransferSubnetOwnershipTx := &TransferSubnetOwnershipTx{ + BaseTx: BaseTx{ + BaseTx: avax.BaseTx{ + NetworkID: constants.MainnetID, + BlockchainID: constants.PlatformChainID, + Outs: []*avax.TransferableOutput{ + { + Asset: avax.Asset{ + ID: avaxAssetID, + }, + Out: &stakeable.LockOut{ + Locktime: 87654321, + TransferableOut: &secp256k1fx.TransferOutput{ + Amt: 1, + OutputOwners: secp256k1fx.OutputOwners{ + Locktime: 12345678, + Threshold: 0, + Addrs: []ids.ShortID{}, + }, + }, + }, + }, + { + Asset: avax.Asset{ + ID: customAssetID, + }, + Out: &stakeable.LockOut{ + Locktime: 876543210, + TransferableOut: &secp256k1fx.TransferOutput{ + Amt: 0xffffffffffffffff, + OutputOwners: secp256k1fx.OutputOwners{ + Locktime: 0, + Threshold: 1, + Addrs: []ids.ShortID{ + addr, + }, + }, + }, + }, + }, + }, + Ins: []*avax.TransferableInput{ + { + UTXOID: avax.UTXOID{ 
+ TxID: txID, + OutputIndex: 1, + }, + Asset: avax.Asset{ + ID: avaxAssetID, + }, + In: &secp256k1fx.TransferInput{ + Amt: units.Avax, + Input: secp256k1fx.Input{ + SigIndices: []uint32{2, 5}, + }, + }, + }, + { + UTXOID: avax.UTXOID{ + TxID: txID, + OutputIndex: 2, + }, + Asset: avax.Asset{ + ID: customAssetID, + }, + In: &stakeable.LockIn{ + Locktime: 876543210, + TransferableIn: &secp256k1fx.TransferInput{ + Amt: 0xefffffffffffffff, + Input: secp256k1fx.Input{ + SigIndices: []uint32{0}, + }, + }, + }, + }, + { + UTXOID: avax.UTXOID{ + TxID: txID, + OutputIndex: 3, + }, + Asset: avax.Asset{ + ID: customAssetID, + }, + In: &secp256k1fx.TransferInput{ + Amt: 0x1000000000000000, + Input: secp256k1fx.Input{ + SigIndices: []uint32{}, + }, + }, + }, + }, + Memo: types.JSONByteSlice("😅\nwell that's\x01\x23\x45!"), + }, + }, + Subnet: subnetID, + SubnetAuth: &secp256k1fx.Input{ + SigIndices: []uint32{}, + }, + Owner: &secp256k1fx.OutputOwners{ + Locktime: 876543210, + Threshold: 1, + Addrs: []ids.ShortID{ + addr, + }, + }, + } + avax.SortTransferableOutputs(complexTransferSubnetOwnershipTx.Outs, Codec) + utils.Sort(complexTransferSubnetOwnershipTx.Ins) + require.NoError(complexTransferSubnetOwnershipTx.SyntacticVerify(&snow.Context{ + NetworkID: 1, + ChainID: constants.PlatformChainID, + AVAXAssetID: avaxAssetID, + })) + + expectedUnsignedComplexTransferSubnetOwnershipTxBytes := []byte{ + // Codec version + 0x00, 0x00, + // TransferSubnetOwnershipTx Type ID + 0x00, 0x00, 0x00, 0x21, + // Mainnet network ID + 0x00, 0x00, 0x00, 0x01, + // P-chain blockchain ID + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Number of outputs + 0x00, 0x00, 0x00, 0x02, + // Outputs[0] + // Mainnet AVAX assetID + 0x21, 0xe6, 0x73, 0x17, 0xcb, 0xc4, 0xbe, 0x2a, + 0xeb, 0x00, 0x67, 0x7a, 0xd6, 0x46, 0x27, 0x78, + 0xa8, 0xf5, 0x22, 0x74, 
0xb9, 0xd6, 0x05, 0xdf, + 0x25, 0x91, 0xb2, 0x30, 0x27, 0xa8, 0x7d, 0xff, + // Stakeable locked output type ID + 0x00, 0x00, 0x00, 0x16, + // Locktime + 0x00, 0x00, 0x00, 0x00, 0x05, 0x39, 0x7f, 0xb1, + // secp256k1fx transfer output type ID + 0x00, 0x00, 0x00, 0x07, + // amount + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // secp256k1fx output locktime + 0x00, 0x00, 0x00, 0x00, 0x00, 0xbc, 0x61, 0x4e, + // threshold + 0x00, 0x00, 0x00, 0x00, + // number of addresses + 0x00, 0x00, 0x00, 0x00, + // Outputs[1] + // custom asset ID + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + // Stakeable locked output type ID + 0x00, 0x00, 0x00, 0x16, + // Locktime + 0x00, 0x00, 0x00, 0x00, 0x34, 0x3e, 0xfc, 0xea, + // secp256k1fx transfer output type ID + 0x00, 0x00, 0x00, 0x07, + // amount + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + // secp256k1fx output locktime + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // threshold + 0x00, 0x00, 0x00, 0x01, + // number of addresses + 0x00, 0x00, 0x00, 0x01, + // address[0] + 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, + // number of inputs + 0x00, 0x00, 0x00, 0x03, + // inputs[0] + // TxID + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + // Tx output index + 0x00, 0x00, 0x00, 0x01, + // Mainnet AVAX assetID + 0x21, 0xe6, 0x73, 0x17, 0xcb, 0xc4, 0xbe, 0x2a, + 0xeb, 0x00, 0x67, 0x7a, 0xd6, 0x46, 0x27, 0x78, + 0xa8, 0xf5, 0x22, 0x74, 0xb9, 0xd6, 0x05, 0xdf, + 0x25, 0x91, 0xb2, 0x30, 0x27, 0xa8, 0x7d, 0xff, + // secp256k1fx transfer input type ID + 0x00, 0x00, 0x00, 0x05, + // input amount = 1 Avax + 0x00, 0x00, 0x00, 0x00, 0x3b, 0x9a, 0xca, 
0x00, + // number of signatures needed in input + 0x00, 0x00, 0x00, 0x02, + // index of first signer + 0x00, 0x00, 0x00, 0x02, + // index of second signer + 0x00, 0x00, 0x00, 0x05, + // inputs[1] + // TxID + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + // Tx output index + 0x00, 0x00, 0x00, 0x02, + // Custom asset ID + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + // Stakeable locked input type ID + 0x00, 0x00, 0x00, 0x15, + // Locktime + 0x00, 0x00, 0x00, 0x00, 0x34, 0x3e, 0xfc, 0xea, + // secp256k1fx transfer input type ID + 0x00, 0x00, 0x00, 0x05, + // input amount + 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + // number of signatures needed in input + 0x00, 0x00, 0x00, 0x01, + // index of signer + 0x00, 0x00, 0x00, 0x00, + // inputs[2] + // TxID + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + // Tx output index + 0x00, 0x00, 0x00, 0x03, + // custom asset ID + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + // secp256k1fx transfer input type ID + 0x00, 0x00, 0x00, 0x05, + // input amount + 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // number of signatures needed in input + 0x00, 0x00, 0x00, 0x00, + // length of memo + 0x00, 0x00, 0x00, 0x14, + // memo + 0xf0, 0x9f, 0x98, 0x85, 0x0a, 0x77, 0x65, 0x6c, + 0x6c, 0x20, 0x74, 0x68, 0x61, 0x74, 0x27, 0x73, + 0x01, 0x23, 0x45, 0x21, + // subnetID to modify + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 
+ 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, + 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, + 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, + // secp256k1fx authorization type ID + 0x00, 0x00, 0x00, 0x0a, + // number of signatures needed in authorization + 0x00, 0x00, 0x00, 0x00, + // secp256k1fx output owners type ID + 0x00, 0x00, 0x00, 0x0b, + // locktime + 0x00, 0x00, 0x00, 0x00, 0x34, 0x3e, 0xfc, 0xea, + // threshold + 0x00, 0x00, 0x00, 0x01, + // number of addrs + 0x00, 0x00, 0x00, 0x01, + // Addrs[0] + 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, + } + var unsignedComplexTransferSubnetOwnershipTx UnsignedTx = complexTransferSubnetOwnershipTx + unsignedComplexTransferSubnetOwnershipTxBytes, err := Codec.Marshal(CodecVersion, &unsignedComplexTransferSubnetOwnershipTx) + require.NoError(err) + require.Equal(expectedUnsignedComplexTransferSubnetOwnershipTxBytes, unsignedComplexTransferSubnetOwnershipTxBytes) + + aliaser := ids.NewAliaser() + require.NoError(aliaser.Alias(constants.PlatformChainID, "P")) + + unsignedComplexTransferSubnetOwnershipTx.InitCtx(&snow.Context{ + NetworkID: 1, + ChainID: constants.PlatformChainID, + AVAXAssetID: avaxAssetID, + BCLookup: aliaser, + }) + + unsignedComplexTransferSubnetOwnershipTxJSONBytes, err := json.MarshalIndent(unsignedComplexTransferSubnetOwnershipTx, "", "\t") + require.NoError(err) + require.Equal(`{ + "networkID": 1, + "blockchainID": "11111111111111111111111111111111LpoYY", + "outputs": [ + { + "assetID": "FvwEAhmxKfeiG8SnEvq42hc6whRyY3EFYAvebMqDNDGCgxN5Z", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "output": { + "locktime": 87654321, + "output": { + "addresses": [], + "amount": 1, + "locktime": 12345678, + "threshold": 0 + } + } + }, + { + "assetID": "2Ab62uWwJw1T6VvmKD36ufsiuGZuX1pGykXAvPX1LtjTRHxwcc", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "output": { + "locktime": 876543210, 
+ "output": { + "addresses": [ + "P-avax1g32kvaugnx4tk3z4vemc3xd2hdz92enh972wxr" + ], + "amount": 18446744073709551615, + "locktime": 0, + "threshold": 1 + } + } + } + ], + "inputs": [ + { + "txID": "2wiU5PnFTjTmoAXGZutHAsPF36qGGyLHYHj9G1Aucfmb3JFFGN", + "outputIndex": 1, + "assetID": "FvwEAhmxKfeiG8SnEvq42hc6whRyY3EFYAvebMqDNDGCgxN5Z", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "input": { + "amount": 1000000000, + "signatureIndices": [ + 2, + 5 + ] + } + }, + { + "txID": "2wiU5PnFTjTmoAXGZutHAsPF36qGGyLHYHj9G1Aucfmb3JFFGN", + "outputIndex": 2, + "assetID": "2Ab62uWwJw1T6VvmKD36ufsiuGZuX1pGykXAvPX1LtjTRHxwcc", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "input": { + "locktime": 876543210, + "input": { + "amount": 17293822569102704639, + "signatureIndices": [ + 0 + ] + } + } + }, + { + "txID": "2wiU5PnFTjTmoAXGZutHAsPF36qGGyLHYHj9G1Aucfmb3JFFGN", + "outputIndex": 3, + "assetID": "2Ab62uWwJw1T6VvmKD36ufsiuGZuX1pGykXAvPX1LtjTRHxwcc", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "input": { + "amount": 1152921504606846976, + "signatureIndices": [] + } + } + ], + "memo": "0xf09f98850a77656c6c2074686174277301234521", + "subnetID": "SkB92YpWm4UpburLz9tEKZw2i67H3FF6YkjaU4BkFUDTG9Xm", + "subnetAuthorization": { + "signatureIndices": [] + }, + "newOwner": { + "addresses": [ + "P-avax1g32kvaugnx4tk3z4vemc3xd2hdz92enh972wxr" + ], + "locktime": 876543210, + "threshold": 1 + } +}`, string(unsignedComplexTransferSubnetOwnershipTxJSONBytes)) +} + +func TestTransferSubnetOwnershipTxSyntacticVerify(t *testing.T) { + type test struct { + name string + txFunc func(*gomock.Controller) *TransferSubnetOwnershipTx + expectedErr error + } + + var ( + networkID = uint32(1337) + chainID = ids.GenerateTestID() + ) + + ctx := &snow.Context{ + ChainID: chainID, + NetworkID: networkID, + } + + // A BaseTx that already passed syntactic verification. + verifiedBaseTx := BaseTx{ + SyntacticallyVerified: true, + } + // Sanity check. 
+ require.NoError(t, verifiedBaseTx.SyntacticVerify(ctx)) + + // A BaseTx that passes syntactic verification. + validBaseTx := BaseTx{ + BaseTx: avax.BaseTx{ + NetworkID: networkID, + BlockchainID: chainID, + }, + } + // Sanity check. + require.NoError(t, validBaseTx.SyntacticVerify(ctx)) + // Make sure we're not caching the verification result. + require.False(t, validBaseTx.SyntacticallyVerified) + + // A BaseTx that fails syntactic verification. + invalidBaseTx := BaseTx{} + + tests := []test{ + { + name: "nil tx", + txFunc: func(*gomock.Controller) *TransferSubnetOwnershipTx { + return nil + }, + expectedErr: ErrNilTx, + }, + { + name: "already verified", + txFunc: func(*gomock.Controller) *TransferSubnetOwnershipTx { + return &TransferSubnetOwnershipTx{BaseTx: verifiedBaseTx} + }, + expectedErr: nil, + }, + { + name: "invalid BaseTx", + txFunc: func(*gomock.Controller) *TransferSubnetOwnershipTx { + return &TransferSubnetOwnershipTx{ + // Set subnetID so we don't error on that check. + Subnet: ids.GenerateTestID(), + BaseTx: invalidBaseTx, + } + }, + expectedErr: avax.ErrWrongNetworkID, + }, + { + name: "invalid subnetID", + txFunc: func(*gomock.Controller) *TransferSubnetOwnershipTx { + return &TransferSubnetOwnershipTx{ + BaseTx: validBaseTx, + Subnet: constants.PrimaryNetworkID, + } + }, + expectedErr: ErrTransferPermissionlessSubnet, + }, + { + name: "invalid subnetAuth", + txFunc: func(ctrl *gomock.Controller) *TransferSubnetOwnershipTx { + // This SubnetAuth fails verification. + invalidSubnetAuth := verify.NewMockVerifiable(ctrl) + invalidSubnetAuth.EXPECT().Verify().Return(errInvalidSubnetAuth) + return &TransferSubnetOwnershipTx{ + // Set subnetID so we don't error on that check. 
+ Subnet: ids.GenerateTestID(), + BaseTx: validBaseTx, + SubnetAuth: invalidSubnetAuth, + } + }, + expectedErr: errInvalidSubnetAuth, + }, + { + name: "passes verification", + txFunc: func(ctrl *gomock.Controller) *TransferSubnetOwnershipTx { + // This SubnetAuth passes verification. + validSubnetAuth := verify.NewMockVerifiable(ctrl) + validSubnetAuth.EXPECT().Verify().Return(nil) + mockOwner := fx.NewMockOwner(ctrl) + mockOwner.EXPECT().Verify().Return(nil) + return &TransferSubnetOwnershipTx{ + // Set subnetID so we don't error on that check. + Subnet: ids.GenerateTestID(), + BaseTx: validBaseTx, + SubnetAuth: validSubnetAuth, + Owner: mockOwner, + } + }, + expectedErr: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) + + tx := tt.txFunc(ctrl) + err := tx.SyntacticVerify(ctx) + require.ErrorIs(err, tt.expectedErr) + if tt.expectedErr != nil { + return + } + require.True(tx.SyntacticallyVerified) + }) + } +} diff --git a/avalanchego/vms/platformvm/txs/transform_subnet_tx.go b/avalanchego/vms/platformvm/txs/transform_subnet_tx.go index f540ea67..1ba543e1 100644 --- a/avalanchego/vms/platformvm/txs/transform_subnet_tx.go +++ b/avalanchego/vms/platformvm/txs/transform_subnet_tx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs diff --git a/avalanchego/vms/platformvm/txs/transform_subnet_tx_test.go b/avalanchego/vms/platformvm/txs/transform_subnet_tx_test.go index 82128bfc..30f3c492 100644 --- a/avalanchego/vms/platformvm/txs/transform_subnet_tx_test.go +++ b/avalanchego/vms/platformvm/txs/transform_subnet_tx_test.go @@ -1,23 +1,634 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package txs import ( + "encoding/json" "testing" - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/verify" "github.com/ava-labs/avalanchego/vms/platformvm/reward" + "github.com/ava-labs/avalanchego/vms/platformvm/stakeable" + "github.com/ava-labs/avalanchego/vms/secp256k1fx" + "github.com/ava-labs/avalanchego/vms/types" ) +func TestTransformSubnetTxSerialization(t *testing.T) { + require := require.New(t) + + addr := ids.ShortID{ + 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, + } + + avaxAssetID, err := ids.FromString("FvwEAhmxKfeiG8SnEvq42hc6whRyY3EFYAvebMqDNDGCgxN5Z") + require.NoError(err) + + customAssetID := ids.ID{ + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + } + + txID := ids.ID{ + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + } + subnetID := ids.ID{ + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, + 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, + 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, + } + + simpleTransformTx := &TransformSubnetTx{ + BaseTx: BaseTx{ + BaseTx: avax.BaseTx{ + NetworkID: constants.MainnetID, + BlockchainID: constants.PlatformChainID, + Outs: []*avax.TransferableOutput{}, + Ins: []*avax.TransferableInput{ + { + UTXOID: avax.UTXOID{ + 
TxID: txID, + OutputIndex: 1, + }, + Asset: avax.Asset{ + ID: avaxAssetID, + }, + In: &secp256k1fx.TransferInput{ + Amt: 10 * units.Avax, + Input: secp256k1fx.Input{ + SigIndices: []uint32{5}, + }, + }, + }, + { + UTXOID: avax.UTXOID{ + TxID: txID, + OutputIndex: 2, + }, + Asset: avax.Asset{ + ID: customAssetID, + }, + In: &secp256k1fx.TransferInput{ + Amt: 0xefffffffffffffff, + Input: secp256k1fx.Input{ + SigIndices: []uint32{0}, + }, + }, + }, + }, + Memo: types.JSONByteSlice{}, + }, + }, + Subnet: subnetID, + AssetID: customAssetID, + InitialSupply: 0x1000000000000000, + MaximumSupply: 0xffffffffffffffff, + MinConsumptionRate: 1_000, + MaxConsumptionRate: 1_000_000, + MinValidatorStake: 1, + MaxValidatorStake: 0xffffffffffffffff, + MinStakeDuration: 1, + MaxStakeDuration: 365 * 24 * 60 * 60, + MinDelegationFee: reward.PercentDenominator, + MinDelegatorStake: 1, + MaxValidatorWeightFactor: 1, + UptimeRequirement: .95 * reward.PercentDenominator, + SubnetAuth: &secp256k1fx.Input{ + SigIndices: []uint32{3}, + }, + } + require.NoError(simpleTransformTx.SyntacticVerify(&snow.Context{ + NetworkID: 1, + ChainID: constants.PlatformChainID, + AVAXAssetID: avaxAssetID, + })) + + expectedUnsignedSimpleTransformTxBytes := []byte{ + // Codec version + 0x00, 0x00, + // TransformSubnetTx type ID + 0x00, 0x00, 0x00, 0x18, + // Mainnet network ID + 0x00, 0x00, 0x00, 0x01, + // P-chain blockchain ID + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // number of outputs + 0x00, 0x00, 0x00, 0x00, + // number of inputs + 0x00, 0x00, 0x00, 0x02, + // inputs[0] + // TxID + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + // Tx output index + 0x00, 0x00, 0x00, 0x01, + // Mainnet AVAX 
assetID + 0x21, 0xe6, 0x73, 0x17, 0xcb, 0xc4, 0xbe, 0x2a, + 0xeb, 0x00, 0x67, 0x7a, 0xd6, 0x46, 0x27, 0x78, + 0xa8, 0xf5, 0x22, 0x74, 0xb9, 0xd6, 0x05, 0xdf, + 0x25, 0x91, 0xb2, 0x30, 0x27, 0xa8, 0x7d, 0xff, + // secp256k1fx transfer input type ID + 0x00, 0x00, 0x00, 0x05, + // input amount = 10 AVAX + 0x00, 0x00, 0x00, 0x02, 0x54, 0x0b, 0xe4, 0x00, + // number of signatures needed in input + 0x00, 0x00, 0x00, 0x01, + // index of signer + 0x00, 0x00, 0x00, 0x05, + // inputs[1] + // TxID + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + // Tx output index + 0x00, 0x00, 0x00, 0x02, + // custom asset ID + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + // secp256k1fx transfer input type ID + 0x00, 0x00, 0x00, 0x05, + // input amount + 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + // number of signatures needed in input + 0x00, 0x00, 0x00, 0x01, + // index of signer + 0x00, 0x00, 0x00, 0x00, + // length of memo + 0x00, 0x00, 0x00, 0x00, + // subnetID being transformed + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, + 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, + 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, + // staking asset ID + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + // initial supply + 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // maximum supply + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + // minimum consumption rate + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xe8, + // maximum consumption rate + 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 
0x42, 0x40, + // minimum staking amount + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // maximum staking amount + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + // minimum staking duration + 0x00, 0x00, 0x00, 0x01, + // maximum staking duration + 0x01, 0xe1, 0x33, 0x80, + // minimum delegation fee + 0x00, 0x0f, 0x42, 0x40, + // minimum delegation amount + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // maximum validator weight factor + 0x01, + // uptime requirement + 0x00, 0x0e, 0x7e, 0xf0, + // secp256k1fx authorization type ID + 0x00, 0x00, 0x00, 0x0a, + // number of signatures needed in authorization + 0x00, 0x00, 0x00, 0x01, + // authorization signfature index + 0x00, 0x00, 0x00, 0x03, + } + var unsignedSimpleTransformTx UnsignedTx = simpleTransformTx + unsignedSimpleTransformTxBytes, err := Codec.Marshal(CodecVersion, &unsignedSimpleTransformTx) + require.NoError(err) + require.Equal(expectedUnsignedSimpleTransformTxBytes, unsignedSimpleTransformTxBytes) + + complexTransformTx := &TransformSubnetTx{ + BaseTx: BaseTx{ + BaseTx: avax.BaseTx{ + NetworkID: constants.MainnetID, + BlockchainID: constants.PlatformChainID, + Outs: []*avax.TransferableOutput{ + { + Asset: avax.Asset{ + ID: avaxAssetID, + }, + Out: &stakeable.LockOut{ + Locktime: 87654321, + TransferableOut: &secp256k1fx.TransferOutput{ + Amt: 1, + OutputOwners: secp256k1fx.OutputOwners{ + Locktime: 12345678, + Threshold: 0, + Addrs: []ids.ShortID{}, + }, + }, + }, + }, + { + Asset: avax.Asset{ + ID: customAssetID, + }, + Out: &stakeable.LockOut{ + Locktime: 876543210, + TransferableOut: &secp256k1fx.TransferOutput{ + Amt: 0xffffffffffffffff, + OutputOwners: secp256k1fx.OutputOwners{ + Locktime: 0, + Threshold: 1, + Addrs: []ids.ShortID{ + addr, + }, + }, + }, + }, + }, + }, + Ins: []*avax.TransferableInput{ + { + UTXOID: avax.UTXOID{ + TxID: txID, + OutputIndex: 1, + }, + Asset: avax.Asset{ + ID: avaxAssetID, + }, + In: &secp256k1fx.TransferInput{ + Amt: units.KiloAvax, + Input: 
secp256k1fx.Input{ + SigIndices: []uint32{2, 5}, + }, + }, + }, + { + UTXOID: avax.UTXOID{ + TxID: txID, + OutputIndex: 2, + }, + Asset: avax.Asset{ + ID: customAssetID, + }, + In: &stakeable.LockIn{ + Locktime: 876543210, + TransferableIn: &secp256k1fx.TransferInput{ + Amt: 0xefffffffffffffff, + Input: secp256k1fx.Input{ + SigIndices: []uint32{0}, + }, + }, + }, + }, + { + UTXOID: avax.UTXOID{ + TxID: txID, + OutputIndex: 3, + }, + Asset: avax.Asset{ + ID: customAssetID, + }, + In: &secp256k1fx.TransferInput{ + Amt: 0x1000000000000000, + Input: secp256k1fx.Input{ + SigIndices: []uint32{}, + }, + }, + }, + }, + Memo: types.JSONByteSlice("😅\nwell that's\x01\x23\x45!"), + }, + }, + Subnet: subnetID, + AssetID: customAssetID, + InitialSupply: 0x1000000000000000, + MaximumSupply: 0x1000000000000000, + MinConsumptionRate: 0, + MaxConsumptionRate: 0, + MinValidatorStake: 1, + MaxValidatorStake: 0x1000000000000000, + MinStakeDuration: 1, + MaxStakeDuration: 1, + MinDelegationFee: 0, + MinDelegatorStake: 0xffffffffffffffff, + MaxValidatorWeightFactor: 255, + UptimeRequirement: 0, + SubnetAuth: &secp256k1fx.Input{ + SigIndices: []uint32{}, + }, + } + avax.SortTransferableOutputs(complexTransformTx.Outs, Codec) + utils.Sort(complexTransformTx.Ins) + require.NoError(complexTransformTx.SyntacticVerify(&snow.Context{ + NetworkID: 1, + ChainID: constants.PlatformChainID, + AVAXAssetID: avaxAssetID, + })) + + expectedUnsignedComplexTransformTxBytes := []byte{ + // Codec version + 0x00, 0x00, + // TransformSubnetTx type ID + 0x00, 0x00, 0x00, 0x18, + // Mainnet network ID + 0x00, 0x00, 0x00, 0x01, + // P-chain blockchain ID + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // number of outputs + 0x00, 0x00, 0x00, 0x02, + // outputs[0] + // Mainnet AVAX asset ID + 0x21, 0xe6, 0x73, 0x17, 0xcb, 0xc4, 0xbe, 0x2a, + 0xeb, 0x00, 0x67, 
0x7a, 0xd6, 0x46, 0x27, 0x78, + 0xa8, 0xf5, 0x22, 0x74, 0xb9, 0xd6, 0x05, 0xdf, + 0x25, 0x91, 0xb2, 0x30, 0x27, 0xa8, 0x7d, 0xff, + // Stakeable locked output type ID + 0x00, 0x00, 0x00, 0x16, + // Locktime + 0x00, 0x00, 0x00, 0x00, 0x05, 0x39, 0x7f, 0xb1, + // seck256k1fx tranfer output type ID + 0x00, 0x00, 0x00, 0x07, + // amount + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // secp256k1fx locktime + 0x00, 0x00, 0x00, 0x00, 0x00, 0xbc, 0x61, 0x4e, + // threshold + 0x00, 0x00, 0x00, 0x00, + // number of addresses + 0x00, 0x00, 0x00, 0x00, + // outputs[1] + // custom assest ID + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + // Stakeable locked output type ID + 0x00, 0x00, 0x00, 0x16, + // Locktime + 0x00, 0x00, 0x00, 0x00, 0x34, 0x3e, 0xfc, 0xea, + // seck256k1fx tranfer output type ID + 0x00, 0x00, 0x00, 0x07, + // amount + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + // secp256k1fx locktime + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // threshold + 0x00, 0x00, 0x00, 0x01, + // number of addresses + 0x00, 0x00, 0x00, 0x01, + // address[0] + 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, + 0x44, 0x55, 0x66, 0x77, + // number of inputs + 0x00, 0x00, 0x00, 0x03, + // inputs[0] + // TxID + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + // Tx output index + 0x00, 0x00, 0x00, 0x01, + // Mainnet AVAX asset ID + 0x21, 0xe6, 0x73, 0x17, 0xcb, 0xc4, 0xbe, 0x2a, + 0xeb, 0x00, 0x67, 0x7a, 0xd6, 0x46, 0x27, 0x78, + 0xa8, 0xf5, 0x22, 0x74, 0xb9, 0xd6, 0x05, 0xdf, + 0x25, 0x91, 0xb2, 0x30, 0x27, 0xa8, 0x7d, 0xff, + // secp256k1fx transfer input type ID + 0x00, 0x00, 0x00, 0x05, + // amount = 1,000 AVAX + 
0x00, 0x00, 0x00, 0xe8, 0xd4, 0xa5, 0x10, 0x00, + // number of signatures indices + 0x00, 0x00, 0x00, 0x02, + // first signature index + 0x00, 0x00, 0x00, 0x02, + // second signature index + 0x00, 0x00, 0x00, 0x05, + // inputs[1] + // TxID + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + // Tx output index + 0x00, 0x00, 0x00, 0x02, + // custom asset ID + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + // stakeable locked input type ID + 0x00, 0x00, 0x00, 0x15, + // locktime + 0x00, 0x00, 0x00, 0x00, 0x34, 0x3e, 0xfc, 0xea, + // secp256k1fx transfer input type ID + 0x00, 0x00, 0x00, 0x05, + // input amount + 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + // number of signatures needed in input + 0x00, 0x00, 0x00, 0x01, + // index of signer + 0x00, 0x00, 0x00, 0x00, + // inputs[2] + // TxID + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99, 0x88, + // Tx output index + 0x00, 0x00, 0x00, 0x03, + // custom asset ID + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + // secp256k1fx transfer input type ID + 0x00, 0x00, 0x00, 0x05, + // input amount + 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // number of signatures needed in input + 0x00, 0x00, 0x00, 0x00, + // memo length + 0x00, 0x00, 0x00, 0x14, + // memo + 0xf0, 0x9f, 0x98, 0x85, 0x0a, 0x77, 0x65, 0x6c, + 0x6c, 0x20, 0x74, 0x68, 0x61, 0x74, 0x27, 0x73, + 0x01, 0x23, 0x45, 0x21, + // subnetID being transformed + 0x01, 
0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, + 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, + 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, + // staking asset ID + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + 0x99, 0x77, 0x55, 0x77, 0x11, 0x33, 0x55, 0x31, + // initial supply + 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // maximum supply + 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // minimum consumption rate + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // maximum consumption rate + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // minimum staking amount + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // maximum staking amount + 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // minimum staking duration + 0x00, 0x00, 0x00, 0x01, + // maximum staking duration + 0x00, 0x00, 0x00, 0x01, + // minimum delegation fee + 0x00, 0x00, 0x00, 0x00, + // minimum delegation amount + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + // maximum validator weight factor + 0xff, + // uptime requirement + 0x00, 0x00, 0x00, 0x00, + // secp256k1fx authorization type ID + 0x00, 0x00, 0x00, 0x0a, + // number of signatures needed in authorization + 0x00, 0x00, 0x00, 0x00, + } + var unsignedComplexTransformTx UnsignedTx = complexTransformTx + unsignedComplexTransformTxBytes, err := Codec.Marshal(CodecVersion, &unsignedComplexTransformTx) + require.NoError(err) + require.Equal(expectedUnsignedComplexTransformTxBytes, unsignedComplexTransformTxBytes) + + aliaser := ids.NewAliaser() + require.NoError(aliaser.Alias(constants.PlatformChainID, "P")) + + unsignedComplexTransformTx.InitCtx(&snow.Context{ + NetworkID: 1, + ChainID: constants.PlatformChainID, + AVAXAssetID: avaxAssetID, + BCLookup: aliaser, + }) + + unsignedComplexTransformTxJSONBytes, err := json.MarshalIndent(unsignedComplexTransformTx, "", "\t") + 
require.NoError(err) + require.Equal(`{ + "networkID": 1, + "blockchainID": "11111111111111111111111111111111LpoYY", + "outputs": [ + { + "assetID": "FvwEAhmxKfeiG8SnEvq42hc6whRyY3EFYAvebMqDNDGCgxN5Z", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "output": { + "locktime": 87654321, + "output": { + "addresses": [], + "amount": 1, + "locktime": 12345678, + "threshold": 0 + } + } + }, + { + "assetID": "2Ab62uWwJw1T6VvmKD36ufsiuGZuX1pGykXAvPX1LtjTRHxwcc", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "output": { + "locktime": 876543210, + "output": { + "addresses": [ + "P-avax1g32kvaugnx4tk3z4vemc3xd2hdz92enh972wxr" + ], + "amount": 18446744073709551615, + "locktime": 0, + "threshold": 1 + } + } + } + ], + "inputs": [ + { + "txID": "2wiU5PnFTjTmoAXGZutHAsPF36qGGyLHYHj9G1Aucfmb3JFFGN", + "outputIndex": 1, + "assetID": "FvwEAhmxKfeiG8SnEvq42hc6whRyY3EFYAvebMqDNDGCgxN5Z", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "input": { + "amount": 1000000000000, + "signatureIndices": [ + 2, + 5 + ] + } + }, + { + "txID": "2wiU5PnFTjTmoAXGZutHAsPF36qGGyLHYHj9G1Aucfmb3JFFGN", + "outputIndex": 2, + "assetID": "2Ab62uWwJw1T6VvmKD36ufsiuGZuX1pGykXAvPX1LtjTRHxwcc", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "input": { + "locktime": 876543210, + "input": { + "amount": 17293822569102704639, + "signatureIndices": [ + 0 + ] + } + } + }, + { + "txID": "2wiU5PnFTjTmoAXGZutHAsPF36qGGyLHYHj9G1Aucfmb3JFFGN", + "outputIndex": 3, + "assetID": "2Ab62uWwJw1T6VvmKD36ufsiuGZuX1pGykXAvPX1LtjTRHxwcc", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "input": { + "amount": 1152921504606846976, + "signatureIndices": [] + } + } + ], + "memo": "0xf09f98850a77656c6c2074686174277301234521", + "subnetID": "SkB92YpWm4UpburLz9tEKZw2i67H3FF6YkjaU4BkFUDTG9Xm", + "assetID": "2Ab62uWwJw1T6VvmKD36ufsiuGZuX1pGykXAvPX1LtjTRHxwcc", + "initialSupply": 1152921504606846976, + "maximumSupply": 1152921504606846976, + 
"minConsumptionRate": 0, + "maxConsumptionRate": 0, + "minValidatorStake": 1, + "maxValidatorStake": 1152921504606846976, + "minStakeDuration": 1, + "maxStakeDuration": 1, + "minDelegationFee": 0, + "minDelegatorStake": 18446744073709551615, + "maxValidatorWeightFactor": 255, + "uptimeRequirement": 0, + "subnetAuthorization": { + "signatureIndices": [] + } +}`, string(unsignedComplexTransformTxJSONBytes)) +} + func TestTransformSubnetTxSyntacticVerify(t *testing.T) { type test struct { name string @@ -372,6 +983,29 @@ func TestTransformSubnetTxSyntacticVerify(t *testing.T) { }, err: errInvalidSubnetAuth, }, + { + name: "invalid BaseTx", + txFunc: func(*gomock.Controller) *TransformSubnetTx { + return &TransformSubnetTx{ + BaseTx: invalidBaseTx, + Subnet: ids.GenerateTestID(), + AssetID: ids.GenerateTestID(), + InitialSupply: 10, + MaximumSupply: 10, + MinConsumptionRate: 0, + MaxConsumptionRate: reward.PercentDenominator, + MinValidatorStake: 2, + MaxValidatorStake: 10, + MinStakeDuration: 1, + MaxStakeDuration: 2, + MinDelegationFee: reward.PercentDenominator, + MinDelegatorStake: 1, + MaxValidatorWeightFactor: 1, + UptimeRequirement: reward.PercentDenominator, + } + }, + err: avax.ErrWrongNetworkID, + }, { name: "passes verification", txFunc: func(ctrl *gomock.Controller) *TransformSubnetTx { @@ -404,33 +1038,10 @@ func TestTransformSubnetTxSyntacticVerify(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { ctrl := gomock.NewController(t) - defer ctrl.Finish() tx := tt.txFunc(ctrl) err := tx.SyntacticVerify(ctx) require.ErrorIs(t, err, tt.err) }) } - - t.Run("invalid BaseTx", func(t *testing.T) { - tx := &TransformSubnetTx{ - BaseTx: invalidBaseTx, - Subnet: ids.GenerateTestID(), - AssetID: ids.GenerateTestID(), - InitialSupply: 10, - MaximumSupply: 10, - MinConsumptionRate: 0, - MaxConsumptionRate: reward.PercentDenominator, - MinValidatorStake: 2, - MaxValidatorStake: 10, - MinStakeDuration: 1, - MaxStakeDuration: 2, - 
MinDelegationFee: reward.PercentDenominator, - MinDelegatorStake: 1, - MaxValidatorWeightFactor: 1, - UptimeRequirement: reward.PercentDenominator, - } - err := tx.SyntacticVerify(ctx) - require.Error(t, err) - }) } diff --git a/avalanchego/vms/platformvm/txs/tx.go b/avalanchego/vms/platformvm/txs/tx.go index a6df33b8..9874f66e 100644 --- a/avalanchego/vms/platformvm/txs/tx.go +++ b/avalanchego/vms/platformvm/txs/tx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs @@ -9,6 +9,7 @@ import ( "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/network/p2p/gossip" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/hashing" @@ -18,6 +19,8 @@ import ( ) var ( + _ gossip.Gossipable = (*Tx)(nil) + ErrNilSignedTx = errors.New("nil signed tx is not valid") errSignedTxNotInitialized = errors.New("signed tx was never initialized and is not valid") @@ -31,7 +34,7 @@ type Tx struct { // The credentials of this transaction Creds []verify.Verifiable `serialize:"true" json:"credentials"` - id ids.ID + TxID ids.ID `json:"id"` bytes []byte } @@ -45,12 +48,12 @@ func NewSigned( } func (tx *Tx) Initialize(c codec.Manager) error { - signedBytes, err := c.Marshal(Version, tx) + signedBytes, err := c.Marshal(CodecVersion, tx) if err != nil { return fmt.Errorf("couldn't marshal ProposalTx: %w", err) } - unsignedBytesLen, err := c.Size(Version, &tx.Unsigned) + unsignedBytesLen, err := c.Size(CodecVersion, &tx.Unsigned) if err != nil { return fmt.Errorf("couldn't calculate UnsignedTx marshal length: %w", err) } @@ -63,7 +66,7 @@ func (tx *Tx) Initialize(c codec.Manager) error { func (tx *Tx) SetBytes(unsignedBytes, signedBytes []byte) { tx.Unsigned.SetBytes(unsignedBytes) tx.bytes = signedBytes - 
tx.id = hashing.ComputeHash256Array(signedBytes) + tx.TxID = hashing.ComputeHash256Array(signedBytes) } // Parse signed tx starting from its byte representation. @@ -75,7 +78,7 @@ func Parse(c codec.Manager, signedBytes []byte) (*Tx, error) { return nil, fmt.Errorf("couldn't parse tx: %w", err) } - unsignedBytesLen, err := c.Size(Version, &tx.Unsigned) + unsignedBytesLen, err := c.Size(CodecVersion, &tx.Unsigned) if err != nil { return nil, fmt.Errorf("couldn't calculate UnsignedTx marshal length: %w", err) } @@ -90,7 +93,11 @@ func (tx *Tx) Bytes() []byte { } func (tx *Tx) ID() ids.ID { - return tx.id + return tx.TxID +} + +func (tx *Tx) GossipID() ids.ID { + return tx.TxID } // UTXOs returns the UTXOs transaction is producing. @@ -100,7 +107,7 @@ func (tx *Tx) UTXOs() []*avax.UTXO { for i, out := range outs { utxos[i] = &avax.UTXO{ UTXOID: avax.UTXOID{ - TxID: tx.id, + TxID: tx.TxID, OutputIndex: uint32(i), }, Asset: avax.Asset{ID: out.AssetID()}, @@ -114,7 +121,7 @@ func (tx *Tx) SyntacticVerify(ctx *snow.Context) error { switch { case tx == nil: return ErrNilSignedTx - case tx.id == ids.Empty: + case tx.TxID == ids.Empty: return errSignedTxNotInitialized default: return tx.Unsigned.SyntacticVerify(ctx) @@ -125,7 +132,7 @@ func (tx *Tx) SyntacticVerify(ctx *snow.Context) error { // Note: We explicitly pass the codec in Sign since we may need to sign P-Chain // genesis txs whose length exceed the max length of txs.Codec. 
func (tx *Tx) Sign(c codec.Manager, signers [][]*secp256k1.PrivateKey) error { - unsignedBytes, err := c.Marshal(Version, &tx.Unsigned) + unsignedBytes, err := c.Marshal(CodecVersion, &tx.Unsigned) if err != nil { return fmt.Errorf("couldn't marshal UnsignedTx: %w", err) } @@ -146,7 +153,7 @@ func (tx *Tx) Sign(c codec.Manager, signers [][]*secp256k1.PrivateKey) error { tx.Creds = append(tx.Creds, cred) // Attach credential } - signedBytes, err := c.Marshal(Version, tx) + signedBytes, err := c.Marshal(CodecVersion, tx) if err != nil { return fmt.Errorf("couldn't marshal ProposalTx: %w", err) } diff --git a/avalanchego/vms/platformvm/txs/txheap/by_age.go b/avalanchego/vms/platformvm/txs/txheap/by_age.go deleted file mode 100644 index a445822d..00000000 --- a/avalanchego/vms/platformvm/txs/txheap/by_age.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package txheap - -var _ Heap = (*byAge)(nil) - -type byAge struct { - txHeap -} - -func NewByAge() Heap { - h := &byAge{} - h.initialize(h) - return h -} - -func (h *byAge) Less(i, j int) bool { - return h.txs[i].age < h.txs[j].age -} diff --git a/avalanchego/vms/platformvm/txs/txheap/by_end_time.go b/avalanchego/vms/platformvm/txs/txheap/by_end_time.go index 2b0cbd8d..9cbba82c 100644 --- a/avalanchego/vms/platformvm/txs/txheap/by_end_time.go +++ b/avalanchego/vms/platformvm/txs/txheap/by_end_time.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package txheap @@ -6,25 +6,33 @@ package txheap import ( "time" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/heap" "github.com/ava-labs/avalanchego/vms/platformvm/txs" ) var _ TimedHeap = (*byEndTime)(nil) +type TimedHeap interface { + Heap + + Timestamp() time.Time +} + type byEndTime struct { txHeap } func NewByEndTime() TimedHeap { - h := &byEndTime{} - h.initialize(h) - return h -} - -func (h *byEndTime) Less(i, j int) bool { - iTime := h.txs[i].tx.Unsigned.(txs.Staker).EndTime() - jTime := h.txs[j].tx.Unsigned.(txs.Staker).EndTime() - return iTime.Before(jTime) + return &byEndTime{ + txHeap: txHeap{ + heap: heap.NewMap[ids.ID, heapTx](func(a, b heapTx) bool { + aTime := a.tx.Unsigned.(txs.Staker).EndTime() + bTime := b.tx.Unsigned.(txs.Staker).EndTime() + return aTime.Before(bTime) + }), + }, + } } func (h *byEndTime) Timestamp() time.Time { diff --git a/avalanchego/vms/platformvm/txs/txheap/by_end_time_test.go b/avalanchego/vms/platformvm/txs/txheap/by_end_time_test.go index 05995683..a629b7b1 100644 --- a/avalanchego/vms/platformvm/txs/txheap/by_end_time_test.go +++ b/avalanchego/vms/platformvm/txs/txheap/by_end_time_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package txheap @@ -14,7 +14,7 @@ import ( "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) -func TestByStopTime(t *testing.T) { +func TestByEndTime(t *testing.T) { require := require.New(t) txHeap := NewByEndTime() @@ -23,39 +23,36 @@ func TestByStopTime(t *testing.T) { utx0 := &txs.AddValidatorTx{ Validator: txs.Validator{ - NodeID: ids.NodeID{0}, + NodeID: ids.BuildTestNodeID([]byte{0}), Start: uint64(baseTime.Unix()), End: uint64(baseTime.Unix()) + 1, }, RewardsOwner: &secp256k1fx.OutputOwners{}, } tx0 := &txs.Tx{Unsigned: utx0} - err := tx0.Initialize(txs.Codec) - require.NoError(err) + require.NoError(tx0.Initialize(txs.Codec)) utx1 := &txs.AddValidatorTx{ Validator: txs.Validator{ - NodeID: ids.NodeID{1}, + NodeID: ids.BuildTestNodeID([]byte{1}), Start: uint64(baseTime.Unix()), End: uint64(baseTime.Unix()) + 2, }, RewardsOwner: &secp256k1fx.OutputOwners{}, } tx1 := &txs.Tx{Unsigned: utx1} - err = tx1.Initialize(txs.Codec) - require.NoError(err) + require.NoError(tx1.Initialize(txs.Codec)) utx2 := &txs.AddValidatorTx{ Validator: txs.Validator{ - NodeID: ids.NodeID{1}, + NodeID: ids.BuildTestNodeID([]byte{1}), Start: uint64(baseTime.Unix()), End: uint64(baseTime.Unix()) + 3, }, RewardsOwner: &secp256k1fx.OutputOwners{}, } tx2 := &txs.Tx{Unsigned: utx2} - err = tx2.Initialize(txs.Codec) - require.NoError(err) + require.NoError(tx2.Initialize(txs.Codec)) txHeap.Add(tx2) require.Equal(utx2.EndTime(), txHeap.Timestamp()) diff --git a/avalanchego/vms/platformvm/txs/txheap/by_start_time.go b/avalanchego/vms/platformvm/txs/txheap/by_start_time.go deleted file mode 100644 index 31834cf0..00000000 --- a/avalanchego/vms/platformvm/txs/txheap/by_start_time.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package txheap - -import ( - "time" - - "github.com/ava-labs/avalanchego/vms/platformvm/txs" -) - -var _ TimedHeap = (*byStartTime)(nil) - -type TimedHeap interface { - Heap - - Timestamp() time.Time -} - -type byStartTime struct { - txHeap -} - -func NewByStartTime() TimedHeap { - h := &byStartTime{} - h.initialize(h) - return h -} - -func (h *byStartTime) Less(i, j int) bool { - iTime := h.txs[i].tx.Unsigned.(txs.Staker).StartTime() - jTime := h.txs[j].tx.Unsigned.(txs.Staker).StartTime() - return iTime.Before(jTime) -} - -func (h *byStartTime) Timestamp() time.Time { - return h.Peek().Unsigned.(txs.Staker).StartTime() -} diff --git a/avalanchego/vms/platformvm/txs/txheap/by_start_time_test.go b/avalanchego/vms/platformvm/txs/txheap/by_start_time_test.go deleted file mode 100644 index fe9180d0..00000000 --- a/avalanchego/vms/platformvm/txs/txheap/by_start_time_test.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package txheap - -import ( - "testing" - "time" - - "github.com/stretchr/testify/require" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/vms/platformvm/txs" - "github.com/ava-labs/avalanchego/vms/secp256k1fx" -) - -func TestByStartTime(t *testing.T) { - require := require.New(t) - - txHeap := NewByStartTime() - - baseTime := time.Now() - - utx0 := &txs.AddValidatorTx{ - Validator: txs.Validator{ - NodeID: ids.NodeID{0}, - Start: uint64(baseTime.Unix()) + 1, - End: uint64(baseTime.Unix()) + 1, - }, - RewardsOwner: &secp256k1fx.OutputOwners{}, - } - tx0 := &txs.Tx{Unsigned: utx0} - err := tx0.Initialize(txs.Codec) - require.NoError(err) - - utx1 := &txs.AddValidatorTx{ - Validator: txs.Validator{ - NodeID: ids.NodeID{1}, - Start: uint64(baseTime.Unix()) + 2, - End: uint64(baseTime.Unix()) + 2, - }, - RewardsOwner: &secp256k1fx.OutputOwners{}, - } - tx1 := &txs.Tx{Unsigned: utx1} - err = tx1.Initialize(txs.Codec) - require.NoError(err) - - utx2 := &txs.AddValidatorTx{ - Validator: txs.Validator{ - NodeID: ids.NodeID{1}, - Start: uint64(baseTime.Unix()) + 3, - End: uint64(baseTime.Unix()) + 3, - }, - RewardsOwner: &secp256k1fx.OutputOwners{}, - } - tx2 := &txs.Tx{Unsigned: utx2} - err = tx2.Initialize(txs.Codec) - require.NoError(err) - - txHeap.Add(tx2) - require.Equal(utx2.EndTime(), txHeap.Timestamp()) - - txHeap.Add(tx1) - require.Equal(utx1.EndTime(), txHeap.Timestamp()) - - txHeap.Add(tx0) - require.Equal(utx0.EndTime(), txHeap.Timestamp()) - require.Equal(tx0, txHeap.Peek()) -} diff --git a/avalanchego/vms/platformvm/txs/txheap/heap.go b/avalanchego/vms/platformvm/txs/txheap/heap.go index 4b6ba686..7c9e33d3 100644 --- a/avalanchego/vms/platformvm/txs/txheap/heap.go +++ b/avalanchego/vms/platformvm/txs/txheap/heap.go @@ -1,17 +1,14 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package txheap import ( - "container/heap" - "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/heap" "github.com/ava-labs/avalanchego/vms/platformvm/txs" ) -var _ Heap = (*txHeap)(nil) - type Heap interface { Add(tx *txs.Tx) Get(txID ids.ID) *txs.Tx @@ -23,107 +20,57 @@ type Heap interface { } type heapTx struct { - tx *txs.Tx - index int - age int + tx *txs.Tx + age int } type txHeap struct { - self heap.Interface - - txIDToIndex map[ids.ID]int - txs []*heapTx - currentAge int -} - -func (h *txHeap) initialize(self heap.Interface) { - h.self = self - h.txIDToIndex = make(map[ids.ID]int) + heap heap.Map[ids.ID, heapTx] + currentAge int } func (h *txHeap) Add(tx *txs.Tx) { - heap.Push(h.self, tx) + txID := tx.ID() + if h.heap.Contains(txID) { + return + } + htx := heapTx{ + tx: tx, + age: h.currentAge, + } + h.currentAge++ + h.heap.Push(txID, htx) } func (h *txHeap) Get(txID ids.ID) *txs.Tx { - index, exists := h.txIDToIndex[txID] - if !exists { - return nil - } - return h.txs[index].tx + got, _ := h.heap.Get(txID) + return got.tx } func (h *txHeap) List() []*txs.Tx { - res := make([]*txs.Tx, 0, len(h.txs)) - for _, tx := range h.txs { + heapTxs := heap.MapValues(h.heap) + res := make([]*txs.Tx, 0, len(heapTxs)) + for _, tx := range heapTxs { res = append(res, tx.tx) } return res } func (h *txHeap) Remove(txID ids.ID) *txs.Tx { - index, exists := h.txIDToIndex[txID] - if !exists { - return nil - } - return heap.Remove(h.self, index).(*txs.Tx) + removed, _ := h.heap.Remove(txID) + return removed.tx } func (h *txHeap) Peek() *txs.Tx { - return h.txs[0].tx + _, peeked, _ := h.heap.Peek() + return peeked.tx } func (h *txHeap) RemoveTop() *txs.Tx { - return heap.Pop(h.self).(*txs.Tx) + _, popped, _ := h.heap.Pop() + return popped.tx } func (h *txHeap) Len() int { - return len(h.txs) -} - -func (h *txHeap) Swap(i, j int) { - // The follow "i"s and "j"s are intentionally swapped to perform the actual - // swap - iTx := h.txs[j] - jTx := 
h.txs[i] - - iTx.index = i - jTx.index = j - h.txs[i] = iTx - h.txs[j] = jTx - - iTxID := iTx.tx.ID() - jTxID := jTx.tx.ID() - h.txIDToIndex[iTxID] = i - h.txIDToIndex[jTxID] = j -} - -func (h *txHeap) Push(x interface{}) { - tx := x.(*txs.Tx) - - txID := tx.ID() - _, exists := h.txIDToIndex[txID] - if exists { - return - } - htx := &heapTx{ - tx: tx, - index: len(h.txs), - age: h.currentAge, - } - h.currentAge++ - h.txIDToIndex[txID] = htx.index - h.txs = append(h.txs, htx) -} - -func (h *txHeap) Pop() interface{} { - newLen := len(h.txs) - 1 - htx := h.txs[newLen] - h.txs[newLen] = nil - h.txs = h.txs[:newLen] - - tx := htx.tx - txID := tx.ID() - delete(h.txIDToIndex, txID) - return tx + return h.heap.Len() } diff --git a/avalanchego/vms/platformvm/txs/txheap/with_metrics.go b/avalanchego/vms/platformvm/txs/txheap/with_metrics.go deleted file mode 100644 index 60ab4f93..00000000 --- a/avalanchego/vms/platformvm/txs/txheap/with_metrics.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package txheap - -import ( - "github.com/prometheus/client_golang/prometheus" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/vms/platformvm/txs" -) - -var _ Heap = (*withMetrics)(nil) - -type withMetrics struct { - Heap - - numTxs prometheus.Gauge -} - -func NewWithMetrics( - txHeap Heap, - namespace string, - registerer prometheus.Registerer, -) (Heap, error) { - h := &withMetrics{ - Heap: txHeap, - numTxs: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "count", - Help: "Number of transactions in the heap", - }), - } - return h, registerer.Register(h.numTxs) -} - -func (h *withMetrics) Add(tx *txs.Tx) { - h.Heap.Add(tx) - h.numTxs.Set(float64(h.Heap.Len())) -} - -func (h *withMetrics) Remove(txID ids.ID) *txs.Tx { - tx := h.Heap.Remove(txID) - h.numTxs.Set(float64(h.Heap.Len())) - return tx -} - -func (h *withMetrics) RemoveTop() *txs.Tx { - tx := h.Heap.RemoveTop() - h.numTxs.Set(float64(h.Heap.Len())) - return tx -} diff --git a/avalanchego/vms/platformvm/txs/unsigned_tx.go b/avalanchego/vms/platformvm/txs/unsigned_tx.go index 7fe1702b..5b3e62dd 100644 --- a/avalanchego/vms/platformvm/txs/unsigned_tx.go +++ b/avalanchego/vms/platformvm/txs/unsigned_tx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs diff --git a/avalanchego/vms/platformvm/txs/validator.go b/avalanchego/vms/platformvm/txs/validator.go index 79163392..726ba23b 100644 --- a/avalanchego/vms/platformvm/txs/validator.go +++ b/avalanchego/vms/platformvm/txs/validator.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package txs @@ -40,11 +40,6 @@ func (v *Validator) EndTime() time.Time { return time.Unix(int64(v.End), 0) } -// Duration is the amount of time that this validator will be in the validator set -func (v *Validator) Duration() time.Duration { - return v.EndTime().Sub(v.StartTime()) -} - // Weight is this validator's weight when sampling func (v *Validator) Weight() uint64 { return v.Wght @@ -60,9 +55,8 @@ func (v *Validator) Verify() error { } } -// BoundedBy returns true iff the period that [validator] validates is a -// (non-strict) subset of the time that [other] validates. -// Namely, startTime <= v.StartTime() <= v.EndTime() <= endTime -func (v *Validator) BoundedBy(startTime, endTime time.Time) bool { - return !v.StartTime().Before(startTime) && !v.EndTime().After(endTime) +// BoundedBy returns true iff staker start and end are a +// (non-strict) subset of the provided time bound +func BoundedBy(stakerStart, stakerEnd, lowerBound, upperBound time.Time) bool { + return !stakerStart.Before(lowerBound) && !stakerEnd.After(upperBound) && !stakerEnd.Before(stakerStart) } diff --git a/avalanchego/vms/platformvm/txs/validator_test.go b/avalanchego/vms/platformvm/txs/validator_test.go index 047c7180..0b9e749c 100644 --- a/avalanchego/vms/platformvm/txs/validator_test.go +++ b/avalanchego/vms/platformvm/txs/validator_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package txs @@ -9,22 +9,20 @@ import ( "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" ) const defaultWeight = 10000 -// each key controls an address that has [defaultBalance] AVAX at genesis -var keys = secp256k1.TestKeys() - -func TestValidatorBoundedBy(t *testing.T) { +func TestBoundedBy(t *testing.T) { require := require.New(t) + nodeID := ids.GenerateTestNodeID() + // case 1: a starts, a finishes, b starts, b finishes aStartTime := uint64(0) aEndTIme := uint64(1) a := &Validator{ - NodeID: ids.NodeID(keys[0].PublicKey().Address()), + NodeID: nodeID, Start: aStartTime, End: aEndTIme, Wght: defaultWeight, @@ -33,59 +31,59 @@ func TestValidatorBoundedBy(t *testing.T) { bStartTime := uint64(2) bEndTime := uint64(3) b := &Validator{ - NodeID: ids.NodeID(keys[0].PublicKey().Address()), + NodeID: nodeID, Start: bStartTime, End: bEndTime, Wght: defaultWeight, } - require.False(a.BoundedBy(b.StartTime(), b.EndTime())) - require.False(b.BoundedBy(a.StartTime(), a.EndTime())) + require.False(BoundedBy(a.StartTime(), a.EndTime(), b.StartTime(), b.EndTime())) + require.False(BoundedBy(b.StartTime(), b.EndTime(), a.StartTime(), a.EndTime())) // case 2: a starts, b starts, a finishes, b finishes a.Start = 0 b.Start = 1 a.End = 2 b.End = 3 - require.False(a.BoundedBy(b.StartTime(), b.EndTime())) - require.False(b.BoundedBy(a.StartTime(), a.EndTime())) + require.False(BoundedBy(a.StartTime(), a.EndTime(), b.StartTime(), b.EndTime())) + require.False(BoundedBy(b.StartTime(), b.EndTime(), a.StartTime(), a.EndTime())) // case 3: a starts, b starts, b finishes, a finishes a.Start = 0 b.Start = 1 b.End = 2 a.End = 3 - require.False(a.BoundedBy(b.StartTime(), b.EndTime())) - require.True(b.BoundedBy(a.StartTime(), a.EndTime())) + require.False(BoundedBy(a.StartTime(), a.EndTime(), b.StartTime(), b.EndTime())) + require.True(BoundedBy(b.StartTime(), b.EndTime(), a.StartTime(), a.EndTime())) // case 
4: b starts, a starts, a finishes, b finishes b.Start = 0 a.Start = 1 a.End = 2 b.End = 3 - require.True(a.BoundedBy(b.StartTime(), b.EndTime())) - require.False(b.BoundedBy(a.StartTime(), a.EndTime())) + require.True(BoundedBy(a.StartTime(), a.EndTime(), b.StartTime(), b.EndTime())) + require.False(BoundedBy(b.StartTime(), b.EndTime(), a.StartTime(), a.EndTime())) // case 5: b starts, b finishes, a starts, a finishes b.Start = 0 b.End = 1 a.Start = 2 a.End = 3 - require.False(a.BoundedBy(b.StartTime(), b.EndTime())) - require.False(b.BoundedBy(a.StartTime(), a.EndTime())) + require.False(BoundedBy(a.StartTime(), a.EndTime(), b.StartTime(), b.EndTime())) + require.False(BoundedBy(b.StartTime(), b.EndTime(), a.StartTime(), a.EndTime())) // case 6: b starts, a starts, b finishes, a finishes b.Start = 0 a.Start = 1 b.End = 2 a.End = 3 - require.False(a.BoundedBy(b.StartTime(), b.EndTime())) - require.False(b.BoundedBy(a.StartTime(), a.EndTime())) + require.False(BoundedBy(a.StartTime(), a.EndTime(), b.StartTime(), b.EndTime())) + require.False(BoundedBy(b.StartTime(), b.EndTime(), a.StartTime(), a.EndTime())) // case 3: a starts, b starts, b finishes, a finishes a.Start = 0 b.Start = 0 b.End = 1 a.End = 1 - require.True(a.BoundedBy(b.StartTime(), b.EndTime())) - require.True(b.BoundedBy(a.StartTime(), a.EndTime())) + require.True(BoundedBy(a.StartTime(), a.EndTime(), b.StartTime(), b.EndTime())) + require.True(BoundedBy(b.StartTime(), b.EndTime(), a.StartTime(), a.EndTime())) } diff --git a/avalanchego/vms/platformvm/txs/visitor.go b/avalanchego/vms/platformvm/txs/visitor.go index 18455d81..b3fc55af 100644 --- a/avalanchego/vms/platformvm/txs/visitor.go +++ b/avalanchego/vms/platformvm/txs/visitor.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package txs @@ -18,4 +18,6 @@ type Visitor interface { TransformSubnetTx(*TransformSubnetTx) error AddPermissionlessValidatorTx(*AddPermissionlessValidatorTx) error AddPermissionlessDelegatorTx(*AddPermissionlessDelegatorTx) error + TransferSubnetOwnershipTx(*TransferSubnetOwnershipTx) error + BaseTx(*BaseTx) error } diff --git a/avalanchego/vms/platformvm/utxo/handler.go b/avalanchego/vms/platformvm/utxo/handler.go index 90206bae..6368d97c 100644 --- a/avalanchego/vms/platformvm/utxo/handler.go +++ b/avalanchego/vms/platformvm/utxo/handler.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package utxo @@ -28,6 +28,13 @@ import ( var ( _ Handler = (*handler)(nil) + ErrInsufficientFunds = errors.New("insufficient funds") + ErrInsufficientUnlockedFunds = errors.New("insufficient unlocked funds") + ErrInsufficientLockedFunds = errors.New("insufficient locked funds") + errWrongNumberCredentials = errors.New("wrong number of credentials") + errWrongNumberUTXOs = errors.New("wrong number of UTXOs") + errAssetIDMismatch = errors.New("input asset ID does not match UTXO asset ID") + errLocktimeMismatch = errors.New("input locktime does not match UTXO locktime") errCantSign = errors.New("can't sign") errLockedFundsNotMarkedAsLocked = errors.New("locked funds not marked as locked") ) @@ -222,7 +229,7 @@ func (h *handler) Spend( remainingValue := in.Amount() // Stake any value that should be staked - amountToStake := math.Min( + amountToStake := min( amount-amountStaked, // Amount we still need to stake remainingValue, // Amount available to stake ) @@ -313,7 +320,7 @@ func (h *handler) Spend( remainingValue := in.Amount() // Burn any value that should be burned - amountToBurn := math.Min( + amountToBurn := min( fee-amountBurned, // Amount we still need to burn remainingValue, // Amount available to burn ) @@ -321,7 +328,7 @@ func (h 
*handler) Spend( remainingValue -= amountToBurn // Stake any value that should be staked - amountToStake := math.Min( + amountToStake := min( amount-amountStaked, // Amount we still need to stake remainingValue, // Amount available to stake ) @@ -371,8 +378,9 @@ func (h *handler) Spend( if amountBurned < fee || amountStaked < amount { return nil, nil, nil, nil, fmt.Errorf( - "provided keys have balance (unlocked, locked) (%d, %d) but need (%d, %d)", - amountBurned, amountStaked, fee, amount) + "%w (unlocked, locked) (%d, %d) but need (%d, %d)", + ErrInsufficientFunds, amountBurned, amountStaked, fee, amount, + ) } avax.SortTransferableInputsWithSigners(ins, signers) // sort inputs and keys @@ -391,23 +399,19 @@ func (h *handler) Authorize( []*secp256k1.PrivateKey, // Keys that prove ownership error, ) { - subnetTx, _, err := state.GetTx(subnetID) + subnetOwner, err := state.GetSubnetOwner(subnetID) if err != nil { return nil, nil, fmt.Errorf( - "failed to fetch subnet %s: %w", + "failed to fetch subnet owner for %s: %w", subnetID, err, ) } - subnet, ok := subnetTx.Unsigned.(*txs.CreateSubnetTx) - if !ok { - return nil, nil, fmt.Errorf("expected tx type *txs.CreateSubnetTx but got %T", subnetTx.Unsigned) - } // Make sure the owners of the subnet match the provided keys - owner, ok := subnet.Owner.(*secp256k1fx.OutputOwners) + owner, ok := subnetOwner.(*secp256k1fx.OutputOwners) if !ok { - return nil, nil, fmt.Errorf("expected *secp256k1fx.OutputOwners but got %T", subnet.Owner) + return nil, nil, fmt.Errorf("expected *secp256k1fx.OutputOwners but got %T", subnetOwner) } // Add the keys to a keychain @@ -459,14 +463,16 @@ func (h *handler) VerifySpendUTXOs( ) error { if len(ins) != len(creds) { return fmt.Errorf( - "there are %d inputs but %d credentials. Should be same number", + "%w: %d inputs != %d credentials", + errWrongNumberCredentials, len(ins), len(creds), ) } if len(ins) != len(utxos) { return fmt.Errorf( - "there are %d inputs but %d utxos. 
Should be same number", + "%w: %d inputs != %d utxos", + errWrongNumberUTXOs, len(ins), len(utxos), ) @@ -496,8 +502,8 @@ func (h *handler) VerifySpendUTXOs( claimedAssetID := input.AssetID() if realAssetID != claimedAssetID { return fmt.Errorf( - "input %d has asset ID %s but UTXO has asset ID %s", - index, + "%w: %s != %s", + errAssetIDMismatch, claimedAssetID, realAssetID, ) @@ -520,7 +526,12 @@ func (h *handler) VerifySpendUTXOs( } else if ok { if inner.Locktime != locktime { // This input is locked, but its locktime is wrong - return fmt.Errorf("expected input %d locktime to be %d but got %d", index, locktime, inner.Locktime) + return fmt.Errorf( + "%w: %d != %d", + errLocktimeMismatch, + inner.Locktime, + locktime, + ) } in = inner.TransferableIn } @@ -546,7 +557,7 @@ func (h *handler) VerifySpendUTXOs( return fmt.Errorf("expected fx.Owned but got %T", out) } owner := owned.Owners() - ownerBytes, err := txs.Codec.Marshal(txs.Version, owner) + ownerBytes, err := txs.Codec.Marshal(txs.CodecVersion, owner) if err != nil { return fmt.Errorf("couldn't marshal owner: %w", err) } @@ -595,7 +606,7 @@ func (h *handler) VerifySpendUTXOs( return fmt.Errorf("expected fx.Owned but got %T", out) } owner := owned.Owners() - ownerBytes, err := txs.Codec.Marshal(txs.Version, owner) + ownerBytes, err := txs.Codec.Marshal(txs.CodecVersion, owner) if err != nil { return fmt.Errorf("couldn't marshal owner: %w", err) } @@ -630,10 +641,11 @@ func (h *handler) VerifySpendUTXOs( unlockedConsumedAsset := unlockedConsumed[assetID] if increase > unlockedConsumedAsset { return fmt.Errorf( - "address %s produces %d unlocked and consumes %d unlocked for locktime %d", + "%w: %s needs %d more %s for locktime %d", + ErrInsufficientLockedFunds, ownerID, - increase, - unlockedConsumedAsset, + increase-unlockedConsumedAsset, + assetID, locktime, ) } @@ -648,10 +660,10 @@ func (h *handler) VerifySpendUTXOs( // More unlocked tokens produced than consumed. Invalid. 
if unlockedProducedAsset > unlockedConsumedAsset { return fmt.Errorf( - "tx produces more unlocked %q (%d) than it consumes (%d)", + "%w: needs %d more %s", + ErrInsufficientUnlockedFunds, + unlockedProducedAsset-unlockedConsumedAsset, assetID, - unlockedProducedAsset, - unlockedConsumedAsset, ) } } diff --git a/avalanchego/vms/platformvm/utxo/handler_test.go b/avalanchego/vms/platformvm/utxo/handler_test.go index d5a2759e..d0224ed4 100644 --- a/avalanchego/vms/platformvm/utxo/handler_test.go +++ b/avalanchego/vms/platformvm/utxo/handler_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package utxo @@ -11,7 +11,7 @@ import ( "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/vms/components/avax" @@ -19,6 +19,8 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/stakeable" "github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/ava-labs/avalanchego/vms/secp256k1fx" + + safemath "github.com/ava-labs/avalanchego/utils/math" ) var _ txs.UnsignedTx = (*dummyUnsignedTx)(nil) @@ -37,8 +39,10 @@ func TestVerifySpendUTXOs(t *testing.T) { require.NoError(t, fx.InitializeVM(&secp256k1fx.TestVM{})) require.NoError(t, fx.Bootstrapped()) + ctx := snowtest.Context(t, snowtest.PChainID) + h := &handler{ - ctx: snow.DefaultContextTest(), + ctx: ctx, clk: &mockable.Clock{}, fx: fx, } @@ -62,7 +66,7 @@ func TestVerifySpendUTXOs(t *testing.T) { outs []*avax.TransferableOutput creds []verify.Verifiable producedAmounts map[ids.ID]uint64 - shouldErr bool + expectedErr error }{ { description: "no inputs, no outputs, no fee", @@ -71,7 +75,7 @@ func 
TestVerifySpendUTXOs(t *testing.T) { outs: []*avax.TransferableOutput{}, creds: []verify.Verifiable{}, producedAmounts: map[ids.ID]uint64{}, - shouldErr: false, + expectedErr: nil, }, { description: "no inputs, no outputs, positive fee", @@ -82,7 +86,7 @@ func TestVerifySpendUTXOs(t *testing.T) { producedAmounts: map[ids.ID]uint64{ h.ctx.AVAXAssetID: 1, }, - shouldErr: true, + expectedErr: ErrInsufficientUnlockedFunds, }, { description: "wrong utxo assetID, one input, no outputs, no fee", @@ -103,7 +107,7 @@ func TestVerifySpendUTXOs(t *testing.T) { &secp256k1fx.Credential{}, }, producedAmounts: map[ids.ID]uint64{}, - shouldErr: true, + expectedErr: errAssetIDMismatch, }, { description: "one wrong assetID input, no outputs, no fee", @@ -124,7 +128,7 @@ func TestVerifySpendUTXOs(t *testing.T) { &secp256k1fx.Credential{}, }, producedAmounts: map[ids.ID]uint64{}, - shouldErr: true, + expectedErr: errAssetIDMismatch, }, { description: "one input, one wrong assetID output, no fee", @@ -152,7 +156,7 @@ func TestVerifySpendUTXOs(t *testing.T) { &secp256k1fx.Credential{}, }, producedAmounts: map[ids.ID]uint64{}, - shouldErr: true, + expectedErr: ErrInsufficientUnlockedFunds, }, { description: "attempt to consume locked output as unlocked", @@ -176,7 +180,7 @@ func TestVerifySpendUTXOs(t *testing.T) { &secp256k1fx.Credential{}, }, producedAmounts: map[ids.ID]uint64{}, - shouldErr: true, + expectedErr: errLockedFundsNotMarkedAsLocked, }, { description: "attempt to modify locktime", @@ -203,7 +207,7 @@ func TestVerifySpendUTXOs(t *testing.T) { &secp256k1fx.Credential{}, }, producedAmounts: map[ids.ID]uint64{}, - shouldErr: true, + expectedErr: errLocktimeMismatch, }, { description: "one input, no outputs, positive fee", @@ -226,7 +230,7 @@ func TestVerifySpendUTXOs(t *testing.T) { producedAmounts: map[ids.ID]uint64{ h.ctx.AVAXAssetID: 1, }, - shouldErr: false, + expectedErr: nil, }, { description: "wrong number of credentials", @@ -247,7 +251,7 @@ func TestVerifySpendUTXOs(t 
*testing.T) { producedAmounts: map[ids.ID]uint64{ h.ctx.AVAXAssetID: 1, }, - shouldErr: true, + expectedErr: errWrongNumberCredentials, }, { description: "wrong number of UTXOs", @@ -265,7 +269,7 @@ func TestVerifySpendUTXOs(t *testing.T) { producedAmounts: map[ids.ID]uint64{ h.ctx.AVAXAssetID: 1, }, - shouldErr: true, + expectedErr: errWrongNumberUTXOs, }, { description: "invalid credential", @@ -288,7 +292,7 @@ func TestVerifySpendUTXOs(t *testing.T) { producedAmounts: map[ids.ID]uint64{ h.ctx.AVAXAssetID: 1, }, - shouldErr: true, + expectedErr: secp256k1fx.ErrNilCredential, }, { description: "invalid signature", @@ -324,7 +328,7 @@ func TestVerifySpendUTXOs(t *testing.T) { producedAmounts: map[ids.ID]uint64{ h.ctx.AVAXAssetID: 1, }, - shouldErr: true, + expectedErr: secp256k1.ErrInvalidSig, }, { description: "one input, no outputs, positive fee", @@ -347,7 +351,7 @@ func TestVerifySpendUTXOs(t *testing.T) { producedAmounts: map[ids.ID]uint64{ h.ctx.AVAXAssetID: 1, }, - shouldErr: false, + expectedErr: nil, }, { description: "locked one input, no outputs, no fee", @@ -374,7 +378,7 @@ func TestVerifySpendUTXOs(t *testing.T) { &secp256k1fx.Credential{}, }, producedAmounts: map[ids.ID]uint64{}, - shouldErr: false, + expectedErr: nil, }, { description: "locked one input, no outputs, positive fee", @@ -403,7 +407,7 @@ func TestVerifySpendUTXOs(t *testing.T) { producedAmounts: map[ids.ID]uint64{ h.ctx.AVAXAssetID: 1, }, - shouldErr: true, + expectedErr: ErrInsufficientUnlockedFunds, }, { description: "one locked and one unlocked input, one locked output, positive fee", @@ -459,7 +463,7 @@ func TestVerifySpendUTXOs(t *testing.T) { producedAmounts: map[ids.ID]uint64{ h.ctx.AVAXAssetID: 1, }, - shouldErr: false, + expectedErr: nil, }, { description: "one locked and one unlocked input, one locked output, positive fee, partially locked", @@ -515,7 +519,7 @@ func TestVerifySpendUTXOs(t *testing.T) { producedAmounts: map[ids.ID]uint64{ h.ctx.AVAXAssetID: 1, }, - shouldErr: 
false, + expectedErr: nil, }, { description: "one unlocked input, one locked output, zero fee", @@ -550,7 +554,7 @@ func TestVerifySpendUTXOs(t *testing.T) { &secp256k1fx.Credential{}, }, producedAmounts: map[ids.ID]uint64{}, - shouldErr: false, + expectedErr: nil, }, { description: "attempted overflow", @@ -588,7 +592,7 @@ func TestVerifySpendUTXOs(t *testing.T) { &secp256k1fx.Credential{}, }, producedAmounts: map[ids.ID]uint64{}, - shouldErr: true, + expectedErr: safemath.ErrOverflow, }, { description: "attempted mint", @@ -623,7 +627,7 @@ func TestVerifySpendUTXOs(t *testing.T) { &secp256k1fx.Credential{}, }, producedAmounts: map[ids.ID]uint64{}, - shouldErr: true, + expectedErr: ErrInsufficientLockedFunds, }, { description: "attempted mint through locking", @@ -667,7 +671,7 @@ func TestVerifySpendUTXOs(t *testing.T) { &secp256k1fx.Credential{}, }, producedAmounts: map[ids.ID]uint64{}, - shouldErr: true, + expectedErr: safemath.ErrOverflow, }, { description: "attempted mint through mixed locking (low then high)", @@ -708,7 +712,7 @@ func TestVerifySpendUTXOs(t *testing.T) { &secp256k1fx.Credential{}, }, producedAmounts: map[ids.ID]uint64{}, - shouldErr: true, + expectedErr: ErrInsufficientLockedFunds, }, { description: "attempted mint through mixed locking (high then low)", @@ -749,7 +753,7 @@ func TestVerifySpendUTXOs(t *testing.T) { &secp256k1fx.Credential{}, }, producedAmounts: map[ids.ID]uint64{}, - shouldErr: true, + expectedErr: ErrInsufficientLockedFunds, }, { description: "transfer non-avax asset", @@ -781,7 +785,7 @@ func TestVerifySpendUTXOs(t *testing.T) { &secp256k1fx.Credential{}, }, producedAmounts: map[ids.ID]uint64{}, - shouldErr: false, + expectedErr: nil, }, { description: "lock non-avax asset", @@ -816,7 +820,7 @@ func TestVerifySpendUTXOs(t *testing.T) { &secp256k1fx.Credential{}, }, producedAmounts: map[ids.ID]uint64{}, - shouldErr: false, + expectedErr: nil, }, { description: "attempted asset conversion", @@ -848,7 +852,7 @@ func 
TestVerifySpendUTXOs(t *testing.T) { &secp256k1fx.Credential{}, }, producedAmounts: map[ids.ID]uint64{}, - shouldErr: true, + expectedErr: ErrInsufficientUnlockedFunds, }, { description: "attempted asset conversion with burn", @@ -875,7 +879,7 @@ func TestVerifySpendUTXOs(t *testing.T) { producedAmounts: map[ids.ID]uint64{ h.ctx.AVAXAssetID: 1, }, - shouldErr: true, + expectedErr: ErrInsufficientUnlockedFunds, }, { description: "two inputs, one output with custom asset, with fee", @@ -922,7 +926,7 @@ func TestVerifySpendUTXOs(t *testing.T) { producedAmounts: map[ids.ID]uint64{ h.ctx.AVAXAssetID: 1, }, - shouldErr: false, + expectedErr: nil, }, { description: "one input, fee, custom asset", @@ -949,7 +953,7 @@ func TestVerifySpendUTXOs(t *testing.T) { producedAmounts: map[ids.ID]uint64{ h.ctx.AVAXAssetID: 1, }, - shouldErr: true, + expectedErr: ErrInsufficientUnlockedFunds, }, { description: "one input, custom fee", @@ -976,7 +980,7 @@ func TestVerifySpendUTXOs(t *testing.T) { producedAmounts: map[ids.ID]uint64{ customAssetID: 1, }, - shouldErr: false, + expectedErr: nil, }, { description: "one input, custom fee, wrong burn", @@ -1003,7 +1007,7 @@ func TestVerifySpendUTXOs(t *testing.T) { producedAmounts: map[ids.ID]uint64{ customAssetID: 1, }, - shouldErr: true, + expectedErr: ErrInsufficientUnlockedFunds, }, { description: "two inputs, multiple fee", @@ -1044,7 +1048,7 @@ func TestVerifySpendUTXOs(t *testing.T) { h.ctx.AVAXAssetID: 1, customAssetID: 1, }, - shouldErr: false, + expectedErr: nil, }, { description: "one unlock input, one locked output, zero fee, unlocked, custom asset", @@ -1079,7 +1083,7 @@ func TestVerifySpendUTXOs(t *testing.T) { &secp256k1fx.Credential{}, }, producedAmounts: make(map[ids.ID]uint64), - shouldErr: false, + expectedErr: nil, }, } @@ -1087,7 +1091,6 @@ func TestVerifySpendUTXOs(t *testing.T) { h.clk.Set(now) t.Run(test.description, func(t *testing.T) { - require := require.New(t) err := h.VerifySpendUTXOs( &unsignedTx, test.utxos, @@ 
-1096,12 +1099,7 @@ func TestVerifySpendUTXOs(t *testing.T) { test.creds, test.producedAmounts, ) - - if test.shouldErr { - require.Error(err) - } else { - require.NoError(err) - } + require.ErrorIs(t, err, test.expectedErr) }) } } diff --git a/avalanchego/vms/platformvm/utxo/mock_verifier.go b/avalanchego/vms/platformvm/utxo/mock_verifier.go index 904a1185..0447806c 100644 --- a/avalanchego/vms/platformvm/utxo/mock_verifier.go +++ b/avalanchego/vms/platformvm/utxo/mock_verifier.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/vms/platformvm/utxo (interfaces: Verifier) +// +// Generated by this command: +// +// mockgen -package=utxo -destination=vms/platformvm/utxo/mock_verifier.go github.com/ava-labs/avalanchego/vms/platformvm/utxo Verifier +// // Package utxo is a generated GoMock package. package utxo @@ -14,7 +16,7 @@ import ( avax "github.com/ava-labs/avalanchego/vms/components/avax" verify "github.com/ava-labs/avalanchego/vms/components/verify" txs "github.com/ava-labs/avalanchego/vms/platformvm/txs" - gomock "github.com/golang/mock/gomock" + gomock "go.uber.org/mock/gomock" ) // MockVerifier is a mock of Verifier interface. @@ -49,7 +51,7 @@ func (m *MockVerifier) VerifySpend(arg0 txs.UnsignedTx, arg1 avax.UTXOGetter, ar } // VerifySpend indicates an expected call of VerifySpend. 
-func (mr *MockVerifierMockRecorder) VerifySpend(arg0, arg1, arg2, arg3, arg4, arg5 interface{}) *gomock.Call { +func (mr *MockVerifierMockRecorder) VerifySpend(arg0, arg1, arg2, arg3, arg4, arg5 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VerifySpend", reflect.TypeOf((*MockVerifier)(nil).VerifySpend), arg0, arg1, arg2, arg3, arg4, arg5) } @@ -63,7 +65,7 @@ func (m *MockVerifier) VerifySpendUTXOs(arg0 txs.UnsignedTx, arg1 []*avax.UTXO, } // VerifySpendUTXOs indicates an expected call of VerifySpendUTXOs. -func (mr *MockVerifierMockRecorder) VerifySpendUTXOs(arg0, arg1, arg2, arg3, arg4, arg5 interface{}) *gomock.Call { +func (mr *MockVerifierMockRecorder) VerifySpendUTXOs(arg0, arg1, arg2, arg3, arg4, arg5 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VerifySpendUTXOs", reflect.TypeOf((*MockVerifier)(nil).VerifySpendUTXOs), arg0, arg1, arg2, arg3, arg4, arg5) } diff --git a/avalanchego/vms/platformvm/validator_set_property_test.go b/avalanchego/vms/platformvm/validator_set_property_test.go new file mode 100644 index 00000000..cdac03ca --- /dev/null +++ b/avalanchego/vms/platformvm/validator_set_property_test.go @@ -0,0 +1,781 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package platformvm + +import ( + "context" + "errors" + "fmt" + "reflect" + "sort" + "testing" + "time" + + "github.com/leanovate/gopter" + "github.com/leanovate/gopter/gen" + "github.com/leanovate/gopter/prop" + "golang.org/x/exp/maps" + + "github.com/ava-labs/avalanchego/chains" + "github.com/ava-labs/avalanchego/chains/atomic" + "github.com/ava-labs/avalanchego/database/memdb" + "github.com/ava-labs/avalanchego/database/prefixdb" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/snow/consensus/snowman" + "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/snow/snowtest" + "github.com/ava-labs/avalanchego/snow/uptime" + "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/crypto/bls" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + "github.com/ava-labs/avalanchego/utils/formatting" + "github.com/ava-labs/avalanchego/utils/formatting/address" + "github.com/ava-labs/avalanchego/utils/json" + "github.com/ava-labs/avalanchego/utils/timer/mockable" + "github.com/ava-labs/avalanchego/utils/units" + "github.com/ava-labs/avalanchego/vms/platformvm/api" + "github.com/ava-labs/avalanchego/vms/platformvm/block" + "github.com/ava-labs/avalanchego/vms/platformvm/config" + "github.com/ava-labs/avalanchego/vms/platformvm/reward" + "github.com/ava-labs/avalanchego/vms/platformvm/signer" + "github.com/ava-labs/avalanchego/vms/platformvm/state" + "github.com/ava-labs/avalanchego/vms/platformvm/txs" + + blockexecutor "github.com/ava-labs/avalanchego/vms/platformvm/block/executor" + txexecutor "github.com/ava-labs/avalanchego/vms/platformvm/txs/executor" +) + +const ( + startPrimaryWithBLS uint8 = iota + startSubnetValidator + + failedValidatorSnapshotString = "could not take validators snapshot: " + failedBuildingEventSeqString = "failed building events sequence: " +) + +var 
errEmptyEventsList = errors.New("empty events list") + +// for a given (permissioned) subnet, the test stakes and restakes multiple +// times a node as a primary and subnet validator. The BLS key of the node is +// changed across staking periods, and it can even be nil. We test that +// GetValidatorSet returns the correct primary and subnet validators data, with +// the right BLS key version at all relevant heights. +func TestGetValidatorsSetProperty(t *testing.T) { + properties := gopter.NewProperties(nil) + + // to reproduce a given scenario do something like this: + // parameters := gopter.DefaultTestParametersWithSeed(1685887576153675816) + // properties := gopter.NewProperties(parameters) + + properties.Property("check GetValidatorSet", prop.ForAll( + func(events []uint8) string { + vm, subnetID, err := buildVM(t) + if err != nil { + return "failed building vm: " + err.Error() + } + vm.ctx.Lock.Lock() + defer func() { + _ = vm.Shutdown(context.Background()) + vm.ctx.Lock.Unlock() + }() + nodeID := ids.GenerateTestNodeID() + + currentTime := defaultGenesisTime + vm.clock.Set(currentTime) + vm.state.SetTimestamp(currentTime) + + // build a valid sequence of validators start/end times, given the + // random events sequence received as test input + validatorsTimes, err := buildTimestampsList(events, currentTime, nodeID) + if err != nil { + return "failed building events sequence: " + err.Error() + } + + validatorSetByHeightAndSubnet := make(map[uint64]map[ids.ID]map[ids.NodeID]*validators.GetValidatorOutput) + if err := takeValidatorsSnapshotAtCurrentHeight(vm, validatorSetByHeightAndSubnet); err != nil { + return failedValidatorSnapshotString + err.Error() + } + + // insert validator sequence + var ( + currentPrimaryValidator = (*state.Staker)(nil) + currentSubnetValidator = (*state.Staker)(nil) + ) + for _, ev := range validatorsTimes { + // at each step we remove at least a subnet validator + if currentSubnetValidator != nil { + err := 
terminateSubnetValidator(vm, currentSubnetValidator) + if err != nil { + return "could not terminate current subnet validator: " + err.Error() + } + currentSubnetValidator = nil + + if err := takeValidatorsSnapshotAtCurrentHeight(vm, validatorSetByHeightAndSubnet); err != nil { + return failedValidatorSnapshotString + err.Error() + } + } + + switch ev.eventType { + case startSubnetValidator: + currentSubnetValidator, err = addSubnetValidator(vm, ev, subnetID) + if err != nil { + return "could not add subnet validator: " + err.Error() + } + if err := takeValidatorsSnapshotAtCurrentHeight(vm, validatorSetByHeightAndSubnet); err != nil { + return failedValidatorSnapshotString + err.Error() + } + + case startPrimaryWithBLS: + // when adding a primary validator, also remove the current + // primary one + if currentPrimaryValidator != nil { + err := terminatePrimaryValidator(vm, currentPrimaryValidator) + if err != nil { + return "could not terminate current primary validator: " + err.Error() + } + // no need to nil current primary validator, we'll + // reassign immediately + + if err := takeValidatorsSnapshotAtCurrentHeight(vm, validatorSetByHeightAndSubnet); err != nil { + return failedValidatorSnapshotString + err.Error() + } + } + currentPrimaryValidator, err = addPrimaryValidatorWithBLSKey(vm, ev) + if err != nil { + return "could not add primary validator with BLS key: " + err.Error() + } + if err := takeValidatorsSnapshotAtCurrentHeight(vm, validatorSetByHeightAndSubnet); err != nil { + return failedValidatorSnapshotString + err.Error() + } + + default: + return fmt.Sprintf("unexpected staker type: %v", ev.eventType) + } + } + + // Checks: let's look back at validator sets at previous heights and + // make sure they match the snapshots already taken + snapshotHeights := maps.Keys(validatorSetByHeightAndSubnet) + sort.Slice(snapshotHeights, func(i, j int) bool { return snapshotHeights[i] < snapshotHeights[j] }) + for idx, snapShotHeight := range snapshotHeights { + 
lastAcceptedHeight, err := vm.GetCurrentHeight(context.Background()) + if err != nil { + return err.Error() + } + + nextSnapShotHeight := lastAcceptedHeight + 1 + if idx != len(snapshotHeights)-1 { + nextSnapShotHeight = snapshotHeights[idx+1] + } + + // within [snapShotHeight] and [nextSnapShotHeight], the validator set + // does not change and must be equal to snapshot at [snapShotHeight] + for height := snapShotHeight; height < nextSnapShotHeight; height++ { + for subnetID, validatorsSet := range validatorSetByHeightAndSubnet[snapShotHeight] { + res, err := vm.GetValidatorSet(context.Background(), height, subnetID) + if err != nil { + return fmt.Sprintf("failed GetValidatorSet at height %v: %v", height, err) + } + if !reflect.DeepEqual(validatorsSet, res) { + return "failed validators set comparison" + } + } + } + } + + return "" + }, + gen.SliceOfN( + 10, + gen.OneConstOf( + startPrimaryWithBLS, + startSubnetValidator, + ), + ).SuchThat(func(v interface{}) bool { + list := v.([]uint8) + return len(list) > 0 && list[0] == startPrimaryWithBLS + }), + )) + + properties.TestingRun(t) +} + +func takeValidatorsSnapshotAtCurrentHeight(vm *VM, validatorsSetByHeightAndSubnet map[uint64]map[ids.ID]map[ids.NodeID]*validators.GetValidatorOutput) error { + if validatorsSetByHeightAndSubnet == nil { + validatorsSetByHeightAndSubnet = make(map[uint64]map[ids.ID]map[ids.NodeID]*validators.GetValidatorOutput) + } + + lastBlkID := vm.state.GetLastAccepted() + lastBlk, err := vm.state.GetStatelessBlock(lastBlkID) + if err != nil { + return err + } + height := lastBlk.Height() + validatorsSetBySubnet, ok := validatorsSetByHeightAndSubnet[height] + if !ok { + validatorsSetByHeightAndSubnet[height] = make(map[ids.ID]map[ids.NodeID]*validators.GetValidatorOutput) + validatorsSetBySubnet = validatorsSetByHeightAndSubnet[height] + } + + stakerIt, err := vm.state.GetCurrentStakerIterator() + if err != nil { + return err + } + defer stakerIt.Release() + for stakerIt.Next() { + v := 
stakerIt.Value() + validatorsSet, ok := validatorsSetBySubnet[v.SubnetID] + if !ok { + validatorsSetBySubnet[v.SubnetID] = make(map[ids.NodeID]*validators.GetValidatorOutput) + validatorsSet = validatorsSetBySubnet[v.SubnetID] + } + + blsKey := v.PublicKey + if v.SubnetID != constants.PrimaryNetworkID { + // pick bls key from primary validator + s, err := vm.state.GetCurrentValidator(constants.PlatformChainID, v.NodeID) + if err != nil { + return err + } + blsKey = s.PublicKey + } + + validatorsSet[v.NodeID] = &validators.GetValidatorOutput{ + NodeID: v.NodeID, + PublicKey: blsKey, + Weight: v.Weight, + } + } + return nil +} + +func addSubnetValidator(vm *VM, data *validatorInputData, subnetID ids.ID) (*state.Staker, error) { + addr := keys[0].PublicKey().Address() + signedTx, err := vm.txBuilder.NewAddSubnetValidatorTx( + vm.Config.MinValidatorStake, + uint64(data.startTime.Unix()), + uint64(data.endTime.Unix()), + data.nodeID, + subnetID, + []*secp256k1.PrivateKey{keys[0], keys[1]}, + addr, + nil, + ) + if err != nil { + return nil, fmt.Errorf("could not create AddSubnetValidatorTx: %w", err) + } + return internalAddValidator(vm, signedTx) +} + +func addPrimaryValidatorWithBLSKey(vm *VM, data *validatorInputData) (*state.Staker, error) { + addr := keys[0].PublicKey().Address() + + sk, err := bls.NewSecretKey() + if err != nil { + return nil, fmt.Errorf("failed to generate BLS key: %w", err) + } + + signedTx, err := vm.txBuilder.NewAddPermissionlessValidatorTx( + vm.Config.MinValidatorStake, + uint64(data.startTime.Unix()), + uint64(data.endTime.Unix()), + data.nodeID, + signer.NewProofOfPossession(sk), + addr, + reward.PercentDenominator, + []*secp256k1.PrivateKey{keys[0], keys[1]}, + addr, + nil, + ) + if err != nil { + return nil, fmt.Errorf("could not create AddPermissionlessValidatorTx: %w", err) + } + return internalAddValidator(vm, signedTx) +} + +func internalAddValidator(vm *VM, signedTx *txs.Tx) (*state.Staker, error) { + vm.ctx.Lock.Unlock() + err := 
vm.issueTx(context.Background(), signedTx) + vm.ctx.Lock.Lock() + + if err != nil { + return nil, fmt.Errorf("could not add tx to mempool: %w", err) + } + + blk, err := vm.Builder.BuildBlock(context.Background()) + if err != nil { + return nil, fmt.Errorf("failed building block: %w", err) + } + if err := blk.Verify(context.Background()); err != nil { + return nil, fmt.Errorf("failed verifying block: %w", err) + } + if err := blk.Accept(context.Background()); err != nil { + return nil, fmt.Errorf("failed accepting block: %w", err) + } + if err := vm.SetPreference(context.Background(), vm.manager.LastAccepted()); err != nil { + return nil, fmt.Errorf("failed setting preference: %w", err) + } + + stakerTx := signedTx.Unsigned.(txs.Staker) + return vm.state.GetCurrentValidator(stakerTx.SubnetID(), stakerTx.NodeID()) +} + +func terminateSubnetValidator(vm *VM, validator *state.Staker) error { + currentTime := validator.EndTime + vm.clock.Set(currentTime) + vm.state.SetTimestamp(currentTime) + + blk, err := vm.Builder.BuildBlock(context.Background()) + if err != nil { + return fmt.Errorf("failed building block: %w", err) + } + if err := blk.Verify(context.Background()); err != nil { + return fmt.Errorf("failed verifying block: %w", err) + } + if err := blk.Accept(context.Background()); err != nil { + return fmt.Errorf("failed accepting block: %w", err) + } + if err := vm.SetPreference(context.Background(), vm.manager.LastAccepted()); err != nil { + return fmt.Errorf("failed setting preference: %w", err) + } + + return nil +} + +func terminatePrimaryValidator(vm *VM, validator *state.Staker) error { + currentTime := validator.EndTime + vm.clock.Set(currentTime) + vm.state.SetTimestamp(currentTime) + + blk, err := vm.Builder.BuildBlock(context.Background()) + if err != nil { + return fmt.Errorf("failed building block: %w", err) + } + if err := blk.Verify(context.Background()); err != nil { + return fmt.Errorf("failed verifying block: %w", err) + } + + proposalBlk := 
blk.(snowman.OracleBlock) + options, err := proposalBlk.Options(context.Background()) + if err != nil { + return fmt.Errorf("failed retrieving options: %w", err) + } + + commit := options[0].(*blockexecutor.Block) + _, ok := commit.Block.(*block.BanffCommitBlock) + if !ok { + return fmt.Errorf("failed retrieving commit option: %w", err) + } + if err := blk.Accept(context.Background()); err != nil { + return fmt.Errorf("failed accepting block: %w", err) + } + + if err := commit.Verify(context.Background()); err != nil { + return fmt.Errorf("failed verifying commit block: %w", err) + } + if err := commit.Accept(context.Background()); err != nil { + return fmt.Errorf("failed accepting commit block: %w", err) + } + + if err := vm.SetPreference(context.Background(), vm.manager.LastAccepted()); err != nil { + return fmt.Errorf("failed setting preference: %w", err) + } + + return nil +} + +type validatorInputData struct { + eventType uint8 + startTime time.Time + endTime time.Time + nodeID ids.NodeID + publicKey *bls.PublicKey +} + +// buildTimestampsList creates validators start and end time, given the event list. 
+// output is returned as a list of validatorInputData +func buildTimestampsList(events []uint8, currentTime time.Time, nodeID ids.NodeID) ([]*validatorInputData, error) { + res := make([]*validatorInputData, 0, len(events)) + + currentTime = currentTime.Add(txexecutor.SyncBound) + switch endTime := currentTime.Add(defaultMinStakingDuration); events[0] { + case startPrimaryWithBLS: + sk, err := bls.NewSecretKey() + if err != nil { + return nil, fmt.Errorf("could not make private key: %w", err) + } + + res = append(res, &validatorInputData{ + eventType: startPrimaryWithBLS, + startTime: currentTime, + endTime: endTime, + nodeID: nodeID, + publicKey: bls.PublicFromSecretKey(sk), + }) + default: + return nil, fmt.Errorf("unexpected initial event %d", events[0]) + } + + // track current primary validator to make sure its staking period + // covers all of its subnet validators + currentPrimaryVal := res[0] + for i := 1; i < len(events); i++ { + currentTime = currentTime.Add(txexecutor.SyncBound) + + switch currentEvent := events[i]; currentEvent { + case startSubnetValidator: + endTime := currentTime.Add(defaultMinStakingDuration) + res = append(res, &validatorInputData{ + eventType: startSubnetValidator, + startTime: currentTime, + endTime: endTime, + nodeID: nodeID, + publicKey: nil, + }) + + currentPrimaryVal.endTime = endTime.Add(time.Second) + currentTime = endTime.Add(time.Second) + + case startPrimaryWithBLS: + currentTime = currentPrimaryVal.endTime.Add(txexecutor.SyncBound) + sk, err := bls.NewSecretKey() + if err != nil { + return nil, fmt.Errorf("could not make private key: %w", err) + } + + endTime := currentTime.Add(defaultMinStakingDuration) + val := &validatorInputData{ + eventType: startPrimaryWithBLS, + startTime: currentTime, + endTime: endTime, + nodeID: nodeID, + publicKey: bls.PublicFromSecretKey(sk), + } + res = append(res, val) + currentPrimaryVal = val + } + } + return res, nil +} + +func TestTimestampListGenerator(t *testing.T) { + properties := 
gopter.NewProperties(nil) + + properties.Property("primary validators are returned in sequence", prop.ForAll( + func(events []uint8) string { + currentTime := time.Now() + nodeID := ids.GenerateTestNodeID() + validatorsTimes, err := buildTimestampsList(events, currentTime, nodeID) + if err != nil { + return failedBuildingEventSeqString + err.Error() + } + + if len(validatorsTimes) == 0 { + return errEmptyEventsList.Error() + } + + // nil out non subnet validators + subnetIndexes := make([]int, 0) + for idx, ev := range validatorsTimes { + if ev.eventType == startSubnetValidator { + subnetIndexes = append(subnetIndexes, idx) + } + } + for _, idx := range subnetIndexes { + validatorsTimes[idx] = nil + } + + currentEventTime := currentTime + for i, ev := range validatorsTimes { + if ev == nil { + continue // a subnet validator + } + if currentEventTime.After(ev.startTime) { + return fmt.Sprintf("validator %d start time larger than current event time", i) + } + + if ev.startTime.After(ev.endTime) { + return fmt.Sprintf("validator %d start time larger than its end time", i) + } + + currentEventTime = ev.endTime + } + + return "" + }, + gen.SliceOf(gen.OneConstOf( + startPrimaryWithBLS, + startSubnetValidator, + )).SuchThat(func(v interface{}) bool { + list := v.([]uint8) + return len(list) > 0 && list[0] == startPrimaryWithBLS + }), + )) + + properties.Property("subnet validators are returned in sequence", prop.ForAll( + func(events []uint8) string { + currentTime := time.Now() + nodeID := ids.GenerateTestNodeID() + validatorsTimes, err := buildTimestampsList(events, currentTime, nodeID) + if err != nil { + return failedBuildingEventSeqString + err.Error() + } + + if len(validatorsTimes) == 0 { + return errEmptyEventsList.Error() + } + + // nil out non subnet validators + nonSubnetIndexes := make([]int, 0) + for idx, ev := range validatorsTimes { + if ev.eventType != startSubnetValidator { + nonSubnetIndexes = append(nonSubnetIndexes, idx) + } + } + for _, idx := range 
nonSubnetIndexes { + validatorsTimes[idx] = nil + } + + currentEventTime := currentTime + for i, ev := range validatorsTimes { + if ev == nil { + continue // a non-subnet validator + } + if currentEventTime.After(ev.startTime) { + return fmt.Sprintf("validator %d start time larger than current event time", i) + } + + if ev.startTime.After(ev.endTime) { + return fmt.Sprintf("validator %d start time larger than its end time", i) + } + + currentEventTime = ev.endTime + } + + return "" + }, + gen.SliceOf(gen.OneConstOf( + startPrimaryWithBLS, + startSubnetValidator, + )).SuchThat(func(v interface{}) bool { + list := v.([]uint8) + return len(list) > 0 && list[0] == startPrimaryWithBLS + }), + )) + + properties.Property("subnet validators' times are bound by a primary validator's times", prop.ForAll( + func(events []uint8) string { + currentTime := time.Now() + nodeID := ids.GenerateTestNodeID() + validatorsTimes, err := buildTimestampsList(events, currentTime, nodeID) + if err != nil { + return failedBuildingEventSeqString + err.Error() + } + + if len(validatorsTimes) == 0 { + return errEmptyEventsList.Error() + } + + currentPrimaryValidator := validatorsTimes[0] + for i := 1; i < len(validatorsTimes); i++ { + if validatorsTimes[i].eventType != startSubnetValidator { + currentPrimaryValidator = validatorsTimes[i] + continue + } + + subnetVal := validatorsTimes[i] + if currentPrimaryValidator.startTime.After(subnetVal.startTime) || + subnetVal.endTime.After(currentPrimaryValidator.endTime) { + return "subnet validator not bounded by primary network ones" + } + } + return "" + }, + gen.SliceOf(gen.OneConstOf( + startPrimaryWithBLS, + startSubnetValidator, + )).SuchThat(func(v interface{}) bool { + list := v.([]uint8) + return len(list) > 0 && list[0] == startPrimaryWithBLS + }), + )) + + properties.TestingRun(t) +} + +// add a single validator at the end of times, +// to make sure it won't pollute our tests +func buildVM(t *testing.T) (*VM, ids.ID, error) { + forkTime := 
defaultGenesisTime + vm := &VM{Config: config.Config{ + Chains: chains.TestManager, + UptimeLockedCalculator: uptime.NewLockedCalculator(), + SybilProtectionEnabled: true, + Validators: validators.NewManager(), + TxFee: defaultTxFee, + CreateSubnetTxFee: 100 * defaultTxFee, + TransformSubnetTxFee: 100 * defaultTxFee, + CreateBlockchainTxFee: 100 * defaultTxFee, + MinValidatorStake: defaultMinValidatorStake, + MaxValidatorStake: defaultMaxValidatorStake, + MinDelegatorStake: defaultMinDelegatorStake, + MinStakeDuration: defaultMinStakingDuration, + MaxStakeDuration: defaultMaxStakingDuration, + RewardConfig: defaultRewardConfig, + ApricotPhase3Time: forkTime, + ApricotPhase5Time: forkTime, + BanffTime: forkTime, + CortinaTime: forkTime, + }} + vm.clock.Set(forkTime.Add(time.Second)) + + baseDB := memdb.New() + chainDB := prefixdb.New([]byte{0}, baseDB) + atomicDB := prefixdb.New([]byte{1}, baseDB) + + msgChan := make(chan common.Message, 1) + ctx := snowtest.Context(t, snowtest.PChainID) + + m := atomic.NewMemory(atomicDB) + ctx.SharedMemory = m.NewSharedMemory(ctx.ChainID) + + ctx.Lock.Lock() + defer ctx.Lock.Unlock() + appSender := &common.SenderTest{} + appSender.CantSendAppGossip = true + appSender.SendAppGossipF = func(context.Context, []byte) error { + return nil + } + + genesisBytes, err := buildCustomGenesis(ctx.AVAXAssetID) + if err != nil { + return nil, ids.Empty, err + } + + err = vm.Initialize( + context.Background(), + ctx, + chainDB, + genesisBytes, + nil, + nil, + msgChan, + nil, + appSender, + ) + if err != nil { + return nil, ids.Empty, err + } + + err = vm.SetState(context.Background(), snow.NormalOp) + if err != nil { + return nil, ids.Empty, err + } + + // Create a subnet and store it in testSubnet1 + // Note: following Banff activation, block acceptance will move + // chain time ahead + testSubnet1, err = vm.txBuilder.NewCreateSubnetTx( + 1, // threshold + []ids.ShortID{keys[0].PublicKey().Address()}, + 
[]*secp256k1.PrivateKey{keys[len(keys)-1]}, // pays tx fee + keys[0].PublicKey().Address(), // change addr + nil, + ) + if err != nil { + return nil, ids.Empty, err + } + vm.ctx.Lock.Unlock() + err = vm.issueTx(context.Background(), testSubnet1) + vm.ctx.Lock.Lock() + if err != nil { + return nil, ids.Empty, err + } + + blk, err := vm.Builder.BuildBlock(context.Background()) + if err != nil { + return nil, ids.Empty, err + } + if err := blk.Verify(context.Background()); err != nil { + return nil, ids.Empty, err + } + if err := blk.Accept(context.Background()); err != nil { + return nil, ids.Empty, err + } + if err := vm.SetPreference(context.Background(), vm.manager.LastAccepted()); err != nil { + return nil, ids.Empty, err + } + + return vm, testSubnet1.ID(), nil +} + +func buildCustomGenesis(avaxAssetID ids.ID) ([]byte, error) { + genesisUTXOs := make([]api.UTXO, len(keys)) + for i, key := range keys { + id := key.PublicKey().Address() + addr, err := address.FormatBech32(constants.UnitTestHRP, id.Bytes()) + if err != nil { + return nil, err + } + genesisUTXOs[i] = api.UTXO{ + Amount: json.Uint64(defaultBalance), + Address: addr, + } + } + + // we need at least a validator, otherwise BuildBlock would fail, since it + // won't find next staker to promote/evict from stakers set. 
Contrary to + // what happens with production code we push such validator at the end of + // times, so to avoid interference with our tests + nodeID := genesisNodeIDs[len(genesisNodeIDs)-1] + addr, err := address.FormatBech32(constants.UnitTestHRP, nodeID.Bytes()) + if err != nil { + return nil, err + } + + starTime := mockable.MaxTime.Add(-1 * defaultMinStakingDuration) + endTime := mockable.MaxTime + genesisValidator := api.GenesisPermissionlessValidator{ + GenesisValidator: api.GenesisValidator{ + StartTime: json.Uint64(starTime.Unix()), + EndTime: json.Uint64(endTime.Unix()), + NodeID: nodeID, + }, + RewardOwner: &api.Owner{ + Threshold: 1, + Addresses: []string{addr}, + }, + Staked: []api.UTXO{{ + Amount: json.Uint64(defaultWeight), + Address: addr, + }}, + DelegationFee: reward.PercentDenominator, + } + + buildGenesisArgs := api.BuildGenesisArgs{ + Encoding: formatting.Hex, + NetworkID: json.Uint32(constants.UnitTestID), + AvaxAssetID: avaxAssetID, + UTXOs: genesisUTXOs, + Validators: []api.GenesisPermissionlessValidator{genesisValidator}, + Chains: nil, + Time: json.Uint64(defaultGenesisTime.Unix()), + InitialSupply: json.Uint64(360 * units.MegaAvax), + } + + buildGenesisResponse := api.BuildGenesisReply{} + platformvmSS := api.StaticService{} + if err := platformvmSS.BuildGenesis(nil, &buildGenesisArgs, &buildGenesisResponse); err != nil { + return nil, err + } + + genesisBytes, err := formatting.Decode(buildGenesisResponse.Encoding, buildGenesisResponse.Bytes) + if err != nil { + return nil, err + } + + return genesisBytes, nil +} diff --git a/avalanchego/vms/platformvm/validators/manager.go b/avalanchego/vms/platformvm/validators/manager.go new file mode 100644 index 00000000..2c8b025a --- /dev/null +++ b/avalanchego/vms/platformvm/validators/manager.go @@ -0,0 +1,373 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package validators + +import ( + "context" + "fmt" + "time" + + "github.com/ava-labs/avalanchego/cache" + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/timer/mockable" + "github.com/ava-labs/avalanchego/utils/window" + "github.com/ava-labs/avalanchego/vms/platformvm/block" + "github.com/ava-labs/avalanchego/vms/platformvm/config" + "github.com/ava-labs/avalanchego/vms/platformvm/metrics" + "github.com/ava-labs/avalanchego/vms/platformvm/status" + "github.com/ava-labs/avalanchego/vms/platformvm/txs" +) + +const ( + validatorSetsCacheSize = 64 + maxRecentlyAcceptedWindowSize = 64 + minRecentlyAcceptedWindowSize = 16 + recentlyAcceptedWindowTTL = 2 * time.Minute +) + +var _ validators.State = (*manager)(nil) + +// Manager adds the ability to introduce newly accepted blocks IDs to the State +// interface. +type Manager interface { + validators.State + + // OnAcceptedBlockID registers the ID of the latest accepted block. + // It is used to update the [recentlyAccepted] sliding window. + OnAcceptedBlockID(blkID ids.ID) +} + +type State interface { + GetTx(txID ids.ID) (*txs.Tx, status.Status, error) + + GetLastAccepted() ids.ID + GetStatelessBlock(blockID ids.ID) (block.Block, error) + + // ApplyValidatorWeightDiffs iterates from [startHeight] towards the genesis + // block until it has applied all of the diffs up to and including + // [endHeight]. Applying the diffs modifies [validators]. + // + // Invariant: If attempting to generate the validator set for + // [endHeight - 1], [validators] must initially contain the validator + // weights for [startHeight]. + // + // Note: Because this function iterates towards the genesis, [startHeight] + // should normally be greater than or equal to [endHeight]. 
+ ApplyValidatorWeightDiffs( + ctx context.Context, + validators map[ids.NodeID]*validators.GetValidatorOutput, + startHeight uint64, + endHeight uint64, + subnetID ids.ID, + ) error + + // ApplyValidatorPublicKeyDiffs iterates from [startHeight] towards the + // genesis block until it has applied all of the diffs up to and including + // [endHeight]. Applying the diffs modifies [validators]. + // + // Invariant: If attempting to generate the validator set for + // [endHeight - 1], [validators] must initially contain the validator + // weights for [startHeight]. + // + // Note: Because this function iterates towards the genesis, [startHeight] + // should normally be greater than or equal to [endHeight]. + ApplyValidatorPublicKeyDiffs( + ctx context.Context, + validators map[ids.NodeID]*validators.GetValidatorOutput, + startHeight uint64, + endHeight uint64, + ) error +} + +func NewManager( + log logging.Logger, + cfg config.Config, + state State, + metrics metrics.Metrics, + clk *mockable.Clock, +) Manager { + return &manager{ + log: log, + cfg: cfg, + state: state, + metrics: metrics, + clk: clk, + caches: make(map[ids.ID]cache.Cacher[uint64, map[ids.NodeID]*validators.GetValidatorOutput]), + recentlyAccepted: window.New[ids.ID]( + window.Config{ + Clock: clk, + MaxSize: maxRecentlyAcceptedWindowSize, + MinSize: minRecentlyAcceptedWindowSize, + TTL: recentlyAcceptedWindowTTL, + }, + ), + } +} + +// TODO: Remove requirement for the P-chain's context lock to be held when +// calling exported functions. +type manager struct { + log logging.Logger + cfg config.Config + state State + metrics metrics.Metrics + clk *mockable.Clock + + // Maps caches for each subnet that is currently tracked. 
+ // Key: Subnet ID + // Value: cache mapping height -> validator set map + caches map[ids.ID]cache.Cacher[uint64, map[ids.NodeID]*validators.GetValidatorOutput] + + // sliding window of blocks that were recently accepted + recentlyAccepted window.Window[ids.ID] +} + +// GetMinimumHeight returns the height of the most recent block beyond the +// horizon of our recentlyAccepted window. +// +// Because the time between blocks is arbitrary, we're only guaranteed that +// the window's configured TTL amount of time has passed once an element +// expires from the window. +// +// To try to always return a block older than the window's TTL, we return the +// parent of the oldest element in the window (as an expired element is always +// guaranteed to be sufficiently stale). If we haven't expired an element yet +// in the case of a process restart, we default to the lastAccepted block's +// height which is likely (but not guaranteed) to also be older than the +// window's configured TTL. +// +// If [UseCurrentHeight] is true, we override the block selection policy +// described above and we will always return the last accepted block height +// as the minimum. +func (m *manager) GetMinimumHeight(ctx context.Context) (uint64, error) { + if m.cfg.UseCurrentHeight { + return m.getCurrentHeight(ctx) + } + + oldest, ok := m.recentlyAccepted.Oldest() + if !ok { + return m.getCurrentHeight(ctx) + } + + blk, err := m.state.GetStatelessBlock(oldest) + if err != nil { + return 0, err + } + + // We subtract 1 from the height of [oldest] because we want the height of + // the last block accepted before the [recentlyAccepted] window. + // + // There is guaranteed to be a block accepted before this window because the + // first block added to [recentlyAccepted] window is >= height 1. + return blk.Height() - 1, nil +} + +func (m *manager) GetCurrentHeight(ctx context.Context) (uint64, error) { + return m.getCurrentHeight(ctx) +} + +// TODO: Pass the context into the state. 
+func (m *manager) getCurrentHeight(context.Context) (uint64, error) { + lastAcceptedID := m.state.GetLastAccepted() + lastAccepted, err := m.state.GetStatelessBlock(lastAcceptedID) + if err != nil { + return 0, err + } + return lastAccepted.Height(), nil +} + +func (m *manager) GetValidatorSet( + ctx context.Context, + targetHeight uint64, + subnetID ids.ID, +) (map[ids.NodeID]*validators.GetValidatorOutput, error) { + validatorSetsCache := m.getValidatorSetCache(subnetID) + + if validatorSet, ok := validatorSetsCache.Get(targetHeight); ok { + m.metrics.IncValidatorSetsCached() + return validatorSet, nil + } + + // get the start time to track metrics + startTime := m.clk.Time() + + var ( + validatorSet map[ids.NodeID]*validators.GetValidatorOutput + currentHeight uint64 + err error + ) + if subnetID == constants.PrimaryNetworkID { + validatorSet, currentHeight, err = m.makePrimaryNetworkValidatorSet(ctx, targetHeight) + } else { + validatorSet, currentHeight, err = m.makeSubnetValidatorSet(ctx, targetHeight, subnetID) + } + if err != nil { + return nil, err + } + + // cache the validator set + validatorSetsCache.Put(targetHeight, validatorSet) + + duration := m.clk.Time().Sub(startTime) + m.metrics.IncValidatorSetsCreated() + m.metrics.AddValidatorSetsDuration(duration) + m.metrics.AddValidatorSetsHeightDiff(currentHeight - targetHeight) + return validatorSet, nil +} + +func (m *manager) getValidatorSetCache(subnetID ids.ID) cache.Cacher[uint64, map[ids.NodeID]*validators.GetValidatorOutput] { + // Only cache tracked subnets + if subnetID != constants.PrimaryNetworkID && !m.cfg.TrackedSubnets.Contains(subnetID) { + return &cache.Empty[uint64, map[ids.NodeID]*validators.GetValidatorOutput]{} + } + + validatorSetsCache, exists := m.caches[subnetID] + if exists { + return validatorSetsCache + } + + validatorSetsCache = &cache.LRU[uint64, map[ids.NodeID]*validators.GetValidatorOutput]{ + Size: validatorSetsCacheSize, + } + m.caches[subnetID] = validatorSetsCache + 
return validatorSetsCache +} + +func (m *manager) makePrimaryNetworkValidatorSet( + ctx context.Context, + targetHeight uint64, +) (map[ids.NodeID]*validators.GetValidatorOutput, uint64, error) { + validatorSet, currentHeight, err := m.getCurrentPrimaryValidatorSet(ctx) + if err != nil { + return nil, 0, err + } + if currentHeight < targetHeight { + return nil, 0, database.ErrNotFound + } + + // Rebuild primary network validators at [targetHeight] + // + // Note: Since we are attempting to generate the validator set at + // [targetHeight], we want to apply the diffs from + // (targetHeight, currentHeight]. Because the state interface is implemented + // to be inclusive, we apply diffs in [targetHeight + 1, currentHeight]. + lastDiffHeight := targetHeight + 1 + err = m.state.ApplyValidatorWeightDiffs( + ctx, + validatorSet, + currentHeight, + lastDiffHeight, + constants.PlatformChainID, + ) + if err != nil { + return nil, 0, err + } + + err = m.state.ApplyValidatorPublicKeyDiffs( + ctx, + validatorSet, + currentHeight, + lastDiffHeight, + ) + return validatorSet, currentHeight, err +} + +func (m *manager) getCurrentPrimaryValidatorSet( + ctx context.Context, +) (map[ids.NodeID]*validators.GetValidatorOutput, uint64, error) { + primaryMap := m.cfg.Validators.GetMap(constants.PrimaryNetworkID) + currentHeight, err := m.getCurrentHeight(ctx) + return primaryMap, currentHeight, err +} + +func (m *manager) makeSubnetValidatorSet( + ctx context.Context, + targetHeight uint64, + subnetID ids.ID, +) (map[ids.NodeID]*validators.GetValidatorOutput, uint64, error) { + subnetValidatorSet, primaryValidatorSet, currentHeight, err := m.getCurrentValidatorSets(ctx, subnetID) + if err != nil { + return nil, 0, err + } + if currentHeight < targetHeight { + return nil, 0, database.ErrNotFound + } + + // Rebuild subnet validators at [targetHeight] + // + // Note: Since we are attempting to generate the validator set at + // [targetHeight], we want to apply the diffs from + // 
(targetHeight, currentHeight]. Because the state interface is implemented + // to be inclusive, we apply diffs in [targetHeight + 1, currentHeight]. + lastDiffHeight := targetHeight + 1 + err = m.state.ApplyValidatorWeightDiffs( + ctx, + subnetValidatorSet, + currentHeight, + lastDiffHeight, + subnetID, + ) + if err != nil { + return nil, 0, err + } + + // Update the subnet validator set to include the public keys at + // [currentHeight]. When we apply the public key diffs, we will convert + // these keys to represent the public keys at [targetHeight]. If the subnet + // validator is not currently a primary network validator, it doesn't have a + // key at [currentHeight]. + for nodeID, vdr := range subnetValidatorSet { + if primaryVdr, ok := primaryValidatorSet[nodeID]; ok { + vdr.PublicKey = primaryVdr.PublicKey + } else { + vdr.PublicKey = nil + } + } + + err = m.state.ApplyValidatorPublicKeyDiffs( + ctx, + subnetValidatorSet, + currentHeight, + lastDiffHeight, + ) + return subnetValidatorSet, currentHeight, err +} + +func (m *manager) getCurrentValidatorSets( + ctx context.Context, + subnetID ids.ID, +) (map[ids.NodeID]*validators.GetValidatorOutput, map[ids.NodeID]*validators.GetValidatorOutput, uint64, error) { + subnetMap := m.cfg.Validators.GetMap(subnetID) + primaryMap := m.cfg.Validators.GetMap(constants.PrimaryNetworkID) + currentHeight, err := m.getCurrentHeight(ctx) + return subnetMap, primaryMap, currentHeight, err +} + +func (m *manager) GetSubnetID(_ context.Context, chainID ids.ID) (ids.ID, error) { + if chainID == constants.PlatformChainID { + return constants.PrimaryNetworkID, nil + } + + chainTx, _, err := m.state.GetTx(chainID) + if err != nil { + return ids.Empty, fmt.Errorf( + "problem retrieving blockchain %q: %w", + chainID, + err, + ) + } + chain, ok := chainTx.Unsigned.(*txs.CreateChainTx) + if !ok { + return ids.Empty, fmt.Errorf("%q is not a blockchain", chainID) + } + return chain.SubnetID, nil +} + +func (m *manager) 
OnAcceptedBlockID(blkID ids.ID) { + m.recentlyAccepted.Add(blkID) +} diff --git a/avalanchego/vms/platformvm/validators/manager_benchmark_test.go b/avalanchego/vms/platformvm/validators/manager_benchmark_test.go new file mode 100644 index 00000000..912f3619 --- /dev/null +++ b/avalanchego/vms/platformvm/validators/manager_benchmark_test.go @@ -0,0 +1,274 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package validators + +import ( + "context" + "math/rand" + "testing" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/database/leveldb" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/crypto/bls" + "github.com/ava-labs/avalanchego/utils/formatting" + "github.com/ava-labs/avalanchego/utils/formatting/address" + "github.com/ava-labs/avalanchego/utils/json" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/timer/mockable" + "github.com/ava-labs/avalanchego/utils/units" + "github.com/ava-labs/avalanchego/vms/platformvm/api" + "github.com/ava-labs/avalanchego/vms/platformvm/block" + "github.com/ava-labs/avalanchego/vms/platformvm/config" + "github.com/ava-labs/avalanchego/vms/platformvm/metrics" + "github.com/ava-labs/avalanchego/vms/platformvm/reward" + "github.com/ava-labs/avalanchego/vms/platformvm/state" + "github.com/ava-labs/avalanchego/vms/platformvm/txs" +) + +// BenchmarkGetValidatorSet generates 10k diffs and calculates the time to +// generate the genesis validator set by applying them. +// +// This generates a single diff for each height. In practice there could be +// multiple or zero diffs at a given height. 
+// +// Note: BenchmarkGetValidatorSet gets the validator set of a subnet rather than +// the primary network because the primary network performs caching that would +// interfere with the benchmark. +func BenchmarkGetValidatorSet(b *testing.B) { + require := require.New(b) + + db, err := leveldb.New( + b.TempDir(), + nil, + logging.NoLog{}, + "", + prometheus.NewRegistry(), + ) + require.NoError(err) + defer func() { + require.NoError(db.Close()) + }() + + avaxAssetID := ids.GenerateTestID() + genesisTime := time.Now().Truncate(time.Second) + genesisEndTime := genesisTime.Add(28 * 24 * time.Hour) + + addr, err := address.FormatBech32(constants.UnitTestHRP, ids.GenerateTestShortID().Bytes()) + require.NoError(err) + + genesisValidators := []api.GenesisPermissionlessValidator{{ + GenesisValidator: api.GenesisValidator{ + StartTime: json.Uint64(genesisTime.Unix()), + EndTime: json.Uint64(genesisEndTime.Unix()), + NodeID: ids.GenerateTestNodeID(), + }, + RewardOwner: &api.Owner{ + Threshold: 1, + Addresses: []string{addr}, + }, + Staked: []api.UTXO{{ + Amount: json.Uint64(2 * units.KiloAvax), + Address: addr, + }}, + DelegationFee: reward.PercentDenominator, + }} + + buildGenesisArgs := api.BuildGenesisArgs{ + NetworkID: json.Uint32(constants.UnitTestID), + AvaxAssetID: avaxAssetID, + UTXOs: nil, + Validators: genesisValidators, + Chains: nil, + Time: json.Uint64(genesisTime.Unix()), + InitialSupply: json.Uint64(360 * units.MegaAvax), + Encoding: formatting.Hex, + } + + buildGenesisResponse := api.BuildGenesisReply{} + platformvmSS := api.StaticService{} + require.NoError(platformvmSS.BuildGenesis(nil, &buildGenesisArgs, &buildGenesisResponse)) + + genesisBytes, err := formatting.Decode(buildGenesisResponse.Encoding, buildGenesisResponse.Bytes) + require.NoError(err) + + vdrs := validators.NewManager() + + execConfig, err := config.GetExecutionConfig(nil) + require.NoError(err) + + metrics, err := metrics.New("", prometheus.NewRegistry()) + require.NoError(err) + + s, 
err := state.New( + db, + genesisBytes, + prometheus.NewRegistry(), + &config.Config{ + Validators: vdrs, + }, + execConfig, + &snow.Context{ + NetworkID: constants.UnitTestID, + NodeID: ids.GenerateTestNodeID(), + Log: logging.NoLog{}, + }, + metrics, + reward.NewCalculator(reward.Config{ + MaxConsumptionRate: .12 * reward.PercentDenominator, + MinConsumptionRate: .10 * reward.PercentDenominator, + MintingPeriod: 365 * 24 * time.Hour, + SupplyCap: 720 * units.MegaAvax, + }), + ) + require.NoError(err) + + m := NewManager( + logging.NoLog{}, + config.Config{ + Validators: vdrs, + }, + s, + metrics, + new(mockable.Clock), + ) + + var ( + nodeIDs []ids.NodeID + currentHeight uint64 + ) + for i := 0; i < 50; i++ { + currentHeight++ + nodeID, err := addPrimaryValidator(s, genesisTime, genesisEndTime, currentHeight) + require.NoError(err) + nodeIDs = append(nodeIDs, nodeID) + } + subnetID := ids.GenerateTestID() + for _, nodeID := range nodeIDs { + currentHeight++ + require.NoError(addSubnetValidator(s, subnetID, genesisTime, genesisEndTime, nodeID, currentHeight)) + } + for i := 0; i < 9900; i++ { + currentHeight++ + require.NoError(addSubnetDelegator(s, subnetID, genesisTime, genesisEndTime, nodeIDs, currentHeight)) + } + + ctx := context.Background() + height, err := m.GetCurrentHeight(ctx) + require.NoError(err) + require.Equal(currentHeight, height) + + b.ResetTimer() + + for i := 0; i < b.N; i++ { + _, err := m.GetValidatorSet(ctx, 0, subnetID) + require.NoError(err) + } + + b.StopTimer() +} + +func addPrimaryValidator( + s state.State, + startTime time.Time, + endTime time.Time, + height uint64, +) (ids.NodeID, error) { + sk, err := bls.NewSecretKey() + if err != nil { + return ids.EmptyNodeID, err + } + + nodeID := ids.GenerateTestNodeID() + s.PutCurrentValidator(&state.Staker{ + TxID: ids.GenerateTestID(), + NodeID: nodeID, + PublicKey: bls.PublicFromSecretKey(sk), + SubnetID: constants.PrimaryNetworkID, + Weight: 2 * units.MegaAvax, + StartTime: startTime, + 
EndTime: endTime, + PotentialReward: 0, + NextTime: endTime, + Priority: txs.PrimaryNetworkValidatorCurrentPriority, + }) + + blk, err := block.NewBanffStandardBlock(startTime, ids.GenerateTestID(), height, nil) + if err != nil { + return ids.EmptyNodeID, err + } + + s.AddStatelessBlock(blk) + s.SetHeight(height) + return nodeID, s.Commit() +} + +func addSubnetValidator( + s state.State, + subnetID ids.ID, + startTime time.Time, + endTime time.Time, + nodeID ids.NodeID, + height uint64, +) error { + s.PutCurrentValidator(&state.Staker{ + TxID: ids.GenerateTestID(), + NodeID: nodeID, + SubnetID: subnetID, + Weight: 1 * units.Avax, + StartTime: startTime, + EndTime: endTime, + PotentialReward: 0, + NextTime: endTime, + Priority: txs.SubnetPermissionlessValidatorCurrentPriority, + }) + + blk, err := block.NewBanffStandardBlock(startTime, ids.GenerateTestID(), height, nil) + if err != nil { + return err + } + + s.AddStatelessBlock(blk) + s.SetHeight(height) + return s.Commit() +} + +func addSubnetDelegator( + s state.State, + subnetID ids.ID, + startTime time.Time, + endTime time.Time, + nodeIDs []ids.NodeID, + height uint64, +) error { + i := rand.Intn(len(nodeIDs)) //#nosec G404 + nodeID := nodeIDs[i] + s.PutCurrentDelegator(&state.Staker{ + TxID: ids.GenerateTestID(), + NodeID: nodeID, + SubnetID: subnetID, + Weight: 1 * units.Avax, + StartTime: startTime, + EndTime: endTime, + PotentialReward: 0, + NextTime: endTime, + Priority: txs.SubnetPermissionlessDelegatorCurrentPriority, + }) + + blk, err := block.NewBanffStandardBlock(startTime, ids.GenerateTestID(), height, nil) + if err != nil { + return err + } + + s.AddStatelessBlock(blk) + s.SetLastAccepted(blk.ID()) + s.SetHeight(height) + return s.Commit() +} diff --git a/avalanchego/vms/platformvm/validators/test_manager.go b/avalanchego/vms/platformvm/validators/test_manager.go new file mode 100644 index 00000000..e04742f2 --- /dev/null +++ b/avalanchego/vms/platformvm/validators/test_manager.go @@ -0,0 +1,33 @@ 
+// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package validators + +import ( + "context" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/validators" +) + +var TestManager Manager = testManager{} + +type testManager struct{} + +func (testManager) GetMinimumHeight(context.Context) (uint64, error) { + return 0, nil +} + +func (testManager) GetCurrentHeight(context.Context) (uint64, error) { + return 0, nil +} + +func (testManager) GetSubnetID(context.Context, ids.ID) (ids.ID, error) { + return ids.Empty, nil +} + +func (testManager) GetValidatorSet(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { + return nil, nil +} + +func (testManager) OnAcceptedBlockID(ids.ID) {} diff --git a/avalanchego/vms/platformvm/vm.go b/avalanchego/vms/platformvm/vm.go index 26aed61d..6167a279 100644 --- a/avalanchego/vms/platformvm/vm.go +++ b/avalanchego/vms/platformvm/vm.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package platformvm @@ -7,41 +7,36 @@ import ( "context" "errors" "fmt" + "math" + "net/http" "time" "github.com/gorilla/rpc/v2" - "github.com/prometheus/client_golang/prometheus" - "go.uber.org/zap" "github.com/ava-labs/avalanchego/cache" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/database/manager" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/common" - "github.com/ava-labs/avalanchego/snow/engine/snowman/block" "github.com/ava-labs/avalanchego/snow/uptime" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/json" "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/utils/math" "github.com/ava-labs/avalanchego/utils/timer/mockable" - "github.com/ava-labs/avalanchego/utils/window" - "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/version" "github.com/ava-labs/avalanchego/vms/components/avax" - "github.com/ava-labs/avalanchego/vms/platformvm/api" - "github.com/ava-labs/avalanchego/vms/platformvm/blocks" + "github.com/ava-labs/avalanchego/vms/platformvm/block" "github.com/ava-labs/avalanchego/vms/platformvm/config" "github.com/ava-labs/avalanchego/vms/platformvm/fx" "github.com/ava-labs/avalanchego/vms/platformvm/metrics" + "github.com/ava-labs/avalanchego/vms/platformvm/network" "github.com/ava-labs/avalanchego/vms/platformvm/reward" "github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/ava-labs/avalanchego/vms/platformvm/txs" @@ -49,31 +44,26 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/utxo" "github.com/ava-labs/avalanchego/vms/secp256k1fx" - blockbuilder 
"github.com/ava-labs/avalanchego/vms/platformvm/blocks/builder" - blockexecutor "github.com/ava-labs/avalanchego/vms/platformvm/blocks/executor" + snowmanblock "github.com/ava-labs/avalanchego/snow/engine/snowman/block" + blockbuilder "github.com/ava-labs/avalanchego/vms/platformvm/block/builder" + blockexecutor "github.com/ava-labs/avalanchego/vms/platformvm/block/executor" txbuilder "github.com/ava-labs/avalanchego/vms/platformvm/txs/builder" txexecutor "github.com/ava-labs/avalanchego/vms/platformvm/txs/executor" -) - -const ( - validatorSetsCacheSize = 512 - maxRecentlyAcceptedWindowSize = 256 - recentlyAcceptedWindowTTL = 5 * time.Minute + pvalidators "github.com/ava-labs/avalanchego/vms/platformvm/validators" ) var ( - _ block.ChainVM = (*VM)(nil) + _ snowmanblock.ChainVM = (*VM)(nil) _ secp256k1fx.VM = (*VM)(nil) _ validators.State = (*VM)(nil) _ validators.SubnetConnector = (*VM)(nil) - - errMissingValidatorSet = errors.New("missing validator set") - errMissingValidator = errors.New("missing validator") ) type VM struct { config.Config blockbuilder.Builder + network.Network + validators.State metrics metrics.Metrics atomicUtxosManager avax.AtomicUTXOManager @@ -84,8 +74,8 @@ type VM struct { uptimeManager uptime.Manager // The context of this vm - ctx *snow.Context - dbManager manager.Manager + ctx *snow.Context + db database.Database state state.State @@ -95,16 +85,16 @@ type VM struct { // Bootstrapped remembers if this chain has finished bootstrapping or not bootstrapped utils.Atomic[bool] - // Maps caches for each subnet that is currently tracked. 
- // Key: Subnet ID - // Value: cache mapping height -> validator set map - validatorSetCaches map[ids.ID]cache.Cacher[uint64, map[ids.NodeID]*validators.GetValidatorOutput] - - // sliding window of blocks that were recently accepted - recentlyAccepted window.Window[ids.ID] - txBuilder txbuilder.Builder manager blockexecutor.Manager + + // Cancelled on shutdown + onShutdownCtx context.Context + // Call [onShutdownCtxCancel] to cancel [onShutdownCtx] during Shutdown() + onShutdownCtxCancel context.CancelFunc + + // TODO: Remove after v1.11.x is activated + pruned utils.Atomic[bool] } // Initialize this blockchain. @@ -112,64 +102,64 @@ type VM struct { func (vm *VM) Initialize( ctx context.Context, chainCtx *snow.Context, - dbManager manager.Manager, + db database.Database, genesisBytes []byte, _ []byte, - _ []byte, + configBytes []byte, toEngine chan<- common.Message, _ []*common.Fx, appSender common.AppSender, ) error { chainCtx.Log.Verbo("initializing platform chain") + execConfig, err := config.GetExecutionConfig(configBytes) + if err != nil { + return err + } + chainCtx.Log.Info("using VM execution config", zap.Reflect("config", execConfig)) + registerer := prometheus.NewRegistry() if err := chainCtx.Metrics.Register(registerer); err != nil { return err } // Initialize metrics as soon as possible - var err error - vm.metrics, err = metrics.New("", registerer, vm.TrackedSubnets) + vm.metrics, err = metrics.New("", registerer) if err != nil { return fmt.Errorf("failed to initialize metrics: %w", err) } vm.ctx = chainCtx - vm.dbManager = dbManager + vm.db = db - vm.codecRegistry = linearcodec.NewDefault() + // Note: this codec is never used to serialize anything + vm.codecRegistry = linearcodec.NewDefault(time.Time{}) vm.fx = &secp256k1fx.Fx{} if err := vm.fx.Initialize(vm); err != nil { return err } - vm.validatorSetCaches = make(map[ids.ID]cache.Cacher[uint64, map[ids.NodeID]*validators.GetValidatorOutput]) - vm.recentlyAccepted = window.New[ids.ID]( - 
window.Config{ - Clock: &vm.clock, - MaxSize: maxRecentlyAcceptedWindowSize, - TTL: recentlyAcceptedWindowTTL, - }, - ) - rewards := reward.NewCalculator(vm.RewardConfig) + vm.state, err = state.New( - vm.dbManager.Current().Database, + vm.db, genesisBytes, registerer, &vm.Config, + execConfig, vm.ctx, vm.metrics, rewards, - &vm.bootstrapped, ) if err != nil { return err } + validatorManager := pvalidators.NewManager(chainCtx.Log, vm.Config, vm.state, vm.metrics, &vm.clock) + vm.State = validatorManager vm.atomicUtxosManager = avax.NewAtomicUTXOManager(chainCtx.SharedMemory, txs.Codec) utxoHandler := utxo.NewHandler(vm.ctx, &vm.clock, vm.fx) - vm.uptimeManager = uptime.NewManager(vm.state) + vm.uptimeManager = uptime.NewManager(vm.state, &vm.clock) vm.UptimeLockedCalculator.SetCalculator(&vm.bootstrapped, &chainCtx.Lock, vm.uptimeManager) vm.txBuilder = txbuilder.New( @@ -193,9 +183,7 @@ func (vm *VM) Initialize( Bootstrapped: &vm.bootstrapped, } - // Note: There is a circular dependency between the mempool and block - // builder which is broken by passing in the vm. 
- mempool, err := mempool.NewMempool("mempool", registerer, vm) + mempool, err := mempool.New("mempool", registerer, toEngine) if err != nil { return fmt.Errorf("failed to create mempool: %w", err) } @@ -205,15 +193,38 @@ func (vm *VM) Initialize( vm.metrics, vm.state, txExecutorBackend, - vm.recentlyAccepted, + validatorManager, ) + + txVerifier := network.NewLockedTxVerifier(&txExecutorBackend.Ctx.Lock, vm.manager) + vm.Network, err = network.New( + chainCtx.Log, + chainCtx.NodeID, + chainCtx.SubnetID, + validators.NewLockedState( + &chainCtx.Lock, + validatorManager, + ), + txVerifier, + mempool, + txExecutorBackend.Config.PartialSyncPrimaryNetwork, + appSender, + registerer, + execConfig.Network, + ) + if err != nil { + return fmt.Errorf("failed to initialize network: %w", err) + } + + vm.onShutdownCtx, vm.onShutdownCtxCancel = context.WithCancel(context.Background()) + // TODO: Wait for this goroutine to exit during Shutdown once the platformvm + // has better control of the context lock. + go vm.Network.Gossip(vm.onShutdownCtx) + vm.Builder = blockbuilder.New( mempool, - vm.txBuilder, txExecutorBackend, vm.manager, - toEngine, - appSender, ) // Create all of the chains that the database says exist @@ -228,16 +239,93 @@ func (vm *VM) Initialize( chainCtx.Log.Info("initializing last accepted", zap.Stringer("blkID", lastAcceptedID), ) - return vm.SetPreference(ctx, lastAcceptedID) + if err := vm.SetPreference(ctx, lastAcceptedID); err != nil { + return err + } + + // Incrementing [awaitShutdown] would cause a deadlock since + // [periodicallyPruneMempool] grabs the context lock. 
+ go vm.periodicallyPruneMempool(execConfig.MempoolPruneFrequency) + + shouldPrune, err := vm.state.ShouldPrune() + if err != nil { + return fmt.Errorf( + "failed to check if the database should be pruned: %w", + err, + ) + } + if !shouldPrune { + chainCtx.Log.Info("state already pruned and indexed") + vm.pruned.Set(true) + return nil + } + + go func() { + err := vm.state.PruneAndIndex(&vm.ctx.Lock, vm.ctx.Log) + if err != nil { + vm.ctx.Log.Error("state pruning and height indexing failed", + zap.Error(err), + ) + } + + vm.pruned.Set(true) + }() + + return nil +} + +func (vm *VM) periodicallyPruneMempool(frequency time.Duration) { + ticker := time.NewTicker(frequency) + defer ticker.Stop() + + for { + select { + case <-vm.onShutdownCtx.Done(): + return + case <-ticker.C: + if err := vm.pruneMempool(); err != nil { + vm.ctx.Log.Debug("pruning mempool failed", + zap.Error(err), + ) + } + } + } +} + +func (vm *VM) pruneMempool() error { + vm.ctx.Lock.Lock() + defer vm.ctx.Lock.Unlock() + + // Packing all of the transactions in order performs additional checks that + // the MempoolTxVerifier doesn't include. So, evicting transactions from + // here is expected to happen occasionally. + blockTxs, err := vm.Builder.PackBlockTxs(math.MaxInt) + if err != nil { + return err + } + + for _, tx := range blockTxs { + if err := vm.Builder.Add(tx); err != nil { + vm.ctx.Log.Debug( + "failed to reissue tx", + zap.Stringer("txID", tx.ID()), + zap.Error(err), + ) + } + } + + return nil } // Create all chains that exist that this node validates. 
func (vm *VM) initBlockchains() error { - if err := vm.createSubnet(constants.PrimaryNetworkID); err != nil { + if vm.Config.PartialSyncPrimaryNetwork { + vm.ctx.Log.Info("skipping primary network chain creation") + } else if err := vm.createSubnet(constants.PrimaryNetworkID); err != nil { return err } - if vm.StakingEnabled { + if vm.SybilProtectionEnabled { for subnetID := range vm.TrackedSubnets { if err := vm.createSubnet(subnetID); err != nil { return err @@ -290,22 +378,22 @@ func (vm *VM) onNormalOperationsStarted() error { return err } - primaryVdrIDs, exists := vm.getValidatorIDs(constants.PrimaryNetworkID) - if !exists { - return errMissingValidatorSet - } + primaryVdrIDs := vm.Validators.GetValidatorIDs(constants.PrimaryNetworkID) if err := vm.uptimeManager.StartTracking(primaryVdrIDs, constants.PrimaryNetworkID); err != nil { return err } + vl := validators.NewLogger(vm.ctx.Log, constants.PrimaryNetworkID, vm.ctx.NodeID) + vm.Validators.RegisterCallbackListener(constants.PrimaryNetworkID, vl) + for subnetID := range vm.TrackedSubnets { - vdrIDs, exists := vm.getValidatorIDs(subnetID) - if !exists { - return errMissingValidatorSet - } + vdrIDs := vm.Validators.GetValidatorIDs(subnetID) if err := vm.uptimeManager.StartTracking(vdrIDs, subnetID); err != nil { return err } + + vl := validators.NewLogger(vm.ctx.Log, subnetID, vm.ctx.NodeID) + vm.Validators.RegisterCallbackListener(subnetID, vl) } if err := vm.state.Commit(); err != nil { @@ -313,7 +401,7 @@ func (vm *VM) onNormalOperationsStarted() error { } // Start the block builder - vm.Builder.ResetBlockTimer() + vm.Builder.StartBlockTimer() return nil } @@ -330,26 +418,21 @@ func (vm *VM) SetState(_ context.Context, state snow.State) error { // Shutdown this blockchain func (vm *VM) Shutdown(context.Context) error { - if vm.dbManager == nil { + if vm.db == nil { return nil } - vm.Builder.Shutdown() + vm.onShutdownCtxCancel() + vm.Builder.ShutdownBlockTimer() if vm.bootstrapped.Get() { - primaryVdrIDs, 
exists := vm.getValidatorIDs(constants.PrimaryNetworkID) - if !exists { - return errMissingValidatorSet - } + primaryVdrIDs := vm.Validators.GetValidatorIDs(constants.PrimaryNetworkID) if err := vm.uptimeManager.StopTracking(primaryVdrIDs, constants.PrimaryNetworkID); err != nil { return err } for subnetID := range vm.TrackedSubnets { - vdrIDs, exists := vm.getValidatorIDs(subnetID) - if !exists { - return errMissingValidatorSet - } + vdrIDs := vm.Validators.GetValidatorIDs(subnetID) if err := vm.uptimeManager.StopTracking(vdrIDs, subnetID); err != nil { return err } @@ -360,33 +443,16 @@ func (vm *VM) Shutdown(context.Context) error { } } - errs := wrappers.Errs{} - errs.Add( + return utils.Err( vm.state.Close(), - vm.dbManager.Close(), + vm.db.Close(), ) - return errs.Err -} - -func (vm *VM) getValidatorIDs(subnetID ids.ID) ([]ids.NodeID, bool) { - validatorSet, exist := vm.Validators.Get(subnetID) - if !exist { - return nil, false - } - validators := validatorSet.List() - - validatorIDs := make([]ids.NodeID, len(validators)) - for i, vdr := range validators { - validatorIDs[i] = vdr.NodeID - } - - return validatorIDs, true } func (vm *VM) ParseBlock(_ context.Context, b []byte) (snowman.Block, error) { // Note: blocks to be parsed are not verified, so we must used blocks.Codec // rather than blocks.GenesisCodec - statelessBlk, err := blocks.Parse(blocks.Codec, b) + statelessBlk, err := block.Parse(block.Codec, b) if err != nil { return nil, err } @@ -404,7 +470,9 @@ func (vm *VM) LastAccepted(context.Context) (ids.ID, error) { // SetPreference sets the preferred block to be the one with ID [blkID] func (vm *VM) SetPreference(_ context.Context, blkID ids.ID) error { - vm.Builder.SetPreference(blkID) + if vm.manager.SetPreference(blkID) { + vm.Builder.ResetBlockTimer() + } return nil } @@ -415,49 +483,23 @@ func (*VM) Version(context.Context) (string, error) { // CreateHandlers returns a map where: // * keys are API endpoint extensions // * values are API handlers 
-func (vm *VM) CreateHandlers(context.Context) (map[string]*common.HTTPHandler, error) { +func (vm *VM) CreateHandlers(context.Context) (map[string]http.Handler, error) { server := rpc.NewServer() server.RegisterCodec(json.NewCodec(), "application/json") server.RegisterCodec(json.NewCodec(), "application/json;charset=UTF-8") server.RegisterInterceptFunc(vm.metrics.InterceptRequest) server.RegisterAfterFunc(vm.metrics.AfterRequest) - if err := server.RegisterService( - &Service{ - vm: vm, - addrManager: avax.NewAddressManager(vm.ctx), - stakerAttributesCache: &cache.LRU[ids.ID, *stakerAttributes]{ - Size: stakerAttributesCacheSize, - }, + service := &Service{ + vm: vm, + addrManager: avax.NewAddressManager(vm.ctx), + stakerAttributesCache: &cache.LRU[ids.ID, *stakerAttributes]{ + Size: stakerAttributesCacheSize, }, - "platform", - ); err != nil { - return nil, err } - - return map[string]*common.HTTPHandler{ - "": { - Handler: server, - }, - }, nil -} - -// CreateStaticHandlers returns a map where: -// * keys are API endpoint extensions -// * values are API handlers -func (*VM) CreateStaticHandlers(context.Context) (map[string]*common.HTTPHandler, error) { - server := rpc.NewServer() - server.RegisterCodec(json.NewCodec(), "application/json") - server.RegisterCodec(json.NewCodec(), "application/json;charset=UTF-8") - if err := server.RegisterService(&api.StaticService{}, "platform"); err != nil { - return nil, err - } - - return map[string]*common.HTTPHandler{ - "": { - LockOptions: common.NoLock, - Handler: server, - }, - }, nil + err := server.RegisterService(service, "platform") + return map[string]http.Handler{ + "": server, + }, err } func (vm *VM) Connected(_ context.Context, nodeID ids.NodeID, _ *version.Application) error { @@ -475,200 +517,6 @@ func (vm *VM) Disconnected(_ context.Context, nodeID ids.NodeID) error { return vm.state.Commit() } -// GetValidatorSet returns the validator set at the specified height for the -// provided subnetID. 
-func (vm *VM) GetValidatorSet(ctx context.Context, height uint64, subnetID ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { - validatorSetsCache, exists := vm.validatorSetCaches[subnetID] - if !exists { - validatorSetsCache = &cache.LRU[uint64, map[ids.NodeID]*validators.GetValidatorOutput]{Size: validatorSetsCacheSize} - // Only cache tracked subnets - if subnetID == constants.PrimaryNetworkID || vm.TrackedSubnets.Contains(subnetID) { - vm.validatorSetCaches[subnetID] = validatorSetsCache - } - } - - if validatorSet, ok := validatorSetsCache.Get(height); ok { - vm.metrics.IncValidatorSetsCached() - return validatorSet, nil - } - - lastAcceptedHeight, err := vm.GetCurrentHeight(ctx) - if err != nil { - return nil, err - } - if lastAcceptedHeight < height { - return nil, database.ErrNotFound - } - - // get the start time to track metrics - startTime := vm.Clock().Time() - - currentSubnetValidators, ok := vm.Validators.Get(subnetID) - if !ok { - currentSubnetValidators = validators.NewSet() - if err := vm.state.ValidatorSet(subnetID, currentSubnetValidators); err != nil { - return nil, err - } - } - currentPrimaryNetworkValidators, ok := vm.Validators.Get(constants.PrimaryNetworkID) - if !ok { - // This should never happen - return nil, errMissingValidatorSet - } - - currentSubnetValidatorList := currentSubnetValidators.List() - vdrSet := make(map[ids.NodeID]*validators.GetValidatorOutput, len(currentSubnetValidatorList)) - for _, vdr := range currentSubnetValidatorList { - primaryVdr, ok := currentPrimaryNetworkValidators.Get(vdr.NodeID) - if !ok { - // This should never happen - return nil, fmt.Errorf("%w: %s", errMissingValidator, vdr.NodeID) - } - vdrSet[vdr.NodeID] = &validators.GetValidatorOutput{ - NodeID: vdr.NodeID, - PublicKey: primaryVdr.PublicKey, - Weight: vdr.Weight, - } - } - - for i := lastAcceptedHeight; i > height; i-- { - weightDiffs, err := vm.state.GetValidatorWeightDiffs(i, subnetID) - if err != nil { - return nil, err - } - - 
for nodeID, weightDiff := range weightDiffs { - vdr, ok := vdrSet[nodeID] - if !ok { - // This node isn't in the current validator set. - vdr = &validators.GetValidatorOutput{ - NodeID: nodeID, - } - vdrSet[nodeID] = vdr - } - - // The weight of this node changed at this block. - var op func(uint64, uint64) (uint64, error) - if weightDiff.Decrease { - // The validator's weight was decreased at this block, so in the - // prior block it was higher. - op = math.Add64 - } else { - // The validator's weight was increased at this block, so in the - // prior block it was lower. - op = math.Sub[uint64] - } - - // Apply the weight change. - vdr.Weight, err = op(vdr.Weight, weightDiff.Amount) - if err != nil { - return nil, err - } - - if vdr.Weight == 0 { - // The validator's weight was 0 before this block so - // they weren't in the validator set. - delete(vdrSet, nodeID) - } - } - - pkDiffs, err := vm.state.GetValidatorPublicKeyDiffs(i) - if err != nil { - return nil, err - } - - for nodeID, pk := range pkDiffs { - // pkDiffs includes all primary network key diffs, if we are - // fetching a subnet's validator set, we should ignore non-subnet - // validators. - if vdr, ok := vdrSet[nodeID]; ok { - // The validator's public key was removed at this block, so it - // was in the validator set before. 
- vdr.PublicKey = pk - } - } - } - - // cache the validator set - validatorSetsCache.Put(height, vdrSet) - - endTime := vm.Clock().Time() - vm.metrics.IncValidatorSetsCreated() - vm.metrics.AddValidatorSetsDuration(endTime.Sub(startTime)) - vm.metrics.AddValidatorSetsHeightDiff(lastAcceptedHeight - height) - return vdrSet, nil -} - -// GetCurrentHeight returns the height of the last accepted block -func (vm *VM) GetSubnetID(_ context.Context, chainID ids.ID) (ids.ID, error) { - if chainID == constants.PlatformChainID { - return constants.PrimaryNetworkID, nil - } - - chainTx, _, err := vm.state.GetTx(chainID) - if err != nil { - return ids.Empty, fmt.Errorf( - "problem retrieving blockchain %q: %w", - chainID, - err, - ) - } - chain, ok := chainTx.Unsigned.(*txs.CreateChainTx) - if !ok { - return ids.Empty, fmt.Errorf("%q is not a blockchain", chainID) - } - return chain.SubnetID, nil -} - -// GetMinimumHeight returns the height of the most recent block beyond the -// horizon of our recentlyAccepted window. -// -// Because the time between blocks is arbitrary, we're only guaranteed that -// the window's configured TTL amount of time has passed once an element -// expires from the window. -// -// To try to always return a block older than the window's TTL, we return the -// parent of the oldest element in the window (as an expired element is always -// guaranteed to be sufficiently stale). If we haven't expired an element yet -// in the case of a process restart, we default to the lastAccepted block's -// height which is likely (but not guaranteed) to also be older than the -// window's configured TTL. -// -// If [UseCurrentHeight] is true, we will always return the last accepted block -// height as the minimum. This is used to trigger the proposervm on recently -// created subnets before [recentlyAcceptedWindowTTL]. 
-func (vm *VM) GetMinimumHeight(ctx context.Context) (uint64, error) { - if vm.Config.UseCurrentHeight { - return vm.GetCurrentHeight(ctx) - } - - oldest, ok := vm.recentlyAccepted.Oldest() - if !ok { - return vm.GetCurrentHeight(ctx) - } - - blk, err := vm.manager.GetBlock(oldest) - if err != nil { - return 0, err - } - - // We subtract 1 from the height of [oldest] because we want the height of - // the last block accepted before the [recentlyAccepted] window. - // - // There is guaranteed to be a block accepted before this window because the - // first block added to [recentlyAccepted] window is >= height 1. - return blk.Height() - 1, nil -} - -// GetCurrentHeight returns the height of the last accepted block -func (vm *VM) GetCurrentHeight(context.Context) (uint64, error) { - lastAccepted, err := vm.manager.GetBlock(vm.state.GetLastAccepted()) - if err != nil { - return 0, err - } - return lastAccepted.Height(), nil -} - func (vm *VM) CodecRegistry() codec.Registry { return vm.codecRegistry } @@ -681,33 +529,29 @@ func (vm *VM) Logger() logging.Logger { return vm.ctx.Log } -// Returns the percentage of the total stake of the subnet connected to this -// node. 
-func (vm *VM) getPercentConnected(subnetID ids.ID) (float64, error) { - vdrSet, exists := vm.Validators.Get(subnetID) - if !exists { - return 0, errMissingValidatorSet +func (vm *VM) VerifyHeightIndex(_ context.Context) error { + if vm.pruned.Get() { + return nil } - vdrSetWeight := vdrSet.Weight() - if vdrSetWeight == 0 { - return 1, nil - } + return snowmanblock.ErrIndexIncomplete +} - var ( - connectedStake uint64 - err error - ) - for _, vdr := range vdrSet.List() { - if !vm.uptimeManager.IsConnected(vdr.NodeID, subnetID) { - continue // not connected to us --> don't include - } - connectedStake, err = math.Add64(connectedStake, vdr.Weight) - if err != nil { - return 0, err - } +func (vm *VM) GetBlockIDAtHeight(_ context.Context, height uint64) (ids.ID, error) { + return vm.state.GetBlockIDAtHeight(height) +} + +func (vm *VM) issueTx(ctx context.Context, tx *txs.Tx) error { + err := vm.Network.IssueTx(ctx, tx) + if err != nil && !errors.Is(err, mempool.ErrDuplicateTx) { + vm.ctx.Log.Debug("failed to add tx to mempool", + zap.Stringer("txID", tx.ID()), + zap.Error(err), + ) + return err } - return float64(connectedStake) / float64(vdrSetWeight), nil + + return nil } func (vm *VM) EthVerificationEnabled() bool { diff --git a/avalanchego/vms/platformvm/vm_regression_test.go b/avalanchego/vms/platformvm/vm_regression_test.go index a4512077..8b726f9c 100644 --- a/avalanchego/vms/platformvm/vm_regression_test.go +++ b/avalanchego/vms/platformvm/vm_regression_test.go @@ -1,54 +1,61 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package platformvm import ( + "bytes" "context" + "errors" "testing" "time" "github.com/prometheus/client_golang/prometheus" - "github.com/stretchr/testify/require" + "golang.org/x/sync/errgroup" "github.com/ava-labs/avalanchego/chains" "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/database/manager" + "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/network/p2p" + "github.com/ava-labs/avalanchego/network/p2p/gossip" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/snow/uptime" "github.com/ava-labs/avalanchego/snow/validators" - "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/bloom" "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/version" "github.com/ava-labs/avalanchego/vms/components/avax" - "github.com/ava-labs/avalanchego/vms/platformvm/blocks" + "github.com/ava-labs/avalanchego/vms/platformvm/block" "github.com/ava-labs/avalanchego/vms/platformvm/config" "github.com/ava-labs/avalanchego/vms/platformvm/metrics" + "github.com/ava-labs/avalanchego/vms/platformvm/network" "github.com/ava-labs/avalanchego/vms/platformvm/reward" + "github.com/ava-labs/avalanchego/vms/platformvm/signer" "github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/ava-labs/avalanchego/vms/platformvm/txs" + "github.com/ava-labs/avalanchego/vms/platformvm/txs/executor" "github.com/ava-labs/avalanchego/vms/secp256k1fx" - txexecutor 
"github.com/ava-labs/avalanchego/vms/platformvm/txs/executor" + blockexecutor "github.com/ava-labs/avalanchego/vms/platformvm/block/executor" ) func TestAddDelegatorTxOverDelegatedRegression(t *testing.T) { require := require.New(t) - vm, _, _ := defaultVM() + vm, _, _ := defaultVM(t, cortina) vm.ctx.Lock.Lock() - defer func() { - require.NoError(vm.Shutdown(context.Background())) - vm.ctx.Lock.Unlock() - }() + defer vm.ctx.Lock.Unlock() - validatorStartTime := vm.clock.Time().Add(txexecutor.SyncBound).Add(1 * time.Second) + validatorStartTime := vm.clock.Time().Add(executor.SyncBound).Add(1 * time.Second) validatorEndTime := validatorStartTime.Add(360 * 24 * time.Hour) nodeID := ids.GenerateTestNodeID() @@ -64,11 +71,14 @@ func TestAddDelegatorTxOverDelegatedRegression(t *testing.T) { reward.PercentDenominator, []*secp256k1.PrivateKey{keys[0]}, changeAddr, + nil, ) require.NoError(err) // trigger block creation - require.NoError(vm.Builder.AddUnverifiedTx(addValidatorTx)) + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), addValidatorTx)) + vm.ctx.Lock.Lock() addValidatorBlock, err := vm.Builder.BuildBlock(context.Background()) require.NoError(err) @@ -84,7 +94,7 @@ func TestAddDelegatorTxOverDelegatedRegression(t *testing.T) { require.NoError(firstAdvanceTimeBlock.Accept(context.Background())) require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) - firstDelegatorStartTime := validatorStartTime.Add(txexecutor.SyncBound).Add(1 * time.Second) + firstDelegatorStartTime := validatorStartTime.Add(executor.SyncBound).Add(1 * time.Second) firstDelegatorEndTime := firstDelegatorStartTime.Add(vm.MinStakeDuration) // create valid tx @@ -96,11 +106,14 @@ func TestAddDelegatorTxOverDelegatedRegression(t *testing.T) { changeAddr, []*secp256k1.PrivateKey{keys[0], keys[1]}, changeAddr, + nil, ) require.NoError(err) // trigger block creation - require.NoError(vm.Builder.AddUnverifiedTx(addFirstDelegatorTx)) + 
vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), addFirstDelegatorTx)) + vm.ctx.Lock.Lock() addFirstDelegatorBlock, err := vm.Builder.BuildBlock(context.Background()) require.NoError(err) @@ -119,7 +132,7 @@ func TestAddDelegatorTxOverDelegatedRegression(t *testing.T) { secondDelegatorStartTime := firstDelegatorEndTime.Add(2 * time.Second) secondDelegatorEndTime := secondDelegatorStartTime.Add(vm.MinStakeDuration) - vm.clock.Set(secondDelegatorStartTime.Add(-10 * txexecutor.SyncBound)) + vm.clock.Set(secondDelegatorStartTime.Add(-10 * executor.SyncBound)) // create valid tx addSecondDelegatorTx, err := vm.txBuilder.NewAddDelegatorTx( @@ -130,11 +143,14 @@ func TestAddDelegatorTxOverDelegatedRegression(t *testing.T) { changeAddr, []*secp256k1.PrivateKey{keys[0], keys[1], keys[3]}, changeAddr, + nil, ) require.NoError(err) // trigger block creation - require.NoError(vm.Builder.AddUnverifiedTx(addSecondDelegatorTx)) + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), addSecondDelegatorTx)) + vm.ctx.Lock.Lock() addSecondDelegatorBlock, err := vm.Builder.BuildBlock(context.Background()) require.NoError(err) @@ -154,16 +170,19 @@ func TestAddDelegatorTxOverDelegatedRegression(t *testing.T) { changeAddr, []*secp256k1.PrivateKey{keys[0], keys[1], keys[4]}, changeAddr, + nil, ) require.NoError(err) // trigger block creation - err = vm.Builder.AddUnverifiedTx(addThirdDelegatorTx) - require.Error(err, "should have marked the delegator as being over delegated") + vm.ctx.Lock.Unlock() + err = vm.issueTx(context.Background(), addThirdDelegatorTx) + require.ErrorIs(err, executor.ErrOverDelegated) + vm.ctx.Lock.Lock() } func TestAddDelegatorTxHeapCorruption(t *testing.T) { - validatorStartTime := banffForkTime.Add(txexecutor.SyncBound).Add(1 * time.Second) + validatorStartTime := latestForkTime.Add(executor.SyncBound).Add(1 * time.Second) validatorEndTime := validatorStartTime.Add(360 * 24 * time.Hour) validatorStake := 
defaultMaxValidatorStake / 5 @@ -184,19 +203,16 @@ func TestAddDelegatorTxHeapCorruption(t *testing.T) { delegator4Stake := defaultMaxValidatorStake - validatorStake - defaultMinValidatorStake tests := []struct { - name string - ap3Time time.Time - shouldFail bool + name string + ap3Time time.Time }{ { - name: "pre-upgrade is no longer restrictive", - ap3Time: validatorEndTime, - shouldFail: false, + name: "pre-upgrade is no longer restrictive", + ap3Time: validatorEndTime, }, { - name: "post-upgrade calculate max stake correctly", - ap3Time: defaultGenesisTime, - shouldFail: false, + name: "post-upgrade calculate max stake correctly", + ap3Time: defaultGenesisTime, }, } @@ -204,21 +220,17 @@ func TestAddDelegatorTxHeapCorruption(t *testing.T) { t.Run(test.name, func(t *testing.T) { require := require.New(t) - vm, _, _ := defaultVM() + vm, _, _ := defaultVM(t, apricotPhase3) vm.ApricotPhase3Time = test.ap3Time vm.ctx.Lock.Lock() - defer func() { - err := vm.Shutdown(context.Background()) - require.NoError(err) - - vm.ctx.Lock.Unlock() - }() + defer vm.ctx.Lock.Unlock() - key, err := testKeyFactory.NewPrivateKey() + key, err := secp256k1.NewPrivateKey() require.NoError(err) id := key.PublicKey().Address() + nodeID := ids.GenerateTestNodeID() changeAddr := keys[0].PublicKey().Address() // create valid tx @@ -226,17 +238,19 @@ func TestAddDelegatorTxHeapCorruption(t *testing.T) { validatorStake, uint64(validatorStartTime.Unix()), uint64(validatorEndTime.Unix()), - ids.NodeID(id), + nodeID, id, reward.PercentDenominator, []*secp256k1.PrivateKey{keys[0], keys[1]}, changeAddr, + nil, ) require.NoError(err) // issue the add validator tx - err = vm.Builder.AddUnverifiedTx(addValidatorTx) - require.NoError(err) + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), addValidatorTx)) + vm.ctx.Lock.Lock() // trigger block creation for the validator tx addValidatorBlock, err := vm.Builder.BuildBlock(context.Background()) @@ -250,16 +264,18 @@ func 
TestAddDelegatorTxHeapCorruption(t *testing.T) { delegator1Stake, uint64(delegator1StartTime.Unix()), uint64(delegator1EndTime.Unix()), - ids.NodeID(id), + nodeID, keys[0].PublicKey().Address(), []*secp256k1.PrivateKey{keys[0], keys[1]}, changeAddr, + nil, ) require.NoError(err) // issue the first add delegator tx - err = vm.Builder.AddUnverifiedTx(addFirstDelegatorTx) - require.NoError(err) + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), addFirstDelegatorTx)) + vm.ctx.Lock.Lock() // trigger block creation for the first add delegator tx addFirstDelegatorBlock, err := vm.Builder.BuildBlock(context.Background()) @@ -273,16 +289,18 @@ func TestAddDelegatorTxHeapCorruption(t *testing.T) { delegator2Stake, uint64(delegator2StartTime.Unix()), uint64(delegator2EndTime.Unix()), - ids.NodeID(id), + nodeID, keys[0].PublicKey().Address(), []*secp256k1.PrivateKey{keys[0], keys[1]}, changeAddr, + nil, ) require.NoError(err) // issue the second add delegator tx - err = vm.Builder.AddUnverifiedTx(addSecondDelegatorTx) - require.NoError(err) + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), addSecondDelegatorTx)) + vm.ctx.Lock.Lock() // trigger block creation for the second add delegator tx addSecondDelegatorBlock, err := vm.Builder.BuildBlock(context.Background()) @@ -296,16 +314,18 @@ func TestAddDelegatorTxHeapCorruption(t *testing.T) { delegator3Stake, uint64(delegator3StartTime.Unix()), uint64(delegator3EndTime.Unix()), - ids.NodeID(id), + nodeID, keys[0].PublicKey().Address(), []*secp256k1.PrivateKey{keys[0], keys[1]}, changeAddr, + nil, ) require.NoError(err) // issue the third add delegator tx - err = vm.Builder.AddUnverifiedTx(addThirdDelegatorTx) - require.NoError(err) + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), addThirdDelegatorTx)) + vm.ctx.Lock.Lock() // trigger block creation for the third add delegator tx addThirdDelegatorBlock, err := vm.Builder.BuildBlock(context.Background()) @@ 
-319,25 +339,21 @@ func TestAddDelegatorTxHeapCorruption(t *testing.T) { delegator4Stake, uint64(delegator4StartTime.Unix()), uint64(delegator4EndTime.Unix()), - ids.NodeID(id), + nodeID, keys[0].PublicKey().Address(), []*secp256k1.PrivateKey{keys[0], keys[1]}, changeAddr, + nil, ) require.NoError(err) // issue the fourth add delegator tx - err = vm.Builder.AddUnverifiedTx(addFourthDelegatorTx) - require.NoError(err) + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), addFourthDelegatorTx)) + vm.ctx.Lock.Lock() // trigger block creation for the fourth add delegator tx addFourthDelegatorBlock, err := vm.Builder.BuildBlock(context.Background()) - - if test.shouldFail { - require.Error(err, "should have failed to allow new delegator") - return - } - require.NoError(err) require.NoError(addFourthDelegatorBlock.Verify(context.Background())) require.NoError(addFourthDelegatorBlock.Accept(context.Background())) @@ -350,51 +366,50 @@ func TestAddDelegatorTxHeapCorruption(t *testing.T) { // panic. 
func TestUnverifiedParentPanicRegression(t *testing.T) { require := require.New(t) - _, genesisBytes := defaultGenesis() - baseDBManager := manager.NewMemDB(version.Semantic1_0_0) - atomicDB := prefixdb.New([]byte{1}, baseDBManager.Current().Database) + baseDB := memdb.New() + atomicDB := prefixdb.New([]byte{1}, baseDB) - vdrs := validators.NewManager() - primaryVdrs := validators.NewSet() - _ = vdrs.Add(constants.PrimaryNetworkID, primaryVdrs) vm := &VM{Config: config.Config{ Chains: chains.TestManager, - Validators: vdrs, + Validators: validators.NewManager(), UptimeLockedCalculator: uptime.NewLockedCalculator(), MinStakeDuration: defaultMinStakingDuration, MaxStakeDuration: defaultMaxStakingDuration, RewardConfig: defaultRewardConfig, - BanffTime: banffForkTime, + BanffTime: latestForkTime, + CortinaTime: mockable.MaxTime, + DurangoTime: mockable.MaxTime, }} - ctx := defaultContext() + ctx := snowtest.Context(t, snowtest.PChainID) ctx.Lock.Lock() defer func() { require.NoError(vm.Shutdown(context.Background())) ctx.Lock.Unlock() }() + _, genesisBytes := defaultGenesis(t, ctx.AVAXAssetID) + msgChan := make(chan common.Message, 1) - err := vm.Initialize( + require.NoError(vm.Initialize( context.Background(), ctx, - baseDBManager, + baseDB, genesisBytes, nil, nil, msgChan, nil, nil, - ) - require.NoError(err) + )) m := atomic.NewMemory(atomicDB) vm.ctx.SharedMemory = m.NewSharedMemory(ctx.ChainID) // set time to post Banff fork - vm.clock.Set(banffForkTime.Add(time.Second)) - vm.state.SetTimestamp(banffForkTime.Add(time.Second)) + vm.clock.Set(latestForkTime.Add(time.Second)) + vm.state.SetTimestamp(latestForkTime.Add(time.Second)) key0 := keys[0] key1 := keys[1] @@ -406,6 +421,7 @@ func TestUnverifiedParentPanicRegression(t *testing.T) { []ids.ShortID{addr0}, []*secp256k1.PrivateKey{key0}, addr0, + nil, ) require.NoError(err) @@ -414,6 +430,7 @@ func TestUnverifiedParentPanicRegression(t *testing.T) { []ids.ShortID{addr1}, []*secp256k1.PrivateKey{key1}, addr1, + 
nil, ) require.NoError(err) @@ -422,17 +439,17 @@ func TestUnverifiedParentPanicRegression(t *testing.T) { []ids.ShortID{addr1}, []*secp256k1.PrivateKey{key1}, addr0, + nil, ) require.NoError(err) - preferred, err := vm.Builder.Preferred() + preferredID := vm.manager.Preferred() + preferred, err := vm.manager.GetBlock(preferredID) require.NoError(err) - preferredChainTime := preferred.Timestamp() - preferredID := preferred.ID() preferredHeight := preferred.Height() - statelessStandardBlk, err := blocks.NewBanffStandardBlock( + statelessStandardBlk, err := block.NewBanffStandardBlock( preferredChainTime, preferredID, preferredHeight+1, @@ -441,7 +458,7 @@ func TestUnverifiedParentPanicRegression(t *testing.T) { require.NoError(err) addSubnetBlk0 := vm.manager.NewBlock(statelessStandardBlk) - statelessStandardBlk, err = blocks.NewBanffStandardBlock( + statelessStandardBlk, err = block.NewBanffStandardBlock( preferredChainTime, preferredID, preferredHeight+1, @@ -450,7 +467,7 @@ func TestUnverifiedParentPanicRegression(t *testing.T) { require.NoError(err) addSubnetBlk1 := vm.manager.NewBlock(statelessStandardBlk) - statelessStandardBlk, err = blocks.NewBanffStandardBlock( + statelessStandardBlk, err = block.NewBanffStandardBlock( preferredChainTime, addSubnetBlk1.ID(), preferredHeight+2, @@ -478,45 +495,36 @@ func TestUnverifiedParentPanicRegression(t *testing.T) { func TestRejectedStateRegressionInvalidValidatorTimestamp(t *testing.T) { require := require.New(t) - vm, baseDB, mutableSharedMemory := defaultVM() + vm, baseDB, mutableSharedMemory := defaultVM(t, cortina) vm.ctx.Lock.Lock() - defer func() { - err := vm.Shutdown(context.Background()) - require.NoError(err) - - vm.ctx.Lock.Unlock() - }() + defer vm.ctx.Lock.Unlock() - newValidatorStartTime := vm.clock.Time().Add(txexecutor.SyncBound).Add(1 * time.Second) + nodeID := ids.GenerateTestNodeID() + newValidatorStartTime := vm.clock.Time().Add(executor.SyncBound).Add(1 * time.Second) newValidatorEndTime := 
newValidatorStartTime.Add(defaultMinStakingDuration) - key, err := testKeyFactory.NewPrivateKey() - require.NoError(err) - - nodeID := ids.NodeID(key.PublicKey().Address()) - // Create the tx to add a new validator addValidatorTx, err := vm.txBuilder.NewAddValidatorTx( vm.MinValidatorStake, uint64(newValidatorStartTime.Unix()), uint64(newValidatorEndTime.Unix()), nodeID, - ids.ShortID(nodeID), + ids.GenerateTestShortID(), reward.PercentDenominator, []*secp256k1.PrivateKey{keys[0]}, ids.ShortEmpty, + nil, ) require.NoError(err) // Create the standard block to add the new validator - preferred, err := vm.Builder.Preferred() + preferredID := vm.manager.Preferred() + preferred, err := vm.manager.GetBlock(preferredID) require.NoError(err) - preferredChainTime := preferred.Timestamp() - preferredID := preferred.ID() preferredHeight := preferred.Height() - statelessBlk, err := blocks.NewBanffStandardBlock( + statelessBlk, err := block.NewBanffStandardBlock( preferredChainTime, preferredID, preferredHeight+1, @@ -525,8 +533,7 @@ func TestRejectedStateRegressionInvalidValidatorTimestamp(t *testing.T) { require.NoError(err) addValidatorStandardBlk := vm.manager.NewBlock(statelessBlk) - err = addValidatorStandardBlk.Verify(context.Background()) - require.NoError(err) + require.NoError(addValidatorStandardBlk.Verify(context.Background())) // Verify that the new validator now in pending validator set { @@ -569,10 +576,9 @@ func TestRejectedStateRegressionInvalidValidatorTimestamp(t *testing.T) { }, } signedImportTx := &txs.Tx{Unsigned: unsignedImportTx} - err = signedImportTx.Sign(txs.Codec, [][]*secp256k1.PrivateKey{ + require.NoError(signedImportTx.Sign(txs.Codec, [][]*secp256k1.PrivateKey{ {}, // There is one input, with no required signers - }) - require.NoError(err) + })) // Create the standard block that will fail verification, and then be // re-verified. 
@@ -580,7 +586,7 @@ func TestRejectedStateRegressionInvalidValidatorTimestamp(t *testing.T) { preferredID = addValidatorStandardBlk.ID() preferredHeight = addValidatorStandardBlk.Height() - statelessImportBlk, err := blocks.NewBanffStandardBlock( + statelessImportBlk, err := block.NewBanffStandardBlock( preferredChainTime, preferredID, preferredHeight+1, @@ -593,7 +599,7 @@ func TestRejectedStateRegressionInvalidValidatorTimestamp(t *testing.T) { // Because the shared memory UTXO hasn't been populated, this block is // currently invalid. err = importBlk.Verify(context.Background()) - require.Error(err) + require.ErrorIs(err, database.ErrNotFound) // Because we no longer ever reject a block in verification, the status // should remain as processing. @@ -606,11 +612,11 @@ func TestRejectedStateRegressionInvalidValidatorTimestamp(t *testing.T) { mutableSharedMemory.SharedMemory = m.NewSharedMemory(vm.ctx.ChainID) peerSharedMemory := m.NewSharedMemory(vm.ctx.XChainID) - utxoBytes, err := txs.Codec.Marshal(txs.Version, utxo) + utxoBytes, err := txs.Codec.Marshal(txs.CodecVersion, utxo) require.NoError(err) inputID := utxo.InputID() - err = peerSharedMemory.Apply( + require.NoError(peerSharedMemory.Apply( map[ids.ID]*atomic.Requests{ vm.ctx.ChainID: { PutRequests: []*atomic.Element{ @@ -621,13 +627,11 @@ func TestRejectedStateRegressionInvalidValidatorTimestamp(t *testing.T) { }, }, }, - ) - require.NoError(err) + )) // Because the shared memory UTXO has now been populated, the block should // pass verification. - err = importBlk.Verify(context.Background()) - require.NoError(err) + require.NoError(importBlk.Verify(context.Background())) // The status shouldn't have been changed during a successful verification. 
importBlkStatus = importBlk.Status() @@ -642,7 +646,7 @@ func TestRejectedStateRegressionInvalidValidatorTimestamp(t *testing.T) { preferredID = importBlk.ID() preferredHeight = importBlk.Height() - statelessAdvanceTimeStandardBlk, err := blocks.NewBanffStandardBlock( + statelessAdvanceTimeStandardBlk, err := block.NewBanffStandardBlock( newValidatorStartTime, preferredID, preferredHeight+1, @@ -651,8 +655,7 @@ func TestRejectedStateRegressionInvalidValidatorTimestamp(t *testing.T) { require.NoError(err) advanceTimeStandardBlk := vm.manager.NewBlock(statelessAdvanceTimeStandardBlk) - err = advanceTimeStandardBlk.Verify(context.Background()) - require.NoError(err) + require.NoError(advanceTimeStandardBlk.Verify(context.Background())) // Accept all the blocks allBlocks := []snowman.Block{ @@ -661,8 +664,7 @@ func TestRejectedStateRegressionInvalidValidatorTimestamp(t *testing.T) { advanceTimeStandardBlk, } for _, blk := range allBlocks { - err = blk.Accept(context.Background()) - require.NoError(err) + require.NoError(blk.Accept(context.Background())) status := blk.Status() require.Equal(choices.Accepted, status) @@ -670,29 +672,28 @@ func TestRejectedStateRegressionInvalidValidatorTimestamp(t *testing.T) { // Force a reload of the state from the database. vm.Config.Validators = validators.NewManager() - vm.Config.Validators.Add(constants.PrimaryNetworkID, validators.NewSet()) - is, err := state.New( - vm.dbManager.Current().Database, + execCfg, _ := config.GetExecutionConfig(nil) + newState, err := state.New( + vm.db, nil, prometheus.NewRegistry(), &vm.Config, + execCfg, vm.ctx, metrics.Noop, reward.NewCalculator(vm.Config.RewardConfig), - &utils.Atomic[bool]{}, ) require.NoError(err) - vm.state = is // Verify that new validator is now in the current validator set. 
{ - _, err := vm.state.GetCurrentValidator(constants.PrimaryNetworkID, nodeID) + _, err := newState.GetCurrentValidator(constants.PrimaryNetworkID, nodeID) require.NoError(err) - _, err = vm.state.GetPendingValidator(constants.PrimaryNetworkID, nodeID) + _, err = newState.GetPendingValidator(constants.PrimaryNetworkID, nodeID) require.ErrorIs(err, database.ErrNotFound) - currentTimestamp := vm.state.GetTimestamp() + currentTimestamp := newState.GetTimestamp() require.Equal(newValidatorStartTime.Unix(), currentTimestamp.Unix()) } } @@ -700,21 +701,16 @@ func TestRejectedStateRegressionInvalidValidatorTimestamp(t *testing.T) { func TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { require := require.New(t) - vm, baseDB, mutableSharedMemory := defaultVM() + vm, baseDB, mutableSharedMemory := defaultVM(t, cortina) vm.ctx.Lock.Lock() - defer func() { - err := vm.Shutdown(context.Background()) - require.NoError(err) - - vm.ctx.Lock.Unlock() - }() + defer vm.ctx.Lock.Unlock() vm.state.SetCurrentSupply(constants.PrimaryNetworkID, defaultRewardConfig.SupplyCap/2) - newValidatorStartTime0 := vm.clock.Time().Add(txexecutor.SyncBound).Add(1 * time.Second) + newValidatorStartTime0 := vm.clock.Time().Add(executor.SyncBound).Add(1 * time.Second) newValidatorEndTime0 := newValidatorStartTime0.Add(defaultMaxStakingDuration) - nodeID0 := ids.NodeID(ids.GenerateTestShortID()) + nodeID0 := ids.GenerateTestNodeID() // Create the tx to add the first new validator addValidatorTx0, err := vm.txBuilder.NewAddValidatorTx( @@ -722,22 +718,22 @@ func TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { uint64(newValidatorStartTime0.Unix()), uint64(newValidatorEndTime0.Unix()), nodeID0, - ids.ShortID(nodeID0), + ids.GenerateTestShortID(), reward.PercentDenominator, []*secp256k1.PrivateKey{keys[0]}, ids.ShortEmpty, + nil, ) require.NoError(err) // Create the standard block to add the first new validator - preferred, err := vm.Builder.Preferred() + preferredID := 
vm.manager.Preferred() + preferred, err := vm.manager.GetBlock(preferredID) require.NoError(err) - preferredChainTime := preferred.Timestamp() - preferredID := preferred.ID() preferredHeight := preferred.Height() - statelessAddValidatorStandardBlk0, err := blocks.NewBanffStandardBlock( + statelessAddValidatorStandardBlk0, err := block.NewBanffStandardBlock( preferredChainTime, preferredID, preferredHeight+1, @@ -746,8 +742,7 @@ func TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { require.NoError(err) addValidatorStandardBlk0 := vm.manager.NewBlock(statelessAddValidatorStandardBlk0) - err = addValidatorStandardBlk0.Verify(context.Background()) - require.NoError(err) + require.NoError(addValidatorStandardBlk0.Verify(context.Background())) // Verify that first new validator now in pending validator set { @@ -767,7 +762,7 @@ func TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { preferredID = addValidatorStandardBlk0.ID() preferredHeight = addValidatorStandardBlk0.Height() - statelessAdvanceTimeStandardBlk0, err := blocks.NewBanffStandardBlock( + statelessAdvanceTimeStandardBlk0, err := block.NewBanffStandardBlock( newValidatorStartTime0, preferredID, preferredHeight+1, @@ -776,8 +771,7 @@ func TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { require.NoError(err) advanceTimeStandardBlk0 := vm.manager.NewBlock(statelessAdvanceTimeStandardBlk0) - err = advanceTimeStandardBlk0.Verify(context.Background()) - require.NoError(err) + require.NoError(advanceTimeStandardBlk0.Verify(context.Background())) // Verify that the first new validator is now in the current validator set. 
{ @@ -826,10 +820,9 @@ func TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { }, } signedImportTx := &txs.Tx{Unsigned: unsignedImportTx} - err = signedImportTx.Sign(txs.Codec, [][]*secp256k1.PrivateKey{ + require.NoError(signedImportTx.Sign(txs.Codec, [][]*secp256k1.PrivateKey{ {}, // There is one input, with no required signers - }) - require.NoError(err) + })) // Create the standard block that will fail verification, and then be // re-verified. @@ -837,7 +830,7 @@ func TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { preferredID = advanceTimeStandardBlk0.ID() preferredHeight = advanceTimeStandardBlk0.Height() - statelessImportBlk, err := blocks.NewBanffStandardBlock( + statelessImportBlk, err := block.NewBanffStandardBlock( preferredChainTime, preferredID, preferredHeight+1, @@ -849,7 +842,7 @@ func TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { // Because the shared memory UTXO hasn't been populated, this block is // currently invalid. err = importBlk.Verify(context.Background()) - require.Error(err) + require.ErrorIs(err, database.ErrNotFound) // Because we no longer ever reject a block in verification, the status // should remain as processing. @@ -862,11 +855,11 @@ func TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { mutableSharedMemory.SharedMemory = m.NewSharedMemory(vm.ctx.ChainID) peerSharedMemory := m.NewSharedMemory(vm.ctx.XChainID) - utxoBytes, err := txs.Codec.Marshal(txs.Version, utxo) + utxoBytes, err := txs.Codec.Marshal(txs.CodecVersion, utxo) require.NoError(err) inputID := utxo.InputID() - err = peerSharedMemory.Apply( + require.NoError(peerSharedMemory.Apply( map[ids.ID]*atomic.Requests{ vm.ctx.ChainID: { PutRequests: []*atomic.Element{ @@ -877,22 +870,20 @@ func TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { }, }, }, - ) - require.NoError(err) + )) // Because the shared memory UTXO has now been populated, the block should // pass verification. 
- err = importBlk.Verify(context.Background()) - require.NoError(err) + require.NoError(importBlk.Verify(context.Background())) // The status shouldn't have been changed during a successful verification. importBlkStatus = importBlk.Status() require.Equal(choices.Processing, importBlkStatus) - newValidatorStartTime1 := newValidatorStartTime0.Add(txexecutor.SyncBound).Add(1 * time.Second) + newValidatorStartTime1 := newValidatorStartTime0.Add(executor.SyncBound).Add(1 * time.Second) newValidatorEndTime1 := newValidatorStartTime1.Add(defaultMaxStakingDuration) - nodeID1 := ids.NodeID(ids.GenerateTestShortID()) + nodeID1 := ids.GenerateTestNodeID() // Create the tx to add the second new validator addValidatorTx1, err := vm.txBuilder.NewAddValidatorTx( @@ -900,10 +891,11 @@ func TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { uint64(newValidatorStartTime1.Unix()), uint64(newValidatorEndTime1.Unix()), nodeID1, - ids.ShortID(nodeID1), + ids.GenerateTestShortID(), reward.PercentDenominator, []*secp256k1.PrivateKey{keys[1]}, ids.ShortEmpty, + nil, ) require.NoError(err) @@ -912,7 +904,7 @@ func TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { preferredID = importBlk.ID() preferredHeight = importBlk.Height() - statelessAddValidatorStandardBlk1, err := blocks.NewBanffStandardBlock( + statelessAddValidatorStandardBlk1, err := block.NewBanffStandardBlock( preferredChainTime, preferredID, preferredHeight+1, @@ -922,8 +914,7 @@ func TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { addValidatorStandardBlk1 := vm.manager.NewBlock(statelessAddValidatorStandardBlk1) - err = addValidatorStandardBlk1.Verify(context.Background()) - require.NoError(err) + require.NoError(addValidatorStandardBlk1.Verify(context.Background())) // Verify that the second new validator now in pending validator set { @@ -943,7 +934,7 @@ func TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { preferredID = addValidatorStandardBlk1.ID() 
preferredHeight = addValidatorStandardBlk1.Height() - statelessAdvanceTimeStandardBlk1, err := blocks.NewBanffStandardBlock( + statelessAdvanceTimeStandardBlk1, err := block.NewBanffStandardBlock( newValidatorStartTime1, preferredID, preferredHeight+1, @@ -952,8 +943,7 @@ func TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { require.NoError(err) advanceTimeStandardBlk1 := vm.manager.NewBlock(statelessAdvanceTimeStandardBlk1) - err = advanceTimeStandardBlk1.Verify(context.Background()) - require.NoError(err) + require.NoError(advanceTimeStandardBlk1.Verify(context.Background())) // Verify that the second new validator is now in the current validator set. { @@ -979,8 +969,7 @@ func TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { advanceTimeStandardBlk1, } for _, blk := range allBlocks { - err = blk.Accept(context.Background()) - require.NoError(err) + require.NoError(blk.Accept(context.Background())) status := blk.Status() require.Equal(choices.Accepted, status) @@ -988,38 +977,37 @@ func TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { // Force a reload of the state from the database. vm.Config.Validators = validators.NewManager() - vm.Config.Validators.Add(constants.PrimaryNetworkID, validators.NewSet()) - is, err := state.New( - vm.dbManager.Current().Database, + execCfg, _ := config.GetExecutionConfig(nil) + newState, err := state.New( + vm.db, nil, prometheus.NewRegistry(), &vm.Config, + execCfg, vm.ctx, metrics.Noop, reward.NewCalculator(vm.Config.RewardConfig), - &utils.Atomic[bool]{}, ) require.NoError(err) - vm.state = is // Verify that validators are in the current validator set with the correct // reward calculated. 
{ - staker0, err := vm.state.GetCurrentValidator(constants.PrimaryNetworkID, nodeID0) + staker0, err := newState.GetCurrentValidator(constants.PrimaryNetworkID, nodeID0) require.NoError(err) require.EqualValues(0, staker0.PotentialReward) - staker1, err := vm.state.GetCurrentValidator(constants.PrimaryNetworkID, nodeID1) + staker1, err := newState.GetCurrentValidator(constants.PrimaryNetworkID, nodeID1) require.NoError(err) require.EqualValues(0, staker1.PotentialReward) - _, err = vm.state.GetPendingValidator(constants.PrimaryNetworkID, nodeID0) + _, err = newState.GetPendingValidator(constants.PrimaryNetworkID, nodeID0) require.ErrorIs(err, database.ErrNotFound) - _, err = vm.state.GetPendingValidator(constants.PrimaryNetworkID, nodeID1) + _, err = newState.GetPendingValidator(constants.PrimaryNetworkID, nodeID1) require.ErrorIs(err, database.ErrNotFound) - currentTimestamp := vm.state.GetTimestamp() + currentTimestamp := newState.GetTimestamp() require.Equal(newValidatorStartTime1.Unix(), currentTimestamp.Unix()) } } @@ -1027,31 +1015,20 @@ func TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { func TestValidatorSetAtCacheOverwriteRegression(t *testing.T) { require := require.New(t) - vm, _, _ := defaultVM() + vm, _, _ := defaultVM(t, cortina) vm.ctx.Lock.Lock() - defer func() { - err := vm.Shutdown(context.Background()) - require.NoError(err) - - vm.ctx.Lock.Unlock() - }() - - nodeID0 := ids.NodeID(keys[0].PublicKey().Address()) - nodeID1 := ids.NodeID(keys[1].PublicKey().Address()) - nodeID2 := ids.NodeID(keys[2].PublicKey().Address()) - nodeID3 := ids.NodeID(keys[3].PublicKey().Address()) - nodeID4 := ids.NodeID(keys[4].PublicKey().Address()) + defer vm.ctx.Lock.Unlock() currentHeight, err := vm.GetCurrentHeight(context.Background()) require.NoError(err) - require.EqualValues(1, currentHeight) + require.Equal(uint64(1), currentHeight) expectedValidators1 := map[ids.NodeID]uint64{ - nodeID0: defaultWeight, - nodeID1: defaultWeight, - nodeID2: 
defaultWeight, - nodeID3: defaultWeight, - nodeID4: defaultWeight, + genesisNodeIDs[0]: defaultWeight, + genesisNodeIDs[1]: defaultWeight, + genesisNodeIDs[2]: defaultWeight, + genesisNodeIDs[3]: defaultWeight, + genesisNodeIDs[4]: defaultWeight, } validators, err := vm.GetValidatorSet(context.Background(), 1, constants.PrimaryNetworkID) require.NoError(err) @@ -1059,33 +1036,33 @@ func TestValidatorSetAtCacheOverwriteRegression(t *testing.T) { require.Equal(weight, validators[nodeID].Weight) } - newValidatorStartTime0 := vm.clock.Time().Add(txexecutor.SyncBound).Add(1 * time.Second) + newValidatorStartTime0 := vm.clock.Time().Add(executor.SyncBound).Add(1 * time.Second) newValidatorEndTime0 := newValidatorStartTime0.Add(defaultMaxStakingDuration) - nodeID5 := ids.GenerateTestNodeID() + extraNodeID := ids.GenerateTestNodeID() // Create the tx to add the first new validator addValidatorTx0, err := vm.txBuilder.NewAddValidatorTx( vm.MaxValidatorStake, uint64(newValidatorStartTime0.Unix()), uint64(newValidatorEndTime0.Unix()), - nodeID5, + extraNodeID, ids.GenerateTestShortID(), reward.PercentDenominator, []*secp256k1.PrivateKey{keys[0]}, ids.GenerateTestShortID(), + nil, ) require.NoError(err) // Create the standard block to add the first new validator - preferred, err := vm.Builder.Preferred() + preferredID := vm.manager.Preferred() + preferred, err := vm.manager.GetBlock(preferredID) require.NoError(err) - preferredChainTime := preferred.Timestamp() - preferredID := preferred.ID() preferredHeight := preferred.Height() - statelessStandardBlk, err := blocks.NewBanffStandardBlock( + statelessStandardBlk, err := block.NewBanffStandardBlock( preferredChainTime, preferredID, preferredHeight+1, @@ -1099,7 +1076,7 @@ func TestValidatorSetAtCacheOverwriteRegression(t *testing.T) { currentHeight, err = vm.GetCurrentHeight(context.Background()) require.NoError(err) - require.EqualValues(2, currentHeight) + require.Equal(uint64(2), currentHeight) for i := uint64(1); i <= 2; 
i++ { validators, err = vm.GetValidatorSet(context.Background(), i, constants.PrimaryNetworkID) @@ -1115,12 +1092,13 @@ func TestValidatorSetAtCacheOverwriteRegression(t *testing.T) { // Create the standard block that moves the first new validator from the // pending validator set into the current validator set. - preferred, err = vm.Builder.Preferred() + preferredID = vm.manager.Preferred() + preferred, err = vm.manager.GetBlock(preferredID) require.NoError(err) preferredID = preferred.ID() preferredHeight = preferred.Height() - statelessStandardBlk, err = blocks.NewBanffStandardBlock( + statelessStandardBlk, err = block.NewBanffStandardBlock( newValidatorStartTime0, preferredID, preferredHeight+1, @@ -1134,7 +1112,7 @@ func TestValidatorSetAtCacheOverwriteRegression(t *testing.T) { currentHeight, err = vm.GetCurrentHeight(context.Background()) require.NoError(err) - require.EqualValues(3, currentHeight) + require.Equal(uint64(3), currentHeight) for i := uint64(1); i <= 2; i++ { validators, err = vm.GetValidatorSet(context.Background(), i, constants.PrimaryNetworkID) @@ -1145,12 +1123,12 @@ func TestValidatorSetAtCacheOverwriteRegression(t *testing.T) { } expectedValidators2 := map[ids.NodeID]uint64{ - nodeID0: defaultWeight, - nodeID1: defaultWeight, - nodeID2: defaultWeight, - nodeID3: defaultWeight, - nodeID4: defaultWeight, - nodeID5: vm.MaxValidatorStake, + genesisNodeIDs[0]: defaultWeight, + genesisNodeIDs[1]: defaultWeight, + genesisNodeIDs[2]: defaultWeight, + genesisNodeIDs[3]: defaultWeight, + genesisNodeIDs[4]: defaultWeight, + extraNodeID: vm.MaxValidatorStake, } validators, err = vm.GetValidatorSet(context.Background(), 3, constants.PrimaryNetworkID) require.NoError(err) @@ -1162,7 +1140,7 @@ func TestValidatorSetAtCacheOverwriteRegression(t *testing.T) { func TestAddDelegatorTxAddBeforeRemove(t *testing.T) { require := require.New(t) - validatorStartTime := banffForkTime.Add(txexecutor.SyncBound).Add(1 * time.Second) + validatorStartTime := 
latestForkTime.Add(executor.SyncBound).Add(1 * time.Second) validatorEndTime := validatorStartTime.Add(360 * 24 * time.Hour) validatorStake := defaultMaxValidatorStake / 5 @@ -1174,20 +1152,15 @@ func TestAddDelegatorTxAddBeforeRemove(t *testing.T) { delegator2EndTime := delegator2StartTime.Add(3 * defaultMinStakingDuration) delegator2Stake := defaultMaxValidatorStake - validatorStake - vm, _, _ := defaultVM() - + vm, _, _ := defaultVM(t, cortina) vm.ctx.Lock.Lock() - defer func() { - err := vm.Shutdown(context.Background()) - require.NoError(err) + defer vm.ctx.Lock.Unlock() - vm.ctx.Lock.Unlock() - }() - - key, err := testKeyFactory.NewPrivateKey() + key, err := secp256k1.NewPrivateKey() require.NoError(err) - id := key.PublicKey().Address() + id := key.Address() + nodeID := ids.GenerateTestNodeID() changeAddr := keys[0].PublicKey().Address() // create valid tx @@ -1195,17 +1168,19 @@ func TestAddDelegatorTxAddBeforeRemove(t *testing.T) { validatorStake, uint64(validatorStartTime.Unix()), uint64(validatorEndTime.Unix()), - ids.NodeID(id), + nodeID, id, reward.PercentDenominator, []*secp256k1.PrivateKey{keys[0], keys[1]}, changeAddr, + nil, ) require.NoError(err) // issue the add validator tx - err = vm.Builder.AddUnverifiedTx(addValidatorTx) - require.NoError(err) + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), addValidatorTx)) + vm.ctx.Lock.Lock() // trigger block creation for the validator tx addValidatorBlock, err := vm.Builder.BuildBlock(context.Background()) @@ -1219,16 +1194,18 @@ func TestAddDelegatorTxAddBeforeRemove(t *testing.T) { delegator1Stake, uint64(delegator1StartTime.Unix()), uint64(delegator1EndTime.Unix()), - ids.NodeID(id), + nodeID, keys[0].PublicKey().Address(), []*secp256k1.PrivateKey{keys[0], keys[1]}, changeAddr, + nil, ) require.NoError(err) // issue the first add delegator tx - err = vm.Builder.AddUnverifiedTx(addFirstDelegatorTx) - require.NoError(err) + vm.ctx.Lock.Unlock() + 
require.NoError(vm.issueTx(context.Background(), addFirstDelegatorTx)) + vm.ctx.Lock.Lock() // trigger block creation for the first add delegator tx addFirstDelegatorBlock, err := vm.Builder.BuildBlock(context.Background()) @@ -1242,54 +1219,55 @@ func TestAddDelegatorTxAddBeforeRemove(t *testing.T) { delegator2Stake, uint64(delegator2StartTime.Unix()), uint64(delegator2EndTime.Unix()), - ids.NodeID(id), + nodeID, keys[0].PublicKey().Address(), []*secp256k1.PrivateKey{keys[0], keys[1]}, changeAddr, + nil, ) require.NoError(err) // attempting to issue the second add delegator tx should fail because the // total stake weight would go over the limit. - require.Error(vm.Builder.AddUnverifiedTx(addSecondDelegatorTx)) + vm.ctx.Lock.Unlock() + err = vm.issueTx(context.Background(), addSecondDelegatorTx) + require.ErrorIs(err, executor.ErrOverDelegated) + vm.ctx.Lock.Lock() } func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionNotTracked(t *testing.T) { require := require.New(t) - validatorStartTime := banffForkTime.Add(txexecutor.SyncBound).Add(1 * time.Second) + validatorStartTime := latestForkTime.Add(executor.SyncBound).Add(1 * time.Second) validatorEndTime := validatorStartTime.Add(360 * 24 * time.Hour) - vm, _, _ := defaultVM() - + vm, _, _ := defaultVM(t, cortina) vm.ctx.Lock.Lock() - defer func() { - err := vm.Shutdown(context.Background()) - require.NoError(err) + defer vm.ctx.Lock.Unlock() - vm.ctx.Lock.Unlock() - }() - - key, err := testKeyFactory.NewPrivateKey() + key, err := secp256k1.NewPrivateKey() require.NoError(err) - id := key.PublicKey().Address() + id := key.Address() + nodeID := ids.GenerateTestNodeID() changeAddr := keys[0].PublicKey().Address() addValidatorTx, err := vm.txBuilder.NewAddValidatorTx( defaultMaxValidatorStake, uint64(validatorStartTime.Unix()), uint64(validatorEndTime.Unix()), - ids.NodeID(id), + nodeID, id, reward.PercentDenominator, []*secp256k1.PrivateKey{keys[0], keys[1]}, changeAddr, + nil, ) require.NoError(err) - 
err = vm.Builder.AddUnverifiedTx(addValidatorTx) - require.NoError(err) + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), addValidatorTx)) + vm.ctx.Lock.Lock() // trigger block creation for the validator tx addValidatorBlock, err := vm.Builder.BuildBlock(context.Background()) @@ -1303,11 +1281,13 @@ func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionNotTracked(t []ids.ShortID{changeAddr}, []*secp256k1.PrivateKey{keys[0], keys[1]}, changeAddr, + nil, ) require.NoError(err) - err = vm.Builder.AddUnverifiedTx(createSubnetTx) - require.NoError(err) + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), createSubnetTx)) + vm.ctx.Lock.Lock() // trigger block creation for the subnet tx createSubnetBlock, err := vm.Builder.BuildBlock(context.Background()) @@ -1320,15 +1300,17 @@ func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionNotTracked(t defaultMaxValidatorStake, uint64(validatorStartTime.Unix()), uint64(validatorEndTime.Unix()), - ids.NodeID(id), + nodeID, createSubnetTx.ID(), []*secp256k1.PrivateKey{keys[0], keys[1]}, changeAddr, + nil, ) require.NoError(err) - err = vm.Builder.AddUnverifiedTx(addSubnetValidatorTx) - require.NoError(err) + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), addSubnetValidatorTx)) + vm.ctx.Lock.Lock() // trigger block creation for the validator tx addSubnetValidatorBlock, err := vm.Builder.BuildBlock(context.Background()) @@ -1346,10 +1328,11 @@ func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionNotTracked(t require.Empty(emptyValidatorSet) removeSubnetValidatorTx, err := vm.txBuilder.NewRemoveSubnetValidatorTx( - ids.NodeID(id), + nodeID, createSubnetTx.ID(), []*secp256k1.PrivateKey{keys[0], keys[1]}, changeAddr, + nil, ) require.NoError(err) @@ -1357,8 +1340,9 @@ func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionNotTracked(t // validator set into the current validator set. 
vm.clock.Set(validatorStartTime) - err = vm.Builder.AddUnverifiedTx(removeSubnetValidatorTx) - require.NoError(err) + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), removeSubnetValidatorTx)) + vm.ctx.Lock.Lock() // trigger block creation for the validator tx removeSubnetValidatorBlock, err := vm.Builder.BuildBlock(context.Background()) @@ -1379,39 +1363,36 @@ func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionNotTracked(t func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionTracked(t *testing.T) { require := require.New(t) - validatorStartTime := banffForkTime.Add(txexecutor.SyncBound).Add(1 * time.Second) + validatorStartTime := latestForkTime.Add(executor.SyncBound).Add(1 * time.Second) validatorEndTime := validatorStartTime.Add(360 * 24 * time.Hour) - vm, _, _ := defaultVM() - + vm, _, _ := defaultVM(t, cortina) vm.ctx.Lock.Lock() - defer func() { - err := vm.Shutdown(context.Background()) - require.NoError(err) + defer vm.ctx.Lock.Unlock() - vm.ctx.Lock.Unlock() - }() - - key, err := testKeyFactory.NewPrivateKey() + key, err := secp256k1.NewPrivateKey() require.NoError(err) id := key.PublicKey().Address() + nodeID := ids.GenerateTestNodeID() changeAddr := keys[0].PublicKey().Address() addValidatorTx, err := vm.txBuilder.NewAddValidatorTx( defaultMaxValidatorStake, uint64(validatorStartTime.Unix()), uint64(validatorEndTime.Unix()), - ids.NodeID(id), + nodeID, id, reward.PercentDenominator, []*secp256k1.PrivateKey{keys[0], keys[1]}, changeAddr, + nil, ) require.NoError(err) - err = vm.Builder.AddUnverifiedTx(addValidatorTx) - require.NoError(err) + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), addValidatorTx)) + vm.ctx.Lock.Lock() // trigger block creation for the validator tx addValidatorBlock, err := vm.Builder.BuildBlock(context.Background()) @@ -1425,11 +1406,13 @@ func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionTracked(t *t []ids.ShortID{changeAddr}, 
[]*secp256k1.PrivateKey{keys[0], keys[1]}, changeAddr, + nil, ) require.NoError(err) - err = vm.Builder.AddUnverifiedTx(createSubnetTx) - require.NoError(err) + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), createSubnetTx)) + vm.ctx.Lock.Lock() // trigger block creation for the subnet tx createSubnetBlock, err := vm.Builder.BuildBlock(context.Background()) @@ -1438,27 +1421,21 @@ func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionTracked(t *t require.NoError(createSubnetBlock.Accept(context.Background())) require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) - vm.TrackedSubnets.Add(createSubnetTx.ID()) - subnetValidators := validators.NewSet() - err = vm.state.ValidatorSet(createSubnetTx.ID(), subnetValidators) - require.NoError(err) - - added := vm.Validators.Add(createSubnetTx.ID(), subnetValidators) - require.True(added) - addSubnetValidatorTx, err := vm.txBuilder.NewAddSubnetValidatorTx( defaultMaxValidatorStake, uint64(validatorStartTime.Unix()), uint64(validatorEndTime.Unix()), - ids.NodeID(id), + nodeID, createSubnetTx.ID(), []*secp256k1.PrivateKey{keys[0], keys[1]}, changeAddr, + nil, ) require.NoError(err) - err = vm.Builder.AddUnverifiedTx(addSubnetValidatorTx) - require.NoError(err) + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), addSubnetValidatorTx)) + vm.ctx.Lock.Lock() // trigger block creation for the validator tx addSubnetValidatorBlock, err := vm.Builder.BuildBlock(context.Background()) @@ -1468,10 +1445,11 @@ func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionTracked(t *t require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) removeSubnetValidatorTx, err := vm.txBuilder.NewRemoveSubnetValidatorTx( - ids.NodeID(id), + nodeID, createSubnetTx.ID(), []*secp256k1.PrivateKey{keys[0], keys[1]}, changeAddr, + nil, ) require.NoError(err) @@ -1479,8 +1457,9 @@ func 
TestRemovePermissionedValidatorDuringPendingToCurrentTransitionTracked(t *t // validator set into the current validator set. vm.clock.Set(validatorStartTime) - err = vm.Builder.AddUnverifiedTx(removeSubnetValidatorTx) - require.NoError(err) + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), removeSubnetValidatorTx)) + vm.ctx.Lock.Lock() // trigger block creation for the validator tx removeSubnetValidatorBlock, err := vm.Builder.BuildBlock(context.Background()) @@ -1489,3 +1468,776 @@ func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionTracked(t *t require.NoError(removeSubnetValidatorBlock.Accept(context.Background())) require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) } + +// GetValidatorSet must return the BLS keys for a given validator correctly when +// queried at a previous height, even in case it has currently expired +func TestSubnetValidatorBLSKeyDiffAfterExpiry(t *testing.T) { + // setup + require := require.New(t) + vm, _, _ := defaultVM(t, cortina) + vm.ctx.Lock.Lock() + defer vm.ctx.Lock.Unlock() + + subnetID := testSubnet1.TxID + + // setup time + currentTime := defaultGenesisTime + vm.clock.Set(currentTime) + vm.state.SetTimestamp(currentTime) + + // A subnet validator stakes and then stops; also its primary network counterpart stops staking + var ( + primaryStartTime = currentTime.Add(executor.SyncBound) + subnetStartTime = primaryStartTime.Add(executor.SyncBound) + subnetEndTime = subnetStartTime.Add(defaultMinStakingDuration) + primaryEndTime = subnetEndTime.Add(time.Second) + primaryReStartTime = primaryEndTime.Add(executor.SyncBound) + primaryReEndTime = primaryReStartTime.Add(defaultMinStakingDuration) + ) + + // insert primary network validator + var ( + nodeID = ids.GenerateTestNodeID() + addr = keys[0].PublicKey().Address() + ) + sk1, err := bls.NewSecretKey() + require.NoError(err) + + // build primary network validator with BLS key + primaryTx, err := 
vm.txBuilder.NewAddPermissionlessValidatorTx( + vm.MinValidatorStake, + uint64(primaryStartTime.Unix()), + uint64(primaryEndTime.Unix()), + nodeID, + signer.NewProofOfPossession(sk1), + addr, // reward address + reward.PercentDenominator, + keys, + addr, // change address + nil, + ) + require.NoError(err) + uPrimaryTx := primaryTx.Unsigned.(*txs.AddPermissionlessValidatorTx) + + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), primaryTx)) + vm.ctx.Lock.Lock() + require.NoError(buildAndAcceptStandardBlock(vm)) + + // move time ahead, promoting primary validator to current + currentTime = primaryStartTime + vm.clock.Set(currentTime) + vm.state.SetTimestamp(currentTime) + require.NoError(buildAndAcceptStandardBlock(vm)) + + _, err = vm.state.GetCurrentValidator(constants.PrimaryNetworkID, nodeID) + require.NoError(err) + + primaryStartHeight, err := vm.GetCurrentHeight(context.Background()) + require.NoError(err) + + // insert the subnet validator + subnetTx, err := vm.txBuilder.NewAddSubnetValidatorTx( + 1, // Weight + uint64(subnetStartTime.Unix()), // Start time + uint64(subnetEndTime.Unix()), // end time + nodeID, // Node ID + subnetID, + []*secp256k1.PrivateKey{keys[0], keys[1]}, + addr, + nil, + ) + require.NoError(err) + + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), subnetTx)) + vm.ctx.Lock.Lock() + require.NoError(buildAndAcceptStandardBlock(vm)) + + // move time ahead, promoting the subnet validator to current + currentTime = subnetStartTime + vm.clock.Set(currentTime) + vm.state.SetTimestamp(currentTime) + require.NoError(buildAndAcceptStandardBlock(vm)) + + _, err = vm.state.GetCurrentValidator(subnetID, nodeID) + require.NoError(err) + + subnetStartHeight, err := vm.GetCurrentHeight(context.Background()) + require.NoError(err) + + // move time ahead, terminating the subnet validator + currentTime = subnetEndTime + vm.clock.Set(currentTime) + vm.state.SetTimestamp(currentTime) + 
require.NoError(buildAndAcceptStandardBlock(vm)) + + _, err = vm.state.GetCurrentValidator(subnetID, nodeID) + require.ErrorIs(err, database.ErrNotFound) + + subnetEndHeight, err := vm.GetCurrentHeight(context.Background()) + require.NoError(err) + + // move time ahead, terminating primary network validator + currentTime = primaryEndTime + vm.clock.Set(currentTime) + vm.state.SetTimestamp(currentTime) + + blk, err := vm.Builder.BuildBlock(context.Background()) // must be a proposal block rewarding the primary validator + require.NoError(err) + require.NoError(blk.Verify(context.Background())) + + proposalBlk := blk.(snowman.OracleBlock) + options, err := proposalBlk.Options(context.Background()) + require.NoError(err) + + commit := options[0].(*blockexecutor.Block) + require.IsType(&block.BanffCommitBlock{}, commit.Block) + + require.NoError(blk.Accept(context.Background())) + require.NoError(commit.Verify(context.Background())) + require.NoError(commit.Accept(context.Background())) + require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) + + _, err = vm.state.GetCurrentValidator(constants.PrimaryNetworkID, nodeID) + require.ErrorIs(err, database.ErrNotFound) + + primaryEndHeight, err := vm.GetCurrentHeight(context.Background()) + require.NoError(err) + + // reinsert primary validator with a different BLS key + sk2, err := bls.NewSecretKey() + require.NoError(err) + require.NotEqual(sk1, sk2) + + primaryRestartTx, err := vm.txBuilder.NewAddPermissionlessValidatorTx( + vm.MinValidatorStake, + uint64(primaryReStartTime.Unix()), + uint64(primaryReEndTime.Unix()), + nodeID, + signer.NewProofOfPossession(sk2), + addr, // reward address + reward.PercentDenominator, + keys, + addr, // change address + nil, + ) + require.NoError(err) + uPrimaryRestartTx := primaryRestartTx.Unsigned.(*txs.AddPermissionlessValidatorTx) + + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), primaryRestartTx)) + vm.ctx.Lock.Lock() + 
require.NoError(buildAndAcceptStandardBlock(vm)) + + // move time ahead, promoting restarted primary validator to current + currentTime = primaryReStartTime + vm.clock.Set(currentTime) + vm.state.SetTimestamp(currentTime) + require.NoError(buildAndAcceptStandardBlock(vm)) + + _, err = vm.state.GetCurrentValidator(constants.PrimaryNetworkID, nodeID) + require.NoError(err) + + primaryRestartHeight, err := vm.GetCurrentHeight(context.Background()) + require.NoError(err) + + // Show that validators are rebuilt with the right BLS key + for height := primaryStartHeight; height < primaryEndHeight; height++ { + require.NoError(checkValidatorBlsKeyIsSet( + vm.State, + nodeID, + constants.PrimaryNetworkID, + height, + uPrimaryTx.Signer.Key(), + )) + } + for height := primaryEndHeight; height < primaryRestartHeight; height++ { + err := checkValidatorBlsKeyIsSet( + vm.State, + nodeID, + constants.PrimaryNetworkID, + primaryEndHeight, + uPrimaryTx.Signer.Key(), + ) + require.ErrorIs(err, database.ErrNotFound) + } + require.NoError(checkValidatorBlsKeyIsSet( + vm.State, + nodeID, + constants.PrimaryNetworkID, + primaryRestartHeight, + uPrimaryRestartTx.Signer.Key(), + )) + + for height := subnetStartHeight; height < subnetEndHeight; height++ { + require.NoError(checkValidatorBlsKeyIsSet( + vm.State, + nodeID, + subnetID, + height, + uPrimaryTx.Signer.Key(), + )) + } + + for height := subnetEndHeight; height <= primaryRestartHeight; height++ { + err := checkValidatorBlsKeyIsSet( + vm.State, + nodeID, + subnetID, + primaryEndHeight, + uPrimaryTx.Signer.Key(), + ) + require.ErrorIs(err, database.ErrNotFound) + } +} + +func TestPrimaryNetworkValidatorPopulatedToEmptyBLSKeyDiff(t *testing.T) { + // A primary network validator has an empty BLS key. Then it restakes adding + // the BLS key. Querying the validator set back when BLS key was empty must + // return an empty BLS key. 
+ + // setup + require := require.New(t) + vm, _, _ := defaultVM(t, cortina) + vm.ctx.Lock.Lock() + defer vm.ctx.Lock.Unlock() + + // setup time + currentTime := defaultGenesisTime + vm.clock.Set(currentTime) + vm.state.SetTimestamp(currentTime) + + // A primary network validator stake twice + var ( + primaryStartTime1 = currentTime.Add(executor.SyncBound) + primaryEndTime1 = primaryStartTime1.Add(defaultMinStakingDuration) + primaryStartTime2 = primaryEndTime1.Add(executor.SyncBound) + primaryEndTime2 = primaryStartTime2.Add(defaultMinStakingDuration) + ) + + // Add a primary network validator with no BLS key + nodeID := ids.GenerateTestNodeID() + addr := keys[0].PublicKey().Address() + primaryTx1, err := vm.txBuilder.NewAddValidatorTx( + vm.MinValidatorStake, + uint64(primaryStartTime1.Unix()), + uint64(primaryEndTime1.Unix()), + nodeID, + addr, + reward.PercentDenominator, + []*secp256k1.PrivateKey{keys[0]}, + addr, + nil, + ) + require.NoError(err) + + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), primaryTx1)) + vm.ctx.Lock.Lock() + require.NoError(buildAndAcceptStandardBlock(vm)) + + // move time ahead, promoting primary validator to current + currentTime = primaryStartTime1 + vm.clock.Set(currentTime) + vm.state.SetTimestamp(currentTime) + require.NoError(buildAndAcceptStandardBlock(vm)) + + _, err = vm.state.GetCurrentValidator(constants.PrimaryNetworkID, nodeID) + require.NoError(err) + + primaryStartHeight, err := vm.GetCurrentHeight(context.Background()) + require.NoError(err) + + // move time ahead, terminating primary network validator + currentTime = primaryEndTime1 + vm.clock.Set(currentTime) + vm.state.SetTimestamp(currentTime) + + blk, err := vm.Builder.BuildBlock(context.Background()) // must be a proposal block rewarding the primary validator + require.NoError(err) + require.NoError(blk.Verify(context.Background())) + + proposalBlk := blk.(snowman.OracleBlock) + options, err := proposalBlk.Options(context.Background()) + 
require.NoError(err) + + commit := options[0].(*blockexecutor.Block) + require.IsType(&block.BanffCommitBlock{}, commit.Block) + + require.NoError(blk.Accept(context.Background())) + require.NoError(commit.Verify(context.Background())) + require.NoError(commit.Accept(context.Background())) + require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) + + _, err = vm.state.GetCurrentValidator(constants.PrimaryNetworkID, nodeID) + require.ErrorIs(err, database.ErrNotFound) + + primaryEndHeight, err := vm.GetCurrentHeight(context.Background()) + require.NoError(err) + + // reinsert primary validator with a different BLS key + sk2, err := bls.NewSecretKey() + require.NoError(err) + + primaryRestartTx, err := vm.txBuilder.NewAddPermissionlessValidatorTx( + vm.MinValidatorStake, + uint64(primaryStartTime2.Unix()), + uint64(primaryEndTime2.Unix()), + nodeID, + signer.NewProofOfPossession(sk2), + addr, // reward address + reward.PercentDenominator, + keys, + addr, // change address + nil, + ) + require.NoError(err) + + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), primaryRestartTx)) + vm.ctx.Lock.Lock() + require.NoError(buildAndAcceptStandardBlock(vm)) + + // move time ahead, promoting restarted primary validator to current + currentTime = primaryStartTime2 + vm.clock.Set(currentTime) + vm.state.SetTimestamp(currentTime) + require.NoError(buildAndAcceptStandardBlock(vm)) + + _, err = vm.state.GetCurrentValidator(constants.PrimaryNetworkID, nodeID) + require.NoError(err) + + emptySigner := &signer.Empty{} + for height := primaryStartHeight; height < primaryEndHeight; height++ { + require.NoError(checkValidatorBlsKeyIsSet( + vm.State, + nodeID, + constants.PrimaryNetworkID, + height, + emptySigner.Key(), + )) + } +} + +func TestSubnetValidatorPopulatedToEmptyBLSKeyDiff(t *testing.T) { + // A primary network validator has an empty BLS key and a subnet validator. 
+ // Primary network validator terminates its first staking cycle and it + // restakes adding the BLS key. Querying the validator set back when BLS key + // was empty must return an empty BLS key for the subnet validator + + // setup + require := require.New(t) + vm, _, _ := defaultVM(t, cortina) + vm.ctx.Lock.Lock() + defer vm.ctx.Lock.Unlock() + + subnetID := testSubnet1.TxID + + // setup time + currentTime := defaultGenesisTime + vm.clock.Set(currentTime) + vm.state.SetTimestamp(currentTime) + + // A primary network validator stake twice + var ( + primaryStartTime1 = currentTime.Add(executor.SyncBound) + subnetStartTime = primaryStartTime1.Add(executor.SyncBound) + subnetEndTime = subnetStartTime.Add(defaultMinStakingDuration) + primaryEndTime1 = subnetEndTime.Add(time.Second) + primaryStartTime2 = primaryEndTime1.Add(executor.SyncBound) + primaryEndTime2 = primaryStartTime2.Add(defaultMinStakingDuration) + ) + + // Add a primary network validator with no BLS key + nodeID := ids.GenerateTestNodeID() + addr := keys[0].PublicKey().Address() + primaryTx1, err := vm.txBuilder.NewAddValidatorTx( + vm.MinValidatorStake, + uint64(primaryStartTime1.Unix()), + uint64(primaryEndTime1.Unix()), + nodeID, + addr, + reward.PercentDenominator, + []*secp256k1.PrivateKey{keys[0]}, + addr, + nil, + ) + require.NoError(err) + + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), primaryTx1)) + vm.ctx.Lock.Lock() + require.NoError(buildAndAcceptStandardBlock(vm)) + + // move time ahead, promoting primary validator to current + currentTime = primaryStartTime1 + vm.clock.Set(currentTime) + vm.state.SetTimestamp(currentTime) + require.NoError(buildAndAcceptStandardBlock(vm)) + + _, err = vm.state.GetCurrentValidator(constants.PrimaryNetworkID, nodeID) + require.NoError(err) + + primaryStartHeight, err := vm.GetCurrentHeight(context.Background()) + require.NoError(err) + + // insert the subnet validator + subnetTx, err := vm.txBuilder.NewAddSubnetValidatorTx( + 1, 
// Weight + uint64(subnetStartTime.Unix()), // Start time + uint64(subnetEndTime.Unix()), // end time + nodeID, // Node ID + subnetID, + []*secp256k1.PrivateKey{keys[0], keys[1]}, + addr, + nil, + ) + require.NoError(err) + + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), subnetTx)) + vm.ctx.Lock.Lock() + require.NoError(buildAndAcceptStandardBlock(vm)) + + // move time ahead, promoting the subnet validator to current + currentTime = subnetStartTime + vm.clock.Set(currentTime) + vm.state.SetTimestamp(currentTime) + require.NoError(buildAndAcceptStandardBlock(vm)) + + _, err = vm.state.GetCurrentValidator(subnetID, nodeID) + require.NoError(err) + + subnetStartHeight, err := vm.GetCurrentHeight(context.Background()) + require.NoError(err) + + // move time ahead, terminating the subnet validator + currentTime = subnetEndTime + vm.clock.Set(currentTime) + vm.state.SetTimestamp(currentTime) + require.NoError(buildAndAcceptStandardBlock(vm)) + + _, err = vm.state.GetCurrentValidator(subnetID, nodeID) + require.ErrorIs(err, database.ErrNotFound) + + subnetEndHeight, err := vm.GetCurrentHeight(context.Background()) + require.NoError(err) + + // move time ahead, terminating primary network validator + currentTime = primaryEndTime1 + vm.clock.Set(currentTime) + vm.state.SetTimestamp(currentTime) + + blk, err := vm.Builder.BuildBlock(context.Background()) // must be a proposal block rewarding the primary validator + require.NoError(err) + require.NoError(blk.Verify(context.Background())) + + proposalBlk := blk.(snowman.OracleBlock) + options, err := proposalBlk.Options(context.Background()) + require.NoError(err) + + commit := options[0].(*blockexecutor.Block) + require.IsType(&block.BanffCommitBlock{}, commit.Block) + + require.NoError(blk.Accept(context.Background())) + require.NoError(commit.Verify(context.Background())) + require.NoError(commit.Accept(context.Background())) + require.NoError(vm.SetPreference(context.Background(), 
vm.manager.LastAccepted())) + + _, err = vm.state.GetCurrentValidator(constants.PrimaryNetworkID, nodeID) + require.ErrorIs(err, database.ErrNotFound) + + primaryEndHeight, err := vm.GetCurrentHeight(context.Background()) + require.NoError(err) + + // reinsert primary validator with a different BLS key + sk2, err := bls.NewSecretKey() + require.NoError(err) + + primaryRestartTx, err := vm.txBuilder.NewAddPermissionlessValidatorTx( + vm.MinValidatorStake, + uint64(primaryStartTime2.Unix()), + uint64(primaryEndTime2.Unix()), + nodeID, + signer.NewProofOfPossession(sk2), + addr, // reward address + reward.PercentDenominator, + keys, + addr, // change address + nil, + ) + require.NoError(err) + + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), primaryRestartTx)) + vm.ctx.Lock.Lock() + require.NoError(buildAndAcceptStandardBlock(vm)) + + // move time ahead, promoting restarted primary validator to current + currentTime = primaryStartTime2 + vm.clock.Set(currentTime) + vm.state.SetTimestamp(currentTime) + + require.NoError(buildAndAcceptStandardBlock(vm)) + _, err = vm.state.GetCurrentValidator(constants.PrimaryNetworkID, nodeID) + require.NoError(err) + + emptySigner := &signer.Empty{} + for height := primaryStartHeight; height < primaryEndHeight; height++ { + require.NoError(checkValidatorBlsKeyIsSet( + vm.State, + nodeID, + constants.PrimaryNetworkID, + height, + emptySigner.Key(), + )) + } + for height := subnetStartHeight; height < subnetEndHeight; height++ { + require.NoError(checkValidatorBlsKeyIsSet( + vm.State, + nodeID, + subnetID, + height, + emptySigner.Key(), + )) + } +} + +func TestSubnetValidatorSetAfterPrimaryNetworkValidatorRemoval(t *testing.T) { + // A primary network validator and a subnet validator are running. + // Primary network validator terminates its staking cycle. + // Querying the validator set when the subnet validator existed should + // succeed. 
+ + // setup + require := require.New(t) + vm, _, _ := defaultVM(t, cortina) + vm.ctx.Lock.Lock() + defer vm.ctx.Lock.Unlock() + + subnetID := testSubnet1.TxID + + // setup time + currentTime := defaultGenesisTime + vm.clock.Set(currentTime) + vm.state.SetTimestamp(currentTime) + + // A primary network validator stake twice + var ( + primaryStartTime1 = currentTime.Add(executor.SyncBound) + subnetStartTime = primaryStartTime1.Add(executor.SyncBound) + subnetEndTime = subnetStartTime.Add(defaultMinStakingDuration) + primaryEndTime1 = subnetEndTime.Add(time.Second) + ) + + // Add a primary network validator with no BLS key + nodeID := ids.GenerateTestNodeID() + addr := keys[0].PublicKey().Address() + primaryTx1, err := vm.txBuilder.NewAddValidatorTx( + vm.MinValidatorStake, + uint64(primaryStartTime1.Unix()), + uint64(primaryEndTime1.Unix()), + nodeID, + addr, + reward.PercentDenominator, + []*secp256k1.PrivateKey{keys[0]}, + addr, + nil, + ) + require.NoError(err) + + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), primaryTx1)) + vm.ctx.Lock.Lock() + require.NoError(buildAndAcceptStandardBlock(vm)) + + // move time ahead, promoting primary validator to current + currentTime = primaryStartTime1 + vm.clock.Set(currentTime) + vm.state.SetTimestamp(currentTime) + require.NoError(buildAndAcceptStandardBlock(vm)) + + _, err = vm.state.GetCurrentValidator(constants.PrimaryNetworkID, nodeID) + require.NoError(err) + + // insert the subnet validator + subnetTx, err := vm.txBuilder.NewAddSubnetValidatorTx( + 1, // Weight + uint64(subnetStartTime.Unix()), // Start time + uint64(subnetEndTime.Unix()), // end time + nodeID, // Node ID + subnetID, + []*secp256k1.PrivateKey{keys[0], keys[1]}, + addr, + nil, + ) + require.NoError(err) + + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), subnetTx)) + vm.ctx.Lock.Lock() + require.NoError(buildAndAcceptStandardBlock(vm)) + + // move time ahead, promoting the subnet validator to current + 
currentTime = subnetStartTime + vm.clock.Set(currentTime) + vm.state.SetTimestamp(currentTime) + require.NoError(buildAndAcceptStandardBlock(vm)) + + _, err = vm.state.GetCurrentValidator(subnetID, nodeID) + require.NoError(err) + + subnetStartHeight, err := vm.GetCurrentHeight(context.Background()) + require.NoError(err) + + // move time ahead, terminating the subnet validator + currentTime = subnetEndTime + vm.clock.Set(currentTime) + vm.state.SetTimestamp(currentTime) + require.NoError(buildAndAcceptStandardBlock(vm)) + + _, err = vm.state.GetCurrentValidator(subnetID, nodeID) + require.ErrorIs(err, database.ErrNotFound) + + // move time ahead, terminating primary network validator + currentTime = primaryEndTime1 + vm.clock.Set(currentTime) + vm.state.SetTimestamp(currentTime) + + blk, err := vm.Builder.BuildBlock(context.Background()) // must be a proposal block rewarding the primary validator + require.NoError(err) + require.NoError(blk.Verify(context.Background())) + + proposalBlk := blk.(snowman.OracleBlock) + options, err := proposalBlk.Options(context.Background()) + require.NoError(err) + + commit := options[0].(*blockexecutor.Block) + require.IsType(&block.BanffCommitBlock{}, commit.Block) + + require.NoError(blk.Accept(context.Background())) + require.NoError(commit.Verify(context.Background())) + require.NoError(commit.Accept(context.Background())) + require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) + + _, err = vm.state.GetCurrentValidator(constants.PrimaryNetworkID, nodeID) + require.ErrorIs(err, database.ErrNotFound) + + // Generating the validator set should not error when re-introducing a + // subnet validator whose primary network validator was also removed. 
+ _, err = vm.State.GetValidatorSet(context.Background(), subnetStartHeight, subnetID) + require.NoError(err) +} + +func TestValidatorSetRaceCondition(t *testing.T) { + require := require.New(t) + vm, _, _ := defaultVM(t, cortina) + vm.ctx.Lock.Lock() + defer vm.ctx.Lock.Unlock() + + nodeID := ids.GenerateTestNodeID() + require.NoError(vm.Connected(context.Background(), nodeID, version.CurrentApp)) + + protocolAppRequestBytest, err := gossip.MarshalAppRequest( + bloom.EmptyFilter.Marshal(), + ids.Empty[:], + ) + require.NoError(err) + + appRequestBytes := p2p.PrefixMessage( + p2p.ProtocolPrefix(network.TxGossipHandlerID), + protocolAppRequestBytest, + ) + + var ( + eg errgroup.Group + ctx, cancel = context.WithCancel(context.Background()) + ) + // keep 10 workers running + for i := 0; i < 10; i++ { + eg.Go(func() error { + for ctx.Err() == nil { + err := vm.AppRequest( + context.Background(), + nodeID, + 0, + time.Now().Add(time.Hour), + appRequestBytes, + ) + if err != nil { + return err + } + } + return nil + }) + } + + // If the validator set lock isn't held, the race detector should fail here. + for i := uint64(0); i < 1000; i++ { + blk, err := block.NewBanffStandardBlock( + time.Now(), + vm.state.GetLastAccepted(), + i, + nil, + ) + require.NoError(err) + + vm.state.SetLastAccepted(blk.ID()) + vm.state.SetHeight(blk.Height()) + vm.state.AddStatelessBlock(blk) + } + + // If the validator set lock is grabbed, we need to make sure to release the + // lock to avoid a deadlock. 
+ vm.ctx.Lock.Unlock() + cancel() // stop and wait for workers + require.NoError(eg.Wait()) + vm.ctx.Lock.Lock() +} + +func buildAndAcceptStandardBlock(vm *VM) error { + blk, err := vm.Builder.BuildBlock(context.Background()) + if err != nil { + return err + } + + if err := blk.Verify(context.Background()); err != nil { + return err + } + + if err := blk.Accept(context.Background()); err != nil { + return err + } + + return vm.SetPreference(context.Background(), vm.manager.LastAccepted()) +} + +func checkValidatorBlsKeyIsSet( + valState validators.State, + nodeID ids.NodeID, + subnetID ids.ID, + height uint64, + expectedBlsKey *bls.PublicKey, +) error { + vals, err := valState.GetValidatorSet(context.Background(), height, subnetID) + if err != nil { + return err + } + + val, found := vals[nodeID] + switch { + case !found: + return database.ErrNotFound + case expectedBlsKey == val.PublicKey: + return nil + case expectedBlsKey == nil && val.PublicKey != nil: + return errors.New("unexpected BLS key") + case expectedBlsKey != nil && val.PublicKey == nil: + return errors.New("missing BLS key") + case !bytes.Equal(bls.SerializePublicKey(expectedBlsKey), bls.SerializePublicKey(val.PublicKey)): + return errors.New("incorrect BLS key") + default: + return nil + } +} diff --git a/avalanchego/vms/platformvm/vm_test.go b/avalanchego/vms/platformvm/vm_test.go index 539fd386..1b16e72b 100644 --- a/avalanchego/vms/platformvm/vm_test.go +++ b/avalanchego/vms/platformvm/vm_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package platformvm @@ -6,21 +6,17 @@ package platformvm import ( "bytes" "context" - "errors" "fmt" "testing" "time" - "github.com/golang/mock/gomock" - "github.com/prometheus/client_golang/prometheus" - "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/chains" "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/database/manager" + "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/message" @@ -37,6 +33,7 @@ import ( "github.com/ava-labs/avalanchego/snow/networking/router" "github.com/ava-labs/avalanchego/snow/networking/sender" "github.com/ava-labs/avalanchego/snow/networking/timeout" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/snow/uptime" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/subnets" @@ -53,14 +50,13 @@ import ( "github.com/ava-labs/avalanchego/utils/timer" "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/utils/units" - "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/version" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/api" - "github.com/ava-labs/avalanchego/vms/platformvm/blocks" + "github.com/ava-labs/avalanchego/vms/platformvm/block" "github.com/ava-labs/avalanchego/vms/platformvm/config" "github.com/ava-labs/avalanchego/vms/platformvm/reward" - "github.com/ava-labs/avalanchego/vms/platformvm/state" + "github.com/ava-labs/avalanchego/vms/platformvm/signer" "github.com/ava-labs/avalanchego/vms/platformvm/status" "github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/ava-labs/avalanchego/vms/secp256k1fx" @@ -69,13 +65,22 @@ import ( smeng "github.com/ava-labs/avalanchego/snow/engine/snowman" snowgetter 
"github.com/ava-labs/avalanchego/snow/engine/snowman/getter" timetracker "github.com/ava-labs/avalanchego/snow/networking/tracker" - blockexecutor "github.com/ava-labs/avalanchego/vms/platformvm/blocks/executor" + blockbuilder "github.com/ava-labs/avalanchego/vms/platformvm/block/builder" + blockexecutor "github.com/ava-labs/avalanchego/vms/platformvm/block/executor" + txbuilder "github.com/ava-labs/avalanchego/vms/platformvm/txs/builder" txexecutor "github.com/ava-labs/avalanchego/vms/platformvm/txs/executor" ) const ( - testNetworkID = 10 // To be used in tests - defaultWeight = 10000 + apricotPhase3 fork = iota + apricotPhase5 + banff + cortina + durango + + latestFork = durango + + defaultWeight uint64 = 10000 ) var ( @@ -89,9 +94,6 @@ var ( SupplyCap: 720 * units.MegaAvax, } - // AVAX asset ID in tests - avaxAssetID = ids.ID{'y', 'e', 'e', 't'} - defaultTxFee = uint64(100) // chain timestamp at genesis @@ -103,176 +105,65 @@ var ( // time that genesis validators stop validating defaultValidateEndTime = defaultValidateStartTime.Add(10 * defaultMinStakingDuration) - banffForkTime = defaultValidateEndTime.Add(-5 * defaultMinStakingDuration) + latestForkTime = defaultGenesisTime.Add(time.Second) // each key controls an address that has [defaultBalance] AVAX at genesis keys = secp256k1.TestKeys() - defaultMinValidatorStake = 5 * units.MilliAvax - defaultMaxValidatorStake = 500 * units.MilliAvax + // Node IDs of genesis validators. 
Initialized in init function + genesisNodeIDs []ids.NodeID defaultMinDelegatorStake = 1 * units.MilliAvax - - // amount all genesis validators have in defaultVM - defaultBalance = 100 * defaultMinValidatorStake + defaultMinValidatorStake = 5 * defaultMinDelegatorStake + defaultMaxValidatorStake = 100 * defaultMinValidatorStake + defaultBalance = 2 * defaultMaxValidatorStake // amount all genesis validators have in defaultVM // subnet that exists at genesis in defaultVM // Its controlKeys are keys[0], keys[1], keys[2] // Its threshold is 2 testSubnet1 *txs.Tx testSubnet1ControlKeys = keys[0:3] - - xChainID = ids.Empty.Prefix(0) - cChainID = ids.Empty.Prefix(1) - - // Used to create and use keys. - testKeyFactory secp256k1.Factory - - errMissing = errors.New("missing") ) -type mutableSharedMemory struct { - atomic.SharedMemory -} +func init() { + for _, key := range keys { + // TODO: use ids.GenerateTestNodeID() instead of ids.BuildTestNodeID + // Can be done when TestGetState is refactored + nodeBytes := key.PublicKey().Address() + nodeID := ids.BuildTestNodeID(nodeBytes[:]) -func defaultContext() *snow.Context { - ctx := snow.DefaultContextTest() - ctx.NetworkID = testNetworkID - ctx.XChainID = xChainID - ctx.CChainID = cChainID - ctx.AVAXAssetID = avaxAssetID - aliaser := ids.NewAliaser() - - errs := wrappers.Errs{} - errs.Add( - aliaser.Alias(constants.PlatformChainID, "P"), - aliaser.Alias(constants.PlatformChainID, constants.PlatformChainID.String()), - aliaser.Alias(xChainID, "X"), - aliaser.Alias(xChainID, xChainID.String()), - aliaser.Alias(cChainID, "C"), - aliaser.Alias(cChainID, cChainID.String()), - ) - if errs.Errored() { - panic(errs.Err) - } - ctx.BCLookup = aliaser - - ctx.ValidatorState = &validators.TestState{ - GetSubnetIDF: func(_ context.Context, chainID ids.ID) (ids.ID, error) { - subnetID, ok := map[ids.ID]ids.ID{ - constants.PlatformChainID: constants.PrimaryNetworkID, - xChainID: constants.PrimaryNetworkID, - cChainID: 
constants.PrimaryNetworkID, - }[chainID] - if !ok { - return ids.Empty, errMissing - } - return subnetID, nil - }, + genesisNodeIDs = append(genesisNodeIDs, nodeID) } - return ctx } -// Returns: -// 1) The genesis state -// 2) The byte representation of the default genesis for tests -func defaultGenesis() (*api.BuildGenesisArgs, []byte) { - genesisUTXOs := make([]api.UTXO, len(keys)) - hrp := constants.NetworkIDToHRP[testNetworkID] - for i, key := range keys { - id := key.PublicKey().Address() - addr, err := address.FormatBech32(hrp, id.Bytes()) - if err != nil { - panic(err) - } - genesisUTXOs[i] = api.UTXO{ - Amount: json.Uint64(defaultBalance), - Address: addr, - } - } - - genesisValidators := make([]api.PermissionlessValidator, len(keys)) - for i, key := range keys { - nodeID := ids.NodeID(key.PublicKey().Address()) - addr, err := address.FormatBech32(hrp, nodeID.Bytes()) - if err != nil { - panic(err) - } - genesisValidators[i] = api.PermissionlessValidator{ - Staker: api.Staker{ - StartTime: json.Uint64(defaultValidateStartTime.Unix()), - EndTime: json.Uint64(defaultValidateEndTime.Unix()), - NodeID: nodeID, - }, - RewardOwner: &api.Owner{ - Threshold: 1, - Addresses: []string{addr}, - }, - Staked: []api.UTXO{{ - Amount: json.Uint64(defaultWeight), - Address: addr, - }}, - DelegationFee: reward.PercentDenominator, - } - } - - buildGenesisArgs := api.BuildGenesisArgs{ - Encoding: formatting.Hex, - NetworkID: json.Uint32(testNetworkID), - AvaxAssetID: avaxAssetID, - UTXOs: genesisUTXOs, - Validators: genesisValidators, - Chains: nil, - Time: json.Uint64(defaultGenesisTime.Unix()), - InitialSupply: json.Uint64(360 * units.MegaAvax), - } - - buildGenesisResponse := api.BuildGenesisReply{} - platformvmSS := api.StaticService{} - if err := platformvmSS.BuildGenesis(nil, &buildGenesisArgs, &buildGenesisResponse); err != nil { - panic(fmt.Errorf("problem while building platform chain's genesis state: %w", err)) - } - - genesisBytes, err := 
formatting.Decode(buildGenesisResponse.Encoding, buildGenesisResponse.Bytes) - if err != nil { - panic(err) - } - - return &buildGenesisArgs, genesisBytes -} +type fork uint8 -// Returns: -// 1) The genesis state -// 2) The byte representation of the default genesis for tests -func BuildGenesisTest(t *testing.T) (*api.BuildGenesisArgs, []byte) { - return BuildGenesisTestWithArgs(t, nil) +type mutableSharedMemory struct { + atomic.SharedMemory } // Returns: // 1) The genesis state // 2) The byte representation of the default genesis for tests -func BuildGenesisTestWithArgs(t *testing.T, args *api.BuildGenesisArgs) (*api.BuildGenesisArgs, []byte) { +func defaultGenesis(t *testing.T, avaxAssetID ids.ID) (*api.BuildGenesisArgs, []byte) { require := require.New(t) + genesisUTXOs := make([]api.UTXO, len(keys)) - hrp := constants.NetworkIDToHRP[testNetworkID] for i, key := range keys { id := key.PublicKey().Address() - addr, err := address.FormatBech32(hrp, id.Bytes()) + addr, err := address.FormatBech32(constants.UnitTestHRP, id.Bytes()) require.NoError(err) - genesisUTXOs[i] = api.UTXO{ Amount: json.Uint64(defaultBalance), Address: addr, } } - genesisValidators := make([]api.PermissionlessValidator, len(keys)) - for i, key := range keys { - nodeID := ids.NodeID(key.PublicKey().Address()) - addr, err := address.FormatBech32(hrp, nodeID.Bytes()) + genesisValidators := make([]api.GenesisPermissionlessValidator, len(genesisNodeIDs)) + for i, nodeID := range genesisNodeIDs { + addr, err := address.FormatBech32(constants.UnitTestHRP, nodeID.Bytes()) require.NoError(err) - - genesisValidators[i] = api.PermissionlessValidator{ - Staker: api.Staker{ + genesisValidators[i] = api.GenesisPermissionlessValidator{ + GenesisValidator: api.GenesisValidator{ StartTime: json.Uint64(defaultValidateStartTime.Unix()), EndTime: json.Uint64(defaultValidateEndTime.Unix()), NodeID: nodeID, @@ -290,24 +181,19 @@ func BuildGenesisTestWithArgs(t *testing.T, args *api.BuildGenesisArgs) (*api.Bu } 
buildGenesisArgs := api.BuildGenesisArgs{ - NetworkID: json.Uint32(testNetworkID), + Encoding: formatting.Hex, + NetworkID: json.Uint32(constants.UnitTestID), AvaxAssetID: avaxAssetID, UTXOs: genesisUTXOs, Validators: genesisValidators, Chains: nil, Time: json.Uint64(defaultGenesisTime.Unix()), InitialSupply: json.Uint64(360 * units.MegaAvax), - Encoding: formatting.Hex, - } - - if args != nil { - buildGenesisArgs = *args } buildGenesisResponse := api.BuildGenesisReply{} platformvmSS := api.StaticService{} - err := platformvmSS.BuildGenesis(nil, &buildGenesisArgs, &buildGenesisResponse) - require.NoError(err) + require.NoError(platformvmSS.BuildGenesis(nil, &buildGenesisArgs, &buildGenesisResponse)) genesisBytes, err := formatting.Decode(buildGenesisResponse.Encoding, buildGenesisResponse.Bytes) require.NoError(err) @@ -315,15 +201,43 @@ func BuildGenesisTestWithArgs(t *testing.T, args *api.BuildGenesisArgs) (*api.Bu return &buildGenesisArgs, genesisBytes } -func defaultVM() (*VM, database.Database, *mutableSharedMemory) { - vdrs := validators.NewManager() - primaryVdrs := validators.NewSet() - _ = vdrs.Add(constants.PrimaryNetworkID, primaryVdrs) +func defaultVM(t *testing.T, f fork) (*VM, database.Database, *mutableSharedMemory) { + require := require.New(t) + var ( + apricotPhase3Time = mockable.MaxTime + apricotPhase5Time = mockable.MaxTime + banffTime = mockable.MaxTime + cortinaTime = mockable.MaxTime + durangoTime = mockable.MaxTime + ) + + // always reset latestForkTime (a package level variable) + // to ensure test independence + latestForkTime = defaultGenesisTime.Add(time.Second) + switch f { + case durango: + durangoTime = latestForkTime + fallthrough + case cortina: + cortinaTime = latestForkTime + fallthrough + case banff: + banffTime = latestForkTime + fallthrough + case apricotPhase5: + apricotPhase5Time = latestForkTime + fallthrough + case apricotPhase3: + apricotPhase3Time = latestForkTime + default: + require.NoError(fmt.Errorf("unhandled fork 
%d", f)) + } + vm := &VM{Config: config.Config{ Chains: chains.TestManager, UptimeLockedCalculator: uptime.NewLockedCalculator(), - StakingEnabled: true, - Validators: vdrs, + SybilProtectionEnabled: true, + Validators: validators.NewManager(), TxFee: defaultTxFee, CreateSubnetTxFee: 100 * defaultTxFee, TransformSubnetTxFee: 100 * defaultTxFee, @@ -334,18 +248,20 @@ func defaultVM() (*VM, database.Database, *mutableSharedMemory) { MinStakeDuration: defaultMinStakingDuration, MaxStakeDuration: defaultMaxStakingDuration, RewardConfig: defaultRewardConfig, - ApricotPhase3Time: defaultValidateEndTime, - ApricotPhase5Time: defaultValidateEndTime, - BanffTime: banffForkTime, + ApricotPhase3Time: apricotPhase3Time, + ApricotPhase5Time: apricotPhase5Time, + BanffTime: banffTime, + CortinaTime: cortinaTime, + DurangoTime: durangoTime, }} - baseDBManager := manager.NewMemDB(version.Semantic1_0_0) - chainDBManager := baseDBManager.NewPrefixDBManager([]byte{0}) - atomicDB := prefixdb.New([]byte{1}, baseDBManager.Current().Database) + db := memdb.New() + chainDB := prefixdb.New([]byte{0}, db) + atomicDB := prefixdb.New([]byte{1}, db) - vm.clock.Set(banffForkTime.Add(time.Second)) + vm.clock.Set(latestForkTime) msgChan := make(chan common.Message, 1) - ctx := defaultContext() + ctx := snowtest.Context(t, snowtest.PChainID) m := atomic.NewMemory(atomicDB) msm := &mutableSharedMemory{ @@ -355,157 +271,69 @@ func defaultVM() (*VM, database.Database, *mutableSharedMemory) { ctx.Lock.Lock() defer ctx.Lock.Unlock() - _, genesisBytes := defaultGenesis() + _, genesisBytes := defaultGenesis(t, ctx.AVAXAssetID) appSender := &common.SenderTest{} appSender.CantSendAppGossip = true appSender.SendAppGossipF = func(context.Context, []byte) error { return nil } - err := vm.Initialize( + dynamicConfigBytes := []byte(`{"network":{"max-validator-set-staleness":0}}`) + require.NoError(vm.Initialize( context.Background(), ctx, - chainDBManager, + chainDB, genesisBytes, nil, - nil, + 
dynamicConfigBytes, msgChan, nil, appSender, - ) - if err != nil { - panic(err) - } + )) - err = vm.SetState(context.Background(), snow.NormalOp) - if err != nil { - panic(err) - } + // align chain time and local clock + vm.state.SetTimestamp(vm.clock.Time()) + + require.NoError(vm.SetState(context.Background(), snow.NormalOp)) // Create a subnet and store it in testSubnet1 // Note: following Banff activation, block acceptance will move // chain time ahead + var err error testSubnet1, err = vm.txBuilder.NewCreateSubnetTx( 2, // threshold; 2 sigs from keys[0], keys[1], keys[2] needed to add validator to this subnet // control keys are keys[0], keys[1], keys[2] []ids.ShortID{keys[0].PublicKey().Address(), keys[1].PublicKey().Address(), keys[2].PublicKey().Address()}, []*secp256k1.PrivateKey{keys[0]}, // pays tx fee keys[0].PublicKey().Address(), // change addr - ) - if err != nil { - panic(err) - } else if err := vm.Builder.AddUnverifiedTx(testSubnet1); err != nil { - panic(err) - } else if blk, err := vm.Builder.BuildBlock(context.Background()); err != nil { - panic(err) - } else if err := blk.Verify(context.Background()); err != nil { - panic(err) - } else if err := blk.Accept(context.Background()); err != nil { - panic(err) - } else if err := vm.SetPreference(context.Background(), vm.manager.LastAccepted()); err != nil { - panic(err) - } - - return vm, baseDBManager.Current().Database, msm -} - -func GenesisVMWithArgs(t *testing.T, args *api.BuildGenesisArgs) ([]byte, chan common.Message, *VM, *atomic.Memory) { - require := require.New(t) - var genesisBytes []byte - - if args != nil { - _, genesisBytes = BuildGenesisTestWithArgs(t, args) - } else { - _, genesisBytes = BuildGenesisTest(t) - } - - vdrs := validators.NewManager() - primaryVdrs := validators.NewSet() - _ = vdrs.Add(constants.PrimaryNetworkID, primaryVdrs) - vm := &VM{Config: config.Config{ - Chains: chains.TestManager, - Validators: vdrs, - UptimeLockedCalculator: uptime.NewLockedCalculator(), - 
TxFee: defaultTxFee, - MinValidatorStake: defaultMinValidatorStake, - MaxValidatorStake: defaultMaxValidatorStake, - MinDelegatorStake: defaultMinDelegatorStake, - MinStakeDuration: defaultMinStakingDuration, - MaxStakeDuration: defaultMaxStakingDuration, - RewardConfig: defaultRewardConfig, - BanffTime: banffForkTime, - }} - - baseDBManager := manager.NewMemDB(version.Semantic1_0_0) - chainDBManager := baseDBManager.NewPrefixDBManager([]byte{0}) - atomicDB := prefixdb.New([]byte{1}, baseDBManager.Current().Database) - - vm.clock.Set(defaultGenesisTime) - msgChan := make(chan common.Message, 1) - ctx := defaultContext() - - m := atomic.NewMemory(atomicDB) - - ctx.SharedMemory = m.NewSharedMemory(ctx.ChainID) - - ctx.Lock.Lock() - defer ctx.Lock.Unlock() - appSender := &common.SenderTest{T: t} - appSender.CantSendAppGossip = true - appSender.SendAppGossipF = func(context.Context, []byte) error { - return nil - } - err := vm.Initialize( - context.Background(), - ctx, - chainDBManager, - genesisBytes, - nil, - nil, - msgChan, nil, - appSender, - ) - require.NoError(err) - - err = vm.SetState(context.Background(), snow.NormalOp) - require.NoError(err) - - // Create a subnet and store it in testSubnet1 - testSubnet1, err = vm.txBuilder.NewCreateSubnetTx( - 2, // threshold; 2 sigs from keys[0], keys[1], keys[2] needed to add validator to this subnet - // control keys are keys[0], keys[1], keys[2] - []ids.ShortID{keys[0].PublicKey().Address(), keys[1].PublicKey().Address(), keys[2].PublicKey().Address()}, - []*secp256k1.PrivateKey{keys[0]}, // pays tx fee - keys[0].PublicKey().Address(), // change addr ) require.NoError(err) - - err = vm.Builder.AddUnverifiedTx(testSubnet1) - require.NoError(err) - + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), testSubnet1)) + vm.ctx.Lock.Lock() blk, err := vm.Builder.BuildBlock(context.Background()) require.NoError(err) + require.NoError(blk.Verify(context.Background())) + 
require.NoError(blk.Accept(context.Background())) + require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) - err = blk.Verify(context.Background()) - require.NoError(err) + t.Cleanup(func() { + vm.ctx.Lock.Lock() + defer vm.ctx.Lock.Unlock() - err = blk.Accept(context.Background()) - require.NoError(err) + require.NoError(vm.Shutdown(context.Background())) + }) - return genesisBytes, msgChan, vm, m + return vm, db, msm } // Ensure genesis state is parsed from bytes and stored correctly func TestGenesis(t *testing.T) { require := require.New(t) - vm, _, _ := defaultVM() + vm, _, _ := defaultVM(t, latestFork) vm.ctx.Lock.Lock() - defer func() { - err := vm.Shutdown(context.Background()) - require.NoError(err) - vm.ctx.Lock.Unlock() - }() + defer vm.ctx.Lock.Unlock() // Ensure the genesis block has been accepted and stored genesisBlockID, err := vm.LastAccepted(context.Background()) // lastAccepted should be ID of genesis block @@ -515,7 +343,7 @@ func TestGenesis(t *testing.T) { require.NoError(err) require.Equal(choices.Accepted, genesisBlock.Status()) - genesisState, _ := defaultGenesis() + genesisState, _ := defaultGenesis(t, vm.ctx.AVAXAssetID) // Ensure all the genesis UTXOs are there for _, utxo := range genesisState.UTXOs { _, addrBytes, err := address.ParseBech32(utxo.Address) @@ -524,8 +352,7 @@ func TestGenesis(t *testing.T) { addr, err := ids.ToShortID(addrBytes) require.NoError(err) - addrs := set.Set[ids.ShortID]{} - addrs.Add(addr) + addrs := set.Of(addr) utxos, err := avax.GetAllUTXOs(vm.state, addrs) require.NoError(err) require.Len(utxos, 1) @@ -533,25 +360,20 @@ func TestGenesis(t *testing.T) { out := utxos[0].Out.(*secp256k1fx.TransferOutput) if out.Amount() != uint64(utxo.Amount) { id := keys[0].PublicKey().Address() - hrp := constants.NetworkIDToHRP[testNetworkID] - addr, err := address.FormatBech32(hrp, id.Bytes()) + addr, err := address.FormatBech32(constants.UnitTestHRP, id.Bytes()) require.NoError(err) 
require.Equal(utxo.Address, addr) - require.Equal(uint64(utxo.Amount)-vm.TxFee, out.Amount()) + require.Equal(uint64(utxo.Amount)-vm.CreateSubnetTxFee, out.Amount()) } } // Ensure current validator set of primary network is correct - vdrSet, ok := vm.Validators.Get(constants.PrimaryNetworkID) - require.True(ok) + require.Len(genesisState.Validators, vm.Validators.Count(constants.PrimaryNetworkID)) - currentValidators := vdrSet.List() - require.Equal(len(currentValidators), len(genesisState.Validators)) - - for _, key := range keys { - nodeID := ids.NodeID(key.PublicKey().Address()) - require.True(vdrSet.Contains(nodeID)) + for _, nodeID := range genesisNodeIDs { + _, ok := vm.Validators.GetValidator(constants.PrimaryNetworkID, nodeID) + require.True(ok) } // Ensure the new subnet we created exists @@ -562,33 +384,39 @@ func TestGenesis(t *testing.T) { // accept proposal to add validator to primary network func TestAddValidatorCommit(t *testing.T) { require := require.New(t) - vm, _, _ := defaultVM() + vm, _, _ := defaultVM(t, latestFork) vm.ctx.Lock.Lock() - defer func() { - require.NoError(vm.Shutdown(context.Background())) - vm.ctx.Lock.Unlock() - }() + defer vm.ctx.Lock.Unlock() - startTime := vm.clock.Time().Add(txexecutor.SyncBound).Add(1 * time.Second) - endTime := startTime.Add(defaultMinStakingDuration) - nodeID := ids.GenerateTestNodeID() - rewardAddress := ids.GenerateTestShortID() + var ( + startTime = vm.clock.Time().Add(txexecutor.SyncBound).Add(1 * time.Second) + endTime = startTime.Add(defaultMinStakingDuration) + nodeID = ids.GenerateTestNodeID() + rewardAddress = ids.GenerateTestShortID() + ) + + sk, err := bls.NewSecretKey() + require.NoError(err) // create valid tx - tx, err := vm.txBuilder.NewAddValidatorTx( + tx, err := vm.txBuilder.NewAddPermissionlessValidatorTx( vm.MinValidatorStake, uint64(startTime.Unix()), uint64(endTime.Unix()), nodeID, + signer.NewProofOfPossession(sk), rewardAddress, reward.PercentDenominator, 
[]*secp256k1.PrivateKey{keys[0]}, ids.ShortEmpty, // change addr + nil, ) require.NoError(err) // trigger block creation - require.NoError(vm.Builder.AddUnverifiedTx(tx)) + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), tx)) + vm.ctx.Lock.Lock() blk, err := vm.Builder.BuildBlock(context.Background()) require.NoError(err) @@ -600,26 +428,21 @@ func TestAddValidatorCommit(t *testing.T) { require.NoError(err) require.Equal(status.Committed, txStatus) - // Verify that new validator now in pending validator set - _, err = vm.state.GetPendingValidator(constants.PrimaryNetworkID, nodeID) + // Verify that new validator now in current validator set + _, err = vm.state.GetCurrentValidator(constants.PrimaryNetworkID, nodeID) require.NoError(err) } // verify invalid attempt to add validator to primary network func TestInvalidAddValidatorCommit(t *testing.T) { require := require.New(t) - vm, _, _ := defaultVM() + vm, _, _ := defaultVM(t, cortina) vm.ctx.Lock.Lock() - defer func() { - err := vm.Shutdown(context.Background()) - require.NoError(err) - vm.ctx.Lock.Unlock() - }() + defer vm.ctx.Lock.Unlock() + nodeID := ids.GenerateTestNodeID() startTime := defaultGenesisTime.Add(-txexecutor.SyncBound).Add(-1 * time.Second) endTime := startTime.Add(defaultMinStakingDuration) - key, _ := testKeyFactory.NewPrivateKey() - nodeID := ids.NodeID(key.PublicKey().Address()) // create invalid tx tx, err := vm.txBuilder.NewAddValidatorTx( @@ -627,19 +450,20 @@ func TestInvalidAddValidatorCommit(t *testing.T) { uint64(startTime.Unix()), uint64(endTime.Unix()), nodeID, - ids.ShortID(nodeID), + ids.GenerateTestShortID(), reward.PercentDenominator, []*secp256k1.PrivateKey{keys[0]}, ids.ShortEmpty, // change addr + nil, ) require.NoError(err) - preferred, err := vm.Builder.Preferred() + preferredID := vm.manager.Preferred() + preferred, err := vm.manager.GetBlock(preferredID) require.NoError(err) - - preferredID := preferred.ID() preferredHeight := preferred.Height() - 
statelessBlk, err := blocks.NewBanffStandardBlock( + + statelessBlk, err := block.NewBanffStandardBlock( preferred.Timestamp(), preferredID, preferredHeight+1, @@ -647,36 +471,32 @@ func TestInvalidAddValidatorCommit(t *testing.T) { ) require.NoError(err) - blk := vm.manager.NewBlock(statelessBlk) - require.NoError(err) - - blkBytes := blk.Bytes() + blkBytes := statelessBlk.Bytes() parsedBlock, err := vm.ParseBlock(context.Background(), blkBytes) require.NoError(err) err = parsedBlock.Verify(context.Background()) - require.Error(err) + require.ErrorIs(err, txexecutor.ErrTimestampNotBeforeStartTime) txID := statelessBlk.Txs()[0].ID() reason := vm.Builder.GetDropReason(txID) - require.Error(reason) + require.ErrorIs(reason, txexecutor.ErrTimestampNotBeforeStartTime) } // Reject attempt to add validator to primary network func TestAddValidatorReject(t *testing.T) { require := require.New(t) - vm, _, _ := defaultVM() + vm, _, _ := defaultVM(t, cortina) vm.ctx.Lock.Lock() - defer func() { - require.NoError(vm.Shutdown(context.Background())) - vm.ctx.Lock.Unlock() - }() + defer vm.ctx.Lock.Unlock() - startTime := vm.clock.Time().Add(txexecutor.SyncBound).Add(1 * time.Second) - endTime := startTime.Add(defaultMinStakingDuration) - nodeID := ids.GenerateTestNodeID() - rewardAddress := ids.GenerateTestShortID() + var ( + startTime = vm.clock.Time().Add(txexecutor.SyncBound).Add(1 * time.Second) + endTime = startTime.Add(defaultMinStakingDuration) + nodeID = ids.GenerateTestNodeID() + rewardAddress = ids.GenerateTestShortID() + ) // create valid tx tx, err := vm.txBuilder.NewAddValidatorTx( @@ -688,11 +508,14 @@ func TestAddValidatorReject(t *testing.T) { reward.PercentDenominator, []*secp256k1.PrivateKey{keys[0]}, ids.ShortEmpty, // change addr + nil, ) require.NoError(err) // trigger block creation - require.NoError(vm.Builder.AddUnverifiedTx(tx)) + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), tx)) + vm.ctx.Lock.Lock() blk, err := 
vm.Builder.BuildBlock(context.Background()) require.NoError(err) @@ -701,7 +524,7 @@ func TestAddValidatorReject(t *testing.T) { require.NoError(blk.Reject(context.Background())) _, _, err = vm.state.GetTx(tx.ID()) - require.Error(err, database.ErrNotFound) + require.ErrorIs(err, database.ErrNotFound) _, err = vm.state.GetPendingValidator(constants.PrimaryNetworkID, nodeID) require.ErrorIs(err, database.ErrNotFound) @@ -710,51 +533,53 @@ func TestAddValidatorReject(t *testing.T) { // Reject proposal to add validator to primary network func TestAddValidatorInvalidNotReissued(t *testing.T) { require := require.New(t) - vm, _, _ := defaultVM() + vm, _, _ := defaultVM(t, latestFork) vm.ctx.Lock.Lock() - defer func() { - err := vm.Shutdown(context.Background()) - require.NoError(err) - vm.ctx.Lock.Unlock() - }() + defer vm.ctx.Lock.Unlock() // Use nodeID that is already in the genesis - repeatNodeID := ids.NodeID(keys[0].PublicKey().Address()) + repeatNodeID := genesisNodeIDs[0] - startTime := defaultGenesisTime.Add(txexecutor.SyncBound).Add(1 * time.Second) + startTime := latestForkTime.Add(txexecutor.SyncBound).Add(1 * time.Second) endTime := startTime.Add(defaultMinStakingDuration) + sk, err := bls.NewSecretKey() + require.NoError(err) + // create valid tx - tx, err := vm.txBuilder.NewAddValidatorTx( + tx, err := vm.txBuilder.NewAddPermissionlessValidatorTx( vm.MinValidatorStake, uint64(startTime.Unix()), uint64(endTime.Unix()), repeatNodeID, - ids.ShortID(repeatNodeID), + signer.NewProofOfPossession(sk), + ids.GenerateTestShortID(), reward.PercentDenominator, []*secp256k1.PrivateKey{keys[0]}, ids.ShortEmpty, // change addr + nil, ) require.NoError(err) // trigger block creation - err = vm.Builder.AddUnverifiedTx(tx) - require.Error(err, "should have erred due to adding a validator with a nodeID that is already in the validator set") + vm.ctx.Lock.Unlock() + err = vm.issueTx(context.Background(), tx) + vm.ctx.Lock.Lock() + require.ErrorIs(err, 
txexecutor.ErrDuplicateValidator) } // Accept proposal to add validator to subnet func TestAddSubnetValidatorAccept(t *testing.T) { require := require.New(t) - vm, _, _ := defaultVM() + vm, _, _ := defaultVM(t, latestFork) vm.ctx.Lock.Lock() - defer func() { - require.NoError(vm.Shutdown(context.Background())) - vm.ctx.Lock.Unlock() - }() + defer vm.ctx.Lock.Unlock() - startTime := vm.clock.Time().Add(txexecutor.SyncBound).Add(1 * time.Second) - endTime := startTime.Add(defaultMinStakingDuration) - nodeID := ids.NodeID(keys[0].PublicKey().Address()) + var ( + startTime = vm.clock.Time().Add(txexecutor.SyncBound).Add(1 * time.Second) + endTime = startTime.Add(defaultMinStakingDuration) + nodeID = genesisNodeIDs[0] + ) // create valid tx // note that [startTime, endTime] is a subset of time that keys[0] @@ -767,11 +592,14 @@ func TestAddSubnetValidatorAccept(t *testing.T) { testSubnet1.ID(), []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ids.ShortEmpty, // change addr + nil, ) require.NoError(err) // trigger block creation - require.NoError(vm.Builder.AddUnverifiedTx(tx)) + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), tx)) + vm.ctx.Lock.Lock() blk, err := vm.Builder.BuildBlock(context.Background()) require.NoError(err) @@ -783,24 +611,23 @@ func TestAddSubnetValidatorAccept(t *testing.T) { require.NoError(err) require.Equal(status.Committed, txStatus) - // Verify that new validator is in pending validator set - _, err = vm.state.GetPendingValidator(testSubnet1.ID(), nodeID) + // Verify that new validator is in current validator set + _, err = vm.state.GetCurrentValidator(testSubnet1.ID(), nodeID) require.NoError(err) } // Reject proposal to add validator to subnet func TestAddSubnetValidatorReject(t *testing.T) { require := require.New(t) - vm, _, _ := defaultVM() + vm, _, _ := defaultVM(t, latestFork) vm.ctx.Lock.Lock() - defer func() { - require.NoError(vm.Shutdown(context.Background())) - 
vm.ctx.Lock.Unlock() - }() + defer vm.ctx.Lock.Unlock() - startTime := vm.clock.Time().Add(txexecutor.SyncBound).Add(1 * time.Second) - endTime := startTime.Add(defaultMinStakingDuration) - nodeID := ids.NodeID(keys[0].PublicKey().Address()) + var ( + startTime = vm.clock.Time().Add(txexecutor.SyncBound).Add(1 * time.Second) + endTime = startTime.Add(defaultMinStakingDuration) + nodeID = genesisNodeIDs[0] + ) // create valid tx // note that [startTime, endTime] is a subset of time that keys[0] @@ -813,11 +640,14 @@ func TestAddSubnetValidatorReject(t *testing.T) { testSubnet1.ID(), []*secp256k1.PrivateKey{testSubnet1ControlKeys[1], testSubnet1ControlKeys[2]}, ids.ShortEmpty, // change addr + nil, ) require.NoError(err) // trigger block creation - require.NoError(vm.Builder.AddUnverifiedTx(tx)) + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), tx)) + vm.ctx.Lock.Lock() blk, err := vm.Builder.BuildBlock(context.Background()) require.NoError(err) @@ -826,187 +656,115 @@ func TestAddSubnetValidatorReject(t *testing.T) { require.NoError(blk.Reject(context.Background())) _, _, err = vm.state.GetTx(tx.ID()) - require.Error(err, database.ErrNotFound) + require.ErrorIs(err, database.ErrNotFound) - // Verify that new validator NOT in pending validator set - _, err = vm.state.GetPendingValidator(testSubnet1.ID(), nodeID) + // Verify that new validator NOT in validator set + _, err = vm.state.GetCurrentValidator(testSubnet1.ID(), nodeID) require.ErrorIs(err, database.ErrNotFound) } // Test case where primary network validator rewarded func TestRewardValidatorAccept(t *testing.T) { require := require.New(t) - vm, _, _ := defaultVM() + vm, _, _ := defaultVM(t, latestFork) vm.ctx.Lock.Lock() - defer func() { - require.NoError(vm.Shutdown(context.Background())) - vm.ctx.Lock.Unlock() - }() + defer vm.ctx.Lock.Unlock() // Fast forward clock to time for genesis validators to leave vm.clock.Set(defaultValidateEndTime) - blk, err := 
vm.Builder.BuildBlock(context.Background()) // should advance time + // Advance time and create proposal to reward a genesis validator + blk, err := vm.Builder.BuildBlock(context.Background()) require.NoError(err) - require.NoError(blk.Verify(context.Background())) // Assert preferences are correct - block := blk.(smcon.OracleBlock) - options, err := block.Options(context.Background()) + options, err := blk.(smcon.OracleBlock).Options(context.Background()) require.NoError(err) commit := options[0].(*blockexecutor.Block) - _, ok := commit.Block.(*blocks.BanffCommitBlock) - require.True(ok) + require.IsType(&block.BanffCommitBlock{}, commit.Block) abort := options[1].(*blockexecutor.Block) - _, ok = abort.Block.(*blocks.BanffAbortBlock) - require.True(ok) + require.IsType(&block.BanffAbortBlock{}, abort.Block) - require.NoError(block.Accept(context.Background())) + // Assert block tries to reward a genesis validator + rewardTx := blk.(block.Block).Txs()[0].Unsigned + require.IsType(&txs.RewardValidatorTx{}, rewardTx) + + // Verify options and accept commmit block require.NoError(commit.Verify(context.Background())) require.NoError(abort.Verify(context.Background())) - - txID := blk.(blocks.Block).Txs()[0].ID() + txID := blk.(block.Block).Txs()[0].ID() { - onAccept, ok := vm.manager.GetState(abort.ID()) + onAbort, ok := vm.manager.GetState(abort.ID()) require.True(ok) - _, txStatus, err := onAccept.GetTx(txID) + _, txStatus, err := onAbort.GetTx(txID) require.NoError(err) require.Equal(status.Aborted, txStatus) } - require.NoError(commit.Accept(context.Background())) // advance the timestamp - lastAcceptedID, err := vm.LastAccepted(context.Background()) - require.NoError(err) - require.NoError(vm.SetPreference(context.Background(), lastAcceptedID)) - - _, txStatus, err := vm.state.GetTx(txID) - require.NoError(err) - require.Equal(status.Committed, txStatus) + require.NoError(blk.Accept(context.Background())) + require.NoError(commit.Accept(context.Background())) // 
Verify that chain's timestamp has advanced timestamp := vm.state.GetTimestamp() require.Equal(defaultValidateEndTime.Unix(), timestamp.Unix()) - blk, err = vm.Builder.BuildBlock(context.Background()) // should contain proposal to reward genesis validator + // Verify that rewarded validator has been removed. + // Note that test genesis has multiple validators + // terminating at the same time. The rewarded validator + // will the first by txID. To make the test more stable + // (txID changes every time we change any parameter + // of the tx creating the validator), we explicitly + // check that rewarded validator is removed from staker set. + _, txStatus, err := vm.state.GetTx(txID) require.NoError(err) + require.Equal(status.Committed, txStatus) - require.NoError(blk.Verify(context.Background())) - - // Assert preferences are correct - block = blk.(smcon.OracleBlock) - options, err = block.Options(context.Background()) + tx, _, err := vm.state.GetTx(rewardTx.(*txs.RewardValidatorTx).TxID) require.NoError(err) + require.IsType(&txs.AddValidatorTx{}, tx.Unsigned) - commit = options[0].(*blockexecutor.Block) - _, ok = commit.Block.(*blocks.BanffCommitBlock) - require.True(ok) - - abort = options[1].(*blockexecutor.Block) - _, ok = abort.Block.(*blocks.BanffAbortBlock) - require.True(ok) - - require.NoError(block.Accept(context.Background())) - require.NoError(commit.Verify(context.Background())) - require.NoError(abort.Verify(context.Background())) - - txID = blk.(blocks.Block).Txs()[0].ID() - { - onAccept, ok := vm.manager.GetState(abort.ID()) - require.True(ok) - - _, txStatus, err := onAccept.GetTx(txID) - require.NoError(err) - require.Equal(status.Aborted, txStatus) - } - - require.NoError(commit.Accept(context.Background())) // reward the genesis validator - - _, txStatus, err = vm.state.GetTx(txID) - require.NoError(err) - require.Equal(status.Committed, txStatus) - - _, err = vm.state.GetCurrentValidator(constants.PrimaryNetworkID, 
ids.NodeID(keys[1].PublicKey().Address())) - require.ErrorIs(err, database.ErrNotFound) -} + valTx, _ := tx.Unsigned.(*txs.AddValidatorTx) + _, err = vm.state.GetCurrentValidator(constants.PrimaryNetworkID, valTx.NodeID()) + require.ErrorIs(err, database.ErrNotFound) +} // Test case where primary network validator not rewarded func TestRewardValidatorReject(t *testing.T) { require := require.New(t) - vm, _, _ := defaultVM() + vm, _, _ := defaultVM(t, latestFork) vm.ctx.Lock.Lock() - defer func() { - require.NoError(vm.Shutdown(context.Background())) - vm.ctx.Lock.Unlock() - }() + defer vm.ctx.Lock.Unlock() // Fast forward clock to time for genesis validators to leave vm.clock.Set(defaultValidateEndTime) - blk, err := vm.Builder.BuildBlock(context.Background()) // should advance time + // Advance time and create proposal to reward a genesis validator + blk, err := vm.Builder.BuildBlock(context.Background()) require.NoError(err) require.NoError(blk.Verify(context.Background())) // Assert preferences are correct - block := blk.(smcon.OracleBlock) - options, err := block.Options(context.Background()) + oracleBlk := blk.(smcon.OracleBlock) + options, err := oracleBlk.Options(context.Background()) require.NoError(err) commit := options[0].(*blockexecutor.Block) - _, ok := commit.Block.(*blocks.BanffCommitBlock) - require.True(ok) + require.IsType(&block.BanffCommitBlock{}, commit.Block) abort := options[1].(*blockexecutor.Block) - _, ok = abort.Block.(*blocks.BanffAbortBlock) - require.True(ok) - - require.NoError(block.Accept(context.Background())) - require.NoError(commit.Verify(context.Background())) - require.NoError(abort.Verify(context.Background())) - - txID := blk.(blocks.Block).Txs()[0].ID() - { - onAccept, ok := vm.manager.GetState(abort.ID()) - require.True(ok) - - _, txStatus, err := onAccept.GetTx(txID) - require.NoError(err) - require.Equal(status.Aborted, txStatus) - } - - require.NoError(commit.Accept(context.Background())) // advance the timestamp - 
require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) - - _, txStatus, err := vm.state.GetTx(txID) - require.NoError(err) - require.Equal(status.Committed, txStatus) - - timestamp := vm.state.GetTimestamp() - require.Equal(defaultValidateEndTime.Unix(), timestamp.Unix()) - - blk, err = vm.Builder.BuildBlock(context.Background()) // should contain proposal to reward genesis validator - require.NoError(err) - - require.NoError(blk.Verify(context.Background())) - - block = blk.(smcon.OracleBlock) - options, err = block.Options(context.Background()) - require.NoError(err) - - commit = options[0].(*blockexecutor.Block) - _, ok = commit.Block.(*blocks.BanffCommitBlock) - require.True(ok) + require.IsType(&block.BanffAbortBlock{}, abort.Block) - abort = options[1].(*blockexecutor.Block) - _, ok = abort.Block.(*blocks.BanffAbortBlock) - require.True(ok) + // Assert block tries to reward a genesis validator + rewardTx := oracleBlk.(block.Block).Txs()[0].Unsigned + require.IsType(&txs.RewardValidatorTx{}, rewardTx) - require.NoError(blk.Accept(context.Background())) + // Verify options and accept abort block require.NoError(commit.Verify(context.Background())) - - txID = blk.(blocks.Block).Txs()[0].ID() + require.NoError(abort.Verify(context.Background())) + txID := blk.(block.Block).Txs()[0].ID() { onAccept, ok := vm.manager.GetState(commit.ID()) require.True(ok) @@ -1016,137 +774,50 @@ func TestRewardValidatorReject(t *testing.T) { require.Equal(status.Committed, txStatus) } - require.NoError(abort.Verify(context.Background())) - require.NoError(abort.Accept(context.Background())) // do not reward the genesis validator - - _, txStatus, err = vm.state.GetTx(txID) - require.NoError(err) - require.Equal(status.Aborted, txStatus) - - _, err = vm.state.GetCurrentValidator(constants.PrimaryNetworkID, ids.NodeID(keys[1].PublicKey().Address())) - require.ErrorIs(err, database.ErrNotFound) -} - -// Test case where primary network validator is preferred 
to be rewarded -func TestRewardValidatorPreferred(t *testing.T) { - require := require.New(t) - vm, _, _ := defaultVM() - vm.ctx.Lock.Lock() - defer func() { - require.NoError(vm.Shutdown(context.Background())) - vm.ctx.Lock.Unlock() - }() - - // Fast forward clock to time for genesis validators to leave - vm.clock.Set(defaultValidateEndTime) - - blk, err := vm.Builder.BuildBlock(context.Background()) // should advance time - require.NoError(err) - require.NoError(blk.Verify(context.Background())) - - // Assert preferences are correct - block := blk.(smcon.OracleBlock) - options, err := block.Options(context.Background()) - require.NoError(err) - - commit := options[0].(*blockexecutor.Block) - _, ok := commit.Block.(*blocks.BanffCommitBlock) - require.True(ok) - - abort := options[1].(*blockexecutor.Block) - _, ok = abort.Block.(*blocks.BanffAbortBlock) - require.True(ok) - - require.NoError(block.Accept(context.Background())) - require.NoError(commit.Verify(context.Background())) - require.NoError(abort.Verify(context.Background())) - - txID := blk.(blocks.Block).Txs()[0].ID() - { - onAccept, ok := vm.manager.GetState(abort.ID()) - require.True(ok) - - _, txStatus, err := onAccept.GetTx(txID) - require.NoError(err) - require.Equal(status.Aborted, txStatus) - } - - require.NoError(commit.Accept(context.Background())) // advance the timestamp - require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) - - _, txStatus, err := vm.state.GetTx(txID) - require.NoError(err) - require.Equal(status.Committed, txStatus) + require.NoError(blk.Accept(context.Background())) + require.NoError(abort.Accept(context.Background())) + // Verify that chain's timestamp has advanced timestamp := vm.state.GetTimestamp() require.Equal(defaultValidateEndTime.Unix(), timestamp.Unix()) - // should contain proposal to reward genesis validator - blk, err = vm.Builder.BuildBlock(context.Background()) - require.NoError(err) - - 
require.NoError(blk.Verify(context.Background())) - - block = blk.(smcon.OracleBlock) - options, err = block.Options(context.Background()) + // Verify that rewarded validator has been removed. + // Note that test genesis has multiple validators + // terminating at the same time. The rewarded validator + // will the first by txID. To make the test more stable + // (txID changes every time we change any parameter + // of the tx creating the validator), we explicitly + // check that rewarded validator is removed from staker set. + _, txStatus, err := vm.state.GetTx(txID) require.NoError(err) + require.Equal(status.Aborted, txStatus) - commit = options[0].(*blockexecutor.Block) - _, ok = commit.Block.(*blocks.BanffCommitBlock) - require.True(ok) - - abort = options[1].(*blockexecutor.Block) - _, ok = abort.Block.(*blocks.BanffAbortBlock) - require.True(ok) - - require.NoError(blk.Accept(context.Background())) - require.NoError(commit.Verify(context.Background())) - - txID = blk.(blocks.Block).Txs()[0].ID() - { - onAccept, ok := vm.manager.GetState(commit.ID()) - require.True(ok) - - _, txStatus, err := onAccept.GetTx(txID) - require.NoError(err) - require.Equal(status.Committed, txStatus) - } - - require.NoError(abort.Verify(context.Background())) - require.NoError(abort.Accept(context.Background())) // do not reward the genesis validator - - _, txStatus, err = vm.state.GetTx(txID) + tx, _, err := vm.state.GetTx(rewardTx.(*txs.RewardValidatorTx).TxID) require.NoError(err) - require.Equal(status.Aborted, txStatus) + require.IsType(&txs.AddValidatorTx{}, tx.Unsigned) - _, err = vm.state.GetCurrentValidator(constants.PrimaryNetworkID, ids.NodeID(keys[1].PublicKey().Address())) + valTx, _ := tx.Unsigned.(*txs.AddValidatorTx) + _, err = vm.state.GetCurrentValidator(constants.PrimaryNetworkID, valTx.NodeID()) require.ErrorIs(err, database.ErrNotFound) } // Ensure BuildBlock errors when there is no block to build func TestUnneededBuildBlock(t *testing.T) { require := 
require.New(t) - vm, _, _ := defaultVM() + vm, _, _ := defaultVM(t, latestFork) vm.ctx.Lock.Lock() - defer func() { - err := vm.Shutdown(context.Background()) - require.NoError(err) - vm.ctx.Lock.Unlock() - }() + defer vm.ctx.Lock.Unlock() + _, err := vm.Builder.BuildBlock(context.Background()) - require.Error(err) + require.ErrorIs(err, blockbuilder.ErrNoPendingBlocks) } // test acceptance of proposal to create a new chain func TestCreateChain(t *testing.T) { require := require.New(t) - vm, _, _ := defaultVM() + vm, _, _ := defaultVM(t, latestFork) vm.ctx.Lock.Lock() - defer func() { - err := vm.Shutdown(context.Background()) - require.NoError(err) - vm.ctx.Lock.Unlock() - }() + defer vm.ctx.Lock.Unlock() tx, err := vm.txBuilder.NewCreateChainTx( testSubnet1.ID(), @@ -1156,20 +827,20 @@ func TestCreateChain(t *testing.T) { "name", []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ids.ShortEmpty, // change addr + nil, ) require.NoError(err) - err = vm.Builder.AddUnverifiedTx(tx) - require.NoError(err) + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), tx)) + vm.ctx.Lock.Lock() blk, err := vm.Builder.BuildBlock(context.Background()) require.NoError(err) // should contain proposal to create chain - err = blk.Verify(context.Background()) - require.NoError(err) + require.NoError(blk.Verify(context.Background())) - err = blk.Accept(context.Background()) - require.NoError(err) + require.NoError(blk.Accept(context.Background())) _, txStatus, err := vm.state.GetTx(tx.ID()) require.NoError(err) @@ -1190,20 +861,15 @@ func TestCreateChain(t *testing.T) { // test where we: // 1) Create a subnet -// 2) Add a validator to the subnet's pending validator set -// 3) Advance timestamp to validator's start time (moving the validator from pending to current) -// 4) Advance timestamp to validator's end time (removing validator from current) +// 2) Add a validator to the subnet's current validator set +// 3) Advance timestamp to 
validator's end time (removing validator from current) func TestCreateSubnet(t *testing.T) { require := require.New(t) - vm, _, _ := defaultVM() + vm, _, _ := defaultVM(t, latestFork) vm.ctx.Lock.Lock() - defer func() { - require.NoError(vm.Shutdown(context.Background())) - vm.ctx.Lock.Unlock() - }() - - nodeID := ids.NodeID(keys[0].PublicKey().Address()) + defer vm.ctx.Lock.Unlock() + nodeID := genesisNodeIDs[0] createSubnetTx, err := vm.txBuilder.NewCreateSubnetTx( 1, // threshold []ids.ShortID{ // control keys @@ -1212,12 +878,15 @@ func TestCreateSubnet(t *testing.T) { }, []*secp256k1.PrivateKey{keys[0]}, // payer keys[0].PublicKey().Address(), // change addr + nil, ) require.NoError(err) - require.NoError(vm.Builder.AddUnverifiedTx(createSubnetTx)) + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), createSubnetTx)) + vm.ctx.Lock.Lock() - // should contain proposal to create subnet + // should contain the CreateSubnetTx blk, err := vm.Builder.BuildBlock(context.Background()) require.NoError(err) @@ -1253,36 +922,26 @@ func TestCreateSubnet(t *testing.T) { createSubnetTx.ID(), []*secp256k1.PrivateKey{keys[0]}, ids.ShortEmpty, // change addr + nil, ) require.NoError(err) - require.NoError(vm.Builder.AddUnverifiedTx(addValidatorTx)) + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), addValidatorTx)) + vm.ctx.Lock.Lock() blk, err = vm.Builder.BuildBlock(context.Background()) // should add validator to the new subnet require.NoError(err) require.NoError(blk.Verify(context.Background())) - require.NoError(blk.Accept(context.Background())) // add the validator to pending validator set + require.NoError(blk.Accept(context.Background())) // add the validator to current validator set require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) - txID := blk.(blocks.Block).Txs()[0].ID() + txID := blk.(block.Block).Txs()[0].ID() _, txStatus, err = vm.state.GetTx(txID) require.NoError(err) 
require.Equal(status.Committed, txStatus) - _, err = vm.state.GetPendingValidator(createSubnetTx.ID(), nodeID) - require.NoError(err) - - // Advance time to when new validator should start validating - // Create a block with an advance time tx that moves validator - // from pending to current validator set - vm.clock.Set(startTime) - blk, err = vm.Builder.BuildBlock(context.Background()) // should be advance time tx - require.NoError(err) - require.NoError(blk.Verify(context.Background())) - require.NoError(blk.Accept(context.Background())) // move validator addValidatorTx from pending to current - require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) - _, err = vm.state.GetPendingValidator(createSubnetTx.ID(), nodeID) require.ErrorIs(err, database.ErrNotFound) @@ -1306,13 +965,9 @@ func TestCreateSubnet(t *testing.T) { // test asset import func TestAtomicImport(t *testing.T) { require := require.New(t) - vm, baseDB, mutableSharedMemory := defaultVM() + vm, baseDB, mutableSharedMemory := defaultVM(t, latestFork) vm.ctx.Lock.Lock() - defer func() { - err := vm.Shutdown(context.Background()) - require.NoError(err) - vm.ctx.Lock.Unlock() - }() + defer vm.ctx.Lock.Unlock() utxoID := avax.UTXOID{ TxID: ids.Empty.Prefix(1), @@ -1331,14 +986,15 @@ func TestAtomicImport(t *testing.T) { recipientKey.PublicKey().Address(), []*secp256k1.PrivateKey{keys[0]}, ids.ShortEmpty, // change addr + nil, ) - require.Error(err, "should have errored due to missing utxos") + require.ErrorIs(err, txbuilder.ErrNoFunds) // Provide the avm UTXO utxo := &avax.UTXO{ UTXOID: utxoID, - Asset: avax.Asset{ID: avaxAssetID}, + Asset: avax.Asset{ID: vm.ctx.AVAXAssetID}, Out: &secp256k1fx.TransferOutput{ Amt: amount, OutputOwners: secp256k1fx.OutputOwners{ @@ -1347,11 +1003,11 @@ func TestAtomicImport(t *testing.T) { }, }, } - utxoBytes, err := txs.Codec.Marshal(txs.Version, utxo) + utxoBytes, err := txs.Codec.Marshal(txs.CodecVersion, utxo) require.NoError(err) inputID := 
utxo.InputID() - err = peerSharedMemory.Apply(map[ids.ID]*atomic.Requests{ + require.NoError(peerSharedMemory.Apply(map[ids.ID]*atomic.Requests{ vm.ctx.ChainID: { PutRequests: []*atomic.Element{ { @@ -1363,29 +1019,27 @@ func TestAtomicImport(t *testing.T) { }, }, }, - }, - ) - require.NoError(err) + })) tx, err := vm.txBuilder.NewImportTx( vm.ctx.XChainID, recipientKey.PublicKey().Address(), []*secp256k1.PrivateKey{recipientKey}, ids.ShortEmpty, // change addr + nil, ) require.NoError(err) - err = vm.Builder.AddUnverifiedTx(tx) - require.NoError(err) + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), tx)) + vm.ctx.Lock.Lock() blk, err := vm.Builder.BuildBlock(context.Background()) require.NoError(err) - err = blk.Verify(context.Background()) - require.NoError(err) + require.NoError(blk.Verify(context.Background())) - err = blk.Accept(context.Background()) - require.NoError(err) + require.NoError(blk.Accept(context.Background())) _, txStatus, err := vm.state.GetTx(tx.ID()) require.NoError(err) @@ -1399,13 +1053,9 @@ func TestAtomicImport(t *testing.T) { // test optimistic asset import func TestOptimisticAtomicImport(t *testing.T) { require := require.New(t) - vm, _, _ := defaultVM() + vm, _, _ := defaultVM(t, apricotPhase3) vm.ctx.Lock.Lock() - defer func() { - err := vm.Shutdown(context.Background()) - require.NoError(err) - vm.ctx.Lock.Unlock() - }() + defer vm.ctx.Lock.Unlock() tx := &txs.Tx{Unsigned: &txs.ImportTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ @@ -1424,16 +1074,14 @@ func TestOptimisticAtomicImport(t *testing.T) { }, }}, }} - err := tx.Initialize(txs.Codec) - require.NoError(err) + require.NoError(tx.Initialize(txs.Codec)) - preferred, err := vm.Builder.Preferred() + preferredID := vm.manager.Preferred() + preferred, err := vm.manager.GetBlock(preferredID) require.NoError(err) - - preferredID := preferred.ID() preferredHeight := preferred.Height() - statelessBlk, err := blocks.NewApricotAtomicBlock( + statelessBlk, err := 
block.NewApricotAtomicBlock( preferredID, preferredHeight+1, tx, @@ -1443,19 +1091,15 @@ func TestOptimisticAtomicImport(t *testing.T) { blk := vm.manager.NewBlock(statelessBlk) err = blk.Verify(context.Background()) - require.Error(err, "should have erred due to missing UTXOs") + require.ErrorIs(err, database.ErrNotFound) // erred due to missing shared memory UTXOs - err = vm.SetState(context.Background(), snow.Bootstrapping) - require.NoError(err) + require.NoError(vm.SetState(context.Background(), snow.Bootstrapping)) - err = blk.Verify(context.Background()) - require.NoError(err) + require.NoError(blk.Verify(context.Background())) // skips shared memory UTXO verification during bootstrapping - err = blk.Accept(context.Background()) - require.NoError(err) + require.NoError(blk.Accept(context.Background())) - err = vm.SetState(context.Background(), snow.NormalOp) - require.NoError(err) + require.NoError(vm.SetState(context.Background(), snow.NormalOp)) _, txStatus, err := vm.state.GetTx(tx.ID()) require.NoError(err) @@ -1466,39 +1110,36 @@ func TestOptimisticAtomicImport(t *testing.T) { // test restarting the node func TestRestartFullyAccepted(t *testing.T) { require := require.New(t) - _, genesisBytes := defaultGenesis() - db := manager.NewMemDB(version.Semantic1_0_0) + db := memdb.New() - firstDB := db.NewPrefixDBManager([]byte{}) - firstVdrs := validators.NewManager() - firstPrimaryVdrs := validators.NewSet() - _ = firstVdrs.Add(constants.PrimaryNetworkID, firstPrimaryVdrs) + firstDB := prefixdb.New([]byte{}, db) firstVM := &VM{Config: config.Config{ Chains: chains.TestManager, - Validators: firstVdrs, + Validators: validators.NewManager(), UptimeLockedCalculator: uptime.NewLockedCalculator(), MinStakeDuration: defaultMinStakingDuration, MaxStakeDuration: defaultMaxStakingDuration, RewardConfig: defaultRewardConfig, - BanffTime: banffForkTime, + BanffTime: latestForkTime, + CortinaTime: latestForkTime, + DurangoTime: latestForkTime, }} - firstCtx := 
defaultContext() + firstCtx := snowtest.Context(t, snowtest.PChainID) + + _, genesisBytes := defaultGenesis(t, firstCtx.AVAXAssetID) - baseDBManager := manager.NewMemDB(version.Semantic1_0_0) - atomicDB := prefixdb.New([]byte{1}, baseDBManager.Current().Database) + baseDB := memdb.New() + atomicDB := prefixdb.New([]byte{1}, baseDB) m := atomic.NewMemory(atomicDB) - msm := &mutableSharedMemory{ - SharedMemory: m.NewSharedMemory(firstCtx.ChainID), - } - firstCtx.SharedMemory = msm + firstCtx.SharedMemory = m.NewSharedMemory(firstCtx.ChainID) - initialClkTime := banffForkTime.Add(time.Second) + initialClkTime := latestForkTime.Add(time.Second) firstVM.clock.Set(initialClkTime) firstCtx.Lock.Lock() firstMsgChan := make(chan common.Message, 1) - err := firstVM.Initialize( + require.NoError(firstVM.Initialize( context.Background(), firstCtx, firstDB, @@ -1508,19 +1149,11 @@ func TestRestartFullyAccepted(t *testing.T) { firstMsgChan, nil, nil, - ) - require.NoError(err) + )) genesisID, err := firstVM.LastAccepted(context.Background()) require.NoError(err) - nextChainTime := initialClkTime.Add(time.Second) - firstVM.clock.Set(initialClkTime) - preferred, err := firstVM.Builder.Preferred() - require.NoError(err) - preferredID := preferred.ID() - preferredHeight := preferred.Height() - // include a tx to make the block be accepted tx := &txs.Tx{Unsigned: &txs.ImportTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ @@ -1541,7 +1174,15 @@ func TestRestartFullyAccepted(t *testing.T) { }} require.NoError(tx.Initialize(txs.Codec)) - statelessBlk, err := blocks.NewBanffStandardBlock( + nextChainTime := initialClkTime.Add(time.Second) + firstVM.clock.Set(initialClkTime) + + preferredID := firstVM.manager.Preferred() + preferred, err := firstVM.manager.GetBlock(preferredID) + require.NoError(err) + preferredHeight := preferred.Height() + + statelessBlk, err := block.NewBanffStandardBlock( nextChainTime, preferredID, preferredHeight+1, @@ -1559,32 +1200,30 @@ func 
TestRestartFullyAccepted(t *testing.T) { require.NoError(firstVM.Shutdown(context.Background())) firstCtx.Lock.Unlock() - secondVdrs := validators.NewManager() - secondPrimaryVdrs := validators.NewSet() - _ = secondVdrs.Add(constants.PrimaryNetworkID, secondPrimaryVdrs) secondVM := &VM{Config: config.Config{ Chains: chains.TestManager, - Validators: secondVdrs, + Validators: validators.NewManager(), UptimeLockedCalculator: uptime.NewLockedCalculator(), MinStakeDuration: defaultMinStakingDuration, MaxStakeDuration: defaultMaxStakingDuration, RewardConfig: defaultRewardConfig, - BanffTime: banffForkTime, + BanffTime: latestForkTime, + CortinaTime: latestForkTime, + DurangoTime: latestForkTime, }} - secondCtx := defaultContext() - secondCtx.SharedMemory = msm + secondCtx := snowtest.Context(t, snowtest.PChainID) + secondCtx.SharedMemory = firstCtx.SharedMemory secondVM.clock.Set(initialClkTime) secondCtx.Lock.Lock() defer func() { - err := secondVM.Shutdown(context.Background()) - require.NoError(err) + require.NoError(secondVM.Shutdown(context.Background())) secondCtx.Lock.Unlock() }() - secondDB := db.NewPrefixDBManager([]byte{}) + secondDB := prefixdb.New([]byte{}, db) secondMsgChan := make(chan common.Message, 1) - err = secondVM.Initialize( + require.NoError(secondVM.Initialize( context.Background(), secondCtx, secondDB, @@ -1594,8 +1233,7 @@ func TestRestartFullyAccepted(t *testing.T) { secondMsgChan, nil, nil, - ) - require.NoError(err) + )) lastAccepted, err := secondVM.LastAccepted(context.Background()) require.NoError(err) @@ -1606,59 +1244,49 @@ func TestRestartFullyAccepted(t *testing.T) { func TestBootstrapPartiallyAccepted(t *testing.T) { require := require.New(t) - _, genesisBytes := defaultGenesis() - - baseDBManager := manager.NewMemDB(version.Semantic1_0_0) - vmDBManager := baseDBManager.NewPrefixDBManager([]byte("vm")) - bootstrappingDB := prefixdb.New([]byte("bootstrapping"), baseDBManager.Current().Database) - + baseDB := memdb.New() + vmDB := 
prefixdb.New(chains.VMDBPrefix, baseDB) + bootstrappingDB := prefixdb.New(chains.ChainBootstrappingDBPrefix, baseDB) blocked, err := queue.NewWithMissing(bootstrappingDB, "", prometheus.NewRegistry()) require.NoError(err) - vdrs := validators.NewManager() - primaryVdrs := validators.NewSet() - _ = vdrs.Add(constants.PrimaryNetworkID, primaryVdrs) vm := &VM{Config: config.Config{ Chains: chains.TestManager, - Validators: vdrs, + Validators: validators.NewManager(), UptimeLockedCalculator: uptime.NewLockedCalculator(), MinStakeDuration: defaultMinStakingDuration, MaxStakeDuration: defaultMaxStakingDuration, RewardConfig: defaultRewardConfig, - BanffTime: banffForkTime, + BanffTime: latestForkTime, + CortinaTime: latestForkTime, + DurangoTime: latestForkTime, }} - initialClkTime := banffForkTime.Add(time.Second) + initialClkTime := latestForkTime.Add(time.Second) vm.clock.Set(initialClkTime) - ctx := defaultContext() + ctx := snowtest.Context(t, snowtest.PChainID) + + _, genesisBytes := defaultGenesis(t, ctx.AVAXAssetID) - atomicDB := prefixdb.New([]byte{1}, baseDBManager.Current().Database) + atomicDB := prefixdb.New([]byte{1}, baseDB) m := atomic.NewMemory(atomicDB) - msm := &mutableSharedMemory{ - SharedMemory: m.NewSharedMemory(ctx.ChainID), - } - ctx.SharedMemory = msm + ctx.SharedMemory = m.NewSharedMemory(ctx.ChainID) - consensusCtx := snow.DefaultConsensusContextTest() - consensusCtx.Context = ctx + consensusCtx := snowtest.ConsensusContext(ctx) ctx.Lock.Lock() msgChan := make(chan common.Message, 1) - err = vm.Initialize( + require.NoError(vm.Initialize( context.Background(), ctx, - vmDBManager, + vmDB, genesisBytes, nil, nil, msgChan, nil, nil, - ) - require.NoError(err) - - preferred, err := vm.Builder.Preferred() - require.NoError(err) + )) // include a tx to make the block be accepted tx := &txs.Tx{Unsigned: &txs.ImportTx{ @@ -1681,9 +1309,13 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { require.NoError(tx.Initialize(txs.Codec)) nextChainTime := 
initialClkTime.Add(time.Second) - preferredID := preferred.ID() + + preferredID := vm.manager.Preferred() + preferred, err := vm.manager.GetBlock(preferredID) + require.NoError(err) preferredHeight := preferred.Height() - statelessBlk, err := blocks.NewBanffStandardBlock( + + statelessBlk, err := block.NewBanffStandardBlock( nextChainTime, preferredID, preferredHeight+1, @@ -1697,9 +1329,9 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { advanceTimeBlkID := advanceTimeBlk.ID() advanceTimeBlkBytes := advanceTimeBlk.Bytes() - peerID := ids.NodeID{1, 2, 3, 4, 5, 4, 3, 2, 1} - beacons := validators.NewSet() - require.NoError(beacons.Add(peerID, nil, ids.Empty, 1)) + peerID := ids.BuildTestNodeID([]byte{1, 2, 3, 4, 5, 4, 3, 2, 1}) + beacons := validators.NewManager() + require.NoError(beacons.AddStaker(ctx.SubnetID, peerID, nil, ids.Empty, 1)) benchlist := benchlist.NewNoBenchlist() timeoutManager, err := timeout.NewManager( @@ -1717,6 +1349,7 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { require.NoError(err) go timeoutManager.Dispatch() + defer timeoutManager.Stop() chainRouter := &router.ChainRouter{} @@ -1724,7 +1357,7 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { mc, err := message.NewCreator(logging.NoLog{}, metrics, "dummyNamespace", constants.DefaultNetworkCompressionType, 10*time.Second) require.NoError(err) - err = chainRouter.Initialize( + require.NoError(chainRouter.Initialize( ids.EmptyNodeID, logging.NoLog{}, timeoutManager, @@ -1736,8 +1369,7 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { router.HealthConfig{}, "", prometheus.NewRegistry(), - ) - require.NoError(err) + )) externalSender := &sender.ExternalSenderTest{TB: t} externalSender.Default(true) @@ -1760,19 +1392,6 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { ) require.NoError(err) - var reqID uint32 - externalSender.SendF = func(msg message.OutboundMessage, nodeIDs set.Set[ids.NodeID], _ ids.ID, _ subnets.Allower) set.Set[ids.NodeID] { - inMsg, err := 
mc.Parse(msg.Bytes(), ctx.NodeID, func() {}) - require.NoError(err) - require.Equal(message.GetAcceptedFrontierOp, inMsg.Op()) - - requestID, ok := message.GetRequestID(inMsg.Message()) - require.True(ok) - - reqID = requestID - return nodeIDs - } - isBootstrapped := false bootstrapTracker := &common.BootstrapTrackerTest{ T: t, @@ -1785,32 +1404,33 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { } peers := tracker.NewPeers() - startup := tracker.NewStartup(peers, (beacons.Weight()+1)/2) - beacons.RegisterCallbackListener(startup) + totalWeight, err := beacons.TotalWeight(ctx.SubnetID) + require.NoError(err) + startup := tracker.NewStartup(peers, (totalWeight+1)/2) + beacons.RegisterCallbackListener(ctx.SubnetID, startup) // The engine handles consensus - consensus := &smcon.Topological{} - commonCfg := common.Config{ + snowGetHandler, err := snowgetter.New( + vm, + sender, + consensusCtx.Log, + time.Second, + 2000, + consensusCtx.Registerer, + ) + require.NoError(err) + + bootstrapConfig := bootstrap.Config{ + AllGetsServer: snowGetHandler, Ctx: consensusCtx, Beacons: beacons, - SampleK: beacons.Len(), + SampleK: beacons.Count(ctx.SubnetID), StartupTracker: startup, - Alpha: (beacons.Weight() + 1) / 2, Sender: sender, BootstrapTracker: bootstrapTracker, - AncestorsMaxContainersSent: 2000, AncestorsMaxContainersReceived: 2000, - SharedCfg: &common.SharedConfig{}, - } - - snowGetHandler, err := snowgetter.New(vm, commonCfg) - require.NoError(err) - - bootstrapConfig := bootstrap.Config{ - Config: commonCfg, - AllGetsServer: snowGetHandler, - Blocked: blocked, - VM: vm, + Blocked: blocked, + VM: vm, } // Asynchronously passes messages from the network to the consensus engine @@ -1831,6 +1451,7 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { cpuTracker, vm, subnets.New(ctx.NodeID, subnets.Config{}), + tracker.NewPeers(), ) require.NoError(err) @@ -1842,7 +1463,8 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { Validators: beacons, Params: 
snowball.Parameters{ K: 1, - Alpha: 1, + AlphaPreference: 1, + AlphaConfidence: 1, BetaVirtuous: 20, BetaRogue: 20, ConcurrentRepolls: 1, @@ -1850,13 +1472,12 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { MaxOutstandingItems: 1, MaxItemProcessingTime: 1, }, - Consensus: consensus, + Consensus: &smcon.Topological{}, } engine, err := smeng.New(engineConfig) require.NoError(err) bootstrapper, err := bootstrap.New( - context.Background(), bootstrapConfig, engine.Start, ) @@ -1887,8 +1508,20 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { h.Start(context.Background(), false) ctx.Lock.Lock() - err = bootstrapper.Connected(context.Background(), peerID, version.CurrentApp) - require.NoError(err) + var reqID uint32 + externalSender.SendF = func(msg message.OutboundMessage, nodeIDs set.Set[ids.NodeID], _ ids.ID, _ subnets.Allower) set.Set[ids.NodeID] { + inMsg, err := mc.Parse(msg.Bytes(), ctx.NodeID, func() {}) + require.NoError(err) + require.Equal(message.GetAcceptedFrontierOp, inMsg.Op()) + + requestID, ok := message.GetRequestID(inMsg.Message()) + require.True(ok) + + reqID = requestID + return nodeIDs + } + + require.NoError(bootstrapper.Connected(context.Background(), peerID, version.CurrentApp)) externalSender.SendF = func(msg message.OutboundMessage, nodeIDs set.Set[ids.NodeID], _ ids.ID, _ subnets.Allower) set.Set[ids.NodeID] { inMsgIntf, err := mc.Parse(msg.Bytes(), ctx.NodeID, func() {}) @@ -1900,9 +1533,7 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { return nodeIDs } - frontier := []ids.ID{advanceTimeBlkID} - err = bootstrapper.AcceptedFrontier(context.Background(), peerID, reqID, frontier) - require.NoError(err) + require.NoError(bootstrapper.AcceptedFrontier(context.Background(), peerID, reqID, advanceTimeBlkID)) externalSender.SendF = func(msg message.OutboundMessage, nodeIDs set.Set[ids.NodeID], _ ids.ID, _ subnets.Allower) set.Set[ids.NodeID] { inMsgIntf, err := mc.Parse(msg.Bytes(), ctx.NodeID, func() {}) @@ -1918,17 +1549,40 @@ 
func TestBootstrapPartiallyAccepted(t *testing.T) { return nodeIDs } + frontier := set.Of(advanceTimeBlkID) require.NoError(bootstrapper.Accepted(context.Background(), peerID, reqID, frontier)) - externalSender.SendF = nil - externalSender.CantSend = false + externalSender.SendF = func(msg message.OutboundMessage, nodeIDs set.Set[ids.NodeID], _ ids.ID, _ subnets.Allower) set.Set[ids.NodeID] { + inMsg, err := mc.Parse(msg.Bytes(), ctx.NodeID, func() {}) + require.NoError(err) + require.Equal(message.GetAcceptedFrontierOp, inMsg.Op()) + + requestID, ok := message.GetRequestID(inMsg.Message()) + require.True(ok) + + reqID = requestID + return nodeIDs + } require.NoError(bootstrapper.Ancestors(context.Background(), peerID, reqID, [][]byte{advanceTimeBlkBytes})) - preferred, err = vm.Builder.Preferred() - require.NoError(err) + externalSender.SendF = func(msg message.OutboundMessage, nodeIDs set.Set[ids.NodeID], _ ids.ID, _ subnets.Allower) set.Set[ids.NodeID] { + inMsgIntf, err := mc.Parse(msg.Bytes(), ctx.NodeID, func() {}) + require.NoError(err) + require.Equal(message.GetAcceptedOp, inMsgIntf.Op()) + inMsg := inMsgIntf.Message().(*p2p.GetAccepted) + + reqID = inMsg.RequestId + return nodeIDs + } - require.Equal(advanceTimeBlk.ID(), preferred.ID()) + require.NoError(bootstrapper.AcceptedFrontier(context.Background(), peerID, reqID, advanceTimeBlkID)) + + externalSender.SendF = nil + externalSender.CantSend = false + + require.NoError(bootstrapper.Accepted(context.Background(), peerID, reqID, frontier)) + require.Equal(advanceTimeBlk.ID(), vm.manager.Preferred()) ctx.Lock.Unlock() chainRouter.Shutdown(context.Background()) @@ -1936,44 +1590,42 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { func TestUnverifiedParent(t *testing.T) { require := require.New(t) - _, genesisBytes := defaultGenesis() - dbManager := manager.NewMemDB(version.Semantic1_0_0) - vdrs := validators.NewManager() - primaryVdrs := validators.NewSet() - _ = vdrs.Add(constants.PrimaryNetworkID, 
primaryVdrs) vm := &VM{Config: config.Config{ Chains: chains.TestManager, - Validators: vdrs, + Validators: validators.NewManager(), UptimeLockedCalculator: uptime.NewLockedCalculator(), MinStakeDuration: defaultMinStakingDuration, MaxStakeDuration: defaultMaxStakingDuration, RewardConfig: defaultRewardConfig, - BanffTime: banffForkTime, + BanffTime: latestForkTime, + CortinaTime: latestForkTime, + DurangoTime: latestForkTime, }} - initialClkTime := banffForkTime.Add(time.Second) + initialClkTime := latestForkTime.Add(time.Second) vm.clock.Set(initialClkTime) - ctx := defaultContext() + ctx := snowtest.Context(t, snowtest.PChainID) ctx.Lock.Lock() defer func() { require.NoError(vm.Shutdown(context.Background())) ctx.Lock.Unlock() }() + _, genesisBytes := defaultGenesis(t, ctx.AVAXAssetID) + msgChan := make(chan common.Message, 1) - err := vm.Initialize( + require.NoError(vm.Initialize( context.Background(), ctx, - dbManager, + memdb.New(), genesisBytes, nil, nil, msgChan, nil, nil, - ) - require.NoError(err) + )) // include a tx1 to make the block be accepted tx1 := &txs.Tx{Unsigned: &txs.ImportTx{ @@ -1995,13 +1647,14 @@ func TestUnverifiedParent(t *testing.T) { }} require.NoError(tx1.Initialize(txs.Codec)) - preferred, err := vm.Builder.Preferred() - require.NoError(err) nextChainTime := initialClkTime.Add(time.Second) - preferredID := preferred.ID() + + preferredID := vm.manager.Preferred() + preferred, err := vm.manager.GetBlock(preferredID) + require.NoError(err) preferredHeight := preferred.Height() - statelessBlk, err := blocks.NewBanffStandardBlock( + statelessBlk, err := block.NewBanffStandardBlock( nextChainTime, preferredID, preferredHeight+1, @@ -2009,10 +1662,9 @@ func TestUnverifiedParent(t *testing.T) { ) require.NoError(err) firstAdvanceTimeBlk := vm.manager.NewBlock(statelessBlk) - err = firstAdvanceTimeBlk.Verify(context.Background()) - require.NoError(err) + require.NoError(firstAdvanceTimeBlk.Verify(context.Background())) - // include a tx1 to 
make the block be accepted + // include a tx2 to make the block be accepted tx2 := &txs.Tx{Unsigned: &txs.ImportTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: vm.ctx.NetworkID, @@ -2030,10 +1682,10 @@ func TestUnverifiedParent(t *testing.T) { }, }}, }} - require.NoError(tx1.Initialize(txs.Codec)) + require.NoError(tx2.Initialize(txs.Codec)) nextChainTime = nextChainTime.Add(time.Second) vm.clock.Set(nextChainTime) - statelessSecondAdvanceTimeBlk, err := blocks.NewBanffStandardBlock( + statelessSecondAdvanceTimeBlk, err := block.NewBanffStandardBlock( nextChainTime, firstAdvanceTimeBlk.ID(), firstAdvanceTimeBlk.Height()+1, @@ -2047,14 +1699,11 @@ func TestUnverifiedParent(t *testing.T) { } func TestMaxStakeAmount(t *testing.T) { - vm, _, _ := defaultVM() + vm, _, _ := defaultVM(t, latestFork) vm.ctx.Lock.Lock() - defer func() { - require.NoError(t, vm.Shutdown(context.Background())) - vm.ctx.Lock.Unlock() - }() + defer vm.ctx.Lock.Unlock() - nodeID := ids.NodeID(keys[0].PublicKey().Address()) + nodeID := genesisNodeIDs[0] tests := []struct { description string @@ -2091,34 +1740,36 @@ func TestMaxStakeAmount(t *testing.T) { amount, err := txexecutor.GetMaxWeight(vm.state, staker, test.startTime, test.endTime) require.NoError(err) - require.EqualValues(defaultWeight, amount) + require.Equal(defaultWeight, amount) }) } } func TestUptimeDisallowedWithRestart(t *testing.T) { require := require.New(t) - _, genesisBytes := defaultGenesis() - db := manager.NewMemDB(version.Semantic1_0_0) + latestForkTime = defaultValidateStartTime.Add(defaultMinStakingDuration) + db := memdb.New() - firstDB := db.NewPrefixDBManager([]byte{}) - firstVdrs := validators.NewManager() - firstPrimaryVdrs := validators.NewSet() - _ = firstVdrs.Add(constants.PrimaryNetworkID, firstPrimaryVdrs) + firstDB := prefixdb.New([]byte{}, db) + const firstUptimePercentage = 20 // 20% firstVM := &VM{Config: config.Config{ Chains: chains.TestManager, - UptimePercentage: .2, + UptimePercentage: 
firstUptimePercentage / 100., RewardConfig: defaultRewardConfig, - Validators: firstVdrs, + Validators: validators.NewManager(), UptimeLockedCalculator: uptime.NewLockedCalculator(), - BanffTime: banffForkTime, + BanffTime: latestForkTime, + CortinaTime: latestForkTime, + DurangoTime: latestForkTime, }} - firstCtx := defaultContext() + firstCtx := snowtest.Context(t, snowtest.PChainID) firstCtx.Lock.Lock() + _, genesisBytes := defaultGenesis(t, firstCtx.AVAXAssetID) + firstMsgChan := make(chan common.Message, 1) - err := firstVM.Initialize( + require.NoError(firstVM.Initialize( context.Background(), firstCtx, firstDB, @@ -2128,43 +1779,51 @@ func TestUptimeDisallowedWithRestart(t *testing.T) { firstMsgChan, nil, nil, - ) - require.NoError(err) + )) - initialClkTime := banffForkTime.Add(time.Second) + initialClkTime := latestForkTime.Add(time.Second) firstVM.clock.Set(initialClkTime) - firstVM.uptimeManager.(uptime.TestManager).SetTime(initialClkTime) + // Set VM state to NormalOp, to start tracking validators' uptime require.NoError(firstVM.SetState(context.Background(), snow.Bootstrapping)) require.NoError(firstVM.SetState(context.Background(), snow.NormalOp)) - // Fast forward clock to time for genesis validators to leave - firstVM.uptimeManager.(uptime.TestManager).SetTime(defaultValidateEndTime) + // Fast forward clock so that validators meet 20% uptime required for reward + durationForReward := defaultValidateEndTime.Sub(defaultValidateStartTime) * firstUptimePercentage / 100 + vmStopTime := defaultValidateStartTime.Add(durationForReward) + firstVM.clock.Set(vmStopTime) + // Shutdown VM to stop all genesis validator uptime. 
+ // At this point they have been validating for the 20% uptime needed to be rewarded require.NoError(firstVM.Shutdown(context.Background())) firstCtx.Lock.Unlock() - secondDB := db.NewPrefixDBManager([]byte{}) - secondVdrs := validators.NewManager() - secondPrimaryVdrs := validators.NewSet() - _ = secondVdrs.Add(constants.PrimaryNetworkID, secondPrimaryVdrs) + // Restart the VM with a larger uptime requirement + secondDB := prefixdb.New([]byte{}, db) + const secondUptimePercentage = 21 // 21% > firstUptimePercentage, so uptime for reward is not met now secondVM := &VM{Config: config.Config{ Chains: chains.TestManager, - UptimePercentage: .21, - Validators: secondVdrs, + UptimePercentage: secondUptimePercentage / 100., + Validators: validators.NewManager(), UptimeLockedCalculator: uptime.NewLockedCalculator(), - BanffTime: banffForkTime, + BanffTime: latestForkTime, + CortinaTime: latestForkTime, + DurangoTime: latestForkTime, }} - secondCtx := defaultContext() + secondCtx := snowtest.Context(t, snowtest.PChainID) secondCtx.Lock.Lock() defer func() { require.NoError(secondVM.Shutdown(context.Background())) secondCtx.Lock.Unlock() }() + atomicDB := prefixdb.New([]byte{1}, db) + m := atomic.NewMemory(atomicDB) + secondCtx.SharedMemory = m.NewSharedMemory(secondCtx.ChainID) + secondMsgChan := make(chan common.Message, 1) - err = secondVM.Initialize( + require.NoError(secondVM.Initialize( context.Background(), secondCtx, secondDB, @@ -2174,131 +1833,95 @@ func TestUptimeDisallowedWithRestart(t *testing.T) { secondMsgChan, nil, nil, - ) - require.NoError(err) + )) - secondVM.clock.Set(defaultValidateStartTime.Add(2 * defaultMinStakingDuration)) - secondVM.uptimeManager.(uptime.TestManager).SetTime(defaultValidateStartTime.Add(2 * defaultMinStakingDuration)) + secondVM.clock.Set(vmStopTime) + // Set VM state to NormalOp, to start tracking validators' uptime require.NoError(secondVM.SetState(context.Background(), snow.Bootstrapping)) 
require.NoError(secondVM.SetState(context.Background(), snow.NormalOp)) + // after restart and change of uptime required for reward, push validators to their end of life secondVM.clock.Set(defaultValidateEndTime) - secondVM.uptimeManager.(uptime.TestManager).SetTime(defaultValidateEndTime) - blk, err := secondVM.Builder.BuildBlock(context.Background()) // should advance time + // evaluate a genesis validator for reward + blk, err := secondVM.Builder.BuildBlock(context.Background()) require.NoError(err) - require.NoError(blk.Verify(context.Background())) - // Assert preferences are correct - block := blk.(smcon.OracleBlock) - options, err := block.Options(context.Background()) - require.NoError(err) - - commit := options[0].(*blockexecutor.Block) - _, ok := commit.Block.(*blocks.BanffCommitBlock) - require.True(ok) - - abort := options[1].(*blockexecutor.Block) - _, ok = abort.Block.(*blocks.BanffAbortBlock) - require.True(ok) - - require.NoError(block.Accept(context.Background())) - require.NoError(commit.Verify(context.Background())) - require.NoError(abort.Verify(context.Background())) - require.NoError(secondVM.SetPreference(context.Background(), secondVM.manager.LastAccepted())) - - proposalTx := blk.(blocks.Block).Txs()[0] - { - onAccept, ok := secondVM.manager.GetState(abort.ID()) - require.True(ok) - - _, txStatus, err := onAccept.GetTx(proposalTx.ID()) - require.NoError(err) - require.Equal(status.Aborted, txStatus) - } - - require.NoError(commit.Accept(context.Background())) // advance the timestamp - require.NoError(secondVM.SetPreference(context.Background(), secondVM.manager.LastAccepted())) - - _, txStatus, err := secondVM.state.GetTx(proposalTx.ID()) + // Assert preferences are correct. 
+ // secondVM should prefer abort since uptime requirements are not met anymore + oracleBlk := blk.(smcon.OracleBlock) + options, err := oracleBlk.Options(context.Background()) require.NoError(err) - require.Equal(status.Committed, txStatus) - // Verify that chain's timestamp has advanced - timestamp := secondVM.state.GetTimestamp() - require.Equal(defaultValidateEndTime.Unix(), timestamp.Unix()) - - blk, err = secondVM.Builder.BuildBlock(context.Background()) // should contain proposal to reward genesis validator - require.NoError(err) - - require.NoError(blk.Verify(context.Background())) + abort := options[0].(*blockexecutor.Block) + require.IsType(&block.BanffAbortBlock{}, abort.Block) - block = blk.(smcon.OracleBlock) - options, err = block.Options(context.Background()) - require.NoError(err) - - commit = options[0].(*blockexecutor.Block) - _, ok = commit.Block.(*blocks.BanffCommitBlock) - require.True(ok) + commit := options[1].(*blockexecutor.Block) + require.IsType(&block.BanffCommitBlock{}, commit.Block) - abort = options[1].(*blockexecutor.Block) - _, ok = abort.Block.(*blocks.BanffAbortBlock) - require.True(ok) + // Assert block tries to reward a genesis validator + rewardTx := oracleBlk.(block.Block).Txs()[0].Unsigned + require.IsType(&txs.RewardValidatorTx{}, rewardTx) + txID := blk.(block.Block).Txs()[0].ID() - require.NoError(blk.Accept(context.Background())) + // Verify options and accept abort block require.NoError(commit.Verify(context.Background())) - require.NoError(secondVM.SetPreference(context.Background(), secondVM.manager.LastAccepted())) - - proposalTx = blk.(blocks.Block).Txs()[0] - { - onAccept, ok := secondVM.manager.GetState(commit.ID()) - require.True(ok) - - _, txStatus, err := onAccept.GetTx(proposalTx.ID()) - require.NoError(err) - require.Equal(status.Committed, txStatus) - } - require.NoError(abort.Verify(context.Background())) - require.NoError(abort.Accept(context.Background())) // do not reward the genesis validator + 
require.NoError(blk.Accept(context.Background())) + require.NoError(abort.Accept(context.Background())) require.NoError(secondVM.SetPreference(context.Background(), secondVM.manager.LastAccepted())) - _, txStatus, err = secondVM.state.GetTx(proposalTx.ID()) + // Verify that rewarded validator has been removed. + // Note that test genesis has multiple validators + // terminating at the same time. The rewarded validator + // will the first by txID. To make the test more stable + // (txID changes every time we change any parameter + // of the tx creating the validator), we explicitly + // check that rewarded validator is removed from staker set. + _, txStatus, err := secondVM.state.GetTx(txID) require.NoError(err) require.Equal(status.Aborted, txStatus) - _, err = secondVM.state.GetCurrentValidator( - constants.PrimaryNetworkID, - ids.NodeID(keys[1].PublicKey().Address()), - ) + tx, _, err := secondVM.state.GetTx(rewardTx.(*txs.RewardValidatorTx).TxID) + require.NoError(err) + require.IsType(&txs.AddValidatorTx{}, tx.Unsigned) + + valTx, _ := tx.Unsigned.(*txs.AddValidatorTx) + _, err = secondVM.state.GetCurrentValidator(constants.PrimaryNetworkID, valTx.NodeID()) require.ErrorIs(err, database.ErrNotFound) } func TestUptimeDisallowedAfterNeverConnecting(t *testing.T) { require := require.New(t) - _, genesisBytes := defaultGenesis() - db := manager.NewMemDB(version.Semantic1_0_0) + latestForkTime = defaultValidateStartTime.Add(defaultMinStakingDuration) + + db := memdb.New() - vdrs := validators.NewManager() - primaryVdrs := validators.NewSet() - _ = vdrs.Add(constants.PrimaryNetworkID, primaryVdrs) vm := &VM{Config: config.Config{ Chains: chains.TestManager, UptimePercentage: .2, RewardConfig: defaultRewardConfig, - Validators: vdrs, + Validators: validators.NewManager(), UptimeLockedCalculator: uptime.NewLockedCalculator(), - BanffTime: banffForkTime, + BanffTime: latestForkTime, + CortinaTime: latestForkTime, + DurangoTime: latestForkTime, }} - ctx := 
defaultContext() + ctx := snowtest.Context(t, snowtest.PChainID) ctx.Lock.Lock() + _, genesisBytes := defaultGenesis(t, ctx.AVAXAssetID) + + atomicDB := prefixdb.New([]byte{1}, db) + m := atomic.NewMemory(atomicDB) + ctx.SharedMemory = m.NewSharedMemory(ctx.ChainID) + msgChan := make(chan common.Message, 1) appSender := &common.SenderTest{T: t} - err := vm.Initialize( + require.NoError(vm.Initialize( context.Background(), ctx, db, @@ -2308,556 +1931,107 @@ func TestUptimeDisallowedAfterNeverConnecting(t *testing.T) { msgChan, nil, appSender, - ) - require.NoError(err) + )) defer func() { require.NoError(vm.Shutdown(context.Background())) ctx.Lock.Unlock() }() - initialClkTime := banffForkTime.Add(time.Second) + initialClkTime := latestForkTime.Add(time.Second) vm.clock.Set(initialClkTime) - vm.uptimeManager.(uptime.TestManager).SetTime(initialClkTime) + // Set VM state to NormalOp, to start tracking validators' uptime require.NoError(vm.SetState(context.Background(), snow.Bootstrapping)) require.NoError(vm.SetState(context.Background(), snow.NormalOp)) // Fast forward clock to time for genesis validators to leave vm.clock.Set(defaultValidateEndTime) - vm.uptimeManager.(uptime.TestManager).SetTime(defaultValidateEndTime) - blk, err := vm.Builder.BuildBlock(context.Background()) // should advance time + // evaluate a genesis validator for reward + blk, err := vm.Builder.BuildBlock(context.Background()) require.NoError(err) - require.NoError(blk.Verify(context.Background())) - // first the time will be advanced. - block := blk.(smcon.OracleBlock) - options, err := block.Options(context.Background()) + // Assert preferences are correct. + // vm should prefer abort since uptime requirements are not met. 
+ oracleBlk := blk.(smcon.OracleBlock) + options, err := oracleBlk.Options(context.Background()) require.NoError(err) - commit := options[0].(*blockexecutor.Block) - _, ok := commit.Block.(*blocks.BanffCommitBlock) - require.True(ok) + abort := options[0].(*blockexecutor.Block) + require.IsType(&block.BanffAbortBlock{}, abort.Block) - abort := options[1].(*blockexecutor.Block) - _, ok = abort.Block.(*blocks.BanffAbortBlock) - require.True(ok) + commit := options[1].(*blockexecutor.Block) + require.IsType(&block.BanffCommitBlock{}, commit.Block) - require.NoError(block.Accept(context.Background())) + // Assert block tries to reward a genesis validator + rewardTx := oracleBlk.(block.Block).Txs()[0].Unsigned + require.IsType(&txs.RewardValidatorTx{}, rewardTx) + txID := blk.(block.Block).Txs()[0].ID() + + // Verify options and accept abort block require.NoError(commit.Verify(context.Background())) require.NoError(abort.Verify(context.Background())) - require.NoError(commit.Accept(context.Background())) // advance the timestamp + require.NoError(blk.Accept(context.Background())) + require.NoError(abort.Accept(context.Background())) require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) - // Verify that chain's timestamp has advanced - timestamp := vm.state.GetTimestamp() - require.Equal(defaultValidateEndTime.Unix(), timestamp.Unix()) - - // should contain proposal to reward genesis validator - blk, err = vm.Builder.BuildBlock(context.Background()) + // Verify that rewarded validator has been removed. + // Note that test genesis has multiple validators + // terminating at the same time. The rewarded validator + // will the first by txID. To make the test more stable + // (txID changes every time we change any parameter + // of the tx creating the validator), we explicitly + // check that rewarded validator is removed from staker set. 
+ _, txStatus, err := vm.state.GetTx(txID) require.NoError(err) + require.Equal(status.Aborted, txStatus) - require.NoError(blk.Verify(context.Background())) - - block = blk.(smcon.OracleBlock) - options, err = block.Options(context.Background()) + tx, _, err := vm.state.GetTx(rewardTx.(*txs.RewardValidatorTx).TxID) require.NoError(err) + require.IsType(&txs.AddValidatorTx{}, tx.Unsigned) - commit = options[0].(*blockexecutor.Block) - _, ok = commit.Block.(*blocks.BanffCommitBlock) - require.True(ok) - - abort = options[1].(*blockexecutor.Block) - _, ok = abort.Block.(*blocks.BanffAbortBlock) - require.True(ok) - - require.NoError(blk.Accept(context.Background())) - require.NoError(commit.Verify(context.Background())) - require.NoError(abort.Verify(context.Background())) - require.NoError(abort.Accept(context.Background())) // do not reward the genesis validator - require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) - - _, err = vm.state.GetCurrentValidator( - constants.PrimaryNetworkID, - ids.NodeID(keys[1].PublicKey().Address()), - ) + valTx, _ := tx.Unsigned.(*txs.AddValidatorTx) + _, err = vm.state.GetCurrentValidator(constants.PrimaryNetworkID, valTx.NodeID()) require.ErrorIs(err, database.ErrNotFound) } -func TestVM_GetValidatorSet(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - // Setup VM - _, genesisBytes := defaultGenesis() - db := manager.NewMemDB(version.Semantic1_0_0) - - vdrManager := validators.NewManager() - primaryVdrs := validators.NewSet() - _ = vdrManager.Add(constants.PrimaryNetworkID, primaryVdrs) - - vm := &VM{Config: config.Config{ - Chains: chains.TestManager, - UptimePercentage: .2, - RewardConfig: defaultRewardConfig, - Validators: vdrManager, - UptimeLockedCalculator: uptime.NewLockedCalculator(), - BanffTime: mockable.MaxTime, - }} - - ctx := defaultContext() - ctx.Lock.Lock() - - msgChan := make(chan common.Message, 1) - appSender := &common.SenderTest{T: t} - err := 
vm.Initialize(context.Background(), ctx, db, genesisBytes, nil, nil, msgChan, nil, appSender) - require.NoError(t, err) - defer func() { - require.NoError(t, vm.Shutdown(context.Background())) - ctx.Lock.Unlock() - }() - - vm.clock.Set(defaultGenesisTime) - vm.uptimeManager.(uptime.TestManager).SetTime(defaultGenesisTime) - - require.NoError(t, vm.SetState(context.Background(), snow.Bootstrapping)) - require.NoError(t, vm.SetState(context.Background(), snow.NormalOp)) - - var ( - oldVdrs = vm.Validators - oldState = vm.state - numVdrs = 4 - vdrBaseWeight = uint64(1_000) - vdrs []*validators.Validator - ) - // Populate the validator set to use below - for i := 0; i < numVdrs; i++ { - sk, err := bls.NewSecretKey() - require.NoError(t, err) - - vdrs = append(vdrs, &validators.Validator{ - NodeID: ids.GenerateTestNodeID(), - PublicKey: bls.PublicFromSecretKey(sk), - Weight: vdrBaseWeight + uint64(i), - }) - } - - type test struct { - name string - // Height we're getting the diff at - height uint64 - lastAcceptedHeight uint64 - subnetID ids.ID - // Validator sets at tip - currentPrimaryNetworkValidators []*validators.Validator - currentSubnetValidators []*validators.Validator - // Diff at tip, block before tip, etc. - // This must have [height] - [lastAcceptedHeight] elements - weightDiffs []map[ids.NodeID]*state.ValidatorWeightDiff - // Diff at tip, block before tip, etc. 
- // This must have [height] - [lastAcceptedHeight] elements - pkDiffs []map[ids.NodeID]*bls.PublicKey - expectedVdrSet map[ids.NodeID]*validators.GetValidatorOutput - expectedErr error - } - - tests := []test{ - { - name: "after tip", - height: 1, - lastAcceptedHeight: 0, - expectedVdrSet: map[ids.NodeID]*validators.GetValidatorOutput{}, - expectedErr: database.ErrNotFound, - }, - { - name: "at tip", - height: 1, - lastAcceptedHeight: 1, - currentPrimaryNetworkValidators: []*validators.Validator{ - copyPrimaryValidator(vdrs[0]), - }, - currentSubnetValidators: []*validators.Validator{ - copySubnetValidator(vdrs[0]), - }, - expectedVdrSet: map[ids.NodeID]*validators.GetValidatorOutput{ - vdrs[0].NodeID: { - NodeID: vdrs[0].NodeID, - PublicKey: vdrs[0].PublicKey, - Weight: vdrs[0].Weight, - }, - }, - expectedErr: nil, - }, - { - name: "1 before tip", - height: 2, - lastAcceptedHeight: 3, - currentPrimaryNetworkValidators: []*validators.Validator{ - copyPrimaryValidator(vdrs[0]), - copyPrimaryValidator(vdrs[1]), - }, - currentSubnetValidators: []*validators.Validator{ - // At tip we have these 2 validators - copySubnetValidator(vdrs[0]), - copySubnetValidator(vdrs[1]), - }, - weightDiffs: []map[ids.NodeID]*state.ValidatorWeightDiff{ - { - // At the tip block vdrs[0] lost weight, vdrs[1] gained weight, - // and vdrs[2] left - vdrs[0].NodeID: { - Decrease: true, - Amount: 1, - }, - vdrs[1].NodeID: { - Decrease: false, - Amount: 1, - }, - vdrs[2].NodeID: { - Decrease: true, - Amount: vdrs[2].Weight, - }, - }, - }, - pkDiffs: []map[ids.NodeID]*bls.PublicKey{ - { - vdrs[2].NodeID: vdrs[2].PublicKey, - }, - }, - expectedVdrSet: map[ids.NodeID]*validators.GetValidatorOutput{ - vdrs[0].NodeID: { - NodeID: vdrs[0].NodeID, - PublicKey: vdrs[0].PublicKey, - Weight: vdrs[0].Weight + 1, - }, - vdrs[1].NodeID: { - NodeID: vdrs[1].NodeID, - PublicKey: vdrs[1].PublicKey, - Weight: vdrs[1].Weight - 1, - }, - vdrs[2].NodeID: { - NodeID: vdrs[2].NodeID, - PublicKey: vdrs[2].PublicKey, 
- Weight: vdrs[2].Weight, - }, - }, - expectedErr: nil, - }, - { - name: "2 before tip", - height: 3, - lastAcceptedHeight: 5, - currentPrimaryNetworkValidators: []*validators.Validator{ - copyPrimaryValidator(vdrs[0]), - copyPrimaryValidator(vdrs[1]), - }, - currentSubnetValidators: []*validators.Validator{ - // At tip we have these 2 validators - copySubnetValidator(vdrs[0]), - copySubnetValidator(vdrs[1]), - }, - weightDiffs: []map[ids.NodeID]*state.ValidatorWeightDiff{ - { - // At the tip block vdrs[0] lost weight, vdrs[1] gained weight, - // and vdrs[2] left - vdrs[0].NodeID: { - Decrease: true, - Amount: 1, - }, - vdrs[1].NodeID: { - Decrease: false, - Amount: 1, - }, - vdrs[2].NodeID: { - Decrease: true, - Amount: vdrs[2].Weight, - }, - }, - { - // At the block before tip vdrs[0] lost weight, vdrs[1] gained weight, - // vdrs[2] joined - vdrs[0].NodeID: { - Decrease: true, - Amount: 1, - }, - vdrs[1].NodeID: { - Decrease: false, - Amount: 1, - }, - vdrs[2].NodeID: { - Decrease: false, - Amount: vdrs[2].Weight, - }, - }, - }, - pkDiffs: []map[ids.NodeID]*bls.PublicKey{ - { - vdrs[2].NodeID: vdrs[2].PublicKey, - }, - {}, - }, - expectedVdrSet: map[ids.NodeID]*validators.GetValidatorOutput{ - vdrs[0].NodeID: { - NodeID: vdrs[0].NodeID, - PublicKey: vdrs[0].PublicKey, - Weight: vdrs[0].Weight + 2, - }, - vdrs[1].NodeID: { - NodeID: vdrs[1].NodeID, - PublicKey: vdrs[1].PublicKey, - Weight: vdrs[1].Weight - 2, - }, - }, - expectedErr: nil, - }, - { - name: "1 before tip; nil public key", - height: 4, - lastAcceptedHeight: 5, - currentPrimaryNetworkValidators: []*validators.Validator{ - copyPrimaryValidator(vdrs[0]), - copyPrimaryValidator(vdrs[1]), - }, - currentSubnetValidators: []*validators.Validator{ - // At tip we have these 2 validators - copySubnetValidator(vdrs[0]), - copySubnetValidator(vdrs[1]), - }, - weightDiffs: []map[ids.NodeID]*state.ValidatorWeightDiff{ - { - // At the tip block vdrs[0] lost weight, vdrs[1] gained weight, - // and vdrs[2] left - 
vdrs[0].NodeID: { - Decrease: true, - Amount: 1, - }, - vdrs[1].NodeID: { - Decrease: false, - Amount: 1, - }, - vdrs[2].NodeID: { - Decrease: true, - Amount: vdrs[2].Weight, - }, - }, - }, - pkDiffs: []map[ids.NodeID]*bls.PublicKey{ - {}, - }, - expectedVdrSet: map[ids.NodeID]*validators.GetValidatorOutput{ - vdrs[0].NodeID: { - NodeID: vdrs[0].NodeID, - PublicKey: vdrs[0].PublicKey, - Weight: vdrs[0].Weight + 1, - }, - vdrs[1].NodeID: { - NodeID: vdrs[1].NodeID, - PublicKey: vdrs[1].PublicKey, - Weight: vdrs[1].Weight - 1, - }, - vdrs[2].NodeID: { - NodeID: vdrs[2].NodeID, - Weight: vdrs[2].Weight, - }, - }, - expectedErr: nil, - }, - { - name: "1 before tip; subnet", - height: 5, - lastAcceptedHeight: 6, - subnetID: ids.GenerateTestID(), - currentPrimaryNetworkValidators: []*validators.Validator{ - copyPrimaryValidator(vdrs[0]), - copyPrimaryValidator(vdrs[1]), - copyPrimaryValidator(vdrs[3]), - }, - currentSubnetValidators: []*validators.Validator{ - // At tip we have these 2 validators - copySubnetValidator(vdrs[0]), - copySubnetValidator(vdrs[1]), - }, - weightDiffs: []map[ids.NodeID]*state.ValidatorWeightDiff{ - { - // At the tip block vdrs[0] lost weight, vdrs[1] gained weight, - // and vdrs[2] left - vdrs[0].NodeID: { - Decrease: true, - Amount: 1, - }, - vdrs[1].NodeID: { - Decrease: false, - Amount: 1, - }, - vdrs[2].NodeID: { - Decrease: true, - Amount: vdrs[2].Weight, - }, - }, - }, - pkDiffs: []map[ids.NodeID]*bls.PublicKey{ - {}, - }, - expectedVdrSet: map[ids.NodeID]*validators.GetValidatorOutput{ - vdrs[0].NodeID: { - NodeID: vdrs[0].NodeID, - PublicKey: vdrs[0].PublicKey, - Weight: vdrs[0].Weight + 1, - }, - vdrs[1].NodeID: { - NodeID: vdrs[1].NodeID, - PublicKey: vdrs[1].PublicKey, - Weight: vdrs[1].Weight - 1, - }, - vdrs[2].NodeID: { - NodeID: vdrs[2].NodeID, - Weight: vdrs[2].Weight, - }, - }, - expectedErr: nil, - }, - { - name: "unrelated primary network key removal on subnet lookup", - height: 4, - lastAcceptedHeight: 5, - subnetID: 
ids.GenerateTestID(), - currentPrimaryNetworkValidators: []*validators.Validator{ - copyPrimaryValidator(vdrs[0]), - }, - currentSubnetValidators: []*validators.Validator{ - copySubnetValidator(vdrs[0]), - }, - weightDiffs: []map[ids.NodeID]*state.ValidatorWeightDiff{ - {}, - }, - pkDiffs: []map[ids.NodeID]*bls.PublicKey{ - { - vdrs[1].NodeID: vdrs[1].PublicKey, - }, - }, - expectedVdrSet: map[ids.NodeID]*validators.GetValidatorOutput{ - vdrs[0].NodeID: { - NodeID: vdrs[0].NodeID, - PublicKey: vdrs[0].PublicKey, - Weight: vdrs[0].Weight, - }, - }, - expectedErr: nil, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - require := require.New(t) - - // Mock the VM's validators - vdrs := validators.NewMockManager(ctrl) - vm.Validators = vdrs - mockSubnetVdrSet := validators.NewMockSet(ctrl) - mockSubnetVdrSet.EXPECT().List().Return(tt.currentSubnetValidators).AnyTimes() - vdrs.EXPECT().Get(tt.subnetID).Return(mockSubnetVdrSet, true).AnyTimes() - - mockPrimaryVdrSet := mockSubnetVdrSet - if tt.subnetID != constants.PrimaryNetworkID { - mockPrimaryVdrSet = validators.NewMockSet(ctrl) - vdrs.EXPECT().Get(constants.PrimaryNetworkID).Return(mockPrimaryVdrSet, true).AnyTimes() - } - for _, vdr := range tt.currentPrimaryNetworkValidators { - mockPrimaryVdrSet.EXPECT().Get(vdr.NodeID).Return(vdr, true).AnyTimes() - } - - // Mock the block manager - mockManager := blockexecutor.NewMockManager(ctrl) - vm.manager = mockManager - - // Mock the VM's state - mockState := state.NewMockState(ctrl) - vm.state = mockState - - // Tell state what diffs to report - for _, weightDiff := range tt.weightDiffs { - mockState.EXPECT().GetValidatorWeightDiffs(gomock.Any(), gomock.Any()).Return(weightDiff, nil) - } - - for _, pkDiff := range tt.pkDiffs { - mockState.EXPECT().GetValidatorPublicKeyDiffs(gomock.Any()).Return(pkDiff, nil) - } - - // Tell state last accepted block to report - mockTip := smcon.NewMockBlock(ctrl) - 
mockTip.EXPECT().Height().Return(tt.lastAcceptedHeight) - mockTipID := ids.GenerateTestID() - mockState.EXPECT().GetLastAccepted().Return(mockTipID) - mockManager.EXPECT().GetBlock(mockTipID).Return(mockTip, nil) - - // Compute validator set at previous height - gotVdrSet, err := vm.GetValidatorSet(context.Background(), tt.height, tt.subnetID) - require.ErrorIs(err, tt.expectedErr) - if tt.expectedErr != nil { - return - } - require.Equal(len(tt.expectedVdrSet), len(gotVdrSet)) - for nodeID, vdr := range tt.expectedVdrSet { - otherVdr, ok := gotVdrSet[nodeID] - require.True(ok) - require.Equal(vdr, otherVdr) - } - }) - } - - // Put these back so we don't need to mock calls made on Shutdown - vm.Validators = oldVdrs - vm.state = oldState -} - -func copyPrimaryValidator(vdr *validators.Validator) *validators.Validator { - newVdr := *vdr - return &newVdr -} - -func copySubnetValidator(vdr *validators.Validator) *validators.Validator { - newVdr := *vdr - newVdr.PublicKey = nil - return &newVdr -} - func TestRemovePermissionedValidatorDuringAddPending(t *testing.T) { require := require.New(t) - validatorStartTime := banffForkTime.Add(txexecutor.SyncBound).Add(1 * time.Second) + validatorStartTime := latestForkTime.Add(txexecutor.SyncBound).Add(1 * time.Second) validatorEndTime := validatorStartTime.Add(360 * 24 * time.Hour) - vm, _, _ := defaultVM() - + vm, _, _ := defaultVM(t, latestFork) vm.ctx.Lock.Lock() - defer func() { - err := vm.Shutdown(context.Background()) - require.NoError(err) - - vm.ctx.Lock.Unlock() - }() + defer vm.ctx.Lock.Unlock() - key, err := testKeyFactory.NewPrivateKey() + key, err := secp256k1.NewPrivateKey() require.NoError(err) id := key.PublicKey().Address() + nodeID := ids.GenerateTestNodeID() + sk, err := bls.NewSecretKey() + require.NoError(err) - addValidatorTx, err := vm.txBuilder.NewAddValidatorTx( + addValidatorTx, err := vm.txBuilder.NewAddPermissionlessValidatorTx( defaultMaxValidatorStake, uint64(validatorStartTime.Unix()), 
uint64(validatorEndTime.Unix()), - ids.NodeID(id), + nodeID, + signer.NewProofOfPossession(sk), id, reward.PercentDenominator, []*secp256k1.PrivateKey{keys[0]}, keys[0].Address(), + nil, ) require.NoError(err) - err = vm.Builder.AddUnverifiedTx(addValidatorTx) - require.NoError(err) + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), addValidatorTx)) + vm.ctx.Lock.Lock() // trigger block creation for the validator tx addValidatorBlock, err := vm.Builder.BuildBlock(context.Background()) @@ -2871,11 +2045,13 @@ func TestRemovePermissionedValidatorDuringAddPending(t *testing.T) { []ids.ShortID{id}, []*secp256k1.PrivateKey{keys[0]}, keys[0].Address(), + nil, ) require.NoError(err) - err = vm.Builder.AddUnverifiedTx(createSubnetTx) - require.NoError(err) + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), createSubnetTx)) + vm.ctx.Lock.Lock() // trigger block creation for the subnet tx createSubnetBlock, err := vm.Builder.BuildBlock(context.Background()) @@ -2888,22 +2064,24 @@ func TestRemovePermissionedValidatorDuringAddPending(t *testing.T) { defaultMaxValidatorStake, uint64(validatorStartTime.Unix()), uint64(validatorEndTime.Unix()), - ids.NodeID(id), + nodeID, createSubnetTx.ID(), []*secp256k1.PrivateKey{key, keys[1]}, keys[1].Address(), + nil, ) require.NoError(err) removeSubnetValidatorTx, err := vm.txBuilder.NewRemoveSubnetValidatorTx( - ids.NodeID(id), + nodeID, createSubnetTx.ID(), []*secp256k1.PrivateKey{key, keys[2]}, keys[2].Address(), + nil, ) require.NoError(err) - statelessBlock, err := blocks.NewBanffStandardBlock( + statelessBlock, err := block.NewBanffStandardBlock( vm.state.GetTimestamp(), createSubnetBlock.ID(), createSubnetBlock.Height()+1, @@ -2921,6 +2099,241 @@ func TestRemovePermissionedValidatorDuringAddPending(t *testing.T) { require.NoError(block.Accept(context.Background())) require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) - _, err = 
vm.state.GetPendingValidator(createSubnetTx.ID(), ids.NodeID(id)) + _, err = vm.state.GetPendingValidator(createSubnetTx.ID(), nodeID) require.ErrorIs(err, database.ErrNotFound) } + +func TestTransferSubnetOwnershipTx(t *testing.T) { + require := require.New(t) + vm, _, _ := defaultVM(t, latestFork) + vm.ctx.Lock.Lock() + defer vm.ctx.Lock.Unlock() + + // Create a subnet + createSubnetTx, err := vm.txBuilder.NewCreateSubnetTx( + 1, + []ids.ShortID{keys[0].PublicKey().Address()}, + []*secp256k1.PrivateKey{keys[0]}, + keys[0].Address(), + nil, + ) + require.NoError(err) + subnetID := createSubnetTx.ID() + + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), createSubnetTx)) + vm.ctx.Lock.Lock() + createSubnetBlock, err := vm.Builder.BuildBlock(context.Background()) + require.NoError(err) + + createSubnetRawBlock := createSubnetBlock.(*blockexecutor.Block).Block + require.IsType(&block.BanffStandardBlock{}, createSubnetRawBlock) + require.Contains(createSubnetRawBlock.Txs(), createSubnetTx) + + require.NoError(createSubnetBlock.Verify(context.Background())) + require.NoError(createSubnetBlock.Accept(context.Background())) + require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) + + subnetOwner, err := vm.state.GetSubnetOwner(subnetID) + require.NoError(err) + expectedOwner := &secp256k1fx.OutputOwners{ + Locktime: 0, + Threshold: 1, + Addrs: []ids.ShortID{ + keys[0].PublicKey().Address(), + }, + } + require.Equal(expectedOwner, subnetOwner) + + transferSubnetOwnershipTx, err := vm.txBuilder.NewTransferSubnetOwnershipTx( + subnetID, + 1, + []ids.ShortID{keys[1].PublicKey().Address()}, + []*secp256k1.PrivateKey{keys[0]}, + ids.ShortEmpty, // change addr + nil, + ) + require.NoError(err) + + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), transferSubnetOwnershipTx)) + vm.ctx.Lock.Lock() + transferSubnetOwnershipBlock, err := vm.Builder.BuildBlock(context.Background()) + require.NoError(err) + + 
transferSubnetOwnershipRawBlock := transferSubnetOwnershipBlock.(*blockexecutor.Block).Block + require.IsType(&block.BanffStandardBlock{}, transferSubnetOwnershipRawBlock) + require.Contains(transferSubnetOwnershipRawBlock.Txs(), transferSubnetOwnershipTx) + + require.NoError(transferSubnetOwnershipBlock.Verify(context.Background())) + require.NoError(transferSubnetOwnershipBlock.Accept(context.Background())) + require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) + + subnetOwner, err = vm.state.GetSubnetOwner(subnetID) + require.NoError(err) + expectedOwner = &secp256k1fx.OutputOwners{ + Locktime: 0, + Threshold: 1, + Addrs: []ids.ShortID{ + keys[1].PublicKey().Address(), + }, + } + require.Equal(expectedOwner, subnetOwner) +} + +func TestBaseTx(t *testing.T) { + require := require.New(t) + vm, _, _ := defaultVM(t, latestFork) + vm.ctx.Lock.Lock() + defer vm.ctx.Lock.Unlock() + + sendAmt := uint64(100000) + changeAddr := ids.ShortEmpty + + baseTx, err := vm.txBuilder.NewBaseTx( + sendAmt, + secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + keys[1].Address(), + }, + }, + []*secp256k1.PrivateKey{keys[0]}, + changeAddr, + nil, + ) + require.NoError(err) + + totalInputAmt := uint64(0) + key0InputAmt := uint64(0) + for inputID := range baseTx.Unsigned.InputIDs() { + utxo, err := vm.state.GetUTXO(inputID) + require.NoError(err) + require.IsType(&secp256k1fx.TransferOutput{}, utxo.Out) + castOut := utxo.Out.(*secp256k1fx.TransferOutput) + if castOut.AddressesSet().Equals(set.Of(keys[0].Address())) { + key0InputAmt += castOut.Amt + } + totalInputAmt += castOut.Amt + } + require.Equal(totalInputAmt, key0InputAmt) + + totalOutputAmt := uint64(0) + key0OutputAmt := uint64(0) + key1OutputAmt := uint64(0) + changeAddrOutputAmt := uint64(0) + for _, output := range baseTx.Unsigned.Outputs() { + require.IsType(&secp256k1fx.TransferOutput{}, output.Out) + castOut := output.Out.(*secp256k1fx.TransferOutput) + if 
castOut.AddressesSet().Equals(set.Of(keys[0].Address())) { + key0OutputAmt += castOut.Amt + } + if castOut.AddressesSet().Equals(set.Of(keys[1].Address())) { + key1OutputAmt += castOut.Amt + } + if castOut.AddressesSet().Equals(set.Of(changeAddr)) { + changeAddrOutputAmt += castOut.Amt + } + totalOutputAmt += castOut.Amt + } + require.Equal(totalOutputAmt, key0OutputAmt+key1OutputAmt+changeAddrOutputAmt) + + require.Equal(vm.TxFee, totalInputAmt-totalOutputAmt) + require.Equal(sendAmt, key1OutputAmt) + + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), baseTx)) + vm.ctx.Lock.Lock() + baseTxBlock, err := vm.Builder.BuildBlock(context.Background()) + require.NoError(err) + + baseTxRawBlock := baseTxBlock.(*blockexecutor.Block).Block + require.IsType(&block.BanffStandardBlock{}, baseTxRawBlock) + require.Contains(baseTxRawBlock.Txs(), baseTx) + + require.NoError(baseTxBlock.Verify(context.Background())) + require.NoError(baseTxBlock.Accept(context.Background())) + require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) +} + +func TestPruneMempool(t *testing.T) { + require := require.New(t) + vm, _, _ := defaultVM(t, latestFork) + vm.ctx.Lock.Lock() + defer vm.ctx.Lock.Unlock() + + // Create a tx that will be valid regardless of timestamp. + sendAmt := uint64(100000) + changeAddr := ids.ShortEmpty + + baseTx, err := vm.txBuilder.NewBaseTx( + sendAmt, + secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + keys[1].Address(), + }, + }, + []*secp256k1.PrivateKey{keys[0]}, + changeAddr, + nil, + ) + require.NoError(err) + + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), baseTx)) + vm.ctx.Lock.Lock() + + // [baseTx] should be in the mempool. + baseTxID := baseTx.ID() + _, ok := vm.Builder.Get(baseTxID) + require.True(ok) + + // Create a tx that will be invalid after time advancement. 
+ var ( + startTime = vm.clock.Time() + endTime = startTime.Add(vm.MinStakeDuration) + ) + + sk, err := bls.NewSecretKey() + require.NoError(err) + + addValidatorTx, err := vm.txBuilder.NewAddPermissionlessValidatorTx( + defaultMinValidatorStake, + uint64(startTime.Unix()), + uint64(endTime.Unix()), + ids.GenerateTestNodeID(), + signer.NewProofOfPossession(sk), + keys[2].Address(), + 20000, + []*secp256k1.PrivateKey{keys[1]}, + ids.ShortEmpty, + nil, + ) + require.NoError(err) + + vm.ctx.Lock.Unlock() + require.NoError(vm.issueTx(context.Background(), addValidatorTx)) + vm.ctx.Lock.Lock() + + // Advance clock to [endTime], making [addValidatorTx] invalid. + vm.clock.Set(endTime) + + // [addValidatorTx] and [baseTx] should still be in the mempool. + addValidatorTxID := addValidatorTx.ID() + _, ok = vm.Builder.Get(addValidatorTxID) + require.True(ok) + _, ok = vm.Builder.Get(baseTxID) + require.True(ok) + + vm.ctx.Lock.Unlock() + require.NoError(vm.pruneMempool()) + vm.ctx.Lock.Lock() + + // [addValidatorTx] should be ejected from the mempool. + // [baseTx] should still be in the mempool. + _, ok = vm.Builder.Get(addValidatorTxID) + require.False(ok) + _, ok = vm.Builder.Get(baseTxID) + require.True(ok) +} diff --git a/avalanchego/vms/platformvm/warp/README.md b/avalanchego/vms/platformvm/warp/README.md new file mode 100644 index 00000000..a21861bc --- /dev/null +++ b/avalanchego/vms/platformvm/warp/README.md @@ -0,0 +1,121 @@ +# Avalanche Warp Messaging + +Avalanche Warp Messaging (AWM) provides a primitive for cross-subnet communication on the Avalanche Network. + +The Avalanche P-Chain provides an index of every Subnet's validator set on the Avalanche Network, including the BLS public key of each validator (as of the [Banff Upgrade](https://github.com/ava-labs/avalanchego/releases/v1.9.0)). AWM utilizes the weighted validator sets stored on the P-Chain to build a cross-subnet communication protocol between any two Subnets on the Avalanche Network. 
+ +Any Virtual Machine (VM) on Avalanche can integrate Avalanche Warp Messaging to send and receive messages across Avalanche Subnets. + +## Background + +This README assumes familiarity with: + +- Avalanche P-Chain / [PlatformVM](../) +- [ProposerVM](../../proposervm/README.md) +- Basic familiarity with [BLS Multi-Signatures](https://crypto.stanford.edu/~dabo/pubs/papers/BLSmultisig.html) + +## BLS Multi-Signatures with Public-Key Aggregation + +Avalanche Warp Messaging utilizes BLS multi-signatures with public key aggregation in order to verify messages signed by another Subnet. When a validator joins a Subnet, the P-Chain records the validator's BLS public key and NodeID, as well as a proof of possession of the validator's BLS private key to defend against [rogue public-key attacks](https://crypto.stanford.edu/~dabo/pubs/papers/BLSmultisig.html#mjx-eqn-eqaggsame). + +AWM utilizes the validator set's weights and public keys to verify that an aggregate signature has sufficient weight signing the message from the source Subnet. + +BLS provides a way to aggregate signatures off chain into a single signature that can be efficiently verified on chain. + +## AWM Serialization + +Unsigned Message: +``` ++-----------------+----------+--------------------------+ +| network_id : uint32 | 4 bytes | ++-----------------+----------+--------------------------+ +| source_chain_id : [32]byte | 32 bytes | ++-----------------+----------+--------------------------+ +| payload : []byte | 4 + size(payload) | ++-----------------+----------+--------------------------+ + | 40 + size(payload) bytes| + +--------------------------+ +``` + +- `networkID` is the unique ID of an Avalanche Network (Mainnet/Testnet) and provides replay protection for BLS Signers across different Avalanche Networks +- `sourceChainID` is the hash of the transaction that created the blockchain on the Avalanche P-Chain. 
It serves as the unique identifier for the blockchain across the Avalanche Network so that each blockchain can only sign a message with its own id. +- `payload` provides an arbitrary byte array containing the contents of the message. VMs define their own message types to include in the `payload` + + +BitSetSignature: +``` ++-----------+----------+---------------------------+ +| type_id : uint32 | 4 bytes | ++-----------+----------+---------------------------+ +| signers : []byte | 4 + len(signers) | ++-----------+----------+---------------------------+ +| signature : [96]byte | 96 bytes | ++-----------+----------+---------------------------+ + | 104 + size(signers) bytes | + +---------------------------+ +``` + +- `typeID` is the ID of this signature type, which is `0x00000000` +- `signers` encodes a bitset of which validators' signatures are included (a bitset is a byte array where each bit indicates membership of the element at that index in the set) +- `signature` is an aggregated BLS Multi-Signature of the Unsigned Message + +BitSetSignatures are verified within the context of a specific P-Chain height. At any given P-Chain height, the PlatformVM serves a canonically ordered validator set for the source subnet (validator set is ordered lexicographically by the BLS public key's byte representation). The `signers` bitset encodes which validator signatures were included. A value of `1` at index `i` in `signers` bitset indicates that a corresponding signature from the same validator at index `i` in the canonical validator set was included in the aggregate signature. + +The bitset tells the verifier which BLS public keys should be aggregated to verify the warp message. 
+ +Signed Message: +``` ++------------------+------------------+-------------------------------------------------+ +| unsigned_message : UnsignedMessage | size(unsigned_message) | ++------------------+------------------+-------------------------------------------------+ +| signature : Signature | size(signature) | ++------------------+------------------+-------------------------------------------------+ + | size(unsigned_message) + size(signature) bytes | + +-------------------------------------------------+ +``` + +## Sending an Avalanche Warp Message + +A blockchain on Avalanche sends an Avalanche Warp Message by coming to agreement on the message that every validator should be willing to sign. As an example, the VM of a blockchain may define that once a block is accepted, the VM should be willing to sign a message including the block hash in the payload to attest to any other Subnet that the block was accepted. The contents of the payload, how to aggregate the signature (VM-to-VM communication, off-chain relayer, etc.), is left to the VM. + +Once the validator set of a blockchain is willing to sign an arbitrary message `M`, an aggregator performs the following process: + +1. Gather signatures of the message `M` from `N` validators (where the `N` validators meet the required threshold of stake on the destination chain) +2. Aggregate the `N` signatures into a multi-signature +3. Look up the canonical validator set at the P-Chain height where the message will be verified +4. Encode the selection of the `N` validators included in the signature in a bitset +5. Construct the signed message from the aggregate signature, bitset, and original unsigned message + +## Verifying / Receiving an Avalanche Warp Message + +Avalanche Warp Messages are verified within the context of a specific P-Chain height included in the [ProposerVM](../../proposervm/README.md)'s header. 
The P-Chain height is provided as context to the underlying VM when verifying the underlying VM's blocks (implemented by the optional interface [WithVerifyContext](../../../snow/engine/snowman/block/block_context_vm.go)). + +To verify the message, the underlying VM utilizes this `warp` package to perform the following steps: + +1. Lookup the canonical validator set of the Subnet sending the message at the P-Chain height +2. Filter the canonical validator set to only the validators claimed by the signature +3. Verify the weight of the included validators meets the required threshold defined by the receiving VM +4. Aggregate the public keys of the claimed validators into a single aggregate public key +5. Verify the aggregate signature of the unsigned message against the aggregate public key + +Once a message is verified, it is left to the VM to define the semantics of delivering a verified message. + +## Design Considerations + +### Processing Historical Avalanche Warp Messages + +Verifying an Avalanche Warp Message requires a lookup of validator sets at a specific P-Chain height. The P-Chain serves lookups maintaining validator set diffs that can be applied in-order to reconstruct the validator set of any Subnet at any height. + +As the P-Chain grows, the number of validator set diffs that need to be applied in order to reconstruct the validator set needed to verify an Avalanche Warp Message increases over time. + +Therefore, in order to support verifying historical Avalanche Warp Messages, VMs should provide a mechanism to determine whether an Avalanche Warp Message was treated as valid or invalid within a historical block. + +When nodes bootstrap in the future, they bootstrap blocks that have already been marked as accepted by the network, so they can assume the block was verified by the validators of the network when it was first accepted. 
+ +Therefore, the new bootstrapping node can assume the block was valid to determine whether an Avalanche Warp Message should be treated as valid/invalid within the execution of that block. + +Two strategies to provide that mechanism are: + +- Require warp message validity for transaction inclusion. If the transaction is included, the warp message must have passed verification. +- Include the results of warp message verification in the block itself. Use the results to determine which messages passed verification. + diff --git a/avalanchego/vms/platformvm/warp/codec.go b/avalanchego/vms/platformvm/warp/codec.go index 0213a670..6ef6e526 100644 --- a/avalanchego/vms/platformvm/warp/codec.go +++ b/avalanchego/vms/platformvm/warp/codec.go @@ -1,31 +1,30 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package warp import ( "math" + "time" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" - "github.com/ava-labs/avalanchego/utils/wrappers" + "github.com/ava-labs/avalanchego/utils" ) -const codecVersion = 0 +const CodecVersion = 0 -// Codec does serialization and deserialization for Warp messages. 
-var c codec.Manager +var Codec codec.Manager func init() { - c = codec.NewManager(math.MaxInt) - lc := linearcodec.NewCustomMaxLength(math.MaxInt32) + Codec = codec.NewManager(math.MaxInt) + lc := linearcodec.NewDefault(time.Time{}) - errs := wrappers.Errs{} - errs.Add( + err := utils.Err( lc.RegisterType(&BitSetSignature{}), - c.RegisterCodec(codecVersion, lc), + Codec.RegisterCodec(CodecVersion, lc), ) - if errs.Errored() { - panic(errs.Err) + if err != nil { + panic(err) } } diff --git a/avalanchego/vms/platformvm/warp/constants.go b/avalanchego/vms/platformvm/warp/constants.go index a91f5f39..723cdf50 100644 --- a/avalanchego/vms/platformvm/warp/constants.go +++ b/avalanchego/vms/platformvm/warp/constants.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package warp diff --git a/avalanchego/vms/platformvm/warp/gwarp/client.go b/avalanchego/vms/platformvm/warp/gwarp/client.go index 96446fc8..0b51a549 100644 --- a/avalanchego/vms/platformvm/warp/gwarp/client.go +++ b/avalanchego/vms/platformvm/warp/gwarp/client.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package gwarp @@ -23,9 +23,9 @@ func NewClient(client pb.SignerClient) *Client { func (c *Client) Sign(unsignedMsg *warp.UnsignedMessage) ([]byte, error) { resp, err := c.client.Sign(context.Background(), &pb.SignRequest{ - SourceChainId: unsignedMsg.SourceChainID[:], - DestinationChainId: unsignedMsg.DestinationChainID[:], - Payload: unsignedMsg.Payload, + NetworkId: unsignedMsg.NetworkID, + SourceChainId: unsignedMsg.SourceChainID[:], + Payload: unsignedMsg.Payload, }) if err != nil { return nil, err diff --git a/avalanchego/vms/platformvm/warp/gwarp/server.go b/avalanchego/vms/platformvm/warp/gwarp/server.go index f1ac8964..7857f4e0 100644 --- a/avalanchego/vms/platformvm/warp/gwarp/server.go +++ b/avalanchego/vms/platformvm/warp/gwarp/server.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package gwarp @@ -29,14 +29,9 @@ func (s *Server) Sign(_ context.Context, unsignedMsg *pb.SignRequest) (*pb.SignR return nil, err } - destinationChainID, err := ids.ToID(unsignedMsg.DestinationChainId) - if err != nil { - return nil, err - } - msg, err := warp.NewUnsignedMessage( + unsignedMsg.NetworkId, sourceChainID, - destinationChainID, unsignedMsg.Payload, ) if err != nil { diff --git a/avalanchego/vms/platformvm/warp/gwarp/signer_test.go b/avalanchego/vms/platformvm/warp/gwarp/signer_test.go index ec443415..31c7b3e9 100644 --- a/avalanchego/vms/platformvm/warp/gwarp/signer_test.go +++ b/avalanchego/vms/platformvm/warp/gwarp/signer_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package gwarp @@ -9,6 +9,7 @@ import ( "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/vms/platformvm/warp" "github.com/ava-labs/avalanchego/vms/rpcchainvm/grpcutils" @@ -17,11 +18,11 @@ import ( ) type testSigner struct { - client *Client - server warp.Signer - sk *bls.SecretKey - chainID ids.ID - closeFn func() + client *Client + server warp.Signer + sk *bls.SecretKey + networkID uint32 + chainID ids.ID } func setupSigner(t testing.TB) *testSigner { @@ -33,15 +34,14 @@ func setupSigner(t testing.TB) *testSigner { chainID := ids.GenerateTestID() s := &testSigner{ - server: warp.NewSigner(sk, chainID), - sk: sk, - chainID: chainID, + server: warp.NewSigner(sk, constants.UnitTestID, chainID), + sk: sk, + networkID: constants.UnitTestID, + chainID: chainID, } listener, err := grpcutils.NewListener() - if err != nil { - t.Fatalf("Failed to create listener: %s", err) - } + require.NoError(err) serverCloser := grpcutils.ServerCloser{} server := grpcutils.NewServer() @@ -54,18 +54,21 @@ func setupSigner(t testing.TB) *testSigner { require.NoError(err) s.client = NewClient(pb.NewSignerClient(conn)) - s.closeFn = func() { + + t.Cleanup(func() { serverCloser.Stop() _ = conn.Close() _ = listener.Close() - } + }) + return s } func TestInterface(t *testing.T) { - for _, test := range warp.SignerTests { - s := setupSigner(t) - test(t, s.client, s.sk, s.chainID) - s.closeFn() + for name, test := range warp.SignerTests { + t.Run(name, func(t *testing.T) { + s := setupSigner(t) + test(t, s.client, s.sk, s.networkID, s.chainID) + }) } } diff --git a/avalanchego/vms/platformvm/warp/message.go b/avalanchego/vms/platformvm/warp/message.go index 34850aed..b9be351b 100644 --- a/avalanchego/vms/platformvm/warp/message.go +++ b/avalanchego/vms/platformvm/warp/message.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, 
Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package warp +import "fmt" + // Message defines the standard format for a Warp message. type Message struct { UnsignedMessage `serialize:"true"` @@ -28,7 +30,7 @@ func ParseMessage(b []byte) (*Message, error) { msg := &Message{ bytes: b, } - _, err := c.Unmarshal(b, msg) + _, err := Codec.Unmarshal(b, msg) if err != nil { return nil, err } @@ -38,7 +40,7 @@ func ParseMessage(b []byte) (*Message, error) { // Initialize recalculates the result of Bytes(). It does not call Initialize() // on the UnsignedMessage. func (m *Message) Initialize() error { - bytes, err := c.Marshal(codecVersion, m) + bytes, err := Codec.Marshal(CodecVersion, m) m.bytes = bytes return err } @@ -49,3 +51,7 @@ func (m *Message) Initialize() error { func (m *Message) Bytes() []byte { return m.bytes } + +func (m *Message) String() string { + return fmt.Sprintf("WarpMessage(%s, %s)", &m.UnsignedMessage, m.Signature) +} diff --git a/avalanchego/vms/platformvm/warp/message_test.go b/avalanchego/vms/platformvm/warp/message_test.go index 94375530..99a50b36 100644 --- a/avalanchego/vms/platformvm/warp/message_test.go +++ b/avalanchego/vms/platformvm/warp/message_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package warp @@ -8,8 +8,9 @@ import ( "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/bls" ) @@ -17,7 +18,7 @@ func TestMessage(t *testing.T) { require := require.New(t) unsignedMsg, err := NewUnsignedMessage( - ids.GenerateTestID(), + constants.UnitTestID, ids.GenerateTestID(), []byte("payload"), ) @@ -39,6 +40,9 @@ func TestMessage(t *testing.T) { } func TestParseMessageJunk(t *testing.T) { - _, err := ParseMessage(utils.RandomBytes(1024)) - require.Error(t, err) + require := require.New(t) + + bytes := []byte{0, 1, 2, 3, 4, 5, 6, 7} + _, err := ParseMessage(bytes) + require.ErrorIs(err, codec.ErrUnknownVersion) } diff --git a/avalanchego/vms/platformvm/warp/payload/README.md b/avalanchego/vms/platformvm/warp/payload/README.md new file mode 100644 index 00000000..2da32ee5 --- /dev/null +++ b/avalanchego/vms/platformvm/warp/payload/README.md @@ -0,0 +1,47 @@ +# Payload + +An Avalanche Unsigned Warp Message already includes a `networkID`, `sourceChainID`, and `payload` field. The `payload` field can be parsed into one of the types included in this package to be further handled by the VM. + +## Hash + +Hash: +``` ++-----------------+----------+-----------+ +| codecID : uint16 | 2 bytes | ++-----------------+----------+-----------+ +| typeID : uint32 | 4 bytes | ++-----------------+----------+-----------+ +| hash : [32]byte | 32 bytes | ++-----------------+----------+-----------+ + | 38 bytes | + +-----------+ +``` + +- `codecID` is the codec version used to serialize the payload and is hardcoded to `0x0000` +- `typeID` is the payload type identifier and is `0x00000000` for `Hash` +- `hash` is a hash from the `sourceChainID`. The format of the expected preimage is chain specific. 
Some examples for valid hash values are: + - root of a merkle tree + - accepted block hash on the source chain + - accepted transaction hash on the source chain + +## AddressedCall + +AddressedCall: +``` ++---------------------+--------+----------------------------------+ +| codecID : uint16 | 2 bytes | ++---------------------+--------+----------------------------------+ +| typeID : uint32 | 4 bytes | ++---------------------+--------+----------------------------------+ +| sourceAddress : []byte | 4 + len(address) | ++---------------------+--------+----------------------------------+ +| payload : []byte | 4 + len(payload) | ++---------------------+--------+----------------------------------+ + | 14 + len(payload) + len(address) | + +----------------------------------+ +``` + +- `codecID` is the codec version used to serialize the payload and is hardcoded to `0x0000` +- `typeID` is the payload type identifier and is `0x00000001` for `AddressedCall` +- `sourceAddress` is the address that sent this message from the source chain +- `payload` is an arbitrary byte array payload diff --git a/avalanchego/vms/platformvm/warp/payload/addressed_call.go b/avalanchego/vms/platformvm/warp/payload/addressed_call.go new file mode 100644 index 00000000..b3617ce4 --- /dev/null +++ b/avalanchego/vms/platformvm/warp/payload/addressed_call.go @@ -0,0 +1,53 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package payload + +import "fmt" + +var _ Payload = (*AddressedCall)(nil) + +// AddressedCall defines the format for delivering a call across VMs including a +// source address and a payload. +// +// Note: If a destination address is expected, it should be encoded in the +// payload. +type AddressedCall struct { + SourceAddress []byte `serialize:"true"` + Payload []byte `serialize:"true"` + + bytes []byte +} + +// NewAddressedCall creates a new *AddressedCall and initializes it. 
+func NewAddressedCall(sourceAddress []byte, payload []byte) (*AddressedCall, error) { + ap := &AddressedCall{ + SourceAddress: sourceAddress, + Payload: payload, + } + return ap, initialize(ap) +} + +// ParseAddressedCall converts a slice of bytes into an initialized +// AddressedCall. +func ParseAddressedCall(b []byte) (*AddressedCall, error) { + payloadIntf, err := Parse(b) + if err != nil { + return nil, err + } + payload, ok := payloadIntf.(*AddressedCall) + if !ok { + return nil, fmt.Errorf("%w: %T", errWrongType, payloadIntf) + } + return payload, nil +} + +// Bytes returns the binary representation of this payload. It assumes that the +// payload is initialized from either NewAddressedCall or Parse. +func (a *AddressedCall) Bytes() []byte { + return a.bytes +} + +func (a *AddressedCall) initialize(bytes []byte) { + a.bytes = bytes +} diff --git a/avalanchego/vms/platformvm/warp/payload/addressed_call_test.go b/avalanchego/vms/platformvm/warp/payload/addressed_call_test.go new file mode 100644 index 00000000..77a885d8 --- /dev/null +++ b/avalanchego/vms/platformvm/warp/payload/addressed_call_test.go @@ -0,0 +1,46 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package payload + +import ( + "encoding/base64" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/codec" + "github.com/ava-labs/avalanchego/ids" +) + +func TestAddressedCall(t *testing.T) { + require := require.New(t) + shortID := ids.GenerateTestShortID() + + addressedPayload, err := NewAddressedCall( + shortID[:], + []byte{1, 2, 3}, + ) + require.NoError(err) + + addressedPayloadBytes := addressedPayload.Bytes() + parsedAddressedPayload, err := ParseAddressedCall(addressedPayloadBytes) + require.NoError(err) + require.Equal(addressedPayload, parsedAddressedPayload) +} + +func TestParseAddressedCallJunk(t *testing.T) { + _, err := ParseAddressedCall(junkBytes) + require.ErrorIs(t, err, codec.ErrUnknownVersion) +} + +func TestAddressedCallBytes(t *testing.T) { + require := require.New(t) + base64Payload := "AAAAAAABAAAAEAECAwAAAAAAAAAAAAAAAAAAAAADCgsM" + addressedPayload, err := NewAddressedCall( + []byte{1, 2, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + []byte{10, 11, 12}, + ) + require.NoError(err) + require.Equal(base64Payload, base64.StdEncoding.EncodeToString(addressedPayload.Bytes())) +} diff --git a/avalanchego/vms/platformvm/warp/payload/codec.go b/avalanchego/vms/platformvm/warp/payload/codec.go new file mode 100644 index 00000000..d188029a --- /dev/null +++ b/avalanchego/vms/platformvm/warp/payload/codec.go @@ -0,0 +1,35 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package payload + +import ( + "time" + + "github.com/ava-labs/avalanchego/codec" + "github.com/ava-labs/avalanchego/codec/linearcodec" + "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/units" +) + +const ( + CodecVersion = 0 + + MaxMessageSize = 24 * units.KiB +) + +var Codec codec.Manager + +func init() { + Codec = codec.NewManager(MaxMessageSize) + lc := linearcodec.NewDefault(time.Time{}) + + err := utils.Err( + lc.RegisterType(&Hash{}), + lc.RegisterType(&AddressedCall{}), + Codec.RegisterCodec(CodecVersion, lc), + ) + if err != nil { + panic(err) + } +} diff --git a/avalanchego/vms/platformvm/warp/payload/hash.go b/avalanchego/vms/platformvm/warp/payload/hash.go new file mode 100644 index 00000000..330f74fd --- /dev/null +++ b/avalanchego/vms/platformvm/warp/payload/hash.go @@ -0,0 +1,49 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package payload + +import ( + "fmt" + + "github.com/ava-labs/avalanchego/ids" +) + +var _ Payload = (*Hash)(nil) + +type Hash struct { + Hash ids.ID `serialize:"true"` + + bytes []byte +} + +// NewHash creates a new *Hash and initializes it. +func NewHash(hash ids.ID) (*Hash, error) { + bhp := &Hash{ + Hash: hash, + } + return bhp, initialize(bhp) +} + +// ParseHash converts a slice of bytes into an initialized Hash. +func ParseHash(b []byte) (*Hash, error) { + payloadIntf, err := Parse(b) + if err != nil { + return nil, err + } + payload, ok := payloadIntf.(*Hash) + if !ok { + return nil, fmt.Errorf("%w: %T", errWrongType, payloadIntf) + } + return payload, nil +} + +// Bytes returns the binary representation of this payload. It assumes that the +// payload is initialized from either NewHash or Parse. 
+func (b *Hash) Bytes() []byte { + return b.bytes +} + +func (b *Hash) initialize(bytes []byte) { + b.bytes = bytes +} diff --git a/avalanchego/vms/platformvm/warp/payload/hash_test.go b/avalanchego/vms/platformvm/warp/payload/hash_test.go new file mode 100644 index 00000000..d58fe5e6 --- /dev/null +++ b/avalanchego/vms/platformvm/warp/payload/hash_test.go @@ -0,0 +1,39 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package payload + +import ( + "encoding/base64" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/codec" + "github.com/ava-labs/avalanchego/ids" +) + +func TestHash(t *testing.T) { + require := require.New(t) + + hashPayload, err := NewHash(ids.GenerateTestID()) + require.NoError(err) + + hashPayloadBytes := hashPayload.Bytes() + parsedHashPayload, err := ParseHash(hashPayloadBytes) + require.NoError(err) + require.Equal(hashPayload, parsedHashPayload) +} + +func TestParseHashJunk(t *testing.T) { + _, err := ParseHash(junkBytes) + require.ErrorIs(t, err, codec.ErrUnknownVersion) +} + +func TestHashBytes(t *testing.T) { + require := require.New(t) + base64Payload := "AAAAAAAABAUGAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + hashPayload, err := NewHash(ids.ID{4, 5, 6}) + require.NoError(err) + require.Equal(base64Payload, base64.StdEncoding.EncodeToString(hashPayload.Bytes())) +} diff --git a/avalanchego/vms/platformvm/warp/payload/payload.go b/avalanchego/vms/platformvm/warp/payload/payload.go new file mode 100644 index 00000000..c5c09464 --- /dev/null +++ b/avalanchego/vms/platformvm/warp/payload/payload.go @@ -0,0 +1,39 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package payload + +import ( + "errors" + "fmt" +) + +var errWrongType = errors.New("wrong payload type") + +// Payload provides a common interface for all payloads implemented by this +// package. 
+type Payload interface { + // Bytes returns the binary representation of this payload. + Bytes() []byte + + // initialize the payload with the provided binary representation. + initialize(b []byte) +} + +func Parse(bytes []byte) (Payload, error) { + var payload Payload + if _, err := Codec.Unmarshal(bytes, &payload); err != nil { + return nil, err + } + payload.initialize(bytes) + return payload, nil +} + +func initialize(p Payload) error { + bytes, err := Codec.Marshal(CodecVersion, &p) + if err != nil { + return fmt.Errorf("couldn't marshal %T payload: %w", p, err) + } + p.initialize(bytes) + return nil +} diff --git a/avalanchego/vms/platformvm/warp/payload/payload_test.go b/avalanchego/vms/platformvm/warp/payload/payload_test.go new file mode 100644 index 00000000..86b584ae --- /dev/null +++ b/avalanchego/vms/platformvm/warp/payload/payload_test.go @@ -0,0 +1,60 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package payload + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/codec" + "github.com/ava-labs/avalanchego/ids" +) + +var junkBytes = []byte{0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88} + +func TestParseJunk(t *testing.T) { + require := require.New(t) + _, err := Parse(junkBytes) + require.ErrorIs(err, codec.ErrUnknownVersion) +} + +func TestParseWrongPayloadType(t *testing.T) { + require := require.New(t) + hashPayload, err := NewHash(ids.GenerateTestID()) + require.NoError(err) + + shortID := ids.GenerateTestShortID() + addressedPayload, err := NewAddressedCall( + shortID[:], + []byte{1, 2, 3}, + ) + require.NoError(err) + + _, err = ParseAddressedCall(hashPayload.Bytes()) + require.ErrorIs(err, errWrongType) + + _, err = ParseHash(addressedPayload.Bytes()) + require.ErrorIs(err, errWrongType) +} + +func TestParse(t *testing.T) { + require := require.New(t) + hashPayload, err := NewHash(ids.ID{4, 5, 6}) + require.NoError(err) + + 
parsedHashPayload, err := Parse(hashPayload.Bytes()) + require.NoError(err) + require.Equal(hashPayload, parsedHashPayload) + + addressedPayload, err := NewAddressedCall( + []byte{1, 2, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + []byte{10, 11, 12}, + ) + require.NoError(err) + + parsedAddressedPayload, err := Parse(addressedPayload.Bytes()) + require.NoError(err) + require.Equal(addressedPayload, parsedAddressedPayload) +} diff --git a/avalanchego/vms/platformvm/warp/signature.go b/avalanchego/vms/platformvm/warp/signature.go index df3973cc..2df8d387 100644 --- a/avalanchego/vms/platformvm/warp/signature.go +++ b/avalanchego/vms/platformvm/warp/signature.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package warp @@ -24,6 +24,8 @@ var ( ) type Signature interface { + fmt.Stringer + // NumSigners is the number of [bls.PublicKeys] that participated in the // [Signature]. 
This is exposed because users of these signatures typically // impose a verification fee that is a function of the number of @@ -37,6 +39,7 @@ type Signature interface { Verify( ctx context.Context, msg *UnsignedMessage, + networkID uint32, pChainState validators.State, pChainHeight uint64, quorumNum uint64, @@ -67,11 +70,16 @@ func (s *BitSetSignature) NumSigners() (int, error) { func (s *BitSetSignature) Verify( ctx context.Context, msg *UnsignedMessage, + networkID uint32, pChainState validators.State, pChainHeight uint64, quorumNum uint64, quorumDen uint64, ) error { + if msg.NetworkID != networkID { + return ErrWrongNetworkID + } + subnetID, err := pChainState.GetSubnetID(ctx, msg.SourceChainID) if err != nil { return err @@ -115,7 +123,7 @@ func (s *BitSetSignature) Verify( // Parse the aggregate signature aggSig, err := bls.SignatureFromBytes(s.Signature[:]) if err != nil { - return fmt.Errorf("%w: %v", ErrParseSignature, err) + return fmt.Errorf("%w: %w", ErrParseSignature, err) } // Create the aggregate public key @@ -132,6 +140,10 @@ func (s *BitSetSignature) Verify( return nil } +func (s *BitSetSignature) String() string { + return fmt.Sprintf("BitSetSignature(Signers = %x, Signature = %x)", s.Signers, s.Signature) +} + // VerifyWeight returns [nil] if [sigWeight] is at least [quorumNum]/[quorumDen] // of [totalWeight]. // If [sigWeight >= totalWeight * quorumNum / quorumDen] then return [nil] diff --git a/avalanchego/vms/platformvm/warp/signature_test.go b/avalanchego/vms/platformvm/warp/signature_test.go index a24fb176..56ab16c4 100644 --- a/avalanchego/vms/platformvm/warp/signature_test.go +++ b/avalanchego/vms/platformvm/warp/signature_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package warp @@ -9,13 +9,13 @@ import ( "math" "testing" - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/set" ) @@ -38,8 +38,8 @@ type testValidator struct { vdr *Validator } -func (v *testValidator) Less(o *testValidator) bool { - return v.vdr.Less(o.vdr) +func (v *testValidator) Compare(o *testValidator) int { + return v.vdr.Compare(o.vdr) } func newTestValidator() *testValidator { @@ -55,7 +55,7 @@ func newTestValidator() *testValidator { sk: sk, vdr: &Validator{ PublicKey: pk, - PublicKeyBytes: bls.PublicKeyToBytes(pk), + PublicKeyBytes: bls.SerializePublicKey(pk), Weight: 3, NodeIDs: []ids.NodeID{nodeID}, }, @@ -155,6 +155,7 @@ func TestSignatureVerification(t *testing.T) { tests := []struct { name string + networkID uint32 stateF func(*gomock.Controller) validators.State quorumNum uint64 quorumDen uint64 @@ -162,7 +163,8 @@ func TestSignatureVerification(t *testing.T) { err error }{ { - name: "can't get subnetID", + name: "can't get subnetID", + networkID: constants.UnitTestID, stateF: func(ctrl *gomock.Controller) validators.State { state := validators.NewMockState(ctrl) state.EXPECT().GetSubnetID(gomock.Any(), sourceChainID).Return(subnetID, errTest) @@ -172,8 +174,8 @@ func TestSignatureVerification(t *testing.T) { quorumDen: 2, msgF: func(require *require.Assertions) *Message { unsignedMsg, err := NewUnsignedMessage( + constants.UnitTestID, sourceChainID, - ids.Empty, nil, ) require.NoError(err) @@ -188,7 +190,8 @@ func TestSignatureVerification(t *testing.T) { err: errTest, }, { - name: "can't get validator set", + name: "can't get validator set", + networkID: constants.UnitTestID, stateF: func(ctrl *gomock.Controller) validators.State 
{ state := validators.NewMockState(ctrl) state.EXPECT().GetSubnetID(gomock.Any(), sourceChainID).Return(subnetID, nil) @@ -199,8 +202,8 @@ func TestSignatureVerification(t *testing.T) { quorumDen: 2, msgF: func(require *require.Assertions) *Message { unsignedMsg, err := NewUnsignedMessage( + constants.UnitTestID, sourceChainID, - ids.Empty, nil, ) require.NoError(err) @@ -215,7 +218,8 @@ func TestSignatureVerification(t *testing.T) { err: errTest, }, { - name: "weight overflow", + name: "weight overflow", + networkID: constants.UnitTestID, stateF: func(ctrl *gomock.Controller) validators.State { state := validators.NewMockState(ctrl) state.EXPECT().GetSubnetID(gomock.Any(), sourceChainID).Return(subnetID, nil) @@ -238,6 +242,7 @@ func TestSignatureVerification(t *testing.T) { msgF: func(*require.Assertions) *Message { return &Message{ UnsignedMessage: UnsignedMessage{ + NetworkID: constants.UnitTestID, SourceChainID: sourceChainID, }, Signature: &BitSetSignature{ @@ -248,7 +253,8 @@ func TestSignatureVerification(t *testing.T) { err: ErrWeightOverflow, }, { - name: "invalid bit set index", + name: "invalid bit set index", + networkID: constants.UnitTestID, stateF: func(ctrl *gomock.Controller) validators.State { state := validators.NewMockState(ctrl) state.EXPECT().GetSubnetID(gomock.Any(), sourceChainID).Return(subnetID, nil) @@ -259,8 +265,8 @@ func TestSignatureVerification(t *testing.T) { quorumDen: 2, msgF: func(require *require.Assertions) *Message { unsignedMsg, err := NewUnsignedMessage( + constants.UnitTestID, sourceChainID, - ids.Empty, []byte{1, 2, 3}, ) require.NoError(err) @@ -278,7 +284,8 @@ func TestSignatureVerification(t *testing.T) { err: ErrInvalidBitSet, }, { - name: "unknown index", + name: "unknown index", + networkID: constants.UnitTestID, stateF: func(ctrl *gomock.Controller) validators.State { state := validators.NewMockState(ctrl) state.EXPECT().GetSubnetID(gomock.Any(), sourceChainID).Return(subnetID, nil) @@ -289,8 +296,8 @@ func 
TestSignatureVerification(t *testing.T) { quorumDen: 2, msgF: func(require *require.Assertions) *Message { unsignedMsg, err := NewUnsignedMessage( + constants.UnitTestID, sourceChainID, - ids.Empty, []byte{1, 2, 3}, ) require.NoError(err) @@ -311,7 +318,8 @@ func TestSignatureVerification(t *testing.T) { err: ErrUnknownValidator, }, { - name: "insufficient weight", + name: "insufficient weight", + networkID: constants.UnitTestID, stateF: func(ctrl *gomock.Controller) validators.State { state := validators.NewMockState(ctrl) state.EXPECT().GetSubnetID(gomock.Any(), sourceChainID).Return(subnetID, nil) @@ -322,8 +330,8 @@ func TestSignatureVerification(t *testing.T) { quorumDen: 1, msgF: func(require *require.Assertions) *Message { unsignedMsg, err := NewUnsignedMessage( + constants.UnitTestID, sourceChainID, - ids.Empty, []byte{1, 2, 3}, ) require.NoError(err) @@ -355,7 +363,8 @@ func TestSignatureVerification(t *testing.T) { err: ErrInsufficientWeight, }, { - name: "can't parse sig", + name: "can't parse sig", + networkID: constants.UnitTestID, stateF: func(ctrl *gomock.Controller) validators.State { state := validators.NewMockState(ctrl) state.EXPECT().GetSubnetID(gomock.Any(), sourceChainID).Return(subnetID, nil) @@ -366,8 +375,8 @@ func TestSignatureVerification(t *testing.T) { quorumDen: 2, msgF: func(require *require.Assertions) *Message { unsignedMsg, err := NewUnsignedMessage( + constants.UnitTestID, sourceChainID, - ids.Empty, []byte{1, 2, 3}, ) require.NoError(err) @@ -389,7 +398,8 @@ func TestSignatureVerification(t *testing.T) { err: ErrParseSignature, }, { - name: "no validators", + name: "no validators", + networkID: constants.UnitTestID, stateF: func(ctrl *gomock.Controller) validators.State { state := validators.NewMockState(ctrl) state.EXPECT().GetSubnetID(gomock.Any(), sourceChainID).Return(subnetID, nil) @@ -400,8 +410,8 @@ func TestSignatureVerification(t *testing.T) { quorumDen: 2, msgF: func(require *require.Assertions) *Message { unsignedMsg, 
err := NewUnsignedMessage( + constants.UnitTestID, sourceChainID, - ids.Empty, []byte{1, 2, 3}, ) require.NoError(err) @@ -424,7 +434,8 @@ func TestSignatureVerification(t *testing.T) { err: bls.ErrNoPublicKeys, }, { - name: "invalid signature (substitute)", + name: "invalid signature (substitute)", + networkID: constants.UnitTestID, stateF: func(ctrl *gomock.Controller) validators.State { state := validators.NewMockState(ctrl) state.EXPECT().GetSubnetID(gomock.Any(), sourceChainID).Return(subnetID, nil) @@ -435,8 +446,8 @@ func TestSignatureVerification(t *testing.T) { quorumDen: 5, msgF: func(require *require.Assertions) *Message { unsignedMsg, err := NewUnsignedMessage( + constants.UnitTestID, sourceChainID, - ids.Empty, []byte{1, 2, 3}, ) require.NoError(err) @@ -468,7 +479,8 @@ func TestSignatureVerification(t *testing.T) { err: ErrInvalidSignature, }, { - name: "invalid signature (missing one)", + name: "invalid signature (missing one)", + networkID: constants.UnitTestID, stateF: func(ctrl *gomock.Controller) validators.State { state := validators.NewMockState(ctrl) state.EXPECT().GetSubnetID(gomock.Any(), sourceChainID).Return(subnetID, nil) @@ -479,8 +491,8 @@ func TestSignatureVerification(t *testing.T) { quorumDen: 5, msgF: func(require *require.Assertions) *Message { unsignedMsg, err := NewUnsignedMessage( + constants.UnitTestID, sourceChainID, - ids.Empty, []byte{1, 2, 3}, ) require.NoError(err) @@ -508,7 +520,8 @@ func TestSignatureVerification(t *testing.T) { err: ErrInvalidSignature, }, { - name: "invalid signature (extra one)", + name: "invalid signature (extra one)", + networkID: constants.UnitTestID, stateF: func(ctrl *gomock.Controller) validators.State { state := validators.NewMockState(ctrl) state.EXPECT().GetSubnetID(gomock.Any(), sourceChainID).Return(subnetID, nil) @@ -519,8 +532,8 @@ func TestSignatureVerification(t *testing.T) { quorumDen: 5, msgF: func(require *require.Assertions) *Message { unsignedMsg, err := NewUnsignedMessage( + 
constants.UnitTestID, sourceChainID, - ids.Empty, []byte{1, 2, 3}, ) require.NoError(err) @@ -553,7 +566,8 @@ func TestSignatureVerification(t *testing.T) { err: ErrInvalidSignature, }, { - name: "valid signature", + name: "valid signature", + networkID: constants.UnitTestID, stateF: func(ctrl *gomock.Controller) validators.State { state := validators.NewMockState(ctrl) state.EXPECT().GetSubnetID(gomock.Any(), sourceChainID).Return(subnetID, nil) @@ -564,8 +578,8 @@ func TestSignatureVerification(t *testing.T) { quorumDen: 2, msgF: func(require *require.Assertions) *Message { unsignedMsg, err := NewUnsignedMessage( + constants.UnitTestID, sourceChainID, - ids.Empty, []byte{1, 2, 3}, ) require.NoError(err) @@ -597,7 +611,8 @@ func TestSignatureVerification(t *testing.T) { err: nil, }, { - name: "valid signature (boundary)", + name: "valid signature (boundary)", + networkID: constants.UnitTestID, stateF: func(ctrl *gomock.Controller) validators.State { state := validators.NewMockState(ctrl) state.EXPECT().GetSubnetID(gomock.Any(), sourceChainID).Return(subnetID, nil) @@ -608,8 +623,8 @@ func TestSignatureVerification(t *testing.T) { quorumDen: 3, msgF: func(require *require.Assertions) *Message { unsignedMsg, err := NewUnsignedMessage( + constants.UnitTestID, sourceChainID, - ids.Empty, []byte{1, 2, 3}, ) require.NoError(err) @@ -641,7 +656,8 @@ func TestSignatureVerification(t *testing.T) { err: nil, }, { - name: "valid signature (missing key)", + name: "valid signature (missing key)", + networkID: constants.UnitTestID, stateF: func(ctrl *gomock.Controller) validators.State { state := validators.NewMockState(ctrl) state.EXPECT().GetSubnetID(gomock.Any(), sourceChainID).Return(subnetID, nil) @@ -668,8 +684,8 @@ func TestSignatureVerification(t *testing.T) { quorumDen: 3, msgF: func(require *require.Assertions) *Message { unsignedMsg, err := NewUnsignedMessage( + constants.UnitTestID, sourceChainID, - ids.Empty, []byte{1, 2, 3}, ) require.NoError(err) @@ -702,7 +718,8 
@@ func TestSignatureVerification(t *testing.T) { err: nil, }, { - name: "valid signature (duplicate key)", + name: "valid signature (duplicate key)", + networkID: constants.UnitTestID, stateF: func(ctrl *gomock.Controller) validators.State { state := validators.NewMockState(ctrl) state.EXPECT().GetSubnetID(gomock.Any(), sourceChainID).Return(subnetID, nil) @@ -729,8 +746,8 @@ func TestSignatureVerification(t *testing.T) { quorumDen: 3, msgF: func(require *require.Assertions) *Message { unsignedMsg, err := NewUnsignedMessage( + constants.UnitTestID, sourceChainID, - ids.Empty, []byte{1, 2, 3}, ) require.NoError(err) @@ -760,13 +777,55 @@ func TestSignatureVerification(t *testing.T) { }, err: nil, }, + { + name: "incorrect networkID", + networkID: constants.UnitTestID, + stateF: func(ctrl *gomock.Controller) validators.State { + state := validators.NewMockState(ctrl) + return state + }, + quorumNum: 1, + quorumDen: 2, + msgF: func(require *require.Assertions) *Message { + unsignedMsg, err := NewUnsignedMessage( + constants.UnitTestID+1, + sourceChainID, + []byte{1, 2, 3}, + ) + require.NoError(err) + + // [signers] has weight from [vdr[1], vdr[2]], + // which is 6, which is greater than 4.5 + signers := set.NewBits() + signers.Add(1) + signers.Add(2) + + unsignedBytes := unsignedMsg.Bytes() + vdr1Sig := bls.Sign(testVdrs[1].sk, unsignedBytes) + vdr2Sig := bls.Sign(testVdrs[2].sk, unsignedBytes) + aggSig, err := bls.AggregateSignatures([]*bls.Signature{vdr1Sig, vdr2Sig}) + require.NoError(err) + aggSigBytes := [bls.SignatureLen]byte{} + copy(aggSigBytes[:], bls.SignatureToBytes(aggSig)) + + msg, err := NewMessage( + unsignedMsg, + &BitSetSignature{ + Signers: signers.Bytes(), + Signature: aggSigBytes, + }, + ) + require.NoError(err) + return msg + }, + err: ErrWrongNetworkID, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() msg := tt.msgF(require) pChainState := 
tt.stateF(ctrl) @@ -774,6 +833,7 @@ func TestSignatureVerification(t *testing.T) { err := msg.Signature.Verify( context.Background(), &msg.UnsignedMessage, + tt.networkID, pChainState, pChainHeight, tt.quorumNum, diff --git a/avalanchego/vms/platformvm/warp/signer.go b/avalanchego/vms/platformvm/warp/signer.go index dddb0816..8372aef0 100644 --- a/avalanchego/vms/platformvm/warp/signer.go +++ b/avalanchego/vms/platformvm/warp/signer.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package warp @@ -13,7 +13,8 @@ import ( var ( _ Signer = (*signer)(nil) - errWrongSourceChainID = errors.New("wrong SourceChainID") + ErrWrongSourceChainID = errors.New("wrong SourceChainID") + ErrWrongNetworkID = errors.New("wrong networkID") ) type Signer interface { @@ -25,21 +26,26 @@ type Signer interface { Sign(msg *UnsignedMessage) ([]byte, error) } -func NewSigner(sk *bls.SecretKey, chainID ids.ID) Signer { +func NewSigner(sk *bls.SecretKey, networkID uint32, chainID ids.ID) Signer { return &signer{ - sk: sk, - chainID: chainID, + sk: sk, + networkID: networkID, + chainID: chainID, } } type signer struct { - sk *bls.SecretKey - chainID ids.ID + sk *bls.SecretKey + networkID uint32 + chainID ids.ID } func (s *signer) Sign(msg *UnsignedMessage) ([]byte, error) { if msg.SourceChainID != s.chainID { - return nil, errWrongSourceChainID + return nil, ErrWrongSourceChainID + } + if msg.NetworkID != s.networkID { + return nil, ErrWrongNetworkID } msgBytes := msg.Bytes() diff --git a/avalanchego/vms/platformvm/warp/signer_test.go b/avalanchego/vms/platformvm/warp/signer_test.go index 46ee6eeb..84b51f65 100644 --- a/avalanchego/vms/platformvm/warp/signer_test.go +++ b/avalanchego/vms/platformvm/warp/signer_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. 
All rights reserved. // See the file LICENSE for licensing terms. package warp @@ -9,17 +9,20 @@ import ( "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/bls" ) func TestSigner(t *testing.T) { - for _, test := range SignerTests { - sk, err := bls.NewSecretKey() - require.NoError(t, err) + for name, test := range SignerTests { + t.Run(name, func(t *testing.T) { + sk, err := bls.NewSecretKey() + require.NoError(t, err) - chainID := ids.GenerateTestID() - s := NewSigner(sk, chainID) + chainID := ids.GenerateTestID() + s := NewSigner(sk, constants.UnitTestID, chainID) - test(t, s, sk, chainID) + test(t, s, sk, constants.UnitTestID, chainID) + }) } } diff --git a/avalanchego/vms/platformvm/warp/test_signer.go b/avalanchego/vms/platformvm/warp/test_signer.go index f030447f..e30423ed 100644 --- a/avalanchego/vms/platformvm/warp/test_signer.go +++ b/avalanchego/vms/platformvm/warp/test_signer.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package warp @@ -9,37 +9,56 @@ import ( "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/bls" ) // SignerTests is a list of all signer tests -var SignerTests = []func(t *testing.T, s Signer, sk *bls.SecretKey, chainID ids.ID){ - TestSignerWrongChainID, - TestSignerVerifies, +var SignerTests = map[string]func(t *testing.T, s Signer, sk *bls.SecretKey, networkID uint32, chainID ids.ID){ + "WrongChainID": TestWrongChainID, + "WrongNetworkID": TestWrongNetworkID, + "Verifies": TestVerifies, } // Test that using a random SourceChainID results in an error -func TestSignerWrongChainID(t *testing.T, s Signer, _ *bls.SecretKey, _ ids.ID) { +func TestWrongChainID(t *testing.T, s Signer, _ *bls.SecretKey, _ uint32, _ ids.ID) { require := require.New(t) msg, err := NewUnsignedMessage( + constants.UnitTestID, ids.GenerateTestID(), - ids.GenerateTestID(), []byte("payload"), ) require.NoError(err) _, err = s.Sign(msg) - require.Error(err) + // TODO: require error to be ErrWrongSourceChainID + require.Error(err) //nolint:forbidigo // currently returns grpc errors too +} + +// Test that using a different networkID results in an error +func TestWrongNetworkID(t *testing.T, s Signer, _ *bls.SecretKey, networkID uint32, blockchainID ids.ID) { + require := require.New(t) + + msg, err := NewUnsignedMessage( + networkID+1, + blockchainID, + []byte("payload"), + ) + require.NoError(err) + + _, err = s.Sign(msg) + // TODO: require error to be ErrWrongNetworkID + require.Error(err) //nolint:forbidigo // currently returns grpc errors too } // Test that a signature generated with the signer verifies correctly -func TestSignerVerifies(t *testing.T, s Signer, sk *bls.SecretKey, chainID ids.ID) { +func TestVerifies(t *testing.T, s Signer, sk *bls.SecretKey, networkID uint32, chainID ids.ID) { require := require.New(t) msg, err := NewUnsignedMessage( + networkID, chainID, - 
ids.GenerateTestID(), []byte("payload"), ) require.NoError(err) @@ -52,6 +71,5 @@ func TestSignerVerifies(t *testing.T, s Signer, sk *bls.SecretKey, chainID ids.I pk := bls.PublicFromSecretKey(sk) msgBytes := msg.Bytes() - valid := bls.Verify(pk, sig, msgBytes) - require.True(valid) + require.True(bls.Verify(pk, sig, msgBytes)) } diff --git a/avalanchego/vms/platformvm/warp/unsigned_message.go b/avalanchego/vms/platformvm/warp/unsigned_message.go index be266834..56081d98 100644 --- a/avalanchego/vms/platformvm/warp/unsigned_message.go +++ b/avalanchego/vms/platformvm/warp/unsigned_message.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package warp @@ -12,9 +12,9 @@ import ( // UnsignedMessage defines the standard format for an unsigned Warp message. type UnsignedMessage struct { - SourceChainID ids.ID `serialize:"true"` - DestinationChainID ids.ID `serialize:"true"` - Payload []byte `serialize:"true"` + NetworkID uint32 `serialize:"true"` + SourceChainID ids.ID `serialize:"true"` + Payload []byte `serialize:"true"` bytes []byte id ids.ID @@ -22,14 +22,14 @@ type UnsignedMessage struct { // NewUnsignedMessage creates a new *UnsignedMessage and initializes it. func NewUnsignedMessage( + networkID uint32, sourceChainID ids.ID, - destinationChainID ids.ID, payload []byte, ) (*UnsignedMessage, error) { msg := &UnsignedMessage{ - SourceChainID: sourceChainID, - DestinationChainID: destinationChainID, - Payload: payload, + NetworkID: networkID, + SourceChainID: sourceChainID, + Payload: payload, } return msg, msg.Initialize() } @@ -41,13 +41,13 @@ func ParseUnsignedMessage(b []byte) (*UnsignedMessage, error) { bytes: b, id: hashing.ComputeHash256Array(b), } - _, err := c.Unmarshal(b, msg) + _, err := Codec.Unmarshal(b, msg) return msg, err } // Initialize recalculates the result of Bytes(). 
func (m *UnsignedMessage) Initialize() error { - bytes, err := c.Marshal(codecVersion, m) + bytes, err := Codec.Marshal(CodecVersion, m) if err != nil { return fmt.Errorf("couldn't marshal warp unsigned message: %w", err) } @@ -69,3 +69,7 @@ func (m *UnsignedMessage) Bytes() []byte { func (m *UnsignedMessage) ID() ids.ID { return m.id } + +func (m *UnsignedMessage) String() string { + return fmt.Sprintf("UnsignedMessage(NetworkID = %d, SourceChainID = %s, Payload = %x)", m.NetworkID, m.SourceChainID, m.Payload) +} diff --git a/avalanchego/vms/platformvm/warp/unsigned_message_test.go b/avalanchego/vms/platformvm/warp/unsigned_message_test.go index cf8073d8..03a140d1 100644 --- a/avalanchego/vms/platformvm/warp/unsigned_message_test.go +++ b/avalanchego/vms/platformvm/warp/unsigned_message_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package warp @@ -8,15 +8,16 @@ import ( "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/constants" ) func TestUnsignedMessage(t *testing.T) { require := require.New(t) msg, err := NewUnsignedMessage( - ids.GenerateTestID(), + constants.UnitTestID, ids.GenerateTestID(), []byte("payload"), ) @@ -29,6 +30,9 @@ func TestUnsignedMessage(t *testing.T) { } func TestParseUnsignedMessageJunk(t *testing.T) { - _, err := ParseUnsignedMessage(utils.RandomBytes(1024)) - require.Error(t, err) + require := require.New(t) + + bytes := []byte{0, 1, 2, 3, 4, 5, 6, 7} + _, err := ParseUnsignedMessage(bytes) + require.ErrorIs(err, codec.ErrUnknownVersion) } diff --git a/avalanchego/vms/platformvm/warp/validator.go b/avalanchego/vms/platformvm/warp/validator.go index 357a442b..2ada068a 100644 --- a/avalanchego/vms/platformvm/warp/validator.go +++ 
b/avalanchego/vms/platformvm/warp/validator.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package warp @@ -26,6 +26,12 @@ var ( ErrWeightOverflow = errors.New("weight overflowed") ) +// ValidatorState defines the functions that must be implemented to get +// the canonical validator set for warp message validation. +type ValidatorState interface { + GetValidatorSet(ctx context.Context, height uint64, subnetID ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) +} + type Validator struct { PublicKey *bls.PublicKey PublicKeyBytes []byte @@ -33,8 +39,8 @@ type Validator struct { NodeIDs []ids.NodeID } -func (v *Validator) Less(o *Validator) bool { - return bytes.Compare(v.PublicKeyBytes, o.PublicKeyBytes) < 0 +func (v *Validator) Compare(o *Validator) int { + return bytes.Compare(v.PublicKeyBytes, o.PublicKeyBytes) } // GetCanonicalValidatorSet returns the validator set of [subnetID] at @@ -42,7 +48,7 @@ func (v *Validator) Less(o *Validator) bool { // [subnetID]. 
func GetCanonicalValidatorSet( ctx context.Context, - pChainState validators.State, + pChainState ValidatorState, pChainHeight uint64, subnetID ids.ID, ) ([]*Validator, uint64, error) { @@ -59,14 +65,14 @@ func GetCanonicalValidatorSet( for _, vdr := range vdrSet { totalWeight, err = math.Add64(totalWeight, vdr.Weight) if err != nil { - return nil, 0, fmt.Errorf("%w: %v", ErrWeightOverflow, err) + return nil, 0, fmt.Errorf("%w: %w", ErrWeightOverflow, err) } if vdr.PublicKey == nil { continue } - pkBytes := bls.PublicKeyToBytes(vdr.PublicKey) + pkBytes := bls.SerializePublicKey(vdr.PublicKey) uniqueVdr, ok := vdrs[string(pkBytes)] if !ok { uniqueVdr = &Validator{ @@ -124,7 +130,7 @@ func SumWeight(vdrs []*Validator) (uint64, error) { for _, vdr := range vdrs { weight, err = math.Add64(weight, vdr.Weight) if err != nil { - return 0, fmt.Errorf("%w: %v", ErrWeightOverflow, err) + return 0, fmt.Errorf("%w: %w", ErrWeightOverflow, err) } } return weight, nil diff --git a/avalanchego/vms/platformvm/warp/validator_test.go b/avalanchego/vms/platformvm/warp/validator_test.go index ef8998eb..af680edd 100644 --- a/avalanchego/vms/platformvm/warp/validator_test.go +++ b/avalanchego/vms/platformvm/warp/validator_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package warp @@ -6,11 +6,11 @@ package warp import ( "context" "math" + "strconv" "testing" - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/validators" @@ -134,7 +134,6 @@ func TestGetCanonicalValidatorSet(t *testing.T) { t.Run(tt.name, func(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() state := tt.stateF(ctrl) @@ -146,7 +145,7 @@ func TestGetCanonicalValidatorSet(t *testing.T) { require.Equal(tt.expectedWeight, weight) // These are pointers so have to test equality like this - require.Equal(len(tt.expectedVdrs), len(vdrs)) + require.Len(vdrs, len(tt.expectedVdrs)) for i, expectedVdr := range tt.expectedVdrs { gotVdr := vdrs[i] expectedPKBytes := bls.PublicKeyToBytes(expectedVdr.PublicKey) @@ -166,7 +165,7 @@ func TestFilterValidators(t *testing.T) { pk0 := bls.PublicFromSecretKey(sk0) vdr0 := &Validator{ PublicKey: pk0, - PublicKeyBytes: bls.PublicKeyToBytes(pk0), + PublicKeyBytes: bls.SerializePublicKey(pk0), Weight: 1, } @@ -175,7 +174,7 @@ func TestFilterValidators(t *testing.T) { pk1 := bls.PublicFromSecretKey(sk1) vdr1 := &Validator{ PublicKey: pk1, - PublicKeyBytes: bls.PublicKeyToBytes(pk1), + PublicKeyBytes: bls.SerializePublicKey(pk1), Weight: 2, } @@ -244,9 +243,10 @@ func TestFilterValidators(t *testing.T) { vdrs, err := FilterValidators(tt.indices, tt.vdrs) require.ErrorIs(err, tt.expectedErr) - if err == nil { - require.Equal(tt.expectedVdrs, vdrs) + if tt.expectedErr != nil { + return } + require.Equal(tt.expectedVdrs, vdrs) }) } } @@ -298,8 +298,47 @@ func TestSumWeight(t *testing.T) { sum, err := SumWeight(tt.vdrs) require.ErrorIs(err, tt.expectedErr) - if err == nil { - require.Equal(tt.expectedSum, sum) + if tt.expectedErr != nil { + return + } + require.Equal(tt.expectedSum, sum) + }) + } +} + +func BenchmarkGetCanonicalValidatorSet(b *testing.B) { + pChainHeight := 
uint64(1) + subnetID := ids.GenerateTestID() + numNodes := 10_000 + getValidatorOutputs := make([]*validators.GetValidatorOutput, 0, numNodes) + for i := 0; i < numNodes; i++ { + nodeID := ids.GenerateTestNodeID() + blsPrivateKey, err := bls.NewSecretKey() + require.NoError(b, err) + blsPublicKey := bls.PublicFromSecretKey(blsPrivateKey) + getValidatorOutputs = append(getValidatorOutputs, &validators.GetValidatorOutput{ + NodeID: nodeID, + PublicKey: blsPublicKey, + Weight: 20, + }) + } + + for _, size := range []int{0, 1, 10, 100, 1_000, 10_000} { + getValidatorsOutput := make(map[ids.NodeID]*validators.GetValidatorOutput) + for i := 0; i < size; i++ { + validator := getValidatorOutputs[i] + getValidatorsOutput[validator.NodeID] = validator + } + validatorState := &validators.TestState{ + GetValidatorSetF: func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { + return getValidatorsOutput, nil + }, + } + + b.Run(strconv.Itoa(size), func(b *testing.B) { + for i := 0; i < b.N; i++ { + _, _, err := GetCanonicalValidatorSet(context.Background(), validatorState, pChainHeight, subnetID) + require.NoError(b, err) } }) } diff --git a/avalanchego/vms/propertyfx/burn_operation.go b/avalanchego/vms/propertyfx/burn_operation.go index 4217420b..1dedb4c2 100644 --- a/avalanchego/vms/propertyfx/burn_operation.go +++ b/avalanchego/vms/propertyfx/burn_operation.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package propertyfx diff --git a/avalanchego/vms/propertyfx/burn_operation_test.go b/avalanchego/vms/propertyfx/burn_operation_test.go index 0b5715ea..b6a995b0 100644 --- a/avalanchego/vms/propertyfx/burn_operation_test.go +++ b/avalanchego/vms/propertyfx/burn_operation_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
+// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package propertyfx @@ -6,6 +6,8 @@ package propertyfx import ( "testing" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/vms/components/verify" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) @@ -14,21 +16,17 @@ func TestBurnOperationInvalid(t *testing.T) { op := BurnOperation{Input: secp256k1fx.Input{ SigIndices: []uint32{1, 0}, }} - if err := op.Verify(); err == nil { - t.Fatalf("operation should have failed verification") - } + err := op.Verify() + require.ErrorIs(t, err, secp256k1fx.ErrInputIndicesNotSortedUnique) } func TestBurnOperationNumberOfOutput(t *testing.T) { op := BurnOperation{} - if outs := op.Outs(); len(outs) != 0 { - t.Fatalf("wrong number of outputs") - } + require.Empty(t, op.Outs()) } func TestBurnOperationState(t *testing.T) { intf := interface{}(&BurnOperation{}) - if _, ok := intf.(verify.State); ok { - t.Fatalf("shouldn't be marked as state") - } + _, ok := intf.(verify.State) + require.False(t, ok) } diff --git a/avalanchego/vms/propertyfx/credential.go b/avalanchego/vms/propertyfx/credential.go index a2622cf7..3a464cc2 100644 --- a/avalanchego/vms/propertyfx/credential.go +++ b/avalanchego/vms/propertyfx/credential.go @@ -1,11 +1,9 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package propertyfx -import ( - "github.com/ava-labs/avalanchego/vms/secp256k1fx" -) +import "github.com/ava-labs/avalanchego/vms/secp256k1fx" type Credential struct { secp256k1fx.Credential `serialize:"true"` diff --git a/avalanchego/vms/propertyfx/credential_test.go b/avalanchego/vms/propertyfx/credential_test.go index d03d5b2b..3ce9bc97 100644 --- a/avalanchego/vms/propertyfx/credential_test.go +++ b/avalanchego/vms/propertyfx/credential_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package propertyfx @@ -6,12 +6,13 @@ package propertyfx import ( "testing" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/vms/components/verify" ) func TestCredentialState(t *testing.T) { intf := interface{}(&Credential{}) - if _, ok := intf.(verify.State); ok { - t.Fatalf("shouldn't be marked as state") - } + _, ok := intf.(verify.State) + require.False(t, ok) } diff --git a/avalanchego/vms/propertyfx/factory.go b/avalanchego/vms/propertyfx/factory.go index 21c69c97..53d6101b 100644 --- a/avalanchego/vms/propertyfx/factory.go +++ b/avalanchego/vms/propertyfx/factory.go @@ -1,16 +1,17 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package propertyfx import ( "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/vms" + "github.com/ava-labs/avalanchego/vms/fx" ) +const Name = "propertyfx" + var ( - _ vms.Factory = (*Factory)(nil) + _ fx.Factory = (*Factory)(nil) // ID that this Fx uses when labeled ID = ids.ID{'p', 'r', 'o', 'p', 'e', 'r', 't', 'y', 'f', 'x'} @@ -18,6 +19,6 @@ var ( type Factory struct{} -func (*Factory) New(logging.Logger) (interface{}, error) { - return &Fx{}, nil +func (*Factory) New() any { + return &Fx{} } diff --git a/avalanchego/vms/propertyfx/factory_test.go b/avalanchego/vms/propertyfx/factory_test.go index 25dc8935..9aa46192 100644 --- a/avalanchego/vms/propertyfx/factory_test.go +++ b/avalanchego/vms/propertyfx/factory_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package propertyfx @@ -6,14 +6,12 @@ package propertyfx import ( "testing" - "github.com/ava-labs/avalanchego/utils/logging" + "github.com/stretchr/testify/require" ) func TestFactory(t *testing.T) { + require := require.New(t) + factory := Factory{} - if fx, err := factory.New(logging.NoLog{}); err != nil { - t.Fatal(err) - } else if fx == nil { - t.Fatalf("Factory.New returned nil") - } + require.Equal(&Fx{}, factory.New()) } diff --git a/avalanchego/vms/propertyfx/fx.go b/avalanchego/vms/propertyfx/fx.go index 2719c37e..24a3dff1 100644 --- a/avalanchego/vms/propertyfx/fx.go +++ b/avalanchego/vms/propertyfx/fx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package propertyfx @@ -6,7 +6,7 @@ package propertyfx import ( "errors" - "github.com/ava-labs/avalanchego/utils/wrappers" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/vms/components/verify" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) @@ -32,15 +32,13 @@ func (fx *Fx) Initialize(vmIntf interface{}) error { log.Debug("initializing nft fx") c := fx.VM.CodecRegistry() - errs := wrappers.Errs{} - errs.Add( + return utils.Err( c.RegisterType(&MintOutput{}), c.RegisterType(&OwnedOutput{}), c.RegisterType(&MintOperation{}), c.RegisterType(&BurnOperation{}), c.RegisterType(&Credential{}), ) - return errs.Err } func (fx *Fx) VerifyOperation(txIntf, opIntf, credIntf interface{}, utxosIntf []interface{}) error { diff --git a/avalanchego/vms/propertyfx/fx_test.go b/avalanchego/vms/propertyfx/fx_test.go index f46602ab..0cd995ba 100644 --- a/avalanchego/vms/propertyfx/fx_test.go +++ b/avalanchego/vms/propertyfx/fx_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package propertyfx @@ -7,6 +7,8 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/codec/linearcodec" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" @@ -37,36 +39,31 @@ var ( func TestFxInitialize(t *testing.T) { vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } fx := Fx{} - err := fx.Initialize(&vm) - if err != nil { - t.Fatal(err) - } + require.NoError(t, fx.Initialize(&vm)) } func TestFxInitializeInvalid(t *testing.T) { fx := Fx{} err := fx.Initialize(nil) - if err == nil { - t.Fatalf("Should have returned an error") - } + require.ErrorIs(t, err, secp256k1fx.ErrWrongVMType) } func TestFxVerifyMintOperation(t *testing.T) { + require := require.New(t) + vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) vm.Clk.Set(date) fx := Fx{} - if err := fx.Initialize(&vm); err != nil { - t.Fatal(err) - } + require.NoError(fx.Initialize(&vm)) tx := &secp256k1fx.TestTx{ UnsignedBytes: txBytes, } @@ -94,23 +91,21 @@ func TestFxVerifyMintOperation(t *testing.T) { } utxos := []interface{}{utxo} - if err := fx.VerifyOperation(tx, op, cred, utxos); err != nil { - t.Fatal(err) - } + require.NoError(fx.VerifyOperation(tx, op, cred, utxos)) } func TestFxVerifyMintOperationWrongTx(t *testing.T) { + require := require.New(t) + vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) vm.Clk.Set(date) fx := Fx{} - if err := fx.Initialize(&vm); err != nil { - t.Fatal(err) - } + require.NoError(fx.Initialize(&vm)) cred := &Credential{Credential: secp256k1fx.Credential{ Sigs: [][secp256k1.SignatureLen]byte{ sigBytes, @@ -129,23 
+124,22 @@ func TestFxVerifyMintOperationWrongTx(t *testing.T) { } utxos := []interface{}{utxo} - if err := fx.VerifyOperation(nil, op, cred, utxos); err == nil { - t.Fatalf("VerifyOperation should have errored due to an invalid tx") - } + err := fx.VerifyOperation(nil, op, cred, utxos) + require.ErrorIs(err, errWrongTxType) } func TestFxVerifyMintOperationWrongNumberUTXOs(t *testing.T) { + require := require.New(t) + vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) vm.Clk.Set(date) fx := Fx{} - if err := fx.Initialize(&vm); err != nil { - t.Fatal(err) - } + require.NoError(fx.Initialize(&vm)) tx := &secp256k1fx.TestTx{ UnsignedBytes: txBytes, } @@ -161,23 +155,22 @@ func TestFxVerifyMintOperationWrongNumberUTXOs(t *testing.T) { } utxos := []interface{}{} - if err := fx.VerifyOperation(tx, op, cred, utxos); err == nil { - t.Fatalf("VerifyOperation should have errored due to not enough utxos") - } + err := fx.VerifyOperation(tx, op, cred, utxos) + require.ErrorIs(err, errWrongNumberOfUTXOs) } func TestFxVerifyMintOperationWrongCredential(t *testing.T) { + require := require.New(t) + vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) vm.Clk.Set(date) fx := Fx{} - if err := fx.Initialize(&vm); err != nil { - t.Fatal(err) - } + require.NoError(fx.Initialize(&vm)) tx := &secp256k1fx.TestTx{ UnsignedBytes: txBytes, } @@ -194,23 +187,22 @@ func TestFxVerifyMintOperationWrongCredential(t *testing.T) { } utxos := []interface{}{utxo} - if err := fx.VerifyOperation(tx, op, nil, utxos); err == nil { - t.Fatalf("VerifyOperation should have errored due to a bad credential") - } + err := fx.VerifyOperation(tx, op, nil, utxos) + require.ErrorIs(err, errWrongCredentialType) } func 
TestFxVerifyMintOperationInvalidUTXO(t *testing.T) { + require := require.New(t) + vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) vm.Clk.Set(date) fx := Fx{} - if err := fx.Initialize(&vm); err != nil { - t.Fatal(err) - } + require.NoError(fx.Initialize(&vm)) tx := &secp256k1fx.TestTx{ UnsignedBytes: txBytes, } @@ -226,23 +218,22 @@ func TestFxVerifyMintOperationInvalidUTXO(t *testing.T) { } utxos := []interface{}{nil} - if err := fx.VerifyOperation(tx, op, cred, utxos); err == nil { - t.Fatalf("VerifyOperation should have errored due to an invalid utxo") - } + err := fx.VerifyOperation(tx, op, cred, utxos) + require.ErrorIs(err, errWrongUTXOType) } func TestFxVerifyMintOperationFailingVerification(t *testing.T) { + require := require.New(t) + vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) vm.Clk.Set(date) fx := Fx{} - if err := fx.Initialize(&vm); err != nil { - t.Fatal(err) - } + require.NoError(fx.Initialize(&vm)) tx := &secp256k1fx.TestTx{ UnsignedBytes: txBytes, } @@ -265,23 +256,22 @@ func TestFxVerifyMintOperationFailingVerification(t *testing.T) { } utxos := []interface{}{utxo} - if err := fx.VerifyOperation(tx, op, cred, utxos); err == nil { - t.Fatalf("VerifyOperation should have errored due to an invalid utxo output") - } + err := fx.VerifyOperation(tx, op, cred, utxos) + require.ErrorIs(err, secp256k1fx.ErrAddrsNotSortedUnique) } func TestFxVerifyMintOperationInvalidGroupID(t *testing.T) { + require := require.New(t) + vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) vm.Clk.Set(date) fx := Fx{} - if err := 
fx.Initialize(&vm); err != nil { - t.Fatal(err) - } + require.NoError(fx.Initialize(&vm)) tx := &secp256k1fx.TestTx{ UnsignedBytes: txBytes, } @@ -303,23 +293,22 @@ func TestFxVerifyMintOperationInvalidGroupID(t *testing.T) { } utxos := []interface{}{utxo} - if err := fx.VerifyOperation(tx, op, cred, utxos); err == nil { - t.Fatalf("VerifyOperation should have errored due to an invalid mint output") - } + err := fx.VerifyOperation(tx, op, cred, utxos) + require.ErrorIs(err, errWrongMintOutput) } func TestFxVerifyTransferOperation(t *testing.T) { + require := require.New(t) + vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) vm.Clk.Set(date) fx := Fx{} - if err := fx.Initialize(&vm); err != nil { - t.Fatal(err) - } + require.NoError(fx.Initialize(&vm)) tx := &secp256k1fx.TestTx{ UnsignedBytes: txBytes, } @@ -339,23 +328,21 @@ func TestFxVerifyTransferOperation(t *testing.T) { }} utxos := []interface{}{utxo} - if err := fx.VerifyOperation(tx, op, cred, utxos); err != nil { - t.Fatal(err) - } + require.NoError(fx.VerifyOperation(tx, op, cred, utxos)) } func TestFxVerifyTransferOperationWrongUTXO(t *testing.T) { + require := require.New(t) + vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) vm.Clk.Set(date) fx := Fx{} - if err := fx.Initialize(&vm); err != nil { - t.Fatal(err) - } + require.NoError(fx.Initialize(&vm)) tx := &secp256k1fx.TestTx{ UnsignedBytes: txBytes, } @@ -369,23 +356,22 @@ func TestFxVerifyTransferOperationWrongUTXO(t *testing.T) { }} utxos := []interface{}{nil} - if err := fx.VerifyOperation(tx, op, cred, utxos); err == nil { - t.Fatalf("VerifyOperation should have errored due to an invalid utxo") - } + err := fx.VerifyOperation(tx, op, cred, utxos) + 
require.ErrorIs(err, errWrongUTXOType) } func TestFxVerifyTransferOperationFailedVerify(t *testing.T) { + require := require.New(t) + vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) vm.Clk.Set(date) fx := Fx{} - if err := fx.Initialize(&vm); err != nil { - t.Fatal(err) - } + require.NoError(fx.Initialize(&vm)) tx := &secp256k1fx.TestTx{ UnsignedBytes: txBytes, } @@ -405,23 +391,22 @@ func TestFxVerifyTransferOperationFailedVerify(t *testing.T) { }} utxos := []interface{}{utxo} - if err := fx.VerifyOperation(tx, op, cred, utxos); err == nil { - t.Fatalf("VerifyOperation should have errored due to an invalid utxo output") - } + err := fx.VerifyOperation(tx, op, cred, utxos) + require.ErrorIs(err, secp256k1fx.ErrInputIndicesNotSortedUnique) } func TestFxVerifyOperationUnknownOperation(t *testing.T) { + require := require.New(t) + vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) vm.Clk.Set(date) fx := Fx{} - if err := fx.Initialize(&vm); err != nil { - t.Fatal(err) - } + require.NoError(fx.Initialize(&vm)) tx := &secp256k1fx.TestTx{ UnsignedBytes: txBytes, } @@ -438,24 +423,22 @@ func TestFxVerifyOperationUnknownOperation(t *testing.T) { }} utxos := []interface{}{utxo} - if err := fx.VerifyOperation(tx, nil, cred, utxos); err == nil { - t.Fatalf("VerifyOperation should have errored due to an unknown operation") - } + err := fx.VerifyOperation(tx, nil, cred, utxos) + require.ErrorIs(err, errWrongOperationType) } func TestFxVerifyTransfer(t *testing.T) { + require := require.New(t) + vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) 
vm.Clk.Set(date) fx := Fx{} - if err := fx.Initialize(&vm); err != nil { - t.Fatal(err) - } - if err := fx.VerifyTransfer(nil, nil, nil, nil); err == nil { - t.Fatalf("this Fx doesn't support transfers") - } + require.NoError(fx.Initialize(&vm)) + err := fx.VerifyTransfer(nil, nil, nil, nil) + require.ErrorIs(err, errCantTransfer) } diff --git a/avalanchego/vms/propertyfx/mint_operation.go b/avalanchego/vms/propertyfx/mint_operation.go index 535ea135..7eecf5de 100644 --- a/avalanchego/vms/propertyfx/mint_operation.go +++ b/avalanchego/vms/propertyfx/mint_operation.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package propertyfx diff --git a/avalanchego/vms/propertyfx/mint_operation_test.go b/avalanchego/vms/propertyfx/mint_operation_test.go index 80e5cc24..abcc552a 100644 --- a/avalanchego/vms/propertyfx/mint_operation_test.go +++ b/avalanchego/vms/propertyfx/mint_operation_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package propertyfx @@ -6,15 +6,16 @@ package propertyfx import ( "testing" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/vms/components/verify" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) func TestMintOperationVerifyNil(t *testing.T) { op := (*MintOperation)(nil) - if err := op.Verify(); err == nil { - t.Fatalf("nil operation should have failed verification") - } + err := op.Verify() + require.ErrorIs(t, err, errNilMintOperation) } func TestMintOperationVerifyInvalidOutput(t *testing.T) { @@ -25,21 +26,17 @@ func TestMintOperationVerifyInvalidOutput(t *testing.T) { }, }, } - if err := op.Verify(); err == nil { - t.Fatalf("operation should have failed verification") - } + err := op.Verify() + require.ErrorIs(t, err, secp256k1fx.ErrOutputUnspendable) } func TestMintOperationOuts(t *testing.T) { op := MintOperation{} - if outs := op.Outs(); len(outs) != 2 { - t.Fatalf("Wrong number of outputs returned") - } + require.Len(t, op.Outs(), 2) } func TestMintOperationState(t *testing.T) { intf := interface{}(&MintOperation{}) - if _, ok := intf.(verify.State); ok { - t.Fatalf("shouldn't be marked as state") - } + _, ok := intf.(verify.State) + require.False(t, ok) } diff --git a/avalanchego/vms/propertyfx/mint_output.go b/avalanchego/vms/propertyfx/mint_output.go index 9284d807..7ff60375 100644 --- a/avalanchego/vms/propertyfx/mint_output.go +++ b/avalanchego/vms/propertyfx/mint_output.go @@ -1,12 +1,17 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package propertyfx import ( + "github.com/ava-labs/avalanchego/vms/components/verify" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) +var _ verify.State = (*MintOutput)(nil) + type MintOutput struct { + verify.IsState `json:"-"` + secp256k1fx.OutputOwners `serialize:"true"` } diff --git a/avalanchego/vms/propertyfx/mint_output_test.go b/avalanchego/vms/propertyfx/mint_output_test.go index 9e79f6a2..4cfa1da0 100644 --- a/avalanchego/vms/propertyfx/mint_output_test.go +++ b/avalanchego/vms/propertyfx/mint_output_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package propertyfx @@ -6,12 +6,13 @@ package propertyfx import ( "testing" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/vms/components/verify" ) func TestMintOutputState(t *testing.T) { intf := interface{}(&MintOutput{}) - if _, ok := intf.(verify.State); !ok { - t.Fatalf("should be marked as state") - } + _, ok := intf.(verify.State) + require.True(t, ok) } diff --git a/avalanchego/vms/propertyfx/owned_output.go b/avalanchego/vms/propertyfx/owned_output.go index 295b6feb..cbe2f437 100644 --- a/avalanchego/vms/propertyfx/owned_output.go +++ b/avalanchego/vms/propertyfx/owned_output.go @@ -1,12 +1,17 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package propertyfx import ( + "github.com/ava-labs/avalanchego/vms/components/verify" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) +var _ verify.State = (*OwnedOutput)(nil) + type OwnedOutput struct { + verify.IsState `json:"-"` + secp256k1fx.OutputOwners `serialize:"true"` } diff --git a/avalanchego/vms/propertyfx/owned_output_test.go b/avalanchego/vms/propertyfx/owned_output_test.go index c08c382f..a9c9adc5 100644 --- a/avalanchego/vms/propertyfx/owned_output_test.go +++ b/avalanchego/vms/propertyfx/owned_output_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package propertyfx @@ -6,12 +6,13 @@ package propertyfx import ( "testing" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/vms/components/verify" ) func TestOwnedOutputState(t *testing.T) { intf := interface{}(&OwnedOutput{}) - if _, ok := intf.(verify.State); !ok { - t.Fatalf("should be marked as state") - } + _, ok := intf.(verify.State) + require.True(t, ok) } diff --git a/avalanchego/vms/proposervm/README.md b/avalanchego/vms/proposervm/README.md index e01a014f..6dec4fe9 100644 --- a/avalanchego/vms/proposervm/README.md +++ b/avalanchego/vms/proposervm/README.md @@ -47,7 +47,7 @@ A proposer in position `i` in the proposers list has its submission windows star The following validation rules are enforced: - Given a `proposervm.Block` **C** and its parent block **P**, **P**'s inner block must be **C**'s inner block's parent. -- A block must have a `PChainHeight` is larger or equal to its parent's `PChainHeight` (`PChainHeight` is monotonic). +- A block must have a `PChainHeight` that is larger or equal to its parent's `PChainHeight` (`PChainHeight` is monotonic). - A block must have a `PChainHeight` that is less or equal to current P-Chain height. 
- A block must have a `Timestamp` larger or equal to its parent's `Timestamp` (`Timestamp` is monotonic) - A block received by a node at time `t_local` must have a `Timestamp` such that `Timestamp < t_local + maxSkew` (a block too far in the future is invalid). `maxSkew` is currently set to `10 seconds`. diff --git a/avalanchego/vms/proposervm/batched_vm.go b/avalanchego/vms/proposervm/batched_vm.go index a9c7f7b6..0bf51482 100644 --- a/avalanchego/vms/proposervm/batched_vm.go +++ b/avalanchego/vms/proposervm/batched_vm.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package proposervm @@ -24,7 +24,7 @@ func (vm *VM) GetAncestors( blkID ids.ID, maxBlocksNum int, maxBlocksSize int, - maxBlocksRetrivalTime time.Duration, + maxBlocksRetrievalTime time.Duration, ) ([][]byte, error) { if vm.batchedVM == nil { return nil, block.ErrRemoteVMNotImplemented @@ -50,15 +50,13 @@ func (vm *VM) GetAncestors( // is repr. by an int. currentByteLength += wrappers.IntLen + len(blkBytes) elapsedTime := vm.Clock.Time().Sub(startTime) - if len(res) > 0 && (currentByteLength >= maxBlocksSize || maxBlocksRetrivalTime <= elapsedTime) { + if len(res) > 0 && (currentByteLength >= maxBlocksSize || maxBlocksRetrievalTime <= elapsedTime) { return res, nil // reached maximum size or ran out of time } res = append(res, blkBytes) blkID = blk.ParentID() - maxBlocksNum-- - - if maxBlocksNum <= 0 { + if len(res) >= maxBlocksNum { return res, nil } } @@ -66,7 +64,7 @@ func (vm *VM) GetAncestors( // snowman++ fork may have been hit. 
preMaxBlocksNum := maxBlocksNum - len(res) preMaxBlocksSize := maxBlocksSize - currentByteLength - preMaxBlocksRetrivalTime := maxBlocksRetrivalTime - time.Since(startTime) + preMaxBlocksRetrivalTime := maxBlocksRetrievalTime - time.Since(startTime) innerBytes, err := vm.batchedVM.GetAncestors( ctx, blkID, @@ -103,7 +101,7 @@ func (vm *VM) BatchedParseBlock(ctx context.Context, blks [][]byte) ([]snowman.B ) for ; blocksIndex < len(blks); blocksIndex++ { blkBytes := blks[blocksIndex] - statelessBlock, err := statelessblock.Parse(blkBytes) + statelessBlock, err := statelessblock.Parse(blkBytes, vm.DurangoTime) if err != nil { break } diff --git a/avalanchego/vms/proposervm/batched_vm_test.go b/avalanchego/vms/proposervm/batched_vm_test.go index b61b9062..3564b0b8 100644 --- a/avalanchego/vms/proposervm/batched_vm_test.go +++ b/avalanchego/vms/proposervm/batched_vm_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package proposervm @@ -6,56 +6,70 @@ package proposervm import ( "bytes" "context" - "crypto" "testing" "time" "github.com/stretchr/testify/require" - "github.com/ava-labs/avalanchego/database/manager" + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/database/memdb" + "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils/timer/mockable" - "github.com/ava-labs/avalanchego/version" - "github.com/ava-labs/avalanchego/vms/proposervm/proposer" ) func TestCoreVMNotRemote(t *testing.T) { // if coreVM is not remote VM, a specific error is returned require := require.New(t) - _, _, proVM, _, _ := initTestProposerVM(t, time.Time{}, 0) // enable ProBlks + var ( + activationTime = time.Unix(0, 0) + durangoTime = activationTime + ) + _, _, proVM, _, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + defer func() { + require.NoError(proVM.Shutdown(context.Background())) + }() blkID := ids.Empty - maxBlocksNum := 1000 // an high value to get all built blocks - maxBlocksSize := 1000000 // an high value to get all built blocks - maxBlocksRetrivalTime := time.Duration(1000000) // an high value to get all built blocks - _, errAncestors := proVM.GetAncestors( + maxBlocksNum := 1000 // a high value to get all built blocks + maxBlocksSize := 1000000 // a high value to get all built blocks + maxBlocksRetrivalTime := time.Hour // a high value to get all built blocks + _, err := proVM.GetAncestors( context.Background(), blkID, maxBlocksNum, maxBlocksSize, maxBlocksRetrivalTime, ) - require.Error(errAncestors) + 
require.ErrorIs(err, block.ErrRemoteVMNotImplemented) var blks [][]byte - _, errBatchedParse := proVM.BatchedParseBlock(context.Background(), blks) - require.Error(errBatchedParse) + _, err = proVM.BatchedParseBlock(context.Background(), blks) + require.ErrorIs(err, block.ErrRemoteVMNotImplemented) } func TestGetAncestorsPreForkOnly(t *testing.T) { require := require.New(t) - coreVM, proRemoteVM, coreGenBlk := initTestRemoteProposerVM(t, mockable.MaxTime) // disable ProBlks + var ( + activationTime = mockable.MaxTime + durangoTime = activationTime + ) + coreVM, proRemoteVM, coreGenBlk := initTestRemoteProposerVM(t, activationTime, durangoTime) + defer func() { + require.NoError(proRemoteVM.Shutdown(context.Background())) + }() // Build some prefork blocks.... coreBlk1 := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(111), + IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, BytesV: []byte{1}, @@ -67,7 +81,7 @@ func TestGetAncestorsPreForkOnly(t *testing.T) { return coreBlk1, nil } builtBlk1, err := proRemoteVM.BuildBlock(context.Background()) - require.NoError(err, "Could not build preFork block") + require.NoError(err) // prepare build of next block require.NoError(proRemoteVM.SetPreference(context.Background(), builtBlk1.ID())) @@ -82,7 +96,7 @@ func TestGetAncestorsPreForkOnly(t *testing.T) { coreBlk2 := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(222), + IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, BytesV: []byte{2}, @@ -94,7 +108,7 @@ func TestGetAncestorsPreForkOnly(t *testing.T) { return coreBlk2, nil } builtBlk2, err := proRemoteVM.BuildBlock(context.Background()) - require.NoError(err, "Could not build proposer block") + require.NoError(err) // prepare build of next block require.NoError(proRemoteVM.SetPreference(context.Background(), builtBlk2.ID())) @@ -109,7 +123,7 @@ func TestGetAncestorsPreForkOnly(t *testing.T) { coreBlk3 := &snowman.TestBlock{ 
TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(222), + IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, BytesV: []byte{3}, @@ -121,7 +135,7 @@ func TestGetAncestorsPreForkOnly(t *testing.T) { return coreBlk3, nil } builtBlk3, err := proRemoteVM.BuildBlock(context.Background()) - require.NoError(err, "Could not build proposer block") + require.NoError(err) // ...Call GetAncestors on them ... // Note: we assumed that if blkID is not known, that's NOT an error. @@ -147,9 +161,9 @@ func TestGetAncestorsPreForkOnly(t *testing.T) { } reqBlkID := builtBlk3.ID() - maxBlocksNum := 1000 // an high value to get all built blocks - maxBlocksSize := 1000000 // an high value to get all built blocks - maxBlocksRetrivalTime := time.Duration(1000000) // an high value to get all built blocks + maxBlocksNum := 1000 // a high value to get all built blocks + maxBlocksSize := 1000000 // a high value to get all built blocks + maxBlocksRetrivalTime := time.Hour // a high value to get all built blocks res, err := proRemoteVM.GetAncestors( context.Background(), reqBlkID, @@ -159,11 +173,11 @@ func TestGetAncestorsPreForkOnly(t *testing.T) { ) // ... 
and check returned values are as expected - require.NoError(err, "Error calling GetAncestors: %v", err) - require.Len(res, 3, "GetAncestor returned %v entries instead of %v", len(res), 3) - require.EqualValues(res[0], builtBlk3.Bytes()) - require.EqualValues(res[1], builtBlk2.Bytes()) - require.EqualValues(res[2], builtBlk1.Bytes()) + require.NoError(err) + require.Len(res, 3) + require.Equal(builtBlk3.Bytes(), res[0]) + require.Equal(builtBlk2.Bytes(), res[1]) + require.Equal(builtBlk1.Bytes(), res[2]) // another good call reqBlkID = builtBlk1.ID() @@ -174,9 +188,9 @@ func TestGetAncestorsPreForkOnly(t *testing.T) { maxBlocksSize, maxBlocksRetrivalTime, ) - require.NoError(err, "Error calling GetAncestors: %v", err) - require.Len(res, 1, "GetAncestor returned %v entries instead of %v", len(res), 1) - require.EqualValues(res[0], builtBlk1.Bytes()) + require.NoError(err) + require.Len(res, 1) + require.Equal(builtBlk1.Bytes(), res[0]) // a faulty call reqBlkID = ids.Empty @@ -187,72 +201,76 @@ func TestGetAncestorsPreForkOnly(t *testing.T) { maxBlocksSize, maxBlocksRetrivalTime, ) - require.NoError(err, "Error calling GetAncestors: %v", err) - require.Empty(res, "GetAncestor returned %v entries instead of %v", len(res), 0) + require.NoError(err) + require.Empty(res) } func TestGetAncestorsPostForkOnly(t *testing.T) { require := require.New(t) - coreVM, proRemoteVM, coreGenBlk := initTestRemoteProposerVM(t, time.Time{}) // enable ProBlks + var ( + activationTime = time.Unix(0, 0) + durangoTime = activationTime + ) + coreVM, proRemoteVM, coreGenBlk := initTestRemoteProposerVM(t, activationTime, durangoTime) + defer func() { + require.NoError(proRemoteVM.Shutdown(context.Background())) + }() // Build some post-Fork blocks.... 
coreBlk1 := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(111), + IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - TimestampV: coreGenBlk.Timestamp(), + BytesV: []byte{1}, + ParentV: coreGenBlk.ID(), + HeightV: coreGenBlk.Height() + 1, } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk1, nil } builtBlk1, err := proRemoteVM.BuildBlock(context.Background()) - require.NoError(err, "Could not build preFork block") + require.NoError(err) // prepare build of next block require.NoError(builtBlk1.Verify(context.Background())) require.NoError(proRemoteVM.SetPreference(context.Background(), builtBlk1.ID())) - proRemoteVM.Set(proRemoteVM.Time().Add(proposer.MaxDelay)) + require.NoError(waitForProposerWindow(proRemoteVM, builtBlk1, 0)) coreBlk2 := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(222), + IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, - BytesV: []byte{2}, - ParentV: coreBlk1.ID(), - HeightV: coreBlk1.Height() + 1, - TimestampV: coreBlk1.Timestamp().Add(proposer.MaxDelay), + BytesV: []byte{2}, + ParentV: coreBlk1.ID(), + HeightV: coreBlk1.Height() + 1, } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk2, nil } builtBlk2, err := proRemoteVM.BuildBlock(context.Background()) - require.NoError(err, "Could not build proposer block") + require.NoError(err) // prepare build of next block require.NoError(builtBlk2.Verify(context.Background())) require.NoError(proRemoteVM.SetPreference(context.Background(), builtBlk2.ID())) - proRemoteVM.Set(proRemoteVM.Time().Add(proposer.MaxDelay)) + require.NoError(waitForProposerWindow(proRemoteVM, builtBlk2, 0)) coreBlk3 := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(333), + IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, - BytesV: []byte{3}, - 
ParentV: coreBlk2.ID(), - HeightV: coreBlk2.Height() + 1, - TimestampV: coreBlk2.Timestamp(), + BytesV: []byte{3}, + ParentV: coreBlk2.ID(), + HeightV: coreBlk2.Height() + 1, } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk3, nil } builtBlk3, err := proRemoteVM.BuildBlock(context.Background()) - require.NoError(err, "Could not build proposer block") + require.NoError(err) require.NoError(builtBlk3.Verify(context.Background())) require.NoError(proRemoteVM.SetPreference(context.Background(), builtBlk3.ID())) @@ -296,9 +314,9 @@ func TestGetAncestorsPostForkOnly(t *testing.T) { } reqBlkID := builtBlk3.ID() - maxBlocksNum := 1000 // an high value to get all built blocks - maxBlocksSize := 1000000 // an high value to get all built blocks - maxBlocksRetrivalTime := time.Duration(1000000) // an high value to get all built blocks + maxBlocksNum := 1000 // a high value to get all built blocks + maxBlocksSize := 1000000 // a high value to get all built blocks + maxBlocksRetrivalTime := time.Hour // a high value to get all built blocks res, err := proRemoteVM.GetAncestors( context.Background(), reqBlkID, @@ -308,11 +326,11 @@ func TestGetAncestorsPostForkOnly(t *testing.T) { ) // ... 
and check returned values are as expected - require.NoError(err, "Error calling GetAncestors: %v", err) + require.NoError(err) require.Len(res, 3) - require.EqualValues(res[0], builtBlk3.Bytes()) - require.EqualValues(res[1], builtBlk2.Bytes()) - require.EqualValues(res[2], builtBlk1.Bytes()) + require.Equal(builtBlk3.Bytes(), res[0]) + require.Equal(builtBlk2.Bytes(), res[1]) + require.Equal(builtBlk1.Bytes(), res[2]) // another good call reqBlkID = builtBlk1.ID() @@ -323,9 +341,9 @@ func TestGetAncestorsPostForkOnly(t *testing.T) { maxBlocksSize, maxBlocksRetrivalTime, ) - require.NoError(err, "Error calling GetAncestors: %v", err) - require.Len(res, 1, "GetAncestor returned %v entries instead of %v", len(res), 1) - require.EqualValues(res[0], builtBlk1.Bytes()) + require.NoError(err) + require.Len(res, 1) + require.Equal(builtBlk1.Bytes(), res[0]) // a faulty call reqBlkID = ids.Empty @@ -336,24 +354,33 @@ func TestGetAncestorsPostForkOnly(t *testing.T) { maxBlocksSize, maxBlocksRetrivalTime, ) - require.NoError(err, "Error calling GetAncestors: %v", err) - require.Empty(res, "GetAncestor returned %v entries instead of %v", len(res), 0) + require.NoError(err) + require.Empty(res) } func TestGetAncestorsAtSnomanPlusPlusFork(t *testing.T) { require := require.New(t) - currentTime := time.Now().Truncate(time.Second) - preForkTime := currentTime.Add(5 * time.Minute) - forkTime := currentTime.Add(10 * time.Minute) - postForkTime := currentTime.Add(15 * time.Minute) + + var ( + currentTime = time.Now().Truncate(time.Second) + preForkTime = currentTime.Add(5 * time.Minute) + forkTime = currentTime.Add(10 * time.Minute) + postForkTime = currentTime.Add(15 * time.Minute) + + durangoTime = forkTime + ) + // enable ProBlks in next future - coreVM, proRemoteVM, coreGenBlk := initTestRemoteProposerVM(t, forkTime) + coreVM, proRemoteVM, coreGenBlk := initTestRemoteProposerVM(t, forkTime, durangoTime) + defer func() { + 
require.NoError(proRemoteVM.Shutdown(context.Background())) + }() // Build some prefork blocks.... proRemoteVM.Set(preForkTime) coreBlk1 := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(111), + IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, BytesV: []byte{1}, @@ -365,9 +392,8 @@ func TestGetAncestorsAtSnomanPlusPlusFork(t *testing.T) { return coreBlk1, nil } builtBlk1, err := proRemoteVM.BuildBlock(context.Background()) - require.NoError(err, "Could not build preFork block") - _, ok := builtBlk1.(*preForkBlock) - require.True(ok, "Block should be a pre-fork one") + require.NoError(err) + require.IsType(&preForkBlock{}, builtBlk1) // prepare build of next block require.NoError(proRemoteVM.SetPreference(context.Background(), builtBlk1.ID())) @@ -382,7 +408,7 @@ func TestGetAncestorsAtSnomanPlusPlusFork(t *testing.T) { coreBlk2 := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(222), + IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, BytesV: []byte{2}, @@ -394,9 +420,8 @@ func TestGetAncestorsAtSnomanPlusPlusFork(t *testing.T) { return coreBlk2, nil } builtBlk2, err := proRemoteVM.BuildBlock(context.Background()) - require.NoError(err, "Could not build proposer block") - _, ok = builtBlk2.(*preForkBlock) - require.True(ok, "Block should be a pre-fork one") + require.NoError(err) + require.IsType(&preForkBlock{}, builtBlk2) // prepare build of next block require.NoError(proRemoteVM.SetPreference(context.Background(), builtBlk2.ID())) @@ -413,75 +438,71 @@ func TestGetAncestorsAtSnomanPlusPlusFork(t *testing.T) { proRemoteVM.Set(postForkTime) coreBlk3 := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(333), + IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, - BytesV: []byte{3}, - ParentV: coreBlk2.ID(), - HeightV: coreBlk2.Height() + 1, - TimestampV: postForkTime.Add(proposer.MaxDelay), + BytesV: []byte{3}, + ParentV: coreBlk2.ID(), + 
HeightV: coreBlk2.Height() + 1, } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk3, nil } builtBlk3, err := proRemoteVM.BuildBlock(context.Background()) - require.NoError(err, "Could not build proposer block") - _, ok = builtBlk3.(*postForkBlock) - require.True(ok, "Block should be a post-fork one") + require.NoError(err) + require.IsType(&postForkBlock{}, builtBlk3) // prepare build of next block require.NoError(builtBlk3.Verify(context.Background())) require.NoError(proRemoteVM.SetPreference(context.Background(), builtBlk3.ID())) - proRemoteVM.Set(proRemoteVM.Time().Add(proposer.MaxDelay)) + require.NoError(waitForProposerWindow(proRemoteVM, builtBlk3, builtBlk3.(*postForkBlock).PChainHeight())) coreBlk4 := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(444), + IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, - BytesV: []byte{4}, - ParentV: coreBlk3.ID(), - HeightV: coreBlk3.Height() + 1, - TimestampV: postForkTime, + BytesV: []byte{4}, + ParentV: coreBlk3.ID(), + HeightV: coreBlk3.Height() + 1, } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk4, nil } builtBlk4, err := proRemoteVM.BuildBlock(context.Background()) - require.NoError(err, "Could not build proposer block") - _, ok = builtBlk4.(*postForkBlock) - require.True(ok, "Block should be a post-fork one") + require.NoError(err) + require.IsType(&postForkBlock{}, builtBlk4) require.NoError(builtBlk4.Verify(context.Background())) // ...Call GetAncestors on them ... // Note: we assumed that if blkID is not known, that's NOT an error. 
// Simply return an empty result - coreVM.GetAncestorsF = func(_ context.Context, blkID ids.ID, _, _ int, _ time.Duration) ([][]byte, error) { - res := make([][]byte, 0, 3) + coreVM.GetAncestorsF = func(_ context.Context, blkID ids.ID, maxBlocksNum, _ int, _ time.Duration) ([][]byte, error) { + sortedBlocks := [][]byte{ + coreBlk4.Bytes(), + coreBlk3.Bytes(), + coreBlk2.Bytes(), + coreBlk1.Bytes(), + } + var startIndex int switch blkID { case coreBlk4.ID(): - res = append(res, coreBlk4.Bytes()) - res = append(res, coreBlk3.Bytes()) - res = append(res, coreBlk2.Bytes()) - res = append(res, coreBlk1.Bytes()) - return res, nil + startIndex = 0 case coreBlk3.ID(): - res = append(res, coreBlk3.Bytes()) - res = append(res, coreBlk2.Bytes()) - res = append(res, coreBlk1.Bytes()) - return res, nil + startIndex = 1 case coreBlk2.ID(): - res = append(res, coreBlk2.Bytes()) - res = append(res, coreBlk1.Bytes()) - return res, nil + startIndex = 2 case coreBlk1.ID(): - res = append(res, coreBlk1.Bytes()) - return res, nil + startIndex = 3 default: - return res, nil + return nil, nil // unknown blockID } + + endIndex := min(startIndex+maxBlocksNum, len(sortedBlocks)) + return sortedBlocks[startIndex:endIndex], nil } + // load all known blocks reqBlkID := builtBlk4.ID() maxBlocksNum := 1000 // an high value to get all built blocks maxBlocksSize := 1000000 // an high value to get all built blocks @@ -495,12 +516,30 @@ func TestGetAncestorsAtSnomanPlusPlusFork(t *testing.T) { ) // ... 
and check returned values are as expected - require.NoError(err, "Error calling GetAncestors: %v", err) - require.Len(res, 4, "GetAncestor returned %v entries instead of %v", len(res), 4) - require.EqualValues(res[0], builtBlk4.Bytes()) - require.EqualValues(res[1], builtBlk3.Bytes()) - require.EqualValues(res[2], builtBlk2.Bytes()) - require.EqualValues(res[3], builtBlk1.Bytes()) + require.NoError(err) + require.Len(res, 4) + require.Equal(builtBlk4.Bytes(), res[0]) + require.Equal(builtBlk3.Bytes(), res[1]) + require.Equal(builtBlk2.Bytes(), res[2]) + require.Equal(builtBlk1.Bytes(), res[3]) + + // Regression case: load some prefork and some postfork blocks. + reqBlkID = builtBlk4.ID() + maxBlocksNum = 3 + res, err = proRemoteVM.GetAncestors( + context.Background(), + reqBlkID, + maxBlocksNum, + maxBlocksSize, + maxBlocksRetrivalTime, + ) + + // ... and check returned values are as expected + require.NoError(err) + require.Len(res, 3) + require.Equal(builtBlk4.Bytes(), res[0]) + require.Equal(builtBlk3.Bytes(), res[1]) + require.Equal(builtBlk2.Bytes(), res[2]) // another good call reqBlkID = builtBlk1.ID() @@ -511,9 +550,9 @@ func TestGetAncestorsAtSnomanPlusPlusFork(t *testing.T) { maxBlocksSize, maxBlocksRetrivalTime, ) - require.NoError(err, "Error calling GetAncestors: %v", err) - require.Len(res, 1, "GetAncestor returned %v entries instead of %v", len(res), 1) - require.EqualValues(res[0], builtBlk1.Bytes()) + require.NoError(err) + require.Len(res, 1) + require.Equal(builtBlk1.Bytes(), res[0]) // a faulty call reqBlkID = ids.Empty @@ -524,30 +563,36 @@ func TestGetAncestorsAtSnomanPlusPlusFork(t *testing.T) { maxBlocksSize, maxBlocksRetrivalTime, ) - require.NoError(err, "Error calling GetAncestors: %v", err) - require.Len(res, 0, "GetAncestor returned %v entries instead of %v", len(res), 0) + require.NoError(err) + require.Empty(res) } func TestBatchedParseBlockPreForkOnly(t *testing.T) { require := require.New(t) - coreVM, proRemoteVM, coreGenBlk := 
initTestRemoteProposerVM(t, mockable.MaxTime) // disable ProBlks + var ( + activationTime = mockable.MaxTime + durangoTime = activationTime + ) + coreVM, proRemoteVM, coreGenBlk := initTestRemoteProposerVM(t, activationTime, durangoTime) + defer func() { + require.NoError(proRemoteVM.Shutdown(context.Background())) + }() // Build some prefork blocks.... coreBlk1 := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(111), + IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - TimestampV: coreGenBlk.Timestamp(), + BytesV: []byte{1}, + ParentV: coreGenBlk.ID(), + HeightV: coreGenBlk.Height() + 1, } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk1, nil } builtBlk1, err := proRemoteVM.BuildBlock(context.Background()) - require.NoError(err, "Could not build preFork block") + require.NoError(err) // prepare build of next block require.NoError(proRemoteVM.SetPreference(context.Background(), builtBlk1.ID())) @@ -562,19 +607,18 @@ func TestBatchedParseBlockPreForkOnly(t *testing.T) { coreBlk2 := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(222), + IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, - BytesV: []byte{2}, - ParentV: coreBlk1.ID(), - HeightV: coreBlk1.Height() + 1, - TimestampV: coreBlk1.Timestamp(), + BytesV: []byte{2}, + ParentV: coreBlk1.ID(), + HeightV: coreBlk1.Height() + 1, } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk2, nil } builtBlk2, err := proRemoteVM.BuildBlock(context.Background()) - require.NoError(err, "Could not build proposer block") + require.NoError(err) // prepare build of next block require.NoError(proRemoteVM.SetPreference(context.Background(), builtBlk2.ID())) @@ -589,19 +633,18 @@ func TestBatchedParseBlockPreForkOnly(t *testing.T) { coreBlk3 := &snowman.TestBlock{ TestDecidable: 
choices.TestDecidable{ - IDV: ids.Empty.Prefix(222), + IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, - BytesV: []byte{3}, - ParentV: coreBlk2.ID(), - HeightV: coreBlk2.Height() + 1, - TimestampV: coreBlk2.Timestamp(), + BytesV: []byte{3}, + ParentV: coreBlk2.ID(), + HeightV: coreBlk2.Height() + 1, } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk3, nil } builtBlk3, err := proRemoteVM.BuildBlock(context.Background()) - require.NoError(err, "Could not build proposer block") + require.NoError(err) coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { @@ -639,75 +682,79 @@ func TestBatchedParseBlockPreForkOnly(t *testing.T) { builtBlk3.Bytes(), } res, err := proRemoteVM.BatchedParseBlock(context.Background(), bytesToParse) - require.NoError(err, "Error calling BatchedParseBlock: %v", err) - require.Len(res, 3, "BatchedParseBlock returned %v entries instead of %v", len(res), 3) - require.Equal(res[0].ID(), builtBlk1.ID()) - require.Equal(res[1].ID(), builtBlk2.ID()) - require.Equal(res[2].ID(), builtBlk3.ID()) + require.NoError(err) + require.Len(res, 3) + require.Equal(builtBlk1.ID(), res[0].ID()) + require.Equal(builtBlk2.ID(), res[1].ID()) + require.Equal(builtBlk3.ID(), res[2].ID()) } func TestBatchedParseBlockPostForkOnly(t *testing.T) { require := require.New(t) - coreVM, proRemoteVM, coreGenBlk := initTestRemoteProposerVM(t, time.Time{}) // enable ProBlks + var ( + activationTime = time.Unix(0, 0) + durangoTime = activationTime + ) + coreVM, proRemoteVM, coreGenBlk := initTestRemoteProposerVM(t, activationTime, durangoTime) + defer func() { + require.NoError(proRemoteVM.Shutdown(context.Background())) + }() // Build some post-Fork blocks.... 
coreBlk1 := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(111), + IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - TimestampV: coreGenBlk.Timestamp(), + BytesV: []byte{1}, + ParentV: coreGenBlk.ID(), + HeightV: coreGenBlk.Height() + 1, } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk1, nil } builtBlk1, err := proRemoteVM.BuildBlock(context.Background()) - require.NoError(err, "Could not build preFork block") + require.NoError(err) // prepare build of next block require.NoError(builtBlk1.Verify(context.Background())) require.NoError(proRemoteVM.SetPreference(context.Background(), builtBlk1.ID())) - proRemoteVM.Set(proRemoteVM.Time().Add(proposer.MaxDelay)) + require.NoError(waitForProposerWindow(proRemoteVM, builtBlk1, 0)) coreBlk2 := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(222), + IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, - BytesV: []byte{2}, - ParentV: coreBlk1.ID(), - HeightV: coreBlk1.Height() + 1, - TimestampV: coreBlk1.Timestamp().Add(proposer.MaxDelay), + BytesV: []byte{2}, + ParentV: coreBlk1.ID(), + HeightV: coreBlk1.Height() + 1, } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk2, nil } builtBlk2, err := proRemoteVM.BuildBlock(context.Background()) - require.NoError(err, "Could not build proposer block") + require.NoError(err) // prepare build of next block require.NoError(builtBlk2.Verify(context.Background())) require.NoError(proRemoteVM.SetPreference(context.Background(), builtBlk2.ID())) - proRemoteVM.Set(proRemoteVM.Time().Add(proposer.MaxDelay)) + require.NoError(waitForProposerWindow(proRemoteVM, builtBlk2, builtBlk2.(*postForkBlock).PChainHeight())) coreBlk3 := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(333), + IDV: ids.GenerateTestID(), StatusV: 
choices.Processing, }, - BytesV: []byte{3}, - ParentV: coreBlk2.ID(), - HeightV: coreBlk2.Height() + 1, - TimestampV: coreBlk2.Timestamp(), + BytesV: []byte{3}, + ParentV: coreBlk2.ID(), + HeightV: coreBlk2.Height() + 1, } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk3, nil } builtBlk3, err := proRemoteVM.BuildBlock(context.Background()) - require.NoError(err, "Could not build proposer block") + require.NoError(err) coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { @@ -745,27 +792,36 @@ func TestBatchedParseBlockPostForkOnly(t *testing.T) { builtBlk3.Bytes(), } res, err := proRemoteVM.BatchedParseBlock(context.Background(), bytesToParse) - require.NoError(err, "Error calling BatchedParseBlock: %v", err) - require.Len(res, 3, "BatchedParseBlock returned %v entries instead of %v", len(res), 3) - require.Equal(res[0].ID(), builtBlk1.ID()) - require.Equal(res[1].ID(), builtBlk2.ID()) - require.Equal(res[2].ID(), builtBlk3.ID()) + require.NoError(err) + require.Len(res, 3) + require.Equal(builtBlk1.ID(), res[0].ID()) + require.Equal(builtBlk2.ID(), res[1].ID()) + require.Equal(builtBlk3.ID(), res[2].ID()) } func TestBatchedParseBlockAtSnomanPlusPlusFork(t *testing.T) { require := require.New(t) - currentTime := time.Now().Truncate(time.Second) - preForkTime := currentTime.Add(5 * time.Minute) - forkTime := currentTime.Add(10 * time.Minute) - postForkTime := currentTime.Add(15 * time.Minute) + + var ( + currentTime = time.Now().Truncate(time.Second) + preForkTime = currentTime.Add(5 * time.Minute) + forkTime = currentTime.Add(10 * time.Minute) + postForkTime = currentTime.Add(15 * time.Minute) + + durangoTime = forkTime + ) + // enable ProBlks in next future - coreVM, proRemoteVM, coreGenBlk := initTestRemoteProposerVM(t, forkTime) + coreVM, proRemoteVM, coreGenBlk := initTestRemoteProposerVM(t, forkTime, durangoTime) + defer func() { + require.NoError(proRemoteVM.Shutdown(context.Background())) 
+ }() // Build some prefork blocks.... proRemoteVM.Set(preForkTime) coreBlk1 := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(111), + IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, BytesV: []byte{1}, @@ -777,9 +833,8 @@ func TestBatchedParseBlockAtSnomanPlusPlusFork(t *testing.T) { return coreBlk1, nil } builtBlk1, err := proRemoteVM.BuildBlock(context.Background()) - require.NoError(err, "Could not build preFork block") - _, ok := builtBlk1.(*preForkBlock) - require.True(ok, "Block should be a pre-fork one") + require.NoError(err) + require.IsType(&preForkBlock{}, builtBlk1) // prepare build of next block require.NoError(proRemoteVM.SetPreference(context.Background(), builtBlk1.ID())) @@ -794,7 +849,7 @@ func TestBatchedParseBlockAtSnomanPlusPlusFork(t *testing.T) { coreBlk2 := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(222), + IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, BytesV: []byte{2}, @@ -806,9 +861,8 @@ func TestBatchedParseBlockAtSnomanPlusPlusFork(t *testing.T) { return coreBlk2, nil } builtBlk2, err := proRemoteVM.BuildBlock(context.Background()) - require.NoError(err, "Could not build proposer block") - _, ok = builtBlk2.(*preForkBlock) - require.True(ok, "Block should be a pre-fork one") + require.NoError(err) + require.IsType(&preForkBlock{}, builtBlk2) // prepare build of next block require.NoError(proRemoteVM.SetPreference(context.Background(), builtBlk2.ID())) @@ -825,44 +879,40 @@ func TestBatchedParseBlockAtSnomanPlusPlusFork(t *testing.T) { proRemoteVM.Set(postForkTime) coreBlk3 := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(333), + IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, - BytesV: []byte{3}, - ParentV: coreBlk2.ID(), - HeightV: coreBlk2.Height() + 1, - TimestampV: postForkTime.Add(proposer.MaxDelay), + BytesV: []byte{3}, + ParentV: coreBlk2.ID(), + HeightV: coreBlk2.Height() + 1, } 
coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk3, nil } builtBlk3, err := proRemoteVM.BuildBlock(context.Background()) - require.NoError(err, "Could not build proposer block") - _, ok = builtBlk3.(*postForkBlock) - require.True(ok, "Block should be a post-fork one") + require.NoError(err) + require.IsType(&postForkBlock{}, builtBlk3) // prepare build of next block require.NoError(builtBlk3.Verify(context.Background())) require.NoError(proRemoteVM.SetPreference(context.Background(), builtBlk3.ID())) - proRemoteVM.Set(proRemoteVM.Time().Add(proposer.MaxDelay)) + require.NoError(waitForProposerWindow(proRemoteVM, builtBlk3, builtBlk3.(*postForkBlock).PChainHeight())) coreBlk4 := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(444), + IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, - BytesV: []byte{4}, - ParentV: coreBlk3.ID(), - HeightV: coreBlk3.Height() + 1, - TimestampV: postForkTime, + BytesV: []byte{4}, + ParentV: coreBlk3.ID(), + HeightV: coreBlk3.Height() + 1, } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk4, nil } builtBlk4, err := proRemoteVM.BuildBlock(context.Background()) - require.NoError(err, "Could not build proposer block") - _, ok = builtBlk4.(*postForkBlock) - require.True(ok, "Block should be a post-fork one") + require.NoError(err) + require.IsType(&postForkBlock{}, builtBlk4) require.NoError(builtBlk4.Verify(context.Background())) coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { @@ -907,12 +957,12 @@ func TestBatchedParseBlockAtSnomanPlusPlusFork(t *testing.T) { } res, err := proRemoteVM.BatchedParseBlock(context.Background(), bytesToParse) - require.NoError(err, "Error calling BatchedParseBlock: %v", err) - require.Len(res, 4, "BatchedParseBlock returned %v entries instead of %v", len(res), 4) - require.Equal(res[0].ID(), builtBlk4.ID()) - require.Equal(res[1].ID(), builtBlk3.ID()) - 
require.Equal(res[2].ID(), builtBlk2.ID()) - require.Equal(res[3].ID(), builtBlk1.ID()) + require.NoError(err) + require.Len(res, 4) + require.Equal(builtBlk4.ID(), res[0].ID()) + require.Equal(builtBlk3.ID(), res[1].ID()) + require.Equal(builtBlk2.ID(), res[2].ID()) + require.Equal(builtBlk1.ID(), res[3].ID()) } type TestRemoteProposerVM struct { @@ -922,12 +972,15 @@ type TestRemoteProposerVM struct { func initTestRemoteProposerVM( t *testing.T, - proBlkStartTime time.Time, + activationTime, + durangoTime time.Time, ) ( TestRemoteProposerVM, *VM, *snowman.TestBlock, ) { + require := require.New(t) + coreGenBlk := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ IDV: ids.GenerateTestID(), @@ -949,7 +1002,7 @@ func initTestRemoteProposerVM( coreVM.InitializeF = func( context.Context, *snow.Context, - manager.Manager, + database.Database, []byte, []byte, []byte, @@ -978,14 +1031,21 @@ func initTestRemoteProposerVM( return nil, errUnknownBlock } } + coreVM.VerifyHeightIndexF = func(context.Context) error { + return nil + } proVM := New( coreVM, - proBlkStartTime, - 0, - DefaultMinBlockDelay, - pTestCert.PrivateKey.(crypto.Signer), - pTestCert.Leaf, + Config{ + ActivationTime: activationTime, + DurangoTime: durangoTime, + MinimumPChainHeight: 0, + MinBlkDelay: DefaultMinBlockDelay, + NumHistoricalBlocks: DefaultNumHistoricalBlocks, + StakingLeafSigner: pTestSigner, + StakingCertLeaf: pTestCert, + }, ) valState := &validators.TestState{ @@ -998,58 +1058,52 @@ func initTestRemoteProposerVM( return defaultPChainHeight, nil } valState.GetValidatorSetF = func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { + var ( + thisNode = proVM.ctx.NodeID + nodeID1 = ids.BuildTestNodeID([]byte{1}) + nodeID2 = ids.BuildTestNodeID([]byte{2}) + nodeID3 = ids.BuildTestNodeID([]byte{3}) + ) return map[ids.NodeID]*validators.GetValidatorOutput{ - proVM.ctx.NodeID: { - NodeID: proVM.ctx.NodeID, + thisNode: { + NodeID: thisNode, Weight: 10, 
}, - {1}: { - NodeID: ids.NodeID{1}, + nodeID1: { + NodeID: nodeID1, Weight: 5, }, - {2}: { - NodeID: ids.NodeID{2}, + nodeID2: { + NodeID: nodeID2, Weight: 6, }, - {3}: { - NodeID: ids.NodeID{3}, + nodeID3: { + NodeID: nodeID3, Weight: 7, }, }, nil } - ctx := snow.DefaultContextTest() - ctx.NodeID = ids.NodeIDFromCert(pTestCert.Leaf) + ctx := snowtest.Context(t, snowtest.CChainID) + ctx.NodeID = ids.NodeIDFromCert(pTestCert) ctx.ValidatorState = valState - dummyDBManager := manager.NewMemDB(version.Semantic1_0_0) - // make sure that DBs are compressed correctly - dummyDBManager = dummyDBManager.NewPrefixDBManager([]byte{}) - err := proVM.Initialize( + require.NoError(proVM.Initialize( context.Background(), ctx, - dummyDBManager, + prefixdb.New([]byte{}, memdb.New()), // make sure that DBs are compressed correctly initialState, nil, nil, nil, nil, nil, - ) - if err != nil { - t.Fatalf("failed to initialize proposerVM with %s", err) - } + )) // Initialize shouldn't be called again coreVM.InitializeF = nil - if err := proVM.SetState(context.Background(), snow.NormalOp); err != nil { - t.Fatal(err) - } - - if err := proVM.SetPreference(context.Background(), coreGenBlk.IDV); err != nil { - t.Fatal(err) - } - + require.NoError(proVM.SetState(context.Background(), snow.NormalOp)) + require.NoError(proVM.SetPreference(context.Background(), coreGenBlk.IDV)) return coreVM, proVM, coreGenBlk } diff --git a/avalanchego/vms/proposervm/block.go b/avalanchego/vms/proposervm/block.go index a448dcb3..c43e8c5e 100644 --- a/avalanchego/vms/proposervm/block.go +++ b/avalanchego/vms/proposervm/block.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package proposervm @@ -35,6 +35,7 @@ var ( errPChainHeightNotReached = errors.New("block P-chain height larger than current P-chain height") errTimeTooAdvanced = errors.New("time is too far advanced") errProposerWindowNotStarted = errors.New("proposer window hasn't started") + errUnexpectedProposer = errors.New("unexpected proposer for current window") errProposersNotActivated = errors.New("proposers haven't been activated yet") errPChainHeightTooLow = errors.New("block P-chain height is too low") ) @@ -124,42 +125,41 @@ func (p *postForkCommonComponents) Verify( // If the node is currently syncing - we don't assume that the P-chain has // been synced up to this point yet. if p.vm.consensusState == snow.NormalOp { - childID := child.ID() currentPChainHeight, err := p.vm.ctx.ValidatorState.GetCurrentHeight(ctx) if err != nil { p.vm.ctx.Log.Error("block verification failed", zap.String("reason", "failed to get current P-Chain height"), - zap.Stringer("blkID", childID), + zap.Stringer("blkID", child.ID()), zap.Error(err), ) return err } if childPChainHeight > currentPChainHeight { - return errPChainHeightNotReached + return fmt.Errorf("%w: %d > %d", + errPChainHeightNotReached, + childPChainHeight, + currentPChainHeight, + ) } - childHeight := child.Height() - proposerID := child.Proposer() - minDelay, err := p.vm.Windower.Delay(ctx, childHeight, parentPChainHeight, proposerID) + var shouldHaveProposer bool + if p.vm.IsDurangoActivated(parentTimestamp) { + shouldHaveProposer, err = p.verifyPostDurangoBlockDelay(ctx, parentTimestamp, parentPChainHeight, child) + } else { + shouldHaveProposer, err = p.verifyPreDurangoBlockDelay(ctx, parentTimestamp, parentPChainHeight, child) + } if err != nil { return err } - delay := childTimestamp.Sub(parentTimestamp) - if delay < minDelay { - return errProposerWindowNotStarted - } - // Verify the signature of the node - shouldHaveProposer := delay < proposer.MaxDelay if err := child.SignedBlock.Verify(shouldHaveProposer, 
p.vm.ctx.ChainID); err != nil { return err } p.vm.ctx.Log.Debug("verified post-fork block", - zap.Stringer("blkID", childID), + zap.Stringer("blkID", child.ID()), zap.Time("parentTimestamp", parentTimestamp), - zap.Duration("minDelay", minDelay), zap.Time("blockTimestamp", childTimestamp), ) } @@ -190,35 +190,34 @@ func (p *postForkCommonComponents) buildChild( // is at least the parent's P-Chain height pChainHeight, err := p.vm.optimalPChainHeight(ctx, parentPChainHeight) if err != nil { + p.vm.ctx.Log.Error("unexpected build block failure", + zap.String("reason", "failed to calculate optimal P-chain height"), + zap.Stringer("parentID", parentID), + zap.Error(err), + ) return nil, err } - delay := newTimestamp.Sub(parentTimestamp) - if delay < proposer.MaxDelay { - parentHeight := p.innerBlk.Height() - proposerID := p.vm.ctx.NodeID - minDelay, err := p.vm.Windower.Delay(ctx, parentHeight+1, parentPChainHeight, proposerID) - if err != nil { - return nil, err - } - - if delay < minDelay { - // It's not our turn to propose a block yet. This is likely caused - // by having previously notified the consensus engine to attempt to - // build a block on top of a block that is no longer the preferred - // block. - p.vm.ctx.Log.Debug("build block dropped", - zap.Time("parentTimestamp", parentTimestamp), - zap.Duration("minDelay", minDelay), - zap.Time("blockTimestamp", newTimestamp), - ) - - // In case the inner VM only issued one pendingTxs message, we - // should attempt to re-handle that once it is our turn to build the - // block. 
- p.vm.notifyInnerBlockReady() - return nil, errProposerWindowNotStarted - } + var shouldBuildSignedBlock bool + if p.vm.IsDurangoActivated(parentTimestamp) { + shouldBuildSignedBlock, err = p.shouldBuildSignedBlockPostDurango( + ctx, + parentID, + parentTimestamp, + parentPChainHeight, + newTimestamp, + ) + } else { + shouldBuildSignedBlock, err = p.shouldBuildSignedBlockPreDurango( + ctx, + parentID, + parentTimestamp, + parentPChainHeight, + newTimestamp, + ) + } + if err != nil { + return nil, err } var innerBlock snowman.Block @@ -235,25 +234,31 @@ func (p *postForkCommonComponents) buildChild( // Build the child var statelessChild block.SignedBlock - if delay >= proposer.MaxDelay { - statelessChild, err = block.BuildUnsigned( + if shouldBuildSignedBlock { + statelessChild, err = block.Build( parentID, newTimestamp, pChainHeight, + p.vm.StakingCertLeaf, innerBlock.Bytes(), + p.vm.ctx.ChainID, + p.vm.StakingLeafSigner, ) } else { - statelessChild, err = block.Build( + statelessChild, err = block.BuildUnsigned( parentID, newTimestamp, pChainHeight, - p.vm.stakingCertLeaf, innerBlock.Bytes(), - p.vm.ctx.ChainID, - p.vm.stakingLeafSigner, ) } if err != nil { + p.vm.ctx.Log.Error("unexpected build block failure", + zap.String("reason", "failed to generate proposervm block header"), + zap.Stringer("parentID", parentID), + zap.Stringer("blkID", innerBlock.ID()), + zap.Error(err), + ) return nil, err } @@ -314,3 +319,186 @@ func verifyIsNotOracleBlock(ctx context.Context, b snowman.Block) error { return err } } + +func (p *postForkCommonComponents) verifyPreDurangoBlockDelay( + ctx context.Context, + parentTimestamp time.Time, + parentPChainHeight uint64, + blk *postForkBlock, +) (bool, error) { + var ( + blkTimestamp = blk.Timestamp() + childHeight = blk.Height() + proposerID = blk.Proposer() + ) + minDelay, err := p.vm.Windower.Delay( + ctx, + childHeight, + parentPChainHeight, + proposerID, + proposer.MaxVerifyWindows, + ) + if err != nil { + 
p.vm.ctx.Log.Error("unexpected block verification failure", + zap.String("reason", "failed to calculate required timestamp delay"), + zap.Stringer("blkID", blk.ID()), + zap.Error(err), + ) + return false, err + } + + delay := blkTimestamp.Sub(parentTimestamp) + if delay < minDelay { + return false, fmt.Errorf("%w: delay %s < minDelay %s", errProposerWindowNotStarted, delay, minDelay) + } + + return delay < proposer.MaxVerifyDelay, nil +} + +func (p *postForkCommonComponents) verifyPostDurangoBlockDelay( + ctx context.Context, + parentTimestamp time.Time, + parentPChainHeight uint64, + blk *postForkBlock, +) (bool, error) { + var ( + blkTimestamp = blk.Timestamp() + blkHeight = blk.Height() + currentSlot = proposer.TimeToSlot(parentTimestamp, blkTimestamp) + proposerID = blk.Proposer() + ) + + expectedProposerID, err := p.vm.Windower.ExpectedProposer( + ctx, + blkHeight, + parentPChainHeight, + currentSlot, + ) + switch { + case errors.Is(err, proposer.ErrAnyoneCanPropose): + return false, nil // block should be unsigned + case err != nil: + p.vm.ctx.Log.Error("unexpected block verification failure", + zap.String("reason", "failed to calculate expected proposer"), + zap.Stringer("blkID", blk.ID()), + zap.Error(err), + ) + return false, err + case expectedProposerID == proposerID: + return true, nil // block should be signed + default: + return false, fmt.Errorf("%w: slot %d expects %s", errUnexpectedProposer, currentSlot, expectedProposerID) + } +} + +func (p *postForkCommonComponents) shouldBuildSignedBlockPostDurango( + ctx context.Context, + parentID ids.ID, + parentTimestamp time.Time, + parentPChainHeight uint64, + newTimestamp time.Time, +) (bool, error) { + parentHeight := p.innerBlk.Height() + currentSlot := proposer.TimeToSlot(parentTimestamp, newTimestamp) + expectedProposerID, err := p.vm.Windower.ExpectedProposer( + ctx, + parentHeight+1, + parentPChainHeight, + currentSlot, + ) + switch { + case errors.Is(err, proposer.ErrAnyoneCanPropose): + return 
false, nil // build an unsigned block + case err != nil: + p.vm.ctx.Log.Error("unexpected build block failure", + zap.String("reason", "failed to calculate expected proposer"), + zap.Stringer("parentID", parentID), + zap.Error(err), + ) + return false, err + case expectedProposerID == p.vm.ctx.NodeID: + return true, nil // build a signed block + } + + // It's not our turn to propose a block yet. This is likely caused by having + // previously notified the consensus engine to attempt to build a block on + // top of a block that is no longer the preferred block. + p.vm.ctx.Log.Debug("build block dropped", + zap.Time("parentTimestamp", parentTimestamp), + zap.Time("blockTimestamp", newTimestamp), + zap.Uint64("slot", currentSlot), + zap.Stringer("expectedProposer", expectedProposerID), + ) + + // We need to reschedule the block builder to the next time we can try to + // build a block. + // + // TODO: After Durango activates, restructure this logic to separate + // updating the scheduler from verifying the proposerID. + nextStartTime, err := p.vm.getPostDurangoSlotTime( + ctx, + parentHeight+1, + parentPChainHeight, + currentSlot+1, // We know we aren't the proposer for the current slot + parentTimestamp, + ) + if err != nil { + p.vm.ctx.Log.Error("failed to reset block builder scheduler", + zap.String("reason", "failed to calculate expected proposer"), + zap.Stringer("parentID", parentID), + zap.Error(err), + ) + return false, err + } + p.vm.Scheduler.SetBuildBlockTime(nextStartTime) + + // In case the inner VM only issued one pendingTxs message, we should + // attempt to re-handle that once it is our turn to build the block. 
+ p.vm.notifyInnerBlockReady() + return false, fmt.Errorf("%w: slot %d expects %s", errUnexpectedProposer, currentSlot, expectedProposerID) +} + +func (p *postForkCommonComponents) shouldBuildSignedBlockPreDurango( + ctx context.Context, + parentID ids.ID, + parentTimestamp time.Time, + parentPChainHeight uint64, + newTimestamp time.Time, +) (bool, error) { + delay := newTimestamp.Sub(parentTimestamp) + if delay >= proposer.MaxBuildDelay { + return false, nil // time for any node to build an unsigned block + } + + parentHeight := p.innerBlk.Height() + proposerID := p.vm.ctx.NodeID + minDelay, err := p.vm.Windower.Delay(ctx, parentHeight+1, parentPChainHeight, proposerID, proposer.MaxBuildWindows) + if err != nil { + p.vm.ctx.Log.Error("unexpected build block failure", + zap.String("reason", "failed to calculate required timestamp delay"), + zap.Stringer("parentID", parentID), + zap.Error(err), + ) + return false, err + } + + if delay >= minDelay { + // it's time for this node to propose a block. It'll be signed or + // unsigned depending on the delay + return delay < proposer.MaxVerifyDelay, nil + } + + // It's not our turn to propose a block yet. This is likely caused by having + // previously notified the consensus engine to attempt to build a block on + // top of a block that is no longer the preferred block. + p.vm.ctx.Log.Debug("build block dropped", + zap.Time("parentTimestamp", parentTimestamp), + zap.Duration("minDelay", minDelay), + zap.Time("blockTimestamp", newTimestamp), + ) + + // In case the inner VM only issued one pendingTxs message, we should + // attempt to re-handle that once it is our turn to build the block. 
+ p.vm.notifyInnerBlockReady() + return false, fmt.Errorf("%w: delay %s < minDelay %s", errProposerWindowNotStarted, delay, minDelay) +} diff --git a/avalanchego/vms/proposervm/block/block.go b/avalanchego/vms/proposervm/block/block.go index efdba801..0f5b3743 100644 --- a/avalanchego/vms/proposervm/block/block.go +++ b/avalanchego/vms/proposervm/block/block.go @@ -1,11 +1,11 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package block import ( - "crypto/x509" "errors" + "fmt" "time" "github.com/ava-labs/avalanchego/ids" @@ -19,6 +19,7 @@ var ( errUnexpectedProposer = errors.New("expected no proposer but one was provided") errMissingProposer = errors.New("expected proposer but none was provided") + errInvalidCertificate = errors.New("invalid certificate") ) type Block interface { @@ -27,7 +28,7 @@ type Block interface { Block() []byte Bytes() []byte - initialize(bytes []byte) error + initialize(bytes []byte, durangoTime time.Time) error } type SignedBlock interface { @@ -54,7 +55,7 @@ type statelessBlock struct { id ids.ID timestamp time.Time - cert *x509.Certificate + cert *staking.Certificate proposer ids.NodeID bytes []byte } @@ -75,7 +76,7 @@ func (b *statelessBlock) Bytes() []byte { return b.bytes } -func (b *statelessBlock) initialize(bytes []byte) error { +func (b *statelessBlock) initialize(bytes []byte, durangoTime time.Time) error { b.bytes = bytes // The serialized form of the block is the unsignedBytes followed by the @@ -90,17 +91,18 @@ func (b *statelessBlock) initialize(bytes []byte) error { return nil } - cert, err := x509.ParseCertificate(b.StatelessBlock.Certificate) - if err != nil { - return err + // TODO: Remove durangoTime after v1.11.x has activated. 
+ var err error + if b.timestamp.Before(durangoTime) { + b.cert, err = staking.ParseCertificate(b.StatelessBlock.Certificate) + } else { + b.cert, err = staking.ParseCertificatePermissive(b.StatelessBlock.Certificate) } - - if err := staking.VerifyCertificate(cert); err != nil { - return err + if err != nil { + return fmt.Errorf("%w: %w", errInvalidCertificate, err) } - b.cert = cert - b.proposer = ids.NodeIDFromCert(cert) + b.proposer = ids.NodeIDFromCert(b.cert) return nil } @@ -132,5 +134,9 @@ func (b *statelessBlock) Verify(shouldHaveProposer bool, chainID ids.ID) error { } headerBytes := header.Bytes() - return b.cert.CheckSignature(b.cert.SignatureAlgorithm, headerBytes, b.Signature) + return staking.CheckSignature( + b.cert, + headerBytes, + b.Signature, + ) } diff --git a/avalanchego/vms/proposervm/block/block_test.go b/avalanchego/vms/proposervm/block/block_test.go index 7b6b6de5..8a8a57ae 100644 --- a/avalanchego/vms/proposervm/block/block_test.go +++ b/avalanchego/vms/proposervm/block/block_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package block @@ -41,10 +41,10 @@ func TestVerifyNoCertWithSignature(t *testing.T) { builtBlock.Signature = []byte{0} err = builtBlock.Verify(false, ids.Empty) - require.Error(err) + require.ErrorIs(err, errUnexpectedProposer) err = builtBlock.Verify(true, ids.Empty) - require.Error(err) + require.ErrorIs(err, errMissingProposer) } func TestBlockSizeLimit(t *testing.T) { diff --git a/avalanchego/vms/proposervm/block/build.go b/avalanchego/vms/proposervm/block/build.go index ccfa4da9..b13255c9 100644 --- a/avalanchego/vms/proposervm/block/build.go +++ b/avalanchego/vms/proposervm/block/build.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. package block @@ -6,10 +6,10 @@ package block import ( "crypto" "crypto/rand" - "crypto/x509" "time" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/staking" "github.com/ava-labs/avalanchego/utils/hashing" "github.com/ava-labs/avalanchego/utils/wrappers" ) @@ -31,18 +31,21 @@ func BuildUnsigned( timestamp: timestamp, } - bytes, err := c.Marshal(codecVersion, &block) + bytes, err := Codec.Marshal(CodecVersion, &block) if err != nil { return nil, err } - return block, block.initialize(bytes) + + // Invariant: The durango timestamp isn't used here because the certificate + // is empty. + return block, block.initialize(bytes, time.Time{}) } func Build( parentID ids.ID, timestamp time.Time, pChainHeight uint64, - cert *x509.Certificate, + cert *staking.Certificate, blockBytes []byte, chainID ids.ID, key crypto.Signer, @@ -61,7 +64,7 @@ func Build( } var blockIntf SignedBlock = block - unsignedBytesWithEmptySignature, err := c.Marshal(codecVersion, &blockIntf) + unsignedBytesWithEmptySignature, err := Codec.Marshal(CodecVersion, &blockIntf) if err != nil { return nil, err } @@ -85,7 +88,7 @@ func Build( return nil, err } - block.bytes, err = c.Marshal(codecVersion, &blockIntf) + block.bytes, err = Codec.Marshal(CodecVersion, &blockIntf) return block, err } @@ -100,7 +103,7 @@ func BuildHeader( Body: bodyID, } - bytes, err := c.Marshal(codecVersion, &header) + bytes, err := Codec.Marshal(CodecVersion, &header) header.bytes = bytes return &header, err } @@ -117,9 +120,11 @@ func BuildOption( InnerBytes: innerBytes, } - bytes, err := c.Marshal(codecVersion, &block) + bytes, err := Codec.Marshal(CodecVersion, &block) if err != nil { return nil, err } - return block, block.initialize(bytes) + + // Invariant: The durango timestamp isn't used. 
+ return block, block.initialize(bytes, time.Time{}) } diff --git a/avalanchego/vms/proposervm/block/build_test.go b/avalanchego/vms/proposervm/block/build_test.go index c2d89cff..8388e8a4 100644 --- a/avalanchego/vms/proposervm/block/build_test.go +++ b/avalanchego/vms/proposervm/block/build_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package block @@ -26,7 +26,7 @@ func TestBuild(t *testing.T) { tlsCert, err := staking.NewTLSCert() require.NoError(err) - cert := tlsCert.Leaf + cert := staking.CertificateFromX509(tlsCert.Leaf) key := tlsCert.PrivateKey.(crypto.Signer) builtBlock, err := Build( @@ -45,11 +45,10 @@ func TestBuild(t *testing.T) { require.Equal(timestamp, builtBlock.Timestamp()) require.Equal(innerBlockBytes, builtBlock.Block()) - err = builtBlock.Verify(true, chainID) - require.NoError(err) + require.NoError(builtBlock.Verify(true, chainID)) err = builtBlock.Verify(false, chainID) - require.Error(err) + require.ErrorIs(err, errUnexpectedProposer) } func TestBuildUnsigned(t *testing.T) { @@ -69,11 +68,10 @@ func TestBuildUnsigned(t *testing.T) { require.Equal(innerBlockBytes, builtBlock.Block()) require.Equal(ids.EmptyNodeID, builtBlock.Proposer()) - err = builtBlock.Verify(false, ids.Empty) - require.NoError(err) + require.NoError(builtBlock.Verify(false, ids.Empty)) err = builtBlock.Verify(true, ids.Empty) - require.Error(err) + require.ErrorIs(err, errMissingProposer) } func TestBuildHeader(t *testing.T) { diff --git a/avalanchego/vms/proposervm/block/codec.go b/avalanchego/vms/proposervm/block/codec.go index bf8089db..ca231800 100644 --- a/avalanchego/vms/proposervm/block/codec.go +++ b/avalanchego/vms/proposervm/block/codec.go @@ -1,36 +1,33 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. package block import ( "math" + "time" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" - "github.com/ava-labs/avalanchego/utils/wrappers" + "github.com/ava-labs/avalanchego/utils" ) -const codecVersion = 0 +const CodecVersion = 0 -// The maximum block size is enforced by the p2p message size limit. -// See: [constants.DefaultMaxMessageSize] -// -// Invariant: This codec must never be used to unmarshal a slice unless it is a -// `[]byte`. Otherwise a malicious payload could cause an OOM. -var c codec.Manager +var Codec codec.Manager func init() { - linearCodec := linearcodec.NewCustomMaxLength(math.MaxUint32) - c = codec.NewManager(math.MaxInt) + lc := linearcodec.NewDefault(time.Time{}) + // The maximum block size is enforced by the p2p message size limit. + // See: [constants.DefaultMaxMessageSize] + Codec = codec.NewManager(math.MaxInt) - errs := wrappers.Errs{} - errs.Add( - linearCodec.RegisterType(&statelessBlock{}), - linearCodec.RegisterType(&option{}), - c.RegisterCodec(codecVersion, linearCodec), + err := utils.Err( + lc.RegisterType(&statelessBlock{}), + lc.RegisterType(&option{}), + Codec.RegisterCodec(CodecVersion, lc), ) - if errs.Errored() { - panic(errs.Err) + if err != nil { + panic(err) } } diff --git a/avalanchego/vms/proposervm/block/header.go b/avalanchego/vms/proposervm/block/header.go index 47ed36b9..83c4e813 100644 --- a/avalanchego/vms/proposervm/block/header.go +++ b/avalanchego/vms/proposervm/block/header.go @@ -1,11 +1,9 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package block -import ( - "github.com/ava-labs/avalanchego/ids" -) +import "github.com/ava-labs/avalanchego/ids" type Header interface { ChainID() ids.ID diff --git a/avalanchego/vms/proposervm/block/header_test.go b/avalanchego/vms/proposervm/block/header_test.go index bdbfaf3b..a4db5938 100644 --- a/avalanchego/vms/proposervm/block/header_test.go +++ b/avalanchego/vms/proposervm/block/header_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package block diff --git a/avalanchego/vms/proposervm/block/option.go b/avalanchego/vms/proposervm/block/option.go index 180b90e3..7edb39bd 100644 --- a/avalanchego/vms/proposervm/block/option.go +++ b/avalanchego/vms/proposervm/block/option.go @@ -1,9 +1,11 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package block import ( + "time" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/hashing" ) @@ -32,7 +34,7 @@ func (b *option) Bytes() []byte { return b.bytes } -func (b *option) initialize(bytes []byte) error { +func (b *option) initialize(bytes []byte, _ time.Time) error { b.id = hashing.ComputeHash256Array(bytes) b.bytes = bytes return nil diff --git a/avalanchego/vms/proposervm/block/option_test.go b/avalanchego/vms/proposervm/block/option_test.go index f6d4f409..d5af9c10 100644 --- a/avalanchego/vms/proposervm/block/option_test.go +++ b/avalanchego/vms/proposervm/block/option_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package block diff --git a/avalanchego/vms/proposervm/block/parse.go b/avalanchego/vms/proposervm/block/parse.go index 00ca2e80..bf9b44ad 100644 --- a/avalanchego/vms/proposervm/block/parse.go +++ b/avalanchego/vms/proposervm/block/parse.go @@ -1,32 +1,33 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package block import ( "fmt" + "time" ) -func Parse(bytes []byte) (Block, error) { +func Parse(bytes []byte, durangoTime time.Time) (Block, error) { var block Block - parsedVersion, err := c.Unmarshal(bytes, &block) + parsedVersion, err := Codec.Unmarshal(bytes, &block) if err != nil { return nil, err } - if parsedVersion != codecVersion { - return nil, fmt.Errorf("expected codec version %d but got %d", codecVersion, parsedVersion) + if parsedVersion != CodecVersion { + return nil, fmt.Errorf("expected codec version %d but got %d", CodecVersion, parsedVersion) } - return block, block.initialize(bytes) + return block, block.initialize(bytes, durangoTime) } func ParseHeader(bytes []byte) (Header, error) { header := statelessHeader{} - parsedVersion, err := c.Unmarshal(bytes, &header) + parsedVersion, err := Codec.Unmarshal(bytes, &header) if err != nil { return nil, err } - if parsedVersion != codecVersion { - return nil, fmt.Errorf("expected codec version %d but got %d", codecVersion, parsedVersion) + if parsedVersion != CodecVersion { + return nil, fmt.Errorf("expected codec version %d but got %d", CodecVersion, parsedVersion) } header.bytes = bytes return &header, nil diff --git a/avalanchego/vms/proposervm/block/parse_test.go b/avalanchego/vms/proposervm/block/parse_test.go index f7499601..148bac82 100644 --- a/avalanchego/vms/proposervm/block/parse_test.go +++ b/avalanchego/vms/proposervm/block/parse_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. 
All rights reserved. // See the file LICENSE for licensing terms. package block @@ -11,6 +11,7 @@ import ( "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/staking" ) @@ -27,7 +28,7 @@ func TestParse(t *testing.T) { tlsCert, err := staking.NewTLSCert() require.NoError(err) - cert := tlsCert.Leaf + cert := staking.CertificateFromX509(tlsCert.Leaf) key := tlsCert.PrivateKey.(crypto.Signer) builtBlock, err := Build( @@ -42,14 +43,19 @@ func TestParse(t *testing.T) { require.NoError(err) builtBlockBytes := builtBlock.Bytes() - - parsedBlockIntf, err := Parse(builtBlockBytes) - require.NoError(err) - - parsedBlock, ok := parsedBlockIntf.(SignedBlock) - require.True(ok) - - equal(require, chainID, builtBlock, parsedBlock) + durangoTimes := []time.Time{ + timestamp.Add(time.Second), // Durango not activated yet + timestamp.Add(-time.Second), // Durango activated + } + for _, durangoTime := range durangoTimes { + parsedBlockIntf, err := Parse(builtBlockBytes, durangoTime) + require.NoError(err) + + parsedBlock, ok := parsedBlockIntf.(SignedBlock) + require.True(ok) + + equal(require, chainID, builtBlock, parsedBlock) + } } func TestParseDuplicateExtension(t *testing.T) { @@ -59,8 +65,16 @@ func TestParseDuplicateExtension(t *testing.T) { blockBytes, err := hex.DecodeString(blockHex) require.NoError(err) - _, err = Parse(blockBytes) - require.Error(err) // Do not check for errDuplicateExtension to support g1.19 + // Note: The above blockHex specifies 123 as the block's timestamp. 
+ timestamp := time.Unix(123, 0) + durangoNotYetActivatedTime := timestamp.Add(time.Second) + durangoAlreadyActivatedTime := timestamp.Add(-time.Second) + + _, err = Parse(blockBytes, durangoNotYetActivatedTime) + require.ErrorIs(err, errInvalidCertificate) + + _, err = Parse(blockBytes, durangoAlreadyActivatedTime) + require.NoError(err) } func TestParseHeader(t *testing.T) { @@ -96,7 +110,7 @@ func TestParseOption(t *testing.T) { builtOptionBytes := builtOption.Bytes() - parsedOption, err := Parse(builtOptionBytes) + parsedOption, err := Parse(builtOptionBytes, time.Time{}) require.NoError(err) equalOption(require, builtOption, parsedOption) @@ -114,14 +128,19 @@ func TestParseUnsigned(t *testing.T) { require.NoError(err) builtBlockBytes := builtBlock.Bytes() - - parsedBlockIntf, err := Parse(builtBlockBytes) - require.NoError(err) - - parsedBlock, ok := parsedBlockIntf.(SignedBlock) - require.True(ok) - - equal(require, ids.Empty, builtBlock, parsedBlock) + durangoTimes := []time.Time{ + timestamp.Add(time.Second), // Durango not activated yet + timestamp.Add(-time.Second), // Durango activated + } + for _, durangoTime := range durangoTimes { + parsedBlockIntf, err := Parse(builtBlockBytes, durangoTime) + require.NoError(err) + + parsedBlock, ok := parsedBlockIntf.(SignedBlock) + require.True(ok) + + equal(require, ids.Empty, builtBlock, parsedBlock) + } } func TestParseGibberish(t *testing.T) { @@ -129,6 +148,6 @@ func TestParseGibberish(t *testing.T) { bytes := []byte{0, 1, 2, 3, 4, 5} - _, err := Parse(bytes) - require.Error(err) + _, err := Parse(bytes, time.Time{}) + require.ErrorIs(err, codec.ErrUnknownVersion) } diff --git a/avalanchego/vms/proposervm/block_server.go b/avalanchego/vms/proposervm/block_server.go index e9e2e192..6a056c8b 100644 --- a/avalanchego/vms/proposervm/block_server.go +++ b/avalanchego/vms/proposervm/block_server.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
+// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package proposervm diff --git a/avalanchego/vms/proposervm/block_test.go b/avalanchego/vms/proposervm/block_test.go index 13da3c18..3743cf8f 100644 --- a/avalanchego/vms/proposervm/block_test.go +++ b/avalanchego/vms/proposervm/block_test.go @@ -1,29 +1,31 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package proposervm import ( + "bytes" "context" "crypto/ecdsa" "crypto/elliptic" "crypto/rand" - "crypto/x509" "testing" "time" - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" - "github.com/ava-labs/avalanchego/snow/engine/snowman/block/mocks" "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/staking" "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/vms/proposervm/proposer" + "github.com/ava-labs/avalanchego/vms/proposervm/scheduler" ) // Assert that when the underlying VM implements ChainVMWithBuildBlockContext @@ -33,41 +35,54 @@ import ( func TestPostForkCommonComponents_buildChild(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() - pChainHeight := uint64(1337) - parentID := ids.GenerateTestID() - parentTimestamp := time.Now() - blkID := ids.GenerateTestID() + var ( + nodeID = ids.GenerateTestNodeID() + pChainHeight uint64 = 1337 + parentID = ids.GenerateTestID() + parentTimestamp = time.Now().Truncate(time.Second) + parentHeight uint64 = 1234 + blkID = 
ids.GenerateTestID() + ) + innerBlk := snowman.NewMockBlock(ctrl) innerBlk.EXPECT().ID().Return(blkID).AnyTimes() - innerBlk.EXPECT().Height().Return(pChainHeight - 1).AnyTimes() + innerBlk.EXPECT().Height().Return(parentHeight + 1).AnyTimes() + builtBlk := snowman.NewMockBlock(ctrl) builtBlk.EXPECT().Bytes().Return([]byte{1, 2, 3}).AnyTimes() builtBlk.EXPECT().ID().Return(ids.GenerateTestID()).AnyTimes() builtBlk.EXPECT().Height().Return(pChainHeight).AnyTimes() - innerVM := mocks.NewMockChainVM(ctrl) - innerBlockBuilderVM := mocks.NewMockBuildBlockWithContextChainVM(ctrl) + + innerVM := block.NewMockChainVM(ctrl) + innerBlockBuilderVM := block.NewMockBuildBlockWithContextChainVM(ctrl) innerBlockBuilderVM.EXPECT().BuildBlockWithContext(gomock.Any(), &block.Context{ PChainHeight: pChainHeight - 1, }).Return(builtBlk, nil).AnyTimes() + vdrState := validators.NewMockState(ctrl) vdrState.EXPECT().GetMinimumHeight(context.Background()).Return(pChainHeight, nil).AnyTimes() + windower := proposer.NewMockWindower(ctrl) - windower.EXPECT().Delay(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(time.Duration(0), nil).AnyTimes() + windower.EXPECT().ExpectedProposer(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nodeID, nil).AnyTimes() pk, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) require.NoError(err) vm := &VM{ + Config: Config{ + ActivationTime: time.Unix(0, 0), + DurangoTime: time.Unix(0, 0), + StakingCertLeaf: &staking.Certificate{}, + StakingLeafSigner: pk, + }, ChainVM: innerVM, blockBuilderVM: innerBlockBuilderVM, ctx: &snow.Context{ + NodeID: nodeID, ValidatorState: vdrState, Log: logging.NoLog{}, }, - Windower: windower, - stakingCertLeaf: &x509.Certificate{}, - stakingLeafSigner: pk, + Windower: windower, } blk := &postForkCommonComponents{ @@ -85,3 +100,351 @@ func TestPostForkCommonComponents_buildChild(t *testing.T) { require.NoError(err) require.Equal(builtBlk, gotChild.(*postForkBlock).innerBlk) } + +func 
TestPreDurangoValidatorNodeBlockBuiltDelaysTests(t *testing.T) { + require := require.New(t) + ctx := context.Background() + + var ( + activationTime = time.Unix(0, 0) + durangoTime = mockable.MaxTime + ) + coreVM, valState, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + defer func() { + require.NoError(proVM.Shutdown(ctx)) + }() + + // Build a post fork block. It'll be the parent block in our test cases + parentTime := time.Now().Truncate(time.Second) + proVM.Set(parentTime) + + coreParentBlk := &snowman.TestBlock{ + TestDecidable: choices.TestDecidable{ + IDV: ids.GenerateTestID(), + StatusV: choices.Processing, + }, + BytesV: []byte{1}, + ParentV: coreGenBlk.ID(), + HeightV: coreGenBlk.Height() + 1, + } + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return coreParentBlk, nil + } + coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { + switch { + case blkID == coreParentBlk.ID(): + return coreParentBlk, nil + case blkID == coreGenBlk.ID(): + return coreGenBlk, nil + default: + return nil, errUnknownBlock + } + } + coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { // needed when setting preference + switch { + case bytes.Equal(b, coreParentBlk.Bytes()): + return coreParentBlk, nil + case bytes.Equal(b, coreGenBlk.Bytes()): + return coreGenBlk, nil + default: + return nil, errUnknownBlock + } + } + + parentBlk, err := proVM.BuildBlock(ctx) + require.NoError(err) + require.NoError(parentBlk.Verify(ctx)) + require.NoError(parentBlk.Accept(ctx)) + + // Make sure preference is duly set + require.NoError(proVM.SetPreference(ctx, parentBlk.ID())) + require.Equal(proVM.preferred, parentBlk.ID()) + _, err = proVM.getPostForkBlock(ctx, parentBlk.ID()) + require.NoError(err) + + // Force this node to be the only validator, so to guarantee + // it'd be picked if block build time was before MaxVerifyDelay + valState.GetValidatorSetF = func(context.Context, 
uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { + // a validator with a weight large enough to fully fill the proposers list + weight := uint64(proposer.MaxBuildWindows * 2) + + return map[ids.NodeID]*validators.GetValidatorOutput{ + proVM.ctx.NodeID: { + NodeID: proVM.ctx.NodeID, + Weight: weight, + }, + }, nil + } + + coreChildBlk := &snowman.TestBlock{ + TestDecidable: choices.TestDecidable{ + IDV: ids.GenerateTestID(), + StatusV: choices.Processing, + }, + BytesV: []byte{2}, + ParentV: coreParentBlk.ID(), + HeightV: coreParentBlk.Height() + 1, + } + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return coreChildBlk, nil + } + + { + // Set local clock before MaxVerifyDelay from parent timestamp. + // Check that child block is signed. + localTime := parentBlk.Timestamp().Add(proposer.MaxVerifyDelay - time.Second) + proVM.Set(localTime) + + childBlk, err := proVM.BuildBlock(ctx) + require.NoError(err) + require.IsType(&postForkBlock{}, childBlk) + require.Equal(proVM.ctx.NodeID, childBlk.(*postForkBlock).Proposer()) // signed block + } + + { + // Set local clock exactly MaxVerifyDelay from parent timestamp. + // Check that child block is unsigned. 
+ localTime := parentBlk.Timestamp().Add(proposer.MaxVerifyDelay) + proVM.Set(localTime) + + childBlk, err := proVM.BuildBlock(ctx) + require.NoError(err) + require.IsType(&postForkBlock{}, childBlk) + require.Equal(ids.EmptyNodeID, childBlk.(*postForkBlock).Proposer()) // unsigned so no proposer + } + + { + // Set local clock among MaxVerifyDelay and MaxBuildDelay from parent timestamp + // Check that child block is unsigned + localTime := parentBlk.Timestamp().Add((proposer.MaxVerifyDelay + proposer.MaxBuildDelay) / 2) + proVM.Set(localTime) + + childBlk, err := proVM.BuildBlock(ctx) + require.NoError(err) + require.IsType(&postForkBlock{}, childBlk) + require.Equal(ids.EmptyNodeID, childBlk.(*postForkBlock).Proposer()) // unsigned so no proposer + } + + { + // Set local clock after MaxBuildDelay from parent timestamp + // Check that child block is unsigned + localTime := parentBlk.Timestamp().Add(proposer.MaxBuildDelay) + proVM.Set(localTime) + + childBlk, err := proVM.BuildBlock(ctx) + require.NoError(err) + require.IsType(&postForkBlock{}, childBlk) + require.Equal(ids.EmptyNodeID, childBlk.(*postForkBlock).Proposer()) // unsigned so no proposer + } +} + +func TestPreDurangoNonValidatorNodeBlockBuiltDelaysTests(t *testing.T) { + require := require.New(t) + ctx := context.Background() + + var ( + activationTime = time.Unix(0, 0) + durangoTime = mockable.MaxTime + ) + coreVM, valState, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + defer func() { + require.NoError(proVM.Shutdown(ctx)) + }() + + // Build a post fork block. 
It'll be the parent block in our test cases + parentTime := time.Now().Truncate(time.Second) + proVM.Set(parentTime) + + coreParentBlk := &snowman.TestBlock{ + TestDecidable: choices.TestDecidable{ + IDV: ids.GenerateTestID(), + StatusV: choices.Processing, + }, + BytesV: []byte{1}, + ParentV: coreGenBlk.ID(), + HeightV: coreGenBlk.Height() + 1, + } + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return coreParentBlk, nil + } + coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { + switch { + case blkID == coreParentBlk.ID(): + return coreParentBlk, nil + case blkID == coreGenBlk.ID(): + return coreGenBlk, nil + default: + return nil, errUnknownBlock + } + } + coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { // needed when setting preference + switch { + case bytes.Equal(b, coreParentBlk.Bytes()): + return coreParentBlk, nil + case bytes.Equal(b, coreGenBlk.Bytes()): + return coreGenBlk, nil + default: + return nil, errUnknownBlock + } + } + + parentBlk, err := proVM.BuildBlock(ctx) + require.NoError(err) + require.NoError(parentBlk.Verify(ctx)) + require.NoError(parentBlk.Accept(ctx)) + + // Make sure preference is duly set + require.NoError(proVM.SetPreference(ctx, parentBlk.ID())) + require.Equal(proVM.preferred, parentBlk.ID()) + _, err = proVM.getPostForkBlock(ctx, parentBlk.ID()) + require.NoError(err) + + // Mark node as non validator + valState.GetValidatorSetF = func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { + var ( + aValidator = ids.GenerateTestNodeID() + + // a validator with a weight large enough to fully fill the proposers list + weight = uint64(proposer.MaxBuildWindows * 2) + ) + return map[ids.NodeID]*validators.GetValidatorOutput{ + aValidator: { + NodeID: aValidator, + Weight: weight, + }, + }, nil + } + + coreChildBlk := &snowman.TestBlock{ + TestDecidable: choices.TestDecidable{ + IDV: ids.GenerateTestID(), + 
StatusV: choices.Processing, + }, + BytesV: []byte{2}, + ParentV: coreParentBlk.ID(), + HeightV: coreParentBlk.Height() + 1, + } + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return coreChildBlk, nil + } + + { + // Set local clock before MaxVerifyDelay from parent timestamp. + // Check that child block is not built. + localTime := parentBlk.Timestamp().Add(proposer.MaxVerifyDelay - time.Second) + proVM.Set(localTime) + + _, err := proVM.BuildBlock(ctx) + require.ErrorIs(err, errProposerWindowNotStarted) + } + + { + // Set local clock exactly MaxVerifyDelay from parent timestamp. + // Check that child block is not built. + localTime := parentBlk.Timestamp().Add(proposer.MaxVerifyDelay) + proVM.Set(localTime) + + _, err := proVM.BuildBlock(ctx) + require.ErrorIs(err, errProposerWindowNotStarted) + } + + { + // Set local clock among MaxVerifyDelay and MaxBuildDelay from parent timestamp + // Check that child block is not built. + localTime := parentBlk.Timestamp().Add((proposer.MaxVerifyDelay + proposer.MaxBuildDelay) / 2) + proVM.Set(localTime) + + _, err := proVM.BuildBlock(ctx) + require.ErrorIs(err, errProposerWindowNotStarted) + } + + { + // Set local clock after MaxBuildDelay from parent timestamp + // Check that child block is built and it is unsigned + localTime := parentBlk.Timestamp().Add(proposer.MaxBuildDelay) + proVM.Set(localTime) + + childBlk, err := proVM.BuildBlock(ctx) + require.NoError(err) + require.IsType(&postForkBlock{}, childBlk) + require.Equal(ids.EmptyNodeID, childBlk.(*postForkBlock).Proposer()) // unsigned so no proposer + } +} + +// We consider cases where this node is not current proposer (may be scheduled in the next future or not). 
+// We check that scheduler is called nonetheless, to be able to process innerVM block requests +func TestPostDurangoBuildChildResetScheduler(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) + + var ( + thisNodeID = ids.GenerateTestNodeID() + selectedProposer = ids.GenerateTestNodeID() + pChainHeight uint64 = 1337 + parentID = ids.GenerateTestID() + parentTimestamp = time.Now().Truncate(time.Second) + now = parentTimestamp.Add(12 * time.Second) + parentHeight uint64 = 1234 + ) + + innerBlk := snowman.NewMockBlock(ctrl) + innerBlk.EXPECT().Height().Return(parentHeight + 1).AnyTimes() + + vdrState := validators.NewMockState(ctrl) + vdrState.EXPECT().GetMinimumHeight(context.Background()).Return(pChainHeight, nil).AnyTimes() + + windower := proposer.NewMockWindower(ctrl) + windower.EXPECT().ExpectedProposer(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). + Return(selectedProposer, nil).AnyTimes() // return a proposer different from thisNode, to check whether scheduler is reset + + scheduler := scheduler.NewMockScheduler(ctrl) + + pk, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(err) + vm := &VM{ + Config: Config{ + ActivationTime: time.Unix(0, 0), + DurangoTime: time.Unix(0, 0), + StakingCertLeaf: &staking.Certificate{}, + StakingLeafSigner: pk, + }, + ChainVM: block.NewMockChainVM(ctrl), + ctx: &snow.Context{ + NodeID: thisNodeID, + ValidatorState: vdrState, + Log: logging.NoLog{}, + }, + Windower: windower, + Scheduler: scheduler, + } + vm.Clock.Set(now) + + blk := &postForkCommonComponents{ + innerBlk: innerBlk, + vm: vm, + } + + delays := []time.Duration{ + proposer.MaxLookAheadWindow - time.Minute, + proposer.MaxLookAheadWindow, + proposer.MaxLookAheadWindow + time.Minute, + } + + for _, delay := range delays { + windower.EXPECT().MinDelayForProposer(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). 
+ Return(delay, nil).Times(1) + + // we mock the scheduler setting the exact time we expect it to be reset + // to + expectedSchedulerTime := parentTimestamp.Add(delay) + scheduler.EXPECT().SetBuildBlockTime(expectedSchedulerTime).Times(1) + + _, err = blk.buildChild( + context.Background(), + parentID, + parentTimestamp, + pChainHeight-1, + ) + require.ErrorIs(err, errUnexpectedProposer) + } +} diff --git a/avalanchego/vms/proposervm/config.go b/avalanchego/vms/proposervm/config.go new file mode 100644 index 00000000..a7eb4ff0 --- /dev/null +++ b/avalanchego/vms/proposervm/config.go @@ -0,0 +1,39 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package proposervm + +import ( + "crypto" + "time" + + "github.com/ava-labs/avalanchego/staking" +) + +type Config struct { + // Time at which proposerVM activates its congestion control mechanism + ActivationTime time.Time + + // Durango fork activation time + DurangoTime time.Time + + // Minimal P-chain height referenced upon block building + MinimumPChainHeight uint64 + + // Configurable minimal delay among blocks issued consecutively + MinBlkDelay time.Duration + + // Maximal number of block indexed. + // Zero signals all blocks are indexed. + NumHistoricalBlocks uint64 + + // Block signer + StakingLeafSigner crypto.Signer + + // Block certificate + StakingCertLeaf *staking.Certificate +} + +func (c *Config) IsDurangoActivated(timestamp time.Time) bool { + return !timestamp.Before(c.DurangoTime) +} diff --git a/avalanchego/vms/proposervm/height_indexed_vm.go b/avalanchego/vms/proposervm/height_indexed_vm.go index ff12456d..a29334f6 100644 --- a/avalanchego/vms/proposervm/height_indexed_vm.go +++ b/avalanchego/vms/proposervm/height_indexed_vm.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package proposervm @@ -14,6 +14,8 @@ import ( "github.com/ava-labs/avalanchego/snow/engine/snowman/block" ) +const pruneCommitPeriod = 1024 + // shouldHeightIndexBeRepaired checks if index needs repairing and stores a // checkpoint if repairing is needed. // @@ -53,10 +55,6 @@ func (vm *VM) shouldHeightIndexBeRepaired(ctx context.Context) (bool, error) { // vm.ctx.Lock should be held func (vm *VM) VerifyHeightIndex(context.Context) error { - if vm.hVM == nil { - return block.ErrHeightIndexedVMNotImplemented - } - if !vm.hIndexer.IsRepaired() { return block.ErrIndexIncomplete } @@ -74,13 +72,13 @@ func (vm *VM) GetBlockIDAtHeight(ctx context.Context, height uint64) (ids.ID, er switch forkHeight, err := vm.State.GetForkHeight(); err { case nil: if height < forkHeight { - return vm.hVM.GetBlockIDAtHeight(ctx, height) + return vm.ChainVM.GetBlockIDAtHeight(ctx, height) } return vm.State.GetBlockIDAtHeight(height) case database.ErrNotFound: // fork not reached yet. Block must be pre-fork - return vm.hVM.GetBlockIDAtHeight(ctx, height) + return vm.ChainVM.GetBlockIDAtHeight(ctx, height) default: return ids.Empty, err @@ -113,7 +111,8 @@ func (vm *VM) updateHeightIndex(height uint64, blkID ids.ID) error { } func (vm *VM) storeHeightEntry(height uint64, blkID ids.ID) error { - switch _, err := vm.State.GetForkHeight(); err { + forkHeight, err := vm.State.GetForkHeight() + switch err { case nil: // The fork was already reached. Just update the index. 
@@ -122,14 +121,107 @@ func (vm *VM) storeHeightEntry(height uint64, blkID ids.ID) error { if err := vm.State.SetForkHeight(height); err != nil { return fmt.Errorf("failed storing fork height: %w", err) } + forkHeight = height default: return fmt.Errorf("failed to load fork height: %w", err) } + if err := vm.State.SetBlockIDAtHeight(height, blkID); err != nil { + return err + } + vm.ctx.Log.Debug("indexed block", zap.Stringer("blkID", blkID), zap.Uint64("height", height), ) - return vm.State.SetBlockIDAtHeight(height, blkID) + + if vm.NumHistoricalBlocks == 0 { + return nil + } + + blocksSinceFork := height - forkHeight + // Note: The last accepted block is not considered a historical block. Which + // is why <= is used rather than <. This prevents the user from only storing + // the last accepted block, which can never be safe due to the non-atomic + // commits between the proposervm database and the innerVM's database. + if blocksSinceFork <= vm.NumHistoricalBlocks { + return nil + } + + // Note: heightToDelete is >= forkHeight, so it is guaranteed not to + // underflow. + heightToDelete := height - vm.NumHistoricalBlocks - 1 + blockToDelete, err := vm.State.GetBlockIDAtHeight(heightToDelete) + if err == database.ErrNotFound { + // Block may have already been deleted. This can happen due to a + // proposervm rollback, the node having recently state-synced, or the + // user reconfiguring the node to store more historical blocks than a + // prior run. + return nil + } + if err != nil { + return err + } + + if err := vm.State.DeleteBlockIDAtHeight(heightToDelete); err != nil { + return err + } + if err := vm.State.DeleteBlock(blockToDelete); err != nil { + return err + } + + vm.ctx.Log.Debug("deleted block", + zap.Stringer("blkID", blockToDelete), + zap.Uint64("height", heightToDelete), + ) + return nil +} + +// TODO: Support async deletion of old blocks. 
+func (vm *VM) pruneOldBlocks() error { + if vm.NumHistoricalBlocks == 0 { + return nil + } + + height, err := vm.State.GetMinimumHeight() + if err == database.ErrNotFound { + // Chain hasn't forked yet + return nil + } + + // TODO: Refactor to use DB iterators. + // + // Note: vm.lastAcceptedHeight is guaranteed to be >= height, so the + // subtraction can never underflow. + for vm.lastAcceptedHeight-height > vm.NumHistoricalBlocks { + blockToDelete, err := vm.State.GetBlockIDAtHeight(height) + if err != nil { + return err + } + + if err := vm.State.DeleteBlockIDAtHeight(height); err != nil { + return err + } + if err := vm.State.DeleteBlock(blockToDelete); err != nil { + return err + } + + vm.ctx.Log.Debug("deleted block", + zap.Stringer("blkID", blockToDelete), + zap.Uint64("height", height), + ) + + // Note: height is < vm.lastAcceptedHeight, so it is guaranteed not to + // overflow. + height++ + if height%pruneCommitPeriod != 0 { + continue + } + + if err := vm.db.Commit(); err != nil { + return err + } + } + return vm.db.Commit() } diff --git a/avalanchego/vms/proposervm/indexer/block_server.go b/avalanchego/vms/proposervm/indexer/block_server.go index e817b9ba..fcecaf9e 100644 --- a/avalanchego/vms/proposervm/indexer/block_server.go +++ b/avalanchego/vms/proposervm/indexer/block_server.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package indexer diff --git a/avalanchego/vms/proposervm/indexer/block_server_test.go b/avalanchego/vms/proposervm/indexer/block_server_test.go index 5bf74258..a973d66a 100644 --- a/avalanchego/vms/proposervm/indexer/block_server_test.go +++ b/avalanchego/vms/proposervm/indexer/block_server_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package indexer @@ -8,6 +8,8 @@ import ( "errors" "testing" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/consensus/snowman" ) @@ -35,7 +37,7 @@ func (tsb *TestBlockServer) GetFullPostForkBlock(ctx context.Context, blkID ids. return tsb.GetFullPostForkBlockF(ctx, blkID) } if tsb.CantGetFullPostForkBlock && tsb.T != nil { - tsb.T.Fatal(errGetWrappingBlk) + require.FailNow(tsb.T, errGetWrappingBlk.Error()) } return nil, errGetWrappingBlk } @@ -45,7 +47,7 @@ func (tsb *TestBlockServer) Commit() error { return tsb.CommitF() } if tsb.CantCommit && tsb.T != nil { - tsb.T.Fatal(errCommit) + require.FailNow(tsb.T, errCommit.Error()) } return errCommit } diff --git a/avalanchego/vms/proposervm/indexer/height_indexer.go b/avalanchego/vms/proposervm/indexer/height_indexer.go index 833798c0..c0a1e415 100644 --- a/avalanchego/vms/proposervm/indexer/height_indexer.go +++ b/avalanchego/vms/proposervm/indexer/height_indexer.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package indexer @@ -20,9 +20,10 @@ import ( // default number of heights to index before committing const ( defaultCommitFrequency = 1024 - // Sleep [sleepDurationMultiplier]x (5x) the amount of time we spend processing the block - // to ensure the async indexing does not bottleneck the node. - sleepDurationMultiplier = 5 + // Sleep [sleepDurationMultiplier]x (10x) the amount of time we spend + // processing the block to ensure the async indexing does not bottleneck the + // node. 
+ sleepDurationMultiplier = 10 ) var _ HeightIndexer = (*heightIndexer)(nil) diff --git a/avalanchego/vms/proposervm/indexer/height_indexer_test.go b/avalanchego/vms/proposervm/indexer/height_indexer_test.go index 3c1e671f..2a093530 100644 --- a/avalanchego/vms/proposervm/indexer/height_indexer_test.go +++ b/avalanchego/vms/proposervm/indexer/height_indexer_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package indexer @@ -94,7 +94,7 @@ func TestHeightBlockIndexPostFork(t *testing.T) { // check that height index is fully built loadedForkHeight, err := storedState.GetForkHeight() require.NoError(err) - require.True(loadedForkHeight == 1) + require.Equal(uint64(1), loadedForkHeight) for height := uint64(1); height <= blkNumber; height++ { _, err := storedState.GetBlockIDAtHeight(height) require.NoError(err) @@ -174,10 +174,10 @@ func TestHeightBlockIndexAcrossFork(t *testing.T) { // check that height index is fully built loadedForkHeight, err := storedState.GetForkHeight() require.NoError(err) - require.True(loadedForkHeight == forkHeight) + require.Equal(forkHeight, loadedForkHeight) for height := uint64(0); height < forkHeight; height++ { _, err := storedState.GetBlockIDAtHeight(height) - require.Error(err, database.ErrNotFound) + require.ErrorIs(err, database.ErrNotFound) } for height := forkHeight; height <= blkNumber; height++ { _, err := storedState.GetBlockIDAtHeight(height) @@ -270,7 +270,7 @@ func TestHeightBlockIndexResumeFromCheckPoint(t *testing.T) { // check that height index is fully built loadedForkHeight, err := storedState.GetForkHeight() require.NoError(err) - require.True(loadedForkHeight == forkHeight) + require.Equal(forkHeight, loadedForkHeight) for height := forkHeight; height <= checkpointBlk.Height(); height++ { _, err := storedState.GetBlockIDAtHeight(height) require.NoError(err) diff --git 
a/avalanchego/vms/proposervm/main_test.go b/avalanchego/vms/proposervm/main_test.go new file mode 100644 index 00000000..72165ddb --- /dev/null +++ b/avalanchego/vms/proposervm/main_test.go @@ -0,0 +1,14 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package proposervm + +import ( + "testing" + + "go.uber.org/goleak" +) + +func TestMain(m *testing.M) { + goleak.VerifyTestMain(m) +} diff --git a/avalanchego/vms/proposervm/mock_post_fork_block.go b/avalanchego/vms/proposervm/mock_post_fork_block.go index 6dfa398c..ab449b63 100644 --- a/avalanchego/vms/proposervm/mock_post_fork_block.go +++ b/avalanchego/vms/proposervm/mock_post_fork_block.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/vms/proposervm (interfaces: PostForkBlock) +// +// Generated by this command: +// +// mockgen -package=proposervm -destination=vms/proposervm/mock_post_fork_block.go github.com/ava-labs/avalanchego/vms/proposervm PostForkBlock +// // Package proposervm is a generated GoMock package. package proposervm @@ -16,7 +18,7 @@ import ( choices "github.com/ava-labs/avalanchego/snow/choices" snowman "github.com/ava-labs/avalanchego/snow/consensus/snowman" block "github.com/ava-labs/avalanchego/vms/proposervm/block" - gomock "github.com/golang/mock/gomock" + gomock "go.uber.org/mock/gomock" ) // MockPostForkBlock is a mock of PostForkBlock interface. @@ -51,7 +53,7 @@ func (m *MockPostForkBlock) Accept(arg0 context.Context) error { } // Accept indicates an expected call of Accept. 
-func (mr *MockPostForkBlockMockRecorder) Accept(arg0 interface{}) *gomock.Call { +func (mr *MockPostForkBlockMockRecorder) Accept(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Accept", reflect.TypeOf((*MockPostForkBlock)(nil).Accept), arg0) } @@ -121,7 +123,7 @@ func (m *MockPostForkBlock) Reject(arg0 context.Context) error { } // Reject indicates an expected call of Reject. -func (mr *MockPostForkBlockMockRecorder) Reject(arg0 interface{}) *gomock.Call { +func (mr *MockPostForkBlockMockRecorder) Reject(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reject", reflect.TypeOf((*MockPostForkBlock)(nil).Reject), arg0) } @@ -163,7 +165,7 @@ func (m *MockPostForkBlock) Verify(arg0 context.Context) error { } // Verify indicates an expected call of Verify. -func (mr *MockPostForkBlockMockRecorder) Verify(arg0 interface{}) *gomock.Call { +func (mr *MockPostForkBlockMockRecorder) Verify(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Verify", reflect.TypeOf((*MockPostForkBlock)(nil).Verify), arg0) } @@ -177,7 +179,7 @@ func (m *MockPostForkBlock) acceptInnerBlk(arg0 context.Context) error { } // acceptInnerBlk indicates an expected call of acceptInnerBlk. -func (mr *MockPostForkBlockMockRecorder) acceptInnerBlk(arg0 interface{}) *gomock.Call { +func (mr *MockPostForkBlockMockRecorder) acceptInnerBlk(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "acceptInnerBlk", reflect.TypeOf((*MockPostForkBlock)(nil).acceptInnerBlk), arg0) } @@ -206,7 +208,7 @@ func (m *MockPostForkBlock) buildChild(arg0 context.Context) (Block, error) { } // buildChild indicates an expected call of buildChild. 
-func (mr *MockPostForkBlockMockRecorder) buildChild(arg0 interface{}) *gomock.Call { +func (mr *MockPostForkBlockMockRecorder) buildChild(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "buildChild", reflect.TypeOf((*MockPostForkBlock)(nil).buildChild), arg0) } @@ -249,7 +251,7 @@ func (m *MockPostForkBlock) pChainHeight(arg0 context.Context) (uint64, error) { } // pChainHeight indicates an expected call of pChainHeight. -func (mr *MockPostForkBlockMockRecorder) pChainHeight(arg0 interface{}) *gomock.Call { +func (mr *MockPostForkBlockMockRecorder) pChainHeight(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "pChainHeight", reflect.TypeOf((*MockPostForkBlock)(nil).pChainHeight), arg0) } @@ -261,7 +263,7 @@ func (m *MockPostForkBlock) setInnerBlk(arg0 snowman.Block) { } // setInnerBlk indicates an expected call of setInnerBlk. -func (mr *MockPostForkBlockMockRecorder) setInnerBlk(arg0 interface{}) *gomock.Call { +func (mr *MockPostForkBlockMockRecorder) setInnerBlk(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "setInnerBlk", reflect.TypeOf((*MockPostForkBlock)(nil).setInnerBlk), arg0) } @@ -273,7 +275,7 @@ func (m *MockPostForkBlock) setStatus(arg0 choices.Status) { } // setStatus indicates an expected call of setStatus. -func (mr *MockPostForkBlockMockRecorder) setStatus(arg0 interface{}) *gomock.Call { +func (mr *MockPostForkBlockMockRecorder) setStatus(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "setStatus", reflect.TypeOf((*MockPostForkBlock)(nil).setStatus), arg0) } @@ -287,7 +289,7 @@ func (m *MockPostForkBlock) verifyPostForkChild(arg0 context.Context, arg1 *post } // verifyPostForkChild indicates an expected call of verifyPostForkChild. 
-func (mr *MockPostForkBlockMockRecorder) verifyPostForkChild(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockPostForkBlockMockRecorder) verifyPostForkChild(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "verifyPostForkChild", reflect.TypeOf((*MockPostForkBlock)(nil).verifyPostForkChild), arg0, arg1) } @@ -301,7 +303,7 @@ func (m *MockPostForkBlock) verifyPostForkOption(arg0 context.Context, arg1 *pos } // verifyPostForkOption indicates an expected call of verifyPostForkOption. -func (mr *MockPostForkBlockMockRecorder) verifyPostForkOption(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockPostForkBlockMockRecorder) verifyPostForkOption(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "verifyPostForkOption", reflect.TypeOf((*MockPostForkBlock)(nil).verifyPostForkOption), arg0, arg1) } @@ -315,7 +317,7 @@ func (m *MockPostForkBlock) verifyPreForkChild(arg0 context.Context, arg1 *preFo } // verifyPreForkChild indicates an expected call of verifyPreForkChild. -func (mr *MockPostForkBlockMockRecorder) verifyPreForkChild(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockPostForkBlockMockRecorder) verifyPreForkChild(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "verifyPreForkChild", reflect.TypeOf((*MockPostForkBlock)(nil).verifyPreForkChild), arg0, arg1) } diff --git a/avalanchego/vms/proposervm/post_fork_block.go b/avalanchego/vms/proposervm/post_fork_block.go index 69e59aae..707b6dc3 100644 --- a/avalanchego/vms/proposervm/post_fork_block.go +++ b/avalanchego/vms/proposervm/post_fork_block.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package proposervm @@ -34,16 +34,8 @@ func (b *postForkBlock) acceptOuterBlk() error { // Update in-memory references b.status = choices.Accepted b.vm.lastAcceptedTime = b.Timestamp() - b.vm.lastAcceptedHeight = b.Height() - blkID := b.ID() - delete(b.vm.verifiedBlocks, blkID) - - // Persist this block, its height index, and its status - if err := b.vm.State.SetLastAccepted(blkID); err != nil { - return err - } - return b.vm.storePostForkBlock(b) + return b.vm.acceptPostForkBlock(b) } func (b *postForkBlock) acceptInnerBlk(ctx context.Context) error { diff --git a/avalanchego/vms/proposervm/post_fork_block_test.go b/avalanchego/vms/proposervm/post_fork_block_test.go index f4912172..a16d4a7d 100644 --- a/avalanchego/vms/proposervm/post_fork_block_test.go +++ b/avalanchego/vms/proposervm/post_fork_block_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package proposervm @@ -10,10 +10,14 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowman" + "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/vms/proposervm/block" "github.com/ava-labs/avalanchego/vms/proposervm/proposer" ) @@ -22,6 +26,8 @@ var errDuplicateVerify = errors.New("duplicate verify") // ProposerBlock Option interface tests section func TestOracle_PostForkBlock_ImplementsInterface(t *testing.T) { + require := require.New(t) + // setup proBlk := postForkBlock{ postForkCommonComponents: postForkCommonComponents{ @@ -31,12 +37,18 @@ func TestOracle_PostForkBlock_ImplementsInterface(t *testing.T) { // test _, err := proBlk.Options(context.Background()) - if err != snowman.ErrNotOracle { - t.Fatal("Proposer block should signal that it wraps a block not implementing Options interface with ErrNotOracleBlock error") - } + require.Equal(snowman.ErrNotOracle, err) // setup - _, _, proVM, _, _ := initTestProposerVM(t, time.Time{}, 0) // enable ProBlks + var ( + activationTime = time.Unix(0, 0) + durangoTime = activationTime + ) + _, _, proVM, _, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + defer func() { + require.NoError(proVM.Shutdown(context.Background())) + }() + innerOracleBlk := &TestOptionsBlock{ TestBlock: snowman.TestBlock{ TestDecidable: choices.TestDecidable{ @@ -64,14 +76,12 @@ func TestOracle_PostForkBlock_ImplementsInterface(t *testing.T) { ids.Empty, // refer unknown parent time.Time{}, 0, // pChainHeight, - proVM.stakingCertLeaf, + proVM.StakingCertLeaf, innerOracleBlk.Bytes(), proVM.ctx.ChainID, - proVM.stakingLeafSigner, + proVM.StakingLeafSigner, ) - if err != nil { - t.Fatal("could not build stateless block") - } + require.NoError(err) proBlk 
= postForkBlock{ SignedBlock: slb, postForkCommonComponents: postForkCommonComponents{ @@ -83,38 +93,46 @@ func TestOracle_PostForkBlock_ImplementsInterface(t *testing.T) { // test _, err = proBlk.Options(context.Background()) - if err != nil { - t.Fatal("Proposer block should forward wrapped block options if this implements Option interface") - } + require.NoError(err) } // ProposerBlock.Verify tests section -func TestBlockVerify_PostForkBlock_ParentChecks(t *testing.T) { - coreVM, valState, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) // enable ProBlks +func TestBlockVerify_PostForkBlock_PreDurango_ParentChecks(t *testing.T) { + require := require.New(t) + + var ( + activationTime = time.Unix(0, 0) + durangoTime = mockable.MaxTime // pre Durango + ) + coreVM, valState, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + defer func() { + require.NoError(proVM.Shutdown(context.Background())) + }() + pChainHeight := uint64(100) valState.GetCurrentHeightF = func(context.Context) (uint64, error) { return pChainHeight, nil } // create parent block ... 
- prntCoreBlk := &snowman.TestBlock{ + parentCoreBlk := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ IDV: ids.Empty.Prefix(1111), StatusV: choices.Processing, }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - TimestampV: coreGenBlk.Timestamp(), + BytesV: []byte{1}, + ParentV: coreGenBlk.ID(), + HeightV: coreGenBlk.Height() + 1, } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { - return prntCoreBlk, nil + return parentCoreBlk, nil } coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case coreGenBlk.ID(): return coreGenBlk, nil - case prntCoreBlk.ID(): - return prntCoreBlk, nil + case parentCoreBlk.ID(): + return parentCoreBlk, nil default: return nil, database.ErrNotFound } @@ -123,46 +141,26 @@ func TestBlockVerify_PostForkBlock_ParentChecks(t *testing.T) { switch { case bytes.Equal(b, coreGenBlk.Bytes()): return coreGenBlk, nil - case bytes.Equal(b, prntCoreBlk.Bytes()): - return prntCoreBlk, nil + case bytes.Equal(b, parentCoreBlk.Bytes()): + return parentCoreBlk, nil default: return nil, errUnknownBlock } } - proVM.Set(proVM.Time().Add(proposer.MaxDelay)) - prntProBlk, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatalf("Could not build proposer block: %s", err) - } + parentBlk, err := proVM.BuildBlock(context.Background()) + require.NoError(err) - if err := prntProBlk.Verify(context.Background()); err != nil { - t.Fatal(err) - } - if err := proVM.SetPreference(context.Background(), prntProBlk.ID()); err != nil { - t.Fatal(err) - } + require.NoError(parentBlk.Verify(context.Background())) + require.NoError(proVM.SetPreference(context.Background(), parentBlk.ID())) // .. create child block ... 
childCoreBlk := &snowman.TestBlock{ - ParentV: prntCoreBlk.ID(), - BytesV: []byte{2}, - TimestampV: prntCoreBlk.Timestamp(), - } - childSlb, err := block.Build( - ids.Empty, // refer unknown parent - childCoreBlk.Timestamp(), - pChainHeight, - proVM.stakingCertLeaf, - childCoreBlk.Bytes(), - proVM.ctx.ChainID, - proVM.stakingLeafSigner, - ) - if err != nil { - t.Fatal("could not build stateless block") + ParentV: parentCoreBlk.ID(), + BytesV: []byte{2}, + HeightV: parentCoreBlk.Height() + 1, } - childProBlk := postForkBlock{ - SignedBlock: childSlb, + childBlk := postForkBlock{ postForkCommonComponents: postForkCommonComponents{ vm: proVM, innerBlk: childCoreBlk, @@ -170,59 +168,74 @@ func TestBlockVerify_PostForkBlock_ParentChecks(t *testing.T) { }, } - // child block referring unknown parent does not verify - err = childProBlk.Verify(context.Background()) - if err == nil { - t.Fatal("Block with unknown parent should not verify") - } + // set proVM to be able to build unsigned blocks + proVM.Set(proVM.Time().Add(proposer.MaxVerifyDelay)) - // child block referring known parent does verify - childSlb, err = block.BuildUnsigned( - prntProBlk.ID(), // refer known parent - prntProBlk.Timestamp().Add(proposer.MaxDelay), - pChainHeight, - childCoreBlk.Bytes(), - ) - if err != nil { - t.Fatal("could not build stateless block") - } - childProBlk.SignedBlock = childSlb - if err != nil { - t.Fatal("could not sign parent block") + { + // child block referring unknown parent does not verify + childSlb, err := block.BuildUnsigned( + ids.Empty, // refer unknown parent + proVM.Time(), + pChainHeight, + childCoreBlk.Bytes(), + ) + require.NoError(err) + childBlk.SignedBlock = childSlb + + err = childBlk.Verify(context.Background()) + require.ErrorIs(err, database.ErrNotFound) } - proVM.Set(proVM.Time().Add(proposer.MaxDelay)) - if err := childProBlk.Verify(context.Background()); err != nil { - t.Fatalf("Block with known parent should verify: %s", err) + { + // child block 
referring known parent does verify + childSlb, err := block.BuildUnsigned( + parentBlk.ID(), // refer known parent + proVM.Time(), + pChainHeight, + childCoreBlk.Bytes(), + ) + require.NoError(err) + childBlk.SignedBlock = childSlb + + require.NoError(childBlk.Verify(context.Background())) } } -func TestBlockVerify_PostForkBlock_TimestampChecks(t *testing.T) { - coreVM, valState, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) // enable ProBlks +func TestBlockVerify_PostForkBlock_PostDurango_ParentChecks(t *testing.T) { + require := require.New(t) + + var ( + activationTime = time.Unix(0, 0) + durangoTime = activationTime // post Durango + ) + coreVM, valState, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + defer func() { + require.NoError(proVM.Shutdown(context.Background())) + }() + pChainHeight := uint64(100) valState.GetCurrentHeightF = func(context.Context) (uint64, error) { return pChainHeight, nil } - // create parent block ... - prntCoreBlk := &snowman.TestBlock{ + parentCoreBlk := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ IDV: ids.Empty.Prefix(1111), StatusV: choices.Processing, }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - TimestampV: coreGenBlk.Timestamp().Add(proposer.MaxDelay), + BytesV: []byte{1}, + ParentV: coreGenBlk.ID(), + HeightV: coreGenBlk.Height() + 1, } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { - return prntCoreBlk, nil + return parentCoreBlk, nil } coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case coreGenBlk.ID(): return coreGenBlk, nil - case prntCoreBlk.ID(): - return prntCoreBlk, nil + case parentCoreBlk.ID(): + return parentCoreBlk, nil default: return nil, database.ErrNotFound } @@ -231,53 +244,25 @@ func TestBlockVerify_PostForkBlock_TimestampChecks(t *testing.T) { switch { case bytes.Equal(b, coreGenBlk.Bytes()): return coreGenBlk, nil - case bytes.Equal(b, prntCoreBlk.Bytes()): - return 
prntCoreBlk, nil + case bytes.Equal(b, parentCoreBlk.Bytes()): + return parentCoreBlk, nil default: return nil, errUnknownBlock } } - prntProBlk, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatal("Could not build proposer block") - } - - if err := prntProBlk.Verify(context.Background()); err != nil { - t.Fatal(err) - } - if err := proVM.SetPreference(context.Background(), prntProBlk.ID()); err != nil { - t.Fatal(err) - } + parentBlk, err := proVM.BuildBlock(context.Background()) + require.NoError(err) - prntTimestamp := prntProBlk.Timestamp() + require.NoError(parentBlk.Verify(context.Background())) + require.NoError(proVM.SetPreference(context.Background(), parentBlk.ID())) childCoreBlk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(2222), - StatusV: choices.Processing, - }, - ParentV: prntCoreBlk.ID(), + ParentV: parentCoreBlk.ID(), BytesV: []byte{2}, + HeightV: parentCoreBlk.Height() + 1, } - - // child block timestamp cannot be lower than parent timestamp - childCoreBlk.TimestampV = prntTimestamp.Add(-1 * time.Second) - proVM.Clock.Set(childCoreBlk.TimestampV) - childSlb, err := block.Build( - prntProBlk.ID(), - childCoreBlk.Timestamp(), - pChainHeight, - proVM.stakingCertLeaf, - childCoreBlk.Bytes(), - proVM.ctx.ChainID, - proVM.stakingLeafSigner, - ) - if err != nil { - t.Fatal("could not build stateless block") - } - childProBlk := postForkBlock{ - SignedBlock: childSlb, + childBlk := postForkBlock{ postForkCommonComponents: postForkCommonComponents{ vm: proVM, innerBlk: childCoreBlk, @@ -285,142 +270,306 @@ func TestBlockVerify_PostForkBlock_TimestampChecks(t *testing.T) { }, } - err = childProBlk.Verify(context.Background()) - if err == nil { - t.Fatal("Proposer block timestamp too old should not verify") + require.NoError(waitForProposerWindow(proVM, parentBlk, parentBlk.(*postForkBlock).PChainHeight())) + + { + // child block referring unknown parent does not verify + childSlb, err := 
block.Build( + ids.Empty, // refer unknown parent + proVM.Time(), + pChainHeight, + proVM.StakingCertLeaf, + childCoreBlk.Bytes(), + proVM.ctx.ChainID, + proVM.StakingLeafSigner, + ) + require.NoError(err) + childBlk.SignedBlock = childSlb + + err = childBlk.Verify(context.Background()) + require.ErrorIs(err, database.ErrNotFound) + } + + { + // child block referring known parent does verify + childSlb, err := block.Build( + parentBlk.ID(), + proVM.Time(), + pChainHeight, + proVM.StakingCertLeaf, + childCoreBlk.Bytes(), + proVM.ctx.ChainID, + proVM.StakingLeafSigner, + ) + + require.NoError(err) + childBlk.SignedBlock = childSlb + + proVM.Set(childSlb.Timestamp()) + require.NoError(childBlk.Verify(context.Background())) } +} - // block cannot arrive before its creator window starts - blkWinDelay, err := proVM.Delay(context.Background(), childCoreBlk.Height(), pChainHeight, proVM.ctx.NodeID) - if err != nil { - t.Fatal("Could not calculate submission window") - } - beforeWinStart := prntTimestamp.Add(blkWinDelay).Add(-1 * time.Second) - proVM.Clock.Set(beforeWinStart) - childSlb, err = block.Build( - prntProBlk.ID(), - beforeWinStart, - pChainHeight, - proVM.stakingCertLeaf, - childCoreBlk.Bytes(), - proVM.ctx.ChainID, - proVM.stakingLeafSigner, +func TestBlockVerify_PostForkBlock_TimestampChecks(t *testing.T) { + require := require.New(t) + + var ( + activationTime = time.Unix(0, 0) + durangoTime = mockable.MaxTime ) - if err != nil { - t.Fatal("could not build stateless block") + coreVM, valState, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + defer func() { + require.NoError(proVM.Shutdown(context.Background())) + }() + + // reduce validator state to allow proVM.ctx.NodeID to be easily selected as proposer + valState.GetValidatorSetF = func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { + var ( + thisNode = proVM.ctx.NodeID + nodeID1 = ids.BuildTestNodeID([]byte{1}) + ) + return 
map[ids.NodeID]*validators.GetValidatorOutput{ + thisNode: { + NodeID: thisNode, + Weight: 5, + }, + nodeID1: { + NodeID: nodeID1, + Weight: 100, + }, + }, nil } - childProBlk.SignedBlock = childSlb + proVM.ctx.ValidatorState = valState - if err := childProBlk.Verify(context.Background()); err == nil { - t.Fatal("Proposer block timestamp before submission window should not verify") + pChainHeight := uint64(100) + valState.GetCurrentHeightF = func(context.Context) (uint64, error) { + return pChainHeight, nil } - // block can arrive at its creator window starts - atWindowStart := prntTimestamp.Add(blkWinDelay) - proVM.Clock.Set(atWindowStart) - childSlb, err = block.Build( - prntProBlk.ID(), - atWindowStart, - pChainHeight, - proVM.stakingCertLeaf, - childCoreBlk.Bytes(), - proVM.ctx.ChainID, - proVM.stakingLeafSigner, - ) - if err != nil { - t.Fatal("could not build stateless block") + // create parent block ... + parentCoreBlk := &snowman.TestBlock{ + TestDecidable: choices.TestDecidable{ + IDV: ids.Empty.Prefix(1111), + StatusV: choices.Processing, + }, + BytesV: []byte{1}, + ParentV: coreGenBlk.ID(), + HeightV: coreGenBlk.Height() + 1, } - childProBlk.SignedBlock = childSlb - - if err := childProBlk.Verify(context.Background()); err != nil { - t.Fatalf("Proposer block timestamp at submission window start should verify") + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return parentCoreBlk, nil } - - // block can arrive after its creator window starts - afterWindowStart := prntTimestamp.Add(blkWinDelay).Add(5 * time.Second) - proVM.Clock.Set(afterWindowStart) - childSlb, err = block.Build( - prntProBlk.ID(), - afterWindowStart, - pChainHeight, - proVM.stakingCertLeaf, - childCoreBlk.Bytes(), - proVM.ctx.ChainID, - proVM.stakingLeafSigner, - ) - if err != nil { - t.Fatal("could not build stateless block") + coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { + switch blkID { + case coreGenBlk.ID(): + return 
coreGenBlk, nil + case parentCoreBlk.ID(): + return parentCoreBlk, nil + default: + return nil, database.ErrNotFound + } } - childProBlk.SignedBlock = childSlb - if err := childProBlk.Verify(context.Background()); err != nil { - t.Fatal("Proposer block timestamp after submission window start should verify") + coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { + switch { + case bytes.Equal(b, coreGenBlk.Bytes()): + return coreGenBlk, nil + case bytes.Equal(b, parentCoreBlk.Bytes()): + return parentCoreBlk, nil + default: + return nil, errUnknownBlock + } } - // block can arrive within submission window - atSubWindowEnd := proVM.Time().Add(proposer.MaxDelay) - proVM.Clock.Set(atSubWindowEnd) - childSlb, err = block.BuildUnsigned( - prntProBlk.ID(), - atSubWindowEnd, - pChainHeight, - childCoreBlk.Bytes(), + parentBlk, err := proVM.BuildBlock(context.Background()) + require.NoError(err) + + require.NoError(parentBlk.Verify(context.Background())) + require.NoError(proVM.SetPreference(context.Background(), parentBlk.ID())) + + var ( + parentTimestamp = parentBlk.Timestamp() + parentPChainHeight = parentBlk.(*postForkBlock).PChainHeight() ) - if err != nil { - t.Fatal("could not build stateless block") + + childCoreBlk := &snowman.TestBlock{ + TestDecidable: choices.TestDecidable{ + IDV: ids.Empty.Prefix(2222), + StatusV: choices.Processing, + }, + ParentV: parentCoreBlk.ID(), + HeightV: parentCoreBlk.Height() + 1, + BytesV: []byte{2}, } - childProBlk.SignedBlock = childSlb - if err := childProBlk.Verify(context.Background()); err != nil { - t.Fatal("Proposer block timestamp within submission window should verify") + childBlk := postForkBlock{ + postForkCommonComponents: postForkCommonComponents{ + vm: proVM, + innerBlk: childCoreBlk, + status: choices.Processing, + }, } - // block timestamp cannot be too much in the future - afterSubWinEnd := proVM.Time().Add(maxSkew).Add(time.Second) - childSlb, err = block.Build( - prntProBlk.ID(), - 
afterSubWinEnd, - pChainHeight, - proVM.stakingCertLeaf, - childCoreBlk.Bytes(), - proVM.ctx.ChainID, - proVM.stakingLeafSigner, - ) - if err != nil { - t.Fatal("could not build stateless block") - } - childProBlk.SignedBlock = childSlb - if err := childProBlk.Verify(context.Background()); err == nil { - t.Fatal("Proposer block timestamp after submission window should not verify") - } else if err == nil { - t.Fatal("Proposer block timestamp after submission window should have different error") + { + // child block timestamp cannot be lower than parent timestamp + newTime := parentTimestamp.Add(-1 * time.Second) + proVM.Clock.Set(newTime) + + childSlb, err := block.Build( + parentBlk.ID(), + newTime, + pChainHeight, + proVM.StakingCertLeaf, + childCoreBlk.Bytes(), + proVM.ctx.ChainID, + proVM.StakingLeafSigner, + ) + require.NoError(err) + childBlk.SignedBlock = childSlb + + err = childBlk.Verify(context.Background()) + require.ErrorIs(err, errTimeNotMonotonic) + } + + blkWinDelay, err := proVM.Delay(context.Background(), childCoreBlk.Height(), parentPChainHeight, proVM.ctx.NodeID, proposer.MaxVerifyWindows) + require.NoError(err) + + { + // block cannot arrive before its creator window starts + beforeWinStart := parentTimestamp.Add(blkWinDelay).Add(-1 * time.Second) + proVM.Clock.Set(beforeWinStart) + + childSlb, err := block.Build( + parentBlk.ID(), + beforeWinStart, + pChainHeight, + proVM.StakingCertLeaf, + childCoreBlk.Bytes(), + proVM.ctx.ChainID, + proVM.StakingLeafSigner, + ) + require.NoError(err) + childBlk.SignedBlock = childSlb + + err = childBlk.Verify(context.Background()) + require.ErrorIs(err, errProposerWindowNotStarted) + } + + { + // block can arrive at its creator window starts + atWindowStart := parentTimestamp.Add(blkWinDelay) + proVM.Clock.Set(atWindowStart) + + childSlb, err := block.Build( + parentBlk.ID(), + atWindowStart, + pChainHeight, + proVM.StakingCertLeaf, + childCoreBlk.Bytes(), + proVM.ctx.ChainID, + proVM.StakingLeafSigner, + ) + 
require.NoError(err) + childBlk.SignedBlock = childSlb + + require.NoError(childBlk.Verify(context.Background())) + } + + { + // block can arrive after its creator window starts + afterWindowStart := parentTimestamp.Add(blkWinDelay).Add(5 * time.Second) + proVM.Clock.Set(afterWindowStart) + + childSlb, err := block.Build( + parentBlk.ID(), + afterWindowStart, + pChainHeight, + proVM.StakingCertLeaf, + childCoreBlk.Bytes(), + proVM.ctx.ChainID, + proVM.StakingLeafSigner, + ) + require.NoError(err) + childBlk.SignedBlock = childSlb + + require.NoError(childBlk.Verify(context.Background())) + } + + { + // block can arrive within submission window + atSubWindowEnd := proVM.Time().Add(proposer.MaxVerifyDelay) + proVM.Clock.Set(atSubWindowEnd) + + childSlb, err := block.BuildUnsigned( + parentBlk.ID(), + atSubWindowEnd, + pChainHeight, + childCoreBlk.Bytes(), + ) + require.NoError(err) + childBlk.SignedBlock = childSlb + + require.NoError(childBlk.Verify(context.Background())) + } + + { + // block timestamp cannot be too much in the future + afterSubWinEnd := proVM.Time().Add(maxSkew).Add(time.Second) + + childSlb, err := block.Build( + parentBlk.ID(), + afterSubWinEnd, + pChainHeight, + proVM.StakingCertLeaf, + childCoreBlk.Bytes(), + proVM.ctx.ChainID, + proVM.StakingLeafSigner, + ) + require.NoError(err) + childBlk.SignedBlock = childSlb + + err = childBlk.Verify(context.Background()) + require.ErrorIs(err, errTimeTooAdvanced) } } func TestBlockVerify_PostForkBlock_PChainHeightChecks(t *testing.T) { - coreVM, valState, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) // enable ProBlks + require := require.New(t) + + var ( + activationTime = time.Unix(0, 0) + durangoTime = activationTime + ) + coreVM, valState, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + defer func() { + require.NoError(proVM.Shutdown(context.Background())) + }() + pChainHeight := uint64(100) valState.GetCurrentHeightF = func(context.Context) (uint64, 
error) { return pChainHeight, nil } + valState.GetMinimumHeightF = func(context.Context) (uint64, error) { + return pChainHeight / 50, nil + } // create parent block ... - prntCoreBlk := &snowman.TestBlock{ + parentCoreBlk := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ IDV: ids.Empty.Prefix(1111), StatusV: choices.Processing, }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - TimestampV: coreGenBlk.Timestamp().Add(proposer.MaxDelay), + BytesV: []byte{1}, + ParentV: coreGenBlk.ID(), + HeightV: coreGenBlk.Height() + 1, } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { - return prntCoreBlk, nil + return parentCoreBlk, nil } coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case coreGenBlk.ID(): return coreGenBlk, nil - case prntCoreBlk.ID(): - return prntCoreBlk, nil + case parentCoreBlk.ID(): + return parentCoreBlk, nil default: return nil, database.ErrNotFound } @@ -429,52 +578,34 @@ func TestBlockVerify_PostForkBlock_PChainHeightChecks(t *testing.T) { switch { case bytes.Equal(b, coreGenBlk.Bytes()): return coreGenBlk, nil - case bytes.Equal(b, prntCoreBlk.Bytes()): - return prntCoreBlk, nil + case bytes.Equal(b, parentCoreBlk.Bytes()): + return parentCoreBlk, nil default: return nil, errUnknownBlock } } - prntProBlk, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatal("Could not build proposer block") - } + parentBlk, err := proVM.BuildBlock(context.Background()) + require.NoError(err) - if err := prntProBlk.Verify(context.Background()); err != nil { - t.Fatal(err) - } - if err := proVM.SetPreference(context.Background(), prntProBlk.ID()); err != nil { - t.Fatal(err) - } + require.NoError(parentBlk.Verify(context.Background())) + require.NoError(proVM.SetPreference(context.Background(), parentBlk.ID())) - prntBlkPChainHeight := pChainHeight + // set VM to be ready to build next block. We set it to generate unsigned blocks + // for simplicity. 
+ parentBlkPChainHeight := parentBlk.(*postForkBlock).PChainHeight() + require.NoError(waitForProposerWindow(proVM, parentBlk, parentBlkPChainHeight)) childCoreBlk := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ IDV: ids.Empty.Prefix(2222), StatusV: choices.Processing, }, - ParentV: prntCoreBlk.ID(), - BytesV: []byte{2}, - TimestampV: prntProBlk.Timestamp().Add(proposer.MaxDelay), - } - - // child P-Chain height must not precede parent P-Chain height - childSlb, err := block.Build( - prntProBlk.ID(), - childCoreBlk.Timestamp(), - prntBlkPChainHeight-1, - proVM.stakingCertLeaf, - childCoreBlk.Bytes(), - proVM.ctx.ChainID, - proVM.stakingLeafSigner, - ) - if err != nil { - t.Fatal("could not build stateless block") + ParentV: parentCoreBlk.ID(), + HeightV: parentBlk.Height() + 1, + BytesV: []byte{2}, } - childProBlk := postForkBlock{ - SignedBlock: childSlb, + childBlk := postForkBlock{ postForkCommonComponents: postForkCommonComponents{ vm: proVM, innerBlk: childCoreBlk, @@ -482,84 +613,114 @@ func TestBlockVerify_PostForkBlock_PChainHeightChecks(t *testing.T) { }, } - if err := childProBlk.Verify(context.Background()); err == nil { - t.Fatal("ProBlock's P-Chain-Height cannot be lower than parent ProBlock's one") - } else if err == nil { - t.Fatal("Proposer block has wrong height should have different error") - } - - // child P-Chain height can be equal to parent P-Chain height - childSlb, err = block.BuildUnsigned( - prntProBlk.ID(), - childCoreBlk.Timestamp(), - prntBlkPChainHeight, - childCoreBlk.Bytes(), - ) - if err != nil { - t.Fatal("could not build stateless block") - } - childProBlk.SignedBlock = childSlb - - proVM.Set(childCoreBlk.Timestamp()) - if err := childProBlk.Verify(context.Background()); err != nil { - t.Fatalf("ProBlock's P-Chain-Height can be larger or equal than parent ProBlock's one: %s", err) - } - - // child P-Chain height may follow parent P-Chain height - pChainHeight = prntBlkPChainHeight * 2 // move ahead pChainHeight - 
childSlb, err = block.BuildUnsigned( - prntProBlk.ID(), - childCoreBlk.Timestamp(), - prntBlkPChainHeight+1, - childCoreBlk.Bytes(), - ) - if err != nil { - t.Fatal("could not build stateless block") - } - childProBlk.SignedBlock = childSlb - if err := childProBlk.Verify(context.Background()); err != nil { - t.Fatal("ProBlock's P-Chain-Height can be larger or equal than parent ProBlock's one") + { + // child P-Chain height must not precede parent P-Chain height + childSlb, err := block.Build( + parentBlk.ID(), + proVM.Time(), + parentBlkPChainHeight-1, + proVM.StakingCertLeaf, + childCoreBlk.Bytes(), + proVM.ctx.ChainID, + proVM.StakingLeafSigner, + ) + require.NoError(err) + childBlk.SignedBlock = childSlb + + err = childBlk.Verify(context.Background()) + require.ErrorIs(err, errPChainHeightNotMonotonic) + } + + { + // child P-Chain height can be equal to parent P-Chain height + childSlb, err := block.Build( + parentBlk.ID(), + proVM.Time(), + parentBlkPChainHeight, + proVM.StakingCertLeaf, + childCoreBlk.Bytes(), + proVM.ctx.ChainID, + proVM.StakingLeafSigner, + ) + require.NoError(err) + childBlk.SignedBlock = childSlb + + require.NoError(childBlk.Verify(context.Background())) + } + + { + // child P-Chain height may follow parent P-Chain height + childSlb, err := block.Build( + parentBlk.ID(), + proVM.Time(), + parentBlkPChainHeight, + proVM.StakingCertLeaf, + childCoreBlk.Bytes(), + proVM.ctx.ChainID, + proVM.StakingLeafSigner, + ) + require.NoError(err) + childBlk.SignedBlock = childSlb + + require.NoError(childBlk.Verify(context.Background())) } - // block P-Chain height can be equal to current P-Chain height currPChainHeight, _ := proVM.ctx.ValidatorState.GetCurrentHeight(context.Background()) - childSlb, err = block.BuildUnsigned( - prntProBlk.ID(), - childCoreBlk.Timestamp(), - currPChainHeight, - childCoreBlk.Bytes(), - ) - if err != nil { - t.Fatal("could not build stateless block") - } - childProBlk.SignedBlock = childSlb - if err := 
childProBlk.Verify(context.Background()); err != nil { - t.Fatal("ProBlock's P-Chain-Height can be equal to current p chain height") - } - - // block P-Chain height cannot be at higher than current P-Chain height - childSlb, err = block.BuildUnsigned( - prntProBlk.ID(), - childCoreBlk.Timestamp(), - currPChainHeight*2, - childCoreBlk.Bytes(), - ) - if err != nil { - t.Fatal("could not build stateless block") - } - childProBlk.SignedBlock = childSlb - if err := childProBlk.Verify(context.Background()); err != errPChainHeightNotReached { - t.Fatal("ProBlock's P-Chain-Height cannot be larger than current p chain height") + { + // block P-Chain height can be equal to current P-Chain height + childSlb, err := block.Build( + parentBlk.ID(), + proVM.Time(), + currPChainHeight, + proVM.StakingCertLeaf, + childCoreBlk.Bytes(), + proVM.ctx.ChainID, + proVM.StakingLeafSigner, + ) + require.NoError(err) + childBlk.SignedBlock = childSlb + + require.NoError(childBlk.Verify(context.Background())) + } + + { + // block P-Chain height cannot be at higher than current P-Chain height + childSlb, err := block.Build( + parentBlk.ID(), + proVM.Time(), + currPChainHeight*2, + proVM.StakingCertLeaf, + childCoreBlk.Bytes(), + proVM.ctx.ChainID, + proVM.StakingLeafSigner, + ) + require.NoError(err) + childBlk.SignedBlock = childSlb + + err = childBlk.Verify(context.Background()) + require.ErrorIs(err, errPChainHeightNotReached) } } func TestBlockVerify_PostForkBlockBuiltOnOption_PChainHeightChecks(t *testing.T) { - coreVM, valState, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) // enable ProBlks + require := require.New(t) + + var ( + activationTime = time.Unix(0, 0) + durangoTime = mockable.MaxTime + ) + coreVM, valState, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + defer func() { + require.NoError(proVM.Shutdown(context.Background())) + }() + pChainHeight := uint64(100) valState.GetCurrentHeightF = func(context.Context) (uint64, error) { 
return pChainHeight, nil } - // proVM.SetStartTime(timer.MaxTime) // switch off scheduler for current test + valState.GetMinimumHeightF = func(context.Context) (uint64, error) { + return pChainHeight / 50, nil + } // create post fork oracle block ... oracleCoreBlk := &TestOptionsBlock{ @@ -568,9 +729,9 @@ func TestBlockVerify_PostForkBlockBuiltOnOption_PChainHeightChecks(t *testing.T) IDV: ids.Empty.Prefix(1111), StatusV: choices.Processing, }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - TimestampV: coreGenBlk.Timestamp().Add(proposer.MaxDelay), + BytesV: []byte{1}, + ParentV: coreGenBlk.ID(), + HeightV: coreGenBlk.Height() + 1, }, } oracleCoreBlk.opts = [2]snowman.Block{ @@ -579,18 +740,18 @@ func TestBlockVerify_PostForkBlockBuiltOnOption_PChainHeightChecks(t *testing.T) IDV: ids.Empty.Prefix(2222), StatusV: choices.Processing, }, - BytesV: []byte{2}, - ParentV: oracleCoreBlk.ID(), - TimestampV: oracleCoreBlk.Timestamp(), + BytesV: []byte{2}, + ParentV: oracleCoreBlk.ID(), + HeightV: oracleCoreBlk.Height() + 1, }, &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ IDV: ids.Empty.Prefix(3333), StatusV: choices.Processing, }, - BytesV: []byte{3}, - ParentV: oracleCoreBlk.ID(), - TimestampV: oracleCoreBlk.Timestamp(), + BytesV: []byte{3}, + ParentV: oracleCoreBlk.ID(), + HeightV: oracleCoreBlk.Height() + 1, }, } @@ -627,62 +788,38 @@ func TestBlockVerify_PostForkBlockBuiltOnOption_PChainHeightChecks(t *testing.T) } oracleBlk, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatal("could not build post fork oracle block") - } + require.NoError(err) - if err := oracleBlk.Verify(context.Background()); err != nil { - t.Fatal(err) - } - if err := proVM.SetPreference(context.Background(), oracleBlk.ID()); err != nil { - t.Fatal(err) - } + require.NoError(oracleBlk.Verify(context.Background())) + require.NoError(proVM.SetPreference(context.Background(), oracleBlk.ID())) // retrieve one option and verify block built on it - 
postForkOracleBlk, ok := oracleBlk.(*postForkBlock) - if !ok { - t.Fatal("expected post fork block") - } + require.IsType(&postForkBlock{}, oracleBlk) + postForkOracleBlk := oracleBlk.(*postForkBlock) opts, err := postForkOracleBlk.Options(context.Background()) - if err != nil { - t.Fatal("could not retrieve options from post fork oracle block") - } + require.NoError(err) parentBlk := opts[0] - if err := parentBlk.Verify(context.Background()); err != nil { - t.Fatal(err) - } - if err := proVM.SetPreference(context.Background(), parentBlk.ID()); err != nil { - t.Fatal(err) - } + require.NoError(parentBlk.Verify(context.Background())) + require.NoError(proVM.SetPreference(context.Background(), parentBlk.ID())) + + // set VM to be ready to build next block. We set it to generate unsigned blocks + // for simplicity. + nextTime := parentBlk.Timestamp().Add(proposer.MaxVerifyDelay) + proVM.Set(nextTime) - prntBlkPChainHeight := pChainHeight + parentBlkPChainHeight := postForkOracleBlk.PChainHeight() // option takes proposal blocks' Pchain height childCoreBlk := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ IDV: ids.Empty.Prefix(2222), StatusV: choices.Processing, }, - ParentV: oracleCoreBlk.opts[0].ID(), - BytesV: []byte{2}, - TimestampV: parentBlk.Timestamp().Add(proposer.MaxDelay), - } - - // child P-Chain height must not precede parent P-Chain height - childSlb, err := block.Build( - parentBlk.ID(), - childCoreBlk.Timestamp(), - prntBlkPChainHeight-1, - proVM.stakingCertLeaf, - childCoreBlk.Bytes(), - proVM.ctx.ChainID, - proVM.stakingLeafSigner, - ) - if err != nil { - t.Fatal("could not build stateless block") + ParentV: oracleCoreBlk.opts[0].ID(), + BytesV: []byte{2}, + HeightV: oracleCoreBlk.opts[0].Height() + 1, } - childProBlk := postForkBlock{ - SignedBlock: childSlb, + childBlk := postForkBlock{ postForkCommonComponents: postForkCommonComponents{ vm: proVM, innerBlk: childCoreBlk, @@ -690,79 +827,93 @@ func 
TestBlockVerify_PostForkBlockBuiltOnOption_PChainHeightChecks(t *testing.T) }, } - if err := childProBlk.Verify(context.Background()); err == nil { - t.Fatal("ProBlock's P-Chain-Height cannot be lower than parent ProBlock's one") - } + { + // child P-Chain height must not precede parent P-Chain height + childSlb, err := block.BuildUnsigned( + parentBlk.ID(), + nextTime, + parentBlkPChainHeight-1, + childCoreBlk.Bytes(), + ) + require.NoError(err) + childBlk.SignedBlock = childSlb - // child P-Chain height can be equal to parent P-Chain height - childSlb, err = block.BuildUnsigned( - parentBlk.ID(), - childCoreBlk.Timestamp(), - prntBlkPChainHeight, - childCoreBlk.Bytes(), - ) - if err != nil { - t.Fatal("could not build stateless block") + err = childBlk.Verify(context.Background()) + require.ErrorIs(err, errPChainHeightNotMonotonic) } - childProBlk.SignedBlock = childSlb - proVM.Set(childCoreBlk.Timestamp()) - if err := childProBlk.Verify(context.Background()); err != nil { - t.Fatalf("ProBlock's P-Chain-Height can be larger or equal than parent ProBlock's one: %s", err) - } + { + // child P-Chain height can be equal to parent P-Chain height + childSlb, err := block.BuildUnsigned( + parentBlk.ID(), + nextTime, + parentBlkPChainHeight, + childCoreBlk.Bytes(), + ) + require.NoError(err) + childBlk.SignedBlock = childSlb - // child P-Chain height may follow parent P-Chain height - pChainHeight = prntBlkPChainHeight * 2 // move ahead pChainHeight - childSlb, err = block.BuildUnsigned( - parentBlk.ID(), - childCoreBlk.Timestamp(), - prntBlkPChainHeight+1, - childCoreBlk.Bytes(), - ) - if err != nil { - t.Fatal("could not build stateless block") - } - childProBlk.SignedBlock = childSlb - if err := childProBlk.Verify(context.Background()); err != nil { - t.Fatal("ProBlock's P-Chain-Height can be larger or equal than parent ProBlock's one") + require.NoError(childBlk.Verify(context.Background())) } - // block P-Chain height can be equal to current P-Chain height - 
currPChainHeight, _ := proVM.ctx.ValidatorState.GetCurrentHeight(context.Background()) - childSlb, err = block.BuildUnsigned( - parentBlk.ID(), - childCoreBlk.Timestamp(), - currPChainHeight, - childCoreBlk.Bytes(), - ) - if err != nil { - t.Fatal("could not build stateless block") - } - childProBlk.SignedBlock = childSlb - if err := childProBlk.Verify(context.Background()); err != nil { - t.Fatal("ProBlock's P-Chain-Height can be equal to current p chain height") - } + { + // child P-Chain height may follow parent P-Chain height + childSlb, err := block.BuildUnsigned( + parentBlk.ID(), + nextTime, + parentBlkPChainHeight+1, + childCoreBlk.Bytes(), + ) + require.NoError(err) + childBlk.SignedBlock = childSlb - // block P-Chain height cannot be at higher than current P-Chain height - childSlb, err = block.BuildUnsigned( - parentBlk.ID(), - childCoreBlk.Timestamp(), - currPChainHeight*2, - childCoreBlk.Bytes(), - ) - if err != nil { - t.Fatal("could not build stateless block") + require.NoError(childBlk.Verify(context.Background())) } - childProBlk.SignedBlock = childSlb - if err := childProBlk.Verify(context.Background()); err != errPChainHeightNotReached { - t.Fatal("ProBlock's P-Chain-Height cannot be larger than current p chain height") + + currPChainHeight, _ := proVM.ctx.ValidatorState.GetCurrentHeight(context.Background()) + { + // block P-Chain height can be equal to current P-Chain height + childSlb, err := block.BuildUnsigned( + parentBlk.ID(), + nextTime, + currPChainHeight, + childCoreBlk.Bytes(), + ) + require.NoError(err) + childBlk.SignedBlock = childSlb + + require.NoError(childBlk.Verify(context.Background())) + } + + { + // block P-Chain height cannot be at higher than current P-Chain height + childSlb, err := block.BuildUnsigned( + parentBlk.ID(), + nextTime, + currPChainHeight*2, + childCoreBlk.Bytes(), + ) + require.NoError(err) + childBlk.SignedBlock = childSlb + err = childBlk.Verify(context.Background()) + require.ErrorIs(err, 
errPChainHeightNotReached) } } func TestBlockVerify_PostForkBlock_CoreBlockVerifyIsCalledOnce(t *testing.T) { + require := require.New(t) + // Verify a block once (in this test by building it). // Show that other verify call would not call coreBlk.Verify() - coreVM, valState, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) // enable ProBlks + var ( + activationTime = time.Unix(0, 0) + durangoTime = activationTime + ) + coreVM, valState, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + defer func() { + require.NoError(proVM.Shutdown(context.Background())) + }() + pChainHeight := uint64(2000) valState.GetCurrentHeightF = func(context.Context) (uint64, error) { return pChainHeight, nil @@ -773,9 +924,9 @@ func TestBlockVerify_PostForkBlock_CoreBlockVerifyIsCalledOnce(t *testing.T) { IDV: ids.Empty.Prefix(1111), StatusV: choices.Processing, }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - TimestampV: coreGenBlk.Timestamp().Add(proposer.MaxDelay), + BytesV: []byte{1}, + ParentV: coreGenBlk.ID(), + HeightV: coreGenBlk.Height() + 1, } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk, nil @@ -802,31 +953,34 @@ func TestBlockVerify_PostForkBlock_CoreBlockVerifyIsCalledOnce(t *testing.T) { } builtBlk, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatal("could not build block") - } + require.NoError(err) - if err := builtBlk.Verify(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(builtBlk.Verify(context.Background())) // set error on coreBlock.Verify and recall Verify() coreBlk.VerifyV = errDuplicateVerify - if err := builtBlk.Verify(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(builtBlk.Verify(context.Background())) // rebuild a block with the same core block pChainHeight++ - if _, err := proVM.BuildBlock(context.Background()); err != nil { - t.Fatal("could not build block with same core block") - } + _, err = 
proVM.BuildBlock(context.Background()) + require.NoError(err) } // ProposerBlock.Accept tests section func TestBlockAccept_PostForkBlock_SetsLastAcceptedBlock(t *testing.T) { + require := require.New(t) + // setup - coreVM, valState, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) // enable ProBlks + var ( + activationTime = time.Unix(0, 0) + durangoTime = activationTime + ) + coreVM, valState, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + defer func() { + require.NoError(proVM.Shutdown(context.Background())) + }() + pChainHeight := uint64(2000) valState.GetCurrentHeightF = func(context.Context) (uint64, error) { return pChainHeight, nil @@ -837,9 +991,9 @@ func TestBlockAccept_PostForkBlock_SetsLastAcceptedBlock(t *testing.T) { IDV: ids.Empty.Prefix(1111), StatusV: choices.Processing, }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - TimestampV: coreGenBlk.Timestamp().Add(proposer.MaxDelay), + BytesV: []byte{1}, + ParentV: coreGenBlk.ID(), + HeightV: coreGenBlk.Height() + 1, } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk, nil @@ -866,14 +1020,10 @@ func TestBlockAccept_PostForkBlock_SetsLastAcceptedBlock(t *testing.T) { } builtBlk, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatal("proposerVM could not build block") - } + require.NoError(err) // test - if err := builtBlk.Accept(context.Background()); err != nil { - t.Fatal("could not accept block") - } + require.NoError(builtBlk.Accept(context.Background())) coreVM.LastAcceptedF = func(context.Context) (ids.ID, error) { if coreBlk.Status() == choices.Accepted { @@ -881,15 +1031,23 @@ func TestBlockAccept_PostForkBlock_SetsLastAcceptedBlock(t *testing.T) { } return coreGenBlk.ID(), nil } - if acceptedID, err := proVM.LastAccepted(context.Background()); err != nil { - t.Fatal("could not retrieve last accepted block") - } else if acceptedID != builtBlk.ID() { - t.Fatal("unexpected last accepted ID") - } + 
acceptedID, err := proVM.LastAccepted(context.Background()) + require.NoError(err) + require.Equal(builtBlk.ID(), acceptedID) } func TestBlockAccept_PostForkBlock_TwoProBlocksWithSameCoreBlock_OneIsAccepted(t *testing.T) { - coreVM, valState, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) // enable ProBlks + require := require.New(t) + + var ( + activationTime = time.Unix(0, 0) + durangoTime = activationTime + ) + coreVM, valState, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + defer func() { + require.NoError(proVM.Shutdown(context.Background())) + }() + var minimumHeight uint64 valState.GetMinimumHeightF = func(context.Context) (uint64, error) { return minimumHeight, nil @@ -901,10 +1059,9 @@ func TestBlockAccept_PostForkBlock_TwoProBlocksWithSameCoreBlock_OneIsAccepted(t IDV: ids.Empty.Prefix(111), StatusV: choices.Processing, }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - TimestampV: coreGenBlk.Timestamp().Add(proposer.MaxDelay), + BytesV: []byte{1}, + ParentV: coreGenBlk.ID(), + HeightV: coreGenBlk.Height() + 1, } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk, nil @@ -913,76 +1070,70 @@ func TestBlockAccept_PostForkBlock_TwoProBlocksWithSameCoreBlock_OneIsAccepted(t minimumHeight = coreGenBlk.Height() proBlk1, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatal("could not build proBlk1") - } + require.NoError(err) minimumHeight++ proBlk2, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatal("could not build proBlk2") - } - if proBlk1.ID() == proBlk2.ID() { - t.Fatal("proBlk1 and proBlk2 should be different for this test") - } + require.NoError(err) + require.NotEqual(proBlk2.ID(), proBlk1.ID()) // set proBlk1 as preferred - if err := proBlk1.Accept(context.Background()); err != nil { - t.Fatal("could not accept proBlk1") - } - if coreBlk.Status() != choices.Accepted { - t.Fatal("coreBlk 
should have been accepted") - } + require.NoError(proBlk1.Accept(context.Background())) + require.Equal(choices.Accepted, coreBlk.Status()) - if acceptedID, err := proVM.LastAccepted(context.Background()); err != nil { - t.Fatal("could not retrieve last accepted block") - } else if acceptedID != proBlk1.ID() { - t.Fatal("unexpected last accepted ID") - } + acceptedID, err := proVM.LastAccepted(context.Background()) + require.NoError(err) + require.Equal(proBlk1.ID(), acceptedID) } // ProposerBlock.Reject tests section func TestBlockReject_PostForkBlock_InnerBlockIsNotRejected(t *testing.T) { - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) // enable ProBlks + require := require.New(t) + + var ( + activationTime = time.Unix(0, 0) + durangoTime = activationTime + ) + coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + defer func() { + require.NoError(proVM.Shutdown(context.Background())) + }() + coreBlk := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ IDV: ids.Empty.Prefix(111), StatusV: choices.Processing, }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - TimestampV: coreGenBlk.Timestamp().Add(proposer.MaxDelay), + BytesV: []byte{1}, + ParentV: coreGenBlk.ID(), + HeightV: coreGenBlk.Height() + 1, } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk, nil } sb, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatal("could not build block") - } - proBlk, ok := sb.(*postForkBlock) - if !ok { - t.Fatal("built block has not expected type") - } + require.NoError(err) + require.IsType(&postForkBlock{}, sb) + proBlk := sb.(*postForkBlock) - if err := proBlk.Reject(context.Background()); err != nil { - t.Fatal("could not reject block") - } - - if proBlk.Status() != choices.Rejected { - t.Fatal("block rejection did not set state properly") - } + require.NoError(proBlk.Reject(context.Background())) - if 
proBlk.innerBlk.Status() == choices.Rejected { - t.Fatal("block rejection unduly changed inner block status") - } + require.Equal(choices.Rejected, proBlk.Status()) + require.NotEqual(choices.Rejected, proBlk.innerBlk.Status()) } func TestBlockVerify_PostForkBlock_ShouldBePostForkOption(t *testing.T) { - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) - proVM.Set(coreGenBlk.Timestamp()) + require := require.New(t) + + var ( + activationTime = time.Unix(0, 0) + durangoTime = activationTime + ) + coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + defer func() { + require.NoError(proVM.Shutdown(context.Background())) + }() // create post fork oracle block ... oracleCoreBlk := &TestOptionsBlock{ @@ -991,9 +1142,9 @@ func TestBlockVerify_PostForkBlock_ShouldBePostForkOption(t *testing.T) { IDV: ids.Empty.Prefix(1111), StatusV: choices.Processing, }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - TimestampV: coreGenBlk.Timestamp(), + BytesV: []byte{1}, + ParentV: coreGenBlk.ID(), + HeightV: coreGenBlk.Height() + 1, }, } coreOpt0 := &snowman.TestBlock{ @@ -1001,18 +1152,18 @@ func TestBlockVerify_PostForkBlock_ShouldBePostForkOption(t *testing.T) { IDV: ids.Empty.Prefix(2222), StatusV: choices.Processing, }, - BytesV: []byte{2}, - ParentV: oracleCoreBlk.ID(), - TimestampV: oracleCoreBlk.Timestamp(), + BytesV: []byte{2}, + ParentV: oracleCoreBlk.ID(), + HeightV: oracleCoreBlk.Height() + 1, } coreOpt1 := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ IDV: ids.Empty.Prefix(3333), StatusV: choices.Processing, }, - BytesV: []byte{3}, - ParentV: oracleCoreBlk.ID(), - TimestampV: oracleCoreBlk.Timestamp(), + BytesV: []byte{3}, + ParentV: oracleCoreBlk.ID(), + HeightV: oracleCoreBlk.Height() + 1, } oracleCoreBlk.opts = [2]snowman.Block{ coreOpt0, @@ -1052,51 +1203,33 @@ func TestBlockVerify_PostForkBlock_ShouldBePostForkOption(t *testing.T) { } parentBlk, err := proVM.BuildBlock(context.Background()) 
- if err != nil { - t.Fatal("could not build post fork oracle block") - } + require.NoError(err) - if err := parentBlk.Verify(context.Background()); err != nil { - t.Fatal(err) - } - if err := proVM.SetPreference(context.Background(), parentBlk.ID()); err != nil { - t.Fatal(err) - } + require.NoError(parentBlk.Verify(context.Background())) + require.NoError(proVM.SetPreference(context.Background(), parentBlk.ID())) // retrieve options ... - postForkOracleBlk, ok := parentBlk.(*postForkBlock) - if !ok { - t.Fatal("expected post fork block") - } + require.IsType(&postForkBlock{}, parentBlk) + postForkOracleBlk := parentBlk.(*postForkBlock) opts, err := postForkOracleBlk.Options(context.Background()) - if err != nil { - t.Fatal("could not retrieve options from post fork oracle block") - } - if _, ok := opts[0].(*postForkOption); !ok { - t.Fatal("unexpected option type") - } + require.NoError(err) + require.IsType(&postForkOption{}, opts[0]) // ... and verify them the first time - if err := opts[0].Verify(context.Background()); err != nil { - t.Fatal("option 0 should verify") - } - if err := opts[1].Verify(context.Background()); err != nil { - t.Fatal("option 1 should verify") - } + require.NoError(opts[0].Verify(context.Background())) + require.NoError(opts[1].Verify(context.Background())) // Build the child statelessChild, err := block.Build( postForkOracleBlk.ID(), postForkOracleBlk.Timestamp().Add(proposer.WindowDuration), postForkOracleBlk.PChainHeight(), - proVM.stakingCertLeaf, + proVM.StakingCertLeaf, oracleCoreBlk.opts[0].Bytes(), proVM.ctx.ChainID, - proVM.stakingLeafSigner, + proVM.StakingLeafSigner, ) - if err != nil { - t.Fatal("failed to build new child block") - } + require.NoError(err) invalidChild, err := proVM.ParseBlock(context.Background(), statelessChild.Bytes()) if err != nil { @@ -1105,23 +1238,29 @@ func TestBlockVerify_PostForkBlock_ShouldBePostForkOption(t *testing.T) { } err = invalidChild.Verify(context.Background()) - if err == nil { - 
t.Fatal("Should have failed to verify a child that was signed when it should be an oracle block") - } + require.ErrorIs(err, errUnexpectedBlockType) } func TestBlockVerify_PostForkBlock_PChainTooLow(t *testing.T) { - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 5) - proVM.Set(coreGenBlk.Timestamp()) + require := require.New(t) + + var ( + activationTime = time.Unix(0, 0) + durangoTime = activationTime + ) + coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 5) + defer func() { + require.NoError(proVM.Shutdown(context.Background())) + }() coreBlk := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - TimestampV: coreGenBlk.Timestamp(), + BytesV: []byte{1}, + ParentV: coreGenBlk.ID(), + HeightV: coreGenBlk.Height() + 1, } coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { @@ -1151,9 +1290,7 @@ func TestBlockVerify_PostForkBlock_PChainTooLow(t *testing.T) { 4, coreBlk.Bytes(), ) - if err != nil { - t.Fatal("failed to build new child block") - } + require.NoError(err) invalidChild, err := proVM.ParseBlock(context.Background(), statelessChild.Bytes()) if err != nil { @@ -1162,7 +1299,5 @@ func TestBlockVerify_PostForkBlock_PChainTooLow(t *testing.T) { } err = invalidChild.Verify(context.Background()) - if err == nil { - t.Fatal("Should have failed to verify a child that was signed when it should be an oracle block") - } + require.ErrorIs(err, errPChainHeightTooLow) } diff --git a/avalanchego/vms/proposervm/post_fork_option.go b/avalanchego/vms/proposervm/post_fork_option.go index e5745e04..93cfd255 100644 --- a/avalanchego/vms/proposervm/post_fork_option.go +++ b/avalanchego/vms/proposervm/post_fork_option.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. package proposervm @@ -7,6 +7,8 @@ import ( "context" "time" + "go.uber.org/zap" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/vms/proposervm/block" @@ -39,16 +41,8 @@ func (b *postForkOption) Accept(ctx context.Context) error { func (b *postForkOption) acceptOuterBlk() error { // Update in-memory references b.status = choices.Accepted - b.vm.lastAcceptedHeight = b.Height() - - blkID := b.ID() - delete(b.vm.verifiedBlocks, blkID) - // Persist this block, its height index, and its status - if err := b.vm.State.SetLastAccepted(blkID); err != nil { - return err - } - return b.vm.storePostForkBlock(b) + return b.vm.acceptPostForkBlock(b) } func (b *postForkOption) acceptInnerBlk(ctx context.Context) error { @@ -113,13 +107,19 @@ func (*postForkOption) verifyPostForkOption(context.Context, *postForkOption) er } func (b *postForkOption) buildChild(ctx context.Context) (Block, error) { + parentID := b.ID() parentPChainHeight, err := b.pChainHeight(ctx) if err != nil { + b.vm.ctx.Log.Error("unexpected build block failure", + zap.String("reason", "failed to fetch parent's P-chain height"), + zap.Stringer("parentID", parentID), + zap.Error(err), + ) return nil, err } return b.postForkCommonComponents.buildChild( ctx, - b.ID(), + parentID, b.Timestamp(), parentPChainHeight, ) diff --git a/avalanchego/vms/proposervm/post_fork_option_test.go b/avalanchego/vms/proposervm/post_fork_option_test.go index 3f6c0ac6..dd16f8cd 100644 --- a/avalanchego/vms/proposervm/post_fork_option_test.go +++ b/avalanchego/vms/proposervm/post_fork_option_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package proposervm @@ -6,19 +6,18 @@ package proposervm import ( "bytes" "context" - "crypto" "testing" "time" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/database/manager" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/vms/proposervm/block" - "github.com/ava-labs/avalanchego/vms/proposervm/proposer" ) var _ snowman.OracleBlock = (*TestOptionsBlock)(nil) @@ -35,8 +34,16 @@ func (tob TestOptionsBlock) Options(context.Context) ([2]snowman.Block, error) { // ProposerBlock.Verify tests section func TestBlockVerify_PostForkOption_ParentChecks(t *testing.T) { - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) - proVM.Set(coreGenBlk.Timestamp()) + require := require.New(t) + + var ( + activationTime = time.Unix(0, 0) + durangoTime = activationTime + ) + coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + defer func() { + require.NoError(proVM.Shutdown(context.Background())) + }() // create post fork oracle block ... 
oracleCoreBlk := &TestOptionsBlock{ @@ -45,9 +52,9 @@ func TestBlockVerify_PostForkOption_ParentChecks(t *testing.T) { IDV: ids.Empty.Prefix(1111), StatusV: choices.Processing, }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - TimestampV: coreGenBlk.Timestamp(), + BytesV: []byte{1}, + ParentV: coreGenBlk.ID(), + HeightV: coreGenBlk.Height() + 1, }, } oracleCoreBlk.opts = [2]snowman.Block{ @@ -56,18 +63,18 @@ func TestBlockVerify_PostForkOption_ParentChecks(t *testing.T) { IDV: ids.Empty.Prefix(2222), StatusV: choices.Processing, }, - BytesV: []byte{2}, - ParentV: oracleCoreBlk.ID(), - TimestampV: oracleCoreBlk.Timestamp(), + BytesV: []byte{2}, + ParentV: oracleCoreBlk.ID(), + HeightV: oracleCoreBlk.Height() + 1, }, &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ IDV: ids.Empty.Prefix(3333), StatusV: choices.Processing, }, - BytesV: []byte{3}, - ParentV: oracleCoreBlk.ID(), - TimestampV: oracleCoreBlk.Timestamp(), + BytesV: []byte{3}, + ParentV: oracleCoreBlk.ID(), + HeightV: oracleCoreBlk.Height() + 1, }, } @@ -104,74 +111,58 @@ func TestBlockVerify_PostForkOption_ParentChecks(t *testing.T) { } parentBlk, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatal("could not build post fork oracle block") - } + require.NoError(err) - if err := parentBlk.Verify(context.Background()); err != nil { - t.Fatal(err) - } - if err := proVM.SetPreference(context.Background(), parentBlk.ID()); err != nil { - t.Fatal(err) - } + require.NoError(parentBlk.Verify(context.Background())) + require.NoError(proVM.SetPreference(context.Background(), parentBlk.ID())) // retrieve options ... 
- postForkOracleBlk, ok := parentBlk.(*postForkBlock) - if !ok { - t.Fatal("expected post fork block") - } + require.IsType(&postForkBlock{}, parentBlk) + postForkOracleBlk := parentBlk.(*postForkBlock) opts, err := postForkOracleBlk.Options(context.Background()) - if err != nil { - t.Fatal("could not retrieve options from post fork oracle block") - } - if _, ok := opts[0].(*postForkOption); !ok { - t.Fatal("unexpected option type") - } + require.NoError(err) + require.IsType(&postForkOption{}, opts[0]) // ... and verify them - if err := opts[0].Verify(context.Background()); err != nil { - t.Fatal("option 0 should verify") - } - if err := opts[1].Verify(context.Background()); err != nil { - t.Fatal("option 1 should verify") - } + require.NoError(opts[0].Verify(context.Background())) + require.NoError(opts[1].Verify(context.Background())) // show we can build on options - if err := proVM.SetPreference(context.Background(), opts[0].ID()); err != nil { - t.Fatal("could not set preference") - } + require.NoError(proVM.SetPreference(context.Background(), opts[0].ID())) childCoreBlk := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ IDV: ids.Empty.Prefix(4444), StatusV: choices.Processing, }, - ParentV: oracleCoreBlk.opts[0].ID(), - BytesV: []byte{4}, - TimestampV: oracleCoreBlk.opts[0].Timestamp().Add(proposer.MaxDelay), + ParentV: oracleCoreBlk.opts[0].ID(), + BytesV: []byte{4}, + HeightV: oracleCoreBlk.opts[0].Height() + 1, } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return childCoreBlk, nil } - proVM.Set(childCoreBlk.Timestamp()) + require.NoError(waitForProposerWindow(proVM, opts[0], postForkOracleBlk.PChainHeight())) proChild, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatal("could not build on top of option") - } - if _, ok := proChild.(*postForkBlock); !ok { - t.Fatal("unexpected block type") - } - if err := proChild.Verify(context.Background()); err != nil { - t.Fatal("block built on option does not 
verify") - } + require.NoError(err) + require.IsType(&postForkBlock{}, proChild) + require.NoError(proChild.Verify(context.Background())) } // ProposerBlock.Accept tests section func TestBlockVerify_PostForkOption_CoreBlockVerifyIsCalledOnce(t *testing.T) { + require := require.New(t) + // Verify an option once; then show that another verify call would not call coreBlk.Verify() - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) - proVM.Set(coreGenBlk.Timestamp()) + var ( + activationTime = time.Unix(0, 0) + durangoTime = activationTime + ) + coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + defer func() { + require.NoError(proVM.Shutdown(context.Background())) + }() // create post fork oracle block ... oracleCoreBlk := &TestOptionsBlock{ @@ -180,9 +171,9 @@ func TestBlockVerify_PostForkOption_CoreBlockVerifyIsCalledOnce(t *testing.T) { IDV: ids.Empty.Prefix(1111), StatusV: choices.Processing, }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - TimestampV: coreGenBlk.Timestamp(), + BytesV: []byte{1}, + ParentV: coreGenBlk.ID(), + HeightV: coreGenBlk.Height() + 1, }, } coreOpt0 := &snowman.TestBlock{ @@ -190,18 +181,18 @@ func TestBlockVerify_PostForkOption_CoreBlockVerifyIsCalledOnce(t *testing.T) { IDV: ids.Empty.Prefix(2222), StatusV: choices.Processing, }, - BytesV: []byte{2}, - ParentV: oracleCoreBlk.ID(), - TimestampV: oracleCoreBlk.Timestamp(), + BytesV: []byte{2}, + ParentV: oracleCoreBlk.ID(), + HeightV: oracleCoreBlk.Height() + 1, } coreOpt1 := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ IDV: ids.Empty.Prefix(3333), StatusV: choices.Processing, }, - BytesV: []byte{3}, - ParentV: oracleCoreBlk.ID(), - TimestampV: oracleCoreBlk.Timestamp(), + BytesV: []byte{3}, + ParentV: oracleCoreBlk.ID(), + HeightV: oracleCoreBlk.Height() + 1, } oracleCoreBlk.opts = [2]snowman.Block{ coreOpt0, @@ -241,55 +232,42 @@ func TestBlockVerify_PostForkOption_CoreBlockVerifyIsCalledOnce(t *testing.T) { 
} parentBlk, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatal("could not build post fork oracle block") - } + require.NoError(err) - if err := parentBlk.Verify(context.Background()); err != nil { - t.Fatal(err) - } - if err := proVM.SetPreference(context.Background(), parentBlk.ID()); err != nil { - t.Fatal(err) - } + require.NoError(parentBlk.Verify(context.Background())) + require.NoError(proVM.SetPreference(context.Background(), parentBlk.ID())) // retrieve options ... - postForkOracleBlk, ok := parentBlk.(*postForkBlock) - if !ok { - t.Fatal("expected post fork block") - } + require.IsType(&postForkBlock{}, parentBlk) + postForkOracleBlk := parentBlk.(*postForkBlock) opts, err := postForkOracleBlk.Options(context.Background()) - if err != nil { - t.Fatal("could not retrieve options from post fork oracle block") - } - if _, ok := opts[0].(*postForkOption); !ok { - t.Fatal("unexpected option type") - } + require.NoError(err) + require.IsType(&postForkOption{}, opts[0]) // ... and verify them the first time - if err := opts[0].Verify(context.Background()); err != nil { - t.Fatal("option 0 should verify") - } - if err := opts[1].Verify(context.Background()); err != nil { - t.Fatal("option 1 should verify") - } + require.NoError(opts[0].Verify(context.Background())) + require.NoError(opts[1].Verify(context.Background())) // set error on coreBlock.Verify and recall Verify() coreOpt0.VerifyV = errDuplicateVerify coreOpt1.VerifyV = errDuplicateVerify // ... and verify them again. 
They verify without call to innerBlk - if err := opts[0].Verify(context.Background()); err != nil { - t.Fatal("option 0 should verify") - } - if err := opts[1].Verify(context.Background()); err != nil { - t.Fatal("option 1 should verify") - } + require.NoError(opts[0].Verify(context.Background())) + require.NoError(opts[1].Verify(context.Background())) } func TestBlockAccept_PostForkOption_SetsLastAcceptedBlock(t *testing.T) { - // setup - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) - proVM.Set(coreGenBlk.Timestamp()) + require := require.New(t) + + var ( + activationTime = time.Unix(0, 0) + durangoTime = activationTime + ) + coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + defer func() { + require.NoError(proVM.Shutdown(context.Background())) + }() // create post fork oracle block ... oracleCoreBlk := &TestOptionsBlock{ @@ -298,9 +276,9 @@ func TestBlockAccept_PostForkOption_SetsLastAcceptedBlock(t *testing.T) { IDV: ids.Empty.Prefix(1111), StatusV: choices.Processing, }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - TimestampV: coreGenBlk.Timestamp(), + BytesV: []byte{1}, + ParentV: coreGenBlk.ID(), + HeightV: coreGenBlk.Height() + 1, }, } oracleCoreBlk.opts = [2]snowman.Block{ @@ -309,18 +287,18 @@ func TestBlockAccept_PostForkOption_SetsLastAcceptedBlock(t *testing.T) { IDV: ids.Empty.Prefix(2222), StatusV: choices.Processing, }, - BytesV: []byte{2}, - ParentV: oracleCoreBlk.ID(), - TimestampV: oracleCoreBlk.Timestamp(), + BytesV: []byte{2}, + ParentV: oracleCoreBlk.ID(), + HeightV: oracleCoreBlk.Height() + 1, }, &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ IDV: ids.Empty.Prefix(3333), StatusV: choices.Processing, }, - BytesV: []byte{3}, - ParentV: oracleCoreBlk.ID(), - TimestampV: oracleCoreBlk.Timestamp(), + BytesV: []byte{3}, + ParentV: oracleCoreBlk.ID(), + HeightV: oracleCoreBlk.Height() + 1, }, } @@ -357,14 +335,10 @@ func 
TestBlockAccept_PostForkOption_SetsLastAcceptedBlock(t *testing.T) { } parentBlk, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatal("could not build post fork oracle block") - } + require.NoError(err) // accept oracle block - if err := parentBlk.Accept(context.Background()); err != nil { - t.Fatal("could not accept block") - } + require.NoError(parentBlk.Accept(context.Background())) coreVM.LastAcceptedF = func(context.Context) (ids.ID, error) { if oracleCoreBlk.Status() == choices.Accepted { @@ -372,25 +346,17 @@ func TestBlockAccept_PostForkOption_SetsLastAcceptedBlock(t *testing.T) { } return coreGenBlk.ID(), nil } - if acceptedID, err := proVM.LastAccepted(context.Background()); err != nil { - t.Fatal("could not retrieve last accepted block") - } else if acceptedID != parentBlk.ID() { - t.Fatal("unexpected last accepted ID") - } + acceptedID, err := proVM.LastAccepted(context.Background()) + require.NoError(err) + require.Equal(parentBlk.ID(), acceptedID) // accept one of the options - postForkOracleBlk, ok := parentBlk.(*postForkBlock) - if !ok { - t.Fatal("expected post fork block") - } + require.IsType(&postForkBlock{}, parentBlk) + postForkOracleBlk := parentBlk.(*postForkBlock) opts, err := postForkOracleBlk.Options(context.Background()) - if err != nil { - t.Fatal("could not retrieve options from post fork oracle block") - } + require.NoError(err) - if err := opts[0].Accept(context.Background()); err != nil { - t.Fatal("could not accept option") - } + require.NoError(opts[0].Accept(context.Background())) coreVM.LastAcceptedF = func(context.Context) (ids.ID, error) { if oracleCoreBlk.opts[0].Status() == choices.Accepted { @@ -398,18 +364,23 @@ func TestBlockAccept_PostForkOption_SetsLastAcceptedBlock(t *testing.T) { } return oracleCoreBlk.ID(), nil } - if acceptedID, err := proVM.LastAccepted(context.Background()); err != nil { - t.Fatal("could not retrieve last accepted block") - } else if acceptedID != opts[0].ID() { - 
t.Fatal("unexpected last accepted ID") - } + acceptedID, err = proVM.LastAccepted(context.Background()) + require.NoError(err) + require.Equal(opts[0].ID(), acceptedID) } // ProposerBlock.Reject tests section func TestBlockReject_InnerBlockIsNotRejected(t *testing.T) { - // setup - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) - proVM.Set(coreGenBlk.Timestamp()) + require := require.New(t) + + var ( + activationTime = time.Unix(0, 0) + durangoTime = activationTime + ) + coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + defer func() { + require.NoError(proVM.Shutdown(context.Background())) + }() // create post fork oracle block ... oracleCoreBlk := &TestOptionsBlock{ @@ -418,9 +389,9 @@ func TestBlockReject_InnerBlockIsNotRejected(t *testing.T) { IDV: ids.Empty.Prefix(1111), StatusV: choices.Processing, }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - TimestampV: coreGenBlk.Timestamp(), + BytesV: []byte{1}, + ParentV: coreGenBlk.ID(), + HeightV: coreGenBlk.Height() + 1, }, } oracleCoreBlk.opts = [2]snowman.Block{ @@ -429,18 +400,18 @@ func TestBlockReject_InnerBlockIsNotRejected(t *testing.T) { IDV: ids.Empty.Prefix(2222), StatusV: choices.Processing, }, - BytesV: []byte{2}, - ParentV: oracleCoreBlk.ID(), - TimestampV: oracleCoreBlk.Timestamp(), + BytesV: []byte{2}, + ParentV: oracleCoreBlk.ID(), + HeightV: oracleCoreBlk.Height() + 1, }, &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ IDV: ids.Empty.Prefix(3333), StatusV: choices.Processing, }, - BytesV: []byte{3}, - ParentV: oracleCoreBlk.ID(), - TimestampV: oracleCoreBlk.Timestamp(), + BytesV: []byte{3}, + ParentV: oracleCoreBlk.ID(), + HeightV: oracleCoreBlk.Height() + 1, }, } @@ -477,58 +448,44 @@ func TestBlockReject_InnerBlockIsNotRejected(t *testing.T) { } builtBlk, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatal("could not build post fork oracle block") - } + require.NoError(err) // reject oracle 
block - if err := builtBlk.Reject(context.Background()); err != nil { - t.Fatal("could not reject block") - } - proBlk, ok := builtBlk.(*postForkBlock) - if !ok { - t.Fatal("built block has not expected type") - } + require.NoError(builtBlk.Reject(context.Background())) + require.IsType(&postForkBlock{}, builtBlk) + proBlk := builtBlk.(*postForkBlock) - if proBlk.Status() != choices.Rejected { - t.Fatal("block rejection did not set state properly") - } + require.Equal(choices.Rejected, proBlk.Status()) - if proBlk.innerBlk.Status() == choices.Rejected { - t.Fatal("block rejection unduly changed inner block status") - } + require.NotEqual(choices.Rejected, proBlk.innerBlk.Status()) // reject an option - postForkOracleBlk, ok := builtBlk.(*postForkBlock) - if !ok { - t.Fatal("expected post fork block") - } + require.IsType(&postForkBlock{}, builtBlk) + postForkOracleBlk := builtBlk.(*postForkBlock) opts, err := postForkOracleBlk.Options(context.Background()) - if err != nil { - t.Fatal("could not retrieve options from post fork oracle block") - } + require.NoError(err) - if err := opts[0].Reject(context.Background()); err != nil { - t.Fatal("could not accept option") - } - proOpt, ok := opts[0].(*postForkOption) - if !ok { - t.Fatal("built block has not expected type") - } + require.NoError(opts[0].Reject(context.Background())) + require.IsType(&postForkOption{}, opts[0]) + proOpt := opts[0].(*postForkOption) - if proOpt.Status() != choices.Rejected { - t.Fatal("block rejection did not set state properly") - } + require.Equal(choices.Rejected, proOpt.Status()) - if proOpt.innerBlk.Status() == choices.Rejected { - t.Fatal("block rejection unduly changed inner block status") - } + require.NotEqual(choices.Rejected, proOpt.innerBlk.Status()) } func TestBlockVerify_PostForkOption_ParentIsNotOracleWithError(t *testing.T) { + require := require.New(t) + // Verify an option once; then show that another verify call would not call coreBlk.Verify() - coreVM, _, proVM, 
coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) - proVM.Set(coreGenBlk.Timestamp()) + var ( + activationTime = time.Unix(0, 0) + durangoTime = activationTime + ) + coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + defer func() { + require.NoError(proVM.Shutdown(context.Background())) + }() coreBlk := &TestOptionsBlock{ TestBlock: snowman.TestBlock{ @@ -536,9 +493,9 @@ func TestBlockVerify_PostForkOption_ParentIsNotOracleWithError(t *testing.T) { IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - TimestampV: coreGenBlk.Timestamp(), + BytesV: []byte{1}, + ParentV: coreGenBlk.ID(), + HeightV: coreGenBlk.Height() + 1, }, optsErr: snowman.ErrNotOracle, } @@ -548,10 +505,9 @@ func TestBlockVerify_PostForkOption_ParentIsNotOracleWithError(t *testing.T) { IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, - BytesV: []byte{2}, - ParentV: coreBlk.ID(), - HeightV: coreBlk.Height() + 1, - TimestampV: coreBlk.Timestamp(), + BytesV: []byte{2}, + ParentV: coreBlk.ID(), + HeightV: coreBlk.Height() + 1, } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { @@ -583,27 +539,19 @@ func TestBlockVerify_PostForkOption_ParentIsNotOracleWithError(t *testing.T) { } parentBlk, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatal("could not build post fork oracle block") - } + require.NoError(err) - postForkBlk, ok := parentBlk.(*postForkBlock) - if !ok { - t.Fatal("expected post fork block") - } + require.IsType(&postForkBlock{}, parentBlk) + postForkBlk := parentBlk.(*postForkBlock) _, err = postForkBlk.Options(context.Background()) - if err != snowman.ErrNotOracle { - t.Fatal("should have reported that the block isn't an oracle block") - } + require.Equal(snowman.ErrNotOracle, err) // Build the child statelessChild, err := block.BuildOption( postForkBlk.ID(), coreChildBlk.Bytes(), ) - if err != nil { - t.Fatal("failed to build new child 
block") - } + require.NoError(err) invalidChild, err := proVM.ParseBlock(context.Background(), statelessChild.Bytes()) if err != nil { @@ -612,13 +560,17 @@ func TestBlockVerify_PostForkOption_ParentIsNotOracleWithError(t *testing.T) { } err = invalidChild.Verify(context.Background()) - if err == nil { - t.Fatal("Should have failed to verify a child that should have been signed") - } + require.ErrorIs(err, database.ErrNotFound) } func TestOptionTimestampValidity(t *testing.T) { - coreVM, _, proVM, coreGenBlk, db := initTestProposerVM(t, time.Time{}, 0) // enable ProBlks + require := require.New(t) + + var ( + activationTime = time.Unix(0, 0) + durangoTime = activationTime + ) + coreVM, _, proVM, coreGenBlk, db := initTestProposerVM(t, activationTime, durangoTime, 0) coreOracleBlkID := ids.GenerateTestID() coreOracleBlk := &TestOptionsBlock{ @@ -627,10 +579,9 @@ func TestOptionTimestampValidity(t *testing.T) { IDV: coreOracleBlkID, StatusV: choices.Processing, }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - TimestampV: coreGenBlk.Timestamp().Add(time.Second), + BytesV: []byte{1}, + ParentV: coreGenBlk.ID(), + HeightV: coreGenBlk.Height() + 1, }, opts: [2]snowman.Block{ &snowman.TestBlock{ @@ -638,30 +589,30 @@ func TestOptionTimestampValidity(t *testing.T) { IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, - BytesV: []byte{2}, - ParentV: coreOracleBlkID, - TimestampV: coreGenBlk.Timestamp().Add(time.Second), + BytesV: []byte{2}, + ParentV: coreOracleBlkID, + HeightV: coreGenBlk.Height() + 2, }, &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, - BytesV: []byte{3}, - ParentV: coreOracleBlkID, - TimestampV: coreGenBlk.Timestamp().Add(time.Second), + BytesV: []byte{3}, + ParentV: coreOracleBlkID, + HeightV: coreGenBlk.Height() + 2, }, }, } + + oracleBlkTime := proVM.Time().Truncate(time.Second) statelessBlock, err := block.BuildUnsigned( 
coreGenBlk.ID(), - coreGenBlk.Timestamp(), + oracleBlkTime, 0, coreOracleBlk.Bytes(), ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { @@ -693,67 +644,54 @@ func TestOptionTimestampValidity(t *testing.T) { } statefulBlock, err := proVM.ParseBlock(context.Background(), statelessBlock.Bytes()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) - if err := statefulBlock.Verify(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(statefulBlock.Verify(context.Background())) statefulOracleBlock, ok := statefulBlock.(snowman.OracleBlock) - if !ok { - t.Fatal("should have reported as an oracle block") - } + require.True(ok) options, err := statefulOracleBlock.Options(context.Background()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) option := options[0] - if err := option.Verify(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(option.Verify(context.Background())) - if err := statefulBlock.Accept(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(statefulBlock.Accept(context.Background())) coreVM.GetBlockF = func(context.Context, ids.ID) (snowman.Block, error) { - t.Fatal("called GetBlock when unable to handle the error") + require.FailNow("called GetBlock when unable to handle the error") return nil, nil } coreVM.ParseBlockF = func(context.Context, []byte) (snowman.Block, error) { - t.Fatal("called ParseBlock when unable to handle the error") + require.FailNow("called ParseBlock when unable to handle the error") return nil, nil } - expectedTime := coreGenBlk.Timestamp() - if optionTime := option.Timestamp(); !optionTime.Equal(expectedTime) { - t.Fatalf("wrong time returned expected %s got %s", expectedTime, optionTime) - } + require.Equal(oracleBlkTime, option.Timestamp()) - if err := option.Accept(context.Background()); err != nil { - t.Fatal(err) - } + 
require.NoError(option.Accept(context.Background())) + require.NoError(proVM.Shutdown(context.Background())) // Restart the node. - ctx := proVM.ctx proVM = New( coreVM, - time.Time{}, - 0, - DefaultMinBlockDelay, - pTestCert.PrivateKey.(crypto.Signer), - pTestCert.Leaf, + Config{ + ActivationTime: time.Unix(0, 0), + DurangoTime: time.Unix(0, 0), + MinimumPChainHeight: 0, + MinBlkDelay: DefaultMinBlockDelay, + NumHistoricalBlocks: DefaultNumHistoricalBlocks, + StakingLeafSigner: pTestSigner, + StakingCertLeaf: pTestCert, + }, ) coreVM.InitializeF = func( context.Context, *snow.Context, - manager.Manager, + database.Database, []byte, []byte, []byte, @@ -796,7 +734,7 @@ func TestOptionTimestampValidity(t *testing.T) { } } - err = proVM.Initialize( + require.NoError(proVM.Initialize( context.Background(), ctx, db, @@ -806,30 +744,24 @@ func TestOptionTimestampValidity(t *testing.T) { nil, nil, nil, - ) - if err != nil { - t.Fatalf("failed to initialize proposerVM with %s", err) - } + )) + defer func() { + require.NoError(proVM.Shutdown(context.Background())) + }() statefulOptionBlock, err := proVM.ParseBlock(context.Background(), option.Bytes()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) - if status := statefulOptionBlock.Status(); status != choices.Accepted { - t.Fatalf("wrong status returned expected %s got %s", choices.Accepted, status) - } + require.Equal(choices.Accepted, statefulOptionBlock.Status()) coreVM.GetBlockF = func(context.Context, ids.ID) (snowman.Block, error) { - t.Fatal("called GetBlock when unable to handle the error") + require.FailNow("called GetBlock when unable to handle the error") return nil, nil } coreVM.ParseBlockF = func(context.Context, []byte) (snowman.Block, error) { - t.Fatal("called ParseBlock when unable to handle the error") + require.FailNow("called ParseBlock when unable to handle the error") return nil, nil } - if optionTime := statefulOptionBlock.Timestamp(); !optionTime.Equal(expectedTime) { - t.Fatalf("wrong 
time returned expected %s got %s", expectedTime, optionTime) - } + require.Equal(oracleBlkTime, statefulOptionBlock.Timestamp()) } diff --git a/avalanchego/vms/proposervm/pre_fork_block.go b/avalanchego/vms/proposervm/pre_fork_block.go index 7fcb8d70..199c1c98 100644 --- a/avalanchego/vms/proposervm/pre_fork_block.go +++ b/avalanchego/vms/proposervm/pre_fork_block.go @@ -1,10 +1,11 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package proposervm import ( "context" + "fmt" "time" "go.uber.org/zap" @@ -37,6 +38,28 @@ func (b *preForkBlock) acceptInnerBlk(ctx context.Context) error { return b.Block.Accept(ctx) } +func (b *preForkBlock) Status() choices.Status { + forkHeight, err := b.vm.getForkHeight() + if err == database.ErrNotFound { + return b.Block.Status() + } + if err != nil { + // TODO: Once `Status()` can return an error, we should return the error + // here. + b.vm.ctx.Log.Error("unexpected error looking up fork height", + zap.Error(err), + ) + return b.Block.Status() + } + + // The fork has occurred earlier than this block, so preForkBlocks are all + // invalid. 
+ if b.Height() >= forkHeight { + return choices.Rejected + } + return b.Block.Status() +} + func (b *preForkBlock) Verify(ctx context.Context) error { parent, err := b.vm.getPreForkBlock(ctx, b.Block.Parent()) if err != nil { @@ -74,15 +97,11 @@ func (b *preForkBlock) getInnerBlk() snowman.Block { func (b *preForkBlock) verifyPreForkChild(ctx context.Context, child *preForkBlock) error { parentTimestamp := b.Timestamp() - if !parentTimestamp.Before(b.vm.activationTime) { + if !parentTimestamp.Before(b.vm.ActivationTime) { if err := verifyIsOracleBlock(ctx, b.Block); err != nil { return err } - if err := b.verifyIsPreForkBlock(); err != nil { - return err - } - b.vm.ctx.Log.Debug("allowing pre-fork block after the fork time", zap.String("reason", "parent is an oracle block"), zap.Stringer("blkID", b.ID()), @@ -98,10 +117,6 @@ func (b *preForkBlock) verifyPostForkChild(ctx context.Context, child *postForkB return err } - if err := b.verifyIsPreForkBlock(); err != nil { - return err - } - childID := child.ID() childPChainHeight := child.PChainHeight() currentPChainHeight, err := b.vm.ctx.ValidatorState.GetCurrentHeight(ctx) @@ -114,9 +129,13 @@ func (b *preForkBlock) verifyPostForkChild(ctx context.Context, child *postForkB return err } if childPChainHeight > currentPChainHeight { - return errPChainHeightNotReached + return fmt.Errorf("%w: %d > %d", + errPChainHeightNotReached, + childPChainHeight, + currentPChainHeight, + ) } - if childPChainHeight < b.vm.minimumPChainHeight { + if childPChainHeight < b.vm.MinimumPChainHeight { return errPChainHeightTooLow } @@ -131,7 +150,7 @@ func (b *preForkBlock) verifyPostForkChild(ctx context.Context, child *postForkB // if the *preForkBlock is the last *preForkBlock before activation takes effect // (its timestamp is at or after the activation time) parentTimestamp := b.Timestamp() - if parentTimestamp.Before(b.vm.activationTime) { + if parentTimestamp.Before(b.vm.ActivationTime) { return errProposersNotActivated } @@ -162,7 
+181,7 @@ func (*preForkBlock) verifyPostForkOption(context.Context, *postForkOption) erro func (b *preForkBlock) buildChild(ctx context.Context) (Block, error) { parentTimestamp := b.Timestamp() - if parentTimestamp.Before(b.vm.activationTime) { + if parentTimestamp.Before(b.vm.ActivationTime) { // The chain hasn't forked yet innerBlock, err := b.vm.ChainVM.BuildBlock(ctx) if err != nil { @@ -191,8 +210,13 @@ func (b *preForkBlock) buildChild(ctx context.Context) (Block, error) { // The child's P-Chain height is proposed as the optimal P-Chain height that // is at least the minimum height - pChainHeight, err := b.vm.optimalPChainHeight(ctx, b.vm.minimumPChainHeight) + pChainHeight, err := b.vm.optimalPChainHeight(ctx, b.vm.MinimumPChainHeight) if err != nil { + b.vm.ctx.Log.Error("unexpected build block failure", + zap.String("reason", "failed to calculate optimal P-chain height"), + zap.Stringer("parentID", parentID), + zap.Error(err), + ) return nil, err } @@ -232,26 +256,3 @@ func (b *preForkBlock) buildChild(ctx context.Context) (Block, error) { func (*preForkBlock) pChainHeight(context.Context) (uint64, error) { return 0, nil } - -func (b *preForkBlock) verifyIsPreForkBlock() error { - if status := b.Status(); status == choices.Accepted { - _, err := b.vm.GetLastAccepted() - if err == nil { - // If this block is accepted and it was a preForkBlock, then there - // shouldn't have been an accepted postForkBlock yet. If there was - // an accepted postForkBlock, then this block wasn't a preForkBlock. - return errUnexpectedBlockType - } - if err != database.ErrNotFound { - // If an unexpected error was returned - propagate that that - // error. - return err - } - } else if _, contains := b.vm.Tree.Get(b.Block); contains { - // If this block is a preForkBlock, then it's inner block shouldn't have - // been registered into the inner block tree. If this block was - // registered into the inner block tree, then it wasn't a preForkBlock. 
- return errUnexpectedBlockType - } - return nil -} diff --git a/avalanchego/vms/proposervm/pre_fork_block_test.go b/avalanchego/vms/proposervm/pre_fork_block_test.go index 08df36b0..3261f5f9 100644 --- a/avalanchego/vms/proposervm/pre_fork_block_test.go +++ b/avalanchego/vms/proposervm/pre_fork_block_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package proposervm @@ -9,24 +9,25 @@ import ( "testing" "time" - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowman" - "github.com/ava-labs/avalanchego/snow/engine/snowman/block/mocks" + "github.com/ava-labs/avalanchego/snow/engine/snowman/block" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/timer/mockable" - "github.com/ava-labs/avalanchego/vms/proposervm/block" - "github.com/ava-labs/avalanchego/vms/proposervm/proposer" + + statelessblock "github.com/ava-labs/avalanchego/vms/proposervm/block" ) func TestOracle_PreForkBlkImplementsInterface(t *testing.T) { + require := require.New(t) + // setup proBlk := preForkBlock{ Block: &snowman.TestBlock{}, @@ -34,9 +35,7 @@ func TestOracle_PreForkBlkImplementsInterface(t *testing.T) { // test _, err := proBlk.Options(context.Background()) - if err != snowman.ErrNotOracle { - t.Fatal("Proposer block should signal that it wraps a block not implementing Options interface with ErrNotOracleBlock error") - } + require.Equal(snowman.ErrNotOracle, err) // setup proBlk = preForkBlock{ @@ -45,13 +44,20 @@ func TestOracle_PreForkBlkImplementsInterface(t *testing.T) { // test _, err = 
proBlk.Options(context.Background()) - if err != nil { - t.Fatal("Proposer block should forward wrapped block options if this implements Option interface") - } + require.NoError(err) } func TestOracle_PreForkBlkCanBuiltOnPreForkOption(t *testing.T) { - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, mockable.MaxTime, 0) + require := require.New(t) + + var ( + activationTime = mockable.MaxTime + durangoTime = activationTime + ) + coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + defer func() { + require.NoError(proVM.Shutdown(context.Background())) + }() // create pre fork oracle block ... oracleCoreBlk := &TestOptionsBlock{ @@ -102,27 +108,17 @@ func TestOracle_PreForkBlkCanBuiltOnPreForkOption(t *testing.T) { } parentBlk, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatal("could not build pre fork oracle block") - } + require.NoError(err) // retrieve options ... - preForkOracleBlk, ok := parentBlk.(*preForkBlock) - if !ok { - t.Fatal("expected pre fork block") - } + require.IsType(&preForkBlock{}, parentBlk) + preForkOracleBlk := parentBlk.(*preForkBlock) opts, err := preForkOracleBlk.Options(context.Background()) - if err != nil { - t.Fatal("could not retrieve options from pre fork oracle block") - } - if err := opts[0].Verify(context.Background()); err != nil { - t.Fatal("option should verify") - } + require.NoError(err) + require.NoError(opts[0].Verify(context.Background())) // ... 
show a block can be built on top of an option - if err := proVM.SetPreference(context.Background(), opts[0].ID()); err != nil { - t.Fatal("could not set preference") - } + require.NoError(proVM.SetPreference(context.Background(), opts[0].ID())) lastCoreBlk := &TestOptionsBlock{ TestBlock: snowman.TestBlock{ @@ -139,17 +135,21 @@ func TestOracle_PreForkBlkCanBuiltOnPreForkOption(t *testing.T) { } preForkChild, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatal("could not build pre fork block on pre fork option block") - } - if _, ok := preForkChild.(*preForkBlock); !ok { - t.Fatal("expected pre fork block built on pre fork option block") - } + require.NoError(err) + require.IsType(&preForkBlock{}, preForkChild) } func TestOracle_PostForkBlkCanBuiltOnPreForkOption(t *testing.T) { - activationTime := genesisTimestamp.Add(10 * time.Second) - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, 0) + require := require.New(t) + + var ( + activationTime = genesisTimestamp.Add(10 * time.Second) + durangoTime = activationTime + ) + coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + defer func() { + require.NoError(proVM.Shutdown(context.Background())) + }() // create pre fork oracle block pre activation time... oracleCoreBlk := &TestOptionsBlock{ @@ -205,27 +205,17 @@ func TestOracle_PostForkBlkCanBuiltOnPreForkOption(t *testing.T) { } parentBlk, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatal("could not build pre fork oracle block") - } + require.NoError(err) // retrieve options ... 
- preForkOracleBlk, ok := parentBlk.(*preForkBlock) - if !ok { - t.Fatal("expected pre fork block") - } + require.IsType(&preForkBlock{}, parentBlk) + preForkOracleBlk := parentBlk.(*preForkBlock) opts, err := preForkOracleBlk.Options(context.Background()) - if err != nil { - t.Fatal("could not retrieve options from pre fork oracle block") - } - if err := opts[0].Verify(context.Background()); err != nil { - t.Fatal("option should verify") - } + require.NoError(err) + require.NoError(opts[0].Verify(context.Background())) // ... show a block can be built on top of an option - if err := proVM.SetPreference(context.Background(), opts[0].ID()); err != nil { - t.Fatal("could not set preference") - } + require.NoError(proVM.SetPreference(context.Background(), opts[0].ID())) lastCoreBlk := &TestOptionsBlock{ TestBlock: snowman.TestBlock{ @@ -242,24 +232,26 @@ func TestOracle_PostForkBlkCanBuiltOnPreForkOption(t *testing.T) { } postForkChild, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatal("could not build pre fork block on pre fork option block") - } - if _, ok := postForkChild.(*postForkBlock); !ok { - t.Fatal("expected pre fork block built on pre fork option block") - } + require.NoError(err) + require.IsType(&postForkBlock{}, postForkChild) } func TestBlockVerify_PreFork_ParentChecks(t *testing.T) { - activationTime := genesisTimestamp.Add(10 * time.Second) - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, 0) + require := require.New(t) - if !coreGenBlk.Timestamp().Before(activationTime) { - t.Fatal("This test requires parent block 's timestamp to be before fork activation time") - } + var ( + activationTime = genesisTimestamp.Add(10 * time.Second) + durangoTime = activationTime + ) + coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + defer func() { + require.NoError(proVM.Shutdown(context.Background())) + }() + + require.True(coreGenBlk.Timestamp().Before(activationTime)) // 
create parent block ... - prntCoreBlk := &snowman.TestBlock{ + parentCoreBlk := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ IDV: ids.Empty.Prefix(1111), StatusV: choices.Processing, @@ -269,14 +261,14 @@ func TestBlockVerify_PreFork_ParentChecks(t *testing.T) { TimestampV: coreGenBlk.Timestamp(), } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { - return prntCoreBlk, nil + return parentCoreBlk, nil } coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case coreGenBlk.ID(): return coreGenBlk, nil - case prntCoreBlk.ID(): - return prntCoreBlk, nil + case parentCoreBlk.ID(): + return parentCoreBlk, nil default: return nil, database.ErrNotFound } @@ -285,18 +277,15 @@ func TestBlockVerify_PreFork_ParentChecks(t *testing.T) { switch { case bytes.Equal(b, coreGenBlk.Bytes()): return coreGenBlk, nil - case bytes.Equal(b, prntCoreBlk.Bytes()): - return prntCoreBlk, nil + case bytes.Equal(b, parentCoreBlk.Bytes()): + return parentCoreBlk, nil default: return nil, database.ErrNotFound } } - proVM.Set(proVM.Time().Add(proposer.MaxDelay)) - prntProBlk, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatal("Could not build proposer block") - } + parentBlk, err := proVM.BuildBlock(context.Background()) + require.NoError(err) // .. create child block ... 
childCoreBlk := &snowman.TestBlock{ @@ -305,33 +294,40 @@ func TestBlockVerify_PreFork_ParentChecks(t *testing.T) { StatusV: choices.Processing, }, BytesV: []byte{2}, - TimestampV: prntCoreBlk.Timestamp().Add(proposer.MaxDelay), + TimestampV: parentCoreBlk.Timestamp(), } - childProBlk := preForkBlock{ + childBlk := preForkBlock{ Block: childCoreBlk, vm: proVM, } - // child block referring unknown parent does not verify - childCoreBlk.ParentV = ids.Empty - err = childProBlk.Verify(context.Background()) - if err == nil { - t.Fatal("Block with unknown parent should not verify") + { + // child block referring unknown parent does not verify + childCoreBlk.ParentV = ids.Empty + err = childBlk.Verify(context.Background()) + require.ErrorIs(err, database.ErrNotFound) } - // child block referring known parent does verify - childCoreBlk.ParentV = prntProBlk.ID() - if err := childProBlk.Verify(context.Background()); err != nil { - t.Fatal("Block with known parent should verify") + { + // child block referring known parent does verify + childCoreBlk.ParentV = parentBlk.ID() + require.NoError(childBlk.Verify(context.Background())) } } func TestBlockVerify_BlocksBuiltOnPreForkGenesis(t *testing.T) { - activationTime := genesisTimestamp.Add(10 * time.Second) - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, 0) - if !coreGenBlk.Timestamp().Before(activationTime) { - t.Fatal("This test requires parent block 's timestamp to be before fork activation time") - } + require := require.New(t) + + var ( + activationTime = genesisTimestamp.Add(10 * time.Second) + durangoTime = activationTime + ) + coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + defer func() { + require.NoError(proVM.Shutdown(context.Background())) + }() + + require.True(coreGenBlk.Timestamp().Before(activationTime)) preActivationTime := activationTime.Add(-1 * time.Second) proVM.Set(preActivationTime) @@ -351,29 +347,22 @@ func 
TestBlockVerify_BlocksBuiltOnPreForkGenesis(t *testing.T) { // preFork block verifies if parent is before fork activation time preForkChild, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatalf("unexpectedly could not build block due to %s", err) - } else if _, ok := preForkChild.(*preForkBlock); !ok { - t.Fatal("expected preForkBlock") - } + require.NoError(err) + require.IsType(&preForkBlock{}, preForkChild) - if err := preForkChild.Verify(context.Background()); err != nil { - t.Fatal("pre Fork blocks should verify before fork") - } + require.NoError(preForkChild.Verify(context.Background())) // postFork block does NOT verify if parent is before fork activation time - postForkStatelessChild, err := block.Build( + postForkStatelessChild, err := statelessblock.Build( coreGenBlk.ID(), coreBlk.Timestamp(), 0, // pChainHeight - proVM.stakingCertLeaf, + proVM.StakingCertLeaf, coreBlk.Bytes(), proVM.ctx.ChainID, - proVM.stakingLeafSigner, + proVM.StakingLeafSigner, ) - if err != nil { - t.Fatalf("unexpectedly could not build block due to %s", err) - } + require.NoError(err) postForkChild := &postForkBlock{ SignedBlock: postForkStatelessChild, postForkCommonComponents: postForkCommonComponents{ @@ -383,23 +372,18 @@ func TestBlockVerify_BlocksBuiltOnPreForkGenesis(t *testing.T) { }, } - if !postForkChild.Timestamp().Before(activationTime) { - t.Fatal("This test requires postForkChild to be before fork activation time") - } - if err := postForkChild.Verify(context.Background()); err == nil { - t.Fatal("post Fork blocks should NOT verify before fork") - } + require.True(postForkChild.Timestamp().Before(activationTime)) + err = postForkChild.Verify(context.Background()) + require.ErrorIs(err, errProposersNotActivated) // once activation time is crossed postForkBlock are produced postActivationTime := activationTime.Add(time.Second) proVM.Set(postActivationTime) - coreVM.SetPreferenceF = func(_ context.Context, id ids.ID) error { + coreVM.SetPreferenceF 
= func(context.Context, ids.ID) error { return nil } - if err := proVM.SetPreference(context.Background(), preForkChild.ID()); err != nil { - t.Fatal("could not set preference") - } + require.NoError(proVM.SetPreference(context.Background(), preForkChild.ID())) secondCoreBlk := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ @@ -420,25 +404,18 @@ func TestBlockVerify_BlocksBuiltOnPreForkGenesis(t *testing.T) { case coreBlk.ID(): return coreBlk, nil default: - t.Fatal("attempt to get unknown block") + require.FailNow("attempt to get unknown block") return nil, nil } } lastPreForkBlk, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatalf("unexpectedly could not build block due to %s", err) - } else if _, ok := lastPreForkBlk.(*preForkBlock); !ok { - t.Fatal("expected preForkBlock") - } + require.NoError(err) + require.IsType(&preForkBlock{}, lastPreForkBlk) - if err := lastPreForkBlk.Verify(context.Background()); err != nil { - t.Fatal("pre Fork blocks should verify before fork") - } + require.NoError(lastPreForkBlk.Verify(context.Background())) - if err := proVM.SetPreference(context.Background(), lastPreForkBlk.ID()); err != nil { - t.Fatal("could not set preference") - } + require.NoError(proVM.SetPreference(context.Background(), lastPreForkBlk.ID())) thirdCoreBlk := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ IDV: ids.Empty.Prefix(333), @@ -460,27 +437,30 @@ func TestBlockVerify_BlocksBuiltOnPreForkGenesis(t *testing.T) { case secondCoreBlk.ID(): return secondCoreBlk, nil default: - t.Fatal("attempt to get unknown block") + require.FailNow("attempt to get unknown block") return nil, nil } } firstPostForkBlk, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatalf("unexpectedly could not build block due to %s", err) - } else if _, ok := firstPostForkBlk.(*postForkBlock); !ok { - t.Fatal("expected preForkBlock") - } + require.NoError(err) + require.IsType(&postForkBlock{}, firstPostForkBlk) - if err := 
firstPostForkBlk.Verify(context.Background()); err != nil { - t.Fatal("pre Fork blocks should verify before fork") - } + require.NoError(firstPostForkBlk.Verify(context.Background())) } func TestBlockVerify_BlocksBuiltOnPostForkGenesis(t *testing.T) { - activationTime := genesisTimestamp.Add(-1 * time.Second) - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, 0) + require := require.New(t) + + var ( + activationTime = genesisTimestamp.Add(-1 * time.Second) + durangoTime = activationTime + ) + coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) proVM.Set(activationTime) + defer func() { + require.NoError(proVM.Shutdown(context.Background())) + }() // build parent block after fork activation time ... coreBlock := &snowman.TestBlock{ @@ -499,29 +479,32 @@ func TestBlockVerify_BlocksBuiltOnPostForkGenesis(t *testing.T) { // postFork block verifies if parent is after fork activation time postForkChild, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatalf("unexpectedly could not build block due to %s", err) - } else if _, ok := postForkChild.(*postForkBlock); !ok { - t.Fatal("expected postForkBlock") - } + require.NoError(err) + require.IsType(&postForkBlock{}, postForkChild) - if err := postForkChild.Verify(context.Background()); err != nil { - t.Fatal("post Fork blocks should verify after fork") - } + require.NoError(postForkChild.Verify(context.Background())) // preFork block does NOT verify if parent is after fork activation time preForkChild := preForkBlock{ Block: coreBlock, vm: proVM, } - if err := preForkChild.Verify(context.Background()); err == nil { - t.Fatal("pre Fork blocks should NOT verify after fork") - } + err = preForkChild.Verify(context.Background()) + require.ErrorIs(err, errUnexpectedBlockType) } func TestBlockAccept_PreFork_SetsLastAcceptedBlock(t *testing.T) { + require := require.New(t) + // setup - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, 
mockable.MaxTime, 0) + var ( + activationTime = mockable.MaxTime + durangoTime = activationTime + ) + coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + defer func() { + require.NoError(proVM.Shutdown(context.Background())) + }() coreBlk := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ @@ -556,14 +539,10 @@ func TestBlockAccept_PreFork_SetsLastAcceptedBlock(t *testing.T) { } builtBlk, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatal("proposerVM could not build block") - } + require.NoError(err) // test - if err := builtBlk.Accept(context.Background()); err != nil { - t.Fatal("could not accept block") - } + require.NoError(builtBlk.Accept(context.Background())) coreVM.LastAcceptedF = func(context.Context) (ids.ID, error) { if coreBlk.Status() == choices.Accepted { @@ -571,16 +550,24 @@ func TestBlockAccept_PreFork_SetsLastAcceptedBlock(t *testing.T) { } return coreGenBlk.ID(), nil } - if acceptedID, err := proVM.LastAccepted(context.Background()); err != nil { - t.Fatal("could not retrieve last accepted block") - } else if acceptedID != builtBlk.ID() { - t.Fatal("unexpected last accepted ID") - } + acceptedID, err := proVM.LastAccepted(context.Background()) + require.NoError(err) + require.Equal(builtBlk.ID(), acceptedID) } // ProposerBlock.Reject tests section func TestBlockReject_PreForkBlock_InnerBlockIsRejected(t *testing.T) { - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, mockable.MaxTime, 0) // disable ProBlks + require := require.New(t) + + var ( + activationTime = mockable.MaxTime + durangoTime = activationTime + ) + coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + defer func() { + require.NoError(proVM.Shutdown(context.Background())) + }() + coreBlk := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ IDV: ids.Empty.Prefix(111), @@ -595,33 +582,28 @@ func TestBlockReject_PreForkBlock_InnerBlockIsRejected(t *testing.T) { 
} sb, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatal("could not build block") - } - proBlk, ok := sb.(*preForkBlock) - if !ok { - t.Fatal("built block has not expected type") - } - - if err := proBlk.Reject(context.Background()); err != nil { - t.Fatal("could not reject block") - } - - if proBlk.Status() != choices.Rejected { - t.Fatal("block rejection did not set state properly") - } + require.NoError(err) + require.IsType(&preForkBlock{}, sb) + proBlk := sb.(*preForkBlock) - if proBlk.Block.Status() != choices.Rejected { - t.Fatal("block rejection did not set state properly") - } + require.NoError(proBlk.Reject(context.Background())) + require.Equal(choices.Rejected, proBlk.Status()) + require.Equal(choices.Rejected, proBlk.Block.Status()) } func TestBlockVerify_ForkBlockIsOracleBlock(t *testing.T) { - activationTime := genesisTimestamp.Add(10 * time.Second) - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, 0) - if !coreGenBlk.Timestamp().Before(activationTime) { - t.Fatal("This test requires parent block 's timestamp to be before fork activation time") - } + require := require.New(t) + + var ( + activationTime = genesisTimestamp.Add(10 * time.Second) + durangoTime = activationTime + ) + coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + defer func() { + require.NoError(proVM.Shutdown(context.Background())) + }() + + require.True(coreGenBlk.Timestamp().Before(activationTime)) postActivationTime := activationTime.Add(time.Second) proVM.Set(postActivationTime) @@ -688,39 +670,34 @@ func TestBlockVerify_ForkBlockIsOracleBlock(t *testing.T) { } firstBlock, err := proVM.ParseBlock(context.Background(), coreBlk.Bytes()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) - if err := firstBlock.Verify(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(firstBlock.Verify(context.Background())) oracleBlock, ok := firstBlock.(snowman.OracleBlock) - if 
!ok { - t.Fatal("should have returned an oracle block") - } + require.True(ok) options, err := oracleBlock.Options(context.Background()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) - if err := options[0].Verify(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(options[0].Verify(context.Background())) - if err := options[1].Verify(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(options[1].Verify(context.Background())) } func TestBlockVerify_ForkBlockIsOracleBlockButChildrenAreSigned(t *testing.T) { - activationTime := genesisTimestamp.Add(10 * time.Second) - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, 0) - if !coreGenBlk.Timestamp().Before(activationTime) { - t.Fatal("This test requires parent block 's timestamp to be before fork activation time") - } + require := require.New(t) + + var ( + activationTime = genesisTimestamp.Add(10 * time.Second) + durangoTime = activationTime + ) + coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + defer func() { + require.NoError(proVM.Shutdown(context.Background())) + }() + + require.True(coreGenBlk.Timestamp().Before(activationTime)) postActivationTime := activationTime.Add(time.Second) proVM.Set(postActivationTime) @@ -787,26 +764,20 @@ func TestBlockVerify_ForkBlockIsOracleBlockButChildrenAreSigned(t *testing.T) { } firstBlock, err := proVM.ParseBlock(context.Background(), coreBlk.Bytes()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) - if err := firstBlock.Verify(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(firstBlock.Verify(context.Background())) - slb, err := block.Build( + slb, err := statelessblock.Build( firstBlock.ID(), // refer unknown parent firstBlock.Timestamp(), 0, // pChainHeight, - proVM.stakingCertLeaf, + proVM.StakingCertLeaf, coreBlk.opts[0].Bytes(), proVM.ctx.ChainID, - proVM.stakingLeafSigner, + proVM.StakingLeafSigner, ) - if err != nil 
{ - t.Fatal("could not build stateless block") - } + require.NoError(err) invalidChild, err := proVM.ParseBlock(context.Background(), slb.Bytes()) if err != nil { @@ -815,9 +786,7 @@ func TestBlockVerify_ForkBlockIsOracleBlockButChildrenAreSigned(t *testing.T) { } err = invalidChild.Verify(context.Background()) - if err == nil { - t.Fatal("Should have failed to verify a child that was signed when it should be a pre fork block") - } + require.ErrorIs(err, errUnexpectedBlockType) } // Assert that when the underlying VM implements ChainVMWithBuildBlockContext @@ -826,7 +795,6 @@ func TestBlockVerify_ForkBlockIsOracleBlockButChildrenAreSigned(t *testing.T) { func TestPreForkBlock_BuildBlockWithContext(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() pChainHeight := uint64(1337) blkID := ids.GenerateTestID() @@ -837,7 +805,7 @@ func TestPreForkBlock_BuildBlockWithContext(t *testing.T) { builtBlk.EXPECT().Bytes().Return([]byte{1, 2, 3}).AnyTimes() builtBlk.EXPECT().ID().Return(ids.GenerateTestID()).AnyTimes() builtBlk.EXPECT().Height().Return(pChainHeight).AnyTimes() - innerVM := mocks.NewMockChainVM(ctrl) + innerVM := block.NewMockChainVM(ctrl) innerVM.EXPECT().BuildBlock(gomock.Any()).Return(builtBlk, nil).AnyTimes() vdrState := validators.NewMockState(ctrl) vdrState.EXPECT().GetMinimumHeight(context.Background()).Return(pChainHeight, nil).AnyTimes() @@ -862,7 +830,7 @@ func TestPreForkBlock_BuildBlockWithContext(t *testing.T) { // Should call BuildBlock since proposervm is not activated innerBlk.EXPECT().Timestamp().Return(time.Time{}) - vm.activationTime = mockable.MaxTime + vm.ActivationTime = mockable.MaxTime gotChild, err = blk.buildChild(context.Background()) require.NoError(err) diff --git a/avalanchego/vms/proposervm/proposer/mock_windower.go b/avalanchego/vms/proposervm/proposer/mock_windower.go index 0d6c9b45..5384da8c 100644 --- a/avalanchego/vms/proposervm/proposer/mock_windower.go +++ 
b/avalanchego/vms/proposervm/proposer/mock_windower.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/vms/proposervm/proposer (interfaces: Windower) +// +// Generated by this command: +// +// mockgen -package=proposer -destination=vms/proposervm/proposer/mock_windower.go github.com/ava-labs/avalanchego/vms/proposervm/proposer Windower +// // Package proposer is a generated GoMock package. package proposer @@ -13,7 +15,7 @@ import ( time "time" ids "github.com/ava-labs/avalanchego/ids" - gomock "github.com/golang/mock/gomock" + gomock "go.uber.org/mock/gomock" ) // MockWindower is a mock of Windower interface. @@ -40,31 +42,61 @@ func (m *MockWindower) EXPECT() *MockWindowerMockRecorder { } // Delay mocks base method. -func (m *MockWindower) Delay(arg0 context.Context, arg1, arg2 uint64, arg3 ids.NodeID) (time.Duration, error) { +func (m *MockWindower) Delay(arg0 context.Context, arg1, arg2 uint64, arg3 ids.NodeID, arg4 int) (time.Duration, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Delay", arg0, arg1, arg2, arg3) + ret := m.ctrl.Call(m, "Delay", arg0, arg1, arg2, arg3, arg4) ret0, _ := ret[0].(time.Duration) ret1, _ := ret[1].(error) return ret0, ret1 } // Delay indicates an expected call of Delay. -func (mr *MockWindowerMockRecorder) Delay(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +func (mr *MockWindowerMockRecorder) Delay(arg0, arg1, arg2, arg3, arg4 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delay", reflect.TypeOf((*MockWindower)(nil).Delay), arg0, arg1, arg2, arg3, arg4) +} + +// ExpectedProposer mocks base method. 
+func (m *MockWindower) ExpectedProposer(arg0 context.Context, arg1, arg2, arg3 uint64) (ids.NodeID, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ExpectedProposer", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(ids.NodeID) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ExpectedProposer indicates an expected call of ExpectedProposer. +func (mr *MockWindowerMockRecorder) ExpectedProposer(arg0, arg1, arg2, arg3 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExpectedProposer", reflect.TypeOf((*MockWindower)(nil).ExpectedProposer), arg0, arg1, arg2, arg3) +} + +// MinDelayForProposer mocks base method. +func (m *MockWindower) MinDelayForProposer(arg0 context.Context, arg1, arg2 uint64, arg3 ids.NodeID, arg4 uint64) (time.Duration, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MinDelayForProposer", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(time.Duration) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MinDelayForProposer indicates an expected call of MinDelayForProposer. +func (mr *MockWindowerMockRecorder) MinDelayForProposer(arg0, arg1, arg2, arg3, arg4 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delay", reflect.TypeOf((*MockWindower)(nil).Delay), arg0, arg1, arg2, arg3) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MinDelayForProposer", reflect.TypeOf((*MockWindower)(nil).MinDelayForProposer), arg0, arg1, arg2, arg3, arg4) } // Proposers mocks base method. 
-func (m *MockWindower) Proposers(arg0 context.Context, arg1, arg2 uint64) ([]ids.NodeID, error) { +func (m *MockWindower) Proposers(arg0 context.Context, arg1, arg2 uint64, arg3 int) ([]ids.NodeID, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Proposers", arg0, arg1, arg2) + ret := m.ctrl.Call(m, "Proposers", arg0, arg1, arg2, arg3) ret0, _ := ret[0].([]ids.NodeID) ret1, _ := ret[1].(error) return ret0, ret1 } // Proposers indicates an expected call of Proposers. -func (mr *MockWindowerMockRecorder) Proposers(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockWindowerMockRecorder) Proposers(arg0, arg1, arg2, arg3 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Proposers", reflect.TypeOf((*MockWindower)(nil).Proposers), arg0, arg1, arg2) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Proposers", reflect.TypeOf((*MockWindower)(nil).Proposers), arg0, arg1, arg2, arg3) } diff --git a/avalanchego/vms/proposervm/proposer/validators.go b/avalanchego/vms/proposervm/proposer/validators.go index ba60a088..6af99618 100644 --- a/avalanchego/vms/proposervm/proposer/validators.go +++ b/avalanchego/vms/proposervm/proposer/validators.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package proposer @@ -15,6 +15,6 @@ type validatorData struct { weight uint64 } -func (d validatorData) Less(other validatorData) bool { - return d.id.Less(other.id) +func (d validatorData) Compare(other validatorData) int { + return d.id.Compare(other.id) } diff --git a/avalanchego/vms/proposervm/proposer/validators_test.go b/avalanchego/vms/proposervm/proposer/validators_test.go index a0703d49..e86f2c80 100644 --- a/avalanchego/vms/proposervm/proposer/validators_test.go +++ b/avalanchego/vms/proposervm/proposer/validators_test.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package proposer import ( + "fmt" "testing" "github.com/stretchr/testify/require" @@ -11,16 +12,31 @@ import ( "github.com/ava-labs/avalanchego/ids" ) -func TestValidatorDataLess(t *testing.T) { - require := require.New(t) - - var v1, v2 validatorData - require.False(v1.Less(v2)) - require.False(v2.Less(v1)) +func TestValidatorDataCompare(t *testing.T) { + tests := []struct { + a validatorData + b validatorData + expected int + }{ + { + a: validatorData{}, + b: validatorData{}, + expected: 0, + }, + { + a: validatorData{ + id: ids.BuildTestNodeID([]byte{1}), + }, + b: validatorData{}, + expected: 1, + }, + } + for _, test := range tests { + t.Run(fmt.Sprintf("%s_%s_%d", test.a.id, test.b.id, test.expected), func(t *testing.T) { + require := require.New(t) - v1 = validatorData{ - id: ids.NodeID{1}, + require.Equal(test.expected, test.a.Compare(test.b)) + require.Equal(-test.expected, test.b.Compare(test.a)) + }) } - require.False(v1.Less(v2)) - require.True(v2.Less(v1)) } diff --git a/avalanchego/vms/proposervm/proposer/windower.go b/avalanchego/vms/proposervm/proposer/windower.go index 4f67b279..b9a633c7 100644 --- a/avalanchego/vms/proposervm/proposer/windower.go +++ b/avalanchego/vms/proposervm/proposer/windower.go @@ -1,12 +1,17 @@ -// Copyright 
(C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package proposer import ( "context" + "errors" + "fmt" + "math/bits" "time" + "gonum.org/v1/gonum/mathext/prng" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils" @@ -17,31 +22,76 @@ import ( // Proposer list constants const ( - MaxWindows = 6 WindowDuration = 5 * time.Second - MaxDelay = MaxWindows * WindowDuration + + MaxVerifyWindows = 6 + MaxVerifyDelay = MaxVerifyWindows * WindowDuration // 30 seconds + + MaxBuildWindows = 60 + MaxBuildDelay = MaxBuildWindows * WindowDuration // 5 minutes + + MaxLookAheadSlots = 720 + MaxLookAheadWindow = MaxLookAheadSlots * WindowDuration // 1 hour ) -var _ Windower = (*windower)(nil) +var ( + _ Windower = (*windower)(nil) + + ErrAnyoneCanPropose = errors.New("anyone can propose") +) type Windower interface { - // Proposers returns the proposer list for building a block at [chainHeight] + // Proposers returns the proposer list for building a block at [blockHeight] // when the validator set is defined at [pChainHeight]. The list is returned // in order. The minimum delay of a validator is the index they appear times // [WindowDuration]. Proposers( ctx context.Context, - chainHeight, + blockHeight, pChainHeight uint64, + maxWindows int, ) ([]ids.NodeID, error) + // Delay returns the amount of time that [validatorID] must wait before - // building a block at [chainHeight] when the validator set is defined at + // building a block at [blockHeight] when the validator set is defined at // [pChainHeight]. 
Delay( ctx context.Context, - chainHeight, + blockHeight, pChainHeight uint64, validatorID ids.NodeID, + maxWindows int, + ) (time.Duration, error) + + // In the Post-Durango windowing scheme, every validator active at + // [pChainHeight] gets specific slots it can propose in (instead of being + // able to propose from a given time on as it happens Pre-Durango). + // [ExpectedProposer] calculates which nodeID is scheduled to propose a + // block of height [blockHeight] at [slot]. + // If no validators are currently available, [ErrAnyoneCanPropose] is + // returned. + ExpectedProposer( + ctx context.Context, + blockHeight, + pChainHeight, + slot uint64, + ) (ids.NodeID, error) + + // In the Post-Durango windowing scheme, every validator active at + // [pChainHeight] gets specific slots it can propose in (instead of being + // able to propose from a given time on as it happens Pre-Durango). + // [MinDelayForProposer] specifies how long [nodeID] needs to wait for its + // slot to start. Delay is specified as starting from slot zero start. + // (which is parent timestamp). For efficiency reasons, we cap the slot + // search to [MaxLookAheadSlots]. + // If no validators are currently available, [ErrAnyoneCanPropose] is + // returned. 
+ MinDelayForProposer( + ctx context.Context, + blockHeight, + pChainHeight uint64, + nodeID ids.NodeID, + startSlot uint64, ) (time.Duration, error) } @@ -51,7 +101,6 @@ type windower struct { state validators.State subnetID ids.ID chainSource uint64 - sampler sampler.WeightedWithoutReplacement } func New(state validators.State, subnetID, chainID ids.ID) Windower { @@ -60,56 +109,30 @@ func New(state validators.State, subnetID, chainID ids.ID) Windower { state: state, subnetID: subnetID, chainSource: w.UnpackLong(), - sampler: sampler.NewDeterministicWeightedWithoutReplacement(), } } -func (w *windower) Proposers(ctx context.Context, chainHeight, pChainHeight uint64) ([]ids.NodeID, error) { - // get the validator set by the p-chain height - validatorsMap, err := w.state.GetValidatorSet(ctx, pChainHeight, w.subnetID) +func (w *windower) Proposers(ctx context.Context, blockHeight, pChainHeight uint64, maxWindows int) ([]ids.NodeID, error) { + // Note: The 32-bit prng is used here for legacy reasons. All other usages + // of a prng in this file should use the 64-bit version. 
+ source := prng.NewMT19937() + sampler, validators, err := w.makeSampler(ctx, pChainHeight, source) if err != nil { return nil, err } - // convert the map of validators to a slice - validators := make([]validatorData, 0, len(validatorsMap)) - weight := uint64(0) - for k, v := range validatorsMap { - validators = append(validators, validatorData{ - id: k, - weight: v.Weight, - }) - newWeight, err := math.Add64(weight, v.Weight) + var totalWeight uint64 + for _, validator := range validators { + totalWeight, err = math.Add64(totalWeight, validator.weight) if err != nil { return nil, err } - weight = newWeight - } - - // canonically sort validators - // Note: validators are sorted by ID, sorting by weight would not create a - // canonically sorted list - utils.Sort(validators) - - // convert the slice of validators to a slice of weights - validatorWeights := make([]uint64, len(validators)) - for i, v := range validators { - validatorWeights[i] = v.weight - } - - if err := w.sampler.Initialize(validatorWeights); err != nil { - return nil, err - } - - numToSample := MaxWindows - if weight < uint64(numToSample) { - numToSample = int(weight) } - seed := chainHeight ^ w.chainSource - w.sampler.Seed(int64(seed)) + source.Seed(w.chainSource ^ blockHeight) - indices, err := w.sampler.Sample(numToSample) + numToSample := int(min(uint64(maxWindows), totalWeight)) + indices, err := sampler.Sample(numToSample) if err != nil { return nil, err } @@ -121,12 +144,12 @@ func (w *windower) Proposers(ctx context.Context, chainHeight, pChainHeight uint return nodeIDs, nil } -func (w *windower) Delay(ctx context.Context, chainHeight, pChainHeight uint64, validatorID ids.NodeID) (time.Duration, error) { +func (w *windower) Delay(ctx context.Context, blockHeight, pChainHeight uint64, validatorID ids.NodeID, maxWindows int) (time.Duration, error) { if validatorID == ids.EmptyNodeID { - return MaxDelay, nil + return time.Duration(maxWindows) * WindowDuration, nil } - proposers, err := 
w.Proposers(ctx, chainHeight, pChainHeight) + proposers, err := w.Proposers(ctx, blockHeight, pChainHeight, maxWindows) if err != nil { return 0, err } @@ -140,3 +163,124 @@ func (w *windower) Delay(ctx context.Context, chainHeight, pChainHeight uint64, } return delay, nil } + +func (w *windower) ExpectedProposer( + ctx context.Context, + blockHeight, + pChainHeight, + slot uint64, +) (ids.NodeID, error) { + source := prng.NewMT19937_64() + sampler, validators, err := w.makeSampler(ctx, pChainHeight, source) + if err != nil { + return ids.EmptyNodeID, err + } + if len(validators) == 0 { + return ids.EmptyNodeID, ErrAnyoneCanPropose + } + + return w.expectedProposer( + validators, + source, + sampler, + blockHeight, + slot, + ) +} + +func (w *windower) MinDelayForProposer( + ctx context.Context, + blockHeight, + pChainHeight uint64, + nodeID ids.NodeID, + startSlot uint64, +) (time.Duration, error) { + source := prng.NewMT19937_64() + sampler, validators, err := w.makeSampler(ctx, pChainHeight, source) + if err != nil { + return 0, err + } + if len(validators) == 0 { + return 0, ErrAnyoneCanPropose + } + + maxSlot := startSlot + MaxLookAheadSlots + for slot := startSlot; slot < maxSlot; slot++ { + expectedNodeID, err := w.expectedProposer( + validators, + source, + sampler, + blockHeight, + slot, + ) + if err != nil { + return 0, err + } + + if expectedNodeID == nodeID { + return time.Duration(slot) * WindowDuration, nil + } + } + + // no slots scheduled for the max window we inspect. Return max delay + return time.Duration(maxSlot) * WindowDuration, nil +} + +func (w *windower) makeSampler( + ctx context.Context, + pChainHeight uint64, + source sampler.Source, +) (sampler.WeightedWithoutReplacement, []validatorData, error) { + // Get the canconical representation of the validator set at the provided + // p-chain height. 
+ validatorsMap, err := w.state.GetValidatorSet(ctx, pChainHeight, w.subnetID) + if err != nil { + return nil, nil, err + } + + validators := make([]validatorData, 0, len(validatorsMap)) + for k, v := range validatorsMap { + validators = append(validators, validatorData{ + id: k, + weight: v.Weight, + }) + } + + // Note: validators are sorted by ID. Sorting by weight would not create a + // canonically sorted list. + utils.Sort(validators) + + weights := make([]uint64, len(validators)) + for i, validator := range validators { + weights[i] = validator.weight + } + + sampler := sampler.NewDeterministicWeightedWithoutReplacement(source) + return sampler, validators, sampler.Initialize(weights) +} + +func (w *windower) expectedProposer( + validators []validatorData, + source *prng.MT19937_64, + sampler sampler.WeightedWithoutReplacement, + blockHeight, + slot uint64, +) (ids.NodeID, error) { + // Slot is reversed to utilize a different state space in the seed than the + // height. If the slot was not reversed the state space would collide; + // biasing the seed generation. For example, without reversing the slot + // height=0 and slot=1 would equal height=1 and slot=0. + source.Seed(w.chainSource ^ blockHeight ^ bits.Reverse64(slot)) + indices, err := sampler.Sample(1) + if err != nil { + return ids.EmptyNodeID, fmt.Errorf("failed sampling proposers: %w", err) + } + return validators[indices[0]].id, nil +} + +func TimeToSlot(start, now time.Time) uint64 { + if now.Before(start) { + return 0 + } + return uint64(now.Sub(start) / WindowDuration) +} diff --git a/avalanchego/vms/proposervm/proposer/windower_test.go b/avalanchego/vms/proposervm/proposer/windower_test.go index ef7cc706..b3345181 100644 --- a/avalanchego/vms/proposervm/proposer/windower_test.go +++ b/avalanchego/vms/proposervm/proposer/windower_test.go @@ -1,10 +1,11 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. package proposer import ( "context" + "math" "math/rand" "testing" "time" @@ -13,35 +14,49 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/validators" + + safemath "github.com/ava-labs/avalanchego/utils/math" +) + +var ( + subnetID = ids.GenerateTestID() + randomChainID = ids.GenerateTestID() + fixedChainID = ids.ID{0, 2} ) func TestWindowerNoValidators(t *testing.T) { require := require.New(t) - subnetID := ids.GenerateTestID() - chainID := ids.GenerateTestID() - nodeID := ids.GenerateTestNodeID() - vdrState := &validators.TestState{ - T: t, - GetValidatorSetF: func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { - return nil, nil - }, - } - - w := New(vdrState, subnetID, chainID) + _, vdrState := makeValidators(t, 0) + w := New(vdrState, subnetID, randomChainID) - delay, err := w.Delay(context.Background(), 1, 0, nodeID) + var ( + chainHeight uint64 = 1 + pChainHeight uint64 = 0 + nodeID = ids.GenerateTestNodeID() + slot uint64 = 1 + ) + delay, err := w.Delay(context.Background(), chainHeight, pChainHeight, nodeID, MaxVerifyWindows) require.NoError(err) - require.EqualValues(0, delay) + require.Zero(delay) + + proposer, err := w.ExpectedProposer(context.Background(), chainHeight, pChainHeight, slot) + require.ErrorIs(err, ErrAnyoneCanPropose) + require.Equal(ids.EmptyNodeID, proposer) + + delay, err = w.MinDelayForProposer(context.Background(), chainHeight, pChainHeight, nodeID, slot) + require.ErrorIs(err, ErrAnyoneCanPropose) + require.Zero(delay) } func TestWindowerRepeatedValidator(t *testing.T) { require := require.New(t) - subnetID := ids.GenerateTestID() - chainID := ids.GenerateTestID() - validatorID := ids.GenerateTestNodeID() - nonValidatorID := ids.GenerateTestNodeID() + var ( + validatorID = ids.GenerateTestNodeID() + nonValidatorID = ids.GenerateTestNodeID() + ) + vdrState := &validators.TestState{ T: t, GetValidatorSetF: 
func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { @@ -54,41 +69,22 @@ func TestWindowerRepeatedValidator(t *testing.T) { }, } - w := New(vdrState, subnetID, chainID) + w := New(vdrState, subnetID, randomChainID) - validatorDelay, err := w.Delay(context.Background(), 1, 0, validatorID) + validatorDelay, err := w.Delay(context.Background(), 1, 0, validatorID, MaxVerifyWindows) require.NoError(err) - require.EqualValues(0, validatorDelay) + require.Zero(validatorDelay) - nonValidatorDelay, err := w.Delay(context.Background(), 1, 0, nonValidatorID) + nonValidatorDelay, err := w.Delay(context.Background(), 1, 0, nonValidatorID, MaxVerifyWindows) require.NoError(err) - require.EqualValues(MaxDelay, nonValidatorDelay) + require.Equal(MaxVerifyDelay, nonValidatorDelay) } -func TestWindowerChangeByHeight(t *testing.T) { +func TestDelayChangeByHeight(t *testing.T) { require := require.New(t) - subnetID := ids.ID{0, 1} - chainID := ids.ID{0, 2} - validatorIDs := make([]ids.NodeID, MaxWindows) - for i := range validatorIDs { - validatorIDs[i] = ids.NodeID{byte(i + 1)} - } - vdrState := &validators.TestState{ - T: t, - GetValidatorSetF: func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { - vdrs := make(map[ids.NodeID]*validators.GetValidatorOutput, MaxWindows) - for _, id := range validatorIDs { - vdrs[id] = &validators.GetValidatorOutput{ - NodeID: id, - Weight: 1, - } - } - return vdrs, nil - }, - } - - w := New(vdrState, subnetID, chainID) + validatorIDs, vdrState := makeValidators(t, MaxVerifyWindows) + w := New(vdrState, subnetID, fixedChainID) expectedDelays1 := []time.Duration{ 2 * WindowDuration, @@ -100,9 +96,9 @@ func TestWindowerChangeByHeight(t *testing.T) { } for i, expectedDelay := range expectedDelays1 { vdrID := validatorIDs[i] - validatorDelay, err := w.Delay(context.Background(), 1, 0, vdrID) + validatorDelay, err := w.Delay(context.Background(), 1, 0, vdrID, 
MaxVerifyWindows) require.NoError(err) - require.EqualValues(expectedDelay, validatorDelay) + require.Equal(expectedDelay, validatorDelay) } expectedDelays2 := []time.Duration{ @@ -115,41 +111,27 @@ func TestWindowerChangeByHeight(t *testing.T) { } for i, expectedDelay := range expectedDelays2 { vdrID := validatorIDs[i] - validatorDelay, err := w.Delay(context.Background(), 2, 0, vdrID) + validatorDelay, err := w.Delay(context.Background(), 2, 0, vdrID, MaxVerifyWindows) require.NoError(err) - require.EqualValues(expectedDelay, validatorDelay) + require.Equal(expectedDelay, validatorDelay) } } -func TestWindowerChangeByChain(t *testing.T) { +func TestDelayChangeByChain(t *testing.T) { require := require.New(t) - subnetID := ids.ID{0, 1} + source := rand.NewSource(int64(0)) + rng := rand.New(source) // #nosec G404 - rand.Seed(0) chainID0 := ids.ID{} - _, _ = rand.Read(chainID0[:]) // #nosec G404 - chainID1 := ids.ID{} - _, _ = rand.Read(chainID1[:]) // #nosec G404 + _, err := rng.Read(chainID0[:]) + require.NoError(err) - validatorIDs := make([]ids.NodeID, MaxWindows) - for i := range validatorIDs { - validatorIDs[i] = ids.NodeID{byte(i + 1)} - } - vdrState := &validators.TestState{ - T: t, - GetValidatorSetF: func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { - vdrs := make(map[ids.NodeID]*validators.GetValidatorOutput, MaxWindows) - for _, id := range validatorIDs { - vdrs[id] = &validators.GetValidatorOutput{ - NodeID: id, - Weight: 1, - } - } - return vdrs, nil - }, - } + chainID1 := ids.ID{} + _, err = rng.Read(chainID1[:]) + require.NoError(err) + validatorIDs, vdrState := makeValidators(t, MaxVerifyWindows) w0 := New(vdrState, subnetID, chainID0) w1 := New(vdrState, subnetID, chainID1) @@ -163,9 +145,9 @@ func TestWindowerChangeByChain(t *testing.T) { } for i, expectedDelay := range expectedDelays0 { vdrID := validatorIDs[i] - validatorDelay, err := w0.Delay(context.Background(), 1, 0, vdrID) + validatorDelay, err 
:= w0.Delay(context.Background(), 1, 0, vdrID, MaxVerifyWindows) require.NoError(err) - require.EqualValues(expectedDelay, validatorDelay) + require.Equal(expectedDelay, validatorDelay) } expectedDelays1 := []time.Duration{ @@ -178,8 +160,308 @@ func TestWindowerChangeByChain(t *testing.T) { } for i, expectedDelay := range expectedDelays1 { vdrID := validatorIDs[i] - validatorDelay, err := w1.Delay(context.Background(), 1, 0, vdrID) + validatorDelay, err := w1.Delay(context.Background(), 1, 0, vdrID, MaxVerifyWindows) + require.NoError(err) + require.Equal(expectedDelay, validatorDelay) + } +} + +func TestExpectedProposerChangeByHeight(t *testing.T) { + require := require.New(t) + + validatorIDs, vdrState := makeValidators(t, 10) + w := New(vdrState, subnetID, fixedChainID) + + var ( + dummyCtx = context.Background() + pChainHeight uint64 = 0 + slot uint64 = 0 + ) + + expectedProposers := map[uint64]ids.NodeID{ + 1: validatorIDs[2], + 2: validatorIDs[1], + } + + for chainHeight, expectedProposerID := range expectedProposers { + proposerID, err := w.ExpectedProposer(dummyCtx, chainHeight, pChainHeight, slot) + require.NoError(err) + require.Equal(expectedProposerID, proposerID) + } +} + +func TestExpectedProposerChangeByChain(t *testing.T) { + require := require.New(t) + + source := rand.NewSource(int64(0)) + rng := rand.New(source) // #nosec G404 + + chainID0 := ids.ID{} + _, err := rng.Read(chainID0[:]) + require.NoError(err) + + chainID1 := ids.ID{} + _, err = rng.Read(chainID1[:]) + require.NoError(err) + + validatorIDs, vdrState := makeValidators(t, 10) + + var ( + dummyCtx = context.Background() + chainHeight uint64 = 1 + pChainHeight uint64 = 0 + slot uint64 = 0 + ) + + expectedProposers := map[ids.ID]ids.NodeID{ + chainID0: validatorIDs[5], + chainID1: validatorIDs[3], + } + + for chainID, expectedProposerID := range expectedProposers { + w := New(vdrState, subnetID, chainID) + proposerID, err := w.ExpectedProposer(dummyCtx, chainHeight, pChainHeight, slot) 
+ require.NoError(err) + require.Equal(expectedProposerID, proposerID) + } +} + +func TestExpectedProposerChangeBySlot(t *testing.T) { + require := require.New(t) + + validatorIDs, vdrState := makeValidators(t, 10) + w := New(vdrState, subnetID, fixedChainID) + + var ( + dummyCtx = context.Background() + chainHeight uint64 = 1 + pChainHeight uint64 = 0 + ) + + proposers := []ids.NodeID{ + validatorIDs[2], + validatorIDs[0], + validatorIDs[9], + validatorIDs[7], + validatorIDs[0], + validatorIDs[3], + validatorIDs[3], + validatorIDs[3], + validatorIDs[3], + validatorIDs[3], + validatorIDs[4], + validatorIDs[0], + validatorIDs[6], + validatorIDs[3], + validatorIDs[2], + validatorIDs[1], + validatorIDs[6], + validatorIDs[0], + validatorIDs[5], + validatorIDs[1], + validatorIDs[9], + validatorIDs[6], + validatorIDs[0], + validatorIDs[8], + } + expectedProposers := map[uint64]ids.NodeID{ + MaxLookAheadSlots: validatorIDs[4], + MaxLookAheadSlots + 1: validatorIDs[6], + } + for slot, expectedProposerID := range proposers { + expectedProposers[uint64(slot)] = expectedProposerID + } + + for slot, expectedProposerID := range expectedProposers { + actualProposerID, err := w.ExpectedProposer(dummyCtx, chainHeight, pChainHeight, slot) + require.NoError(err) + require.Equal(expectedProposerID, actualProposerID) + } +} + +func TestCoherenceOfExpectedProposerAndMinDelayForProposer(t *testing.T) { + require := require.New(t) + + _, vdrState := makeValidators(t, 10) + w := New(vdrState, subnetID, fixedChainID) + + var ( + dummyCtx = context.Background() + chainHeight uint64 = 1 + pChainHeight uint64 = 0 + ) + + for slot := uint64(0); slot < 3*MaxLookAheadSlots; slot++ { + proposerID, err := w.ExpectedProposer(dummyCtx, chainHeight, pChainHeight, slot) + require.NoError(err) + + // proposerID is the scheduled proposer. 
It should start with the + // expected delay + delay, err := w.MinDelayForProposer(dummyCtx, chainHeight, pChainHeight, proposerID, slot) + require.NoError(err) + require.Equal(time.Duration(slot)*WindowDuration, delay) + } +} + +func TestMinDelayForProposer(t *testing.T) { + require := require.New(t) + + validatorIDs, vdrState := makeValidators(t, 10) + w := New(vdrState, subnetID, fixedChainID) + + var ( + dummyCtx = context.Background() + chainHeight uint64 = 1 + pChainHeight uint64 = 0 + slot uint64 = 0 + ) + + expectedDelays := map[ids.NodeID]time.Duration{ + validatorIDs[0]: 1 * WindowDuration, + validatorIDs[1]: 15 * WindowDuration, + validatorIDs[2]: 0 * WindowDuration, + validatorIDs[3]: 5 * WindowDuration, + validatorIDs[4]: 10 * WindowDuration, + validatorIDs[5]: 18 * WindowDuration, + validatorIDs[6]: 12 * WindowDuration, + validatorIDs[7]: 3 * WindowDuration, + validatorIDs[8]: 23 * WindowDuration, + validatorIDs[9]: 2 * WindowDuration, + ids.GenerateTestNodeID(): MaxLookAheadWindow, + } + + for nodeID, expectedDelay := range expectedDelays { + delay, err := w.MinDelayForProposer(dummyCtx, chainHeight, pChainHeight, nodeID, slot) + require.NoError(err) + require.Equal(expectedDelay, delay) + } +} + +func BenchmarkMinDelayForProposer(b *testing.B) { + require := require.New(b) + + _, vdrState := makeValidators(b, 10) + w := New(vdrState, subnetID, fixedChainID) + + var ( + dummyCtx = context.Background() + pChainHeight uint64 = 0 + chainHeight uint64 = 1 + nodeID = ids.GenerateTestNodeID() // Ensure to exhaust the search + slot uint64 = 0 + ) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := w.MinDelayForProposer(dummyCtx, chainHeight, pChainHeight, nodeID, slot) require.NoError(err) - require.EqualValues(expectedDelay, validatorDelay) } } + +func TestTimeToSlot(t *testing.T) { + parentTime := time.Now() + tests := []struct { + timeOffset time.Duration + expectedSlot uint64 + }{ + { + timeOffset: -WindowDuration, + expectedSlot: 0, + }, + { + 
timeOffset: -time.Second, + expectedSlot: 0, + }, + { + timeOffset: 0, + expectedSlot: 0, + }, + { + timeOffset: WindowDuration, + expectedSlot: 1, + }, + { + timeOffset: 2 * WindowDuration, + expectedSlot: 2, + }, + } + for _, test := range tests { + t.Run(test.timeOffset.String(), func(t *testing.T) { + slot := TimeToSlot(parentTime, parentTime.Add(test.timeOffset)) + require.Equal(t, test.expectedSlot, slot) + }) + } +} + +// Ensure that the proposer distribution is within 3 standard deviations of the +// expected value assuming a truly random binomial distribution. +func TestProposerDistribution(t *testing.T) { + require := require.New(t) + + validatorIDs, vdrState := makeValidators(t, 10) + w := New(vdrState, subnetID, fixedChainID) + + var ( + dummyCtx = context.Background() + pChainHeight uint64 = 0 + numChainHeights uint64 = 100 + numSlots uint64 = 100 + ) + + proposerFrequency := make(map[ids.NodeID]int) + for _, validatorID := range validatorIDs { + // Initialize the map to 0s to include validators that are never sampled + // in the analysis. 
+ proposerFrequency[validatorID] = 0 + } + for chainHeight := uint64(0); chainHeight < numChainHeights; chainHeight++ { + for slot := uint64(0); slot < numSlots; slot++ { + proposerID, err := w.ExpectedProposer(dummyCtx, chainHeight, pChainHeight, slot) + require.NoError(err) + proposerFrequency[proposerID]++ + } + } + + var ( + totalNumberOfSamples = numChainHeights * numSlots + probabilityOfBeingSampled = 1 / float64(len(validatorIDs)) + expectedNumberOfSamples = uint64(probabilityOfBeingSampled * float64(totalNumberOfSamples)) + variance = float64(totalNumberOfSamples) * probabilityOfBeingSampled * (1 - probabilityOfBeingSampled) + stdDeviation = math.Sqrt(variance) + maxDeviation uint64 + ) + for _, sampled := range proposerFrequency { + maxDeviation = max( + maxDeviation, + safemath.AbsDiff( + uint64(sampled), + expectedNumberOfSamples, + ), + ) + } + + maxSTDDeviation := float64(maxDeviation) / stdDeviation + require.Less(maxSTDDeviation, 3.) +} + +func makeValidators(t testing.TB, count int) ([]ids.NodeID, *validators.TestState) { + validatorIDs := make([]ids.NodeID, count) + for i := range validatorIDs { + validatorIDs[i] = ids.BuildTestNodeID([]byte{byte(i) + 1}) + } + + vdrState := &validators.TestState{ + T: t, + GetValidatorSetF: func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { + vdrs := make(map[ids.NodeID]*validators.GetValidatorOutput, MaxVerifyWindows) + for _, id := range validatorIDs { + vdrs[id] = &validators.GetValidatorOutput{ + NodeID: id, + Weight: 1, + } + } + return vdrs, nil + }, + } + return validatorIDs, vdrState +} diff --git a/avalanchego/vms/proposervm/scheduler/mock_scheduler.go b/avalanchego/vms/proposervm/scheduler/mock_scheduler.go new file mode 100644 index 00000000..f4a8f1e6 --- /dev/null +++ b/avalanchego/vms/proposervm/scheduler/mock_scheduler.go @@ -0,0 +1,76 @@ +// Code generated by MockGen. DO NOT EDIT. 
+// Source: github.com/ava-labs/avalanchego/vms/proposervm/scheduler (interfaces: Scheduler) +// +// Generated by this command: +// +// mockgen -package=scheduler -destination=vms/proposervm/scheduler/mock_scheduler.go github.com/ava-labs/avalanchego/vms/proposervm/scheduler Scheduler +// + +// Package scheduler is a generated GoMock package. +package scheduler + +import ( + reflect "reflect" + time "time" + + gomock "go.uber.org/mock/gomock" +) + +// MockScheduler is a mock of Scheduler interface. +type MockScheduler struct { + ctrl *gomock.Controller + recorder *MockSchedulerMockRecorder +} + +// MockSchedulerMockRecorder is the mock recorder for MockScheduler. +type MockSchedulerMockRecorder struct { + mock *MockScheduler +} + +// NewMockScheduler creates a new mock instance. +func NewMockScheduler(ctrl *gomock.Controller) *MockScheduler { + mock := &MockScheduler{ctrl: ctrl} + mock.recorder = &MockSchedulerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockScheduler) EXPECT() *MockSchedulerMockRecorder { + return m.recorder +} + +// Close mocks base method. +func (m *MockScheduler) Close() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Close") +} + +// Close indicates an expected call of Close. +func (mr *MockSchedulerMockRecorder) Close() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockScheduler)(nil).Close)) +} + +// Dispatch mocks base method. +func (m *MockScheduler) Dispatch(arg0 time.Time) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Dispatch", arg0) +} + +// Dispatch indicates an expected call of Dispatch. +func (mr *MockSchedulerMockRecorder) Dispatch(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Dispatch", reflect.TypeOf((*MockScheduler)(nil).Dispatch), arg0) +} + +// SetBuildBlockTime mocks base method. 
+func (m *MockScheduler) SetBuildBlockTime(arg0 time.Time) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetBuildBlockTime", arg0) +} + +// SetBuildBlockTime indicates an expected call of SetBuildBlockTime. +func (mr *MockSchedulerMockRecorder) SetBuildBlockTime(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetBuildBlockTime", reflect.TypeOf((*MockScheduler)(nil).SetBuildBlockTime), arg0) +} diff --git a/avalanchego/vms/proposervm/scheduler/scheduler.go b/avalanchego/vms/proposervm/scheduler/scheduler.go index e0062019..8395596a 100644 --- a/avalanchego/vms/proposervm/scheduler/scheduler.go +++ b/avalanchego/vms/proposervm/scheduler/scheduler.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package scheduler @@ -14,6 +14,9 @@ import ( type Scheduler interface { Dispatch(startTime time.Time) + + // Client must guarantee that [SetBuildBlockTime] + // is never called after [Close] SetBuildBlockTime(t time.Time) Close() } diff --git a/avalanchego/vms/proposervm/scheduler/scheduler_test.go b/avalanchego/vms/proposervm/scheduler/scheduler_test.go index 74693657..77ed39a6 100644 --- a/avalanchego/vms/proposervm/scheduler/scheduler_test.go +++ b/avalanchego/vms/proposervm/scheduler/scheduler_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package scheduler @@ -7,6 +7,8 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/utils/logging" ) @@ -22,9 +24,7 @@ func TestDelayFromNew(t *testing.T) { fromVM <- common.PendingTxs <-toEngine - if time.Until(startTime) > 0 { - t.Fatalf("passed message too soon") - } + require.LessOrEqual(t, time.Until(startTime), time.Duration(0)) } func TestDelayFromSetTime(t *testing.T) { @@ -41,9 +41,7 @@ func TestDelayFromSetTime(t *testing.T) { fromVM <- common.PendingTxs <-toEngine - if time.Until(startTime) > 0 { - t.Fatalf("passed message too soon") - } + require.LessOrEqual(t, time.Until(startTime), time.Duration(0)) } func TestReceipt(*testing.T) { diff --git a/avalanchego/vms/proposervm/state/block_height_index.go b/avalanchego/vms/proposervm/state/block_height_index.go index 1a6f0297..b60fca0c 100644 --- a/avalanchego/vms/proposervm/state/block_height_index.go +++ b/avalanchego/vms/proposervm/state/block_height_index.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package state @@ -24,6 +24,9 @@ var ( ) type HeightIndexGetter interface { + // GetMinimumHeight return the smallest height of an indexed blockID. If + // there are no indexed blockIDs, ErrNotFound will be returned. + GetMinimumHeight() (uint64, error) GetBlockIDAtHeight(height uint64) (ids.ID, error) // Fork height is stored when the first post-fork block/option is accepted. 
@@ -32,8 +35,9 @@ type HeightIndexGetter interface { } type HeightIndexWriter interface { - SetBlockIDAtHeight(height uint64, blkID ids.ID) error SetForkHeight(height uint64) error + SetBlockIDAtHeight(height uint64, blkID ids.ID) error + DeleteBlockIDAtHeight(height uint64) error } // A checkpoint is the blockID of the next block to be considered @@ -75,6 +79,21 @@ func NewHeightIndex(db database.Database, commitable versiondb.Commitable) Heigh } } +func (hi *heightIndex) GetMinimumHeight() (uint64, error) { + it := hi.heightDB.NewIterator() + defer it.Release() + + if !it.Next() { + return 0, database.ErrNotFound + } + + height, err := database.ParseUInt64(it.Key()) + if err != nil { + return 0, err + } + return height, it.Error() +} + func (hi *heightIndex) GetBlockIDAtHeight(height uint64) (ids.ID, error) { if blkID, found := hi.heightsCache.Get(height); found { return blkID, nil @@ -95,6 +114,12 @@ func (hi *heightIndex) SetBlockIDAtHeight(height uint64, blkID ids.ID) error { return database.PutID(hi.heightDB, key, blkID) } +func (hi *heightIndex) DeleteBlockIDAtHeight(height uint64) error { + hi.heightsCache.Evict(height) + key := database.PackUInt64(height) + return hi.heightDB.Delete(key) +} + func (hi *heightIndex) GetForkHeight() (uint64, error) { return database.GetUInt64(hi.metadataDB, forkKey) } diff --git a/avalanchego/vms/proposervm/state/block_state.go b/avalanchego/vms/proposervm/state/block_state.go index 6d426d2e..0c5e210a 100644 --- a/avalanchego/vms/proposervm/state/block_state.go +++ b/avalanchego/vms/proposervm/state/block_state.go @@ -1,11 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package state import ( "errors" - "fmt" "github.com/prometheus/client_golang/prometheus" @@ -14,10 +13,15 @@ import ( "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/choices" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/metric" + "github.com/ava-labs/avalanchego/utils/units" + "github.com/ava-labs/avalanchego/utils/wrappers" + "github.com/ava-labs/avalanchego/version" "github.com/ava-labs/avalanchego/vms/proposervm/block" ) -const blockCacheSize = 8192 +const blockCacheSize = 64 * units.MiB var ( errBlockWrongVersion = errors.New("wrong version") @@ -28,6 +32,7 @@ var ( type BlockState interface { GetBlock(blkID ids.ID) (block.Block, choices.Status, error) PutBlock(blk block.Block, status choices.Status) error + DeleteBlock(blkID ids.ID) error } type blockState struct { @@ -45,18 +50,31 @@ type blockWrapper struct { block block.Block } +func cachedBlockSize(_ ids.ID, bw *blockWrapper) int { + if bw == nil { + return ids.IDLen + constants.PointerOverhead + } + return ids.IDLen + len(bw.Block) + wrappers.IntLen + 2*constants.PointerOverhead +} + func NewBlockState(db database.Database) BlockState { return &blockState{ - blkCache: &cache.LRU[ids.ID, *blockWrapper]{Size: blockCacheSize}, - db: db, + blkCache: cache.NewSizedLRU[ids.ID, *blockWrapper]( + blockCacheSize, + cachedBlockSize, + ), + db: db, } } func NewMeteredBlockState(db database.Database, namespace string, metrics prometheus.Registerer) (BlockState, error) { blkCache, err := metercacher.New[ids.ID, *blockWrapper]( - fmt.Sprintf("%s_block_cache", namespace), + metric.AppendNamespace(namespace, "block_cache"), metrics, - &cache.LRU[ids.ID, *blockWrapper]{Size: blockCacheSize}, + cache.NewSizedLRU[ids.ID, *blockWrapper]( + blockCacheSize, + cachedBlockSize, + ), ) return &blockState{ @@ -83,16 +101,20 @@ func (s *blockState) GetBlock(blkID ids.ID) (block.Block, choices.Status, error) } 
blkWrapper := blockWrapper{} - parsedVersion, err := c.Unmarshal(blkWrapperBytes, &blkWrapper) + parsedVersion, err := Codec.Unmarshal(blkWrapperBytes, &blkWrapper) if err != nil { return nil, choices.Unknown, err } - if parsedVersion != version { + if parsedVersion != CodecVersion { return nil, choices.Unknown, errBlockWrongVersion } // The key was in the database - blk, err := block.Parse(blkWrapper.Block) + // + // Invariant: Blocks stored on disk were previously accepted by this node. + // Because the durango activation relaxes TLS cert parsing rules, we assume + // it is always activated here. + blk, err := block.Parse(blkWrapper.Block, version.DefaultUpgradeTime) if err != nil { return nil, choices.Unknown, err } @@ -109,7 +131,7 @@ func (s *blockState) PutBlock(blk block.Block, status choices.Status) error { block: blk, } - bytes, err := c.Marshal(version, &blkWrapper) + bytes, err := Codec.Marshal(CodecVersion, &blkWrapper) if err != nil { return err } @@ -118,3 +140,8 @@ func (s *blockState) PutBlock(blk block.Block, status choices.Status) error { s.blkCache.Put(blkID, &blkWrapper) return s.db.Put(blkID[:], bytes) } + +func (s *blockState) DeleteBlock(blkID ids.ID) error { + s.blkCache.Evict(blkID) + return s.db.Delete(blkID[:]) +} diff --git a/avalanchego/vms/proposervm/state/block_state_test.go b/avalanchego/vms/proposervm/state/block_state_test.go index 22b4d87b..269d7fbd 100644 --- a/avalanchego/vms/proposervm/state/block_state_test.go +++ b/avalanchego/vms/proposervm/state/block_state_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package state @@ -9,7 +9,6 @@ import ( "time" "github.com/prometheus/client_golang/prometheus" - "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/database" @@ -30,7 +29,7 @@ func testBlockState(a *require.Assertions, bs BlockState) { tlsCert, err := staking.NewTLSCert() a.NoError(err) - cert := tlsCert.Leaf + cert := staking.CertificateFromX509(tlsCert.Leaf) key := tlsCert.PrivateKey.(crypto.Signer) b, err := block.Build( diff --git a/avalanchego/vms/proposervm/state/chain_state.go b/avalanchego/vms/proposervm/state/chain_state.go index 0f1a1bfb..e4ed34dd 100644 --- a/avalanchego/vms/proposervm/state/chain_state.go +++ b/avalanchego/vms/proposervm/state/chain_state.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package state diff --git a/avalanchego/vms/proposervm/state/chain_state_test.go b/avalanchego/vms/proposervm/state/chain_state_test.go index ab14f422..6b45585f 100644 --- a/avalanchego/vms/proposervm/state/chain_state_test.go +++ b/avalanchego/vms/proposervm/state/chain_state_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package state diff --git a/avalanchego/vms/proposervm/state/codec.go b/avalanchego/vms/proposervm/state/codec.go index f7352380..63727894 100644 --- a/avalanchego/vms/proposervm/state/codec.go +++ b/avalanchego/vms/proposervm/state/codec.go @@ -1,24 +1,25 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package state import ( "math" + "time" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" ) -const version = 0 +const CodecVersion = 0 -var c codec.Manager +var Codec codec.Manager func init() { - lc := linearcodec.NewCustomMaxLength(math.MaxUint32) - c = codec.NewManager(math.MaxInt32) + lc := linearcodec.NewDefault(time.Time{}) + Codec = codec.NewManager(math.MaxInt32) - err := c.RegisterCodec(version, lc) + err := Codec.RegisterCodec(CodecVersion, lc) if err != nil { panic(err) } diff --git a/avalanchego/vms/proposervm/state/mock_state.go b/avalanchego/vms/proposervm/state/mock_state.go index da70f134..6384528a 100644 --- a/avalanchego/vms/proposervm/state/mock_state.go +++ b/avalanchego/vms/proposervm/state/mock_state.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/vms/proposervm/state (interfaces: State) +// +// Generated by this command: +// +// mockgen -package=state -destination=vms/proposervm/state/mock_state.go github.com/ava-labs/avalanchego/vms/proposervm/state State +// // Package state is a generated GoMock package. package state @@ -13,7 +15,7 @@ import ( ids "github.com/ava-labs/avalanchego/ids" choices "github.com/ava-labs/avalanchego/snow/choices" block "github.com/ava-labs/avalanchego/vms/proposervm/block" - gomock "github.com/golang/mock/gomock" + gomock "go.uber.org/mock/gomock" ) // MockState is a mock of State interface. @@ -53,6 +55,34 @@ func (mr *MockStateMockRecorder) Commit() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Commit", reflect.TypeOf((*MockState)(nil).Commit)) } +// DeleteBlock mocks base method. 
+func (m *MockState) DeleteBlock(arg0 ids.ID) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteBlock", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteBlock indicates an expected call of DeleteBlock. +func (mr *MockStateMockRecorder) DeleteBlock(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBlock", reflect.TypeOf((*MockState)(nil).DeleteBlock), arg0) +} + +// DeleteBlockIDAtHeight mocks base method. +func (m *MockState) DeleteBlockIDAtHeight(arg0 uint64) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteBlockIDAtHeight", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteBlockIDAtHeight indicates an expected call of DeleteBlockIDAtHeight. +func (mr *MockStateMockRecorder) DeleteBlockIDAtHeight(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBlockIDAtHeight", reflect.TypeOf((*MockState)(nil).DeleteBlockIDAtHeight), arg0) +} + // DeleteCheckpoint mocks base method. func (m *MockState) DeleteCheckpoint() error { m.ctrl.T.Helper() @@ -92,7 +122,7 @@ func (m *MockState) GetBlock(arg0 ids.ID) (block.Block, choices.Status, error) { } // GetBlock indicates an expected call of GetBlock. -func (mr *MockStateMockRecorder) GetBlock(arg0 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) GetBlock(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlock", reflect.TypeOf((*MockState)(nil).GetBlock), arg0) } @@ -107,7 +137,7 @@ func (m *MockState) GetBlockIDAtHeight(arg0 uint64) (ids.ID, error) { } // GetBlockIDAtHeight indicates an expected call of GetBlockIDAtHeight. 
-func (mr *MockStateMockRecorder) GetBlockIDAtHeight(arg0 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) GetBlockIDAtHeight(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlockIDAtHeight", reflect.TypeOf((*MockState)(nil).GetBlockIDAtHeight), arg0) } @@ -157,6 +187,21 @@ func (mr *MockStateMockRecorder) GetLastAccepted() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLastAccepted", reflect.TypeOf((*MockState)(nil).GetLastAccepted)) } +// GetMinimumHeight mocks base method. +func (m *MockState) GetMinimumHeight() (uint64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetMinimumHeight") + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetMinimumHeight indicates an expected call of GetMinimumHeight. +func (mr *MockStateMockRecorder) GetMinimumHeight() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMinimumHeight", reflect.TypeOf((*MockState)(nil).GetMinimumHeight)) +} + // PutBlock mocks base method. func (m *MockState) PutBlock(arg0 block.Block, arg1 choices.Status) error { m.ctrl.T.Helper() @@ -166,7 +211,7 @@ func (m *MockState) PutBlock(arg0 block.Block, arg1 choices.Status) error { } // PutBlock indicates an expected call of PutBlock. -func (mr *MockStateMockRecorder) PutBlock(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) PutBlock(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBlock", reflect.TypeOf((*MockState)(nil).PutBlock), arg0, arg1) } @@ -180,7 +225,7 @@ func (m *MockState) SetBlockIDAtHeight(arg0 uint64, arg1 ids.ID) error { } // SetBlockIDAtHeight indicates an expected call of SetBlockIDAtHeight. 
-func (mr *MockStateMockRecorder) SetBlockIDAtHeight(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) SetBlockIDAtHeight(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetBlockIDAtHeight", reflect.TypeOf((*MockState)(nil).SetBlockIDAtHeight), arg0, arg1) } @@ -194,7 +239,7 @@ func (m *MockState) SetCheckpoint(arg0 ids.ID) error { } // SetCheckpoint indicates an expected call of SetCheckpoint. -func (mr *MockStateMockRecorder) SetCheckpoint(arg0 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) SetCheckpoint(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetCheckpoint", reflect.TypeOf((*MockState)(nil).SetCheckpoint), arg0) } @@ -208,7 +253,7 @@ func (m *MockState) SetForkHeight(arg0 uint64) error { } // SetForkHeight indicates an expected call of SetForkHeight. -func (mr *MockStateMockRecorder) SetForkHeight(arg0 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) SetForkHeight(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetForkHeight", reflect.TypeOf((*MockState)(nil).SetForkHeight), arg0) } @@ -222,7 +267,7 @@ func (m *MockState) SetLastAccepted(arg0 ids.ID) error { } // SetLastAccepted indicates an expected call of SetLastAccepted. -func (mr *MockStateMockRecorder) SetLastAccepted(arg0 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) SetLastAccepted(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetLastAccepted", reflect.TypeOf((*MockState)(nil).SetLastAccepted), arg0) } diff --git a/avalanchego/vms/proposervm/state/state.go b/avalanchego/vms/proposervm/state/state.go index c8b80b94..487e64f7 100644 --- a/avalanchego/vms/proposervm/state/state.go +++ b/avalanchego/vms/proposervm/state/state.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. 
All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package state diff --git a/avalanchego/vms/proposervm/state/state_test.go b/avalanchego/vms/proposervm/state/state_test.go index 97980fc3..682c3167 100644 --- a/avalanchego/vms/proposervm/state/state_test.go +++ b/avalanchego/vms/proposervm/state/state_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package state @@ -7,7 +7,6 @@ import ( "testing" "github.com/prometheus/client_golang/prometheus" - "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/database/memdb" diff --git a/avalanchego/vms/proposervm/state_summary.go b/avalanchego/vms/proposervm/state_summary.go index 629d2c64..f61c29d6 100644 --- a/avalanchego/vms/proposervm/state_summary.go +++ b/avalanchego/vms/proposervm/state_summary.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package proposervm diff --git a/avalanchego/vms/proposervm/state_syncable_vm.go b/avalanchego/vms/proposervm/state_syncable_vm.go index da86d8c3..08a321ca 100644 --- a/avalanchego/vms/proposervm/state_syncable_vm.go +++ b/avalanchego/vms/proposervm/state_syncable_vm.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package proposervm diff --git a/avalanchego/vms/proposervm/state_syncable_vm_test.go b/avalanchego/vms/proposervm/state_syncable_vm_test.go index a7e40131..0fa24de1 100644 --- a/avalanchego/vms/proposervm/state_syncable_vm_test.go +++ b/avalanchego/vms/proposervm/state_syncable_vm_test.go @@ -1,43 +1,39 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package proposervm import ( - "bytes" "context" - "crypto" - "errors" "testing" "time" "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/database/manager" + "github.com/ava-labs/avalanchego/database/memdb" + "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" - "github.com/ava-labs/avalanchego/version" + "github.com/ava-labs/avalanchego/snow/snowtest" + "github.com/ava-labs/avalanchego/vms/proposervm/summary" statelessblock "github.com/ava-labs/avalanchego/vms/proposervm/block" ) -var errUnknownSummary = errors.New("unknown summary") - func helperBuildStateSyncTestObjects(t *testing.T) (*fullVM, *VM) { + require := require.New(t) + innerVM := &fullVM{ TestVM: &block.TestVM{ TestVM: common.TestVM{ T: t, }, }, - TestHeightIndexedVM: &block.TestHeightIndexedVM{ - T: t, - }, TestStateSyncableVM: &block.TestStateSyncableVM{ T: t, }, @@ -51,12 +47,12 @@ func helperBuildStateSyncTestObjects(t *testing.T) (*fullVM, *VM) { // load innerVM expectations innerGenesisBlk := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ - IDV: ids.ID{'i', 'n', 'n', 'e', 'r', 'G', 'e', 'n', 's', 'y', 's', 'I', 'D'}, + IDV: ids.ID{'i', 'n', 'n', 'e', 'r', 
'G', 'e', 'n', 'e', 's', 'i', 's', 'I', 'D'}, }, HeightV: 0, BytesV: []byte("genesis state"), } - innerVM.InitializeF = func(context.Context, *snow.Context, manager.Manager, + innerVM.InitializeF = func(context.Context, *snow.Context, database.Database, []byte, []byte, []byte, chan<- common.Message, []*common.Fx, common.AppSender, ) error { @@ -72,36 +68,34 @@ func helperBuildStateSyncTestObjects(t *testing.T) (*fullVM, *VM) { return innerGenesisBlk, nil } - // createVM - dbManager := manager.NewMemDB(version.Semantic1_0_0) - dbManager = dbManager.NewPrefixDBManager([]byte{}) - + // create the VM vm := New( innerVM, - time.Time{}, - 0, - DefaultMinBlockDelay, - pTestCert.PrivateKey.(crypto.Signer), - pTestCert.Leaf, + Config{ + ActivationTime: time.Unix(0, 0), + DurangoTime: time.Unix(0, 0), + MinimumPChainHeight: 0, + MinBlkDelay: DefaultMinBlockDelay, + NumHistoricalBlocks: DefaultNumHistoricalBlocks, + StakingLeafSigner: pTestSigner, + StakingCertLeaf: pTestCert, + }, ) - ctx := snow.DefaultContextTest() - ctx.NodeID = ids.NodeIDFromCert(pTestCert.Leaf) + ctx := snowtest.Context(t, snowtest.CChainID) + ctx.NodeID = ids.NodeIDFromCert(pTestCert) - err := vm.Initialize( + require.NoError(vm.Initialize( context.Background(), ctx, - dbManager, + prefixdb.New([]byte{}, memdb.New()), innerGenesisBlk.Bytes(), nil, nil, nil, nil, nil, - ) - if err != nil { - t.Fatalf("failed to initialize proposerVM with %s", err) - } + )) return innerVM, vm } @@ -110,6 +104,9 @@ func TestStateSyncEnabled(t *testing.T) { require := require.New(t) innerVM, vm := helperBuildStateSyncTestObjects(t) + defer func() { + require.NoError(vm.Shutdown(context.Background())) + }() // ProposerVM State Sync disabled if innerVM State sync is disabled vm.hIndexer.MarkRepaired(true) @@ -133,6 +130,9 @@ func TestStateSyncGetOngoingSyncStateSummary(t *testing.T) { require := require.New(t) innerVM, vm := helperBuildStateSyncTestObjects(t) + defer func() { + 
require.NoError(vm.Shutdown(context.Background())) + }() innerSummary := &block.TestStateSummary{ IDV: ids.ID{'s', 'u', 'm', 'm', 'a', 'r', 'y', 'I', 'D'}, @@ -145,20 +145,20 @@ func TestStateSyncGetOngoingSyncStateSummary(t *testing.T) { return nil, database.ErrNotFound } summary, err := vm.GetOngoingSyncStateSummary(context.Background()) - require.True(err == database.ErrNotFound) - require.True(summary == nil) + require.ErrorIs(err, database.ErrNotFound) + require.Nil(summary) // Pre fork summary case, fork height not reached hence not set yet innerVM.GetOngoingSyncStateSummaryF = func(context.Context) (block.StateSummary, error) { return innerSummary, nil } _, err = vm.GetForkHeight() - require.Equal(err, database.ErrNotFound) + require.ErrorIs(err, database.ErrNotFound) summary, err = vm.GetOngoingSyncStateSummary(context.Background()) require.NoError(err) - require.True(summary.ID() == innerSummary.ID()) - require.True(summary.Height() == innerSummary.Height()) - require.True(bytes.Equal(summary.Bytes(), innerSummary.Bytes())) + require.Equal(innerSummary.ID(), summary.ID()) + require.Equal(innerSummary.Height(), summary.Height()) + require.Equal(innerSummary.Bytes(), summary.Bytes()) // Pre fork summary case, fork height already reached innerVM.GetOngoingSyncStateSummaryF = func(context.Context) (block.StateSummary, error) { @@ -167,9 +167,9 @@ func TestStateSyncGetOngoingSyncStateSummary(t *testing.T) { require.NoError(vm.SetForkHeight(innerSummary.Height() + 1)) summary, err = vm.GetOngoingSyncStateSummary(context.Background()) require.NoError(err) - require.True(summary.ID() == innerSummary.ID()) - require.True(summary.Height() == innerSummary.Height()) - require.True(bytes.Equal(summary.Bytes(), innerSummary.Bytes())) + require.Equal(innerSummary.ID(), summary.ID()) + require.Equal(innerSummary.Height(), summary.Height()) + require.Equal(innerSummary.Bytes(), summary.Bytes()) // Post fork summary case vm.hIndexer.MarkRepaired(true) @@ -177,12 +177,12 @@ 
func TestStateSyncGetOngoingSyncStateSummary(t *testing.T) { // store post fork block associated with summary innerBlk := &snowman.TestBlock{ - BytesV: []byte{1}, - TimestampV: vm.Time(), - HeightV: innerSummary.Height(), + BytesV: []byte{1}, + ParentV: ids.GenerateTestID(), + HeightV: innerSummary.Height(), } innerVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { - require.True(bytes.Equal(b, innerBlk.Bytes())) + require.Equal(innerBlk.Bytes(), b) return innerBlk, nil } @@ -190,10 +190,10 @@ func TestStateSyncGetOngoingSyncStateSummary(t *testing.T) { vm.preferred, innerBlk.Timestamp(), 100, // pChainHeight, - vm.stakingCertLeaf, + vm.StakingCertLeaf, innerBlk.Bytes(), vm.ctx.ChainID, - vm.stakingLeafSigner, + vm.StakingLeafSigner, ) require.NoError(err) proBlk := &postForkBlock{ @@ -204,17 +204,20 @@ func TestStateSyncGetOngoingSyncStateSummary(t *testing.T) { status: choices.Accepted, }, } - require.NoError(vm.storePostForkBlock(proBlk)) + require.NoError(vm.acceptPostForkBlock(proBlk)) summary, err = vm.GetOngoingSyncStateSummary(context.Background()) require.NoError(err) - require.True(summary.Height() == innerSummary.Height()) + require.Equal(innerSummary.Height(), summary.Height()) } func TestStateSyncGetLastStateSummary(t *testing.T) { require := require.New(t) innerVM, vm := helperBuildStateSyncTestObjects(t) + defer func() { + require.NoError(vm.Shutdown(context.Background())) + }() innerSummary := &block.TestStateSummary{ IDV: ids.ID{'s', 'u', 'm', 'm', 'a', 'r', 'y', 'I', 'D'}, @@ -227,20 +230,20 @@ func TestStateSyncGetLastStateSummary(t *testing.T) { return nil, database.ErrNotFound } summary, err := vm.GetLastStateSummary(context.Background()) - require.True(err == database.ErrNotFound) - require.True(summary == nil) + require.ErrorIs(err, database.ErrNotFound) + require.Nil(summary) // Pre fork summary case, fork height not reached hence not set yet innerVM.GetLastStateSummaryF = func(context.Context) (block.StateSummary, 
error) { return innerSummary, nil } _, err = vm.GetForkHeight() - require.Equal(err, database.ErrNotFound) + require.ErrorIs(err, database.ErrNotFound) summary, err = vm.GetLastStateSummary(context.Background()) require.NoError(err) - require.True(summary.ID() == innerSummary.ID()) - require.True(summary.Height() == innerSummary.Height()) - require.True(bytes.Equal(summary.Bytes(), innerSummary.Bytes())) + require.Equal(innerSummary.ID(), summary.ID()) + require.Equal(innerSummary.Height(), summary.Height()) + require.Equal(innerSummary.Bytes(), summary.Bytes()) // Pre fork summary case, fork height already reached innerVM.GetLastStateSummaryF = func(context.Context) (block.StateSummary, error) { @@ -249,9 +252,9 @@ func TestStateSyncGetLastStateSummary(t *testing.T) { require.NoError(vm.SetForkHeight(innerSummary.Height() + 1)) summary, err = vm.GetLastStateSummary(context.Background()) require.NoError(err) - require.True(summary.ID() == innerSummary.ID()) - require.True(summary.Height() == innerSummary.Height()) - require.True(bytes.Equal(summary.Bytes(), innerSummary.Bytes())) + require.Equal(innerSummary.ID(), summary.ID()) + require.Equal(innerSummary.Height(), summary.Height()) + require.Equal(innerSummary.Bytes(), summary.Bytes()) // Post fork summary case vm.hIndexer.MarkRepaired(true) @@ -259,12 +262,12 @@ func TestStateSyncGetLastStateSummary(t *testing.T) { // store post fork block associated with summary innerBlk := &snowman.TestBlock{ - BytesV: []byte{1}, - TimestampV: vm.Time(), - HeightV: innerSummary.Height(), + BytesV: []byte{1}, + ParentV: ids.GenerateTestID(), + HeightV: innerSummary.Height(), } innerVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { - require.True(bytes.Equal(b, innerBlk.Bytes())) + require.Equal(innerBlk.Bytes(), b) return innerBlk, nil } @@ -272,10 +275,10 @@ func TestStateSyncGetLastStateSummary(t *testing.T) { vm.preferred, innerBlk.Timestamp(), 100, // pChainHeight, - vm.stakingCertLeaf, + 
vm.StakingCertLeaf, innerBlk.Bytes(), vm.ctx.ChainID, - vm.stakingLeafSigner, + vm.StakingLeafSigner, ) require.NoError(err) proBlk := &postForkBlock{ @@ -286,17 +289,20 @@ func TestStateSyncGetLastStateSummary(t *testing.T) { status: choices.Accepted, }, } - require.NoError(vm.storePostForkBlock(proBlk)) + require.NoError(vm.acceptPostForkBlock(proBlk)) summary, err = vm.GetLastStateSummary(context.Background()) require.NoError(err) - require.True(summary.Height() == innerSummary.Height()) + require.Equal(innerSummary.Height(), summary.Height()) } func TestStateSyncGetStateSummary(t *testing.T) { require := require.New(t) innerVM, vm := helperBuildStateSyncTestObjects(t) + defer func() { + require.NoError(vm.Shutdown(context.Background())) + }() reqHeight := uint64(1969) innerSummary := &block.TestStateSummary{ @@ -310,33 +316,33 @@ func TestStateSyncGetStateSummary(t *testing.T) { return nil, database.ErrNotFound } summary, err := vm.GetStateSummary(context.Background(), reqHeight) - require.True(err == database.ErrNotFound) - require.True(summary == nil) + require.ErrorIs(err, database.ErrNotFound) + require.Nil(summary) // Pre fork summary case, fork height not reached hence not set yet innerVM.GetStateSummaryF = func(_ context.Context, h uint64) (block.StateSummary, error) { - require.True(h == reqHeight) + require.Equal(reqHeight, h) return innerSummary, nil } _, err = vm.GetForkHeight() - require.Equal(err, database.ErrNotFound) + require.ErrorIs(err, database.ErrNotFound) summary, err = vm.GetStateSummary(context.Background(), reqHeight) require.NoError(err) - require.True(summary.ID() == innerSummary.ID()) - require.True(summary.Height() == innerSummary.Height()) - require.True(bytes.Equal(summary.Bytes(), innerSummary.Bytes())) + require.Equal(innerSummary.ID(), summary.ID()) + require.Equal(innerSummary.Height(), summary.Height()) + require.Equal(innerSummary.Bytes(), summary.Bytes()) // Pre fork summary case, fork height already reached 
innerVM.GetStateSummaryF = func(_ context.Context, h uint64) (block.StateSummary, error) { - require.True(h == reqHeight) + require.Equal(reqHeight, h) return innerSummary, nil } require.NoError(vm.SetForkHeight(innerSummary.Height() + 1)) summary, err = vm.GetStateSummary(context.Background(), reqHeight) require.NoError(err) - require.True(summary.ID() == innerSummary.ID()) - require.True(summary.Height() == innerSummary.Height()) - require.True(bytes.Equal(summary.Bytes(), innerSummary.Bytes())) + require.Equal(innerSummary.ID(), summary.ID()) + require.Equal(innerSummary.Height(), summary.Height()) + require.Equal(innerSummary.Bytes(), summary.Bytes()) // Post fork summary case vm.hIndexer.MarkRepaired(true) @@ -344,12 +350,12 @@ func TestStateSyncGetStateSummary(t *testing.T) { // store post fork block associated with summary innerBlk := &snowman.TestBlock{ - BytesV: []byte{1}, - TimestampV: vm.Time(), - HeightV: innerSummary.Height(), + BytesV: []byte{1}, + ParentV: ids.GenerateTestID(), + HeightV: innerSummary.Height(), } innerVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { - require.True(bytes.Equal(b, innerBlk.Bytes())) + require.Equal(innerBlk.Bytes(), b) return innerBlk, nil } @@ -357,10 +363,10 @@ func TestStateSyncGetStateSummary(t *testing.T) { vm.preferred, innerBlk.Timestamp(), 100, // pChainHeight, - vm.stakingCertLeaf, + vm.StakingCertLeaf, innerBlk.Bytes(), vm.ctx.ChainID, - vm.stakingLeafSigner, + vm.StakingLeafSigner, ) require.NoError(err) proBlk := &postForkBlock{ @@ -371,16 +377,19 @@ func TestStateSyncGetStateSummary(t *testing.T) { status: choices.Accepted, }, } - require.NoError(vm.storePostForkBlock(proBlk)) + require.NoError(vm.acceptPostForkBlock(proBlk)) summary, err = vm.GetStateSummary(context.Background(), reqHeight) require.NoError(err) - require.True(summary.Height() == innerSummary.Height()) + require.Equal(innerSummary.Height(), summary.Height()) } func TestParseStateSummary(t *testing.T) { require := 
require.New(t) innerVM, vm := helperBuildStateSyncTestObjects(t) + defer func() { + require.NoError(vm.Shutdown(context.Background())) + }() reqHeight := uint64(1969) innerSummary := &block.TestStateSummary{ @@ -393,7 +402,7 @@ func TestParseStateSummary(t *testing.T) { return innerSummary, nil } innerVM.GetStateSummaryF = func(_ context.Context, h uint64) (block.StateSummary, error) { - require.True(h == reqHeight) + require.Equal(reqHeight, h) return innerSummary, nil } @@ -404,9 +413,9 @@ func TestParseStateSummary(t *testing.T) { parsedSummary, err := vm.ParseStateSummary(context.Background(), summary.Bytes()) require.NoError(err) - require.True(summary.ID() == parsedSummary.ID()) - require.True(summary.Height() == parsedSummary.Height()) - require.True(bytes.Equal(summary.Bytes(), parsedSummary.Bytes())) + require.Equal(summary.ID(), parsedSummary.ID()) + require.Equal(summary.Height(), parsedSummary.Height()) + require.Equal(summary.Bytes(), parsedSummary.Bytes()) // Get a post fork block than parse it vm.hIndexer.MarkRepaired(true) @@ -414,12 +423,12 @@ func TestParseStateSummary(t *testing.T) { // store post fork block associated with summary innerBlk := &snowman.TestBlock{ - BytesV: []byte{1}, - TimestampV: vm.Time(), - HeightV: innerSummary.Height(), + BytesV: []byte{1}, + ParentV: ids.GenerateTestID(), + HeightV: innerSummary.Height(), } innerVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { - require.True(bytes.Equal(b, innerBlk.Bytes())) + require.Equal(innerBlk.Bytes(), b) return innerBlk, nil } @@ -427,10 +436,10 @@ func TestParseStateSummary(t *testing.T) { vm.preferred, innerBlk.Timestamp(), 100, // pChainHeight, - vm.stakingCertLeaf, + vm.StakingCertLeaf, innerBlk.Bytes(), vm.ctx.ChainID, - vm.stakingLeafSigner, + vm.StakingLeafSigner, ) require.NoError(err) proBlk := &postForkBlock{ @@ -441,22 +450,25 @@ func TestParseStateSummary(t *testing.T) { status: choices.Accepted, }, } - 
require.NoError(vm.storePostForkBlock(proBlk)) + require.NoError(vm.acceptPostForkBlock(proBlk)) require.NoError(vm.SetForkHeight(innerSummary.Height() - 1)) summary, err = vm.GetStateSummary(context.Background(), reqHeight) require.NoError(err) parsedSummary, err = vm.ParseStateSummary(context.Background(), summary.Bytes()) require.NoError(err) - require.True(summary.ID() == parsedSummary.ID()) - require.True(summary.Height() == parsedSummary.Height()) - require.True(bytes.Equal(summary.Bytes(), parsedSummary.Bytes())) + require.Equal(summary.ID(), parsedSummary.ID()) + require.Equal(summary.Height(), parsedSummary.Height()) + require.Equal(summary.Bytes(), parsedSummary.Bytes()) } func TestStateSummaryAccept(t *testing.T) { require := require.New(t) innerVM, vm := helperBuildStateSyncTestObjects(t) + defer func() { + require.NoError(vm.Shutdown(context.Background())) + }() reqHeight := uint64(1969) innerSummary := &block.TestStateSummary{ @@ -470,40 +482,35 @@ func TestStateSummaryAccept(t *testing.T) { // store post fork block associated with summary innerBlk := &snowman.TestBlock{ - BytesV: []byte{1}, - TimestampV: vm.Time(), - HeightV: innerSummary.Height(), - } - innerVM.GetStateSummaryF = func(_ context.Context, h uint64) (block.StateSummary, error) { - require.True(h == reqHeight) - return innerSummary, nil - } - innerVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { - require.True(bytes.Equal(b, innerBlk.Bytes())) - return innerBlk, nil + BytesV: []byte{1}, + ParentV: ids.GenerateTestID(), + HeightV: innerSummary.Height(), } slb, err := statelessblock.Build( vm.preferred, innerBlk.Timestamp(), 100, // pChainHeight, - vm.stakingCertLeaf, + vm.StakingCertLeaf, innerBlk.Bytes(), vm.ctx.ChainID, - vm.stakingLeafSigner, + vm.StakingLeafSigner, ) require.NoError(err) - proBlk := &postForkBlock{ - SignedBlock: slb, - postForkCommonComponents: postForkCommonComponents{ - vm: vm, - innerBlk: innerBlk, - status: choices.Accepted, - }, + + 
statelessSummary, err := summary.Build(innerSummary.Height()-1, slb.Bytes(), innerSummary.Bytes()) + require.NoError(err) + + innerVM.ParseStateSummaryF = func(_ context.Context, summaryBytes []byte) (block.StateSummary, error) { + require.Equal(innerSummary.BytesV, summaryBytes) + return innerSummary, nil + } + innerVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { + require.Equal(innerBlk.Bytes(), b) + return innerBlk, nil } - require.NoError(vm.storePostForkBlock(proBlk)) - summary, err := vm.GetStateSummary(context.Background(), reqHeight) + summary, err := vm.ParseStateSummary(context.Background(), statelessSummary.Bytes()) require.NoError(err) // test Accept accepted @@ -527,6 +534,9 @@ func TestStateSummaryAcceptOlderBlock(t *testing.T) { require := require.New(t) innerVM, vm := helperBuildStateSyncTestObjects(t) + defer func() { + require.NoError(vm.Shutdown(context.Background())) + }() reqHeight := uint64(1969) innerSummary := &block.TestStateSummary{ @@ -544,16 +554,16 @@ func TestStateSummaryAcceptOlderBlock(t *testing.T) { // store post fork block associated with summary innerBlk := &snowman.TestBlock{ - BytesV: []byte{1}, - TimestampV: vm.Time(), - HeightV: innerSummary.Height(), + BytesV: []byte{1}, + ParentV: ids.GenerateTestID(), + HeightV: innerSummary.Height(), } innerVM.GetStateSummaryF = func(_ context.Context, h uint64) (block.StateSummary, error) { - require.True(h == reqHeight) + require.Equal(reqHeight, h) return innerSummary, nil } innerVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { - require.True(bytes.Equal(b, innerBlk.Bytes())) + require.Equal(innerBlk.Bytes(), b) return innerBlk, nil } @@ -561,10 +571,10 @@ func TestStateSummaryAcceptOlderBlock(t *testing.T) { vm.preferred, innerBlk.Timestamp(), 100, // pChainHeight, - vm.stakingCertLeaf, + vm.StakingCertLeaf, innerBlk.Bytes(), vm.ctx.ChainID, - vm.stakingLeafSigner, + vm.StakingLeafSigner, ) require.NoError(err) proBlk := 
&postForkBlock{ @@ -575,7 +585,7 @@ func TestStateSummaryAcceptOlderBlock(t *testing.T) { status: choices.Accepted, }, } - require.NoError(vm.storePostForkBlock(proBlk)) + require.NoError(vm.acceptPostForkBlock(proBlk)) summary, err := vm.GetStateSummary(context.Background(), reqHeight) require.NoError(err) @@ -593,7 +603,15 @@ func TestNoStateSummariesServedWhileRepairingHeightIndex(t *testing.T) { require := require.New(t) // Note: by default proVM is built such that heightIndex will be considered complete - coreVM, _, proVM, _, _ := initTestProposerVM(t, time.Time{}, 0) // enable ProBlks + var ( + activationTime = time.Unix(0, 0) + durangoTime = activationTime + ) + coreVM, _, proVM, _, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + defer func() { + require.NoError(proVM.Shutdown(context.Background())) + }() + require.NoError(proVM.VerifyHeightIndex(context.Background())) // let coreVM be always ready to serve summaries @@ -608,17 +626,16 @@ func TestNoStateSummariesServedWhileRepairingHeightIndex(t *testing.T) { return coreStateSummary, nil } coreVM.GetStateSummaryF = func(_ context.Context, height uint64) (block.StateSummary, error) { - if height != summaryHeight { - return nil, errUnknownSummary - } + require.Equal(summaryHeight, height) return coreStateSummary, nil } // set height index to reindexing proVM.hIndexer.MarkRepaired(false) - require.ErrorIs(proVM.VerifyHeightIndex(context.Background()), block.ErrIndexIncomplete) + err := proVM.VerifyHeightIndex(context.Background()) + require.ErrorIs(err, block.ErrIndexIncomplete) - _, err := proVM.GetLastStateSummary(context.Background()) + _, err = proVM.GetLastStateSummary(context.Background()) require.ErrorIs(err, block.ErrIndexIncomplete) _, err = proVM.GetStateSummary(context.Background(), summaryHeight) @@ -630,5 +647,5 @@ func TestNoStateSummariesServedWhileRepairingHeightIndex(t *testing.T) { summary, err := proVM.GetLastStateSummary(context.Background()) require.NoError(err) - 
require.True(summary.Height() == summaryHeight) + require.Equal(summaryHeight, summary.Height()) } diff --git a/avalanchego/vms/proposervm/summary/build.go b/avalanchego/vms/proposervm/summary/build.go index 35e2e179..516f9d1a 100644 --- a/avalanchego/vms/proposervm/summary/build.go +++ b/avalanchego/vms/proposervm/summary/build.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package summary @@ -20,7 +20,7 @@ func Build( InnerSummary: coreSummary, } - bytes, err := c.Marshal(codecVersion, &summary) + bytes, err := Codec.Marshal(CodecVersion, &summary) if err != nil { return nil, fmt.Errorf("cannot marshal proposer summary due to: %w", err) } diff --git a/avalanchego/vms/proposervm/summary/build_test.go b/avalanchego/vms/proposervm/summary/build_test.go index 0e15ac3c..ad7e5df5 100644 --- a/avalanchego/vms/proposervm/summary/build_test.go +++ b/avalanchego/vms/proposervm/summary/build_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package summary diff --git a/avalanchego/vms/proposervm/summary/codec.go b/avalanchego/vms/proposervm/summary/codec.go index a71350f3..41a9eb9a 100644 --- a/avalanchego/vms/proposervm/summary/codec.go +++ b/avalanchego/vms/proposervm/summary/codec.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package summary @@ -6,23 +6,24 @@ package summary import ( "errors" "math" + "time" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" ) -const codecVersion = 0 +const CodecVersion = 0 var ( - c codec.Manager + Codec codec.Manager errWrongCodecVersion = errors.New("wrong codec version") ) func init() { - lc := linearcodec.NewCustomMaxLength(math.MaxUint32) - c = codec.NewManager(math.MaxInt32) - if err := c.RegisterCodec(codecVersion, lc); err != nil { + lc := linearcodec.NewDefault(time.Time{}) + Codec = codec.NewManager(math.MaxInt32) + if err := Codec.RegisterCodec(CodecVersion, lc); err != nil { panic(err) } } diff --git a/avalanchego/vms/proposervm/summary/parse.go b/avalanchego/vms/proposervm/summary/parse.go index 3d929544..670bd43a 100644 --- a/avalanchego/vms/proposervm/summary/parse.go +++ b/avalanchego/vms/proposervm/summary/parse.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package summary @@ -14,11 +14,11 @@ func Parse(bytes []byte) (StateSummary, error) { id: hashing.ComputeHash256Array(bytes), bytes: bytes, } - version, err := c.Unmarshal(bytes, &summary) + version, err := Codec.Unmarshal(bytes, &summary) if err != nil { return nil, fmt.Errorf("could not unmarshal summary due to: %w", err) } - if version != codecVersion { + if version != CodecVersion { return nil, errWrongCodecVersion } return &summary, nil diff --git a/avalanchego/vms/proposervm/summary/parse_test.go b/avalanchego/vms/proposervm/summary/parse_test.go index 3d527e27..16fb2aec 100644 --- a/avalanchego/vms/proposervm/summary/parse_test.go +++ b/avalanchego/vms/proposervm/summary/parse_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package summary @@ -7,6 +7,8 @@ import ( "testing" "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/codec" ) func TestParse(t *testing.T) { @@ -35,5 +37,5 @@ func TestParseGibberish(t *testing.T) { bytes := []byte{0, 1, 2, 3, 4, 5} _, err := Parse(bytes) - require.Error(err) + require.ErrorIs(err, codec.ErrUnknownVersion) } diff --git a/avalanchego/vms/proposervm/summary/state_summary.go b/avalanchego/vms/proposervm/summary/state_summary.go index 25c29b6e..14213a66 100644 --- a/avalanchego/vms/proposervm/summary/state_summary.go +++ b/avalanchego/vms/proposervm/summary/state_summary.go @@ -1,11 +1,9 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package summary -import ( - "github.com/ava-labs/avalanchego/ids" -) +import "github.com/ava-labs/avalanchego/ids" var _ StateSummary = (*stateSummary)(nil) diff --git a/avalanchego/vms/proposervm/tree/tree.go b/avalanchego/vms/proposervm/tree/tree.go index 63a36dd6..38125ba9 100644 --- a/avalanchego/vms/proposervm/tree/tree.go +++ b/avalanchego/vms/proposervm/tree/tree.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package tree @@ -12,6 +12,23 @@ import ( "github.com/ava-labs/avalanchego/snow/consensus/snowman" ) +// Tree handles the propagation of block acceptance and rejection to inner +// blocks. +// +// The Tree is needed because: +// 1. The consensus engine guarantees that for each verified block, either +// Accept() or Reject() are eventually called, and they are called only once. +// The proposervm must maintain these invariants for the wrapped VM. +// 2. A given inner block may be wrapped into multiple different proposervm +// blocks (e.g. same inner block generated by two validators). 
+// +// The Tree prevents Accept() and Reject() from being called multiple times on +// the same inner block by: +// 1. tracking inner blocks in a tree-like structure, to be able to easily spot +// siblings +// 2. rejecting an inner block only when one of the siblings is accepted. +// Rejection of a proposervm block does not imply its inner block rejection +// (it may be held by a different proposervm block). type Tree interface { // Add places the block in the tree Add(snowman.Block) diff --git a/avalanchego/vms/proposervm/tree/tree_test.go b/avalanchego/vms/proposervm/tree/tree_test.go index 979943b8..1e826e41 100644 --- a/avalanchego/vms/proposervm/tree/tree_test.go +++ b/avalanchego/vms/proposervm/tree/tree_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package tree @@ -43,9 +43,11 @@ func TestAcceptSingleBlock(t *testing.T) { _, contains = tr.Get(block) require.True(contains) - err := tr.Accept(context.Background(), block) - require.NoError(err) + require.NoError(tr.Accept(context.Background(), block)) require.Equal(choices.Accepted, block.Status()) + + _, contains = tr.Get(block) + require.False(contains) } func TestAcceptBlockConflict(t *testing.T) { @@ -69,19 +71,26 @@ func TestAcceptBlockConflict(t *testing.T) { tr := New() + // add conflicting blocks tr.Add(blockToAccept) - tr.Add(blockToReject) - _, contains := tr.Get(blockToAccept) require.True(contains) + tr.Add(blockToReject) _, contains = tr.Get(blockToReject) require.True(contains) - err := tr.Accept(context.Background(), blockToAccept) - require.NoError(err) + // accept one of them + require.NoError(tr.Accept(context.Background(), blockToAccept)) + + // check their statuses and that they are removed from the tree require.Equal(choices.Accepted, blockToAccept.Status()) + _, contains = tr.Get(blockToAccept) + require.False(contains) + 
require.Equal(choices.Rejected, blockToReject.Status()) + _, contains = tr.Get(blockToReject) + require.False(contains) } func TestAcceptChainConflict(t *testing.T) { @@ -113,22 +122,32 @@ func TestAcceptChainConflict(t *testing.T) { tr := New() + // add conflicting blocks. tr.Add(blockToAccept) - tr.Add(blockToReject) - tr.Add(blockToRejectChild) - _, contains := tr.Get(blockToAccept) require.True(contains) + tr.Add(blockToReject) _, contains = tr.Get(blockToReject) require.True(contains) + tr.Add(blockToRejectChild) _, contains = tr.Get(blockToRejectChild) require.True(contains) - err := tr.Accept(context.Background(), blockToAccept) - require.NoError(err) + // accept one of them + require.NoError(tr.Accept(context.Background(), blockToAccept)) + + // check their statuses and whether they are removed from tree require.Equal(choices.Accepted, blockToAccept.Status()) + _, contains = tr.Get(blockToAccept) + require.False(contains) + require.Equal(choices.Rejected, blockToReject.Status()) + _, contains = tr.Get(blockToReject) + require.False(contains) + require.Equal(choices.Rejected, blockToRejectChild.Status()) + _, contains = tr.Get(blockToRejectChild) + require.False(contains) } diff --git a/avalanchego/vms/proposervm/vm.go b/avalanchego/vms/proposervm/vm.go index 443f1e11..bcc2fa61 100644 --- a/avalanchego/vms/proposervm/vm.go +++ b/avalanchego/vms/proposervm/vm.go @@ -1,24 +1,21 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package proposervm import ( "context" - "crypto" - "crypto/x509" + "errors" "fmt" "time" "github.com/prometheus/client_golang/prometheus" - "go.uber.org/zap" "github.com/ava-labs/avalanchego/api/metrics" "github.com/ava-labs/avalanchego/cache" "github.com/ava-labs/avalanchego/cache/metercacher" "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/database/manager" "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/ids" @@ -27,9 +24,10 @@ import ( "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" - "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/math" "github.com/ava-labs/avalanchego/utils/timer/mockable" + "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/vms/proposervm/indexer" "github.com/ava-labs/avalanchego/vms/proposervm/proposer" "github.com/ava-labs/avalanchego/vms/proposervm/scheduler" @@ -43,35 +41,39 @@ const ( // DefaultMinBlockDelay should be kept as whole seconds because block // timestamps are only specific to the second. DefaultMinBlockDelay = time.Second + // DefaultNumHistoricalBlocks as 0 results in never deleting any historical + // blocks. + DefaultNumHistoricalBlocks uint64 = 0 checkIndexedFrequency = 10 * time.Second - innerBlkCacheSize = 512 + innerBlkCacheSize = 64 * units.MiB ) var ( - _ block.ChainVM = (*VM)(nil) - _ block.BatchedChainVM = (*VM)(nil) - _ block.HeightIndexedChainVM = (*VM)(nil) - _ block.StateSyncableVM = (*VM)(nil) + _ block.ChainVM = (*VM)(nil) + _ block.BatchedChainVM = (*VM)(nil) + _ block.StateSyncableVM = (*VM)(nil) + + // TODO: remove after the X-chain supports height indexing. 
+ mainnetXChainID = ids.FromStringOrPanic("2oYMBNV4eNHyqk2fjjV5nVQLDbtmNJzq5s3qs3Lo6ftnC6FByM") + fujiXChainID = ids.FromStringOrPanic("2JVSBoinj9C2J33VntvzYtVJNZdN2NKiwwKjcumHUWEb5DbBrm") dbPrefix = []byte("proposervm") + + errHeightIndexInvalidWhilePruning = errors.New("height index invalid while pruning old blocks") ) +func cachedBlockSize(_ ids.ID, blk snowman.Block) int { + return ids.IDLen + len(blk.Bytes()) + constants.PointerOverhead +} + type VM struct { block.ChainVM + Config blockBuilderVM block.BuildBlockWithContextChainVM batchedVM block.BatchedChainVM - hVM block.HeightIndexedChainVM ssVM block.StateSyncableVM - activationTime time.Time - minimumPChainHeight uint64 - minBlkDelay time.Duration - // block signer - stakingLeafSigner crypto.Signer - // block certificate - stakingCertLeaf *x509.Certificate - state.State hIndexer indexer.HeightIndexer @@ -111,35 +113,24 @@ type VM struct { // timestamps are only specific to the second. func New( vm block.ChainVM, - activationTime time.Time, - minimumPChainHeight uint64, - minBlkDelay time.Duration, - stakingLeafSigner crypto.Signer, - stakingCertLeaf *x509.Certificate, + config Config, ) *VM { blockBuilderVM, _ := vm.(block.BuildBlockWithContextChainVM) batchedVM, _ := vm.(block.BatchedChainVM) - hVM, _ := vm.(block.HeightIndexedChainVM) ssVM, _ := vm.(block.StateSyncableVM) return &VM{ ChainVM: vm, + Config: config, blockBuilderVM: blockBuilderVM, batchedVM: batchedVM, - hVM: hVM, ssVM: ssVM, - - activationTime: activationTime, - minimumPChainHeight: minimumPChainHeight, - minBlkDelay: minBlkDelay, - stakingLeafSigner: stakingLeafSigner, - stakingCertLeaf: stakingCertLeaf, } } func (vm *VM) Initialize( ctx context.Context, chainCtx *snow.Context, - dbManager manager.Manager, + db database.Database, genesisBytes []byte, upgradeBytes []byte, configBytes []byte, @@ -165,16 +156,21 @@ func (vm *VM) Initialize( chainCtx.Metrics = optionalGatherer vm.ctx = chainCtx - rawDB := dbManager.Current().Database - 
prefixDB := prefixdb.New(dbPrefix, rawDB) - vm.db = versiondb.New(prefixDB) - vm.State = state.New(vm.db) + vm.db = versiondb.New(prefixdb.New(dbPrefix, db)) + baseState, err := state.NewMetered(vm.db, "state", registerer) + if err != nil { + return err + } + vm.State = baseState vm.Windower = proposer.New(chainCtx.ValidatorState, chainCtx.SubnetID, chainCtx.ChainID) vm.Tree = tree.New() - innerBlkCache, err := metercacher.New[ids.ID, snowman.Block]( + innerBlkCache, err := metercacher.New( "inner_block_cache", registerer, - &cache.LRU[ids.ID, snowman.Block]{Size: innerBlkCacheSize}, + cache.NewSizedLRU( + innerBlkCacheSize, + cachedBlockSize, + ), ) if err != nil { return err @@ -182,7 +178,6 @@ func (vm *VM) Initialize( vm.innerBlkCache = innerBlkCache indexerDB := versiondb.New(vm.db) - // TODO: Use [state.NewMetered] here to populate additional metrics. indexerState := state.New(indexerDB) vm.hIndexer = indexer.NewHeightIndexer(vm, vm.ctx.Log, indexerState) @@ -195,7 +190,7 @@ func (vm *VM) Initialize( }) vm.verifiedBlocks = make(map[ids.ID]PostForkBlock) - detachedCtx := utils.Detach(ctx) + detachedCtx := context.WithoutCancel(ctx) context, cancel := context.WithCancel(detachedCtx) vm.context = context vm.onShutdown = cancel @@ -203,7 +198,7 @@ func (vm *VM) Initialize( err = vm.ChainVM.Initialize( ctx, chainCtx, - dbManager, + db, genesisBytes, upgradeBytes, configBytes, @@ -219,13 +214,38 @@ func (vm *VM) Initialize( return err } - return vm.setLastAcceptedMetadata(ctx) + if err := vm.setLastAcceptedMetadata(ctx); err != nil { + return err + } + + if err := vm.pruneOldBlocks(); err != nil { + return err + } + + forkHeight, err := vm.getForkHeight() + switch err { + case nil: + chainCtx.Log.Info("initialized proposervm", + zap.String("state", "after fork"), + zap.Uint64("forkHeight", forkHeight), + zap.Uint64("lastAcceptedHeight", vm.lastAcceptedHeight), + ) + case database.ErrNotFound: + chainCtx.Log.Info("initialized proposervm", + zap.String("state", 
"before fork"), + ) + default: + return err + } + return nil } // shutdown ops then propagate shutdown to innerVM func (vm *VM) Shutdown(ctx context.Context) error { vm.onShutdown() + vm.Scheduler.Close() + if err := vm.db.Commit(); err != nil { return err } @@ -256,6 +276,11 @@ func (vm *VM) SetState(ctx context.Context, newState snow.State) error { func (vm *VM) BuildBlock(ctx context.Context) (snowman.Block, error) { preferredBlock, err := vm.getBlock(ctx, vm.preferred) if err != nil { + vm.ctx.Log.Error("unexpected build block failure", + zap.String("reason", "failed to fetch preferred block"), + zap.Stringer("parentID", vm.preferred), + zap.Error(err), + ) return nil, err } @@ -293,18 +318,59 @@ func (vm *VM) SetPreference(ctx context.Context, preferred ids.ID) error { return err } - // reset scheduler - minDelay, err := vm.Windower.Delay(ctx, blk.Height()+1, pChainHeight, vm.ctx.NodeID) + var ( + childBlockHeight = blk.Height() + 1 + parentTimestamp = blk.Timestamp() + nextStartTime time.Time + ) + if vm.IsDurangoActivated(parentTimestamp) { + currentTime := vm.Clock.Time().Truncate(time.Second) + nextStartTime, err = vm.getPostDurangoSlotTime( + ctx, + childBlockHeight, + pChainHeight, + proposer.TimeToSlot(parentTimestamp, currentTime), + parentTimestamp, + ) + } else { + nextStartTime, err = vm.getPreDurangoSlotTime( + ctx, + childBlockHeight, + pChainHeight, + parentTimestamp, + ) + } if err != nil { vm.ctx.Log.Debug("failed to fetch the expected delay", zap.Error(err), ) + // A nil error is returned here because it is possible that // bootstrapping caused the last accepted block to move past the latest // P-chain height. This will cause building blocks to return an error // until the P-chain's height has advanced. 
return nil } + vm.Scheduler.SetBuildBlockTime(nextStartTime) + + vm.ctx.Log.Debug("set preference", + zap.Stringer("blkID", blk.ID()), + zap.Time("blockTimestamp", parentTimestamp), + zap.Time("nextStartTime", nextStartTime), + ) + return nil +} + +func (vm *VM) getPreDurangoSlotTime( + ctx context.Context, + blkHeight, + pChainHeight uint64, + parentTimestamp time.Time, +) (time.Time, error) { + delay, err := vm.Windower.Delay(ctx, blkHeight, pChainHeight, vm.ctx.NodeID, proposer.MaxBuildWindows) + if err != nil { + return time.Time{}, err + } // Note: The P-chain does not currently try to target any block time. It // notifies the consensus engine as soon as a new block may be built. To @@ -312,20 +378,39 @@ func (vm *VM) SetPreference(ctx context.Context, preferred ids.ID) error { // validators can specify. This delay may be an issue for high performance, // custom VMs. Until the P-chain is modified to target a specific block // time, ProposerMinBlockDelay can be configured in the subnet config. - if minDelay < vm.minBlkDelay { - minDelay = vm.minBlkDelay - } - - preferredTime := blk.Timestamp() - nextStartTime := preferredTime.Add(minDelay) - vm.Scheduler.SetBuildBlockTime(nextStartTime) + delay = max(delay, vm.MinBlkDelay) + return parentTimestamp.Add(delay), nil +} - vm.ctx.Log.Debug("set preference", - zap.Stringer("blkID", blk.ID()), - zap.Time("blockTimestamp", preferredTime), - zap.Time("nextStartTime", nextStartTime), +func (vm *VM) getPostDurangoSlotTime( + ctx context.Context, + blkHeight, + pChainHeight, + slot uint64, + parentTimestamp time.Time, +) (time.Time, error) { + delay, err := vm.Windower.MinDelayForProposer( + ctx, + blkHeight, + pChainHeight, + vm.ctx.NodeID, + slot, ) - return nil + // Note: The P-chain does not currently try to target any block time. It + // notifies the consensus engine as soon as a new block may be built. To + // avoid fast runs of blocks there is an additional minimum delay that + // validators can specify. 
This delay may be an issue for high performance, + // custom VMs. Until the P-chain is modified to target a specific block + // time, ProposerMinBlockDelay can be configured in the subnet config. + switch { + case err == nil: + delay = max(delay, vm.MinBlkDelay) + return parentTimestamp.Add(delay), err + case errors.Is(err, proposer.ErrAnyoneCanPropose): + return parentTimestamp.Add(vm.MinBlkDelay), err + default: + return time.Time{}, err + } } func (vm *VM) LastAccepted(ctx context.Context) (ids.ID, error) { @@ -339,12 +424,7 @@ func (vm *VM) LastAccepted(ctx context.Context) (ids.ID, error) { // repair makes sure that vm and innerVM chains are in sync. // Moreover it fixes vm's height index if defined. func (vm *VM) repair(ctx context.Context) error { - if vm.hVM == nil { - // height index not defined. Just sync vms and innerVM chains. - return vm.repairAcceptedChainByIteration(ctx) - } - - switch vm.hVM.VerifyHeightIndex(ctx) { + switch err := vm.ChainVM.VerifyHeightIndex(ctx); err { case nil: // InnerVM height index is complete. We can immediately verify // and repair this VM height index. @@ -359,7 +439,12 @@ func (vm *VM) repair(ctx context.Context) error { } case block.ErrIndexIncomplete: default: - return nil + return err + } + + if vm.NumHistoricalBlocks != 0 { + vm.ctx.Log.Fatal("block height index must be valid when pruning historical blocks") + return errHeightIndexInvalidWhilePruning } // innerVM height index is incomplete. Sync vm and innerVM chains first. @@ -376,7 +461,7 @@ func (vm *VM) repair(ctx context.Context) error { for { // The underlying VM expects the lock to be held here. vm.ctx.Lock.Lock() - err := vm.hVM.VerifyHeightIndex(ctx) + err := vm.ChainVM.VerifyHeightIndex(ctx) vm.ctx.Lock.Unlock() if err == nil { @@ -537,14 +622,21 @@ func (vm *VM) repairAcceptedChainByHeight(ctx context.Context) error { return nil } - // The inner vm must be behind the proposer vm, so we must roll the proposervm back. 
+ vm.ctx.Log.Info("repairing accepted chain by height", + zap.Uint64("outerHeight", proLastAcceptedHeight), + zap.Uint64("innerHeight", innerLastAcceptedHeight), + ) + + // The inner vm must be behind the proposer vm, so we must roll the + // proposervm back. forkHeight, err := vm.State.GetForkHeight() if err != nil { return err } if forkHeight > innerLastAcceptedHeight { - // We are rolling back past the fork, so we should just forget about all of our proposervm indices. + // We are rolling back past the fork, so we should just forget about all + // of our proposervm indices. if err := vm.State.DeleteLastAccepted(); err != nil { return err } @@ -553,7 +645,10 @@ func (vm *VM) repairAcceptedChainByHeight(ctx context.Context) error { newProLastAcceptedID, err := vm.State.GetBlockIDAtHeight(innerLastAcceptedHeight) if err != nil { - return err + // This fatal error can happen if NumHistoricalBlocks is set too + // aggressively and the inner vm rolled back before the oldest + // proposervm block. + return fmt.Errorf("proposervm failed to rollback last accepted block to height (%d): %w", innerLastAcceptedHeight, err) } if err := vm.State.SetLastAccepted(newProLastAcceptedID); err != nil { @@ -598,7 +693,7 @@ func (vm *VM) setLastAcceptedMetadata(ctx context.Context) error { } func (vm *VM) parsePostForkBlock(ctx context.Context, b []byte) (PostForkBlock, error) { - statelessBlock, err := statelessblock.Parse(b) + statelessBlock, err := statelessblock.Parse(b, vm.DurangoTime) if err != nil { return nil, err } @@ -656,6 +751,30 @@ func (vm *VM) getBlock(ctx context.Context, id ids.ID) (Block, error) { return vm.getPreForkBlock(ctx, id) } +// TODO: remove after the P-chain and X-chain support height indexing. +func (vm *VM) getForkHeight() (uint64, error) { + // The fork block can be easily identified with the provided links because + // the `Parent Hash` is equal to the `Proposer Parent ID`. 
+ switch vm.ctx.ChainID { + case constants.PlatformChainID: + switch vm.ctx.NetworkID { + case constants.MainnetID: + return 805732, nil // https://subnets.avax.network/p-chain/block/805732 + case constants.SongbirdID: + return 9, nil + case constants.CostonID: + return 6, nil + case constants.FlareID, constants.CostwoID: + return 1, nil + } + case mainnetXChainID: + return 1, nil // https://subnets.avax.network/x-chain/block/1 + case fujiXChainID: + return 1, nil // https://subnets-test.avax.network/x-chain/block/1 + } + return vm.GetForkHeight() +} + func (vm *VM) getPostForkBlock(ctx context.Context, blkID ids.ID) (PostForkBlock, error) { block, exists := vm.verifiedBlocks[blkID] if exists { @@ -701,12 +820,20 @@ func (vm *VM) getPreForkBlock(ctx context.Context, blkID ids.ID) (*preForkBlock, }, err } -func (vm *VM) storePostForkBlock(blk PostForkBlock) error { - if err := vm.State.PutBlock(blk.getStatelessBlk(), blk.Status()); err != nil { - return err - } +func (vm *VM) acceptPostForkBlock(blk PostForkBlock) error { height := blk.Height() blkID := blk.ID() + + vm.lastAcceptedHeight = height + delete(vm.verifiedBlocks, blkID) + + // Persist this block, its height index, and its status + if err := vm.State.SetLastAccepted(blkID); err != nil { + return err + } + if err := vm.State.PutBlock(blk.getStatelessBlk(), choices.Accepted); err != nil { + return err + } if err := vm.updateHeightIndex(height, blkID); err != nil { return err } @@ -782,7 +909,7 @@ func (vm *VM) optimalPChainHeight(ctx context.Context, minPChainHeight uint64) ( return 0, err } - return math.Max(minimumHeight, minPChainHeight), nil + return max(minimumHeight, minPChainHeight), nil } // parseInnerBlock attempts to parse the provided bytes as an inner block. 
If diff --git a/avalanchego/vms/proposervm/vm_byzantine_test.go b/avalanchego/vms/proposervm/vm_byzantine_test.go index d471fd70..c9ad1b98 100644 --- a/avalanchego/vms/proposervm/vm_byzantine_test.go +++ b/avalanchego/vms/proposervm/vm_byzantine_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package proposervm @@ -7,17 +7,17 @@ import ( "bytes" "context" "encoding/hex" - "errors" "testing" "time" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/vms/proposervm/block" - "github.com/ava-labs/avalanchego/vms/proposervm/proposer" ) // Ensure that a byzantine node issuing an invalid PreForkBlock (Y) when the @@ -30,37 +30,37 @@ import ( // | // Y func TestInvalidByzantineProposerParent(t *testing.T) { - forkTime := time.Unix(0, 0) // enable ProBlks - coreVM, _, proVM, gBlock, _ := initTestProposerVM(t, forkTime, 0) + require := require.New(t) + + var ( + activationTime = time.Unix(0, 0) + durangoTime = activationTime + ) + coreVM, _, proVM, gBlock, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + defer func() { + require.NoError(proVM.Shutdown(context.Background())) + }() xBlock := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, - BytesV: []byte{1}, - ParentV: gBlock.ID(), - HeightV: gBlock.Height() + 1, - TimestampV: gBlock.Timestamp().Add(proposer.MaxDelay), + BytesV: []byte{1}, + ParentV: gBlock.ID(), + HeightV: gBlock.Height() + 1, } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return xBlock, nil } aBlock, err := proVM.BuildBlock(context.Background()) - if err 
!= nil { - t.Fatalf("proposerVM could not build block due to %s", err) - } + require.NoError(err) coreVM.BuildBlockF = nil - if err := aBlock.Verify(context.Background()); err != nil { - t.Fatalf("could not verify valid block due to %s", err) - } - - if err := aBlock.Accept(context.Background()); err != nil { - t.Fatalf("could not accept valid block due to %s", err) - } + require.NoError(aBlock.Verify(context.Background())) + require.NoError(aBlock.Accept(context.Background())) yBlockBytes := []byte{2} yBlock := &snowman.TestBlock{ @@ -68,10 +68,9 @@ func TestInvalidByzantineProposerParent(t *testing.T) { IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, - BytesV: yBlockBytes, - ParentV: xBlock.ID(), - HeightV: xBlock.Height() + 1, - TimestampV: xBlock.Timestamp().Add(proposer.MaxDelay), + BytesV: yBlockBytes, + ParentV: xBlock.ID(), + HeightV: xBlock.Height() + 1, } coreVM.ParseBlockF = func(_ context.Context, blockBytes []byte) (snowman.Block, error) { @@ -88,9 +87,8 @@ func TestInvalidByzantineProposerParent(t *testing.T) { } // If there wasn't an error parsing - verify must return an error - if err := parsedBlock.Verify(context.Background()); err == nil { - t.Fatal("should have marked the parsed block as invalid") - } + err = parsedBlock.Verify(context.Background()) + require.ErrorIs(err, errUnknownBlock) } // Ensure that a byzantine node issuing an invalid PreForkBlock (Y or Z) when @@ -103,8 +101,17 @@ func TestInvalidByzantineProposerParent(t *testing.T) { // / \ // Y Z func TestInvalidByzantineProposerOracleParent(t *testing.T) { - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) + require := require.New(t) + + var ( + activationTime = time.Unix(0, 0) + durangoTime = activationTime + ) + coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) proVM.Set(coreGenBlk.Timestamp()) + defer func() { + require.NoError(proVM.Shutdown(context.Background())) + }() xBlockID := ids.GenerateTestID() xBlock 
:= &TestOptionsBlock{ @@ -113,9 +120,8 @@ func TestInvalidByzantineProposerOracleParent(t *testing.T) { IDV: xBlockID, StatusV: choices.Processing, }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - TimestampV: coreGenBlk.Timestamp(), + BytesV: []byte{1}, + ParentV: coreGenBlk.ID(), }, opts: [2]snowman.Block{ &snowman.TestBlock{ @@ -123,18 +129,16 @@ func TestInvalidByzantineProposerOracleParent(t *testing.T) { IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, - BytesV: []byte{2}, - ParentV: xBlockID, - TimestampV: coreGenBlk.Timestamp(), + BytesV: []byte{2}, + ParentV: xBlockID, }, &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, - BytesV: []byte{3}, - ParentV: xBlockID, - TimestampV: coreGenBlk.Timestamp(), + BytesV: []byte{3}, + ParentV: xBlockID, }, }, } @@ -172,46 +176,28 @@ func TestInvalidByzantineProposerOracleParent(t *testing.T) { } aBlockIntf, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatal("could not build post fork oracle block") - } - - aBlock, ok := aBlockIntf.(*postForkBlock) - if !ok { - t.Fatal("expected post fork block") - } + require.NoError(err) + require.IsType(&postForkBlock{}, aBlockIntf) + aBlock := aBlockIntf.(*postForkBlock) opts, err := aBlock.Options(context.Background()) - if err != nil { - t.Fatal("could not retrieve options from post fork oracle block") - } + require.NoError(err) - if err := aBlock.Verify(context.Background()); err != nil { - t.Fatal(err) - } - if err := opts[0].Verify(context.Background()); err != nil { - t.Fatal(err) - } - if err := opts[1].Verify(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(aBlock.Verify(context.Background())) + require.NoError(opts[0].Verify(context.Background())) + require.NoError(opts[1].Verify(context.Background())) - yBlock, err := proVM.ParseBlock(context.Background(), xBlock.opts[0].Bytes()) - if err != nil { - // It's okay for this block not to be parsed - 
return - } - if err := yBlock.Verify(context.Background()); err == nil { - t.Fatal("unexpectedly passed block verification") - } + wrappedXBlock, err := proVM.ParseBlock(context.Background(), xBlock.Bytes()) + require.NoError(err) - if err := aBlock.Accept(context.Background()); err != nil { - t.Fatal(err) - } + err = wrappedXBlock.Verify(context.Background()) + require.ErrorIs(err, errUnexpectedBlockType) - if err := yBlock.Verify(context.Background()); err == nil { - t.Fatal("unexpectedly passed block verification") - } + require.NoError(aBlock.Accept(context.Background())) + + // Because the wrappedXBlock never passed verification and is now rejected, + // the consensus engine will never verify any of its children. + require.Equal(choices.Rejected, wrappedXBlock.Status()) } // Ensure that a byzantine node issuing an invalid PostForkBlock (B) when the @@ -224,40 +210,39 @@ func TestInvalidByzantineProposerOracleParent(t *testing.T) { // / | // B - Y func TestInvalidByzantineProposerPreForkParent(t *testing.T) { - forkTime := time.Unix(0, 0) // enable ProBlks - coreVM, _, proVM, gBlock, _ := initTestProposerVM(t, forkTime, 0) + require := require.New(t) + + var ( + activationTime = time.Unix(0, 0) + durangoTime = activationTime + ) + coreVM, _, proVM, gBlock, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + defer func() { + require.NoError(proVM.Shutdown(context.Background())) + }() xBlock := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, - BytesV: []byte{1}, - ParentV: gBlock.ID(), - HeightV: gBlock.Height() + 1, - TimestampV: gBlock.Timestamp().Add(proposer.MaxDelay), + BytesV: []byte{1}, + ParentV: gBlock.ID(), + HeightV: gBlock.Height() + 1, } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return xBlock, nil } - aBlock, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatalf("proposerVM could not build block due to %s", err) - } - - 
coreVM.BuildBlockF = nil - yBlockBytes := []byte{2} yBlock := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, - BytesV: yBlockBytes, - ParentV: xBlock.ID(), - HeightV: xBlock.Height() + 1, - TimestampV: xBlock.Timestamp().Add(proposer.MaxDelay), + BytesV: yBlockBytes, + ParentV: xBlock.ID(), + HeightV: xBlock.Height() + 1, } coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { @@ -285,39 +270,24 @@ func TestInvalidByzantineProposerPreForkParent(t *testing.T) { } } - bStatelessBlock, err := block.BuildUnsigned( - xBlock.ID(), - yBlock.Timestamp(), - 0, - yBlockBytes, - ) - if err != nil { - t.Fatal(err) - } + aBlock, err := proVM.BuildBlock(context.Background()) + require.NoError(err) + coreVM.BuildBlockF = nil - bBlock, err := proVM.ParseBlock(context.Background(), bStatelessBlock.Bytes()) - if err != nil { - // If there was an error parsing, then this is fine. - return - } + require.NoError(aBlock.Verify(context.Background())) - if err := aBlock.Verify(context.Background()); err != nil { - t.Fatalf("could not verify valid block due to %s", err) - } + wrappedXBlock, err := proVM.ParseBlock(context.Background(), xBlock.Bytes()) + require.NoError(err) // If there wasn't an error parsing - verify must return an error - if err := bBlock.Verify(context.Background()); err == nil { - t.Fatal("should have marked the parsed block as invalid") - } + err = wrappedXBlock.Verify(context.Background()) + require.ErrorIs(err, errUnexpectedBlockType) - if err := aBlock.Accept(context.Background()); err != nil { - t.Fatalf("could not accept valid block due to %s", err) - } + require.NoError(aBlock.Accept(context.Background())) - // If there wasn't an error parsing - verify must return an error - if err := bBlock.Verify(context.Background()); err == nil { - t.Fatal("should have marked the parsed block as invalid") - } + // Because the wrappedXBlock never passed verification and is now 
rejected, + // the consensus engine will never verify any of its children. + require.Equal(choices.Rejected, wrappedXBlock.Status()) } // Ensure that a byzantine node issuing an invalid OptionBlock (B) which @@ -330,8 +300,17 @@ func TestInvalidByzantineProposerPreForkParent(t *testing.T) { // | / // B - Y func TestBlockVerify_PostForkOption_FaultyParent(t *testing.T) { - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) + require := require.New(t) + + var ( + activationTime = time.Unix(0, 0) + durangoTime = activationTime + ) + coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) proVM.Set(coreGenBlk.Timestamp()) + defer func() { + require.NoError(proVM.Shutdown(context.Background())) + }() xBlock := &TestOptionsBlock{ TestBlock: snowman.TestBlock{ @@ -339,9 +318,8 @@ func TestBlockVerify_PostForkOption_FaultyParent(t *testing.T) { IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - TimestampV: coreGenBlk.Timestamp(), + BytesV: []byte{1}, + ParentV: coreGenBlk.ID(), }, opts: [2]snowman.Block{ &snowman.TestBlock{ @@ -349,18 +327,16 @@ func TestBlockVerify_PostForkOption_FaultyParent(t *testing.T) { IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, - BytesV: []byte{2}, - ParentV: coreGenBlk.ID(), // valid block should reference xBlock - TimestampV: coreGenBlk.Timestamp(), + BytesV: []byte{2}, + ParentV: coreGenBlk.ID(), // valid block should reference xBlock }, &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, - BytesV: []byte{3}, - ParentV: coreGenBlk.ID(), // valid block should reference xBlock - TimestampV: coreGenBlk.Timestamp(), + BytesV: []byte{3}, + ParentV: coreGenBlk.ID(), // valid block should reference xBlock }, }, } @@ -398,28 +374,18 @@ func TestBlockVerify_PostForkOption_FaultyParent(t *testing.T) { } aBlockIntf, err := proVM.BuildBlock(context.Background()) - if 
err != nil { - t.Fatal("could not build post fork oracle block") - } + require.NoError(err) - aBlock, ok := aBlockIntf.(*postForkBlock) - if !ok { - t.Fatal("expected post fork block") - } + require.IsType(&postForkBlock{}, aBlockIntf) + aBlock := aBlockIntf.(*postForkBlock) opts, err := aBlock.Options(context.Background()) - if err != nil { - t.Fatal("could not retrieve options from post fork oracle block") - } + require.NoError(err) - if err := aBlock.Verify(context.Background()); err != nil { - t.Fatal(err) - } - if err := opts[0].Verify(context.Background()); err == nil { - t.Fatal("option 0 has invalid parent, should not verify") - } - if err := opts[1].Verify(context.Background()); err == nil { - t.Fatal("option 1 has invalid parent, should not verify") - } + require.NoError(aBlock.Verify(context.Background())) + err = opts[0].Verify(context.Background()) + require.ErrorIs(err, errInnerParentMismatch) + err = opts[1].Verify(context.Background()) + require.ErrorIs(err, errInnerParentMismatch) } // ,--G ----. 
@@ -434,8 +400,17 @@ func TestBlockVerify_PostForkOption_FaultyParent(t *testing.T) { // O2.parent = A (original), O2.inner = first option of X (valid) // O3.parent = C (Oracle), O3.inner = first option of X (invalid parent) func TestBlockVerify_InvalidPostForkOption(t *testing.T) { - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) + require := require.New(t) + + var ( + activationTime = time.Unix(0, 0) + durangoTime = activationTime + ) + coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) proVM.Set(coreGenBlk.Timestamp()) + defer func() { + require.NoError(proVM.Shutdown(context.Background())) + }() // create an Oracle pre-fork block X xBlockID := ids.GenerateTestID() @@ -445,9 +420,8 @@ func TestBlockVerify_InvalidPostForkOption(t *testing.T) { IDV: xBlockID, StatusV: choices.Processing, }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - TimestampV: coreGenBlk.Timestamp(), + BytesV: []byte{1}, + ParentV: coreGenBlk.ID(), }, opts: [2]snowman.Block{ &snowman.TestBlock{ @@ -455,26 +429,22 @@ func TestBlockVerify_InvalidPostForkOption(t *testing.T) { IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, - BytesV: []byte{2}, - ParentV: xBlockID, - TimestampV: coreGenBlk.Timestamp(), + BytesV: []byte{2}, + ParentV: xBlockID, }, &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, - BytesV: []byte{3}, - ParentV: xBlockID, - TimestampV: coreGenBlk.Timestamp(), + BytesV: []byte{3}, + ParentV: xBlockID, }, }, } xInnerOptions, err := xBlock.Options(context.Background()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) xInnerOption := xInnerOptions[0] // create a non-Oracle pre-fork block Y @@ -483,10 +453,9 @@ func TestBlockVerify_InvalidPostForkOption(t *testing.T) { IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - TimestampV: 
coreGenBlk.Timestamp(), + BytesV: []byte{1}, + ParentV: coreGenBlk.ID(), + HeightV: coreGenBlk.Height() + 1, } ySlb, err := block.BuildUnsigned( @@ -495,9 +464,7 @@ func TestBlockVerify_InvalidPostForkOption(t *testing.T) { uint64(2000), yBlock.Bytes(), ) - if err != nil { - t.Fatalf("fail to manually build a block due to %s", err) - } + require.NoError(err) // create post-fork block B from Y bBlock := postForkBlock{ @@ -509,18 +476,14 @@ func TestBlockVerify_InvalidPostForkOption(t *testing.T) { }, } - if err := bBlock.Verify(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(bBlock.Verify(context.Background())) // generate O1 statelessOuterOption, err := block.BuildOption( bBlock.ID(), xInnerOption.Bytes(), ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) outerOption := &postForkOption{ Block: statelessOuterOption, @@ -531,30 +494,23 @@ func TestBlockVerify_InvalidPostForkOption(t *testing.T) { }, } - if err := outerOption.Verify(context.Background()); !errors.Is(err, errUnexpectedBlockType) { - t.Fatal(err) - } + err = outerOption.Verify(context.Background()) + require.ErrorIs(err, errUnexpectedBlockType) // generate A from X and O2 coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return xBlock, nil } aBlock, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) coreVM.BuildBlockF = nil - if err := aBlock.Verify(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(aBlock.Verify(context.Background())) statelessOuterOption, err = block.BuildOption( aBlock.ID(), xInnerOption.Bytes(), ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) outerOption = &postForkOption{ Block: statelessOuterOption, @@ -565,9 +521,7 @@ func TestBlockVerify_InvalidPostForkOption(t *testing.T) { }, } - if err := outerOption.Verify(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(outerOption.Verify(context.Background())) // create an 
Oracle pre-fork block Z // create post-fork block B from Y @@ -578,9 +532,8 @@ func TestBlockVerify_InvalidPostForkOption(t *testing.T) { IDV: zBlockID, StatusV: choices.Processing, }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - TimestampV: coreGenBlk.Timestamp(), + BytesV: []byte{1}, + ParentV: coreGenBlk.ID(), }, opts: [2]snowman.Block{ &snowman.TestBlock{ @@ -588,18 +541,16 @@ func TestBlockVerify_InvalidPostForkOption(t *testing.T) { IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, - BytesV: []byte{2}, - ParentV: zBlockID, - TimestampV: coreGenBlk.Timestamp(), + BytesV: []byte{2}, + ParentV: zBlockID, }, &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, - BytesV: []byte{3}, - ParentV: zBlockID, - TimestampV: coreGenBlk.Timestamp(), + BytesV: []byte{3}, + ParentV: zBlockID, }, }, } @@ -608,22 +559,16 @@ func TestBlockVerify_InvalidPostForkOption(t *testing.T) { return zBlock, nil } cBlock, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) coreVM.BuildBlockF = nil - if err := cBlock.Verify(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(cBlock.Verify(context.Background())) // generate O3 statelessOuterOption, err = block.BuildOption( cBlock.ID(), xInnerOption.Bytes(), ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) outerOption = &postForkOption{ Block: statelessOuterOption, @@ -634,13 +579,21 @@ func TestBlockVerify_InvalidPostForkOption(t *testing.T) { }, } - if err := outerOption.Verify(context.Background()); err != errInnerParentMismatch { - t.Fatal(err) - } + err = outerOption.Verify(context.Background()) + require.ErrorIs(err, errInnerParentMismatch) } func TestGetBlock_MutatedSignature(t *testing.T) { - coreVM, valState, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) + require := require.New(t) + + var ( + activationTime = time.Unix(0, 0) + durangoTime = 
activationTime + ) + coreVM, valState, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + defer func() { + require.NoError(proVM.Shutdown(context.Background())) + }() // Make sure that we will be sampled to perform the proposals. valState.GetValidatorSetF = func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { @@ -660,10 +613,9 @@ func TestGetBlock_MutatedSignature(t *testing.T) { IDV: ids.Empty.Prefix(1111), StatusV: choices.Processing, }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - TimestampV: coreGenBlk.Timestamp(), + BytesV: []byte{1}, + ParentV: coreGenBlk.ID(), + HeightV: coreGenBlk.Height() + 1, } coreBlk1 := &snowman.TestBlock{ @@ -671,10 +623,9 @@ func TestGetBlock_MutatedSignature(t *testing.T) { IDV: ids.Empty.Prefix(2222), StatusV: choices.Processing, }, - BytesV: []byte{2}, - ParentV: coreBlk0.ID(), - HeightV: coreBlk0.Height() + 1, - TimestampV: coreGenBlk.Timestamp(), + BytesV: []byte{2}, + ParentV: coreBlk0.ID(), + HeightV: coreBlk0.Height() + 1, } coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { @@ -708,19 +659,13 @@ func TestGetBlock_MutatedSignature(t *testing.T) { } builtBlk0, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatalf("could not build post fork block %s", err) - } + require.NoError(err) - if err := builtBlk0.Verify(context.Background()); err != nil { - t.Fatalf("failed to verify newly created block %s", err) - } + require.NoError(builtBlk0.Verify(context.Background())) - if err := proVM.SetPreference(context.Background(), builtBlk0.ID()); err != nil { - t.Fatal(err) - } + require.NoError(proVM.SetPreference(context.Background(), builtBlk0.ID())) - // The second propsal block will need to be signed because the timestamp + // The second proposal block will need to be signed because the timestamp // hasn't moved forward // Craft what would be the next block, but with an 
invalid signature: @@ -729,9 +674,7 @@ func TestGetBlock_MutatedSignature(t *testing.T) { // Invalid Bytes: 000000000000fd81ce4f1ab2650176d46a3d1fbb593af5717a2ada7dabdcef19622325a8ce8400000000000003e800000000000006d0000004a13082049d30820285a003020102020100300d06092a864886f70d01010b050030003020170d3939313233313030303030305a180f32313231313132333130313030305a300030820222300d06092a864886f70d01010105000382020f003082020a0282020100b9c3615c42d501f3b9d21ed127b31855827dbe12652e6e6f278991a3ad1ca55e2241b1cac69a0aeeefdd913db8ae445ff847789fdcbc1cbe6cce0a63109d1c1fb9d441c524a6eb1412f9b8090f1507e3e50a725f9d0a9d5db424ea229a7c11d8b91c73fecbad31c7b216bb2ac5e4d5ff080a80fabc73b34beb8fa46513ab59d489ce3f273c0edab43ded4d4914e081e6e850f9e502c3c4a54afc8a3a89d889aec275b7162a7616d53a61cd3ee466394212e5bef307790100142ad9e0b6c95ad2424c6e84d06411ad066d0c37d4d14125bae22b49ad2a761a09507bbfe43d023696d278d9fbbaf06c4ff677356113d3105e248078c33caed144d85929b1dd994df33c5d3445675104659ca9642c269b5cfa39c7bad5e399e7ebce3b5e6661f989d5f388006ebd90f0e035d533f5662cb925df8744f61289e66517b51b9a2f54792dca9078d5e12bf8ad79e35a68d4d661d15f0d3029d6c5903c845323d5426e49deaa2be2bc261423a9cd77df9a2706afaca27f589cc2c8f53e2a1f90eb5a3f8bcee0769971db6bacaec265d86b39380f69e3e0e06072de986feede26fe856c55e24e88ee5ac342653ac55a04e21b8517310c717dff0e22825c0944c6ba263f8f060099ea6e44a57721c7aa54e2790a4421fb85e3347e4572cba44e62b2cad19c1623c1cab4a715078e56458554cef8442769e6d5dd7f99a6234653a46828804f0203010001a320301e300e0603551d0f0101ff0404030204b0300c0603551d130101ff04023000300d06092a864886f70d01010b050003820201004ee2229d354720a751e2d2821134994f5679997113192626cf61594225cfdf51e6479e2c17e1013ab9dceb713bc0f24649e5cab463a8cf8617816ed736ac5251a853ff35e859ac6853ebb314f967ff7867c53512d42e329659375682c854ca9150cfa4c3964680e7650beb93e8b4a0d6489a9ca0ce0104752ba4d9cf3e2dc9436b56ecd0bd2e33cbbeb5a107ec4fd6f41a943c8bee06c0b32f4291a3e3759a7984d919a97d5d6517b841053df6e795ed33b52ed5e41357c3e431beb725e4e4f2ef956c44fd1f76fa4d847602e491c3585a90cdccfff982
405d388b83d6f32ea16da2f5e4595926a7d26078e32992179032d30831b1f1b42de1781c507536a49adb4c95bad04c171911eed30d63c73712873d1e8094355efb9aeee0c16f8599575fd7f8bb027024bad63b097d2230d8f0ba12a8ed23e618adc3d7cb6a63e02b82a6d4d74b21928dbcb6d3788c6fd45022d69f3ab94d914d97cd651db662e92918a5d891ef730a813f03aade2fe385b61f44840f8925ad3345df1c82c9de882bb7184b4cd0bbd9db8322aaedb4ff86e5be9635987e6c40455ab9b063cdb423bee2edcac47cf654487e9286f33bdbad10018f4db9564cee6e048570e1517a2e396501b5978a53d10a548aed26938c2f9aada3ae62d3fdae486deb9413dffb6524666453633d665c3712d0fec9f844632b2b3eaf0267ca495eb41dba8273862609de00000001020000000101 invalidBlkBytesHex := "000000000000fd81ce4f1ab2650176d46a3d1fbb593af5717a2ada7dabdcef19622325a8ce8400000000000003e800000000000006d0000004a13082049d30820285a003020102020100300d06092a864886f70d01010b050030003020170d3939313233313030303030305a180f32313231313132333130313030305a300030820222300d06092a864886f70d01010105000382020f003082020a0282020100b9c3615c42d501f3b9d21ed127b31855827dbe12652e6e6f278991a3ad1ca55e2241b1cac69a0aeeefdd913db8ae445ff847789fdcbc1cbe6cce0a63109d1c1fb9d441c524a6eb1412f9b8090f1507e3e50a725f9d0a9d5db424ea229a7c11d8b91c73fecbad31c7b216bb2ac5e4d5ff080a80fabc73b34beb8fa46513ab59d489ce3f273c0edab43ded4d4914e081e6e850f9e502c3c4a54afc8a3a89d889aec275b7162a7616d53a61cd3ee466394212e5bef307790100142ad9e0b6c95ad2424c6e84d06411ad066d0c37d4d14125bae22b49ad2a761a09507bbfe43d023696d278d9fbbaf06c4ff677356113d3105e248078c33caed144d85929b1dd994df33c5d3445675104659ca9642c269b5cfa39c7bad5e399e7ebce3b5e6661f989d5f388006ebd90f0e035d533f5662cb925df8744f61289e66517b51b9a2f54792dca9078d5e12bf8ad79e35a68d4d661d15f0d3029d6c5903c845323d5426e49deaa2be2bc261423a9cd77df9a2706afaca27f589cc2c8f53e2a1f90eb5a3f8bcee0769971db6bacaec265d86b39380f69e3e0e06072de986feede26fe856c55e24e88ee5ac342653ac55a04e21b8517310c717dff0e22825c0944c6ba263f8f060099ea6e44a57721c7aa54e2790a4421fb85e3347e4572cba44e62b2cad19c1623c1cab4a715078e56458554cef8442769e6d5dd7f99a6234653a46828804f0203010001a320301e
300e0603551d0f0101ff0404030204b0300c0603551d130101ff04023000300d06092a864886f70d01010b050003820201004ee2229d354720a751e2d2821134994f5679997113192626cf61594225cfdf51e6479e2c17e1013ab9dceb713bc0f24649e5cab463a8cf8617816ed736ac5251a853ff35e859ac6853ebb314f967ff7867c53512d42e329659375682c854ca9150cfa4c3964680e7650beb93e8b4a0d6489a9ca0ce0104752ba4d9cf3e2dc9436b56ecd0bd2e33cbbeb5a107ec4fd6f41a943c8bee06c0b32f4291a3e3759a7984d919a97d5d6517b841053df6e795ed33b52ed5e41357c3e431beb725e4e4f2ef956c44fd1f76fa4d847602e491c3585a90cdccfff982405d388b83d6f32ea16da2f5e4595926a7d26078e32992179032d30831b1f1b42de1781c507536a49adb4c95bad04c171911eed30d63c73712873d1e8094355efb9aeee0c16f8599575fd7f8bb027024bad63b097d2230d8f0ba12a8ed23e618adc3d7cb6a63e02b82a6d4d74b21928dbcb6d3788c6fd45022d69f3ab94d914d97cd651db662e92918a5d891ef730a813f03aade2fe385b61f44840f8925ad3345df1c82c9de882bb7184b4cd0bbd9db8322aaedb4ff86e5be9635987e6c40455ab9b063cdb423bee2edcac47cf654487e9286f33bdbad10018f4db9564cee6e048570e1517a2e396501b5978a53d10a548aed26938c2f9aada3ae62d3fdae486deb9413dffb6524666453633d665c3712d0fec9f844632b2b3eaf0267ca495eb41dba8273862609de00000001020000000101" invalidBlkBytes, err := hex.DecodeString(invalidBlkBytesHex) - if err != nil { - t.Fatal(err) - } + require.NoError(err) invalidBlk, err := proVM.ParseBlock(context.Background(), invalidBlkBytes) if err != nil { @@ -739,20 +682,14 @@ func TestGetBlock_MutatedSignature(t *testing.T) { t.Skip(err) } - if err := invalidBlk.Verify(context.Background()); err == nil { - t.Fatalf("verified block without valid signature") - } + err = invalidBlk.Verify(context.Background()) + require.ErrorIs(err, database.ErrNotFound) // Note that the invalidBlk.ID() is the same as the correct blk ID because // the signature isn't part of the blk ID. 
blkID, err := ids.FromString("2R3Uz98YmxHUJARWv6suApPdAbbZ7X7ipat1gZuZNNhC5wPwJW") - if err != nil { - t.Fatal(err) - } - - if blkID != invalidBlk.ID() { - t.Fatalf("unexpected block ID; expected = %s , got = %s", blkID, invalidBlk.ID()) - } + require.NoError(err) + require.Equal(blkID, invalidBlk.ID()) // GetBlock shouldn't really be able to succeed, as we don't have a valid // representation of [blkID] @@ -764,7 +701,5 @@ func TestGetBlock_MutatedSignature(t *testing.T) { // GetBlock returned, so it must have somehow gotten a valid representation // of [blkID]. - if err := fetchedBlk.Verify(context.Background()); err != nil { - t.Fatalf("GetBlock returned an invalid block when the ID represented a potentially valid block: %s", err) - } + require.NoError(fetchedBlk.Verify(context.Background())) } diff --git a/avalanchego/vms/proposervm/vm_regression_test.go b/avalanchego/vms/proposervm/vm_regression_test.go new file mode 100644 index 00000000..ac34df12 --- /dev/null +++ b/avalanchego/vms/proposervm/vm_regression_test.go @@ -0,0 +1,81 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package proposervm + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/database/memdb" + "github.com/ava-labs/avalanchego/database/prefixdb" + "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/snow/engine/snowman/block" + "github.com/ava-labs/avalanchego/snow/snowtest" +) + +func TestProposerVMInitializeShouldFailIfInnerVMCantVerifyItsHeightIndex(t *testing.T) { + require := require.New(t) + + innerVM := &fullVM{ + TestVM: &block.TestVM{ + TestVM: common.TestVM{ + T: t, + }, + }, + } + + // let innerVM fail verifying its height index with + // a non-special error (like block.ErrIndexIncomplete) + customError := errors.New("custom error") + innerVM.VerifyHeightIndexF = func(_ context.Context) error { + return customError + } + + innerVM.InitializeF = func(context.Context, *snow.Context, database.Database, + []byte, []byte, []byte, chan<- common.Message, + []*common.Fx, common.AppSender, + ) error { + return nil + } + + proVM := New( + innerVM, + Config{ + ActivationTime: time.Unix(0, 0), + DurangoTime: time.Unix(0, 0), + MinimumPChainHeight: 0, + MinBlkDelay: DefaultMinBlockDelay, + NumHistoricalBlocks: DefaultNumHistoricalBlocks, + StakingLeafSigner: pTestSigner, + StakingCertLeaf: pTestCert, + }, + ) + + defer func() { + // avoids leaking goroutines + require.NoError(proVM.Shutdown(context.Background())) + }() + + ctx := snowtest.Context(t, snowtest.CChainID) + initialState := []byte("genesis state") + + err := proVM.Initialize( + context.Background(), + ctx, + prefixdb.New([]byte{}, memdb.New()), + initialState, + nil, + nil, + nil, + nil, + nil, + ) + require.ErrorIs(err, customError) +} diff --git a/avalanchego/vms/proposervm/vm_test.go b/avalanchego/vms/proposervm/vm_test.go index 0aecbe12..7ad266d7 100644 --- a/avalanchego/vms/proposervm/vm_test.go +++ 
b/avalanchego/vms/proposervm/vm_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package proposervm @@ -7,27 +7,28 @@ import ( "bytes" "context" "crypto" - "crypto/tls" "errors" + "fmt" "testing" "time" - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" - "github.com/ava-labs/avalanchego/database/manager" + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/database/memdb" + "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" - "github.com/ava-labs/avalanchego/snow/engine/snowman/block/mocks" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/staking" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/timer/mockable" - "github.com/ava-labs/avalanchego/version" "github.com/ava-labs/avalanchego/vms/proposervm/proposer" "github.com/ava-labs/avalanchego/vms/proposervm/state" @@ -35,19 +36,18 @@ import ( ) var ( - _ block.ChainVM = (*fullVM)(nil) - _ block.HeightIndexedChainVM = (*fullVM)(nil) - _ block.StateSyncableVM = (*fullVM)(nil) + _ block.ChainVM = (*fullVM)(nil) + _ block.StateSyncableVM = (*fullVM)(nil) ) type fullVM struct { *block.TestVM - *block.TestHeightIndexedVM *block.TestStateSyncableVM } var ( - pTestCert *tls.Certificate + pTestSigner crypto.Signer + pTestCert *staking.Certificate genesisUnixTimestamp int64 = 1000 genesisTimestamp = time.Unix(genesisUnixTimestamp, 0) @@ -58,27 +58,32 @@ var ( errUnverifiedBlock = 
errors.New("unverified block") errMarshallingFailed = errors.New("marshalling failed") errTooHigh = errors.New("too high") + errUnexpectedCall = errors.New("unexpected call") ) func init() { - var err error - pTestCert, err = staking.NewTLSCert() + tlsCert, err := staking.NewTLSCert() if err != nil { panic(err) } + pTestSigner = tlsCert.PrivateKey.(crypto.Signer) + pTestCert = staking.CertificateFromX509(tlsCert.Leaf) } func initTestProposerVM( t *testing.T, proBlkStartTime time.Time, + durangoTime time.Time, minPChainHeight uint64, ) ( *fullVM, *validators.TestState, *VM, *snowman.TestBlock, - manager.Manager, + database.Database, ) { + require := require.New(t) + coreGenBlk := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ IDV: ids.GenerateTestID(), @@ -96,15 +101,12 @@ func initTestProposerVM( T: t, }, }, - TestHeightIndexedVM: &block.TestHeightIndexedVM{ - T: t, - }, TestStateSyncableVM: &block.TestStateSyncableVM{ T: t, }, } - coreVM.InitializeF = func(context.Context, *snow.Context, manager.Manager, + coreVM.InitializeF = func(context.Context, *snow.Context, database.Database, []byte, []byte, []byte, chan<- common.Message, []*common.Fx, common.AppSender, ) error { @@ -132,11 +134,15 @@ func initTestProposerVM( proVM := New( coreVM, - proBlkStartTime, - minPChainHeight, - DefaultMinBlockDelay, - pTestCert.PrivateKey.(crypto.Signer), - pTestCert.Leaf, + Config{ + ActivationTime: proBlkStartTime, + DurangoTime: durangoTime, + MinimumPChainHeight: minPChainHeight, + MinBlkDelay: DefaultMinBlockDelay, + NumHistoricalBlocks: DefaultNumHistoricalBlocks, + StakingLeafSigner: pTestSigner, + StakingCertLeaf: pTestCert, + }, ) valState := &validators.TestState{ @@ -149,72 +155,108 @@ func initTestProposerVM( return defaultPChainHeight, nil } valState.GetValidatorSetF = func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { + var ( + thisNode = proVM.ctx.NodeID + nodeID1 = ids.BuildTestNodeID([]byte{1}) + nodeID2 = 
ids.BuildTestNodeID([]byte{2}) + nodeID3 = ids.BuildTestNodeID([]byte{3}) + ) return map[ids.NodeID]*validators.GetValidatorOutput{ - proVM.ctx.NodeID: { - NodeID: proVM.ctx.NodeID, + thisNode: { + NodeID: thisNode, Weight: 10, }, - {1}: { - NodeID: ids.NodeID{1}, + nodeID1: { + NodeID: nodeID1, Weight: 5, }, - {2}: { - NodeID: ids.NodeID{2}, + nodeID2: { + NodeID: nodeID2, Weight: 6, }, - {3}: { - NodeID: ids.NodeID{3}, + nodeID3: { + NodeID: nodeID3, Weight: 7, }, }, nil } - ctx := snow.DefaultContextTest() - ctx.NodeID = ids.NodeIDFromCert(pTestCert.Leaf) + ctx := snowtest.Context(t, ids.ID{1}) + ctx.NodeID = ids.NodeIDFromCert(pTestCert) ctx.ValidatorState = valState - dummyDBManager := manager.NewMemDB(version.Semantic1_0_0) - dummyDBManager = dummyDBManager.NewPrefixDBManager([]byte{}) + db := prefixdb.New([]byte{0}, memdb.New()) // signal height index is complete coreVM.VerifyHeightIndexF = func(context.Context) error { return nil } - err := proVM.Initialize( + require.NoError(proVM.Initialize( context.Background(), ctx, - dummyDBManager, + db, initialState, nil, nil, nil, nil, nil, - ) - if err != nil { - t.Fatalf("failed to initialize proposerVM with %s", err) - } + )) // Initialize shouldn't be called again coreVM.InitializeF = nil - if err := proVM.SetState(context.Background(), snow.NormalOp); err != nil { - t.Fatal(err) - } + require.NoError(proVM.SetState(context.Background(), snow.NormalOp)) + require.NoError(proVM.SetPreference(context.Background(), coreGenBlk.IDV)) - if err := proVM.SetPreference(context.Background(), coreGenBlk.IDV); err != nil { - t.Fatal(err) - } + proVM.Set(coreGenBlk.Timestamp()) + + return coreVM, valState, proVM, coreGenBlk, db +} + +func waitForProposerWindow(vm *VM, chainTip snowman.Block, pchainHeight uint64) error { + var ( + ctx = context.Background() + childBlockHeight = chainTip.Height() + 1 + parentTimestamp = chainTip.Timestamp() + ) + + for { + slot := proposer.TimeToSlot(parentTimestamp, 
vm.Clock.Time().Truncate(time.Second)) + delay, err := vm.MinDelayForProposer( + ctx, + childBlockHeight, + pchainHeight, + vm.ctx.NodeID, + slot, + ) + if err != nil { + return err + } - return coreVM, valState, proVM, coreGenBlk, dummyDBManager + vm.Clock.Set(parentTimestamp.Add(delay)) + if delay < proposer.MaxLookAheadWindow { + return nil + } + } } // VM.BuildBlock tests section func TestBuildBlockTimestampAreRoundedToSeconds(t *testing.T) { + require := require.New(t) + // given the same core block, BuildBlock returns the same proposer block - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) // enable ProBlks + var ( + activationTime = time.Unix(0, 0) + durangoTime = activationTime + ) + coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + defer func() { + require.NoError(proVM.Shutdown(context.Background())) + }() + skewedTimestamp := time.Now().Truncate(time.Second).Add(time.Millisecond) proVM.Set(skewedTimestamp) @@ -223,10 +265,9 @@ func TestBuildBlockTimestampAreRoundedToSeconds(t *testing.T) { IDV: ids.Empty.Prefix(111), StatusV: choices.Processing, }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - TimestampV: coreGenBlk.Timestamp().Add(proposer.MaxDelay), + BytesV: []byte{1}, + ParentV: coreGenBlk.ID(), + HeightV: coreGenBlk.Height() + 1, } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk, nil @@ -234,62 +275,70 @@ func TestBuildBlockTimestampAreRoundedToSeconds(t *testing.T) { // test builtBlk, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatal("proposerVM could not build block") - } + require.NoError(err) - if builtBlk.Timestamp().Truncate(time.Second) != builtBlk.Timestamp() { - t.Fatal("Timestamp should be rounded to second") - } + require.Equal(builtBlk.Timestamp().Truncate(time.Second), builtBlk.Timestamp()) } func TestBuildBlockIsIdempotent(t *testing.T) { + require := require.New(t) + // 
given the same core block, BuildBlock returns the same proposer block - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) // enable ProBlks + var ( + activationTime = time.Unix(0, 0) + durangoTime = activationTime + ) + coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + defer func() { + require.NoError(proVM.Shutdown(context.Background())) + }() coreBlk := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ IDV: ids.Empty.Prefix(111), StatusV: choices.Processing, }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - TimestampV: coreGenBlk.Timestamp().Add(proposer.MaxDelay), + BytesV: []byte{1}, + ParentV: coreGenBlk.ID(), + HeightV: coreGenBlk.Height() + 1, } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk, nil } - // test + // Mock the clock time to make sure that block timestamps will be equal + proVM.Clock.Set(time.Now()) + builtBlk1, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatal("proposerVM could not build block") - } + require.NoError(err) builtBlk2, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatal("proposerVM could not build block") - } + require.NoError(err) - if !bytes.Equal(builtBlk1.Bytes(), builtBlk2.Bytes()) { - t.Fatal("proposer blocks wrapping the same core block are different") - } + require.Equal(builtBlk1.Bytes(), builtBlk2.Bytes()) } func TestFirstProposerBlockIsBuiltOnTopOfGenesis(t *testing.T) { + require := require.New(t) + // setup - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) // enable ProBlks + var ( + activationTime = time.Unix(0, 0) + durangoTime = activationTime + ) + coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + defer func() { + require.NoError(proVM.Shutdown(context.Background())) + }() coreBlk := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ IDV: 
ids.Empty.Prefix(111), StatusV: choices.Processing, }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - TimestampV: coreGenBlk.Timestamp().Add(proposer.MaxDelay), + BytesV: []byte{1}, + ParentV: coreGenBlk.ID(), + HeightV: coreGenBlk.Height() + 1, } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk, nil @@ -297,24 +346,27 @@ func TestFirstProposerBlockIsBuiltOnTopOfGenesis(t *testing.T) { // test snowBlock, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatal("Could not build block") - } + require.NoError(err) // checks - proBlock, ok := snowBlock.(*postForkBlock) - if !ok { - t.Fatal("proposerVM.BuildBlock() does not return a proposervm.Block") - } + require.IsType(&postForkBlock{}, snowBlock) + proBlock := snowBlock.(*postForkBlock) - if proBlock.innerBlk != coreBlk { - t.Fatal("different block was expected to be built") - } + require.Equal(coreBlk, proBlock.innerBlk) } // both core blocks and pro blocks must be built on preferred func TestProposerBlocksAreBuiltOnPreferredProBlock(t *testing.T) { - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) // enable ProBlks + require := require.New(t) + + var ( + activationTime = time.Unix(0, 0) + durangoTime = activationTime + ) + coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + defer func() { + require.NoError(proVM.Shutdown(context.Background())) + }() // add two proBlks... 
coreBlk1 := &snowman.TestBlock{ @@ -322,43 +374,32 @@ func TestProposerBlocksAreBuiltOnPreferredProBlock(t *testing.T) { IDV: ids.Empty.Prefix(111), StatusV: choices.Processing, }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - TimestampV: coreGenBlk.Timestamp(), + BytesV: []byte{1}, + ParentV: coreGenBlk.ID(), + HeightV: coreGenBlk.Height() + 1, } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk1, nil } proBlk1, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatalf("Could not build proBlk1 due to %s", err) - } + require.NoError(err) coreBlk2 := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ IDV: ids.Empty.Prefix(222), StatusV: choices.Processing, }, - BytesV: []byte{2}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - TimestampV: coreGenBlk.Timestamp(), + BytesV: []byte{2}, + ParentV: coreGenBlk.ID(), + HeightV: coreGenBlk.Height() + 1, } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk2, nil } proBlk2, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatal("Could not build proBlk2") - } - if proBlk1.ID() == proBlk2.ID() { - t.Fatal("proBlk1 and proBlk2 should be different for this test") - } - - if err := proBlk2.Verify(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(err) + require.NotEqual(proBlk2.ID(), proBlk1.ID()) + require.NoError(proBlk2.Verify(context.Background())) // ...and set one as preferred var prefcoreBlk *snowman.TestBlock @@ -371,7 +412,7 @@ func TestProposerBlocksAreBuiltOnPreferredProBlock(t *testing.T) { prefcoreBlk = coreBlk2 return nil default: - t.Fatal("Unknown core Blocks set as preferred") + require.FailNow("prefID does not match coreBlk1 or coreBlk2") return nil } } @@ -382,14 +423,12 @@ func TestProposerBlocksAreBuiltOnPreferredProBlock(t *testing.T) { case bytes.Equal(b, coreBlk2.Bytes()): return coreBlk2, nil default: - t.Fatalf("Wrong 
bytes") + require.FailNow("bytes do not match coreBlk1 or coreBlk2") return nil, nil } } - if err := proVM.SetPreference(context.Background(), proBlk2.ID()); err != nil { - t.Fatal("Could not set preference") - } + require.NoError(proVM.SetPreference(context.Background(), proBlk2.ID())) // build block... coreBlk3 := &snowman.TestBlock{ @@ -397,72 +436,66 @@ func TestProposerBlocksAreBuiltOnPreferredProBlock(t *testing.T) { IDV: ids.Empty.Prefix(333), StatusV: choices.Processing, }, - BytesV: []byte{3}, - ParentV: prefcoreBlk.ID(), - HeightV: prefcoreBlk.Height() + 1, - TimestampV: coreGenBlk.Timestamp(), + BytesV: []byte{3}, + ParentV: prefcoreBlk.ID(), + HeightV: prefcoreBlk.Height() + 1, } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk3, nil } - proVM.Set(proVM.Time().Add(proposer.MaxDelay)) + require.NoError(waitForProposerWindow(proVM, proBlk2, proBlk2.(*postForkBlock).PChainHeight())) builtBlk, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatalf("unexpectedly could not build block due to %s", err) - } + require.NoError(err) // ...show that parent is the preferred one - if builtBlk.Parent() != proBlk2.ID() { - t.Fatal("proposer block not built on preferred parent") - } + require.Equal(proBlk2.ID(), builtBlk.Parent()) } func TestCoreBlocksMustBeBuiltOnPreferredCoreBlock(t *testing.T) { - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) // enable ProBlks + require := require.New(t) + + var ( + activationTime = time.Unix(0, 0) + durangoTime = activationTime + ) + coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + defer func() { + require.NoError(proVM.Shutdown(context.Background())) + }() coreBlk1 := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ IDV: ids.Empty.Prefix(111), StatusV: choices.Processing, }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - TimestampV: coreGenBlk.Timestamp(), + 
BytesV: []byte{1}, + ParentV: coreGenBlk.ID(), + HeightV: coreGenBlk.Height() + 1, } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk1, nil } proBlk1, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatal("Could not build proBlk1") - } + require.NoError(err) coreBlk2 := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ IDV: ids.Empty.Prefix(222), StatusV: choices.Processing, }, - BytesV: []byte{2}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - TimestampV: coreGenBlk.Timestamp(), + BytesV: []byte{2}, + ParentV: coreGenBlk.ID(), + HeightV: coreGenBlk.Height() + 1, } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk2, nil } proBlk2, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatal("Could not build proBlk2") - } - if proBlk1.ID() == proBlk2.ID() { - t.Fatal("proBlk1 and proBlk2 should be different for this test") - } + require.NoError(err) + require.NotEqual(proBlk1.ID(), proBlk2.ID()) - if err := proBlk2.Verify(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(proBlk2.Verify(context.Background())) // ...and set one as preferred var wronglyPreferredcoreBlk *snowman.TestBlock @@ -475,7 +508,7 @@ func TestCoreBlocksMustBeBuiltOnPreferredCoreBlock(t *testing.T) { wronglyPreferredcoreBlk = coreBlk1 return nil default: - t.Fatal("Unknown core Blocks set as preferred") + require.FailNow("Unknown core Blocks set as preferred") return nil } } @@ -486,14 +519,12 @@ func TestCoreBlocksMustBeBuiltOnPreferredCoreBlock(t *testing.T) { case bytes.Equal(b, coreBlk2.Bytes()): return coreBlk2, nil default: - t.Fatalf("Wrong bytes") + require.FailNow("Wrong bytes") return nil, nil } } - if err := proVM.SetPreference(context.Background(), proBlk2.ID()); err != nil { - t.Fatal("Could not set preference") - } + require.NoError(proVM.SetPreference(context.Background(), proBlk2.ID())) // build block... 
coreBlk3 := &snowman.TestBlock{ @@ -501,49 +532,51 @@ func TestCoreBlocksMustBeBuiltOnPreferredCoreBlock(t *testing.T) { IDV: ids.Empty.Prefix(333), StatusV: choices.Processing, }, - BytesV: []byte{3}, - ParentV: wronglyPreferredcoreBlk.ID(), - HeightV: wronglyPreferredcoreBlk.Height() + 1, - TimestampV: coreGenBlk.Timestamp(), + BytesV: []byte{3}, + ParentV: wronglyPreferredcoreBlk.ID(), + HeightV: wronglyPreferredcoreBlk.Height() + 1, } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk3, nil } - proVM.Set(proVM.Time().Add(proposer.MaxDelay)) + require.NoError(waitForProposerWindow(proVM, proBlk2, proBlk2.(*postForkBlock).PChainHeight())) blk, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) - if err := blk.Verify(context.Background()); err == nil { - t.Fatal("coreVM does not build on preferred coreBlock. It should err") - } + err = blk.Verify(context.Background()) + require.ErrorIs(err, errInnerParentMismatch) } // VM.ParseBlock tests section func TestCoreBlockFailureCauseProposerBlockParseFailure(t *testing.T) { - coreVM, _, proVM, _, _ := initTestProposerVM(t, time.Time{}, 0) // enable ProBlks + require := require.New(t) + + var ( + activationTime = time.Unix(0, 0) + durangoTime = activationTime + ) + coreVM, _, proVM, _, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + defer func() { + require.NoError(proVM.Shutdown(context.Background())) + }() innerBlk := &snowman.TestBlock{ - BytesV: []byte{1}, - TimestampV: proVM.Time(), + BytesV: []byte{1}, } coreVM.ParseBlockF = func(context.Context, []byte) (snowman.Block, error) { return nil, errMarshallingFailed } slb, err := statelessblock.Build( proVM.preferred, - innerBlk.Timestamp(), + proVM.Time(), 100, // pChainHeight, - proVM.stakingCertLeaf, + proVM.StakingCertLeaf, innerBlk.Bytes(), proVM.ctx.ChainID, - proVM.stakingLeafSigner, + proVM.StakingLeafSigner, ) - if err != nil { - t.Fatal("could not build stateless 
block") - } + require.NoError(err) proBlk := postForkBlock{ SignedBlock: slb, postForkCommonComponents: postForkCommonComponents{ @@ -554,41 +587,45 @@ func TestCoreBlockFailureCauseProposerBlockParseFailure(t *testing.T) { } // test - - if _, err := proVM.ParseBlock(context.Background(), proBlk.Bytes()); err == nil { - t.Fatal("failed parsing proposervm.Block. Error:", err) - } + _, err = proVM.ParseBlock(context.Background(), proBlk.Bytes()) + require.ErrorIs(err, errMarshallingFailed) } func TestTwoProBlocksWrappingSameCoreBlockCanBeParsed(t *testing.T) { - coreVM, _, proVM, gencoreBlk, _ := initTestProposerVM(t, time.Time{}, 0) // enable ProBlks + require := require.New(t) + + var ( + activationTime = time.Unix(0, 0) + durangoTime = activationTime + ) + coreVM, _, proVM, gencoreBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + defer func() { + require.NoError(proVM.Shutdown(context.Background())) + }() // create two Proposer blocks at the same height innerBlk := &snowman.TestBlock{ - BytesV: []byte{1}, - ParentV: gencoreBlk.ID(), - HeightV: gencoreBlk.Height() + 1, - TimestampV: proVM.Time(), + BytesV: []byte{1}, + ParentV: gencoreBlk.ID(), + HeightV: gencoreBlk.Height() + 1, } coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { - if !bytes.Equal(b, innerBlk.Bytes()) { - t.Fatalf("Wrong bytes") - } + require.Equal(innerBlk.Bytes(), b) return innerBlk, nil } + blkTimestamp := proVM.Time() + slb1, err := statelessblock.Build( proVM.preferred, - innerBlk.Timestamp(), + blkTimestamp, 100, // pChainHeight, - proVM.stakingCertLeaf, + proVM.StakingCertLeaf, innerBlk.Bytes(), proVM.ctx.ChainID, - proVM.stakingLeafSigner, + proVM.StakingLeafSigner, ) - if err != nil { - t.Fatal("could not build stateless block") - } + require.NoError(err) proBlk1 := postForkBlock{ SignedBlock: slb1, postForkCommonComponents: postForkCommonComponents{ @@ -600,16 +637,14 @@ func TestTwoProBlocksWrappingSameCoreBlockCanBeParsed(t *testing.T) { 
slb2, err := statelessblock.Build( proVM.preferred, - innerBlk.Timestamp(), + blkTimestamp, 200, // pChainHeight, - proVM.stakingCertLeaf, + proVM.StakingCertLeaf, innerBlk.Bytes(), proVM.ctx.ChainID, - proVM.stakingLeafSigner, + proVM.StakingLeafSigner, ) - if err != nil { - t.Fatal("could not build stateless block") - } + require.NoError(err) proBlk2 := postForkBlock{ SignedBlock: slb2, postForkCommonComponents: postForkCommonComponents{ @@ -619,57 +654,50 @@ func TestTwoProBlocksWrappingSameCoreBlockCanBeParsed(t *testing.T) { }, } - if proBlk1.ID() == proBlk2.ID() { - t.Fatal("Test requires proBlk1 and proBlk2 to be different") - } + require.NotEqual(proBlk1.ID(), proBlk2.ID()) // Show that both can be parsed and retrieved parsedBlk1, err := proVM.ParseBlock(context.Background(), proBlk1.Bytes()) - if err != nil { - t.Fatal("proposerVM could not parse parsedBlk1") - } + require.NoError(err) parsedBlk2, err := proVM.ParseBlock(context.Background(), proBlk2.Bytes()) - if err != nil { - t.Fatal("proposerVM could not parse parsedBlk2") - } + require.NoError(err) - if parsedBlk1.ID() != proBlk1.ID() { - t.Fatal("error in parsing block") - } - if parsedBlk2.ID() != proBlk2.ID() { - t.Fatal("error in parsing block") - } + require.Equal(proBlk1.ID(), parsedBlk1.ID()) + require.Equal(proBlk2.ID(), parsedBlk2.ID()) } // VM.BuildBlock and VM.ParseBlock interoperability tests section func TestTwoProBlocksWithSameParentCanBothVerify(t *testing.T) { - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) // enable ProBlks + require := require.New(t) + + var ( + activationTime = time.Unix(0, 0) + durangoTime = activationTime + ) + coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + defer func() { + require.NoError(proVM.Shutdown(context.Background())) + }() // one block is built from this proVM localcoreBlk := &snowman.TestBlock{ - BytesV: []byte{111}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - 
TimestampV: genesisTimestamp, + BytesV: []byte{111}, + ParentV: coreGenBlk.ID(), + HeightV: coreGenBlk.Height() + 1, } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return localcoreBlk, nil } builtBlk, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatal("Could not build block") - } - if err := builtBlk.Verify(context.Background()); err != nil { - t.Fatal("Built block does not verify") - } + require.NoError(err) + require.NoError(builtBlk.Verify(context.Background())) // another block with same parent comes from network and is parsed netcoreBlk := &snowman.TestBlock{ - BytesV: []byte{222}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - TimestampV: genesisTimestamp, + BytesV: []byte{222}, + ParentV: coreGenBlk.ID(), + HeightV: coreGenBlk.Height() + 1, } coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { @@ -680,25 +708,21 @@ func TestTwoProBlocksWithSameParentCanBothVerify(t *testing.T) { case bytes.Equal(b, netcoreBlk.Bytes()): return netcoreBlk, nil default: - t.Fatalf("Unknown bytes") + require.FailNow("Unknown bytes") return nil, nil } } pChainHeight, err := proVM.ctx.ValidatorState.GetCurrentHeight(context.Background()) - if err != nil { - t.Fatal("could not retrieve pChain height") - } + require.NoError(err) netSlb, err := statelessblock.BuildUnsigned( proVM.preferred, - netcoreBlk.Timestamp(), + proVM.Time(), pChainHeight, netcoreBlk.Bytes(), ) - if err != nil { - t.Fatal("could not build stateless block") - } + require.NoError(err) netProBlk := postForkBlock{ SignedBlock: netSlb, postForkCommonComponents: postForkCommonComponents{ @@ -709,47 +733,53 @@ func TestTwoProBlocksWithSameParentCanBothVerify(t *testing.T) { } // prove that also block from network verifies - if err := netProBlk.Verify(context.Background()); err != nil { - t.Fatal("block from network does not verify") - } + require.NoError(netProBlk.Verify(context.Background())) } // Pre Fork tests 
section func TestPreFork_Initialize(t *testing.T) { - _, _, proVM, coreGenBlk, _ := initTestProposerVM(t, mockable.MaxTime, 0) // disable ProBlks + require := require.New(t) + + var ( + activationTime = mockable.MaxTime + durangoTime = activationTime + ) + _, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + defer func() { + require.NoError(proVM.Shutdown(context.Background())) + }() // checks blkID, err := proVM.LastAccepted(context.Background()) - if err != nil { - t.Fatal("failed to retrieve last accepted block") - } + require.NoError(err) rtvdBlk, err := proVM.GetBlock(context.Background(), blkID) - if err != nil { - t.Fatal("Block should be returned without calling core vm") - } + require.NoError(err) - if _, ok := rtvdBlk.(*preForkBlock); !ok { - t.Fatal("Block retrieved from proposerVM should be proposerBlocks") - } - if !bytes.Equal(rtvdBlk.Bytes(), coreGenBlk.Bytes()) { - t.Fatal("Stored block is not genesis") - } + require.IsType(&preForkBlock{}, rtvdBlk) + require.Equal(coreGenBlk.Bytes(), rtvdBlk.Bytes()) } func TestPreFork_BuildBlock(t *testing.T) { - // setup - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, mockable.MaxTime, 0) // disable ProBlks + require := require.New(t) + + var ( + activationTime = mockable.MaxTime + durangoTime = activationTime + ) + coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + defer func() { + require.NoError(proVM.Shutdown(context.Background())) + }() coreBlk := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ IDV: ids.Empty.Prefix(333), StatusV: choices.Processing, }, - BytesV: []byte{3}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - TimestampV: coreGenBlk.Timestamp().Add(proposer.MaxDelay), + BytesV: []byte{3}, + ParentV: coreGenBlk.ID(), + HeightV: coreGenBlk.Height() + 1, } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk, nil @@ -757,35 +787,31 @@ func 
TestPreFork_BuildBlock(t *testing.T) { // test builtBlk, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatal("proposerVM could not build block") - } - if _, ok := builtBlk.(*preForkBlock); !ok { - t.Fatal("Block built by proposerVM should be proposerBlocks") - } - if builtBlk.ID() != coreBlk.ID() { - t.Fatal("unexpected built block") - } - if !bytes.Equal(builtBlk.Bytes(), coreBlk.Bytes()) { - t.Fatal("unexpected built block") - } + require.NoError(err) + require.IsType(&preForkBlock{}, builtBlk) + require.Equal(coreBlk.ID(), builtBlk.ID()) + require.Equal(coreBlk.Bytes(), builtBlk.Bytes()) // test coreVM.GetBlockF = func(context.Context, ids.ID) (snowman.Block, error) { return coreBlk, nil } storedBlk, err := proVM.GetBlock(context.Background(), builtBlk.ID()) - if err != nil { - t.Fatal("proposerVM has not cached built block") - } - if storedBlk.ID() != builtBlk.ID() { - t.Fatal("proposerVM retrieved wrong block") - } + require.NoError(err) + require.Equal(builtBlk.ID(), storedBlk.ID()) } func TestPreFork_ParseBlock(t *testing.T) { - // setup - coreVM, _, proVM, _, _ := initTestProposerVM(t, mockable.MaxTime, 0) // disable ProBlks + require := require.New(t) + + var ( + activationTime = mockable.MaxTime + durangoTime = activationTime + ) + coreVM, _, proVM, _, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + defer func() { + require.NoError(proVM.Shutdown(context.Background())) + }() coreBlk := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ @@ -795,43 +821,36 @@ func TestPreFork_ParseBlock(t *testing.T) { } coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { - if !bytes.Equal(b, coreBlk.Bytes()) { - t.Fatalf("Wrong bytes") - } + require.Equal(coreBlk.Bytes(), b) return coreBlk, nil } parsedBlk, err := proVM.ParseBlock(context.Background(), coreBlk.Bytes()) - if err != nil { - t.Fatal("Could not parse naked core block") - } - if _, ok := parsedBlk.(*preForkBlock); !ok { - t.Fatal("Block 
parsed by proposerVM should be proposerBlocks") - } - if parsedBlk.ID() != coreBlk.ID() { - t.Fatal("Parsed block does not match expected block") - } - if !bytes.Equal(parsedBlk.Bytes(), coreBlk.Bytes()) { - t.Fatal("Parsed block does not match expected block") - } + require.NoError(err) + require.IsType(&preForkBlock{}, parsedBlk) + require.Equal(coreBlk.ID(), parsedBlk.ID()) + require.Equal(coreBlk.Bytes(), parsedBlk.Bytes()) coreVM.GetBlockF = func(_ context.Context, id ids.ID) (snowman.Block, error) { - if id != coreBlk.ID() { - t.Fatalf("Unknown core block") - } + require.Equal(coreBlk.ID(), id) return coreBlk, nil } storedBlk, err := proVM.GetBlock(context.Background(), parsedBlk.ID()) - if err != nil { - t.Fatal("proposerVM has not cached parsed block") - } - if storedBlk.ID() != parsedBlk.ID() { - t.Fatal("proposerVM retrieved wrong block") - } + require.NoError(err) + require.Equal(parsedBlk.ID(), storedBlk.ID()) } func TestPreFork_SetPreference(t *testing.T) { - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, mockable.MaxTime, 0) // disable ProBlks + require := require.New(t) + + var ( + activationTime = mockable.MaxTime + durangoTime = activationTime + ) + coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + defer func() { + require.NoError(proVM.Shutdown(context.Background())) + }() coreBlk0 := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ @@ -847,9 +866,7 @@ func TestPreFork_SetPreference(t *testing.T) { return coreBlk0, nil } builtBlk, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatal("Could not build proposer block") - } + require.NoError(err) coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { @@ -871,9 +888,7 @@ func TestPreFork_SetPreference(t *testing.T) { return nil, errUnknownBlock } } - if err := proVM.SetPreference(context.Background(), builtBlk.ID()); err != nil { - t.Fatal("Could not set preference on proposer 
Block") - } + require.NoError(proVM.SetPreference(context.Background(), builtBlk.ID())) coreBlk1 := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ @@ -889,15 +904,13 @@ func TestPreFork_SetPreference(t *testing.T) { return coreBlk1, nil } nextBlk, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatalf("Could not build proposer block %s", err) - } - if nextBlk.Parent() != builtBlk.ID() { - t.Fatal("Preferred block should be parent of next built block") - } + require.NoError(err) + require.Equal(builtBlk.ID(), nextBlk.Parent()) } func TestExpiredBuildBlock(t *testing.T) { + require := require.New(t) + coreGenBlk := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ IDV: ids.GenerateTestID(), @@ -933,11 +946,15 @@ func TestExpiredBuildBlock(t *testing.T) { proVM := New( coreVM, - time.Time{}, - 0, - DefaultMinBlockDelay, - pTestCert.PrivateKey.(crypto.Signer), - pTestCert.Leaf, + Config{ + ActivationTime: time.Time{}, + DurangoTime: mockable.MaxTime, + MinimumPChainHeight: 0, + MinBlkDelay: DefaultMinBlockDelay, + NumHistoricalBlocks: DefaultNumHistoricalBlocks, + StakingLeafSigner: pTestSigner, + StakingCertLeaf: pTestCert, + }, ) valState := &validators.TestState{ @@ -950,26 +967,26 @@ func TestExpiredBuildBlock(t *testing.T) { return defaultPChainHeight, nil } valState.GetValidatorSetF = func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { + nodeID := ids.BuildTestNodeID([]byte{1}) return map[ids.NodeID]*validators.GetValidatorOutput{ - {1}: { - NodeID: ids.NodeID{1}, + nodeID: { + NodeID: nodeID, Weight: 100, }, }, nil } - ctx := snow.DefaultContextTest() - ctx.NodeID = ids.NodeIDFromCert(pTestCert.Leaf) + ctx := snowtest.Context(t, snowtest.CChainID) + ctx.NodeID = ids.NodeIDFromCert(pTestCert) ctx.ValidatorState = valState - dbManager := manager.NewMemDB(version.Semantic1_0_0) toEngine := make(chan common.Message, 1) var toScheduler chan<- common.Message coreVM.InitializeF = func( _ 
context.Context, _ *snow.Context, - _ manager.Manager, + _ database.Database, _ []byte, _ []byte, _ []byte, @@ -980,60 +997,55 @@ func TestExpiredBuildBlock(t *testing.T) { toScheduler = toEngineChan return nil } + coreVM.VerifyHeightIndexF = func(context.Context) error { + return nil + } // make sure that DBs are compressed correctly - err := proVM.Initialize( + require.NoError(proVM.Initialize( context.Background(), ctx, - dbManager, + memdb.New(), nil, nil, nil, toEngine, nil, nil, - ) - if err != nil { - t.Fatalf("failed to initialize proposerVM with %s", err) - } + )) + defer func() { + require.NoError(proVM.Shutdown(context.Background())) + }() // Initialize shouldn't be called again coreVM.InitializeF = nil - if err := proVM.SetState(context.Background(), snow.NormalOp); err != nil { - t.Fatal(err) - } - - if err := proVM.SetPreference(context.Background(), coreGenBlk.IDV); err != nil { - t.Fatal(err) - } - - // Make sure that passing a message works - toScheduler <- common.PendingTxs - <-toEngine + require.NoError(proVM.SetState(context.Background(), snow.NormalOp)) + require.NoError(proVM.SetPreference(context.Background(), coreGenBlk.IDV)) // Notify the proposer VM of a new block on the inner block side toScheduler <- common.PendingTxs + // The first notification will be read from the consensus engine + <-toEngine + // Before calling BuildBlock, verify a remote block and set it as the + // preferred block. 
coreBlk := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - TimestampV: coreGenBlk.Timestamp(), + BytesV: []byte{1}, + ParentV: coreGenBlk.ID(), + HeightV: coreGenBlk.Height() + 1, } statelessBlock, err := statelessblock.BuildUnsigned( coreGenBlk.ID(), - coreBlk.Timestamp(), + proVM.Time(), 0, coreBlk.Bytes(), ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { @@ -1059,35 +1071,28 @@ func TestExpiredBuildBlock(t *testing.T) { proVM.Clock.Set(statelessBlock.Timestamp()) parsedBlock, err := proVM.ParseBlock(context.Background(), statelessBlock.Bytes()) - if err != nil { - t.Fatal(err) - } - - if err := parsedBlock.Verify(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(err) - if err := proVM.SetPreference(context.Background(), parsedBlock.ID()); err != nil { - t.Fatal(err) - } + require.NoError(parsedBlock.Verify(context.Background())) + require.NoError(proVM.SetPreference(context.Background(), parsedBlock.ID())) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { - t.Fatal("unexpectedly called build block") - panic("unexpectedly called build block") + require.FailNow(fmt.Errorf("%w: BuildBlock", errUnexpectedCall).Error()) + return nil, errUnexpectedCall } - // The first notification will be read from the consensus engine - <-toEngine + // Because we are now building on a different block, the proposer window + // shouldn't have started. 
+ _, err = proVM.BuildBlock(context.Background()) + require.ErrorIs(err, errProposerWindowNotStarted) - if _, err := proVM.BuildBlock(context.Background()); err == nil { - t.Fatal("build block when the proposer window hasn't started") - } - - proVM.Set(statelessBlock.Timestamp().Add(proposer.MaxDelay)) + proVM.Set(statelessBlock.Timestamp().Add(proposer.MaxBuildDelay)) proVM.Scheduler.SetBuildBlockTime(time.Now()) // The engine should have been notified to attempt to build a block now that - // the window has started again + // the window has started again. This is to guarantee that the inner VM has + // build block called after it sent a pendingTxs message on its internal + // engine channel. <-toEngine } @@ -1112,7 +1117,16 @@ func (b *wrappedBlock) Verify(ctx context.Context) error { } func TestInnerBlockDeduplication(t *testing.T) { - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) // disable ProBlks + require := require.New(t) + + var ( + activationTime = time.Unix(0, 0) + durangoTime = activationTime + ) + coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + defer func() { + require.NoError(proVM.Shutdown(context.Background())) + }() coreBlk := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ @@ -1136,18 +1150,14 @@ func TestInnerBlockDeduplication(t *testing.T) { 0, coreBlk.Bytes(), ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) statelessBlock1, err := statelessblock.BuildUnsigned( coreGenBlk.ID(), coreBlk.Timestamp(), 1, coreBlk.Bytes(), ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { @@ -1171,17 +1181,11 @@ func TestInnerBlockDeduplication(t *testing.T) { } parsedBlock0, err := proVM.ParseBlock(context.Background(), statelessBlock0.Bytes()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) - if err := parsedBlock0.Verify(context.Background()); err != 
nil { - t.Fatal(err) - } + require.NoError(parsedBlock0.Verify(context.Background())) - if err := proVM.SetPreference(context.Background(), parsedBlock0.ID()); err != nil { - t.Fatal(err) - } + require.NoError(proVM.SetPreference(context.Background(), parsedBlock0.ID())) coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { @@ -1205,24 +1209,18 @@ func TestInnerBlockDeduplication(t *testing.T) { } parsedBlock1, err := proVM.ParseBlock(context.Background(), statelessBlock1.Bytes()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) - if err := parsedBlock1.Verify(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(parsedBlock1.Verify(context.Background())) - if err := proVM.SetPreference(context.Background(), parsedBlock1.ID()); err != nil { - t.Fatal(err) - } + require.NoError(proVM.SetPreference(context.Background(), parsedBlock1.ID())) - if err := parsedBlock1.Accept(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(parsedBlock1.Accept(context.Background())) } func TestInnerVMRollback(t *testing.T) { + require := require.New(t) + coreGenBlk := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ IDV: ids.GenerateTestID(), @@ -1240,9 +1238,10 @@ func TestInnerVMRollback(t *testing.T) { return defaultPChainHeight, nil } valState.GetValidatorSetF = func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { + nodeID := ids.BuildTestNodeID([]byte{1}) return map[ids.NodeID]*validators.GetValidatorOutput{ - {1}: { - NodeID: ids.NodeID{1}, + nodeID: { + NodeID: nodeID, Weight: 100, }, }, nil @@ -1271,14 +1270,14 @@ func TestInnerVMRollback(t *testing.T) { } } - ctx := snow.DefaultContextTest() - ctx.NodeID = ids.NodeIDFromCert(pTestCert.Leaf) + ctx := snowtest.Context(t, snowtest.CChainID) + ctx.NodeID = ids.NodeIDFromCert(pTestCert) ctx.ValidatorState = valState coreVM.InitializeF = func( context.Context, *snow.Context, - 
manager.Manager, + database.Database, []byte, []byte, []byte, @@ -1288,40 +1287,39 @@ func TestInnerVMRollback(t *testing.T) { ) error { return nil } + coreVM.VerifyHeightIndexF = func(context.Context) error { + return nil + } - dbManager := manager.NewMemDB(version.Semantic1_0_0) + db := memdb.New() proVM := New( coreVM, - time.Time{}, - 0, - DefaultMinBlockDelay, - pTestCert.PrivateKey.(crypto.Signer), - pTestCert.Leaf, + Config{ + ActivationTime: time.Time{}, + DurangoTime: mockable.MaxTime, + MinimumPChainHeight: 0, + MinBlkDelay: DefaultMinBlockDelay, + NumHistoricalBlocks: DefaultNumHistoricalBlocks, + StakingLeafSigner: pTestSigner, + StakingCertLeaf: pTestCert, + }, ) - err := proVM.Initialize( + require.NoError(proVM.Initialize( context.Background(), ctx, - dbManager, + db, nil, nil, nil, nil, nil, nil, - ) - if err != nil { - t.Fatalf("failed to initialize proposerVM with %s", err) - } + )) - if err := proVM.SetState(context.Background(), snow.NormalOp); err != nil { - t.Fatal(err) - } - - if err := proVM.SetPreference(context.Background(), coreGenBlk.IDV); err != nil { - t.Fatal(err) - } + require.NoError(proVM.SetState(context.Background(), snow.NormalOp)) + require.NoError(proVM.SetPreference(context.Background(), coreGenBlk.IDV)) coreBlk := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ @@ -1339,9 +1337,7 @@ func TestInnerVMRollback(t *testing.T) { 0, coreBlk.Bytes(), ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { @@ -1367,84 +1363,76 @@ func TestInnerVMRollback(t *testing.T) { proVM.Clock.Set(statelessBlock.Timestamp()) parsedBlock, err := proVM.ParseBlock(context.Background(), statelessBlock.Bytes()) - if err != nil { - t.Fatal(err) - } - - if status := parsedBlock.Status(); status != choices.Processing { - t.Fatalf("expected status to be %s but was %s", choices.Processing, status) - } - - if err := 
parsedBlock.Verify(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(err) - if err := proVM.SetPreference(context.Background(), parsedBlock.ID()); err != nil { - t.Fatal(err) - } + require.Equal(choices.Processing, parsedBlock.Status()) - if err := parsedBlock.Accept(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(parsedBlock.Verify(context.Background())) + require.NoError(proVM.SetPreference(context.Background(), parsedBlock.ID())) + require.NoError(parsedBlock.Accept(context.Background())) fetchedBlock, err := proVM.GetBlock(context.Background(), parsedBlock.ID()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) - if status := fetchedBlock.Status(); status != choices.Accepted { - t.Fatalf("unexpected status %s. Expected %s", status, choices.Accepted) - } + require.Equal(choices.Accepted, fetchedBlock.Status()) // Restart the node and have the inner VM rollback state. - + require.NoError(proVM.Shutdown(context.Background())) coreBlk.StatusV = choices.Processing + coreVM.VerifyHeightIndexF = func(context.Context) error { + return nil + } proVM = New( coreVM, - time.Time{}, - 0, - DefaultMinBlockDelay, - pTestCert.PrivateKey.(crypto.Signer), - pTestCert.Leaf, + Config{ + ActivationTime: time.Time{}, + DurangoTime: mockable.MaxTime, + MinimumPChainHeight: 0, + MinBlkDelay: DefaultMinBlockDelay, + NumHistoricalBlocks: DefaultNumHistoricalBlocks, + StakingLeafSigner: pTestSigner, + StakingCertLeaf: pTestCert, + }, ) - err = proVM.Initialize( + require.NoError(proVM.Initialize( context.Background(), ctx, - dbManager, + db, nil, nil, nil, nil, nil, nil, - ) - if err != nil { - t.Fatalf("failed to initialize proposerVM with %s", err) - } + )) + defer func() { + require.NoError(proVM.Shutdown(context.Background())) + }() lastAcceptedID, err := proVM.LastAccepted(context.Background()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) - if lastAcceptedID != coreGenBlk.IDV { - t.Fatalf("failed to roll back 
the VM to the last accepted block") - } + require.Equal(coreGenBlk.IDV, lastAcceptedID) parsedBlock, err = proVM.ParseBlock(context.Background(), statelessBlock.Bytes()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) - if status := parsedBlock.Status(); status != choices.Processing { - t.Fatalf("expected status to be %s but was %s", choices.Processing, status) - } + require.Equal(choices.Processing, parsedBlock.Status()) } func TestBuildBlockDuringWindow(t *testing.T) { - coreVM, valState, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) // enable ProBlks + require := require.New(t) + + var ( + activationTime = time.Unix(0, 0) + durangoTime = mockable.MaxTime + ) + coreVM, valState, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + defer func() { + require.NoError(proVM.Shutdown(context.Background())) + }() valState.GetValidatorSetF = func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { return map[ids.NodeID]*validators.GetValidatorOutput{ @@ -1460,30 +1448,26 @@ func TestBuildBlockDuringWindow(t *testing.T) { IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - TimestampV: coreGenBlk.Timestamp(), + BytesV: []byte{1}, + ParentV: coreGenBlk.ID(), + HeightV: coreGenBlk.Height() + 1, } coreBlk1 := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, - BytesV: []byte{2}, - ParentV: coreBlk0.ID(), - HeightV: coreBlk0.Height() + 1, - TimestampV: coreBlk0.Timestamp(), + BytesV: []byte{2}, + ParentV: coreBlk0.ID(), + HeightV: coreBlk0.Height() + 1, } statelessBlock0, err := statelessblock.BuildUnsigned( coreGenBlk.ID(), - coreBlk0.Timestamp(), + proVM.Time(), 0, coreBlk0.Bytes(), ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { 
switch blkID { @@ -1513,42 +1497,26 @@ func TestBuildBlockDuringWindow(t *testing.T) { proVM.Clock.Set(statelessBlock0.Timestamp()) statefulBlock0, err := proVM.ParseBlock(context.Background(), statelessBlock0.Bytes()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) - if err := statefulBlock0.Verify(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(statefulBlock0.Verify(context.Background())) - if err := proVM.SetPreference(context.Background(), statefulBlock0.ID()); err != nil { - t.Fatal(err) - } + require.NoError(proVM.SetPreference(context.Background(), statefulBlock0.ID())) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk1, nil } statefulBlock1, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) - if err := statefulBlock1.Verify(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(statefulBlock1.Verify(context.Background())) - if err := proVM.SetPreference(context.Background(), statefulBlock1.ID()); err != nil { - t.Fatal(err) - } + require.NoError(proVM.SetPreference(context.Background(), statefulBlock1.ID())) - if err := statefulBlock0.Accept(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(statefulBlock0.Accept(context.Background())) - if err := statefulBlock1.Accept(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(statefulBlock1.Accept(context.Background())) } // Ensure that Accepting a PostForkBlock (A) containing core block (X) causes @@ -1560,8 +1528,16 @@ func TestBuildBlockDuringWindow(t *testing.T) { // | // C(Z) func TestTwoForks_OneIsAccepted(t *testing.T) { - forkTime := time.Unix(0, 0) - coreVM, _, proVM, gBlock, _ := initTestProposerVM(t, forkTime, 0) + require := require.New(t) + + var ( + activationTime = time.Unix(0, 0) + durangoTime = mockable.MaxTime + ) + coreVM, _, proVM, gBlock, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + defer 
func() { + require.NoError(proVM.Shutdown(context.Background())) + }() // create pre-fork block X and post-fork block A xBlock := &snowman.TestBlock{ @@ -1569,23 +1545,18 @@ func TestTwoForks_OneIsAccepted(t *testing.T) { IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, - BytesV: []byte{1}, - ParentV: gBlock.ID(), - HeightV: gBlock.Height() + 1, - TimestampV: gBlock.Timestamp(), + BytesV: []byte{1}, + ParentV: gBlock.ID(), + HeightV: gBlock.Height() + 1, } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return xBlock, nil } aBlock, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatalf("proposerVM could not build block due to %s", err) - } + require.NoError(err) coreVM.BuildBlockF = nil - if err := aBlock.Verify(context.Background()); err != nil { - t.Fatalf("could not verify valid block due to %s", err) - } + require.NoError(aBlock.Verify(context.Background())) // use a different way to construct pre-fork block Y and post-fork block B yBlock := &snowman.TestBlock{ @@ -1593,21 +1564,18 @@ func TestTwoForks_OneIsAccepted(t *testing.T) { IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, - BytesV: []byte{2}, - ParentV: gBlock.ID(), - HeightV: gBlock.Height() + 1, - TimestampV: gBlock.Timestamp(), + BytesV: []byte{2}, + ParentV: gBlock.ID(), + HeightV: gBlock.Height() + 1, } ySlb, err := statelessblock.BuildUnsigned( gBlock.ID(), - gBlock.Timestamp(), + proVM.Time(), defaultPChainHeight, yBlock.Bytes(), ) - if err != nil { - t.Fatalf("fail to manually build a block due to %s", err) - } + require.NoError(err) bBlock := postForkBlock{ SignedBlock: ySlb, @@ -1618,9 +1586,7 @@ func TestTwoForks_OneIsAccepted(t *testing.T) { }, } - if err := bBlock.Verify(context.Background()); err != nil { - t.Fatalf("could not verify valid block due to %s", err) - } + require.NoError(bBlock.Verify(context.Background())) // append Z/C to Y/B zBlock := &snowman.TestBlock{ @@ -1628,58 +1594,47 @@ func TestTwoForks_OneIsAccepted(t 
*testing.T) { IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, - BytesV: []byte{3}, - ParentV: yBlock.ID(), - HeightV: yBlock.Height() + 1, - TimestampV: yBlock.Timestamp(), + BytesV: []byte{3}, + ParentV: yBlock.ID(), + HeightV: yBlock.Height() + 1, } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return zBlock, nil } - if err := proVM.SetPreference(context.Background(), bBlock.ID()); err != nil { - t.Fatal(err) - } + require.NoError(proVM.SetPreference(context.Background(), bBlock.ID())) + proVM.Set(proVM.Time().Add(proposer.MaxBuildDelay)) cBlock, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatalf("proposerVM could not build block due to %s", err) - } + require.NoError(err) coreVM.BuildBlockF = nil - if err := cBlock.Verify(context.Background()); err != nil { - t.Fatalf("could not verify valid block due to %s", err) - } + require.NoError(cBlock.Verify(context.Background())) - if aBlock.Parent() != bBlock.Parent() || - zBlock.Parent() != yBlock.ID() || - cBlock.Parent() != bBlock.ID() { - t.Fatal("inconsistent parent") - } + require.Equal(bBlock.Parent(), aBlock.Parent()) + require.Equal(yBlock.ID(), zBlock.Parent()) + require.Equal(bBlock.ID(), cBlock.Parent()) - if yBlock.Status() == choices.Rejected { - t.Fatal("yBlock should not be rejected") - } + require.NotEqual(choices.Rejected, yBlock.Status()) // accept A - if err := aBlock.Accept(context.Background()); err != nil { - t.Fatalf("could not accept valid block due to %s", err) - } - - if xBlock.Status() != choices.Accepted { - t.Fatal("xBlock should be accepted because aBlock is accepted") - } + require.NoError(aBlock.Accept(context.Background())) - if yBlock.Status() != choices.Rejected { - t.Fatal("yBlock should be rejected") - } - if zBlock.Status() != choices.Rejected { - t.Fatal("zBlock should be rejected") - } + require.Equal(choices.Accepted, xBlock.Status()) + require.Equal(choices.Rejected, yBlock.Status()) + require.Equal(choices.Rejected, 
zBlock.Status()) } func TestTooFarAdvanced(t *testing.T) { - forkTime := time.Unix(0, 0) - coreVM, _, proVM, gBlock, _ := initTestProposerVM(t, forkTime, 0) + require := require.New(t) + + var ( + activationTime = time.Unix(0, 0) + durangoTime = mockable.MaxTime + ) + coreVM, _, proVM, gBlock, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + defer func() { + require.NoError(proVM.Shutdown(context.Background())) + }() xBlock := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ @@ -1707,12 +1662,8 @@ func TestTooFarAdvanced(t *testing.T) { return xBlock, nil } aBlock, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatalf("proposerVM could not build block due to %s", err) - } - if err := aBlock.Verify(context.Background()); err != nil { - t.Fatalf("could not verify valid block due to %s", err) - } + require.NoError(err) + require.NoError(aBlock.Verify(context.Background())) ySlb, err := statelessblock.BuildUnsigned( aBlock.ID(), @@ -1720,9 +1671,7 @@ func TestTooFarAdvanced(t *testing.T) { defaultPChainHeight, yBlock.Bytes(), ) - if err != nil { - t.Fatalf("fail to manually build a block due to %s", err) - } + require.NoError(err) bBlock := postForkBlock{ SignedBlock: ySlb, @@ -1733,20 +1682,17 @@ func TestTooFarAdvanced(t *testing.T) { }, } - if err := bBlock.Verify(context.Background()); err != errProposerWindowNotStarted { - t.Fatal("should have errored errProposerWindowNotStarted") - } + err = bBlock.Verify(context.Background()) + require.ErrorIs(err, errProposerWindowNotStarted) ySlb, err = statelessblock.BuildUnsigned( aBlock.ID(), - aBlock.Timestamp().Add(proposer.MaxDelay), + aBlock.Timestamp().Add(proposer.MaxVerifyDelay), defaultPChainHeight, yBlock.Bytes(), ) - if err != nil { - t.Fatalf("fail to manually build a block due to %s", err) - } + require.NoError(err) bBlock = postForkBlock{ SignedBlock: ySlb, @@ -1757,9 +1703,8 @@ func TestTooFarAdvanced(t *testing.T) { }, } - if err := 
bBlock.Verify(context.Background()); err != errTimeTooAdvanced { - t.Fatal("should have errored errTimeTooAdvanced") - } + err = bBlock.Verify(context.Background()) + require.ErrorIs(err, errTimeTooAdvanced) } // Ensure that Accepting a PostForkOption (B) causes both the other option and @@ -1774,8 +1719,16 @@ func TestTooFarAdvanced(t *testing.T) { // B(...) is B(X.opts[0]) // B(...) is C(X.opts[1]) func TestTwoOptions_OneIsAccepted(t *testing.T) { - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) - proVM.Set(coreGenBlk.Timestamp()) + require := require.New(t) + + var ( + activationTime = time.Unix(0, 0) + durangoTime = mockable.MaxTime + ) + coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + defer func() { + require.NoError(proVM.Shutdown(context.Background())) + }() xBlockID := ids.GenerateTestID() xBlock := &TestOptionsBlock{ @@ -1814,53 +1767,31 @@ func TestTwoOptions_OneIsAccepted(t *testing.T) { return xBlock, nil } aBlockIntf, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatal("could not build post fork oracle block") - } + require.NoError(err) - aBlock, ok := aBlockIntf.(*postForkBlock) - if !ok { - t.Fatal("expected post fork block") - } + require.IsType(&postForkBlock{}, aBlockIntf) + aBlock := aBlockIntf.(*postForkBlock) opts, err := aBlock.Options(context.Background()) - if err != nil { - t.Fatal("could not retrieve options from post fork oracle block") - } + require.NoError(err) - if err := aBlock.Verify(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(aBlock.Verify(context.Background())) bBlock := opts[0] - if err := bBlock.Verify(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(bBlock.Verify(context.Background())) cBlock := opts[1] - if err := cBlock.Verify(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(cBlock.Verify(context.Background())) - if err := aBlock.Accept(context.Background()); 
err != nil { - t.Fatal(err) - } + require.NoError(aBlock.Accept(context.Background())) - if err := bBlock.Accept(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(bBlock.Accept(context.Background())) // the other pre-fork option should be rejected - if xBlock.opts[1].Status() != choices.Rejected { - t.Fatal("the pre-fork option block should have be rejected") - } + require.Equal(choices.Rejected, xBlock.opts[1].Status()) // the other post-fork option should also be rejected - if err := cBlock.Reject(context.Background()); err != nil { - t.Fatal("the post-fork option block should have be rejected") - } + require.NoError(cBlock.Reject(context.Background())) - if cBlock.Status() != choices.Rejected { - t.Fatal("cBlock status should not be accepted") - } + require.Equal(choices.Rejected, cBlock.Status()) } // Ensure that given the chance, built blocks will reference a lagged P-chain @@ -1868,8 +1799,14 @@ func TestTwoOptions_OneIsAccepted(t *testing.T) { func TestLaggedPChainHeight(t *testing.T) { require := require.New(t) - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) - proVM.Set(coreGenBlk.Timestamp()) + var ( + activationTime = time.Unix(0, 0) + durangoTime = activationTime + ) + coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + defer func() { + require.NoError(proVM.Shutdown(context.Background())) + }() innerBlock := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ @@ -1887,8 +1824,8 @@ func TestLaggedPChainHeight(t *testing.T) { blockIntf, err := proVM.BuildBlock(context.Background()) require.NoError(err) - block, ok := blockIntf.(*postForkBlock) - require.True(ok, "expected post fork block") + require.IsType(&postForkBlock{}, blockIntf) + block := blockIntf.(*postForkBlock) pChainHeight := block.PChainHeight() require.Equal(pChainHeight, coreGenBlk.Height()) @@ -1912,30 +1849,22 @@ func TestRejectedHeightNotIndexed(t *testing.T) { coreHeights := 
[]ids.ID{coreGenBlk.ID()} initialState := []byte("genesis state") - coreVM := &struct { - block.TestVM - block.TestHeightIndexedVM - }{ - TestVM: block.TestVM{ - TestVM: common.TestVM{ - T: t, - }, - }, - TestHeightIndexedVM: block.TestHeightIndexedVM{ + coreVM := &block.TestVM{ + TestVM: common.TestVM{ T: t, - VerifyHeightIndexF: func(context.Context) error { - return nil - }, - GetBlockIDAtHeightF: func(_ context.Context, height uint64) (ids.ID, error) { - if height >= uint64(len(coreHeights)) { - return ids.ID{}, errTooHigh - } - return coreHeights[height], nil - }, + }, + VerifyHeightIndexF: func(context.Context) error { + return nil + }, + GetBlockIDAtHeightF: func(_ context.Context, height uint64) (ids.ID, error) { + if height >= uint64(len(coreHeights)) { + return ids.ID{}, errTooHigh + } + return coreHeights[height], nil }, } - coreVM.InitializeF = func(context.Context, *snow.Context, manager.Manager, + coreVM.InitializeF = func(context.Context, *snow.Context, database.Database, []byte, []byte, []byte, chan<- common.Message, []*common.Fx, common.AppSender, ) error { @@ -1963,11 +1892,15 @@ func TestRejectedHeightNotIndexed(t *testing.T) { proVM := New( coreVM, - time.Time{}, - 0, - DefaultMinBlockDelay, - pTestCert.PrivateKey.(crypto.Signer), - pTestCert.Leaf, + Config{ + ActivationTime: time.Unix(0, 0), + DurangoTime: time.Unix(0, 0), + MinimumPChainHeight: 0, + MinBlkDelay: DefaultMinBlockDelay, + NumHistoricalBlocks: DefaultNumHistoricalBlocks, + StakingLeafSigner: pTestSigner, + StakingCertLeaf: pTestCert, + }, ) valState := &validators.TestState{ @@ -1980,54 +1913,57 @@ func TestRejectedHeightNotIndexed(t *testing.T) { return defaultPChainHeight, nil } valState.GetValidatorSetF = func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { + var ( + thisNode = proVM.ctx.NodeID + nodeID1 = ids.BuildTestNodeID([]byte{1}) + nodeID2 = ids.BuildTestNodeID([]byte{2}) + nodeID3 = ids.BuildTestNodeID([]byte{3}) + ) return 
map[ids.NodeID]*validators.GetValidatorOutput{ - proVM.ctx.NodeID: { - NodeID: proVM.ctx.NodeID, + thisNode: { + NodeID: thisNode, Weight: 10, }, - {1}: { - NodeID: ids.NodeID{1}, + nodeID1: { + NodeID: nodeID1, Weight: 5, }, - {2}: { - NodeID: ids.NodeID{2}, + nodeID2: { + NodeID: nodeID2, Weight: 6, }, - {3}: { - NodeID: ids.NodeID{3}, + nodeID3: { + NodeID: nodeID3, Weight: 7, }, }, nil } - ctx := snow.DefaultContextTest() - ctx.NodeID = ids.NodeIDFromCert(pTestCert.Leaf) + ctx := snowtest.Context(t, snowtest.CChainID) + ctx.NodeID = ids.NodeIDFromCert(pTestCert) ctx.ValidatorState = valState - dummyDBManager := manager.NewMemDB(version.Semantic1_0_0) - // make sure that DBs are compressed correctly - dummyDBManager = dummyDBManager.NewPrefixDBManager([]byte{}) - err := proVM.Initialize( + require.NoError(proVM.Initialize( context.Background(), ctx, - dummyDBManager, + prefixdb.New([]byte{}, memdb.New()), // make sure that DBs are compressed correctly initialState, nil, nil, nil, nil, nil, - ) - require.NoError(err) + )) + defer func() { + require.NoError(proVM.Shutdown(context.Background())) + }() // Initialize shouldn't be called again coreVM.InitializeF = nil - err = proVM.SetState(context.Background(), snow.NormalOp) - require.NoError(err) + require.NoError(proVM.SetState(context.Background(), snow.NormalOp)) - err = proVM.SetPreference(context.Background(), coreGenBlk.IDV) - require.NoError(err) + require.NoError(proVM.SetPreference(context.Background(), coreGenBlk.IDV)) ctx.Lock.Lock() for proVM.VerifyHeightIndex(context.Background()) != nil { @@ -2056,8 +1992,7 @@ func TestRejectedHeightNotIndexed(t *testing.T) { require.NoError(err) coreVM.BuildBlockF = nil - err = aBlock.Verify(context.Background()) - require.NoError(err) + require.NoError(aBlock.Verify(context.Background())) // use a different way to construct inner block Y and outer block B yBlock := &snowman.TestBlock{ @@ -2088,12 +2023,10 @@ func TestRejectedHeightNotIndexed(t *testing.T) { }, } - 
err = bBlock.Verify(context.Background()) - require.NoError(err) + require.NoError(bBlock.Verify(context.Background())) // accept A - err = aBlock.Accept(context.Background()) - require.NoError(err) + require.NoError(aBlock.Accept(context.Background())) coreHeights = append(coreHeights, xBlock.ID()) blkID, err := proVM.GetBlockIDAtHeight(context.Background(), aBlock.Height()) @@ -2101,8 +2034,7 @@ func TestRejectedHeightNotIndexed(t *testing.T) { require.Equal(aBlock.ID(), blkID) // reject B - err = bBlock.Reject(context.Background()) - require.NoError(err) + require.NoError(bBlock.Reject(context.Background())) blkID, err = proVM.GetBlockIDAtHeight(context.Background(), aBlock.Height()) require.NoError(err) @@ -2127,30 +2059,22 @@ func TestRejectedOptionHeightNotIndexed(t *testing.T) { coreHeights := []ids.ID{coreGenBlk.ID()} initialState := []byte("genesis state") - coreVM := &struct { - block.TestVM - block.TestHeightIndexedVM - }{ - TestVM: block.TestVM{ - TestVM: common.TestVM{ - T: t, - }, - }, - TestHeightIndexedVM: block.TestHeightIndexedVM{ + coreVM := &block.TestVM{ + TestVM: common.TestVM{ T: t, - VerifyHeightIndexF: func(context.Context) error { - return nil - }, - GetBlockIDAtHeightF: func(_ context.Context, height uint64) (ids.ID, error) { - if height >= uint64(len(coreHeights)) { - return ids.ID{}, errTooHigh - } - return coreHeights[height], nil - }, + }, + VerifyHeightIndexF: func(context.Context) error { + return nil + }, + GetBlockIDAtHeightF: func(_ context.Context, height uint64) (ids.ID, error) { + if height >= uint64(len(coreHeights)) { + return ids.ID{}, errTooHigh + } + return coreHeights[height], nil }, } - coreVM.InitializeF = func(context.Context, *snow.Context, manager.Manager, + coreVM.InitializeF = func(context.Context, *snow.Context, database.Database, []byte, []byte, []byte, chan<- common.Message, []*common.Fx, common.AppSender, ) error { @@ -2178,11 +2102,15 @@ func TestRejectedOptionHeightNotIndexed(t *testing.T) { proVM := New( 
coreVM, - time.Time{}, - 0, - DefaultMinBlockDelay, - pTestCert.PrivateKey.(crypto.Signer), - pTestCert.Leaf, + Config{ + ActivationTime: time.Unix(0, 0), + DurangoTime: time.Unix(0, 0), + MinimumPChainHeight: 0, + MinBlkDelay: DefaultMinBlockDelay, + NumHistoricalBlocks: DefaultNumHistoricalBlocks, + StakingLeafSigner: pTestSigner, + StakingCertLeaf: pTestCert, + }, ) valState := &validators.TestState{ @@ -2195,54 +2123,57 @@ func TestRejectedOptionHeightNotIndexed(t *testing.T) { return defaultPChainHeight, nil } valState.GetValidatorSetF = func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { + var ( + thisNode = proVM.ctx.NodeID + nodeID1 = ids.BuildTestNodeID([]byte{1}) + nodeID2 = ids.BuildTestNodeID([]byte{2}) + nodeID3 = ids.BuildTestNodeID([]byte{3}) + ) return map[ids.NodeID]*validators.GetValidatorOutput{ - proVM.ctx.NodeID: { - NodeID: proVM.ctx.NodeID, + thisNode: { + NodeID: thisNode, Weight: 10, }, - {1}: { - NodeID: ids.NodeID{1}, + nodeID1: { + NodeID: nodeID1, Weight: 5, }, - {2}: { - NodeID: ids.NodeID{2}, + nodeID2: { + NodeID: nodeID2, Weight: 6, }, - {3}: { - NodeID: ids.NodeID{3}, + nodeID3: { + NodeID: nodeID3, Weight: 7, }, }, nil } - ctx := snow.DefaultContextTest() - ctx.NodeID = ids.NodeIDFromCert(pTestCert.Leaf) + ctx := snowtest.Context(t, snowtest.CChainID) + ctx.NodeID = ids.NodeIDFromCert(pTestCert) ctx.ValidatorState = valState - dummyDBManager := manager.NewMemDB(version.Semantic1_0_0) - // make sure that DBs are compressed correctly - dummyDBManager = dummyDBManager.NewPrefixDBManager([]byte{}) - err := proVM.Initialize( + require.NoError(proVM.Initialize( context.Background(), ctx, - dummyDBManager, + prefixdb.New([]byte{}, memdb.New()), // make sure that DBs are compressed correctly initialState, nil, nil, nil, nil, nil, - ) - require.NoError(err) + )) + defer func() { + require.NoError(proVM.Shutdown(context.Background())) + }() // Initialize shouldn't be called again coreVM.InitializeF 
= nil - err = proVM.SetState(context.Background(), snow.NormalOp) - require.NoError(err) + require.NoError(proVM.SetState(context.Background(), snow.NormalOp)) - err = proVM.SetPreference(context.Background(), coreGenBlk.IDV) - require.NoError(err) + require.NoError(proVM.SetPreference(context.Background(), coreGenBlk.IDV)) ctx.Lock.Lock() for proVM.VerifyHeightIndex(context.Background()) != nil { @@ -2291,26 +2222,22 @@ func TestRejectedOptionHeightNotIndexed(t *testing.T) { aBlockIntf, err := proVM.BuildBlock(context.Background()) require.NoError(err) - aBlock, ok := aBlockIntf.(*postForkBlock) - require.True(ok) + require.IsType(&postForkBlock{}, aBlockIntf) + aBlock := aBlockIntf.(*postForkBlock) opts, err := aBlock.Options(context.Background()) require.NoError(err) - err = aBlock.Verify(context.Background()) - require.NoError(err) + require.NoError(aBlock.Verify(context.Background())) bBlock := opts[0] - err = bBlock.Verify(context.Background()) - require.NoError(err) + require.NoError(bBlock.Verify(context.Background())) cBlock := opts[1] - err = cBlock.Verify(context.Background()) - require.NoError(err) + require.NoError(cBlock.Verify(context.Background())) // accept A - err = aBlock.Accept(context.Background()) - require.NoError(err) + require.NoError(aBlock.Accept(context.Background())) coreHeights = append(coreHeights, xBlock.ID()) blkID, err := proVM.GetBlockIDAtHeight(context.Background(), aBlock.Height()) @@ -2318,8 +2245,7 @@ func TestRejectedOptionHeightNotIndexed(t *testing.T) { require.Equal(aBlock.ID(), blkID) // accept B - err = bBlock.Accept(context.Background()) - require.NoError(err) + require.NoError(bBlock.Accept(context.Background())) coreHeights = append(coreHeights, xBlock.opts[0].ID()) blkID, err = proVM.GetBlockIDAtHeight(context.Background(), bBlock.Height()) @@ -2327,8 +2253,7 @@ func TestRejectedOptionHeightNotIndexed(t *testing.T) { require.Equal(bBlock.ID(), blkID) // reject C - err = cBlock.Reject(context.Background()) - 
require.NoError(err) + require.NoError(cBlock.Reject(context.Background())) blkID, err = proVM.GetBlockIDAtHeight(context.Background(), cBlock.Height()) require.NoError(err) @@ -2338,23 +2263,22 @@ func TestRejectedOptionHeightNotIndexed(t *testing.T) { func TestVMInnerBlkCache(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() // Create a VM - innerVM := mocks.NewMockChainVM(ctrl) + innerVM := block.NewMockChainVM(ctrl) vm := New( innerVM, - time.Time{}, // fork is active - 0, // minimum P-Chain height - DefaultMinBlockDelay, - pTestCert.PrivateKey.(crypto.Signer), - pTestCert.Leaf, + Config{ + ActivationTime: time.Unix(0, 0), + DurangoTime: time.Unix(0, 0), + MinimumPChainHeight: 0, + MinBlkDelay: DefaultMinBlockDelay, + NumHistoricalBlocks: DefaultNumHistoricalBlocks, + StakingLeafSigner: pTestSigner, + StakingCertLeaf: pTestCert, + }, ) - dummyDBManager := manager.NewMemDB(version.Semantic1_0_0) - // make sure that DBs are compressed correctly - dummyDBManager = dummyDBManager.NewPrefixDBManager([]byte{}) - innerVM.EXPECT().Initialize( gomock.Any(), gomock.Any(), @@ -2366,22 +2290,34 @@ func TestVMInnerBlkCache(t *testing.T) { gomock.Any(), gomock.Any(), ).Return(nil) + innerVM.EXPECT().VerifyHeightIndex(gomock.Any()).Return(nil) + innerVM.EXPECT().Shutdown(gomock.Any()).Return(nil) + + { + innerBlk := snowman.NewMockBlock(ctrl) + innerBlkID := ids.GenerateTestID() + innerVM.EXPECT().LastAccepted(gomock.Any()).Return(innerBlkID, nil) + innerVM.EXPECT().GetBlock(gomock.Any(), innerBlkID).Return(innerBlk, nil) + } - ctx := snow.DefaultContextTest() - ctx.NodeID = ids.NodeIDFromCert(pTestCert.Leaf) + ctx := snowtest.Context(t, snowtest.CChainID) + ctx.NodeID = ids.NodeIDFromCert(pTestCert) - err := vm.Initialize( + require.NoError(vm.Initialize( context.Background(), ctx, - dummyDBManager, + prefixdb.New([]byte{}, memdb.New()), // make sure that DBs are compressed correctly nil, nil, nil, nil, nil, nil, - ) - 
require.NoError(err) + )) + defer func() { + require.NoError(vm.Shutdown(context.Background())) + }() + state := state.NewMockState(ctrl) // mock state vm.State = state @@ -2391,10 +2327,10 @@ func TestVMInnerBlkCache(t *testing.T) { ids.GenerateTestID(), // parent time.Time{}, // timestamp 1, // pChainHeight, - vm.stakingCertLeaf, // cert + vm.StakingCertLeaf, // cert blkNearTipInnerBytes, // inner blk bytes vm.ctx.ChainID, // chain ID - vm.stakingLeafSigner, // key + vm.StakingLeafSigner, // key ) require.NoError(err) @@ -2404,6 +2340,8 @@ func TestVMInnerBlkCache(t *testing.T) { // We will ask the inner VM to parse. mockInnerBlkNearTip := snowman.NewMockBlock(ctrl) mockInnerBlkNearTip.EXPECT().Height().Return(uint64(1)).Times(2) + mockInnerBlkNearTip.EXPECT().Bytes().Return(blkNearTipInnerBytes).Times(1) + innerVM.EXPECT().ParseBlock(gomock.Any(), blkNearTipInnerBytes).Return(mockInnerBlkNearTip, nil).Times(2) _, err = vm.ParseBlock(context.Background(), blkNearTip.Bytes()) require.NoError(err) @@ -2413,7 +2351,7 @@ func TestVMInnerBlkCache(t *testing.T) { gotBlk, ok := vm.innerBlkCache.Get(blkNearTip.ID()) require.True(ok) require.Equal(mockInnerBlkNearTip, gotBlk) - require.EqualValues(0, vm.lastAcceptedHeight) + require.Zero(vm.lastAcceptedHeight) // Clear the cache vm.innerBlkCache.Flush() @@ -2432,8 +2370,14 @@ func TestVMInnerBlkCache(t *testing.T) { func TestVMInnerBlkCacheDeduplicationRegression(t *testing.T) { require := require.New(t) - forkTime := time.Unix(0, 0) - coreVM, _, proVM, gBlock, _ := initTestProposerVM(t, forkTime, 0) + var ( + activationTime = time.Unix(0, 0) + durangoTime = activationTime + ) + coreVM, _, proVM, gBlock, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + defer func() { + require.NoError(proVM.Shutdown(context.Background())) + }() // create pre-fork block X and post-fork block A xBlock := &snowman.TestBlock{ @@ -2480,17 +2424,10 @@ func TestVMInnerBlkCacheDeduplicationRegression(t *testing.T) { bBlock, err := 
proVM.ParseBlock(context.Background(), bBlockBytes) require.NoError(err) - err = aBlock.Verify(context.Background()) - require.NoError(err) - - err = bBlock.Verify(context.Background()) - require.NoError(err) - - err = aBlock.Accept(context.Background()) - require.NoError(err) - - err = bBlock.Reject(context.Background()) - require.NoError(err) + require.NoError(aBlock.Verify(context.Background())) + require.NoError(bBlock.Verify(context.Background())) + require.NoError(aBlock.Accept(context.Background())) + require.NoError(bBlock.Reject(context.Background())) require.Equal( choices.Accepted, @@ -2510,9 +2447,52 @@ func TestVMInnerBlkCacheDeduplicationRegression(t *testing.T) { ) } +func TestVMInnerBlkMarkedAcceptedRegression(t *testing.T) { + require := require.New(t) + var ( + activationTime = time.Unix(0, 0) + durangoTime = activationTime + ) + coreVM, _, proVM, gBlock, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + defer func() { + require.NoError(proVM.Shutdown(context.Background())) + }() + + // create an inner block and wrap it in an postForkBlock. 
+ innerBlock := &snowman.TestBlock{ + TestDecidable: choices.TestDecidable{ + IDV: ids.GenerateTestID(), + StatusV: choices.Processing, + }, + BytesV: []byte{1}, + ParentV: gBlock.ID(), + HeightV: gBlock.Height() + 1, + TimestampV: gBlock.Timestamp(), + } + + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return innerBlock, nil + } + outerBlock, err := proVM.BuildBlock(context.Background()) + require.NoError(err) + coreVM.BuildBlockF = nil + + require.NoError(outerBlock.Verify(context.Background())) + require.NoError(outerBlock.Accept(context.Background())) + + coreVM.GetBlockF = func(_ context.Context, id ids.ID) (snowman.Block, error) { + require.Equal(innerBlock.ID(), id) + return innerBlock, nil + } + + wrappedInnerBlock, err := proVM.GetBlock(context.Background(), innerBlock.ID()) + require.NoError(err) + require.Equal(choices.Rejected, wrappedInnerBlock.Status()) +} + type blockWithVerifyContext struct { *snowman.MockBlock - *mocks.MockWithVerifyContext + *block.MockWithVerifyContext } // Ensures that we call [VerifyWithContext] rather than [Verify] on blocks that @@ -2521,22 +2501,24 @@ type blockWithVerifyContext struct { func TestVM_VerifyBlockWithContext(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() // Create a VM - innerVM := mocks.NewMockChainVM(ctrl) + innerVM := block.NewMockChainVM(ctrl) vm := New( innerVM, - time.Time{}, // fork is active - 0, // minimum P-Chain height - DefaultMinBlockDelay, - pTestCert.PrivateKey.(crypto.Signer), - pTestCert.Leaf, + Config{ + ActivationTime: time.Unix(0, 0), + DurangoTime: time.Unix(0, 0), + MinimumPChainHeight: 0, + MinBlkDelay: DefaultMinBlockDelay, + NumHistoricalBlocks: DefaultNumHistoricalBlocks, + StakingLeafSigner: pTestSigner, + StakingCertLeaf: pTestCert, + }, ) - dummyDBManager := manager.NewMemDB(version.Semantic1_0_0) // make sure that DBs are compressed correctly - dummyDBManager = dummyDBManager.NewPrefixDBManager([]byte{}) + db 
:= prefixdb.New([]byte{}, memdb.New()) innerVM.EXPECT().Initialize( gomock.Any(), @@ -2549,28 +2531,39 @@ func TestVM_VerifyBlockWithContext(t *testing.T) { gomock.Any(), gomock.Any(), ).Return(nil) + innerVM.EXPECT().VerifyHeightIndex(gomock.Any()).Return(nil) + innerVM.EXPECT().Shutdown(gomock.Any()).Return(nil) + + { + innerBlk := snowman.NewMockBlock(ctrl) + innerBlkID := ids.GenerateTestID() + innerVM.EXPECT().LastAccepted(gomock.Any()).Return(innerBlkID, nil) + innerVM.EXPECT().GetBlock(gomock.Any(), innerBlkID).Return(innerBlk, nil) + } - snowCtx := snow.DefaultContextTest() - snowCtx.NodeID = ids.NodeIDFromCert(pTestCert.Leaf) + snowCtx := snowtest.Context(t, snowtest.CChainID) + snowCtx.NodeID = ids.NodeIDFromCert(pTestCert) - err := vm.Initialize( + require.NoError(vm.Initialize( context.Background(), snowCtx, - dummyDBManager, + db, nil, nil, nil, nil, nil, nil, - ) - require.NoError(err) + )) + defer func() { + require.NoError(vm.Shutdown(context.Background())) + }() { pChainHeight := uint64(0) innerBlk := blockWithVerifyContext{ MockBlock: snowman.NewMockBlock(ctrl), - MockWithVerifyContext: mocks.NewMockWithVerifyContext(ctrl), + MockWithVerifyContext: block.NewMockWithVerifyContext(ctrl), } innerBlk.MockWithVerifyContext.EXPECT().ShouldVerifyWithContext(gomock.Any()).Return(true, nil).Times(2) innerBlk.MockWithVerifyContext.EXPECT().VerifyWithContext(context.Background(), @@ -2580,20 +2573,20 @@ func TestVM_VerifyBlockWithContext(t *testing.T) { ).Return(nil) innerBlk.MockBlock.EXPECT().Parent().Return(ids.GenerateTestID()).AnyTimes() innerBlk.MockBlock.EXPECT().ID().Return(ids.GenerateTestID()).AnyTimes() + innerBlk.MockBlock.EXPECT().Bytes().Return(utils.RandomBytes(1024)).AnyTimes() blk := NewMockPostForkBlock(ctrl) blk.EXPECT().getInnerBlk().Return(innerBlk).AnyTimes() blkID := ids.GenerateTestID() blk.EXPECT().ID().Return(blkID).AnyTimes() - err = vm.verifyAndRecordInnerBlk( + require.NoError(vm.verifyAndRecordInnerBlk( context.Background(), 
&block.Context{ PChainHeight: pChainHeight, }, blk, - ) - require.NoError(err) + )) // Call VerifyWithContext again but with a different P-Chain height blk.EXPECT().setInnerBlk(innerBlk).AnyTimes() @@ -2604,14 +2597,13 @@ func TestVM_VerifyBlockWithContext(t *testing.T) { }, ).Return(nil) - err = vm.verifyAndRecordInnerBlk( + require.NoError(vm.verifyAndRecordInnerBlk( context.Background(), &block.Context{ PChainHeight: pChainHeight, }, blk, - ) - require.NoError(err) + )) } { @@ -2619,7 +2611,7 @@ func TestVM_VerifyBlockWithContext(t *testing.T) { // false for ShouldVerifyWithContext innerBlk := blockWithVerifyContext{ MockBlock: snowman.NewMockBlock(ctrl), - MockWithVerifyContext: mocks.NewMockWithVerifyContext(ctrl), + MockWithVerifyContext: block.NewMockWithVerifyContext(ctrl), } innerBlk.MockWithVerifyContext.EXPECT().ShouldVerifyWithContext(gomock.Any()).Return(false, nil) innerBlk.MockBlock.EXPECT().Verify(gomock.Any()).Return(nil) @@ -2629,21 +2621,20 @@ func TestVM_VerifyBlockWithContext(t *testing.T) { blk.EXPECT().getInnerBlk().Return(innerBlk).AnyTimes() blkID := ids.GenerateTestID() blk.EXPECT().ID().Return(blkID).AnyTimes() - err = vm.verifyAndRecordInnerBlk( + require.NoError(vm.verifyAndRecordInnerBlk( context.Background(), &block.Context{ PChainHeight: 1, }, blk, - ) - require.NoError(err) + )) } { // Ensure we call Verify on a block that doesn't have a valid context innerBlk := blockWithVerifyContext{ MockBlock: snowman.NewMockBlock(ctrl), - MockWithVerifyContext: mocks.NewMockWithVerifyContext(ctrl), + MockWithVerifyContext: block.NewMockWithVerifyContext(ctrl), } innerBlk.MockBlock.EXPECT().Verify(gomock.Any()).Return(nil) innerBlk.MockBlock.EXPECT().Parent().Return(ids.GenerateTestID()).AnyTimes() @@ -2652,7 +2643,273 @@ func TestVM_VerifyBlockWithContext(t *testing.T) { blk.EXPECT().getInnerBlk().Return(innerBlk).AnyTimes() blkID := ids.GenerateTestID() blk.EXPECT().ID().Return(blkID).AnyTimes() - err = 
vm.verifyAndRecordInnerBlk(context.Background(), nil, blk) + require.NoError(vm.verifyAndRecordInnerBlk(context.Background(), nil, blk)) + } +} + +func TestHistoricalBlockDeletion(t *testing.T) { + require := require.New(t) + + coreGenBlk := &snowman.TestBlock{ + TestDecidable: choices.TestDecidable{ + IDV: ids.GenerateTestID(), + StatusV: choices.Accepted, + }, + HeightV: 0, + TimestampV: genesisTimestamp, + BytesV: utils.RandomBytes(1024), + } + acceptedBlocks := []snowman.Block{coreGenBlk} + currentHeight := uint64(0) + + initialState := []byte("genesis state") + coreVM := &block.TestVM{ + TestVM: common.TestVM{ + T: t, + InitializeF: func(context.Context, *snow.Context, database.Database, []byte, []byte, []byte, chan<- common.Message, []*common.Fx, common.AppSender) error { + return nil + }, + }, + LastAcceptedF: func(context.Context) (ids.ID, error) { + return acceptedBlocks[currentHeight].ID(), nil + }, + GetBlockF: func(_ context.Context, blkID ids.ID) (snowman.Block, error) { + for _, blk := range acceptedBlocks { + if blkID == blk.ID() { + return blk, nil + } + } + return nil, errUnknownBlock + }, + ParseBlockF: func(_ context.Context, b []byte) (snowman.Block, error) { + for _, blk := range acceptedBlocks { + if bytes.Equal(b, blk.Bytes()) { + return blk, nil + } + } + return nil, errUnknownBlock + }, + VerifyHeightIndexF: func(context.Context) error { + return nil + }, + GetBlockIDAtHeightF: func(_ context.Context, height uint64) (ids.ID, error) { + if height >= uint64(len(acceptedBlocks)) { + return ids.ID{}, errTooHigh + } + return acceptedBlocks[height].ID(), nil + }, + } + + ctx := snowtest.Context(t, snowtest.CChainID) + ctx.NodeID = ids.NodeIDFromCert(pTestCert) + ctx.ValidatorState = &validators.TestState{ + T: t, + GetMinimumHeightF: func(context.Context) (uint64, error) { + return coreGenBlk.HeightV, nil + }, + GetCurrentHeightF: func(context.Context) (uint64, error) { + return defaultPChainHeight, nil + }, + GetValidatorSetF: 
func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { + return nil, nil + }, + } + + // make sure that DBs are compressed correctly + db := prefixdb.New([]byte{}, memdb.New()) + + proVM := New( + coreVM, + Config{ + ActivationTime: time.Unix(0, 0), + DurangoTime: mockable.MaxTime, + MinimumPChainHeight: 0, + MinBlkDelay: DefaultMinBlockDelay, + NumHistoricalBlocks: DefaultNumHistoricalBlocks, + StakingLeafSigner: pTestSigner, + StakingCertLeaf: pTestCert, + }, + ) + + require.NoError(proVM.Initialize( + context.Background(), + ctx, + db, + initialState, + nil, + nil, + nil, + nil, + nil, + )) + + lastAcceptedID, err := proVM.LastAccepted(context.Background()) + require.NoError(err) + + require.NoError(proVM.SetState(context.Background(), snow.NormalOp)) + require.NoError(proVM.SetPreference(context.Background(), lastAcceptedID)) + require.NoError(proVM.VerifyHeightIndex(context.Background())) + + issueBlock := func() { + lastAcceptedBlock := acceptedBlocks[currentHeight] + innerBlock := &snowman.TestBlock{ + TestDecidable: choices.TestDecidable{ + IDV: ids.GenerateTestID(), + StatusV: choices.Processing, + }, + ParentV: lastAcceptedBlock.ID(), + HeightV: lastAcceptedBlock.Height() + 1, + TimestampV: lastAcceptedBlock.Timestamp(), + BytesV: utils.RandomBytes(1024), + } + + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return innerBlock, nil + } + proBlock, err := proVM.BuildBlock(context.Background()) require.NoError(err) + + require.NoError(proBlock.Verify(context.Background())) + require.NoError(proVM.SetPreference(context.Background(), proBlock.ID())) + require.NoError(proBlock.Accept(context.Background())) + + acceptedBlocks = append(acceptedBlocks, innerBlock) + currentHeight++ + } + + requireHeights := func(start, end uint64) { + for i := start; i <= end; i++ { + _, err := proVM.GetBlockIDAtHeight(context.Background(), i) + require.NoError(err) + } + } + + requireMissingHeights := func(start, end 
uint64) { + for i := start; i <= end; i++ { + _, err := proVM.GetBlockIDAtHeight(context.Background(), i) + require.ErrorIs(err, database.ErrNotFound) + } + } + + requireNumHeights := func(numIndexed uint64) { + requireHeights(0, 0) + requireMissingHeights(1, currentHeight-numIndexed-1) + requireHeights(currentHeight-numIndexed, currentHeight) } + + // Because block pruning is disabled by default, the heights should be + // populated for every accepted block. + requireHeights(0, currentHeight) + + issueBlock() + requireHeights(0, currentHeight) + + issueBlock() + requireHeights(0, currentHeight) + + issueBlock() + requireHeights(0, currentHeight) + + issueBlock() + requireHeights(0, currentHeight) + + issueBlock() + requireHeights(0, currentHeight) + + require.NoError(proVM.Shutdown(context.Background())) + + numHistoricalBlocks := uint64(2) + proVM = New( + coreVM, + Config{ + ActivationTime: time.Time{}, + DurangoTime: mockable.MaxTime, + MinimumPChainHeight: 0, + MinBlkDelay: DefaultMinBlockDelay, + NumHistoricalBlocks: numHistoricalBlocks, + StakingLeafSigner: pTestSigner, + StakingCertLeaf: pTestCert, + }, + ) + + require.NoError(proVM.Initialize( + context.Background(), + ctx, + db, + initialState, + nil, + nil, + nil, + nil, + nil, + )) + + lastAcceptedID, err = proVM.LastAccepted(context.Background()) + require.NoError(err) + + require.NoError(proVM.SetState(context.Background(), snow.NormalOp)) + require.NoError(proVM.SetPreference(context.Background(), lastAcceptedID)) + require.NoError(proVM.VerifyHeightIndex(context.Background())) + + // Verify that old blocks were pruned during startup + requireNumHeights(numHistoricalBlocks) + + // As we issue new blocks, the oldest indexed height should be pruned. 
+ issueBlock() + requireNumHeights(numHistoricalBlocks) + + issueBlock() + requireNumHeights(numHistoricalBlocks) + + require.NoError(proVM.Shutdown(context.Background())) + + newNumHistoricalBlocks := numHistoricalBlocks + 2 + proVM = New( + coreVM, + Config{ + ActivationTime: time.Time{}, + DurangoTime: mockable.MaxTime, + MinimumPChainHeight: 0, + MinBlkDelay: DefaultMinBlockDelay, + NumHistoricalBlocks: newNumHistoricalBlocks, + StakingLeafSigner: pTestSigner, + StakingCertLeaf: pTestCert, + }, + ) + + require.NoError(proVM.Initialize( + context.Background(), + ctx, + db, + initialState, + nil, + nil, + nil, + nil, + nil, + )) + defer func() { + require.NoError(proVM.Shutdown(context.Background())) + }() + + lastAcceptedID, err = proVM.LastAccepted(context.Background()) + require.NoError(err) + + require.NoError(proVM.SetState(context.Background(), snow.NormalOp)) + require.NoError(proVM.SetPreference(context.Background(), lastAcceptedID)) + require.NoError(proVM.VerifyHeightIndex(context.Background())) + + // The height index shouldn't be modified at this point + requireNumHeights(numHistoricalBlocks) + + // As we issue new blocks, the number of indexed blocks should increase + // until we hit our target again. + issueBlock() + requireNumHeights(numHistoricalBlocks + 1) + + issueBlock() + requireNumHeights(newNumHistoricalBlocks) + + issueBlock() + requireNumHeights(newNumHistoricalBlocks) } diff --git a/avalanchego/vms/registry/mock_vm_getter.go b/avalanchego/vms/registry/mock_vm_getter.go index c6b4e1dd..30c38f1b 100644 --- a/avalanchego/vms/registry/mock_vm_getter.go +++ b/avalanchego/vms/registry/mock_vm_getter.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. 
// Source: github.com/ava-labs/avalanchego/vms/registry (interfaces: VMGetter) +// +// Generated by this command: +// +// mockgen -package=registry -destination=vms/registry/mock_vm_getter.go github.com/ava-labs/avalanchego/vms/registry VMGetter +// // Package registry is a generated GoMock package. package registry @@ -12,7 +14,7 @@ import ( ids "github.com/ava-labs/avalanchego/ids" vms "github.com/ava-labs/avalanchego/vms" - gomock "github.com/golang/mock/gomock" + gomock "go.uber.org/mock/gomock" ) // MockVMGetter is a mock of VMGetter interface. diff --git a/avalanchego/vms/registry/mock_vm_registerer.go b/avalanchego/vms/registry/mock_vm_registerer.go deleted file mode 100644 index f068d77a..00000000 --- a/avalanchego/vms/registry/mock_vm_registerer.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/ava-labs/avalanchego/vms/registry (interfaces: VMRegisterer) - -// Package registry is a generated GoMock package. -package registry - -import ( - context "context" - reflect "reflect" - - ids "github.com/ava-labs/avalanchego/ids" - vms "github.com/ava-labs/avalanchego/vms" - gomock "github.com/golang/mock/gomock" -) - -// MockVMRegisterer is a mock of VMRegisterer interface. -type MockVMRegisterer struct { - ctrl *gomock.Controller - recorder *MockVMRegistererMockRecorder -} - -// MockVMRegistererMockRecorder is the mock recorder for MockVMRegisterer. -type MockVMRegistererMockRecorder struct { - mock *MockVMRegisterer -} - -// NewMockVMRegisterer creates a new mock instance. -func NewMockVMRegisterer(ctrl *gomock.Controller) *MockVMRegisterer { - mock := &MockVMRegisterer{ctrl: ctrl} - mock.recorder = &MockVMRegistererMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. 
-func (m *MockVMRegisterer) EXPECT() *MockVMRegistererMockRecorder { - return m.recorder -} - -// Register mocks base method. -func (m *MockVMRegisterer) Register(arg0 context.Context, arg1 ids.ID, arg2 vms.Factory) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Register", arg0, arg1, arg2) - ret0, _ := ret[0].(error) - return ret0 -} - -// Register indicates an expected call of Register. -func (mr *MockVMRegistererMockRecorder) Register(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Register", reflect.TypeOf((*MockVMRegisterer)(nil).Register), arg0, arg1, arg2) -} - -// RegisterWithReadLock mocks base method. -func (m *MockVMRegisterer) RegisterWithReadLock(arg0 context.Context, arg1 ids.ID, arg2 vms.Factory) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RegisterWithReadLock", arg0, arg1, arg2) - ret0, _ := ret[0].(error) - return ret0 -} - -// RegisterWithReadLock indicates an expected call of RegisterWithReadLock. -func (mr *MockVMRegistererMockRecorder) RegisterWithReadLock(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterWithReadLock", reflect.TypeOf((*MockVMRegisterer)(nil).RegisterWithReadLock), arg0, arg1, arg2) -} diff --git a/avalanchego/vms/registry/mock_vm_registry.go b/avalanchego/vms/registry/mock_vm_registry.go index 6d6dd047..43efd85a 100644 --- a/avalanchego/vms/registry/mock_vm_registry.go +++ b/avalanchego/vms/registry/mock_vm_registry.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. 
// Source: github.com/ava-labs/avalanchego/vms/registry (interfaces: VMRegistry) +// +// Generated by this command: +// +// mockgen -package=registry -destination=vms/registry/mock_vm_registry.go github.com/ava-labs/avalanchego/vms/registry VMRegistry +// // Package registry is a generated GoMock package. package registry @@ -12,7 +14,7 @@ import ( reflect "reflect" ids "github.com/ava-labs/avalanchego/ids" - gomock "github.com/golang/mock/gomock" + gomock "go.uber.org/mock/gomock" ) // MockVMRegistry is a mock of VMRegistry interface. @@ -49,23 +51,7 @@ func (m *MockVMRegistry) Reload(arg0 context.Context) ([]ids.ID, map[ids.ID]erro } // Reload indicates an expected call of Reload. -func (mr *MockVMRegistryMockRecorder) Reload(arg0 interface{}) *gomock.Call { +func (mr *MockVMRegistryMockRecorder) Reload(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reload", reflect.TypeOf((*MockVMRegistry)(nil).Reload), arg0) } - -// ReloadWithReadLock mocks base method. -func (m *MockVMRegistry) ReloadWithReadLock(arg0 context.Context) ([]ids.ID, map[ids.ID]error, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ReloadWithReadLock", arg0) - ret0, _ := ret[0].([]ids.ID) - ret1, _ := ret[1].(map[ids.ID]error) - ret2, _ := ret[2].(error) - return ret0, ret1, ret2 -} - -// ReloadWithReadLock indicates an expected call of ReloadWithReadLock. -func (mr *MockVMRegistryMockRecorder) ReloadWithReadLock(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReloadWithReadLock", reflect.TypeOf((*MockVMRegistry)(nil).ReloadWithReadLock), arg0) -} diff --git a/avalanchego/vms/registry/vm_getter.go b/avalanchego/vms/registry/vm_getter.go index 5115af9e..82662474 100644 --- a/avalanchego/vms/registry/vm_getter.go +++ b/avalanchego/vms/registry/vm_getter.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
+// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package registry diff --git a/avalanchego/vms/registry/vm_getter_test.go b/avalanchego/vms/registry/vm_getter_test.go index ce659cf4..fd42ee4c 100644 --- a/avalanchego/vms/registry/vm_getter_test.go +++ b/avalanchego/vms/registry/vm_getter_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package registry @@ -9,12 +9,13 @@ import ( "testing" "time" - "github.com/golang/mock/gomock" - + "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/filesystem" + "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/resource" "github.com/ava-labs/avalanchego/vms" ) @@ -63,7 +64,6 @@ var ( // Get should fail if we hit an io issue when reading files on the disk func TestGet_ReadDirFails(t *testing.T) { resources := initVMGetterTest(t) - defer resources.ctrl.Finish() // disk read fails resources.mockReader.EXPECT().ReadDir(pluginDir).Times(1).Return(nil, errTest) @@ -75,7 +75,6 @@ func TestGet_ReadDirFails(t *testing.T) { // Get should fail if we see an invalid VM id func TestGet_InvalidVMName(t *testing.T) { resources := initVMGetterTest(t) - defer resources.ctrl.Finish() resources.mockReader.EXPECT().ReadDir(pluginDir).Times(1).Return(invalidVMs, nil) // didn't find an alias, so we'll try using this invalid vm name @@ -88,7 +87,6 @@ func TestGet_InvalidVMName(t *testing.T) { // Get should fail if we can't get the VM factory func TestGet_GetFactoryFails(t *testing.T) { resources := initVMGetterTest(t) - defer resources.ctrl.Finish() vm, _ := ids.FromString("vmId") @@ -106,7 +104,6 @@ func TestGet_Success(t *testing.T) { require := require.New(t) resources 
:= initVMGetterTest(t) - defer resources.ctrl.Finish() registeredVMId := ids.GenerateTestID() unregisteredVMId := ids.GenerateTestID() @@ -143,13 +140,23 @@ func initVMGetterTest(t *testing.T) *vmGetterTestResources { mockReader := filesystem.NewMockReader(ctrl) mockManager := vms.NewMockManager(ctrl) + mockRegistry := prometheus.NewRegistry() + mockCPUTracker, err := resource.NewManager( + logging.NoLog{}, + "", + time.Hour, + time.Hour, + time.Hour, + mockRegistry, + ) + require.NoError(t, err) getter := NewVMGetter( VMGetterConfig{ FileReader: mockReader, Manager: mockManager, PluginDirectory: pluginDir, - CPUTracker: resource.NewManager("", time.Hour, time.Hour, time.Hour), + CPUTracker: mockCPUTracker, }, ) diff --git a/avalanchego/vms/registry/vm_registerer.go b/avalanchego/vms/registry/vm_registerer.go deleted file mode 100644 index 6a6fdd85..00000000 --- a/avalanchego/vms/registry/vm_registerer.go +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package registry - -import ( - "context" - "fmt" - "path" - "sync" - - "go.uber.org/zap" - - "github.com/ava-labs/avalanchego/api/server" - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/engine/common" - "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/vms" -) - -var _ VMRegisterer = (*vmRegisterer)(nil) - -// VMRegisterer defines functionality to install a virtual machine. -type VMRegisterer interface { - registerer - // RegisterWithReadLock installs the VM assuming that the http read-lock is - // held. - RegisterWithReadLock(context.Context, ids.ID, vms.Factory) error -} - -type registerer interface { - // Register installs the VM. - Register(context.Context, ids.ID, vms.Factory) error -} - -// VMRegistererConfig configures settings for VMRegisterer. 
-type VMRegistererConfig struct { - APIServer server.Server - Log logging.Logger - VMFactoryLog logging.Logger - VMManager vms.Manager -} - -type vmRegisterer struct { - config VMRegistererConfig -} - -// NewVMRegisterer returns an instance of VMRegisterer -func NewVMRegisterer(config VMRegistererConfig) VMRegisterer { - return &vmRegisterer{ - config: config, - } -} - -func (r *vmRegisterer) Register(ctx context.Context, vmID ids.ID, factory vms.Factory) error { - return r.register(ctx, r.config.APIServer, vmID, factory) -} - -func (r *vmRegisterer) RegisterWithReadLock(ctx context.Context, vmID ids.ID, factory vms.Factory) error { - return r.register(ctx, server.PathWriterFromWithReadLock(r.config.APIServer), vmID, factory) -} - -func (r *vmRegisterer) register(ctx context.Context, pathAdder server.PathAdder, vmID ids.ID, factory vms.Factory) error { - if err := r.config.VMManager.RegisterFactory(ctx, vmID, factory); err != nil { - return err - } - handlers, err := r.createStaticHandlers(ctx, vmID, factory) - if err != nil { - return err - } - - // all static endpoints go to the vm endpoint, defaulting to the vm id - defaultEndpoint := path.Join(constants.VMAliasPrefix, vmID.String()) - - if err := r.createStaticEndpoints(pathAdder, handlers, defaultEndpoint); err != nil { - return err - } - urlAliases, err := r.getURLAliases(vmID, defaultEndpoint) - if err != nil { - return err - } - return pathAdder.AddAliases(defaultEndpoint, urlAliases...) -} - -// Creates a dedicated VM instance for the sole purpose of serving the static -// handlers. 
-func (r *vmRegisterer) createStaticHandlers( - ctx context.Context, - vmID ids.ID, - factory vms.Factory, -) (map[string]*common.HTTPHandler, error) { - vm, err := factory.New(r.config.VMFactoryLog) - if err != nil { - return nil, err - } - - commonVM, ok := vm.(common.VM) - if !ok { - return nil, fmt.Errorf("%s doesn't implement VM", vmID) - } - - handlers, err := commonVM.CreateStaticHandlers(ctx) - if err != nil { - r.config.Log.Error("failed to create static API endpoints", - zap.Stringer("vmID", vmID), - zap.Error(err), - ) - - if err := commonVM.Shutdown(ctx); err != nil { - return nil, fmt.Errorf("shutting down VM errored with: %w", err) - } - return nil, err - } - return handlers, nil -} - -func (r *vmRegisterer) createStaticEndpoints(pathAdder server.PathAdder, handlers map[string]*common.HTTPHandler, defaultEndpoint string) error { - // use a single lock for this entire vm - lock := new(sync.RWMutex) - // register the static endpoints - for extension, service := range handlers { - r.config.Log.Verbo("adding static API endpoint", - zap.String("endpoint", defaultEndpoint), - zap.String("extension", extension), - ) - if err := pathAdder.AddRoute(service, lock, defaultEndpoint, extension); err != nil { - return fmt.Errorf( - "failed to add static API endpoint %s%s: %w", - defaultEndpoint, - extension, - err, - ) - } - } - return nil -} - -func (r vmRegisterer) getURLAliases(vmID ids.ID, defaultEndpoint string) ([]string, error) { - aliases, err := r.config.VMManager.Aliases(vmID) - if err != nil { - return nil, err - } - - var urlAliases []string - for _, alias := range aliases { - urlAlias := path.Join(constants.VMAliasPrefix, alias) - if urlAlias != defaultEndpoint { - urlAliases = append(urlAliases, urlAlias) - } - } - return urlAliases, err -} - -type readRegisterer struct { - registerer VMRegisterer -} - -func (r readRegisterer) Register(ctx context.Context, vmID ids.ID, factory vms.Factory) error { - return r.registerer.RegisterWithReadLock(ctx, vmID, 
factory) -} diff --git a/avalanchego/vms/registry/vm_registerer_test.go b/avalanchego/vms/registry/vm_registerer_test.go deleted file mode 100644 index e469277b..00000000 --- a/avalanchego/vms/registry/vm_registerer_test.go +++ /dev/null @@ -1,456 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package registry - -import ( - "context" - "path" - "testing" - - "github.com/golang/mock/gomock" - - "github.com/stretchr/testify/require" - - "github.com/ava-labs/avalanchego/api/server" - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/engine/common" - "github.com/ava-labs/avalanchego/snow/engine/snowman/block/mocks" - "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/vms" -) - -var id = ids.GenerateTestID() - -// Register should succeed even if we can't register a VM -func TestRegisterRegisterVMFails(t *testing.T) { - resources := initRegistererTest(t) - defer resources.ctrl.Finish() - - vmFactory := vms.NewMockFactory(resources.ctrl) - - // We fail to register the VM - resources.mockManager.EXPECT().RegisterFactory(gomock.Any(), id, vmFactory).Times(1).Return(errTest) - - require.ErrorIs(t, resources.registerer.Register(context.Background(), id, vmFactory), errTest) -} - -// Tests Register if a VM doesn't actually implement VM. -func TestRegisterBadVM(t *testing.T) { - resources := initRegistererTest(t) - defer resources.ctrl.Finish() - - vmFactory := vms.NewMockFactory(resources.ctrl) - vm := "this is not a vm..." - - resources.mockManager.EXPECT().RegisterFactory(gomock.Any(), id, vmFactory).Times(1).Return(nil) - // Since this factory produces a bad vm, we should get an error. 
- vmFactory.EXPECT().New(logging.NoLog{}).Times(1).Return(vm, nil) - - require.Error(t, resources.registerer.Register(context.Background(), id, vmFactory)) -} - -// Tests Register if creating endpoints for a VM fails + shutdown fails -func TestRegisterCreateHandlersAndShutdownFails(t *testing.T) { - resources := initRegistererTest(t) - defer resources.ctrl.Finish() - - vmFactory := vms.NewMockFactory(resources.ctrl) - vm := mocks.NewMockChainVM(resources.ctrl) - - resources.mockManager.EXPECT().RegisterFactory(gomock.Any(), id, vmFactory).Times(1).Return(nil) - vmFactory.EXPECT().New(logging.NoLog{}).Times(1).Return(vm, nil) - // We fail to create handlers + fail to shutdown - vm.EXPECT().CreateStaticHandlers(gomock.Any()).Return(nil, errTest).Times(1) - vm.EXPECT().Shutdown(gomock.Any()).Return(errTest).Times(1) - - require.ErrorIs(t, resources.registerer.Register(context.Background(), id, vmFactory), errTest) -} - -// Tests Register if creating endpoints for a VM fails + shutdown succeeds -func TestRegisterCreateHandlersFails(t *testing.T) { - resources := initRegistererTest(t) - defer resources.ctrl.Finish() - - vmFactory := vms.NewMockFactory(resources.ctrl) - vm := mocks.NewMockChainVM(resources.ctrl) - - resources.mockManager.EXPECT().RegisterFactory(gomock.Any(), id, vmFactory).Times(1).Return(nil) - vmFactory.EXPECT().New(logging.NoLog{}).Times(1).Return(vm, nil) - // We fail to create handlers + but succeed our shutdown - vm.EXPECT().CreateStaticHandlers(gomock.Any()).Return(nil, errTest).Times(1) - vm.EXPECT().Shutdown(gomock.Any()).Return(nil).Times(1) - - require.ErrorIs(t, resources.registerer.Register(context.Background(), id, vmFactory), errTest) -} - -// Tests Register if we fail to register the new endpoint on the server. 
-func TestRegisterAddRouteFails(t *testing.T) { - resources := initRegistererTest(t) - defer resources.ctrl.Finish() - - vmFactory := vms.NewMockFactory(resources.ctrl) - vm := mocks.NewMockChainVM(resources.ctrl) - - handlers := map[string]*common.HTTPHandler{ - "foo": {}, - } - - resources.mockManager.EXPECT().RegisterFactory(gomock.Any(), id, vmFactory).Times(1).Return(nil) - vmFactory.EXPECT().New(logging.NoLog{}).Times(1).Return(vm, nil) - vm.EXPECT().CreateStaticHandlers(gomock.Any()).Return(handlers, nil).Times(1) - // We fail to create an endpoint for the handler - resources.mockServer.EXPECT(). - AddRoute( - handlers["foo"], - gomock.Any(), - path.Join(constants.VMAliasPrefix, id.String()), - "foo", - ). - Times(1). - Return(errTest) - - require.ErrorIs(t, resources.registerer.Register(context.Background(), id, vmFactory), errTest) -} - -// Tests Register we can't find the alias for the newly registered vm -func TestRegisterAliasLookupFails(t *testing.T) { - resources := initRegistererTest(t) - defer resources.ctrl.Finish() - - vmFactory := vms.NewMockFactory(resources.ctrl) - vm := mocks.NewMockChainVM(resources.ctrl) - - handlers := map[string]*common.HTTPHandler{ - "foo": {}, - } - - resources.mockManager.EXPECT().RegisterFactory(gomock.Any(), id, vmFactory).Times(1).Return(nil) - vmFactory.EXPECT().New(logging.NoLog{}).Times(1).Return(vm, nil) - vm.EXPECT().CreateStaticHandlers(gomock.Any()).Return(handlers, nil).Times(1) - // Registering the route fails - resources.mockServer.EXPECT(). - AddRoute( - handlers["foo"], - gomock.Any(), - path.Join(constants.VMAliasPrefix, id.String()), - "foo", - ). - Times(1). 
- Return(nil) - resources.mockManager.EXPECT().Aliases(id).Times(1).Return(nil, errTest) - - require.ErrorIs(t, resources.registerer.Register(context.Background(), id, vmFactory), errTest) -} - -// Tests Register if adding aliases for the newly registered vm fails -func TestRegisterAddAliasesFails(t *testing.T) { - resources := initRegistererTest(t) - defer resources.ctrl.Finish() - - vmFactory := vms.NewMockFactory(resources.ctrl) - vm := mocks.NewMockChainVM(resources.ctrl) - - handlers := map[string]*common.HTTPHandler{ - "foo": {}, - } - aliases := []string{"alias-1", "alias-2"} - - resources.mockManager.EXPECT().RegisterFactory(gomock.Any(), id, vmFactory).Times(1).Return(nil) - vmFactory.EXPECT().New(logging.NoLog{}).Times(1).Return(vm, nil) - vm.EXPECT().CreateStaticHandlers(gomock.Any()).Return(handlers, nil).Times(1) - resources.mockServer.EXPECT(). - AddRoute( - handlers["foo"], - gomock.Any(), - path.Join(constants.VMAliasPrefix, id.String()), - "foo", - ). - Times(1). - Return(nil) - resources.mockManager.EXPECT().Aliases(id).Times(1).Return(aliases, nil) - // Adding aliases fails - resources.mockServer.EXPECT(). - AddAliases( - path.Join(constants.VMAliasPrefix, id.String()), - path.Join(constants.VMAliasPrefix, aliases[0]), - path.Join(constants.VMAliasPrefix, aliases[1]), - ). 
- Return(errTest) - - require.ErrorIs(t, resources.registerer.Register(context.Background(), id, vmFactory), errTest) -} - -// Tests Register if no errors are thrown -func TestRegisterHappyCase(t *testing.T) { - resources := initRegistererTest(t) - defer resources.ctrl.Finish() - - vmFactory := vms.NewMockFactory(resources.ctrl) - vm := mocks.NewMockChainVM(resources.ctrl) - - handlers := map[string]*common.HTTPHandler{ - "foo": {}, - } - aliases := []string{"alias-1", "alias-2"} - - resources.mockManager.EXPECT().RegisterFactory(gomock.Any(), id, vmFactory).Times(1).Return(nil) - vmFactory.EXPECT().New(logging.NoLog{}).Times(1).Return(vm, nil) - vm.EXPECT().CreateStaticHandlers(gomock.Any()).Return(handlers, nil).Times(1) - resources.mockServer.EXPECT(). - AddRoute( - handlers["foo"], - gomock.Any(), - path.Join(constants.VMAliasPrefix, id.String()), - "foo", - ). - Times(1). - Return(nil) - resources.mockManager.EXPECT().Aliases(id).Times(1).Return(aliases, nil) - resources.mockServer.EXPECT(). - AddAliases( - path.Join(constants.VMAliasPrefix, id.String()), - path.Join(constants.VMAliasPrefix, aliases[0]), - path.Join(constants.VMAliasPrefix, aliases[1]), - ). - Times(1). - Return(nil) - - require.NoError(t, resources.registerer.Register(context.Background(), id, vmFactory)) -} - -// RegisterWithReadLock should succeed even if we can't register a VM -func TestRegisterWithReadLockRegisterVMFails(t *testing.T) { - resources := initRegistererTest(t) - defer resources.ctrl.Finish() - - vmFactory := vms.NewMockFactory(resources.ctrl) - - // We fail to register the VM - resources.mockManager.EXPECT().RegisterFactory(gomock.Any(), id, vmFactory).Times(1).Return(errTest) - - require.ErrorIs(t, resources.registerer.RegisterWithReadLock(context.Background(), id, vmFactory), errTest) -} - -// Tests RegisterWithReadLock if a VM doesn't actually implement VM. 
-func TestRegisterWithReadLockBadVM(t *testing.T) { - resources := initRegistererTest(t) - defer resources.ctrl.Finish() - - vmFactory := vms.NewMockFactory(resources.ctrl) - vm := "this is not a vm..." - - resources.mockManager.EXPECT().RegisterFactory(gomock.Any(), id, vmFactory).Times(1).Return(nil) - // Since this factory produces a bad vm, we should get an error. - vmFactory.EXPECT().New(logging.NoLog{}).Times(1).Return(vm, nil) - - require.Error(t, resources.registerer.RegisterWithReadLock(context.Background(), id, vmFactory)) -} - -// Tests RegisterWithReadLock if creating endpoints for a VM fails + shutdown fails -func TestRegisterWithReadLockCreateHandlersAndShutdownFails(t *testing.T) { - resources := initRegistererTest(t) - defer resources.ctrl.Finish() - - vmFactory := vms.NewMockFactory(resources.ctrl) - vm := mocks.NewMockChainVM(resources.ctrl) - - resources.mockManager.EXPECT().RegisterFactory(gomock.Any(), id, vmFactory).Times(1).Return(nil) - vmFactory.EXPECT().New(logging.NoLog{}).Times(1).Return(vm, nil) - // We fail to create handlers + fail to shutdown - vm.EXPECT().CreateStaticHandlers(gomock.Any()).Return(nil, errTest).Times(1) - vm.EXPECT().Shutdown(gomock.Any()).Return(errTest).Times(1) - - require.ErrorIs(t, resources.registerer.RegisterWithReadLock(context.Background(), id, vmFactory), errTest) -} - -// Tests RegisterWithReadLock if creating endpoints for a VM fails + shutdown succeeds -func TestRegisterWithReadLockCreateHandlersFails(t *testing.T) { - resources := initRegistererTest(t) - defer resources.ctrl.Finish() - - vmFactory := vms.NewMockFactory(resources.ctrl) - vm := mocks.NewMockChainVM(resources.ctrl) - - resources.mockManager.EXPECT().RegisterFactory(gomock.Any(), id, vmFactory).Times(1).Return(nil) - vmFactory.EXPECT().New(logging.NoLog{}).Times(1).Return(vm, nil) - // We fail to create handlers + but succeed our shutdown - vm.EXPECT().CreateStaticHandlers(gomock.Any()).Return(nil, errTest).Times(1) - 
vm.EXPECT().Shutdown(gomock.Any()).Return(nil).Times(1) - - require.ErrorIs(t, resources.registerer.RegisterWithReadLock(context.Background(), id, vmFactory), errTest) -} - -// Tests RegisterWithReadLock if we fail to register the new endpoint on the server. -func TestRegisterWithReadLockAddRouteWithReadLockFails(t *testing.T) { - resources := initRegistererTest(t) - defer resources.ctrl.Finish() - - vmFactory := vms.NewMockFactory(resources.ctrl) - vm := mocks.NewMockChainVM(resources.ctrl) - - handlers := map[string]*common.HTTPHandler{ - "foo": {}, - } - - resources.mockManager.EXPECT().RegisterFactory(gomock.Any(), id, vmFactory).Times(1).Return(nil) - vmFactory.EXPECT().New(logging.NoLog{}).Times(1).Return(vm, nil) - vm.EXPECT().CreateStaticHandlers(gomock.Any()).Return(handlers, nil).Times(1) - // We fail to create an endpoint for the handler - resources.mockServer.EXPECT(). - AddRouteWithReadLock( - handlers["foo"], - gomock.Any(), - path.Join(constants.VMAliasPrefix, id.String()), - "foo", - ). - Times(1). - Return(errTest) - - require.ErrorIs(t, resources.registerer.RegisterWithReadLock(context.Background(), id, vmFactory), errTest) -} - -// Tests RegisterWithReadLock we can't find the alias for the newly registered vm -func TestRegisterWithReadLockAliasLookupFails(t *testing.T) { - resources := initRegistererTest(t) - defer resources.ctrl.Finish() - - vmFactory := vms.NewMockFactory(resources.ctrl) - vm := mocks.NewMockChainVM(resources.ctrl) - - handlers := map[string]*common.HTTPHandler{ - "foo": {}, - } - - resources.mockManager.EXPECT().RegisterFactory(gomock.Any(), id, vmFactory).Times(1).Return(nil) - vmFactory.EXPECT().New(logging.NoLog{}).Times(1).Return(vm, nil) - vm.EXPECT().CreateStaticHandlers(gomock.Any()).Return(handlers, nil).Times(1) - // RegisterWithReadLocking the route fails - resources.mockServer.EXPECT(). - AddRouteWithReadLock( - handlers["foo"], - gomock.Any(), - path.Join(constants.VMAliasPrefix, id.String()), - "foo", - ). 
- Times(1). - Return(nil) - resources.mockManager.EXPECT().Aliases(id).Times(1).Return(nil, errTest) - - require.ErrorIs(t, resources.registerer.RegisterWithReadLock(context.Background(), id, vmFactory), errTest) -} - -// Tests RegisterWithReadLock if adding aliases for the newly registered vm fails -func TestRegisterWithReadLockAddAliasesFails(t *testing.T) { - resources := initRegistererTest(t) - defer resources.ctrl.Finish() - - vmFactory := vms.NewMockFactory(resources.ctrl) - vm := mocks.NewMockChainVM(resources.ctrl) - - handlers := map[string]*common.HTTPHandler{ - "foo": {}, - } - aliases := []string{"alias-1", "alias-2"} - - resources.mockManager.EXPECT().RegisterFactory(gomock.Any(), id, vmFactory).Times(1).Return(nil) - vmFactory.EXPECT().New(logging.NoLog{}).Times(1).Return(vm, nil) - vm.EXPECT().CreateStaticHandlers(gomock.Any()).Return(handlers, nil).Times(1) - resources.mockServer.EXPECT(). - AddRouteWithReadLock( - handlers["foo"], - gomock.Any(), - path.Join(constants.VMAliasPrefix, id.String()), - "foo", - ). - Times(1). - Return(nil) - resources.mockManager.EXPECT().Aliases(id).Times(1).Return(aliases, nil) - // Adding aliases fails - resources.mockServer.EXPECT(). - AddAliasesWithReadLock( - path.Join(constants.VMAliasPrefix, id.String()), - path.Join(constants.VMAliasPrefix, aliases[0]), - path.Join(constants.VMAliasPrefix, aliases[1]), - ). 
- Return(errTest) - - require.ErrorIs(t, resources.registerer.RegisterWithReadLock(context.Background(), id, vmFactory), errTest) -} - -// Tests RegisterWithReadLock if no errors are thrown -func TestRegisterWithReadLockHappyCase(t *testing.T) { - resources := initRegistererTest(t) - defer resources.ctrl.Finish() - - vmFactory := vms.NewMockFactory(resources.ctrl) - vm := mocks.NewMockChainVM(resources.ctrl) - - handlers := map[string]*common.HTTPHandler{ - "foo": {}, - } - aliases := []string{"alias-1", "alias-2"} - - resources.mockManager.EXPECT().RegisterFactory(gomock.Any(), id, vmFactory).Times(1).Return(nil) - vmFactory.EXPECT().New(logging.NoLog{}).Times(1).Return(vm, nil) - vm.EXPECT().CreateStaticHandlers(gomock.Any()).Return(handlers, nil).Times(1) - resources.mockServer.EXPECT(). - AddRouteWithReadLock( - handlers["foo"], - gomock.Any(), - path.Join(constants.VMAliasPrefix, id.String()), - "foo", - ). - Times(1). - Return(nil) - resources.mockManager.EXPECT().Aliases(id).Times(1).Return(aliases, nil) - resources.mockServer.EXPECT(). - AddAliasesWithReadLock( - path.Join(constants.VMAliasPrefix, id.String()), - path.Join(constants.VMAliasPrefix, aliases[0]), - path.Join(constants.VMAliasPrefix, aliases[1]), - ). - Times(1). 
- Return(nil) - - require.NoError(t, resources.registerer.RegisterWithReadLock(context.Background(), id, vmFactory)) -} - -type vmRegistererTestResources struct { - ctrl *gomock.Controller - mockManager *vms.MockManager - mockServer *server.MockServer - mockLogger *logging.MockLogger - registerer VMRegisterer -} - -func initRegistererTest(t *testing.T) *vmRegistererTestResources { - ctrl := gomock.NewController(t) - - mockManager := vms.NewMockManager(ctrl) - mockServer := server.NewMockServer(ctrl) - mockLog := logging.NewMockLogger(ctrl) - - registerer := NewVMRegisterer(VMRegistererConfig{ - APIServer: mockServer, - Log: mockLog, - VMFactoryLog: logging.NoLog{}, - VMManager: mockManager, - }) - - mockLog.EXPECT().Error(gomock.Any(), gomock.Any()).AnyTimes() - mockLog.EXPECT().Warn(gomock.Any(), gomock.Any()).AnyTimes() - mockLog.EXPECT().Info(gomock.Any(), gomock.Any()).AnyTimes() - mockLog.EXPECT().Debug(gomock.Any(), gomock.Any()).AnyTimes() - mockLog.EXPECT().Trace(gomock.Any(), gomock.Any()).AnyTimes() - mockLog.EXPECT().Verbo(gomock.Any(), gomock.Any()).AnyTimes() - - return &vmRegistererTestResources{ - ctrl: ctrl, - mockManager: mockManager, - mockServer: mockServer, - mockLogger: mockLog, - registerer: registerer, - } -} diff --git a/avalanchego/vms/registry/vm_registry.go b/avalanchego/vms/registry/vm_registry.go index dd6f96d4..1374c4d4 100644 --- a/avalanchego/vms/registry/vm_registry.go +++ b/avalanchego/vms/registry/vm_registry.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package registry @@ -7,6 +7,7 @@ import ( "context" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/vms" ) var _ VMRegistry = (*vmRegistry)(nil) @@ -16,15 +17,12 @@ var _ VMRegistry = (*vmRegistry)(nil) type VMRegistry interface { // Reload installs all non-installed vms on the node. 
Reload(ctx context.Context) ([]ids.ID, map[ids.ID]error, error) - // ReloadWithReadLock installs all non-installed vms on the node assuming - // the http read lock is currently held. - ReloadWithReadLock(ctx context.Context) ([]ids.ID, map[ids.ID]error, error) } // VMRegistryConfig defines configurations for VMRegistry type VMRegistryConfig struct { - VMGetter VMGetter - VMRegisterer VMRegisterer + VMGetter VMGetter + VMManager vms.Manager } type vmRegistry struct { @@ -39,16 +37,6 @@ func NewVMRegistry(config VMRegistryConfig) VMRegistry { } func (r *vmRegistry) Reload(ctx context.Context) ([]ids.ID, map[ids.ID]error, error) { - return r.reload(ctx, r.config.VMRegisterer) -} - -func (r *vmRegistry) ReloadWithReadLock(ctx context.Context) ([]ids.ID, map[ids.ID]error, error) { - return r.reload(ctx, readRegisterer{ - registerer: r.config.VMRegisterer, - }) -} - -func (r *vmRegistry) reload(ctx context.Context, registerer registerer) ([]ids.ID, map[ids.ID]error, error) { _, unregisteredVMs, err := r.config.VMGetter.Get() if err != nil { return nil, nil, err @@ -58,7 +46,7 @@ func (r *vmRegistry) reload(ctx context.Context, registerer registerer) ([]ids.I failedVMs := make(map[ids.ID]error) for vmID, factory := range unregisteredVMs { - if err := registerer.Register(ctx, vmID, factory); err != nil { + if err := r.config.VMManager.RegisterFactory(ctx, vmID, factory); err != nil { failedVMs[vmID] = err continue } diff --git a/avalanchego/vms/registry/vm_registry_test.go b/avalanchego/vms/registry/vm_registry_test.go index fd8f096a..abe6ba67 100644 --- a/avalanchego/vms/registry/vm_registry_test.go +++ b/avalanchego/vms/registry/vm_registry_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package registry @@ -7,9 +7,8 @@ import ( "context" "testing" - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/vms" @@ -24,8 +23,9 @@ var ( // Tests the happy case where Reload succeeds. func TestReload_Success(t *testing.T) { + require := require.New(t) + resources := initVMRegistryTest(t) - defer resources.ctrl.Finish() factory1 := vms.NewMockFactory(resources.ctrl) factory2 := vms.NewMockFactory(resources.ctrl) @@ -46,80 +46,40 @@ func TestReload_Success(t *testing.T) { Get(). Times(1). Return(registeredVms, unregisteredVms, nil) - resources.mockVMRegisterer.EXPECT(). - Register(gomock.Any(), id3, factory3). + resources.mockVMManager.EXPECT(). + RegisterFactory(gomock.Any(), id3, factory3). Times(1). Return(nil) - resources.mockVMRegisterer.EXPECT(). - Register(gomock.Any(), id4, factory4). + resources.mockVMManager.EXPECT(). + RegisterFactory(gomock.Any(), id4, factory4). Times(1). Return(nil) installedVMs, failedVMs, err := resources.vmRegistry.Reload(context.Background()) - require.ElementsMatch(t, []ids.ID{id3, id4}, installedVMs) - require.Empty(t, failedVMs) - require.NoError(t, err) + require.NoError(err) + require.ElementsMatch([]ids.ID{id3, id4}, installedVMs) + require.Empty(failedVMs) } // Tests that we fail if we're not able to get the vms on disk func TestReload_GetNewVMsFails(t *testing.T) { + require := require.New(t) + resources := initVMRegistryTest(t) - defer resources.ctrl.Finish() resources.mockVMGetter.EXPECT().Get().Times(1).Return(nil, nil, errTest) installedVMs, failedVMs, err := resources.vmRegistry.Reload(context.Background()) - require.Empty(t, installedVMs) - require.Empty(t, failedVMs) - require.ErrorIs(t, err, errTest) + require.ErrorIs(err, errTest) + require.Empty(installedVMs) + require.Empty(failedVMs) } // Tests that if we fail to register a VM, we fail. 
func TestReload_PartialRegisterFailure(t *testing.T) { - resources := initVMRegistryTest(t) - defer resources.ctrl.Finish() - - factory1 := vms.NewMockFactory(resources.ctrl) - factory2 := vms.NewMockFactory(resources.ctrl) - factory3 := vms.NewMockFactory(resources.ctrl) - factory4 := vms.NewMockFactory(resources.ctrl) - - registeredVms := map[ids.ID]vms.Factory{ - id1: factory1, - id2: factory2, - } - - unregisteredVms := map[ids.ID]vms.Factory{ - id3: factory3, - id4: factory4, - } - - resources.mockVMGetter.EXPECT(). - Get(). - Times(1). - Return(registeredVms, unregisteredVms, nil) - resources.mockVMRegisterer.EXPECT(). - Register(gomock.Any(), id3, factory3). - Times(1). - Return(errTest) - resources.mockVMRegisterer.EXPECT(). - Register(gomock.Any(), id4, factory4). - Times(1). - Return(nil) - - installedVMs, failedVMs, err := resources.vmRegistry.Reload(context.Background()) - - require.Len(t, failedVMs, 1) - require.ErrorIs(t, failedVMs[id3], errTest) - require.Len(t, installedVMs, 1) - require.Equal(t, id4, installedVMs[0]) - require.NoError(t, err) -} + require := require.New(t) -// Tests the happy case where Reload succeeds. -func TestReloadWithReadLock_Success(t *testing.T) { resources := initVMRegistryTest(t) - defer resources.ctrl.Finish() factory1 := vms.NewMockFactory(resources.ctrl) factory2 := vms.NewMockFactory(resources.ctrl) @@ -140,100 +100,47 @@ func TestReloadWithReadLock_Success(t *testing.T) { Get(). Times(1). Return(registeredVms, unregisteredVms, nil) - resources.mockVMRegisterer.EXPECT(). - RegisterWithReadLock(gomock.Any(), id3, factory3). - Times(1). - Return(nil) - resources.mockVMRegisterer.EXPECT(). - RegisterWithReadLock(gomock.Any(), id4, factory4). - Times(1). 
- Return(nil) - - installedVMs, failedVMs, err := resources.vmRegistry.ReloadWithReadLock(context.Background()) - require.ElementsMatch(t, []ids.ID{id3, id4}, installedVMs) - require.Empty(t, failedVMs) - require.NoError(t, err) -} - -// Tests that we fail if we're not able to get the vms on disk -func TestReloadWithReadLock_GetNewVMsFails(t *testing.T) { - resources := initVMRegistryTest(t) - defer resources.ctrl.Finish() - - resources.mockVMGetter.EXPECT().Get().Times(1).Return(nil, nil, errTest) - - installedVMs, failedVMs, err := resources.vmRegistry.ReloadWithReadLock(context.Background()) - require.Empty(t, installedVMs) - require.Empty(t, failedVMs) - require.ErrorIs(t, err, errTest) -} - -// Tests that if we fail to register a VM, we fail. -func TestReloadWithReadLock_PartialRegisterFailure(t *testing.T) { - resources := initVMRegistryTest(t) - defer resources.ctrl.Finish() - - factory1 := vms.NewMockFactory(resources.ctrl) - factory2 := vms.NewMockFactory(resources.ctrl) - factory3 := vms.NewMockFactory(resources.ctrl) - factory4 := vms.NewMockFactory(resources.ctrl) - - registeredVms := map[ids.ID]vms.Factory{ - id1: factory1, - id2: factory2, - } - - unregisteredVms := map[ids.ID]vms.Factory{ - id3: factory3, - id4: factory4, - } - - resources.mockVMGetter.EXPECT(). - Get(). - Times(1). - Return(registeredVms, unregisteredVms, nil) - resources.mockVMRegisterer.EXPECT(). - RegisterWithReadLock(gomock.Any(), id3, factory3). + resources.mockVMManager.EXPECT(). + RegisterFactory(gomock.Any(), id3, factory3). Times(1). Return(errTest) - resources.mockVMRegisterer.EXPECT(). - RegisterWithReadLock(gomock.Any(), id4, factory4). + resources.mockVMManager.EXPECT(). + RegisterFactory(gomock.Any(), id4, factory4). Times(1). 
Return(nil) - installedVMs, failedVMs, err := resources.vmRegistry.ReloadWithReadLock(context.Background()) - - require.Len(t, failedVMs, 1) - require.ErrorIs(t, failedVMs[id3], errTest) - require.Len(t, installedVMs, 1) - require.Equal(t, id4, installedVMs[0]) - require.NoError(t, err) + installedVMs, failedVMs, err := resources.vmRegistry.Reload(context.Background()) + require.NoError(err) + require.Len(failedVMs, 1) + require.ErrorIs(failedVMs[id3], errTest) + require.Len(installedVMs, 1) + require.Equal(id4, installedVMs[0]) } type registryTestResources struct { - ctrl *gomock.Controller - mockVMGetter *MockVMGetter - mockVMRegisterer *MockVMRegisterer - vmRegistry VMRegistry + ctrl *gomock.Controller + mockVMGetter *MockVMGetter + mockVMManager *vms.MockManager + vmRegistry VMRegistry } func initVMRegistryTest(t *testing.T) *registryTestResources { ctrl := gomock.NewController(t) mockVMGetter := NewMockVMGetter(ctrl) - mockVMRegisterer := NewMockVMRegisterer(ctrl) + mockVMManager := vms.NewMockManager(ctrl) vmRegistry := NewVMRegistry( VMRegistryConfig{ - VMGetter: mockVMGetter, - VMRegisterer: mockVMRegisterer, + VMGetter: mockVMGetter, + VMManager: mockVMManager, }, ) return ®istryTestResources{ - ctrl: ctrl, - mockVMGetter: mockVMGetter, - mockVMRegisterer: mockVMRegisterer, - vmRegistry: vmRegistry, + ctrl: ctrl, + mockVMGetter: mockVMGetter, + mockVMManager: mockVMManager, + vmRegistry: vmRegistry, } } diff --git a/avalanchego/vms/rpcchainvm/batched_vm_test.go b/avalanchego/vms/rpcchainvm/batched_vm_test.go index cd554af9..8039c9fb 100644 --- a/avalanchego/vms/rpcchainvm/batched_vm_test.go +++ b/avalanchego/vms/rpcchainvm/batched_vm_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package rpcchainvm @@ -8,18 +8,15 @@ import ( "testing" "time" - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" - "github.com/ava-labs/avalanchego/database/manager" + "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" - "github.com/ava-labs/avalanchego/snow/engine/snowman/block/mocks" - "github.com/ava-labs/avalanchego/version" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/vms/components/chain" ) @@ -38,12 +35,12 @@ var ( time2 = time.Unix(2, 0) ) -func batchedParseBlockCachingTestPlugin(t *testing.T, loadExpectations bool) (block.ChainVM, *gomock.Controller) { +func batchedParseBlockCachingTestPlugin(t *testing.T, loadExpectations bool) block.ChainVM { // test key is "batchedParseBlockCachingTestKey" // create mock ctrl := gomock.NewController(t) - vm := mocks.NewMockChainVM(ctrl) + vm := block.NewMockChainVM(ctrl) if loadExpectations { blk1 := snowman.NewMockBlock(ctrl) @@ -76,7 +73,7 @@ func batchedParseBlockCachingTestPlugin(t *testing.T, loadExpectations bool) (bl ) } - return vm, ctrl + return vm } func TestBatchedParseBlockCaching(t *testing.T) { @@ -87,19 +84,16 @@ func TestBatchedParseBlockCaching(t *testing.T) { vm, stopper := buildClientHelper(require, testKey) defer stopper.Stop(context.Background()) - ctx := snow.DefaultContextTest() - dbManager := manager.NewMemDB(version.Semantic1_0_0) + ctx := snowtest.Context(t, snowtest.CChainID) - err := vm.Initialize(context.Background(), ctx, dbManager, nil, nil, nil, nil, nil, nil) - require.NoError(err) + require.NoError(vm.Initialize(context.Background(), ctx, memdb.New(), nil, nil, nil, nil, nil, nil)) // Call should parse the first block blk, err := 
vm.ParseBlock(context.Background(), blkBytes1) require.NoError(err) require.Equal(blkID1, blk.ID()) - _, typeChecked := blk.(*chain.BlockWrapper) - require.True(typeChecked) + require.IsType(&chain.BlockWrapper{}, blk) // Call should cache the first block and parse the second block blks, err := vm.BatchedParseBlock(context.Background(), [][]byte{blkBytes1, blkBytes2}) @@ -108,11 +102,8 @@ func TestBatchedParseBlockCaching(t *testing.T) { require.Equal(blkID1, blks[0].ID()) require.Equal(blkID2, blks[1].ID()) - _, typeChecked = blks[0].(*chain.BlockWrapper) - require.True(typeChecked) - - _, typeChecked = blks[1].(*chain.BlockWrapper) - require.True(typeChecked) + require.IsType(&chain.BlockWrapper{}, blks[0]) + require.IsType(&chain.BlockWrapper{}, blks[1]) // Call should be fully cached and not result in a grpc call blks, err = vm.BatchedParseBlock(context.Background(), [][]byte{blkBytes1, blkBytes2}) @@ -121,9 +112,6 @@ func TestBatchedParseBlockCaching(t *testing.T) { require.Equal(blkID1, blks[0].ID()) require.Equal(blkID2, blks[1].ID()) - _, typeChecked = blks[0].(*chain.BlockWrapper) - require.True(typeChecked) - - _, typeChecked = blks[1].(*chain.BlockWrapper) - require.True(typeChecked) + require.IsType(&chain.BlockWrapper{}, blks[0]) + require.IsType(&chain.BlockWrapper{}, blks[1]) } diff --git a/avalanchego/vms/rpcchainvm/errors.go b/avalanchego/vms/rpcchainvm/errors.go index e1456d87..4b434b51 100644 --- a/avalanchego/vms/rpcchainvm/errors.go +++ b/avalanchego/vms/rpcchainvm/errors.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package rpcchainvm @@ -12,16 +12,14 @@ import ( var ( errEnumToError = map[vmpb.Error]error{ - vmpb.Error_ERROR_CLOSED: database.ErrClosed, - vmpb.Error_ERROR_NOT_FOUND: database.ErrNotFound, - vmpb.Error_ERROR_HEIGHT_INDEX_NOT_IMPLEMENTED: block.ErrHeightIndexedVMNotImplemented, - vmpb.Error_ERROR_HEIGHT_INDEX_INCOMPLETE: block.ErrIndexIncomplete, - vmpb.Error_ERROR_STATE_SYNC_NOT_IMPLEMENTED: block.ErrStateSyncableVMNotImplemented, + vmpb.Error_ERROR_CLOSED: database.ErrClosed, + vmpb.Error_ERROR_NOT_FOUND: database.ErrNotFound, + vmpb.Error_ERROR_HEIGHT_INDEX_INCOMPLETE: block.ErrIndexIncomplete, + vmpb.Error_ERROR_STATE_SYNC_NOT_IMPLEMENTED: block.ErrStateSyncableVMNotImplemented, } errorToErrEnum = map[error]vmpb.Error{ database.ErrClosed: vmpb.Error_ERROR_CLOSED, database.ErrNotFound: vmpb.Error_ERROR_NOT_FOUND, - block.ErrHeightIndexedVMNotImplemented: vmpb.Error_ERROR_HEIGHT_INDEX_NOT_IMPLEMENTED, block.ErrIndexIncomplete: vmpb.Error_ERROR_HEIGHT_INDEX_INCOMPLETE, block.ErrStateSyncableVMNotImplemented: vmpb.Error_ERROR_STATE_SYNC_NOT_IMPLEMENTED, } diff --git a/avalanchego/vms/rpcchainvm/factory.go b/avalanchego/vms/rpcchainvm/factory.go index f121c7bb..d61c41d1 100644 --- a/avalanchego/vms/rpcchainvm/factory.go +++ b/avalanchego/vms/rpcchainvm/factory.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package rpcchainvm @@ -13,8 +13,6 @@ import ( "github.com/ava-labs/avalanchego/vms/rpcchainvm/grpcutils" "github.com/ava-labs/avalanchego/vms/rpcchainvm/runtime" "github.com/ava-labs/avalanchego/vms/rpcchainvm/runtime/subprocess" - - vmpb "github.com/ava-labs/avalanchego/proto/pb/vm" ) var _ vms.Factory = (*factory)(nil) @@ -61,7 +59,7 @@ func (f *factory) New(log logging.Logger) (interface{}, error) { return nil, err } - vm := NewClient(vmpb.NewVMClient(clientConn)) + vm := NewClient(clientConn) vm.SetProcess(stopper, status.Pid, f.processTracker) f.runtimeTracker.TrackRuntime(stopper) diff --git a/avalanchego/vms/rpcchainvm/ghttp/gconn/conn_client.go b/avalanchego/vms/rpcchainvm/ghttp/gconn/conn_client.go index b4bc5a5a..cfa3094b 100644 --- a/avalanchego/vms/rpcchainvm/ghttp/gconn/conn_client.go +++ b/avalanchego/vms/rpcchainvm/ghttp/gconn/conn_client.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package gconn diff --git a/avalanchego/vms/rpcchainvm/ghttp/gconn/conn_server.go b/avalanchego/vms/rpcchainvm/ghttp/gconn/conn_server.go index 07ca0f5a..57f1cfdb 100644 --- a/avalanchego/vms/rpcchainvm/ghttp/gconn/conn_server.go +++ b/avalanchego/vms/rpcchainvm/ghttp/gconn/conn_server.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package gconn diff --git a/avalanchego/vms/rpcchainvm/ghttp/greader/reader_client.go b/avalanchego/vms/rpcchainvm/ghttp/greader/reader_client.go index c06bdce9..be0f2a1a 100644 --- a/avalanchego/vms/rpcchainvm/ghttp/greader/reader_client.go +++ b/avalanchego/vms/rpcchainvm/ghttp/greader/reader_client.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. package greader diff --git a/avalanchego/vms/rpcchainvm/ghttp/greader/reader_server.go b/avalanchego/vms/rpcchainvm/ghttp/greader/reader_server.go index a5f8f5d7..4d85f674 100644 --- a/avalanchego/vms/rpcchainvm/ghttp/greader/reader_server.go +++ b/avalanchego/vms/rpcchainvm/ghttp/greader/reader_server.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package greader diff --git a/avalanchego/vms/rpcchainvm/ghttp/gresponsewriter/locked_writer.go b/avalanchego/vms/rpcchainvm/ghttp/gresponsewriter/locked_writer.go index c89eb509..40528dc7 100644 --- a/avalanchego/vms/rpcchainvm/ghttp/gresponsewriter/locked_writer.go +++ b/avalanchego/vms/rpcchainvm/ghttp/gresponsewriter/locked_writer.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package gresponsewriter diff --git a/avalanchego/vms/rpcchainvm/ghttp/gresponsewriter/writer_client.go b/avalanchego/vms/rpcchainvm/ghttp/gresponsewriter/writer_client.go index 769d8edc..1c455670 100644 --- a/avalanchego/vms/rpcchainvm/ghttp/gresponsewriter/writer_client.go +++ b/avalanchego/vms/rpcchainvm/ghttp/gresponsewriter/writer_client.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package gresponsewriter diff --git a/avalanchego/vms/rpcchainvm/ghttp/gresponsewriter/writer_server.go b/avalanchego/vms/rpcchainvm/ghttp/gresponsewriter/writer_server.go index a78e6b00..86dd9963 100644 --- a/avalanchego/vms/rpcchainvm/ghttp/gresponsewriter/writer_server.go +++ b/avalanchego/vms/rpcchainvm/ghttp/gresponsewriter/writer_server.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package gresponsewriter @@ -8,8 +8,6 @@ import ( "errors" "net/http" - "golang.org/x/exp/maps" - "google.golang.org/protobuf/types/known/emptypb" "github.com/ava-labs/avalanchego/vms/rpcchainvm/ghttp/gconn" @@ -48,7 +46,7 @@ func (s *Server) Write( req *responsewriterpb.WriteRequest, ) (*responsewriterpb.WriteResponse, error) { headers := s.writer.Header() - maps.Clear(headers) + clear(headers) for _, header := range req.Headers { headers[header.Key] = header.Values } @@ -67,7 +65,7 @@ func (s *Server) WriteHeader( req *responsewriterpb.WriteHeaderRequest, ) (*emptypb.Empty, error) { headers := s.writer.Header() - maps.Clear(headers) + clear(headers) for _, header := range req.Headers { headers[header.Key] = header.Values } diff --git a/avalanchego/vms/rpcchainvm/ghttp/gwriter/writer_client.go b/avalanchego/vms/rpcchainvm/ghttp/gwriter/writer_client.go index d9a561f2..f68cefa7 100644 --- a/avalanchego/vms/rpcchainvm/ghttp/gwriter/writer_client.go +++ b/avalanchego/vms/rpcchainvm/ghttp/gwriter/writer_client.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package gwriter diff --git a/avalanchego/vms/rpcchainvm/ghttp/gwriter/writer_server.go b/avalanchego/vms/rpcchainvm/ghttp/gwriter/writer_server.go index ce85aace..1b216dc2 100644 --- a/avalanchego/vms/rpcchainvm/ghttp/gwriter/writer_server.go +++ b/avalanchego/vms/rpcchainvm/ghttp/gwriter/writer_server.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package gwriter diff --git a/avalanchego/vms/rpcchainvm/ghttp/http_client.go b/avalanchego/vms/rpcchainvm/ghttp/http_client.go index 62a6b705..cd06c46c 100644 --- a/avalanchego/vms/rpcchainvm/ghttp/http_client.go +++ b/avalanchego/vms/rpcchainvm/ghttp/http_client.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package ghttp diff --git a/avalanchego/vms/rpcchainvm/ghttp/http_server.go b/avalanchego/vms/rpcchainvm/ghttp/http_server.go index adece6f9..c6029653 100644 --- a/avalanchego/vms/rpcchainvm/ghttp/http_server.go +++ b/avalanchego/vms/rpcchainvm/ghttp/http_server.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package ghttp diff --git a/avalanchego/vms/rpcchainvm/ghttp/http_test.go b/avalanchego/vms/rpcchainvm/ghttp/http_test.go index 7cafe62a..22d5095d 100644 --- a/avalanchego/vms/rpcchainvm/ghttp/http_test.go +++ b/avalanchego/vms/rpcchainvm/ghttp/http_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package ghttp @@ -13,8 +13,6 @@ import ( ) func TestConvertWriteResponse(t *testing.T) { - require := require.New(t) - scenerios := map[string]struct { resp *httppb.HandleSimpleHTTPResponse }{ @@ -47,8 +45,7 @@ func TestConvertWriteResponse(t *testing.T) { for testName, scenerio := range scenerios { t.Run(testName, func(t *testing.T) { w := httptest.NewRecorder() - err := convertWriteResponse(w, scenerio.resp) - require.NoError(err) + require.NoError(t, convertWriteResponse(w, scenerio.resp)) }) } } diff --git a/avalanchego/vms/rpcchainvm/grpcutils/client.go b/avalanchego/vms/rpcchainvm/grpcutils/client.go index 0a9dfcff..40d219c8 100644 --- a/avalanchego/vms/rpcchainvm/grpcutils/client.go +++ b/avalanchego/vms/rpcchainvm/grpcutils/client.go @@ -1,10 +1,9 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package grpcutils import ( - "fmt" "math" "time" @@ -62,7 +61,7 @@ var DefaultDialOptions = []grpc.DialOption{ // Dial returns a gRPC ClientConn with the dial options as defined by // DefaultDialOptions. DialOption can also optionally be passed. func Dial(addr string, opts ...DialOption) (*grpc.ClientConn, error) { - return grpc.Dial(fmt.Sprintf("passthrough:///%s", addr), newDialOpts(opts...)...) + return grpc.Dial("passthrough:///"+addr, newDialOpts(opts...)...) } // DialOptions are options which can be applied to a gRPC client in addition to diff --git a/avalanchego/vms/rpcchainvm/grpcutils/client_test.go b/avalanchego/vms/rpcchainvm/grpcutils/client_test.go index 0e48a837..746c0fc4 100644 --- a/avalanchego/vms/rpcchainvm/grpcutils/client_test.go +++ b/avalanchego/vms/rpcchainvm/grpcutils/client_test.go @@ -1,25 +1,25 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package grpcutils import ( + "context" + "fmt" "testing" "time" - grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" - + "github.com/stretchr/testify/require" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/status" - "github.com/stretchr/testify/require" - "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/database/rpcdb" pb "github.com/ava-labs/avalanchego/proto/pb/rpcdb" + grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" ) func TestDialOptsSmoke(t *testing.T) { @@ -55,14 +55,12 @@ func TestWaitForReady(t *testing.T) { Serve(listener, server) }() - // The default includes grpc.WaitForReady(true). + // The default is WaitForReady = true. conn, err := Dial(listener.Addr().String()) require.NoError(err) db := rpcdb.NewClient(pb.NewDatabaseClient(conn)) - - err = db.Put([]byte("foo"), []byte("bar")) - require.NoError(err) + require.NoError(db.Put([]byte("foo"), []byte("bar"))) noWaitListener, err := NewListener() require.NoError(err) @@ -85,3 +83,21 @@ func TestWaitForReady(t *testing.T) { require.True(ok) require.Equal(codes.Unavailable, status.Code()) } + +func TestWaitForReadyCallOption(t *testing.T) { + require := require.New(t) + + listener, err := NewListener() + require.NoError(err) + conn, err := Dial(listener.Addr().String()) + require.NoError(err) + // close listener causes RPC to fail fast. 
+ _ = listener.Close() + + db := pb.NewDatabaseClient(conn) + _, err = db.Put(context.Background(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}, grpc.WaitForReady(false)) + s, ok := status.FromError(err) + fmt.Printf("status: %v\n", s) + require.True(ok) + require.Equal(codes.Unavailable, s.Code()) +} diff --git a/avalanchego/vms/rpcchainvm/grpcutils/server.go b/avalanchego/vms/rpcchainvm/grpcutils/server.go index b262cf74..dbcc439c 100644 --- a/avalanchego/vms/rpcchainvm/grpcutils/server.go +++ b/avalanchego/vms/rpcchainvm/grpcutils/server.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package grpcutils @@ -25,21 +25,6 @@ const ( // of Timeout and if no activity is seen even after that the connection is // closed. grpc-go default 20s defaultServerKeepAliveTimeout = 20 * time.Second - // Duration for the maximum amount of time a http2 connection can exist - // before sending GOAWAY. Internally in gRPC a +-10% jitter is added to - // mitigate retry storms. - defaultServerMaxConnectionAge = 10 * time.Minute - // After MaxConnectionAge, MaxConnectionAgeGrace specifies the amount of time - // between when the server sends a GOAWAY to the client to initiate graceful - // shutdown, and when the server closes the connection. - // - // The server expects that this grace period will allow the client to complete - // any ongoing requests, after which it will forcefully terminate the connection. - // If a request takes longer than this grace period, it will *fail*. - // We *never* want an RPC to live longer than this value. - // - // invariant: Any value < 1 second will be internally overridden by gRPC. 
- defaultServerMaxConnectionAgeGrace = math.MaxInt64 ) var DefaultServerOptions = []grpc.ServerOption{ @@ -51,10 +36,8 @@ var DefaultServerOptions = []grpc.ServerOption{ PermitWithoutStream: defaultPermitWithoutStream, }), grpc.KeepaliveParams(keepalive.ServerParameters{ - Time: defaultServerKeepAliveInterval, - Timeout: defaultServerKeepAliveTimeout, - MaxConnectionAge: defaultServerMaxConnectionAge, - MaxConnectionAgeGrace: defaultServerMaxConnectionAgeGrace, + Time: defaultServerKeepAliveInterval, + Timeout: defaultServerKeepAliveTimeout, }), } diff --git a/avalanchego/vms/rpcchainvm/grpcutils/server_closer.go b/avalanchego/vms/rpcchainvm/grpcutils/server_closer.go index 35ca2b73..67a4141d 100644 --- a/avalanchego/vms/rpcchainvm/grpcutils/server_closer.go +++ b/avalanchego/vms/rpcchainvm/grpcutils/server_closer.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package grpcutils diff --git a/avalanchego/vms/rpcchainvm/grpcutils/util.go b/avalanchego/vms/rpcchainvm/grpcutils/util.go index 8ad042ea..69e165c2 100644 --- a/avalanchego/vms/rpcchainvm/grpcutils/util.go +++ b/avalanchego/vms/rpcchainvm/grpcutils/util.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package grpcutils @@ -12,11 +12,9 @@ import ( "google.golang.org/protobuf/proto" "google.golang.org/protobuf/types/known/anypb" + httppb "github.com/ava-labs/avalanchego/proto/pb/http" spb "google.golang.org/genproto/googleapis/rpc/status" - tspb "google.golang.org/protobuf/types/known/timestamppb" - - httppb "github.com/ava-labs/avalanchego/proto/pb/http" ) func Errorf(code int, tmpl string, args ...interface{}) error { diff --git a/avalanchego/vms/rpcchainvm/gruntime/runtime_client.go b/avalanchego/vms/rpcchainvm/gruntime/runtime_client.go index 67a1e986..8db4adbe 100644 --- a/avalanchego/vms/rpcchainvm/gruntime/runtime_client.go +++ b/avalanchego/vms/rpcchainvm/gruntime/runtime_client.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package gruntime diff --git a/avalanchego/vms/rpcchainvm/gruntime/runtime_server.go b/avalanchego/vms/rpcchainvm/gruntime/runtime_server.go index 25954ee9..09be6c12 100644 --- a/avalanchego/vms/rpcchainvm/gruntime/runtime_server.go +++ b/avalanchego/vms/rpcchainvm/gruntime/runtime_server.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package gruntime @@ -13,7 +13,7 @@ import ( pb "github.com/ava-labs/avalanchego/proto/pb/vm/runtime" ) -var _ pb.RuntimeServer = &Server{} +var _ pb.RuntimeServer = (*Server)(nil) // Server is a VM runtime initializer controlled by RPC. type Server struct { diff --git a/avalanchego/vms/rpcchainvm/messenger/messenger_client.go b/avalanchego/vms/rpcchainvm/messenger/messenger_client.go index e7910eb0..d392b9af 100644 --- a/avalanchego/vms/rpcchainvm/messenger/messenger_client.go +++ b/avalanchego/vms/rpcchainvm/messenger/messenger_client.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
+// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package messenger diff --git a/avalanchego/vms/rpcchainvm/messenger/messenger_server.go b/avalanchego/vms/rpcchainvm/messenger/messenger_server.go index 273ffdfd..fc28a075 100644 --- a/avalanchego/vms/rpcchainvm/messenger/messenger_server.go +++ b/avalanchego/vms/rpcchainvm/messenger/messenger_server.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package messenger diff --git a/avalanchego/vms/rpcchainvm/runtime/README.md b/avalanchego/vms/rpcchainvm/runtime/README.md index 6e09e41f..1a2fe264 100644 --- a/avalanchego/vms/rpcchainvm/runtime/README.md +++ b/avalanchego/vms/rpcchainvm/runtime/README.md @@ -17,7 +17,7 @@ It works by starting the VM's as a subprocess of AvalancheGo by `os.Exec`. ## Workflow - `VMRegistry` calls the RPC Chain VM `Factory`. -- Factory Starts an instanace of a `VMRE` server that consumes a `runtime.Initializer` interface implementation. +- Factory Starts an instance of a `VMRE` server that consumes a `runtime.Initializer` interface implementation. - The address of this server is passed as a ENV variable `AVALANCHE_VM_RUNTIME_ENGINE_ADDR` via `os.Exec` which starts the VM binary. - The VM uses the address of the `VMRE` server to create a client. - Client sends a `Initialize` RPC informing the server of the `Protocol Version` and future `Address` of the RPC Chain VM server allowing it to perform a validation `Handshake`. diff --git a/avalanchego/vms/rpcchainvm/runtime/manager.go b/avalanchego/vms/rpcchainvm/runtime/manager.go index 3e1a9eaa..425faa73 100644 --- a/avalanchego/vms/rpcchainvm/runtime/manager.go +++ b/avalanchego/vms/rpcchainvm/runtime/manager.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. 
All rights reserved. // See the file LICENSE for licensing terms. package runtime diff --git a/avalanchego/vms/rpcchainvm/runtime/runtime.go b/avalanchego/vms/rpcchainvm/runtime/runtime.go index f5d9666e..1a1a198a 100644 --- a/avalanchego/vms/rpcchainvm/runtime/runtime.go +++ b/avalanchego/vms/rpcchainvm/runtime/runtime.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package runtime @@ -21,7 +21,7 @@ const ( ) var ( - ErrProtocolVersionMismatch = errors.New("protocol version mismatch") + ErrProtocolVersionMismatch = errors.New("RPCChainVM protocol version mismatch between AvalancheGo and Virtual Machine plugin") ErrHandshakeFailed = errors.New("handshake failed") ErrInvalidConfig = errors.New("invalid config") ErrProcessNotFound = errors.New("vm process not found") diff --git a/avalanchego/vms/rpcchainvm/runtime/subprocess/initializer.go b/avalanchego/vms/rpcchainvm/runtime/subprocess/initializer.go index 47a50984..bc8d4e41 100644 --- a/avalanchego/vms/rpcchainvm/runtime/subprocess/initializer.go +++ b/avalanchego/vms/rpcchainvm/runtime/subprocess/initializer.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package subprocess @@ -34,9 +34,9 @@ func newInitializer() *initializer { func (i *initializer) Initialize(_ context.Context, protocolVersion uint, vmAddr string) error { i.once.Do(func() { if version.RPCChainVMProtocol != protocolVersion { - i.err = fmt.Errorf( - "%w avalanchego: %d, vm: %d", + i.err = fmt.Errorf("%w. AvalancheGo version %s implements RPCChainVM protocol version %d. The VM implements RPCChainVM protocol version %d. Please make sure that there is an exact match of the protocol versions. 
This can be achieved by updating your VM or running an older/newer version of AvalancheGo. Please be advised that some virtual machines may not yet support the latest RPCChainVM protocol version", runtime.ErrProtocolVersionMismatch, + version.Current, version.RPCChainVMProtocol, protocolVersion, ) diff --git a/avalanchego/vms/rpcchainvm/runtime/subprocess/linux_stopper.go b/avalanchego/vms/rpcchainvm/runtime/subprocess/linux_stopper.go index e056b82f..5205ea40 100644 --- a/avalanchego/vms/rpcchainvm/runtime/subprocess/linux_stopper.go +++ b/avalanchego/vms/rpcchainvm/runtime/subprocess/linux_stopper.go @@ -1,12 +1,12 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + //go:build linux // +build linux // ^ SIGTERM signal is not available on Windows // ^ syscall.SysProcAttr only has field Pdeathsig on Linux -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - package subprocess import ( diff --git a/avalanchego/vms/rpcchainvm/runtime/subprocess/non_linux_stopper.go b/avalanchego/vms/rpcchainvm/runtime/subprocess/non_linux_stopper.go index facbf58b..c1a590e3 100644 --- a/avalanchego/vms/rpcchainvm/runtime/subprocess/non_linux_stopper.go +++ b/avalanchego/vms/rpcchainvm/runtime/subprocess/non_linux_stopper.go @@ -1,9 +1,9 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + //go:build !linux // +build !linux -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- package subprocess import ( diff --git a/avalanchego/vms/rpcchainvm/runtime/subprocess/runtime.go b/avalanchego/vms/rpcchainvm/runtime/subprocess/runtime.go index 10f5ebed..2cd92a00 100644 --- a/avalanchego/vms/rpcchainvm/runtime/subprocess/runtime.go +++ b/avalanchego/vms/rpcchainvm/runtime/subprocess/runtime.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package subprocess @@ -132,12 +132,12 @@ func Bootstrap( case <-intitializer.initialized: case <-timeout.C: stopper.Stop(ctx) - return nil, nil, fmt.Errorf("%w: %v", runtime.ErrHandshakeFailed, runtime.ErrProcessNotFound) + return nil, nil, fmt.Errorf("%w: %w", runtime.ErrHandshakeFailed, runtime.ErrProcessNotFound) } if intitializer.err != nil { stopper.Stop(ctx) - return nil, nil, fmt.Errorf("%w: %v", runtime.ErrHandshakeFailed, intitializer.err) + return nil, nil, fmt.Errorf("%w: %w", runtime.ErrHandshakeFailed, intitializer.err) } log.Info("plugin handshake succeeded", diff --git a/avalanchego/vms/rpcchainvm/runtime/subprocess/stopper.go b/avalanchego/vms/rpcchainvm/runtime/subprocess/stopper.go index b4d02659..4dfd33c2 100644 --- a/avalanchego/vms/rpcchainvm/runtime/subprocess/stopper.go +++ b/avalanchego/vms/rpcchainvm/runtime/subprocess/stopper.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package subprocess diff --git a/avalanchego/vms/rpcchainvm/state_syncable_vm_test.go b/avalanchego/vms/rpcchainvm/state_syncable_vm_test.go index d2c47856..3b71aaa8 100644 --- a/avalanchego/vms/rpcchainvm/state_syncable_vm_test.go +++ b/avalanchego/vms/rpcchainvm/state_syncable_vm_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. 
All rights reserved. // See the file LICENSE for licensing terms. package rpcchainvm @@ -9,24 +9,21 @@ import ( "io" "testing" - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" - "github.com/ava-labs/avalanchego/database/manager" + "github.com/ava-labs/avalanchego/database/memdb" + "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" - "github.com/ava-labs/avalanchego/snow/engine/snowman/block/mocks" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/version" "github.com/ava-labs/avalanchego/vms/rpcchainvm/grpcutils" "github.com/ava-labs/avalanchego/vms/rpcchainvm/runtime" "github.com/ava-labs/avalanchego/vms/rpcchainvm/runtime/subprocess" - - vmpb "github.com/ava-labs/avalanchego/proto/pb/vm" ) var ( @@ -68,18 +65,18 @@ var ( ) type StateSyncEnabledMock struct { - *mocks.MockChainVM - *mocks.MockStateSyncableVM + *block.MockChainVM + *block.MockStateSyncableVM } -func stateSyncEnabledTestPlugin(t *testing.T, loadExpectations bool) (block.ChainVM, *gomock.Controller) { +func stateSyncEnabledTestPlugin(t *testing.T, loadExpectations bool) block.ChainVM { // test key is "stateSyncEnabledTestKey" // create mock ctrl := gomock.NewController(t) ssVM := StateSyncEnabledMock{ - MockChainVM: mocks.NewMockChainVM(ctrl), - MockStateSyncableVM: mocks.NewMockStateSyncableVM(ctrl), + MockChainVM: block.NewMockChainVM(ctrl), + MockStateSyncableVM: block.NewMockStateSyncableVM(ctrl), } if loadExpectations { @@ -91,17 +88,17 @@ func stateSyncEnabledTestPlugin(t *testing.T, loadExpectations bool) (block.Chai ) } - return ssVM, ctrl + return ssVM } -func getOngoingSyncStateSummaryTestPlugin(t *testing.T, 
loadExpectations bool) (block.ChainVM, *gomock.Controller) { +func getOngoingSyncStateSummaryTestPlugin(t *testing.T, loadExpectations bool) block.ChainVM { // test key is "getOngoingSyncStateSummaryTestKey" // create mock ctrl := gomock.NewController(t) ssVM := StateSyncEnabledMock{ - MockChainVM: mocks.NewMockChainVM(ctrl), - MockStateSyncableVM: mocks.NewMockStateSyncableVM(ctrl), + MockChainVM: block.NewMockChainVM(ctrl), + MockStateSyncableVM: block.NewMockStateSyncableVM(ctrl), } if loadExpectations { @@ -112,17 +109,17 @@ func getOngoingSyncStateSummaryTestPlugin(t *testing.T, loadExpectations bool) ( ) } - return ssVM, ctrl + return ssVM } -func getLastStateSummaryTestPlugin(t *testing.T, loadExpectations bool) (block.ChainVM, *gomock.Controller) { +func getLastStateSummaryTestPlugin(t *testing.T, loadExpectations bool) block.ChainVM { // test key is "getLastStateSummaryTestKey" // create mock ctrl := gomock.NewController(t) ssVM := StateSyncEnabledMock{ - MockChainVM: mocks.NewMockChainVM(ctrl), - MockStateSyncableVM: mocks.NewMockStateSyncableVM(ctrl), + MockChainVM: block.NewMockChainVM(ctrl), + MockStateSyncableVM: block.NewMockStateSyncableVM(ctrl), } if loadExpectations { @@ -133,17 +130,17 @@ func getLastStateSummaryTestPlugin(t *testing.T, loadExpectations bool) (block.C ) } - return ssVM, ctrl + return ssVM } -func parseStateSummaryTestPlugin(t *testing.T, loadExpectations bool) (block.ChainVM, *gomock.Controller) { +func parseStateSummaryTestPlugin(t *testing.T, loadExpectations bool) block.ChainVM { // test key is "parseStateSummaryTestKey" // create mock ctrl := gomock.NewController(t) ssVM := StateSyncEnabledMock{ - MockChainVM: mocks.NewMockChainVM(ctrl), - MockStateSyncableVM: mocks.NewMockStateSyncableVM(ctrl), + MockChainVM: block.NewMockChainVM(ctrl), + MockStateSyncableVM: block.NewMockStateSyncableVM(ctrl), } if loadExpectations { @@ -155,17 +152,17 @@ func parseStateSummaryTestPlugin(t *testing.T, loadExpectations bool) (block.Cha ) } - 
return ssVM, ctrl + return ssVM } -func getStateSummaryTestPlugin(t *testing.T, loadExpectations bool) (block.ChainVM, *gomock.Controller) { +func getStateSummaryTestPlugin(t *testing.T, loadExpectations bool) block.ChainVM { // test key is "getStateSummaryTestKey" // create mock ctrl := gomock.NewController(t) ssVM := StateSyncEnabledMock{ - MockChainVM: mocks.NewMockChainVM(ctrl), - MockStateSyncableVM: mocks.NewMockStateSyncableVM(ctrl), + MockChainVM: block.NewMockChainVM(ctrl), + MockStateSyncableVM: block.NewMockStateSyncableVM(ctrl), } if loadExpectations { @@ -176,17 +173,17 @@ func getStateSummaryTestPlugin(t *testing.T, loadExpectations bool) (block.Chain ) } - return ssVM, ctrl + return ssVM } -func acceptStateSummaryTestPlugin(t *testing.T, loadExpectations bool) (block.ChainVM, *gomock.Controller) { +func acceptStateSummaryTestPlugin(t *testing.T, loadExpectations bool) block.ChainVM { // test key is "acceptStateSummaryTestKey" // create mock ctrl := gomock.NewController(t) ssVM := StateSyncEnabledMock{ - MockChainVM: mocks.NewMockChainVM(ctrl), - MockStateSyncableVM: mocks.NewMockStateSyncableVM(ctrl), + MockChainVM: block.NewMockChainVM(ctrl), + MockStateSyncableVM: block.NewMockStateSyncableVM(ctrl), } if loadExpectations { @@ -222,17 +219,17 @@ func acceptStateSummaryTestPlugin(t *testing.T, loadExpectations bool) (block.Ch ) } - return ssVM, ctrl + return ssVM } -func lastAcceptedBlockPostStateSummaryAcceptTestPlugin(t *testing.T, loadExpectations bool) (block.ChainVM, *gomock.Controller) { +func lastAcceptedBlockPostStateSummaryAcceptTestPlugin(t *testing.T, loadExpectations bool) block.ChainVM { // test key is "lastAcceptedBlockPostStateSummaryAcceptTestKey" // create mock ctrl := gomock.NewController(t) ssVM := StateSyncEnabledMock{ - MockChainVM: mocks.NewMockChainVM(ctrl), - MockStateSyncableVM: mocks.NewMockStateSyncableVM(ctrl), + MockChainVM: block.NewMockChainVM(ctrl), + MockStateSyncableVM: block.NewMockStateSyncableVM(ctrl), } if 
loadExpectations { @@ -261,7 +258,7 @@ func lastAcceptedBlockPostStateSummaryAcceptTestPlugin(t *testing.T, loadExpecta ) } - return ssVM, ctrl + return ssVM } func buildClientHelper(require *require.Assertions, testKey string) (*VMClient, runtime.Stopper) { @@ -295,7 +292,7 @@ func buildClientHelper(require *require.Assertions, testKey string) (*VMClient, clientConn, err := grpcutils.Dial(status.Addr) require.NoError(err) - return NewClient(vmpb.NewVMClient(clientConn)), stopper + return NewClient(clientConn), stopper } func TestStateSyncEnabled(t *testing.T) { @@ -326,7 +323,7 @@ func TestStateSyncEnabled(t *testing.T) { // test a non-special error. // TODO: retrieve exact error _, err = vm.StateSyncEnabled(context.Background()) - require.Error(err) + require.Error(err) //nolint:forbidigo // currently returns grpc errors } func TestGetOngoingSyncStateSummary(t *testing.T) { @@ -351,7 +348,7 @@ func TestGetOngoingSyncStateSummary(t *testing.T) { // test a non-special error. // TODO: retrieve exact error _, err = vm.GetOngoingSyncStateSummary(context.Background()) - require.Error(err) + require.Error(err) //nolint:forbidigo // currently returns grpc errors } func TestGetLastStateSummary(t *testing.T) { @@ -376,7 +373,7 @@ func TestGetLastStateSummary(t *testing.T) { // test a non-special error. // TODO: retrieve exact error _, err = vm.GetLastStateSummary(context.Background()) - require.Error(err) + require.Error(err) //nolint:forbidigo // currently returns grpc errors } func TestParseStateSummary(t *testing.T) { @@ -400,12 +397,12 @@ func TestParseStateSummary(t *testing.T) { // test parsing nil summary _, err = vm.ParseStateSummary(context.Background(), nil) - require.Error(err) + require.Error(err) //nolint:forbidigo // currently returns grpc errors // test a non-special error. 
// TODO: retrieve exact error _, err = vm.ParseStateSummary(context.Background(), mockedSummary.Bytes()) - require.Error(err) + require.Error(err) //nolint:forbidigo // currently returns grpc errors } func TestGetStateSummary(t *testing.T) { @@ -430,7 +427,7 @@ func TestGetStateSummary(t *testing.T) { // test a non-special error. // TODO: retrieve exact error _, err = vm.GetStateSummary(context.Background(), mockedSummary.Height()) - require.Error(err) + require.Error(err) //nolint:forbidigo // currently returns grpc errors } func TestAcceptStateSummary(t *testing.T) { @@ -458,7 +455,7 @@ func TestAcceptStateSummary(t *testing.T) { // test a non-special error. // TODO: retrieve exact error _, err = summary.Accept(context.Background()) - require.Error(err) + require.Error(err) //nolint:forbidigo // currently returns grpc errors } // Show that LastAccepted call returns the right answer after a StateSummary @@ -472,11 +469,9 @@ func TestLastAcceptedBlockPostStateSummaryAccept(t *testing.T) { defer stopper.Stop(context.Background()) // Step 1: initialize VM and check initial LastAcceptedBlock - ctx := snow.DefaultContextTest() - dbManager := manager.NewMemDB(version.Semantic1_0_0) - dbManager = dbManager.NewPrefixDBManager([]byte{}) + ctx := snowtest.Context(t, snowtest.CChainID) - require.NoError(vm.Initialize(context.Background(), ctx, dbManager, nil, nil, nil, nil, nil, nil)) + require.NoError(vm.Initialize(context.Background(), ctx, prefixdb.New([]byte{}, memdb.New()), nil, nil, nil, nil, nil, nil)) blkID, err := vm.LastAccepted(context.Background()) require.NoError(err) diff --git a/avalanchego/vms/rpcchainvm/vm.go b/avalanchego/vms/rpcchainvm/vm.go index 834ab9f1..94dee55b 100644 --- a/avalanchego/vms/rpcchainvm/vm.go +++ b/avalanchego/vms/rpcchainvm/vm.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package rpcchainvm @@ -14,9 +14,8 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/health" - healthpb "google.golang.org/grpc/health/grpc_health_v1" - "github.com/ava-labs/avalanchego/snow/engine/snowman/block" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/version" "github.com/ava-labs/avalanchego/vms/rpcchainvm/grpcutils" "github.com/ava-labs/avalanchego/vms/rpcchainvm/gruntime" @@ -24,6 +23,7 @@ import ( vmpb "github.com/ava-labs/avalanchego/proto/pb/vm" runtimepb "github.com/ava-labs/avalanchego/proto/pb/vm/runtime" + healthpb "google.golang.org/grpc/health/grpc_health_v1" ) const defaultRuntimeDialTimeout = 5 * time.Second @@ -35,8 +35,10 @@ const defaultRuntimeDialTimeout = 5 * time.Second func Serve(ctx context.Context, vm block.ChainVM, opts ...grpcutils.ServerOption) error { signals := make(chan os.Signal, 2) signal.Notify(signals, syscall.SIGINT, syscall.SIGTERM) + defer signal.Stop(signals) - server := newVMServer(vm, opts...) + var allowShutdown utils.Atomic[bool] + server := newVMServer(vm, &allowShutdown, opts...) go func(ctx context.Context) { defer func() { server.GracefulStop() @@ -46,11 +48,19 @@ func Serve(ctx context.Context, vm block.ChainVM, opts ...grpcutils.ServerOption for { select { case s := <-signals: + // We drop all signals until our parent process has notified us + // that we are shutting down. Once we are in the shutdown + // workflow, we will gracefully exit upon receiving a SIGTERM. 
+ if !allowShutdown.Get() { + fmt.Printf("runtime engine: ignoring signal: %s\n", s) + continue + } + switch s { case syscall.SIGINT: - fmt.Println("runtime engine: ignoring signal: SIGINT") + fmt.Printf("runtime engine: ignoring signal: %s\n", s) case syscall.SIGTERM: - fmt.Println("runtime engine: received shutdown signal: SIGTERM") + fmt.Printf("runtime engine: received shutdown signal: %s\n", s) return } case <-ctx.Done(): @@ -93,9 +103,9 @@ func Serve(ctx context.Context, vm block.ChainVM, opts ...grpcutils.ServerOption } // Returns an RPC Chain VM server serving health and VM services. -func newVMServer(vm block.ChainVM, opts ...grpcutils.ServerOption) *grpc.Server { +func newVMServer(vm block.ChainVM, allowShutdown *utils.Atomic[bool], opts ...grpcutils.ServerOption) *grpc.Server { server := grpcutils.NewServer(opts...) - vmpb.RegisterVMServer(server, NewServer(vm)) + vmpb.RegisterVMServer(server, NewServer(vm, allowShutdown)) health := health.NewServer() health.SetServingStatus("", healthpb.HealthCheckResponse_SERVING) diff --git a/avalanchego/vms/rpcchainvm/vm_client.go b/avalanchego/vms/rpcchainvm/vm_client.go index 2d18483d..7f03281c 100644 --- a/avalanchego/vms/rpcchainvm/vm_client.go +++ b/avalanchego/vms/rpcchainvm/vm_client.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package rpcchainvm @@ -8,27 +8,19 @@ import ( "encoding/json" "errors" "fmt" + "net/http" "time" - grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" - "github.com/prometheus/client_golang/prometheus" - - dto "github.com/prometheus/client_model/go" - "go.uber.org/zap" - "google.golang.org/grpc" "google.golang.org/grpc/health" "google.golang.org/protobuf/types/known/emptypb" - healthpb "google.golang.org/grpc/health/grpc_health_v1" - "github.com/ava-labs/avalanchego/api/keystore/gkeystore" "github.com/ava-labs/avalanchego/api/metrics" "github.com/ava-labs/avalanchego/chains/atomic/gsharedmemory" "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/database/manager" "github.com/ava-labs/avalanchego/database/rpcdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/ids/galiasreader" @@ -41,6 +33,7 @@ import ( "github.com/ava-labs/avalanchego/snow/validators/gvalidators" "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/resource" + "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/version" "github.com/ava-labs/avalanchego/vms/components/chain" @@ -60,13 +53,17 @@ import ( validatorstatepb "github.com/ava-labs/avalanchego/proto/pb/validatorstate" vmpb "github.com/ava-labs/avalanchego/proto/pb/vm" warppb "github.com/ava-labs/avalanchego/proto/pb/warp" + grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" + dto "github.com/prometheus/client_model/go" + healthpb "google.golang.org/grpc/health/grpc_health_v1" ) +// TODO: Enable these to be configured by the user const ( - decidedCacheSize = 2048 + decidedCacheSize = 64 * units.MiB missingCacheSize = 2048 - unverifiedCacheSize = 2048 - bytesToIDCacheSize = 2048 + unverifiedCacheSize = 64 * units.MiB + bytesToIDCacheSize = 64 * units.MiB ) var ( @@ -76,7 +73,6 @@ var ( _ block.ChainVM = (*VMClient)(nil) _ block.BuildBlockWithContextChainVM = 
(*VMClient)(nil) _ block.BatchedChainVM = (*VMClient)(nil) - _ block.HeightIndexedChainVM = (*VMClient)(nil) _ block.StateSyncableVM = (*VMClient)(nil) _ prometheus.Gatherer = (*VMClient)(nil) @@ -109,9 +105,10 @@ type VMClient struct { } // NewClient returns a VM connected to a remote VM -func NewClient(client vmpb.VMClient) *VMClient { +func NewClient(clientConn *grpc.ClientConn) *VMClient { return &VMClient{ - client: client, + client: vmpb.NewVMClient(clientConn), + conns: []*grpc.ClientConn{clientConn}, } } @@ -126,7 +123,7 @@ func (vm *VMClient) SetProcess(runtime runtime.Stopper, pid int, processTracker func (vm *VMClient) Initialize( ctx context.Context, chainCtx *snow.Context, - dbManager manager.Manager, + db database.Database, genesisBytes []byte, upgradeBytes []byte, configBytes []byte, @@ -152,33 +149,21 @@ func (vm *VMClient) Initialize( return err } - // Initialize and serve each database and construct the db manager - // initialize request parameters - versionedDBs := dbManager.GetDatabases() - versionedDBServers := make([]*vmpb.VersionedDBServer, len(versionedDBs)) - for i, semDB := range versionedDBs { - dbVersion := semDB.Version.String() - serverListener, err := grpcutils.NewListener() - if err != nil { - return err - } - serverAddr := serverListener.Addr().String() - - go grpcutils.Serve(serverListener, vm.newDBServer(semDB.Database)) - chainCtx.Log.Info("grpc: serving database", - zap.String("version", dbVersion), - zap.String("address", serverAddr), - ) - - versionedDBServers[i] = &vmpb.VersionedDBServer{ - ServerAddr: serverAddr, - Version: dbVersion, - } + // Initialize the database + dbServerListener, err := grpcutils.NewListener() + if err != nil { + return err } + dbServerAddr := dbServerListener.Addr().String() + + go grpcutils.Serve(dbServerListener, vm.newDBServer(db)) + chainCtx.Log.Info("grpc: serving database", + zap.String("address", dbServerAddr), + ) vm.messenger = messenger.NewServer(toEngine) vm.keystore = 
gkeystore.NewServer(chainCtx.Keystore) - vm.sharedMemory = gsharedmemory.NewServer(chainCtx.SharedMemory, dbManager.Current().Database) + vm.sharedMemory = gsharedmemory.NewServer(chainCtx.SharedMemory, db) vm.bcLookup = galiasreader.NewServer(chainCtx.BCLookup) vm.appSender = appsender.NewServer(appSender) vm.validatorStateServer = gvalidators.NewServer(chainCtx.ValidatorState) @@ -208,7 +193,7 @@ func (vm *VMClient) Initialize( GenesisBytes: genesisBytes, UpgradeBytes: upgradeBytes, ConfigBytes: configBytes, - DbServers: versionedDBServers, + DbServerAddr: dbServerAddr, ServerAddr: serverAddr, }) if err != nil { @@ -366,35 +351,13 @@ func (vm *VMClient) Shutdown(ctx context.Context) error { return errs.Err } -func (vm *VMClient) CreateHandlers(ctx context.Context) (map[string]*common.HTTPHandler, error) { +func (vm *VMClient) CreateHandlers(ctx context.Context) (map[string]http.Handler, error) { resp, err := vm.client.CreateHandlers(ctx, &emptypb.Empty{}) if err != nil { return nil, err } - handlers := make(map[string]*common.HTTPHandler, len(resp.Handlers)) - for _, handler := range resp.Handlers { - clientConn, err := grpcutils.Dial(handler.ServerAddr) - if err != nil { - return nil, err - } - - vm.conns = append(vm.conns, clientConn) - handlers[handler.Prefix] = &common.HTTPHandler{ - LockOptions: common.LockOption(handler.LockOptions), - Handler: ghttp.NewClient(httppb.NewHTTPClient(clientConn)), - } - } - return handlers, nil -} - -func (vm *VMClient) CreateStaticHandlers(ctx context.Context) (map[string]*common.HTTPHandler, error) { - resp, err := vm.client.CreateStaticHandlers(ctx, &emptypb.Empty{}) - if err != nil { - return nil, err - } - - handlers := make(map[string]*common.HTTPHandler, len(resp.Handlers)) + handlers := make(map[string]http.Handler, len(resp.Handlers)) for _, handler := range resp.Handlers { clientConn, err := grpcutils.Dial(handler.ServerAddr) if err != nil { @@ -402,25 +365,25 @@ func (vm *VMClient) CreateStaticHandlers(ctx 
context.Context) (map[string]*commo } vm.conns = append(vm.conns, clientConn) - handlers[handler.Prefix] = &common.HTTPHandler{ - LockOptions: common.LockOption(handler.LockOptions), - Handler: ghttp.NewClient(httppb.NewHTTPClient(clientConn)), - } + handlers[handler.Prefix] = ghttp.NewClient(httppb.NewHTTPClient(clientConn)) } return handlers, nil } func (vm *VMClient) Connected(ctx context.Context, nodeID ids.NodeID, nodeVersion *version.Application) error { _, err := vm.client.Connected(ctx, &vmpb.ConnectedRequest{ - NodeId: nodeID[:], - Version: nodeVersion.String(), + NodeId: nodeID.Bytes(), + Name: nodeVersion.Name, + Major: uint32(nodeVersion.Major), + Minor: uint32(nodeVersion.Minor), + Patch: uint32(nodeVersion.Patch), }) return err } func (vm *VMClient) Disconnected(ctx context.Context, nodeID ids.NodeID) error { _, err := vm.client.Disconnected(ctx, &vmpb.DisconnectedRequest{ - NodeId: nodeID[:], + NodeId: nodeID.Bytes(), }) return err } @@ -526,7 +489,9 @@ func (vm *VMClient) SetPreference(ctx context.Context, blkID ids.ID) error { } func (vm *VMClient) HealthCheck(ctx context.Context) (interface{}, error) { - health, err := vm.client.Health(ctx, &emptypb.Empty{}) + // HealthCheck is a special case, where we want to fail fast instead of block. 
+ failFast := grpc.WaitForReady(false) + health, err := vm.client.Health(ctx, &emptypb.Empty{}, failFast) if err != nil { return nil, fmt.Errorf("health check failed: %w", err) } @@ -555,14 +520,15 @@ func (vm *VMClient) CrossChainAppRequest(ctx context.Context, chainID ids.ID, re return err } -func (vm *VMClient) CrossChainAppRequestFailed(ctx context.Context, chainID ids.ID, requestID uint32) error { - _, err := vm.client.CrossChainAppRequestFailed( - ctx, - &vmpb.CrossChainAppRequestFailedMsg{ - ChainId: chainID[:], - RequestId: requestID, - }, - ) +func (vm *VMClient) CrossChainAppRequestFailed(ctx context.Context, chainID ids.ID, requestID uint32, appErr *common.AppError) error { + msg := &vmpb.CrossChainAppRequestFailedMsg{ + ChainId: chainID[:], + RequestId: requestID, + ErrorCode: appErr.Code, + ErrorMessage: appErr.Message, + } + + _, err := vm.client.CrossChainAppRequestFailed(ctx, msg) return err } @@ -582,7 +548,7 @@ func (vm *VMClient) AppRequest(ctx context.Context, nodeID ids.NodeID, requestID _, err := vm.client.AppRequest( ctx, &vmpb.AppRequestMsg{ - NodeId: nodeID[:], + NodeId: nodeID.Bytes(), RequestId: requestID, Request: request, Deadline: grpcutils.TimestampFromTime(deadline), @@ -595,7 +561,7 @@ func (vm *VMClient) AppResponse(ctx context.Context, nodeID ids.NodeID, requestI _, err := vm.client.AppResponse( ctx, &vmpb.AppResponseMsg{ - NodeId: nodeID[:], + NodeId: nodeID.Bytes(), RequestId: requestID, Response: response, }, @@ -603,14 +569,15 @@ func (vm *VMClient) AppResponse(ctx context.Context, nodeID ids.NodeID, requestI return err } -func (vm *VMClient) AppRequestFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32) error { - _, err := vm.client.AppRequestFailed( - ctx, - &vmpb.AppRequestFailedMsg{ - NodeId: nodeID[:], - RequestId: requestID, - }, - ) +func (vm *VMClient) AppRequestFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32, appErr *common.AppError) error { + msg := &vmpb.AppRequestFailedMsg{ + NodeId: 
nodeID.Bytes(), + RequestId: requestID, + ErrorCode: appErr.Code, + ErrorMessage: appErr.Message, + } + + _, err := vm.client.AppRequestFailed(ctx, msg) return err } @@ -618,7 +585,7 @@ func (vm *VMClient) AppGossip(ctx context.Context, nodeID ids.NodeID, msg []byte _, err := vm.client.AppGossip( ctx, &vmpb.AppGossipMsg{ - NodeId: nodeID[:], + NodeId: nodeID.Bytes(), Msg: msg, }, ) diff --git a/avalanchego/vms/rpcchainvm/vm_server.go b/avalanchego/vms/rpcchainvm/vm_server.go index b6701eff..82bafe42 100644 --- a/avalanchego/vms/rpcchainvm/vm_server.go +++ b/avalanchego/vms/rpcchainvm/vm_server.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package rpcchainvm @@ -11,18 +11,15 @@ import ( "os" "time" - grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" - "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/collectors" - "google.golang.org/protobuf/types/known/emptypb" "github.com/ava-labs/avalanchego/api/keystore/gkeystore" "github.com/ava-labs/avalanchego/api/metrics" "github.com/ava-labs/avalanchego/chains/atomic/gsharedmemory" + "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/corruptabledb" - "github.com/ava-labs/avalanchego/database/manager" "github.com/ava-labs/avalanchego/database/rpcdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/ids/galiasreader" @@ -32,6 +29,7 @@ import ( "github.com/ava-labs/avalanchego/snow/engine/common/appsender" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" "github.com/ava-labs/avalanchego/snow/validators/gvalidators" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/wrappers" @@ -51,6 +49,7 @@ import ( validatorstatepb 
"github.com/ava-labs/avalanchego/proto/pb/validatorstate" vmpb "github.com/ava-labs/avalanchego/proto/pb/vm" warppb "github.com/ava-labs/avalanchego/proto/pb/warp" + grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" ) var ( @@ -69,12 +68,13 @@ type VMServer struct { // If nil, the underlying VM doesn't implement the interface. bVM block.BuildBlockWithContextChainVM // If nil, the underlying VM doesn't implement the interface. - hVM block.HeightIndexedChainVM - // If nil, the underlying VM doesn't implement the interface. ssVM block.StateSyncableVM + allowShutdown *utils.Atomic[bool] + processMetrics prometheus.Gatherer - dbManager manager.Manager + db database.Database + log logging.Logger serverCloser grpcutils.ServerCloser connCloser wrappers.Closer @@ -84,15 +84,14 @@ type VMServer struct { } // NewServer returns a vm instance connected to a remote vm instance -func NewServer(vm block.ChainVM) *VMServer { +func NewServer(vm block.ChainVM, allowShutdown *utils.Atomic[bool]) *VMServer { bVM, _ := vm.(block.BuildBlockWithContextChainVM) - hVM, _ := vm.(block.HeightIndexedChainVM) ssVM, _ := vm.(block.StateSyncableVM) return &VMServer{ - vm: vm, - bVM: bVM, - hVM: hVM, - ssVM: ssVM, + vm: vm, + bVM: bVM, + ssVM: ssVM, + allowShutdown: allowShutdown, } } @@ -149,40 +148,29 @@ func (vm *VMServer) Initialize(ctx context.Context, req *vmpb.InitializeRequest) // Register metrics for each Go plugin processes vm.processMetrics = registerer - // Dial each database in the request and construct the database manager - versionedDBs := make([]*manager.VersionedDatabase, len(req.DbServers)) - for i, vDBReq := range req.DbServers { - version, err := version.Parse(vDBReq.Version) - if err != nil { - // Ignore closing errors to return the original error - _ = vm.connCloser.Close() - return nil, err - } - - clientConn, err := grpcutils.Dial( - vDBReq.ServerAddr, - grpcutils.WithChainUnaryInterceptor(grpcClientMetrics.UnaryClientInterceptor()), - 
grpcutils.WithChainStreamInterceptor(grpcClientMetrics.StreamClientInterceptor()), - ) - if err != nil { - // Ignore closing errors to return the original error - _ = vm.connCloser.Close() - return nil, err - } - vm.connCloser.Add(clientConn) - db := rpcdb.NewClient(rpcdbpb.NewDatabaseClient(clientConn)) - versionedDBs[i] = &manager.VersionedDatabase{ - Database: corruptabledb.New(db), - Version: version, - } - } - dbManager, err := manager.NewManagerFromDBs(versionedDBs) + // Dial the database + dbClientConn, err := grpcutils.Dial( + req.DbServerAddr, + grpcutils.WithChainUnaryInterceptor(grpcClientMetrics.UnaryClientInterceptor()), + grpcutils.WithChainStreamInterceptor(grpcClientMetrics.StreamClientInterceptor()), + ) if err != nil { - // Ignore closing errors to return the original error - _ = vm.connCloser.Close() return nil, err } - vm.dbManager = dbManager + vm.connCloser.Add(dbClientConn) + vm.db = corruptabledb.New( + rpcdb.NewClient(rpcdbpb.NewDatabaseClient(dbClientConn)), + ) + + // TODO: Allow the logger to be configured by the client + vm.log = logging.NewLogger( + fmt.Sprintf("<%s Chain>", chainID), + logging.NewWrappedCore( + logging.Info, + originalStderr, + logging.Colors.ConsoleEncoder(), + ), + ) clientConn, err := grpcutils.Dial( req.ServerAddr, @@ -233,15 +221,7 @@ func (vm *VMServer) Initialize(ctx context.Context, req *vmpb.InitializeRequest) CChainID: cChainID, AVAXAssetID: avaxAssetID, - // TODO: Allow the logger to be configured by the client - Log: logging.NewLogger( - fmt.Sprintf("<%s Chain>", chainID), - logging.NewWrappedCore( - logging.Info, - originalStderr, - logging.Colors.ConsoleEncoder(), - ), - ), + Log: vm.log, Keystore: keystoreClient, SharedMemory: sharedMemoryClient, BCLookup: bcLookupClient, @@ -256,7 +236,7 @@ func (vm *VMServer) Initialize(ctx context.Context, req *vmpb.InitializeRequest) ChainDataDir: req.ChainDataDir, } - if err := vm.vm.Initialize(ctx, vm.ctx, dbManager, req.GenesisBytes, req.UpgradeBytes, 
req.ConfigBytes, toEngine, nil, appSenderClient); err != nil { + if err := vm.vm.Initialize(ctx, vm.ctx, vm.db, req.GenesisBytes, req.UpgradeBytes, req.ConfigBytes, toEngine, nil, appSenderClient); err != nil { // Ignore errors closing resources to return the original error _ = vm.connCloser.Close() close(vm.closed) @@ -317,6 +297,7 @@ func (vm *VMServer) SetState(ctx context.Context, stateReq *vmpb.SetStateRequest } func (vm *VMServer) Shutdown(ctx context.Context, _ *emptypb.Empty) (*emptypb.Empty, error) { + vm.allowShutdown.Set(true) if vm.closed == nil { return &emptypb.Empty{}, nil } @@ -334,53 +315,21 @@ func (vm *VMServer) CreateHandlers(ctx context.Context, _ *emptypb.Empty) (*vmpb return nil, err } resp := &vmpb.CreateHandlersResponse{} - for prefix, h := range handlers { - handler := h - - serverListener, err := grpcutils.NewListener() - if err != nil { - return nil, err - } - server := grpcutils.NewServer() - vm.serverCloser.Add(server) - httppb.RegisterHTTPServer(server, ghttp.NewServer(handler.Handler)) - - // Start HTTP service - go grpcutils.Serve(serverListener, server) - - resp.Handlers = append(resp.Handlers, &vmpb.Handler{ - Prefix: prefix, - LockOptions: uint32(handler.LockOptions), - ServerAddr: serverListener.Addr().String(), - }) - } - return resp, nil -} - -func (vm *VMServer) CreateStaticHandlers(ctx context.Context, _ *emptypb.Empty) (*vmpb.CreateStaticHandlersResponse, error) { - handlers, err := vm.vm.CreateStaticHandlers(ctx) - if err != nil { - return nil, err - } - resp := &vmpb.CreateStaticHandlersResponse{} - for prefix, h := range handlers { - handler := h - + for prefix, handler := range handlers { serverListener, err := grpcutils.NewListener() if err != nil { return nil, err } server := grpcutils.NewServer() vm.serverCloser.Add(server) - httppb.RegisterHTTPServer(server, ghttp.NewServer(handler.Handler)) + httppb.RegisterHTTPServer(server, ghttp.NewServer(handler)) // Start HTTP service go grpcutils.Serve(serverListener, server) 
resp.Handlers = append(resp.Handlers, &vmpb.Handler{ - Prefix: prefix, - LockOptions: uint32(handler.LockOptions), - ServerAddr: serverListener.Addr().String(), + Prefix: prefix, + ServerAddr: serverListener.Addr().String(), }) } return resp, nil @@ -392,11 +341,12 @@ func (vm *VMServer) Connected(ctx context.Context, req *vmpb.ConnectedRequest) ( return nil, err } - peerVersion, err := version.ParseApplication(req.Version) - if err != nil { - return nil, err + peerVersion := &version.Application{ + Name: req.Name, + Major: int(req.Major), + Minor: int(req.Minor), + Patch: int(req.Patch), } - return &emptypb.Empty{}, vm.vm.Connected(ctx, nodeID, peerVersion) } @@ -520,7 +470,7 @@ func (vm *VMServer) Health(ctx context.Context, _ *emptypb.Empty) (*vmpb.HealthR if err != nil { return &vmpb.HealthResponse{}, err } - dbHealth, err := vm.dbHealthChecks(ctx) + dbHealth, err := vm.db.HealthCheck(ctx) if err != nil { return &vmpb.HealthResponse{}, err } @@ -535,22 +485,6 @@ func (vm *VMServer) Health(ctx context.Context, _ *emptypb.Empty) (*vmpb.HealthR }, err } -func (vm *VMServer) dbHealthChecks(ctx context.Context) (interface{}, error) { - details := make(map[string]interface{}, len(vm.dbManager.GetDatabases())) - - // Check Database health - for _, client := range vm.dbManager.GetDatabases() { - // Shared gRPC client don't close - health, err := client.Database.HealthCheck(ctx) - if err != nil { - return nil, fmt.Errorf("failed to check db health %q: %w", client.Version.String(), err) - } - details[client.Version.String()] = health - } - - return details, nil -} - func (vm *VMServer) Version(ctx context.Context, _ *emptypb.Empty) (*vmpb.VersionResponse, error) { version, err := vm.vm.Version(ctx) return &vmpb.VersionResponse{ @@ -575,7 +509,12 @@ func (vm *VMServer) CrossChainAppRequestFailed(ctx context.Context, msg *vmpb.Cr if err != nil { return nil, err } - return &emptypb.Empty{}, vm.vm.CrossChainAppRequestFailed(ctx, chainID, msg.RequestId) + + appErr := 
&common.AppError{ + Code: msg.ErrorCode, + Message: msg.ErrorMessage, + } + return &emptypb.Empty{}, vm.vm.CrossChainAppRequestFailed(ctx, chainID, msg.RequestId, appErr) } func (vm *VMServer) CrossChainAppResponse(ctx context.Context, msg *vmpb.CrossChainAppResponseMsg) (*emptypb.Empty, error) { @@ -603,7 +542,12 @@ func (vm *VMServer) AppRequestFailed(ctx context.Context, req *vmpb.AppRequestFa if err != nil { return nil, err } - return &emptypb.Empty{}, vm.vm.AppRequestFailed(ctx, nodeID, req.RequestId) + + appErr := &common.AppError{ + Code: req.ErrorCode, + Message: req.ErrorMessage, + } + return &emptypb.Empty{}, vm.vm.AppRequestFailed(ctx, nodeID, req.RequestId, appErr) } func (vm *VMServer) AppResponse(ctx context.Context, req *vmpb.AppResponseMsg) (*emptypb.Empty, error) { @@ -652,6 +596,7 @@ func (vm *VMServer) GetAncestors(ctx context.Context, req *vmpb.GetAncestorsRequ blocks, err := block.GetAncestors( ctx, + vm.log, vm.vm, blkID, maxBlksNum, @@ -683,13 +628,7 @@ func (vm *VMServer) BatchedParseBlock( } func (vm *VMServer) VerifyHeightIndex(ctx context.Context, _ *emptypb.Empty) (*vmpb.VerifyHeightIndexResponse, error) { - var err error - if vm.hVM != nil { - err = vm.hVM.VerifyHeightIndex(ctx) - } else { - err = block.ErrHeightIndexedVMNotImplemented - } - + err := vm.vm.VerifyHeightIndex(ctx) return &vmpb.VerifyHeightIndexResponse{ Err: errorToErrEnum[err], }, errorToRPCError(err) @@ -699,16 +638,7 @@ func (vm *VMServer) GetBlockIDAtHeight( ctx context.Context, req *vmpb.GetBlockIDAtHeightRequest, ) (*vmpb.GetBlockIDAtHeightResponse, error) { - var ( - blkID ids.ID - err error - ) - if vm.hVM != nil { - blkID, err = vm.hVM.GetBlockIDAtHeight(ctx, req.Height) - } else { - err = block.ErrHeightIndexedVMNotImplemented - } - + blkID, err := vm.vm.GetBlockIDAtHeight(ctx, req.Height) return &vmpb.GetBlockIDAtHeightResponse{ BlkId: blkID[:], Err: errorToErrEnum[err], diff --git a/avalanchego/vms/rpcchainvm/vm_test.go b/avalanchego/vms/rpcchainvm/vm_test.go 
index 5c1953d3..7aeec999 100644 --- a/avalanchego/vms/rpcchainvm/vm_test.go +++ b/avalanchego/vms/rpcchainvm/vm_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package rpcchainvm @@ -9,17 +9,14 @@ import ( "os" "os/exec" "reflect" + "slices" "testing" "time" - "golang.org/x/exp/slices" - - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" - "github.com/ava-labs/avalanchego/snow/engine/snowman/block/mocks" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/vms/rpcchainvm/grpcutils" "github.com/ava-labs/avalanchego/vms/rpcchainvm/runtime" @@ -41,7 +38,7 @@ const ( batchedParseBlockCachingTestKey = "batchedParseBlockCachingTest" ) -var TestServerPluginMap = map[string]func(*testing.T, bool) (block.ChainVM, *gomock.Controller){ +var TestServerPluginMap = map[string]func(*testing.T, bool) block.ChainVM{ stateSyncEnabledTestKey: stateSyncEnabledTestPlugin, getOngoingSyncStateSummaryTestKey: getOngoingSyncStateSummaryTestPlugin, getLastStateSummaryTestKey: getLastStateSummaryTestPlugin, @@ -92,12 +89,11 @@ func TestHelperProcess(t *testing.T) { select {} } - mockedVM, ctrl := TestServerPluginMap[testKey](t, true /*loadExpectations*/) + mockedVM := TestServerPluginMap[testKey](t, true /*loadExpectations*/) err := Serve(context.Background(), mockedVM) if err != nil { os.Exit(1) } - ctrl.Finish() os.Exit(0) } @@ -118,9 +114,7 @@ func TestVMServerInterface(t *testing.T) { } slices.Sort(gotMethods) - if !reflect.DeepEqual(gotMethods, wantMethods) { - t.Errorf("\ngot: %q\nwant: %q", gotMethods, wantMethods) - } + require.Equal(t, wantMethods, gotMethods) } func TestRuntimeSubprocessBootstrap(t *testing.T) { @@ -175,14 +169,12 @@ func TestRuntimeSubprocessBootstrap(t *testing.T) { require := 
require.New(t) ctrl := gomock.NewController(t) - vm := mocks.NewMockChainVM(ctrl) - defer ctrl.Finish() + vm := block.NewMockChainVM(ctrl) listener, err := grpcutils.NewListener() require.NoError(err) - err = os.Setenv(runtime.EngineAddressKey, listener.Addr().String()) - require.NoError(err) + require.NoError(os.Setenv(runtime.EngineAddressKey, listener.Addr().String())) ctx, cancel := context.WithCancel(context.Background()) defer cancel() diff --git a/avalanchego/vms/rpcchainvm/with_context_vm_test.go b/avalanchego/vms/rpcchainvm/with_context_vm_test.go index 8fd85f30..f9216f5a 100644 --- a/avalanchego/vms/rpcchainvm/with_context_vm_test.go +++ b/avalanchego/vms/rpcchainvm/with_context_vm_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package rpcchainvm @@ -8,17 +8,14 @@ import ( "testing" "time" - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" - "github.com/ava-labs/avalanchego/database/manager" + "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" - "github.com/ava-labs/avalanchego/snow/engine/snowman/block/mocks" - "github.com/ava-labs/avalanchego/version" + "github.com/ava-labs/avalanchego/snow/snowtest" ) var ( @@ -38,29 +35,29 @@ var ( ) type ContextEnabledVMMock struct { - *mocks.MockChainVM - *mocks.MockBuildBlockWithContextChainVM + *block.MockChainVM + *block.MockBuildBlockWithContextChainVM } type ContextEnabledBlockMock struct { *snowman.MockBlock - *mocks.MockWithVerifyContext + *block.MockWithVerifyContext } -func contextEnabledTestPlugin(t *testing.T, loadExpectations bool) (block.ChainVM, *gomock.Controller) { +func contextEnabledTestPlugin(t *testing.T, 
loadExpectations bool) block.ChainVM { // test key is "contextTestKey" // create mock ctrl := gomock.NewController(t) ctxVM := ContextEnabledVMMock{ - MockChainVM: mocks.NewMockChainVM(ctrl), - MockBuildBlockWithContextChainVM: mocks.NewMockBuildBlockWithContextChainVM(ctrl), + MockChainVM: block.NewMockChainVM(ctrl), + MockBuildBlockWithContextChainVM: block.NewMockBuildBlockWithContextChainVM(ctrl), } if loadExpectations { ctxBlock := ContextEnabledBlockMock{ MockBlock: snowman.NewMockBlock(ctrl), - MockWithVerifyContext: mocks.NewMockWithVerifyContext(ctrl), + MockWithVerifyContext: block.NewMockWithVerifyContext(ctrl), } gomock.InOrder( // Initialize @@ -88,7 +85,7 @@ func contextEnabledTestPlugin(t *testing.T, loadExpectations bool) (block.ChainV ) } - return ctxVM, ctrl + return ctxVM } func TestContextVMSummary(t *testing.T) { @@ -99,11 +96,9 @@ func TestContextVMSummary(t *testing.T) { vm, stopper := buildClientHelper(require, testKey) defer stopper.Stop(context.Background()) - ctx := snow.DefaultContextTest() - dbManager := manager.NewMemDB(version.Semantic1_0_0) + ctx := snowtest.Context(t, snowtest.CChainID) - err := vm.Initialize(context.Background(), ctx, dbManager, nil, nil, nil, nil, nil, nil) - require.NoError(err) + require.NoError(vm.Initialize(context.Background(), ctx, memdb.New(), nil, nil, nil, nil, nil, nil)) blkIntf, err := vm.BuildBlockWithContext(context.Background(), blockContext) require.NoError(err) @@ -115,6 +110,5 @@ func TestContextVMSummary(t *testing.T) { require.NoError(err) require.True(shouldVerify) - err = blk.VerifyWithContext(context.Background(), blockContext) - require.NoError(err) + require.NoError(blk.VerifyWithContext(context.Background(), blockContext)) } diff --git a/avalanchego/vms/secp256k1fx/credential.go b/avalanchego/vms/secp256k1fx/credential.go index 707a6b3f..0367c9af 100644 --- a/avalanchego/vms/secp256k1fx/credential.go +++ b/avalanchego/vms/secp256k1fx/credential.go @@ -1,4 +1,4 @@ -// Copyright (C) 
2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package secp256k1fx diff --git a/avalanchego/vms/secp256k1fx/credential_test.go b/avalanchego/vms/secp256k1fx/credential_test.go index a17daa3c..e69b98b2 100644 --- a/avalanchego/vms/secp256k1fx/credential_test.go +++ b/avalanchego/vms/secp256k1fx/credential_test.go @@ -1,10 +1,11 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package secp256k1fx import ( "testing" + "time" "github.com/stretchr/testify/require" @@ -15,20 +16,19 @@ import ( ) func TestCredentialVerify(t *testing.T) { - require := require.New(t) cred := Credential{} - require.NoError(cred.Verify()) + require.NoError(t, cred.Verify()) } func TestCredentialVerifyNil(t *testing.T) { - require := require.New(t) cred := (*Credential)(nil) - require.ErrorIs(cred.Verify(), ErrNilCredential) + err := cred.Verify() + require.ErrorIs(t, err, ErrNilCredential) } func TestCredentialSerialize(t *testing.T) { require := require.New(t) - c := linearcodec.NewDefault() + c := linearcodec.NewDefault(time.Time{}) m := codec.NewDefaultManager() require.NoError(m.RegisterCodec(0, c)) @@ -90,8 +90,7 @@ func TestCredentialSerialize(t *testing.T) { } func TestCredentialNotState(t *testing.T) { - require := require.New(t) intf := interface{}(&Credential{}) _, ok := intf.(verify.State) - require.False(ok) + require.False(t, ok) } diff --git a/avalanchego/vms/secp256k1fx/factory.go b/avalanchego/vms/secp256k1fx/factory.go index ae2463a1..9630795e 100644 --- a/avalanchego/vms/secp256k1fx/factory.go +++ b/avalanchego/vms/secp256k1fx/factory.go @@ -1,16 +1,17 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package secp256k1fx import ( "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/vms" + "github.com/ava-labs/avalanchego/vms/fx" ) +const Name = "secp256k1fx" + var ( - _ vms.Factory = (*Factory)(nil) + _ fx.Factory = (*Factory)(nil) // ID that this Fx uses when labeled ID = ids.ID{'s', 'e', 'c', 'p', '2', '5', '6', 'k', '1', 'f', 'x'} @@ -18,6 +19,6 @@ var ( type Factory struct{} -func (*Factory) New(logging.Logger) (interface{}, error) { - return &Fx{}, nil +func (*Factory) New() any { + return &Fx{} } diff --git a/avalanchego/vms/secp256k1fx/factory_test.go b/avalanchego/vms/secp256k1fx/factory_test.go index 43516499..d7653d36 100644 --- a/avalanchego/vms/secp256k1fx/factory_test.go +++ b/avalanchego/vms/secp256k1fx/factory_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package secp256k1fx @@ -7,14 +7,10 @@ import ( "testing" "github.com/stretchr/testify/require" - - "github.com/ava-labs/avalanchego/utils/logging" ) func TestFactory(t *testing.T) { require := require.New(t) factory := Factory{} - fx, err := factory.New(logging.NoLog{}) - require.NoError(err) - require.NotNil(fx) + require.Equal(&Fx{}, factory.New()) } diff --git a/avalanchego/vms/secp256k1fx/fx.go b/avalanchego/vms/secp256k1fx/fx.go index 15e10617..e0da595d 100644 --- a/avalanchego/vms/secp256k1fx/fx.go +++ b/avalanchego/vms/secp256k1fx/fx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package secp256k1fx @@ -10,9 +10,9 @@ import ( "github.com/ava-labs/avalanchego/cache" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/hashing" - "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/vms/components/verify" "github.com/ava-labs/coreth/accounts" ) @@ -42,8 +42,9 @@ var ( // Fx describes the secp256k1 feature extension type Fx struct { + secp256k1.RecoverCache + VM VM - SECPFactory secp256k1.Factory bootstrapped bool } @@ -55,21 +56,19 @@ func (fx *Fx) Initialize(vmIntf interface{}) error { log := fx.VM.Logger() log.Debug("initializing secp256k1 fx") - fx.SECPFactory = secp256k1.Factory{ - Cache: cache.LRU[ids.ID, *secp256k1.PublicKey]{ + fx.RecoverCache = secp256k1.RecoverCache{ + LRU: cache.LRU[ids.ID, *secp256k1.PublicKey]{ Size: defaultCacheSize, }, } c := fx.VM.CodecRegistry() - errs := wrappers.Errs{} - errs.Add( + return utils.Err( c.RegisterType(&TransferInput{}), c.RegisterType(&MintOutput{}), c.RegisterType(&TransferOutput{}), c.RegisterType(&MintOperation{}), c.RegisterType(&Credential{}), ) - return errs.Err } func (fx *Fx) InitializeVM(vmIntf interface{}) error { @@ -206,27 +205,27 @@ func (fx *Fx) VerifyCredentials(utx UnsignedTx, in *Input, cred *Credential, out } // Make sure each signature in the signature list is from an owner of // the output being consumed - sig := cred.Sigs[i][:] + sig := cred.Sigs[i] + pk, err := fx.RecoverPublicKeyFromHash(txHash, sig[:]) + if err != nil { + return err + } expectedAddress := out.Addrs[index] // Try to recover the address from the signature of the transaction hash without a prefix // (Standard Avalanche approach, but unsupported/deprecated by most signing tools) - recoveredAddress, err := fx.recoverAddress(txHash, sig) - if err != nil { - return err - } - if recoveredAddress == expectedAddress { + if expectedAddress == pk.Address() { continue 
} // Try to recover the address from the signature of the prefixed message hash // (with the standard Ethereum prefix, see accounts.TextHash) if isEthVerificationEnabled { - recoveredAddress, err = fx.recoverAddress(txHashEth, sig) + pk, err := fx.RecoverPublicKeyFromHash(txHashEth, sig[:]) if err != nil { return err } - if recoveredAddress == expectedAddress { + if expectedAddress == pk.Address() { continue } } @@ -252,11 +251,3 @@ func (*Fx) CreateOutput(amount uint64, ownerIntf interface{}) (interface{}, erro OutputOwners: *owner, }, nil } - -func (fx *Fx) recoverAddress(txHash []byte, sig []byte) (ids.ShortID, error) { - pk, err := fx.SECPFactory.RecoverHashPublicKey(txHash, sig) - if err != nil { - return ids.ShortEmpty, err - } - return pk.Address(), nil -} diff --git a/avalanchego/vms/secp256k1fx/fx_test.go b/avalanchego/vms/secp256k1fx/fx_test.go index cc48fb64..dcdf7838 100644 --- a/avalanchego/vms/secp256k1fx/fx_test.go +++ b/avalanchego/vms/secp256k1fx/fx_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package secp256k1fx @@ -52,25 +52,24 @@ func init() { } func TestFxInitialize(t *testing.T) { - require := require.New(t) vm := TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } fx := Fx{} - require.NoError(fx.Initialize(&vm)) + require.NoError(t, fx.Initialize(&vm)) } func TestFxInitializeInvalid(t *testing.T) { - require := require.New(t) fx := Fx{} - require.ErrorIs(fx.Initialize(nil), ErrWrongVMType) + err := fx.Initialize(nil) + require.ErrorIs(t, err, ErrWrongVMType) } func TestFxVerifyTransfer(t *testing.T) { require := require.New(t) vm := TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -108,7 +107,7 @@ func TestFxVerifyTransfer(t *testing.T) { func TestFxVerifyTransferNilTx(t *testing.T) { require := require.New(t) vm := TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -137,13 +136,14 @@ func TestFxVerifyTransferNilTx(t *testing.T) { }, } - require.ErrorIs(fx.VerifyTransfer(nil, in, cred, out), ErrWrongTxType) + err := fx.VerifyTransfer(nil, in, cred, out) + require.ErrorIs(err, ErrWrongTxType) } func TestFxVerifyTransferNilOutput(t *testing.T) { require := require.New(t) vm := TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -163,13 +163,14 @@ func TestFxVerifyTransferNilOutput(t *testing.T) { }, } - require.ErrorIs(fx.VerifyTransfer(tx, in, cred, nil), ErrWrongUTXOType) + err := fx.VerifyTransfer(tx, in, cred, nil) + require.ErrorIs(err, ErrWrongUTXOType) } func TestFxVerifyTransferNilInput(t *testing.T) { require := require.New(t) vm := TestVM{ - Codec: linearcodec.NewDefault(), + Codec: 
linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -193,13 +194,14 @@ func TestFxVerifyTransferNilInput(t *testing.T) { }, } - require.ErrorIs(fx.VerifyTransfer(tx, nil, cred, out), ErrWrongInputType) + err := fx.VerifyTransfer(tx, nil, cred, out) + require.ErrorIs(err, ErrWrongInputType) } func TestFxVerifyTransferNilCredential(t *testing.T) { require := require.New(t) vm := TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -224,13 +226,14 @@ func TestFxVerifyTransferNilCredential(t *testing.T) { }, } - require.ErrorIs(fx.VerifyTransfer(tx, in, nil, out), ErrWrongCredentialType) + err := fx.VerifyTransfer(tx, in, nil, out) + require.ErrorIs(err, ErrWrongCredentialType) } func TestFxVerifyTransferInvalidOutput(t *testing.T) { require := require.New(t) vm := TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -260,13 +263,14 @@ func TestFxVerifyTransferInvalidOutput(t *testing.T) { }, } - require.ErrorIs(fx.VerifyTransfer(tx, in, cred, out), errOutputUnoptimized) + err := fx.VerifyTransfer(tx, in, cred, out) + require.ErrorIs(err, ErrOutputUnoptimized) } func TestFxVerifyTransferWrongAmounts(t *testing.T) { require := require.New(t) vm := TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -296,13 +300,14 @@ func TestFxVerifyTransferWrongAmounts(t *testing.T) { }, } - require.Error(fx.VerifyTransfer(tx, in, cred, out)) + err := fx.VerifyTransfer(tx, in, cred, out) + require.ErrorIs(err, ErrMismatchedAmounts) } func TestFxVerifyTransferTimelocked(t *testing.T) { require := require.New(t) vm := TestVM{ 
- Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -332,13 +337,14 @@ func TestFxVerifyTransferTimelocked(t *testing.T) { }, } - require.ErrorIs(fx.VerifyTransfer(tx, in, cred, out), ErrTimelocked) + err := fx.VerifyTransfer(tx, in, cred, out) + require.ErrorIs(err, ErrTimelocked) } func TestFxVerifyTransferTooManySigners(t *testing.T) { require := require.New(t) vm := TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -369,13 +375,14 @@ func TestFxVerifyTransferTooManySigners(t *testing.T) { }, } - require.ErrorIs(fx.VerifyTransfer(tx, in, cred, out), ErrTooManySigners) + err := fx.VerifyTransfer(tx, in, cred, out) + require.ErrorIs(err, ErrTooManySigners) } func TestFxVerifyTransferTooFewSigners(t *testing.T) { require := require.New(t) vm := TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -403,13 +410,14 @@ func TestFxVerifyTransferTooFewSigners(t *testing.T) { Sigs: [][secp256k1.SignatureLen]byte{}, } - require.ErrorIs(fx.VerifyTransfer(tx, in, cred, out), ErrTooFewSigners) + err := fx.VerifyTransfer(tx, in, cred, out) + require.ErrorIs(err, ErrTooFewSigners) } func TestFxVerifyTransferMismatchedSigners(t *testing.T) { require := require.New(t) vm := TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -440,13 +448,14 @@ func TestFxVerifyTransferMismatchedSigners(t *testing.T) { }, } - require.ErrorIs(fx.VerifyTransfer(tx, in, cred, out), ErrInputCredentialSignersMismatch) + err := fx.VerifyTransfer(tx, in, cred, out) + require.ErrorIs(err, 
ErrInputCredentialSignersMismatch) } func TestFxVerifyTransferInvalidSignature(t *testing.T) { require := require.New(t) vm := TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -479,13 +488,14 @@ func TestFxVerifyTransferInvalidSignature(t *testing.T) { require.NoError(fx.VerifyTransfer(tx, in, cred, out)) require.NoError(fx.Bootstrapped()) - require.Error(fx.VerifyTransfer(tx, in, cred, out), errAddrsNotSortedUnique) + err := fx.VerifyTransfer(tx, in, cred, out) + require.ErrorIs(err, secp256k1.ErrInvalidSig) } func TestFxVerifyTransferWrongSigner(t *testing.T) { require := require.New(t) vm := TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -518,13 +528,14 @@ func TestFxVerifyTransferWrongSigner(t *testing.T) { require.NoError(fx.VerifyTransfer(tx, in, cred, out)) require.NoError(fx.Bootstrapped()) - require.Error(fx.VerifyTransfer(tx, in, cred, out)) + err := fx.VerifyTransfer(tx, in, cred, out) + require.ErrorIs(err, ErrWrongSig) } func TestFxVerifyTransferSigIndexOOB(t *testing.T) { require := require.New(t) vm := TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -557,13 +568,14 @@ func TestFxVerifyTransferSigIndexOOB(t *testing.T) { require.NoError(fx.VerifyTransfer(tx, in, cred, out)) require.NoError(fx.Bootstrapped()) - require.ErrorIs(fx.VerifyTransfer(tx, in, cred, out), ErrInputOutputIndexOutOfBounds) + err := fx.VerifyTransfer(tx, in, cred, out) + require.ErrorIs(err, ErrInputOutputIndexOutOfBounds) } func TestFxVerifyOperation(t *testing.T) { require := require.New(t) vm := TestVM{ - Codec: linearcodec.NewDefault(), + Codec: 
linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -615,7 +627,7 @@ func TestFxVerifyOperation(t *testing.T) { func TestFxVerifyOperationUnknownTx(t *testing.T) { require := require.New(t) vm := TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -660,13 +672,14 @@ func TestFxVerifyOperationUnknownTx(t *testing.T) { } utxos := []interface{}{utxo} - require.ErrorIs(fx.VerifyOperation(nil, op, cred, utxos), ErrWrongTxType) + err := fx.VerifyOperation(nil, op, cred, utxos) + require.ErrorIs(err, ErrWrongTxType) } func TestFxVerifyOperationUnknownOperation(t *testing.T) { require := require.New(t) vm := TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -689,13 +702,14 @@ func TestFxVerifyOperationUnknownOperation(t *testing.T) { } utxos := []interface{}{utxo} - require.ErrorIs(fx.VerifyOperation(tx, nil, cred, utxos), ErrWrongOpType) + err := fx.VerifyOperation(tx, nil, cred, utxos) + require.ErrorIs(err, ErrWrongOpType) } func TestFxVerifyOperationUnknownCredential(t *testing.T) { require := require.New(t) vm := TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -736,13 +750,14 @@ func TestFxVerifyOperationUnknownCredential(t *testing.T) { } utxos := []interface{}{utxo} - require.ErrorIs(fx.VerifyOperation(tx, op, nil, utxos), ErrWrongCredentialType) + err := fx.VerifyOperation(tx, op, nil, utxos) + require.ErrorIs(err, ErrWrongCredentialType) } func TestFxVerifyOperationWrongNumberOfUTXOs(t *testing.T) { require := require.New(t) vm := TestVM{ - Codec: linearcodec.NewDefault(), + Codec: 
linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -788,13 +803,14 @@ func TestFxVerifyOperationWrongNumberOfUTXOs(t *testing.T) { } utxos := []interface{}{utxo, utxo} - require.ErrorIs(fx.VerifyOperation(tx, op, cred, utxos), ErrWrongNumberOfUTXOs) + err := fx.VerifyOperation(tx, op, cred, utxos) + require.ErrorIs(err, ErrWrongNumberOfUTXOs) } func TestFxVerifyOperationUnknownUTXOType(t *testing.T) { require := require.New(t) vm := TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -832,13 +848,14 @@ func TestFxVerifyOperationUnknownUTXOType(t *testing.T) { } utxos := []interface{}{nil} - require.ErrorIs(fx.VerifyOperation(tx, op, cred, utxos), ErrWrongUTXOType) + err := fx.VerifyOperation(tx, op, cred, utxos) + require.ErrorIs(err, ErrWrongUTXOType) } func TestFxVerifyOperationInvalidOperationVerify(t *testing.T) { require := require.New(t) vm := TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -881,13 +898,14 @@ func TestFxVerifyOperationInvalidOperationVerify(t *testing.T) { } utxos := []interface{}{utxo} - require.ErrorIs(fx.VerifyOperation(tx, op, cred, utxos), errOutputUnspendable) + err := fx.VerifyOperation(tx, op, cred, utxos) + require.ErrorIs(err, ErrOutputUnspendable) } func TestFxVerifyOperationMismatchedMintOutputs(t *testing.T) { require := require.New(t) vm := TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -928,12 +946,13 @@ func TestFxVerifyOperationMismatchedMintOutputs(t *testing.T) { } utxos := []interface{}{utxo} - require.ErrorIs(fx.VerifyOperation(tx, op, cred, utxos), 
ErrWrongMintCreated) + err := fx.VerifyOperation(tx, op, cred, utxos) + require.ErrorIs(err, ErrWrongMintCreated) } func TestVerifyPermission(t *testing.T) { vm := TestVM{ - Codec: linearcodec.NewDefault(), + Codec: linearcodec.NewDefault(time.Time{}), Log: logging.NoLog{}, } fx := Fx{} @@ -962,7 +981,7 @@ func TestVerifyPermission(t *testing.T) { Threshold: 0, Addrs: []ids.ShortID{addr}, }, - errOutputUnoptimized, + ErrOutputUnoptimized, }, { "threshold 0, no sigs, no addrs", @@ -995,7 +1014,7 @@ func TestVerifyPermission(t *testing.T) { Threshold: 0, Addrs: []ids.ShortID{addr}, }, - errOutputUnoptimized, + ErrOutputUnoptimized, }, { "threshold 1, 0 sigs (too few sigs)", @@ -1028,7 +1047,7 @@ func TestVerifyPermission(t *testing.T) { Threshold: 2, Addrs: []ids.ShortID{addr, addr2}, }, - errNotSortedUnique, + ErrInputIndicesNotSortedUnique, }, { "threshold 2, repeated address and repeated sig", @@ -1039,7 +1058,7 @@ func TestVerifyPermission(t *testing.T) { Threshold: 2, Addrs: []ids.ShortID{addr, addr}, }, - errAddrsNotSortedUnique, + ErrAddrsNotSortedUnique, }, { "threshold 2, 2 sigs", @@ -1061,7 +1080,7 @@ func TestVerifyPermission(t *testing.T) { Threshold: 2, Addrs: []ids.ShortID{addr, addr2}, }, - errNotSortedUnique, + ErrInputIndicesNotSortedUnique, }, { "threshold 1, 1 sig, index out of bounds", diff --git a/avalanchego/vms/secp256k1fx/input.go b/avalanchego/vms/secp256k1fx/input.go index 5659727c..7e11556b 100644 --- a/avalanchego/vms/secp256k1fx/input.go +++ b/avalanchego/vms/secp256k1fx/input.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package secp256k1fx @@ -15,8 +15,8 @@ const ( ) var ( - errNilInput = errors.New("nil input") - errNotSortedUnique = errors.New("signatures not sorted and unique") + ErrNilInput = errors.New("nil input") + ErrInputIndicesNotSortedUnique = errors.New("address indices not sorted and unique") ) type Input struct { @@ -35,9 +35,9 @@ func (in *Input) Cost() (uint64, error) { func (in *Input) Verify() error { switch { case in == nil: - return errNilInput + return ErrNilInput case !utils.IsSortedAndUniqueOrdered(in.SigIndices): - return errNotSortedUnique + return ErrInputIndicesNotSortedUnique default: return nil } diff --git a/avalanchego/vms/secp256k1fx/input_test.go b/avalanchego/vms/secp256k1fx/input_test.go index 72088530..f80b824d 100644 --- a/avalanchego/vms/secp256k1fx/input_test.go +++ b/avalanchego/vms/secp256k1fx/input_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package secp256k1fx @@ -18,17 +18,17 @@ func TestInputVerifyNil(t *testing.T) { { name: "nil input", in: nil, - expectedErr: errNilInput, + expectedErr: ErrNilInput, }, { name: "not sorted", in: &Input{SigIndices: []uint32{2, 1}}, - expectedErr: errNotSortedUnique, + expectedErr: ErrInputIndicesNotSortedUnique, }, { name: "not unique", in: &Input{SigIndices: []uint32{2, 2}}, - expectedErr: errNotSortedUnique, + expectedErr: ErrInputIndicesNotSortedUnique, }, { name: "passes verification", @@ -39,8 +39,8 @@ func TestInputVerifyNil(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - require := require.New(t) - require.ErrorIs(tt.in.Verify(), tt.expectedErr) + err := tt.in.Verify() + require.ErrorIs(t, err, tt.expectedErr) }) } } diff --git a/avalanchego/vms/secp256k1fx/keychain.go b/avalanchego/vms/secp256k1fx/keychain.go index b09c29eb..ecb42f20 100644 --- a/avalanchego/vms/secp256k1fx/keychain.go +++ b/avalanchego/vms/secp256k1fx/keychain.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package secp256k1fx @@ -8,6 +8,9 @@ import ( "fmt" "strings" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/crypto/keychain" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" @@ -24,19 +27,21 @@ var ( // Keychain is a collection of keys that can be used to spend outputs type Keychain struct { - factory *secp256k1.Factory - addrToKeyIndex map[ids.ShortID]int - - // These can be used to iterate over. However, they should not be modified externally. - Addrs set.Set[ids.ShortID] - Keys []*secp256k1.PrivateKey + avaxAddrToKeyIndex map[ids.ShortID]int + ethAddrToKeyIndex map[common.Address]int + + // These can be used to iterate over. However, they should not be modified + // externally. 
+ Addrs set.Set[ids.ShortID] + EthAddrs set.Set[common.Address] + Keys []*secp256k1.PrivateKey } // NewKeychain returns a new keychain containing [keys] func NewKeychain(keys ...*secp256k1.PrivateKey) *Keychain { kc := &Keychain{ - factory: &secp256k1.Factory{}, - addrToKeyIndex: make(map[ids.ShortID]int), + avaxAddrToKeyIndex: make(map[ids.ShortID]int), + ethAddrToKeyIndex: make(map[common.Address]int), } for _, key := range keys { kc.Add(key) @@ -46,28 +51,44 @@ func NewKeychain(keys ...*secp256k1.PrivateKey) *Keychain { // Add a new key to the key chain func (kc *Keychain) Add(key *secp256k1.PrivateKey) { - addr := key.PublicKey().Address() - if _, ok := kc.addrToKeyIndex[addr]; !ok { - kc.addrToKeyIndex[addr] = len(kc.Keys) + pk := key.PublicKey() + avaxAddr := pk.Address() + if _, ok := kc.avaxAddrToKeyIndex[avaxAddr]; !ok { + kc.avaxAddrToKeyIndex[avaxAddr] = len(kc.Keys) + ethAddr := publicKeyToEthAddress(pk) + kc.ethAddrToKeyIndex[ethAddr] = len(kc.Keys) kc.Keys = append(kc.Keys, key) - kc.Addrs.Add(addr) + kc.Addrs.Add(avaxAddr) + kc.EthAddrs.Add(ethAddr) } } -// Get a key from the keychain. If the key is unknown, return a pointer to an empty key. -// In both cases also return a boolean telling whether the key is known. +// Get a key from the keychain and return whether the key existed. func (kc Keychain) Get(id ids.ShortID) (keychain.Signer, bool) { return kc.get(id) } +// Get a key from the keychain and return whether the key existed. 
+func (kc Keychain) GetEth(addr common.Address) (keychain.Signer, bool) { + if i, ok := kc.ethAddrToKeyIndex[addr]; ok { + return kc.Keys[i], true + } + return nil, false +} + // Addresses returns a list of addresses this keychain manages func (kc Keychain) Addresses() set.Set[ids.ShortID] { return kc.Addrs } +// EthAddresses returns a list of addresses this keychain manages +func (kc Keychain) EthAddresses() set.Set[common.Address] { + return kc.EthAddrs +} + // New returns a newly generated private key func (kc *Keychain) New() (*secp256k1.PrivateKey, error) { - sk, err := kc.factory.NewPrivateKey() + sk, err := secp256k1.NewPrivateKey() if err != nil { return nil, err } @@ -143,8 +164,12 @@ func (kc *Keychain) String() string { // to avoid internals type assertions func (kc Keychain) get(id ids.ShortID) (*secp256k1.PrivateKey, bool) { - if i, ok := kc.addrToKeyIndex[id]; ok { + if i, ok := kc.avaxAddrToKeyIndex[id]; ok { return kc.Keys[i], true } return nil, false } + +func publicKeyToEthAddress(pk *secp256k1.PublicKey) common.Address { + return crypto.PubkeyToAddress(*(pk.ToECDSA())) +} diff --git a/avalanchego/vms/secp256k1fx/keychain_test.go b/avalanchego/vms/secp256k1fx/keychain_test.go index 56a3a592..65dd984b 100644 --- a/avalanchego/vms/secp256k1fx/keychain_test.go +++ b/avalanchego/vms/secp256k1fx/keychain_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package secp256k1fx @@ -27,8 +27,7 @@ var ( ) func TestNewKeychain(t *testing.T) { - require := require.New(t) - require.NotNil(NewKeychain()) + require.NotNil(t, NewKeychain()) } func TestKeychainGetUnknownAddr(t *testing.T) { @@ -47,15 +46,15 @@ func TestKeychainAdd(t *testing.T) { skBytes, err := formatting.Decode(formatting.HexNC, keys[0]) require.NoError(err) - sk, err := kc.factory.ToPrivateKey(skBytes) + sk, err := secp256k1.ToPrivateKey(skBytes) require.NoError(err) kc.Add(sk) addr, _ := ids.ShortFromString(addrs[0]) rsk, exists := kc.Get(addr) require.True(exists) - rsksecp, ok := rsk.(*secp256k1.PrivateKey) - require.True(ok, "Factory should have returned secp256k1r private key") + require.IsType(&secp256k1.PrivateKey{}, rsk) + rsksecp := rsk.(*secp256k1.PrivateKey) require.Equal(sk.Bytes(), rsksecp.Bytes()) addrs := kc.Addresses() @@ -67,7 +66,7 @@ func TestKeychainNew(t *testing.T) { require := require.New(t) kc := NewKeychain() - require.Equal(0, kc.Addresses().Len()) + require.Zero(kc.Addresses().Len()) sk, err := kc.New() require.NoError(err) @@ -88,7 +87,7 @@ func TestKeychainMatch(t *testing.T) { skBytes, err := formatting.Decode(formatting.HexNC, keyStr) require.NoError(err) - sk, err := kc.factory.ToPrivateKey(skBytes) + sk, err := secp256k1.ToPrivateKey(skBytes) require.NoError(err) sks = append(sks, sk) } @@ -133,7 +132,7 @@ func TestKeychainSpendMint(t *testing.T) { skBytes, err := formatting.Decode(formatting.HexNC, keyStr) require.NoError(err) - sk, err := kc.factory.ToPrivateKey(skBytes) + sk, err := secp256k1.ToPrivateKey(skBytes) require.NoError(err) sks = append(sks, sk) } @@ -157,8 +156,8 @@ func TestKeychainSpendMint(t *testing.T) { vinput, keys, err := kc.Spend(&mint, 0) require.NoError(err) - input, ok := vinput.(*Input) - require.True(ok) + require.IsType(&Input{}, vinput) + input := vinput.(*Input) require.NoError(input.Verify()) require.Equal([]uint32{0, 1}, input.SigIndices) require.Len(keys, 2) @@ -175,7 +174,7 @@ func 
TestKeychainSpendTransfer(t *testing.T) { skBytes, err := formatting.Decode(formatting.HexNC, keyStr) require.NoError(err) - sk, err := kc.factory.ToPrivateKey(skBytes) + sk, err := secp256k1.ToPrivateKey(skBytes) require.NoError(err) sks = append(sks, sk) } @@ -206,8 +205,8 @@ func TestKeychainSpendTransfer(t *testing.T) { vinput, keys, err := kc.Spend(&transfer, 54321) require.NoError(err) - input, ok := vinput.(*TransferInput) - require.True(ok) + require.IsType(&TransferInput{}, vinput) + input := vinput.(*TransferInput) require.NoError(input.Verify()) require.Equal(uint64(12345), input.Amount()) require.Equal([]uint32{0, 1}, input.SigIndices) @@ -223,7 +222,7 @@ func TestKeychainString(t *testing.T) { skBytes, err := formatting.Decode(formatting.HexNC, keys[0]) require.NoError(err) - sk, err := kc.factory.ToPrivateKey(skBytes) + sk, err := secp256k1.ToPrivateKey(skBytes) require.NoError(err) kc.Add(sk) @@ -238,7 +237,7 @@ func TestKeychainPrefixedString(t *testing.T) { skBytes, err := formatting.Decode(formatting.HexNC, keys[0]) require.NoError(err) - sk, err := kc.factory.ToPrivateKey(skBytes) + sk, err := secp256k1.ToPrivateKey(skBytes) require.NoError(err) kc.Add(sk) diff --git a/avalanchego/vms/secp256k1fx/mint_operation.go b/avalanchego/vms/secp256k1fx/mint_operation.go index a21f3061..80728ca7 100644 --- a/avalanchego/vms/secp256k1fx/mint_operation.go +++ b/avalanchego/vms/secp256k1fx/mint_operation.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package secp256k1fx diff --git a/avalanchego/vms/secp256k1fx/mint_operation_test.go b/avalanchego/vms/secp256k1fx/mint_operation_test.go index 9b68b1c8..3b751c8d 100644 --- a/avalanchego/vms/secp256k1fx/mint_operation_test.go +++ b/avalanchego/vms/secp256k1fx/mint_operation_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
+// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package secp256k1fx @@ -49,7 +49,7 @@ func TestMintOperationVerify(t *testing.T) { MintOutput: validMintOutput, TransferOutput: validTransferOutput, }, - expectedErr: errNotSortedUnique, + expectedErr: ErrInputIndicesNotSortedUnique, }, { name: "invalid mint output", @@ -63,7 +63,7 @@ func TestMintOperationVerify(t *testing.T) { }, TransferOutput: validTransferOutput, }, - expectedErr: errOutputUnspendable, + expectedErr: ErrOutputUnspendable, }, { name: "invalid transfer output", @@ -78,7 +78,7 @@ func TestMintOperationVerify(t *testing.T) { }, }, }, - expectedErr: errOutputUnoptimized, + expectedErr: ErrOutputUnoptimized, }, { name: "addresses not unique", @@ -93,7 +93,7 @@ func TestMintOperationVerify(t *testing.T) { }, }, }, - expectedErr: errAddrsNotSortedUnique, + expectedErr: ErrAddrsNotSortedUnique, }, { name: "addresses not sorted", @@ -108,7 +108,7 @@ func TestMintOperationVerify(t *testing.T) { }, }, }, - expectedErr: errAddrsNotSortedUnique, + expectedErr: ErrAddrsNotSortedUnique, }, { name: "passes verification", @@ -123,8 +123,8 @@ func TestMintOperationVerify(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - require := require.New(t) - require.ErrorIs(tt.op.Verify(), tt.expectedErr) + err := tt.op.Verify() + require.ErrorIs(t, err, tt.expectedErr) }) } } diff --git a/avalanchego/vms/secp256k1fx/mint_output.go b/avalanchego/vms/secp256k1fx/mint_output.go index 7655fc5c..e52ba470 100644 --- a/avalanchego/vms/secp256k1fx/mint_output.go +++ b/avalanchego/vms/secp256k1fx/mint_output.go @@ -1,27 +1,23 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package secp256k1fx -import ( - "github.com/ava-labs/avalanchego/vms/components/verify" -) +import "github.com/ava-labs/avalanchego/vms/components/verify" var _ verify.State = (*MintOutput)(nil) type MintOutput struct { + verify.IsState `json:"-"` + OutputOwners `serialize:"true"` } func (out *MintOutput) Verify() error { switch { case out == nil: - return errNilOutput + return ErrNilOutput default: return out.OutputOwners.Verify() } } - -func (out *MintOutput) VerifyState() error { - return out.Verify() -} diff --git a/avalanchego/vms/secp256k1fx/mint_output_test.go b/avalanchego/vms/secp256k1fx/mint_output_test.go index 7bd55cc0..60a72dfc 100644 --- a/avalanchego/vms/secp256k1fx/mint_output_test.go +++ b/avalanchego/vms/secp256k1fx/mint_output_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package secp256k1fx @@ -20,7 +20,7 @@ func TestMintOutputVerify(t *testing.T) { { name: "nil", out: nil, - expectedErr: errNilOutput, + expectedErr: ErrNilOutput, }, { name: "invalid output owners", @@ -30,7 +30,7 @@ func TestMintOutputVerify(t *testing.T) { Addrs: []ids.ShortID{ids.GenerateTestShortID()}, }, }, - expectedErr: errOutputUnspendable, + expectedErr: ErrOutputUnspendable, }, { name: "passes verification", @@ -46,8 +46,9 @@ func TestMintOutputVerify(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - require.ErrorIs(t, tt.out.Verify(), tt.expectedErr) - require.ErrorIs(t, tt.out.VerifyState(), tt.expectedErr) + require := require.New(t) + err := tt.out.Verify() + require.ErrorIs(err, tt.expectedErr) }) } } diff --git a/avalanchego/vms/secp256k1fx/output_owners.go b/avalanchego/vms/secp256k1fx/output_owners.go index eb601eb3..0f838c69 100644 --- a/avalanchego/vms/secp256k1fx/output_owners.go +++ b/avalanchego/vms/secp256k1fx/output_owners.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava 
Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package secp256k1fx @@ -17,16 +17,15 @@ import ( ) var ( - errNilOutput = errors.New("nil output") - errOutputUnspendable = errors.New("output is unspendable") - errOutputUnoptimized = errors.New("output representation should be optimized") - errAddrsNotSortedUnique = errors.New("addresses not sorted and unique") - errMarshal = errors.New("cannot marshal without ctx") - - _ verify.State = (*OutputOwners)(nil) + ErrNilOutput = errors.New("nil output") + ErrOutputUnspendable = errors.New("output is unspendable") + ErrOutputUnoptimized = errors.New("output representation should be optimized") + ErrAddrsNotSortedUnique = errors.New("addresses not sorted and unique") ) type OutputOwners struct { + verify.IsNotState `json:"-"` + Locktime uint64 `serialize:"true" json:"locktime"` Threshold uint32 `serialize:"true" json:"threshold"` Addrs []ids.ShortID `serialize:"true" json:"addresses"` @@ -37,8 +36,8 @@ type OutputOwners struct { ctx *snow.Context } -// InitCtx assigns the OutputOwners.ctx object to given [ctx] object -// Must be called at least once for MarshalJSON to work successfully +// InitCtx allows addresses to be formatted into their human readable format +// during json marshalling. 
func (out *OutputOwners) InitCtx(ctx *snow.Context) { out.ctx = ctx } @@ -57,16 +56,9 @@ func (out *OutputOwners) MarshalJSON() ([]byte, error) { } // Fields returns JSON keys in a map that can be used with marshal JSON -// to serialise OutputOwners struct +// to serialize OutputOwners struct func (out *OutputOwners) Fields() (map[string]interface{}, error) { - addrsLen := len(out.Addrs) - - // we need out.ctx to do this, if its absent, throw error - if addrsLen > 0 && out.ctx == nil { - return nil, errMarshal - } - - addresses := make([]string, addrsLen) + addresses := make([]string, len(out.Addrs)) for i, addr := range out.Addrs { // for each [addr] in [Addrs] we attempt to format it given // the [out.ctx] object @@ -98,9 +90,7 @@ func (out *OutputOwners) Addresses() [][]byte { // AddressesSet returns addresses as a set func (out *OutputOwners) AddressesSet() set.Set[ids.ShortID] { - set := set.NewSet[ids.ShortID](len(out.Addrs)) - set.Add(out.Addrs...) - return set + return set.Of(out.Addrs...) } // Equals returns true if the provided owners create the same condition @@ -123,29 +113,30 @@ func (out *OutputOwners) Equals(other *OutputOwners) bool { func (out *OutputOwners) Verify() error { switch { case out == nil: - return errNilOutput + return ErrNilOutput case out.Threshold > uint32(len(out.Addrs)): - return errOutputUnspendable + return ErrOutputUnspendable case out.Threshold == 0 && len(out.Addrs) > 0: - return errOutputUnoptimized - case !utils.IsSortedAndUniqueSortable(out.Addrs): - return errAddrsNotSortedUnique + return ErrOutputUnoptimized + case !utils.IsSortedAndUnique(out.Addrs): + return ErrAddrsNotSortedUnique default: return nil } } -func (out *OutputOwners) VerifyState() error { - return out.Verify() -} - func (out *OutputOwners) Sort() { utils.Sort(out.Addrs) } // formatAddress formats a given [addr] into human readable format using -// [ChainID] and [NetworkID] from the provided [ctx]. 
+// [ChainID] and [NetworkID] if a non-nil [ctx] is provided. If [ctx] is not +// provided, the address will be returned in cb58 format. func formatAddress(ctx *snow.Context, addr ids.ShortID) (string, error) { + if ctx == nil { + return addr.String(), nil + } + chainIDAlias, err := ctx.BCLookup.PrimaryAlias(ctx.ChainID) if err != nil { return "", err diff --git a/avalanchego/vms/secp256k1fx/output_owners_test.go b/avalanchego/vms/secp256k1fx/output_owners_test.go index db8e8436..02b41f68 100644 --- a/avalanchego/vms/secp256k1fx/output_owners_test.go +++ b/avalanchego/vms/secp256k1fx/output_owners_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package secp256k1fx @@ -20,7 +20,7 @@ func TestOutputOwnersVerify(t *testing.T) { { name: "nil", out: nil, - expectedErr: errNilOutput, + expectedErr: ErrNilOutput, }, { name: "threshold > num addrs", @@ -28,7 +28,7 @@ func TestOutputOwnersVerify(t *testing.T) { Threshold: 1, Addrs: []ids.ShortID{}, }, - expectedErr: errOutputUnspendable, + expectedErr: ErrOutputUnspendable, }, { name: "unoptimized", @@ -36,7 +36,7 @@ func TestOutputOwnersVerify(t *testing.T) { Threshold: 0, Addrs: []ids.ShortID{ids.GenerateTestShortID()}, }, - expectedErr: errOutputUnoptimized, + expectedErr: ErrOutputUnoptimized, }, { name: "not sorted", @@ -44,7 +44,7 @@ func TestOutputOwnersVerify(t *testing.T) { Threshold: 1, Addrs: []ids.ShortID{{2}, {1}}, }, - expectedErr: errAddrsNotSortedUnique, + expectedErr: ErrAddrsNotSortedUnique, }, { name: "not unique", @@ -52,7 +52,7 @@ func TestOutputOwnersVerify(t *testing.T) { Threshold: 1, Addrs: []ids.ShortID{{2}, {2}}, }, - expectedErr: errAddrsNotSortedUnique, + expectedErr: ErrAddrsNotSortedUnique, }, { name: "passes verification", @@ -67,8 +67,8 @@ func TestOutputOwnersVerify(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) 
{ require := require.New(t) - require.ErrorIs(tt.out.Verify(), tt.expectedErr) - require.ErrorIs(tt.out.VerifyState(), tt.expectedErr) + err := tt.out.Verify() + require.ErrorIs(err, tt.expectedErr) }) } } @@ -149,31 +149,19 @@ func TestOutputOwnerEquals(t *testing.T) { } } -func TestMarshalJSONRequiresCtxWhenAddrsArePresent(t *testing.T) { +func TestMarshalJSONDoesNotRequireCtx(t *testing.T) { require := require.New(t) out := &OutputOwners{ Threshold: 1, + Locktime: 2, Addrs: []ids.ShortID{ {1}, {0}, }, } - _, err := out.MarshalJSON() - require.ErrorIs(err, errMarshal) -} - -func TestMarshalJSONDoesNotRequireCtxWhenAddrsAreAbsent(t *testing.T) { - require := require.New(t) - out := &OutputOwners{ - Threshold: 1, - Locktime: 2, - Addrs: []ids.ShortID{}, - } - b, err := out.MarshalJSON() require.NoError(err) - jsonData := string(b) - require.Equal(jsonData, "{\"addresses\":[],\"locktime\":2,\"threshold\":1}") + require.Equal(`{"addresses":["6HgC8KRBEhXYbF4riJyJFLSHt37UNuRt","111111111111111111116DBWJs"],"locktime":2,"threshold":1}`, string(b)) } diff --git a/avalanchego/vms/secp256k1fx/transfer_input.go b/avalanchego/vms/secp256k1fx/transfer_input.go index 2fb69c64..1659820c 100644 --- a/avalanchego/vms/secp256k1fx/transfer_input.go +++ b/avalanchego/vms/secp256k1fx/transfer_input.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package secp256k1fx @@ -27,7 +27,7 @@ func (in *TransferInput) Amount() uint64 { func (in *TransferInput) Verify() error { switch { case in == nil: - return errNilInput + return ErrNilInput case in.Amt == 0: return ErrNoValueInput default: diff --git a/avalanchego/vms/secp256k1fx/transfer_input_test.go b/avalanchego/vms/secp256k1fx/transfer_input_test.go index 96d2c40e..c155d848 100644 --- a/avalanchego/vms/secp256k1fx/transfer_input_test.go +++ b/avalanchego/vms/secp256k1fx/transfer_input_test.go @@ -1,10 +1,11 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package secp256k1fx import ( "testing" + "time" "github.com/stretchr/testify/require" @@ -38,7 +39,8 @@ func TestTransferInputVerify(t *testing.T) { func TestTransferInputVerifyNil(t *testing.T) { require := require.New(t) in := (*TransferInput)(nil) - require.ErrorIs(in.Verify(), errNilInput) + err := in.Verify() + require.ErrorIs(err, ErrNilInput) } func TestTransferInputVerifyNoValue(t *testing.T) { @@ -49,7 +51,8 @@ func TestTransferInputVerifyNoValue(t *testing.T) { SigIndices: []uint32{0, 1}, }, } - require.ErrorIs(in.Verify(), ErrNoValueInput) + err := in.Verify() + require.ErrorIs(err, ErrNoValueInput) } func TestTransferInputVerifyDuplicated(t *testing.T) { @@ -60,7 +63,8 @@ func TestTransferInputVerifyDuplicated(t *testing.T) { SigIndices: []uint32{0, 0}, }, } - require.ErrorIs(in.Verify(), errNotSortedUnique) + err := in.Verify() + require.ErrorIs(err, ErrInputIndicesNotSortedUnique) } func TestTransferInputVerifyUnsorted(t *testing.T) { @@ -71,12 +75,13 @@ func TestTransferInputVerifyUnsorted(t *testing.T) { SigIndices: []uint32{1, 0}, }, } - require.ErrorIs(in.Verify(), errNotSortedUnique) + err := in.Verify() + require.ErrorIs(err, ErrInputIndicesNotSortedUnique) } func TestTransferInputSerialize(t *testing.T) { require := require.New(t) - c := linearcodec.NewDefault() 
+ c := linearcodec.NewDefault(time.Time{}) m := codec.NewDefaultManager() require.NoError(m.RegisterCodec(0, c)) diff --git a/avalanchego/vms/secp256k1fx/transfer_output.go b/avalanchego/vms/secp256k1fx/transfer_output.go index 285ffef5..ee4c5796 100644 --- a/avalanchego/vms/secp256k1fx/transfer_output.go +++ b/avalanchego/vms/secp256k1fx/transfer_output.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package secp256k1fx @@ -11,12 +11,14 @@ import ( ) var ( - _ verify.State = (*OutputOwners)(nil) + _ verify.State = (*TransferOutput)(nil) ErrNoValueOutput = errors.New("output has no value") ) type TransferOutput struct { + verify.IsState `json:"-"` + Amt uint64 `serialize:"true" json:"amount"` OutputOwners `serialize:"true"` @@ -43,7 +45,7 @@ func (out *TransferOutput) Amount() uint64 { func (out *TransferOutput) Verify() error { switch { case out == nil: - return errNilOutput + return ErrNilOutput case out.Amt == 0: return ErrNoValueOutput default: @@ -51,10 +53,6 @@ func (out *TransferOutput) Verify() error { } } -func (out *TransferOutput) VerifyState() error { - return out.Verify() -} - func (out *TransferOutput) Owners() interface{} { return &out.OutputOwners } diff --git a/avalanchego/vms/secp256k1fx/transfer_output_test.go b/avalanchego/vms/secp256k1fx/transfer_output_test.go index 08bfd173..864fb85b 100644 --- a/avalanchego/vms/secp256k1fx/transfer_output_test.go +++ b/avalanchego/vms/secp256k1fx/transfer_output_test.go @@ -1,10 +1,11 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package secp256k1fx import ( "testing" + "time" "github.com/stretchr/testify/require" @@ -47,7 +48,8 @@ func TestOutputVerify(t *testing.T) { func TestOutputVerifyNil(t *testing.T) { require := require.New(t) out := (*TransferOutput)(nil) - require.ErrorIs(out.Verify(), errNilOutput) + err := out.Verify() + require.ErrorIs(err, ErrNilOutput) } func TestOutputVerifyNoValue(t *testing.T) { @@ -62,7 +64,8 @@ func TestOutputVerifyNoValue(t *testing.T) { }, }, } - require.ErrorIs(out.Verify(), ErrNoValueOutput) + err := out.Verify() + require.ErrorIs(err, ErrNoValueOutput) } func TestOutputVerifyUnspendable(t *testing.T) { @@ -77,7 +80,8 @@ func TestOutputVerifyUnspendable(t *testing.T) { }, }, } - require.ErrorIs(out.Verify(), errOutputUnspendable) + err := out.Verify() + require.ErrorIs(err, ErrOutputUnspendable) } func TestOutputVerifyUnoptimized(t *testing.T) { @@ -92,7 +96,8 @@ func TestOutputVerifyUnoptimized(t *testing.T) { }, }, } - require.ErrorIs(out.Verify(), errOutputUnoptimized) + err := out.Verify() + require.ErrorIs(err, ErrOutputUnoptimized) } func TestOutputVerifyUnsorted(t *testing.T) { @@ -108,7 +113,8 @@ func TestOutputVerifyUnsorted(t *testing.T) { }, }, } - require.ErrorIs(out.Verify(), errAddrsNotSortedUnique) + err := out.Verify() + require.ErrorIs(err, ErrAddrsNotSortedUnique) } func TestOutputVerifyDuplicated(t *testing.T) { @@ -124,12 +130,13 @@ func TestOutputVerifyDuplicated(t *testing.T) { }, }, } - require.ErrorIs(out.Verify(), errAddrsNotSortedUnique) + err := out.Verify() + require.ErrorIs(err, ErrAddrsNotSortedUnique) } func TestOutputSerialize(t *testing.T) { require := require.New(t) - c := linearcodec.NewDefault() + c := linearcodec.NewDefault(time.Time{}) m := codec.NewDefaultManager() require.NoError(m.RegisterCodec(0, c)) diff --git a/avalanchego/vms/secp256k1fx/tx.go b/avalanchego/vms/secp256k1fx/tx.go index 81f4ee4d..5cc483c7 100644 --- a/avalanchego/vms/secp256k1fx/tx.go +++ b/avalanchego/vms/secp256k1fx/tx.go @@ -1,4 +1,4 @@ 
-// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package secp256k1fx diff --git a/avalanchego/vms/secp256k1fx/vm.go b/avalanchego/vms/secp256k1fx/vm.go index 9fe16d58..1f3cb165 100644 --- a/avalanchego/vms/secp256k1fx/vm.go +++ b/avalanchego/vms/secp256k1fx/vm.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package secp256k1fx diff --git a/avalanchego/vms/tracedvm/batched_vm.go b/avalanchego/vms/tracedvm/batched_vm.go index 47f81c9f..c4a37747 100644 --- a/avalanchego/vms/tracedvm/batched_vm.go +++ b/avalanchego/vms/tracedvm/batched_vm.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package tracedvm @@ -9,11 +9,11 @@ import ( "go.opentelemetry.io/otel/attribute" - oteltrace "go.opentelemetry.io/otel/trace" - "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" + + oteltrace "go.opentelemetry.io/otel/trace" ) func (vm *blockVM) GetAncestors( diff --git a/avalanchego/vms/tracedvm/block.go b/avalanchego/vms/tracedvm/block.go index a90a1103..e00cc359 100644 --- a/avalanchego/vms/tracedvm/block.go +++ b/avalanchego/vms/tracedvm/block.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package tracedvm @@ -10,10 +10,10 @@ import ( "go.opentelemetry.io/otel/attribute" - oteltrace "go.opentelemetry.io/otel/trace" - "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" + + oteltrace "go.opentelemetry.io/otel/trace" ) var ( diff --git a/avalanchego/vms/tracedvm/block_vm.go b/avalanchego/vms/tracedvm/block_vm.go index 1092c252..b32b3bcd 100644 --- a/avalanchego/vms/tracedvm/block_vm.go +++ b/avalanchego/vms/tracedvm/block_vm.go @@ -1,30 +1,28 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package tracedvm import ( "context" - "fmt" "go.opentelemetry.io/otel/attribute" - oteltrace "go.opentelemetry.io/otel/trace" - - "github.com/ava-labs/avalanchego/database/manager" + "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" "github.com/ava-labs/avalanchego/trace" + + oteltrace "go.opentelemetry.io/otel/trace" ) var ( _ block.ChainVM = (*blockVM)(nil) _ block.BuildBlockWithContextChainVM = (*blockVM)(nil) _ block.BatchedChainVM = (*blockVM)(nil) - _ block.HeightIndexedChainVM = (*blockVM)(nil) _ block.StateSyncableVM = (*blockVM)(nil) ) @@ -32,7 +30,6 @@ type blockVM struct { block.ChainVM buildBlockVM block.BuildBlockWithContextChainVM batchedVM block.BatchedChainVM - hVM block.HeightIndexedChainVM ssVM block.StateSyncableVM // ChainVM tags initializeTag string @@ -67,36 +64,34 @@ type blockVM struct { func NewBlockVM(vm block.ChainVM, name string, tracer trace.Tracer) block.ChainVM { buildBlockVM, _ := vm.(block.BuildBlockWithContextChainVM) batchedVM, _ := vm.(block.BatchedChainVM) - hVM, _ := vm.(block.HeightIndexedChainVM) ssVM, 
_ := vm.(block.StateSyncableVM) return &blockVM{ ChainVM: vm, buildBlockVM: buildBlockVM, batchedVM: batchedVM, - hVM: hVM, ssVM: ssVM, - initializeTag: fmt.Sprintf("%s.initialize", name), - buildBlockTag: fmt.Sprintf("%s.buildBlock", name), - parseBlockTag: fmt.Sprintf("%s.parseBlock", name), - getBlockTag: fmt.Sprintf("%s.getBlock", name), - setPreferenceTag: fmt.Sprintf("%s.setPreference", name), - lastAcceptedTag: fmt.Sprintf("%s.lastAccepted", name), - verifyTag: fmt.Sprintf("%s.verify", name), - acceptTag: fmt.Sprintf("%s.accept", name), - rejectTag: fmt.Sprintf("%s.reject", name), - optionsTag: fmt.Sprintf("%s.options", name), - shouldVerifyWithContextTag: fmt.Sprintf("%s.shouldVerifyWithContext", name), - verifyWithContextTag: fmt.Sprintf("%s.verifyWithContext", name), - buildBlockWithContextTag: fmt.Sprintf("%s.buildBlockWithContext", name), - getAncestorsTag: fmt.Sprintf("%s.getAncestors", name), - batchedParseBlockTag: fmt.Sprintf("%s.batchedParseBlock", name), - verifyHeightIndexTag: fmt.Sprintf("%s.verifyHeightIndex", name), - getBlockIDAtHeightTag: fmt.Sprintf("%s.getBlockIDAtHeight", name), - stateSyncEnabledTag: fmt.Sprintf("%s.stateSyncEnabled", name), - getOngoingSyncStateSummaryTag: fmt.Sprintf("%s.getOngoingSyncStateSummary", name), - getLastStateSummaryTag: fmt.Sprintf("%s.getLastStateSummary", name), - parseStateSummaryTag: fmt.Sprintf("%s.parseStateSummary", name), - getStateSummaryTag: fmt.Sprintf("%s.getStateSummary", name), + initializeTag: name + ".initialize", + buildBlockTag: name + ".buildBlock", + parseBlockTag: name + ".parseBlock", + getBlockTag: name + ".getBlock", + setPreferenceTag: name + ".setPreference", + lastAcceptedTag: name + ".lastAccepted", + verifyTag: name + ".verify", + acceptTag: name + ".accept", + rejectTag: name + ".reject", + optionsTag: name + ".options", + shouldVerifyWithContextTag: name + ".shouldVerifyWithContext", + verifyWithContextTag: name + ".verifyWithContext", + buildBlockWithContextTag: name + 
".buildBlockWithContext", + getAncestorsTag: name + ".getAncestors", + batchedParseBlockTag: name + ".batchedParseBlock", + verifyHeightIndexTag: name + ".verifyHeightIndex", + getBlockIDAtHeightTag: name + ".getBlockIDAtHeight", + stateSyncEnabledTag: name + ".stateSyncEnabled", + getOngoingSyncStateSummaryTag: name + ".getOngoingSyncStateSummary", + getLastStateSummaryTag: name + ".getLastStateSummary", + parseStateSummaryTag: name + ".parseStateSummary", + getStateSummaryTag: name + ".getStateSummary", tracer: tracer, } } @@ -104,7 +99,7 @@ func NewBlockVM(vm block.ChainVM, name string, tracer trace.Tracer) block.ChainV func (vm *blockVM) Initialize( ctx context.Context, chainCtx *snow.Context, - db manager.Manager, + db database.Database, genesisBytes, upgradeBytes, configBytes []byte, @@ -180,3 +175,19 @@ func (vm *blockVM) LastAccepted(ctx context.Context) (ids.ID, error) { return vm.ChainVM.LastAccepted(ctx) } + +func (vm *blockVM) VerifyHeightIndex(ctx context.Context) error { + ctx, span := vm.tracer.Start(ctx, vm.verifyHeightIndexTag) + defer span.End() + + return vm.ChainVM.VerifyHeightIndex(ctx) +} + +func (vm *blockVM) GetBlockIDAtHeight(ctx context.Context, height uint64) (ids.ID, error) { + ctx, span := vm.tracer.Start(ctx, vm.getBlockIDAtHeightTag, oteltrace.WithAttributes( + attribute.Int64("height", int64(height)), + )) + defer span.End() + + return vm.ChainVM.GetBlockIDAtHeight(ctx, height) +} diff --git a/avalanchego/vms/tracedvm/build_block_with_context_vm.go b/avalanchego/vms/tracedvm/build_block_with_context_vm.go index 1d9e9319..e4af33bd 100644 --- a/avalanchego/vms/tracedvm/build_block_with_context_vm.go +++ b/avalanchego/vms/tracedvm/build_block_with_context_vm.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package tracedvm @@ -8,10 +8,10 @@ import ( "go.opentelemetry.io/otel/attribute" - oteltrace "go.opentelemetry.io/otel/trace" - "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" + + oteltrace "go.opentelemetry.io/otel/trace" ) func (vm *blockVM) BuildBlockWithContext(ctx context.Context, blockCtx *block.Context) (snowman.Block, error) { diff --git a/avalanchego/vms/tracedvm/height_indexed_vm.go b/avalanchego/vms/tracedvm/height_indexed_vm.go deleted file mode 100644 index 4d240461..00000000 --- a/avalanchego/vms/tracedvm/height_indexed_vm.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package tracedvm - -import ( - "context" - - "go.opentelemetry.io/otel/attribute" - - oteltrace "go.opentelemetry.io/otel/trace" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/engine/snowman/block" -) - -func (vm *blockVM) VerifyHeightIndex(ctx context.Context) error { - if vm.hVM == nil { - return block.ErrHeightIndexedVMNotImplemented - } - - ctx, span := vm.tracer.Start(ctx, vm.verifyHeightIndexTag) - defer span.End() - - return vm.hVM.VerifyHeightIndex(ctx) -} - -func (vm *blockVM) GetBlockIDAtHeight(ctx context.Context, height uint64) (ids.ID, error) { - if vm.hVM == nil { - return ids.Empty, block.ErrHeightIndexedVMNotImplemented - } - - ctx, span := vm.tracer.Start(ctx, vm.getBlockIDAtHeightTag, oteltrace.WithAttributes( - attribute.Int64("height", int64(height)), - )) - defer span.End() - - return vm.hVM.GetBlockIDAtHeight(ctx, height) -} diff --git a/avalanchego/vms/tracedvm/state_syncable_vm.go b/avalanchego/vms/tracedvm/state_syncable_vm.go index 75738462..4d3b6921 100644 --- a/avalanchego/vms/tracedvm/state_syncable_vm.go +++ b/avalanchego/vms/tracedvm/state_syncable_vm.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
+// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package tracedvm @@ -8,9 +8,9 @@ import ( "go.opentelemetry.io/otel/attribute" - oteltrace "go.opentelemetry.io/otel/trace" - "github.com/ava-labs/avalanchego/snow/engine/snowman/block" + + oteltrace "go.opentelemetry.io/otel/trace" ) func (vm *blockVM) StateSyncEnabled(ctx context.Context) (bool, error) { diff --git a/avalanchego/vms/tracedvm/tx.go b/avalanchego/vms/tracedvm/tx.go index 7e18efcb..2f04e72d 100644 --- a/avalanchego/vms/tracedvm/tx.go +++ b/avalanchego/vms/tracedvm/tx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package tracedvm @@ -8,10 +8,10 @@ import ( "go.opentelemetry.io/otel/attribute" - oteltrace "go.opentelemetry.io/otel/trace" - "github.com/ava-labs/avalanchego/snow/consensus/snowstorm" "github.com/ava-labs/avalanchego/trace" + + oteltrace "go.opentelemetry.io/otel/trace" ) var _ snowstorm.Tx = (*tracedTx)(nil) diff --git a/avalanchego/vms/tracedvm/vertex_vm.go b/avalanchego/vms/tracedvm/vertex_vm.go index 465e0077..c4cf1998 100644 --- a/avalanchego/vms/tracedvm/vertex_vm.go +++ b/avalanchego/vms/tracedvm/vertex_vm.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package tracedvm @@ -8,15 +8,14 @@ import ( "go.opentelemetry.io/otel/attribute" - oteltrace "go.opentelemetry.io/otel/trace" - - "github.com/ava-labs/avalanchego/database/manager" - "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/consensus/snowstorm" "github.com/ava-labs/avalanchego/snow/engine/avalanche/vertex" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/trace" + + oteltrace "go.opentelemetry.io/otel/trace" ) var _ vertex.LinearizableVMWithEngine = (*vertexVM)(nil) @@ -36,7 +35,7 @@ func NewVertexVM(vm vertex.LinearizableVMWithEngine, tracer trace.Tracer) vertex func (vm *vertexVM) Initialize( ctx context.Context, chainCtx *snow.Context, - db manager.Manager, + db database.Database, genesisBytes, upgradeBytes, configBytes []byte, @@ -60,13 +59,6 @@ func (vm *vertexVM) Initialize( ) } -func (vm *vertexVM) PendingTxs(ctx context.Context) []snowstorm.Tx { - ctx, span := vm.tracer.Start(ctx, "vertexVM.PendingTxs") - defer span.End() - - return vm.LinearizableVMWithEngine.PendingTxs(ctx) -} - func (vm *vertexVM) ParseTx(ctx context.Context, txBytes []byte) (snowstorm.Tx, error) { ctx, span := vm.tracer.Start(ctx, "vertexVM.ParseTx", oteltrace.WithAttributes( attribute.Int("txLen", len(txBytes)), @@ -79,16 +71,3 @@ func (vm *vertexVM) ParseTx(ctx context.Context, txBytes []byte) (snowstorm.Tx, tracer: vm.tracer, }, err } - -func (vm *vertexVM) GetTx(ctx context.Context, txID ids.ID) (snowstorm.Tx, error) { - ctx, span := vm.tracer.Start(ctx, "vertexVM.GetTx", oteltrace.WithAttributes( - attribute.Stringer("txID", txID), - )) - defer span.End() - - tx, err := vm.LinearizableVMWithEngine.GetTx(ctx, txID) - return &tracedTx{ - Tx: tx, - tracer: vm.tracer, - }, err -} diff --git a/avalanchego/vms/types/blob_data.go b/avalanchego/vms/types/blob_data.go index cf5855ad..cee4fe7b 100644 --- 
a/avalanchego/vms/types/blob_data.go +++ b/avalanchego/vms/types/blob_data.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package types diff --git a/avalanchego/wallet/chain/c/backend.go b/avalanchego/wallet/chain/c/backend.go new file mode 100644 index 00000000..3301015f --- /dev/null +++ b/avalanchego/wallet/chain/c/backend.go @@ -0,0 +1,154 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package c + +import ( + "errors" + "fmt" + "math/big" + "sync" + + "github.com/ava-labs/coreth/plugin/evm" + + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/utils/math" + "github.com/ava-labs/avalanchego/vms/components/avax" + "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" + + stdcontext "context" + ethcommon "github.com/ethereum/go-ethereum/common" +) + +var ( + _ Backend = (*backend)(nil) + + errUnknownTxType = errors.New("unknown tx type") +) + +// Backend defines the full interface required to support a C-chain wallet. 
+type Backend interface { + common.ChainUTXOs + BuilderBackend + SignerBackend + + AcceptAtomicTx(ctx stdcontext.Context, tx *evm.Tx) error +} + +type backend struct { + Context + common.ChainUTXOs + + accountsLock sync.RWMutex + accounts map[ethcommon.Address]*Account +} + +type Account struct { + Balance *big.Int + Nonce uint64 +} + +func NewBackend( + ctx Context, + utxos common.ChainUTXOs, + accounts map[ethcommon.Address]*Account, +) Backend { + return &backend{ + Context: ctx, + ChainUTXOs: utxos, + accounts: accounts, + } +} + +func (b *backend) AcceptAtomicTx(ctx stdcontext.Context, tx *evm.Tx) error { + switch tx := tx.UnsignedAtomicTx.(type) { + case *evm.UnsignedImportTx: + for _, input := range tx.ImportedInputs { + utxoID := input.InputID() + if err := b.RemoveUTXO(ctx, tx.SourceChain, utxoID); err != nil { + return err + } + } + + b.accountsLock.Lock() + defer b.accountsLock.Unlock() + + for _, output := range tx.Outs { + account, ok := b.accounts[output.Address] + if !ok { + continue + } + + balance := new(big.Int).SetUint64(output.Amount) + balance.Mul(balance, avaxConversionRate) + account.Balance.Add(account.Balance, balance) + } + case *evm.UnsignedExportTx: + txID := tx.ID() + for i, out := range tx.ExportedOutputs { + err := b.AddUTXO( + ctx, + tx.DestinationChain, + &avax.UTXO{ + UTXOID: avax.UTXOID{ + TxID: txID, + OutputIndex: uint32(i), + }, + Asset: avax.Asset{ID: out.AssetID()}, + Out: out.Out, + }, + ) + if err != nil { + return err + } + } + + b.accountsLock.Lock() + defer b.accountsLock.Unlock() + + for _, input := range tx.Ins { + account, ok := b.accounts[input.Address] + if !ok { + continue + } + + balance := new(big.Int).SetUint64(input.Amount) + balance.Mul(balance, avaxConversionRate) + if account.Balance.Cmp(balance) == -1 { + return errInsufficientFunds + } + account.Balance.Sub(account.Balance, balance) + + newNonce, err := math.Add64(input.Nonce, 1) + if err != nil { + return err + } + account.Nonce = newNonce + } + default: 
+ return fmt.Errorf("%w: %T", errUnknownTxType, tx) + } + return nil +} + +func (b *backend) Balance(_ stdcontext.Context, addr ethcommon.Address) (*big.Int, error) { + b.accountsLock.RLock() + defer b.accountsLock.RUnlock() + + account, exists := b.accounts[addr] + if !exists { + return nil, database.ErrNotFound + } + return account.Balance, nil +} + +func (b *backend) Nonce(_ stdcontext.Context, addr ethcommon.Address) (uint64, error) { + b.accountsLock.RLock() + defer b.accountsLock.RUnlock() + + account, exists := b.accounts[addr] + if !exists { + return 0, database.ErrNotFound + } + return account.Nonce, nil +} diff --git a/avalanchego/wallet/chain/c/builder.go b/avalanchego/wallet/chain/c/builder.go new file mode 100644 index 00000000..28e1eccc --- /dev/null +++ b/avalanchego/wallet/chain/c/builder.go @@ -0,0 +1,410 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package c + +import ( + "errors" + "math/big" + + "github.com/ava-labs/coreth/plugin/evm" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/math" + "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/vms/components/avax" + "github.com/ava-labs/avalanchego/vms/secp256k1fx" + "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" + + stdcontext "context" + ethcommon "github.com/ethereum/go-ethereum/common" +) + +const avaxConversionRateInt = 1_000_000_000 + +var ( + _ Builder = (*builder)(nil) + + errInsufficientFunds = errors.New("insufficient funds") + + // avaxConversionRate is the conversion rate between the smallest + // denomination on the X-Chain and P-chain, 1 nAVAX, and the smallest + // denomination on the C-Chain 1 wei. Where 1 nAVAX = 1 gWei. + // + // This is only required for AVAX because the denomination of 1 AVAX is 9 + // decimal places on the X and P chains, but is 18 decimal places within the + // EVM. 
+ avaxConversionRate = big.NewInt(avaxConversionRateInt) +) + +// Builder provides a convenient interface for building unsigned C-chain +// transactions. +type Builder interface { + // GetBalance calculates the amount of AVAX that this builder has control + // over. + GetBalance( + options ...common.Option, + ) (*big.Int, error) + + // GetImportableBalance calculates the amount of AVAX that this builder + // could import from the provided chain. + // + // - [chainID] specifies the chain the funds are from. + GetImportableBalance( + chainID ids.ID, + options ...common.Option, + ) (uint64, error) + + // NewImportTx creates an import transaction that attempts to consume all + // the available UTXOs and import the funds to [to]. + // + // - [chainID] specifies the chain to be importing funds from. + // - [to] specifies where to send the imported funds to. + // - [baseFee] specifies the fee price willing to be paid by this tx. + NewImportTx( + chainID ids.ID, + to ethcommon.Address, + baseFee *big.Int, + options ...common.Option, + ) (*evm.UnsignedImportTx, error) + + // NewExportTx creates an export transaction that attempts to send all the + // provided [outputs] to the requested [chainID]. + // + // - [chainID] specifies the chain to be exporting the funds to. + // - [outputs] specifies the outputs to send to the [chainID]. + // - [baseFee] specifies the fee price willing to be paid by this tx. + NewExportTx( + chainID ids.ID, + outputs []*secp256k1fx.TransferOutput, + baseFee *big.Int, + options ...common.Option, + ) (*evm.UnsignedExportTx, error) +} + +// BuilderBackend specifies the required information needed to build unsigned +// C-chain transactions. 
+type BuilderBackend interface { + Context + + UTXOs(ctx stdcontext.Context, sourceChainID ids.ID) ([]*avax.UTXO, error) + Balance(ctx stdcontext.Context, addr ethcommon.Address) (*big.Int, error) + Nonce(ctx stdcontext.Context, addr ethcommon.Address) (uint64, error) +} + +type builder struct { + avaxAddrs set.Set[ids.ShortID] + ethAddrs set.Set[ethcommon.Address] + backend BuilderBackend +} + +// NewBuilder returns a new transaction builder. +// +// - [avaxAddrs] is the set of addresses in the AVAX format that the builder +// assumes can be used when signing the transactions in the future. +// - [ethAddrs] is the set of addresses in the Eth format that the builder +// assumes can be used when signing the transactions in the future. +// - [backend] provides the required access to the chain's context and state +// to build out the transactions. +func NewBuilder( + avaxAddrs set.Set[ids.ShortID], + ethAddrs set.Set[ethcommon.Address], + backend BuilderBackend, +) Builder { + return &builder{ + avaxAddrs: avaxAddrs, + ethAddrs: ethAddrs, + backend: backend, + } +} + +func (b *builder) GetBalance( + options ...common.Option, +) (*big.Int, error) { + var ( + ops = common.NewOptions(options) + ctx = ops.Context() + addrs = ops.EthAddresses(b.ethAddrs) + totalBalance = new(big.Int) + ) + for addr := range addrs { + balance, err := b.backend.Balance(ctx, addr) + if err != nil { + return nil, err + } + totalBalance.Add(totalBalance, balance) + } + + return totalBalance, nil +} + +func (b *builder) GetImportableBalance( + chainID ids.ID, + options ...common.Option, +) (uint64, error) { + ops := common.NewOptions(options) + utxos, err := b.backend.UTXOs(ops.Context(), chainID) + if err != nil { + return 0, err + } + + var ( + addrs = ops.Addresses(b.avaxAddrs) + minIssuanceTime = ops.MinIssuanceTime() + avaxAssetID = b.backend.AVAXAssetID() + balance uint64 + ) + for _, utxo := range utxos { + amount, _, ok := getSpendableAmount(utxo, addrs, minIssuanceTime, avaxAssetID) + 
if !ok { + continue + } + + newBalance, err := math.Add64(balance, amount) + if err != nil { + return 0, err + } + balance = newBalance + } + + return balance, nil +} + +func (b *builder) NewImportTx( + chainID ids.ID, + to ethcommon.Address, + baseFee *big.Int, + options ...common.Option, +) (*evm.UnsignedImportTx, error) { + ops := common.NewOptions(options) + utxos, err := b.backend.UTXOs(ops.Context(), chainID) + if err != nil { + return nil, err + } + + var ( + addrs = ops.Addresses(b.avaxAddrs) + minIssuanceTime = ops.MinIssuanceTime() + avaxAssetID = b.backend.AVAXAssetID() + + importedInputs = make([]*avax.TransferableInput, 0, len(utxos)) + importedAmount uint64 + ) + for _, utxo := range utxos { + amount, inputSigIndices, ok := getSpendableAmount(utxo, addrs, minIssuanceTime, avaxAssetID) + if !ok { + continue + } + + importedInputs = append(importedInputs, &avax.TransferableInput{ + UTXOID: utxo.UTXOID, + Asset: utxo.Asset, + FxID: secp256k1fx.ID, + In: &secp256k1fx.TransferInput{ + Amt: amount, + Input: secp256k1fx.Input{ + SigIndices: inputSigIndices, + }, + }, + }) + + newImportedAmount, err := math.Add64(importedAmount, amount) + if err != nil { + return nil, err + } + importedAmount = newImportedAmount + } + + utils.Sort(importedInputs) + tx := &evm.UnsignedImportTx{ + NetworkID: b.backend.NetworkID(), + BlockchainID: b.backend.BlockchainID(), + SourceChain: chainID, + ImportedInputs: importedInputs, + } + + // We must initialize the bytes of the tx to calculate the initial cost + wrappedTx := &evm.Tx{UnsignedAtomicTx: tx} + if err := wrappedTx.Sign(evm.Codec, nil); err != nil { + return nil, err + } + + gasUsedWithoutOutput, err := tx.GasUsed(true /*=IsApricotPhase5*/) + if err != nil { + return nil, err + } + gasUsedWithOutput := gasUsedWithoutOutput + evm.EVMOutputGas + + txFee, err := evm.CalculateDynamicFee(gasUsedWithOutput, baseFee) + if err != nil { + return nil, err + } + + if importedAmount <= txFee { + return nil, errInsufficientFunds + } 
+ + tx.Outs = []evm.EVMOutput{{ + Address: to, + Amount: importedAmount - txFee, + AssetID: avaxAssetID, + }} + return tx, nil +} + +func (b *builder) NewExportTx( + chainID ids.ID, + outputs []*secp256k1fx.TransferOutput, + baseFee *big.Int, + options ...common.Option, +) (*evm.UnsignedExportTx, error) { + var ( + avaxAssetID = b.backend.AVAXAssetID() + exportedOutputs = make([]*avax.TransferableOutput, len(outputs)) + exportedAmount uint64 + ) + for i, output := range outputs { + exportedOutputs[i] = &avax.TransferableOutput{ + Asset: avax.Asset{ID: avaxAssetID}, + FxID: secp256k1fx.ID, + Out: output, + } + + newExportedAmount, err := math.Add64(exportedAmount, output.Amt) + if err != nil { + return nil, err + } + exportedAmount = newExportedAmount + } + + avax.SortTransferableOutputs(exportedOutputs, evm.Codec) + tx := &evm.UnsignedExportTx{ + NetworkID: b.backend.NetworkID(), + BlockchainID: b.backend.BlockchainID(), + DestinationChain: chainID, + ExportedOutputs: exportedOutputs, + } + + // We must initialize the bytes of the tx to calculate the initial cost + wrappedTx := &evm.Tx{UnsignedAtomicTx: tx} + if err := wrappedTx.Sign(evm.Codec, nil); err != nil { + return nil, err + } + + cost, err := tx.GasUsed(true /*=IsApricotPhase5*/) + if err != nil { + return nil, err + } + + initialFee, err := evm.CalculateDynamicFee(cost, baseFee) + if err != nil { + return nil, err + } + + amountToConsume, err := math.Add64(exportedAmount, initialFee) + if err != nil { + return nil, err + } + + var ( + ops = common.NewOptions(options) + ctx = ops.Context() + addrs = ops.EthAddresses(b.ethAddrs) + inputs = make([]evm.EVMInput, 0, addrs.Len()) + ) + for addr := range addrs { + if amountToConsume == 0 { + break + } + + prevFee, err := evm.CalculateDynamicFee(cost, baseFee) + if err != nil { + return nil, err + } + + newCost := cost + evm.EVMInputGas + newFee, err := evm.CalculateDynamicFee(newCost, baseFee) + if err != nil { + return nil, err + } + + additionalFee := newFee - 
prevFee + + balance, err := b.backend.Balance(ctx, addr) + if err != nil { + return nil, err + } + + // Since the asset is AVAX, we divide by the avaxConversionRate to + // convert back to the correct denomination of AVAX that can be + // exported. + avaxBalance := new(big.Int).Div(balance, avaxConversionRate).Uint64() + + // If the balance for [addr] is insufficient to cover the additional + // cost of adding an input to the transaction, skip adding the input + // altogether. + if avaxBalance <= additionalFee { + continue + } + + // Update the cost for the next iteration + cost = newCost + + amountToConsume, err = math.Add64(amountToConsume, additionalFee) + if err != nil { + return nil, err + } + + nonce, err := b.backend.Nonce(ctx, addr) + if err != nil { + return nil, err + } + + inputAmount := min(amountToConsume, avaxBalance) + inputs = append(inputs, evm.EVMInput{ + Address: addr, + Amount: inputAmount, + AssetID: avaxAssetID, + Nonce: nonce, + }) + amountToConsume -= inputAmount + } + + if amountToConsume > 0 { + return nil, errInsufficientFunds + } + + utils.Sort(inputs) + tx.Ins = inputs + + snowCtx, err := newSnowContext(b.backend) + if err != nil { + return nil, err + } + for _, out := range tx.ExportedOutputs { + out.InitCtx(snowCtx) + } + return tx, nil +} + +func getSpendableAmount( + utxo *avax.UTXO, + addrs set.Set[ids.ShortID], + minIssuanceTime uint64, + avaxAssetID ids.ID, +) (uint64, []uint32, bool) { + if utxo.Asset.ID != avaxAssetID { + // Only AVAX can be imported + return 0, nil, false + } + + out, ok := utxo.Out.(*secp256k1fx.TransferOutput) + if !ok { + // Can't import an unknown transfer output type + return 0, nil, false + } + + inputSigIndices, ok := common.MatchOwners(&out.OutputOwners, addrs, minIssuanceTime) + return out.Amt, inputSigIndices, ok +} diff --git a/avalanchego/wallet/chain/c/builder_with_options.go b/avalanchego/wallet/chain/c/builder_with_options.go new file mode 100644 index 00000000..c11c41d6 --- /dev/null +++ 
b/avalanchego/wallet/chain/c/builder_with_options.go @@ -0,0 +1,83 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package c + +import ( + "math/big" + + "github.com/ava-labs/coreth/plugin/evm" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/vms/secp256k1fx" + "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" + + ethcommon "github.com/ethereum/go-ethereum/common" +) + +var _ Builder = (*builderWithOptions)(nil) + +type builderWithOptions struct { + Builder + options []common.Option +} + +// NewBuilderWithOptions returns a new transaction builder that will use the +// given options by default. +// +// - [builder] is the builder that will be called to perform the underlying +// operations. +// - [options] will be provided to the builder in addition to the options +// provided in the method calls. +func NewBuilderWithOptions(builder Builder, options ...common.Option) Builder { + return &builderWithOptions{ + Builder: builder, + options: options, + } +} + +func (b *builderWithOptions) GetBalance( + options ...common.Option, +) (*big.Int, error) { + return b.Builder.GetBalance( + common.UnionOptions(b.options, options)..., + ) +} + +func (b *builderWithOptions) GetImportableBalance( + chainID ids.ID, + options ...common.Option, +) (uint64, error) { + return b.Builder.GetImportableBalance( + chainID, + common.UnionOptions(b.options, options)..., + ) +} + +func (b *builderWithOptions) NewImportTx( + chainID ids.ID, + to ethcommon.Address, + baseFee *big.Int, + options ...common.Option, +) (*evm.UnsignedImportTx, error) { + return b.Builder.NewImportTx( + chainID, + to, + baseFee, + common.UnionOptions(b.options, options)..., + ) +} + +func (b *builderWithOptions) NewExportTx( + chainID ids.ID, + outputs []*secp256k1fx.TransferOutput, + baseFee *big.Int, + options ...common.Option, +) (*evm.UnsignedExportTx, error) { + return b.Builder.NewExportTx( + chainID, + 
outputs, + baseFee, + common.UnionOptions(b.options, options)..., + ) +} diff --git a/avalanchego/wallet/chain/c/context.go b/avalanchego/wallet/chain/c/context.go new file mode 100644 index 00000000..dc0537e2 --- /dev/null +++ b/avalanchego/wallet/chain/c/context.go @@ -0,0 +1,102 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package c + +import ( + "github.com/ava-labs/avalanchego/api/info" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/vms/avm" + + stdcontext "context" +) + +const Alias = "C" + +var _ Context = (*context)(nil) + +type Context interface { + NetworkID() uint32 + BlockchainID() ids.ID + AVAXAssetID() ids.ID +} + +type context struct { + networkID uint32 + blockchainID ids.ID + avaxAssetID ids.ID +} + +func NewContextFromURI(ctx stdcontext.Context, uri string) (Context, error) { + infoClient := info.NewClient(uri) + xChainClient := avm.NewClient(uri, "X") + return NewContextFromClients(ctx, infoClient, xChainClient) +} + +func NewContextFromClients( + ctx stdcontext.Context, + infoClient info.Client, + xChainClient avm.Client, +) (Context, error) { + networkID, err := infoClient.GetNetworkID(ctx) + if err != nil { + return nil, err + } + + chainID, err := infoClient.GetBlockchainID(ctx, Alias) + if err != nil { + return nil, err + } + + asset, err := xChainClient.GetAssetDescription(ctx, "AVAX") + if err != nil { + return nil, err + } + + return NewContext( + networkID, + chainID, + asset.AssetID, + ), nil +} + +func NewContext( + networkID uint32, + blockchainID ids.ID, + avaxAssetID ids.ID, +) Context { + return &context{ + networkID: networkID, + blockchainID: blockchainID, + avaxAssetID: avaxAssetID, + } +} + +func (c *context) NetworkID() uint32 { + return c.networkID +} + +func (c *context) BlockchainID() 
ids.ID { + return c.blockchainID +} + +func (c *context) AVAXAssetID() ids.ID { + return c.avaxAssetID +} + +func newSnowContext(c Context) (*snow.Context, error) { + chainID := c.BlockchainID() + lookup := ids.NewAliaser() + return &snow.Context{ + NetworkID: c.NetworkID(), + SubnetID: constants.PrimaryNetworkID, + ChainID: chainID, + CChainID: chainID, + AVAXAssetID: c.AVAXAssetID(), + Log: logging.NoLog{}, + BCLookup: lookup, + }, lookup.Alias(chainID, Alias) +} diff --git a/avalanchego/wallet/chain/c/signer.go b/avalanchego/wallet/chain/c/signer.go new file mode 100644 index 00000000..24de72c1 --- /dev/null +++ b/avalanchego/wallet/chain/c/signer.go @@ -0,0 +1,229 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package c + +import ( + "errors" + "fmt" + + "github.com/ava-labs/coreth/plugin/evm" + "github.com/ethereum/go-ethereum/common" + + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/crypto/keychain" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + "github.com/ava-labs/avalanchego/utils/hashing" + "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/vms/components/avax" + "github.com/ava-labs/avalanchego/vms/components/verify" + "github.com/ava-labs/avalanchego/vms/secp256k1fx" + + stdcontext "context" +) + +const version = 0 + +var ( + _ Signer = (*txSigner)(nil) + + errUnknownInputType = errors.New("unknown input type") + errUnknownCredentialType = errors.New("unknown credential type") + errUnknownOutputType = errors.New("unknown output type") + errInvalidUTXOSigIndex = errors.New("invalid UTXO signature index") + + emptySig [secp256k1.SignatureLen]byte +) + +type Signer interface { + // SignAtomic adds as many missing signatures as possible to the provided + // transaction. 
+ // + // If there are already some signatures on the transaction, those signatures + // will not be removed. + // + // If the signer doesn't have the ability to provide a required signature, + // the signature slot will be skipped without reporting an error. + SignAtomic(ctx stdcontext.Context, tx *evm.Tx) error +} + +type EthKeychain interface { + // The returned Signer can provide a signature for [addr] + GetEth(addr common.Address) (keychain.Signer, bool) + // Returns the set of addresses for which the accessor keeps an associated + // signer + EthAddresses() set.Set[common.Address] +} + +type SignerBackend interface { + GetUTXO(ctx stdcontext.Context, chainID, utxoID ids.ID) (*avax.UTXO, error) +} + +type txSigner struct { + avaxKC keychain.Keychain + ethKC EthKeychain + backend SignerBackend +} + +func NewSigner(avaxKC keychain.Keychain, ethKC EthKeychain, backend SignerBackend) Signer { + return &txSigner{ + avaxKC: avaxKC, + ethKC: ethKC, + backend: backend, + } +} + +func (s *txSigner) SignAtomic(ctx stdcontext.Context, tx *evm.Tx) error { + switch utx := tx.UnsignedAtomicTx.(type) { + case *evm.UnsignedImportTx: + signers, err := s.getImportSigners(ctx, utx.SourceChain, utx.ImportedInputs) + if err != nil { + return err + } + return sign(tx, true, signers) + case *evm.UnsignedExportTx: + signers := s.getExportSigners(utx.Ins) + return sign(tx, true, signers) + default: + return fmt.Errorf("%w: %T", errUnknownTxType, tx) + } +} + +func (s *txSigner) getImportSigners(ctx stdcontext.Context, sourceChainID ids.ID, ins []*avax.TransferableInput) ([][]keychain.Signer, error) { + txSigners := make([][]keychain.Signer, len(ins)) + for credIndex, transferInput := range ins { + input, ok := transferInput.In.(*secp256k1fx.TransferInput) + if !ok { + return nil, errUnknownInputType + } + + inputSigners := make([]keychain.Signer, len(input.SigIndices)) + txSigners[credIndex] = inputSigners + + utxoID := transferInput.InputID() + utxo, err := s.backend.GetUTXO(ctx, 
sourceChainID, utxoID) + if err == database.ErrNotFound { + // If we don't have access to the UTXO, then we can't sign this + // transaction. However, we can attempt to partially sign it. + continue + } + if err != nil { + return nil, err + } + + out, ok := utxo.Out.(*secp256k1fx.TransferOutput) + if !ok { + return nil, errUnknownOutputType + } + + for sigIndex, addrIndex := range input.SigIndices { + if addrIndex >= uint32(len(out.Addrs)) { + return nil, errInvalidUTXOSigIndex + } + + addr := out.Addrs[addrIndex] + key, ok := s.avaxKC.Get(addr) + if !ok { + // If we don't have access to the key, then we can't sign this + // transaction. However, we can attempt to partially sign it. + continue + } + inputSigners[sigIndex] = key + } + } + return txSigners, nil +} + +func (s *txSigner) getExportSigners(ins []evm.EVMInput) [][]keychain.Signer { + txSigners := make([][]keychain.Signer, len(ins)) + for credIndex, input := range ins { + inputSigners := make([]keychain.Signer, 1) + txSigners[credIndex] = inputSigners + + key, ok := s.ethKC.GetEth(input.Address) + if !ok { + // If we don't have access to the key, then we can't sign this + // transaction. However, we can attempt to partially sign it. + continue + } + inputSigners[0] = key + } + return txSigners +} + +func SignUnsignedAtomic(ctx stdcontext.Context, signer Signer, utx evm.UnsignedAtomicTx) (*evm.Tx, error) { + tx := &evm.Tx{UnsignedAtomicTx: utx} + return tx, signer.SignAtomic(ctx, tx) +} + +// TODO: remove [signHash] after the ledger supports signing all transactions. 
+func sign(tx *evm.Tx, signHash bool, txSigners [][]keychain.Signer) error { + unsignedBytes, err := evm.Codec.Marshal(version, &tx.UnsignedAtomicTx) + if err != nil { + return fmt.Errorf("couldn't marshal unsigned tx: %w", err) + } + unsignedHash := hashing.ComputeHash256(unsignedBytes) + + if expectedLen := len(txSigners); expectedLen != len(tx.Creds) { + tx.Creds = make([]verify.Verifiable, expectedLen) + } + + sigCache := make(map[ids.ShortID][secp256k1.SignatureLen]byte) + for credIndex, inputSigners := range txSigners { + credIntf := tx.Creds[credIndex] + if credIntf == nil { + credIntf = &secp256k1fx.Credential{} + tx.Creds[credIndex] = credIntf + } + + cred, ok := credIntf.(*secp256k1fx.Credential) + if !ok { + return errUnknownCredentialType + } + if expectedLen := len(inputSigners); expectedLen != len(cred.Sigs) { + cred.Sigs = make([][secp256k1.SignatureLen]byte, expectedLen) + } + + for sigIndex, signer := range inputSigners { + if signer == nil { + // If we don't have access to the key, then we can't sign this + // transaction. However, we can attempt to partially sign it. + continue + } + addr := signer.Address() + if sig := cred.Sigs[sigIndex]; sig != emptySig { + // If this signature has already been populated, we can just + // copy the needed signature for the future. + sigCache[addr] = sig + continue + } + + if sig, exists := sigCache[addr]; exists { + // If this key has already produced a signature, we can just + // copy the previous signature. 
+ cred.Sigs[sigIndex] = sig + continue + } + + var sig []byte + if signHash { + sig, err = signer.SignHash(unsignedHash) + } else { + sig, err = signer.Sign(unsignedBytes) + } + if err != nil { + return fmt.Errorf("problem signing tx: %w", err) + } + copy(cred.Sigs[sigIndex][:], sig) + sigCache[addr] = cred.Sigs[sigIndex] + } + } + + signedBytes, err := evm.Codec.Marshal(version, tx) + if err != nil { + return fmt.Errorf("couldn't marshal tx: %w", err) + } + tx.Initialize(unsignedBytes, signedBytes) + return nil +} diff --git a/avalanchego/wallet/chain/c/wallet.go b/avalanchego/wallet/chain/c/wallet.go new file mode 100644 index 00000000..1f8d6d25 --- /dev/null +++ b/avalanchego/wallet/chain/c/wallet.go @@ -0,0 +1,206 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package c + +import ( + "errors" + "math/big" + "time" + + "github.com/ava-labs/coreth/ethclient" + "github.com/ava-labs/coreth/plugin/evm" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/vms/secp256k1fx" + "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" + + ethcommon "github.com/ethereum/go-ethereum/common" +) + +var ( + _ Wallet = (*wallet)(nil) + + errNotCommitted = errors.New("not committed") +) + +type Wallet interface { + Context + + // Builder returns the builder that will be used to create the transactions. + Builder() Builder + + // Signer returns the signer that will be used to sign the transactions. + Signer() Signer + + // IssueImportTx creates, signs, and issues an import transaction that + // attempts to consume all the available UTXOs and import the funds to [to]. + // + // - [chainID] specifies the chain to be importing funds from. + // - [to] specifies where to send the imported funds to. 
+ IssueImportTx( + chainID ids.ID, + to ethcommon.Address, + options ...common.Option, + ) (*evm.Tx, error) + + // IssueExportTx creates, signs, and issues an export transaction that + // attempts to send all the provided [outputs] to the requested [chainID]. + // + // - [chainID] specifies the chain to be exporting the funds to. + // - [outputs] specifies the outputs to send to the [chainID]. + IssueExportTx( + chainID ids.ID, + outputs []*secp256k1fx.TransferOutput, + options ...common.Option, + ) (*evm.Tx, error) + + // IssueUnsignedTx signs and issues the unsigned tx. + IssueUnsignedAtomicTx( + utx evm.UnsignedAtomicTx, + options ...common.Option, + ) (*evm.Tx, error) + + // IssueAtomicTx issues the signed tx. + IssueAtomicTx( + tx *evm.Tx, + options ...common.Option, + ) error +} + +func NewWallet( + builder Builder, + signer Signer, + avaxClient evm.Client, + ethClient ethclient.Client, + backend Backend, +) Wallet { + return &wallet{ + Backend: backend, + builder: builder, + signer: signer, + avaxClient: avaxClient, + ethClient: ethClient, + } +} + +type wallet struct { + Backend + builder Builder + signer Signer + avaxClient evm.Client + ethClient ethclient.Client +} + +func (w *wallet) Builder() Builder { + return w.builder +} + +func (w *wallet) Signer() Signer { + return w.signer +} + +func (w *wallet) IssueImportTx( + chainID ids.ID, + to ethcommon.Address, + options ...common.Option, +) (*evm.Tx, error) { + baseFee, err := w.baseFee(options) + if err != nil { + return nil, err + } + + utx, err := w.builder.NewImportTx(chainID, to, baseFee, options...) + if err != nil { + return nil, err + } + return w.IssueUnsignedAtomicTx(utx, options...) +} + +func (w *wallet) IssueExportTx( + chainID ids.ID, + outputs []*secp256k1fx.TransferOutput, + options ...common.Option, +) (*evm.Tx, error) { + baseFee, err := w.baseFee(options) + if err != nil { + return nil, err + } + + utx, err := w.builder.NewExportTx(chainID, outputs, baseFee, options...) 
+ if err != nil { + return nil, err + } + return w.IssueUnsignedAtomicTx(utx, options...) +} + +func (w *wallet) IssueUnsignedAtomicTx( + utx evm.UnsignedAtomicTx, + options ...common.Option, +) (*evm.Tx, error) { + ops := common.NewOptions(options) + ctx := ops.Context() + tx, err := SignUnsignedAtomic(ctx, w.signer, utx) + if err != nil { + return nil, err + } + + return tx, w.IssueAtomicTx(tx, options...) +} + +func (w *wallet) IssueAtomicTx( + tx *evm.Tx, + options ...common.Option, +) error { + ops := common.NewOptions(options) + ctx := ops.Context() + txID, err := w.avaxClient.IssueTx(ctx, tx.SignedBytes()) + if err != nil { + return err + } + + if f := ops.PostIssuanceFunc(); f != nil { + f(txID) + } + + if ops.AssumeDecided() { + return w.Backend.AcceptAtomicTx(ctx, tx) + } + + pollFrequency := ops.PollFrequency() + ticker := time.NewTicker(pollFrequency) + defer ticker.Stop() + + for { + status, err := w.avaxClient.GetAtomicTxStatus(ctx, txID) + if err != nil { + return err + } + + switch status { + case evm.Accepted: + return w.Backend.AcceptAtomicTx(ctx, tx) + case evm.Dropped, evm.Unknown: + return errNotCommitted + } + + // The tx is Processing. + + select { + case <-ticker.C: + case <-ctx.Done(): + return ctx.Err() + } + } +} + +func (w *wallet) baseFee(options []common.Option) (*big.Int, error) { + ops := common.NewOptions(options) + baseFee := ops.BaseFee(nil) + if baseFee != nil { + return baseFee, nil + } + + ctx := ops.Context() + return w.ethClient.EstimateBaseFee(ctx) +} diff --git a/avalanchego/wallet/chain/c/wallet_with_options.go b/avalanchego/wallet/chain/c/wallet_with_options.go new file mode 100644 index 00000000..3b598721 --- /dev/null +++ b/avalanchego/wallet/chain/c/wallet_with_options.go @@ -0,0 +1,82 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package c + +import ( + "github.com/ava-labs/coreth/plugin/evm" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/vms/secp256k1fx" + "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" + + ethcommon "github.com/ethereum/go-ethereum/common" +) + +var _ Wallet = (*walletWithOptions)(nil) + +func NewWalletWithOptions( + wallet Wallet, + options ...common.Option, +) Wallet { + return &walletWithOptions{ + Wallet: wallet, + options: options, + } +} + +type walletWithOptions struct { + Wallet + options []common.Option +} + +func (w *walletWithOptions) Builder() Builder { + return NewBuilderWithOptions( + w.Wallet.Builder(), + w.options..., + ) +} + +func (w *walletWithOptions) IssueImportTx( + chainID ids.ID, + to ethcommon.Address, + options ...common.Option, +) (*evm.Tx, error) { + return w.Wallet.IssueImportTx( + chainID, + to, + common.UnionOptions(w.options, options)..., + ) +} + +func (w *walletWithOptions) IssueExportTx( + chainID ids.ID, + outputs []*secp256k1fx.TransferOutput, + options ...common.Option, +) (*evm.Tx, error) { + return w.Wallet.IssueExportTx( + chainID, + outputs, + common.UnionOptions(w.options, options)..., + ) +} + +func (w *walletWithOptions) IssueUnsignedAtomicTx( + utx evm.UnsignedAtomicTx, + options ...common.Option, +) (*evm.Tx, error) { + return w.Wallet.IssueUnsignedAtomicTx( + utx, + common.UnionOptions(w.options, options)..., + ) +} + +func (w *walletWithOptions) IssueAtomicTx( + tx *evm.Tx, + options ...common.Option, +) error { + return w.Wallet.IssueAtomicTx( + tx, + common.UnionOptions(w.options, options)..., + ) +} diff --git a/avalanchego/wallet/chain/p/backend.go b/avalanchego/wallet/chain/p/backend.go index 37e63b0b..5b800180 100644 --- a/avalanchego/wallet/chain/p/backend.go +++ b/avalanchego/wallet/chain/p/backend.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. package p @@ -6,29 +6,23 @@ package p import ( "sync" - stdcontext "context" - "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/vms/components/avax" + "github.com/ava-labs/avalanchego/vms/platformvm/fx" "github.com/ava-labs/avalanchego/vms/platformvm/txs" + "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" + + stdcontext "context" ) var _ Backend = (*backend)(nil) -type ChainUTXOs interface { - AddUTXO(ctx stdcontext.Context, destinationChainID ids.ID, utxo *avax.UTXO) error - RemoveUTXO(ctx stdcontext.Context, sourceChainID, utxoID ids.ID) error - - UTXOs(ctx stdcontext.Context, sourceChainID ids.ID) ([]*avax.UTXO, error) - GetUTXO(ctx stdcontext.Context, sourceChainID, utxoID ids.ID) (*avax.UTXO, error) -} - // Backend defines the full interface required to support a P-chain wallet. 
type Backend interface { - ChainUTXOs + common.ChainUTXOs BuilderBackend SignerBackend @@ -37,18 +31,32 @@ type Backend interface { type backend struct { Context - ChainUTXOs + common.ChainUTXOs - txsLock sync.RWMutex - // txID -> tx - txs map[ids.ID]*txs.Tx + subnetOwnerLock sync.RWMutex + subnetOwner map[ids.ID]fx.Owner // subnetID -> owner } -func NewBackend(ctx Context, utxos ChainUTXOs, txs map[ids.ID]*txs.Tx) Backend { +func NewBackend(ctx Context, utxos common.ChainUTXOs, subnetTxs map[ids.ID]*txs.Tx) Backend { + subnetOwner := make(map[ids.ID]fx.Owner) + for txID, tx := range subnetTxs { // first get owners from the CreateSubnetTx + createSubnetTx, ok := tx.Unsigned.(*txs.CreateSubnetTx) + if !ok { + continue + } + subnetOwner[txID] = createSubnetTx.Owner + } + for _, tx := range subnetTxs { // then check for TransferSubnetOwnershipTx + transferSubnetOwnershipTx, ok := tx.Unsigned.(*txs.TransferSubnetOwnershipTx) + if !ok { + continue + } + subnetOwner[transferSubnetOwnershipTx.Subnet] = transferSubnetOwnershipTx.Owner + } return &backend{ - Context: ctx, - ChainUTXOs: utxos, - txs: txs, + Context: ctx, + ChainUTXOs: utxos, + subnetOwner: subnetOwner, } } @@ -64,16 +72,7 @@ func (b *backend) AcceptTx(ctx stdcontext.Context, tx *txs.Tx) error { } producedUTXOSlice := tx.UTXOs() - err = b.addUTXOs(ctx, constants.PlatformChainID, producedUTXOSlice) - if err != nil { - return err - } - - b.txsLock.Lock() - defer b.txsLock.Unlock() - - b.txs[txID] = tx - return nil + return b.addUTXOs(ctx, constants.PlatformChainID, producedUTXOSlice) } func (b *backend) addUTXOs(ctx stdcontext.Context, destinationChainID ids.ID, utxos []*avax.UTXO) error { @@ -94,13 +93,20 @@ func (b *backend) removeUTXOs(ctx stdcontext.Context, sourceChain ids.ID, utxoID return nil } -func (b *backend) GetTx(_ stdcontext.Context, txID ids.ID) (*txs.Tx, error) { - b.txsLock.RLock() - defer b.txsLock.RUnlock() +func (b *backend) GetSubnetOwner(_ stdcontext.Context, subnetID ids.ID) (fx.Owner, 
error) { + b.subnetOwnerLock.RLock() + defer b.subnetOwnerLock.RUnlock() - tx, exists := b.txs[txID] + owner, exists := b.subnetOwner[subnetID] if !exists { return nil, database.ErrNotFound } - return tx, nil + return owner, nil +} + +func (b *backend) setSubnetOwner(subnetID ids.ID, owner fx.Owner) { + b.subnetOwnerLock.Lock() + defer b.subnetOwnerLock.Unlock() + + b.subnetOwner[subnetID] = owner } diff --git a/avalanchego/wallet/chain/p/backend_visitor.go b/avalanchego/wallet/chain/p/backend_visitor.go index 9830d87a..d8b118fa 100644 --- a/avalanchego/wallet/chain/p/backend_visitor.go +++ b/avalanchego/wallet/chain/p/backend_visitor.go @@ -1,15 +1,15 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package p import ( - stdcontext "context" - "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/txs" + + stdcontext "context" ) var _ txs.Visitor = (*backendVisitor)(nil) @@ -46,6 +46,10 @@ func (b *backendVisitor) CreateChainTx(tx *txs.CreateChainTx) error { } func (b *backendVisitor) CreateSubnetTx(tx *txs.CreateSubnetTx) error { + b.b.setSubnetOwner( + b.txID, + tx.Owner, + ) return b.baseTx(&tx.BaseTx) } @@ -53,6 +57,18 @@ func (b *backendVisitor) RemoveSubnetValidatorTx(tx *txs.RemoveSubnetValidatorTx return b.baseTx(&tx.BaseTx) } +func (b *backendVisitor) TransferSubnetOwnershipTx(tx *txs.TransferSubnetOwnershipTx) error { + b.b.setSubnetOwner( + tx.Subnet, + tx.Owner, + ) + return b.baseTx(&tx.BaseTx) +} + +func (b *backendVisitor) BaseTx(tx *txs.BaseTx) error { + return b.baseTx(tx) +} + func (b *backendVisitor) ImportTx(tx *txs.ImportTx) error { err := b.b.removeUTXOs( b.ctx, diff --git a/avalanchego/wallet/chain/p/builder.go b/avalanchego/wallet/chain/p/builder.go index f890790d..85f9b611 100644 
--- a/avalanchego/wallet/chain/p/builder.go +++ b/avalanchego/wallet/chain/p/builder.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package p @@ -8,24 +8,24 @@ import ( "fmt" "time" - stdcontext "context" - "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/math" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/vms/components/avax" + "github.com/ava-labs/avalanchego/vms/platformvm/fx" "github.com/ava-labs/avalanchego/vms/platformvm/signer" "github.com/ava-labs/avalanchego/vms/platformvm/stakeable" "github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/ava-labs/avalanchego/vms/secp256k1fx" "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" + + stdcontext "context" ) var ( errNoChangeAddress = errors.New("no possible change address") - errWrongTxType = errors.New("wrong tx type") errUnknownOwnerType = errors.New("unknown owner type") errInsufficientAuthorization = errors.New("insufficient authorization") errInsufficientFunds = errors.New("insufficient funds") @@ -134,6 +134,17 @@ type Builder interface { options ...common.Option, ) (*txs.CreateSubnetTx, error) + // NewTransferSubnetOwnershipTx changes the owner of the named subnet. + // + // - [subnetID] specifies the subnet to be modified + // - [owner] specifies who has the ability to create new chains and add new + // validators to the subnet. + NewTransferSubnetOwnershipTx( + subnetID ids.ID, + owner *secp256k1fx.OutputOwners, + options ...common.Option, + ) (*txs.TransferSubnetOwnershipTx, error) + // NewImportTx creates an import transaction that attempts to consume all // the available UTXOs and import the funds to [to]. 
// @@ -250,7 +261,7 @@ type Builder interface { type BuilderBackend interface { Context UTXOs(ctx stdcontext.Context, sourceChainID ids.ID) ([]*avax.UTXO, error) - GetTx(ctx stdcontext.Context, txID ids.ID) (*txs.Tx, error) + GetSubnetOwner(ctx stdcontext.Context, subnetID ids.ID) (fx.Owner, error) } type builder struct { @@ -311,7 +322,7 @@ func (b *builder) NewBaseTx( outputs = append(outputs, changeOutputs...) avax.SortTransferableOutputs(outputs, txs.Codec) // sort the outputs - return &txs.CreateSubnetTx{ + tx := &txs.CreateSubnetTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: b.backend.NetworkID(), BlockchainID: constants.PlatformChainID, @@ -320,7 +331,8 @@ func (b *builder) NewBaseTx( Memo: ops.Memo(), }}, Owner: &secp256k1fx.OutputOwners{}, - }, nil + } + return tx, b.initCtx(tx) } func (b *builder) NewAddValidatorTx( @@ -343,7 +355,7 @@ func (b *builder) NewAddValidatorTx( } utils.Sort(rewardsOwner.Addrs) - return &txs.AddValidatorTx{ + tx := &txs.AddValidatorTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: b.backend.NetworkID(), BlockchainID: constants.PlatformChainID, @@ -355,7 +367,8 @@ func (b *builder) NewAddValidatorTx( StakeOuts: stakeOutputs, RewardsOwner: rewardsOwner, DelegationShares: shares, - }, nil + } + return tx, b.initCtx(tx) } func (b *builder) NewAddSubnetValidatorTx( @@ -377,7 +390,7 @@ func (b *builder) NewAddSubnetValidatorTx( return nil, err } - return &txs.AddSubnetValidatorTx{ + tx := &txs.AddSubnetValidatorTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: b.backend.NetworkID(), BlockchainID: constants.PlatformChainID, @@ -387,7 +400,8 @@ func (b *builder) NewAddSubnetValidatorTx( }}, SubnetValidator: *vdr, SubnetAuth: subnetAuth, - }, nil + } + return tx, b.initCtx(tx) } func (b *builder) NewRemoveSubnetValidatorTx( @@ -410,7 +424,7 @@ func (b *builder) NewRemoveSubnetValidatorTx( return nil, err } - return &txs.RemoveSubnetValidatorTx{ + tx := &txs.RemoveSubnetValidatorTx{ BaseTx: txs.BaseTx{BaseTx: 
avax.BaseTx{ NetworkID: b.backend.NetworkID(), BlockchainID: constants.PlatformChainID, @@ -421,7 +435,8 @@ func (b *builder) NewRemoveSubnetValidatorTx( Subnet: subnetID, NodeID: nodeID, SubnetAuth: subnetAuth, - }, nil + } + return tx, b.initCtx(tx) } func (b *builder) NewAddDelegatorTx( @@ -443,7 +458,7 @@ func (b *builder) NewAddDelegatorTx( } utils.Sort(rewardsOwner.Addrs) - return &txs.AddDelegatorTx{ + tx := &txs.AddDelegatorTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: b.backend.NetworkID(), BlockchainID: constants.PlatformChainID, @@ -454,7 +469,8 @@ func (b *builder) NewAddDelegatorTx( Validator: *vdr, StakeOuts: stakeOutputs, DelegationRewardsOwner: rewardsOwner, - }, nil + } + return tx, b.initCtx(tx) } func (b *builder) NewCreateChainTx( @@ -481,7 +497,7 @@ func (b *builder) NewCreateChainTx( } utils.Sort(fxIDs) - return &txs.CreateChainTx{ + tx := &txs.CreateChainTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: b.backend.NetworkID(), BlockchainID: constants.PlatformChainID, @@ -495,7 +511,8 @@ func (b *builder) NewCreateChainTx( FxIDs: fxIDs, GenesisData: genesis, SubnetAuth: subnetAuth, - }, nil + } + return tx, b.initCtx(tx) } func (b *builder) NewCreateSubnetTx( @@ -513,7 +530,7 @@ func (b *builder) NewCreateSubnetTx( } utils.Sort(owner.Addrs) - return &txs.CreateSubnetTx{ + tx := &txs.CreateSubnetTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: b.backend.NetworkID(), BlockchainID: constants.PlatformChainID, @@ -522,7 +539,44 @@ func (b *builder) NewCreateSubnetTx( Memo: ops.Memo(), }}, Owner: owner, - }, nil + } + return tx, b.initCtx(tx) +} + +func (b *builder) NewTransferSubnetOwnershipTx( + subnetID ids.ID, + owner *secp256k1fx.OutputOwners, + options ...common.Option, +) (*txs.TransferSubnetOwnershipTx, error) { + toBurn := map[ids.ID]uint64{ + b.backend.AVAXAssetID(): b.backend.BaseTxFee(), + } + toStake := map[ids.ID]uint64{} + ops := common.NewOptions(options) + inputs, outputs, _, err := b.spend(toBurn, toStake, ops) + 
if err != nil { + return nil, err + } + + subnetAuth, err := b.authorizeSubnet(subnetID, ops) + if err != nil { + return nil, err + } + + utils.Sort(owner.Addrs) + tx := &txs.TransferSubnetOwnershipTx{ + BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ + NetworkID: b.backend.NetworkID(), + BlockchainID: constants.PlatformChainID, + Ins: inputs, + Outs: outputs, + Memo: ops.Memo(), + }}, + Subnet: subnetID, + Owner: owner, + SubnetAuth: subnetAuth, + } + return tx, b.initCtx(tx) } func (b *builder) NewImportTx( @@ -618,7 +672,7 @@ func (b *builder) NewImportTx( } avax.SortTransferableOutputs(outputs, txs.Codec) // sort imported outputs - return &txs.ImportTx{ + tx := &txs.ImportTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: b.backend.NetworkID(), BlockchainID: constants.PlatformChainID, @@ -628,7 +682,8 @@ func (b *builder) NewImportTx( }}, SourceChain: sourceChainID, ImportedInputs: importedInputs, - }, nil + } + return tx, b.initCtx(tx) } func (b *builder) NewExportTx( @@ -656,7 +711,7 @@ func (b *builder) NewExportTx( } avax.SortTransferableOutputs(outputs, txs.Codec) // sort exported outputs - return &txs.ExportTx{ + tx := &txs.ExportTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: b.backend.NetworkID(), BlockchainID: constants.PlatformChainID, @@ -666,7 +721,8 @@ func (b *builder) NewExportTx( }}, DestinationChain: chainID, ExportedOutputs: outputs, - }, nil + } + return tx, b.initCtx(tx) } func (b *builder) NewTransformSubnetTx( @@ -702,7 +758,7 @@ func (b *builder) NewTransformSubnetTx( return nil, err } - return &txs.TransformSubnetTx{ + tx := &txs.TransformSubnetTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: b.backend.NetworkID(), BlockchainID: constants.PlatformChainID, @@ -725,7 +781,8 @@ func (b *builder) NewTransformSubnetTx( MaxValidatorWeightFactor: maxValidatorWeightFactor, UptimeRequirement: uptimeRequirement, SubnetAuth: subnetAuth, - }, nil + } + return tx, b.initCtx(tx) } func (b *builder) NewAddPermissionlessValidatorTx( @@ -755,7 
+812,7 @@ func (b *builder) NewAddPermissionlessValidatorTx( utils.Sort(validationRewardsOwner.Addrs) utils.Sort(delegationRewardsOwner.Addrs) - return &txs.AddPermissionlessValidatorTx{ + tx := &txs.AddPermissionlessValidatorTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: b.backend.NetworkID(), BlockchainID: constants.PlatformChainID, @@ -770,7 +827,8 @@ func (b *builder) NewAddPermissionlessValidatorTx( ValidatorRewardsOwner: validationRewardsOwner, DelegatorRewardsOwner: delegationRewardsOwner, DelegationShares: shares, - }, nil + } + return tx, b.initCtx(tx) } func (b *builder) NewAddPermissionlessDelegatorTx( @@ -796,7 +854,7 @@ func (b *builder) NewAddPermissionlessDelegatorTx( } utils.Sort(rewardsOwner.Addrs) - return &txs.AddPermissionlessDelegatorTx{ + tx := &txs.AddPermissionlessDelegatorTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: b.backend.NetworkID(), BlockchainID: constants.PlatformChainID, @@ -808,7 +866,8 @@ func (b *builder) NewAddPermissionlessDelegatorTx( Subnet: vdr.Subnet, StakeOuts: stakeOutputs, DelegationRewardsOwner: rewardsOwner, - }, nil + } + return tx, b.initCtx(tx) } func (b *builder) getBalance( @@ -947,7 +1006,7 @@ func (b *builder) spend( }) // Stake any value that should be staked - amountToStake := math.Min( + amountToStake := min( remainingAmountToStake, // Amount we still need to stake out.Amt, // Amount available to stake ) @@ -1025,7 +1084,7 @@ func (b *builder) spend( }) // Burn any value that should be burned - amountToBurn := math.Min( + amountToBurn := min( remainingAmountToBurn, // Amount we still need to burn out.Amt, // Amount available to burn ) @@ -1033,7 +1092,7 @@ func (b *builder) spend( amountAvalibleToStake := out.Amt - amountToBurn // Burn any value that should be burned - amountToStake := math.Min( + amountToStake := min( remainingAmountToStake, // Amount we still need to stake amountAvalibleToStake, // Amount available to stake ) @@ -1088,20 +1147,15 @@ func (b *builder) spend( } func (b 
*builder) authorizeSubnet(subnetID ids.ID, options *common.Options) (*secp256k1fx.Input, error) { - subnetTx, err := b.backend.GetTx(options.Context(), subnetID) + ownerIntf, err := b.backend.GetSubnetOwner(options.Context(), subnetID) if err != nil { return nil, fmt.Errorf( - "failed to fetch subnet %q: %w", + "failed to fetch subnet owner for %q: %w", subnetID, err, ) } - subnet, ok := subnetTx.Unsigned.(*txs.CreateSubnetTx) - if !ok { - return nil, errWrongTxType - } - - owner, ok := subnet.Owner.(*secp256k1fx.OutputOwners) + owner, ok := ownerIntf.(*secp256k1fx.OutputOwners) if !ok { return nil, errUnknownOwnerType } @@ -1117,3 +1171,13 @@ func (b *builder) authorizeSubnet(subnetID ids.ID, options *common.Options) (*se SigIndices: inputSigIndices, }, nil } + +func (b *builder) initCtx(tx txs.UnsignedTx) error { + ctx, err := newSnowContext(b.backend) + if err != nil { + return err + } + + tx.InitCtx(ctx) + return nil +} diff --git a/avalanchego/wallet/chain/p/builder_test.go b/avalanchego/wallet/chain/p/builder_test.go new file mode 100644 index 00000000..47310314 --- /dev/null +++ b/avalanchego/wallet/chain/p/builder_test.go @@ -0,0 +1,746 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package p + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/crypto/bls" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/utils/units" + "github.com/ava-labs/avalanchego/vms/components/avax" + "github.com/ava-labs/avalanchego/vms/platformvm/reward" + "github.com/ava-labs/avalanchego/vms/platformvm/signer" + "github.com/ava-labs/avalanchego/vms/platformvm/stakeable" + "github.com/ava-labs/avalanchego/vms/platformvm/txs" + "github.com/ava-labs/avalanchego/vms/secp256k1fx" + "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" +) + +var ( + testKeys = secp256k1.TestKeys() + + // We hard-code [avaxAssetID] and [subnetAssetID] to make + // ordering of UTXOs generated by [testUTXOsList] is reproducible + avaxAssetID = ids.Empty.Prefix(1789) + subnetAssetID = ids.Empty.Prefix(2024) + + testCtx = NewContext( + constants.UnitTestID, + avaxAssetID, + units.MicroAvax, // BaseTxFee + 19*units.MicroAvax, // CreateSubnetTxFee + 789*units.MicroAvax, // TransformSubnetTxFee + 1234*units.MicroAvax, // CreateBlockchainTxFee + 19*units.MilliAvax, // AddPrimaryNetworkValidatorFee + 765*units.MilliAvax, // AddPrimaryNetworkDelegatorFee + 1010*units.MilliAvax, // AddSubnetValidatorFee + 9*units.Avax, // AddSubnetDelegatorFee + ) +) + +// These tests create and sign a tx, then verify that utxos included +// in the tx are exactly necessary to pay fees for it + +func TestBaseTx(t *testing.T) { + var ( + require = require.New(t) + + // backend + utxosKey = testKeys[1] + utxos = makeTestUTXOs(utxosKey) + chainUTXOs = common.NewDeterministicChainUTXOs(require, map[ids.ID][]*avax.UTXO{ + constants.PlatformChainID: utxos, + }) + backend = NewBackend(testCtx, chainUTXOs, nil) + + // builder + utxoAddr = utxosKey.Address() + builder = 
NewBuilder(set.Of(utxoAddr), backend) + + // data to build the transaction + outputsToMove = []*avax.TransferableOutput{{ + Asset: avax.Asset{ID: avaxAssetID}, + Out: &secp256k1fx.TransferOutput{ + Amt: 7 * units.Avax, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{utxoAddr}, + }, + }, + }} + ) + + utx, err := builder.NewBaseTx(outputsToMove) + require.NoError(err) + + // check UTXOs selection and fee financing + ins := utx.Ins + outs := utx.Outs + require.Len(ins, 2) + require.Len(outs, 2) + + expectedConsumed := testCtx.CreateSubnetTxFee() + outputsToMove[0].Out.Amount() + consumed := ins[0].In.Amount() + ins[1].In.Amount() - outs[0].Out.Amount() + require.Equal(expectedConsumed, consumed) + require.Equal(outputsToMove[0], outs[1]) +} + +func TestAddSubnetValidatorTx(t *testing.T) { + var ( + require = require.New(t) + + // backend + utxosKey = testKeys[1] + utxos = makeTestUTXOs(utxosKey) + chainUTXOs = common.NewDeterministicChainUTXOs(require, map[ids.ID][]*avax.UTXO{ + constants.PlatformChainID: utxos, + }) + + subnetID = ids.GenerateTestID() + subnetAuthKey = testKeys[0] + subnetAuthAddr = subnetAuthKey.Address() + subnetOwner = &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{subnetAuthAddr}, + } + subnets = map[ids.ID]*txs.Tx{ + subnetID: { + Unsigned: &txs.CreateSubnetTx{ + Owner: subnetOwner, + }, + }, + } + + backend = NewBackend(testCtx, chainUTXOs, subnets) + + // builder + utxoAddr = utxosKey.Address() + builder = NewBuilder(set.Of(utxoAddr, subnetAuthAddr), backend) + + // data to build the transaction + subnetValidator = &txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: ids.GenerateTestNodeID(), + End: uint64(time.Now().Add(time.Hour).Unix()), + }, + Subnet: subnetID, + } + ) + + // build the transaction + utx, err := builder.NewAddSubnetValidatorTx(subnetValidator) + require.NoError(err) + + // check UTXOs selection and fee financing + ins := utx.Ins + outs := utx.Outs + require.Len(ins, 
2) + require.Len(outs, 1) + + expectedConsumed := testCtx.AddSubnetValidatorFee() + consumed := ins[0].In.Amount() + ins[1].In.Amount() - outs[0].Out.Amount() + require.Equal(expectedConsumed, consumed) +} + +func TestRemoveSubnetValidatorTx(t *testing.T) { + var ( + require = require.New(t) + + // backend + utxosKey = testKeys[1] + utxos = makeTestUTXOs(utxosKey) + chainUTXOs = common.NewDeterministicChainUTXOs(require, map[ids.ID][]*avax.UTXO{ + constants.PlatformChainID: utxos, + }) + + subnetID = ids.GenerateTestID() + subnetAuthKey = testKeys[0] + subnetAuthAddr = subnetAuthKey.Address() + subnetOwner = &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{subnetAuthAddr}, + } + subnets = map[ids.ID]*txs.Tx{ + subnetID: { + Unsigned: &txs.CreateSubnetTx{ + Owner: subnetOwner, + }, + }, + } + + backend = NewBackend(testCtx, chainUTXOs, subnets) + + // builder + utxoAddr = utxosKey.Address() + builder = NewBuilder(set.Of(utxoAddr, subnetAuthAddr), backend) + ) + + // build the transaction + utx, err := builder.NewRemoveSubnetValidatorTx( + ids.GenerateTestNodeID(), + subnetID, + ) + require.NoError(err) + + // check UTXOs selection and fee financing + ins := utx.Ins + outs := utx.Outs + require.Len(ins, 1) + require.Len(outs, 1) + + expectedConsumed := testCtx.BaseTxFee() + consumed := ins[0].In.Amount() - outs[0].Out.Amount() + require.Equal(expectedConsumed, consumed) +} + +func TestCreateChainTx(t *testing.T) { + var ( + require = require.New(t) + + // backend + utxosKey = testKeys[1] + utxos = makeTestUTXOs(utxosKey) + chainUTXOs = common.NewDeterministicChainUTXOs(require, map[ids.ID][]*avax.UTXO{ + constants.PlatformChainID: utxos, + }) + + subnetID = ids.GenerateTestID() + subnetAuthKey = testKeys[0] + subnetAuthAddr = subnetAuthKey.Address() + subnetOwner = &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{subnetAuthAddr}, + } + subnets = map[ids.ID]*txs.Tx{ + subnetID: { + Unsigned: &txs.CreateSubnetTx{ + Owner: subnetOwner, 
+ }, + }, + } + + backend = NewBackend(testCtx, chainUTXOs, subnets) + + utxoAddr = utxosKey.Address() + builder = NewBuilder(set.Of(utxoAddr, subnetAuthAddr), backend) + + // data to build the transaction + genesisBytes = []byte{'a', 'b', 'c'} + vmID = ids.GenerateTestID() + fxIDs = []ids.ID{ids.GenerateTestID()} + chainName = "dummyChain" + ) + + // build the transaction + utx, err := builder.NewCreateChainTx( + subnetID, + genesisBytes, + vmID, + fxIDs, + chainName, + ) + require.NoError(err) + + // check UTXOs selection and fee financing + ins := utx.Ins + outs := utx.Outs + require.Len(ins, 1) + require.Len(outs, 1) + + expectedConsumed := testCtx.CreateBlockchainTxFee() + consumed := ins[0].In.Amount() - outs[0].Out.Amount() + require.Equal(expectedConsumed, consumed) +} + +func TestCreateSubnetTx(t *testing.T) { + var ( + require = require.New(t) + + // backend + utxosKey = testKeys[1] + utxos = makeTestUTXOs(utxosKey) + chainUTXOs = common.NewDeterministicChainUTXOs(require, map[ids.ID][]*avax.UTXO{ + constants.PlatformChainID: utxos, + }) + + subnetID = ids.GenerateTestID() + subnetAuthKey = testKeys[0] + subnetAuthAddr = subnetAuthKey.Address() + subnetOwner = &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{subnetAuthAddr}, + } + subnets = map[ids.ID]*txs.Tx{ + subnetID: { + Unsigned: &txs.CreateSubnetTx{ + Owner: subnetOwner, + }, + }, + } + + backend = NewBackend(testCtx, chainUTXOs, subnets) + + // builder + utxoAddr = utxosKey.Address() + builder = NewBuilder(set.Of(utxoAddr, subnetAuthAddr), backend) + ) + + // build the transaction + utx, err := builder.NewCreateSubnetTx(subnetOwner) + require.NoError(err) + + // check UTXOs selection and fee financing + ins := utx.Ins + outs := utx.Outs + require.Len(ins, 1) + require.Len(outs, 1) + + expectedConsumed := testCtx.CreateSubnetTxFee() + consumed := ins[0].In.Amount() - outs[0].Out.Amount() + require.Equal(expectedConsumed, consumed) +} + +func TestTransferSubnetOwnershipTx(t 
*testing.T) { + var ( + require = require.New(t) + + // backend + utxosKey = testKeys[1] + utxos = makeTestUTXOs(utxosKey) + chainUTXOs = common.NewDeterministicChainUTXOs(require, map[ids.ID][]*avax.UTXO{ + constants.PlatformChainID: utxos, + }) + + subnetID = ids.GenerateTestID() + subnetAuthKey = testKeys[0] + subnetAuthAddr = subnetAuthKey.Address() + subnetOwner = &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{subnetAuthAddr}, + } + subnets = map[ids.ID]*txs.Tx{ + subnetID: { + Unsigned: &txs.CreateSubnetTx{ + Owner: subnetOwner, + }, + }, + } + + backend = NewBackend(testCtx, chainUTXOs, subnets) + + // builder + utxoAddr = utxosKey.Address() + builder = NewBuilder(set.Of(utxoAddr, subnetAuthAddr), backend) + ) + + // build the transaction + utx, err := builder.NewTransferSubnetOwnershipTx( + subnetID, + subnetOwner, + ) + require.NoError(err) + + // check UTXOs selection and fee financing + ins := utx.Ins + outs := utx.Outs + require.Len(ins, 1) + require.Len(outs, 1) + + expectedConsumed := testCtx.BaseTxFee() + consumed := ins[0].In.Amount() - outs[0].Out.Amount() + require.Equal(expectedConsumed, consumed) +} + +func TestImportTx(t *testing.T) { + var ( + require = require.New(t) + + // backend + utxosKey = testKeys[1] + utxos = makeTestUTXOs(utxosKey) + sourceChainID = ids.GenerateTestID() + importedUTXOs = utxos[:1] + chainUTXOs = common.NewDeterministicChainUTXOs(require, map[ids.ID][]*avax.UTXO{ + constants.PlatformChainID: utxos, + sourceChainID: importedUTXOs, + }) + + backend = NewBackend(testCtx, chainUTXOs, nil) + + // builder + utxoAddr = utxosKey.Address() + builder = NewBuilder(set.Of(utxoAddr), backend) + + // data to build the transaction + importKey = testKeys[0] + importTo = &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + importKey.Address(), + }, + } + ) + + // build the transaction + utx, err := builder.NewImportTx( + sourceChainID, + importTo, + ) + require.NoError(err) + + // check UTXOs 
selection and fee financing + ins := utx.Ins + outs := utx.Outs + importedIns := utx.ImportedInputs + require.Empty(ins) // we spend the imported input (at least partially) + require.Len(importedIns, 1) + require.Len(outs, 1) + + expectedConsumed := testCtx.BaseTxFee() + consumed := importedIns[0].In.Amount() - outs[0].Out.Amount() + require.Equal(expectedConsumed, consumed) +} + +func TestExportTx(t *testing.T) { + var ( + require = require.New(t) + + // backend + utxosKey = testKeys[1] + utxos = makeTestUTXOs(utxosKey) + chainUTXOs = common.NewDeterministicChainUTXOs(require, map[ids.ID][]*avax.UTXO{ + constants.PlatformChainID: utxos, + }) + backend = NewBackend(testCtx, chainUTXOs, nil) + + // builder + utxoAddr = utxosKey.Address() + builder = NewBuilder(set.Of(utxoAddr), backend) + + // data to build the transaction + subnetID = ids.GenerateTestID() + exportedOutputs = []*avax.TransferableOutput{{ + Asset: avax.Asset{ID: avaxAssetID}, + Out: &secp256k1fx.TransferOutput{ + Amt: 7 * units.Avax, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{utxoAddr}, + }, + }, + }} + ) + + // build the transaction + utx, err := builder.NewExportTx( + subnetID, + exportedOutputs, + ) + require.NoError(err) + + // check UTXOs selection and fee financing + ins := utx.Ins + outs := utx.Outs + require.Len(ins, 2) + require.Len(outs, 1) + + expectedConsumed := testCtx.BaseTxFee() + exportedOutputs[0].Out.Amount() + consumed := ins[0].In.Amount() + ins[1].In.Amount() - outs[0].Out.Amount() + require.Equal(expectedConsumed, consumed) + require.Equal(utx.ExportedOutputs, exportedOutputs) +} + +func TestTransformSubnetTx(t *testing.T) { + var ( + require = require.New(t) + + // backend + utxosKey = testKeys[1] + utxos = makeTestUTXOs(utxosKey) + chainUTXOs = common.NewDeterministicChainUTXOs(require, map[ids.ID][]*avax.UTXO{ + constants.PlatformChainID: utxos, + }) + + subnetID = ids.GenerateTestID() + subnetAuthKey = testKeys[0] + subnetAuthAddr = 
subnetAuthKey.Address() + subnetOwner = &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{subnetAuthAddr}, + } + subnets = map[ids.ID]*txs.Tx{ + subnetID: { + Unsigned: &txs.CreateSubnetTx{ + Owner: subnetOwner, + }, + }, + } + + backend = NewBackend(testCtx, chainUTXOs, subnets) + + // builder + utxoAddr = utxosKey.Address() + builder = NewBuilder(set.Of(utxoAddr, subnetAuthAddr), backend) + + // data to build the transaction + initialSupply = 40 * units.MegaAvax + maxSupply = 100 * units.MegaAvax + ) + + // build the transaction + utx, err := builder.NewTransformSubnetTx( + subnetID, + subnetAssetID, + initialSupply, // initial supply + maxSupply, // max supply + reward.PercentDenominator, // min consumption rate + reward.PercentDenominator, // max consumption rate + 1, // min validator stake + 100*units.MegaAvax, // max validator stake + time.Second, // min stake duration + 365*24*time.Hour, // max stake duration + 0, // min delegation fee + 1, // min delegator stake + 5, // max validator weight factor + .80*reward.PercentDenominator, // uptime requirement + ) + require.NoError(err) + + // check UTXOs selection and fee financing + ins := utx.Ins + outs := utx.Outs + require.Len(ins, 2) + require.Len(outs, 2) + + expectedConsumedSubnetAsset := maxSupply - initialSupply + consumedSubnetAsset := ins[0].In.Amount() - outs[1].Out.Amount() + require.Equal(expectedConsumedSubnetAsset, consumedSubnetAsset) + expectedConsumed := testCtx.TransformSubnetTxFee() + consumed := ins[1].In.Amount() - outs[0].Out.Amount() + require.Equal(expectedConsumed, consumed) +} + +func TestAddPermissionlessValidatorTx(t *testing.T) { + var ( + require = require.New(t) + + // backend + utxosKey = testKeys[1] + utxos = makeTestUTXOs(utxosKey) + chainUTXOs = common.NewDeterministicChainUTXOs(require, map[ids.ID][]*avax.UTXO{ + constants.PlatformChainID: utxos, + }) + backend = NewBackend(testCtx, chainUTXOs, nil) + + // builder + utxoAddr = utxosKey.Address() + rewardKey = 
testKeys[0] + rewardAddr = rewardKey.Address() + builder = NewBuilder(set.Of(utxoAddr, rewardAddr), backend) + + // data to build the transaction + validationRewardsOwner = &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + rewardAddr, + }, + } + delegationRewardsOwner = &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + rewardAddr, + }, + } + ) + + sk, err := bls.NewSecretKey() + require.NoError(err) + + // build the transaction + utx, err := builder.NewAddPermissionlessValidatorTx( + &txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: ids.GenerateTestNodeID(), + End: uint64(time.Now().Add(time.Hour).Unix()), + Wght: 2 * units.Avax, + }, + Subnet: constants.PrimaryNetworkID, + }, + signer.NewProofOfPossession(sk), + avaxAssetID, + validationRewardsOwner, + delegationRewardsOwner, + reward.PercentDenominator, + ) + require.NoError(err) + + // check UTXOs selection and fee financing + ins := utx.Ins + staked := utx.StakeOuts + outs := utx.Outs + require.Len(ins, 4) + require.Len(staked, 2) + require.Len(outs, 2) + + expectedConsumedSubnetAsset := utx.Validator.Weight() + consumedSubnetAsset := staked[0].Out.Amount() + staked[1].Out.Amount() + require.Equal(expectedConsumedSubnetAsset, consumedSubnetAsset) + expectedConsumed := testCtx.AddPrimaryNetworkValidatorFee() + consumed := ins[1].In.Amount() + ins[3].In.Amount() - outs[0].Out.Amount() + require.Equal(expectedConsumed, consumed) +} + +func TestAddPermissionlessDelegatorTx(t *testing.T) { + var ( + require = require.New(t) + + // backend + utxosKey = testKeys[1] + utxos = makeTestUTXOs(utxosKey) + chainUTXOs = common.NewDeterministicChainUTXOs(require, map[ids.ID][]*avax.UTXO{ + constants.PlatformChainID: utxos, + }) + backend = NewBackend(testCtx, chainUTXOs, nil) + + // builder + utxoAddr = utxosKey.Address() + rewardKey = testKeys[0] + rewardAddr = rewardKey.Address() + builder = NewBuilder(set.Of(utxoAddr, rewardAddr), backend) + + // data to build the transaction 
+ rewardsOwner = &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + rewardAddr, + }, + } + ) + + // build the transaction + utx, err := builder.NewAddPermissionlessDelegatorTx( + &txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: ids.GenerateTestNodeID(), + End: uint64(time.Now().Add(time.Hour).Unix()), + Wght: 2 * units.Avax, + }, + Subnet: constants.PrimaryNetworkID, + }, + avaxAssetID, + rewardsOwner, + ) + require.NoError(err) + + // check UTXOs selection and fee financing + ins := utx.Ins + staked := utx.StakeOuts + outs := utx.Outs + require.Len(ins, 4) + require.Len(staked, 2) + require.Len(outs, 2) + + expectedConsumedSubnetAsset := utx.Validator.Weight() + consumedSubnetAsset := staked[0].Out.Amount() + staked[1].Out.Amount() + require.Equal(expectedConsumedSubnetAsset, consumedSubnetAsset) + expectedConsumed := testCtx.AddPrimaryNetworkDelegatorFee() + consumed := ins[1].In.Amount() + ins[3].In.Amount() - outs[0].Out.Amount() + require.Equal(expectedConsumed, consumed) +} + +func makeTestUTXOs(utxosKey *secp256k1.PrivateKey) []*avax.UTXO { + // Note: we avoid ids.GenerateTestNodeID here to make sure that UTXO IDs won't change + // run by run. This simplifies checking what utxos are included in the built txs. 
+ const utxosOffset uint64 = 2024 + + utxosAddr := utxosKey.Address() + return []*avax.UTXO{ + { // a small UTXO first, which should not be enough to pay fees + UTXOID: avax.UTXOID{ + TxID: ids.Empty.Prefix(utxosOffset), + OutputIndex: uint32(utxosOffset), + }, + Asset: avax.Asset{ID: avaxAssetID}, + Out: &secp256k1fx.TransferOutput{ + Amt: 2 * units.MilliAvax, + OutputOwners: secp256k1fx.OutputOwners{ + Locktime: 0, + Addrs: []ids.ShortID{utxosAddr}, + Threshold: 1, + }, + }, + }, + { // a locked, small UTXO + UTXOID: avax.UTXOID{ + TxID: ids.Empty.Prefix(utxosOffset + 1), + OutputIndex: uint32(utxosOffset + 1), + }, + Asset: avax.Asset{ID: avaxAssetID}, + Out: &stakeable.LockOut{ + Locktime: uint64(time.Now().Add(time.Hour).Unix()), + TransferableOut: &secp256k1fx.TransferOutput{ + Amt: 3 * units.MilliAvax, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{utxosAddr}, + }, + }, + }, + }, + { // a subnetAssetID denominated UTXO + UTXOID: avax.UTXOID{ + TxID: ids.Empty.Prefix(utxosOffset + 2), + OutputIndex: uint32(utxosOffset + 2), + }, + Asset: avax.Asset{ID: subnetAssetID}, + Out: &secp256k1fx.TransferOutput{ + Amt: 99 * units.MegaAvax, + OutputOwners: secp256k1fx.OutputOwners{ + Locktime: 0, + Addrs: []ids.ShortID{utxosAddr}, + Threshold: 1, + }, + }, + }, + { // a locked, large UTXO + UTXOID: avax.UTXOID{ + TxID: ids.Empty.Prefix(utxosOffset + 3), + OutputIndex: uint32(utxosOffset + 3), + }, + Asset: avax.Asset{ID: avaxAssetID}, + Out: &stakeable.LockOut{ + Locktime: uint64(time.Now().Add(time.Hour).Unix()), + TransferableOut: &secp256k1fx.TransferOutput{ + Amt: 88 * units.Avax, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{utxosAddr}, + }, + }, + }, + }, + { // a large UTXO last, which should be enough to pay any fee by itself + UTXOID: avax.UTXOID{ + TxID: ids.Empty.Prefix(utxosOffset + 4), + OutputIndex: uint32(utxosOffset + 4), + }, + Asset: avax.Asset{ID: avaxAssetID}, + Out: 
&secp256k1fx.TransferOutput{ + Amt: 9 * units.Avax, + OutputOwners: secp256k1fx.OutputOwners{ + Locktime: 0, + Addrs: []ids.ShortID{utxosAddr}, + Threshold: 1, + }, + }, + }, + } +} diff --git a/avalanchego/wallet/chain/p/builder_with_options.go b/avalanchego/wallet/chain/p/builder_with_options.go index 9060d763..a402355b 100644 --- a/avalanchego/wallet/chain/p/builder_with_options.go +++ b/avalanchego/wallet/chain/p/builder_with_options.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package p @@ -129,6 +129,18 @@ func (b *builderWithOptions) NewCreateSubnetTx( ) } +func (b *builderWithOptions) NewTransferSubnetOwnershipTx( + subnetID ids.ID, + owner *secp256k1fx.OutputOwners, + options ...common.Option, +) (*txs.TransferSubnetOwnershipTx, error) { + return b.Builder.NewTransferSubnetOwnershipTx( + subnetID, + owner, + common.UnionOptions(b.options, options)..., + ) +} + func (b *builderWithOptions) NewImportTx( sourceChainID ids.ID, to *secp256k1fx.OutputOwners, diff --git a/avalanchego/wallet/chain/p/context.go b/avalanchego/wallet/chain/p/context.go index 75bcdb19..2511a19a 100644 --- a/avalanchego/wallet/chain/p/context.go +++ b/avalanchego/wallet/chain/p/context.go @@ -1,16 +1,21 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package p import ( - stdcontext "context" - "github.com/ava-labs/avalanchego/api/info" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/vms/avm" + + stdcontext "context" ) +const Alias = "P" + var _ Context = (*context)(nil) type Context interface { @@ -144,3 +149,15 @@ func (c *context) AddSubnetValidatorFee() uint64 { func (c *context) AddSubnetDelegatorFee() uint64 { return c.addSubnetDelegatorFee } + +func newSnowContext(c Context) (*snow.Context, error) { + lookup := ids.NewAliaser() + return &snow.Context{ + NetworkID: c.NetworkID(), + SubnetID: constants.PrimaryNetworkID, + ChainID: constants.PlatformChainID, + AVAXAssetID: c.AVAXAssetID(), + Log: logging.NoLog{}, + BCLookup: lookup, + }, lookup.Alias(constants.PlatformChainID, Alias) +} diff --git a/avalanchego/wallet/chain/p/signer.go b/avalanchego/wallet/chain/p/signer.go index a795dd63..bedbbdbf 100644 --- a/avalanchego/wallet/chain/p/signer.go +++ b/avalanchego/wallet/chain/p/signer.go @@ -1,27 +1,35 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package p import ( - stdcontext "context" - "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/crypto/keychain" "github.com/ava-labs/avalanchego/vms/components/avax" + "github.com/ava-labs/avalanchego/vms/platformvm/fx" "github.com/ava-labs/avalanchego/vms/platformvm/txs" + + stdcontext "context" ) var _ Signer = (*txSigner)(nil) type Signer interface { - SignUnsigned(ctx stdcontext.Context, tx txs.UnsignedTx) (*txs.Tx, error) + // Sign adds as many missing signatures as possible to the provided + // transaction. + // + // If there are already some signatures on the transaction, those signatures + // will not be removed. 
+ // + // If the signer doesn't have the ability to provide a required signature, + // the signature slot will be skipped without reporting an error. Sign(ctx stdcontext.Context, tx *txs.Tx) error } type SignerBackend interface { GetUTXO(ctx stdcontext.Context, chainID, utxoID ids.ID) (*avax.UTXO, error) - GetTx(ctx stdcontext.Context, txID ids.ID) (*txs.Tx, error) + GetSubnetOwner(ctx stdcontext.Context, subnetID ids.ID) (fx.Owner, error) } type txSigner struct { @@ -36,11 +44,6 @@ func NewSigner(kc keychain.Keychain, backend SignerBackend) Signer { } } -func (s *txSigner) SignUnsigned(ctx stdcontext.Context, utx txs.UnsignedTx) (*txs.Tx, error) { - tx := &txs.Tx{Unsigned: utx} - return tx, s.Sign(ctx, tx) -} - func (s *txSigner) Sign(ctx stdcontext.Context, tx *txs.Tx) error { return tx.Unsigned.Visit(&signerVisitor{ kc: s.kc, @@ -49,3 +52,12 @@ func (s *txSigner) Sign(ctx stdcontext.Context, tx *txs.Tx) error { tx: tx, }) } + +func SignUnsigned( + ctx stdcontext.Context, + signer Signer, + utx txs.UnsignedTx, +) (*txs.Tx, error) { + tx := &txs.Tx{Unsigned: utx} + return tx, signer.Sign(ctx, tx) +} diff --git a/avalanchego/wallet/chain/p/signer_visitor.go b/avalanchego/wallet/chain/p/signer_visitor.go index 52269ee6..7c9dd4cb 100644 --- a/avalanchego/wallet/chain/p/signer_visitor.go +++ b/avalanchego/wallet/chain/p/signer_visitor.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package p @@ -7,8 +7,6 @@ import ( "errors" "fmt" - stdcontext "context" - "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/constants" @@ -20,6 +18,8 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/stakeable" "github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/ava-labs/avalanchego/vms/secp256k1fx" + + stdcontext "context" ) var ( @@ -51,6 +51,14 @@ func (*signerVisitor) RewardValidatorTx(*txs.RewardValidatorTx) error { return errUnsupportedTxType } +func (s *signerVisitor) BaseTx(tx *txs.BaseTx) error { + txSigners, err := s.getSigners(constants.PlatformChainID, tx.Ins) + if err != nil { + return err + } + return sign(s.tx, false, txSigners) +} + func (s *signerVisitor) AddValidatorTx(tx *txs.AddValidatorTx) error { txSigners, err := s.getSigners(constants.PlatformChainID, tx.Ins) if err != nil { @@ -135,6 +143,19 @@ func (s *signerVisitor) RemoveSubnetValidatorTx(tx *txs.RemoveSubnetValidatorTx) return sign(s.tx, true, txSigners) } +func (s *signerVisitor) TransferSubnetOwnershipTx(tx *txs.TransferSubnetOwnershipTx) error { + txSigners, err := s.getSigners(constants.PlatformChainID, tx.Ins) + if err != nil { + return err + } + subnetAuthSigners, err := s.getSubnetSigners(tx.Subnet, tx.SubnetAuth) + if err != nil { + return err + } + txSigners = append(txSigners, subnetAuthSigners) + return sign(s.tx, true, txSigners) +} + func (s *signerVisitor) TransformSubnetTx(tx *txs.TransformSubnetTx) error { txSigners, err := s.getSigners(constants.PlatformChainID, tx.Ins) if err != nil { @@ -225,20 +246,15 @@ func (s *signerVisitor) getSubnetSigners(subnetID ids.ID, subnetAuth verify.Veri return nil, errUnknownSubnetAuthType } - subnetTx, err := s.backend.GetTx(s.ctx, subnetID) + ownerIntf, err := s.backend.GetSubnetOwner(s.ctx, subnetID) if err != nil { return nil, fmt.Errorf( - "failed to fetch subnet %q: %w", + "failed to fetch subnet owner for %q: %w", subnetID, err, ) } - 
subnet, ok := subnetTx.Unsigned.(*txs.CreateSubnetTx) - if !ok { - return nil, errWrongTxType - } - - owner, ok := subnet.Owner.(*secp256k1fx.OutputOwners) + owner, ok := ownerIntf.(*secp256k1fx.OutputOwners) if !ok { return nil, errUnknownOwnerType } @@ -263,7 +279,7 @@ func (s *signerVisitor) getSubnetSigners(subnetID ids.ID, subnetAuth verify.Veri // TODO: remove [signHash] after the ledger supports signing all transactions. func sign(tx *txs.Tx, signHash bool, txSigners [][]keychain.Signer) error { - unsignedBytes, err := txs.Codec.Marshal(txs.Version, &tx.Unsigned) + unsignedBytes, err := txs.Codec.Marshal(txs.CodecVersion, &tx.Unsigned) if err != nil { return fmt.Errorf("couldn't marshal unsigned tx: %w", err) } @@ -324,7 +340,7 @@ func sign(tx *txs.Tx, signHash bool, txSigners [][]keychain.Signer) error { } } - signedBytes, err := txs.Codec.Marshal(txs.Version, tx) + signedBytes, err := txs.Codec.Marshal(txs.CodecVersion, tx) if err != nil { return fmt.Errorf("couldn't marshal tx: %w", err) } diff --git a/avalanchego/wallet/chain/p/wallet.go b/avalanchego/wallet/chain/p/wallet.go index e25dfac5..44cc7e2a 100644 --- a/avalanchego/wallet/chain/p/wallet.go +++ b/avalanchego/wallet/chain/p/wallet.go @@ -1,10 +1,11 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package p import ( "errors" + "fmt" "time" "github.com/ava-labs/avalanchego/ids" @@ -41,7 +42,7 @@ type Wallet interface { IssueBaseTx( outputs []*avax.TransferableOutput, options ...common.Option, - ) (ids.ID, error) + ) (*txs.Tx, error) // IssueAddValidatorTx creates, signs, and issues a new validator of the // primary network. @@ -58,7 +59,7 @@ type Wallet interface { rewardsOwner *secp256k1fx.OutputOwners, shares uint32, options ...common.Option, - ) (ids.ID, error) + ) (*txs.Tx, error) // IssueAddSubnetValidatorTx creates, signs, and issues a new validator of a // subnet. 
@@ -68,7 +69,7 @@ type Wallet interface { IssueAddSubnetValidatorTx( vdr *txs.SubnetValidator, options ...common.Option, - ) (ids.ID, error) + ) (*txs.Tx, error) // IssueAddSubnetValidatorTx creates, signs, and issues a transaction that // removes a validator of a subnet. @@ -78,7 +79,7 @@ type Wallet interface { nodeID ids.NodeID, subnetID ids.ID, options ...common.Option, - ) (ids.ID, error) + ) (*txs.Tx, error) // IssueAddDelegatorTx creates, signs, and issues a new delegator to a // validator on the primary network. @@ -91,7 +92,7 @@ type Wallet interface { vdr *txs.Validator, rewardsOwner *secp256k1fx.OutputOwners, options ...common.Option, - ) (ids.ID, error) + ) (*txs.Tx, error) // IssueCreateChainTx creates, signs, and issues a new chain in the named // subnet. @@ -109,7 +110,7 @@ type Wallet interface { fxIDs []ids.ID, chainName string, options ...common.Option, - ) (ids.ID, error) + ) (*txs.Tx, error) // IssueCreateSubnetTx creates, signs, and issues a new subnet with the // specified owner. @@ -119,7 +120,19 @@ type Wallet interface { IssueCreateSubnetTx( owner *secp256k1fx.OutputOwners, options ...common.Option, - ) (ids.ID, error) + ) (*txs.Tx, error) + + // IssueTransferSubnetOwnershipTx creates, signs, and issues a transaction that + // changes the owner of the named subnet. + // + // - [subnetID] specifies the subnet to be modified + // - [owner] specifies who has the ability to create new chains and add new + // validators to the subnet. + IssueTransferSubnetOwnershipTx( + subnetID ids.ID, + owner *secp256k1fx.OutputOwners, + options ...common.Option, + ) (*txs.Tx, error) // IssueImportTx creates, signs, and issues an import transaction that // attempts to consume all the available UTXOs and import the funds to [to]. 
@@ -130,7 +143,7 @@ type Wallet interface { chainID ids.ID, to *secp256k1fx.OutputOwners, options ...common.Option, - ) (ids.ID, error) + ) (*txs.Tx, error) // IssueExportTx creates, signs, and issues an export transaction that // attempts to send all the provided [outputs] to the requested [chainID]. @@ -141,7 +154,7 @@ type Wallet interface { chainID ids.ID, outputs []*avax.TransferableOutput, options ...common.Option, - ) (ids.ID, error) + ) (*txs.Tx, error) // IssueTransformSubnetTx creates a transform subnet transaction that attempts // to convert the provided [subnetID] from a permissioned subnet to a @@ -189,7 +202,7 @@ type Wallet interface { maxValidatorWeightFactor byte, uptimeRequirement uint32, options ...common.Option, - ) (ids.ID, error) + ) (*txs.Tx, error) // IssueAddPermissionlessValidatorTx creates, signs, and issues a new // validator of the specified subnet. @@ -214,7 +227,7 @@ type Wallet interface { delegationRewardsOwner *secp256k1fx.OutputOwners, shares uint32, options ...common.Option, - ) (ids.ID, error) + ) (*txs.Tx, error) // IssueAddPermissionlessDelegatorTx creates, signs, and issues a new // delegator of the specified subnet on the specified nodeID. @@ -229,19 +242,19 @@ type Wallet interface { assetID ids.ID, rewardsOwner *secp256k1fx.OutputOwners, options ...common.Option, - ) (ids.ID, error) + ) (*txs.Tx, error) // IssueUnsignedTx signs and issues the unsigned tx. IssueUnsignedTx( utx txs.UnsignedTx, options ...common.Option, - ) (ids.ID, error) + ) (*txs.Tx, error) // IssueTx issues the signed tx. IssueTx( tx *txs.Tx, options ...common.Option, - ) (ids.ID, error) + ) error } func NewWallet( @@ -276,10 +289,10 @@ func (w *wallet) Signer() Signer { func (w *wallet) IssueBaseTx( outputs []*avax.TransferableOutput, options ...common.Option, -) (ids.ID, error) { +) (*txs.Tx, error) { utx, err := w.builder.NewBaseTx(outputs, options...) if err != nil { - return ids.Empty, err + return nil, err } return w.IssueUnsignedTx(utx, options...) 
} @@ -289,10 +302,10 @@ func (w *wallet) IssueAddValidatorTx( rewardsOwner *secp256k1fx.OutputOwners, shares uint32, options ...common.Option, -) (ids.ID, error) { +) (*txs.Tx, error) { utx, err := w.builder.NewAddValidatorTx(vdr, rewardsOwner, shares, options...) if err != nil { - return ids.Empty, err + return nil, err } return w.IssueUnsignedTx(utx, options...) } @@ -300,10 +313,10 @@ func (w *wallet) IssueAddValidatorTx( func (w *wallet) IssueAddSubnetValidatorTx( vdr *txs.SubnetValidator, options ...common.Option, -) (ids.ID, error) { +) (*txs.Tx, error) { utx, err := w.builder.NewAddSubnetValidatorTx(vdr, options...) if err != nil { - return ids.Empty, err + return nil, err } return w.IssueUnsignedTx(utx, options...) } @@ -312,10 +325,10 @@ func (w *wallet) IssueRemoveSubnetValidatorTx( nodeID ids.NodeID, subnetID ids.ID, options ...common.Option, -) (ids.ID, error) { +) (*txs.Tx, error) { utx, err := w.builder.NewRemoveSubnetValidatorTx(nodeID, subnetID, options...) if err != nil { - return ids.Empty, err + return nil, err } return w.IssueUnsignedTx(utx, options...) } @@ -324,10 +337,10 @@ func (w *wallet) IssueAddDelegatorTx( vdr *txs.Validator, rewardsOwner *secp256k1fx.OutputOwners, options ...common.Option, -) (ids.ID, error) { +) (*txs.Tx, error) { utx, err := w.builder.NewAddDelegatorTx(vdr, rewardsOwner, options...) if err != nil { - return ids.Empty, err + return nil, err } return w.IssueUnsignedTx(utx, options...) } @@ -339,10 +352,10 @@ func (w *wallet) IssueCreateChainTx( fxIDs []ids.ID, chainName string, options ...common.Option, -) (ids.ID, error) { +) (*txs.Tx, error) { utx, err := w.builder.NewCreateChainTx(subnetID, genesis, vmID, fxIDs, chainName, options...) if err != nil { - return ids.Empty, err + return nil, err } return w.IssueUnsignedTx(utx, options...) 
} @@ -350,10 +363,22 @@ func (w *wallet) IssueCreateChainTx( func (w *wallet) IssueCreateSubnetTx( owner *secp256k1fx.OutputOwners, options ...common.Option, -) (ids.ID, error) { +) (*txs.Tx, error) { utx, err := w.builder.NewCreateSubnetTx(owner, options...) if err != nil { - return ids.Empty, err + return nil, err + } + return w.IssueUnsignedTx(utx, options...) +} + +func (w *wallet) IssueTransferSubnetOwnershipTx( + subnetID ids.ID, + owner *secp256k1fx.OutputOwners, + options ...common.Option, +) (*txs.Tx, error) { + utx, err := w.builder.NewTransferSubnetOwnershipTx(subnetID, owner, options...) + if err != nil { + return nil, err } return w.IssueUnsignedTx(utx, options...) } @@ -362,10 +387,10 @@ func (w *wallet) IssueImportTx( sourceChainID ids.ID, to *secp256k1fx.OutputOwners, options ...common.Option, -) (ids.ID, error) { +) (*txs.Tx, error) { utx, err := w.builder.NewImportTx(sourceChainID, to, options...) if err != nil { - return ids.Empty, err + return nil, err } return w.IssueUnsignedTx(utx, options...) } @@ -374,10 +399,10 @@ func (w *wallet) IssueExportTx( chainID ids.ID, outputs []*avax.TransferableOutput, options ...common.Option, -) (ids.ID, error) { +) (*txs.Tx, error) { utx, err := w.builder.NewExportTx(chainID, outputs, options...) if err != nil { - return ids.Empty, err + return nil, err } return w.IssueUnsignedTx(utx, options...) } @@ -398,7 +423,7 @@ func (w *wallet) IssueTransformSubnetTx( maxValidatorWeightFactor byte, uptimeRequirement uint32, options ...common.Option, -) (ids.ID, error) { +) (*txs.Tx, error) { utx, err := w.builder.NewTransformSubnetTx( subnetID, assetID, @@ -417,7 +442,7 @@ func (w *wallet) IssueTransformSubnetTx( options..., ) if err != nil { - return ids.Empty, err + return nil, err } return w.IssueUnsignedTx(utx, options...) 
} @@ -430,7 +455,7 @@ func (w *wallet) IssueAddPermissionlessValidatorTx( delegationRewardsOwner *secp256k1fx.OutputOwners, shares uint32, options ...common.Option, -) (ids.ID, error) { +) (*txs.Tx, error) { utx, err := w.builder.NewAddPermissionlessValidatorTx( vdr, signer, @@ -441,7 +466,7 @@ func (w *wallet) IssueAddPermissionlessValidatorTx( options..., ) if err != nil { - return ids.Empty, err + return nil, err } return w.IssueUnsignedTx(utx, options...) } @@ -451,7 +476,7 @@ func (w *wallet) IssueAddPermissionlessDelegatorTx( assetID ids.ID, rewardsOwner *secp256k1fx.OutputOwners, options ...common.Option, -) (ids.ID, error) { +) (*txs.Tx, error) { utx, err := w.builder.NewAddPermissionlessDelegatorTx( vdr, assetID, @@ -459,7 +484,7 @@ func (w *wallet) IssueAddPermissionlessDelegatorTx( options..., ) if err != nil { - return ids.Empty, err + return nil, err } return w.IssueUnsignedTx(utx, options...) } @@ -467,43 +492,47 @@ func (w *wallet) IssueAddPermissionlessDelegatorTx( func (w *wallet) IssueUnsignedTx( utx txs.UnsignedTx, options ...common.Option, -) (ids.ID, error) { +) (*txs.Tx, error) { ops := common.NewOptions(options) ctx := ops.Context() - tx, err := w.signer.SignUnsigned(ctx, utx) + tx, err := SignUnsigned(ctx, w.signer, utx) if err != nil { - return ids.Empty, err + return nil, err } - return w.IssueTx(tx, options...) + return tx, w.IssueTx(tx, options...) 
} func (w *wallet) IssueTx( tx *txs.Tx, options ...common.Option, -) (ids.ID, error) { +) error { ops := common.NewOptions(options) ctx := ops.Context() txID, err := w.client.IssueTx(ctx, tx.Bytes()) if err != nil { - return ids.Empty, err + return err + } + + if f := ops.PostIssuanceFunc(); f != nil { + f(txID) } if ops.AssumeDecided() { - return txID, w.Backend.AcceptTx(ctx, tx) + return w.Backend.AcceptTx(ctx, tx) } txStatus, err := w.client.AwaitTxDecided(ctx, txID, ops.PollFrequency()) if err != nil { - return txID, err + return err } if err := w.Backend.AcceptTx(ctx, tx); err != nil { - return txID, err + return err } if txStatus.Status != status.Committed { - return txID, errNotCommitted + return fmt.Errorf("%w: %s", errNotCommitted, txStatus.Reason) } - return txID, nil + return nil } diff --git a/avalanchego/wallet/chain/p/wallet_with_options.go b/avalanchego/wallet/chain/p/wallet_with_options.go index 2d53d12c..4982e77f 100644 --- a/avalanchego/wallet/chain/p/wallet_with_options.go +++ b/avalanchego/wallet/chain/p/wallet_with_options.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package p @@ -41,7 +41,7 @@ func (w *walletWithOptions) Builder() Builder { func (w *walletWithOptions) IssueBaseTx( outputs []*avax.TransferableOutput, options ...common.Option, -) (ids.ID, error) { +) (*txs.Tx, error) { return w.Wallet.IssueBaseTx( outputs, common.UnionOptions(w.options, options)..., @@ -53,7 +53,7 @@ func (w *walletWithOptions) IssueAddValidatorTx( rewardsOwner *secp256k1fx.OutputOwners, shares uint32, options ...common.Option, -) (ids.ID, error) { +) (*txs.Tx, error) { return w.Wallet.IssueAddValidatorTx( vdr, rewardsOwner, @@ -65,7 +65,7 @@ func (w *walletWithOptions) IssueAddValidatorTx( func (w *walletWithOptions) IssueAddSubnetValidatorTx( vdr *txs.SubnetValidator, options ...common.Option, -) (ids.ID, error) { +) (*txs.Tx, error) { return w.Wallet.IssueAddSubnetValidatorTx( vdr, common.UnionOptions(w.options, options)..., @@ -76,7 +76,7 @@ func (w *walletWithOptions) IssueRemoveSubnetValidatorTx( nodeID ids.NodeID, subnetID ids.ID, options ...common.Option, -) (ids.ID, error) { +) (*txs.Tx, error) { return w.Wallet.IssueRemoveSubnetValidatorTx( nodeID, subnetID, @@ -88,7 +88,7 @@ func (w *walletWithOptions) IssueAddDelegatorTx( vdr *txs.Validator, rewardsOwner *secp256k1fx.OutputOwners, options ...common.Option, -) (ids.ID, error) { +) (*txs.Tx, error) { return w.Wallet.IssueAddDelegatorTx( vdr, rewardsOwner, @@ -103,7 +103,7 @@ func (w *walletWithOptions) IssueCreateChainTx( fxIDs []ids.ID, chainName string, options ...common.Option, -) (ids.ID, error) { +) (*txs.Tx, error) { return w.Wallet.IssueCreateChainTx( subnetID, genesis, @@ -117,18 +117,30 @@ func (w *walletWithOptions) IssueCreateChainTx( func (w *walletWithOptions) IssueCreateSubnetTx( owner *secp256k1fx.OutputOwners, options ...common.Option, -) (ids.ID, error) { +) (*txs.Tx, error) { return w.Wallet.IssueCreateSubnetTx( owner, common.UnionOptions(w.options, options)..., ) } +func (w *walletWithOptions) IssueTransferSubnetOwnershipTx( + subnetID ids.ID, + owner 
*secp256k1fx.OutputOwners, + options ...common.Option, +) (*txs.Tx, error) { + return w.Wallet.IssueTransferSubnetOwnershipTx( + subnetID, + owner, + common.UnionOptions(w.options, options)..., + ) +} + func (w *walletWithOptions) IssueImportTx( sourceChainID ids.ID, to *secp256k1fx.OutputOwners, options ...common.Option, -) (ids.ID, error) { +) (*txs.Tx, error) { return w.Wallet.IssueImportTx( sourceChainID, to, @@ -140,7 +152,7 @@ func (w *walletWithOptions) IssueExportTx( chainID ids.ID, outputs []*avax.TransferableOutput, options ...common.Option, -) (ids.ID, error) { +) (*txs.Tx, error) { return w.Wallet.IssueExportTx( chainID, outputs, @@ -164,7 +176,7 @@ func (w *walletWithOptions) IssueTransformSubnetTx( maxValidatorWeightFactor byte, uptimeRequirement uint32, options ...common.Option, -) (ids.ID, error) { +) (*txs.Tx, error) { return w.Wallet.IssueTransformSubnetTx( subnetID, assetID, @@ -192,7 +204,7 @@ func (w *walletWithOptions) IssueAddPermissionlessValidatorTx( delegationRewardsOwner *secp256k1fx.OutputOwners, shares uint32, options ...common.Option, -) (ids.ID, error) { +) (*txs.Tx, error) { return w.Wallet.IssueAddPermissionlessValidatorTx( vdr, signer, @@ -209,7 +221,7 @@ func (w *walletWithOptions) IssueAddPermissionlessDelegatorTx( assetID ids.ID, rewardsOwner *secp256k1fx.OutputOwners, options ...common.Option, -) (ids.ID, error) { +) (*txs.Tx, error) { return w.Wallet.IssueAddPermissionlessDelegatorTx( vdr, assetID, @@ -221,7 +233,7 @@ func (w *walletWithOptions) IssueAddPermissionlessDelegatorTx( func (w *walletWithOptions) IssueUnsignedTx( utx txs.UnsignedTx, options ...common.Option, -) (ids.ID, error) { +) (*txs.Tx, error) { return w.Wallet.IssueUnsignedTx( utx, common.UnionOptions(w.options, options)..., @@ -231,7 +243,7 @@ func (w *walletWithOptions) IssueUnsignedTx( func (w *walletWithOptions) IssueTx( tx *txs.Tx, options ...common.Option, -) (ids.ID, error) { +) error { return w.Wallet.IssueTx( tx, common.UnionOptions(w.options, 
options)..., diff --git a/avalanchego/wallet/chain/x/backend.go b/avalanchego/wallet/chain/x/backend.go index 194440e0..a87e799f 100644 --- a/avalanchego/wallet/chain/x/backend.go +++ b/avalanchego/wallet/chain/x/backend.go @@ -1,29 +1,20 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package x import ( - stdcontext "context" - - "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/vms/avm/txs" - "github.com/ava-labs/avalanchego/vms/components/avax" + "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" + + stdcontext "context" ) var _ Backend = (*backend)(nil) -type ChainUTXOs interface { - AddUTXO(ctx stdcontext.Context, destinationChainID ids.ID, utxo *avax.UTXO) error - RemoveUTXO(ctx stdcontext.Context, sourceChainID, utxoID ids.ID) error - - UTXOs(ctx stdcontext.Context, sourceChainID ids.ID) ([]*avax.UTXO, error) - GetUTXO(ctx stdcontext.Context, sourceChainID, utxoID ids.ID) (*avax.UTXO, error) -} - // Backend defines the full interface required to support an X-chain wallet. 
type Backend interface { - ChainUTXOs + common.ChainUTXOs BuilderBackend SignerBackend @@ -32,17 +23,13 @@ type Backend interface { type backend struct { Context - ChainUTXOs - - chainID ids.ID + common.ChainUTXOs } -func NewBackend(ctx Context, chainID ids.ID, utxos ChainUTXOs) Backend { +func NewBackend(ctx Context, utxos common.ChainUTXOs) Backend { return &backend{ Context: ctx, ChainUTXOs: utxos, - - chainID: chainID, } } @@ -56,19 +43,20 @@ func (b *backend) AcceptTx(ctx stdcontext.Context, tx *txs.Tx) error { return err } + chainID := b.Context.BlockchainID() inputUTXOs := tx.Unsigned.InputUTXOs() for _, utxoID := range inputUTXOs { if utxoID.Symbol { continue } - if err := b.RemoveUTXO(ctx, b.chainID, utxoID.InputID()); err != nil { + if err := b.RemoveUTXO(ctx, chainID, utxoID.InputID()); err != nil { return err } } outputUTXOs := tx.UTXOs() for _, utxo := range outputUTXOs { - if err := b.AddUTXO(ctx, b.chainID, utxo); err != nil { + if err := b.AddUTXO(ctx, chainID, utxo); err != nil { return err } } diff --git a/avalanchego/wallet/chain/x/backend_visitor.go b/avalanchego/wallet/chain/x/backend_visitor.go index d6176384..0bf9ac04 100644 --- a/avalanchego/wallet/chain/x/backend_visitor.go +++ b/avalanchego/wallet/chain/x/backend_visitor.go @@ -1,14 +1,14 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package x import ( - stdcontext "context" - "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/vms/avm/txs" "github.com/ava-labs/avalanchego/vms/components/avax" + + stdcontext "context" ) var _ txs.Visitor = (*backendVisitor)(nil) diff --git a/avalanchego/wallet/chain/x/builder.go b/avalanchego/wallet/chain/x/builder.go index 0b639a77..330ed69a 100644 --- a/avalanchego/wallet/chain/x/builder.go +++ b/avalanchego/wallet/chain/x/builder.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. 
All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package x @@ -7,8 +7,6 @@ import ( "errors" "fmt" - stdcontext "context" - "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/math" @@ -20,12 +18,20 @@ import ( "github.com/ava-labs/avalanchego/vms/propertyfx" "github.com/ava-labs/avalanchego/vms/secp256k1fx" "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" + + stdcontext "context" ) var ( errNoChangeAddress = errors.New("no possible change address") errInsufficientFunds = errors.New("insufficient funds") + fxIndexToID = map[uint32]ids.ID{ + 0: secp256k1fx.ID, + 1: nftfx.ID, + 2: propertyfx.ID, + } + _ Builder = (*builder)(nil) ) @@ -213,13 +219,14 @@ func (b *builder) NewBaseTx( outputs = append(outputs, changeOutputs...) avax.SortTransferableOutputs(outputs, Parser.Codec()) // sort the outputs - return &txs.BaseTx{BaseTx: avax.BaseTx{ + tx := &txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: b.backend.NetworkID(), BlockchainID: b.backend.BlockchainID(), Ins: inputs, Outs: outputs, Memo: ops.Memo(), - }}, nil + }} + return tx, b.initCtx(tx) } func (b *builder) NewCreateAssetTx( @@ -243,12 +250,14 @@ func (b *builder) NewCreateAssetTx( for fxIndex, outs := range initialState { state := &txs.InitialState{ FxIndex: fxIndex, + FxID: fxIndexToID[fxIndex], Outs: outs, } state.Sort(codec) // sort the outputs states = append(states, state) } + utils.Sort(states) // sort the initial states tx := &txs.CreateAssetTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: b.backend.NetworkID(), @@ -262,8 +271,7 @@ func (b *builder) NewCreateAssetTx( Denomination: denomination, States: states, } - utils.Sort(tx.States) // sort the initial states - return tx, nil + return tx, b.initCtx(tx) } func (b *builder) NewOperationTx( @@ -280,7 +288,7 @@ func (b *builder) NewOperationTx( } txs.SortOperations(operations, Parser.Codec()) - 
return &txs.OperationTx{ + tx := &txs.OperationTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: b.backend.NetworkID(), BlockchainID: b.backend.BlockchainID(), @@ -289,7 +297,8 @@ func (b *builder) NewOperationTx( Memo: ops.Memo(), }}, Ops: operations, - }, nil + } + return tx, b.initCtx(tx) } func (b *builder) NewOperationTxMintFT( @@ -380,6 +389,7 @@ func (b *builder) NewImportTx( importedInputs = append(importedInputs, &avax.TransferableInput{ UTXOID: utxo.UTXOID, Asset: utxo.Asset, + FxID: secp256k1fx.ID, In: &secp256k1fx.TransferInput{ Amt: out.Amt, Input: secp256k1fx.Input{ @@ -428,6 +438,7 @@ func (b *builder) NewImportTx( for assetID, amount := range importedAmounts { outputs = append(outputs, &avax.TransferableOutput{ Asset: avax.Asset{ID: assetID}, + FxID: secp256k1fx.ID, Out: &secp256k1fx.TransferOutput{ Amt: amount, OutputOwners: *to, @@ -436,7 +447,7 @@ func (b *builder) NewImportTx( } avax.SortTransferableOutputs(outputs, Parser.Codec()) - return &txs.ImportTx{ + tx := &txs.ImportTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: b.backend.NetworkID(), BlockchainID: b.backend.BlockchainID(), @@ -446,7 +457,8 @@ func (b *builder) NewImportTx( }}, SourceChain: chainID, ImportedIns: importedInputs, - }, nil + } + return tx, b.initCtx(tx) } func (b *builder) NewExportTx( @@ -473,7 +485,7 @@ func (b *builder) NewExportTx( } avax.SortTransferableOutputs(outputs, Parser.Codec()) - return &txs.ExportTx{ + tx := &txs.ExportTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: b.backend.NetworkID(), BlockchainID: b.backend.BlockchainID(), @@ -483,7 +495,8 @@ func (b *builder) NewExportTx( }}, DestinationChain: chainID, ExportedOuts: outputs, - }, nil + } + return tx, b.initCtx(tx) } func (b *builder) getBalance( @@ -578,6 +591,7 @@ func (b *builder) spend( inputs = append(inputs, &avax.TransferableInput{ UTXOID: utxo.UTXOID, Asset: utxo.Asset, + FxID: secp256k1fx.ID, In: &secp256k1fx.TransferInput{ Amt: out.Amt, Input: secp256k1fx.Input{ @@ -587,7 
+601,7 @@ func (b *builder) spend( }) // Burn any value that should be burned - amountToBurn := math.Min( + amountToBurn := min( remainingAmountToBurn, // Amount we still need to burn out.Amt, // Amount available to burn ) @@ -596,6 +610,7 @@ func (b *builder) spend( // This input had extra value, so some of it must be returned outputs = append(outputs, &avax.TransferableOutput{ Asset: utxo.Asset, + FxID: secp256k1fx.ID, Out: &secp256k1fx.TransferOutput{ Amt: remainingAmount, OutputOwners: *changeOwner, @@ -656,6 +671,7 @@ func (b *builder) mintFTs( operations = append(operations, &txs.Operation{ Asset: utxo.Asset, UTXOIDs: []*avax.UTXOID{&utxo.UTXOID}, + FxID: secp256k1fx.ID, Op: &secp256k1fx.MintOperation{ MintInput: secp256k1fx.Input{ SigIndices: inputSigIndices, @@ -719,6 +735,7 @@ func (b *builder) mintNFTs( UTXOIDs: []*avax.UTXOID{ &utxo.UTXOID, }, + FxID: nftfx.ID, Op: &nftfx.MintOperation{ MintInput: secp256k1fx.Input{ SigIndices: inputSigIndices, @@ -775,6 +792,7 @@ func (b *builder) mintProperty( UTXOIDs: []*avax.UTXOID{ &utxo.UTXOID, }, + FxID: propertyfx.ID, Op: &propertyfx.MintOperation{ MintInput: secp256k1fx.Input{ SigIndices: inputSigIndices, @@ -831,6 +849,7 @@ func (b *builder) burnProperty( UTXOIDs: []*avax.UTXOID{ &utxo.UTXOID, }, + FxID: propertyfx.ID, Op: &propertyfx.BurnOperation{ Input: secp256k1fx.Input{ SigIndices: inputSigIndices, @@ -847,3 +866,13 @@ func (b *builder) burnProperty( } return operations, nil } + +func (b *builder) initCtx(tx txs.UnsignedTx) error { + ctx, err := newSnowContext(b.backend) + if err != nil { + return err + } + + tx.InitCtx(ctx) + return nil +} diff --git a/avalanchego/wallet/chain/x/builder_test.go b/avalanchego/wallet/chain/x/builder_test.go new file mode 100644 index 00000000..f4eb916b --- /dev/null +++ b/avalanchego/wallet/chain/x/builder_test.go @@ -0,0 +1,550 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package x + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/utils/units" + "github.com/ava-labs/avalanchego/vms/components/avax" + "github.com/ava-labs/avalanchego/vms/components/verify" + "github.com/ava-labs/avalanchego/vms/nftfx" + "github.com/ava-labs/avalanchego/vms/propertyfx" + "github.com/ava-labs/avalanchego/vms/secp256k1fx" + "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" +) + +var ( + testKeys = secp256k1.TestKeys() + + // We hard-code [avaxAssetID] and [subnetAssetID] to make + // ordering of UTXOs generated by [testUTXOsList] is reproducible + avaxAssetID = ids.Empty.Prefix(1789) + xChainID = ids.Empty.Prefix(2021) + nftAssetID = ids.Empty.Prefix(2022) + propertyAssetID = ids.Empty.Prefix(2023) + + testCtx = NewContext( + constants.UnitTestID, + xChainID, + avaxAssetID, + units.MicroAvax, // BaseTxFee + 99*units.MilliAvax, // CreateAssetTxFee + ) +) + +// These tests create and sign a tx, then verify that utxos included +// in the tx are exactly necessary to pay fees for it + +func TestBaseTx(t *testing.T) { + var ( + require = require.New(t) + + // backend + utxosKey = testKeys[1] + utxos = makeTestUTXOs(utxosKey) + genericBackend = common.NewDeterministicChainUTXOs( + require, + map[ids.ID][]*avax.UTXO{ + xChainID: utxos, + }, + ) + backend = NewBackend(testCtx, genericBackend) + + // builder + utxoAddr = utxosKey.Address() + builder = NewBuilder(set.Of(utxoAddr), backend) + + // data to build the transaction + outputsToMove = []*avax.TransferableOutput{{ + Asset: avax.Asset{ID: avaxAssetID}, + Out: &secp256k1fx.TransferOutput{ + Amt: 7 * units.Avax, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{utxoAddr}, + }, + }, + }} + ) + + utx, err := 
builder.NewBaseTx( + outputsToMove, + ) + require.NoError(err) + + // check UTXOs selection and fee financing + ins := utx.Ins + outs := utx.Outs + require.Len(ins, 2) + require.Len(outs, 2) + + expectedConsumed := testCtx.BaseTxFee() + consumed := ins[0].In.Amount() + ins[1].In.Amount() - outs[0].Out.Amount() - outs[1].Out.Amount() + require.Equal(expectedConsumed, consumed) + require.Equal(outputsToMove[0], outs[1]) +} + +func TestCreateAssetTx(t *testing.T) { + require := require.New(t) + + var ( + // backend + utxosKey = testKeys[1] + utxos = makeTestUTXOs(utxosKey) + genericBackend = common.NewDeterministicChainUTXOs( + require, + map[ids.ID][]*avax.UTXO{ + xChainID: utxos, + }, + ) + backend = NewBackend(testCtx, genericBackend) + + // builder + utxoAddr = utxosKey.Address() + builder = NewBuilder(set.Of(utxoAddr), backend) + + // data to build the transaction + assetName = "Team Rocket" + symbol = "TR" + denomination uint8 = 0 + initialState = map[uint32][]verify.State{ + 0: { + &secp256k1fx.MintOutput{ + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{testKeys[0].PublicKey().Address()}, + }, + }, &secp256k1fx.MintOutput{ + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{testKeys[0].PublicKey().Address()}, + }, + }, + }, + 1: { + &nftfx.MintOutput{ + GroupID: 1, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{testKeys[1].PublicKey().Address()}, + }, + }, + &nftfx.MintOutput{ + GroupID: 2, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{testKeys[1].PublicKey().Address()}, + }, + }, + }, + 2: { + &propertyfx.MintOutput{ + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{testKeys[2].PublicKey().Address()}, + }, + }, + &propertyfx.MintOutput{ + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{testKeys[2].PublicKey().Address()}, + }, + }, + }, + } + ) + + utx, err := 
builder.NewCreateAssetTx( + assetName, + symbol, + denomination, + initialState, + ) + require.NoError(err) + + // check UTXOs selection and fee financing + ins := utx.Ins + outs := utx.Outs + require.Len(ins, 2) + require.Len(outs, 1) + + expectedConsumed := testCtx.CreateAssetTxFee() + consumed := ins[0].In.Amount() + ins[1].In.Amount() - outs[0].Out.Amount() + require.Equal(expectedConsumed, consumed) +} + +func TestMintNFTOperation(t *testing.T) { + require := require.New(t) + + var ( + // backend + utxosKey = testKeys[1] + utxos = makeTestUTXOs(utxosKey) + genericBackend = common.NewDeterministicChainUTXOs( + require, + map[ids.ID][]*avax.UTXO{ + xChainID: utxos, + }, + ) + backend = NewBackend(testCtx, genericBackend) + + // builder + utxoAddr = utxosKey.Address() + builder = NewBuilder(set.Of(utxoAddr), backend) + + // data to build the transaction + payload = []byte{'h', 'e', 'l', 'l', 'o'} + NFTOwner = &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{utxoAddr}, + } + ) + + utx, err := builder.NewOperationTxMintNFT( + nftAssetID, + payload, + []*secp256k1fx.OutputOwners{NFTOwner}, + ) + require.NoError(err) + + // check UTXOs selection and fee financing + ins := utx.Ins + outs := utx.Outs + require.Len(ins, 1) + require.Len(outs, 1) + + expectedConsumed := testCtx.BaseTxFee() + consumed := ins[0].In.Amount() - outs[0].Out.Amount() + require.Equal(expectedConsumed, consumed) +} + +func TestMintFTOperation(t *testing.T) { + require := require.New(t) + + var ( + // backend + utxosKey = testKeys[1] + utxos = makeTestUTXOs(utxosKey) + genericBackend = common.NewDeterministicChainUTXOs( + require, + map[ids.ID][]*avax.UTXO{ + xChainID: utxos, + }, + ) + backend = NewBackend(testCtx, genericBackend) + + // builder + utxoAddr = utxosKey.Address() + builder = NewBuilder(set.Of(utxoAddr), backend) + + // data to build the transaction + outputs = map[ids.ID]*secp256k1fx.TransferOutput{ + nftAssetID: { + Amt: 1, + OutputOwners: 
secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{utxoAddr}, + }, + }, + } + ) + + utx, err := builder.NewOperationTxMintFT( + outputs, + ) + require.NoError(err) + + // check UTXOs selection and fee financing + ins := utx.Ins + outs := utx.Outs + require.Len(ins, 1) + require.Len(outs, 1) + + expectedConsumed := testCtx.BaseTxFee() + consumed := ins[0].In.Amount() - outs[0].Out.Amount() + require.Equal(expectedConsumed, consumed) +} + +func TestMintPropertyOperation(t *testing.T) { + require := require.New(t) + + var ( + // backend + utxosKey = testKeys[1] + utxos = makeTestUTXOs(utxosKey) + genericBackend = common.NewDeterministicChainUTXOs( + require, + map[ids.ID][]*avax.UTXO{ + xChainID: utxos, + }, + ) + backend = NewBackend(testCtx, genericBackend) + + // builder + utxoAddr = utxosKey.Address() + builder = NewBuilder(set.Of(utxoAddr), backend) + + // data to build the transaction + propertyOwner = &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{utxoAddr}, + } + ) + + utx, err := builder.NewOperationTxMintProperty( + propertyAssetID, + propertyOwner, + ) + require.NoError(err) + + // check UTXOs selection and fee financing + ins := utx.Ins + outs := utx.Outs + require.Len(ins, 1) + require.Len(outs, 1) + + expectedConsumed := testCtx.BaseTxFee() + consumed := ins[0].In.Amount() - outs[0].Out.Amount() + require.Equal(expectedConsumed, consumed) +} + +func TestBurnPropertyOperation(t *testing.T) { + require := require.New(t) + + var ( + // backend + utxosKey = testKeys[1] + utxos = makeTestUTXOs(utxosKey) + genericBackend = common.NewDeterministicChainUTXOs( + require, + map[ids.ID][]*avax.UTXO{ + xChainID: utxos, + }, + ) + backend = NewBackend(testCtx, genericBackend) + + // builder + utxoAddr = utxosKey.Address() + builder = NewBuilder(set.Of(utxoAddr), backend) + ) + + utx, err := builder.NewOperationTxBurnProperty( + propertyAssetID, + ) + require.NoError(err) + + // check UTXOs selection and fee financing + ins := utx.Ins 
+ outs := utx.Outs + require.Len(ins, 1) + require.Len(outs, 1) + + expectedConsumed := testCtx.BaseTxFee() + consumed := ins[0].In.Amount() - outs[0].Out.Amount() + require.Equal(expectedConsumed, consumed) +} + +func TestImportTx(t *testing.T) { + var ( + require = require.New(t) + + // backend + utxosKey = testKeys[1] + utxos = makeTestUTXOs(utxosKey) + sourceChainID = ids.GenerateTestID() + importedUTXOs = utxos[:1] + genericBackend = common.NewDeterministicChainUTXOs( + require, + map[ids.ID][]*avax.UTXO{ + xChainID: utxos, + sourceChainID: importedUTXOs, + }, + ) + + backend = NewBackend(testCtx, genericBackend) + + // builder + utxoAddr = utxosKey.Address() + builder = NewBuilder(set.Of(utxoAddr), backend) + + // data to build the transaction + importKey = testKeys[0] + importTo = &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + importKey.Address(), + }, + } + ) + + utx, err := builder.NewImportTx( + sourceChainID, + importTo, + ) + require.NoError(err) + + // check UTXOs selection and fee financing + ins := utx.Ins + outs := utx.Outs + importedIns := utx.ImportedIns + require.Empty(ins) + require.Len(importedIns, 1) + require.Len(outs, 1) + + expectedConsumed := testCtx.BaseTxFee() + consumed := importedIns[0].In.Amount() - outs[0].Out.Amount() + require.Equal(expectedConsumed, consumed) +} + +func TestExportTx(t *testing.T) { + var ( + require = require.New(t) + + // backend + utxosKey = testKeys[1] + utxos = makeTestUTXOs(utxosKey) + genericBackend = common.NewDeterministicChainUTXOs( + require, + map[ids.ID][]*avax.UTXO{ + xChainID: utxos, + }, + ) + backend = NewBackend(testCtx, genericBackend) + + // builder + utxoAddr = utxosKey.Address() + builder = NewBuilder(set.Of(utxoAddr), backend) + + // data to build the transaction + subnetID = ids.GenerateTestID() + exportedOutputs = []*avax.TransferableOutput{{ + Asset: avax.Asset{ID: avaxAssetID}, + Out: &secp256k1fx.TransferOutput{ + Amt: 7 * units.Avax, + OutputOwners: 
secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{utxoAddr}, + }, + }, + }} + ) + + utx, err := builder.NewExportTx( + subnetID, + exportedOutputs, + ) + require.NoError(err) + + // check UTXOs selection and fee financing + ins := utx.Ins + outs := utx.Outs + require.Len(ins, 2) + require.Len(outs, 1) + + expectedConsumed := testCtx.BaseTxFee() + exportedOutputs[0].Out.Amount() + consumed := ins[0].In.Amount() + ins[1].In.Amount() - outs[0].Out.Amount() + require.Equal(expectedConsumed, consumed) + require.Equal(utx.ExportedOuts, exportedOutputs) +} + +func makeTestUTXOs(utxosKey *secp256k1.PrivateKey) []*avax.UTXO { + // Note: we avoid ids.GenerateTestNodeID here to make sure that UTXO IDs won't change + // run by run. This simplifies checking what utxos are included in the built txs. + const utxosOffset uint64 = 2024 + + return []*avax.UTXO{ // currently, the wallet scans UTXOs in the order provided here + { // a small UTXO first, which should not be enough to pay fees + UTXOID: avax.UTXOID{ + TxID: ids.Empty.Prefix(utxosOffset), + OutputIndex: uint32(utxosOffset), + }, + Asset: avax.Asset{ID: avaxAssetID}, + Out: &secp256k1fx.TransferOutput{ + Amt: 2 * units.MilliAvax, + OutputOwners: secp256k1fx.OutputOwners{ + Locktime: 0, + Addrs: []ids.ShortID{utxosKey.PublicKey().Address()}, + Threshold: 1, + }, + }, + }, + { + UTXOID: avax.UTXOID{ + TxID: ids.Empty.Prefix(utxosOffset + 2), + OutputIndex: uint32(utxosOffset + 2), + }, + Asset: avax.Asset{ID: nftAssetID}, + Out: &nftfx.MintOutput{ + GroupID: 1, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{utxosKey.PublicKey().Address()}, + }, + }, + }, + { + UTXOID: avax.UTXOID{ + TxID: ids.Empty.Prefix(utxosOffset + 3), + OutputIndex: uint32(utxosOffset + 3), + }, + Asset: avax.Asset{ID: nftAssetID}, + Out: &secp256k1fx.MintOutput{ + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{utxosKey.PublicKey().Address()}, + }, + }, + }, + { + UTXOID: 
avax.UTXOID{ + TxID: ids.Empty.Prefix(utxosOffset + 4), + OutputIndex: uint32(utxosOffset + 4), + }, + Asset: avax.Asset{ID: propertyAssetID}, + Out: &propertyfx.MintOutput{ + OutputOwners: secp256k1fx.OutputOwners{ + Locktime: 0, + Addrs: []ids.ShortID{utxosKey.PublicKey().Address()}, + Threshold: 1, + }, + }, + }, + { + UTXOID: avax.UTXOID{ + TxID: ids.Empty.Prefix(utxosOffset + 5), + OutputIndex: uint32(utxosOffset + 5), + }, + Asset: avax.Asset{ID: propertyAssetID}, + Out: &propertyfx.OwnedOutput{ + OutputOwners: secp256k1fx.OutputOwners{ + Locktime: 0, + Addrs: []ids.ShortID{utxosKey.PublicKey().Address()}, + Threshold: 1, + }, + }, + }, + { // a large UTXO last, which should be enough to pay any fee by itself + UTXOID: avax.UTXOID{ + TxID: ids.Empty.Prefix(utxosOffset + 6), + OutputIndex: uint32(utxosOffset + 6), + }, + Asset: avax.Asset{ID: avaxAssetID}, + Out: &secp256k1fx.TransferOutput{ + Amt: 9 * units.Avax, + OutputOwners: secp256k1fx.OutputOwners{ + Locktime: 0, + Addrs: []ids.ShortID{utxosKey.PublicKey().Address()}, + Threshold: 1, + }, + }, + }, + } +} diff --git a/avalanchego/wallet/chain/x/builder_with_options.go b/avalanchego/wallet/chain/x/builder_with_options.go index 63d55400..c2b65b05 100644 --- a/avalanchego/wallet/chain/x/builder_with_options.go +++ b/avalanchego/wallet/chain/x/builder_with_options.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package x diff --git a/avalanchego/wallet/chain/x/constants.go b/avalanchego/wallet/chain/x/constants.go index 943b3e1a..346eed9b 100644 --- a/avalanchego/wallet/chain/x/constants.go +++ b/avalanchego/wallet/chain/x/constants.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package x @@ -6,7 +6,7 @@ package x import ( "time" - "github.com/ava-labs/avalanchego/vms/avm/blocks" + "github.com/ava-labs/avalanchego/vms/avm/block" "github.com/ava-labs/avalanchego/vms/avm/fxs" "github.com/ava-labs/avalanchego/vms/nftfx" "github.com/ava-labs/avalanchego/vms/propertyfx" @@ -20,11 +20,12 @@ const ( ) // Parser to support serialization and deserialization -var Parser blocks.Parser +var Parser block.Parser func init() { var err error - Parser, err = blocks.NewParser( + Parser, err = block.NewParser( + time.Time{}, time.Time{}, []fxs.Fx{ &secp256k1fx.Fx{}, diff --git a/avalanchego/wallet/chain/x/context.go b/avalanchego/wallet/chain/x/context.go index bdabe2d9..7218bc8c 100644 --- a/avalanchego/wallet/chain/x/context.go +++ b/avalanchego/wallet/chain/x/context.go @@ -1,16 +1,21 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package x import ( - stdcontext "context" - "github.com/ava-labs/avalanchego/api/info" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/vms/avm" + + stdcontext "context" ) +const Alias = "X" + var _ Context = (*context)(nil) type Context interface { @@ -31,7 +36,7 @@ type context struct { func NewContextFromURI(ctx stdcontext.Context, uri string) (Context, error) { infoClient := info.NewClient(uri) - xChainClient := avm.NewClient(uri, "X") + xChainClient := avm.NewClient(uri, Alias) return NewContextFromClients(ctx, infoClient, xChainClient) } @@ -45,7 +50,7 @@ func NewContextFromClients( return nil, err } - chainID, err := infoClient.GetBlockchainID(ctx, "X") + chainID, err := infoClient.GetBlockchainID(ctx, Alias) if err != nil { return nil, err } @@ -104,3 +109,17 @@ func (c *context) BaseTxFee() uint64 { func (c *context) 
CreateAssetTxFee() uint64 { return c.createAssetTxFee } + +func newSnowContext(c Context) (*snow.Context, error) { + chainID := c.BlockchainID() + lookup := ids.NewAliaser() + return &snow.Context{ + NetworkID: c.NetworkID(), + SubnetID: constants.PrimaryNetworkID, + ChainID: chainID, + XChainID: chainID, + AVAXAssetID: c.AVAXAssetID(), + Log: logging.NoLog{}, + BCLookup: lookup, + }, lookup.Alias(chainID, Alias) +} diff --git a/avalanchego/wallet/chain/x/signer.go b/avalanchego/wallet/chain/x/signer.go index b393d767..9bc8734e 100644 --- a/avalanchego/wallet/chain/x/signer.go +++ b/avalanchego/wallet/chain/x/signer.go @@ -1,43 +1,28 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package x import ( - "errors" - "fmt" - - stdcontext "context" - - "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/crypto/keychain" - "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" - "github.com/ava-labs/avalanchego/vms/avm/fxs" "github.com/ava-labs/avalanchego/vms/avm/txs" "github.com/ava-labs/avalanchego/vms/components/avax" - "github.com/ava-labs/avalanchego/vms/components/verify" - "github.com/ava-labs/avalanchego/vms/nftfx" - "github.com/ava-labs/avalanchego/vms/propertyfx" - "github.com/ava-labs/avalanchego/vms/secp256k1fx" -) -var ( - errUnknownTxType = errors.New("unknown tx type") - errUnknownInputType = errors.New("unknown input type") - errUnknownOpType = errors.New("unknown operation type") - errInvalidNumUTXOsInOp = errors.New("invalid number of UTXOs in operation") - errUnknownCredentialType = errors.New("unknown credential type") - errUnknownOutputType = errors.New("unknown output type") - errInvalidUTXOSigIndex = errors.New("invalid UTXO signature index") - - emptySig [secp256k1.SignatureLen]byte - - _ Signer = (*signer)(nil) + stdcontext "context" ) +var _ Signer 
= (*signer)(nil) + type Signer interface { - SignUnsigned(ctx stdcontext.Context, tx txs.UnsignedTx) (*txs.Tx, error) + // Sign adds as many missing signatures as possible to the provided + // transaction. + // + // If there are already some signatures on the transaction, those signatures + // will not be removed. + // + // If the signer doesn't have the ability to provide a required signature, + // the signature slot will be skipped without reporting an error. Sign(ctx stdcontext.Context, tx *txs.Tx) error } @@ -57,278 +42,20 @@ func NewSigner(kc keychain.Keychain, backend SignerBackend) Signer { } } -func (s *signer) SignUnsigned(ctx stdcontext.Context, utx txs.UnsignedTx) (*txs.Tx, error) { - tx := &txs.Tx{Unsigned: utx} - return tx, s.Sign(ctx, tx) -} - -// TODO: implement txs.Visitor here func (s *signer) Sign(ctx stdcontext.Context, tx *txs.Tx) error { - switch utx := tx.Unsigned.(type) { - case *txs.BaseTx: - return s.signBaseTx(ctx, tx, utx) - case *txs.CreateAssetTx: - return s.signCreateAssetTx(ctx, tx, utx) - case *txs.OperationTx: - return s.signOperationTx(ctx, tx, utx) - case *txs.ImportTx: - return s.signImportTx(ctx, tx, utx) - case *txs.ExportTx: - return s.signExportTx(ctx, tx, utx) - default: - return fmt.Errorf("%w: %T", errUnknownTxType, tx.Unsigned) - } -} - -func (s *signer) signBaseTx(ctx stdcontext.Context, tx *txs.Tx, utx *txs.BaseTx) error { - txCreds, txSigners, err := s.getSigners(ctx, utx.BlockchainID, utx.Ins) - if err != nil { - return err - } - return sign(tx, txCreds, txSigners) -} - -func (s *signer) signCreateAssetTx(ctx stdcontext.Context, tx *txs.Tx, utx *txs.CreateAssetTx) error { - txCreds, txSigners, err := s.getSigners(ctx, utx.BlockchainID, utx.Ins) - if err != nil { - return err - } - return sign(tx, txCreds, txSigners) -} - -func (s *signer) signOperationTx(ctx stdcontext.Context, tx *txs.Tx, utx *txs.OperationTx) error { - txCreds, txSigners, err := s.getSigners(ctx, utx.BlockchainID, utx.Ins) - if err != nil { - return 
err - } - txOpsCreds, txOpsSigners, err := s.getOpsSigners(ctx, utx.BlockchainID, utx.Ops) - if err != nil { - return err - } - txCreds = append(txCreds, txOpsCreds...) - txSigners = append(txSigners, txOpsSigners...) - return sign(tx, txCreds, txSigners) -} - -func (s *signer) signImportTx(ctx stdcontext.Context, tx *txs.Tx, utx *txs.ImportTx) error { - txCreds, txSigners, err := s.getSigners(ctx, utx.BlockchainID, utx.Ins) - if err != nil { - return err - } - txImportCreds, txImportSigners, err := s.getSigners(ctx, utx.SourceChain, utx.ImportedIns) - if err != nil { - return err - } - txCreds = append(txCreds, txImportCreds...) - txSigners = append(txSigners, txImportSigners...) - return sign(tx, txCreds, txSigners) -} - -func (s *signer) signExportTx(ctx stdcontext.Context, tx *txs.Tx, utx *txs.ExportTx) error { - txCreds, txSigners, err := s.getSigners(ctx, utx.BlockchainID, utx.Ins) - if err != nil { - return err - } - return sign(tx, txCreds, txSigners) -} - -func (s *signer) getSigners(ctx stdcontext.Context, sourceChainID ids.ID, ins []*avax.TransferableInput) ([]verify.Verifiable, [][]keychain.Signer, error) { - txCreds := make([]verify.Verifiable, len(ins)) - txSigners := make([][]keychain.Signer, len(ins)) - for credIndex, transferInput := range ins { - txCreds[credIndex] = &secp256k1fx.Credential{} - input, ok := transferInput.In.(*secp256k1fx.TransferInput) - if !ok { - return nil, nil, errUnknownInputType - } - - inputSigners := make([]keychain.Signer, len(input.SigIndices)) - txSigners[credIndex] = inputSigners - - utxoID := transferInput.InputID() - utxo, err := s.backend.GetUTXO(ctx, sourceChainID, utxoID) - if err == database.ErrNotFound { - // If we don't have access to the UTXO, then we can't sign this - // transaction. However, we can attempt to partially sign it. 
- continue - } - if err != nil { - return nil, nil, err - } - - out, ok := utxo.Out.(*secp256k1fx.TransferOutput) - if !ok { - return nil, nil, errUnknownOutputType - } - - for sigIndex, addrIndex := range input.SigIndices { - if addrIndex >= uint32(len(out.Addrs)) { - return nil, nil, errInvalidUTXOSigIndex - } - - addr := out.Addrs[addrIndex] - key, ok := s.kc.Get(addr) - if !ok { - // If we don't have access to the key, then we can't sign this - // transaction. However, we can attempt to partially sign it. - continue - } - inputSigners[sigIndex] = key - } - } - return txCreds, txSigners, nil -} - -func (s *signer) getOpsSigners(ctx stdcontext.Context, sourceChainID ids.ID, ops []*txs.Operation) ([]verify.Verifiable, [][]keychain.Signer, error) { - txCreds := make([]verify.Verifiable, len(ops)) - txSigners := make([][]keychain.Signer, len(ops)) - for credIndex, op := range ops { - var input *secp256k1fx.Input - switch op := op.Op.(type) { - case *secp256k1fx.MintOperation: - txCreds[credIndex] = &secp256k1fx.Credential{} - input = &op.MintInput - case *nftfx.MintOperation: - txCreds[credIndex] = &nftfx.Credential{} - input = &op.MintInput - case *nftfx.TransferOperation: - txCreds[credIndex] = &nftfx.Credential{} - input = &op.Input - case *propertyfx.MintOperation: - txCreds[credIndex] = &propertyfx.Credential{} - input = &op.MintInput - case *propertyfx.BurnOperation: - txCreds[credIndex] = &propertyfx.Credential{} - input = &op.Input - default: - return nil, nil, errUnknownOpType - } - - inputSigners := make([]keychain.Signer, len(input.SigIndices)) - txSigners[credIndex] = inputSigners - - if len(op.UTXOIDs) != 1 { - return nil, nil, errInvalidNumUTXOsInOp - } - utxoID := op.UTXOIDs[0].InputID() - utxo, err := s.backend.GetUTXO(ctx, sourceChainID, utxoID) - if err == database.ErrNotFound { - // If we don't have access to the UTXO, then we can't sign this - // transaction. However, we can attempt to partially sign it. 
- continue - } - if err != nil { - return nil, nil, err - } - - var addrs []ids.ShortID - switch out := utxo.Out.(type) { - case *secp256k1fx.MintOutput: - addrs = out.Addrs - case *nftfx.MintOutput: - addrs = out.Addrs - case *nftfx.TransferOutput: - addrs = out.Addrs - case *propertyfx.MintOutput: - addrs = out.Addrs - case *propertyfx.OwnedOutput: - addrs = out.Addrs - default: - return nil, nil, errUnknownOutputType - } - - for sigIndex, addrIndex := range input.SigIndices { - if addrIndex >= uint32(len(addrs)) { - return nil, nil, errInvalidUTXOSigIndex - } - - addr := addrs[addrIndex] - key, ok := s.kc.Get(addr) - if !ok { - // If we don't have access to the key, then we can't sign this - // transaction. However, we can attempt to partially sign it. - continue - } - inputSigners[sigIndex] = key - } - } - return txCreds, txSigners, nil -} - -func sign(tx *txs.Tx, creds []verify.Verifiable, txSigners [][]keychain.Signer) error { - codec := Parser.Codec() - unsignedBytes, err := codec.Marshal(txs.CodecVersion, &tx.Unsigned) - if err != nil { - return fmt.Errorf("couldn't marshal unsigned tx: %w", err) - } - - if expectedLen := len(txSigners); expectedLen != len(tx.Creds) { - tx.Creds = make([]*fxs.FxCredential, expectedLen) - } - - sigCache := make(map[ids.ShortID][secp256k1.SignatureLen]byte) - for credIndex, inputSigners := range txSigners { - fxCred := tx.Creds[credIndex] - if fxCred == nil { - fxCred = &fxs.FxCredential{} - tx.Creds[credIndex] = fxCred - } - credIntf := fxCred.Verifiable - if credIntf == nil { - credIntf = creds[credIndex] - fxCred.Verifiable = credIntf - } - - var cred *secp256k1fx.Credential - switch credImpl := credIntf.(type) { - case *secp256k1fx.Credential: - cred = credImpl - case *nftfx.Credential: - cred = &credImpl.Credential - case *propertyfx.Credential: - cred = &credImpl.Credential - default: - return errUnknownCredentialType - } - - if expectedLen := len(inputSigners); expectedLen != len(cred.Sigs) { - cred.Sigs = 
make([][secp256k1.SignatureLen]byte, expectedLen) - } - - for sigIndex, signer := range inputSigners { - if signer == nil { - // If we don't have access to the key, then we can't sign this - // transaction. However, we can attempt to partially sign it. - continue - } - addr := signer.Address() - if sig := cred.Sigs[sigIndex]; sig != emptySig { - // If this signature has already been populated, we can just - // copy the needed signature for the future. - sigCache[addr] = sig - continue - } - - if sig, exists := sigCache[addr]; exists { - // If this key has already produced a signature, we can just - // copy the previous signature. - cred.Sigs[sigIndex] = sig - continue - } - - sig, err := signer.Sign(unsignedBytes) - if err != nil { - return fmt.Errorf("problem signing tx: %w", err) - } - copy(cred.Sigs[sigIndex][:], sig) - sigCache[addr] = cred.Sigs[sigIndex] - } - } - - signedBytes, err := codec.Marshal(txs.CodecVersion, tx) - if err != nil { - return fmt.Errorf("couldn't marshal tx: %w", err) - } - tx.SetBytes(unsignedBytes, signedBytes) - return nil + return tx.Unsigned.Visit(&signerVisitor{ + kc: s.kc, + backend: s.backend, + ctx: ctx, + tx: tx, + }) +} + +func SignUnsigned( + ctx stdcontext.Context, + signer Signer, + utx txs.UnsignedTx, +) (*txs.Tx, error) { + tx := &txs.Tx{Unsigned: utx} + return tx, signer.Sign(ctx, tx) } diff --git a/avalanchego/wallet/chain/x/signer_visitor.go b/avalanchego/wallet/chain/x/signer_visitor.go new file mode 100644 index 00000000..be442f55 --- /dev/null +++ b/avalanchego/wallet/chain/x/signer_visitor.go @@ -0,0 +1,300 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package x + +import ( + "errors" + "fmt" + + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/crypto/keychain" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + "github.com/ava-labs/avalanchego/vms/avm/fxs" + "github.com/ava-labs/avalanchego/vms/avm/txs" + "github.com/ava-labs/avalanchego/vms/components/avax" + "github.com/ava-labs/avalanchego/vms/components/verify" + "github.com/ava-labs/avalanchego/vms/nftfx" + "github.com/ava-labs/avalanchego/vms/propertyfx" + "github.com/ava-labs/avalanchego/vms/secp256k1fx" + + stdcontext "context" +) + +var ( + _ txs.Visitor = (*signerVisitor)(nil) + + errUnknownInputType = errors.New("unknown input type") + errUnknownOpType = errors.New("unknown operation type") + errInvalidNumUTXOsInOp = errors.New("invalid number of UTXOs in operation") + errUnknownCredentialType = errors.New("unknown credential type") + errUnknownOutputType = errors.New("unknown output type") + errInvalidUTXOSigIndex = errors.New("invalid UTXO signature index") + + emptySig [secp256k1.SignatureLen]byte +) + +// signerVisitor handles signing transactions for the signer +type signerVisitor struct { + kc keychain.Keychain + backend SignerBackend + ctx stdcontext.Context + tx *txs.Tx +} + +func (s *signerVisitor) BaseTx(tx *txs.BaseTx) error { + txCreds, txSigners, err := s.getSigners(s.ctx, tx.BlockchainID, tx.Ins) + if err != nil { + return err + } + return sign(s.tx, txCreds, txSigners) +} + +func (s *signerVisitor) CreateAssetTx(tx *txs.CreateAssetTx) error { + txCreds, txSigners, err := s.getSigners(s.ctx, tx.BlockchainID, tx.Ins) + if err != nil { + return err + } + return sign(s.tx, txCreds, txSigners) +} + +func (s *signerVisitor) OperationTx(tx *txs.OperationTx) error { + txCreds, txSigners, err := s.getSigners(s.ctx, tx.BlockchainID, tx.Ins) + if err != nil { + return err + } + txOpsCreds, txOpsSigners, err := s.getOpsSigners(s.ctx, tx.BlockchainID, tx.Ops) + if 
err != nil { + return err + } + txCreds = append(txCreds, txOpsCreds...) + txSigners = append(txSigners, txOpsSigners...) + return sign(s.tx, txCreds, txSigners) +} + +func (s *signerVisitor) ImportTx(tx *txs.ImportTx) error { + txCreds, txSigners, err := s.getSigners(s.ctx, tx.BlockchainID, tx.Ins) + if err != nil { + return err + } + txImportCreds, txImportSigners, err := s.getSigners(s.ctx, tx.SourceChain, tx.ImportedIns) + if err != nil { + return err + } + txCreds = append(txCreds, txImportCreds...) + txSigners = append(txSigners, txImportSigners...) + return sign(s.tx, txCreds, txSigners) +} + +func (s *signerVisitor) ExportTx(tx *txs.ExportTx) error { + txCreds, txSigners, err := s.getSigners(s.ctx, tx.BlockchainID, tx.Ins) + if err != nil { + return err + } + return sign(s.tx, txCreds, txSigners) +} + +func (s *signerVisitor) getSigners(ctx stdcontext.Context, sourceChainID ids.ID, ins []*avax.TransferableInput) ([]verify.Verifiable, [][]keychain.Signer, error) { + txCreds := make([]verify.Verifiable, len(ins)) + txSigners := make([][]keychain.Signer, len(ins)) + for credIndex, transferInput := range ins { + txCreds[credIndex] = &secp256k1fx.Credential{} + input, ok := transferInput.In.(*secp256k1fx.TransferInput) + if !ok { + return nil, nil, errUnknownInputType + } + + inputSigners := make([]keychain.Signer, len(input.SigIndices)) + txSigners[credIndex] = inputSigners + + utxoID := transferInput.InputID() + utxo, err := s.backend.GetUTXO(ctx, sourceChainID, utxoID) + if err == database.ErrNotFound { + // If we don't have access to the UTXO, then we can't sign this + // transaction. However, we can attempt to partially sign it. 
+ continue + } + if err != nil { + return nil, nil, err + } + + out, ok := utxo.Out.(*secp256k1fx.TransferOutput) + if !ok { + return nil, nil, errUnknownOutputType + } + + for sigIndex, addrIndex := range input.SigIndices { + if addrIndex >= uint32(len(out.Addrs)) { + return nil, nil, errInvalidUTXOSigIndex + } + + addr := out.Addrs[addrIndex] + key, ok := s.kc.Get(addr) + if !ok { + // If we don't have access to the key, then we can't sign this + // transaction. However, we can attempt to partially sign it. + continue + } + inputSigners[sigIndex] = key + } + } + return txCreds, txSigners, nil +} + +func (s *signerVisitor) getOpsSigners(ctx stdcontext.Context, sourceChainID ids.ID, ops []*txs.Operation) ([]verify.Verifiable, [][]keychain.Signer, error) { + txCreds := make([]verify.Verifiable, len(ops)) + txSigners := make([][]keychain.Signer, len(ops)) + for credIndex, op := range ops { + var input *secp256k1fx.Input + switch op := op.Op.(type) { + case *secp256k1fx.MintOperation: + txCreds[credIndex] = &secp256k1fx.Credential{} + input = &op.MintInput + case *nftfx.MintOperation: + txCreds[credIndex] = &nftfx.Credential{} + input = &op.MintInput + case *nftfx.TransferOperation: + txCreds[credIndex] = &nftfx.Credential{} + input = &op.Input + case *propertyfx.MintOperation: + txCreds[credIndex] = &propertyfx.Credential{} + input = &op.MintInput + case *propertyfx.BurnOperation: + txCreds[credIndex] = &propertyfx.Credential{} + input = &op.Input + default: + return nil, nil, errUnknownOpType + } + + inputSigners := make([]keychain.Signer, len(input.SigIndices)) + txSigners[credIndex] = inputSigners + + if len(op.UTXOIDs) != 1 { + return nil, nil, errInvalidNumUTXOsInOp + } + utxoID := op.UTXOIDs[0].InputID() + utxo, err := s.backend.GetUTXO(ctx, sourceChainID, utxoID) + if err == database.ErrNotFound { + // If we don't have access to the UTXO, then we can't sign this + // transaction. However, we can attempt to partially sign it. 
+ continue + } + if err != nil { + return nil, nil, err + } + + var addrs []ids.ShortID + switch out := utxo.Out.(type) { + case *secp256k1fx.MintOutput: + addrs = out.Addrs + case *nftfx.MintOutput: + addrs = out.Addrs + case *nftfx.TransferOutput: + addrs = out.Addrs + case *propertyfx.MintOutput: + addrs = out.Addrs + case *propertyfx.OwnedOutput: + addrs = out.Addrs + default: + return nil, nil, errUnknownOutputType + } + + for sigIndex, addrIndex := range input.SigIndices { + if addrIndex >= uint32(len(addrs)) { + return nil, nil, errInvalidUTXOSigIndex + } + + addr := addrs[addrIndex] + key, ok := s.kc.Get(addr) + if !ok { + // If we don't have access to the key, then we can't sign this + // transaction. However, we can attempt to partially sign it. + continue + } + inputSigners[sigIndex] = key + } + } + return txCreds, txSigners, nil +} + +func sign(tx *txs.Tx, creds []verify.Verifiable, txSigners [][]keychain.Signer) error { + codec := Parser.Codec() + unsignedBytes, err := codec.Marshal(txs.CodecVersion, &tx.Unsigned) + if err != nil { + return fmt.Errorf("couldn't marshal unsigned tx: %w", err) + } + + if expectedLen := len(txSigners); expectedLen != len(tx.Creds) { + tx.Creds = make([]*fxs.FxCredential, expectedLen) + } + + sigCache := make(map[ids.ShortID][secp256k1.SignatureLen]byte) + for credIndex, inputSigners := range txSigners { + fxCred := tx.Creds[credIndex] + if fxCred == nil { + fxCred = &fxs.FxCredential{} + tx.Creds[credIndex] = fxCred + } + credIntf := fxCred.Credential + if credIntf == nil { + credIntf = creds[credIndex] + fxCred.Credential = credIntf + } + + var cred *secp256k1fx.Credential + switch credImpl := credIntf.(type) { + case *secp256k1fx.Credential: + fxCred.FxID = secp256k1fx.ID + cred = credImpl + case *nftfx.Credential: + fxCred.FxID = nftfx.ID + cred = &credImpl.Credential + case *propertyfx.Credential: + fxCred.FxID = propertyfx.ID + cred = &credImpl.Credential + default: + return errUnknownCredentialType + } + + if 
expectedLen := len(inputSigners); expectedLen != len(cred.Sigs) { + cred.Sigs = make([][secp256k1.SignatureLen]byte, expectedLen) + } + + for sigIndex, signer := range inputSigners { + if signer == nil { + // If we don't have access to the key, then we can't sign this + // transaction. However, we can attempt to partially sign it. + continue + } + addr := signer.Address() + if sig := cred.Sigs[sigIndex]; sig != emptySig { + // If this signature has already been populated, we can just + // copy the needed signature for the future. + sigCache[addr] = sig + continue + } + + if sig, exists := sigCache[addr]; exists { + // If this key has already produced a signature, we can just + // copy the previous signature. + cred.Sigs[sigIndex] = sig + continue + } + + sig, err := signer.Sign(unsignedBytes) + if err != nil { + return fmt.Errorf("problem signing tx: %w", err) + } + copy(cred.Sigs[sigIndex][:], sig) + sigCache[addr] = cred.Sigs[sigIndex] + } + } + + signedBytes, err := codec.Marshal(txs.CodecVersion, tx) + if err != nil { + return fmt.Errorf("couldn't marshal tx: %w", err) + } + tx.SetBytes(unsignedBytes, signedBytes) + return nil +} diff --git a/avalanchego/wallet/chain/x/wallet.go b/avalanchego/wallet/chain/x/wallet.go index 8f0562a6..13491a24 100644 --- a/avalanchego/wallet/chain/x/wallet.go +++ b/avalanchego/wallet/chain/x/wallet.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package x @@ -38,7 +38,7 @@ type Wallet interface { IssueBaseTx( outputs []*avax.TransferableOutput, options ...common.Option, - ) (ids.ID, error) + ) (*txs.Tx, error) // IssueCreateAssetTx creates, signs, and issues a new asset. 
// @@ -55,7 +55,7 @@ type Wallet interface { denomination byte, initialState map[uint32][]verify.State, options ...common.Option, - ) (ids.ID, error) + ) (*txs.Tx, error) // IssueOperationTx creates, signs, and issues state changes on the UTXO // set. These state changes may be more complex than simple value transfers. @@ -64,7 +64,7 @@ type Wallet interface { IssueOperationTx( operations []*txs.Operation, options ...common.Option, - ) (ids.ID, error) + ) (*txs.Tx, error) // IssueOperationTxMintFT creates, signs, and issues a set of state changes // that mint new tokens for the requested assets. @@ -74,7 +74,7 @@ type Wallet interface { IssueOperationTxMintFT( outputs map[ids.ID]*secp256k1fx.TransferOutput, options ...common.Option, - ) (ids.ID, error) + ) (*txs.Tx, error) // IssueOperationTxMintNFT creates, signs, and issues a state change that // mints new NFTs for the requested asset. @@ -87,7 +87,7 @@ type Wallet interface { payload []byte, owners []*secp256k1fx.OutputOwners, options ...common.Option, - ) (ids.ID, error) + ) (*txs.Tx, error) // IssueOperationTxMintProperty creates, signs, and issues a state change // that mints a new property for the requested asset. @@ -98,7 +98,7 @@ type Wallet interface { assetID ids.ID, owner *secp256k1fx.OutputOwners, options ...common.Option, - ) (ids.ID, error) + ) (*txs.Tx, error) // IssueOperationTxBurnProperty creates, signs, and issues state changes // that burns all the properties of the requested asset. @@ -107,7 +107,7 @@ type Wallet interface { IssueOperationTxBurnProperty( assetID ids.ID, options ...common.Option, - ) (ids.ID, error) + ) (*txs.Tx, error) // IssueImportTx creates, signs, and issues an import transaction that // attempts to consume all the available UTXOs and import the funds to [to]. 
@@ -118,7 +118,7 @@ type Wallet interface { chainID ids.ID, to *secp256k1fx.OutputOwners, options ...common.Option, - ) (ids.ID, error) + ) (*txs.Tx, error) // IssueExportTx creates, signs, and issues an export transaction that // attempts to send all the provided [outputs] to the requested [chainID]. @@ -129,19 +129,19 @@ type Wallet interface { chainID ids.ID, outputs []*avax.TransferableOutput, options ...common.Option, - ) (ids.ID, error) + ) (*txs.Tx, error) // IssueUnsignedTx signs and issues the unsigned tx. IssueUnsignedTx( utx txs.UnsignedTx, options ...common.Option, - ) (ids.ID, error) + ) (*txs.Tx, error) // IssueTx issues the signed tx. IssueTx( tx *txs.Tx, options ...common.Option, - ) (ids.ID, error) + ) error } func NewWallet( @@ -176,10 +176,10 @@ func (w *wallet) Signer() Signer { func (w *wallet) IssueBaseTx( outputs []*avax.TransferableOutput, options ...common.Option, -) (ids.ID, error) { +) (*txs.Tx, error) { utx, err := w.builder.NewBaseTx(outputs, options...) if err != nil { - return ids.Empty, err + return nil, err } return w.IssueUnsignedTx(utx, options...) } @@ -190,10 +190,10 @@ func (w *wallet) IssueCreateAssetTx( denomination byte, initialState map[uint32][]verify.State, options ...common.Option, -) (ids.ID, error) { +) (*txs.Tx, error) { utx, err := w.builder.NewCreateAssetTx(name, symbol, denomination, initialState, options...) if err != nil { - return ids.Empty, err + return nil, err } return w.IssueUnsignedTx(utx, options...) } @@ -201,10 +201,10 @@ func (w *wallet) IssueCreateAssetTx( func (w *wallet) IssueOperationTx( operations []*txs.Operation, options ...common.Option, -) (ids.ID, error) { +) (*txs.Tx, error) { utx, err := w.builder.NewOperationTx(operations, options...) if err != nil { - return ids.Empty, err + return nil, err } return w.IssueUnsignedTx(utx, options...) 
} @@ -212,10 +212,10 @@ func (w *wallet) IssueOperationTx( func (w *wallet) IssueOperationTxMintFT( outputs map[ids.ID]*secp256k1fx.TransferOutput, options ...common.Option, -) (ids.ID, error) { +) (*txs.Tx, error) { utx, err := w.builder.NewOperationTxMintFT(outputs, options...) if err != nil { - return ids.Empty, err + return nil, err } return w.IssueUnsignedTx(utx, options...) } @@ -225,10 +225,10 @@ func (w *wallet) IssueOperationTxMintNFT( payload []byte, owners []*secp256k1fx.OutputOwners, options ...common.Option, -) (ids.ID, error) { +) (*txs.Tx, error) { utx, err := w.builder.NewOperationTxMintNFT(assetID, payload, owners, options...) if err != nil { - return ids.Empty, err + return nil, err } return w.IssueUnsignedTx(utx, options...) } @@ -237,10 +237,10 @@ func (w *wallet) IssueOperationTxMintProperty( assetID ids.ID, owner *secp256k1fx.OutputOwners, options ...common.Option, -) (ids.ID, error) { +) (*txs.Tx, error) { utx, err := w.builder.NewOperationTxMintProperty(assetID, owner, options...) if err != nil { - return ids.Empty, err + return nil, err } return w.IssueUnsignedTx(utx, options...) } @@ -248,10 +248,10 @@ func (w *wallet) IssueOperationTxMintProperty( func (w *wallet) IssueOperationTxBurnProperty( assetID ids.ID, options ...common.Option, -) (ids.ID, error) { +) (*txs.Tx, error) { utx, err := w.builder.NewOperationTxBurnProperty(assetID, options...) if err != nil { - return ids.Empty, err + return nil, err } return w.IssueUnsignedTx(utx, options...) } @@ -260,10 +260,10 @@ func (w *wallet) IssueImportTx( chainID ids.ID, to *secp256k1fx.OutputOwners, options ...common.Option, -) (ids.ID, error) { +) (*txs.Tx, error) { utx, err := w.builder.NewImportTx(chainID, to, options...) if err != nil { - return ids.Empty, err + return nil, err } return w.IssueUnsignedTx(utx, options...) 
} @@ -272,10 +272,10 @@ func (w *wallet) IssueExportTx( chainID ids.ID, outputs []*avax.TransferableOutput, options ...common.Option, -) (ids.ID, error) { +) (*txs.Tx, error) { utx, err := w.builder.NewExportTx(chainID, outputs, options...) if err != nil { - return ids.Empty, err + return nil, err } return w.IssueUnsignedTx(utx, options...) } @@ -283,43 +283,47 @@ func (w *wallet) IssueExportTx( func (w *wallet) IssueUnsignedTx( utx txs.UnsignedTx, options ...common.Option, -) (ids.ID, error) { +) (*txs.Tx, error) { ops := common.NewOptions(options) ctx := ops.Context() - tx, err := w.signer.SignUnsigned(ctx, utx) + tx, err := SignUnsigned(ctx, w.signer, utx) if err != nil { - return ids.Empty, err + return nil, err } - return w.IssueTx(tx, options...) + return tx, w.IssueTx(tx, options...) } func (w *wallet) IssueTx( tx *txs.Tx, options ...common.Option, -) (ids.ID, error) { +) error { ops := common.NewOptions(options) ctx := ops.Context() txID, err := w.client.IssueTx(ctx, tx.Bytes()) if err != nil { - return ids.Empty, err + return err + } + + if f := ops.PostIssuanceFunc(); f != nil { + f(txID) } if ops.AssumeDecided() { - return txID, w.Backend.AcceptTx(ctx, tx) + return w.Backend.AcceptTx(ctx, tx) } txStatus, err := w.client.ConfirmTx(ctx, txID, ops.PollFrequency()) if err != nil { - return txID, err + return err } if err := w.Backend.AcceptTx(ctx, tx); err != nil { - return txID, err + return err } if txStatus != choices.Accepted { - return txID, errNotAccepted + return errNotAccepted } - return txID, nil + return nil } diff --git a/avalanchego/wallet/chain/x/wallet_with_options.go b/avalanchego/wallet/chain/x/wallet_with_options.go index cc22540c..d62d02ef 100644 --- a/avalanchego/wallet/chain/x/wallet_with_options.go +++ b/avalanchego/wallet/chain/x/wallet_with_options.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. package x @@ -39,7 +39,7 @@ func (w *walletWithOptions) Builder() Builder { func (w *walletWithOptions) IssueBaseTx( outputs []*avax.TransferableOutput, options ...common.Option, -) (ids.ID, error) { +) (*txs.Tx, error) { return w.Wallet.IssueBaseTx( outputs, common.UnionOptions(w.options, options)..., @@ -52,7 +52,7 @@ func (w *walletWithOptions) IssueCreateAssetTx( denomination byte, initialState map[uint32][]verify.State, options ...common.Option, -) (ids.ID, error) { +) (*txs.Tx, error) { return w.Wallet.IssueCreateAssetTx( name, symbol, @@ -65,7 +65,7 @@ func (w *walletWithOptions) IssueCreateAssetTx( func (w *walletWithOptions) IssueOperationTx( operations []*txs.Operation, options ...common.Option, -) (ids.ID, error) { +) (*txs.Tx, error) { return w.Wallet.IssueOperationTx( operations, common.UnionOptions(w.options, options)..., @@ -75,7 +75,7 @@ func (w *walletWithOptions) IssueOperationTx( func (w *walletWithOptions) IssueOperationTxMintFT( outputs map[ids.ID]*secp256k1fx.TransferOutput, options ...common.Option, -) (ids.ID, error) { +) (*txs.Tx, error) { return w.Wallet.IssueOperationTxMintFT( outputs, common.UnionOptions(w.options, options)..., @@ -87,7 +87,7 @@ func (w *walletWithOptions) IssueOperationTxMintNFT( payload []byte, owners []*secp256k1fx.OutputOwners, options ...common.Option, -) (ids.ID, error) { +) (*txs.Tx, error) { return w.Wallet.IssueOperationTxMintNFT( assetID, payload, @@ -100,7 +100,7 @@ func (w *walletWithOptions) IssueOperationTxMintProperty( assetID ids.ID, owner *secp256k1fx.OutputOwners, options ...common.Option, -) (ids.ID, error) { +) (*txs.Tx, error) { return w.Wallet.IssueOperationTxMintProperty( assetID, owner, @@ -111,7 +111,7 @@ func (w *walletWithOptions) IssueOperationTxMintProperty( func (w *walletWithOptions) IssueOperationTxBurnProperty( assetID ids.ID, options ...common.Option, -) (ids.ID, error) { +) (*txs.Tx, error) { return w.Wallet.IssueOperationTxBurnProperty( 
assetID, common.UnionOptions(w.options, options)..., @@ -122,7 +122,7 @@ func (w *walletWithOptions) IssueImportTx( chainID ids.ID, to *secp256k1fx.OutputOwners, options ...common.Option, -) (ids.ID, error) { +) (*txs.Tx, error) { return w.Wallet.IssueImportTx( chainID, to, @@ -134,7 +134,7 @@ func (w *walletWithOptions) IssueExportTx( chainID ids.ID, outputs []*avax.TransferableOutput, options ...common.Option, -) (ids.ID, error) { +) (*txs.Tx, error) { return w.Wallet.IssueExportTx( chainID, outputs, @@ -145,7 +145,7 @@ func (w *walletWithOptions) IssueExportTx( func (w *walletWithOptions) IssueUnsignedTx( utx txs.UnsignedTx, options ...common.Option, -) (ids.ID, error) { +) (*txs.Tx, error) { return w.Wallet.IssueUnsignedTx( utx, common.UnionOptions(w.options, options)..., @@ -155,7 +155,7 @@ func (w *walletWithOptions) IssueUnsignedTx( func (w *walletWithOptions) IssueTx( tx *txs.Tx, options ...common.Option, -) (ids.ID, error) { +) error { return w.Wallet.IssueTx( tx, common.UnionOptions(w.options, options)..., diff --git a/avalanchego/wallet/subnet/primary/api.go b/avalanchego/wallet/subnet/primary/api.go index 4818178f..b5a6aa35 100644 --- a/avalanchego/wallet/subnet/primary/api.go +++ b/avalanchego/wallet/subnet/primary/api.go @@ -1,10 +1,14 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package primary import ( "context" + "fmt" + + "github.com/ava-labs/coreth/ethclient" + "github.com/ava-labs/coreth/plugin/evm" "github.com/ava-labs/avalanchego/api/info" "github.com/ava-labs/avalanchego/codec" @@ -16,8 +20,12 @@ import ( "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm" "github.com/ava-labs/avalanchego/vms/platformvm/txs" + "github.com/ava-labs/avalanchego/wallet/chain/c" "github.com/ava-labs/avalanchego/wallet/chain/p" "github.com/ava-labs/avalanchego/wallet/chain/x" + + walletcommon "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" + ethcommon "github.com/ethereum/go-ethereum/common" ) const ( @@ -46,21 +54,45 @@ type UTXOClient interface { ) ([][]byte, ids.ShortID, ids.ID, error) } -func FetchState(ctx context.Context, uri string, addrs set.Set[ids.ShortID]) (p.Context, x.Context, UTXOs, error) { +type AVAXState struct { + PClient platformvm.Client + PCTX p.Context + XClient avm.Client + XCTX x.Context + CClient evm.Client + CCTX c.Context + UTXOs walletcommon.UTXOs +} + +func FetchState( + ctx context.Context, + uri string, + addrs set.Set[ids.ShortID], +) ( + *AVAXState, + error, +) { infoClient := info.NewClient(uri) + pClient := platformvm.NewClient(uri) xClient := avm.NewClient(uri, "X") + cClient := evm.NewCChainClient(uri) pCTX, err := p.NewContextFromClients(ctx, infoClient, xClient) if err != nil { - return nil, nil, nil, err + return nil, err } xCTX, err := x.NewContextFromClients(ctx, infoClient, xClient) if err != nil { - return nil, nil, nil, err + return nil, err } - utxos := NewUTXOs() + cCTX, err := c.NewContextFromClients(ctx, infoClient, xClient) + if err != nil { + return nil, err + } + + utxos := walletcommon.NewUTXOs() addrList := addrs.List() chains := []struct { id ids.ID @@ -69,7 +101,7 @@ func FetchState(ctx context.Context, uri string, addrs set.Set[ids.ShortID]) (p. 
}{ { id: constants.PlatformChainID, - client: platformvm.NewClient(uri), + client: pClient, codec: txs.Codec, }, { @@ -77,6 +109,11 @@ func FetchState(ctx context.Context, uri string, addrs set.Set[ids.ShortID]) (p. client: xClient, codec: x.Parser.Codec(), }, + { + id: cCTX.BlockchainID(), + client: cClient, + codec: evm.Codec, + }, } for _, destinationChain := range chains { for _, sourceChain := range chains { @@ -90,11 +127,60 @@ func FetchState(ctx context.Context, uri string, addrs set.Set[ids.ShortID]) (p. addrList, ) if err != nil { - return nil, nil, nil, err + return nil, err } } } - return pCTX, xCTX, utxos, nil + return &AVAXState{ + PClient: pClient, + PCTX: pCTX, + XClient: xClient, + XCTX: xCTX, + CClient: cClient, + CCTX: cCTX, + UTXOs: utxos, + }, nil +} + +type EthState struct { + Client ethclient.Client + Accounts map[ethcommon.Address]*c.Account +} + +func FetchEthState( + ctx context.Context, + uri string, + addrs set.Set[ethcommon.Address], +) (*EthState, error) { + path := fmt.Sprintf( + "%s/ext/%s/C/rpc", + uri, + constants.ChainAliasPrefix, + ) + client, err := ethclient.Dial(path) + if err != nil { + return nil, err + } + + accounts := make(map[ethcommon.Address]*c.Account, addrs.Len()) + for addr := range addrs { + balance, err := client.BalanceAt(ctx, addr, nil) + if err != nil { + return nil, err + } + nonce, err := client.NonceAt(ctx, addr, nil) + if err != nil { + return nil, err + } + accounts[addr] = &c.Account{ + Balance: balance, + Nonce: nonce, + } + } + return &EthState{ + Client: client, + Accounts: accounts, + }, nil } // AddAllUTXOs fetches all the UTXOs referenced by [addresses] that were sent @@ -103,7 +189,7 @@ func FetchState(ctx context.Context, uri string, addrs set.Set[ids.ShortID]) (p. // expires, then the returned error will be immediately reported. 
func AddAllUTXOs( ctx context.Context, - utxos UTXOs, + utxos walletcommon.UTXOs, client UTXOClient, codec codec.Manager, sourceChainID ids.ID, diff --git a/avalanchego/wallet/subnet/primary/common/options.go b/avalanchego/wallet/subnet/primary/common/options.go index 9cfaed98..d6803825 100644 --- a/avalanchego/wallet/subnet/primary/common/options.go +++ b/avalanchego/wallet/subnet/primary/common/options.go @@ -1,19 +1,26 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package common import ( "context" + "math/big" "time" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/vms/secp256k1fx" + + ethcommon "github.com/ethereum/go-ethereum/common" ) const defaultPollFrequency = 100 * time.Millisecond +// Signature of the function that will be called after a transaction +// has been issued with the ID of the issued transaction. 
+type PostIssuanceFunc func(ids.ID) + type Option func(*Options) type Options struct { @@ -22,6 +29,11 @@ type Options struct { customAddressesSet bool customAddresses set.Set[ids.ShortID] + customEthAddressesSet bool + customEthAddresses set.Set[ethcommon.Address] + + baseFee *big.Int + minIssuanceTimeSet bool minIssuanceTime uint64 @@ -35,6 +47,8 @@ type Options struct { pollFrequencySet bool pollFrequency time.Duration + + postIssuanceFunc PostIssuanceFunc } func NewOptions(ops []Option) *Options { @@ -71,6 +85,20 @@ func (o *Options) Addresses(defaultAddresses set.Set[ids.ShortID]) set.Set[ids.S return defaultAddresses } +func (o *Options) EthAddresses(defaultAddresses set.Set[ethcommon.Address]) set.Set[ethcommon.Address] { + if o.customEthAddressesSet { + return o.customEthAddresses + } + return defaultAddresses +} + +func (o *Options) BaseFee(defaultBaseFee *big.Int) *big.Int { + if o.baseFee != nil { + return o.baseFee + } + return defaultBaseFee +} + func (o *Options) MinIssuanceTime() uint64 { if o.minIssuanceTimeSet { return o.minIssuanceTime @@ -104,6 +132,10 @@ func (o *Options) PollFrequency() time.Duration { return defaultPollFrequency } +func (o *Options) PostIssuanceFunc() PostIssuanceFunc { + return o.postIssuanceFunc +} + func WithContext(ctx context.Context) Option { return func(o *Options) { o.ctx = ctx @@ -117,6 +149,19 @@ func WithCustomAddresses(addrs set.Set[ids.ShortID]) Option { } } +func WithCustomEthAddresses(addrs set.Set[ethcommon.Address]) Option { + return func(o *Options) { + o.customEthAddressesSet = true + o.customEthAddresses = addrs + } +} + +func WithBaseFee(baseFee *big.Int) Option { + return func(o *Options) { + o.baseFee = baseFee + } +} + func WithMinIssuanceTime(minIssuanceTime uint64) Option { return func(o *Options) { o.minIssuanceTimeSet = true @@ -154,3 +199,9 @@ func WithPollFrequency(pollFrequency time.Duration) Option { o.pollFrequency = pollFrequency } } + +func WithPostIssuanceFunc(f PostIssuanceFunc) Option { + 
return func(o *Options) { + o.postIssuanceFunc = f + } +} diff --git a/avalanchego/wallet/subnet/primary/common/spend.go b/avalanchego/wallet/subnet/primary/common/spend.go index d7511317..42c7fc02 100644 --- a/avalanchego/wallet/subnet/primary/common/spend.go +++ b/avalanchego/wallet/subnet/primary/common/spend.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package common diff --git a/avalanchego/wallet/subnet/primary/common/test_utxos.go b/avalanchego/wallet/subnet/primary/common/test_utxos.go new file mode 100644 index 00000000..094c57d5 --- /dev/null +++ b/avalanchego/wallet/subnet/primary/common/test_utxos.go @@ -0,0 +1,45 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package common + +import ( + "context" + "slices" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/vms/components/avax" +) + +func NewDeterministicChainUTXOs(require *require.Assertions, utxoSets map[ids.ID][]*avax.UTXO) *DeterministicChainUTXOs { + globalUTXOs := NewUTXOs() + for subnetID, utxos := range utxoSets { + for _, utxo := range utxos { + require.NoError( + globalUTXOs.AddUTXO(context.Background(), subnetID, constants.PlatformChainID, utxo), + ) + } + } + return &DeterministicChainUTXOs{ + ChainUTXOs: NewChainUTXOs(constants.PlatformChainID, globalUTXOs), + } +} + +type DeterministicChainUTXOs struct { + ChainUTXOs +} + +func (c *DeterministicChainUTXOs) UTXOs(ctx context.Context, sourceChainID ids.ID) ([]*avax.UTXO, error) { + utxos, err := c.ChainUTXOs.UTXOs(ctx, sourceChainID) + if err != nil { + return nil, err + } + + slices.SortFunc(utxos, func(a, b *avax.UTXO) int { + return a.Compare(&b.UTXOID) + }) + return utxos, nil +} diff --git 
a/avalanchego/wallet/subnet/primary/utxos.go b/avalanchego/wallet/subnet/primary/common/utxos.go similarity index 91% rename from avalanchego/wallet/subnet/primary/utxos.go rename to avalanchego/wallet/subnet/primary/common/utxos.go index d0bbd2be..6c96e249 100644 --- a/avalanchego/wallet/subnet/primary/utxos.go +++ b/avalanchego/wallet/subnet/primary/common/utxos.go @@ -1,7 +1,7 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package primary +package common import ( "context" @@ -12,18 +12,11 @@ import ( "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/vms/components/avax" - "github.com/ava-labs/avalanchego/wallet/chain/p" - "github.com/ava-labs/avalanchego/wallet/chain/x" ) var ( _ UTXOs = (*utxos)(nil) _ ChainUTXOs = (*chainUTXOs)(nil) - - // TODO: refactor ChainUTXOs definition to allow the client implementations - // to perform their own assertions. - _ ChainUTXOs = p.ChainUTXOs(nil) - _ ChainUTXOs = x.ChainUTXOs(nil) ) type UTXOs interface { diff --git a/avalanchego/wallet/subnet/primary/example_test.go b/avalanchego/wallet/subnet/primary/example_test.go index 3d9da8ac..2b8d8b8e 100644 --- a/avalanchego/wallet/subnet/primary/example_test.go +++ b/avalanchego/wallet/subnet/primary/example_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package primary @@ -24,10 +24,14 @@ func ExampleWallet() { ctx := context.Background() kc := secp256k1fx.NewKeychain(genesis.EWOQKey) - // NewWalletFromURI fetches the available UTXOs owned by [kc] on the network - // that [LocalAPIURI] is hosting. + // MakeWallet fetches the available UTXOs owned by [kc] on the network that + // [LocalAPIURI] is hosting. 
walletSyncStartTime := time.Now() - wallet, err := NewWalletFromURI(ctx, LocalAPIURI, kc) + wallet, err := MakeWallet(ctx, &WalletConfig{ + URI: LocalAPIURI, + AVAXKeychain: kc, + EthKeychain: kc, + }) if err != nil { log.Fatalf("failed to initialize wallet with: %s\n", err) return @@ -49,7 +53,7 @@ func ExampleWallet() { // Create a custom asset to send to the P-chain. createAssetStartTime := time.Now() - createAssetTxID, err := xWallet.IssueCreateAssetTx( + createAssetTx, err := xWallet.IssueCreateAssetTx( "RnM", "RNM", 9, @@ -66,11 +70,12 @@ func ExampleWallet() { log.Fatalf("failed to create new X-chain asset with: %s\n", err) return } + createAssetTxID := createAssetTx.ID() log.Printf("created X-chain asset %s in %s\n", createAssetTxID, time.Since(createAssetStartTime)) // Send 100 MegaAvax to the P-chain. exportStartTime := time.Now() - exportTxID, err := xWallet.IssueExportTx( + exportTx, err := xWallet.IssueExportTx( constants.PlatformChainID, []*avax.TransferableOutput{ { @@ -88,27 +93,30 @@ func ExampleWallet() { log.Fatalf("failed to issue X->P export transaction with: %s\n", err) return } + exportTxID := exportTx.ID() log.Printf("issued X->P export %s in %s\n", exportTxID, time.Since(exportStartTime)) // Import the 100 MegaAvax from the X-chain into the P-chain. 
importStartTime := time.Now() - importTxID, err := pWallet.IssueImportTx(xChainID, owner) + importTx, err := pWallet.IssueImportTx(xChainID, owner) if err != nil { log.Fatalf("failed to issue X->P import transaction with: %s\n", err) return } + importTxID := importTx.ID() log.Printf("issued X->P import %s in %s\n", importTxID, time.Since(importStartTime)) createSubnetStartTime := time.Now() - createSubnetTxID, err := pWallet.IssueCreateSubnetTx(owner) + createSubnetTx, err := pWallet.IssueCreateSubnetTx(owner) if err != nil { log.Fatalf("failed to issue create subnet transaction with: %s\n", err) return } + createSubnetTxID := createSubnetTx.ID() log.Printf("issued create subnet transaction %s in %s\n", createSubnetTxID, time.Since(createSubnetStartTime)) transformSubnetStartTime := time.Now() - transformSubnetTxID, err := pWallet.IssueTransformSubnetTx( + transformSubnetTx, err := pWallet.IssueTransformSubnetTx( createSubnetTxID, createAssetTxID, 50*units.MegaAvax, @@ -128,11 +136,12 @@ func ExampleWallet() { log.Fatalf("failed to issue transform subnet transaction with: %s\n", err) return } + transformSubnetTxID := transformSubnetTx.ID() log.Printf("issued transform subnet transaction %s in %s\n", transformSubnetTxID, time.Since(transformSubnetStartTime)) addPermissionlessValidatorStartTime := time.Now() startTime := time.Now().Add(time.Minute) - addSubnetValidatorTxID, err := pWallet.IssueAddPermissionlessValidatorTx( + addSubnetValidatorTx, err := pWallet.IssueAddPermissionlessValidatorTx( &txs.SubnetValidator{ Validator: txs.Validator{ NodeID: genesis.LocalConfig.InitialStakers[0].NodeID, @@ -143,7 +152,7 @@ func ExampleWallet() { Subnet: createSubnetTxID, }, &signer.Empty{}, - createAssetTxID, + createAssetTx.ID(), &secp256k1fx.OutputOwners{}, &secp256k1fx.OutputOwners{}, reward.PercentDenominator, @@ -152,10 +161,11 @@ func ExampleWallet() { log.Fatalf("failed to issue add subnet validator with: %s\n", err) return } + addSubnetValidatorTxID := 
addSubnetValidatorTx.ID() log.Printf("issued add subnet validator transaction %s in %s\n", addSubnetValidatorTxID, time.Since(addPermissionlessValidatorStartTime)) addPermissionlessDelegatorStartTime := time.Now() - addSubnetDelegatorTxID, err := pWallet.IssueAddPermissionlessDelegatorTx( + addSubnetDelegatorTx, err := pWallet.IssueAddPermissionlessDelegatorTx( &txs.SubnetValidator{ Validator: txs.Validator{ NodeID: genesis.LocalConfig.InitialStakers[0].NodeID, @@ -172,5 +182,6 @@ func ExampleWallet() { log.Fatalf("failed to issue add subnet delegator with: %s\n", err) return } + addSubnetDelegatorTxID := addSubnetDelegatorTx.ID() log.Printf("issued add subnet validator delegator %s in %s\n", addSubnetDelegatorTxID, time.Since(addPermissionlessDelegatorStartTime)) } diff --git a/avalanchego/wallet/subnet/primary/examples/add-permissioned-subnet-validator/main.go b/avalanchego/wallet/subnet/primary/examples/add-permissioned-subnet-validator/main.go index c90ea345..33695b35 100644 --- a/avalanchego/wallet/subnet/primary/examples/add-permissioned-subnet-validator/main.go +++ b/avalanchego/wallet/subnet/primary/examples/add-permissioned-subnet-validator/main.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package main @@ -11,6 +11,7 @@ import ( "github.com/ava-labs/avalanchego/api/info" "github.com/ava-labs/avalanchego/genesis" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/ava-labs/avalanchego/vms/secp256k1fx" @@ -41,10 +42,15 @@ func main() { } log.Printf("fetched node ID %s in %s\n", nodeID, time.Since(nodeInfoStartTime)) - // NewWalletWithTxs fetches the available UTXOs owned by [kc] on the network - // that [uri] is hosting and registers [subnetID]. 
+ // MakeWallet fetches the available UTXOs owned by [kc] on the network that + // [uri] is hosting and registers [subnetID]. walletSyncStartTime := time.Now() - wallet, err := primary.NewWalletWithTxs(ctx, uri, kc, subnetID) + wallet, err := primary.MakeWallet(ctx, &primary.WalletConfig{ + URI: uri, + AVAXKeychain: kc, + EthKeychain: kc, + PChainTxsToFetch: set.Of(subnetID), + }) if err != nil { log.Fatalf("failed to initialize wallet: %s\n", err) } @@ -54,7 +60,7 @@ func main() { pWallet := wallet.P() addValidatorStartTime := time.Now() - addValidatorTxID, err := pWallet.IssueAddSubnetValidatorTx(&txs.SubnetValidator{ + addValidatorTx, err := pWallet.IssueAddSubnetValidatorTx(&txs.SubnetValidator{ Validator: txs.Validator{ NodeID: nodeID, Start: uint64(startTime.Unix()), @@ -66,5 +72,5 @@ func main() { if err != nil { log.Fatalf("failed to issue add subnet validator transaction: %s\n", err) } - log.Printf("added new subnet validator %s to %s with %s in %s\n", nodeID, subnetID, addValidatorTxID, time.Since(addValidatorStartTime)) + log.Printf("added new subnet validator %s to %s with %s in %s\n", nodeID, subnetID, addValidatorTx.ID(), time.Since(addValidatorStartTime)) } diff --git a/avalanchego/wallet/subnet/primary/examples/add-primary-validator/main.go b/avalanchego/wallet/subnet/primary/examples/add-primary-validator/main.go index d2b7d5d7..987229d1 100644 --- a/avalanchego/wallet/subnet/primary/examples/add-primary-validator/main.go +++ b/avalanchego/wallet/subnet/primary/examples/add-primary-validator/main.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package main @@ -39,10 +39,14 @@ func main() { } log.Printf("fetched node ID %s in %s\n", nodeID, time.Since(nodeInfoStartTime)) - // NewWalletFromURI fetches the available UTXOs owned by [kc] on the network - // that [uri] is hosting. 
+ // MakeWallet fetches the available UTXOs owned by [kc] on the network that + // [uri] is hosting. walletSyncStartTime := time.Now() - wallet, err := primary.NewWalletFromURI(ctx, uri, kc) + wallet, err := primary.MakeWallet(ctx, &primary.WalletConfig{ + URI: uri, + AVAXKeychain: kc, + EthKeychain: kc, + }) if err != nil { log.Fatalf("failed to initialize wallet: %s\n", err) } @@ -53,7 +57,7 @@ func main() { avaxAssetID := pWallet.AVAXAssetID() addValidatorStartTime := time.Now() - addValidatorTxID, err := pWallet.IssueAddPermissionlessValidatorTx( + addValidatorTx, err := pWallet.IssueAddPermissionlessValidatorTx( &txs.SubnetValidator{Validator: txs.Validator{ NodeID: nodeID, Start: uint64(startTime.Unix()), @@ -75,5 +79,5 @@ func main() { if err != nil { log.Fatalf("failed to issue add permissionless validator transaction: %s\n", err) } - log.Printf("added new primary network validator %s with %s in %s\n", nodeID, addValidatorTxID, time.Since(addValidatorStartTime)) + log.Printf("added new primary network validator %s with %s in %s\n", nodeID, addValidatorTx.ID(), time.Since(addValidatorStartTime)) } diff --git a/avalanchego/wallet/subnet/primary/examples/c-chain-export/main.go b/avalanchego/wallet/subnet/primary/examples/c-chain-export/main.go new file mode 100644 index 00000000..41ecb5ca --- /dev/null +++ b/avalanchego/wallet/subnet/primary/examples/c-chain-export/main.go @@ -0,0 +1,72 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package main + +import ( + "context" + "log" + "time" + + "github.com/ava-labs/avalanchego/genesis" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/units" + "github.com/ava-labs/avalanchego/vms/secp256k1fx" + "github.com/ava-labs/avalanchego/wallet/subnet/primary" +) + +func main() { + key := genesis.EWOQKey + uri := primary.LocalAPIURI + kc := secp256k1fx.NewKeychain(key) + avaxAddr := key.Address() + + ctx := context.Background() + + // MakeWallet fetches the available UTXOs owned by [kc] on the network that + // [uri] is hosting. + walletSyncStartTime := time.Now() + wallet, err := primary.MakeWallet(ctx, &primary.WalletConfig{ + URI: uri, + AVAXKeychain: kc, + EthKeychain: kc, + }) + if err != nil { + log.Fatalf("failed to initialize wallet: %s\n", err) + } + log.Printf("synced wallet in %s\n", time.Since(walletSyncStartTime)) + + // Get the P-chain wallet + pWallet := wallet.P() + cWallet := wallet.C() + + // Pull out useful constants to use when issuing transactions. 
+ cChainID := cWallet.BlockchainID() + owner := secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + avaxAddr, + }, + } + + exportStartTime := time.Now() + exportTx, err := cWallet.IssueExportTx( + constants.PlatformChainID, + []*secp256k1fx.TransferOutput{{ + Amt: units.Avax, + OutputOwners: owner, + }}, + ) + if err != nil { + log.Fatalf("failed to issue export transaction: %s\n", err) + } + log.Printf("issued export %s in %s\n", exportTx.ID(), time.Since(exportStartTime)) + + importStartTime := time.Now() + importTx, err := pWallet.IssueImportTx(cChainID, &owner) + if err != nil { + log.Fatalf("failed to issue import transaction: %s\n", err) + } + log.Printf("issued import %s in %s\n", importTx.ID(), time.Since(importStartTime)) +} diff --git a/avalanchego/wallet/subnet/primary/examples/c-chain-import/main.go b/avalanchego/wallet/subnet/primary/examples/c-chain-import/main.go new file mode 100644 index 00000000..387d435d --- /dev/null +++ b/avalanchego/wallet/subnet/primary/examples/c-chain-import/main.go @@ -0,0 +1,77 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package main + +import ( + "context" + "log" + "time" + + "github.com/ava-labs/coreth/plugin/evm" + + "github.com/ava-labs/avalanchego/genesis" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/units" + "github.com/ava-labs/avalanchego/vms/components/avax" + "github.com/ava-labs/avalanchego/vms/secp256k1fx" + "github.com/ava-labs/avalanchego/wallet/subnet/primary" +) + +func main() { + key := genesis.EWOQKey + uri := primary.LocalAPIURI + kc := secp256k1fx.NewKeychain(key) + avaxAddr := key.Address() + ethAddr := evm.PublicKeyToEthAddress(key.PublicKey()) + + ctx := context.Background() + + // MakeWallet fetches the available UTXOs owned by [kc] on the network that + // [uri] is hosting. 
+ walletSyncStartTime := time.Now() + wallet, err := primary.MakeWallet(ctx, &primary.WalletConfig{ + URI: uri, + AVAXKeychain: kc, + EthKeychain: kc, + }) + if err != nil { + log.Fatalf("failed to initialize wallet: %s\n", err) + } + log.Printf("synced wallet in %s\n", time.Since(walletSyncStartTime)) + + // Get the P-chain wallet + pWallet := wallet.P() + cWallet := wallet.C() + + // Pull out useful constants to use when issuing transactions. + cChainID := cWallet.BlockchainID() + avaxAssetID := cWallet.AVAXAssetID() + owner := secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + avaxAddr, + }, + } + + exportStartTime := time.Now() + exportTx, err := pWallet.IssueExportTx(cChainID, []*avax.TransferableOutput{{ + Asset: avax.Asset{ID: avaxAssetID}, + Out: &secp256k1fx.TransferOutput{ + Amt: units.Avax, + OutputOwners: owner, + }, + }}) + if err != nil { + log.Fatalf("failed to issue export transaction: %s\n", err) + } + log.Printf("issued export %s in %s\n", exportTx.ID(), time.Since(exportStartTime)) + + importStartTime := time.Now() + importTx, err := cWallet.IssueImportTx(constants.PlatformChainID, ethAddr) + if err != nil { + log.Fatalf("failed to issue import transaction: %s\n", err) + } + log.Printf("issued import %s to %s in %s\n", importTx.ID(), ethAddr.Hex(), time.Since(importStartTime)) +} diff --git a/avalanchego/wallet/subnet/primary/examples/create-asset/main.go b/avalanchego/wallet/subnet/primary/examples/create-asset/main.go index d1eda231..54015dda 100644 --- a/avalanchego/wallet/subnet/primary/examples/create-asset/main.go +++ b/avalanchego/wallet/subnet/primary/examples/create-asset/main.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package main @@ -24,10 +24,14 @@ func main() { ctx := context.Background() - // NewWalletFromURI fetches the available UTXOs owned by [kc] on the network - // that [uri] is hosting. + // MakeWallet fetches the available UTXOs owned by [kc] on the network that + // [uri] is hosting. walletSyncStartTime := time.Now() - wallet, err := primary.NewWalletFromURI(ctx, uri, kc) + wallet, err := primary.MakeWallet(ctx, &primary.WalletConfig{ + URI: uri, + AVAXKeychain: kc, + EthKeychain: kc, + }) if err != nil { log.Fatalf("failed to initialize wallet: %s\n", err) } @@ -45,7 +49,7 @@ func main() { } createAssetStartTime := time.Now() - createAssetTxID, err := xWallet.IssueCreateAssetTx( + createAssetTx, err := xWallet.IssueCreateAssetTx( "HI", "HI", 1, @@ -61,5 +65,5 @@ func main() { if err != nil { log.Fatalf("failed to issue create asset transaction: %s\n", err) } - log.Printf("created new asset %s in %s\n", createAssetTxID, time.Since(createAssetStartTime)) + log.Printf("created new asset %s in %s\n", createAssetTx.ID(), time.Since(createAssetStartTime)) } diff --git a/avalanchego/wallet/subnet/primary/examples/create-chain/main.go b/avalanchego/wallet/subnet/primary/examples/create-chain/main.go index 1b4f02b0..ea98579f 100644 --- a/avalanchego/wallet/subnet/primary/examples/create-chain/main.go +++ b/avalanchego/wallet/subnet/primary/examples/create-chain/main.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package main @@ -11,6 +11,7 @@ import ( "github.com/ava-labs/avalanchego/genesis" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/vms/secp256k1fx" "github.com/ava-labs/avalanchego/wallet/subnet/primary" ) @@ -36,10 +37,15 @@ func main() { ctx := context.Background() - // NewWalletWithTxs fetches the available UTXOs owned by [kc] on the network - // that [uri] is hosting and registers [subnetID]. + // MakeWallet fetches the available UTXOs owned by [kc] on the network that + // [uri] is hosting and registers [subnetID]. walletSyncStartTime := time.Now() - wallet, err := primary.NewWalletWithTxs(ctx, uri, kc, subnetID) + wallet, err := primary.MakeWallet(ctx, &primary.WalletConfig{ + URI: uri, + AVAXKeychain: kc, + EthKeychain: kc, + PChainTxsToFetch: set.Of(subnetID), + }) if err != nil { log.Fatalf("failed to initialize wallet: %s\n", err) } @@ -49,7 +55,7 @@ func main() { pWallet := wallet.P() createChainStartTime := time.Now() - createChainTxID, err := pWallet.IssueCreateChainTx( + createChainTx, err := pWallet.IssueCreateChainTx( subnetID, genesisBytes, vmID, @@ -59,5 +65,5 @@ func main() { if err != nil { log.Fatalf("failed to issue create chain transaction: %s\n", err) } - log.Printf("created new chain %s in %s\n", createChainTxID, time.Since(createChainStartTime)) + log.Printf("created new chain %s in %s\n", createChainTx.ID(), time.Since(createChainStartTime)) } diff --git a/avalanchego/wallet/subnet/primary/examples/create-locked-stakeable/main.go b/avalanchego/wallet/subnet/primary/examples/create-locked-stakeable/main.go index 54d0eefb..32cdcf98 100644 --- a/avalanchego/wallet/subnet/primary/examples/create-locked-stakeable/main.go +++ b/avalanchego/wallet/subnet/primary/examples/create-locked-stakeable/main.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. package main @@ -33,10 +33,14 @@ func main() { ctx := context.Background() - // NewWalletFromURI fetches the available UTXOs owned by [kc] on the network - // that [uri] is hosting. + // MakeWallet fetches the available UTXOs owned by [kc] on the network that + // [uri] is hosting. walletSyncStartTime := time.Now() - wallet, err := primary.NewWalletFromURI(ctx, uri, kc) + wallet, err := primary.MakeWallet(ctx, &primary.WalletConfig{ + URI: uri, + AVAXKeychain: kc, + EthKeychain: kc, + }) if err != nil { log.Fatalf("failed to initialize wallet: %s\n", err) } @@ -47,7 +51,7 @@ func main() { avaxAssetID := pWallet.AVAXAssetID() issueTxStartTime := time.Now() - txID, err := pWallet.IssueBaseTx([]*avax.TransferableOutput{ + tx, err := pWallet.IssueBaseTx([]*avax.TransferableOutput{ { Asset: avax.Asset{ ID: avaxAssetID, @@ -69,5 +73,5 @@ func main() { if err != nil { log.Fatalf("failed to issue transaction: %s\n", err) } - log.Printf("issued %s in %s\n", txID, time.Since(issueTxStartTime)) + log.Printf("issued %s in %s\n", tx.ID(), time.Since(issueTxStartTime)) } diff --git a/avalanchego/wallet/subnet/primary/examples/create-subnet/main.go b/avalanchego/wallet/subnet/primary/examples/create-subnet/main.go index 315712b3..e471e68f 100644 --- a/avalanchego/wallet/subnet/primary/examples/create-subnet/main.go +++ b/avalanchego/wallet/subnet/primary/examples/create-subnet/main.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package main @@ -22,10 +22,14 @@ func main() { ctx := context.Background() - // NewWalletFromURI fetches the available UTXOs owned by [kc] on the network - // that [uri] is hosting. + // MakeWallet fetches the available UTXOs owned by [kc] on the network that + // [uri] is hosting. 
walletSyncStartTime := time.Now() - wallet, err := primary.NewWalletFromURI(ctx, uri, kc) + wallet, err := primary.MakeWallet(ctx, &primary.WalletConfig{ + URI: uri, + AVAXKeychain: kc, + EthKeychain: kc, + }) if err != nil { log.Fatalf("failed to initialize wallet: %s\n", err) } @@ -43,9 +47,9 @@ func main() { } createSubnetStartTime := time.Now() - createSubnetTxID, err := pWallet.IssueCreateSubnetTx(owner) + createSubnetTx, err := pWallet.IssueCreateSubnetTx(owner) if err != nil { log.Fatalf("failed to issue create subnet transaction: %s\n", err) } - log.Printf("created new subnet %s in %s\n", createSubnetTxID, time.Since(createSubnetStartTime)) + log.Printf("created new subnet %s in %s\n", createSubnetTx.ID(), time.Since(createSubnetStartTime)) } diff --git a/avalanchego/wallet/subnet/primary/examples/get-p-chain-balance/main.go b/avalanchego/wallet/subnet/primary/examples/get-p-chain-balance/main.go new file mode 100644 index 00000000..08f2cd53 --- /dev/null +++ b/avalanchego/wallet/subnet/primary/examples/get-p-chain-balance/main.go @@ -0,0 +1,51 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package main + +import ( + "context" + "log" + "time" + + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/formatting/address" + "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/wallet/chain/p" + "github.com/ava-labs/avalanchego/wallet/subnet/primary" + "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" +) + +func main() { + uri := primary.LocalAPIURI + addrStr := "P-local18jma8ppw3nhx5r4ap8clazz0dps7rv5u00z96u" + + addr, err := address.ParseToID(addrStr) + if err != nil { + log.Fatalf("failed to parse address: %s\n", err) + } + + addresses := set.Of(addr) + + ctx := context.Background() + + fetchStartTime := time.Now() + state, err := primary.FetchState(ctx, uri, addresses) + if err != nil { + log.Fatalf("failed to fetch state: %s\n", err) + } + log.Printf("fetched state of %s in %s\n", addrStr, time.Since(fetchStartTime)) + + pUTXOs := common.NewChainUTXOs(constants.PlatformChainID, state.UTXOs) + pBackend := p.NewBackend(state.PCTX, pUTXOs, nil) + pBuilder := p.NewBuilder(addresses, pBackend) + + currentBalances, err := pBuilder.GetBalance() + if err != nil { + log.Fatalf("failed to get the balance: %s\n", err) + } + + avaxID := state.PCTX.AVAXAssetID() + avaxBalance := currentBalances[avaxID] + log.Printf("current AVAX balance of %s is %d nAVAX\n", addrStr, avaxBalance) +} diff --git a/avalanchego/wallet/subnet/primary/examples/get-x-chain-balance/main.go b/avalanchego/wallet/subnet/primary/examples/get-x-chain-balance/main.go new file mode 100644 index 00000000..98955468 --- /dev/null +++ b/avalanchego/wallet/subnet/primary/examples/get-x-chain-balance/main.go @@ -0,0 +1,52 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package main + +import ( + "context" + "log" + "time" + + "github.com/ava-labs/avalanchego/utils/formatting/address" + "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/wallet/chain/x" + "github.com/ava-labs/avalanchego/wallet/subnet/primary" + "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" +) + +func main() { + uri := primary.LocalAPIURI + addrStr := "X-local18jma8ppw3nhx5r4ap8clazz0dps7rv5u00z96u" + + addr, err := address.ParseToID(addrStr) + if err != nil { + log.Fatalf("failed to parse address: %s\n", err) + } + + addresses := set.Of(addr) + + ctx := context.Background() + + fetchStartTime := time.Now() + state, err := primary.FetchState(ctx, uri, addresses) + if err != nil { + log.Fatalf("failed to fetch state: %s\n", err) + } + log.Printf("fetched state of %s in %s\n", addrStr, time.Since(fetchStartTime)) + + xChainID := state.XCTX.BlockchainID() + + xUTXOs := common.NewChainUTXOs(xChainID, state.UTXOs) + xBackend := x.NewBackend(state.XCTX, xUTXOs) + xBuilder := x.NewBuilder(addresses, xBackend) + + currentBalances, err := xBuilder.GetFTBalance() + if err != nil { + log.Fatalf("failed to get the balance: %s\n", err) + } + + avaxID := state.XCTX.AVAXAssetID() + avaxBalance := currentBalances[avaxID] + log.Printf("current AVAX balance of %s is %d nAVAX\n", addrStr, avaxBalance) +} diff --git a/avalanchego/wallet/subnet/primary/examples/remove-subnet-validator/main.go b/avalanchego/wallet/subnet/primary/examples/remove-subnet-validator/main.go index 05c54d27..50639943 100644 --- a/avalanchego/wallet/subnet/primary/examples/remove-subnet-validator/main.go +++ b/avalanchego/wallet/subnet/primary/examples/remove-subnet-validator/main.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package main @@ -10,6 +10,7 @@ import ( "github.com/ava-labs/avalanchego/genesis" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/vms/secp256k1fx" "github.com/ava-labs/avalanchego/wallet/subnet/primary" ) @@ -33,10 +34,15 @@ func main() { ctx := context.Background() - // NewWalletWithTxs fetches the available UTXOs owned by [kc] on the network - // that [uri] is hosting and registers [subnetID]. + // MakeWallet fetches the available UTXOs owned by [kc] on the network that + // [uri] is hosting and registers [subnetID]. walletSyncStartTime := time.Now() - wallet, err := primary.NewWalletWithTxs(ctx, uri, kc, subnetID) + wallet, err := primary.MakeWallet(ctx, &primary.WalletConfig{ + URI: uri, + AVAXKeychain: kc, + EthKeychain: kc, + PChainTxsToFetch: set.Of(subnetID), + }) if err != nil { log.Fatalf("failed to initialize wallet: %s\n", err) } @@ -46,12 +52,12 @@ func main() { pWallet := wallet.P() removeValidatorStartTime := time.Now() - removeValidatorTxID, err := pWallet.IssueRemoveSubnetValidatorTx( + removeValidatorTx, err := pWallet.IssueRemoveSubnetValidatorTx( nodeID, subnetID, ) if err != nil { log.Fatalf("failed to issue remove subnet validator transaction: %s\n", err) } - log.Printf("removed subnet validator %s from %s with %s in %s\n", nodeID, subnetID, removeValidatorTxID, time.Since(removeValidatorStartTime)) + log.Printf("removed subnet validator %s from %s with %s in %s\n", nodeID, subnetID, removeValidatorTx.ID(), time.Since(removeValidatorStartTime)) } diff --git a/avalanchego/wallet/subnet/primary/wallet.go b/avalanchego/wallet/subnet/primary/wallet.go index ce3bc3e4..9aabf651 100644 --- a/avalanchego/wallet/subnet/primary/wallet.go +++ b/avalanchego/wallet/subnet/primary/wallet.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package primary @@ -9,9 +9,9 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/keychain" - "github.com/ava-labs/avalanchego/vms/avm" - "github.com/ava-labs/avalanchego/vms/platformvm" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/vms/platformvm/txs" + "github.com/ava-labs/avalanchego/wallet/chain/c" "github.com/ava-labs/avalanchego/wallet/chain/p" "github.com/ava-labs/avalanchego/wallet/chain/x" "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" @@ -23,11 +23,13 @@ var _ Wallet = (*wallet)(nil) type Wallet interface { P() p.Wallet X() x.Wallet + C() c.Wallet } type wallet struct { p p.Wallet x x.Wallet + c c.Wallet } func (w *wallet) P() p.Wallet { @@ -38,33 +40,72 @@ func (w *wallet) X() x.Wallet { return w.x } -// NewWalletFromURI returns a wallet that supports issuing transactions to the -// chains living in the primary network to a provided [uri]. +func (w *wallet) C() c.Wallet { + return w.c +} + +// Creates a new default wallet +func NewWallet(p p.Wallet, x x.Wallet, c c.Wallet) Wallet { + return &wallet{ + p: p, + x: x, + c: c, + } +} + +// Creates a Wallet with the given set of options +func NewWalletWithOptions(w Wallet, options ...common.Option) Wallet { + return NewWallet( + p.NewWalletWithOptions(w.P(), options...), + x.NewWalletWithOptions(w.X(), options...), + c.NewWalletWithOptions(w.C(), options...), + ) +} + +type WalletConfig struct { + // Base URI to use for all node requests. + URI string // required + // Keys to use for signing all transactions. + AVAXKeychain keychain.Keychain // required + EthKeychain c.EthKeychain // required + // Set of P-chain transactions that the wallet should know about to be able + // to generate transactions. + PChainTxs map[ids.ID]*txs.Tx // optional + // Set of P-chain transactions that the wallet should fetch to be able to + // generate transactions. 
+ PChainTxsToFetch set.Set[ids.ID] // optional +} + +// MakeWallet returns a wallet that supports issuing transactions to the chains +// living in the primary network. // -// On creation, the wallet attaches to the provided [uri] and fetches all UTXOs -// that reference any of the keys contained in [kc]. If the UTXOs are modified -// through an external issuance process, such as another instance of the wallet, -// the UTXOs may become out of sync. +// On creation, the wallet attaches to the provided uri and fetches all UTXOs +// that reference any of the provided keys. If the UTXOs are modified through an +// external issuance process, such as another instance of the wallet, the UTXOs +// may become out of sync. The wallet will also fetch all requested P-chain +// transactions. // -// The wallet manages all UTXOs locally, and performs all tx signing locally. -func NewWalletFromURI(ctx context.Context, uri string, kc keychain.Keychain) (Wallet, error) { - pCTX, xCTX, utxos, err := FetchState(ctx, uri, kc.Addresses()) +// The wallet manages all state locally, and performs all tx signing locally. +func MakeWallet(ctx context.Context, config *WalletConfig) (Wallet, error) { + avaxAddrs := config.AVAXKeychain.Addresses() + avaxState, err := FetchState(ctx, config.URI, avaxAddrs) if err != nil { return nil, err } - return NewWalletWithState(uri, pCTX, xCTX, utxos, kc), nil -} -// Creates a wallet with pre-loaded/cached P-chain transactions. 
-func NewWalletWithTxs(ctx context.Context, uri string, kc keychain.Keychain, preloadTXs ...ids.ID) (Wallet, error) { - pCTX, xCTX, utxos, err := FetchState(ctx, uri, kc.Addresses()) + ethAddrs := config.EthKeychain.EthAddresses() + ethState, err := FetchEthState(ctx, config.URI, ethAddrs) if err != nil { return nil, err } - pTXs := make(map[ids.ID]*txs.Tx) - pClient := platformvm.NewClient(uri) - for _, id := range preloadTXs { - txBytes, err := pClient.GetTx(ctx, id) + + pChainTxs := config.PChainTxs + if pChainTxs == nil { + pChainTxs = make(map[ids.ID]*txs.Tx) + } + + for txID := range config.PChainTxsToFetch { + txBytes, err := avaxState.PClient.GetTx(ctx, txID) if err != nil { return nil, err } @@ -72,64 +113,29 @@ func NewWalletWithTxs(ctx context.Context, uri string, kc keychain.Keychain, pre if err != nil { return nil, err } - pTXs[id] = tx + pChainTxs[txID] = tx } - return NewWalletWithTxsAndState(uri, pCTX, xCTX, utxos, kc, pTXs), nil -} -// Creates a wallet with pre-loaded/cached P-chain transactions and state. 
-func NewWalletWithTxsAndState( - uri string, - pCTX p.Context, - xCTX x.Context, - utxos UTXOs, - kc keychain.Keychain, - pTXs map[ids.ID]*txs.Tx, -) Wallet { - addrs := kc.Addresses() - pUTXOs := NewChainUTXOs(constants.PlatformChainID, utxos) - pBackend := p.NewBackend(pCTX, pUTXOs, pTXs) - pBuilder := p.NewBuilder(addrs, pBackend) - pSigner := p.NewSigner(kc, pBackend) - pClient := platformvm.NewClient(uri) - - xChainID := xCTX.BlockchainID() - xUTXOs := NewChainUTXOs(xChainID, utxos) - xBackend := x.NewBackend(xCTX, xChainID, xUTXOs) - xBuilder := x.NewBuilder(addrs, xBackend) - xSigner := x.NewSigner(kc, xBackend) - xClient := avm.NewClient(uri, "X") + pUTXOs := common.NewChainUTXOs(constants.PlatformChainID, avaxState.UTXOs) + pBackend := p.NewBackend(avaxState.PCTX, pUTXOs, pChainTxs) + pBuilder := p.NewBuilder(avaxAddrs, pBackend) + pSigner := p.NewSigner(config.AVAXKeychain, pBackend) - return NewWallet( - p.NewWallet(pBuilder, pSigner, pClient, pBackend), - x.NewWallet(xBuilder, xSigner, xClient, xBackend), - ) -} + xChainID := avaxState.XCTX.BlockchainID() + xUTXOs := common.NewChainUTXOs(xChainID, avaxState.UTXOs) + xBackend := x.NewBackend(avaxState.XCTX, xUTXOs) + xBuilder := x.NewBuilder(avaxAddrs, xBackend) + xSigner := x.NewSigner(config.AVAXKeychain, xBackend) -// Creates a wallet with pre-fetched state. 
-func NewWalletWithState( - uri string, - pCTX p.Context, - xCTX x.Context, - utxos UTXOs, - kc keychain.Keychain, -) Wallet { - pTXs := make(map[ids.ID]*txs.Tx) - return NewWalletWithTxsAndState(uri, pCTX, xCTX, utxos, kc, pTXs) -} + cChainID := avaxState.CCTX.BlockchainID() + cUTXOs := common.NewChainUTXOs(cChainID, avaxState.UTXOs) + cBackend := c.NewBackend(avaxState.CCTX, cUTXOs, ethState.Accounts) + cBuilder := c.NewBuilder(avaxAddrs, ethAddrs, cBackend) + cSigner := c.NewSigner(config.AVAXKeychain, config.EthKeychain, cBackend) -// Creates a Wallet with the given set of options -func NewWalletWithOptions(w Wallet, options ...common.Option) Wallet { return NewWallet( - p.NewWalletWithOptions(w.P(), options...), - x.NewWalletWithOptions(w.X(), options...), - ) -} - -// Creates a new default wallet -func NewWallet(p p.Wallet, x x.Wallet) Wallet { - return &wallet{ - p: p, - x: x, - } + p.NewWallet(pBuilder, pSigner, avaxState.PClient, pBackend), + x.NewWallet(xBuilder, xSigner, avaxState.XClient, xBackend), + c.NewWallet(cBuilder, cSigner, avaxState.CClient, ethState.Client, cBackend), + ), nil } diff --git a/avalanchego/x/archivedb/batch.go b/avalanchego/x/archivedb/batch.go new file mode 100644 index 00000000..720ed6f9 --- /dev/null +++ b/avalanchego/x/archivedb/batch.go @@ -0,0 +1,44 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package archivedb + +import "github.com/ava-labs/avalanchego/database" + +var _ database.Batch = (*batch)(nil) + +// batch is how a user performs modifications to the database. +// +// It consumes puts and deletes at a specified height. When committing, an +// atomic operation is created which registers the modifications at the +// specified height and updates the last tracked height to be equal to this +// batch's height. 
+type batch struct { + db *Database + height uint64 + database.BatchOps +} + +func (c *batch) Write() error { + batch := c.db.db.NewBatch() + for _, op := range c.Ops { + key, _ := newDBKeyFromUser(op.Key, c.height) + var value []byte + if !op.Delete { + value = newDBValue(op.Value) + } + if err := batch.Put(key, value); err != nil { + return err + } + } + + if err := database.PutUInt64(batch, heightKey, c.height); err != nil { + return err + } + + return batch.Write() +} + +func (c *batch) Inner() database.Batch { + return c +} diff --git a/avalanchego/x/archivedb/db.go b/avalanchego/x/archivedb/db.go new file mode 100644 index 00000000..74b658a3 --- /dev/null +++ b/avalanchego/x/archivedb/db.go @@ -0,0 +1,105 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package archivedb + +import ( + "context" + "errors" + "io" + + "github.com/ava-labs/avalanchego/api/health" + "github.com/ava-labs/avalanchego/database" +) + +var ( + ErrNotImplemented = errors.New("feature not implemented") + ErrInvalidValue = errors.New("invalid data value") + + _ database.Compacter = (*Database)(nil) + _ health.Checker = (*Database)(nil) + _ io.Closer = (*Database)(nil) +) + +// Database implements an ArchiveDB on top of a database.Database. An ArchiveDB +// is an append only database which stores all state changes happening at every +// height. Each record is stored in such way to perform both fast insertions and +// lookups. +// +// The API is quite simple, it has two main functions, one to create a Batch +// write with a given height, inside this batch entries can be added with a +// given value or they can be deleted. 
+// +// The way it works is as follows: +// - NewBatch(10) +// batch.Put(foo, "foo's value is bar") +// batch.Put(bar, "bar's value is bar") +// - NewBatch(100) +// batch.Put(foo, "updated foo's value is bar") +// - NewBatch(1000) +// batch.Put(bar, "updated bar's value is bar") +// batch.Delete(foo) +// +// The other primary function is to read data at a given height. +// +// The way it works is as follows: +// - Open(10) +// reader.Get(foo) +// reader.Get(bar) +// - Open(99) +// reader.GetHeight(foo) +// - Open(100) +// reader.Get(foo) +// - Open(1000) +// reader.Get(foo) +// +// Requesting `reader.Get(foo)` at height 1000 will return ErrNotFound because +// foo was deleted at height 1000. When calling `reader.GetHeight(foo)` at +// height 99 it will return a tuple `("foo's value is bar", 10)` returning the +// value of `foo` at height 99 (which was set at height 10). +type Database struct { + db database.Database +} + +func New(db database.Database) *Database { + return &Database{ + db: db, + } +} + +// Height returns the last written height. +func (db *Database) Height() (uint64, error) { + return database.GetUInt64(db.db, heightKey) +} + +// Open returns a reader for the state at the given height. +func (db *Database) Open(height uint64) *Reader { + return &Reader{ + db: db, + height: height, + } +} + +// NewBatch creates a write batch to perform changes at a given height. +// +// Note: Committing multiple batches at the same height, or at a lower height +// than the currently committed height will not error. It is left up to the +// caller to enforce any guarantees they need around height consistency. 
+func (db *Database) NewBatch(height uint64) *batch { + return &batch{ + db: db, + height: height, + } +} + +func (db *Database) Compact(start []byte, limit []byte) error { + return db.db.Compact(start, limit) +} + +func (db *Database) HealthCheck(ctx context.Context) (interface{}, error) { + return db.db.HealthCheck(ctx) +} + +func (db *Database) Close() error { + return db.db.Close() +} diff --git a/avalanchego/x/archivedb/db_test.go b/avalanchego/x/archivedb/db_test.go new file mode 100644 index 00000000..2b1fbea2 --- /dev/null +++ b/avalanchego/x/archivedb/db_test.go @@ -0,0 +1,227 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package archivedb + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/database/memdb" +) + +func TestDBEntries(t *testing.T) { + require := require.New(t) + + db := New(memdb.New()) + + batch := db.NewBatch(1) + require.NoError(batch.Write()) + + batch = db.NewBatch(2) + require.NoError(batch.Put([]byte("key1"), []byte("value1@10"))) + require.NoError(batch.Put([]byte("key2"), []byte("value2@10"))) + require.NoError(batch.Write()) + + batch = db.NewBatch(3) + require.NoError(batch.Write()) + + batch = db.NewBatch(4) + require.NoError(batch.Put([]byte("key1"), []byte("value1@100"))) + require.NoError(batch.Write()) + + batch = db.NewBatch(5) + require.NoError(batch.Write()) + + batch = db.NewBatch(6) + require.NoError(batch.Put([]byte("key1"), []byte("value1@1000"))) + require.NoError(batch.Put([]byte("key2"), []byte("value2@1000"))) + require.NoError(batch.Write()) + + reader := db.Open(2) + value, err := reader.Get([]byte("key1")) + require.NoError(err) + require.Equal([]byte("value1@10"), value) + + value, height, exists, err := reader.GetEntry([]byte("key1")) + require.NoError(err) + require.True(exists) + require.Equal([]byte("value1@10"), value) + 
require.Equal(uint64(2), height) + + reader = db.Open(4) + value, err = reader.Get([]byte("key1")) + require.NoError(err) + require.Equal([]byte("value1@100"), value) + + value, height, exists, err = reader.GetEntry([]byte("key1")) + require.NoError(err) + require.True(exists) + require.Equal([]byte("value1@100"), value) + require.Equal(uint64(4), height) + + reader = db.Open(6) + value, err = reader.Get([]byte("key2")) + require.NoError(err) + require.Equal([]byte("value2@1000"), value) + + value, height, exists, err = reader.GetEntry([]byte("key2")) + require.NoError(err) + require.True(exists) + require.Equal([]byte("value2@1000"), value) + require.Equal(uint64(6), height) + + reader = db.Open(4) + value, err = reader.Get([]byte("key2")) + require.NoError(err) + require.Equal([]byte("value2@10"), value) + + value, height, exists, err = reader.GetEntry([]byte("key2")) + require.NoError(err) + require.True(exists) + require.Equal([]byte("value2@10"), value) + require.Equal(uint64(2), height) + + exists, err = reader.Has([]byte("key2")) + require.NoError(err) + require.True(exists) + + reader = db.Open(1) + _, err = reader.Get([]byte("key1")) + require.ErrorIs(err, database.ErrNotFound) + + exists, err = reader.Has([]byte("key1")) + require.NoError(err) + require.False(exists) + + reader = db.Open(1) + _, err = reader.Get([]byte("key3")) + require.ErrorIs(err, database.ErrNotFound) +} + +func TestDelete(t *testing.T) { + require := require.New(t) + + db := New(memdb.New()) + + batch := db.NewBatch(1) + require.NoError(batch.Put([]byte("key1"), []byte("value1@10"))) + require.NoError(batch.Put([]byte("key2"), []byte("value2@10"))) + require.NoError(batch.Write()) + + batch = db.NewBatch(2) + require.NoError(batch.Put([]byte("key1"), []byte("value1@100"))) + require.NoError(batch.Write()) + + batch = db.NewBatch(3) + require.NoError(batch.Delete([]byte("key1"))) + require.NoError(batch.Delete([]byte("key2"))) + require.NoError(batch.Write()) + + reader := db.Open(2) 
+ value, err := reader.Get([]byte("key1")) + require.NoError(err) + require.Equal([]byte("value1@100"), value) + + value, height, exists, err := reader.GetEntry([]byte("key1")) + require.NoError(err) + require.True(exists) + require.Equal(uint64(2), height) + require.Equal([]byte("value1@100"), value) + + reader = db.Open(1) + value, err = reader.Get([]byte("key2")) + require.NoError(err) + require.Equal([]byte("value2@10"), value) + + value, height, exists, err = reader.GetEntry([]byte("key2")) + require.NoError(err) + require.True(exists) + require.Equal(uint64(1), height) + require.Equal([]byte("value2@10"), value) + + reader = db.Open(3) + _, err = reader.Get([]byte("key2")) + require.ErrorIs(err, database.ErrNotFound) + + _, err = reader.Get([]byte("key1")) + require.ErrorIs(err, database.ErrNotFound) + + _, height, exists, err = reader.GetEntry([]byte("key1")) + require.NoError(err) + require.False(exists) + require.Equal(uint64(3), height) + + _, _, _, err = reader.GetEntry([]byte("key4")) + require.ErrorIs(err, database.ErrNotFound) +} + +func TestDBKeySpace(t *testing.T) { + require := require.New(t) + + var ( + key1 = []byte("key1") + key2, _ = newDBKeyFromUser([]byte("key1"), 2) + key3 = []byte("key3") + value1 = []byte("value1@1") + value2 = []byte("value2@2") + value3 = []byte("value3@3") + ) + require.NotEqual(key1, key2) + require.NotEqual(key1, key3) + require.NotEqual(key2, key3) + + db := New(memdb.New()) + + batch := db.NewBatch(1) + require.NoError(batch.Put(key1, value1)) + require.NoError(batch.Write()) + + batch = db.NewBatch(2) + require.NoError(batch.Put(key2, value2)) + require.NoError(batch.Write()) + + batch = db.NewBatch(3) + require.NoError(batch.Put(key3, value3)) + require.NoError(batch.Write()) + + storedHeight, err := db.Height() + require.NoError(err) + require.Equal(uint64(3), storedHeight) + + reader := db.Open(3) + value, err := reader.Get(key1) + require.NoError(err) + require.Equal(value1, value) + + value, height, exists, 
err := reader.GetEntry(key1) + require.NoError(err) + require.True(exists) + require.Equal(uint64(1), height) + require.Equal(value1, value) +} + +func TestSkipHeight(t *testing.T) { + require := require.New(t) + + db := New(memdb.New()) + + _, err := db.Height() + require.ErrorIs(err, database.ErrNotFound) + + batch := db.NewBatch(0) + require.NoError(batch.Write()) + + height, err := db.Height() + require.NoError(err) + require.Zero(height) + + batch = db.NewBatch(10) + require.NoError(batch.Write()) + + height, err = db.Height() + require.NoError(err) + require.Equal(uint64(10), height) +} diff --git a/avalanchego/x/archivedb/key.go b/avalanchego/x/archivedb/key.go new file mode 100644 index 00000000..86a884cb --- /dev/null +++ b/avalanchego/x/archivedb/key.go @@ -0,0 +1,95 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package archivedb + +import ( + "encoding/binary" + "errors" + + "github.com/ava-labs/avalanchego/utils/wrappers" +) + +var ( + ErrParsingKeyLength = errors.New("failed reading key length") + ErrIncorrectKeyLength = errors.New("incorrect key length") + + heightKey = newDBKeyFromMetadata([]byte{}) +) + +// The requirements of a database key are: +// +// 1. A given user key must have a unique database key prefix. This guarantees +// that user keys can not overlap on disk. +// 2. Inside of a database key prefix, the database keys must be sorted by +// decreasing height. +// 3. User keys must never overlap with any metadata keys. + +// newDBKeyFromUser converts a user key and height into a database formatted +// key. +// +// To meet the requirements of a database key, the prefix is defined by +// concatenating the length of the user key and the user key. The suffix of the +// database key is the negation of the big endian encoded height. This suffix +// guarantees the keys are sorted correctly. 
+// +// Example (Assuming heights are 1 byte): +// | User key | Stored as | +// |------------|-------------| +// | foo:10 | 3:foo:245 | +// | foo:20 | 3:foo:235 | +// +// Returns: +// - The database key +// - The database key prefix, which is independent of the height +func newDBKeyFromUser(key []byte, height uint64) ([]byte, []byte) { + keyLen := len(key) + dbKeyMaxSize := binary.MaxVarintLen64 + keyLen + wrappers.LongLen + dbKey := make([]byte, dbKeyMaxSize) + offset := binary.PutUvarint(dbKey, uint64(keyLen)) + offset += copy(dbKey[offset:], key) + prefixOffset := offset + binary.BigEndian.PutUint64(dbKey[offset:], ^height) + offset += wrappers.LongLen + return dbKey[:offset], dbKey[:prefixOffset] +} + +// parseDBKeyFromUser takes a database formatted key and returns the user key +// along with its height. +// +// Note: An error should only be returned from this function if the database has +// been corrupted. +func parseDBKeyFromUser(dbKey []byte) ([]byte, uint64, error) { + keyLen, offset := binary.Uvarint(dbKey) + if offset <= 0 { + return nil, 0, ErrParsingKeyLength + } + + heightIndex := uint64(offset) + keyLen + if uint64(len(dbKey)) != heightIndex+wrappers.LongLen { + return nil, 0, ErrIncorrectKeyLength + } + + key := dbKey[offset:heightIndex] + height := ^binary.BigEndian.Uint64(dbKey[heightIndex:]) + return key, height, nil +} + +// newDBKeyFromMetadata converts a metadata key into a database formatted key. +// +// To meet the requirements of a database key, the key is defined by +// concatenating the length of the metadata key + 1 and the metadata key. 
+// +// Example: +// | Metadata key | Stored as | +// |----------------|-------------| +// | foo | 4:foo | +// | fo | 3:fo | +func newDBKeyFromMetadata(key []byte) []byte { + keyLen := len(key) + dbKeyMaxSize := binary.MaxVarintLen64 + keyLen + dbKey := make([]byte, dbKeyMaxSize) + offset := binary.PutUvarint(dbKey, uint64(keyLen)+1) + offset += copy(dbKey[offset:], key) + return dbKey[:offset] +} diff --git a/avalanchego/x/archivedb/key_test.go b/avalanchego/x/archivedb/key_test.go new file mode 100644 index 00000000..5c7ff9d0 --- /dev/null +++ b/avalanchego/x/archivedb/key_test.go @@ -0,0 +1,63 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package archivedb + +import ( + "bytes" + "slices" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestNaturalDescSortingForSameKey(t *testing.T) { + key0, _ := newDBKeyFromUser(make([]byte, 0), 0) + key1, _ := newDBKeyFromUser(make([]byte, 0), 1) + key2, _ := newDBKeyFromUser(make([]byte, 0), 2) + key3, _ := newDBKeyFromUser(make([]byte, 0), 3) + + entry := [][]byte{key0, key1, key2, key3} + expected := [][]byte{key3, key2, key1, key0} + + slices.SortFunc(entry, bytes.Compare) + + require.Equal(t, expected, entry) +} + +func TestSortingDifferentPrefix(t *testing.T) { + key0, _ := newDBKeyFromUser([]byte{0}, 0) + key1, _ := newDBKeyFromUser([]byte{0}, 1) + key2, _ := newDBKeyFromUser([]byte{1}, 0) + key3, _ := newDBKeyFromUser([]byte{1}, 1) + + entry := [][]byte{key0, key1, key2, key3} + expected := [][]byte{key1, key0, key3, key2} + + slices.SortFunc(entry, bytes.Compare) + + require.Equal(t, expected, entry) +} + +func TestParseDBKey(t *testing.T) { + require := require.New(t) + + key := []byte{0, 1, 2, 3, 4, 5} + height := uint64(102310) + dbKey, _ := newDBKeyFromUser(key, height) + + parsedKey, parsedHeight, err := parseDBKeyFromUser(dbKey) + require.NoError(err) + require.Equal(key, parsedKey) + require.Equal(height, parsedHeight) +} + 
+func FuzzMetadataKeyInvariant(f *testing.F) { + f.Fuzz(func(t *testing.T, userKey []byte, metadataKey []byte) { + // The prefix is independent of the height, so its value doesn't matter + // for this test. + _, dbKeyPrefix := newDBKeyFromUser(userKey, 0) + dbKey := newDBKeyFromMetadata(metadataKey) + require.False(t, bytes.HasPrefix(dbKey, dbKeyPrefix)) + }) +} diff --git a/avalanchego/x/archivedb/prefix_test.go b/avalanchego/x/archivedb/prefix_test.go new file mode 100644 index 00000000..8558b592 --- /dev/null +++ b/avalanchego/x/archivedb/prefix_test.go @@ -0,0 +1,112 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package archivedb + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/database/memdb" +) + +type limitIterationDB struct { + database.Database +} + +func (db *limitIterationDB) NewIterator() database.Iterator { + return db.NewIteratorWithStartAndPrefix(nil, nil) +} + +func (db *limitIterationDB) NewIteratorWithStart(start []byte) database.Iterator { + return db.NewIteratorWithStartAndPrefix(start, nil) +} + +func (db *limitIterationDB) NewIteratorWithPrefix(prefix []byte) database.Iterator { + return db.NewIteratorWithStartAndPrefix(nil, prefix) +} + +func (db *limitIterationDB) NewIteratorWithStartAndPrefix(start, prefix []byte) database.Iterator { + return &limitIterationIterator{ + Iterator: db.Database.NewIteratorWithStartAndPrefix(start, prefix), + } +} + +type limitIterationIterator struct { + database.Iterator + exhausted bool +} + +func (it *limitIterationIterator) Next() bool { + if it.exhausted { + return false + } + it.exhausted = true + return it.Iterator.Next() +} + +func TestDBEfficientLookups(t *testing.T) { + require := require.New(t) + + var ( + key = []byte("key") + maliciousKey, _ = newDBKeyFromUser(key, 2) + ) + + db := New(&limitIterationDB{Database: memdb.New()}) + + 
batch := db.NewBatch(1) + require.NoError(batch.Put(key, []byte("value"))) + require.NoError(batch.Write()) + + for i := 0; i < 10000; i++ { + batch = db.NewBatch(uint64(i) + 2) + require.NoError(batch.Put(maliciousKey, []byte{byte(i)})) + require.NoError(batch.Write()) + } + + reader := db.Open(10001) + value, err := reader.Get(key) + require.NoError(err) + require.Equal([]byte("value"), value) + + value, height, found, err := reader.GetEntry(key) + require.NoError(err) + require.True(found) + require.Equal(uint64(1), height) + require.Equal([]byte("value"), value) +} + +func TestDBMoreEfficientLookups(t *testing.T) { + require := require.New(t) + + var ( + key = []byte("key") + maliciousKey = []byte("key\xff\xff\xff\xff\xff\xff\xff\xfd") + ) + + db := New(&limitIterationDB{Database: memdb.New()}) + + batch := db.NewBatch(1) + require.NoError(batch.Put(key, []byte("value"))) + require.NoError(batch.Write()) + + for i := 2; i < 10000; i++ { + batch = db.NewBatch(uint64(i)) + require.NoError(batch.Put(maliciousKey, []byte{byte(i)})) + require.NoError(batch.Write()) + } + + reader := db.Open(10001) + value, err := reader.Get(key) + require.NoError(err) + require.Equal([]byte("value"), value) + + value, height, found, err := reader.GetEntry(key) + require.NoError(err) + require.True(found) + require.Equal(uint64(1), height) + require.Equal([]byte("value"), value) +} diff --git a/avalanchego/x/archivedb/reader.go b/avalanchego/x/archivedb/reader.go new file mode 100644 index 00000000..abac3d85 --- /dev/null +++ b/avalanchego/x/archivedb/reader.go @@ -0,0 +1,61 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package archivedb + +import "github.com/ava-labs/avalanchego/database" + +var _ database.KeyValueReader = (*Reader)(nil) + +type Reader struct { + db *Database + height uint64 +} + +func (r *Reader) Has(key []byte) (bool, error) { + _, err := r.Get(key) + if err == database.ErrNotFound { + return false, nil + } + return true, err +} + +func (r *Reader) Get(key []byte) ([]byte, error) { + value, _, exists, err := r.GetEntry(key) + if err != nil { + return nil, err + } + if exists { + return value, nil + } + return value, database.ErrNotFound +} + +// GetEntry retrieves the value of the provided key, the height it was last +// modified at, and a boolean to indicate if the last modification was an +// insertion. If the key has never been modified, ErrNotFound will be returned. +func (r *Reader) GetEntry(key []byte) ([]byte, uint64, bool, error) { + it := r.db.db.NewIteratorWithStartAndPrefix(newDBKeyFromUser(key, r.height)) + defer it.Release() + + next := it.Next() + if err := it.Error(); err != nil { + return nil, 0, false, err + } + + // There is no available key with the requested prefix + if !next { + return nil, 0, false, database.ErrNotFound + } + + _, height, err := parseDBKeyFromUser(it.Key()) + if err != nil { + return nil, 0, false, err + } + + value, exists := parseDBValue(it.Value()) + if !exists { + return nil, height, false, nil + } + return value, height, true, nil +} diff --git a/avalanchego/x/archivedb/value.go b/avalanchego/x/archivedb/value.go new file mode 100644 index 00000000..5f5861e2 --- /dev/null +++ b/avalanchego/x/archivedb/value.go @@ -0,0 +1,17 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package archivedb + +func newDBValue(value []byte) []byte { + dbValue := make([]byte, len(value)+1) + copy(dbValue[1:], value) + return dbValue +} + +func parseDBValue(dbValue []byte) ([]byte, bool) { + if len(dbValue) == 0 { + return nil, false + } + return dbValue[1:], true +} diff --git a/avalanchego/x/merkledb/README.md b/avalanchego/x/merkledb/README.md index 0a964a2c..29c9f0a7 100644 --- a/avalanchego/x/merkledb/README.md +++ b/avalanchego/x/merkledb/README.md @@ -1,71 +1,448 @@ -# Path Based Merkelized Radix Trie +# MerkleDB -## TODOs +## Structure -- [ ] Remove special casing around the root node from the physical structure of the hashed tree. -- [ ] Analyze performance impact of needing to skip intermediate nodes when generating range and change proofs - - [ ] Consider moving nodes with values to a separate db prefix -- [ ] Analyze performance of using database snapshots rather than in-memory history -- [ ] Improve intermediate node regeneration after ungraceful shutdown by reusing successfully written subtrees +A _Merkle radix trie_ is a data structure that is both a [Merkle tree](https://en.wikipedia.org/wiki/Merkle_tree) and a [radix trie](https://en.wikipedia.org/wiki/Radix_tree). MerkleDB is an implementation of a persisted key-value store (sometimes just called "a store") using a Merkle radix trie. We sometimes use "Merkle radix trie" and "MerkleDB instance" interchangeably below, but the two are not the same. MerkleDB maintains data in a Merkle radix trie, but not all Merkle radix tries implement a key-value store. + +Like all tries, a MerkleDB instance is composed of nodes. Conceptually, a node has: + * A unique _key_ which identifies its position in the trie. A node's key is a prefix of its children's keys. + * A unique _ID_, which is the hash of the node. + * A _children_ array, where each element is the ID of the child at that index. A child at a lower index is to the "left" of children at higher indices. + * An optional value.
If a node has a value, then the node's key maps to its value in the key-value store. Otherwise the key isn't present in the store. + +and looks like this: +``` +Node ++--------------------------------------------+ +| ID: 32 bytes | +| Key: ? bytes | +| Value: Some(value) | None | +| Children: | +| 0: Some(child0ID) | None | +| 1: Some(child2ID) | None | +| ... | +| BranchFactor-1: Some(child15ID) | None | ++--------------------------------------------+ +``` + +This conceptual picture differs slightly from the implementation of the `node` in MerkleDB but is still useful in understanding how MerkleDB works. + +## Root IDs and Revisions + +The ID of the root node is called the _root ID_, or sometimes just the _root_ of the trie. If any node in a MerkleDB instance changes, the root ID will change. This follows from the fact that changing a node changes its ID, which changes its parent's reference to it, which changes the parent, which changes the parent's ID, and so on until the root. + +The root ID also serves as a unique identifier of a given state; instances with the same key-value mappings always have the same root ID, and instances with different key-value mappings always have different root IDs. We call a state with a given root ID a _revision_, and we sometimes say that a MerkleDB instance is "at" a given revision or root ID. The two are equivalent. + +## Views + +A _view_ is a proposal to modify a MerkleDB. If a view is _committed_, its changes are written to the MerkleDB. It can be queried, and when it is, it returns the state that the MerkleDB will contain if the view is committed. A view is immutable after creation. Namely, none of its key-value pairs can be modified. + +A view can be built atop the MerkleDB itself, or it can be built atop another view. Views can be chained together. For example, we might have: + +``` + db + / \ +view1 view2 + | +view3 +``` + +where `view1` and `view2` are built atop MerkleDB instance `db` and `view3` is built atop `view1`. 
Equivalently, we say that `db` is the parent of `view1` and `view2`, and `view3` is a child of `view1`. `view1` and `view2` are _siblings_. + +`view1` contains all the key-value pairs in `db`, except those modified by `view1`. That is, if `db` has key-value pair `(k,v)`, and `view1` doesn't modify that pair, then `view1` will return `v` when queried for the value of `k`. If `db` has `(k,v)` but `view1` modifies the pair to `(k, v')` then it will return `v'` when queried for the value of `k`. Similar for `view2`. + +`view3` has all of the key-value pairs as `view1`, except those modified in `view3`. That is, it has the state after the changes in `view1` are applied to `db`, followed by those in `view3`. + +A view can be committed only if its parent is the MerkleDB (and not another view). A view can only be committed once. In the above diagram, `view3` can't be committed until `view1` is committed. + +When a view is created, we don't apply changes to the trie's structure or calculate the new IDs of nodes because this requires expensive hashing. Instead, we lazily apply changes and calculate node IDs (including the root ID) when necessary. + +### Validity + +When a view is committed, its siblings and all of their descendants are _invalidated_. An invalid view can't be read or committed. Method calls on it will return `ErrInvalid`. + +In the diagram above, if `view1` were committed, `view2` would be invalidated. If `view2` were committed, `view1` and `view3` would be invalidated. + +## Proofs + +### Simple Proofs + +MerkleDB instances can produce _merkle proofs_, sometimes just called "proofs." A merkle proof uses cryptography to prove that a given key-value pair is or isn't in the key-value store with a given root. That is, a MerkleDB instance with root ID `r` can create a proof that shows that it has a key-value pair `(k,v)`, or that `k` is not present. + +Proofs can be useful to a client fetching data in a Byzantine environment.
Suppose there are one or more servers, which may be Byzantine, serving a distributed key-value store using MerkleDB, and a client that wants to retrieve key-value pairs. Suppose also that the client can learn a "trusted" root ID, perhaps because it's posted on a blockchain. The client can request a key-value pair from a server, and use the returned proof to verify that the returned key-value pair is actually in the key-value store with the trusted root (or isn't, as it were.) + +```mermaid +flowchart TD + A[Client] -->|"ProofRequest(k,r)"| B(Server) + B --> |"Proof(k,r)"| C(Client) + C --> |Proof Valid| D(Client trusts key-value pair from proof) + C --> |Proof Invalid| E(Client doesn't trust key-value pair from proof) +``` + +`ProofRequest(k,r)` is a request for the value that `k` maps to in the MerkleDB instance with root `r` and a proof for that data's correctness. + +`Proof(k,r)` is a proof that purports to show either that key-value pair `(k,v)` exists in the revision at `r`, or that `k` isn't in the revision. + +#### Verification + +A proof is represented as: + +```go +type Proof struct { + // Nodes in the proof path from root --> target key + // (or node that would be where key is if it doesn't exist). + // Always contains at least the root. + Path []ProofNode + + // This is a proof that [key] exists/doesn't exist. + Key Key + + // Nothing if [Key] isn't in the trie. + // Otherwise, the value corresponding to [Key]. + Value maybe.Maybe[[]byte] +} + +type ProofNode struct { + Key Key + // Nothing if this is an intermediate node. + // The value in this node if its length < [HashLen]. + // The hash of the value in this node otherwise. + ValueOrHash maybe.Maybe[[]byte] + Children map[byte]ids.ID +} +``` + +For an inclusion proof, the last node in `Path` should be the one containing `Key`. +For an exclusion proof, the last node is either: +* The node that would be the parent of `Key`, if such node has no child at the index `Key` would be at.
+* The node at the same child index `Key` would be at, otherwise. + +In other words, the last node of a proof says either, "the key is in the trie, and this node contains it," or, "the key isn't in the trie, and this node's existence precludes the existence of the key." + +The prover can't simply trust that such a node exists, though. It has to verify this. The prover creates an empty trie and inserts the nodes in `Path`. If the root ID of this trie matches the `r`, the verifier can trust that the last node really does exist in the trie. If the last node _didn't_ really exist, the proof creator couldn't create `Path` such that its nodes both imply the existence of the ("fake") last node and also result in the correct root ID. This follows from the one-way property of hashing. + +### Range Proofs + +MerkleDB instances can also produce _range proofs_. A range proof proves that a contiguous set of key-value pairs is or isn't in the key-value store with a given root. This is similar to the merkle proofs described above, except for multiple key-value pairs. + +```mermaid +flowchart TD + A[Client] -->|"RangeProofRequest(start,end,r)"| B(Server) + B --> |"RangeProof(start,end,r)"| C(Client) + C --> |Proof Valid| D(Client trusts key-value pairs) + C --> |Proof Invalid| E(Client doesn't trust key-value pairs) +``` + +`RangeProofRequest(start,end,r)` is a request for all of the key-value pairs, in order, between keys `start` and `end` at revision `r`. + +`RangeProof(start,end,r)` contains a list of key-value pairs `kvs`, sorted by increasing key. It purports to show that, at revision `r`: +* Each element of `kvs` is a key-value pair in the store. +* There are no keys at/after `start` but before the first key in `kvs`. +* For adjacent key-value pairs `(k1,v1)` and `(k2,v2)` in `kvs`, there doesn't exist a key-value pair `(k3,v3)` in the store such that `k1 < k3 < k2`. In other words, `kvs` is a contiguous set of key-value pairs. 
+ +Clients can use range proofs to efficiently download many key-value pairs at a time from a MerkleDB instance, as opposed to getting a proof for each key-value pair individually. + +#### Verification + +Like simple proofs, range proofs can be verified without any additional context or knowledge of the contents of the key-value store. + +A range proof is represented as: + +```go +type RangeProof struct { + // Invariant: At least one of [StartProof], [EndProof], [KeyValues] is non-empty. -## Introduction + // A proof that the smallest key in the requested range does/doesn't exist. + // Note that this may not be an entire proof -- nodes are omitted if + // they are also in [EndProof]. + StartProof []ProofNode -The Merkle Trie is a data structure that allows efficient and secure verification of the contents. It is a combination of a [Merkle Tree](https://en.wikipedia.org/wiki/Merkle_tree) and a [Radix Trie](https://en.wikipedia.org/wiki/Radix_tree). + // If no upper range bound was given and [KeyValues] is empty, this is empty. + // + // If no upper range bound was given and [KeyValues] is non-empty, this is + // a proof for the largest key in [KeyValues]. + // + // Otherwise this is a proof for the upper range bound. + EndProof []ProofNode -The trie contains `Merkle Nodes`, which store key/value and children information. + // This proof proves that the key-value pairs in [KeyValues] are in the trie. + // Sorted by increasing key. + KeyValues []KeyValue +} +``` + +The prover creates an empty trie and adds to it all of the key-value pairs in `KeyValues`. + +Then, it inserts: +* The nodes in `StartProof` +* The nodes in `EndProof` + +For each node in `StartProof`, the prover only populates `Children` entries whose key is before `start`. +For each node in `EndProof`, it populates only `Children` entries whose key is after `end`, where `end` is the largest key proven by the range proof. + +Then, it calculates the root ID of this trie and compares it to the expected one. 
+ +If the proof: +* Omits any key-values in the range +* Includes additional key-values that aren't really in the range +* Provides an incorrect value for a key in the range + +then the actual root ID won't match the expected root ID. + +Like simple proofs, range proof verification relies on the fact that the proof generator can't forge data such that it results in a trie with both incorrect data and the correct root ID. + +### Change Proofs + +Finally, MerkleDB instances can produce and verify _change proofs_. A change proof proves that a set of key-value changes were applied to a MerkleDB instance in the process of changing its root from `r` to `r'`. For example, suppose there's an instance with root `r` + +```mermaid +flowchart TD + A[Client] -->|"ChangeProofRequest(start,end,r,r')"| B(Server) + B --> |"ChangeProof(start,end,r,r')"| C(Client) + C --> |Proof Valid| D(Client trusts key-value pair changes) + C --> |Proof Invalid| E(Client doesn't trust key-value changes) +``` + +`ChangeProofRequest(start,end,r,r')` is a request for all key-value pairs, in order, between keys `start` and `end`, that occurred after the root of was `r` and before the root was `r'`. + +`ChangeProof(start,end,r,r')` contains a set of key-value pairs `kvs`. It purports to show that: +* Each element of `kvs` is a key-value pair in the at revision `r'` but not at revision `r`. +* There are no key-value changes between `r` and `r'` such that the key is at/after `start` but before the first key in `kvs`. +* For adjacent key-value changes `(k1,v1)` and `(k2,v2)` in `kvs`, there doesn't exist a key-value change `(k3,v3)` between `r` and `r'` such that `k1 < k3 < k2`. In other words, `kvs` is a contiguous set of key-value changes. -Each `Merkle Node` represents a key path into the trie. It stores the key, the value (if one exists), its ID, and the IDs of its children nodes. 
The children have keys that contain the current node's key path as a prefix, and the index of each child indicates the next nibble in that child's key. For example, if we have two nodes, Node 1 with key path `0x91A` and Node 2 with key path `0x91A4`, Node 2 is stored in index `0x4` of Node 1's children (since 0x4 is the first value after the common prefix). +Change proofs are useful for applying changes between revisions. For example, suppose a client has a MerkleDB instance at revision `r`. The client learns that the state has been updated and that the new root is `r'`. The client can request a change proof from a server at revision `r'`, and apply the changes in the change proof to change its state from `r` to `r'`. Note that `r` and `r'` need not be "consecutive" revisions. For example, it's possible that the state goes from revision `r` to `r1` to `r2` to `r'`. The client apply changes to get directly from `r` to `r'`, without ever needing to be at revision `r1` or `r2`. -To reduce the depth of nodes in the trie, a `Merkle Node` utilizes path compression. Instead of having a long chain of nodes each containing only a single nibble of the key, we can "compress" the path by recording additional key information with each of a node's children. For example, if we have three nodes, Node 1 with key path `0x91A`, Node 2 with key path `0x91A4`, and Node 3 with key path `0x91A5132`, then Node 1 has a key of `0x91A`. Node 2 is stored at index `0x4` of Node 1's children since `4` is the next nibble in Node 2's key after skipping the common nibbles from Node 1's key. Node 3 is stored at index `0x5` of Node 1's children. Rather than have extra nodes for the remainder of Node 3's key, we instead store the rest of the key (`132`) in Node 1's children info. +#### Verification +Unlike simple proofs and range proofs, change proofs require additional context to verify. Namely, the prover must have the trie at the start root `r`. 
+ +The verification algorithm is similar to range proofs, except that instead of inserting the key-value changes, start proof and end proof into an empty trie, they are added to the trie at revision `r`. + +## Serialization + +### Node + +Nodes are persisted in an underlying database. In order to persist nodes, we must first serialize them. Serialization is done by the `encoder` interface defined in `codec.go`. + +The node serialization format is: + +``` ++----------------------------------------------------+ +| Value existence flag (1 byte) | ++----------------------------------------------------+ +| Value length (varint) (optional) | ++----------------------------------------------------+ +| Value (variable length bytes) (optional) | ++----------------------------------------------------+ +| Number of children (varint) | ++----------------------------------------------------+ +| Child index (varint) | ++----------------------------------------------------+ +| Child compressed key length (varint) | ++----------------------------------------------------+ +| Child compressed key (variable length bytes) | ++----------------------------------------------------+ +| Child ID (32 bytes) | ++----------------------------------------------------+ +| Child has value (1 bytes) | ++----------------------------------------------------+ +| Child index (varint) | ++----------------------------------------------------+ +| Child compressed key length (varint) | ++----------------------------------------------------+ +| Child compressed key (variable length bytes) | ++----------------------------------------------------+ +| Child ID (32 bytes) | ++----------------------------------------------------+ +| Child has value (1 bytes) | ++----------------------------------------------------+ +|... 
| ++----------------------------------------------------+ ``` -+-----------------------------------+ -| Merkle Node | -| | -| ID: 0x0131 | an id representing the current node, derived from the node's value and all children ids -| Key: 0x91 | prefix of the key path, representing the location of the node in the trie -| Value: 0x00 | the value, if one exists, that is stored at the key path (pathPrefix + compressedPath) -| Children: | a map of children node ids for any nodes in the trie that have this node's key path as a prefix -| 0: [:0x00542F] | child 0 represents a node with key 0x910 with ID 0x00542F -| 1: [0x432:0xA0561C] | child 1 represents a node with key 0x911432 with ID 0xA0561C -| ... | -| 15: [0x9A67B:0x02FB093] | child 15 represents a node with key 0x91F9A67B with ID 0x02FB093 -+-----------------------------------+ + +Where: +* `Value existence flag` is `1` if this node has a value, otherwise `0`. +* `Value length` is the length of the value, if it exists (i.e. if `Value existence flag` is `1`.) Otherwise not serialized. +* `Value` is the value, if it exists (i.e. if `Value existence flag` is `1`.) Otherwise not serialized. +* `Number of children` is the number of children this node has. +* `Child index` is the index of a child node within the list of the node's children. +* `Child compressed key length` is the length of the child node's compressed key. +* `Child compressed key` is the child node's compressed key. +* `Child ID` is the child node's ID. +* `Child has value` indicates if that child has a value. 
+ +For each child of the node, we have an additional: + +``` ++----------------------------------------------------+ +| Child index (varint) | ++----------------------------------------------------+ +| Child compressed key length (varint) | ++----------------------------------------------------+ +| Child compressed key (variable length bytes) | ++----------------------------------------------------+ +| Child ID (32 bytes) | ++----------------------------------------------------+ +| Child has value (1 bytes) | ++----------------------------------------------------+ ``` +Note that the `Child index` are not necessarily sequential. For example, if a node has 3 children, the `Child index` values could be `0`, `2`, and `15`. +However, the `Child index` values must be strictly increasing. For example, the `Child index` values cannot be `0`, `0`, and `1`, or `1`, `0`. + +Since a node can have up to 16 children, there can be up to 16 such blocks of children data. + +#### Example + +Let's take a look at an example node. + +Its byte representation (in hex) is: `0x01020204000210579EB3718A7E437D2DDCE931AC7CC05A0BC695A9C2084F5DF12FB96AD0FA32660E06FFF09845893C4F9D92C4E097FCF2589BC9D6882B1F18D1C2FC91D7DF1D3FCBDB4238` + +The node's key is empty (its the root) and has value `0x02`. +It has two children. +The first is at child index `0`, has compressed key `0x01` and ID (in hex) `0x579eb3718a7e437d2ddce931ac7cc05a0bc695a9c2084f5df12fb96ad0fa3266`. +The second is at child index `14`, has compressed key `0x0F0F0F` and ID (in hex) `0x9845893c4f9d92c4e097fcf2589bc9d6882b1f18d1c2fc91d7df1d3fcbdb4238`. 
+ +``` ++--------------------------------------------------------------------+ +| Value existence flag (1 byte) | +| 0x01 | ++--------------------------------------------------------------------+ +| Value length (varint) (optional) | +| 0x02 | ++--------------------------------------------------------------------+ +| Value (variable length bytes) (optional) | +| 0x02 | ++--------------------------------------------------------------------+ +| Number of children (varint) | +| 0x04 | ++--------------------------------------------------------------------+ +| Child index (varint) | +| 0x00 | ++--------------------------------------------------------------------+ +| Child compressed key length (varint) | +| 0x02 | ++--------------------------------------------------------------------+ +| Child compressed key (variable length bytes) | +| 0x10 | ++--------------------------------------------------------------------+ +| Child ID (32 bytes) | +| 0x579EB3718A7E437D2DDCE931AC7CC05A0BC695A9C2084F5DF12FB96AD0FA3266 | ++--------------------------------------------------------------------+ +| Child index (varint) | +| 0x0E | ++--------------------------------------------------------------------+ +| Child compressed key length (varint) | +| 0x06 | ++--------------------------------------------------------------------+ +| Child compressed key (variable length bytes) | +| 0xFFF0 | ++--------------------------------------------------------------------+ +| Child ID (32 bytes) | +| 0x9845893C4F9D92C4E097FCF2589BC9D6882B1F18D1C2FC91D7DF1D3FCBDB4238 | ++--------------------------------------------------------------------+ +``` + +### Node Hashing + +Each node must have a unique ID that identifies it. 
This ID is calculated by hashing the following values: +* The node's children +* The node's value digest +* The node's key + +The node's value digest is: +* Nothing, if the node has no value +* The node's value, if it has a value < 32 bytes +* The hash of the node's value otherwise + +We use the node's value digest rather than its value when hashing so that when we send proofs, each `ProofNode` doesn't need to contain the node's value, which could be very large. By using the value digest, we allow a proof verifier to calculate a node's ID while limiting the size of the data sent to the verifier. + +Specifically, we encode these values in the following way: + +``` ++----------------------------------------------------+ +| Number of children (varint) | ++----------------------------------------------------+ +| Child index (varint) | ++----------------------------------------------------+ +| Child ID (32 bytes) | ++----------------------------------------------------+ +| Child index (varint) | ++----------------------------------------------------+ +| Child ID (32 bytes) | ++----------------------------------------------------+ +|... | ++----------------------------------------------------+ +| Value existence flag (1 byte) | ++----------------------------------------------------+ +| Value length (varint) (optional) | ++----------------------------------------------------+ +| Value (variable length bytes) (optional) | ++----------------------------------------------------+ +| Key length (varint) | ++----------------------------------------------------+ +| Key (variable length bytes) | ++----------------------------------------------------+ +``` + +Where: +* `Number of children` is the number of children this node has. +* `Child index` is the index of a child node within the list of the node's children. +* `Child ID` is the child node's ID. +* `Value existence flag` is `1` if this node has a value, otherwise `0`. 
+* `Value length` is the length of the value, if it exists (i.e. if `Value existence flag` is `1`.) Otherwise not serialized. +* `Value` is the value, if it exists (i.e. if `Value existence flag` is `1`.) Otherwise not serialized. +* `Key length` is the number of nibbles in this node's key. +* `Key` is the node's key. + +Note that, as with the node serialization format, the `Child index` values aren't necessarily sequential, but they are unique and strictly increasing. +Also like the node serialization format, there can be up to 16 blocks of children data. +However, note that child compressed keys are not included in the node ID calculation. + +Once this is encoded, we `sha256` hash the resulting bytes to get the node's ID. + +### Encoding Varints and Bytes + +Varints are encoded with `binary.PutUvarint` from the standard library's `binary/encoding` package. +Bytes are encoded by simply copying them onto the buffer. + ## Design choices ### []byte copying -Nodes contain a []byte which represents its value. This slice should never be edited internally. This allows usage without having to make copies of it for safety. -Anytime these values leave the library, for example in `Get`, `GetValue`, `GetProof`, `GetRangeProof`, etc, they need to be copied into a new slice to prevent -edits made outside of the library from being reflected in the DB/TrieViews. -### Single node type +A node may contain a value, which is represented in Go as a `[]byte`. This slice is never edited, allowing it to be used without copying it first in many places. When a value leaves the library, for example when returned in `Get`, `GetValue`, `GetProof`, `GetRangeProof`, etc., the value is copied to prevent edits made outside the library from being reflected in the database. -A `Merkle Node` holds the IDs of its children, its value, as well as any path extension. This simplifies some logic and allows all of the data about a node to be loaded in a single database read. 
This trades off a small amount of storage efficiency (some fields may be `nil` but are still stored for every node). +### Split Node Storage -### Validity +Nodes with values ("value nodes") are persisted under one database prefix, while nodes without values ("intermediate nodes") are persisted under another database prefix. This separation allows for easy iteration over all key-value pairs in the database, as this is simply iterating over the database prefix containing value nodes. + +### Single Node Type -A `trieView` is built atop another trie, and that trie could change at any point. If it does, all descendants of the trie will be marked invalid before the edit of the trie occurs. If an operation is performed on an invalid trie, an ErrInvalid error will be returned instead of the expected result. When a view is committed, all of its sibling views (the views that share the same parent) are marked invalid and any child views of the view have their parent updated to exclude any committed views between them and the db. +MerkleDB uses one type to represent nodes, rather than having multiple types (e.g. branch nodes, value nodes, extension nodes) as other Merkle Trie implementations do. + +Not using extension nodes results in worse storage efficiency (some nodes may have mostly empty children) but simpler code. ### Locking -`Database` has a `RWMutex` named `lock`. Its read operations don't store data in a map, so a read lock suffices for read operations. -`Database` has a `Mutex` named `commitLock`. It enforces that only a single view/batch is attempting to commit to the database at one time. `lock` is insufficient because there is a period of view preparation where read access should still be allowed, followed by a period where a full write lock is needed. The `commitLock` ensures that only a single goroutine makes the transition from read->write. +`merkleDB` has a `RWMutex` named `lock`. 
Its read operations don't store data in a map, so a read lock suffices for read operations. +`merkleDB` has a `Mutex` named `commitLock`. It enforces that only a single view/batch is attempting to commit to the database at one time. `lock` is insufficient because there is a period of view preparation where read access should still be allowed, followed by a period where a full write lock is needed. The `commitLock` ensures that only a single goroutine makes the transition from read => write. + +A `view` is built atop another trie, which may be the underlying `merkleDB` or another `view`. +We use locking to guarantee atomicity/consistency of trie operations. + +`view` has a `RWMutex` named `commitLock` which ensures that we don't create a view atop the `view` while it's being committed. +It also has a `RWMutex` named `validityTrackingLock` that is held during methods that change the view's validity, tracking of child views' validity, or of the `view` parent trie. This lock ensures that writing/reading from `view` or any of its descendants is safe. +The `CommitToDB` method grabs the `merkleDB`'s `commitLock`. This is the only `view` method that modifies the underlying `merkleDB`. -A `trieView` is built atop another trie, which may be the underlying `Database` or another `trieView`. -It's important to guarantee atomicity/consistency of trie operations. -That is, if a view method is executing, the views/database underneath the view shouldn't be changing. -To prevent this, we need to use locking. +In some of `merkleDB`'s methods, we create a `view` and call unexported methods on it without locking it. +We do so because the exported counterpart of the method read locks the `merkleDB`, which is already locked. +This pattern is safe because the `merkleDB` is locked, so no data under the view is changing, and nobody else has a reference to the view, so there can't be any concurrent access. 
-`trieView` has a `RWMutex` named `lock` that's held when methods that access the trie's structure are executing. It is responsible for ensuring that writing/reading from a `trieView` or from any *ancestor* is safe.
-It also has a `RWMutex` named `validityTrackingLock` that is held during methods that change the view's validity, tracking of child views' validity, or of the `trieView` parent trie. This lock ensures that writing/reading from `trieView` or any of its *descendants* is safe.
-The `Commit` function also grabs the `Database`'s `commitLock` lock. This is the only `trieView` method that modifies the underlying `Database`. If an ancestor is modified during this time, the commit will error with ErrInvalid.
+To prevent deadlocks, `view` and `merkleDB` never acquire the `commitLock` of descendant views.
+That is, locking is always done from a view toward the underlying `merkleDB`, never the other way around.
+The `validityTrackingLock` goes the opposite way. A view can lock the `validityTrackingLock` of its children, but not its ancestors. Because of this, any function that takes the `validityTrackingLock` must not take the `commitLock` as this may cause a deadlock. Keeping `commitLock` solely in the ancestor direction and `validityTrackingLock` solely in the descendant direction prevents deadlocks from occurring.
 
-In some of `Database`'s methods, we create a `trieView` and call unexported methods on it without locking it.
-We do so because the exported counterpart of the method read locks the `Database`, which is already locked.
-This pattern is safe because the `Database` is locked, so no data under the view is changing, and nobody else has a reference to the view, so there can't be any concurrent access.
+## TODOs
 
-To prevent deadlocks, `trieView` and `Database` never acquire the `lock` of any descendant views that are built atop it.
-That is, locking is always done from a view down to the underlying `Database`, never the other way around.
-The `validityTrackingLock` goes the opposite way. Views can validityTrackingLock their children, but not their ancestors. Because of this, any function that takes the `validityTrackingLock` should avoid taking the `lock` as this will likely trigger a deadlock. Keeping `lock` solely in the ancestor direction and `validityTrackingLock` solely in the descendant direction prevents deadlocks from occurring. +- [ ] Analyze performance of using database snapshots rather than in-memory history +- [ ] Improve intermediate node regeneration after ungraceful shutdown by reusing successfully written subtrees diff --git a/avalanchego/x/merkledb/batch.go b/avalanchego/x/merkledb/batch.go index 353c0839..03320040 100644 --- a/avalanchego/x/merkledb/batch.go +++ b/avalanchego/x/merkledb/batch.go @@ -1,23 +1,19 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package merkledb -import ( - "github.com/ava-labs/avalanchego/database" -) +import "github.com/ava-labs/avalanchego/database" -var _ database.Batch = &batch{} +var _ database.Batch = (*batch)(nil) -// batch is a write-only database that commits changes to its host database -// when Write is called. type batch struct { database.BatchOps - db *Database + db *merkleDB } -// apply all operations in order to the database and write the result to disk +// Assumes [b.db.lock] isn't held. func (b *batch) Write() error { return b.db.commitBatch(b.Ops) } diff --git a/avalanchego/x/merkledb/cache.go b/avalanchego/x/merkledb/cache.go index 9aecae74..ee2e7f0b 100644 --- a/avalanchego/x/merkledb/cache.go +++ b/avalanchego/x/merkledb/cache.go @@ -1,80 +1,111 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package merkledb import ( + "errors" "sync" "github.com/ava-labs/avalanchego/utils/linkedhashmap" "github.com/ava-labs/avalanchego/utils/wrappers" ) +var errEmptyCacheTooLarge = errors.New("cache is empty yet still too large") + // A cache that calls [onEviction] on the evicted element. type onEvictCache[K comparable, V any] struct { - lock sync.Mutex - maxSize int - // LRU --> MRU from left to right. - lru linkedhashmap.LinkedHashmap[K, V] - onEviction func(V) error + lock sync.RWMutex + maxSize int + currentSize int + fifo linkedhashmap.LinkedHashmap[K, V] + size func(K, V) int + // Must not call any method that grabs [c.lock] + // because this would cause a deadlock. + onEviction func(K, V) error } -func newOnEvictCache[K comparable, V any](maxSize int, onEviction func(V) error) onEvictCache[K, V] { +// [size] must always return a positive number. +func newOnEvictCache[K comparable, V any]( + maxSize int, + size func(K, V) int, + onEviction func(K, V) error, +) onEvictCache[K, V] { return onEvictCache[K, V]{ maxSize: maxSize, - lru: linkedhashmap.New[K, V](), + fifo: linkedhashmap.New[K, V](), + size: size, onEviction: onEviction, } } // Get an element from this cache. func (c *onEvictCache[K, V]) Get(key K) (V, bool) { - c.lock.Lock() - defer c.lock.Unlock() + c.lock.RLock() + defer c.lock.RUnlock() - val, ok := c.lru.Get(key) - if ok { - // This key was touched; move it to the MRU position. - c.lru.Put(key, val) - } - return val, ok + return c.fifo.Get(key) } // Put an element into this cache. If this causes an element // to be evicted, calls [c.onEviction] on the evicted element -// and returns the error from [c.onEviction]. Otherwise returns nil. +// and returns the error from [c.onEviction]. Otherwise, returns nil. 
func (c *onEvictCache[K, V]) Put(key K, value V) error { c.lock.Lock() defer c.lock.Unlock() - c.lru.Put(key, value) // Mark as MRU - - if c.lru.Len() > c.maxSize { - // Note that [c.cache] has already evicted the oldest - // element because its max size is [c.maxSize]. - oldestKey, oldsetVal, _ := c.lru.Oldest() - c.lru.Delete(oldestKey) - return c.onEviction(oldsetVal) + if oldValue, replaced := c.fifo.Get(key); replaced { + c.currentSize -= c.size(key, oldValue) } - return nil + + c.currentSize += c.size(key, value) + c.fifo.Put(key, value) // Mark as MRU + + return c.resize(c.maxSize) } -// Removes all elements from the cache. +// Flush removes all elements from the cache. // Returns the last non-nil error during [c.onEviction], if any. // If [c.onEviction] errors, it will still be called for any // subsequent elements and the cache will still be emptied. func (c *onEvictCache[K, V]) Flush() error { c.lock.Lock() defer func() { - c.lru = linkedhashmap.New[K, V]() + c.fifo = linkedhashmap.New[K, V]() c.lock.Unlock() }() + return c.resize(0) +} + +// removeOldest returns and removes the oldest element from this cache. +// +// Assumes [c.lock] is held. +func (c *onEvictCache[K, V]) removeOldest() (K, V, bool) { + k, v, exists := c.fifo.Oldest() + if exists { + c.currentSize -= c.size(k, v) + c.fifo.Delete(k) + } + return k, v, exists +} + +// resize removes the oldest elements from the cache until the cache is not +// larger than the provided target. +// +// Assumes [c.lock] is held. +func (c *onEvictCache[K, V]) resize(target int) error { + // Note that we can't use [c.fifo]'s iterator because [c.onEviction] + // modifies [c.fifo], which violates the iterator's invariant. 
var errs wrappers.Errs - iter := c.lru.NewIterator() - for iter.Next() { - val := iter.Value() - errs.Add(c.onEviction(val)) + for c.currentSize > target { + k, v, exists := c.removeOldest() + if !exists { + // This should really never happen unless the size of an entry + // changed or the target size is negative. + return errEmptyCacheTooLarge + } + errs.Add(c.onEviction(k, v)) } return errs.Err } diff --git a/avalanchego/x/merkledb/cache_test.go b/avalanchego/x/merkledb/cache_test.go index a841aef7..2e4e2e16 100644 --- a/avalanchego/x/merkledb/cache_test.go +++ b/avalanchego/x/merkledb/cache_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package merkledb @@ -16,51 +16,57 @@ func TestNewOnEvictCache(t *testing.T) { require := require.New(t) called := false - onEviction := func(int) error { + size := func(int, int) int { + return 1 + } + onEviction := func(int, int) error { called = true return nil } maxSize := 10 - cache := newOnEvictCache[int](maxSize, onEviction) + cache := newOnEvictCache(maxSize, size, onEviction) require.Equal(maxSize, cache.maxSize) - require.NotNil(cache.lru) - require.Equal(0, cache.lru.Len()) + require.NotNil(cache.fifo) + require.Zero(cache.fifo.Len()) // Can't test function equality directly so do this // to make sure it was assigned correctly - err := cache.onEviction(0) - require.NoError(err) + require.NoError(cache.onEviction(0, 0)) require.True(called) } // Test the functionality of the cache when the onEviction function // never returns an error. -// Note this test assumes the internal cache is an LRU cache. 
+// Note this test assumes the internal cache is a FIFO cache func TestOnEvictCacheNoOnEvictionError(t *testing.T) { require := require.New(t) - evicted := []int{} - onEviction := func(n int) error { - evicted = append(evicted, n) + evictedKey := []int{} + evictedValue := []int{} + size := func(int, int) int { + return 1 + } + onEviction := func(k, n int) error { + evictedKey = append(evictedKey, k) + evictedValue = append(evictedValue, n) return nil } maxSize := 3 - cache := newOnEvictCache[int](maxSize, onEviction) + cache := newOnEvictCache(maxSize, size, onEviction) // Get non-existent key _, ok := cache.Get(0) require.False(ok) // Put key - err := cache.Put(0, 0) - require.NoError(err) - require.Equal(1, cache.lru.Len()) + require.NoError(cache.Put(0, 0)) + require.Equal(1, cache.fifo.Len()) // Get key val, ok := cache.Get(0) require.True(ok) - require.Equal(0, val) + require.Zero(val) // Get non-existent key _, ok = cache.Get(1) @@ -68,23 +74,23 @@ func TestOnEvictCacheNoOnEvictionError(t *testing.T) { // Fill the cache for i := 1; i < maxSize; i++ { - err := cache.Put(i, i) - require.NoError(err) - require.Equal(i+1, cache.lru.Len()) + require.NoError(cache.Put(i, i)) + require.Equal(i+1, cache.fifo.Len()) } - require.Len(evicted, 0) - - // Cache has [0,1,2] from LRU --> MRU - - // Put another key. This should evict the LRU key (0). - err = cache.Put(maxSize, maxSize) - require.NoError(err) - require.Equal(maxSize, cache.lru.Len()) - require.Len(evicted, 1) - require.Equal(0, evicted[0]) - - // Cache has [1,2,3] from LRU --> MRU - iter := cache.lru.NewIterator() + require.Empty(evictedKey) + require.Empty(evictedValue) + // Cache has [0,1,2] + + // Put another key. This should evict the oldest inserted key (0). 
+ require.NoError(cache.Put(maxSize, maxSize)) + require.Equal(maxSize, cache.fifo.Len()) + require.Len(evictedKey, 1) + require.Zero(evictedKey[0]) + require.Len(evictedValue, 1) + require.Zero(evictedValue[0]) + + // Cache has [1,2,3] + iter := cache.fifo.NewIterator() require.True(iter.Next()) require.Equal(1, iter.Key()) require.Equal(1, iter.Value()) @@ -107,62 +113,67 @@ func TestOnEvictCacheNoOnEvictionError(t *testing.T) { require.Equal(i, val) } - // Cache has [3,2,1] from LRU --> MRU - iter = cache.lru.NewIterator() + // Cache has [1,2,3] + iter = cache.fifo.NewIterator() require.True(iter.Next()) - require.Equal(3, iter.Key()) - require.Equal(3, iter.Value()) + require.Equal(1, iter.Key()) + require.Equal(1, iter.Value()) require.True(iter.Next()) require.Equal(2, iter.Key()) require.Equal(2, iter.Value()) require.True(iter.Next()) - require.Equal(1, iter.Key()) - require.Equal(1, iter.Value()) + require.Equal(3, iter.Key()) + require.Equal(3, iter.Value()) require.False(iter.Next()) - // Put another key to evict the LRU key (3). - err = cache.Put(maxSize+1, maxSize+1) - require.NoError(err) - require.Equal(maxSize, cache.lru.Len()) - require.Len(evicted, 2) - require.Equal(3, evicted[1]) + // Put another key to evict the oldest inserted key (1). 
+ require.NoError(cache.Put(maxSize+1, maxSize+1)) + require.Equal(maxSize, cache.fifo.Len()) + require.Len(evictedKey, 2) + require.Equal(1, evictedKey[1]) + require.Len(evictedValue, 2) + require.Equal(1, evictedValue[1]) - // Cache has [2,1,4] from LRU --> MRU - iter = cache.lru.NewIterator() + // Cache has [2,3,4] + iter = cache.fifo.NewIterator() require.True(iter.Next()) require.Equal(2, iter.Key()) require.Equal(2, iter.Value()) require.True(iter.Next()) - require.Equal(1, iter.Key()) - require.Equal(1, iter.Value()) + require.Equal(3, iter.Key()) + require.Equal(3, iter.Value()) require.True(iter.Next()) require.Equal(4, iter.Key()) require.Equal(4, iter.Value()) require.False(iter.Next()) - // 3 should no longer be in the cache - _, ok = cache.Get(3) + // 1 should no longer be in the cache + _, ok = cache.Get(1) require.False(ok) - err = cache.Flush() - require.NoError(err) + require.NoError(cache.Flush()) // Cache should be empty - require.Equal(0, cache.lru.Len()) - require.Len(evicted, 5) - require.Equal(evicted, []int{0, 3, 2, 1, 4}) - require.Equal(0, cache.lru.Len()) + require.Zero(cache.fifo.Len()) + require.Len(evictedKey, 5) + require.Equal([]int{0, 1, 2, 3, 4}, evictedKey) + require.Len(evictedValue, 5) + require.Equal([]int{0, 1, 2, 3, 4}, evictedValue) + require.Zero(cache.fifo.Len()) require.Equal(maxSize, cache.maxSize) // Should be unchanged } // Test the functionality of the cache when the onEviction function // returns an error. -// Note this test assumes the internal cache is an LRU cache. +// Note this test assumes the cache is FIFO. 
func TestOnEvictCacheOnEvictionError(t *testing.T) { var ( - require = require.New(t) - evicted = []int{} - onEviction = func(n int) error { + require = require.New(t) + evicted = []int{} + size = func(int, int) int { + return 1 + } + onEviction = func(_, n int) error { // Evicting even keys errors evicted = append(evicted, n) if n%2 == 0 { @@ -173,23 +184,24 @@ func TestOnEvictCacheOnEvictionError(t *testing.T) { maxSize = 2 ) - cache := newOnEvictCache[int](maxSize, onEviction) + cache := newOnEvictCache(maxSize, size, onEviction) // Fill the cache for i := 0; i < maxSize; i++ { - err := cache.Put(i, i) - require.NoError(err) - require.Equal(i+1, cache.lru.Len()) + require.NoError(cache.Put(i, i)) + require.Equal(i+1, cache.fifo.Len()) } - // Put another key. This should evict the LRU key (0) + // Cache has [0,1] + + // Put another key. This should evict the first key (0) // and return an error since 0 is even. err := cache.Put(maxSize, maxSize) require.ErrorIs(err, errTest) - // Cache should still have correct state [1,2] - require.Equal(evicted, []int{0}) - require.Equal(maxSize, cache.lru.Len()) + // Cache has [1,2] + require.Equal([]int{0}, evicted) + require.Equal(maxSize, cache.fifo.Len()) _, ok := cache.Get(0) require.False(ok) _, ok = cache.Get(1) @@ -202,8 +214,8 @@ func TestOnEvictCacheOnEvictionError(t *testing.T) { require.ErrorIs(err, errTest) // Should still be empty. - require.Equal(0, cache.lru.Len()) - require.Equal(evicted, []int{0, 1, 2}) + require.Zero(cache.fifo.Len()) + require.Equal([]int{0, 1, 2}, evicted) _, ok = cache.Get(0) require.False(ok) _, ok = cache.Get(1) diff --git a/avalanchego/x/merkledb/codec.go b/avalanchego/x/merkledb/codec.go index 7baa3715..a5d4a922 100644 --- a/avalanchego/x/merkledb/codec.go +++ b/avalanchego/x/merkledb/codec.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package merkledb @@ -7,572 +7,264 @@ import ( "bytes" "encoding/binary" "errors" - "fmt" "io" "math" + "math/bits" + "slices" "sync" + "golang.org/x/exp/maps" + "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/hashing" + "github.com/ava-labs/avalanchego/utils/maybe" ) const ( - codecVersion = 0 + boolLen = 1 trueByte = 1 falseByte = 0 minVarIntLen = 1 - boolLen = 1 - idLen = hashing.HashLen - minCodecVersionLen = minVarIntLen - minSerializedPathLen = minVarIntLen - minByteSliceLen = minVarIntLen - minDeletedKeyLen = minByteSliceLen minMaybeByteSliceLen = boolLen - minProofPathLen = minVarIntLen - minKeyValueLen = 2 * minByteSliceLen - minProofNodeLen = minSerializedPathLen + minMaybeByteSliceLen + minVarIntLen - minProofLen = minCodecVersionLen + minProofPathLen + minByteSliceLen - minChangeProofLen = minCodecVersionLen + +boolLen + 2*minProofPathLen + 2*minVarIntLen - minRangeProofLen = minCodecVersionLen + +2*minProofPathLen + minVarIntLen - minDBNodeLen = minCodecVersionLen + minMaybeByteSliceLen + minVarIntLen - minHashValuesLen = minCodecVersionLen + minVarIntLen + minMaybeByteSliceLen + minSerializedPathLen - minProofNodeChildLen = minVarIntLen + idLen - minChildLen = minVarIntLen + minSerializedPathLen + idLen + minKeyLen = minVarIntLen + minByteSliceLen = minVarIntLen + minDBNodeLen = minMaybeByteSliceLen + minVarIntLen + minChildLen = minVarIntLen + minKeyLen + ids.IDLen + boolLen + + estimatedKeyLen = 64 + estimatedValueLen = 64 + // Child index, child ID + hashValuesChildLen = minVarIntLen + ids.IDLen ) var ( - _ EncoderDecoder = (*codecImpl)(nil) + _ encoderDecoder = (*codecImpl)(nil) trueBytes = []byte{trueByte} falseBytes = []byte{falseByte} - errUnknownVersion = errors.New("unknown codec version") - errEncodeNil = errors.New("can't encode nil pointer or interface") - errDecodeNil = errors.New("can't decode nil") - errNegativeProofPathNodes = errors.New("negative proof path length") - errNegativeNumChildren = 
errors.New("number of children is negative") - errTooManyChildren = fmt.Errorf("length of children list is larger than branching factor of %d", NodeBranchFactor) - errChildIndexTooLarge = fmt.Errorf("invalid child index. Must be less than branching factor of %d", NodeBranchFactor) - errNegativeNibbleLength = errors.New("nibble length is negative") - errNegativeNumKeyValues = errors.New("negative number of key values") - errIntTooLarge = errors.New("integer too large to be decoded") - errLeadingZeroes = errors.New("varint has leading zeroes") - errInvalidBool = errors.New("decoded bool is neither true nor false") - errNonZeroNibblePadding = errors.New("nibbles should be padded with 0s") - errExtraSpace = errors.New("trailing buffer space") - errNegativeSliceLength = errors.New("negative slice length") - errInvalidCodecVersion = errors.New("invalid codec version") + errChildIndexTooLarge = errors.New("invalid child index. Must be less than branching factor") + errLeadingZeroes = errors.New("varint has leading zeroes") + errInvalidBool = errors.New("decoded bool is neither true nor false") + errNonZeroKeyPadding = errors.New("key partial byte should be padded with 0s") + errExtraSpace = errors.New("trailing buffer space") + errIntOverflow = errors.New("value overflows int") ) -// EncoderDecoder defines the interface needed by merkleDB to marshal +// encoderDecoder defines the interface needed by merkleDB to marshal // and unmarshal relevant types. -type EncoderDecoder interface { - Encoder - Decoder +type encoderDecoder interface { + encoder + decoder } -type Encoder interface { - EncodeProof(version uint16, p *Proof) ([]byte, error) - EncodeChangeProof(version uint16, p *ChangeProof) ([]byte, error) - EncodeRangeProof(version uint16, p *RangeProof) ([]byte, error) +type encoder interface { + // Assumes [n] is non-nil. 
+ encodeDBNode(n *dbNode) []byte + encodedDBNodeSize(n *dbNode) int - encodeDBNode(version uint16, n *dbNode) ([]byte, error) - encodeHashValues(version uint16, hv *hashValues) ([]byte, error) + // Returns the bytes that will be hashed to generate [n]'s ID. + // Assumes [n] is non-nil. + encodeHashValues(n *node) []byte + encodeKey(key Key) []byte } -type Decoder interface { - DecodeProof(bytes []byte, p *Proof) (uint16, error) - DecodeChangeProof(bytes []byte, p *ChangeProof) (uint16, error) - DecodeRangeProof(bytes []byte, p *RangeProof) (uint16, error) - - decodeDBNode(bytes []byte, n *dbNode) (uint16, error) +type decoder interface { + // Assumes [n] is non-nil. + decodeDBNode(bytes []byte, n *dbNode) error + decodeKey(bytes []byte) (Key, error) } -func newCodec() (EncoderDecoder, uint16) { +func newCodec() encoderDecoder { return &codecImpl{ varIntPool: sync.Pool{ New: func() interface{} { return make([]byte, binary.MaxVarintLen64) }, }, - }, codecVersion + } } +// Note that bytes.Buffer.Write always returns nil, so we +// can ignore its return values in [codecImpl] methods. type codecImpl struct { + // Invariant: Every byte slice returned by [varIntPool] has + // length [binary.MaxVarintLen64]. 
varIntPool sync.Pool } -func (c *codecImpl) EncodeProof(version uint16, proof *Proof) ([]byte, error) { - if proof == nil { - return nil, errEncodeNil - } - - if version != codecVersion { - return nil, fmt.Errorf("%w: %d", errUnknownVersion, version) - } - - buf := &bytes.Buffer{} - if err := c.encodeInt(buf, int(version)); err != nil { - return nil, err - } - if err := c.encodeProofPath(buf, proof.Path); err != nil { - return nil, err - } - if err := c.encodeByteSlice(buf, proof.Key); err != nil { - return nil, err - } - if err := c.encodeMaybeByteSlice(buf, proof.Value); err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -func (c *codecImpl) EncodeChangeProof(version uint16, proof *ChangeProof) ([]byte, error) { - if proof == nil { - return nil, errEncodeNil - } - - if version != codecVersion { - return nil, fmt.Errorf("%w: %d", errUnknownVersion, version) - } - - buf := &bytes.Buffer{} - - if err := c.encodeInt(buf, int(version)); err != nil { - return nil, err - } - if err := c.encodeBool(buf, proof.HadRootsInHistory); err != nil { - return nil, err - } - if err := c.encodeProofPath(buf, proof.StartProof); err != nil { - return nil, err - } - if err := c.encodeProofPath(buf, proof.EndProof); err != nil { - return nil, err - } - if err := c.encodeInt(buf, len(proof.KeyValues)); err != nil { - return nil, err - } - for _, kv := range proof.KeyValues { - if err := c.encodeKeyValue(kv, buf); err != nil { - return nil, err - } - } - - if err := c.encodeInt(buf, len(proof.DeletedKeys)); err != nil { - return nil, err - } - for _, key := range proof.DeletedKeys { - if err := c.encodeByteSlice(buf, key); err != nil { - return nil, err - } - } - return buf.Bytes(), nil +func (c *codecImpl) childSize(index byte, childEntry *child) int { + // * index + // * child ID + // * child key + // * bool indicating whether the child has a value + return c.uintSize(uint64(index)) + ids.IDLen + c.keySize(childEntry.compressedKey) + boolLen } -func (c *codecImpl) 
EncodeRangeProof(version uint16, proof *RangeProof) ([]byte, error) { - if proof == nil { - return nil, errEncodeNil - } - - if version != codecVersion { - return nil, fmt.Errorf("%w: %d", errUnknownVersion, version) - } - - buf := &bytes.Buffer{} - if err := c.encodeInt(buf, int(version)); err != nil { - return nil, err - } - if err := c.encodeProofPath(buf, proof.StartProof); err != nil { - return nil, err - } - if err := c.encodeProofPath(buf, proof.EndProof); err != nil { - return nil, err - } - if err := c.encodeInt(buf, len(proof.KeyValues)); err != nil { - return nil, err - } - for _, kv := range proof.KeyValues { - if err := c.encodeKeyValue(kv, buf); err != nil { - return nil, err - } +// based on the current implementation of codecImpl.encodeUint which uses binary.PutUvarint +func (*codecImpl) uintSize(value uint64) int { + if value == 0 { + return 1 } - - return buf.Bytes(), nil + return (bits.Len64(value) + 6) / 7 } -func (c *codecImpl) encodeDBNode(version uint16, n *dbNode) ([]byte, error) { - if n == nil { - return nil, errEncodeNil - } - - if version != codecVersion { - return nil, fmt.Errorf("%w: %d", errUnknownVersion, version) - } - - buf := &bytes.Buffer{} - if err := c.encodeInt(buf, int(version)); err != nil { - return nil, err - } - if err := c.encodeMaybeByteSlice(buf, n.value); err != nil { - return nil, err - } - childrenLength := len(n.children) - if err := c.encodeInt(buf, childrenLength); err != nil { - return nil, err - } - for index := byte(0); index < NodeBranchFactor; index++ { - if entry, ok := n.children[index]; ok { - if err := c.encodeInt(buf, int(index)); err != nil { - return nil, err - } - path := entry.compressedPath.Serialize() - if err := c.encodeSerializedPath(path, buf); err != nil { - return nil, err - } - if _, err := buf.Write(entry.id[:]); err != nil { - return nil, err - } - } - } - return buf.Bytes(), nil +func (c *codecImpl) keySize(p Key) int { + return c.uintSize(uint64(p.length)) + bytesNeeded(p.length) } -func 
(c *codecImpl) encodeHashValues(version uint16, hv *hashValues) ([]byte, error) { - if hv == nil { - return nil, errEncodeNil - } - - if version != codecVersion { - return nil, fmt.Errorf("%w: %d", errUnknownVersion, version) - } - - buf := &bytes.Buffer{} - - if err := c.encodeInt(buf, int(version)); err != nil { - return nil, err - } - - length := len(hv.Children) - if err := c.encodeInt(buf, length); err != nil { - return nil, err - } - - // ensure that the order of entries is consistent - for index := byte(0); index < NodeBranchFactor; index++ { - if entry, ok := hv.Children[index]; ok { - if err := c.encodeInt(buf, int(index)); err != nil { - return nil, err - } - if _, err := buf.Write(entry.id[:]); err != nil { - return nil, err - } - } +func (c *codecImpl) encodedDBNodeSize(n *dbNode) int { + // * number of children + // * bool indicating whether [n] has a value + // * the value (optional) + // * children + size := c.uintSize(uint64(len(n.children))) + boolLen + if n.value.HasValue() { + valueLen := len(n.value.Value()) + size += c.uintSize(uint64(valueLen)) + valueLen } - if err := c.encodeMaybeByteSlice(buf, hv.Value); err != nil { - return nil, err - } - if err := c.encodeSerializedPath(hv.Key, buf); err != nil { - return nil, err + // for each non-nil entry, we add the additional size of the child entry + for index, entry := range n.children { + size += c.childSize(index, entry) } - - return buf.Bytes(), nil + return size } -func (c *codecImpl) DecodeProof(b []byte, proof *Proof) (uint16, error) { - if proof == nil { - return 0, errDecodeNil - } - if minProofLen > len(b) { - return 0, io.ErrUnexpectedEOF - } - - var ( - err error - src = bytes.NewReader(b) - ) - gotCodecVersion, err := c.decodeInt(src) - if err != nil { - return 0, err - } - if codecVersion != gotCodecVersion { - return 0, fmt.Errorf("%w: %d", errInvalidCodecVersion, gotCodecVersion) +func (c *codecImpl) encodeDBNode(n *dbNode) []byte { + buf := bytes.NewBuffer(make([]byte, 0, 
c.encodedDBNodeSize(n))) + c.encodeMaybeByteSlice(buf, n.value) + c.encodeUint(buf, uint64(len(n.children))) + // Note we insert children in order of increasing index + // for determinism. + keys := maps.Keys(n.children) + slices.Sort(keys) + for _, index := range keys { + entry := n.children[index] + c.encodeUint(buf, uint64(index)) + c.encodeKeyToBuffer(buf, entry.compressedKey) + _, _ = buf.Write(entry.id[:]) + c.encodeBool(buf, entry.hasValue) } - if proof.Path, err = c.decodeProofPath(src); err != nil { - return 0, err - } - if proof.Key, err = c.decodeByteSlice(src); err != nil { - return 0, err - } - if proof.Value, err = c.decodeMaybeByteSlice(src); err != nil { - return 0, err - } - if src.Len() != 0 { - return 0, errExtraSpace - } - return codecVersion, nil + return buf.Bytes() } -func (c *codecImpl) DecodeChangeProof(b []byte, proof *ChangeProof) (uint16, error) { - if proof == nil { - return 0, errDecodeNil - } - if minChangeProofLen > len(b) { - return 0, io.ErrUnexpectedEOF - } - +func (c *codecImpl) encodeHashValues(n *node) []byte { var ( - src = bytes.NewReader(b) - err error + numChildren = len(n.children) + // Estimate size [hv] to prevent memory allocations + estimatedLen = minVarIntLen + numChildren*hashValuesChildLen + estimatedValueLen + estimatedKeyLen + buf = bytes.NewBuffer(make([]byte, 0, estimatedLen)) ) - gotCodecVersion, err := c.decodeInt(src) - if err != nil { - return 0, err - } - if gotCodecVersion != codecVersion { - return 0, fmt.Errorf("%w: %d", errInvalidCodecVersion, gotCodecVersion) - } - if proof.HadRootsInHistory, err = c.decodeBool(src); err != nil { - return 0, err - } - if proof.StartProof, err = c.decodeProofPath(src); err != nil { - return 0, err - } - if proof.EndProof, err = c.decodeProofPath(src); err != nil { - return 0, err - } - - numKeyValues, err := c.decodeInt(src) - if err != nil { - return 0, err - } - if numKeyValues < 0 { - return 0, errNegativeNumKeyValues - } - if numKeyValues > src.Len()/minKeyValueLen 
{ - return 0, io.ErrUnexpectedEOF - } - proof.KeyValues = make([]KeyValue, numKeyValues) - for i := range proof.KeyValues { - if proof.KeyValues[i], err = c.decodeKeyValue(src); err != nil { - return 0, err - } - } - - numDeletedKeys, err := c.decodeInt(src) - if err != nil { - return 0, err - } - if numDeletedKeys < 0 { - return 0, errNegativeNumKeyValues - } - if numDeletedKeys > src.Len()/minDeletedKeyLen { - return 0, io.ErrUnexpectedEOF - } - proof.DeletedKeys = make([][]byte, numDeletedKeys) - for i := range proof.DeletedKeys { - if proof.DeletedKeys[i], err = c.decodeByteSlice(src); err != nil { - return 0, err - } - } - if src.Len() != 0 { - return 0, errExtraSpace - } - return codecVersion, nil -} - -func (c *codecImpl) DecodeRangeProof(b []byte, proof *RangeProof) (uint16, error) { - if proof == nil { - return 0, errDecodeNil - } - if minRangeProofLen > len(b) { - return 0, io.ErrUnexpectedEOF - } + c.encodeUint(buf, uint64(numChildren)) - var ( - src = bytes.NewReader(b) - err error - ) - gotCodecVersion, err := c.decodeInt(src) - if err != nil { - return 0, err - } - if codecVersion != gotCodecVersion { - return 0, fmt.Errorf("%w: %d", errInvalidCodecVersion, gotCodecVersion) - } - if proof.StartProof, err = c.decodeProofPath(src); err != nil { - return 0, err - } - if proof.EndProof, err = c.decodeProofPath(src); err != nil { - return 0, err + // ensure that the order of entries is consistent + keys := maps.Keys(n.children) + slices.Sort(keys) + for _, index := range keys { + entry := n.children[index] + c.encodeUint(buf, uint64(index)) + _, _ = buf.Write(entry.id[:]) } + c.encodeMaybeByteSlice(buf, n.valueDigest) + c.encodeKeyToBuffer(buf, n.key) - numKeyValues, err := c.decodeInt(src) - if err != nil { - return 0, err - } - if numKeyValues < 0 { - return 0, errNegativeNumKeyValues - } - if numKeyValues > src.Len()/minKeyValueLen { - return 0, io.ErrUnexpectedEOF - } - proof.KeyValues = make([]KeyValue, numKeyValues) - for i := range proof.KeyValues { 
- if proof.KeyValues[i], err = c.decodeKeyValue(src); err != nil { - return 0, err - } - } - if src.Len() != 0 { - return 0, errExtraSpace - } - return codecVersion, nil + return buf.Bytes() } -func (c *codecImpl) decodeDBNode(b []byte, n *dbNode) (uint16, error) { - if n == nil { - return 0, errDecodeNil - } +func (c *codecImpl) decodeDBNode(b []byte, n *dbNode) error { if minDBNodeLen > len(b) { - return 0, io.ErrUnexpectedEOF + return io.ErrUnexpectedEOF } - var ( - src = bytes.NewReader(b) - err error - ) + src := bytes.NewReader(b) - gotCodecVersion, err := c.decodeInt(src) + value, err := c.decodeMaybeByteSlice(src) if err != nil { - return 0, err - } - if codecVersion != gotCodecVersion { - return 0, fmt.Errorf("%w: %d", errInvalidCodecVersion, gotCodecVersion) + return err } + n.value = value - if n.value, err = c.decodeMaybeByteSlice(src); err != nil { - return 0, err + numChildren, err := c.decodeUint(src) + switch { + case err != nil: + return err + case numChildren > uint64(src.Len()/minChildLen): + return io.ErrUnexpectedEOF } - numChildren, err := c.decodeInt(src) - if err != nil { - return 0, err - } - switch { - case numChildren < 0: - return 0, errNegativeNumChildren - case numChildren > NodeBranchFactor: - return 0, errTooManyChildren - case numChildren > src.Len()/minChildLen: - return 0, io.ErrUnexpectedEOF - } - - n.children = make(map[byte]child, NodeBranchFactor) - previousChild := -1 - for i := 0; i < numChildren; i++ { - var index int - if index, err = c.decodeInt(src); err != nil { - return 0, err + n.children = make(map[byte]*child, numChildren) + var previousChild uint64 + for i := uint64(0); i < numChildren; i++ { + index, err := c.decodeUint(src) + if err != nil { + return err } - if index <= previousChild || index > NodeBranchFactor-1 { - return 0, errChildIndexTooLarge + if (i != 0 && index <= previousChild) || index > math.MaxUint8 { + return errChildIndexTooLarge } previousChild = index - var compressedPath SerializedPath - if 
compressedPath, err = c.decodeSerializedPath(src); err != nil { - return 0, err + compressedKey, err := c.decodeKeyFromReader(src) + if err != nil { + return err } - var childID ids.ID - if childID, err = c.decodeID(src); err != nil { - return 0, err + childID, err := c.decodeID(src) + if err != nil { + return err } - n.children[byte(index)] = child{ - compressedPath: compressedPath.deserialize(), - id: childID, + hasValue, err := c.decodeBool(src) + if err != nil { + return err + } + n.children[byte(index)] = &child{ + compressedKey: compressedKey, + id: childID, + hasValue: hasValue, } } if src.Len() != 0 { - return 0, errExtraSpace - } - return codecVersion, err -} - -func (c *codecImpl) decodeKeyValue(src *bytes.Reader) (KeyValue, error) { - if minKeyValueLen > src.Len() { - return KeyValue{}, io.ErrUnexpectedEOF - } - - var ( - result KeyValue - err error - ) - if result.Key, err = c.decodeByteSlice(src); err != nil { - return result, err - } - if result.Value, err = c.decodeByteSlice(src); err != nil { - return result, err - } - return result, nil -} - -func (c *codecImpl) encodeKeyValue(kv KeyValue, dst io.Writer) error { - if err := c.encodeByteSlice(dst, kv.Key); err != nil { - return err - } - if err := c.encodeByteSlice(dst, kv.Value); err != nil { - return err + return errExtraSpace } return nil } -func (*codecImpl) encodeBool(dst io.Writer, value bool) error { +func (*codecImpl) encodeBool(dst *bytes.Buffer, value bool) { bytesValue := falseBytes if value { bytesValue = trueBytes } - _, err := dst.Write(bytesValue) - return err + _, _ = dst.Write(bytesValue) } func (*codecImpl) decodeBool(src *bytes.Reader) (bool, error) { boolByte, err := src.ReadByte() - if err == io.EOF { + switch { + case err == io.EOF: return false, io.ErrUnexpectedEOF - } - if err != nil { + case err != nil: return false, err - } - switch boolByte { - case trueByte: + case boolByte == trueByte: return true, nil - case falseByte: + case boolByte == falseByte: return false, nil 
default: return false, errInvalidBool } } -func (c *codecImpl) encodeInt(dst io.Writer, value int) error { - return c.encodeInt64(dst, int64(value)) -} - -func (*codecImpl) decodeInt(src *bytes.Reader) (int, error) { +func (*codecImpl) decodeUint(src *bytes.Reader) (uint64, error) { // To ensure encoding/decoding is canonical, we need to check for leading // zeroes in the varint. // The last byte of the varint we read is the most significant byte. // If it's 0, then it's a leading zero, which is considered invalid in the // canonical encoding. startLen := src.Len() - val64, err := binary.ReadVarint(src) - switch { - case err == io.EOF: - return 0, io.ErrUnexpectedEOF - case err != nil: + val64, err := binary.ReadUvarint(src) + if err != nil { + if err == io.EOF { + return 0, io.ErrUnexpectedEOF + } return 0, err - case val64 > math.MaxInt: - return 0, errIntTooLarge } endLen := src.Len() @@ -590,42 +282,39 @@ func (*codecImpl) decodeInt(src *bytes.Reader) (int, error) { } } - return int(val64), nil + return val64, nil } -func (c *codecImpl) encodeInt64(dst io.Writer, value int64) error { +func (c *codecImpl) encodeUint(dst *bytes.Buffer, value uint64) { buf := c.varIntPool.Get().([]byte) - size := binary.PutVarint(buf, value) - _, err := dst.Write(buf[:size]) + size := binary.PutUvarint(buf, value) + _, _ = dst.Write(buf[:size]) c.varIntPool.Put(buf) - return err } -func (c *codecImpl) encodeMaybeByteSlice(dst io.Writer, maybeValue Maybe[[]byte]) error { - if err := c.encodeBool(dst, !maybeValue.IsNothing()); err != nil { - return err - } - if maybeValue.IsNothing() { - return nil +func (c *codecImpl) encodeMaybeByteSlice(dst *bytes.Buffer, maybeValue maybe.Maybe[[]byte]) { + hasValue := maybeValue.HasValue() + c.encodeBool(dst, hasValue) + if hasValue { + c.encodeByteSlice(dst, maybeValue.Value()) } - return c.encodeByteSlice(dst, maybeValue.Value()) } -func (c *codecImpl) decodeMaybeByteSlice(src *bytes.Reader) (Maybe[[]byte], error) { +func (c *codecImpl) 
decodeMaybeByteSlice(src *bytes.Reader) (maybe.Maybe[[]byte], error) { if minMaybeByteSliceLen > src.Len() { - return Nothing[[]byte](), io.ErrUnexpectedEOF + return maybe.Nothing[[]byte](), io.ErrUnexpectedEOF } if hasValue, err := c.decodeBool(src); err != nil || !hasValue { - return Nothing[[]byte](), err + return maybe.Nothing[[]byte](), err } - bytes, err := c.decodeByteSlice(src) + rawBytes, err := c.decodeByteSlice(src) if err != nil { - return Nothing[[]byte](), err + return maybe.Nothing[[]byte](), err } - return Some(bytes), nil + return maybe.Some(rawBytes), nil } func (c *codecImpl) decodeByteSlice(src *bytes.Reader) ([]byte, error) { @@ -633,226 +322,105 @@ func (c *codecImpl) decodeByteSlice(src *bytes.Reader) ([]byte, error) { return nil, io.ErrUnexpectedEOF } - var ( - length int - err error - result []byte - ) - if length, err = c.decodeInt(src); err != nil { - if err == io.EOF { - return nil, io.ErrUnexpectedEOF - } - return nil, err - } - + length, err := c.decodeUint(src) switch { - case length < 0: - return nil, errNegativeSliceLength + case err == io.EOF: + return nil, io.ErrUnexpectedEOF + case err != nil: + return nil, err case length == 0: return nil, nil - case length > src.Len(): + case length > uint64(src.Len()): return nil, io.ErrUnexpectedEOF } - result = make([]byte, length) - if _, err := io.ReadFull(src, result); err != nil { - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - return nil, err + result := make([]byte, length) + _, err = io.ReadFull(src, result) + if err == io.EOF { + err = io.ErrUnexpectedEOF } - return result, nil + return result, err } -func (c *codecImpl) encodeByteSlice(dst io.Writer, value []byte) error { - if err := c.encodeInt(dst, len(value)); err != nil { - return err - } +func (c *codecImpl) encodeByteSlice(dst *bytes.Buffer, value []byte) { + c.encodeUint(dst, uint64(len(value))) if value != nil { - if _, err := dst.Write(value); err != nil { - return err - } + _, _ = dst.Write(value) } - return nil } 
func (*codecImpl) decodeID(src *bytes.Reader) (ids.ID, error) { - if idLen > src.Len() { + if ids.IDLen > src.Len() { return ids.ID{}, io.ErrUnexpectedEOF } var id ids.ID - if _, err := io.ReadFull(src, id[:]); err != nil { - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - return id, err + _, err := io.ReadFull(src, id[:]) + if err == io.EOF { + err = io.ErrUnexpectedEOF } - return id, nil + return id, err } -// Assumes a proof path has > 0 nodes. -func (c *codecImpl) decodeProofPath(src *bytes.Reader) ([]ProofNode, error) { - if minProofPathLen > src.Len() { - return nil, io.ErrUnexpectedEOF - } - - numProofNodes, err := c.decodeInt(src) - if err != nil { - return nil, err - } - if numProofNodes < 0 { - return nil, errNegativeProofPathNodes - } - if numProofNodes > src.Len()/minProofNodeLen { - return nil, io.ErrUnexpectedEOF - } - result := make([]ProofNode, numProofNodes) - for i := 0; i < numProofNodes; i++ { - if result[i], err = c.decodeProofNode(src); err != nil { - return nil, err - } - } - return result, nil +func (c *codecImpl) encodeKey(key Key) []byte { + estimatedLen := binary.MaxVarintLen64 + len(key.Bytes()) + dst := bytes.NewBuffer(make([]byte, 0, estimatedLen)) + c.encodeKeyToBuffer(dst, key) + return dst.Bytes() } -// Invariant: len(path) > 0. 
-func (c *codecImpl) encodeProofPath(dst io.Writer, path []ProofNode) error { - if err := c.encodeInt(dst, len(path)); err != nil { - return err - } - for _, proofNode := range path { - if err := c.encodeProofNode(proofNode, dst); err != nil { - return err - } - } - return nil +func (c *codecImpl) encodeKeyToBuffer(dst *bytes.Buffer, key Key) { + c.encodeUint(dst, uint64(key.length)) + _, _ = dst.Write(key.Bytes()) } -func (c *codecImpl) decodeProofNode(src *bytes.Reader) (ProofNode, error) { - if minProofNodeLen > src.Len() { - return ProofNode{}, io.ErrUnexpectedEOF - } - - var ( - result ProofNode - err error - ) - if result.KeyPath, err = c.decodeSerializedPath(src); err != nil { - return result, err - } - if result.ValueOrHash, err = c.decodeMaybeByteSlice(src); err != nil { - return result, err - } - numChildren, err := c.decodeInt(src) +func (c *codecImpl) decodeKey(b []byte) (Key, error) { + src := bytes.NewReader(b) + key, err := c.decodeKeyFromReader(src) if err != nil { - return result, err - } - switch { - case numChildren < 0: - return result, errNegativeNumChildren - case numChildren > NodeBranchFactor: - return result, errTooManyChildren - case numChildren > src.Len()/minProofNodeChildLen: - return result, io.ErrUnexpectedEOF - } - - result.Children = make(map[byte]ids.ID, numChildren) - previousChild := -1 - for addedEntries := 0; addedEntries < numChildren; addedEntries++ { - index, err := c.decodeInt(src) - if err != nil { - return result, err - } - if index <= previousChild || index >= NodeBranchFactor { - return result, errChildIndexTooLarge - } - previousChild = index - - childID, err := c.decodeID(src) - if err != nil { - return result, err - } - result.Children[byte(index)] = childID - } - return result, nil -} - -func (c *codecImpl) encodeProofNode(pn ProofNode, dst io.Writer) error { - if err := c.encodeSerializedPath(pn.KeyPath, dst); err != nil { - return err - } - if err := c.encodeMaybeByteSlice(dst, pn.ValueOrHash); err != nil { - 
return err - } - if err := c.encodeInt(dst, len(pn.Children)); err != nil { - return err - } - // ensure this is in order - childrenCount := 0 - for index := byte(0); index < NodeBranchFactor; index++ { - childID, ok := pn.Children[index] - if !ok { - continue - } - childrenCount++ - if err := c.encodeInt(dst, int(index)); err != nil { - return err - } - if _, err := dst.Write(childID[:]); err != nil { - return err - } - } - // there are children present with index >= NodeBranchFactor - if childrenCount != len(pn.Children) { - return errChildIndexTooLarge + return Key{}, err } - return nil -} - -func (c *codecImpl) encodeSerializedPath(s SerializedPath, dst io.Writer) error { - if err := c.encodeInt(dst, s.NibbleLength); err != nil { - return err + if src.Len() != 0 { + return Key{}, errExtraSpace } - _, err := dst.Write(s.Value) - return err + return key, err } -func (c *codecImpl) decodeSerializedPath(src *bytes.Reader) (SerializedPath, error) { - if minSerializedPathLen > src.Len() { - return SerializedPath{}, io.ErrUnexpectedEOF +func (c *codecImpl) decodeKeyFromReader(src *bytes.Reader) (Key, error) { + if minKeyLen > src.Len() { + return Key{}, io.ErrUnexpectedEOF } - var ( - result SerializedPath - err error - ) - if result.NibbleLength, err = c.decodeInt(src); err != nil { - return result, err + length, err := c.decodeUint(src) + if err != nil { + return Key{}, err } - if result.NibbleLength < 0 { - return result, errNegativeNibbleLength + if length > math.MaxInt { + return Key{}, errIntOverflow } - pathBytesLen := result.NibbleLength >> 1 - hasOddLen := result.hasOddLength() - if hasOddLen { - pathBytesLen++ + result := Key{ + length: int(length), } - if pathBytesLen > src.Len() { - return result, io.ErrUnexpectedEOF + keyBytesLen := bytesNeeded(result.length) + if keyBytesLen > src.Len() { + return Key{}, io.ErrUnexpectedEOF } - result.Value = make([]byte, pathBytesLen) - if _, err := io.ReadFull(src, result.Value); err != nil { + buffer := make([]byte, 
keyBytesLen) + if _, err := io.ReadFull(src, buffer); err != nil { if err == io.EOF { err = io.ErrUnexpectedEOF } - return result, err - } - if hasOddLen { - paddedNibble := result.Value[pathBytesLen-1] & 0x0F - if paddedNibble != 0 { - return result, errNonZeroNibblePadding + return Key{}, err + } + if result.hasPartialByte() { + // Confirm that the padding bits in the partial byte are 0. + // We want to only look at the bits to the right of the last token, which is at index length-1. + // Generate a mask where the (result.length % 8) left bits are 0. + paddingMask := byte(0xFF >> (result.length % 8)) + if buffer[keyBytesLen-1]&paddingMask != 0 { + return Key{}, errNonZeroKeyPadding } } + result.value = string(buffer) return result, nil } diff --git a/avalanchego/x/merkledb/codec_test.go b/avalanchego/x/merkledb/codec_test.go index f3561c81..455b75e1 100644 --- a/avalanchego/x/merkledb/codec_test.go +++ b/avalanchego/x/merkledb/codec_test.go @@ -1,120 +1,22 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package merkledb import ( "bytes" + "encoding/binary" "io" + "math" "math/rand" - "reflect" "testing" "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/hashing" + "github.com/ava-labs/avalanchego/utils/maybe" ) -// TODO add more codec tests - -func newRandomProofNode(r *rand.Rand) ProofNode { - key := make([]byte, r.Intn(32)) // #nosec G404 - _, _ = r.Read(key) // #nosec G404 - val := make([]byte, r.Intn(64)) // #nosec G404 - _, _ = r.Read(val) // #nosec G404 - - children := map[byte]ids.ID{} - for j := 0; j < NodeBranchFactor; j++ { - if r.Float64() < 0.5 { - var childID ids.ID - _, _ = r.Read(childID[:]) // #nosec G404 - children[byte(j)] = childID - } - } - // use the hash instead when length is greater than the hash length - if len(val) >= HashLength { - val = hashing.ComputeHash256(val) - } else if len(val) == 0 { - // We do this because when we encode a value of []byte{} we will later - // decode it as nil. - // Doing this prevents inconsistency when comparing the encoded and - // decoded values. 
- // Calling nilEmptySlices doesn't set this because it is a private - // variable on the struct - val = nil - } - - return ProofNode{ - KeyPath: newPath(key).Serialize(), - ValueOrHash: Some(val), - Children: children, - } -} - -func newKeyValues(r *rand.Rand, num uint) []KeyValue { - keyValues := make([]KeyValue, num) - for i := range keyValues { - key := make([]byte, r.Intn(32)) // #nosec G404 - _, _ = r.Read(key) // #nosec G404 - val := make([]byte, r.Intn(32)) // #nosec G404 - _, _ = r.Read(val) // #nosec G404 - keyValues[i] = KeyValue{ - Key: key, - Value: val, - } - } - return keyValues -} - -func nilEmptySlices(dest interface{}) { - if dest == nil { - return - } - - destPtr := reflect.ValueOf(dest) - if destPtr.Kind() != reflect.Ptr { - return - } - nilEmptySlicesRec(destPtr.Elem()) -} - -func nilEmptySlicesRec(value reflect.Value) { - switch value.Kind() { - case reflect.Slice: - if value.Len() == 0 { - newValue := reflect.Zero(value.Type()) - value.Set(newValue) - return - } - - for i := 0; i < value.Len(); i++ { - f := value.Index(i) - nilEmptySlicesRec(f) - } - case reflect.Array: - for i := 0; i < value.Len(); i++ { - f := value.Index(i) - nilEmptySlicesRec(f) - } - case reflect.Interface, reflect.Ptr: - if value.IsNil() { - return - } - nilEmptySlicesRec(value.Elem()) - case reflect.Struct: - t := value.Type() - numFields := value.NumField() - for i := 0; i < numFields; i++ { - tField := t.Field(i) - if tField.IsExported() { - field := value.Field(i) - nilEmptySlicesRec(field) - } - } - } -} - func FuzzCodecBool(f *testing.F) { f.Fuzz( func( @@ -123,20 +25,19 @@ func FuzzCodecBool(f *testing.F) { ) { require := require.New(t) - codec := Codec.(*codecImpl) + codec := codec.(*codecImpl) reader := bytes.NewReader(b) startLen := reader.Len() got, err := codec.decodeBool(reader) if err != nil { - return + t.SkipNow() } endLen := reader.Len() numRead := startLen - endLen // Encoding [got] should be the same as [b]. 
var buf bytes.Buffer - err = codec.encodeBool(&buf, got) - require.NoError(err) + codec.encodeBool(&buf, got) bufBytes := buf.Bytes() require.Len(bufBytes, numRead) require.Equal(b[:numRead], bufBytes) @@ -152,20 +53,19 @@ func FuzzCodecInt(f *testing.F) { ) { require := require.New(t) - codec := Codec.(*codecImpl) + codec := codec.(*codecImpl) reader := bytes.NewReader(b) startLen := reader.Len() - got, err := codec.decodeInt(reader) + got, err := codec.decodeUint(reader) if err != nil { - return + t.SkipNow() } endLen := reader.Len() numRead := startLen - endLen // Encoding [got] should be the same as [b]. var buf bytes.Buffer - err = codec.encodeInt(&buf, got) - require.NoError(err) + codec.encodeUint(&buf, got) bufBytes := buf.Bytes() require.Len(bufBytes, numRead) require.Equal(b[:numRead], bufBytes) @@ -173,142 +73,22 @@ func FuzzCodecInt(f *testing.F) { ) } -func FuzzCodecSerializedPath(f *testing.F) { +func FuzzCodecKey(f *testing.F) { f.Fuzz( func( t *testing.T, b []byte, ) { require := require.New(t) - - codec := Codec.(*codecImpl) - reader := bytes.NewReader(b) - startLen := reader.Len() - got, err := codec.decodeSerializedPath(reader) + codec := codec.(*codecImpl) + got, err := codec.decodeKey(b) if err != nil { - return + t.SkipNow() } - endLen := reader.Len() - numRead := startLen - endLen // Encoding [got] should be the same as [b]. 
- var buf bytes.Buffer - err = codec.encodeSerializedPath(got, &buf) - require.NoError(err) - bufBytes := buf.Bytes() - require.Len(bufBytes, numRead) - require.Equal(b[:numRead], bufBytes) - - clonedGot := got.deserialize().Serialize() - require.Equal(got, clonedGot) - }, - ) -} - -func FuzzCodecProofCanonical(f *testing.F) { - f.Add( - []byte{ - // RootID: - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - // Path: - // Num proof nodes = 1 - 0x02, - // Key Path: - // Nibble Length: - 0x00, - // Value: - // Has Value = false - 0x00, - // Num Children = 2 - 0x04, - // Child 0: - // index = 0 - 0x00, - // childID: - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - // Child 1: - // index = 0 <- should fail - 0x00, - // childID: - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - // Key: - // length = 0 - 0x00, - }, - ) - f.Fuzz( - func( - t *testing.T, - b []byte, - ) { - require := require.New(t) - - codec := Codec.(*codecImpl) - proof := &Proof{} - got, err := codec.DecodeProof(b, proof) - if err != nil { - return - } - - // Encoding [proof] should be the same as [b]. - buf, err := codec.EncodeProof(got, proof) - require.NoError(err) - require.Equal(b, buf) - }, - ) -} - -func FuzzCodecChangeProofCanonical(f *testing.F) { - f.Fuzz( - func( - t *testing.T, - b []byte, - ) { - require := require.New(t) - - codec := Codec.(*codecImpl) - proof := &ChangeProof{} - got, err := codec.DecodeChangeProof(b, proof) - if err != nil { - return - } - - // Encoding [proof] should be the same as [b]. 
- buf, err := codec.EncodeChangeProof(got, proof) - require.NoError(err) - require.Equal(b, buf) - }, - ) -} - -func FuzzCodecRangeProofCanonical(f *testing.F) { - f.Fuzz( - func( - t *testing.T, - b []byte, - ) { - require := require.New(t) - - codec := Codec.(*codecImpl) - proof := &RangeProof{} - got, err := codec.DecodeRangeProof(b, proof) - if err != nil { - return - } - - // Encoding [proof] should be the same as [b]. - buf, err := codec.EncodeRangeProof(got, proof) - require.NoError(err) - require.Equal(b, buf) + gotBytes := codec.encodeKey(got) + require.Equal(b, gotBytes) }, ) } @@ -320,379 +100,169 @@ func FuzzCodecDBNodeCanonical(f *testing.F) { b []byte, ) { require := require.New(t) - - codec := Codec.(*codecImpl) + codec := codec.(*codecImpl) node := &dbNode{} - got, err := codec.decodeDBNode(b, node) - if err != nil { - return + if err := codec.decodeDBNode(b, node); err != nil { + t.SkipNow() } // Encoding [node] should be the same as [b]. - buf, err := codec.encodeDBNode(got, node) - require.NoError(err) + buf := codec.encodeDBNode(node) require.Equal(b, buf) }, ) } -func FuzzCodecProofDeterministic(f *testing.F) { +func FuzzCodecDBNodeDeterministic(f *testing.F) { f.Fuzz( func( t *testing.T, randSeed int, - key []byte, - numProofNodes uint, + hasValue bool, + valueBytes []byte, ) { require := require.New(t) + for _, bf := range validBranchFactors { + r := rand.New(rand.NewSource(int64(randSeed))) // #nosec G404 + + value := maybe.Nothing[[]byte]() + if hasValue { + if len(valueBytes) == 0 { + // We do this because when we encode a value of []byte{} + // we will later decode it as nil. + // Doing this prevents inconsistency when comparing the + // encoded and decoded values below. 
+ valueBytes = nil + } + value = maybe.Some(valueBytes) + } - r := rand.New(rand.NewSource(int64(randSeed))) // #nosec G404 - - proofNodes := make([]ProofNode, numProofNodes) - for i := range proofNodes { - proofNodes[i] = newRandomProofNode(r) - } - - proof := Proof{ - Path: proofNodes, - Key: key, - } - - proofBytes, err := Codec.EncodeProof(Version, &proof) - require.NoError(err) - - var gotProof Proof - gotVersion, err := Codec.DecodeProof(proofBytes, &gotProof) - require.NoError(err) - require.Equal(Version, gotVersion) - - nilEmptySlices(&proof) - nilEmptySlices(&gotProof) - require.Equal(proof, gotProof) + numChildren := r.Intn(int(bf)) // #nosec G404 - proofBytes2, err := Codec.EncodeProof(Version, &gotProof) - require.NoError(err) - require.Equal(proofBytes, proofBytes2) - }, - ) -} + children := map[byte]*child{} + for i := 0; i < numChildren; i++ { + var childID ids.ID + _, _ = r.Read(childID[:]) // #nosec G404 -func FuzzCodecChangeProofDeterministic(f *testing.F) { - f.Fuzz( - func( - t *testing.T, - randSeed int, - hadRootsInHistory bool, - numProofNodes uint, - numDeletedKeys uint, - ) { - require := require.New(t) + childKeyBytes := make([]byte, r.Intn(32)) // #nosec G404 + _, _ = r.Read(childKeyBytes) // #nosec G404 - r := rand.New(rand.NewSource(int64(randSeed))) // #nosec G404 - - startProofNodes := make([]ProofNode, numProofNodes) - endProofNodes := make([]ProofNode, numProofNodes) - for i := range startProofNodes { - startProofNodes[i] = newRandomProofNode(r) - endProofNodes[i] = newRandomProofNode(r) - } + children[byte(i)] = &child{ + compressedKey: ToKey(childKeyBytes), + id: childID, + } + } + node := dbNode{ + value: value, + children: children, + } - deletedKeys := make([][]byte, numDeletedKeys) - for i := range deletedKeys { - deletedKeys[i] = make([]byte, r.Intn(32)) // #nosec G404 - _, _ = r.Read(deletedKeys[i]) // #nosec G404 - } + nodeBytes := codec.encodeDBNode(&node) + require.Len(nodeBytes, codec.encodedDBNodeSize(&node)) + var 
gotNode dbNode + require.NoError(codec.decodeDBNode(nodeBytes, &gotNode)) + require.Equal(node, gotNode) - proof := ChangeProof{ - HadRootsInHistory: hadRootsInHistory, - StartProof: startProofNodes, - EndProof: endProofNodes, - KeyValues: newKeyValues(r, numProofNodes), - DeletedKeys: deletedKeys, + nodeBytes2 := codec.encodeDBNode(&gotNode) + require.Equal(nodeBytes, nodeBytes2) } - - proofBytes, err := Codec.EncodeChangeProof(Version, &proof) - require.NoError(err) - - var gotProof ChangeProof - gotVersion, err := Codec.DecodeChangeProof(proofBytes, &gotProof) - require.NoError(err) - require.Equal(Version, gotVersion) - - nilEmptySlices(&proof) - nilEmptySlices(&gotProof) - require.Equal(proof, gotProof) - - proofBytes2, err := Codec.EncodeChangeProof(Version, &gotProof) - require.NoError(err) - require.Equal(proofBytes, proofBytes2) }, ) } -func FuzzCodecRangeProofDeterministic(f *testing.F) { - f.Fuzz( - func( - t *testing.T, - randSeed int, - numStartProofNodes uint, - numEndProofNodes uint, - numKeyValues uint, - ) { - r := rand.New(rand.NewSource(int64(randSeed))) // #nosec G404 - - var rootID ids.ID - _, _ = r.Read(rootID[:]) // #nosec G404 - - startProofNodes := make([]ProofNode, numStartProofNodes) - for i := range startProofNodes { - startProofNodes[i] = newRandomProofNode(r) - } - - endProofNodes := make([]ProofNode, numEndProofNodes) - for i := range endProofNodes { - endProofNodes[i] = newRandomProofNode(r) - } - - keyValues := make([]KeyValue, numKeyValues) - for i := range keyValues { - key := make([]byte, r.Intn(32)) // #nosec G404 - _, _ = r.Read(key) // #nosec G404 - val := make([]byte, r.Intn(32)) // #nosec G404 - _, _ = r.Read(val) // #nosec G404 - keyValues[i] = KeyValue{ - Key: key, - Value: val, - } - } - - proof := RangeProof{ - StartProof: startProofNodes, - EndProof: endProofNodes, - KeyValues: keyValues, - } - - proofBytes, err := Codec.EncodeRangeProof(Version, &proof) - require.NoError(t, err) - - var gotProof RangeProof - _, err = 
Codec.DecodeRangeProof(proofBytes, &gotProof) - require.NoError(t, err) - - nilEmptySlices(&proof) - nilEmptySlices(&gotProof) - require.Equal(t, proof, gotProof) +func TestCodecDecodeDBNode_TooShort(t *testing.T) { + require := require.New(t) - proofBytes2, err := Codec.EncodeRangeProof(Version, &gotProof) - require.NoError(t, err) - require.Equal(t, proofBytes, proofBytes2) - }, + var ( + parsedDBNode dbNode + tooShortBytes = make([]byte, minDBNodeLen-1) ) + err := codec.decodeDBNode(tooShortBytes, &parsedDBNode) + require.ErrorIs(err, io.ErrUnexpectedEOF) } -func FuzzCodecDBNodeDeterministic(f *testing.F) { +// Ensure that encodeHashValues is deterministic +func FuzzEncodeHashValues(f *testing.F) { + codec1 := newCodec() + codec2 := newCodec() + f.Fuzz( func( t *testing.T, randSeed int, - hasValue bool, - valueBytes []byte, ) { require := require.New(t) - - r := rand.New(rand.NewSource(int64(randSeed))) // #nosec G404 - - value := Nothing[[]byte]() - if hasValue { - if len(valueBytes) == 0 { - // We do this because when we encode a value of []byte{} - // we will later decode it as nil. - // Doing this prevents inconsistency when comparing the - // encoded and decoded values below. 
- // Calling nilEmptySlices doesn't set this because it is a - // private variable on the struct - valueBytes = nil + for _, bf := range validBranchFactors { // Create a random node + r := rand.New(rand.NewSource(int64(randSeed))) // #nosec G404 + + children := map[byte]*child{} + numChildren := r.Intn(int(bf)) // #nosec G404 + for i := 0; i < numChildren; i++ { + compressedKeyLen := r.Intn(32) // #nosec G404 + compressedKeyBytes := make([]byte, compressedKeyLen) + _, _ = r.Read(compressedKeyBytes) // #nosec G404 + + children[byte(i)] = &child{ + compressedKey: ToKey(compressedKeyBytes), + id: ids.GenerateTestID(), + hasValue: r.Intn(2) == 1, // #nosec G404 + } } - value = Some(valueBytes) - } - - numChildren := r.Intn(NodeBranchFactor) // #nosec G404 - children := map[byte]child{} - for i := 0; i < numChildren; i++ { - var childID ids.ID - _, _ = r.Read(childID[:]) // #nosec G404 - - childPathBytes := make([]byte, r.Intn(32)) // #nosec G404 - _, _ = r.Read(childPathBytes) // #nosec G404 - - children[byte(i)] = child{ - compressedPath: newPath(childPathBytes), - id: childID, + hasValue := r.Intn(2) == 1 // #nosec G404 + value := maybe.Nothing[[]byte]() + if hasValue { + valueBytes := make([]byte, r.Intn(64)) // #nosec G404 + _, _ = r.Read(valueBytes) // #nosec G404 + value = maybe.Some(valueBytes) } - } - node := dbNode{ - value: value, - children: children, - } - nodeBytes, err := Codec.encodeDBNode(Version, &node) - require.NoError(err) + key := make([]byte, r.Intn(32)) // #nosec G404 + _, _ = r.Read(key) // #nosec G404 - var gotNode dbNode - gotVersion, err := Codec.decodeDBNode(nodeBytes, &gotNode) - require.NoError(err) - require.Equal(Version, gotVersion) + hv := &node{ + key: ToKey(key), + dbNode: dbNode{ + children: children, + value: value, + }, + } - nilEmptySlices(&node) - nilEmptySlices(&gotNode) - require.Equal(node, gotNode) + // Serialize hv with both codecs + hvBytes1 := codec1.encodeHashValues(hv) + hvBytes2 := codec2.encodeHashValues(hv) - 
nodeBytes2, err := Codec.encodeDBNode(Version, &gotNode) - require.NoError(err) - require.Equal(nodeBytes, nodeBytes2) + // Make sure they're the same + require.Equal(hvBytes1, hvBytes2) + } }, ) } -func TestCodec_DecodeProof(t *testing.T) { - require := require.New(t) - - _, err := Codec.DecodeProof([]byte{1}, nil) - require.ErrorIs(err, errDecodeNil) - - var ( - proof Proof - tooShortBytes = make([]byte, minProofLen-1) - ) - _, err = Codec.DecodeProof(tooShortBytes, &proof) - require.ErrorIs(err, io.ErrUnexpectedEOF) -} - -func TestCodec_DecodeChangeProof(t *testing.T) { - require := require.New(t) - - _, err := Codec.DecodeChangeProof([]byte{1}, nil) - require.ErrorIs(err, errDecodeNil) - - var ( - parsedProof ChangeProof - tooShortBytes = make([]byte, minChangeProofLen-1) - ) - _, err = Codec.DecodeChangeProof(tooShortBytes, &parsedProof) - require.ErrorIs(err, io.ErrUnexpectedEOF) - - proof := ChangeProof{ - HadRootsInHistory: true, - StartProof: nil, - EndProof: nil, - KeyValues: nil, - DeletedKeys: nil, - } - - proofBytes, err := Codec.EncodeChangeProof(Version, &proof) - require.NoError(err) - - // Remove key-values length and deleted keys length (both 0) from end - proofBytes = proofBytes[:len(proofBytes)-2*minVarIntLen] - - // Put key-values length of -1 and deleted keys length of 0 - proofBytesBuf := bytes.NewBuffer(proofBytes) - err = Codec.(*codecImpl).encodeInt(proofBytesBuf, -1) - require.NoError(err) - err = Codec.(*codecImpl).encodeInt(proofBytesBuf, 0) - require.NoError(err) - - _, err = Codec.DecodeChangeProof(proofBytesBuf.Bytes(), &parsedProof) - require.ErrorIs(err, errNegativeNumKeyValues) - - proofBytes = proofBytesBuf.Bytes() - proofBytes = proofBytes[:len(proofBytes)-2*minVarIntLen] - proofBytesBuf = bytes.NewBuffer(proofBytes) - - // Remove key-values length and deleted keys length from end - // Put key-values length of 0 and deleted keys length of -1 - err = Codec.(*codecImpl).encodeInt(proofBytesBuf, 0) - require.NoError(err) - err = 
Codec.(*codecImpl).encodeInt(proofBytesBuf, -1) - require.NoError(err) - - _, err = Codec.DecodeChangeProof(proofBytesBuf.Bytes(), &parsedProof) - require.ErrorIs(err, errNegativeNumKeyValues) -} - -func TestCodec_DecodeRangeProof(t *testing.T) { - require := require.New(t) - - _, err := Codec.DecodeRangeProof([]byte{1}, nil) - require.ErrorIs(err, errDecodeNil) - - var ( - parsedProof RangeProof - tooShortBytes = make([]byte, minRangeProofLen-1) - ) - _, err = Codec.DecodeRangeProof(tooShortBytes, &parsedProof) - require.ErrorIs(err, io.ErrUnexpectedEOF) - - proof := RangeProof{ - StartProof: nil, - EndProof: nil, - KeyValues: nil, - } - - proofBytes, err := Codec.EncodeRangeProof(Version, &proof) - require.NoError(err) - - // Remove key-values length (0) from end - proofBytes = proofBytes[:len(proofBytes)-minVarIntLen] - proofBytesBuf := bytes.NewBuffer(proofBytes) - // Put key-value length (-1) at end - err = Codec.(*codecImpl).encodeInt(proofBytesBuf, -1) - require.NoError(err) - - _, err = Codec.DecodeRangeProof(proofBytesBuf.Bytes(), &parsedProof) - require.ErrorIs(err, errNegativeNumKeyValues) +func TestCodecDecodeKeyLengthOverflowRegression(t *testing.T) { + codec := codec.(*codecImpl) + _, err := codec.decodeKey(binary.AppendUvarint(nil, math.MaxInt)) + require.ErrorIs(t, err, io.ErrUnexpectedEOF) } -func TestCodec_DecodeDBNode(t *testing.T) { - require := require.New(t) - - _, err := Codec.decodeDBNode([]byte{1}, nil) - require.ErrorIs(err, errDecodeNil) - - var ( - parsedDBNode dbNode - tooShortBytes = make([]byte, minDBNodeLen-1) - ) - _, err = Codec.decodeDBNode(tooShortBytes, &parsedDBNode) - require.ErrorIs(err, io.ErrUnexpectedEOF) - - proof := dbNode{ - value: Some([]byte{1}), - children: map[byte]child{}, +func TestUintSize(t *testing.T) { + c := codec.(*codecImpl) + + // Test lower bound + expectedSize := c.uintSize(0) + actualSize := binary.PutUvarint(make([]byte, binary.MaxVarintLen64), 0) + require.Equal(t, expectedSize, actualSize) + + // 
Test upper bound + expectedSize = c.uintSize(math.MaxUint64) + actualSize = binary.PutUvarint(make([]byte, binary.MaxVarintLen64), math.MaxUint64) + require.Equal(t, expectedSize, actualSize) + + // Test powers of 2 + for power := 0; power < 64; power++ { + n := uint64(1) << uint(power) + expectedSize := c.uintSize(n) + actualSize := binary.PutUvarint(make([]byte, binary.MaxVarintLen64), n) + require.Equal(t, expectedSize, actualSize, power) } - - nodeBytes, err := Codec.encodeDBNode(Version, &proof) - require.NoError(err) - - // Remove num children (0) from end - nodeBytes = nodeBytes[:len(nodeBytes)-minVarIntLen] - proofBytesBuf := bytes.NewBuffer(nodeBytes) - // Put num children -1 at end - err = Codec.(*codecImpl).encodeInt(proofBytesBuf, -1) - require.NoError(err) - - _, err = Codec.decodeDBNode(proofBytesBuf.Bytes(), &parsedDBNode) - require.ErrorIs(err, errNegativeNumChildren) - - // Remove num children from end - nodeBytes = proofBytesBuf.Bytes() - nodeBytes = nodeBytes[:len(nodeBytes)-minVarIntLen] - proofBytesBuf = bytes.NewBuffer(nodeBytes) - // Put num children NodeBranchFactor+1 at end - err = Codec.(*codecImpl).encodeInt(proofBytesBuf, NodeBranchFactor+1) - require.NoError(err) - - _, err = Codec.decodeDBNode(proofBytesBuf.Bytes(), &parsedDBNode) - require.ErrorIs(err, errTooManyChildren) } diff --git a/avalanchego/x/merkledb/db.go b/avalanchego/x/merkledb/db.go index 88a63d23..cc81a1be 100644 --- a/avalanchego/x/merkledb/db.go +++ b/avalanchego/x/merkledb/db.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package merkledb @@ -8,65 +8,179 @@ import ( "context" "errors" "fmt" + "runtime" + "slices" "sync" "github.com/prometheus/client_golang/prometheus" - "go.opentelemetry.io/otel/attribute" - - oteltrace "go.opentelemetry.io/otel/trace" - "golang.org/x/exp/maps" - "golang.org/x/exp/slices" + "golang.org/x/sync/semaphore" "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/database/prefixdb" - "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/trace" "github.com/ava-labs/avalanchego/utils" - "github.com/ava-labs/avalanchego/utils/math" + "github.com/ava-labs/avalanchego/utils/maybe" "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/utils/units" + + oteltrace "go.opentelemetry.io/otel/trace" ) const ( - RootPath = EmptyPath - // TODO: name better - rebuildViewSizeFractionOfCacheSize = 50 - minRebuildViewSizePerCommit = 1000 + rebuildViewSizeFractionOfCacheSize = 50 + minRebuildViewSizePerCommit = 1000 + clearBatchSize = units.MiB + rebuildIntermediateDeletionWriteSize = units.MiB + valueNodePrefixLen = 1 + cacheEntryOverHead = 8 ) var ( - _ Trie = &Database{} - _ database.Database = &Database{} + _ MerkleDB = (*merkleDB)(nil) - Codec, Version = newCodec() + codec = newCodec() - rootKey = []byte{} - nodePrefix = []byte("node") - metadataPrefix = []byte("metadata") - cleanShutdownKey = []byte("cleanShutdown") + metadataPrefix = []byte{0} + valueNodePrefix = []byte{1} + intermediateNodePrefix = []byte{2} + + cleanShutdownKey = []byte(string(metadataPrefix) + "cleanShutdown") + rootDBKey = []byte(string(metadataPrefix) + "root") hadCleanShutdown = []byte{1} didNotHaveCleanShutdown = []byte{0} errSameRoot = errors.New("start and end root are the same") ) +type ChangeProofer interface { + // GetChangeProof returns a proof for a subset of the key/value changes in key range + // [start, end] that occurred between [startRootID] and [endRootID]. 
+ // Returns at most [maxLength] key/value pairs. + // Returns [ErrInsufficientHistory] if this node has insufficient history + // to generate the proof. + // Returns ErrEmptyProof if [endRootID] is ids.Empty. + // Note that [endRootID] == ids.Empty means the trie is empty + // (i.e. we don't need a change proof.) + // Returns [ErrNoEndRoot], which wraps [ErrInsufficientHistory], if the + // history doesn't contain the [endRootID]. + GetChangeProof( + ctx context.Context, + startRootID ids.ID, + endRootID ids.ID, + start maybe.Maybe[[]byte], + end maybe.Maybe[[]byte], + maxLength int, + ) (*ChangeProof, error) + + // Returns nil iff all the following hold: + // - [start] <= [end]. + // - [proof] is non-empty. + // - All keys in [proof.KeyValues] and [proof.DeletedKeys] are in [start, end]. + // If [start] is nothing, all keys are considered > [start]. + // If [end] is nothing, all keys are considered < [end]. + // - [proof.KeyValues] and [proof.DeletedKeys] are sorted in order of increasing key. + // - [proof.StartProof] and [proof.EndProof] are well-formed. + // - When the changes in [proof.KeyChanes] are applied, + // the root ID of the database is [expectedEndRootID]. + VerifyChangeProof( + ctx context.Context, + proof *ChangeProof, + start maybe.Maybe[[]byte], + end maybe.Maybe[[]byte], + expectedEndRootID ids.ID, + ) error + + // CommitChangeProof commits the key/value pairs within the [proof] to the db. + CommitChangeProof(ctx context.Context, proof *ChangeProof) error +} + +type RangeProofer interface { + // GetRangeProofAtRoot returns a proof for the key/value pairs in this trie within the range + // [start, end] when the root of the trie was [rootID]. + // If [start] is Nothing, there's no lower bound on the range. + // If [end] is Nothing, there's no upper bound on the range. + // Returns ErrEmptyProof if [rootID] is ids.Empty. + // Note that [rootID] == ids.Empty means the trie is empty + // (i.e. we don't need a range proof.) 
+ GetRangeProofAtRoot( + ctx context.Context, + rootID ids.ID, + start maybe.Maybe[[]byte], + end maybe.Maybe[[]byte], + maxLength int, + ) (*RangeProof, error) + + // CommitRangeProof commits the key/value pairs within the [proof] to the db. + // [start] is the smallest possible key in the range this [proof] covers. + // [end] is the largest possible key in the range this [proof] covers. + CommitRangeProof(ctx context.Context, start, end maybe.Maybe[[]byte], proof *RangeProof) error +} + +type Clearer interface { + // Deletes all key/value pairs from the database + // and clears the change history. + Clear() error +} + +type Prefetcher interface { + // PrefetchPath attempts to load all trie nodes on the path of [key] + // into the cache. + PrefetchPath(key []byte) error + + // PrefetchPaths attempts to load all trie nodes on the paths of [keys] + // into the cache. + // + // Using PrefetchPaths can be more efficient than PrefetchPath because + // the underlying view used to compute each path can be reused. + PrefetchPaths(keys [][]byte) error +} + +type MerkleDB interface { + database.Database + Clearer + Trie + MerkleRootGetter + ProofGetter + ChangeProofer + RangeProofer + Prefetcher +} + type Config struct { + // BranchFactor determines the number of children each node can have. + BranchFactor BranchFactor + + // RootGenConcurrency is the number of goroutines to use when + // generating a new state root. + // + // If 0 is specified, [runtime.NumCPU] will be used. + RootGenConcurrency uint + // The number of changes to the database that we store in memory in order to // serve change proofs. - HistoryLength int - NodeCacheSize int + HistoryLength uint + // The number of bytes used to cache nodes with values. + ValueNodeCacheSize uint + // The number of bytes used to cache nodes without values. + IntermediateNodeCacheSize uint + // The number of bytes used to store nodes without values in memory before forcing them onto disk. 
+ IntermediateWriteBufferSize uint + // The number of bytes to write to disk when intermediate nodes are evicted + // from the write buffer and written to disk. + IntermediateWriteBatchSize uint // If [Reg] is nil, metrics are collected locally but not exported through // Prometheus. // This may be useful for testing. - Reg prometheus.Registerer - Tracer trace.Tracer + Reg prometheus.Registerer + TraceLevel TraceLevel + Tracer trace.Tracer } -// Can only be edited by committing changes from a trieView. -type Database struct { +// merkleDB can only be edited by committing changes from a view. +type merkleDB struct { // Must be held when reading/writing fields. lock sync.RWMutex @@ -76,16 +190,12 @@ type Database struct { // Should be held before taking [db.lock] commitLock sync.RWMutex - // versiondb that the other dbs are built on. - // Allows the changes made to the snapshot and [nodeDB] to be atomic. - nodeDB *versiondb.Database - - // Stores data about the database's current state. - metadataDB database.Database + // Contains all the key-value pairs stored by this database, + // including metadata, intermediate nodes and value nodes. + baseDB database.Database - // If a value is nil, the corresponding key isn't in the trie. - nodeCache onEvictCache[path, *node] - onEvictionErr utils.Atomic[error] + valueNodeDB *valueNodeDB + intermediateNodeDB *intermediateNodeDB // Stores change lists. Used to serve change proofs and construct // historical views of the trie. @@ -96,13 +206,32 @@ type Database struct { metrics merkleMetrics - tracer trace.Tracer + debugTracer trace.Tracer + infoTracer trace.Tracer // The root of this trie. - root *node + // Nothing if the trie is empty. + root maybe.Maybe[*node] + + rootID ids.ID // Valid children of this trie. - childViews []*trieView + childViews []*view + + // calculateNodeIDsSema controls the number of goroutines inside + // [calculateNodeIDsHelper] at any given time. 
+ calculateNodeIDsSema *semaphore.Weighted + + tokenSize int +} + +// New returns a new merkle database. +func New(ctx context.Context, db database.Database, config Config) (MerkleDB, error) { + metrics, err := newMetrics("merkleDB", config.Reg) + if err != nil { + return nil, err + } + return newDatabase(ctx, db, config, metrics) } func newDatabase( @@ -110,37 +239,66 @@ func newDatabase( db database.Database, config Config, metrics merkleMetrics, -) (*Database, error) { - trieDB := &Database{ - metrics: metrics, - nodeDB: versiondb.New(prefixdb.New(nodePrefix, db)), - metadataDB: prefixdb.New(metadataPrefix, db), - history: newTrieHistory(config.HistoryLength), - tracer: config.Tracer, - childViews: make([]*trieView, 0, defaultPreallocationSize), +) (*merkleDB, error) { + if err := config.BranchFactor.Valid(); err != nil { + return nil, err } - // Note: trieDB.OnEviction is responsible for writing intermediary nodes to - // disk as they are evicted from the cache. - trieDB.nodeCache = newOnEvictCache[path](config.NodeCacheSize, trieDB.onEviction) - - root, err := trieDB.initializeRootIfNeeded() - if err != nil { + rootGenConcurrency := uint(runtime.NumCPU()) + if config.RootGenConcurrency != 0 { + rootGenConcurrency = config.RootGenConcurrency + } + + // Share a sync.Pool of []byte between the intermediateNodeDB and valueNodeDB + // to reduce memory allocations. 
+ bufferPool := &sync.Pool{ + New: func() interface{} { + return make([]byte, 0, defaultBufferLength) + }, + } + + trieDB := &merkleDB{ + metrics: metrics, + baseDB: db, + intermediateNodeDB: newIntermediateNodeDB( + db, + bufferPool, + metrics, + int(config.IntermediateNodeCacheSize), + int(config.IntermediateWriteBufferSize), + int(config.IntermediateWriteBatchSize), + BranchFactorToTokenSize[config.BranchFactor]), + valueNodeDB: newValueNodeDB(db, + bufferPool, + metrics, + int(config.ValueNodeCacheSize)), + history: newTrieHistory(int(config.HistoryLength)), + debugTracer: getTracerIfEnabled(config.TraceLevel, DebugTrace, config.Tracer), + infoTracer: getTracerIfEnabled(config.TraceLevel, InfoTrace, config.Tracer), + childViews: make([]*view, 0, defaultPreallocationSize), + calculateNodeIDsSema: semaphore.NewWeighted(int64(rootGenConcurrency)), + tokenSize: BranchFactorToTokenSize[config.BranchFactor], + } + + if err := trieDB.initializeRoot(); err != nil { return nil, err } // add current root to history (has no changes) trieDB.history.record(&changeSummary{ - rootID: root, - values: map[path]*change[Maybe[[]byte]]{}, - nodes: map[path]*change[*node]{}, + rootID: trieDB.rootID, + rootChange: change[maybe.Maybe[*node]]{ + after: trieDB.root, + }, + values: map[Key]*change[maybe.Maybe[[]byte]]{}, + nodes: map[Key]*change[*node]{}, }) - shutdownType, err := trieDB.metadataDB.Get(cleanShutdownKey) + shutdownType, err := trieDB.baseDB.Get(cleanShutdownKey) switch err { case nil: if bytes.Equal(shutdownType, didNotHaveCleanShutdown) { - if err := trieDB.rebuild(ctx); err != nil { + if err := trieDB.rebuild(ctx, int(config.ValueNodeCacheSize)); err != nil { return nil, err } } @@ -152,109 +310,138 @@ func newDatabase( } // mark that the db has not yet been cleanly closed - err = trieDB.metadataDB.Put(cleanShutdownKey, didNotHaveCleanShutdown) + err = trieDB.baseDB.Put(cleanShutdownKey, didNotHaveCleanShutdown) return trieDB, err } // Deletes every intermediate node 
and rebuilds them by re-adding every key/value. // TODO: make this more efficient by only clearing out the stale portions of the trie. -func (db *Database) rebuild(ctx context.Context) error { - db.root = newNode(nil, RootPath) - if err := db.nodeDB.Delete(rootKey); err != nil { +func (db *merkleDB) rebuild(ctx context.Context, cacheSize int) error { + db.root = maybe.Nothing[*node]() + db.rootID = ids.Empty + + // Delete intermediate nodes. + if err := database.ClearPrefix(db.baseDB, intermediateNodePrefix, rebuildIntermediateDeletionWriteSize); err != nil { return err } - it := db.nodeDB.NewIterator() - defer it.Release() - currentViewSize := 0 - viewSizeLimit := math.Max( - db.nodeCache.maxSize/rebuildViewSizeFractionOfCacheSize, + // Add all key-value pairs back into the database. + opsSizeLimit := max( + cacheSize/rebuildViewSizeFractionOfCacheSize, minRebuildViewSizePerCommit, ) - - currentView, err := db.newUntrackedView(viewSizeLimit) - if err != nil { - return err - } - - for it.Next() { - if currentViewSize >= viewSizeLimit { - if err := currentView.commitToDB(ctx); err != nil { - return err - } - currentView, err = db.newUntrackedView(viewSizeLimit) + currentOps := make([]database.BatchOp, 0, opsSizeLimit) + valueIt := db.NewIterator() + // ensure valueIt is captured and release gets called on the latest copy of valueIt + defer func() { valueIt.Release() }() + for valueIt.Next() { + if len(currentOps) >= opsSizeLimit { + view, err := newView(db, db, ViewChanges{BatchOps: currentOps, ConsumeBytes: true}) if err != nil { return err } - currentViewSize = 0 - } - - key := it.Key() - path := path(key) - value := it.Value() - n, err := parseNode(path, value) - if err != nil { - return err - } - if n.hasValue() { - serializedPath := path.Serialize() - if err := currentView.Insert(ctx, serializedPath.Value, n.value.value); err != nil { + if err := view.commitToDB(ctx); err != nil { return err } - currentViewSize++ - } - if err := db.nodeDB.Delete(key); err != 
nil { - return err + currentOps = make([]database.BatchOp, 0, opsSizeLimit) + // reset the iterator to prevent memory bloat + nextValue := valueIt.Key() + valueIt.Release() + valueIt = db.NewIteratorWithStart(nextValue) + continue } + + currentOps = append(currentOps, database.BatchOp{ + Key: valueIt.Key(), + Value: valueIt.Value(), + }) } - if err := it.Error(); err != nil { + if err := valueIt.Error(); err != nil { return err } - if err := currentView.commitToDB(ctx); err != nil { + view, err := newView(db, db, ViewChanges{BatchOps: currentOps, ConsumeBytes: true}) + if err != nil { return err } - return db.nodeDB.Compact(nil, nil) -} - -// New returns a new merkle database. -func New(ctx context.Context, db database.Database, config Config) (*Database, error) { - metrics, err := newMetrics("merkleDB", config.Reg) - if err != nil { - return nil, err + if err := view.commitToDB(ctx); err != nil { + return err } - return newDatabase(ctx, db, config, metrics) + return db.Compact(nil, nil) } -// Commits the key/value pairs within the [proof] to the db. -func (db *Database) CommitChangeProof(ctx context.Context, proof *ChangeProof) error { +func (db *merkleDB) CommitChangeProof(ctx context.Context, proof *ChangeProof) error { db.commitLock.Lock() defer db.commitLock.Unlock() - view, err := db.prepareChangeProofView(proof) + if db.closed { + return database.ErrClosed + } + ops := make([]database.BatchOp, len(proof.KeyChanges)) + for i, kv := range proof.KeyChanges { + ops[i] = database.BatchOp{ + Key: kv.Key, + Value: kv.Value.Value(), + Delete: kv.Value.IsNothing(), + } + } + + view, err := newView(db, db, ViewChanges{BatchOps: ops}) if err != nil { return err } return view.commitToDB(ctx) } -// Commits the key/value pairs within the [proof] to the db. -// [start] is the smallest key in the range this [proof] covers. 
-func (db *Database) CommitRangeProof(ctx context.Context, start []byte, proof *RangeProof) error { +func (db *merkleDB) CommitRangeProof(ctx context.Context, start, end maybe.Maybe[[]byte], proof *RangeProof) error { db.commitLock.Lock() defer db.commitLock.Unlock() - view, err := db.prepareRangeProofView(start, proof) + if db.closed { + return database.ErrClosed + } + + ops := make([]database.BatchOp, len(proof.KeyValues)) + keys := set.NewSet[string](len(proof.KeyValues)) + for i, kv := range proof.KeyValues { + keys.Add(string(kv.Key)) + ops[i] = database.BatchOp{ + Key: kv.Key, + Value: kv.Value, + } + } + + largestKey := end + if len(proof.KeyValues) > 0 { + largestKey = maybe.Some(proof.KeyValues[len(proof.KeyValues)-1].Key) + } + keysToDelete, err := db.getKeysNotInSet(start, largestKey, keys) if err != nil { return err } + for _, keyToDelete := range keysToDelete { + ops = append(ops, database.BatchOp{ + Key: keyToDelete, + Delete: true, + }) + } + + // Don't need to lock [view] because nobody else has a reference to it. + view, err := newView(db, db, ViewChanges{BatchOps: ops}) + if err != nil { + return err + } + return view.commitToDB(ctx) } -func (db *Database) Compact(start []byte, limit []byte) error { - return db.nodeDB.Compact(start, limit) +func (db *merkleDB) Compact(start []byte, limit []byte) error { + if db.closed { + return database.ErrClosed + } + return db.baseDB.Compact(start, limit) } -func (db *Database) Close() error { +func (db *merkleDB) Close() error { db.commitLock.Lock() defer db.commitLock.Unlock() @@ -265,77 +452,99 @@ func (db *Database) Close() error { return database.ErrClosed } + // mark all children as no longer valid because the db has closed + db.invalidateChildrenExcept(nil) + db.closed = true + db.valueNodeDB.Close() + // Flush intermediary nodes to disk. 
+ if err := db.intermediateNodeDB.Flush(); err != nil { + return err + } - defer func() { - _ = db.metadataDB.Close() - _ = db.nodeDB.Close() - }() + // Successfully wrote intermediate nodes. + return db.baseDB.Put(cleanShutdownKey, hadCleanShutdown) +} - if err := db.onEvictionErr.Get(); err != nil { - // If there was an error during cache eviction, - // [db.nodeCache] and [db.nodeDB] are in an inconsistent state. - // Do not write cached nodes to disk or mark clean shutdown. - return nil - } +func (db *merkleDB) PrefetchPaths(keys [][]byte) error { + db.commitLock.RLock() + defer db.commitLock.RUnlock() - // Flush [nodeCache] to persist intermediary nodes to disk. - if err := db.nodeCache.Flush(); err != nil { - // There was an error during cache eviction. - // Don't commit to disk. - return err + if db.closed { + return database.ErrClosed } - if err := db.nodeDB.Commit(); err != nil { - return err + for _, key := range keys { + if err := db.prefetchPath(key); err != nil { + return err + } } - // Successfully wrote intermediate nodes. 
- return db.metadataDB.Put(cleanShutdownKey, hadCleanShutdown) + return nil } -func (db *Database) Delete(key []byte) error { - // this is a duplicate because the database interface doesn't support - // contexts, which are used for tracing - return db.Remove(context.Background(), key) +func (db *merkleDB) PrefetchPath(key []byte) error { + db.commitLock.RLock() + defer db.commitLock.RUnlock() + + if db.closed { + return database.ErrClosed + } + return db.prefetchPath(key) } -func (db *Database) Get(key []byte) ([]byte, error) { +func (db *merkleDB) prefetchPath(keyBytes []byte) error { + return visitPathToKey(db, ToKey(keyBytes), func(n *node) error { + if n.hasValue() { + db.valueNodeDB.nodeCache.Put(n.key, n) + } else { + db.intermediateNodeDB.nodeCache.Put(n.key, n) + } + return nil + }) +} + +func (db *merkleDB) Get(key []byte) ([]byte, error) { // this is a duplicate because the database interface doesn't support // contexts, which are used for tracing return db.GetValue(context.Background(), key) } -func (db *Database) GetValues(ctx context.Context, keys [][]byte) ([][]byte, []error) { - _, span := db.tracer.Start(ctx, "MerkleDB.GetValues", oteltrace.WithAttributes( +func (db *merkleDB) GetValues(ctx context.Context, keys [][]byte) ([][]byte, []error) { + _, span := db.debugTracer.Start(ctx, "MerkleDB.GetValues", oteltrace.WithAttributes( attribute.Int("keyCount", len(keys)), )) defer span.End() + // Lock to ensure no commit happens during the reads. db.lock.RLock() defer db.lock.RUnlock() values := make([][]byte, len(keys)) - errors := make([]error, len(keys)) + getErrors := make([]error, len(keys)) for i, key := range keys { - values[i], errors[i] = db.getValueCopy(newPath(key), false) + values[i], getErrors[i] = db.getValueCopy(ToKey(key)) } - return values, errors + return values, getErrors } // GetValue returns the value associated with [key]. // Returns database.ErrNotFound if it doesn't exist. 
-func (db *Database) GetValue(ctx context.Context, key []byte) ([]byte, error) { - _, span := db.tracer.Start(ctx, "MerkleDB.GetValue") +func (db *merkleDB) GetValue(ctx context.Context, key []byte) ([]byte, error) { + _, span := db.debugTracer.Start(ctx, "MerkleDB.GetValue") defer span.End() - return db.getValueCopy(newPath(key), true) + db.lock.RLock() + defer db.lock.RUnlock() + + return db.getValueCopy(ToKey(key)) } // getValueCopy returns a copy of the value for the given [key]. // Returns database.ErrNotFound if it doesn't exist. -func (db *Database) getValueCopy(key path, lock bool) ([]byte, error) { - val, err := db.getValue(key, lock) +// Assumes [db.lock] is read locked. +func (db *merkleDB) getValueCopy(key Key) ([]byte, error) { + val, err := db.getValueWithoutLock(key) if err != nil { return nil, err } @@ -344,138 +553,141 @@ func (db *Database) getValueCopy(key path, lock bool) ([]byte, error) { // getValue returns the value for the given [key]. // Returns database.ErrNotFound if it doesn't exist. -func (db *Database) getValue(key path, lock bool) ([]byte, error) { - if lock { - db.lock.RLock() - defer db.lock.RUnlock() - } +// Assumes [db.lock] isn't held. +func (db *merkleDB) getValue(key Key) ([]byte, error) { + db.lock.RLock() + defer db.lock.RUnlock() + + return db.getValueWithoutLock(key) +} +// getValueWithoutLock returns the value for the given [key]. +// Returns database.ErrNotFound if it doesn't exist. +// Assumes [db.lock] is read locked. +func (db *merkleDB) getValueWithoutLock(key Key) ([]byte, error) { if db.closed { return nil, database.ErrClosed } - n, err := db.getNode(key) + + n, err := db.getNode(key, true /* hasValue */) if err != nil { return nil, err } if n.value.IsNothing() { return nil, database.ErrNotFound } - return n.value.value, nil + return n.value.Value(), nil } -// Returns the ID of the root node of the merkle trie. 
-func (db *Database) GetMerkleRoot(ctx context.Context) (ids.ID, error) { - _, span := db.tracer.Start(ctx, "MerkleDB.GetMerkleRoot") +func (db *merkleDB) GetMerkleRoot(ctx context.Context) (ids.ID, error) { + _, span := db.infoTracer.Start(ctx, "MerkleDB.GetMerkleRoot") defer span.End() db.lock.RLock() defer db.lock.RUnlock() + if db.closed { + return ids.Empty, database.ErrClosed + } + return db.getMerkleRoot(), nil } -// Returns the ID of the root node of the merkle trie. -// Assumes [db.lock] is read locked. -func (db *Database) getMerkleRoot() ids.ID { - return db.root.id +// Assumes [db.lock] or [db.commitLock] is read locked. +func (db *merkleDB) getMerkleRoot() ids.ID { + return db.rootID } -// Returns a proof of the existence/non-existence of [key] in this trie. -func (db *Database) GetProof(ctx context.Context, key []byte) (*Proof, error) { +func (db *merkleDB) GetProof(ctx context.Context, key []byte) (*Proof, error) { db.commitLock.RLock() defer db.commitLock.RUnlock() - return db.getProof(ctx, key) -} + _, span := db.infoTracer.Start(ctx, "MerkleDB.GetProof") + defer span.End() -// Returns a proof of the existence/non-existence of [key] in this trie. -// Assumes [db.commitLock] is read locked. -func (db *Database) getProof(ctx context.Context, key []byte) (*Proof, error) { - view, err := db.newUntrackedView(defaultPreallocationSize) - if err != nil { - return nil, err + if db.closed { + return nil, database.ErrClosed } - // Don't need to lock [view] because nobody else has a reference to it. - return view.getProof(ctx, key) + + return getProof(db, key) } -// Returns a proof for the key/value pairs in this trie within the range -// [start, end]. 
-func (db *Database) GetRangeProof( +func (db *merkleDB) GetRangeProof( ctx context.Context, - start, - end []byte, + start maybe.Maybe[[]byte], + end maybe.Maybe[[]byte], maxLength int, ) (*RangeProof, error) { db.commitLock.RLock() defer db.commitLock.RUnlock() - return db.getRangeProofAtRoot(ctx, db.getMerkleRoot(), start, end, maxLength) + _, span := db.infoTracer.Start(ctx, "MerkleDB.GetRangeProof") + defer span.End() + + if db.closed { + return nil, database.ErrClosed + } + + return getRangeProof(db, start, end, maxLength) } -// Returns a proof for the key/value pairs in this trie within the range -// [start, end] when the root of the trie was [rootID]. -func (db *Database) GetRangeProofAtRoot( +func (db *merkleDB) GetRangeProofAtRoot( ctx context.Context, rootID ids.ID, - start, - end []byte, + start maybe.Maybe[[]byte], + end maybe.Maybe[[]byte], maxLength int, ) (*RangeProof, error) { db.commitLock.RLock() defer db.commitLock.RUnlock() - return db.getRangeProofAtRoot(ctx, rootID, start, end, maxLength) -} + _, span := db.infoTracer.Start(ctx, "MerkleDB.GetRangeProofAtRoot") + defer span.End() -// Assumes [db.commitLock] is read locked. -func (db *Database) getRangeProofAtRoot( - ctx context.Context, - rootID ids.ID, - start, - end []byte, - maxLength int, -) (*RangeProof, error) { - if maxLength <= 0 { + switch { + case db.closed: + return nil, database.ErrClosed + case maxLength <= 0: return nil, fmt.Errorf("%w but was %d", ErrInvalidMaxLength, maxLength) + case rootID == ids.Empty: + return nil, ErrEmptyProof } - historicalView, err := db.getHistoricalViewForRange(rootID, start, end) + historicalTrie, err := db.getTrieAtRootForRange(rootID, start, end) if err != nil { return nil, err } - return historicalView.GetRangeProof(ctx, start, end, maxLength) + return getRangeProof(historicalTrie, start, end, maxLength) } -// Returns a proof for a subset of the key/value changes in key range -// [start, end] that occurred between [startRootID] and [endRootID]. 
-// Returns at most [maxLength] key/value pairs. -func (db *Database) GetChangeProof( +func (db *merkleDB) GetChangeProof( ctx context.Context, startRootID ids.ID, endRootID ids.ID, - start []byte, - end []byte, + start maybe.Maybe[[]byte], + end maybe.Maybe[[]byte], maxLength int, ) (*ChangeProof, error) { - if len(end) > 0 && bytes.Compare(start, end) == 1 { + _, span := db.infoTracer.Start(ctx, "MerkleDB.GetChangeProof") + defer span.End() + + switch { + case start.HasValue() && end.HasValue() && bytes.Compare(start.Value(), end.Value()) == 1: return nil, ErrStartAfterEnd - } - if startRootID == endRootID { + case startRootID == endRootID: return nil, errSameRoot + case endRootID == ids.Empty: + return nil, ErrEmptyProof } db.commitLock.RLock() defer db.commitLock.RUnlock() - result := &ChangeProof{ - HadRootsInHistory: true, + if db.closed { + return nil, database.ErrClosed } + changes, err := db.history.getValueChanges(startRootID, endRootID, start, end, maxLength) - if err == ErrRootIDNotPresent { - result.HadRootsInHistory = false - return result, nil - } if err != nil { return nil, err } @@ -484,47 +696,44 @@ func (db *Database) GetChangeProof( // values modified between [startRootID] to [endRootID] sorted in increasing // order. 
changedKeys := maps.Keys(changes.values) - slices.SortFunc(changedKeys, func(i, j path) bool { - return i.Compare(j) < 0 - }) + utils.Sort(changedKeys) - // TODO: sync.pool these buffers - result.KeyValues = make([]KeyValue, 0, len(changedKeys)) - result.DeletedKeys = make([][]byte, 0, len(changedKeys)) + result := &ChangeProof{ + KeyChanges: make([]KeyChange, 0, len(changedKeys)), + } for _, key := range changedKeys { change := changes.values[key] - serializedKey := key.Serialize().Value - if change.after.IsNothing() { - result.DeletedKeys = append(result.DeletedKeys, serializedKey) - } else { - result.KeyValues = append(result.KeyValues, KeyValue{ - Key: serializedKey, - // create a copy so edits of the []byte don't affect the db - Value: slices.Clone(change.after.value), - }) - } + result.KeyChanges = append(result.KeyChanges, KeyChange{ + Key: key.Bytes(), + // create a copy so edits of the []byte don't affect the db + Value: maybe.Bind(change.after, slices.Clone[[]byte]), + }) + } + + largestKey := end + if len(result.KeyChanges) > 0 { + largestKey = maybe.Some(result.KeyChanges[len(result.KeyChanges)-1].Key) } - largestKey := result.getLargestKey(end) // Since we hold [db.commitlock] we must still have sufficient // history to recreate the trie at [endRootID]. 
- historicalView, err := db.getHistoricalViewForRange(endRootID, start, largestKey) + historicalTrie, err := db.getTrieAtRootForRange(endRootID, start, largestKey) if err != nil { return nil, err } - if len(largestKey) > 0 { - endProof, err := historicalView.getProof(ctx, largestKey) + if largestKey.HasValue() { + endProof, err := getProof(historicalTrie, largestKey.Value()) if err != nil { return nil, err } result.EndProof = endProof.Path } - if len(start) > 0 { - startProof, err := historicalView.getProof(ctx, start) + if start.HasValue() { + startProof, err := getProof(historicalTrie, start.Value()) if err != nil { return nil, err } @@ -534,7 +743,7 @@ func (db *Database) GetChangeProof( commonNodeIndex := 0 for ; commonNodeIndex < len(result.StartProof) && commonNodeIndex < len(result.EndProof) && - result.StartProof[commonNodeIndex].KeyPath.Equal(result.EndProof[commonNodeIndex].KeyPath); commonNodeIndex++ { + result.StartProof[commonNodeIndex].Key == result.EndProof[commonNodeIndex].Key; commonNodeIndex++ { } result.StartProof = result.StartProof[commonNodeIndex:] } @@ -551,37 +760,39 @@ func (db *Database) GetChangeProof( return result, nil } -// Returns a new view on top of this trie. -// Changes made to the view will only be reflected in the original trie if Commit is called. -// Assumes [db.lock] isn't held. -func (db *Database) NewView() (TrieView, error) { - return db.NewPreallocatedView(defaultPreallocationSize) -} - -// Returns a new view that isn't tracked in [db.childViews]. -// For internal use only, namely in methods that create short-lived views. -// Assumes [db.lock] is read locked. -func (db *Database) newUntrackedView(estimatedSize int) (*trieView, error) { - return newTrieView(db, db, db.root.clone(), estimatedSize) -} +// NewView returns a new view on top of this Trie where the passed changes +// have been applied. +// +// Changes made to the view will only be reflected in the original trie if +// Commit is called. 
+// +// Assumes [db.commitLock] and [db.lock] aren't held. +func (db *merkleDB) NewView( + _ context.Context, + changes ViewChanges, +) (View, error) { + // ensure the db doesn't change while creating the new view + db.commitLock.RLock() + defer db.commitLock.RUnlock() -// Returns a new view preallocated to hold at least [estimatedSize] value changes at a time. -// If more changes are made, additional memory will be allocated. -// The returned view is added to [db.childViews]. -// Assumes [db.lock] isn't held. -func (db *Database) NewPreallocatedView(estimatedSize int) (TrieView, error) { - db.lock.Lock() - defer db.lock.Unlock() + if db.closed { + return nil, database.ErrClosed + } - newView, err := newTrieView(db, db, db.root.clone(), estimatedSize) + newView, err := newView(db, db, changes) if err != nil { return nil, err } + + // ensure access to childViews is protected + db.lock.Lock() + defer db.lock.Unlock() + db.childViews = append(db.childViews, newView) return newView, nil } -func (db *Database) Has(k []byte) (bool, error) { +func (db *merkleDB) Has(k []byte) (bool, error) { db.lock.RLock() defer db.lock.RUnlock() @@ -589,166 +800,135 @@ func (db *Database) Has(k []byte) (bool, error) { return false, database.ErrClosed } - _, err := db.getValue(newPath(k), true) - if err == database.ErrNotFound { + _, err := db.getValueWithoutLock(ToKey(k)) + if errors.Is(err, database.ErrNotFound) { return false, nil } return err == nil, err } -func (db *Database) HealthCheck(ctx context.Context) (interface{}, error) { - return db.nodeDB.HealthCheck(ctx) -} - -func (db *Database) Insert(ctx context.Context, k, v []byte) error { - db.commitLock.Lock() - defer db.commitLock.Unlock() - +func (db *merkleDB) HealthCheck(ctx context.Context) (interface{}, error) { db.lock.RLock() - view, err := db.newUntrackedView(defaultPreallocationSize) - db.lock.RUnlock() + defer db.lock.RUnlock() - if err != nil { - return err - } - // Don't need to lock [view] because nobody else has a 
reference to it. - if err := view.insert(k, v); err != nil { - return err + if db.closed { + return nil, database.ErrClosed } - return view.commitToDB(ctx) + return db.baseDB.HealthCheck(ctx) } -func (db *Database) NewBatch() database.Batch { +func (db *merkleDB) NewBatch() database.Batch { return &batch{ db: db, } } -func (db *Database) NewIterator() database.Iterator { - return &iterator{ - nodeIter: db.nodeDB.NewIterator(), - db: db, - } +func (db *merkleDB) NewIterator() database.Iterator { + return db.NewIteratorWithStartAndPrefix(nil, nil) } -func (db *Database) NewIteratorWithStart(start []byte) database.Iterator { - return &iterator{ - nodeIter: db.nodeDB.NewIteratorWithStart(newPath(start).Bytes()), - db: db, - } +func (db *merkleDB) NewIteratorWithStart(start []byte) database.Iterator { + return db.NewIteratorWithStartAndPrefix(start, nil) } -func (db *Database) NewIteratorWithPrefix(prefix []byte) database.Iterator { - return &iterator{ - nodeIter: db.nodeDB.NewIteratorWithPrefix(newPath(prefix).Bytes()), - db: db, - } +func (db *merkleDB) NewIteratorWithPrefix(prefix []byte) database.Iterator { + return db.NewIteratorWithStartAndPrefix(nil, prefix) } -func (db *Database) NewIteratorWithStartAndPrefix(start, prefix []byte) database.Iterator { - startBytes := newPath(start).Bytes() - prefixBytes := newPath(prefix).Bytes() - return &iterator{ - nodeIter: db.nodeDB.NewIteratorWithStartAndPrefix(startBytes, prefixBytes), - db: db, - } +func (db *merkleDB) NewIteratorWithStartAndPrefix(start, prefix []byte) database.Iterator { + return db.valueNodeDB.newIteratorWithStartAndPrefix(start, prefix) } -// If [node] is an intermediary node, puts it in [nodeDB]. -// Note this is called by [db.nodeCache] with its lock held, so -// the movement of [node] from [db.nodeCache] to [db.nodeDB] is atomic. -// As soon as [db.nodeCache] no longer has [node], [db.nodeDB] does. -// Non-nil error is fatal -- causes [db] to close. 
-func (db *Database) onEviction(node *node) error { - if node == nil || node.hasValue() { - // only persist intermediary nodes - return nil - } +func (db *merkleDB) Put(k, v []byte) error { + return db.PutContext(context.Background(), k, v) +} - nodeBytes, err := node.marshal() - if err != nil { - db.onEvictionErr.Set(err) - // Prevent reads/writes from/to [db.nodeDB] to avoid inconsistent state. - _ = db.nodeDB.Close() - // This is a fatal error. - go db.Close() - return err +// Same as [Put] but takes in a context used for tracing. +func (db *merkleDB) PutContext(ctx context.Context, k, v []byte) error { + db.commitLock.Lock() + defer db.commitLock.Unlock() + + if db.closed { + return database.ErrClosed } - if err := db.nodeDB.Put(node.key.Bytes(), nodeBytes); err != nil { - db.onEvictionErr.Set(err) - _ = db.nodeDB.Close() - go db.Close() + view, err := newView(db, db, ViewChanges{BatchOps: []database.BatchOp{{Key: k, Value: v}}}) + if err != nil { return err } - return nil + return view.commitToDB(ctx) } -// Inserts the key/value pair into the db. -func (db *Database) Put(k, v []byte) error { - return db.Insert(context.Background(), k, v) +func (db *merkleDB) Delete(key []byte) error { + return db.DeleteContext(context.Background(), key) } -func (db *Database) Remove(ctx context.Context, key []byte) error { +func (db *merkleDB) DeleteContext(ctx context.Context, key []byte) error { db.commitLock.Lock() defer db.commitLock.Unlock() - db.lock.RLock() - view, err := db.newUntrackedView(defaultPreallocationSize) - db.lock.RUnlock() - if err != nil { - return err + if db.closed { + return database.ErrClosed } - // Don't need to lock [view] because nobody else has a reference to it. 
- if err = view.remove(key); err != nil { + + view, err := newView(db, db, + ViewChanges{ + BatchOps: []database.BatchOp{{ + Key: key, + Delete: true, + }}, + ConsumeBytes: true, + }) + if err != nil { return err } return view.commitToDB(ctx) } -func (db *Database) commitBatch(ops []database.BatchOp) error { +// Assumes values inside [ops] are safe to reference after the function +// returns. Assumes [db.lock] isn't held. +func (db *merkleDB) commitBatch(ops []database.BatchOp) error { db.commitLock.Lock() defer db.commitLock.Unlock() - view, err := db.prepareBatchView(ops) + if db.closed { + return database.ErrClosed + } + + view, err := newView(db, db, ViewChanges{BatchOps: ops, ConsumeBytes: true}) if err != nil { return err } return view.commitToDB(context.Background()) } -// CommitToParent is a no-op for the db because it has no parent -func (*Database) CommitToParent(context.Context) error { - return nil -} - -// commitToDB is a no-op for the db because it is the db -func (*Database) commitToDB(context.Context) error { - return nil -} - -// commitChanges commits the changes in trieToCommit to the db -func (db *Database) commitChanges(ctx context.Context, trieToCommit *trieView) error { +// commitChanges commits the changes in [trieToCommit] to [db]. +// Assumes [trieToCommit]'s node IDs have been calculated. +// Assumes [db.commitLock] is held. 
+func (db *merkleDB) commitChanges(ctx context.Context, trieToCommit *view) error { db.lock.Lock() defer db.lock.Unlock() - if trieToCommit == nil { + switch { + case db.closed: + return database.ErrClosed + case trieToCommit == nil: return nil - } - if trieToCommit.isInvalid() { + case trieToCommit.isInvalid(): return ErrInvalid + case trieToCommit.committed: + return ErrCommitted + case trieToCommit.db != trieToCommit.getParentTrie(): + return ErrParentNotDatabase } + changes := trieToCommit.changes - _, span := db.tracer.Start(ctx, "MerkleDB.commitChanges", oteltrace.WithAttributes( + _, span := db.infoTracer.Start(ctx, "MerkleDB.commitChanges", oteltrace.WithAttributes( attribute.Int("nodesChanged", len(changes.nodes)), attribute.Int("valuesChanged", len(changes.values)), )) defer span.End() - if db.closed { - return database.ErrClosed - } - // invalidate all child views except for the view being committed db.invalidateChildrenExcept(trieToCommit) @@ -759,76 +939,59 @@ func (db *Database) commitChanges(ctx context.Context, trieToCommit *trieView) e return nil } - rootChange, ok := changes.nodes[RootPath] - if !ok { - return errNoNewRoot - } + currentValueNodeBatch := db.valueNodeDB.NewBatch() + _, nodesSpan := db.infoTracer.Start(ctx, "MerkleDB.commitChanges.writeNodes") + for key, nodeChange := range changes.nodes { + shouldAddIntermediate := nodeChange.after != nil && !nodeChange.after.hasValue() + shouldDeleteIntermediate := !shouldAddIntermediate && nodeChange.before != nil && !nodeChange.before.hasValue() - // commit any outstanding cache evicted nodes. - // Note that we do this here because below we may Abort - // [db.nodeDB], which would cause us to lose these changes. 
- if err := db.nodeDB.Commit(); err != nil { - return err - } + shouldAddValue := nodeChange.after != nil && nodeChange.after.hasValue() + shouldDeleteValue := !shouldAddValue && nodeChange.before != nil && nodeChange.before.hasValue() - _, nodesSpan := db.tracer.Start(ctx, "MerkleDB.commitChanges.writeNodes") - for key, nodeChange := range changes.nodes { - if nodeChange.after == nil { - db.metrics.IOKeyWrite() - if err := db.nodeDB.Delete(key.Bytes()); err != nil { - db.nodeDB.Abort() + if shouldAddIntermediate { + if err := db.intermediateNodeDB.Put(key, nodeChange.after); err != nil { nodesSpan.End() return err } - } else if nodeChange.after.hasValue() || (nodeChange.before != nil && nodeChange.before.hasValue()) { - // Note: If [nodeChange.after] is an intermediary node we only - // persist [nodeChange] if [nodeChange.before] was a leaf. - // This guarantees that the key/value pairs are correctly persisted - // on disk, without being polluted by the previous value. - // Otherwise, intermediary nodes are persisted on cache eviction or - // shutdown. - db.metrics.IOKeyWrite() - nodeBytes, err := nodeChange.after.marshal() - if err != nil { - db.nodeDB.Abort() + } else if shouldDeleteIntermediate { + if err := db.intermediateNodeDB.Delete(key); err != nil { nodesSpan.End() return err } + } - if err := db.nodeDB.Put(key.Bytes(), nodeBytes); err != nil { - db.nodeDB.Abort() - nodesSpan.End() - return err - } + if shouldAddValue { + currentValueNodeBatch.Put(key, nodeChange.after) + } else if shouldDeleteValue { + currentValueNodeBatch.Delete(key) } } nodesSpan.End() - _, commitSpan := db.tracer.Start(ctx, "MerkleDB.commitChanges.dbCommit") - err := db.nodeDB.Commit() + _, commitSpan := db.infoTracer.Start(ctx, "MerkleDB.commitChanges.valueNodeDBCommit") + err := currentValueNodeBatch.Write() commitSpan.End() if err != nil { - db.nodeDB.Abort() return err } - // Only modify in-memory state after the commit succeeds - // so that we don't need to clean up on error. 
- db.root = rootChange.after + db.history.record(changes) - for key, nodeChange := range changes.nodes { - if err := db.putNodeInCache(key, nodeChange.after); err != nil { - return err - } + // Update root in database. + db.root = changes.rootChange.after + db.rootID = changes.rootID + + if db.root.IsNothing() { + return db.baseDB.Delete(rootDBKey) } - db.history.record(changes) - return nil + rootKey := codec.encodeKey(db.root.Value().key) + return db.baseDB.Put(rootDBKey, rootKey) } // moveChildViewsToDB removes any child views from the trieToCommit and moves them to the db // assumes [db.lock] is held -func (db *Database) moveChildViewsToDB(trieToCommit *trieView) { +func (db *merkleDB) moveChildViewsToDB(trieToCommit *view) { trieToCommit.validityTrackingLock.Lock() defer trieToCommit.validityTrackingLock.Unlock() @@ -836,18 +999,156 @@ func (db *Database) moveChildViewsToDB(trieToCommit *trieView) { childView.updateParent(db) db.childViews = append(db.childViews, childView) } - trieToCommit.childViews = make([]*trieView, 0, defaultPreallocationSize) + trieToCommit.childViews = make([]*view, 0, defaultPreallocationSize) } -// CommitToDB is a No Op for db since it is already in sync with itself -// here to satisfy TrieView interface -func (*Database) CommitToDB(context.Context) error { +// CommitToDB is a no-op for db since it is already in sync with itself. +// This exists to satisfy the View interface. +func (*merkleDB) CommitToDB(context.Context) error { + return nil +} + +// This is defined on merkleDB instead of ChangeProof +// because it accesses database internals. +// Assumes [db.lock] isn't held. 
+func (db *merkleDB) VerifyChangeProof( + ctx context.Context, + proof *ChangeProof, + start maybe.Maybe[[]byte], + end maybe.Maybe[[]byte], + expectedEndRootID ids.ID, +) error { + switch { + case start.HasValue() && end.HasValue() && bytes.Compare(start.Value(), end.Value()) > 0: + return ErrStartAfterEnd + case proof.Empty(): + return ErrEmptyProof + case end.HasValue() && len(proof.KeyChanges) == 0 && len(proof.EndProof) == 0: + // We requested an end proof but didn't get one. + return ErrNoEndProof + case start.HasValue() && len(proof.StartProof) == 0 && len(proof.EndProof) == 0: + // We requested a start proof but didn't get one. + // Note that we also have to check that [proof.EndProof] is empty + // to handle the case that the start proof is empty because all + // its nodes are also in the end proof, and those nodes are omitted. + return ErrNoStartProof + } + + // Make sure the key-value pairs are sorted and in [start, end]. + if err := verifyKeyChanges(proof.KeyChanges, start, end); err != nil { + return err + } + + smallestKey := maybe.Bind(start, ToKey) + + // Make sure the start proof, if given, is well-formed. + if err := verifyProofPath(proof.StartProof, smallestKey); err != nil { + return err + } + + // Find the greatest key in [proof.KeyChanges] + // Note that [proof.EndProof] is a proof for this key. + // [largestKey] is also used when we add children of proof nodes to [trie] below. + largestKey := maybe.Bind(end, ToKey) + if len(proof.KeyChanges) > 0 { + // If [proof] has key-value pairs, we should insert children + // greater than [end] to ancestors of the node containing [end] + // so that we get the expected root ID. + largestKey = maybe.Some(ToKey(proof.KeyChanges[len(proof.KeyChanges)-1].Key)) + } + + // Make sure the end proof, if given, is well-formed. 
+ if err := verifyProofPath(proof.EndProof, largestKey); err != nil { + return err + } + + keyValues := make(map[Key]maybe.Maybe[[]byte], len(proof.KeyChanges)) + for _, keyValue := range proof.KeyChanges { + keyValues[ToKey(keyValue.Key)] = keyValue.Value + } + + // want to prevent commit writes to DB, but not prevent DB reads + db.commitLock.RLock() + defer db.commitLock.RUnlock() + + if db.closed { + return database.ErrClosed + } + + if err := verifyAllChangeProofKeyValuesPresent( + ctx, + db, + proof.StartProof, + smallestKey, + largestKey, + keyValues, + ); err != nil { + return err + } + + if err := verifyAllChangeProofKeyValuesPresent( + ctx, + db, + proof.EndProof, + smallestKey, + largestKey, + keyValues, + ); err != nil { + return err + } + + // Insert the key-value pairs into the trie. + ops := make([]database.BatchOp, len(proof.KeyChanges)) + for i, kv := range proof.KeyChanges { + ops[i] = database.BatchOp{ + Key: kv.Key, + Value: kv.Value.Value(), + Delete: kv.Value.IsNothing(), + } + } + + // Don't need to lock [view] because nobody else has a reference to it. + view, err := newView(db, db, ViewChanges{BatchOps: ops, ConsumeBytes: true}) + if err != nil { + return err + } + + // For all the nodes along the edges of the proof, insert the children whose + // keys are less than [insertChildrenLessThan] or whose keys are greater + // than [insertChildrenGreaterThan] into the trie so that we get the + // expected root ID (if this proof is valid). + if err := addPathInfo( + view, + proof.StartProof, + smallestKey, + largestKey, + ); err != nil { + return err + } + if err := addPathInfo( + view, + proof.EndProof, + smallestKey, + largestKey, + ); err != nil { + return err + } + + // Make sure we get the expected root. 
+ calculatedRoot, err := view.GetMerkleRoot(ctx) + if err != nil { + return err + } + if expectedEndRootID != calculatedRoot { + return fmt.Errorf("%w:[%s], expected:[%s]", ErrInvalidProof, calculatedRoot, expectedEndRootID) + } + return nil } -// invalidate and remove any child views that aren't the exception +// Invalidates and removes any child views that aren't [exception]. // Assumes [db.lock] is held. -func (db *Database) invalidateChildrenExcept(exception *trieView) { +func (db *merkleDB) invalidateChildrenExcept(exception *view) { isTrackedView := false for _, childView := range db.childViews { @@ -857,85 +1158,85 @@ func (db *Database) invalidateChildrenExcept(exception *trieView) { isTrackedView = true } } - db.childViews = make([]*trieView, 0, defaultPreallocationSize) + db.childViews = make([]*view, 0, defaultPreallocationSize) if isTrackedView { db.childViews = append(db.childViews, exception) } } -func (db *Database) initializeRootIfNeeded() (ids.ID, error) { - // ensure that root exists - nodeBytes, err := db.nodeDB.Get(rootKey) - if err == nil { - // Root already exists, so parse it and set the in-mem copy - db.root, err = parseNode(RootPath, nodeBytes) - if err != nil { - return ids.Empty, err - } - if err := db.root.calculateID(db.metrics); err != nil { - return ids.Empty, err +// If the root is on disk, set [db.root] to it. +// Otherwise leave [db.root] as Nothing. +func (db *merkleDB) initializeRoot() error { + rootKeyBytes, err := db.baseDB.Get(rootDBKey) + if err != nil { + if !errors.Is(err, database.ErrNotFound) { + return err } - return db.root.id, nil - } - if err != database.ErrNotFound { - return ids.Empty, err + // Root isn't on disk. + return nil } - // Root doesn't exist; make a new one. - db.root = newNode(nil, RootPath) - - // update its ID - if err := db.root.calculateID(db.metrics); err != nil { - return ids.Empty, err + // Root is on disk. 
+ rootKey, err := codec.decodeKey(rootKeyBytes) + if err != nil { + return err } - // write the newly constructed root to the DB - rootBytes, err := db.root.marshal() + // First, see if root is an intermediate node. + var root *node + root, err = db.getEditableNode(rootKey, false /* hasValue */) if err != nil { - return ids.Empty, err - } - if err := db.nodeDB.Put(rootKey, rootBytes); err != nil { - return ids.Empty, err + if !errors.Is(err, database.ErrNotFound) { + return err + } + + // The root must be a value node. + root, err = db.getEditableNode(rootKey, true /* hasValue */) + if err != nil { + return err + } } - return db.root.id, db.nodeDB.Commit() + db.rootID = root.calculateID(db.metrics) + db.root = maybe.Some(root) + return nil } // Returns a view of the trie as it was when it had root [rootID] for keys within range [start, end]. +// If [start] is Nothing, there's no lower bound on the range. +// If [end] is Nothing, there's no upper bound on the range. // Assumes [db.commitLock] is read locked. -func (db *Database) getHistoricalViewForRange( +func (db *merkleDB) getTrieAtRootForRange( rootID ids.ID, - start []byte, - end []byte, -) (*trieView, error) { - currentRootID := db.getMerkleRoot() - + start maybe.Maybe[[]byte], + end maybe.Maybe[[]byte], +) (Trie, error) { // looking for the trie's current root id, so return the trie unmodified - if currentRootID == rootID { - return newTrieView(db, db, db.root.clone(), 100) + if rootID == db.getMerkleRoot() { + return db, nil } changeHistory, err := db.history.getChangesToGetToRoot(rootID, start, end) if err != nil { return nil, err } - return newTrieViewWithChanges(db, db, changeHistory, len(changeHistory.nodes)) + return newViewWithChanges(db, changeHistory) } -// Returns all of the keys in range [start, end] that aren't in [keySet]. -// If [start] is nil, then the range has no lower bound. -// If [end] is nil, then the range has no upper bound. 
-func (db *Database) getKeysNotInSet(start, end []byte, keySet set.Set[string]) ([][]byte, error) { +// Returns all keys in range [start, end] that aren't in [keySet]. +// If [start] is Nothing, then the range has no lower bound. +// If [end] is Nothing, then the range has no upper bound. +func (db *merkleDB) getKeysNotInSet(start, end maybe.Maybe[[]byte], keySet set.Set[string]) ([][]byte, error) { db.lock.RLock() defer db.lock.RUnlock() - it := db.NewIteratorWithStart(start) + it := db.NewIteratorWithStart(start.Value()) defer it.Release() keysNotInSet := make([][]byte, 0, keySet.Len()) for it.Next() { key := it.Key() - if len(end) != 0 && bytes.Compare(key, end) > 0 { + if end.HasValue() && bytes.Compare(key, end.Value()) > 0 { break } if !keySet.Contains(string(key)) { @@ -946,14 +1247,15 @@ func (db *Database) getKeysNotInSet(start, end []byte, keySet set.Set[string]) ( } // Returns a copy of the node with the given [key]. +// hasValue determines which db the key is looked up in (intermediateNodeDB or valueNodeDB) // This copy may be edited by the caller without affecting the database state. // Returns database.ErrNotFound if the node doesn't exist. // Assumes [db.lock] isn't held. -func (db *Database) getEditableNode(key path) (*node, error) { +func (db *merkleDB) getEditableNode(key Key, hasValue bool) (*node, error) { db.lock.RLock() defer db.lock.RUnlock() - n, err := db.getNode(key) + n, err := db.getNode(key, hasValue) if err != nil { return nil, err } @@ -961,187 +1263,93 @@ func (db *Database) getEditableNode(key path) (*node, error) { } // Returns the node with the given [key]. +// hasValue determines which db the key is looked up in (intermediateNodeDB or valueNodeDB) // Editing the returned node affects the database state. // Returns database.ErrNotFound if the node doesn't exist. // Assumes [db.lock] is read locked. 
-func (db *Database) getNode(key path) (*node, error) { - if key == RootPath { - return db.root, nil - } - - if n, isCached := db.getNodeInCache(key); isCached { - db.metrics.DBNodeCacheHit() - if n == nil { - return nil, database.ErrNotFound - } - return n, nil - } - - db.metrics.DBNodeCacheMiss() - db.metrics.IOKeyRead() - rawBytes, err := db.nodeDB.Get(key.Bytes()) - if err != nil { - if err == database.ErrNotFound { - // Cache the miss. - if err := db.putNodeInCache(key, nil); err != nil { - return nil, err - } - } - return nil, err - } - - node, err := parseNode(key, rawBytes) - if err != nil { - return nil, err +func (db *merkleDB) getNode(key Key, hasValue bool) (*node, error) { + switch { + case db.closed: + return nil, database.ErrClosed + case db.root.HasValue() && key == db.root.Value().key: + return db.root.Value(), nil + case hasValue: + return db.valueNodeDB.Get(key) + default: + return db.intermediateNodeDB.Get(key) } +} - err = db.putNodeInCache(key, node) - return node, err +// Assumes [db.lock] or [db.commitLock] is read locked. +func (db *merkleDB) getRoot() maybe.Maybe[*node] { + return db.root } -// If [lock], grabs [db.lock]'s read lock. -// Otherwise assumes [db.lock] is already read locked. -func (db *Database) getKeyValues( - start []byte, - end []byte, - maxLength int, - keysToIgnore set.Set[string], - lock bool, -) ([]KeyValue, error) { - if lock { - db.lock.RLock() - defer db.lock.RUnlock() - } - if maxLength <= 0 { - return nil, fmt.Errorf("%w but was %d", ErrInvalidMaxLength, maxLength) - } +func (db *merkleDB) Clear() error { + db.commitLock.Lock() + defer db.commitLock.Unlock() - it := db.NewIteratorWithStart(start) - defer it.Release() + db.lock.Lock() + defer db.lock.Unlock() - remainingLength := maxLength - result := make([]KeyValue, 0, maxLength) - // Keep adding key/value pairs until one of the following: - // * We hit a key that is lexicographically larger than the end key. - // * [maxLength] elements are in [result]. 
- // * There are no more values to add. - for remainingLength > 0 && it.Next() { - key := it.Key() - if len(end) != 0 && bytes.Compare(it.Key(), end) > 0 { - break - } - if keysToIgnore.Contains(string(key)) { - continue - } - result = append(result, KeyValue{ - Key: key, - Value: it.Value(), - }) - remainingLength-- + // Clear nodes from disk and caches + if err := db.valueNodeDB.Clear(); err != nil { + return err } - - return result, it.Error() -} - -// Returns a new view atop [db] with the changes in [ops] applied to it. -func (db *Database) prepareBatchView( - ops []database.BatchOp, -) (*trieView, error) { - db.lock.RLock() - view, err := db.newUntrackedView(len(ops)) - db.lock.RUnlock() - if err != nil { - return nil, err + if err := db.intermediateNodeDB.Clear(); err != nil { + return err } - // Don't need to lock [view] because nobody else has a reference to it. - // write into the trie - for _, op := range ops { - if op.Delete { - if err := view.remove(op.Key); err != nil { - return nil, err - } - } else if err := view.insert(op.Key, op.Value); err != nil { - return nil, err - } - } + // Clear root + db.root = maybe.Nothing[*node]() + db.rootID = ids.Empty - return view, nil + // Clear history + db.history = newTrieHistory(db.history.maxHistoryLen) + db.history.record(&changeSummary{ + rootID: db.rootID, + values: map[Key]*change[maybe.Maybe[[]byte]]{}, + nodes: map[Key]*change[*node]{}, + }) + return nil } -// Returns a new view atop [db] with the key/value pairs in [proof.KeyValues] -// inserted and the key/value pairs in [proof.DeletedKeys] removed. -func (db *Database) prepareChangeProofView(proof *ChangeProof) (*trieView, error) { - db.lock.RLock() - view, err := db.newUntrackedView(len(proof.KeyValues)) - db.lock.RUnlock() - if err != nil { - return nil, err - } - // Don't need to lock [view] because nobody else has a reference to it. 
- - for _, kv := range proof.KeyValues { - if err := view.insert(kv.Key, kv.Value); err != nil { - return nil, err - } - } - - for _, keyToDelete := range proof.DeletedKeys { - if err := view.remove(keyToDelete); err != nil { - return nil, err - } - } - return view, nil +func (db *merkleDB) getTokenSize() int { + return db.tokenSize } -// Returns a new view atop [db] with the key/value pairs in [proof.KeyValues] added and -// any existing key-value pairs in the proof's range but not in the proof removed. -// assumes [db.commitLock] is held -func (db *Database) prepareRangeProofView(start []byte, proof *RangeProof) (*trieView, error) { - // Don't need to lock [view] because nobody else has a reference to it. - db.lock.RLock() - view, err := db.newUntrackedView(len(proof.KeyValues)) - db.lock.RUnlock() - - if err != nil { - return nil, err - } - keys := set.NewSet[string](len(proof.KeyValues)) - for _, kv := range proof.KeyValues { - keys.Add(string(kv.Key)) - if err := view.insert(kv.Key, kv.Value); err != nil { - return nil, err - } - } - - var largestKey []byte - if len(proof.KeyValues) > 0 { - largestKey = proof.KeyValues[len(proof.KeyValues)-1].Key - } - keysToDelete, err := db.getKeysNotInSet(start, largestKey, keys) - if err != nil { - return nil, err - } - for _, keyToDelete := range keysToDelete { - if err := view.remove(keyToDelete); err != nil { - return nil, err - } - } - return view, nil +// Returns [key] prefixed by [prefix]. +// The returned []byte is taken from [bufferPool] and +// should be returned to it when the caller is done with it. +func addPrefixToKey(bufferPool *sync.Pool, prefix []byte, key []byte) []byte { + prefixLen := len(prefix) + keyLen := prefixLen + len(key) + prefixedKey := getBufferFromPool(bufferPool, keyLen) + copy(prefixedKey, prefix) + copy(prefixedKey[prefixLen:], key) + return prefixedKey } -// Non-nil error is fatal -- [db] will close. 
-func (db *Database) putNodeInCache(key path, n *node) error { - // TODO Cache metrics - // Note that this may cause a node to be evicted from the cache, - // which will call [OnEviction]. - return db.nodeCache.Put(key, n) +// Returns a []byte from [bufferPool] with length exactly [size]. +// The []byte is not guaranteed to be zeroed. +func getBufferFromPool(bufferPool *sync.Pool, size int) []byte { + buffer := bufferPool.Get().([]byte) + if cap(buffer) >= size { + // The [] byte we got from the pool is big enough to hold the prefixed key + buffer = buffer[:size] + } else { + // The []byte from the pool wasn't big enough. + // Put it back and allocate a new, bigger one + bufferPool.Put(buffer) + buffer = make([]byte, size) + } + return buffer } -func (db *Database) getNodeInCache(key path) (*node, bool) { - // TODO Cache metrics - if node, ok := db.nodeCache.Get(key); ok { - return node, true +// cacheEntrySize returns a rough approximation of the memory consumed by storing the key and node. +func cacheEntrySize(key Key, n *node) int { + if n == nil { + return cacheEntryOverHead + len(key.Bytes()) } - return nil, false + return cacheEntryOverHead + len(key.Bytes()) + codec.encodedDBNodeSize(&n.dbNode) } diff --git a/avalanchego/x/merkledb/db_test.go b/avalanchego/x/merkledb/db_test.go index 8a0f98d3..ed8dc568 100644 --- a/avalanchego/x/merkledb/db_test.go +++ b/avalanchego/x/merkledb/db_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package merkledb @@ -6,10 +6,14 @@ package merkledb import ( "bytes" "context" + "fmt" "math/rand" + "slices" "strconv" "testing" + "time" + "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/database" @@ -17,108 +21,146 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/trace" "github.com/ava-labs/avalanchego/utils/hashing" + "github.com/ava-labs/avalanchego/utils/maybe" + "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/utils/units" ) -const minCacheSize = 1000 +const defaultHistoryLength = 300 -func newNoopTracer() trace.Tracer { - tracer, _ := trace.New(trace.Config{Enabled: false}) - return tracer +// newDB returns a new merkle database with the underlying type so that tests can access unexported fields +func newDB(ctx context.Context, db database.Database, config Config) (*merkleDB, error) { + db, err := New(ctx, db, config) + if err != nil { + return nil, err + } + return db.(*merkleDB), nil +} + +func newDefaultConfig() Config { + return Config{ + IntermediateWriteBatchSize: 10, + HistoryLength: defaultHistoryLength, + ValueNodeCacheSize: units.MiB, + IntermediateNodeCacheSize: units.MiB, + IntermediateWriteBufferSize: units.KiB, + Reg: prometheus.NewRegistry(), + Tracer: trace.Noop, + BranchFactor: BranchFactor16, + } } func Test_MerkleDB_Get_Safety(t *testing.T) { + require := require.New(t) + db, err := getBasicDB() - require.NoError(t, err) - require.NoError(t, db.Put([]byte{0}, []byte{0, 1, 2})) + require.NoError(err) - val, err := db.Get([]byte{0}) - require.NoError(t, err) - n, err := db.getNode(newPath([]byte{0})) - require.NoError(t, err) - val[0] = 1 + keyBytes := []byte{0} + require.NoError(db.Put(keyBytes, []byte{0, 1, 2})) + + val, err := db.Get(keyBytes) + require.NoError(err) + + n, err := db.getNode(ToKey(keyBytes), true) + require.NoError(err) // node's value shouldn't be affected by the edit - 
require.NotEqual(t, val, n.value.value) + originalVal := slices.Clone(val) + val[0]++ + require.Equal(originalVal, n.value.Value()) } func Test_MerkleDB_GetValues_Safety(t *testing.T) { + require := require.New(t) + db, err := getBasicDB() - require.NoError(t, err) - require.NoError(t, db.Put([]byte{0}, []byte{0, 1, 2})) + require.NoError(err) + + keyBytes := []byte{0} + value := []byte{0, 1, 2} + require.NoError(db.Put(keyBytes, value)) - vals, errs := db.GetValues(context.Background(), [][]byte{{0}}) - require.Len(t, errs, 1) - require.NoError(t, errs[0]) - require.Equal(t, []byte{0, 1, 2}, vals[0]) - vals[0][0] = 1 + gotValues, errs := db.GetValues(context.Background(), [][]byte{keyBytes}) + require.Len(errs, 1) + require.NoError(errs[0]) + require.Equal(value, gotValues[0]) + gotValues[0][0]++ // editing the value array shouldn't affect the db - vals, errs = db.GetValues(context.Background(), [][]byte{{0}}) - require.Len(t, errs, 1) - require.NoError(t, errs[0]) - require.Equal(t, []byte{0, 1, 2}, vals[0]) + gotValues, errs = db.GetValues(context.Background(), [][]byte{keyBytes}) + require.Len(errs, 1) + require.NoError(errs[0]) + require.Equal(value, gotValues[0]) } func Test_MerkleDB_DB_Interface(t *testing.T) { - for _, test := range database.Tests { - db, err := getBasicDB() - require.NoError(t, err) - test(t, db) + for _, bf := range validBranchFactors { + for name, test := range database.Tests { + t.Run(fmt.Sprintf("%s_%d", name, bf), func(t *testing.T) { + db, err := getBasicDBWithBranchFactor(bf) + require.NoError(t, err) + test(t, db) + }) + } } } func Benchmark_MerkleDB_DBInterface(b *testing.B) { for _, size := range database.BenchmarkSizes { keys, values := database.SetupBenchmark(b, size[0], size[1], size[2]) - for _, bench := range database.Benchmarks { - db, err := getBasicDB() - require.NoError(b, err) - bench(b, db, "merkledb", keys, values) + for _, bf := range validBranchFactors { + for name, bench := range database.Benchmarks { + 
b.Run(fmt.Sprintf("merkledb_%d_%d_pairs_%d_keys_%d_values_%s", bf, size[0], size[1], size[2], name), func(b *testing.B) { + db, err := getBasicDBWithBranchFactor(bf) + require.NoError(b, err) + bench(b, db, keys, values) + }) + } } } } func Test_MerkleDB_DB_Load_Root_From_DB(t *testing.T) { require := require.New(t) - rdb := memdb.New() - defer rdb.Close() + baseDB := memdb.New() + defer baseDB.Close() db, err := New( context.Background(), - rdb, - Config{ - Tracer: newNoopTracer(), - HistoryLength: 100, - NodeCacheSize: 100, - }, + baseDB, + newDefaultConfig(), ) require.NoError(err) - // Populate initial set of keys - view, err := db.NewView() + // Populate initial set of key-value pairs + keyCount := 100 + ops := make([]database.BatchOp, 0, keyCount) require.NoError(err) - for i := 0; i < 100; i++ { + for i := 0; i < keyCount; i++ { k := []byte(strconv.Itoa(i)) - require.NoError(view.Insert(context.Background(), k, hashing.ComputeHash256(k))) + ops = append(ops, database.BatchOp{ + Key: k, + Value: hashing.ComputeHash256(k), + }) } - require.NoError(view.commitToDB(context.Background())) + view, err := db.NewView(context.Background(), ViewChanges{BatchOps: ops}) + require.NoError(err) + require.NoError(view.CommitToDB(context.Background())) root, err := db.GetMerkleRoot(context.Background()) require.NoError(err) require.NoError(db.Close()) - // reloading the DB, should set the root back to the one that was saved to the memdb + // reloading the db should set the root back to the one that was saved to [baseDB] db, err = New( context.Background(), - rdb, - Config{ - Tracer: newNoopTracer(), - HistoryLength: 100, - NodeCacheSize: 100, - }, + baseDB, + newDefaultConfig(), ) require.NoError(err) + reloadedRoot, err := db.GetMerkleRoot(context.Background()) require.NoError(err) require.Equal(root, reloadedRoot) @@ -127,325 +169,333 @@ func Test_MerkleDB_DB_Load_Root_From_DB(t *testing.T) { func Test_MerkleDB_DB_Rebuild(t *testing.T) { require := require.New(t) - rdb := 
memdb.New() - defer rdb.Close() + initialSize := 5_000 - initialSize := 10_000 + config := newDefaultConfig() + config.ValueNodeCacheSize = uint(initialSize) + config.IntermediateNodeCacheSize = uint(initialSize) - db, err := New( + db, err := newDB( context.Background(), - rdb, - Config{ - Tracer: newNoopTracer(), - HistoryLength: 100, - NodeCacheSize: initialSize, - }, + memdb.New(), + config, ) require.NoError(err) // Populate initial set of keys - view, err := db.NewView() + ops := make([]database.BatchOp, 0, initialSize) require.NoError(err) for i := 0; i < initialSize; i++ { k := []byte(strconv.Itoa(i)) - require.NoError(view.Insert(context.Background(), k, hashing.ComputeHash256(k))) + ops = append(ops, database.BatchOp{ + Key: k, + Value: hashing.ComputeHash256(k), + }) } + view, err := db.NewView(context.Background(), ViewChanges{BatchOps: ops}) + require.NoError(err) require.NoError(view.CommitToDB(context.Background())) + // Get root root, err := db.GetMerkleRoot(context.Background()) require.NoError(err) - require.NoError(db.rebuild(context.Background())) + // Rebuild + require.NoError(db.rebuild(context.Background(), initialSize)) + // Assert root is the same after rebuild rebuiltRoot, err := db.GetMerkleRoot(context.Background()) require.NoError(err) require.Equal(root, rebuiltRoot) + + // add variation where root has a value + require.NoError(db.Put(nil, []byte{})) + + root, err = db.GetMerkleRoot(context.Background()) + require.NoError(err) + + require.NoError(db.rebuild(context.Background(), initialSize)) + + rebuiltRoot, err = db.GetMerkleRoot(context.Background()) + require.NoError(err) + require.Equal(root, rebuiltRoot) } func Test_MerkleDB_Failed_Batch_Commit(t *testing.T) { + require := require.New(t) + memDB := memdb.New() db, err := New( context.Background(), memDB, - Config{ - Tracer: newNoopTracer(), - HistoryLength: 300, - }, + newDefaultConfig(), ) - require.NoError(t, err) + require.NoError(err) _ = memDB.Close() batch := db.NewBatch() 
- err = batch.Put([]byte("key1"), []byte("1")) - require.NoError(t, err) - err = batch.Put([]byte("key2"), []byte("2")) - require.NoError(t, err) - err = batch.Put([]byte("key3"), []byte("3")) - require.NoError(t, err) + require.NoError(batch.Put([]byte("key1"), []byte("1"))) + require.NoError(batch.Put([]byte("key2"), []byte("2"))) + require.NoError(batch.Put([]byte("key3"), []byte("3"))) err = batch.Write() - // batch fails - require.ErrorIs(t, err, database.ErrClosed) + require.ErrorIs(err, database.ErrClosed) } func Test_MerkleDB_Value_Cache(t *testing.T) { + require := require.New(t) + memDB := memdb.New() db, err := New( context.Background(), memDB, - Config{ - Tracer: newNoopTracer(), - HistoryLength: 300, - NodeCacheSize: minCacheSize, - }, + newDefaultConfig(), ) - require.NoError(t, err) + require.NoError(err) batch := db.NewBatch() - err = batch.Put([]byte("key1"), []byte("1")) - require.NoError(t, err) - - err = batch.Put([]byte("key2"), []byte("2")) - require.NoError(t, err) - - require.NoError(t, err) - err = batch.Write() - require.NoError(t, err) + key1, key2 := []byte("key1"), []byte("key2") + require.NoError(batch.Put(key1, []byte("1"))) + require.NoError(batch.Put([]byte("key2"), []byte("2"))) + require.NoError(batch.Write()) batch = db.NewBatch() // force key2 to be inserted into the cache as not found - err = batch.Delete([]byte("key2")) - require.NoError(t, err) - err = batch.Write() - require.NoError(t, err) + require.NoError(batch.Delete(key2)) + require.NoError(batch.Write()) - _ = memDB.Close() + require.NoError(memDB.Close()) // still works because key1 is read from cache - value, err := db.Get([]byte("key1")) - require.NoError(t, err) - require.Equal(t, []byte("1"), value) + value, err := db.Get(key1) + require.NoError(err) + require.Equal([]byte("1"), value) // still returns missing instead of closed because key2 is read from cache - _, err = db.Get([]byte("key2")) - require.ErrorIs(t, err, database.ErrNotFound) + _, err = db.Get(key2) 
+ require.ErrorIs(err, database.ErrNotFound) } func Test_MerkleDB_Invalidate_Siblings_On_Commit(t *testing.T) { + require := require.New(t) + dbTrie, err := getBasicDB() - require.NoError(t, err) - require.NotNil(t, dbTrie) + require.NoError(err) + require.NotNil(dbTrie) - viewToCommit, err := dbTrie.NewView() - require.NoError(t, err) + viewToCommit, err := dbTrie.NewView( + context.Background(), + ViewChanges{ + BatchOps: []database.BatchOp{ + {Key: []byte{0}, Value: []byte{0}}, + }, + }, + ) + require.NoError(err) - sibling1, err := dbTrie.NewView() - require.NoError(t, err) - sibling2, err := dbTrie.NewView() - require.NoError(t, err) + // Create siblings of viewToCommit + sibling1, err := dbTrie.NewView(context.Background(), ViewChanges{}) + require.NoError(err) + sibling2, err := dbTrie.NewView(context.Background(), ViewChanges{}) + require.NoError(err) - require.False(t, sibling1.(*trieView).isInvalid()) - require.False(t, sibling2.(*trieView).isInvalid()) + require.False(sibling1.(*view).isInvalid()) + require.False(sibling2.(*view).isInvalid()) - require.NoError(t, viewToCommit.Insert(context.Background(), []byte{0}, []byte{0})) - require.NoError(t, viewToCommit.CommitToDB(context.Background())) + // Committing viewToCommit should invalidate siblings + require.NoError(viewToCommit.CommitToDB(context.Background())) - require.True(t, sibling1.(*trieView).isInvalid()) - require.True(t, sibling2.(*trieView).isInvalid()) - require.False(t, viewToCommit.(*trieView).isInvalid()) + require.True(sibling1.(*view).isInvalid()) + require.True(sibling2.(*view).isInvalid()) + require.False(viewToCommit.(*view).isInvalid()) } -func Test_MerkleDB_Commit_Proof_To_Empty_Trie(t *testing.T) { - db, err := getBasicDB() - require.NoError(t, err) - batch := db.NewBatch() - err = batch.Put([]byte("key1"), []byte("1")) - require.NoError(t, err) - err = batch.Put([]byte("key2"), []byte("2")) - require.NoError(t, err) - err = batch.Put([]byte("key3"), []byte("3")) - 
require.NoError(t, err) - err = batch.Write() - require.NoError(t, err) +func Test_MerkleDB_CommitRangeProof_DeletesValuesInRange(t *testing.T) { + require := require.New(t) - proof, err := db.GetRangeProof(context.Background(), []byte("key1"), []byte("key3"), 10) - require.NoError(t, err) + db, err := getBasicDB() + require.NoError(err) - freshDB, err := getBasicDB() - require.NoError(t, err) + // value that shouldn't be deleted + require.NoError(db.Put([]byte("key6"), []byte("3"))) - err = freshDB.CommitRangeProof(context.Background(), []byte("key1"), proof) - require.NoError(t, err) + startRoot, err := db.GetMerkleRoot(context.Background()) + require.NoError(err) - value, err := freshDB.Get([]byte("key2")) - require.NoError(t, err) - require.Equal(t, []byte("2"), value) + // Get an empty proof + proof, err := db.GetRangeProof( + context.Background(), + maybe.Nothing[[]byte](), + maybe.Some([]byte("key3")), + 10, + ) + require.NoError(err) - freshRoot, err := freshDB.GetMerkleRoot(context.Background()) - require.NoError(t, err) - oldRoot, err := db.GetMerkleRoot(context.Background()) - require.NoError(t, err) - require.Equal(t, oldRoot, freshRoot) -} + // confirm there are no key.values in the proof + require.Empty(proof.KeyValues) -func Test_MerkleDB_Commit_Proof_To_Filled_Trie(t *testing.T) { - db, err := getBasicDB() - require.NoError(t, err) + // add values to be deleted by proof commit batch := db.NewBatch() - err = batch.Put([]byte("key1"), []byte("1")) - require.NoError(t, err) - err = batch.Put([]byte("key2"), []byte("2")) - require.NoError(t, err) - err = batch.Put([]byte("key3"), []byte("3")) - require.NoError(t, err) - err = batch.Write() - require.NoError(t, err) - - proof, err := db.GetRangeProof(context.Background(), []byte("key1"), []byte("key3"), 10) - require.NoError(t, err) - - freshDB, err := getBasicDB() - require.NoError(t, err) - batch = freshDB.NewBatch() - err = batch.Put([]byte("key1"), []byte("3")) - require.NoError(t, err) - err = 
batch.Put([]byte("key2"), []byte("4")) - require.NoError(t, err) - err = batch.Put([]byte("key3"), []byte("5")) - require.NoError(t, err) - err = batch.Put([]byte("key25"), []byte("5")) - require.NoError(t, err) - err = batch.Write() - require.NoError(t, err) + require.NoError(batch.Put([]byte("key1"), []byte("1"))) + require.NoError(batch.Put([]byte("key2"), []byte("2"))) + require.NoError(batch.Put([]byte("key3"), []byte("3"))) + require.NoError(batch.Write()) - err = freshDB.CommitRangeProof(context.Background(), []byte("key1"), proof) - require.NoError(t, err) + // despite having no key/values in it, committing this proof should delete key1-key3. + require.NoError(db.CommitRangeProof(context.Background(), maybe.Nothing[[]byte](), maybe.Some([]byte("key3")), proof)) - value, err := freshDB.Get([]byte("key2")) - require.NoError(t, err) - require.Equal(t, []byte("2"), value) + afterCommitRoot, err := db.GetMerkleRoot(context.Background()) + require.NoError(err) - freshRoot, err := freshDB.GetMerkleRoot(context.Background()) - require.NoError(t, err) - oldRoot, err := db.GetMerkleRoot(context.Background()) - require.NoError(t, err) - require.Equal(t, oldRoot, freshRoot) + require.Equal(startRoot, afterCommitRoot) } -func Test_MerkleDB_GetValues(t *testing.T) { - db, err := getBasicDB() - require.NoError(t, err) +func Test_MerkleDB_CommitRangeProof_EmptyTrie(t *testing.T) { + require := require.New(t) - writeBasicBatch(t, db) - keys := [][]byte{{0}, {1}, {2}, {10}} - values, errors := db.GetValues(context.Background(), keys) - require.Len(t, values, len(keys)) - require.Len(t, errors, len(keys)) + // Populate [db1] with 3 key-value pairs. + db1, err := getBasicDB() + require.NoError(err) + batch := db1.NewBatch() + require.NoError(batch.Put([]byte("key1"), []byte("1"))) + require.NoError(batch.Put([]byte("key2"), []byte("2"))) + require.NoError(batch.Put([]byte("key3"), []byte("3"))) + require.NoError(batch.Write()) + + // Get a proof for the range [key1, key3]. 
+ proof, err := db1.GetRangeProof( + context.Background(), + maybe.Some([]byte("key1")), + maybe.Some([]byte("key3")), + 10, + ) + require.NoError(err) - // first 3 have values - // last was not found - require.NoError(t, errors[0]) - require.NoError(t, errors[1]) - require.NoError(t, errors[2]) - require.ErrorIs(t, errors[3], database.ErrNotFound) + // Commit the proof to a fresh database. + db2, err := getBasicDB() + require.NoError(err) - require.Equal(t, []byte{0}, values[0]) - require.Equal(t, []byte{1}, values[1]) - require.Equal(t, []byte{2}, values[2]) - require.Nil(t, values[3]) -} + require.NoError(db2.CommitRangeProof(context.Background(), maybe.Some([]byte("key1")), maybe.Some([]byte("key3")), proof)) -func Test_MerkleDB_InsertNil(t *testing.T) { - db, err := getBasicDB() - require.NoError(t, err) - batch := db.NewBatch() - err = batch.Put([]byte("key0"), nil) - require.NoError(t, err) - err = batch.Write() - require.NoError(t, err) + // [db2] should have the same key-value pairs as [db1]. + db2Root, err := db2.GetMerkleRoot(context.Background()) + require.NoError(err) - value, err := db.Get([]byte("key0")) - require.NoError(t, err) - require.Nil(t, value) + db1Root, err := db1.GetMerkleRoot(context.Background()) + require.NoError(err) - value, err = getNodeValue(db, "key0") - require.NoError(t, err) - require.Nil(t, value) + require.Equal(db1Root, db2Root) } -func Test_MerkleDB_InsertAndRetrieve(t *testing.T) { - db, err := getBasicDB() - require.NoError(t, err) +func Test_MerkleDB_CommitRangeProof_TrieWithInitialValues(t *testing.T) { + require := require.New(t) - // value hasn't been inserted so shouldn't exist - value, err := db.Get([]byte("key")) - require.Error(t, err) - require.Equal(t, database.ErrNotFound, err) - require.Nil(t, value) + // Populate [db1] with 3 key-value pairs. 
+ db1, err := getBasicDB() + require.NoError(err) + batch := db1.NewBatch() + require.NoError(batch.Put([]byte("key1"), []byte("1"))) + require.NoError(batch.Put([]byte("key2"), []byte("2"))) + require.NoError(batch.Put([]byte("key3"), []byte("3"))) + require.NoError(batch.Write()) + + // Get a proof for the range [key1, key3]. + proof, err := db1.GetRangeProof( + context.Background(), + maybe.Some([]byte("key1")), + maybe.Some([]byte("key3")), + 10, + ) + require.NoError(err) - err = db.Put([]byte("key"), []byte("value")) - require.NoError(t, err) + // Populate [db2] with key-value pairs where some of the keys + // have different values than in [db1]. + db2, err := getBasicDB() + require.NoError(err) + batch = db2.NewBatch() + require.NoError(batch.Put([]byte("key1"), []byte("3"))) + require.NoError(batch.Put([]byte("key2"), []byte("4"))) + require.NoError(batch.Put([]byte("key3"), []byte("5"))) + require.NoError(batch.Put([]byte("key25"), []byte("5"))) + require.NoError(batch.Write()) + + // Commit the proof from [db1] to [db2] + require.NoError(db2.CommitRangeProof( + context.Background(), + maybe.Some([]byte("key1")), + maybe.Some([]byte("key3")), + proof, + )) + + // [db2] should have the same key-value pairs as [db1]. + // Note that "key25" was in the range covered by the proof, + // so it's deleted from [db2]. 
+ db2Root, err := db2.GetMerkleRoot(context.Background()) + require.NoError(err) - value, err = db.Get([]byte("key")) - require.NoError(t, err) - require.Equal(t, []byte("value"), value) -} + db1Root, err := db1.GetMerkleRoot(context.Background()) + require.NoError(err) -func Test_MerkleDB_HealthCheck(t *testing.T) { - db, err := getBasicDB() - require.NoError(t, err) - val, err := db.HealthCheck(context.Background()) - require.NoError(t, err) - require.Nil(t, val) + require.Equal(db1Root, db2Root) } -func Test_MerkleDB_Overwrite(t *testing.T) { - db, err := getBasicDB() - require.NoError(t, err) - - err = db.Put([]byte("key"), []byte("value0")) - require.NoError(t, err) +func Test_MerkleDB_GetValues(t *testing.T) { + require := require.New(t) - value, err := db.Get([]byte("key")) - require.NoError(t, err) - require.Equal(t, []byte("value0"), value) + db, err := getBasicDB() + require.NoError(err) - err = db.Put([]byte("key"), []byte("value1")) - require.NoError(t, err) + writeBasicBatch(t, db) + keys := [][]byte{{0}, {1}, {2}, {10}} + values, errors := db.GetValues(context.Background(), keys) + require.Len(values, len(keys)) + require.Len(errors, len(keys)) - value, err = db.Get([]byte("key")) - require.NoError(t, err) - require.Equal(t, []byte("value1"), value) + // first 3 have values + // last was not found + require.NoError(errors[0]) + require.NoError(errors[1]) + require.NoError(errors[2]) + require.ErrorIs(errors[3], database.ErrNotFound) + + require.Equal([]byte{0}, values[0]) + require.Equal([]byte{1}, values[1]) + require.Equal([]byte{2}, values[2]) + require.Nil(values[3]) } -func Test_MerkleDB_Delete(t *testing.T) { - db, err := getBasicDB() - require.NoError(t, err) +func Test_MerkleDB_InsertNil(t *testing.T) { + require := require.New(t) - err = db.Put([]byte("key"), []byte("value0")) - require.NoError(t, err) + db, err := getBasicDB() + require.NoError(err) - value, err := db.Get([]byte("key")) - require.NoError(t, err) - require.Equal(t, 
[]byte("value0"), value) + batch := db.NewBatch() + key := []byte("key0") + require.NoError(batch.Put(key, nil)) + require.NoError(batch.Write()) - err = db.Delete([]byte("key")) - require.NoError(t, err) + value, err := db.Get(key) + require.NoError(err) + require.Empty(value) - value, err = db.Get([]byte("key")) - require.ErrorIs(t, err, database.ErrNotFound) - require.Nil(t, value) + value, err = getNodeValue(db, string(key)) + require.NoError(err) + require.Empty(value) } -func Test_MerkleDB_DeleteMissingKey(t *testing.T) { +func Test_MerkleDB_HealthCheck(t *testing.T) { + require := require.New(t) + db, err := getBasicDB() - require.NoError(t, err) + require.NoError(err) - err = db.Delete([]byte("key")) - require.NoError(t, err) + val, err := db.HealthCheck(context.Background()) + require.NoError(err) + require.Nil(val) } -// Test that untracked views aren't persisted to [db.childViews]. +// Test that untracked views aren't tracked in [db.childViews]. func TestDatabaseNewUntrackedView(t *testing.T) { require := require.New(t) @@ -453,43 +503,48 @@ func TestDatabaseNewUntrackedView(t *testing.T) { require.NoError(err) // Create a new untracked view. - view, err := db.newUntrackedView(defaultPreallocationSize) + view, err := newView( + db, + db, + ViewChanges{ + BatchOps: []database.BatchOp{ + {Key: []byte{1}, Value: []byte{1}}, + }, + }, + ) require.NoError(err) require.Empty(db.childViews) - // Write to the untracked view. - err = view.Insert(context.Background(), []byte{1}, []byte{1}) - require.NoError(err) - // Commit the view - err = view.CommitToDB(context.Background()) - require.NoError(err) + require.NoError(view.CommitToDB(context.Background())) // The untracked view should not be tracked by the parent database. require.Empty(db.childViews) } // Test that tracked views are persisted to [db.childViews]. 
-func TestDatabaseNewPreallocatedViewTracked(t *testing.T) { +func TestDatabaseNewViewFromBatchOpsTracked(t *testing.T) { require := require.New(t) db, err := getBasicDB() require.NoError(err) // Create a new tracked view. - view, err := db.NewPreallocatedView(10) + view, err := db.NewView( + context.Background(), + ViewChanges{ + BatchOps: []database.BatchOp{ + {Key: []byte{1}, Value: []byte{1}}, + }, + }, + ) require.NoError(err) require.Len(db.childViews, 1) - // Write to the view. - err = view.Insert(context.Background(), []byte{1}, []byte{1}) - require.NoError(err) - // Commit the view - err = view.CommitToDB(context.Background()) - require.NoError(err) + require.NoError(view.CommitToDB(context.Background())) - // The untracked view should be tracked by the parent database. + // The view should be tracked by the parent database. require.Contains(db.childViews, view) require.Len(db.childViews, 1) } @@ -502,46 +557,49 @@ func TestDatabaseCommitChanges(t *testing.T) { dbRoot := db.getMerkleRoot() // Committing a nil view should be a no-op. - err = db.commitToDB(context.Background()) - require.NoError(err) + require.NoError(db.CommitToDB(context.Background())) require.Equal(dbRoot, db.getMerkleRoot()) // Root didn't change // Committing an invalid view should fail. 
- invalidView, err := db.NewView() + invalidView, err := db.NewView(context.Background(), ViewChanges{}) require.NoError(err) - invalidView.(*trieView).invalidate() - err = invalidView.commitToDB(context.Background()) + invalidView.(*view).invalidate() + err = invalidView.CommitToDB(context.Background()) require.ErrorIs(err, ErrInvalid) // Add key-value pairs to the database - err = db.Put([]byte{1}, []byte{1}) - require.NoError(err) - err = db.Put([]byte{2}, []byte{2}) - require.NoError(err) + key1, key2, key3 := []byte{1}, []byte{2}, []byte{3} + value1, value2, value3 := []byte{1}, []byte{2}, []byte{3} + require.NoError(db.Put(key1, value1)) + require.NoError(db.Put(key2, value2)) - // Make a view and inser/delete a key-value pair. - view1Intf, err := db.NewView() - require.NoError(err) - view1, ok := view1Intf.(*trieView) - require.True(ok) - err = view1.Insert(context.Background(), []byte{3}, []byte{3}) - require.NoError(err) - err = view1.Remove(context.Background(), []byte{1}) + // Make a view and insert/delete a key-value pair. 
+ view1Intf, err := db.NewView( + context.Background(), + ViewChanges{ + BatchOps: []database.BatchOp{ + {Key: key3, Value: value3}, // New k-v pair + {Key: key1, Delete: true}, // Delete k-v pair + }, + }, + ) require.NoError(err) - view1Root, err := view1.getMerkleRoot(context.Background()) + require.IsType(&view{}, view1Intf) + view1 := view1Intf.(*view) + view1Root, err := view1.GetMerkleRoot(context.Background()) require.NoError(err) // Make a second view - view2Intf, err := db.NewView() + view2Intf, err := db.NewView(context.Background(), ViewChanges{}) require.NoError(err) - view2, ok := view2Intf.(*trieView) - require.True(ok) + require.IsType(&view{}, view2Intf) + view2 := view2Intf.(*view) // Make a view atop a view - view3Intf, err := view1.NewView() + view3Intf, err := view1.NewView(context.Background(), ViewChanges{}) require.NoError(err) - view3, ok := view3Intf.(*trieView) - require.True(ok) + require.IsType(&view{}, view3Intf) + view3 := view3Intf.(*view) // view3 // | @@ -550,18 +608,17 @@ func TestDatabaseCommitChanges(t *testing.T) { // db // Commit view1 - err = view1.commitToDB(context.Background()) - require.NoError(err) + require.NoError(view1.commitToDB(context.Background())) // Make sure the key-value pairs are correct. 
- _, err = db.Get([]byte{1}) + _, err = db.Get(key1) require.ErrorIs(err, database.ErrNotFound) - value, err := db.Get([]byte{2}) + gotValue, err := db.Get(key2) require.NoError(err) - require.Equal([]byte{2}, value) - value, err = db.Get([]byte{3}) + require.Equal(value2, gotValue) + gotValue, err = db.Get(key3) require.NoError(err) - require.Equal([]byte{3}, value) + require.Equal(value3, gotValue) // Make sure the root is right require.Equal(view1Root, db.getMerkleRoot()) @@ -589,20 +646,20 @@ func TestDatabaseInvalidateChildrenExcept(t *testing.T) { require.NoError(err) // Create children - view1Intf, err := db.NewView() + view1Intf, err := db.NewView(context.Background(), ViewChanges{}) require.NoError(err) - view1, ok := view1Intf.(*trieView) - require.True(ok) + require.IsType(&view{}, view1Intf) + view1 := view1Intf.(*view) - view2Intf, err := db.NewView() + view2Intf, err := db.NewView(context.Background(), ViewChanges{}) require.NoError(err) - view2, ok := view2Intf.(*trieView) - require.True(ok) + require.IsType(&view{}, view2Intf) + view2 := view2Intf.(*view) - view3Intf, err := db.NewView() + view3Intf, err := db.NewView(context.Background(), ViewChanges{}) require.NoError(err) - view3, ok := view3Intf.(*trieView) - require.True(ok) + require.IsType(&view{}, view3Intf) + view3 := view3Intf.(*view) db.invalidateChildrenExcept(view1) @@ -627,111 +684,196 @@ func TestDatabaseInvalidateChildrenExcept(t *testing.T) { } func Test_MerkleDB_Random_Insert_Ordering(t *testing.T) { - totalState := 1000 + require := require.New(t) + var ( - allKeys [][]byte - keyMap map[string]struct{} + numRuns = 3 + numShuffles = 3 + numKeyValues = 1_000 + prefixProbability = .1 + nilValueProbability = 0.05 + keys [][]byte + keysSet set.Set[string] ) + + // Returns a random key. + // With probability approximately [prefixProbability], the returned key + // will be a prefix of a previously returned key. 
genKey := func(r *rand.Rand) []byte { - count := 0 for { var key []byte - if len(allKeys) > 2 && r.Intn(100) < 10 { - // new prefixed key - prefix := allKeys[r.Intn(len(allKeys))] + shouldPrefix := r.Float64() < prefixProbability + if len(keys) > 2 && shouldPrefix { + // Return a key that is a prefix of a previously returned key. + prefix := keys[r.Intn(len(keys))] key = make([]byte, r.Intn(50)+len(prefix)) copy(key, prefix) - _, err := r.Read(key[len(prefix):]) - require.NoError(t, err) + _, _ = r.Read(key[len(prefix):]) } else { key = make([]byte, r.Intn(50)) - _, err := r.Read(key) - require.NoError(t, err) + _, _ = r.Read(key) } - if _, ok := keyMap[string(key)]; !ok { - allKeys = append(allKeys, key) - keyMap[string(key)] = struct{}{} + + // If the key has already been returned, try again. + // This test would flake if we allowed duplicate keys + // because then the order of insertion matters. + if !keysSet.Contains(string(key)) { + keysSet.Add(string(key)) + keys = append(keys, key) return key } - count++ } } - for i := 0; i < 3; i++ { - r := rand.New(rand.NewSource(int64(i))) // #nosec G404 + for i := 0; i < numRuns; i++ { + now := time.Now().UnixNano() + t.Logf("seed for iter %d: %d", i, now) + r := rand.New(rand.NewSource(now)) // #nosec G404 - ops := make([]*testOperation, 0, totalState) - allKeys = [][]byte{} - keyMap = map[string]struct{}{} - for x := 0; x < totalState; x++ { + // Insert key-value pairs into a database. 
+ ops := make([]database.BatchOp, 0, numKeyValues) + keys = [][]byte{} + for x := 0; x < numKeyValues; x++ { key := genKey(r) value := make([]byte, r.Intn(51)) - if len(value) == 51 { + if r.Float64() < nilValueProbability { value = nil } else { - _, err := r.Read(value) - require.NoError(t, err) + _, _ = r.Read(value) } - ops = append(ops, &testOperation{key: key, value: value}) + ops = append(ops, database.BatchOp{ + Key: key, + Value: value, + }) } + db, err := getBasicDB() - require.NoError(t, err) - result, err := applyOperations(db, ops) - require.NoError(t, err) - primaryRoot, err := result.GetMerkleRoot(context.Background()) - require.NoError(t, err) - for shuffleIndex := 0; shuffleIndex < 3; shuffleIndex++ { - r.Shuffle(totalState, func(i, j int) { + require.NoError(err) + + view1, err := db.NewView(context.Background(), ViewChanges{BatchOps: ops}) + require.NoError(err) + + // Get the root of the trie after applying [ops]. + view1Root, err := view1.GetMerkleRoot(context.Background()) + require.NoError(err) + + // Assert that the same operations applied in a different order + // result in the same root. Note this is only true because + // all keys inserted are unique. 
+ for shuffleIndex := 0; shuffleIndex < numShuffles; shuffleIndex++ { + r.Shuffle(numKeyValues, func(i, j int) { ops[i], ops[j] = ops[j], ops[i] }) - result, err := applyOperations(db, ops) - require.NoError(t, err) - newRoot, err := result.GetMerkleRoot(context.Background()) - require.NoError(t, err) - require.Equal(t, primaryRoot, newRoot) - } - } -} -type testOperation struct { - key []byte - value []byte - delete bool -} + view2, err := db.NewView(context.Background(), ViewChanges{BatchOps: ops}) + require.NoError(err) -func applyOperations(t *Database, ops []*testOperation) (Trie, error) { - view, err := t.NewView() - if err != nil { - return nil, err - } - for _, op := range ops { - if op.delete { - if err := view.Remove(context.Background(), op.key); err != nil { - return nil, err - } - } else { - if err := view.Insert(context.Background(), op.key, op.value); err != nil { - return nil, err - } + view2Root, err := view2.GetMerkleRoot(context.Background()) + require.NoError(err) + + require.Equal(view1Root, view2Root) } } - return view, nil } -func Test_MerkleDB_RandomCases(t *testing.T) { +func TestMerkleDBClear(t *testing.T) { require := require.New(t) - for i := 150; i < 500; i += 10 { - r := rand.New(rand.NewSource(int64(i))) // #nosec G404 - runRandDBTest(require, r, generate(require, r, i, .01)) - } + // Make a database and insert some key-value pairs. + db, err := getBasicDB() + require.NoError(err) + + emptyRootID := db.getMerkleRoot() + + now := time.Now().UnixNano() + t.Logf("seed: %d", now) + r := rand.New(rand.NewSource(now)) // #nosec G404 + + insertRandomKeyValues( + require, + r, + []database.Database{db}, + 1_000, + 0.25, + ) + + // Clear the database. + require.NoError(db.Clear()) + + // Assert that the database is empty. + iter := db.NewIterator() + defer iter.Release() + require.False(iter.Next()) + require.Equal(ids.Empty, db.getMerkleRoot()) + require.True(db.root.IsNothing()) + + // Assert caches are empty. 
+ require.Zero(db.valueNodeDB.nodeCache.Len()) + require.Zero(db.intermediateNodeDB.writeBuffer.currentSize) + + // Assert history has only the clearing change. + require.Len(db.history.lastChanges, 1) + change, ok := db.history.lastChanges[emptyRootID] + require.True(ok) + require.Empty(change.nodes) + require.Empty(change.values) } -func Test_MerkleDB_RandomCases_InitialValues(t *testing.T) { - require := require.New(t) +func FuzzMerkleDBEmptyRandomizedActions(f *testing.F) { + f.Fuzz( + func( + t *testing.T, + randSeed int64, + size uint, + ) { + if size == 0 { + t.SkipNow() + } + require := require.New(t) + r := rand.New(rand.NewSource(randSeed)) // #nosec G404 + for _, ts := range validTokenSizes { + runRandDBTest( + require, + r, + generateRandTest( + require, + r, + size, + 0.01, /*checkHashProbability*/ + ), + ts, + ) + } + }) +} - r := rand.New(rand.NewSource(int64(0))) // #nosec G404 - runRandDBTest(require, r, generateInitialValues(require, r, 2000, 3500, 0.0)) +func FuzzMerkleDBInitialValuesRandomizedActions(f *testing.F) { + f.Fuzz(func( + t *testing.T, + initialValues uint, + numSteps uint, + randSeed int64, + ) { + if numSteps == 0 { + t.SkipNow() + } + require := require.New(t) + r := rand.New(rand.NewSource(randSeed)) // #nosec G404 + for _, ts := range validTokenSizes { + runRandDBTest( + require, + r, + generateInitialValues( + require, + r, + initialValues, + numSteps, + 0.001, /*checkHashProbability*/ + ), + ts, + ) + } + }) } // randTest performs random trie operations. 
@@ -755,136 +897,195 @@ const ( opMax // boundary value, not an actual op ) -func runRandDBTest(require *require.Assertions, r *rand.Rand, rt randTest) { - db, err := getBasicDB() +func runRandDBTest(require *require.Assertions, r *rand.Rand, rt randTest, tokenSize int) { + db, err := getBasicDBWithBranchFactor(tokenSizeToBranchFactor[tokenSize]) require.NoError(err) + const ( + maxProofLen = 100 + maxPastRoots = defaultHistoryLength + ) + + var ( + values = make(map[Key][]byte) // tracks content of the trie + currentBatch = db.NewBatch() + uncommittedKeyValues = make(map[Key][]byte) + uncommittedDeletes = set.Set[Key]{} + pastRoots = []ids.ID{} + ) + startRoot, err := db.GetMerkleRoot(context.Background()) require.NoError(err) - values := make(map[path][]byte) // tracks content of the trie - currentBatch := db.NewBatch() - currentValues := make(map[path][]byte) - deleteValues := make(map[path]struct{}) - pastRoots := []ids.ID{} - for i, step := range rt { require.LessOrEqual(i, len(rt)) switch step.op { case opUpdate: - err := currentBatch.Put(step.key, step.value) - require.NoError(err) - currentValues[newPath(step.key)] = step.value - delete(deleteValues, newPath(step.key)) + require.NoError(currentBatch.Put(step.key, step.value)) + + uncommittedKeyValues[ToKey(step.key)] = step.value + uncommittedDeletes.Remove(ToKey(step.key)) case opDelete: - err := currentBatch.Delete(step.key) - require.NoError(err) - deleteValues[newPath(step.key)] = struct{}{} - delete(currentValues, newPath(step.key)) + require.NoError(currentBatch.Delete(step.key)) + + uncommittedDeletes.Add(ToKey(step.key)) + delete(uncommittedKeyValues, ToKey(step.key)) case opGenerateRangeProof: root, err := db.GetMerkleRoot(context.Background()) require.NoError(err) + if len(pastRoots) > 0 { root = pastRoots[r.Intn(len(pastRoots))] } - rangeProof, err := db.GetRangeProofAtRoot(context.Background(), root, step.key, step.value, 100) + + start := maybe.Nothing[[]byte]() + if len(step.key) > 0 { + start 
= maybe.Some(step.key) + } + end := maybe.Nothing[[]byte]() + if len(step.value) > 0 { + end = maybe.Some(step.value) + } + + rangeProof, err := db.GetRangeProofAtRoot(context.Background(), root, start, end, maxProofLen) + if root == ids.Empty { + require.ErrorIs(err, ErrEmptyProof) + continue + } require.NoError(err) - err = rangeProof.Verify( + require.LessOrEqual(len(rangeProof.KeyValues), maxProofLen) + + require.NoError(rangeProof.Verify( context.Background(), - step.key, - step.value, + start, + end, root, - ) - require.NoError(err) - require.LessOrEqual(len(rangeProof.KeyValues), 100) + tokenSize, + )) case opGenerateChangeProof: root, err := db.GetMerkleRoot(context.Background()) require.NoError(err) + if len(pastRoots) > 1 { root = pastRoots[r.Intn(len(pastRoots))] } - changeProof, err := db.GetChangeProof(context.Background(), startRoot, root, step.key, step.value, 100) + + start := maybe.Nothing[[]byte]() + if len(step.key) > 0 { + start = maybe.Some(step.key) + } + + end := maybe.Nothing[[]byte]() + if len(step.value) > 0 { + end = maybe.Some(step.value) + } + + changeProof, err := db.GetChangeProof(context.Background(), startRoot, root, start, end, maxProofLen) if startRoot == root { require.ErrorIs(err, errSameRoot) continue } + if root == ids.Empty { + require.ErrorIs(err, ErrEmptyProof) + continue + } require.NoError(err) - changeProofDB, err := getBasicDB() + require.LessOrEqual(len(changeProof.KeyChanges), maxProofLen) + + changeProofDB, err := getBasicDBWithBranchFactor(tokenSizeToBranchFactor[tokenSize]) require.NoError(err) - err = changeProof.Verify( + + require.NoError(changeProofDB.VerifyChangeProof( context.Background(), - changeProofDB, - step.key, - step.value, + changeProof, + start, + end, root, - ) - require.NoError(err) - require.LessOrEqual(len(changeProof.KeyValues)+len(changeProof.DeletedKeys), 100) + )) case opWriteBatch: oldRoot, err := db.GetMerkleRoot(context.Background()) require.NoError(err) - err = currentBatch.Write() - 
require.NoError(err) - for key, value := range currentValues { + + require.NoError(currentBatch.Write()) + currentBatch.Reset() + + if len(uncommittedKeyValues) == 0 && len(uncommittedDeletes) == 0 { + continue + } + + for key, value := range uncommittedKeyValues { values[key] = value } - for key := range deleteValues { + clear(uncommittedKeyValues) + + for key := range uncommittedDeletes { delete(values, key) } + uncommittedDeletes.Clear() - if len(currentValues) == 0 && len(deleteValues) == 0 { - continue - } newRoot, err := db.GetMerkleRoot(context.Background()) require.NoError(err) + if oldRoot != newRoot { pastRoots = append(pastRoots, newRoot) - if len(pastRoots) > 300 { - pastRoots = pastRoots[len(pastRoots)-300:] + if len(pastRoots) > maxPastRoots { + pastRoots = pastRoots[len(pastRoots)-maxPastRoots:] } } - currentValues = map[path][]byte{} - deleteValues = map[path]struct{}{} - currentBatch = db.NewBatch() + case opGet: v, err := db.Get(step.key) if err != nil { require.ErrorIs(err, database.ErrNotFound) } - want := values[newPath(step.key)] + + want := values[ToKey(step.key)] require.True(bytes.Equal(want, v)) // Use bytes.Equal so nil treated equal to []byte{} + trieValue, err := getNodeValue(db, string(step.key)) if err != nil { require.ErrorIs(err, database.ErrNotFound) } + require.True(bytes.Equal(want, trieValue)) // Use bytes.Equal so nil treated equal to []byte{} case opCheckhash: - dbTrie, err := newDatabase( - context.Background(), - memdb.New(), - Config{ - Tracer: newNoopTracer(), - HistoryLength: 0, - NodeCacheSize: minCacheSize, - }, - &mockMetrics{}, - ) + // Create a view with the same key-values as [db] + newDB, err := getBasicDBWithBranchFactor(tokenSizeToBranchFactor[tokenSize]) require.NoError(err) - localTrie := Trie(dbTrie) + + ops := make([]database.BatchOp, 0, len(values)) for key, value := range values { - err := localTrie.Insert(context.Background(), key.Serialize().Value, value) - require.NoError(err) + ops = append(ops, 
database.BatchOp{ + Key: key.Bytes(), + Value: value, + }) } - calculatedRoot, err := localTrie.GetMerkleRoot(context.Background()) + + newView, err := newDB.NewView(context.Background(), ViewChanges{BatchOps: ops}) + require.NoError(err) + + // Check that the root of the view is the same as the root of [db] + newRoot, err := newView.GetMerkleRoot(context.Background()) require.NoError(err) + dbRoot, err := db.GetMerkleRoot(context.Background()) require.NoError(err) - require.Equal(dbRoot, calculatedRoot) + require.Equal(dbRoot, newRoot) + default: + require.FailNow("unknown op") } } } -func generateWithKeys(require *require.Assertions, allKeys [][]byte, r *rand.Rand, size int, percentChanceToFullHash float64) randTest { +func generateRandTestWithKeys( + require *require.Assertions, + r *rand.Rand, + allKeys [][]byte, + size uint, + checkHashProbability float64, +) randTest { + const nilEndProbability = 0.1 + genKey := func() []byte { if len(allKeys) < 2 || r.Intn(100) < 10 { // new key @@ -909,8 +1110,10 @@ func generateWithKeys(require *require.Assertions, allKeys [][]byte, r *rand.Ran } genEnd := func(key []byte) []byte { - shouldBeNil := r.Intn(10) - if shouldBeNil == 0 { + // got is defined because if a rand method is used + // in an if statement, the nosec directive doesn't work. 
+ got := r.Float64() // #nosec G404 + if got < nilEndProbability { return nil } @@ -926,7 +1129,7 @@ func generateWithKeys(require *require.Assertions, allKeys [][]byte, r *rand.Ran } var steps randTest - for i := 0; i < size-1; { + for i := uint(0); i < size-1; { step := randTestStep{op: r.Intn(opMax)} switch step.op { case opUpdate: @@ -945,7 +1148,7 @@ func generateWithKeys(require *require.Assertions, allKeys [][]byte, r *rand.Ran step.value = genEnd(step.key) case opCheckhash: // this gets really expensive so control how often it happens - if r.Float64() >= percentChanceToFullHash { + if r.Float64() > checkHashProbability { continue } } @@ -957,47 +1160,130 @@ func generateWithKeys(require *require.Assertions, allKeys [][]byte, r *rand.Ran return steps } -func generateInitialValues(require *require.Assertions, r *rand.Rand, initialValues int, size int, percentChanceToFullHash float64) randTest { +func generateInitialValues( + require *require.Assertions, + r *rand.Rand, + numInitialKeyValues uint, + size uint, + percentChanceToFullHash float64, +) randTest { + const ( + prefixProbability = 0.1 + nilValueProbability = 0.05 + ) + var allKeys [][]byte genKey := func() []byte { // new prefixed key - if len(allKeys) > 2 && r.Intn(100) < 10 { + if len(allKeys) > 2 && r.Float64() < prefixProbability { prefix := allKeys[r.Intn(len(allKeys))] key := make([]byte, r.Intn(50)+len(prefix)) copy(key, prefix) - _, err := r.Read(key[len(prefix):]) - require.NoError(err) + _, _ = r.Read(key[len(prefix):]) allKeys = append(allKeys, key) return key } // new key key := make([]byte, r.Intn(50)) - _, err := r.Read(key) - require.NoError(err) + _, _ = r.Read(key) allKeys = append(allKeys, key) return key } var steps randTest - for i := 0; i < initialValues; i++ { - step := randTestStep{op: opUpdate} - step.key = genKey() - step.value = make([]byte, r.Intn(51)) - if len(step.value) == 51 { + for i := uint(0); i < numInitialKeyValues; i++ { + step := randTestStep{ + op: opUpdate, + 
key: genKey(), + value: make([]byte, r.Intn(50)), + } + // got is defined because if a rand method is used + // in an if statement, the nosec directive doesn't work. + got := r.Float64() // #nosec G404 + if got < nilValueProbability { step.value = nil } else { - _, err := r.Read(step.value) - require.NoError(err) + _, _ = r.Read(step.value) } steps = append(steps, step) } steps = append(steps, randTestStep{op: opWriteBatch}) - steps = append(steps, generateWithKeys(require, allKeys, r, size, percentChanceToFullHash)...) + steps = append(steps, generateRandTestWithKeys(require, r, allKeys, size, percentChanceToFullHash)...) return steps } -func generate(require *require.Assertions, r *rand.Rand, size int, percentChanceToFullHash float64) randTest { - var allKeys [][]byte - return generateWithKeys(require, allKeys, r, size, percentChanceToFullHash) +func generateRandTest(require *require.Assertions, r *rand.Rand, size uint, percentChanceToFullHash float64) randTest { + return generateRandTestWithKeys(require, r, [][]byte{}, size, percentChanceToFullHash) +} + +// Inserts [n] random key/value pairs into each database. +// Deletes [deletePortion] of the key/value pairs after insertion. 
+func insertRandomKeyValues( + require *require.Assertions, + rand *rand.Rand, + dbs []database.Database, + numKeyValues uint, + deletePortion float64, +) { + maxKeyLen := units.KiB + maxValLen := 4 * units.KiB + + require.GreaterOrEqual(deletePortion, float64(0)) + require.LessOrEqual(deletePortion, float64(1)) + for i := uint(0); i < numKeyValues; i++ { + keyLen := rand.Intn(maxKeyLen) + key := make([]byte, keyLen) + _, _ = rand.Read(key) + + valueLen := rand.Intn(maxValLen) + value := make([]byte, valueLen) + _, _ = rand.Read(value) + for _, db := range dbs { + require.NoError(db.Put(key, value)) + } + + if rand.Float64() < deletePortion { + for _, db := range dbs { + require.NoError(db.Delete(key)) + } + } + } +} + +func TestGetRangeProofAtRootEmptyRootID(t *testing.T) { + require := require.New(t) + + db, err := getBasicDB() + require.NoError(err) + + _, err = db.GetRangeProofAtRoot( + context.Background(), + ids.Empty, + maybe.Nothing[[]byte](), + maybe.Nothing[[]byte](), + 10, + ) + require.ErrorIs(err, ErrEmptyProof) +} + +func TestGetChangeProofEmptyRootID(t *testing.T) { + require := require.New(t) + + db, err := getBasicDB() + require.NoError(err) + + require.NoError(db.Put([]byte("key"), []byte("value"))) + + rootID := db.getMerkleRoot() + + _, err = db.GetChangeProof( + context.Background(), + rootID, + ids.Empty, + maybe.Nothing[[]byte](), + maybe.Nothing[[]byte](), + 10, + ) + require.ErrorIs(err, ErrEmptyProof) } diff --git a/avalanchego/x/merkledb/helpers_test.go b/avalanchego/x/merkledb/helpers_test.go new file mode 100644 index 00000000..acb620ab --- /dev/null +++ b/avalanchego/x/merkledb/helpers_test.go @@ -0,0 +1,90 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package merkledb + +import ( + "context" + "math/rand" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/database/memdb" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/hashing" + "github.com/ava-labs/avalanchego/utils/maybe" +) + +func getBasicDB() (*merkleDB, error) { + return newDatabase( + context.Background(), + memdb.New(), + newDefaultConfig(), + &mockMetrics{}, + ) +} + +func getBasicDBWithBranchFactor(bf BranchFactor) (*merkleDB, error) { + config := newDefaultConfig() + config.BranchFactor = bf + return newDatabase( + context.Background(), + memdb.New(), + config, + &mockMetrics{}, + ) +} + +// Writes []byte{i} -> []byte{i} for i in [0, 4] +func writeBasicBatch(t *testing.T, db *merkleDB) { + require := require.New(t) + + batch := db.NewBatch() + require.NoError(batch.Put([]byte{0}, []byte{0})) + require.NoError(batch.Put([]byte{1}, []byte{1})) + require.NoError(batch.Put([]byte{2}, []byte{2})) + require.NoError(batch.Put([]byte{3}, []byte{3})) + require.NoError(batch.Put([]byte{4}, []byte{4})) + require.NoError(batch.Write()) +} + +func newRandomProofNode(r *rand.Rand) ProofNode { + key := make([]byte, r.Intn(32)) // #nosec G404 + _, _ = r.Read(key) // #nosec G404 + serializedKey := ToKey(key) + + val := make([]byte, r.Intn(64)) // #nosec G404 + _, _ = r.Read(val) // #nosec G404 + + children := map[byte]ids.ID{} + for j := 0; j < 16; j++ { + if r.Float64() < 0.5 { + var childID ids.ID + _, _ = r.Read(childID[:]) // #nosec G404 + children[byte(j)] = childID + } + } + + hasValue := rand.Intn(2) == 1 // #nosec G404 + var valueOrHash maybe.Maybe[[]byte] + if hasValue { + // use the hash instead when length is greater than the hash length + if len(val) >= HashLength { + val = hashing.ComputeHash256(val) + } else if len(val) == 0 { + // We do this because when we encode a value of []byte{} we will later + // decode it as nil. 
+ // Doing this prevents inconsistency when comparing the encoded and + // decoded values. + val = nil + } + valueOrHash = maybe.Some(val) + } + + return ProofNode{ + Key: serializedKey, + ValueOrHash: valueOrHash, + Children: children, + } +} diff --git a/avalanchego/x/merkledb/history.go b/avalanchego/x/merkledb/history.go index e58dedbd..22d87cd1 100644 --- a/avalanchego/x/merkledb/history.go +++ b/avalanchego/x/merkledb/history.go @@ -1,26 +1,29 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package merkledb import ( + "bytes" "errors" "fmt" - "github.com/google/btree" - "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/buffer" + "github.com/ava-labs/avalanchego/utils/maybe" + "github.com/ava-labs/avalanchego/utils/set" ) var ( - ErrStartRootNotFound = errors.New("start root is not before end root in history") - ErrRootIDNotPresent = errors.New("root id is not present in history") + ErrInsufficientHistory = errors.New("insufficient history to generate proof") + ErrNoEndRoot = fmt.Errorf("%w: end root not found", ErrInsufficientHistory) ) // stores previous trie states type trieHistory struct { // Root ID --> The most recent change resulting in [rootID]. - lastChanges map[ids.ID]*changeSummaryAndIndex + lastChanges map[ids.ID]*changeSummaryAndInsertNumber // Maximum number of previous roots/changes to store in [history]. maxHistoryLen int @@ -28,9 +31,10 @@ type trieHistory struct { // Contains the history. // Sorted by increasing order of insertion. // Contains at most [maxHistoryLen] values. - history *btree.BTreeG[*changeSummaryAndIndex] + history buffer.Deque[*changeSummaryAndInsertNumber] - nextIndex uint64 + // Each change is tagged with this monotonic increasing number. + nextInsertNumber uint64 } // Tracks the beginning and ending state of a value. 
@@ -41,45 +45,55 @@ type change[T any] struct { // Wrapper around a changeSummary that allows comparison // of when the change was made. -type changeSummaryAndIndex struct { +type changeSummaryAndInsertNumber struct { *changeSummary - // Another changeSummaryAndIndex with a greater - // [index] means that change was after this one. - index uint64 + // Another changeSummaryAndInsertNumber with a greater + // [insertNumber] means that change was after this one. + insertNumber uint64 } -// Tracks all of the node and value changes that resulted in the rootID. +// Tracks all the node and value changes that resulted in the rootID. type changeSummary struct { + // The ID of the trie after these changes. rootID ids.ID - // key is path prefix - nodes map[path]*change[*node] - // key is full path - values map[path]*change[Maybe[[]byte]] + // The root before/after this change. + // Set in [calculateNodeIDs]. + rootChange change[maybe.Maybe[*node]] + nodes map[Key]*change[*node] + values map[Key]*change[maybe.Maybe[[]byte]] } func newChangeSummary(estimatedSize int) *changeSummary { return &changeSummary{ - nodes: make(map[path]*change[*node], estimatedSize), - values: make(map[path]*change[Maybe[[]byte]], estimatedSize), + nodes: make(map[Key]*change[*node], estimatedSize), + values: make(map[Key]*change[maybe.Maybe[[]byte]], estimatedSize), + rootChange: change[maybe.Maybe[*node]]{}, } } func newTrieHistory(maxHistoryLookback int) *trieHistory { return &trieHistory{ maxHistoryLen: maxHistoryLookback, - history: btree.NewG( - 2, - func(a, b *changeSummaryAndIndex) bool { - return a.index < b.index - }, - ), - lastChanges: make(map[ids.ID]*changeSummaryAndIndex), + history: buffer.NewUnboundedDeque[*changeSummaryAndInsertNumber](maxHistoryLookback), + lastChanges: make(map[ids.ID]*changeSummaryAndInsertNumber), } } -// Returns up to [maxLength] key-value pair changes with keys in [start, end] that -// occurred between [startRoot] and [endRoot]. 
-func (th *trieHistory) getValueChanges(startRoot, endRoot ids.ID, start, end []byte, maxLength int) (*changeSummary, error) { +// Returns up to [maxLength] key-value pair changes with keys in +// [start, end] that occurred between [startRoot] and [endRoot]. +// If [start] is Nothing, there's no lower bound on the range. +// If [end] is Nothing, there's no upper bound on the range. +// Returns [ErrInsufficientHistory] if the history is insufficient +// to generate the proof. +// Returns [ErrNoEndRoot], which wraps [ErrInsufficientHistory], if +// the [endRoot] isn't in the history. +func (th *trieHistory) getValueChanges( + startRoot ids.ID, + endRoot ids.ID, + start maybe.Maybe[[]byte], + end maybe.Maybe[[]byte], + maxLength int, +) (*changeSummary, error) { if maxLength <= 0 { return nil, fmt.Errorf("%w but was %d", ErrInvalidMaxLength, maxLength) } @@ -88,148 +102,187 @@ func (th *trieHistory) getValueChanges(startRoot, endRoot ids.ID, start, end []b return newChangeSummary(maxLength), nil } - // Confirm there's a change resulting in [startRoot] before - // a change resulting in [endRoot] in the history. - // [lastEndRootChange] is the last change in the history resulting in [endRoot]. - lastEndRootChange, ok := th.lastChanges[endRoot] + // [endRootChanges] is the last change in the history resulting in [endRoot]. + endRootChanges, ok := th.lastChanges[endRoot] if !ok { - return nil, ErrRootIDNotPresent + return nil, fmt.Errorf("%w: %s", ErrNoEndRoot, endRoot) } - // [startRootChanges] is the last appearance of [startRoot] + // Confirm there's a change resulting in [startRoot] before + // a change resulting in [endRoot] in the history. + // [startRootChanges] is the last appearance of [startRoot]. 
startRootChanges, ok := th.lastChanges[startRoot] if !ok { - return nil, ErrStartRootNotFound + return nil, fmt.Errorf("%w: start root %s not found", ErrInsufficientHistory, startRoot) } - // startRootChanges is after the lastEndRootChange, but that is just the latest appearance of start root - // there may be an earlier entry, so attempt to find an entry that comes before lastEndRootChange - if startRootChanges.index > lastEndRootChange.index { - th.history.DescendLessOrEqual( - lastEndRootChange, - func(item *changeSummaryAndIndex) bool { - if item == lastEndRootChange { - return true // Skip first iteration - } - if item.rootID == startRoot { - startRootChanges = item - return false - } - return true - }, - ) - // There's no change resulting in [startRoot] before the latest change resulting in [endRoot]. - if startRootChanges.index > lastEndRootChange.index { - return nil, ErrStartRootNotFound + var ( + // The insert number of the last element in [th.history]. + mostRecentChangeInsertNumber = th.nextInsertNumber - 1 + + // The index within [th.history] of its last element. + mostRecentChangeIndex = th.history.Len() - 1 + + // The difference between the last index in [th.history] and the index of [endRootChanges]. + endToMostRecentOffset = int(mostRecentChangeInsertNumber - endRootChanges.insertNumber) + + // The index in [th.history] of the latest change resulting in [endRoot]. + endRootIndex = mostRecentChangeIndex - endToMostRecentOffset + ) + + if startRootChanges.insertNumber > endRootChanges.insertNumber { + // [startRootChanges] happened after [endRootChanges]. + // However, that is just the *latest* change resulting in [startRoot]. + // Attempt to find a change resulting in [startRoot] before [endRootChanges]. + // + // Translate the insert number to the index in [th.history] so we can iterate + // backward from [endRootChanges]. 
+ for i := endRootIndex - 1; i >= 0; i-- { + changes, _ := th.history.Index(i) + + if changes.rootID == startRoot { + // [startRootChanges] is now the last change resulting in + // [startRoot] before [endRootChanges]. + startRootChanges = changes + break + } + + if i == 0 { + return nil, fmt.Errorf( + "%w: start root %s not found before end root %s", + ErrInsufficientHistory, startRoot, endRoot, + ) + } } } - // Keep changes sorted so the largest can be removed in order to stay within the maxLength limit. - sortedKeys := btree.NewG( - 2, - func(a, b path) bool { - return a.Compare(b) < 0 - }, - ) + var ( + // Keep track of changed keys so the largest can be removed + // in order to stay within the [maxLength] limit if necessary. + changedKeys = set.Set[Key]{} - startPath := newPath(start) - endPath := newPath(end) + startKey = maybe.Bind(start, ToKey) + endKey = maybe.Bind(end, ToKey) - // For each element in the history in the range between [startRoot]'s - // last appearance (exclusive) and [endRoot]'s last appearance (inclusive), - // add the changes to keys in [start, end] to [combinedChanges]. - // Only the key-value pairs with the greatest [maxLength] keys will be kept. - combinedChanges := newChangeSummary(maxLength) + // For each element in the history in the range between [startRoot]'s + // last appearance (exclusive) and [endRoot]'s last appearance (inclusive), + // add the changes to keys in [start, end] to [combinedChanges]. + // Only the key-value pairs with the greatest [maxLength] keys will be kept. + combinedChanges = newChangeSummary(maxLength) + + // The difference between the index of [startRootChanges] and [endRootChanges] in [th.history]. + startToEndOffset = int(endRootChanges.insertNumber - startRootChanges.insertNumber) + + // The index of the last change resulting in [startRoot] + // which occurs before [endRootChanges]. 
+ startRootIndex = endRootIndex - startToEndOffset + ) // For each change after [startRootChanges] up to and including - // [lastEndRootChange], record the change in [combinedChanges]. - th.history.AscendGreaterOrEqual( - startRootChanges, - func(item *changeSummaryAndIndex) bool { - if item == startRootChanges { - // Start from the first change after [startRootChanges]. - return true - } - if item.index > lastEndRootChange.index { - // Don't go past [lastEndRootChange]. - return false + // [endRootChanges], record the change in [combinedChanges]. + for i := startRootIndex + 1; i <= endRootIndex; i++ { + changes, _ := th.history.Index(i) + + // Add the changes from this commit to [combinedChanges]. + for key, valueChange := range changes.values { + // The key is outside the range [start, end]. + if (startKey.HasValue() && key.Less(startKey.Value())) || + (end.HasValue() && key.Greater(endKey.Value())) { + continue } - for key, valueChange := range item.values { - if (len(startPath) == 0 || key.Compare(startPath) >= 0) && - (len(endPath) == 0 || key.Compare(endPath) <= 0) { - if existing, ok := combinedChanges.values[key]; ok { - existing.after = valueChange.after - } else { - combinedChanges.values[key] = &change[Maybe[[]byte]]{ - before: valueChange.before, - after: valueChange.after, - } - } - sortedKeys.ReplaceOrInsert(key) + // A change to this key already exists in [combinedChanges] + // so update its before value with the earlier before value + if existing, ok := combinedChanges.values[key]; ok { + existing.after = valueChange.after + if existing.before.HasValue() == existing.after.HasValue() && + bytes.Equal(existing.before.Value(), existing.after.Value()) { + // The change to this key is a no-op, so remove it from [combinedChanges]. + delete(combinedChanges.values, key) + changedKeys.Remove(key) } - } - - // Keep only the smallest [maxLength] items in [combinedChanges.values]. 
- for sortedKeys.Len() > maxLength { - if greatestKey, found := sortedKeys.DeleteMax(); found { - delete(combinedChanges.values, greatestKey) + } else { + combinedChanges.values[key] = &change[maybe.Maybe[[]byte]]{ + before: valueChange.before, + after: valueChange.after, } + changedKeys.Add(key) } + } + } + + // If we have <= [maxLength] elements, we're done. + if changedKeys.Len() <= maxLength { + return combinedChanges, nil + } + + // Keep only the smallest [maxLength] items in [combinedChanges.values]. + sortedChangedKeys := changedKeys.List() + utils.Sort(sortedChangedKeys) + for len(sortedChangedKeys) > maxLength { + greatestKey := sortedChangedKeys[len(sortedChangedKeys)-1] + sortedChangedKeys = sortedChangedKeys[:len(sortedChangedKeys)-1] + delete(combinedChanges.values, greatestKey) + } - return true - }, - ) return combinedChanges, nil } // Returns the changes to go from the current trie state back to the requested [rootID] // for the keys in [start, end]. -// If [start] is nil, all keys are considered > [start]. -// If [end] is nil, all keys are considered < [end]. -func (th *trieHistory) getChangesToGetToRoot(rootID ids.ID, start, end []byte) (*changeSummary, error) { +// If [start] is Nothing, all keys are considered > [start]. +// If [end] is Nothing, all keys are considered < [end]. +func (th *trieHistory) getChangesToGetToRoot(rootID ids.ID, start maybe.Maybe[[]byte], end maybe.Maybe[[]byte]) (*changeSummary, error) { // [lastRootChange] is the last change in the history resulting in [rootID]. 
lastRootChange, ok := th.lastChanges[rootID] if !ok { - return nil, ErrRootIDNotPresent + return nil, ErrInsufficientHistory } var ( - startPath = newPath(start) - endPath = newPath(end) - combinedChanges = newChangeSummary(defaultPreallocationSize) + startKey = maybe.Bind(start, ToKey) + endKey = maybe.Bind(end, ToKey) + combinedChanges = newChangeSummary(defaultPreallocationSize) + mostRecentChangeInsertNumber = th.nextInsertNumber - 1 + mostRecentChangeIndex = th.history.Len() - 1 + offset = int(mostRecentChangeInsertNumber - lastRootChange.insertNumber) + lastRootChangeIndex = mostRecentChangeIndex - offset ) // Go backward from the most recent change in the history up to but // not including the last change resulting in [rootID]. // Record each change in [combinedChanges]. - th.history.Descend( - func(item *changeSummaryAndIndex) bool { - if item == lastRootChange { - return false - } - for key, changedNode := range item.nodes { - combinedChanges.nodes[key] = &change[*node]{ - after: changedNode.before, - } + for i := mostRecentChangeIndex; i > lastRootChangeIndex; i-- { + changes, _ := th.history.Index(i) + + if i == mostRecentChangeIndex { + combinedChanges.rootChange.before = changes.rootChange.after + } + if i == lastRootChangeIndex+1 { + combinedChanges.rootChange.after = changes.rootChange.before + } + + for key, changedNode := range changes.nodes { + combinedChanges.nodes[key] = &change[*node]{ + after: changedNode.before, } + } - for key, valueChange := range item.values { - if (len(startPath) == 0 || key.Compare(startPath) >= 0) && - (len(endPath) == 0 || key.Compare(endPath) <= 0) { - if existing, ok := combinedChanges.values[key]; ok { - existing.after = valueChange.before - } else { - combinedChanges.values[key] = &change[Maybe[[]byte]]{ - before: valueChange.after, - after: valueChange.before, - } + for key, valueChange := range changes.values { + if (startKey.IsNothing() || !key.Less(startKey.Value())) && + (endKey.IsNothing() || 
!key.Greater(endKey.Value())) { + if existing, ok := combinedChanges.values[key]; ok { + existing.after = valueChange.before + } else { + combinedChanges.values[key] = &change[maybe.Maybe[[]byte]]{ + before: valueChange.after, + after: valueChange.before, } } } - return true - }, - ) + } + } + return combinedChanges, nil } @@ -240,10 +293,11 @@ func (th *trieHistory) record(changes *changeSummary) { return } - for th.history.Len() == th.maxHistoryLen { + if th.history.Len() == th.maxHistoryLen { // This change causes us to go over our lookback limit. // Remove the oldest set of changes. - oldestEntry, _ := th.history.DeleteMin() + oldestEntry, _ := th.history.PopLeft() + latestChange := th.lastChanges[oldestEntry.rootID] if latestChange == oldestEntry { // The removed change was the most recent resulting in this root ID. @@ -251,14 +305,15 @@ func (th *trieHistory) record(changes *changeSummary) { } } - changesAndIndex := &changeSummaryAndIndex{ + changesAndIndex := &changeSummaryAndInsertNumber{ changeSummary: changes, - index: th.nextIndex, + insertNumber: th.nextInsertNumber, } - th.nextIndex++ + th.nextInsertNumber++ // Add [changes] to the sorted change list. - _, _ = th.history.ReplaceOrInsert(changesAndIndex) + _ = th.history.PushRight(changesAndIndex) + // Mark that this is the most recent change resulting in [changes.rootID]. th.lastChanges[changes.rootID] = changesAndIndex } diff --git a/avalanchego/x/merkledb/history_test.go b/avalanchego/x/merkledb/history_test.go index 29da9974..09c84321 100644 --- a/avalanchego/x/merkledb/history_test.go +++ b/avalanchego/x/merkledb/history_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package merkledb @@ -7,125 +7,104 @@ import ( "context" "math/rand" "testing" + "time" "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/maybe" ) func Test_History_Simple(t *testing.T) { require := require.New(t) - db, err := New( + db, err := newDB( context.Background(), memdb.New(), - Config{ - Tracer: newNoopTracer(), - HistoryLength: 300, - NodeCacheSize: minCacheSize, - }, + newDefaultConfig(), ) require.NoError(err) batch := db.NewBatch() - err = batch.Put([]byte("key"), []byte("value")) - require.NoError(err) - err = batch.Write() - require.NoError(err) + require.NoError(batch.Put([]byte("key"), []byte("value"))) + require.NoError(batch.Write()) val, err := db.Get([]byte("key")) require.NoError(err) require.Equal([]byte("value"), val) - origProof, err := db.GetRangeProof(context.Background(), []byte("k"), []byte("key3"), 10) + origProof, err := db.GetRangeProof(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) require.NoError(err) require.NotNil(origProof) - origRootID := db.root.id - err = origProof.Verify(context.Background(), []byte("k"), []byte("key3"), origRootID) - require.NoError(err) + origRootID := db.rootID + require.NoError(origProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize)) batch = db.NewBatch() - err = batch.Put([]byte("key"), []byte("value0")) - require.NoError(err) - err = batch.Write() - require.NoError(err) - newProof, err := db.GetRangeProofAtRoot(context.Background(), origRootID, []byte("k"), []byte("key3"), 10) + require.NoError(batch.Put([]byte("key"), []byte("value0"))) + require.NoError(batch.Write()) + newProof, err := db.GetRangeProofAtRoot(context.Background(), origRootID, maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) require.NoError(err) require.NotNil(newProof) - err = newProof.Verify(context.Background(), 
[]byte("k"), []byte("key3"), origRootID) - require.NoError(err) + require.NoError(newProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize)) batch = db.NewBatch() - err = batch.Put([]byte("key1"), []byte("value1")) - require.NoError(err) - err = batch.Put([]byte("key8"), []byte("value8")) - require.NoError(err) - err = batch.Write() - require.NoError(err) - newProof, err = db.GetRangeProofAtRoot(context.Background(), origRootID, []byte("k"), []byte("key3"), 10) + require.NoError(batch.Put([]byte("key1"), []byte("value1"))) + require.NoError(batch.Put([]byte("key8"), []byte("value8"))) + require.NoError(batch.Write()) + newProof, err = db.GetRangeProofAtRoot(context.Background(), origRootID, maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) require.NoError(err) require.NotNil(newProof) - err = newProof.Verify(context.Background(), []byte("k"), []byte("key3"), origRootID) - require.NoError(err) + require.NoError(newProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize)) batch = db.NewBatch() - err = batch.Put([]byte("k"), []byte("v")) - require.NoError(err) - require.NoError(err) - err = batch.Write() - require.NoError(err) - newProof, err = db.GetRangeProofAtRoot(context.Background(), origRootID, []byte("k"), []byte("key3"), 10) + require.NoError(batch.Put([]byte("k"), []byte("v"))) + require.NoError(batch.Write()) + newProof, err = db.GetRangeProofAtRoot(context.Background(), origRootID, maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) require.NoError(err) require.NotNil(newProof) - err = newProof.Verify(context.Background(), []byte("k"), []byte("key3"), origRootID) - require.NoError(err) + require.NoError(newProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize)) batch = db.NewBatch() - err = batch.Delete([]byte("k")) - require.NoError(err) - err = batch.Delete([]byte("ke")) 
- require.NoError(err) - err = batch.Delete([]byte("key")) - require.NoError(err) - err = batch.Delete([]byte("key1")) - require.NoError(err) - err = batch.Put([]byte("key2"), []byte("value2")) - require.NoError(err) - err = batch.Delete([]byte("key3")) - require.NoError(err) - err = batch.Delete([]byte("key4")) - require.NoError(err) - err = batch.Delete([]byte("key5")) - require.NoError(err) - err = batch.Delete([]byte("key8")) - require.NoError(err) - err = batch.Write() - require.NoError(err) - newProof, err = db.GetRangeProofAtRoot(context.Background(), origRootID, []byte("k"), []byte("key3"), 10) + require.NoError(batch.Delete([]byte("k"))) + require.NoError(batch.Delete([]byte("ke"))) + require.NoError(batch.Delete([]byte("key"))) + require.NoError(batch.Delete([]byte("key1"))) + require.NoError(batch.Put([]byte("key2"), []byte("value2"))) + require.NoError(batch.Delete([]byte("key3"))) + require.NoError(batch.Delete([]byte("key4"))) + require.NoError(batch.Delete([]byte("key5"))) + require.NoError(batch.Delete([]byte("key8"))) + require.NoError(batch.Write()) + newProof, err = db.GetRangeProofAtRoot(context.Background(), origRootID, maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) require.NoError(err) require.NotNil(newProof) - err = newProof.Verify(context.Background(), []byte("k"), []byte("key3"), origRootID) - require.NoError(err) + require.NoError(newProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize)) } func Test_History_Large(t *testing.T) { require := require.New(t) - for i := 1; i < 10; i++ { - r := rand.New(rand.NewSource(int64(i))) // #nosec G404 + numIters := 250 + + for i := 1; i < 5; i++ { + config := newDefaultConfig() + // History must be large enough to get the change proof + // after this loop. 
+ config.HistoryLength = uint(numIters) db, err := New( context.Background(), memdb.New(), - Config{ - Tracer: newNoopTracer(), - HistoryLength: 1500, - NodeCacheSize: 1000, - }, + config, ) require.NoError(err) roots := []ids.ID{} + + now := time.Now().UnixNano() + t.Logf("seed for iter %d: %d", i, now) + r := rand.New(rand.NewSource(now)) // #nosec G404 // make sure they stay in sync - for x := 0; x < 500; x++ { + for x := 0; x < numIters; x++ { + batch := db.NewBatch() addkey := make([]byte, r.Intn(50)) _, err := r.Read(addkey) require.NoError(err) @@ -133,14 +112,12 @@ func Test_History_Large(t *testing.T) { _, err = r.Read(val) require.NoError(err) - err = db.Put(addkey, val) - require.NoError(err) + require.NoError(batch.Put(addkey, val)) addNilkey := make([]byte, r.Intn(50)) _, err = r.Read(addNilkey) require.NoError(err) - err = db.Put(addNilkey, nil) - require.NoError(err) + require.NoError(batch.Put(addNilkey, nil)) deleteKeyStart := make([]byte, r.Intn(50)) _, err = r.Read(deleteKeyStart) @@ -148,484 +125,388 @@ func Test_History_Large(t *testing.T) { it := db.NewIteratorWithStart(deleteKeyStart) if it.Next() { - err = db.Delete(it.Key()) - require.NoError(err) + require.NoError(batch.Delete(it.Key())) } require.NoError(it.Error()) it.Release() + require.NoError(batch.Write()) root, err := db.GetMerkleRoot(context.Background()) require.NoError(err) roots = append(roots, root) } - proof, err := db.GetRangeProofAtRoot(context.Background(), roots[0], nil, nil, 10) - require.NoError(err) - require.NotNil(proof) - err = proof.Verify(context.Background(), nil, nil, roots[0]) - require.NoError(err) + for i := 0; i < numIters; i += numIters / 10 { + proof, err := db.GetRangeProofAtRoot(context.Background(), roots[i], maybe.Nothing[[]byte](), maybe.Nothing[[]byte](), 10) + require.NoError(err) + require.NotNil(proof) + + require.NoError(proof.Verify(context.Background(), maybe.Nothing[[]byte](), maybe.Nothing[[]byte](), roots[i], 
BranchFactorToTokenSize[config.BranchFactor])) + } } } func Test_History_Bad_GetValueChanges_Input(t *testing.T) { require := require.New(t) - db, err := New( + config := newDefaultConfig() + config.HistoryLength = 5 + + db, err := newDB( context.Background(), memdb.New(), - Config{ - Tracer: newNoopTracer(), - HistoryLength: 5, - NodeCacheSize: minCacheSize, - }, + config, ) require.NoError(err) + + // Do 5 puts (i.e. the history length) batch := db.NewBatch() - err = batch.Put([]byte("key"), []byte("value")) - require.NoError(err) - err = batch.Write() - require.NoError(err) + require.NoError(batch.Put([]byte("key"), []byte("value"))) + require.NoError(batch.Write()) - toBeDeletedRoot := db.getMerkleRoot() + root1 := db.getMerkleRoot() batch = db.NewBatch() - err = batch.Put([]byte("key"), []byte("value0")) - require.NoError(err) - err = batch.Write() - require.NoError(err) + require.NoError(batch.Put([]byte("key"), []byte("value0"))) + require.NoError(batch.Write()) - startRoot := db.getMerkleRoot() + root2 := db.getMerkleRoot() batch = db.NewBatch() - err = batch.Put([]byte("key1"), []byte("value0")) - require.NoError(err) - err = batch.Write() - require.NoError(err) + require.NoError(batch.Put([]byte("key1"), []byte("value0"))) + require.NoError(batch.Write()) batch = db.NewBatch() - err = batch.Put([]byte("key1"), []byte("value1")) - require.NoError(err) - err = batch.Write() - require.NoError(err) + require.NoError(batch.Put([]byte("key1"), []byte("value1"))) + require.NoError(batch.Write()) batch = db.NewBatch() - err = batch.Put([]byte("key2"), []byte("value3")) - require.NoError(err) - err = batch.Write() - require.NoError(err) + require.NoError(batch.Put([]byte("key2"), []byte("value3"))) + require.NoError(batch.Write()) - endRoot := db.getMerkleRoot() + root3 := db.getMerkleRoot() // ensure these start as valid calls - _, err = db.history.getValueChanges(toBeDeletedRoot, endRoot, nil, nil, 1) + _, err = db.history.getValueChanges(root1, root3, 
maybe.Nothing[[]byte](), maybe.Nothing[[]byte](), 1) require.NoError(err) - _, err = db.history.getValueChanges(startRoot, endRoot, nil, nil, 1) + _, err = db.history.getValueChanges(root2, root3, maybe.Nothing[[]byte](), maybe.Nothing[[]byte](), 1) require.NoError(err) - _, err = db.history.getValueChanges(startRoot, endRoot, nil, nil, -1) - require.Error(err, ErrInvalidMaxLength) + _, err = db.history.getValueChanges(root2, root3, maybe.Nothing[[]byte](), maybe.Nothing[[]byte](), -1) + require.ErrorIs(err, ErrInvalidMaxLength) - _, err = db.history.getValueChanges(endRoot, startRoot, nil, nil, 1) - require.Error(err, ErrStartRootNotFound) + _, err = db.history.getValueChanges(root3, root2, maybe.Nothing[[]byte](), maybe.Nothing[[]byte](), 1) + require.ErrorIs(err, ErrInsufficientHistory) - // trigger the first root to be deleted by exiting the lookback window + // Cause root1 to be removed from the history batch = db.NewBatch() - err = batch.Put([]byte("key2"), []byte("value4")) - require.NoError(err) - err = batch.Write() - require.NoError(err) + require.NoError(batch.Put([]byte("key2"), []byte("value4"))) + require.NoError(batch.Write()) - // now this root should no lnger be present - _, err = db.history.getValueChanges(toBeDeletedRoot, endRoot, nil, nil, 1) - require.Error(err, ErrRootIDNotPresent) + _, err = db.history.getValueChanges(root1, root3, maybe.Nothing[[]byte](), maybe.Nothing[[]byte](), 1) + require.ErrorIs(err, ErrInsufficientHistory) // same start/end roots should yield an empty changelist - changes, err := db.history.getValueChanges(endRoot, endRoot, nil, nil, 10) + changes, err := db.history.getValueChanges(root3, root3, maybe.Nothing[[]byte](), maybe.Nothing[[]byte](), 10) require.NoError(err) - require.Len(changes.values, 0) + require.Empty(changes.values) } func Test_History_Trigger_History_Queue_Looping(t *testing.T) { require := require.New(t) - db, err := New( + config := newDefaultConfig() + config.HistoryLength = 2 + + db, err := newDB( 
context.Background(), memdb.New(), - Config{ - Tracer: newNoopTracer(), - HistoryLength: 2, - NodeCacheSize: minCacheSize, - }, + config, ) require.NoError(err) + + // Do 2 puts (i.e. the history length) batch := db.NewBatch() - err = batch.Put([]byte("key"), []byte("value")) - require.NoError(err) - err = batch.Write() - require.NoError(err) + require.NoError(batch.Put([]byte("key"), []byte("value"))) + require.NoError(batch.Write()) origRootID := db.getMerkleRoot() - origProof, err := db.GetRangeProof(context.Background(), []byte("k"), []byte("key3"), 10) + origProof, err := db.GetRangeProof(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) require.NoError(err) require.NotNil(origProof) - err = origProof.Verify( + require.NoError(origProof.Verify( context.Background(), - []byte("k"), - []byte("key3"), + maybe.Some([]byte("k")), + maybe.Some([]byte("key3")), origRootID, - ) - require.NoError(err) + db.tokenSize, + )) // write a new value into the db, now there should be 2 roots in the history batch = db.NewBatch() - err = batch.Put([]byte("key"), []byte("value0")) - require.NoError(err) - err = batch.Write() - require.NoError(err) + require.NoError(batch.Put([]byte("key"), []byte("value0"))) + require.NoError(batch.Write()) // ensure that previous root is still present and generates a valid proof - newProof, err := db.GetRangeProofAtRoot(context.Background(), origRootID, []byte("k"), []byte("key3"), 10) + newProof, err := db.GetRangeProofAtRoot(context.Background(), origRootID, maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) require.NoError(err) require.NotNil(newProof) - err = newProof.Verify( + require.NoError(newProof.Verify( context.Background(), - []byte("k"), - []byte("key3"), + maybe.Some([]byte("k")), + maybe.Some([]byte("key3")), origRootID, - ) - require.NoError(err) + db.tokenSize, + )) // trigger a new root to be added to the history, which should cause rollover since there can only be 2 batch = db.NewBatch() - err 
= batch.Put([]byte("key1"), []byte("value1")) - require.NoError(err) - err = batch.Write() - require.NoError(err) + require.NoError(batch.Put([]byte("key1"), []byte("value1"))) + require.NoError(batch.Write()) // proof from first root shouldn't be generatable since it should have been removed from the history - _, err = db.GetRangeProofAtRoot(context.Background(), origRootID, []byte("k"), []byte("key3"), 10) - require.Error(err, ErrRootIDNotPresent) + _, err = db.GetRangeProofAtRoot(context.Background(), origRootID, maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) + require.ErrorIs(err, ErrInsufficientHistory) } func Test_History_Values_Lookup_Over_Queue_Break(t *testing.T) { require := require.New(t) - db, err := New( + config := newDefaultConfig() + config.HistoryLength = 4 + db, err := newDB( context.Background(), memdb.New(), - Config{ - Tracer: newNoopTracer(), - HistoryLength: 4, - NodeCacheSize: minCacheSize, - }, + config, ) require.NoError(err) + + // Do 4 puts (i.e. the history length) batch := db.NewBatch() - err = batch.Put([]byte("key"), []byte("value")) - require.NoError(err) - err = batch.Write() - require.NoError(err) + require.NoError(batch.Put([]byte("key"), []byte("value"))) + require.NoError(batch.Write()) // write a new value into the db batch = db.NewBatch() - err = batch.Put([]byte("key"), []byte("value0")) - require.NoError(err) - err = batch.Write() - require.NoError(err) + require.NoError(batch.Put([]byte("key"), []byte("value0"))) + require.NoError(batch.Write()) startRoot := db.getMerkleRoot() // write a new value into the db batch = db.NewBatch() - err = batch.Put([]byte("key1"), []byte("value0")) - require.NoError(err) - err = batch.Write() - require.NoError(err) + require.NoError(batch.Put([]byte("key1"), []byte("value0"))) + require.NoError(batch.Write()) // write a new value into the db that overwrites key1 batch = db.NewBatch() - err = batch.Put([]byte("key1"), []byte("value1")) - require.NoError(err) - err = batch.Write() 
- require.NoError(err) + require.NoError(batch.Put([]byte("key1"), []byte("value1"))) + require.NoError(batch.Write()) // trigger a new root to be added to the history, which should cause rollover since there can only be 3 batch = db.NewBatch() - err = batch.Put([]byte("key2"), []byte("value3")) - require.NoError(err) - err = batch.Write() - require.NoError(err) + require.NoError(batch.Put([]byte("key2"), []byte("value3"))) + require.NoError(batch.Write()) endRoot := db.getMerkleRoot() // changes should still be collectable even though the history has had to loop due to hitting max size - changes, err := db.history.getValueChanges(startRoot, endRoot, nil, nil, 10) + changes, err := db.history.getValueChanges(startRoot, endRoot, maybe.Nothing[[]byte](), maybe.Nothing[[]byte](), 10) require.NoError(err) - require.Contains(changes.values, newPath([]byte("key1"))) - require.Equal([]byte("value1"), changes.values[newPath([]byte("key1"))].after.value) - require.Contains(changes.values, newPath([]byte("key2"))) - require.Equal([]byte("value3"), changes.values[newPath([]byte("key2"))].after.value) + require.Contains(changes.values, ToKey([]byte("key1"))) + require.Equal([]byte("value1"), changes.values[ToKey([]byte("key1"))].after.Value()) + require.Contains(changes.values, ToKey([]byte("key2"))) + require.Equal([]byte("value3"), changes.values[ToKey([]byte("key2"))].after.Value()) } func Test_History_RepeatedRoot(t *testing.T) { require := require.New(t) - db, err := New( + db, err := newDB( context.Background(), memdb.New(), - Config{ - Tracer: newNoopTracer(), - HistoryLength: 100, - NodeCacheSize: minCacheSize, - }, + newDefaultConfig(), ) require.NoError(err) batch := db.NewBatch() - err = batch.Put([]byte("key1"), []byte("value1")) - require.NoError(err) - err = batch.Put([]byte("key2"), []byte("value2")) - require.NoError(err) - err = batch.Put([]byte("key3"), []byte("value3")) - require.NoError(err) - err = batch.Write() - require.NoError(err) + 
require.NoError(batch.Put([]byte("key1"), []byte("value1"))) + require.NoError(batch.Put([]byte("key2"), []byte("value2"))) + require.NoError(batch.Put([]byte("key3"), []byte("value3"))) + require.NoError(batch.Write()) - origProof, err := db.GetRangeProof(context.Background(), []byte("k"), []byte("key3"), 10) + origProof, err := db.GetRangeProof(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) require.NoError(err) require.NotNil(origProof) - origRootID := db.root.id - err = origProof.Verify(context.Background(), []byte("k"), []byte("key3"), origRootID) - require.NoError(err) + origRootID := db.rootID + require.NoError(origProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize)) batch = db.NewBatch() - err = batch.Put([]byte("key1"), []byte("other")) - require.NoError(err) - err = batch.Put([]byte("key2"), []byte("other")) - require.NoError(err) - err = batch.Put([]byte("key3"), []byte("other")) - require.NoError(err) - err = batch.Write() - require.NoError(err) - newProof, err := db.GetRangeProofAtRoot(context.Background(), origRootID, []byte("k"), []byte("key3"), 10) + require.NoError(batch.Put([]byte("key1"), []byte("other"))) + require.NoError(batch.Put([]byte("key2"), []byte("other"))) + require.NoError(batch.Put([]byte("key3"), []byte("other"))) + require.NoError(batch.Write()) + newProof, err := db.GetRangeProofAtRoot(context.Background(), origRootID, maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) require.NoError(err) require.NotNil(newProof) - err = newProof.Verify(context.Background(), []byte("k"), []byte("key3"), origRootID) - require.NoError(err) + require.NoError(newProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize)) // revert state to be the same as in orig proof batch = db.NewBatch() - err = batch.Put([]byte("key1"), []byte("value1")) - require.NoError(err) - err = batch.Put([]byte("key2"), 
[]byte("value2")) - require.NoError(err) - err = batch.Put([]byte("key3"), []byte("value3")) - require.NoError(err) - err = batch.Write() - require.NoError(err) + require.NoError(batch.Put([]byte("key1"), []byte("value1"))) + require.NoError(batch.Put([]byte("key2"), []byte("value2"))) + require.NoError(batch.Put([]byte("key3"), []byte("value3"))) + require.NoError(batch.Write()) - newProof, err = db.GetRangeProofAtRoot(context.Background(), origRootID, []byte("k"), []byte("key3"), 10) + newProof, err = db.GetRangeProofAtRoot(context.Background(), origRootID, maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) require.NoError(err) require.NotNil(newProof) - err = newProof.Verify(context.Background(), []byte("k"), []byte("key3"), origRootID) - require.NoError(err) + require.NoError(newProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize)) } func Test_History_ExcessDeletes(t *testing.T) { require := require.New(t) - db, err := New( + db, err := newDB( context.Background(), memdb.New(), - Config{ - Tracer: newNoopTracer(), - HistoryLength: 100, - NodeCacheSize: minCacheSize, - }, + newDefaultConfig(), ) require.NoError(err) batch := db.NewBatch() - err = batch.Put([]byte("key"), []byte("value")) - require.NoError(err) - err = batch.Write() - require.NoError(err) + require.NoError(batch.Put([]byte("key"), []byte("value"))) + require.NoError(batch.Write()) - origProof, err := db.GetRangeProof(context.Background(), []byte("k"), []byte("key3"), 10) + origProof, err := db.GetRangeProof(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) require.NoError(err) require.NotNil(origProof) - origRootID := db.root.id - err = origProof.Verify(context.Background(), []byte("k"), []byte("key3"), origRootID) - require.NoError(err) + origRootID := db.rootID + require.NoError(origProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize)) 
batch = db.NewBatch() - err = batch.Delete([]byte("key1")) - require.NoError(err) - err = batch.Delete([]byte("key2")) - require.NoError(err) - err = batch.Delete([]byte("key3")) - require.NoError(err) - err = batch.Delete([]byte("key4")) - require.NoError(err) - err = batch.Delete([]byte("key5")) - require.NoError(err) - err = batch.Write() - require.NoError(err) - newProof, err := db.GetRangeProofAtRoot(context.Background(), origRootID, []byte("k"), []byte("key3"), 10) + require.NoError(batch.Delete([]byte("key1"))) + require.NoError(batch.Delete([]byte("key2"))) + require.NoError(batch.Delete([]byte("key3"))) + require.NoError(batch.Delete([]byte("key4"))) + require.NoError(batch.Delete([]byte("key5"))) + require.NoError(batch.Write()) + newProof, err := db.GetRangeProofAtRoot(context.Background(), origRootID, maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) require.NoError(err) require.NotNil(newProof) - err = newProof.Verify(context.Background(), []byte("k"), []byte("key3"), origRootID) - require.NoError(err) + require.NoError(newProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize)) } func Test_History_DontIncludeAllNodes(t *testing.T) { require := require.New(t) - db, err := New( + db, err := newDB( context.Background(), memdb.New(), - Config{ - Tracer: newNoopTracer(), - HistoryLength: 100, - NodeCacheSize: minCacheSize, - }, + newDefaultConfig(), ) require.NoError(err) batch := db.NewBatch() - err = batch.Put([]byte("key"), []byte("value")) - require.NoError(err) - err = batch.Write() - require.NoError(err) + require.NoError(batch.Put([]byte("key"), []byte("value"))) + require.NoError(batch.Write()) - origProof, err := db.GetRangeProof(context.Background(), []byte("k"), []byte("key3"), 10) + origProof, err := db.GetRangeProof(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) require.NoError(err) require.NotNil(origProof) - origRootID := db.root.id - err = 
origProof.Verify(context.Background(), []byte("k"), []byte("key3"), origRootID) - require.NoError(err) + origRootID := db.rootID + require.NoError(origProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize)) batch = db.NewBatch() - err = batch.Put([]byte("z"), []byte("z")) - require.NoError(err) - err = batch.Write() - require.NoError(err) - newProof, err := db.GetRangeProofAtRoot(context.Background(), origRootID, []byte("k"), []byte("key3"), 10) + require.NoError(batch.Put([]byte("z"), []byte("z"))) + require.NoError(batch.Write()) + newProof, err := db.GetRangeProofAtRoot(context.Background(), origRootID, maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) require.NoError(err) require.NotNil(newProof) - err = newProof.Verify(context.Background(), []byte("k"), []byte("key3"), origRootID) - require.NoError(err) + require.NoError(newProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize)) } func Test_History_Branching2Nodes(t *testing.T) { require := require.New(t) - db, err := New( + db, err := newDB( context.Background(), memdb.New(), - Config{ - Tracer: newNoopTracer(), - HistoryLength: 100, - NodeCacheSize: minCacheSize, - }, + newDefaultConfig(), ) require.NoError(err) batch := db.NewBatch() - err = batch.Put([]byte("key"), []byte("value")) - require.NoError(err) - err = batch.Write() - require.NoError(err) + require.NoError(batch.Put([]byte("key"), []byte("value"))) + require.NoError(batch.Write()) - origProof, err := db.GetRangeProof(context.Background(), []byte("k"), []byte("key3"), 10) + origProof, err := db.GetRangeProof(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) require.NoError(err) require.NotNil(origProof) - origRootID := db.root.id - err = origProof.Verify(context.Background(), []byte("k"), []byte("key3"), origRootID) - require.NoError(err) + origRootID := db.rootID + 
require.NoError(origProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize)) batch = db.NewBatch() - err = batch.Put([]byte("k"), []byte("v")) - require.NoError(err) - err = batch.Write() - require.NoError(err) - newProof, err := db.GetRangeProofAtRoot(context.Background(), origRootID, []byte("k"), []byte("key3"), 10) + require.NoError(batch.Put([]byte("k"), []byte("v"))) + require.NoError(batch.Write()) + newProof, err := db.GetRangeProofAtRoot(context.Background(), origRootID, maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) require.NoError(err) require.NotNil(newProof) - err = newProof.Verify(context.Background(), []byte("k"), []byte("key3"), origRootID) - require.NoError(err) + require.NoError(newProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize)) } func Test_History_Branching3Nodes(t *testing.T) { require := require.New(t) - db, err := New( + db, err := newDB( context.Background(), memdb.New(), - Config{ - Tracer: newNoopTracer(), - HistoryLength: 100, - NodeCacheSize: minCacheSize, - }, + newDefaultConfig(), ) require.NoError(err) batch := db.NewBatch() - err = batch.Put([]byte("key123"), []byte("value123")) - require.NoError(err) - err = batch.Write() - require.NoError(err) + require.NoError(batch.Put([]byte("key123"), []byte("value123"))) + require.NoError(batch.Write()) - origProof, err := db.GetRangeProof(context.Background(), []byte("k"), []byte("key3"), 10) + origProof, err := db.GetRangeProof(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) require.NoError(err) require.NotNil(origProof) - origRootID := db.root.id - err = origProof.Verify(context.Background(), []byte("k"), []byte("key3"), origRootID) - require.NoError(err) + origRootID := db.rootID + require.NoError(origProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize)) batch = 
db.NewBatch() - err = batch.Put([]byte("key321"), []byte("value321")) - require.NoError(err) - err = batch.Write() - require.NoError(err) - newProof, err := db.GetRangeProofAtRoot(context.Background(), origRootID, []byte("k"), []byte("key3"), 10) + require.NoError(batch.Put([]byte("key321"), []byte("value321"))) + require.NoError(batch.Write()) + newProof, err := db.GetRangeProofAtRoot(context.Background(), origRootID, maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) require.NoError(err) require.NotNil(newProof) - err = newProof.Verify(context.Background(), []byte("k"), []byte("key3"), origRootID) - require.NoError(err) + require.NoError(newProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize)) } func Test_History_MaxLength(t *testing.T) { require := require.New(t) - db, err := New( + config := newDefaultConfig() + config.HistoryLength = 2 + db, err := newDB( context.Background(), memdb.New(), - Config{ - Tracer: newNoopTracer(), - HistoryLength: 2, - NodeCacheSize: 1000, - }, + config, ) require.NoError(err) batch := db.NewBatch() - err = batch.Put([]byte("key"), []byte("value")) - require.NoError(err) - err = batch.Write() - require.NoError(err) + require.NoError(batch.Put([]byte("key"), []byte("value"))) + require.NoError(batch.Write()) oldRoot, err := db.GetMerkleRoot(context.Background()) require.NoError(err) batch = db.NewBatch() - err = batch.Put([]byte("k"), []byte("v")) - require.NoError(err) - err = batch.Write() - require.NoError(err) + require.NoError(batch.Put([]byte("k"), []byte("v"))) + require.NoError(batch.Write()) require.Contains(db.history.lastChanges, oldRoot) batch = db.NewBatch() - err = batch.Put([]byte("k1"), []byte("v2")) - require.NoError(err) - err = batch.Write() - require.NoError(err) + require.NoError(batch.Put([]byte("k1"), []byte("v2"))) // Overwrites oldest element in history + require.NoError(batch.Write()) require.NotContains(db.history.lastChanges, oldRoot) } @@ 
-633,66 +514,59 @@ func Test_History_MaxLength(t *testing.T) { func Test_Change_List(t *testing.T) { require := require.New(t) - db, err := New( + db, err := newDB( context.Background(), memdb.New(), - Config{ - Tracer: newNoopTracer(), - HistoryLength: 100, - NodeCacheSize: minCacheSize, - }, + newDefaultConfig(), ) require.NoError(err) - batch := db.NewBatch() - err = batch.Put([]byte("key20"), []byte("value20")) - require.NoError(err) - err = batch.Put([]byte("key21"), []byte("value21")) - require.NoError(err) - err = batch.Put([]byte("key22"), []byte("value22")) - require.NoError(err) - err = batch.Put([]byte("key23"), []byte("value23")) - require.NoError(err) - err = batch.Put([]byte("key24"), []byte("value24")) - require.NoError(err) - err = batch.Write() + + emptyRoot, err := db.GetMerkleRoot(context.Background()) require.NoError(err) + + batch := db.NewBatch() + require.NoError(batch.Put([]byte("key20"), []byte("value20"))) + require.NoError(batch.Put([]byte("key21"), []byte("value21"))) + require.NoError(batch.Put([]byte("key22"), []byte("value22"))) + require.NoError(batch.Put([]byte("key23"), []byte("value23"))) + require.NoError(batch.Put([]byte("key24"), []byte("value24"))) + require.NoError(batch.Write()) startRoot, err := db.GetMerkleRoot(context.Background()) require.NoError(err) - batch = db.NewBatch() - err = batch.Put([]byte("key25"), []byte("value25")) - require.NoError(err) - err = batch.Put([]byte("key26"), []byte("value26")) - require.NoError(err) - err = batch.Put([]byte("key27"), []byte("value27")) - require.NoError(err) - err = batch.Put([]byte("key28"), []byte("value28")) - require.NoError(err) - err = batch.Put([]byte("key29"), []byte("value29")) - require.NoError(err) - err = batch.Write() + changes, err := db.history.getValueChanges(emptyRoot, startRoot, maybe.Nothing[[]byte](), maybe.Nothing[[]byte](), 100) require.NoError(err) + require.Len(changes.values, 5) batch = db.NewBatch() - err = batch.Put([]byte("key30"), []byte("value30")) 
- require.NoError(err) - err = batch.Put([]byte("key31"), []byte("value31")) - require.NoError(err) - err = batch.Put([]byte("key32"), []byte("value32")) - require.NoError(err) - err = batch.Delete([]byte("key21")) - require.NoError(err) - err = batch.Delete([]byte("key22")) + require.NoError(batch.Put([]byte("key25"), []byte("value25"))) + require.NoError(batch.Put([]byte("key26"), []byte("value26"))) + require.NoError(batch.Put([]byte("key27"), []byte("value27"))) + require.NoError(batch.Put([]byte("key28"), []byte("value28"))) + require.NoError(batch.Put([]byte("key29"), []byte("value29"))) + require.NoError(batch.Write()) + + endRoot, err := db.GetMerkleRoot(context.Background()) require.NoError(err) - err = batch.Write() + + changes, err = db.history.getValueChanges(startRoot, endRoot, maybe.Nothing[[]byte](), maybe.Nothing[[]byte](), 100) require.NoError(err) + require.Len(changes.values, 5) - endRoot, err := db.GetMerkleRoot(context.Background()) + batch = db.NewBatch() + require.NoError(batch.Put([]byte("key30"), []byte("value30"))) + require.NoError(batch.Put([]byte("key31"), []byte("value31"))) + require.NoError(batch.Put([]byte("key32"), []byte("value32"))) + require.NoError(batch.Delete([]byte("key21"))) + require.NoError(batch.Delete([]byte("key22"))) + require.NoError(batch.Write()) + + endRoot, err = db.GetMerkleRoot(context.Background()) require.NoError(err) - changes, err := db.history.getValueChanges(startRoot, endRoot, nil, nil, 8) + changes, err = db.history.getValueChanges(startRoot, endRoot, maybe.Nothing[[]byte](), maybe.Nothing[[]byte](), 8) require.NoError(err) - require.Equal(8, len(changes.values)) + require.Len(changes.values, 8) } func TestHistoryRecord(t *testing.T) { @@ -706,13 +580,15 @@ func TestHistoryRecord(t *testing.T) { changes = append(changes, &changeSummary{rootID: ids.GenerateTestID()}) th.record(changes[i]) - require.Equal(uint64(i+1), th.nextIndex) + require.Equal(uint64(i+1), th.nextInsertNumber) require.Equal(i+1, 
th.history.Len()) require.Len(th.lastChanges, i+1) require.Contains(th.lastChanges, changes[i].rootID) changeAndIndex := th.lastChanges[changes[i].rootID] - require.Equal(uint64(i), changeAndIndex.index) - require.True(th.history.Has(changeAndIndex)) + require.Equal(uint64(i), changeAndIndex.insertNumber) + got, ok := th.history.Index(int(changeAndIndex.insertNumber)) + require.True(ok) + require.Equal(changes[i], got.changeSummary) } // history is [changes[0], changes[1], changes[2]] @@ -720,18 +596,21 @@ func TestHistoryRecord(t *testing.T) { change3 := &changeSummary{rootID: ids.GenerateTestID()} th.record(change3) // history is [changes[1], changes[2], change3] - require.Equal(uint64(maxHistoryLen+1), th.nextIndex) + require.Equal(uint64(maxHistoryLen+1), th.nextInsertNumber) require.Equal(maxHistoryLen, th.history.Len()) require.Len(th.lastChanges, maxHistoryLen) require.Contains(th.lastChanges, change3.rootID) changeAndIndex := th.lastChanges[change3.rootID] - require.Equal(uint64(maxHistoryLen), changeAndIndex.index) - require.True(th.history.Has(changeAndIndex)) + require.Equal(uint64(maxHistoryLen), changeAndIndex.insertNumber) + got, ok := th.history.PeekRight() + require.True(ok) + require.Equal(change3, got.changeSummary) - // Make sure the oldest change was evicted + // Make sure the oldest change was evicted require.NotContains(th.lastChanges, changes[0].rootID) - minChange, _ := th.history.Min() - require.Equal(uint64(1), minChange.index) + oldestChange, ok := th.history.PeekLeft() + require.True(ok) + require.Equal(uint64(1), oldestChange.insertNumber) // Add another change which was the same root ID as changes[2] change4 := &changeSummary{rootID: changes[2].rootID} @@ -747,18 +626,21 @@ func TestHistoryRecord(t *testing.T) { require.Len(th.lastChanges, maxHistoryLen) require.Contains(th.lastChanges, changes[2].rootID) changeAndIndex = th.lastChanges[changes[2].rootID] - require.Equal(uint64(maxHistoryLen+1), changeAndIndex.index) + 
require.Equal(uint64(maxHistoryLen+1), changeAndIndex.insertNumber) // Make sure [t.history] is right. require.Equal(maxHistoryLen, th.history.Len()) - got, _ := th.history.DeleteMin() - require.Equal(uint64(maxHistoryLen), got.index) + got, ok = th.history.PopLeft() + require.True(ok) + require.Equal(uint64(maxHistoryLen), got.insertNumber) require.Equal(change3.rootID, got.rootID) - got, _ = th.history.DeleteMin() - require.Equal(uint64(maxHistoryLen+1), got.index) + got, ok = th.history.PopLeft() + require.True(ok) + require.Equal(uint64(maxHistoryLen+1), got.insertNumber) require.Equal(change4.rootID, got.rootID) - got, _ = th.history.DeleteMin() - require.Equal(uint64(maxHistoryLen+2), got.index) + got, ok = th.history.PopLeft() + require.True(ok) + require.Equal(uint64(maxHistoryLen+2), got.insertNumber) require.Equal(change5.rootID, got.rootID) } @@ -770,16 +652,19 @@ func TestHistoryGetChangesToRoot(t *testing.T) { for i := 0; i < maxHistoryLen; i++ { // Fill the history changes = append(changes, &changeSummary{ rootID: ids.GenerateTestID(), - nodes: map[path]*change[*node]{ - newPath([]byte{byte(i)}): { - before: &node{id: ids.GenerateTestID()}, - after: &node{id: ids.GenerateTestID()}, + rootChange: change[maybe.Maybe[*node]]{ + before: maybe.Some(&node{}), + }, + nodes: map[Key]*change[*node]{ + ToKey([]byte{byte(i)}): { + before: &node{}, + after: &node{}, }, }, - values: map[path]*change[Maybe[[]byte]]{ - newPath([]byte{byte(i)}): { - before: Some([]byte{byte(i)}), - after: Some([]byte{byte(i + 1)}), + values: map[Key]*change[maybe.Maybe[[]byte]]{ + ToKey([]byte{byte(i)}): { + before: maybe.Some([]byte{byte(i)}), + after: maybe.Some([]byte{byte(i + 1)}), }, }, }) @@ -789,8 +674,8 @@ func TestHistoryGetChangesToRoot(t *testing.T) { type test struct { name string rootID ids.ID - start []byte - end []byte + start maybe.Maybe[[]byte] + end maybe.Maybe[[]byte] validateFunc func(*require.Assertions, *changeSummary) expectedErr error } @@ -799,13 +684,14 @@ 
func TestHistoryGetChangesToRoot(t *testing.T) { { name: "unknown root ID", rootID: ids.GenerateTestID(), - expectedErr: ErrRootIDNotPresent, + expectedErr: ErrInsufficientHistory, }, { name: "most recent change", rootID: changes[maxHistoryLen-1].rootID, validateFunc: func(require *require.Assertions, got *changeSummary) { - require.Equal(newChangeSummary(defaultPreallocationSize), got) + expected := newChangeSummary(defaultPreallocationSize) + require.Equal(expected, got) }, }, { @@ -816,7 +702,7 @@ func TestHistoryGetChangesToRoot(t *testing.T) { require.Len(got.nodes, 1) require.Len(got.values, 1) reversedChanges := changes[maxHistoryLen-1] - removedKey := newPath([]byte{byte(maxHistoryLen - 1)}) + removedKey := ToKey([]byte{byte(maxHistoryLen - 1)}) require.Equal(reversedChanges.nodes[removedKey].before, got.nodes[removedKey].after) require.Equal(reversedChanges.values[removedKey].before, got.values[removedKey].after) require.Equal(reversedChanges.values[removedKey].after, got.values[removedKey].before) @@ -829,12 +715,12 @@ func TestHistoryGetChangesToRoot(t *testing.T) { require.Len(got.nodes, 2) require.Len(got.values, 2) reversedChanges1 := changes[maxHistoryLen-1] - removedKey1 := newPath([]byte{byte(maxHistoryLen - 1)}) + removedKey1 := ToKey([]byte{byte(maxHistoryLen - 1)}) require.Equal(reversedChanges1.nodes[removedKey1].before, got.nodes[removedKey1].after) require.Equal(reversedChanges1.values[removedKey1].before, got.values[removedKey1].after) require.Equal(reversedChanges1.values[removedKey1].after, got.values[removedKey1].before) reversedChanges2 := changes[maxHistoryLen-2] - removedKey2 := newPath([]byte{byte(maxHistoryLen - 2)}) + removedKey2 := ToKey([]byte{byte(maxHistoryLen - 2)}) require.Equal(reversedChanges2.nodes[removedKey2].before, got.nodes[removedKey2].after) require.Equal(reversedChanges2.values[removedKey2].before, got.values[removedKey2].after) require.Equal(reversedChanges2.values[removedKey2].after, 
got.values[removedKey2].before) @@ -843,32 +729,32 @@ func TestHistoryGetChangesToRoot(t *testing.T) { { name: "third most recent change with start filter", rootID: changes[maxHistoryLen-3].rootID, - start: []byte{byte(maxHistoryLen - 1)}, // Omit values from second most recent change + start: maybe.Some([]byte{byte(maxHistoryLen - 1)}), // Omit values from second most recent change validateFunc: func(require *require.Assertions, got *changeSummary) { require.Len(got.nodes, 2) require.Len(got.values, 1) reversedChanges1 := changes[maxHistoryLen-1] - removedKey1 := newPath([]byte{byte(maxHistoryLen - 1)}) + removedKey1 := ToKey([]byte{byte(maxHistoryLen - 1)}) require.Equal(reversedChanges1.nodes[removedKey1].before, got.nodes[removedKey1].after) require.Equal(reversedChanges1.values[removedKey1].before, got.values[removedKey1].after) require.Equal(reversedChanges1.values[removedKey1].after, got.values[removedKey1].before) reversedChanges2 := changes[maxHistoryLen-2] - removedKey2 := newPath([]byte{byte(maxHistoryLen - 2)}) + removedKey2 := ToKey([]byte{byte(maxHistoryLen - 2)}) require.Equal(reversedChanges2.nodes[removedKey2].before, got.nodes[removedKey2].after) }, }, { name: "third most recent change with end filter", rootID: changes[maxHistoryLen-3].rootID, - end: []byte{byte(maxHistoryLen - 2)}, // Omit values from most recent change + end: maybe.Some([]byte{byte(maxHistoryLen - 2)}), // Omit values from most recent change validateFunc: func(require *require.Assertions, got *changeSummary) { require.Len(got.nodes, 2) require.Len(got.values, 1) reversedChanges1 := changes[maxHistoryLen-1] - removedKey1 := newPath([]byte{byte(maxHistoryLen - 1)}) + removedKey1 := ToKey([]byte{byte(maxHistoryLen - 1)}) require.Equal(reversedChanges1.nodes[removedKey1].before, got.nodes[removedKey1].after) reversedChanges2 := changes[maxHistoryLen-2] - removedKey2 := newPath([]byte{byte(maxHistoryLen - 2)}) + removedKey2 := ToKey([]byte{byte(maxHistoryLen - 2)}) 
require.Equal(reversedChanges2.nodes[removedKey2].before, got.nodes[removedKey2].after) require.Equal(reversedChanges2.values[removedKey2].before, got.values[removedKey2].after) require.Equal(reversedChanges2.values[removedKey2].after, got.values[removedKey2].before) @@ -882,7 +768,7 @@ func TestHistoryGetChangesToRoot(t *testing.T) { got, err := history.getChangesToGetToRoot(tt.rootID, tt.start, tt.end) require.ErrorIs(err, tt.expectedErr) - if err != nil { + if tt.expectedErr != nil { return } tt.validateFunc(require, got) diff --git a/avalanchego/x/merkledb/intermediate_node_db.go b/avalanchego/x/merkledb/intermediate_node_db.go new file mode 100644 index 00000000..e57dcb31 --- /dev/null +++ b/avalanchego/x/merkledb/intermediate_node_db.go @@ -0,0 +1,180 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package merkledb + +import ( + "sync" + + "github.com/ava-labs/avalanchego/cache" + "github.com/ava-labs/avalanchego/database" +) + +const defaultBufferLength = 256 + +// Holds intermediate nodes. That is, those without values. +// Changes to this database aren't written to [baseDB] until +// they're evicted from the [nodeCache] or Flush is called. +type intermediateNodeDB struct { + // Holds unused []byte + bufferPool *sync.Pool + + // The underlying storage. + // Keys written to [baseDB] are prefixed with [intermediateNodePrefix]. + baseDB database.Database + + // The write buffer contains nodes that have been changed but have not been written to disk. + // Note that a call to Put may cause a node to be evicted + // from the cache, which will call [OnEviction]. + // A non-nil error returned from Put is considered fatal. + // Keys in [nodeCache] aren't prefixed with [intermediateNodePrefix]. + writeBuffer onEvictCache[Key, *node] + + // If a value is nil, the corresponding key isn't in the trie. 
+ nodeCache cache.Cacher[Key, *node] + + // the number of bytes to evict during an eviction batch + evictionBatchSize int + metrics merkleMetrics + tokenSize int +} + +func newIntermediateNodeDB( + db database.Database, + bufferPool *sync.Pool, + metrics merkleMetrics, + cacheSize int, + writeBufferSize int, + evictionBatchSize int, + tokenSize int, +) *intermediateNodeDB { + result := &intermediateNodeDB{ + metrics: metrics, + baseDB: db, + bufferPool: bufferPool, + evictionBatchSize: evictionBatchSize, + tokenSize: tokenSize, + nodeCache: cache.NewSizedLRU(cacheSize, cacheEntrySize), + } + result.writeBuffer = newOnEvictCache( + writeBufferSize, + cacheEntrySize, + result.onEviction, + ) + + return result +} + +// A non-nil error is considered fatal and closes [db.baseDB]. +func (db *intermediateNodeDB) onEviction(key Key, n *node) error { + writeBatch := db.baseDB.NewBatch() + totalSize := cacheEntrySize(key, n) + if err := db.addToBatch(writeBatch, key, n); err != nil { + _ = db.baseDB.Close() + return err + } + + // Evict the oldest [evictionBatchSize] nodes from the cache + // and write them to disk. We write a batch of them, rather than + // just [n], so that we don't immediately evict and write another + // node, because each time this method is called we do a disk write. + // Evicts a total number of bytes, rather than a number of nodes + for totalSize < db.evictionBatchSize { + key, n, exists := db.writeBuffer.removeOldest() + if !exists { + // The cache is empty. 
+ break + } + totalSize += cacheEntrySize(key, n) + if err := db.addToBatch(writeBatch, key, n); err != nil { + _ = db.baseDB.Close() + return err + } + } + if err := writeBatch.Write(); err != nil { + _ = db.baseDB.Close() + return err + } + return nil +} + +func (db *intermediateNodeDB) addToBatch(b database.Batch, key Key, n *node) error { + dbKey := db.constructDBKey(key) + defer db.bufferPool.Put(dbKey) + db.metrics.DatabaseNodeWrite() + if n == nil { + return b.Delete(dbKey) + } + return b.Put(dbKey, n.bytes()) +} + +func (db *intermediateNodeDB) Get(key Key) (*node, error) { + if cachedValue, isCached := db.nodeCache.Get(key); isCached { + db.metrics.IntermediateNodeCacheHit() + if cachedValue == nil { + return nil, database.ErrNotFound + } + return cachedValue, nil + } + db.metrics.IntermediateNodeCacheMiss() + + if cachedValue, isCached := db.writeBuffer.Get(key); isCached { + db.metrics.IntermediateNodeCacheHit() + if cachedValue == nil { + return nil, database.ErrNotFound + } + return cachedValue, nil + } + db.metrics.IntermediateNodeCacheMiss() + + dbKey := db.constructDBKey(key) + db.metrics.DatabaseNodeRead() + nodeBytes, err := db.baseDB.Get(dbKey) + if err != nil { + return nil, err + } + db.bufferPool.Put(dbKey) + + return parseNode(key, nodeBytes) +} + +// constructDBKey returns a key that can be used in [db.baseDB]. +// We need to be able to differentiate between two keys of equal +// byte length but different bit length, so we add padding to differentiate. +// Additionally, we add a prefix indicating it is part of the intermediateNodeDB. 
+func (db *intermediateNodeDB) constructDBKey(key Key) []byte { + if db.tokenSize == 8 { + // For tokens of size byte, no padding is needed since byte length == token length + return addPrefixToKey(db.bufferPool, intermediateNodePrefix, key.Bytes()) + } + + return addPrefixToKey(db.bufferPool, intermediateNodePrefix, key.Extend(ToToken(1, db.tokenSize)).Bytes()) +} + +func (db *intermediateNodeDB) Put(key Key, n *node) error { + db.nodeCache.Put(key, n) + return db.writeBuffer.Put(key, n) +} + +func (db *intermediateNodeDB) Flush() error { + db.nodeCache.Flush() + return db.writeBuffer.Flush() +} + +func (db *intermediateNodeDB) Delete(key Key) error { + db.nodeCache.Put(key, nil) + return db.writeBuffer.Put(key, nil) +} + +func (db *intermediateNodeDB) Clear() error { + db.nodeCache.Flush() + + // Reset the buffer. Note we don't flush because that would cause us to + // persist intermediate nodes we're about to delete. + db.writeBuffer = newOnEvictCache( + db.writeBuffer.maxSize, + db.writeBuffer.size, + db.writeBuffer.onEviction, + ) + return database.AtomicClearPrefix(db.baseDB, db.baseDB, intermediateNodePrefix) +} diff --git a/avalanchego/x/merkledb/intermediate_node_db_test.go b/avalanchego/x/merkledb/intermediate_node_db_test.go new file mode 100644 index 00000000..26ad722f --- /dev/null +++ b/avalanchego/x/merkledb/intermediate_node_db_test.go @@ -0,0 +1,294 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package merkledb + +import ( + "sync" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/database/memdb" + "github.com/ava-labs/avalanchego/utils/maybe" +) + +// Tests: +// * Putting a key-node pair in the database +// * Getting a key-node pair from the cache and from the base db +// * Deleting a key-node pair from the database +// * Evicting elements from the cache +// * Flushing the cache +func Test_IntermediateNodeDB(t *testing.T) { + require := require.New(t) + + n := newNode(ToKey([]byte{0x00})) + n.setValue(maybe.Some([]byte{byte(0x02)})) + nodeSize := cacheEntrySize(n.key, n) + + // use exact multiple of node size so require.Equal(1, db.nodeCache.fifo.Len()) is correct later + cacheSize := nodeSize * 100 + bufferSize := nodeSize * 20 + + evictionBatchSize := bufferSize + baseDB := memdb.New() + db := newIntermediateNodeDB( + baseDB, + &sync.Pool{ + New: func() interface{} { return make([]byte, 0) }, + }, + &mockMetrics{}, + cacheSize, + bufferSize, + evictionBatchSize, + 4, + ) + + // Put a key-node pair + node1Key := ToKey([]byte{0x01}) + node1 := newNode(node1Key) + node1.setValue(maybe.Some([]byte{byte(0x01)})) + require.NoError(db.Put(node1Key, node1)) + + // Get the key-node pair from cache + node1Read, err := db.Get(node1Key) + require.NoError(err) + require.Equal(node1, node1Read) + + // Overwrite the key-node pair + node1Updated := newNode(node1Key) + node1Updated.setValue(maybe.Some([]byte{byte(0x02)})) + require.NoError(db.Put(node1Key, node1Updated)) + + // Assert the key-node pair was overwritten + node1Read, err = db.Get(node1Key) + require.NoError(err) + require.Equal(node1Updated, node1Read) + + // Delete the key-node pair + require.NoError(db.Delete(node1Key)) + _, err = db.Get(node1Key) + + // Assert the key-node pair was deleted + require.Equal(database.ErrNotFound, err) + + // Put elements in the cache until it is full. 
+ expectedSize := 0 + added := 0 + for { + key := ToKey([]byte{byte(added)}) + node := newNode(Key{}) + node.setValue(maybe.Some([]byte{byte(added)})) + newExpectedSize := expectedSize + cacheEntrySize(key, node) + if newExpectedSize > bufferSize { + // Don't trigger eviction. + break + } + + require.NoError(db.Put(key, node)) + expectedSize = newExpectedSize + added++ + } + + // Assert cache has expected number of elements + require.Equal(added, db.writeBuffer.fifo.Len()) + + // Put one more element in the cache, which should trigger an eviction + // of all but 1 element (bufferSize is an exact multiple of the node + // size; see the sizing note above). + key := ToKey([]byte{byte(added)}) + node := newNode(Key{}) + node.setValue(maybe.Some([]byte{byte(added)})) + require.NoError(db.Put(key, node)) + + // Assert cache has expected number of elements + require.Equal(1, db.writeBuffer.fifo.Len()) + gotKey, _, ok := db.writeBuffer.fifo.Oldest() + require.True(ok) + require.Equal(ToKey([]byte{byte(added)}), gotKey) + + // Get a node from the base database + // Use an early key that has been evicted from the cache + _, inCache := db.writeBuffer.Get(node1Key) + require.False(inCache) + nodeRead, err := db.Get(node1Key) + require.NoError(err) + require.Equal(maybe.Some([]byte{0x01}), nodeRead.value) + + // Flush the cache. + require.NoError(db.Flush()) + + // Assert the cache is empty + require.Zero(db.writeBuffer.fifo.Len()) + + // Assert the evicted cache elements were written to disk with prefix. 
+ it := baseDB.NewIteratorWithPrefix(intermediateNodePrefix) + defer it.Release() + + count := 0 + for it.Next() { + count++ + } + require.NoError(it.Error()) + require.Equal(added+1, count) +} + +func FuzzIntermediateNodeDBConstructDBKey(f *testing.F) { + bufferSize := 200 + cacheSize := 200 + evictionBatchSize := bufferSize + baseDB := memdb.New() + + f.Fuzz(func( + t *testing.T, + key []byte, + tokenLength uint, + ) { + require := require.New(t) + for _, tokenSize := range validTokenSizes { + db := newIntermediateNodeDB( + baseDB, + &sync.Pool{ + New: func() interface{} { return make([]byte, 0) }, + }, + &mockMetrics{}, + cacheSize, + bufferSize, + evictionBatchSize, + tokenSize, + ) + + p := ToKey(key) + uBitLength := tokenLength * uint(tokenSize) + if uBitLength >= uint(p.length) { + t.SkipNow() + } + p = p.Take(int(uBitLength)) + constructedKey := db.constructDBKey(p) + baseLength := len(p.value) + len(intermediateNodePrefix) + require.Equal(intermediateNodePrefix, constructedKey[:len(intermediateNodePrefix)]) + switch { + case tokenSize == 8: + // for keys with tokens of size byte, no padding is added + require.Equal(p.Bytes(), constructedKey[len(intermediateNodePrefix):]) + case p.hasPartialByte(): + require.Len(constructedKey, baseLength) + require.Equal(p.Extend(ToToken(1, tokenSize)).Bytes(), constructedKey[len(intermediateNodePrefix):]) + default: + // when a whole number of bytes, there is an extra padding byte + require.Len(constructedKey, baseLength+1) + require.Equal(p.Extend(ToToken(1, tokenSize)).Bytes(), constructedKey[len(intermediateNodePrefix):]) + } + } + }) +} + +func Test_IntermediateNodeDB_ConstructDBKey_DirtyBuffer(t *testing.T) { + require := require.New(t) + cacheSize := 200 + bufferSize := 200 + evictionBatchSize := bufferSize + baseDB := memdb.New() + db := newIntermediateNodeDB( + baseDB, + &sync.Pool{ + New: func() interface{} { return make([]byte, 0) }, + }, + &mockMetrics{}, + cacheSize, + bufferSize, + evictionBatchSize, + 4, + ) 
+ + db.bufferPool.Put([]byte{0xFF, 0xFF, 0xFF}) + constructedKey := db.constructDBKey(ToKey([]byte{})) + require.Len(constructedKey, 2) + require.Equal(intermediateNodePrefix, constructedKey[:len(intermediateNodePrefix)]) + require.Equal(byte(16), constructedKey[len(constructedKey)-1]) + + db.bufferPool = &sync.Pool{ + New: func() interface{} { + return make([]byte, 0, defaultBufferLength) + }, + } + db.bufferPool.Put([]byte{0xFF, 0xFF, 0xFF}) + p := ToKey([]byte{0xF0}).Take(4) + constructedKey = db.constructDBKey(p) + require.Len(constructedKey, 2) + require.Equal(intermediateNodePrefix, constructedKey[:len(intermediateNodePrefix)]) + require.Equal(p.Extend(ToToken(1, 4)).Bytes(), constructedKey[len(intermediateNodePrefix):]) +} + +func TestIntermediateNodeDBClear(t *testing.T) { + require := require.New(t) + cacheSize := 200 + bufferSize := 200 + evictionBatchSize := bufferSize + baseDB := memdb.New() + db := newIntermediateNodeDB( + baseDB, + &sync.Pool{ + New: func() interface{} { return make([]byte, 0) }, + }, + &mockMetrics{}, + cacheSize, + bufferSize, + evictionBatchSize, + 4, + ) + + for _, b := range [][]byte{{1}, {2}, {3}} { + require.NoError(db.Put(ToKey(b), newNode(ToKey(b)))) + } + + require.NoError(db.Clear()) + + iter := baseDB.NewIteratorWithPrefix(intermediateNodePrefix) + defer iter.Release() + require.False(iter.Next()) + + require.Zero(db.writeBuffer.currentSize) +} + +// Test that deleting the empty key and flushing works correctly. +// Previously, there was a bug that occurred when deleting the empty key +// if the cache was empty. The size of the cache entry was reported as 0, +// which caused the cache's currentSize to be 0, so on resize() we didn't +// call onEviction. This caused the empty key to not be deleted from the baseDB. 
+func TestIntermediateNodeDBDeleteEmptyKey(t *testing.T) { + require := require.New(t) + cacheSize := 200 + bufferSize := 200 + evictionBatchSize := bufferSize + baseDB := memdb.New() + db := newIntermediateNodeDB( + baseDB, + &sync.Pool{ + New: func() interface{} { return make([]byte, 0) }, + }, + &mockMetrics{}, + cacheSize, + bufferSize, + evictionBatchSize, + 4, + ) + + emptyKey := ToKey([]byte{}) + require.NoError(db.Put(emptyKey, newNode(emptyKey))) + require.NoError(db.Flush()) + + emptyDBKey := db.constructDBKey(emptyKey) + has, err := baseDB.Has(emptyDBKey) + require.NoError(err) + require.True(has) + + require.NoError(db.Delete(ToKey([]byte{}))) + require.NoError(db.Flush()) + + emptyDBKey = db.constructDBKey(emptyKey) + has, err = baseDB.Has(emptyDBKey) + require.NoError(err) + require.False(has) +} diff --git a/avalanchego/x/merkledb/iterator.go b/avalanchego/x/merkledb/iterator.go deleted file mode 100644 index ad235b7d..00000000 --- a/avalanchego/x/merkledb/iterator.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package merkledb - -import "github.com/ava-labs/avalanchego/database" - -var _ database.Iterator = &iterator{} - -type iterator struct { - db *Database - nodeIter database.Iterator - current *node - err error -} - -func (i *iterator) Error() error { - if i.err != nil { - return i.err - } - return i.nodeIter.Error() -} - -func (i *iterator) Key() []byte { - if i.current == nil { - return nil - } - return i.current.key.Serialize().Value -} - -func (i *iterator) Value() []byte { - if i.current == nil { - return nil - } - return i.current.value.value -} - -func (i *iterator) Next() bool { - i.current = nil - if i.err != nil { - return false - } - for i.nodeIter.Next() { - i.db.metrics.IOKeyRead() - n, err := parseNode(path(i.nodeIter.Key()), i.nodeIter.Value()) - if err != nil { - i.err = err - return false - } - if n.hasValue() { - i.current = n - return true - } - } - if i.err == nil { - i.err = i.nodeIter.Error() - } - return false -} - -func (i *iterator) Release() { - i.nodeIter.Release() -} diff --git a/avalanchego/x/merkledb/key.go b/avalanchego/x/merkledb/key.go new file mode 100644 index 00000000..524c95bb --- /dev/null +++ b/avalanchego/x/merkledb/key.go @@ -0,0 +1,334 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package merkledb + +import ( + "cmp" + "errors" + "fmt" + "slices" + "strings" + "unsafe" + + "golang.org/x/exp/maps" +) + +var ( + ErrInvalidBranchFactor = errors.New("branch factor must match one of the predefined branch factors") + + BranchFactorToTokenSize = map[BranchFactor]int{ + BranchFactor2: 1, + BranchFactor4: 2, + BranchFactor16: 4, + BranchFactor256: 8, + } + + tokenSizeToBranchFactor = map[int]BranchFactor{ + 1: BranchFactor2, + 2: BranchFactor4, + 4: BranchFactor16, + 8: BranchFactor256, + } + + validTokenSizes = maps.Keys(tokenSizeToBranchFactor) + + validBranchFactors = []BranchFactor{ + BranchFactor2, + BranchFactor4, + BranchFactor16, + BranchFactor256, + } +) + +type BranchFactor int + +const ( + BranchFactor2 = BranchFactor(2) + BranchFactor4 = BranchFactor(4) + BranchFactor16 = BranchFactor(16) + BranchFactor256 = BranchFactor(256) +) + +// Valid checks if BranchFactor [b] is one of the predefined valid options for BranchFactor +func (b BranchFactor) Valid() error { + for _, validBF := range validBranchFactors { + if validBF == b { + return nil + } + } + return fmt.Errorf("%w: %d", ErrInvalidBranchFactor, b) +} + +// ToToken creates a key version of the passed byte with bit length equal to tokenSize +func ToToken(val byte, tokenSize int) Key { + return Key{ + value: string([]byte{val << dualBitIndex(tokenSize)}), + length: tokenSize, + } +} + +// Token returns the token at the specified index, +// Assumes that bitIndex + tokenSize doesn't cross a byte boundary +func (k Key) Token(bitIndex int, tokenSize int) byte { + storageByte := k.value[bitIndex/8] + // Shift the byte right to get the last bit to the rightmost position. + storageByte >>= dualBitIndex((bitIndex + tokenSize) % 8) + // Apply a mask to remove any other bits in the byte. 
+ return storageByte & (0xFF >> dualBitIndex(tokenSize)) +} + +// iteratedHasPrefix checks if the provided prefix key is a prefix of the current key starting after the [bitsOffset]th bit +// this has better performance than constructing the actual key via Skip() then calling HasPrefix because it avoids an allocation +func (k Key) iteratedHasPrefix(prefix Key, bitsOffset int, tokenSize int) bool { + if k.length-bitsOffset < prefix.length { + return false + } + for i := 0; i < prefix.length; i += tokenSize { + if k.Token(bitsOffset+i, tokenSize) != prefix.Token(i, tokenSize) { + return false + } + } + return true +} + +type Key struct { + // The number of bits in the key. + length int + // The string representation of the key + value string +} + +// ToKey returns [keyBytes] as a new key +// Assumes all bits of the keyBytes are part of the Key, call Key.Take if that is not the case +// Creates a copy of [keyBytes], so keyBytes are safe to edit after the call +func ToKey(keyBytes []byte) Key { + return toKey(slices.Clone(keyBytes)) +} + +// toKey returns [keyBytes] as a new key +// Assumes all bits of the keyBytes are part of the Key, call Key.Take if that is not the case +// Caller must not modify [keyBytes] after this call. +func toKey(keyBytes []byte) Key { + return Key{ + value: byteSliceToString(keyBytes), + length: len(keyBytes) * 8, + } +} + +// hasPartialByte returns true iff the key fits into a non-whole number of bytes +func (k Key) hasPartialByte() bool { + return k.length%8 > 0 +} + +// HasPrefix returns true iff [prefix] is a prefix of [k] or equal to it. +func (k Key) HasPrefix(prefix Key) bool { + // [prefix] must be shorter than [k] to be a prefix. + if k.length < prefix.length { + return false + } + + // The number of tokens in the last byte of [prefix], or zero + // if [prefix] fits into a whole number of bytes. 
+ remainderBitCount := prefix.length % 8 + if remainderBitCount == 0 { + return strings.HasPrefix(k.value, prefix.value) + } + + // check that the tokens in the partially filled final byte of [prefix] are + // equal to the tokens in the final byte of [k]. + remainderBitsMask := byte(0xFF >> remainderBitCount) + prefixRemainderTokens := prefix.value[len(prefix.value)-1] | remainderBitsMask + remainderTokens := k.value[len(prefix.value)-1] | remainderBitsMask + + if prefixRemainderTokens != remainderTokens { + return false + } + + // Note that this will never be an index OOB because len(prefix.value) > 0. + // If len(prefix.value) == 0 were true, [remainderTokens] would be 0, so we + // would have returned above. + prefixWithoutPartialByte := prefix.value[:len(prefix.value)-1] + return strings.HasPrefix(k.value, prefixWithoutPartialByte) +} + +// HasStrictPrefix returns true iff [prefix] is a prefix of [k] +// but is not equal to it. +func (k Key) HasStrictPrefix(prefix Key) bool { + return k != prefix && k.HasPrefix(prefix) +} + +// Length returns the number of bits in the Key +func (k Key) Length() int { + return k.length +} + +// Greater returns true if current Key is greater than other Key +func (k Key) Greater(other Key) bool { + return k.Compare(other) == 1 +} + +// Less will return true if current Key is less than other Key +func (k Key) Less(other Key) bool { + return k.Compare(other) == -1 +} + +func (k Key) Compare(other Key) int { + if valueCmp := cmp.Compare(k.value, other.value); valueCmp != 0 { + return valueCmp + } + return cmp.Compare(k.length, other.length) +} + +// Extend returns a new Key that is the in-order aggregation of Key [k] with [keys] +func (k Key) Extend(keys ...Key) Key { + totalBitLength := k.length + for _, key := range keys { + totalBitLength += key.length + } + buffer := make([]byte, bytesNeeded(totalBitLength)) + copy(buffer, k.value) + currentTotal := k.length + for _, key := range keys { + extendIntoBuffer(buffer, key, 
currentTotal) + currentTotal += key.length + } + + return Key{ + value: byteSliceToString(buffer), + length: totalBitLength, + } +} + +func extendIntoBuffer(buffer []byte, val Key, bitsOffset int) { + if val.length == 0 { + return + } + bytesOffset := bytesNeeded(bitsOffset) + bitsRemainder := bitsOffset % 8 + if bitsRemainder == 0 { + copy(buffer[bytesOffset:], val.value) + return + } + + // Fill the partial byte with the first [shift] bits of the extension path + buffer[bytesOffset-1] |= val.value[0] >> bitsRemainder + + // copy the rest of the extension path bytes into the buffer, + // shifted byte shift bits + shiftCopy(buffer[bytesOffset:], val.value, dualBitIndex(bitsRemainder)) +} + +// dualBitIndex gets the dual of the bit index +// ex: in a byte, the bit 5 from the right is the same as the bit 3 from the left +func dualBitIndex(shift int) int { + return (8 - shift) % 8 +} + +// Treats [src] as a bit array and copies it into [dst] shifted by [shift] bits. +// For example, if [src] is [0b0000_0001, 0b0000_0010] and [shift] is 4, +// we copy [0b0001_0000, 0b0010_0000] into [dst]. +// Assumes len(dst) >= len(src)-1. +// If len(dst) == len(src)-1 the last byte of [src] is only partially copied +// (i.e. the rightmost bits are not copied). +func shiftCopy(dst []byte, src string, shift int) { + i := 0 + dualShift := dualBitIndex(shift) + for ; i < len(src)-1; i++ { + dst[i] = src[i]<>dualShift + } + + if i < len(dst) { + // the last byte only has values from byte i, as there is no byte i+1 + dst[i] = src[i] << shift + } +} + +// Skip returns a new Key that contains the last +// k.length-bitsToSkip bits of [k]. +func (k Key) Skip(bitsToSkip int) Key { + if k.length <= bitsToSkip { + return Key{} + } + result := Key{ + value: k.value[bitsToSkip/8:], + length: k.length - bitsToSkip, + } + + // if the tokens to skip is a whole number of bytes, + // the remaining bytes exactly equals the new key. 
+ if bitsToSkip%8 == 0 { + return result + } + + // bitsToSkip does not remove a whole number of bytes. + // copy the remaining shifted bytes into a new buffer. + buffer := make([]byte, bytesNeeded(result.length)) + bitsRemovedFromFirstRemainingByte := bitsToSkip % 8 + shiftCopy(buffer, result.value, bitsRemovedFromFirstRemainingByte) + + result.value = byteSliceToString(buffer) + return result +} + +// Take returns a new Key that contains the first bitsToTake bits of the current Key +func (k Key) Take(bitsToTake int) Key { + if k.length <= bitsToTake { + return k + } + + result := Key{ + length: bitsToTake, + } + + remainderBits := result.length % 8 + if remainderBits == 0 { + result.value = k.value[:bitsToTake/8] + return result + } + + // We need to zero out some bits of the last byte so a simple slice will not work + // Create a new []byte to store the altered value + buffer := make([]byte, bytesNeeded(bitsToTake)) + copy(buffer, k.value) + + // We want to zero out everything to the right of the last token, which is at index bitsToTake-1 + // Mask will be (8-remainderBits) number of 1's followed by (remainderBits) number of 0's + buffer[len(buffer)-1] &= byte(0xFF << dualBitIndex(remainderBits)) + + result.value = byteSliceToString(buffer) + return result +} + +// Bytes returns the raw bytes of the Key +// Invariant: The returned value must not be modified. +func (k Key) Bytes() []byte { + // avoid copying during the conversion + // "safe" because we never edit the value, only used as DB key + return stringToByteSlice(k.value) +} + +// byteSliceToString converts the []byte to a string +// Invariant: The input []byte must not be modified. 
+func byteSliceToString(bs []byte) string { + // avoid copying during the conversion + // "safe" because we never edit the []byte, and it is never returned by any functions except Bytes() + return unsafe.String(unsafe.SliceData(bs), len(bs)) +} + +// stringToByteSlice converts the string to a []byte +// Invariant: The output []byte must not be modified. +func stringToByteSlice(value string) []byte { + // avoid copying during the conversion + // "safe" because we never edit the []byte + return unsafe.Slice(unsafe.StringData(value), len(value)) +} + +// Returns the number of bytes needed to store [bits] bits. +func bytesNeeded(bits int) int { + size := bits / 8 + if bits%8 != 0 { + size++ + } + return size +} diff --git a/avalanchego/x/merkledb/key_test.go b/avalanchego/x/merkledb/key_test.go new file mode 100644 index 00000000..d2e4e300 --- /dev/null +++ b/avalanchego/x/merkledb/key_test.go @@ -0,0 +1,614 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package merkledb + +import ( + "fmt" + "strconv" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestBranchFactor_Valid(t *testing.T) { + require := require.New(t) + for _, bf := range validBranchFactors { + require.NoError(bf.Valid()) + } + var empty BranchFactor + err := empty.Valid() + require.ErrorIs(err, ErrInvalidBranchFactor) +} + +func TestHasPartialByte(t *testing.T) { + for _, ts := range validTokenSizes { + t.Run(strconv.Itoa(ts), func(t *testing.T) { + require := require.New(t) + + key := Key{} + require.False(key.hasPartialByte()) + + if ts == 8 { + // Tokens are an entire byte so + // there is never a partial byte. + key = key.Extend(ToToken(1, ts)) + require.False(key.hasPartialByte()) + key = key.Extend(ToToken(0, ts)) + require.False(key.hasPartialByte()) + return + } + + // Fill all but the last token of the first byte. 
+ for i := 0; i < 8-ts; i += ts { + key = key.Extend(ToToken(1, ts)) + require.True(key.hasPartialByte()) + } + + // Fill the last token of the first byte. + key = key.Extend(ToToken(0, ts)) + require.False(key.hasPartialByte()) + + // Fill the first token of the second byte. + key = key.Extend(ToToken(0, ts)) + require.True(key.hasPartialByte()) + }) + } +} + +func Test_Key_Has_Prefix(t *testing.T) { + type test struct { + name string + keyA func(ts int) Key + keyB func(ts int) Key + isStrictPrefix bool + isPrefix bool + } + + key := "Key" + + tests := []test{ + { + name: "equal keys", + keyA: func(int) Key { return ToKey([]byte(key)) }, + keyB: func(int) Key { return ToKey([]byte(key)) }, + isPrefix: true, + isStrictPrefix: false, + }, + { + name: "one key has one fewer token", + keyA: func(int) Key { return ToKey([]byte(key)) }, + keyB: func(ts int) Key { + return ToKey([]byte(key)).Take(len(key)*8 - ts) + }, + isPrefix: true, + isStrictPrefix: true, + }, + { + name: "equal keys, both have one fewer token", + keyA: func(ts int) Key { + return ToKey([]byte(key)).Take(len(key)*8 - ts) + }, + keyB: func(ts int) Key { + return ToKey([]byte(key)).Take(len(key)*8 - ts) + }, + isPrefix: true, + isStrictPrefix: false, + }, + { + name: "different keys", + keyA: func(int) Key { return ToKey([]byte{0xF7}) }, + keyB: func(int) Key { return ToKey([]byte{0xF0}) }, + isPrefix: false, + isStrictPrefix: false, + }, + { + name: "same bytes, different lengths", + keyA: func(ts int) Key { + return ToKey([]byte{0x10, 0x00}).Take(ts) + }, + keyB: func(ts int) Key { + return ToKey([]byte{0x10, 0x00}).Take(ts * 2) + }, + isPrefix: false, + isStrictPrefix: false, + }, + } + + for _, tt := range tests { + for _, ts := range validTokenSizes { + t.Run(tt.name+" ts "+strconv.Itoa(ts), func(t *testing.T) { + require := require.New(t) + keyA := tt.keyA(ts) + keyB := tt.keyB(ts) + + require.Equal(tt.isPrefix, keyA.HasPrefix(keyB)) + require.Equal(tt.isPrefix, keyA.iteratedHasPrefix(keyB, 0, 
ts)) + require.Equal(tt.isStrictPrefix, keyA.HasStrictPrefix(keyB)) + }) + } + } +} + +func Test_Key_Skip(t *testing.T) { + require := require.New(t) + + empty := Key{} + require.Equal(ToKey([]byte{0}).Skip(8), empty) + for _, ts := range validTokenSizes { + if ts == 8 { + continue + } + shortKey := ToKey([]byte{0b0101_0101}) + longKey := ToKey([]byte{0b0101_0101, 0b0101_0101}) + for shift := 0; shift < 8; shift += ts { + skipKey := shortKey.Skip(shift) + require.Equal(byte(0b0101_0101<>(8-shift)), skipKey.value[0]) + require.Equal(byte(0b0101_0101<>shift)< ts { + key1 = key1.Take(key1.length - ts) + } + key2 := ToKey(second) + if forceSecondOdd && key2.length > ts { + key2 = key2.Take(key2.length - ts) + } + token := byte(int(tokenByte) % int(tokenSizeToBranchFactor[ts])) + extendedP := key1.Extend(ToToken(token, ts), key2) + require.Equal(key1.length+key2.length+ts, extendedP.length) + firstIndex := 0 + for ; firstIndex < key1.length; firstIndex += ts { + require.Equal(key1.Token(firstIndex, ts), extendedP.Token(firstIndex, ts)) + } + require.Equal(token, extendedP.Token(firstIndex, ts)) + firstIndex += ts + for secondIndex := 0; secondIndex < key2.length; secondIndex += ts { + require.Equal(key2.Token(secondIndex, ts), extendedP.Token(firstIndex+secondIndex, ts)) + } + } + }) +} + +func FuzzKeyDoubleExtend_Any(f *testing.F) { + f.Fuzz(func( + t *testing.T, + baseKeyBytes []byte, + firstKeyBytes []byte, + secondKeyBytes []byte, + forceBaseOdd bool, + forceFirstOdd bool, + forceSecondOdd bool, + ) { + require := require.New(t) + for _, ts := range validTokenSizes { + baseKey := ToKey(baseKeyBytes) + if forceBaseOdd && baseKey.length > ts { + baseKey = baseKey.Take(baseKey.length - ts) + } + firstKey := ToKey(firstKeyBytes) + if forceFirstOdd && firstKey.length > ts { + firstKey = firstKey.Take(firstKey.length - ts) + } + + secondKey := ToKey(secondKeyBytes) + if forceSecondOdd && secondKey.length > ts { + secondKey = secondKey.Take(secondKey.length - ts) + } + + 
extendedP := baseKey.Extend(firstKey, secondKey) + require.Equal(baseKey.length+firstKey.length+secondKey.length, extendedP.length) + totalIndex := 0 + for baseIndex := 0; baseIndex < baseKey.length; baseIndex += ts { + require.Equal(baseKey.Token(baseIndex, ts), extendedP.Token(baseIndex, ts)) + } + totalIndex += baseKey.length + for firstIndex := 0; firstIndex < firstKey.length; firstIndex += ts { + require.Equal(firstKey.Token(firstIndex, ts), extendedP.Token(totalIndex+firstIndex, ts)) + } + totalIndex += firstKey.length + for secondIndex := 0; secondIndex < secondKey.length; secondIndex += ts { + require.Equal(secondKey.Token(secondIndex, ts), extendedP.Token(totalIndex+secondIndex, ts)) + } + } + }) +} + +func FuzzKeySkip(f *testing.F) { + f.Fuzz(func( + t *testing.T, + first []byte, + tokensToSkip uint, + ) { + require := require.New(t) + key1 := ToKey(first) + for _, ts := range validTokenSizes { + // need bits to be a multiple of token size + ubitsToSkip := tokensToSkip * uint(ts) + if ubitsToSkip >= uint(key1.length) { + t.SkipNow() + } + bitsToSkip := int(ubitsToSkip) + key2 := key1.Skip(bitsToSkip) + require.Equal(key1.length-bitsToSkip, key2.length) + for i := 0; i < key2.length; i += ts { + require.Equal(key1.Token(bitsToSkip+i, ts), key2.Token(i, ts)) + } + } + }) +} + +func FuzzKeyTake(f *testing.F) { + f.Fuzz(func( + t *testing.T, + first []byte, + uTokensToTake uint, + ) { + require := require.New(t) + for _, ts := range validTokenSizes { + key1 := ToKey(first) + uBitsToTake := uTokensToTake * uint(ts) + if uBitsToTake >= uint(key1.length) { + t.SkipNow() + } + bitsToTake := int(uBitsToTake) + key2 := key1.Take(bitsToTake) + require.Equal(bitsToTake, key2.length) + if key2.hasPartialByte() { + paddingMask := byte(0xFF >> (key2.length % 8)) + require.Zero(key2.value[len(key2.value)-1] & paddingMask) + } + for i := 0; i < bitsToTake; i += ts { + require.Equal(key1.Token(i, ts), key2.Token(i, ts)) + } + } + }) +} + +func TestShiftCopy(t *testing.T) { 
+ type test struct { + dst []byte + src []byte + expected []byte + shift int + } + + tests := []test{ + { + dst: []byte{}, + src: []byte{}, + expected: []byte{}, + shift: 0, + }, + { + dst: []byte{}, + src: []byte{}, + expected: []byte{}, + shift: 1, + }, + { + dst: make([]byte, 1), + src: []byte{0b0000_0001}, + expected: []byte{0b0000_0010}, + shift: 1, + }, + { + dst: make([]byte, 1), + src: []byte{0b0000_0001}, + expected: []byte{0b0000_0100}, + shift: 2, + }, + { + dst: make([]byte, 1), + src: []byte{0b0000_0001}, + expected: []byte{0b1000_0000}, + shift: 7, + }, + { + dst: make([]byte, 2), + src: []byte{0b0000_0001, 0b1000_0001}, + expected: []byte{0b0000_0011, 0b0000_0010}, + shift: 1, + }, + { + dst: make([]byte, 1), + src: []byte{0b0000_0001, 0b1000_0001}, + expected: []byte{0b0000_0011}, + shift: 1, + }, + { + dst: make([]byte, 2), + src: []byte{0b0000_0001, 0b1000_0001}, + expected: []byte{0b1100_0000, 0b1000_0000}, + shift: 7, + }, + { + dst: make([]byte, 1), + src: []byte{0b0000_0001, 0b1000_0001}, + expected: []byte{0b1100_0000}, + shift: 7, + }, + { + dst: make([]byte, 2), + src: []byte{0b0000_0001, 0b1000_0001}, + expected: []byte{0b1000_0001, 0b0000_0000}, + shift: 8, + }, + { + dst: make([]byte, 1), + src: []byte{0b0000_0001, 0b1000_0001}, + expected: []byte{0b1000_0001}, + shift: 8, + }, + { + dst: make([]byte, 2), + src: []byte{0b0000_0001, 0b1000_0001, 0b1111_0101}, + expected: []byte{0b0000_0110, 0b000_0111}, + shift: 2, + }, + } + + for _, tt := range tests { + t.Run(fmt.Sprintf("dst: %v, src: %v", tt.dst, tt.src), func(t *testing.T) { + shiftCopy(tt.dst, string(tt.src), tt.shift) + require.Equal(t, tt.expected, tt.dst) + }) + } +} diff --git a/avalanchego/x/merkledb/maybe.go b/avalanchego/x/merkledb/maybe.go deleted file mode 100644 index acebb47f..00000000 --- a/avalanchego/x/merkledb/maybe.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package merkledb - -import "golang.org/x/exp/slices" - -// Maybe T = Some T | Nothing. -// A data wrapper that allows values to be something [Some T] or nothing [Nothing]. -// Maybe is used to wrap types: -// * That can't be represented by nil. -// * That use nil as a valid value instead of an indicator of a missing value. -// For more info see https://en.wikipedia.org/wiki/Option_type -type Maybe[T any] struct { - hasValue bool - value T -} - -// Returns a new Maybe[T] with the value val. -func Some[T any](val T) Maybe[T] { - return Maybe[T]{ - value: val, - hasValue: true, - } -} - -// Returns a new Maybe[T] with no value. -func Nothing[T any]() Maybe[T] { - return Maybe[T]{} -} - -// Returns true iff [m] has a value. -func (m Maybe[T]) IsNothing() bool { - return !m.hasValue -} - -// Returns the value of [m]. -func (m Maybe[T]) Value() T { - return m.value -} - -func Clone(m Maybe[[]byte]) Maybe[[]byte] { - if !m.hasValue { - return Nothing[[]byte]() - } - return Some(slices.Clone(m.value)) -} diff --git a/avalanchego/x/merkledb/maybe_test.go b/avalanchego/x/merkledb/maybe_test.go deleted file mode 100644 index acaf1630..00000000 --- a/avalanchego/x/merkledb/maybe_test.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package merkledb - -import ( - "testing" - - "github.com/stretchr/testify/require" - "golang.org/x/exp/slices" -) - -func TestMaybeClone(t *testing.T) { - // Case: Value is maybe - { - val := []byte{1, 2, 3} - originalVal := slices.Clone(val) - m := Some(val) - mClone := Clone(m) - m.value[0] = 0 - require.NotEqual(t, mClone.value, m.value) - require.Equal(t, originalVal, mClone.value) - } - - // Case: Value is nothing - { - m := Nothing[[]byte]() - mClone := Clone(m) - require.True(t, mClone.IsNothing()) - } -} diff --git a/avalanchego/x/merkledb/metrics.go b/avalanchego/x/merkledb/metrics.go index cc1efb08..058b4869 100644 --- a/avalanchego/x/merkledb/metrics.go +++ b/avalanchego/x/merkledb/metrics.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package merkledb @@ -8,20 +8,22 @@ import ( "github.com/prometheus/client_golang/prometheus" - "github.com/ava-labs/avalanchego/utils/wrappers" + "github.com/ava-labs/avalanchego/utils" ) var ( - _ merkleMetrics = &mockMetrics{} - _ merkleMetrics = &metrics{} + _ merkleMetrics = (*mockMetrics)(nil) + _ merkleMetrics = (*metrics)(nil) ) type merkleMetrics interface { - IOKeyRead() - IOKeyWrite() + DatabaseNodeRead() + DatabaseNodeWrite() HashCalculated() - DBNodeCacheHit() - DBNodeCacheMiss() + ValueNodeCacheHit() + ValueNodeCacheMiss() + IntermediateNodeCacheHit() + IntermediateNodeCacheMiss() ViewNodeCacheHit() ViewNodeCacheMiss() ViewValueCacheHit() @@ -29,16 +31,18 @@ type merkleMetrics interface { } type mockMetrics struct { - lock sync.Mutex - keyReadCount int64 - keyWriteCount int64 - hashCount int64 - dbNodeCacheHit int64 - dbNodeCacheMiss int64 - viewNodeCacheHit int64 - viewNodeCacheMiss int64 - viewValueCacheHit int64 - viewValueCacheMiss int64 + lock sync.Mutex + keyReadCount int64 + keyWriteCount int64 + hashCount int64 + valueNodeCacheHit int64 + 
valueNodeCacheMiss int64 + intermediateNodeCacheHit int64 + intermediateNodeCacheMiss int64 + viewNodeCacheHit int64 + viewNodeCacheMiss int64 + viewValueCacheHit int64 + viewValueCacheMiss int64 } func (m *mockMetrics) HashCalculated() { @@ -48,14 +52,14 @@ func (m *mockMetrics) HashCalculated() { m.hashCount++ } -func (m *mockMetrics) IOKeyRead() { +func (m *mockMetrics) DatabaseNodeRead() { m.lock.Lock() defer m.lock.Unlock() m.keyReadCount++ } -func (m *mockMetrics) IOKeyWrite() { +func (m *mockMetrics) DatabaseNodeWrite() { m.lock.Lock() defer m.lock.Unlock() @@ -90,33 +94,50 @@ func (m *mockMetrics) ViewValueCacheMiss() { m.viewValueCacheMiss++ } -func (m *mockMetrics) DBNodeCacheHit() { +func (m *mockMetrics) ValueNodeCacheHit() { m.lock.Lock() defer m.lock.Unlock() - m.dbNodeCacheHit++ + m.valueNodeCacheHit++ } -func (m *mockMetrics) DBNodeCacheMiss() { +func (m *mockMetrics) ValueNodeCacheMiss() { m.lock.Lock() defer m.lock.Unlock() - m.dbNodeCacheMiss++ + m.valueNodeCacheMiss++ +} + +func (m *mockMetrics) IntermediateNodeCacheHit() { + m.lock.Lock() + defer m.lock.Unlock() + + m.intermediateNodeCacheHit++ +} + +func (m *mockMetrics) IntermediateNodeCacheMiss() { + m.lock.Lock() + defer m.lock.Unlock() + + m.intermediateNodeCacheMiss++ } type metrics struct { - ioKeyWrite prometheus.Counter - ioKeyRead prometheus.Counter - hashCount prometheus.Counter - dbNodeCacheHit prometheus.Counter - dbNodeCacheMiss prometheus.Counter - viewNodeCacheHit prometheus.Counter - viewNodeCacheMiss prometheus.Counter - viewValueCacheHit prometheus.Counter - viewValueCacheMiss prometheus.Counter + ioKeyWrite prometheus.Counter + ioKeyRead prometheus.Counter + hashCount prometheus.Counter + intermediateNodeCacheHit prometheus.Counter + intermediateNodeCacheMiss prometheus.Counter + valueNodeCacheHit prometheus.Counter + valueNodeCacheMiss prometheus.Counter + viewNodeCacheHit prometheus.Counter + viewNodeCacheMiss prometheus.Counter + viewValueCacheHit prometheus.Counter + 
viewValueCacheMiss prometheus.Counter } func newMetrics(namespace string, reg prometheus.Registerer) (merkleMetrics, error) { + // TODO: Should we instead return an error if reg is nil? if reg == nil { return &mockMetrics{}, nil } @@ -136,15 +157,25 @@ func newMetrics(namespace string, reg prometheus.Registerer) (merkleMetrics, err Name: "hashes_calculated", Help: "cumulative number of node hashes done", }), - dbNodeCacheHit: prometheus.NewCounter(prometheus.CounterOpts{ + valueNodeCacheHit: prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Name: "value_node_cache_hit", + Help: "cumulative amount of hits on the value node db cache", + }), + valueNodeCacheMiss: prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Name: "value_node_cache_miss", + Help: "cumulative amount of misses on the value node db cache", + }), + intermediateNodeCacheHit: prometheus.NewCounter(prometheus.CounterOpts{ Namespace: namespace, - Name: "db_node_cache_hit", - Help: "cumulative amount of hits on the db node cache", + Name: "intermediate_node_cache_hit", + Help: "cumulative amount of hits on the intermediate node db cache", }), - dbNodeCacheMiss: prometheus.NewCounter(prometheus.CounterOpts{ + intermediateNodeCacheMiss: prometheus.NewCounter(prometheus.CounterOpts{ Namespace: namespace, - Name: "db_node_cache_miss", - Help: "cumulative amount of misses on the db node cache", + Name: "intermediate_node_cache_miss", + Help: "cumulative amount of misses on the intermediate node db cache", }), viewNodeCacheHit: prometheus.NewCounter(prometheus.CounterOpts{ Namespace: namespace, @@ -167,26 +198,27 @@ func newMetrics(namespace string, reg prometheus.Registerer) (merkleMetrics, err Help: "cumulative amount of misses on the view value cache", }), } - errs := wrappers.Errs{} - errs.Add( + err := utils.Err( reg.Register(m.ioKeyWrite), reg.Register(m.ioKeyRead), reg.Register(m.hashCount), - reg.Register(m.dbNodeCacheHit), - reg.Register(m.dbNodeCacheMiss), + 
reg.Register(m.valueNodeCacheHit), + reg.Register(m.valueNodeCacheMiss), + reg.Register(m.intermediateNodeCacheHit), + reg.Register(m.intermediateNodeCacheMiss), reg.Register(m.viewNodeCacheHit), reg.Register(m.viewNodeCacheMiss), reg.Register(m.viewValueCacheHit), reg.Register(m.viewValueCacheMiss), ) - return &m, errs.Err + return &m, err } -func (m *metrics) IOKeyRead() { +func (m *metrics) DatabaseNodeRead() { m.ioKeyRead.Inc() } -func (m *metrics) IOKeyWrite() { +func (m *metrics) DatabaseNodeWrite() { m.ioKeyWrite.Inc() } @@ -210,10 +242,18 @@ func (m *metrics) ViewValueCacheMiss() { m.viewValueCacheMiss.Inc() } -func (m *metrics) DBNodeCacheHit() { - m.dbNodeCacheHit.Inc() +func (m *metrics) IntermediateNodeCacheHit() { + m.intermediateNodeCacheHit.Inc() +} + +func (m *metrics) IntermediateNodeCacheMiss() { + m.intermediateNodeCacheMiss.Inc() +} + +func (m *metrics) ValueNodeCacheHit() { + m.valueNodeCacheHit.Inc() } -func (m *metrics) DBNodeCacheMiss() { - m.dbNodeCacheMiss.Inc() +func (m *metrics) ValueNodeCacheMiss() { + m.valueNodeCacheMiss.Inc() } diff --git a/avalanchego/x/merkledb/metrics_test.go b/avalanchego/x/merkledb/metrics_test.go index be08d7d8..20c4accb 100644 --- a/avalanchego/x/merkledb/metrics_test.go +++ b/avalanchego/x/merkledb/metrics_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package merkledb @@ -7,8 +7,6 @@ import ( "context" "testing" - "github.com/prometheus/client_golang/prometheus" - "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/database" @@ -16,59 +14,55 @@ import ( ) func Test_Metrics_Basic_Usage(t *testing.T) { - db, err := New( + config := newDefaultConfig() + // Set to nil so that we use a mockMetrics instead of the real one inside + // merkledb. 
+ config.Reg = nil + + db, err := newDB( context.Background(), memdb.New(), - Config{ - Tracer: newNoopTracer(), - HistoryLength: 300, - NodeCacheSize: minCacheSize, - }, + config, ) require.NoError(t, err) - err = db.Put([]byte("key"), []byte("value")) - require.NoError(t, err) + db.metrics.(*mockMetrics).keyReadCount = 0 + db.metrics.(*mockMetrics).keyWriteCount = 0 + db.metrics.(*mockMetrics).hashCount = 0 + + require.NoError(t, db.Put([]byte("key"), []byte("value"))) require.Equal(t, int64(1), db.metrics.(*mockMetrics).keyReadCount) require.Equal(t, int64(1), db.metrics.(*mockMetrics).keyWriteCount) - require.Equal(t, int64(3), db.metrics.(*mockMetrics).hashCount) + require.Equal(t, int64(1), db.metrics.(*mockMetrics).hashCount) - err = db.Delete([]byte("key")) - require.NoError(t, err) + require.NoError(t, db.Delete([]byte("key"))) require.Equal(t, int64(1), db.metrics.(*mockMetrics).keyReadCount) require.Equal(t, int64(2), db.metrics.(*mockMetrics).keyWriteCount) - require.Equal(t, int64(4), db.metrics.(*mockMetrics).hashCount) + require.Equal(t, int64(1), db.metrics.(*mockMetrics).hashCount) _, err = db.Get([]byte("key2")) require.ErrorIs(t, err, database.ErrNotFound) require.Equal(t, int64(2), db.metrics.(*mockMetrics).keyReadCount) require.Equal(t, int64(2), db.metrics.(*mockMetrics).keyWriteCount) - require.Equal(t, int64(4), db.metrics.(*mockMetrics).hashCount) + require.Equal(t, int64(1), db.metrics.(*mockMetrics).hashCount) } func Test_Metrics_Initialize(t *testing.T) { db, err := New( context.Background(), memdb.New(), - Config{ - Tracer: newNoopTracer(), - HistoryLength: 300, - Reg: prometheus.NewRegistry(), - NodeCacheSize: 1000, - }, + newDefaultConfig(), ) require.NoError(t, err) - err = db.Put([]byte("key"), []byte("value")) - require.NoError(t, err) + require.NoError(t, db.Put([]byte("key"), []byte("value"))) val, err := db.Get([]byte("key")) require.NoError(t, err) require.Equal(t, []byte("value"), val) - err = db.Delete([]byte("key")) - 
require.NoError(t, err) + require.NoError(t, db.Delete([]byte("key"))) } diff --git a/avalanchego/x/merkledb/mock_db.go b/avalanchego/x/merkledb/mock_db.go new file mode 100644 index 00000000..d43e2761 --- /dev/null +++ b/avalanchego/x/merkledb/mock_db.go @@ -0,0 +1,491 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: x/merkledb/db.go +// +// Generated by this command: +// +// mockgen -source=x/merkledb/db.go -destination=x/merkledb/mock_db.go -package=merkledb -exclude_interfaces=ChangeProofer,RangeProofer,Clearer,Prefetcher +// + +// Package merkledb is a generated GoMock package. +package merkledb + +import ( + context "context" + reflect "reflect" + + database "github.com/ava-labs/avalanchego/database" + ids "github.com/ava-labs/avalanchego/ids" + maybe "github.com/ava-labs/avalanchego/utils/maybe" + gomock "go.uber.org/mock/gomock" +) + +// MockMerkleDB is a mock of MerkleDB interface. +type MockMerkleDB struct { + ctrl *gomock.Controller + recorder *MockMerkleDBMockRecorder +} + +// MockMerkleDBMockRecorder is the mock recorder for MockMerkleDB. +type MockMerkleDBMockRecorder struct { + mock *MockMerkleDB +} + +// NewMockMerkleDB creates a new mock instance. +func NewMockMerkleDB(ctrl *gomock.Controller) *MockMerkleDB { + mock := &MockMerkleDB{ctrl: ctrl} + mock.recorder = &MockMerkleDBMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockMerkleDB) EXPECT() *MockMerkleDBMockRecorder { + return m.recorder +} + +// Clear mocks base method. +func (m *MockMerkleDB) Clear() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Clear") + ret0, _ := ret[0].(error) + return ret0 +} + +// Clear indicates an expected call of Clear. +func (mr *MockMerkleDBMockRecorder) Clear() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Clear", reflect.TypeOf((*MockMerkleDB)(nil).Clear)) +} + +// Close mocks base method. 
+func (m *MockMerkleDB) Close() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Close") + ret0, _ := ret[0].(error) + return ret0 +} + +// Close indicates an expected call of Close. +func (mr *MockMerkleDBMockRecorder) Close() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockMerkleDB)(nil).Close)) +} + +// CommitChangeProof mocks base method. +func (m *MockMerkleDB) CommitChangeProof(ctx context.Context, proof *ChangeProof) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CommitChangeProof", ctx, proof) + ret0, _ := ret[0].(error) + return ret0 +} + +// CommitChangeProof indicates an expected call of CommitChangeProof. +func (mr *MockMerkleDBMockRecorder) CommitChangeProof(ctx, proof any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CommitChangeProof", reflect.TypeOf((*MockMerkleDB)(nil).CommitChangeProof), ctx, proof) +} + +// CommitRangeProof mocks base method. +func (m *MockMerkleDB) CommitRangeProof(ctx context.Context, start, end maybe.Maybe[[]byte], proof *RangeProof) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CommitRangeProof", ctx, start, end, proof) + ret0, _ := ret[0].(error) + return ret0 +} + +// CommitRangeProof indicates an expected call of CommitRangeProof. +func (mr *MockMerkleDBMockRecorder) CommitRangeProof(ctx, start, end, proof any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CommitRangeProof", reflect.TypeOf((*MockMerkleDB)(nil).CommitRangeProof), ctx, start, end, proof) +} + +// Compact mocks base method. +func (m *MockMerkleDB) Compact(start, limit []byte) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Compact", start, limit) + ret0, _ := ret[0].(error) + return ret0 +} + +// Compact indicates an expected call of Compact. 
+func (mr *MockMerkleDBMockRecorder) Compact(start, limit any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Compact", reflect.TypeOf((*MockMerkleDB)(nil).Compact), start, limit) +} + +// Delete mocks base method. +func (m *MockMerkleDB) Delete(key []byte) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Delete", key) + ret0, _ := ret[0].(error) + return ret0 +} + +// Delete indicates an expected call of Delete. +func (mr *MockMerkleDBMockRecorder) Delete(key any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockMerkleDB)(nil).Delete), key) +} + +// Get mocks base method. +func (m *MockMerkleDB) Get(key []byte) ([]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", key) + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Get indicates an expected call of Get. +func (mr *MockMerkleDBMockRecorder) Get(key any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockMerkleDB)(nil).Get), key) +} + +// GetChangeProof mocks base method. +func (m *MockMerkleDB) GetChangeProof(ctx context.Context, startRootID, endRootID ids.ID, start, end maybe.Maybe[[]byte], maxLength int) (*ChangeProof, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetChangeProof", ctx, startRootID, endRootID, start, end, maxLength) + ret0, _ := ret[0].(*ChangeProof) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetChangeProof indicates an expected call of GetChangeProof. +func (mr *MockMerkleDBMockRecorder) GetChangeProof(ctx, startRootID, endRootID, start, end, maxLength any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetChangeProof", reflect.TypeOf((*MockMerkleDB)(nil).GetChangeProof), ctx, startRootID, endRootID, start, end, maxLength) +} + +// GetMerkleRoot mocks base method. 
+func (m *MockMerkleDB) GetMerkleRoot(ctx context.Context) (ids.ID, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetMerkleRoot", ctx) + ret0, _ := ret[0].(ids.ID) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetMerkleRoot indicates an expected call of GetMerkleRoot. +func (mr *MockMerkleDBMockRecorder) GetMerkleRoot(ctx any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMerkleRoot", reflect.TypeOf((*MockMerkleDB)(nil).GetMerkleRoot), ctx) +} + +// GetProof mocks base method. +func (m *MockMerkleDB) GetProof(ctx context.Context, keyBytes []byte) (*Proof, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetProof", ctx, keyBytes) + ret0, _ := ret[0].(*Proof) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetProof indicates an expected call of GetProof. +func (mr *MockMerkleDBMockRecorder) GetProof(ctx, keyBytes any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProof", reflect.TypeOf((*MockMerkleDB)(nil).GetProof), ctx, keyBytes) +} + +// GetRangeProof mocks base method. +func (m *MockMerkleDB) GetRangeProof(ctx context.Context, start, end maybe.Maybe[[]byte], maxLength int) (*RangeProof, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetRangeProof", ctx, start, end, maxLength) + ret0, _ := ret[0].(*RangeProof) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetRangeProof indicates an expected call of GetRangeProof. +func (mr *MockMerkleDBMockRecorder) GetRangeProof(ctx, start, end, maxLength any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRangeProof", reflect.TypeOf((*MockMerkleDB)(nil).GetRangeProof), ctx, start, end, maxLength) +} + +// GetRangeProofAtRoot mocks base method. 
+func (m *MockMerkleDB) GetRangeProofAtRoot(ctx context.Context, rootID ids.ID, start, end maybe.Maybe[[]byte], maxLength int) (*RangeProof, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetRangeProofAtRoot", ctx, rootID, start, end, maxLength) + ret0, _ := ret[0].(*RangeProof) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetRangeProofAtRoot indicates an expected call of GetRangeProofAtRoot. +func (mr *MockMerkleDBMockRecorder) GetRangeProofAtRoot(ctx, rootID, start, end, maxLength any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRangeProofAtRoot", reflect.TypeOf((*MockMerkleDB)(nil).GetRangeProofAtRoot), ctx, rootID, start, end, maxLength) +} + +// GetValue mocks base method. +func (m *MockMerkleDB) GetValue(ctx context.Context, key []byte) ([]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetValue", ctx, key) + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetValue indicates an expected call of GetValue. +func (mr *MockMerkleDBMockRecorder) GetValue(ctx, key any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetValue", reflect.TypeOf((*MockMerkleDB)(nil).GetValue), ctx, key) +} + +// GetValues mocks base method. +func (m *MockMerkleDB) GetValues(ctx context.Context, keys [][]byte) ([][]byte, []error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetValues", ctx, keys) + ret0, _ := ret[0].([][]byte) + ret1, _ := ret[1].([]error) + return ret0, ret1 +} + +// GetValues indicates an expected call of GetValues. +func (mr *MockMerkleDBMockRecorder) GetValues(ctx, keys any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetValues", reflect.TypeOf((*MockMerkleDB)(nil).GetValues), ctx, keys) +} + +// Has mocks base method. 
+func (m *MockMerkleDB) Has(key []byte) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Has", key) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Has indicates an expected call of Has. +func (mr *MockMerkleDBMockRecorder) Has(key any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Has", reflect.TypeOf((*MockMerkleDB)(nil).Has), key) +} + +// HealthCheck mocks base method. +func (m *MockMerkleDB) HealthCheck(arg0 context.Context) (any, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "HealthCheck", arg0) + ret0, _ := ret[0].(any) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// HealthCheck indicates an expected call of HealthCheck. +func (mr *MockMerkleDBMockRecorder) HealthCheck(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HealthCheck", reflect.TypeOf((*MockMerkleDB)(nil).HealthCheck), arg0) +} + +// NewBatch mocks base method. +func (m *MockMerkleDB) NewBatch() database.Batch { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NewBatch") + ret0, _ := ret[0].(database.Batch) + return ret0 +} + +// NewBatch indicates an expected call of NewBatch. +func (mr *MockMerkleDBMockRecorder) NewBatch() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewBatch", reflect.TypeOf((*MockMerkleDB)(nil).NewBatch)) +} + +// NewIterator mocks base method. +func (m *MockMerkleDB) NewIterator() database.Iterator { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NewIterator") + ret0, _ := ret[0].(database.Iterator) + return ret0 +} + +// NewIterator indicates an expected call of NewIterator. +func (mr *MockMerkleDBMockRecorder) NewIterator() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewIterator", reflect.TypeOf((*MockMerkleDB)(nil).NewIterator)) +} + +// NewIteratorWithPrefix mocks base method. 
+func (m *MockMerkleDB) NewIteratorWithPrefix(prefix []byte) database.Iterator { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NewIteratorWithPrefix", prefix) + ret0, _ := ret[0].(database.Iterator) + return ret0 +} + +// NewIteratorWithPrefix indicates an expected call of NewIteratorWithPrefix. +func (mr *MockMerkleDBMockRecorder) NewIteratorWithPrefix(prefix any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewIteratorWithPrefix", reflect.TypeOf((*MockMerkleDB)(nil).NewIteratorWithPrefix), prefix) +} + +// NewIteratorWithStart mocks base method. +func (m *MockMerkleDB) NewIteratorWithStart(start []byte) database.Iterator { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NewIteratorWithStart", start) + ret0, _ := ret[0].(database.Iterator) + return ret0 +} + +// NewIteratorWithStart indicates an expected call of NewIteratorWithStart. +func (mr *MockMerkleDBMockRecorder) NewIteratorWithStart(start any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewIteratorWithStart", reflect.TypeOf((*MockMerkleDB)(nil).NewIteratorWithStart), start) +} + +// NewIteratorWithStartAndPrefix mocks base method. +func (m *MockMerkleDB) NewIteratorWithStartAndPrefix(start, prefix []byte) database.Iterator { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NewIteratorWithStartAndPrefix", start, prefix) + ret0, _ := ret[0].(database.Iterator) + return ret0 +} + +// NewIteratorWithStartAndPrefix indicates an expected call of NewIteratorWithStartAndPrefix. +func (mr *MockMerkleDBMockRecorder) NewIteratorWithStartAndPrefix(start, prefix any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewIteratorWithStartAndPrefix", reflect.TypeOf((*MockMerkleDB)(nil).NewIteratorWithStartAndPrefix), start, prefix) +} + +// NewView mocks base method. 
+func (m *MockMerkleDB) NewView(ctx context.Context, changes ViewChanges) (View, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NewView", ctx, changes) + ret0, _ := ret[0].(View) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NewView indicates an expected call of NewView. +func (mr *MockMerkleDBMockRecorder) NewView(ctx, changes any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewView", reflect.TypeOf((*MockMerkleDB)(nil).NewView), ctx, changes) +} + +// PrefetchPath mocks base method. +func (m *MockMerkleDB) PrefetchPath(key []byte) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PrefetchPath", key) + ret0, _ := ret[0].(error) + return ret0 +} + +// PrefetchPath indicates an expected call of PrefetchPath. +func (mr *MockMerkleDBMockRecorder) PrefetchPath(key any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PrefetchPath", reflect.TypeOf((*MockMerkleDB)(nil).PrefetchPath), key) +} + +// PrefetchPaths mocks base method. +func (m *MockMerkleDB) PrefetchPaths(keys [][]byte) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PrefetchPaths", keys) + ret0, _ := ret[0].(error) + return ret0 +} + +// PrefetchPaths indicates an expected call of PrefetchPaths. +func (mr *MockMerkleDBMockRecorder) PrefetchPaths(keys any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PrefetchPaths", reflect.TypeOf((*MockMerkleDB)(nil).PrefetchPaths), keys) +} + +// Put mocks base method. +func (m *MockMerkleDB) Put(key, value []byte) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Put", key, value) + ret0, _ := ret[0].(error) + return ret0 +} + +// Put indicates an expected call of Put. 
+func (mr *MockMerkleDBMockRecorder) Put(key, value any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Put", reflect.TypeOf((*MockMerkleDB)(nil).Put), key, value) +} + +// VerifyChangeProof mocks base method. +func (m *MockMerkleDB) VerifyChangeProof(ctx context.Context, proof *ChangeProof, start, end maybe.Maybe[[]byte], expectedEndRootID ids.ID) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "VerifyChangeProof", ctx, proof, start, end, expectedEndRootID) + ret0, _ := ret[0].(error) + return ret0 +} + +// VerifyChangeProof indicates an expected call of VerifyChangeProof. +func (mr *MockMerkleDBMockRecorder) VerifyChangeProof(ctx, proof, start, end, expectedEndRootID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VerifyChangeProof", reflect.TypeOf((*MockMerkleDB)(nil).VerifyChangeProof), ctx, proof, start, end, expectedEndRootID) +} + +// getEditableNode mocks base method. +func (m *MockMerkleDB) getEditableNode(key Key, hasValue bool) (*node, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "getEditableNode", key, hasValue) + ret0, _ := ret[0].(*node) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// getEditableNode indicates an expected call of getEditableNode. +func (mr *MockMerkleDBMockRecorder) getEditableNode(key, hasValue any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "getEditableNode", reflect.TypeOf((*MockMerkleDB)(nil).getEditableNode), key, hasValue) +} + +// getNode mocks base method. +func (m *MockMerkleDB) getNode(key Key, hasValue bool) (*node, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "getNode", key, hasValue) + ret0, _ := ret[0].(*node) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// getNode indicates an expected call of getNode. 
+func (mr *MockMerkleDBMockRecorder) getNode(key, hasValue any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "getNode", reflect.TypeOf((*MockMerkleDB)(nil).getNode), key, hasValue) +} + +// getRoot mocks base method. +func (m *MockMerkleDB) getRoot() maybe.Maybe[*node] { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "getRoot") + ret0, _ := ret[0].(maybe.Maybe[*node]) + return ret0 +} + +// getRoot indicates an expected call of getRoot. +func (mr *MockMerkleDBMockRecorder) getRoot() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "getRoot", reflect.TypeOf((*MockMerkleDB)(nil).getRoot)) +} + +// getTokenSize mocks base method. +func (m *MockMerkleDB) getTokenSize() int { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "getTokenSize") + ret0, _ := ret[0].(int) + return ret0 +} + +// getTokenSize indicates an expected call of getTokenSize. +func (mr *MockMerkleDBMockRecorder) getTokenSize() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "getTokenSize", reflect.TypeOf((*MockMerkleDB)(nil).getTokenSize)) +} + +// getValue mocks base method. +func (m *MockMerkleDB) getValue(key Key) ([]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "getValue", key) + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// getValue indicates an expected call of getValue. +func (mr *MockMerkleDBMockRecorder) getValue(key any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "getValue", reflect.TypeOf((*MockMerkleDB)(nil).getValue), key) +} diff --git a/avalanchego/x/merkledb/node.go b/avalanchego/x/merkledb/node.go index edcb78c7..dd1f2ed6 100644 --- a/avalanchego/x/merkledb/node.go +++ b/avalanchego/x/merkledb/node.go @@ -1,72 +1,56 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. 
All rights reserved. // See the file LICENSE for licensing terms. package merkledb import ( - "golang.org/x/exp/maps" + "slices" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/hashing" + "github.com/ava-labs/avalanchego/utils/maybe" ) -const ( - NodeBranchFactor = 16 - HashLength = 32 -) - -// the values that go into the node's id -type hashValues struct { - Children map[byte]child - Value Maybe[[]byte] - Key SerializedPath -} +const HashLength = 32 // Representation of a node stored in the database. type dbNode struct { - value Maybe[[]byte] - children map[byte]child + value maybe.Maybe[[]byte] + children map[byte]*child } type child struct { - compressedPath path - id ids.ID + compressedKey Key + id ids.ID + hasValue bool } -// node holds additional information on top of the dbNode that makes calulcations easier to do +// node holds additional information on top of the dbNode that makes calculations easier to do type node struct { dbNode - id ids.ID - key path - nodeBytes []byte - valueDigest Maybe[[]byte] + key Key + valueDigest maybe.Maybe[[]byte] } // Returns a new node with the given [key] and no value. -// If [parent] isn't nil, the new node is added as a child of [parent]. -func newNode(parent *node, key path) *node { - newNode := &node{ +func newNode(key Key) *node { + return &node{ dbNode: dbNode{ - children: make(map[byte]child, NodeBranchFactor), + children: make(map[byte]*child, 2), }, key: key, } - if parent != nil { - parent.addChild(newNode) - } - return newNode } // Parse [nodeBytes] to a node and set its key to [key]. 
-func parseNode(key path, nodeBytes []byte) (*node, error) { +func parseNode(key Key, nodeBytes []byte) (*node, error) { n := dbNode{} - if _, err := Codec.decodeDBNode(nodeBytes, &n); err != nil { + if err := codec.decodeDBNode(nodeBytes, &n); err != nil { return nil, err } result := &node{ - dbNode: n, - key: key, - nodeBytes: nodeBytes, + dbNode: n, + key: key, } result.setValueDigest() @@ -79,122 +63,88 @@ func (n *node) hasValue() bool { } // Returns the byte representation of this node. -func (n *node) marshal() ([]byte, error) { - if n.nodeBytes != nil { - return n.nodeBytes, nil - } - - nodeBytes, err := Codec.encodeDBNode(Version, &(n.dbNode)) - if err != nil { - return nil, err - } - n.nodeBytes = nodeBytes - return n.nodeBytes, nil -} - -// clear the cached values that will need to be recalculated whenever the node changes -// for example, node ID and byte representation -func (n *node) onNodeChanged() { - n.id = ids.Empty - n.nodeBytes = nil +func (n *node) bytes() []byte { + return codec.encodeDBNode(&n.dbNode) } // Returns and caches the ID of this node. -func (n *node) calculateID(metrics merkleMetrics) error { - if n.id != ids.Empty { - return nil - } - - hv := &hashValues{ - Children: n.children, - Value: n.valueDigest, - Key: n.key.Serialize(), - } - - bytes, err := Codec.encodeHashValues(Version, hv) - if err != nil { - return err - } - +func (n *node) calculateID(metrics merkleMetrics) ids.ID { metrics.HashCalculated() - n.id = hashing.ComputeHash256Array(bytes) - return nil + bytes := codec.encodeHashValues(n) + return hashing.ComputeHash256Array(bytes) } // Set [n]'s value to [val]. 
-func (n *node) setValue(val Maybe[[]byte]) { - n.onNodeChanged() +func (n *node) setValue(val maybe.Maybe[[]byte]) { n.value = val n.setValueDigest() } func (n *node) setValueDigest() { - if n.value.IsNothing() || len(n.value.value) < HashLength { + if n.value.IsNothing() || len(n.value.Value()) < HashLength { n.valueDigest = n.value } else { - n.valueDigest = Some(hashing.ComputeHash256(n.value.value)) + n.valueDigest = maybe.Some(hashing.ComputeHash256(n.value.Value())) } } // Adds [child] as a child of [n]. // Assumes [child]'s key is valid as a child of [n]. // That is, [n.key] is a prefix of [child.key]. -func (n *node) addChild(child *node) { - n.addChildWithoutNode( - child.key[len(n.key)], - child.key[len(n.key)+1:], - child.id, - ) +func (n *node) addChild(childNode *node, tokenSize int) { + n.addChildWithID(childNode, tokenSize, ids.Empty) } -// Adds a child to [n] without a reference to the child node. -func (n *node) addChildWithoutNode(index byte, compressedPath path, childID ids.ID) { - n.onNodeChanged() - n.children[index] = child{ - compressedPath: compressedPath, - id: childID, - } +func (n *node) addChildWithID(childNode *node, tokenSize int, childID ids.ID) { + n.setChildEntry( + childNode.key.Token(n.key.length, tokenSize), + &child{ + compressedKey: childNode.key.Skip(n.key.length + tokenSize), + id: childID, + hasValue: childNode.hasValue(), + }, + ) } -// Returns the path of the only child of this node. -// Assumes this node has exactly one child. -func (n *node) getSingleChildPath() path { - for index, entry := range n.children { - return n.key + path(index) + entry.compressedPath - } - return "" +// Adds a child to [n] without a reference to the child node. +func (n *node) setChildEntry(index byte, childEntry *child) { + n.children[index] = childEntry } // Removes [child] from [n]'s children. 
-func (n *node) removeChild(child *node) { - n.onNodeChanged() - delete(n.children, child.key[len(n.key)]) +func (n *node) removeChild(child *node, tokenSize int) { + delete(n.children, child.key.Token(n.key.length, tokenSize)) } // clone Returns a copy of [n]. -// nodeBytes is intentionally not included because it can cause a race. -// nodes being evicted by the cache can write nodeBytes, -// so reading them during the cloning would be a data race. // Note: value isn't cloned because it is never edited, only overwritten // if this ever changes, value will need to be copied as well +// it is safe to clone all fields because they are only written/read while one or both of the db locks are held func (n *node) clone() *node { - return &node{ - id: n.id, + result := &node{ key: n.key, dbNode: dbNode{ value: n.value, - children: maps.Clone(n.children), + children: make(map[byte]*child, len(n.children)), }, valueDigest: n.valueDigest, } + for key, existing := range n.children { + result.children[key] = &child{ + compressedKey: existing.compressedKey, + id: existing.id, + hasValue: existing.hasValue, + } + } + return result } // Returns the ProofNode representation of this node. func (n *node) asProofNode() ProofNode { pn := ProofNode{ - KeyPath: n.key.Serialize(), + Key: n.key, Children: make(map[byte]ids.ID, len(n.children)), - ValueOrHash: Clone(n.valueDigest), + ValueOrHash: maybe.Bind(n.valueDigest, slices.Clone[[]byte]), } for index, entry := range n.children { pn.Children[index] = entry.id diff --git a/avalanchego/x/merkledb/node_test.go b/avalanchego/x/merkledb/node_test.go index 7c7c2578..3c096795 100644 --- a/avalanchego/x/merkledb/node_test.go +++ b/avalanchego/x/merkledb/node_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package merkledb @@ -8,62 +8,62 @@ import ( "testing" "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/utils/maybe" ) func Test_Node_Marshal(t *testing.T) { - root := newNode(nil, EmptyPath) + root := newNode(Key{}) require.NotNil(t, root) - fullpath := newPath([]byte("key")) - childNode := newNode(root, fullpath) - childNode.setValue(Some([]byte("value"))) + fullKey := ToKey([]byte("key")) + childNode := newNode(fullKey) + root.addChild(childNode, 4) + childNode.setValue(maybe.Some([]byte("value"))) require.NotNil(t, childNode) - err := childNode.calculateID(&mockMetrics{}) - require.NoError(t, err) - root.addChild(childNode) + childNode.calculateID(&mockMetrics{}) + root.addChild(childNode, 4) - data, err := root.marshal() - require.NoError(t, err) - rootParsed, err := parseNode(newPath([]byte("")), data) + data := root.bytes() + rootParsed, err := parseNode(ToKey([]byte("")), data) require.NoError(t, err) - require.Equal(t, 1, len(rootParsed.children)) + require.Len(t, rootParsed.children, 1) - rootIndex := root.getSingleChildPath()[len(root.key)] - parsedIndex := rootParsed.getSingleChildPath()[len(rootParsed.key)] + rootIndex := getSingleChildKey(root, 4).Token(0, 4) + parsedIndex := getSingleChildKey(rootParsed, 4).Token(0, 4) rootChildEntry := root.children[rootIndex] parseChildEntry := rootParsed.children[parsedIndex] require.Equal(t, rootChildEntry.id, parseChildEntry.id) } func Test_Node_Marshal_Errors(t *testing.T) { - root := newNode(nil, EmptyPath) + root := newNode(Key{}) require.NotNil(t, root) - fullpath := newPath([]byte{255}) - childNode1 := newNode(root, fullpath) - childNode1.setValue(Some([]byte("value1"))) + fullKey := ToKey([]byte{255}) + childNode1 := newNode(fullKey) + root.addChild(childNode1, 4) + childNode1.setValue(maybe.Some([]byte("value1"))) require.NotNil(t, childNode1) - err := childNode1.calculateID(&mockMetrics{}) - require.NoError(t, err) - root.addChild(childNode1) + 
childNode1.calculateID(&mockMetrics{}) + root.addChild(childNode1, 4) - fullpath = newPath([]byte{237}) - childNode2 := newNode(root, fullpath) - childNode2.setValue(Some([]byte("value2"))) + fullKey = ToKey([]byte{237}) + childNode2 := newNode(fullKey) + root.addChild(childNode2, 4) + childNode2.setValue(maybe.Some([]byte("value2"))) require.NotNil(t, childNode2) - err = childNode2.calculateID(&mockMetrics{}) - require.NoError(t, err) - root.addChild(childNode2) + childNode2.calculateID(&mockMetrics{}) + root.addChild(childNode2, 4) - data, err := root.marshal() - require.NoError(t, err) + data := root.bytes() for i := 1; i < len(data); i++ { broken := data[:i] - _, err = parseNode(newPath([]byte("")), broken) + _, err := parseNode(ToKey([]byte("")), broken) require.ErrorIs(t, err, io.ErrUnexpectedEOF) } } diff --git a/avalanchego/x/merkledb/path.go b/avalanchego/x/merkledb/path.go deleted file mode 100644 index 7a78f2ac..00000000 --- a/avalanchego/x/merkledb/path.go +++ /dev/null @@ -1,153 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package merkledb - -import ( - "bytes" - "reflect" - "strings" - "unsafe" -) - -const EmptyPath path = "" - -// SerializedPath contains a path from the trie. -// The trie branch factor is 16, so the path may contain an odd number of nibbles. -// If it did contain an odd number of nibbles, the last 4 bits of the last byte should be discarded. 
-type SerializedPath struct { - NibbleLength int - Value []byte -} - -func (s SerializedPath) hasOddLength() bool { - return s.NibbleLength&1 == 1 -} - -func (s SerializedPath) Equal(other SerializedPath) bool { - return s.NibbleLength == other.NibbleLength && bytes.Equal(s.Value, other.Value) -} - -func (s SerializedPath) deserialize() path { - result := newPath(s.Value) - // trim the last nibble if the path has an odd length - return result[:len(result)-s.NibbleLength&1] -} - -// Returns true iff [prefix] is a prefix of [s] or equal to it. -func (s SerializedPath) HasPrefix(prefix SerializedPath) bool { - if len(s.Value) < len(prefix.Value) { - return false - } - prefixValue := prefix.Value - if !prefix.hasOddLength() { - return bytes.HasPrefix(s.Value, prefixValue) - } - reducedSize := len(prefixValue) - 1 - - // grab the last nibble in the prefix and serialized path - prefixRemainder := prefixValue[reducedSize] >> 4 - valueRemainder := s.Value[reducedSize] >> 4 - prefixValue = prefixValue[:reducedSize] - return bytes.HasPrefix(s.Value, prefixValue) && valueRemainder == prefixRemainder -} - -// Returns true iff [prefix] is a prefix of [s] but not equal to it. 
-func (s SerializedPath) HasStrictPrefix(prefix SerializedPath) bool { - return s.HasPrefix(prefix) && !s.Equal(prefix) -} - -func (s SerializedPath) NibbleVal(nibbleIndex int) byte { - value := s.Value[nibbleIndex>>1] - isOdd := byte(nibbleIndex & 1) - isEven := (1 - isOdd) - - // return value first(even index) or last 4(odd index) bits of the corresponding byte - return isEven*value>>4 + isOdd*(value&0x0F) -} - -func (s SerializedPath) AppendNibble(nibble byte) SerializedPath { - // even is 1 if even, 0 if odd - even := 1 - s.NibbleLength&1 - value := make([]byte, len(s.Value)+even) - copy(value, s.Value) - - // shift the nibble 4 left if even, do nothing if odd - value[len(value)-1] += nibble << (4 * even) - return SerializedPath{Value: value, NibbleLength: s.NibbleLength + 1} -} - -type path string - -// Returns: -// * 0 if [p] == [other]. -// * -1 if [p] < [other]. -// * 1 if [p] > [other]. -func (p path) Compare(other path) int { - return strings.Compare(string(p), string(other)) -} - -// Invariant: The returned value must not be modified. -func (p path) Bytes() []byte { - // avoid copying during the conversion - // "safe" because we never edit the value, only used as DB key - buf := *(*[]byte)(unsafe.Pointer(&p)) - (*reflect.SliceHeader)(unsafe.Pointer(&buf)).Cap = len(p) - return buf -} - -// Returns true iff [p] begins with [prefix]. -func (p path) HasPrefix(prefix path) bool { - return strings.HasPrefix(string(p), string(prefix)) -} - -// Append [val] to [p]. -func (p path) Append(val byte) path { - return p + path(val) -} - -// Returns the serialized representation of [p]. 
-func (p path) Serialize() SerializedPath { - // need half the number of bytes as nibbles - // add one so there is a byte for the odd nibble if it exists - // the extra nibble gets rounded down if even length - byteLength := (len(p) + 1) / 2 - - result := SerializedPath{ - NibbleLength: len(p), - Value: make([]byte, byteLength), - } - - // loop over the path's bytes - // if the length is odd, subtract 1 so we don't overflow on the p[pathIndex+1] - keyIndex := 0 - lastIndex := len(p) - len(p)&1 - for pathIndex := 0; pathIndex < lastIndex; pathIndex += 2 { - result.Value[keyIndex] = p[pathIndex]<<4 + p[pathIndex+1] - keyIndex++ - } - - // if there is was a odd number of nibbles, grab the last one - if result.hasOddLength() { - result.Value[keyIndex] = p[keyIndex<<1] << 4 - } - - return result -} - -func newPath(p []byte) path { - // create new buffer with double the length of the input since each byte gets split into two nibbles - buffer := make([]byte, 2*len(p)) - - // first nibble gets shifted right 4 (divided by 16) to isolate the first nibble - // second nibble gets bitwise anded with 0x0F (1111) to isolate the second nibble - bufferIndex := 0 - for _, currentByte := range p { - buffer[bufferIndex] = currentByte >> 4 - buffer[bufferIndex+1] = currentByte & 0x0F - bufferIndex += 2 - } - - // avoid copying during the conversion - return *(*path)(unsafe.Pointer(&buffer)) -} diff --git a/avalanchego/x/merkledb/path_test.go b/avalanchego/x/merkledb/path_test.go deleted file mode 100644 index 6dcab62a..00000000 --- a/avalanchego/x/merkledb/path_test.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package merkledb - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func Test_SerializedPath_NibbleVal(t *testing.T) { - path := SerializedPath{Value: []byte{240, 237}} - require.Equal(t, byte(15), path.NibbleVal(0)) - require.Equal(t, byte(0), path.NibbleVal(1)) - require.Equal(t, byte(14), path.NibbleVal(2)) - require.Equal(t, byte(13), path.NibbleVal(3)) -} - -func Test_SerializedPath_AppendNibble(t *testing.T) { - path := SerializedPath{Value: []byte{}} - require.Equal(t, 0, path.NibbleLength) - - path = path.AppendNibble(1) - require.Equal(t, 1, path.NibbleLength) - require.Equal(t, byte(1), path.NibbleVal(0)) - - path = path.AppendNibble(2) - require.Equal(t, 2, path.NibbleLength) - require.Equal(t, byte(2), path.NibbleVal(1)) -} - -func Test_SerializedPath_Has_Prefix(t *testing.T) { - first := SerializedPath{Value: []byte("FirstKey")} - prefix := SerializedPath{Value: []byte("FirstKe")} - require.True(t, first.HasPrefix(prefix)) - require.True(t, first.HasStrictPrefix(prefix)) - - first = SerializedPath{Value: []byte("FirstKey"), NibbleLength: 16} - prefix = SerializedPath{Value: []byte("FirstKey"), NibbleLength: 15} - require.True(t, first.HasPrefix(prefix)) - require.True(t, first.HasStrictPrefix(prefix)) - - first = SerializedPath{Value: []byte("FirstKey"), NibbleLength: 15} - prefix = SerializedPath{Value: []byte("FirstKey"), NibbleLength: 15} - require.True(t, first.HasPrefix(prefix)) - require.False(t, first.HasStrictPrefix(prefix)) - - first = SerializedPath{Value: []byte{247}, NibbleLength: 2} - prefix = SerializedPath{Value: []byte{240}, NibbleLength: 2} - require.False(t, first.HasPrefix(prefix)) - require.False(t, first.HasStrictPrefix(prefix)) - - first = SerializedPath{Value: []byte{247}, NibbleLength: 2} - prefix = SerializedPath{Value: []byte{240}, NibbleLength: 1} - require.True(t, first.HasPrefix(prefix)) - require.True(t, first.HasStrictPrefix(prefix)) - - first = SerializedPath{Value: []byte{}, NibbleLength: 0} - 
prefix = SerializedPath{Value: []byte{}, NibbleLength: 0} - require.True(t, first.HasPrefix(prefix)) - require.False(t, first.HasStrictPrefix(prefix)) -} - -func Test_SerializedPath_Equal(t *testing.T) { - first := SerializedPath{Value: []byte("FirstKey"), NibbleLength: 16} - prefix := SerializedPath{Value: []byte("FirstKey"), NibbleLength: 16} - require.True(t, first.Equal(prefix)) - - first = SerializedPath{Value: []byte("FirstKey"), NibbleLength: 16} - prefix = SerializedPath{Value: []byte("FirstKey"), NibbleLength: 15} - require.False(t, first.Equal(prefix)) - - first = SerializedPath{Value: []byte("FirstKey"), NibbleLength: 15} - prefix = SerializedPath{Value: []byte("FirstKey"), NibbleLength: 15} - require.True(t, first.Equal(prefix)) -} diff --git a/avalanchego/x/merkledb/proof.go b/avalanchego/x/merkledb/proof.go index 651ae480..8ddd97ff 100644 --- a/avalanchego/x/merkledb/proof.go +++ b/avalanchego/x/merkledb/proof.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package merkledb @@ -8,15 +8,19 @@ import ( "context" "errors" "fmt" + "math" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/trace" "github.com/ava-labs/avalanchego/utils/hashing" + "github.com/ava-labs/avalanchego/utils/maybe" + + pb "github.com/ava-labs/avalanchego/proto/pb/sync" ) -const verificationCacheSize = 2_000 +const verificationCacheSize = math.MaxUint16 var ( ErrInvalidProof = errors.New("proof obtained an invalid root ID") @@ -26,48 +30,116 @@ var ( ErrNonIncreasingProofNodes = errors.New("each proof node key must be a strict prefix of the next") ErrExtraProofNodes = errors.New("extra proof nodes in path") ErrDataInMissingRootProof = errors.New("there should be no state or deleted keys in a change proof that had a missing root") + ErrEmptyProof = errors.New("proof is empty") ErrNoMerkleProof = errors.New("empty key response must include merkle proof") ErrShouldJustBeRoot = errors.New("end proof should only contain root") ErrNoStartProof = errors.New("no start proof") ErrNoEndProof = errors.New("no end proof") - ErrNoProof = errors.New("proof has no nodes") ErrProofNodeNotForKey = errors.New("the provided node has a key that is not a prefix of the specified key") ErrProofValueDoesntMatch = errors.New("the provided value does not match the proof node for the provided key's value") ErrProofNodeHasUnincludedValue = errors.New("the provided proof has a value for a key within the range that is not present in the provided key/values") + ErrInvalidMaybe = errors.New("maybe is nothing but has value") + ErrNilProofNode = errors.New("proof node is nil") + ErrNilValueOrHash = errors.New("proof node's valueOrHash field is nil") + ErrNilKey = errors.New("key is nil") + ErrInvalidKeyLength = errors.New("key length doesn't match bytes length, check specified branchFactor") + ErrNilRangeProof = errors.New("range proof is nil") + ErrNilChangeProof = 
errors.New("change proof is nil") + ErrNilMaybeBytes = errors.New("maybe bytes is nil") + ErrNilProof = errors.New("proof is nil") + ErrNilValue = errors.New("value is nil") + ErrUnexpectedEndProof = errors.New("end proof should be empty") ) type ProofNode struct { - KeyPath SerializedPath + Key Key // Nothing if this is an intermediate node. // The value in this node if its length < [HashLen]. // The hash of the value in this node otherwise. - ValueOrHash Maybe[[]byte] + ValueOrHash maybe.Maybe[[]byte] Children map[byte]ids.ID } -// An inclusion/exclustion proof of a key. +// ToProto converts the ProofNode into the protobuf version of a proof node +// Assumes [node.Key.length] <= math.MaxUint64. +func (node *ProofNode) ToProto() *pb.ProofNode { + pbNode := &pb.ProofNode{ + Key: &pb.Key{ + Length: uint64(node.Key.length), + Value: node.Key.Bytes(), + }, + ValueOrHash: &pb.MaybeBytes{ + Value: node.ValueOrHash.Value(), + IsNothing: node.ValueOrHash.IsNothing(), + }, + Children: make(map[uint32][]byte, len(node.Children)), + } + + for childIndex, childID := range node.Children { + childID := childID + pbNode.Children[uint32(childIndex)] = childID[:] + } + + return pbNode +} + +func (node *ProofNode) UnmarshalProto(pbNode *pb.ProofNode) error { + switch { + case pbNode == nil: + return ErrNilProofNode + case pbNode.ValueOrHash == nil: + return ErrNilValueOrHash + case pbNode.ValueOrHash.IsNothing && len(pbNode.ValueOrHash.Value) != 0: + return ErrInvalidMaybe + case pbNode.Key == nil: + return ErrNilKey + case len(pbNode.Key.Value) != bytesNeeded(int(pbNode.Key.Length)): + return ErrInvalidKeyLength + } + node.Key = ToKey(pbNode.Key.Value).Take(int(pbNode.Key.Length)) + node.Children = make(map[byte]ids.ID, len(pbNode.Children)) + for childIndex, childIDBytes := range pbNode.Children { + if childIndex > math.MaxUint8 { + return errChildIndexTooLarge + } + childID, err := ids.ToID(childIDBytes) + if err != nil { + return err + } + node.Children[byte(childIndex)] = 
childID + } + + if !pbNode.ValueOrHash.IsNothing { + node.ValueOrHash = maybe.Some(pbNode.ValueOrHash.Value) + } + + return nil +} + +// Proof represents an inclusion/exclusion proof of a key. type Proof struct { // Nodes in the proof path from root --> target key // (or node that would be where key is if it doesn't exist). - // Must always be non-empty (i.e. have the root node). + // Always contains at least the root. Path []ProofNode // This is a proof that [key] exists/doesn't exist. - Key []byte + Key Key // Nothing if [Key] isn't in the trie. - // Otherwise the value corresponding to [Key]. - Value Maybe[[]byte] + // Otherwise, the value corresponding to [Key]. + Value maybe.Maybe[[]byte] } -// Returns nil if the trie given in [proof] has root [expectedRootID]. +// Verify returns nil if the trie given in [proof] has root [expectedRootID]. // That is, this is a valid proof that [proof.Key] exists/doesn't exist // in the trie with root [expectedRootID]. -func (proof *Proof) Verify(ctx context.Context, expectedRootID ids.ID) error { +func (proof *Proof) Verify(ctx context.Context, expectedRootID ids.ID, tokenSize int) error { // Make sure the proof is well-formed. if len(proof.Path) == 0 { - return ErrNoProof + return ErrEmptyProof } - if err := verifyProofPath(proof.Path, newPath(proof.Key)); err != nil { + + if err := verifyProofPath(proof.Path, maybe.Some(proof.Key)); err != nil { return err } @@ -76,35 +148,35 @@ func (proof *Proof) Verify(ctx context.Context, expectedRootID ids.ID) error { // If the last proof node's key is [proof.Key] (i.e. this is an inclusion proof) // then the value of the last proof node must match [proof.Value]. - // Note odd length keys can never match the [proof.Key] since it's bytes, - // and thus an even number of nibbles. 
- if !lastNode.KeyPath.hasOddLength() && - bytes.Equal(proof.Key, lastNode.KeyPath.Value) && + // Note partial byte length keys can never match the [proof.Key] since it's bytes, + // and thus has a whole number of bytes + if !lastNode.Key.hasPartialByte() && + proof.Key == lastNode.Key && !valueOrHashMatches(proof.Value, lastNode.ValueOrHash) { return ErrProofValueDoesntMatch } - // If the last proof node has an odd length or a different key than [proof.Key] - // then this is an exclusion proof and should prove that [proof.Key] isn't in the trie.. - // Note odd length keys can never match the [proof.Key] since it's bytes, - // and thus an even number of nibbles. - if (lastNode.KeyPath.hasOddLength() || !bytes.Equal(proof.Key, lastNode.KeyPath.Value)) && - !proof.Value.IsNothing() { + // If the last proof node has a length not evenly divisible into bytes or a different key than [proof.Key] + // then this is an exclusion proof and should prove that [proof.Key] isn't in the trie. + // Note length not evenly divisible into bytes can never match the [proof.Key] since it's bytes, + // and thus an exact number of bytes. + if (lastNode.Key.hasPartialByte() || proof.Key != lastNode.Key) && + proof.Value.HasValue() { return ErrProofValueDoesntMatch } - view, err := getEmptyTrieView(ctx) + // Don't bother locking [view] -- nobody else has a reference to it. + view, err := getStandaloneView(ctx, nil, tokenSize) if err != nil { return err } - // Insert all of the proof nodes. - // [provenPath] is the path that we are proving exists, or the path - // that is where the path we are proving doesn't exist should be. - provenPath := proof.Path[len(proof.Path)-1].KeyPath.deserialize() + // Insert all proof nodes. + // [provenKey] is the key that we are proving exists, or the key + // that is the next key along the node path, proving that [proof.Key] doesn't exist in the trie. 
+ provenKey := maybe.Some(proof.Path[len(proof.Path)-1].Key) - // Don't bother locking [db] and [view] -- nobody else has a reference to them. - if err = addPathInfo(view, proof.Path, provenPath, provenPath); err != nil { + if err = addPathInfo(view, proof.Path, provenKey, provenKey); err != nil { return err } @@ -118,23 +190,71 @@ func (proof *Proof) Verify(ctx context.Context, expectedRootID ids.ID) error { return nil } +func (proof *Proof) ToProto() *pb.Proof { + value := &pb.MaybeBytes{ + Value: proof.Value.Value(), + IsNothing: proof.Value.IsNothing(), + } + + pbProof := &pb.Proof{ + Key: proof.Key.Bytes(), + Value: value, + } + + pbProof.Proof = make([]*pb.ProofNode, len(proof.Path)) + for i, node := range proof.Path { + pbProof.Proof[i] = node.ToProto() + } + + return pbProof +} + +func (proof *Proof) UnmarshalProto(pbProof *pb.Proof) error { + switch { + case pbProof == nil: + return ErrNilProof + case pbProof.Value == nil: + return ErrNilValue + case pbProof.Value.IsNothing && len(pbProof.Value.Value) != 0: + return ErrInvalidMaybe + } + + proof.Key = ToKey(pbProof.Key) + + if !pbProof.Value.IsNothing { + proof.Value = maybe.Some(pbProof.Value.Value) + } + + proof.Path = make([]ProofNode, len(pbProof.Proof)) + for i, pbNode := range pbProof.Proof { + if err := proof.Path[i].UnmarshalProto(pbNode); err != nil { + return err + } + } + + return nil +} + type KeyValue struct { Key []byte Value []byte } -// A proof that a given set of key-value pairs are in a trie. +// RangeProof is a proof that a given set of key-value pairs are in a trie. type RangeProof struct { + // Invariant: At least one of [StartProof], [EndProof], [KeyValues] is non-empty. + // A proof that the smallest key in the requested range does/doesn't exist. // Note that this may not be an entire proof -- nodes are omitted if // they are also in [EndProof]. StartProof []ProofNode - // A proof of the greatest key in [KeyValues], or, if this proof contains - // no [KeyValues], just the root. 
- // Empty if the request for this range proof gave no upper bound - // on the range to fetch, unless this proof contains no [KeyValues] - // and [StartProof] is empty. + // If no upper range bound was given and [KeyValues] is empty, this is empty. + // + // If no upper range bound was given and [KeyValues] is non-empty, this is + // a proof for the largest key in [KeyValues]. + // + // Otherwise this is a proof for the upper range bound. EndProof []ProofNode // This proof proves that the key-value pairs in [KeyValues] are in the trie. @@ -142,35 +262,31 @@ type RangeProof struct { KeyValues []KeyValue } -// Returns nil iff all the following hold: +// Verify returns nil iff all the following hold: +// - The invariants of RangeProof hold. // - [start] <= [end]. -// - [proof] is non-empty. -// - All keys in [proof.KeyValues] are in the range [start, end]. -// If [start] is empty, all keys are considered > [start]. -// If [end] is empty, all keys are considered < [end]. -// - [proof.KeyValues] is sorted by increasing key. -// - [proof.StartProof] and [proof.EndProof] are well-formed. -// - One of the following holds: -// [end] and [proof.EndProof] are empty. -// [proof.StartProof], [start], [end], and [proof.KeyValues] are empty and -// [proof.EndProof] is just the root. -// [end] is non-empty and [proof.EndProof] is a valid proof of a key <= [end]. -// - [expectedRootID] is the root of the trie containing the given key-value -// pairs and start/end proofs. +// - [proof] proves the key-value pairs in [proof.KeyValues] are in the trie +// whose root is [expectedRootID]. +// +// All keys in [proof.KeyValues] are in the range [start, end]. +// +// If [start] is Nothing, all keys are considered > [start]. +// If [end] is Nothing, all keys are considered < [end]. 
func (proof *RangeProof) Verify( ctx context.Context, - start []byte, - end []byte, + start maybe.Maybe[[]byte], + end maybe.Maybe[[]byte], expectedRootID ids.ID, + tokenSize int, ) error { switch { - case len(end) > 0 && bytes.Compare(start, end) > 0: + case start.HasValue() && end.HasValue() && bytes.Compare(start.Value(), end.Value()) > 0: return ErrStartAfterEnd case len(proof.KeyValues) == 0 && len(proof.StartProof) == 0 && len(proof.EndProof) == 0: - return ErrNoMerkleProof - case len(start) == 0 && len(end) == 0 && len(proof.KeyValues) == 0 && len(proof.EndProof) != 1: - return ErrShouldJustBeRoot - case len(proof.EndProof) == 0 && len(end) > 0: + return ErrEmptyProof + case end.IsNothing() && len(proof.KeyValues) == 0 && len(proof.EndProof) != 0: + return ErrUnexpectedEndProof + case len(proof.EndProof) == 0 && (end.HasValue() || len(proof.KeyValues) > 0): return ErrNoEndProof } @@ -179,63 +295,93 @@ func (proof *RangeProof) Verify( return err } - largestkey := end + // [proof] allegedly provides and proves all key-value + // pairs in [smallestProvenKey, largestProvenKey]. + // If [smallestProvenKey] is Nothing, [proof] should + // provide and prove all keys < [largestProvenKey]. + // If [largestProvenKey] is Nothing, [proof] should + // provide and prove all keys > [smallestProvenKey]. + // If both are Nothing, [proof] should prove the entire trie. + smallestProvenKey := maybe.Bind(start, ToKey) + + largestProvenKey := maybe.Bind(end, ToKey) + if len(proof.KeyValues) > 0 { // If [proof] has key-value pairs, we should insert children - // greater than [end] to ancestors of the node containing [end] - // so that we get the expected root ID. - largestkey = proof.KeyValues[len(proof.KeyValues)-1].Key + // greater than [largestProvenKey] to ancestors of the node containing + // [largestProvenKey] so that we get the expected root ID. 
+ largestProvenKey = maybe.Some(ToKey(proof.KeyValues[len(proof.KeyValues)-1].Key)) } // The key-value pairs (allegedly) proven by [proof]. - keyValues := make(map[path][]byte, len(proof.KeyValues)) + keyValues := make(map[Key][]byte, len(proof.KeyValues)) for _, keyValue := range proof.KeyValues { - keyValues[newPath(keyValue.Key)] = keyValue.Value + keyValues[ToKey(keyValue.Key)] = keyValue.Value } - smallestPath := newPath(start) - largestPath := newPath(largestkey) - // Ensure that the start proof is valid and contains values that // match the key/values that were sent. - if err := verifyProofPath(proof.StartProof, smallestPath); err != nil { + if err := verifyProofPath(proof.StartProof, smallestProvenKey); err != nil { return err } - if err := verifyAllRangeProofKeyValuesPresent(proof.StartProof, smallestPath, largestPath, keyValues); err != nil { + if err := verifyAllRangeProofKeyValuesPresent( + proof.StartProof, + smallestProvenKey, + largestProvenKey, + keyValues, + ); err != nil { return err } // Ensure that the end proof is valid and contains values that // match the key/values that were sent. - if err := verifyProofPath(proof.EndProof, largestPath); err != nil { + if err := verifyProofPath(proof.EndProof, largestProvenKey); err != nil { return err } - if err := verifyAllRangeProofKeyValuesPresent(proof.EndProof, smallestPath, largestPath, keyValues); err != nil { + if err := verifyAllRangeProofKeyValuesPresent( + proof.EndProof, + smallestProvenKey, + largestProvenKey, + keyValues, + ); err != nil { return err } + // Insert all key-value pairs into the trie. + ops := make([]database.BatchOp, len(proof.KeyValues)) + for i, kv := range proof.KeyValues { + ops[i] = database.BatchOp{ + Key: kv.Key, + Value: kv.Value, + } + } + // Don't need to lock [view] because nobody else has a reference to it. 
- view, err := getEmptyTrieView(ctx) + view, err := getStandaloneView(ctx, ops, tokenSize) if err != nil { return err } - // Insert all key-value pairs into the trie. - for _, kv := range proof.KeyValues { - if _, err := view.insertIntoTrie(newPath(kv.Key), Some(kv.Value)); err != nil { - return err - } - } - - // For all the nodes along the edges of the proof, insert children < [start] and > [end] + // For all the nodes along the edges of the proof, insert children + // < [smallestProvenKey] and > [largestProvenKey] // into the trie so that we get the expected root ID (if this proof is valid). - // By inserting all children < [start], we prove that there are no keys - // > [start] but less than the first key given. That is, the peer who - // gave us this proof is not omitting nodes. - if err := addPathInfo(view, proof.StartProof, smallestPath, largestPath); err != nil { + // By inserting all children < [smallestProvenKey], we prove that there are no keys + // > [smallestProvenKey] but less than the first key given. + // That is, the peer who gave us this proof is not omitting nodes. 
+ if err := addPathInfo( + view, + proof.StartProof, + smallestProvenKey, + largestProvenKey, + ); err != nil { return err } - if err := addPathInfo(view, proof.EndProof, smallestPath, largestPath); err != nil { + if err := addPathInfo( + view, + proof.EndProof, + smallestProvenKey, + largestProvenKey, + ); err != nil { return err } @@ -249,24 +395,79 @@ func (proof *RangeProof) Verify( return nil } +func (proof *RangeProof) ToProto() *pb.RangeProof { + startProof := make([]*pb.ProofNode, len(proof.StartProof)) + for i, node := range proof.StartProof { + startProof[i] = node.ToProto() + } + + endProof := make([]*pb.ProofNode, len(proof.EndProof)) + for i, node := range proof.EndProof { + endProof[i] = node.ToProto() + } + + keyValues := make([]*pb.KeyValue, len(proof.KeyValues)) + for i, kv := range proof.KeyValues { + keyValues[i] = &pb.KeyValue{ + Key: kv.Key, + Value: kv.Value, + } + } + + return &pb.RangeProof{ + StartProof: startProof, + EndProof: endProof, + KeyValues: keyValues, + } +} + +func (proof *RangeProof) UnmarshalProto(pbProof *pb.RangeProof) error { + if pbProof == nil { + return ErrNilRangeProof + } + + proof.StartProof = make([]ProofNode, len(pbProof.StartProof)) + for i, protoNode := range pbProof.StartProof { + if err := proof.StartProof[i].UnmarshalProto(protoNode); err != nil { + return err + } + } + + proof.EndProof = make([]ProofNode, len(pbProof.EndProof)) + for i, protoNode := range pbProof.EndProof { + if err := proof.EndProof[i].UnmarshalProto(protoNode); err != nil { + return err + } + } + + proof.KeyValues = make([]KeyValue, len(pbProof.KeyValues)) + for i, kv := range pbProof.KeyValues { + proof.KeyValues[i] = KeyValue{ + Key: kv.Key, + Value: kv.Value, + } + } + + return nil +} + // Verify that all non-intermediate nodes in [proof] which have keys // in [[start], [end]] have the value given for that key in [keysValues]. 
-func verifyAllRangeProofKeyValuesPresent(proof []ProofNode, start, end path, keysValues map[path][]byte) error { +func verifyAllRangeProofKeyValuesPresent(proof []ProofNode, start maybe.Maybe[Key], end maybe.Maybe[Key], keysValues map[Key][]byte) error { for i := 0; i < len(proof); i++ { var ( - node = proof[i] - nodeKey = node.KeyPath - nodePath = nodeKey.deserialize() + node = proof[i] + nodeKey = node.Key ) - // Skip odd length keys since they cannot have a value (enforced by [verifyProofPath]). - if !nodeKey.hasOddLength() && nodePath.Compare(start) >= 0 && nodePath.Compare(end) <= 0 { - value, ok := keysValues[nodePath] - if !ok && !node.ValueOrHash.IsNothing() { + // Skip keys that cannot have a value (enforced by [verifyProofPath]). + if !nodeKey.hasPartialByte() && (start.IsNothing() || !nodeKey.Less(start.Value())) && (end.IsNothing() || !nodeKey.Greater(end.Value())) { + value, ok := keysValues[nodeKey] + if !ok && node.ValueOrHash.HasValue() { // We didn't get a key-value pair for this key, but the proof node has a value. return ErrProofNodeHasUnincludedValue } - if ok && !valueOrHashMatches(Some(value), node.ValueOrHash) { + if ok && !valueOrHashMatches(maybe.Some(value), node.ValueOrHash) { // We got a key-value pair for this key, but the value in the proof // node doesn't match the value we got for this key. return ErrProofValueDoesntMatch @@ -276,222 +477,177 @@ func verifyAllRangeProofKeyValuesPresent(proof []ProofNode, start, end path, key return nil } +type KeyChange struct { + Key []byte + Value maybe.Maybe[[]byte] +} + +// ChangeProof proves that a set of key-value changes occurred +// between two trie roots, where each key-value pair's key is +// between some lower and upper bound (inclusive). type ChangeProof struct { - // If false, the node that created this doesn't have - // sufficient history to generate a change proof and - // all other fields must be empty. - // Otherwise at least one other field is non-empty. 
- HadRootsInHistory bool + // Invariant: At least one of [StartProof], [EndProof], or + // [KeyChanges] is non-empty. + // A proof that the smallest key in the requested range does/doesn't // exist in the trie with the requested start root. // Empty if no lower bound on the requested range was given. // Note that this may not be an entire proof -- nodes are omitted if // they are also in [EndProof]. StartProof []ProofNode - // A proof that the largest key in [KeyValues] and [DeletedKeys] - // does/doesn't exist in the trie with the requested start root. - // Empty iff no upper bound on the requested range was given - // and [KeyValues] and [DeletedKeys] are empty. + + // If [KeyChanges] is non-empty, this is a proof of the largest key + // in [KeyChanges]. + // + // If [KeyChanges] is empty and an upper range bound was given, + // this is a proof of the upper range bound. + // + // If [KeyChanges] is empty and no upper range bound was given, + // this is empty. EndProof []ProofNode - // A subset of key-values that were added or had their values modified - // between the requested start root (exclusive) and the requested + + // A subset of key-values that were added, removed, or had their values + // modified between the requested start root (exclusive) and the requested // end root (inclusive). - // Sorted by increasing key. - KeyValues []KeyValue - // A subset of keys that were removed from the trie between the requested - // start root (exclusive) and the requested end root (inclusive). - // Sorted by increasing key. - DeletedKeys [][]byte + // Each key is in the requested range (inclusive). + // The first key-value is the first key-value at/after the range start. + // The key-value pairs are consecutive. That is, if keys k1 and k2 are + // in [KeyChanges] then there is no k3 that was modified between the start and + // end roots such that k1 < k3 < k2. 
+ // This is a subset of the requested key-value range, rather than the entire + // range, because otherwise the proof may be too large. + // Sorted by increasing key and with no duplicate keys. + // + // Example: Suppose that between the start root and the end root, the following + // key-value pairs were added, removed, or modified: + // + // [kv1, kv2, kv3, kv4, kv5] + // where start <= kv1 < ... < kv5 <= end. + // + // The following are possible values of [KeyChanges]: + // + // [] + // [kv1] + // [kv1, kv2] + // [kv1, kv2, kv3] + // [kv1, kv2, kv3, kv4] + // [kv1, kv2, kv3, kv4, kv5] + // + // The following values of [KeyChanges] are always invalid, for example: + // + // [kv2] (Doesn't include kv1, the first key-value at/after the range start) + // [kv1, kv3] (Doesn't include kv2, the key-value between kv1 and kv3) + // [kv1, kv3, kv2] (Not sorted by increasing key) + // [kv1, kv1] (Duplicate key-value pairs) + // [kv0, kv1] (For some kv1 < start) + // [kv1, kv2, kv3, kv4, kv5, kv6] (For some kv6 > end) + KeyChanges []KeyChange } -// Returns nil iff all of the following hold: -// - [start] <= [end]. -// - [proof] is non-empty iff [proof.HadRootsInHistory]. -// - All keys in [proof.KeyValues] and [proof.DeletedKeys] are in [start, end]. -// If [start] is empty, all keys are considered > [start]. -// If [end] is empty, all keys are considered < [end]. -// - [proof.KeyValues] and [proof.DeletedKeys] are sorted in order of increasing key. -// - [proof.StartProof] and [proof.EndProof] are well-formed. -// - When the keys in [proof.KeyValues] are added to [db] and the keys in [proof.DeletedKeys] -// are removed from [db], the root ID of [db] is [expectedEndRootID]. -// -// Assumes [db.lock] isn't held. 
-func (proof *ChangeProof) Verify( - ctx context.Context, - db *Database, - start []byte, - end []byte, - expectedEndRootID ids.ID, -) error { - if len(end) > 0 && bytes.Compare(start, end) > 0 { - return ErrStartAfterEnd - } - - if !proof.HadRootsInHistory { - // The node we requested the proof from didn't have sufficient - // history to fulfill this request. - if !proof.Empty() { - // cannot have any changes if the root was missing - return ErrDataInMissingRootProof - } - return nil - } - - switch { - case proof.Empty(): - return ErrNoMerkleProof - case len(end) > 0 && len(proof.EndProof) == 0: - // We requested an end proof but didn't get one. - return ErrNoEndProof - case len(start) > 0 && len(proof.StartProof) == 0 && len(proof.EndProof) == 0: - // We requested a start proof but didn't get one. - // Note that we also have to check that [proof.EndProof] is empty - // to handle the case that the start proof is empty because all - // its nodes are also in the end proof, and those nodes are omitted. - return ErrNoStartProof - } - - // Make sure the key-value pairs are sorted and in [start, end]. - if err := verifyKeyValues(proof.KeyValues, start, end); err != nil { - return err +func (proof *ChangeProof) ToProto() *pb.ChangeProof { + startProof := make([]*pb.ProofNode, len(proof.StartProof)) + for i, node := range proof.StartProof { + startProof[i] = node.ToProto() } - // Make sure the deleted keys are sorted and in [start, end]. - deletedKeys := make([]KeyValue, len(proof.DeletedKeys)) - for i, key := range proof.DeletedKeys { - deletedKeys[i] = KeyValue{Key: key, Value: nil} - } - if err := verifyKeyValues(deletedKeys, start, end); err != nil { - return err + endProof := make([]*pb.ProofNode, len(proof.EndProof)) + for i, node := range proof.EndProof { + endProof[i] = node.ToProto() } - smallestPath := newPath(start) - - // Make sure the start proof, if given, is well-formed. 
- if err := verifyProofPath(proof.StartProof, smallestPath); err != nil { - return err - } - - // Find the greatest key in [proof.KeyValues] and [proof.DeletedKeys]. - // Note that [proof.EndProof] is a proof for this key. - // [largestPath] is also used when we add children of proof nodes to [trie] below. - largestPath := newPath(proof.getLargestKey(end)) - - // Make sure the end proof, if given, is well-formed. - if err := verifyProofPath(proof.EndProof, largestPath); err != nil { - return err - } - - // gather all key/values in the proof - keyValues := make(map[path]Maybe[[]byte], len(proof.KeyValues)+len(proof.DeletedKeys)) - for _, keyValue := range proof.KeyValues { - keyValues[newPath(keyValue.Key)] = Some(keyValue.Value) - } - for _, key := range proof.DeletedKeys { - keyValues[newPath(key)] = Nothing[[]byte]() - } - - // want to prevent commit writes to DB, but not prevent db reads - db.commitLock.RLock() - defer db.commitLock.RUnlock() - - if err := verifyAllChangeProofKeyValuesPresent( - ctx, - db, - proof.StartProof, - smallestPath, - largestPath, - keyValues, - ); err != nil { - return err + keyChanges := make([]*pb.KeyChange, len(proof.KeyChanges)) + for i, kv := range proof.KeyChanges { + keyChanges[i] = &pb.KeyChange{ + Key: kv.Key, + Value: &pb.MaybeBytes{ + Value: kv.Value.Value(), + IsNothing: kv.Value.IsNothing(), + }, + } } - if err := verifyAllChangeProofKeyValuesPresent( - ctx, - db, - proof.EndProof, - smallestPath, - largestPath, - keyValues, - ); err != nil { - return err + return &pb.ChangeProof{ + StartProof: startProof, + EndProof: endProof, + KeyChanges: keyChanges, } +} - // Don't need to lock [view] because nobody else has a reference to it. - view, err := db.newUntrackedView(len(proof.KeyValues)) - if err != nil { - return err +func (proof *ChangeProof) UnmarshalProto(pbProof *pb.ChangeProof) error { + if pbProof == nil { + return ErrNilChangeProof } - // Insert the key-value pairs into the trie. 
- for _, kv := range proof.KeyValues { - if _, err := view.insertIntoTrie(newPath(kv.Key), Some(kv.Value)); err != nil { + proof.StartProof = make([]ProofNode, len(pbProof.StartProof)) + for i, protoNode := range pbProof.StartProof { + if err := proof.StartProof[i].UnmarshalProto(protoNode); err != nil { return err } } - // Remove the deleted keys from the trie. - for _, key := range proof.DeletedKeys { - if err := view.removeFromTrie(newPath(key)); err != nil { + proof.EndProof = make([]ProofNode, len(pbProof.EndProof)) + for i, protoNode := range pbProof.EndProof { + if err := proof.EndProof[i].UnmarshalProto(protoNode); err != nil { return err } } - // For all the nodes along the edges of the proof, insert children < [start] and > [largestKey] - // into the trie so that we get the expected root ID (if this proof is valid). - if err := addPathInfo(view, proof.StartProof, smallestPath, largestPath); err != nil { - return err - } - if err := addPathInfo(view, proof.EndProof, smallestPath, largestPath); err != nil { - return err - } + proof.KeyChanges = make([]KeyChange, len(pbProof.KeyChanges)) + for i, kv := range pbProof.KeyChanges { + if kv.Value == nil { + return ErrNilMaybeBytes + } - // Make sure we get the expected root. - calculatedRoot, err := view.getMerkleRoot(ctx) - if err != nil { - return err - } - if expectedEndRootID != calculatedRoot { - return fmt.Errorf("%w:[%s], expected:[%s]", ErrInvalidProof, calculatedRoot, expectedEndRootID) + if kv.Value.IsNothing && len(kv.Value.Value) != 0 { + return ErrInvalidMaybe + } + + value := maybe.Nothing[[]byte]() + if !kv.Value.IsNothing { + value = maybe.Some(kv.Value.Value) + } + proof.KeyChanges[i] = KeyChange{ + Key: kv.Key, + Value: value, + } } return nil } // Verifies that all values present in the [proof]: -// - Are nothing when deleted, not in the db, or the node has an odd path length. 
-// - if the node's path is within the key range, that has a value that matches the value passed in the change list or in the db +// - Are nothing when deleted, not in the db, or the node has key partial byte length +// - if the node's key is within the key range, that has a value that matches the value passed in the change list or in the db func verifyAllChangeProofKeyValuesPresent( ctx context.Context, - db *Database, + db MerkleDB, proof []ProofNode, - start path, - end path, - keysValues map[path]Maybe[[]byte], + start maybe.Maybe[Key], + end maybe.Maybe[Key], + keysValues map[Key]maybe.Maybe[[]byte], ) error { for i := 0; i < len(proof); i++ { var ( - node = proof[i] - nodeKey = node.KeyPath - nodePath = nodeKey.deserialize() + node = proof[i] + nodeKey = node.Key ) // Check the value of any node with a key that is within the range. - // Skip odd length keys since they cannot have a value (enforced by [verifyProofPath]). - if !nodeKey.hasOddLength() && nodePath.Compare(start) >= 0 && nodePath.Compare(end) <= 0 { - value, ok := keysValues[nodePath] + // Skip keys that cannot have a value (enforced by [verifyProofPath]). + if !nodeKey.hasPartialByte() && (start.IsNothing() || !nodeKey.Less(start.Value())) && (end.IsNothing() || !nodeKey.Greater(end.Value())) { + value, ok := keysValues[nodeKey] if !ok { // This value isn't in the list of key-value pairs we got. - dbValue, err := db.GetValue(ctx, nodeKey.Value) + dbValue, err := db.GetValue(ctx, nodeKey.Bytes()) if err != nil { - if err != database.ErrNotFound { + if !errors.Is(err, database.ErrNotFound) { return err } // This key isn't in the database so proof node should have Nothing. - value = Nothing[[]byte]() + value = maybe.Nothing[[]byte]() } else { // This key is in the database so proof node should have matching value. 
- value = Some(dbValue) + value = maybe.Some(dbValue) } } if !valueOrHashMatches(value, node.ValueOrHash) { @@ -503,41 +659,58 @@ func verifyAllChangeProofKeyValuesPresent( } func (proof *ChangeProof) Empty() bool { - return len(proof.KeyValues) == 0 && len(proof.DeletedKeys) == 0 && + return len(proof.KeyChanges) == 0 && len(proof.StartProof) == 0 && len(proof.EndProof) == 0 } -// Returns the largest key in [proof.KeyValues] and [proof.DeletedKeys]. -// If there are no keys in the proof, returns [end]. -func (proof *ChangeProof) getLargestKey(end []byte) []byte { - largestKey := end - if len(proof.KeyValues) > 0 { - largestKey = proof.KeyValues[len(proof.KeyValues)-1].Key +// ChangeOrRangeProof has exactly one of [ChangeProof] or [RangeProof] is non-nil. +type ChangeOrRangeProof struct { + ChangeProof *ChangeProof + RangeProof *RangeProof +} + +// Returns nil iff both hold: +// 1. [kvs] is sorted by key in increasing order. +// 2. All keys in [kvs] are in the range [start, end]. +// If [start] is Nothing, there is no lower bound on acceptable keys. +// If [end] is Nothing, there is no upper bound on acceptable keys. +// If [kvs] is empty, returns nil. 
+func verifyKeyChanges(kvs []KeyChange, start maybe.Maybe[[]byte], end maybe.Maybe[[]byte]) error { + if len(kvs) == 0 { + return nil } - if len(proof.DeletedKeys) > 0 { - lastDeleted := proof.DeletedKeys[len(proof.DeletedKeys)-1] - if bytes.Compare(lastDeleted, largestKey) > 0 || len(proof.KeyValues) == 0 { - largestKey = lastDeleted + + // ensure that the keys are in increasing order + for i := 0; i < len(kvs)-1; i++ { + if bytes.Compare(kvs[i].Key, kvs[i+1].Key) >= 0 { + return ErrNonIncreasingValues } } - return largestKey + + // ensure that the keys are within the range [start, end] + if (start.HasValue() && bytes.Compare(kvs[0].Key, start.Value()) < 0) || + (end.HasValue() && bytes.Compare(kvs[len(kvs)-1].Key, end.Value()) > 0) { + return ErrStateFromOutsideOfRange + } + + return nil } // Returns nil iff both hold: // 1. [kvs] is sorted by key in increasing order. // 2. All keys in [kvs] are in the range [start, end]. // If [start] is nil, there is no lower bound on acceptable keys. -// If [end] is nil, there is no upper bound on acceptable keys. +// If [end] is nothing, there is no upper bound on acceptable keys. // If [kvs] is empty, returns nil. 
-func verifyKeyValues(kvs []KeyValue, start, end []byte) error { - hasLowerBound := len(start) > 0 - hasUpperBound := len(end) > 0 +func verifyKeyValues(kvs []KeyValue, start maybe.Maybe[[]byte], end maybe.Maybe[[]byte]) error { + hasLowerBound := start.HasValue() + hasUpperBound := end.HasValue() for i := 0; i < len(kvs); i++ { if i < len(kvs)-1 && bytes.Compare(kvs[i].Key, kvs[i+1].Key) >= 0 { return ErrNonIncreasingValues } - if (hasLowerBound && bytes.Compare(kvs[i].Key, start) < 0) || - (hasUpperBound && bytes.Compare(kvs[i].Key, end) > 0) { + if (hasLowerBound && bytes.Compare(kvs[i].Key, start.Value()) < 0) || + (hasUpperBound && bytes.Compare(kvs[i].Key, end.Value()) > 0) { return ErrStateFromOutsideOfRange } } @@ -545,31 +718,35 @@ func verifyKeyValues(kvs []KeyValue, start, end []byte) error { } // Returns nil iff all the following hold: -// - Any node with an odd nibble length, should not have a value associated with it -// since all keys with values are written in bytes, so have even nibble length. +// - Any node with a partial byte length, should not have a value associated with it +// since all keys with values are written in complete bytes([]byte). // - Each key in [proof] is a strict prefix of the following key. // - Each key in [proof] is a strict prefix of [keyBytes], except possibly the last. -// - If the last element in [proof] is [keyBytes], this is an inclusion proof. +// - If the last element in [proof] is [Key], this is an inclusion proof. // Otherwise, this is an exclusion proof and [keyBytes] must not be in [proof]. 
-func verifyProofPath(proof []ProofNode, keyPath path) error {
-	provenKey := keyPath.Serialize()
+func verifyProofPath(proof []ProofNode, key maybe.Maybe[Key]) error {
+	if len(proof) == 0 {
+		return nil
+	}
 
 	// loop over all but the last node since it will not have the prefix in exclusion proofs
 	for i := 0; i < len(proof)-1; i++ {
-		nodeKey := proof[i].KeyPath
+		currentProofNode := proof[i]
+		nodeKey := currentProofNode.Key
 
-		// intermediate nodes (nodes with odd nibble length) should never have a value associated with them
-		if nodeKey.hasOddLength() && !proof[i].ValueOrHash.IsNothing() {
-			return ErrOddLengthWithValue
+		// Because the interface only supports []byte keys,
+		// a key with a partial byte should not store a value
+		if nodeKey.hasPartialByte() && proof[i].ValueOrHash.HasValue() {
+			return ErrPartialByteLengthWithValue
 		}
 
 		// each node should have a key that has the proven key as a prefix
-		if !provenKey.HasStrictPrefix(nodeKey) {
+		if key.HasValue() && !key.Value().HasStrictPrefix(nodeKey) {
 			return ErrProofNodeNotForKey
 		}
 
-		// each node should have a key that is a prefix of the next node's key
-		nextKey := proof[i+1].KeyPath
+		// each node should have a key that has a matching TokenConfig and is a prefix of the next node's key
+		nextKey := proof[i+1].Key
 		if !nextKey.HasStrictPrefix(nodeKey) {
 			return ErrNonIncreasingProofNodes
 		}
@@ -578,8 +755,8 @@ func verifyProofPath(proof []ProofNode, keyPath path) error {
 	// check the last node for a value since the above loop doesn't check the last node
 	if len(proof) > 0 {
 		lastNode := proof[len(proof)-1]
-		if lastNode.KeyPath.hasOddLength() && !lastNode.ValueOrHash.IsNothing() {
-			return ErrOddLengthWithValue
+		if lastNode.Key.hasPartialByte() && !lastNode.ValueOrHash.IsNothing() {
+			return ErrPartialByteLengthWithValue
 		}
 	}
 
@@ -588,7 +765,7 @@ func verifyProofPath(proof []ProofNode, keyPath path) error {
 // Returns true if [value] and [valueDigest] match.
// [valueOrHash] should be the [ValueOrHash] field of a [ProofNode]. -func valueOrHashMatches(value Maybe[[]byte], valueOrHash Maybe[[]byte]) bool { +func valueOrHashMatches(value maybe.Maybe[[]byte], valueOrHash maybe.Maybe[[]byte]) bool { var ( valueIsNothing = value.IsNothing() digestIsNothing = valueOrHash.IsNothing() @@ -601,42 +778,42 @@ func valueOrHashMatches(value Maybe[[]byte], valueOrHash Maybe[[]byte]) bool { case valueIsNothing: // Both are nothing -- match. return true - case len(value.value) < HashLength: - return bytes.Equal(value.value, valueOrHash.value) + case len(value.Value()) < HashLength: + return bytes.Equal(value.Value(), valueOrHash.Value()) default: - valueHash := hashing.ComputeHash256(value.value) - return bytes.Equal(valueHash, valueOrHash.value) + valueHash := hashing.ComputeHash256(value.Value()) + return bytes.Equal(valueHash, valueOrHash.Value()) } } // Adds each key/value pair in [proofPath] to [t]. -// For each proof node, adds the children that are < [start] or > [end]. -// If [start] is empty, no children are < [start]. -// If [end] is empty, no children are > [end]. -// Assumes [t.lock] is held. +// For each proof node, adds the children that are +// < [insertChildrenLessThan] or > [insertChildrenGreaterThan]. +// If [insertChildrenLessThan] is Nothing, no children are < [insertChildrenLessThan]. +// If [insertChildrenGreaterThan] is Nothing, no children are > [insertChildrenGreaterThan]. +// Assumes [v.lock] is held. 
func addPathInfo( - t *trieView, + v *view, proofPath []ProofNode, - startPath path, - endPath path, + insertChildrenLessThan maybe.Maybe[Key], + insertChildrenGreaterThan maybe.Maybe[Key], ) error { var ( - hasLowerBound = len(startPath) > 0 - hasUpperBound = len(endPath) > 0 + shouldInsertLeftChildren = insertChildrenLessThan.HasValue() + shouldInsertRightChildren = insertChildrenGreaterThan.HasValue() ) for i := len(proofPath) - 1; i >= 0; i-- { proofNode := proofPath[i] - keyPath := proofNode.KeyPath.deserialize() + key := proofNode.Key - if len(keyPath)&1 == 1 && !proofNode.ValueOrHash.IsNothing() { - // a value cannot have an odd number of nibbles in its key - return ErrOddLengthWithValue + if key.hasPartialByte() && !proofNode.ValueOrHash.IsNothing() { + return ErrPartialByteLengthWithValue } // load the node associated with the key or create a new one // pass nothing because we are going to overwrite the value digest below - n, err := t.insertIntoTrie(keyPath, Nothing[[]byte]()) + n, err := v.insert(key, maybe.Nothing[[]byte]()) if err != nil { return err } @@ -644,22 +821,30 @@ func addPathInfo( // node because we may not know the pre-image of the valueDigest. n.valueDigest = proofNode.ValueOrHash - if !hasLowerBound && !hasUpperBound { + if !shouldInsertLeftChildren && !shouldInsertRightChildren { // No children of proof nodes are outside the range. // No need to add any children to [n]. continue } - // Add [proofNode]'s children which are outside the range [start, end]. - compressedPath := EmptyPath + // Add [proofNode]'s children which are outside the range + // [insertChildrenLessThan, insertChildrenGreaterThan]. 
+ compressedKey := Key{} for index, childID := range proofNode.Children { if existingChild, ok := n.children[index]; ok { - compressedPath = existingChild.compressedPath + compressedKey = existingChild.compressedKey } - childPath := keyPath.Append(index) + compressedPath - if (hasLowerBound && childPath.Compare(startPath) < 0) || - (hasUpperBound && childPath.Compare(endPath) > 0) { - n.addChildWithoutNode(index, compressedPath, childID) + childKey := key.Extend(ToToken(index, v.tokenSize), compressedKey) + if (shouldInsertLeftChildren && childKey.Less(insertChildrenLessThan.Value())) || + (shouldInsertRightChildren && childKey.Greater(insertChildrenGreaterThan.Value())) { + // We didn't set the other values on the child entry, but it doesn't matter. + // We only need the IDs to be correct so that the calculated hash is correct. + n.setChildEntry( + index, + &child{ + id: childID, + compressedKey: compressedKey, + }) } } } @@ -667,17 +852,18 @@ func addPathInfo( return nil } -func getEmptyTrieView(ctx context.Context) (*trieView, error) { - tracer, err := trace.New(trace.Config{Enabled: false}) - if err != nil { - return nil, err - } +// getStandaloneView returns a new view that has nothing in it besides the changes due to [ops] +func getStandaloneView(ctx context.Context, ops []database.BatchOp, size int) (*view, error) { db, err := newDatabase( ctx, memdb.New(), Config{ - Tracer: tracer, - NodeCacheSize: verificationCacheSize, + BranchFactor: tokenSizeToBranchFactor[size], + Tracer: trace.Noop, + ValueNodeCacheSize: verificationCacheSize, + IntermediateNodeCacheSize: verificationCacheSize, + IntermediateWriteBufferSize: verificationCacheSize, + IntermediateWriteBatchSize: verificationCacheSize, }, &mockMetrics{}, ) @@ -685,5 +871,5 @@ func getEmptyTrieView(ctx context.Context) (*trieView, error) { return nil, err } - return db.newUntrackedView(defaultPreallocationSize) + return newView(db, db, ViewChanges{BatchOps: ops, ConsumeBytes: true}) } diff --git 
a/avalanchego/x/merkledb/proof_test.go b/avalanchego/x/merkledb/proof_test.go index c4d1eb86..3bc21962 100644 --- a/avalanchego/x/merkledb/proof_test.go +++ b/avalanchego/x/merkledb/proof_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package merkledb @@ -6,131 +6,44 @@ package merkledb import ( "bytes" "context" - "io" + "math/rand" "testing" + "time" "github.com/stretchr/testify/require" - "github.com/ava-labs/avalanchego/database/memdb" + "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/hashing" -) + "github.com/ava-labs/avalanchego/utils/maybe" + "github.com/ava-labs/avalanchego/utils/set" -func getBasicDB() (*Database, error) { - return newDatabase( - context.Background(), - memdb.New(), - Config{ - Tracer: newNoopTracer(), - HistoryLength: 1000, - NodeCacheSize: 1000, - }, - &mockMetrics{}, - ) -} + pb "github.com/ava-labs/avalanchego/proto/pb/sync" +) -func writeBasicBatch(t *testing.T, db *Database) { - batch := db.NewBatch() - require.NoError(t, batch.Put([]byte{0}, []byte{0})) - require.NoError(t, batch.Put([]byte{1}, []byte{1})) - require.NoError(t, batch.Put([]byte{2}, []byte{2})) - require.NoError(t, batch.Put([]byte{3}, []byte{3})) - require.NoError(t, batch.Put([]byte{4}, []byte{4})) - require.NoError(t, batch.Write()) +func Test_Proof_Empty(t *testing.T) { + proof := &Proof{} + err := proof.Verify(context.Background(), ids.Empty, 4) + require.ErrorIs(t, err, ErrEmptyProof) } -func Test_Proof_Marshal(t *testing.T) { +func Test_Proof_Simple(t *testing.T) { require := require.New(t) - dbTrie, err := getBasicDB() - require.NoError(err) - require.NotNil(dbTrie) - writeBasicBatch(t, dbTrie) - proof, err := dbTrie.GetProof(context.Background(), []byte{1}) + db, err := getBasicDB() require.NoError(err) - require.NotNil(proof) - 
proofBytes, err := Codec.EncodeProof(Version, proof) - require.NoError(err) + ctx := context.Background() + require.NoError(db.PutContext(ctx, []byte{}, []byte{1})) + require.NoError(db.PutContext(ctx, []byte{0}, []byte{2})) - parsedProof := &Proof{} - _, err = Codec.DecodeProof(proofBytes, parsedProof) + expectedRoot, err := db.GetMerkleRoot(ctx) require.NoError(err) - verifyPath(t, proof.Path, parsedProof.Path) - require.Equal([]byte{1}, proof.Value.value) -} - -func Test_Proof_Empty(t *testing.T) { - proof := &Proof{} - err := proof.Verify(context.Background(), ids.Empty) - require.ErrorIs(t, err, ErrNoProof) -} - -func Test_Proof_MissingValue(t *testing.T) { - trie, err := getBasicDB() - require.NoError(t, err) - require.NotNil(t, trie) - - require.NoError(t, trie.Insert(context.Background(), []byte{1}, []byte{0})) - require.NoError(t, trie.Insert(context.Background(), []byte{1, 2}, []byte{0})) - require.NoError(t, trie.Insert(context.Background(), []byte{1, 2, 4}, []byte{0})) - require.NoError(t, trie.Insert(context.Background(), []byte{1, 3}, []byte{0})) - - // get a proof for a value not in the db - proof, err := trie.GetProof(context.Background(), []byte{1, 2, 3}) - require.NoError(t, err) - require.NotNil(t, proof) - - require.True(t, proof.Value.IsNothing()) - - proofBytes, err := Codec.EncodeProof(Version, proof) - require.NoError(t, err) - - parsedProof := &Proof{} - _, err = Codec.DecodeProof(proofBytes, parsedProof) - require.NoError(t, err) - - verifyPath(t, proof.Path, parsedProof.Path) -} - -func Test_Proof_Marshal_Errors(t *testing.T) { - trie, err := getBasicDB() - require.NoError(t, err) - require.NotNil(t, trie) - - writeBasicBatch(t, trie) - - proof, err := trie.GetProof(context.Background(), []byte{1}) - require.NoError(t, err) - require.NotNil(t, proof) - - proofBytes, err := Codec.EncodeProof(Version, proof) - require.NoError(t, err) - - for i := 1; i < len(proofBytes); i++ { - broken := proofBytes[:i] - parsed := &Proof{} - _, err = 
Codec.DecodeProof(broken, parsed) - require.ErrorIs(t, err, io.ErrUnexpectedEOF) - } + proof, err := db.GetProof(ctx, []byte{}) + require.NoError(err) - // add a child at an invalid index - proof.Path[0].Children[255] = ids.Empty - _, err = Codec.EncodeProof(Version, proof) - require.ErrorIs(t, err, errChildIndexTooLarge) -} - -func verifyPath(t *testing.T, path1, path2 []ProofNode) { - require.Equal(t, len(path1), len(path2)) - for i := range path1 { - require.True(t, bytes.Equal(path1[i].KeyPath.Value, path2[i].KeyPath.Value)) - require.Equal(t, path1[i].KeyPath.hasOddLength(), path2[i].KeyPath.hasOddLength()) - require.True(t, bytes.Equal(path1[i].ValueOrHash.value, path2[i].ValueOrHash.value)) - for childIndex := range path1[i].Children { - require.Equal(t, path1[i].Children[childIndex], path2[i].Children[childIndex]) - } - } + require.NoError(proof.Verify(ctx, expectedRoot, 4)) } func Test_Proof_Verify_Bad_Data(t *testing.T) { @@ -143,34 +56,41 @@ func Test_Proof_Verify_Bad_Data(t *testing.T) { tests := []test{ { name: "happyPath", - malform: func(proof *Proof) {}, + malform: func(*Proof) {}, expectedErr: nil, }, + { + name: "empty", + malform: func(proof *Proof) { + proof.Path = nil + }, + expectedErr: ErrEmptyProof, + }, { name: "odd length key path with value", malform: func(proof *Proof) { - proof.Path[1].ValueOrHash = Some([]byte{1, 2}) + proof.Path[0].ValueOrHash = maybe.Some([]byte{1, 2}) }, - expectedErr: ErrOddLengthWithValue, + expectedErr: ErrPartialByteLengthWithValue, }, { name: "last proof node has missing value", malform: func(proof *Proof) { - proof.Path[len(proof.Path)-1].ValueOrHash = Nothing[[]byte]() + proof.Path[len(proof.Path)-1].ValueOrHash = maybe.Nothing[[]byte]() }, expectedErr: ErrProofValueDoesntMatch, }, { name: "missing value on proof", malform: func(proof *Proof) { - proof.Value = Nothing[[]byte]() + proof.Value = maybe.Nothing[[]byte]() }, expectedErr: ErrProofValueDoesntMatch, }, { name: "mismatched value on proof", malform: 
func(proof *Proof) { - proof.Value = Some([]byte{10}) + proof.Value = maybe.Some([]byte{10}) }, expectedErr: ErrProofValueDoesntMatch, }, @@ -186,64 +106,71 @@ func Test_Proof_Verify_Bad_Data(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + db, err := getBasicDB() - require.NoError(t, err) + require.NoError(err) writeBasicBatch(t, db) proof, err := db.GetProof(context.Background(), []byte{2}) - require.NoError(t, err) - require.NotNil(t, proof) + require.NoError(err) + require.NotNil(proof) tt.malform(proof) - err = proof.Verify(context.Background(), db.getMerkleRoot()) - require.ErrorIs(t, err, tt.expectedErr) + err = proof.Verify(context.Background(), db.getMerkleRoot(), 4) + require.ErrorIs(err, tt.expectedErr) }) } } func Test_Proof_ValueOrHashMatches(t *testing.T) { - require.True(t, valueOrHashMatches(Some([]byte{0}), Some([]byte{0}))) - require.False(t, valueOrHashMatches(Nothing[[]byte](), Some(hashing.ComputeHash256([]byte{0})))) - require.True(t, valueOrHashMatches(Nothing[[]byte](), Nothing[[]byte]())) - - require.False(t, valueOrHashMatches(Some([]byte{0}), Nothing[[]byte]())) - require.False(t, valueOrHashMatches(Nothing[[]byte](), Some([]byte{0}))) - require.False(t, valueOrHashMatches(Nothing[[]byte](), Some(hashing.ComputeHash256([]byte{1})))) - require.False(t, valueOrHashMatches(Some(hashing.ComputeHash256([]byte{0})), Nothing[[]byte]())) + require := require.New(t) + + require.True(valueOrHashMatches(maybe.Some([]byte{0}), maybe.Some([]byte{0}))) + require.False(valueOrHashMatches(maybe.Nothing[[]byte](), maybe.Some(hashing.ComputeHash256([]byte{0})))) + require.True(valueOrHashMatches(maybe.Nothing[[]byte](), maybe.Nothing[[]byte]())) + + require.False(valueOrHashMatches(maybe.Some([]byte{0}), maybe.Nothing[[]byte]())) + require.False(valueOrHashMatches(maybe.Nothing[[]byte](), maybe.Some([]byte{0}))) + require.False(valueOrHashMatches(maybe.Nothing[[]byte](), 
maybe.Some(hashing.ComputeHash256([]byte{1})))) + require.False(valueOrHashMatches(maybe.Some(hashing.ComputeHash256([]byte{0})), maybe.Nothing[[]byte]())) } func Test_RangeProof_Extra_Value(t *testing.T) { + require := require.New(t) + db, err := getBasicDB() - require.NoError(t, err) + require.NoError(err) writeBasicBatch(t, db) val, err := db.Get([]byte{2}) - require.NoError(t, err) - require.Equal(t, []byte{2}, val) + require.NoError(err) + require.Equal([]byte{2}, val) - proof, err := db.GetRangeProof(context.Background(), []byte{1}, []byte{5, 5}, 10) - require.NoError(t, err) - require.NotNil(t, proof) + proof, err := db.GetRangeProof(context.Background(), maybe.Some([]byte{1}), maybe.Some([]byte{5, 5}), 10) + require.NoError(err) + require.NotNil(proof) - err = proof.Verify( + require.NoError(proof.Verify( context.Background(), - []byte{1}, - []byte{5, 5}, - db.root.id, - ) - require.NoError(t, err) + maybe.Some([]byte{1}), + maybe.Some([]byte{5, 5}), + db.rootID, + db.tokenSize, + )) proof.KeyValues = append(proof.KeyValues, KeyValue{Key: []byte{5}, Value: []byte{5}}) err = proof.Verify( context.Background(), - []byte{1}, - []byte{5, 5}, - db.root.id, + maybe.Some([]byte{1}), + maybe.Some([]byte{5, 5}), + db.rootID, + db.tokenSize, ) - require.ErrorIs(t, err, ErrInvalidProof) + require.ErrorIs(err, ErrInvalidProof) } func Test_RangeProof_Verify_Bad_Data(t *testing.T) { @@ -256,27 +183,36 @@ func Test_RangeProof_Verify_Bad_Data(t *testing.T) { tests := []test{ { name: "happyPath", - malform: func(proof *RangeProof) {}, + malform: func(*RangeProof) {}, expectedErr: nil, }, + { + name: "empty", + malform: func(proof *RangeProof) { + proof.KeyValues = nil + proof.StartProof = nil + proof.EndProof = nil + }, + expectedErr: ErrEmptyProof, + }, { name: "StartProof: last proof node has missing value", malform: func(proof *RangeProof) { - proof.StartProof[len(proof.StartProof)-1].ValueOrHash = Nothing[[]byte]() + proof.StartProof[len(proof.StartProof)-1].ValueOrHash 
= maybe.Nothing[[]byte]() }, expectedErr: ErrProofValueDoesntMatch, }, { name: "EndProof: odd length key path with value", malform: func(proof *RangeProof) { - proof.EndProof[1].ValueOrHash = Some([]byte{1, 2}) + proof.EndProof[0].ValueOrHash = maybe.Some([]byte{1, 2}) }, - expectedErr: ErrOddLengthWithValue, + expectedErr: ErrPartialByteLengthWithValue, }, { name: "EndProof: last proof node has missing value", malform: func(proof *RangeProof) { - proof.EndProof[len(proof.EndProof)-1].ValueOrHash = Nothing[[]byte]() + proof.EndProof[len(proof.EndProof)-1].ValueOrHash = maybe.Nothing[[]byte]() }, expectedErr: ErrProofValueDoesntMatch, }, @@ -291,84 +227,90 @@ func Test_RangeProof_Verify_Bad_Data(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + db, err := getBasicDB() - require.NoError(t, err) + require.NoError(err) writeBasicBatch(t, db) - proof, err := db.GetRangeProof(context.Background(), []byte{2}, []byte{3, 0}, 50) - require.NoError(t, err) - require.NotNil(t, proof) + proof, err := db.GetRangeProof(context.Background(), maybe.Some([]byte{2}), maybe.Some([]byte{3, 0}), 50) + require.NoError(err) + require.NotNil(proof) tt.malform(proof) - err = proof.Verify(context.Background(), []byte{2}, []byte{3, 0}, db.getMerkleRoot()) - require.ErrorIs(t, err, tt.expectedErr) + err = proof.Verify(context.Background(), maybe.Some([]byte{2}), maybe.Some([]byte{3, 0}), db.getMerkleRoot(), db.tokenSize) + require.ErrorIs(err, tt.expectedErr) }) } } func Test_RangeProof_MaxLength(t *testing.T) { + require := require.New(t) + dbTrie, err := getBasicDB() - require.NoError(t, err) - require.NotNil(t, dbTrie) - trie, err := dbTrie.NewView() - require.NoError(t, err) + require.NoError(err) + require.NotNil(dbTrie) + trie, err := dbTrie.NewView(context.Background(), ViewChanges{}) + require.NoError(err) - _, err = trie.GetRangeProof(context.Background(), nil, nil, -1) - require.ErrorIs(t, err, ErrInvalidMaxLength) + _, err = 
trie.GetRangeProof(context.Background(), maybe.Nothing[[]byte](), maybe.Nothing[[]byte](), -1) + require.ErrorIs(err, ErrInvalidMaxLength) - _, err = trie.GetRangeProof(context.Background(), nil, nil, 0) - require.ErrorIs(t, err, ErrInvalidMaxLength) + _, err = trie.GetRangeProof(context.Background(), maybe.Nothing[[]byte](), maybe.Nothing[[]byte](), 0) + require.ErrorIs(err, ErrInvalidMaxLength) } func Test_Proof(t *testing.T) { + require := require.New(t) + dbTrie, err := getBasicDB() - require.NoError(t, err) - require.NotNil(t, dbTrie) - trie, err := dbTrie.NewView() - require.NoError(t, err) - - err = trie.Insert(context.Background(), []byte("key0"), []byte("value0")) - require.NoError(t, err) - err = trie.Insert(context.Background(), []byte("key1"), []byte("value1")) - require.NoError(t, err) - err = trie.Insert(context.Background(), []byte("key2"), []byte("value2")) - require.NoError(t, err) - err = trie.Insert(context.Background(), []byte("key3"), []byte("value3")) - require.NoError(t, err) - err = trie.Insert(context.Background(), []byte("key4"), []byte("value4")) - require.NoError(t, err) + require.NoError(err) + require.NotNil(dbTrie) + trie, err := dbTrie.NewView( + context.Background(), + ViewChanges{ + BatchOps: []database.BatchOp{ + {Key: []byte("key"), Value: []byte("value")}, + {Key: []byte("key0"), Value: []byte("value0")}, + {Key: []byte("key1"), Value: []byte("value1")}, + {Key: []byte("key2"), Value: []byte("value2")}, + {Key: []byte("key3"), Value: []byte("value3")}, + {Key: []byte("key4"), Value: []byte("value4")}, + }, + }, + ) + require.NoError(err) _, err = trie.GetMerkleRoot(context.Background()) - require.NoError(t, err) + require.NoError(err) proof, err := trie.GetProof(context.Background(), []byte("key1")) - require.NoError(t, err) - require.NotNil(t, proof) - - require.Len(t, proof.Path, 3) + require.NoError(err) + require.NotNil(proof) - require.Equal(t, newPath([]byte("key1")).Serialize(), proof.Path[2].KeyPath) - require.Equal(t, 
Some([]byte("value1")), proof.Path[2].ValueOrHash) + require.Len(proof.Path, 3) - require.Equal(t, newPath([]byte{}).Serialize(), proof.Path[0].KeyPath) - require.True(t, proof.Path[0].ValueOrHash.IsNothing()) + require.Equal(ToKey([]byte("key")), proof.Path[0].Key) + require.Equal(maybe.Some([]byte("value")), proof.Path[0].ValueOrHash) + require.Equal(ToKey([]byte("key0")).Take(28), proof.Path[1].Key) + require.True(proof.Path[1].ValueOrHash.IsNothing()) // intermediate node + require.Equal(ToKey([]byte("key1")), proof.Path[2].Key) + require.Equal(maybe.Some([]byte("value1")), proof.Path[2].ValueOrHash) expectedRootID, err := trie.GetMerkleRoot(context.Background()) - require.NoError(t, err) - err = proof.Verify(context.Background(), expectedRootID) - require.NoError(t, err) - - proof.Path[0].ValueOrHash = Some([]byte("value2")) + require.NoError(err) + require.NoError(proof.Verify(context.Background(), expectedRootID, dbTrie.tokenSize)) - err = proof.Verify(context.Background(), expectedRootID) - require.ErrorIs(t, err, ErrInvalidProof) + proof.Path[0].Key = ToKey([]byte("key1")) + err = proof.Verify(context.Background(), expectedRootID, dbTrie.tokenSize) + require.ErrorIs(err, ErrProofNodeNotForKey) } func Test_RangeProof_Syntactic_Verify(t *testing.T) { type test struct { name string - start []byte - end []byte + start maybe.Maybe[[]byte] + end maybe.Maybe[[]byte] proof *RangeProof expectedErr error } @@ -376,126 +318,140 @@ func Test_RangeProof_Syntactic_Verify(t *testing.T) { tests := []test{ { name: "start > end", - start: []byte{1}, - end: []byte{0}, + start: maybe.Some([]byte{1}), + end: maybe.Some([]byte{0}), proof: &RangeProof{}, expectedErr: ErrStartAfterEnd, }, { - name: "empty", // Also tests start can be > end if end is nil - start: []byte{1}, - end: nil, + name: "empty", + start: maybe.Some([]byte{1}), + end: maybe.Nothing[[]byte](), proof: &RangeProof{}, - expectedErr: ErrNoMerkleProof, + expectedErr: ErrEmptyProof, }, { - name: "should just be 
root", - start: nil, - end: nil, + name: "unexpected end proof", + start: maybe.Some([]byte{1}), + end: maybe.Nothing[[]byte](), proof: &RangeProof{ - EndProof: []ProofNode{{}, {}}, + StartProof: []ProofNode{{}}, + EndProof: []ProofNode{{}}, }, - expectedErr: ErrShouldJustBeRoot, + expectedErr: ErrUnexpectedEndProof, }, { - name: "no end proof", - start: []byte{1}, - end: []byte{1}, + name: "no end proof; has end bound", + start: maybe.Some([]byte{1}), + end: maybe.Some([]byte{1}), proof: &RangeProof{ - KeyValues: []KeyValue{{Key: []byte{1}, Value: []byte{1}}}, + StartProof: []ProofNode{{}}, + }, + expectedErr: ErrNoEndProof, + }, + { + name: "no end proof; has key-values", + start: maybe.Some([]byte{1}), + end: maybe.Nothing[[]byte](), + proof: &RangeProof{ + KeyValues: []KeyValue{{}}, }, expectedErr: ErrNoEndProof, }, { name: "unsorted key values", - start: []byte{1}, - end: nil, + start: maybe.Some([]byte{1}), + end: maybe.Nothing[[]byte](), proof: &RangeProof{ KeyValues: []KeyValue{ {Key: []byte{1}, Value: []byte{1}}, {Key: []byte{0}, Value: []byte{0}}, }, + EndProof: []ProofNode{{Key: Key{}}}, }, expectedErr: ErrNonIncreasingValues, }, { name: "key lower than start", - start: []byte{1}, - end: nil, + start: maybe.Some([]byte{1}), + end: maybe.Nothing[[]byte](), proof: &RangeProof{ KeyValues: []KeyValue{ {Key: []byte{0}, Value: []byte{0}}, }, + EndProof: []ProofNode{{Key: Key{}}}, }, expectedErr: ErrStateFromOutsideOfRange, }, { name: "key greater than end", - start: []byte{1}, - end: []byte{1}, + start: maybe.Some([]byte{1}), + end: maybe.Some([]byte{1}), proof: &RangeProof{ KeyValues: []KeyValue{ {Key: []byte{2}, Value: []byte{0}}, }, - EndProof: []ProofNode{{}}, + EndProof: []ProofNode{{Key: Key{}}}, }, expectedErr: ErrStateFromOutsideOfRange, }, { name: "start proof nodes in wrong order", - start: []byte{1, 2}, - end: nil, + start: maybe.Some([]byte{1, 2}), + end: maybe.Nothing[[]byte](), proof: &RangeProof{ KeyValues: []KeyValue{ {Key: []byte{1, 2}, Value: 
[]byte{1}}, }, StartProof: []ProofNode{ { - KeyPath: newPath([]byte{2}).Serialize(), + Key: ToKey([]byte{2}), }, { - KeyPath: newPath([]byte{1}).Serialize(), + Key: ToKey([]byte{1}), }, }, + EndProof: []ProofNode{{Key: Key{}}}, }, expectedErr: ErrProofNodeNotForKey, }, { name: "start proof has node for wrong key", - start: []byte{1, 2}, - end: nil, + start: maybe.Some([]byte{1, 2}), + end: maybe.Nothing[[]byte](), proof: &RangeProof{ KeyValues: []KeyValue{ {Key: []byte{1, 2}, Value: []byte{1}}, }, StartProof: []ProofNode{ { - KeyPath: newPath([]byte{1}).Serialize(), + Key: ToKey([]byte{1}), }, { - KeyPath: newPath([]byte{1, 2, 3}).Serialize(), // Not a prefix of [1, 2] + Key: ToKey([]byte{1, 2, 3}), // Not a prefix of [1, 2] }, { - KeyPath: newPath([]byte{1, 2, 3, 4}).Serialize(), + Key: ToKey([]byte{1, 2, 3, 4}), }, }, + EndProof: []ProofNode{{Key: Key{}}}, }, expectedErr: ErrProofNodeNotForKey, }, { name: "end proof nodes in wrong order", - start: nil, - end: []byte{1, 2}, + start: maybe.Nothing[[]byte](), + end: maybe.Some([]byte{1, 2}), proof: &RangeProof{ KeyValues: []KeyValue{ {Key: []byte{1, 2}, Value: []byte{1}}, }, EndProof: []ProofNode{ { - KeyPath: newPath([]byte{2}).Serialize(), + Key: ToKey([]byte{2}), }, { - KeyPath: newPath([]byte{1}).Serialize(), + Key: ToKey([]byte{1}), }, }, }, @@ -503,21 +459,21 @@ func Test_RangeProof_Syntactic_Verify(t *testing.T) { }, { name: "end proof has node for wrong key", - start: nil, - end: []byte{1, 2}, + start: maybe.Nothing[[]byte](), + end: maybe.Some([]byte{1, 2}), proof: &RangeProof{ KeyValues: []KeyValue{ {Key: []byte{1, 2}, Value: []byte{1}}, }, EndProof: []ProofNode{ { - KeyPath: newPath([]byte{1}).Serialize(), + Key: ToKey([]byte{1}), }, { - KeyPath: newPath([]byte{1, 2, 3}).Serialize(), // Not a prefix of [1, 2] + Key: ToKey([]byte{1, 2, 3}), // Not a prefix of [1, 2] }, { - KeyPath: newPath([]byte{1, 2, 3, 4}).Serialize(), + Key: ToKey([]byte{1, 2, 3, 4}), }, }, }, @@ -527,10 +483,8 @@ func 
Test_RangeProof_Syntactic_Verify(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - require := require.New(t) - - err := tt.proof.Verify(context.Background(), tt.start, tt.end, ids.Empty) - require.ErrorIs(err, tt.expectedErr) + err := tt.proof.Verify(context.Background(), tt.start, tt.end, ids.Empty, 4) + require.ErrorIs(t, err, tt.expectedErr) }) } } @@ -542,7 +496,7 @@ func Test_RangeProof(t *testing.T) { require.NoError(err) writeBasicBatch(t, db) - proof, err := db.GetRangeProof(context.Background(), []byte{1}, []byte{3, 5}, 10) + proof, err := db.GetRangeProof(context.Background(), maybe.Some([]byte{1}), maybe.Some([]byte{3, 5}), 10) require.NoError(err) require.NotNil(proof) require.Len(proof.KeyValues, 3) @@ -555,588 +509,313 @@ func Test_RangeProof(t *testing.T) { require.Equal([]byte{2}, proof.KeyValues[1].Value) require.Equal([]byte{3}, proof.KeyValues[2].Value) - require.Equal([]byte{}, proof.EndProof[0].KeyPath.Value) - require.Equal([]byte{0}, proof.EndProof[1].KeyPath.Value) - require.Equal([]byte{3}, proof.EndProof[2].KeyPath.Value) + require.Len(proof.EndProof, 2) + require.Equal([]byte{0}, proof.EndProof[0].Key.Bytes()) + require.Len(proof.EndProof[0].Children, 5) // 0,1,2,3,4 + require.Equal([]byte{3}, proof.EndProof[1].Key.Bytes()) // only a single node here since others are duplicates in endproof - require.Equal([]byte{1}, proof.StartProof[0].KeyPath.Value) + require.Equal([]byte{1}, proof.StartProof[0].Key.Bytes()) - err = proof.Verify( + require.NoError(proof.Verify( context.Background(), - []byte{1}, - []byte{3, 5}, - db.root.id, - ) - require.NoError(err) + maybe.Some([]byte{1}), + maybe.Some([]byte{3, 5}), + db.rootID, + db.tokenSize, + )) } func Test_RangeProof_BadBounds(t *testing.T) { + require := require.New(t) + db, err := getBasicDB() - require.NoError(t, err) + require.NoError(err) + + require.NoError(db.Put(nil, nil)) // non-nil start/end - proof, err := db.GetRangeProof(context.Background(), 
[]byte{4}, []byte{3}, 50) - require.ErrorIs(t, err, ErrStartAfterEnd) - require.Nil(t, proof) + proof, err := db.GetRangeProof(context.Background(), maybe.Some([]byte{4}), maybe.Some([]byte{3}), 50) + require.ErrorIs(err, ErrStartAfterEnd) + require.Nil(proof) } func Test_RangeProof_NilStart(t *testing.T) { + require := require.New(t) + db, err := getBasicDB() - require.NoError(t, err) + require.NoError(err) batch := db.NewBatch() - err = batch.Put([]byte("key1"), []byte("value1")) - require.NoError(t, err) - err = batch.Put([]byte("key2"), []byte("value2")) - require.NoError(t, err) - err = batch.Put([]byte("key3"), []byte("value3")) - require.NoError(t, err) - err = batch.Put([]byte("key4"), []byte("value4")) - require.NoError(t, err) - err = batch.Write() - require.NoError(t, err) + require.NoError(batch.Put([]byte("key1"), []byte("value1"))) + require.NoError(batch.Put([]byte("key2"), []byte("value2"))) + require.NoError(batch.Put([]byte("key3"), []byte("value3"))) + require.NoError(batch.Put([]byte("key4"), []byte("value4"))) + require.NoError(batch.Write()) val, err := db.Get([]byte("key1")) - require.NoError(t, err) - require.Equal(t, []byte("value1"), val) + require.NoError(err) + require.Equal([]byte("value1"), val) - proof, err := db.GetRangeProof(context.Background(), nil, []byte("key35"), 2) - require.NoError(t, err) - require.NotNil(t, proof) + proof, err := db.GetRangeProof(context.Background(), maybe.Nothing[[]byte](), maybe.Some([]byte("key35")), 2) + require.NoError(err) + require.NotNil(proof) - require.Len(t, proof.KeyValues, 2) + require.Len(proof.KeyValues, 2) - require.Equal(t, []byte("key1"), proof.KeyValues[0].Key) - require.Equal(t, []byte("key2"), proof.KeyValues[1].Key) + require.Equal([]byte("key1"), proof.KeyValues[0].Key) + require.Equal([]byte("key2"), proof.KeyValues[1].Key) - require.Equal(t, []byte("value1"), proof.KeyValues[0].Value) - require.Equal(t, []byte("value2"), proof.KeyValues[1].Value) + require.Equal([]byte("value1"), 
proof.KeyValues[0].Value) + require.Equal([]byte("value2"), proof.KeyValues[1].Value) - require.Equal(t, newPath([]byte("key2")).Serialize(), proof.EndProof[2].KeyPath) - require.Equal(t, SerializedPath{Value: []uint8{0x6b, 0x65, 0x79, 0x30}, NibbleLength: 7}, proof.EndProof[1].KeyPath) - require.Equal(t, newPath([]byte("")).Serialize(), proof.EndProof[0].KeyPath) + require.Equal(ToKey([]byte("key2")), proof.EndProof[1].Key, db.tokenSize) + require.Equal(ToKey([]byte("key2")).Take(28), proof.EndProof[0].Key) - err = proof.Verify( + require.NoError(proof.Verify( context.Background(), - nil, - []byte("key35"), - db.root.id, - ) - require.NoError(t, err) + maybe.Nothing[[]byte](), + maybe.Some([]byte("key35")), + db.rootID, + db.tokenSize, + )) } func Test_RangeProof_NilEnd(t *testing.T) { + require := require.New(t) + db, err := getBasicDB() - require.NoError(t, err) + require.NoError(err) + writeBasicBatch(t, db) - require.NoError(t, err) + require.NoError(err) - proof, err := db.GetRangeProof(context.Background(), []byte{1}, nil, 2) - require.NoError(t, err) - require.NotNil(t, proof) + proof, err := db.GetRangeProof( // Should have keys [1], [2] + context.Background(), + maybe.Some([]byte{1}), + maybe.Nothing[[]byte](), + 2, + ) + require.NoError(err) + require.NotNil(proof) - require.Len(t, proof.KeyValues, 2) + require.Len(proof.KeyValues, 2) - require.Equal(t, []byte{1}, proof.KeyValues[0].Key) - require.Equal(t, []byte{2}, proof.KeyValues[1].Key) + require.Equal([]byte{1}, proof.KeyValues[0].Key) + require.Equal([]byte{2}, proof.KeyValues[1].Key) - require.Equal(t, []byte{1}, proof.KeyValues[0].Value) - require.Equal(t, []byte{2}, proof.KeyValues[1].Value) + require.Equal([]byte{1}, proof.KeyValues[0].Value) + require.Equal([]byte{2}, proof.KeyValues[1].Value) - require.Equal(t, []byte{1}, proof.StartProof[0].KeyPath.Value) + require.Equal([]byte{1}, proof.StartProof[0].Key.Bytes()) - require.Equal(t, []byte{}, proof.EndProof[0].KeyPath.Value) - 
require.Equal(t, []byte{0}, proof.EndProof[1].KeyPath.Value) - require.Equal(t, []byte{2}, proof.EndProof[2].KeyPath.Value) + require.Equal(db.root.Value().key, proof.EndProof[0].Key) + require.Equal([]byte{2}, proof.EndProof[1].Key.Bytes()) - err = proof.Verify( + require.NoError(proof.Verify( context.Background(), - []byte{1}, - nil, - db.root.id, - ) - require.NoError(t, err) + maybe.Some([]byte{1}), + maybe.Nothing[[]byte](), + db.rootID, + db.tokenSize, + )) } func Test_RangeProof_EmptyValues(t *testing.T) { + require := require.New(t) + db, err := getBasicDB() - require.NoError(t, err) + require.NoError(err) batch := db.NewBatch() - err = batch.Put([]byte("key1"), nil) - require.NoError(t, err) - err = batch.Put([]byte("key12"), []byte("value1")) - require.NoError(t, err) - err = batch.Put([]byte("key2"), []byte{}) - require.NoError(t, err) - err = batch.Write() - require.NoError(t, err) + require.NoError(batch.Put([]byte("key1"), nil)) + require.NoError(batch.Put([]byte("key12"), []byte("value1"))) + require.NoError(batch.Put([]byte("key2"), []byte{})) + require.NoError(batch.Write()) val, err := db.Get([]byte("key12")) - require.NoError(t, err) - require.Equal(t, []byte("value1"), val) + require.NoError(err) + require.Equal([]byte("value1"), val) - proof, err := db.GetRangeProof(context.Background(), []byte("key1"), []byte("key2"), 10) - require.NoError(t, err) - require.NotNil(t, proof) + proof, err := db.GetRangeProof(context.Background(), maybe.Some([]byte("key1")), maybe.Some([]byte("key2")), 10) + require.NoError(err) + require.NotNil(proof) - require.Len(t, proof.KeyValues, 3) - require.Equal(t, []byte("key1"), proof.KeyValues[0].Key) - require.Empty(t, proof.KeyValues[0].Value) - require.Equal(t, []byte("key12"), proof.KeyValues[1].Key) - require.Equal(t, []byte("value1"), proof.KeyValues[1].Value) - require.Equal(t, []byte("key2"), proof.KeyValues[2].Key) - require.Empty(t, proof.KeyValues[2].Value) + require.Len(proof.KeyValues, 3) + 
require.Equal([]byte("key1"), proof.KeyValues[0].Key) + require.Empty(proof.KeyValues[0].Value) + require.Equal([]byte("key12"), proof.KeyValues[1].Key) + require.Equal([]byte("value1"), proof.KeyValues[1].Value) + require.Equal([]byte("key2"), proof.KeyValues[2].Key) + require.Empty(proof.KeyValues[2].Value) - require.Len(t, proof.StartProof, 1) - require.Equal(t, newPath([]byte("key1")).Serialize(), proof.StartProof[0].KeyPath) + require.Len(proof.StartProof, 1) + require.Equal(ToKey([]byte("key1")), proof.StartProof[0].Key) - require.Len(t, proof.EndProof, 3) - require.Equal(t, newPath([]byte("key2")).Serialize(), proof.EndProof[2].KeyPath) - require.Equal(t, newPath([]byte{}).Serialize(), proof.EndProof[0].KeyPath) + require.Len(proof.EndProof, 2) + require.Equal(ToKey([]byte("key1")).Take(28), proof.EndProof[0].Key, db.tokenSize) // root + require.Equal(ToKey([]byte("key2")), proof.EndProof[1].Key, db.tokenSize) - err = proof.Verify( + require.NoError(proof.Verify( context.Background(), - []byte("key1"), - []byte("key2"), - db.root.id, - ) - require.NoError(t, err) -} - -func Test_RangeProof_Marshal_Nil(t *testing.T) { - db, err := getBasicDB() - require.NoError(t, err) - writeBasicBatch(t, db) - - val, err := db.Get([]byte{1}) - require.NoError(t, err) - require.Equal(t, []byte{1}, val) - - proof, err := db.GetRangeProof(context.Background(), []byte("key1"), []byte("key35"), 10) - require.NoError(t, err) - require.NotNil(t, proof) - - proofBytes, err := Codec.EncodeRangeProof(Version, proof) - require.NoError(t, err) - - parsedProof := &RangeProof{} - _, err = Codec.DecodeRangeProof(proofBytes, parsedProof) - require.NoError(t, err) - - verifyPath(t, proof.StartProof, parsedProof.StartProof) - verifyPath(t, proof.EndProof, parsedProof.EndProof) - - for index, kv := range proof.KeyValues { - require.True(t, bytes.Equal(kv.Key, parsedProof.KeyValues[index].Key)) - require.True(t, bytes.Equal(kv.Value, parsedProof.KeyValues[index].Value)) - } -} - -func 
Test_RangeProof_Marshal(t *testing.T) { - db, err := getBasicDB() - require.NoError(t, err) - - writeBasicBatch(t, db) - - val, err := db.Get([]byte{1}) - require.NoError(t, err) - require.Equal(t, []byte{1}, val) - - proof, err := db.GetRangeProof(context.Background(), nil, nil, 10) - require.NoError(t, err) - require.NotNil(t, proof) - - proofBytes, err := Codec.EncodeRangeProof(Version, proof) - require.NoError(t, err) - - parsedProof := &RangeProof{} - _, err = Codec.DecodeRangeProof(proofBytes, parsedProof) - require.NoError(t, err) - - verifyPath(t, proof.StartProof, parsedProof.StartProof) - verifyPath(t, proof.EndProof, parsedProof.EndProof) - - for index, state := range proof.KeyValues { - require.True(t, bytes.Equal(state.Key, parsedProof.KeyValues[index].Key)) - require.True(t, bytes.Equal(state.Value, parsedProof.KeyValues[index].Value)) - } -} - -func Test_RangeProof_Marshal_Errors(t *testing.T) { - db, err := getBasicDB() - require.NoError(t, err) - writeBasicBatch(t, db) - - proof, err := db.GetRangeProof(context.Background(), nil, nil, 10) - require.NoError(t, err) - require.NotNil(t, proof) - - proofBytes, err := Codec.EncodeRangeProof(Version, proof) - require.NoError(t, err) - - for i := 1; i < len(proofBytes); i++ { - broken := proofBytes[:i] - parsedProof := &RangeProof{} - _, err = Codec.DecodeRangeProof(broken, parsedProof) - require.ErrorIs(t, err, io.ErrUnexpectedEOF) - } -} - -func TestChangeProofGetLargestKey(t *testing.T) { - type test struct { - name string - proof ChangeProof - end []byte - expected []byte - } - - tests := []test{ - { - name: "empty proof", - proof: ChangeProof{}, - end: []byte{0}, - expected: []byte{0}, - }, - { - name: "1 KV no deleted keys", - proof: ChangeProof{ - KeyValues: []KeyValue{ - { - Key: []byte{1}, - }, - }, - }, - end: []byte{0}, - expected: []byte{1}, - }, - { - name: "2 KV no deleted keys", - proof: ChangeProof{ - KeyValues: []KeyValue{ - { - Key: []byte{1}, - }, - { - Key: []byte{2}, - }, - }, - }, - 
end: []byte{0}, - expected: []byte{2}, - }, - { - name: "no KVs 1 deleted key", - proof: ChangeProof{ - DeletedKeys: [][]byte{{1}}, - }, - end: []byte{0}, - expected: []byte{1}, - }, - { - name: "no KVs 2 deleted keys", - proof: ChangeProof{ - DeletedKeys: [][]byte{{1}, {2}}, - }, - end: []byte{0}, - expected: []byte{2}, - }, - { - name: "KV and deleted keys; KV larger", - proof: ChangeProof{ - KeyValues: []KeyValue{ - { - Key: []byte{1}, - }, - { - Key: []byte{3}, - }, - }, - DeletedKeys: [][]byte{{0}, {2}}, - }, - end: []byte{5}, - expected: []byte{3}, - }, - { - name: "KV and deleted keys; deleted key larger", - proof: ChangeProof{ - KeyValues: []KeyValue{ - { - Key: []byte{0}, - }, - { - Key: []byte{2}, - }, - }, - DeletedKeys: [][]byte{{1}, {3}}, - }, - end: []byte{5}, - expected: []byte{3}, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - require.Equal(t, tt.expected, tt.proof.getLargestKey(tt.end)) - }) - } + maybe.Some([]byte("key1")), + maybe.Some([]byte("key2")), + db.rootID, + db.tokenSize, + )) } -func Test_ChangeProof_Marshal(t *testing.T) { - db, err := getBasicDB() - require.NoError(t, err) - batch := db.NewBatch() - err = batch.Put([]byte("key0"), []byte("value0")) - require.NoError(t, err) - err = batch.Put([]byte("key1"), []byte("value1")) - require.NoError(t, err) - err = batch.Put([]byte("key2"), []byte("value2")) - require.NoError(t, err) - err = batch.Put([]byte("key3"), []byte("value3")) - require.NoError(t, err) - err = batch.Put([]byte("key4"), []byte("value4")) - require.NoError(t, err) - err = batch.Write() - require.NoError(t, err) - startRoot, err := db.GetMerkleRoot(context.Background()) - require.NoError(t, err) - - batch = db.NewBatch() - err = batch.Put([]byte("key4"), []byte("value0")) - require.NoError(t, err) - err = batch.Put([]byte("key5"), []byte("value1")) - require.NoError(t, err) - err = batch.Put([]byte("key6"), []byte("value2")) - require.NoError(t, err) - err = batch.Put([]byte("key7"), 
[]byte("value3")) - require.NoError(t, err) - err = batch.Put([]byte("key8"), []byte("value4")) - require.NoError(t, err) - err = batch.Write() - require.NoError(t, err) - - batch = db.NewBatch() - err = batch.Put([]byte("key9"), []byte("value0")) - require.NoError(t, err) - err = batch.Put([]byte("key10"), []byte("value1")) - require.NoError(t, err) - err = batch.Put([]byte("key11"), []byte("value2")) - require.NoError(t, err) - err = batch.Put([]byte("key12"), []byte("value3")) - require.NoError(t, err) - err = batch.Put([]byte("key13"), []byte("value4")) - require.NoError(t, err) - err = batch.Write() - require.NoError(t, err) - endroot, err := db.GetMerkleRoot(context.Background()) - require.NoError(t, err) - - proof, err := db.GetChangeProof(context.Background(), startRoot, endroot, nil, nil, 50) - require.NoError(t, err) - require.NotNil(t, proof) - require.True(t, proof.HadRootsInHistory) - - proofBytes, err := Codec.EncodeChangeProof(Version, proof) - require.NoError(t, err) - - parsedProof := &ChangeProof{} - _, err = Codec.DecodeChangeProof(proofBytes, parsedProof) - require.NoError(t, err) - - verifyPath(t, proof.StartProof, parsedProof.StartProof) - verifyPath(t, proof.EndProof, parsedProof.EndProof) - - for index, kv := range proof.KeyValues { - require.True(t, bytes.Equal(kv.Key, parsedProof.KeyValues[index].Key)) - require.True(t, bytes.Equal(kv.Value, parsedProof.KeyValues[index].Value)) - } -} +func Test_ChangeProof_Missing_History_For_EndRoot(t *testing.T) { + require := require.New(t) + seed := time.Now().UnixNano() + t.Logf("Seed: %d", seed) + rand := rand.New(rand.NewSource(seed)) // #nosec G404 -func Test_ChangeProof_Marshal_Errors(t *testing.T) { db, err := getBasicDB() - require.NoError(t, err) - writeBasicBatch(t, db) - startRoot, err := db.GetMerkleRoot(context.Background()) - require.NoError(t, err) - - batch := db.NewBatch() - require.NoError(t, batch.Put([]byte{5}, []byte{5})) - require.NoError(t, batch.Put([]byte{6}, []byte{6})) - 
require.NoError(t, batch.Put([]byte{7}, []byte{7})) - require.NoError(t, batch.Put([]byte{8}, []byte{8})) - require.NoError(t, batch.Delete([]byte{0})) - require.NoError(t, batch.Write()) + require.NoError(err) - batch = db.NewBatch() - require.NoError(t, batch.Put([]byte{9}, []byte{9})) - require.NoError(t, batch.Put([]byte{10}, []byte{10})) - require.NoError(t, batch.Put([]byte{11}, []byte{11})) - require.NoError(t, batch.Put([]byte{12}, []byte{12})) - require.NoError(t, batch.Delete([]byte{1})) - require.NoError(t, batch.Write()) - endroot, err := db.GetMerkleRoot(context.Background()) - require.NoError(t, err) - - proof, err := db.GetChangeProof(context.Background(), startRoot, endroot, nil, nil, 50) - require.NoError(t, err) - require.NotNil(t, proof) - require.True(t, proof.HadRootsInHistory) - require.Len(t, proof.KeyValues, 8) - require.Len(t, proof.DeletedKeys, 2) - - proofBytes, err := Codec.EncodeChangeProof(Version, proof) - require.NoError(t, err) - - for i := 1; i < len(proofBytes); i++ { - broken := proofBytes[:i] - parsedProof := &ChangeProof{} - _, err = Codec.DecodeChangeProof(broken, parsedProof) - require.ErrorIs(t, err, io.ErrUnexpectedEOF) + roots := []ids.ID{} + for i := 0; i < defaultHistoryLength+1; i++ { + key := make([]byte, 16) + _, _ = rand.Read(key) + require.NoError(db.Put(key, nil)) + root, err := db.GetMerkleRoot(context.Background()) + require.NoError(err) + roots = append(roots, root) } -} -func Test_ChangeProof_Missing_History_For_EndRoot(t *testing.T) { - db, err := getBasicDB() - require.NoError(t, err) - startRoot, err := db.GetMerkleRoot(context.Background()) - require.NoError(t, err) + _, err = db.GetChangeProof( + context.Background(), + roots[len(roots)-1], + ids.GenerateTestID(), + maybe.Nothing[[]byte](), + maybe.Nothing[[]byte](), + 50, + ) + require.ErrorIs(err, ErrNoEndRoot) + require.ErrorIs(err, ErrInsufficientHistory) - proof, err := db.GetChangeProof(context.Background(), startRoot, ids.Empty, nil, nil, 50) - 
require.NoError(t, err) - require.NotNil(t, proof) - require.False(t, proof.HadRootsInHistory) + _, err = db.GetChangeProof( + context.Background(), + roots[0], + roots[len(roots)-1], + maybe.Nothing[[]byte](), + maybe.Nothing[[]byte](), + 50, + ) + require.NotErrorIs(err, ErrNoEndRoot) + require.ErrorIs(err, ErrInsufficientHistory) - require.NoError(t, proof.Verify(context.Background(), db, nil, nil, db.getMerkleRoot())) + _, err = db.GetChangeProof( + context.Background(), + roots[1], + roots[len(roots)-1], + maybe.Nothing[[]byte](), + maybe.Nothing[[]byte](), + 50, + ) + require.NoError(err) } func Test_ChangeProof_BadBounds(t *testing.T) { + require := require.New(t) + db, err := getBasicDB() - require.NoError(t, err) + require.NoError(err) startRoot, err := db.GetMerkleRoot(context.Background()) - require.NoError(t, err) + require.NoError(err) - require.NoError(t, db.Insert(context.Background(), []byte{0}, []byte{0})) + require.NoError(db.PutContext(context.Background(), []byte{0}, []byte{0})) endRoot, err := db.GetMerkleRoot(context.Background()) - require.NoError(t, err) + require.NoError(err) // non-nil start/end - proof, err := db.GetChangeProof(context.Background(), startRoot, endRoot, []byte("key4"), []byte("key3"), 50) - require.ErrorIs(t, err, ErrStartAfterEnd) - require.Nil(t, proof) + proof, err := db.GetChangeProof(context.Background(), startRoot, endRoot, maybe.Some([]byte("key4")), maybe.Some([]byte("key3")), 50) + require.ErrorIs(err, ErrStartAfterEnd) + require.Nil(proof) } func Test_ChangeProof_Verify(t *testing.T) { + require := require.New(t) + db, err := getBasicDB() - require.NoError(t, err) + require.NoError(err) batch := db.NewBatch() - err = batch.Put([]byte("key20"), []byte("value0")) - require.NoError(t, err) - err = batch.Put([]byte("key21"), []byte("value1")) - require.NoError(t, err) - err = batch.Put([]byte("key22"), []byte("value2")) - require.NoError(t, err) - err = batch.Put([]byte("key23"), []byte("value3")) - 
require.NoError(t, err) - err = batch.Put([]byte("key24"), []byte("value4")) - require.NoError(t, err) - err = batch.Write() - require.NoError(t, err) + require.NoError(batch.Put([]byte("key20"), []byte("value0"))) + require.NoError(batch.Put([]byte("key21"), []byte("value1"))) + require.NoError(batch.Put([]byte("key22"), []byte("value2"))) + require.NoError(batch.Put([]byte("key23"), []byte("value3"))) + require.NoError(batch.Put([]byte("key24"), []byte("value4"))) + require.NoError(batch.Write()) startRoot, err := db.GetMerkleRoot(context.Background()) - require.NoError(t, err) + require.NoError(err) // create a second db that has "synced" to the start root dbClone, err := getBasicDB() - require.NoError(t, err) + require.NoError(err) batch = dbClone.NewBatch() - err = batch.Put([]byte("key20"), []byte("value0")) - require.NoError(t, err) - err = batch.Put([]byte("key21"), []byte("value1")) - require.NoError(t, err) - err = batch.Put([]byte("key22"), []byte("value2")) - require.NoError(t, err) - err = batch.Put([]byte("key23"), []byte("value3")) - require.NoError(t, err) - err = batch.Put([]byte("key24"), []byte("value4")) - require.NoError(t, err) - err = batch.Write() - require.NoError(t, err) + require.NoError(batch.Put([]byte("key20"), []byte("value0"))) + require.NoError(batch.Put([]byte("key21"), []byte("value1"))) + require.NoError(batch.Put([]byte("key22"), []byte("value2"))) + require.NoError(batch.Put([]byte("key23"), []byte("value3"))) + require.NoError(batch.Put([]byte("key24"), []byte("value4"))) + require.NoError(batch.Write()) // the second db has started to sync some of the range outside of the range proof batch = dbClone.NewBatch() - err = batch.Put([]byte("key31"), []byte("value1")) - require.NoError(t, err) - err = batch.Write() - require.NoError(t, err) + require.NoError(batch.Put([]byte("key31"), []byte("value1"))) + require.NoError(batch.Write()) batch = db.NewBatch() - err = batch.Put([]byte("key25"), []byte("value0")) - require.NoError(t, 
err) - err = batch.Put([]byte("key26"), []byte("value1")) - require.NoError(t, err) - err = batch.Put([]byte("key27"), []byte("value2")) - require.NoError(t, err) - err = batch.Put([]byte("key28"), []byte("value3")) - require.NoError(t, err) - err = batch.Put([]byte("key29"), []byte("value4")) - require.NoError(t, err) - err = batch.Write() - require.NoError(t, err) + require.NoError(batch.Put([]byte("key25"), []byte("value0"))) + require.NoError(batch.Put([]byte("key26"), []byte("value1"))) + require.NoError(batch.Put([]byte("key27"), []byte("value2"))) + require.NoError(batch.Put([]byte("key28"), []byte("value3"))) + require.NoError(batch.Put([]byte("key29"), []byte("value4"))) + require.NoError(batch.Write()) batch = db.NewBatch() - err = batch.Put([]byte("key30"), []byte("value0")) - require.NoError(t, err) - err = batch.Put([]byte("key31"), []byte("value1")) - require.NoError(t, err) - err = batch.Put([]byte("key32"), []byte("value2")) - require.NoError(t, err) - err = batch.Delete([]byte("key21")) - require.NoError(t, err) - err = batch.Delete([]byte("key22")) - require.NoError(t, err) - err = batch.Write() - require.NoError(t, err) + require.NoError(batch.Put([]byte("key30"), []byte("value0"))) + require.NoError(batch.Put([]byte("key31"), []byte("value1"))) + require.NoError(batch.Put([]byte("key32"), []byte("value2"))) + require.NoError(batch.Delete([]byte("key21"))) + require.NoError(batch.Delete([]byte("key22"))) + require.NoError(batch.Write()) endRoot, err := db.GetMerkleRoot(context.Background()) - require.NoError(t, err) + require.NoError(err) // non-nil start/end - proof, err := db.GetChangeProof(context.Background(), startRoot, endRoot, []byte("key21"), []byte("key30"), 50) - require.NoError(t, err) - require.NotNil(t, proof) + proof, err := db.GetChangeProof(context.Background(), startRoot, endRoot, maybe.Some([]byte("key21")), maybe.Some([]byte("key30")), 50) + require.NoError(err) + require.NotNil(proof) - err = proof.Verify(context.Background(), 
dbClone, []byte("key21"), []byte("key30"), db.getMerkleRoot()) - require.NoError(t, err) + require.NoError(dbClone.VerifyChangeProof(context.Background(), proof, maybe.Some([]byte("key21")), maybe.Some([]byte("key30")), db.getMerkleRoot())) // low maxLength - proof, err = db.GetChangeProof(context.Background(), startRoot, endRoot, nil, nil, 5) - require.NoError(t, err) - require.NotNil(t, proof) + proof, err = db.GetChangeProof(context.Background(), startRoot, endRoot, maybe.Nothing[[]byte](), maybe.Nothing[[]byte](), 5) + require.NoError(err) + require.NotNil(proof) - err = proof.Verify(context.Background(), dbClone, nil, nil, db.getMerkleRoot()) - require.NoError(t, err) + require.NoError(dbClone.VerifyChangeProof(context.Background(), proof, maybe.Nothing[[]byte](), maybe.Nothing[[]byte](), db.getMerkleRoot())) // nil start/end - proof, err = db.GetChangeProof(context.Background(), startRoot, endRoot, nil, nil, 50) - require.NoError(t, err) - require.NotNil(t, proof) - - err = proof.Verify(context.Background(), dbClone, nil, nil, endRoot) - require.NoError(t, err) + proof, err = db.GetChangeProof(context.Background(), startRoot, endRoot, maybe.Nothing[[]byte](), maybe.Nothing[[]byte](), 50) + require.NoError(err) + require.NotNil(proof) - err = dbClone.CommitChangeProof(context.Background(), proof) - require.NoError(t, err) + require.NoError(dbClone.VerifyChangeProof(context.Background(), proof, maybe.Nothing[[]byte](), maybe.Nothing[[]byte](), endRoot)) + require.NoError(dbClone.CommitChangeProof(context.Background(), proof)) newRoot, err := dbClone.GetMerkleRoot(context.Background()) - require.NoError(t, err) - require.Equal(t, endRoot, newRoot) + require.NoError(err) + require.Equal(endRoot, newRoot) - proof, err = db.GetChangeProof(context.Background(), startRoot, endRoot, []byte("key20"), []byte("key30"), 50) - require.NoError(t, err) - require.NotNil(t, proof) + proof, err = db.GetChangeProof(context.Background(), startRoot, endRoot, 
maybe.Some([]byte("key20")), maybe.Some([]byte("key30")), 50) + require.NoError(err) + require.NotNil(proof) - err = proof.Verify(context.Background(), dbClone, []byte("key20"), []byte("key30"), db.getMerkleRoot()) - require.NoError(t, err) + require.NoError(dbClone.VerifyChangeProof(context.Background(), proof, maybe.Some([]byte("key20")), maybe.Some([]byte("key30")), db.getMerkleRoot())) } func Test_ChangeProof_Verify_Bad_Data(t *testing.T) { @@ -1149,27 +828,27 @@ func Test_ChangeProof_Verify_Bad_Data(t *testing.T) { tests := []test{ { name: "happyPath", - malform: func(proof *ChangeProof) {}, + malform: func(*ChangeProof) {}, expectedErr: nil, }, { name: "odd length key path with value", malform: func(proof *ChangeProof) { - proof.EndProof[1].ValueOrHash = Some([]byte{1, 2}) + proof.EndProof[0].ValueOrHash = maybe.Some([]byte{1, 2}) }, - expectedErr: ErrOddLengthWithValue, + expectedErr: ErrPartialByteLengthWithValue, }, { name: "last proof node has missing value", malform: func(proof *ChangeProof) { - proof.EndProof[len(proof.EndProof)-1].ValueOrHash = Nothing[[]byte]() + proof.EndProof[len(proof.EndProof)-1].ValueOrHash = maybe.Nothing[[]byte]() }, expectedErr: ErrProofValueDoesntMatch, }, { name: "missing key/value", malform: func(proof *ChangeProof) { - proof.KeyValues = proof.KeyValues[1:] + proof.KeyChanges = proof.KeyChanges[1:] }, expectedErr: ErrProofValueDoesntMatch, }, @@ -1177,29 +856,44 @@ func Test_ChangeProof_Verify_Bad_Data(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + db, err := getBasicDB() - require.NoError(t, err) + require.NoError(err) startRoot, err := db.GetMerkleRoot(context.Background()) - require.NoError(t, err) + require.NoError(err) writeBasicBatch(t, db) endRoot, err := db.GetMerkleRoot(context.Background()) - require.NoError(t, err) + require.NoError(err) // create a second db that will be synced to the first db dbClone, err := getBasicDB() - require.NoError(t, err) - - 
proof, err := db.GetChangeProof(context.Background(), startRoot, endRoot, []byte{2}, []byte{3, 0}, 50) - require.NoError(t, err) - require.NotNil(t, proof) + require.NoError(err) + + proof, err := db.GetChangeProof( + context.Background(), + startRoot, + endRoot, + maybe.Some([]byte{2}), + maybe.Some([]byte{3, 0}), + 50, + ) + require.NoError(err) + require.NotNil(proof) tt.malform(proof) - err = proof.Verify(context.Background(), dbClone, []byte{2}, []byte{3, 0}, db.getMerkleRoot()) - require.ErrorIs(t, err, tt.expectedErr) + err = dbClone.VerifyChangeProof( + context.Background(), + proof, + maybe.Some([]byte{2}), + maybe.Some([]byte{3, 0}), + db.getMerkleRoot(), + ) + require.ErrorIs(err, tt.expectedErr) }) } } @@ -1208,8 +902,8 @@ func Test_ChangeProof_Syntactic_Verify(t *testing.T) { type test struct { name string proof *ChangeProof - start []byte - end []byte + start maybe.Maybe[[]byte] + end maybe.Maybe[[]byte] expectedErr error } @@ -1217,232 +911,147 @@ func Test_ChangeProof_Syntactic_Verify(t *testing.T) { { name: "start after end", proof: nil, - start: []byte{1}, - end: []byte{0}, + start: maybe.Some([]byte{1}), + end: maybe.Some([]byte{0}), expectedErr: ErrStartAfterEnd, }, { - name: "no roots in history and non-empty key-values", - proof: &ChangeProof{ - HadRootsInHistory: false, - KeyValues: []KeyValue{{Key: []byte{1}, Value: []byte{1}}}, - }, - start: []byte{0}, - end: nil, // Also tests start can be after end if end is nil - expectedErr: ErrDataInMissingRootProof, - }, - { - name: "no roots in history and non-empty deleted keys", - proof: &ChangeProof{ - HadRootsInHistory: false, - DeletedKeys: [][]byte{{1}}, - }, - start: nil, - end: nil, - expectedErr: ErrDataInMissingRootProof, - }, - { - name: "no roots in history and non-empty start proof", - proof: &ChangeProof{ - HadRootsInHistory: false, - StartProof: []ProofNode{{}}, - }, - start: nil, - end: nil, - expectedErr: ErrDataInMissingRootProof, - }, - { - name: "no roots in history and non-empty 
end proof", - proof: &ChangeProof{ - HadRootsInHistory: false, - EndProof: []ProofNode{{}}, - }, - start: nil, - end: nil, - expectedErr: ErrDataInMissingRootProof, - }, - { - name: "no roots in history; empty", - proof: &ChangeProof{ - HadRootsInHistory: false, - }, - start: nil, - end: nil, - expectedErr: nil, - }, - { - name: "root in history; empty", - proof: &ChangeProof{ - HadRootsInHistory: true, - }, - start: nil, - end: nil, - expectedErr: ErrNoMerkleProof, + name: "empty", + proof: &ChangeProof{}, + start: maybe.Nothing[[]byte](), + end: maybe.Nothing[[]byte](), + expectedErr: ErrEmptyProof, }, { name: "no end proof", proof: &ChangeProof{ - HadRootsInHistory: true, - StartProof: []ProofNode{{}}, + StartProof: []ProofNode{{}}, }, - start: nil, - end: []byte{1}, + start: maybe.Nothing[[]byte](), + end: maybe.Some([]byte{1}), expectedErr: ErrNoEndProof, }, { name: "no start proof", proof: &ChangeProof{ - HadRootsInHistory: true, - DeletedKeys: [][]byte{{1}}, + KeyChanges: []KeyChange{{Key: []byte{1}}}, }, - start: []byte{1}, - end: nil, + start: maybe.Some([]byte{1}), + end: maybe.Nothing[[]byte](), expectedErr: ErrNoStartProof, }, { name: "non-increasing key-values", proof: &ChangeProof{ - HadRootsInHistory: true, - KeyValues: []KeyValue{ + KeyChanges: []KeyChange{ {Key: []byte{1}}, {Key: []byte{0}}, }, }, - start: nil, - end: nil, + start: maybe.Nothing[[]byte](), + end: maybe.Nothing[[]byte](), expectedErr: ErrNonIncreasingValues, }, { name: "key-value too low", proof: &ChangeProof{ - HadRootsInHistory: true, - StartProof: []ProofNode{{}}, - KeyValues: []KeyValue{ + StartProof: []ProofNode{{}}, + KeyChanges: []KeyChange{ {Key: []byte{0}}, }, }, - start: []byte{1}, - end: nil, + start: maybe.Some([]byte{1}), + end: maybe.Nothing[[]byte](), expectedErr: ErrStateFromOutsideOfRange, }, { name: "key-value too great", proof: &ChangeProof{ - HadRootsInHistory: true, - EndProof: []ProofNode{{}}, - KeyValues: []KeyValue{ + EndProof: []ProofNode{{}}, + KeyChanges: 
[]KeyChange{ {Key: []byte{2}}, }, }, - start: nil, - end: []byte{1}, + start: maybe.Nothing[[]byte](), + end: maybe.Some([]byte{1}), expectedErr: ErrStateFromOutsideOfRange, }, { - name: "non-increasing deleted keys", + name: "duplicate key", proof: &ChangeProof{ - HadRootsInHistory: true, - DeletedKeys: [][]byte{ - {1}, - {1}, + KeyChanges: []KeyChange{ + {Key: []byte{1}}, + {Key: []byte{1}}, }, }, - start: nil, - end: nil, + start: maybe.Nothing[[]byte](), + end: maybe.Nothing[[]byte](), expectedErr: ErrNonIncreasingValues, }, - { - name: "deleted key too low", - proof: &ChangeProof{ - HadRootsInHistory: true, - StartProof: []ProofNode{{}}, - DeletedKeys: [][]byte{ - {0}, - }, - }, - start: []byte{1}, - end: nil, - expectedErr: ErrStateFromOutsideOfRange, - }, - { - name: "deleted key too great", - proof: &ChangeProof{ - HadRootsInHistory: true, - EndProof: []ProofNode{{}}, - DeletedKeys: [][]byte{ - {1}, - }, - }, - start: nil, - end: []byte{0}, - expectedErr: ErrStateFromOutsideOfRange, - }, { name: "start proof node has wrong prefix", proof: &ChangeProof{ - HadRootsInHistory: true, StartProof: []ProofNode{ - {KeyPath: newPath([]byte{2}).Serialize()}, - {KeyPath: newPath([]byte{2, 3}).Serialize()}, + {Key: ToKey([]byte{2})}, + {Key: ToKey([]byte{2, 3})}, }, }, - start: []byte{1, 2, 3}, - end: nil, + start: maybe.Some([]byte{1, 2, 3}), + end: maybe.Nothing[[]byte](), expectedErr: ErrProofNodeNotForKey, }, { name: "start proof non-increasing", proof: &ChangeProof{ - HadRootsInHistory: true, StartProof: []ProofNode{ - {KeyPath: newPath([]byte{1}).Serialize()}, - {KeyPath: newPath([]byte{2, 3}).Serialize()}, + {Key: ToKey([]byte{1})}, + {Key: ToKey([]byte{2, 3})}, }, }, - start: []byte{1, 2, 3}, - end: nil, + start: maybe.Some([]byte{1, 2, 3}), + end: maybe.Nothing[[]byte](), expectedErr: ErrNonIncreasingProofNodes, }, { name: "end proof node has wrong prefix", proof: &ChangeProof{ - HadRootsInHistory: true, - KeyValues: []KeyValue{ - {Key: []byte{1, 2}}, // Also 
tests [end] set to greatest key-value/deleted key + KeyChanges: []KeyChange{ + {Key: []byte{1, 2}, Value: maybe.Some([]byte{0})}, }, EndProof: []ProofNode{ - {KeyPath: newPath([]byte{2}).Serialize()}, - {KeyPath: newPath([]byte{2, 3}).Serialize()}, + {Key: ToKey([]byte{2})}, + {Key: ToKey([]byte{2, 3})}, }, }, - start: nil, - end: nil, + start: maybe.Nothing[[]byte](), + end: maybe.Nothing[[]byte](), expectedErr: ErrProofNodeNotForKey, }, { name: "end proof non-increasing", proof: &ChangeProof{ - HadRootsInHistory: true, - DeletedKeys: [][]byte{ - {1, 2, 3}, // Also tests [end] set to greatest key-value/deleted key + KeyChanges: []KeyChange{ + {Key: []byte{1, 2, 3}}, }, EndProof: []ProofNode{ - {KeyPath: newPath([]byte{1}).Serialize()}, - {KeyPath: newPath([]byte{2, 3}).Serialize()}, + {Key: ToKey([]byte{1})}, + {Key: ToKey([]byte{2, 3})}, }, }, - start: nil, - end: nil, + start: maybe.Nothing[[]byte](), + end: maybe.Nothing[[]byte](), expectedErr: ErrNonIncreasingProofNodes, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + db, err := getBasicDB() - require.NoError(t, err) - err = tt.proof.Verify(context.Background(), db, tt.start, tt.end, ids.Empty) - require.ErrorIs(t, err, tt.expectedErr) + require.NoError(err) + err = db.VerifyChangeProof(context.Background(), tt.proof, tt.start, tt.end, ids.Empty) + require.ErrorIs(err, tt.expectedErr) }) } } @@ -1450,8 +1059,8 @@ func Test_ChangeProof_Syntactic_Verify(t *testing.T) { func TestVerifyKeyValues(t *testing.T) { type test struct { name string - start []byte - end []byte + start maybe.Maybe[[]byte] + end maybe.Maybe[[]byte] kvs []KeyValue expectedErr error } @@ -1459,15 +1068,15 @@ func TestVerifyKeyValues(t *testing.T) { tests := []test{ { name: "empty", - start: nil, - end: nil, + start: maybe.Nothing[[]byte](), + end: maybe.Nothing[[]byte](), kvs: nil, expectedErr: nil, }, { name: "1 key", - start: nil, - end: nil, + start: maybe.Nothing[[]byte](), + end: 
maybe.Nothing[[]byte](), kvs: []KeyValue{ {Key: []byte{0}}, }, @@ -1475,8 +1084,8 @@ func TestVerifyKeyValues(t *testing.T) { }, { name: "non-increasing keys", - start: nil, - end: nil, + start: maybe.Nothing[[]byte](), + end: maybe.Nothing[[]byte](), kvs: []KeyValue{ {Key: []byte{0}}, {Key: []byte{0}}, @@ -1485,8 +1094,8 @@ func TestVerifyKeyValues(t *testing.T) { }, { name: "key before start", - start: []byte{1, 2}, - end: nil, + start: maybe.Some([]byte{1, 2}), + end: maybe.Nothing[[]byte](), kvs: []KeyValue{ {Key: []byte{1}}, {Key: []byte{1, 2}}, @@ -1495,8 +1104,8 @@ func TestVerifyKeyValues(t *testing.T) { }, { name: "key after end", - start: nil, - end: []byte{1, 2}, + start: maybe.Nothing[[]byte](), + end: maybe.Some([]byte{1, 2}), kvs: []KeyValue{ {Key: []byte{1}}, {Key: []byte{1, 2}}, @@ -1506,8 +1115,8 @@ func TestVerifyKeyValues(t *testing.T) { }, { name: "happy path", - start: nil, - end: []byte{1, 2, 3}, + start: maybe.Nothing[[]byte](), + end: maybe.Some([]byte{1, 2, 3}), kvs: []KeyValue{ {Key: []byte{1}}, {Key: []byte{1, 2}}, @@ -1528,7 +1137,7 @@ func TestVerifyProofPath(t *testing.T) { type test struct { name string path []ProofNode - proofKey []byte + proofKey maybe.Maybe[Key] expectedErr error } @@ -1536,125 +1145,802 @@ func TestVerifyProofPath(t *testing.T) { { name: "empty", path: nil, - proofKey: nil, + proofKey: maybe.Nothing[Key](), expectedErr: nil, }, { name: "1 element", - path: []ProofNode{{}}, - proofKey: nil, + path: []ProofNode{{Key: ToKey([]byte{1})}}, + proofKey: maybe.Nothing[Key](), expectedErr: nil, }, { name: "non-increasing keys", path: []ProofNode{ - {KeyPath: newPath([]byte{1}).Serialize()}, - {KeyPath: newPath([]byte{1, 2}).Serialize()}, - {KeyPath: newPath([]byte{1, 3}).Serialize()}, + {Key: ToKey([]byte{1})}, + {Key: ToKey([]byte{1, 2})}, + {Key: ToKey([]byte{1, 3})}, }, - proofKey: []byte{1, 2, 3}, + proofKey: maybe.Some(ToKey([]byte{1, 2, 3})), expectedErr: ErrNonIncreasingProofNodes, }, { name: "invalid key", path: 
[]ProofNode{ - {KeyPath: newPath([]byte{1}).Serialize()}, - {KeyPath: newPath([]byte{1, 2}).Serialize()}, - {KeyPath: newPath([]byte{1, 2, 4}).Serialize()}, - {KeyPath: newPath([]byte{1, 2, 3}).Serialize()}, + {Key: ToKey([]byte{1})}, + {Key: ToKey([]byte{1, 2})}, + {Key: ToKey([]byte{1, 2, 4})}, + {Key: ToKey([]byte{1, 2, 3})}, }, - proofKey: []byte{1, 2, 3}, + proofKey: maybe.Some(ToKey([]byte{1, 2, 3})), expectedErr: ErrProofNodeNotForKey, }, { name: "extra node inclusion proof", path: []ProofNode{ - {KeyPath: newPath([]byte{1}).Serialize()}, - {KeyPath: newPath([]byte{1, 2}).Serialize()}, - {KeyPath: newPath([]byte{1, 2, 3}).Serialize()}, + {Key: ToKey([]byte{1})}, + {Key: ToKey([]byte{1, 2})}, + {Key: ToKey([]byte{1, 2, 3})}, }, - proofKey: []byte{1, 2}, + proofKey: maybe.Some(ToKey([]byte{1, 2})), expectedErr: ErrProofNodeNotForKey, }, { name: "extra node exclusion proof", path: []ProofNode{ - {KeyPath: newPath([]byte{1}).Serialize()}, - {KeyPath: newPath([]byte{1, 3}).Serialize()}, - {KeyPath: newPath([]byte{1, 3, 4}).Serialize()}, + {Key: ToKey([]byte{1})}, + {Key: ToKey([]byte{1, 3})}, + {Key: ToKey([]byte{1, 3, 4})}, }, - proofKey: []byte{1, 2}, + proofKey: maybe.Some(ToKey([]byte{1, 2})), expectedErr: ErrProofNodeNotForKey, }, { name: "happy path exclusion proof", path: []ProofNode{ - {KeyPath: newPath([]byte{1}).Serialize()}, - {KeyPath: newPath([]byte{1, 2}).Serialize()}, - {KeyPath: newPath([]byte{1, 2, 4}).Serialize()}, + {Key: ToKey([]byte{1})}, + {Key: ToKey([]byte{1, 2})}, + {Key: ToKey([]byte{1, 2, 4})}, }, - proofKey: []byte{1, 2, 3}, + proofKey: maybe.Some(ToKey([]byte{1, 2, 3})), expectedErr: nil, }, { name: "happy path inclusion proof", path: []ProofNode{ - {KeyPath: newPath([]byte{1}).Serialize()}, - {KeyPath: newPath([]byte{1, 2}).Serialize()}, - {KeyPath: newPath([]byte{1, 2, 3}).Serialize()}, + {Key: ToKey([]byte{1})}, + {Key: ToKey([]byte{1, 2})}, + {Key: ToKey([]byte{1, 2, 3})}, }, - proofKey: []byte{1, 2, 3}, + proofKey: 
maybe.Some(ToKey([]byte{1, 2, 3})), expectedErr: nil, }, { name: "repeat nodes", path: []ProofNode{ - {KeyPath: newPath([]byte{1}).Serialize()}, - {KeyPath: newPath([]byte{1}).Serialize()}, - {KeyPath: newPath([]byte{1, 2}).Serialize()}, - {KeyPath: newPath([]byte{1, 2, 3}).Serialize()}, + {Key: ToKey([]byte{1})}, + {Key: ToKey([]byte{1})}, + {Key: ToKey([]byte{1, 2})}, + {Key: ToKey([]byte{1, 2, 3})}, }, - proofKey: []byte{1, 2, 3}, + proofKey: maybe.Some(ToKey([]byte{1, 2, 3})), expectedErr: ErrNonIncreasingProofNodes, }, { name: "repeat nodes 2", path: []ProofNode{ - {KeyPath: newPath([]byte{1}).Serialize()}, - {KeyPath: newPath([]byte{1, 2}).Serialize()}, - {KeyPath: newPath([]byte{1, 2}).Serialize()}, - {KeyPath: newPath([]byte{1, 2, 3}).Serialize()}, + {Key: ToKey([]byte{1})}, + {Key: ToKey([]byte{1, 2})}, + {Key: ToKey([]byte{1, 2})}, + {Key: ToKey([]byte{1, 2, 3})}, }, - proofKey: []byte{1, 2, 3}, + proofKey: maybe.Some(ToKey([]byte{1, 2, 3})), expectedErr: ErrNonIncreasingProofNodes, }, { name: "repeat nodes 3", path: []ProofNode{ - {KeyPath: newPath([]byte{1}).Serialize()}, - {KeyPath: newPath([]byte{1, 2}).Serialize()}, - {KeyPath: newPath([]byte{1, 2, 3}).Serialize()}, - {KeyPath: newPath([]byte{1, 2, 3}).Serialize()}, + {Key: ToKey([]byte{1})}, + {Key: ToKey([]byte{1, 2})}, + {Key: ToKey([]byte{1, 2, 3})}, + {Key: ToKey([]byte{1, 2, 3})}, }, - proofKey: []byte{1, 2, 3}, + proofKey: maybe.Some(ToKey([]byte{1, 2, 3})), expectedErr: ErrProofNodeNotForKey, }, { name: "oddLength key with value", path: []ProofNode{ - {KeyPath: newPath([]byte{1}).Serialize()}, - {KeyPath: newPath([]byte{1, 2}).Serialize()}, - {KeyPath: SerializedPath{Value: []byte{1, 2, 240}, NibbleLength: 5}, ValueOrHash: Some([]byte{1})}, + {Key: ToKey([]byte{1})}, + {Key: ToKey([]byte{1, 2})}, + { + Key: Key{ + value: string([]byte{1, 2, 240}), + length: 20, + }, + ValueOrHash: maybe.Some([]byte{1}), + }, }, - proofKey: []byte{1, 2, 3}, - expectedErr: ErrOddLengthWithValue, + proofKey: 
maybe.Some(ToKey([]byte{1, 2, 3})), + expectedErr: ErrPartialByteLengthWithValue, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - err := verifyProofPath(tt.path, newPath(tt.proofKey)) + err := verifyProofPath(tt.path, tt.proofKey) require.ErrorIs(t, err, tt.expectedErr) }) } } + +func TestProofNodeUnmarshalProtoInvalidMaybe(t *testing.T) { + now := time.Now().UnixNano() + t.Logf("seed: %d", now) + rand := rand.New(rand.NewSource(now)) // #nosec G404 + + node := newRandomProofNode(rand) + protoNode := node.ToProto() + + // It's invalid to have a value and be nothing. + protoNode.ValueOrHash = &pb.MaybeBytes{ + Value: []byte{1, 2, 3}, + IsNothing: true, + } + + var unmarshaledNode ProofNode + err := unmarshaledNode.UnmarshalProto(protoNode) + require.ErrorIs(t, err, ErrInvalidMaybe) +} + +func TestProofNodeUnmarshalProtoInvalidChildBytes(t *testing.T) { + now := time.Now().UnixNano() + t.Logf("seed: %d", now) + rand := rand.New(rand.NewSource(now)) // #nosec G404 + + node := newRandomProofNode(rand) + protoNode := node.ToProto() + + protoNode.Children = map[uint32][]byte{ + 1: []byte("not 32 bytes"), + } + + var unmarshaledNode ProofNode + err := unmarshaledNode.UnmarshalProto(protoNode) + require.ErrorIs(t, err, hashing.ErrInvalidHashLen) +} + +func TestProofNodeUnmarshalProtoInvalidChildIndex(t *testing.T) { + now := time.Now().UnixNano() + t.Logf("seed: %d", now) + rand := rand.New(rand.NewSource(now)) // #nosec G404 + + node := newRandomProofNode(rand) + protoNode := node.ToProto() + + childID := ids.GenerateTestID() + protoNode.Children[256] = childID[:] + + var unmarshaledNode ProofNode + err := unmarshaledNode.UnmarshalProto(protoNode) + require.ErrorIs(t, err, errChildIndexTooLarge) +} + +func TestProofNodeUnmarshalProtoMissingFields(t *testing.T) { + now := time.Now().UnixNano() + t.Logf("seed: %d", now) + rand := rand.New(rand.NewSource(now)) // #nosec G404 + + type test struct { + name string + nodeFunc func() *pb.ProofNode + 
expectedErr error + } + + tests := []test{ + { + name: "nil node", + nodeFunc: func() *pb.ProofNode { + return nil + }, + expectedErr: ErrNilProofNode, + }, + { + name: "nil ValueOrHash", + nodeFunc: func() *pb.ProofNode { + node := newRandomProofNode(rand) + protoNode := node.ToProto() + protoNode.ValueOrHash = nil + return protoNode + }, + expectedErr: ErrNilValueOrHash, + }, + { + name: "nil key", + nodeFunc: func() *pb.ProofNode { + node := newRandomProofNode(rand) + protoNode := node.ToProto() + protoNode.Key = nil + return protoNode + }, + expectedErr: ErrNilKey, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var node ProofNode + err := node.UnmarshalProto(tt.nodeFunc()) + require.ErrorIs(t, err, tt.expectedErr) + }) + } +} + +func FuzzProofNodeProtoMarshalUnmarshal(f *testing.F) { + f.Fuzz(func( + t *testing.T, + randSeed int64, + ) { + require := require.New(t) + rand := rand.New(rand.NewSource(randSeed)) // #nosec G404 + node := newRandomProofNode(rand) + + // Marshal and unmarshal it. + // Assert the unmarshaled one is the same as the original. + protoNode := node.ToProto() + var unmarshaledNode ProofNode + require.NoError(unmarshaledNode.UnmarshalProto(protoNode)) + require.Equal(node, unmarshaledNode) + + // Marshaling again should yield same result. + protoUnmarshaledNode := unmarshaledNode.ToProto() + require.Equal(protoNode, protoUnmarshaledNode) + }) +} + +func FuzzRangeProofProtoMarshalUnmarshal(f *testing.F) { + f.Fuzz(func( + t *testing.T, + randSeed int64, + ) { + require := require.New(t) + rand := rand.New(rand.NewSource(randSeed)) // #nosec G404 + + // Make a random range proof. 
+ startProofLen := rand.Intn(32) + startProof := make([]ProofNode, startProofLen) + for i := 0; i < startProofLen; i++ { + startProof[i] = newRandomProofNode(rand) + } + + endProofLen := rand.Intn(32) + endProof := make([]ProofNode, endProofLen) + for i := 0; i < endProofLen; i++ { + endProof[i] = newRandomProofNode(rand) + } + + numKeyValues := rand.Intn(128) + keyValues := make([]KeyValue, numKeyValues) + for i := 0; i < numKeyValues; i++ { + keyLen := rand.Intn(32) + key := make([]byte, keyLen) + _, _ = rand.Read(key) + + valueLen := rand.Intn(32) + value := make([]byte, valueLen) + _, _ = rand.Read(value) + + keyValues[i] = KeyValue{ + Key: key, + Value: value, + } + } + + proof := RangeProof{ + StartProof: startProof, + EndProof: endProof, + KeyValues: keyValues, + } + + // Marshal and unmarshal it. + // Assert the unmarshaled one is the same as the original. + var unmarshaledProof RangeProof + protoProof := proof.ToProto() + require.NoError(unmarshaledProof.UnmarshalProto(protoProof)) + require.Equal(proof, unmarshaledProof) + + // Marshaling again should yield same result. + protoUnmarshaledProof := unmarshaledProof.ToProto() + require.Equal(protoProof, protoUnmarshaledProof) + }) +} + +func FuzzChangeProofProtoMarshalUnmarshal(f *testing.F) { + f.Fuzz(func( + t *testing.T, + randSeed int64, + ) { + require := require.New(t) + rand := rand.New(rand.NewSource(randSeed)) // #nosec G404 + + // Make a random change proof. 
+ startProofLen := rand.Intn(32) + startProof := make([]ProofNode, startProofLen) + for i := 0; i < startProofLen; i++ { + startProof[i] = newRandomProofNode(rand) + } + + endProofLen := rand.Intn(32) + endProof := make([]ProofNode, endProofLen) + for i := 0; i < endProofLen; i++ { + endProof[i] = newRandomProofNode(rand) + } + + numKeyChanges := rand.Intn(128) + keyChanges := make([]KeyChange, numKeyChanges) + for i := 0; i < numKeyChanges; i++ { + keyLen := rand.Intn(32) + key := make([]byte, keyLen) + _, _ = rand.Read(key) + + value := maybe.Nothing[[]byte]() + hasValue := rand.Intn(2) == 0 + if hasValue { + valueLen := rand.Intn(32) + valueBytes := make([]byte, valueLen) + _, _ = rand.Read(valueBytes) + value = maybe.Some(valueBytes) + } + + keyChanges[i] = KeyChange{ + Key: key, + Value: value, + } + } + + proof := ChangeProof{ + StartProof: startProof, + EndProof: endProof, + KeyChanges: keyChanges, + } + + // Marshal and unmarshal it. + // Assert the unmarshaled one is the same as the original. + var unmarshaledProof ChangeProof + protoProof := proof.ToProto() + require.NoError(unmarshaledProof.UnmarshalProto(protoProof)) + require.Equal(proof, unmarshaledProof) + + // Marshaling again should yield same result. + protoUnmarshaledProof := unmarshaledProof.ToProto() + require.Equal(protoProof, protoUnmarshaledProof) + }) +} + +func TestChangeProofUnmarshalProtoNil(t *testing.T) { + var proof ChangeProof + err := proof.UnmarshalProto(nil) + require.ErrorIs(t, err, ErrNilChangeProof) +} + +func TestChangeProofUnmarshalProtoNilValue(t *testing.T) { + now := time.Now().UnixNano() + t.Logf("seed: %d", now) + rand := rand.New(rand.NewSource(now)) // #nosec G404 + + // Make a random change proof. 
+ startProofLen := rand.Intn(32) + startProof := make([]ProofNode, startProofLen) + for i := 0; i < startProofLen; i++ { + startProof[i] = newRandomProofNode(rand) + } + + endProofLen := rand.Intn(32) + endProof := make([]ProofNode, endProofLen) + for i := 0; i < endProofLen; i++ { + endProof[i] = newRandomProofNode(rand) + } + + numKeyChanges := rand.Intn(128) + 1 + keyChanges := make([]KeyChange, numKeyChanges) + for i := 0; i < numKeyChanges; i++ { + keyLen := rand.Intn(32) + key := make([]byte, keyLen) + _, _ = rand.Read(key) + + value := maybe.Nothing[[]byte]() + hasValue := rand.Intn(2) == 0 + if hasValue { + valueLen := rand.Intn(32) + valueBytes := make([]byte, valueLen) + _, _ = rand.Read(valueBytes) + value = maybe.Some(valueBytes) + } + + keyChanges[i] = KeyChange{ + Key: key, + Value: value, + } + } + + proof := ChangeProof{ + StartProof: startProof, + EndProof: endProof, + KeyChanges: keyChanges, + } + protoProof := proof.ToProto() + // Make a value nil + protoProof.KeyChanges[0].Value = nil + + var unmarshaledProof ChangeProof + err := unmarshaledProof.UnmarshalProto(protoProof) + require.ErrorIs(t, err, ErrNilMaybeBytes) +} + +func TestChangeProofUnmarshalProtoInvalidMaybe(t *testing.T) { + protoProof := &pb.ChangeProof{ + KeyChanges: []*pb.KeyChange{ + { + Key: []byte{1}, + Value: &pb.MaybeBytes{ + Value: []byte{1}, + IsNothing: true, + }, + }, + }, + } + + var proof ChangeProof + err := proof.UnmarshalProto(protoProof) + require.ErrorIs(t, err, ErrInvalidMaybe) +} + +func FuzzProofProtoMarshalUnmarshal(f *testing.F) { + f.Fuzz(func( + t *testing.T, + randSeed int64, + ) { + require := require.New(t) + rand := rand.New(rand.NewSource(randSeed)) // #nosec G404 + + // Make a random proof. 
+ proofLen := rand.Intn(32) + proofPath := make([]ProofNode, proofLen) + for i := 0; i < proofLen; i++ { + proofPath[i] = newRandomProofNode(rand) + } + + keyLen := rand.Intn(32) + key := make([]byte, keyLen) + _, _ = rand.Read(key) + + hasValue := rand.Intn(2) == 1 + value := maybe.Nothing[[]byte]() + if hasValue { + valueLen := rand.Intn(32) + valueBytes := make([]byte, valueLen) + _, _ = rand.Read(valueBytes) + value = maybe.Some(valueBytes) + } + + proof := Proof{ + Key: ToKey(key), + Value: value, + Path: proofPath, + } + + // Marshal and unmarshal it. + // Assert the unmarshaled one is the same as the original. + var unmarshaledProof Proof + protoProof := proof.ToProto() + require.NoError(unmarshaledProof.UnmarshalProto(protoProof)) + require.Equal(proof, unmarshaledProof) + + // Marshaling again should yield same result. + protoUnmarshaledProof := unmarshaledProof.ToProto() + require.Equal(protoProof, protoUnmarshaledProof) + }) +} + +func TestProofProtoUnmarshal(t *testing.T) { + type test struct { + name string + proof *pb.Proof + expectedErr error + } + + tests := []test{ + { + name: "nil", + proof: nil, + expectedErr: ErrNilProof, + }, + { + name: "nil value", + proof: &pb.Proof{}, + expectedErr: ErrNilValue, + }, + { + name: "invalid maybe", + proof: &pb.Proof{ + Value: &pb.MaybeBytes{ + Value: []byte{1}, + IsNothing: true, + }, + }, + expectedErr: ErrInvalidMaybe, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var proof Proof + err := proof.UnmarshalProto(tt.proof) + require.ErrorIs(t, err, tt.expectedErr) + }) + } +} + +func FuzzRangeProofInvariants(f *testing.F) { + deletePortion := 0.25 + f.Fuzz(func( + t *testing.T, + randSeed int64, + startBytes []byte, + endBytes []byte, + maxProofLen uint, + numKeyValues uint, + ) { + require := require.New(t) + + // Make sure proof length is valid + if maxProofLen == 0 { + t.SkipNow() + } + if numKeyValues == 0 { + t.SkipNow() + } + + // Make sure proof bounds are valid + if 
len(endBytes) != 0 && bytes.Compare(startBytes, endBytes) > 0 { + t.SkipNow() + } + + rand := rand.New(rand.NewSource(randSeed)) // #nosec G404 + + db, err := getBasicDB() + require.NoError(err) + + // Insert a bunch of random key values. + insertRandomKeyValues( + require, + rand, + []database.Database{db}, + numKeyValues, + deletePortion, + ) + + start := maybe.Nothing[[]byte]() + if len(startBytes) != 0 { + start = maybe.Some(startBytes) + } + + end := maybe.Nothing[[]byte]() + if len(endBytes) != 0 { + end = maybe.Some(endBytes) + } + + rootID, err := db.GetMerkleRoot(context.Background()) + require.NoError(err) + + rangeProof, err := db.GetRangeProof( + context.Background(), + start, + end, + int(maxProofLen), + ) + if rootID == ids.Empty { + require.ErrorIs(err, ErrEmptyProof) + return + } + require.NoError(err) + + require.NoError(rangeProof.Verify( + context.Background(), + start, + end, + rootID, + db.tokenSize, + )) + + // Make sure the start proof doesn't contain any nodes + // that are in the end proof. + endProofKeys := set.Set[Key]{} + for _, node := range rangeProof.EndProof { + endProofKeys.Add(node.Key) + } + + for _, node := range rangeProof.StartProof { + require.NotContains(endProofKeys, node.Key) + } + + // Make sure the EndProof invariant is maintained + switch { + case end.IsNothing(): + if len(rangeProof.KeyValues) == 0 { + if len(rangeProof.StartProof) == 0 { + require.Len(rangeProof.EndProof, 1) // Just the root + require.Empty(rangeProof.EndProof[0].Key.Bytes()) + } else { + require.Empty(rangeProof.EndProof) + } + } + case len(rangeProof.KeyValues) == 0: + require.NotEmpty(rangeProof.EndProof) + + // EndProof should be a proof for upper range bound. 
+ value := maybe.Nothing[[]byte]() + upperRangeBoundVal, err := db.Get(endBytes) + if err != nil { + require.ErrorIs(err, database.ErrNotFound) + } else { + value = maybe.Some(upperRangeBoundVal) + } + + proof := Proof{ + Path: rangeProof.EndProof, + Key: ToKey(endBytes), + Value: value, + } + + rootID, err := db.GetMerkleRoot(context.Background()) + require.NoError(err) + + require.NoError(proof.Verify(context.Background(), rootID, db.tokenSize)) + default: + require.NotEmpty(rangeProof.EndProof) + + greatestKV := rangeProof.KeyValues[len(rangeProof.KeyValues)-1] + // EndProof should be a proof for largest key-value. + proof := Proof{ + Path: rangeProof.EndProof, + Key: ToKey(greatestKV.Key), + Value: maybe.Some(greatestKV.Value), + } + + rootID, err := db.GetMerkleRoot(context.Background()) + require.NoError(err) + + require.NoError(proof.Verify(context.Background(), rootID, db.tokenSize)) + } + }) +} + +func FuzzProofVerification(f *testing.F) { + deletePortion := 0.25 + f.Fuzz(func( + t *testing.T, + key []byte, + randSeed int64, + numKeyValues uint, + ) { + rand := rand.New(rand.NewSource(randSeed)) // #nosec G404 + require := require.New(t) + db, err := getBasicDB() + require.NoError(err) + + // Insert a bunch of random key values. 
+ insertRandomKeyValues( + require, + rand, + []database.Database{db}, + numKeyValues, + deletePortion, + ) + + if db.getMerkleRoot() == ids.Empty { + return + } + + proof, err := db.GetProof( + context.Background(), + key, + ) + + require.NoError(err) + + rootID, err := db.GetMerkleRoot(context.Background()) + require.NoError(err) + + require.NoError(proof.Verify(context.Background(), rootID, db.tokenSize)) + + // Insert a new key-value pair + newKey := make([]byte, 32) + _, _ = rand.Read(newKey) // #nosec G404 + newValue := make([]byte, 32) + _, _ = rand.Read(newValue) // #nosec G404 + require.NoError(db.Put(newKey, newValue)) + + // Delete a key-value pair so database doesn't grow unbounded + iter := db.NewIterator() + deleteKey := iter.Key() + iter.Release() + + require.NoError(db.Delete(deleteKey)) + }) +} + +// Generate change proofs and verify that they are valid. +func FuzzChangeProofVerification(f *testing.F) { + const ( + numKeyValues = defaultHistoryLength / 2 + deletePortion = 0.25 + ) + + f.Fuzz(func( + t *testing.T, + startBytes []byte, + endBytes []byte, + maxProofLen uint, + randSeed int64, + ) { + require := require.New(t) + rand := rand.New(rand.NewSource(randSeed)) // #nosec G404 + + db, err := getBasicDB() + require.NoError(err) + + startRootID, err := db.GetMerkleRoot(context.Background()) + require.NoError(err) + + // Insert a bunch of random key values. + // Don't insert so many that we have insufficient history. 
+ insertRandomKeyValues( + require, + rand, + []database.Database{db}, + numKeyValues, + deletePortion, + ) + + endRootID, err := db.GetMerkleRoot(context.Background()) + require.NoError(err) + + // Make sure proof bounds are valid + if len(endBytes) != 0 && bytes.Compare(startBytes, endBytes) > 0 { + return + } + // Make sure proof length is valid + if maxProofLen == 0 { + return + } + + start := maybe.Nothing[[]byte]() + if len(startBytes) != 0 { + start = maybe.Some(startBytes) + } + + end := maybe.Nothing[[]byte]() + if len(endBytes) != 0 { + end = maybe.Some(endBytes) + } + + changeProof, err := db.GetChangeProof( + context.Background(), + startRootID, + endRootID, + start, + end, + int(maxProofLen), + ) + require.NoError(err) + + require.NoError(db.VerifyChangeProof( + context.Background(), + changeProof, + start, + end, + endRootID, + )) + }) +} diff --git a/avalanchego/x/merkledb/tracer.go b/avalanchego/x/merkledb/tracer.go new file mode 100644 index 00000000..d4e7a6fc --- /dev/null +++ b/avalanchego/x/merkledb/tracer.go @@ -0,0 +1,21 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package merkledb + +import "github.com/ava-labs/avalanchego/trace" + +const ( + DebugTrace TraceLevel = iota - 1 + InfoTrace // Default + NoTrace +) + +type TraceLevel int + +func getTracerIfEnabled(level, minLevel TraceLevel, tracer trace.Tracer) trace.Tracer { + if level <= minLevel { + return tracer + } + return trace.Noop +} diff --git a/avalanchego/x/merkledb/trie.go b/avalanchego/x/merkledb/trie.go index 47f0860b..891a90d3 100644 --- a/avalanchego/x/merkledb/trie.go +++ b/avalanchego/x/merkledb/trie.go @@ -1,19 +1,66 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package merkledb import ( + "bytes" "context" - "errors" + "fmt" + "slices" + "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/utils/maybe" ) -var errNoNewRoot = errors.New("there was no updated root in change list") +type ViewChanges struct { + BatchOps []database.BatchOp + MapOps map[string]maybe.Maybe[[]byte] + // ConsumeBytes when set to true will skip copying of bytes and assume + // ownership of the provided bytes. + ConsumeBytes bool +} + +type MerkleRootGetter interface { + // GetMerkleRoot returns the merkle root of the trie. + // Returns ids.Empty if the trie is empty. + GetMerkleRoot(ctx context.Context) (ids.ID, error) +} + +type ProofGetter interface { + // GetProof generates a proof of the value associated with a particular key, + // or a proof of its absence from the trie + // Returns ErrEmptyProof if the trie is empty. + GetProof(ctx context.Context, keyBytes []byte) (*Proof, error) +} + +type trieInternals interface { + // get the value associated with the key in path form + // database.ErrNotFound if the key is not present + getValue(key Key) ([]byte, error) + + // get an editable copy of the node with the given key path + // hasValue indicates which db to look in (value or intermediate) + getEditableNode(key Key, hasValue bool) (*node, error) + + // get the node associated with the key without locking + getNode(key Key, hasValue bool) (*node, error) + + // If this trie is non-empty, returns the root node. + // Must be copied before modification. + // Otherwise returns Nothing. 
+ getRoot() maybe.Maybe[*node] + + getTokenSize() int +} + +type Trie interface { + trieInternals + MerkleRootGetter + ProofGetter + database.Iteratee -type ReadOnlyTrie interface { // GetValue gets the value associated with the specified key // database.ErrNotFound if the key is not present GetValue(ctx context.Context, key []byte) ([]byte, error) @@ -22,63 +69,202 @@ type ReadOnlyTrie interface { // database.ErrNotFound if the key is not present GetValues(ctx context.Context, keys [][]byte) ([][]byte, []error) - // get the value associated with the key in path form - // database.ErrNotFound if the key is not present - getValue(key path, lock bool) ([]byte, error) + // GetRangeProof returns a proof of up to [maxLength] key-value pairs with + // keys in range [start, end]. + // If [start] is Nothing, there's no lower bound on the range. + // If [end] is Nothing, there's no upper bound on the range. + // Returns ErrEmptyProof if the trie is empty. + GetRangeProof(ctx context.Context, start maybe.Maybe[[]byte], end maybe.Maybe[[]byte], maxLength int) (*RangeProof, error) - // GetMerkleRoot returns the merkle root of the Trie - GetMerkleRoot(ctx context.Context) (ids.ID, error) + // NewView returns a new view on top of this Trie where the passed changes + // have been applied. + NewView( + ctx context.Context, + changes ViewChanges, + ) (View, error) +} - // get an editable copy of the node with the given key path - getEditableNode(key path) (*node, error) +type View interface { + Trie - // GetProof generates a proof of the value associated with a particular key, or a proof of its absence from the trie - GetProof(ctx context.Context, bytesPath []byte) (*Proof, error) + // CommitToDB writes the changes in this view to the database. + // Takes the DB commit lock. 
+ CommitToDB(ctx context.Context) error +} - // GetRangeProof generates a proof of up to maxLength smallest key/values with keys between start and end - GetRangeProof(ctx context.Context, start, end []byte, maxLength int) (*RangeProof, error) +// Returns the nodes along the path to [key]. +// The first node is the root, and the last node is either the node with the +// given [key], if it's in the trie, or the node with the largest prefix of +// the [key] if it isn't in the trie. +// Always returns at least the root node. +// Assumes [t] doesn't change while this function is running. +func visitPathToKey(t Trie, key Key, visitNode func(*node) error) error { + maybeRoot := t.getRoot() + if maybeRoot.IsNothing() { + return nil + } + root := maybeRoot.Value() + if !key.HasPrefix(root.key) { + return nil + } + var ( + // all node paths start at the root + currentNode = root + tokenSize = t.getTokenSize() + err error + ) + if err := visitNode(currentNode); err != nil { + return err + } + // while the entire path hasn't been matched + for currentNode.key.length < key.length { + // confirm that a child exists and grab its ID before attempting to load it + nextChildEntry, hasChild := currentNode.children[key.Token(currentNode.key.length, tokenSize)] - getKeyValues( - start []byte, - end []byte, - maxLength int, - keysToIgnore set.Set[string], - lock bool, - ) ([]KeyValue, error) + if !hasChild || !key.iteratedHasPrefix(nextChildEntry.compressedKey, currentNode.key.length+tokenSize, tokenSize) { + // there was no child along the path or the child that was there doesn't match the remaining path + return nil + } + // grab the next node along the path + currentNode, err = t.getNode(key.Take(currentNode.key.length+tokenSize+nextChildEntry.compressedKey.length), nextChildEntry.hasValue) + if err != nil { + return err + } + if err := visitNode(currentNode); err != nil { + return err + } + } + return nil } -type Trie interface { - ReadOnlyTrie +// Returns a proof that [bytesPath] 
is in or not in trie [t]. +// Assumes [t] doesn't change while this function is running. +func getProof(t Trie, key []byte) (*Proof, error) { + root := t.getRoot() + if root.IsNothing() { + return nil, ErrEmptyProof + } + + proof := &Proof{ + Key: ToKey(key), + } + + var closestNode *node + if err := visitPathToKey(t, proof.Key, func(n *node) error { + closestNode = n + // From root --> node from left --> right. + proof.Path = append(proof.Path, n.asProofNode()) + return nil + }); err != nil { + return nil, err + } - // Remove will delete a key from the Trie - Remove(ctx context.Context, key []byte) error + if len(proof.Path) == 0 { + // No key in [t] is a prefix of [key]. + // The root alone proves that [key] isn't in [t]. + proof.Path = append(proof.Path, root.Value().asProofNode()) + return proof, nil + } - // NewPreallocatedView returns a new view on top of this Trie with space allocated for changes - NewPreallocatedView(estimatedChanges int) (TrieView, error) + if closestNode.key == proof.Key { + // There is a node with the given [key]. + proof.Value = maybe.Bind(closestNode.value, slices.Clone[[]byte]) + return proof, nil + } - // NewView returns a new view on top of this Trie - NewView() (TrieView, error) + // There is no node with the given [key]. + // If there is a child at the index where the node would be + // if it existed, include that child in the proof. 
+ nextIndex := proof.Key.Token(closestNode.key.length, t.getTokenSize()) + child, ok := closestNode.children[nextIndex] + if !ok { + return proof, nil + } - // Insert a key/value pair into the Trie - Insert(ctx context.Context, key, value []byte) error + childNode, err := t.getNode( + closestNode.key.Extend(ToToken(nextIndex, t.getTokenSize()), child.compressedKey), + child.hasValue, + ) + if err != nil { + return nil, err + } + proof.Path = append(proof.Path, childNode.asProofNode()) + return proof, nil } -type TrieView interface { - Trie +// GetRangeProof returns a range proof for (at least part of) the key range [start, end]. +// The returned proof's [KeyValues] has at most [maxLength] values. +// [maxLength] must be > 0. +// Assumes [t] doesn't change while this function is running. +func getRangeProof( + t Trie, + start maybe.Maybe[[]byte], + end maybe.Maybe[[]byte], + maxLength int, +) (*RangeProof, error) { + switch { + case start.HasValue() && end.HasValue() && bytes.Compare(start.Value(), end.Value()) == 1: + return nil, ErrStartAfterEnd + case maxLength <= 0: + return nil, fmt.Errorf("%w but was %d", ErrInvalidMaxLength, maxLength) + case t.getRoot().IsNothing(): + return nil, ErrEmptyProof + } - // CommitToDB takes the changes of this trie and commits them down the view stack - // until all changes in the stack commit to the database - // Takes the DB commit lock - CommitToDB(ctx context.Context) error + result := RangeProof{ + KeyValues: make([]KeyValue, 0, initKeyValuesSize), + } + it := t.NewIteratorWithStart(start.Value()) + for it.Next() && len(result.KeyValues) < maxLength && (end.IsNothing() || bytes.Compare(it.Key(), end.Value()) <= 0) { + // clone the value to prevent editing of the values stored within the trie + result.KeyValues = append(result.KeyValues, KeyValue{ + Key: it.Key(), + Value: slices.Clone(it.Value()), + }) + } + it.Release() + if err := it.Error(); err != nil { + return nil, err + } + + // This proof may not contain all 
key-value pairs in [start, end] due to size limitations. + // The end proof we provide should be for the last key-value pair in the proof, not for + // the last key-value pair requested, which may not be in this proof. + var ( + endProof *Proof + err error + ) + if len(result.KeyValues) > 0 { + greatestKey := result.KeyValues[len(result.KeyValues)-1].Key + endProof, err = getProof(t, greatestKey) + if err != nil { + return nil, err + } + } else if end.HasValue() { + endProof, err = getProof(t, end.Value()) + if err != nil { + return nil, err + } + } + if endProof != nil { + result.EndProof = endProof.Path + } - // CommitToParent takes changes of this TrieView and commits them to its parent Trie - // Takes the DB commit lock - CommitToParent(ctx context.Context) error + if start.HasValue() { + startProof, err := getProof(t, start.Value()) + if err != nil { + return nil, err + } + result.StartProof = startProof.Path - // commits changes in the trie to its parent - // then commits the combined changes down the stack until all changes in the stack commit to the database - commitToDB(ctx context.Context) error + // strip out any common nodes to reduce proof size + i := 0 + for ; i < len(result.StartProof) && + i < len(result.EndProof) && + result.StartProof[i].Key == result.EndProof[i].Key; i++ { + } + result.StartProof = result.StartProof[i:] + } - // commits changes in the trieToCommit into the current trie - commitChanges(ctx context.Context, trieToCommit *trieView) error + return &result, nil } diff --git a/avalanchego/x/merkledb/trie_test.go b/avalanchego/x/merkledb/trie_test.go index 530a2db6..f6dc0351 100644 --- a/avalanchego/x/merkledb/trie_test.go +++ b/avalanchego/x/merkledb/trie_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package merkledb @@ -9,7 +9,6 @@ import ( "strconv" "sync" "testing" - "time" "github.com/stretchr/testify/require" @@ -19,41 +18,28 @@ import ( "github.com/ava-labs/avalanchego/utils/hashing" ) -func getNodeValue(t ReadOnlyTrie, key string) ([]byte, error) { - if asTrieView, ok := t.(*trieView); ok { - if err := asTrieView.calculateNodeIDs(context.Background()); err != nil { +func getNodeValue(t Trie, key string) ([]byte, error) { + path := ToKey([]byte(key)) + if asView, ok := t.(*view); ok { + if err := asView.calculateNodeIDs(context.Background()); err != nil { return nil, err } - path := newPath([]byte(key)) - nodePath, err := asTrieView.getPathTo(path) - if err != nil { - return nil, err - } - closestNode := nodePath[len(nodePath)-1] - if closestNode.key.Compare(path) != 0 || closestNode == nil { - return nil, database.ErrNotFound - } - - return closestNode.value.value, nil } - if asDatabases, ok := t.(*Database); ok { - view, err := asDatabases.NewView() - if err != nil { - return nil, err - } - path := newPath([]byte(key)) - nodePath, err := view.(*trieView).getPathTo(path) - if err != nil { - return nil, err - } - closestNode := nodePath[len(nodePath)-1] - if closestNode.key.Compare(path) != 0 || closestNode == nil { - return nil, database.ErrNotFound - } - return closestNode.value.value, nil + var result *node + + err := visitPathToKey(t, path, func(n *node) error { + result = n + return nil + }) + if err != nil { + return nil, err } - return nil, nil + if result == nil || result.key != path { + return nil, database.ErrNotFound + } + + return result.value.Value(), nil } func Test_GetValue_Safety(t *testing.T) { @@ -62,17 +48,23 @@ func Test_GetValue_Safety(t *testing.T) { db, err := getBasicDB() require.NoError(err) - trieView, err := db.NewView() + view, err := db.NewView( + context.Background(), + ViewChanges{ + BatchOps: []database.BatchOp{ + {Key: []byte{0}, Value: []byte{0}}, + }, + }, + ) require.NoError(err) - 
require.NoError(trieView.Insert(context.Background(), []byte{0}, []byte{0})) - trieVal, err := trieView.GetValue(context.Background(), []byte{0}) + trieVal, err := view.GetValue(context.Background(), []byte{0}) require.NoError(err) require.Equal([]byte{0}, trieVal) trieVal[0] = 1 // should still be []byte{0} after edit - trieVal, err = trieView.GetValue(context.Background(), []byte{0}) + trieVal, err = view.GetValue(context.Background(), []byte{0}) require.NoError(err) require.Equal([]byte{0}, trieVal) } @@ -83,11 +75,17 @@ func Test_GetValues_Safety(t *testing.T) { db, err := getBasicDB() require.NoError(err) - trieView, err := db.NewView() + view, err := db.NewView( + context.Background(), + ViewChanges{ + BatchOps: []database.BatchOp{ + {Key: []byte{0}, Value: []byte{0}}, + }, + }, + ) require.NoError(err) - require.NoError(trieView.Insert(context.Background(), []byte{0}, []byte{0})) - trieVals, errs := trieView.GetValues(context.Background(), [][]byte{{0}}) + trieVals, errs := view.GetValues(context.Background(), [][]byte{{0}}) require.Len(errs, 1) require.NoError(errs[0]) require.Equal([]byte{0}, trieVals[0]) @@ -95,723 +93,823 @@ func Test_GetValues_Safety(t *testing.T) { require.Equal([]byte{1}, trieVals[0]) // should still be []byte{0} after edit - trieVals, errs = trieView.GetValues(context.Background(), [][]byte{{0}}) + trieVals, errs = view.GetValues(context.Background(), [][]byte{{0}}) require.Len(errs, 1) require.NoError(errs[0]) require.Equal([]byte{0}, trieVals[0]) } -func TestTrieViewGetPathTo(t *testing.T) { +func TestVisitPathToKey(t *testing.T) { require := require.New(t) db, err := getBasicDB() require.NoError(err) - trieIntf, err := db.NewView() + trieIntf, err := db.NewView(context.Background(), ViewChanges{}) require.NoError(err) - trie, ok := trieIntf.(*trieView) - require.True(ok) + require.IsType(&view{}, trieIntf) + trie := trieIntf.(*view) - path, err := trie.getPathTo(newPath(nil)) - require.NoError(err) + var nodePath []*node + 
require.NoError(visitPathToKey(trie, ToKey(nil), func(n *node) error { + nodePath = append(nodePath, n) + return nil + })) - // Just the root - require.Len(path, 1) - require.Equal(trie.root, path[0]) + require.Empty(nodePath) // Insert a key key1 := []byte{0} - err = trie.Insert(context.Background(), key1, []byte("value")) - require.NoError(err) - err = trie.calculateNodeIDs(context.Background()) + trieIntf, err = trie.NewView( + context.Background(), + ViewChanges{ + BatchOps: []database.BatchOp{ + {Key: key1, Value: []byte("value")}, + }, + }, + ) require.NoError(err) + require.IsType(&view{}, trieIntf) + trie = trieIntf.(*view) + require.NoError(trie.calculateNodeIDs(context.Background())) - path, err = trie.getPathTo(newPath(key1)) - require.NoError(err) + nodePath = make([]*node, 0, 1) + require.NoError(visitPathToKey(trie, ToKey(key1), func(n *node) error { + nodePath = append(nodePath, n) + return nil + })) - // Root and 1 value - require.Len(path, 2) - require.Equal(trie.root, path[0]) - require.Equal(newPath(key1), path[1].key) + // 1 value + require.Len(nodePath, 1) + require.Equal(ToKey(key1), nodePath[0].key) // Insert another key which is a child of the first key2 := []byte{0, 1} - err = trie.Insert(context.Background(), key2, []byte("value")) - require.NoError(err) - err = trie.calculateNodeIDs(context.Background()) - require.NoError(err) - - path, err = trie.getPathTo(newPath(key2)) + trieIntf, err = trie.NewView( + context.Background(), + ViewChanges{ + BatchOps: []database.BatchOp{ + {Key: key2, Value: []byte("value")}, + }, + }, + ) require.NoError(err) - require.Len(path, 3) - require.Equal(trie.root, path[0]) - require.Equal(newPath(key1), path[1].key) - require.Equal(newPath(key2), path[2].key) - + require.IsType(&view{}, trieIntf) + trie = trieIntf.(*view) + require.NoError(trie.calculateNodeIDs(context.Background())) + + nodePath = make([]*node, 0, 2) + require.NoError(visitPathToKey(trie, ToKey(key2), func(n *node) error { + nodePath = 
append(nodePath, n) + return nil + })) + require.Len(nodePath, 2) + require.Equal(trie.root.Value(), nodePath[0]) + require.Equal(ToKey(key1), nodePath[0].key) + require.Equal(ToKey(key2), nodePath[1].key) + + // Trie is: + // [0] + // | + // [0,1] // Insert a key which shares no prefix with the others key3 := []byte{255} - err = trie.Insert(context.Background(), key3, []byte("value")) - require.NoError(err) - err = trie.calculateNodeIDs(context.Background()) - require.NoError(err) - - path, err = trie.getPathTo(newPath(key3)) - require.NoError(err) - require.Len(path, 2) - require.Equal(trie.root, path[0]) - require.Equal(newPath(key3), path[1].key) - - // Other key paths not affected - path, err = trie.getPathTo(newPath(key2)) + trieIntf, err = trie.NewView( + context.Background(), + ViewChanges{ + BatchOps: []database.BatchOp{ + {Key: key3, Value: []byte("value")}, + }, + }, + ) require.NoError(err) - require.Len(path, 3) - require.Equal(trie.root, path[0]) - require.Equal(newPath(key1), path[1].key) - require.Equal(newPath(key2), path[2].key) + require.IsType(&view{}, trieIntf) + trie = trieIntf.(*view) + require.NoError(trie.calculateNodeIDs(context.Background())) + + // Trie is: + // [] + // / \ + // [0] [255] + // | + // [0,1] + nodePath = make([]*node, 0, 2) + require.NoError(visitPathToKey(trie, ToKey(key3), func(n *node) error { + nodePath = append(nodePath, n) + return nil + })) + + require.Len(nodePath, 2) + require.Equal(trie.root.Value(), nodePath[0]) + require.Zero(trie.root.Value().key.length) + require.Equal(ToKey(key3), nodePath[1].key) + + // Other key path not affected + nodePath = make([]*node, 0, 3) + require.NoError(visitPathToKey(trie, ToKey(key2), func(n *node) error { + nodePath = append(nodePath, n) + return nil + })) + require.Len(nodePath, 3) + require.Equal(trie.root.Value(), nodePath[0]) + require.Equal(ToKey(key1), nodePath[1].key) + require.Equal(ToKey(key2), nodePath[2].key) // Gets closest node when key doesn't exist key4 := 
[]byte{0, 1, 2} - path, err = trie.getPathTo(newPath(key4)) - require.NoError(err) - require.Len(path, 3) - require.Equal(trie.root, path[0]) - require.Equal(newPath(key1), path[1].key) - require.Equal(newPath(key2), path[2].key) + nodePath = make([]*node, 0, 3) + require.NoError(visitPathToKey(trie, ToKey(key4), func(n *node) error { + nodePath = append(nodePath, n) + return nil + })) + + require.Len(nodePath, 3) + require.Equal(trie.root.Value(), nodePath[0]) + require.Equal(ToKey(key1), nodePath[1].key) + require.Equal(ToKey(key2), nodePath[2].key) // Gets just root when key doesn't exist and no key shares a prefix key5 := []byte{128} - path, err = trie.getPathTo(newPath(key5)) - require.NoError(err) - require.Len(path, 1) - require.Equal(trie.root, path[0]) + nodePath = make([]*node, 0, 1) + require.NoError(visitPathToKey(trie, ToKey(key5), func(n *node) error { + nodePath = append(nodePath, n) + return nil + })) + require.Len(nodePath, 1) + require.Equal(trie.root.Value(), nodePath[0]) } func Test_Trie_ViewOnCommitedView(t *testing.T) { - dbTrie, err := getBasicDB() - require.NoError(t, err) - require.NotNil(t, dbTrie) + require := require.New(t) - committedTrie, err := dbTrie.NewView() - require.NoError(t, err) - err = committedTrie.Insert(context.Background(), []byte{0}, []byte{0}) - require.NoError(t, err) + dbTrie, err := getBasicDB() + require.NoError(err) + require.NotNil(dbTrie) - require.NoError(t, committedTrie.CommitToDB(context.Background())) + committedTrie, err := dbTrie.NewView( + context.Background(), + ViewChanges{ + BatchOps: []database.BatchOp{ + {Key: []byte{0}, Value: []byte{0}}, + }, + }, + ) + require.NoError(err) - newView, err := committedTrie.NewView() - require.NoError(t, err) + require.NoError(committedTrie.CommitToDB(context.Background())) - err = newView.Insert(context.Background(), []byte{1}, []byte{1}) - require.NoError(t, err) - require.NoError(t, newView.CommitToDB(context.Background())) + newView, err := committedTrie.NewView( 
+ context.Background(), + ViewChanges{ + BatchOps: []database.BatchOp{ + {Key: []byte{1}, Value: []byte{1}}, + }, + }, + ) + require.NoError(err) + require.NoError(newView.CommitToDB(context.Background())) val0, err := dbTrie.GetValue(context.Background(), []byte{0}) - require.NoError(t, err) - require.Equal(t, []byte{0}, val0) + require.NoError(err) + require.Equal([]byte{0}, val0) val1, err := dbTrie.GetValue(context.Background(), []byte{1}) - require.NoError(t, err) - require.Equal(t, []byte{1}, val1) + require.NoError(err) + require.Equal([]byte{1}, val1) } -func Test_Trie_Partial_Commit_Leaves_Valid_Tries(t *testing.T) { - dbTrie, err := getBasicDB() - require.NoError(t, err) - require.NotNil(t, dbTrie) - - trie2, err := dbTrie.NewView() - require.NoError(t, err) - err = trie2.Insert(context.Background(), []byte("key"), []byte("value")) - require.NoError(t, err) - - trie3, err := trie2.NewView() - require.NoError(t, err) - err = trie3.Insert(context.Background(), []byte("key1"), []byte("value1")) - require.NoError(t, err) - - trie4, err := trie3.NewView() - require.NoError(t, err) - err = trie4.Insert(context.Background(), []byte("key2"), []byte("value2")) - require.NoError(t, err) - - trie5, err := trie4.NewView() - require.NoError(t, err) - err = trie5.Insert(context.Background(), []byte("key3"), []byte("value3")) - require.NoError(t, err) - - err = trie3.CommitToDB(context.Background()) - require.NoError(t, err) +func Test_Trie_WriteToDB(t *testing.T) { + require := require.New(t) - root, err := trie3.GetMerkleRoot(context.Background()) - require.NoError(t, err) + dbTrie, err := getBasicDB() + require.NoError(err) + require.NotNil(dbTrie) - dbRoot, err := dbTrie.GetMerkleRoot(context.Background()) - require.NoError(t, err) + trieIntf1, err := dbTrie.NewView(context.Background(), ViewChanges{}) + require.NoError(err) + trie1 := trieIntf1.(*view) - require.Equal(t, root, dbRoot) -} + // value hasn't been inserted so shouldn't exist + value, err := 
trie1.GetValue(context.Background(), []byte("key")) + require.ErrorIs(err, database.ErrNotFound) + require.Nil(value) + + trieIntf2, err := trie1.NewView( + context.Background(), + ViewChanges{ + BatchOps: []database.BatchOp{ + {Key: []byte("key"), Value: []byte("value")}, + }, + }, + ) + require.NoError(err) + trie2 := trieIntf2.(*view) -func Test_Trie_WriteToDB(t *testing.T) { - dbTrie, err := getBasicDB() - require.NoError(t, err) - require.NotNil(t, dbTrie) - trie, err := dbTrie.NewView() - require.NoError(t, err) + value, err = getNodeValue(trie2, "key") + require.NoError(err) + require.Equal([]byte("value"), value) - // value hasn't been inserted so shouldn't exist - value, err := trie.GetValue(context.Background(), []byte("key")) - require.Error(t, err) - require.Equal(t, database.ErrNotFound, err) - require.Nil(t, value) + require.NoError(trie1.CommitToDB(context.Background())) + require.NoError(trie2.CommitToDB(context.Background())) - err = trie.Insert(context.Background(), []byte("key"), []byte("value")) - require.NoError(t, err) + key := []byte("key") + prefixedKey := make([]byte, len(key)+valueNodePrefixLen) + copy(prefixedKey, valueNodePrefix) + copy(prefixedKey[valueNodePrefixLen:], key) + rawBytes, err := dbTrie.baseDB.Get(prefixedKey) + require.NoError(err) - value, err = getNodeValue(trie, "key") - require.NoError(t, err) - require.Equal(t, []byte("value"), value) - - err = trie.CommitToDB(context.Background()) - require.NoError(t, err) - p := newPath([]byte("key")) - rawBytes, err := dbTrie.nodeDB.Get(p.Bytes()) - require.NoError(t, err) - node, err := parseNode(p, rawBytes) - require.NoError(t, err) - require.Equal(t, []byte("value"), node.value.value) + node, err := parseNode(ToKey(key), rawBytes) + require.NoError(err) + require.Equal([]byte("value"), node.value.Value()) } func Test_Trie_InsertAndRetrieve(t *testing.T) { + require := require.New(t) + dbTrie, err := getBasicDB() - require.NoError(t, err) - require.NotNil(t, dbTrie) - trie := 
Trie(dbTrie) + require.NoError(err) + require.NotNil(dbTrie) // value hasn't been inserted so shouldn't exist value, err := dbTrie.Get([]byte("key")) - require.Error(t, err) - require.Equal(t, database.ErrNotFound, err) - require.Nil(t, value) + require.ErrorIs(err, database.ErrNotFound) + require.Nil(value) - err = trie.Insert(context.Background(), []byte("key"), []byte("value")) - require.NoError(t, err) + require.NoError(dbTrie.Put([]byte("key"), []byte("value"))) - value, err = getNodeValue(trie, "key") - require.NoError(t, err) - require.Equal(t, []byte("value"), value) + value, err = getNodeValue(dbTrie, "key") + require.NoError(err) + require.Equal([]byte("value"), value) } func Test_Trie_Overwrite(t *testing.T) { - dbTrie, err := getBasicDB() - require.NoError(t, err) - require.NotNil(t, dbTrie) - trie := Trie(dbTrie) - - err = trie.Insert(context.Background(), []byte("key"), []byte("value0")) - require.NoError(t, err) + require := require.New(t) + dbTrie, err := getBasicDB() + require.NoError(err) + require.NotNil(dbTrie) + trie, err := dbTrie.NewView( + context.Background(), + ViewChanges{ + BatchOps: []database.BatchOp{ + {Key: []byte("key"), Value: []byte("value0")}, + {Key: []byte("key"), Value: []byte("value1")}, + }, + }, + ) + require.NoError(err) value, err := getNodeValue(trie, "key") - require.NoError(t, err) - require.Equal(t, []byte("value0"), value) - - err = trie.Insert(context.Background(), []byte("key"), []byte("value1")) - require.NoError(t, err) + require.NoError(err) + require.Equal([]byte("value1"), value) + trie, err = dbTrie.NewView( + context.Background(), + ViewChanges{ + BatchOps: []database.BatchOp{ + {Key: []byte("key"), Value: []byte("value2")}, + }, + }, + ) + require.NoError(err) value, err = getNodeValue(trie, "key") - require.NoError(t, err) - require.Equal(t, []byte("value1"), value) + require.NoError(err) + require.Equal([]byte("value2"), value) } func Test_Trie_Delete(t *testing.T) { + require := require.New(t) + dbTrie, 
err := getBasicDB() - require.NoError(t, err) - require.NotNil(t, dbTrie) - trie := Trie(dbTrie) + require.NoError(err) + require.NotNil(dbTrie) - err = trie.Insert(context.Background(), []byte("key"), []byte("value0")) - require.NoError(t, err) + trie, err := dbTrie.NewView( + context.Background(), + ViewChanges{ + BatchOps: []database.BatchOp{ + {Key: []byte("key"), Value: []byte("value0")}, + }, + }, + ) + require.NoError(err) value, err := getNodeValue(trie, "key") - require.NoError(t, err) - require.Equal(t, []byte("value0"), value) + require.NoError(err) + require.Equal([]byte("value0"), value) - err = trie.Remove(context.Background(), []byte("key")) - require.NoError(t, err) + trie, err = dbTrie.NewView( + context.Background(), + ViewChanges{ + BatchOps: []database.BatchOp{ + {Key: []byte("key"), Delete: true}, + }, + }, + ) + require.NoError(err) value, err = getNodeValue(trie, "key") - require.ErrorIs(t, err, database.ErrNotFound) - require.Nil(t, value) + require.ErrorIs(err, database.ErrNotFound) + require.Nil(value) } func Test_Trie_DeleteMissingKey(t *testing.T) { + require := require.New(t) + trie, err := getBasicDB() - require.NoError(t, err) - require.NotNil(t, trie) + require.NoError(err) + require.NotNil(trie) - err = trie.Remove(context.Background(), []byte("key")) - require.NoError(t, err) + require.NoError(trie.DeleteContext(context.Background(), []byte("key"))) } func Test_Trie_ExpandOnKeyPath(t *testing.T) { - dbTrie, err := getBasicDB() - require.NoError(t, err) - require.NotNil(t, dbTrie) - trie := Trie(dbTrie) + require := require.New(t) - err = trie.Insert(context.Background(), []byte("key"), []byte("value0")) - require.NoError(t, err) + dbTrie, err := getBasicDB() + require.NoError(err) + require.NotNil(dbTrie) + trieIntf, err := dbTrie.NewView( + context.Background(), + ViewChanges{ + BatchOps: []database.BatchOp{ + {Key: []byte("key"), Value: []byte("value0")}, + }, + }, + ) + require.NoError(err) + trie := trieIntf.(*view) value, err 
:= getNodeValue(trie, "key") - require.NoError(t, err) - require.Equal(t, []byte("value0"), value) + require.NoError(err) + require.Equal([]byte("value0"), value) - err = trie.Insert(context.Background(), []byte("key1"), []byte("value1")) - require.NoError(t, err) + trieIntf, err = trie.NewView( + context.Background(), + ViewChanges{ + BatchOps: []database.BatchOp{ + {Key: []byte("key1"), Value: []byte("value1")}, + }, + }, + ) + require.NoError(err) + trie = trieIntf.(*view) value, err = getNodeValue(trie, "key") - require.NoError(t, err) - require.Equal(t, []byte("value0"), value) + require.NoError(err) + require.Equal([]byte("value0"), value) value, err = getNodeValue(trie, "key1") - require.NoError(t, err) - require.Equal(t, []byte("value1"), value) + require.NoError(err) + require.Equal([]byte("value1"), value) - err = trie.Insert(context.Background(), []byte("key12"), []byte("value12")) - require.NoError(t, err) + trieIntf, err = trie.NewView( + context.Background(), + ViewChanges{ + BatchOps: []database.BatchOp{ + {Key: []byte("key12"), Value: []byte("value12")}, + }, + }, + ) + require.NoError(err) + trie = trieIntf.(*view) value, err = getNodeValue(trie, "key") - require.NoError(t, err) - require.Equal(t, []byte("value0"), value) + require.NoError(err) + require.Equal([]byte("value0"), value) value, err = getNodeValue(trie, "key1") - require.NoError(t, err) - require.Equal(t, []byte("value1"), value) + require.NoError(err) + require.Equal([]byte("value1"), value) value, err = getNodeValue(trie, "key12") - require.NoError(t, err) - require.Equal(t, []byte("value12"), value) + require.NoError(err) + require.Equal([]byte("value12"), value) } -func Test_Trie_CompressedPaths(t *testing.T) { - dbTrie, err := getBasicDB() - require.NoError(t, err) - require.NotNil(t, dbTrie) - trie := Trie(dbTrie) +func Test_Trie_CompressedKeys(t *testing.T) { + require := require.New(t) - err = trie.Insert(context.Background(), []byte("key12"), []byte("value12")) - 
require.NoError(t, err) + dbTrie, err := getBasicDB() + require.NoError(err) + require.NotNil(dbTrie) + trieIntf, err := dbTrie.NewView( + context.Background(), + ViewChanges{ + BatchOps: []database.BatchOp{ + {Key: []byte("key12"), Value: []byte("value12")}, + }, + }, + ) + require.NoError(err) + trie := trieIntf.(*view) value, err := getNodeValue(trie, "key12") - require.NoError(t, err) - require.Equal(t, []byte("value12"), value) + require.NoError(err) + require.Equal([]byte("value12"), value) - err = trie.Insert(context.Background(), []byte("key1"), []byte("value1")) - require.NoError(t, err) + trieIntf, err = trie.NewView( + context.Background(), + ViewChanges{ + BatchOps: []database.BatchOp{ + {Key: []byte("key1"), Value: []byte("value1")}, + }, + }, + ) + require.NoError(err) + trie = trieIntf.(*view) value, err = getNodeValue(trie, "key12") - require.NoError(t, err) - require.Equal(t, []byte("value12"), value) + require.NoError(err) + require.Equal([]byte("value12"), value) value, err = getNodeValue(trie, "key1") - require.NoError(t, err) - require.Equal(t, []byte("value1"), value) + require.NoError(err) + require.Equal([]byte("value1"), value) - err = trie.Insert(context.Background(), []byte("key"), []byte("value")) - require.NoError(t, err) + trieIntf, err = trie.NewView( + context.Background(), + ViewChanges{ + BatchOps: []database.BatchOp{ + {Key: []byte("key"), Value: []byte("value")}, + }, + }, + ) + require.NoError(err) + trie = trieIntf.(*view) value, err = getNodeValue(trie, "key12") - require.NoError(t, err) - require.Equal(t, []byte("value12"), value) + require.NoError(err) + require.Equal([]byte("value12"), value) value, err = getNodeValue(trie, "key1") - require.NoError(t, err) - require.Equal(t, []byte("value1"), value) + require.NoError(err) + require.Equal([]byte("value1"), value) value, err = getNodeValue(trie, "key") - require.NoError(t, err) - require.Equal(t, []byte("value"), value) + require.NoError(err) + require.Equal([]byte("value"), 
value) } func Test_Trie_SplitBranch(t *testing.T) { + require := require.New(t) + dbTrie, err := getBasicDB() - require.NoError(t, err) - require.NotNil(t, dbTrie) - trie := Trie(dbTrie) + require.NoError(err) + require.NotNil(dbTrie) // force a new node to generate with common prefix "key1" and have these two nodes as children - err = trie.Insert(context.Background(), []byte("key12"), []byte("value12")) - require.NoError(t, err) - err = trie.Insert(context.Background(), []byte("key134"), []byte("value134")) - require.NoError(t, err) + trie, err := dbTrie.NewView( + context.Background(), + ViewChanges{ + BatchOps: []database.BatchOp{ + {Key: []byte("key12"), Value: []byte("value12")}, + {Key: []byte("key134"), Value: []byte("value134")}, + }, + }, + ) + require.NoError(err) value, err := getNodeValue(trie, "key12") - require.NoError(t, err) - require.Equal(t, []byte("value12"), value) + require.NoError(err) + require.Equal([]byte("value12"), value) value, err = getNodeValue(trie, "key134") - require.NoError(t, err) - require.Equal(t, []byte("value134"), value) + require.NoError(err) + require.Equal([]byte("value134"), value) } func Test_Trie_HashCountOnBranch(t *testing.T) { + require := require.New(t) + dbTrie, err := getBasicDB() - require.NoError(t, err) - require.NotNil(t, dbTrie) - trie := Trie(dbTrie) + require.NoError(err) + require.NotNil(dbTrie) - // force a new node to generate with common prefix "key1" and have these two nodes as children - err = trie.Insert(context.Background(), []byte("key12"), []byte("value12")) - require.NoError(t, err) - oldCount := dbTrie.metrics.(*mockMetrics).hashCount - err = trie.Insert(context.Background(), []byte("key134"), []byte("value134")) - require.NoError(t, err) - // only hashes the new branch node, the new child node, and root - // shouldn't hash the existing node - require.Equal(t, oldCount+3, dbTrie.metrics.(*mockMetrics).hashCount) -} + key1, key2, keyPrefix := []byte("12"), []byte("1F"), []byte("1") -func 
Test_Trie_HashCountOnDelete(t *testing.T) { - trie, err := getBasicDB() - require.NoError(t, err) - require.NotNil(t, trie) - - err = trie.Insert(context.Background(), []byte("k"), []byte("value0")) - require.NoError(t, err) - err = trie.Insert(context.Background(), []byte("ke"), []byte("value1")) - require.NoError(t, err) - err = trie.Insert(context.Background(), []byte("key"), []byte("value2")) - require.NoError(t, err) - err = trie.Insert(context.Background(), []byte("key1"), []byte("value3")) - require.NoError(t, err) - err = trie.Insert(context.Background(), []byte("key2"), []byte("value4")) - require.NoError(t, err) - - oldCount := trie.metrics.(*mockMetrics).hashCount + view1, err := dbTrie.NewView( + context.Background(), + ViewChanges{ + BatchOps: []database.BatchOp{ + {Key: key1, Value: []byte("")}, + }, + }) + require.NoError(err) - // delete the middle values - view, err := trie.NewView() - require.NoError(t, err) - err = view.Remove(context.Background(), []byte("k")) - require.NoError(t, err) - err = view.Remove(context.Background(), []byte("ke")) - require.NoError(t, err) - err = view.Remove(context.Background(), []byte("key")) - require.NoError(t, err) - err = view.CommitToDB(context.Background()) - require.NoError(t, err) - - // the root is the only updated node so only one new hash - require.Equal(t, oldCount+1, trie.metrics.(*mockMetrics).hashCount) -} + // trie is: + // [1] -func Test_Trie_NoExistingResidual(t *testing.T) { - dbTrie, err := getBasicDB() - require.NoError(t, err) - require.NotNil(t, dbTrie) - trie := Trie(dbTrie) - - err = trie.Insert(context.Background(), []byte("k"), []byte("1")) - require.NoError(t, err) - err = trie.Insert(context.Background(), []byte("ke"), []byte("2")) - require.NoError(t, err) - err = trie.Insert(context.Background(), []byte("key1"), []byte("3")) - require.NoError(t, err) - err = trie.Insert(context.Background(), []byte("key123"), []byte("4")) - require.NoError(t, err) + // create new node with common 
prefix whose children + // are key1, key2 + view2, err := view1.NewView( + context.Background(), + ViewChanges{ + BatchOps: []database.BatchOp{ + {Key: key2, Value: []byte("")}, + }, + }) + require.NoError(err) - value, err := getNodeValue(trie, "k") - require.NoError(t, err) - require.Equal(t, []byte("1"), value) + // trie is: + // [1] + // / \ + // [12] [1F] - value, err = getNodeValue(trie, "ke") - require.NoError(t, err) - require.Equal(t, []byte("2"), value) + // clear the hash count to ignore setup + dbTrie.metrics.(*mockMetrics).hashCount = 0 - value, err = getNodeValue(trie, "key1") - require.NoError(t, err) - require.Equal(t, []byte("3"), value) + // calculate the root + _, err = view2.GetMerkleRoot(context.Background()) + require.NoError(err) - value, err = getNodeValue(trie, "key123") - require.NoError(t, err) - require.Equal(t, []byte("4"), value) + // Make sure the root is an intermediate node with the expected common prefix. + // Note it's only created on call to GetMerkleRoot, not in NewView. 
+ prefixNode, err := view2.getEditableNode(ToKey(keyPrefix), false) + require.NoError(err) + root := view2.getRoot().Value() + require.Equal(root, prefixNode) + require.Len(root.children, 2) + + // Had to hash each of the new nodes ("12" and "1F") and the new root + require.Equal(int64(3), dbTrie.metrics.(*mockMetrics).hashCount) } -func Test_Trie_CommitChanges(t *testing.T) { +func Test_Trie_HashCountOnDelete(t *testing.T) { require := require.New(t) - db, err := getBasicDB() - require.NoError(err) - - view1Intf, err := db.NewView() + dbTrie, err := getBasicDB() require.NoError(err) - view1, ok := view1Intf.(*trieView) - require.True(ok) - err = view1.Insert(context.Background(), []byte{1}, []byte{1}) + trie, err := dbTrie.NewView( + context.Background(), + ViewChanges{ + BatchOps: []database.BatchOp{ + {Key: []byte("k"), Value: []byte("value0")}, + {Key: []byte("ke"), Value: []byte("value1")}, + {Key: []byte("key"), Value: []byte("value2")}, + {Key: []byte("key1"), Value: []byte("value3")}, + {Key: []byte("key2"), Value: []byte("value4")}, + }, + }, + ) require.NoError(err) + require.NotNil(trie) - // view1 - // | - // db - - // Case: Committing to an invalid view - view1.invalidated = true - err = view1.commitChanges(context.Background(), &trieView{}) - require.ErrorIs(err, ErrInvalid) - view1.invalidated = false // Reset + require.NoError(trie.CommitToDB(context.Background())) + oldCount := dbTrie.metrics.(*mockMetrics).hashCount - // Case: Committing a nil view is a no-op - oldRoot, err := view1.getMerkleRoot(context.Background()) - require.NoError(err) - err = view1.commitChanges(context.Background(), nil) - require.NoError(err) - newRoot, err := view1.getMerkleRoot(context.Background()) + // delete the middle values + view, err := trie.NewView( + context.Background(), + ViewChanges{ + BatchOps: []database.BatchOp{ + {Key: []byte("k"), Delete: true}, + {Key: []byte("ke"), Delete: true}, + {Key: []byte("key"), Delete: true}, + }, + }, + ) require.NoError(err) 
- require.Equal(oldRoot, newRoot) + require.NoError(view.CommitToDB(context.Background())) - // Case: Committing a view with the wrong parent. - err = view1.commitChanges(context.Background(), &trieView{}) - require.ErrorIs(err, ErrViewIsNotAChild) + // trie is: + // [key0] (first 28 bits) + // / \ + // [key1] [key2] + root := view.getRoot().Value() + expectedRootKey := ToKey([]byte("key0")).Take(28) + require.Equal(expectedRootKey, root.key) + require.Len(root.children, 2) - // Case: Committing a view which is invalid - err = view1.commitChanges(context.Background(), &trieView{ - parentTrie: view1, - invalidated: true, - }) - require.ErrorIs(err, ErrInvalid) + // Had to hash the new root but not [key1] or [key2] nodes + require.Equal(oldCount+1, dbTrie.metrics.(*mockMetrics).hashCount) +} - // Make more views atop the existing one - view2Intf, err := view1.NewView() - require.NoError(err) - view2, ok := view2Intf.(*trieView) - require.True(ok) +func Test_Trie_NoExistingResidual(t *testing.T) { + require := require.New(t) - err = view2.Insert(context.Background(), []byte{2}, []byte{2}) - require.NoError(err) - err = view2.Remove(context.Background(), []byte{1}) + dbTrie, err := getBasicDB() require.NoError(err) - - view2Root, err := view2.getMerkleRoot(context.Background()) + require.NotNil(dbTrie) + + trie, err := dbTrie.NewView( + context.Background(), + ViewChanges{ + BatchOps: []database.BatchOp{ + {Key: []byte("k"), Value: []byte("1")}, + {Key: []byte("ke"), Value: []byte("2")}, + {Key: []byte("key1"), Value: []byte("3")}, + {Key: []byte("key123"), Value: []byte("4")}, + }, + }, + ) require.NoError(err) + require.NotNil(trie) - // view1 has 1 --> 1 - // view2 has 2 --> 2 - - view3Intf, err := view1.NewView() + value, err := getNodeValue(trie, "k") require.NoError(err) - view3, ok := view3Intf.(*trieView) - require.True(ok) + require.Equal([]byte("1"), value) - view4Intf, err := view2.NewView() + value, err = getNodeValue(trie, "ke") require.NoError(err) - 
view4, ok := view4Intf.(*trieView) - require.True(ok) - - // view4 - // | - // view2 view3 - // | / - // view1 - // | - // db + require.Equal([]byte("2"), value) - // Commit view2 to view1 - err = view1.commitChanges(context.Background(), view2) + value, err = getNodeValue(trie, "key1") require.NoError(err) + require.Equal([]byte("3"), value) - // All siblings of view2 should be invalidated - require.True(view3.invalidated) - - // Children of view2 are now children of view1 - require.Equal(view1, view4.parentTrie) - require.Contains(view1.childViews, view4) - - // Value changes from view2 are reflected in view1 - newView1Root, err := view1.getMerkleRoot(context.Background()) - require.NoError(err) - require.Equal(view2Root, newView1Root) - _, err = view1.GetValue(context.Background(), []byte{1}) - require.ErrorIs(err, database.ErrNotFound) - got, err := view1.GetValue(context.Background(), []byte{2}) + value, err = getNodeValue(trie, "key123") require.NoError(err) - require.Equal([]byte{2}, got) + require.Equal([]byte("4"), value) } func Test_Trie_BatchApply(t *testing.T) { + require := require.New(t) + dbTrie, err := getBasicDB() - require.NoError(t, err) - require.NotNil(t, dbTrie) - trie, err := dbTrie.NewView() - require.NoError(t, err) - - err = trie.Insert(context.Background(), []byte("key1"), []byte("value1")) - require.NoError(t, err) - err = trie.Insert(context.Background(), []byte("key12"), []byte("value12")) - require.NoError(t, err) - err = trie.Insert(context.Background(), []byte("key134"), []byte("value134")) - require.NoError(t, err) - err = trie.Remove(context.Background(), []byte("key1")) - require.NoError(t, err) + require.NoError(err) + require.NotNil(dbTrie) + + trie, err := dbTrie.NewView( + context.Background(), + ViewChanges{ + BatchOps: []database.BatchOp{ + {Key: []byte("key1"), Value: []byte("value1")}, + {Key: []byte("key12"), Value: []byte("value12")}, + {Key: []byte("key134"), Value: []byte("value134")}, + {Key: []byte("key1"), Delete: 
true}, + }, + }, + ) + require.NoError(err) + require.NotNil(trie) value, err := getNodeValue(trie, "key12") - require.NoError(t, err) - require.Equal(t, []byte("value12"), value) + require.NoError(err) + require.Equal([]byte("value12"), value) value, err = getNodeValue(trie, "key134") - require.NoError(t, err) - require.Equal(t, []byte("value134"), value) + require.NoError(err) + require.Equal([]byte("value134"), value) _, err = getNodeValue(trie, "key1") - require.Error(t, err) - require.Equal(t, database.ErrNotFound, err) + require.ErrorIs(err, database.ErrNotFound) } func Test_Trie_ChainDeletion(t *testing.T) { + require := require.New(t) + trie, err := getBasicDB() - require.NoError(t, err) - require.NotNil(t, trie) - newTrie, err := trie.NewView() - require.NoError(t, err) - - err = newTrie.Insert(context.Background(), []byte("k"), []byte("value0")) - require.NoError(t, err) - err = newTrie.Insert(context.Background(), []byte("ke"), []byte("value1")) - require.NoError(t, err) - err = newTrie.Insert(context.Background(), []byte("key"), []byte("value2")) - require.NoError(t, err) - err = newTrie.Insert(context.Background(), []byte("key1"), []byte("value3")) - require.NoError(t, err) - err = newTrie.(*trieView).calculateNodeIDs(context.Background()) - require.NoError(t, err) - root, err := newTrie.getEditableNode(EmptyPath) - require.NoError(t, err) - require.Equal(t, 1, len(root.children)) - - err = newTrie.Remove(context.Background(), []byte("k")) - require.NoError(t, err) - err = newTrie.Remove(context.Background(), []byte("ke")) - require.NoError(t, err) - err = newTrie.Remove(context.Background(), []byte("key")) - require.NoError(t, err) - err = newTrie.Remove(context.Background(), []byte("key1")) - require.NoError(t, err) - err = newTrie.(*trieView).calculateNodeIDs(context.Background()) - require.NoError(t, err) - root, err = newTrie.getEditableNode(EmptyPath) - require.NoError(t, err) - // since all values have been deleted, the nodes should have been 
cleaned up - require.Equal(t, 0, len(root.children)) + require.NoError(err) + require.NotNil(trie) + newTrie, err := trie.NewView( + context.Background(), + ViewChanges{ + BatchOps: []database.BatchOp{ + {Key: []byte("k"), Value: []byte("value0")}, + {Key: []byte("ke"), Value: []byte("value1")}, + {Key: []byte("key"), Value: []byte("value2")}, + {Key: []byte("key1"), Value: []byte("value3")}, + }, + }, + ) + require.NoError(err) + + require.NoError(newTrie.(*view).calculateNodeIDs(context.Background())) + maybeRoot := newTrie.getRoot() + require.NoError(err) + require.True(maybeRoot.HasValue()) + require.Equal([]byte("value0"), maybeRoot.Value().value.Value()) + require.Len(maybeRoot.Value().children, 1) + + newTrie, err = newTrie.NewView( + context.Background(), + ViewChanges{ + BatchOps: []database.BatchOp{ + {Key: []byte("k"), Delete: true}, + {Key: []byte("ke"), Delete: true}, + {Key: []byte("key"), Delete: true}, + {Key: []byte("key1"), Delete: true}, + }, + }, + ) + require.NoError(err) + require.NoError(newTrie.(*view).calculateNodeIDs(context.Background())) + + // trie should be empty + root := newTrie.getRoot() + require.False(root.HasValue()) } -func Test_Trie_Invalidate_Children_On_Edits(t *testing.T) { +func Test_Trie_Invalidate_Siblings_On_Commit(t *testing.T) { + require := require.New(t) + dbTrie, err := getBasicDB() - require.NoError(t, err) - require.NotNil(t, dbTrie) + require.NoError(err) + require.NotNil(dbTrie) - trie, err := dbTrie.NewView() - require.NoError(t, err) + view1, err := dbTrie.NewView(context.Background(), ViewChanges{}) + require.NoError(err) - childTrie1, err := trie.NewView() - require.NoError(t, err) - childTrie2, err := trie.NewView() - require.NoError(t, err) - childTrie3, err := trie.NewView() - require.NoError(t, err) + view2, err := view1.NewView( + context.Background(), + ViewChanges{ + BatchOps: []database.BatchOp{ + {Key: []byte{0}, Value: []byte{0}}, + }, + }, + ) + require.NoError(err) + + // Siblings of view2 + 
sibling1, err := view1.NewView(context.Background(), ViewChanges{}) + require.NoError(err) + sibling2, err := view1.NewView(context.Background(), ViewChanges{}) + require.NoError(err) - require.False(t, childTrie1.(*trieView).isInvalid()) - require.False(t, childTrie2.(*trieView).isInvalid()) - require.False(t, childTrie3.(*trieView).isInvalid()) + require.False(sibling1.(*view).isInvalid()) + require.False(sibling2.(*view).isInvalid()) - err = trie.Insert(context.Background(), []byte{0}, []byte{0}) - require.NoError(t, err) + require.NoError(view1.CommitToDB(context.Background())) + require.NoError(view2.CommitToDB(context.Background())) - require.True(t, childTrie1.(*trieView).isInvalid()) - require.True(t, childTrie2.(*trieView).isInvalid()) - require.True(t, childTrie3.(*trieView).isInvalid()) + require.True(sibling1.(*view).isInvalid()) + require.True(sibling2.(*view).isInvalid()) + require.False(view2.(*view).isInvalid()) } -func Test_Trie_Invalidate_Siblings_On_Commit(t *testing.T) { +func Test_Trie_NodeCollapse(t *testing.T) { + require := require.New(t) + dbTrie, err := getBasicDB() - require.NoError(t, err) - require.NotNil(t, dbTrie) + require.NoError(err) + require.NotNil(dbTrie) - baseView, err := dbTrie.NewView() - require.NoError(t, err) + kvs := []database.BatchOp{ + {Key: []byte("k"), Value: []byte("value0")}, + {Key: []byte("ke"), Value: []byte("value1")}, + {Key: []byte("key"), Value: []byte("value2")}, + {Key: []byte("key1"), Value: []byte("value3")}, + {Key: []byte("key2"), Value: []byte("value4")}, + } - viewToCommit, err := baseView.NewView() - require.NoError(t, err) + trie, err := dbTrie.NewView( + context.Background(), + ViewChanges{ + BatchOps: kvs, + }, + ) + require.NoError(err) - sibling1, err := baseView.NewView() - require.NoError(t, err) - sibling2, err := baseView.NewView() - require.NoError(t, err) + require.NoError(trie.(*view).calculateNodeIDs(context.Background())) - require.False(t, sibling1.(*trieView).isInvalid()) - 
require.False(t, sibling2.(*trieView).isInvalid()) + for _, kv := range kvs { + node, err := trie.getEditableNode(ToKey(kv.Key), true) + require.NoError(err) - require.NoError(t, viewToCommit.Insert(context.Background(), []byte{0}, []byte{0})) - require.NoError(t, viewToCommit.CommitToDB(context.Background())) + require.Equal(kv.Value, node.value.Value()) + } - require.True(t, sibling1.(*trieView).isInvalid()) - require.True(t, sibling2.(*trieView).isInvalid()) - require.False(t, viewToCommit.(*trieView).isInvalid()) -} + // delete some values + deletedKVs, remainingKVs := kvs[:3], kvs[3:] + deleteOps := make([]database.BatchOp, len(deletedKVs)) + for i, kv := range deletedKVs { + deleteOps[i] = database.BatchOp{ + Key: kv.Key, + Delete: true, + } + } -func Test_Trie_NodeCollapse(t *testing.T) { - dbTrie, err := getBasicDB() - require.NoError(t, err) - require.NotNil(t, dbTrie) - trie, err := dbTrie.NewView() - require.NoError(t, err) - - err = trie.Insert(context.Background(), []byte("k"), []byte("value0")) - require.NoError(t, err) - err = trie.Insert(context.Background(), []byte("ke"), []byte("value1")) - require.NoError(t, err) - err = trie.Insert(context.Background(), []byte("key"), []byte("value2")) - require.NoError(t, err) - err = trie.Insert(context.Background(), []byte("key1"), []byte("value3")) - require.NoError(t, err) - err = trie.Insert(context.Background(), []byte("key2"), []byte("value4")) - require.NoError(t, err) - - err = trie.(*trieView).calculateNodeIDs(context.Background()) - require.NoError(t, err) - root, err := trie.getEditableNode(EmptyPath) - require.NoError(t, err) - require.Equal(t, 1, len(root.children)) - - root, err = trie.getEditableNode(EmptyPath) - require.NoError(t, err) - require.Equal(t, 1, len(root.children)) - - firstNode, err := trie.getEditableNode(root.getSingleChildPath()) - require.NoError(t, err) - require.Equal(t, 1, len(firstNode.children)) + trie, err = trie.NewView( + context.Background(), + ViewChanges{ + BatchOps: 
deleteOps, + }, + ) + require.NoError(err) - // delete the middle values - err = trie.Remove(context.Background(), []byte("k")) - require.NoError(t, err) - err = trie.Remove(context.Background(), []byte("ke")) - require.NoError(t, err) - err = trie.Remove(context.Background(), []byte("key")) - require.NoError(t, err) - - err = trie.(*trieView).calculateNodeIDs(context.Background()) - require.NoError(t, err) - - root, err = trie.getEditableNode(EmptyPath) - require.NoError(t, err) - require.Equal(t, 1, len(root.children)) - - firstNode, err = trie.getEditableNode(root.getSingleChildPath()) - require.NoError(t, err) - require.Equal(t, 2, len(firstNode.children)) + require.NoError(trie.(*view).calculateNodeIDs(context.Background())) + + for _, kv := range deletedKVs { + _, err := trie.getEditableNode(ToKey(kv.Key), true) + require.ErrorIs(err, database.ErrNotFound) + } + + // make sure the other values are still there + for _, kv := range remainingKVs { + node, err := trie.getEditableNode(ToKey(kv.Key), true) + require.NoError(err) + + require.Equal(kv.Value, node.value.Value()) + } } func Test_Trie_MultipleStates(t *testing.T) { randCount := int64(0) for _, commitApproach := range []string{"never", "before", "after"} { t.Run(commitApproach, func(t *testing.T) { + require := require.New(t) + r := rand.New(rand.NewSource(randCount)) // #nosec G404 randCount++ rdb := memdb.New() @@ -819,82 +917,95 @@ func Test_Trie_MultipleStates(t *testing.T) { db, err := New( context.Background(), rdb, - Config{ - Tracer: newNoopTracer(), - HistoryLength: 100, - NodeCacheSize: 100, - }, + newDefaultConfig(), ) - require.NoError(t, err) + require.NoError(err) defer db.Close() initialSet := 1000 // Populate initial set of keys - root, err := db.NewView() - require.NoError(t, err) + ops := make([]database.BatchOp, 0, initialSet) + require.NoError(err) kv := [][]byte{} for i := 0; i < initialSet; i++ { k := []byte(strconv.Itoa(i)) kv = append(kv, k) - require.NoError(t, 
root.Insert(context.Background(), k, hashing.ComputeHash256(k))) + ops = append(ops, database.BatchOp{Key: k, Value: hashing.ComputeHash256(k)}) } + root, err := db.NewView( + context.Background(), + ViewChanges{ + BatchOps: ops, + }, + ) + require.NoError(err) // Get initial root _, err = root.GetMerkleRoot(context.Background()) - require.NoError(t, err) + require.NoError(err) if commitApproach == "before" { - require.NoError(t, root.CommitToDB(context.Background())) + require.NoError(root.CommitToDB(context.Background())) } // Populate additional states concurrentStates := []Trie{} for i := 0; i < 5; i++ { - newState, err := root.NewView() - require.NoError(t, err) + newState, err := root.NewView(context.Background(), ViewChanges{}) + require.NoError(err) concurrentStates = append(concurrentStates, newState) } if commitApproach == "after" { - require.NoError(t, root.CommitToDB(context.Background())) + require.NoError(root.CommitToDB(context.Background())) } // Process ops newStart := initialSet + concurrentOps := make([][]database.BatchOp, len(concurrentStates)) for i := 0; i < 100; i++ { if r.Intn(100) < 20 { // New Key - for _, state := range concurrentStates { + for index := range concurrentStates { k := []byte(strconv.Itoa(newStart)) - require.NoError(t, state.Insert(context.Background(), k, hashing.ComputeHash256(k))) + concurrentOps[index] = append(concurrentOps[index], database.BatchOp{Key: k, Value: hashing.ComputeHash256(k)}) } newStart++ } else { // Fetch and update old selectedKey := kv[r.Intn(len(kv))] var pastV []byte - for _, state := range concurrentStates { + for index, state := range concurrentStates { v, err := state.GetValue(context.Background(), selectedKey) - require.NoError(t, err) + require.NoError(err) if pastV == nil { pastV = v } else { - require.Equal(t, pastV, v, "lookup mismatch") + require.Equal(pastV, v) } - require.NoError(t, state.Insert(context.Background(), selectedKey, hashing.ComputeHash256(v))) + concurrentOps[index] = 
append(concurrentOps[index], database.BatchOp{Key: selectedKey, Value: hashing.ComputeHash256(v)}) } } } + for index, state := range concurrentStates { + concurrentStates[index], err = state.NewView( + context.Background(), + ViewChanges{ + BatchOps: concurrentOps[index], + }, + ) + require.NoError(err) + } // Generate roots var pastRoot ids.ID for _, state := range concurrentStates { mroot, err := state.GetMerkleRoot(context.Background()) - require.NoError(t, err) + require.NoError(err) if pastRoot == ids.Empty { pastRoot = mroot } else { - require.Equal(t, pastRoot, mroot, "root mismatch") + require.Equal(pastRoot, mroot) } } }) @@ -908,10 +1019,10 @@ func TestNewViewOnCommittedView(t *testing.T) { require.NoError(err) // Create a view - view1Intf, err := db.NewView() + view1Intf, err := db.NewView(context.Background(), ViewChanges{BatchOps: []database.BatchOp{{Key: []byte{1}, Value: []byte{1}}}}) require.NoError(err) - view1, ok := view1Intf.(*trieView) - require.True(ok) + require.IsType(&view{}, view1Intf) + view1 := view1Intf.(*view) // view1 // | @@ -921,12 +1032,8 @@ func TestNewViewOnCommittedView(t *testing.T) { require.Contains(db.childViews, view1) require.Equal(db, view1.parentTrie) - err = view1.Insert(context.Background(), []byte{1}, []byte{1}) - require.NoError(err) - // Commit the view - err = view1.CommitToDB(context.Background()) - require.NoError(err) + require.NoError(view1.CommitToDB(context.Background())) // view1 (committed) // | @@ -937,10 +1044,10 @@ func TestNewViewOnCommittedView(t *testing.T) { require.Equal(db, view1.parentTrie) // Create a new view on the committed view - view2Intf, err := view1.NewView() + view2Intf, err := view1.NewView(context.Background(), ViewChanges{}) require.NoError(err) - view2, ok := view2Intf.(*trieView) - require.True(ok) + require.IsType(&view{}, view2Intf) + view2 := view2Intf.(*view) // view2 // | @@ -959,10 +1066,10 @@ func TestNewViewOnCommittedView(t *testing.T) { require.Equal([]byte{1}, got) // 
Make another view - view3Intf, err := view2.NewView() + view3Intf, err := view2.NewView(context.Background(), ViewChanges{}) require.NoError(err) - view3, ok := view3Intf.(*trieView) - require.True(ok) + require.IsType(&view{}, view3Intf) + view3 := view3Intf.(*view) // view3 // | @@ -980,8 +1087,7 @@ func TestNewViewOnCommittedView(t *testing.T) { require.Len(db.childViews, 2) // Commit view2 - err = view2.CommitToDB(context.Background()) - require.NoError(err) + require.NoError(view2.CommitToDB(context.Background())) // view3 // | @@ -999,8 +1105,7 @@ func TestNewViewOnCommittedView(t *testing.T) { require.Equal(db, view3.parentTrie) // Commit view3 - err = view3.CommitToDB(context.Background()) - require.NoError(err) + require.NoError(view3.CommitToDB(context.Background())) // view3 being committed invalidates view2 require.True(view2.invalidated) @@ -1009,23 +1114,23 @@ func TestNewViewOnCommittedView(t *testing.T) { require.Equal(db, view3.parentTrie) } -func Test_TrieView_NewView(t *testing.T) { +func Test_View_NewView(t *testing.T) { require := require.New(t) db, err := getBasicDB() require.NoError(err) // Create a view - view1Intf, err := db.NewView() + view1Intf, err := db.NewView(context.Background(), ViewChanges{}) require.NoError(err) - view1, ok := view1Intf.(*trieView) - require.True(ok) + require.IsType(&view{}, view1Intf) + view1 := view1Intf.(*view) // Create a view atop view1 - view2Intf, err := view1.NewView() + view2Intf, err := view1.NewView(context.Background(), ViewChanges{}) require.NoError(err) - view2, ok := view2Intf.(*trieView) - require.True(ok) + require.IsType(&view{}, view2Intf) + view2 := view2Intf.(*view) // view2 // | @@ -1039,14 +1144,13 @@ func Test_TrieView_NewView(t *testing.T) { require.Len(view1.childViews, 1) // Commit view1 - err = view1.CommitToDB(context.Background()) - require.NoError(err) + require.NoError(view1.CommitToDB(context.Background())) // Make another view atop view1 - view3Intf, err := view1.NewView() + 
view3Intf, err := view1.NewView(context.Background(), ViewChanges{}) require.NoError(err) - view3, ok := view3Intf.(*trieView) - require.True(ok) + require.IsType(&view{}, view3Intf) + view3 := view3Intf.(*view) // view3 // | @@ -1062,33 +1166,33 @@ func Test_TrieView_NewView(t *testing.T) { require.NotContains(view1.childViews, view3) // Assert that NewPreallocatedView on an invalid view fails - invalidView := &trieView{invalidated: true} - _, err = invalidView.NewView() + invalidView := &view{invalidated: true} + _, err = invalidView.NewView(context.Background(), ViewChanges{}) require.ErrorIs(err, ErrInvalid) } -func TestTrieViewInvalidate(t *testing.T) { +func TestViewInvalidate(t *testing.T) { require := require.New(t) db, err := getBasicDB() require.NoError(err) // Create a view - view1Intf, err := db.NewView() + view1Intf, err := db.NewView(context.Background(), ViewChanges{}) require.NoError(err) - view1, ok := view1Intf.(*trieView) - require.True(ok) + require.IsType(&view{}, view1Intf) + view1 := view1Intf.(*view) // Create 2 views atop view1 - view2Intf, err := view1.NewView() + view2Intf, err := view1.NewView(context.Background(), ViewChanges{}) require.NoError(err) - view2, ok := view2Intf.(*trieView) - require.True(ok) + require.IsType(&view{}, view2Intf) + view2 := view2Intf.(*view) - view3Intf, err := view1.NewView() + view3Intf, err := view1.NewView(context.Background(), ViewChanges{}) require.NoError(err) - view3, ok := view3Intf.(*trieView) - require.True(ok) + require.IsType(&view{}, view3Intf) + view3 := view3Intf.(*view) // view2 view3 // | / @@ -1105,235 +1209,21 @@ func TestTrieViewInvalidate(t *testing.T) { require.True(view3.invalidated) } -func TestTrieViewMoveChildViewsToView(t *testing.T) { - require := require.New(t) - - db, err := getBasicDB() - require.NoError(err) - - // Create a view - view1Intf, err := db.NewView() - require.NoError(err) - view1, ok := view1Intf.(*trieView) - require.True(ok) - - // Create a view atop view1 - 
view2Intf, err := view1.NewView() - require.NoError(err) - view2, ok := view2Intf.(*trieView) - require.True(ok) - - // Create a view atop view2 - view3Intf, err := view1.NewView() - require.NoError(err) - view3, ok := view3Intf.(*trieView) - require.True(ok) - - // view3 - // | - // view2 - // | - // view1 - // | - // db - - view1.moveChildViewsToView(view2) - - require.Equal(view1, view3.parentTrie) - require.Contains(view1.childViews, view3) - require.Contains(view1.childViews, view2) - require.Len(view1.childViews, 2) -} - -func TestTrieViewInvalidChildrenExcept(t *testing.T) { - require := require.New(t) - - db, err := getBasicDB() - require.NoError(err) - - // Create a view - view1Intf, err := db.NewView() - require.NoError(err) - view1, ok := view1Intf.(*trieView) - require.True(ok) - - // Create 2 views atop view1 - view2Intf, err := view1.NewView() - require.NoError(err) - view2, ok := view2Intf.(*trieView) - require.True(ok) - - view3Intf, err := view1.NewView() - require.NoError(err) - view3, ok := view3Intf.(*trieView) - require.True(ok) - - view1.invalidateChildrenExcept(view2) - - require.False(view2.invalidated) - require.True(view3.invalidated) - require.Contains(view1.childViews, view2) - require.Len(view1.childViews, 1) - - view1.invalidateChildrenExcept(nil) - require.True(view2.invalidated) - require.True(view3.invalidated) - require.Empty(view1.childViews) -} - -func Test_Trie_CommitToParentView_Concurrent(t *testing.T) { - for i := 0; i < 5000; i++ { - dbTrie, err := getBasicDB() - require.NoError(t, err) - require.NotNil(t, dbTrie) - - baseView, err := dbTrie.NewView() - require.NoError(t, err) - - parentView, err := baseView.NewView() - require.NoError(t, err) - err = parentView.Insert(context.Background(), []byte{0}, []byte{0}) - require.NoError(t, err) - - childView1, err := parentView.NewView() - require.NoError(t, err) - err = childView1.Insert(context.Background(), []byte{1}, []byte{1}) - require.NoError(t, err) - - childView2, err := 
childView1.NewView() - require.NoError(t, err) - err = childView2.Insert(context.Background(), []byte{2}, []byte{2}) - require.NoError(t, err) - - var wg sync.WaitGroup - wg.Add(3) - go func() { - defer wg.Done() - require.NoError(t, parentView.CommitToParent(context.Background())) - }() - go func() { - defer wg.Done() - require.NoError(t, childView1.CommitToParent(context.Background())) - }() - go func() { - defer wg.Done() - require.NoError(t, childView2.CommitToParent(context.Background())) - }() - - wg.Wait() - - val0, err := baseView.GetValue(context.Background(), []byte{0}) - require.NoError(t, err) - require.Equal(t, []byte{0}, val0) - - val1, err := baseView.GetValue(context.Background(), []byte{1}) - require.NoError(t, err) - require.Equal(t, []byte{1}, val1) - - val2, err := baseView.GetValue(context.Background(), []byte{2}) - require.NoError(t, err) - require.Equal(t, []byte{2}, val2) - } -} - -func Test_Trie_CommitToParentDB_Concurrent(t *testing.T) { - for i := 0; i < 5000; i++ { - dbTrie, err := getBasicDB() - require.NoError(t, err) - require.NotNil(t, dbTrie) - - parentView, err := dbTrie.NewView() - require.NoError(t, err) - err = parentView.Insert(context.Background(), []byte{0}, []byte{0}) - require.NoError(t, err) - - childView1, err := parentView.NewView() - require.NoError(t, err) - err = childView1.Insert(context.Background(), []byte{1}, []byte{1}) - require.NoError(t, err) - - childView2, err := childView1.NewView() - require.NoError(t, err) - err = childView2.Insert(context.Background(), []byte{2}, []byte{2}) - require.NoError(t, err) - - var wg sync.WaitGroup - wg.Add(3) - go func() { - defer wg.Done() - require.NoError(t, parentView.CommitToParent(context.Background())) - }() - go func() { - defer wg.Done() - require.NoError(t, childView1.CommitToParent(context.Background())) - }() - go func() { - defer wg.Done() - require.NoError(t, childView2.CommitToParent(context.Background())) - }() - - wg.Wait() - - val0, err := 
dbTrie.GetValue(context.Background(), []byte{0}) - require.NoError(t, err) - require.Equal(t, []byte{0}, val0) - - val1, err := dbTrie.GetValue(context.Background(), []byte{1}) - require.NoError(t, err) - require.Equal(t, []byte{1}, val1) - - val2, err := dbTrie.GetValue(context.Background(), []byte{2}) - require.NoError(t, err) - require.Equal(t, []byte{2}, val2) - } -} - -func Test_Trie_ConcurrentReadWrite(t *testing.T) { +func Test_Trie_ConcurrentNewViewAndCommit(t *testing.T) { require := require.New(t) trie, err := getBasicDB() require.NoError(err) require.NotNil(trie) - newTrie, err := trie.NewView() - require.NoError(err) - - var wg sync.WaitGroup - defer wg.Wait() - - wg.Add(1) - go func() { - defer wg.Done() - err := newTrie.Insert(context.Background(), []byte("key"), []byte("value")) - require.NoError(err) - }() - require.Eventually( - func() bool { - value, err := newTrie.GetValue(context.Background(), []byte("key")) - - if err == database.ErrNotFound { - return false - } - - require.NoError(err) - require.Equal([]byte("value"), value) - return true + newTrie, err := trie.NewView( + context.Background(), + ViewChanges{ + BatchOps: []database.BatchOp{ + {Key: []byte("key"), Value: []byte("value0")}, + }, }, - time.Second, - time.Millisecond, ) -} - -func Test_Trie_ConcurrentNewViewAndCommit(t *testing.T) { - require := require.New(t) - - trie, err := getBasicDB() - require.NoError(err) - require.NotNil(trie) - - newTrie, err := trie.NewView() - require.NoError(err) - err = newTrie.Insert(context.Background(), []byte("key"), []byte("value0")) require.NoError(err) var wg sync.WaitGroup @@ -1342,124 +1232,120 @@ func Test_Trie_ConcurrentNewViewAndCommit(t *testing.T) { wg.Add(1) go func() { defer wg.Done() - err := newTrie.CommitToDB(context.Background()) - require.NoError(err) + require.NoError(newTrie.CommitToDB(context.Background())) }() - newView, err := newTrie.NewView() + newView, err := newTrie.NewView(context.Background(), ViewChanges{}) 
require.NoError(err) require.NotNil(newView) } -func Test_Trie_ConcurrentDeleteAndMerkleRoot(t *testing.T) { - require := require.New(t) - - trie, err := getBasicDB() - require.NoError(err) - require.NotNil(trie) - - newTrie, err := trie.NewView() - require.NoError(err) - err = newTrie.Insert(context.Background(), []byte("key"), []byte("value0")) - require.NoError(err) - - var wg sync.WaitGroup - defer wg.Wait() - - wg.Add(1) - go func() { - defer wg.Done() - err := newTrie.Remove(context.Background(), []byte("key")) - require.NoError(err) - }() - - rootID, err := newTrie.GetMerkleRoot(context.Background()) - require.NoError(err) - require.NotZero(rootID) +// Returns the path of the only child of this node. +// Assumes this node has exactly one child. +func getSingleChildKey(n *node, tokenSize int) Key { + for index, entry := range n.children { + return n.key.Extend(ToToken(index, tokenSize), entry.compressedKey) + } + return Key{} } -func Test_Trie_ConcurrentInsertProveCommit(t *testing.T) { - require := require.New(t) - - trie, err := getBasicDB() - require.NoError(err) - require.NotNil(trie) - - newTrie, err := trie.NewView() - require.NoError(err) +func TestTrieCommitToDB(t *testing.T) { + r := require.New(t) - var wg sync.WaitGroup - defer wg.Wait() + type test struct { + name string + trieFunc func() View + expectedErr error + } - wg.Add(1) - go func() { - defer wg.Done() - err := newTrie.Insert(context.Background(), []byte("key2"), []byte("value2")) - require.NoError(err) - }() + // Make a database + db, err := getBasicDB() + r.NoError(err) - require.Eventually( - func() bool { - proof, err := newTrie.GetProof(context.Background(), []byte("key2")) - require.NoError(err) - require.NotNil(proof) + tests := []test{ + { + name: "invalid", + trieFunc: func() View { + nView, err := db.NewView(context.Background(), ViewChanges{}) + r.NoError(err) - if proof.Value.value == nil { - // this is an exclusion proof since the value is nil - // return false to keep waiting 
for Insert to complete. - return false - } - require.Equal([]byte("value2"), proof.Value.value) + // Invalidate the view + nView.(*view).invalidate() - err = newTrie.CommitToDB(context.Background()) - require.NoError(err) - return true + return nView + }, + expectedErr: ErrInvalid, }, - time.Second, - time.Millisecond, - ) -} - -func Test_Trie_ConcurrentInsertAndRangeProof(t *testing.T) { - require := require.New(t) + { + name: "committed", + trieFunc: func() View { + view, err := db.NewView(context.Background(), ViewChanges{}) + r.NoError(err) + + // Commit the view + r.NoError(view.CommitToDB(context.Background())) + + return view + }, + expectedErr: ErrCommitted, + }, + { + name: "parent not database", + trieFunc: func() View { + nView, err := db.NewView(context.Background(), ViewChanges{}) + r.NoError(err) + + // Change the parent + nView.(*view).parentTrie = &view{} + + return nView + }, + expectedErr: ErrParentNotDatabase, + }, + } - trie, err := getBasicDB() - require.NoError(err) - require.NotNil(trie) + for _, tt := range tests { + require := require.New(t) - newTrie, err := trie.NewView() - require.NoError(err) - err = newTrie.Insert(context.Background(), []byte("key1"), []byte("value1")) - require.NoError(err) + trie := tt.trieFunc() + err := trie.CommitToDB(context.Background()) + require.ErrorIs(err, tt.expectedErr) + } - var wg sync.WaitGroup - defer wg.Wait() + // Put 2 key-value pairs + key1, value1 := []byte("key1"), []byte("value1") + key2, value2 := []byte("key2"), []byte("value2") + r.NoError(db.Put(key1, value1)) + r.NoError(db.Put(key2, value2)) + + // Make a view + key3, value3 := []byte("key3"), []byte("value3") + // Delete a key-value pair, modify a key-value pair, + // and insert a new key-value pair + view, err := db.NewView( + context.Background(), + ViewChanges{ + BatchOps: []database.BatchOp{ + {Key: key1, Delete: true}, + {Key: key2, Value: value3}, + {Key: key3, Value: value3}, + }, + }, + ) + r.NoError(err) - wg.Add(1) - go func() { 
- defer wg.Done() - err := newTrie.Insert(context.Background(), []byte("key2"), []byte("value2")) - require.NoError(err) - err = newTrie.Insert(context.Background(), []byte("key3"), []byte("value3")) - require.NoError(err) - }() + // Commit the view + r.NoError(view.CommitToDB(context.Background())) - require.Eventually( - func() bool { - rangeProof, err := newTrie.GetRangeProof(context.Background(), []byte("key1"), []byte("key3"), 3) - require.NoError(err) - require.NotNil(rangeProof) + // Make sure the database has the right values + _, err = db.Get(key1) + r.ErrorIs(err, database.ErrNotFound) - if len(rangeProof.KeyValues) < 3 { - // Wait for the other goroutine to finish inserting - return false - } + got, err := db.Get(key2) + r.NoError(err) + r.Equal(value3, got) - // Make sure we have exactly 3 KeyValues - require.Len(rangeProof.KeyValues, 3) - return true - }, - time.Second, - time.Millisecond, - ) + got, err = db.Get(key3) + r.NoError(err) + r.Equal(value3, got) } diff --git a/avalanchego/x/merkledb/trieview.go b/avalanchego/x/merkledb/trieview.go deleted file mode 100644 index a1fc676d..00000000 --- a/avalanchego/x/merkledb/trieview.go +++ /dev/null @@ -1,1413 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package merkledb - -import ( - "bytes" - "context" - "errors" - "fmt" - "runtime" - "sync" - - "go.opentelemetry.io/otel/attribute" - - oteltrace "go.opentelemetry.io/otel/trace" - - "golang.org/x/exp/slices" - "golang.org/x/sync/errgroup" - - "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/set" -) - -const defaultPreallocationSize = 100 - -var ( - ErrCommitted = errors.New("view has been committed") - ErrInvalid = errors.New("the trie this view was based on has changed, rendering this view invalid") - ErrOddLengthWithValue = errors.New( - "the underlying db only supports whole number of byte keys, so cannot record changes with odd nibble length", - ) - ErrGetPathToFailure = errors.New("GetPathTo failed to return the closest node") - ErrStartAfterEnd = errors.New("start key > end key") - ErrViewIsNotAChild = errors.New("passed in view is required to be a child of the current view") - ErrNoValidRoot = errors.New("a valid root was not provided to the trieView constructor") - - _ TrieView = &trieView{} - - numCPU = runtime.NumCPU() -) - -// Editable view of a trie, collects changes on top of a parent trie. -// Delays adding key/value pairs to the trie. -type trieView struct { - // Must be held when reading/writing fields except validity tracking fields: - // [childViews], [parentTrie], and [invalidated]. - // Only use to lock current trieView or ancestors of the current trieView - lock sync.RWMutex - - // Controls the trie's validity related fields. - // Must be held while reading/writing [childViews], [invalidated], and [parentTrie]. - // Only use to lock current trieView or descendants of the current trieView - // DO NOT grab the [lock] or [validityTrackingLock] of this trie or any ancestor trie while this is held. - validityTrackingLock sync.RWMutex - - // If true, this view has been invalidated and can't be used. 
- // - // Invariant: This view is marked as invalid before any of its ancestors change. - // Since we ensure that all subviews are marked invalid before making an invalidating change - // then if we are still valid at the end of the function, then no corrupting changes could have - // occurred during execution. - // Namely, if we have a method with: - // - // *Code Accessing Ancestor State* - // - // if t.isInvalid() { - // return ErrInvalid - // } - // return [result] - // - // If the invalidated check passes, then we're guaranteed that no ancestor changes occurred - // during the code that accessed ancestor state and the result of that work is still valid - // - // [validityTrackingLock] must be held when reading/writing this field. - invalidated bool - - // the uncommitted parent trie of this view - // [validityTrackingLock] must be held when reading/writing this field. - parentTrie TrieView - - // The valid children of this trie. - // [validityTrackingLock] must be held when reading/writing this field. - childViews []*trieView - - // Changes made to this view. - // May include nodes that haven't been updated - // but will when their ID is recalculated. - changes *changeSummary - - // Key/value pairs that have been inserted/removed but not - // yet reflected in the trie's structure. This allows us to - // defer the cost of updating the trie until we calculate node IDs. - // A Nothing value indicates that the key has been removed. - unappliedValueChanges map[path]Maybe[[]byte] - - db *Database - - // The root of the trie represented by this view. - root *node - - // True if the IDs of nodes in this view need to be recalculated. - needsRecalculation bool - - // If true, this view has been committed and cannot be edited. - // Calls to Insert and Remove will return ErrCommitted. - committed bool - - estimatedSize int -} - -// NewView returns a new view on top of this one. -// Adds the new view to [t.childViews]. -// Assumes [t.lock] is not held. 
-func (t *trieView) NewView() (TrieView, error) { - return t.NewPreallocatedView(defaultPreallocationSize) -} - -// NewPreallocatedView returns a new view on top of this one with memory allocated to store the -// [estimatedChanges] number of key/value changes. -// If this view is already committed, the new view's parent will -// be set to the parent of the current view. -// Otherwise, adds the new view to [t.childViews]. -// Assumes [t.lock] is not held. -func (t *trieView) NewPreallocatedView( - estimatedChanges int, -) (TrieView, error) { - t.lock.RLock() - defer t.lock.RUnlock() - - if t.isInvalid() { - return nil, ErrInvalid - } - - if t.committed { - return t.getParentTrie().NewPreallocatedView(estimatedChanges) - } - - newView, err := newTrieView(t.db, t, t.root.clone(), estimatedChanges) - if err != nil { - return nil, err - } - - t.validityTrackingLock.Lock() - defer t.validityTrackingLock.Unlock() - - if t.invalidated { - return nil, ErrInvalid - } - t.childViews = append(t.childViews, newView) - - return newView, nil -} - -// Creates a new view with the given [parentTrie]. -func newTrieView( - db *Database, - parentTrie TrieView, - root *node, - estimatedSize int, -) (*trieView, error) { - if root == nil { - return nil, ErrNoValidRoot - } - - return &trieView{ - root: root, - db: db, - parentTrie: parentTrie, - changes: newChangeSummary(estimatedSize), - estimatedSize: estimatedSize, - unappliedValueChanges: make(map[path]Maybe[[]byte], estimatedSize), - }, nil -} - -// Creates a new view with the given [parentTrie]. 
-func newTrieViewWithChanges( - db *Database, - parentTrie TrieView, - changes *changeSummary, - estimatedSize int, -) (*trieView, error) { - if changes == nil { - return nil, ErrNoValidRoot - } - - passedRootChange, ok := changes.nodes[RootPath] - if !ok { - return nil, ErrNoValidRoot - } - - return &trieView{ - root: passedRootChange.after, - db: db, - parentTrie: parentTrie, - changes: changes, - estimatedSize: estimatedSize, - unappliedValueChanges: make(map[path]Maybe[[]byte], estimatedSize), - }, nil -} - -// Recalculates the node IDs for all changed nodes in the trie. -// Assumes [t.lock] is held. -func (t *trieView) calculateNodeIDs(ctx context.Context) error { - switch { - case t.isInvalid(): - return ErrInvalid - case !t.needsRecalculation: - return nil - case t.committed: - // Note that this should never happen. If a view is committed, it should - // never be edited, so [t.needsRecalculation] should always be false. - return ErrCommitted - } - - // We wait to create the span until after checking that we need to actually - // calculateNodeIDs to make traces more useful (otherwise there may be a span - // per key modified even though IDs are not re-calculated). - ctx, span := t.db.tracer.Start(ctx, "MerkleDB.trieview.calculateNodeIDs") - defer span.End() - - // ensure that the view under this one is up-to-date before potentially pulling in nodes from it - // getting the Merkle root forces any unupdated nodes to recalculate their ids - if _, err := t.getParentTrie().GetMerkleRoot(ctx); err != nil { - return err - } - - if err := t.applyChangedValuesToTrie(ctx); err != nil { - return err - } - - _, helperSpan := t.db.tracer.Start(ctx, "MerkleDB.trieview.calculateNodeIDsHelper") - defer helperSpan.End() - - // [eg] limits the number of goroutines we start. 
- var eg errgroup.Group - eg.SetLimit(numCPU) - if err := t.calculateNodeIDsHelper(ctx, t.root, &eg); err != nil { - return err - } - if err := eg.Wait(); err != nil { - return err - } - t.needsRecalculation = false - t.changes.rootID = t.root.id - - // ensure no ancestor changes occurred during execution - if t.isInvalid() { - return ErrInvalid - } - - return nil -} - -// Calculates the ID of all descendants of [n] which need to be recalculated, -// and then calculates the ID of [n] itself. -func (t *trieView) calculateNodeIDsHelper(ctx context.Context, n *node, eg *errgroup.Group) error { - var ( - // We use [wg] to wait until all descendants of [n] have been updated. - // Note we can't wait on [eg] because [eg] may have started goroutines - // that aren't calculating IDs for descendants of [n]. - wg sync.WaitGroup - updatedChildren = make(chan *node, len(n.children)) - ) - - for childIndex, child := range n.children { - childIndex, child := childIndex, child - - childPath := n.key + path(childIndex) + child.compressedPath - childNodeChange, ok := t.changes.nodes[childPath] - if !ok { - // This child wasn't changed. - continue - } - - wg.Add(1) - updateChild := func() error { - defer wg.Done() - - if err := t.calculateNodeIDsHelper(ctx, childNodeChange.after, eg); err != nil { - return err - } - - // Note that this will never block - updatedChildren <- childNodeChange.after - return nil - } - - // Try updating the child and its descendants in a goroutine. - if ok := eg.TryGo(updateChild); !ok { - // We're at the goroutine limit; do the work in this goroutine. - if err := updateChild(); err != nil { - return err - } - } - } - - // Wait until all descendants of [n] have been updated. - wg.Wait() - close(updatedChildren) - - for child := range updatedChildren { - n.addChild(child) - } - - // The IDs [n]'s descendants are up to date so we can calculate [n]'s ID. 
- return n.calculateID(t.db.metrics) -} - -// GetProof returns a proof that [bytesPath] is in or not in trie [t]. -func (t *trieView) GetProof(ctx context.Context, key []byte) (*Proof, error) { - _, span := t.db.tracer.Start(ctx, "MerkleDB.trieview.GetProof") - defer span.End() - - t.lock.RLock() - defer t.lock.RUnlock() - - // only need full lock if nodes ids need to be calculated - // looped to ensure that the value didn't change after the lock was released - for t.needsRecalculation { - t.lock.RUnlock() - t.lock.Lock() - if err := t.calculateNodeIDs(ctx); err != nil { - return nil, err - } - t.lock.Unlock() - t.lock.RLock() - } - - return t.getProof(ctx, key) -} - -// Returns a proof that [bytesPath] is in or not in trie [t]. -// Assumes [t.lock] is held. -func (t *trieView) getProof(ctx context.Context, key []byte) (*Proof, error) { - _, span := t.db.tracer.Start(ctx, "MerkleDB.trieview.getProof") - defer span.End() - - proof := &Proof{ - Key: key, - } - - // Get the node at the given path, or the node closest to it. - keyPath := newPath(key) - - proofPath, err := t.getPathTo(keyPath) - if err != nil { - return nil, err - } - - // From root --> node from left --> right. - proof.Path = make([]ProofNode, len(proofPath), len(proofPath)+1) - for i, node := range proofPath { - proof.Path[i] = node.asProofNode() - } - - closestNode := proofPath[len(proofPath)-1] - - if closestNode.key.Compare(keyPath) == 0 { - // There is a node with the given [key]. - proof.Value = Clone(closestNode.value) - return proof, nil - } - - // There is no node with the given [key]. - // If there is a child at the index where the node would be - // if it existed, include that child in the proof. 
- nextIndex := keyPath[len(closestNode.key)] - child, ok := closestNode.children[nextIndex] - if !ok { - return proof, nil - } - - childPath := closestNode.key + path(nextIndex) + child.compressedPath - childNode, err := t.getNodeFromParent(closestNode, childPath) - if err != nil { - return nil, err - } - proof.Path = append(proof.Path, childNode.asProofNode()) - if t.isInvalid() { - return nil, ErrInvalid - } - return proof, nil -} - -// GetRangeProof returns a range proof for (at least part of) the key range [start, end]. -// The returned proof's [KeyValues] has at most [maxLength] values. -// [maxLength] must be > 0. -func (t *trieView) GetRangeProof( - ctx context.Context, - start, end []byte, - maxLength int, -) (*RangeProof, error) { - ctx, span := t.db.tracer.Start(ctx, "MerkleDB.trieview.GetRangeProof") - defer span.End() - - if len(end) > 0 && bytes.Compare(start, end) == 1 { - return nil, ErrStartAfterEnd - } - - if maxLength <= 0 { - return nil, fmt.Errorf("%w but was %d", ErrInvalidMaxLength, maxLength) - } - - t.lock.RLock() - defer t.lock.RUnlock() - - // only need full lock if nodes ids need to be calculated - // looped to ensure that the value didn't change after the lock was released - for t.needsRecalculation { - t.lock.RUnlock() - t.lock.Lock() - if err := t.calculateNodeIDs(ctx); err != nil { - return nil, err - } - t.lock.Unlock() - t.lock.RLock() - } - - var ( - result RangeProof - err error - ) - - result.KeyValues, err = t.getKeyValues( - start, - end, - maxLength, - set.Set[string]{}, - false, /*lock*/ - ) - if err != nil { - return nil, err - } - - // copy values, so edits won't affect the underlying arrays - for i, kv := range result.KeyValues { - result.KeyValues[i] = KeyValue{Key: kv.Key, Value: slices.Clone(kv.Value)} - } - - // This proof may not contain all key-value pairs in [start, end] due to size limitations. 
- // The end proof we provide should be for the last key-value pair in the proof, not for - // the last key-value pair requested, which may not be in this proof. - if len(result.KeyValues) > 0 { - end = result.KeyValues[len(result.KeyValues)-1].Key - } - - if len(end) > 0 { - endProof, err := t.getProof(ctx, end) - if err != nil { - return nil, err - } - result.EndProof = endProof.Path - } - - if len(start) > 0 { - startProof, err := t.getProof(ctx, start) - if err != nil { - return nil, err - } - result.StartProof = startProof.Path - - // strip out any common nodes to reduce proof size - i := 0 - for ; i < len(result.StartProof) && - i < len(result.EndProof) && - result.StartProof[i].KeyPath.Equal(result.EndProof[i].KeyPath); i++ { - } - result.StartProof = result.StartProof[i:] - } - - if len(result.StartProof) == 0 && len(result.EndProof) == 0 && len(result.KeyValues) == 0 { - // If the range is empty, return the root proof. - rootProof, err := t.getProof(ctx, rootKey) - if err != nil { - return nil, err - } - result.EndProof = rootProof.Path - } - if t.isInvalid() { - return nil, ErrInvalid - } - return &result, nil -} - -// CommitToDB commits changes from this trie to the underlying DB. -func (t *trieView) CommitToDB(ctx context.Context) error { - ctx, span := t.db.tracer.Start(ctx, "MerkleDB.trieview.CommitToDB") - defer span.End() - - t.db.commitLock.Lock() - defer t.db.commitLock.Unlock() - - return t.commitToDB(ctx) -} - -// Adds the changes from [trieToCommit] to this trie. -// Assumes [trieToCommit.lock] is held if trieToCommit is not nil. 
-func (t *trieView) commitChanges(ctx context.Context, trieToCommit *trieView) error { - t.lock.Lock() - defer t.lock.Unlock() - - _, span := t.db.tracer.Start(ctx, "MerkleDB.trieview.commitChanges", oteltrace.WithAttributes( - attribute.Int("changeCount", len(t.changes.values)), - )) - defer span.End() - - switch { - case t.isInvalid(): - // don't apply changes to an invalid view - return ErrInvalid - case trieToCommit == nil: - // no changes to apply - return nil - case trieToCommit.getParentTrie() != t: - // trieToCommit needs to be a child of t, otherwise the changes merge would not work - return ErrViewIsNotAChild - case trieToCommit.isInvalid(): - // don't apply changes from an invalid view - return ErrInvalid - } - - // Invalidate all child views except the view being committed. - // Note that we invalidate children before modifying their ancestor [t] - // to uphold the invariant on [t.invalidated]. - t.invalidateChildrenExcept(trieToCommit) - - if err := trieToCommit.calculateNodeIDs(ctx); err != nil { - return err - } - - for key, nodeChange := range trieToCommit.changes.nodes { - if existing, ok := t.changes.nodes[key]; ok { - existing.after = nodeChange.after - } else { - t.changes.nodes[key] = &change[*node]{ - before: nodeChange.before, - after: nodeChange.after, - } - } - } - - for key, valueChange := range trieToCommit.changes.values { - if existing, ok := t.changes.values[key]; ok { - existing.after = valueChange.after - } else { - t.changes.values[key] = &change[Maybe[[]byte]]{ - before: valueChange.before, - after: valueChange.after, - } - } - } - // update this view's root info to match the newly committed root - t.root = trieToCommit.root - t.changes.rootID = trieToCommit.changes.rootID - - // move the children from the incoming trieview to the current trieview - // do this after the current view has been updated - // this allows child views calls to their parent to remain consistent during the move - t.moveChildViewsToView(trieToCommit) - - 
return nil -} - -// CommitToParent commits the changes from this view to its parent Trie -func (t *trieView) CommitToParent(ctx context.Context) error { - // TODO: Only lock the commitlock when the parent is the DB - // TODO: fix concurrency bugs with CommitToParent - t.db.commitLock.Lock() - defer t.db.commitLock.Unlock() - - t.lock.Lock() - defer t.lock.Unlock() - - return t.commitToParent(ctx) -} - -// commitToParent commits the changes from this view to its parent Trie -// assumes [t.lock] is held -func (t *trieView) commitToParent(ctx context.Context) error { - ctx, span := t.db.tracer.Start(ctx, "MerkleDB.trieview.commitToParent") - defer span.End() - - if t.isInvalid() { - return ErrInvalid - } - if t.committed { - return ErrCommitted - } - - // ensure all of this view's changes have been calculated - if err := t.calculateNodeIDs(ctx); err != nil { - return err - } - - // write this view's changes into its parent - if err := t.getParentTrie().commitChanges(ctx, t); err != nil { - return err - } - if t.isInvalid() { - return ErrInvalid - } - - t.committed = true - - return nil -} - -// Commits the changes from [trieToCommit] to this view, -// this view to its parent, and so on until committing to the db. -// Assumes [t.db.commitLock] is held. -func (t *trieView) commitToDB(ctx context.Context) error { - t.lock.Lock() - defer t.lock.Unlock() - - ctx, span := t.db.tracer.Start(ctx, "MerkleDB.trieview.commitToDB", oteltrace.WithAttributes( - attribute.Int("changeCount", len(t.changes.values)), - )) - defer span.End() - - // first merge changes into the parent trie - if err := t.commitToParent(ctx); err != nil { - return err - } - - // now commit the parent trie to the db - return t.getParentTrie().commitToDB(ctx) -} - -// Assumes [t.validityTrackingLock] isn't held. -func (t *trieView) isInvalid() bool { - t.validityTrackingLock.RLock() - defer t.validityTrackingLock.RUnlock() - - return t.invalidated -} - -// Invalidates this view and all descendants. 
-// Assumes [t.validityTrackingLock] isn't held. -func (t *trieView) invalidate() { - t.validityTrackingLock.Lock() - defer t.validityTrackingLock.Unlock() - - t.invalidated = true - - for _, childView := range t.childViews { - childView.invalidate() - } - - // after invalidating the children, they no longer need to be tracked - t.childViews = make([]*trieView, 0, defaultPreallocationSize) -} - -// Invalidates all children of this view. -// Assumes [t.validityTrackingLock] isn't held. -func (t *trieView) invalidateChildren() { - t.invalidateChildrenExcept(nil) -} - -// moveChildViewsToView removes any child views from the trieToCommit and moves them to the current trie view -func (t *trieView) moveChildViewsToView(trieToCommit *trieView) { - t.validityTrackingLock.Lock() - defer t.validityTrackingLock.Unlock() - - trieToCommit.validityTrackingLock.Lock() - defer trieToCommit.validityTrackingLock.Unlock() - - for _, childView := range trieToCommit.childViews { - childView.updateParent(t) - t.childViews = append(t.childViews, childView) - } - trieToCommit.childViews = make([]*trieView, 0, defaultPreallocationSize) -} - -func (t *trieView) updateParent(newParent TrieView) { - t.validityTrackingLock.Lock() - defer t.validityTrackingLock.Unlock() - - t.parentTrie = newParent -} - -// Invalidates all children of this view except [exception]. -// [t.childViews] will only contain the exception after invalidation is complete. -// Assumes [t.validityTrackingLock] isn't held. 
-func (t *trieView) invalidateChildrenExcept(exception *trieView) { - t.validityTrackingLock.Lock() - defer t.validityTrackingLock.Unlock() - - for _, childView := range t.childViews { - if childView != exception { - childView.invalidate() - } - } - - // after invalidating the children, they no longer need to be tracked - t.childViews = make([]*trieView, 0, defaultPreallocationSize) - - // add back in the exception view since it is still valid - if exception != nil { - t.childViews = append(t.childViews, exception) - } -} - -// GetMerkleRoot returns the ID of the root of this trie. -func (t *trieView) GetMerkleRoot(ctx context.Context) (ids.ID, error) { - t.lock.Lock() - defer t.lock.Unlock() - - return t.getMerkleRoot(ctx) -} - -// Returns the ID of the root node of this trie. -// Assumes [t.lock] is held. -func (t *trieView) getMerkleRoot(ctx context.Context) (ids.ID, error) { - if err := t.calculateNodeIDs(ctx); err != nil { - return ids.Empty, err - } - return t.root.id, nil -} - -// Returns up to [maxLength] key/values from keys in closed range [start, end]. -// Acts similarly to the merge step of a merge sort to combine state from the view -// with state from the parent trie. -// If [lock], grabs [t.lock]'s read lock. -// Otherwise assumes [t.lock]'s read lock is held. 
-func (t *trieView) getKeyValues( - start []byte, - end []byte, - maxLength int, - keysToIgnore set.Set[string], - lock bool, -) ([]KeyValue, error) { - if lock { - t.lock.RLock() - defer t.lock.RUnlock() - } - - if maxLength <= 0 { - return nil, fmt.Errorf("%w but was %d", ErrInvalidMaxLength, maxLength) - } - - if t.isInvalid() { - return nil, ErrInvalid - } - - // collect all values that have changed or been deleted - changes := make([]KeyValue, 0, len(t.changes.values)) - for key, change := range t.changes.values { - if change.after.IsNothing() { - // This was deleted - keysToIgnore.Add(string(key.Serialize().Value)) - } else { - changes = append(changes, KeyValue{ - Key: key.Serialize().Value, - Value: change.after.value, - }) - } - } - // sort [changes] so they can be merged with the parent trie's state - slices.SortFunc(changes, func(a, b KeyValue) bool { - return bytes.Compare(a.Key, b.Key) == -1 - }) - - baseKeyValues, err := t.getParentTrie().getKeyValues( - start, - end, - maxLength, - keysToIgnore, - true, /*lock*/ - ) - if err != nil { - return nil, err - } - - var ( - // True if there are more key/value pairs from [baseKeyValues] to add to result - baseKeyValuesFinished = false - // True if there are more key/value pairs from [changes] to add to result - changesFinished = false - // The index of the next key/value pair to add from [baseKeyValues]. - baseKeyValuesIndex = 0 - // The index of the next key/value pair to add from [changes]. 
- changesIndex = 0 - remainingLength = maxLength - hasUpperBound = len(end) > 0 - result = make([]KeyValue, 0, len(baseKeyValues)) - ) - - // keep adding key/value pairs until one of the following: - // * a key that is lexicographically larger than the end key is hit - // * the maxLength is hit - // * no more values are available to add - for remainingLength > 0 { - // the baseKeyValues iterator is finished when we have run out of keys or hit a key greater than the end key - baseKeyValuesFinished = baseKeyValuesFinished || - (baseKeyValuesIndex >= len(baseKeyValues) || (hasUpperBound && bytes.Compare(baseKeyValues[baseKeyValuesIndex].Key, end) == 1)) - - // the changes iterator is finished when we have run out of keys or hit a key greater than the end key - changesFinished = changesFinished || - (changesIndex >= len(changes) || (hasUpperBound && bytes.Compare(changes[changesIndex].Key, end) == 1)) - - // if both the base state and changes are finished, return the result of the merge - if baseKeyValuesFinished && changesFinished { - return result, nil - } - - // one or both iterators still have values, so one will be added to the result - remainingLength-- - - // both still have key/values available, so add the smallest key - if !changesFinished && !baseKeyValuesFinished { - currentChangeState := changes[changesIndex] - currentKeyValues := baseKeyValues[baseKeyValuesIndex] - - switch bytes.Compare(currentChangeState.Key, currentKeyValues.Key) { - case -1: - result = append(result, currentChangeState) - changesIndex++ - case 0: - // the keys are the same, so override the base value with the changed value - result = append(result, currentChangeState) - changesIndex++ - baseKeyValuesIndex++ - case 1: - result = append(result, currentKeyValues) - baseKeyValuesIndex++ - } - continue - } - - // the base state is not finished, but the changes is finished. - // add the next base state value. 
- if !baseKeyValuesFinished { - currentBaseState := baseKeyValues[baseKeyValuesIndex] - result = append(result, currentBaseState) - baseKeyValuesIndex++ - continue - } - - // the base state is finished, but the changes is not finished. - // add the next changes value. - currentChangeState := changes[changesIndex] - result = append(result, currentChangeState) - changesIndex++ - } - - // ensure no ancestor changes occurred during execution - if t.isInvalid() { - return nil, ErrInvalid - } - - return result, nil -} - -func (t *trieView) GetValues(_ context.Context, keys [][]byte) ([][]byte, []error) { - t.lock.RLock() - defer t.lock.RUnlock() - - results := make([][]byte, len(keys)) - valueErrors := make([]error, len(keys)) - - for i, key := range keys { - results[i], valueErrors[i] = t.getValueCopy(newPath(key), false) - } - return results, valueErrors -} - -// GetValue returns the value for the given [key]. -// Returns database.ErrNotFound if it doesn't exist. -func (t *trieView) GetValue(_ context.Context, key []byte) ([]byte, error) { - return t.getValueCopy(newPath(key), true) -} - -// getValueCopy returns a copy of the value for the given [key]. -// Returns database.ErrNotFound if it doesn't exist. 
-func (t *trieView) getValueCopy(key path, lock bool) ([]byte, error) { - val, err := t.getValue(key, lock) - if err != nil { - return nil, err - } - return slices.Clone(val), nil -} - -func (t *trieView) getValue(key path, lock bool) ([]byte, error) { - if lock { - t.lock.RLock() - defer t.lock.RUnlock() - } - - if t.isInvalid() { - return nil, ErrInvalid - } - - if change, ok := t.changes.values[key]; ok { - t.db.metrics.ViewValueCacheHit() - if change.after.IsNothing() { - return nil, database.ErrNotFound - } - return change.after.value, nil - } - t.db.metrics.ViewValueCacheMiss() - - // if we don't have local copy of the key, then grab a copy from the parent trie - value, err := t.getParentTrie().getValue(key, true) - if err != nil { - return nil, err - } - - // ensure no ancestor changes occurred during execution - if t.isInvalid() { - return nil, ErrInvalid - } - - return value, nil -} - -// Insert will upsert the key/value pair into the trie. -func (t *trieView) Insert(_ context.Context, key []byte, value []byte) error { - t.lock.Lock() - defer t.lock.Unlock() - - return t.insert(key, value) -} - -// Assumes [t.lock] is held. -// Assumes [t.validityTrackingLock] isn't held. -func (t *trieView) insert(key []byte, value []byte) error { - if t.committed { - return ErrCommitted - } - if t.isInvalid() { - return ErrInvalid - } - - // the trie has been changed, so invalidate all children and remove them from tracking - t.invalidateChildren() - - valCopy := slices.Clone(value) - - if err := t.recordValueChange(newPath(key), Some(valCopy)); err != nil { - return err - } - - // ensure no ancestor changes occurred during execution - if t.isInvalid() { - return ErrInvalid - } - - return nil -} - -// Remove will delete the value associated with [key] from this trie. -func (t *trieView) Remove(_ context.Context, key []byte) error { - t.lock.Lock() - defer t.lock.Unlock() - - return t.remove(key) -} - -// Assumes [t.lock] is held. 
-// Assumes [t.validityTrackingLock] isn't held. -func (t *trieView) remove(key []byte) error { - if t.committed { - return ErrCommitted - } - - if t.isInvalid() { - return ErrInvalid - } - - // the trie has been changed, so invalidate all children and remove them from tracking - t.invalidateChildren() - - if err := t.recordValueChange(newPath(key), Nothing[[]byte]()); err != nil { - return err - } - - // ensure no ancestor changes occurred during execution - if t.isInvalid() { - return ErrInvalid - } - - return nil -} - -// Assumes [t.lock] is held. -func (t *trieView) applyChangedValuesToTrie(ctx context.Context) error { - _, span := t.db.tracer.Start(ctx, "MerkleDB.trieview.applyChangedValuesToTrie") - defer span.End() - - unappliedValues := t.unappliedValueChanges - t.unappliedValueChanges = make(map[path]Maybe[[]byte], t.estimatedSize) - - for key, change := range unappliedValues { - if change.IsNothing() { - if err := t.removeFromTrie(key); err != nil { - return err - } - } else { - if _, err := t.insertIntoTrie(key, change); err != nil { - return err - } - } - } - return nil -} - -// Merges together nodes in the inclusive descendants of [node] that -// have no value and a single child into one node with a compressed -// path until a node that doesn't meet those criteria is reached. -// [parent] is [node]'s parent. -// Assumes at least one of the following is true: -// * [node] has a value. -// * [node] has children. -// Assumes [t.lock] is held. 
-func (t *trieView) compressNodePath(parent, node *node) error { - // don't collapse into this node if it's the root, doesn't have 1 child, or has a value - if len(node.children) != 1 || node.hasValue() { - return nil - } - - // delete all empty nodes with a single child under [node] - for len(node.children) == 1 && !node.hasValue() { - if err := t.recordNodeDeleted(node); err != nil { - return err - } - - nextNode, err := t.getNodeFromParent(node, node.getSingleChildPath()) - if err != nil { - return err - } - node = nextNode - } - - // [node] is the first node with multiple children. - // combine it with the [node] passed in. - parent.addChild(node) - return t.recordNodeChange(parent) -} - -// Starting from the last node in [nodePath], traverses toward the root -// and deletes each node that has no value and no children. -// Stops when a node with a value or children is reached. -// Assumes [nodePath] is a path from the root to a node. -// Assumes [t.lock] is held. -func (t *trieView) deleteEmptyNodes(nodePath []*node) error { - node := nodePath[len(nodePath)-1] - nextParentIndex := len(nodePath) - 2 - - for ; nextParentIndex >= 0 && len(node.children) == 0 && !node.hasValue(); nextParentIndex-- { - if err := t.recordNodeDeleted(node); err != nil { - return err - } - - parent := nodePath[nextParentIndex] - - parent.removeChild(node) - if err := t.recordNodeChange(parent); err != nil { - return err - } - - node = parent - } - - if nextParentIndex < 0 { - return nil - } - parent := nodePath[nextParentIndex] - - return t.compressNodePath(parent, node) -} - -// Returns the nodes along the path to [key]. -// The first node is the root, and the last node is either the node with the -// given [key], if it's in the trie, or the node with the largest prefix of -// the [key] if it isn't in the trie. -// Always returns at least the root node. 
-func (t *trieView) getPathTo(key path) ([]*node, error) { - var ( - // all paths start at the root - currentNode = t.root - matchedKeyIndex = 0 - nodes = []*node{t.root} - ) - - // while the entire path hasn't been matched - for matchedKeyIndex < len(key) { - // confirm that a child exists and grab its ID before attempting to load it - nextChildEntry, hasChild := currentNode.children[key[matchedKeyIndex]] - - // the nibble for the child entry has now been handled, so increment the matchedPathIndex - matchedKeyIndex += 1 - - if !hasChild || !key[matchedKeyIndex:].HasPrefix(nextChildEntry.compressedPath) { - // there was no child along the path or the child that was there doesn't match the remaining path - return nodes, nil - } - - // the compressed path of the entry there matched the path, so increment the matched index - matchedKeyIndex += len(nextChildEntry.compressedPath) - - // grab the next node along the path - var err error - currentNode, err = t.getNodeWithID(nextChildEntry.id, key[:matchedKeyIndex]) - if err != nil { - return nil, err - } - - // add node to path - nodes = append(nodes, currentNode) - } - return nodes, nil -} - -func getLengthOfCommonPrefix(first, second path) int { - commonIndex := 0 - for len(first) > commonIndex && len(second) > commonIndex && first[commonIndex] == second[commonIndex] { - commonIndex++ - } - return commonIndex -} - -// Get a copy of the node matching the passed key from the trie -// Used by views to get nodes from their ancestors -// assumes that [t.needsRecalculation] is false -func (t *trieView) getEditableNode(key path) (*node, error) { - t.lock.RLock() - defer t.lock.RUnlock() - - if t.isInvalid() { - return nil, ErrInvalid - } - - // grab the node in question - n, err := t.getNodeWithID(ids.Empty, key) - if err != nil { - return nil, err - } - - // ensure no ancestor changes occurred during execution - if t.isInvalid() { - return nil, ErrInvalid - } - - // return a clone of the node, so it can be edited without 
affecting this trie - return n.clone(), nil -} - -// Inserts a key/value pair into the trie. -// Assumes [t.lock] is held. -func (t *trieView) insertIntoTrie( - key path, - value Maybe[[]byte], -) (*node, error) { - // find the node that most closely matches [key] - pathToNode, err := t.getPathTo(key) - if err != nil { - return nil, err - } - - // We're inserting a node whose ancestry is [pathToNode] - // so we'll need to recalculate their IDs. - for _, node := range pathToNode { - if err := t.recordNodeChange(node); err != nil { - return nil, err - } - } - - closestNode := pathToNode[len(pathToNode)-1] - - // a node with that exact path already exists so update its value - if closestNode.key.Compare(key) == 0 { - closestNode.setValue(value) - return closestNode, nil - } - - closestNodeKeyLength := len(closestNode.key) - // A node with the exact key doesn't exist so determine the portion of the - // key that hasn't been matched yet - // Note that [key] has prefix [closestNodeFullPath] but exactMatch was false, - // so [key] must be longer than [closestNodeFullPath] and the following slice won't OOB. - remainingKey := key[closestNodeKeyLength+1:] - - existingChildEntry, hasChild := closestNode.children[key[closestNodeKeyLength]] - // there are no existing nodes along the path [fullPath], so create a new node to insert [value] - if !hasChild { - newNode := newNode( - closestNode, - key, - ) - newNode.setValue(value) - return newNode, t.recordNodeChange(newNode) - } else if err != nil { - return nil, err - } - - // if we have reached this point, then the [fullpath] we are trying to insert and - // the existing path node have some common prefix. - // a new branching node will be created that will represent this common prefix and - // have the existing path node and the value being inserted as children. 
- - // generate the new branch node - branchNode := newNode( - closestNode, - key[:closestNodeKeyLength+1+getLengthOfCommonPrefix(existingChildEntry.compressedPath, remainingKey)], - ) - if err := t.recordNodeChange(closestNode); err != nil { - return nil, err - } - nodeWithValue := branchNode - - if len(key)-len(branchNode.key) == 0 { - // there was no residual path for the inserted key, so the value goes directly into the new branch node - branchNode.setValue(value) - } else { - // generate a new node and add it as a child of the branch node - newNode := newNode( - branchNode, - key, - ) - newNode.setValue(value) - if err := t.recordNodeChange(newNode); err != nil { - return nil, err - } - nodeWithValue = newNode - } - - existingChildKey := key[:closestNodeKeyLength+1] + existingChildEntry.compressedPath - - // the existing child's key is of length: len(closestNodekey) + 1 for the child index + len(existing child's compressed key) - // if that length is less than or equal to the branch node's key that implies that the existing child's key matched the key to be inserted - // since it matched the key to be inserted, it should have been returned by GetPathTo - if len(existingChildKey) <= len(branchNode.key) { - return nil, ErrGetPathToFailure - } - - branchNode.addChildWithoutNode( - existingChildKey[len(branchNode.key)], - existingChildKey[len(branchNode.key)+1:], - existingChildEntry.id, - ) - - return nodeWithValue, t.recordNodeChange(branchNode) -} - -// Records that a node has been changed. -// Assumes [t.lock] is held. -func (t *trieView) recordNodeChange(after *node) error { - return t.recordKeyChange(after.key, after) -} - -// Records that the node associated with the given key has been deleted. -// Assumes [t.lock] is held. -func (t *trieView) recordNodeDeleted(after *node) error { - // don't delete the root. 
- if len(after.key) == 0 { - return t.recordKeyChange(after.key, after) - } - return t.recordKeyChange(after.key, nil) -} - -// Records that the node associated with the given key has been changed. -// Assumes [t.lock] is held. -func (t *trieView) recordKeyChange(key path, after *node) error { - t.needsRecalculation = true - - if existing, ok := t.changes.nodes[key]; ok { - existing.after = after - return nil - } - - before, err := t.getParentTrie().getEditableNode(key) - if err != nil { - if err != database.ErrNotFound { - return err - } - before = nil - } - - t.changes.nodes[key] = &change[*node]{ - before: before, - after: after, - } - return nil -} - -// Records that a key's value has been added or updated. -// Doesn't actually change the trie data structure. -// That's deferred until we calculate node IDs. -// Assumes [t.lock] is held. -func (t *trieView) recordValueChange(key path, value Maybe[[]byte]) error { - t.needsRecalculation = true - - // record the value change so that it can be inserted - // into a trie nodes later - t.unappliedValueChanges[key] = value - - // update the existing change if it exists - if existing, ok := t.changes.values[key]; ok { - existing.after = value - return nil - } - - // grab the before value - var beforeMaybe Maybe[[]byte] - before, err := t.getParentTrie().getValue(key, true) - switch err { - case nil: - beforeMaybe = Some(before) - case database.ErrNotFound: - beforeMaybe = Nothing[[]byte]() - default: - return err - } - - t.changes.values[key] = &change[Maybe[[]byte]]{ - before: beforeMaybe, - after: value, - } - return nil -} - -// Removes the provided [key] from the trie. -// Assumes [t.lock] write lock is held. 
-func (t *trieView) removeFromTrie(key path) error { - nodePath, err := t.getPathTo(key) - if err != nil { - return err - } - - nodeToDelete := nodePath[len(nodePath)-1] - - if nodeToDelete.key.Compare(key) != 0 || !nodeToDelete.hasValue() { - // the key wasn't in the trie or doesn't have a value so there's nothing to do - return nil - } - - // A node with ancestry [nodePath] is being deleted, so we need to recalculate - // all the nodes in this path. - for _, node := range nodePath { - if err := t.recordNodeChange(node); err != nil { - return err - } - } - - nodeToDelete.setValue(Nothing[[]byte]()) - if err := t.recordNodeChange(nodeToDelete); err != nil { - return err - } - - // if the removed node has no children, the node can be removed from the trie - if len(nodeToDelete.children) == 0 { - return t.deleteEmptyNodes(nodePath) - } - - if len(nodePath) == 1 { - return nil - } - parent := nodePath[len(nodePath)-2] - - // merge this node and its descendants into a single node if possible - return t.compressNodePath(parent, nodeToDelete) -} - -// Retrieves the node with the given [key], which is a child of [parent], and -// uses the [parent] node to initialize the child node's ID. -// Returns database.ErrNotFound if the child doesn't exist. -// Assumes [t.lock] write or read lock is held. -func (t *trieView) getNodeFromParent(parent *node, key path) (*node, error) { - // confirm the child exists and get its ID before attempting to load it - if child, exists := parent.children[key[len(parent.key)]]; exists { - return t.getNodeWithID(child.id, key) - } - - return nil, database.ErrNotFound -} - -// Retrieves a node with the given [key]. -// If the node is fetched from [t.parentTrie] and [id] isn't empty, -// sets the node's ID to [id]. -// Returns database.ErrNotFound if the node doesn't exist. -// Assumes [t.lock] write or read lock is held. 
-func (t *trieView) getNodeWithID(id ids.ID, key path) (*node, error) { - // check for the key within the changed nodes - if nodeChange, isChanged := t.changes.nodes[key]; isChanged { - t.db.metrics.ViewNodeCacheHit() - if nodeChange.after == nil { - return nil, database.ErrNotFound - } - return nodeChange.after, nil - } - - // get the node from the parent trie and store a local copy - parentTrieNode, err := t.getParentTrie().getEditableNode(key) - if err != nil { - return nil, err - } - - // only need to initialize the id if it's from the parent trie. - // nodes in the current view change list have already been initialized. - if id != ids.Empty { - parentTrieNode.id = id - } - return parentTrieNode, nil -} - -// Get the parent trie of the view -func (t *trieView) getParentTrie() TrieView { - t.validityTrackingLock.RLock() - defer t.validityTrackingLock.RUnlock() - return t.parentTrie -} diff --git a/avalanchego/x/merkledb/value_node_db.go b/avalanchego/x/merkledb/value_node_db.go new file mode 100644 index 00000000..8ee7d743 --- /dev/null +++ b/avalanchego/x/merkledb/value_node_db.go @@ -0,0 +1,186 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package merkledb + +import ( + "sync" + + "github.com/ava-labs/avalanchego/cache" + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/utils" +) + +var _ database.Iterator = (*iterator)(nil) + +type valueNodeDB struct { + // Holds unused []byte + bufferPool *sync.Pool + + // The underlying storage. + // Keys written to [baseDB] are prefixed with [valueNodePrefix]. + baseDB database.Database + + // If a value is nil, the corresponding key isn't in the trie. + // Paths in [nodeCache] aren't prefixed with [valueNodePrefix]. 
+ nodeCache cache.Cacher[Key, *node] + metrics merkleMetrics + + closed utils.Atomic[bool] +} + +func newValueNodeDB( + db database.Database, + bufferPool *sync.Pool, + metrics merkleMetrics, + cacheSize int, +) *valueNodeDB { + return &valueNodeDB{ + metrics: metrics, + baseDB: db, + bufferPool: bufferPool, + nodeCache: cache.NewSizedLRU(cacheSize, cacheEntrySize), + } +} + +func (db *valueNodeDB) newIteratorWithStartAndPrefix(start, prefix []byte) database.Iterator { + prefixedStart := addPrefixToKey(db.bufferPool, valueNodePrefix, start) + prefixedPrefix := addPrefixToKey(db.bufferPool, valueNodePrefix, prefix) + i := &iterator{ + db: db, + nodeIter: db.baseDB.NewIteratorWithStartAndPrefix(prefixedStart, prefixedPrefix), + } + db.bufferPool.Put(prefixedStart) + db.bufferPool.Put(prefixedPrefix) + return i +} + +func (db *valueNodeDB) Close() { + db.closed.Set(true) +} + +func (db *valueNodeDB) NewBatch() *valueNodeBatch { + return &valueNodeBatch{ + db: db, + ops: make(map[Key]*node, defaultBufferLength), + } +} + +func (db *valueNodeDB) Get(key Key) (*node, error) { + if cachedValue, isCached := db.nodeCache.Get(key); isCached { + db.metrics.ValueNodeCacheHit() + if cachedValue == nil { + return nil, database.ErrNotFound + } + return cachedValue, nil + } + db.metrics.ValueNodeCacheMiss() + + prefixedKey := addPrefixToKey(db.bufferPool, valueNodePrefix, key.Bytes()) + defer db.bufferPool.Put(prefixedKey) + + db.metrics.DatabaseNodeRead() + nodeBytes, err := db.baseDB.Get(prefixedKey) + if err != nil { + return nil, err + } + + return parseNode(key, nodeBytes) +} + +func (db *valueNodeDB) Clear() error { + db.nodeCache.Flush() + return database.AtomicClearPrefix(db.baseDB, db.baseDB, valueNodePrefix) +} + +// Batch of database operations +type valueNodeBatch struct { + db *valueNodeDB + ops map[Key]*node +} + +func (b *valueNodeBatch) Put(key Key, value *node) { + b.ops[key] = value +} + +func (b *valueNodeBatch) Delete(key Key) { + b.ops[key] = nil +} + +// 
Write flushes any accumulated data to the underlying database. +func (b *valueNodeBatch) Write() error { + dbBatch := b.db.baseDB.NewBatch() + for key, n := range b.ops { + b.db.metrics.DatabaseNodeWrite() + b.db.nodeCache.Put(key, n) + prefixedKey := addPrefixToKey(b.db.bufferPool, valueNodePrefix, key.Bytes()) + if n == nil { + if err := dbBatch.Delete(prefixedKey); err != nil { + return err + } + } else if err := dbBatch.Put(prefixedKey, n.bytes()); err != nil { + return err + } + + b.db.bufferPool.Put(prefixedKey) + } + + return dbBatch.Write() +} + +type iterator struct { + db *valueNodeDB + nodeIter database.Iterator + current *node + err error +} + +func (i *iterator) Error() error { + if i.err != nil { + return i.err + } + if i.db.closed.Get() { + return database.ErrClosed + } + return i.nodeIter.Error() +} + +func (i *iterator) Key() []byte { + if i.current == nil { + return nil + } + return i.current.key.Bytes() +} + +func (i *iterator) Value() []byte { + if i.current == nil { + return nil + } + return i.current.value.Value() +} + +func (i *iterator) Next() bool { + i.current = nil + if i.Error() != nil || i.db.closed.Get() { + return false + } + if !i.nodeIter.Next() { + return false + } + + i.db.metrics.DatabaseNodeRead() + key := i.nodeIter.Key() + key = key[valueNodePrefixLen:] + n, err := parseNode(ToKey(key), i.nodeIter.Value()) + if err != nil { + i.err = err + return false + } + + i.current = n + return true +} + +func (i *iterator) Release() { + i.nodeIter.Release() +} diff --git a/avalanchego/x/merkledb/value_node_db_test.go b/avalanchego/x/merkledb/value_node_db_test.go new file mode 100644 index 00000000..224a4fe9 --- /dev/null +++ b/avalanchego/x/merkledb/value_node_db_test.go @@ -0,0 +1,251 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package merkledb + +import ( + "sync" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/database/memdb" + "github.com/ava-labs/avalanchego/utils/maybe" +) + +// Test putting, modifying, deleting, and getting key-node pairs. +func TestValueNodeDB(t *testing.T) { + require := require.New(t) + + baseDB := memdb.New() + + cacheSize := 10_000 + db := newValueNodeDB( + baseDB, + &sync.Pool{ + New: func() interface{} { return make([]byte, 0) }, + }, + &mockMetrics{}, + cacheSize, + ) + + // Getting a key that doesn't exist should return an error. + key := ToKey([]byte{0x01}) + _, err := db.Get(key) + require.ErrorIs(err, database.ErrNotFound) + + // Put a key-node pair. + node1 := &node{ + dbNode: dbNode{ + value: maybe.Some([]byte{0x01}), + }, + key: key, + } + batch := db.NewBatch() + batch.Put(key, node1) + require.NoError(batch.Write()) + + // Get the key-node pair. + node1Read, err := db.Get(key) + require.NoError(err) + require.Equal(node1, node1Read) + + // Delete the key-node pair. + batch = db.NewBatch() + batch.Delete(key) + require.NoError(batch.Write()) + + // Key should be gone now. + _, err = db.Get(key) + require.ErrorIs(err, database.ErrNotFound) + + // Put a key-node pair and delete it in the same batch. + batch = db.NewBatch() + batch.Put(key, node1) + batch.Delete(key) + require.NoError(batch.Write()) + + // Key should still be gone. + _, err = db.Get(key) + require.ErrorIs(err, database.ErrNotFound) + + // Put a key-node pair and overwrite it in the same batch. + node2 := &node{ + dbNode: dbNode{ + value: maybe.Some([]byte{0x02}), + }, + key: key, + } + batch = db.NewBatch() + batch.Put(key, node1) + batch.Put(key, node2) + require.NoError(batch.Write()) + + // Get the key-node pair. + node2Read, err := db.Get(key) + require.NoError(err) + require.Equal(node2, node2Read) + + // Overwrite the key-node pair in a subsequent batch. 
+ batch = db.NewBatch() + batch.Put(key, node1) + require.NoError(batch.Write()) + + // Get the key-node pair. + node1Read, err = db.Get(key) + require.NoError(err) + require.Equal(node1, node1Read) + + // Get the key-node pair from the database, not the cache. + db.nodeCache.Flush() + node1Read, err = db.Get(key) + require.NoError(err) + // Only check value since we're not setting other node fields. + require.Equal(node1.value, node1Read.value) + + // Make sure the key is prefixed in the base database. + it := baseDB.NewIteratorWithPrefix(valueNodePrefix) + defer it.Release() + require.True(it.Next()) + require.False(it.Next()) +} + +func TestValueNodeDBIterator(t *testing.T) { + require := require.New(t) + + baseDB := memdb.New() + cacheSize := 10 + db := newValueNodeDB( + baseDB, + &sync.Pool{ + New: func() interface{} { return make([]byte, 0) }, + }, + &mockMetrics{}, + cacheSize, + ) + + // Put key-node pairs. + for i := 0; i < cacheSize; i++ { + key := ToKey([]byte{byte(i)}) + node := &node{ + dbNode: dbNode{ + value: maybe.Some([]byte{byte(i)}), + }, + key: key, + } + batch := db.NewBatch() + batch.Put(key, node) + require.NoError(batch.Write()) + } + + // Iterate over the key-node pairs. + it := db.newIteratorWithStartAndPrefix(nil, nil) + + i := 0 + for it.Next() { + require.Equal([]byte{byte(i)}, it.Key()) + require.Equal([]byte{byte(i)}, it.Value()) + i++ + } + require.NoError(it.Error()) + require.Equal(cacheSize, i) + it.Release() + + // Iterate over the key-node pairs with a start. + it = db.newIteratorWithStartAndPrefix([]byte{2}, nil) + i = 0 + for it.Next() { + require.Equal([]byte{2 + byte(i)}, it.Key()) + require.Equal([]byte{2 + byte(i)}, it.Value()) + i++ + } + require.NoError(it.Error()) + require.Equal(cacheSize-2, i) + it.Release() + + // Put key-node pairs with a common prefix. 
+ key := ToKey([]byte{0xFF, 0x00}) + n := &node{ + dbNode: dbNode{ + value: maybe.Some([]byte{0xFF, 0x00}), + }, + key: key, + } + batch := db.NewBatch() + batch.Put(key, n) + require.NoError(batch.Write()) + + key = ToKey([]byte{0xFF, 0x01}) + n = &node{ + dbNode: dbNode{ + value: maybe.Some([]byte{0xFF, 0x01}), + }, + key: key, + } + batch = db.NewBatch() + batch.Put(key, n) + require.NoError(batch.Write()) + + // Iterate over the key-node pairs with a prefix. + it = db.newIteratorWithStartAndPrefix(nil, []byte{0xFF}) + i = 0 + for it.Next() { + require.Equal([]byte{0xFF, byte(i)}, it.Key()) + require.Equal([]byte{0xFF, byte(i)}, it.Value()) + i++ + } + require.NoError(it.Error()) + require.Equal(2, i) + + // Iterate over the key-node pairs with a start and prefix. + it = db.newIteratorWithStartAndPrefix([]byte{0xFF, 0x01}, []byte{0xFF}) + i = 0 + for it.Next() { + require.Equal([]byte{0xFF, 0x01}, it.Key()) + require.Equal([]byte{0xFF, 0x01}, it.Value()) + i++ + } + require.NoError(it.Error()) + require.Equal(1, i) + + // Iterate over closed database. 
+ it = db.newIteratorWithStartAndPrefix(nil, nil) + require.True(it.Next()) + require.NoError(it.Error()) + db.Close() + require.False(it.Next()) + err := it.Error() + require.ErrorIs(err, database.ErrClosed) +} + +func TestValueNodeDBClear(t *testing.T) { + require := require.New(t) + cacheSize := 200 + baseDB := memdb.New() + db := newValueNodeDB( + baseDB, + &sync.Pool{ + New: func() interface{} { return make([]byte, 0) }, + }, + &mockMetrics{}, + cacheSize, + ) + + batch := db.NewBatch() + for _, b := range [][]byte{{1}, {2}, {3}} { + batch.Put(ToKey(b), newNode(ToKey(b))) + } + require.NoError(batch.Write()) + + // Assert the db is not empty + iter := baseDB.NewIteratorWithPrefix(valueNodePrefix) + require.True(iter.Next()) + iter.Release() + + require.NoError(db.Clear()) + + iter = baseDB.NewIteratorWithPrefix(valueNodePrefix) + defer iter.Release() + require.False(iter.Next()) +} diff --git a/avalanchego/x/merkledb/view.go b/avalanchego/x/merkledb/view.go new file mode 100644 index 00000000..441cc371 --- /dev/null +++ b/avalanchego/x/merkledb/view.go @@ -0,0 +1,864 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package merkledb + +import ( + "context" + "errors" + "slices" + "sync" + + "go.opentelemetry.io/otel/attribute" + + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/maybe" + + oteltrace "go.opentelemetry.io/otel/trace" +) + +const ( + initKeyValuesSize = 256 + defaultPreallocationSize = 100 +) + +var ( + _ View = (*view)(nil) + + ErrCommitted = errors.New("view has been committed") + ErrInvalid = errors.New("the trie this view was based on has changed, rendering this view invalid") + ErrPartialByteLengthWithValue = errors.New( + "the underlying db only supports whole number of byte keys, so cannot record changes with partial byte lengths", + ) + ErrVisitPathToKey = errors.New("failed to visit expected node during insertion") + ErrStartAfterEnd = errors.New("start key > end key") + ErrNoChanges = errors.New("no changes provided") + ErrParentNotDatabase = errors.New("parent trie is not database") + ErrNodesAlreadyCalculated = errors.New("cannot modify the trie after the node changes have been calculated") +) + +type view struct { + // If true, this view has been committed. + // [commitLock] must be held while accessing this field. + committed bool + commitLock sync.RWMutex + + // tracking bool to enforce that no changes are made to the trie after the nodes have been calculated + nodesAlreadyCalculated utils.Atomic[bool] + + // calculateNodesOnce is a once to ensure that node calculation only occurs a single time + calculateNodesOnce sync.Once + + // Controls the view's validity related fields. + // Must be held while reading/writing [childViews], [invalidated], and [parentTrie]. + // Only use to lock current view or descendants of the current view + // DO NOT grab the [validityTrackingLock] of any ancestor trie while this is held. + validityTrackingLock sync.RWMutex + + // If true, this view has been invalidated and can't be used. 
+ // + // Invariant: This view is marked as invalid before any of its ancestors change. + // Since we ensure that all subviews are marked invalid before making an invalidating change + // then if we are still valid at the end of the function, then no corrupting changes could have + // occurred during execution. + // Namely, if we have a method with: + // + // *Code Accessing Ancestor State* + // + // if v.isInvalid() { + // return ErrInvalid + // } + // return [result] + // + // If the invalidated check passes, then we're guaranteed that no ancestor changes occurred + // during the code that accessed ancestor state and the result of that work is still valid + // + // [validityTrackingLock] must be held when reading/writing this field. + invalidated bool + + // the uncommitted parent trie of this view + // [validityTrackingLock] must be held when reading/writing this field. + parentTrie View + + // The valid children of this view. + // [validityTrackingLock] must be held when reading/writing this field. + childViews []*view + + // Changes made to this view. + // May include nodes that haven't been updated + // but will when their ID is recalculated. + changes *changeSummary + + db *merkleDB + + // The root of the trie represented by this view. + root maybe.Maybe[*node] + + tokenSize int +} + +// NewView returns a new view on top of this view where the passed changes +// have been applied. +// Adds the new view to [v.childViews]. +// Assumes [v.commitLock] isn't held. 
+func (v *view) NewView( + ctx context.Context, + changes ViewChanges, +) (View, error) { + if v.isInvalid() { + return nil, ErrInvalid + } + v.commitLock.RLock() + defer v.commitLock.RUnlock() + + if v.committed { + return v.getParentTrie().NewView(ctx, changes) + } + + if err := v.calculateNodeIDs(ctx); err != nil { + return nil, err + } + + newView, err := newView(v.db, v, changes) + if err != nil { + return nil, err + } + + v.validityTrackingLock.Lock() + defer v.validityTrackingLock.Unlock() + + if v.invalidated { + return nil, ErrInvalid + } + v.childViews = append(v.childViews, newView) + + return newView, nil +} + +// Creates a new view with the given [parentTrie]. +func newView( + db *merkleDB, + parentTrie View, + changes ViewChanges, +) (*view, error) { + newView := &view{ + root: maybe.Bind(parentTrie.getRoot(), (*node).clone), + db: db, + parentTrie: parentTrie, + changes: newChangeSummary(len(changes.BatchOps) + len(changes.MapOps)), + tokenSize: db.tokenSize, + } + + for _, op := range changes.BatchOps { + key := op.Key + if !changes.ConsumeBytes { + key = slices.Clone(op.Key) + } + + newVal := maybe.Nothing[[]byte]() + if !op.Delete { + newVal = maybe.Some(op.Value) + if !changes.ConsumeBytes { + newVal = maybe.Some(slices.Clone(op.Value)) + } + } + if err := newView.recordValueChange(toKey(key), newVal); err != nil { + return nil, err + } + } + for key, val := range changes.MapOps { + if !changes.ConsumeBytes { + val = maybe.Bind(val, slices.Clone[[]byte]) + } + if err := newView.recordValueChange(toKey(stringToByteSlice(key)), val); err != nil { + return nil, err + } + } + return newView, nil +} + +// Creates a view of the db at a historical root using the provided [changes]. +// Returns ErrNoChanges if [changes] is empty. 
+func newViewWithChanges( + db *merkleDB, + changes *changeSummary, +) (*view, error) { + if changes == nil { + return nil, ErrNoChanges + } + + newView := &view{ + root: changes.rootChange.after, + db: db, + parentTrie: db, + changes: changes, + tokenSize: db.tokenSize, + } + // since this is a set of historical changes, all nodes have already been calculated + // since no new changes have occurred, no new calculations need to be done + newView.calculateNodesOnce.Do(func() {}) + newView.nodesAlreadyCalculated.Set(true) + return newView, nil +} + +func (v *view) getTokenSize() int { + return v.tokenSize +} + +func (v *view) getRoot() maybe.Maybe[*node] { + return v.root +} + +// Recalculates the node IDs for all changed nodes in the trie. +// Cancelling [ctx] doesn't cancel calculation. It's used only for tracing. +func (v *view) calculateNodeIDs(ctx context.Context) error { + var err error + v.calculateNodesOnce.Do(func() { + if v.isInvalid() { + err = ErrInvalid + return + } + defer v.nodesAlreadyCalculated.Set(true) + + oldRoot := maybe.Bind(v.root, (*node).clone) + + // We wait to create the span until after checking that we need to actually + // calculateNodeIDs to make traces more useful (otherwise there may be a span + // per key modified even though IDs are not re-calculated). + _, span := v.db.infoTracer.Start(ctx, "MerkleDB.view.calculateNodeIDs") + defer span.End() + + // add all the changed key/values to the nodes of the trie + for key, change := range v.changes.values { + if change.after.IsNothing() { + // Note we're setting [err] defined outside this function. + if err = v.remove(key); err != nil { + return + } + // Note we're setting [err] defined outside this function. 
+ } else if _, err = v.insert(key, change.after); err != nil { + return + } + } + + if !v.root.IsNothing() { + _ = v.db.calculateNodeIDsSema.Acquire(context.Background(), 1) + v.changes.rootID = v.calculateNodeIDsHelper(v.root.Value()) + v.db.calculateNodeIDsSema.Release(1) + } else { + v.changes.rootID = ids.Empty + } + + v.changes.rootChange = change[maybe.Maybe[*node]]{ + before: oldRoot, + after: v.root, + } + + // ensure no ancestor changes occurred during execution + if v.isInvalid() { + err = ErrInvalid + return + } + }) + return err +} + +// Calculates the ID of all descendants of [n] which need to be recalculated, +// and then calculates the ID of [n] itself. +func (v *view) calculateNodeIDsHelper(n *node) ids.ID { + // We use [wg] to wait until all descendants of [n] have been updated. + var wg sync.WaitGroup + + for childIndex := range n.children { + childEntry := n.children[childIndex] + childKey := n.key.Extend(ToToken(childIndex, v.tokenSize), childEntry.compressedKey) + childNodeChange, ok := v.changes.nodes[childKey] + if !ok { + // This child wasn't changed. + continue + } + childEntry.hasValue = childNodeChange.after.hasValue() + + // Try updating the child and its descendants in a goroutine. + if ok := v.db.calculateNodeIDsSema.TryAcquire(1); ok { + wg.Add(1) + go func() { + childEntry.id = v.calculateNodeIDsHelper(childNodeChange.after) + v.db.calculateNodeIDsSema.Release(1) + wg.Done() + }() + } else { + // We're at the goroutine limit; do the work in this goroutine. + childEntry.id = v.calculateNodeIDsHelper(childNodeChange.after) + } + } + + // Wait until all descendants of [n] have been updated. + wg.Wait() + + // The IDs [n]'s descendants are up to date so we can calculate [n]'s ID. + return n.calculateID(v.db.metrics) +} + +// GetProof returns a proof that [bytesPath] is in or not in trie [t]. 
+func (v *view) GetProof(ctx context.Context, key []byte) (*Proof, error) { + _, span := v.db.infoTracer.Start(ctx, "MerkleDB.view.GetProof") + defer span.End() + + if err := v.calculateNodeIDs(ctx); err != nil { + return nil, err + } + + result, err := getProof(v, key) + if err != nil { + return nil, err + } + if v.isInvalid() { + return nil, ErrInvalid + } + return result, nil +} + +// GetRangeProof returns a range proof for (at least part of) the key range [start, end]. +// The returned proof's [KeyValues] has at most [maxLength] values. +// [maxLength] must be > 0. +func (v *view) GetRangeProof( + ctx context.Context, + start maybe.Maybe[[]byte], + end maybe.Maybe[[]byte], + maxLength int, +) (*RangeProof, error) { + _, span := v.db.infoTracer.Start(ctx, "MerkleDB.view.GetRangeProof") + defer span.End() + + if err := v.calculateNodeIDs(ctx); err != nil { + return nil, err + } + result, err := getRangeProof(v, start, end, maxLength) + if err != nil { + return nil, err + } + if v.isInvalid() { + return nil, ErrInvalid + } + return result, nil +} + +// CommitToDB commits changes from this view to the underlying DB. +func (v *view) CommitToDB(ctx context.Context) error { + ctx, span := v.db.infoTracer.Start(ctx, "MerkleDB.view.CommitToDB") + defer span.End() + + v.db.commitLock.Lock() + defer v.db.commitLock.Unlock() + + return v.commitToDB(ctx) +} + +// Commits the changes from [trieToCommit] to this view, +// this view to its parent, and so on until committing to the db. +// Assumes [v.db.commitLock] is held. +func (v *view) commitToDB(ctx context.Context) error { + v.commitLock.Lock() + defer v.commitLock.Unlock() + + ctx, span := v.db.infoTracer.Start(ctx, "MerkleDB.view.commitToDB", oteltrace.WithAttributes( + attribute.Int("changeCount", len(v.changes.values)), + )) + defer span.End() + + // Call this here instead of in [v.db.commitChanges] + // because doing so there would be a deadlock. 
+ if err := v.calculateNodeIDs(ctx); err != nil { + return err + } + + if err := v.db.commitChanges(ctx, v); err != nil { + return err + } + + v.committed = true + + return nil +} + +// Assumes [v.validityTrackingLock] isn't held. +func (v *view) isInvalid() bool { + v.validityTrackingLock.RLock() + defer v.validityTrackingLock.RUnlock() + + return v.invalidated +} + +// Invalidates this view and all descendants. +// Assumes [v.validityTrackingLock] isn't held. +func (v *view) invalidate() { + v.validityTrackingLock.Lock() + defer v.validityTrackingLock.Unlock() + + v.invalidated = true + + for _, childView := range v.childViews { + childView.invalidate() + } + + // after invalidating the children, they no longer need to be tracked + v.childViews = make([]*view, 0, defaultPreallocationSize) +} + +func (v *view) updateParent(newParent View) { + v.validityTrackingLock.Lock() + defer v.validityTrackingLock.Unlock() + + v.parentTrie = newParent +} + +// GetMerkleRoot returns the ID of the root of this view. +func (v *view) GetMerkleRoot(ctx context.Context) (ids.ID, error) { + if err := v.calculateNodeIDs(ctx); err != nil { + return ids.Empty, err + } + return v.changes.rootID, nil +} + +func (v *view) GetValues(ctx context.Context, keys [][]byte) ([][]byte, []error) { + _, span := v.db.debugTracer.Start(ctx, "MerkleDB.view.GetValues", oteltrace.WithAttributes( + attribute.Int("keyCount", len(keys)), + )) + defer span.End() + + results := make([][]byte, len(keys)) + valueErrors := make([]error, len(keys)) + + for i, key := range keys { + results[i], valueErrors[i] = v.getValueCopy(ToKey(key)) + } + return results, valueErrors +} + +// GetValue returns the value for the given [key]. +// Returns database.ErrNotFound if it doesn't exist. 
+func (v *view) GetValue(ctx context.Context, key []byte) ([]byte, error) { + _, span := v.db.debugTracer.Start(ctx, "MerkleDB.view.GetValue") + defer span.End() + + return v.getValueCopy(ToKey(key)) +} + +// getValueCopy returns a copy of the value for the given [key]. +// Returns database.ErrNotFound if it doesn't exist. +func (v *view) getValueCopy(key Key) ([]byte, error) { + val, err := v.getValue(key) + if err != nil { + return nil, err + } + return slices.Clone(val), nil +} + +func (v *view) getValue(key Key) ([]byte, error) { + if v.isInvalid() { + return nil, ErrInvalid + } + + if change, ok := v.changes.values[key]; ok { + v.db.metrics.ViewValueCacheHit() + if change.after.IsNothing() { + return nil, database.ErrNotFound + } + return change.after.Value(), nil + } + v.db.metrics.ViewValueCacheMiss() + + // if we don't have local copy of the key, then grab a copy from the parent trie + value, err := v.getParentTrie().getValue(key) + if err != nil { + return nil, err + } + + // ensure no ancestor changes occurred during execution + if v.isInvalid() { + return nil, ErrInvalid + } + + return value, nil +} + +// Must not be called after [calculateNodeIDs] has returned. +func (v *view) remove(key Key) error { + if v.nodesAlreadyCalculated.Get() { + return ErrNodesAlreadyCalculated + } + + // confirm a node exists with a value + keyNode, err := v.getNode(key, true) + if err != nil { + if errors.Is(err, database.ErrNotFound) { + // [key] isn't in the trie. + return nil + } + return err + } + + if !keyNode.hasValue() { + // [key] doesn't have a value. 
+ return nil + } + + // if the node exists and contains a value + // mark all ancestor for change + // grab parent and grandparent nodes for path compression + var grandParent, parent, nodeToDelete *node + if err := visitPathToKey(v, key, func(n *node) error { + grandParent = parent + parent = nodeToDelete + nodeToDelete = n + return v.recordNodeChange(n) + }); err != nil { + return err + } + + nodeToDelete.setValue(maybe.Nothing[[]byte]()) + + // if the removed node has no children, the node can be removed from the trie + if len(nodeToDelete.children) == 0 { + if err := v.recordNodeDeleted(nodeToDelete); err != nil { + return err + } + + if nodeToDelete.key == v.root.Value().key { + // We deleted the root. The trie is empty now. + v.root = maybe.Nothing[*node]() + return nil + } + + // Note [parent] != nil since [nodeToDelete.key] != [v.root.key]. + // i.e. There's the root and at least one more node. + parent.removeChild(nodeToDelete, v.tokenSize) + + // merge the parent node and its child into a single node if possible + return v.compressNodePath(grandParent, parent) + } + + // merge this node and its descendants into a single node if possible + return v.compressNodePath(parent, nodeToDelete) +} + +// Merges together nodes in the inclusive descendants of [n] that +// have no value and a single child into one node with a compressed +// path until a node that doesn't meet those criteria is reached. +// [parent] is [n]'s parent. If [parent] is nil, [n] is the root +// node and [v.root] is updated to [n]. +// Assumes at least one of the following is true: +// * [n] has a value. +// * [n] has children. +// Must not be called after [calculateNodeIDs] has returned. 
+func (v *view) compressNodePath(parent, n *node) error { + if v.nodesAlreadyCalculated.Get() { + return ErrNodesAlreadyCalculated + } + + if len(n.children) != 1 || n.hasValue() { + return nil + } + + if err := v.recordNodeDeleted(n); err != nil { + return err + } + + var ( + childEntry *child + childKey Key + ) + // There is only one child, but we don't know the index. + // "Cycle" over the key/values to find the only child. + // Note this iteration once because len(node.children) == 1. + for index, entry := range n.children { + childKey = n.key.Extend(ToToken(index, v.tokenSize), entry.compressedKey) + childEntry = entry + } + + if parent == nil { + root, err := v.getNode(childKey, childEntry.hasValue) + if err != nil { + return err + } + v.root = maybe.Some(root) + return nil + } + + parent.setChildEntry(childKey.Token(parent.key.length, v.tokenSize), + &child{ + compressedKey: childKey.Skip(parent.key.length + v.tokenSize), + id: childEntry.id, + hasValue: childEntry.hasValue, + }) + return v.recordNodeChange(parent) +} + +// Get a copy of the node matching the passed key from the view. +// Used by views to get nodes from their ancestors. +func (v *view) getEditableNode(key Key, hadValue bool) (*node, error) { + if v.isInvalid() { + return nil, ErrInvalid + } + + // grab the node in question + n, err := v.getNode(key, hadValue) + if err != nil { + return nil, err + } + + // ensure no ancestor changes occurred during execution + if v.isInvalid() { + return nil, ErrInvalid + } + + // return a clone of the node, so it can be edited without affecting this view + return n.clone(), nil +} + +// insert a key/value pair into the correct node of the trie. +// Must not be called after [calculateNodeIDs] has returned. +func (v *view) insert( + key Key, + value maybe.Maybe[[]byte], +) (*node, error) { + if v.nodesAlreadyCalculated.Get() { + return nil, ErrNodesAlreadyCalculated + } + + if v.root.IsNothing() { + // the trie is empty, so create a new root node. 
+ root := newNode(key) + root.setValue(value) + v.root = maybe.Some(root) + return root, v.recordNewNode(root) + } + + // Find the node that most closely matches [key]. + var closestNode *node + if err := visitPathToKey(v, key, func(n *node) error { + closestNode = n + // Need to recalculate ID for all nodes on path to [key]. + return v.recordNodeChange(n) + }); err != nil { + return nil, err + } + + if closestNode == nil { + // [v.root.key] isn't a prefix of [key]. + var ( + oldRoot = v.root.Value() + commonPrefixLength = getLengthOfCommonPrefix(oldRoot.key, key, 0 /*offset*/, v.tokenSize) + commonPrefix = oldRoot.key.Take(commonPrefixLength) + newRoot = newNode(commonPrefix) + oldRootID = oldRoot.calculateID(v.db.metrics) + ) + + // Call addChildWithID instead of addChild so the old root is added + // to the new root with the correct ID. + // TODO: + // [oldRootID] shouldn't need to be calculated here. + // Either oldRootID should already be calculated or will be calculated at the end with the other nodes + // Initialize the v.changes.rootID during newView and then use that here instead of oldRootID + newRoot.addChildWithID(oldRoot, v.tokenSize, oldRootID) + if err := v.recordNewNode(newRoot); err != nil { + return nil, err + } + v.root = maybe.Some(newRoot) + + closestNode = newRoot + } + + // a node with that exact key already exists so update its value + if closestNode.key == key { + closestNode.setValue(value) + // closestNode was already marked as changed in the ancestry loop above + return closestNode, nil + } + + // A node with the exact key doesn't exist so determine the portion of the + // key that hasn't been matched yet + // Note that [key] has prefix [closestNode.key], so [key] must be longer + // and the following index won't OOB. 
+ existingChildEntry, hasChild := closestNode.children[key.Token(closestNode.key.length, v.tokenSize)] + if !hasChild { + // there are no existing nodes along the key [key], so create a new node to insert [value] + newNode := newNode(key) + newNode.setValue(value) + closestNode.addChild(newNode, v.tokenSize) + return newNode, v.recordNewNode(newNode) + } + + // if we have reached this point, then the [key] we are trying to insert and + // the existing path node have some common prefix. + // a new branching node will be created that will represent this common prefix and + // have the existing path node and the value being inserted as children. + + // generate the new branch node + // find how many tokens are common between the existing child's compressed key and + // the current key(offset by the closest node's key), + // then move all the common tokens into the branch node + commonPrefixLength := getLengthOfCommonPrefix( + existingChildEntry.compressedKey, + key, + closestNode.key.length+v.tokenSize, + v.tokenSize, + ) + + if existingChildEntry.compressedKey.length <= commonPrefixLength { + // Since the compressed key is shorter than the common prefix, + // we should have visited [existingChildEntry] in [visitPathToKey]. 
+ return nil, ErrVisitPathToKey + } + + branchNode := newNode(key.Take(closestNode.key.length + v.tokenSize + commonPrefixLength)) + closestNode.addChild(branchNode, v.tokenSize) + nodeWithValue := branchNode + + if key.length == branchNode.key.length { + // the branch node has exactly the key to be inserted as its key, so set the value on the branch node + branchNode.setValue(value) + } else { + // the key to be inserted is a child of the branch node + // create a new node and add the value to it + newNode := newNode(key) + newNode.setValue(value) + branchNode.addChild(newNode, v.tokenSize) + if err := v.recordNewNode(newNode); err != nil { + return nil, err + } + nodeWithValue = newNode + } + + // add the existing child onto the branch node + branchNode.setChildEntry( + existingChildEntry.compressedKey.Token(commonPrefixLength, v.tokenSize), + &child{ + compressedKey: existingChildEntry.compressedKey.Skip(commonPrefixLength + v.tokenSize), + id: existingChildEntry.id, + hasValue: existingChildEntry.hasValue, + }) + + return nodeWithValue, v.recordNewNode(branchNode) +} + +func getLengthOfCommonPrefix(first, second Key, secondOffset int, tokenSize int) int { + commonIndex := 0 + for first.length > commonIndex && second.length > commonIndex+secondOffset && + first.Token(commonIndex, tokenSize) == second.Token(commonIndex+secondOffset, tokenSize) { + commonIndex += tokenSize + } + return commonIndex +} + +// Records that a node has been created. +// Must not be called after [calculateNodeIDs] has returned. +func (v *view) recordNewNode(after *node) error { + return v.recordKeyChange(after.key, after, after.hasValue(), true /* newNode */) +} + +// Records that an existing node has been changed. +// Must not be called after [calculateNodeIDs] has returned. 
+func (v *view) recordNodeChange(after *node) error { + return v.recordKeyChange(after.key, after, after.hasValue(), false /* newNode */) +} + +// Records that the node associated with the given key has been deleted. +// Must not be called after [calculateNodeIDs] has returned. +func (v *view) recordNodeDeleted(after *node) error { + return v.recordKeyChange(after.key, nil, after.hasValue(), false /* newNode */) +} + +// Records that the node associated with the given key has been changed. +// If it is an existing node, record what its value was before it was changed. +// Must not be called after [calculateNodeIDs] has returned. +func (v *view) recordKeyChange(key Key, after *node, hadValue bool, newNode bool) error { + if v.nodesAlreadyCalculated.Get() { + return ErrNodesAlreadyCalculated + } + + if existing, ok := v.changes.nodes[key]; ok { + existing.after = after + return nil + } + + if newNode { + v.changes.nodes[key] = &change[*node]{ + after: after, + } + return nil + } + + before, err := v.getParentTrie().getEditableNode(key, hadValue) + if err != nil { + return err + } + v.changes.nodes[key] = &change[*node]{ + before: before, + after: after, + } + return nil +} + +// Records that a key's value has been added or updated. +// Doesn't actually change the trie data structure. +// That's deferred until we call [calculateNodeIDs]. +// Must not be called after [calculateNodeIDs] has returned. 
+func (v *view) recordValueChange(key Key, value maybe.Maybe[[]byte]) error { + if v.nodesAlreadyCalculated.Get() { + return ErrNodesAlreadyCalculated + } + + // update the existing change if it exists + if existing, ok := v.changes.values[key]; ok { + existing.after = value + return nil + } + + // grab the before value + var beforeMaybe maybe.Maybe[[]byte] + before, err := v.getParentTrie().getValue(key) + switch err { + case nil: + beforeMaybe = maybe.Some(before) + case database.ErrNotFound: + beforeMaybe = maybe.Nothing[[]byte]() + default: + return err + } + + v.changes.values[key] = &change[maybe.Maybe[[]byte]]{ + before: beforeMaybe, + after: value, + } + return nil +} + +// Retrieves a node with the given [key]. +// If the node is fetched from [v.parentTrie] and [id] isn't empty, +// sets the node's ID to [id]. +// If the node is loaded from the baseDB, [hasValue] determines which database the node is stored in. +// Returns database.ErrNotFound if the node doesn't exist. +func (v *view) getNode(key Key, hasValue bool) (*node, error) { + // check for the key within the changed nodes + if nodeChange, isChanged := v.changes.nodes[key]; isChanged { + v.db.metrics.ViewNodeCacheHit() + if nodeChange.after == nil { + return nil, database.ErrNotFound + } + return nodeChange.after, nil + } + + // get the node from the parent trie and store a local copy + return v.getParentTrie().getEditableNode(key, hasValue) +} + +// Get the parent trie of the view +func (v *view) getParentTrie() View { + v.validityTrackingLock.RLock() + defer v.validityTrackingLock.RUnlock() + return v.parentTrie +} diff --git a/avalanchego/x/merkledb/view_iterator.go b/avalanchego/x/merkledb/view_iterator.go new file mode 100644 index 00000000..14f1b3bd --- /dev/null +++ b/avalanchego/x/merkledb/view_iterator.go @@ -0,0 +1,170 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package merkledb + +import ( + "bytes" + "slices" + + "github.com/ava-labs/avalanchego/database" +) + +func (v *view) NewIterator() database.Iterator { + return v.NewIteratorWithStartAndPrefix(nil, nil) +} + +func (v *view) NewIteratorWithStart(start []byte) database.Iterator { + return v.NewIteratorWithStartAndPrefix(start, nil) +} + +func (v *view) NewIteratorWithPrefix(prefix []byte) database.Iterator { + return v.NewIteratorWithStartAndPrefix(nil, prefix) +} + +func (v *view) NewIteratorWithStartAndPrefix(start, prefix []byte) database.Iterator { + var ( + changes = make([]KeyChange, 0, len(v.changes.values)) + startKey = ToKey(start) + prefixKey = ToKey(prefix) + ) + + for key, change := range v.changes.values { + if len(start) > 0 && startKey.Greater(key) || !key.HasPrefix(prefixKey) { + continue + } + changes = append(changes, KeyChange{ + Key: key.Bytes(), + Value: change.after, + }) + } + + // sort [changes] so they can be merged with the parent trie's state + slices.SortFunc(changes, func(a, b KeyChange) int { + return bytes.Compare(a.Key, b.Key) + }) + + return &viewIterator{ + view: v, + parentIter: v.parentTrie.NewIteratorWithStartAndPrefix(start, prefix), + sortedChanges: changes, + } +} + +// viewIterator walks over both the in memory database and the underlying database +// at the same time. +type viewIterator struct { + view *view + parentIter database.Iterator + + key, value []byte + err error + + sortedChanges []KeyChange + + initialized, parentIterExhausted bool +} + +// Next moves the iterator to the next key/value pair. It returns whether the +// iterator is exhausted. 
We must pay careful attention to set the proper values +// based on if the in memory changes or the underlying db should be read next +func (it *viewIterator) Next() bool { + switch { + case it.view.isInvalid(): + it.key = nil + it.value = nil + it.err = ErrInvalid + return false + case !it.initialized: + it.parentIterExhausted = !it.parentIter.Next() + it.initialized = true + } + + for { + switch { + case it.parentIterExhausted && len(it.sortedChanges) == 0: + // there are no more changes or underlying key/values + it.key = nil + it.value = nil + return false + case it.parentIterExhausted: + // there are no more underlying key/values, so use the local changes + nextKeyValue := it.sortedChanges[0] + + // move to next change + it.sortedChanges = it.sortedChanges[1:] + + // If current change is not a deletion, return it. + // Otherwise go to next loop iteration. + if !nextKeyValue.Value.IsNothing() { + it.key = nextKeyValue.Key + it.value = nextKeyValue.Value.Value() + return true + } + case len(it.sortedChanges) == 0: + it.key = it.parentIter.Key() + it.value = it.parentIter.Value() + it.parentIterExhausted = !it.parentIter.Next() + return true + default: + memKey := it.sortedChanges[0].Key + memValue := it.sortedChanges[0].Value + + parentKey := it.parentIter.Key() + + switch bytes.Compare(memKey, parentKey) { + case -1: + // The current change has a smaller key than the parent key. + // Move to the next change. + it.sortedChanges = it.sortedChanges[1:] + + // If current change is not a deletion, return it. + // Otherwise, go to next loop iteration. 
+ if memValue.HasValue() { + it.key = memKey + it.value = slices.Clone(memValue.Value()) + return true + } + case 1: + // The parent key is smaller, so return it and iterate the parent iterator + it.key = parentKey + it.value = it.parentIter.Value() + it.parentIterExhausted = !it.parentIter.Next() + return true + default: + // the keys are the same, so use the local change and + // iterate both the sorted changes and the parent iterator + it.sortedChanges = it.sortedChanges[1:] + it.parentIterExhausted = !it.parentIter.Next() + + if memValue.HasValue() { + it.key = memKey + it.value = slices.Clone(memValue.Value()) + return true + } + } + } + } +} + +func (it *viewIterator) Error() error { + if it.err != nil { + return it.err + } + return it.parentIter.Error() +} + +func (it *viewIterator) Key() []byte { + return it.key +} + +func (it *viewIterator) Value() []byte { + return it.value +} + +func (it *viewIterator) Release() { + it.key = nil + it.value = nil + it.sortedChanges = nil + it.parentIter.Release() +} diff --git a/avalanchego/x/merkledb/view_iterator_test.go b/avalanchego/x/merkledb/view_iterator_test.go new file mode 100644 index 00000000..9caa8d20 --- /dev/null +++ b/avalanchego/x/merkledb/view_iterator_test.go @@ -0,0 +1,307 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package merkledb + +import ( + "bytes" + "context" + "math/rand" + "sort" + "testing" + "time" + + "github.com/stretchr/testify/require" + "golang.org/x/exp/maps" + + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/utils/maybe" +) + +func Test_View_Iterator(t *testing.T) { + require := require.New(t) + + key1 := []byte("hello1") + value1 := []byte("world1") + + key2 := []byte("hello2") + value2 := []byte("world2") + + db, err := getBasicDB() + require.NoError(err) + + require.NoError(db.Put(key1, value1)) + require.NoError(db.Put(key2, value2)) + + view, err := db.NewView(context.Background(), ViewChanges{}) + require.NoError(err) + iterator := view.NewIterator() + require.NotNil(iterator) + + defer iterator.Release() + + require.True(iterator.Next()) + require.Equal(key1, iterator.Key()) + require.Equal(value1, iterator.Value()) + + require.True(iterator.Next()) + require.Equal(key2, iterator.Key()) + require.Equal(value2, iterator.Value()) + + require.False(iterator.Next()) + require.Nil(iterator.Key()) + require.Nil(iterator.Value()) + require.NoError(iterator.Error()) +} + +func Test_View_Iterator_DBClosed(t *testing.T) { + require := require.New(t) + + key1 := []byte("hello1") + value1 := []byte("world1") + + db, err := getBasicDB() + require.NoError(err) + + require.NoError(db.Put(key1, value1)) + + view, err := db.NewView(context.Background(), ViewChanges{}) + require.NoError(err) + iterator := view.NewIterator() + require.NotNil(iterator) + + defer iterator.Release() + + require.NoError(db.Close()) + + require.False(iterator.Next()) + require.Nil(iterator.Key()) + require.Nil(iterator.Value()) + err = iterator.Error() + require.ErrorIs(err, ErrInvalid) +} + +// Test_View_IteratorStart tests to make sure the iterator can be configured to +// start midway through the database. 
+func Test_View_IteratorStart(t *testing.T) { + require := require.New(t) + db, err := getBasicDB() + require.NoError(err) + + key1 := []byte("hello1") + value1 := []byte("world1") + + key2 := []byte("hello2") + value2 := []byte("world2") + + require.NoError(db.Put(key1, value1)) + require.NoError(db.Put(key2, value2)) + + view, err := db.NewView(context.Background(), ViewChanges{}) + require.NoError(err) + iterator := view.NewIteratorWithStart(key2) + require.NotNil(iterator) + + defer iterator.Release() + + require.True(iterator.Next()) + require.Equal(key2, iterator.Key()) + require.Equal(value2, iterator.Value()) + + require.False(iterator.Next()) + require.Nil(iterator.Key()) + require.Nil(iterator.Value()) + require.NoError(iterator.Error()) +} + +// Test_View_IteratorPrefix tests to make sure the iterator can be configured to skip +// keys missing the provided prefix. +func Test_View_IteratorPrefix(t *testing.T) { + require := require.New(t) + db, err := getBasicDB() + require.NoError(err) + + key1 := []byte("hello") + value1 := []byte("world1") + + key2 := []byte("goodbye") + value2 := []byte("world2") + + key3 := []byte("joy") + value3 := []byte("world3") + + require.NoError(db.Put(key1, value1)) + require.NoError(db.Put(key2, value2)) + require.NoError(db.Put(key3, value3)) + + view, err := db.NewView(context.Background(), ViewChanges{}) + require.NoError(err) + iterator := view.NewIteratorWithPrefix([]byte("h")) + require.NotNil(iterator) + + defer iterator.Release() + + require.True(iterator.Next()) + require.Equal(key1, iterator.Key()) + require.Equal(value1, iterator.Value()) + + require.False(iterator.Next()) + require.Nil(iterator.Key()) + require.Nil(iterator.Value()) + require.NoError(iterator.Error()) +} + +// Test_View_IteratorStartPrefix tests to make sure that the iterator can start +// midway through the database while skipping a prefix. 
+func Test_View_IteratorStartPrefix(t *testing.T) { + require := require.New(t) + db, err := getBasicDB() + require.NoError(err) + + key1 := []byte("hello1") + value1 := []byte("world1") + + key2 := []byte("z") + value2 := []byte("world2") + + key3 := []byte("hello3") + value3 := []byte("world3") + + require.NoError(db.Put(key1, value1)) + require.NoError(db.Put(key2, value2)) + require.NoError(db.Put(key3, value3)) + + view, err := db.NewView(context.Background(), ViewChanges{}) + require.NoError(err) + iterator := view.NewIteratorWithStartAndPrefix(key1, []byte("h")) + require.NotNil(iterator) + + defer iterator.Release() + + require.True(iterator.Next()) + require.Equal(key1, iterator.Key()) + require.Equal(value1, iterator.Value()) + + require.True(iterator.Next()) + require.Equal(key3, iterator.Key()) + require.Equal(value3, iterator.Value()) + + require.False(iterator.Next()) + require.Nil(iterator.Key()) + require.Nil(iterator.Value()) + require.NoError(iterator.Error()) +} + +// Test view iteration by creating a stack of views, +// inserting random key/value pairs into them, and +// iterating over the last view. 
+func Test_View_Iterator_Random(t *testing.T) { + require := require.New(t) + now := time.Now().UnixNano() + t.Logf("seed: %d", now) + rand := rand.New(rand.NewSource(now)) // #nosec G404 + + var ( + numKeyChanges = 5_000 + maxKeyLen = 16 + maxValLen = 16 + ) + + keyChanges := []KeyChange{} + for i := 0; i < numKeyChanges; i++ { + key := make([]byte, rand.Intn(maxKeyLen)) + _, _ = rand.Read(key) + value := make([]byte, rand.Intn(maxValLen)) + _, _ = rand.Read(value) + keyChanges = append(keyChanges, KeyChange{ + Key: key, + Value: maybe.Some(value), + }) + } + + db, err := getBasicDB() + require.NoError(err) + + for i := 0; i < numKeyChanges/4; i++ { + require.NoError(db.Put(keyChanges[i].Key, keyChanges[i].Value.Value())) + } + + ops := make([]database.BatchOp, 0, numKeyChanges/4) + for i := numKeyChanges / 4; i < 2*numKeyChanges/4; i++ { + ops = append(ops, database.BatchOp{Key: keyChanges[i].Key, Value: keyChanges[i].Value.Value()}) + } + + view1, err := db.NewView(context.Background(), ViewChanges{BatchOps: ops}) + require.NoError(err) + + ops = make([]database.BatchOp, 0, numKeyChanges/4) + for i := 2 * numKeyChanges / 4; i < 3*numKeyChanges/4; i++ { + ops = append(ops, database.BatchOp{Key: keyChanges[i].Key, Value: keyChanges[i].Value.Value()}) + } + + view2, err := view1.NewView(context.Background(), ViewChanges{BatchOps: ops}) + require.NoError(err) + + ops = make([]database.BatchOp, 0, numKeyChanges/4) + for i := 3 * numKeyChanges / 4; i < numKeyChanges; i++ { + ops = append(ops, database.BatchOp{Key: keyChanges[i].Key, Value: keyChanges[i].Value.Value()}) + } + + view3, err := view2.NewView(context.Background(), ViewChanges{BatchOps: ops}) + require.NoError(err) + + // Might have introduced duplicates, so only expect the latest value. 
+ uniqueKeyChanges := make(map[string][]byte) + for _, keyChange := range keyChanges { + uniqueKeyChanges[string(keyChange.Key)] = keyChange.Value.Value() + } + + iter := view3.NewIterator() + uniqueKeys := maps.Keys(uniqueKeyChanges) + sort.Strings(uniqueKeys) + i := 0 + for iter.Next() { + expectedKey := uniqueKeys[i] + expectedValue := uniqueKeyChanges[expectedKey] + require.True(bytes.Equal([]byte(expectedKey), iter.Key())) + if len(expectedValue) == 0 { + // Don't differentiate between nil and []byte{} + require.Empty(iter.Value()) + } else { + require.Equal(expectedValue, iter.Value()) + } + i++ + } + require.Len(uniqueKeys, i) + iter.Release() + require.NoError(iter.Error()) + + // Test with start and prefix. + prefix := []byte{128} + start := []byte{128, 5} + iter = view3.NewIteratorWithStartAndPrefix(start, prefix) + startPrefixUniqueKeys := []string{} + // Remove keys that don't have the prefix/are before the start. + for i := 0; i < len(uniqueKeys); i++ { + if bytes.HasPrefix([]byte(uniqueKeys[i]), prefix) && bytes.Compare([]byte(uniqueKeys[i]), start) >= 0 { + startPrefixUniqueKeys = append(startPrefixUniqueKeys, uniqueKeys[i]) + } + } + require.NotEmpty(startPrefixUniqueKeys) // Sanity check to make sure we have some keys to test. 
+ i = 0 + for iter.Next() { + expectedKey := startPrefixUniqueKeys[i] + expectedValue := uniqueKeyChanges[expectedKey] + require.Equal([]byte(expectedKey), iter.Key()) + if len(expectedValue) == 0 { + // Don't differentiate between nil and []byte{} + require.Empty(iter.Value()) + } else { + require.Equal(expectedValue, iter.Value()) + } + i++ + } + require.Len(startPrefixUniqueKeys, i) + iter.Release() + require.NoError(iter.Error()) +} diff --git a/avalanchego/x/sync/README.md b/avalanchego/x/sync/README.md new file mode 100644 index 00000000..e62fa86d --- /dev/null +++ b/avalanchego/x/sync/README.md @@ -0,0 +1,162 @@ +# `sync` package + +## Overview + +This package implements a client and server that allows for the syncing of a [MerkleDB](../merkledb/README.md). +The servers have an up to date version of the database, and the clients have an out of date version of the database or an empty database. + +It's planned that these client and server implementations will eventually be compatible with Firewood. + +## Messages + +There are four message types sent between the client and server: + +1. `SyncGetRangeProofRequest` +2. `RangeProof` +3. `SyncGetChangeProofRequest` +4. `SyncGetChangeProofResponse` + +These message types are defined in `avalanchego/proto/sync.proto`. +For more information on range proofs and change proofs, see their definitions in `avalanchego/merkledb/proof.go`. + +### `SyncGetRangeProofRequest` + +This message is sent from the client to the server to request a range proof for a given key range and root hash. +That is, the client says, "Give me the key-value pairs that were in this key range when the database had this root." +This request includes a limit on the number of key-value pairs to return, and the size of the response. + +### `RangeProof` + +This message is sent from the server to the client in response to a `SyncGetRangeProofRequest`. 
+It contains the key-value pairs that were in the requested key range when the database had the requested root, +as well as a proof that the key-value pairs are correct. +If a server can't serve the entire requested key range in one response, its response will omit keys from the +end of the range rather than the start. +For example, if a client requests a range proof for range [`requested_start`, `requested_end`] but the server +can't fit all the key-value pairs in one response, it'll send a range proof for [`requested_start`, `proof_end`] where `proof_end` < `requested_end`, +as opposed to sending a range proof for [`proof_start`, `requested_end`] where `proof_start` > `requested_start`. + +### `SyncGetChangeProofRequest` + +This message is sent from the client to the server to request a change proof between the given root hashes. +That is, the client says, "Give me the key-value pairs that changed between the time the database had this root and that root." +This request includes a limit on the number of key-value pairs to return, and the size of the response. + +### `SyncGetChangeProofResponse` + +This message is sent from the server to the client in response to a `SyncGetChangeProofRequest`. +If the server had sufficient history to generate a change proof, it contains a change proof that contains +the key-value pairs that changed between the requested roots. +If the server did not have sufficient history to generate a change proof, it contains a range proof that +contains the key-value pairs that were in the database when the database had the latter root. +Like range proofs, if a client requests a change proof for range [`requested_start`, `requested_end`] but +the server can't fit all the key-value pairs in one response, +it'll send a change proof for [`requested_start`, `proof_end`] where `proof_end` < `requested_end`, +as opposed to sending a change proof for [`proof_start`, `requested_end`] where `proof_start` > `requested_start`. 
+ +## Algorithm + +For each proof it receives, the sync client tracks the root hash of the revision associated with the proof's key-value pairs. +For example, it will store information that says something like, "I have all of the key-value pairs that +are in range [`start`, `end`] for the revision with root `root_hash`" for some keys `start` and `end`. +Note that `root_hash` is the root hash of the revision that the client is trying to sync to, not the +root hash of its own (incomplete) database. +Tracking the revision associated with each downloaded key range, as well as using data in its own +(incomplete) database, allows the client to figure out which key ranges are not up to date and need to be synced. +The hash of the incomplete database on a client is never sent anywhere because it does not represent a root hash of any revision. + +When the client is created, it is given the root hash of the revision to sync to. +When it starts syncing, it requests from a server a range proof for the entire database. +(To indicate that it wants no lower bound on the key range, the client doesn't provide a lower bound in the request. +To indicate that it wants no upper bound, the client doesn't provide an upper bound. +Thus, to request the entire database, the client omits both the lower and upper bounds in its request.) +The server replies with a range proof, which the client verifies. +If it's valid, the key-value pairs in the proof are written to the database. +If it's not, the client drops the proof and requests the proof from another server. + +A range proof sent by a server must return a continuous range of the key-value pairs, but may not +return the full range that was requested. +For example, a client might request all the key-value pairs in [`requested_start`, `requested_end`] +but only receive those in range [`requested_start`, `proof_end`] where `proof_end` < `requested_end`. 
+There might be too many key-value pairs to include in one message, or the server may be too busy to provide any more in its response. +Unless the database is very small, this means that the range proof the client receives in response to + its range proof request for the entire database will not contain all of the key-value pairs in the database. + +If a client requests a range proof for range [`requested_start`, `requested_end`] but only receives +a range proof for [`requested_start`, `proof_end`] where `proof_end` < `requested_end` +it recognizes that it must still fetch all of the keys in [`proof_end`, `requested_end`]. +It repeatedly requests range proofs for chunks of the remaining key range until it has all of the +key-value pairs in [`requested_start`, `requested_end`]. +The client may split the remaining key range into chunks and fetch chunks of key-value pairs in parallel, possibly even from different servers. + +Additional commits to the database may occur while the client is syncing. +The sync client can be notified that the root hash of the database it's trying to sync to has changed. +Detecting that the root hash to sync to has changed is done outside this package. +For example, if the database is being used to store blockchain state then the sync client would be +notified when a new block is accepted because that implies a commit to the database. +If this occurs, the key-value pairs the client has learned about via range proofs may no longer be up to date. + +We use change proofs as an optimization to correct the out of date key-value pairs. +When the sync client is notified that the root hash to sync to has changed, it requests a change proof +from a server for a given key range. 
+For example, if a client has the key-value pairs in range [`start`, `end`] that were in the database
+when it had `root_hash`, then it will request a change proof that provides all of the key-value changes
+in range [`start`, `end`] from the database version with root hash `root_hash` to the database version with root hash `new_root_hash`.
+The client verifies the change proof, and if it's valid, it applies the changes to its database.
+If it's not, the client drops the proof and requests the proof from another server.
+
+A server needs to have history in order to serve a change proof.
+Namely, it needs to know all of the database changes between two roots.
+If the server does not have sufficient history to generate a change proof, it will send a range proof for
+the requested range at revision `new_root_hash` instead.
+The client will verify and apply the range proof. (Note that change proofs are just an optimization for bandwidth and speed.
+A range proof for a given key range and revision has the same information as a change proof from
+`old_root_hash` to `new_root_hash` for the key range, assuming the client has the key-value pairs
+for the key range at the revision with `old_root_hash`.)
+Change proofs, like range proofs, may not contain all of the key-value pairs in the requested range.
+This is OK because, as mentioned above, the client tracks the root hash associated with each range of
+key-value pairs it has, so it knows which key-value pairs are out of date.
+Similar to range proofs, if a client requests the changes in range [`requested_start`, `requested_end`],
+but the server replies with all of the changes in [`requested_start`, `proof_end`] for some `proof_end` < `requested_end`,
+the client will repeatedly request change proofs until it gets the remaining key-value pairs (namely in [`proof_end`, `requested_end`]).
+ +Eventually, by repeatedly requesting, receiving, verifying and applying range and change proofs, +the client will have all of the key-value pairs in the database. +At this point, it's synced. + +## Diagram + + +Assuming you have `Root Hash` `r1` which has many keys, some of which are k25, k50, k75, +approximately 25%, 50%, and 75% of the way into the sorted set of keys, respectively, +this diagram shows an example flow from client to server: + +```mermaid +sequenceDiagram + box Client/Server + participant Server + participant Client + end + box New Revision Notifier + participant Notifier + end + + Note right of Client: Normal sync flow + Notifier->>Client: CurrentRoot(r1) + Client->>Server: RangeProofRequest(r1, all) + Server->>Client: RangeProofResponse(r1, ..k25) + Client->>Server: RangeProofRequest(r1, k25..) + Server->>Client: RangeProofResponse(r1, k25..k75) + Notifier-)Client: NewRootHash(r2) + Client->>Server: ChangeProofRequest(r1, r2, 0..k75) + Server->>Client: ChangeProofResponse(r1, r2, 0..k50) + Client->>Server: ChangeProofRequest(r1, r2, k50..k75) + Server->>Client: ChangeProofResponse(r1, r2, k50..k75) + Note right of Client: client is @r2 through (..k75) + Client->>Server: RangeProofRequest(r2, k75..) + Server->>Client: RangeProofResponse(r2, k75..k100) +``` + +## TODOs + +- [ ] Handle errors on proof requests. Currently, any errors that occur server side are not sent back to the client. diff --git a/avalanchego/x/sync/client.go b/avalanchego/x/sync/client.go index 99416f0d..7a71f1d4 100644 --- a/avalanchego/x/sync/client.go +++ b/avalanchego/x/sync/client.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package sync @@ -7,195 +7,374 @@ import ( "context" "errors" "fmt" + "math" "sync/atomic" "time" "go.uber.org/zap" + "google.golang.org/protobuf/proto" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/version" + "github.com/ava-labs/avalanchego/utils/maybe" "github.com/ava-labs/avalanchego/x/merkledb" + + pb "github.com/ava-labs/avalanchego/proto/pb/sync" ) const ( - failedRequestSleepInterval = 10 * time.Millisecond + initialRetryWait = 10 * time.Millisecond + maxRetryWait = time.Second + retryWaitFactor = 1.5 // Larger --> timeout grows more quickly epsilon = 1e-6 // small amount to add to time to avoid division by 0 ) var ( - _ Client = &client{} + _ Client = (*client)(nil) - errInvalidRangeProof = errors.New("failed to verify range proof") - errTooManyLeaves = errors.New("response contains more than requested leaves") + errInvalidRangeProof = errors.New("failed to verify range proof") + errInvalidChangeProof = errors.New("failed to verify change proof") + errTooManyKeys = errors.New("response contains more than requested keys") + errTooManyBytes = errors.New("response contains more than requested bytes") + errUnexpectedChangeProofResponse = errors.New("unexpected response type") ) -// Client synchronously fetches data from the network to fulfill state sync requests. +// Client synchronously fetches data from the network +// to fulfill state sync requests. // Repeatedly retries failed requests until the context is canceled. type Client interface { - // GetRangeProof synchronously sends the given request, returning a parsed StateResponse or error - // Note: this verifies the response including the range proof. 
- GetRangeProof(ctx context.Context, request *RangeProofRequest) (*merkledb.RangeProof, error) - // GetChangeProof synchronously sends the given request, returning a parsed ChangesResponse or error - // [verificationDB] is the local db that has all key/values in it for the proof's startroot within the proof's key range - // Note: this verifies the response including the change proof. - GetChangeProof(ctx context.Context, request *ChangeProofRequest, verificationDB *merkledb.Database) (*merkledb.ChangeProof, error) + // GetRangeProof synchronously sends the given request + // and returns the parsed response. + // This method verifies the range proof before returning it. + GetRangeProof( + ctx context.Context, + request *pb.SyncGetRangeProofRequest, + ) (*merkledb.RangeProof, error) + + // GetChangeProof synchronously sends the given request + // and returns the parsed response. + // This method verifies the change proof / range proof + // before returning it. + // If the server responds with a change proof, + // it's verified using [verificationDB]. 
+ GetChangeProof( + ctx context.Context, + request *pb.SyncGetChangeProofRequest, + verificationDB DB, + ) (*merkledb.ChangeOrRangeProof, error) } type client struct { - networkClient NetworkClient - stateSyncNodes []ids.NodeID - stateSyncNodeIdx uint32 - stateSyncMinVersion *version.Application - log logging.Logger - metrics SyncMetrics + networkClient NetworkClient + stateSyncNodes []ids.NodeID + stateSyncNodeIdx uint32 + log logging.Logger + metrics SyncMetrics + tokenSize int } type ClientConfig struct { - NetworkClient NetworkClient - StateSyncNodeIDs []ids.NodeID - StateSyncMinVersion *version.Application - Log logging.Logger - Metrics SyncMetrics + NetworkClient NetworkClient + StateSyncNodeIDs []ids.NodeID + Log logging.Logger + Metrics SyncMetrics + BranchFactor merkledb.BranchFactor } -func NewClient(config *ClientConfig) Client { - c := &client{ - networkClient: config.NetworkClient, - stateSyncNodes: config.StateSyncNodeIDs, - stateSyncMinVersion: config.StateSyncMinVersion, - log: config.Log, - metrics: config.Metrics, +func NewClient(config *ClientConfig) (Client, error) { + if err := config.BranchFactor.Valid(); err != nil { + return nil, err } - return c + return &client{ + networkClient: config.NetworkClient, + stateSyncNodes: config.StateSyncNodeIDs, + log: config.Log, + metrics: config.Metrics, + tokenSize: merkledb.BranchFactorToTokenSize[config.BranchFactor], + }, nil } // GetChangeProof synchronously retrieves the change proof given by [req]. // Upon failure, retries until the context is expired. // The returned change proof is verified. 
-func (c *client) GetChangeProof(ctx context.Context, req *ChangeProofRequest, db *merkledb.Database) (*merkledb.ChangeProof, error) { - parseFn := func(ctx context.Context, responseBytes []byte) (*merkledb.ChangeProof, error) { - changeProof := &merkledb.ChangeProof{} - if _, err := merkledb.Codec.DecodeChangeProof(responseBytes, changeProof); err != nil { - return nil, err +func (c *client) GetChangeProof( + ctx context.Context, + req *pb.SyncGetChangeProofRequest, + db DB, +) (*merkledb.ChangeOrRangeProof, error) { + parseFn := func(ctx context.Context, responseBytes []byte) (*merkledb.ChangeOrRangeProof, error) { + if len(responseBytes) > int(req.BytesLimit) { + return nil, fmt.Errorf("%w: (%d) > %d)", errTooManyBytes, len(responseBytes), req.BytesLimit) } - // Ensure the response does not contain more than the requested number of leaves - // and the start and end roots match the requested roots. - if len(changeProof.KeyValues)+len(changeProof.DeletedKeys) > int(req.Limit) { - return nil, fmt.Errorf("%w: (%d) > %d)", errTooManyLeaves, len(changeProof.KeyValues), req.Limit) + var changeProofResp pb.SyncGetChangeProofResponse + if err := proto.Unmarshal(responseBytes, &changeProofResp); err != nil { + return nil, err } - if err := changeProof.Verify(ctx, db, req.Start, req.End, req.EndingRoot); err != nil { - return nil, fmt.Errorf("%s due to %w", errInvalidRangeProof, err) + startKey := maybeBytesToMaybe(req.StartKey) + endKey := maybeBytesToMaybe(req.EndKey) + + switch changeProofResp := changeProofResp.Response.(type) { + case *pb.SyncGetChangeProofResponse_ChangeProof: + // The server had enough history to send us a change proof + var changeProof merkledb.ChangeProof + if err := changeProof.UnmarshalProto(changeProofResp.ChangeProof); err != nil { + return nil, err + } + + // Ensure the response does not contain more than the requested number of leaves + // and the start and end roots match the requested roots. 
+ if len(changeProof.KeyChanges) > int(req.KeyLimit) { + return nil, fmt.Errorf( + "%w: (%d) > %d)", + errTooManyKeys, len(changeProof.KeyChanges), req.KeyLimit, + ) + } + + endRoot, err := ids.ToID(req.EndRootHash) + if err != nil { + return nil, err + } + + if err := db.VerifyChangeProof( + ctx, + &changeProof, + startKey, + endKey, + endRoot, + ); err != nil { + return nil, fmt.Errorf("%w due to %w", errInvalidChangeProof, err) + } + + return &merkledb.ChangeOrRangeProof{ + ChangeProof: &changeProof, + }, nil + case *pb.SyncGetChangeProofResponse_RangeProof: + + var rangeProof merkledb.RangeProof + if err := rangeProof.UnmarshalProto(changeProofResp.RangeProof); err != nil { + return nil, err + } + + // The server did not have enough history to send us a change proof + // so they sent a range proof instead. + err := verifyRangeProof( + ctx, + &rangeProof, + int(req.KeyLimit), + startKey, + endKey, + req.EndRootHash, + c.tokenSize, + ) + if err != nil { + return nil, err + } + + return &merkledb.ChangeOrRangeProof{ + RangeProof: &rangeProof, + }, nil + default: + return nil, fmt.Errorf( + "%w: %T", + errUnexpectedChangeProofResponse, changeProofResp, + ) } - return changeProof, nil } - return getAndParse(ctx, c, req, parseFn) + + reqBytes, err := proto.Marshal(&pb.Request{ + Message: &pb.Request_ChangeProofRequest{ + ChangeProofRequest: req, + }, + }) + if err != nil { + return nil, err + } + return getAndParse(ctx, c, reqBytes, parseFn) +} + +// Verify [rangeProof] is a valid range proof for keys in [start, end] for +// root [rootBytes]. Returns [errTooManyKeys] if the response contains more +// than [keyLimit] keys. 
+func verifyRangeProof( + ctx context.Context, + rangeProof *merkledb.RangeProof, + keyLimit int, + start maybe.Maybe[[]byte], + end maybe.Maybe[[]byte], + rootBytes []byte, + tokenSize int, +) error { + root, err := ids.ToID(rootBytes) + if err != nil { + return err + } + + // Ensure the response does not contain more than the maximum requested number of leaves. + if len(rangeProof.KeyValues) > keyLimit { + return fmt.Errorf( + "%w: (%d) > %d)", + errTooManyKeys, len(rangeProof.KeyValues), keyLimit, + ) + } + + if err := rangeProof.Verify( + ctx, + start, + end, + root, + tokenSize, + ); err != nil { + return fmt.Errorf("%w due to %w", errInvalidRangeProof, err) + } + return nil } // GetRangeProof synchronously retrieves the range proof given by [req]. // Upon failure, retries until the context is expired. // The returned range proof is verified. -func (c *client) GetRangeProof(ctx context.Context, req *RangeProofRequest) (*merkledb.RangeProof, error) { +func (c *client) GetRangeProof( + ctx context.Context, + req *pb.SyncGetRangeProofRequest, +) (*merkledb.RangeProof, error) { parseFn := func(ctx context.Context, responseBytes []byte) (*merkledb.RangeProof, error) { - rangeProof := &merkledb.RangeProof{} - if _, err := merkledb.Codec.DecodeRangeProof(responseBytes, rangeProof); err != nil { + if len(responseBytes) > int(req.BytesLimit) { + return nil, fmt.Errorf( + "%w: (%d) > %d)", + errTooManyBytes, len(responseBytes), req.BytesLimit, + ) + } + + var rangeProofProto pb.RangeProof + if err := proto.Unmarshal(responseBytes, &rangeProofProto); err != nil { return nil, err } - // Ensure the response does not contain more than the maximum requested number of leaves. 
- if len(rangeProof.KeyValues) > int(req.Limit) { - return nil, fmt.Errorf("%w: (%d) > %d)", errTooManyLeaves, len(rangeProof.KeyValues), req.Limit) + var rangeProof merkledb.RangeProof + if err := rangeProof.UnmarshalProto(&rangeProofProto); err != nil { + return nil, err } - if err := rangeProof.Verify( + if err := verifyRangeProof( ctx, - req.Start, - req.End, - req.Root, + &rangeProof, + int(req.KeyLimit), + maybeBytesToMaybe(req.StartKey), + maybeBytesToMaybe(req.EndKey), + req.RootHash, + c.tokenSize, ); err != nil { - return nil, fmt.Errorf("%s due to %w", errInvalidRangeProof, err) + return nil, err } - return rangeProof, nil + return &rangeProof, nil } - return getAndParse(ctx, c, req, parseFn) -} -// getAndParse uses [client] to send [request] to an arbitrary peer. If the peer responds, -// [parseFn] is called with the raw response. If [parseFn] returns an error or the request -// times out, this function will retry the request to a different peer until [ctx] expires. -// If [parseFn] returns a nil error, the result is returned from getAndParse. -func getAndParse[T any](ctx context.Context, client *client, request Request, parseFn func(context.Context, []byte) (*T, error)) (*T, error) { - // marshal the request into requestBytes - requestBytes, err := syncCodec.Marshal(Version, &request) + reqBytes, err := proto.Marshal(&pb.Request{ + Message: &pb.Request_RangeProofRequest{ + RangeProofRequest: req, + }, + }) if err != nil { return nil, err } + return getAndParse(ctx, c, reqBytes, parseFn) +} + +// getAndParse uses [client] to send [request] to an arbitrary peer. +// Returns the response to the request. +// [parseFn] parses the raw response. +// If the request is unsuccessful or the response can't be parsed, +// retries the request to a different peer until [ctx] expires. +// Returns [errAppSendFailed] if we fail to send an AppRequest/AppResponse. +// This should be treated as a fatal error. 
+func getAndParse[T any]( + ctx context.Context, + client *client, + request []byte, + parseFn func(context.Context, []byte) (*T, error), +) (*T, error) { var ( lastErr error response *T ) // Loop until the context is cancelled or we get a valid response. - for attempt := 0; ; attempt++ { - // If the context has finished, return the context error early. - if err := ctx.Err(); err != nil { - if lastErr != nil { - return nil, fmt.Errorf("request failed after %d attempts with last error %w and ctx error %s", attempt, lastErr, err) - } - return nil, err - } - responseBytes, nodeID, err := client.get(ctx, requestBytes) + for attempt := 1; ; attempt++ { + nodeID, responseBytes, err := client.get(ctx, request) if err == nil { if response, err = parseFn(ctx, responseBytes); err == nil { return response, nil } } + if errors.Is(err, errAppSendFailed) { + // Failing to send an AppRequest is a fatal error. + return nil, err + } + client.log.Debug("request failed, retrying", zap.Stringer("nodeID", nodeID), zap.Int("attempt", attempt), - zap.Stringer("request", request), - zap.Error(err)) - + zap.Error(err), + ) + // if [err] is being propagated from [ctx], avoid overwriting [lastErr]. if err != ctx.Err() { - // if [err] is being propagated from [ctx], avoid overwriting [lastErr]. lastErr = err - time.Sleep(failedRequestSleepInterval) + } + + retryWait := initialRetryWait * time.Duration(math.Pow(retryWaitFactor, float64(attempt))) + if retryWait > maxRetryWait || retryWait < 0 { // Handle overflows with negative check. + retryWait = maxRetryWait + } + + select { + case <-ctx.Done(): + if lastErr != nil { + // prefer reporting [lastErr] if it's not nil. + return nil, fmt.Errorf( + "request failed after %d attempts with last error %w and ctx error %w", + attempt, lastErr, ctx.Err(), + ) + } + return nil, ctx.Err() + case <-time.After(retryWait): } } } -// get sends [request] to an arbitrary peer and blocks until the node receives a response -// or [ctx] expires. 
Returns the raw response from the peer, the peer's NodeID, and an -// error if the request timed out. Thread safe. -func (c *client) get(ctx context.Context, requestBytes []byte) ([]byte, ids.NodeID, error) { - c.metrics.RequestMade() +// get sends [request] to an arbitrary peer and blocks +// until the node receives a response, failure notification +// or [ctx] is canceled. +// Returns the peer's NodeID and response. +// Returns [errAppSendFailed] if we failed to send an AppRequest/AppResponse. +// This should be treated as fatal. +// It's safe to call this method multiple times concurrently. +func (c *client) get(ctx context.Context, request []byte) (ids.NodeID, []byte, error) { var ( - response []byte - nodeID ids.NodeID - err error - startTime = time.Now() + response []byte + nodeID ids.NodeID + err error ) + + c.metrics.RequestMade() + if len(c.stateSyncNodes) == 0 { - response, nodeID, err = c.networkClient.RequestAny(ctx, c.stateSyncMinVersion, requestBytes) + nodeID, response, err = c.networkClient.RequestAny(ctx, request) } else { - // get the next nodeID using the nodeIdx offset. If we're out of nodes, loop back to 0 - // we do this every attempt to ensure we get a different node each time if possible. + // Get the next nodeID to query using the [nodeIdx] offset. + // If we're out of nodes, loop back to 0. + // We do this try to query a different node each time if possible. 
nodeIdx := atomic.AddUint32(&c.stateSyncNodeIdx, 1) nodeID = c.stateSyncNodes[nodeIdx%uint32(len(c.stateSyncNodes))] - response, err = c.networkClient.Request(ctx, nodeID, requestBytes) + response, err = c.networkClient.Request(ctx, nodeID, request) } if err != nil { c.metrics.RequestFailed() - c.networkClient.TrackBandwidth(nodeID, 0) - return response, nodeID, err + return nodeID, response, err } - bandwidth := float64(len(response)) / (time.Since(startTime).Seconds() + epsilon) - c.networkClient.TrackBandwidth(nodeID, bandwidth) c.metrics.RequestSucceeded() - return response, nodeID, nil + return nodeID, response, nil } diff --git a/avalanchego/x/sync/client_test.go b/avalanchego/x/sync/client_test.go index ac72e312..fed60e93 100644 --- a/avalanchego/x/sync/client_test.go +++ b/avalanchego/x/sync/client_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package sync @@ -6,100 +6,144 @@ package sync import ( "context" "math/rand" - "sync" "testing" "time" - "github.com/golang/mock/gomock" - + "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" + "google.golang.org/protobuf/proto" + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/trace" "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/utils/set" - "github.com/ava-labs/avalanchego/version" + "github.com/ava-labs/avalanchego/utils/maybe" "github.com/ava-labs/avalanchego/x/merkledb" + + pb "github.com/ava-labs/avalanchego/proto/pb/sync" ) -func sendRequest( +func newDefaultDBConfig() merkledb.Config { + return merkledb.Config{ + IntermediateWriteBatchSize: 100, + HistoryLength: defaultRequestKeyLimit, + ValueNodeCacheSize: defaultRequestKeyLimit, + IntermediateWriteBufferSize: defaultRequestKeyLimit, + IntermediateNodeCacheSize: defaultRequestKeyLimit, + Reg: prometheus.NewRegistry(), + Tracer: trace.Noop, + BranchFactor: merkledb.BranchFactor16, + } +} + +// Create a client and send a range proof request to a server +// whose underlying database is [serverDB]. +// The server's response is modified with [modifyResponse] before +// being returned to the server. +// The client makes at most [maxAttempts] attempts to fulfill +// the request before returning an error. 
+func sendRangeProofRequest( t *testing.T, - db *merkledb.Database, - request *RangeProofRequest, - maxAttempts uint32, + serverDB DB, + request *pb.SyncGetRangeProofRequest, + maxAttempts int, modifyResponse func(*merkledb.RangeProof), ) (*merkledb.RangeProof, error) { t.Helper() - var wg sync.WaitGroup - defer wg.Wait() // wait for goroutines spawned - require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() - sender := common.NewMockSender(ctrl) - handler := NewNetworkServer(sender, db, logging.NoLog{}) - clientNodeID, serverNodeID := ids.GenerateTestNodeID(), ids.GenerateTestNodeID() - networkClient := NewNetworkClient(sender, clientNodeID, 1, logging.NoLog{}) - err := networkClient.Connected(context.Background(), serverNodeID, version.CurrentApp) - require.NoError(err) - client := NewClient(&ClientConfig{ + var ( + // Number of calls from the client to the server so far. + numAttempts int + + // Sends messages from server to client. + sender = common.NewMockSender(ctrl) + + // Serves the range proof. + server = NewNetworkServer(sender, serverDB, logging.NoLog{}) + + clientNodeID, serverNodeID = ids.GenerateTestNodeID(), ids.GenerateTestNodeID() + + // "Sends" the request from the client to the server and + // "receives" the response from the server. In reality, + // it just invokes the server's method and receives + // the response on [serverResponseChan]. + networkClient = NewMockNetworkClient(ctrl) + + serverResponseChan = make(chan []byte, 1) + + // The context used in client.GetRangeProof. + // Canceled after the first response is received because + // the client will keep sending requests until its context + // expires or it succeeds. + ctx, cancel = context.WithCancel(context.Background()) + ) + + defer cancel() + + // The client fetching a range proof. 
+ client, err := NewClient(&ClientConfig{ NetworkClient: networkClient, Metrics: &mockMetrics{}, Log: logging.NoLog{}, + BranchFactor: merkledb.BranchFactor16, }) + require.NoError(err) - ctx, cancel := context.WithCancel(context.Background()) - deadline := time.Now().Add(1 * time.Hour) // enough time to complete a request - defer cancel() // avoid leaking a goroutine - - expectedSendNodeIDs := set.NewSet[ids.NodeID](1) - expectedSendNodeIDs.Add(serverNodeID) - sender.EXPECT().SendAppRequest( - gomock.Any(), // ctx - expectedSendNodeIDs, // {serverNodeID} - gomock.Any(), // requestID - gomock.Any(), // requestBytes + networkClient.EXPECT().RequestAny( + gomock.Any(), // ctx + gomock.Any(), // request ).DoAndReturn( - func(ctx context.Context, _ set.Set[ids.NodeID], requestID uint32, requestBytes []byte) error { - // limit the number of attempts to [maxAttempts] by cancelling the context if needed. - if requestID >= maxAttempts { - cancel() - return ctx.Err() + func(_ context.Context, request []byte) (ids.NodeID, []byte, error) { + go func() { + // Get response from server + require.NoError(server.AppRequest(context.Background(), clientNodeID, 0, time.Now().Add(time.Hour), request)) + }() + + // Wait for response from server + serverResponse := <-serverResponseChan + + numAttempts++ + + if numAttempts >= maxAttempts { + defer cancel() } - wg.Add(1) - go func() { - defer wg.Done() - err := handler.AppRequest(ctx, clientNodeID, requestID, deadline, requestBytes) - require.NoError(err) - }() // should be on a goroutine so the test can make progress. - return nil + return serverNodeID, serverResponse, nil }, ).AnyTimes() + + // The server should expect to "send" a response to the client. 
sender.EXPECT().SendAppResponse( gomock.Any(), // ctx clientNodeID, gomock.Any(), // requestID gomock.Any(), // responseBytes ).DoAndReturn( - func(_ context.Context, _ ids.NodeID, requestID uint32, responseBytes []byte) error { + func(_ context.Context, _ ids.NodeID, _ uint32, responseBytes []byte) error { // deserialize the response so we can modify it if needed. - response := &merkledb.RangeProof{} - _, err := merkledb.Codec.DecodeRangeProof(responseBytes, response) - require.NoError(err) + var responseProto pb.RangeProof + require.NoError(proto.Unmarshal(responseBytes, &responseProto)) + + var response merkledb.RangeProof + require.NoError(response.UnmarshalProto(&responseProto)) // modify if needed if modifyResponse != nil { - modifyResponse(response) + modifyResponse(&response) } // reserialize the response and pass it to the client to complete the handling. - responseBytes, err = merkledb.Codec.EncodeRangeProof(merkledb.Version, response) - require.NoError(err) - err = networkClient.AppResponse(context.Background(), serverNodeID, requestID, responseBytes) + responseBytes, err := proto.Marshal(response.ToProto()) require.NoError(err) + + serverResponseChan <- responseBytes + return nil }, ).AnyTimes() @@ -108,78 +152,100 @@ func sendRequest( } func TestGetRangeProof(t *testing.T) { - r := rand.New(rand.NewSource(1)) // #nosec G404 + now := time.Now().UnixNano() + t.Logf("seed: %d", now) + r := rand.New(rand.NewSource(now)) // #nosec G404 - smallTrieKeyCount := defaultLeafRequestLimit + smallTrieKeyCount := defaultRequestKeyLimit smallTrieDB, _, err := generateTrieWithMinKeyLen(t, r, smallTrieKeyCount, 1) require.NoError(t, err) smallTrieRoot, err := smallTrieDB.GetMerkleRoot(context.Background()) require.NoError(t, err) - largeTrieKeyCount := 10_000 + largeTrieKeyCount := 3 * defaultRequestKeyLimit largeTrieDB, largeTrieKeys, err := generateTrieWithMinKeyLen(t, r, largeTrieKeyCount, 1) require.NoError(t, err) largeTrieRoot, err := 
largeTrieDB.GetMerkleRoot(context.Background()) require.NoError(t, err) tests := map[string]struct { - db *merkledb.Database - request *RangeProofRequest + db DB + request *pb.SyncGetRangeProofRequest modifyResponse func(*merkledb.RangeProof) expectedErr error expectedResponseLen int }{ + "proof restricted by BytesLimit": { + db: smallTrieDB, + request: &pb.SyncGetRangeProofRequest{ + RootHash: smallTrieRoot[:], + KeyLimit: defaultRequestKeyLimit, + BytesLimit: 10000, + }, + }, "full response for small (single request) trie": { db: smallTrieDB, - request: &RangeProofRequest{ - Root: smallTrieRoot, - Limit: defaultLeafRequestLimit, + request: &pb.SyncGetRangeProofRequest{ + RootHash: smallTrieRoot[:], + KeyLimit: defaultRequestKeyLimit, + BytesLimit: defaultRequestByteSizeLimit, }, - expectedResponseLen: defaultLeafRequestLimit, + expectedResponseLen: defaultRequestKeyLimit, }, "too many leaves in response": { db: smallTrieDB, - request: &RangeProofRequest{ - Root: smallTrieRoot, - Limit: defaultLeafRequestLimit, + request: &pb.SyncGetRangeProofRequest{ + RootHash: smallTrieRoot[:], + KeyLimit: defaultRequestKeyLimit, + BytesLimit: defaultRequestByteSizeLimit, }, modifyResponse: func(response *merkledb.RangeProof) { response.KeyValues = append(response.KeyValues, merkledb.KeyValue{}) }, - expectedErr: errTooManyLeaves, + expectedErr: errTooManyKeys, }, "partial response to request for entire trie (full leaf limit)": { db: largeTrieDB, - request: &RangeProofRequest{ - Root: largeTrieRoot, - Limit: defaultLeafRequestLimit, + request: &pb.SyncGetRangeProofRequest{ + RootHash: largeTrieRoot[:], + KeyLimit: defaultRequestKeyLimit, + BytesLimit: defaultRequestByteSizeLimit, }, - expectedResponseLen: defaultLeafRequestLimit, + expectedResponseLen: defaultRequestKeyLimit, }, "full response from near end of trie to end of trie (less than leaf limit)": { db: largeTrieDB, - request: &RangeProofRequest{ - Root: largeTrieRoot, - Start: largeTrieKeys[len(largeTrieKeys)-30], // 
Set start 30 keys from the end of the large trie - Limit: defaultLeafRequestLimit, + request: &pb.SyncGetRangeProofRequest{ + RootHash: largeTrieRoot[:], + StartKey: &pb.MaybeBytes{ + Value: largeTrieKeys[len(largeTrieKeys)-30], // Set start 30 keys from the end of the large trie + IsNothing: false, + }, + KeyLimit: defaultRequestKeyLimit, + BytesLimit: defaultRequestByteSizeLimit, }, expectedResponseLen: 30, }, "full response for intermediate range of trie (less than leaf limit)": { db: largeTrieDB, - request: &RangeProofRequest{ - Root: largeTrieRoot, - Start: largeTrieKeys[1000], // Set the range for 1000 leafs in an intermediate range of the trie - End: largeTrieKeys[1099], // (inclusive range) - Limit: defaultLeafRequestLimit, + request: &pb.SyncGetRangeProofRequest{ + RootHash: largeTrieRoot[:], + StartKey: &pb.MaybeBytes{ + Value: largeTrieKeys[1000], // Set the range for 1000 leafs in an intermediate range of the trie + IsNothing: false, + }, + EndKey: &pb.MaybeBytes{Value: largeTrieKeys[1099]}, // (inclusive range) + KeyLimit: defaultRequestKeyLimit, + BytesLimit: defaultRequestByteSizeLimit, }, expectedResponseLen: 100, }, "removed first key in response": { db: largeTrieDB, - request: &RangeProofRequest{ - Root: largeTrieRoot, - Limit: defaultLeafRequestLimit, + request: &pb.SyncGetRangeProofRequest{ + RootHash: largeTrieRoot[:], + KeyLimit: defaultRequestKeyLimit, + BytesLimit: defaultRequestByteSizeLimit, }, modifyResponse: func(response *merkledb.RangeProof) { response.KeyValues = response.KeyValues[1:] @@ -188,86 +254,472 @@ func TestGetRangeProof(t *testing.T) { }, "removed first key in response and replaced proof": { db: largeTrieDB, - request: &RangeProofRequest{ - Root: largeTrieRoot, - Limit: defaultLeafRequestLimit, + request: &pb.SyncGetRangeProofRequest{ + RootHash: largeTrieRoot[:], + KeyLimit: defaultRequestKeyLimit, + BytesLimit: defaultRequestByteSizeLimit, }, modifyResponse: func(response *merkledb.RangeProof) { - start := 
response.KeyValues[1].Key - proof, err := largeTrieDB.GetRangeProof(context.Background(), start, nil, defaultLeafRequestLimit) - if err != nil { - panic(err) - } + start := maybe.Some(response.KeyValues[1].Key) + rootID, err := largeTrieDB.GetMerkleRoot(context.Background()) + require.NoError(t, err) + proof, err := largeTrieDB.GetRangeProofAtRoot(context.Background(), rootID, start, maybe.Nothing[[]byte](), defaultRequestKeyLimit) + require.NoError(t, err) response.KeyValues = proof.KeyValues response.StartProof = proof.StartProof response.EndProof = proof.EndProof }, - expectedErr: merkledb.ErrProofNodeNotForKey, + expectedErr: errInvalidRangeProof, }, - "removed last key in response": { + "removed key from middle of response": { db: largeTrieDB, - request: &RangeProofRequest{ - Root: largeTrieRoot, - Limit: defaultLeafRequestLimit, + request: &pb.SyncGetRangeProofRequest{ + RootHash: largeTrieRoot[:], + KeyLimit: defaultRequestKeyLimit, + BytesLimit: defaultRequestByteSizeLimit, }, modifyResponse: func(response *merkledb.RangeProof) { - response.KeyValues = response.KeyValues[:len(response.KeyValues)-2] + response.KeyValues = append(response.KeyValues[:100], response.KeyValues[101:]...) }, expectedErr: merkledb.ErrInvalidProof, }, - "removed key from middle of response": { + "start and end proof nodes removed": { db: largeTrieDB, - request: &RangeProofRequest{ - Root: largeTrieRoot, - Limit: defaultLeafRequestLimit, + request: &pb.SyncGetRangeProofRequest{ + RootHash: largeTrieRoot[:], + KeyLimit: defaultRequestKeyLimit, + BytesLimit: defaultRequestByteSizeLimit, }, modifyResponse: func(response *merkledb.RangeProof) { - response.KeyValues = append(response.KeyValues[:100], response.KeyValues[101:]...) 
+ response.StartProof = nil + response.EndProof = nil }, - expectedErr: merkledb.ErrInvalidProof, + expectedErr: merkledb.ErrNoEndProof, }, - "all proof keys removed from response": { + "end proof removed": { + db: largeTrieDB, + request: &pb.SyncGetRangeProofRequest{ + RootHash: largeTrieRoot[:], + KeyLimit: defaultRequestKeyLimit, + BytesLimit: defaultRequestByteSizeLimit, + }, + modifyResponse: func(response *merkledb.RangeProof) { + response.EndProof = nil + }, + expectedErr: merkledb.ErrNoEndProof, + }, + "empty proof": { db: largeTrieDB, - request: &RangeProofRequest{ - Root: largeTrieRoot, - Limit: defaultLeafRequestLimit, + request: &pb.SyncGetRangeProofRequest{ + RootHash: largeTrieRoot[:], + KeyLimit: defaultRequestKeyLimit, + BytesLimit: defaultRequestByteSizeLimit, }, modifyResponse: func(response *merkledb.RangeProof) { response.StartProof = nil response.EndProof = nil + response.KeyValues = nil + }, + expectedErr: merkledb.ErrEmptyProof, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + require := require.New(t) + proof, err := sendRangeProofRequest(t, test.db, test.request, 1, test.modifyResponse) + require.ErrorIs(err, test.expectedErr) + if test.expectedErr != nil { + return + } + if test.expectedResponseLen > 0 { + require.Len(proof.KeyValues, test.expectedResponseLen) + } + bytes, err := proto.Marshal(proof.ToProto()) + require.NoError(err) + require.Less(len(bytes), int(test.request.BytesLimit)) + }) + } +} + +func sendChangeProofRequest( + t *testing.T, + serverDB DB, + clientDB DB, + request *pb.SyncGetChangeProofRequest, + maxAttempts int, + modifyChangeProof func(*merkledb.ChangeProof), + modifyRangeProof func(*merkledb.RangeProof), +) (*merkledb.ChangeOrRangeProof, error) { + t.Helper() + + require := require.New(t) + ctrl := gomock.NewController(t) + + var ( + // Number of calls from the client to the server so far. + numAttempts int + + // Sends messages from server to client. 
+ sender = common.NewMockSender(ctrl) + + // Serves the change proof. + server = NewNetworkServer(sender, serverDB, logging.NoLog{}) + + clientNodeID, serverNodeID = ids.GenerateTestNodeID(), ids.GenerateTestNodeID() + + // "Sends" the request from the client to the server and + // "receives" the response from the server. In reality, + // it just invokes the server's method and receives + // the response on [serverResponseChan]. + networkClient = NewMockNetworkClient(ctrl) + + serverResponseChan = make(chan []byte, 1) + + // The context used in client.GetChangeProof. + // Canceled after the first response is received because + // the client will keep sending requests until its context + // expires or it succeeds. + ctx, cancel = context.WithCancel(context.Background()) + ) + + // The client fetching a change proof. + client, err := NewClient(&ClientConfig{ + NetworkClient: networkClient, + Metrics: &mockMetrics{}, + Log: logging.NoLog{}, + BranchFactor: merkledb.BranchFactor16, + }) + require.NoError(err) + + defer cancel() // avoid leaking a goroutine + + networkClient.EXPECT().RequestAny( + gomock.Any(), // ctx + gomock.Any(), // request + ).DoAndReturn( + func(_ context.Context, request []byte) (ids.NodeID, []byte, error) { + go func() { + // Get response from server + require.NoError(server.AppRequest(context.Background(), clientNodeID, 0, time.Now().Add(time.Hour), request)) + }() + + // Wait for response from server + serverResponse := <-serverResponseChan + + numAttempts++ + + if numAttempts >= maxAttempts { + defer cancel() + } + + return serverNodeID, serverResponse, nil + }, + ).AnyTimes() + + // Expect server (serverDB) to send app response to client (clientDB) + sender.EXPECT().SendAppResponse( + gomock.Any(), // ctx + clientNodeID, + gomock.Any(), // requestID + gomock.Any(), // responseBytes + ).DoAndReturn( + func(_ context.Context, _ ids.NodeID, _ uint32, responseBytes []byte) error { + // deserialize the response so we can modify it if needed. 
+ var responseProto pb.SyncGetChangeProofResponse + require.NoError(proto.Unmarshal(responseBytes, &responseProto)) + + if responseProto.GetChangeProof() != nil { + // Server responded with a change proof + var changeProof merkledb.ChangeProof + require.NoError(changeProof.UnmarshalProto(responseProto.GetChangeProof())) + + // modify if needed + if modifyChangeProof != nil { + modifyChangeProof(&changeProof) + } + + // reserialize the response and pass it to the client to complete the handling. + responseBytes, err := proto.Marshal(&pb.SyncGetChangeProofResponse{ + Response: &pb.SyncGetChangeProofResponse_ChangeProof{ + ChangeProof: changeProof.ToProto(), + }, + }) + require.NoError(err) + + serverResponseChan <- responseBytes + + return nil + } + + // Server responded with a range proof + var rangeProof merkledb.RangeProof + require.NoError(rangeProof.UnmarshalProto(responseProto.GetRangeProof())) + + // modify if needed + if modifyRangeProof != nil { + modifyRangeProof(&rangeProof) + } + + // reserialize the response and pass it to the client to complete the handling. 
+ responseBytes, err := proto.Marshal(&pb.SyncGetChangeProofResponse{ + Response: &pb.SyncGetChangeProofResponse_RangeProof{ + RangeProof: rangeProof.ToProto(), + }, + }) + require.NoError(err) + + serverResponseChan <- responseBytes + + return nil + }, + ).AnyTimes() + + return client.GetChangeProof(ctx, request, clientDB) +} + +func TestGetChangeProof(t *testing.T) { + now := time.Now().UnixNano() + t.Logf("seed: %d", now) + r := rand.New(rand.NewSource(now)) // #nosec G404 + + serverDB, err := merkledb.New( + context.Background(), + memdb.New(), + newDefaultDBConfig(), + ) + require.NoError(t, err) + + clientDB, err := merkledb.New( + context.Background(), + memdb.New(), + newDefaultDBConfig(), + ) + require.NoError(t, err) + startRoot, err := serverDB.GetMerkleRoot(context.Background()) + require.NoError(t, err) + + // create changes + for x := 0; x < defaultRequestKeyLimit/2; x++ { + ops := make([]database.BatchOp, 0, 11) + // add some key/values + for i := 0; i < 10; i++ { + key := make([]byte, r.Intn(100)) + _, err = r.Read(key) + require.NoError(t, err) + + val := make([]byte, r.Intn(100)) + _, err = r.Read(val) + require.NoError(t, err) + + ops = append(ops, database.BatchOp{Key: key, Value: val}) + } + + // delete a key + deleteKeyStart := make([]byte, r.Intn(10)) + _, err = r.Read(deleteKeyStart) + require.NoError(t, err) + + it := serverDB.NewIteratorWithStart(deleteKeyStart) + if it.Next() { + ops = append(ops, database.BatchOp{Key: it.Key(), Delete: true}) + } + require.NoError(t, it.Error()) + it.Release() + + view, err := serverDB.NewView( + context.Background(), + merkledb.ViewChanges{BatchOps: ops}, + ) + require.NoError(t, err) + require.NoError(t, view.CommitToDB(context.Background())) + } + + endRoot, err := serverDB.GetMerkleRoot(context.Background()) + require.NoError(t, err) + + fakeRootID := ids.GenerateTestID() + + tests := map[string]struct { + db DB + request *pb.SyncGetChangeProofRequest + modifyChangeProofResponse 
func(*merkledb.ChangeProof) + modifyRangeProofResponse func(*merkledb.RangeProof) + expectedErr error + expectedResponseLen int + expectRangeProof bool // Otherwise expect change proof + }{ + "proof restricted by BytesLimit": { + request: &pb.SyncGetChangeProofRequest{ + StartRootHash: startRoot[:], + EndRootHash: endRoot[:], + KeyLimit: defaultRequestKeyLimit, + BytesLimit: 10000, + }, + }, + "full response for small (single request) trie": { + request: &pb.SyncGetChangeProofRequest{ + StartRootHash: startRoot[:], + EndRootHash: endRoot[:], + KeyLimit: defaultRequestKeyLimit, + BytesLimit: defaultRequestByteSizeLimit, + }, + expectedResponseLen: defaultRequestKeyLimit, + }, + "too many keys in response": { + request: &pb.SyncGetChangeProofRequest{ + StartRootHash: startRoot[:], + EndRootHash: endRoot[:], + KeyLimit: defaultRequestKeyLimit, + BytesLimit: defaultRequestByteSizeLimit, + }, + modifyChangeProofResponse: func(response *merkledb.ChangeProof) { + response.KeyChanges = append(response.KeyChanges, make([]merkledb.KeyChange, defaultRequestKeyLimit)...) 
+ }, + expectedErr: errTooManyKeys, + }, + "partial response to request for entire trie (full leaf limit)": { + request: &pb.SyncGetChangeProofRequest{ + StartRootHash: startRoot[:], + EndRootHash: endRoot[:], + KeyLimit: defaultRequestKeyLimit, + BytesLimit: defaultRequestByteSizeLimit, + }, + expectedResponseLen: defaultRequestKeyLimit, + }, + "removed first key in response": { + request: &pb.SyncGetChangeProofRequest{ + StartRootHash: startRoot[:], + EndRootHash: endRoot[:], + KeyLimit: defaultRequestKeyLimit, + BytesLimit: defaultRequestByteSizeLimit, + }, + modifyChangeProofResponse: func(response *merkledb.ChangeProof) { + response.KeyChanges = response.KeyChanges[1:] + }, + expectedErr: errInvalidChangeProof, + }, + "removed key from middle of response": { + request: &pb.SyncGetChangeProofRequest{ + StartRootHash: startRoot[:], + EndRootHash: endRoot[:], + KeyLimit: defaultRequestKeyLimit, + BytesLimit: defaultRequestByteSizeLimit, + }, + modifyChangeProofResponse: func(response *merkledb.ChangeProof) { + response.KeyChanges = append(response.KeyChanges[:100], response.KeyChanges[101:]...) + }, + expectedErr: merkledb.ErrInvalidProof, + }, + "all proof keys removed from response": { + request: &pb.SyncGetChangeProofRequest{ + StartRootHash: startRoot[:], + EndRootHash: endRoot[:], + KeyLimit: defaultRequestKeyLimit, + BytesLimit: defaultRequestByteSizeLimit, + }, + modifyChangeProofResponse: func(response *merkledb.ChangeProof) { + response.StartProof = nil + response.EndProof = nil }, expectedErr: merkledb.ErrInvalidProof, }, + "range proof response; remove first key": { + request: &pb.SyncGetChangeProofRequest{ + // Server doesn't have the (non-existent) start root + // so should respond with range proof. 
+ StartRootHash: fakeRootID[:], + EndRootHash: endRoot[:], + KeyLimit: defaultRequestKeyLimit, + BytesLimit: defaultRequestByteSizeLimit, + }, + modifyChangeProofResponse: nil, + modifyRangeProofResponse: func(response *merkledb.RangeProof) { + response.KeyValues = response.KeyValues[1:] + }, + expectedErr: errInvalidRangeProof, + expectRangeProof: true, + }, } for name, test := range tests { t.Run(name, func(t *testing.T) { require := require.New(t) - proof, err := sendRequest(t, test.db, test.request, 1, test.modifyResponse) + + // Ensure test is well-formed. + if test.expectRangeProof { + require.Nil(test.modifyChangeProofResponse) + } else { + require.Nil(test.modifyRangeProofResponse) + } + + changeOrRangeProof, err := sendChangeProofRequest( + t, + serverDB, + clientDB, + test.request, + 1, + test.modifyChangeProofResponse, + test.modifyRangeProofResponse, + ) + require.ErrorIs(err, test.expectedErr) if test.expectedErr != nil { - require.ErrorIs(err, test.expectedErr) return } + + if test.expectRangeProof { + require.NotNil(changeOrRangeProof.RangeProof) + require.Nil(changeOrRangeProof.ChangeProof) + } else { + require.NotNil(changeOrRangeProof.ChangeProof) + require.Nil(changeOrRangeProof.RangeProof) + } + + if test.expectedResponseLen > 0 { + if test.expectRangeProof { + require.LessOrEqual(len(changeOrRangeProof.RangeProof.KeyValues), test.expectedResponseLen) + } else { + require.LessOrEqual(len(changeOrRangeProof.ChangeProof.KeyChanges), test.expectedResponseLen) + } + } + + var bytes []byte + if test.expectRangeProof { + bytes, err = proto.Marshal(&pb.SyncGetChangeProofResponse{ + Response: &pb.SyncGetChangeProofResponse_RangeProof{ + RangeProof: changeOrRangeProof.RangeProof.ToProto(), + }, + }) + } else { + bytes, err = proto.Marshal(&pb.SyncGetChangeProofResponse{ + Response: &pb.SyncGetChangeProofResponse_ChangeProof{ + ChangeProof: changeOrRangeProof.ChangeProof.ToProto(), + }, + }) + } require.NoError(err) - require.Len(proof.KeyValues, 
test.expectedResponseLen) + require.LessOrEqual(len(bytes), int(test.request.BytesLimit)) }) } } -func TestRetries(t *testing.T) { - r := rand.New(rand.NewSource(1)) // #nosec G404 +func TestRangeProofRetries(t *testing.T) { + now := time.Now().UnixNano() + t.Logf("seed: %d", now) + r := rand.New(rand.NewSource(now)) // #nosec G404 require := require.New(t) - keyCount := defaultLeafRequestLimit + keyCount := defaultRequestKeyLimit db, _, err := generateTrieWithMinKeyLen(t, r, keyCount, 1) require.NoError(err) root, err := db.GetMerkleRoot(context.Background()) require.NoError(err) maxRequests := 4 - request := &RangeProofRequest{ - Root: root, - Limit: uint16(keyCount), + request := &pb.SyncGetRangeProofRequest{ + RootHash: root[:], + KeyLimit: uint32(keyCount), + BytesLimit: defaultRequestByteSizeLimit, } responseCount := 0 @@ -278,9 +730,48 @@ func TestRetries(t *testing.T) { response.KeyValues = nil } } - proof, err := sendRequest(t, db, request, uint32(maxRequests), modifyResponse) + proof, err := sendRangeProofRequest(t, db, request, maxRequests, modifyResponse) require.NoError(err) require.Len(proof.KeyValues, keyCount) require.Equal(responseCount, maxRequests) // check the client performed retries. } + +// Test that a failure to send an AppRequest is propagated +// and returned by GetRangeProof and GetChangeProof. 
+func TestAppRequestSendFailed(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + networkClient := NewMockNetworkClient(ctrl) + + client, err := NewClient( + &ClientConfig{ + NetworkClient: networkClient, + Log: logging.NoLog{}, + Metrics: &mockMetrics{}, + BranchFactor: merkledb.BranchFactor16, + }, + ) + require.NoError(err) + + // Mock failure to send app request + networkClient.EXPECT().RequestAny( + gomock.Any(), + gomock.Any(), + ).Return(ids.EmptyNodeID, nil, errAppSendFailed).Times(2) + + _, err = client.GetChangeProof( + context.Background(), + &pb.SyncGetChangeProofRequest{}, + nil, // database is unused + ) + require.ErrorIs(err, errAppSendFailed) + + _, err = client.GetRangeProof( + context.Background(), + &pb.SyncGetRangeProofRequest{}, + ) + require.ErrorIs(err, errAppSendFailed) +} diff --git a/avalanchego/x/sync/codec.go b/avalanchego/x/sync/codec.go deleted file mode 100644 index 68aada26..00000000 --- a/avalanchego/x/sync/codec.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package sync - -import ( - "github.com/ava-labs/avalanchego/codec" - "github.com/ava-labs/avalanchego/codec/linearcodec" - "github.com/ava-labs/avalanchego/utils/units" - "github.com/ava-labs/avalanchego/utils/wrappers" -) - -const ( - Version = 0 // TODO danlaine unexport this - maxMessageSize = 1 * units.MiB -) - -var syncCodec codec.Manager - -func init() { - syncCodec = codec.NewManager(maxMessageSize) - c := linearcodec.NewDefault() - - errs := wrappers.Errs{} - errs.Add( - c.RegisterType(&ChangeProofRequest{}), - c.RegisterType(&RangeProofRequest{}), - syncCodec.RegisterCodec(Version, c), - ) - - if errs.Errored() { - panic(errs.Err) - } -} diff --git a/avalanchego/x/sync/db.go b/avalanchego/x/sync/db.go new file mode 100644 index 00000000..5ed9061b --- /dev/null +++ b/avalanchego/x/sync/db.go @@ -0,0 +1,14 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package sync + +import "github.com/ava-labs/avalanchego/x/merkledb" + +type DB interface { + merkledb.Clearer + merkledb.MerkleRootGetter + merkledb.ProofGetter + merkledb.ChangeProofer + merkledb.RangeProofer +} diff --git a/avalanchego/x/sync/g_db/db_client.go b/avalanchego/x/sync/g_db/db_client.go new file mode 100644 index 00000000..37b33397 --- /dev/null +++ b/avalanchego/x/sync/g_db/db_client.go @@ -0,0 +1,193 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package gdb + +import ( + "context" + "errors" + + "google.golang.org/protobuf/types/known/emptypb" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/maybe" + "github.com/ava-labs/avalanchego/x/merkledb" + "github.com/ava-labs/avalanchego/x/sync" + + pb "github.com/ava-labs/avalanchego/proto/pb/sync" +) + +var _ sync.DB = (*DBClient)(nil) + +func NewDBClient(client pb.DBClient) *DBClient { + return &DBClient{ + client: client, + } +} + +type DBClient struct { + client pb.DBClient +} + +func (c *DBClient) GetMerkleRoot(ctx context.Context) (ids.ID, error) { + resp, err := c.client.GetMerkleRoot(ctx, &emptypb.Empty{}) + if err != nil { + return ids.ID{}, err + } + return ids.ToID(resp.RootHash) +} + +func (c *DBClient) GetChangeProof( + ctx context.Context, + startRootID ids.ID, + endRootID ids.ID, + startKey maybe.Maybe[[]byte], + endKey maybe.Maybe[[]byte], + keyLimit int, +) (*merkledb.ChangeProof, error) { + if endRootID == ids.Empty { + return nil, merkledb.ErrEmptyProof + } + + resp, err := c.client.GetChangeProof(ctx, &pb.GetChangeProofRequest{ + StartRootHash: startRootID[:], + EndRootHash: endRootID[:], + StartKey: &pb.MaybeBytes{ + IsNothing: startKey.IsNothing(), + Value: startKey.Value(), + }, + EndKey: &pb.MaybeBytes{ + IsNothing: endKey.IsNothing(), + Value: endKey.Value(), + }, + KeyLimit: uint32(keyLimit), + }) + if err != nil { + return nil, err + } + + // TODO handle merkledb.ErrInvalidMaxLength + // TODO disambiguate between the root not being present due to + // the end root not being present and the start root not being + // present before the end root. i.e. ErrNoEndRoot vs ErrInsufficientHistory. 
+ if resp.GetRootNotPresent() { + return nil, merkledb.ErrInsufficientHistory + } + + var proof merkledb.ChangeProof + if err := proof.UnmarshalProto(resp.GetChangeProof()); err != nil { + return nil, err + } + return &proof, nil +} + +func (c *DBClient) VerifyChangeProof( + ctx context.Context, + proof *merkledb.ChangeProof, + startKey maybe.Maybe[[]byte], + endKey maybe.Maybe[[]byte], + expectedRootID ids.ID, +) error { + resp, err := c.client.VerifyChangeProof(ctx, &pb.VerifyChangeProofRequest{ + Proof: proof.ToProto(), + StartKey: &pb.MaybeBytes{ + Value: startKey.Value(), + IsNothing: startKey.IsNothing(), + }, + EndKey: &pb.MaybeBytes{ + Value: endKey.Value(), + IsNothing: endKey.IsNothing(), + }, + ExpectedRootHash: expectedRootID[:], + }) + if err != nil { + return err + } + + // TODO there's probably a better way to do this. + if len(resp.Error) == 0 { + return nil + } + return errors.New(resp.Error) +} + +func (c *DBClient) CommitChangeProof(ctx context.Context, proof *merkledb.ChangeProof) error { + _, err := c.client.CommitChangeProof(ctx, &pb.CommitChangeProofRequest{ + Proof: proof.ToProto(), + }) + return err +} + +func (c *DBClient) GetProof(ctx context.Context, key []byte) (*merkledb.Proof, error) { + resp, err := c.client.GetProof(ctx, &pb.GetProofRequest{ + Key: key, + }) + if err != nil { + return nil, err + } + + var proof merkledb.Proof + if err := proof.UnmarshalProto(resp.Proof); err != nil { + return nil, err + } + return &proof, nil +} + +func (c *DBClient) GetRangeProofAtRoot( + ctx context.Context, + rootID ids.ID, + startKey maybe.Maybe[[]byte], + endKey maybe.Maybe[[]byte], + keyLimit int, +) (*merkledb.RangeProof, error) { + if rootID == ids.Empty { + return nil, merkledb.ErrEmptyProof + } + + resp, err := c.client.GetRangeProof(ctx, &pb.GetRangeProofRequest{ + RootHash: rootID[:], + StartKey: &pb.MaybeBytes{ + IsNothing: startKey.IsNothing(), + Value: startKey.Value(), + }, + EndKey: &pb.MaybeBytes{ + IsNothing: endKey.IsNothing(), + 
Value: endKey.Value(), + }, + KeyLimit: uint32(keyLimit), + }) + if err != nil { + return nil, err + } + + var proof merkledb.RangeProof + if err := proof.UnmarshalProto(resp.Proof); err != nil { + return nil, err + } + return &proof, nil +} + +func (c *DBClient) CommitRangeProof( + ctx context.Context, + startKey maybe.Maybe[[]byte], + endKey maybe.Maybe[[]byte], + proof *merkledb.RangeProof, +) error { + _, err := c.client.CommitRangeProof(ctx, &pb.CommitRangeProofRequest{ + StartKey: &pb.MaybeBytes{ + IsNothing: startKey.IsNothing(), + Value: startKey.Value(), + }, + EndKey: &pb.MaybeBytes{ + IsNothing: endKey.IsNothing(), + Value: endKey.Value(), + }, + RangeProof: proof.ToProto(), + }) + return err +} + +func (c *DBClient) Clear() error { + _, err := c.client.Clear(context.Background(), &emptypb.Empty{}) + return err +} diff --git a/avalanchego/x/sync/g_db/db_server.go b/avalanchego/x/sync/g_db/db_server.go new file mode 100644 index 00000000..a65e8a4f --- /dev/null +++ b/avalanchego/x/sync/g_db/db_server.go @@ -0,0 +1,222 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package gdb + +import ( + "context" + "errors" + + "google.golang.org/protobuf/types/known/emptypb" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/maybe" + "github.com/ava-labs/avalanchego/x/merkledb" + "github.com/ava-labs/avalanchego/x/sync" + + pb "github.com/ava-labs/avalanchego/proto/pb/sync" +) + +var _ pb.DBServer = (*DBServer)(nil) + +func NewDBServer(db sync.DB) *DBServer { + return &DBServer{ + db: db, + } +} + +type DBServer struct { + pb.UnsafeDBServer + + db sync.DB +} + +func (s *DBServer) GetMerkleRoot( + ctx context.Context, + _ *emptypb.Empty, +) (*pb.GetMerkleRootResponse, error) { + root, err := s.db.GetMerkleRoot(ctx) + if err != nil { + return nil, err + } + return &pb.GetMerkleRootResponse{ + RootHash: root[:], + }, nil +} + +func (s *DBServer) GetChangeProof( + ctx context.Context, + req *pb.GetChangeProofRequest, +) (*pb.GetChangeProofResponse, error) { + startRootID, err := ids.ToID(req.StartRootHash) + if err != nil { + return nil, err + } + endRootID, err := ids.ToID(req.EndRootHash) + if err != nil { + return nil, err + } + start := maybe.Nothing[[]byte]() + if req.StartKey != nil && !req.StartKey.IsNothing { + start = maybe.Some(req.StartKey.Value) + } + end := maybe.Nothing[[]byte]() + if req.EndKey != nil && !req.EndKey.IsNothing { + end = maybe.Some(req.EndKey.Value) + } + + changeProof, err := s.db.GetChangeProof( + ctx, + startRootID, + endRootID, + start, + end, + int(req.KeyLimit), + ) + if err != nil { + if !errors.Is(err, merkledb.ErrInsufficientHistory) { + return nil, err + } + return &pb.GetChangeProofResponse{ + Response: &pb.GetChangeProofResponse_RootNotPresent{ + RootNotPresent: true, + }, + }, nil + } + + return &pb.GetChangeProofResponse{ + Response: &pb.GetChangeProofResponse_ChangeProof{ + ChangeProof: changeProof.ToProto(), + }, + }, nil +} + +func (s *DBServer) VerifyChangeProof( + ctx context.Context, + req *pb.VerifyChangeProofRequest, +) (*pb.VerifyChangeProofResponse, 
error) { + var proof merkledb.ChangeProof + if err := proof.UnmarshalProto(req.Proof); err != nil { + return nil, err + } + + rootID, err := ids.ToID(req.ExpectedRootHash) + if err != nil { + return nil, err + } + startKey := maybe.Nothing[[]byte]() + if req.StartKey != nil && !req.StartKey.IsNothing { + startKey = maybe.Some(req.StartKey.Value) + } + endKey := maybe.Nothing[[]byte]() + if req.EndKey != nil && !req.EndKey.IsNothing { + endKey = maybe.Some(req.EndKey.Value) + } + + // TODO there's probably a better way to do this. + var errString string + if err := s.db.VerifyChangeProof(ctx, &proof, startKey, endKey, rootID); err != nil { + errString = err.Error() + } + return &pb.VerifyChangeProofResponse{ + Error: errString, + }, nil +} + +func (s *DBServer) CommitChangeProof( + ctx context.Context, + req *pb.CommitChangeProofRequest, +) (*emptypb.Empty, error) { + var proof merkledb.ChangeProof + if err := proof.UnmarshalProto(req.Proof); err != nil { + return nil, err + } + + err := s.db.CommitChangeProof(ctx, &proof) + return &emptypb.Empty{}, err +} + +func (s *DBServer) GetProof( + ctx context.Context, + req *pb.GetProofRequest, +) (*pb.GetProofResponse, error) { + proof, err := s.db.GetProof(ctx, req.Key) + if err != nil { + return nil, err + } + + return &pb.GetProofResponse{ + Proof: proof.ToProto(), + }, nil +} + +func (s *DBServer) GetRangeProof( + ctx context.Context, + req *pb.GetRangeProofRequest, +) (*pb.GetRangeProofResponse, error) { + rootID, err := ids.ToID(req.RootHash) + if err != nil { + return nil, err + } + start := maybe.Nothing[[]byte]() + if req.StartKey != nil && !req.StartKey.IsNothing { + start = maybe.Some(req.StartKey.Value) + } + end := maybe.Nothing[[]byte]() + if req.EndKey != nil && !req.EndKey.IsNothing { + end = maybe.Some(req.EndKey.Value) + } + proof, err := s.db.GetRangeProofAtRoot(ctx, rootID, start, end, int(req.KeyLimit)) + if err != nil { + return nil, err + } + + protoProof := &pb.GetRangeProofResponse{ + Proof: 
&pb.RangeProof{ + StartProof: make([]*pb.ProofNode, len(proof.StartProof)), + EndProof: make([]*pb.ProofNode, len(proof.EndProof)), + KeyValues: make([]*pb.KeyValue, len(proof.KeyValues)), + }, + } + for i, node := range proof.StartProof { + protoProof.Proof.StartProof[i] = node.ToProto() + } + for i, node := range proof.EndProof { + protoProof.Proof.EndProof[i] = node.ToProto() + } + for i, kv := range proof.KeyValues { + protoProof.Proof.KeyValues[i] = &pb.KeyValue{ + Key: kv.Key, + Value: kv.Value, + } + } + + return protoProof, nil +} + +func (s *DBServer) CommitRangeProof( + ctx context.Context, + req *pb.CommitRangeProofRequest, +) (*emptypb.Empty, error) { + var proof merkledb.RangeProof + if err := proof.UnmarshalProto(req.RangeProof); err != nil { + return nil, err + } + + start := maybe.Nothing[[]byte]() + if req.StartKey != nil && !req.StartKey.IsNothing { + start = maybe.Some(req.StartKey.Value) + } + + end := maybe.Nothing[[]byte]() + if req.EndKey != nil && !req.EndKey.IsNothing { + end = maybe.Some(req.EndKey.Value) + } + + err := s.db.CommitRangeProof(ctx, start, end, &proof) + return &emptypb.Empty{}, err +} + +func (s *DBServer) Clear(context.Context, *emptypb.Empty) (*emptypb.Empty, error) { + return &emptypb.Empty{}, s.db.Clear() +} diff --git a/avalanchego/x/sync/manager.go b/avalanchego/x/sync/manager.go new file mode 100644 index 00000000..fa70c03f --- /dev/null +++ b/avalanchego/x/sync/manager.go @@ -0,0 +1,879 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package sync + +import ( + "bytes" + "context" + "errors" + "fmt" + "slices" + "sync" + + "go.uber.org/zap" + "golang.org/x/exp/maps" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/maybe" + "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/x/merkledb" + + pb "github.com/ava-labs/avalanchego/proto/pb/sync" +) + +const ( + defaultRequestKeyLimit = maxKeyValuesLimit + defaultRequestByteSizeLimit = maxByteSizeLimit +) + +var ( + ErrAlreadyStarted = errors.New("cannot start a Manager that has already been started") + ErrAlreadyClosed = errors.New("Manager is closed") + ErrNoClientProvided = errors.New("client is a required field of the sync config") + ErrNoDatabaseProvided = errors.New("sync database is a required field of the sync config") + ErrNoLogProvided = errors.New("log is a required field of the sync config") + ErrZeroWorkLimit = errors.New("simultaneous work limit must be greater than 0") + ErrFinishedWithUnexpectedRoot = errors.New("finished syncing with an unexpected root") +) + +type priority byte + +// Note that [highPriority] > [medPriority] > [lowPriority]. +const ( + lowPriority priority = iota + 1 + medPriority + highPriority +) + +// Signifies that we should sync the range [start, end]. +// nil [start] means there is no lower bound. +// nil [end] means there is no upper bound. +// [localRootID] is the ID of the root of this range in our database. +// If we have no local root for this range, [localRootID] is ids.Empty. 
+type workItem struct { + start maybe.Maybe[[]byte] + end maybe.Maybe[[]byte] + priority priority + localRootID ids.ID +} + +func newWorkItem(localRootID ids.ID, start maybe.Maybe[[]byte], end maybe.Maybe[[]byte], priority priority) *workItem { + return &workItem{ + localRootID: localRootID, + start: start, + end: end, + priority: priority, + } +} + +type Manager struct { + // Must be held when accessing [config.TargetRoot]. + syncTargetLock sync.RWMutex + config ManagerConfig + + workLock sync.Mutex + // The number of work items currently being processed. + // Namely, the number of goroutines executing [doWork]. + // [workLock] must be held when accessing [processingWorkItems]. + processingWorkItems int + // [workLock] must be held while accessing [unprocessedWork]. + unprocessedWork *workHeap + // Signalled when: + // - An item is added to [unprocessedWork]. + // - An item is added to [processedWork]. + // - Close() is called. + // [workLock] is its inner lock. + unprocessedWorkCond sync.Cond + // [workLock] must be held while accessing [processedWork]. + processedWork *workHeap + + // When this is closed: + // - [closed] is true. + // - [cancelCtx] was called. + // - [workToBeDone] and [completedWork] are closed. + doneChan chan struct{} + + errLock sync.Mutex + // If non-nil, there was a fatal error. + // [errLock] must be held when accessing [fatalError]. + fatalError error + + // Cancels all currently processing work items. + cancelCtx context.CancelFunc + + // Set to true when StartSyncing is called. 
+ syncing bool + closeOnce sync.Once + tokenSize int +} + +type ManagerConfig struct { + DB DB + Client Client + SimultaneousWorkLimit int + Log logging.Logger + TargetRoot ids.ID + BranchFactor merkledb.BranchFactor +} + +func NewManager(config ManagerConfig) (*Manager, error) { + switch { + case config.Client == nil: + return nil, ErrNoClientProvided + case config.DB == nil: + return nil, ErrNoDatabaseProvided + case config.Log == nil: + return nil, ErrNoLogProvided + case config.SimultaneousWorkLimit == 0: + return nil, ErrZeroWorkLimit + } + if err := config.BranchFactor.Valid(); err != nil { + return nil, err + } + + m := &Manager{ + config: config, + doneChan: make(chan struct{}), + unprocessedWork: newWorkHeap(), + processedWork: newWorkHeap(), + tokenSize: merkledb.BranchFactorToTokenSize[config.BranchFactor], + } + m.unprocessedWorkCond.L = &m.workLock + + return m, nil +} + +func (m *Manager) Start(ctx context.Context) error { + m.workLock.Lock() + defer m.workLock.Unlock() + + if m.syncing { + return ErrAlreadyStarted + } + + m.config.Log.Info("starting sync", zap.Stringer("target root", m.config.TargetRoot)) + + // Add work item to fetch the entire key range. + // Note that this will be the first work item to be processed. + m.unprocessedWork.Insert(newWorkItem(ids.Empty, maybe.Nothing[[]byte](), maybe.Nothing[[]byte](), lowPriority)) + + m.syncing = true + ctx, m.cancelCtx = context.WithCancel(ctx) + + go m.sync(ctx) + return nil +} + +// sync awaits signal on [m.unprocessedWorkCond], which indicates that there +// is work to do or syncing completes. If there is work, sync will dispatch a goroutine to do +// the work. +func (m *Manager) sync(ctx context.Context) { + defer func() { + // Invariant: [m.workLock] is held when this goroutine begins. + m.close() + m.workLock.Unlock() + }() + + // Keep doing work until we're closed, done or [ctx] is canceled. + m.workLock.Lock() + for { + // Invariant: [m.workLock] is held here. 
+ switch { + case ctx.Err() != nil: + return // [m.workLock] released by defer. + case m.processingWorkItems >= m.config.SimultaneousWorkLimit: + // We're already processing the maximum number of work items. + // Wait until one of them finishes. + m.unprocessedWorkCond.Wait() + case m.unprocessedWork.Len() == 0: + if m.processingWorkItems == 0 { + // There's no work to do, and there are no work items being processed + // which could cause work to be added, so we're done. + return // [m.workLock] released by defer. + } + // There's no work to do. + // Note that if [m].Close() is called, or [ctx] is canceled, + // Close() will be called, which will broadcast on [m.unprocessedWorkCond], + // which will cause Wait() to return, and this goroutine to exit. + m.unprocessedWorkCond.Wait() + default: + m.processingWorkItems++ + work := m.unprocessedWork.GetWork() + go m.doWork(ctx, work) + } + } +} + +// Close will stop the syncing process +func (m *Manager) Close() { + m.workLock.Lock() + defer m.workLock.Unlock() + + m.close() +} + +// close is called when there is a fatal error or sync is complete. +// [workLock] must be held +func (m *Manager) close() { + m.closeOnce.Do(func() { + // Don't process any more work items. + // Drop currently processing work items. + if m.cancelCtx != nil { + m.cancelCtx() + } + + // ensure any goroutines waiting for work from the heaps gets released + m.unprocessedWork.Close() + m.unprocessedWorkCond.Signal() + m.processedWork.Close() + + // signal all code waiting on the sync to complete + close(m.doneChan) + }) +} + +// Processes [item] by fetching and applying a change or range proof. +// Assumes [m.workLock] is not held. 
+func (m *Manager) doWork(ctx context.Context, work *workItem) { + defer func() { + m.workLock.Lock() + defer m.workLock.Unlock() + + m.processingWorkItems-- + m.unprocessedWorkCond.Signal() + }() + + if work.localRootID == ids.Empty { + // the keys in this range have not been downloaded, so get all key/values + m.getAndApplyRangeProof(ctx, work) + } else { + // the keys in this range have already been downloaded, but the root changed, so get all changes + m.getAndApplyChangeProof(ctx, work) + } +} + +// Fetch and apply the change proof given by [work]. +// Assumes [m.workLock] is not held. +func (m *Manager) getAndApplyChangeProof(ctx context.Context, work *workItem) { + targetRootID := m.getTargetRoot() + + if work.localRootID == targetRootID { + // Start root is the same as the end root, so we're done. + m.completeWorkItem(ctx, work, work.end, targetRootID, nil) + return + } + + if targetRootID == ids.Empty { + // The trie is empty after this change. + // Delete all the key-value pairs in the range. + if err := m.config.DB.Clear(); err != nil { + m.setError(err) + return + } + work.start = maybe.Nothing[[]byte]() + m.completeWorkItem(ctx, work, maybe.Nothing[[]byte](), targetRootID, nil) + return + } + + changeOrRangeProof, err := m.config.Client.GetChangeProof( + ctx, + &pb.SyncGetChangeProofRequest{ + StartRootHash: work.localRootID[:], + EndRootHash: targetRootID[:], + StartKey: &pb.MaybeBytes{ + Value: work.start.Value(), + IsNothing: work.start.IsNothing(), + }, + EndKey: &pb.MaybeBytes{ + Value: work.end.Value(), + IsNothing: work.end.IsNothing(), + }, + KeyLimit: defaultRequestKeyLimit, + BytesLimit: defaultRequestByteSizeLimit, + }, + m.config.DB, + ) + if err != nil { + m.setError(err) + return + } + + select { + case <-m.doneChan: + // If we're closed, don't apply the proof. + return + default: + } + + if changeOrRangeProof.ChangeProof != nil { + // The server had sufficient history to respond with a change proof. 
+ changeProof := changeOrRangeProof.ChangeProof + largestHandledKey := work.end + // if the proof wasn't empty, apply changes to the sync DB + if len(changeProof.KeyChanges) > 0 { + if err := m.config.DB.CommitChangeProof(ctx, changeProof); err != nil { + m.setError(err) + return + } + largestHandledKey = maybe.Some(changeProof.KeyChanges[len(changeProof.KeyChanges)-1].Key) + } + + m.completeWorkItem(ctx, work, largestHandledKey, targetRootID, changeProof.EndProof) + return + } + + // The server responded with a range proof. + rangeProof := changeOrRangeProof.RangeProof + largestHandledKey := work.end + if len(rangeProof.KeyValues) > 0 { + // Add all the key-value pairs we got to the database. + if err := m.config.DB.CommitRangeProof(ctx, work.start, work.end, rangeProof); err != nil { + m.setError(err) + return + } + largestHandledKey = maybe.Some(rangeProof.KeyValues[len(rangeProof.KeyValues)-1].Key) + } + + m.completeWorkItem(ctx, work, largestHandledKey, targetRootID, rangeProof.EndProof) +} + +// Fetch and apply the range proof given by [work]. +// Assumes [m.workLock] is not held. +func (m *Manager) getAndApplyRangeProof(ctx context.Context, work *workItem) { + targetRootID := m.getTargetRoot() + + if targetRootID == ids.Empty { + if err := m.config.DB.Clear(); err != nil { + m.setError(err) + return + } + work.start = maybe.Nothing[[]byte]() + m.completeWorkItem(ctx, work, maybe.Nothing[[]byte](), targetRootID, nil) + return + } + + proof, err := m.config.Client.GetRangeProof(ctx, + &pb.SyncGetRangeProofRequest{ + RootHash: targetRootID[:], + StartKey: &pb.MaybeBytes{ + Value: work.start.Value(), + IsNothing: work.start.IsNothing(), + }, + EndKey: &pb.MaybeBytes{ + Value: work.end.Value(), + IsNothing: work.end.IsNothing(), + }, + KeyLimit: defaultRequestKeyLimit, + BytesLimit: defaultRequestByteSizeLimit, + }, + ) + if err != nil { + m.setError(err) + return + } + + select { + case <-m.doneChan: + // If we're closed, don't apply the proof. 
+ return + default: + } + + largestHandledKey := work.end + + // Replace all the key-value pairs in the DB from start to end with values from the response. + if err := m.config.DB.CommitRangeProof(ctx, work.start, work.end, proof); err != nil { + m.setError(err) + return + } + + if len(proof.KeyValues) > 0 { + largestHandledKey = maybe.Some(proof.KeyValues[len(proof.KeyValues)-1].Key) + } + + m.completeWorkItem(ctx, work, largestHandledKey, targetRootID, proof.EndProof) +} + +// findNextKey returns the start of the key range that should be fetched next +// given that we just received a range/change proof that proved a range of +// key-value pairs ending at [lastReceivedKey]. +// +// [rangeEnd] is the end of the range that we want to fetch. +// +// Returns Nothing if there are no more keys to fetch in [lastReceivedKey, rangeEnd]. +// +// [endProof] is the end proof of the last proof received. +// +// Invariant: [lastReceivedKey] < [rangeEnd]. +// If [rangeEnd] is Nothing it's considered > [lastReceivedKey]. +func (m *Manager) findNextKey( + ctx context.Context, + lastReceivedKey []byte, + rangeEnd maybe.Maybe[[]byte], + endProof []merkledb.ProofNode, +) (maybe.Maybe[[]byte], error) { + if len(endProof) == 0 { + // We try to find the next key to fetch by looking at the end proof. + // If the end proof is empty, we have no information to use. + // Start fetching from the next key after [lastReceivedKey]. + nextKey := lastReceivedKey + nextKey = append(nextKey, 0) + return maybe.Some(nextKey), nil + } + + // We want the first key larger than the [lastReceivedKey]. + // This is done by taking two proofs for the same key + // (one that was just received as part of a proof, and one from the local db) + // and traversing them from the longest key to the shortest key. + // For each node in these proofs, compare if the children of that node exist + // or have the same ID in the other proof. 
+ proofKeyPath := merkledb.ToKey(lastReceivedKey) + + // If the received proof is an exclusion proof, the last node may be for a + // key that is after the [lastReceivedKey]. + // If the last received node's key is after the [lastReceivedKey], it can + // be removed to obtain a valid proof for a prefix of the [lastReceivedKey]. + if !proofKeyPath.HasPrefix(endProof[len(endProof)-1].Key) { + endProof = endProof[:len(endProof)-1] + // update the proofKeyPath to be for the prefix + proofKeyPath = endProof[len(endProof)-1].Key + } + + // get a proof for the same key as the received proof from the local db + localProofOfKey, err := m.config.DB.GetProof(ctx, proofKeyPath.Bytes()) + if err != nil { + return maybe.Nothing[[]byte](), err + } + localProofNodes := localProofOfKey.Path + + // The local proof may also be an exclusion proof with an extra node. + // Remove this extra node if it exists to get a proof of the same key as the received proof + if !proofKeyPath.HasPrefix(localProofNodes[len(localProofNodes)-1].Key) { + localProofNodes = localProofNodes[:len(localProofNodes)-1] + } + + nextKey := maybe.Nothing[[]byte]() + + // Add sentinel node back into the localProofNodes, if it is missing. + // Required to ensure that a common node exists in both proofs + if len(localProofNodes) > 0 && localProofNodes[0].Key.Length() != 0 { + sentinel := merkledb.ProofNode{ + Children: map[byte]ids.ID{ + localProofNodes[0].Key.Token(0, m.tokenSize): ids.Empty, + }, + } + localProofNodes = append([]merkledb.ProofNode{sentinel}, localProofNodes...) + } + + // Add sentinel node back into the endProof, if it is missing. + // Required to ensure that a common node exists in both proofs + if len(endProof) > 0 && endProof[0].Key.Length() != 0 { + sentinel := merkledb.ProofNode{ + Children: map[byte]ids.ID{ + endProof[0].Key.Token(0, m.tokenSize): ids.Empty, + }, + } + endProof = append([]merkledb.ProofNode{sentinel}, endProof...) 
+ } + + localProofNodeIndex := len(localProofNodes) - 1 + receivedProofNodeIndex := len(endProof) - 1 + + // traverse the two proofs from the deepest nodes up to the sentinel node until a difference is found + for localProofNodeIndex >= 0 && receivedProofNodeIndex >= 0 && nextKey.IsNothing() { + localProofNode := localProofNodes[localProofNodeIndex] + receivedProofNode := endProof[receivedProofNodeIndex] + + // [deepestNode] is the proof node with the longest key (deepest in the trie) in the + // two proofs that hasn't been handled yet. + // [deepestNodeFromOtherProof] is the proof node from the other proof with + // the same key/depth if it exists, nil otherwise. + var deepestNode, deepestNodeFromOtherProof *merkledb.ProofNode + + // select the deepest proof node from the two proofs + switch { + case receivedProofNode.Key.Length() > localProofNode.Key.Length(): + // there was a branch node in the received proof that isn't in the local proof + // see if the received proof node has children not present in the local proof + deepestNode = &receivedProofNode + + // we have dealt with this received node, so move on to the next received node + receivedProofNodeIndex-- + + case localProofNode.Key.Length() > receivedProofNode.Key.Length(): + // there was a branch node in the local proof that isn't in the received proof + // see if the local proof node has children not present in the received proof + deepestNode = &localProofNode + + // we have dealt with this local node, so move on to the next local node + localProofNodeIndex-- + + default: + // the two nodes are at the same depth + // see if any of the children present in the local proof node are different + // from the children in the received proof node + deepestNode = &localProofNode + deepestNodeFromOtherProof = &receivedProofNode + + // we have dealt with this local node and received node, so move on to the next nodes + localProofNodeIndex-- + receivedProofNodeIndex-- + } + + // We only want to look at the children 
with keys greater than the proofKey. + // The proof key has the deepest node's key as a prefix, + // so only the next token of the proof key needs to be considered. + + // If the deepest node has the same key as [proofKeyPath], + // then all of its children have keys greater than the proof key, + // so we can start at the 0 token. + startingChildToken := 0 + + // If the deepest node has a key shorter than the key being proven, + // we can look at the next token index of the proof key to determine which of that + // node's children have keys larger than [proofKeyPath]. + // Any child with a token greater than the [proofKeyPath]'s token at that + // index will have a larger key. + if deepestNode.Key.Length() < proofKeyPath.Length() { + startingChildToken = int(proofKeyPath.Token(deepestNode.Key.Length(), m.tokenSize)) + 1 + } + + // determine if there are any differences in the children for the deepest unhandled node of the two proofs + if childIndex, hasDifference := findChildDifference(deepestNode, deepestNodeFromOtherProof, startingChildToken); hasDifference { + nextKey = maybe.Some(deepestNode.Key.Extend(merkledb.ToToken(childIndex, m.tokenSize)).Bytes()) + break + } + } + + // If the nextKey is before or equal to the [lastReceivedKey] + // then we couldn't find a better answer than the [lastReceivedKey]. + // Set the nextKey to [lastReceivedKey] + 0, which is the first key in + // the open range (lastReceivedKey, rangeEnd). 
+ if nextKey.HasValue() && bytes.Compare(nextKey.Value(), lastReceivedKey) <= 0 { + nextKeyVal := slices.Clone(lastReceivedKey) + nextKeyVal = append(nextKeyVal, 0) + nextKey = maybe.Some(nextKeyVal) + } + + // If the [nextKey] is larger than the end of the range, return Nothing to signal that there is no next key in range + if rangeEnd.HasValue() && bytes.Compare(nextKey.Value(), rangeEnd.Value()) >= 0 { + return maybe.Nothing[[]byte](), nil + } + + // the nextKey is within the open range (lastReceivedKey, rangeEnd), so return it + return nextKey, nil +} + +func (m *Manager) Error() error { + m.errLock.Lock() + defer m.errLock.Unlock() + + return m.fatalError +} + +// Wait blocks until one of the following occurs: +// - sync is complete. +// - sync fatally errored. +// - [ctx] is canceled. +// If [ctx] is canceled, returns [ctx].Err(). +func (m *Manager) Wait(ctx context.Context) error { + select { + case <-m.doneChan: + case <-ctx.Done(): + return ctx.Err() + } + + // There was a fatal error. + if err := m.Error(); err != nil { + return err + } + + root, err := m.config.DB.GetMerkleRoot(ctx) + if err != nil { + return err + } + + if targetRootID := m.getTargetRoot(); targetRootID != root { + // This should never happen. 
+ return fmt.Errorf("%w: expected %s, got %s", ErrFinishedWithUnexpectedRoot, targetRootID, root) + } + + m.config.Log.Info("completed", zap.Stringer("root", root)) + return nil +} + +func (m *Manager) UpdateSyncTarget(syncTargetRoot ids.ID) error { + m.syncTargetLock.Lock() + defer m.syncTargetLock.Unlock() + + m.workLock.Lock() + defer m.workLock.Unlock() + + select { + case <-m.doneChan: + return ErrAlreadyClosed + default: + } + + if m.config.TargetRoot == syncTargetRoot { + // the target hasn't changed, so there is nothing to do + return nil + } + + m.config.Log.Debug("updated sync target", zap.Stringer("target", syncTargetRoot)) + m.config.TargetRoot = syncTargetRoot + + // move all completed ranges into the work heap with high priority + shouldSignal := m.processedWork.Len() > 0 + for m.processedWork.Len() > 0 { + // Note that [m.processedWork].Close() hasn't + // been called because we have [m.workLock] + // and we checked that [m.closed] is false. + currentItem := m.processedWork.GetWork() + currentItem.priority = highPriority + m.unprocessedWork.Insert(currentItem) + } + if shouldSignal { + // Only signal once because we only have 1 goroutine + // waiting on [m.unprocessedWorkCond]. + m.unprocessedWorkCond.Signal() + } + return nil +} + +func (m *Manager) getTargetRoot() ids.ID { + m.syncTargetLock.RLock() + defer m.syncTargetLock.RUnlock() + + return m.config.TargetRoot +} + +// Record that there was a fatal error and begin shutting down. +func (m *Manager) setError(err error) { + m.errLock.Lock() + defer m.errLock.Unlock() + + m.config.Log.Error("sync errored", zap.Error(err)) + m.fatalError = err + // Call in goroutine because we might be holding [m.workLock] + // which [m.Close] will try to acquire. + go m.Close() +} + +// Mark that we've fetched all the key-value pairs in the range +// [workItem.start, largestHandledKey] for the trie with root [rootID]. 
+// +// If [workItem.start] is Nothing, then we've fetched all the key-value +// pairs up to and including [largestHandledKey]. +// +// If [largestHandledKey] is Nothing, then we've fetched all the key-value +// pairs at and after [workItem.start]. +// +// [proofOfLargestKey] is the end proof for the range/change proof +// that gave us the range up to and including [largestHandledKey]. +// +// Assumes [m.workLock] is not held. +func (m *Manager) completeWorkItem(ctx context.Context, work *workItem, largestHandledKey maybe.Maybe[[]byte], rootID ids.ID, proofOfLargestKey []merkledb.ProofNode) { + if !maybe.Equal(largestHandledKey, work.end, bytes.Equal) { + // The largest handled key isn't equal to the end of the work item. + // Find the start of the next key range to fetch. + // Note that [largestHandledKey] can't be Nothing. + // Proof: Suppose it is. That means that we got a range/change proof that proved up to the + // greatest key-value pair in the database. That means we requested a proof with no upper + // bound. That is, [workItem.end] is Nothing. Since we're here, [bothNothing] is false, + // which means [workItem.end] isn't Nothing. Contradiction. 
+ nextStartKey, err := m.findNextKey(ctx, largestHandledKey.Value(), work.end, proofOfLargestKey) + if err != nil { + m.setError(err) + return + } + + // nextStartKey being Nothing indicates that the entire range has been completed + if nextStartKey.IsNothing() { + largestHandledKey = work.end + } else { + // the full range wasn't completed, so enqueue a new work item for the range [nextStartKey, workItem.end] + m.enqueueWork(newWorkItem(work.localRootID, nextStartKey, work.end, work.priority)) + largestHandledKey = nextStartKey + } + } + + // Process [work] while holding [syncTargetLock] to ensure that object + // is added to the right queue, even if a target update is triggered + m.syncTargetLock.RLock() + defer m.syncTargetLock.RUnlock() + + stale := m.config.TargetRoot != rootID + if stale { + // the root has changed, so reinsert with high priority + m.enqueueWork(newWorkItem(rootID, work.start, largestHandledKey, highPriority)) + } else { + m.workLock.Lock() + defer m.workLock.Unlock() + + m.processedWork.MergeInsert(newWorkItem(rootID, work.start, largestHandledKey, work.priority)) + } + + // completed the range [work.start, lastKey], log and record in the completed work heap + m.config.Log.Debug("completed range", + zap.Stringer("start", work.start), + zap.Stringer("end", largestHandledKey), + zap.Stringer("rootID", rootID), + zap.Bool("stale", stale), + ) +} + +// Queue the given key range to be fetched and applied. +// If there are sufficiently few unprocessed/processing work items, +// splits the range into two items and queues them both. +// Assumes [m.workLock] is not held. 
+func (m *Manager) enqueueWork(work *workItem) {
+	m.workLock.Lock()
+	defer func() {
+		m.workLock.Unlock()
+		m.unprocessedWorkCond.Signal()
+	}()
+
+	if m.processingWorkItems+m.unprocessedWork.Len() > 2*m.config.SimultaneousWorkLimit {
+		// There are too many work items already, don't split the range
+		m.unprocessedWork.Insert(work)
+		return
+	}
+
+	// Split the remaining range into 2.
+	// Find the middle point.
+	mid := midPoint(work.start, work.end)
+
+	if maybe.Equal(work.start, mid, bytes.Equal) || maybe.Equal(mid, work.end, bytes.Equal) {
+		// The range is too small to split.
+		// If we didn't have this check we would add work items
+		// [start, start] and [start, end]. Since start <= end, this would
+		// violate the invariant of [m.unprocessedWork] and [m.processedWork]
+		// that there are no overlapping ranges.
+		m.unprocessedWork.Insert(work)
+		return
+	}
+
+	// first item gets higher priority than the second to encourage finished ranges to grow
+	// rather than start a new range that is not contiguous with existing completed ranges
+	first := newWorkItem(work.localRootID, work.start, mid, medPriority)
+	second := newWorkItem(work.localRootID, mid, work.end, lowPriority)
+
+	m.unprocessedWork.Insert(first)
+	m.unprocessedWork.Insert(second)
+}
+
+// find the midpoint between two keys
+// start is expected to be less than end
+// Nothing/nil [start] is treated as all 0's
+// Nothing/nil [end] is treated as all 255's
+func midPoint(startMaybe, endMaybe maybe.Maybe[[]byte]) maybe.Maybe[[]byte] {
+	start := startMaybe.Value()
+	end := endMaybe.Value()
+	length := len(start)
+	if len(end) > length {
+		length = len(end)
+	}
+
+	if length == 0 {
+		if endMaybe.IsNothing() {
+			return maybe.Some([]byte{127})
+		} else if len(end) == 0 {
+			return maybe.Nothing[[]byte]()
+		}
+	}
+
+	// This check deals with cases where the end has a 255(or is nothing which is treated as all 255s) and the start key ends 255.
+ // For example, midPoint([255], nothing) should be [255, 127], not [255]. + // The result needs the extra byte added on to the end to deal with the fact that the naive midpoint between 255 and 255 would be 255 + if (len(start) > 0 && start[len(start)-1] == 255) && (len(end) == 0 || end[len(end)-1] == 255) { + length++ + } + + leftover := 0 + midpoint := make([]byte, length+1) + for i := 0; i < length; i++ { + startVal := 0 + if i < len(start) { + startVal = int(start[i]) + } + + endVal := 0 + if endMaybe.IsNothing() { + endVal = 255 + } + if i < len(end) { + endVal = int(end[i]) + } + + total := startVal + endVal + leftover + leftover = 0 + // if total is odd, when we divide, we will lose the .5, + // record that in the leftover for the next digits + if total%2 == 1 { + leftover = 256 + } + + // find the midpoint between the start and the end + total /= 2 + + // larger than byte can hold, so carry over to previous byte + if total >= 256 { + total -= 256 + index := i - 1 + for index > 0 && midpoint[index] == 255 { + midpoint[index] = 0 + index-- + } + midpoint[index]++ + } + midpoint[i] = byte(total) + } + if leftover > 0 { + midpoint[length] = 127 + } else { + midpoint = midpoint[0:length] + } + return maybe.Some(midpoint) +} + +// findChildDifference returns the first child index that is different between node 1 and node 2 if one exists and +// a bool indicating if any difference was found +func findChildDifference(node1, node2 *merkledb.ProofNode, startIndex int) (byte, bool) { + // Children indices >= [startIndex] present in at least one of the nodes. 
+ childIndices := set.Set[byte]{} + for _, node := range []*merkledb.ProofNode{node1, node2} { + if node == nil { + continue + } + for key := range node.Children { + if int(key) >= startIndex { + childIndices.Add(key) + } + } + } + + sortedChildIndices := maps.Keys(childIndices) + slices.Sort(sortedChildIndices) + var ( + child1, child2 ids.ID + ok1, ok2 bool + ) + for _, childIndex := range sortedChildIndices { + if node1 != nil { + child1, ok1 = node1.Children[childIndex] + } + if node2 != nil { + child2, ok2 = node2.Children[childIndex] + } + // if one node has a child and the other doesn't or the children ids don't match, + // return the current child index as the first difference + if (ok1 || ok2) && child1 != child2 { + return childIndex, true + } + } + // there were no differences found + return 0, false +} diff --git a/avalanchego/x/sync/metrics.go b/avalanchego/x/sync/metrics.go index d67d61f6..fb27e6b4 100644 --- a/avalanchego/x/sync/metrics.go +++ b/avalanchego/x/sync/metrics.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package sync @@ -8,12 +8,12 @@ import ( "github.com/prometheus/client_golang/prometheus" - "github.com/ava-labs/avalanchego/utils/wrappers" + "github.com/ava-labs/avalanchego/utils" ) var ( - _ SyncMetrics = &mockMetrics{} - _ SyncMetrics = &metrics{} + _ SyncMetrics = (*mockMetrics)(nil) + _ SyncMetrics = (*metrics)(nil) ) type SyncMetrics interface { @@ -74,13 +74,12 @@ func NewMetrics(namespace string, reg prometheus.Registerer) (SyncMetrics, error Help: "cumulative amount of proof requests that were successful", }), } - errs := wrappers.Errs{} - errs.Add( + err := utils.Err( reg.Register(m.requestsFailed), reg.Register(m.requestsMade), reg.Register(m.requestsSucceeded), ) - return &m, errs.Err + return &m, err } func (m *metrics) RequestFailed() { diff --git a/avalanchego/x/sync/mock_client.go b/avalanchego/x/sync/mock_client.go index 08f02c31..98fa6d69 100644 --- a/avalanchego/x/sync/mock_client.go +++ b/avalanchego/x/sync/mock_client.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/x/sync (interfaces: Client) +// +// Generated by this command: +// +// mockgen -package=sync -destination=x/sync/mock_client.go github.com/ava-labs/avalanchego/x/sync Client +// // Package sync is a generated GoMock package. package sync @@ -11,8 +13,9 @@ import ( context "context" reflect "reflect" + sync "github.com/ava-labs/avalanchego/proto/pb/sync" merkledb "github.com/ava-labs/avalanchego/x/merkledb" - gomock "github.com/golang/mock/gomock" + gomock "go.uber.org/mock/gomock" ) // MockClient is a mock of Client interface. @@ -39,22 +42,22 @@ func (m *MockClient) EXPECT() *MockClientMockRecorder { } // GetChangeProof mocks base method. 
-func (m *MockClient) GetChangeProof(arg0 context.Context, arg1 *ChangeProofRequest, arg2 *merkledb.Database) (*merkledb.ChangeProof, error) { +func (m *MockClient) GetChangeProof(arg0 context.Context, arg1 *sync.SyncGetChangeProofRequest, arg2 DB) (*merkledb.ChangeOrRangeProof, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetChangeProof", arg0, arg1, arg2) - ret0, _ := ret[0].(*merkledb.ChangeProof) + ret0, _ := ret[0].(*merkledb.ChangeOrRangeProof) ret1, _ := ret[1].(error) return ret0, ret1 } // GetChangeProof indicates an expected call of GetChangeProof. -func (mr *MockClientMockRecorder) GetChangeProof(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockClientMockRecorder) GetChangeProof(arg0, arg1, arg2 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetChangeProof", reflect.TypeOf((*MockClient)(nil).GetChangeProof), arg0, arg1, arg2) } // GetRangeProof mocks base method. -func (m *MockClient) GetRangeProof(arg0 context.Context, arg1 *RangeProofRequest) (*merkledb.RangeProof, error) { +func (m *MockClient) GetRangeProof(arg0 context.Context, arg1 *sync.SyncGetRangeProofRequest) (*merkledb.RangeProof, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetRangeProof", arg0, arg1) ret0, _ := ret[0].(*merkledb.RangeProof) @@ -63,7 +66,7 @@ func (m *MockClient) GetRangeProof(arg0 context.Context, arg1 *RangeProofRequest } // GetRangeProof indicates an expected call of GetRangeProof. 
-func (mr *MockClientMockRecorder) GetRangeProof(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockClientMockRecorder) GetRangeProof(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRangeProof", reflect.TypeOf((*MockClient)(nil).GetRangeProof), arg0, arg1) } diff --git a/avalanchego/x/sync/mock_network_client.go b/avalanchego/x/sync/mock_network_client.go new file mode 100644 index 00000000..3156d145 --- /dev/null +++ b/avalanchego/x/sync/mock_network_client.go @@ -0,0 +1,129 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/ava-labs/avalanchego/x/sync (interfaces: NetworkClient) +// +// Generated by this command: +// +// mockgen -package=sync -destination=x/sync/mock_network_client.go github.com/ava-labs/avalanchego/x/sync NetworkClient +// + +// Package sync is a generated GoMock package. +package sync + +import ( + context "context" + reflect "reflect" + + ids "github.com/ava-labs/avalanchego/ids" + version "github.com/ava-labs/avalanchego/version" + gomock "go.uber.org/mock/gomock" +) + +// MockNetworkClient is a mock of NetworkClient interface. +type MockNetworkClient struct { + ctrl *gomock.Controller + recorder *MockNetworkClientMockRecorder +} + +// MockNetworkClientMockRecorder is the mock recorder for MockNetworkClient. +type MockNetworkClientMockRecorder struct { + mock *MockNetworkClient +} + +// NewMockNetworkClient creates a new mock instance. +func NewMockNetworkClient(ctrl *gomock.Controller) *MockNetworkClient { + mock := &MockNetworkClient{ctrl: ctrl} + mock.recorder = &MockNetworkClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockNetworkClient) EXPECT() *MockNetworkClientMockRecorder { + return m.recorder +} + +// AppRequestFailed mocks base method. 
+func (m *MockNetworkClient) AppRequestFailed(arg0 context.Context, arg1 ids.NodeID, arg2 uint32) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AppRequestFailed", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// AppRequestFailed indicates an expected call of AppRequestFailed. +func (mr *MockNetworkClientMockRecorder) AppRequestFailed(arg0, arg1, arg2 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AppRequestFailed", reflect.TypeOf((*MockNetworkClient)(nil).AppRequestFailed), arg0, arg1, arg2) +} + +// AppResponse mocks base method. +func (m *MockNetworkClient) AppResponse(arg0 context.Context, arg1 ids.NodeID, arg2 uint32, arg3 []byte) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AppResponse", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(error) + return ret0 +} + +// AppResponse indicates an expected call of AppResponse. +func (mr *MockNetworkClientMockRecorder) AppResponse(arg0, arg1, arg2, arg3 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AppResponse", reflect.TypeOf((*MockNetworkClient)(nil).AppResponse), arg0, arg1, arg2, arg3) +} + +// Connected mocks base method. +func (m *MockNetworkClient) Connected(arg0 context.Context, arg1 ids.NodeID, arg2 *version.Application) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Connected", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// Connected indicates an expected call of Connected. +func (mr *MockNetworkClientMockRecorder) Connected(arg0, arg1, arg2 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Connected", reflect.TypeOf((*MockNetworkClient)(nil).Connected), arg0, arg1, arg2) +} + +// Disconnected mocks base method. 
+func (m *MockNetworkClient) Disconnected(arg0 context.Context, arg1 ids.NodeID) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Disconnected", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// Disconnected indicates an expected call of Disconnected. +func (mr *MockNetworkClientMockRecorder) Disconnected(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Disconnected", reflect.TypeOf((*MockNetworkClient)(nil).Disconnected), arg0, arg1) +} + +// Request mocks base method. +func (m *MockNetworkClient) Request(arg0 context.Context, arg1 ids.NodeID, arg2 []byte) ([]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Request", arg0, arg1, arg2) + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Request indicates an expected call of Request. +func (mr *MockNetworkClientMockRecorder) Request(arg0, arg1, arg2 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Request", reflect.TypeOf((*MockNetworkClient)(nil).Request), arg0, arg1, arg2) +} + +// RequestAny mocks base method. +func (m *MockNetworkClient) RequestAny(arg0 context.Context, arg1 []byte) (ids.NodeID, []byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RequestAny", arg0, arg1) + ret0, _ := ret[0].(ids.NodeID) + ret1, _ := ret[1].([]byte) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// RequestAny indicates an expected call of RequestAny. 
+func (mr *MockNetworkClientMockRecorder) RequestAny(arg0, arg1 any) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RequestAny", reflect.TypeOf((*MockNetworkClient)(nil).RequestAny), arg0, arg1)
+}
diff --git a/avalanchego/x/sync/network_client.go b/avalanchego/x/sync/network_client.go
index 7496d131..15f59cc5 100644
--- a/avalanchego/x/sync/network_client.go
+++ b/avalanchego/x/sync/network_client.go
@@ -1,4 +1,4 @@
-// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved.
+// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved.
 // See the file LICENSE for licensing terms.
 
 package sync
@@ -10,11 +10,13 @@ import (
 	"sync"
 	"time"
 
+	"github.com/prometheus/client_golang/prometheus"
 	"go.uber.org/zap"
 	"golang.org/x/sync/semaphore"
 
 	"github.com/ava-labs/avalanchego/ids"
+	"github.com/ava-labs/avalanchego/network/p2p"
 	"github.com/ava-labs/avalanchego/snow/engine/common"
 	"github.com/ava-labs/avalanchego/utils/logging"
 	"github.com/ava-labs/avalanchego/utils/set"
@@ -25,10 +26,11 @@ import (
 const minRequestHandlingDuration = 100 * time.Millisecond
 
 var (
-	_ NetworkClient = &networkClient{}
+	_ NetworkClient = (*networkClient)(nil)
 
-	ErrAcquiringSemaphore = errors.New("error acquiring semaphore")
-	ErrRequestFailed      = errors.New("request failed")
+	errAcquiringSemaphore = errors.New("error acquiring semaphore")
+	errRequestFailed      = errors.New("request failed")
+	errAppSendFailed      = errors.New("failed to send app message")
 )
 
 // NetworkClient defines ability to send request / response through the Network
@@ -37,33 +39,53 @@ type NetworkClient interface {
 	// node version greater than or equal to minVersion.
 	// Returns response bytes, the ID of the chosen peer, and ErrRequestFailed if
 	// the request should be retried.
-	RequestAny(ctx context.Context, minVersion *version.Application, request []byte) ([]byte, ids.NodeID, error)
-
-	// Request synchronously sends request to the selected nodeID.
- // Returns response bytes, and ErrRequestFailed if the request should be retried. - Request(ctx context.Context, nodeID ids.NodeID, request []byte) ([]byte, error) - - // TrackBandwidth should be called for each valid response with the bandwidth - // (length of response divided by request time), and with 0 if the response is invalid. - TrackBandwidth(nodeID ids.NodeID, bandwidth float64) + RequestAny( + ctx context.Context, + request []byte, + ) (ids.NodeID, []byte, error) + + // Sends [request] to [nodeID] and returns the response. + // Blocks until the number of outstanding requests is + // below the limit before sending the request. + Request( + ctx context.Context, + nodeID ids.NodeID, + request []byte, + ) ([]byte, error) // The following declarations allow this interface to be embedded in the VM // to handle incoming responses from peers. + + // Always returns nil because the engine considers errors + // returned from this function as fatal. AppResponse(context.Context, ids.NodeID, uint32, []byte) error + + // Always returns nil because the engine considers errors + // returned from this function as fatal. AppRequestFailed(context.Context, ids.NodeID, uint32) error + + // Adds the given [nodeID] to the peer + // list so that it can receive messages. + // If [nodeID] is this node's ID, this is a no-op. Connected(context.Context, ids.NodeID, *version.Application) error + + // Removes given [nodeID] from the peer list. 
Disconnected(context.Context, ids.NodeID) error } type networkClient struct { - lock sync.Mutex // lock for mutating state of this Network struct - myNodeID ids.NodeID // NodeID of this node - requestID uint32 // requestID counter used to track outbound requests - outstandingRequestHandlers map[uint32]ResponseHandler // requestID => handler for the response/failure - activeRequests *semaphore.Weighted // controls maximum number of active outbound requests - peers *peerTracker // tracking of peers & bandwidth - appSender common.AppSender // AppSender for sending messages - log logging.Logger + lock sync.Mutex + log logging.Logger + // requestID counter used to track outbound requests + requestID uint32 + // requestID => handler for the response/failure + outstandingRequestHandlers map[uint32]ResponseHandler + // controls maximum number of active outbound requests + activeRequests *semaphore.Weighted + // tracking of peers & bandwidth usage + peers *p2p.PeerTracker + // For sending messages to peers + appSender common.AppSender } func NewNetworkClient( @@ -71,21 +93,36 @@ func NewNetworkClient( myNodeID ids.NodeID, maxActiveRequests int64, log logging.Logger, -) NetworkClient { + metricsNamespace string, + registerer prometheus.Registerer, + minVersion *version.Application, +) (NetworkClient, error) { + peerTracker, err := p2p.NewPeerTracker( + log, + metricsNamespace, + registerer, + set.Of(myNodeID), + minVersion, + ) + if err != nil { + return nil, fmt.Errorf("failed to create peer tracker: %w", err) + } + return &networkClient{ appSender: appSender, - myNodeID: myNodeID, outstandingRequestHandlers: make(map[uint32]ResponseHandler), activeRequests: semaphore.NewWeighted(maxActiveRequests), - peers: newPeerTracker(log), + peers: peerTracker, log: log, - } + }, nil } -// AppResponse is called when this node receives a response from a peer. -// As the engine considers errors returned from this function as fatal, -// this function always returns nil. 
-func (c *networkClient) AppResponse(_ context.Context, nodeID ids.NodeID, requestID uint32, response []byte) error { +func (c *networkClient) AppResponse( + _ context.Context, + nodeID ids.NodeID, + requestID uint32, + response []byte, +) error { c.lock.Lock() defer c.lock.Unlock() @@ -98,8 +135,9 @@ func (c *networkClient) AppResponse(_ context.Context, nodeID ids.NodeID, reques handler, exists := c.getRequestHandler(requestID) if !exists { - // Should never happen since the engine should be managing outstanding requests - c.log.Error( + // Should never happen since the engine + // should be managing outstanding requests + c.log.Warn( "received response to unknown request", zap.Stringer("nodeID", nodeID), zap.Uint32("requestID", requestID), @@ -111,13 +149,11 @@ func (c *networkClient) AppResponse(_ context.Context, nodeID ids.NodeID, reques return nil } -// AppRequestFailed can be called by the avalanchego -> VM in following cases: -// - node is benched -// - failed to send message to [nodeID] due to a network issue -// - timeout -// As the engine considers errors returned from this function as fatal, -// this function always returns nil. 
-func (c *networkClient) AppRequestFailed(_ context.Context, nodeID ids.NodeID, requestID uint32) error { +func (c *networkClient) AppRequestFailed( + _ context.Context, + nodeID ids.NodeID, + requestID uint32, +) error { c.lock.Lock() defer c.lock.Unlock() @@ -129,8 +165,9 @@ func (c *networkClient) AppRequestFailed(_ context.Context, nodeID ids.NodeID, r handler, exists := c.getRequestHandler(requestID) if !exists { - // Should never happen since the engine should be managing outstanding requests - c.log.Error( + // Should never happen since the engine + // should be managing outstanding requests + c.log.Warn( "received request failed to unknown request", zap.Stringer("nodeID", nodeID), zap.Uint32("requestID", requestID), @@ -142,7 +179,7 @@ func (c *networkClient) AppRequestFailed(_ context.Context, nodeID ids.NodeID, r } // Returns the handler for [requestID] and marks the request as fulfilled. -// This is called by either [AppResponse] or [AppRequestFailed]. +// Returns false if there's no outstanding request with [requestID]. // Assumes [c.lock] is held. func (c *networkClient) getRequestHandler(requestID uint32) (ResponseHandler, bool) { handler, exists := c.outstandingRequestHandlers[requestID] @@ -151,144 +188,174 @@ func (c *networkClient) getRequestHandler(requestID uint32) (ResponseHandler, bo } // mark message as processed, release activeRequests slot delete(c.outstandingRequestHandlers, requestID) - c.activeRequests.Release(1) return handler, true } -// RequestAny synchronously sends [request] to a randomly chosen peer with a -// node version greater than or equal to [minVersion]. If [minVersion] is nil, -// the request is sent to any peer regardless of their version. -// If the limit on active requests is reached, this function blocks until -// a slot becomes available. -// Returns the node's response and the ID of the node. +// If [errAppSendFailed] is returned this should be considered fatal. 
func (c *networkClient) RequestAny( ctx context.Context, - minVersion *version.Application, request []byte, -) ([]byte, ids.NodeID, error) { +) (ids.NodeID, []byte, error) { // Take a slot from total [activeRequests] and block until a slot becomes available. if err := c.activeRequests.Acquire(ctx, 1); err != nil { - return nil, ids.EmptyNodeID, ErrAcquiringSemaphore + return ids.EmptyNodeID, nil, errAcquiringSemaphore } + defer c.activeRequests.Release(1) - c.lock.Lock() - if nodeID, ok := c.peers.GetAnyPeer(minVersion); ok { - response, err := c.request(ctx, nodeID, request) - return response, nodeID, err + nodeID, responseChan, err := c.sendRequestAny(ctx, request) + if err != nil { + return ids.EmptyNodeID, nil, err } - c.lock.Unlock() - c.activeRequests.Release(1) - return nil, ids.EmptyNodeID, fmt.Errorf("no peers found matching version %s out of %d peers", minVersion, c.peers.Size()) + response, err := c.awaitResponse(ctx, nodeID, responseChan) + return nodeID, response, err } -// Sends [request] to [nodeID] and registers a handler for the response/failure. -// If the limit on active requests is reached, this function blocks until -// a slot becomes available. -func (c *networkClient) Request(ctx context.Context, nodeID ids.NodeID, request []byte) ([]byte, error) { - // TODO danlaine: is it possible for this condition to occur? - if nodeID == ids.EmptyNodeID { - return nil, fmt.Errorf("cannot send request to empty nodeID, nodeID=%s, requestLen=%d", nodeID, len(request)) +func (c *networkClient) sendRequestAny( + ctx context.Context, + request []byte, +) (ids.NodeID, chan []byte, error) { + c.lock.Lock() + defer c.lock.Unlock() + + nodeID, ok := c.peers.SelectPeer() + if !ok { + numPeers := c.peers.Size() + return ids.EmptyNodeID, nil, fmt.Errorf("no peers found from %d peers", numPeers) } - // Take a slot from total [activeRequests] and block until a slot becomes available. 
+ responseChan, err := c.sendRequestLocked(ctx, nodeID, request) + return nodeID, responseChan, err +} + +// If [errAppSendFailed] is returned this should be considered fatal. +func (c *networkClient) Request( + ctx context.Context, + nodeID ids.NodeID, + request []byte, +) ([]byte, error) { + // Take a slot from total [activeRequests] + // and block until a slot becomes available. if err := c.activeRequests.Acquire(ctx, 1); err != nil { - return nil, ErrAcquiringSemaphore + return nil, errAcquiringSemaphore } + defer c.activeRequests.Release(1) + + responseChan, err := c.sendRequest(ctx, nodeID, request) + if err != nil { + return nil, err + } + + return c.awaitResponse(ctx, nodeID, responseChan) +} +func (c *networkClient) sendRequest( + ctx context.Context, + nodeID ids.NodeID, + request []byte, +) (chan []byte, error) { c.lock.Lock() - return c.request(ctx, nodeID, request) + defer c.lock.Unlock() + + return c.sendRequestLocked(ctx, nodeID, request) } -// Sends [request] to [nodeID] and adds the response handler to [c.outstandingRequestHandlers] -// so that it can be invoked upon response/failure. -// Blocks until a response is received or the request fails. -// Assumes [nodeID] is never [c.myNodeID] since we guarantee [c.myNodeID] will not be added to [c.peers]. -// Releases active requests semaphore if there was an error in sending the request. -// Returns an error if [appSender] is unable to make the request. -// Assumes [c.lock] is held and unlocks [c.lock] before returning. -func (c *networkClient) request(ctx context.Context, nodeID ids.NodeID, request []byte) ([]byte, error) { - c.log.Debug("sending request to peer", zap.Stringer("nodeID", nodeID), zap.Int("requestLen", len(request))) - c.peers.TrackPeer(nodeID) - - // generate requestID +// Sends [request] to [nodeID] and returns a channel that will populate the +// response. +// +// If [errAppSendFailed] is returned this should be considered fatal. 
+//
+// Assumes [nodeID] is never [c.myNodeID] since we guarantee [c.myNodeID] will
+// not be added to [c.peers].
+//
+// Assumes [c.lock] is held.
+func (c *networkClient) sendRequestLocked(
+	ctx context.Context,
+	nodeID ids.NodeID,
+	request []byte,
+) (chan []byte, error) {
 	requestID := c.requestID
 	c.requestID++
-	handler := newResponseHandler()
-	c.outstandingRequestHandlers[requestID] = handler
-
-	nodeIDs := set.NewSet[ids.NodeID](1)
-	nodeIDs.Add(nodeID)
+	c.log.Debug("sending request to peer",
+		zap.Stringer("nodeID", nodeID),
+		zap.Uint32("requestID", requestID),
+		zap.Int("requestLen", len(request)),
+	)
+	c.peers.RegisterRequest(nodeID)
 
 	// Send an app request to the peer.
+	nodeIDs := set.Of(nodeID)
 	if err := c.appSender.SendAppRequest(ctx, nodeIDs, requestID, request); err != nil {
-		// On failure, release the activeRequests slot and mark the message as processed.
-		c.activeRequests.Release(1)
-		delete(c.outstandingRequestHandlers, requestID)
-		c.lock.Unlock()
-		return nil, err
+		// Note: [c.lock] is held and released by the caller; do not unlock here.
+		c.log.Fatal("failed to send app request",
+			zap.Stringer("nodeID", nodeID),
+			zap.Uint32("requestID", requestID),
+			zap.Int("requestLen", len(request)),
+			zap.Error(err),
+		)
+		return nil, fmt.Errorf("%w: %w", errAppSendFailed, err)
 	}
-	c.lock.Unlock() // unlock so response can be received
+	handler := newResponseHandler()
+	c.outstandingRequestHandlers[requestID] = handler
+	return handler.responseChan, nil
+}
 
-	var response []byte
+// awaitResponse from [nodeID] and returns the response.
+//
+// Returns an error if the request failed or [ctx] is canceled.
+//
+// Blocks until a response is received or the [ctx] is canceled.
+//
+// Assumes [nodeID] is never [c.myNodeID] since we guarantee [c.myNodeID] will
+// not be added to [c.peers].
+//
+// Assumes [c.lock] is not held.
+func (c *networkClient) awaitResponse( + ctx context.Context, + nodeID ids.NodeID, + responseChan chan []byte, +) ([]byte, error) { + var ( + response []byte + responded bool + startTime = time.Now() + ) select { case <-ctx.Done(): + c.peers.RegisterFailure(nodeID) return nil, ctx.Err() - case response = <-handler.responseChan: + case response, responded = <-responseChan: } - if handler.failed { - return nil, ErrRequestFailed + if !responded { + c.peers.RegisterFailure(nodeID) + return nil, errRequestFailed } + elapsedSeconds := time.Since(startTime).Seconds() + bandwidth := float64(len(response)) / (elapsedSeconds + epsilon) + c.peers.RegisterResponse(nodeID, bandwidth) + c.log.Debug("received response from peer", zap.Stringer("nodeID", nodeID), - zap.Uint32("requestID", requestID), - zap.Int("responseLen", len(response))) + zap.Int("responseLen", len(response)), + ) return response, nil } -// Connected adds the given nodeID to the peer list so that it can receive messages. -// If [nodeID] is [c.myNodeID], this is a no-op. -func (c *networkClient) Connected(_ context.Context, nodeID ids.NodeID, nodeVersion *version.Application) error { - c.lock.Lock() - defer c.lock.Unlock() - - if nodeID == c.myNodeID { - c.log.Debug("skipping registering self as peer") - return nil - } - +func (c *networkClient) Connected( + _ context.Context, + nodeID ids.NodeID, + nodeVersion *version.Application, +) error { c.log.Debug("adding new peer", zap.Stringer("nodeID", nodeID)) c.peers.Connected(nodeID, nodeVersion) return nil } -// Disconnected removes given [nodeID] from the peer list. -// TODO danlaine: should this be a no-op if [nodeID] is [c.myNodeID]? 
func (c *networkClient) Disconnected(_ context.Context, nodeID ids.NodeID) error { - c.lock.Lock() - defer c.lock.Unlock() - c.log.Debug("disconnecting peer", zap.Stringer("nodeID", nodeID)) c.peers.Disconnected(nodeID) return nil } - -// Shutdown disconnects all peers -func (c *networkClient) Shutdown() { - c.lock.Lock() - defer c.lock.Unlock() - - // reset peers - // TODO danlaine: should we call [Disconnected] on each peer? - c.peers = newPeerTracker(c.log) -} - -func (c *networkClient) TrackBandwidth(nodeID ids.NodeID, bandwidth float64) { - c.lock.Lock() - defer c.lock.Unlock() - - c.peers.TrackBandwidth(nodeID, bandwidth) -} diff --git a/avalanchego/x/sync/network_server.go b/avalanchego/x/sync/network_server.go index 9109a3bb..f7ca7ec6 100644 --- a/avalanchego/x/sync/network_server.go +++ b/avalanchego/x/sync/network_server.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package sync @@ -7,33 +7,58 @@ import ( "bytes" "context" "errors" + "fmt" "time" "go.uber.org/zap" - "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/hashing" "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/maybe" + "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/x/merkledb" + + pb "github.com/ava-labs/avalanchego/proto/pb/sync" ) -// Maximum number of key-value pairs to return in a proof. -// This overrides any other Limit specified in a RangeProofRequest -// or ChangeProofRequest if the given Limit is greater. -const maxKeyValuesLimit = 1024 +const ( + // Maximum number of key-value pairs to return in a proof. 
+ // This overrides any other Limit specified in a RangeProofRequest + // or ChangeProofRequest if the given Limit is greater. + maxKeyValuesLimit = 2048 + // Estimated max overhead, in bytes, of putting a proof into a message. + // We use this to ensure that the proof we generate is not too large to fit in a message. + // TODO: refine this estimate. This is almost certainly a large overestimate. + estimatedMessageOverhead = 4 * units.KiB + maxByteSizeLimit = constants.DefaultMaxMessageSize - estimatedMessageOverhead +) -var _ Handler = (*NetworkServer)(nil) +var ( + ErrMinProofSizeIsTooLarge = errors.New("cannot generate any proof within the requested limit") + + errInvalidBytesLimit = errors.New("bytes limit must be greater than 0") + errInvalidKeyLimit = errors.New("key limit must be greater than 0") + errInvalidStartRootHash = fmt.Errorf("start root hash must have length %d", hashing.HashLen) + errInvalidEndRootHash = fmt.Errorf("end root hash must have length %d", hashing.HashLen) + errInvalidStartKey = errors.New("start key is Nothing but has value") + errInvalidEndKey = errors.New("end key is Nothing but has value") + errInvalidBounds = errors.New("start key is greater than end key") + errInvalidRootHash = fmt.Errorf("root hash must have length %d", hashing.HashLen) +) type NetworkServer struct { appSender common.AppSender // Used to respond to peer requests via AppResponse. - db *merkledb.Database + db DB log logging.Logger } -func NewNetworkServer(appSender common.AppSender, db *merkledb.Database, log logging.Logger) *NetworkServer { +func NewNetworkServer(appSender common.AppSender, db DB, log logging.Logger) *NetworkServer { return &NetworkServer{ appSender: appSender, db: db, @@ -42,19 +67,19 @@ func NewNetworkServer(appSender common.AppSender, db *merkledb.Database, log log } // AppRequest is called by avalanchego -> VM when there is an incoming AppRequest from a peer. -// Never returns errors as they are considered fatal. 
+// Returns a non-nil error iff we fail to send an app message. This is a fatal error. // Sends a response back to the sender if length of response returned by the handler > 0. func (s *NetworkServer) AppRequest( - _ context.Context, + ctx context.Context, nodeID ids.NodeID, requestID uint32, deadline time.Time, request []byte, ) error { - var req Request - if _, err := syncCodec.Unmarshal(request, &req); err != nil { + var req pb.Request + if err := proto.Unmarshal(request, &req); err != nil { s.log.Debug( - "failed to unmarshal app request", + "failed to unmarshal AppRequest", zap.Stringer("nodeID", nodeID), zap.Uint32("requestID", requestID), zap.Int("requestLen", len(request)), @@ -66,7 +91,6 @@ func (s *NetworkServer) AppRequest( "processing AppRequest from node", zap.Stringer("nodeID", nodeID), zap.Uint32("requestID", requestID), - zap.Stringer("request", req), ) // bufferedDeadline is half the time till actual deadline so that the message has a @@ -82,131 +106,328 @@ func (s *NetworkServer) AppRequest( "deadline to process AppRequest has expired, skipping", zap.Stringer("nodeID", nodeID), zap.Uint32("requestID", requestID), - zap.Stringer("req", req), ) return nil } - // TODO danlaine: Why don't we use the passed in context instead of [context.Background()]? - handleCtx, cancel := context.WithDeadline(context.Background(), bufferedDeadline) + ctx, cancel := context.WithDeadline(ctx, bufferedDeadline) defer cancel() - err := req.Handle(handleCtx, nodeID, requestID, s) - if err != nil && !isTimeout(err) { - // log unexpected errors instead of returning them, since they are fatal. 
- s.log.Warn( - "unexpected error handling AppRequest", + var err error + switch req := req.GetMessage().(type) { + case *pb.Request_ChangeProofRequest: + err = s.HandleChangeProofRequest(ctx, nodeID, requestID, req.ChangeProofRequest) + case *pb.Request_RangeProofRequest: + err = s.HandleRangeProofRequest(ctx, nodeID, requestID, req.RangeProofRequest) + default: + s.log.Debug( + "unknown AppRequest type", zap.Stringer("nodeID", nodeID), zap.Uint32("requestID", requestID), - zap.Stringer("req", req), - zap.Error(err), + zap.Int("requestLen", len(request)), + zap.String("requestType", fmt.Sprintf("%T", req)), ) + return nil + } + + if err != nil { + if errors.Is(err, errAppSendFailed) { + return err + } + + if !isTimeout(err) { + // log unexpected errors instead of returning them, since they are fatal. + s.log.Warn( + "unexpected error handling AppRequest", + zap.Stringer("nodeID", nodeID), + zap.Uint32("requestID", requestID), + zap.Error(err), + ) + } } return nil } -// isTimeout returns true if err is a timeout from a context cancellation -// or a context cancellation over grpc. -func isTimeout(err error) bool { - // handle grpc wrapped DeadlineExceeded - if e, ok := status.FromError(err); ok { - if e.Code() == codes.DeadlineExceeded { - return true - } +func maybeBytesToMaybe(mb *pb.MaybeBytes) maybe.Maybe[[]byte] { + if mb != nil && !mb.IsNothing { + return maybe.Some(mb.Value) } - // otherwise, check for context.DeadlineExceeded directly - return errors.Is(err, context.DeadlineExceeded) + return maybe.Nothing[[]byte]() } // Generates a change proof and sends it to [nodeID]. +// If [errAppSendFailed] is returned, this should be considered fatal. 
func (s *NetworkServer) HandleChangeProofRequest( ctx context.Context, nodeID ids.NodeID, requestID uint32, - req *ChangeProofRequest, + req *pb.SyncGetChangeProofRequest, ) error { - if req.Limit == 0 || req.EndingRoot == ids.Empty || (len(req.End) > 0 && bytes.Compare(req.Start, req.End) > 0) { + if err := validateChangeProofRequest(req); err != nil { s.log.Debug( "dropping invalid change proof request", zap.Stringer("nodeID", nodeID), zap.Uint32("requestID", requestID), zap.Stringer("req", req), + zap.Error(err), ) return nil // dropping request } - // override limit if it is greater than maxKeyValuesLimit - limit := req.Limit - if limit > maxKeyValuesLimit { - limit = maxKeyValuesLimit - } + // override limits if they exceed caps + var ( + keyLimit = min(req.KeyLimit, maxKeyValuesLimit) + bytesLimit = min(int(req.BytesLimit), maxByteSizeLimit) + start = maybeBytesToMaybe(req.StartKey) + end = maybeBytesToMaybe(req.EndKey) + ) - changeProof, err := s.db.GetChangeProof(ctx, req.StartingRoot, req.EndingRoot, req.Start, req.End, int(limit)) + startRoot, err := ids.ToID(req.StartRootHash) if err != nil { - // handle expected errors so clients cannot cause servers to spam warning logs. 
- if errors.Is(err, merkledb.ErrRootIDNotPresent) || errors.Is(err, merkledb.ErrStartRootNotFound) { - s.log.Debug( - "dropping invalid change proof request", - zap.Stringer("nodeID", nodeID), - zap.Uint32("requestID", requestID), - zap.Stringer("req", req), - zap.Error(err), - ) - return nil // dropping request - } return err } - proofBytes, err := merkledb.Codec.EncodeChangeProof(Version, changeProof) + endRoot, err := ids.ToID(req.EndRootHash) if err != nil { return err } - return s.appSender.SendAppResponse(ctx, nodeID, requestID, proofBytes) + + for keyLimit > 0 { + changeProof, err := s.db.GetChangeProof(ctx, startRoot, endRoot, start, end, int(keyLimit)) + if err != nil { + if !errors.Is(err, merkledb.ErrInsufficientHistory) { + // We should only fail to get a change proof if we have insufficient history. + // Other errors are unexpected. + return err + } + if errors.Is(err, merkledb.ErrNoEndRoot) { + // [s.db] doesn't have [endRoot] in its history. + // We can't generate a change/range proof. Drop this request. + return nil + } + + // [s.db] doesn't have sufficient history to generate change proof. + // Generate a range proof for the end root ID instead. 
+ proofBytes, err := getRangeProof( + ctx, + s.db, + &pb.SyncGetRangeProofRequest{ + RootHash: req.EndRootHash, + StartKey: req.StartKey, + EndKey: req.EndKey, + KeyLimit: req.KeyLimit, + BytesLimit: req.BytesLimit, + }, + func(rangeProof *merkledb.RangeProof) ([]byte, error) { + return proto.Marshal(&pb.SyncGetChangeProofResponse{ + Response: &pb.SyncGetChangeProofResponse_RangeProof{ + RangeProof: rangeProof.ToProto(), + }, + }) + }, + ) + if err != nil { + return err + } + + if err := s.appSender.SendAppResponse(ctx, nodeID, requestID, proofBytes); err != nil { + s.log.Fatal( + "failed to send app response", + zap.Stringer("nodeID", nodeID), + zap.Uint32("requestID", requestID), + zap.Int("responseLen", len(proofBytes)), + zap.Error(err), + ) + return fmt.Errorf("%w: %w", errAppSendFailed, err) + } + return nil + } + + // We generated a change proof. See if it's small enough. + proofBytes, err := proto.Marshal(&pb.SyncGetChangeProofResponse{ + Response: &pb.SyncGetChangeProofResponse_ChangeProof{ + ChangeProof: changeProof.ToProto(), + }, + }) + if err != nil { + return err + } + + if len(proofBytes) < bytesLimit { + if err := s.appSender.SendAppResponse(ctx, nodeID, requestID, proofBytes); err != nil { + s.log.Fatal( + "failed to send app response", + zap.Stringer("nodeID", nodeID), + zap.Uint32("requestID", requestID), + zap.Int("responseLen", len(proofBytes)), + zap.Error(err), + ) + return fmt.Errorf("%w: %w", errAppSendFailed, err) + } + return nil + } + + // The proof was too large. Try to shrink it. + keyLimit = uint32(len(changeProof.KeyChanges)) / 2 + } + return ErrMinProofSizeIsTooLarge } // Generates a range proof and sends it to [nodeID]. -// TODO danlaine how should we handle context cancellation? +// If [errAppSendFailed] is returned, this should be considered fatal. 
func (s *NetworkServer) HandleRangeProofRequest( ctx context.Context, nodeID ids.NodeID, requestID uint32, - req *RangeProofRequest, + req *pb.SyncGetRangeProofRequest, ) error { - if req.Limit == 0 || req.Root == ids.Empty || (len(req.End) > 0 && bytes.Compare(req.Start, req.End) > 0) { + if err := validateRangeProofRequest(req); err != nil { s.log.Debug( "dropping invalid range proof request", zap.Stringer("nodeID", nodeID), zap.Uint32("requestID", requestID), zap.Stringer("req", req), + zap.Error(err), ) - return nil // dropping request + return nil // drop request } - // override limit if it is greater than maxKeyValuesLimit - limit := req.Limit - if limit > maxKeyValuesLimit { - limit = maxKeyValuesLimit - } + // override limits if they exceed caps + req.KeyLimit = min(req.KeyLimit, maxKeyValuesLimit) + req.BytesLimit = min(req.BytesLimit, maxByteSizeLimit) - rangeProof, err := s.db.GetRangeProofAtRoot(ctx, req.Root, req.Start, req.End, int(limit)) + proofBytes, err := getRangeProof( + ctx, + s.db, + req, + func(rangeProof *merkledb.RangeProof) ([]byte, error) { + return proto.Marshal(rangeProof.ToProto()) + }, + ) if err != nil { - // handle expected errors so clients cannot cause servers to spam warning logs. - if errors.Is(err, merkledb.ErrRootIDNotPresent) { - s.log.Debug( - "dropping invalid range proof request", - zap.Stringer("nodeID", nodeID), - zap.Uint32("requestID", requestID), - zap.Stringer("req", req), - zap.Error(err), - ) - return nil // dropping request - } return err } + if err := s.appSender.SendAppResponse(ctx, nodeID, requestID, proofBytes); err != nil { + s.log.Fatal( + "failed to send app response", + zap.Stringer("nodeID", nodeID), + zap.Uint32("requestID", requestID), + zap.Int("responseLen", len(proofBytes)), + zap.Error(err), + ) + return fmt.Errorf("%w: %w", errAppSendFailed, err) + } + return nil +} - proofBytes, err := merkledb.Codec.EncodeRangeProof(Version, rangeProof) +// Get the range proof specified by [req]. 
+// If the generated proof is too large, the key limit is reduced +// and the proof is regenerated. This process is repeated until +// the proof is smaller than [req.BytesLimit]. +// When a sufficiently small proof is generated, returns it. +// If no sufficiently small proof can be generated, returns [ErrMinProofSizeIsTooLarge]. +// TODO improve range proof generation so we don't need to iteratively +// reduce the key limit. +func getRangeProof( + ctx context.Context, + db DB, + req *pb.SyncGetRangeProofRequest, + marshalFunc func(*merkledb.RangeProof) ([]byte, error), +) ([]byte, error) { + root, err := ids.ToID(req.RootHash) if err != nil { - return err + return nil, err + } + + keyLimit := int(req.KeyLimit) + + for keyLimit > 0 { + rangeProof, err := db.GetRangeProofAtRoot( + ctx, + root, + maybeBytesToMaybe(req.StartKey), + maybeBytesToMaybe(req.EndKey), + keyLimit, + ) + if err != nil { + if errors.Is(err, merkledb.ErrInsufficientHistory) { + return nil, nil // drop request + } + return nil, err + } + + proofBytes, err := marshalFunc(rangeProof) + if err != nil { + return nil, err + } + + if len(proofBytes) < int(req.BytesLimit) { + return proofBytes, nil + } + + // The proof was too large. Try to shrink it. + keyLimit = len(rangeProof.KeyValues) / 2 + } + return nil, ErrMinProofSizeIsTooLarge +} + +// isTimeout returns true if err is a timeout from a context cancellation +// or a context cancellation over grpc. +func isTimeout(err error) bool { + // handle grpc wrapped DeadlineExceeded + if e, ok := status.FromError(err); ok { + if e.Code() == codes.DeadlineExceeded { + return true + } + } + // otherwise, check for context.DeadlineExceeded directly + return errors.Is(err, context.DeadlineExceeded) +} + +// Returns nil iff [req] is well-formed. 
+func validateChangeProofRequest(req *pb.SyncGetChangeProofRequest) error { + switch { + case req.BytesLimit == 0: + return errInvalidBytesLimit + case req.KeyLimit == 0: + return errInvalidKeyLimit + case len(req.StartRootHash) != hashing.HashLen: + return errInvalidStartRootHash + case len(req.EndRootHash) != hashing.HashLen: + return errInvalidEndRootHash + case bytes.Equal(req.EndRootHash, ids.Empty[:]): + return merkledb.ErrEmptyProof + case req.StartKey != nil && req.StartKey.IsNothing && len(req.StartKey.Value) > 0: + return errInvalidStartKey + case req.EndKey != nil && req.EndKey.IsNothing && len(req.EndKey.Value) > 0: + return errInvalidEndKey + case req.StartKey != nil && req.EndKey != nil && !req.StartKey.IsNothing && + !req.EndKey.IsNothing && bytes.Compare(req.StartKey.Value, req.EndKey.Value) > 0: + return errInvalidBounds + default: + return nil + } +} + +// Returns nil iff [req] is well-formed. +func validateRangeProofRequest(req *pb.SyncGetRangeProofRequest) error { + switch { + case req.BytesLimit == 0: + return errInvalidBytesLimit + case req.KeyLimit == 0: + return errInvalidKeyLimit + case len(req.RootHash) != ids.IDLen: + return errInvalidRootHash + case bytes.Equal(req.RootHash, ids.Empty[:]): + return merkledb.ErrEmptyProof + case req.StartKey != nil && req.StartKey.IsNothing && len(req.StartKey.Value) > 0: + return errInvalidStartKey + case req.EndKey != nil && req.EndKey.IsNothing && len(req.EndKey.Value) > 0: + return errInvalidEndKey + case req.StartKey != nil && req.EndKey != nil && !req.StartKey.IsNothing && + !req.EndKey.IsNothing && bytes.Compare(req.StartKey.Value, req.EndKey.Value) > 0: + return errInvalidBounds + default: + return nil } - return s.appSender.SendAppResponse(ctx, nodeID, requestID, proofBytes) } diff --git a/avalanchego/x/sync/network_server_test.go b/avalanchego/x/sync/network_server_test.go new file mode 100644 index 00000000..3d39addd --- /dev/null +++ b/avalanchego/x/sync/network_server_test.go @@ -0,0 +1,467 
@@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package sync + +import ( + "context" + "math/rand" + "testing" + "time" + + "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" + "google.golang.org/protobuf/proto" + + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/x/merkledb" + + pb "github.com/ava-labs/avalanchego/proto/pb/sync" +) + +func Test_Server_GetRangeProof(t *testing.T) { + now := time.Now().UnixNano() + t.Logf("seed: %d", now) + r := rand.New(rand.NewSource(now)) // #nosec G404 + + smallTrieDB, _, err := generateTrieWithMinKeyLen(t, r, defaultRequestKeyLimit, 1) + require.NoError(t, err) + smallTrieRoot, err := smallTrieDB.GetMerkleRoot(context.Background()) + require.NoError(t, err) + + tests := map[string]struct { + request *pb.SyncGetRangeProofRequest + expectedErr error + expectedResponseLen int + expectedMaxResponseBytes int + nodeID ids.NodeID + proofNil bool + }{ + "proof too large": { + request: &pb.SyncGetRangeProofRequest{ + RootHash: smallTrieRoot[:], + KeyLimit: defaultRequestKeyLimit, + BytesLimit: 1000, + }, + proofNil: true, + expectedErr: ErrMinProofSizeIsTooLarge, + }, + "byteslimit is 0": { + request: &pb.SyncGetRangeProofRequest{ + RootHash: smallTrieRoot[:], + KeyLimit: defaultRequestKeyLimit, + BytesLimit: 0, + }, + proofNil: true, + }, + "keylimit is 0": { + request: &pb.SyncGetRangeProofRequest{ + RootHash: smallTrieRoot[:], + KeyLimit: defaultRequestKeyLimit, + BytesLimit: 0, + }, + proofNil: true, + }, + "keys out of order": { + request: &pb.SyncGetRangeProofRequest{ + RootHash: smallTrieRoot[:], + KeyLimit: defaultRequestKeyLimit, + BytesLimit: defaultRequestByteSizeLimit, + StartKey: &pb.MaybeBytes{Value: []byte{1}}, + EndKey: &pb.MaybeBytes{Value: []byte{0}}, + }, + 
proofNil: true, + }, + "key limit too large": { + request: &pb.SyncGetRangeProofRequest{ + RootHash: smallTrieRoot[:], + KeyLimit: 2 * defaultRequestKeyLimit, + BytesLimit: defaultRequestByteSizeLimit, + }, + expectedResponseLen: defaultRequestKeyLimit, + }, + "bytes limit too large": { + request: &pb.SyncGetRangeProofRequest{ + RootHash: smallTrieRoot[:], + KeyLimit: defaultRequestKeyLimit, + BytesLimit: 2 * defaultRequestByteSizeLimit, + }, + expectedMaxResponseBytes: defaultRequestByteSizeLimit, + }, + "empty proof": { + request: &pb.SyncGetRangeProofRequest{ + RootHash: ids.Empty[:], + KeyLimit: defaultRequestKeyLimit, + BytesLimit: defaultRequestByteSizeLimit, + }, + proofNil: true, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) + sender := common.NewMockSender(ctrl) + var proof *merkledb.RangeProof + sender.EXPECT().SendAppResponse( + gomock.Any(), // ctx + gomock.Any(), // nodeID + gomock.Any(), // requestID + gomock.Any(), // responseBytes + ).DoAndReturn( + func(_ context.Context, _ ids.NodeID, _ uint32, responseBytes []byte) error { + // grab a copy of the proof so we can inspect it later + if !test.proofNil { + var proofProto pb.RangeProof + require.NoError(proto.Unmarshal(responseBytes, &proofProto)) + + var p merkledb.RangeProof + require.NoError(p.UnmarshalProto(&proofProto)) + proof = &p + } + return nil + }, + ).AnyTimes() + handler := NewNetworkServer(sender, smallTrieDB, logging.NoLog{}) + err := handler.HandleRangeProofRequest(context.Background(), test.nodeID, 0, test.request) + require.ErrorIs(err, test.expectedErr) + if test.expectedErr != nil { + return + } + if test.proofNil { + require.Nil(proof) + return + } + require.NotNil(proof) + if test.expectedResponseLen > 0 { + require.LessOrEqual(len(proof.KeyValues), test.expectedResponseLen) + } + + bytes, err := proto.Marshal(proof.ToProto()) + require.NoError(err) + require.LessOrEqual(len(bytes), 
int(test.request.BytesLimit)) + if test.expectedMaxResponseBytes > 0 { + require.LessOrEqual(len(bytes), test.expectedMaxResponseBytes) + } + }) + } +} + +func Test_Server_GetChangeProof(t *testing.T) { + now := time.Now().UnixNano() + t.Logf("seed: %d", now) + r := rand.New(rand.NewSource(now)) // #nosec G404 + trieDB, _, err := generateTrieWithMinKeyLen(t, r, defaultRequestKeyLimit, 1) + require.NoError(t, err) + + startRoot, err := trieDB.GetMerkleRoot(context.Background()) + require.NoError(t, err) + + // create changes + ops := make([]database.BatchOp, 0, 300) + for x := 0; x < 300; x++ { + key := make([]byte, r.Intn(100)) + _, err = r.Read(key) + require.NoError(t, err) + + val := make([]byte, r.Intn(100)) + _, err = r.Read(val) + require.NoError(t, err) + + ops = append(ops, database.BatchOp{Key: key, Value: val}) + + deleteKeyStart := make([]byte, r.Intn(10)) + _, err = r.Read(deleteKeyStart) + require.NoError(t, err) + + it := trieDB.NewIteratorWithStart(deleteKeyStart) + if it.Next() { + ops = append(ops, database.BatchOp{Key: it.Key(), Delete: true}) + } + require.NoError(t, it.Error()) + it.Release() + + view, err := trieDB.NewView( + context.Background(), + merkledb.ViewChanges{BatchOps: ops}, + ) + require.NoError(t, err) + require.NoError(t, view.CommitToDB(context.Background())) + } + + endRoot, err := trieDB.GetMerkleRoot(context.Background()) + require.NoError(t, err) + + fakeRootID := ids.GenerateTestID() + + tests := map[string]struct { + request *pb.SyncGetChangeProofRequest + expectedErr error + expectedResponseLen int + expectedMaxResponseBytes int + nodeID ids.NodeID + proofNil bool + expectRangeProof bool // Otherwise expect change proof + }{ + "byteslimit is 0": { + request: &pb.SyncGetChangeProofRequest{ + StartRootHash: startRoot[:], + EndRootHash: endRoot[:], + KeyLimit: defaultRequestKeyLimit, + BytesLimit: 0, + }, + proofNil: true, + }, + "keylimit is 0": { + request: &pb.SyncGetChangeProofRequest{ + StartRootHash: startRoot[:], + 
EndRootHash: endRoot[:], + KeyLimit: defaultRequestKeyLimit, + BytesLimit: 0, + }, + proofNil: true, + }, + "keys out of order": { + request: &pb.SyncGetChangeProofRequest{ + StartRootHash: startRoot[:], + EndRootHash: endRoot[:], + KeyLimit: defaultRequestKeyLimit, + BytesLimit: defaultRequestByteSizeLimit, + StartKey: &pb.MaybeBytes{Value: []byte{1}}, + EndKey: &pb.MaybeBytes{Value: []byte{0}}, + }, + proofNil: true, + }, + "key limit too large": { + request: &pb.SyncGetChangeProofRequest{ + StartRootHash: startRoot[:], + EndRootHash: endRoot[:], + KeyLimit: 2 * defaultRequestKeyLimit, + BytesLimit: defaultRequestByteSizeLimit, + }, + expectedResponseLen: defaultRequestKeyLimit, + }, + "bytes limit too large": { + request: &pb.SyncGetChangeProofRequest{ + StartRootHash: startRoot[:], + EndRootHash: endRoot[:], + KeyLimit: defaultRequestKeyLimit, + BytesLimit: 2 * defaultRequestByteSizeLimit, + }, + expectedMaxResponseBytes: defaultRequestByteSizeLimit, + }, + "insufficient history for change proof; return range proof": { + request: &pb.SyncGetChangeProofRequest{ + // This root doesn't exist so server has insufficient history + // to serve a change proof + StartRootHash: fakeRootID[:], + EndRootHash: endRoot[:], + KeyLimit: defaultRequestKeyLimit, + BytesLimit: defaultRequestByteSizeLimit, + }, + expectedMaxResponseBytes: defaultRequestByteSizeLimit, + expectRangeProof: true, + }, + "insufficient history for change proof or range proof": { + request: &pb.SyncGetChangeProofRequest{ + // These roots don't exist so server has insufficient history + // to serve a change proof or range proof + StartRootHash: ids.Empty[:], + EndRootHash: fakeRootID[:], + KeyLimit: defaultRequestKeyLimit, + BytesLimit: defaultRequestByteSizeLimit, + }, + expectedMaxResponseBytes: defaultRequestByteSizeLimit, + proofNil: true, + }, + "empty proof": { + request: &pb.SyncGetChangeProofRequest{ + StartRootHash: fakeRootID[:], + EndRootHash: ids.Empty[:], + KeyLimit: defaultRequestKeyLimit, + 
BytesLimit: defaultRequestByteSizeLimit, + }, + expectedMaxResponseBytes: defaultRequestByteSizeLimit, + proofNil: true, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + // Store proof returned by server in [proofResult] + var proofResult *pb.SyncGetChangeProofResponse + var proofBytes []byte + sender := common.NewMockSender(ctrl) + sender.EXPECT().SendAppResponse( + gomock.Any(), // ctx + gomock.Any(), // nodeID + gomock.Any(), // requestID + gomock.Any(), // responseBytes + ).DoAndReturn( + func(_ context.Context, _ ids.NodeID, _ uint32, responseBytes []byte) error { + if test.proofNil { + return nil + } + proofBytes = responseBytes + + // grab a copy of the proof so we can inspect it later + var responseProto pb.SyncGetChangeProofResponse + require.NoError(proto.Unmarshal(responseBytes, &responseProto)) + proofResult = &responseProto + + return nil + }, + ).AnyTimes() + + handler := NewNetworkServer(sender, trieDB, logging.NoLog{}) + err := handler.HandleChangeProofRequest(context.Background(), test.nodeID, 0, test.request) + require.ErrorIs(err, test.expectedErr) + if test.expectedErr != nil { + return + } + + if test.proofNil { + require.Nil(proofResult) + return + } + require.NotNil(proofResult) + + if test.expectRangeProof { + require.NotNil(proofResult.GetRangeProof()) + } else { + require.NotNil(proofResult.GetChangeProof()) + } + + if test.expectedResponseLen > 0 { + if test.expectRangeProof { + require.LessOrEqual(len(proofResult.GetRangeProof().KeyValues), test.expectedResponseLen) + } else { + require.LessOrEqual(len(proofResult.GetChangeProof().KeyChanges), test.expectedResponseLen) + } + } + + require.NoError(err) + require.LessOrEqual(len(proofBytes), int(test.request.BytesLimit)) + if test.expectedMaxResponseBytes > 0 { + require.LessOrEqual(len(proofBytes), test.expectedMaxResponseBytes) + } + }) + } +} + +// Test that AppRequest 
returns a non-nil error if we fail to send +// an AppRequest or AppResponse. +func TestAppRequestErrAppSendFailed(t *testing.T) { + startRootID := ids.GenerateTestID() + endRootID := ids.GenerateTestID() + + type test struct { + name string + request *pb.Request + handlerFunc func(*gomock.Controller) *NetworkServer + expectedErr error + } + + tests := []test{ + { + name: "GetChangeProof", + request: &pb.Request{ + Message: &pb.Request_ChangeProofRequest{ + ChangeProofRequest: &pb.SyncGetChangeProofRequest{ + StartRootHash: startRootID[:], + EndRootHash: endRootID[:], + StartKey: &pb.MaybeBytes{Value: []byte{1}}, + EndKey: &pb.MaybeBytes{Value: []byte{2}}, + KeyLimit: 100, + BytesLimit: 100, + }, + }, + }, + handlerFunc: func(ctrl *gomock.Controller) *NetworkServer { + sender := common.NewMockSender(ctrl) + sender.EXPECT().SendAppResponse( + gomock.Any(), + gomock.Any(), + gomock.Any(), + gomock.Any(), + ).Return(errAppSendFailed).AnyTimes() + + db := merkledb.NewMockMerkleDB(ctrl) + db.EXPECT().GetChangeProof( + gomock.Any(), + gomock.Any(), + gomock.Any(), + gomock.Any(), + gomock.Any(), + gomock.Any(), + ).Return(&merkledb.ChangeProof{}, nil).Times(1) + + return NewNetworkServer(sender, db, logging.NoLog{}) + }, + expectedErr: errAppSendFailed, + }, + { + name: "GetRangeProof", + request: &pb.Request{ + Message: &pb.Request_RangeProofRequest{ + RangeProofRequest: &pb.SyncGetRangeProofRequest{ + RootHash: endRootID[:], + StartKey: &pb.MaybeBytes{Value: []byte{1}}, + EndKey: &pb.MaybeBytes{Value: []byte{2}}, + KeyLimit: 100, + BytesLimit: 100, + }, + }, + }, + handlerFunc: func(ctrl *gomock.Controller) *NetworkServer { + sender := common.NewMockSender(ctrl) + sender.EXPECT().SendAppResponse( + gomock.Any(), + gomock.Any(), + gomock.Any(), + gomock.Any(), + ).Return(errAppSendFailed).AnyTimes() + + db := merkledb.NewMockMerkleDB(ctrl) + db.EXPECT().GetRangeProofAtRoot( + gomock.Any(), + gomock.Any(), + gomock.Any(), + gomock.Any(), + gomock.Any(), + 
).Return(&merkledb.RangeProof{}, nil).Times(1) + + return NewNetworkServer(sender, db, logging.NoLog{}) + }, + expectedErr: errAppSendFailed, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) + + handler := tt.handlerFunc(ctrl) + requestBytes, err := proto.Marshal(tt.request) + require.NoError(err) + + err = handler.AppRequest( + context.Background(), + ids.EmptyNodeID, + 0, + time.Now().Add(10*time.Second), + requestBytes, + ) + require.ErrorIs(err, tt.expectedErr) + }) + } +} diff --git a/avalanchego/x/sync/peer_tracker.go b/avalanchego/x/sync/peer_tracker.go deleted file mode 100644 index d045ace8..00000000 --- a/avalanchego/x/sync/peer_tracker.go +++ /dev/null @@ -1,230 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package sync - -import ( - "math/rand" - "time" - - stdmath "math" - - "go.uber.org/zap" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/utils/math" - "github.com/ava-labs/avalanchego/utils/set" - "github.com/ava-labs/avalanchego/version" -) - -const ( - bandwidthHalflife = 5 * time.Minute - - // controls how eagerly we connect to new peers vs. using - // peers with known good response bandwidth. - desiredMinResponsivePeers = 20 - newPeerConnectFactor = 0.1 - - // The probability that, when we select a peer, we select randomly rather - // than based on their performance. - randomPeerProbability = 0.2 -) - -// information we track on a given peer -type peerInfo struct { - version *version.Application - bandwidth math.Averager -} - -// Tracks the bandwidth of responses coming from peers, -// preferring to contact peers with known good bandwidth, connecting -// to new peers with an exponentially decaying probability. -// Note: not thread safe. Caller must handle synchronization. 
-type peerTracker struct { - // All peers we are connected to - peers map[ids.NodeID]*peerInfo - // Peers that we're connected to that we've sent a request to - // since we most recently connected to them. - trackedPeers set.Set[ids.NodeID] - // Peers that we're connected to that responded to the last request they were sent. - responsivePeers set.Set[ids.NodeID] - // Max heap that contains the average bandwidth of peers. - bandwidthHeap math.AveragerHeap - averageBandwidth math.Averager - log logging.Logger - // numTrackedPeers prometheus.Gauge - // numResponsivePeers prometheus.Gauge - // averageBandwidthMetric prometheus.Gauge -} - -func newPeerTracker(log logging.Logger) *peerTracker { - // TODO: initialize metrics - return &peerTracker{ - peers: make(map[ids.NodeID]*peerInfo), - trackedPeers: make(set.Set[ids.NodeID]), - responsivePeers: make(set.Set[ids.NodeID]), - bandwidthHeap: math.NewMaxAveragerHeap(), - averageBandwidth: math.NewAverager(0, bandwidthHalflife, time.Now()), - log: log, - // numTrackedPeers: metrics.GetOrRegisterGauge("net_tracked_peers", nil), - // numResponsivePeers: metrics.GetOrRegisterGauge("net_responsive_peers", nil), - // averageBandwidthMetric: metrics.GetOrRegisterGaugeFloat64("net_average_bandwidth", nil), - } -} - -// Returns true if we're not connected to enough peers. -// Otherwise returns true probabilistically based on the number of tracked peers. -func (p *peerTracker) shouldTrackNewPeer() bool { - numResponsivePeers := p.responsivePeers.Len() - if numResponsivePeers < desiredMinResponsivePeers { - return true - } - if len(p.trackedPeers) >= len(p.peers) { - // already tracking all the peers - return false - } - // TODO danlaine: we should consider tuning this probability function. 
- // With [newPeerConnectFactor] as 0.1 the probabilities are: - // - // numResponsivePeers | probability - // 100 | 4.5399929762484854e-05 - // 200 | 2.061153622438558e-09 - // 500 | 1.9287498479639178e-22 - // 1000 | 3.720075976020836e-44 - // 2000 | 1.3838965267367376e-87 - // 5000 | 7.124576406741286e-218 - // - // In other words, the probability drops off extremely quickly. - newPeerProbability := stdmath.Exp(-float64(numResponsivePeers) * newPeerConnectFactor) - return rand.Float64() < newPeerProbability // #nosec G404 -} - -// Returns a peer that we're connected to. -// If we should track more peers, returns a random peer with version >= [minVersion], if any exist. -// Otherwise, with probability [randomPeerProbability] returns a random peer from [p.responsivePeers]. -// With probability [1-randomPeerProbability] returns the peer in [p.bandwidthHeap] with the highest bandwidth. -func (p *peerTracker) GetAnyPeer(minVersion *version.Application) (ids.NodeID, bool) { - if p.shouldTrackNewPeer() { - for nodeID := range p.peers { - // if minVersion is specified and peer's version is less, skip - if minVersion != nil && p.peers[nodeID].version.Compare(minVersion) < 0 { - continue - } - // skip peers already tracked - if p.trackedPeers.Contains(nodeID) { - continue - } - p.log.Debug( - "tracking peer", - zap.Int("trackedPeers", len(p.trackedPeers)), - zap.Stringer("nodeID", nodeID), - ) - return nodeID, true - } - } - - var ( - nodeID ids.NodeID - ok bool - ) - useRand := rand.Float64() < randomPeerProbability // #nosec G404 - if useRand { - nodeID, ok = p.responsivePeers.Peek() - } else { - nodeID, _, ok = p.bandwidthHeap.Pop() - } - if !ok { - // if no nodes found in the bandwidth heap, return a tracked node at random - return p.trackedPeers.Peek() - } - p.log.Debug( - "peer tracking: popping peer", - zap.Stringer("nodeID", nodeID), - zap.Bool("random", useRand), - ) - return nodeID, true -} - -// Record that we sent a request to [nodeID]. 
-func (p *peerTracker) TrackPeer(nodeID ids.NodeID) { - p.trackedPeers.Add(nodeID) - // p.numTrackedPeers.Set(float64(p.trackedPeers.Len())) -} - -// Record that we observed that [nodeID]'s bandwidth is [bandwidth]. -// Adds the peer's bandwidth averager to the bandwidth heap. -func (p *peerTracker) TrackBandwidth(nodeID ids.NodeID, bandwidth float64) { - peer := p.peers[nodeID] - if peer == nil { - // we're not connected to this peer, nothing to do here - p.log.Debug("tracking bandwidth for untracked peer", zap.Stringer("nodeID", nodeID)) - return - } - - now := time.Now() - if peer.bandwidth == nil { - peer.bandwidth = math.NewAverager(bandwidth, bandwidthHalflife, now) - } else { - peer.bandwidth.Observe(bandwidth, now) - } - p.bandwidthHeap.Add(nodeID, peer.bandwidth) - - if bandwidth == 0 { - p.responsivePeers.Remove(nodeID) - } else { - p.responsivePeers.Add(nodeID) - // TODO danlaine: shouldn't we add the observation of 0 - // to the average bandwidth in the if statement? - p.averageBandwidth.Observe(bandwidth, now) - // p.averageBandwidthMetric.Set(p.averageBandwidth.Read()) - } - // p.numResponsivePeers.Set(float64(p.responsivePeers.Len())) -} - -// Connected should be called when [nodeID] connects to this node -func (p *peerTracker) Connected(nodeID ids.NodeID, nodeVersion *version.Application) { - peer := p.peers[nodeID] - if peer == nil { - p.peers[nodeID] = &peerInfo{ - version: nodeVersion, - } - return - } - - // Peer is already connected, update the version if it has changed. - // Log a warning message since the consensus engine should never call Connected on a peer - // that we have already marked as Connected. 
- if nodeVersion.Compare(peer.version) != 0 { - p.peers[nodeID] = &peerInfo{ - version: nodeVersion, - bandwidth: peer.bandwidth, - } - p.log.Warn( - "updating node version of already connected peer", - zap.Stringer("nodeID", nodeID), - zap.Stringer("storedVersion", peer.version), - zap.Stringer("nodeVersion", nodeVersion), - ) - } else { - p.log.Warn( - "ignoring peer connected event for already connected peer with identical version", - zap.Stringer("nodeID", nodeID), - ) - } -} - -// Disconnected should be called when [nodeID] disconnects from this node -func (p *peerTracker) Disconnected(nodeID ids.NodeID) { - p.bandwidthHeap.Remove(nodeID) - p.trackedPeers.Remove(nodeID) - // p.numTrackedPeers.Set(float64(p.trackedPeers.Len())) - p.responsivePeers.Remove(nodeID) - // p.numResponsivePeers.Set(float64(p.responsivePeers.Len())) - delete(p.peers, nodeID) -} - -// Returns the number of peers the node is connected to. -func (p *peerTracker) Size() int { - return len(p.peers) -} diff --git a/avalanchego/x/sync/request.go b/avalanchego/x/sync/request.go deleted file mode 100644 index ca516888..00000000 --- a/avalanchego/x/sync/request.go +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package sync - -import ( - "context" - "encoding/hex" - "fmt" - - "github.com/ava-labs/avalanchego/ids" -) - -var ( - _ Request = (*RangeProofRequest)(nil) - _ Request = (*ChangeProofRequest)(nil) -) - -// A request to this node for a proof. -type Request interface { - fmt.Stringer - Handle(ctx context.Context, nodeID ids.NodeID, requestID uint32, h Handler) error -} - -type rangeProofHandler interface { - // Generates a range proof and sends it to [nodeID]. - // TODO danlaine how should we handle context cancellation? 
- HandleRangeProofRequest( - ctx context.Context, - nodeID ids.NodeID, - requestID uint32, - request *RangeProofRequest, - ) error -} - -type changeProofHandler interface { - // Generates a change proof and sends it to [nodeID]. - // TODO danlaine how should we handle context cancellation? - HandleChangeProofRequest( - ctx context.Context, - nodeID ids.NodeID, - requestID uint32, - request *ChangeProofRequest, - ) error -} - -type Handler interface { - rangeProofHandler - changeProofHandler -} - -type RangeProofRequest struct { - Root ids.ID `serialize:"true"` - Start []byte `serialize:"true"` - End []byte `serialize:"true"` - Limit uint16 `serialize:"true"` -} - -func (r *RangeProofRequest) Handle(ctx context.Context, nodeID ids.NodeID, requestID uint32, h Handler) error { - return h.HandleRangeProofRequest(ctx, nodeID, requestID, r) -} - -func (r RangeProofRequest) String() string { - return fmt.Sprintf( - "RangeProofRequest(Root=%s, Start=%s, End=%s, Limit=%d)", - r.Root, - hex.EncodeToString(r.Start), - hex.EncodeToString(r.End), - r.Limit, - ) -} - -// ChangeProofRequest is a request to receive trie leaves at specified Root within Start and End byte range -// Limit outlines maximum number of leaves to returns starting at Start -type ChangeProofRequest struct { - StartingRoot ids.ID `serialize:"true"` - EndingRoot ids.ID `serialize:"true"` - Start []byte `serialize:"true"` - End []byte `serialize:"true"` - Limit uint16 `serialize:"true"` -} - -func (r *ChangeProofRequest) Handle(ctx context.Context, nodeID ids.NodeID, requestID uint32, h Handler) error { - return h.HandleChangeProofRequest(ctx, nodeID, requestID, r) -} - -func (r ChangeProofRequest) String() string { - return fmt.Sprintf( - "ChangeProofRequest(StartRoot=%s, EndRoot=%s, Start=%s, End=%s, Limit=%d)", - r.StartingRoot, - r.EndingRoot, - hex.EncodeToString(r.Start), - hex.EncodeToString(r.End), - r.Limit, - ) -} diff --git a/avalanchego/x/sync/response_handler.go 
b/avalanchego/x/sync/response_handler.go index c35c0a01..3f14e94d 100644 --- a/avalanchego/x/sync/response_handler.go +++ b/avalanchego/x/sync/response_handler.go @@ -1,13 +1,9 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package sync -// TODO danlaine: We create a new response handler for every request. -// Look into making a struct to handle requests/responses that uses a sync pool -// to avoid allocations. - -var _ ResponseHandler = &responseHandler{} +var _ ResponseHandler = (*responseHandler)(nil) // Handles responses/failure notifications for a sent request. // Exactly one of OnResponse or OnFailure is eventually called. @@ -24,23 +20,22 @@ func newResponseHandler() *responseHandler { // Implements [ResponseHandler]. // Used to wait for a response after making a synchronous request. -// responseChan may contain response bytes if the original request has not failed. +// responseChan contains response bytes if the request succeeded. // responseChan is closed in either fail or success scenario. type responseHandler struct { // If [OnResponse] is called, the response bytes are sent on this channel. + // If [OnFailure] is called, the channel is closed without sending bytes. responseChan chan []byte - // Set to true in [OnFailure]. - failed bool } -// OnResponse passes the response bytes to the responseChan and closes the channel +// OnResponse passes the response bytes to the responseChan and closes the +// channel. func (h *responseHandler) OnResponse(response []byte) { h.responseChan <- response close(h.responseChan) } -// OnFailure sets the failed flag to true and closes the channel +// OnFailure closes the channel. 
func (h *responseHandler) OnFailure() { - h.failed = true close(h.responseChan) } diff --git a/avalanchego/x/sync/sync_test.go b/avalanchego/x/sync/sync_test.go index b130416c..3d5fe6d3 100644 --- a/avalanchego/x/sync/sync_test.go +++ b/avalanchego/x/sync/sync_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package sync @@ -7,319 +7,506 @@ import ( "bytes" "context" "math/rand" + "slices" "testing" "time" - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" - "golang.org/x/exp/slices" - + "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/trace" "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/maybe" "github.com/ava-labs/avalanchego/x/merkledb" -) - -var _ Client = &mockClient{} -func newNoopTracer() trace.Tracer { - tracer, _ := trace.New(trace.Config{Enabled: false}) - return tracer -} + pb "github.com/ava-labs/avalanchego/proto/pb/sync" +) -type mockClient struct { - db *merkledb.Database -} +func newCallthroughSyncClient(ctrl *gomock.Controller, db merkledb.MerkleDB) *MockClient { + syncClient := NewMockClient(ctrl) + syncClient.EXPECT().GetRangeProof(gomock.Any(), gomock.Any()).DoAndReturn( + func(_ context.Context, request *pb.SyncGetRangeProofRequest) (*merkledb.RangeProof, error) { + return db.GetRangeProof( + context.Background(), + maybeBytesToMaybe(request.StartKey), + maybeBytesToMaybe(request.EndKey), + int(request.KeyLimit), + ) + }).AnyTimes() + syncClient.EXPECT().GetChangeProof(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn( + func(_ context.Context, request *pb.SyncGetChangeProofRequest, _ DB) (*merkledb.ChangeOrRangeProof, error) { + startRoot, err := ids.ToID(request.StartRootHash) + if err 
!= nil { + return nil, err + } -func (client *mockClient) GetChangeProof(ctx context.Context, request *ChangeProofRequest, _ *merkledb.Database) (*merkledb.ChangeProof, error) { - return client.db.GetChangeProof(ctx, request.StartingRoot, request.EndingRoot, request.Start, request.End, int(request.Limit)) -} + endRoot, err := ids.ToID(request.EndRootHash) + if err != nil { + return nil, err + } -func (client *mockClient) GetRangeProof(ctx context.Context, request *RangeProofRequest) (*merkledb.RangeProof, error) { - return client.db.GetRangeProofAtRoot(ctx, request.Root, request.Start, request.End, int(request.Limit)) + changeProof, err := db.GetChangeProof( + context.Background(), + startRoot, + endRoot, + maybeBytesToMaybe(request.StartKey), + maybeBytesToMaybe(request.EndKey), + int(request.KeyLimit), + ) + if err != nil { + return nil, err + } + return &merkledb.ChangeOrRangeProof{ + ChangeProof: changeProof, + }, nil + }).AnyTimes() + return syncClient } func Test_Creation(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + db, err := merkledb.New( context.Background(), memdb.New(), - merkledb.Config{ - Tracer: newNoopTracer(), - HistoryLength: 0, - NodeCacheSize: 1000, - }, + newDefaultDBConfig(), ) - require.NoError(t, err) + require.NoError(err) - syncer, err := NewStateSyncManager(StateSyncConfig{ - SyncDB: db, - Client: &mockClient{}, + syncer, err := NewManager(ManagerConfig{ + DB: db, + Client: NewMockClient(ctrl), TargetRoot: ids.Empty, SimultaneousWorkLimit: 5, Log: logging.NoLog{}, + BranchFactor: merkledb.BranchFactor16, }) - require.NotNil(t, syncer) - require.NoError(t, err) + require.NoError(err) + require.NotNil(syncer) } func Test_Completion(t *testing.T) { - for i := 0; i < 10; i++ { - ctrl := gomock.NewController(t) - defer ctrl.Finish() + require := require.New(t) + ctrl := gomock.NewController(t) + defer ctrl.Finish() - emptyDB, err := merkledb.New( - context.Background(), - memdb.New(), - 
merkledb.Config{ - Tracer: newNoopTracer(), - HistoryLength: 0, - NodeCacheSize: 1000, - }, - ) - require.NoError(t, err) - emptyRoot, err := emptyDB.GetMerkleRoot(context.Background()) - require.NoError(t, err) - db, err := merkledb.New( - context.Background(), - memdb.New(), - merkledb.Config{ - Tracer: newNoopTracer(), - HistoryLength: 0, - NodeCacheSize: 1000, - }, - ) - require.NoError(t, err) - syncer, err := NewStateSyncManager(StateSyncConfig{ - SyncDB: db, - Client: &mockClient{db: emptyDB}, - TargetRoot: emptyRoot, - SimultaneousWorkLimit: 5, - Log: logging.NoLog{}, - }) - require.NoError(t, err) - require.NotNil(t, syncer) - err = syncer.StartSyncing(context.Background()) - require.NoError(t, err) - err = syncer.Wait(context.Background()) - require.NoError(t, err) - syncer.workLock.Lock() - require.Equal(t, 0, syncer.unprocessedWork.Len()) - require.Equal(t, 1, syncer.processedWork.Len()) - syncer.workLock.Unlock() - } + emptyDB, err := merkledb.New( + context.Background(), + memdb.New(), + newDefaultDBConfig(), + ) + require.NoError(err) + + emptyRoot, err := emptyDB.GetMerkleRoot(context.Background()) + require.NoError(err) + + db, err := merkledb.New( + context.Background(), + memdb.New(), + newDefaultDBConfig(), + ) + require.NoError(err) + + syncer, err := NewManager(ManagerConfig{ + DB: db, + Client: newCallthroughSyncClient(ctrl, emptyDB), + TargetRoot: emptyRoot, + SimultaneousWorkLimit: 5, + Log: logging.NoLog{}, + BranchFactor: merkledb.BranchFactor16, + }) + require.NoError(err) + require.NotNil(syncer) + + require.NoError(syncer.Start(context.Background())) + require.NoError(syncer.Wait(context.Background())) + + syncer.workLock.Lock() + require.Zero(syncer.unprocessedWork.Len()) + require.Equal(1, syncer.processedWork.Len()) + syncer.workLock.Unlock() } func Test_Midpoint(t *testing.T) { - mid := midPoint([]byte{1, 255}, []byte{2, 1}) - require.Equal(t, []byte{2, 0}, mid) + require := require.New(t) + + mid := midPoint(maybe.Some([]byte{1, 
255}), maybe.Some([]byte{2, 1})) + require.Equal(maybe.Some([]byte{2, 0}), mid) - mid = midPoint(nil, []byte{255, 255, 0}) - require.Equal(t, []byte{127, 255, 128}, mid) + mid = midPoint(maybe.Nothing[[]byte](), maybe.Some([]byte{255, 255, 0})) + require.Equal(maybe.Some([]byte{127, 255, 128}), mid) - mid = midPoint([]byte{255, 255, 255}, []byte{255, 255}) - require.Equal(t, []byte{255, 255, 127, 128}, mid) + mid = midPoint(maybe.Some([]byte{255, 255, 255}), maybe.Some([]byte{255, 255})) + require.Equal(maybe.Some([]byte{255, 255, 127, 128}), mid) - mid = midPoint(nil, []byte{255}) - require.Equal(t, []byte{127, 127}, mid) + mid = midPoint(maybe.Nothing[[]byte](), maybe.Some([]byte{255})) + require.Equal(maybe.Some([]byte{127, 127}), mid) - mid = midPoint([]byte{1, 255}, []byte{255, 1}) - require.Equal(t, []byte{128, 128}, mid) + mid = midPoint(maybe.Some([]byte{1, 255}), maybe.Some([]byte{255, 1})) + require.Equal(maybe.Some([]byte{128, 128}), mid) - mid = midPoint([]byte{140, 255}, []byte{141, 0}) - require.Equal(t, []byte{140, 255, 127}, mid) + mid = midPoint(maybe.Some([]byte{140, 255}), maybe.Some([]byte{141, 0})) + require.Equal(maybe.Some([]byte{140, 255, 127}), mid) - mid = midPoint([]byte{126, 255}, []byte{127}) - require.Equal(t, []byte{126, 255, 127}, mid) + mid = midPoint(maybe.Some([]byte{126, 255}), maybe.Some([]byte{127})) + require.Equal(maybe.Some([]byte{126, 255, 127}), mid) - mid = midPoint(nil, nil) - require.Equal(t, []byte{127}, mid) + mid = midPoint(maybe.Nothing[[]byte](), maybe.Nothing[[]byte]()) + require.Equal(maybe.Some([]byte{127}), mid) - low := midPoint(nil, mid) - require.Equal(t, []byte{63, 127}, low) + low := midPoint(maybe.Nothing[[]byte](), mid) + require.Equal(maybe.Some([]byte{63, 127}), low) - high := midPoint(mid, nil) - require.Equal(t, []byte{191}, high) + high := midPoint(mid, maybe.Nothing[[]byte]()) + require.Equal(maybe.Some([]byte{191}), high) - mid = midPoint([]byte{255, 255}, nil) - require.Equal(t, []byte{255, 255, 
127, 127}, mid) + mid = midPoint(maybe.Some([]byte{255, 255}), maybe.Nothing[[]byte]()) + require.Equal(maybe.Some([]byte{255, 255, 127, 127}), mid) - mid = midPoint([]byte{255}, nil) - require.Equal(t, []byte{255, 127, 127}, mid) + mid = midPoint(maybe.Some([]byte{255}), maybe.Nothing[[]byte]()) + require.Equal(maybe.Some([]byte{255, 127, 127}), mid) for i := 0; i < 5000; i++ { r := rand.New(rand.NewSource(int64(i))) // #nosec G404 start := make([]byte, r.Intn(99)+1) _, err := r.Read(start) - require.NoError(t, err) + require.NoError(err) end := make([]byte, r.Intn(99)+1) _, err = r.Read(end) - require.NoError(t, err) + require.NoError(err) for bytes.Equal(start, end) { _, err = r.Read(end) - require.NoError(t, err) + require.NoError(err) } if bytes.Compare(start, end) == 1 { start, end = end, start } - mid = midPoint(start, end) - require.Equal(t, -1, bytes.Compare(start, mid)) - require.Equal(t, -1, bytes.Compare(mid, end)) + mid = midPoint(maybe.Some(start), maybe.Some(end)) + require.Equal(-1, bytes.Compare(start, mid.Value())) + require.Equal(-1, bytes.Compare(mid.Value(), end)) } } func Test_Sync_FindNextKey_InSync(t *testing.T) { - for i := 0; i < 3; i++ { - r := rand.New(rand.NewSource(int64(i))) // #nosec G404 - dbToSync, err := generateTrie(t, r, 1000) - require.NoError(t, err) - syncRoot, err := dbToSync.GetMerkleRoot(context.Background()) - require.NoError(t, err) + require := require.New(t) + ctrl := gomock.NewController(t) + defer ctrl.Finish() - db, err := merkledb.New( - context.Background(), - memdb.New(), - merkledb.Config{ - Tracer: newNoopTracer(), - HistoryLength: 0, - NodeCacheSize: 1000, - }, - ) - require.NoError(t, err) - syncer, err := NewStateSyncManager(StateSyncConfig{ - SyncDB: db, - Client: &mockClient{db: dbToSync}, - TargetRoot: syncRoot, - SimultaneousWorkLimit: 5, - Log: logging.NoLog{}, - }) - require.NoError(t, err) - require.NotNil(t, syncer) - - err = syncer.StartSyncing(context.Background()) - require.NoError(t, err) - err = 
syncer.Wait(context.Background()) - require.NoError(t, err) - - proof, err := dbToSync.GetRangeProof(context.Background(), nil, nil, 500) - require.NoError(t, err) - - // the two dbs should be in sync, so next key should be nil - lastKey := proof.KeyValues[len(proof.KeyValues)-1].Key - nextKey, err := syncer.findNextKey(context.Background(), lastKey, nil, proof.EndProof) - require.NoError(t, err) - require.Nil(t, nextKey) - - // add an extra value to sync db past the last key returned - newKey := midPoint(lastKey, nil) - err = db.Put(newKey, []byte{1}) - require.NoError(t, err) - - // create a range endpoint that is before the newly added key, but after the last key - endPointBeforeNewKey := make([]byte, 0, 2) - for i := 0; i < len(newKey); i++ { - endPointBeforeNewKey = append(endPointBeforeNewKey, newKey[i]) - - // we need the new key to be after the last key - // don't subtract anything from the current byte if newkey and lastkey are equal - if lastKey[i] == newKey[i] { - continue - } + now := time.Now().UnixNano() + t.Logf("seed: %d", now) + r := rand.New(rand.NewSource(now)) // #nosec G404 + dbToSync, err := generateTrie(t, r, 1000) + require.NoError(err) + syncRoot, err := dbToSync.GetMerkleRoot(context.Background()) + require.NoError(err) - // if the first nibble is > 0, subtract "1" from it - if endPointBeforeNewKey[i] >= 16 { - endPointBeforeNewKey[i] -= 16 - break - } - // if the second nibble > 0, subtract 1 from it - if endPointBeforeNewKey[i] > 0 { - endPointBeforeNewKey[i] -= 1 - break - } - // both nibbles were 0, so move onto the next byte - } + db, err := merkledb.New( + context.Background(), + memdb.New(), + newDefaultDBConfig(), + ) + require.NoError(err) + + syncer, err := NewManager(ManagerConfig{ + DB: db, + Client: newCallthroughSyncClient(ctrl, dbToSync), + TargetRoot: syncRoot, + SimultaneousWorkLimit: 5, + Log: logging.NoLog{}, + BranchFactor: merkledb.BranchFactor16, + }) + require.NoError(err) + require.NotNil(syncer) + + 
require.NoError(syncer.Start(context.Background())) + require.NoError(syncer.Wait(context.Background())) + + proof, err := dbToSync.GetRangeProof(context.Background(), maybe.Nothing[[]byte](), maybe.Nothing[[]byte](), 500) + require.NoError(err) - nextKey, err = syncer.findNextKey(context.Background(), lastKey, endPointBeforeNewKey, proof.EndProof) - require.NoError(t, err) + // the two dbs should be in sync, so next key should be nil + lastKey := proof.KeyValues[len(proof.KeyValues)-1].Key + nextKey, err := syncer.findNextKey(context.Background(), lastKey, maybe.Nothing[[]byte](), proof.EndProof) + require.NoError(err) + require.True(nextKey.IsNothing()) + + // add an extra value to sync db past the last key returned + newKey := midPoint(maybe.Some(lastKey), maybe.Nothing[[]byte]()) + newKeyVal := newKey.Value() + require.NoError(db.Put(newKeyVal, []byte{1})) + + // create a range endpoint that is before the newly added key, but after the last key + endPointBeforeNewKey := make([]byte, 0, 2) + for i := 0; i < len(newKeyVal); i++ { + endPointBeforeNewKey = append(endPointBeforeNewKey, newKeyVal[i]) + + // we need the new key to be after the last key + // don't subtract anything from the current byte if newkey and lastkey are equal + if lastKey[i] == newKeyVal[i] { + continue + } - // next key would be after the end of the range, so it returns nil instead - require.Nil(t, nextKey) + // if the first nibble is > 0, subtract "1" from it + if endPointBeforeNewKey[i] >= 16 { + endPointBeforeNewKey[i] -= 16 + break + } + // if the second nibble > 0, subtract 1 from it + if endPointBeforeNewKey[i] > 0 { + endPointBeforeNewKey[i] -= 1 + break + } + // both nibbles were 0, so move onto the next byte } + + nextKey, err = syncer.findNextKey(context.Background(), lastKey, maybe.Some(endPointBeforeNewKey), proof.EndProof) + require.NoError(err) + + // next key would be after the end of the range, so it returns Nothing instead + require.True(nextKey.IsNothing()) +} + +func 
Test_Sync_FindNextKey_Deleted(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + db, err := merkledb.New( + context.Background(), + memdb.New(), + newDefaultDBConfig(), + ) + require.NoError(err) + require.NoError(db.Put([]byte{0x10}, []byte{1})) + require.NoError(db.Put([]byte{0x11, 0x11}, []byte{2})) + + syncRoot, err := db.GetMerkleRoot(context.Background()) + require.NoError(err) + + syncer, err := NewManager(ManagerConfig{ + DB: db, + Client: NewMockClient(ctrl), + TargetRoot: syncRoot, + SimultaneousWorkLimit: 5, + Log: logging.NoLog{}, + BranchFactor: merkledb.BranchFactor16, + }) + require.NoError(err) + + // 0x12 was "deleted" and there should be no extra node in the proof since there was nothing with a common prefix + noExtraNodeProof, err := db.GetProof(context.Background(), []byte{0x12}) + require.NoError(err) + + // 0x11 was "deleted" and 0x11.0x11 should be in the exclusion proof + extraNodeProof, err := db.GetProof(context.Background(), []byte{0x11}) + require.NoError(err) + + // there is now another value in the range that needs to be sync'ed + require.NoError(db.Put([]byte{0x13}, []byte{3})) + + nextKey, err := syncer.findNextKey(context.Background(), []byte{0x12}, maybe.Some([]byte{0x20}), noExtraNodeProof.Path) + require.NoError(err) + require.Equal(maybe.Some([]byte{0x13}), nextKey) + + nextKey, err = syncer.findNextKey(context.Background(), []byte{0x11}, maybe.Some([]byte{0x20}), extraNodeProof.Path) + require.NoError(err) + require.Equal(maybe.Some([]byte{0x13}), nextKey) +} + +func Test_Sync_FindNextKey_BranchInLocal(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) + + db, err := merkledb.New( + context.Background(), + memdb.New(), + newDefaultDBConfig(), + ) + require.NoError(err) + require.NoError(db.Put([]byte{0x11}, []byte{1})) + require.NoError(db.Put([]byte{0x11, 0x11}, []byte{2})) + + targetRoot, err := db.GetMerkleRoot(context.Background()) + 
require.NoError(err) + + proof, err := db.GetProof(context.Background(), []byte{0x11, 0x11}) + require.NoError(err) + + syncer, err := NewManager(ManagerConfig{ + DB: db, + Client: NewMockClient(ctrl), + TargetRoot: targetRoot, + SimultaneousWorkLimit: 5, + Log: logging.NoLog{}, + BranchFactor: merkledb.BranchFactor16, + }) + require.NoError(err) + require.NoError(db.Put([]byte{0x11, 0x15}, []byte{4})) + + nextKey, err := syncer.findNextKey(context.Background(), []byte{0x11, 0x11}, maybe.Some([]byte{0x20}), proof.Path) + require.NoError(err) + require.Equal(maybe.Some([]byte{0x11, 0x15}), nextKey) +} + +func Test_Sync_FindNextKey_BranchInReceived(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) + + db, err := merkledb.New( + context.Background(), + memdb.New(), + newDefaultDBConfig(), + ) + require.NoError(err) + require.NoError(db.Put([]byte{0x11}, []byte{1})) + require.NoError(db.Put([]byte{0x12}, []byte{2})) + require.NoError(db.Put([]byte{0x12, 0xA0}, []byte{4})) + + targetRoot, err := db.GetMerkleRoot(context.Background()) + require.NoError(err) + + proof, err := db.GetProof(context.Background(), []byte{0x12}) + require.NoError(err) + + syncer, err := NewManager(ManagerConfig{ + DB: db, + Client: NewMockClient(ctrl), + TargetRoot: targetRoot, + SimultaneousWorkLimit: 5, + Log: logging.NoLog{}, + BranchFactor: merkledb.BranchFactor16, + }) + require.NoError(err) + require.NoError(db.Delete([]byte{0x12, 0xA0})) + + nextKey, err := syncer.findNextKey(context.Background(), []byte{0x12}, maybe.Some([]byte{0x20}), proof.Path) + require.NoError(err) + require.Equal(maybe.Some([]byte{0x12, 0xA0}), nextKey) } func Test_Sync_FindNextKey_ExtraValues(t *testing.T) { - for i := 0; i < 10; i++ { - r := rand.New(rand.NewSource(int64(i))) // #nosec G404 - dbToSync, err := generateTrie(t, r, 1000) - require.NoError(t, err) - syncRoot, err := dbToSync.GetMerkleRoot(context.Background()) - require.NoError(t, err) + require := require.New(t) + ctrl := 
gomock.NewController(t) + defer ctrl.Finish() - db, err := merkledb.New( - context.Background(), - memdb.New(), - merkledb.Config{ - Tracer: newNoopTracer(), - HistoryLength: 0, - NodeCacheSize: 1000, - }, - ) - require.NoError(t, err) - syncer, err := NewStateSyncManager(StateSyncConfig{ - SyncDB: db, - Client: &mockClient{db: dbToSync}, - TargetRoot: syncRoot, - SimultaneousWorkLimit: 5, - Log: logging.NoLog{}, - }) - require.NoError(t, err) - require.NotNil(t, syncer) + now := time.Now().UnixNano() + t.Logf("seed: %d", now) + r := rand.New(rand.NewSource(now)) // #nosec G404 + dbToSync, err := generateTrie(t, r, 1000) + require.NoError(err) + syncRoot, err := dbToSync.GetMerkleRoot(context.Background()) + require.NoError(err) + + db, err := merkledb.New( + context.Background(), + memdb.New(), + newDefaultDBConfig(), + ) + require.NoError(err) + syncer, err := NewManager(ManagerConfig{ + DB: db, + Client: newCallthroughSyncClient(ctrl, dbToSync), + TargetRoot: syncRoot, + SimultaneousWorkLimit: 5, + Log: logging.NoLog{}, + BranchFactor: merkledb.BranchFactor16, + }) + require.NoError(err) + require.NotNil(syncer) + + require.NoError(syncer.Start(context.Background())) + require.NoError(syncer.Wait(context.Background())) + + proof, err := dbToSync.GetRangeProof(context.Background(), maybe.Nothing[[]byte](), maybe.Nothing[[]byte](), 500) + require.NoError(err) + + // add an extra value to local db + lastKey := proof.KeyValues[len(proof.KeyValues)-1].Key + midpoint := midPoint(maybe.Some(lastKey), maybe.Nothing[[]byte]()) + midPointVal := midpoint.Value() + + require.NoError(db.Put(midPointVal, []byte{1})) + + // next key at prefix of newly added point + nextKey, err := syncer.findNextKey(context.Background(), lastKey, maybe.Nothing[[]byte](), proof.EndProof) + require.NoError(err) + require.True(nextKey.HasValue()) + + require.True(isPrefix(midPointVal, nextKey.Value())) - err = syncer.StartSyncing(context.Background()) - require.NoError(t, err) - err = 
syncer.Wait(context.Background()) - require.NoError(t, err) + require.NoError(db.Delete(midPointVal)) - proof, err := dbToSync.GetRangeProof(context.Background(), nil, nil, 500) - require.NoError(t, err) + require.NoError(dbToSync.Put(midPointVal, []byte{1})) - // add an extra value to local db - lastKey := proof.KeyValues[len(proof.KeyValues)-1].Key - midpoint := midPoint(lastKey, nil) + proof, err = dbToSync.GetRangeProof(context.Background(), maybe.Nothing[[]byte](), maybe.Some(lastKey), 500) + require.NoError(err) - err = db.Put(midpoint, []byte{1}) - require.NoError(t, err) + // next key at prefix of newly added point + nextKey, err = syncer.findNextKey(context.Background(), lastKey, maybe.Nothing[[]byte](), proof.EndProof) + require.NoError(err) + require.True(nextKey.HasValue()) - // next key at prefix of newly added point - nextKey, err := syncer.findNextKey(context.Background(), lastKey, nil, proof.EndProof) - require.NoError(t, err) - require.NotNil(t, nextKey) + // deal with odd length key + require.True(isPrefix(midPointVal, nextKey.Value())) +} + +func TestFindNextKeyEmptyEndProof(t *testing.T) { + require := require.New(t) + now := time.Now().UnixNano() + t.Logf("seed: %d", now) + r := rand.New(rand.NewSource(now)) // #nosec G404 + ctrl := gomock.NewController(t) + defer ctrl.Finish() - require.True(t, isPrefix(midpoint, nextKey)) + db, err := merkledb.New( + context.Background(), + memdb.New(), + newDefaultDBConfig(), + ) + require.NoError(err) - err = db.Delete(midpoint) - require.NoError(t, err) + syncer, err := NewManager(ManagerConfig{ + DB: db, + Client: NewMockClient(ctrl), + TargetRoot: ids.Empty, + SimultaneousWorkLimit: 5, + Log: logging.NoLog{}, + BranchFactor: merkledb.BranchFactor16, + }) + require.NoError(err) + require.NotNil(syncer) - err = dbToSync.Put(midpoint, []byte{1}) - require.NoError(t, err) + for i := 0; i < 100; i++ { + lastReceivedKeyLen := r.Intn(16) + lastReceivedKey := make([]byte, lastReceivedKeyLen) + _, _ = 
r.Read(lastReceivedKey) // #nosec G404 - proof, err = dbToSync.GetRangeProof(context.Background(), nil, lastKey, 500) - require.NoError(t, err) + rangeEndLen := r.Intn(16) + rangeEndBytes := make([]byte, rangeEndLen) + _, _ = r.Read(rangeEndBytes) // #nosec G404 - // next key at prefix of newly added point - nextKey, err = syncer.findNextKey(context.Background(), lastKey, nil, proof.EndProof) - require.NoError(t, err) - require.NotNil(t, nextKey) + rangeEnd := maybe.Nothing[[]byte]() + if rangeEndLen > 0 { + rangeEnd = maybe.Some(rangeEndBytes) + } - // deal with odd length key - require.True(t, isPrefix(midpoint, nextKey)) + nextKey, err := syncer.findNextKey( + context.Background(), + lastReceivedKey, + rangeEnd, + nil, /* endProof */ + ) + require.NoError(err) + require.Equal(maybe.Some(append(lastReceivedKey, 0)), nextKey) } } @@ -337,246 +524,456 @@ func isPrefix(data []byte, prefix []byte) bool { } func Test_Sync_FindNextKey_DifferentChild(t *testing.T) { - for i := 0; i < 10; i++ { - r := rand.New(rand.NewSource(int64(i))) // #nosec G404 - dbToSync, err := generateTrie(t, r, 500) - require.NoError(t, err) - syncRoot, err := dbToSync.GetMerkleRoot(context.Background()) - require.NoError(t, err) + require := require.New(t) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + now := time.Now().UnixNano() + t.Logf("seed: %d", now) + r := rand.New(rand.NewSource(now)) // #nosec G404 + dbToSync, err := generateTrie(t, r, 500) + require.NoError(err) + syncRoot, err := dbToSync.GetMerkleRoot(context.Background()) + require.NoError(err) - db, err := merkledb.New( + db, err := merkledb.New( + context.Background(), + memdb.New(), + newDefaultDBConfig(), + ) + require.NoError(err) + syncer, err := NewManager(ManagerConfig{ + DB: db, + Client: newCallthroughSyncClient(ctrl, dbToSync), + TargetRoot: syncRoot, + SimultaneousWorkLimit: 5, + Log: logging.NoLog{}, + BranchFactor: merkledb.BranchFactor16, + }) + require.NoError(err) + require.NotNil(syncer) + 
require.NoError(syncer.Start(context.Background())) + require.NoError(syncer.Wait(context.Background())) + + proof, err := dbToSync.GetRangeProof(context.Background(), maybe.Nothing[[]byte](), maybe.Nothing[[]byte](), 100) + require.NoError(err) + lastKey := proof.KeyValues[len(proof.KeyValues)-1].Key + + // local db has a different child than remote db + lastKey = append(lastKey, 16) + require.NoError(db.Put(lastKey, []byte{1})) + + require.NoError(dbToSync.Put(lastKey, []byte{2})) + + proof, err = dbToSync.GetRangeProof(context.Background(), maybe.Nothing[[]byte](), maybe.Some(proof.KeyValues[len(proof.KeyValues)-1].Key), 100) + require.NoError(err) + + nextKey, err := syncer.findNextKey(context.Background(), proof.KeyValues[len(proof.KeyValues)-1].Key, maybe.Nothing[[]byte](), proof.EndProof) + require.NoError(err) + require.True(nextKey.HasValue()) + require.Equal(lastKey, nextKey.Value()) +} + +// Test findNextKey by computing the expected result in a naive, inefficient +// way and comparing it to the actual result +func TestFindNextKeyRandom(t *testing.T) { + now := time.Now().UnixNano() + t.Logf("seed: %d", now) + rand := rand.New(rand.NewSource(now)) // #nosec G404 + require := require.New(t) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + // Create a "remote" database and "local" database + remoteDB, err := merkledb.New( + context.Background(), + memdb.New(), + newDefaultDBConfig(), + ) + require.NoError(err) + + config := newDefaultDBConfig() + localDB, err := merkledb.New( + context.Background(), + memdb.New(), + config, + ) + require.NoError(err) + + var ( + numProofsToTest = 250 + numKeyValues = 250 + maxKeyLen = 256 + maxValLen = 256 + maxRangeStartLen = 8 + maxRangeEndLen = 8 + maxProofLen = 128 + ) + + // Put random keys into the databases + for _, db := range []database.Database{remoteDB, localDB} { + for i := 0; i < numKeyValues; i++ { + key := make([]byte, rand.Intn(maxKeyLen)) + _, _ = rand.Read(key) + val := make([]byte, 
rand.Intn(maxValLen)) + _, _ = rand.Read(val) + require.NoError(db.Put(key, val)) + } + } + + // Repeatedly generate end proofs from the remote database and compare + // the result of findNextKey to the expected result. + for proofIndex := 0; proofIndex < numProofsToTest; proofIndex++ { + // Generate a proof for a random key + var ( + rangeStart []byte + rangeEnd []byte + ) + // Generate a valid range start and end + for rangeStart == nil || bytes.Compare(rangeStart, rangeEnd) == 1 { + rangeStart = make([]byte, rand.Intn(maxRangeStartLen)+1) + _, _ = rand.Read(rangeStart) + rangeEnd = make([]byte, rand.Intn(maxRangeEndLen)+1) + _, _ = rand.Read(rangeEnd) + } + + startKey := maybe.Nothing[[]byte]() + if len(rangeStart) > 0 { + startKey = maybe.Some(rangeStart) + } + endKey := maybe.Nothing[[]byte]() + if len(rangeEnd) > 0 { + endKey = maybe.Some(rangeEnd) + } + + remoteProof, err := remoteDB.GetRangeProof( context.Background(), - memdb.New(), - merkledb.Config{ - Tracer: newNoopTracer(), - HistoryLength: 0, - NodeCacheSize: 1000, - }, + startKey, + endKey, + rand.Intn(maxProofLen)+1, ) - require.NoError(t, err) - syncer, err := NewStateSyncManager(StateSyncConfig{ - SyncDB: db, - Client: &mockClient{db: dbToSync}, - TargetRoot: syncRoot, - SimultaneousWorkLimit: 5, - Log: logging.NoLog{}, - }) - require.NoError(t, err) - require.NotNil(t, syncer) - err = syncer.StartSyncing(context.Background()) - require.NoError(t, err) - err = syncer.Wait(context.Background()) - require.NoError(t, err) - - proof, err := dbToSync.GetRangeProof(context.Background(), nil, nil, 100) - require.NoError(t, err) - lastKey := proof.KeyValues[len(proof.KeyValues)-1].Key - - // local db has a different child than remote db - lastKey = append(lastKey, 16) - err = db.Put(lastKey, []byte{1}) - require.NoError(t, err) - - err = dbToSync.Put(lastKey, []byte{2}) - require.NoError(t, err) - - proof, err = dbToSync.GetRangeProof(context.Background(), nil, proof.KeyValues[len(proof.KeyValues)-1].Key, 
100) - require.NoError(t, err) - - nextKey, err := syncer.findNextKey(context.Background(), proof.KeyValues[len(proof.KeyValues)-1].Key, nil, proof.EndProof) - require.NoError(t, err) - require.Equal(t, nextKey, lastKey) - } -} + require.NoError(err) -func Test_Sync_Result_Correct_Root(t *testing.T) { - for i := 0; i < 3; i++ { - r := rand.New(rand.NewSource(int64(i))) // #nosec G404 - dbToSync, err := generateTrie(t, r, 5000) - require.NoError(t, err) - syncRoot, err := dbToSync.GetMerkleRoot(context.Background()) - require.NoError(t, err) + if len(remoteProof.KeyValues) == 0 { + continue + } + lastReceivedKey := remoteProof.KeyValues[len(remoteProof.KeyValues)-1].Key + + // Commit the proof to the local database as we do + // in the actual syncer. + require.NoError(localDB.CommitRangeProof( + context.Background(), + startKey, + endKey, + remoteProof, + )) - db, err := merkledb.New( + localProof, err := localDB.GetProof( context.Background(), - memdb.New(), - merkledb.Config{ - Tracer: newNoopTracer(), - HistoryLength: 0, - NodeCacheSize: 1000, - }, + lastReceivedKey, ) - require.NoError(t, err) - syncer, err := NewStateSyncManager(StateSyncConfig{ - SyncDB: db, - Client: &mockClient{db: dbToSync}, - TargetRoot: syncRoot, - SimultaneousWorkLimit: 5, - Log: logging.NoLog{}, - }) - require.NoError(t, err) - require.NotNil(t, syncer) - err = syncer.StartSyncing(context.Background()) - require.NoError(t, err) - - err = syncer.Wait(context.Background()) - require.NoError(t, err) - require.NoError(t, syncer.Error()) - - // new db has fully sync'ed and should be at the same root as the original db - newRoot, err := db.GetMerkleRoot(context.Background()) - require.NoError(t, err) - require.Equal(t, syncRoot, newRoot) - - // make sure they stay in sync - for x := 0; x < 50; x++ { - addkey := make([]byte, r.Intn(50)) - _, err = r.Read(addkey) - require.NoError(t, err) - val := make([]byte, r.Intn(50)) - _, err = r.Read(val) - require.NoError(t, err) - - err = db.Put(addkey, 
val) - require.NoError(t, err) - - err = dbToSync.Put(addkey, val) - require.NoError(t, err) - - addNilkey := make([]byte, r.Intn(50)) - _, err = r.Read(addNilkey) - require.NoError(t, err) - err = db.Put(addNilkey, nil) - require.NoError(t, err) - - err = dbToSync.Put(addNilkey, nil) - require.NoError(t, err) - - deleteKeyStart := make([]byte, r.Intn(50)) - _, err = r.Read(deleteKeyStart) - require.NoError(t, err) - - it := dbToSync.NewIteratorWithStart(deleteKeyStart) - if it.Next() { - err = dbToSync.Delete(it.Key()) - require.NoError(t, err) - err = db.Delete(it.Key()) - require.NoError(t, err) + require.NoError(err) + + type keyAndID struct { + key merkledb.Key + id ids.ID + } + + // Set of key prefix/ID pairs proven by the remote database's end proof. + remoteKeyIDs := []keyAndID{} + for _, node := range remoteProof.EndProof { + for childIdx, childID := range node.Children { + remoteKeyIDs = append(remoteKeyIDs, keyAndID{ + key: node.Key.Extend(merkledb.ToToken(childIdx, merkledb.BranchFactorToTokenSize[config.BranchFactor])), + id: childID, + }) } - require.NoError(t, it.Error()) - it.Release() + } - syncRoot, err = dbToSync.GetMerkleRoot(context.Background()) - require.NoError(t, err) + // Set of key prefix/ID pairs proven by the local database's proof. + localKeyIDs := []keyAndID{} + for _, node := range localProof.Path { + for childIdx, childID := range node.Children { + localKeyIDs = append(localKeyIDs, keyAndID{ + key: node.Key.Extend(merkledb.ToToken(childIdx, merkledb.BranchFactorToTokenSize[config.BranchFactor])), + id: childID, + }) + } + } - newRoot, err = db.GetMerkleRoot(context.Background()) - require.NoError(t, err) - require.Equal(t, syncRoot, newRoot) + // Sort in ascending order by key prefix. 
+ serializedPathCompare := func(i, j keyAndID) int { + return i.key.Compare(j.key) + } + slices.SortFunc(remoteKeyIDs, serializedPathCompare) + slices.SortFunc(localKeyIDs, serializedPathCompare) + + // Filter out keys that are before the last received key + findBounds := func(keyIDs []keyAndID) (int, int) { + var ( + firstIdxInRange = len(keyIDs) + firstIdxInRangeFound = false + firstIdxOutOfRange = len(keyIDs) + ) + for i, keyID := range keyIDs { + if !firstIdxInRangeFound && bytes.Compare(keyID.key.Bytes(), lastReceivedKey) > 0 { + firstIdxInRange = i + firstIdxInRangeFound = true + continue + } + if bytes.Compare(keyID.key.Bytes(), rangeEnd) > 0 { + firstIdxOutOfRange = i + break + } + } + return firstIdxInRange, firstIdxOutOfRange } - } -} -func Test_Sync_Result_Correct_Root_With_Sync_Restart(t *testing.T) { - for i := 0; i < 5; i++ { - r := rand.New(rand.NewSource(int64(i))) // #nosec G404 - dbToSync, err := generateTrie(t, r, 5000) - require.NoError(t, err) - syncRoot, err := dbToSync.GetMerkleRoot(context.Background()) - require.NoError(t, err) + remoteFirstIdxAfterLastReceived, remoteFirstIdxAfterEnd := findBounds(remoteKeyIDs) + remoteKeyIDs = remoteKeyIDs[remoteFirstIdxAfterLastReceived:remoteFirstIdxAfterEnd] - db, err := merkledb.New( - context.Background(), - memdb.New(), - merkledb.Config{ - Tracer: newNoopTracer(), - HistoryLength: 0, - NodeCacheSize: 1000, - }, + localFirstIdxAfterLastReceived, localFirstIdxAfterEnd := findBounds(localKeyIDs) + localKeyIDs = localKeyIDs[localFirstIdxAfterLastReceived:localFirstIdxAfterEnd] + + // Find smallest difference between the set of key/ID pairs proven by + // the remote/local proofs for key/ID pairs after the last received key. + var ( + smallestDiffKey merkledb.Key + foundDiff bool ) - require.NoError(t, err) + for i := 0; i < len(remoteKeyIDs) && i < len(localKeyIDs); i++ { + // See if the keys are different. 
+ smaller, bigger := remoteKeyIDs[i], localKeyIDs[i] + if serializedPathCompare(localKeyIDs[i], remoteKeyIDs[i]) == -1 { + smaller, bigger = localKeyIDs[i], remoteKeyIDs[i] + } - syncer, err := NewStateSyncManager(StateSyncConfig{ - SyncDB: db, - Client: &mockClient{db: dbToSync}, - TargetRoot: syncRoot, - SimultaneousWorkLimit: 5, - Log: logging.NoLog{}, - }) - require.NoError(t, err) - require.NotNil(t, syncer) - err = syncer.StartSyncing(context.Background()) - require.NoError(t, err) - - time.Sleep(15 * time.Millisecond) - syncer.Close() - - newSyncer, err := NewStateSyncManager(StateSyncConfig{ - SyncDB: db, - Client: &mockClient{db: dbToSync}, - TargetRoot: syncRoot, + if smaller.key != bigger.key || smaller.id != bigger.id { + smallestDiffKey = smaller.key + foundDiff = true + break + } + } + if !foundDiff { + // All the keys were equal. The smallest diff is the next key + // in the longer of the lists (if they're not same length.) + if len(remoteKeyIDs) < len(localKeyIDs) { + smallestDiffKey = localKeyIDs[len(remoteKeyIDs)].key + } else if len(remoteKeyIDs) > len(localKeyIDs) { + smallestDiffKey = remoteKeyIDs[len(localKeyIDs)].key + } + } + + // Get the actual value from the syncer + syncer, err := NewManager(ManagerConfig{ + DB: localDB, + Client: NewMockClient(ctrl), + TargetRoot: ids.GenerateTestID(), SimultaneousWorkLimit: 5, Log: logging.NoLog{}, + BranchFactor: merkledb.BranchFactor16, }) - require.NoError(t, err) - require.NotNil(t, newSyncer) - err = newSyncer.StartSyncing(context.Background()) - require.NoError(t, err) - require.NoError(t, newSyncer.Error()) - err = newSyncer.Wait(context.Background()) - require.NoError(t, err) - newRoot, err := db.GetMerkleRoot(context.Background()) - require.NoError(t, err) - require.Equal(t, syncRoot, newRoot) + require.NoError(err) + + gotFirstDiff, err := syncer.findNextKey( + context.Background(), + lastReceivedKey, + endKey, + remoteProof.EndProof, + ) + require.NoError(err) + + if 
bytes.Compare(smallestDiffKey.Bytes(), rangeEnd) >= 0 { + // The smallest key which differs is after the range end so the + // next key to get should be nil because we're done fetching the range. + require.True(gotFirstDiff.IsNothing()) + } else { + require.Equal(smallestDiffKey.Bytes(), gotFirstDiff.Value()) + } } } -func Test_Sync_Error_During_Sync(t *testing.T) { +func Test_Sync_Result_Correct_Root(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) defer ctrl.Finish() - r := rand.New(rand.NewSource(int64(0))) // #nosec G404 - dbToSync, err := generateTrie(t, r, 100) + now := time.Now().UnixNano() + t.Logf("seed: %d", now) + r := rand.New(rand.NewSource(now)) // #nosec G404 + dbToSync, err := generateTrie(t, r, 1000) + require.NoError(err) + syncRoot, err := dbToSync.GetMerkleRoot(context.Background()) + require.NoError(err) + + db, err := merkledb.New( + context.Background(), + memdb.New(), + newDefaultDBConfig(), + ) + require.NoError(err) + syncer, err := NewManager(ManagerConfig{ + DB: db, + Client: newCallthroughSyncClient(ctrl, dbToSync), + TargetRoot: syncRoot, + SimultaneousWorkLimit: 5, + Log: logging.NoLog{}, + BranchFactor: merkledb.BranchFactor16, + }) + require.NoError(err) + require.NotNil(syncer) + require.NoError(syncer.Start(context.Background())) + + require.NoError(syncer.Wait(context.Background())) + require.NoError(syncer.Error()) + + // new db has fully sync'ed and should be at the same root as the original db + newRoot, err := db.GetMerkleRoot(context.Background()) + require.NoError(err) + require.Equal(syncRoot, newRoot) + + // make sure they stay in sync + addkey := make([]byte, r.Intn(50)) + _, err = r.Read(addkey) require.NoError(err) + val := make([]byte, r.Intn(50)) + _, err = r.Read(val) + require.NoError(err) + + require.NoError(db.Put(addkey, val)) + + require.NoError(dbToSync.Put(addkey, val)) + syncRoot, err = dbToSync.GetMerkleRoot(context.Background()) + require.NoError(err) + + newRoot, err = 
db.GetMerkleRoot(context.Background()) + require.NoError(err) + require.Equal(syncRoot, newRoot) +} + +func Test_Sync_Result_Correct_Root_With_Sync_Restart(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) + + now := time.Now().UnixNano() + t.Logf("seed: %d", now) + r := rand.New(rand.NewSource(now)) // #nosec G404 + dbToSync, err := generateTrie(t, r, 3*maxKeyValuesLimit) + require.NoError(err) syncRoot, err := dbToSync.GetMerkleRoot(context.Background()) require.NoError(err) db, err := merkledb.New( context.Background(), memdb.New(), - merkledb.Config{ - Tracer: newNoopTracer(), - HistoryLength: 0, - NodeCacheSize: 1000, + newDefaultDBConfig(), + ) + require.NoError(err) + + syncer, err := NewManager(ManagerConfig{ + DB: db, + Client: newCallthroughSyncClient(ctrl, dbToSync), + TargetRoot: syncRoot, + SimultaneousWorkLimit: 5, + Log: logging.NoLog{}, + BranchFactor: merkledb.BranchFactor16, + }) + require.NoError(err) + require.NotNil(syncer) + require.NoError(syncer.Start(context.Background())) + + // Wait until we've processed some work + // before updating the sync target. 
+ require.Eventually( + func() bool { + syncer.workLock.Lock() + defer syncer.workLock.Unlock() + + return syncer.processedWork.Len() > 0 }, + 5*time.Second, + 5*time.Millisecond, + ) + syncer.Close() + + newSyncer, err := NewManager(ManagerConfig{ + DB: db, + Client: newCallthroughSyncClient(ctrl, dbToSync), + TargetRoot: syncRoot, + SimultaneousWorkLimit: 5, + Log: logging.NoLog{}, + BranchFactor: merkledb.BranchFactor16, + }) + require.NoError(err) + require.NotNil(newSyncer) + + require.NoError(newSyncer.Start(context.Background())) + require.NoError(newSyncer.Error()) + require.NoError(newSyncer.Wait(context.Background())) + + newRoot, err := db.GetMerkleRoot(context.Background()) + require.NoError(err) + require.Equal(syncRoot, newRoot) +} + +func Test_Sync_Error_During_Sync(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) + now := time.Now().UnixNano() + t.Logf("seed: %d", now) + r := rand.New(rand.NewSource(now)) // #nosec G404 + + dbToSync, err := generateTrie(t, r, 100) + require.NoError(err) + + syncRoot, err := dbToSync.GetMerkleRoot(context.Background()) + require.NoError(err) + + db, err := merkledb.New( + context.Background(), + memdb.New(), + newDefaultDBConfig(), ) require.NoError(err) client := NewMockClient(ctrl) client.EXPECT().GetRangeProof(gomock.Any(), gomock.Any()).DoAndReturn( - func(ctx context.Context, request *RangeProofRequest) (*merkledb.RangeProof, error) { + func(context.Context, *pb.SyncGetRangeProofRequest) (*merkledb.RangeProof, error) { return nil, errInvalidRangeProof }, ).AnyTimes() client.EXPECT().GetChangeProof(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn( - func(ctx context.Context, request *ChangeProofRequest, _ *merkledb.Database) (*merkledb.ChangeProof, error) { - return dbToSync.GetChangeProof(ctx, request.StartingRoot, request.EndingRoot, request.Start, request.End, int(request.Limit)) + func(ctx context.Context, request *pb.SyncGetChangeProofRequest, _ DB) 
(*merkledb.ChangeOrRangeProof, error) { + startRoot, err := ids.ToID(request.StartRootHash) + require.NoError(err) + + endRoot, err := ids.ToID(request.EndRootHash) + require.NoError(err) + + changeProof, err := dbToSync.GetChangeProof(ctx, startRoot, endRoot, maybeBytesToMaybe(request.StartKey), maybeBytesToMaybe(request.EndKey), int(request.KeyLimit)) + if err != nil { + return nil, err + } + + return &merkledb.ChangeOrRangeProof{ + ChangeProof: changeProof, + }, nil }, ).AnyTimes() - syncer, err := NewStateSyncManager(StateSyncConfig{ - SyncDB: db, + syncer, err := NewManager(ManagerConfig{ + DB: db, Client: client, TargetRoot: syncRoot, SimultaneousWorkLimit: 5, Log: logging.NoLog{}, + BranchFactor: merkledb.BranchFactor16, }) require.NoError(err) - require.NotNil(t, syncer) + require.NotNil(syncer) - err = syncer.StartSyncing(context.Background()) - require.NoError(err) + require.NoError(syncer.Start(context.Background())) err = syncer.Wait(context.Background()) require.ErrorIs(err, errInvalidRangeProof) @@ -585,131 +982,140 @@ func Test_Sync_Error_During_Sync(t *testing.T) { func Test_Sync_Result_Correct_Root_Update_Root_During(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() - for i := 0; i < 5; i++ { - r := rand.New(rand.NewSource(int64(i))) // #nosec G404 + now := time.Now().UnixNano() + t.Logf("seed: %d", now) + r := rand.New(rand.NewSource(now)) // #nosec G404 - dbToSync, err := generateTrie(t, r, 10000) - require.NoError(err) + dbToSync, err := generateTrie(t, r, 3*maxKeyValuesLimit) + require.NoError(err) - syncRoot, err := dbToSync.GetMerkleRoot(context.Background()) + firstSyncRoot, err := dbToSync.GetMerkleRoot(context.Background()) + require.NoError(err) + + for x := 0; x < 100; x++ { + key := make([]byte, r.Intn(50)) + _, err = r.Read(key) require.NoError(err) - db, err := merkledb.New( - context.Background(), - memdb.New(), - merkledb.Config{ - Tracer: newNoopTracer(), - HistoryLength: 0, - 
NodeCacheSize: 1000, - }, - ) + val := make([]byte, r.Intn(50)) + _, err = r.Read(val) require.NoError(err) - // Only let one response go through until we update the root. - updatedRootChan := make(chan struct{}, 1) - updatedRootChan <- struct{}{} - client := NewMockClient(ctrl) - client.EXPECT().GetRangeProof(gomock.Any(), gomock.Any()).DoAndReturn( - func(ctx context.Context, request *RangeProofRequest) (*merkledb.RangeProof, error) { - <-updatedRootChan - return dbToSync.GetRangeProofAtRoot(ctx, request.Root, request.Start, request.End, int(request.Limit)) - }, - ).AnyTimes() - client.EXPECT().GetChangeProof(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn( - func(ctx context.Context, request *ChangeProofRequest, _ *merkledb.Database) (*merkledb.ChangeProof, error) { - <-updatedRootChan - return dbToSync.GetChangeProof(ctx, request.StartingRoot, request.EndingRoot, request.Start, request.End, int(request.Limit)) - }, - ).AnyTimes() - - syncer, err := NewStateSyncManager(StateSyncConfig{ - SyncDB: db, - Client: client, - TargetRoot: syncRoot, - SimultaneousWorkLimit: 5, - Log: logging.NoLog{}, - }) + require.NoError(dbToSync.Put(key, val)) + + deleteKeyStart := make([]byte, r.Intn(50)) + _, err = r.Read(deleteKeyStart) require.NoError(err) - require.NotNil(t, syncer) - for x := 0; x < 50; x++ { - key := make([]byte, r.Intn(50)) - _, err = r.Read(key) - require.NoError(err) - val := make([]byte, r.Intn(50)) - _, err = r.Read(val) + it := dbToSync.NewIteratorWithStart(deleteKeyStart) + if it.Next() { + require.NoError(dbToSync.Delete(it.Key())) + } + require.NoError(it.Error()) + it.Release() + } + + secondSyncRoot, err := dbToSync.GetMerkleRoot(context.Background()) + require.NoError(err) + + db, err := merkledb.New( + context.Background(), + memdb.New(), + newDefaultDBConfig(), + ) + require.NoError(err) + + // Only let one response go through until we update the root. 
+ updatedRootChan := make(chan struct{}, 1) + updatedRootChan <- struct{}{} + + client := NewMockClient(ctrl) + client.EXPECT().GetRangeProof(gomock.Any(), gomock.Any()).DoAndReturn( + func(ctx context.Context, request *pb.SyncGetRangeProofRequest) (*merkledb.RangeProof, error) { + <-updatedRootChan + root, err := ids.ToID(request.RootHash) require.NoError(err) + return dbToSync.GetRangeProofAtRoot(ctx, root, maybeBytesToMaybe(request.StartKey), maybeBytesToMaybe(request.EndKey), int(request.KeyLimit)) + }, + ).AnyTimes() + client.EXPECT().GetChangeProof(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn( + func(ctx context.Context, request *pb.SyncGetChangeProofRequest, _ DB) (*merkledb.ChangeOrRangeProof, error) { + <-updatedRootChan - err = dbToSync.Put(key, val) + startRoot, err := ids.ToID(request.StartRootHash) require.NoError(err) - deleteKeyStart := make([]byte, r.Intn(50)) - _, err = r.Read(deleteKeyStart) + endRoot, err := ids.ToID(request.EndRootHash) require.NoError(err) - it := dbToSync.NewIteratorWithStart(deleteKeyStart) - if it.Next() { - err = dbToSync.Delete(it.Key()) - require.NoError(err) + changeProof, err := dbToSync.GetChangeProof(ctx, startRoot, endRoot, maybeBytesToMaybe(request.StartKey), maybeBytesToMaybe(request.EndKey), int(request.KeyLimit)) + if err != nil { + return nil, err } - require.NoError(it.Error()) - it.Release() - } - syncRoot, err = dbToSync.GetMerkleRoot(context.Background()) - require.NoError(err) + return &merkledb.ChangeOrRangeProof{ + ChangeProof: changeProof, + }, nil + }, + ).AnyTimes() - err = syncer.StartSyncing(context.Background()) - require.NoError(err) + syncer, err := NewManager(ManagerConfig{ + DB: db, + Client: client, + TargetRoot: firstSyncRoot, + SimultaneousWorkLimit: 5, + Log: logging.NoLog{}, + BranchFactor: merkledb.BranchFactor16, + }) + require.NoError(err) + require.NotNil(syncer) - // Wait until we've processed some work - // before updating the sync target. 
- require.Eventually( - func() bool { - syncer.workLock.Lock() - defer syncer.workLock.Unlock() - - return syncer.processedWork.Len() > 0 - }, - 3*time.Second, - 10*time.Millisecond, - ) - err = syncer.UpdateSyncTarget(syncRoot) - require.NoError(err) - close(updatedRootChan) + require.NoError(syncer.Start(context.Background())) - err = syncer.Wait(context.Background()) - require.NoError(err) - require.NoError(syncer.Error()) + // Wait until we've processed some work + // before updating the sync target. + require.Eventually( + func() bool { + syncer.workLock.Lock() + defer syncer.workLock.Unlock() - newRoot, err := db.GetMerkleRoot(context.Background()) - require.NoError(err) - require.Equal(syncRoot, newRoot) - } + return syncer.processedWork.Len() > 0 + }, + 5*time.Second, + 10*time.Millisecond, + ) + require.NoError(syncer.UpdateSyncTarget(secondSyncRoot)) + close(updatedRootChan) + + require.NoError(syncer.Wait(context.Background())) + require.NoError(syncer.Error()) + + newRoot, err := db.GetMerkleRoot(context.Background()) + require.NoError(err) + require.Equal(secondSyncRoot, newRoot) } func Test_Sync_UpdateSyncTarget(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - defer ctrl.Finish() - m, err := NewStateSyncManager(StateSyncConfig{ - SyncDB: &merkledb.Database{}, // Not used - Client: NewMockClient(ctrl), // Not used + m, err := NewManager(ManagerConfig{ + DB: merkledb.NewMockMerkleDB(ctrl), // Not used + Client: NewMockClient(ctrl), // Not used TargetRoot: ids.Empty, SimultaneousWorkLimit: 5, Log: logging.NoLog{}, + BranchFactor: merkledb.BranchFactor16, }) require.NoError(err) // Populate [m.processWork] to ensure that UpdateSyncTarget // moves the work to [m.unprocessedWork]. 
- item := &syncWorkItem{ - start: []byte{1}, - end: []byte{2}, - LocalRootID: ids.GenerateTestID(), + item := &workItem{ + start: maybe.Some([]byte{1}), + end: maybe.Some([]byte{2}), + localRootID: ids.GenerateTestID(), } m.processedWork.Insert(item) @@ -728,29 +1134,26 @@ func Test_Sync_UpdateSyncTarget(t *testing.T) { <-startedWaiting newSyncRoot := ids.GenerateTestID() - err = m.UpdateSyncTarget(newSyncRoot) - require.NoError(err) + require.NoError(m.UpdateSyncTarget(newSyncRoot)) <-gotSignalChan require.Equal(newSyncRoot, m.config.TargetRoot) - require.Equal(0, m.processedWork.Len()) + require.Zero(m.processedWork.Len()) require.Equal(1, m.unprocessedWork.Len()) } -func generateTrie(t *testing.T, r *rand.Rand, count int) (*merkledb.Database, error) { +func generateTrie(t *testing.T, r *rand.Rand, count int) (merkledb.MerkleDB, error) { db, _, err := generateTrieWithMinKeyLen(t, r, count, 0) return db, err } -func generateTrieWithMinKeyLen(t *testing.T, r *rand.Rand, count int, minKeyLen int) (*merkledb.Database, [][]byte, error) { +func generateTrieWithMinKeyLen(t *testing.T, r *rand.Rand, count int, minKeyLen int) (merkledb.MerkleDB, [][]byte, error) { + require := require.New(t) + db, err := merkledb.New( context.Background(), memdb.New(), - merkledb.Config{ - Tracer: newNoopTracer(), - HistoryLength: 1000, - NodeCacheSize: 1000, - }, + newDefaultDBConfig(), ) if err != nil { return nil, nil, err @@ -767,14 +1170,14 @@ func generateTrieWithMinKeyLen(t *testing.T, r *rand.Rand, count int, minKeyLen key := make([]byte, r.Intn(50)+len(prefix)) copy(key, prefix) _, err := r.Read(key[len(prefix):]) - require.NoError(t, err) + require.NoError(err) return key } // new key key := make([]byte, r.Intn(50)+minKeyLen) _, err = r.Read(key) - require.NoError(t, err) + require.NoError(err) return key } @@ -784,7 +1187,7 @@ func generateTrieWithMinKeyLen(t *testing.T, r *rand.Rand, count int, minKeyLen value = nil } else { _, err = r.Read(value) - require.NoError(t, err) + 
require.NoError(err) } key := genKey() if _, seen := seenKeys[string(key)]; seen { @@ -797,8 +1200,6 @@ func generateTrieWithMinKeyLen(t *testing.T, r *rand.Rand, count int, minKeyLen } i++ } - slices.SortFunc(allKeys, func(a, b []byte) bool { - return bytes.Compare(a, b) < 0 - }) + slices.SortFunc(allKeys, bytes.Compare) return db, allKeys, batch.Write() } diff --git a/avalanchego/x/sync/syncmanager.go b/avalanchego/x/sync/syncmanager.go deleted file mode 100644 index deb6870a..00000000 --- a/avalanchego/x/sync/syncmanager.go +++ /dev/null @@ -1,679 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package sync - -import ( - "bytes" - "context" - "errors" - "fmt" - "sync" - "time" - - "go.uber.org/zap" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/x/merkledb" -) - -const ( - defaultLeafRequestLimit = 1024 - maxTokenWaitTime = 5 * time.Second -) - -var ( - token = struct{}{} - ErrAlreadyStarted = errors.New("cannot start a StateSyncManager that has already been started") - ErrAlreadyClosed = errors.New("StateSyncManager is closed") - ErrNotEnoughBytes = errors.New("less bytes read than the specified length") - ErrNoClientProvided = errors.New("client is a required field of the sync config") - ErrNoDatabaseProvided = errors.New("sync database is a required field of the sync config") - ErrNoLogProvided = errors.New("log is a required field of the sync config") - ErrZeroWorkLimit = errors.New("simultaneous work limit must be greater than 0") - ErrFinishedWithUnexpectedRoot = errors.New("finished syncing with an unexpected root") -) - -type priority byte - -// Note that [highPriority] > [medPriority] > [lowPriority]. -const ( - lowPriority priority = iota + 1 - medPriority - highPriority -) - -// Signifies that we should sync the range [start, end]. -// nil [start] means there is no lower bound. 
-// nil [end] means there is no upper bound. -// [LocalRootID] is the ID of the root of this range in our database. -// If we have no local root for this range, [LocalRootID] is ids.Empty. -type syncWorkItem struct { - start []byte - end []byte - priority priority - LocalRootID ids.ID -} - -// TODO danlaine look into using a sync.Pool for syncWorkItems -func newWorkItem(localRootID ids.ID, start, end []byte, priority priority) *syncWorkItem { - return &syncWorkItem{ - LocalRootID: localRootID, - start: start, - end: end, - priority: priority, - } -} - -type StateSyncManager struct { - // Must be held when accessing [config.TargetRoot]. - syncTargetLock sync.RWMutex - config StateSyncConfig - - workLock sync.Mutex - // The number of work items currently being processed. - // Namely, the number of goroutines executing [doWork]. - // [workLock] must be held when accessing [processingWorkItems]. - processingWorkItems int - // [workLock] must be held while accessing [unprocessedWork]. - unprocessedWork *syncWorkHeap - // Signalled when: - // - An item is added to [unprocessedWork]. - // - There are no more items in [unprocessedWork] and [processingWorkItems] is 0. - // - Close() is called. - // [workLock] is its inner lock. - unprocessedWorkCond sync.Cond - // [workLock] must be held while accessing [processedWork]. - processedWork *syncWorkHeap - - // When this is closed: - // - [closed] is true. - // - [cancelCtx] was called. - // - [workToBeDone] and [completedWork] are closed. - syncDoneChan chan struct{} - - // Rate-limits the number of concurrently processing work items. - workTokens chan struct{} - - errLock sync.Mutex - // If non-nil, there was a fatal error. - // [errLock] must be held when accessing [fatalError]. - fatalError error - - // Cancels all currently processing work items. - cancelCtx context.CancelFunc - - // Set to true when StartSyncing is called. 
- syncing bool - closeOnce sync.Once -} - -type StateSyncConfig struct { - SyncDB *merkledb.Database - Client Client - SimultaneousWorkLimit int - Log logging.Logger - TargetRoot ids.ID -} - -func NewStateSyncManager(config StateSyncConfig) (*StateSyncManager, error) { - switch { - case config.Client == nil: - return nil, ErrNoClientProvided - case config.SyncDB == nil: - return nil, ErrNoDatabaseProvided - case config.Log == nil: - return nil, ErrNoLogProvided - case config.SimultaneousWorkLimit == 0: - return nil, ErrZeroWorkLimit - } - - m := &StateSyncManager{ - config: config, - syncDoneChan: make(chan struct{}), - unprocessedWork: newSyncWorkHeap(2 * config.SimultaneousWorkLimit), - processedWork: newSyncWorkHeap(2 * config.SimultaneousWorkLimit), - workTokens: make(chan struct{}, config.SimultaneousWorkLimit), - } - m.unprocessedWorkCond.L = &m.workLock - - // fill the work tokens channel with work tokens - for i := 0; i < config.SimultaneousWorkLimit; i++ { - m.workTokens <- token - } - return m, nil -} - -func (m *StateSyncManager) StartSyncing(ctx context.Context) error { - m.workLock.Lock() - defer m.workLock.Unlock() - - if m.syncing { - return ErrAlreadyStarted - } - - // Add work item to fetch the entire key range. - // Note that this will be the first work item to be processed. - m.unprocessedWork.Insert(newWorkItem(ids.Empty, nil, nil, lowPriority)) - - m.syncing = true - ctx, m.cancelCtx = context.WithCancel(ctx) - - go m.sync(ctx) - return nil -} - -// Repeatedly awaits signal on [m.unprocessedWorkCond] that there -// is work to do or we're done, and dispatches a goroutine to do -// the work. -func (m *StateSyncManager) sync(ctx context.Context) { - defer func() { - // Note we release [m.workLock] before calling Close() - // because Close() will try to acquire [m.workLock]. - // Invariant: [m.workLock] is held when we return from this goroutine. 
- m.workLock.Unlock() - m.Close() - }() - - // Keep doing work until we're closed, done or [ctx] is canceled. - m.workLock.Lock() - for { - // Invariant: [m.workLock] is held here. - if ctx.Err() != nil { // [m] is closed. - return // [m.workLock] released by defer. - } - if m.unprocessedWork.Len() == 0 { - if m.processingWorkItems == 0 { - // There's no work to do, and there are no work items being processed - // which could cause work to be added, so we're done. - return // [m.workLock] released by defer. - } - // There's no work to do. - // Note that if [m].Close() is called, or [ctx] is canceled, - // Close() will be called, which will broadcast on [m.unprocessedWorkCond], - // which will cause Wait() to return, and this goroutine to exit. - m.unprocessedWorkCond.Wait() - continue - } - m.processingWorkItems++ - workItem := m.unprocessedWork.GetWork() - // TODO danlaine: We won't release [m.workLock] until - // we've started a goroutine for each available work item. - // We can't apply proofs we receive until we release [m.workLock]. - // Is this OK? Is it possible we end up with too many goroutines? - go m.doWork(ctx, workItem) - } -} - -// Called when there is a fatal error or sync is complete. -func (m *StateSyncManager) Close() { - m.closeOnce.Do(func() { - m.workLock.Lock() - defer m.workLock.Unlock() - - // Don't process any more work items. - // Drop currently processing work items. - if m.cancelCtx != nil { - m.cancelCtx() - } - - // ensure any goroutines waiting for work from the heaps gets released - m.unprocessedWork.Close() - m.unprocessedWorkCond.Signal() - m.processedWork.Close() - - // signal all code waiting on the sync to complete - close(m.syncDoneChan) - }) -} - -// Processes [item] by fetching and applying a change or range proof. -// Assumes [m.workLock] is not held. -func (m *StateSyncManager) doWork(ctx context.Context, item *syncWorkItem) { - // Wait until we get a work token or we close. 
- select { - case <-m.workTokens: - case <-ctx.Done(): - // [m] is closed and sync() is returning so don't care about cleanup. - return - } - - defer func() { - m.workTokens <- token - m.workLock.Lock() - m.processingWorkItems-- - if m.processingWorkItems == 0 && m.unprocessedWork.Len() == 0 { - // There are no processing or unprocessed work items so we're done. - m.unprocessedWorkCond.Signal() - } - m.workLock.Unlock() - }() - - if item.LocalRootID == ids.Empty { - // the keys in this range have not been downloaded, so get all key/values - m.getAndApplyRangeProof(ctx, item) - } else { - // the keys in this range have already been downloaded, but the root changed, so get all changes - m.getAndApplyChangeProof(ctx, item) - } -} - -// Fetch and apply the change proof given by [workItem]. -// Assumes [m.workLock] is not held. -func (m *StateSyncManager) getAndApplyChangeProof(ctx context.Context, workItem *syncWorkItem) { - rootID := m.getTargetRoot() - - if workItem.LocalRootID == rootID { - // Start root is the same as the end root, so we're done. - m.completeWorkItem(ctx, workItem, workItem.end, rootID, nil) - return - } - - changeproof, err := m.config.Client.GetChangeProof(ctx, - &ChangeProofRequest{ - StartingRoot: workItem.LocalRootID, - EndingRoot: rootID, - Start: workItem.start, - End: workItem.end, - Limit: defaultLeafRequestLimit, - }, - m.config.SyncDB, - ) - if err != nil { - m.setError(err) - return - } - - select { - case <-m.syncDoneChan: - // If we're closed, don't apply the proof. - return - default: - } - - // The start or end root IDs are not present in other nodes' history. - // Add this range as a fresh uncompleted work item to the work heap. 
- // TODO danlaine send range proof instead of failure notification - if !changeproof.HadRootsInHistory { - workItem.LocalRootID = ids.Empty - m.enqueueWork(workItem) - return - } - - largestHandledKey := workItem.end - // if the proof wasn't empty, apply changes to the sync DB - if len(changeproof.KeyValues)+len(changeproof.DeletedKeys) > 0 { - if err := m.config.SyncDB.CommitChangeProof(ctx, changeproof); err != nil { - m.setError(err) - return - } - - if len(changeproof.KeyValues) > 0 { - largestHandledKey = changeproof.KeyValues[len(changeproof.KeyValues)-1].Key - } - if len(changeproof.DeletedKeys) > 0 { - lastDeletedKey := changeproof.DeletedKeys[len(changeproof.DeletedKeys)-1] - if bytes.Compare(lastDeletedKey, largestHandledKey) == 1 { - largestHandledKey = lastDeletedKey - } - } - } - - m.completeWorkItem(ctx, workItem, largestHandledKey, rootID, changeproof.EndProof) -} - -// Fetch and apply the range proof given by [workItem]. -// Assumes [m.workLock] is not held. -func (m *StateSyncManager) getAndApplyRangeProof(ctx context.Context, workItem *syncWorkItem) { - rootID := m.getTargetRoot() - proof, err := m.config.Client.GetRangeProof(ctx, - &RangeProofRequest{ - Root: rootID, - Start: workItem.start, - End: workItem.end, - Limit: defaultLeafRequestLimit, - }, - ) - if err != nil { - m.setError(err) - return - } - - select { - case <-m.syncDoneChan: - // If we're closed, don't apply the proof. - return - default: - } - - largestHandledKey := workItem.end - if len(proof.KeyValues) > 0 { - // Add all the key-value pairs we got to the database. - if err := m.config.SyncDB.CommitRangeProof(ctx, workItem.start, proof); err != nil { - m.setError(err) - return - } - - largestHandledKey = proof.KeyValues[len(proof.KeyValues)-1].Key - } - - m.completeWorkItem(ctx, workItem, largestHandledKey, rootID, proof.EndProof) -} - -// Attempt to find what key to query next based on the differences between -// the local trie path to a node and the path recently received. 
-func (m *StateSyncManager) findNextKey( - ctx context.Context, - start []byte, - end []byte, - receivedProofNodes []merkledb.ProofNode, -) ([]byte, error) { - proofOfStart, err := m.config.SyncDB.GetProof(ctx, start) - if err != nil { - return nil, err - } - localProofNodes := proofOfStart.Path - - var result []byte - localIndex := len(localProofNodes) - 1 - receivedIndex := len(receivedProofNodes) - 1 - startKeyPath := merkledb.SerializedPath{Value: start, NibbleLength: 2 * len(start)} - - // Just return the start key when the proof nodes contain keys that are not prefixes of the start key - // this occurs mostly in change proofs where the largest returned key was a deleted key. - // Since the key was deleted, it no longer shows up in the proof nodes - // for now, just fallback to using the start key, which is always correct. - // TODO: determine a more accurate nextKey in this scenario - if !startKeyPath.HasPrefix(localProofNodes[localIndex].KeyPath) || !startKeyPath.HasPrefix(receivedProofNodes[receivedIndex].KeyPath) { - return start, nil - } - - // walk up the node paths until a difference is found - for receivedIndex >= 0 && result == nil { - localNode := localProofNodes[localIndex] - receivedNode := receivedProofNodes[receivedIndex] - // the two nodes have the same key - if localNode.KeyPath.Equal(receivedNode.KeyPath) { - startingChildIndex := byte(0) - if localNode.KeyPath.NibbleLength < startKeyPath.NibbleLength { - startingChildIndex = startKeyPath.NibbleVal(localNode.KeyPath.NibbleLength) + 1 - } - // the two nodes have the same path, so ensure that all children have matching ids - for childIndex := startingChildIndex; childIndex < 16; childIndex++ { - receivedChildID, receiveOk := receivedNode.Children[childIndex] - localChildID, localOk := localNode.Children[childIndex] - // if they both don't have a child or have matching children, continue - if (receiveOk || localOk) && receivedChildID != localChildID { - result = 
localNode.KeyPath.AppendNibble(childIndex).Value - break - } - } - if result != nil { - break - } - // only want to move both indexes when they have equal keys - localIndex-- - receivedIndex-- - continue - } - - var branchNode merkledb.ProofNode - - if receivedNode.KeyPath.NibbleLength > localNode.KeyPath.NibbleLength { - // the received proof has an extra node due to a branch that is not present locally - branchNode = receivedNode - receivedIndex-- - } else { - // the local proof has an extra node due to a branch that was not present in the received proof - branchNode = localNode - localIndex-- - } - - // the two nodes have different paths, so find where they branched - for nextKeyNibble := startKeyPath.NibbleVal(branchNode.KeyPath.NibbleLength) + 1; nextKeyNibble < 16; nextKeyNibble++ { - if _, ok := branchNode.Children[nextKeyNibble]; ok { - result = branchNode.KeyPath.AppendNibble(nextKeyNibble).Value - break - } - } - } - - if result == nil || (len(end) > 0 && bytes.Compare(result, end) >= 0) { - return nil, nil - } - - return result, nil -} - -func (m *StateSyncManager) Error() error { - m.errLock.Lock() - defer m.errLock.Unlock() - - return m.fatalError -} - -// Blocks until either: -// - sync is complete. -// - sync fatally errored. -// - [ctx] is canceled. -// If [ctx] is canceled, returns [ctx].Err(). -func (m *StateSyncManager) Wait(ctx context.Context) error { - select { - case <-m.syncDoneChan: - case <-ctx.Done(): - return ctx.Err() - } - - // There was a fatal error. - if err := m.Error(); err != nil { - return err - } - - root, err := m.config.SyncDB.GetMerkleRoot(ctx) - if err != nil { - m.config.Log.Info("completed with error", zap.Error(err)) - return err - } - if m.getTargetRoot() != root { - // This should never happen. 
- return fmt.Errorf("%w: expected %s, got %s", ErrFinishedWithUnexpectedRoot, m.getTargetRoot(), root) - } - m.config.Log.Info("completed", zap.String("new root", root.String())) - return nil -} - -func (m *StateSyncManager) UpdateSyncTarget(syncTargetRoot ids.ID) error { - m.workLock.Lock() - defer m.workLock.Unlock() - - select { - case <-m.syncDoneChan: - return ErrAlreadyClosed - default: - } - - m.syncTargetLock.Lock() - defer m.syncTargetLock.Unlock() - - if m.config.TargetRoot == syncTargetRoot { - // the target hasn't changed, so there is nothing to do - return nil - } - - m.config.TargetRoot = syncTargetRoot - - // move all completed ranges into the work heap with high priority - shouldSignal := m.processedWork.Len() > 0 - for m.processedWork.Len() > 0 { - // Note that [m.processedWork].Close() hasn't - // been called because we have [m.workLock] - // and we checked that [m.closed] is false. - currentItem := m.processedWork.GetWork() - currentItem.priority = highPriority - m.unprocessedWork.Insert(currentItem) - } - if shouldSignal { - // Only signal once because we only have 1 goroutine - // waiting on [m.unprocessedWorkCond]. - m.unprocessedWorkCond.Signal() - } - return nil -} - -func (m *StateSyncManager) getTargetRoot() ids.ID { - m.syncTargetLock.RLock() - defer m.syncTargetLock.RUnlock() - - return m.config.TargetRoot -} - -// Record that there was a fatal error and begin shutting down. -func (m *StateSyncManager) setError(err error) { - m.errLock.Lock() - defer m.errLock.Unlock() - - m.config.Log.Error("syncing failed", zap.Error(err)) - m.fatalError = err - // Call in goroutine because we might be holding [m.workLock] - // which [m.Close] will try to acquire. - go m.Close() -} - -// Mark the range [start, end] as synced up to [rootID]. -// Assumes [m.workLock] is not held. 
-func (m *StateSyncManager) completeWorkItem(ctx context.Context, workItem *syncWorkItem, largestHandledKey []byte, rootID ids.ID, proofOfLargestKey []merkledb.ProofNode) { - // if the last key is equal to the end, then the full range is completed - if !bytes.Equal(largestHandledKey, workItem.end) { - // find the next key to start querying by comparing the proofs for the last completed key - nextStartKey, err := m.findNextKey(ctx, largestHandledKey, workItem.end, proofOfLargestKey) - if err != nil { - m.setError(err) - return - } - - largestHandledKey = workItem.end - - // nextStartKey being nil indicates that the entire range has been completed - if nextStartKey != nil { - // the full range wasn't completed, so enqueue a new work item for the range [nextStartKey, workItem.end] - m.enqueueWork(newWorkItem(workItem.LocalRootID, nextStartKey, workItem.end, workItem.priority)) - largestHandledKey = nextStartKey - } - } - - // completed the range [workItem.start, lastKey], log and record in the completed work heap - m.config.Log.Info("completed range", - zap.Binary("start", workItem.start), - zap.Binary("end", largestHandledKey), - ) - if m.getTargetRoot() == rootID { - m.workLock.Lock() - defer m.workLock.Unlock() - - m.processedWork.MergeInsert(newWorkItem(rootID, workItem.start, largestHandledKey, workItem.priority)) - } else { - // the root has changed, so reinsert with high priority - m.enqueueWork(newWorkItem(rootID, workItem.start, largestHandledKey, highPriority)) - } -} - -// Queue the given key range to be fetched and applied. -// If there are sufficiently few unprocessed/processing work items, -// splits the range into two items and queues them both. -// Assumes [m.workLock] is not held. 
-func (m *StateSyncManager) enqueueWork(item *syncWorkItem) { - m.workLock.Lock() - defer func() { - m.workLock.Unlock() - m.unprocessedWorkCond.Signal() - }() - - if m.processingWorkItems+m.unprocessedWork.Len() > 2*m.config.SimultaneousWorkLimit { - // There are too many work items already, don't split the range - m.unprocessedWork.Insert(item) - return - } - - // Split the remaining range into to 2. - // Find the middle point. - mid := midPoint(item.start, item.end) - - // first item gets higher priority than the second to encourage finished ranges to grow - // rather than start a new range that is not contiguous with existing completed ranges - first := newWorkItem(item.LocalRootID, item.start, mid, medPriority) - second := newWorkItem(item.LocalRootID, mid, item.end, lowPriority) - - m.unprocessedWork.Insert(first) - m.unprocessedWork.Insert(second) -} - -// find the midpoint between two keys -// nil on start is treated as all 0's -// nil on end is treated as all 255's -func midPoint(start, end []byte) []byte { - length := len(start) - if len(end) > length { - length = len(end) - } - if length == 0 { - return []byte{127} - } - - // This check deals with cases where the end has a 255(or is nil which is treated as all 255s) and the start key ends 255. - // For example, midPoint([255], nil) should be [255, 127], not [255]. 
- // The result needs the extra byte added on to the end to deal with the fact that the naive midpoint between 255 and 255 would be 255 - if (len(start) > 0 && start[len(start)-1] == 255) && (len(end) == 0 || end[len(end)-1] == 255) { - length++ - } - - leftover := 0 - midpoint := make([]byte, length+1) - for i := 0; i < length; i++ { - startVal := 0 - if i < len(start) { - startVal = int(start[i]) - } - - endVal := 0 - if len(end) == 0 { - endVal = 255 - } - if i < len(end) { - endVal = int(end[i]) - } - - total := startVal + endVal + leftover - leftover = 0 - // if total is odd, when we divide, we will lose the .5, - // record that in the leftover for the next digits - if total%2 == 1 { - leftover = 256 - } - - // find the midpoint between the start and the end - total /= 2 - - // larger than byte can hold, so carry over to previous byte - if total >= 256 { - total -= 256 - index := i - 1 - for index >= 0 { - if midpoint[index] != 255 { - midpoint[index]++ - break - } - - midpoint[index] = 0 - index-- - } - } - midpoint[i] = byte(total) - } - if leftover > 0 { - midpoint[length] = 127 - } else { - midpoint = midpoint[0:length] - } - return midpoint -} diff --git a/avalanchego/x/sync/syncworkheap.go b/avalanchego/x/sync/syncworkheap.go deleted file mode 100644 index b015b023..00000000 --- a/avalanchego/x/sync/syncworkheap.go +++ /dev/null @@ -1,198 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package sync - -import ( - "bytes" - "container/heap" - - "github.com/google/btree" -) - -var _ heap.Interface = &syncWorkHeap{} - -type heapItem struct { - workItem *syncWorkItem - heapIndex int -} - -// A priority queue of syncWorkItems. -// Note that work item ranges never overlap. -// Supports range merging and priority updating. -// Not safe for concurrent use. -type syncWorkHeap struct { - // Max heap of items by priority. - // i.e. heap.Pop returns highest priority item. 
- priorityHeap []*heapItem - // The heap items sorted by range start. - // A nil start is considered to be the smallest. - sortedItems *btree.BTreeG[*heapItem] - closed bool -} - -func newSyncWorkHeap(maxSize int) *syncWorkHeap { - return &syncWorkHeap{ - priorityHeap: make([]*heapItem, 0, maxSize), - sortedItems: btree.NewG( - 2, - func(a, b *heapItem) bool { - if a.workItem == nil { - return b.workItem != nil - } - if b.workItem == nil { - return false - } - return bytes.Compare(a.workItem.start, b.workItem.start) < 0 - }, - ), - } -} - -// Marks the heap as closed. -func (wh *syncWorkHeap) Close() { - wh.closed = true -} - -// Adds a new [item] into the heap. Will not merge items, unlike MergeInsert. -func (wh *syncWorkHeap) Insert(item *syncWorkItem) { - if wh.closed { - return - } - - heap.Push(wh, &heapItem{workItem: item}) -} - -// Pops and returns a work item from the heap. -// Returns nil if no work is available or the heap is closed. -func (wh *syncWorkHeap) GetWork() *syncWorkItem { - if wh.closed || wh.Len() == 0 { - return nil - } - return heap.Pop(wh).(*heapItem).workItem -} - -// Insert the item into the heap, merging it with existing items -// that share a boundary and root ID. -// e.g. if the heap contains a work item with range -// [0,10] and then [10,20] is inserted, we will merge the two -// into a single work item with range [0,20]. -// e.g. if the heap contains work items [0,10] and [20,30], -// and we add [10,20], we will merge them into [0,30]. -func (wh *syncWorkHeap) MergeInsert(item *syncWorkItem) { - if wh.closed { - return - } - - var mergedRange *heapItem - - // Find the item with the greatest start range which is less than [item.start]. - // Note that the iterator function will run at most once, since it always returns false. 
- wh.sortedItems.DescendLessOrEqual( - &heapItem{ - workItem: &syncWorkItem{ - start: item.start, - }, - }, - func(beforeItem *heapItem) bool { - if item.LocalRootID == beforeItem.workItem.LocalRootID && bytes.Equal(beforeItem.workItem.end, item.start) { - // [beforeItem.start, beforeItem.end] and [item.start, item.end] are - // merged into [beforeItem.start, item.end] - beforeItem.workItem.end = item.end - mergedRange = beforeItem - } - return false - }) - - // Find the item with the smallest start range which is greater than [item.start]. - // Note that the iterator function will run at most once, since it always returns false. - wh.sortedItems.AscendGreaterOrEqual( - &heapItem{ - workItem: &syncWorkItem{ - start: item.start, - }, - }, - func(afterItem *heapItem) bool { - if item.LocalRootID == afterItem.workItem.LocalRootID && bytes.Equal(afterItem.workItem.start, item.end) { - if mergedRange != nil { - // [beforeItem.start, item.end] and [afterItem.start, afterItem.end] are merged - // into [beforeItem.start, afterItem.end]. - // Modify [mergedRange] and remove [afterItem] since [mergedRange] now contains the entire - // range that was covered by [afterItem]. - wh.remove(afterItem) - mergedRange.workItem.end = afterItem.workItem.end - if afterItem.workItem.priority > mergedRange.workItem.priority { - mergedRange.workItem.priority = afterItem.workItem.priority - heap.Fix(wh, mergedRange.heapIndex) - } - } else { - // [item.start, item.end] and [afterItem.start, afterItem.end] are merged into - // [item.start, afterItem.end]. - afterItem.workItem.start = item.start - mergedRange = afterItem - } - } - return false - }) - - if mergedRange != nil { - // We merged [item] with at least one existing item. - if item.priority > mergedRange.workItem.priority { - mergedRange.workItem.priority = item.priority - // Priority was updated; fix position in the heap. 
- heap.Fix(wh, mergedRange.heapIndex) - } - } else { - // We didn't merge [item] with an existing one; put it in the heap. - heap.Push(wh, &heapItem{workItem: item}) - } -} - -// Deletes [item] from the heap. -func (wh *syncWorkHeap) remove(item *heapItem) { - oldIndex := item.heapIndex - newLength := len(wh.priorityHeap) - 1 - - // swap with last item, delete item, then fix heap if required - wh.Swap(newLength, item.heapIndex) - wh.priorityHeap[newLength] = nil - wh.priorityHeap = wh.priorityHeap[:newLength] - - // the item was already the last item, so nothing needs to be fixed - if oldIndex != newLength { - heap.Fix(wh, oldIndex) - } - wh.sortedItems.Delete(item) -} - -// below this line are the implementations required for heap.Interface - -func (wh *syncWorkHeap) Len() int { - return len(wh.priorityHeap) -} - -func (wh *syncWorkHeap) Less(i int, j int) bool { - return wh.priorityHeap[i].workItem.priority > wh.priorityHeap[j].workItem.priority -} - -func (wh *syncWorkHeap) Swap(i int, j int) { - wh.priorityHeap[i], wh.priorityHeap[j] = wh.priorityHeap[j], wh.priorityHeap[i] - wh.priorityHeap[i].heapIndex = i - wh.priorityHeap[j].heapIndex = j -} - -func (wh *syncWorkHeap) Pop() interface{} { - newLength := len(wh.priorityHeap) - 1 - value := wh.priorityHeap[newLength] - wh.priorityHeap[newLength] = nil - wh.priorityHeap = wh.priorityHeap[:newLength] - wh.sortedItems.Delete(value) - return value -} - -func (wh *syncWorkHeap) Push(x interface{}) { - item := x.(*heapItem) - item.heapIndex = len(wh.priorityHeap) - wh.priorityHeap = append(wh.priorityHeap, item) - wh.sortedItems.ReplaceOrInsert(item) -} diff --git a/avalanchego/x/sync/syncworkheap_test.go b/avalanchego/x/sync/syncworkheap_test.go deleted file mode 100644 index 67582901..00000000 --- a/avalanchego/x/sync/syncworkheap_test.go +++ /dev/null @@ -1,233 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package sync - -import ( - "testing" - - "github.com/stretchr/testify/require" - - "github.com/ava-labs/avalanchego/ids" -) - -// Tests heap.Interface methods Push, Pop, Swap, Len, Less. -func Test_SyncWorkHeap_Heap_Methods(t *testing.T) { - require := require.New(t) - - h := newSyncWorkHeap(1) - require.Equal(0, h.Len()) - - item1 := &heapItem{ - workItem: &syncWorkItem{ - start: nil, - end: nil, - priority: highPriority, - LocalRootID: ids.GenerateTestID(), - }, - } - h.Push(item1) - require.Equal(1, h.Len()) - require.Len(h.priorityHeap, 1) - require.Equal(item1, h.priorityHeap[0]) - require.Equal(0, h.priorityHeap[0].heapIndex) - require.Equal(1, h.sortedItems.Len()) - gotItem, ok := h.sortedItems.Get(item1) - require.True(ok) - require.Equal(item1, gotItem) - - h.Pop() - require.Equal(0, h.Len()) - require.Len(h.priorityHeap, 0) - require.Equal(0, h.sortedItems.Len()) - - item2 := &heapItem{ - workItem: &syncWorkItem{ - start: []byte{0}, - end: []byte{1}, - priority: highPriority, - LocalRootID: ids.GenerateTestID(), - }, - } - h.Push(item1) - h.Push(item2) - require.Equal(2, h.Len()) - require.Len(h.priorityHeap, 2) - require.Equal(item1, h.priorityHeap[0]) - require.Equal(item2, h.priorityHeap[1]) - require.Equal(0, item1.heapIndex) - require.Equal(1, item2.heapIndex) - require.Equal(2, h.sortedItems.Len()) - gotItem, ok = h.sortedItems.Get(item1) - require.True(ok) - require.Equal(item1, gotItem) - gotItem, ok = h.sortedItems.Get(item2) - require.True(ok) - require.Equal(item2, gotItem) - - require.False(h.Less(0, 1)) - - h.Swap(0, 1) - require.Equal(item2, h.priorityHeap[0]) - require.Equal(item1, h.priorityHeap[1]) - require.Equal(1, item1.heapIndex) - require.Equal(0, item2.heapIndex) - - require.False(h.Less(0, 1)) - - item1.workItem.priority = lowPriority - require.True(h.Less(0, 1)) - - gotItem = h.Pop().(*heapItem) - require.Equal(item1, gotItem) - - gotItem = h.Pop().(*heapItem) - require.Equal(item2, gotItem) - - require.Equal(0, h.Len()) - 
require.Len(h.priorityHeap, 0) - require.Equal(0, h.sortedItems.Len()) -} - -// Tests Insert and GetWork -func Test_SyncWorkHeap_Insert_GetWork(t *testing.T) { - require := require.New(t) - h := newSyncWorkHeap(1) - - item1 := &syncWorkItem{ - start: []byte{0}, - end: []byte{1}, - priority: lowPriority, - LocalRootID: ids.GenerateTestID(), - } - item2 := &syncWorkItem{ - start: []byte{2}, - end: []byte{3}, - priority: medPriority, - LocalRootID: ids.GenerateTestID(), - } - item3 := &syncWorkItem{ - start: []byte{4}, - end: []byte{5}, - priority: highPriority, - LocalRootID: ids.GenerateTestID(), - } - h.Insert(item3) - h.Insert(item2) - h.Insert(item1) - require.Equal(3, h.Len()) - - // Ensure [sortedItems] is in right order. - got := []*syncWorkItem{} - h.sortedItems.Ascend( - func(i *heapItem) bool { - got = append(got, i.workItem) - return true - }, - ) - require.Equal([]*syncWorkItem{item1, item2, item3}, got) - - // Ensure priorities are in right order. - gotItem := h.GetWork() - require.Equal(item3, gotItem) - gotItem = h.GetWork() - require.Equal(item2, gotItem) - gotItem = h.GetWork() - require.Equal(item1, gotItem) - gotItem = h.GetWork() - require.Nil(gotItem) - - require.Equal(0, h.Len()) -} - -func Test_SyncWorkHeap_remove(t *testing.T) { - require := require.New(t) - - h := newSyncWorkHeap(1) - - item1 := &syncWorkItem{ - start: []byte{0}, - end: []byte{1}, - priority: lowPriority, - LocalRootID: ids.GenerateTestID(), - } - - h.Insert(item1) - - heapItem1 := h.priorityHeap[0] - h.remove(heapItem1) - - require.Equal(0, h.Len()) - require.Len(h.priorityHeap, 0) - require.Equal(0, h.sortedItems.Len()) - - item2 := &syncWorkItem{ - start: []byte{2}, - end: []byte{3}, - priority: medPriority, - LocalRootID: ids.GenerateTestID(), - } - - h.Insert(item1) - h.Insert(item2) - - heapItem2 := h.priorityHeap[0] - require.Equal(item2, heapItem2.workItem) - h.remove(heapItem2) - require.Equal(1, h.Len()) - require.Len(h.priorityHeap, 1) - require.Equal(1, 
h.sortedItems.Len()) - require.Equal(0, h.priorityHeap[0].heapIndex) - require.Equal(item1, h.priorityHeap[0].workItem) - - heapItem1 = h.priorityHeap[0] - require.Equal(item1, heapItem1.workItem) - h.remove(heapItem1) - require.Equal(0, h.Len()) - require.Len(h.priorityHeap, 0) - require.Equal(0, h.sortedItems.Len()) -} - -func Test_SyncWorkHeap_Merge_Insert(t *testing.T) { - // merge with range before - syncHeap := newSyncWorkHeap(1000) - - syncHeap.MergeInsert(&syncWorkItem{start: nil, end: []byte{63}}) - require.Equal(t, 1, syncHeap.Len()) - - syncHeap.MergeInsert(&syncWorkItem{start: []byte{127}, end: []byte{192}}) - require.Equal(t, 2, syncHeap.Len()) - - syncHeap.MergeInsert(&syncWorkItem{start: []byte{193}, end: nil}) - require.Equal(t, 3, syncHeap.Len()) - - syncHeap.MergeInsert(&syncWorkItem{start: []byte{63}, end: []byte{126}, priority: lowPriority}) - require.Equal(t, 3, syncHeap.Len()) - - // merge with range after - syncHeap = newSyncWorkHeap(1000) - - syncHeap.MergeInsert(&syncWorkItem{start: nil, end: []byte{63}}) - require.Equal(t, 1, syncHeap.Len()) - - syncHeap.MergeInsert(&syncWorkItem{start: []byte{127}, end: []byte{192}}) - require.Equal(t, 2, syncHeap.Len()) - - syncHeap.MergeInsert(&syncWorkItem{start: []byte{193}, end: nil}) - require.Equal(t, 3, syncHeap.Len()) - - syncHeap.MergeInsert(&syncWorkItem{start: []byte{64}, end: []byte{127}, priority: lowPriority}) - require.Equal(t, 3, syncHeap.Len()) - - // merge both sides at the same time - syncHeap = newSyncWorkHeap(1000) - - syncHeap.MergeInsert(&syncWorkItem{start: nil, end: []byte{63}}) - require.Equal(t, 1, syncHeap.Len()) - - syncHeap.MergeInsert(&syncWorkItem{start: []byte{127}, end: nil}) - require.Equal(t, 2, syncHeap.Len()) - - syncHeap.MergeInsert(&syncWorkItem{start: []byte{63}, end: []byte{127}, priority: lowPriority}) - require.Equal(t, 1, syncHeap.Len()) -} diff --git a/avalanchego/x/sync/workheap.go b/avalanchego/x/sync/workheap.go new file mode 100644 index 
00000000..0a7203f1 --- /dev/null +++ b/avalanchego/x/sync/workheap.go @@ -0,0 +1,159 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package sync + +import ( + "bytes" + + "github.com/google/btree" + + "github.com/ava-labs/avalanchego/utils/heap" + "github.com/ava-labs/avalanchego/utils/maybe" +) + +// A priority queue of syncWorkItems. +// Note that work item ranges never overlap. +// Supports range merging and priority updating. +// Not safe for concurrent use. +type workHeap struct { + // Max heap of items by priority. + // i.e. heap.Pop returns highest priority item. + innerHeap heap.Set[*workItem] + // The heap items sorted by range start. + // A Nothing start is considered to be the smallest. + sortedItems *btree.BTreeG[*workItem] + closed bool +} + +func newWorkHeap() *workHeap { + return &workHeap{ + innerHeap: heap.NewSet[*workItem](func(a, b *workItem) bool { + return a.priority > b.priority + }), + sortedItems: btree.NewG( + 2, + func(a, b *workItem) bool { + aNothing := a.start.IsNothing() + bNothing := b.start.IsNothing() + if aNothing { + // [a] is Nothing, so if [b] is Nothing, they're equal. + // Otherwise, [b] is greater. + return !bNothing + } + if bNothing { + // [a] has a value and [b] doesn't so [a] is greater. + return false + } + // [a] and [b] both contain values. Compare the values. + return bytes.Compare(a.start.Value(), b.start.Value()) < 0 + }, + ), + } +} + +// Marks the heap as closed. +func (wh *workHeap) Close() { + wh.closed = true +} + +// Adds a new [item] into the heap. Will not merge items, unlike MergeInsert. +func (wh *workHeap) Insert(item *workItem) { + if wh.closed { + return + } + + wh.innerHeap.Push(item) + wh.sortedItems.ReplaceOrInsert(item) +} + +// Pops and returns a work item from the heap. +// Returns nil if no work is available or the heap is closed. 
+func (wh *workHeap) GetWork() *workItem { + if wh.closed || wh.Len() == 0 { + return nil + } + item, _ := wh.innerHeap.Pop() + wh.sortedItems.Delete(item) + return item +} + +// Insert the item into the heap, merging it with existing items +// that share a boundary and root ID. +// e.g. if the heap contains a work item with range +// [0,10] and then [10,20] is inserted, we will merge the two +// into a single work item with range [0,20]. +// e.g. if the heap contains work items [0,10] and [20,30], +// and we add [10,20], we will merge them into [0,30]. +func (wh *workHeap) MergeInsert(item *workItem) { + if wh.closed { + return + } + + var mergedBefore, mergedAfter *workItem + searchItem := &workItem{ + start: item.start, + } + + // Find the item with the greatest start range which is less than [item.start]. + // Note that the iterator function will run at most once, since it always returns false. + wh.sortedItems.DescendLessOrEqual( + searchItem, + func(beforeItem *workItem) bool { + if item.localRootID == beforeItem.localRootID && + maybe.Equal(item.start, beforeItem.end, bytes.Equal) { + // [beforeItem.start, beforeItem.end] and [item.start, item.end] are + // merged into [beforeItem.start, item.end] + beforeItem.end = item.end + beforeItem.priority = max(item.priority, beforeItem.priority) + wh.innerHeap.Fix(beforeItem) + mergedBefore = beforeItem + } + return false + }) + + // Find the item with the smallest start range which is greater than [item.start]. + // Note that the iterator function will run at most once, since it always returns false. + wh.sortedItems.AscendGreaterOrEqual( + searchItem, + func(afterItem *workItem) bool { + if item.localRootID == afterItem.localRootID && + maybe.Equal(item.end, afterItem.start, bytes.Equal) { + // [item.start, item.end] and [afterItem.start, afterItem.end] are merged into + // [item.start, afterItem.end]. 
+ afterItem.start = item.start + afterItem.priority = max(item.priority, afterItem.priority) + wh.innerHeap.Fix(afterItem) + mergedAfter = afterItem + } + return false + }) + + // if the new item should be merged with both the item before and the item after, + // we can combine the before item with the after item + if mergedBefore != nil && mergedAfter != nil { + // combine the two ranges + mergedBefore.end = mergedAfter.end + // remove the second range since it is now covered by the first + wh.remove(mergedAfter) + // update the priority + mergedBefore.priority = max(mergedBefore.priority, mergedAfter.priority) + wh.innerHeap.Fix(mergedBefore) + } + + // nothing was merged, so add new item to the heap + if mergedBefore == nil && mergedAfter == nil { + // We didn't merge [item] with an existing one; put it in the heap. + wh.Insert(item) + } +} + +// Deletes [item] from the heap. +func (wh *workHeap) remove(item *workItem) { + wh.innerHeap.Remove(item) + wh.sortedItems.Delete(item) +} + +func (wh *workHeap) Len() int { + return wh.innerHeap.Len() +} diff --git a/avalanchego/x/sync/workheap_test.go b/avalanchego/x/sync/workheap_test.go new file mode 100644 index 00000000..41577f09 --- /dev/null +++ b/avalanchego/x/sync/workheap_test.go @@ -0,0 +1,310 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package sync + +import ( + "bytes" + "math/rand" + "slices" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/maybe" +) + +// Tests Insert and GetWork +func Test_WorkHeap_Insert_GetWork(t *testing.T) { + require := require.New(t) + h := newWorkHeap() + + lowPriorityItem := &workItem{ + start: maybe.Some([]byte{4}), + end: maybe.Some([]byte{5}), + priority: lowPriority, + localRootID: ids.GenerateTestID(), + } + mediumPriorityItem := &workItem{ + start: maybe.Some([]byte{0}), + end: maybe.Some([]byte{1}), + priority: medPriority, + localRootID: ids.GenerateTestID(), + } + highPriorityItem := &workItem{ + start: maybe.Some([]byte{2}), + end: maybe.Some([]byte{3}), + priority: highPriority, + localRootID: ids.GenerateTestID(), + } + h.Insert(highPriorityItem) + h.Insert(mediumPriorityItem) + h.Insert(lowPriorityItem) + require.Equal(3, h.Len()) + + // Ensure [sortedItems] is in right order. + got := []*workItem{} + h.sortedItems.Ascend( + func(i *workItem) bool { + got = append(got, i) + return true + }, + ) + require.Equal( + []*workItem{mediumPriorityItem, highPriorityItem, lowPriorityItem}, + got, + ) + + // Ensure priorities are in right order. 
+ gotItem := h.GetWork() + require.Equal(highPriorityItem, gotItem) + gotItem = h.GetWork() + require.Equal(mediumPriorityItem, gotItem) + gotItem = h.GetWork() + require.Equal(lowPriorityItem, gotItem) + gotItem = h.GetWork() + require.Nil(gotItem) + + require.Zero(h.Len()) +} + +func Test_WorkHeap_remove(t *testing.T) { + require := require.New(t) + + h := newWorkHeap() + + lowPriorityItem := &workItem{ + start: maybe.Some([]byte{0}), + end: maybe.Some([]byte{1}), + priority: lowPriority, + localRootID: ids.GenerateTestID(), + } + + mediumPriorityItem := &workItem{ + start: maybe.Some([]byte{2}), + end: maybe.Some([]byte{3}), + priority: medPriority, + localRootID: ids.GenerateTestID(), + } + + highPriorityItem := &workItem{ + start: maybe.Some([]byte{4}), + end: maybe.Some([]byte{5}), + priority: highPriority, + localRootID: ids.GenerateTestID(), + } + + h.Insert(lowPriorityItem) + + wrappedLowPriorityItem, ok := h.innerHeap.Peek() + require.True(ok) + h.remove(wrappedLowPriorityItem) + + require.Zero(h.Len()) + require.Zero(h.sortedItems.Len()) + + h.Insert(lowPriorityItem) + h.Insert(mediumPriorityItem) + h.Insert(highPriorityItem) + + wrappedhighPriorityItem, ok := h.innerHeap.Peek() + require.True(ok) + require.Equal(highPriorityItem, wrappedhighPriorityItem) + h.remove(wrappedhighPriorityItem) + require.Equal(2, h.Len()) + require.Equal(2, h.sortedItems.Len()) + got, ok := h.innerHeap.Peek() + require.True(ok) + require.Equal(mediumPriorityItem, got) + + wrappedMediumPriorityItem, ok := h.innerHeap.Peek() + require.True(ok) + require.Equal(mediumPriorityItem, wrappedMediumPriorityItem) + h.remove(wrappedMediumPriorityItem) + require.Equal(1, h.Len()) + require.Equal(1, h.sortedItems.Len()) + got, ok = h.innerHeap.Peek() + require.True(ok) + require.Equal(lowPriorityItem, got) + + wrappedLowPriorityItem, ok = h.innerHeap.Peek() + require.True(ok) + require.Equal(lowPriorityItem, wrappedLowPriorityItem) + h.remove(wrappedLowPriorityItem) + 
require.Zero(h.Len()) + require.Zero(h.sortedItems.Len()) +} + +func Test_WorkHeap_Merge_Insert(t *testing.T) { + // merge with range before + syncHeap := newWorkHeap() + + syncHeap.MergeInsert(&workItem{start: maybe.Nothing[[]byte](), end: maybe.Some([]byte{63})}) + require.Equal(t, 1, syncHeap.Len()) + + syncHeap.MergeInsert(&workItem{start: maybe.Some([]byte{127}), end: maybe.Some([]byte{192})}) + require.Equal(t, 2, syncHeap.Len()) + + syncHeap.MergeInsert(&workItem{start: maybe.Some([]byte{193}), end: maybe.Nothing[[]byte]()}) + require.Equal(t, 3, syncHeap.Len()) + + syncHeap.MergeInsert(&workItem{start: maybe.Some([]byte{63}), end: maybe.Some([]byte{126}), priority: lowPriority}) + require.Equal(t, 3, syncHeap.Len()) + + // merge with range after + syncHeap = newWorkHeap() + + syncHeap.MergeInsert(&workItem{start: maybe.Nothing[[]byte](), end: maybe.Some([]byte{63})}) + require.Equal(t, 1, syncHeap.Len()) + + syncHeap.MergeInsert(&workItem{start: maybe.Some([]byte{127}), end: maybe.Some([]byte{192})}) + require.Equal(t, 2, syncHeap.Len()) + + syncHeap.MergeInsert(&workItem{start: maybe.Some([]byte{193}), end: maybe.Nothing[[]byte]()}) + require.Equal(t, 3, syncHeap.Len()) + + syncHeap.MergeInsert(&workItem{start: maybe.Some([]byte{64}), end: maybe.Some([]byte{127}), priority: lowPriority}) + require.Equal(t, 3, syncHeap.Len()) + + // merge both sides at the same time + syncHeap = newWorkHeap() + + syncHeap.MergeInsert(&workItem{start: maybe.Nothing[[]byte](), end: maybe.Some([]byte{63})}) + require.Equal(t, 1, syncHeap.Len()) + + syncHeap.MergeInsert(&workItem{start: maybe.Some([]byte{127}), end: maybe.Nothing[[]byte]()}) + require.Equal(t, 2, syncHeap.Len()) + + syncHeap.MergeInsert(&workItem{start: maybe.Some([]byte{63}), end: maybe.Some([]byte{127}), priority: lowPriority}) + require.Equal(t, 1, syncHeap.Len()) +} + +func TestWorkHeapMergeInsertRandom(t *testing.T) { + var ( + require = require.New(t) + seed = time.Now().UnixNano() + rand = 
rand.New(rand.NewSource(seed)) // #nosec G404 + numRanges = 1_000 + bounds = [][]byte{} + rootID = ids.GenerateTestID() + ) + t.Logf("seed: %d", seed) + + // Create start and end bounds + for i := 0; i < numRanges; i++ { + bound := make([]byte, 32) + _, _ = rand.Read(bound) + bounds = append(bounds, bound) + } + slices.SortFunc(bounds, bytes.Compare) + + // Note that start < end for all ranges. + // It is possible but extremely unlikely that + // two elements of [bounds] are equal. + ranges := []workItem{} + for i := 0; i < numRanges/2; i++ { + start := bounds[i*2] + end := bounds[i*2+1] + ranges = append(ranges, workItem{ + start: maybe.Some(start), + end: maybe.Some(end), + priority: lowPriority, + // Note they all share the same root ID. + localRootID: rootID, + }) + } + // Set beginning of first range to Nothing. + ranges[0].start = maybe.Nothing[[]byte]() + // Set end of last range to Nothing. + ranges[len(ranges)-1].end = maybe.Nothing[[]byte]() + + setup := func() *workHeap { + // Insert all the ranges into the heap. + h := newWorkHeap() + for i, r := range ranges { + require.Equal(i, h.Len()) + rCopy := r + h.MergeInsert(&rCopy) + } + return h + } + + { + // Case 1: Merging an item with the range before and after + h := setup() + // Keep merging ranges until there's only one range left. 
+ for i := 0; i < len(ranges)-1; i++ { + // Merge ranges[i] with ranges[i+1] + h.MergeInsert(&workItem{ + start: ranges[i].end, + end: ranges[i+1].start, + priority: lowPriority, + localRootID: rootID, + }) + require.Equal(len(ranges)-i-1, h.Len()) + } + got := h.GetWork() + require.True(got.start.IsNothing()) + require.True(got.end.IsNothing()) + } + + { + // Case 2: Merging an item with the range before + h := setup() + for i := 0; i < len(ranges)-1; i++ { + // Extend end of ranges[i] + newEnd := slices.Clone(ranges[i].end.Value()) + newEnd = append(newEnd, 0) + h.MergeInsert(&workItem{ + start: ranges[i].end, + end: maybe.Some(newEnd), + priority: lowPriority, + localRootID: rootID, + }) + + // Shouldn't cause number of elements to change + require.Equal(len(ranges), h.Len()) + + start := ranges[i].start + if i == 0 { + start = maybe.Nothing[[]byte]() + } + // Make sure end is updated + got, ok := h.sortedItems.Get(&workItem{ + start: start, + }) + require.True(ok) + require.Equal(newEnd, got.end.Value()) + } + } + + { + // Case 3: Merging an item with the range after + h := setup() + for i := 1; i < len(ranges); i++ { + // Extend start of ranges[i] + newStartBytes := slices.Clone(ranges[i].start.Value()) + newStartBytes = newStartBytes[:len(newStartBytes)-1] + newStart := maybe.Some(newStartBytes) + + h.MergeInsert(&workItem{ + start: newStart, + end: ranges[i].start, + priority: lowPriority, + localRootID: rootID, + }) + + // Shouldn't cause number of elements to change + require.Equal(len(ranges), h.Len()) + + // Make sure start is updated + got, ok := h.sortedItems.Get(&workItem{ + start: newStart, + }) + require.True(ok) + require.Equal(newStartBytes, got.start.Value()) + } + } +} diff --git a/coreth/.gitignore b/coreth/.gitignore index 87ff040a..a93619ce 100644 --- a/coreth/.gitignore +++ b/coreth/.gitignore @@ -44,3 +44,6 @@ awscpu bin/ build/ + +# Used for e2e testing +avalanchego diff --git a/coreth/.golangci.yml b/coreth/.golangci.yml index 
a0dd1c43..25ea0feb 100644 --- a/coreth/.golangci.yml +++ b/coreth/.golangci.yml @@ -1,7 +1,7 @@ # This file configures github.com/golangci/golangci-lint. run: - timeout: 3m + timeout: 10m tests: true # default is true. Enables skipping of directories: # vendor$, third_party$, testdata$, examples$, Godeps$, builtin$ diff --git a/coreth/.kurtosis/kurtosis.sh b/coreth/.kurtosis/kurtosis.sh deleted file mode 100755 index a3d1cd85..00000000 --- a/coreth/.kurtosis/kurtosis.sh +++ /dev/null @@ -1,226 +0,0 @@ -# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! WARNING !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -# -# Do not modify this file! It will get overwritten when you upgrade Kurtosis! -# -# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! WARNING !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - -set -euo pipefail - - - -# ============================================================================================ -# Constants -# ============================================================================================ -# The directory where Kurtosis will store files it uses in between executions, e.g. 
access tokens -# Can make this configurable if needed -KURTOSIS_DIRPATH="${HOME}/.kurtosis" - -KURTOSIS_CORE_TAG="1.8" -KURTOSIS_DOCKERHUB_ORG="kurtosistech" -INITIALIZER_IMAGE="${KURTOSIS_DOCKERHUB_ORG}/kurtosis-core_initializer:${KURTOSIS_CORE_TAG}" -API_IMAGE="${KURTOSIS_DOCKERHUB_ORG}/kurtosis-core_api:${KURTOSIS_CORE_TAG}" - -POSITIONAL_ARG_DEFINITION_FRAGMENTS=2 - - - -# ============================================================================================ -# Arg Parsing -# ============================================================================================ -function print_help_and_exit() { - echo "" - echo "$(basename "${0}") [--custom-params custom_params_json] [--client-id client_id] [--client-secret client_secret] [--help] [--kurtosis-log-level kurtosis_log_level] [--list] [--parallelism parallelism] [--tests test_names] [--test-suite-log-level test_suite_log_level] test_suite_image" - echo "" - echo " --custom-params custom_params_json JSON string containing arbitrary data that will be passed as-is to your testsuite, so it can modify its behaviour based on input (default: {})" - echo " --client-id client_id An OAuth client ID which is needed for running Kurtosis in CI, and should be left empty when running Kurtosis on a local machine" - echo " --client-secret client_secret An OAuth client secret which is needed for running Kurtosis in CI, and should be left empty when running Kurtosis on a local machine" - echo " --help Display this message" - echo " --kurtosis-log-level kurtosis_log_level The log level that all output generated by the Kurtosis framework itself should log at (panic|fatal|error|warning|info|debug|trace) (default: info)" - echo " --list Rather than running the tests, lists the tests available to run" - echo " --parallelism parallelism The number of texts to execute in parallel (default: 4)" - echo " --tests test_names List of test names to run, separated by ',' (default or empty: run all tests)" - echo " 
--test-suite-log-level test_suite_log_level A string that will be passed as-is to the test suite container to indicate what log level the test suite container should output at; this string should be meaningful to the test suite container because Kurtosis won't know what logging framework the testsuite uses (default: info)" - echo " test_suite_image The Docker image containing the testsuite to execute" - - echo "" - exit 1 # Exit with an error code, so that if it gets accidentally called in parent scripts/CI it fails loudly -} - - - -# ============================================================================================ -# Arg Parsing -# ============================================================================================ -client_id="" -client_secret="" -custom_params_json="{}" -do_list="false" -kurtosis_log_level="info" -parallelism="4" -show_help="false" -test_names="" -test_suite_image="" -test_suite_log_level="info" - - - -POSITIONAL=() -while [ ${#} -gt 0 ]; do - key="${1}" - case "${key}" in - - --custom-params) - - custom_params_json="${2}" - shift # Shift to clear out the flag - shift # Shift again to clear out the value - ;; - - --client-id) - - client_id="${2}" - shift # Shift to clear out the flag - shift # Shift again to clear out the value - ;; - - --client-secret) - - client_secret="${2}" - shift # Shift to clear out the flag - shift # Shift again to clear out the value - ;; - - --help) - show_help="true" - shift # Shift to clear out the flag - - ;; - - --kurtosis-log-level) - - kurtosis_log_level="${2}" - shift # Shift to clear out the flag - shift # Shift again to clear out the value - ;; - - --list) - do_list="true" - shift # Shift to clear out the flag - - ;; - - --parallelism) - - parallelism="${2}" - shift # Shift to clear out the flag - shift # Shift again to clear out the value - ;; - - --tests) - - test_names="${2}" - shift # Shift to clear out the flag - shift # Shift again to clear out the value - ;; - - --test-suite-log-level) 
- - test_suite_log_level="${2}" - shift # Shift to clear out the flag - shift # Shift again to clear out the value - ;; - - -*) - echo "ERROR: Unrecognized flag '${key}'" >&2 - exit 1 - ;; - *) - POSITIONAL+=("${1}") - shift - ;; - esac -done - -if "${show_help}"; then - print_help_and_exit -fi - -# Restore positional parameters and assign them to variables -set -- "${POSITIONAL[@]}" -test_suite_image="${1:-}" - - - - - -# ============================================================================================ -# Arg Validation -# ============================================================================================ -if [ "${#}" -ne 1 ]; then - echo "ERROR: Expected 1 positional variables but got ${#}" >&2 - print_help_and_exit -fi - -if [ -z "$test_suite_image" ]; then - echo "ERROR: Variable 'test_suite_image' cannot be empty" >&2 - exit 1 -fi - - - -# ============================================================================================ -# Main Logic -# ============================================================================================# Because Kurtosis X.Y.Z tags are normalized to X.Y so that minor patch updates are transparently -# used, we need to pull the latest API & initializer images -echo "Pulling latest versions of API & initializer image..." -if ! docker pull "${INITIALIZER_IMAGE}"; then - echo "WARN: An error occurred pulling the latest version of the initializer image (${INITIALIZER_IMAGE}); you may be running an out-of-date version" >&2 -else - echo "Successfully pulled latest version of initializer image" -fi -if ! 
docker pull "${API_IMAGE}"; then - echo "WARN: An error occurred pulling the latest version of the API image (${API_IMAGE}); you may be running an out-of-date version" >&2 -else - echo "Successfully pulled latest version of API image" -fi - -# Kurtosis needs a Docker volume to store its execution data in -# To learn more about volumes, see: https://docs.docker.com/storage/volumes/ -sanitized_image="$(echo "${test_suite_image}" | sed 's/[^a-zA-Z0-9_.-]/_/g')" -suite_execution_volume="$(date +%Y-%m-%dT%H.%M.%S)_${sanitized_image}" -if ! docker volume create "${suite_execution_volume}" > /dev/null; then - echo "ERROR: Failed to create a Docker volume to store the execution files in" >&2 - exit 1 -fi - -if ! mkdir -p "${KURTOSIS_DIRPATH}"; then - echo "ERROR: Failed to create the Kurtosis directory at '${KURTOSIS_DIRPATH}'" >&2 - exit 1 -fi - -docker run \ - `# The Kurtosis initializer runs inside a Docker container, but needs to access to the Docker engine; this is how to do it` \ - `# For more info, see the bottom of: http://jpetazzo.github.io/2015/09/03/do-not-use-docker-in-docker-for-ci/` \ - --mount "type=bind,source=/var/run/docker.sock,target=/var/run/docker.sock" \ - \ - `# Because the Kurtosis initializer runs inside Docker but needs to persist & read files on the host filesystem between execution,` \ - `# the container expects the Kurtosis directory to be bind-mounted at the special "/kurtosis" path` \ - --mount "type=bind,source=${KURTOSIS_DIRPATH},target=/kurtosis" \ - \ - `# The Kurtosis initializer image requires the volume for storing suite execution data to be mounted at the special "/suite-execution" path` \ - --mount "type=volume,source=${suite_execution_volume},target=/suite-execution" \ - \ - `# Keep these sorted alphabetically` \ - --env CLIENT_ID="${client_id}" \ - --env CLIENT_SECRET="${client_secret}" \ - --env CUSTOM_PARAMS_JSON="${custom_params_json}" \ - --env DO_LIST="${do_list}" \ - --env KURTOSIS_API_IMAGE="${API_IMAGE}" \ - --env 
KURTOSIS_LOG_LEVEL="${kurtosis_log_level}" \ - --env PARALLELISM="${parallelism}" \ - --env SUITE_EXECUTION_VOLUME="${suite_execution_volume}" \ - --env TEST_NAMES="${test_names}" \ - --env TEST_SUITE_IMAGE="${test_suite_image}" \ - --env TEST_SUITE_LOG_LEVEL="${test_suite_log_level}" \ - \ - "${INITIALIZER_IMAGE}" diff --git a/coreth/Dockerfile b/coreth/Dockerfile index d6b8578f..85d724fc 100644 --- a/coreth/Dockerfile +++ b/coreth/Dockerfile @@ -1,7 +1,5 @@ # ============= Compilation Stage ================ -FROM golang:1.20.1-buster AS builder - -RUN apt-get update && apt-get install -y --no-install-recommends bash=5.0-4 make=4.2.1-1.2 gcc=4:8.3.0-1 musl-dev=1.1.21-2 ca-certificates=20200601~deb10u2 linux-headers-amd64 +FROM golang:1.21.7-bullseye AS builder ARG AVALANCHE_VERSION @@ -19,7 +17,7 @@ WORKDIR $GOPATH/src/github.com/ava-labs/avalanchego RUN go mod download # Replace the coreth dependency RUN go mod edit -replace github.com/ava-labs/coreth=../coreth -RUN go mod download && go mod tidy -compat=1.19 +RUN go mod download && go mod tidy -compat=1.21 # Build the AvalancheGo binary with local version of coreth. 
RUN ./scripts/build_avalanche.sh diff --git a/coreth/RELEASES.md b/coreth/RELEASES.md index cf484b38..745bbbd4 100644 --- a/coreth/RELEASES.md +++ b/coreth/RELEASES.md @@ -1,5 +1,71 @@ # Release Notes +## [v0.12.10](https://github.com/ava-labs/coreth/releases/tag/v0.12.10) +- Add support for off-chain warp messages +- Add support for getBlockReceipts RPC API +- Fix issue with state sync for large blocks +- Migrating Push Gossip to avalanchego network SDK handlers + +## [v0.12.9](https://github.com/ava-labs/coreth/releases/tag/v0.12.9) + +- Add concurrent prefetching of trie nodes during block processing +- Add `skip-tx-indexing` flag to disable transaction indexing and unindexing +- Update acceptor tip before sending chain events to subscribers +- Add soft cap on total block data size for state sync block requests + +## [v0.12.8](https://github.com/ava-labs/coreth/releases/tag/v0.12.8) + +- Bump AvalancheGo to v1.10.15 +- Fix crash in prestate tracer on memory read + +## [v0.12.7](https://github.com/ava-labs/coreth/releases/tag/v0.12.7) + +- Bump AvalancheGo to v1.10.14 + +## [v0.12.6](https://github.com/ava-labs/coreth/releases/tag/v0.12.6) + +- Remove lock options from HTTP handlers +- Fix deadlock in `eth_getLogs` when matcher session hits a missing block +- Replace Kurtosis E2E tests with avctl test framework + +## [v0.12.5](https://github.com/ava-labs/coreth/releases/tag/v0.12.5) + +- Add P2P SDK Pull Gossip to mempool +- Fix hanging requests on shutdown that could cause ungraceful shutdown +- Increase batch size writing snapshot diff to disk +- Migrate geth changes from v1.11.4 through v1.12.0 +- Bump AvalancheGo dependency to v1.10.10 + +## [v0.12.4](https://github.com/ava-labs/coreth/releases/tag/v0.12.4) + +- Fix API handler crash for `lookupState` in `prestate` tracer +- Fix API handler crash for LOG edge cases in the `callTracer` +- Fix regression in `eth_getLogs` serving request for blocks containing no Ethereum transactions +- Export 
`CalculateDynamicFee` + +## [v0.12.3](https://github.com/ava-labs/coreth/releases/tag/v0.12.3) + +- Migrate go-ethereum changes through v1.11.4 +- Downgrade API error log from `Warn` to `Info` + +## [v0.12.2](https://github.com/ava-labs/coreth/releases/tag/v0.12.2) + +- Increase default trie dirty cache size from 256MB to 512MB + +## [v0.12.1](https://github.com/ava-labs/coreth/releases/tag/v0.12.1) + +- Bump AvalancheGo dependency to v1.10.1 +- Improve block building logic +- Use shorter ctx while reading snapshot to serve state sync requests +- Remove proposer activation time from gossiper +- Fail outstanding requests on shutdown +- Make state sync request sizes configurable + +## [v0.12.0](https://github.com/ava-labs/coreth/releases/tag/v0.12.0) + +- Increase C-Chain block gas limit to 15M in Cortina +- Add Mainnet and Fuji Cortina Activation timestamps + ## [v0.11.9](https://github.com/ava-labs/coreth/releases/tag/v0.11.9) - Downgrade SetPreference log from warn to debug diff --git a/coreth/SECURITY.md b/coreth/SECURITY.md new file mode 100644 index 00000000..90dd1fb3 --- /dev/null +++ b/coreth/SECURITY.md @@ -0,0 +1,20 @@ +# Security Policy + +Avalanche takes the security of the platform and of its users very seriously. We and our community recognize the critical role of external security researchers and developers and welcome +responsible disclosures. Valid reports will be eligible for a reward (terms and conditions apply). + +## Reporting a Vulnerability + +**Please do not file a public ticket** mentioning the vulnerability. To disclose a vulnerability submit it through our [Bug Bounty Program](https://hackenproof.com/avalanche). + +Vulnerabilities must be disclosed to us privately with reasonable time to respond, and avoid compromise of other users and accounts, or loss of funds that are not your own. We do not reward spam or +social engineering vulnerabilities. 
+ +Do not test for or validate any security issues in the live Avalanche networks (Mainnet and Fuji testnet), confirm all exploits in a local private testnet. + +Please refer to the [Bug Bounty Page](https://hackenproof.com/avalanche) for the most up-to-date program rules and scope. + +## Supported Versions + +Please use the [most recently released version](https://github.com/ava-labs/coreth/releases/latest) to perform testing and to validate security issues. + diff --git a/coreth/accounts/abi/abi.go b/coreth/accounts/abi/abi.go index 4c70970d..9755a3aa 100644 --- a/coreth/accounts/abi/abi.go +++ b/coreth/accounts/abi/abi.go @@ -91,6 +91,55 @@ func (abi ABI) Pack(name string, args ...interface{}) ([]byte, error) { return append(method.ID, arguments...), nil } +// PackEvent packs the given event name and arguments to conform the ABI. +// Returns the topics for the event including the event signature (if non-anonymous event) and +// hashes derived from indexed arguments and the packed data of non-indexed args according to +// the event ABI specification. +// The order of arguments must match the order of the event definition. +// https://docs.soliditylang.org/en/v0.8.17/abi-spec.html#indexed-event-encoding. +// Note: PackEvent does not support array (fixed or dynamic-size) or struct types. 
+func (abi ABI) PackEvent(name string, args ...interface{}) ([]common.Hash, []byte, error) { + event, exist := abi.Events[name] + if !exist { + return nil, nil, fmt.Errorf("event '%s' not found", name) + } + if len(args) != len(event.Inputs) { + return nil, nil, fmt.Errorf("event '%s' unexpected number of inputs %d", name, len(args)) + } + + var ( + nonIndexedInputs = make([]interface{}, 0) + indexedInputs = make([]interface{}, 0) + nonIndexedArgs Arguments + indexedArgs Arguments + ) + + for i, arg := range event.Inputs { + if arg.Indexed { + indexedArgs = append(indexedArgs, arg) + indexedInputs = append(indexedInputs, args[i]) + } else { + nonIndexedArgs = append(nonIndexedArgs, arg) + nonIndexedInputs = append(nonIndexedInputs, args[i]) + } + } + + packedArguments, err := nonIndexedArgs.Pack(nonIndexedInputs...) + if err != nil { + return nil, nil, err + } + topics := make([]common.Hash, 0, len(indexedArgs)+1) + if !event.Anonymous { + topics = append(topics, event.ID) + } + indexedTopics, err := PackTopics(indexedInputs) + if err != nil { + return nil, nil, err + } + + return append(topics, indexedTopics...), packedArguments, nil +} + // PackOutput packs the given [args] as the output of given method [name] to conform the ABI. // This does not include method ID. func (abi ABI) PackOutput(name string, args ...interface{}) ([]byte, error) { @@ -107,12 +156,12 @@ func (abi ABI) PackOutput(name string, args ...interface{}) ([]byte, error) { } // getInputs gets input arguments of the given [name] method. 
-func (abi ABI) getInputs(name string, data []byte) (Arguments, error) { +func (abi ABI) getInputs(name string, data []byte, useStrictMode bool) (Arguments, error) { // since there can't be naming collisions with contracts and events, // we need to decide whether we're calling a method or an event var args Arguments if method, ok := abi.Methods[name]; ok { - if len(data)%32 != 0 { + if useStrictMode && len(data)%32 != 0 { return nil, fmt.Errorf("abi: improperly formatted input: %s - Bytes: [%+v]", string(data), data) } args = method.Inputs @@ -133,7 +182,7 @@ func (abi ABI) getArguments(name string, data []byte) (Arguments, error) { var args Arguments if method, ok := abi.Methods[name]; ok { if len(data)%32 != 0 { - return nil, fmt.Errorf("abi: improperly formatted output: %s - Bytes: [%+v]", string(data), data) + return nil, fmt.Errorf("abi: improperly formatted output: %q - Bytes: %+v", data, data) } args = method.Outputs } @@ -147,8 +196,8 @@ func (abi ABI) getArguments(name string, data []byte) (Arguments, error) { } // UnpackInput unpacks the input according to the ABI specification. -func (abi ABI) UnpackInput(name string, data []byte) ([]interface{}, error) { - args, err := abi.getInputs(name, data) +func (abi ABI) UnpackInput(name string, data []byte, useStrictMode bool) ([]interface{}, error) { + args, err := abi.getInputs(name, data, useStrictMode) if err != nil { return nil, err } @@ -167,8 +216,8 @@ func (abi ABI) Unpack(name string, data []byte) ([]interface{}, error) { // UnpackInputIntoInterface unpacks the input in v according to the ABI specification. // It performs an additional copy. Please only use, if you want to unpack into a // structure that does not strictly conform to the ABI structure (e.g. 
has additional arguments) -func (abi ABI) UnpackInputIntoInterface(v interface{}, name string, data []byte) error { - args, err := abi.getInputs(name, data) +func (abi ABI) UnpackInputIntoInterface(v interface{}, name string, data []byte, useStrictMode bool) error { + args, err := abi.getInputs(name, data, useStrictMode) if err != nil { return err } @@ -292,6 +341,17 @@ func (abi *ABI) EventByID(topic common.Hash) (*Event, error) { return nil, fmt.Errorf("no event with id: %#x", topic.Hex()) } +// ErrorByID looks up an error by the 4-byte id, +// returns nil if none found. +func (abi *ABI) ErrorByID(sigdata [4]byte) (*Error, error) { + for _, errABI := range abi.Errors { + if bytes.Equal(errABI.ID[:4], sigdata[:]) { + return &errABI, nil + } + } + return nil, fmt.Errorf("no error with id: %#x", sigdata[:]) +} + // HasFallback returns an indicator whether a fallback function is included. func (abi *ABI) HasFallback() bool { return abi.Fallback.Type == Fallback @@ -316,7 +376,10 @@ func UnpackRevert(data []byte) (string, error) { if !bytes.Equal(data[:4], revertSelector) { return "", errors.New("invalid data for unpacking") } - typ, _ := NewType("string", "", nil) + typ, err := NewType("string", "", nil) + if err != nil { + return "", err + } unpacked, err := (Arguments{{Type: typ}}).Unpack(data[4:]) if err != nil { return "", err diff --git a/coreth/accounts/abi/abi_extra_test.go b/coreth/accounts/abi/abi_extra_test.go new file mode 100644 index 00000000..52bce1b0 --- /dev/null +++ b/coreth/accounts/abi/abi_extra_test.go @@ -0,0 +1,111 @@ +// (c) 2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package abi + +import ( + "bytes" + "math/big" + "strings" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +// Note: This file contains tests in addition to those found in go-ethereum. 
+ +const TEST_ABI = `[{"type":"function","name":"receive","inputs":[{"name":"sender","type":"address"},{"name":"amount","type":"uint256"},{"name":"memo","type":"bytes"}],"outputs":[{"internalType":"bool","name":"isAllowed","type":"bool"}]}]` + +func TestUnpackInputIntoInterface(t *testing.T) { + abi, err := JSON(strings.NewReader(TEST_ABI)) + require.NoError(t, err) + + type inputType struct { + Sender common.Address + Amount *big.Int + Memo []byte + } + input := inputType{ + Sender: common.HexToAddress("0x02"), + Amount: big.NewInt(100), + Memo: []byte("hello"), + } + + rawData, err := abi.Pack("receive", input.Sender, input.Amount, input.Memo) + require.NoError(t, err) + + abi, err = JSON(strings.NewReader(TEST_ABI)) + require.NoError(t, err) + + for _, test := range []struct { + name string + extraPaddingBytes int + strictMode bool + expectedErrorSubstring string + }{ + { + name: "No extra padding to input data", + strictMode: true, + }, + { + name: "Valid input data with 32 extra padding(%32) ", + extraPaddingBytes: 32, + strictMode: true, + }, + { + name: "Valid input data with 64 extra padding(%32)", + extraPaddingBytes: 64, + strictMode: true, + }, + { + name: "Valid input data with extra padding indivisible by 32", + extraPaddingBytes: 33, + strictMode: true, + expectedErrorSubstring: "abi: improperly formatted input:", + }, + { + name: "Valid input data with extra padding indivisible by 32, no strict mode", + extraPaddingBytes: 33, + strictMode: false, + }, + } { + { + t.Run(test.name, func(t *testing.T) { + // skip 4 byte selector + data := rawData[4:] + // Add extra padding to data + data = append(data, make([]byte, test.extraPaddingBytes)...) 
+ + // Unpack into interface + var v inputType + err = abi.UnpackInputIntoInterface(&v, "receive", data, test.strictMode) // skips 4 byte selector + + if test.expectedErrorSubstring != "" { + require.Error(t, err) + require.ErrorContains(t, err, test.expectedErrorSubstring) + } else { + require.NoError(t, err) + // Verify unpacked values match input + require.Equal(t, v.Amount, input.Amount) + require.EqualValues(t, v.Amount, input.Amount) + require.True(t, bytes.Equal(v.Memo, input.Memo)) + } + }) + } + } +} + +func TestPackOutput(t *testing.T) { + abi, err := JSON(strings.NewReader(TEST_ABI)) + require.NoError(t, err) + + bytes, err := abi.PackOutput("receive", true) + require.NoError(t, err) + + vals, err := abi.Methods["receive"].Outputs.Unpack(bytes) + require.NoError(t, err) + + require.Len(t, vals, 1) + require.True(t, vals[0].(bool)) +} diff --git a/coreth/accounts/abi/abi_test.go b/coreth/accounts/abi/abi_test.go index d156d82c..669fe2e6 100644 --- a/coreth/accounts/abi/abi_test.go +++ b/coreth/accounts/abi/abi_test.go @@ -39,6 +39,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/crypto" + "github.com/stretchr/testify/assert" ) const jsondata = ` @@ -1067,6 +1068,34 @@ func TestABI_EventById(t *testing.T) { } } +func TestABI_ErrorByID(t *testing.T) { + abi, err := JSON(strings.NewReader(`[ + {"inputs":[{"internalType":"uint256","name":"x","type":"uint256"}],"name":"MyError1","type":"error"}, + {"inputs":[{"components":[{"internalType":"uint256","name":"a","type":"uint256"},{"internalType":"string","name":"b","type":"string"},{"internalType":"address","name":"c","type":"address"}],"internalType":"struct 
MyError.MyStruct","name":"x","type":"tuple"},{"internalType":"address","name":"y","type":"address"},{"components":[{"internalType":"uint256","name":"a","type":"uint256"},{"internalType":"string","name":"b","type":"string"},{"internalType":"address","name":"c","type":"address"}],"internalType":"struct MyError.MyStruct","name":"z","type":"tuple"}],"name":"MyError2","type":"error"}, + {"inputs":[{"internalType":"uint256[]","name":"x","type":"uint256[]"}],"name":"MyError3","type":"error"} + ]`)) + if err != nil { + t.Fatal(err) + } + for name, m := range abi.Errors { + a := fmt.Sprintf("%v", &m) + var id [4]byte + copy(id[:], m.ID[:4]) + m2, err := abi.ErrorByID(id) + if err != nil { + t.Fatalf("Failed to look up ABI error: %v", err) + } + b := fmt.Sprintf("%v", m2) + if a != b { + t.Errorf("Error %v (id %x) not 'findable' by id in ABI", name, id) + } + } + // test unsuccessful lookups + if _, err = abi.ErrorByID([4]byte{}); err == nil { + t.Error("Expected error: no error with this id") + } +} + // TestDoubleDuplicateMethodNames checks that if transfer0 already exists, there won't be a name // conflict and that the second transfer method will be renamed transfer1. 
func TestDoubleDuplicateMethodNames(t *testing.T) { @@ -1174,3 +1203,98 @@ func TestUnpackRevert(t *testing.T) { }) } } + +func TestABI_PackEvent(t *testing.T) { + tests := []struct { + name string + json string + event string + args []interface{} + expectedTopics []common.Hash + expectedData []byte + }{ + { + name: "received", + json: `[ + {"type":"event","name":"received","anonymous":false,"inputs":[ + {"indexed":false,"name":"sender","type":"address"}, + {"indexed":false,"name":"amount","type":"uint256"}, + {"indexed":false,"name":"memo","type":"bytes"} + ] + }]`, + event: "received(address,uint256,bytes)", + args: []interface{}{ + common.HexToAddress("0x376c47978271565f56DEB45495afa69E59c16Ab2"), + big.NewInt(1), + []byte{0x88}, + }, + expectedTopics: []common.Hash{ + common.HexToHash("0x75fd880d39c1daf53b6547ab6cb59451fc6452d27caa90e5b6649dd8293b9eed"), + }, + expectedData: common.Hex2Bytes("000000000000000000000000376c47978271565f56deb45495afa69e59c16ab20000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000018800000000000000000000000000000000000000000000000000000000000000"), + }, + { + name: "received", + json: `[ + {"type":"event","name":"received","anonymous":true,"inputs":[ + {"indexed":false,"name":"sender","type":"address"}, + {"indexed":false,"name":"amount","type":"uint256"}, + {"indexed":false,"name":"memo","type":"bytes"} + ] + }]`, + event: "received(address,uint256,bytes)", + args: []interface{}{ + common.HexToAddress("0x376c47978271565f56DEB45495afa69E59c16Ab2"), + big.NewInt(1), + []byte{0x88}, + }, + expectedTopics: []common.Hash{}, + expectedData: 
common.Hex2Bytes("000000000000000000000000376c47978271565f56deb45495afa69e59c16ab20000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000018800000000000000000000000000000000000000000000000000000000000000"), + }, { + name: "Transfer", + json: `[ + { "constant": true, "inputs": [], "name": "name", "outputs": [ { "name": "", "type": "string" } ], "payable": false, "stateMutability": "view", "type": "function" }, + { "constant": false, "inputs": [ { "name": "_spender", "type": "address" }, { "name": "_value", "type": "uint256" } ], "name": "approve", "outputs": [ { "name": "", "type": "bool" } ], "payable": false, "stateMutability": "nonpayable", "type": "function" }, + { "constant": true, "inputs": [], "name": "totalSupply", "outputs": [ { "name": "", "type": "uint256" } ], "payable": false, "stateMutability": "view", "type": "function" }, + { "constant": false, "inputs": [ { "name": "_from", "type": "address" }, { "name": "_to", "type": "address" }, { "name": "_value", "type": "uint256" } ], "name": "transferFrom", "outputs": [ { "name": "", "type": "bool" } ], "payable": false, "stateMutability": "nonpayable", "type": "function" }, + { "constant": true, "inputs": [], "name": "decimals", "outputs": [ { "name": "", "type": "uint8" } ], "payable": false, "stateMutability": "view", "type": "function" }, + { "constant": true, "inputs": [ { "name": "_owner", "type": "address" } ], "name": "balanceOf", "outputs": [ { "name": "balance", "type": "uint256" } ], "payable": false, "stateMutability": "view", "type": "function" }, + { "constant": true, "inputs": [], "name": "symbol", "outputs": [ { "name": "", "type": "string" } ], "payable": false, "stateMutability": "view", "type": "function" }, + { "constant": false, "inputs": [ { "name": "_to", "type": "address" }, { "name": "_value", "type": "uint256" } ], "name": "transfer", "outputs": [ { 
"name": "", "type": "bool" } ], "payable": false, "stateMutability": "nonpayable", "type": "function" }, + { "constant": true, "inputs": [ { "name": "_owner", "type": "address" }, { "name": "_spender", "type": "address" } ], "name": "allowance", "outputs": [ { "name": "", "type": "uint256" } ], "payable": false, "stateMutability": "view", "type": "function" }, + { "payable": true, "stateMutability": "payable", "type": "fallback" }, + { "anonymous": false, "inputs": [ { "indexed": true, "name": "owner", "type": "address" }, { "indexed": true, "name": "spender", "type": "address" }, { "indexed": false, "name": "value", "type": "uint256" } ], "name": "Approval", "type": "event" }, + { "anonymous": false, "inputs": [ { "indexed": true, "name": "from", "type": "address" }, { "indexed": true, "name": "to", "type": "address" }, { "indexed": false, "name": "value", "type": "uint256" } ], "name": "Transfer", "type": "event" } + ]`, + event: "Transfer(address,address,uint256)", + args: []interface{}{ + common.HexToAddress("0x8db97C7cEcE249c2b98bDC0226Cc4C2A57BF52FC"), + common.HexToAddress("0x376c47978271565f56DEB45495afa69E59c16Ab2"), + big.NewInt(100), + }, + expectedTopics: []common.Hash{ + common.HexToHash("0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef"), + common.HexToHash("0x0000000000000000000000008db97c7cece249c2b98bdc0226cc4c2a57bf52fc"), + common.HexToHash("0x000000000000000000000000376c47978271565f56deb45495afa69e59c16ab2"), + }, + expectedData: common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000064"), + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + abi, err := JSON(strings.NewReader(test.json)) + if err != nil { + t.Error(err) + } + + topics, data, err := abi.PackEvent(test.name, test.args...) 
+ if err != nil { + t.Fatal(err) + } + + assert.EqualValues(t, test.expectedTopics, topics) + assert.EqualValues(t, test.expectedData, data) + }) + } +} diff --git a/coreth/accounts/abi/bind/backends/simulated.go b/coreth/accounts/abi/bind/backends/simulated.go index cc1e2f1b..b182a30e 100644 --- a/coreth/accounts/abi/bind/backends/simulated.go +++ b/coreth/accounts/abi/bind/backends/simulated.go @@ -47,13 +47,13 @@ import ( "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/core/vm" "github.com/ava-labs/coreth/eth/filters" - "github.com/ava-labs/coreth/ethdb" "github.com/ava-labs/coreth/interfaces" "github.com/ava-labs/coreth/params" "github.com/ava-labs/coreth/rpc" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/math" + "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/log" ) @@ -107,15 +107,15 @@ type SimulatedBackend struct { // and uses a simulated blockchain for testing purposes. // A simulated backend always uses chainID 1337. 
func NewSimulatedBackendWithDatabase(database ethdb.Database, alloc core.GenesisAlloc, gasLimit uint64) *SimulatedBackend { - cpcfg := params.TestChainConfig - cpcfg.ChainID = big.NewInt(1337) + copyConfig := *params.TestChainConfig + copyConfig.ChainID = big.NewInt(1337) genesis := core.Genesis{ - Config: cpcfg, + Config: ©Config, GasLimit: gasLimit, Alloc: alloc, } cacheConfig := &core.CacheConfig{} - blockchain, _ := core.NewBlockChain(database, cacheConfig, &genesis, dummy.NewFaker(), vm.Config{}, common.Hash{}, false) + blockchain, _ := core.NewBlockChain(database, cacheConfig, &genesis, dummy.NewCoinbaseFaker(), vm.Config{}, common.Hash{}, false) backend := &SimulatedBackend{ database: database, @@ -125,9 +125,12 @@ func NewSimulatedBackendWithDatabase(database ethdb.Database, alloc core.Genesis filterBackend := &filterBackend{database, blockchain, backend} backend.filterSystem = filters.NewFilterSystem(filterBackend, filters.Config{}) - backend.events = filters.NewEventSystem(backend.filterSystem, false) + backend.events = filters.NewEventSystem(backend.filterSystem) - backend.rollback(blockchain.CurrentBlock()) + header := backend.blockchain.CurrentBlock() + block := backend.blockchain.GetBlock(header.Hash(), header.Number.Uint64()) + + backend.rollback(block) return backend } @@ -173,7 +176,10 @@ func (b *SimulatedBackend) Rollback() { b.mu.Lock() defer b.mu.Unlock() - b.rollback(b.blockchain.CurrentBlock()) + header := b.blockchain.CurrentBlock() + block := b.blockchain.GetBlock(header.Hash(), header.Number.Uint64()) + + b.rollback(block) } func (b *SimulatedBackend) rollback(parent *types.Block) { @@ -212,7 +218,7 @@ func (b *SimulatedBackend) Fork(ctx context.Context, parent common.Hash) error { // stateByBlockNumber retrieves a state by a given blocknumber. 
func (b *SimulatedBackend) stateByBlockNumber(ctx context.Context, blockNumber *big.Int) (*state.StateDB, error) { - if blockNumber == nil || blockNumber.Cmp(b.blockchain.CurrentBlock().Number()) == 0 { + if blockNumber == nil || blockNumber.Cmp(b.blockchain.CurrentBlock().Number) == 0 { return b.blockchain.State() } block, err := b.blockByNumber(ctx, blockNumber) @@ -341,7 +347,7 @@ func (b *SimulatedBackend) BlockByNumber(ctx context.Context, number *big.Int) ( // (associated with its hash) if found without Lock. func (b *SimulatedBackend) blockByNumber(ctx context.Context, number *big.Int) (*types.Block, error) { if number == nil || number.Cmp(b.acceptedBlock.Number()) == 0 { - return b.blockchain.CurrentBlock(), nil + return b.blockByHash(ctx, b.blockchain.CurrentBlock().Hash()) } block := b.blockchain.GetBlockByNumber(uint64(number.Int64())) @@ -469,7 +475,7 @@ func (b *SimulatedBackend) CallContract(ctx context.Context, call interfaces.Cal b.mu.Lock() defer b.mu.Unlock() - if blockNumber != nil && blockNumber.Cmp(b.blockchain.CurrentBlock().Number()) != 0 { + if blockNumber != nil && blockNumber.Cmp(b.blockchain.CurrentBlock().Number) != 0 { return nil, errBlockNumberUnsupported } stateDB, err := b.blockchain.State() @@ -493,7 +499,7 @@ func (b *SimulatedBackend) AcceptedCallContract(ctx context.Context, call interf defer b.mu.Unlock() defer b.acceptedState.RevertToSnapshot(b.acceptedState.Snapshot()) - res, err := b.callContract(ctx, call, b.acceptedBlock, b.acceptedState) + res, err := b.callContract(ctx, call, b.acceptedBlock.Header(), b.acceptedState) if err != nil { return nil, err } @@ -565,7 +571,7 @@ func (b *SimulatedBackend) EstimateGas(ctx context.Context, call interfaces.Call available := new(big.Int).Set(balance) if call.Value != nil { if call.Value.Cmp(available) >= 0 { - return 0, errors.New("insufficient funds for transfer") + return 0, core.ErrInsufficientFundsForTransfer } available.Sub(available, call.Value) } @@ -575,7 +581,7 @@ func (b 
*SimulatedBackend) EstimateGas(ctx context.Context, call interfaces.Call if transfer == nil { transfer = new(big.Int) } - log.Warn("Gas estimation capped by limited funds", "original", hi, "balance", balance, + log.Info("Gas estimation capped by limited funds", "original", hi, "balance", balance, "sent", transfer, "feecap", feeCap, "fundable", allowance) hi = allowance.Uint64() } @@ -587,7 +593,7 @@ func (b *SimulatedBackend) EstimateGas(ctx context.Context, call interfaces.Call call.Gas = gas snapshot := b.acceptedState.Snapshot() - res, err := b.callContract(ctx, call, b.acceptedBlock, b.acceptedState) + res, err := b.callContract(ctx, call, b.acceptedBlock.Header(), b.acceptedState) b.acceptedState.RevertToSnapshot(snapshot) if err != nil { @@ -637,13 +643,13 @@ func (b *SimulatedBackend) EstimateGas(ctx context.Context, call interfaces.Call // callContract implements common code between normal and pending contract calls. // state is modified during execution, make sure to copy it if necessary. 
-func (b *SimulatedBackend) callContract(ctx context.Context, call interfaces.CallMsg, block *types.Block, stateDB *state.StateDB) (*core.ExecutionResult, error) { +func (b *SimulatedBackend) callContract(ctx context.Context, call interfaces.CallMsg, header *types.Header, stateDB *state.StateDB) (*core.ExecutionResult, error) { // Gas prices post 1559 need to be initialized if call.GasPrice != nil && (call.GasFeeCap != nil || call.GasTipCap != nil) { return nil, errors.New("both gasPrice and (maxFeePerGas or maxPriorityFeePerGas) specified") } head := b.blockchain.CurrentHeader() - if !b.blockchain.Config().IsApricotPhase3(new(big.Int).SetUint64(head.Time)) { + if !b.blockchain.Config().IsApricotPhase3(head.Time) { // If there's no basefee, then it must be a non-1559 execution if call.GasPrice == nil { call.GasPrice = new(big.Int) @@ -676,20 +682,33 @@ func (b *SimulatedBackend) callContract(ctx context.Context, call interfaces.Cal if call.Value == nil { call.Value = new(big.Int) } + // Set infinite balance to the fake caller account. from := stateDB.GetOrNewStateObject(call.From) from.SetBalance(math.MaxBig256) + // Execute the call. - msg := callMsg{call} + msg := &core.Message{ + From: call.From, + To: call.To, + Value: call.Value, + GasLimit: call.Gas, + GasPrice: call.GasPrice, + GasFeeCap: call.GasFeeCap, + GasTipCap: call.GasTipCap, + Data: call.Data, + AccessList: call.AccessList, + SkipAccountChecks: true, + } - txContext := core.NewEVMTxContext(msg) - evmContext := core.NewEVMBlockContext(block.Header(), b.blockchain, nil) // Create a new environment which holds all relevant information // about the transaction and calling mechanisms. 
+ txContext := core.NewEVMTxContext(msg) + evmContext := core.NewEVMBlockContext(header, b.blockchain, nil) vmEnv := vm.NewEVM(evmContext, txContext, stateDB, b.config, vm.Config{NoBaseFee: true}) gasPool := new(core.GasPool).AddGas(math.MaxUint64) - return core.NewStateTransition(vmEnv, msg, gasPool).TransitionDb() + return core.ApplyMessage(vmEnv, msg, gasPool) } // SendTransaction updates the pending block to include the given transaction. @@ -703,7 +722,7 @@ func (b *SimulatedBackend) SendTransaction(ctx context.Context, tx *types.Transa return errors.New("could not fetch parent") } // Check transaction validity - signer := types.NewLondonSigner(b.blockchain.Config().ChainID) + signer := types.MakeSigner(b.blockchain.Config(), block.Number(), block.Time()) sender, err := types.Sender(signer, tx) if err != nil { return fmt.Errorf("invalid transaction: %v", err) @@ -833,8 +852,12 @@ func (b *SimulatedBackend) AdjustTime(adjustment time.Duration) error { if len(b.acceptedBlock.Transactions()) != 0 { return errors.New("Could not adjust time on non-empty block") } + block := b.blockchain.GetBlockByHash(b.acceptedBlock.ParentHash()) + if block == nil { + return fmt.Errorf("could not find parent") + } - blocks, _, _ := core.GenerateChain(b.config, b.blockchain.CurrentBlock(), dummy.NewFaker(), b.database, 1, 10, func(number int, block *core.BlockGen) { + blocks, _, _ := core.GenerateChain(b.config, block, dummy.NewFaker(), b.database, 1, 10, func(number int, block *core.BlockGen) { block.OffsetTime(int64(adjustment.Seconds())) }) stateDB, _ := b.blockchain.State() @@ -850,23 +873,6 @@ func (b *SimulatedBackend) Blockchain() *core.BlockChain { return b.blockchain } -// callMsg implements core.Message to allow passing it as a transaction simulator. 
-type callMsg struct { - interfaces.CallMsg -} - -func (m callMsg) From() common.Address { return m.CallMsg.From } -func (m callMsg) Nonce() uint64 { return 0 } -func (m callMsg) IsFake() bool { return true } -func (m callMsg) To() *common.Address { return m.CallMsg.To } -func (m callMsg) GasPrice() *big.Int { return m.CallMsg.GasPrice } -func (m callMsg) GasFeeCap() *big.Int { return m.CallMsg.GasFeeCap } -func (m callMsg) GasTipCap() *big.Int { return m.CallMsg.GasTipCap } -func (m callMsg) Gas() uint64 { return m.CallMsg.Gas } -func (m callMsg) Value() *big.Int { return m.CallMsg.Value } -func (m callMsg) Data() []byte { return m.CallMsg.Data } -func (m callMsg) AccessList() types.AccessList { return m.CallMsg.AccessList } - // filterBackend implements filters.Backend to support filtering for logs without // taking bloom-bits acceleration structures into account. type filterBackend struct { @@ -887,8 +893,8 @@ func (fb *filterBackend) SubscribeAcceptedTransactionEvent(ch chan<- core.NewTxs return fb.bc.SubscribeAcceptedTransactionEvent(ch) } -func (fb *filterBackend) GetVMConfig() *vm.Config { - return fb.bc.GetVMConfig() +func (fb *filterBackend) IsAllowUnfinalizedQueries() bool { + return false } func (fb *filterBackend) LastAcceptedBlock() *types.Block { @@ -903,23 +909,41 @@ func (fb *filterBackend) ChainDb() ethdb.Database { return fb.db } func (fb *filterBackend) EventMux() *event.TypeMux { panic("not supported") } -func (fb *filterBackend) HeaderByNumber(ctx context.Context, block rpc.BlockNumber) (*types.Header, error) { - if block == rpc.LatestBlockNumber { +func (fb *filterBackend) HeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Header, error) { + switch number { + case rpc.PendingBlockNumber, rpc.AcceptedBlockNumber: + if block := fb.backend.acceptedBlock; block != nil { + return block.Header(), nil + } + return nil, nil + case rpc.LatestBlockNumber: return fb.bc.CurrentHeader(), nil + default: + return 
fb.bc.GetHeaderByNumber(uint64(number.Int64())), nil } - return fb.bc.GetHeaderByNumber(uint64(block.Int64())), nil } func (fb *filterBackend) HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) { return fb.bc.GetHeaderByHash(hash), nil } +func (fb *filterBackend) GetBody(ctx context.Context, hash common.Hash, number rpc.BlockNumber) (*types.Body, error) { + if body := fb.bc.GetBody(hash); body != nil { + return body, nil + } + return nil, errors.New("block body not found") +} + func (fb *filterBackend) GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error) { number := rawdb.ReadHeaderNumber(fb.db, hash) if number == nil { return nil, nil } - return rawdb.ReadReceipts(fb.db, hash, *number, fb.bc.Config()), nil + header := rawdb.ReadHeader(fb.db, hash, *number) + if header == nil { + return nil, nil + } + return rawdb.ReadReceipts(fb.db, hash, *number, header.Time, fb.bc.Config()), nil } func (fb *filterBackend) GetLogs(ctx context.Context, hash common.Hash, number uint64) ([][]*types.Log, error) { @@ -953,6 +977,14 @@ func (fb *filterBackend) ServiceFilter(ctx context.Context, ms *bloombits.Matche panic("not supported") } +func (fb *filterBackend) ChainConfig() *params.ChainConfig { + panic("not supported") +} + +func (fb *filterBackend) CurrentHeader() *types.Header { + panic("not supported") +} + func nullSubscription() event.Subscription { return event.NewSubscription(func(quit <-chan struct{}) error { <-quit diff --git a/coreth/accounts/abi/bind/backends/simulated_test.go b/coreth/accounts/abi/bind/backends/simulated_test.go index 9c44b249..66ca116d 100644 --- a/coreth/accounts/abi/bind/backends/simulated_test.go +++ b/coreth/accounts/abi/bind/backends/simulated_test.go @@ -75,8 +75,7 @@ func TestSimulatedBackend(t *testing.T) { code := `6060604052600a8060106000396000f360606040526008565b00` var gas uint64 = 3000000 tx := types.NewContractCreation(0, big.NewInt(0), gas, gasPrice, common.FromHex(code)) - signer := 
types.NewLondonSigner(big.NewInt(1337)) - tx, _ = types.SignTx(tx, signer, key) + tx, _ = types.SignTx(tx, types.HomesteadSigner{}, key) err = sim.SendTransaction(context.Background(), tx) if err != nil { @@ -137,14 +136,6 @@ func TestNewSimulatedBackend(t *testing.T) { sim := simTestBackend(testAddr) defer sim.Close() - if sim.config != params.TestChainConfig { - t.Errorf("expected sim config to equal params.AllEthashProtocolChanges, got %v", sim.config) - } - - if sim.blockchain.Config() != params.TestChainConfig { - t.Errorf("expected sim blockchain config to equal params.AllEthashProtocolChanges, got %v", sim.config) - } - stateDB, _ := sim.blockchain.State() bal := stateDB.GetBalance(testAddr) if bal.Cmp(expectedBal) != 0 { @@ -1210,7 +1201,7 @@ func TestFork(t *testing.T) { sim.Commit(false) } // 3. - if sim.blockchain.CurrentBlock().NumberU64() != uint64(n) { + if sim.blockchain.CurrentBlock().Number.Uint64() != uint64(n) { t.Error("wrong chain length") } // 4. @@ -1220,7 +1211,7 @@ func TestFork(t *testing.T) { sim.Commit(false) } // 6. - if sim.blockchain.CurrentBlock().NumberU64() != uint64(n+1) { + if sim.blockchain.CurrentBlock().Number.Uint64() != uint64(n+1) { t.Error("wrong chain length") } } @@ -1368,7 +1359,7 @@ func TestCommitReturnValue(t *testing.T) { sim := simTestBackend(testAddr) defer sim.Close() - startBlockHeight := sim.blockchain.CurrentBlock().NumberU64() + startBlockHeight := sim.blockchain.CurrentBlock().Number.Uint64() // Test if Commit returns the correct block hash h1 := sim.Commit(true) @@ -1401,3 +1392,23 @@ func TestCommitReturnValue(t *testing.T) { t.Error("Could not retrieve the just created block (side-chain)") } } + +// TestAdjustTimeAfterFork ensures that after a fork, AdjustTime uses the pending fork +// block's parent rather than the canonical head's parent. 
+func TestAdjustTimeAfterFork(t *testing.T) { + testAddr := crypto.PubkeyToAddress(testKey.PublicKey) + sim := simTestBackend(testAddr) + defer sim.Close() + + sim.Commit(false) // h1 + h1 := sim.blockchain.CurrentHeader().Hash() + sim.Commit(false) // h2 + sim.Fork(context.Background(), h1) + sim.AdjustTime(1 * time.Second) + sim.Commit(false) + + head := sim.blockchain.CurrentHeader() + if head.Number.Cmp(common.Big2) == 0 && head.ParentHash != h1 { + t.Errorf("failed to build block on fork") + } +} diff --git a/coreth/accounts/abi/bind/base.go b/coreth/accounts/abi/bind/base.go index 7c6df083..f89a03f8 100644 --- a/coreth/accounts/abi/bind/base.go +++ b/coreth/accounts/abi/bind/base.go @@ -46,6 +46,9 @@ import ( const basefeeWiggleMultiplier = 2 var ( + errNoEventSignature = errors.New("no event signature") + errEventSignatureMismatch = errors.New("event signature mismatch") + ErrNilAssetAmount = errors.New("cannot specify nil asset amount for native asset call") errNativeAssetDeployContract = errors.New("cannot specify native asset params while deploying a contract") ) @@ -440,6 +443,8 @@ func (c *BoundContract) transact(opts *TransactOpts, contract *common.Address, i } if opts.GasPrice != nil { rawTx, err = c.createLegacyTx(opts, contract, input) + } else if opts.GasFeeCap != nil && opts.GasTipCap != nil { + rawTx, err = c.createDynamicTx(opts, contract, input, nil) } else { // Only query for basefee if gasPrice not specified if head, errHead := c.transactor.HeaderByNumber(ensureContext(opts.Context), nil); errHead != nil { @@ -553,8 +558,12 @@ func (c *BoundContract) WatchLogs(opts *WatchOpts, name string, query ...[]inter // UnpackLog unpacks a retrieved log into the provided output structure. func (c *BoundContract) UnpackLog(out interface{}, event string, log types.Log) error { + // Anonymous events are not supported.
+ if len(log.Topics) == 0 { + return errNoEventSignature + } if log.Topics[0] != c.abi.Events[event].ID { - return fmt.Errorf("event signature mismatch") + return errEventSignatureMismatch } if len(log.Data) > 0 { if err := c.abi.UnpackIntoInterface(out, event, log.Data); err != nil { @@ -572,8 +581,12 @@ func (c *BoundContract) UnpackLog(out interface{}, event string, log types.Log) // UnpackLogIntoMap unpacks a retrieved log into the provided map. func (c *BoundContract) UnpackLogIntoMap(out map[string]interface{}, event string, log types.Log) error { + // Anonymous events are not supported. + if len(log.Topics) == 0 { + return errNoEventSignature + } if log.Topics[0] != c.abi.Events[event].ID { - return fmt.Errorf("event signature mismatch") + return errEventSignatureMismatch } if len(log.Data) > 0 { if err := c.abi.UnpackIntoMap(out, event, log.Data); err != nil { diff --git a/coreth/accounts/abi/bind/base_test.go b/coreth/accounts/abi/bind/base_test.go index c8bfdbb8..31f6c819 100644 --- a/coreth/accounts/abi/bind/base_test.go +++ b/coreth/accounts/abi/bind/base_test.go @@ -197,6 +197,23 @@ func TestUnpackIndexedStringTyLogIntoMap(t *testing.T) { unpackAndCheck(t, bc, expectedReceivedMap, mockLog) } +func TestUnpackAnonymousLogIntoMap(t *testing.T) { + mockLog := newMockLog(nil, common.HexToHash("0x0")) + + abiString := `[{"anonymous":false,"inputs":[{"indexed":false,"name":"amount","type":"uint256"}],"name":"received","type":"event"}]` + parsedAbi, _ := abi.JSON(strings.NewReader(abiString)) + bc := bind.NewBoundContract(common.HexToAddress("0x0"), parsedAbi, nil, nil, nil) + + var received map[string]interface{} + err := bc.UnpackLogIntoMap(received, "received", mockLog) + if err == nil { + t.Error("unpacking anonymous event is not supported") + } + if err.Error() != "no event signature" { + t.Errorf("expected error 'no event signature', got '%s'", err) + } +} + func TestUnpackIndexedSliceTyLogIntoMap(t *testing.T) { sliceBytes, err := 
rlp.EncodeToBytes([]string{"name1", "name2", "name3", "name4"}) if err != nil { diff --git a/coreth/accounts/abi/bind/bind.go b/coreth/accounts/abi/bind/bind.go index 4b8e16e9..02638d2b 100644 --- a/coreth/accounts/abi/bind/bind.go +++ b/coreth/accounts/abi/bind/bind.go @@ -32,7 +32,6 @@ package bind import ( "bytes" - "errors" "fmt" "go/format" "regexp" @@ -49,8 +48,6 @@ type Lang int const ( LangGo Lang = iota - LangJava - LangObjC ) func isKeyWord(arg string) bool { @@ -146,12 +143,19 @@ func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string] // Normalize the method for capital cases and non-anonymous inputs/outputs normalized := original normalizedName := methodNormalizer[lang](alias(aliases, original.Name)) - // Ensure there is no duplicated identifier var identifiers = callIdentifiers if !original.IsConstant() { identifiers = transactIdentifiers } + // Name shouldn't start with a digit. It will make the generated code invalid. + if len(normalizedName) > 0 && unicode.IsDigit(rune(normalizedName[0])) { + normalizedName = fmt.Sprintf("M%s", normalizedName) + normalizedName = abi.ResolveNameConflict(normalizedName, func(name string) bool { + _, ok := identifiers[name] + return ok + }) + } if identifiers[normalizedName] { return "", fmt.Errorf("duplicated identifier \"%s\"(normalized \"%s\"), use --alias for renaming", original.Name, normalizedName) } @@ -195,6 +199,14 @@ func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string] // Ensure there is no duplicated identifier normalizedName := methodNormalizer[lang](alias(aliases, original.Name)) + // Name shouldn't start with a digit. It will make the generated code invalid. 
+ if len(normalizedName) > 0 && unicode.IsDigit(rune(normalizedName[0])) { + normalizedName = fmt.Sprintf("E%s", normalizedName) + normalizedName = abi.ResolveNameConflict(normalizedName, func(name string) bool { + _, ok := eventIdentifiers[name] + return ok + }) + } if eventIdentifiers[normalizedName] { return "", fmt.Errorf("duplicated identifier \"%s\"(normalized \"%s\"), use --alias for renaming", original.Name, normalizedName) } @@ -231,11 +243,6 @@ func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string] if evmABI.HasReceive() { receive = &tmplMethod{Original: evmABI.Receive} } - // There is no easy way to pass arbitrary java objects to the Go side. - if len(structs) > 0 && lang == LangJava { - return "", errors.New("java binding for tuple arguments is not supported yet") - } - contracts[types[i]] = &tmplContract{ Type: capitalise(types[i]), InputABI: strings.ReplaceAll(strippedABI, "\"", "\\\""), @@ -308,8 +315,7 @@ func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string] // bindType is a set of type binders that convert Solidity types to some supported // programming language types. var bindType = map[Lang]func(kind abi.Type, structs map[string]*tmplStruct) string{ - LangGo: bindTypeGo, - LangJava: bindTypeJava, + LangGo: bindTypeGo, } // bindBasicTypeGo converts basic solidity types(except array, slice and tuple) to Go ones. @@ -352,86 +358,10 @@ func bindTypeGo(kind abi.Type, structs map[string]*tmplStruct) string { } } -// bindBasicTypeJava converts basic solidity types(except array, slice and tuple) to Java ones. -func bindBasicTypeJava(kind abi.Type) string { - switch kind.T { - case abi.AddressTy: - return "Address" - case abi.IntTy, abi.UintTy: - // Note that uint and int (without digits) are also matched, - // these are size 256, and will translate to BigInt (the default). 
- parts := regexp.MustCompile(`(u)?int([0-9]*)`).FindStringSubmatch(kind.String()) - if len(parts) != 3 { - return kind.String() - } - // All unsigned integers should be translated to BigInt since gomobile doesn't - // support them. - if parts[1] == "u" { - return "BigInt" - } - - namedSize := map[string]string{ - "8": "byte", - "16": "short", - "32": "int", - "64": "long", - }[parts[2]] - - // default to BigInt - if namedSize == "" { - namedSize = "BigInt" - } - return namedSize - case abi.FixedBytesTy, abi.BytesTy: - return "byte[]" - case abi.BoolTy: - return "boolean" - case abi.StringTy: - return "String" - case abi.FunctionTy: - return "byte[24]" - default: - return kind.String() - } -} - -// pluralizeJavaType explicitly converts multidimensional types to predefined -// types in go side. -func pluralizeJavaType(typ string) string { - switch typ { - case "boolean": - return "Bools" - case "String": - return "Strings" - case "Address": - return "Addresses" - case "byte[]": - return "Binaries" - case "BigInt": - return "BigInts" - } - return typ + "[]" -} - -// bindTypeJava converts a Solidity type to a Java one. Since there is no clear mapping -// from all Solidity types to Java ones (e.g. uint17), those that cannot be exactly -// mapped will use an upscaled type (e.g. BigDecimal). -func bindTypeJava(kind abi.Type, structs map[string]*tmplStruct) string { - switch kind.T { - case abi.TupleTy: - return structs[kind.TupleRawName+kind.String()].Name - case abi.ArrayTy, abi.SliceTy: - return pluralizeJavaType(bindTypeJava(*kind.Elem, structs)) - default: - return bindBasicTypeJava(kind) - } -} - // bindTopicType is a set of type binders that convert Solidity types to some // supported programming language topic types. var bindTopicType = map[Lang]func(kind abi.Type, structs map[string]*tmplStruct) string{ - LangGo: bindTopicTypeGo, - LangJava: bindTopicTypeJava, + LangGo: bindTopicTypeGo, } // bindTopicTypeGo converts a Solidity topic type to a Go one. 
It is almost the same @@ -451,28 +381,10 @@ func bindTopicTypeGo(kind abi.Type, structs map[string]*tmplStruct) string { return bound } -// bindTopicTypeJava converts a Solidity topic type to a Java one. It is almost the same -// functionality as for simple types, but dynamic types get converted to hashes. -func bindTopicTypeJava(kind abi.Type, structs map[string]*tmplStruct) string { - bound := bindTypeJava(kind, structs) - - // todo(rjl493456442) according solidity documentation, indexed event - // parameters that are not value types i.e. arrays and structs are not - // stored directly but instead a keccak256-hash of an encoding is stored. - // - // We only convert strings and bytes to hash, still need to deal with - // array(both fixed-size and dynamic-size) and struct. - if bound == "String" || bound == "byte[]" { - bound = "Hash" - } - return bound -} - // bindStructType is a set of type binders that convert Solidity tuple types to some supported // programming language struct definition. var bindStructType = map[Lang]func(kind abi.Type, structs map[string]*tmplStruct) string{ - LangGo: bindStructTypeGo, - LangJava: bindStructTypeJava, + LangGo: bindStructTypeGo, } // bindStructTypeGo converts a Solidity tuple type to a Go one and records the mapping @@ -521,74 +433,10 @@ func bindStructTypeGo(kind abi.Type, structs map[string]*tmplStruct) string { } } -// bindStructTypeJava converts a Solidity tuple type to a Java one and records the mapping -// in the given map. -// Notably, this function will resolve and record nested struct recursively. -func bindStructTypeJava(kind abi.Type, structs map[string]*tmplStruct) string { - switch kind.T { - case abi.TupleTy: - // We compose a raw struct name and a canonical parameter expression - // together here. The reason is before solidity v0.5.11, kind.TupleRawName - // is empty, so we use canonical parameter expression to distinguish - // different struct definition. 
From the consideration of backward - // compatibility, we concat these two together so that if kind.TupleRawName - // is not empty, it can have unique id. - id := kind.TupleRawName + kind.String() - if s, exist := structs[id]; exist { - return s.Name - } - var fields []*tmplField - for i, elem := range kind.TupleElems { - field := bindStructTypeJava(*elem, structs) - fields = append(fields, &tmplField{Type: field, Name: decapitalise(kind.TupleRawNames[i]), SolKind: *elem}) - } - name := kind.TupleRawName - if name == "" { - name = fmt.Sprintf("Class%d", len(structs)) - } - structs[id] = &tmplStruct{ - Name: name, - Fields: fields, - } - return name - case abi.ArrayTy, abi.SliceTy: - return pluralizeJavaType(bindStructTypeJava(*kind.Elem, structs)) - default: - return bindBasicTypeJava(kind) - } -} - // namedType is a set of functions that transform language specific types to // named versions that may be used inside method names. var namedType = map[Lang]func(string, abi.Type) string{ - LangGo: func(string, abi.Type) string { panic("this shouldn't be needed") }, - LangJava: namedTypeJava, -} - -// namedTypeJava converts some primitive data types to named variants that can -// be used as parts of method names. 
-func namedTypeJava(javaKind string, solKind abi.Type) string { - switch javaKind { - case "byte[]": - return "Binary" - case "boolean": - return "Bool" - default: - parts := regexp.MustCompile(`(u)?int([0-9]*)(\[[0-9]*\])?`).FindStringSubmatch(solKind.String()) - if len(parts) != 4 { - return javaKind - } - switch parts[2] { - case "8", "16", "32", "64": - if parts[3] == "" { - return capitalise(fmt.Sprintf("%sint%s", parts[1], parts[2])) - } - return capitalise(fmt.Sprintf("%sint%ss", parts[1], parts[2])) - - default: - return javaKind - } - } + LangGo: func(string, abi.Type) string { panic("this shouldn't be needed") }, } // alias returns an alias of the given string based on the aliasing rules @@ -603,8 +451,7 @@ func alias(aliases map[string]string, n string) string { // methodNormalizer is a name transformer that modifies Solidity method names to // conform to target language naming conventions. var methodNormalizer = map[Lang]func(string) string{ - LangGo: abi.ToCamelCase, - LangJava: decapitalise, + LangGo: abi.ToCamelCase, } // capitalise makes a camel-case string which starts with an upper case character. 
diff --git a/coreth/accounts/abi/bind/bind_test.go b/coreth/accounts/abi/bind/bind_test.go index b06e6bba..d76903c2 100644 --- a/coreth/accounts/abi/bind/bind_test.go +++ b/coreth/accounts/abi/bind/bind_test.go @@ -1458,13 +1458,11 @@ var bindTests = []struct { "github.com/ava-labs/coreth/accounts/abi/bind/backends" "github.com/ava-labs/coreth/core" "github.com/ethereum/go-ethereum/crypto" - "github.com/ava-labs/coreth/params" `, ` // Initialize test accounts key, _ := crypto.GenerateKey() auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) - auth.GasFeeCap = new(big.Int).SetInt64(params.ApricotPhase4MaxBaseFee) sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: new(big.Int).Mul(big.NewInt(10000000000000000), big.NewInt(1000))}}, 10000000) defer sim.Close() @@ -1760,6 +1758,7 @@ var bindTests = []struct { key, _ := crypto.GenerateKey() addr := crypto.PubkeyToAddress(key.PublicKey) + sim := backends.NewSimulatedBackend(core.GenesisAlloc{addr: {Balance: big.NewInt(1000000000000000000)}}, 1000000) defer sim.Close() @@ -1881,7 +1880,7 @@ var bindTests = []struct { if count != 1 { t.Fatal("Unexpected contract event number") } - `, + `, nil, nil, nil, @@ -1891,51 +1890,51 @@ var bindTests = []struct { { `NewErrors`, ` - pragma solidity >0.8.4; - - contract NewErrors { - error MyError(uint256); - error MyError1(uint256); - error MyError2(uint256, uint256); - error MyError3(uint256 a, uint256 b, uint256 c); - function Error() public pure { - revert MyError3(1,2,3); - } + pragma solidity >0.8.4; + + contract NewErrors { + error MyError(uint256); + error MyError1(uint256); + error MyError2(uint256, uint256); + error MyError3(uint256 a, uint256 b, uint256 c); + function Error() public pure { + revert MyError3(1,2,3); } - `, + } + `, 
[]string{"0x6080604052348015600f57600080fd5b5060998061001e6000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c8063726c638214602d575b600080fd5b60336035565b005b60405163024876cd60e61b815260016004820152600260248201526003604482015260640160405180910390fdfea264697066735822122093f786a1bc60216540cd999fbb4a6109e0fef20abcff6e9107fb2817ca968f3c64736f6c63430008070033"}, []string{`[{"inputs":[{"internalType":"uint256","name":"","type":"uint256"}],"name":"MyError","type":"error"},{"inputs":[{"internalType":"uint256","name":"","type":"uint256"}],"name":"MyError1","type":"error"},{"inputs":[{"internalType":"uint256","name":"","type":"uint256"},{"internalType":"uint256","name":"","type":"uint256"}],"name":"MyError2","type":"error"},{"inputs":[{"internalType":"uint256","name":"a","type":"uint256"},{"internalType":"uint256","name":"b","type":"uint256"},{"internalType":"uint256","name":"c","type":"uint256"}],"name":"MyError3","type":"error"},{"inputs":[],"name":"Error","outputs":[],"stateMutability":"pure","type":"function"}]`}, ` - "math/big" - - "github.com/ava-labs/coreth/accounts/abi/bind" - "github.com/ava-labs/coreth/accounts/abi/bind/backends" - "github.com/ava-labs/coreth/core" - "github.com/ethereum/go-ethereum/crypto" - `, - ` - var ( - key, _ = crypto.GenerateKey() - user, _ = bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) - sim = backends.NewSimulatedBackend(core.GenesisAlloc{user.From: {Balance: big.NewInt(1000000000000000000)}}, 10000000) - ) - defer sim.Close() - - _, tx, contract, err := DeployNewErrors(user, sim) - if err != nil { - t.Fatal(err) - } - sim.Commit(true) - _, err = bind.WaitDeployed(nil, sim, tx) - if err != nil { - t.Error(err) - } - if err := contract.Error(new(bind.CallOpts)); err == nil { - t.Fatalf("expected contract to throw error") - } - // TODO (MariusVanDerWijden unpack error using abigen - // once that is implemented - `, + "math/big" + + "github.com/ava-labs/coreth/accounts/abi/bind" + 
"github.com/ava-labs/coreth/accounts/abi/bind/backends" + "github.com/ava-labs/coreth/core" + "github.com/ethereum/go-ethereum/crypto" + `, + ` + var ( + key, _ = crypto.GenerateKey() + user, _ = bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) + sim = backends.NewSimulatedBackend(core.GenesisAlloc{user.From: {Balance: big.NewInt(1000000000000000000)}}, 10000000) + ) + defer sim.Close() + + _, tx, contract, err := DeployNewErrors(user, sim) + if err != nil { + t.Fatal(err) + } + sim.Commit(true) + _, err = bind.WaitDeployed(nil, sim, tx) + if err != nil { + t.Error(err) + } + if err := contract.Error(new(bind.CallOpts)); err == nil { + t.Fatalf("expected contract to throw error") + } + // TODO (MariusVanDerWijden unpack error using abigen + // once that is implemented + `, nil, nil, nil, @@ -2068,6 +2067,29 @@ var bindTests = []struct { t.Errorf("error deploying the contract: %v", err) } `, + }, { + name: "NumericMethodName", + contract: ` + // SPDX-License-Identifier: GPL-3.0 + pragma solidity >=0.4.22 <0.9.0; + + contract NumericMethodName { + event _1TestEvent(address _param); + function _1test() public pure {} + function __1test() public pure {} + function __2test() public pure {} + } + `, + bytecode: []string{"0x6080604052348015600f57600080fd5b5060958061001e6000396000f3fe6080604052348015600f57600080fd5b5060043610603c5760003560e01c80639d993132146041578063d02767c7146049578063ffa02795146051575b600080fd5b60476059565b005b604f605b565b005b6057605d565b005b565b565b56fea26469706673582212200382ca602dff96a7e2ba54657985e2b4ac423a56abe4a1f0667bc635c4d4371f64736f6c63430008110033"}, + abi: 
[]string{`[{"anonymous":false,"inputs":[{"indexed":false,"internalType":"address","name":"_param","type":"address"}],"name":"_1TestEvent","type":"event"},{"inputs":[],"name":"_1test","outputs":[],"stateMutability":"pure","type":"function"},{"inputs":[],"name":"__1test","outputs":[],"stateMutability":"pure","type":"function"},{"inputs":[],"name":"__2test","outputs":[],"stateMutability":"pure","type":"function"}]`}, + imports: ` + "github.com/ethereum/go-ethereum/common" + `, + tester: ` + if b, err := NewNumericMethodName(common.Address{}, nil); b == nil || err != nil { + t.Fatalf("combined binding (%v) nil or error (%v) not nil", b, nil) + } +`, }, } @@ -2151,7 +2173,7 @@ func golangBindings(t *testing.T, overload bool) { if out, err := replacer.CombinedOutput(); err != nil { t.Fatalf("failed to replace binding test dependency to current source tree: %v\n%s", err, out) } - tidier := exec.Command(gocmd, "mod", "tidy", "-compat=1.19") + tidier := exec.Command(gocmd, "mod", "tidy", "-compat=1.21") tidier.Dir = pkg if out, err := tidier.CombinedOutput(); err != nil { t.Fatalf("failed to tidy Go module file: %v\n%s", err, out) @@ -2163,408 +2185,3 @@ func golangBindings(t *testing.T, overload bool) { t.Fatalf("failed to run binding test: %v\n%s", err, out) } } - -// Tests that java binding generated by the binder is exactly matched. 
-func TestJavaBindings(t *testing.T) { - var cases = []struct { - name string - contract string - abi string - bytecode string - expected string - }{ - { - "test", - ` - pragma experimental ABIEncoderV2; - pragma solidity ^0.5.2; - - contract test { - function setAddress(address a) public returns(address){} - function setAddressList(address[] memory a_l) public returns(address[] memory){} - function setAddressArray(address[2] memory a_a) public returns(address[2] memory){} - - function setUint8(uint8 u8) public returns(uint8){} - function setUint16(uint16 u16) public returns(uint16){} - function setUint32(uint32 u32) public returns(uint32){} - function setUint64(uint64 u64) public returns(uint64){} - function setUint256(uint256 u256) public returns(uint256){} - function setUint256List(uint256[] memory u256_l) public returns(uint256[] memory){} - function setUint256Array(uint256[2] memory u256_a) public returns(uint256[2] memory){} - - function setInt8(int8 i8) public returns(int8){} - function setInt16(int16 i16) public returns(int16){} - function setInt32(int32 i32) public returns(int32){} - function setInt64(int64 i64) public returns(int64){} - function setInt256(int256 i256) public returns(int256){} - function setInt256List(int256[] memory i256_l) public returns(int256[] memory){} - function setInt256Array(int256[2] memory i256_a) public returns(int256[2] memory){} - - function setBytes1(bytes1 b1) public returns(bytes1) {} - function setBytes32(bytes32 b32) public returns(bytes32) {} - function setBytes(bytes memory bs) public returns(bytes memory) {} - function setBytesList(bytes[] memory bs_l) public returns(bytes[] memory) {} - function setBytesArray(bytes[2] memory bs_a) public returns(bytes[2] memory) {} - - function setString(string memory s) public returns(string memory) {} - function setStringList(string[] memory s_l) public returns(string[] memory) {} - function setStringArray(string[2] memory s_a) public returns(string[2] memory) {} - - function 
setBool(bool b) public returns(bool) {} - function setBoolList(bool[] memory b_l) public returns(bool[] memory) {} - function setBoolArray(bool[2] memory b_a) public returns(bool[2] memory) {} - }`, - `[{"constant":false,"inputs":[{"name":"u16","type":"uint16"}],"name":"setUint16","outputs":[{"name":"","type":"uint16"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"b_a","type":"bool[2]"}],"name":"setBoolArray","outputs":[{"name":"","type":"bool[2]"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"a_a","type":"address[2]"}],"name":"setAddressArray","outputs":[{"name":"","type":"address[2]"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"bs_l","type":"bytes[]"}],"name":"setBytesList","outputs":[{"name":"","type":"bytes[]"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"u8","type":"uint8"}],"name":"setUint8","outputs":[{"name":"","type":"uint8"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"u32","type":"uint32"}],"name":"setUint32","outputs":[{"name":"","type":"uint32"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"b","type":"bool"}],"name":"setBool","outputs":[{"name":"","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"i256_l","type":"int256[]"}],"name":"setInt256List","outputs":[{"name":"","type":"int256[]"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"u256_a","type":"uint256[2]"}],"name":"setUint256Array","outputs":[{"name":"","type":"uint256[2]"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"b_l","type":"bool[]"}],"name":"setBoolList","outputs":
[{"name":"","type":"bool[]"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"bs_a","type":"bytes[2]"}],"name":"setBytesArray","outputs":[{"name":"","type":"bytes[2]"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"a_l","type":"address[]"}],"name":"setAddressList","outputs":[{"name":"","type":"address[]"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"i256_a","type":"int256[2]"}],"name":"setInt256Array","outputs":[{"name":"","type":"int256[2]"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"s_a","type":"string[2]"}],"name":"setStringArray","outputs":[{"name":"","type":"string[2]"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"s","type":"string"}],"name":"setString","outputs":[{"name":"","type":"string"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"u64","type":"uint64"}],"name":"setUint64","outputs":[{"name":"","type":"uint64"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"i16","type":"int16"}],"name":"setInt16","outputs":[{"name":"","type":"int16"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"i8","type":"int8"}],"name":"setInt8","outputs":[{"name":"","type":"int8"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"u256_l","type":"uint256[]"}],"name":"setUint256List","outputs":[{"name":"","type":"uint256[]"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"i256","type":"int256"}],"name":"setInt256","outputs":[{"name":"","type":"int256"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"cons
tant":false,"inputs":[{"name":"i32","type":"int32"}],"name":"setInt32","outputs":[{"name":"","type":"int32"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"b32","type":"bytes32"}],"name":"setBytes32","outputs":[{"name":"","type":"bytes32"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"s_l","type":"string[]"}],"name":"setStringList","outputs":[{"name":"","type":"string[]"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"u256","type":"uint256"}],"name":"setUint256","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"bs","type":"bytes"}],"name":"setBytes","outputs":[{"name":"","type":"bytes"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"a","type":"address"}],"name":"setAddress","outputs":[{"name":"","type":"address"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"i64","type":"int64"}],"name":"setInt64","outputs":[{"name":"","type":"int64"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"b1","type":"bytes1"}],"name":"setBytes1","outputs":[{"name":"","type":"bytes1"}],"payable":false,"stateMutability":"nonpayable","type":"function"}]`, - 
`608060405234801561001057600080fd5b5061265a806100206000396000f3fe608060405234801561001057600080fd5b50600436106101e1576000357c0100000000000000000000000000000000000000000000000000000000900480637fcaf66611610116578063c2b12a73116100b4578063da359dc81161008e578063da359dc814610666578063e30081a014610696578063e673eb32146106c6578063fba1a1c3146106f6576101e1565b8063c2b12a73146105d6578063c577796114610606578063d2282dc514610636576101e1565b80639a19a953116100f05780639a19a95314610516578063a0709e1914610546578063a53b1c1e14610576578063b7d5df31146105a6576101e1565b80637fcaf66614610486578063822cba69146104b657806386114cea146104e6576101e1565b806322722302116101835780635119655d1161015d5780635119655d146103c65780635be6b37e146103f65780636aa482fc146104265780637173b69514610456576101e1565b806322722302146103365780632766a755146103665780634d5ee6da14610396576101e1565b806316c105e2116101bf57806316c105e2146102765780631774e646146102a65780631c9352e2146102d65780631e26fd3314610306576101e1565b80630477988a146101e6578063118a971814610216578063151f547114610246575b600080fd5b61020060048036036101fb9190810190611599565b610726565b60405161020d9190611f01565b60405180910390f35b610230600480360361022b919081019061118d565b61072d565b60405161023d9190611ca6565b60405180910390f35b610260600480360361025b9190810190611123565b61073a565b60405161026d9190611c69565b60405180910390f35b610290600480360361028b9190810190611238565b610747565b60405161029d9190611d05565b60405180910390f35b6102c060048036036102bb919081019061163d565b61074e565b6040516102cd9190611f6d565b60405180910390f35b6102f060048036036102eb91908101906115eb565b610755565b6040516102fd9190611f37565b60405180910390f35b610320600480360361031b91908101906113cf565b61075c565b60405161032d9190611de5565b60405180910390f35b610350600480360361034b91908101906112a2565b610763565b60405161035d9190611d42565b60405180910390f35b610380600480360361037b9190810190611365565b61076a565b60405161038d9190611da8565b60405180910390f35b6103b060048036036103ab91908101906111b6565b610777565b6040516103bd9190611cc1565b60405180910390f35b6
103e060048036036103db91908101906111f7565b61077e565b6040516103ed9190611ce3565b60405180910390f35b610410600480360361040b919081019061114c565b61078b565b60405161041d9190611c84565b60405180910390f35b610440600480360361043b9190810190611279565b610792565b60405161044d9190611d27565b60405180910390f35b610470600480360361046b91908101906112e3565b61079f565b60405161047d9190611d64565b60405180910390f35b6104a0600480360361049b9190810190611558565b6107ac565b6040516104ad9190611edf565b60405180910390f35b6104d060048036036104cb9190810190611614565b6107b3565b6040516104dd9190611f52565b60405180910390f35b61050060048036036104fb919081019061148b565b6107ba565b60405161050d9190611e58565b60405180910390f35b610530600480360361052b919081019061152f565b6107c1565b60405161053d9190611ec4565b60405180910390f35b610560600480360361055b919081019061138e565b6107c8565b60405161056d9190611dc3565b60405180910390f35b610590600480360361058b91908101906114b4565b6107cf565b60405161059d9190611e73565b60405180910390f35b6105c060048036036105bb91908101906114dd565b6107d6565b6040516105cd9190611e8e565b60405180910390f35b6105f060048036036105eb9190810190611421565b6107dd565b6040516105fd9190611e1b565b60405180910390f35b610620600480360361061b9190810190611324565b6107e4565b60405161062d9190611d86565b60405180910390f35b610650600480360361064b91908101906115c2565b6107eb565b60405161065d9190611f1c565b60405180910390f35b610680600480360361067b919081019061144a565b6107f2565b60405161068d9190611e36565b60405180910390f35b6106b060048036036106ab91908101906110fa565b6107f9565b6040516106bd9190611c4e565b60405180910390f35b6106e060048036036106db9190810190611506565b610800565b6040516106ed9190611ea9565b60405180910390f35b610710600480360361070b91908101906113f8565b610807565b60405161071d9190611e00565b60405180910390f35b6000919050565b61073561080e565b919050565b610742610830565b919050565b6060919050565b6000919050565b6000919050565b6000919050565b6060919050565b610772610852565b919050565b6060919050565b610786610874565b919050565b6060919050565b61079a61089b565b919050565b6107a76108bd565b919050565b60609
19050565b6000919050565b6000919050565b6000919050565b6060919050565b6000919050565b6000919050565b6000919050565b6060919050565b6000919050565b6060919050565b6000919050565b6000919050565b6000919050565b6040805190810160405280600290602082028038833980820191505090505090565b6040805190810160405280600290602082028038833980820191505090505090565b6040805190810160405280600290602082028038833980820191505090505090565b60408051908101604052806002905b60608152602001906001900390816108835790505090565b6040805190810160405280600290602082028038833980820191505090505090565b60408051908101604052806002905b60608152602001906001900390816108cc5790505090565b60006108f082356124f2565b905092915050565b600082601f830112151561090b57600080fd5b600261091e61091982611fb5565b611f88565b9150818385602084028201111561093457600080fd5b60005b83811015610964578161094a88826108e4565b845260208401935060208301925050600181019050610937565b5050505092915050565b600082601f830112151561098157600080fd5b813561099461098f82611fd7565b611f88565b915081818352602084019350602081019050838560208402820111156109b957600080fd5b60005b838110156109e957816109cf88826108e4565b8452602084019350602083019250506001810190506109bc565b5050505092915050565b600082601f8301121515610a0657600080fd5b6002610a19610a1482611fff565b611f88565b91508183856020840282011115610a2f57600080fd5b60005b83811015610a5f5781610a458882610e9e565b845260208401935060208301925050600181019050610a32565b5050505092915050565b600082601f8301121515610a7c57600080fd5b8135610a8f610a8a82612021565b611f88565b91508181835260208401935060208101905083856020840282011115610ab457600080fd5b60005b83811015610ae45781610aca8882610e9e565b845260208401935060208301925050600181019050610ab7565b5050505092915050565b600082601f8301121515610b0157600080fd5b6002610b14610b0f82612049565b611f88565b9150818360005b83811015610b4b5781358601610b318882610eda565b845260208401935060208301925050600181019050610b1b565b5050505092915050565b600082601f8301121515610b6857600080fd5b8135610b7b610b768261206b565b611f88565b9150818183526020840193506020810190508360005b83811015610
bc15781358601610ba78882610eda565b845260208401935060208301925050600181019050610b91565b5050505092915050565b600082601f8301121515610bde57600080fd5b6002610bf1610bec82612093565b611f88565b91508183856020840282011115610c0757600080fd5b60005b83811015610c375781610c1d8882610f9a565b845260208401935060208301925050600181019050610c0a565b5050505092915050565b600082601f8301121515610c5457600080fd5b8135610c67610c62826120b5565b611f88565b91508181835260208401935060208101905083856020840282011115610c8c57600080fd5b60005b83811015610cbc5781610ca28882610f9a565b845260208401935060208301925050600181019050610c8f565b5050505092915050565b600082601f8301121515610cd957600080fd5b6002610cec610ce7826120dd565b611f88565b9150818360005b83811015610d235781358601610d098882610fea565b845260208401935060208301925050600181019050610cf3565b5050505092915050565b600082601f8301121515610d4057600080fd5b8135610d53610d4e826120ff565b611f88565b9150818183526020840193506020810190508360005b83811015610d995781358601610d7f8882610fea565b845260208401935060208301925050600181019050610d69565b5050505092915050565b600082601f8301121515610db657600080fd5b6002610dc9610dc482612127565b611f88565b91508183856020840282011115610ddf57600080fd5b60005b83811015610e0f5781610df588826110aa565b845260208401935060208301925050600181019050610de2565b5050505092915050565b600082601f8301121515610e2c57600080fd5b8135610e3f610e3a82612149565b611f88565b91508181835260208401935060208101905083856020840282011115610e6457600080fd5b60005b83811015610e945781610e7a88826110aa565b845260208401935060208301925050600181019050610e67565b5050505092915050565b6000610eaa8235612504565b905092915050565b6000610ebe8235612510565b905092915050565b6000610ed2823561253c565b905092915050565b600082601f8301121515610eed57600080fd5b8135610f00610efb82612171565b611f88565b91508082526020830160208301858383011115610f1c57600080fd5b610f278382846125cd565b50505092915050565b600082601f8301121515610f4357600080fd5b8135610f56610f518261219d565b611f88565b91508082526020830160208301858383011115610f7257600080fd5b610f7d8382846125cd565b505
05092915050565b6000610f928235612546565b905092915050565b6000610fa68235612553565b905092915050565b6000610fba823561255d565b905092915050565b6000610fce823561256a565b905092915050565b6000610fe28235612577565b905092915050565b600082601f8301121515610ffd57600080fd5b813561101061100b826121c9565b611f88565b9150808252602083016020830185838301111561102c57600080fd5b6110378382846125cd565b50505092915050565b600082601f830112151561105357600080fd5b8135611066611061826121f5565b611f88565b9150808252602083016020830185838301111561108257600080fd5b61108d8382846125cd565b50505092915050565b60006110a28235612584565b905092915050565b60006110b68235612592565b905092915050565b60006110ca823561259c565b905092915050565b60006110de82356125ac565b905092915050565b60006110f282356125c0565b905092915050565b60006020828403121561110c57600080fd5b600061111a848285016108e4565b91505092915050565b60006040828403121561113557600080fd5b6000611143848285016108f8565b91505092915050565b60006020828403121561115e57600080fd5b600082013567ffffffffffffffff81111561117857600080fd5b6111848482850161096e565b91505092915050565b60006040828403121561119f57600080fd5b60006111ad848285016109f3565b91505092915050565b6000602082840312156111c857600080fd5b600082013567ffffffffffffffff8111156111e257600080fd5b6111ee84828501610a69565b91505092915050565b60006020828403121561120957600080fd5b600082013567ffffffffffffffff81111561122357600080fd5b61122f84828501610aee565b91505092915050565b60006020828403121561124a57600080fd5b600082013567ffffffffffffffff81111561126457600080fd5b61127084828501610b55565b91505092915050565b60006040828403121561128b57600080fd5b600061129984828501610bcb565b91505092915050565b6000602082840312156112b457600080fd5b600082013567ffffffffffffffff8111156112ce57600080fd5b6112da84828501610c41565b91505092915050565b6000602082840312156112f557600080fd5b600082013567ffffffffffffffff81111561130f57600080fd5b61131b84828501610cc6565b91505092915050565b60006020828403121561133657600080fd5b600082013567ffffffffffffffff81111561135057600080fd5b61135c84828501610d2d565b91505092915050565b600
06040828403121561137757600080fd5b600061138584828501610da3565b91505092915050565b6000602082840312156113a057600080fd5b600082013567ffffffffffffffff8111156113ba57600080fd5b6113c684828501610e19565b91505092915050565b6000602082840312156113e157600080fd5b60006113ef84828501610e9e565b91505092915050565b60006020828403121561140a57600080fd5b600061141884828501610eb2565b91505092915050565b60006020828403121561143357600080fd5b600061144184828501610ec6565b91505092915050565b60006020828403121561145c57600080fd5b600082013567ffffffffffffffff81111561147657600080fd5b61148284828501610f30565b91505092915050565b60006020828403121561149d57600080fd5b60006114ab84828501610f86565b91505092915050565b6000602082840312156114c657600080fd5b60006114d484828501610f9a565b91505092915050565b6000602082840312156114ef57600080fd5b60006114fd84828501610fae565b91505092915050565b60006020828403121561151857600080fd5b600061152684828501610fc2565b91505092915050565b60006020828403121561154157600080fd5b600061154f84828501610fd6565b91505092915050565b60006020828403121561156a57600080fd5b600082013567ffffffffffffffff81111561158457600080fd5b61159084828501611040565b91505092915050565b6000602082840312156115ab57600080fd5b60006115b984828501611096565b91505092915050565b6000602082840312156115d457600080fd5b60006115e2848285016110aa565b91505092915050565b6000602082840312156115fd57600080fd5b600061160b848285016110be565b91505092915050565b60006020828403121561162657600080fd5b6000611634848285016110d2565b91505092915050565b60006020828403121561164f57600080fd5b600061165d848285016110e6565b91505092915050565b61166f816123f7565b82525050565b61167e816122ab565b61168782612221565b60005b828110156116b95761169d858351611666565b6116a68261235b565b915060208501945060018101905061168a565b5050505050565b60006116cb826122b6565b8084526020840193506116dd8361222b565b60005b8281101561170f576116f3868351611666565b6116fc82612368565b91506020860195506001810190506116e0565b50849250505092915050565b611724816122c1565b61172d82612238565b60005b8281101561175f57611743858351611ab3565b61174c82612375565b91506
02085019450600181019050611730565b5050505050565b6000611771826122cc565b80845260208401935061178383612242565b60005b828110156117b557611799868351611ab3565b6117a282612382565b9150602086019550600181019050611786565b50849250505092915050565b60006117cc826122d7565b836020820285016117dc8561224f565b60005b848110156118155783830388526117f7838351611b16565b92506118028261238f565b91506020880197506001810190506117df565b508196508694505050505092915050565b6000611831826122e2565b8084526020840193508360208202850161184a85612259565b60005b84811015611883578383038852611865838351611b16565b92506118708261239c565b915060208801975060018101905061184d565b508196508694505050505092915050565b61189d816122ed565b6118a682612266565b60005b828110156118d8576118bc858351611b5b565b6118c5826123a9565b91506020850194506001810190506118a9565b5050505050565b60006118ea826122f8565b8084526020840193506118fc83612270565b60005b8281101561192e57611912868351611b5b565b61191b826123b6565b91506020860195506001810190506118ff565b50849250505092915050565b600061194582612303565b836020820285016119558561227d565b60005b8481101561198e578383038852611970838351611bcd565b925061197b826123c3565b9150602088019750600181019050611958565b508196508694505050505092915050565b60006119aa8261230e565b808452602084019350836020820285016119c385612287565b60005b848110156119fc5783830388526119de838351611bcd565b92506119e9826123d0565b91506020880197506001810190506119c6565b508196508694505050505092915050565b611a1681612319565b611a1f82612294565b60005b82811015611a5157611a35858351611c12565b611a3e826123dd565b9150602085019450600181019050611a22565b5050505050565b6000611a6382612324565b808452602084019350611a758361229e565b60005b82811015611aa757611a8b868351611c12565b611a94826123ea565b9150602086019550600181019050611a78565b50849250505092915050565b611abc81612409565b82525050565b611acb81612415565b82525050565b611ada81612441565b82525050565b6000611aeb8261233a565b808452611aff8160208601602086016125dc565b611b088161260f565b602085010191505092915050565b6000611b218261232f565b808452611b358160208601602086016125dc565b611
b3e8161260f565b602085010191505092915050565b611b558161244b565b82525050565b611b6481612458565b82525050565b611b7381612462565b82525050565b611b828161246f565b82525050565b611b918161247c565b82525050565b6000611ba282612350565b808452611bb68160208601602086016125dc565b611bbf8161260f565b602085010191505092915050565b6000611bd882612345565b808452611bec8160208601602086016125dc565b611bf58161260f565b602085010191505092915050565b611c0c81612489565b82525050565b611c1b816124b7565b82525050565b611c2a816124c1565b82525050565b611c39816124d1565b82525050565b611c48816124e5565b82525050565b6000602082019050611c636000830184611666565b92915050565b6000604082019050611c7e6000830184611675565b92915050565b60006020820190508181036000830152611c9e81846116c0565b905092915050565b6000604082019050611cbb600083018461171b565b92915050565b60006020820190508181036000830152611cdb8184611766565b905092915050565b60006020820190508181036000830152611cfd81846117c1565b905092915050565b60006020820190508181036000830152611d1f8184611826565b905092915050565b6000604082019050611d3c6000830184611894565b92915050565b60006020820190508181036000830152611d5c81846118df565b905092915050565b60006020820190508181036000830152611d7e818461193a565b905092915050565b60006020820190508181036000830152611da0818461199f565b905092915050565b6000604082019050611dbd6000830184611a0d565b92915050565b60006020820190508181036000830152611ddd8184611a58565b905092915050565b6000602082019050611dfa6000830184611ab3565b92915050565b6000602082019050611e156000830184611ac2565b92915050565b6000602082019050611e306000830184611ad1565b92915050565b60006020820190508181036000830152611e508184611ae0565b905092915050565b6000602082019050611e6d6000830184611b4c565b92915050565b6000602082019050611e886000830184611b5b565b92915050565b6000602082019050611ea36000830184611b6a565b92915050565b6000602082019050611ebe6000830184611b79565b92915050565b6000602082019050611ed96000830184611b88565b92915050565b60006020820190508181036000830152611ef98184611b97565b905092915050565b6000602082019050611f166000830184611c03565b92915050565b60006
02082019050611f316000830184611c12565b92915050565b6000602082019050611f4c6000830184611c21565b92915050565b6000602082019050611f676000830184611c30565b92915050565b6000602082019050611f826000830184611c3f565b92915050565b6000604051905081810181811067ffffffffffffffff82111715611fab57600080fd5b8060405250919050565b600067ffffffffffffffff821115611fcc57600080fd5b602082029050919050565b600067ffffffffffffffff821115611fee57600080fd5b602082029050602081019050919050565b600067ffffffffffffffff82111561201657600080fd5b602082029050919050565b600067ffffffffffffffff82111561203857600080fd5b602082029050602081019050919050565b600067ffffffffffffffff82111561206057600080fd5b602082029050919050565b600067ffffffffffffffff82111561208257600080fd5b602082029050602081019050919050565b600067ffffffffffffffff8211156120aa57600080fd5b602082029050919050565b600067ffffffffffffffff8211156120cc57600080fd5b602082029050602081019050919050565b600067ffffffffffffffff8211156120f457600080fd5b602082029050919050565b600067ffffffffffffffff82111561211657600080fd5b602082029050602081019050919050565b600067ffffffffffffffff82111561213e57600080fd5b602082029050919050565b600067ffffffffffffffff82111561216057600080fd5b602082029050602081019050919050565b600067ffffffffffffffff82111561218857600080fd5b601f19601f8301169050602081019050919050565b600067ffffffffffffffff8211156121b457600080fd5b601f19601f8301169050602081019050919050565b600067ffffffffffffffff8211156121e057600080fd5b601f19601f8301169050602081019050919050565b600067ffffffffffffffff82111561220c57600080fd5b601f19601f8301169050602081019050919050565b6000819050919050565b6000602082019050919050565b6000819050919050565b6000602082019050919050565b6000819050919050565b6000602082019050919050565b6000819050919050565b6000602082019050919050565b6000819050919050565b6000602082019050919050565b6000819050919050565b6000602082019050919050565b600060029050919050565b600081519050919050565b600060029050919050565b600081519050919050565b600060029050919050565b600081519050919050565b600060029050919050565b600081519050919050565b6000600
29050919050565b600081519050919050565b600060029050919050565b600081519050919050565b600081519050919050565b600081519050919050565b600081519050919050565b600081519050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b600061240282612497565b9050919050565b60008115159050919050565b60007fff0000000000000000000000000000000000000000000000000000000000000082169050919050565b6000819050919050565b60008160010b9050919050565b6000819050919050565b60008160030b9050919050565b60008160070b9050919050565b60008160000b9050919050565b600061ffff82169050919050565b600073ffffffffffffffffffffffffffffffffffffffff82169050919050565b6000819050919050565b600063ffffffff82169050919050565b600067ffffffffffffffff82169050919050565b600060ff82169050919050565b60006124fd82612497565b9050919050565b60008115159050919050565b60007fff0000000000000000000000000000000000000000000000000000000000000082169050919050565b6000819050919050565b60008160010b9050919050565b6000819050919050565b60008160030b9050919050565b60008160070b9050919050565b60008160000b9050919050565b600061ffff82169050919050565b6000819050919050565b600063ffffffff82169050919050565b600067ffffffffffffffff82169050919050565b600060ff82169050919050565b82818337600083830152505050565b60005b838110156125fa5780820151818401526020810190506125df565b83811115612609576000848401525b50505050565b6000601f19601f830116905091905056fea265627a7a723058206fe37171cf1b10ebd291cfdca61d67e7fc3c208795e999c833c42a14d86cf00d6c6578706572696d656e74616cf50037`, - ` -// This file is an automatically generated Java binding. Do not modify as any -// change will likely be lost upon the next re-generation! 
- -package bindtest; - -import org.ethereum.geth.*; -import java.util.*; - -public class Test { - // ABI is the input ABI used to generate the binding from. - public final static String ABI = "[{\"constant\":false,\"inputs\":[{\"name\":\"u16\",\"type\":\"uint16\"}],\"name\":\"setUint16\",\"outputs\":[{\"name\":\"\",\"type\":\"uint16\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"b_a\",\"type\":\"bool[2]\"}],\"name\":\"setBoolArray\",\"outputs\":[{\"name\":\"\",\"type\":\"bool[2]\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"a_a\",\"type\":\"address[2]\"}],\"name\":\"setAddressArray\",\"outputs\":[{\"name\":\"\",\"type\":\"address[2]\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"bs_l\",\"type\":\"bytes[]\"}],\"name\":\"setBytesList\",\"outputs\":[{\"name\":\"\",\"type\":\"bytes[]\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"u8\",\"type\":\"uint8\"}],\"name\":\"setUint8\",\"outputs\":[{\"name\":\"\",\"type\":\"uint8\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"u32\",\"type\":\"uint32\"}],\"name\":\"setUint32\",\"outputs\":[{\"name\":\"\",\"type\":\"uint32\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"b\",\"type\":\"bool\"}],\"name\":\"setBool\",\"outputs\":[{\"name\":\"\",\"type\":\"bool\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"i256_l\",\"type\":\"int256[]\"}],\"name\":\"setInt256List\",\"outputs\":[{\"name\":\"\",\"type\":\"int256[]\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":
false,\"inputs\":[{\"name\":\"u256_a\",\"type\":\"uint256[2]\"}],\"name\":\"setUint256Array\",\"outputs\":[{\"name\":\"\",\"type\":\"uint256[2]\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"b_l\",\"type\":\"bool[]\"}],\"name\":\"setBoolList\",\"outputs\":[{\"name\":\"\",\"type\":\"bool[]\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"bs_a\",\"type\":\"bytes[2]\"}],\"name\":\"setBytesArray\",\"outputs\":[{\"name\":\"\",\"type\":\"bytes[2]\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"a_l\",\"type\":\"address[]\"}],\"name\":\"setAddressList\",\"outputs\":[{\"name\":\"\",\"type\":\"address[]\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"i256_a\",\"type\":\"int256[2]\"}],\"name\":\"setInt256Array\",\"outputs\":[{\"name\":\"\",\"type\":\"int256[2]\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"s_a\",\"type\":\"string[2]\"}],\"name\":\"setStringArray\",\"outputs\":[{\"name\":\"\",\"type\":\"string[2]\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"s\",\"type\":\"string\"}],\"name\":\"setString\",\"outputs\":[{\"name\":\"\",\"type\":\"string\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"u64\",\"type\":\"uint64\"}],\"name\":\"setUint64\",\"outputs\":[{\"name\":\"\",\"type\":\"uint64\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"i16\",\"type\":\"int16\"}],\"name\":\"setInt16\",\"outputs\":[{\"name\":\"\",\"type\":\"int16\"}],\"payable\":false,\"stateMutability\":\"no
npayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"i8\",\"type\":\"int8\"}],\"name\":\"setInt8\",\"outputs\":[{\"name\":\"\",\"type\":\"int8\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"u256_l\",\"type\":\"uint256[]\"}],\"name\":\"setUint256List\",\"outputs\":[{\"name\":\"\",\"type\":\"uint256[]\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"i256\",\"type\":\"int256\"}],\"name\":\"setInt256\",\"outputs\":[{\"name\":\"\",\"type\":\"int256\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"i32\",\"type\":\"int32\"}],\"name\":\"setInt32\",\"outputs\":[{\"name\":\"\",\"type\":\"int32\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"b32\",\"type\":\"bytes32\"}],\"name\":\"setBytes32\",\"outputs\":[{\"name\":\"\",\"type\":\"bytes32\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"s_l\",\"type\":\"string[]\"}],\"name\":\"setStringList\",\"outputs\":[{\"name\":\"\",\"type\":\"string[]\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"u256\",\"type\":\"uint256\"}],\"name\":\"setUint256\",\"outputs\":[{\"name\":\"\",\"type\":\"uint256\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"bs\",\"type\":\"bytes\"}],\"name\":\"setBytes\",\"outputs\":[{\"name\":\"\",\"type\":\"bytes\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"a\",\"type\":\"address\"}],\"name\":\"setAddress\",\"outputs\":[{\"name\":\"\",\"type\":\"address\"}],\"payable\":false,\"stateMutability\
":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"i64\",\"type\":\"int64\"}],\"name\":\"setInt64\",\"outputs\":[{\"name\":\"\",\"type\":\"int64\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"b1\",\"type\":\"bytes1\"}],\"name\":\"setBytes1\",\"outputs\":[{\"name\":\"\",\"type\":\"bytes1\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]"; - - // BYTECODE is the compiled bytecode used for deploying new contracts. - public final static String BYTECODE = "0x608060405234801561001057600080fd5b5061265a806100206000396000f3fe608060405234801561001057600080fd5b50600436106101e1576000357c0100000000000000000000000000000000000000000000000000000000900480637fcaf66611610116578063c2b12a73116100b4578063da359dc81161008e578063da359dc814610666578063e30081a014610696578063e673eb32146106c6578063fba1a1c3146106f6576101e1565b8063c2b12a73146105d6578063c577796114610606578063d2282dc514610636576101e1565b80639a19a953116100f05780639a19a95314610516578063a0709e1914610546578063a53b1c1e14610576578063b7d5df31146105a6576101e1565b80637fcaf66614610486578063822cba69146104b657806386114cea146104e6576101e1565b806322722302116101835780635119655d1161015d5780635119655d146103c65780635be6b37e146103f65780636aa482fc146104265780637173b69514610456576101e1565b806322722302146103365780632766a755146103665780634d5ee6da14610396576101e1565b806316c105e2116101bf57806316c105e2146102765780631774e646146102a65780631c9352e2146102d65780631e26fd3314610306576101e1565b80630477988a146101e6578063118a971814610216578063151f547114610246575b600080fd5b61020060048036036101fb9190810190611599565b610726565b60405161020d9190611f01565b60405180910390f35b610230600480360361022b919081019061118d565b61072d565b60405161023d9190611ca6565b60405180910390f35b610260600480360361025b9190810190611123565b61073a565b60405161026d9190611c69565b60405180910390f35b610290600480360361028b9190810190611238565b610747565b60405161029d9190611d055
65b60405180910390f35b6102c060048036036102bb919081019061163d565b61074e565b6040516102cd9190611f6d565b60405180910390f35b6102f060048036036102eb91908101906115eb565b610755565b6040516102fd9190611f37565b60405180910390f35b610320600480360361031b91908101906113cf565b61075c565b60405161032d9190611de5565b60405180910390f35b610350600480360361034b91908101906112a2565b610763565b60405161035d9190611d42565b60405180910390f35b610380600480360361037b9190810190611365565b61076a565b60405161038d9190611da8565b60405180910390f35b6103b060048036036103ab91908101906111b6565b610777565b6040516103bd9190611cc1565b60405180910390f35b6103e060048036036103db91908101906111f7565b61077e565b6040516103ed9190611ce3565b60405180910390f35b610410600480360361040b919081019061114c565b61078b565b60405161041d9190611c84565b60405180910390f35b610440600480360361043b9190810190611279565b610792565b60405161044d9190611d27565b60405180910390f35b610470600480360361046b91908101906112e3565b61079f565b60405161047d9190611d64565b60405180910390f35b6104a0600480360361049b9190810190611558565b6107ac565b6040516104ad9190611edf565b60405180910390f35b6104d060048036036104cb9190810190611614565b6107b3565b6040516104dd9190611f52565b60405180910390f35b61050060048036036104fb919081019061148b565b6107ba565b60405161050d9190611e58565b60405180910390f35b610530600480360361052b919081019061152f565b6107c1565b60405161053d9190611ec4565b60405180910390f35b610560600480360361055b919081019061138e565b6107c8565b60405161056d9190611dc3565b60405180910390f35b610590600480360361058b91908101906114b4565b6107cf565b60405161059d9190611e73565b60405180910390f35b6105c060048036036105bb91908101906114dd565b6107d6565b6040516105cd9190611e8e565b60405180910390f35b6105f060048036036105eb9190810190611421565b6107dd565b6040516105fd9190611e1b565b60405180910390f35b610620600480360361061b9190810190611324565b6107e4565b60405161062d9190611d86565b60405180910390f35b610650600480360361064b91908101906115c2565b6107eb565b60405161065d9190611f1c565b60405180910390f35b610680600480360361067b919081019061144a565b6107f2565b6040516
1068d9190611e36565b60405180910390f35b6106b060048036036106ab91908101906110fa565b6107f9565b6040516106bd9190611c4e565b60405180910390f35b6106e060048036036106db9190810190611506565b610800565b6040516106ed9190611ea9565b60405180910390f35b610710600480360361070b91908101906113f8565b610807565b60405161071d9190611e00565b60405180910390f35b6000919050565b61073561080e565b919050565b610742610830565b919050565b6060919050565b6000919050565b6000919050565b6000919050565b6060919050565b610772610852565b919050565b6060919050565b610786610874565b919050565b6060919050565b61079a61089b565b919050565b6107a76108bd565b919050565b6060919050565b6000919050565b6000919050565b6000919050565b6060919050565b6000919050565b6000919050565b6000919050565b6060919050565b6000919050565b6060919050565b6000919050565b6000919050565b6000919050565b6040805190810160405280600290602082028038833980820191505090505090565b6040805190810160405280600290602082028038833980820191505090505090565b6040805190810160405280600290602082028038833980820191505090505090565b60408051908101604052806002905b60608152602001906001900390816108835790505090565b6040805190810160405280600290602082028038833980820191505090505090565b60408051908101604052806002905b60608152602001906001900390816108cc5790505090565b60006108f082356124f2565b905092915050565b600082601f830112151561090b57600080fd5b600261091e61091982611fb5565b611f88565b9150818385602084028201111561093457600080fd5b60005b83811015610964578161094a88826108e4565b845260208401935060208301925050600181019050610937565b5050505092915050565b600082601f830112151561098157600080fd5b813561099461098f82611fd7565b611f88565b915081818352602084019350602081019050838560208402820111156109b957600080fd5b60005b838110156109e957816109cf88826108e4565b8452602084019350602083019250506001810190506109bc565b5050505092915050565b600082601f8301121515610a0657600080fd5b6002610a19610a1482611fff565b611f88565b91508183856020840282011115610a2f57600080fd5b60005b83811015610a5f5781610a458882610e9e565b845260208401935060208301925050600181019050610a32565b5050505092915050565b60008
2601f8301121515610a7c57600080fd5b8135610a8f610a8a82612021565b611f88565b91508181835260208401935060208101905083856020840282011115610ab457600080fd5b60005b83811015610ae45781610aca8882610e9e565b845260208401935060208301925050600181019050610ab7565b5050505092915050565b600082601f8301121515610b0157600080fd5b6002610b14610b0f82612049565b611f88565b9150818360005b83811015610b4b5781358601610b318882610eda565b845260208401935060208301925050600181019050610b1b565b5050505092915050565b600082601f8301121515610b6857600080fd5b8135610b7b610b768261206b565b611f88565b9150818183526020840193506020810190508360005b83811015610bc15781358601610ba78882610eda565b845260208401935060208301925050600181019050610b91565b5050505092915050565b600082601f8301121515610bde57600080fd5b6002610bf1610bec82612093565b611f88565b91508183856020840282011115610c0757600080fd5b60005b83811015610c375781610c1d8882610f9a565b845260208401935060208301925050600181019050610c0a565b5050505092915050565b600082601f8301121515610c5457600080fd5b8135610c67610c62826120b5565b611f88565b91508181835260208401935060208101905083856020840282011115610c8c57600080fd5b60005b83811015610cbc5781610ca28882610f9a565b845260208401935060208301925050600181019050610c8f565b5050505092915050565b600082601f8301121515610cd957600080fd5b6002610cec610ce7826120dd565b611f88565b9150818360005b83811015610d235781358601610d098882610fea565b845260208401935060208301925050600181019050610cf3565b5050505092915050565b600082601f8301121515610d4057600080fd5b8135610d53610d4e826120ff565b611f88565b9150818183526020840193506020810190508360005b83811015610d995781358601610d7f8882610fea565b845260208401935060208301925050600181019050610d69565b5050505092915050565b600082601f8301121515610db657600080fd5b6002610dc9610dc482612127565b611f88565b91508183856020840282011115610ddf57600080fd5b60005b83811015610e0f5781610df588826110aa565b845260208401935060208301925050600181019050610de2565b5050505092915050565b600082601f8301121515610e2c57600080fd5b8135610e3f610e3a82612149565b611f88565b91508181835260208401935060208101905083856
020840282011115610e6457600080fd5b60005b83811015610e945781610e7a88826110aa565b845260208401935060208301925050600181019050610e67565b5050505092915050565b6000610eaa8235612504565b905092915050565b6000610ebe8235612510565b905092915050565b6000610ed2823561253c565b905092915050565b600082601f8301121515610eed57600080fd5b8135610f00610efb82612171565b611f88565b91508082526020830160208301858383011115610f1c57600080fd5b610f278382846125cd565b50505092915050565b600082601f8301121515610f4357600080fd5b8135610f56610f518261219d565b611f88565b91508082526020830160208301858383011115610f7257600080fd5b610f7d8382846125cd565b50505092915050565b6000610f928235612546565b905092915050565b6000610fa68235612553565b905092915050565b6000610fba823561255d565b905092915050565b6000610fce823561256a565b905092915050565b6000610fe28235612577565b905092915050565b600082601f8301121515610ffd57600080fd5b813561101061100b826121c9565b611f88565b9150808252602083016020830185838301111561102c57600080fd5b6110378382846125cd565b50505092915050565b600082601f830112151561105357600080fd5b8135611066611061826121f5565b611f88565b9150808252602083016020830185838301111561108257600080fd5b61108d8382846125cd565b50505092915050565b60006110a28235612584565b905092915050565b60006110b68235612592565b905092915050565b60006110ca823561259c565b905092915050565b60006110de82356125ac565b905092915050565b60006110f282356125c0565b905092915050565b60006020828403121561110c57600080fd5b600061111a848285016108e4565b91505092915050565b60006040828403121561113557600080fd5b6000611143848285016108f8565b91505092915050565b60006020828403121561115e57600080fd5b600082013567ffffffffffffffff81111561117857600080fd5b6111848482850161096e565b91505092915050565b60006040828403121561119f57600080fd5b60006111ad848285016109f3565b91505092915050565b6000602082840312156111c857600080fd5b600082013567ffffffffffffffff8111156111e257600080fd5b6111ee84828501610a69565b91505092915050565b60006020828403121561120957600080fd5b600082013567ffffffffffffffff81111561122357600080fd5b61122f84828501610aee565b91505092915050565b6000602
0828403121561124a57600080fd5b600082013567ffffffffffffffff81111561126457600080fd5b61127084828501610b55565b91505092915050565b60006040828403121561128b57600080fd5b600061129984828501610bcb565b91505092915050565b6000602082840312156112b457600080fd5b600082013567ffffffffffffffff8111156112ce57600080fd5b6112da84828501610c41565b91505092915050565b6000602082840312156112f557600080fd5b600082013567ffffffffffffffff81111561130f57600080fd5b61131b84828501610cc6565b91505092915050565b60006020828403121561133657600080fd5b600082013567ffffffffffffffff81111561135057600080fd5b61135c84828501610d2d565b91505092915050565b60006040828403121561137757600080fd5b600061138584828501610da3565b91505092915050565b6000602082840312156113a057600080fd5b600082013567ffffffffffffffff8111156113ba57600080fd5b6113c684828501610e19565b91505092915050565b6000602082840312156113e157600080fd5b60006113ef84828501610e9e565b91505092915050565b60006020828403121561140a57600080fd5b600061141884828501610eb2565b91505092915050565b60006020828403121561143357600080fd5b600061144184828501610ec6565b91505092915050565b60006020828403121561145c57600080fd5b600082013567ffffffffffffffff81111561147657600080fd5b61148284828501610f30565b91505092915050565b60006020828403121561149d57600080fd5b60006114ab84828501610f86565b91505092915050565b6000602082840312156114c657600080fd5b60006114d484828501610f9a565b91505092915050565b6000602082840312156114ef57600080fd5b60006114fd84828501610fae565b91505092915050565b60006020828403121561151857600080fd5b600061152684828501610fc2565b91505092915050565b60006020828403121561154157600080fd5b600061154f84828501610fd6565b91505092915050565b60006020828403121561156a57600080fd5b600082013567ffffffffffffffff81111561158457600080fd5b61159084828501611040565b91505092915050565b6000602082840312156115ab57600080fd5b60006115b984828501611096565b91505092915050565b6000602082840312156115d457600080fd5b60006115e2848285016110aa565b91505092915050565b6000602082840312156115fd57600080fd5b600061160b848285016110be565b91505092915050565b6000602082840312156116265760008
0fd5b6000611634848285016110d2565b91505092915050565b60006020828403121561164f57600080fd5b600061165d848285016110e6565b91505092915050565b61166f816123f7565b82525050565b61167e816122ab565b61168782612221565b60005b828110156116b95761169d858351611666565b6116a68261235b565b915060208501945060018101905061168a565b5050505050565b60006116cb826122b6565b8084526020840193506116dd8361222b565b60005b8281101561170f576116f3868351611666565b6116fc82612368565b91506020860195506001810190506116e0565b50849250505092915050565b611724816122c1565b61172d82612238565b60005b8281101561175f57611743858351611ab3565b61174c82612375565b9150602085019450600181019050611730565b5050505050565b6000611771826122cc565b80845260208401935061178383612242565b60005b828110156117b557611799868351611ab3565b6117a282612382565b9150602086019550600181019050611786565b50849250505092915050565b60006117cc826122d7565b836020820285016117dc8561224f565b60005b848110156118155783830388526117f7838351611b16565b92506118028261238f565b91506020880197506001810190506117df565b508196508694505050505092915050565b6000611831826122e2565b8084526020840193508360208202850161184a85612259565b60005b84811015611883578383038852611865838351611b16565b92506118708261239c565b915060208801975060018101905061184d565b508196508694505050505092915050565b61189d816122ed565b6118a682612266565b60005b828110156118d8576118bc858351611b5b565b6118c5826123a9565b91506020850194506001810190506118a9565b5050505050565b60006118ea826122f8565b8084526020840193506118fc83612270565b60005b8281101561192e57611912868351611b5b565b61191b826123b6565b91506020860195506001810190506118ff565b50849250505092915050565b600061194582612303565b836020820285016119558561227d565b60005b8481101561198e578383038852611970838351611bcd565b925061197b826123c3565b9150602088019750600181019050611958565b508196508694505050505092915050565b60006119aa8261230e565b808452602084019350836020820285016119c385612287565b60005b848110156119fc5783830388526119de838351611bcd565b92506119e9826123d0565b91506020880197506001810190506119c6565b5081965086945050505050929150505
65b611a1681612319565b611a1f82612294565b60005b82811015611a5157611a35858351611c12565b611a3e826123dd565b9150602085019450600181019050611a22565b5050505050565b6000611a6382612324565b808452602084019350611a758361229e565b60005b82811015611aa757611a8b868351611c12565b611a94826123ea565b9150602086019550600181019050611a78565b50849250505092915050565b611abc81612409565b82525050565b611acb81612415565b82525050565b611ada81612441565b82525050565b6000611aeb8261233a565b808452611aff8160208601602086016125dc565b611b088161260f565b602085010191505092915050565b6000611b218261232f565b808452611b358160208601602086016125dc565b611b3e8161260f565b602085010191505092915050565b611b558161244b565b82525050565b611b6481612458565b82525050565b611b7381612462565b82525050565b611b828161246f565b82525050565b611b918161247c565b82525050565b6000611ba282612350565b808452611bb68160208601602086016125dc565b611bbf8161260f565b602085010191505092915050565b6000611bd882612345565b808452611bec8160208601602086016125dc565b611bf58161260f565b602085010191505092915050565b611c0c81612489565b82525050565b611c1b816124b7565b82525050565b611c2a816124c1565b82525050565b611c39816124d1565b82525050565b611c48816124e5565b82525050565b6000602082019050611c636000830184611666565b92915050565b6000604082019050611c7e6000830184611675565b92915050565b60006020820190508181036000830152611c9e81846116c0565b905092915050565b6000604082019050611cbb600083018461171b565b92915050565b60006020820190508181036000830152611cdb8184611766565b905092915050565b60006020820190508181036000830152611cfd81846117c1565b905092915050565b60006020820190508181036000830152611d1f8184611826565b905092915050565b6000604082019050611d3c6000830184611894565b92915050565b60006020820190508181036000830152611d5c81846118df565b905092915050565b60006020820190508181036000830152611d7e818461193a565b905092915050565b60006020820190508181036000830152611da0818461199f565b905092915050565b6000604082019050611dbd6000830184611a0d565b92915050565b60006020820190508181036000830152611ddd8184611a58565b905092915050565b6000602082019050611dfa6000830
184611ab3565b92915050565b6000602082019050611e156000830184611ac2565b92915050565b6000602082019050611e306000830184611ad1565b92915050565b60006020820190508181036000830152611e508184611ae0565b905092915050565b6000602082019050611e6d6000830184611b4c565b92915050565b6000602082019050611e886000830184611b5b565b92915050565b6000602082019050611ea36000830184611b6a565b92915050565b6000602082019050611ebe6000830184611b79565b92915050565b6000602082019050611ed96000830184611b88565b92915050565b60006020820190508181036000830152611ef98184611b97565b905092915050565b6000602082019050611f166000830184611c03565b92915050565b6000602082019050611f316000830184611c12565b92915050565b6000602082019050611f4c6000830184611c21565b92915050565b6000602082019050611f676000830184611c30565b92915050565b6000602082019050611f826000830184611c3f565b92915050565b6000604051905081810181811067ffffffffffffffff82111715611fab57600080fd5b8060405250919050565b600067ffffffffffffffff821115611fcc57600080fd5b602082029050919050565b600067ffffffffffffffff821115611fee57600080fd5b602082029050602081019050919050565b600067ffffffffffffffff82111561201657600080fd5b602082029050919050565b600067ffffffffffffffff82111561203857600080fd5b602082029050602081019050919050565b600067ffffffffffffffff82111561206057600080fd5b602082029050919050565b600067ffffffffffffffff82111561208257600080fd5b602082029050602081019050919050565b600067ffffffffffffffff8211156120aa57600080fd5b602082029050919050565b600067ffffffffffffffff8211156120cc57600080fd5b602082029050602081019050919050565b600067ffffffffffffffff8211156120f457600080fd5b602082029050919050565b600067ffffffffffffffff82111561211657600080fd5b602082029050602081019050919050565b600067ffffffffffffffff82111561213e57600080fd5b602082029050919050565b600067ffffffffffffffff82111561216057600080fd5b602082029050602081019050919050565b600067ffffffffffffffff82111561218857600080fd5b601f19601f8301169050602081019050919050565b600067ffffffffffffffff8211156121b457600080fd5b601f19601f8301169050602081019050919050565b600067ffffffffffffffff8211156121e0576
00080fd5b601f19601f8301169050602081019050919050565b600067ffffffffffffffff82111561220c57600080fd5b601f19601f8301169050602081019050919050565b6000819050919050565b6000602082019050919050565b6000819050919050565b6000602082019050919050565b6000819050919050565b6000602082019050919050565b6000819050919050565b6000602082019050919050565b6000819050919050565b6000602082019050919050565b6000819050919050565b6000602082019050919050565b600060029050919050565b600081519050919050565b600060029050919050565b600081519050919050565b600060029050919050565b600081519050919050565b600060029050919050565b600081519050919050565b600060029050919050565b600081519050919050565b600060029050919050565b600081519050919050565b600081519050919050565b600081519050919050565b600081519050919050565b600081519050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b600061240282612497565b9050919050565b60008115159050919050565b60007fff0000000000000000000000000000000000000000000000000000000000000082169050919050565b6000819050919050565b60008160010b9050919050565b6000819050919050565b60008160030b9050919050565b60008160070b9050919050565b60008160000b9050919050565b600061ffff82169050919050565b600073ffffffffffffffffffffffffffffffffffffffff82169050919050565b6000819050919050565b600063ffffffff82169050919050565b600067ffffffffffffffff82169050919050565b600060ff82169050919050565b60006124fd82612497565b9050919050565b60008115159050919050565b60007fff0000000000000000000000000000000000000000000000000000000000000082169050919050565b6000819050919050565b60008160010b9050919050565b6000819050919050565b60008160030b9050919050565b60008160070b9050919050565b60008160000b9050919050565b600061ffff82169050919050565b6000819050919050565b600063ffffffff82169050919050565b600067ffffffffffffffff82169050919050565b600060f
f82169050919050565b82818337600083830152505050565b60005b838110156125fa5780820151818401526020810190506125df565b83811115612609576000848401525b50505050565b6000601f19601f830116905091905056fea265627a7a723058206fe37171cf1b10ebd291cfdca61d67e7fc3c208795e999c833c42a14d86cf00d6c6578706572696d656e74616cf50037"; - - // deploy deploys a new Ethereum contract, binding an instance of Test to it. - public static Test deploy(TransactOpts auth, EthereumClient client) throws Exception { - Interfaces args = Geth.newInterfaces(0); - String bytecode = BYTECODE; - return new Test(Geth.deployContract(auth, ABI, Geth.decodeFromHex(bytecode), client, args)); - } - - // Internal constructor used by contract deployment. - private Test(BoundContract deployment) { - this.Address = deployment.getAddress(); - this.Deployer = deployment.getDeployer(); - this.Contract = deployment; - } - - // Ethereum address where this contract is located at. - public final Address Address; - - // Ethereum transaction in which this contract was deployed (if known!). - public final Transaction Deployer; - - // Contract instance bound to a blockchain address. - private final BoundContract Contract; - - // Creates a new instance of Test, bound to a specific deployed contract. - public Test(Address address, EthereumClient client) throws Exception { - this(Geth.bindContract(address, ABI, client)); - } - - // setAddress is a paid mutator transaction binding the contract method 0xe30081a0. - // - // Solidity: function setAddress(address a) returns(address) - public Transaction setAddress(TransactOpts opts, Address a) throws Exception { - Interfaces args = Geth.newInterfaces(1); - Interface arg0 = Geth.newInterface();arg0.setAddress(a);args.set(0,arg0); - - return this.Contract.transact(opts, "setAddress" , args); - } - - // setAddressArray is a paid mutator transaction binding the contract method 0x151f5471. 
- // - // Solidity: function setAddressArray(address[2] a_a) returns(address[2]) - public Transaction setAddressArray(TransactOpts opts, Addresses a_a) throws Exception { - Interfaces args = Geth.newInterfaces(1); - Interface arg0 = Geth.newInterface();arg0.setAddresses(a_a);args.set(0,arg0); - - return this.Contract.transact(opts, "setAddressArray" , args); - } - - // setAddressList is a paid mutator transaction binding the contract method 0x5be6b37e. - // - // Solidity: function setAddressList(address[] a_l) returns(address[]) - public Transaction setAddressList(TransactOpts opts, Addresses a_l) throws Exception { - Interfaces args = Geth.newInterfaces(1); - Interface arg0 = Geth.newInterface();arg0.setAddresses(a_l);args.set(0,arg0); - - return this.Contract.transact(opts, "setAddressList" , args); - } - - // setBool is a paid mutator transaction binding the contract method 0x1e26fd33. - // - // Solidity: function setBool(bool b) returns(bool) - public Transaction setBool(TransactOpts opts, boolean b) throws Exception { - Interfaces args = Geth.newInterfaces(1); - Interface arg0 = Geth.newInterface();arg0.setBool(b);args.set(0,arg0); - - return this.Contract.transact(opts, "setBool" , args); - } - - // setBoolArray is a paid mutator transaction binding the contract method 0x118a9718. - // - // Solidity: function setBoolArray(bool[2] b_a) returns(bool[2]) - public Transaction setBoolArray(TransactOpts opts, Bools b_a) throws Exception { - Interfaces args = Geth.newInterfaces(1); - Interface arg0 = Geth.newInterface();arg0.setBools(b_a);args.set(0,arg0); - - return this.Contract.transact(opts, "setBoolArray" , args); - } - - // setBoolList is a paid mutator transaction binding the contract method 0x4d5ee6da. 
- // - // Solidity: function setBoolList(bool[] b_l) returns(bool[]) - public Transaction setBoolList(TransactOpts opts, Bools b_l) throws Exception { - Interfaces args = Geth.newInterfaces(1); - Interface arg0 = Geth.newInterface();arg0.setBools(b_l);args.set(0,arg0); - - return this.Contract.transact(opts, "setBoolList" , args); - } - - // setBytes is a paid mutator transaction binding the contract method 0xda359dc8. - // - // Solidity: function setBytes(bytes bs) returns(bytes) - public Transaction setBytes(TransactOpts opts, byte[] bs) throws Exception { - Interfaces args = Geth.newInterfaces(1); - Interface arg0 = Geth.newInterface();arg0.setBinary(bs);args.set(0,arg0); - - return this.Contract.transact(opts, "setBytes" , args); - } - - // setBytes1 is a paid mutator transaction binding the contract method 0xfba1a1c3. - // - // Solidity: function setBytes1(bytes1 b1) returns(bytes1) - public Transaction setBytes1(TransactOpts opts, byte[] b1) throws Exception { - Interfaces args = Geth.newInterfaces(1); - Interface arg0 = Geth.newInterface();arg0.setBinary(b1);args.set(0,arg0); - - return this.Contract.transact(opts, "setBytes1" , args); - } - - // setBytes32 is a paid mutator transaction binding the contract method 0xc2b12a73. - // - // Solidity: function setBytes32(bytes32 b32) returns(bytes32) - public Transaction setBytes32(TransactOpts opts, byte[] b32) throws Exception { - Interfaces args = Geth.newInterfaces(1); - Interface arg0 = Geth.newInterface();arg0.setBinary(b32);args.set(0,arg0); - - return this.Contract.transact(opts, "setBytes32" , args); - } - - // setBytesArray is a paid mutator transaction binding the contract method 0x5119655d. 
- // - // Solidity: function setBytesArray(bytes[2] bs_a) returns(bytes[2]) - public Transaction setBytesArray(TransactOpts opts, Binaries bs_a) throws Exception { - Interfaces args = Geth.newInterfaces(1); - Interface arg0 = Geth.newInterface();arg0.setBinaries(bs_a);args.set(0,arg0); - - return this.Contract.transact(opts, "setBytesArray" , args); - } - - // setBytesList is a paid mutator transaction binding the contract method 0x16c105e2. - // - // Solidity: function setBytesList(bytes[] bs_l) returns(bytes[]) - public Transaction setBytesList(TransactOpts opts, Binaries bs_l) throws Exception { - Interfaces args = Geth.newInterfaces(1); - Interface arg0 = Geth.newInterface();arg0.setBinaries(bs_l);args.set(0,arg0); - - return this.Contract.transact(opts, "setBytesList" , args); - } - - // setInt16 is a paid mutator transaction binding the contract method 0x86114cea. - // - // Solidity: function setInt16(int16 i16) returns(int16) - public Transaction setInt16(TransactOpts opts, short i16) throws Exception { - Interfaces args = Geth.newInterfaces(1); - Interface arg0 = Geth.newInterface();arg0.setInt16(i16);args.set(0,arg0); - - return this.Contract.transact(opts, "setInt16" , args); - } - - // setInt256 is a paid mutator transaction binding the contract method 0xa53b1c1e. - // - // Solidity: function setInt256(int256 i256) returns(int256) - public Transaction setInt256(TransactOpts opts, BigInt i256) throws Exception { - Interfaces args = Geth.newInterfaces(1); - Interface arg0 = Geth.newInterface();arg0.setBigInt(i256);args.set(0,arg0); - - return this.Contract.transact(opts, "setInt256" , args); - } - - // setInt256Array is a paid mutator transaction binding the contract method 0x6aa482fc. 
- // - // Solidity: function setInt256Array(int256[2] i256_a) returns(int256[2]) - public Transaction setInt256Array(TransactOpts opts, BigInts i256_a) throws Exception { - Interfaces args = Geth.newInterfaces(1); - Interface arg0 = Geth.newInterface();arg0.setBigInts(i256_a);args.set(0,arg0); - - return this.Contract.transact(opts, "setInt256Array" , args); - } - - // setInt256List is a paid mutator transaction binding the contract method 0x22722302. - // - // Solidity: function setInt256List(int256[] i256_l) returns(int256[]) - public Transaction setInt256List(TransactOpts opts, BigInts i256_l) throws Exception { - Interfaces args = Geth.newInterfaces(1); - Interface arg0 = Geth.newInterface();arg0.setBigInts(i256_l);args.set(0,arg0); - - return this.Contract.transact(opts, "setInt256List" , args); - } - - // setInt32 is a paid mutator transaction binding the contract method 0xb7d5df31. - // - // Solidity: function setInt32(int32 i32) returns(int32) - public Transaction setInt32(TransactOpts opts, int i32) throws Exception { - Interfaces args = Geth.newInterfaces(1); - Interface arg0 = Geth.newInterface();arg0.setInt32(i32);args.set(0,arg0); - - return this.Contract.transact(opts, "setInt32" , args); - } - - // setInt64 is a paid mutator transaction binding the contract method 0xe673eb32. - // - // Solidity: function setInt64(int64 i64) returns(int64) - public Transaction setInt64(TransactOpts opts, long i64) throws Exception { - Interfaces args = Geth.newInterfaces(1); - Interface arg0 = Geth.newInterface();arg0.setInt64(i64);args.set(0,arg0); - - return this.Contract.transact(opts, "setInt64" , args); - } - - // setInt8 is a paid mutator transaction binding the contract method 0x9a19a953. 
- // - // Solidity: function setInt8(int8 i8) returns(int8) - public Transaction setInt8(TransactOpts opts, byte i8) throws Exception { - Interfaces args = Geth.newInterfaces(1); - Interface arg0 = Geth.newInterface();arg0.setInt8(i8);args.set(0,arg0); - - return this.Contract.transact(opts, "setInt8" , args); - } - - // setString is a paid mutator transaction binding the contract method 0x7fcaf666. - // - // Solidity: function setString(string s) returns(string) - public Transaction setString(TransactOpts opts, String s) throws Exception { - Interfaces args = Geth.newInterfaces(1); - Interface arg0 = Geth.newInterface();arg0.setString(s);args.set(0,arg0); - - return this.Contract.transact(opts, "setString" , args); - } - - // setStringArray is a paid mutator transaction binding the contract method 0x7173b695. - // - // Solidity: function setStringArray(string[2] s_a) returns(string[2]) - public Transaction setStringArray(TransactOpts opts, Strings s_a) throws Exception { - Interfaces args = Geth.newInterfaces(1); - Interface arg0 = Geth.newInterface();arg0.setStrings(s_a);args.set(0,arg0); - - return this.Contract.transact(opts, "setStringArray" , args); - } - - // setStringList is a paid mutator transaction binding the contract method 0xc5777961. - // - // Solidity: function setStringList(string[] s_l) returns(string[]) - public Transaction setStringList(TransactOpts opts, Strings s_l) throws Exception { - Interfaces args = Geth.newInterfaces(1); - Interface arg0 = Geth.newInterface();arg0.setStrings(s_l);args.set(0,arg0); - - return this.Contract.transact(opts, "setStringList" , args); - } - - // setUint16 is a paid mutator transaction binding the contract method 0x0477988a. 
- // - // Solidity: function setUint16(uint16 u16) returns(uint16) - public Transaction setUint16(TransactOpts opts, BigInt u16) throws Exception { - Interfaces args = Geth.newInterfaces(1); - Interface arg0 = Geth.newInterface();arg0.setUint16(u16);args.set(0,arg0); - - return this.Contract.transact(opts, "setUint16" , args); - } - - // setUint256 is a paid mutator transaction binding the contract method 0xd2282dc5. - // - // Solidity: function setUint256(uint256 u256) returns(uint256) - public Transaction setUint256(TransactOpts opts, BigInt u256) throws Exception { - Interfaces args = Geth.newInterfaces(1); - Interface arg0 = Geth.newInterface();arg0.setBigInt(u256);args.set(0,arg0); - - return this.Contract.transact(opts, "setUint256" , args); - } - - // setUint256Array is a paid mutator transaction binding the contract method 0x2766a755. - // - // Solidity: function setUint256Array(uint256[2] u256_a) returns(uint256[2]) - public Transaction setUint256Array(TransactOpts opts, BigInts u256_a) throws Exception { - Interfaces args = Geth.newInterfaces(1); - Interface arg0 = Geth.newInterface();arg0.setBigInts(u256_a);args.set(0,arg0); - - return this.Contract.transact(opts, "setUint256Array" , args); - } - - // setUint256List is a paid mutator transaction binding the contract method 0xa0709e19. - // - // Solidity: function setUint256List(uint256[] u256_l) returns(uint256[]) - public Transaction setUint256List(TransactOpts opts, BigInts u256_l) throws Exception { - Interfaces args = Geth.newInterfaces(1); - Interface arg0 = Geth.newInterface();arg0.setBigInts(u256_l);args.set(0,arg0); - - return this.Contract.transact(opts, "setUint256List" , args); - } - - // setUint32 is a paid mutator transaction binding the contract method 0x1c9352e2. 
- // - // Solidity: function setUint32(uint32 u32) returns(uint32) - public Transaction setUint32(TransactOpts opts, BigInt u32) throws Exception { - Interfaces args = Geth.newInterfaces(1); - Interface arg0 = Geth.newInterface();arg0.setUint32(u32);args.set(0,arg0); - - return this.Contract.transact(opts, "setUint32" , args); - } - - // setUint64 is a paid mutator transaction binding the contract method 0x822cba69. - // - // Solidity: function setUint64(uint64 u64) returns(uint64) - public Transaction setUint64(TransactOpts opts, BigInt u64) throws Exception { - Interfaces args = Geth.newInterfaces(1); - Interface arg0 = Geth.newInterface();arg0.setUint64(u64);args.set(0,arg0); - - return this.Contract.transact(opts, "setUint64" , args); - } - - // setUint8 is a paid mutator transaction binding the contract method 0x1774e646. - // - // Solidity: function setUint8(uint8 u8) returns(uint8) - public Transaction setUint8(TransactOpts opts, BigInt u8) throws Exception { - Interfaces args = Geth.newInterfaces(1); - Interface arg0 = Geth.newInterface();arg0.setUint8(u8);args.set(0,arg0); - - return this.Contract.transact(opts, "setUint8" , args); - } -} -`, - }, - } - for i, c := range cases { - binding, err := Bind([]string{c.name}, []string{c.abi}, []string{c.bytecode}, nil, "bindtest", LangJava, nil, nil) - if err != nil { - t.Fatalf("test %d: failed to generate binding: %v", i, err) - } - // Remove empty lines - removeEmptys := func(input string) string { - lines := strings.Split(input, "\n") - var index int - for _, line := range lines { - if strings.TrimSpace(line) != "" { - lines[index] = line - index += 1 - } - } - lines = lines[:index] - return strings.Join(lines, "\n") - } - binding = removeEmptys(binding) - expect := removeEmptys(c.expected) - if binding != expect { - t.Fatalf("test %d: generated binding mismatch, has %s, want %s", i, binding, c.expected) - } - } -} diff --git a/coreth/accounts/abi/bind/template.go b/coreth/accounts/abi/bind/template.go index 
df8e5fdb..22dca1e7 100644 --- a/coreth/accounts/abi/bind/template.go +++ b/coreth/accounts/abi/bind/template.go @@ -85,8 +85,7 @@ type tmplStruct struct { // tmplSource is language to template mapping containing all the supported // programming languages the package can generate to. var tmplSource = map[Lang]string{ - LangGo: tmplSourceGo, - LangJava: tmplSourceJava, + LangGo: tmplSourceGo, } // tmplSourceGo is the Go source template that the generated Go contract binding @@ -120,6 +119,7 @@ var ( _ = common.Big1 _ = types.BloomLookup _ = event.NewSubscription + _ = abi.ConvertType ) {{$structs := .Structs}} @@ -278,11 +278,11 @@ var ( // bind{{.Type}} binds a generic wrapper to an already deployed contract. func bind{{.Type}}(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { - parsed, err := abi.JSON(strings.NewReader({{.Type}}ABI)) + parsed, err := {{.Type}}MetaData.GetAbi() if err != nil { return nil, err } - return bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil } // Call invokes the (constant) contract method with params as input values and @@ -335,7 +335,7 @@ var ( if err != nil { return *outstruct, err } - {{range $i, $t := .Normalized.Outputs}} + {{range $i, $t := .Normalized.Outputs}} outstruct.{{.Name}} = *abi.ConvertType(out[{{$i}}], new({{bindtype .Type $structs}})).(*{{bindtype .Type $structs}}){{end}} return *outstruct, err @@ -345,7 +345,7 @@ var ( } {{range $i, $t := .Normalized.Outputs}} out{{$i}} := *abi.ConvertType(out[{{$i}}], new({{bindtype .Type $structs}})).(*{{bindtype .Type $structs}}){{end}} - + return {{range $i, $t := .Normalized.Outputs}}out{{$i}}, {{end}} err {{end}} } @@ -388,7 +388,7 @@ var ( } {{end}} - {{if .Fallback}} + {{if .Fallback}} // Fallback is a paid mutator transaction binding the contract fallback function. 
// // Solidity: {{.Fallback.Original.String}} @@ -402,16 +402,16 @@ var ( func (_{{$contract.Type}} *{{$contract.Type}}Session) Fallback(calldata []byte) (*types.Transaction, error) { return _{{$contract.Type}}.Contract.Fallback(&_{{$contract.Type}}.TransactOpts, calldata) } - + // Fallback is a paid mutator transaction binding the contract fallback function. - // + // // Solidity: {{.Fallback.Original.String}} func (_{{$contract.Type}} *{{$contract.Type}}TransactorSession) Fallback(calldata []byte) (*types.Transaction, error) { return _{{$contract.Type}}.Contract.Fallback(&_{{$contract.Type}}.TransactOpts, calldata) } {{end}} - {{if .Receive}} + {{if .Receive}} // Receive is a paid mutator transaction binding the contract receive function. // // Solidity: {{.Receive.Original.String}} @@ -425,9 +425,9 @@ var ( func (_{{$contract.Type}} *{{$contract.Type}}Session) Receive() (*types.Transaction, error) { return _{{$contract.Type}}.Contract.Receive(&_{{$contract.Type}}.TransactOpts) } - + // Receive is a paid mutator transaction binding the contract receive function. - // + // // Solidity: {{.Receive.Original.String}} func (_{{$contract.Type}} *{{$contract.Type}}TransactorSession) Receive() (*types.Transaction, error) { return _{{$contract.Type}}.Contract.Receive(&_{{$contract.Type}}.TransactOpts) @@ -576,143 +576,6 @@ var ( return event, nil } - {{end}} -{{end}} -` - -// tmplSourceJava is the Java source template that the generated Java contract binding -// is based on. -const tmplSourceJava = ` -// This file is an automatically generated Java binding. Do not modify as any -// change will likely be lost upon the next re-generation! - -package {{.Package}}; - -import org.ethereum.geth.*; -import java.util.*; - -{{$structs := .Structs}} -{{range $contract := .Contracts}} -{{if not .Library}}public {{end}}class {{.Type}} { - // ABI is the input ABI used to generate the binding from. 
- public final static String ABI = "{{.InputABI}}"; - {{if $contract.FuncSigs}} - // {{.Type}}FuncSigs maps the 4-byte function signature to its string representation. - public final static Map {{.Type}}FuncSigs; - static { - Hashtable temp = new Hashtable(); - {{range $strsig, $binsig := .FuncSigs}}temp.put("{{$binsig}}", "{{$strsig}}"); - {{end}} - {{.Type}}FuncSigs = Collections.unmodifiableMap(temp); - } - {{end}} - {{if .InputBin}} - // BYTECODE is the compiled bytecode used for deploying new contracts. - public final static String BYTECODE = "0x{{.InputBin}}"; - - // deploy deploys a new Ethereum contract, binding an instance of {{.Type}} to it. - public static {{.Type}} deploy(TransactOpts auth, EthereumClient client{{range .Constructor.Inputs}}, {{bindtype .Type $structs}} {{.Name}}{{end}}) throws Exception { - Interfaces args = Geth.newInterfaces({{(len .Constructor.Inputs)}}); - String bytecode = BYTECODE; - {{if .Libraries}} - - // "link" contract to dependent libraries by deploying them first. - {{range $pattern, $name := .Libraries}} - {{capitalise $name}} {{decapitalise $name}}Inst = {{capitalise $name}}.deploy(auth, client); - bytecode = bytecode.replace("__${{$pattern}}$__", {{decapitalise $name}}Inst.Address.getHex().substring(2)); - {{end}} - {{end}} - {{range $index, $element := .Constructor.Inputs}}Interface arg{{$index}} = Geth.newInterface();arg{{$index}}.set{{namedtype (bindtype .Type $structs) .Type}}({{.Name}});args.set({{$index}},arg{{$index}}); - {{end}} - return new {{.Type}}(Geth.deployContract(auth, ABI, Geth.decodeFromHex(bytecode), client, args)); - } - - // Internal constructor used by contract deployment. - private {{.Type}}(BoundContract deployment) { - this.Address = deployment.getAddress(); - this.Deployer = deployment.getDeployer(); - this.Contract = deployment; - } - {{end}} - - // Ethereum address where this contract is located at. 
- public final Address Address; - - // Ethereum transaction in which this contract was deployed (if known!). - public final Transaction Deployer; - - // Contract instance bound to a blockchain address. - private final BoundContract Contract; - - // Creates a new instance of {{.Type}}, bound to a specific deployed contract. - public {{.Type}}(Address address, EthereumClient client) throws Exception { - this(Geth.bindContract(address, ABI, client)); - } - - {{range .Calls}} - {{if gt (len .Normalized.Outputs) 1}} - // {{capitalise .Normalized.Name}}Results is the output of a call to {{.Normalized.Name}}. - public class {{capitalise .Normalized.Name}}Results { - {{range $index, $item := .Normalized.Outputs}}public {{bindtype .Type $structs}} {{if ne .Name ""}}{{.Name}}{{else}}Return{{$index}}{{end}}; - {{end}} - } - {{end}} - - // {{.Normalized.Name}} is a free data retrieval call binding the contract method 0x{{printf "%x" .Original.ID}}. - // - // Solidity: {{.Original.String}} - public {{if gt (len .Normalized.Outputs) 1}}{{capitalise .Normalized.Name}}Results{{else if eq (len .Normalized.Outputs) 0}}void{{else}}{{range .Normalized.Outputs}}{{bindtype .Type $structs}}{{end}}{{end}} {{.Normalized.Name}}(CallOpts opts{{range .Normalized.Inputs}}, {{bindtype .Type $structs}} {{.Name}}{{end}}) throws Exception { - Interfaces args = Geth.newInterfaces({{(len .Normalized.Inputs)}}); - {{range $index, $item := .Normalized.Inputs}}Interface arg{{$index}} = Geth.newInterface();arg{{$index}}.set{{namedtype (bindtype .Type $structs) .Type}}({{.Name}});args.set({{$index}},arg{{$index}}); - {{end}} - - Interfaces results = Geth.newInterfaces({{(len .Normalized.Outputs)}}); - {{range $index, $item := .Normalized.Outputs}}Interface result{{$index}} = Geth.newInterface(); result{{$index}}.setDefault{{namedtype (bindtype .Type $structs) .Type}}(); results.set({{$index}}, result{{$index}}); - {{end}} - - if (opts == null) { - opts = Geth.newCallOpts(); - } - this.Contract.call(opts, 
results, "{{.Original.Name}}", args); - {{if gt (len .Normalized.Outputs) 1}} - {{capitalise .Normalized.Name}}Results result = new {{capitalise .Normalized.Name}}Results(); - {{range $index, $item := .Normalized.Outputs}}result.{{if ne .Name ""}}{{.Name}}{{else}}Return{{$index}}{{end}} = results.get({{$index}}).get{{namedtype (bindtype .Type $structs) .Type}}(); - {{end}} - return result; - {{else}}{{range .Normalized.Outputs}}return results.get(0).get{{namedtype (bindtype .Type $structs) .Type}}();{{end}} - {{end}} - } - {{end}} - - {{range .Transacts}} - // {{.Normalized.Name}} is a paid mutator transaction binding the contract method 0x{{printf "%x" .Original.ID}}. - // - // Solidity: {{.Original.String}} - public Transaction {{.Normalized.Name}}(TransactOpts opts{{range .Normalized.Inputs}}, {{bindtype .Type $structs}} {{.Name}}{{end}}) throws Exception { - Interfaces args = Geth.newInterfaces({{(len .Normalized.Inputs)}}); - {{range $index, $item := .Normalized.Inputs}}Interface arg{{$index}} = Geth.newInterface();arg{{$index}}.set{{namedtype (bindtype .Type $structs) .Type}}({{.Name}});args.set({{$index}},arg{{$index}}); - {{end}} - return this.Contract.transact(opts, "{{.Original.Name}}" , args); - } {{end}} - - {{if .Fallback}} - // Fallback is a paid mutator transaction binding the contract fallback function. - // - // Solidity: {{.Fallback.Original.String}} - public Transaction Fallback(TransactOpts opts, byte[] calldata) throws Exception { - return this.Contract.rawTransact(opts, calldata); - } - {{end}} - - {{if .Receive}} - // Receive is a paid mutator transaction binding the contract receive function. 
- // - // Solidity: {{.Receive.Original.String}} - public Transaction Receive(TransactOpts opts) throws Exception { - return this.Contract.rawTransact(opts, null); - } - {{end}} -} {{end}} ` diff --git a/coreth/accounts/abi/error.go b/coreth/accounts/abi/error.go index 682b4bd1..d94c2621 100644 --- a/coreth/accounts/abi/error.go +++ b/coreth/accounts/abi/error.go @@ -42,7 +42,7 @@ type Error struct { str string // Sig contains the string signature according to the ABI spec. - // e.g. error foo(uint32 a, int b) = "foo(uint32,int256)" + // e.g. error foo(uint32 a, int b) = "foo(uint32,int256)" // Please note that "int" is substitute for its canonical representation "int256" Sig string @@ -88,7 +88,7 @@ func NewError(name string, inputs Arguments) Error { } } -func (e *Error) String() string { +func (e Error) String() string { return e.str } diff --git a/coreth/accounts/abi/error_handling.go b/coreth/accounts/abi/error_handling.go index 969ab00b..a88e7da8 100644 --- a/coreth/accounts/abi/error_handling.go +++ b/coreth/accounts/abi/error_handling.go @@ -33,7 +33,15 @@ import ( ) var ( - errBadBool = errors.New("abi: improperly encoded boolean value") + errBadBool = errors.New("abi: improperly encoded boolean value") + errBadUint8 = errors.New("abi: improperly encoded uint8 value") + errBadUint16 = errors.New("abi: improperly encoded uint16 value") + errBadUint32 = errors.New("abi: improperly encoded uint32 value") + errBadUint64 = errors.New("abi: improperly encoded uint64 value") + errBadInt8 = errors.New("abi: improperly encoded int8 value") + errBadInt16 = errors.New("abi: improperly encoded int16 value") + errBadInt32 = errors.New("abi: improperly encoded int32 value") + errBadInt64 = errors.New("abi: improperly encoded int64 value") ) // formatSliceString formats the reflection kind with the given slice size diff --git a/coreth/accounts/abi/topics.go b/coreth/accounts/abi/topics.go index 5fe7071b..1c70fd3b 100644 --- a/coreth/accounts/abi/topics.go +++ 
b/coreth/accounts/abi/topics.go @@ -34,73 +34,100 @@ import ( "reflect" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/crypto" ) +// packTopic packs rule into the corresponding hash value for a log's topic +// according to the Solidity documentation: +// https://docs.soliditylang.org/en/v0.8.17/abi-spec.html#indexed-event-encoding. +func packTopic(rule interface{}) (common.Hash, error) { + var topic common.Hash + + // Try to generate the topic based on simple types + switch rule := rule.(type) { + case common.Hash: + copy(topic[:], rule[:]) + case common.Address: + copy(topic[common.HashLength-common.AddressLength:], rule[:]) + case *big.Int: + copy(topic[:], math.U256Bytes(rule)) + case bool: + if rule { + topic[common.HashLength-1] = 1 + } + case int8: + copy(topic[:], genIntType(int64(rule), 1)) + case int16: + copy(topic[:], genIntType(int64(rule), 2)) + case int32: + copy(topic[:], genIntType(int64(rule), 4)) + case int64: + copy(topic[:], genIntType(rule, 8)) + case uint8: + blob := new(big.Int).SetUint64(uint64(rule)).Bytes() + copy(topic[common.HashLength-len(blob):], blob) + case uint16: + blob := new(big.Int).SetUint64(uint64(rule)).Bytes() + copy(topic[common.HashLength-len(blob):], blob) + case uint32: + blob := new(big.Int).SetUint64(uint64(rule)).Bytes() + copy(topic[common.HashLength-len(blob):], blob) + case uint64: + blob := new(big.Int).SetUint64(rule).Bytes() + copy(topic[common.HashLength-len(blob):], blob) + case string: + hash := crypto.Keccak256Hash([]byte(rule)) + copy(topic[:], hash[:]) + case []byte: + hash := crypto.Keccak256Hash(rule) + copy(topic[:], hash[:]) + + default: + // todo(rjl493456442) according solidity documentation, indexed event + // parameters that are not value types i.e. arrays and structs are not + // stored directly but instead a keccak256-hash of an encoding is stored. 
+ // + // We only convert strings and bytes to hash, still need to deal with + // array(both fixed-size and dynamic-size) and struct. + + // Attempt to generate the topic from funky types + val := reflect.ValueOf(rule) + switch { + // static byte array + case val.Kind() == reflect.Array && reflect.TypeOf(rule).Elem().Kind() == reflect.Uint8: + reflect.Copy(reflect.ValueOf(topic[:val.Len()]), val) + default: + return common.Hash{}, fmt.Errorf("unsupported indexed type: %T", rule) + } + } + return topic, nil +} + +// PackTopics packs the array of filters into an array of corresponding topics +// according to the Solidity documentation. +// Note: PackTopics does not support array (fixed or dynamic-size) or struct types. +func PackTopics(filter []interface{}) ([]common.Hash, error) { + topics := make([]common.Hash, len(filter)) + for i, rule := range filter { + topic, err := packTopic(rule) + if err != nil { + return nil, err + } + topics[i] = topic + } + + return topics, nil +} + // MakeTopics converts a filter query argument list into a filter topic set. 
func MakeTopics(query ...[]interface{}) ([][]common.Hash, error) { topics := make([][]common.Hash, len(query)) for i, filter := range query { for _, rule := range filter { - var topic common.Hash - - // Try to generate the topic based on simple types - switch rule := rule.(type) { - case common.Hash: - copy(topic[:], rule[:]) - case common.Address: - copy(topic[common.HashLength-common.AddressLength:], rule[:]) - case *big.Int: - blob := rule.Bytes() - copy(topic[common.HashLength-len(blob):], blob) - case bool: - if rule { - topic[common.HashLength-1] = 1 - } - case int8: - copy(topic[:], genIntType(int64(rule), 1)) - case int16: - copy(topic[:], genIntType(int64(rule), 2)) - case int32: - copy(topic[:], genIntType(int64(rule), 4)) - case int64: - copy(topic[:], genIntType(rule, 8)) - case uint8: - blob := new(big.Int).SetUint64(uint64(rule)).Bytes() - copy(topic[common.HashLength-len(blob):], blob) - case uint16: - blob := new(big.Int).SetUint64(uint64(rule)).Bytes() - copy(topic[common.HashLength-len(blob):], blob) - case uint32: - blob := new(big.Int).SetUint64(uint64(rule)).Bytes() - copy(topic[common.HashLength-len(blob):], blob) - case uint64: - blob := new(big.Int).SetUint64(rule).Bytes() - copy(topic[common.HashLength-len(blob):], blob) - case string: - hash := crypto.Keccak256Hash([]byte(rule)) - copy(topic[:], hash[:]) - case []byte: - hash := crypto.Keccak256Hash(rule) - copy(topic[:], hash[:]) - - default: - // todo(rjl493456442) according solidity documentation, indexed event - // parameters that are not value types i.e. arrays and structs are not - // stored directly but instead a keccak256-hash of an encoding is stored. - // - // We only convert stringS and bytes to hash, still need to deal with - // array(both fixed-size and dynamic-size) and struct. 
- - // Attempt to generate the topic from funky types - val := reflect.ValueOf(rule) - switch { - // static byte array - case val.Kind() == reflect.Array && reflect.TypeOf(rule).Elem().Kind() == reflect.Uint8: - reflect.Copy(reflect.ValueOf(topic[:val.Len()]), val) - default: - return nil, fmt.Errorf("unsupported indexed type: %T", rule) - } + topic, err := packTopic(rule) + if err != nil { + return nil, err } topics[i] = append(topics[i], topic) } diff --git a/coreth/accounts/abi/topics_test.go b/coreth/accounts/abi/topics_test.go index 77e9020f..6a7d3f04 100644 --- a/coreth/accounts/abi/topics_test.go +++ b/coreth/accounts/abi/topics_test.go @@ -27,6 +27,7 @@ package abi import ( + "math" "math/big" "reflect" "testing" @@ -35,6 +36,8 @@ import ( "github.com/ethereum/go-ethereum/crypto" ) +var MaxHash = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") + func TestMakeTopics(t *testing.T) { type args struct { query [][]interface{} @@ -64,9 +67,27 @@ func TestMakeTopics(t *testing.T) { false, }, { - "support *big.Int types in topics", - args{[][]interface{}{{big.NewInt(1).Lsh(big.NewInt(2), 254)}}}, - [][]common.Hash{{common.Hash{128}}}, + "support positive *big.Int types in topics", + args{[][]interface{}{ + {big.NewInt(1)}, + {big.NewInt(1).Lsh(big.NewInt(2), 254)}, + }}, + [][]common.Hash{ + {common.HexToHash("0000000000000000000000000000000000000000000000000000000000000001")}, + {common.Hash{128}}, + }, + false, + }, + { + "support negative *big.Int types in topics", + args{[][]interface{}{ + {big.NewInt(-1)}, + {big.NewInt(math.MinInt64)}, + }}, + [][]common.Hash{ + {MaxHash}, + {common.HexToHash("ffffffffffffffffffffffffffffffffffffffffffffffff8000000000000000")}, + }, false, }, { diff --git a/coreth/accounts/abi/type.go b/coreth/accounts/abi/type.go index 8b877e8b..f7dc5e6a 100644 --- a/coreth/accounts/abi/type.go +++ b/coreth/accounts/abi/type.go @@ -164,6 +164,9 @@ func NewType(t string, internalType string, components 
[]ArgumentMarshaling) (ty if varSize == 0 { typ.T = BytesTy } else { + if varSize > 32 { + return Type{}, fmt.Errorf("unsupported arg type: %s", t) + } typ.T = FixedBytesTy typ.Size = varSize } @@ -186,9 +189,7 @@ func NewType(t string, internalType string, components []ArgumentMarshaling) (ty return Type{}, errors.New("abi: purely anonymous or underscored field is not supported") } fieldName := ResolveNameConflict(name, func(s string) bool { return used[s] }) - if err != nil { - return Type{}, err - } + used[fieldName] = true if !isValidFieldName(fieldName) { return Type{}, fmt.Errorf("field %d has invalid name", idx) diff --git a/coreth/accounts/abi/type_test.go b/coreth/accounts/abi/type_test.go index 5d946ae1..7c3e5080 100644 --- a/coreth/accounts/abi/type_test.go +++ b/coreth/accounts/abi/type_test.go @@ -376,3 +376,10 @@ func TestGetTypeSize(t *testing.T) { } } } + +func TestNewFixedBytesOver32(t *testing.T) { + _, err := NewType("bytes4096", "", nil) + if err == nil { + t.Errorf("fixed bytes with size over 32 is not spec'd") + } +} diff --git a/coreth/accounts/abi/unpack.go b/coreth/accounts/abi/unpack.go index 0bab50f7..bc57d71d 100644 --- a/coreth/accounts/abi/unpack.go +++ b/coreth/accounts/abi/unpack.go @@ -29,6 +29,7 @@ package abi import ( "encoding/binary" "fmt" + "math" "math/big" "reflect" @@ -43,43 +44,72 @@ var ( ) // ReadInteger reads the integer based on its kind and returns the appropriate value. 
-func ReadInteger(typ Type, b []byte) interface{} { +func ReadInteger(typ Type, b []byte) (interface{}, error) { + ret := new(big.Int).SetBytes(b) + if typ.T == UintTy { + u64, isu64 := ret.Uint64(), ret.IsUint64() switch typ.Size { case 8: - return b[len(b)-1] + if !isu64 || u64 > math.MaxUint8 { + return nil, errBadUint8 + } + return byte(u64), nil case 16: - return binary.BigEndian.Uint16(b[len(b)-2:]) + if !isu64 || u64 > math.MaxUint16 { + return nil, errBadUint16 + } + return uint16(u64), nil case 32: - return binary.BigEndian.Uint32(b[len(b)-4:]) + if !isu64 || u64 > math.MaxUint32 { + return nil, errBadUint32 + } + return uint32(u64), nil case 64: - return binary.BigEndian.Uint64(b[len(b)-8:]) + if !isu64 { + return nil, errBadUint64 + } + return u64, nil default: // the only case left for unsigned integer is uint256. - return new(big.Int).SetBytes(b) + return ret, nil } } + + // big.SetBytes can't tell if a number is negative or positive in itself. + // On EVM, if the returned number > max int256, it is negative. + // A number is > max int256 if the bit at position 255 is set. 
+ if ret.Bit(255) == 1 { + ret.Add(MaxUint256, new(big.Int).Neg(ret)) + ret.Add(ret, common.Big1) + ret.Neg(ret) + } + i64, isi64 := ret.Int64(), ret.IsInt64() switch typ.Size { case 8: - return int8(b[len(b)-1]) + if !isi64 || i64 < math.MinInt8 || i64 > math.MaxInt8 { + return nil, errBadInt8 + } + return int8(i64), nil case 16: - return int16(binary.BigEndian.Uint16(b[len(b)-2:])) + if !isi64 || i64 < math.MinInt16 || i64 > math.MaxInt16 { + return nil, errBadInt16 + } + return int16(i64), nil case 32: - return int32(binary.BigEndian.Uint32(b[len(b)-4:])) + if !isi64 || i64 < math.MinInt32 || i64 > math.MaxInt32 { + return nil, errBadInt32 + } + return int32(i64), nil case 64: - return int64(binary.BigEndian.Uint64(b[len(b)-8:])) + if !isi64 { + return nil, errBadInt64 + } + return i64, nil default: // the only case left for integer is int256 - // big.SetBytes can't tell if a number is negative or positive in itself. - // On EVM, if the returned number > max int256, it is negative. - // A number is > max int256 if the bit at position 255 is set. 
- ret := new(big.Int).SetBytes(b) - if ret.Bit(255) == 1 { - ret.Add(MaxUint256, new(big.Int).Neg(ret)) - ret.Add(ret, common.Big1) - ret.Neg(ret) - } - return ret + + return ret, nil } } @@ -133,7 +163,7 @@ func forEachUnpack(t Type, output []byte, start, size int) (interface{}, error) return nil, fmt.Errorf("cannot marshal input to array, size is negative (%d)", size) } if start+32*size > len(output) { - return nil, fmt.Errorf("abi: cannot marshal in to go array: offset %d would go over slice boundary (len=%d)", len(output), start+32*size) + return nil, fmt.Errorf("abi: cannot marshal into go array: offset %d would go over slice boundary (len=%d)", len(output), start+32*size) } // this value will become our slice or our array, depending on the type @@ -244,7 +274,7 @@ func toGoType(index int, t Type, output []byte) (interface{}, error) { case StringTy: // variable arrays are written at the end of the return bytes return string(output[begin : begin+length]), nil case IntTy, UintTy: - return ReadInteger(t, returnOutput), nil + return ReadInteger(t, returnOutput) case BoolTy: return readBool(returnOutput) case AddressTy: diff --git a/coreth/accounts/abi/unpack_test.go b/coreth/accounts/abi/unpack_test.go index e5e400ec..b2e3c258 100644 --- a/coreth/accounts/abi/unpack_test.go +++ b/coreth/accounts/abi/unpack_test.go @@ -30,6 +30,7 @@ import ( "bytes" "encoding/hex" "fmt" + "math" "math/big" "reflect" "strconv" @@ -953,3 +954,164 @@ func TestOOMMaliciousInput(t *testing.T) { } } } + +func TestPackAndUnpackIncompatibleNumber(t *testing.T) { + var encodeABI Arguments + uint256Ty, err := NewType("uint256", "", nil) + if err != nil { + panic(err) + } + encodeABI = Arguments{ + {Type: uint256Ty}, + } + + maxU64, ok := new(big.Int).SetString(strconv.FormatUint(math.MaxUint64, 10), 10) + if !ok { + panic("bug") + } + maxU64Plus1 := new(big.Int).Add(maxU64, big.NewInt(1)) + cases := []struct { + decodeType string + inputValue *big.Int + err error + expectValue interface{} + 
}{ + { + decodeType: "uint8", + inputValue: big.NewInt(math.MaxUint8 + 1), + err: errBadUint8, + }, + { + decodeType: "uint8", + inputValue: big.NewInt(math.MaxUint8), + err: nil, + expectValue: uint8(math.MaxUint8), + }, + { + decodeType: "uint16", + inputValue: big.NewInt(math.MaxUint16 + 1), + err: errBadUint16, + }, + { + decodeType: "uint16", + inputValue: big.NewInt(math.MaxUint16), + err: nil, + expectValue: uint16(math.MaxUint16), + }, + { + decodeType: "uint32", + inputValue: big.NewInt(math.MaxUint32 + 1), + err: errBadUint32, + }, + { + decodeType: "uint32", + inputValue: big.NewInt(math.MaxUint32), + err: nil, + expectValue: uint32(math.MaxUint32), + }, + { + decodeType: "uint64", + inputValue: maxU64Plus1, + err: errBadUint64, + }, + { + decodeType: "uint64", + inputValue: maxU64, + err: nil, + expectValue: uint64(math.MaxUint64), + }, + { + decodeType: "uint256", + inputValue: maxU64Plus1, + err: nil, + expectValue: maxU64Plus1, + }, + { + decodeType: "int8", + inputValue: big.NewInt(math.MaxInt8 + 1), + err: errBadInt8, + }, + { + decodeType: "int8", + inputValue: big.NewInt(math.MinInt8 - 1), + err: errBadInt8, + }, + { + decodeType: "int8", + inputValue: big.NewInt(math.MaxInt8), + err: nil, + expectValue: int8(math.MaxInt8), + }, + { + decodeType: "int16", + inputValue: big.NewInt(math.MaxInt16 + 1), + err: errBadInt16, + }, + { + decodeType: "int16", + inputValue: big.NewInt(math.MinInt16 - 1), + err: errBadInt16, + }, + { + decodeType: "int16", + inputValue: big.NewInt(math.MaxInt16), + err: nil, + expectValue: int16(math.MaxInt16), + }, + { + decodeType: "int32", + inputValue: big.NewInt(math.MaxInt32 + 1), + err: errBadInt32, + }, + { + decodeType: "int32", + inputValue: big.NewInt(math.MinInt32 - 1), + err: errBadInt32, + }, + { + decodeType: "int32", + inputValue: big.NewInt(math.MaxInt32), + err: nil, + expectValue: int32(math.MaxInt32), + }, + { + decodeType: "int64", + inputValue: new(big.Int).Add(big.NewInt(math.MaxInt64), 
big.NewInt(1)), + err: errBadInt64, + }, + { + decodeType: "int64", + inputValue: new(big.Int).Sub(big.NewInt(math.MinInt64), big.NewInt(1)), + err: errBadInt64, + }, + { + decodeType: "int64", + inputValue: big.NewInt(math.MaxInt64), + err: nil, + expectValue: int64(math.MaxInt64), + }, + } + for i, testCase := range cases { + packed, err := encodeABI.Pack(testCase.inputValue) + if err != nil { + panic(err) + } + ty, err := NewType(testCase.decodeType, "", nil) + if err != nil { + panic(err) + } + decodeABI := Arguments{ + {Type: ty}, + } + decoded, err := decodeABI.Unpack(packed) + if err != testCase.err { + t.Fatalf("Expected error %v, actual error %v. case %d", testCase.err, err, i) + } + if err != nil { + continue + } + if !reflect.DeepEqual(decoded[0], testCase.expectValue) { + t.Fatalf("Expected value %v, actual value %v", testCase.expectValue, decoded[0]) + } + } +} diff --git a/coreth/accounts/keystore/account_cache.go b/coreth/accounts/keystore/account_cache.go index 4c35aa74..12b19ef5 100644 --- a/coreth/accounts/keystore/account_cache.go +++ b/coreth/accounts/keystore/account_cache.go @@ -38,7 +38,7 @@ import ( "time" "github.com/ava-labs/coreth/accounts" - mapset "github.com/deckarep/golang-set" + mapset "github.com/deckarep/golang-set/v2" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" ) @@ -89,7 +89,7 @@ func newAccountCache(keydir string) (*accountCache, chan struct{}) { keydir: keydir, byAddr: make(map[common.Address][]accounts.Account), notify: make(chan struct{}, 1), - fileC: fileCache{all: mapset.NewThreadUnsafeSet()}, + fileC: fileCache{all: mapset.NewThreadUnsafeSet[string]()}, } ac.watcher = newWatcher(ac) return ac, ac.notify @@ -156,6 +156,14 @@ func (ac *accountCache) deleteByFile(path string) { } } +// watcherStarted returns true if the watcher loop started running (even if it +// has since also ended). 
+func (ac *accountCache) watcherStarted() bool { + ac.mu.Lock() + defer ac.mu.Unlock() + return ac.watcher.running || ac.watcher.runEnded +} + func removeAccount(slice []accounts.Account, elem accounts.Account) []accounts.Account { for i := range slice { if slice[i] == elem { @@ -285,16 +293,15 @@ func (ac *accountCache) scanAccounts() error { // Process all the file diffs start := time.Now() - for _, p := range creates.ToSlice() { - if a := readAccount(p.(string)); a != nil { + for _, path := range creates.ToSlice() { + if a := readAccount(path); a != nil { ac.add(*a) } } - for _, p := range deletes.ToSlice() { - ac.deleteByFile(p.(string)) + for _, path := range deletes.ToSlice() { + ac.deleteByFile(path) } - for _, p := range updates.ToSlice() { - path := p.(string) + for _, path := range updates.ToSlice() { ac.deleteByFile(path) if a := readAccount(path); a != nil { ac.add(*a) diff --git a/coreth/accounts/keystore/account_cache_test.go b/coreth/accounts/keystore/account_cache_test.go index a84a37e0..38952ffe 100644 --- a/coreth/accounts/keystore/account_cache_test.go +++ b/coreth/accounts/keystore/account_cache_test.go @@ -60,18 +60,48 @@ var ( } ) -func TestWatchNewFile(t *testing.T) { - if os.Getenv("RUN_FLAKY_TESTS") != "true" { - t.Skip("FLAKY") +// waitWatcherStart waits up to 1s for the keystore watcher to start. +func waitWatcherStart(ks *KeyStore) bool { + // On systems where file watch is not supported, just return "ok". + if !ks.cache.watcher.enabled() { + return true + } + // The watcher should start, and then exit. 
+ for t0 := time.Now(); time.Since(t0) < 1*time.Second; time.Sleep(100 * time.Millisecond) { + if ks.cache.watcherStarted() { + return true + } } + return false +} + +func waitForAccounts(wantAccounts []accounts.Account, ks *KeyStore) error { + var list []accounts.Account + for t0 := time.Now(); time.Since(t0) < 5*time.Second; time.Sleep(200 * time.Millisecond) { + list = ks.Accounts() + if reflect.DeepEqual(list, wantAccounts) { + // ks should have also received change notifications + select { + case <-ks.changes: + default: + return fmt.Errorf("wasn't notified of new accounts") + } + return nil + } + } + return fmt.Errorf("\ngot %v\nwant %v", list, wantAccounts) +} + +func TestWatchNewFile(t *testing.T) { t.Parallel() dir, ks := tmpKeyStore(t, false) // Ensure the watcher is started before adding any files. ks.Accounts() - time.Sleep(2000 * time.Millisecond) - + if !waitWatcherStart(ks) { + t.Fatal("keystore watcher didn't start in time") + } // Move in the files. wantAccounts := make([]accounts.Account, len(cachetestAccounts)) for i := range cachetestAccounts { @@ -85,36 +115,24 @@ func TestWatchNewFile(t *testing.T) { } // ks should see the accounts. - var list []accounts.Account - for { - list = ks.Accounts() - if reflect.DeepEqual(list, wantAccounts) { - // ks should have also received change notifications - select { - case <-ks.changes: - default: - t.Fatalf("wasn't notified of new accounts") - } - return - } - time.Sleep(500 * time.Millisecond) + if err := waitForAccounts(wantAccounts, ks); err != nil { + t.Error(err) } } func TestWatchNoDir(t *testing.T) { t.Parallel() - // Create ks but not the directory that it watches. 
- rand.Seed(time.Now().UnixNano()) dir := filepath.Join(os.TempDir(), fmt.Sprintf("eth-keystore-watchnodir-test-%d-%d", os.Getpid(), rand.Int())) ks := NewKeyStore(dir, LightScryptN, LightScryptP) - list := ks.Accounts() if len(list) > 0 { t.Error("initial account list not empty:", list) } - time.Sleep(100 * time.Millisecond) - + // The watcher should start, and then exit. + if !waitWatcherStart(ks) { + t.Fatal("keystore watcher didn't start in time") + } // Create the directory and copy a key file into it. os.MkdirAll(dir, 0700) defer os.RemoveAll(dir) @@ -307,34 +325,12 @@ func TestCacheFind(t *testing.T) { } } -func waitForAccounts(wantAccounts []accounts.Account, ks *KeyStore) error { - var list []accounts.Account - for d := 200 * time.Millisecond; d < 8*time.Second; d *= 2 { - list = ks.Accounts() - if reflect.DeepEqual(list, wantAccounts) { - // ks should have also received change notifications - select { - case <-ks.changes: - default: - return fmt.Errorf("wasn't notified of new accounts") - } - return nil - } - time.Sleep(d) - } - return fmt.Errorf("\ngot %v\nwant %v", list, wantAccounts) -} - // TestUpdatedKeyfileContents tests that updating the contents of a keystore file // is noticed by the watcher, and the account cache is updated accordingly func TestUpdatedKeyfileContents(t *testing.T) { - if os.Getenv("RUN_FLAKY_TESTS") != "true" { - t.Skip("FLAKY") - } t.Parallel() // Create a temporary keystore to test with - rand.Seed(time.Now().UnixNano()) dir := filepath.Join(os.TempDir(), fmt.Sprintf("eth-keystore-watch-test-%d-%d", os.Getpid(), rand.Int())) ks := NewKeyStore(dir, LightScryptN, LightScryptP) @@ -342,8 +338,9 @@ func TestUpdatedKeyfileContents(t *testing.T) { if len(list) > 0 { t.Error("initial account list not empty:", list) } - time.Sleep(100 * time.Millisecond) - + if !waitWatcherStart(ks) { + t.Fatal("keystore watcher didn't start in time") + } // Create the directory and copy a key file into it. 
os.MkdirAll(dir, 0700) defer os.RemoveAll(dir) @@ -361,9 +358,8 @@ func TestUpdatedKeyfileContents(t *testing.T) { t.Error(err) return } - // needed so that modTime of `file` is different to its current value after forceCopyFile - time.Sleep(1000 * time.Millisecond) + time.Sleep(time.Second) // Now replace file contents if err := forceCopyFile(file, cachetestAccounts[1].URL.Path); err != nil { @@ -379,7 +375,7 @@ func TestUpdatedKeyfileContents(t *testing.T) { } // needed so that modTime of `file` is different to its current value after forceCopyFile - time.Sleep(1000 * time.Millisecond) + time.Sleep(time.Second) // Now replace file contents again if err := forceCopyFile(file, cachetestAccounts[2].URL.Path); err != nil { @@ -395,7 +391,7 @@ func TestUpdatedKeyfileContents(t *testing.T) { } // needed so that modTime of `file` is different to its current value after os.WriteFile - time.Sleep(1000 * time.Millisecond) + time.Sleep(time.Second) // Now replace file contents with crap if err := os.WriteFile(file, []byte("foo"), 0600); err != nil { diff --git a/coreth/accounts/keystore/file_cache.go b/coreth/accounts/keystore/file_cache.go index 24dba599..ab24b5c5 100644 --- a/coreth/accounts/keystore/file_cache.go +++ b/coreth/accounts/keystore/file_cache.go @@ -33,20 +33,20 @@ import ( "sync" "time" - mapset "github.com/deckarep/golang-set" + mapset "github.com/deckarep/golang-set/v2" "github.com/ethereum/go-ethereum/log" ) // fileCache is a cache of files seen during scan of keystore. type fileCache struct { - all mapset.Set // Set of all files from the keystore folder - lastMod time.Time // Last time instance when a file was modified + all mapset.Set[string] // Set of all files from the keystore folder + lastMod time.Time // Last time instance when a file was modified mu sync.Mutex } // scan performs a new scan on the given directory, compares against the already // cached filenames, and returns file sets: creates, deletes, updates. 
-func (fc *fileCache) scan(keyDir string) (mapset.Set, mapset.Set, mapset.Set, error) { +func (fc *fileCache) scan(keyDir string) (mapset.Set[string], mapset.Set[string], mapset.Set[string], error) { t0 := time.Now() // List all the files from the keystore folder @@ -60,8 +60,8 @@ func (fc *fileCache) scan(keyDir string) (mapset.Set, mapset.Set, mapset.Set, er defer fc.mu.Unlock() // Iterate all the files and gather their metadata - all := mapset.NewThreadUnsafeSet() - mods := mapset.NewThreadUnsafeSet() + all := mapset.NewThreadUnsafeSet[string]() + mods := mapset.NewThreadUnsafeSet[string]() var newLastMod time.Time for _, fi := range files { diff --git a/coreth/accounts/keystore/keystore.go b/coreth/accounts/keystore/keystore.go index ff82ef88..fb72f0eb 100644 --- a/coreth/accounts/keystore/keystore.go +++ b/coreth/accounts/keystore/keystore.go @@ -508,6 +508,14 @@ func (ks *KeyStore) ImportPreSaleKey(keyJSON []byte, passphrase string) (account return a, nil } +// isUpdating returns whether the event notification loop is running. +// This method is mainly meant for tests. +func (ks *KeyStore) isUpdating() bool { + ks.mu.RLock() + defer ks.mu.RUnlock() + return ks.updating +} + // zeroKey zeroes a private key in memory. func zeroKey(k *ecdsa.PrivateKey) { b := k.D.Bits() diff --git a/coreth/accounts/keystore/keystore_test.go b/coreth/accounts/keystore/keystore_test.go index b543dfd7..2d52a4c0 100644 --- a/coreth/accounts/keystore/keystore_test.go +++ b/coreth/accounts/keystore/keystore_test.go @@ -123,6 +123,7 @@ func TestSignWithPassphrase(t *testing.T) { } func TestTimedUnlock(t *testing.T) { + t.Parallel() _, ks := tmpKeyStore(t, true) pass := "foo" @@ -157,6 +158,7 @@ func TestTimedUnlock(t *testing.T) { } func TestOverrideUnlock(t *testing.T) { + t.Parallel() _, ks := tmpKeyStore(t, false) pass := "foo" @@ -197,6 +199,7 @@ func TestOverrideUnlock(t *testing.T) { // This test should fail under -race if signing races the expiration goroutine. 
func TestSignRace(t *testing.T) { + t.Parallel() _, ks := tmpKeyStore(t, false) // Create a test account. @@ -221,19 +224,33 @@ func TestSignRace(t *testing.T) { t.Errorf("Account did not lock within the timeout") } +// waitForKsUpdating waits until the updating-status of the ks reaches the +// desired wantStatus. +// It waits for a maximum time of maxTime, and returns false if it does not +// finish in time +func waitForKsUpdating(t *testing.T, ks *KeyStore, wantStatus bool, maxTime time.Duration) bool { + t.Helper() + // Wait max 250 ms, then return false + for t0 := time.Now(); time.Since(t0) < maxTime; { + if ks.isUpdating() == wantStatus { + return true + } + time.Sleep(25 * time.Millisecond) + } + return false +} + // Tests that the wallet notifier loop starts and stops correctly based on the // addition and removal of wallet event subscriptions. func TestWalletNotifierLifecycle(t *testing.T) { + t.Parallel() // Create a temporary keystore to test with _, ks := tmpKeyStore(t, false) // Ensure that the notification updater is not running yet time.Sleep(250 * time.Millisecond) - ks.mu.RLock() - updating := ks.updating - ks.mu.RUnlock() - if updating { + if ks.isUpdating() { t.Errorf("wallet notifier running without subscribers") } // Subscribe to the wallet feed and ensure the updater boots up @@ -243,38 +260,26 @@ func TestWalletNotifierLifecycle(t *testing.T) { for i := 0; i < len(subs); i++ { // Create a new subscription subs[i] = ks.Subscribe(updates) - - // Ensure the notifier comes online - time.Sleep(250 * time.Millisecond) - ks.mu.RLock() - updating = ks.updating - ks.mu.RUnlock() - - if !updating { + if !waitForKsUpdating(t, ks, true, 250*time.Millisecond) { t.Errorf("sub %d: wallet notifier not running after subscription", i) } } - // Unsubscribe and ensure the updater terminates eventually - for i := 0; i < len(subs); i++ { + // Close all but one sub + for i := 0; i < len(subs)-1; i++ { // Close an existing subscription subs[i].Unsubscribe() + } + // 
Check that it is still running + time.Sleep(250 * time.Millisecond) - // Ensure the notifier shuts down at and only at the last close - for k := 0; k < int(walletRefreshCycle/(250*time.Millisecond))+2; k++ { - ks.mu.RLock() - updating = ks.updating - ks.mu.RUnlock() - - if i < len(subs)-1 && !updating { - t.Fatalf("sub %d: event notifier stopped prematurely", i) - } - if i == len(subs)-1 && !updating { - return - } - time.Sleep(250 * time.Millisecond) - } + if !ks.isUpdating() { + t.Fatal("event notifier stopped prematurely") + } + // Unsubscribe the last one and ensure the updater terminates eventually. + subs[len(subs)-1].Unsubscribe() + if !waitForKsUpdating(t, ks, false, 4*time.Second) { + t.Errorf("wallet notifier didn't terminate after unsubscribe") } - t.Errorf("wallet notifier didn't terminate after unsubscribe") } type walletEvent struct { diff --git a/coreth/accounts/keystore/watch.go b/coreth/accounts/keystore/watch.go index 1fec0f98..e84e4c85 100644 --- a/coreth/accounts/keystore/watch.go +++ b/coreth/accounts/keystore/watch.go @@ -33,25 +33,27 @@ import ( "time" "github.com/ethereum/go-ethereum/log" - "github.com/rjeczalik/notify" + "github.com/fsnotify/fsnotify" ) type watcher struct { ac *accountCache - starting bool - running bool - ev chan notify.EventInfo + running bool // set to true when runloop begins + runEnded bool // set to true when runloop ends + starting bool // set to true prior to runloop starting quit chan struct{} } func newWatcher(ac *accountCache) *watcher { return &watcher{ ac: ac, - ev: make(chan notify.EventInfo, 10), quit: make(chan struct{}), } } +// enabled returns false on systems not supported. +func (*watcher) enabled() bool { return true } + // starts the watcher loop in the background. // Start a watcher in the background if that's not already in progress. // The caller must hold w.ac.mu. 
@@ -72,16 +74,24 @@ func (w *watcher) loop() { w.ac.mu.Lock() w.running = false w.starting = false + w.runEnded = true w.ac.mu.Unlock() }() logger := log.New("path", w.ac.keydir) - if err := notify.Watch(w.ac.keydir, w.ev, notify.All); err != nil { - logger.Trace("Failed to watch keystore folder", "err", err) + // Create new watcher. + watcher, err := fsnotify.NewWatcher() + if err != nil { + log.Error("Failed to start filesystem watcher", "err", err) + return + } + defer watcher.Close() + if err := watcher.Add(w.ac.keydir); err != nil { + logger.Warn("Failed to watch keystore folder", "err", err) return } - defer notify.Stop(w.ev) - logger.Trace("Started watching keystore folder") + + logger.Trace("Started watching keystore folder", "folder", w.ac.keydir) defer logger.Trace("Stopped watching keystore folder") w.ac.mu.Lock() @@ -105,12 +115,24 @@ func (w *watcher) loop() { select { case <-w.quit: return - case <-w.ev: + case _, ok := <-watcher.Events: + if !ok { + return + } // Trigger the scan (with delay), if not already triggered if !rescanTriggered { debounce.Reset(debounceDuration) rescanTriggered = true } + // The fsnotify library does provide more granular event-info, it + // would be possible to refresh individual affected files instead + // of scheduling a full rescan. For most cases though, the + // full rescan is quick and obviously simplest. 
+ case err, ok := <-watcher.Errors: + if !ok { + return + } + log.Info("Filsystem watcher error", "err", err) case <-debounce.C: w.ac.scanAccounts() rescanTriggered = false diff --git a/coreth/accounts/keystore/watch_fallback.go b/coreth/accounts/keystore/watch_fallback.go index 1435a23c..520ff419 100644 --- a/coreth/accounts/keystore/watch_fallback.go +++ b/coreth/accounts/keystore/watch_fallback.go @@ -32,8 +32,14 @@ package keystore -type watcher struct{ running bool } +type watcher struct { + running bool + runEnded bool +} func newWatcher(*accountCache) *watcher { return new(watcher) } func (*watcher) start() {} func (*watcher) close() {} + +// enabled returns false on systems not supported. +func (*watcher) enabled() bool { return false } diff --git a/coreth/accounts/manager.go b/coreth/accounts/manager.go index 933405b4..02ef8987 100644 --- a/coreth/accounts/manager.go +++ b/coreth/accounts/manager.go @@ -267,7 +267,7 @@ func merge(slice []Wallet, wallets ...Wallet) []Wallet { return slice } -// drop is the couterpart of merge, which looks up wallets from within the sorted +// drop is the counterpart of merge, which looks up wallets from within the sorted // cache and removes the ones specified. 
func drop(slice []Wallet, wallets ...Wallet) []Wallet { for _, wallet := range wallets { diff --git a/coreth/accounts/scwallet/wallet.go b/coreth/accounts/scwallet/wallet.go index a95701b6..721ea38b 100644 --- a/coreth/accounts/scwallet/wallet.go +++ b/coreth/accounts/scwallet/wallet.go @@ -109,8 +109,8 @@ const ( P1DeriveKeyFromCurrent = uint8(0x10) statusP1WalletStatus = uint8(0x00) statusP1Path = uint8(0x01) - signP1PrecomputedHash = uint8(0x01) - signP2OnlyBlock = uint8(0x81) + signP1PrecomputedHash = uint8(0x00) + signP2OnlyBlock = uint8(0x00) exportP1Any = uint8(0x00) exportP2Pubkey = uint8(0x01) ) diff --git a/coreth/cmd/abigen/main.go b/coreth/cmd/abigen/main.go index c26c7685..2f10ccdb 100644 --- a/coreth/cmd/abigen/main.go +++ b/coreth/cmd/abigen/main.go @@ -43,14 +43,6 @@ import ( "github.com/urfave/cli/v2" ) -var ( - // Git SHA1 commit hash of the release (set via linker flags) - gitCommit = "" - gitDate = "" - - app *cli.App -) - var ( // Flags needed by abigen abiFlag = &cli.StringFlag{ @@ -83,7 +75,7 @@ var ( } langFlag = &cli.StringFlag{ Name: "lang", - Usage: "Destination language for the bindings (go, java, objc)", + Usage: "Destination language for the bindings (go)", Value: "go", } aliasFlag = &cli.StringFlag{ @@ -92,8 +84,9 @@ var ( } ) +var app = flags.NewApp("Ethereum ABI wrapper code generator") + func init() { - app = flags.NewApp(gitCommit, gitDate, "ethereum checkpoint helper tool") app.Name = "abigen" app.Flags = []cli.Flag{ abiFlag, @@ -119,11 +112,6 @@ func abigen(c *cli.Context) error { switch c.String(langFlag.Name) { case "go": lang = bind.LangGo - case "java": - lang = bind.LangJava - case "objc": - lang = bind.LangObjC - utils.Fatalf("Objc binding generation is uncompleted") default: utils.Fatalf("Unsupported destination language \"%s\" (--lang)", c.String(langFlag.Name)) } @@ -171,9 +159,12 @@ func abigen(c *cli.Context) error { types = append(types, kind) } else { // Generate the list of types to exclude from binding - exclude 
:= make(map[string]bool) - for _, kind := range strings.Split(c.String(excFlag.Name), ",") { - exclude[strings.ToLower(kind)] = true + var exclude *nameFilter + if c.IsSet(excFlag.Name) { + var err error + if exclude, err = newNameFilter(strings.Split(c.String(excFlag.Name), ",")...); err != nil { + utils.Fatalf("Failed to parse excludes: %v", err) + } } var contracts map[string]*compiler.Contract @@ -198,7 +189,11 @@ func abigen(c *cli.Context) error { } // Gather all non-excluded contract for binding for name, contract := range contracts { - if exclude[strings.ToLower(name)] { + // fully qualified name is of the form : + nameParts := strings.Split(name, ":") + typeName := nameParts[len(nameParts)-1] + if exclude != nil && exclude.Matches(name) { + fmt.Fprintf(os.Stderr, "excluding: %v\n", name) continue } abi, err := json.Marshal(contract.Info.AbiDefinition) // Flatten the compiler parse @@ -208,15 +203,14 @@ func abigen(c *cli.Context) error { abis = append(abis, string(abi)) bins = append(bins, contract.Code) sigs = append(sigs, contract.Hashes) - nameParts := strings.Split(name, ":") - types = append(types, nameParts[len(nameParts)-1]) + types = append(types, typeName) // Derive the library placeholder which is a 34 character prefix of the // hex encoding of the keccak256 hash of the fully qualified library name. // Note that the fully qualified library name is the path of its source // file and the library name separated by ":". libPattern := crypto.Keccak256Hash([]byte(name)).String()[2:36] // the first 2 chars are 0x - libs[libPattern] = nameParts[len(nameParts)-1] + libs[libPattern] = typeName } } // Extract all aliases from the flags diff --git a/coreth/cmd/abigen/namefilter.go b/coreth/cmd/abigen/namefilter.go new file mode 100644 index 00000000..5fcf6e17 --- /dev/null +++ b/coreth/cmd/abigen/namefilter.go @@ -0,0 +1,67 @@ +// (c) 2023, Ava Labs, Inc. 
+// +// This file is a derived work, based on the go-ethereum library whose original +// notices appear below. +// +// It is distributed under a license compatible with the licensing terms of the +// original code from which it is derived. +// +// Much love to the original authors for their work. +package main + +import ( + "fmt" + "strings" +) + +type nameFilter struct { + fulls map[string]bool // path/to/contract.sol:Type + files map[string]bool // path/to/contract.sol:* + types map[string]bool // *:Type +} + +func newNameFilter(patterns ...string) (*nameFilter, error) { + f := &nameFilter{ + fulls: make(map[string]bool), + files: make(map[string]bool), + types: make(map[string]bool), + } + for _, pattern := range patterns { + if err := f.add(pattern); err != nil { + return nil, err + } + } + return f, nil +} + +func (f *nameFilter) add(pattern string) error { + ft := strings.Split(pattern, ":") + if len(ft) != 2 { + // filenames and types must not include ':' symbol + return fmt.Errorf("invalid pattern: %s", pattern) + } + + file, typ := ft[0], ft[1] + if file == "*" { + f.types[typ] = true + return nil + } else if typ == "*" { + f.files[file] = true + return nil + } + f.fulls[pattern] = true + return nil +} + +func (f *nameFilter) Matches(name string) bool { + ft := strings.Split(name, ":") + if len(ft) != 2 { + // If contract names are always of the fully-qualified form + // :, then this case will never happen. + return false + } + + file, typ := ft[0], ft[1] + // full paths > file paths > types + return f.fulls[name] || f.files[file] || f.types[typ] +} diff --git a/coreth/cmd/abigen/namefilter_test.go b/coreth/cmd/abigen/namefilter_test.go new file mode 100644 index 00000000..051eb04e --- /dev/null +++ b/coreth/cmd/abigen/namefilter_test.go @@ -0,0 +1,47 @@ +// (c) 2023, Ava Labs, Inc. +// +// This file is a derived work, based on the go-ethereum library whose original +// notices appear below. 
+// +// It is distributed under a license compatible with the licensing terms of the +// original code from which it is derived. +// +// Much love to the original authors for their work. +package main + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestNameFilter(t *testing.T) { + _, err := newNameFilter("Foo") + require.Error(t, err) + _, err = newNameFilter("too/many:colons:Foo") + require.Error(t, err) + + f, err := newNameFilter("a/path:A", "*:B", "c/path:*") + require.NoError(t, err) + + for _, tt := range []struct { + name string + match bool + }{ + {"a/path:A", true}, + {"unknown/path:A", false}, + {"a/path:X", false}, + {"unknown/path:X", false}, + {"any/path:B", true}, + {"c/path:X", true}, + {"c/path:foo:B", false}, + } { + match := f.Matches(tt.name) + if tt.match { + assert.True(t, match, "expected match") + } else { + assert.False(t, match, "expected no match") + } + } +} diff --git a/coreth/consensus/dummy/README.md b/coreth/consensus/dummy/README.md index 4edff330..23ff2007 100644 --- a/coreth/consensus/dummy/README.md +++ b/coreth/consensus/dummy/README.md @@ -14,7 +14,7 @@ The dummy consensus engine is responsible for performing verification on the hea As of Apricot Phase 3, the C-Chain includes a dynamic fee algorithm based off of (EIP-1559)[https://eips.ethereum.org/EIPS/eip-1559]. This introduces a field to the block type called `BaseFee`. The Base Fee sets a minimum gas price for any transaction to be included in the block. For example, a transaction with a gas price of 49 gwei, will be invalid to include in a block with a base fee of 50 gwei. -The dynamic fee algorithm aims to adjust the base fee to handle network congestion. Coreth sets a target utilization on the network, and the dynamic fee algorithm adjusts the base fee accordingly. 
If the network operates above the target utilization, the dynamic fee algorithm will increase the base fee to make utilizing he network more expensive and bring overall utilization down. If the network operates below the target utilization, the dynamic fee algorithm will decrease the base fee to make it cheaper to use the network. +The dynamic fee algorithm aims to adjust the base fee to handle network congestion. Coreth sets a target utilization on the network, and the dynamic fee algorithm adjusts the base fee accordingly. If the network operates above the target utilization, the dynamic fee algorithm will increase the base fee to make utilizing the network more expensive and bring overall utilization down. If the network operates below the target utilization, the dynamic fee algorithm will decrease the base fee to make it cheaper to use the network. - EIP-1559 is intended for Ethereum where a block is produced roughly every 10s - C-Chain typically produces blocks every 2 seconds, but the dynamic fee algorithm needs to handle the case that the network quiesces and there are no blocks for a long period of time diff --git a/coreth/consensus/dummy/consensus.go b/coreth/consensus/dummy/consensus.go index 935ecdf9..f51c1980 100644 --- a/coreth/consensus/dummy/consensus.go +++ b/coreth/consensus/dummy/consensus.go @@ -10,11 +10,11 @@ import ( "math/big" "time" + "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/coreth/consensus" "github.com/ava-labs/coreth/core/state" "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/params" - "github.com/ava-labs/coreth/rpc" "github.com/ava-labs/coreth/trie" "github.com/ethereum/go-ethereum/common" ) @@ -31,16 +31,14 @@ var ( errExtDataGasUsedTooLarge = errors.New("extDataGasUsed is not uint64") ) -type Mode uint - -const ( - ModeSkipHeader Mode = 1 // Skip over header verification - ModeSkipBlockFee Mode = 2 // Skip block fee verification -) +type Mode struct { + ModeSkipHeader bool + 
ModeSkipBlockFee bool + ModeSkipCoinbase bool +} type ( OnFinalizeAndAssembleCallbackType = func(header *types.Header, state *state.StateDB, txs []*types.Transaction) (extraData []byte, blockFeeContribution *big.Int, extDataGasUsed *big.Int, err error) - OnAPIsCallbackType = func(consensus.ChainHeaderReader) []rpc.API OnExtraStateChangeType = func(block *types.Block, statedb *state.StateDB) (blockFeeContribution *big.Int, extDataGasUsed *big.Int, err error) ConsensusCallbacks struct { @@ -49,45 +47,62 @@ type ( } DummyEngine struct { - cb *ConsensusCallbacks + cb ConsensusCallbacks + clock *mockable.Clock consensusMode Mode } ) -func NewDummyEngine(cb *ConsensusCallbacks) *DummyEngine { +func NewETHFaker() *DummyEngine { return &DummyEngine{ - cb: cb, + clock: &mockable.Clock{}, + consensusMode: Mode{ModeSkipBlockFee: true}, } } -func NewETHFaker() *DummyEngine { +func NewFaker() *DummyEngine { return &DummyEngine{ - cb: new(ConsensusCallbacks), - consensusMode: ModeSkipBlockFee, + clock: &mockable.Clock{}, } } -func NewComplexETHFaker(cb *ConsensusCallbacks) *DummyEngine { +func NewFakerWithClock(cb ConsensusCallbacks, clock *mockable.Clock) *DummyEngine { + return &DummyEngine{ + cb: cb, + clock: clock, + } +} + +func NewFakerWithCallbacks(cb ConsensusCallbacks) *DummyEngine { + return &DummyEngine{ + cb: cb, + clock: &mockable.Clock{}, + } +} + +func NewFakerWithMode(cb ConsensusCallbacks, mode Mode) *DummyEngine { return &DummyEngine{ cb: cb, - consensusMode: ModeSkipBlockFee, + clock: &mockable.Clock{}, + consensusMode: mode, } } -func NewFaker() *DummyEngine { - return NewDummyEngine(new(ConsensusCallbacks)) +func NewCoinbaseFaker() *DummyEngine { + return &DummyEngine{ + clock: &mockable.Clock{}, + consensusMode: Mode{ModeSkipCoinbase: true}, + } } func NewFullFaker() *DummyEngine { return &DummyEngine{ - cb: new(ConsensusCallbacks), - consensusMode: ModeSkipHeader, + clock: &mockable.Clock{}, + consensusMode: Mode{ModeSkipHeader: true}, } } func (self 
*DummyEngine) verifyHeaderGasFields(config *params.ChainConfig, header *types.Header, parent *types.Header) error { - timestamp := new(big.Int).SetUint64(header.Time) - // Verify that the gas limit is <= 2^63-1 if header.GasLimit > params.MaxGasLimit { return fmt.Errorf("invalid gasLimit: have %v, max %v", header.GasLimit, params.MaxGasLimit) @@ -96,24 +111,39 @@ func (self *DummyEngine) verifyHeaderGasFields(config *params.ChainConfig, heade if header.GasUsed > header.GasLimit { return fmt.Errorf("invalid gasUsed: have %d, gasLimit %d", header.GasUsed, header.GasLimit) } - if config.IsCortina(timestamp) { + if config.IsCortina(header.Time) { if header.GasLimit != params.CortinaGasLimit { return fmt.Errorf("expected gas limit to be %d in Cortina, but found %d", params.CortinaGasLimit, header.GasLimit) } } else { if config.IsSongbirdCode() { // Verify that the gas limit is correct for the current phase - if config.IsSongbirdTransition(timestamp) { + if config.IsSongbirdTransition(header.Time) { if header.GasLimit != params.SgbTransitionGasLimit { return fmt.Errorf("expected gas limit to be %d in SgbTransition but found %d", params.SgbTransitionGasLimit, header.GasLimit) } - } else if config.IsApricotPhase5(timestamp) { + } else if config.IsApricotPhase5(header.Time) { if header.GasLimit != params.SgbApricotPhase5GasLimit { return fmt.Errorf("expected gas limit to be %d in ApricotPhase5 but found %d", params.SgbApricotPhase5GasLimit, header.GasLimit) } + } else if config.IsApricotPhase1(header.Time) { + if header.GasLimit != params.ApricotPhase1GasLimit { + return fmt.Errorf("expected gas limit to be %d in ApricotPhase1, but found %d", params.ApricotPhase1GasLimit, header.GasLimit) + } + } else { + // Verify that the gas limit remains within allowed bounds + diff := int64(parent.GasLimit) - int64(header.GasLimit) + if diff < 0 { + diff *= -1 + } + limit := parent.GasLimit / params.GasLimitBoundDivisor + + if uint64(diff) >= limit || header.GasLimit < 
params.MinGasLimit { + return fmt.Errorf("invalid gas limit: have %d, want %d += %d", header.GasLimit, parent.GasLimit, limit) + } } } else { - if config.IsApricotPhase1(timestamp) { + if config.IsApricotPhase1(header.Time) { if header.GasLimit != params.ApricotPhase1GasLimit { return fmt.Errorf("expected gas limit to be %d in ApricotPhase1, but found %d", params.ApricotPhase1GasLimit, header.GasLimit) } @@ -132,7 +162,7 @@ func (self *DummyEngine) verifyHeaderGasFields(config *params.ChainConfig, heade } } - if !config.IsApricotPhase3(timestamp) { + if !config.IsApricotPhase3(header.Time) { // Verify BaseFee is not present before AP3 if header.BaseFee != nil { return fmt.Errorf("invalid baseFee before fork: have %d, want ", header.BaseFee) @@ -144,7 +174,7 @@ func (self *DummyEngine) verifyHeaderGasFields(config *params.ChainConfig, heade if err != nil { return fmt.Errorf("failed to calculate base fee: %w", err) } - if !bytes.Equal(expectedRollupWindowBytes, header.Extra) { + if len(header.Extra) < len(expectedRollupWindowBytes) || !bytes.Equal(expectedRollupWindowBytes, header.Extra[:len(expectedRollupWindowBytes)]) { return fmt.Errorf("expected rollup window bytes: %x, found %x", expectedRollupWindowBytes, header.Extra) } if header.BaseFee == nil { @@ -159,7 +189,7 @@ func (self *DummyEngine) verifyHeaderGasFields(config *params.ChainConfig, heade } // Verify BlockGasCost, ExtDataGasUsed not present before AP4 - if !config.IsApricotPhase4(timestamp) { + if !config.IsApricotPhase4(header.Time) { if header.BlockGasCost != nil { return fmt.Errorf("invalid blockGasCost before fork: have %d, want ", header.BlockGasCost) } @@ -171,7 +201,7 @@ func (self *DummyEngine) verifyHeaderGasFields(config *params.ChainConfig, heade // Enforce BlockGasCost constraints blockGasCostStep := ApricotPhase4BlockGasCostStep - if config.IsApricotPhase5(timestamp) { + if config.IsApricotPhase5(header.Time) { blockGasCostStep = ApricotPhase5BlockGasCostStep } var 
apricotPhase4TargetBlockRate uint64 @@ -210,33 +240,36 @@ func (self *DummyEngine) verifyHeaderGasFields(config *params.ChainConfig, heade // modified from consensus.go func (self *DummyEngine) verifyHeader(chain consensus.ChainHeaderReader, header *types.Header, parent *types.Header, uncle bool) error { - var ( - config = chain.Config() - timestamp = new(big.Int).SetUint64(header.Time) - ) + config := chain.Config() // Ensure that we do not verify an uncle if uncle { return errUnclesUnsupported } - // Ensure that the header's extra-data section is of a reasonable size - if !config.IsApricotPhase3(timestamp) { + switch { + case config.IsDurango(header.Time): + if len(header.Extra) < params.DynamicFeeExtraDataSize { + return fmt.Errorf("expected extra-data field length >= %d, found %d", params.DynamicFeeExtraDataSize, len(header.Extra)) + } + case config.IsApricotPhase3(header.Time): + if len(header.Extra) != params.DynamicFeeExtraDataSize { + return fmt.Errorf("expected extra-data field to be: %d, but found %d", params.DynamicFeeExtraDataSize, len(header.Extra)) + } + default: if uint64(len(header.Extra)) > params.MaximumExtraDataSize { return fmt.Errorf("extra-data too long: %d > %d", len(header.Extra), params.MaximumExtraDataSize) } - } else { - if uint64(len(header.Extra)) != params.ApricotPhase3ExtraDataSize { - return fmt.Errorf("expected extra-data field to be: %d, but found %d", params.ApricotPhase3ExtraDataSize, len(header.Extra)) - } } // Ensure gas-related header fields are correct if err := self.verifyHeaderGasFields(config, header, parent); err != nil { return err } + // Verify the header's timestamp - if header.Time > uint64(time.Now().Add(allowedFutureBlockTime).Unix()) { + if header.Time > uint64(self.clock.Time().Add(allowedFutureBlockTime).Unix()) { return consensus.ErrFutureBlock } - //if header.Time <= parent.Time { + // Verify the header's timestamp is not earlier than parent's + // it does include equality(==), so multiple blocks per second is 
ok if header.Time < parent.Time { return errInvalidBlockTime } @@ -244,6 +277,14 @@ func (self *DummyEngine) verifyHeader(chain consensus.ChainHeaderReader, header if diff := new(big.Int).Sub(header.Number, parent.Number); diff.Cmp(big.NewInt(1)) != 0 { return consensus.ErrInvalidNumber } + // Verify the existence / non-existence of excessDataGas + cancun := chain.Config().IsCancun(header.Time) + if cancun && header.ExcessDataGas == nil { + return errors.New("missing excessDataGas") + } + if !cancun && header.ExcessDataGas != nil { + return fmt.Errorf("invalid excessDataGas: have %d, expected nil", header.ExcessDataGas) + } return nil } @@ -253,7 +294,7 @@ func (self *DummyEngine) Author(header *types.Header) (common.Address, error) { func (self *DummyEngine) VerifyHeader(chain consensus.ChainHeaderReader, header *types.Header) error { // If we're running a full engine faking, accept any input as valid - if self.consensusMode == ModeSkipHeader { + if self.consensusMode.ModeSkipHeader { return nil } // Short circuit if the header is known, or it's parent not @@ -288,7 +329,7 @@ func (self *DummyEngine) verifyBlockFee( receipts []*types.Receipt, extraStateChangeContribution *big.Int, ) error { - if self.consensusMode == ModeSkipBlockFee { + if self.consensusMode.ModeSkipBlockFee { return nil } if baseFee == nil || baseFee.Sign() <= 0 { @@ -328,6 +369,7 @@ func (self *DummyEngine) verifyBlockFee( // Minimum Fee = 10 gwei * 1M gas (minimum fee that would have been accepted for this transaction) // Fee Premium = 90 gwei // Total Overpaid = 90 gwei * 1M gas + blockFeeContribution.Mul(txFeePremium, gasUsed.SetUint64(receipt.GasUsed)) totalBlockFee.Add(totalBlockFee, blockFeeContribution) } @@ -361,7 +403,7 @@ func (self *DummyEngine) Finalize(chain consensus.ChainHeaderReader, block *type return err } } - if chain.Config().IsApricotPhase4(new(big.Int).SetUint64(block.Time())) { + if chain.Config().IsApricotPhase4(block.Time()) { // Validate extDataGasUsed and BlockGasCost 
match expectations // // NOTE: This is a duplicate check of what is already performed in @@ -373,7 +415,7 @@ func (self *DummyEngine) Finalize(chain consensus.ChainHeaderReader, block *type return fmt.Errorf("invalid extDataGasUsed: have %d, want %d", blockExtDataGasUsed, extDataGasUsed) } blockGasCostStep := ApricotPhase4BlockGasCostStep - if chain.Config().IsApricotPhase5(new(big.Int).SetUint64(block.Time())) { + if chain.Config().IsApricotPhase5(block.Time()) { blockGasCostStep = ApricotPhase5BlockGasCostStep } // Calculate the expected blockGasCost for this block. @@ -412,7 +454,8 @@ func (self *DummyEngine) Finalize(chain consensus.ChainHeaderReader, block *type } func (self *DummyEngine) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.Header, parent *types.Header, state *state.StateDB, txs []*types.Transaction, - uncles []*types.Header, receipts []*types.Receipt) (*types.Block, error) { + uncles []*types.Header, receipts []*types.Receipt, +) (*types.Block, error) { var ( contribution, extDataGasUsed *big.Int extraData []byte @@ -424,13 +467,13 @@ func (self *DummyEngine) FinalizeAndAssemble(chain consensus.ChainHeaderReader, return nil, err } } - if chain.Config().IsApricotPhase4(new(big.Int).SetUint64(header.Time)) { + if chain.Config().IsApricotPhase4(header.Time) { header.ExtDataGasUsed = extDataGasUsed if header.ExtDataGasUsed == nil { header.ExtDataGasUsed = new(big.Int).Set(common.Big0) } blockGasCostStep := ApricotPhase4BlockGasCostStep - if chain.Config().IsApricotPhase5(new(big.Int).SetUint64(header.Time)) { + if chain.Config().IsApricotPhase5(header.Time) { blockGasCostStep = ApricotPhase5BlockGasCostStep } // Calculate the required block gas cost for this block. 
@@ -463,9 +506,9 @@ func (self *DummyEngine) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number)) // Header seems complete, assemble into a block and return - return types.NewBlock( - header, txs, uncles, receipts, new(trie.Trie), extraData, - chain.Config().IsApricotPhase1(new(big.Int).SetUint64(header.Time)), + return types.NewBlockWithExtData( + header, txs, uncles, receipts, trie.NewStackTrie(nil), + extraData, chain.Config().IsApricotPhase1(header.Time), ), nil } diff --git a/coreth/consensus/dummy/dynamic_fees.go b/coreth/consensus/dummy/dynamic_fees.go index cd3e7473..b6817a01 100644 --- a/coreth/consensus/dummy/dynamic_fees.go +++ b/coreth/consensus/dummy/dynamic_fees.go @@ -41,20 +41,21 @@ var ( func CalcBaseFee(config *params.ChainConfig, parent *types.Header, timestamp uint64) ([]byte, *big.Int, error) { // If the current block is the first EIP-1559 block, or it is the genesis block // return the initial slice and initial base fee. 
- bigTimestamp := new(big.Int).SetUint64(parent.Time) var ( - isApricotPhase3 = config.IsApricotPhase3(bigTimestamp) - isApricotPhase4 = config.IsApricotPhase4(bigTimestamp) - isApricotPhase5 = config.IsApricotPhase5(bigTimestamp) + isApricotPhase3 = config.IsApricotPhase3(parent.Time) + isApricotPhase4 = config.IsApricotPhase4(parent.Time) + isApricotPhase5 = config.IsApricotPhase5(parent.Time) ) if !isApricotPhase3 || parent.Number.Cmp(common.Big0) == 0 { - initialSlice := make([]byte, params.ApricotPhase3ExtraDataSize) + initialSlice := make([]byte, params.DynamicFeeExtraDataSize) initialBaseFee := big.NewInt(params.ApricotPhase3InitialBaseFee) return initialSlice, initialBaseFee, nil } - if uint64(len(parent.Extra)) != params.ApricotPhase3ExtraDataSize { - return nil, nil, fmt.Errorf("expected length of parent extra data to be %d, but found %d", params.ApricotPhase3ExtraDataSize, len(parent.Extra)) + + if uint64(len(parent.Extra)) < params.DynamicFeeExtraDataSize { + return nil, nil, fmt.Errorf("expected length of parent extra data to be %d, but found %d", params.DynamicFeeExtraDataSize, len(parent.Extra)) } + dynamicFeeWindow := parent.Extra[:params.DynamicFeeExtraDataSize] if timestamp < parent.Time { return nil, nil, fmt.Errorf("cannot calculate base fee for timestamp (%d) prior to parent timestamp (%d)", timestamp, parent.Time) @@ -63,7 +64,7 @@ func CalcBaseFee(config *params.ChainConfig, parent *types.Header, timestamp uin // roll the window over by the difference between the timestamps to generate // the new rollup window. 
- newRollupWindow, err := rollLongWindow(parent.Extra, int(roll)) + newRollupWindow, err := rollLongWindow(dynamicFeeWindow, int(roll)) if err != nil { return nil, nil, err } @@ -77,7 +78,7 @@ func CalcBaseFee(config *params.ChainConfig, parent *types.Header, timestamp uin ) if isApricotPhase5 { baseFeeChangeDenominator = ApricotPhase5BaseFeeChangeDenominator - if config.IsSongbirdCode() && !config.IsCortina(bigTimestamp) { + if config.IsSongbirdCode() && !config.IsCortina(timestamp) { parentGasTarget = params.SgbApricotPhase5TargetGas } else { parentGasTarget = params.ApricotPhase5TargetGas @@ -341,7 +342,7 @@ func calcBlockGasCost( // // This function will return nil for all return values prior to Apricot Phase 4. func MinRequiredTip(config *params.ChainConfig, header *types.Header) (*big.Int, error) { - if !config.IsApricotPhase4(new(big.Int).SetUint64(header.Time)) { + if !config.IsApricotPhase4(header.Time) { return nil, nil } if header.BaseFee == nil { diff --git a/coreth/consensus/dummy/dynamic_fees_test.go b/coreth/consensus/dummy/dynamic_fees_test.go index 9bbfcc12..c8218140 100644 --- a/coreth/consensus/dummy/dynamic_fees_test.go +++ b/coreth/consensus/dummy/dynamic_fees_test.go @@ -107,6 +107,7 @@ type test struct { func TestDynamicFees(t *testing.T) { spacedTimestamps := []uint64{1, 1, 2, 5, 15, 120} + var tests []test = []test{ // Test minimal gas usage { @@ -276,7 +277,7 @@ func TestSelectBigWithinBounds(t *testing.T) { lower, value, upper, expected *big.Int } - var tests = map[string]test{ + tests := map[string]test{ "value within bounds": { lower: big.NewInt(0), value: big.NewInt(5), diff --git a/coreth/consensus/misc/eip4844.go b/coreth/consensus/misc/eip4844.go new file mode 100644 index 00000000..f0bca33d --- /dev/null +++ b/coreth/consensus/misc/eip4844.go @@ -0,0 +1,54 @@ +// Copyright 2023 The go-ethereum Authors +// This file is part of the go-ethereum library. 
+// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package misc + +import ( + "math/big" + + "github.com/ava-labs/coreth/params" +) + +var ( + minDataGasPrice = big.NewInt(params.BlobTxMinDataGasprice) + dataGaspriceUpdateFraction = big.NewInt(params.BlobTxDataGaspriceUpdateFraction) +) + +// CalcBlobFee calculates the blobfee from the header's excess data gas field. +func CalcBlobFee(excessDataGas *big.Int) *big.Int { + // If this block does not yet have EIP-4844 enabled, return the starting fee + if excessDataGas == nil { + return big.NewInt(params.BlobTxMinDataGasprice) + } + return fakeExponential(minDataGasPrice, excessDataGas, dataGaspriceUpdateFraction) +} + +// fakeExponential approximates factor * e ** (numerator / denominator) using +// Taylor expansion. 
+func fakeExponential(factor, numerator, denominator *big.Int) *big.Int { + var ( + output = new(big.Int) + accum = new(big.Int).Mul(factor, denominator) + ) + for i := 1; accum.Sign() > 0; i++ { + output.Add(output, accum) + + accum.Mul(accum, numerator) + accum.Div(accum, denominator) + accum.Div(accum, big.NewInt(int64(i))) + } + return output.Div(output, denominator) +} diff --git a/coreth/consensus/misc/eip4844_test.go b/coreth/consensus/misc/eip4844_test.go new file mode 100644 index 00000000..2d35f097 --- /dev/null +++ b/coreth/consensus/misc/eip4844_test.go @@ -0,0 +1,85 @@ +// Copyright 2023 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package misc + +import ( + "fmt" + "math/big" + "testing" + + "github.com/ava-labs/coreth/params" +) + +func TestCalcBlobFee(t *testing.T) { + tests := []struct { + excessDataGas int64 + blobfee int64 + }{ + {0, 1}, + {1542706, 1}, + {1542707, 2}, + {10 * 1024 * 1024, 111}, + } + have := CalcBlobFee(nil) + if have.Int64() != params.BlobTxMinDataGasprice { + t.Errorf("nil test: blobfee mismatch: have %v, want %v", have, params.BlobTxMinDataGasprice) + } + for i, tt := range tests { + have := CalcBlobFee(big.NewInt(tt.excessDataGas)) + if have.Int64() != tt.blobfee { + t.Errorf("test %d: blobfee mismatch: have %v want %v", i, have, tt.blobfee) + } + } +} + +func TestFakeExponential(t *testing.T) { + tests := []struct { + factor int64 + numerator int64 + denominator int64 + want int64 + }{ + // When numerator == 0 the return value should always equal the value of factor + {1, 0, 1, 1}, + {38493, 0, 1000, 38493}, + {0, 1234, 2345, 0}, // should be 0 + {1, 2, 1, 6}, // approximate 7.389 + {1, 4, 2, 6}, + {1, 3, 1, 16}, // approximate 20.09 + {1, 6, 2, 18}, + {1, 4, 1, 49}, // approximate 54.60 + {1, 8, 2, 50}, + {10, 8, 2, 542}, // approximate 540.598 + {11, 8, 2, 596}, // approximate 600.58 + {1, 5, 1, 136}, // approximate 148.4 + {1, 5, 2, 11}, // approximate 12.18 + {2, 5, 2, 23}, // approximate 24.36 + {1, 50000000, 2225652, 5709098764}, + } + for i, tt := range tests { + f, n, d := big.NewInt(tt.factor), big.NewInt(tt.numerator), big.NewInt(tt.denominator) + original := fmt.Sprintf("%d %d %d", f, n, d) + have := fakeExponential(f, n, d) + if have.Int64() != tt.want { + t.Errorf("test %d: fake exponential mismatch: have %v want %v", i, have, tt.want) + } + later := fmt.Sprintf("%d %d %d", f, n, d) + if original != later { + t.Errorf("test %d: fake exponential modified arguments: have\n%v\nwant\n%v", i, later, original) + } + } +} diff --git a/coreth/constants/constants.go b/coreth/constants/constants.go index 5e490396..052235da 100644 --- 
a/coreth/constants/constants.go +++ b/coreth/constants/constants.go @@ -4,17 +4,30 @@ package constants import ( - "math/big" - "time" - "github.com/ethereum/go-ethereum/common" ) +// Network IDs: copied from avalanchego/utils/constants/network_ids.go +// There is an "import cycle" between coreth and avalanchego on Avalanche GitHub repository which lacks +// Flare and Songbird network ids. +const ( + MainnetID uint32 = 1 + CascadeID uint32 = 2 + DenaliID uint32 = 3 + EverestID uint32 = 4 + + UnitTestID uint32 = 10 + LocalID uint32 = 12345 + FlareID uint32 = 14 + CostwoID uint32 = 114 + LocalFlareID uint32 = 162 + SongbirdID uint32 = 5 + CostonID uint32 = 7 +) + var ( BlackholeAddr = common.Address{ 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, } - - NativeAssetCallDeprecationTime = big.NewInt(time.Date(2022, time.September, 16, 15, 0, 0, 0, time.UTC).Unix()) ) diff --git a/coreth/contracts/contracts/ExampleWarp.sol b/coreth/contracts/contracts/ExampleWarp.sol new file mode 100644 index 00000000..b6247058 --- /dev/null +++ b/coreth/contracts/contracts/ExampleWarp.sol @@ -0,0 +1,58 @@ +//SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; +pragma experimental ABIEncoderV2; + +import "./interfaces/IWarpMessenger.sol"; + +contract ExampleWarp { + address constant WARP_ADDRESS = 0x0200000000000000000000000000000000000005; + IWarpMessenger warp = IWarpMessenger(WARP_ADDRESS); + + // sendWarpMessage sends a warp message containing the payload + function sendWarpMessage(bytes calldata payload) external { + warp.sendWarpMessage(payload); + } + + // validateWarpMessage retrieves the warp message attached to the transaction and verifies all of its attributes. 
+ function validateWarpMessage( + uint32 index, + bytes32 sourceChainID, + address originSenderAddress, + bytes calldata payload + ) external view { + (WarpMessage memory message, bool valid) = warp.getVerifiedWarpMessage(index); + require(valid); + require(message.sourceChainID == sourceChainID); + require(message.originSenderAddress == originSenderAddress); + require(keccak256(message.payload) == keccak256(payload)); + } + + function validateInvalidWarpMessage(uint32 index) external view { + (WarpMessage memory message, bool valid) = warp.getVerifiedWarpMessage(index); + require(!valid); + require(message.sourceChainID == bytes32(0)); + require(message.originSenderAddress == address(0)); + require(keccak256(message.payload) == keccak256(bytes(""))); + } + + // validateWarpBlockHash retrieves the warp block hash attached to the transaction and verifies it matches the + // expected block hash. + function validateWarpBlockHash(uint32 index, bytes32 sourceChainID, bytes32 blockHash) external view { + (WarpBlockHash memory warpBlockHash, bool valid) = warp.getVerifiedWarpBlockHash(index); + require(valid); + require(warpBlockHash.sourceChainID == sourceChainID); + require(warpBlockHash.blockHash == blockHash); + } + + function validateInvalidWarpBlockHash(uint32 index) external view { + (WarpBlockHash memory warpBlockHash, bool valid) = warp.getVerifiedWarpBlockHash(index); + require(!valid); + require(warpBlockHash.sourceChainID == bytes32(0)); + require(warpBlockHash.blockHash == bytes32(0)); + } + + // validateGetBlockchainID checks that the blockchainID returned by warp matches the argument + function validateGetBlockchainID(bytes32 blockchainID) external view { + require(blockchainID == warp.getBlockchainID()); + } +} diff --git a/coreth/contracts/contracts/interfaces/IWarpMessenger.sol b/coreth/contracts/contracts/interfaces/IWarpMessenger.sol new file mode 100644 index 00000000..0a77d366 --- /dev/null +++ 
b/coreth/contracts/contracts/interfaces/IWarpMessenger.sol @@ -0,0 +1,51 @@ +// (c) 2022-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +// SPDX-License-Identifier: MIT + +pragma solidity ^0.8.0; + +struct WarpMessage { + bytes32 sourceChainID; + address originSenderAddress; + bytes payload; +} + +struct WarpBlockHash { + bytes32 sourceChainID; + bytes32 blockHash; +} + +interface IWarpMessenger { + event SendWarpMessage(address indexed sender, bytes32 indexed messageID, bytes message); + + // sendWarpMessage emits a request for the subnet to send a warp message from [msg.sender] + // with the specified parameters. + // This emits a SendWarpMessage log from the precompile. When the corresponding block is accepted + // the Accept hook of the Warp precompile is invoked with all accepted logs emitted by the Warp + // precompile. + // Each validator then adds the UnsignedWarpMessage encoded in the log to the set of messages + // it is willing to sign for an off-chain relayer to aggregate Warp signatures. + function sendWarpMessage(bytes calldata payload) external returns (bytes32 messageID); + + // getVerifiedWarpMessage parses the pre-verified warp message in the + // predicate storage slots as a WarpMessage and returns it to the caller. + // If the message exists and passes verification, returns the verified message + // and true. + // Otherwise, returns false and the empty value for the message. + function getVerifiedWarpMessage(uint32 index) external view returns (WarpMessage calldata message, bool valid); + + // getVerifiedWarpBlockHash parses the pre-verified WarpBlockHash message in the + // predicate storage slots as a WarpBlockHash message and returns it to the caller. + // If the message exists and passes verification, returns the verified message + // and true. + // Otherwise, returns false and the empty value for the message. 
+ function getVerifiedWarpBlockHash( + uint32 index + ) external view returns (WarpBlockHash calldata warpBlockHash, bool valid); + + // getBlockchainID returns the snow.Context BlockchainID of this chain. + // This blockchainID is the hash of the transaction that created this blockchain on the P-Chain + // and is not related to the Ethereum ChainID. + function getBlockchainID() external view returns (bytes32 blockchainID); +} diff --git a/coreth/core/bench_test.go b/coreth/core/bench_test.go index 0bacc6f6..649e98f8 100644 --- a/coreth/core/bench_test.go +++ b/coreth/core/bench_test.go @@ -35,11 +35,11 @@ import ( "github.com/ava-labs/coreth/core/rawdb" "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/core/vm" - "github.com/ava-labs/coreth/ethdb" "github.com/ava-labs/coreth/params" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethdb" ) func BenchmarkInsertChain_empty_memdb(b *testing.B) { @@ -88,8 +88,9 @@ func genValueTx(nbytes int) func(int, *BlockGen) { return func(i int, gen *BlockGen) { toaddr := common.Address{} data := make([]byte, nbytes) - gas, _ := IntrinsicGas(data, nil, false, false, false, false) - tx, _ := types.SignTx(types.NewTransaction(gen.TxNonce(benchRootAddr), toaddr, big.NewInt(1), gas, big.NewInt(225000000000), data), types.HomesteadSigner{}, benchRootKey) + gas, _ := IntrinsicGas(data, nil, false, params.Rules{}) // Disable Istanbul and EIP-2028 for this test + signer := types.MakeSigner(gen.config, big.NewInt(int64(i)), gen.header.Time) + tx, _ := types.SignTx(types.NewTransaction(gen.TxNonce(benchRootAddr), toaddr, big.NewInt(1), gas, big.NewInt(225000000000), data), signer, benchRootKey) gen.AddTx(tx) } } @@ -118,6 +119,7 @@ func genTxRing(naccounts int) func(int, *BlockGen) { return func(i int, gen *BlockGen) { block := gen.PrevBlock(i - 1) gas := block.GasLimit() + signer := 
types.MakeSigner(gen.config, big.NewInt(int64(i)), gen.header.Time) for { gas -= params.TxGas if gas < params.TxGas { @@ -132,7 +134,7 @@ func genTxRing(naccounts int) func(int, *BlockGen) { big.NewInt(225000000000), nil, ) - tx, _ = types.SignTx(tx, types.HomesteadSigner{}, ringKeys[from]) + tx, _ = types.SignTx(tx, signer, ringKeys[from]) gen.AddTx(tx) from = to } @@ -160,11 +162,11 @@ func benchInsertChain(b *testing.B, disk bool, gen func(int, *BlockGen)) { Config: params.TestChainConfig, Alloc: GenesisAlloc{benchRootAddr: {Balance: benchRootFunds}}, } - _, chain, _, _ := GenerateChainWithGenesis(gspec, dummy.NewFaker(), b.N, 10, gen) + _, chain, _, _ := GenerateChainWithGenesis(gspec, dummy.NewCoinbaseFaker(), b.N, 10, gen) // Time the insertion of the new chain. // State and blocks are stored in the same DB. - chainman, _ := NewBlockChain(db, DefaultCacheConfig, gspec, dummy.NewFaker(), vm.Config{}, common.Hash{}, false) + chainman, _ := NewBlockChain(db, DefaultCacheConfig, gspec, dummy.NewCoinbaseFaker(), vm.Config{}, common.Hash{}, false) defer chainman.Stop() b.ReportAllocs() b.ResetTimer() @@ -221,8 +223,8 @@ func makeChainForBench(db ethdb.Database, full bool, count uint64) { ParentHash: hash, Difficulty: big.NewInt(1), UncleHash: types.EmptyUncleHash, - TxHash: types.EmptyRootHash, - ReceiptHash: types.EmptyRootHash, + TxHash: types.EmptyTxsHash, + ReceiptHash: types.EmptyReceiptsHash, } hash = header.Hash() @@ -282,7 +284,7 @@ func benchReadChain(b *testing.B, full bool, count uint64) { if full { hash := header.Hash() rawdb.ReadBody(db, hash, n) - rawdb.ReadReceipts(db, hash, n, chain.Config()) + rawdb.ReadReceipts(db, hash, n, header.Time, chain.Config()) } } chain.Stop() diff --git a/coreth/core/block_validator.go b/coreth/core/block_validator.go index 287a42fb..5433c8c4 100644 --- a/coreth/core/block_validator.go +++ b/coreth/core/block_validator.go @@ -60,20 +60,22 @@ func NewBlockValidator(config *params.ChainConfig, blockchain *BlockChain, engin 
// header's transaction and uncle roots. The headers are assumed to be already // validated at this point. func (v *BlockValidator) ValidateBody(block *types.Block) error { - // Check whether the block's known, and if not, that it's linkable + // Check whether the block is already imported. if v.bc.HasBlockAndState(block.Hash(), block.NumberU64()) { return ErrKnownBlock } - // Header validity is known at this point, check the uncles and transactions + + // Header validity is known at this point. Here we verify that uncle and transactions + // given in the block body match the header. header := block.Header() if err := v.engine.VerifyUncles(v.bc, block); err != nil { return err } if hash := types.CalcUncleHash(block.Uncles()); hash != header.UncleHash { - return fmt.Errorf("uncle root hash mismatch: have %x, want %x", hash, header.UncleHash) + return fmt.Errorf("uncle root hash mismatch (header value %x, calculated %x)", header.UncleHash, hash) } if hash := types.DeriveSha(block.Transactions(), trie.NewStackTrie(nil)); hash != header.TxHash { - return fmt.Errorf("transaction root hash mismatch: have %x, want %x", hash, header.TxHash) + return fmt.Errorf("transaction root hash mismatch (header value %x, calculated %x)", header.TxHash, hash) } if !v.bc.HasBlockAndState(block.ParentHash(), block.NumberU64()-1) { if !v.bc.HasBlock(block.ParentHash(), block.NumberU64()-1) { @@ -84,10 +86,8 @@ func (v *BlockValidator) ValidateBody(block *types.Block) error { return nil } -// ValidateState validates the various changes that happen after a state -// transition, such as amount of used gas, the receipt roots and the state root -// itself. ValidateState returns a database batch if the validation was a success -// otherwise nil and an error is returned. +// ValidateState validates the various changes that happen after a state transition, +// such as amount of used gas, the receipt roots and the state root itself. 
func (v *BlockValidator) ValidateState(block *types.Block, statedb *state.StateDB, receipts types.Receipts, usedGas uint64) error { header := block.Header() if block.GasUsed() != usedGas { @@ -107,7 +107,7 @@ func (v *BlockValidator) ValidateState(block *types.Block, statedb *state.StateD // Validate the state root against the received state root and throw // an error if they don't match. if root := statedb.IntermediateRoot(v.config.IsEIP158(header.Number)); header.Root != root { - return fmt.Errorf("invalid merkle root (remote: %x local: %x)", header.Root, root) + return fmt.Errorf("invalid merkle root (remote: %x local: %x) dberr: %w", header.Root, root, statedb.Error()) } return nil } diff --git a/coreth/core/blockchain.go b/coreth/core/blockchain.go index b102ce27..45ba15b8 100644 --- a/coreth/core/blockchain.go +++ b/coreth/core/blockchain.go @@ -32,7 +32,6 @@ import ( "errors" "fmt" "io" - "math/big" "runtime" "strings" "sync" @@ -45,14 +44,15 @@ import ( "github.com/ava-labs/coreth/core/state/snapshot" "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/core/vm" - "github.com/ava-labs/coreth/ethdb" + "github.com/ava-labs/coreth/internal/version" "github.com/ava-labs/coreth/metrics" "github.com/ava-labs/coreth/params" "github.com/ava-labs/coreth/trie" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/lru" + "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/log" - lru "github.com/hashicorp/golang-lru" ) var ( @@ -67,7 +67,8 @@ var ( snapshotAccountReadTimer = metrics.NewRegisteredCounter("chain/snapshot/account/reads", nil) snapshotStorageReadTimer = metrics.NewRegisteredCounter("chain/snapshot/storage/reads", nil) snapshotCommitTimer = metrics.NewRegisteredCounter("chain/snapshot/commits", nil) - triedbCommitTimer = metrics.NewRegisteredCounter("chain/triedb/commits", nil) + + triedbCommitTimer = metrics.NewRegisteredCounter("chain/triedb/commits", 
nil) blockInsertTimer = metrics.NewRegisteredCounter("chain/block/inserts", nil) blockInsertCount = metrics.NewRegisteredCounter("chain/block/inserts/count", nil) @@ -75,7 +76,7 @@ var ( blockStateInitTimer = metrics.NewRegisteredCounter("chain/block/inits/state", nil) blockExecutionTimer = metrics.NewRegisteredCounter("chain/block/executions", nil) blockTrieOpsTimer = metrics.NewRegisteredCounter("chain/block/trie", nil) - blockStateValidationTimer = metrics.NewRegisteredCounter("chain/block/validations/state", nil) + blockValidationTimer = metrics.NewRegisteredCounter("chain/block/validations/state", nil) blockWriteTimer = metrics.NewRegisteredCounter("chain/block/writes", nil) acceptorQueueGauge = metrics.NewRegisteredGauge("chain/acceptor/queue/size", nil) @@ -139,7 +140,7 @@ const ( trieCleanCacheStatsNamespace = "trie/memcache/clean/fastcache" ) -// CacheConfig contains the configuration values for the trie caching/pruning +// CacheConfig contains the configuration values for the trie database // that's resident in a blockchain. type CacheConfig struct { TrieCleanLimit int // Memory allowance (MB) to use for caching trie nodes in memory @@ -147,31 +148,35 @@ type CacheConfig struct { TrieCleanRejournal time.Duration // Time interval to dump clean cache to disk periodically TrieDirtyLimit int // Memory limit (MB) at which to block on insert and force a flush of dirty trie nodes to disk TrieDirtyCommitTarget int // Memory limit (MB) to target for the dirties cache before invoking commit + TriePrefetcherParallelism int // Max concurrent disk reads trie prefetcher should perform at once CommitInterval uint64 // Commit the trie every [CommitInterval] blocks. Pruning bool // Whether to disable trie write caching and GC altogether (archive node) AcceptorQueueLimit int // Blocks to queue before blocking during acceptance PopulateMissingTries *uint64 // If non-nil, sets the starting height for re-generating historical tries. 
- PopulateMissingTriesParallelism int // Is the number of readers to use when trying to populate missing tries. + PopulateMissingTriesParallelism int // Number of readers to use when trying to populate missing tries. AllowMissingTries bool // Whether to allow an archive node to run with pruning enabled SnapshotDelayInit bool // Whether to initialize snapshots on startup or wait for external call SnapshotLimit int // Memory allowance (MB) to use for caching snapshot entries in memory - SnapshotAsync bool // Generate snapshot tree async SnapshotVerify bool // Verify generated snapshots - SkipSnapshotRebuild bool // Whether to skip rebuilding the snapshot in favor of returning an error (only set to true for tests) Preimages bool // Whether to store preimage of trie key to the disk AcceptedCacheSize int // Depth of accepted headers cache and accepted logs cache at the accepted tip TxLookupLimit uint64 // Number of recent blocks for which to maintain transaction lookup indices + SkipTxIndexing bool // Whether to skip transaction indexing + + SnapshotNoBuild bool // Whether the background generation is allowed + SnapshotWait bool // Wait for snapshot construction on startup. 
TODO(karalabe): This is a dirty hack for testing, nuke it } var DefaultCacheConfig = &CacheConfig{ - TrieCleanLimit: 256, - TrieDirtyLimit: 256, - TrieDirtyCommitTarget: 20, // 20% overhead in memory counting (this targets 16 MB) - Pruning: true, - CommitInterval: 4096, - AcceptorQueueLimit: 64, // Provides 2 minutes of buffer (2s block target) for a commit delay - SnapshotLimit: 256, - AcceptedCacheSize: 32, + TrieCleanLimit: 256, + TrieDirtyLimit: 256, + TrieDirtyCommitTarget: 20, // 20% overhead in memory counting (this targets 16 MB) + TriePrefetcherParallelism: 16, + Pruning: true, + CommitInterval: 4096, + AcceptorQueueLimit: 64, // Provides 2 minutes of buffer (2s block target) for a commit delay + SnapshotLimit: 256, + AcceptedCacheSize: 32, } // BlockChain represents the canonical chain given a database with a genesis @@ -192,9 +197,11 @@ type BlockChain struct { chainConfig *params.ChainConfig // Chain & network configuration cacheConfig *CacheConfig // Cache configuration for pruning - db ethdb.Database // Low level persistent database to store final content in - - snaps *snapshot.Tree // Snapshot tree for fast trie leaf access + db ethdb.Database // Low level persistent database to store final content in + snaps *snapshot.Tree // Snapshot tree for fast trie leaf access + triedb *trie.Database // The database handler for maintaining trie nodes. + stateCache state.Database // State database to reuse between imports (contains state cache) + stateManager TrieWriter hc *HeaderChain rmLogsFeed event.Feed @@ -213,24 +220,20 @@ type BlockChain struct { // Readers don't need to take it, they can just read the database. 
chainmu sync.RWMutex - currentBlock atomic.Value // Current head of the block chain + currentBlock atomic.Pointer[types.Header] // Current head of the block chain - stateCache state.Database // State database to reuse between imports (contains state cache) - stateManager TrieWriter - bodyCache *lru.Cache // Cache for the most recent block bodies - receiptsCache *lru.Cache // Cache for the most recent receipts per block - blockCache *lru.Cache // Cache for the most recent entire blocks - txLookupCache *lru.Cache // Cache for the most recent transaction lookup data. + bodyCache *lru.Cache[common.Hash, *types.Body] // Cache for the most recent block bodies + receiptsCache *lru.Cache[common.Hash, []*types.Receipt] // Cache for the most recent receipts per block + blockCache *lru.Cache[common.Hash, *types.Block] // Cache for the most recent entire blocks + txLookupCache *lru.Cache[common.Hash, *rawdb.LegacyTxLookupEntry] // Cache for the most recent transaction lookup data. + badBlocks *lru.Cache[common.Hash, *badBlock] // Cache for bad blocks - running int32 // 0 if chain is running, 1 when stopped + stopping atomic.Bool // false if chain is running, true when stopped - engine consensus.Engine - validator Validator // Block and state validator interface - prefetcher Prefetcher // Block state prefetcher interface - processor Processor // Block transaction processor interface - vmConfig vm.Config - - badBlocks *lru.Cache // Bad block cache + engine consensus.Engine + validator Validator // Block and state validator interface + processor Processor // Block transaction processor interface + vmConfig vm.Config lastAccepted *types.Block // Prevents reorgs past this height @@ -287,54 +290,50 @@ func NewBlockChain( if cacheConfig == nil { return nil, errCacheConfigNotSpecified } - bodyCache, _ := lru.New(bodyCacheLimit) - receiptsCache, _ := lru.New(receiptsCacheLimit) - blockCache, _ := lru.New(blockCacheLimit) - txLookupCache, _ := lru.New(txLookupCacheLimit) - badBlocks, _ 
:= lru.New(badBlockLimit) - + // Open trie database with provided config + triedb := trie.NewDatabaseWithConfig(db, &trie.Config{ + Cache: cacheConfig.TrieCleanLimit, + Journal: cacheConfig.TrieCleanJournal, + Preimages: cacheConfig.Preimages, + StatsPrefix: trieCleanCacheStatsNamespace, + }) // Setup the genesis block, commit the provided genesis specification // to database if the genesis block is not present yet, or load the // stored one from database. // Note: In go-ethereum, the code rewinds the chain on an incompatible config upgrade. // We don't do this and expect the node operator to always update their node's configuration // before network upgrades take effect. - chainConfig, _, err := SetupGenesisBlock(db, genesis, lastAcceptedHash, skipChainConfigCheckCompatible) + chainConfig, _, err := SetupGenesisBlock(db, triedb, genesis, lastAcceptedHash, skipChainConfigCheckCompatible) if err != nil { return nil, err } log.Info("") log.Info(strings.Repeat("-", 153)) - for _, line := range strings.Split(chainConfig.String(), "\n") { + for _, line := range strings.Split(chainConfig.Description(), "\n") { log.Info(line) } log.Info(strings.Repeat("-", 153)) log.Info("") bc := &BlockChain{ - chainConfig: chainConfig, - cacheConfig: cacheConfig, - db: db, - stateCache: state.NewDatabaseWithConfig(db, &trie.Config{ - Cache: cacheConfig.TrieCleanLimit, - Journal: cacheConfig.TrieCleanJournal, - Preimages: cacheConfig.Preimages, - StatsPrefix: trieCleanCacheStatsNamespace, - }), - bodyCache: bodyCache, - receiptsCache: receiptsCache, - blockCache: blockCache, - txLookupCache: txLookupCache, + chainConfig: chainConfig, + cacheConfig: cacheConfig, + db: db, + triedb: triedb, + bodyCache: lru.NewCache[common.Hash, *types.Body](bodyCacheLimit), + receiptsCache: lru.NewCache[common.Hash, []*types.Receipt](receiptsCacheLimit), + blockCache: lru.NewCache[common.Hash, *types.Block](blockCacheLimit), + txLookupCache: lru.NewCache[common.Hash, 
*rawdb.LegacyTxLookupEntry](txLookupCacheLimit), + badBlocks: lru.NewCache[common.Hash, *badBlock](badBlockLimit), engine: engine, vmConfig: vmConfig, - badBlocks: badBlocks, - senderCacher: newTxSenderCacher(runtime.NumCPU()), + senderCacher: NewTxSenderCacher(runtime.NumCPU()), acceptorQueue: make(chan *types.Block, cacheConfig.AcceptorQueueLimit), quit: make(chan struct{}), acceptedLogsCache: NewFIFOCache[common.Hash, [][]*types.Log](cacheConfig.AcceptedCacheSize), } + bc.stateCache = state.NewDatabaseWithNodeDB(bc.db, bc.triedb) bc.validator = NewBlockValidator(chainConfig, bc, engine) - bc.prefetcher = newStatePrefetcher(chainConfig, bc, engine) bc.processor = NewStateProcessor(chainConfig, bc, engine) bc.hc, err = NewHeaderChain(db, chainConfig, cacheConfig, engine) @@ -346,18 +345,10 @@ func NewBlockChain( return nil, ErrNoGenesis } - var nilBlock *types.Block - bc.currentBlock.Store(nilBlock) + bc.currentBlock.Store(nil) // Create the state manager - bc.stateManager = NewTrieWriter(bc.stateCache.TrieDB(), cacheConfig) - - // loadLastState writes indices, so we should start the tx indexer after that. - // Start tx indexer/unindexer here. 
- if bc.cacheConfig.TxLookupLimit != 0 { - bc.wg.Add(1) - go bc.dispatchTxUnindexer() - } + bc.stateManager = NewTrieWriter(bc.triedb, cacheConfig) // Re-generate current block state if it is missing if err := bc.loadLastState(lastAcceptedHash); err != nil { @@ -373,8 +364,8 @@ func NewBlockChain( // Make sure the state associated with the block is available head := bc.CurrentBlock() - if !bc.HasState(head.Root()) { - return nil, fmt.Errorf("head state missing %d:%s", head.Number(), head.Hash()) + if !bc.HasState(head.Root) { + return nil, fmt.Errorf("head state missing %d:%s", head.Number, head.Hash()) } if err := bc.protectTrieIndex(); err != nil { @@ -405,17 +396,36 @@ func NewBlockChain( if bc.cacheConfig.TrieCleanRejournal > 0 && len(bc.cacheConfig.TrieCleanJournal) > 0 { log.Info("Starting to save trie clean cache periodically", "journalDir", bc.cacheConfig.TrieCleanJournal, "freq", bc.cacheConfig.TrieCleanRejournal) - triedb := bc.stateCache.TrieDB() bc.wg.Add(1) go func() { defer bc.wg.Done() - triedb.SaveCachePeriodically(bc.cacheConfig.TrieCleanJournal, bc.cacheConfig.TrieCleanRejournal, bc.quit) + bc.triedb.SaveCachePeriodically(bc.cacheConfig.TrieCleanJournal, bc.cacheConfig.TrieCleanRejournal, bc.quit) }() } + // Start tx indexer/unindexer if required. + if bc.cacheConfig.TxLookupLimit != 0 { + bc.wg.Add(1) + go bc.dispatchTxUnindexer() + } return bc, nil } +// unindexBlocks unindexes transactions depending on user configuration +func (bc *BlockChain) unindexBlocks(tail uint64, head uint64, done chan struct{}) { + start := time.Now() + txLookupLimit := bc.cacheConfig.TxLookupLimit + defer func() { + txUnindexTimer.Inc(time.Since(start).Milliseconds()) + close(done) + }() + + if head-txLookupLimit+1 >= tail { + // Unindex a part of stale indices and forward index tail to HEAD-limit + rawdb.UnindexTransactions(bc.db, tail, head-txLookupLimit+1, bc.quit) + } +} + // dispatchTxUnindexer is responsible for the deletion of the // transaction index. 
// Invariant: If TxLookupLimit is 0, it means all tx indices will be preserved. @@ -430,20 +440,6 @@ func (bc *BlockChain) dispatchTxUnindexer() { rawdb.WriteTxIndexTail(bc.db, 0) } - // unindexes transactions depending on user configuration - unindexBlocks := func(tail uint64, head uint64, done chan struct{}) { - start := time.Now() - defer func() { - txUnindexTimer.Inc(time.Since(start).Milliseconds()) - done <- struct{}{} - }() - - // Update the transaction index to the new chain state - if head-txLookupLimit+1 >= tail { - // Unindex a part of stale indices and forward index tail to HEAD-limit - rawdb.UnindexTransactions(bc.db, tail, head-txLookupLimit+1, bc.quit) - } - } // Any reindexing done, start listening to chain events and moving the index window var ( done chan struct{} // Non-nil if background unindexing or reindexing routine is active. @@ -468,7 +464,7 @@ func (bc *BlockChain) dispatchTxUnindexer() { done = make(chan struct{}) // Note: tail will not be nil since it is initialized in this function. 
tail := rawdb.ReadTxIndexTail(bc.db) - go unindexBlocks(*tail, headNum, done) + go bc.unindexBlocks(*tail, headNum, done) } case <-done: done = nil @@ -488,7 +484,9 @@ func (bc *BlockChain) dispatchTxUnindexer() { // - updating the acceptor tip index func (bc *BlockChain) writeBlockAcceptedIndices(b *types.Block) error { batch := bc.db.NewBatch() - rawdb.WriteTxLookupEntriesByBlock(batch, b) + if !bc.cacheConfig.SkipTxIndexing { + rawdb.WriteTxLookupEntriesByBlock(batch, b) + } if err := rawdb.WriteAcceptorTip(batch, b.Hash()); err != nil { return fmt.Errorf("%w: failed to write acceptor tip key", err) } @@ -549,14 +547,16 @@ func (bc *BlockChain) warmAcceptedCaches() { startIndex = lastAccepted - cacheDiff } for i := startIndex; i <= lastAccepted; i++ { - header := bc.GetHeaderByNumber(i) - if header == nil { + block := bc.GetBlockByNumber(i) + if block == nil { // This could happen if a node state-synced log.Info("Exiting accepted cache warming early because header is nil", "height", i, "t", time.Since(startTime)) break } - bc.hc.acceptedNumberCache.Put(header.Number.Uint64(), header) - bc.acceptedLogsCache.Put(header.Hash(), rawdb.ReadLogs(bc.db, header.Hash(), header.Number.Uint64())) + // TODO: handle blocks written to disk during state sync + bc.hc.acceptedNumberCache.Put(block.NumberU64(), block.Header()) + logs := bc.collectUnflattenedLogs(block, false) + bc.acceptedLogsCache.Put(block.Hash(), logs) } log.Info("Warmed accepted caches", "start", startIndex, "end", lastAccepted, "t", time.Since(startTime)) } @@ -583,9 +583,15 @@ func (bc *BlockChain) startAcceptor() { // Ensure [hc.acceptedNumberCache] and [acceptedLogsCache] have latest content bc.hc.acceptedNumberCache.Put(next.NumberU64(), next.Header()) - logs := rawdb.ReadLogs(bc.db, next.Hash(), next.NumberU64()) + logs := bc.collectUnflattenedLogs(next, false) bc.acceptedLogsCache.Put(next.Hash(), logs) + // Update the acceptor tip before sending events to ensure that any client acting based off of + 
// the events observes the updated acceptorTip on subsequent requests + bc.acceptorTipLock.Lock() + bc.acceptorTip = next + bc.acceptorTipLock.Unlock() + // Update accepted feeds flattenedLogs := types.FlattenLogs(logs) bc.chainAcceptedFeed.Send(ChainEvent{Block: next, Hash: next.Hash(), Logs: flattenedLogs}) @@ -596,9 +602,6 @@ func (bc *BlockChain) startAcceptor() { bc.txAcceptedFeed.Send(NewTxsEvent{next.Transactions()}) } - bc.acceptorTipLock.Lock() - bc.acceptorTip = next - bc.acceptorTipLock.Unlock() bc.acceptorWg.Done() acceptorWorkTimer.Inc(time.Since(start).Milliseconds()) @@ -691,15 +694,15 @@ func (bc *BlockChain) loadLastState(lastAcceptedHash common.Hash) error { return errors.New("could not read head block hash") } // Make sure the entire head block is available - currentBlock := bc.GetBlockByHash(head) - if currentBlock == nil { + headBlock := bc.GetBlockByHash(head) + if headBlock == nil { return fmt.Errorf("could not load head block %s", head.Hex()) } // Everything seems to be fine, set as the head block - bc.currentBlock.Store(currentBlock) + bc.currentBlock.Store(headBlock.Header()) // Restore the last known head header - currentHeader := currentBlock.Header() + currentHeader := headBlock.Header() if head := rawdb.ReadHeadHeaderHash(bc.db); head != (common.Hash{}) { if header := bc.GetHeaderByHash(head); header != nil { currentHeader = header @@ -708,7 +711,7 @@ func (bc *BlockChain) loadLastState(lastAcceptedHash common.Hash) error { bc.hc.SetCurrentHeader(currentHeader) log.Info("Loaded most recent local header", "number", currentHeader.Number, "hash", currentHeader.Hash(), "age", common.PrettyAge(time.Unix(int64(currentHeader.Time), 0))) - log.Info("Loaded most recent local full block", "number", currentBlock.Number(), "hash", currentBlock.Hash(), "age", common.PrettyAge(time.Unix(int64(currentBlock.Time()), 0))) + log.Info("Loaded most recent local full block", "number", headBlock.Number(), "hash", headBlock.Hash(), "age", 
common.PrettyAge(time.Unix(int64(headBlock.Time()), 0))) // Otherwise, set the last accepted block and perform a re-org. bc.lastAccepted = bc.GetBlockByHash(lastAcceptedHash) @@ -738,7 +741,7 @@ func (bc *BlockChain) loadGenesisState() error { // Last update all in-memory chain markers bc.lastAccepted = bc.genesisBlock - bc.currentBlock.Store(bc.genesisBlock) + bc.currentBlock.Store(bc.genesisBlock.Header()) bc.hc.SetGenesis(bc.genesisBlock.Header()) bc.hc.SetCurrentHeader(bc.genesisBlock.Header()) return nil @@ -746,7 +749,7 @@ func (bc *BlockChain) loadGenesisState() error { // Export writes the active chain to the given writer. func (bc *BlockChain) Export(w io.Writer) error { - return bc.ExportN(w, uint64(0), bc.CurrentBlock().NumberU64()) + return bc.ExportN(w, uint64(0), bc.CurrentBlock().Number.Uint64()) } // ExportN writes a subset of the active chain to the given writer. @@ -808,7 +811,7 @@ func (bc *BlockChain) writeHeadBlock(block *types.Block) { } // Update all in-memory chain markers in the last step bc.hc.SetCurrentHeader(block.Header()) - bc.currentBlock.Store(block) + bc.currentBlock.Store(block.Header()) } // ValidateCanonicalChain confirms a canonical chain is well-formed. 
@@ -818,19 +821,19 @@ func (bc *BlockChain) ValidateCanonicalChain() error { current := bc.CurrentBlock() i := 0 - log.Info("Beginning to validate canonical chain", "startBlock", current.NumberU64()) + log.Info("Beginning to validate canonical chain", "startBlock", current.Number) for current.Hash() != bc.genesisBlock.Hash() { blkByHash := bc.GetBlockByHash(current.Hash()) if blkByHash == nil { - return fmt.Errorf("couldn't find block by hash %s at height %d", current.Hash().String(), current.Number()) + return fmt.Errorf("couldn't find block by hash %s at height %d", current.Hash().String(), current.Number) } if blkByHash.Hash() != current.Hash() { return fmt.Errorf("blockByHash returned a block with an unexpected hash: %s, expected: %s", blkByHash.Hash().String(), current.Hash().String()) } - blkByNumber := bc.GetBlockByNumber(current.Number().Uint64()) + blkByNumber := bc.GetBlockByNumber(current.Number.Uint64()) if blkByNumber == nil { - return fmt.Errorf("couldn't find block by number at height %d", current.Number()) + return fmt.Errorf("couldn't find block by number at height %d", current.Number) } if blkByNumber.Hash() != current.Hash() { return fmt.Errorf("blockByNumber returned a block with unexpected hash: %s, expected: %s", blkByNumber.Hash().String(), current.Hash().String()) @@ -838,26 +841,33 @@ func (bc *BlockChain) ValidateCanonicalChain() error { hdrByHash := bc.GetHeaderByHash(current.Hash()) if hdrByHash == nil { - return fmt.Errorf("couldn't find block header by hash %s at height %d", current.Hash().String(), current.Number()) + return fmt.Errorf("couldn't find block header by hash %s at height %d", current.Hash().String(), current.Number) } if hdrByHash.Hash() != current.Hash() { return fmt.Errorf("hdrByHash returned a block header with an unexpected hash: %s, expected: %s", hdrByHash.Hash().String(), current.Hash().String()) } - hdrByNumber := bc.GetHeaderByNumber(current.Number().Uint64()) + hdrByNumber := 
bc.GetHeaderByNumber(current.Number.Uint64()) if hdrByNumber == nil { - return fmt.Errorf("couldn't find block header by number at height %d", current.Number()) + return fmt.Errorf("couldn't find block header by number at height %d", current.Number) } if hdrByNumber.Hash() != current.Hash() { return fmt.Errorf("hdrByNumber returned a block header with unexpected hash: %s, expected: %s", hdrByNumber.Hash().String(), current.Hash().String()) } - txs := current.Body().Transactions + // Lookup the full block to get the transactions + block := bc.GetBlock(current.Hash(), current.Number.Uint64()) + if block == nil { + log.Error("Current block not found in database", "block", current.Number, "hash", current.Hash()) + return fmt.Errorf("current block missing: #%d [%x..]", current.Number, current.Hash().Bytes()[:4]) + } + txs := block.Transactions() // Transactions are only indexed beneath the last accepted block, so we only check // that the transactions have been indexed, if we are checking below the last accepted // block. 
- shouldIndexTxs := bc.cacheConfig.TxLookupLimit == 0 || bc.lastAccepted.NumberU64() < current.NumberU64()+bc.cacheConfig.TxLookupLimit - if current.NumberU64() <= bc.lastAccepted.NumberU64() && shouldIndexTxs { + shouldIndexTxs := !bc.cacheConfig.SkipTxIndexing && + (bc.cacheConfig.TxLookupLimit == 0 || bc.lastAccepted.NumberU64() < current.Number.Uint64()+bc.cacheConfig.TxLookupLimit) + if current.Number.Uint64() <= bc.lastAccepted.NumberU64() && shouldIndexTxs { // Ensure that all of the transactions have been stored correctly in the canonical // chain for txIndex, tx := range txs { @@ -868,8 +878,8 @@ func (bc *BlockChain) ValidateCanonicalChain() error { if txLookup.BlockHash != current.Hash() { return fmt.Errorf("tx lookup returned with incorrect block hash: %s, expected: %s", txLookup.BlockHash.String(), current.Hash().String()) } - if txLookup.BlockIndex != current.Number().Uint64() { - return fmt.Errorf("tx lookup returned with incorrect block index: %d, expected: %d", txLookup.BlockIndex, current.Number().Uint64()) + if txLookup.BlockIndex != current.Number.Uint64() { + return fmt.Errorf("tx lookup returned with incorrect block index: %d, expected: %d", txLookup.BlockIndex, current.Number) } if txLookup.Index != uint64(txIndex) { return fmt.Errorf("tx lookup returned with incorrect transaction index: %d, expected: %d", txLookup.Index, txIndex) @@ -888,8 +898,8 @@ func (bc *BlockChain) ValidateCanonicalChain() error { if txReceipt.BlockHash != current.Hash() { return fmt.Errorf("transaction receipt had block hash %s, but expected %s", txReceipt.BlockHash.String(), current.Hash().String()) } - if txReceipt.BlockNumber.Uint64() != current.NumberU64() { - return fmt.Errorf("transaction receipt had block number %d, but expected %d", txReceipt.BlockNumber.Uint64(), current.NumberU64()) + if txReceipt.BlockNumber.Uint64() != current.Number.Uint64() { + return fmt.Errorf("transaction receipt had block number %d, but expected %d", txReceipt.BlockNumber.Uint64(), 
current.Number) } } @@ -898,9 +908,9 @@ func (bc *BlockChain) ValidateCanonicalChain() error { log.Info("Validate Canonical Chain Update", "totalBlocks", i) } - parent := bc.GetBlockByHash(current.ParentHash()) - if parent.Hash() != current.ParentHash() { - return fmt.Errorf("getBlockByHash retrieved parent block with incorrect hash, found %s, expected: %s", parent.Hash().String(), current.ParentHash().String()) + parent := bc.GetHeaderByHash(current.ParentHash) + if parent.Hash() != current.ParentHash { + return fmt.Errorf("getBlockByHash retrieved parent block with incorrect hash, found %s, expected: %s", parent.Hash().String(), current.ParentHash.String()) } current = parent } @@ -908,10 +918,14 @@ func (bc *BlockChain) ValidateCanonicalChain() error { return nil } -// Stop stops the blockchain service. If any imports are currently in progress -// it will abort them using the procInterrupt. -func (bc *BlockChain) Stop() { - if !atomic.CompareAndSwapInt32(&bc.running, 0, 1) { +// stopWithoutSaving stops the blockchain service. If any imports are currently in progress +// it will abort them using the procInterrupt. This method stops all running +// goroutines, but does not do all the post-stop work of persisting data. +// OBS! It is generally recommended to use the Stop method! +// This method has been exposed to allow tests to stop the blockchain while simulating +// a crash. 
+func (bc *BlockChain) stopWithoutSaving() { + if !bc.stopping.CompareAndSwap(false, true) { return } @@ -923,17 +937,6 @@ func (bc *BlockChain) Stop() { bc.stopAcceptor() log.Info("Acceptor queue drained", "t", time.Since(start)) - log.Info("Shutting down state manager") - start = time.Now() - if err := bc.stateManager.Shutdown(); err != nil { - log.Error("Failed to Shutdown state manager", "err", err) - } - log.Info("State manager shut down", "t", time.Since(start)) - // Flush the collected preimages to disk - if err := bc.stateCache.TrieDB().CommitPreimages(); err != nil { - log.Error("Failed to commit trie preimages", "err", err) - } - // Stop senderCacher's goroutines log.Info("Shutting down sender cacher") bc.senderCacher.Shutdown() @@ -945,6 +948,23 @@ func (bc *BlockChain) Stop() { // Waiting for background processes to complete log.Info("Waiting for background processes to complete") bc.wg.Wait() +} + +// Stop stops the blockchain service. If any imports are currently in progress +// it will abort them using the procInterrupt. 
+func (bc *BlockChain) Stop() { + bc.stopWithoutSaving() + + log.Info("Shutting down state manager") + start := time.Now() + if err := bc.stateManager.Shutdown(); err != nil { + log.Error("Failed to Shutdown state manager", "err", err) + } + log.Info("State manager shut down", "t", time.Since(start)) + // Flush the collected preimages to disk + if err := bc.stateCache.TrieDB().Close(); err != nil { + log.Error("Failed to close trie db", "err", err) + } log.Info("Blockchain stopped") } @@ -1231,7 +1251,7 @@ func (bc *BlockChain) InsertBlockManual(block *types.Block, writes bool) error { func (bc *BlockChain) insertBlock(block *types.Block, writes bool) error { start := time.Now() - bc.senderCacher.Recover(types.MakeSigner(bc.chainConfig, block.Number(), new(big.Int).SetUint64(block.Time())), block.Transactions()) + bc.senderCacher.Recover(types.MakeSigner(bc.chainConfig, block.Number(), block.Time()), block.Transactions()) substart := time.Now() err := bc.engine.VerifyHeader(bc, block.Header()) @@ -1296,13 +1316,11 @@ func (bc *BlockChain) insertBlock(block *types.Block, writes bool) error { blockStateInitTimer.Inc(time.Since(substart).Milliseconds()) // Enable prefetching to pull in trie node paths while processing transactions - statedb.StartPrefetcher("chain") + statedb.StartPrefetcher("chain", bc.cacheConfig.TriePrefetcherParallelism) activeState = statedb - // If we have a followup block, run that against the current state to pre-cache - // transactions and probabilistically some of the account/storage trie nodes. 
// Process block using the parent state as reference point - substart = time.Now() + pstart := time.Now() receipts, logs, usedGas, err := bc.processor.Process(block, parent, statedb, bc.vmConfig) if serr := statedb.Error(); serr != nil { log.Error("statedb error encountered", "err", serr, "number", block.Number(), "hash", block.Hash()) @@ -1311,32 +1329,32 @@ func (bc *BlockChain) insertBlock(block *types.Block, writes bool) error { bc.reportBlock(block, receipts, err) return err } - - // Update the metrics touched during block processing - accountReadTimer.Inc(statedb.AccountReads.Milliseconds()) // Account reads are complete, we can mark them - storageReadTimer.Inc(statedb.StorageReads.Milliseconds()) // Storage reads are complete, we can mark them - snapshotAccountReadTimer.Inc(statedb.SnapshotAccountReads.Milliseconds()) // Account reads are complete, we can mark them - snapshotStorageReadTimer.Inc(statedb.SnapshotStorageReads.Milliseconds()) // Storage reads are complete, we can mark them - trieproc := statedb.AccountHashes + statedb.StorageHashes // Save to not double count in validation - trieproc += statedb.SnapshotAccountReads + statedb.AccountReads + statedb.AccountUpdates - trieproc += statedb.SnapshotStorageReads + statedb.StorageReads + statedb.StorageUpdates - blockExecutionTimer.Inc((time.Since(substart) - trieproc).Milliseconds()) + ptime := time.Since(pstart) // Validate the state using the default validator - substart = time.Now() + vstart := time.Now() if err := bc.validator.ValidateState(block, statedb, receipts, usedGas); err != nil { bc.reportBlock(block, receipts, err) return err } - - // Update the metrics touched during block validation - accountUpdateTimer.Inc(statedb.AccountUpdates.Milliseconds()) // Account updates are complete, we can mark them - storageUpdateTimer.Inc(statedb.StorageUpdates.Milliseconds()) // Storage updates are complete, we can mark them - accountHashTimer.Inc(statedb.AccountHashes.Milliseconds()) // Account hashes 
are complete, we can mark them - storageHashTimer.Inc(statedb.StorageHashes.Milliseconds()) // Storage hashes are complete, we can mark them - additionalTrieProc := statedb.AccountHashes + statedb.StorageHashes + statedb.AccountUpdates + statedb.StorageUpdates - trieproc - blockStateValidationTimer.Inc((time.Since(substart) - additionalTrieProc).Milliseconds()) - blockTrieOpsTimer.Inc((trieproc + additionalTrieProc).Milliseconds()) + vtime := time.Since(vstart) + + // Update the metrics touched during block processing and validation + accountReadTimer.Inc(statedb.AccountReads.Milliseconds()) // Account reads are complete(in processing) + storageReadTimer.Inc(statedb.StorageReads.Milliseconds()) // Storage reads are complete(in processing) + snapshotAccountReadTimer.Inc(statedb.SnapshotAccountReads.Milliseconds()) // Account reads are complete(in processing) + snapshotStorageReadTimer.Inc(statedb.SnapshotStorageReads.Milliseconds()) // Storage reads are complete(in processing) + accountUpdateTimer.Inc(statedb.AccountUpdates.Milliseconds()) // Account updates are complete(in validation) + storageUpdateTimer.Inc(statedb.StorageUpdates.Milliseconds()) // Storage updates are complete(in validation) + accountHashTimer.Inc(statedb.AccountHashes.Milliseconds()) // Account hashes are complete(in validation) + storageHashTimer.Inc(statedb.StorageHashes.Milliseconds()) // Storage hashes are complete(in validation) + triehash := statedb.AccountHashes + statedb.StorageHashes // The time spent on tries hashing + trieUpdate := statedb.AccountUpdates + statedb.StorageUpdates // The time spent on tries update + trieRead := statedb.SnapshotAccountReads + statedb.AccountReads // The time spent on account read + trieRead += statedb.SnapshotStorageReads + statedb.StorageReads // The time spent on storage read + blockExecutionTimer.Inc((ptime - trieRead).Milliseconds()) // The time spent on EVM processing + blockValidationTimer.Inc((vtime - (triehash + trieUpdate)).Milliseconds()) // 
The time spent on block validation + blockTrieOpsTimer.Inc((triehash + trieUpdate + trieRead).Milliseconds()) // The time spent on trie operations // If [writes] are disabled, skip [writeBlockWithState] so that we do not write the block // or the state trie to disk. @@ -1349,7 +1367,7 @@ func (bc *BlockChain) insertBlock(block *types.Block, writes bool) error { // writeBlockWithState (called within writeBlockAndSethead) creates a reference that // will be cleaned up in Accept/Reject so we need to ensure an error cannot occur // later in verification, since that would cause the referenced root to never be dereferenced. - substart = time.Now() + wstart := time.Now() if err := bc.writeBlockAndSetHead(block, receipts, logs, statedb); err != nil { return err } @@ -1357,8 +1375,8 @@ func (bc *BlockChain) insertBlock(block *types.Block, writes bool) error { accountCommitTimer.Inc(statedb.AccountCommits.Milliseconds()) // Account commits are complete, we can mark them storageCommitTimer.Inc(statedb.StorageCommits.Milliseconds()) // Storage commits are complete, we can mark them snapshotCommitTimer.Inc(statedb.SnapshotCommits.Milliseconds()) // Snapshot commits are complete, we can mark them - triedbCommitTimer.Inc(statedb.TrieDBCommits.Milliseconds()) // Triedb commits are complete, we can mark them - blockWriteTimer.Inc((time.Since(substart) - statedb.AccountCommits - statedb.StorageCommits - statedb.SnapshotCommits - statedb.TrieDBCommits).Milliseconds()) + triedbCommitTimer.Inc(statedb.TrieDBCommits.Milliseconds()) // Trie database commits are complete, we can mark them + blockWriteTimer.Inc((time.Since(wstart) - statedb.AccountCommits - statedb.StorageCommits - statedb.SnapshotCommits - statedb.TrieDBCommits).Milliseconds()) blockInsertTimer.Inc(time.Since(start).Milliseconds()) log.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(), @@ -1375,41 +1393,51 @@ func (bc *BlockChain) insertBlock(block *types.Block, writes bool) error { return nil } 
-// collectLogs collects the logs that were generated or removed during -// the processing of the block that corresponds with the given hash. -// These logs are later announced as deleted or reborn. -func (bc *BlockChain) collectLogs(hash common.Hash, removed bool) []*types.Log { - number := bc.hc.GetBlockNumber(hash) - if number == nil { - return nil - } - receipts := rawdb.ReadReceipts(bc.db, hash, *number, bc.chainConfig) - - var logs []*types.Log - for _, receipt := range receipts { - for _, log := range receipt.Logs { +// collectUnflattenedLogs collects the logs that were generated or removed during +// the processing of a block. +func (bc *BlockChain) collectUnflattenedLogs(b *types.Block, removed bool) [][]*types.Log { + receipts := rawdb.ReadRawReceipts(bc.db, b.Hash(), b.NumberU64()) + receipts.DeriveFields(bc.chainConfig, b.Hash(), b.NumberU64(), b.Time(), b.BaseFee(), b.Transactions()) + + // Note: gross but this needs to be initialized here because returning nil will be treated specially as an incorrect + // error case downstream. + logs := make([][]*types.Log, len(receipts)) + for i, receipt := range receipts { + receiptLogs := make([]*types.Log, len(receipt.Logs)) + for i, log := range receipt.Logs { l := *log if removed { l.Removed = true } - logs = append(logs, &l) + receiptLogs[i] = &l } + logs[i] = receiptLogs } return logs } +// collectLogs collects the logs that were generated or removed during +// the processing of a block. These logs are later announced as deleted or reborn. +func (bc *BlockChain) collectLogs(b *types.Block, removed bool) []*types.Log { + unflattenedLogs := bc.collectUnflattenedLogs(b, removed) + return types.FlattenLogs(unflattenedLogs) +} + // reorg takes two blocks, an old chain and a new chain and will reconstruct the // blocks and inserts them to be part of the new canonical chain and accumulates // potential missing transactions and post an event about them. 
-func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error { +func (bc *BlockChain) reorg(oldHead *types.Header, newHead *types.Block) error { var ( - newHead = newBlock - oldHead = oldBlock - newChain types.Blocks oldChain types.Blocks commonBlock *types.Block ) + oldBlock := bc.GetBlock(oldHead.Hash(), oldHead.Number.Uint64()) + if oldBlock == nil { + return errors.New("current head block missing") + } + newBlock := newHead + // Reduce the longer chain to the same number as the shorter one if oldBlock.NumberU64() > newBlock.NumberU64() { // Old chain is longer, gather all transactions and logs as deleted ones @@ -1423,10 +1451,10 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error { } } if oldBlock == nil { - return fmt.Errorf("invalid old chain") + return errors.New("invalid old chain") } if newBlock == nil { - return fmt.Errorf("invalid new chain") + return errors.New("invalid new chain") } // Both sides of the reorg are at the same number, reduce both until the common // ancestor is found @@ -1469,7 +1497,7 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error { logFn(msg, "number", commonBlock.Number(), "hash", commonBlock.Hash(), "drop", len(oldChain), "dropfrom", oldChain[0].Hash(), "add", len(newChain), "addfrom", newChain[0].Hash()) } else { - log.Debug("Preference change (rewind to ancestor) occurred", "oldnum", oldHead.Number(), "oldhash", oldHead.Hash(), "newnum", newHead.Number(), "newhash", newHead.Hash()) + log.Debug("Preference change (rewind to ancestor) occurred", "oldnum", oldHead.Number, "oldhash", oldHead.Hash(), "newnum", newHead.Number(), "newhash", newHead.Hash()) } // Insert the new chain(except the head block(reverse order)), // taking care of the proper incremental order. 
@@ -1507,7 +1535,7 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error { bc.chainSideFeed.Send(ChainSideEvent{Block: oldChain[i]}) // Collect deleted logs for notification - if logs := bc.collectLogs(oldChain[i].Hash(), true); len(logs) > 0 { + if logs := bc.collectLogs(oldChain[i], true); len(logs) > 0 { deletedLogs = append(deletedLogs, logs...) } if len(deletedLogs) > 512 { @@ -1522,7 +1550,7 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error { // New logs: var rebirthLogs []*types.Log for i := len(newChain) - 1; i >= 1; i-- { - if logs := bc.collectLogs(newChain[i].Hash(), false); len(logs) > 0 { + if logs := bc.collectLogs(newChain[i], false); len(logs) > 0 { rebirthLogs = append(rebirthLogs, logs...) } if len(rebirthLogs) > 512 { @@ -1552,23 +1580,24 @@ type BadBlockReason struct { func (b *BadBlockReason) String() string { var receiptString string for i, receipt := range b.Receipts { - receiptString += fmt.Sprintf("\t %d: cumulative: %v gas: %v contract: %v status: %v tx: %v logs: %v bloom: %x state: %x\n", + receiptString += fmt.Sprintf("\n %d: cumulative: %v gas: %v contract: %v status: %v tx: %v logs: %v bloom: %x state: %x", i, receipt.CumulativeGasUsed, receipt.GasUsed, receipt.ContractAddress.Hex(), receipt.Status, receipt.TxHash.Hex(), receipt.Logs, receipt.Bloom, receipt.PostState) } - reason := fmt.Sprintf(` - ########## BAD BLOCK ######### - Chain config: %v - - Number: %v - Hash: %#x - %v - - Error: %s - ############################## - `, b.ChainConfig, b.Number, b.Hash, receiptString, b.Error) - - return reason + version, vcs := version.Info() + platform := fmt.Sprintf("%s %s %s %s", version, runtime.Version(), runtime.GOARCH, runtime.GOOS) + if vcs != "" { + vcs = fmt.Sprintf("\nVCS: %s", vcs) + } + return fmt.Sprintf(` +########## BAD BLOCK ######### +Block: %v (%#x) +Error: %v +Platform: %v%v +Chain config: %#v +Receipts: %v +############################## +`, b.Number, b.Hash, b.Error, platform, vcs, 
b.ChainConfig, receiptString) } // BadBlocks returns a list of the last 'bad blocks' that the client has seen on the network and the BadBlockReason @@ -1578,8 +1607,7 @@ func (bc *BlockChain) BadBlocks() ([]*types.Block, []*BadBlockReason) { blocks := make([]*types.Block, 0, bc.badBlocks.Len()) reasons := make([]*BadBlockReason, 0, bc.badBlocks.Len()) for _, hash := range bc.badBlocks.Keys() { - if blk, exist := bc.badBlocks.Peek(hash); exist { - badBlk := blk.(*badBlock) + if badBlk, exist := bc.badBlocks.Peek(hash); exist { blocks = append(blocks, badBlk.block) reasons = append(reasons, badBlk.reason) } @@ -1661,7 +1689,7 @@ func (bc *BlockChain) reprocessBlock(parent *types.Block, current *types.Block) } // Enable prefetching to pull in trie node paths while processing transactions - statedb.StartPrefetcher("chain") + statedb.StartPrefetcher("chain", bc.cacheConfig.TriePrefetcherParallelism) defer func() { statedb.StopPrefetcher() }() @@ -1688,7 +1716,7 @@ func (bc *BlockChain) reprocessBlock(parent *types.Block, current *types.Block) } // initSnapshot instantiates a Snapshot instance and adds it to [bc] -func (bc *BlockChain) initSnapshot(b *types.Block) { +func (bc *BlockChain) initSnapshot(b *types.Header) { if bc.cacheConfig.SnapshotLimit <= 0 || bc.snaps != nil { return } @@ -1699,13 +1727,19 @@ func (bc *BlockChain) initSnapshot(b *types.Block) { // // Additionally, we should always repair a snapshot if starting at genesis // if [SnapshotLimit] > 0. 
- async := bc.cacheConfig.SnapshotAsync && b.NumberU64() > 0 - rebuild := !bc.cacheConfig.SkipSnapshotRebuild || b.NumberU64() == 0 - log.Info("Initializing snapshots", "async", async, "rebuild", rebuild, "headHash", b.Hash(), "headRoot", b.Root()) + asyncBuild := !bc.cacheConfig.SnapshotWait && b.Number.Uint64() > 0 + noBuild := bc.cacheConfig.SnapshotNoBuild && b.Number.Uint64() > 0 + log.Info("Initializing snapshots", "async", asyncBuild, "rebuild", !noBuild, "headHash", b.Hash(), "headRoot", b.Root) + snapconfig := snapshot.Config{ + CacheSize: bc.cacheConfig.SnapshotLimit, + NoBuild: noBuild, + AsyncBuild: asyncBuild, + SkipVerify: !bc.cacheConfig.SnapshotVerify, + } var err error - bc.snaps, err = snapshot.New(bc.db, bc.stateCache.TrieDB(), bc.cacheConfig.SnapshotLimit, b.Hash(), b.Root(), async, rebuild, bc.cacheConfig.SnapshotVerify) + bc.snaps, err = snapshot.New(snapconfig, bc.db, bc.triedb, b.Hash(), b.Root) if err != nil { - log.Error("failed to initialize snapshots", "headHash", b.Hash(), "headRoot", b.Root(), "err", err, "async", async) + log.Error("failed to initialize snapshots", "headHash", b.Hash(), "headRoot", b.Root, "err", err, "async", asyncBuild) } } @@ -1775,7 +1809,7 @@ func (bc *BlockChain) reprocessState(current *types.Block, reexec uint64) error start = time.Now() logged time.Time previousRoot common.Hash - triedb = bc.stateCache.TrieDB() + triedb = bc.triedb writeIndices bool ) // Note: we add 1 since in each iteration, we attempt to re-execute the next block. @@ -1805,7 +1839,7 @@ func (bc *BlockChain) reprocessState(current *types.Block, reexec uint64) error // that the node stops mid-way through snapshot flattening (performed across multiple DB batches). 
// If snapshot initialization is delayed due to state sync, skip initializing snaps here if !bc.cacheConfig.SnapshotDelayInit { - bc.initSnapshot(parent) + bc.initSnapshot(parent.Header()) } writeIndices = true // Set [writeIndices] to true, so that the indices will be updated from the last accepted tip onwards. } @@ -1840,7 +1874,7 @@ func (bc *BlockChain) reprocessState(current *types.Block, reexec uint64) error nodes, imgs := triedb.Size() log.Info("Historical state regenerated", "block", current.NumberU64(), "elapsed", time.Since(start), "nodes", nodes, "preimages", imgs) if previousRoot != (common.Hash{}) { - return triedb.Commit(previousRoot, true, nil) + return triedb.Commit(previousRoot, true) } return nil } @@ -1877,7 +1911,7 @@ func (bc *BlockChain) populateMissingTries() error { startHeight = *bc.cacheConfig.PopulateMissingTries startTime = time.Now() logged time.Time - triedb = bc.stateCache.TrieDB() + triedb = bc.triedb missing = 0 ) @@ -1923,7 +1957,7 @@ func (bc *BlockChain) populateMissingTries() error { } // Commit root to disk so that it can be accessed directly - if err := triedb.Commit(root, false, nil); err != nil { + if err := triedb.Commit(root, false); err != nil { return err } parent = current @@ -2041,28 +2075,32 @@ func (bc *BlockChain) ResetToStateSyncedBlock(block *types.Block) error { // Update all in-memory chain markers bc.lastAccepted = block bc.acceptorTip = block - bc.currentBlock.Store(block) + bc.currentBlock.Store(block.Header()) bc.hc.SetCurrentHeader(block.Header()) lastAcceptedHash := block.Hash() - bc.stateCache = state.NewDatabaseWithConfig(bc.db, &trie.Config{ - Cache: bc.cacheConfig.TrieCleanLimit, - Journal: bc.cacheConfig.TrieCleanJournal, - Preimages: bc.cacheConfig.Preimages, - StatsPrefix: trieCleanCacheStatsNamespace, - }) + bc.stateCache = state.NewDatabaseWithNodeDB(bc.db, bc.triedb) + if err := bc.loadLastState(lastAcceptedHash); err != nil { return err } // Create the state manager - bc.stateManager = 
NewTrieWriter(bc.stateCache.TrieDB(), bc.cacheConfig) + bc.stateManager = NewTrieWriter(bc.triedb, bc.cacheConfig) // Make sure the state associated with the block is available head := bc.CurrentBlock() - if !bc.HasState(head.Root()) { - return fmt.Errorf("head state missing %d:%s", head.Number(), head.Hash()) + if !bc.HasState(head.Root) { + return fmt.Errorf("head state missing %d:%s", head.Number, head.Hash()) } bc.initSnapshot(head) return nil } + +// CacheConfig returns a reference to [bc.cacheConfig] +// +// This is used by [miner] to set prefetch parallelism +// during block building. +func (bc *BlockChain) CacheConfig() *CacheConfig { + return bc.cacheConfig +} diff --git a/coreth/core/blockchain_log_test.go b/coreth/core/blockchain_log_test.go new file mode 100644 index 00000000..5c7df458 --- /dev/null +++ b/coreth/core/blockchain_log_test.go @@ -0,0 +1,100 @@ +// (c) 2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package core + +import ( + "math/big" + "strings" + "testing" + + "github.com/ava-labs/coreth/accounts/abi" + "github.com/ava-labs/coreth/consensus/dummy" + "github.com/ava-labs/coreth/core/rawdb" + "github.com/ava-labs/coreth/core/types" + "github.com/ava-labs/coreth/core/vm" + "github.com/ava-labs/coreth/params" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/stretchr/testify/require" +) + +func TestAcceptedLogsSubscription(t *testing.T) { + /* + Example contract to test event emission: + + pragma solidity >=0.7.0 <0.9.0; + contract Callable { + event Called(); + function Call() public { emit Called(); } + } + */ + + const ( + callableABI = "[{\"anonymous\":false,\"inputs\":[],\"name\":\"Called\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"Call\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]" + callableBin = 
"6080604052348015600f57600080fd5b5060998061001e6000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c806334e2292114602d575b600080fd5b60336035565b005b7f81fab7a4a0aa961db47eefc81f143a5220e8c8495260dd65b1356f1d19d3c7b860405160405180910390a156fea2646970667358221220029436d24f3ac598ceca41d4d712e13ced6d70727f4cdc580667de66d2f51d8b64736f6c63430008010033" + ) + var ( + require = require.New(t) + engine = dummy.NewCoinbaseFaker() + key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + addr1 = crypto.PubkeyToAddress(key1.PublicKey) + funds = new(big.Int).Mul(big.NewInt(100), big.NewInt(params.Ether)) + gspec = &Genesis{ + Config: params.TestChainConfig, + Alloc: GenesisAlloc{addr1: {Balance: funds}}, + BaseFee: big.NewInt(params.ApricotPhase3InitialBaseFee), + } + contractAddress = crypto.CreateAddress(addr1, 0) + signer = types.LatestSigner(gspec.Config) + ) + + parsed, err := abi.JSON(strings.NewReader(callableABI)) + require.NoError(err) + + packedFunction, err := parsed.Pack("Call") + require.NoError(err) + + _, blocks, _, err := GenerateChainWithGenesis(gspec, engine, 2, 10, func(i int, b *BlockGen) { + switch i { + case 0: + // First, we deploy the contract + contractTx := types.NewContractCreation(0, common.Big0, 200000, big.NewInt(params.ApricotPhase3InitialBaseFee), common.FromHex(callableBin)) + contractSignedTx, err := types.SignTx(contractTx, signer, key1) + require.NoError(err) + b.AddTx(contractSignedTx) + case 1: + // In the next block, we call the contract function + tx := types.NewTransaction(1, contractAddress, common.Big0, 23000, big.NewInt(params.ApricotPhase3InitialBaseFee), packedFunction) + tx, err := types.SignTx(tx, signer, key1) + require.NoError(err) + b.AddTx(tx) + } + }) + require.NoError(err) + + chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfig, gspec, engine, vm.Config{}, common.Hash{}, false) + require.NoError(err) + defer chain.Stop() + + // Create Log 
Subscriber + logsCh := make(chan []*types.Log, 10) + defer close(logsCh) + + sub := chain.SubscribeAcceptedLogsEvent(logsCh) + defer sub.Unsubscribe() + + _, err = chain.InsertChain(blocks) + require.NoError(err) + + for _, block := range blocks { + err := chain.Accept(block) + require.NoError(err) + } + chain.DrainAcceptorQueue() + + logs := <-logsCh + require.Len(logs, 1) + require.Equal(blocks[1].Hash(), logs[0].BlockHash) + require.Equal(blocks[1].Number().Uint64(), logs[0].BlockNumber) +} diff --git a/coreth/core/blockchain_reader.go b/coreth/core/blockchain_reader.go index 96163694..89229ec5 100644 --- a/coreth/core/blockchain_reader.go +++ b/coreth/core/blockchain_reader.go @@ -34,6 +34,7 @@ import ( "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/core/vm" "github.com/ava-labs/coreth/params" + "github.com/ava-labs/coreth/trie" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/event" ) @@ -46,8 +47,8 @@ func (bc *BlockChain) CurrentHeader() *types.Header { // CurrentBlock retrieves the current head block of the canonical chain. The // block is retrieved from the blockchain's internal cache. 
-func (bc *BlockChain) CurrentBlock() *types.Block { - return bc.currentBlock.Load().(*types.Block) +func (bc *BlockChain) CurrentBlock() *types.Header { + return bc.currentBlock.Load() } // HasHeader checks if a block header is present in the database or not, caching @@ -79,8 +80,7 @@ func (bc *BlockChain) GetHeaderByNumber(number uint64) *types.Header { func (bc *BlockChain) GetBody(hash common.Hash) *types.Body { // Short circuit if the body's already in the cache, retrieve otherwise if cached, ok := bc.bodyCache.Get(hash); ok { - body := cached.(*types.Body) - return body + return cached } number := bc.hc.GetBlockNumber(hash) if number == nil { @@ -122,7 +122,7 @@ func (bc *BlockChain) HasFastBlock(hash common.Hash, number uint64) bool { func (bc *BlockChain) GetBlock(hash common.Hash, number uint64) *types.Block { // Short circuit if the block's already in the cache, retrieve otherwise if block, ok := bc.blockCache.Get(hash); ok { - return block.(*types.Block) + return block } block := rawdb.ReadBlock(bc.db, hash, number) if block == nil { @@ -174,13 +174,17 @@ func (bc *BlockChain) GetBlocksFromHash(hash common.Hash, n int) (blocks []*type // GetReceiptsByHash retrieves the receipts for all transactions in a given block. 
func (bc *BlockChain) GetReceiptsByHash(hash common.Hash) types.Receipts { if receipts, ok := bc.receiptsCache.Get(hash); ok { - return receipts.(types.Receipts) + return receipts } number := rawdb.ReadHeaderNumber(bc.db, hash) if number == nil { return nil } - receipts := rawdb.ReadReceipts(bc.db, hash, *number, bc.chainConfig) + header := bc.GetHeader(hash, *number) + if header == nil { + return nil + } + receipts := rawdb.ReadReceipts(bc.db, hash, *number, header.Time, bc.chainConfig) if receipts == nil { return nil } @@ -198,7 +202,7 @@ func (bc *BlockChain) GetCanonicalHash(number uint64) common.Hash { func (bc *BlockChain) GetTransactionLookup(hash common.Hash) *rawdb.LegacyTxLookupEntry { // Short circuit if the txlookup already in the cache, retrieve otherwise if lookup, exist := bc.txLookupCache.Get(hash); exist { - return lookup.(*rawdb.LegacyTxLookupEntry) + return lookup } tx, blockHash, blockNumber, txIndex := rawdb.ReadTransaction(bc.db, hash) if tx == nil { @@ -229,18 +233,12 @@ func (bc *BlockChain) HasBlockAndState(hash common.Hash, number uint64) bool { // TrieNode retrieves a blob of data associated with a trie node // either from ephemeral in-memory cache, or from persistent storage. func (bc *BlockChain) TrieNode(hash common.Hash) ([]byte, error) { - return bc.stateCache.TrieDB().RawNode(hash) -} - -// ContractCode retrieves a blob of data associated with a contract hash -// either from ephemeral in-memory cache, or from persistent storage. -func (bc *BlockChain) ContractCode(hash common.Hash) ([]byte, error) { - return bc.stateCache.ContractCode(common.Hash{}, hash) + return bc.stateCache.TrieDB().Node(hash) } // State returns a new mutable state based on the current HEAD block. func (bc *BlockChain) State() (*state.StateDB, error) { - return bc.StateAt(bc.CurrentBlock().Root()) + return bc.StateAt(bc.CurrentBlock().Root) } // StateAt returns a new mutable state based on a particular point in time. 
@@ -276,7 +274,7 @@ func (bc *BlockChain) StateCache() state.Database { // GasLimit returns the gas limit of the current HEAD block. func (bc *BlockChain) GasLimit() uint64 { - return bc.CurrentBlock().GasLimit() + return bc.CurrentBlock().GasLimit } // Genesis retrieves the chain's genesis block. @@ -289,6 +287,11 @@ func (bc *BlockChain) GetVMConfig() *vm.Config { return &bc.vmConfig } +// TrieDB retrieves the low level trie database used for data storage. +func (bc *BlockChain) TrieDB() *trie.Database { + return bc.triedb +} + // SubscribeRemovedLogsEvent registers a subscription of RemovedLogsEvent. func (bc *BlockChain) SubscribeRemovedLogsEvent(ch chan<- RemovedLogsEvent) event.Subscription { return bc.scope.Track(bc.rmLogsFeed.Subscribe(ch)) @@ -341,5 +344,10 @@ func (bc *BlockChain) GetLogs(hash common.Hash, number uint64) [][]*types.Log { if ok { return logs } - return rawdb.ReadLogs(bc.db, hash, number) + block := bc.GetBlockByHash(hash) + if block == nil { + return nil + } + logs = bc.collectUnflattenedLogs(block, false) + return logs } diff --git a/coreth/core/blockchain_repair_test.go b/coreth/core/blockchain_repair_test.go index add53826..3ec47fe7 100644 --- a/coreth/core/blockchain_repair_test.go +++ b/coreth/core/blockchain_repair_test.go @@ -508,7 +508,9 @@ func testRepair(t *testing.T, tt *rewindTest, snapshots bool) { // Create a temporary persistent database datadir := t.TempDir() - db, err := rawdb.NewLevelDBDatabase(datadir, 0, 0, "", false) + db, err := rawdb.Open(rawdb.OpenOptions{ + Directory: datadir, + }) if err != nil { t.Fatalf("Failed to create persistent database: %v", err) } @@ -522,9 +524,10 @@ func testRepair(t *testing.T, tt *rewindTest, snapshots bool) { } engine = dummy.NewFullFaker() config = &CacheConfig{ - TrieCleanLimit: 256, - TrieDirtyLimit: 256, - SnapshotLimit: 0, // Disable snapshot by default + TrieCleanLimit: 256, + TrieDirtyLimit: 256, + TriePrefetcherParallelism: 4, + SnapshotLimit: 0, // Disable snapshot by default 
} ) defer engine.Close() @@ -535,19 +538,20 @@ func testRepair(t *testing.T, tt *rewindTest, snapshots bool) { if err != nil { t.Fatalf("Failed to create chain: %v", err) } + defer chain.Stop() lastAcceptedHash := chain.GetBlockByNumber(0).Hash() // If sidechain blocks are needed, make a light chain and import it var sideblocks types.Blocks if tt.sidechainBlocks > 0 { - sideblocks, _, _ = GenerateChain(gspec.Config, gspec.ToBlock(nil), engine, rawdb.NewMemoryDatabase(), tt.sidechainBlocks, 10, func(i int, b *BlockGen) { + sideblocks, _, _ = GenerateChain(gspec.Config, gspec.ToBlock(), engine, rawdb.NewMemoryDatabase(), tt.sidechainBlocks, 10, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{0x01}) }) if _, err := chain.InsertChain(sideblocks); err != nil { t.Fatalf("Failed to import side chain: %v", err) } } - canonblocks, _, _ := GenerateChain(gspec.Config, gspec.ToBlock(nil), engine, rawdb.NewMemoryDatabase(), tt.canonicalBlocks, 10, func(i int, b *BlockGen) { + canonblocks, _, _ := GenerateChain(gspec.Config, gspec.ToBlock(), engine, rawdb.NewMemoryDatabase(), tt.canonicalBlocks, 10, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{0x02}) b.SetDifficulty(big.NewInt(1000000)) }) @@ -571,9 +575,12 @@ func testRepair(t *testing.T, tt *rewindTest, snapshots bool) { // Pull the plug on the database, simulating a hard crash db.Close() + chain.stopWithoutSaving() // Start a new blockchain back up and see where the repair leads us - db, err = rawdb.NewLevelDBDatabase(datadir, 0, 0, "", false) + db, err = rawdb.Open(rawdb.OpenOptions{ + Directory: datadir, + }) if err != nil { t.Fatalf("Failed to reopen persistent database: %v", err) } @@ -594,7 +601,7 @@ func testRepair(t *testing.T, tt *rewindTest, snapshots bool) { if head := newChain.CurrentHeader(); head.Number.Uint64() != tt.expHeadBlock { t.Errorf("Head header mismatch: have %d, want %d", head.Number, tt.expHeadBlock) } - if head := newChain.CurrentBlock(); head.NumberU64() != tt.expHeadBlock { - 
t.Errorf("Head block mismatch: have %d, want %d", head.NumberU64(), tt.expHeadBlock) + if head := newChain.CurrentBlock(); head.Number.Uint64() != tt.expHeadBlock { + t.Errorf("Head block mismatch: have %d, want %d", head.Number, tt.expHeadBlock) } } diff --git a/coreth/core/blockchain_snapshot_test.go b/coreth/core/blockchain_snapshot_test.go index 98c7456a..9b17dd66 100644 --- a/coreth/core/blockchain_snapshot_test.go +++ b/coreth/core/blockchain_snapshot_test.go @@ -42,9 +42,9 @@ import ( "github.com/ava-labs/coreth/core/rawdb" "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/core/vm" - "github.com/ava-labs/coreth/ethdb" "github.com/ava-labs/coreth/params" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethdb" ) // snapshotTestBasic wraps the common testing fields in the snapshot tests. @@ -70,7 +70,9 @@ func (basic *snapshotTestBasic) prepare(t *testing.T) (*BlockChain, []*types.Blo // Create a temporary persistent database datadir := t.TempDir() - db, err := rawdb.NewLevelDBDatabase(datadir, 0, 0, "", false) + db, err := rawdb.Open(rawdb.OpenOptions{ + Directory: datadir, + }) if err != nil { t.Fatalf("Failed to create persistent database: %v", err) } @@ -143,8 +145,8 @@ func (basic *snapshotTestBasic) verify(t *testing.T, chain *BlockChain, blocks [ if head := chain.CurrentHeader(); head.Number.Uint64() != basic.expHeadBlock { t.Errorf("Head header mismatch: have %d, want %d", head.Number, basic.expHeadBlock) } - if head := chain.CurrentBlock(); head.NumberU64() != basic.expHeadBlock { - t.Errorf("Head block mismatch: have %d, want %d", head.NumberU64(), basic.expHeadBlock) + if head := chain.CurrentBlock(); head.Number.Uint64() != basic.expHeadBlock { + t.Errorf("Head block mismatch: have %d, want %d", head.Number, basic.expHeadBlock) } // Check the disk layer, ensure they are matched @@ -249,9 +251,12 @@ func (snaptest *crashSnapshotTest) test(t *testing.T) { // Pull the plug on the database, simulating a hard 
crash db := chain.db db.Close() + chain.stopWithoutSaving() // Start a new blockchain back up and see where the repair leads us - newdb, err := rawdb.NewLevelDBDatabase(snaptest.datadir, 0, 0, "", false) + newdb, err := rawdb.Open(rawdb.OpenOptions{ + Directory: snaptest.datadir, + }) if err != nil { t.Fatalf("Failed to reopen persistent database: %v", err) } @@ -364,15 +369,19 @@ func (snaptest *wipeCrashSnapshotTest) test(t *testing.T) { Pruning: true, CommitInterval: 4096, } - _, err = NewBlockChain(snaptest.db, config, snaptest.gspec, snaptest.engine, vm.Config{}, snaptest.lastAcceptedHash, false) + tmp, err := NewBlockChain(snaptest.db, config, snaptest.gspec, snaptest.engine, vm.Config{}, snaptest.lastAcceptedHash, false) if err != nil { t.Fatalf("Failed to recreate chain: %v", err) } + // Simulate the blockchain crash. + tmp.stopWithoutSaving() + newchain, err = NewBlockChain(snaptest.db, DefaultCacheConfig, snaptest.gspec, snaptest.engine, vm.Config{}, snaptest.lastAcceptedHash, false) if err != nil { t.Fatalf("Failed to recreate chain: %v", err) } + defer newchain.Stop() snaptest.verify(t, newchain, blocks) } diff --git a/coreth/core/blockchain_test.go b/coreth/core/blockchain_test.go index 615c1757..84fc6bd8 100644 --- a/coreth/core/blockchain_test.go +++ b/coreth/core/blockchain_test.go @@ -16,10 +16,11 @@ import ( "github.com/ava-labs/coreth/core/state/pruner" "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/core/vm" - "github.com/ava-labs/coreth/ethdb" + "github.com/ava-labs/coreth/eth/tracers/logger" "github.com/ava-labs/coreth/params" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethdb" "github.com/fsnotify/fsnotify" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -27,25 +28,31 @@ import ( var ( archiveConfig = &CacheConfig{ - TrieCleanLimit: 256, - TrieDirtyLimit: 256, - TrieDirtyCommitTarget: 20, - Pruning: false, // Archive mode 
- SnapshotLimit: 256, - AcceptorQueueLimit: 64, + TrieCleanLimit: 256, + TrieDirtyLimit: 256, + TrieDirtyCommitTarget: 20, + TriePrefetcherParallelism: 4, + Pruning: false, // Archive mode + SnapshotLimit: 256, + AcceptorQueueLimit: 64, } pruningConfig = &CacheConfig{ - TrieCleanLimit: 256, - TrieDirtyLimit: 256, - TrieDirtyCommitTarget: 20, - Pruning: true, // Enable pruning - CommitInterval: 4096, - SnapshotLimit: 256, - AcceptorQueueLimit: 64, + TrieCleanLimit: 256, + TrieDirtyLimit: 256, + TrieDirtyCommitTarget: 20, + TriePrefetcherParallelism: 4, + Pruning: true, // Enable pruning + CommitInterval: 4096, + SnapshotLimit: 256, + AcceptorQueueLimit: 64, } ) +func newGwei(n int64) *big.Int { + return new(big.Int).Mul(big.NewInt(n), big.NewInt(params.GWei)) +} + func createBlockChain( db ethdb.Database, cacheConfig *CacheConfig, @@ -57,7 +64,7 @@ func createBlockChain( db, cacheConfig, gspec, - dummy.NewDummyEngine(&TestCallbacks), + dummy.NewFakerWithCallbacks(TestCallbacks), vm.Config{}, lastAcceptedHash, false, @@ -175,12 +182,13 @@ func TestArchiveBlockChainSnapsDisabled(t *testing.T) { return createBlockChain( db, &CacheConfig{ - TrieCleanLimit: 256, - TrieDirtyLimit: 256, - TrieDirtyCommitTarget: 20, - Pruning: false, // Archive mode - SnapshotLimit: 0, // Disable snapshots - AcceptorQueueLimit: 64, + TrieCleanLimit: 256, + TrieDirtyLimit: 256, + TrieDirtyCommitTarget: 20, + TriePrefetcherParallelism: 4, + Pruning: false, // Archive mode + SnapshotLimit: 0, // Disable snapshots + AcceptorQueueLimit: 64, }, gspec, lastAcceptedHash, @@ -209,13 +217,14 @@ func TestPruningBlockChainSnapsDisabled(t *testing.T) { return createBlockChain( db, &CacheConfig{ - TrieCleanLimit: 256, - TrieDirtyLimit: 256, - TrieDirtyCommitTarget: 20, - Pruning: true, // Enable pruning - CommitInterval: 4096, - SnapshotLimit: 0, // Disable snapshots - AcceptorQueueLimit: 64, + TrieCleanLimit: 256, + TrieDirtyLimit: 256, + TrieDirtyCommitTarget: 20, + TriePrefetcherParallelism: 4, + 
Pruning: true, // Enable pruning + CommitInterval: 4096, + SnapshotLimit: 0, // Disable snapshots + AcceptorQueueLimit: 64, }, gspec, lastAcceptedHash, @@ -258,13 +267,14 @@ func TestPruningBlockChainUngracefulShutdownSnapsDisabled(t *testing.T) { blockchain, err := createBlockChain( db, &CacheConfig{ - TrieCleanLimit: 256, - TrieDirtyLimit: 256, - TrieDirtyCommitTarget: 20, - Pruning: true, // Enable pruning - CommitInterval: 4096, - SnapshotLimit: 0, // Disable snapshots - AcceptorQueueLimit: 64, + TrieCleanLimit: 256, + TrieDirtyLimit: 256, + TrieDirtyCommitTarget: 20, + TriePrefetcherParallelism: 4, + Pruning: true, // Enable pruning + CommitInterval: 4096, + SnapshotLimit: 0, // Disable snapshots + AcceptorQueueLimit: 64, }, gspec, lastAcceptedHash, @@ -293,13 +303,14 @@ func TestEnableSnapshots(t *testing.T) { blockchain, err := createBlockChain( db, &CacheConfig{ - TrieCleanLimit: 256, - TrieDirtyLimit: 256, - TrieDirtyCommitTarget: 20, - Pruning: true, // Enable pruning - CommitInterval: 4096, - SnapshotLimit: snapLimit, - AcceptorQueueLimit: 64, + TrieCleanLimit: 256, + TrieDirtyLimit: 256, + TrieDirtyCommitTarget: 20, + TriePrefetcherParallelism: 4, + Pruning: true, // Enable pruning + CommitInterval: 4096, + SnapshotLimit: snapLimit, + AcceptorQueueLimit: 64, }, gspec, lastAcceptedHash, @@ -350,16 +361,25 @@ func TestBlockChainOfflinePruningUngracefulShutdown(t *testing.T) { return blockchain, nil } - tempDir := t.TempDir() if err := blockchain.CleanBlockRootsAboveLastAccepted(); err != nil { return nil, err } - pruner, err := pruner.NewPruner(db, tempDir, 256) + // get the target root to prune to before stopping the blockchain + targetRoot := blockchain.LastAcceptedBlock().Root() + blockchain.Stop() + + tempDir := t.TempDir() + prunerConfig := pruner.Config{ + Datadir: tempDir, + BloomSize: 256, + Cachedir: pruningConfig.TrieCleanJournal, + } + + pruner, err := pruner.NewPruner(db, prunerConfig) if err != nil { return nil, fmt.Errorf("offline pruning 
failed (%s, %d): %w", tempDir, 256, err) } - targetRoot := blockchain.LastAcceptedBlock().Root() if err := pruner.Prune(targetRoot); err != nil { return nil, fmt.Errorf("failed to prune blockchain with target root: %s due to: %w", targetRoot, err) } @@ -367,8 +387,8 @@ func TestBlockChainOfflinePruningUngracefulShutdown(t *testing.T) { return createBlockChain(db, pruningConfig, gspec, lastAcceptedHash) } for _, tt := range tests { + tt := tt t.Run(tt.Name, func(t *testing.T) { - tt := tt t.Parallel() tt.testFunc(t, create) }) @@ -441,6 +461,7 @@ func testRepopulateMissingTriesParallel(t *testing.T, parallelism int) { TrieCleanLimit: 256, TrieDirtyLimit: 256, TrieDirtyCommitTarget: 20, + TriePrefetcherParallelism: 4, Pruning: false, // Archive mode SnapshotLimit: 256, PopulateMissingTries: &startHeight, // Starting point for re-populating. @@ -453,6 +474,7 @@ func testRepopulateMissingTriesParallel(t *testing.T, parallelism int) { if err != nil { t.Fatal(err) } + defer blockchain.Stop() for _, block := range chain { if !blockchain.HasState(block.Root()) { @@ -472,14 +494,15 @@ func TestUngracefulAsyncShutdown(t *testing.T) { var ( create = func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error) { blockchain, err := createBlockChain(db, &CacheConfig{ - TrieCleanLimit: 256, - TrieDirtyLimit: 256, - TrieDirtyCommitTarget: 20, - Pruning: true, - CommitInterval: 4096, - SnapshotLimit: 256, - SkipSnapshotRebuild: true, // Ensure the test errors if snapshot initialization fails - AcceptorQueueLimit: 1000, // ensure channel doesn't block + TrieCleanLimit: 256, + TrieDirtyLimit: 256, + TrieDirtyCommitTarget: 20, + TriePrefetcherParallelism: 4, + Pruning: true, + CommitInterval: 4096, + SnapshotLimit: 256, + SnapshotNoBuild: true, // Ensure the test errors if snapshot initialization fails + AcceptorQueueLimit: 1000, // ensure channel doesn't block }, gspec, lastAcceptedHash) if err != nil { return nil, err @@ -609,6 +632,266 @@ func 
TestUngracefulAsyncShutdown(t *testing.T) { } } +// TODO: simplify the unindexer logic and this test. +func TestTransactionIndices(t *testing.T) { + // Configure and generate a sample block chain + require := require.New(t) + var ( + key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") + addr1 = crypto.PubkeyToAddress(key1.PublicKey) + addr2 = crypto.PubkeyToAddress(key2.PublicKey) + funds = big.NewInt(10000000000000) + gspec = &Genesis{ + Config: ¶ms.ChainConfig{HomesteadBlock: new(big.Int)}, + Alloc: GenesisAlloc{addr1: {Balance: funds}}, + } + signer = types.LatestSigner(gspec.Config) + ) + genDb, blocks, _, err := GenerateChainWithGenesis(gspec, dummy.NewFakerWithCallbacks(TestCallbacks), 128, 10, func(i int, block *BlockGen) { + tx, err := types.SignTx(types.NewTransaction(block.TxNonce(addr1), addr2, big.NewInt(10000), params.TxGas, nil, nil), signer, key1) + require.NoError(err) + block.AddTx(tx) + }) + require.NoError(err) + + blocks2, _, err := GenerateChain(gspec.Config, blocks[len(blocks)-1], dummy.NewFakerWithCallbacks(TestCallbacks), genDb, 10, 10, func(i int, block *BlockGen) { + tx, err := types.SignTx(types.NewTransaction(block.TxNonce(addr1), addr2, big.NewInt(10000), params.TxGas, nil, nil), signer, key1) + require.NoError(err) + block.AddTx(tx) + }) + require.NoError(err) + + check := func(tail *uint64, chain *BlockChain) { + stored := rawdb.ReadTxIndexTail(chain.db) + var tailValue uint64 + if tail == nil { + require.Nil(stored) + tailValue = 0 + } else { + require.EqualValues(*tail, *stored, "expected tail %d, got %d", *tail, *stored) + tailValue = *tail + } + + for i := tailValue; i <= chain.CurrentBlock().Number.Uint64(); i++ { + block := rawdb.ReadBlock(chain.db, rawdb.ReadCanonicalHash(chain.db, i), i) + if block.Transactions().Len() == 0 { + continue + } + for _, tx := range block.Transactions() { + 
index := rawdb.ReadTxLookupEntry(chain.db, tx.Hash()) + require.NotNilf(index, "Miss transaction indices, number %d hash %s", i, tx.Hash().Hex()) + } + } + + for i := uint64(0); i < tailValue; i++ { + block := rawdb.ReadBlock(chain.db, rawdb.ReadCanonicalHash(chain.db, i), i) + if block.Transactions().Len() == 0 { + continue + } + for _, tx := range block.Transactions() { + index := rawdb.ReadTxLookupEntry(chain.db, tx.Hash()) + require.Nilf(index, "Transaction indices should be deleted, number %d hash %s", i, tx.Hash().Hex()) + } + } + } + + conf := &CacheConfig{ + TrieCleanLimit: 256, + TrieDirtyLimit: 256, + TrieDirtyCommitTarget: 20, + TriePrefetcherParallelism: 4, + Pruning: true, + CommitInterval: 4096, + SnapshotLimit: 256, + SnapshotNoBuild: true, // Ensure the test errors if snapshot initialization fails + AcceptorQueueLimit: 64, + } + + // Init block chain and check all needed indices has been indexed. + chainDB := rawdb.NewMemoryDatabase() + chain, err := createBlockChain(chainDB, conf, gspec, common.Hash{}) + require.NoError(err) + + _, err = chain.InsertChain(blocks) + require.NoError(err) + + for _, block := range blocks { + err := chain.Accept(block) + require.NoError(err) + } + chain.DrainAcceptorQueue() + + chain.Stop() + check(nil, chain) // check all indices has been indexed + + lastAcceptedHash := chain.CurrentHeader().Hash() + + // Reconstruct a block chain which only reserves limited tx indices + // 128 blocks were previously indexed. Now we add a new block at each test step. 
+ limits := []uint64{ + 0, /* tip: 129 reserve all (don't run) */ + 131, /* tip: 130 reserve all */ + 140, /* tip: 131 reserve all */ + 64, /* tip: 132, limit:64 */ + 32, /* tip: 133, limit:32 */ + } + for i, l := range limits { + t.Run(fmt.Sprintf("test-%d, limit: %d", i+1, l), func(t *testing.T) { + conf.TxLookupLimit = l + + chain, err := createBlockChain(chainDB, conf, gspec, lastAcceptedHash) + require.NoError(err) + + newBlks := blocks2[i : i+1] + _, err = chain.InsertChain(newBlks) // Feed chain a higher block to trigger indices updater. + require.NoError(err) + + err = chain.Accept(newBlks[0]) // Accept the block to trigger indices updater. + require.NoError(err) + + chain.DrainAcceptorQueue() + time.Sleep(50 * time.Millisecond) // Wait for indices initialisation + + chain.Stop() + var tail *uint64 + if l == 0 { + tail = nil + } else { + var tl uint64 + if chain.CurrentBlock().Number.Uint64() > l { + // tail should be the first block number which is indexed + // i.e the first block number that's in the lookup range + tl = chain.CurrentBlock().Number.Uint64() - l + 1 + } + tail = &tl + } + + check(tail, chain) + + lastAcceptedHash = chain.CurrentHeader().Hash() + }) + } +} + +func TestTransactionSkipIndexing(t *testing.T) { + // Configure and generate a sample block chain + require := require.New(t) + var ( + key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") + addr1 = crypto.PubkeyToAddress(key1.PublicKey) + addr2 = crypto.PubkeyToAddress(key2.PublicKey) + funds = big.NewInt(10000000000000) + gspec = &Genesis{ + Config: ¶ms.ChainConfig{HomesteadBlock: new(big.Int)}, + Alloc: GenesisAlloc{addr1: {Balance: funds}}, + } + signer = types.LatestSigner(gspec.Config) + ) + genDb, blocks, _, err := GenerateChainWithGenesis(gspec, dummy.NewFakerWithCallbacks(TestCallbacks), 5, 10, func(i int, block *BlockGen) { + tx, err := 
types.SignTx(types.NewTransaction(block.TxNonce(addr1), addr2, big.NewInt(10000), params.TxGas, nil, nil), signer, key1) + require.NoError(err) + block.AddTx(tx) + }) + require.NoError(err) + + blocks2, _, err := GenerateChain(gspec.Config, blocks[len(blocks)-1], dummy.NewFakerWithCallbacks(TestCallbacks), genDb, 5, 10, func(i int, block *BlockGen) { + tx, err := types.SignTx(types.NewTransaction(block.TxNonce(addr1), addr2, big.NewInt(10000), params.TxGas, nil, nil), signer, key1) + require.NoError(err) + block.AddTx(tx) + }) + require.NoError(err) + + checkRemoved := func(tail *uint64, to uint64, chain *BlockChain) { + stored := rawdb.ReadTxIndexTail(chain.db) + var tailValue uint64 + if tail == nil { + require.Nil(stored) + tailValue = 0 + } else { + require.EqualValues(*tail, *stored, "expected tail %d, got %d", *tail, *stored) + tailValue = *tail + } + + for i := tailValue; i < to; i++ { + block := rawdb.ReadBlock(chain.db, rawdb.ReadCanonicalHash(chain.db, i), i) + if block.Transactions().Len() == 0 { + continue + } + for _, tx := range block.Transactions() { + index := rawdb.ReadTxLookupEntry(chain.db, tx.Hash()) + require.NotNilf(index, "Miss transaction indices, number %d hash %s", i, tx.Hash().Hex()) + } + } + + for i := uint64(0); i < tailValue; i++ { + block := rawdb.ReadBlock(chain.db, rawdb.ReadCanonicalHash(chain.db, i), i) + if block.Transactions().Len() == 0 { + continue + } + for _, tx := range block.Transactions() { + index := rawdb.ReadTxLookupEntry(chain.db, tx.Hash()) + require.Nilf(index, "Transaction indices should be deleted, number %d hash %s", i, tx.Hash().Hex()) + } + } + + for i := to; i <= chain.CurrentBlock().Number.Uint64(); i++ { + block := rawdb.ReadBlock(chain.db, rawdb.ReadCanonicalHash(chain.db, i), i) + if block.Transactions().Len() == 0 { + continue + } + for _, tx := range block.Transactions() { + index := rawdb.ReadTxLookupEntry(chain.db, tx.Hash()) + require.Nilf(index, "Transaction indices should be skipped, number %d hash 
%s", i, tx.Hash().Hex()) + } + } + } + + conf := &CacheConfig{ + TrieCleanLimit: 256, + TrieDirtyLimit: 256, + TrieDirtyCommitTarget: 20, + TriePrefetcherParallelism: 4, + Pruning: true, + CommitInterval: 4096, + SnapshotLimit: 256, + SnapshotNoBuild: true, // Ensure the test errors if snapshot initialization fails + AcceptorQueueLimit: 64, + SkipTxIndexing: true, + } + + // test1: Init block chain and check all indices has been skipped. + chainDB := rawdb.NewMemoryDatabase() + chain, err := createAndInsertChain(chainDB, conf, gspec, blocks, common.Hash{}) + require.NoError(err) + checkRemoved(nil, 0, chain) // check all indices has been skipped + + // test2: specify lookuplimit with tx index skipping enabled. Blocks should not be indexed but tail should be updated. + conf.TxLookupLimit = 2 + chain, err = createAndInsertChain(chainDB, conf, gspec, blocks2[0:1], chain.CurrentHeader().Hash()) + require.NoError(err) + tail := chain.CurrentBlock().Number.Uint64() - conf.TxLookupLimit + 1 + checkRemoved(&tail, 0, chain) + + // test3: tx index skipping and unindexer disabled. Blocks should be indexed and tail should be updated. + conf.TxLookupLimit = 0 + conf.SkipTxIndexing = false + chainDB = rawdb.NewMemoryDatabase() + chain, err = createAndInsertChain(chainDB, conf, gspec, blocks, common.Hash{}) + require.NoError(err) + checkRemoved(nil, chain.CurrentBlock().Number.Uint64()+1, chain) // check all indices has been indexed + + // now change tx index skipping to true and check that the indices are skipped for the last block + // and old indices are removed up to the tail, but [tail, current) indices are still there. 
+ conf.TxLookupLimit = 2 + conf.SkipTxIndexing = true + chain, err = createAndInsertChain(chainDB, conf, gspec, blocks2[0:1], chain.CurrentHeader().Hash()) + require.NoError(err) + tail = chain.CurrentBlock().Number.Uint64() - conf.TxLookupLimit + 1 + checkRemoved(&tail, chain.CurrentBlock().Number.Uint64(), chain) +} + // TestCanonicalHashMarker tests all the canonical hash markers are updated/deleted // correctly in case reorg is called. func TestCanonicalHashMarker(t *testing.T) { @@ -653,7 +936,7 @@ func TestCanonicalHashMarker(t *testing.T) { Alloc: GenesisAlloc{}, BaseFee: big.NewInt(params.ApricotPhase3InitialBaseFee), } - engine = dummy.NewFaker() + engine = dummy.NewCoinbaseFaker() ) _, forkA, _, err := GenerateChainWithGenesis(gspec, engine, c.forkA, 10, func(i int, gen *BlockGen) {}) if err != nil { @@ -717,140 +1000,372 @@ func TestCanonicalHashMarker(t *testing.T) { } } } + chain.Stop() } } -func TestTransactionIndices(t *testing.T) { - // Configure and generate a sample block chain - require := require.New(t) - var ( - key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") - addr1 = crypto.PubkeyToAddress(key1.PublicKey) - addr2 = crypto.PubkeyToAddress(key2.PublicKey) - funds = big.NewInt(10000000000000) - gspec = &Genesis{ - Config: ¶ms.ChainConfig{HomesteadBlock: new(big.Int)}, - Alloc: GenesisAlloc{addr1: {Balance: funds}}, - } - signer = types.LatestSigner(gspec.Config) - ) - height := uint64(128) - genDb, blocks, _, err := GenerateChainWithGenesis(gspec, dummy.NewDummyEngine(&TestCallbacks), int(height), 10, func(i int, block *BlockGen) { - tx, err := types.SignTx(types.NewTransaction(block.TxNonce(addr1), addr2, big.NewInt(10000), params.TxGas, nil, nil), signer, key1) - require.NoError(err) - block.AddTx(tx) - }) - require.NoError(err) +func TestTxLookupBlockChain(t *testing.T) { + cacheConf := 
&CacheConfig{ + TrieCleanLimit: 256, + TrieDirtyLimit: 256, + TrieDirtyCommitTarget: 20, + TriePrefetcherParallelism: 4, + Pruning: true, + CommitInterval: 4096, + SnapshotLimit: 256, + SnapshotNoBuild: true, // Ensure the test errors if snapshot initialization fails + AcceptorQueueLimit: 64, // ensure channel doesn't block + TxLookupLimit: 5, + } + createTxLookupBlockChain := func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error) { + return createBlockChain(db, cacheConf, gspec, lastAcceptedHash) + } + for _, tt := range tests { + t.Run(tt.Name, func(t *testing.T) { + tt.testFunc(t, createTxLookupBlockChain) + }) + } +} - blocks2, _, err := GenerateChain(gspec.Config, blocks[len(blocks)-1], dummy.NewDummyEngine(&TestCallbacks), genDb, 10, 10, nil) - require.NoError(err) +func TestTxLookupSkipIndexingBlockChain(t *testing.T) { + cacheConf := &CacheConfig{ + TrieCleanLimit: 256, + TrieDirtyLimit: 256, + TrieDirtyCommitTarget: 20, + TriePrefetcherParallelism: 4, + Pruning: true, + CommitInterval: 4096, + SnapshotLimit: 256, + SnapshotNoBuild: true, // Ensure the test errors if snapshot initialization fails + AcceptorQueueLimit: 64, // ensure channel doesn't block + TxLookupLimit: 5, + SkipTxIndexing: true, + } + createTxLookupBlockChain := func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error) { + return createBlockChain(db, cacheConf, gspec, lastAcceptedHash) + } + for _, tt := range tests { + t.Run(tt.Name, func(t *testing.T) { + tt.testFunc(t, createTxLookupBlockChain) + }) + } +} - check := func(tail *uint64, chain *BlockChain) { - stored := rawdb.ReadTxIndexTail(chain.db) - require.EqualValues(tail, stored) +func TestCreateThenDeletePreByzantium(t *testing.T) { + // We want to use pre-byzantium rules where we have intermediate state roots + // between transactions. 
+ config := *params.TestLaunchConfig + config.ByzantiumBlock = nil + config.ConstantinopleBlock = nil + config.PetersburgBlock = nil + config.IstanbulBlock = nil + config.MuirGlacierBlock = nil + testCreateThenDelete(t, &config) +} +func TestCreateThenDeletePostByzantium(t *testing.T) { + testCreateThenDelete(t, params.TestChainConfig) +} - if tail == nil { - return +// testCreateThenDelete tests a creation and subsequent deletion of a contract, happening +// within the same block. +func testCreateThenDelete(t *testing.T, config *params.ChainConfig) { + var ( + engine = dummy.NewFaker() + // A sender who makes transactions, has some funds + key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + address = crypto.PubkeyToAddress(key.PublicKey) + destAddress = crypto.CreateAddress(address, 0) + funds = big.NewInt(params.Ether) // Note: additional funds are provided here compared to go-ethereum so test completes. + ) + + // runtime code is 0x60ffff : PUSH1 0xFF SELFDESTRUCT, a.k.a SELFDESTRUCT(0xFF) + code := append([]byte{0x60, 0xff, 0xff}, make([]byte, 32-3)...) + initCode := []byte{ + // SSTORE 1:1 + byte(vm.PUSH1), 0x1, + byte(vm.PUSH1), 0x1, + byte(vm.SSTORE), + // Get the runtime-code on the stack + byte(vm.PUSH32)} + initCode = append(initCode, code...) + initCode = append(initCode, []byte{ + byte(vm.PUSH1), 0x0, // offset + byte(vm.MSTORE), + byte(vm.PUSH1), 0x3, // size + byte(vm.PUSH1), 0x0, // offset + byte(vm.RETURN), // return 3 bytes of zero-code + }...) 
+ gspec := &Genesis{ + Config: config, + Alloc: GenesisAlloc{ + address: {Balance: funds}, + }, + } + nonce := uint64(0) + signer := types.HomesteadSigner{} + _, blocks, _, _ := GenerateChainWithGenesis(gspec, engine, 2, 10, func(i int, b *BlockGen) { + fee := big.NewInt(1) + if b.header.BaseFee != nil { + fee = b.header.BaseFee } - for i := *tail; i <= chain.CurrentBlock().NumberU64(); i++ { - block := rawdb.ReadBlock(chain.db, rawdb.ReadCanonicalHash(chain.db, i), i) - if block.Transactions().Len() == 0 { - continue - } - for _, tx := range block.Transactions() { - index := rawdb.ReadTxLookupEntry(chain.db, tx.Hash()) - require.NotNilf(index, "Miss transaction indices, number %d hash %s", i, tx.Hash().Hex()) - } + b.SetCoinbase(common.Address{1}) + tx, _ := types.SignNewTx(key, signer, &types.LegacyTx{ + Nonce: nonce, + GasPrice: new(big.Int).Set(fee), + Gas: 100000, + Data: initCode, + }) + nonce++ + b.AddTx(tx) + tx, _ = types.SignNewTx(key, signer, &types.LegacyTx{ + Nonce: nonce, + GasPrice: new(big.Int).Set(fee), + Gas: 100000, + To: &destAddress, + }) + b.AddTx(tx) + nonce++ + }) + // Import the canonical chain + chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfig, gspec, engine, vm.Config{ + //Debug: true, + //Tracer: logger.NewJSONLogger(nil, os.Stdout), + }, common.Hash{}, false) + if err != nil { + t.Fatalf("failed to create tester chain: %v", err) + } + defer chain.Stop() + // Import the blocks + for _, block := range blocks { + if _, err := chain.InsertChain([]*types.Block{block}); err != nil { + t.Fatalf("block %d: failed to insert into chain: %v", block.NumberU64(), err) } + } +} - for i := uint64(0); i < *tail; i++ { - block := rawdb.ReadBlock(chain.db, rawdb.ReadCanonicalHash(chain.db, i), i) - if block.Transactions().Len() == 0 { - continue - } - for _, tx := range block.Transactions() { - index := rawdb.ReadTxLookupEntry(chain.db, tx.Hash()) - require.Nilf(index, "Transaction indices should be deleted, number %d hash %s", 
i, tx.Hash().Hex()) - } +// TestTransientStorageReset ensures the transient storage is wiped correctly +// between transactions. +func TestTransientStorageReset(t *testing.T) { + var ( + engine = dummy.NewFaker() + key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + address = crypto.PubkeyToAddress(key.PublicKey) + destAddress = crypto.CreateAddress(address, 0) + funds = big.NewInt(params.Ether) // Note: additional funds are provided here compared to go-ethereum so test completes. + vmConfig = vm.Config{ + ExtraEips: []int{1153}, // Enable transient storage EIP } + ) + code := append([]byte{ + // TLoad value with location 1 + byte(vm.PUSH1), 0x1, + byte(vm.TLOAD), + + // PUSH location + byte(vm.PUSH1), 0x1, + + // SStore location:value + byte(vm.SSTORE), + }, make([]byte, 32-6)...) + initCode := []byte{ + // TSTORE 1:1 + byte(vm.PUSH1), 0x1, + byte(vm.PUSH1), 0x1, + byte(vm.TSTORE), + + // Get the runtime-code on the stack + byte(vm.PUSH32)} + initCode = append(initCode, code...) + initCode = append(initCode, []byte{ + byte(vm.PUSH1), 0x0, // offset + byte(vm.MSTORE), + byte(vm.PUSH1), 0x6, // size + byte(vm.PUSH1), 0x0, // offset + byte(vm.RETURN), // return 6 bytes of zero-code + }...) 
+ gspec := &Genesis{ + Config: params.TestChainConfig, + Alloc: GenesisAlloc{ + address: {Balance: funds}, + }, } + nonce := uint64(0) + signer := types.HomesteadSigner{} + _, blocks, _, _ := GenerateChainWithGenesis(gspec, engine, 1, 10, func(i int, b *BlockGen) { + fee := big.NewInt(1) + if b.header.BaseFee != nil { + fee = b.header.BaseFee + } + b.SetCoinbase(common.Address{1}) + tx, _ := types.SignNewTx(key, signer, &types.LegacyTx{ + Nonce: nonce, + GasPrice: new(big.Int).Set(fee), + Gas: 100000, + Data: initCode, + }) + nonce++ + b.AddTxWithVMConfig(tx, vmConfig) + + tx, _ = types.SignNewTx(key, signer, &types.LegacyTx{ + Nonce: nonce, + GasPrice: new(big.Int).Set(fee), + Gas: 100000, + To: &destAddress, + }) + b.AddTxWithVMConfig(tx, vmConfig) + nonce++ + }) - conf := &CacheConfig{ - TrieCleanLimit: 256, - TrieDirtyLimit: 256, - TrieDirtyCommitTarget: 20, - Pruning: true, - CommitInterval: 4096, - SnapshotLimit: 256, - SkipSnapshotRebuild: true, // Ensure the test errors if snapshot initialization fails - AcceptorQueueLimit: 64, + // Initialize the blockchain with 1153 enabled. + chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfig, gspec, engine, vmConfig, common.Hash{}, false) + if err != nil { + t.Fatalf("failed to create tester chain: %v", err) } - - // Init block chain and check all needed indices has been indexed. 
- chainDB := rawdb.NewMemoryDatabase() - chain, err := createBlockChain(chainDB, conf, gspec, common.Hash{}) - require.NoError(err) - - _, err = chain.InsertChain(blocks) - require.NoError(err) - - for _, block := range blocks { - err := chain.Accept(block) - require.NoError(err) + defer chain.Stop() + // Import the blocks + if _, err := chain.InsertChain(blocks); err != nil { + t.Fatalf("failed to insert into chain: %v", err) } - chain.DrainAcceptorQueue() - - chain.Stop() - check(nil, chain) // check all indices has been indexed + // Check the storage + state, err := chain.StateAt(chain.CurrentHeader().Root) + if err != nil { + t.Fatalf("Failed to load state %v", err) + } + loc := common.BytesToHash([]byte{1}) + slot := state.GetState(destAddress, loc) + if slot != (common.Hash{}) { + t.Fatalf("Unexpected dirty storage slot") + } +} - lastAcceptedHash := chain.CurrentHeader().Hash() +func TestEIP3651(t *testing.T) { + var ( + aa = common.HexToAddress("0x000000000000000000000000000000000000aaaa") + bb = common.HexToAddress("0x000000000000000000000000000000000000bbbb") + engine = dummy.NewCoinbaseFaker() - // Reconstruct a block chain which only reserves limited tx indices - // 128 blocks were previously indexed. Now we add a new block at each test step. 
- limit := []uint64{130 /* 129 + 1 reserve all */, 64 /* drop stale */, 32 /* shorten history */} - tails := []uint64{0 /* reserve all */, 67 /* 130 - 64 + 1 */, 100 /* 131 - 32 + 1 */} - for i, l := range limit { - conf.TxLookupLimit = l + // A sender who makes transactions, has some funds + key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") + addr1 = crypto.PubkeyToAddress(key1.PublicKey) + addr2 = crypto.PubkeyToAddress(key2.PublicKey) + funds = new(big.Int).Mul(common.Big1, big.NewInt(params.Ether)) + gspec = &Genesis{ + Config: params.TestChainConfig, + Alloc: GenesisAlloc{ + addr1: {Balance: funds}, + addr2: {Balance: funds}, + // The address 0xAAAA sloads 0x00 and 0x01 + aa: { + Code: []byte{ + byte(vm.PC), + byte(vm.PC), + byte(vm.SLOAD), + byte(vm.SLOAD), + }, + Nonce: 0, + Balance: big.NewInt(0), + }, + // The address 0xBBBB calls 0xAAAA + bb: { + Code: []byte{ + byte(vm.PUSH1), 0, // out size + byte(vm.DUP1), // out offset + byte(vm.DUP1), // out insize + byte(vm.DUP1), // in offset + byte(vm.PUSH2), // address + byte(0xaa), + byte(0xaa), + byte(vm.GAS), // gas + byte(vm.DELEGATECALL), + }, + Nonce: 0, + Balance: big.NewInt(0), + }, + }, + } + ) - chain, err := createBlockChain(chainDB, conf, gspec, lastAcceptedHash) - require.NoError(err) + signer := types.LatestSigner(gspec.Config) + + _, blocks, _, _ := GenerateChainWithGenesis(gspec, engine, 1, 10, func(i int, b *BlockGen) { + b.SetCoinbase(aa) + // One transaction to Coinbase + txdata := &types.DynamicFeeTx{ + ChainID: gspec.Config.ChainID, + Nonce: 0, + To: &bb, + Gas: 500000, + GasFeeCap: newGwei(225), + GasTipCap: big.NewInt(2), + AccessList: nil, + Data: []byte{}, + } + tx := types.NewTx(txdata) + tx, _ = types.SignTx(tx, signer, key1) - newBlks := blocks2[i : i+1] - _, err = chain.InsertChain(newBlks) // Feed chain a higher block to trigger indices 
updater. - require.NoError(err) + b.AddTx(tx) + }) + chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfig, gspec, engine, vm.Config{Tracer: logger.NewMarkdownLogger(&logger.Config{}, os.Stderr)}, common.Hash{}, false) + if err != nil { + t.Fatalf("failed to create tester chain: %v", err) + } + defer chain.Stop() + if n, err := chain.InsertChain(blocks); err != nil { + t.Fatalf("block %d: failed to insert into chain: %v", n, err) + } - err = chain.Accept(newBlks[0]) // Accept the block to trigger indices updater. - require.NoError(err) + block := chain.GetBlockByNumber(1) - chain.DrainAcceptorQueue() - time.Sleep(50 * time.Millisecond) // Wait for indices initialisation + // 1+2: Ensure EIP-1559 access lists are accounted for via gas usage. + innerGas := vm.GasQuickStep*2 + params.ColdSloadCostEIP2929*2 + expectedGas := params.TxGas + 5*vm.GasFastestStep + vm.GasQuickStep + 100 + innerGas // 100 because 0xaaaa is in access list + if block.GasUsed() != expectedGas { + t.Fatalf("incorrect amount of gas spent: expected %d, got %d", expectedGas, block.GasUsed()) + } - chain.Stop() - check(&tails[i], chain) + state, _ := chain.State() + + // 3: Ensure that miner received the gasUsed * (block baseFee + effectiveGasTip). + // Note this differs from go-ethereum where the miner receives the gasUsed * block baseFee, + // as our handling of the coinbase payment is different. + // Note we use block.GasUsed() here as there is only one tx. 
+ actual := state.GetBalance(common.HexToAddress("0x000000000000000000000000000000000000dEaD")) + tx := block.Transactions()[0] + gasPrice := new(big.Int).Add(block.BaseFee(), tx.EffectiveGasTipValue(block.BaseFee())) + expected := new(big.Int).SetUint64(block.GasUsed() * gasPrice.Uint64()) + if actual.Cmp(expected) != 0 { + t.Fatalf("miner balance incorrect: expected %d, got %d", expected, actual) + } - lastAcceptedHash = chain.CurrentHeader().Hash() + // 4: Ensure the tx sender paid for the gasUsed * (block baseFee + effectiveGasTip). + // Note this differs from go-ethereum where the miner receives the gasUsed * block baseFee, + // as our handling of the coinbase payment is different. + actual = new(big.Int).Sub(funds, state.GetBalance(addr1)) + if actual.Cmp(expected) != 0 { + t.Fatalf("sender balance incorrect: expected %d, got %d", expected, actual) } } -func TestTxLookupBlockChain(t *testing.T) { - cacheConf := &CacheConfig{ - TrieCleanLimit: 256, - TrieDirtyLimit: 256, - TrieDirtyCommitTarget: 20, - Pruning: true, - CommitInterval: 4096, - SnapshotLimit: 256, - SkipSnapshotRebuild: true, // Ensure the test errors if snapshot initialization fails - AcceptorQueueLimit: 64, // ensure channel doesn't block - TxLookupLimit: 5, +func createAndInsertChain(db ethdb.Database, cacheConfig *CacheConfig, gspec *Genesis, blocks types.Blocks, lastAcceptedHash common.Hash) (*BlockChain, error) { + chain, err := createBlockChain(db, cacheConfig, gspec, lastAcceptedHash) + if err != nil { + return nil, err } - createTxLookupBlockChain := func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error) { - return createBlockChain(db, cacheConf, gspec, lastAcceptedHash) + _, err = chain.InsertChain(blocks) + if err != nil { + return nil, err } - for _, tt := range tests { - t.Run(tt.Name, func(t *testing.T) { - tt.testFunc(t, createTxLookupBlockChain) - }) + for _, block := range blocks { + err := chain.Accept(block) + if err != nil { + return nil, 
err + } } + + chain.DrainAcceptorQueue() + time.Sleep(1000 * time.Millisecond) // Wait for indices initialisation + + chain.Stop() + return chain, nil } diff --git a/coreth/core/bloom_indexer.go b/coreth/core/bloom_indexer.go index 60ac039e..ab8bc706 100644 --- a/coreth/core/bloom_indexer.go +++ b/coreth/core/bloom_indexer.go @@ -23,9 +23,9 @@ import ( "github.com/ava-labs/coreth/core/bloombits" "github.com/ava-labs/coreth/core/rawdb" "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/ethdb" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/bitutil" + "github.com/ethereum/go-ethereum/ethdb" ) const ( diff --git a/coreth/core/bloombits/generator_test.go b/coreth/core/bloombits/generator_test.go index 067c1db6..40a4749c 100644 --- a/coreth/core/bloombits/generator_test.go +++ b/coreth/core/bloombits/generator_test.go @@ -28,6 +28,7 @@ package bloombits import ( "bytes" + crand "crypto/rand" "math/rand" "testing" @@ -88,7 +89,7 @@ func BenchmarkGenerator(b *testing.B) { } }) for i := 0; i < types.BloomBitLength; i++ { - rand.Read(input[i][:]) + crand.Read(input[i][:]) } b.Run("random", func(b *testing.B) { b.ReportAllocs() diff --git a/coreth/core/bloombits/matcher.go b/coreth/core/bloombits/matcher.go index 07e4dc64..12fd0ace 100644 --- a/coreth/core/bloombits/matcher.go +++ b/coreth/core/bloombits/matcher.go @@ -93,7 +93,7 @@ type Matcher struct { retrievals chan chan *Retrieval // Retriever processes waiting for task allocations deliveries chan *Retrieval // Retriever processes waiting for task response deliveries - running uint32 // Atomic flag whether a session is live or not + running atomic.Bool // Atomic flag whether a session is live or not } // NewMatcher creates a new pipeline for retrieving bloom bit streams and doing @@ -156,10 +156,10 @@ func (m *Matcher) addScheduler(idx uint) { // channel is closed. 
func (m *Matcher) Start(ctx context.Context, begin, end uint64, results chan uint64) (*MatcherSession, error) { // Make sure we're not creating concurrent sessions - if atomic.SwapUint32(&m.running, 1) == 1 { + if m.running.Swap(true) { return nil, errors.New("matcher already running") } - defer atomic.StoreUint32(&m.running, 0) + defer m.running.Store(false) // Initiate a new matching round session := &MatcherSession{ @@ -622,7 +622,7 @@ func (s *MatcherSession) Multiplex(batch int, wait time.Duration, mux chan chan return case <-time.After(wait): - // Throttling up, fetch whatever's available + // Throttling up, fetch whatever is available } } // Allocate as much as we can handle and request servicing @@ -640,13 +640,16 @@ func (s *MatcherSession) Multiplex(batch int, wait time.Duration, mux chan chan request <- &Retrieval{Bit: bit, Sections: sections, Context: s.ctx} result := <-request + + // Deliver a result before s.Close() to avoid a deadlock + s.deliverSections(result.Bit, result.Sections, result.Bitsets) + if result.Error != nil { s.errLock.Lock() s.err = result.Error s.errLock.Unlock() s.Close() } - s.deliverSections(result.Bit, result.Sections, result.Bitsets) } } } diff --git a/coreth/core/bloombits/matcher_test.go b/coreth/core/bloombits/matcher_test.go index ae08ecba..54bd148b 100644 --- a/coreth/core/bloombits/matcher_test.go +++ b/coreth/core/bloombits/matcher_test.go @@ -170,7 +170,7 @@ func testMatcher(t *testing.T, filter [][]bloomIndexes, start, blocks uint64, in } } // Track the number of retrieval requests made - var requested uint32 + var requested atomic.Uint32 // Start the matching session for the filter and the retriever goroutines quit := make(chan struct{}) @@ -218,15 +218,15 @@ func testMatcher(t *testing.T, filter [][]bloomIndexes, start, blocks uint64, in session.Close() close(quit) - if retrievals != 0 && requested != retrievals { - t.Errorf("filter = %v blocks = %v intermittent = %v: request count mismatch, have #%v, want #%v", 
filter, blocks, intermittent, requested, retrievals) + if retrievals != 0 && requested.Load() != retrievals { + t.Errorf("filter = %v blocks = %v intermittent = %v: request count mismatch, have #%v, want #%v", filter, blocks, intermittent, requested.Load(), retrievals) } - return requested + return requested.Load() } // startRetrievers starts a batch of goroutines listening for section requests // and serving them. -func startRetrievers(session *MatcherSession, quit chan struct{}, retrievals *uint32, batch int) { +func startRetrievers(session *MatcherSession, quit chan struct{}, retrievals *atomic.Uint32, batch int) { requests := make(chan chan *Retrieval) for i := 0; i < 10; i++ { @@ -248,7 +248,7 @@ func startRetrievers(session *MatcherSession, quit chan struct{}, retrievals *ui for i, section := range task.Sections { if rand.Int()%4 != 0 { // Handle occasional missing deliveries task.Bitsets[i] = generateBitset(task.Bit, section) - atomic.AddUint32(retrievals, 1) + retrievals.Add(1) } } request <- task diff --git a/coreth/core/bloombits/scheduler_test.go b/coreth/core/bloombits/scheduler_test.go index bbe149f4..2a35508e 100644 --- a/coreth/core/bloombits/scheduler_test.go +++ b/coreth/core/bloombits/scheduler_test.go @@ -29,11 +29,9 @@ package bloombits import ( "bytes" "math/big" - "math/rand" "sync" "sync/atomic" "testing" - "time" ) // Tests that the scheduler can deduplicate and forward retrieval requests to @@ -57,14 +55,13 @@ func testScheduler(t *testing.T, clients int, fetchers int, requests int) { fetch := make(chan *request, 16) defer close(fetch) - var delivered uint32 + var delivered atomic.Uint32 for i := 0; i < fetchers; i++ { go func() { defer fetchPend.Done() for req := range fetch { - time.Sleep(time.Duration(rand.Intn(int(100 * time.Microsecond)))) - atomic.AddUint32(&delivered, 1) + delivered.Add(1) f.deliver([]uint64{ req.section + uint64(requests), // Non-requested data (ensure it doesn't go out of bounds) @@ -110,7 +107,7 @@ func 
testScheduler(t *testing.T, clients int, fetchers int, requests int) { } pend.Wait() - if have := atomic.LoadUint32(&delivered); int(have) != requests { + if have := delivered.Load(); int(have) != requests { t.Errorf("request count mismatch: have %v, want %v", have, requests) } } diff --git a/coreth/core/bounded_buffer.go b/coreth/core/bounded_buffer.go index b6170682..534b1cc6 100644 --- a/coreth/core/bounded_buffer.go +++ b/coreth/core/bounded_buffer.go @@ -11,14 +11,14 @@ package core type BoundedBuffer[K any] struct { lastPos int size int - callback func(K) + callback func(K) error buffer []K cycled bool } // NewBoundedBuffer creates a new [BoundedBuffer]. -func NewBoundedBuffer[K any](size int, callback func(K)) *BoundedBuffer[K] { +func NewBoundedBuffer[K any](size int, callback func(K) error) *BoundedBuffer[K] { return &BoundedBuffer[K]{ lastPos: -1, size: size, @@ -29,7 +29,7 @@ func NewBoundedBuffer[K any](size int, callback func(K)) *BoundedBuffer[K] { // Insert adds a new value to the buffer. If the buffer is full, the // oldest value will be overwritten and [callback] will be invoked. -func (b *BoundedBuffer[K]) Insert(h K) { +func (b *BoundedBuffer[K]) Insert(h K) error { nextPos := b.lastPos + 1 // the first item added to the buffer will be at position 0 if nextPos == b.size { nextPos = 0 @@ -39,10 +39,13 @@ func (b *BoundedBuffer[K]) Insert(h K) { if b.cycled { // We ensure we have cycled through the buffer once before invoking the // [callback] to ensure we don't call it with unset values. - b.callback(b.buffer[nextPos]) + if err := b.callback(b.buffer[nextPos]); err != nil { + return err + } } b.buffer[nextPos] = h b.lastPos = nextPos + return nil } // Last retrieves the last item added to the buffer. 
diff --git a/coreth/core/chain_indexer.go b/coreth/core/chain_indexer.go index 975f82b3..1ef815b5 100644 --- a/coreth/core/chain_indexer.go +++ b/coreth/core/chain_indexer.go @@ -36,8 +36,8 @@ import ( "github.com/ava-labs/coreth/core/rawdb" "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/ethdb" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/log" ) @@ -85,7 +85,7 @@ type ChainIndexer struct { backend ChainIndexerBackend // Background processor generating the index data content children []*ChainIndexer // Child indexers to cascade chain updates to - active uint32 // Flag whether the event loop was started + active atomic.Bool // Flag whether the event loop was started update chan struct{} // Notification channel that headers should be processed quit chan chan error // Quit channel to tear down running goroutines ctx context.Context @@ -176,7 +176,7 @@ func (c *ChainIndexer) Close() error { errs = append(errs, err) } // If needed, tear down the secondary event loop - if atomic.LoadUint32(&c.active) != 0 { + if c.active.Load() { c.quit <- errc if err := <-errc; err != nil { errs = append(errs, err) @@ -206,7 +206,7 @@ func (c *ChainIndexer) Close() error { // queue. 
func (c *ChainIndexer) eventLoop(currentHeader *types.Header, events chan ChainHeadEvent, sub event.Subscription) { // Mark the chain indexer as active, requiring an additional teardown - atomic.StoreUint32(&c.active, 1) + c.active.Store(true) defer sub.Unsubscribe() diff --git a/coreth/core/chain_makers.go b/coreth/core/chain_makers.go index 1bb00ea8..b5894a6e 100644 --- a/coreth/core/chain_makers.go +++ b/coreth/core/chain_makers.go @@ -36,9 +36,10 @@ import ( "github.com/ava-labs/coreth/core/state" "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/core/vm" - "github.com/ava-labs/coreth/ethdb" "github.com/ava-labs/coreth/params" + "github.com/ava-labs/coreth/trie" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethdb" ) // BlockGen creates blocks for testing. @@ -78,6 +79,11 @@ func (b *BlockGen) SetExtra(data []byte) { b.header.Extra = data } +// AppendExtra appends data to the extra data field of the generated block. +func (b *BlockGen) AppendExtra(data []byte) { + b.header.Extra = append(b.header.Extra, data...) +} + // SetNonce sets the nonce field of the generated block. func (b *BlockGen) SetNonce(nonce types.BlockNonce) { b.header.Nonce = nonce @@ -90,6 +96,27 @@ func (b *BlockGen) SetDifficulty(diff *big.Int) { b.header.Difficulty = diff } +// addTx adds a transaction to the generated block. If no coinbase has +// been set, the block's coinbase is set to the zero address. +// +// There are a few options can be passed as well in order to run some +// customized rules. +// - bc: enables the ability to query historical block hashes for BLOCKHASH +// - vmConfig: extends the flexibility for customizing evm rules, e.g. 
enable extra EIPs +func (b *BlockGen) addTx(bc *BlockChain, vmConfig vm.Config, tx *types.Transaction) { + if b.gasPool == nil { + b.SetCoinbase(common.Address{}) + } + b.statedb.SetTxContext(tx.Hash(), len(b.txs)) + blockContext := NewEVMBlockContext(b.header, bc, &b.header.Coinbase) + receipt, err := ApplyTransaction(b.config, bc, blockContext, b.gasPool, b.statedb, b.header, tx, &b.header.GasUsed, vmConfig) + if err != nil { + panic(err) + } + b.txs = append(b.txs, tx) + b.receipts = append(b.receipts, receipt) +} + // AddTx adds a transaction to the generated block. If no coinbase has // been set, the block's coinbase is set to the zero address. // @@ -99,7 +126,7 @@ func (b *BlockGen) SetDifficulty(diff *big.Int) { // added. Notably, contract code relying on the BLOCKHASH instruction // will panic during execution. func (b *BlockGen) AddTx(tx *types.Transaction) { - b.AddTxWithChain(nil, tx) + b.addTx(nil, vm.Config{}, tx) } // AddTxWithChain adds a transaction to the generated block. If no coinbase has @@ -111,16 +138,14 @@ func (b *BlockGen) AddTx(tx *types.Transaction) { // added. If contract code relies on the BLOCKHASH instruction, // the block in chain will be returned. func (b *BlockGen) AddTxWithChain(bc *BlockChain, tx *types.Transaction) { - if b.gasPool == nil { - b.SetCoinbase(common.Address{}) - } - b.statedb.Prepare(tx.Hash(), len(b.txs)) - receipt, err := ApplyTransaction(b.config, bc, &b.header.Coinbase, b.gasPool, b.statedb, b.header, tx, &b.header.GasUsed, vm.Config{}) - if err != nil { - panic(err) - } - b.txs = append(b.txs, tx) - b.receipts = append(b.receipts, receipt) + b.addTx(bc, vm.Config{}, tx) +} + +// AddTxWithVMConfig adds a transaction to the generated block. If no coinbase has +// been set, the block's coinbase is set to the zero address. +// The evm interpreter can be customized with the provided vm config. 
+func (b *BlockGen) AddTxWithVMConfig(tx *types.Transaction, config vm.Config) { + b.addTx(nil, config, tx) } // GetBalance returns the balance of the given address at the generated block. @@ -142,6 +167,11 @@ func (b *BlockGen) Number() *big.Int { return new(big.Int).Set(b.header.Number) } +// Timestamp returns the timestamp of the block being generated. +func (b *BlockGen) Timestamp() uint64 { + return b.header.Time +} + // BaseFee returns the EIP-1559 base fee of the block being generated. func (b *BlockGen) BaseFee() *big.Int { return new(big.Int).Set(b.header.BaseFee) @@ -222,6 +252,11 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse b := &BlockGen{i: i, chain: blocks, parent: parent, statedb: statedb, config: config, engine: engine} b.header = makeHeader(chainreader, config, parent, gap, statedb, b.engine) + err := ApplyUpgrades(config, &parent.Header().Time, b, statedb) + if err != nil { + return nil, nil, fmt.Errorf("failed to configure precompiles %v", err) + } + // Execute any user modifications to the block if gen != nil { gen(i, b) @@ -238,7 +273,7 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse if err != nil { panic(fmt.Sprintf("state write error: %v", err)) } - if err := statedb.Database().TrieDB().Commit(root, false, nil); err != nil { + if err := statedb.Database().TrieDB().Commit(root, false); err != nil { panic(fmt.Sprintf("trie write error: %v", err)) } if b.onBlockGenerated != nil { @@ -269,11 +304,11 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse // then generate chain on top. 
func GenerateChainWithGenesis(genesis *Genesis, engine consensus.Engine, n int, gap uint64, gen func(int, *BlockGen)) (ethdb.Database, []*types.Block, []types.Receipts, error) { db := rawdb.NewMemoryDatabase() - _, err := genesis.Commit(db) + _, err := genesis.Commit(db, trie.NewDatabase(db)) if err != nil { return nil, nil, nil, err } - blocks, receipts, err := GenerateChain(genesis.Config, genesis.ToBlock(nil), engine, db, n, gap, gen) + blocks, receipts, err := GenerateChain(genesis.Config, genesis.ToBlock(), engine, db, n, gap, gen) return db, blocks, receipts, err } @@ -285,23 +320,22 @@ func makeHeader(chain consensus.ChainReader, config *params.ChainConfig, parent time = parent.Time() + gap } - timestamp := new(big.Int).SetUint64(time) var gasLimit uint64 - if config.IsCortina(timestamp) { + if config.IsCortina(time) { gasLimit = params.CortinaGasLimit } else { if config.IsSongbirdCode() { - if config.IsSongbirdTransition(timestamp) { + if config.IsSongbirdTransition(time) { gasLimit = params.SgbTransitionGasLimit - } else if config.IsApricotPhase5(timestamp) { + } else if config.IsApricotPhase5(time) { gasLimit = params.SgbApricotPhase5GasLimit - } else if config.IsApricotPhase1(timestamp) { + } else if config.IsApricotPhase1(time) { gasLimit = params.ApricotPhase1GasLimit } else { gasLimit = CalcGasLimit(parent.GasUsed(), parent.GasLimit(), parent.GasLimit(), parent.GasLimit()) } } else { - if config.IsApricotPhase1(timestamp) { + if config.IsApricotPhase1(time) { gasLimit = params.ApricotPhase1GasLimit } else { gasLimit = CalcGasLimit(parent.GasUsed(), parent.GasLimit(), parent.GasLimit(), parent.GasLimit()) @@ -323,7 +357,7 @@ func makeHeader(chain consensus.ChainReader, config *params.ChainConfig, parent Number: new(big.Int).Add(parent.Number(), common.Big1), Time: time, } - if chain.Config().IsApricotPhase3(timestamp) { + if chain.Config().IsApricotPhase3(time) { var err error header.Extra, header.BaseFee, err = dummy.CalcBaseFee(chain.Config(), 
parent.Header(), time) if err != nil { diff --git a/coreth/core/chain_makers_test.go b/coreth/core/chain_makers_test.go index a2e1bf2e..cdc27527 100644 --- a/coreth/core/chain_makers_test.go +++ b/coreth/core/chain_makers_test.go @@ -60,7 +60,7 @@ func ExampleGenerateChain() { // each block and adds different features to gen based on the // block index. signer := types.HomesteadSigner{} - _, chain, _, err := GenerateChainWithGenesis(gspec, dummy.NewFaker(), 3, 10, func(i int, gen *BlockGen) { + _, chain, _, err := GenerateChainWithGenesis(gspec, dummy.NewCoinbaseFaker(), 3, 10, func(i int, gen *BlockGen) { switch i { case 0: // In block 1, addr1 sends addr2 some ether. @@ -81,7 +81,7 @@ func ExampleGenerateChain() { } // Import the chain. This runs all block validation rules. - blockchain, _ := NewBlockChain(db, DefaultCacheConfig, gspec, dummy.NewFaker(), vm.Config{}, common.Hash{}, false) + blockchain, _ := NewBlockChain(db, DefaultCacheConfig, gspec, dummy.NewCoinbaseFaker(), vm.Config{}, common.Hash{}, false) defer blockchain.Stop() if i, err := blockchain.InsertChain(chain); err != nil { @@ -90,7 +90,7 @@ func ExampleGenerateChain() { } state, _ := blockchain.State() - fmt.Printf("last block: #%d\n", blockchain.CurrentBlock().Number()) + fmt.Printf("last block: #%d\n", blockchain.CurrentBlock().Number) fmt.Println("balance of addr1:", state.GetBalance(addr1)) fmt.Println("balance of addr2:", state.GetBalance(addr2)) fmt.Println("balance of addr3:", state.GetBalance(addr3)) diff --git a/coreth/core/daemon.go b/coreth/core/daemon.go index aaf7d70d..dc4c43cb 100644 --- a/coreth/core/daemon.go +++ b/coreth/core/daemon.go @@ -21,11 +21,11 @@ import ( var ( // Define activation times for submitter contract - submitterContractActivationTimeFlare = big.NewInt(time.Date(2024, time.March, 26, 12, 0, 0, 0, time.UTC).Unix()) - submitterContractActivationTimeCostwo = big.NewInt(time.Date(2024, time.March, 7, 12, 0, 0, 0, time.UTC).Unix()) + 
submitterContractActivationTimeFlare = uint64(time.Date(2024, time.March, 26, 12, 0, 0, 0, time.UTC).Unix()) + submitterContractActivationTimeCostwo = uint64(time.Date(2024, time.March, 7, 12, 0, 0, 0, time.UTC).Unix()) - submitterContractActivationTimeSongbird = big.NewInt(time.Date(2024, time.March, 15, 12, 0, 0, 0, time.UTC).Unix()) - submitterContractActivationTimeCoston = big.NewInt(time.Date(2024, time.February, 29, 12, 0, 0, 0, time.UTC).Unix()) + submitterContractActivationTimeSongbird = uint64(time.Date(2024, time.March, 15, 12, 0, 0, 0, time.UTC).Unix()) + submitterContractActivationTimeCoston = uint64(time.Date(2024, time.February, 29, 12, 0, 0, 0, time.UTC).Unix()) // Define ftso and submitter contract addresses prioritisedFTSOContractAddress = common.HexToAddress("0x1000000000000000000000000000000000000003") @@ -58,23 +58,23 @@ const ( ) type prioritisedParams struct { - submitterActivationTime *big.Int + submitterActivationTime uint64 submitterAddress common.Address maxGasLimit uint64 - dataPrefixActivationTime *big.Int + dataPrefixActivationTime uint64 submitterDataPrefixes [][4]byte ftsoDataPrefixes [][4]byte } var ( prioritisedContractVariants = utils.NewChainValue(&prioritisedParams{ - big.NewInt(0), common.Address{}, 0, big.NewInt(0), [][4]byte{}, [][4]byte{}, + 0, common.Address{}, 0, 0, [][4]byte{}, [][4]byte{}, }). AddValue(params.FlareChainID, &prioritisedParams{ submitterContractActivationTimeFlare, prioritisedSubmitterContractAddress, 3000000, - big.NewInt(time.Date(2024, time.October, 10, 15, 0, 0, 0, time.UTC).Unix()), + uint64(time.Date(2024, time.October, 10, 15, 0, 0, 0, time.UTC).Unix()), submitterDataPrefixes, prioritisedFTSOContractDataPrefixesFlareNetworks, }). 
@@ -82,7 +82,7 @@ var ( submitterContractActivationTimeCostwo, prioritisedSubmitterContractAddress, 3000000, - big.NewInt(time.Date(2024, time.October, 10, 10, 0, 0, 0, time.UTC).Unix()), + uint64(time.Date(2024, time.October, 10, 10, 0, 0, 0, time.UTC).Unix()), submitterDataPrefixes, prioritisedFTSOContractDataPrefixesFlareNetworks, }). @@ -90,7 +90,7 @@ var ( submitterContractActivationTimeSongbird, prioritisedSubmitterContractAddress, math.MaxUint64, - big.NewInt(time.Date(2024, time.October, 10, 13, 0, 0, 0, time.UTC).Unix()), + uint64(time.Date(2024, time.October, 10, 13, 0, 0, 0, time.UTC).Unix()), submitterDataPrefixes, prioritisedFTSOContractDataPrefixesSongbirdNetworks, }). @@ -98,31 +98,23 @@ var ( submitterContractActivationTimeCoston, prioritisedSubmitterContractAddress, math.MaxUint64, - big.NewInt(time.Date(2024, time.October, 10, 8, 0, 0, 0, time.UTC).Unix()), + uint64(time.Date(2024, time.October, 10, 8, 0, 0, 0, time.UTC).Unix()), submitterDataPrefixes, prioritisedFTSOContractDataPrefixesSongbirdNetworks, }). AddValue(params.LocalFlareChainID, &prioritisedParams{ - big.NewInt(0), + 0, prioritisedSubmitterContractAddressEnv, 3000000, - big.NewInt(0), - [][4]byte{}, - [][4]byte{}, - }). - AddValue(params.StagingChainID, &prioritisedParams{ - big.NewInt(0), - prioritisedSubmitterContractAddressEnv, - 3000000, - big.NewInt(0), + 0, [][4]byte{}, [][4]byte{}, }). 
AddValue(params.LocalChainID, &prioritisedParams{ - big.NewInt(0), + 0, prioritisedSubmitterContractAddressEnv, math.MaxUint64, - big.NewInt(0), + 0, [][4]byte{}, [][4]byte{}, }) @@ -155,34 +147,34 @@ type EVMCaller interface { GetChainID() *big.Int DaemonCall(caller vm.ContractRef, addr common.Address, input []byte, gas uint64) (snapshot int, ret []byte, leftOverGas uint64, err error) DaemonRevertToSnapshot(snapshot int) - GetBlockTime() *big.Int + GetBlockTime() uint64 GetGasLimit() uint64 AddBalance(addr common.Address, amount *big.Int) } -func GetDaemonGasMultiplier(blockTime *big.Int) uint64 { +func GetDaemonGasMultiplier(blockTime uint64) uint64 { switch { default: return 100 } } -func GetDaemonContractAddr(blockTime *big.Int) string { +func GetDaemonContractAddr(blockTime uint64) string { switch { default: return "0x1000000000000000000000000000000000000002" } } -func GetDaemonSelector(blockTime *big.Int) []byte { +func GetDaemonSelector(blockTime uint64) []byte { switch { default: return []byte{0x7f, 0xec, 0x8d, 0x38} } } -func IsPrioritisedContractCall(chainID *big.Int, blockTime *big.Int, to *common.Address, data []byte, ret []byte, initialGas uint64) bool { - if to == nil || chainID == nil || blockTime == nil { +func IsPrioritisedContractCall(chainID *big.Int, blockTime uint64, to *common.Address, data []byte, ret []byte, initialGas uint64) bool { + if to == nil || chainID == nil { return false } @@ -192,12 +184,12 @@ func IsPrioritisedContractCall(chainID *big.Int, blockTime *big.Int, to *common. 
case initialGas > chainValue.maxGasLimit: return false case *to == prioritisedFTSOContractAddress: - if blockTime.Cmp(chainValue.dataPrefixActivationTime) > 0 { + if blockTime > chainValue.dataPrefixActivationTime { return checkDataPrefix(data, chainValue.ftsoDataPrefixes) } return true - case *to == chainValue.submitterAddress && blockTime.Cmp(chainValue.submitterActivationTime) > 0 && !isZeroSlice(ret): - if blockTime.Cmp(chainValue.dataPrefixActivationTime) > 0 { + case *to == chainValue.submitterAddress && blockTime > chainValue.submitterActivationTime && !isZeroSlice(ret): + if blockTime > chainValue.dataPrefixActivationTime { return len(data) <= prioritisedCallDataCap && checkDataPrefix(data, chainValue.submitterDataPrefixes) } return true @@ -206,9 +198,9 @@ func IsPrioritisedContractCall(chainID *big.Int, blockTime *big.Int, to *common. } } -func GetMaximumMintRequest(chainID *big.Int, blockTime *big.Int) *big.Int { +func GetMaximumMintRequest(chainID *big.Int, blockTime uint64) *big.Int { switch { - case chainID.Cmp(params.FlareChainID) == 0 || chainID.Cmp(params.CostwoChainID) == 0 || chainID.Cmp(params.LocalFlareChainID) == 0 || chainID.Cmp(params.StagingChainID) == 0: + case chainID.Cmp(params.FlareChainID) == 0 || chainID.Cmp(params.CostwoChainID) == 0 || chainID.Cmp(params.LocalFlareChainID) == 0: maxRequest, _ := new(big.Int).SetString("60000000000000000000000000", 10) return maxRequest default: // Songbird, Coston diff --git a/coreth/core/daemon_test.go b/coreth/core/daemon_test.go index d4373b5b..45a507dc 100644 --- a/coreth/core/daemon_test.go +++ b/coreth/core/daemon_test.go @@ -22,7 +22,7 @@ type MockEVMCallerData struct { addBalanceCalls int revertToSnapshotCalls int lastSnapshotValue int - blockTime big.Int + blockTime uint64 gasLimit uint64 mintRequestReturn big.Int lastAddBalanceAddr common.Address @@ -47,8 +47,8 @@ func defaultRevertToSnapshot(e *MockEVMCallerData, snapshot int) { e.lastSnapshotValue = snapshot } -func defaultGetBlockTime(e 
*MockEVMCallerData) *big.Int { - return &e.blockTime +func defaultGetBlockTime(e *MockEVMCallerData) uint64 { + return e.blockTime } func defaultGetGasLimit(e *MockEVMCallerData) uint64 { @@ -74,7 +74,7 @@ func (e *DefaultEVMMock) DaemonRevertToSnapshot(snapshot int) { defaultRevertToSnapshot(&e.mockEVMCallerData, snapshot) } -func (e *DefaultEVMMock) GetBlockTime() *big.Int { +func (e *DefaultEVMMock) GetBlockTime() uint64 { return defaultGetBlockTime(&e.mockEVMCallerData) } @@ -93,7 +93,7 @@ func (e *DefaultEVMMock) AddBalance(addr common.Address, amount *big.Int) { func TestDaemonShouldReturnMintRequest(t *testing.T) { mintRequestReturn, _ := new(big.Int).SetString("60000000000000000000000000", 10) mockEVMCallerData := &MockEVMCallerData{ - blockTime: *big.NewInt(0), + blockTime: 0, gasLimit: 0, mintRequestReturn: *mintRequestReturn, } @@ -115,7 +115,7 @@ func TestDaemonShouldNotLetMintRequestOverflow(t *testing.T) { mintRequestReturn.SetBytes(buffer) mockEVMCallerData := &MockEVMCallerData{ - blockTime: *big.NewInt(0), + blockTime: 0, gasLimit: 0, mintRequestReturn: mintRequestReturn, } @@ -154,7 +154,7 @@ func (e *BadMintReturnSizeEVMMock) DaemonRevertToSnapshot(snapshot int) { defaultRevertToSnapshot(&e.mockEVMCallerData, snapshot) } -func (e *BadMintReturnSizeEVMMock) GetBlockTime() *big.Int { +func (e *BadMintReturnSizeEVMMock) GetBlockTime() uint64 { return defaultGetBlockTime(&e.mockEVMCallerData) } @@ -177,7 +177,7 @@ func TestDaemonValidatesMintRequestReturnValueSize(t *testing.T) { mintRequestReturn.SetBytes(buffer) mockEVMCallerData := &MockEVMCallerData{ - blockTime: *big.NewInt(0), + blockTime: 0, gasLimit: 0, mintRequestReturn: mintRequestReturn, } @@ -213,7 +213,7 @@ func (e *BadDaemonCallEVMMock) DaemonRevertToSnapshot(snapshot int) { defaultRevertToSnapshot(&e.mockEVMCallerData, snapshot) } -func (e *BadDaemonCallEVMMock) GetBlockTime() *big.Int { +func (e *BadDaemonCallEVMMock) GetBlockTime() uint64 { return 
defaultGetBlockTime(&e.mockEVMCallerData) } @@ -308,7 +308,7 @@ func (e *ReturnNilMintRequestEVMMock) DaemonRevertToSnapshot(snapshot int) { defaultRevertToSnapshot(&e.mockEVMCallerData, snapshot) } -func (e *ReturnNilMintRequestEVMMock) GetBlockTime() *big.Int { +func (e *ReturnNilMintRequestEVMMock) GetBlockTime() uint64 { return defaultGetBlockTime(&e.mockEVMCallerData) } @@ -345,7 +345,7 @@ func TestDaemonHandlesNilMintRequest(t *testing.T) { func TestDaemonShouldNotMintMoreThanMax(t *testing.T) { mintRequest, _ := new(big.Int).SetString("60000000000000000000000001", 10) mockEVMCallerData := &MockEVMCallerData{ - blockTime: *big.NewInt(0), + blockTime: 0, gasLimit: 0, mintRequestReturn: *big.NewInt(0), } @@ -359,7 +359,7 @@ func TestDaemonShouldNotMintMoreThanMax(t *testing.T) { if err, ok := err.(*ErrMaxMintExceeded); !ok { want := &ErrMaxMintExceeded{ mintRequest: mintRequest, - mintMax: GetMaximumMintRequest(params.FlareChainID, big.NewInt(0)), + mintMax: GetMaximumMintRequest(params.FlareChainID, 0), } t.Errorf("got '%s' want '%s'", err.Error(), want.Error()) } @@ -371,7 +371,7 @@ func TestDaemonShouldNotMintMoreThanMax(t *testing.T) { func TestDaemonShouldNotMintNegative(t *testing.T) { mintRequest := big.NewInt(-1) mockEVMCallerData := &MockEVMCallerData{ - blockTime: *big.NewInt(0), + blockTime: 0, gasLimit: 0, mintRequestReturn: *big.NewInt(0), } @@ -395,7 +395,7 @@ func TestDaemonShouldMint(t *testing.T) { // Assemble mintRequest, _ := new(big.Int).SetString("60000000000000000000000000", 10) mockEVMCallerData := &MockEVMCallerData{ - blockTime: *big.NewInt(0), + blockTime: 0, gasLimit: 0, mintRequestReturn: *big.NewInt(0), } @@ -411,8 +411,8 @@ func TestDaemonShouldMint(t *testing.T) { if defaultEVMMock.mockEVMCallerData.addBalanceCalls != 1 { t.Errorf("AddBalance not called as expected") } - if defaultEVMMock.mockEVMCallerData.lastAddBalanceAddr.String() != GetDaemonContractAddr(big.NewInt(0)) { - t.Errorf("wanted addr %s; got addr %s", 
GetDaemonContractAddr(big.NewInt(0)), defaultEVMMock.mockEVMCallerData.lastAddBalanceAddr) + if defaultEVMMock.mockEVMCallerData.lastAddBalanceAddr.String() != GetDaemonContractAddr(0) { + t.Errorf("wanted addr %s; got addr %s", GetDaemonContractAddr(0), defaultEVMMock.mockEVMCallerData.lastAddBalanceAddr) } if defaultEVMMock.mockEVMCallerData.lastAddBalanceAmount.Cmp(mintRequest) != 0 { t.Errorf("wanted amount %s; got amount %s", mintRequest.Text(10), defaultEVMMock.mockEVMCallerData.lastAddBalanceAmount.Text(10)) @@ -426,7 +426,7 @@ func TestDaemonShouldNotErrorMintingZero(t *testing.T) { // Assemble mintRequest := big.NewInt(0) mockEVMCallerData := &MockEVMCallerData{ - blockTime: *big.NewInt(0), + blockTime: 0, gasLimit: 0, mintRequestReturn: *big.NewInt(0), } @@ -450,7 +450,7 @@ func TestDaemonShouldNotErrorMintingZero(t *testing.T) { func TestDaemonFiredAndMinted(t *testing.T) { mintRequestReturn, _ := new(big.Int).SetString("60000000000000000000000000", 10) mockEVMCallerData := &MockEVMCallerData{ - blockTime: *big.NewInt(0), + blockTime: 0, gasLimit: 0, mintRequestReturn: *mintRequestReturn, } @@ -474,7 +474,7 @@ func TestDaemonFiredAndMinted(t *testing.T) { func TestDaemonShouldNotMintMoreThanLimit(t *testing.T) { mintRequestReturn, _ := new(big.Int).SetString("60000000000000000000000001", 10) mockEVMCallerData := &MockEVMCallerData{ - blockTime: *big.NewInt(0), + blockTime: 0, gasLimit: 0, mintRequestReturn: *mintRequestReturn, } @@ -497,9 +497,9 @@ func TestDaemonShouldNotMintMoreThanLimit(t *testing.T) { func TestPrioritisedContract(t *testing.T) { address := common.HexToAddress("0x123456789aBCdEF123456789aBCdef123456789A") - preForkTime := big.NewInt(time.Date(2024, time.March, 20, 12, 0, 0, 0, time.UTC).Unix()) - postForkTime := big.NewInt(time.Date(2024, time.March, 27, 12, 0, 0, 0, time.UTC).Unix()) - postPrefixForkTime := big.NewInt(time.Date(2024, time.October, 11, 0, 0, 0, 0, time.UTC).Unix()) + preForkTime := uint64(time.Date(2024, time.March, 
20, 12, 0, 0, 0, time.UTC).Unix()) + postForkTime := uint64(time.Date(2024, time.March, 27, 12, 0, 0, 0, time.UTC).Unix()) + postPrefixForkTime := uint64(time.Date(2024, time.October, 11, 0, 0, 0, 0, time.UTC).Unix()) initialGas := uint64(0) ret0 := [32]byte{} ret1 := [32]byte{} diff --git a/coreth/core/error.go b/coreth/core/error.go index 955049ee..0137b5da 100644 --- a/coreth/core/error.go +++ b/coreth/core/error.go @@ -68,6 +68,10 @@ var ( // have enough funds for transfer(topmost call only). ErrInsufficientFundsForTransfer = errors.New("insufficient funds for transfer") + // ErrMaxInitCodeSizeExceeded is returned if creation transaction provides the init code bigger + // than init code size limit. + ErrMaxInitCodeSizeExceeded = errors.New("max initcode size exceeded") + // ErrInsufficientFunds is returned if the total cost of executing a transaction // is higher than the balance of the user's account. ErrInsufficientFunds = errors.New("insufficient funds for gas * price + value") diff --git a/coreth/core/events.go b/coreth/core/events.go index 4898dbc0..462d26d9 100644 --- a/coreth/core/events.go +++ b/coreth/core/events.go @@ -36,7 +36,7 @@ type NewTxsEvent struct{ Txs []*types.Transaction } // NewTxPoolHeadEvent is posted when the pool receives a request to update // its head to [Block]. -type NewTxPoolHeadEvent struct{ Block *types.Block } +type NewTxPoolHeadEvent struct{ Head *types.Header } // NewTxPoolReorgEvent is posted when the pool head is updated. 
type NewTxPoolReorgEvent struct{ Head *types.Header } diff --git a/coreth/core/evm.go b/coreth/core/evm.go index 3f373e06..7be97f3b 100644 --- a/coreth/core/evm.go +++ b/coreth/core/evm.go @@ -32,7 +32,9 @@ import ( "github.com/ava-labs/coreth/consensus" "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/core/vm" + "github.com/ava-labs/coreth/predicate" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" //"github.com/ethereum/go-ethereum/log" ) @@ -48,6 +50,33 @@ type ChainContext interface { // NewEVMBlockContext creates a new context for use in the EVM. func NewEVMBlockContext(header *types.Header, chain ChainContext, author *common.Address) vm.BlockContext { + predicateBytes, ok := predicate.GetPredicateResultBytes(header.Extra) + if !ok { + return newEVMBlockContext(header, chain, author, nil) + } + // Prior to Durango, the VM enforces the extra data is smaller than or + // equal to this size. After Durango, the VM pre-verifies the extra + // data past the dynamic fee rollup window is valid. + predicateResults, err := predicate.ParseResults(predicateBytes) + if err != nil { + log.Error("failed to parse predicate results creating new block context", "err", err, "extra", header.Extra) + // As mentioned above, we pre-verify the extra data to ensure this never happens. + // If we hit an error, construct a new block context rather than use a potentially half initialized value + // as defense in depth. + return newEVMBlockContext(header, chain, author, nil) + } + return newEVMBlockContext(header, chain, author, predicateResults) +} + +// NewEVMBlockContextWithPredicateResults creates a new context for use in the EVM with an override for the predicate results that is not present +// in header.Extra. 
+// This function is used to create a BlockContext when the header Extra data is not fully formed yet and it's more efficient to pass in predicateResults +directly rather than re-encode the latest results when executing each individual transaction. +func NewEVMBlockContextWithPredicateResults(header *types.Header, chain ChainContext, author *common.Address, predicateResults *predicate.Results) vm.BlockContext { + return newEVMBlockContext(header, chain, author, predicateResults) +} + +func newEVMBlockContext(header *types.Header, chain ChainContext, author *common.Address, predicateResults *predicate.Results) vm.BlockContext { var ( beneficiary common.Address baseFee *big.Int @@ -68,9 +97,10 @@ func NewEVMBlockContext(header *types.Header, chain ChainContext, author *common Transfer: Transfer, TransferMultiCoin: TransferMultiCoin, GetHash: GetHashFn(header, chain), + PredicateResults: predicateResults, Coinbase: beneficiary, BlockNumber: new(big.Int).Set(header.Number), - Time: new(big.Int).SetUint64(header.Time), + Time: header.Time, Difficulty: new(big.Int).Set(header.Difficulty), BaseFee: baseFee, GasLimit: header.GasLimit, @@ -78,10 +108,10 @@ func NewEVMBlockContext(header *types.Header, chain ChainContext, author *common } // NewEVMTxContext creates a new transaction context for a single transaction. -func NewEVMTxContext(msg Message) vm.TxContext { +func NewEVMTxContext(msg *Message) vm.TxContext { return vm.TxContext{ - Origin: msg.From(), - GasPrice: new(big.Int).Set(msg.GasPrice()), + Origin: msg.From, + GasPrice: new(big.Int).Set(msg.GasPrice), } } diff --git a/coreth/core/fifo_cache.go b/coreth/core/fifo_cache.go index c941382f..63b65696 100644 --- a/coreth/core/fifo_cache.go +++ b/coreth/core/fifo_cache.go @@ -58,8 +58,9 @@ func (f *BufferFIFOCache[K, V]) Get(key K) (V, bool) { // remove is used as the callback in [BoundedBuffer]. It is assumed that the // [WriteLock] is held when this is accessed.
-func (f *BufferFIFOCache[K, V]) remove(key K) { +func (f *BufferFIFOCache[K, V]) remove(key K) error { delete(f.m, key) + return nil } type NoOpFIFOCache[K comparable, V any] struct{} diff --git a/coreth/core/gaspool.go b/coreth/core/gaspool.go index b2091b88..a5c6be1c 100644 --- a/coreth/core/gaspool.go +++ b/coreth/core/gaspool.go @@ -59,6 +59,11 @@ func (gp *GasPool) Gas() uint64 { return uint64(*gp) } +// SetGas sets the amount of gas with the provided number. +func (gp *GasPool) SetGas(gas uint64) { + *(*uint64)(gp) = gas +} + func (gp *GasPool) String() string { return fmt.Sprintf("%d", *gp) } diff --git a/coreth/core/genesis.go b/coreth/core/genesis.go index 54da8dff..420b9c40 100644 --- a/coreth/core/genesis.go +++ b/coreth/core/genesis.go @@ -37,12 +37,12 @@ import ( "github.com/ava-labs/coreth/core/rawdb" "github.com/ava-labs/coreth/core/state" "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/ethdb" "github.com/ava-labs/coreth/params" "github.com/ava-labs/coreth/trie" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/math" + "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" ) @@ -157,12 +157,16 @@ func (e *GenesisMismatchError) Error() string { // +------------------------------------------ // db has no genesis | main-net default | genesis // db has genesis | from DB | genesis (if compatible) + +// The argument [genesis] must be specified and must contain a valid chain config. +// If the genesis block has already been set up, then we verify the hash matches the genesis passed in +// and that the chain config contained in genesis is backwards compatible with what is stored in the database. // // The stored chain configuration will be updated if it is compatible (i.e. does not // specify a fork block below the local head block). 
In case of a conflict, the // error is a *params.ConfigCompatError and the new, unwritten config is returned. func SetupGenesisBlock( - db ethdb.Database, genesis *Genesis, lastAcceptedHash common.Hash, skipChainConfigCheckCompatible bool, + db ethdb.Database, triedb *trie.Database, genesis *Genesis, lastAcceptedHash common.Hash, skipChainConfigCheckCompatible bool, ) (*params.ChainConfig, common.Hash, error) { if genesis == nil { return nil, common.Hash{}, ErrNoGenesis @@ -174,7 +178,7 @@ func SetupGenesisBlock( stored := rawdb.ReadCanonicalHash(db, 0) if (stored == common.Hash{}) { log.Info("Writing genesis to database") - block, err := genesis.Commit(db) + block, err := genesis.Commit(db, triedb) if err != nil { return genesis.Config, common.Hash{}, err } @@ -182,17 +186,17 @@ func SetupGenesisBlock( } // We have the genesis block in database but the corresponding state is missing. header := rawdb.ReadHeader(db, stored, 0) - if _, err := state.New(header.Root, state.NewDatabase(db), nil); err != nil { + if header.Root != types.EmptyRootHash && !rawdb.HasLegacyTrieNode(db, header.Root) { // Ensure the stored genesis matches with the given one. - hash := genesis.ToBlock(nil).Hash() + hash := genesis.ToBlock().Hash() if hash != stored { return genesis.Config, common.Hash{}, &GenesisMismatchError{stored, hash} } - _, err := genesis.Commit(db) + _, err := genesis.Commit(db, triedb) return genesis.Config, common.Hash{}, err } // Check whether the genesis block is already written. - hash := genesis.ToBlock(nil).Hash() + hash := genesis.ToBlock().Hash() if hash != stored { return genesis.Config, common.Hash{}, &GenesisMismatchError{stored, hash} } @@ -207,6 +211,7 @@ func SetupGenesisBlock( rawdb.WriteChainConfig(db, stored, newcfg) return newcfg, stored, nil } + storedData, _ := json.Marshal(storedcfg) // Check config compatibility and write the config. Compatibility errors // are returned to the caller unless we're already at block zero. 
// we use last accepted block for cfg compatibility check. Note this allows @@ -225,21 +230,27 @@ func SetupGenesisBlock( log.Info("skipping verifying activated network upgrades on chain config") } else { compatErr := storedcfg.CheckCompatible(newcfg, height, timestamp) - if compatErr != nil && height != 0 && compatErr.RewindTo != 0 { + if compatErr != nil && ((height != 0 && compatErr.RewindToBlock != 0) || (timestamp != 0 && compatErr.RewindToTime != 0)) { return newcfg, stored, compatErr } } - rawdb.WriteChainConfig(db, stored, newcfg) + // Don't overwrite if the old is identical to the new + if newData, _ := json.Marshal(newcfg); !bytes.Equal(storedData, newData) { + rawdb.WriteChainConfig(db, stored, newcfg) + } return newcfg, stored, nil } // ToBlock creates the genesis block and writes state of a genesis specification // to the given database (or discards it if nil). -func (g *Genesis) ToBlock(db ethdb.Database) *types.Block { - if db == nil { - db = rawdb.NewMemoryDatabase() - } - statedb, err := state.New(common.Hash{}, state.NewDatabase(db), nil) +func (g *Genesis) ToBlock() *types.Block { + db := rawdb.NewMemoryDatabase() + return g.toBlock(db, trie.NewDatabase(db)) +} + +// TODO: migrate this function to "flush" for more similarity with upstream. +func (g *Genesis) toBlock(db ethdb.Database, triedb *trie.Database) *types.Block { + statedb, err := state.New(types.EmptyRootHash, state.NewDatabaseWithNodeDB(db, triedb), nil) if err != nil { panic(err) } @@ -259,7 +270,10 @@ func (g *Genesis) ToBlock(db ethdb.Database) *types.Block { } // Configure any stateful precompiles that should be enabled in the genesis. 
- g.Config.CheckConfigurePrecompiles(nil, types.NewBlockWithHeader(head), statedb) + err = ApplyPrecompileActivations(g.Config, nil, types.NewBlockWithHeader(head), statedb) + if err != nil { + panic(fmt.Sprintf("unable to configure precompiles in genesis block: %v", err)) + } for addr, account := range g.Alloc { statedb.AddBalance(addr, account.Balance) @@ -283,7 +297,7 @@ func (g *Genesis) ToBlock(db ethdb.Database) *types.Block { if g.Difficulty == nil { head.Difficulty = params.GenesisDifficulty } - if g.Config != nil && g.Config.IsApricotPhase3(common.Big0) { + if g.Config != nil && g.Config.IsApricotPhase3(0) { if g.BaseFee != nil { head.BaseFee = g.BaseFee } else { @@ -291,17 +305,19 @@ func (g *Genesis) ToBlock(db ethdb.Database) *types.Block { } } statedb.Commit(false, false) - if err := statedb.Database().TrieDB().Commit(root, true, nil); err != nil { - panic(fmt.Sprintf("unable to commit genesis block: %v", err)) + // Commit newly generated states into disk if it's not empty. + if root != types.EmptyRootHash { + if err := triedb.Commit(root, true); err != nil { + panic(fmt.Sprintf("unable to commit genesis block: %v", err)) + } } - - return types.NewBlock(head, nil, nil, nil, trie.NewStackTrie(nil), nil, false) + return types.NewBlock(head, nil, nil, nil, trie.NewStackTrie(nil)) } // Commit writes the block and state of a genesis specification to the database. // The block is committed as the canonical head block. -func (g *Genesis) Commit(db ethdb.Database) (*types.Block, error) { - block := g.ToBlock(db) +func (g *Genesis) Commit(db ethdb.Database, triedb *trie.Database) (*types.Block, error) { + block := g.toBlock(db, triedb) if block.Number().Sign() != 0 { return nil, errors.New("can't commit genesis block with number > 0") } @@ -323,8 +339,10 @@ func (g *Genesis) Commit(db ethdb.Database) (*types.Block, error) { // MustCommit writes the genesis block and state to db, panicking on error. // The block is committed as the canonical head block. 
+// Note the state changes will be committed in hash-based scheme, use Commit +// if path-scheme is preferred. func (g *Genesis) MustCommit(db ethdb.Database) *types.Block { - block, err := g.Commit(db) + block, err := g.Commit(db, trie.NewDatabase(db)) if err != nil { panic(err) } diff --git a/coreth/core/genesis_test.go b/coreth/core/genesis_test.go index ebfa92f5..3af99b35 100644 --- a/coreth/core/genesis_test.go +++ b/coreth/core/genesis_test.go @@ -35,15 +35,17 @@ import ( "github.com/ava-labs/coreth/consensus/dummy" "github.com/ava-labs/coreth/core/rawdb" "github.com/ava-labs/coreth/core/vm" - "github.com/ava-labs/coreth/ethdb" "github.com/ava-labs/coreth/params" + "github.com/ava-labs/coreth/trie" + "github.com/ava-labs/coreth/utils" "github.com/davecgh/go-spew/spew" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethdb" "github.com/stretchr/testify/require" ) -func setupGenesisBlock(db ethdb.Database, genesis *Genesis, lastAcceptedHash common.Hash) (*params.ChainConfig, common.Hash, error) { - return SetupGenesisBlock(db, genesis, lastAcceptedHash, false) +func setupGenesisBlock(db ethdb.Database, triedb *trie.Database, genesis *Genesis, lastAcceptedHash common.Hash) (*params.ChainConfig, common.Hash, error) { + return SetupGenesisBlock(db, triedb, genesis, lastAcceptedHash, false) } func TestGenesisBlockForTesting(t *testing.T) { @@ -56,7 +58,7 @@ func TestGenesisBlockForTesting(t *testing.T) { func TestSetupGenesis(t *testing.T) { apricotPhase1Config := *params.TestApricotPhase1Config - apricotPhase1Config.ApricotPhase1BlockTimestamp = big.NewInt(100) + apricotPhase1Config.ApricotPhase1BlockTimestamp = utils.NewUint64(100) var ( customghash = common.HexToHash("0x1099a11e9e454bd3ef31d688cf21936671966407bc330f051d754b5ce401e7ed") customg = Genesis{ @@ -69,7 +71,7 @@ func TestSetupGenesis(t *testing.T) { ) rollbackApricotPhase1Config := apricotPhase1Config - rollbackApricotPhase1Config.ApricotPhase1BlockTimestamp = big.NewInt(90) 
+ rollbackApricotPhase1Config.ApricotPhase1BlockTimestamp = utils.NewUint64(90) oldcustomg.Config = &rollbackApricotPhase1Config tests := []struct { name string @@ -81,7 +83,7 @@ func TestSetupGenesis(t *testing.T) { { name: "genesis without ChainConfig", fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) { - return setupGenesisBlock(db, new(Genesis), common.Hash{}) + return setupGenesisBlock(db, trie.NewDatabase(db), new(Genesis), common.Hash{}) }, wantErr: errGenesisNoConfig, wantConfig: nil, @@ -89,7 +91,7 @@ func TestSetupGenesis(t *testing.T) { { name: "no block in DB, genesis == nil", fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) { - return setupGenesisBlock(db, nil, common.Hash{}) + return setupGenesisBlock(db, trie.NewDatabase(db), nil, common.Hash{}) }, wantErr: ErrNoGenesis, wantConfig: nil, @@ -98,7 +100,7 @@ func TestSetupGenesis(t *testing.T) { name: "custom block in DB, genesis == nil", fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) { customg.MustCommit(db) - return setupGenesisBlock(db, nil, common.Hash{}) + return setupGenesisBlock(db, trie.NewDatabase(db), nil, common.Hash{}) }, wantErr: ErrNoGenesis, wantConfig: nil, @@ -107,7 +109,7 @@ func TestSetupGenesis(t *testing.T) { name: "compatible config in DB", fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) { oldcustomg.MustCommit(db) - return setupGenesisBlock(db, &customg, customghash) + return setupGenesisBlock(db, trie.NewDatabase(db), &customg, customghash) }, wantHash: customghash, wantConfig: customg.Config, @@ -132,15 +134,15 @@ func TestSetupGenesis(t *testing.T) { } // This should return a compatibility error. 
- return setupGenesisBlock(db, &customg, bc.lastAccepted.Hash()) + return setupGenesisBlock(db, trie.NewDatabase(db), &customg, bc.lastAccepted.Hash()) }, wantHash: customghash, wantConfig: customg.Config, wantErr: &params.ConfigCompatError{ What: "ApricotPhase1 fork block timestamp", - StoredConfig: big.NewInt(90), - NewConfig: big.NewInt(100), - RewindTo: 89, + StoredTime: u64(90), + NewTime: u64(100), + RewindToTime: 89, }, }, } @@ -173,7 +175,6 @@ func TestSetupGenesis(t *testing.T) { // regression test for precompile activation after header block func TestNetworkUpgradeBetweenHeadAndAcceptedBlock(t *testing.T) { db := rawdb.NewMemoryDatabase() - customg := Genesis{ Config: params.TestApricotPhase1Config, Alloc: GenesisAlloc{ @@ -198,22 +199,22 @@ func TestNetworkUpgradeBetweenHeadAndAcceptedBlock(t *testing.T) { require.Equal(blocks[1].Hash(), bc.lastAccepted.Hash()) // header must be bigger than last accepted - require.Greater(block.Time(), bc.lastAccepted.Time()) + require.Greater(block.Time, bc.lastAccepted.Time()) activatedGenesis := customg - apricotPhase2Timestamp := big.NewInt(51) + apricotPhase2Timestamp := utils.NewUint64(51) updatedApricotPhase2Config := *params.TestApricotPhase1Config updatedApricotPhase2Config.ApricotPhase2BlockTimestamp = apricotPhase2Timestamp activatedGenesis.Config = &updatedApricotPhase2Config // assert block is after the activation block - require.Greater(block.Time(), apricotPhase2Timestamp.Uint64()) + require.Greater(block.Time, *apricotPhase2Timestamp) // assert last accepted block is before the activation block - require.Less(bc.lastAccepted.Time(), apricotPhase2Timestamp.Uint64()) + require.Less(bc.lastAccepted.Time(), *apricotPhase2Timestamp) // This should not return any error since the last accepted block is before the activation block.
- config, _, err := setupGenesisBlock(db, &activatedGenesis, bc.lastAccepted.Hash()) + config, _, err := setupGenesisBlock(db, trie.NewDatabase(db), &activatedGenesis, bc.lastAccepted.Hash()) require.NoError(err) if !reflect.DeepEqual(config, activatedGenesis.Config) { t.Errorf("returned %v\nwant %v", config, activatedGenesis.Config) diff --git a/coreth/core/governance_settings.go b/coreth/core/governance_settings.go index 0743ebda..6215e90c 100644 --- a/coreth/core/governance_settings.go +++ b/coreth/core/governance_settings.go @@ -14,38 +14,32 @@ import ( ) var ( - flareGovActivationTime = big.NewInt(time.Date(2022, time.September, 9, 0, 0, 0, 0, time.UTC).Unix()) - costwoGovActivationTime = big.NewInt(time.Date(2022, time.September, 8, 0, 0, 0, 0, time.UTC).Unix()) - stagingGovActivationTime = big.NewInt(time.Date(2022, time.January, 1, 0, 0, 0, 0, time.UTC).Unix()) - localFlareGovActivationTime = big.NewInt(time.Date(2022, time.January, 1, 0, 0, 0, 0, time.UTC).Unix()) - - flareInitialAirdropChangeActivationTime = big.NewInt(time.Date(2022, time.November, 10, 15, 0, 0, 0, time.UTC).Unix()) - costwoInitialAirdropChangeActivationTime = big.NewInt(time.Date(2022, time.October, 27, 20, 0, 0, 0, time.UTC).Unix()) - localFlareInitialAirdropChangeActivationTime = big.NewInt(time.Date(2022, time.January, 1, 0, 0, 0, 0, time.UTC).Unix()) - - flareDistributionChangeActivationTime = big.NewInt(time.Date(2023, time.March, 1, 15, 0, 0, 0, time.UTC).Unix()) - costwoDistributionChangeActivationTime = big.NewInt(time.Date(2023, time.January, 26, 15, 0, 0, 0, time.UTC).Unix()) - localFlareDistributionChangeActivationTime = big.NewInt(time.Date(2023, time.January, 1, 0, 0, 0, 0, time.UTC).Unix()) + flareGovActivationTime = uint64(time.Date(2022, time.September, 9, 0, 0, 0, 0, time.UTC).Unix()) + costwoGovActivationTime = uint64(time.Date(2022, time.September, 8, 0, 0, 0, 0, time.UTC).Unix()) + localFlareGovActivationTime = uint64(time.Date(2022, time.January, 1, 0, 0, 0, 0, 
time.UTC).Unix()) + + flareInitialAirdropChangeActivationTime = uint64(time.Date(2022, time.November, 10, 15, 0, 0, 0, time.UTC).Unix()) + costwoInitialAirdropChangeActivationTime = uint64(time.Date(2022, time.October, 27, 20, 0, 0, 0, time.UTC).Unix()) + localFlareInitialAirdropChangeActivationTime = uint64(time.Date(2022, time.January, 1, 0, 0, 0, 0, time.UTC).Unix()) + + flareDistributionChangeActivationTime = uint64(time.Date(2023, time.March, 1, 15, 0, 0, 0, time.UTC).Unix()) + costwoDistributionChangeActivationTime = uint64(time.Date(2023, time.January, 26, 15, 0, 0, 0, time.UTC).Unix()) + localFlareDistributionChangeActivationTime = uint64(time.Date(2023, time.January, 1, 0, 0, 0, 0, time.UTC).Unix()) ) -func GetGovernanceSettingIsActivatedAndCalled(chainID *big.Int, blockTime *big.Int, to common.Address) bool { +func GetGovernanceSettingIsActivatedAndCalled(chainID *big.Int, blockTime uint64, to common.Address) bool { switch { - case chainID.Cmp(params.FlareChainID) == 0 && blockTime.Cmp(flareGovActivationTime) >= 0: - switch blockTime { - default: - return to == common.HexToAddress("0x1000000000000000000000000000000000000007") - } - case chainID.Cmp(params.CostwoChainID) == 0 && blockTime.Cmp(costwoGovActivationTime) >= 0: + case chainID.Cmp(params.FlareChainID) == 0 && blockTime >= flareGovActivationTime: switch blockTime { default: return to == common.HexToAddress("0x1000000000000000000000000000000000000007") } - case chainID.Cmp(params.StagingChainID) == 0 && blockTime.Cmp(stagingGovActivationTime) >= 0: + case chainID.Cmp(params.CostwoChainID) == 0 && blockTime >= costwoGovActivationTime: switch blockTime { default: return to == common.HexToAddress("0x1000000000000000000000000000000000000007") } - case chainID.Cmp(params.LocalFlareChainID) == 0 && blockTime.Cmp(localFlareGovActivationTime) >= 0: + case chainID.Cmp(params.LocalFlareChainID) == 0 && blockTime >= localFlareGovActivationTime: switch blockTime { default: return to == 
common.HexToAddress("0x1000000000000000000000000000000000000007") @@ -55,19 +49,19 @@ func GetGovernanceSettingIsActivatedAndCalled(chainID *big.Int, blockTime *big.I } } -func GetInitialAirdropChangeIsActivatedAndCalled(chainID *big.Int, blockTime *big.Int, to common.Address) bool { +func GetInitialAirdropChangeIsActivatedAndCalled(chainID *big.Int, blockTime uint64, to common.Address) bool { switch { - case chainID.Cmp(params.FlareChainID) == 0 && blockTime.Cmp(flareInitialAirdropChangeActivationTime) >= 0: + case chainID.Cmp(params.FlareChainID) == 0 && blockTime >= flareInitialAirdropChangeActivationTime: switch blockTime { default: return to == common.HexToAddress("0x4AeE563140E36abA778944E2Ca68c3988CAd5730") } - case chainID.Cmp(params.CostwoChainID) == 0 && blockTime.Cmp(costwoInitialAirdropChangeActivationTime) >= 0: + case chainID.Cmp(params.CostwoChainID) == 0 && blockTime >= costwoInitialAirdropChangeActivationTime: switch blockTime { default: return to == common.HexToAddress("0x28561B938342efD0677f60Fd0912e1931367a612") } - case chainID.Cmp(params.LocalFlareChainID) == 0 && blockTime.Cmp(localFlareInitialAirdropChangeActivationTime) >= 0: + case chainID.Cmp(params.LocalFlareChainID) == 0 && blockTime >= localFlareInitialAirdropChangeActivationTime: switch blockTime { default: return to == common.HexToAddress("0x1000000000000000000000000000000000000008") @@ -77,19 +71,19 @@ func GetInitialAirdropChangeIsActivatedAndCalled(chainID *big.Int, blockTime *bi } } -func GetDistributionChangeIsActivatedAndCalled(chainID *big.Int, blockTime *big.Int, to common.Address) bool { +func GetDistributionChangeIsActivatedAndCalled(chainID *big.Int, blockTime uint64, to common.Address) bool { switch { - case chainID.Cmp(params.FlareChainID) == 0 && blockTime.Cmp(flareDistributionChangeActivationTime) >= 0: + case chainID.Cmp(params.FlareChainID) == 0 && blockTime >= flareDistributionChangeActivationTime: switch blockTime { default: return to == 
common.HexToAddress("0x4d1c42F41555Ae35DfC1819bd718f7D9Fb28abdD") } - case chainID.Cmp(params.CostwoChainID) == 0 && blockTime.Cmp(costwoDistributionChangeActivationTime) >= 0: + case chainID.Cmp(params.CostwoChainID) == 0 && blockTime >= costwoDistributionChangeActivationTime: switch blockTime { default: return to == common.HexToAddress("0xdF1deD5f1905C5012cbeE8367e3F4849afEAE545") } - case chainID.Cmp(params.LocalFlareChainID) == 0 && blockTime.Cmp(localFlareDistributionChangeActivationTime) >= 0: + case chainID.Cmp(params.LocalFlareChainID) == 0 && blockTime >= localFlareDistributionChangeActivationTime: switch blockTime { default: return to == common.HexToAddress("0x1000000000000000000000000000000000000009") @@ -103,21 +97,21 @@ func GetDistributionChangeIsActivatedAndCalled(chainID *big.Int, blockTime *big. // address public constant SIGNAL_COINBASE = address(0x00000000000000000000000000000000000DEaD0); //https://gitlab.com/flarenetwork/flare-smart-contracts/-/blob/4bb79bfe7266b43ea46e681f8a86ab8b9ef36446/contracts/genesis/implementation/GovernanceSettings.sol#L17 -func GetGovernanceSettingsCoinbaseSignalAddr(chainID *big.Int, blockTime *big.Int) common.Address { +func GetGovernanceSettingsCoinbaseSignalAddr(chainID *big.Int, blockTime uint64) common.Address { switch { default: return common.HexToAddress("0x00000000000000000000000000000000000DEaD0") } } -func GetInitialAirdropChangeCoinbaseSignalAddr(chainID *big.Int, blockTime *big.Int) common.Address { +func GetInitialAirdropChangeCoinbaseSignalAddr(chainID *big.Int, blockTime uint64) common.Address { switch { default: return common.HexToAddress("0x00000000000000000000000000000000000dead2") } } -func GetDistributionChangeCoinbaseSignalAddr(chainID *big.Int, blockTime *big.Int) common.Address { +func GetDistributionChangeCoinbaseSignalAddr(chainID *big.Int, blockTime uint64) common.Address { switch { default: return common.HexToAddress("0x00000000000000000000000000000000000deAD3") @@ -127,7 +121,7 @@ func 
GetDistributionChangeCoinbaseSignalAddr(chainID *big.Int, blockTime *big.In // function setGovernanceAddress(address _newGovernance) external // https://gitlab.com/flarenetwork/flare-smart-contracts/-/blob/4bb79bfe7266b43ea46e681f8a86ab8b9ef36446/contracts/genesis/implementation/GovernanceSettings.sol#L73 -func SetGovernanceAddressSelector(chainID *big.Int, blockTime *big.Int) []byte { +func SetGovernanceAddressSelector(chainID *big.Int, blockTime uint64) []byte { switch { default: return []byte{0xcf, 0xc1, 0x62, 0x54} @@ -137,7 +131,7 @@ func SetGovernanceAddressSelector(chainID *big.Int, blockTime *big.Int) []byte { // function setTimelock(uint256 _newTimelock) external // https://gitlab.com/flarenetwork/flare-smart-contracts/-/blob/4bb79bfe7266b43ea46e681f8a86ab8b9ef36446/contracts/genesis/implementation/GovernanceSettings.sol#L85 -func SetTimelockSelector(chainID *big.Int, blockTime *big.Int) []byte { +func SetTimelockSelector(chainID *big.Int, blockTime uint64) []byte { switch { default: return []byte{0x1e, 0x89, 0x1c, 0x0a} @@ -147,7 +141,7 @@ func SetTimelockSelector(chainID *big.Int, blockTime *big.Int) []byte { // function updateInitialAirdropAddress() external // https://flare-explorer.flare.network/address/0x4AeE563140E36abA778944E2Ca68c3988CAd5730/contracts#address-tabs -func UpdateInitialAirdropAddressSelector(chainID *big.Int, blockTime *big.Int) []byte { +func UpdateInitialAirdropAddressSelector(chainID *big.Int, blockTime uint64) []byte { switch { default: return []byte{0x7d, 0x1f, 0x99, 0x46} @@ -157,14 +151,14 @@ func UpdateInitialAirdropAddressSelector(chainID *big.Int, blockTime *big.Int) [ // function updateDistributionAddress() external // https://flare-explorer.flare.network/address/0x4d1c42F41555Ae35DfC1819bd718f7D9Fb28abdD/contracts#address-tabs -func UpdateDistributionAddressSelector(chainID *big.Int, blockTime *big.Int) []byte { +func UpdateDistributionAddressSelector(chainID *big.Int, blockTime uint64) []byte { switch { default: return 
[]byte{0x5a, 0xce, 0x4f, 0x0d} } } -func NewGovernanceAddressIsPermitted(chainID *big.Int, blockTime *big.Int, newGovernanceAddress common.Address) bool { +func NewGovernanceAddressIsPermitted(chainID *big.Int, blockTime uint64, newGovernanceAddress common.Address) bool { switch { case chainID.Cmp(params.FlareChainID) == 0: switch { @@ -176,14 +170,9 @@ func NewGovernanceAddressIsPermitted(chainID *big.Int, blockTime *big.Int, newGo default: return false } - case chainID.Cmp(params.StagingChainID) == 0: - switch { - default: - return false - } case chainID.Cmp(params.LocalFlareChainID) == 0: switch { - case blockTime.Cmp(big.NewInt(time.Date(2022, time.January, 1, 0, 0, 0, 0, time.UTC).Unix())) >= 0: + case blockTime >= uint64(time.Date(2022, time.January, 1, 0, 0, 0, 0, time.UTC).Unix()): return newGovernanceAddress == common.HexToAddress("0x100000000000000000000000000000000000000f") default: return false @@ -193,32 +182,25 @@ func NewGovernanceAddressIsPermitted(chainID *big.Int, blockTime *big.Int, newGo } } -func NewTimelockIsPermitted(chainID *big.Int, blockTime *big.Int, newTimelock uint64) bool { +func NewTimelockIsPermitted(chainID *big.Int, blockTime uint64, newTimelock uint64) bool { switch { case chainID.Cmp(params.FlareChainID) == 0: switch { - case blockTime.Cmp(big.NewInt(time.Date(2022, time.September, 9, 0, 0, 0, 0, time.UTC).Unix())) >= 0: + case blockTime >= uint64(time.Date(2022, time.September, 9, 0, 0, 0, 0, time.UTC).Unix()): return newTimelock == 3600 default: return false } case chainID.Cmp(params.CostwoChainID) == 0: switch { - case blockTime.Cmp(big.NewInt(time.Date(2022, time.September, 8, 0, 0, 0, 0, time.UTC).Unix())) >= 0: - return newTimelock == 3600 - default: - return false - } - case chainID.Cmp(params.StagingChainID) == 0: - switch { - case blockTime.Cmp(big.NewInt(time.Date(2022, time.January, 1, 0, 0, 0, 0, time.UTC).Unix())) >= 0: + case blockTime >= uint64(time.Date(2022, time.September, 8, 0, 0, 0, 0, time.UTC).Unix()): 
return newTimelock == 3600 default: return false } case chainID.Cmp(params.LocalFlareChainID) == 0: switch { - case blockTime.Cmp(big.NewInt(time.Date(2022, time.January, 1, 0, 0, 0, 0, time.UTC).Unix())) >= 0: + case blockTime >= uint64(time.Date(2022, time.January, 1, 0, 0, 0, 0, time.UTC).Unix()): return newTimelock == 3600 default: return false @@ -228,21 +210,21 @@ func NewTimelockIsPermitted(chainID *big.Int, blockTime *big.Int, newTimelock ui } } -func GetInitialAirdropContractAddress(chainID *big.Int, blockTime *big.Int) common.Address { +func GetInitialAirdropContractAddress(chainID *big.Int, blockTime uint64) common.Address { switch { default: return common.HexToAddress("0x1000000000000000000000000000000000000006") } } -func GetDistributionContractAddress(chainID *big.Int, blockTime *big.Int) common.Address { +func GetDistributionContractAddress(chainID *big.Int, blockTime uint64) common.Address { switch { default: return common.HexToAddress("0x1000000000000000000000000000000000000004") } } -func GetTargetAirdropContractAddress(chainID *big.Int, blockTime *big.Int) common.Address { +func GetTargetAirdropContractAddress(chainID *big.Int, blockTime uint64) common.Address { switch { case chainID.Cmp(params.FlareChainID) == 0: switch { @@ -259,7 +241,7 @@ func GetTargetAirdropContractAddress(chainID *big.Int, blockTime *big.Int) commo } } -func GetTargetDistributionContractAddress(chainID *big.Int, blockTime *big.Int) common.Address { +func GetTargetDistributionContractAddress(chainID *big.Int, blockTime uint64) common.Address { switch { case chainID.Cmp(params.FlareChainID) == 0: switch { @@ -276,7 +258,7 @@ func GetTargetDistributionContractAddress(chainID *big.Int, blockTime *big.Int) } } -func (st *StateTransition) SetGovernanceAddress(chainID *big.Int, timestamp *big.Int, newGovernanceAddress []byte) error { +func (st *StateTransition) SetGovernanceAddress(chainID *big.Int, timestamp uint64, newGovernanceAddress []byte) error { if 
NewGovernanceAddressIsPermitted(chainID, timestamp, common.BytesToAddress(newGovernanceAddress)) { coinbaseSignal := GetGovernanceSettingsCoinbaseSignalAddr(chainID, timestamp) originalCoinbase := st.evm.Context.Coinbase @@ -284,7 +266,7 @@ func (st *StateTransition) SetGovernanceAddress(chainID *big.Int, timestamp *big st.evm.Context.Coinbase = originalCoinbase }() st.evm.Context.Coinbase = coinbaseSignal - _, _, _, err := st.evm.DaemonCall(vm.AccountRef(coinbaseSignal), st.to(), st.data, st.evm.Context.GasLimit) + _, _, _, err := st.evm.DaemonCall(vm.AccountRef(coinbaseSignal), st.to(), st.msg.Data, st.evm.Context.GasLimit) if err != nil { return err } @@ -292,7 +274,7 @@ func (st *StateTransition) SetGovernanceAddress(chainID *big.Int, timestamp *big return nil } -func (st *StateTransition) SetTimelock(chainID *big.Int, timestamp *big.Int, newTimelock []byte) error { +func (st *StateTransition) SetTimelock(chainID *big.Int, timestamp uint64, newTimelock []byte) error { if NewTimelockIsPermitted(chainID, timestamp, binary.BigEndian.Uint64(newTimelock[24:32])) { coinbaseSignal := GetGovernanceSettingsCoinbaseSignalAddr(chainID, timestamp) originalCoinbase := st.evm.Context.Coinbase @@ -300,7 +282,7 @@ func (st *StateTransition) SetTimelock(chainID *big.Int, timestamp *big.Int, new st.evm.Context.Coinbase = originalCoinbase }() st.evm.Context.Coinbase = coinbaseSignal - _, _, _, err := st.evm.DaemonCall(vm.AccountRef(coinbaseSignal), st.to(), st.data, st.evm.Context.GasLimit) + _, _, _, err := st.evm.DaemonCall(vm.AccountRef(coinbaseSignal), st.to(), st.msg.Data, st.evm.Context.GasLimit) if err != nil { return err } @@ -308,40 +290,46 @@ func (st *StateTransition) SetTimelock(chainID *big.Int, timestamp *big.Int, new return nil } -func (st *StateTransition) UpdateInitialAirdropAddress(chainID *big.Int, timestamp *big.Int) error { +func (st *StateTransition) UpdateInitialAirdropAddress(chainID *big.Int, timestamp uint64) error { coinbaseSignal := 
GetInitialAirdropChangeCoinbaseSignalAddr(chainID, timestamp) originalCoinbase := st.evm.Context.Coinbase defer func() { st.evm.Context.Coinbase = originalCoinbase }() st.evm.Context.Coinbase = coinbaseSignal - _, _, _, err := st.evm.DaemonCall(vm.AccountRef(coinbaseSignal), st.to(), st.data, st.evm.Context.GasLimit) + _, _, _, err := st.evm.DaemonCall(vm.AccountRef(coinbaseSignal), st.to(), st.msg.Data, st.evm.Context.GasLimit) if err != nil { return err } initialAirdropAddress := GetInitialAirdropContractAddress(chainID, timestamp) targetAidropAddress := GetTargetAirdropContractAddress(chainID, timestamp) - airdropBalance := st.state.GetBalance(initialAirdropAddress) - st.state.SubBalance(initialAirdropAddress, airdropBalance) - st.state.AddBalance(targetAidropAddress, airdropBalance) + + if initialAirdropAddress != targetAidropAddress { + airdropBalance := st.state.GetBalance(initialAirdropAddress) + st.state.SubBalance(initialAirdropAddress, airdropBalance) + st.state.AddBalance(targetAidropAddress, airdropBalance) + } return nil } -func (st *StateTransition) UpdateDistributionAddress(chainID *big.Int, timestamp *big.Int) error { +func (st *StateTransition) UpdateDistributionAddress(chainID *big.Int, timestamp uint64) error { coinbaseSignal := GetDistributionChangeCoinbaseSignalAddr(chainID, timestamp) originalCoinbase := st.evm.Context.Coinbase defer func() { st.evm.Context.Coinbase = originalCoinbase }() st.evm.Context.Coinbase = coinbaseSignal - _, _, _, err := st.evm.DaemonCall(vm.AccountRef(coinbaseSignal), st.to(), st.data, st.evm.Context.GasLimit) + _, _, _, err := st.evm.DaemonCall(vm.AccountRef(coinbaseSignal), st.to(), st.msg.Data, st.evm.Context.GasLimit) if err != nil { return err } distributionAddress := GetDistributionContractAddress(chainID, timestamp) targetDistributionAddress := GetTargetDistributionContractAddress(chainID, timestamp) - distributionBalance := st.state.GetBalance(distributionAddress) - st.state.SubBalance(distributionAddress, 
distributionBalance) - st.state.AddBalance(targetDistributionAddress, distributionBalance) + + if distributionAddress != targetDistributionAddress { + distributionBalance := st.state.GetBalance(distributionAddress) + st.state.SubBalance(distributionAddress, distributionBalance) + st.state.AddBalance(targetDistributionAddress, distributionBalance) + } return nil } diff --git a/coreth/core/governance_settings_test.go b/coreth/core/governance_settings_test.go index e9c4583f..91e95ff2 100644 --- a/coreth/core/governance_settings_test.go +++ b/coreth/core/governance_settings_test.go @@ -4,7 +4,6 @@ package core import ( - "math/big" "testing" "time" @@ -22,7 +21,7 @@ func TestNewTimelockIsPermittedCostwo(t *testing.T) { // ==================================================================================== // Permitted timelock update: - blockTime := big.NewInt(time.Date(2022, time.September, 8, 0, 0, 0, 0, time.UTC).Unix()) + blockTime := uint64(time.Date(2022, time.September, 8, 0, 0, 0, 0, time.UTC).Unix()) newTimelock := uint64(3600) want := true have := NewTimelockIsPermitted(chainID, blockTime, newTimelock) @@ -31,7 +30,7 @@ func TestNewTimelockIsPermittedCostwo(t *testing.T) { } // Non-permitted timelock update: - blockTime = big.NewInt(time.Date(2022, time.September, 8, 0, 0, 0, 0, time.UTC).Unix()) + blockTime = uint64(time.Date(2022, time.September, 8, 0, 0, 0, 0, time.UTC).Unix()) newTimelock = uint64(0) want = false have = NewTimelockIsPermitted(chainID, blockTime, newTimelock) @@ -40,7 +39,7 @@ func TestNewTimelockIsPermittedCostwo(t *testing.T) { } // Non-permitted timelock update: - blockTime = big.NewInt(time.Date(2022, time.September, 8, 0, 0, 0, 0, time.UTC).Unix()) + blockTime = uint64(time.Date(2022, time.September, 8, 0, 0, 0, 0, time.UTC).Unix()) newTimelock = uint64(1000000) want = false have = NewTimelockIsPermitted(chainID, blockTime, newTimelock) @@ -49,7 +48,7 @@ func TestNewTimelockIsPermittedCostwo(t *testing.T) { } // Non-permitted timelock 
update: - blockTime = big.NewInt(time.Date(2021, time.September, 8, 0, 0, 0, 0, time.UTC).Unix()) + blockTime = uint64(time.Date(2021, time.September, 8, 0, 0, 0, 0, time.UTC).Unix()) newTimelock = uint64(3600) want = false have = NewTimelockIsPermitted(chainID, blockTime, newTimelock) @@ -70,7 +69,7 @@ func TestNewTimelockIsPermittedFlare(t *testing.T) { // ==================================================================================== // Permitted timelock update: - blockTime := big.NewInt(time.Date(2022, time.September, 9, 0, 0, 0, 0, time.UTC).Unix()) + blockTime := uint64(time.Date(2022, time.September, 9, 0, 0, 0, 0, time.UTC).Unix()) newTimelock := uint64(3600) want := true have := NewTimelockIsPermitted(chainID, blockTime, newTimelock) @@ -79,7 +78,7 @@ func TestNewTimelockIsPermittedFlare(t *testing.T) { } // Non-permitted timelock update: - blockTime = big.NewInt(time.Date(2022, time.September, 9, 0, 0, 0, 0, time.UTC).Unix()) + blockTime = uint64(time.Date(2022, time.September, 9, 0, 0, 0, 0, time.UTC).Unix()) newTimelock = uint64(0) want = false have = NewTimelockIsPermitted(chainID, blockTime, newTimelock) @@ -88,7 +87,7 @@ func TestNewTimelockIsPermittedFlare(t *testing.T) { } // Non-permitted timelock update: - blockTime = big.NewInt(time.Date(2022, time.September, 9, 0, 0, 0, 0, time.UTC).Unix()) + blockTime = uint64(time.Date(2022, time.September, 9, 0, 0, 0, 0, time.UTC).Unix()) newTimelock = uint64(1000000) want = false have = NewTimelockIsPermitted(chainID, blockTime, newTimelock) @@ -97,7 +96,7 @@ func TestNewTimelockIsPermittedFlare(t *testing.T) { } // Non-permitted timelock update: - blockTime = big.NewInt(time.Date(2021, time.September, 9, 0, 0, 0, 0, time.UTC).Unix()) + blockTime = uint64(time.Date(2021, time.September, 9, 0, 0, 0, 0, time.UTC).Unix()) newTimelock = uint64(3600) want = false have = NewTimelockIsPermitted(chainID, blockTime, newTimelock) diff --git a/coreth/core/headerchain.go b/coreth/core/headerchain.go index 
b782959a..fe8f752d 100644 --- a/coreth/core/headerchain.go +++ b/coreth/core/headerchain.go @@ -36,10 +36,10 @@ import ( "github.com/ava-labs/coreth/consensus" "github.com/ava-labs/coreth/core/rawdb" "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/ethdb" "github.com/ava-labs/coreth/params" "github.com/ethereum/go-ethereum/common" - lru "github.com/hashicorp/golang-lru" + "github.com/ethereum/go-ethereum/common/lru" + "github.com/ethereum/go-ethereum/ethdb" ) const ( @@ -70,10 +70,9 @@ type HeaderChain struct { currentHeader atomic.Value // Current head of the header chain (may be above the block chain!) currentHeaderHash common.Hash // Hash of the current head of the header chain (prevent recomputing all the time) - headerCache *lru.Cache // Cache for the most recent block headers - tdCache *lru.Cache // Cache for the most recent block total difficulties - numberCache *lru.Cache // Cache for the most recent block numbers - acceptedNumberCache FIFOCache[uint64, *types.Header] // Cache for most recent accepted heights to headers (only modified in accept) + headerCache *lru.Cache[common.Hash, *types.Header] + numberCache *lru.Cache[common.Hash, uint64] // most recent block numbers + acceptedNumberCache FIFOCache[uint64, *types.Header] // most recent accepted heights to headers (only modified in accept) rand *mrand.Rand engine consensus.Engine @@ -82,9 +81,6 @@ type HeaderChain struct { // NewHeaderChain creates a new HeaderChain structure. ProcInterrupt points // to the parent's interrupt semaphore. 
func NewHeaderChain(chainDb ethdb.Database, config *params.ChainConfig, cacheConfig *CacheConfig, engine consensus.Engine) (*HeaderChain, error) { - headerCache, _ := lru.New(headerCacheLimit) - tdCache, _ := lru.New(tdCacheLimit) - numberCache, _ := lru.New(numberCacheLimit) acceptedNumberCache := NewFIFOCache[uint64, *types.Header](cacheConfig.AcceptedCacheSize) // Seed a fast but crypto originating random generator @@ -96,9 +92,8 @@ func NewHeaderChain(chainDb ethdb.Database, config *params.ChainConfig, cacheCon hc := &HeaderChain{ config: config, chainDb: chainDb, - headerCache: headerCache, - tdCache: tdCache, - numberCache: numberCache, + headerCache: lru.NewCache[common.Hash, *types.Header](headerCacheLimit), + numberCache: lru.NewCache[common.Hash, uint64](numberCacheLimit), acceptedNumberCache: acceptedNumberCache, rand: mrand.New(mrand.NewSource(seed.Int64())), engine: engine, @@ -124,8 +119,7 @@ func NewHeaderChain(chainDb ethdb.Database, config *params.ChainConfig, cacheCon // from the cache or database func (hc *HeaderChain) GetBlockNumber(hash common.Hash) *uint64 { if cached, ok := hc.numberCache.Get(hash); ok { - number := cached.(uint64) - return &number + return &cached } number := rawdb.ReadHeaderNumber(hc.chainDb, hash) if number != nil { @@ -139,7 +133,7 @@ func (hc *HeaderChain) GetBlockNumber(hash common.Hash) *uint64 { func (hc *HeaderChain) GetHeader(hash common.Hash, number uint64) *types.Header { // Short circuit if the header's already in the cache, retrieve otherwise if header, ok := hc.headerCache.Get(hash); ok { - return header.(*types.Header) + return header } header := rawdb.ReadHeader(hc.chainDb, hash, number) if header == nil { diff --git a/coreth/core/headerchain_test.go b/coreth/core/headerchain_test.go index 2170dded..b70a9802 100644 --- a/coreth/core/headerchain_test.go +++ b/coreth/core/headerchain_test.go @@ -79,17 +79,19 @@ func TestHeaderInsertion(t *testing.T) { Config: params.TestChainConfig, } ) - genesis := 
gspec.ToBlock(nil) - chain, err := NewBlockChain(db, DefaultCacheConfig, gspec, dummy.NewFaker(), vm.Config{}, common.Hash{}, false) + genesis := gspec.ToBlock() + chain, err := NewBlockChain(db, DefaultCacheConfig, gspec, dummy.NewCoinbaseFaker(), vm.Config{}, common.Hash{}, false) if err != nil { t.Fatal(err) } + defer chain.Stop() + // chain A: G->A1->A2...A128 - chainA, _, _ := GenerateChain(params.TestChainConfig, types.NewBlockWithHeader(genesis.Header()), dummy.NewFaker(), db, 128, 10, func(i int, b *BlockGen) { + chainA, _, _ := GenerateChain(params.TestChainConfig, types.NewBlockWithHeader(genesis.Header()), dummy.NewCoinbaseFaker(), db, 128, 10, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{0: byte(10), 19: byte(i)}) }) // chain B: G->A1->B2...B128 - chainB, _, _ := GenerateChain(params.TestChainConfig, types.NewBlockWithHeader(chainA[0].Header()), dummy.NewFaker(), db, 128, 10, func(i int, b *BlockGen) { + chainB, _, _ := GenerateChain(params.TestChainConfig, types.NewBlockWithHeader(chainA[0].Header()), dummy.NewCoinbaseFaker(), db, 128, 10, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{0: byte(10), 19: byte(i)}) }) diff --git a/coreth/core/main_test.go b/coreth/core/main_test.go new file mode 100644 index 00000000..1d0e299f --- /dev/null +++ b/coreth/core/main_test.go @@ -0,0 +1,22 @@ +// (c) 2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package core + +import ( + "testing" + + "go.uber.org/goleak" +) + +// TestMain uses goleak to verify tests in this package do not leak unexpected +// goroutines. 
+func TestMain(m *testing.M) { + opts := []goleak.Option{ + // No good way to shut down these goroutines: + goleak.IgnoreTopFunction("github.com/ava-labs/coreth/core/state/snapshot.(*diskLayer).generate"), + goleak.IgnoreTopFunction("github.com/ava-labs/coreth/metrics.(*meterArbiter).tick"), + goleak.IgnoreTopFunction("github.com/syndtr/goleveldb/leveldb.(*DB).mpoolDrain"), + } + goleak.VerifyTestMain(m, opts...) +} diff --git a/coreth/core/predicate_check.go b/coreth/core/predicate_check.go new file mode 100644 index 00000000..b4694a25 --- /dev/null +++ b/coreth/core/predicate_check.go @@ -0,0 +1,67 @@ +// (c) 2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package core + +import ( + "errors" + "fmt" + + "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/coreth/core/types" + "github.com/ava-labs/coreth/params" + "github.com/ava-labs/coreth/precompile/precompileconfig" + "github.com/ava-labs/coreth/predicate" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" +) + +var ErrMissingPredicateContext = errors.New("missing predicate context") + +// CheckPredicates verifies the predicates of [tx] and returns the result. Returning an error invalidates the block. +func CheckPredicates(rules params.Rules, predicateContext *precompileconfig.PredicateContext, tx *types.Transaction) (map[common.Address][]byte, error) { + // Check that the transaction can cover its IntrinsicGas (including the gas required by the predicate) before + // verifying the predicate. 
+ intrinsicGas, err := IntrinsicGas(tx.Data(), tx.AccessList(), tx.To() == nil, rules) + if err != nil { + return nil, err + } + if tx.Gas() < intrinsicGas { + return nil, fmt.Errorf("%w for predicate verification (%d) < intrinsic gas (%d)", ErrIntrinsicGas, tx.Gas(), intrinsicGas) + } + + predicateResults := make(map[common.Address][]byte) + // Short circuit early if there are no precompile predicates to verify + if !rules.PredicatersExist() { + return predicateResults, nil + } + + // Prepare the predicate storage slots from the transaction's access list + predicateArguments := predicate.PreparePredicateStorageSlots(rules, tx.AccessList()) + + // If there are no predicates to verify, return early and skip requiring the proposervm block + // context to be populated. + if len(predicateArguments) == 0 { + return predicateResults, nil + } + + if predicateContext == nil || predicateContext.ProposerVMBlockCtx == nil { + return nil, ErrMissingPredicateContext + } + + for address, predicates := range predicateArguments { + // Since [address] is only added to [predicateArguments] when there's a valid predicate in the ruleset + // there's no need to check if the predicate exists here. + predicaterContract := rules.Predicaters[address] + bitset := set.NewBits() + for i, predicate := range predicates { + if err := predicaterContract.VerifyPredicate(predicateContext, predicate); err != nil { + bitset.Add(i) + } + } + res := bitset.Bytes() + log.Debug("predicate verify", "tx", tx.Hash(), "address", address, "res", res) + predicateResults[address] = res + } + return predicateResults, nil +} diff --git a/coreth/core/predicate_check_test.go b/coreth/core/predicate_check_test.go new file mode 100644 index 00000000..11338144 --- /dev/null +++ b/coreth/core/predicate_check_test.go @@ -0,0 +1,461 @@ +// (c) 2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package core + +import ( + "errors" + "testing" + + "github.com/ava-labs/avalanchego/snow/engine/snowman/block" + "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/coreth/core/types" + "github.com/ava-labs/coreth/params" + "github.com/ava-labs/coreth/precompile/precompileconfig" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" +) + +type predicateCheckTest struct { + accessList types.AccessList + gas uint64 + predicateContext *precompileconfig.PredicateContext + createPredicates func(t testing.TB) map[common.Address]precompileconfig.Predicater + expectedRes map[common.Address][]byte + expectedErr error +} + +func TestCheckPredicate(t *testing.T) { + testErr := errors.New("test error") + addr1 := common.HexToAddress("0xaa") + addr2 := common.HexToAddress("0xbb") + addr3 := common.HexToAddress("0xcc") + addr4 := common.HexToAddress("0xdd") + predicateContext := &precompileconfig.PredicateContext{ + ProposerVMBlockCtx: &block.Context{ + PChainHeight: 10, + }, + } + for name, test := range map[string]predicateCheckTest{ + "no predicates, no access list, no context passes": { + gas: 53000, + predicateContext: nil, + expectedRes: make(map[common.Address][]byte), + expectedErr: nil, + }, + "no predicates, no access list, with context passes": { + gas: 53000, + predicateContext: predicateContext, + expectedRes: make(map[common.Address][]byte), + expectedErr: nil, + }, + "no predicates, with access list, no context passes": { + gas: 57300, + predicateContext: nil, + accessList: types.AccessList([]types.AccessTuple{ + { + Address: addr1, + StorageKeys: []common.Hash{ + {1}, + }, + }, + }), + expectedRes: make(map[common.Address][]byte), + expectedErr: nil, + }, + "predicate, no access list, no context passes": { + gas: 53000, + predicateContext: nil, + createPredicates: func(t testing.TB) map[common.Address]precompileconfig.Predicater { + predicater := 
precompileconfig.NewMockPredicater(gomock.NewController(t)) + return map[common.Address]precompileconfig.Predicater{ + addr1: predicater, + } + }, + expectedRes: make(map[common.Address][]byte), + expectedErr: nil, + }, + "predicate, no access list, no block context passes": { + gas: 53000, + predicateContext: &precompileconfig.PredicateContext{ + ProposerVMBlockCtx: nil, + }, + createPredicates: func(t testing.TB) map[common.Address]precompileconfig.Predicater { + predicater := precompileconfig.NewMockPredicater(gomock.NewController(t)) + return map[common.Address]precompileconfig.Predicater{ + addr1: predicater, + } + }, + expectedRes: make(map[common.Address][]byte), + expectedErr: nil, + }, + "predicate named by access list, without context errors": { + gas: 53000, + predicateContext: nil, + createPredicates: func(t testing.TB) map[common.Address]precompileconfig.Predicater { + predicater := precompileconfig.NewMockPredicater(gomock.NewController(t)) + arg := common.Hash{1} + predicater.EXPECT().PredicateGas(arg[:]).Return(uint64(0), nil).Times(1) + return map[common.Address]precompileconfig.Predicater{ + addr1: predicater, + } + }, + accessList: types.AccessList([]types.AccessTuple{ + { + Address: addr1, + StorageKeys: []common.Hash{ + {1}, + }, + }, + }), + expectedErr: ErrMissingPredicateContext, + }, + "predicate named by access list, without block context errors": { + gas: 53000, + predicateContext: &precompileconfig.PredicateContext{ + ProposerVMBlockCtx: nil, + }, + createPredicates: func(t testing.TB) map[common.Address]precompileconfig.Predicater { + predicater := precompileconfig.NewMockPredicater(gomock.NewController(t)) + arg := common.Hash{1} + predicater.EXPECT().PredicateGas(arg[:]).Return(uint64(0), nil).Times(1) + return map[common.Address]precompileconfig.Predicater{ + addr1: predicater, + } + }, + accessList: types.AccessList([]types.AccessTuple{ + { + Address: addr1, + StorageKeys: []common.Hash{ + {1}, + }, + }, + }), + expectedErr: 
ErrMissingPredicateContext, + }, + "predicate named by access list returns non-empty": { + gas: 53000, + predicateContext: predicateContext, + createPredicates: func(t testing.TB) map[common.Address]precompileconfig.Predicater { + predicater := precompileconfig.NewMockPredicater(gomock.NewController(t)) + arg := common.Hash{1} + predicater.EXPECT().PredicateGas(arg[:]).Return(uint64(0), nil).Times(2) + predicater.EXPECT().VerifyPredicate(gomock.Any(), arg[:]).Return(nil) + return map[common.Address]precompileconfig.Predicater{ + addr1: predicater, + } + }, + accessList: types.AccessList([]types.AccessTuple{ + { + Address: addr1, + StorageKeys: []common.Hash{ + {1}, + }, + }, + }), + expectedRes: map[common.Address][]byte{ + addr1: {}, // valid bytes + }, + expectedErr: nil, + }, + "predicate returns gas err": { + gas: 53000, + predicateContext: predicateContext, + createPredicates: func(t testing.TB) map[common.Address]precompileconfig.Predicater { + predicater := precompileconfig.NewMockPredicater(gomock.NewController(t)) + arg := common.Hash{1} + predicater.EXPECT().PredicateGas(arg[:]).Return(uint64(0), testErr) + return map[common.Address]precompileconfig.Predicater{ + addr1: predicater, + } + }, + accessList: types.AccessList([]types.AccessTuple{ + { + Address: addr1, + StorageKeys: []common.Hash{ + {1}, + }, + }, + }), + expectedErr: testErr, + }, + "two predicates one named by access list returns non-empty": { + gas: 53000, + predicateContext: predicateContext, + createPredicates: func(t testing.TB) map[common.Address]precompileconfig.Predicater { + predicater := precompileconfig.NewMockPredicater(gomock.NewController(t)) + arg := common.Hash{1} + predicater.EXPECT().PredicateGas(arg[:]).Return(uint64(0), nil).Times(2) + predicater.EXPECT().VerifyPredicate(gomock.Any(), arg[:]).Return(nil) + return map[common.Address]precompileconfig.Predicater{ + addr1: predicater, + addr2: predicater, + } + }, + accessList: types.AccessList([]types.AccessTuple{ + { + 
Address: addr1, + StorageKeys: []common.Hash{ + {1}, + }, + }, + }), + expectedRes: map[common.Address][]byte{ + addr1: {}, // valid bytes + }, + expectedErr: nil, + }, + "two predicates both named by access list returns non-empty": { + gas: 53000, + predicateContext: predicateContext, + createPredicates: func(t testing.TB) map[common.Address]precompileconfig.Predicater { + ctrl := gomock.NewController(t) + predicate1 := precompileconfig.NewMockPredicater(ctrl) + arg1 := common.Hash{1} + predicate1.EXPECT().PredicateGas(arg1[:]).Return(uint64(0), nil).Times(2) + predicate1.EXPECT().VerifyPredicate(gomock.Any(), arg1[:]).Return(nil) + predicate2 := precompileconfig.NewMockPredicater(ctrl) + arg2 := common.Hash{2} + predicate2.EXPECT().PredicateGas(arg2[:]).Return(uint64(0), nil).Times(2) + predicate2.EXPECT().VerifyPredicate(gomock.Any(), arg2[:]).Return(testErr) + return map[common.Address]precompileconfig.Predicater{ + addr1: predicate1, + addr2: predicate2, + } + }, + accessList: types.AccessList([]types.AccessTuple{ + { + Address: addr1, + StorageKeys: []common.Hash{ + {1}, + }, + }, + { + Address: addr2, + StorageKeys: []common.Hash{ + {2}, + }, + }, + }), + expectedRes: map[common.Address][]byte{ + addr1: {}, // valid bytes + addr2: {1}, // invalid bytes + }, + expectedErr: nil, + }, + "two predicates niether named by access list": { + gas: 61600, + predicateContext: predicateContext, + createPredicates: func(t testing.TB) map[common.Address]precompileconfig.Predicater { + predicater := precompileconfig.NewMockPredicater(gomock.NewController(t)) + return map[common.Address]precompileconfig.Predicater{ + addr1: predicater, + addr2: predicater, + } + }, + accessList: types.AccessList([]types.AccessTuple{ + { + Address: addr3, + StorageKeys: []common.Hash{ + {1}, + }, + }, + { + Address: addr4, + StorageKeys: []common.Hash{ + {1}, + }, + }, + }), + expectedRes: make(map[common.Address][]byte), + expectedErr: nil, + }, + "insufficient gas": { + gas: 53000, + 
predicateContext: predicateContext, + createPredicates: func(t testing.TB) map[common.Address]precompileconfig.Predicater { + predicater := precompileconfig.NewMockPredicater(gomock.NewController(t)) + arg := common.Hash{1} + predicater.EXPECT().PredicateGas(arg[:]).Return(uint64(1), nil) + return map[common.Address]precompileconfig.Predicater{ + addr1: predicater, + } + }, + accessList: types.AccessList([]types.AccessTuple{ + { + Address: addr1, + StorageKeys: []common.Hash{ + {1}, + }, + }, + }), + expectedErr: ErrIntrinsicGas, + }, + } { + test := test + t.Run(name, func(t *testing.T) { + require := require.New(t) + // Create the rules from TestChainConfig and update the predicates based on the test params + rules := params.TestChainConfig.AvalancheRules(common.Big0, 0) + if test.createPredicates != nil { + for address, predicater := range test.createPredicates(t) { + rules.Predicaters[address] = predicater + } + } + + // Specify only the access list, since this test should not depend on any other values + tx := types.NewTx(&types.DynamicFeeTx{ + AccessList: test.accessList, + Gas: test.gas, + }) + predicateRes, err := CheckPredicates(rules, test.predicateContext, tx) + require.ErrorIs(err, test.expectedErr) + if test.expectedErr != nil { + return + } + require.Equal(test.expectedRes, predicateRes) + intrinsicGas, err := IntrinsicGas(tx.Data(), tx.AccessList(), true, rules) + require.NoError(err) + require.Equal(tx.Gas(), intrinsicGas) // Require test specifies exact amount of gas consumed + }) + } +} + +func TestCheckPredicatesOutput(t *testing.T) { + testErr := errors.New("test error") + addr1 := common.HexToAddress("0xaa") + addr2 := common.HexToAddress("0xbb") + validHash := common.Hash{1} + invalidHash := common.Hash{2} + predicateContext := &precompileconfig.PredicateContext{ + ProposerVMBlockCtx: &block.Context{ + PChainHeight: 10, + }, + } + type testTuple struct { + address common.Address + isValidPredicate bool + } + type resultTest struct { + name 
string + expectedRes map[common.Address][]byte + testTuple []testTuple + } + tests := []resultTest{ + {name: "no predicates", expectedRes: map[common.Address][]byte{}}, + { + name: "one address one predicate", + testTuple: []testTuple{ + {address: addr1, isValidPredicate: true}, + }, + expectedRes: map[common.Address][]byte{addr1: set.NewBits().Bytes()}, + }, + { + name: "one address one invalid predicate", + testTuple: []testTuple{ + {address: addr1, isValidPredicate: false}, + }, + expectedRes: map[common.Address][]byte{addr1: set.NewBits(0).Bytes()}, + }, + { + name: "one address two invalid predicates", + testTuple: []testTuple{ + {address: addr1, isValidPredicate: false}, + {address: addr1, isValidPredicate: false}, + }, + expectedRes: map[common.Address][]byte{addr1: set.NewBits(0, 1).Bytes()}, + }, + { + name: "one address two mixed predicates", + testTuple: []testTuple{ + {address: addr1, isValidPredicate: true}, + {address: addr1, isValidPredicate: false}, + }, + expectedRes: map[common.Address][]byte{addr1: set.NewBits(1).Bytes()}, + }, + { + name: "one address mixed predicates", + testTuple: []testTuple{ + {address: addr1, isValidPredicate: true}, + {address: addr1, isValidPredicate: false}, + {address: addr1, isValidPredicate: false}, + {address: addr1, isValidPredicate: true}, + }, + expectedRes: map[common.Address][]byte{addr1: set.NewBits(1, 2).Bytes()}, + }, + { + name: "two addresses mixed predicates", + testTuple: []testTuple{ + {address: addr1, isValidPredicate: true}, + {address: addr2, isValidPredicate: false}, + {address: addr1, isValidPredicate: false}, + {address: addr1, isValidPredicate: false}, + {address: addr2, isValidPredicate: true}, + {address: addr2, isValidPredicate: true}, + {address: addr2, isValidPredicate: false}, + {address: addr2, isValidPredicate: true}, + }, + expectedRes: map[common.Address][]byte{addr1: set.NewBits(1, 2).Bytes(), addr2: set.NewBits(0, 3).Bytes()}, + }, + { + name: "two addresses all valid predicates", + 
testTuple: []testTuple{ + {address: addr1, isValidPredicate: true}, + {address: addr2, isValidPredicate: true}, + {address: addr1, isValidPredicate: true}, + {address: addr1, isValidPredicate: true}, + }, + expectedRes: map[common.Address][]byte{addr1: set.NewBits().Bytes(), addr2: set.NewBits().Bytes()}, + }, + { + name: "two addresses all invalid predicates", + testTuple: []testTuple{ + {address: addr1, isValidPredicate: false}, + {address: addr2, isValidPredicate: false}, + {address: addr1, isValidPredicate: false}, + {address: addr1, isValidPredicate: false}, + }, + expectedRes: map[common.Address][]byte{addr1: set.NewBits(0, 1, 2).Bytes(), addr2: set.NewBits(0).Bytes()}, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + // Create the rules from TestChainConfig and update the predicates based on the test params + rules := params.TestChainConfig.AvalancheRules(common.Big0, 0) + predicater := precompileconfig.NewMockPredicater(gomock.NewController(t)) + predicater.EXPECT().PredicateGas(gomock.Any()).Return(uint64(0), nil).Times(len(test.testTuple)) + + var txAccessList types.AccessList + for _, tuple := range test.testTuple { + var predicateHash common.Hash + if tuple.isValidPredicate { + predicateHash = validHash + predicater.EXPECT().VerifyPredicate(gomock.Any(), validHash[:]).Return(nil) + } else { + predicateHash = invalidHash + predicater.EXPECT().VerifyPredicate(gomock.Any(), invalidHash[:]).Return(testErr) + } + txAccessList = append(txAccessList, types.AccessTuple{ + Address: tuple.address, + StorageKeys: []common.Hash{ + predicateHash, + }, + }) + } + + rules.Predicaters[addr1] = predicater + rules.Predicaters[addr2] = predicater + + // Specify only the access list, since this test should not depend on any other values + tx := types.NewTx(&types.DynamicFeeTx{ + AccessList: txAccessList, + Gas: 53000, + }) + predicateRes, err := CheckPredicates(rules, predicateContext, tx) + require.NoError(err) 
+ require.Equal(test.expectedRes, predicateRes) + }) + } +} diff --git a/coreth/core/rawdb/accessors_chain.go b/coreth/core/rawdb/accessors_chain.go index 6d4b1e55..07c12f5c 100644 --- a/coreth/core/rawdb/accessors_chain.go +++ b/coreth/core/rawdb/accessors_chain.go @@ -30,11 +30,12 @@ import ( "bytes" "encoding/binary" "errors" + "math/big" "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/ethdb" "github.com/ava-labs/coreth/params" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rlp" ) @@ -384,22 +385,25 @@ func ReadRawReceipts(db ethdb.Reader, hash common.Hash, number uint64) types.Rec // The current implementation populates these metadata fields by reading the receipts' // corresponding block body, so if the block body is not found it will return nil even // if the receipt itself is stored. -func ReadReceipts(db ethdb.Reader, hash common.Hash, number uint64, config *params.ChainConfig) types.Receipts { +func ReadReceipts(db ethdb.Reader, hash common.Hash, number uint64, time uint64, config *params.ChainConfig) types.Receipts { // We're deriving many fields from the block body, retrieve beside the receipt receipts := ReadRawReceipts(db, hash, number) if receipts == nil { return nil } - header := ReadHeader(db, hash, number) - if header == nil { - return nil - } body := ReadBody(db, hash, number) if body == nil { log.Error("Missing body but have receipt", "hash", hash, "number", number) return nil } - if err := receipts.DeriveFields(config, hash, number, header.Time, body.Transactions); err != nil { + header := ReadHeader(db, hash, number) + var baseFee *big.Int + if header == nil { + baseFee = big.NewInt(0) + } else { + baseFee = header.BaseFee + } + if err := receipts.DeriveFields(config, hash, number, time, baseFee, body.Transactions); err != nil { log.Error("Failed to derive block receipts fields", "hash", hash, "number", number, "err", 
err) return nil } @@ -432,10 +436,11 @@ func DeleteReceipts(db ethdb.KeyValueWriter, hash common.Hash, number uint64) { // storedReceiptRLP is the storage encoding of a receipt. // Re-definition in core/types/receipt.go. +// TODO: Re-use the existing definition. type storedReceiptRLP struct { PostStateOrStatus []byte CumulativeGasUsed uint64 - Logs []*types.LogForStorage + Logs []*types.Log } // ReceiptLogs is a barebone version of ReceiptForStorage which only keeps @@ -451,10 +456,7 @@ func (r *receiptLogs) DecodeRLP(s *rlp.Stream) error { if err := s.Decode(&stored); err != nil { return err } - r.Logs = make([]*types.Log, len(stored.Logs)) - for i, log := range stored.Logs { - r.Logs[i] = (*types.Log)(log) - } + r.Logs = stored.Logs return nil } @@ -479,9 +481,9 @@ func deriveLogFields(receipts []*receiptLogs, hash common.Hash, number uint64, t return nil } -// ReadLogs retrieves the logs for all transactions in a block. The log fields -// are populated with metadata. In case the receipts or the block body -// are not found, a nil is returned. +// ReadLogs retrieves the logs for all transactions in a block. In case +// receipts is not found, a nil is returned. +// Note: ReadLogs does not derive unstored log fields. 
func ReadLogs(db ethdb.Reader, hash common.Hash, number uint64) [][]*types.Log { // Retrieve the flattened receipt slice data := ReadReceiptsRLP(db, hash, number) @@ -494,15 +496,6 @@ func ReadLogs(db ethdb.Reader, hash common.Hash, number uint64) [][]*types.Log { return nil } - body := ReadBody(db, hash, number) - if body == nil { - log.Error("Missing body but have receipt", "hash", hash, "number", number) - return nil - } - if err := deriveLogFields(receipts, hash, number, body.Transactions); err != nil { - log.Error("Failed to derive block receipts fields", "hash", hash, "number", number, "err", err) - return nil - } logs := make([][]*types.Log, len(receipts)) for i, receipt := range receipts { logs[i] = receipt.Logs @@ -525,7 +518,7 @@ func ReadBlock(db ethdb.Reader, hash common.Hash, number uint64) *types.Block { if body == nil { return nil } - return types.NewBlockWithHeader(header).WithBody(body.Transactions, body.Uncles, body.Version, body.ExtData) + return types.NewBlockWithHeader(header).WithBody(body.Transactions, body.Uncles).WithExtData(body.Version, body.ExtData) } // WriteBlock serializes a block into the database, header and body separately. @@ -590,8 +583,7 @@ func ReadHeadBlock(db ethdb.Reader) *types.Block { } // ReadTxIndexTail retrieves the number of oldest indexed block -// whose transaction indices has been indexed. If the corresponding entry -// is non-existent in database it means the indexing has been finished. +// whose transaction indices has been indexed. 
func ReadTxIndexTail(db ethdb.KeyValueReader) *uint64 { data, _ := db.Get(txIndexTailKey) if len(data) != 8 { diff --git a/coreth/core/rawdb/accessors_chain_test.go b/coreth/core/rawdb/accessors_chain_test.go index 9feb4c0d..02e36e75 100644 --- a/coreth/core/rawdb/accessors_chain_test.go +++ b/coreth/core/rawdb/accessors_chain_test.go @@ -111,8 +111,8 @@ func TestBlockStorage(t *testing.T) { block := types.NewBlockWithHeader(&types.Header{ Extra: []byte("test block"), UncleHash: types.EmptyUncleHash, - TxHash: types.EmptyRootHash, - ReceiptHash: types.EmptyRootHash, + TxHash: types.EmptyTxsHash, + ReceiptHash: types.EmptyReceiptsHash, }) if entry := ReadBlock(db, block.Hash(), block.NumberU64()); entry != nil { t.Fatalf("Non existent block returned: %v", entry) @@ -159,8 +159,8 @@ func TestPartialBlockStorage(t *testing.T) { block := types.NewBlockWithHeader(&types.Header{ Extra: []byte("test block"), UncleHash: types.EmptyUncleHash, - TxHash: types.EmptyRootHash, - ReceiptHash: types.EmptyRootHash, + TxHash: types.EmptyTxsHash, + ReceiptHash: types.EmptyReceiptsHash, }) // Store a header and check that it's not recognized as a block WriteHeader(db, block.Header()) @@ -278,7 +278,7 @@ func TestBlockReceiptStorage(t *testing.T) { // Check that no receipt entries are in a pristine database header := &types.Header{Number: big.NewInt(0), Extra: []byte("test header")} hash := header.Hash() - if rs := ReadReceipts(db, hash, 0, params.TestChainConfig); len(rs) != 0 { + if rs := ReadReceipts(db, hash, 0, 0, params.TestChainConfig); len(rs) != 0 { t.Fatalf("non existent receipts returned: %v", rs) } // Insert the body that corresponds to the receipts @@ -290,7 +290,7 @@ func TestBlockReceiptStorage(t *testing.T) { // Insert the receipt slice into the database and check presence WriteReceipts(db, hash, 0, receipts) - if rs := ReadReceipts(db, hash, 0, params.TestChainConfig); len(rs) == 0 { + if rs := ReadReceipts(db, hash, 0, 0, params.TestChainConfig); len(rs) == 0 { 
t.Fatalf("no receipts returned") } else { if err := checkReceiptsRLP(rs, receipts); err != nil { @@ -303,7 +303,7 @@ func TestBlockReceiptStorage(t *testing.T) { if header := ReadHeader(db, hash, 0); header != nil { t.Fatal("header is not nil") } - if rs := ReadReceipts(db, hash, 0, params.TestChainConfig); rs != nil { + if rs := ReadReceipts(db, hash, 0, 0, params.TestChainConfig); rs != nil { t.Fatalf("receipts returned when body was deleted: %v", rs) } // Ensure that receipts without metadata can be returned without the block body too @@ -315,7 +315,7 @@ func TestBlockReceiptStorage(t *testing.T) { WriteBody(db, hash, 0, body) DeleteReceipts(db, hash, 0) - if rs := ReadReceipts(db, hash, 0, params.TestChainConfig); len(rs) != 0 { + if rs := ReadReceipts(db, hash, 0, 0, params.TestChainConfig); len(rs) != 0 { t.Fatalf("deleted receipts returned: %v", rs) } } @@ -476,7 +476,7 @@ func TestReadLogs(t *testing.T) { hash := common.BytesToHash([]byte{0x03, 0x14}) // Check that no receipt entries are in a pristine database - if rs := ReadReceipts(db, hash, 0, params.TestChainConfig); len(rs) != 0 { + if rs := ReadReceipts(db, hash, 0, 0, params.TestChainConfig); len(rs) != 0 { t.Fatalf("non existent receipts returned: %v", rs) } // Insert the body that corresponds to the receipts @@ -499,10 +499,6 @@ func TestReadLogs(t *testing.T) { t.Fatalf("unexpected number of logs[1] returned, have %d want %d", have, want) } - // Fill in log fields so we can compare their rlp encoding - if err := types.Receipts(receipts).DeriveFields(params.TestChainConfig, hash, 0, 0, body.Transactions); err != nil { - t.Fatal(err) - } for i, pr := range receipts { for j, pl := range pr.Logs { rlpHave, err := rlp.EncodeToBytes(newFullLogRLP(logs[i][j])) diff --git a/coreth/core/rawdb/accessors_indexes.go b/coreth/core/rawdb/accessors_indexes.go index 0ac33214..511fa39d 100644 --- a/coreth/core/rawdb/accessors_indexes.go +++ b/coreth/core/rawdb/accessors_indexes.go @@ -31,9 +31,9 @@ import ( 
"math/big" "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/ethdb" "github.com/ava-labs/coreth/params" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rlp" ) @@ -140,8 +140,12 @@ func ReadReceipt(db ethdb.Reader, hash common.Hash, config *params.ChainConfig) if blockHash == (common.Hash{}) { return nil, common.Hash{}, 0, 0 } + blockHeader := ReadHeader(db, blockHash, *blockNumber) + if blockHeader == nil { + return nil, common.Hash{}, 0, 0 + } // Read all the receipts from the block and return the one with the matching hash - receipts := ReadReceipts(db, blockHash, *blockNumber, config) + receipts := ReadReceipts(db, blockHash, *blockNumber, blockHeader.Time, config) for receiptIndex, receipt := range receipts { if receipt.TxHash == hash { return receipt, blockHash, *blockNumber, uint64(receiptIndex) diff --git a/coreth/core/rawdb/accessors_indexes_test.go b/coreth/core/rawdb/accessors_indexes_test.go index e64818b1..408b1869 100644 --- a/coreth/core/rawdb/accessors_indexes_test.go +++ b/coreth/core/rawdb/accessors_indexes_test.go @@ -23,8 +23,8 @@ import ( "testing" "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/ethdb" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/rlp" "golang.org/x/crypto/sha3" ) @@ -44,9 +44,10 @@ func (h *testHasher) Reset() { h.hasher.Reset() } -func (h *testHasher) Update(key, val []byte) { +func (h *testHasher) Update(key, val []byte) error { h.hasher.Write(key) h.hasher.Write(val) + return nil } func (h *testHasher) Hash() common.Hash { @@ -98,7 +99,7 @@ func TestLookupStorage(t *testing.T) { tx3 := types.NewTransaction(3, common.BytesToAddress([]byte{0x33}), big.NewInt(333), 3333, big.NewInt(33333), []byte{0x33, 0x33, 0x33}) txs := []*types.Transaction{tx1, tx2, tx3} - block := types.NewBlock(&types.Header{Number: 
big.NewInt(314)}, txs, nil, nil, newHasher(), nil, true) + block := types.NewBlock(&types.Header{Number: big.NewInt(314)}, txs, nil, nil, newHasher()) // Check that no transactions entries are in a pristine database for i, tx := range txs { diff --git a/coreth/core/rawdb/accessors_metadata.go b/coreth/core/rawdb/accessors_metadata.go index 0e19cfe0..890448b9 100644 --- a/coreth/core/rawdb/accessors_metadata.go +++ b/coreth/core/rawdb/accessors_metadata.go @@ -30,9 +30,9 @@ import ( "encoding/json" "time" - "github.com/ava-labs/coreth/ethdb" "github.com/ava-labs/coreth/params" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rlp" ) diff --git a/coreth/core/rawdb/accessors_snapshot.go b/coreth/core/rawdb/accessors_snapshot.go index af855e23..5ddad719 100644 --- a/coreth/core/rawdb/accessors_snapshot.go +++ b/coreth/core/rawdb/accessors_snapshot.go @@ -27,8 +27,8 @@ package rawdb import ( - "github.com/ava-labs/coreth/ethdb" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" ) diff --git a/coreth/core/rawdb/accessors_state.go b/coreth/core/rawdb/accessors_state.go index 16fd608e..cda5acb4 100644 --- a/coreth/core/rawdb/accessors_state.go +++ b/coreth/core/rawdb/accessors_state.go @@ -27,8 +27,8 @@ package rawdb import ( - "github.com/ava-labs/coreth/ethdb" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" ) @@ -38,6 +38,17 @@ func ReadPreimage(db ethdb.KeyValueReader, hash common.Hash) []byte { return data } +// WritePreimages writes the provided set of preimages to the database. 
+func WritePreimages(db ethdb.KeyValueWriter, preimages map[common.Hash][]byte) { + for hash, preimage := range preimages { + if err := db.Put(preimageKey(hash), preimage); err != nil { + log.Crit("Failed to store trie preimage", "err", err) + } + } + preimageCounter.Inc(int64(len(preimages))) + preimageHitCounter.Inc(int64(len(preimages))) +} + // ReadCode retrieves the contract code of the provided code hash. func ReadCode(db ethdb.KeyValueReader, hash common.Hash) []byte { // Try with the prefixed code scheme first and only. The legacy scheme was never used in coreth. @@ -45,12 +56,6 @@ func ReadCode(db ethdb.KeyValueReader, hash common.Hash) []byte { return data } -// ReadTrieNode retrieves the trie node of the provided hash. -func ReadTrieNode(db ethdb.KeyValueReader, hash common.Hash) []byte { - data, _ := db.Get(hash.Bytes()) - return data -} - // HasCode checks if the contract code corresponding to the // provided code hash is present in the db. func HasCode(db ethdb.KeyValueReader, hash common.Hash) bool { @@ -59,23 +64,6 @@ func HasCode(db ethdb.KeyValueReader, hash common.Hash) bool { return ok } -// HasTrieNode checks if the trie node with the provided hash is present in db. -func HasTrieNode(db ethdb.KeyValueReader, hash common.Hash) bool { - ok, _ := db.Has(hash.Bytes()) - return ok -} - -// WritePreimages writes the provided set of preimages to the database. -func WritePreimages(db ethdb.KeyValueWriter, preimages map[common.Hash][]byte) { - for hash, preimage := range preimages { - if err := db.Put(preimageKey(hash), preimage); err != nil { - log.Crit("Failed to store trie preimage", "err", err) - } - } - preimageCounter.Inc(int64(len(preimages))) - preimageHitCounter.Inc(int64(len(preimages))) -} - // WriteCode writes the provided contract code database. 
func WriteCode(db ethdb.KeyValueWriter, hash common.Hash, code []byte) { if err := db.Put(codeKey(hash), code); err != nil { @@ -83,23 +71,9 @@ func WriteCode(db ethdb.KeyValueWriter, hash common.Hash, code []byte) { } } -// WriteTrieNode writes the provided trie node database. -func WriteTrieNode(db ethdb.KeyValueWriter, hash common.Hash, node []byte) { - if err := db.Put(hash.Bytes(), node); err != nil { - log.Crit("Failed to store trie node", "err", err) - } -} - // DeleteCode deletes the specified contract code from the database. func DeleteCode(db ethdb.KeyValueWriter, hash common.Hash) { if err := db.Delete(codeKey(hash)); err != nil { log.Crit("Failed to delete contract code", "err", err) } } - -// DeleteTrieNode deletes the specified trie node from the database. -func DeleteTrieNode(db ethdb.KeyValueWriter, hash common.Hash) { - if err := db.Delete(hash.Bytes()); err != nil { - log.Crit("Failed to delete trie node", "err", err) - } -} diff --git a/coreth/core/rawdb/accessors_state_sync.go b/coreth/core/rawdb/accessors_state_sync.go index ab947e9f..32fb8013 100644 --- a/coreth/core/rawdb/accessors_state_sync.go +++ b/coreth/core/rawdb/accessors_state_sync.go @@ -7,8 +7,8 @@ import ( "encoding/binary" "github.com/ava-labs/avalanchego/utils/wrappers" - "github.com/ava-labs/coreth/ethdb" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" ) @@ -86,13 +86,12 @@ func ClearSyncSegments(db ethdb.KeyValueStore, root common.Hash) error { segmentsPrefix := make([]byte, len(syncSegmentsPrefix)+common.HashLength) copy(segmentsPrefix, syncSegmentsPrefix) copy(segmentsPrefix[len(syncSegmentsPrefix):], root[:]) - - return ClearPrefix(db, segmentsPrefix) + return ClearPrefix(db, segmentsPrefix, syncSegmentsKeyLength) } // ClearAllSyncSegments removes all segment markers from db func ClearAllSyncSegments(db ethdb.KeyValueStore) error { - return ClearPrefix(db, syncSegmentsPrefix) + return ClearPrefix(db, 
syncSegmentsPrefix, syncSegmentsKeyLength) } // UnpackSyncSegmentKey returns the root and start position for a trie segment @@ -131,12 +130,12 @@ func ClearSyncStorageTrie(db ethdb.KeyValueStore, root common.Hash) error { accountsPrefix := make([]byte, len(syncStorageTriesPrefix)+common.HashLength) copy(accountsPrefix, syncStorageTriesPrefix) copy(accountsPrefix[len(syncStorageTriesPrefix):], root[:]) - return ClearPrefix(db, accountsPrefix) + return ClearPrefix(db, accountsPrefix, syncStorageTriesKeyLength) } // ClearAllSyncStorageTries removes all storage tries added for syncing from db func ClearAllSyncStorageTries(db ethdb.KeyValueStore) error { - return ClearPrefix(db, syncStorageTriesPrefix) + return ClearPrefix(db, syncStorageTriesPrefix, syncStorageTriesKeyLength) } // UnpackSyncStorageTrieKey returns the root and account for a storage trie diff --git a/coreth/core/rawdb/accessors_state_sync_test.go b/coreth/core/rawdb/accessors_state_sync_test.go new file mode 100644 index 00000000..5c51eb7b --- /dev/null +++ b/coreth/core/rawdb/accessors_state_sync_test.go @@ -0,0 +1,33 @@ +// (c) 2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package rawdb + +import ( + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +func TestClearPrefix(t *testing.T) { + require := require.New(t) + db := NewMemoryDatabase() + // add a key that should be cleared + require.NoError(WriteSyncSegment(db, common.Hash{1}, common.Hash{}.Bytes())) + + // add a key that should not be cleared + key := append(syncSegmentsPrefix, []byte("foo")...) 
+ require.NoError(db.Put(key, []byte("bar"))) + + require.NoError(ClearAllSyncSegments(db)) + + count := 0 + it := db.NewIterator(syncSegmentsPrefix, nil) + defer it.Release() + for it.Next() { + count++ + } + require.NoError(it.Error()) + require.Equal(1, count) +} diff --git a/coreth/core/rawdb/accessors_trie.go b/coreth/core/rawdb/accessors_trie.go new file mode 100644 index 00000000..a5e3a517 --- /dev/null +++ b/coreth/core/rawdb/accessors_trie.go @@ -0,0 +1,273 @@ +// (c) 2023, Ava Labs, Inc. +// +// This file is a derived work, based on the go-ethereum library whose original +// notices appear below. +// +// It is distributed under a license compatible with the licensing terms of the +// original code from which it is derived. +// +// Much love to the original authors for their work. +// ********** +// Copyright 2022 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see + +package rawdb + +import ( + "fmt" + "sync" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/log" + "golang.org/x/crypto/sha3" +) + +// HashScheme is the legacy hash-based state scheme with which trie nodes are +// stored in the disk with node hash as the database key. 
The advantage of this +// scheme is that different versions of trie nodes can be stored in disk, which +// is very beneficial for constructing archive nodes. The drawback is it will +// store different trie nodes on the same path to different locations on the disk +// with no data locality, and it's unfriendly for designing state pruning. +// +// Now this scheme is still kept for backward compatibility, and it will be used +// for archive node and some other tries(e.g. light trie). +const HashScheme = "hashScheme" + +// PathScheme is the new path-based state scheme with which trie nodes are stored +// in the disk with node path as the database key. This scheme will only store one +// version of state data in the disk, which means that the state pruning operation +// is native. At the same time, this scheme will put adjacent trie nodes in the same +// area of the disk with good data locality property. But this scheme needs to rely +// on extra state diffs to survive deep reorg. +const PathScheme = "pathScheme" + +// nodeHasher used to derive the hash of trie node. +type nodeHasher struct{ sha crypto.KeccakState } + +var hasherPool = sync.Pool{ + New: func() interface{} { return &nodeHasher{sha: sha3.NewLegacyKeccak256().(crypto.KeccakState)} }, +} + +func newNodeHasher() *nodeHasher { return hasherPool.Get().(*nodeHasher) } +func returnHasherToPool(h *nodeHasher) { hasherPool.Put(h) } + +func (h *nodeHasher) hashData(data []byte) (n common.Hash) { + h.sha.Reset() + h.sha.Write(data) + h.sha.Read(n[:]) + return n +} + +// ReadAccountTrieNode retrieves the account trie node and the associated node +// hash with the specified node path. 
+func ReadAccountTrieNode(db ethdb.KeyValueReader, path []byte) ([]byte, common.Hash) { + data, err := db.Get(accountTrieNodeKey(path)) + if err != nil { + return nil, common.Hash{} + } + hasher := newNodeHasher() + defer returnHasherToPool(hasher) + return data, hasher.hashData(data) +} + +// HasAccountTrieNode checks the account trie node presence with the specified +// node path and the associated node hash. +func HasAccountTrieNode(db ethdb.KeyValueReader, path []byte, hash common.Hash) bool { + data, err := db.Get(accountTrieNodeKey(path)) + if err != nil { + return false + } + hasher := newNodeHasher() + defer returnHasherToPool(hasher) + return hasher.hashData(data) == hash +} + +// WriteAccountTrieNode writes the provided account trie node into database. +func WriteAccountTrieNode(db ethdb.KeyValueWriter, path []byte, node []byte) { + if err := db.Put(accountTrieNodeKey(path), node); err != nil { + log.Crit("Failed to store account trie node", "err", err) + } +} + +// DeleteAccountTrieNode deletes the specified account trie node from the database. +func DeleteAccountTrieNode(db ethdb.KeyValueWriter, path []byte) { + if err := db.Delete(accountTrieNodeKey(path)); err != nil { + log.Crit("Failed to delete account trie node", "err", err) + } +} + +// ReadStorageTrieNode retrieves the storage trie node and the associated node +// hash with the specified node path. +func ReadStorageTrieNode(db ethdb.KeyValueReader, accountHash common.Hash, path []byte) ([]byte, common.Hash) { + data, err := db.Get(storageTrieNodeKey(accountHash, path)) + if err != nil { + return nil, common.Hash{} + } + hasher := newNodeHasher() + defer returnHasherToPool(hasher) + return data, hasher.hashData(data) +} + +// HasStorageTrieNode checks the storage trie node presence with the provided +// node path and the associated node hash. 
+func HasStorageTrieNode(db ethdb.KeyValueReader, accountHash common.Hash, path []byte, hash common.Hash) bool { + data, err := db.Get(storageTrieNodeKey(accountHash, path)) + if err != nil { + return false + } + hasher := newNodeHasher() + defer returnHasherToPool(hasher) + return hasher.hashData(data) == hash +} + +// WriteStorageTrieNode writes the provided storage trie node into database. +func WriteStorageTrieNode(db ethdb.KeyValueWriter, accountHash common.Hash, path []byte, node []byte) { + if err := db.Put(storageTrieNodeKey(accountHash, path), node); err != nil { + log.Crit("Failed to store storage trie node", "err", err) + } +} + +// DeleteStorageTrieNode deletes the specified storage trie node from the database. +func DeleteStorageTrieNode(db ethdb.KeyValueWriter, accountHash common.Hash, path []byte) { + if err := db.Delete(storageTrieNodeKey(accountHash, path)); err != nil { + log.Crit("Failed to delete storage trie node", "err", err) + } +} + +// ReadLegacyTrieNode retrieves the legacy trie node with the given +// associated node hash. +func ReadLegacyTrieNode(db ethdb.KeyValueReader, hash common.Hash) []byte { + data, err := db.Get(hash.Bytes()) + if err != nil { + return nil + } + return data +} + +// HasLegacyTrieNode checks if the trie node with the provided hash is present in db. +func HasLegacyTrieNode(db ethdb.KeyValueReader, hash common.Hash) bool { + ok, _ := db.Has(hash.Bytes()) + return ok +} + +// WriteLegacyTrieNode writes the provided legacy trie node to database. +func WriteLegacyTrieNode(db ethdb.KeyValueWriter, hash common.Hash, node []byte) { + if err := db.Put(hash.Bytes(), node); err != nil { + log.Crit("Failed to store legacy trie node", "err", err) + } +} + +// DeleteLegacyTrieNode deletes the specified legacy trie node from database. 
+func DeleteLegacyTrieNode(db ethdb.KeyValueWriter, hash common.Hash) { + if err := db.Delete(hash.Bytes()); err != nil { + log.Crit("Failed to delete legacy trie node", "err", err) + } +} + +// HasTrieNode checks the trie node presence with the provided node info and +// the associated node hash. +func HasTrieNode(db ethdb.KeyValueReader, owner common.Hash, path []byte, hash common.Hash, scheme string) bool { + switch scheme { + case HashScheme: + return HasLegacyTrieNode(db, hash) + case PathScheme: + if owner == (common.Hash{}) { + return HasAccountTrieNode(db, path, hash) + } + return HasStorageTrieNode(db, owner, path, hash) + default: + panic(fmt.Sprintf("Unknown scheme %v", scheme)) + } +} + +// ReadTrieNode retrieves the trie node from database with the provided node info +// and associated node hash. +// hashScheme-based lookup requires the following: +// - hash +// +// pathScheme-based lookup requires the following: +// - owner +// - path +func ReadTrieNode(db ethdb.KeyValueReader, owner common.Hash, path []byte, hash common.Hash, scheme string) []byte { + switch scheme { + case HashScheme: + return ReadLegacyTrieNode(db, hash) + case PathScheme: + var ( + blob []byte + nHash common.Hash + ) + if owner == (common.Hash{}) { + blob, nHash = ReadAccountTrieNode(db, path) + } else { + blob, nHash = ReadStorageTrieNode(db, owner, path) + } + if nHash != hash { + return nil + } + return blob + default: + panic(fmt.Sprintf("Unknown scheme %v", scheme)) + } +} + +// WriteTrieNode writes the trie node into database with the provided node info +// and associated node hash. 
+// hashScheme-based lookup requires the following: +// - hash +// +// pathScheme-based lookup requires the following: +// - owner +// - path +func WriteTrieNode(db ethdb.KeyValueWriter, owner common.Hash, path []byte, hash common.Hash, node []byte, scheme string) { + switch scheme { + case HashScheme: + WriteLegacyTrieNode(db, hash, node) + case PathScheme: + if owner == (common.Hash{}) { + WriteAccountTrieNode(db, path, node) + } else { + WriteStorageTrieNode(db, owner, path, node) + } + default: + panic(fmt.Sprintf("Unknown scheme %v", scheme)) + } +} + +// DeleteTrieNode deletes the trie node from database with the provided node info +// and associated node hash. +// hashScheme-based lookup requires the following: +// - hash +// +// pathScheme-based lookup requires the following: +// - owner +// - path +func DeleteTrieNode(db ethdb.KeyValueWriter, owner common.Hash, path []byte, hash common.Hash, scheme string) { + switch scheme { + case HashScheme: + DeleteLegacyTrieNode(db, hash) + case PathScheme: + if owner == (common.Hash{}) { + DeleteAccountTrieNode(db, path) + } else { + DeleteStorageTrieNode(db, owner, path) + } + default: + panic(fmt.Sprintf("Unknown scheme %v", scheme)) + } +} diff --git a/coreth/core/rawdb/chain_iterator.go b/coreth/core/rawdb/chain_iterator.go index 5a436f1d..f302c99c 100644 --- a/coreth/core/rawdb/chain_iterator.go +++ b/coreth/core/rawdb/chain_iterator.go @@ -32,9 +32,9 @@ import ( "time" "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/ethdb" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/prque" + "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rlp" ) @@ -89,11 +89,12 @@ func iterateTransactions(db ethdb.Database, from uint64, to uint64, reverse bool } } // process runs in parallel - nThreadsAlive := int32(threads) + var nThreadsAlive atomic.Int32 + nThreadsAlive.Store(int32(threads)) process := func() { defer 
func() { // Last processor closes the result channel - if atomic.AddInt32(&nThreadsAlive, -1) == 0 { + if nThreadsAlive.Add(-1) == 0 { close(hashesCh) } }() @@ -148,7 +149,7 @@ func indexTransactions(db ethdb.Database, from uint64, to uint64, interrupt chan // in to be [to-1]. Therefore, setting lastNum to means that the // prqueue gap-evaluation will work correctly lastNum = to - queue = prque.New(nil) + queue = prque.New[int64, *blockTxHashes](nil) // for stats reporting blocks, txs = 0, 0 ) @@ -167,7 +168,7 @@ func indexTransactions(db ethdb.Database, from uint64, to uint64, interrupt chan break } // Next block available, pop it off and index it - delivery := queue.PopItem().(*blockTxHashes) + delivery := queue.PopItem() lastNum = delivery.number WriteTxLookupEntries(batch, delivery.number, delivery.hashes) blocks++ @@ -200,7 +201,7 @@ func indexTransactions(db ethdb.Database, from uint64, to uint64, interrupt chan case <-interrupt: log.Debug("Transaction indexing interrupted", "blocks", blocks, "txs", txs, "tail", lastNum, "elapsed", common.PrettyDuration(time.Since(start))) default: - log.Info("Indexed transactions", "blocks", blocks, "txs", txs, "tail", lastNum, "elapsed", common.PrettyDuration(time.Since(start))) + log.Debug("Indexed transactions", "blocks", blocks, "txs", txs, "tail", lastNum, "elapsed", common.PrettyDuration(time.Since(start))) } } @@ -239,7 +240,7 @@ func unindexTransactions(db ethdb.Database, from uint64, to uint64, interrupt ch // we expect the first number to come in to be [from]. 
Therefore, setting // nextNum to from means that the prqueue gap-evaluation will work correctly nextNum = from - queue = prque.New(nil) + queue = prque.New[int64, *blockTxHashes](nil) // for stats reporting blocks, txs = 0, 0 ) @@ -256,7 +257,7 @@ func unindexTransactions(db ethdb.Database, from uint64, to uint64, interrupt ch if hook != nil && !hook(nextNum) { break } - delivery := queue.PopItem().(*blockTxHashes) + delivery := queue.PopItem() nextNum = delivery.number + 1 DeleteTxLookupEntries(batch, delivery.hashes) txs += len(delivery.hashes) @@ -292,7 +293,7 @@ func unindexTransactions(db ethdb.Database, from uint64, to uint64, interrupt ch case <-interrupt: log.Debug("Transaction unindexing interrupted", "blocks", blocks, "txs", txs, "tail", to, "elapsed", common.PrettyDuration(time.Since(start))) default: - log.Info("Unindexed transactions", "blocks", blocks, "txs", txs, "tail", to, "elapsed", common.PrettyDuration(time.Since(start))) + log.Debug("Unindexed transactions", "blocks", blocks, "txs", txs, "tail", to, "elapsed", common.PrettyDuration(time.Since(start))) } } diff --git a/coreth/core/rawdb/chain_iterator_test.go b/coreth/core/rawdb/chain_iterator_test.go index 282849c6..9a8415dd 100644 --- a/coreth/core/rawdb/chain_iterator_test.go +++ b/coreth/core/rawdb/chain_iterator_test.go @@ -44,7 +44,7 @@ func TestChainIterator(t *testing.T) { var block *types.Block var txs []*types.Transaction to := common.BytesToAddress([]byte{0x11}) - block = types.NewBlock(&types.Header{Number: big.NewInt(int64(0))}, nil, nil, nil, newHasher(), nil, true) // Empty genesis block + block = types.NewBlock(&types.Header{Number: big.NewInt(int64(0))}, nil, nil, nil, newHasher()) // Empty genesis block WriteBlock(chainDb, block) WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64()) for i := uint64(1); i <= 10; i++ { @@ -70,7 +70,7 @@ func TestChainIterator(t *testing.T) { }) } txs = append(txs, tx) - block = types.NewBlock(&types.Header{Number: big.NewInt(int64(i))}, 
[]*types.Transaction{tx}, nil, nil, newHasher(), nil, true) + block = types.NewBlock(&types.Header{Number: big.NewInt(int64(i))}, []*types.Transaction{tx}, nil, nil, newHasher()) WriteBlock(chainDb, block) WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64()) } @@ -121,7 +121,7 @@ func TestIndexTransactions(t *testing.T) { to := common.BytesToAddress([]byte{0x11}) // Write empty genesis block - block = types.NewBlock(&types.Header{Number: big.NewInt(int64(0))}, nil, nil, nil, newHasher(), nil, true) + block = types.NewBlock(&types.Header{Number: big.NewInt(int64(0))}, nil, nil, nil, newHasher()) WriteBlock(chainDb, block) WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64()) @@ -148,7 +148,7 @@ func TestIndexTransactions(t *testing.T) { }) } txs = append(txs, tx) - block = types.NewBlock(&types.Header{Number: big.NewInt(int64(i))}, []*types.Transaction{tx}, nil, nil, newHasher(), nil, true) + block = types.NewBlock(&types.Header{Number: big.NewInt(int64(i))}, []*types.Transaction{tx}, nil, nil, newHasher()) WriteBlock(chainDb, block) WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64()) } diff --git a/coreth/core/rawdb/database.go b/coreth/core/rawdb/database.go index 2989344a..bde290cd 100644 --- a/coreth/core/rawdb/database.go +++ b/coreth/core/rawdb/database.go @@ -28,14 +28,16 @@ package rawdb import ( "bytes" + "errors" "fmt" "os" + "path/filepath" "time" - "github.com/ava-labs/coreth/ethdb" - "github.com/ava-labs/coreth/ethdb/leveldb" - "github.com/ava-labs/coreth/ethdb/memorydb" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/ethdb/leveldb" + "github.com/ethereum/go-ethereum/ethdb/memorydb" "github.com/ethereum/go-ethereum/log" "github.com/olekukonko/tablewriter" ) @@ -45,6 +47,83 @@ type nofreezedb struct { ethdb.KeyValueStore } +// HasAncient returns an error as we don't have a backing chain freezer. 
+func (db *nofreezedb) HasAncient(kind string, number uint64) (bool, error) { + return false, errNotSupported +} + +// Ancient returns an error as we don't have a backing chain freezer. +func (db *nofreezedb) Ancient(kind string, number uint64) ([]byte, error) { + return nil, errNotSupported +} + +// AncientRange returns an error as we don't have a backing chain freezer. +func (db *nofreezedb) AncientRange(kind string, start, max, maxByteSize uint64) ([][]byte, error) { + return nil, errNotSupported +} + +// Ancients returns an error as we don't have a backing chain freezer. +func (db *nofreezedb) Ancients() (uint64, error) { + return 0, errNotSupported +} + +// Tail returns an error as we don't have a backing chain freezer. +func (db *nofreezedb) Tail() (uint64, error) { + return 0, errNotSupported +} + +// AncientSize returns an error as we don't have a backing chain freezer. +func (db *nofreezedb) AncientSize(kind string) (uint64, error) { + return 0, errNotSupported +} + +// ModifyAncients is not supported. +func (db *nofreezedb) ModifyAncients(func(ethdb.AncientWriteOp) error) (int64, error) { + return 0, errNotSupported +} + +// TruncateHead returns an error as we don't have a backing chain freezer. +func (db *nofreezedb) TruncateHead(items uint64) error { + return errNotSupported +} + +// TruncateTail returns an error as we don't have a backing chain freezer. +func (db *nofreezedb) TruncateTail(items uint64) error { + return errNotSupported +} + +// Sync returns an error as we don't have a backing chain freezer. +func (db *nofreezedb) Sync() error { + return errNotSupported +} + +func (db *nofreezedb) ReadAncients(fn func(reader ethdb.AncientReaderOp) error) (err error) { + // Unlike other ancient-related methods, this method does not return + // errNotSupported when invoked. + // The reason for this is that the caller might want to do several things: + // 1. Check if something is in freezer, + // 2. If not, check leveldb. 
+ // + // This will work, since the ancient-checks inside 'fn' will return errors, + // and the leveldb work will continue. + // + // If we instead were to return errNotSupported here, then the caller would + // have to explicitly check for that, having an extra clause to do the + // non-ancient operations. + return fn(db) +} + +// MigrateTable processes the entries in a given table in sequence +// converting them to a new format if they're of an old format. +func (db *nofreezedb) MigrateTable(kind string, convert convertLegacyFn) error { + return errNotSupported +} + +// AncientDatadir returns an error as we don't have a backing chain freezer. +func (db *nofreezedb) AncientDatadir() (string, error) { + return "", errNotSupported +} + // NewDatabase creates a high level database on top of a given key-value data // store without a freezer moving immutable chain segments into cold storage. func NewDatabase(db ethdb.KeyValueStore) ethdb.Database { @@ -71,9 +150,95 @@ func NewLevelDBDatabase(file string, cache int, handles int, namespace string, r if err != nil { return nil, err } + log.Info("Using LevelDB as the backing database") return NewDatabase(db), nil } +const ( + dbPebble = "pebble" + dbLeveldb = "leveldb" +) + +// hasPreexistingDb checks the given data directory whether a database is already +// instantiated at that location, and if so, returns the type of database (or the +// empty string). +func hasPreexistingDb(path string) string { + if _, err := os.Stat(filepath.Join(path, "CURRENT")); err != nil { + return "" // No pre-existing db + } + if matches, err := filepath.Glob(filepath.Join(path, "OPTIONS*")); len(matches) > 0 || err != nil { + if err != nil { + panic(err) // only possible if the pattern is malformed + } + return dbPebble + } + return dbLeveldb +} + +// OpenOptions contains the options to apply when opening a database. +// OBS: If AncientsDirectory is empty, it indicates that no freezer is to be used. 
+type OpenOptions struct { + Type string // "leveldb" | "pebble" + Directory string // the datadir + Namespace string // the namespace for database relevant metrics + Cache int // the capacity(in megabytes) of the data caching + Handles int // number of files to be open simultaneously + ReadOnly bool +} + +// openKeyValueDatabase opens a disk-based key-value database, e.g. leveldb or pebble. +// +// type == null type != null +// +---------------------------------------- +// db is non-existent | pebble default | specified type +// db is existent | from db | specified type (if compatible) +func openKeyValueDatabase(o OpenOptions) (ethdb.Database, error) { + // Reject any unsupported database type + if len(o.Type) != 0 && o.Type != dbLeveldb && o.Type != dbPebble { + return nil, fmt.Errorf("unknown db.engine %v", o.Type) + } + // Retrieve any pre-existing database's type and use that or the requested one + // as long as there's no conflict between the two types + existingDb := hasPreexistingDb(o.Directory) + if len(existingDb) != 0 && len(o.Type) != 0 && o.Type != existingDb { + return nil, fmt.Errorf("db.engine choice was %v but found pre-existing %v database in specified data directory", o.Type, existingDb) + } + if o.Type == dbPebble || existingDb == dbPebble { + if PebbleEnabled { + log.Info("Using pebble as the backing database") + return NewPebbleDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly) + } else { + return nil, errors.New("db.engine 'pebble' not supported on this platform") + } + } + if o.Type == dbLeveldb || existingDb == dbLeveldb { + log.Info("Using leveldb as the backing database") + return NewLevelDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly) + } + // No pre-existing database, no user-requested one either. Default to Pebble + // on supported platforms and LevelDB on anything else. 
+ if PebbleEnabled { + log.Info("Defaulting to pebble as the backing database") + return NewPebbleDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly) + } else { + log.Info("Defaulting to leveldb as the backing database") + return NewLevelDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly) + } +} + +// Open opens both a disk-based key-value database such as leveldb or pebble, but also +// integrates it with a freezer database -- if the AncientDir option has been +// set on the provided OpenOptions. +// The passed o.AncientDir indicates the path of root ancient directory where +// the chain freezer can be opened. +func Open(o OpenOptions) (ethdb.Database, error) { + kvdb, err := openKeyValueDatabase(o) + if err != nil { + return nil, err + } + return kvdb, nil +} + type counter uint64 func (c counter) String() string { @@ -175,7 +340,7 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error { accountSnaps.Add(size) case bytes.HasPrefix(key, SnapshotStoragePrefix) && len(key) == (len(SnapshotStoragePrefix)+2*common.HashLength): storageSnaps.Add(size) - case bytes.HasPrefix(key, preimagePrefix) && len(key) == (len(preimagePrefix)+common.HashLength): + case bytes.HasPrefix(key, PreimagePrefix) && len(key) == (len(PreimagePrefix)+common.HashLength): preimages.Add(size) case bytes.HasPrefix(key, configPrefix) && len(key) == (len(configPrefix)+common.HashLength): metadata.Add(size) @@ -183,16 +348,6 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error { bloomBits.Add(size) case bytes.HasPrefix(key, BloomBitsIndexPrefix): bloomBits.Add(size) - case bytes.HasPrefix(key, []byte("clique-")) && len(key) == 7+common.HashLength: - cliqueSnaps.Add(size) - case bytes.HasPrefix(key, []byte("cht-")) || - bytes.HasPrefix(key, []byte("chtIndexV2-")) || - bytes.HasPrefix(key, []byte("chtRootV2-")): // Canonical hash trie - chtTrieNodes.Add(size) - case bytes.HasPrefix(key, []byte("blt-")) || - 
bytes.HasPrefix(key, []byte("bltIndex-")) || - bytes.HasPrefix(key, []byte("bltRoot-")): // Bloomtrie sub - bloomTrieNodes.Add(size) case bytes.HasPrefix(key, syncStorageTriesPrefix) && len(key) == syncStorageTriesKeyLength: syncProgress.Add(size) case bytes.HasPrefix(key, syncSegmentsPrefix) && len(key) == syncSegmentsKeyLength: @@ -256,18 +411,22 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error { if unaccounted.size > 0 { log.Error("Database contains unaccounted data", "size", unaccounted.size, "count", unaccounted.count) } - return nil } -// ClearPrefix removes all keys in db that begin with prefix -func ClearPrefix(db ethdb.KeyValueStore, prefix []byte) error { +// ClearPrefix removes all keys in db that begin with prefix and match an +// expected key length. [keyLen] should include the length of the prefix. +func ClearPrefix(db ethdb.KeyValueStore, prefix []byte, keyLen int) error { it := db.NewIterator(prefix, nil) defer it.Release() batch := db.NewBatch() for it.Next() { key := common.CopyBytes(it.Key()) + if len(key) != keyLen { + // avoid deleting keys that do not match the expected length + continue + } if err := batch.Delete(key); err != nil { return err } diff --git a/coreth/core/rawdb/databases_64bit.go b/coreth/core/rawdb/databases_64bit.go new file mode 100644 index 00000000..73bfeb20 --- /dev/null +++ b/coreth/core/rawdb/databases_64bit.go @@ -0,0 +1,37 @@ +// Copyright 2023 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+//go:build (arm64 || amd64) && !openbsd
+
+package rawdb
+
+import (
+	"github.com/ethereum/go-ethereum/ethdb"
+	"github.com/ethereum/go-ethereum/ethdb/pebble"
+)
+
+// Pebble is unsupported on 32bit architecture
+const PebbleEnabled = true
+
+// NewPebbleDBDatabase creates a persistent key-value database without a freezer
+// moving immutable chain segments into cold storage.
+func NewPebbleDBDatabase(file string, cache int, handles int, namespace string, readonly bool) (ethdb.Database, error) {
+	db, err := pebble.New(file, cache, handles, namespace, readonly)
+	if err != nil {
+		return nil, err
+	}
+	return NewDatabase(db), nil
+}
diff --git a/coreth/ethdb/memorydb/memorydb_test.go b/coreth/core/rawdb/databases_non64bit.go
similarity index 64%
rename from coreth/ethdb/memorydb/memorydb_test.go
rename to coreth/core/rawdb/databases_non64bit.go
index 34361e9f..65ad802a 100644
--- a/coreth/ethdb/memorydb/memorydb_test.go
+++ b/coreth/core/rawdb/databases_non64bit.go
@@ -1,4 +1,4 @@
-// (c) 2020-2021, Ava Labs, Inc.
+// (c) 2023, Ava Labs, Inc.
 //
 // This file is a derived work, based on the go-ethereum library whose original
 // notices appear below.
@@ -8,7 +8,7 @@
 //
 // Much love to the original authors for their work.
 // **********
-// Copyright 2018 The go-ethereum Authors
+// Copyright 2023 The go-ethereum Authors
 // This file is part of the go-ethereum library.
// // The go-ethereum library is free software: you can redistribute it and/or modify @@ -24,19 +24,21 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . -package memorydb +//go:build !((arm64 || amd64) && !openbsd) + +package rawdb import ( - "testing" + "errors" - "github.com/ava-labs/coreth/ethdb" - "github.com/ava-labs/coreth/ethdb/dbtest" + "github.com/ethereum/go-ethereum/ethdb" ) -func TestMemoryDB(t *testing.T) { - t.Run("DatabaseSuite", func(t *testing.T) { - dbtest.TestDatabaseSuite(t, func() ethdb.KeyValueStore { - return New() - }) - }) +// Pebble is unsuported on 32bit architecture +const PebbleEnabled = false + +// NewPebbleDBDatabase creates a persistent key-value database without a freezer +// moving immutable chain segments into cold storage. +func NewPebbleDBDatabase(file string, cache int, handles int, namespace string, readonly bool) (ethdb.Database, error) { + return nil, errors.New("pebble is not supported on this platform") } diff --git a/coreth/core/rawdb/freezer.go b/coreth/core/rawdb/freezer.go new file mode 100644 index 00000000..622cbb3f --- /dev/null +++ b/coreth/core/rawdb/freezer.go @@ -0,0 +1,21 @@ +// Copyright 2019 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. 
If not, see . + +package rawdb + +// convertLegacyFn takes a raw freezer entry in an older format and +// returns it in the new format. +type convertLegacyFn = func([]byte) ([]byte, error) diff --git a/coreth/core/rawdb/freezer_table.go b/coreth/core/rawdb/freezer_table.go new file mode 100644 index 00000000..bc999be2 --- /dev/null +++ b/coreth/core/rawdb/freezer_table.go @@ -0,0 +1,24 @@ +// Copyright 2019 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package rawdb + +import "errors" + +var ( + // errNotSupported is returned if the database doesn't support the required operation. + errNotSupported = errors.New("this operation is not supported") +) diff --git a/coreth/core/rawdb/key_length_iterator.go b/coreth/core/rawdb/key_length_iterator.go index 6878777d..fe95d719 100644 --- a/coreth/core/rawdb/key_length_iterator.go +++ b/coreth/core/rawdb/key_length_iterator.go @@ -26,7 +26,7 @@ package rawdb -import "github.com/ava-labs/coreth/ethdb" +import "github.com/ethereum/go-ethereum/ethdb" // KeyLengthIterator is a wrapper for a database iterator that ensures only key-value pairs // with a specific key length will be returned. 
diff --git a/coreth/core/rawdb/schema.go b/coreth/core/rawdb/schema.go index f817d218..2f070f42 100644 --- a/coreth/core/rawdb/schema.go +++ b/coreth/core/rawdb/schema.go @@ -34,6 +34,7 @@ import ( "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/coreth/metrics" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" ) // The fields below define the low level database schema prefixing. @@ -89,6 +90,19 @@ var ( SnapshotStoragePrefix = []byte("o") // SnapshotStoragePrefix + account hash + storage hash -> storage trie value CodePrefix = []byte("c") // CodePrefix + code hash -> account code + // Path-based storage scheme of merkle patricia trie. + trieNodeAccountPrefix = []byte("A") // trieNodeAccountPrefix + hexPath -> trie node + trieNodeStoragePrefix = []byte("O") // trieNodeStoragePrefix + accountHash + hexPath -> trie node + + PreimagePrefix = []byte("secure-key-") // PreimagePrefix + hash -> preimage + configPrefix = []byte("ethereum-config-") // config prefix for the db + + // BloomBitsIndexPrefix is the data table of a chain indexer to track its progress + BloomBitsIndexPrefix = []byte("iB") + + preimageCounter = metrics.NewRegisteredCounter("db/preimage/total", nil) + preimageHitCounter = metrics.NewRegisteredCounter("db/preimage/hits", nil) + // State sync progress keys and prefixes syncRootKey = []byte("sync_root") // indicates the root of the main account trie currently being synced syncStorageTriesPrefix = []byte("sync_storage") // syncStorageTriesPrefix + trie root + account hash: indicates a storage trie must be fetched for the account @@ -103,15 +117,6 @@ var ( // State sync metadata syncPerformedPrefix = []byte("sync_performed") syncPerformedKeyLength = len(syncPerformedPrefix) + wrappers.LongLen // prefix + block number as uint64 - - preimagePrefix = []byte("secure-key-") // preimagePrefix + hash -> preimage - configPrefix = []byte("ethereum-config-") // config prefix for the db - - // Chain index 
prefixes (use `i` + single byte to avoid mixing data types). - BloomBitsIndexPrefix = []byte("iB") // BloomBitsIndexPrefix is the data table of a chain indexer to track its progress - - preimageCounter = metrics.NewRegisteredCounter("db/preimage/total", nil) - preimageHitCounter = metrics.NewRegisteredCounter("db/preimage/hits", nil) ) // LegacyTxLookupEntry is the legacy TxLookupEntry definition with some unnecessary @@ -191,7 +196,7 @@ func bloomBitsKey(bit uint, section uint64, hash common.Hash) []byte { // preimageKey = preimagePrefix + hash func preimageKey(hash common.Hash) []byte { - return append(preimagePrefix, hash.Bytes()...) + return append(PreimagePrefix, hash.Bytes()...) } // codeKey = CodePrefix + hash @@ -212,3 +217,58 @@ func IsCodeKey(key []byte) (bool, []byte) { func configKey(hash common.Hash) []byte { return append(configPrefix, hash.Bytes()...) } + +// accountTrieNodeKey = trieNodeAccountPrefix + nodePath. +func accountTrieNodeKey(path []byte) []byte { + return append(trieNodeAccountPrefix, path...) +} + +// storageTrieNodeKey = trieNodeStoragePrefix + accountHash + nodePath. +func storageTrieNodeKey(accountHash common.Hash, path []byte) []byte { + return append(append(trieNodeStoragePrefix, accountHash.Bytes()...), path...) +} + +// IsLegacyTrieNode reports whether a provided database entry is a legacy trie +// node. The characteristics of legacy trie node are: +// - the key length is 32 bytes +// - the key is the hash of val +func IsLegacyTrieNode(key []byte, val []byte) bool { + if len(key) != common.HashLength { + return false + } + return bytes.Equal(key, crypto.Keccak256(val)) +} + +// IsAccountTrieNode reports whether a provided database entry is an account +// trie node in path-based state scheme. 
+func IsAccountTrieNode(key []byte) (bool, []byte) { + if !bytes.HasPrefix(key, trieNodeAccountPrefix) { + return false, nil + } + // The remaining key should only consist a hex node path + // whose length is in the range 0 to 64 (64 is excluded + // since leaves are always wrapped with shortNode). + if len(key) >= len(trieNodeAccountPrefix)+common.HashLength*2 { + return false, nil + } + return true, key[len(trieNodeAccountPrefix):] +} + +// IsStorageTrieNode reports whether a provided database entry is a storage +// trie node in path-based state scheme. +func IsStorageTrieNode(key []byte) (bool, common.Hash, []byte) { + if !bytes.HasPrefix(key, trieNodeStoragePrefix) { + return false, common.Hash{}, nil + } + // The remaining key consists of 2 parts: + // - 32 bytes account hash + // - hex node path whose length is in the range 0 to 64 + if len(key) < len(trieNodeStoragePrefix)+common.HashLength { + return false, common.Hash{}, nil + } + if len(key) >= len(trieNodeStoragePrefix)+common.HashLength+common.HashLength*2 { + return false, common.Hash{}, nil + } + accountHash := common.BytesToHash(key[len(trieNodeStoragePrefix) : len(trieNodeStoragePrefix)+common.HashLength]) + return true, accountHash, key[len(trieNodeStoragePrefix)+common.HashLength:] +} diff --git a/coreth/core/rawdb/table.go b/coreth/core/rawdb/table.go index 13dd9b97..9563f8cc 100644 --- a/coreth/core/rawdb/table.go +++ b/coreth/core/rawdb/table.go @@ -27,7 +27,7 @@ package rawdb import ( - "github.com/ava-labs/coreth/ethdb" + "github.com/ethereum/go-ethereum/ethdb" ) // table is a wrapper around a database that prefixes each key access with a pre- @@ -60,6 +60,80 @@ func (t *table) Get(key []byte) ([]byte, error) { return t.db.Get(append([]byte(t.prefix), key...)) } +// HasAncient is a noop passthrough that just forwards the request to the underlying +// database. 
+func (t *table) HasAncient(kind string, number uint64) (bool, error) { + return t.db.HasAncient(kind, number) +} + +// Ancient is a noop passthrough that just forwards the request to the underlying +// database. +func (t *table) Ancient(kind string, number uint64) ([]byte, error) { + return t.db.Ancient(kind, number) +} + +// AncientRange is a noop passthrough that just forwards the request to the underlying +// database. +func (t *table) AncientRange(kind string, start, count, maxBytes uint64) ([][]byte, error) { + return t.db.AncientRange(kind, start, count, maxBytes) +} + +// Ancients is a noop passthrough that just forwards the request to the underlying +// database. +func (t *table) Ancients() (uint64, error) { + return t.db.Ancients() +} + +// Tail is a noop passthrough that just forwards the request to the underlying +// database. +func (t *table) Tail() (uint64, error) { + return t.db.Tail() +} + +// AncientSize is a noop passthrough that just forwards the request to the underlying +// database. +func (t *table) AncientSize(kind string) (uint64, error) { + return t.db.AncientSize(kind) +} + +// ModifyAncients runs an ancient write operation on the underlying database. +func (t *table) ModifyAncients(fn func(ethdb.AncientWriteOp) error) (int64, error) { + return t.db.ModifyAncients(fn) +} + +func (t *table) ReadAncients(fn func(reader ethdb.AncientReaderOp) error) (err error) { + return t.db.ReadAncients(fn) +} + +// TruncateHead is a noop passthrough that just forwards the request to the underlying +// database. +func (t *table) TruncateHead(items uint64) error { + return t.db.TruncateHead(items) +} + +// TruncateTail is a noop passthrough that just forwards the request to the underlying +// database. +func (t *table) TruncateTail(items uint64) error { + return t.db.TruncateTail(items) +} + +// Sync is a noop passthrough that just forwards the request to the underlying +// database. 
+func (t *table) Sync() error { + return t.db.Sync() +} + +// MigrateTable processes the entries in a given table in sequence +// converting them to a new format if they're of an old format. +func (t *table) MigrateTable(kind string, convert convertLegacyFn) error { + return t.db.MigrateTable(kind, convert) +} + +// AncientDatadir returns the ancient datadir of the underlying database. +func (t *table) AncientDatadir() (string, error) { + return t.db.AncientDatadir() +} + // Put inserts the given value into the database at a prefixed version of the // provided key. func (t *table) Put(key []byte, value []byte) error { @@ -136,6 +210,13 @@ func (t *table) NewBatchWithSize(size int) ethdb.Batch { return &tableBatch{t.db.NewBatchWithSize(size), t.prefix} } +// NewSnapshot creates a database snapshot based on the current state. +// The created snapshot will not be affected by all following mutations +// happened on the database. +func (t *table) NewSnapshot() (ethdb.Snapshot, error) { + return t.db.NewSnapshot() +} + // tableBatch is a wrapper around a database batch that prefixes each key access // with a pre-configured string. 
type tableBatch struct { diff --git a/coreth/core/rawdb/table_test.go b/coreth/core/rawdb/table_test.go index c7cac982..9cb913c2 100644 --- a/coreth/core/rawdb/table_test.go +++ b/coreth/core/rawdb/table_test.go @@ -30,7 +30,7 @@ import ( "bytes" "testing" - "github.com/ava-labs/coreth/ethdb" + "github.com/ethereum/go-ethereum/ethdb" ) func TestTableDatabase(t *testing.T) { testTableDatabase(t, "prefix") } diff --git a/coreth/core/tx_cacher.go b/coreth/core/sender_cacher.go similarity index 97% rename from coreth/core/tx_cacher.go rename to coreth/core/sender_cacher.go index feff21d5..a1c09ec3 100644 --- a/coreth/core/tx_cacher.go +++ b/coreth/core/sender_cacher.go @@ -55,9 +55,9 @@ type TxSenderCacher struct { tasksMu sync.RWMutex } -// newTxSenderCacher creates a new transaction sender background cacher and starts +// NewTxSenderCacher creates a new transaction sender background cacher and starts // as many processing goroutines as allowed by the GOMAXPROCS on construction. -func newTxSenderCacher(threads int) *TxSenderCacher { +func NewTxSenderCacher(threads int) *TxSenderCacher { cacher := &TxSenderCacher{ tasks: make(chan *txSenderCacherRequest, threads), threads: threads, diff --git a/coreth/core/state/database.go b/coreth/core/state/database.go index 181fbec4..a17f351b 100644 --- a/coreth/core/state/database.go +++ b/coreth/core/state/database.go @@ -30,13 +30,13 @@ import ( "errors" "fmt" - "github.com/VictoriaMetrics/fastcache" "github.com/ava-labs/coreth/core/rawdb" "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/ethdb" "github.com/ava-labs/coreth/trie" + "github.com/ava-labs/coreth/trie/trienode" "github.com/ethereum/go-ethereum/common" - lru "github.com/hashicorp/golang-lru" + "github.com/ethereum/go-ethereum/common/lru" + "github.com/ethereum/go-ethereum/ethdb" ) const ( @@ -53,7 +53,7 @@ type Database interface { OpenTrie(root common.Hash) (Trie, error) // OpenStorageTrie opens the storage trie of an account. 
- OpenStorageTrie(addrHash, root common.Hash) (Trie, error) + OpenStorageTrie(stateRoot common.Hash, addrHash, root common.Hash) (Trie, error) // CopyTrie returns an independent copy of the given trie. CopyTrie(Trie) Trie @@ -79,29 +79,36 @@ type Trie interface { // TODO(fjl): remove this when StateTrie is removed GetKey([]byte) []byte - // TryGet returns the value for key stored in the trie. The value bytes must - // not be modified by the caller. If a node was not found in the database, a - // trie.MissingNodeError is returned. - TryGet(key []byte) ([]byte, error) - - // TryGetAccount abstract an account read from the trie. - TryGetAccount(key []byte) (*types.StateAccount, error) - - // TryUpdate associates key with value in the trie. If value has length zero, any - // existing value is deleted from the trie. The value bytes must not be modified + // GetStorage returns the value for key stored in the trie. The value bytes + // must not be modified by the caller. If a node was not found in the database, + // a trie.MissingNodeError is returned. + GetStorage(addr common.Address, key []byte) ([]byte, error) + + // GetAccount abstracts an account read from the trie. It retrieves the + // account blob from the trie with provided account address and decodes it + // with associated decoding algorithm. If the specified account is not in + // the trie, nil will be returned. If the trie is corrupted(e.g. some nodes + // are missing or the account blob is incorrect for decoding), an error will + // be returned. + GetAccount(address common.Address) (*types.StateAccount, error) + + // UpdateStorage associates key with value in the trie. If value has length zero, + // any existing value is deleted from the trie. The value bytes must not be modified // by the caller while they are stored in the trie. If a node was not found in the // database, a trie.MissingNodeError is returned. 
- TryUpdate(key, value []byte) error + UpdateStorage(addr common.Address, key, value []byte) error - // TryUpdateAccount abstract an account write to the trie. - TryUpdateAccount(key []byte, account *types.StateAccount) error + // UpdateAccount abstracts an account write to the trie. It encodes the + // provided account object with associated algorithm and then updates it + // in the trie with provided address. + UpdateAccount(address common.Address, account *types.StateAccount) error - // TryDelete removes any existing value for key from the trie. If a node was not - // found in the database, a trie.MissingNodeError is returned. - TryDelete(key []byte) error + // DeleteStorage removes any existing value for key from the trie. If a node + // was not found in the database, a trie.MissingNodeError is returned. + DeleteStorage(addr common.Address, key []byte) error - // TryDeleteAccount abstracts an account deletion from the trie. - TryDeleteAccount(key []byte) error + // DeleteAccount abstracts an account deletion from the trie. + DeleteAccount(address common.Address) error // Hash returns the root hash of the trie. It does not write to the database and // can be used even if the trie doesn't have one. @@ -113,7 +120,7 @@ type Trie interface { // The returned nodeset can be nil if the trie is clean(nothing to commit). // Once the trie is committed, it's not usable anymore. A new trie must // be created with new root and updated trie database for following usage - Commit(collectLeaf bool) (common.Hash, *trie.NodeSet, error) + Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet) // NodeIterator returns an iterator that returns nodes of the trie. Iteration // starts at the key after the given start key. @@ -140,25 +147,34 @@ func NewDatabase(db ethdb.Database) Database { // is safe for concurrent use and retains a lot of collapsed RLP trie nodes in a // large memory cache. 
func NewDatabaseWithConfig(db ethdb.Database, config *trie.Config) Database { - csc, _ := lru.New(codeSizeCacheSize) return &cachingDB{ - db: trie.NewDatabaseWithConfig(db, config), disk: db, - codeSizeCache: csc, - codeCache: fastcache.New(codeCacheSize), + codeSizeCache: lru.NewCache[common.Hash, int](codeSizeCacheSize), + codeCache: lru.NewSizeConstrainedCache[common.Hash, []byte](codeCacheSize), + triedb: trie.NewDatabaseWithConfig(db, config), + } +} + +// NewDatabaseWithNodeDB creates a state database with an already initialized node database. +func NewDatabaseWithNodeDB(db ethdb.Database, triedb *trie.Database) Database { + return &cachingDB{ + disk: db, + codeSizeCache: lru.NewCache[common.Hash, int](codeSizeCacheSize), + codeCache: lru.NewSizeConstrainedCache[common.Hash, []byte](codeCacheSize), + triedb: triedb, } } type cachingDB struct { - db *trie.Database disk ethdb.KeyValueStore - codeSizeCache *lru.Cache - codeCache *fastcache.Cache + codeSizeCache *lru.Cache[common.Hash, int] + codeCache *lru.SizeConstrainedCache[common.Hash, []byte] + triedb *trie.Database } // OpenTrie opens the main account trie at a specific root hash. func (db *cachingDB) OpenTrie(root common.Hash) (Trie, error) { - tr, err := trie.NewStateTrie(common.Hash{}, root, db.db) + tr, err := trie.NewStateTrie(trie.StateTrieID(root), db.triedb) if err != nil { return nil, err } @@ -166,8 +182,8 @@ func (db *cachingDB) OpenTrie(root common.Hash) (Trie, error) { } // OpenStorageTrie opens the storage trie of an account. 
-func (db *cachingDB) OpenStorageTrie(addrHash, root common.Hash) (Trie, error) { - tr, err := trie.NewStateTrie(addrHash, root, db.db) +func (db *cachingDB) OpenStorageTrie(stateRoot common.Hash, addrHash, root common.Hash) (Trie, error) { + tr, err := trie.NewStateTrie(trie.StorageTrieID(stateRoot, addrHash, root), db.triedb) if err != nil { return nil, err } @@ -186,12 +202,13 @@ func (db *cachingDB) CopyTrie(t Trie) Trie { // ContractCode retrieves a particular contract's code. func (db *cachingDB) ContractCode(addrHash, codeHash common.Hash) ([]byte, error) { - if code := db.codeCache.Get(nil, codeHash.Bytes()); len(code) > 0 { + code, _ := db.codeCache.Get(codeHash) + if len(code) > 0 { return code, nil } - code := rawdb.ReadCode(db.disk, codeHash) + code = rawdb.ReadCode(db.disk, codeHash) if len(code) > 0 { - db.codeCache.Set(codeHash.Bytes(), code) + db.codeCache.Add(codeHash, code) db.codeSizeCache.Add(codeHash, len(code)) return code, nil } @@ -201,7 +218,7 @@ func (db *cachingDB) ContractCode(addrHash, codeHash common.Hash) ([]byte, error // ContractCodeSize retrieves a particular contracts code's size. func (db *cachingDB) ContractCodeSize(addrHash, codeHash common.Hash) (int, error) { if cached, ok := db.codeSizeCache.Get(codeHash); ok { - return cached.(int), nil + return cached, nil } code, err := db.ContractCode(addrHash, codeHash) return len(code), err @@ -214,5 +231,5 @@ func (db *cachingDB) DiskDB() ethdb.KeyValueStore { // TrieDB retrieves any intermediate trie-node caching layer. 
func (db *cachingDB) TrieDB() *trie.Database { - return db.db + return db.triedb } diff --git a/coreth/core/state/dump.go b/coreth/core/state/dump.go index 8f30d482..cb5e0df7 100644 --- a/coreth/core/state/dump.go +++ b/coreth/core/state/dump.go @@ -39,7 +39,7 @@ import ( "github.com/ethereum/go-ethereum/rlp" ) -// DumpConfig is a set of options to control what portions of the statewill be +// DumpConfig is a set of options to control what portions of the state will be // iterated and collected. type DumpConfig struct { SkipCode bool @@ -54,7 +54,7 @@ type DumpCollector interface { // OnRoot is called with the state root OnRoot(common.Hash) // OnAccount is called once for each account in the trie - OnAccount(common.Address, DumpAccount) + OnAccount(*common.Address, DumpAccount) } // DumpAccount represents an account in the state. @@ -83,8 +83,10 @@ func (d *Dump) OnRoot(root common.Hash) { } // OnAccount implements DumpCollector interface -func (d *Dump) OnAccount(addr common.Address, account DumpAccount) { - d.Accounts[addr] = account +func (d *Dump) OnAccount(addr *common.Address, account DumpAccount) { + if addr != nil { + d.Accounts[*addr] = account + } } // IteratorDump is an implementation for iterating over data. @@ -100,8 +102,10 @@ func (d *IteratorDump) OnRoot(root common.Hash) { } // OnAccount implements DumpCollector interface -func (d *IteratorDump) OnAccount(addr common.Address, account DumpAccount) { - d.Accounts[addr] = account +func (d *IteratorDump) OnAccount(addr *common.Address, account DumpAccount) { + if addr != nil { + d.Accounts[*addr] = account + } } // iterativeDump is a DumpCollector-implementation which dumps output line-by-line iteratively. 
@@ -110,7 +114,7 @@ type iterativeDump struct { } // OnAccount implements DumpCollector interface -func (d iterativeDump) OnAccount(addr common.Address, account DumpAccount) { +func (d iterativeDump) OnAccount(addr *common.Address, account DumpAccount) { dumpAccount := &DumpAccount{ Balance: account.Balance, Nonce: account.Nonce, @@ -120,10 +124,7 @@ func (d iterativeDump) OnAccount(addr common.Address, account DumpAccount) { Code: account.Code, Storage: account.Storage, SecureKey: account.SecureKey, - Address: nil, - } - if addr != (common.Address{}) { - dumpAccount.Address = &addr + Address: addr, } d.Encode(dumpAccount) } @@ -165,23 +166,32 @@ func (s *StateDB) DumpToCollector(c DumpCollector, conf *DumpConfig) (nextKey [] IsMultiCoin: data.IsMultiCoin, SecureKey: it.Key, } - addrBytes := s.trie.GetKey(it.Key) + var ( + addrBytes = s.trie.GetKey(it.Key) + addr = common.BytesToAddress(addrBytes) + address *common.Address + ) if addrBytes == nil { // Preimage missing missingPreimages++ if conf.OnlyWithAddresses { continue } - account.SecureKey = it.Key + } else { + address = &addr } - addr := common.BytesToAddress(addrBytes) obj := newObject(s, addr, data) if !conf.SkipCode { account.Code = obj.Code(s.db) } if !conf.SkipStorage { account.Storage = make(map[common.Hash]string) - storageIt := trie.NewIterator(obj.getTrie(s.db).NodeIterator(nil)) + tr, err := obj.getTrie(s.db) + if err != nil { + log.Error("Failed to load storage trie", "err", err) + continue + } + storageIt := trie.NewIterator(tr.NodeIterator(nil)) for storageIt.Next() { _, content, _, err := rlp.Split(storageIt.Value) if err != nil { @@ -191,7 +201,7 @@ func (s *StateDB) DumpToCollector(c DumpCollector, conf *DumpConfig) (nextKey [] account.Storage[common.BytesToHash(s.trie.GetKey(storageIt.Key))] = common.Bytes2Hex(content) } } - c.OnAccount(addr, account) + c.OnAccount(address, account) accounts++ if time.Since(logged) > 8*time.Second { log.Info("Trie dumping in progress", "at", it.Key, 
"accounts", accounts, diff --git a/coreth/core/state/iterator.go b/coreth/core/state/iterator.go index 2ad4ed93..4d6e2ec4 100644 --- a/coreth/core/state/iterator.go +++ b/coreth/core/state/iterator.go @@ -36,9 +36,9 @@ import ( "github.com/ethereum/go-ethereum/rlp" ) -// NodeIterator is an iterator to traverse the entire state trie post-order, +// nodeIterator is an iterator to traverse the entire state trie post-order, // including all of the contract code and contract state tries. -type NodeIterator struct { +type nodeIterator struct { state *StateDB // State being iterated stateIt trie.NodeIterator // Primary iterator for the global state trie @@ -54,9 +54,9 @@ type NodeIterator struct { Error error // Failure set in case of an internal error in the iterator } -// NewNodeIterator creates an post-order state node iterator. -func NewNodeIterator(state *StateDB) *NodeIterator { - return &NodeIterator{ +// newNodeIterator creates an post-order state node iterator. +func newNodeIterator(state *StateDB) *nodeIterator { + return &nodeIterator{ state: state, } } @@ -64,7 +64,7 @@ func NewNodeIterator(state *StateDB) *NodeIterator { // Next moves the iterator to the next node, returning whether there are any // further nodes. In case of an internal error this method returns false and // sets the Error field to the encountered failure. -func (it *NodeIterator) Next() bool { +func (it *nodeIterator) Next() bool { // If the iterator failed previously, don't do anything if it.Error != nil { return false @@ -78,7 +78,7 @@ func (it *NodeIterator) Next() bool { } // step moves the iterator to the next entry of the state trie. 
-func (it *NodeIterator) step() error { +func (it *nodeIterator) step() error { // Abort if we reached the end of the iteration if it.state == nil { return nil @@ -119,7 +119,7 @@ func (it *NodeIterator) step() error { if err := rlp.Decode(bytes.NewReader(it.stateIt.LeafBlob()), &account); err != nil { return err } - dataTrie, err := it.state.db.OpenStorageTrie(common.BytesToHash(it.stateIt.LeafKey()), account.Root) + dataTrie, err := it.state.db.OpenStorageTrie(it.state.originalRoot, common.BytesToHash(it.stateIt.LeafKey()), account.Root) if err != nil { return err } @@ -127,7 +127,7 @@ func (it *NodeIterator) step() error { if !it.dataIt.Next(true) { it.dataIt = nil } - if !bytes.Equal(account.CodeHash, emptyCodeHash) { + if !bytes.Equal(account.CodeHash, types.EmptyCodeHash.Bytes()) { it.codeHash = common.BytesToHash(account.CodeHash) addrHash := common.BytesToHash(it.stateIt.LeafKey()) it.code, err = it.state.db.ContractCode(addrHash, common.BytesToHash(account.CodeHash)) @@ -141,7 +141,7 @@ func (it *NodeIterator) step() error { // retrieve pulls and caches the current state entry the iterator is traversing. // The method returns whether there are any more data left for inspection. -func (it *NodeIterator) retrieve() bool { +func (it *nodeIterator) retrieve() bool { // Clear out any previously set values it.Hash = common.Hash{} diff --git a/coreth/core/state/iterator_test.go b/coreth/core/state/iterator_test.go new file mode 100644 index 00000000..99e2d81e --- /dev/null +++ b/coreth/core/state/iterator_test.go @@ -0,0 +1,113 @@ +// (c) 2023, Ava Labs, Inc. +// +// This file is a derived work, based on the go-ethereum library whose original +// notices appear below. +// +// It is distributed under a license compatible with the licensing terms of the +// original code from which it is derived. +// +// Much love to the original authors for their work. +// ********** +// Copyright 2016 The go-ethereum Authors +// This file is part of the go-ethereum library. 
+// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package state + +import ( + "testing" + + "github.com/ava-labs/coreth/core/rawdb" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" +) + +// Tests that the node iterator indeed walks over the entire database contents. +func TestNodeIteratorCoverage(t *testing.T) { + // Create some arbitrary test state to iterate + db, sdb, root, _ := makeTestState() + sdb.TrieDB().Commit(root, false) + + state, err := New(root, sdb, nil) + if err != nil { + t.Fatalf("failed to create state trie at %x: %v", root, err) + } + // Gather all the node hashes found by the iterator + hashes := make(map[common.Hash]struct{}) + for it := newNodeIterator(state); it.Next(); { + if it.Hash != (common.Hash{}) { + hashes[it.Hash] = struct{}{} + } + } + // Check in-disk nodes + var ( + seenNodes = make(map[common.Hash]struct{}) + seenCodes = make(map[common.Hash]struct{}) + ) + it := db.NewIterator(nil, nil) + for it.Next() { + ok, hash := isTrieNode(sdb.TrieDB().Scheme(), it.Key(), it.Value()) + if !ok { + continue + } + seenNodes[hash] = struct{}{} + } + it.Release() + + // Check in-disk codes + it = db.NewIterator(nil, nil) + for it.Next() { + ok, hash := rawdb.IsCodeKey(it.Key()) + if !ok { + continue + } + if _, ok := hashes[common.BytesToHash(hash)]; !ok { + t.Errorf("state entry 
not reported %x", it.Key()) + } + seenCodes[common.BytesToHash(hash)] = struct{}{} + } + it.Release() + + // Cross check the iterated hashes and the database/nodepool content + for hash := range hashes { + _, ok := seenNodes[hash] + if !ok { + _, ok = seenCodes[hash] + } + if !ok { + t.Errorf("failed to retrieve reported node %x", hash) + } + } +} + +// isTrieNode is a helper function which reports if the provided +// database entry belongs to a trie node or not. +func isTrieNode(scheme string, key, val []byte) (bool, common.Hash) { + if scheme == rawdb.HashScheme { + if rawdb.IsLegacyTrieNode(key, val) { + return true, common.BytesToHash(key) + } + } else { + ok, _ := rawdb.IsAccountTrieNode(key) + if ok { + return true, crypto.Keccak256Hash(val) + } + ok, _, _ = rawdb.IsStorageTrieNode(key) + if ok { + return true, crypto.Keccak256Hash(val) + } + } + return false, common.Hash{} +} diff --git a/coreth/core/state/journal.go b/coreth/core/state/journal.go index 4b61d3c2..eb87a725 100644 --- a/coreth/core/state/journal.go +++ b/coreth/core/state/journal.go @@ -151,6 +151,11 @@ type ( address *common.Address slot *common.Hash } + + transientStorageChange struct { + account *common.Address + key, prevalue common.Hash + } ) func (ch createObjectChange) revert(s *StateDB) { @@ -164,8 +169,8 @@ func (ch createObjectChange) dirtied() *common.Address { func (ch resetObjectChange) revert(s *StateDB) { s.setStateObject(ch.prev) - if !ch.prevdestruct && s.snap != nil { - delete(s.snapDestructs, ch.prev.addrHash) + if !ch.prevdestruct { + delete(s.stateObjectsDestruct, ch.prev.address) } } @@ -234,6 +239,14 @@ func (ch storageChange) dirtied() *common.Address { return ch.account } +func (ch transientStorageChange) revert(s *StateDB) { + s.setTransientState(*ch.account, ch.key, ch.prevalue) +} + +func (ch transientStorageChange) dirtied() *common.Address { + return nil +} + func (ch refundChange) revert(s *StateDB) { s.refund = ch.prev } diff --git a/coreth/core/state/metrics.go 
b/coreth/core/state/metrics.go index 6d702312..f06d0ed2 100644 --- a/coreth/core/state/metrics.go +++ b/coreth/core/state/metrics.go @@ -29,10 +29,12 @@ package state import "github.com/ava-labs/coreth/metrics" var ( - accountUpdatedMeter = metrics.NewRegisteredMeter("state/update/account", nil) - storageUpdatedMeter = metrics.NewRegisteredMeter("state/update/storage", nil) - accountDeletedMeter = metrics.NewRegisteredMeter("state/delete/account", nil) - storageDeletedMeter = metrics.NewRegisteredMeter("state/delete/storage", nil) - accountTrieCommittedMeter = metrics.NewRegisteredMeter("state/commit/accountnodes", nil) - storageTriesCommittedMeter = metrics.NewRegisteredMeter("state/commit/storagenodes", nil) + accountUpdatedMeter = metrics.NewRegisteredMeter("state/update/account", nil) + storageUpdatedMeter = metrics.NewRegisteredMeter("state/update/storage", nil) + accountDeletedMeter = metrics.NewRegisteredMeter("state/delete/account", nil) + storageDeletedMeter = metrics.NewRegisteredMeter("state/delete/storage", nil) + accountTrieUpdatedMeter = metrics.NewRegisteredMeter("state/update/accountnodes", nil) + storageTriesUpdatedMeter = metrics.NewRegisteredMeter("state/update/storagenodes", nil) + accountTrieDeletedMeter = metrics.NewRegisteredMeter("state/delete/accountnodes", nil) + storageTriesDeletedMeter = metrics.NewRegisteredMeter("state/delete/storagenodes", nil) ) diff --git a/coreth/core/state/pruner/bloom.go b/coreth/core/state/pruner/bloom.go index 1670be0e..158077a4 100644 --- a/coreth/core/state/pruner/bloom.go +++ b/coreth/core/state/pruner/bloom.go @@ -137,6 +137,6 @@ func (bloom *stateBloom) Delete(key []byte) error { panic("not supported") } // reports whether the key is contained. // - If it says yes, the key may be contained // - If it says no, the key is definitely not contained. 
-func (bloom *stateBloom) Contain(key []byte) (bool, error) { - return bloom.bloom.Contains(stateBloomHasher(key)), nil +func (bloom *stateBloom) Contain(key []byte) bool { + return bloom.bloom.Contains(stateBloomHasher(key)) } diff --git a/coreth/core/state/pruner/pruner.go b/coreth/core/state/pruner/pruner.go index 665ed8f9..aaa5148f 100644 --- a/coreth/core/state/pruner/pruner.go +++ b/coreth/core/state/pruner/pruner.go @@ -40,10 +40,9 @@ import ( "github.com/ava-labs/coreth/core/rawdb" "github.com/ava-labs/coreth/core/state/snapshot" "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/ethdb" "github.com/ava-labs/coreth/trie" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rlp" ) @@ -65,13 +64,12 @@ const ( rangeCompactionThreshold = 100000 ) -var ( - // emptyRoot is the known root hash of an empty trie. - emptyRoot = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") - - // emptyCode is the known hash of the empty EVM bytecode. - emptyCode = crypto.Keccak256(nil) -) +// Config includes all the configurations for pruning. +type Config struct { + Datadir string // The directory of the state database + Cachedir string // The directory of state clean cache + BloomSize uint64 // The Megabytes of memory allocated to bloom-filter +} // Pruner is an offline tool to prune the stale state with the // help of the snapshot. The workflow of pruner is very simple: @@ -85,41 +83,47 @@ var ( // periodically in order to release the disk usage and improve the // disk read performance to some extent. 
type Pruner struct { - db ethdb.Database - stateBloom *stateBloom - datadir string - headHeader *types.Header - snaptree *snapshot.Tree + config Config + chainHeader *types.Header + db ethdb.Database + stateBloom *stateBloom + snaptree *snapshot.Tree } // NewPruner creates the pruner instance. -func NewPruner(db ethdb.Database, datadir string, bloomSize uint64) (*Pruner, error) { +func NewPruner(db ethdb.Database, config Config) (*Pruner, error) { headBlock := rawdb.ReadHeadBlock(db) if headBlock == nil { - return nil, errors.New("Failed to load head block") + return nil, errors.New("failed to load head block") } // Note: we refuse to start a pruning session unless the snapshot disk layer exists, which should prevent // us from ever needing to enter RecoverPruning in an invalid pruning session (a session where we do not have // the protected trie in the triedb and in the snapshot disk layer). - snaptree, err := snapshot.New(db, trie.NewDatabase(db), 256, headBlock.Hash(), headBlock.Root(), false, false, false) + snapconfig := snapshot.Config{ + CacheSize: 256, + AsyncBuild: false, + NoBuild: true, + SkipVerify: true, + } + snaptree, err := snapshot.New(snapconfig, db, trie.NewDatabase(db), headBlock.Hash(), headBlock.Root()) if err != nil { return nil, fmt.Errorf("failed to create snapshot for pruning, must restart without offline pruning disabled to recover: %w", err) // The relevant snapshot(s) might not exist } // Sanitize the bloom filter size if it's too small. 
- if bloomSize < 256 { - log.Warn("Sanitizing bloomfilter size", "provided(MB)", bloomSize, "updated(MB)", 256) - bloomSize = 256 + if config.BloomSize < 256 { + log.Warn("Sanitizing bloomfilter size", "provided(MB)", config.BloomSize, "updated(MB)", 256) + config.BloomSize = 256 } - stateBloom, err := newStateBloomWithSize(bloomSize) + stateBloom, err := newStateBloomWithSize(config.BloomSize) if err != nil { return nil, err } return &Pruner{ - db: db, - stateBloom: stateBloom, - datadir: datadir, - headHeader: headBlock.Header(), - snaptree: snaptree, + config: config, + chainHeader: headBlock.Header(), + db: db, + stateBloom: stateBloom, + snaptree: snaptree, }, nil } @@ -159,9 +163,7 @@ func prune(maindb ethdb.Database, stateBloom *stateBloom, bloomPath string, star if isCode { checkKey = codeKey } - if ok, err := stateBloom.Contain(checkKey); err != nil { - return err - } else if ok { + if stateBloom.Contain(checkKey) { continue } count += 1 @@ -254,12 +256,12 @@ func (p *Pruner) Prune(root common.Hash) error { // reuse it for pruning instead of generating a new one. It's // mandatory because a part of state may already be deleted, // the recovery procedure is necessary. - _, stateBloomRoot, err := findBloomFilter(p.datadir) + _, stateBloomRoot, err := findBloomFilter(p.config.Datadir) if err != nil { return err } if stateBloomRoot != (common.Hash{}) { - return RecoverPruning(p.datadir, p.db) + return RecoverPruning(p.config.Datadir, p.db, p.config.Cachedir) } // If the target state root is not specified, return a fatal error. @@ -269,11 +271,16 @@ func (p *Pruner) Prune(root common.Hash) error { // Ensure the root is really present. The weak assumption // is the presence of root can indicate the presence of the // entire trie. 
- if !rawdb.HasTrieNode(p.db, root) { + if !rawdb.HasLegacyTrieNode(p.db, root) { return fmt.Errorf("associated state[%x] is not present", root) } else { log.Info("Selecting last accepted block root as the pruning target", "root", root) } + // Before start the pruning, delete the clean trie cache first. + // It's necessary otherwise in the next restart we will hit the + // deleted state root in the "clean cache" so that the incomplete + // state is picked for usage. + deleteCleanTrieCache(p.config.Cachedir) // Traverse the target state, re-construct the whole state trie and // commit to the given bloom filter. @@ -286,7 +293,7 @@ func (p *Pruner) Prune(root common.Hash) error { if err := extractGenesis(p.db, p.stateBloom); err != nil { return err } - filterName := bloomFilterName(p.datadir, root) + filterName := bloomFilterName(p.config.Datadir, root) log.Info("Writing state bloom to disk", "name", filterName) if err := p.stateBloom.Commit(filterName, filterName+stateBloomFileTempSuffix); err != nil { @@ -303,7 +310,7 @@ func (p *Pruner) Prune(root common.Hash) error { // pruning can be resumed. What's more if the bloom filter is constructed, the // pruning **has to be resumed**. Otherwise a lot of dangling nodes may be left // in the disk. 
-func RecoverPruning(datadir string, db ethdb.Database) error { +func RecoverPruning(datadir string, db ethdb.Database, trieCachePath string) error { stateBloomPath, stateBloomRoot, err := findBloomFilter(datadir) if err != nil { return err @@ -313,7 +320,7 @@ func RecoverPruning(datadir string, db ethdb.Database) error { } headBlock := rawdb.ReadHeadBlock(db) if headBlock == nil { - return errors.New("Failed to load head block") + return errors.New("failed to load head block") } stateBloom, err := NewStateBloomFromDisk(stateBloomPath) if err != nil { @@ -321,6 +328,12 @@ func RecoverPruning(datadir string, db ethdb.Database) error { } log.Info("Loaded state bloom filter", "path", stateBloomPath) + // Before start the pruning, delete the clean trie cache first. + // It's necessary otherwise in the next restart we will hit the + // deleted state root in the "clean cache" so that the incomplete + // state is picked for usage. + deleteCleanTrieCache(trieCachePath) + // All the state roots of the middle layers should be forcibly pruned, // otherwise the dangling state will be left. 
if stateBloomRoot != headBlock.Root() { @@ -341,7 +354,7 @@ func extractGenesis(db ethdb.Database, stateBloom *stateBloom) error { if genesis == nil { return errors.New("missing genesis block") } - t, err := trie.NewStateTrie(common.Hash{}, genesis.Root(), trie.NewDatabase(db)) + t, err := trie.NewStateTrie(trie.StateTrieID(genesis.Root()), trie.NewDatabase(db)) if err != nil { return err } @@ -360,8 +373,9 @@ func extractGenesis(db ethdb.Database, stateBloom *stateBloom) error { if err := rlp.DecodeBytes(accIter.LeafBlob(), &acc); err != nil { return err } - if acc.Root != emptyRoot { - storageTrie, err := trie.NewStateTrie(common.BytesToHash(accIter.LeafKey()), acc.Root, trie.NewDatabase(db)) + if acc.Root != types.EmptyRootHash { + id := trie.StorageTrieID(genesis.Root(), common.BytesToHash(accIter.LeafKey()), acc.Root) + storageTrie, err := trie.NewStateTrie(id, trie.NewDatabase(db)) if err != nil { return err } @@ -376,7 +390,7 @@ func extractGenesis(db ethdb.Database, stateBloom *stateBloom) error { return storageIter.Error() } } - if !bytes.Equal(acc.CodeHash, emptyCode) { + if !bytes.Equal(acc.CodeHash, types.EmptyCodeHash.Bytes()) { stateBloom.Put(acc.CodeHash, nil) } } @@ -415,3 +429,23 @@ func findBloomFilter(datadir string) (string, common.Hash, error) { } return stateBloomPath, stateBloomRoot, nil } + +const warningLog = ` + +WARNING! + +The clean trie cache is not found. Please delete it by yourself after the +pruning. Remember don't start the Coreth without deleting the clean trie cache +otherwise the entire database may be damaged! + +Check the configuration option "offline-pruning-enabled" for more details. 
+` + +func deleteCleanTrieCache(path string) { + if !common.FileExist(path) { + log.Warn(warningLog) + return + } + os.RemoveAll(path) + log.Info("Deleted trie clean cache", "path", path) +} diff --git a/coreth/core/state/snapshot/account.go b/coreth/core/state/snapshot/account.go index 296c6e77..35aa33a9 100644 --- a/coreth/core/state/snapshot/account.go +++ b/coreth/core/state/snapshot/account.go @@ -30,6 +30,7 @@ import ( "bytes" "math/big" + "github.com/ava-labs/coreth/core/types" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/rlp" ) @@ -53,10 +54,10 @@ func SlimAccount(nonce uint64, balance *big.Int, root common.Hash, codehash []by Balance: balance, IsMultiCoin: isMultiCoin, } - if root != emptyRoot { + if root != types.EmptyRootHash { slim.Root = root[:] } - if !bytes.Equal(codehash, emptyCode[:]) { + if !bytes.Equal(codehash, types.EmptyCodeHash[:]) { slim.CodeHash = codehash } return slim @@ -80,10 +81,10 @@ func FullAccount(data []byte) (Account, error) { return Account{}, err } if len(account.Root) == 0 { - account.Root = emptyRoot[:] + account.Root = types.EmptyRootHash[:] } if len(account.CodeHash) == 0 { - account.CodeHash = emptyCode[:] + account.CodeHash = types.EmptyCodeHash[:] } return account, nil } diff --git a/coreth/core/state/snapshot/conversion.go b/coreth/core/state/snapshot/conversion.go index f7726181..94711213 100644 --- a/coreth/core/state/snapshot/conversion.go +++ b/coreth/core/state/snapshot/conversion.go @@ -37,9 +37,10 @@ import ( "time" "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/ethdb" + "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/trie" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rlp" ) @@ -53,7 +54,7 @@ type trieKV struct { type ( // trieGeneratorFn is the interface of trie generation which can // be implemented by different trie algorithm. 
- trieGeneratorFn func(db ethdb.KeyValueWriter, owner common.Hash, in chan (trieKV), out chan (common.Hash)) + trieGeneratorFn func(db ethdb.KeyValueWriter, scheme string, owner common.Hash, in chan (trieKV), out chan (common.Hash)) // leafCallbackFn is the callback invoked at the leaves of the trie, // returns the subtrie root with the specified subtrie identifier. @@ -62,12 +63,12 @@ type ( // GenerateAccountTrieRoot takes an account iterator and reproduces the root hash. func GenerateAccountTrieRoot(it AccountIterator) (common.Hash, error) { - return generateTrieRoot(nil, it, common.Hash{}, stackTrieGenerate, nil, newGenerateStats(), true) + return generateTrieRoot(nil, "", it, common.Hash{}, stackTrieGenerate, nil, newGenerateStats(), true) } // GenerateStorageTrieRoot takes a storage iterator and reproduces the root hash. func GenerateStorageTrieRoot(account common.Hash, it StorageIterator) (common.Hash, error) { - return generateTrieRoot(nil, it, account, stackTrieGenerate, nil, newGenerateStats(), true) + return generateTrieRoot(nil, "", it, account, stackTrieGenerate, nil, newGenerateStats(), true) } // GenerateTrie takes the whole snapshot tree as the input, traverses all the @@ -81,9 +82,10 @@ func GenerateTrie(snaptree *Tree, root common.Hash, src ethdb.Database, dst ethd } defer acctIt.Release() - got, err := generateTrieRoot(dst, acctIt, common.Hash{}, stackTrieGenerate, func(dst ethdb.KeyValueWriter, accountHash, codeHash common.Hash, stat *generateStats) (common.Hash, error) { + scheme := snaptree.triedb.Scheme() + got, err := generateTrieRoot(dst, scheme, acctIt, common.Hash{}, stackTrieGenerate, func(dst ethdb.KeyValueWriter, accountHash, codeHash common.Hash, stat *generateStats) (common.Hash, error) { // Migrate the code first, commit the contract code into the tmp db. 
- if codeHash != emptyCode { + if codeHash != types.EmptyCodeHash { code := rawdb.ReadCode(src, codeHash) if len(code) == 0 { return common.Hash{}, errors.New("failed to read contract code") @@ -97,7 +99,7 @@ func GenerateTrie(snaptree *Tree, root common.Hash, src ethdb.Database, dst ethd } defer storageIt.Release() - hash, err := generateTrieRoot(dst, storageIt, accountHash, stackTrieGenerate, nil, stat, false) + hash, err := generateTrieRoot(dst, scheme, storageIt, accountHash, stackTrieGenerate, nil, stat, false) if err != nil { return common.Hash{}, err } @@ -146,7 +148,7 @@ func (stat *generateStats) progressAccounts(account common.Hash, done uint64) { stat.head = account } -// finishAccounts updates the gemerator stats for the finished account range. +// finishAccounts updates the generator stats for the finished account range. func (stat *generateStats) finishAccounts(done uint64) { stat.lock.Lock() defer stat.lock.Unlock() @@ -252,7 +254,7 @@ func runReport(stats *generateStats, stop chan bool) { // generateTrieRoot generates the trie hash based on the snapshot iterator. // It can be used for generating account trie, storage trie or even the // whole state which connects the accounts and the corresponding storages. 
-func generateTrieRoot(db ethdb.KeyValueWriter, it Iterator, account common.Hash, generatorFn trieGeneratorFn, leafCallback leafCallbackFn, stats *generateStats, report bool) (common.Hash, error) { +func generateTrieRoot(db ethdb.KeyValueWriter, scheme string, it Iterator, account common.Hash, generatorFn trieGeneratorFn, leafCallback leafCallbackFn, stats *generateStats, report bool) (common.Hash, error) { var ( in = make(chan trieKV) // chan to pass leaves out = make(chan common.Hash, 1) // chan to collect result @@ -263,7 +265,7 @@ func generateTrieRoot(db ethdb.KeyValueWriter, it Iterator, account common.Hash, wg.Add(1) go func() { defer wg.Done() - generatorFn(db, account, in, out) + generatorFn(db, scheme, account, in, out) }() // Spin up a go-routine for progress logging if report && stats != nil { @@ -370,10 +372,16 @@ func generateTrieRoot(db ethdb.KeyValueWriter, it Iterator, account common.Hash, return stop(nil) } -func stackTrieGenerate(db ethdb.KeyValueWriter, owner common.Hash, in chan trieKV, out chan common.Hash) { - t := trie.NewStackTrieWithOwner(db, owner) +func stackTrieGenerate(db ethdb.KeyValueWriter, scheme string, owner common.Hash, in chan trieKV, out chan common.Hash) { + var nodeWriter trie.NodeWriteFunc + if db != nil { + nodeWriter = func(owner common.Hash, path []byte, hash common.Hash, blob []byte) { + rawdb.WriteTrieNode(db, owner, path, hash, blob, scheme) + } + } + t := trie.NewStackTrieWithOwner(nodeWriter, owner) for leaf := range in { - t.TryUpdate(leaf.key[:], leaf.value) + t.Update(leaf.key[:], leaf.value) } var root common.Hash if db == nil { diff --git a/coreth/core/state/snapshot/difflayer.go b/coreth/core/state/snapshot/difflayer.go index 6b897a14..74dcfc92 100644 --- a/coreth/core/state/snapshot/difflayer.go +++ b/coreth/core/state/snapshot/difflayer.go @@ -114,7 +114,7 @@ type diffLayer struct { blockHash common.Hash // Block hash to which this snapshot diff belongs to root common.Hash // Root hash to which this snapshot 
diff belongs to - stale uint32 // Signals that the layer became stale (state progressed) + stale atomic.Bool // Signals that the layer became stale (state progressed) // destructSet is a very special helper marker. If an account is marked as // deleted, then it's recorded in this set. However it's allowed that an account @@ -284,7 +284,7 @@ func (dl *diffLayer) Parent() snapshot { // Stale return whether this layer has become stale (was flattened across) or if // it's still live. func (dl *diffLayer) Stale() bool { - return atomic.LoadUint32(&dl.stale) != 0 + return dl.stale.Load() } // Account directly retrieves the account associated with a particular hash in @@ -309,9 +309,14 @@ func (dl *diffLayer) Account(hash common.Hash) (*Account, error) { // // Note the returned account is not a copy, please don't modify it. func (dl *diffLayer) AccountRLP(hash common.Hash) ([]byte, error) { + dl.lock.RLock() + // Check staleness before reaching further. + if dl.Stale() { + dl.lock.RUnlock() + return nil, ErrSnapshotStale + } // Check the bloom filter first whether there's even a point in reaching into // all the maps in all the layers below - dl.lock.RLock() hit := dl.diffed.Contains(accountBloomHasher(hash)) if !hit { hit = dl.diffed.Contains(destructBloomHasher(hash)) @@ -378,6 +383,11 @@ func (dl *diffLayer) Storage(accountHash, storageHash common.Hash) ([]byte, erro // Check the bloom filter first whether there's even a point in reaching into // all the maps in all the layers below dl.lock.RLock() + // Check staleness before reaching further. 
+ if dl.Stale() { + dl.lock.RUnlock() + return nil, ErrSnapshotStale + } hit := dl.diffed.Contains(storageBloomHasher{accountHash, storageHash}) if !hit { hit = dl.diffed.Contains(destructBloomHasher(accountHash)) @@ -466,7 +476,7 @@ func (dl *diffLayer) flatten() snapshot { // Before actually writing all our data to the parent, first ensure that the // parent hasn't been 'corrupted' by someone else already flattening into it - if atomic.SwapUint32(&parent.stale, 1) != 0 { + if parent.stale.Swap(true) { panic("parent diff layer is stale") // we've flattened into the same parent from two children, boo } // Overwrite all the updated accounts blindly, merge the sorted list diff --git a/coreth/core/state/snapshot/difflayer_test.go b/coreth/core/state/snapshot/difflayer_test.go index 29520fa8..0f104410 100644 --- a/coreth/core/state/snapshot/difflayer_test.go +++ b/coreth/core/state/snapshot/difflayer_test.go @@ -28,13 +28,14 @@ package snapshot import ( "bytes" + crand "crypto/rand" "math/rand" "testing" - "github.com/ava-labs/coreth/ethdb/memorydb" "github.com/ava-labs/coreth/utils" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethdb/memorydb" ) func copyDestructs(destructs map[common.Hash]struct{}) map[common.Hash]struct{} { @@ -83,7 +84,7 @@ func TestMergeBasics(t *testing.T) { if rand.Intn(2) == 0 { accStorage := make(map[common.Hash][]byte) value := make([]byte, 32) - rand.Read(value) + crand.Read(value) accStorage[randomHash()] = value storage[h] = accStorage } @@ -304,7 +305,7 @@ func BenchmarkSearchSlot(b *testing.B) { accStorage := make(map[common.Hash][]byte) for i := 0; i < 5; i++ { value := make([]byte, 32) - rand.Read(value) + crand.Read(value) accStorage[randomHash()] = value storage[accountKey] = accStorage } @@ -340,7 +341,7 @@ func BenchmarkFlatten(b *testing.B) { accStorage := make(map[common.Hash][]byte) for i := 0; i < 20; i++ { value := make([]byte, 32) - rand.Read(value) + 
crand.Read(value) accStorage[randomHash()] = value } storage[accountKey] = accStorage diff --git a/coreth/core/state/snapshot/disklayer.go b/coreth/core/state/snapshot/disklayer.go index 051a4ac5..ccd60b8d 100644 --- a/coreth/core/state/snapshot/disklayer.go +++ b/coreth/core/state/snapshot/disklayer.go @@ -32,10 +32,10 @@ import ( "time" "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/ethdb" "github.com/ava-labs/coreth/trie" "github.com/ava-labs/coreth/utils" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/rlp" ) diff --git a/coreth/core/state/snapshot/disklayer_test.go b/coreth/core/state/snapshot/disklayer_test.go index aad656e9..3dede623 100644 --- a/coreth/core/state/snapshot/disklayer_test.go +++ b/coreth/core/state/snapshot/disklayer_test.go @@ -31,8 +31,8 @@ import ( "testing" "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/ethdb/memorydb" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethdb/memorydb" "github.com/ethereum/go-ethereum/rlp" ) diff --git a/coreth/core/state/snapshot/generate.go b/coreth/core/state/snapshot/generate.go index c985abe2..e5f98635 100644 --- a/coreth/core/state/snapshot/generate.go +++ b/coreth/core/state/snapshot/generate.go @@ -34,12 +34,12 @@ import ( "time" "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/ethdb" + "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/trie" "github.com/ava-labs/coreth/utils" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/math" - "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rlp" ) @@ -49,14 +49,6 @@ const ( snapshotCacheStatsUpdateFrequency = 1000 // update stats from the snapshot fastcache once per 1000 ops ) -var ( - // emptyRoot is the known root hash of an empty trie. 
- emptyRoot = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") - - // emptyCode is the known hash of the empty EVM bytecode. - emptyCode = crypto.Keccak256Hash(nil) -) - // generatorStats is a collection of statistics gathered by the snapshot generator // for logging purposes. type generatorStats struct { @@ -279,7 +271,8 @@ func (dl *diskLayer) generate(stats *generatorStats) { } } // Create an account and state iterator pointing to the current generator marker - accTrie, err := trie.NewStateTrie(common.Hash{}, dl.root, dl.triedb) + trieId := trie.StateTrieID(dl.root) + accTrie, err := trie.NewStateTrie(trieId, dl.triedb) if err != nil { // The account trie is missing (GC), surf the chain until one becomes available stats.Info("Trie missing, state snapshotting paused", dl.root, dl.genMarker) @@ -333,8 +326,9 @@ func (dl *diskLayer) generate(stats *generatorStats) { } // If the iterated account is a contract, iterate through corresponding contract // storage to generate snapshot entries. 
- if acc.Root != emptyRoot { - storeTrie, err := trie.NewStateTrie(accountHash, acc.Root, dl.triedb) + if acc.Root != types.EmptyRootHash { + storeTrieId := trie.StorageTrieID(dl.root, accountHash, acc.Root) + storeTrie, err := trie.NewStateTrie(storeTrieId, dl.triedb) if err != nil { log.Error("Generator failed to access storage trie", "root", dl.root, "account", accountHash, "stroot", acc.Root, "err", err) abort := <-dl.genAbort diff --git a/coreth/core/state/snapshot/generate_test.go b/coreth/core/state/snapshot/generate_test.go index 973276f9..0fc61d1b 100644 --- a/coreth/core/state/snapshot/generate_test.go +++ b/coreth/core/state/snapshot/generate_test.go @@ -34,9 +34,11 @@ import ( "time" "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/ethdb" + "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/trie" + "github.com/ava-labs/coreth/trie/trienode" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rlp" "golang.org/x/crypto/sha3" @@ -60,9 +62,9 @@ func TestGeneration(t *testing.T) { var helper = newHelper() stRoot := helper.makeStorageTrie(common.Hash{}, []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, false) - helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) - helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}) - helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: emptyCode.Bytes()}) + helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: 
types.EmptyCodeHash.Bytes()}) helper.makeStorageTrie(hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) helper.makeStorageTrie(hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) @@ -95,16 +97,16 @@ func TestGenerateExistentState(t *testing.T) { var helper = newHelper() stRoot := helper.makeStorageTrie(hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) - helper.addSnapAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) + helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addSnapAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) helper.addSnapStorage("acc-1", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}) - helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}) - helper.addSnapAccount("acc-2", &Account{Balance: big.NewInt(2), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}) + helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addSnapAccount("acc-2", &Account{Balance: big.NewInt(2), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}) stRoot = helper.makeStorageTrie(hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: emptyCode.Bytes()}) - helper.addSnapAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: emptyCode.Bytes()}) + 
helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addSnapAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) helper.addSnapStorage("acc-3", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}) root, snap := helper.CommitAndGenerate() @@ -127,12 +129,12 @@ func checkSnapRoot(t *testing.T, snap *diskLayer, trieRoot common.Hash) { t.Helper() accIt := snap.AccountIterator(common.Hash{}) defer accIt.Release() - snapRoot, err := generateTrieRoot(nil, accIt, common.Hash{}, stackTrieGenerate, + snapRoot, err := generateTrieRoot(nil, "", accIt, common.Hash{}, stackTrieGenerate, func(db ethdb.KeyValueWriter, accountHash, codeHash common.Hash, stat *generateStats) (common.Hash, error) { storageIt, _ := snap.StorageIterator(accountHash, common.Hash{}) defer storageIt.Release() - hash, err := generateTrieRoot(nil, storageIt, accountHash, stackTrieGenerate, nil, stat, false) + hash, err := generateTrieRoot(nil, "", storageIt, accountHash, stackTrieGenerate, nil, stat, false) if err != nil { return common.Hash{}, err } @@ -153,24 +155,24 @@ type testHelper struct { diskdb ethdb.Database triedb *trie.Database accTrie *trie.StateTrie - nodes *trie.MergedNodeSet + nodes *trienode.MergedNodeSet } func newHelper() *testHelper { diskdb := rawdb.NewMemoryDatabase() triedb := trie.NewDatabase(diskdb) - accTrie, _ := trie.NewStateTrie(common.Hash{}, common.Hash{}, triedb) + accTrie, _ := trie.NewStateTrie(trie.StateTrieID(types.EmptyRootHash), triedb) return &testHelper{ diskdb: diskdb, triedb: triedb, accTrie: accTrie, - nodes: trie.NewMergedNodeSet(), + nodes: trienode.NewMergedNodeSet(), } } func (t *testHelper) addTrieAccount(acckey string, acc *Account) { val, _ := rlp.EncodeToBytes(acc) - t.accTrie.Update([]byte(acckey), val) + t.accTrie.MustUpdate([]byte(acckey), val) } func (t *testHelper) addSnapAccount(acckey string, acc *Account) 
{ @@ -192,14 +194,15 @@ func (t *testHelper) addSnapStorage(accKey string, keys []string, vals []string) } func (t *testHelper) makeStorageTrie(owner common.Hash, keys []string, vals []string, commit bool) []byte { - stTrie, _ := trie.NewStateTrie(owner, common.Hash{}, t.triedb) + id := trie.StorageTrieID(types.EmptyRootHash, owner, types.EmptyRootHash) + stTrie, _ := trie.NewStateTrie(id, t.triedb) for i, k := range keys { - stTrie.Update([]byte(k), []byte(vals[i])) + stTrie.MustUpdate([]byte(k), []byte(vals[i])) } if !commit { return stTrie.Hash().Bytes() } - root, nodes, _ := stTrie.Commit(false) + root, nodes := stTrie.Commit(false) if nodes != nil { t.nodes.Merge(nodes) } @@ -207,12 +210,12 @@ func (t *testHelper) makeStorageTrie(owner common.Hash, keys []string, vals []st } func (t *testHelper) Commit() common.Hash { - root, nodes, _ := t.accTrie.Commit(true) + root, nodes := t.accTrie.Commit(true) if nodes != nil { t.nodes.Merge(nodes) } - t.triedb.Update(t.nodes) - t.triedb.Commit(root, false, nil) + t.triedb.Update(root, types.EmptyRootHash, t.nodes) + t.triedb.Commit(root, false) return root } @@ -244,28 +247,28 @@ func TestGenerateExistentStateWithWrongStorage(t *testing.T) { helper := newHelper() // Account one, empty root but non-empty database - helper.addAccount("acc-1", &Account{Balance: big.NewInt(1), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}) + helper.addAccount("acc-1", &Account{Balance: big.NewInt(1), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}) helper.addSnapStorage("acc-1", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}) // Account two, non empty root but empty database stRoot := helper.makeStorageTrie(hashData([]byte("acc-2")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addAccount("acc-2", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) + helper.addAccount("acc-2", &Account{Balance: big.NewInt(1), 
Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // Miss slots { // Account three, non empty root but misses slots in the beginning helper.makeStorageTrie(hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addAccount("acc-3", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) + helper.addAccount("acc-3", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) helper.addSnapStorage("acc-3", []string{"key-2", "key-3"}, []string{"val-2", "val-3"}) // Account four, non empty root but misses slots in the middle helper.makeStorageTrie(hashData([]byte("acc-4")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addAccount("acc-4", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) + helper.addAccount("acc-4", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) helper.addSnapStorage("acc-4", []string{"key-1", "key-3"}, []string{"val-1", "val-3"}) // Account five, non empty root but misses slots in the end helper.makeStorageTrie(hashData([]byte("acc-5")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addAccount("acc-5", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) + helper.addAccount("acc-5", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) helper.addSnapStorage("acc-5", []string{"key-1", "key-2"}, []string{"val-1", "val-2"}) } @@ -273,22 +276,22 @@ func TestGenerateExistentStateWithWrongStorage(t *testing.T) { { // Account six, non empty root but wrong slots in the beginning helper.makeStorageTrie(hashData([]byte("acc-6")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addAccount("acc-6", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) + helper.addAccount("acc-6", 
&Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) helper.addSnapStorage("acc-6", []string{"key-1", "key-2", "key-3"}, []string{"badval-1", "val-2", "val-3"}) // Account seven, non empty root but wrong slots in the middle helper.makeStorageTrie(hashData([]byte("acc-7")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addAccount("acc-7", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) + helper.addAccount("acc-7", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) helper.addSnapStorage("acc-7", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "badval-2", "val-3"}) // Account eight, non empty root but wrong slots in the end helper.makeStorageTrie(hashData([]byte("acc-8")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addAccount("acc-8", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) + helper.addAccount("acc-8", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) helper.addSnapStorage("acc-8", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "badval-3"}) // Account 9, non empty root but rotated slots helper.makeStorageTrie(hashData([]byte("acc-9")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addAccount("acc-9", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) + helper.addAccount("acc-9", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) helper.addSnapStorage("acc-9", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-3", "val-2"}) } @@ -296,17 +299,17 @@ func TestGenerateExistentStateWithWrongStorage(t *testing.T) { { // Account 10, non empty root but extra slots in the beginning helper.makeStorageTrie(hashData([]byte("acc-10")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", 
"val-2", "val-3"}, true) - helper.addAccount("acc-10", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) + helper.addAccount("acc-10", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) helper.addSnapStorage("acc-10", []string{"key-0", "key-1", "key-2", "key-3"}, []string{"val-0", "val-1", "val-2", "val-3"}) // Account 11, non empty root but extra slots in the middle helper.makeStorageTrie(hashData([]byte("acc-11")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addAccount("acc-11", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) + helper.addAccount("acc-11", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) helper.addSnapStorage("acc-11", []string{"key-1", "key-2", "key-2-1", "key-3"}, []string{"val-1", "val-2", "val-2-1", "val-3"}) // Account 12, non empty root but extra slots in the end helper.makeStorageTrie(hashData([]byte("acc-12")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addAccount("acc-12", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) + helper.addAccount("acc-12", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) helper.addSnapStorage("acc-12", []string{"key-1", "key-2", "key-3", "key-4"}, []string{"val-1", "val-2", "val-3", "val-4"}) } @@ -346,25 +349,25 @@ func TestGenerateExistentStateWithWrongAccounts(t *testing.T) { // Missing accounts, only in the trie { - helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) // Beginning - helper.addTrieAccount("acc-4", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) // Middle - helper.addTrieAccount("acc-6", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) // End + helper.addTrieAccount("acc-1", &Account{Balance: 
big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // Beginning + helper.addTrieAccount("acc-4", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // Middle + helper.addTrieAccount("acc-6", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // End } // Wrong accounts { - helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) + helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) helper.addSnapAccount("acc-2", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: common.Hex2Bytes("0x1234")}) - helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) - helper.addSnapAccount("acc-3", &Account{Balance: big.NewInt(1), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}) + helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addSnapAccount("acc-3", &Account{Balance: big.NewInt(1), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}) } // Extra accounts, only in the snap { - helper.addSnapAccount("acc-0", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyRoot.Bytes()}) // before the beginning - helper.addSnapAccount("acc-5", &Account{Balance: big.NewInt(1), Root: emptyRoot.Bytes(), CodeHash: common.Hex2Bytes("0x1234")}) // Middle - helper.addSnapAccount("acc-7", &Account{Balance: big.NewInt(1), Root: emptyRoot.Bytes(), CodeHash: emptyRoot.Bytes()}) // after the end + helper.addSnapAccount("acc-0", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // before the beginning + helper.addSnapAccount("acc-5", &Account{Balance: big.NewInt(1), Root: types.EmptyRootHash.Bytes(), CodeHash: common.Hex2Bytes("0x1234")}) // Middle + helper.addSnapAccount("acc-7", &Account{Balance: 
big.NewInt(1), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}) // after the end } root, snap := helper.CommitAndGenerate() @@ -393,14 +396,14 @@ func TestGenerateCorruptAccountTrie(t *testing.T) { // without any storage slots to keep the test smaller. helper := newHelper() - helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}) // 0x7dd654835190324640832972b7c4c6eaa0c50541e36766d054ed57721f1dc7eb - helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}) // 0xf73118e0254ce091588d66038744a0afae5f65a194de67cff310c683ae43329e - helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}) // 0x515d3de35e143cd976ad476398d910aa7bf8a02e8fd7eb9e3baacddbbcbfcb41 + helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}) // 0x7dd654835190324640832972b7c4c6eaa0c50541e36766d054ed57721f1dc7eb + helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}) // 0xf73118e0254ce091588d66038744a0afae5f65a194de67cff310c683ae43329e + helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}) // 0x515d3de35e143cd976ad476398d910aa7bf8a02e8fd7eb9e3baacddbbcbfcb41 root := helper.Commit() // Root: 0xfa04f652e8bd3938971bf7d71c3c688574af334ca8bc20e64b01ba610ae93cad // Delete an account trie leaf and ensure the generator chokes - helper.triedb.Commit(root, false, nil) + helper.triedb.Commit(root, false) helper.diskdb.Delete(common.HexToHash("0xf73118e0254ce091588d66038744a0afae5f65a194de67cff310c683ae43329e").Bytes()) snap := generateSnapshot(helper.diskdb, helper.triedb, 16, testBlockHash, root, nil) @@ -428,10 +431,11 @@ func 
TestGenerateMissingStorageTrie(t *testing.T) { helper := newHelper() stRoot := helper.makeStorageTrie(hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) // 0xddefcd9376dd029653ef384bd2f0a126bb755fe84fdcc9e7cf421ba454f2bc67 - helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) // 0x547b07c3a71669c00eda14077d85c7fd14575b92d459572540b25b9a11914dcb - helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}) // 0xf73118e0254ce091588d66038744a0afae5f65a194de67cff310c683ae43329e + helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e + helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}) // 0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7 stRoot = helper.makeStorageTrie(hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: emptyCode.Bytes()}) // 0x70da4ebd7602dd313c936b39000ed9ab7f849986a90ea934f0c3ec4cc9840441 + helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x70da4ebd7602dd313c936b39000ed9ab7f849986a90ea934f0c3ec4cc9840441 + root := helper.Commit() // Delete a storage trie root and ensure the generator chokes @@ -461,10 +465,11 @@ func TestGenerateCorruptStorageTrie(t *testing.T) { helper := newHelper() stRoot := helper.makeStorageTrie(hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) // 0xddefcd9376dd029653ef384bd2f0a126bb755fe84fdcc9e7cf421ba454f2bc67 - helper.addTrieAccount("acc-1", 
&Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) // 0x547b07c3a71669c00eda14077d85c7fd14575b92d459572540b25b9a11914dcb - helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}) // 0xf73118e0254ce091588d66038744a0afae5f65a194de67cff310c683ae43329e + helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e + helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}) // 0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7 stRoot = helper.makeStorageTrie(hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: emptyCode.Bytes()}) // 0x70da4ebd7602dd313c936b39000ed9ab7f849986a90ea934f0c3ec4cc9840441 + helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x70da4ebd7602dd313c936b39000ed9ab7f849986a90ea934f0c3ec4cc9840441 + root := helper.Commit() // Delete a storage trie leaf and ensure the generator chokes @@ -495,9 +500,9 @@ func TestGenerateWithExtraAccounts(t *testing.T) { []string{"val-1", "val-2", "val-3", "val-4", "val-5"}, true, ) - acc := &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()} + acc := &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()} val, _ := rlp.EncodeToBytes(acc) - helper.accTrie.Update([]byte("acc-1"), val) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e + helper.accTrie.MustUpdate([]byte("acc-1"), val) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e // Identical in the snap key := hashData([]byte("acc-1")) @@ -515,7 +520,7 @@ 
func TestGenerateWithExtraAccounts(t *testing.T) { []string{"val-1", "val-2", "val-3", "val-4", "val-5"}, true, ) - acc := &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()} + acc := &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()} val, _ := rlp.EncodeToBytes(acc) key := hashData([]byte("acc-2")) rawdb.WriteAccountSnapshot(helper.diskdb, key, val) @@ -566,9 +571,9 @@ func TestGenerateWithManyExtraAccounts(t *testing.T) { []string{"val-1", "val-2", "val-3"}, true, ) - acc := &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()} + acc := &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()} val, _ := rlp.EncodeToBytes(acc) - helper.accTrie.Update([]byte("acc-1"), val) // 0x547b07c3a71669c00eda14077d85c7fd14575b92d459572540b25b9a11914dcb + helper.accTrie.MustUpdate([]byte("acc-1"), val) // 0x547b07c3a71669c00eda14077d85c7fd14575b92d459572540b25b9a11914dcb // Identical in the snap key := hashData([]byte("acc-1")) @@ -580,8 +585,7 @@ func TestGenerateWithManyExtraAccounts(t *testing.T) { { // 100 accounts exist only in snapshot for i := 0; i < 1000; i++ { - //acc := &Account{Balance: big.NewInt(int64(i)), Root: stTrie.Hash().Bytes(), CodeHash: emptyCode.Bytes()} - acc := &Account{Balance: big.NewInt(int64(i)), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()} + acc := &Account{Balance: big.NewInt(int64(i)), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()} val, _ := rlp.EncodeToBytes(acc) key := hashData([]byte(fmt.Sprintf("acc-%d", i))) rawdb.WriteAccountSnapshot(helper.diskdb, key, val) @@ -617,10 +621,10 @@ func TestGenerateWithExtraBeforeAndAfter(t *testing.T) { } helper := newHelper() { - acc := &Account{Balance: big.NewInt(1), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()} + acc := &Account{Balance: big.NewInt(1), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()} val, _ := 
rlp.EncodeToBytes(acc) - helper.accTrie.Update(common.HexToHash("0x03").Bytes(), val) - helper.accTrie.Update(common.HexToHash("0x07").Bytes(), val) + helper.accTrie.MustUpdate(common.HexToHash("0x03").Bytes(), val) + helper.accTrie.MustUpdate(common.HexToHash("0x07").Bytes(), val) rawdb.WriteAccountSnapshot(helper.diskdb, common.HexToHash("0x01"), val) rawdb.WriteAccountSnapshot(helper.diskdb, common.HexToHash("0x02"), val) @@ -653,9 +657,9 @@ func TestGenerateWithMalformedSnapdata(t *testing.T) { } helper := newHelper() { - acc := &Account{Balance: big.NewInt(1), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()} + acc := &Account{Balance: big.NewInt(1), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()} val, _ := rlp.EncodeToBytes(acc) - helper.accTrie.Update(common.HexToHash("0x03").Bytes(), val) + helper.accTrie.MustUpdate(common.HexToHash("0x03").Bytes(), val) junk := make([]byte, 100) copy(junk, []byte{0xde, 0xad}) @@ -690,7 +694,7 @@ func TestGenerateFromEmptySnap(t *testing.T) { for i := 0; i < 400; i++ { stRoot := helper.makeStorageTrie(hashData([]byte(fmt.Sprintf("acc-%d", i))), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) helper.addTrieAccount(fmt.Sprintf("acc-%d", i), - &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) + &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) } root, snap := helper.CommitAndGenerate() t.Logf("Root: %#x\n", root) // Root: 0x2609234ce43f5e471202c87e017ffb4dfecdb3163cfcbaa55de04baa59cad42d @@ -726,7 +730,7 @@ func TestGenerateWithIncompleteStorage(t *testing.T) { for i := 0; i < 8; i++ { accKey := fmt.Sprintf("acc-%d", i) stRoot := helper.makeStorageTrie(hashData([]byte(accKey)), stKeys, stVals, true) - helper.addAccount(accKey, &Account{Balance: big.NewInt(int64(i)), Root: stRoot, CodeHash: emptyCode.Bytes()}) + helper.addAccount(accKey, &Account{Balance: big.NewInt(int64(i)), Root: stRoot, CodeHash: 
types.EmptyCodeHash.Bytes()}) var moddedKeys []string var moddedVals []string for ii := 0; ii < 8; ii++ { @@ -818,11 +822,11 @@ func TestGenerateCompleteSnapshotWithDanglingStorage(t *testing.T) { var helper = newHelper() stRoot := helper.makeStorageTrie(hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) - helper.addAccount("acc-2", &Account{Balance: big.NewInt(1), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}) + helper.addAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addAccount("acc-2", &Account{Balance: big.NewInt(1), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}) helper.makeStorageTrie(hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addAccount("acc-3", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) + helper.addAccount("acc-3", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) helper.addSnapStorage("acc-1", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}) helper.addSnapStorage("acc-3", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}) @@ -853,11 +857,11 @@ func TestGenerateBrokenSnapshotWithDanglingStorage(t *testing.T) { var helper = newHelper() stRoot := helper.makeStorageTrie(hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) - helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}) + helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: 
types.EmptyCodeHash.Bytes()}) + helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}) helper.makeStorageTrie(hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: emptyCode.Bytes()}) + helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) populateDangling(helper.diskdb) diff --git a/coreth/core/state/snapshot/iterator.go b/coreth/core/state/snapshot/iterator.go index c9b98353..b7cf84ec 100644 --- a/coreth/core/state/snapshot/iterator.go +++ b/coreth/core/state/snapshot/iterator.go @@ -32,8 +32,8 @@ import ( "sort" "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/ethdb" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethdb" ) // Iterator is an iterator to step over all the accounts or the specific diff --git a/coreth/core/state/snapshot/iterator_fast.go b/coreth/core/state/snapshot/iterator_fast.go index bf0e3acd..04a61d4a 100644 --- a/coreth/core/state/snapshot/iterator_fast.go +++ b/coreth/core/state/snapshot/iterator_fast.go @@ -285,7 +285,7 @@ func (fi *fastIterator) next(idx int) bool { return false } // The elem we're placing it next to has the same value, - // so whichever winds up on n+1 will need further iteraton + // so whichever winds up on n+1 will need further iteration clash = n + 1 return cur.priority < fi.iterators[n+1].priority diff --git a/coreth/core/state/snapshot/iterator_test.go b/coreth/core/state/snapshot/iterator_test.go index 9d1b7440..7fc374a1 100644 --- a/coreth/core/state/snapshot/iterator_test.go +++ b/coreth/core/state/snapshot/iterator_test.go @@ -28,6 +28,7 @@ package snapshot import ( "bytes" + crand "crypto/rand" "encoding/binary" "fmt" "math/rand" @@ -56,7 +57,7 @@ func 
TestAccountIteratorBasics(t *testing.T) { if rand.Intn(2) == 0 { accStorage := make(map[common.Hash][]byte) value := make([]byte, 32) - rand.Read(value) + crand.Read(value) accStorage[randomHash()] = value storage[h] = accStorage } @@ -88,7 +89,7 @@ func TestStorageIteratorBasics(t *testing.T) { var nilstorage int for i := 0; i < 100; i++ { - rand.Read(value) + crand.Read(value) if rand.Intn(2) == 0 { accStorage[randomHash()] = common.CopyBytes(value) } else { @@ -762,7 +763,7 @@ func TestStorageIteratorDeletions(t *testing.T) { // only spit out 200 values eventually. // // The value-fetching benchmark is easy on the binary iterator, since it never has to reach -// down at any depth for retrieving the values -- all are on the toppmost layer +// down at any depth for retrieving the values -- all are on the topmost layer // // BenchmarkAccountIteratorTraversal/binary_iterator_keys-6 2239 483674 ns/op // BenchmarkAccountIteratorTraversal/binary_iterator_values-6 2403 501810 ns/op diff --git a/coreth/core/state/snapshot/journal.go b/coreth/core/state/snapshot/journal.go index 31318da7..f605029b 100644 --- a/coreth/core/state/snapshot/journal.go +++ b/coreth/core/state/snapshot/journal.go @@ -33,9 +33,9 @@ import ( "time" "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/ethdb" "github.com/ava-labs/coreth/trie" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rlp" ) @@ -56,7 +56,7 @@ type journalGenerator struct { // loadSnapshot loads a pre-existing state snapshot backed by a key-value // store. If loading the snapshot from disk is successful, this function also // returns a boolean indicating whether or not the snapshot is fully generated. 
-func loadSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, blockHash, root common.Hash) (snapshot, bool, error) { +func loadSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, blockHash, root common.Hash, noBuild bool) (snapshot, bool, error) { // Retrieve the block number and hash of the snapshot, failing if no snapshot // is present in the database (or crashed mid-update). baseBlockHash := rawdb.ReadSnapshotBlockHash(diskdb) @@ -96,12 +96,12 @@ func loadSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, created: time.Now(), } - // Everything loaded correctly, resume any suspended operations + var wiper chan struct{} + // Load the disk layer status from the generator if it's not complete if !generator.Done { // If the generator was still wiping, restart one from scratch (fine for // now as it's rare and the wiper deletes the stuff it touches anyway, so // restarting won't incur a lot of extra database hops. - var wiper chan struct{} if generator.Wiping { log.Info("Resuming previous snapshot wipe") wiper = WipeSnapshot(diskdb, false) @@ -111,6 +111,11 @@ func loadSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, if snapshot.genMarker == nil { snapshot.genMarker = []byte{} } + } + + // Everything loaded correctly, resume any suspended operations + // if the background generation is allowed + if !generator.Done && !noBuild { snapshot.genPending = make(chan struct{}) snapshot.genAbort = make(chan chan struct{}) diff --git a/coreth/core/state/snapshot/snapshot.go b/coreth/core/state/snapshot/snapshot.go index b5b62f2d..0fd4e23f 100644 --- a/coreth/core/state/snapshot/snapshot.go +++ b/coreth/core/state/snapshot/snapshot.go @@ -32,15 +32,14 @@ import ( "errors" "fmt" "sync" - "sync/atomic" "time" "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/ethdb" "github.com/ava-labs/coreth/metrics" "github.com/ava-labs/coreth/trie" "github.com/ava-labs/coreth/utils" 
"github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" ) @@ -167,6 +166,14 @@ type snapshot interface { Stale() bool } +// Config includes the configurations for snapshots. +type Config struct { + CacheSize int // Megabytes permitted to use for read caches + NoBuild bool // Indicator that the snapshots generation is disallowed + AsyncBuild bool // The snapshot generation is allowed to be constructed asynchronously + SkipVerify bool // Indicator that all verification should be bypassed +} + // Tree is an Ethereum state snapshot tree. It consists of one persistent base // layer backed by a key-value store, on top of which arbitrarily many in-memory // diff layers are topped. The memory diffs can form a tree with branching, but @@ -177,9 +184,9 @@ type snapshot interface { // storage data to avoid expensive multi-level trie lookups; and to allow sorted, // cheap iteration of the account/storage tries for sync aid. type Tree struct { + config Config // Snapshots configurations diskdb ethdb.KeyValueStore // Persistent database to store the snapshot triedb *trie.Database // In-memory cache to access the trie through - cache int // Megabytes permitted to use for read caches // Collection of all known layers // blockHash -> snapshot blockLayers map[common.Hash]snapshot @@ -201,24 +208,24 @@ type Tree struct { // If the snapshot is missing or the disk layer is broken, the snapshot will be // reconstructed using both the existing data and the state trie. // The repair happens on a background thread. 
-func New(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, blockHash, root common.Hash, async bool, rebuild bool, verify bool) (*Tree, error) { +func New(config Config, diskdb ethdb.KeyValueStore, triedb *trie.Database, blockHash, root common.Hash) (*Tree, error) { // Create a new, empty snapshot tree snap := &Tree{ + config: config, diskdb: diskdb, triedb: triedb, - cache: cache, blockLayers: make(map[common.Hash]snapshot), stateLayers: make(map[common.Hash]map[common.Hash]snapshot), - verified: !verify, // if verify is false, all verification will be bypassed + verified: config.SkipVerify, // if SkipVerify is true, all verification will be bypassed } // Attempt to load a previously persisted snapshot and rebuild one if failed - head, generated, err := loadSnapshot(diskdb, triedb, cache, blockHash, root) + head, generated, err := loadSnapshot(diskdb, triedb, config.CacheSize, blockHash, root, config.NoBuild) if err != nil { - if rebuild { - log.Warn("Failed to load snapshot, regenerating", "err", err) + log.Warn("Failed to load snapshot, regenerating", "err", err) + if !config.NoBuild { snap.Rebuild(blockHash, root) - if !async { + if !config.AsyncBuild { if err := snap.verifyIntegrity(snap.disklayer(), true); err != nil { return nil, err } @@ -239,8 +246,8 @@ func New(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, blockHash } // Verify any synchronously generated or loaded snapshot from disk - if !async || generated { - if err := snap.verifyIntegrity(snap.disklayer(), !async && !generated); err != nil { + if !config.AsyncBuild || generated { + if err := snap.verifyIntegrity(snap.disklayer(), !config.AsyncBuild && !generated); err != nil { return nil, err } } @@ -627,7 +634,7 @@ func diffToDisk(bottom *diffLayer) (*diskLayer, bool, error) { // Ensure we don't delete too much data blindly (contract can be // huge). It's ok to flush, the root will go missing in case of a // crash and we'll detect and regenerate the snapshot. 
- if batch.ValueSize() > ethdb.IdealBatchSize { + if batch.ValueSize() > 64*1024*1024 { if err := batch.Write(); err != nil { log.Crit("Failed to write storage deletions", "err", err) } @@ -653,7 +660,7 @@ func diffToDisk(bottom *diffLayer) (*diskLayer, bool, error) { // Ensure we don't write too much data blindly. It's ok to flush, the // root will go missing in case of a crash and we'll detect and regen // the snapshot. - if batch.ValueSize() > ethdb.IdealBatchSize { + if batch.ValueSize() > 64*1024*1024 { if err := batch.Write(); err != nil { log.Crit("Failed to write storage deletions", "err", err) } @@ -764,7 +771,7 @@ func (t *Tree) Rebuild(blockHash, root common.Hash) { case *diffLayer: // If the layer is a simple diff, simply mark as stale layer.lock.Lock() - atomic.StoreUint32(&layer.stale, 1) + layer.stale.Store(true) layer.lock.Unlock() default: @@ -774,7 +781,7 @@ func (t *Tree) Rebuild(blockHash, root common.Hash) { // Start generating a new snapshot from scratch on a background thread. The // generator will run a wiper first if there's not one running right now. 
log.Info("Rebuilding state snapshot") - base := generateSnapshot(t.diskdb, t.triedb, t.cache, blockHash, root, wiper) + base := generateSnapshot(t.diskdb, t.triedb, t.config.CacheSize, blockHash, root, wiper) t.blockLayers = map[common.Hash]snapshot{ blockHash: base, } @@ -836,14 +843,14 @@ func (t *Tree) verify(root common.Hash, force bool) error { } defer acctIt.Release() - got, err := generateTrieRoot(nil, acctIt, common.Hash{}, stackTrieGenerate, func(db ethdb.KeyValueWriter, accountHash, codeHash common.Hash, stat *generateStats) (common.Hash, error) { + got, err := generateTrieRoot(nil, "", acctIt, common.Hash{}, stackTrieGenerate, func(db ethdb.KeyValueWriter, accountHash, codeHash common.Hash, stat *generateStats) (common.Hash, error) { storageIt, err := t.StorageIterator(root, accountHash, common.Hash{}, force) if err != nil { return common.Hash{}, err } defer storageIt.Release() - hash, err := generateTrieRoot(nil, storageIt, accountHash, stackTrieGenerate, nil, stat, false) + hash, err := generateTrieRoot(nil, "", storageIt, accountHash, stackTrieGenerate, nil, stat, false) if err != nil { return common.Hash{}, err } @@ -905,7 +912,7 @@ func (t *Tree) generating() (bool, error) { return layer.genMarker != nil, nil } -// diskRoot is a external helper function to return the disk layer root. +// DiskRoot is a external helper function to return the disk layer root. 
func (t *Tree) DiskRoot() common.Hash { t.lock.Lock() defer t.lock.Unlock() diff --git a/coreth/core/state/snapshot/snapshot_test.go b/coreth/core/state/snapshot/snapshot_test.go index 5bebc406..6fab030a 100644 --- a/coreth/core/state/snapshot/snapshot_test.go +++ b/coreth/core/state/snapshot/snapshot_test.go @@ -27,6 +27,7 @@ package snapshot import ( + crand "crypto/rand" "fmt" "math/big" "math/rand" @@ -34,6 +35,7 @@ import ( "time" "github.com/ava-labs/coreth/core/rawdb" + "github.com/ava-labs/coreth/core/types" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/rlp" ) @@ -41,7 +43,7 @@ import ( // randomHash generates a random blob of data and returns it as a hash. func randomHash() common.Hash { var hash common.Hash - if n, err := rand.Read(hash[:]); n != common.HashLength || err != nil { + if n, err := crand.Read(hash[:]); n != common.HashLength || err != nil { panic(err) } return hash @@ -54,7 +56,7 @@ func randomAccount() []byte { Balance: big.NewInt(rand.Int63()), Nonce: rand.Uint64(), Root: root[:], - CodeHash: emptyCode[:], + CodeHash: types.EmptyCodeHash[:], } data, _ := rlp.EncodeToBytes(a) return data @@ -119,7 +121,7 @@ func TestDiskLayerExternalInvalidationFullFlatten(t *testing.T) { if err := snaps.Flatten(common.HexToHash("0x02")); err != nil { t.Fatalf("failed to merge diff layer onto disk: %v", err) } - // Since the base layer was modified, ensure that data retrieval on the external reference fail + // Since the base layer was modified, ensure that data retrievals on the external reference fail if acc, err := ref.Account(common.HexToHash("0x01")); err != ErrSnapshotStale { t.Errorf("stale reference returned account: %v (err: %v)", acc, err) } @@ -166,7 +168,7 @@ func TestDiskLayerExternalInvalidationPartialFlatten(t *testing.T) { if err := snaps.Flatten(common.HexToHash("0x02")); err != nil { t.Fatalf("Failed to flatten diff layer onto disk: %v", err) } - // Since the base layer was modified, ensure that data retrieval on 
the external reference fails + // Since the base layer was modified, ensure that data retrieval on the external reference fail if acc, err := ref.Account(common.HexToHash("0x01")); err != ErrSnapshotStale { t.Errorf("stale reference returned account: %v (err: %v)", acc, err) } @@ -185,6 +187,10 @@ func TestDiskLayerExternalInvalidationPartialFlatten(t *testing.T) { // be returned with junk data. This version of the test retains the bottom diff // layer to check the usual mode of operation where the accumulator is retained. func TestDiffLayerExternalInvalidationPartialFlatten(t *testing.T) { + // Un-commenting this triggers the bloom set to be deterministic. The values below + // were used to trigger the flaw described in https://github.com/ethereum/go-ethereum/issues/27254. + // bloomDestructHasherOffset, bloomAccountHasherOffset, bloomStorageHasherOffset = 14, 24, 5 + // Create an empty base layer and a snapshot tree out of it snaps := NewTestTree(rawdb.NewMemoryDatabase(), common.HexToHash("0x01"), common.HexToHash("0xff01")) // Commit three diffs on top and retrieve a reference to the bottommost @@ -212,7 +218,7 @@ func TestDiffLayerExternalInvalidationPartialFlatten(t *testing.T) { if err := snaps.Flatten(common.HexToHash("0x02")); err != nil { t.Fatal(err) } - // Since the accumulator diff layer was modified, ensure that data retrieval on the external reference fails + // Since the accumulator diff layer was modified, ensure that data retrievals on the external reference fail if acc, err := ref.Account(common.HexToHash("0x01")); err != ErrSnapshotStale { t.Errorf("stale reference returned account: %v (err: %v)", acc, err) } diff --git a/coreth/core/state/snapshot/utils.go b/coreth/core/state/snapshot/utils.go index 8036cfce..f9e2db5a 100644 --- a/coreth/core/state/snapshot/utils.go +++ b/coreth/core/state/snapshot/utils.go @@ -32,8 +32,8 @@ import ( "time" "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/ethdb" 
"github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" ) diff --git a/coreth/core/state/snapshot/wipe.go b/coreth/core/state/snapshot/wipe.go index 36bf376f..37963032 100644 --- a/coreth/core/state/snapshot/wipe.go +++ b/coreth/core/state/snapshot/wipe.go @@ -31,8 +31,8 @@ import ( "time" "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/ethdb" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" ) diff --git a/coreth/core/state/snapshot/wipe_test.go b/coreth/core/state/snapshot/wipe_test.go index e7ac20c1..74afec5f 100644 --- a/coreth/core/state/snapshot/wipe_test.go +++ b/coreth/core/state/snapshot/wipe_test.go @@ -31,8 +31,8 @@ import ( "testing" "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/ethdb/memorydb" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethdb/memorydb" ) // Tests that given a database with random data content, all parts of a snapshot diff --git a/coreth/core/state/state_object.go b/coreth/core/state/state_object.go index 7d1ac913..02ca6700 100644 --- a/coreth/core/state/state_object.go +++ b/coreth/core/state/state_object.go @@ -36,14 +36,12 @@ import ( "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/metrics" - "github.com/ava-labs/coreth/trie" + "github.com/ava-labs/coreth/trie/trienode" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/rlp" ) -var emptyCodeHash = crypto.Keccak256(nil) - type Code []byte func (c Code) String() string { @@ -56,7 +54,6 @@ func (s Storage) String() (str string) { for key, value := range s { str += fmt.Sprintf("%X : %X\n", key, value) } - return } @@ -65,7 +62,6 @@ func (s Storage) Copy() Storage { for key, value := range s { cpy[key] = value } - return cpy } @@ -74,7 +70,7 @@ func (s Storage) Copy() Storage { // The usage 
pattern is as follows: // First you need to obtain a state object. // Account values can be accessed and modified through the object. -// Finally, call CommitTrie to write the modified storage trie into a database. +// Finally, call commitTrie to write the modified storage trie into a database. type stateObject struct { address common.Address addrHash common.Hash // hash of ethereum address of the account @@ -85,13 +81,6 @@ type stateObject struct { data types.StateAccount db *StateDB - // DB error. - // State objects are used by the consensus core and VM which are - // unable to deal with database-level errors. Any error that occurs - // during a database read is memoized here and will eventually be returned - // by StateDB.Commit. - dbErr error - // Write caches. trie Trie // storage trie, which becomes non-nil on first access code Code // contract bytecode, which gets set when code is loaded @@ -99,10 +88,9 @@ type stateObject struct { originStorage Storage // Storage cache of original entries to dedup rewrites, reset for every transaction pendingStorage Storage // Storage entries that need to be flushed to disk, at the end of an entire block dirtyStorage Storage // Storage entries that have been modified in the current transaction execution - fakeStorage Storage // Fake storage which constructed by caller for debugging purpose. // Cache flags. - // When an object is marked suicided it will be delete from the trie + // When an object is marked suicided it will be deleted from the trie // during the "update" phase of the state transition. dirtyCode bool // true if the code was updated suicided bool @@ -111,7 +99,7 @@ type stateObject struct { // empty returns whether the account is considered empty. 
func (s *stateObject) empty() bool { - return s.data.Nonce == 0 && s.data.Balance.Sign() == 0 && bytes.Equal(s.data.CodeHash, emptyCodeHash) && !s.data.IsMultiCoin + return s.data.Nonce == 0 && s.data.Balance.Sign() == 0 && bytes.Equal(s.data.CodeHash, types.EmptyCodeHash.Bytes()) && !s.data.IsMultiCoin } // newObject creates a state object. @@ -120,10 +108,10 @@ func newObject(db *StateDB, address common.Address, data types.StateAccount) *st data.Balance = new(big.Int) } if data.CodeHash == nil { - data.CodeHash = emptyCodeHash + data.CodeHash = types.EmptyCodeHash.Bytes() } if data.Root == (common.Hash{}) { - data.Root = emptyRoot + data.Root = types.EmptyRootHash } return &stateObject{ db: db, @@ -141,13 +129,6 @@ func (s *stateObject) EncodeRLP(w io.Writer) error { return rlp.Encode(w, &s.data) } -// setError remembers the first non-nil error it is called with. -func (s *stateObject) setError(err error) { - if s.dbErr == nil { - s.dbErr = err - } -} - func (s *stateObject) markSuicided() { s.suicided = true } @@ -163,33 +144,31 @@ func (s *stateObject) touch() { } } -func (s *stateObject) getTrie(db Database) Trie { +// getTrie returns the associated storage trie. The trie will be opened +// if it's not loaded previously. An error will be returned if trie can't +// be loaded. 
+func (s *stateObject) getTrie(db Database) (Trie, error) { if s.trie == nil { // Try fetching from prefetcher first // We don't prefetch empty tries - if s.data.Root != emptyRoot && s.db.prefetcher != nil { + if s.data.Root != types.EmptyRootHash && s.db.prefetcher != nil { // When the miner is creating the pending state, there is no // prefetcher s.trie = s.db.prefetcher.trie(s.addrHash, s.data.Root) } if s.trie == nil { - var err error - s.trie, err = db.OpenStorageTrie(s.addrHash, s.data.Root) + tr, err := db.OpenStorageTrie(s.db.originalRoot, s.addrHash, s.data.Root) if err != nil { - s.trie, _ = db.OpenStorageTrie(s.addrHash, common.Hash{}) - s.setError(fmt.Errorf("can't create storage trie: %v", err)) + return nil, err } + s.trie = tr } } - return s.trie + return s.trie, nil } // GetState retrieves a value from the account storage trie. func (s *stateObject) GetState(db Database, key common.Hash) common.Hash { - // If the fake storage is set, only lookup the state here(in the debugging mode) - if s.fakeStorage != nil { - return s.fakeStorage[key] - } // If we have a dirty value for this state entry, return it value, dirty := s.dirtyStorage[key] if dirty { @@ -201,10 +180,6 @@ func (s *stateObject) GetState(db Database, key common.Hash) common.Hash { // GetCommittedState retrieves a value from the committed account storage trie. 
func (s *stateObject) GetCommittedState(db Database, key common.Hash) common.Hash { - // If the fake storage is set, only lookup the state here(in the debugging mode) - if s.fakeStorage != nil { - return s.fakeStorage[key] - } // If we have a pending write or clean cached, return that if value, pending := s.pendingStorage[key]; pending { return value @@ -212,21 +187,21 @@ func (s *stateObject) GetCommittedState(db Database, key common.Hash) common.Has if value, cached := s.originStorage[key]; cached { return value } + // If the object was destructed in *this* block (and potentially resurrected), + // the storage has been cleared out, and we should *not* consult the previous + // database about any storage values. The only possible alternatives are: + // 1) resurrect happened, and new slot values were set -- those should + // have been handles via pendingStorage above. + // 2) we don't have new values, and can deliver empty response back + if _, destructed := s.db.stateObjectsDestruct[s.address]; destructed { + return common.Hash{} + } // If no live objects are available, attempt to use snapshots var ( enc []byte err error ) if s.db.snap != nil { - // If the object was destructed in *this* block (and potentially resurrected), - // the storage has been cleared out, and we should *not* consult the previous - // snapshot about any storage values. The only possible alternatives are: - // 1) resurrect happened, and new slot values were set -- those should - // have been handles via pendingStorage above. - // 2) we don't have new values, and can deliver empty response back - if _, destructed := s.db.snapDestructs[s.addrHash]; destructed { - return common.Hash{} - } start := time.Now() enc, err = s.db.snap.Storage(s.addrHash, crypto.Keccak256Hash(key.Bytes())) if metrics.EnabledExpensive { @@ -236,12 +211,17 @@ func (s *stateObject) GetCommittedState(db Database, key common.Hash) common.Has // If the snapshot is unavailable or reading from it fails, load from the database. 
if s.db.snap == nil || err != nil { start := time.Now() - enc, err = s.getTrie(db).TryGet(key.Bytes()) + tr, err := s.getTrie(db) + if err != nil { + s.db.setError(err) + return common.Hash{} + } + enc, err = tr.GetStorage(s.address, key.Bytes()) if metrics.EnabledExpensive { s.db.StorageReads += time.Since(start) } if err != nil { - s.setError(err) + s.db.setError(err) return common.Hash{} } } @@ -249,7 +229,7 @@ func (s *stateObject) GetCommittedState(db Database, key common.Hash) common.Has if len(enc) > 0 { _, content, _, err := rlp.Split(enc) if err != nil { - s.setError(err) + s.db.setError(err) } value.SetBytes(content) } @@ -259,11 +239,6 @@ func (s *stateObject) GetCommittedState(db Database, key common.Hash) common.Has // SetState updates a value in account storage. func (s *stateObject) SetState(db Database, key, value common.Hash) { - // If the fake storage is set, put the temporary state update here. - if s.fakeStorage != nil { - s.fakeStorage[key] = value - return - } // If the new value is the same as old, don't set prev := s.GetState(db, key) if prev == value { @@ -278,24 +253,6 @@ func (s *stateObject) SetState(db Database, key, value common.Hash) { s.setState(key, value) } -// SetStorage replaces the entire state storage with the given one. -// -// After this function is called, all original state will be ignored and state -// lookup only happens in the fake state storage. -// -// Note this function should only be used for debugging purpose. -func (s *stateObject) SetStorage(storage map[common.Hash]common.Hash) { - // Allocate fake storage if it's nil. - if s.fakeStorage == nil { - s.fakeStorage = make(Storage) - } - for key, value := range storage { - s.fakeStorage[key] = value - } - // Don't bother journal since this function should only be used for - // debugging and the `fake` storage won't be committed to database. 
-} - func (s *stateObject) setState(key, value common.Hash) { s.dirtyStorage[key] = value } @@ -310,8 +267,8 @@ func (s *stateObject) finalise(prefetch bool) { slotsToPrefetch = append(slotsToPrefetch, common.CopyBytes(key[:])) // Copy needed for closure } } - if s.db.prefetcher != nil && prefetch && len(slotsToPrefetch) > 0 && s.data.Root != emptyRoot { - s.db.prefetcher.prefetch(s.addrHash, s.data.Root, slotsToPrefetch) + if s.db.prefetcher != nil && prefetch && len(slotsToPrefetch) > 0 && s.data.Root != types.EmptyRootHash { + s.db.prefetcher.prefetch(s.addrHash, s.data.Root, s.address, slotsToPrefetch) } if len(s.dirtyStorage) > 0 { s.dirtyStorage = make(Storage) @@ -319,23 +276,29 @@ func (s *stateObject) finalise(prefetch bool) { } // updateTrie writes cached storage modifications into the object's storage trie. -// It will return nil if the trie has not been loaded and no changes have been made -func (s *stateObject) updateTrie(db Database) Trie { +// It will return nil if the trie has not been loaded and no changes have been +// made. An error will be returned if the trie can't be loaded/updated correctly. 
+func (s *stateObject) updateTrie(db Database) (Trie, error) { // Make sure all dirty slots are finalized into the pending storage area s.finalise(false) // Don't prefetch anymore, pull directly if need be if len(s.pendingStorage) == 0 { - return s.trie + return s.trie, nil } // Track the amount of time wasted on updating the storage trie if metrics.EnabledExpensive { defer func(start time.Time) { s.db.StorageUpdates += time.Since(start) }(time.Now()) } // The snapshot storage map for the object - var storage map[common.Hash][]byte + var ( + storage map[common.Hash][]byte + hasher = s.db.hasher + ) + tr, err := s.getTrie(db) + if err != nil { + s.db.setError(err) + return nil, err + } // Insert all the pending updates into the trie - tr := s.getTrie(db) - hasher := s.db.hasher - usedStorage := make([][]byte, 0, len(s.pendingStorage)) for key, value := range s.pendingStorage { // Skip noop changes, persist actual changes @@ -346,12 +309,18 @@ func (s *stateObject) updateTrie(db Database) Trie { var v []byte if (value == common.Hash{}) { - s.setError(tr.TryDelete(key[:])) + if err := tr.DeleteStorage(s.address, key[:]); err != nil { + s.db.setError(err) + return nil, err + } s.db.StorageDeleted += 1 } else { // Encoding []byte cannot fail, ok to ignore the error. v, _ = rlp.EncodeToBytes(common.TrimLeftZeroes(value[:])) - s.setError(tr.TryUpdate(key[:], v)) + if err := tr.UpdateStorage(s.address, key[:], v); err != nil { + s.db.setError(err) + return nil, err + } s.db.StorageUpdated += 1 } // If state snapshotting is active, cache the data til commit @@ -373,41 +342,45 @@ func (s *stateObject) updateTrie(db Database) Trie { if len(s.pendingStorage) > 0 { s.pendingStorage = make(Storage) } - return tr + return tr, nil } -// UpdateRoot sets the trie root to the current root hash of +// UpdateRoot sets the trie root to the current root hash of. An error +// will be returned if trie root hash is not computed correctly. 
func (s *stateObject) updateRoot(db Database) { + tr, err := s.updateTrie(db) + if err != nil { + return + } // If nothing changed, don't bother with hashing anything - if s.updateTrie(db) == nil { + if tr == nil { return } // Track the amount of time wasted on hashing the storage trie if metrics.EnabledExpensive { defer func(start time.Time) { s.db.StorageHashes += time.Since(start) }(time.Now()) } - s.data.Root = s.trie.Hash() + s.data.Root = tr.Hash() } -// CommitTrie the storage trie of the object to db. -// This updates the trie root. -func (s *stateObject) CommitTrie(db Database) (*trie.NodeSet, error) { - // If nothing changed, don't bother with hashing anything - if s.updateTrie(db) == nil { - return nil, nil +// commitTrie submits the storage changes into the storage trie and re-computes +// the root. Besides, all trie changes will be collected in a nodeset and returned. +func (s *stateObject) commitTrie(db Database) (*trienode.NodeSet, error) { + tr, err := s.updateTrie(db) + if err != nil { + return nil, err } - if s.dbErr != nil { - return nil, s.dbErr + // If nothing changed, don't bother with committing anything + if tr == nil { + return nil, nil } // Track the amount of time wasted on committing the storage trie if metrics.EnabledExpensive { defer func(start time.Time) { s.db.StorageCommits += time.Since(start) }(time.Now()) } - root, nodes, err := s.trie.Commit(false) - if err == nil { - s.data.Root = root - } - return nodes, err + root, nodes := tr.Commit(false) + s.data.Root = root + return nodes, nil } // AddBalance adds amount to s's balance. 
@@ -496,7 +469,7 @@ func (s *stateObject) deepCopy(db *StateDB) *stateObject { // Attribute accessors // -// Returns the address of the contract/account +// Address returns the address of the contract/account func (s *stateObject) Address() common.Address { return s.address } @@ -506,12 +479,12 @@ func (s *stateObject) Code(db Database) []byte { if s.code != nil { return s.code } - if bytes.Equal(s.CodeHash(), emptyCodeHash) { + if bytes.Equal(s.CodeHash(), types.EmptyCodeHash.Bytes()) { return nil } code, err := db.ContractCode(s.addrHash, common.BytesToHash(s.CodeHash())) if err != nil { - s.setError(fmt.Errorf("can't load code hash %x: %v", s.CodeHash(), err)) + s.db.setError(fmt.Errorf("can't load code hash %x: %v", s.CodeHash(), err)) } s.code = code return code @@ -524,12 +497,12 @@ func (s *stateObject) CodeSize(db Database) int { if s.code != nil { return len(s.code) } - if bytes.Equal(s.CodeHash(), emptyCodeHash) { + if bytes.Equal(s.CodeHash(), types.EmptyCodeHash.Bytes()) { return 0 } size, err := db.ContractCodeSize(s.addrHash, common.BytesToHash(s.CodeHash())) if err != nil { - s.setError(fmt.Errorf("can't load code size %x: %v", s.CodeHash(), err)) + s.db.setError(fmt.Errorf("can't load code size %x: %v", s.CodeHash(), err)) } return size } @@ -609,10 +582,3 @@ func (s *stateObject) Nonce() uint64 { defer s.dataLock.RUnlock() return s.data.Nonce } - -// Never called, but must be present to allow stateObject to be used -// as a vm.Account interface that also satisfies the vm.ContractRef -// interface. Interfaces are awesome. 
-func (s *stateObject) Value() *big.Int { - panic("Value on stateObject should never be called") -} diff --git a/coreth/core/state/state_test.go b/coreth/core/state/state_test.go index e7a05ef2..b9be35f9 100644 --- a/coreth/core/state/state_test.go +++ b/coreth/core/state/state_test.go @@ -27,9 +27,17 @@ package state import ( + "bytes" + "encoding/json" + "math/big" + "testing" + "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/ethdb" + "github.com/ava-labs/coreth/core/types" + "github.com/ava-labs/coreth/trie" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethdb" ) type stateTest struct { @@ -39,6 +47,41 @@ type stateTest struct { func newStateTest() *stateTest { db := rawdb.NewMemoryDatabase() - sdb, _ := New(common.Hash{}, NewDatabase(db), nil) + sdb, _ := New(types.EmptyRootHash, NewDatabase(db), nil) return &stateTest{db: db, state: sdb} } + +func TestIterativeDump(t *testing.T) { + db := rawdb.NewMemoryDatabase() + sdb, _ := New(types.EmptyRootHash, NewDatabaseWithConfig(db, &trie.Config{Preimages: true}), nil) + s := &stateTest{db: db, state: sdb} + + // generate a few entries + obj1 := s.state.GetOrNewStateObject(common.BytesToAddress([]byte{0x01})) + obj1.AddBalance(big.NewInt(22)) + obj2 := s.state.GetOrNewStateObject(common.BytesToAddress([]byte{0x01, 0x02})) + obj2.SetCode(crypto.Keccak256Hash([]byte{3, 3, 3, 3, 3, 3, 3}), []byte{3, 3, 3, 3, 3, 3, 3}) + obj3 := s.state.GetOrNewStateObject(common.BytesToAddress([]byte{0x02})) + obj3.SetBalance(big.NewInt(44)) + obj4 := s.state.GetOrNewStateObject(common.BytesToAddress([]byte{0x00})) + obj4.AddBalance(big.NewInt(1337)) + + // write some of them to the trie + s.state.updateStateObject(obj1) + s.state.updateStateObject(obj2) + s.state.Commit(false, false) + + b := &bytes.Buffer{} + s.state.IterativeDump(nil, json.NewEncoder(b)) + // check that DumpToCollector contains the state objects that are in trie + got := 
b.String() + want := `{"root":"0x0ffca661efa3b7504ac015083994c94fd7d0d24db60354c717c936afcced762a"} +{"balance":"22","nonce":0,"root":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","codeHash":"0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470","isMultiCoin":false,"address":"0x0000000000000000000000000000000000000001","key":"0x1468288056310c82aa4c01a7e12a10f8111a0560e72b700555479031b86c357d"} +{"balance":"1337","nonce":0,"root":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","codeHash":"0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470","isMultiCoin":false,"address":"0x0000000000000000000000000000000000000000","key":"0x5380c7b7ae81a58eb98d9c78de4a1fd7fd9535fc953ed2be602daaa41767312a"} +{"balance":"0","nonce":0,"root":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","codeHash":"0x87874902497a5bb968da31a2998d8f22e949d1ef6214bcdedd8bae24cca4b9e3","code":"0x03030303030303","isMultiCoin":false,"address":"0x0000000000000000000000000000000000000102","key":"0xa17eacbc25cda025e81db9c5c62868822c73ce097cee2a63e33a2e41268358a1"} +{"balance":"44","nonce":0,"root":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","codeHash":"0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470","isMultiCoin":false,"address":"0x0000000000000000000000000000000000000002","key":"0xd52688a8f926c816ca1e079067caba944f158e764817b83fc43594370ca9cf62"} +` + if got != want { + t.Errorf("DumpToCollector mismatch:\ngot: %s\nwant: %s\n", got, want) + } +} diff --git a/coreth/core/state/statedb.go b/coreth/core/state/statedb.go index 9af59926..77fa2e22 100644 --- a/coreth/core/state/statedb.go +++ b/coreth/core/state/statedb.go @@ -38,7 +38,10 @@ import ( "github.com/ava-labs/coreth/core/state/snapshot" "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/metrics" + "github.com/ava-labs/coreth/params" + "github.com/ava-labs/coreth/predicate" 
"github.com/ava-labs/coreth/trie" + "github.com/ava-labs/coreth/trie/trienode" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" @@ -50,11 +53,6 @@ type revision struct { journalIndex int } -var ( - // emptyRoot is the known root hash of an empty trie. - emptyRoot = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") -) - type proofList [][]byte func (n *proofList) Put(key []byte, value []byte) error { @@ -81,21 +79,23 @@ type StateDB struct { // It will be updated when the Commit is called. originalRoot common.Hash - snap snapshot.Snapshot - snapDestructs map[common.Hash]struct{} - snapAccounts map[common.Hash][]byte - snapStorage map[common.Hash]map[common.Hash][]byte + snap snapshot.Snapshot + snapAccounts map[common.Hash][]byte + snapStorage map[common.Hash]map[common.Hash][]byte // This map holds 'live' objects, which will get modified while processing a state transition. - stateObjects map[common.Address]*stateObject - stateObjectsPending map[common.Address]struct{} // State objects finalized but not yet written to the trie - stateObjectsDirty map[common.Address]struct{} // State objects modified in the current execution + stateObjects map[common.Address]*stateObject + stateObjectsPending map[common.Address]struct{} // State objects finalized but not yet written to the trie + stateObjectsDirty map[common.Address]struct{} // State objects modified in the current execution + stateObjectsDestruct map[common.Address]struct{} // State objects destructed in the block // DB error. // State objects are used by the consensus core and VM which are // unable to deal with database-level errors. Any error that occurs - // during a database read is memoized here and will eventually be returned - // by StateDB.Commit. + // during a database read is memoized here and will eventually be + // returned by StateDB.Commit. 
Notably, this error is also shared + // by all cached state objects in case the database failure occurs + // when accessing state of accounts. dbErr error // The refund counter, also used by state transitioning. @@ -110,6 +110,12 @@ type StateDB struct { // Per-transaction access list accessList *accessList + // Ordered storage slots to be used in predicate verification as set in the tx access list. + // Only set in PrepareAccessList, and un-modified through execution. + predicateStorageSlots map[common.Address][][]byte + + // Transient storage + transientStorage transientStorage // Journal of state modifications. This is the backbone of // Snapshot and RevertToSnapshot. @@ -156,24 +162,26 @@ func NewWithSnapshot(root common.Hash, db Database, snap snapshot.Snapshot) (*St return nil, err } sdb := &StateDB{ - db: db, - trie: tr, - originalRoot: root, - stateObjects: make(map[common.Address]*stateObject), - stateObjectsPending: make(map[common.Address]struct{}), - stateObjectsDirty: make(map[common.Address]struct{}), - logs: make(map[common.Hash][]*types.Log), - preimages: make(map[common.Hash][]byte), - journal: newJournal(), - accessList: newAccessList(), - hasher: crypto.NewKeccakState(), + db: db, + trie: tr, + originalRoot: root, + stateObjects: make(map[common.Address]*stateObject), + stateObjectsPending: make(map[common.Address]struct{}), + stateObjectsDirty: make(map[common.Address]struct{}), + stateObjectsDestruct: make(map[common.Address]struct{}), + logs: make(map[common.Hash][]*types.Log), + preimages: make(map[common.Hash][]byte), + journal: newJournal(), + predicateStorageSlots: make(map[common.Address][][]byte), + accessList: newAccessList(), + transientStorage: newTransientStorage(), + hasher: crypto.NewKeccakState(), } if snap != nil { if snap.Root() != root { return nil, fmt.Errorf("cannot create new statedb for root: %s, using snapshot with mismatched root: %s", root, snap.Root().Hex()) } sdb.snap = snap - sdb.snapDestructs = 
make(map[common.Hash]struct{}) sdb.snapAccounts = make(map[common.Hash][]byte) sdb.snapStorage = make(map[common.Hash]map[common.Hash][]byte) } @@ -183,13 +191,13 @@ func NewWithSnapshot(root common.Hash, db Database, snap snapshot.Snapshot) (*St // StartPrefetcher initializes a new trie prefetcher to pull in nodes from the // state trie concurrently while the state is mutated so that when we reach the // commit phase, most of the needed data is already hot. -func (s *StateDB) StartPrefetcher(namespace string) { +func (s *StateDB) StartPrefetcher(namespace string, maxConcurrency int) { if s.prefetcher != nil { s.prefetcher.close() s.prefetcher = nil } if s.snap != nil { - s.prefetcher = newTriePrefetcher(s.db, s.originalRoot, namespace) + s.prefetcher = newTriePrefetcher(s.db, s.originalRoot, namespace, maxConcurrency) } } @@ -209,11 +217,21 @@ func (s *StateDB) setError(err error) { } } +// Error returns the memorized database failure occurred earlier. func (s *StateDB) Error() error { return s.dbErr } -func (s *StateDB) AddLog(log *types.Log) { +// AddLog adds a log with the specified parameters to the statedb +// Note: blockNumber is a required argument because StateDB does not +// know the current block number. +func (s *StateDB) AddLog(addr common.Address, topics []common.Hash, data []byte, blockNumber uint64) { + log := &types.Log{ + Address: addr, + Topics: topics, + Data: data, + BlockNumber: blockNumber, + } s.journal.append(addLogChange{txhash: s.thash}) log.TxHash = s.thash @@ -223,9 +241,12 @@ func (s *StateDB) AddLog(log *types.Log) { s.logSize++ } -func (s *StateDB) GetLogs(hash common.Hash, blockHash common.Hash) []*types.Log { +// GetLogs returns the logs matching the specified transaction hash, and annotates +// them with the given blockNumber and blockHash. 
+func (s *StateDB) GetLogs(hash common.Hash, blockNumber uint64, blockHash common.Hash) []*types.Log { logs := s.logs[hash] for _, l := range logs { + l.BlockNumber = blockNumber l.BlockHash = blockHash } return logs @@ -239,6 +260,20 @@ func (s *StateDB) Logs() []*types.Log { return logs } +// GetLogData returns the underlying topics and data from each log included in the StateDB +// Test helper function. +func (s *StateDB) GetLogData() ([][]common.Hash, [][]byte) { + var logData [][]byte + var topics [][]common.Hash + for _, lgs := range s.logs { + for _, log := range lgs { + topics = append(topics, log.Topics) + logData = append(logData, common.CopyBytes(log.Data)) + } + } + return topics, logData +} + // AddPreimage records a SHA3 preimage seen by the VM. func (s *StateDB) AddPreimage(hash common.Hash, preimage []byte) { if _, ok := s.preimages[hash]; !ok { @@ -365,13 +400,19 @@ func (s *StateDB) GetProofByHash(addrHash common.Hash) ([][]byte, error) { // GetStorageProof returns the Merkle proof for given storage slot. func (s *StateDB) GetStorageProof(a common.Address, key common.Hash) ([][]byte, error) { - var proof proofList - trie := s.StorageTrie(a) + trie, err := s.StorageTrie(a) + if err != nil { + return nil, err + } if trie == nil { - return proof, errors.New("storage trie for requested address does not exist") + return nil, errors.New("storage trie for requested address does not exist") } - err := trie.Prove(crypto.Keccak256(key.Bytes()), 0, &proof) - return proof, err + var proof proofList + err = trie.Prove(crypto.Keccak256(key.Bytes()), 0, &proof) + if err != nil { + return nil, err + } + return proof, nil } // GetCommittedState retrieves a value from the given account's committed storage trie. @@ -398,15 +439,18 @@ func (s *StateDB) Database() Database { return s.db } -// StorageTrie returns the storage trie of an account. -// The return value is a copy and is nil for non-existent accounts. 
-func (s *StateDB) StorageTrie(addr common.Address) Trie { +// StorageTrie returns the storage trie of an account. The return value is a copy +// and is nil for non-existent accounts. An error will be returned if storage trie +// is existent but can't be loaded correctly. +func (s *StateDB) StorageTrie(addr common.Address) (Trie, error) { stateObject := s.getStateObject(addr) if stateObject == nil { - return nil + return nil, nil } cpy := stateObject.deepCopy(s) - cpy.updateTrie(s.db) + if _, err := cpy.updateTrie(s.db); err != nil { + return nil, err + } return cpy.getTrie(s.db) } @@ -493,9 +537,15 @@ func (s *StateDB) SetState(addr common.Address, key, value common.Hash) { // SetStorage replaces the entire storage for the specified account with given // storage. This function should only be used for debugging. func (s *StateDB) SetStorage(addr common.Address, storage map[common.Hash]common.Hash) { + // SetStorage needs to wipe existing storage. We achieve this by pretending + // that the account self-destructed earlier in this block, by flagging + // it in stateObjectsDestruct. The effect of doing so is that storage lookups + // will not hit disk, since it is assumed that the disk-data is belonging + // to a previous incarnation of the object. + s.stateObjectsDestruct[addr] = struct{}{} stateObject := s.GetOrNewStateObject(addr) - if stateObject != nil { - stateObject.SetStorage(storage) + for k, v := range storage { + stateObject.SetState(s.db, k, v) } } @@ -520,6 +570,33 @@ func (s *StateDB) Suicide(addr common.Address) bool { return true } +// SetTransientState sets transient storage for a given account. It +// adds the change to the journal so that it can be rolled back +// to its previous value if there is a revert. 
+func (s *StateDB) SetTransientState(addr common.Address, key, value common.Hash) { + prev := s.GetTransientState(addr, key) + if prev == value { + return + } + s.journal.append(transientStorageChange{ + account: &addr, + key: key, + prevalue: prev, + }) + s.setTransientState(addr, key, value) +} + +// setTransientState is a lower level setter for transient storage. It +// is called during a revert to prevent modifications to the journal. +func (s *StateDB) setTransientState(addr common.Address, key, value common.Hash) { + s.transientStorage.Set(addr, key, value) +} + +// GetTransientState gets transient storage for a given account. +func (s *StateDB) GetTransientState(addr common.Address, key common.Hash) common.Hash { + return s.transientStorage.Get(addr, key) +} + // // Setting, updating & deleting state object methods. // @@ -532,7 +609,7 @@ func (s *StateDB) updateStateObject(obj *stateObject) { } // Encode the account and update the account trie addr := obj.Address() - if err := s.trie.TryUpdateAccount(addr[:], &obj.data); err != nil { + if err := s.trie.UpdateAccount(addr, &obj.data); err != nil { s.setError(fmt.Errorf("updateStateObject (%x) error: %v", addr[:], err)) } @@ -553,7 +630,7 @@ func (s *StateDB) deleteStateObject(obj *stateObject) { } // Delete the account from the trie addr := obj.Address() - if err := s.trie.TryDeleteAccount(addr[:]); err != nil { + if err := s.trie.DeleteAccount(addr); err != nil { s.setError(fmt.Errorf("deleteStateObject (%x) error: %v", addr[:], err)) } } @@ -597,10 +674,10 @@ func (s *StateDB) getDeletedStateObject(addr common.Address) *stateObject { Root: common.BytesToHash(acc.Root), } if len(data.CodeHash) == 0 { - data.CodeHash = emptyCodeHash + data.CodeHash = types.EmptyCodeHash.Bytes() } if data.Root == (common.Hash{}) { - data.Root = emptyRoot + data.Root = types.EmptyRootHash } } } @@ -608,7 +685,7 @@ func (s *StateDB) getDeletedStateObject(addr common.Address) *stateObject { if data == nil { start := time.Now() 
var err error - data, err = s.trie.TryGetAccount(addr.Bytes()) + data, err = s.trie.GetAccount(addr) if metrics.EnabledExpensive { s.AccountReads += time.Since(start) } @@ -645,10 +722,10 @@ func (s *StateDB) createObject(addr common.Address) (newobj, prev *stateObject) prev = s.getDeletedStateObject(addr) // Note, prev might have been deleted, we need that! var prevdestruct bool - if s.snap != nil && prev != nil { - _, prevdestruct = s.snapDestructs[prev.addrHash] + if prev != nil { + _, prevdestruct = s.stateObjectsDestruct[prev.address] if !prevdestruct { - s.snapDestructs[prev.addrHash] = struct{}{} + s.stateObjectsDestruct[prev.address] = struct{}{} } } newobj = newObject(s, addr, types.StateAccount{}) @@ -686,7 +763,11 @@ func (db *StateDB) ForEachStorage(addr common.Address, cb func(key, value common if so == nil { return nil } - it := trie.NewIterator(so.getTrie(db.db).NodeIterator(nil)) + tr, err := so.getTrie(db.db) + if err != nil { + return err + } + it := trie.NewIterator(tr.NodeIterator(nil)) for it.Next() { key := common.BytesToHash(db.trie.GetKey(it.Key)) @@ -710,23 +791,37 @@ func (db *StateDB) ForEachStorage(addr common.Address, cb func(key, value common return nil } +// copyPredicateStorageSlots creates a deep copy of the provided predicateStorageSlots map. +func copyPredicateStorageSlots(predicateStorageSlots map[common.Address][][]byte) map[common.Address][][]byte { + res := make(map[common.Address][][]byte, len(predicateStorageSlots)) + for address, predicates := range predicateStorageSlots { + copiedPredicates := make([][]byte, len(predicates)) + for i, predicateBytes := range predicates { + copiedPredicates[i] = common.CopyBytes(predicateBytes) + } + res[address] = copiedPredicates + } + return res +} + // Copy creates a deep, independent copy of the state. // Snapshots of the copied state cannot be applied to the copy. 
func (s *StateDB) Copy() *StateDB { // Copy all the basic fields, initialize the memory ones state := &StateDB{ - db: s.db, - trie: s.db.CopyTrie(s.trie), - originalRoot: s.originalRoot, - stateObjects: make(map[common.Address]*stateObject, len(s.journal.dirties)), - stateObjectsPending: make(map[common.Address]struct{}, len(s.stateObjectsPending)), - stateObjectsDirty: make(map[common.Address]struct{}, len(s.journal.dirties)), - refund: s.refund, - logs: make(map[common.Hash][]*types.Log, len(s.logs)), - logSize: s.logSize, - preimages: make(map[common.Hash][]byte, len(s.preimages)), - journal: newJournal(), - hasher: crypto.NewKeccakState(), + db: s.db, + trie: s.db.CopyTrie(s.trie), + originalRoot: s.originalRoot, + stateObjects: make(map[common.Address]*stateObject, len(s.journal.dirties)), + stateObjectsPending: make(map[common.Address]struct{}, len(s.stateObjectsPending)), + stateObjectsDirty: make(map[common.Address]struct{}, len(s.journal.dirties)), + stateObjectsDestruct: make(map[common.Address]struct{}, len(s.stateObjectsDestruct)), + refund: s.refund, + logs: make(map[common.Hash][]*types.Log, len(s.logs)), + logSize: s.logSize, + preimages: make(map[common.Hash][]byte, len(s.preimages)), + journal: newJournal(), + hasher: crypto.NewKeccakState(), } // Copy the dirty states, logs, and preimages for addr := range s.journal.dirties { @@ -736,7 +831,7 @@ func (s *StateDB) Copy() *StateDB { // nil if object, exist := s.stateObjects[addr]; exist { // Even though the original object is dirty, we are not copying the journal, - // so we need to make sure that anyside effect the journal would have caused + // so we need to make sure that any side-effect the journal would have caused // during a commit (or similar op) is already applied to the copy. 
state.stateObjects[addr] = object.deepCopy(state) @@ -744,9 +839,10 @@ func (s *StateDB) Copy() *StateDB { state.stateObjectsPending[addr] = struct{}{} // Mark the copy pending to force external (account) commits } } - // Above, we don't copy the actual journal. This means that if the copy is copied, the - // loop above will be a no-op, since the copy's journal is empty. - // Thus, here we iterate over stateObjects, to enable copies of copies + // Above, we don't copy the actual journal. This means that if the copy + // is copied, the loop above will be a no-op, since the copy's journal + // is empty. Thus, here we iterate over stateObjects, to enable copies + // of copies. for addr := range s.stateObjectsPending { if _, exist := state.stateObjects[addr]; !exist { state.stateObjects[addr] = s.stateObjects[addr].deepCopy(state) @@ -759,6 +855,10 @@ func (s *StateDB) Copy() *StateDB { } state.stateObjectsDirty[addr] = struct{}{} } + // Deep copy the destruction flag. + for addr := range s.stateObjectsDestruct { + state.stateObjectsDestruct[addr] = struct{}{} + } for hash, logs := range s.logs { cpy := make([]*types.Log, len(logs)) for i, l := range logs { @@ -770,12 +870,15 @@ func (s *StateDB) Copy() *StateDB { for hash, preimage := range s.preimages { state.preimages[hash] = preimage } - // Do we need to copy the access list? In practice: No. At the start of a - // transaction, the access list is empty. In practice, we only ever copy state - // _between_ transactions/blocks, never in the middle of a transaction. - // However, it doesn't cost us much to copy an empty list, so we do it anyway - // to not blow up if we ever decide copy it in the middle of a transaction + // Do we need to copy the access list and transient storage? + // In practice: No. At the start of a transaction, these two lists are empty. + // In practice, we only ever copy state _between_ transactions/blocks, never + // in the middle of a transaction. 
However, it doesn't cost us much to copy + // empty lists, so we do it anyway to not blow up if we ever decide copy them + // in the middle of a transaction. state.accessList = s.accessList.Copy() + state.transientStorage = s.transientStorage.Copy() + state.predicateStorageSlots = copyPredicateStorageSlots(s.predicateStorageSlots) // If there's a prefetcher running, make an inactive copy of it that can // only access data but does not actively preload (since the user will not @@ -785,22 +888,19 @@ func (s *StateDB) Copy() *StateDB { } if s.snap != nil { // In order for the miner to be able to use and make additions - // to the snapshot tree, we need to copy that aswell. + // to the snapshot tree, we need to copy that as well. // Otherwise, any block mined by ourselves will cause gaps in the tree, // and force the miner to operate trie-backed only state.snap = s.snap + // deep copy needed - state.snapDestructs = make(map[common.Hash]struct{}) - for k, v := range s.snapDestructs { - state.snapDestructs[k] = v - } - state.snapAccounts = make(map[common.Hash][]byte) + state.snapAccounts = make(map[common.Hash][]byte, len(s.snapAccounts)) for k, v := range s.snapAccounts { state.snapAccounts[k] = v } - state.snapStorage = make(map[common.Hash]map[common.Hash][]byte) + state.snapStorage = make(map[common.Hash]map[common.Hash][]byte, len(s.snapStorage)) for k, v := range s.snapStorage { - temp := make(map[common.Hash][]byte) + temp := make(map[common.Hash][]byte, len(v)) for kk, vv := range v { temp[kk] = vv } @@ -858,14 +958,17 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) { if obj.suicided || (deleteEmptyObjects && obj.empty()) { obj.deleted = true + // We need to maintain account deletions explicitly (will remain + // set indefinitely). + s.stateObjectsDestruct[obj.address] = struct{}{} + // If state snapshotting is active, also mark the destruction there. 
// Note, we can't do this only at the end of a block because multiple // transactions within the same block might self destruct and then // resurrect an account; but the snapshotter needs both events. if s.snap != nil { - s.snapDestructs[obj.addrHash] = struct{}{} // We need to maintain account deletions explicitly (will remain set indefinitely) - delete(s.snapAccounts, obj.addrHash) // Clear out any previously updated account data (may be recreated via a ressurrect) - delete(s.snapStorage, obj.addrHash) // Clear out any previously updated storage data (may be recreated via a ressurrect) + delete(s.snapAccounts, obj.addrHash) // Clear out any previously updated account data (may be recreated via a resurrect) + delete(s.snapStorage, obj.addrHash) // Clear out any previously updated storage data (may be recreated via a resurrect) } } else { obj.finalise(true) // Prefetch slots in the background @@ -879,7 +982,7 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) { addressesToPrefetch = append(addressesToPrefetch, common.CopyBytes(addr[:])) // Copy needed for closure } if s.prefetcher != nil && len(addressesToPrefetch) > 0 { - s.prefetcher.prefetch(common.Hash{}, s.originalRoot, addressesToPrefetch) + s.prefetcher.prefetch(common.Hash{}, s.originalRoot, common.Address{}, addressesToPrefetch) } // Invalidate journal because reverting across transactions is not allowed. s.clearJournalAndRefund() @@ -948,9 +1051,10 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash { return s.trie.Hash() } -// Prepare sets the current transaction hash and index which are -// used when the EVM emits new state logs. -func (s *StateDB) Prepare(thash common.Hash, ti int) { +// SetTxContext sets the current transaction hash and index which are +// used when the EVM emits new state logs. It should be invoked before +// transaction execution. 
+func (s *StateDB) SetTxContext(thash common.Hash, ti int) { s.thash = thash s.txIndex = ti } @@ -976,6 +1080,7 @@ func (s *StateDB) CommitWithSnap(deleteEmptyObjects bool, snaps *snapshot.Tree, // Commit writes the state to the underlying in-memory trie database. func (s *StateDB) commit(deleteEmptyObjects bool, snaps *snapshot.Tree, blockHash, parentHash common.Hash, referenceRoot bool) (common.Hash, error) { + // Short circuit in case any database failure occurred earlier. if s.dbErr != nil { return common.Hash{}, fmt.Errorf("commit aborted due to earlier error: %v", s.dbErr) } @@ -984,11 +1089,13 @@ func (s *StateDB) commit(deleteEmptyObjects bool, snaps *snapshot.Tree, blockHas // Commit objects to the trie, measuring the elapsed time var ( - accountTrieNodes int - storageTrieNodes int - nodes = trie.NewMergedNodeSet() + accountTrieNodesUpdated int + accountTrieNodesDeleted int + storageTrieNodesUpdated int + storageTrieNodesDeleted int + nodes = trienode.NewMergedNodeSet() + codeWriter = s.db.DiskDB().NewBatch() ) - codeWriter := s.db.DiskDB().NewBatch() for addr := range s.stateObjectsDirty { if obj := s.stateObjects[addr]; !obj.deleted { // Write any contract code associated with the state object @@ -997,18 +1104,26 @@ func (s *StateDB) commit(deleteEmptyObjects bool, snaps *snapshot.Tree, blockHas obj.dirtyCode = false } // Write any storage changes in the state object to its storage trie - set, err := obj.CommitTrie(s.db) + set, err := obj.commitTrie(s.db) if err != nil { return common.Hash{}, err } - // Merge the dirty nodes of storage trie into global set + // Merge the dirty nodes of storage trie into global set. if set != nil { if err := nodes.Merge(set); err != nil { return common.Hash{}, err } - storageTrieNodes += set.Len() + updates, deleted := set.Size() + storageTrieNodesUpdated += updates + storageTrieNodesDeleted += deleted } } + // If the contract is destructed, the storage is still left in the + // database as dangling data. 
Theoretically it's should be wiped from + // database as well, but in hash-based-scheme it's extremely hard to + // determine that if the trie nodes are also referenced by other storage, + // and in path-based-scheme some technical challenges are still unsolved. + // Although it won't affect the correctness but please fix it TODO(rjl493456442). } if len(s.stateObjectsDirty) > 0 { s.stateObjectsDirty = make(map[common.Address]struct{}) @@ -1023,16 +1138,13 @@ func (s *StateDB) commit(deleteEmptyObjects bool, snaps *snapshot.Tree, blockHas if metrics.EnabledExpensive { start = time.Now() } - root, set, err := s.trie.Commit(true) - if err != nil { - return common.Hash{}, err - } + root, set := s.trie.Commit(true) // Merge the dirty nodes of account trie into global set if set != nil { if err := nodes.Merge(set); err != nil { return common.Hash{}, err } - accountTrieNodes = set.Len() + accountTrieNodesUpdated, accountTrieNodesDeleted = set.Size() } if metrics.EnabledExpensive { s.AccountCommits += time.Since(start) @@ -1041,67 +1153,97 @@ func (s *StateDB) commit(deleteEmptyObjects bool, snaps *snapshot.Tree, blockHas storageUpdatedMeter.Mark(int64(s.StorageUpdated)) accountDeletedMeter.Mark(int64(s.AccountDeleted)) storageDeletedMeter.Mark(int64(s.StorageDeleted)) - accountTrieCommittedMeter.Mark(int64(accountTrieNodes)) - storageTriesCommittedMeter.Mark(int64(storageTrieNodes)) + accountTrieUpdatedMeter.Mark(int64(accountTrieNodesUpdated)) + accountTrieDeletedMeter.Mark(int64(accountTrieNodesDeleted)) + storageTriesUpdatedMeter.Mark(int64(storageTrieNodesUpdated)) + storageTriesDeletedMeter.Mark(int64(storageTrieNodesDeleted)) s.AccountUpdated, s.AccountDeleted = 0, 0 s.StorageUpdated, s.StorageDeleted = 0, 0 } // If snapshotting is enabled, update the snapshot tree with this new version if snaps != nil { + start := time.Now() if s.snap == nil { log.Error(fmt.Sprintf("cannot commit with snaps without a pre-existing snap layer, parentHash: %s, blockHash: %s", 
parentHash, blockHash)) } - if metrics.EnabledExpensive { - defer func(start time.Time) { s.SnapshotCommits += time.Since(start) }(time.Now()) - } - if err := snaps.Update(blockHash, root, parentHash, s.snapDestructs, s.snapAccounts, s.snapStorage); err != nil { + if err := snaps.Update(blockHash, root, parentHash, s.convertAccountSet(s.stateObjectsDestruct), s.snapAccounts, s.snapStorage); err != nil { log.Warn("Failed to update snapshot tree", "to", root, "err", err) } - s.snap, s.snapDestructs, s.snapAccounts, s.snapStorage = nil, nil, nil, nil + if metrics.EnabledExpensive { + s.SnapshotCommits += time.Since(start) + } + s.snap, s.snapAccounts, s.snapStorage = nil, nil, nil } - if referenceRoot { - if err := s.db.TrieDB().UpdateAndReferenceRoot(nodes, root); err != nil { - return common.Hash{}, err + if len(s.stateObjectsDestruct) > 0 { + s.stateObjectsDestruct = make(map[common.Address]struct{}) + } + if root == (common.Hash{}) { + root = types.EmptyRootHash + } + origin := s.originalRoot + if origin == (common.Hash{}) { + origin = types.EmptyRootHash + } + if root != origin { + start := time.Now() + if referenceRoot { + if err := s.db.TrieDB().UpdateAndReferenceRoot(root, origin, nodes); err != nil { + return common.Hash{}, err + } + } else { + if err := s.db.TrieDB().Update(root, origin, nodes); err != nil { + return common.Hash{}, err + } } - } else { - if err := s.db.TrieDB().Update(nodes); err != nil { - return common.Hash{}, err + s.originalRoot = root + if metrics.EnabledExpensive { + s.TrieDBCommits += time.Since(start) } } - s.originalRoot = root - if metrics.EnabledExpensive { - s.TrieDBCommits += time.Since(start) - } - return root, err + return root, nil } -// PrepareAccessList handles the preparatory steps for executing a state transition with -// regards to both EIP-2929 and EIP-2930: +// Prepare handles the preparatory steps for executing a state transition with. +// This method must be invoked before state transition. 
// +// Berlin fork (aka ApricotPhase2): // - Add sender to access list (2929) // - Add destination to access list (2929) // - Add precompiles to access list (2929) // - Add the contents of the optional tx access list (2930) // -// This method should only be called if Berlin/ApricotPhase2/2929+2930 is applicable at the current number. -func (s *StateDB) PrepareAccessList(sender common.Address, dst *common.Address, precompiles []common.Address, list types.AccessList) { - // Clear out any leftover from previous executions - s.accessList = newAccessList() - - s.AddAddressToAccessList(sender) - if dst != nil { - s.AddAddressToAccessList(*dst) - // If it's a create-tx, the destination will be added inside evm.create - } - for _, addr := range precompiles { - s.AddAddressToAccessList(addr) - } - for _, el := range list { - s.AddAddressToAccessList(el.Address) - for _, key := range el.StorageKeys { - s.AddSlotToAccessList(el.Address, key) +// Potential EIPs: +// - Reset access list (Berlin/ApricotPhase2) +// - Add coinbase to access list (EIP-3651/Durango) +// - Reset transient storage (EIP-1153) +func (s *StateDB) Prepare(rules params.Rules, sender, coinbase common.Address, dst *common.Address, precompiles []common.Address, list types.AccessList) { + if rules.IsApricotPhase2 { + // Clear out any leftover from previous executions + al := newAccessList() + s.accessList = al + + al.AddAddress(sender) + if dst != nil { + al.AddAddress(*dst) + // If it's a create-tx, the destination will be added inside evm.create + } + for _, addr := range precompiles { + al.AddAddress(addr) + } + for _, el := range list { + al.AddAddress(el.Address) + for _, key := range el.StorageKeys { + al.AddSlot(el.Address, key) + } + } + if rules.IsDurango { // EIP-3651: warm coinbase + al.AddAddress(coinbase) } + + s.predicateStorageSlots = predicate.PreparePredicateStorageSlots(rules, list) } + // Reset transient storage at the beginning of transaction execution + s.transientStorage = 
newTransientStorage() } // AddAddressToAccessList adds the given address to the access list @@ -1138,3 +1280,48 @@ func (s *StateDB) AddressInAccessList(addr common.Address) bool { func (s *StateDB) SlotInAccessList(addr common.Address, slot common.Hash) (addressPresent bool, slotPresent bool) { return s.accessList.Contains(addr, slot) } + +// GetTxHash returns the current tx hash on the StateDB set by SetTxContext. +func (s *StateDB) GetTxHash() common.Hash { + return s.thash +} + +// GetPredicateStorageSlots returns the storage slots associated with the address, index pair. +// A list of access tuples can be included within transaction types post EIP-2930. The address +// is declared directly on the access tuple and the index is the i'th occurrence of an access +// tuple with the specified address. +// +// Ex. AccessList[[AddrA, Predicate1], [AddrB, Predicate2], [AddrA, Predicate3]] +// In this case, the caller could retrieve predicates 1-3 with the following calls: +// GetPredicateStorageSlots(AddrA, 0) -> Predicate1 +// GetPredicateStorageSlots(AddrB, 0) -> Predicate2 +// GetPredicateStorageSlots(AddrA, 1) -> Predicate3 +func (s *StateDB) GetPredicateStorageSlots(address common.Address, index int) ([]byte, bool) { + predicates, exists := s.predicateStorageSlots[address] + if !exists { + return nil, false + } + if index >= len(predicates) { + return nil, false + } + return predicates[index], true +} + +// convertAccountSet converts a provided account set from address keyed to hash keyed. 
+func (s *StateDB) convertAccountSet(set map[common.Address]struct{}) map[common.Hash]struct{} { + ret := make(map[common.Hash]struct{}, len(set)) + for addr := range set { + obj, exist := s.stateObjects[addr] + if !exist { + ret[crypto.Keccak256Hash(addr[:])] = struct{}{} + } else { + ret[obj.addrHash] = struct{}{} + } + } + return ret +} + +// SetPredicateStorageSlots sets the predicate storage slots for the given address +func (s *StateDB) SetPredicateStorageSlots(address common.Address, predicates [][]byte) { + s.predicateStorageSlots[address] = predicates +} diff --git a/coreth/core/state/statedb_test.go b/coreth/core/state/statedb_test.go index c2156b2e..130a7df7 100644 --- a/coreth/core/state/statedb_test.go +++ b/coreth/core/state/statedb_test.go @@ -51,7 +51,7 @@ import ( func TestUpdateLeaks(t *testing.T) { // Create an empty state database db := rawdb.NewMemoryDatabase() - state, _ := New(common.Hash{}, NewDatabase(db), nil) + state, _ := New(types.EmptyRootHash, NewDatabase(db), nil) // Update it with some accounts for i := byte(0); i < 255; i++ { @@ -67,7 +67,7 @@ func TestUpdateLeaks(t *testing.T) { } root := state.IntermediateRoot(false) - if err := state.Database().TrieDB().Commit(root, false, nil); err != nil { + if err := state.Database().TrieDB().Commit(root, false); err != nil { t.Errorf("can not commit trie %v to persistent database", root.Hex()) } @@ -85,8 +85,8 @@ func TestIntermediateLeaks(t *testing.T) { // Create two state databases, one transitioning to the final state, the other final from the beginning transDb := rawdb.NewMemoryDatabase() finalDb := rawdb.NewMemoryDatabase() - transState, _ := New(common.Hash{}, NewDatabase(transDb), nil) - finalState, _ := New(common.Hash{}, NewDatabase(finalDb), nil) + transState, _ := New(types.EmptyRootHash, NewDatabase(transDb), nil) + finalState, _ := New(types.EmptyRootHash, NewDatabase(finalDb), nil) modify := func(state *StateDB, addr common.Address, i, tweak byte) { state.SetBalance(addr, 
big.NewInt(int64(11*i)+int64(tweak))) @@ -118,7 +118,7 @@ func TestIntermediateLeaks(t *testing.T) { if err != nil { t.Fatalf("failed to commit transition state: %v", err) } - if err = transState.Database().TrieDB().Commit(transRoot, false, nil); err != nil { + if err = transState.Database().TrieDB().Commit(transRoot, false); err != nil { t.Errorf("can not commit trie %v to persistent database", transRoot.Hex()) } @@ -126,7 +126,7 @@ func TestIntermediateLeaks(t *testing.T) { if err != nil { t.Fatalf("failed to commit final state: %v", err) } - if err = finalState.Database().TrieDB().Commit(finalRoot, false, nil); err != nil { + if err = finalState.Database().TrieDB().Commit(finalRoot, false); err != nil { t.Errorf("can not commit trie %v to persistent database", finalRoot.Hex()) } @@ -161,7 +161,7 @@ func TestIntermediateLeaks(t *testing.T) { // https://github.com/ethereum/go-ethereum/pull/15549. func TestCopy(t *testing.T) { // Create a random state test to copy and modify "independently" - orig, _ := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()), nil) + orig, _ := New(types.EmptyRootHash, NewDatabase(rawdb.NewMemoryDatabase()), nil) for i := byte(0); i < 255; i++ { obj := orig.GetOrNewStateObject(common.BytesToAddress([]byte{i})) @@ -327,7 +327,7 @@ func newTestAction(addr common.Address, r *rand.Rand) testAction { fn: func(a testAction, s *StateDB) { data := make([]byte, 2) binary.BigEndian.PutUint16(data, uint16(a.args[0])) - s.AddLog(&types.Log{Address: addr, Data: data}) + s.AddLog(addr, nil, data, 0) }, args: make([]int64, 1), }, @@ -354,6 +354,16 @@ func newTestAction(addr common.Address, r *rand.Rand) testAction { }, args: make([]int64, 1), }, + { + name: "SetTransientState", + fn: func(a testAction, s *StateDB) { + var key, val common.Hash + binary.BigEndian.PutUint16(key[:], uint16(a.args[0])) + binary.BigEndian.PutUint16(val[:], uint16(a.args[1])) + s.SetTransientState(addr, key, val) + }, + args: make([]int64, 2), + }, } action := 
actions[r.Intn(len(actions))] var nameargs []string @@ -411,7 +421,7 @@ func (test *snapshotTest) String() string { func (test *snapshotTest) run() bool { // Run all actions and create snapshots. var ( - state, _ = New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()), nil) + state, _ = New(types.EmptyRootHash, NewDatabase(rawdb.NewMemoryDatabase()), nil) snapshotRevs = make([]int, len(test.snapshots)) sindex = 0 ) @@ -425,7 +435,7 @@ func (test *snapshotTest) run() bool { // Revert all snapshots in reverse order. Each revert must yield a state // that is equivalent to fresh state with all actions up the snapshot applied. for sindex--; sindex >= 0; sindex-- { - checkstate, _ := New(common.Hash{}, state.Database(), nil) + checkstate, _ := New(types.EmptyRootHash, state.Database(), nil) for _, action := range test.actions[:test.snapshots[sindex]] { action.fn(action, checkstate) } @@ -475,9 +485,9 @@ func (test *snapshotTest) checkEqual(state, checkstate *StateDB) error { return fmt.Errorf("got GetRefund() == %d, want GetRefund() == %d", state.GetRefund(), checkstate.GetRefund()) } - if !reflect.DeepEqual(state.GetLogs(common.Hash{}, common.Hash{}), checkstate.GetLogs(common.Hash{}, common.Hash{})) { + if !reflect.DeepEqual(state.GetLogs(common.Hash{}, 0, common.Hash{}), checkstate.GetLogs(common.Hash{}, 0, common.Hash{})) { return fmt.Errorf("got GetLogs(common.Hash{}) == %v, want GetLogs(common.Hash{}) == %v", - state.GetLogs(common.Hash{}, common.Hash{}), checkstate.GetLogs(common.Hash{}, common.Hash{})) + state.GetLogs(common.Hash{}, 0, common.Hash{}), checkstate.GetLogs(common.Hash{}, 0, common.Hash{})) } return nil } @@ -503,7 +513,7 @@ func TestTouchDelete(t *testing.T) { // TestCopyOfCopy tests that modified objects are carried over to the copy, and the copy of the copy. 
// See https://github.com/ethereum/go-ethereum/pull/15225#issuecomment-380191512 func TestCopyOfCopy(t *testing.T) { - state, _ := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()), nil) + state, _ := New(types.EmptyRootHash, NewDatabase(rawdb.NewMemoryDatabase()), nil) addr := common.HexToAddress("aaaa") state.SetBalance(addr, big.NewInt(42)) @@ -520,7 +530,7 @@ func TestCopyOfCopy(t *testing.T) { // // See https://github.com/ethereum/go-ethereum/issues/20106. func TestCopyCommitCopy(t *testing.T) { - state, _ := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()), nil) + state, _ := New(types.EmptyRootHash, NewDatabase(rawdb.NewMemoryDatabase()), nil) // Create an account and check if the retrieved balance is correct addr := common.HexToAddress("0xaffeaffeaffeaffeaffeaffeaffeaffeaffeaffe") @@ -592,7 +602,7 @@ func TestCopyCommitCopy(t *testing.T) { // // See https://github.com/ethereum/go-ethereum/issues/20106. func TestCopyCopyCommitCopy(t *testing.T) { - state, _ := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()), nil) + state, _ := New(types.EmptyRootHash, NewDatabase(rawdb.NewMemoryDatabase()), nil) // Create an account and check if the retrieved balance is correct addr := common.HexToAddress("0xaffeaffeaffeaffeaffeaffeaffeaffeaffeaffe") @@ -682,7 +692,7 @@ func TestCopyCopyCommitCopy(t *testing.T) { // first, but the journal wiped the entire state object on create-revert. 
func TestDeleteCreateRevert(t *testing.T) { // Create an initial state with a single contract - state, _ := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()), nil) + state, _ := New(types.EmptyRootHash, NewDatabase(rawdb.NewMemoryDatabase()), nil) addr := common.BytesToAddress([]byte("so")) state.SetBalance(addr, big.NewInt(1)) @@ -715,7 +725,7 @@ func TestMissingTrieNodes(t *testing.T) { memDb := rawdb.NewMemoryDatabase() db := NewDatabase(memDb) var root common.Hash - state, _ := New(common.Hash{}, db, nil) + state, _ := New(types.EmptyRootHash, db, nil) addr := common.BytesToAddress([]byte("so")) { state.SetBalance(addr, big.NewInt(1)) @@ -764,7 +774,7 @@ func TestStateDBAccessList(t *testing.T) { memDb := rawdb.NewMemoryDatabase() db := NewDatabase(memDb) - state, _ := New(common.Hash{}, db, nil) + state, _ := New(types.EmptyRootHash, db, nil) state.accessList = newAccessList() verifyAddrs := func(astrings ...string) { @@ -1041,11 +1051,17 @@ func TestGenerateMultiCoinAccounts(t *testing.T) { } triedb := database.TrieDB() - if err := triedb.Commit(root, true, nil); err != nil { + if err := triedb.Commit(root, true); err != nil { t.Fatal(err) } // Build snapshot from scratch - snaps, err := snapshot.New(diskdb, triedb, 16, common.Hash{}, root, false, true, false) + snapConfig := snapshot.Config{ + CacheSize: 16, + AsyncBuild: false, + NoBuild: false, + SkipVerify: true, + } + snaps, err := snapshot.New(snapConfig, diskdb, triedb, common.Hash{}, root) if err != nil { t.Error("Unexpected error while rebuilding snapshot:", err) } @@ -1080,7 +1096,7 @@ func TestFlushOrderDataLoss(t *testing.T) { var ( memdb = rawdb.NewMemoryDatabase() statedb = NewDatabase(memdb) - state, _ = New(common.Hash{}, statedb, nil) + state, _ = New(types.EmptyRootHash, statedb, nil) ) for a := byte(0); a < 10; a++ { state.CreateAccount(common.Address{a}) @@ -1096,7 +1112,7 @@ func TestFlushOrderDataLoss(t *testing.T) { if err := statedb.TrieDB().Cap(1024); err != nil { 
t.Fatalf("failed to cap trie dirty cache: %v", err) } - if err := statedb.TrieDB().Commit(root, false, nil); err != nil { + if err := statedb.TrieDB().Commit(root, false); err != nil { t.Fatalf("failed to commit state trie: %v", err) } // Reopen the state trie from flushed disk and verify it @@ -1112,3 +1128,37 @@ func TestFlushOrderDataLoss(t *testing.T) { } } } + +func TestStateDBTransientStorage(t *testing.T) { + memDb := rawdb.NewMemoryDatabase() + db := NewDatabase(memDb) + state, _ := New(types.EmptyRootHash, db, nil) + + key := common.Hash{0x01} + value := common.Hash{0x02} + addr := common.Address{} + + state.SetTransientState(addr, key, value) + if exp, got := 1, state.journal.length(); exp != got { + t.Fatalf("journal length mismatch: have %d, want %d", got, exp) + } + // the retrieved value should equal what was set + if got := state.GetTransientState(addr, key); got != value { + t.Fatalf("transient storage mismatch: have %x, want %x", got, value) + } + + // revert the transient state being set and then check that the + // value is now the empty hash + state.journal.revert(state, 0) + if got, exp := state.GetTransientState(addr, key), (common.Hash{}); exp != got { + t.Fatalf("transient storage mismatch: have %x, want %x", got, exp) + } + + // set transient state and then copy the statedb and ensure that + // the transient state is copied + state.SetTransientState(addr, key, value) + cpy := state.Copy() + if got := cpy.GetTransientState(addr, key); got != value { + t.Fatalf("transient storage mismatch: have %x, want %x", got, value) + } +} diff --git a/coreth/core/state/sync_test.go b/coreth/core/state/sync_test.go new file mode 100644 index 00000000..71f858f4 --- /dev/null +++ b/coreth/core/state/sync_test.go @@ -0,0 +1,73 @@ +// Copyright 2015 The go-ethereum Authors +// This file is part of the go-ethereum library. 
+// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package state + +import ( + "math/big" + + "github.com/ava-labs/coreth/core/rawdb" + "github.com/ava-labs/coreth/core/types" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethdb" +) + +// testAccount is the data associated with an account used by the state tests. +type testAccount struct { + address common.Address + balance *big.Int + nonce uint64 + code []byte +} + +// makeTestState create a sample test state to test node-wise reconstruction. 
+func makeTestState() (ethdb.Database, Database, common.Hash, []*testAccount) { + // Create an empty state + db := rawdb.NewMemoryDatabase() + sdb := NewDatabase(db) + state, _ := New(types.EmptyRootHash, sdb, nil) + + // Fill it with some arbitrary data + var accounts []*testAccount + for i := byte(0); i < 96; i++ { + obj := state.GetOrNewStateObject(common.BytesToAddress([]byte{i})) + acc := &testAccount{address: common.BytesToAddress([]byte{i})} + + obj.AddBalance(big.NewInt(int64(11 * i))) + acc.balance = big.NewInt(int64(11 * i)) + + obj.SetNonce(uint64(42 * i)) + acc.nonce = uint64(42 * i) + + if i%3 == 0 { + obj.SetCode(crypto.Keccak256Hash([]byte{i, i, i, i, i}), []byte{i, i, i, i, i}) + acc.code = []byte{i, i, i, i, i} + } + if i%5 == 0 { + for j := byte(0); j < 5; j++ { + hash := crypto.Keccak256Hash([]byte{i, i, i, i, i, j, j}) + obj.SetState(sdb, hash, hash) + } + } + state.updateStateObject(obj) + accounts = append(accounts, acc) + } + root, _ := state.Commit(false, false) + + // Return the generated state + return db, sdb, root, accounts +} diff --git a/coreth/core/state/test_statedb.go b/coreth/core/state/test_statedb.go new file mode 100644 index 00000000..c34fdec8 --- /dev/null +++ b/coreth/core/state/test_statedb.go @@ -0,0 +1,20 @@ +// (c) 2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package state + +import ( + "testing" + + "github.com/ava-labs/coreth/core/rawdb" + "github.com/ava-labs/coreth/precompile/contract" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +func NewTestStateDB(t testing.TB) contract.StateDB { + db := rawdb.NewMemoryDatabase() + stateDB, err := New(common.Hash{}, NewDatabase(db), nil) + require.NoError(t, err) + return stateDB +} diff --git a/coreth/core/state/transient_storage.go b/coreth/core/state/transient_storage.go new file mode 100644 index 00000000..b5ee4f46 --- /dev/null +++ b/coreth/core/state/transient_storage.go @@ -0,0 +1,65 @@ +// (c) 2023, Ava Labs, Inc. +// +// This file is a derived work, based on the go-ethereum library whose original +// notices appear below. +// +// It is distributed under a license compatible with the licensing terms of the +// original code from which it is derived. +// +// Much love to the original authors for their work. +// ********** +// Copyright 2022 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package state + +import ( + "github.com/ethereum/go-ethereum/common" +) + +// transientStorage is a representation of EIP-1153 "Transient Storage". 
+type transientStorage map[common.Address]Storage + +// newTransientStorage creates a new instance of a transientStorage. +func newTransientStorage() transientStorage { + return make(transientStorage) +} + +// Set sets the transient-storage `value` for `key` at the given `addr`. +func (t transientStorage) Set(addr common.Address, key, value common.Hash) { + if _, ok := t[addr]; !ok { + t[addr] = make(Storage) + } + t[addr][key] = value +} + +// Get gets the transient storage for `key` at the given `addr`. +func (t transientStorage) Get(addr common.Address, key common.Hash) common.Hash { + val, ok := t[addr] + if !ok { + return common.Hash{} + } + return val[key] +} + +// Copy does a deep copy of the transientStorage +func (t transientStorage) Copy() transientStorage { + storage := make(transientStorage) + for key, value := range t { + storage[key] = value.Copy() + } + return storage +} diff --git a/coreth/core/state/trie_prefetcher.go b/coreth/core/state/trie_prefetcher.go index c058214a..e6dabeb0 100644 --- a/coreth/core/state/trie_prefetcher.go +++ b/coreth/core/state/trie_prefetcher.go @@ -28,16 +28,16 @@ package state import ( "sync" + "time" "github.com/ava-labs/coreth/metrics" + "github.com/ava-labs/coreth/utils" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" ) -var ( - // triePrefetchMetricsPrefix is the prefix under which to publish the metrics. - triePrefetchMetricsPrefix = "trie/prefetch/" -) +// triePrefetchMetricsPrefix is the prefix under which to publish the metrics. +const triePrefetchMetricsPrefix = "trie/prefetch/" // triePrefetcher is an active prefetcher, which receives accounts or storage // items and does trie-loading of them. 
The goal is to get as much useful content @@ -50,63 +50,91 @@ type triePrefetcher struct { fetches map[string]Trie // Partially or fully fetcher tries fetchers map[string]*subfetcher // Subfetchers for each trie - deliveryCopyMissMeter metrics.Meter - deliveryRequestMissMeter metrics.Meter - deliveryWaitMissMeter metrics.Meter + maxConcurrency int + workers *utils.BoundedWorkers + + subfetcherWorkersMeter metrics.Meter + subfetcherWaitTimer metrics.Counter + subfetcherCopiesMeter metrics.Meter accountLoadMeter metrics.Meter accountDupMeter metrics.Meter accountSkipMeter metrics.Meter accountWasteMeter metrics.Meter - storageLoadMeter metrics.Meter - storageDupMeter metrics.Meter - storageSkipMeter metrics.Meter - storageWasteMeter metrics.Meter + + storageFetchersMeter metrics.Meter + storageLoadMeter metrics.Meter + storageLargestLoadMeter metrics.Meter + storageDupMeter metrics.Meter + storageSkipMeter metrics.Meter + storageWasteMeter metrics.Meter } -func newTriePrefetcher(db Database, root common.Hash, namespace string) *triePrefetcher { +func newTriePrefetcher(db Database, root common.Hash, namespace string, maxConcurrency int) *triePrefetcher { prefix := triePrefetchMetricsPrefix + namespace - p := &triePrefetcher{ + return &triePrefetcher{ db: db, root: root, fetchers: make(map[string]*subfetcher), // Active prefetchers use the fetchers map - deliveryCopyMissMeter: metrics.GetOrRegisterMeter(prefix+"/deliverymiss/copy", nil), - deliveryRequestMissMeter: metrics.GetOrRegisterMeter(prefix+"/deliverymiss/request", nil), - deliveryWaitMissMeter: metrics.GetOrRegisterMeter(prefix+"/deliverymiss/wait", nil), + maxConcurrency: maxConcurrency, + workers: utils.NewBoundedWorkers(maxConcurrency), // Scale up as needed to [maxConcurrency] + + subfetcherWorkersMeter: metrics.GetOrRegisterMeter(prefix+"/subfetcher/workers", nil), + subfetcherWaitTimer: metrics.GetOrRegisterCounter(prefix+"/subfetcher/wait", nil), + subfetcherCopiesMeter: 
metrics.GetOrRegisterMeter(prefix+"/subfetcher/copies", nil), accountLoadMeter: metrics.GetOrRegisterMeter(prefix+"/account/load", nil), accountDupMeter: metrics.GetOrRegisterMeter(prefix+"/account/dup", nil), accountSkipMeter: metrics.GetOrRegisterMeter(prefix+"/account/skip", nil), accountWasteMeter: metrics.GetOrRegisterMeter(prefix+"/account/waste", nil), - storageLoadMeter: metrics.GetOrRegisterMeter(prefix+"/storage/load", nil), - storageDupMeter: metrics.GetOrRegisterMeter(prefix+"/storage/dup", nil), - storageSkipMeter: metrics.GetOrRegisterMeter(prefix+"/storage/skip", nil), - storageWasteMeter: metrics.GetOrRegisterMeter(prefix+"/storage/waste", nil), + + storageFetchersMeter: metrics.GetOrRegisterMeter(prefix+"/storage/fetchers", nil), + storageLoadMeter: metrics.GetOrRegisterMeter(prefix+"/storage/load", nil), + storageLargestLoadMeter: metrics.GetOrRegisterMeter(prefix+"/storage/lload", nil), + storageDupMeter: metrics.GetOrRegisterMeter(prefix+"/storage/dup", nil), + storageSkipMeter: metrics.GetOrRegisterMeter(prefix+"/storage/skip", nil), + storageWasteMeter: metrics.GetOrRegisterMeter(prefix+"/storage/waste", nil), } - return p } // close iterates over all the subfetchers, aborts any that were left spinning // and reports the stats to the metrics subsystem. 
func (p *triePrefetcher) close() { + // If the prefetcher is an inactive one, bail out + if p.fetches != nil { + return + } + + // Collect stats from all fetchers + var ( + storageFetchers int64 + largestLoad int64 + ) for _, fetcher := range p.fetchers { - fetcher.abort() // safe to do multiple times + fetcher.abort() // safe to call multiple times (should be a no-op on happy path) if metrics.Enabled { + p.subfetcherCopiesMeter.Mark(int64(fetcher.copies())) + if fetcher.root == p.root { p.accountLoadMeter.Mark(int64(len(fetcher.seen))) p.accountDupMeter.Mark(int64(fetcher.dups)) - p.accountSkipMeter.Mark(int64(len(fetcher.tasks))) + p.accountSkipMeter.Mark(int64(fetcher.skips())) for _, key := range fetcher.used { delete(fetcher.seen, string(key)) } p.accountWasteMeter.Mark(int64(len(fetcher.seen))) } else { - p.storageLoadMeter.Mark(int64(len(fetcher.seen))) + storageFetchers++ + oseen := int64(len(fetcher.seen)) + if oseen > largestLoad { + largestLoad = oseen + } + p.storageLoadMeter.Mark(oseen) p.storageDupMeter.Mark(int64(fetcher.dups)) - p.storageSkipMeter.Mark(int64(len(fetcher.tasks))) + p.storageSkipMeter.Mark(int64(fetcher.skips())) for _, key := range fetcher.used { delete(fetcher.seen, string(key)) @@ -115,6 +143,20 @@ func (p *triePrefetcher) close() { } } } + if metrics.Enabled { + p.storageFetchersMeter.Mark(storageFetchers) + p.storageLargestLoadMeter.Mark(largestLoad) + } + + // Stop all workers once fetchers are aborted (otherwise + // could stop while waiting) + // + // Record number of workers that were spawned during this run + workersUsed := int64(p.workers.Wait()) + if metrics.Enabled { + p.subfetcherWorkersMeter.Mark(workersUsed) + } + // Clear out all fetchers (will crash on a second call, deliberate) p.fetchers = nil } @@ -127,20 +169,23 @@ func (p *triePrefetcher) copy() *triePrefetcher { copy := &triePrefetcher{ db: p.db, root: p.root, - fetches: make(map[string]Trie), // Active prefetchers use the fetches map + fetches: 
make(map[string]Trie), // Active prefetchers use the fetchers map - deliveryCopyMissMeter: p.deliveryCopyMissMeter, - deliveryRequestMissMeter: p.deliveryRequestMissMeter, - deliveryWaitMissMeter: p.deliveryWaitMissMeter, + subfetcherWorkersMeter: p.subfetcherWorkersMeter, + subfetcherWaitTimer: p.subfetcherWaitTimer, + subfetcherCopiesMeter: p.subfetcherCopiesMeter, accountLoadMeter: p.accountLoadMeter, accountDupMeter: p.accountDupMeter, accountSkipMeter: p.accountSkipMeter, accountWasteMeter: p.accountWasteMeter, - storageLoadMeter: p.storageLoadMeter, - storageDupMeter: p.storageDupMeter, - storageSkipMeter: p.storageSkipMeter, - storageWasteMeter: p.storageWasteMeter, + + storageFetchersMeter: p.storageFetchersMeter, + storageLoadMeter: p.storageLoadMeter, + storageLargestLoadMeter: p.storageLargestLoadMeter, + storageDupMeter: p.storageDupMeter, + storageSkipMeter: p.storageSkipMeter, + storageWasteMeter: p.storageWasteMeter, } // If the prefetcher is already a copy, duplicate the data if p.fetches != nil { @@ -160,16 +205,17 @@ func (p *triePrefetcher) copy() *triePrefetcher { } // prefetch schedules a batch of trie items to prefetch. 
-func (p *triePrefetcher) prefetch(owner common.Hash, root common.Hash, keys [][]byte) { +func (p *triePrefetcher) prefetch(owner common.Hash, root common.Hash, addr common.Address, keys [][]byte) { // If the prefetcher is an inactive one, bail out if p.fetches != nil { return } + // Active fetcher, schedule the retrievals id := p.trieID(owner, root) fetcher := p.fetchers[id] if fetcher == nil { - fetcher = newSubfetcher(p.db, owner, root) + fetcher = newSubfetcher(p, owner, root, addr) p.fetchers[id] = fetcher } fetcher.schedule(keys) @@ -183,24 +229,27 @@ func (p *triePrefetcher) trie(owner common.Hash, root common.Hash) Trie { if p.fetches != nil { trie := p.fetches[id] if trie == nil { - p.deliveryCopyMissMeter.Mark(1) return nil } return p.db.CopyTrie(trie) } + // Otherwise the prefetcher is active, bail if no trie was prefetched for this root fetcher := p.fetchers[id] if fetcher == nil { - p.deliveryRequestMissMeter.Mark(1) return nil } - // Interrupt the prefetcher if it's by any chance still running and return - // a copy of any pre-loaded trie. - fetcher.abort() // safe to do multiple times + // Wait for the fetcher to finish and shutdown orchestrator, if it exists + start := time.Now() + fetcher.wait() + if metrics.Enabled { + p.subfetcherWaitTimer.Inc(time.Since(start).Milliseconds()) + } + + // Return a copy of one of the prefetched tries trie := fetcher.peek() if trie == nil { - p.deliveryWaitMissMeter.Mark(1) return nil } return trie @@ -224,18 +273,15 @@ func (p *triePrefetcher) trieID(owner common.Hash, root common.Hash) string { // main prefetcher is paused and either all requested items are processed or if // the trie being worked on is retrieved from the prefetcher. 
type subfetcher struct { - db Database // Database to load trie nodes through - owner common.Hash // Owner of the trie, usually account hash - root common.Hash // Root hash of the trie to prefetch - trie Trie // Trie being populated with nodes + p *triePrefetcher - tasks [][]byte // Items queued up for retrieval - lock sync.Mutex // Lock protecting the task queue + db Database // Database to load trie nodes through + state common.Hash // Root hash of the state to prefetch + owner common.Hash // Owner of the trie, usually account hash + root common.Hash // Root hash of the trie to prefetch + addr common.Address // Address of the account that the trie belongs to - wake chan struct{} // Wake channel if a new task is scheduled - stop chan struct{} // Channel to interrupt processing - term chan struct{} // Channel to signal interruption - copy chan chan Trie // Channel to request a copy of the current trie + to *trieOrchestrator // Orchestrate concurrent fetching of a single trie seen map[string]struct{} // Tracks the entries already loaded dups int // Number of duplicate preload tasks @@ -244,137 +290,346 @@ type subfetcher struct { // newSubfetcher creates a goroutine to prefetch state items belonging to a // particular root hash. -func newSubfetcher(db Database, owner common.Hash, root common.Hash) *subfetcher { +func newSubfetcher(p *triePrefetcher, owner common.Hash, root common.Hash, addr common.Address) *subfetcher { sf := &subfetcher{ - db: db, + p: p, + db: p.db, + state: p.root, owner: owner, root: root, - wake: make(chan struct{}, 1), - stop: make(chan struct{}), - term: make(chan struct{}), - copy: make(chan chan Trie), + addr: addr, seen: make(map[string]struct{}), } - go sf.loop() + sf.to = newTrieOrchestrator(sf) + if sf.to != nil { + go sf.to.processTasks() + } + // We return [sf] here to ensure we don't try to re-create if + // we aren't able to setup a [newTrieOrchestrator] the first time. 
return sf } // schedule adds a batch of trie keys to the queue to prefetch. +// This should never block, so an array is used instead of a channel. +// +// This is not thread-safe. func (sf *subfetcher) schedule(keys [][]byte) { // Append the tasks to the current queue - sf.lock.Lock() - sf.tasks = append(sf.tasks, keys...) - sf.lock.Unlock() + tasks := make([][]byte, 0, len(keys)) + for _, key := range keys { + // Check if keys already seen + sk := string(key) + if _, ok := sf.seen[sk]; ok { + sf.dups++ + continue + } + sf.seen[sk] = struct{}{} + tasks = append(tasks, key) + } - // Notify the prefetcher, it's fine if it's already terminated - select { - case sf.wake <- struct{}{}: - default: + // After counting keys, exit if they can't be prefetched + if sf.to == nil { + return } + + // Add tasks to queue for prefetching + sf.to.enqueueTasks(tasks) } // peek tries to retrieve a deep copy of the fetcher's trie in whatever form it // is currently. func (sf *subfetcher) peek() Trie { - ch := make(chan Trie) - select { - case sf.copy <- ch: - // Subfetcher still alive, return copy from it - return <-ch + if sf.to == nil { + return nil + } + return sf.to.copyBase() +} - case <-sf.term: - // Subfetcher already terminated, return a copy directly - if sf.trie == nil { - return nil - } - return sf.db.CopyTrie(sf.trie) +// wait must only be called if [triePrefetcher] has not been closed. If this happens, +// workers will not finish. +func (sf *subfetcher) wait() { + if sf.to == nil { + // Unable to open trie + return } + sf.to.wait() } -// abort interrupts the subfetcher immediately. It is safe to call abort multiple -// times but it is not thread safe. 
func (sf *subfetcher) abort() { - select { - case <-sf.stop: - default: - close(sf.stop) + if sf.to == nil { + // Unable to open trie + return + } + sf.to.abort() +} + +func (sf *subfetcher) skips() int { + if sf.to == nil { + // Unable to open trie + return 0 + } + return sf.to.skipCount() +} + +func (sf *subfetcher) copies() int { + if sf.to == nil { + // Unable to open trie + return 0 } - <-sf.term + return sf.to.copies } -// loop waits for new tasks to be scheduled and keeps loading them until it runs -// out of tasks or its underlying trie is retrieved for committing. -func (sf *subfetcher) loop() { - // No matter how the loop stops, signal anyone waiting that it's terminated - defer close(sf.term) +// trieOrchestrator is not thread-safe. +type trieOrchestrator struct { + sf *subfetcher + + // base is an unmodified Trie we keep for + // creating copies for each worker goroutine. + // + // We care more about quick copies than good copies + // because most (if not all) of the nodes that will be populated + // in the copy will come from the underlying triedb cache. Ones + // that don't come from this cache probably had to be fetched + // from disk anyways. 
+ base Trie + baseLock sync.Mutex + + tasksAllowed bool + skips int // number of tasks skipped + pendingTasks [][]byte + taskLock sync.Mutex + + processingTasks sync.WaitGroup + + wake chan struct{} + stop chan struct{} + stopOnce sync.Once + loopTerm chan struct{} + + copies int + copyChan chan Trie + copySpawner chan struct{} +} +func newTrieOrchestrator(sf *subfetcher) *trieOrchestrator { // Start by opening the trie and stop processing if it fails + var ( + base Trie + err error + ) if sf.owner == (common.Hash{}) { - trie, err := sf.db.OpenTrie(sf.root) + base, err = sf.db.OpenTrie(sf.root) if err != nil { log.Warn("Trie prefetcher failed opening trie", "root", sf.root, "err", err) - return + return nil } - sf.trie = trie } else { - trie, err := sf.db.OpenStorageTrie(sf.owner, sf.root) + base, err = sf.db.OpenStorageTrie(sf.state, sf.owner, sf.root) if err != nil { log.Warn("Trie prefetcher failed opening trie", "root", sf.root, "err", err) - return + return nil } - sf.trie = trie } - // Trie opened successfully, keep prefetching items + // Instantiate trieOrchestrator + to := &trieOrchestrator{ + sf: sf, + base: base, + + tasksAllowed: true, + wake: make(chan struct{}, 1), + stop: make(chan struct{}), + loopTerm: make(chan struct{}), + + copyChan: make(chan Trie, sf.p.maxConcurrency), + copySpawner: make(chan struct{}, sf.p.maxConcurrency), + } + + // Create initial trie copy + to.copies++ + to.copySpawner <- struct{}{} + to.copyChan <- to.copyBase() + return to +} + +func (to *trieOrchestrator) copyBase() Trie { + to.baseLock.Lock() + defer to.baseLock.Unlock() + + return to.sf.db.CopyTrie(to.base) +} + +func (to *trieOrchestrator) skipCount() int { + to.taskLock.Lock() + defer to.taskLock.Unlock() + + return to.skips +} + +func (to *trieOrchestrator) enqueueTasks(tasks [][]byte) { + to.taskLock.Lock() + defer to.taskLock.Unlock() + + if len(tasks) == 0 { + return + } + + // Add tasks to [pendingTasks] + if !to.tasksAllowed { + to.skips += len(tasks) + return 
+ } + to.processingTasks.Add(len(tasks)) + to.pendingTasks = append(to.pendingTasks, tasks...) + + // Wake up processor + select { + case to.wake <- struct{}{}: + default: + } +} + +func (to *trieOrchestrator) handleStop(remaining int) { + to.taskLock.Lock() + to.skips += remaining + to.taskLock.Unlock() + to.processingTasks.Add(-remaining) +} + +func (to *trieOrchestrator) processTasks() { + defer close(to.loopTerm) + for { + // Determine if we should process or exit select { - case <-sf.wake: - // Subfetcher was woken up, retrieve any tasks to avoid spinning the lock - sf.lock.Lock() - tasks := sf.tasks - sf.tasks = nil - sf.lock.Unlock() - - // Prefetch any tasks until the loop is interrupted - for i, task := range tasks { + case <-to.wake: + case <-to.stop: + return + } + + // Get current tasks + to.taskLock.Lock() + tasks := to.pendingTasks + to.pendingTasks = nil + to.taskLock.Unlock() + + // Enqueue more work as soon as trie copies are available + lt := len(tasks) + for i := 0; i < lt; i++ { + // Try to stop as soon as possible, if channel is closed + remaining := lt - i + select { + case <-to.stop: + to.handleStop(remaining) + return + default: + } + + // Try to create to get an active copy first (select is non-deterministic, + // so we may end up creating a new copy when we don't need to) + var t Trie + select { + case t = <-to.copyChan: + default: + // Wait for an available copy or create one, if we weren't + // able to get a previously created copy select { - case <-sf.stop: - // If termination is requested, add any leftover back and return - sf.lock.Lock() - sf.tasks = append(sf.tasks, tasks[i:]...) 
- sf.lock.Unlock() + case <-to.stop: + to.handleStop(remaining) return - - case ch := <-sf.copy: - // Somebody wants a copy of the current trie, grant them - ch <- sf.db.CopyTrie(sf.trie) - - default: - // No termination request yet, prefetch the next entry - if _, ok := sf.seen[string(task)]; ok { - sf.dups++ - } else { - var err error - if len(task) == len(common.Address{}) { - _, err = sf.trie.TryGetAccount(task) - } else { - _, err = sf.trie.TryGet(task) - } - if err != nil { - log.Error("Trie prefetcher failed fetching", "root", sf.root, "err", err) - } - sf.seen[string(task)] = struct{}{} - } + case t = <-to.copyChan: + case to.copySpawner <- struct{}{}: + to.copies++ + t = to.copyBase() } } - case ch := <-sf.copy: - // Somebody wants a copy of the current trie, grant them - ch <- sf.db.CopyTrie(sf.trie) + // Enqueue work, unless stopped. + fTask := tasks[i] + f := func() { + // Perform task + var err error + if len(fTask) == common.AddressLength { + _, err = t.GetAccount(common.BytesToAddress(fTask)) + } else { + _, err = t.GetStorage(to.sf.addr, fTask) + } + if err != nil { + log.Error("Trie prefetcher failed fetching", "root", to.sf.root, "err", err) + } + to.processingTasks.Done() + + // Return copy when we are done with it, so someone else can use it + // + // channel is buffered and will not block + to.copyChan <- t + } - case <-sf.stop: - // Termination is requested, abort and leave remaining tasks - return + // Enqueue task for processing (may spawn new goroutine + // if not at [maxConcurrency]) + // + // If workers are stopped before calling [Execute], this function may + // panic. + to.sf.p.workers.Execute(f) } } } + +func (to *trieOrchestrator) stopAcceptingTasks() { + to.taskLock.Lock() + defer to.taskLock.Unlock() + + if !to.tasksAllowed { + return + } + to.tasksAllowed = false + + // We don't clear [to.pendingTasks] here because + // it will be faster to prefetch them even though we + // are still waiting. 
+} + +// wait stops accepting new tasks and waits for ongoing tasks to complete. If +// wait is called, it is not necessary to call [abort]. +// +// It is safe to call wait multiple times. +func (to *trieOrchestrator) wait() { + // Prevent more tasks from being enqueued + to.stopAcceptingTasks() + + // Wait for processing tasks to complete + to.processingTasks.Wait() + + // Stop orchestrator loop + to.stopOnce.Do(func() { + close(to.stop) + }) + <-to.loopTerm +} + +// abort stops any ongoing tasks and shuts down the orchestrator loop. If abort +// is called, it is not necessary to call [wait]. +// +// It is safe to call abort multiple times. +func (to *trieOrchestrator) abort() { + // Prevent more tasks from being enqueued + to.stopAcceptingTasks() + + // Stop orchestrator loop + to.stopOnce.Do(func() { + close(to.stop) + }) + <-to.loopTerm + + // Capture any dangling pending tasks (processTasks + // may exit before enqueing all pendingTasks) + to.taskLock.Lock() + pendingCount := len(to.pendingTasks) + to.skips += pendingCount + to.pendingTasks = nil + to.taskLock.Unlock() + to.processingTasks.Add(-pendingCount) + + // Wait for processing tasks to complete + to.processingTasks.Wait() +} diff --git a/coreth/core/state/trie_prefetcher_test.go b/coreth/core/state/trie_prefetcher_test.go index a1783b78..285a7b16 100644 --- a/coreth/core/state/trie_prefetcher_test.go +++ b/coreth/core/state/trie_prefetcher_test.go @@ -32,11 +32,14 @@ import ( "time" "github.com/ava-labs/coreth/core/rawdb" + "github.com/ava-labs/coreth/core/types" "github.com/ethereum/go-ethereum/common" ) +const maxConcurrency = 4 + func filledStateDB() *StateDB { - state, _ := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()), nil) + state, _ := New(types.EmptyRootHash, NewDatabase(rawdb.NewMemoryDatabase()), nil) // Create an account and check if the retrieved balance is correct addr := common.HexToAddress("0xaffeaffeaffeaffeaffeaffeaffeaffeaffeaffe") @@ -55,21 +58,21 @@ func filledStateDB() 
*StateDB { func TestCopyAndClose(t *testing.T) { db := filledStateDB() - prefetcher := newTriePrefetcher(db.db, db.originalRoot, "") + prefetcher := newTriePrefetcher(db.db, db.originalRoot, "", maxConcurrency) skey := common.HexToHash("aaa") - prefetcher.prefetch(common.Hash{}, db.originalRoot, [][]byte{skey.Bytes()}) - prefetcher.prefetch(common.Hash{}, db.originalRoot, [][]byte{skey.Bytes()}) + prefetcher.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()}) + prefetcher.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()}) time.Sleep(1 * time.Second) a := prefetcher.trie(common.Hash{}, db.originalRoot) - prefetcher.prefetch(common.Hash{}, db.originalRoot, [][]byte{skey.Bytes()}) + prefetcher.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()}) b := prefetcher.trie(common.Hash{}, db.originalRoot) cpy := prefetcher.copy() - cpy.prefetch(common.Hash{}, db.originalRoot, [][]byte{skey.Bytes()}) - cpy.prefetch(common.Hash{}, db.originalRoot, [][]byte{skey.Bytes()}) + cpy.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()}) + cpy.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()}) c := cpy.trie(common.Hash{}, db.originalRoot) prefetcher.close() cpy2 := cpy.copy() - cpy2.prefetch(common.Hash{}, db.originalRoot, [][]byte{skey.Bytes()}) + cpy2.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()}) d := cpy2.trie(common.Hash{}, db.originalRoot) cpy.close() cpy2.close() @@ -80,9 +83,9 @@ func TestCopyAndClose(t *testing.T) { func TestUseAfterClose(t *testing.T) { db := filledStateDB() - prefetcher := newTriePrefetcher(db.db, db.originalRoot, "") + prefetcher := newTriePrefetcher(db.db, db.originalRoot, "", maxConcurrency) skey := common.HexToHash("aaa") - prefetcher.prefetch(common.Hash{}, db.originalRoot, [][]byte{skey.Bytes()}) + prefetcher.prefetch(common.Hash{}, db.originalRoot, 
common.Address{}, [][]byte{skey.Bytes()}) a := prefetcher.trie(common.Hash{}, db.originalRoot) prefetcher.close() b := prefetcher.trie(common.Hash{}, db.originalRoot) @@ -96,9 +99,9 @@ func TestUseAfterClose(t *testing.T) { func TestCopyClose(t *testing.T) { db := filledStateDB() - prefetcher := newTriePrefetcher(db.db, db.originalRoot, "") + prefetcher := newTriePrefetcher(db.db, db.originalRoot, "", maxConcurrency) skey := common.HexToHash("aaa") - prefetcher.prefetch(common.Hash{}, db.originalRoot, [][]byte{skey.Bytes()}) + prefetcher.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()}) cpy := prefetcher.copy() a := prefetcher.trie(common.Hash{}, db.originalRoot) b := cpy.trie(common.Hash{}, db.originalRoot) diff --git a/coreth/core/state_connector.go b/coreth/core/state_connector.go index bb1d949f..eb14a7c5 100644 --- a/coreth/core/state_connector.go +++ b/coreth/core/state_connector.go @@ -24,16 +24,16 @@ const ( ) var ( - flareActivationTime = big.NewInt(time.Date(2022, time.June, 1, 0, 0, 0, 0, time.UTC).Unix()) - costwoActivationTime = big.NewInt(time.Date(2022, time.June, 1, 0, 0, 0, 0, time.UTC).Unix()) - stagingActivationTime = big.NewInt(time.Date(2022, time.June, 1, 0, 0, 0, 0, time.UTC).Unix()) - localFlareActivationTime = big.NewInt(time.Date(2022, time.June, 1, 0, 0, 0, 0, time.UTC).Unix()) + flareActivationTime = uint64(time.Date(2022, time.June, 1, 0, 0, 0, 0, time.UTC).Unix()) + costwoActivationTime = uint64(time.Date(2022, time.June, 1, 0, 0, 0, 0, time.UTC).Unix()) + stagingActivationTime = uint64(time.Date(2022, time.June, 1, 0, 0, 0, 0, time.UTC).Unix()) + localFlareActivationTime = uint64(time.Date(2022, time.June, 1, 0, 0, 0, 0, time.UTC).Unix()) - songbirdActivationTime = big.NewInt(time.Date(2022, time.March, 28, 14, 0, 0, 0, time.UTC).Unix()) - songbirdOct22ForkTime = big.NewInt(time.Date(2022, time.October, 19, 15, 0, 0, 0, time.UTC).Unix()) + songbirdActivationTime = uint64(time.Date(2022, time.March, 28, 
14, 0, 0, 0, time.UTC).Unix()) + songbirdOct22ForkTime = uint64(time.Date(2022, time.October, 19, 15, 0, 0, 0, time.UTC).Unix()) - costonActivationTime = big.NewInt(time.Date(2022, time.February, 25, 17, 0, 0, 0, time.UTC).Unix()) - costonOct22ForkTime = big.NewInt(time.Date(2022, time.October, 6, 15, 0, 0, 0, time.UTC).Unix()) + costonActivationTime = uint64(time.Date(2022, time.February, 25, 17, 0, 0, 0, time.UTC).Unix()) + costonOct22ForkTime = uint64(time.Date(2022, time.October, 6, 15, 0, 0, 0, time.UTC).Unix()) ) type AttestationVotes struct { @@ -45,105 +45,59 @@ type AttestationVotes struct { } var ( - stateConnectorActivationVariants = utils.NewChainValue(func(*big.Int, common.Address) bool { return false }). + stateConnectorActivationVariants = utils.NewChainValue(func(uint64, common.Address) bool { return false }). AddValue(params.FlareChainID, GetStateConnectorIsActivatedAndCalledFlare). AddValue(params.SongbirdChainID, GetStateConnectorIsActivatedAndCalledSongbird). AddValue(params.CostwoChainID, GetStateConnectorIsActivatedAndCalledCostwo). AddValue(params.CostonChainID, GetStateConnectorIsActivatedAndCalledCoston). - AddValue(params.StagingChainID, GetStateConnectorIsActivatedAndCalledStaging). AddValue(params.LocalFlareChainID, GetStateConnectorIsActivatedAndCalledLocalFlare). 
AddValue(params.LocalChainID, GetStateConnectorIsActivatedAndCalledLocal) ) -func GetStateConnectorIsActivatedAndCalled(chainID *big.Int, blockTime *big.Int, to common.Address) bool { - return stateConnectorActivationVariants.GetValue(chainID)(blockTime, to) - - // Move variants to functions for better readability - // - // switch { - // case chainID.Cmp(params.FlareChainID) == 0: - // switch { - // case blockTime.Cmp(flareActivationTime) >= 0: - // return to == common.HexToAddress("0x1000000000000000000000000000000000000001") - // } - // case chainID.Cmp(params.SongbirdChainID) == 0: - // switch { - // case blockTime.Cmp(songbirdOct22ForkTime) > 0: - // return to == common.HexToAddress("0x0c13aDA1C7143Cf0a0795FFaB93eEBb6FAD6e4e3") - // case blockTime.Cmp(songbirdActivationTime) > 0: - // return to == common.HexToAddress("0x3A1b3220527aBA427d1e13e4b4c48c31460B4d91") - // } - // case chainID.Cmp(params.CostwoChainID) == 0: - // switch { - // case blockTime.Cmp(costwoActivationTime) >= 0: - // default: - // return to == common.HexToAddress("0x1000000000000000000000000000000000000001") - // } - // case chainID.Cmp(params.CostonChainID) == 0: - // switch { - // case blockTime.Cmp(costonOct22ForkTime) > 0: - // return to == common.HexToAddress("0x0c13aDA1C7143Cf0a0795FFaB93eEBb6FAD6e4e3") - // case blockTime.Cmp(costonActivationTime) > 0: - // return to == common.HexToAddress("0x947c76694491d3fD67a73688003c4d36C8780A97") - // default: - // return false - // } - // case chainID.Cmp(params.StagingChainID) == 0: - // switch { - // case blockTime.Cmp(stagingActivationTime) >= 0: - // return to == common.HexToAddress("0x1000000000000000000000000000000000000001") - // } - // case chainID.Cmp(params.LocalFlareChainID) == 0: - // switch { - // case blockTime.Cmp(localFlareActivationTime) >= 0: - // return to == common.HexToAddress("0x1000000000000000000000000000000000000001") - // } - // case chainID.Cmp(params.LocalChainID) == 0: - // return to == 
common.HexToAddress("0x1000000000000000000000000000000000000001") - // } - // return false +func GetStateConnectorIsActivatedAndCalled(isDurango bool, chainID *big.Int, blockTime uint64, to common.Address) bool { + return !isDurango && stateConnectorActivationVariants.GetValue(chainID)(blockTime, to) } -func GetStateConnectorIsActivatedAndCalledFlare(blockTime *big.Int, to common.Address) bool { - return blockTime.Cmp(flareActivationTime) >= 0 && +func GetStateConnectorIsActivatedAndCalledFlare(blockTime uint64, to common.Address) bool { + return blockTime >= flareActivationTime && to == common.HexToAddress("0x1000000000000000000000000000000000000001") } -func GetStateConnectorIsActivatedAndCalledCostwo(blockTime *big.Int, to common.Address) bool { - return blockTime.Cmp(costwoActivationTime) >= 0 && +func GetStateConnectorIsActivatedAndCalledCostwo(blockTime uint64, to common.Address) bool { + return blockTime >= costwoActivationTime && to == common.HexToAddress("0x1000000000000000000000000000000000000001") } -func GetStateConnectorIsActivatedAndCalledStaging(blockTime *big.Int, to common.Address) bool { - return blockTime.Cmp(stagingActivationTime) >= 0 && +func GetStateConnectorIsActivatedAndCalledStaging(blockTime uint64, to common.Address) bool { + return blockTime >= stagingActivationTime && to == common.HexToAddress("0x1000000000000000000000000000000000000001") } -func GetStateConnectorIsActivatedAndCalledLocalFlare(blockTime *big.Int, to common.Address) bool { - return blockTime.Cmp(localFlareActivationTime) >= 0 && +func GetStateConnectorIsActivatedAndCalledLocalFlare(blockTime uint64, to common.Address) bool { + return blockTime >= localFlareActivationTime && to == common.HexToAddress("0x1000000000000000000000000000000000000001") } -func GetStateConnectorIsActivatedAndCalledLocal(_ *big.Int, to common.Address) bool { +func GetStateConnectorIsActivatedAndCalledLocal(_ uint64, to common.Address) bool { return to == 
common.HexToAddress("0x1000000000000000000000000000000000000001") } -func GetStateConnectorIsActivatedAndCalledSongbird(blockTime *big.Int, to common.Address) bool { +func GetStateConnectorIsActivatedAndCalledSongbird(blockTime uint64, to common.Address) bool { switch { - case blockTime.Cmp(songbirdOct22ForkTime) > 0: + case blockTime > songbirdOct22ForkTime: return to == common.HexToAddress("0x0c13aDA1C7143Cf0a0795FFaB93eEBb6FAD6e4e3") - case blockTime.Cmp(songbirdActivationTime) > 0: + case blockTime > songbirdActivationTime: return to == common.HexToAddress("0x3A1b3220527aBA427d1e13e4b4c48c31460B4d91") default: return false } } -func GetStateConnectorIsActivatedAndCalledCoston(blockTime *big.Int, to common.Address) bool { +func GetStateConnectorIsActivatedAndCalledCoston(blockTime uint64, to common.Address) bool { switch { - case blockTime.Cmp(costonOct22ForkTime) > 0: + case blockTime > costonOct22ForkTime: return to == common.HexToAddress("0x0c13aDA1C7143Cf0a0795FFaB93eEBb6FAD6e4e3") - case blockTime.Cmp(costonActivationTime) > 0: + case blockTime > costonActivationTime: return to == common.HexToAddress("0x947c76694491d3fD67a73688003c4d36C8780A97") default: return false @@ -154,20 +108,20 @@ func GetStateConnectorIsActivatedAndCalledCoston(blockTime *big.Int, to common.A // address public constant SIGNAL_COINBASE = address(0x00000000000000000000000000000000000DEaD1); // https://gitlab.com/flarenetwork/flare-smart-contracts/-/blob/6b6e5480c3cf769b5a650b961992b4f082761d76/contracts/genesis/implementation/StateConnector.sol#L17 -func GetStateConnectorCoinbaseSignalAddr(chainID *big.Int, blockTime *big.Int) common.Address { +func GetStateConnectorCoinbaseSignalAddr(chainID *big.Int, blockTime uint64) common.Address { switch { - case chainID.Cmp(params.FlareChainID) == 0 || chainID.Cmp(params.CostwoChainID) == 0 || chainID.Cmp(params.StagingChainID) == 0 || chainID.Cmp(params.LocalFlareChainID) == 0: + case chainID.Cmp(params.FlareChainID) == 0 || 
chainID.Cmp(params.CostwoChainID) == 0 || chainID.Cmp(params.LocalFlareChainID) == 0: return common.HexToAddress("0x00000000000000000000000000000000000DEaD1") case chainID.Cmp(params.SongbirdChainID) == 0: switch { - case blockTime.Cmp(songbirdOct22ForkTime) > 0: + case blockTime > songbirdOct22ForkTime: return common.HexToAddress("0x00000000000000000000000000000000000DEaD1") default: return common.HexToAddress("0x000000000000000000000000000000000000dEaD") } case chainID.Cmp(params.CostonChainID) == 0: switch { - case blockTime.Cmp(costonOct22ForkTime) > 0: + case blockTime > costonOct22ForkTime: return common.HexToAddress("0x00000000000000000000000000000000000DEaD1") default: return common.HexToAddress("0x000000000000000000000000000000000000dEaD") @@ -180,7 +134,7 @@ func GetStateConnectorCoinbaseSignalAddr(chainID *big.Int, blockTime *big.Int) c // function submitAttestation(uint256 _bufferNumber, bytes32 _commitHash, bytes32 _merkleRoot, bytes32 _randomNumber) external returns (bool _isInitialBufferSlot) // https://gitlab.com/flarenetwork/flare-smart-contracts/-/blob/6b6e5480c3cf769b5a650b961992b4f082761d76/contracts/genesis/implementation/StateConnector.sol#L98 -func SubmitAttestationSelector(chainID *big.Int, blockTime *big.Int) []byte { +func SubmitAttestationSelector(chainID *big.Int, blockTime uint64) []byte { switch { default: return []byte{0xcf, 0xd1, 0xfd, 0xad} @@ -190,7 +144,7 @@ func SubmitAttestationSelector(chainID *big.Int, blockTime *big.Int) []byte { // function getAttestation(uint256 _bufferNumber) external view returns (bytes32 _merkleRoot) // https://gitlab.com/flarenetwork/flare-smart-contracts/-/blob/6b6e5480c3cf769b5a650b961992b4f082761d76/contracts/genesis/implementation/StateConnector.sol#L123 -func GetAttestationSelector(chainID *big.Int, blockTime *big.Int) []byte { +func GetAttestationSelector(chainID *big.Int, blockTime uint64) []byte { switch { default: return []byte{0x29, 0xbe, 0x4d, 0xb2} @@ -200,7 +154,7 @@ func 
GetAttestationSelector(chainID *big.Int, blockTime *big.Int) []byte { // function finaliseRound(uint256 _bufferNumber, bytes32 _merkleRoot) external // https://gitlab.com/flarenetwork/flare-smart-contracts/-/blob/6b6e5480c3cf769b5a650b961992b4f082761d76/contracts/genesis/implementation/StateConnector.sol#L137 -func FinaliseRoundSelector(chainID *big.Int, blockTime *big.Int) []byte { +func FinaliseRoundSelector(chainID *big.Int, blockTime uint64) []byte { switch { default: return []byte{0xea, 0xeb, 0xf6, 0xd3} @@ -208,10 +162,10 @@ func FinaliseRoundSelector(chainID *big.Int, blockTime *big.Int) []byte { } // The default attestation providers for the state connector will be drawn from the top weighted/performing FTSOs. -func GetDefaultAttestors(chainID *big.Int, blockTime *big.Int) []common.Address { +func GetDefaultAttestors(chainID *big.Int, blockTime uint64) []common.Address { switch { case chainID.Cmp(params.FlareChainID) == 0: - if blockTime.Cmp(submitterContractActivationTimeFlare) > 0 { + if blockTime > submitterContractActivationTimeFlare { return []common.Address{ common.HexToAddress("0x4E07E1F3DB3Dc9BAd56Cc829747cc0148234329F"), common.HexToAddress("0xB264Fad6Fdc65767998f93501945aB8F9108809d"), @@ -238,7 +192,7 @@ func GetDefaultAttestors(chainID *big.Int, blockTime *big.Int) []common.Address } case chainID.Cmp(params.SongbirdChainID) == 0: switch { - case blockTime.Cmp(submitterContractActivationTimeSongbird) > 0: + case blockTime > submitterContractActivationTimeSongbird: return []common.Address{ common.HexToAddress("0xcE397b9a395ace2e328030699bDDf4E2F049A05B"), common.HexToAddress("0xeDBb013BBC314124a9f842c1887e34cfeB03B052"), @@ -250,7 +204,7 @@ func GetDefaultAttestors(chainID *big.Int, blockTime *big.Int) []common.Address common.HexToAddress("0x35f4F0Bb73a6040F24927e1735B089d7769F7674"), common.HexToAddress("0x3B583C919fD4C863F3A17d11929346C687FfB7c3"), } - case blockTime.Cmp(songbirdOct22ForkTime) > 0: + case blockTime > songbirdOct22ForkTime: 
return []common.Address{ common.HexToAddress("0x2D3e7e4b19bDc920fd9C57BD3072A31F5a59FeC8"), common.HexToAddress("0x442DD539Fe78D43A1a9358FF3460CfE63e2bC9CC"), @@ -281,7 +235,7 @@ func GetDefaultAttestors(chainID *big.Int, blockTime *big.Int) []common.Address } case chainID.Cmp(params.CostonChainID) == 0: switch { - case blockTime.Cmp(costonOct22ForkTime) > 0: + case blockTime > costonOct22ForkTime: return []common.Address{ common.HexToAddress("0x30e4b4542b4aAf615838B113f14c46dE1469212e"), common.HexToAddress("0x3519E14183252794aaA52aA824f34482ef44cE1d"), @@ -298,18 +252,6 @@ func GetDefaultAttestors(chainID *big.Int, blockTime *big.Int) []common.Address common.HexToAddress("0x3a6e101103ec3d9267d08f484a6b70e1440a8255"), } } - case chainID.Cmp(params.StagingChainID) == 0: - return []common.Address{ - common.HexToAddress("0x0988Cf4828F4e4eD0cE7c07467E70e19095Ee152"), - common.HexToAddress("0x6BC7DCa62010D418eB72CCdc58561e00C5868Ef1"), - common.HexToAddress("0xE34Bb361536610a9DCcEa5292262e36AfF65c06c"), - common.HexToAddress("0x8A3D627D86A81F5D21683F4963565C63DB5e1309"), - common.HexToAddress("0x2D3e7e4b19bDc920fd9C57BD3072A31F5a59FeC8"), - common.HexToAddress("0x6455dC38fdF739b6fE021b30C7D9672C1c6DEb5c"), - common.HexToAddress("0x49893c5Dfc035F4eE4E46faC014f6D4bC80F7f92"), - common.HexToAddress("0x08e8b2Af4874e920de27723576A13d66008Af523"), - common.HexToAddress("0x5D2f75392DdDa69a2818021dd6a64937904c8352"), - } case chainID.Cmp(params.LocalFlareChainID) == 0: return []common.Address{ common.HexToAddress("0x8db97C7cEcE249c2b98bDC0226Cc4C2A57BF52FC"), // Private key: 56289e99c94b6912bfc12adc093c9b51124f0dc54ac7a766b2bc5ccf558d8027 @@ -372,7 +314,7 @@ func CountAttestations(attestationVotes AttestationVotes, numAttestors int, hash return attestationVotes } -func (st *StateTransition) FinalisePreviousRound(chainID *big.Int, timestamp *big.Int, currentRoundNumber []byte) error { +func (st *StateTransition) FinalisePreviousRound(chainID *big.Int, timestamp uint64, 
currentRoundNumber []byte) error { getAttestationSelector := GetAttestationSelector(chainID, timestamp) instructions := append(getAttestationSelector[:], currentRoundNumber[:]...) defaultAttestors := GetDefaultAttestors(chainID, timestamp) diff --git a/coreth/core/state_manager.go b/coreth/core/state_manager.go index 02521aa5..256cc759 100644 --- a/coreth/core/state_manager.go +++ b/coreth/core/state_manager.go @@ -32,8 +32,8 @@ import ( "time" "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/ethdb" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethdb" ) func init() { @@ -65,8 +65,8 @@ type TrieWriter interface { } type TrieDB interface { - Dereference(root common.Hash) - Commit(root common.Hash, report bool, callback func(common.Hash)) error + Dereference(root common.Hash) error + Commit(root common.Hash, report bool) error Size() (common.StorageSize, common.StorageSize) Cap(limit common.StorageSize) error } @@ -103,12 +103,11 @@ func (np *noPruningTrieWriter) InsertTrie(block *types.Block) error { func (np *noPruningTrieWriter) AcceptTrie(block *types.Block) error { // We don't need to call [Dereference] on the block root at the end of this // function because it is removed from the [TrieDB.Dirties] map in [Commit]. - return np.TrieDB.Commit(block.Root(), false, nil) + return np.TrieDB.Commit(block.Root(), false) } func (np *noPruningTrieWriter) RejectTrie(block *types.Block) error { - np.TrieDB.Dereference(block.Root()) - return nil + return np.TrieDB.Dereference(block.Root()) } func (np *noPruningTrieWriter) Shutdown() error { return nil } @@ -146,12 +145,14 @@ func (cm *cappedMemoryTrieWriter) AcceptTrie(block *types.Block) error { // // Note: It is safe to dereference roots that have been committed to disk // (they are no-ops). - cm.tipBuffer.Insert(root) + if err := cm.tipBuffer.Insert(root); err != nil { + return err + } // Commit this root if we have reached the [commitInterval]. 
modCommitInterval := block.NumberU64() % cm.commitInterval if modCommitInterval == 0 { - if err := cm.TrieDB.Commit(root, true, nil); err != nil { + if err := cm.TrieDB.Commit(root, true); err != nil { return fmt.Errorf("failed to commit trie for block %s: %w", block.Hash().Hex(), err) } return nil @@ -199,5 +200,5 @@ func (cm *cappedMemoryTrieWriter) Shutdown() error { // Attempt to commit last item added to [dereferenceQueue] on shutdown to avoid // re-processing the state on the next startup. - return cm.TrieDB.Commit(last, true, nil) + return cm.TrieDB.Commit(last, true) } diff --git a/coreth/core/state_manager_test.go b/coreth/core/state_manager_test.go index c25ae0c2..6afb779d 100644 --- a/coreth/core/state_manager_test.go +++ b/coreth/core/state_manager_test.go @@ -18,10 +18,11 @@ type MockTrieDB struct { LastCommit common.Hash } -func (t *MockTrieDB) Dereference(root common.Hash) { +func (t *MockTrieDB) Dereference(root common.Hash) error { t.LastDereference = root + return nil } -func (t *MockTrieDB) Commit(root common.Hash, report bool, callback func(common.Hash)) error { +func (t *MockTrieDB) Commit(root common.Hash, report bool) error { t.LastCommit = root return nil } @@ -44,7 +45,7 @@ func TestCappedMemoryTrieWriter(t *testing.T) { Root: common.BigToHash(bigI), Number: bigI, }, - nil, nil, nil, nil, nil, true, + nil, nil, nil, nil, ) assert.NoError(w.InsertTrie(block)) @@ -83,7 +84,7 @@ func TestNoPruningTrieWriter(t *testing.T) { Root: common.BigToHash(bigI), Number: bigI, }, - nil, nil, nil, nil, nil, true, + nil, nil, nil, nil, ) assert.NoError(w.InsertTrie(block)) diff --git a/coreth/core/state_prefetcher.go b/coreth/core/state_prefetcher.go deleted file mode 100644 index a1e0cde5..00000000 --- a/coreth/core/state_prefetcher.go +++ /dev/null @@ -1,105 +0,0 @@ -// (c) 2019-2020, Ava Labs, Inc. -// -// This file is a derived work, based on the go-ethereum library whose original -// notices appear below. 
-// -// It is distributed under a license compatible with the licensing terms of the -// original code from which it is derived. -// -// Much love to the original authors for their work. -// ********** -// Copyright 2019 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package core - -import ( - "math/big" - "sync/atomic" - - "github.com/ava-labs/coreth/consensus" - "github.com/ava-labs/coreth/core/state" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/core/vm" - "github.com/ava-labs/coreth/params" -) - -// statePrefetcher is a basic Prefetcher, which blindly executes a block on top -// of an arbitrary state with the goal of prefetching potentially useful state -// data from disk before the main block processor start executing. -type statePrefetcher struct { - config *params.ChainConfig // Chain configuration options - bc *BlockChain // Canonical block chain - engine consensus.Engine // Consensus engine used for block rewards -} - -// newStatePrefetcher initialises a new statePrefetcher. 
-func newStatePrefetcher(config *params.ChainConfig, bc *BlockChain, engine consensus.Engine) *statePrefetcher { - return &statePrefetcher{ - config: config, - bc: bc, - engine: engine, - } -} - -// Prefetch processes the state changes according to the Ethereum rules by running -// the transaction messages using the statedb, but any changes are discarded. The -// only goal is to pre-cache transaction signatures and state trie nodes. -func (p *statePrefetcher) Prefetch(block *types.Block, statedb *state.StateDB, cfg vm.Config, interrupt *uint32) { - var ( - header = block.Header() - gaspool = new(GasPool).AddGas(block.GasLimit()) - blockContext = NewEVMBlockContext(header, p.bc, nil) - evm = vm.NewEVM(blockContext, vm.TxContext{}, statedb, p.config, cfg) - signer = types.MakeSigner(p.config, header.Number, new(big.Int).SetUint64(header.Time)) - ) - // Iterate over and process the individual transactions - byzantium := p.config.IsByzantium(block.Number()) - for i, tx := range block.Transactions() { - // If block precaching was interrupted, abort - if interrupt != nil && atomic.LoadUint32(interrupt) == 1 { - return - } - // Convert the transaction into an executable message and pre-cache its sender - msg, err := tx.AsMessage(signer, header.BaseFee) - if err != nil { - return // Also invalid block, bail out - } - statedb.Prepare(tx.Hash(), i) - if err := precacheTransaction(msg, p.config, gaspool, statedb, header, evm); err != nil { - return // Ugh, something went horribly wrong, bail out - } - // If we're pre-byzantium, pre-load trie nodes for the intermediate root - if !byzantium { - statedb.IntermediateRoot(true) - } - } - // If were post-byzantium, pre-load trie nodes for the final root hash - if byzantium { - statedb.IntermediateRoot(true) - } -} - -// precacheTransaction attempts to apply a transaction to the given state database -// and uses the input parameters for its environment. 
The goal is not to execute -// the transaction successfully, rather to warm up touched data slots. -func precacheTransaction(msg types.Message, config *params.ChainConfig, gaspool *GasPool, statedb *state.StateDB, header *types.Header, evm *vm.EVM) error { - // Update the evm with the new transaction context. - evm.Reset(NewEVMTxContext(msg), statedb) - // Add addresses to access list if applicable - _, err := ApplyMessage(evm, msg, gaspool) - return err -} diff --git a/coreth/core/state_processor.go b/coreth/core/state_processor.go index 68d118fd..77fc4cc8 100644 --- a/coreth/core/state_processor.go +++ b/coreth/core/state_processor.go @@ -35,8 +35,11 @@ import ( "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/core/vm" "github.com/ava-labs/coreth/params" + "github.com/ava-labs/coreth/precompile/contract" + "github.com/ava-labs/coreth/precompile/modules" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/log" ) // StateProcessor is a basic Processor, which takes care of transitioning @@ -74,22 +77,28 @@ func (p *StateProcessor) Process(block *types.Block, parent *types.Header, state blockNumber = block.Number() allLogs []*types.Log gp = new(GasPool).AddGas(block.GasLimit()) - timestamp = new(big.Int).SetUint64(header.Time) ) - // Configure any stateful precompiles that should go into effect during this block. - p.config.CheckConfigurePrecompiles(new(big.Int).SetUint64(parent.Time), block, statedb) + // Configure any upgrades that should go into effect during this block. 
+ err := ApplyUpgrades(p.config, &parent.Time, block, statedb) + if err != nil { + log.Error("failed to configure precompiles processing block", "hash", block.Hash(), "number", block.NumberU64(), "timestamp", block.Time(), "err", err) + return nil, nil, 0, err + } - blockContext := NewEVMBlockContext(header, p.bc, nil) - vmenv := vm.NewEVM(blockContext, vm.TxContext{}, statedb, p.config, cfg) + var ( + context = NewEVMBlockContext(header, p.bc, nil) + vmenv = vm.NewEVM(context, vm.TxContext{}, statedb, p.config, cfg) + signer = types.MakeSigner(p.config, header.Number, header.Time) + ) // Iterate over and process the individual transactions for i, tx := range block.Transactions() { - msg, err := tx.AsMessage(types.MakeSigner(p.config, header.Number, timestamp), header.BaseFee) + msg, err := TransactionToMessage(tx, signer, header.BaseFee) if err != nil { return nil, nil, 0, fmt.Errorf("could not apply tx %d [%v]: %w", i, tx.Hash().Hex(), err) } - statedb.Prepare(tx.Hash(), i) - receipt, err := applyTransaction(msg, p.config, nil, gp, statedb, blockNumber, blockHash, tx, usedGas, vmenv) + statedb.SetTxContext(tx.Hash(), i) + receipt, err := applyTransaction(msg, p.config, gp, statedb, blockNumber, blockHash, tx, usedGas, vmenv) if err != nil { return nil, nil, 0, fmt.Errorf("could not apply tx %d [%v]: %w", i, tx.Hash().Hex(), err) } @@ -104,7 +113,7 @@ func (p *StateProcessor) Process(block *types.Block, parent *types.Header, state return receipts, allLogs, *usedGas, nil } -func applyTransaction(msg types.Message, config *params.ChainConfig, author *common.Address, gp *GasPool, statedb *state.StateDB, blockNumber *big.Int, blockHash common.Hash, tx *types.Transaction, usedGas *uint64, evm *vm.EVM) (*types.Receipt, error) { +func applyTransaction(msg *Message, config *params.ChainConfig, gp *GasPool, statedb *state.StateDB, blockNumber *big.Int, blockHash common.Hash, tx *types.Transaction, usedGas *uint64, evm *vm.EVM) (*types.Receipt, error) { // Create a new 
context to be used in the EVM environment. txContext := NewEVMTxContext(msg) evm.Reset(txContext, statedb) @@ -136,12 +145,12 @@ func applyTransaction(msg types.Message, config *params.ChainConfig, author *com receipt.GasUsed = result.UsedGas // If the transaction created a contract, store the creation address in the receipt. - if msg.To() == nil { + if msg.To == nil { receipt.ContractAddress = crypto.CreateAddress(evm.TxContext.Origin, tx.Nonce()) } // Set the receipt logs and create the bloom filter. - receipt.Logs = statedb.GetLogs(tx.Hash(), blockHash) + receipt.Logs = statedb.GetLogs(tx.Hash(), blockNumber.Uint64(), blockHash) receipt.Bloom = types.CreateBloom(types.Receipts{receipt}) receipt.BlockHash = blockHash receipt.BlockNumber = blockNumber @@ -153,13 +162,63 @@ func applyTransaction(msg types.Message, config *params.ChainConfig, author *com // and uses the input parameters for its environment. It returns the receipt // for the transaction, gas used and an error if the transaction failed, // indicating the block was invalid. 
-func ApplyTransaction(config *params.ChainConfig, bc ChainContext, author *common.Address, gp *GasPool, statedb *state.StateDB, header *types.Header, tx *types.Transaction, usedGas *uint64, cfg vm.Config) (*types.Receipt, error) { - msg, err := tx.AsMessage(types.MakeSigner(config, header.Number, new(big.Int).SetUint64(header.Time)), header.BaseFee) +func ApplyTransaction(config *params.ChainConfig, bc ChainContext, blockContext vm.BlockContext, gp *GasPool, statedb *state.StateDB, header *types.Header, tx *types.Transaction, usedGas *uint64, cfg vm.Config) (*types.Receipt, error) { + msg, err := TransactionToMessage(tx, types.MakeSigner(config, header.Number, header.Time), header.BaseFee) if err != nil { return nil, err } // Create a new context to be used in the EVM environment - blockContext := NewEVMBlockContext(header, bc, author) vmenv := vm.NewEVM(blockContext, vm.TxContext{}, statedb, config, cfg) - return applyTransaction(msg, config, author, gp, statedb, header.Number, header.Hash(), tx, usedGas, vmenv) + return applyTransaction(msg, config, gp, statedb, header.Number, header.Hash(), tx, usedGas, vmenv) +} + +// ApplyPrecompileActivations checks if any of the precompiles specified by the chain config are enabled or disabled by the block +// transition from [parentTimestamp] to the timestamp set in [blockContext]. If this is the case, it calls [Configure] +// to apply the necessary state transitions for the upgrade. +// This function is called within genesis setup to configure the starting state for precompiles enabled at genesis. +// In block processing and building, ApplyUpgrades is called instead which also applies state upgrades. +func ApplyPrecompileActivations(c *params.ChainConfig, parentTimestamp *uint64, blockContext contract.ConfigurationBlockContext, statedb *state.StateDB) error { + blockTimestamp := blockContext.Timestamp() + // Note: RegisteredModules returns precompiles sorted by module addresses. 
+ // This ensures that the order we call Configure for each precompile is consistent. + // This ensures even if precompiles read/write state other than their own they will observe + // an identical global state in a deterministic order when they are configured. + for _, module := range modules.RegisteredModules() { + for _, activatingConfig := range c.GetActivatingPrecompileConfigs(module.Address, parentTimestamp, blockTimestamp, c.PrecompileUpgrades) { + // If this transition activates the upgrade, configure the stateful precompile. + // (or deconfigure it if it is being disabled.) + if activatingConfig.IsDisabled() { + log.Info("Disabling precompile", "name", module.ConfigKey) + statedb.Suicide(module.Address) + // Calling Finalise here effectively commits Suicide call and wipes the contract state. + // This enables re-configuration of the same contract state in the same block. + // Without an immediate Finalise call after the Suicide, a reconfigured precompiled state can be wiped out + // since Suicide will be committed after the reconfiguration. + statedb.Finalise(true) + } else { + log.Info("Activating new precompile", "name", module.ConfigKey, "config", activatingConfig) + // Set the nonce of the precompile's address (as is done when a contract is created) to ensure + // that it is marked as non-empty and will not be cleaned up when the statedb is finalized. + statedb.SetNonce(module.Address, 1) + // Set the code of the precompile's address to a non-zero length byte slice to ensure that the precompile + // can be called from within Solidity contracts. Solidity adds a check before invoking a contract to ensure + // that it does not attempt to invoke a non-existent contract. 
+ statedb.SetCode(module.Address, []byte{0x1}) + if err := module.Configure(c, activatingConfig, statedb, blockContext); err != nil { + return fmt.Errorf("could not configure precompile, name: %s, reason: %w", module.ConfigKey, err) + } + } + } + } + return nil +} + +// ApplyUpgrades checks if any of the precompile or state upgrades specified by the chain config are activated by the block +// transition from [parentTimestamp] to the timestamp set in [header]. If this is the case, it calls [Configure] +// to apply the necessary state transitions for the upgrade. +// This function is called: +// - in block processing to update the state when processing a block. +// - in the miner to apply the state upgrades when producing a block. +func ApplyUpgrades(c *params.ChainConfig, parentTimestamp *uint64, blockContext contract.ConfigurationBlockContext, statedb *state.StateDB) error { + return ApplyPrecompileActivations(c, parentTimestamp, blockContext, statedb) } diff --git a/coreth/core/state_processor_test.go b/coreth/core/state_processor_test.go index 05e90592..db400702 100644 --- a/coreth/core/state_processor_test.go +++ b/coreth/core/state_processor_test.go @@ -27,7 +27,6 @@ package core import ( - "fmt" "math/big" "testing" @@ -38,6 +37,7 @@ import ( "github.com/ava-labs/coreth/core/vm" "github.com/ava-labs/coreth/params" "github.com/ava-labs/coreth/trie" + "github.com/ava-labs/coreth/utils" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" "golang.org/x/crypto/sha3" @@ -84,6 +84,8 @@ func mkDynamicCreationTx(nonce uint64, gasLimit uint64, gasTipCap, gasFeeCap *bi return tx } +func u64(val uint64) *uint64 { return &val } + // TestStateProcessorErrors tests the output from the 'core' errors // as defined in core/error.go. 
These errors are generated when the // blockchain imports bad blocks, meaning blocks which have valid headers but @@ -102,7 +104,7 @@ func TestStateProcessorErrors(t *testing.T) { }, GasLimit: params.CortinaGasLimit, } - blockchain, _ = NewBlockChain(db, DefaultCacheConfig, gspec, dummy.NewFaker(), vm.Config{}, common.Hash{}, false) + blockchain, _ = NewBlockChain(db, DefaultCacheConfig, gspec, dummy.NewCoinbaseFaker(), vm.Config{}, common.Hash{}, false) ) defer blockchain.Stop() bigNumber := new(big.Int).SetBytes(common.FromHex("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")) @@ -201,7 +203,7 @@ func TestStateProcessorErrors(t *testing.T) { want: "could not apply tx 0 [0xd82a0c2519acfeac9a948258c47e784acd20651d9d80f9a1c67b4137651c3a24]: insufficient funds for gas * price + value: address 0x71562b71999873DB5b286dF957af199Ec94617F7 have 4000000000000000000 want 2431633873983640103894990685182446064918669677978451844828609264166175722438635000", }, } { - block := GenerateBadBlock(gspec.ToBlock(nil), dummy.NewFaker(), tt.txs, gspec.Config) + block := GenerateBadBlock(gspec.ToBlock(), dummy.NewCoinbaseFaker(), tt.txs, gspec.Config) _, err := blockchain.InsertChain(types.Blocks{block}) if err == nil { t.Fatal("block imported without errors") @@ -221,7 +223,6 @@ func TestStateProcessorErrors(t *testing.T) { ChainID: big.NewInt(1), HomesteadBlock: big.NewInt(0), EIP150Block: big.NewInt(0), - EIP150Hash: common.Hash{}, EIP155Block: big.NewInt(0), EIP158Block: big.NewInt(0), ByzantiumBlock: big.NewInt(0), @@ -229,8 +230,8 @@ func TestStateProcessorErrors(t *testing.T) { PetersburgBlock: big.NewInt(0), IstanbulBlock: big.NewInt(0), MuirGlacierBlock: big.NewInt(0), - ApricotPhase1BlockTimestamp: big.NewInt(0), - ApricotPhase2BlockTimestamp: big.NewInt(0), + ApricotPhase1BlockTimestamp: utils.NewUint64(0), + ApricotPhase2BlockTimestamp: utils.NewUint64(0), }, Alloc: GenesisAlloc{ common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7"): 
GenesisAccount{ @@ -240,7 +241,7 @@ func TestStateProcessorErrors(t *testing.T) { }, GasLimit: params.ApricotPhase1GasLimit, } - blockchain, _ = NewBlockChain(db, DefaultCacheConfig, gspec, dummy.NewFaker(), vm.Config{}, common.Hash{}, false) + blockchain, _ = NewBlockChain(db, DefaultCacheConfig, gspec, dummy.NewCoinbaseFaker(), vm.Config{}, common.Hash{}, false) ) defer blockchain.Stop() for i, tt := range []struct { @@ -254,7 +255,7 @@ func TestStateProcessorErrors(t *testing.T) { want: "could not apply tx 0 [0x88626ac0d53cb65308f2416103c62bb1f18b805573d4f96a3640bbbfff13c14f]: transaction type not supported", }, } { - block := GenerateBadBlock(gspec.ToBlock(nil), dummy.NewFaker(), tt.txs, gspec.Config) + block := GenerateBadBlock(gspec.ToBlock(), dummy.NewCoinbaseFaker(), tt.txs, gspec.Config) _, err := blockchain.InsertChain(types.Blocks{block}) if err == nil { t.Fatal("block imported without errors") @@ -280,7 +281,7 @@ func TestStateProcessorErrors(t *testing.T) { }, GasLimit: params.CortinaGasLimit, } - blockchain, _ = NewBlockChain(db, DefaultCacheConfig, gspec, dummy.NewFaker(), vm.Config{}, common.Hash{}, false) + blockchain, _ = NewBlockChain(db, DefaultCacheConfig, gspec, dummy.NewCoinbaseFaker(), vm.Config{}, common.Hash{}, false) ) defer blockchain.Stop() for i, tt := range []struct { @@ -294,7 +295,7 @@ func TestStateProcessorErrors(t *testing.T) { want: "could not apply tx 0 [0x88626ac0d53cb65308f2416103c62bb1f18b805573d4f96a3640bbbfff13c14f]: sender not an eoa: address 0x71562b71999873DB5b286dF957af199Ec94617F7, codehash: 0x9280914443471259d4570a8661015ae4a5b80186dbc619658fb494bebc3da3d1", }, } { - block := GenerateBadBlock(gspec.ToBlock(nil), dummy.NewFaker(), tt.txs, gspec.Config) + block := GenerateBadBlock(gspec.ToBlock(), dummy.NewCoinbaseFaker(), tt.txs, gspec.Config) _, err := blockchain.InsertChain(types.Blocks{block}) if err == nil { t.Fatal("block imported without errors") @@ -305,7 +306,7 @@ func TestStateProcessorErrors(t *testing.T) { 
} } - // ErrMaxInitCodeSizeExceeded, for this we need extra Shanghai (DUpgrade/EIP-3860) enabled. + // ErrMaxInitCodeSizeExceeded, for this we need extra Shanghai (Durango/EIP-3860) enabled. { var ( db = rawdb.NewMemoryDatabase() @@ -316,7 +317,6 @@ func TestStateProcessorErrors(t *testing.T) { DAOForkBlock: big.NewInt(0), DAOForkSupport: true, EIP150Block: big.NewInt(0), - EIP150Hash: common.HexToHash("0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0"), EIP155Block: big.NewInt(0), EIP158Block: big.NewInt(0), ByzantiumBlock: big.NewInt(0), @@ -324,17 +324,17 @@ func TestStateProcessorErrors(t *testing.T) { PetersburgBlock: big.NewInt(0), IstanbulBlock: big.NewInt(0), MuirGlacierBlock: big.NewInt(0), - ApricotPhase1BlockTimestamp: big.NewInt(0), - ApricotPhase2BlockTimestamp: big.NewInt(0), - ApricotPhase3BlockTimestamp: big.NewInt(0), - ApricotPhase4BlockTimestamp: big.NewInt(0), - ApricotPhase5BlockTimestamp: big.NewInt(0), - ApricotPhasePre6BlockTimestamp: big.NewInt(0), - ApricotPhase6BlockTimestamp: big.NewInt(0), - ApricotPhasePost6BlockTimestamp: big.NewInt(0), - BanffBlockTimestamp: big.NewInt(0), - CortinaBlockTimestamp: big.NewInt(0), - DUpgradeBlockTimestamp: big.NewInt(0), + ApricotPhase1BlockTimestamp: utils.NewUint64(0), + ApricotPhase2BlockTimestamp: utils.NewUint64(0), + ApricotPhase3BlockTimestamp: utils.NewUint64(0), + ApricotPhase4BlockTimestamp: utils.NewUint64(0), + ApricotPhase5BlockTimestamp: utils.NewUint64(0), + ApricotPhasePre6BlockTimestamp: utils.NewUint64(0), + ApricotPhase6BlockTimestamp: utils.NewUint64(0), + ApricotPhasePost6BlockTimestamp: utils.NewUint64(0), + BanffBlockTimestamp: utils.NewUint64(0), + CortinaBlockTimestamp: utils.NewUint64(0), + DurangoBlockTimestamp: utils.NewUint64(0), }, Alloc: GenesisAlloc{ common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7"): GenesisAccount{ @@ -344,7 +344,7 @@ func TestStateProcessorErrors(t *testing.T) { }, GasLimit: params.CortinaGasLimit, } - blockchain, _ = 
NewBlockChain(db, DefaultCacheConfig, gspec, dummy.NewFaker(), vm.Config{}, common.Hash{}, false) + blockchain, _ = NewBlockChain(db, DefaultCacheConfig, gspec, dummy.NewCoinbaseFaker(), vm.Config{}, common.Hash{}, false) tooBigInitCode = [params.MaxInitCodeSize + 1]byte{} smallInitCode = [320]byte{} ) @@ -355,7 +355,6 @@ func TestStateProcessorErrors(t *testing.T) { }{ { // ErrMaxInitCodeSizeExceeded txs: []*types.Transaction{ - mkDynamicCreationTx(0, 500000, common.Big0, big.NewInt(params.ApricotPhase3InitialBaseFee), tooBigInitCode[:]), }, want: "could not apply tx 0 [0x18a05f40f29ff16d5287f6f88b21c9f3c7fbc268f707251144996294552c4cd6]: max initcode size exceeded: code size 49153 limit 49152", @@ -367,7 +366,7 @@ func TestStateProcessorErrors(t *testing.T) { want: "could not apply tx 0 [0x849278f616d51ab56bba399551317213ce7a10e4d9cbc3d14bb663e50cb7ab99]: intrinsic gas too low: have 54299, want 54300", }, } { - block := GenerateBadBlock(gspec.ToBlock(nil), dummy.NewFaker(), tt.txs, gspec.Config) + block := GenerateBadBlock(gspec.ToBlock(), dummy.NewCoinbaseFaker(), tt.txs, gspec.Config) _, err := blockchain.InsertChain(types.Blocks{block}) if err == nil { t.Fatal("block imported without errors") @@ -398,10 +397,10 @@ func GenerateBadBlock(parent *types.Block, engine consensus.Engine, txs types.Tr Time: parent.Time() + 10, UncleHash: types.EmptyUncleHash, } - if config.IsApricotPhase3(new(big.Int).SetUint64(header.Time)) { + if config.IsApricotPhase3(header.Time) { header.Extra, header.BaseFee, _ = dummy.CalcBaseFee(config, parent.Header(), header.Time) } - if config.IsApricotPhase4(new(big.Int).SetUint64(header.Time)) { + if config.IsApricotPhase4(header.Time) { header.BlockGasCost = big.NewInt(0) header.ExtDataGasUsed = big.NewInt(0) } @@ -422,115 +421,5 @@ func GenerateBadBlock(parent *types.Block, engine consensus.Engine, txs types.Tr } header.Root = common.BytesToHash(hasher.Sum(nil)) // Assemble and return the final block for sealing - return 
types.NewBlock(header, txs, nil, receipts, trie.NewStackTrie(nil), nil, true) -} - -func CostOfUsingGasLimitEachBlock(gspec *Genesis) { - genesis := gspec.ToBlock(nil) - totalPaid := big.NewInt(0) - parent := genesis.Header() - gasLimit := new(big.Int).SetUint64(gspec.GasLimit) - totalGasUsed := big.NewInt(0) - - for i := 1; i < 20; i++ { - header := nextBlock(gspec.Config, parent, gspec.GasLimit) - baseFee := header.BaseFee - gasCost := new(big.Int).Mul(baseFee, gasLimit) - totalGasUsed = new(big.Int).Add(totalGasUsed, gasLimit) - totalPaid = new(big.Int).Add(totalPaid, gasCost) - parent = header - - avg := new(big.Int).Div(totalPaid, totalGasUsed) - fmt.Printf( - "Number: %d, BaseFee: %vGWei, TotalGasUsed: %d, TotalPaid (Ether): %d, AvgGasPrice: %dGWei\n", - header.Number, - new(big.Int).Div(baseFee, big.NewInt(params.GWei)), // baseFee in GWei - totalGasUsed, - new(big.Int).Div(totalPaid, big.NewInt(params.Ether)), // totalPaid in Ether - new(big.Int).Div(avg, big.NewInt(params.GWei)), // avgGasPrice in GWei - ) - } -} - -func ExampleCostOfUsingGasLimitEachBlock() { - banff := &Genesis{ - Config: params.TestBanffChainConfig, - Alloc: GenesisAlloc{ - common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7"): GenesisAccount{ - Balance: big.NewInt(4000000000000000000), // 4 ether - Nonce: 0, - }, - }, - BaseFee: big.NewInt(225 * params.GWei), - GasLimit: params.ApricotPhase1GasLimit, - } - cortina := &Genesis{ - Config: params.TestCortinaChainConfig, - Alloc: GenesisAlloc{ - common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7"): GenesisAccount{ - Balance: big.NewInt(4000000000000000000), // 4 ether - Nonce: 0, - }, - }, - BaseFee: big.NewInt(225 * params.GWei), - GasLimit: params.CortinaGasLimit, - } - fmt.Println("----- banff ----") - CostOfUsingGasLimitEachBlock(banff) - fmt.Println("----- cortina ----") - CostOfUsingGasLimitEachBlock(cortina) - // Output: - // ----- banff ---- - // Number: 1, BaseFee: 225GWei, TotalGasUsed: 8000000, TotalPaid 
(Ether): 1, AvgGasPrice: 225GWei - // Number: 2, BaseFee: 222GWei, TotalGasUsed: 16000000, TotalPaid (Ether): 3, AvgGasPrice: 223GWei - // Number: 3, BaseFee: 222GWei, TotalGasUsed: 24000000, TotalPaid (Ether): 5, AvgGasPrice: 223GWei - // Number: 4, BaseFee: 226GWei, TotalGasUsed: 32000000, TotalPaid (Ether): 7, AvgGasPrice: 223GWei - // Number: 5, BaseFee: 233GWei, TotalGasUsed: 40000000, TotalPaid (Ether): 9, AvgGasPrice: 225GWei - // Number: 6, BaseFee: 240GWei, TotalGasUsed: 48000000, TotalPaid (Ether): 10, AvgGasPrice: 228GWei - // Number: 7, BaseFee: 248GWei, TotalGasUsed: 56000000, TotalPaid (Ether): 12, AvgGasPrice: 231GWei - // Number: 8, BaseFee: 256GWei, TotalGasUsed: 64000000, TotalPaid (Ether): 14, AvgGasPrice: 234GWei - // Number: 9, BaseFee: 264GWei, TotalGasUsed: 72000000, TotalPaid (Ether): 17, AvgGasPrice: 237GWei - // Number: 10, BaseFee: 272GWei, TotalGasUsed: 80000000, TotalPaid (Ether): 19, AvgGasPrice: 241GWei - // Number: 11, BaseFee: 281GWei, TotalGasUsed: 88000000, TotalPaid (Ether): 21, AvgGasPrice: 244GWei - // Number: 12, BaseFee: 289GWei, TotalGasUsed: 96000000, TotalPaid (Ether): 23, AvgGasPrice: 248GWei - // Number: 13, BaseFee: 298GWei, TotalGasUsed: 104000000, TotalPaid (Ether): 26, AvgGasPrice: 252GWei - // Number: 14, BaseFee: 308GWei, TotalGasUsed: 112000000, TotalPaid (Ether): 28, AvgGasPrice: 256GWei - // Number: 15, BaseFee: 318GWei, TotalGasUsed: 120000000, TotalPaid (Ether): 31, AvgGasPrice: 260GWei - // Number: 16, BaseFee: 328GWei, TotalGasUsed: 128000000, TotalPaid (Ether): 33, AvgGasPrice: 264GWei - // Number: 17, BaseFee: 338GWei, TotalGasUsed: 136000000, TotalPaid (Ether): 36, AvgGasPrice: 269GWei - // Number: 18, BaseFee: 349GWei, TotalGasUsed: 144000000, TotalPaid (Ether): 39, AvgGasPrice: 273GWei - // Number: 19, BaseFee: 360GWei, TotalGasUsed: 152000000, TotalPaid (Ether): 42, AvgGasPrice: 278GWei - // ----- cortina ---- - // Number: 1, BaseFee: 225GWei, TotalGasUsed: 15000000, TotalPaid (Ether): 3, AvgGasPrice: 
225GWei - // Number: 2, BaseFee: 225GWei, TotalGasUsed: 30000000, TotalPaid (Ether): 6, AvgGasPrice: 225GWei - // Number: 3, BaseFee: 231GWei, TotalGasUsed: 45000000, TotalPaid (Ether): 10, AvgGasPrice: 227GWei - // Number: 4, BaseFee: 244GWei, TotalGasUsed: 60000000, TotalPaid (Ether): 13, AvgGasPrice: 231GWei - // Number: 5, BaseFee: 264GWei, TotalGasUsed: 75000000, TotalPaid (Ether): 17, AvgGasPrice: 237GWei - // Number: 6, BaseFee: 286GWei, TotalGasUsed: 90000000, TotalPaid (Ether): 22, AvgGasPrice: 246GWei - // Number: 7, BaseFee: 310GWei, TotalGasUsed: 105000000, TotalPaid (Ether): 26, AvgGasPrice: 255GWei - // Number: 8, BaseFee: 336GWei, TotalGasUsed: 120000000, TotalPaid (Ether): 31, AvgGasPrice: 265GWei - // Number: 9, BaseFee: 364GWei, TotalGasUsed: 135000000, TotalPaid (Ether): 37, AvgGasPrice: 276GWei - // Number: 10, BaseFee: 394GWei, TotalGasUsed: 150000000, TotalPaid (Ether): 43, AvgGasPrice: 288GWei - // Number: 11, BaseFee: 427GWei, TotalGasUsed: 165000000, TotalPaid (Ether): 49, AvgGasPrice: 300GWei - // Number: 12, BaseFee: 463GWei, TotalGasUsed: 180000000, TotalPaid (Ether): 56, AvgGasPrice: 314GWei - // Number: 13, BaseFee: 501GWei, TotalGasUsed: 195000000, TotalPaid (Ether): 64, AvgGasPrice: 328GWei - // Number: 14, BaseFee: 543GWei, TotalGasUsed: 210000000, TotalPaid (Ether): 72, AvgGasPrice: 344GWei - // Number: 15, BaseFee: 588GWei, TotalGasUsed: 225000000, TotalPaid (Ether): 81, AvgGasPrice: 360GWei - // Number: 16, BaseFee: 637GWei, TotalGasUsed: 240000000, TotalPaid (Ether): 90, AvgGasPrice: 377GWei - // Number: 17, BaseFee: 690GWei, TotalGasUsed: 255000000, TotalPaid (Ether): 101, AvgGasPrice: 396GWei - // Number: 18, BaseFee: 748GWei, TotalGasUsed: 270000000, TotalPaid (Ether): 112, AvgGasPrice: 415GWei - // Number: 19, BaseFee: 810GWei, TotalGasUsed: 285000000, TotalPaid (Ether): 124, AvgGasPrice: 436GWei -} - -func nextBlock(config *params.ChainConfig, parent *types.Header, gasUsed uint64) *types.Header { - header := &types.Header{ 
- ParentHash: parent.Hash(), - Number: new(big.Int).Add(parent.Number, common.Big1), - Time: parent.Time + 2, - } - if config.IsApricotPhase3(new(big.Int).SetUint64(header.Time)) { - header.Extra, header.BaseFee, _ = dummy.CalcBaseFee(config, parent, header.Time) - } - header.GasUsed = gasUsed - return header + return types.NewBlock(header, txs, nil, receipts, trie.NewStackTrie(nil)) } diff --git a/coreth/core/state_transition.go b/coreth/core/state_transition.go index d512ffbe..b4fcaeab 100644 --- a/coreth/core/state_transition.go +++ b/coreth/core/state_transition.go @@ -33,69 +33,17 @@ import ( "math" "math/big" - "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/core/vm" "github.com/ava-labs/coreth/params" + "github.com/ava-labs/coreth/utils" "github.com/ava-labs/coreth/vmerrs" "github.com/ethereum/go-ethereum/common" + cmath "github.com/ethereum/go-ethereum/common/math" ) -var emptyCodeHash = crypto.Keccak256Hash(nil) - -// The State Transitioning Model -// -// A state transition is a change made when a transaction is applied to the current world -// state. The state transitioning model does all the necessary work to work out a valid new -// state root. -// -// 1. Nonce handling -// 2. Pre pay gas -// 3. Create a new state object if the recipient is \0*32 -// 4. Value transfer -// -// == If contract creation == -// -// 4a. Attempt to run transaction data -// 4b. If valid, use result as code for the new state object -// -// == end == -// -// 5. Run Script section -// 6. Derive new state root -type StateTransition struct { - gp *GasPool - msg Message - gas uint64 - gasPrice *big.Int - gasFeeCap *big.Int - gasTipCap *big.Int - initialGas uint64 - value *big.Int - data []byte - state vm.StateDB - evm *vm.EVM -} - -// Message represents a message sent to a contract. 
-type Message interface { - From() common.Address - To() *common.Address - - GasPrice() *big.Int - GasFeeCap() *big.Int - GasTipCap() *big.Int - Gas() uint64 - Value() *big.Int - - Nonce() uint64 - IsFake() bool - Data() []byte - AccessList() types.AccessList -} - // ExecutionResult includes all output after executing given evm // message no matter the execution itself is successful or not. type ExecutionResult struct { @@ -143,7 +91,7 @@ func (st *StateTransition) GetChainID() *big.Int { return st.evm.ChainConfig().ChainID } -func (st *StateTransition) GetBlockTime() *big.Int { +func (st *StateTransition) GetBlockTime() uint64 { return st.evm.Context.Time } @@ -156,10 +104,10 @@ func (st *StateTransition) AddBalance(addr common.Address, amount *big.Int) { } // IntrinsicGas computes the 'intrinsic gas' for a message with the given data. -func IntrinsicGas(data []byte, accessList types.AccessList, isContractCreation bool, isHomestead, isEIP2028 bool, isEIP3860 bool) (uint64, error) { +func IntrinsicGas(data []byte, accessList types.AccessList, isContractCreation bool, rules params.Rules) (uint64, error) { // Set the starting gas for the raw transaction var gas uint64 - if isContractCreation && isHomestead { + if isContractCreation && rules.IsHomestead { gas = params.TxGasContractCreation } else { gas = params.TxGas @@ -176,7 +124,7 @@ func IntrinsicGas(data []byte, accessList types.AccessList, isContractCreation b } // Make sure we don't exceed uint64 for all data combinations nonZeroGas := params.TxDataNonZeroGasFrontier - if isEIP2028 { + if rules.IsIstanbul { nonZeroGas = params.TxDataNonZeroGasEIP2028 } if (math.MaxUint64-gas)/nonZeroGas < nz { @@ -190,7 +138,7 @@ func IntrinsicGas(data []byte, accessList types.AccessList, isContractCreation b } gas += z * params.TxDataZeroGas - if isContractCreation && isEIP3860 { + if isContractCreation && rules.IsDurango { lenWords := toWordSize(dataLen) if (math.MaxUint64-gas)/params.InitCodeWordGas < lenWords { return 0, 
ErrGasUintOverflow @@ -199,9 +147,55 @@ func IntrinsicGas(data []byte, accessList types.AccessList, isContractCreation b } } if accessList != nil { + accessListGas, err := accessListGas(rules, accessList) + if err != nil { + return 0, err + } + totalGas, overflow := cmath.SafeAdd(gas, accessListGas) + if overflow { + return 0, ErrGasUintOverflow + } + gas = totalGas + } + + return gas, nil +} + +func accessListGas(rules params.Rules, accessList types.AccessList) (uint64, error) { + var gas uint64 + if !rules.PredicatersExist() { gas += uint64(len(accessList)) * params.TxAccessListAddressGas gas += uint64(accessList.StorageKeys()) * params.TxAccessListStorageKeyGas + return gas, nil + } + + for _, accessTuple := range accessList { + address := accessTuple.Address + predicaterContract, ok := rules.Predicaters[address] + if !ok { + // Previous access list gas calculation does not use safemath because an overflow would not be possible with + // the size of access lists that could be included in a block and standard access list gas costs. + // Therefore, we only check for overflow when adding to [totalGas], which could include the sum of values + // returned by a predicate. + accessTupleGas := params.TxAccessListAddressGas + uint64(len(accessTuple.StorageKeys))*params.TxAccessListStorageKeyGas + totalGas, overflow := cmath.SafeAdd(gas, accessTupleGas) + if overflow { + return 0, ErrGasUintOverflow + } + gas = totalGas + } else { + predicateGas, err := predicaterContract.PredicateGas(utils.HashSliceToBytes(accessTuple.StorageKeys)) + if err != nil { + return 0, err + } + totalGas, overflow := cmath.SafeAdd(gas, predicateGas) + if overflow { + return 0, ErrGasUintOverflow + } + gas = totalGas + } } + return gas, nil } @@ -214,19 +208,47 @@ func toWordSize(size uint64) uint64 { return (size + 31) / 32 } -// NewStateTransition initialises and returns a new state transition object. 
-func NewStateTransition(evm *vm.EVM, msg Message, gp *GasPool) *StateTransition { - return &StateTransition{ - gp: gp, - evm: evm, - msg: msg, - gasPrice: msg.GasPrice(), - gasFeeCap: msg.GasFeeCap(), - gasTipCap: msg.GasTipCap(), - value: msg.Value(), - data: msg.Data(), - state: evm.StateDB, +// A Message contains the data derived from a single transaction that is relevant to state +// processing. +type Message struct { + To *common.Address + From common.Address + Nonce uint64 + Value *big.Int + GasLimit uint64 + GasPrice *big.Int + GasFeeCap *big.Int + GasTipCap *big.Int + Data []byte + AccessList types.AccessList + + // When SkipAccountChecks is true, the message nonce is not checked against the + // account nonce in state. It also disables checking that the sender is an EOA. + // This field will be set to true for operations like RPC eth_call. + SkipAccountChecks bool +} + +// TransactionToMessage converts a transaction into a Message. +func TransactionToMessage(tx *types.Transaction, s types.Signer, baseFee *big.Int) (*Message, error) { + msg := &Message{ + Nonce: tx.Nonce(), + GasLimit: tx.Gas(), + GasPrice: new(big.Int).Set(tx.GasPrice()), + GasFeeCap: new(big.Int).Set(tx.GasFeeCap()), + GasTipCap: new(big.Int).Set(tx.GasTipCap()), + To: tx.To(), + Value: tx.Value(), + Data: tx.Data(), + AccessList: tx.AccessList(), + SkipAccountChecks: false, } + // If baseFee provided, set gasPrice to effectiveGasPrice. + if baseFee != nil { + msg.GasPrice = cmath.BigMin(msg.GasPrice.Add(msg.GasTipCap, baseFee), msg.GasFeeCap) + } + var err error + msg.From, err = types.Sender(s, tx) + return msg, err } // ApplyMessage computes the new state by applying the given message @@ -236,86 +258,130 @@ func NewStateTransition(evm *vm.EVM, msg Message, gp *GasPool) *StateTransition // the gas used (which includes gas refunds) and an error if it failed. 
An error always // indicates a core error meaning that the message would always fail for that particular // state and would never be accepted within a block. -func ApplyMessage(evm *vm.EVM, msg Message, gp *GasPool) (*ExecutionResult, error) { +func ApplyMessage(evm *vm.EVM, msg *Message, gp *GasPool) (*ExecutionResult, error) { return NewStateTransition(evm, msg, gp).TransitionDb() } +// StateTransition represents a state transition. +// +// == The State Transitioning Model +// +// A state transition is a change made when a transaction is applied to the current world +// state. The state transitioning model does all the necessary work to work out a valid new +// state root. +// +// 1. Nonce handling +// 2. Pre pay gas +// 3. Create a new state object if the recipient is nil +// 4. Value transfer +// +// == If contract creation == +// +// 4a. Attempt to run transaction data +// 4b. If valid, use result as code for the new state object +// +// == end == +// +// 5. Run Script section +// 6. Derive new state root +type StateTransition struct { + gp *GasPool + msg *Message + gasRemaining uint64 + initialGas uint64 + state vm.StateDB + evm *vm.EVM +} + +// NewStateTransition initialises and returns a new state transition object. +func NewStateTransition(evm *vm.EVM, msg *Message, gp *GasPool) *StateTransition { + return &StateTransition{ + gp: gp, + evm: evm, + msg: msg, + state: evm.StateDB, + } +} + // to returns the recipient of the message. 
func (st *StateTransition) to() common.Address { - if st.msg == nil || st.msg.To() == nil /* contract creation */ { + if st.msg == nil || st.msg.To == nil /* contract creation */ { return common.Address{} } - return *st.msg.To() + return *st.msg.To } func (st *StateTransition) buyGas() error { - mgval := new(big.Int).SetUint64(st.msg.Gas()) - mgval = mgval.Mul(mgval, st.gasPrice) + mgval := new(big.Int).SetUint64(st.msg.GasLimit) + mgval = mgval.Mul(mgval, st.msg.GasPrice) balanceCheck := mgval - if st.gasFeeCap != nil { - balanceCheck = new(big.Int).SetUint64(st.msg.Gas()) - balanceCheck.Mul(balanceCheck, st.gasFeeCap) - balanceCheck.Add(balanceCheck, st.value) + if st.msg.GasFeeCap != nil { + balanceCheck = new(big.Int).SetUint64(st.msg.GasLimit) + balanceCheck.Mul(balanceCheck, st.msg.GasFeeCap) + balanceCheck.Add(balanceCheck, st.msg.Value) } - if have, want := st.state.GetBalance(st.msg.From()), balanceCheck; have.Cmp(want) < 0 { - return fmt.Errorf("%w: address %v have %v want %v", ErrInsufficientFunds, st.msg.From().Hex(), have, want) + if have, want := st.state.GetBalance(st.msg.From), balanceCheck; have.Cmp(want) < 0 { + return fmt.Errorf("%w: address %v have %v want %v", ErrInsufficientFunds, st.msg.From.Hex(), have, want) } - if err := st.gp.SubGas(st.msg.Gas()); err != nil { + if err := st.gp.SubGas(st.msg.GasLimit); err != nil { return err } - st.gas += st.msg.Gas() + st.gasRemaining += st.msg.GasLimit - st.initialGas = st.msg.Gas() - st.state.SubBalance(st.msg.From(), mgval) + st.initialGas = st.msg.GasLimit + st.state.SubBalance(st.msg.From, mgval) return nil } func (st *StateTransition) preCheck() error { // Only check transactions that are not fake - if !st.msg.IsFake() { + msg := st.msg + if !msg.SkipAccountChecks { // Make sure this transaction's nonce is correct. 
- stNonce := st.state.GetNonce(st.msg.From()) - if msgNonce := st.msg.Nonce(); stNonce < msgNonce { + stNonce := st.state.GetNonce(msg.From) + if msgNonce := msg.Nonce; stNonce < msgNonce { return fmt.Errorf("%w: address %v, tx: %d state: %d", ErrNonceTooHigh, - st.msg.From().Hex(), msgNonce, stNonce) + msg.From.Hex(), msgNonce, stNonce) } else if stNonce > msgNonce { return fmt.Errorf("%w: address %v, tx: %d state: %d", ErrNonceTooLow, - st.msg.From().Hex(), msgNonce, stNonce) + msg.From.Hex(), msgNonce, stNonce) } else if stNonce+1 < stNonce { return fmt.Errorf("%w: address %v, nonce: %d", ErrNonceMax, - st.msg.From().Hex(), stNonce) + msg.From.Hex(), stNonce) } // Make sure the sender is an EOA - if codeHash := st.state.GetCodeHash(st.msg.From()); codeHash != emptyCodeHash && codeHash != (common.Hash{}) { + codeHash := st.state.GetCodeHash(msg.From) + if codeHash != (common.Hash{}) && codeHash != types.EmptyCodeHash { return fmt.Errorf("%w: address %v, codehash: %s", ErrSenderNoEOA, - st.msg.From().Hex(), codeHash) + msg.From.Hex(), codeHash) } // Make sure the sender is not prohibited - if vm.IsProhibited(st.msg.From()) { - return fmt.Errorf("%w: address %v", vmerrs.ErrAddrProhibited, st.msg.From()) + if vm.IsProhibited(msg.From) { + return fmt.Errorf("%w: address %v", vmerrs.ErrAddrProhibited, msg.From) } } + // Make sure that transaction gasFeeCap is greater than the baseFee (post london) if st.evm.ChainConfig().IsApricotPhase3(st.evm.Context.Time) { // Skip the checks if gas fields are zero and baseFee was explicitly disabled (eth_call) - if !st.evm.Config.NoBaseFee || st.gasFeeCap.BitLen() > 0 || st.gasTipCap.BitLen() > 0 { - if l := st.gasFeeCap.BitLen(); l > 256 { + if !st.evm.Config.NoBaseFee || msg.GasFeeCap.BitLen() > 0 || msg.GasTipCap.BitLen() > 0 { + if l := msg.GasFeeCap.BitLen(); l > 256 { return fmt.Errorf("%w: address %v, maxFeePerGas bit length: %d", ErrFeeCapVeryHigh, - st.msg.From().Hex(), l) + msg.From.Hex(), l) } - if l := 
st.gasTipCap.BitLen(); l > 256 { + if l := msg.GasTipCap.BitLen(); l > 256 { return fmt.Errorf("%w: address %v, maxPriorityFeePerGas bit length: %d", ErrTipVeryHigh, - st.msg.From().Hex(), l) + msg.From.Hex(), l) } - if st.gasFeeCap.Cmp(st.gasTipCap) < 0 { + if msg.GasFeeCap.Cmp(msg.GasTipCap) < 0 { return fmt.Errorf("%w: address %v, maxPriorityFeePerGas: %s, maxFeePerGas: %s", ErrTipAboveFeeCap, - st.msg.From().Hex(), st.gasTipCap, st.gasFeeCap) + msg.From.Hex(), msg.GasTipCap, msg.GasFeeCap) } // This will panic if baseFee is nil, but basefee presence is verified // as part of header validation. - if st.gasFeeCap.Cmp(st.evm.Context.BaseFee) < 0 { + if msg.GasFeeCap.Cmp(st.evm.Context.BaseFee) < 0 { return fmt.Errorf("%w: address %v, maxFeePerGas: %s baseFee: %s", ErrFeeCapTooLow, - st.msg.From().Hex(), st.gasFeeCap, st.evm.Context.BaseFee) + msg.From.Hex(), msg.GasFeeCap, st.evm.Context.BaseFee) } } } @@ -348,49 +414,50 @@ func (st *StateTransition) TransitionDb() (*ExecutionResult, error) { return nil, err } - if st.evm.Config.Debug { - st.evm.Config.Tracer.CaptureTxStart(st.initialGas) + if tracer := st.evm.Config.Tracer; tracer != nil { + tracer.CaptureTxStart(st.initialGas) defer func() { - st.evm.Config.Tracer.CaptureTxEnd(st.gas) + tracer.CaptureTxEnd(st.gasRemaining) }() } var ( msg = st.msg - sender = vm.AccountRef(msg.From()) + sender = vm.AccountRef(msg.From) rules = st.evm.ChainConfig().AvalancheRules(st.evm.Context.BlockNumber, st.evm.Context.Time) - contractCreation = msg.To() == nil + contractCreation = msg.To == nil ) // Check clauses 4-5, subtract intrinsic gas if everything is correct - gas, err := IntrinsicGas(st.data, st.msg.AccessList(), contractCreation, rules.IsHomestead, rules.IsIstanbul, rules.IsDUpgrade) + gas, err := IntrinsicGas(msg.Data, msg.AccessList, contractCreation, rules) if err != nil { return nil, err } - if st.gas < gas { - return nil, fmt.Errorf("%w: have %d, want %d", ErrIntrinsicGas, st.gas, gas) + if st.gasRemaining < gas 
{ + return nil, fmt.Errorf("%w: have %d, want %d", ErrIntrinsicGas, st.gasRemaining, gas) } - st.gas -= gas + st.gasRemaining -= gas // Check clause 6 - if msg.Value().Sign() > 0 && !st.evm.Context.CanTransfer(st.state, msg.From(), msg.Value()) { - return nil, fmt.Errorf("%w: address %v", ErrInsufficientFundsForTransfer, msg.From().Hex()) + if msg.Value.Sign() > 0 && !st.evm.Context.CanTransfer(st.state, msg.From, msg.Value) { + return nil, fmt.Errorf("%w: address %v", ErrInsufficientFundsForTransfer, msg.From.Hex()) } // Check whether the init code size has been exceeded. - if rules.IsDUpgrade && contractCreation && len(st.data) > params.MaxInitCodeSize { - return nil, fmt.Errorf("%w: code size %v limit %v", vmerrs.ErrMaxInitCodeSizeExceeded, len(st.data), params.MaxInitCodeSize) + if rules.IsDurango && contractCreation && len(msg.Data) > params.MaxInitCodeSize { + return nil, fmt.Errorf("%w: code size %v limit %v", vmerrs.ErrMaxInitCodeSizeExceeded, len(msg.Data), params.MaxInitCodeSize) } - // Set up the initial access list. 
- if rules.IsApricotPhase2 { - st.state.PrepareAccessList(msg.From(), msg.To(), vm.ActivePrecompiles(rules), msg.AccessList()) - } + // Execute the preparatory steps for state transition which includes: + // - prepare accessList(post-berlin/ApricotPhase2) + // - reset transient storage(eip 1153) + st.state.Prepare(rules, msg.From, st.evm.Context.Coinbase, msg.To, vm.ActivePrecompiles(rules), msg.AccessList) + var ( ret []byte vmerr error // vm errors do not affect consensus and are therefore not assigned to err chainID *big.Int - timestamp *big.Int + timestamp uint64 ) chainID = st.evm.ChainConfig().ChainID @@ -402,36 +469,36 @@ func (st *StateTransition) TransitionDb() (*ExecutionResult, error) { } if contractCreation { - ret, _, st.gas, vmerr = st.evm.Create(sender, st.data, st.gas, st.value) + ret, _, st.gasRemaining, vmerr = st.evm.Create(sender, msg.Data, st.gasRemaining, msg.Value) } else { // Increment the nonce for the next transaction - st.state.SetNonce(msg.From(), st.state.GetNonce(sender.Address())+1) - ret, st.gas, vmerr = st.evm.Call(sender, st.to(), st.data, st.gas, st.value) - if vmerr == nil && chainID != nil && timestamp != nil { - if isSongbird { - handleSongbirdTransitionDbContracts(st, chainID, timestamp, msg, ret) - } else { - handleFlareTransitionDbContracts(st, chainID, timestamp, msg, ret) + st.state.SetNonce(msg.From, st.state.GetNonce(sender.Address())+1) + ret, st.gasRemaining, vmerr = st.evm.Call(sender, st.to(), msg.Data, st.gasRemaining, msg.Value) + if vmerr == nil && chainID != nil { + if isSongbird { // Songbird, Coston, Local (Songbird) + handleSongbirdTransitionDbContracts(st, rules.IsDurango, chainID, timestamp, msg, ret) + } else if isFlare { // Flare, Coston2, Local (Flare) + handleFlareTransitionDbContracts(st, rules.IsDurango, chainID, timestamp, msg, ret) } } } st.refundGas(rules.IsApricotPhase1) - if vmerr == nil && IsPrioritisedContractCall(chainID, timestamp, msg.To(), st.data, ret, st.initialGas) { + if vmerr == nil && 
IsPrioritisedContractCall(chainID, timestamp, msg.To, msg.Data, ret, st.initialGas) { nominalGasUsed := params.TxGas // 21000 nominalFee := new(big.Int).Mul(new(big.Int).SetUint64(nominalGasUsed), new(big.Int).SetUint64(nominalGasPrice)) actualGasUsed := st.gasUsed() - actualGasPrice := st.gasPrice + actualGasPrice := msg.GasPrice actualFee := new(big.Int).Mul(new(big.Int).SetUint64(actualGasUsed), actualGasPrice) if actualFee.Cmp(nominalFee) > 0 { feeRefund := new(big.Int).Sub(actualFee, nominalFee) - st.state.AddBalance(st.msg.From(), feeRefund) + st.state.AddBalance(st.msg.From, feeRefund) st.state.AddBalance(burnAddress, nominalFee) } else { st.state.AddBalance(burnAddress, actualFee) } } else { - st.state.AddBalance(burnAddress, new(big.Int).Mul(new(big.Int).SetUint64(st.gasUsed()), st.gasPrice)) + st.state.AddBalance(burnAddress, new(big.Int).Mul(new(big.Int).SetUint64(st.gasUsed()), msg.GasPrice)) } // Call the daemon if there is no vm error @@ -447,47 +514,47 @@ func (st *StateTransition) TransitionDb() (*ExecutionResult, error) { }, nil } -func handleSongbirdTransitionDbContracts(st *StateTransition, chainID *big.Int, timestamp *big.Int, msg Message, ret []byte) { - if GetStateConnectorIsActivatedAndCalled(chainID, timestamp, *msg.To()) && - len(st.data) >= 36 && len(ret) == 32 && - bytes.Equal(st.data[0:4], SubmitAttestationSelector(chainID, timestamp)) && +func handleSongbirdTransitionDbContracts(st *StateTransition, isDurango bool, chainID *big.Int, timestamp uint64, msg *Message, ret []byte) { + if GetStateConnectorIsActivatedAndCalled(isDurango, chainID, timestamp, *msg.To) && + len(msg.Data) >= 36 && len(ret) == 32 && + bytes.Equal(msg.Data[0:4], SubmitAttestationSelector(chainID, timestamp)) && binary.BigEndian.Uint64(ret[24:32]) > 0 { - if err := st.FinalisePreviousRound(chainID, timestamp, st.data[4:36]); err != nil { + if err := st.FinalisePreviousRound(chainID, timestamp, msg.Data[4:36]); err != nil { log.Warn("Error finalising state connector 
round", "error", err) } } } -func handleFlareTransitionDbContracts(st *StateTransition, chainID *big.Int, timestamp *big.Int, msg Message, ret []byte) { +func handleFlareTransitionDbContracts(st *StateTransition, isDurango bool, chainID *big.Int, timestamp uint64, msg *Message, ret []byte) { if st.evm.Context.Coinbase != common.HexToAddress("0x0100000000000000000000000000000000000000") { return } - if GetStateConnectorIsActivatedAndCalled(chainID, timestamp, *msg.To()) && - len(st.data) >= 36 && len(ret) == 32 && - bytes.Equal(st.data[0:4], SubmitAttestationSelector(chainID, timestamp)) && + if GetStateConnectorIsActivatedAndCalled(isDurango, chainID, timestamp, *msg.To) && + len(msg.Data) >= 36 && len(ret) == 32 && + bytes.Equal(msg.Data[0:4], SubmitAttestationSelector(chainID, timestamp)) && binary.BigEndian.Uint64(ret[24:32]) > 0 { - if err := st.FinalisePreviousRound(chainID, timestamp, st.data[4:36]); err != nil { + if err := st.FinalisePreviousRound(chainID, timestamp, msg.Data[4:36]); err != nil { log.Warn("Error finalising state connector round", "error", err) } - } else if GetGovernanceSettingIsActivatedAndCalled(chainID, timestamp, *msg.To()) && len(st.data) == 36 { - if bytes.Equal(st.data[0:4], SetGovernanceAddressSelector(chainID, timestamp)) { - if err := st.SetGovernanceAddress(chainID, timestamp, st.data[4:36]); err != nil { + } else if GetGovernanceSettingIsActivatedAndCalled(chainID, timestamp, *msg.To) && len(msg.Data) == 36 { + if bytes.Equal(msg.Data[0:4], SetGovernanceAddressSelector(chainID, timestamp)) { + if err := st.SetGovernanceAddress(chainID, timestamp, msg.Data[4:36]); err != nil { log.Warn("Error setting governance address", "error", err) } - } else if bytes.Equal(st.data[0:4], SetTimelockSelector(chainID, timestamp)) { - if err := st.SetTimelock(chainID, timestamp, st.data[4:36]); err != nil { + } else if bytes.Equal(msg.Data[0:4], SetTimelockSelector(chainID, timestamp)) { + if err := st.SetTimelock(chainID, timestamp, 
msg.Data[4:36]); err != nil { log.Warn("Error setting governance timelock", "error", err) } } - } else if GetInitialAirdropChangeIsActivatedAndCalled(chainID, timestamp, *msg.To()) && len(st.data) == 4 { - if bytes.Equal(st.data[0:4], UpdateInitialAirdropAddressSelector(chainID, timestamp)) { + } else if GetInitialAirdropChangeIsActivatedAndCalled(chainID, timestamp, *msg.To) && len(msg.Data) == 4 { + if bytes.Equal(msg.Data[0:4], UpdateInitialAirdropAddressSelector(chainID, timestamp)) { if err := st.UpdateInitialAirdropAddress(chainID, timestamp); err != nil { log.Warn("Error updating initialAirdrop contract", "error", err) } } - } else if GetDistributionChangeIsActivatedAndCalled(chainID, timestamp, *msg.To()) && len(st.data) == 4 { - if bytes.Equal(st.data[0:4], UpdateDistributionAddressSelector(chainID, timestamp)) { + } else if GetDistributionChangeIsActivatedAndCalled(chainID, timestamp, *msg.To) && len(msg.Data) == 4 { + if bytes.Equal(msg.Data[0:4], UpdateDistributionAddressSelector(chainID, timestamp)) { if err := st.UpdateDistributionAddress(chainID, timestamp); err != nil { log.Warn("Error updating distribution contract", "error", err) } @@ -503,22 +570,22 @@ func (st *StateTransition) refundGas(apricotPhase1 bool) { if refund > st.state.GetRefund() { refund = st.state.GetRefund() } - st.gas += refund + st.gasRemaining += refund } // Return ETH for remaining gas, exchanged at the original rate. - remaining := new(big.Int).Mul(new(big.Int).SetUint64(st.gas), st.gasPrice) - st.state.AddBalance(st.msg.From(), remaining) + remaining := new(big.Int).Mul(new(big.Int).SetUint64(st.gasRemaining), st.msg.GasPrice) + st.state.AddBalance(st.msg.From, remaining) // Also return remaining gas to the block gas counter so it is // available for the next transaction. - st.gp.AddGas(st.gas) + st.gp.AddGas(st.gasRemaining) } // gasUsed returns the amount of gas used up by the state transition. 
func (st *StateTransition) gasUsed() uint64 { - if st.initialGas < st.gas { + if st.initialGas < st.gasRemaining { return uint64(0) } - return st.initialGas - st.gas + return st.initialGas - st.gasRemaining } diff --git a/coreth/core/state_transition_ext_test.go b/coreth/core/state_transition_ext_test.go index 651a2298..77a0182a 100644 --- a/coreth/core/state_transition_ext_test.go +++ b/coreth/core/state_transition_ext_test.go @@ -11,11 +11,11 @@ import ( "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/core/vm" "github.com/ava-labs/coreth/eth/tracers/logger" - "github.com/ava-labs/coreth/ethdb" "github.com/ava-labs/coreth/params" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethdb" ) // Test prioritized contract (Submitter) being partially refunded when fee is high @@ -27,7 +27,7 @@ func TestStateTransitionPrioritizedContract(t *testing.T) { from := crypto.PubkeyToAddress(key.PublicKey) gas := uint64(3000000) to := prioritisedSubmitterContractAddress - daemon := common.HexToAddress(GetDaemonContractAddr(new(big.Int))) + daemon := common.HexToAddress(GetDaemonContractAddr(0)) signer := types.LatestSignerForChainID(config.ChainID) tx, err := types.SignNewTx(key, signer, &types.LegacyTx{ @@ -48,7 +48,7 @@ func TestStateTransitionPrioritizedContract(t *testing.T) { Transfer: Transfer, Coinbase: common.HexToAddress("0x0100000000000000000000000000000000000000"), BlockNumber: big.NewInt(5), - Time: big.NewInt(time.Date(2024, time.May, 1, 0, 0, 0, 0, time.UTC).Unix()), // Time after setting Submitter contract address on all chains + Time: uint64(time.Date(2024, time.May, 1, 0, 0, 0, 0, time.UTC).Unix()), // Time after setting Submitter contract address on all chains Difficulty: big.NewInt(0xffffffff), GasLimit: gas, BaseFee: big.NewInt(8), @@ -77,21 +77,22 @@ func TestStateTransitionPrioritizedContract(t *testing.T) { tracer := logger.NewStructLogger(&logger.Config{ Debug: false, }) 
- cfg := vm.Config{Debug: true, Tracer: tracer} + cfg := vm.Config{Tracer: tracer} evm := vm.NewEVM(context, txContext, statedb, config, cfg) - msg, err := tx.AsMessage(signer, nil) + + msg, err := TransactionToMessage(tx, signer, nil) if err != nil { t.Fatalf("failed to prepare transaction for tracing: %v", err) } st := NewStateTransition(evm, msg, new(GasPool).AddGas(tx.Gas())) - balanceBefore := st.state.GetBalance(st.msg.From()) + balanceBefore := st.state.GetBalance(st.msg.From) _, err = st.TransitionDb() if err != nil { t.Fatal(err) } - balanceAfter := st.state.GetBalance(st.msg.From()) + balanceAfter := st.state.GetBalance(st.msg.From) // max fee (funds above which are returned) depends on the chain used _, limit, _, _, _ := stateTransitionVariants.GetValue(config.ChainID)(st) @@ -112,7 +113,7 @@ func TestStateTransitionDaemon(t *testing.T) { key, _ := crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") from := crypto.PubkeyToAddress(key.PublicKey) gas := uint64(3000000) - daemon := common.HexToAddress(GetDaemonContractAddr(new(big.Int))) + daemon := common.HexToAddress(GetDaemonContractAddr(0)) to := common.HexToAddress("0x7e22C4A78675ae3Be11Fb389Da9b9fb15996bb6a") signer := types.LatestSignerForChainID(config.ChainID) tx, err := types.SignNewTx(key, signer, @@ -134,7 +135,7 @@ func TestStateTransitionDaemon(t *testing.T) { Transfer: Transfer, Coinbase: common.HexToAddress("0x0100000000000000000000000000000000000000"), BlockNumber: big.NewInt(5), - Time: big.NewInt(time.Date(2024, time.May, 1, 0, 0, 0, 0, time.UTC).Unix()), + Time: uint64(time.Date(2024, time.May, 1, 0, 0, 0, 0, time.UTC).Unix()), Difficulty: big.NewInt(0xffffffff), GasLimit: gas, BaseFee: big.NewInt(8), @@ -163,9 +164,9 @@ func TestStateTransitionDaemon(t *testing.T) { tracer := logger.NewStructLogger(&logger.Config{ Debug: false, }) - cfg := vm.Config{Debug: true, Tracer: tracer} + cfg := vm.Config{Tracer: tracer} evm := vm.NewEVM(context, txContext, 
statedb, config, cfg) - msg, err := tx.AsMessage(signer, nil) + msg, err := TransactionToMessage(tx, signer, nil) if err != nil { t.Fatalf("failed to prepare transaction for tracing: %v", err) } @@ -201,9 +202,15 @@ func makePreState(db ethdb.Database, accounts GenesisAlloc, snapshotter bool) (* // Commit and re-open to start with a clean state. root, _ := statedb.Commit(false, false) + snapConfig := snapshot.Config{ + CacheSize: 64, + AsyncBuild: false, + NoBuild: false, + SkipVerify: true, + } var snaps *snapshot.Tree if snapshotter { - snaps, _ = snapshot.New(db, sdb.TrieDB(), 1, common.Hash{}, root, false, true, false) + snaps, _ = snapshot.New(snapConfig, sdb.DiskDB(), sdb.TrieDB(), common.Hash{}, root) } statedb, _ = state.New(root, sdb, snaps) return snaps, statedb diff --git a/coreth/core/state_transition_params.go b/coreth/core/state_transition_params.go index e22ed1e2..c8ea2847 100644 --- a/coreth/core/state_transition_params.go +++ b/coreth/core/state_transition_params.go @@ -11,7 +11,7 @@ import ( var ( stateTransitionVariants = utils.NewChainValue(nonFlareChain). - AddValues([]*big.Int{params.FlareChainID, params.CostwoChainID, params.StagingChainID, params.LocalFlareChainID}, stateTransitionParamsFlare). + AddValues([]*big.Int{params.FlareChainID, params.CostwoChainID, params.LocalFlareChainID}, stateTransitionParamsFlare). 
AddValues([]*big.Int{params.SongbirdChainID, params.CostonChainID, params.LocalChainID}, stateTransitionParamsSongbird) ) diff --git a/coreth/core/state_transition_test.go b/coreth/core/state_transition_test.go index 028a8f09..8c702517 100644 --- a/coreth/core/state_transition_test.go +++ b/coreth/core/state_transition_test.go @@ -89,7 +89,7 @@ func executeStateTransitionTest(t *testing.T, st stateTransitionTest) { }, GasLimit: params.ApricotPhase1GasLimit, } - genesis = gspec.ToBlock(nil) + genesis = gspec.ToBlock() engine = dummy.NewFaker() blockchain, _ = NewBlockChain(db, DefaultCacheConfig, gspec, engine, vm.Config{}, common.Hash{}, false) ) diff --git a/coreth/core/stateful_precompile_test.go b/coreth/core/stateful_precompile_test.go deleted file mode 100644 index dcda4532..00000000 --- a/coreth/core/stateful_precompile_test.go +++ /dev/null @@ -1,43 +0,0 @@ -// (c) 2019-2020, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package core - -import ( - "math/big" - - "github.com/ava-labs/coreth/core/state" - "github.com/ava-labs/coreth/precompile" - "github.com/ethereum/go-ethereum/common" -) - -var ( - _ precompile.BlockContext = &mockBlockContext{} - _ precompile.PrecompileAccessibleState = &mockAccessibleState{} -) - -type mockBlockContext struct { - blockNumber *big.Int - timestamp uint64 -} - -func (mb *mockBlockContext) Number() *big.Int { return mb.blockNumber } -func (mb *mockBlockContext) Timestamp() *big.Int { return new(big.Int).SetUint64(mb.timestamp) } - -type mockAccessibleState struct { - state *state.StateDB - blockContext *mockBlockContext - - // NativeAssetCall return values - ret []byte - remainingGas uint64 - err error -} - -func (m *mockAccessibleState) GetStateDB() precompile.StateDB { return m.state } - -func (m *mockAccessibleState) GetBlockContext() precompile.BlockContext { return m.blockContext } - -func (m *mockAccessibleState) NativeAssetCall(common.Address, []byte, uint64, uint64, bool) ([]byte, 
uint64, error) { - return m.ret, m.remainingGas, m.err -} diff --git a/coreth/core/test_blockchain.go b/coreth/core/test_blockchain.go index 10a7887a..eab50a4f 100644 --- a/coreth/core/test_blockchain.go +++ b/coreth/core/test_blockchain.go @@ -13,10 +13,10 @@ import ( "github.com/ava-labs/coreth/core/rawdb" "github.com/ava-labs/coreth/core/state" "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/ethdb" "github.com/ava-labs/coreth/params" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethdb" ) var TestCallbacks = dummy.ConsensusCallbacks{ @@ -128,6 +128,7 @@ func checkBlockChainState( if err != nil { t.Fatalf("Failed to create new blockchain instance: %s", err) } + defer newBlockChain.Stop() for i := uint64(1); i <= lastAcceptedBlock.NumberU64(); i++ { block := bc.GetBlockByNumber(i) @@ -168,7 +169,7 @@ func checkBlockChainState( } defer restartedChain.Stop() if currentBlock := restartedChain.CurrentBlock(); currentBlock.Hash() != lastAcceptedBlock.Hash() { - t.Fatalf("Expected restarted chain to have current block %s:%d, but found %s:%d", lastAcceptedBlock.Hash().Hex(), lastAcceptedBlock.NumberU64(), currentBlock.Hash().Hex(), currentBlock.NumberU64()) + t.Fatalf("Expected restarted chain to have current block %s:%d, but found %s:%d", lastAcceptedBlock.Hash().Hex(), lastAcceptedBlock.NumberU64(), currentBlock.Hash().Hex(), currentBlock.Number.Uint64()) } if restartedLastAcceptedBlock := restartedChain.LastConsensusAcceptedBlock(); restartedLastAcceptedBlock.Hash() != lastAcceptedBlock.Hash() { t.Fatalf("Expected restarted chain to have current block %s:%d, but found %s:%d", lastAcceptedBlock.Hash().Hex(), lastAcceptedBlock.NumberU64(), restartedLastAcceptedBlock.Hash().Hex(), restartedLastAcceptedBlock.NumberU64()) @@ -201,7 +202,6 @@ func TestInsertChainAcceptSingleBlock(t *testing.T, create func(db ethdb.Databas Config: ¶ms.ChainConfig{HomesteadBlock: new(big.Int)}, Alloc: 
GenesisAlloc{addr1: {Balance: genesisBalance}}, } - blockchain, err := create(chainDB, gspec, common.Hash{}) if err != nil { t.Fatal(err) @@ -329,7 +329,7 @@ func TestInsertLongForkedChain(t *testing.T, create func(db ethdb.Database, gspe currentBlock := blockchain.CurrentBlock() expectedCurrentBlock := chain1[len(chain1)-1] if currentBlock.Hash() != expectedCurrentBlock.Hash() { - t.Fatalf("Expected current block to be %s:%d, but found %s%d", expectedCurrentBlock.Hash().Hex(), expectedCurrentBlock.NumberU64(), currentBlock.Hash().Hex(), currentBlock.NumberU64()) + t.Fatalf("Expected current block to be %s:%d, but found %s%d", expectedCurrentBlock.Hash().Hex(), expectedCurrentBlock.NumberU64(), currentBlock.Hash().Hex(), currentBlock.Number.Uint64()) } if err := blockchain.ValidateCanonicalChain(); err != nil { @@ -473,7 +473,7 @@ func TestAcceptNonCanonicalBlock(t *testing.T, create func(db ethdb.Database, gs currentBlock := blockchain.CurrentBlock() expectedCurrentBlock := chain1[len(chain1)-1] if currentBlock.Hash() != expectedCurrentBlock.Hash() { - t.Fatalf("Expected current block to be %s:%d, but found %s%d", expectedCurrentBlock.Hash().Hex(), expectedCurrentBlock.NumberU64(), currentBlock.Hash().Hex(), currentBlock.NumberU64()) + t.Fatalf("Expected current block to be %s:%d, but found %s%d", expectedCurrentBlock.Hash().Hex(), expectedCurrentBlock.NumberU64(), currentBlock.Hash().Hex(), currentBlock.Number.Uint64()) } if err := blockchain.ValidateCanonicalChain(); err != nil { @@ -569,7 +569,7 @@ func TestSetPreferenceRewind(t *testing.T, create func(db ethdb.Database, gspec currentBlock := blockchain.CurrentBlock() expectedCurrentBlock := chain[len(chain)-1] if currentBlock.Hash() != expectedCurrentBlock.Hash() { - t.Fatalf("Expected current block to be %s:%d, but found %s%d", expectedCurrentBlock.Hash().Hex(), expectedCurrentBlock.NumberU64(), currentBlock.Hash().Hex(), currentBlock.NumberU64()) + t.Fatalf("Expected current block to be %s:%d, but found 
%s%d", expectedCurrentBlock.Hash().Hex(), expectedCurrentBlock.NumberU64(), currentBlock.Hash().Hex(), currentBlock.Number.Uint64()) } if err := blockchain.ValidateCanonicalChain(); err != nil { @@ -585,7 +585,7 @@ func TestSetPreferenceRewind(t *testing.T, create func(db ethdb.Database, gspec currentBlock = blockchain.CurrentBlock() expectedCurrentBlock = chain[0] if currentBlock.Hash() != expectedCurrentBlock.Hash() { - t.Fatalf("Expected current block to be %s:%d, but found %s%d", expectedCurrentBlock.Hash().Hex(), expectedCurrentBlock.NumberU64(), currentBlock.Hash().Hex(), currentBlock.NumberU64()) + t.Fatalf("Expected current block to be %s:%d, but found %s%d", expectedCurrentBlock.Hash().Hex(), expectedCurrentBlock.NumberU64(), currentBlock.Hash().Hex(), currentBlock.Number.Uint64()) } lastAcceptedBlock := blockchain.LastConsensusAcceptedBlock() @@ -881,7 +881,7 @@ func TestReorgReInsert(t *testing.T, create func(db ethdb.Database, gspec *Genes Config: ¶ms.ChainConfig{HomesteadBlock: new(big.Int)}, Alloc: GenesisAlloc{addr1: {Balance: genesisBalance}}, } - genesis := gspec.ToBlock(nil) + genesis := gspec.ToBlock() blockchain, err := create(chainDB, gspec, common.Hash{}) if err != nil { @@ -1039,7 +1039,7 @@ func TestAcceptBlockIdenticalStateRoot(t *testing.T, create func(db ethdb.Databa currentBlock := blockchain.CurrentBlock() expectedCurrentBlock := chain1[1] if currentBlock.Hash() != expectedCurrentBlock.Hash() { - t.Fatalf("Expected current block to be %s:%d, but found %s%d", expectedCurrentBlock.Hash().Hex(), expectedCurrentBlock.NumberU64(), currentBlock.Hash().Hex(), currentBlock.NumberU64()) + t.Fatalf("Expected current block to be %s:%d, but found %s%d", expectedCurrentBlock.Hash().Hex(), expectedCurrentBlock.NumberU64(), currentBlock.Hash().Hex(), currentBlock.Number.Uint64()) } // Accept the first block in [chain1] and reject all of [chain2] @@ -1181,7 +1181,7 @@ func TestReprocessAcceptBlockIdenticalStateRoot(t *testing.T, create func(db eth 
currentBlock := blockchain.CurrentBlock() expectedCurrentBlock := chain1[1] if currentBlock.Hash() != expectedCurrentBlock.Hash() { - t.Fatalf("Expected current block to be %s:%d, but found %s%d", expectedCurrentBlock.Hash().Hex(), expectedCurrentBlock.NumberU64(), currentBlock.Hash().Hex(), currentBlock.NumberU64()) + t.Fatalf("Expected current block to be %s:%d, but found %s%d", expectedCurrentBlock.Hash().Hex(), expectedCurrentBlock.NumberU64(), currentBlock.Hash().Hex(), currentBlock.Number.Uint64()) } blockchain.Stop() @@ -1206,7 +1206,7 @@ func TestReprocessAcceptBlockIdenticalStateRoot(t *testing.T, create func(db eth currentBlock = blockchain.CurrentBlock() expectedCurrentBlock = chain1[1] if currentBlock.Hash() != expectedCurrentBlock.Hash() { - t.Fatalf("Expected current block to be %s:%d, but found %s%d", expectedCurrentBlock.Hash().Hex(), expectedCurrentBlock.NumberU64(), currentBlock.Hash().Hex(), currentBlock.NumberU64()) + t.Fatalf("Expected current block to be %s:%d, but found %s%d", expectedCurrentBlock.Hash().Hex(), expectedCurrentBlock.NumberU64(), currentBlock.Hash().Hex(), currentBlock.Number.Uint64()) } // Accept the first block in [chain1] and reject all of [chain2] @@ -1341,7 +1341,7 @@ func TestInsertChainInvalidBlockFee(t *testing.T, create func(db ethdb.Database, // This call generates a chain of 3 blocks. 
signer := types.LatestSigner(params.TestChainConfig) - eng := dummy.NewComplexETHFaker(&TestCallbacks) + eng := dummy.NewFakerWithMode(TestCallbacks, dummy.Mode{ModeSkipBlockFee: true, ModeSkipCoinbase: true}) _, chain, _, err := GenerateChainWithGenesis(gspec, eng, 3, 0, func(i int, gen *BlockGen) { tx := types.NewTx(&types.DynamicFeeTx{ ChainID: params.TestChainConfig.ChainID, diff --git a/coreth/core/tx_journal.go b/coreth/core/txpool/journal.go similarity index 92% rename from coreth/core/tx_journal.go rename to coreth/core/txpool/journal.go index 05f59a30..73302baa 100644 --- a/coreth/core/tx_journal.go +++ b/coreth/core/txpool/journal.go @@ -24,7 +24,7 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . -package core +package txpool import ( "errors" @@ -51,23 +51,23 @@ type devNull struct{} func (*devNull) Write(p []byte) (n int, err error) { return len(p), nil } func (*devNull) Close() error { return nil } -// txJournal is a rotating log of transactions with the aim of storing locally +// journal is a rotating log of transactions with the aim of storing locally // created transactions to allow non-executed ones to survive node restarts. -type txJournal struct { +type journal struct { path string // Filesystem path to store the transactions at writer io.WriteCloser // Output stream to write new transactions into } // newTxJournal creates a new transaction journal to -func newTxJournal(path string) *txJournal { - return &txJournal{ +func newTxJournal(path string) *journal { + return &journal{ path: path, } } // load parses a transaction journal dump from disk, loading its contents into // the specified pool. 
-func (journal *txJournal) load(add func([]*types.Transaction) []error) error { +func (journal *journal) load(add func([]*types.Transaction) []error) error { // Open the journal for loading any past transactions input, err := os.Open(journal.path) if errors.Is(err, fs.ErrNotExist) { @@ -128,7 +128,7 @@ func (journal *txJournal) load(add func([]*types.Transaction) []error) error { } // insert adds the specified transaction to the local disk journal. -func (journal *txJournal) insert(tx *types.Transaction) error { +func (journal *journal) insert(tx *types.Transaction) error { if journal.writer == nil { return errNoActiveJournal } @@ -140,7 +140,7 @@ func (journal *txJournal) insert(tx *types.Transaction) error { // rotate regenerates the transaction journal based on the current contents of // the transaction pool. -func (journal *txJournal) rotate(all map[common.Address]types.Transactions) error { +func (journal *journal) rotate(all map[common.Address]types.Transactions) error { // Close the current journal (if any is open) if journal.writer != nil { if err := journal.writer.Close(); err != nil { @@ -180,7 +180,7 @@ func (journal *txJournal) rotate(all map[common.Address]types.Transactions) erro } // close flushes the transaction journal contents to disk and closes the file. -func (journal *txJournal) close() error { +func (journal *journal) close() error { var err error if journal.writer != nil { diff --git a/coreth/core/tx_list.go b/coreth/core/txpool/list.go similarity index 81% rename from coreth/core/tx_list.go rename to coreth/core/txpool/list.go index 4a6999e3..96f655a6 100644 --- a/coreth/core/tx_list.go +++ b/coreth/core/txpool/list.go @@ -24,7 +24,7 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . 
-package core +package txpool import ( "container/heap" @@ -55,34 +55,35 @@ func (h *nonceHeap) Pop() interface{} { old := *h n := len(old) x := old[n-1] + old[n-1] = 0 *h = old[0 : n-1] return x } -// txSortedMap is a nonce->transaction hash map with a heap based index to allow +// sortedMap is a nonce->transaction hash map with a heap based index to allow // iterating over the contents in a nonce-incrementing way. -type txSortedMap struct { +type sortedMap struct { items map[uint64]*types.Transaction // Hash map storing the transaction data index *nonceHeap // Heap of nonces of all the stored transactions (non-strict mode) cache types.Transactions // Cache of the transactions already sorted } -// newTxSortedMap creates a new nonce-sorted transaction map. -func newTxSortedMap() *txSortedMap { - return &txSortedMap{ +// newSortedMap creates a new nonce-sorted transaction map. +func newSortedMap() *sortedMap { + return &sortedMap{ items: make(map[uint64]*types.Transaction), index: new(nonceHeap), } } // Get retrieves the current transactions associated with the given nonce. -func (m *txSortedMap) Get(nonce uint64) *types.Transaction { +func (m *sortedMap) Get(nonce uint64) *types.Transaction { return m.items[nonce] } // Put inserts a new transaction into the map, also updating the map's nonce // index. If a transaction already exists with the same nonce, it's overwritten. -func (m *txSortedMap) Put(tx *types.Transaction) { +func (m *sortedMap) Put(tx *types.Transaction) { nonce := tx.Nonce() if m.items[nonce] == nil { heap.Push(m.index, nonce) @@ -93,7 +94,7 @@ func (m *txSortedMap) Put(tx *types.Transaction) { // Forward removes all transactions from the map with a nonce lower than the // provided threshold. Every removed transaction is returned for any post-removal // maintenance. 
-func (m *txSortedMap) Forward(threshold uint64) types.Transactions { +func (m *sortedMap) Forward(threshold uint64) types.Transactions { var removed types.Transactions // Pop off heap items until the threshold is reached @@ -114,7 +115,7 @@ func (m *txSortedMap) Forward(threshold uint64) types.Transactions { // Filter, as opposed to 'filter', re-initialises the heap after the operation is done. // If you want to do several consecutive filterings, it's therefore better to first // do a .filter(func1) followed by .Filter(func2) or reheap() -func (m *txSortedMap) Filter(filter func(*types.Transaction) bool) types.Transactions { +func (m *sortedMap) Filter(filter func(*types.Transaction) bool) types.Transactions { removed := m.filter(filter) // If transactions were removed, the heap and cache are ruined if len(removed) > 0 { @@ -123,7 +124,7 @@ func (m *txSortedMap) Filter(filter func(*types.Transaction) bool) types.Transac return removed } -func (m *txSortedMap) reheap() { +func (m *sortedMap) reheap() { *m.index = make([]uint64, 0, len(m.items)) for nonce := range m.items { *m.index = append(*m.index, nonce) @@ -134,7 +135,7 @@ func (m *txSortedMap) reheap() { // filter is identical to Filter, but **does not** regenerate the heap. This method // should only be used if followed immediately by a call to Filter or reheap() -func (m *txSortedMap) filter(filter func(*types.Transaction) bool) types.Transactions { +func (m *sortedMap) filter(filter func(*types.Transaction) bool) types.Transactions { var removed types.Transactions // Collect all the transactions to filter out @@ -152,7 +153,7 @@ func (m *txSortedMap) filter(filter func(*types.Transaction) bool) types.Transac // Cap places a hard limit on the number of items, returning all transactions // exceeding that limit. 
-func (m *txSortedMap) Cap(threshold int) types.Transactions { +func (m *sortedMap) Cap(threshold int) types.Transactions { // Short circuit if the number of items is under the limit if len(m.items) <= threshold { return nil @@ -177,7 +178,7 @@ func (m *txSortedMap) Cap(threshold int) types.Transactions { // Remove deletes a transaction from the maintained map, returning whether the // transaction was found. -func (m *txSortedMap) Remove(nonce uint64) bool { +func (m *sortedMap) Remove(nonce uint64) bool { // Short circuit if no transaction is present _, ok := m.items[nonce] if !ok { @@ -203,7 +204,7 @@ func (m *txSortedMap) Remove(nonce uint64) bool { // Note, all transactions with nonces lower than start will also be returned to // prevent getting into and invalid state. This is not something that should ever // happen but better to be self correcting than failing! -func (m *txSortedMap) Ready(start uint64) types.Transactions { +func (m *sortedMap) Ready(start uint64) types.Transactions { // Short circuit if no transactions are available if m.index.Len() == 0 || (*m.index)[0] > start { return nil @@ -221,11 +222,11 @@ func (m *txSortedMap) Ready(start uint64) types.Transactions { } // Len returns the length of the transaction map. -func (m *txSortedMap) Len() int { +func (m *sortedMap) Len() int { return len(m.items) } -func (m *txSortedMap) flatten() types.Transactions { +func (m *sortedMap) flatten() types.Transactions { // If the sorting was not cached yet, create and cache it if m.cache == nil { m.cache = make(types.Transactions, 0, len(m.items)) @@ -240,7 +241,7 @@ func (m *txSortedMap) flatten() types.Transactions { // Flatten creates a nonce-sorted slice of transactions based on the loosely // sorted internal representation. The result of the sorting is cached in case // it's requested again before any modifications are made to the contents. 
-func (m *txSortedMap) Flatten() types.Transactions { +func (m *sortedMap) Flatten() types.Transactions { // Copy the cache to prevent accidental modifications cache := m.flatten() txs := make(types.Transactions, len(cache)) @@ -250,37 +251,39 @@ func (m *txSortedMap) Flatten() types.Transactions { // LastElement returns the last element of a flattened list, thus, the // transaction with the highest nonce -func (m *txSortedMap) LastElement() *types.Transaction { +func (m *sortedMap) LastElement() *types.Transaction { cache := m.flatten() return cache[len(cache)-1] } -// txList is a "list" of transactions belonging to an account, sorted by account +// list is a "list" of transactions belonging to an account, sorted by account // nonce. The same type can be used both for storing contiguous transactions for // the executable/pending queue; and for storing gapped transactions for the non- // executable/future queue, with minor behavioral changes. -type txList struct { - strict bool // Whether nonces are strictly continuous or not - txs *txSortedMap // Heap indexed sorted hash map of the transactions +type list struct { + strict bool // Whether nonces are strictly continuous or not + txs *sortedMap // Heap indexed sorted hash map of the transactions - costcap *big.Int // Price of the highest costing transaction (reset only if exceeds balance) - gascap uint64 // Gas limit of the highest spending transaction (reset only if exceeds block limit) + costcap *big.Int // Price of the highest costing transaction (reset only if exceeds balance) + gascap uint64 // Gas limit of the highest spending transaction (reset only if exceeds block limit) + totalcost *big.Int // Total cost of all transactions in the list } -// newTxList create a new transaction list for maintaining nonce-indexable fast, +// newList create a new transaction list for maintaining nonce-indexable fast, // gapped, sortable transaction lists. 
-func newTxList(strict bool) *txList { - return &txList{ - strict: strict, - txs: newTxSortedMap(), - costcap: new(big.Int), +func newList(strict bool) *list { + return &list{ + strict: strict, + txs: newSortedMap(), + costcap: new(big.Int), + totalcost: new(big.Int), } } -// Overlaps returns whether the transaction specified has the same nonce as one -// already contained within the list. -func (l *txList) Overlaps(tx *types.Transaction) bool { - return l.txs.Get(tx.Nonce()) != nil +// Contains returns whether the list contains a transaction +// with the provided nonce. +func (l *list) Contains(nonce uint64) bool { + return l.txs.Get(nonce) != nil } // Add tries to insert a new transaction into the list, returning whether the @@ -288,7 +291,7 @@ func (l *txList) Overlaps(tx *types.Transaction) bool { // // If the new transaction is accepted into the list, the lists' cost and gas // thresholds are also potentially updated. -func (l *txList) Add(tx *types.Transaction, priceBump uint64) (bool, *types.Transaction) { +func (l *list) Add(tx *types.Transaction, priceBump uint64) (bool, *types.Transaction) { // If there's an older better transaction, abort old := l.txs.Get(tx.Nonce()) if old != nil { @@ -311,7 +314,11 @@ func (l *txList) Add(tx *types.Transaction, priceBump uint64) (bool, *types.Tran if tx.GasFeeCapIntCmp(thresholdFeeCap) < 0 || tx.GasTipCapIntCmp(thresholdTip) < 0 { return false, nil } + // Old is being replaced, subtract old cost + l.subTotalCost([]*types.Transaction{old}) } + // Add new tx cost to totalcost + l.totalcost.Add(l.totalcost, tx.Cost()) // Otherwise overwrite the old transaction with the current one l.txs.Put(tx) if cost := tx.Cost(); l.costcap.Cmp(cost) < 0 { @@ -326,8 +333,10 @@ func (l *txList) Add(tx *types.Transaction, priceBump uint64) (bool, *types.Tran // Forward removes all transactions from the list with a nonce lower than the // provided threshold. Every removed transaction is returned for any post-removal // maintenance. 
-func (l *txList) Forward(threshold uint64) types.Transactions { - return l.txs.Forward(threshold) +func (l *list) Forward(threshold uint64) types.Transactions { + txs := l.txs.Forward(threshold) + l.subTotalCost(txs) + return txs } // Filter removes all transactions from the list with a cost or gas limit higher @@ -339,7 +348,7 @@ func (l *txList) Forward(threshold uint64) types.Transactions { // a point in calculating all the costs or if the balance covers all. If the threshold // is lower than the costgas cap, the caps will be reset to a new high after removing // the newly invalidated transactions. -func (l *txList) Filter(costLimit *big.Int, gasLimit uint64) (types.Transactions, types.Transactions) { +func (l *list) Filter(costLimit *big.Int, gasLimit uint64) (types.Transactions, types.Transactions) { // If all transactions are below the threshold, short circuit if l.costcap.Cmp(costLimit) <= 0 && l.gascap <= gasLimit { return nil, nil @@ -366,28 +375,36 @@ func (l *txList) Filter(costLimit *big.Int, gasLimit uint64) (types.Transactions } invalids = l.txs.filter(func(tx *types.Transaction) bool { return tx.Nonce() > lowest }) } + // Reset total cost + l.subTotalCost(removed) + l.subTotalCost(invalids) l.txs.reheap() return removed, invalids } // Cap places a hard limit on the number of items, returning all transactions // exceeding that limit. -func (l *txList) Cap(threshold int) types.Transactions { - return l.txs.Cap(threshold) +func (l *list) Cap(threshold int) types.Transactions { + txs := l.txs.Cap(threshold) + l.subTotalCost(txs) + return txs } // Remove deletes a transaction from the maintained list, returning whether the // transaction was found, and also returning any transaction invalidated due to // the deletion (strict mode only). 
-func (l *txList) Remove(tx *types.Transaction) (bool, types.Transactions) { +func (l *list) Remove(tx *types.Transaction) (bool, types.Transactions) { // Remove the transaction from the set nonce := tx.Nonce() if removed := l.txs.Remove(nonce); !removed { return false, nil } + l.subTotalCost([]*types.Transaction{tx}) // In strict mode, filter out non-executable transactions if l.strict { - return true, l.txs.Filter(func(tx *types.Transaction) bool { return tx.Nonce() > nonce }) + txs := l.txs.Filter(func(tx *types.Transaction) bool { return tx.Nonce() > nonce }) + l.subTotalCost(txs) + return true, txs } return true, nil } @@ -399,33 +416,43 @@ func (l *txList) Remove(tx *types.Transaction) (bool, types.Transactions) { // Note, all transactions with nonces lower than start will also be returned to // prevent getting into and invalid state. This is not something that should ever // happen but better to be self correcting than failing! -func (l *txList) Ready(start uint64) types.Transactions { - return l.txs.Ready(start) +func (l *list) Ready(start uint64) types.Transactions { + txs := l.txs.Ready(start) + l.subTotalCost(txs) + return txs } // Len returns the length of the transaction list. -func (l *txList) Len() int { +func (l *list) Len() int { return l.txs.Len() } // Empty returns whether the list of transactions is empty or not. -func (l *txList) Empty() bool { +func (l *list) Empty() bool { return l.Len() == 0 } // Flatten creates a nonce-sorted slice of transactions based on the loosely // sorted internal representation. The result of the sorting is cached in case // it's requested again before any modifications are made to the contents. 
-func (l *txList) Flatten() types.Transactions { +func (l *list) Flatten() types.Transactions { return l.txs.Flatten() } // LastElement returns the last element of a flattened list, thus, the // transaction with the highest nonce -func (l *txList) LastElement() *types.Transaction { +func (l *list) LastElement() *types.Transaction { return l.txs.LastElement() } +// subTotalCost subtracts the cost of the given transactions from the +// total cost of all transactions. +func (l *list) subTotalCost(txs []*types.Transaction) { + for _, tx := range txs { + l.totalcost.Sub(l.totalcost, tx.Cost()) + } +} + // priceHeap is a heap.Interface implementation over transactions for retrieving // price-sorted transactions to discard when the pool fills up. If baseFee is set // then the heap is sorted based on the effective tip based on the given base fee. @@ -478,8 +505,8 @@ func (h *priceHeap) Pop() interface{} { return x } -// txPricedList is a price-sorted heap to allow operating on transactions pool -// contents in a price-incrementing way. It's built opon the all transactions +// pricedList is a price-sorted heap to allow operating on transactions pool +// contents in a price-incrementing way. It's built upon the all transactions // in txpool but only interested in the remote part. It means only remote transactions // will be considered for tracking, sorting, eviction, etc. // @@ -489,14 +516,11 @@ func (h *priceHeap) Pop() interface{} { // In some cases (during a congestion, when blocks are full) the urgent heap can provide // better candidates for inclusion while in other cases (at the top of the baseFee peak) // the floating heap is better. When baseFee is decreasing they behave similarly. -type txPricedList struct { +type pricedList struct { // Number of stale price points to (re-heap trigger). - // This field is accessed atomically, and must be the first field - // to ensure it has correct alignment for atomic.AddInt64. 
- // See https://golang.org/pkg/sync/atomic/#pkg-note-BUG. - stales int64 + stales atomic.Int64 - all *txLookup // Pointer to the map of all transactions + all *lookup // Pointer to the map of all transactions urgent, floating priceHeap // Heaps of prices of all the stored **remote** transactions reheapMu sync.Mutex // Mutex asserts that only one routine is reheaping the list } @@ -507,15 +531,15 @@ const ( floatingRatio = 1 ) -// newTxPricedList creates a new price-sorted transaction heap. -func newTxPricedList(all *txLookup) *txPricedList { - return &txPricedList{ +// newPricedList creates a new price-sorted transaction heap. +func newPricedList(all *lookup) *pricedList { + return &pricedList{ all: all, } } // Put inserts a new transaction into the heap. -func (l *txPricedList) Put(tx *types.Transaction, local bool) { +func (l *pricedList) Put(tx *types.Transaction, local bool) { if local { return } @@ -526,9 +550,9 @@ func (l *txPricedList) Put(tx *types.Transaction, local bool) { // Removed notifies the prices transaction list that an old transaction dropped // from the pool. The list will just keep a counter of stale objects and update // the heap if a large enough ratio of transactions go stale. -func (l *txPricedList) Removed(count int) { +func (l *pricedList) Removed(count int) { // Bump the stale counter, but exit if still too low (< 25%) - stales := atomic.AddInt64(&l.stales, int64(count)) + stales := l.stales.Add(int64(count)) if int(stales) <= (len(l.urgent.list)+len(l.floating.list))/4 { return } @@ -538,7 +562,7 @@ func (l *txPricedList) Removed(count int) { // Underpriced checks whether a transaction is cheaper than (or as cheap as) the // lowest priced (remote) transaction currently being tracked. 
-func (l *txPricedList) Underpriced(tx *types.Transaction) bool { +func (l *pricedList) Underpriced(tx *types.Transaction) bool { // Note: with two queues, being underpriced is defined as being worse than the worst item // in all non-empty queues if there is any. If both queues are empty then nothing is underpriced. return (l.underpricedFor(&l.urgent, tx) || len(l.urgent.list) == 0) && @@ -548,12 +572,12 @@ func (l *txPricedList) Underpriced(tx *types.Transaction) bool { // underpricedFor checks whether a transaction is cheaper than (or as cheap as) the // lowest priced (remote) transaction in the given heap. -func (l *txPricedList) underpricedFor(h *priceHeap, tx *types.Transaction) bool { +func (l *pricedList) underpricedFor(h *priceHeap, tx *types.Transaction) bool { // Discard stale price points if found at the heap start for len(h.list) > 0 { head := h.list[0] if l.all.GetRemote(head.Hash()) == nil { // Removed or migrated - atomic.AddInt64(&l.stales, -1) + l.stales.Add(-1) heap.Pop(h) continue } @@ -570,16 +594,17 @@ func (l *txPricedList) underpricedFor(h *priceHeap, tx *types.Transaction) bool // Discard finds a number of most underpriced transactions, removes them from the // priced list and returns them for further removal from the entire pool. +// If noPending is set to true, we will only consider the floating list // // Note local transaction won't be considered for eviction. 
-func (l *txPricedList) Discard(slots int, force bool) (types.Transactions, bool) { +func (l *pricedList) Discard(slots int, force bool) (types.Transactions, bool) { drop := make(types.Transactions, 0, slots) // Remote underpriced transactions to drop for slots > 0 { if len(l.urgent.list)*floatingRatio > len(l.floating.list)*urgentRatio || floatingRatio == 0 { // Discard stale transactions if found during cleanup tx := heap.Pop(&l.urgent).(*types.Transaction) if l.all.GetRemote(tx.Hash()) == nil { // Removed or migrated - atomic.AddInt64(&l.stales, -1) + l.stales.Add(-1) continue } // Non stale transaction found, move to floating heap @@ -592,7 +617,7 @@ func (l *txPricedList) Discard(slots int, force bool) (types.Transactions, bool) // Discard stale transactions if found during cleanup tx := heap.Pop(&l.floating).(*types.Transaction) if l.all.GetRemote(tx.Hash()) == nil { // Removed or migrated - atomic.AddInt64(&l.stales, -1) + l.stales.Add(-1) continue } // Non stale transaction found, discard it @@ -611,11 +636,11 @@ func (l *txPricedList) Discard(slots int, force bool) (types.Transactions, bool) } // Reheap forcibly rebuilds the heap based on the current remote transaction set. -func (l *txPricedList) Reheap() { +func (l *pricedList) Reheap() { l.reheapMu.Lock() defer l.reheapMu.Unlock() start := time.Now() - atomic.StoreInt64(&l.stales, 0) + l.stales.Store(0) l.urgent.list = make([]*types.Transaction, 0, l.all.RemoteCount()) l.all.Range(func(hash common.Hash, tx *types.Transaction, local bool) bool { l.urgent.list = append(l.urgent.list, tx) @@ -639,7 +664,7 @@ func (l *txPricedList) Reheap() { // SetBaseFee updates the base fee and triggers a re-heap. Note that Removed is not // necessary to call right before SetBaseFee when processing a new block. 
-func (l *txPricedList) SetBaseFee(baseFee *big.Int) { +func (l *pricedList) SetBaseFee(baseFee *big.Int) { l.urgent.baseFee = baseFee l.Reheap() } diff --git a/coreth/core/tx_list_test.go b/coreth/core/txpool/list_test.go similarity index 86% rename from coreth/core/tx_list_test.go rename to coreth/core/txpool/list_test.go index ecfa9154..8c2a3e05 100644 --- a/coreth/core/tx_list_test.go +++ b/coreth/core/txpool/list_test.go @@ -24,7 +24,7 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . -package core +package txpool import ( "math/big" @@ -37,7 +37,7 @@ import ( // Tests that transactions can be added to strict lists and list contents and // nonce boundaries are correctly maintained. -func TestStrictTxListAdd(t *testing.T) { +func TestStrictListAdd(t *testing.T) { // Generate a list of transactions to insert key, _ := crypto.GenerateKey() @@ -46,9 +46,9 @@ func TestStrictTxListAdd(t *testing.T) { txs[i] = transaction(uint64(i), 0, key) } // Insert the transactions in a random order - list := newTxList(true) + list := newList(true) for _, v := range rand.Perm(len(txs)) { - list.Add(txs[v], DefaultTxPoolConfig.PriceBump) + list.Add(txs[v], DefaultConfig.PriceBump) } // Verify internal state if len(list.txs.items) != len(txs) { @@ -61,7 +61,7 @@ func TestStrictTxListAdd(t *testing.T) { } } -func BenchmarkTxListAdd(b *testing.B) { +func BenchmarkListAdd(b *testing.B) { // Generate a list of transactions to insert key, _ := crypto.GenerateKey() @@ -70,13 +70,13 @@ func BenchmarkTxListAdd(b *testing.B) { txs[i] = transaction(uint64(i), 0, key) } // Insert the transactions in a random order - priceLimit := big.NewInt(int64(DefaultTxPoolConfig.PriceLimit)) + priceLimit := big.NewInt(int64(DefaultConfig.PriceLimit)) b.ResetTimer() for i := 0; i < b.N; i++ { - list := newTxList(true) + list := newList(true) for _, v := range rand.Perm(len(txs)) { - list.Add(txs[v], 
DefaultTxPoolConfig.PriceBump) - list.Filter(priceLimit, DefaultTxPoolConfig.PriceBump) + list.Add(txs[v], DefaultConfig.PriceBump) + list.Filter(priceLimit, DefaultConfig.PriceBump) } } } diff --git a/coreth/core/tx_noncer.go b/coreth/core/txpool/noncer.go similarity index 78% rename from coreth/core/tx_noncer.go rename to coreth/core/txpool/noncer.go index 948453cf..828717e2 100644 --- a/coreth/core/tx_noncer.go +++ b/coreth/core/txpool/noncer.go @@ -24,7 +24,7 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . -package core +package txpool import ( "sync" @@ -33,18 +33,18 @@ import ( "github.com/ethereum/go-ethereum/common" ) -// txNoncer is a tiny virtual state database to manage the executable nonces of +// noncer is a tiny virtual state database to manage the executable nonces of // accounts in the pool, falling back to reading from a real state database if // an account is unknown. -type txNoncer struct { +type noncer struct { fallback *state.StateDB nonces map[common.Address]uint64 lock sync.Mutex } -// newTxNoncer creates a new virtual state database to track the pool nonces. -func newTxNoncer(statedb *state.StateDB) *txNoncer { - return &txNoncer{ +// newNoncer creates a new virtual state database to track the pool nonces. +func newNoncer(statedb *state.StateDB) *noncer { + return &noncer{ fallback: statedb.Copy(), nonces: make(map[common.Address]uint64), } @@ -52,21 +52,23 @@ func newTxNoncer(statedb *state.StateDB) *txNoncer { // get returns the current nonce of an account, falling back to a real state // database if the account is unknown. -func (txn *txNoncer) get(addr common.Address) uint64 { +func (txn *noncer) get(addr common.Address) uint64 { // We use mutex for get operation is the underlying // state will mutate db even for read access. 
txn.lock.Lock() defer txn.lock.Unlock() if _, ok := txn.nonces[addr]; !ok { - txn.nonces[addr] = txn.fallback.GetNonce(addr) + if nonce := txn.fallback.GetNonce(addr); nonce != 0 { + txn.nonces[addr] = nonce + } } return txn.nonces[addr] } // set inserts a new virtual nonce into the virtual state database to be returned // whenever the pool requests it instead of reaching into the real state database. -func (txn *txNoncer) set(addr common.Address, nonce uint64) { +func (txn *noncer) set(addr common.Address, nonce uint64) { txn.lock.Lock() defer txn.lock.Unlock() @@ -75,12 +77,14 @@ func (txn *txNoncer) set(addr common.Address, nonce uint64) { // setIfLower updates a new virtual nonce into the virtual state database if the // new one is lower. -func (txn *txNoncer) setIfLower(addr common.Address, nonce uint64) { +func (txn *noncer) setIfLower(addr common.Address, nonce uint64) { txn.lock.Lock() defer txn.lock.Unlock() if _, ok := txn.nonces[addr]; !ok { - txn.nonces[addr] = txn.fallback.GetNonce(addr) + if nonce := txn.fallback.GetNonce(addr); nonce != 0 { + txn.nonces[addr] = nonce + } } if txn.nonces[addr] <= nonce { return @@ -89,7 +93,7 @@ func (txn *txNoncer) setIfLower(addr common.Address, nonce uint64) { } // setAll sets the nonces for all accounts to the given map. -func (txn *txNoncer) setAll(all map[common.Address]uint64) { +func (txn *noncer) setAll(all map[common.Address]uint64) { txn.lock.Lock() defer txn.lock.Unlock() diff --git a/coreth/core/tx_pool.go b/coreth/core/txpool/txpool.go similarity index 84% rename from coreth/core/tx_pool.go rename to coreth/core/txpool/txpool.go index 9c58b545..479967ae 100644 --- a/coreth/core/tx_pool.go +++ b/coreth/core/txpool/txpool.go @@ -24,7 +24,7 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . 
-package core +package txpool import ( "errors" @@ -37,10 +37,12 @@ import ( "time" "github.com/ava-labs/coreth/consensus/dummy" + "github.com/ava-labs/coreth/core" "github.com/ava-labs/coreth/core/state" "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/metrics" "github.com/ava-labs/coreth/params" + "github.com/ava-labs/coreth/utils" "github.com/ava-labs/coreth/vmerrs" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/prque" @@ -79,7 +81,7 @@ var ( // configured for the transaction pool. ErrUnderpriced = errors.New("transaction underpriced") - // ErrTxPoolOverflow is returned if the transaction pool is full and can't accpet + // ErrTxPoolOverflow is returned if the transaction pool is full and can't accept // another remote transaction. ErrTxPoolOverflow = errors.New("txpool is full") @@ -99,6 +101,14 @@ var ( // than some meaningful limit a user might use. This is not a consensus error // making the transaction invalid, rather a DOS protection. ErrOversizedData = errors.New("oversized data") + + // ErrFutureReplacePending is returned if a future transaction replaces a pending + // transaction. Future transactions should only be able to replace other future transactions. + ErrFutureReplacePending = errors.New("future transaction tries to replace pending") + + // ErrOverdraft is returned if a transaction would cause the senders balance to go negative + // thus invalidating a potential large number of transactions. + ErrOverdraft = errors.New("transaction would cause overdraft") ) var ( @@ -127,6 +137,7 @@ var ( invalidTxMeter = metrics.NewRegisteredMeter("txpool/invalid", nil) underpricedTxMeter = metrics.NewRegisteredMeter("txpool/underpriced", nil) overflowedTxMeter = metrics.NewRegisteredMeter("txpool/overflowed", nil) + // throttleTxMeter counts how many transactions are rejected due to too-many-changes between // txpool reorgs. 
throttleTxMeter = metrics.NewRegisteredMeter("txpool/throttle", nil) @@ -156,16 +167,16 @@ const ( // blockChain provides the state of blockchain and current gas limit to do // some pre checks in tx pool and event subscribers. type blockChain interface { - CurrentBlock() *types.Block + CurrentBlock() *types.Header GetBlock(hash common.Hash, number uint64) *types.Block StateAt(root common.Hash) (*state.StateDB, error) - SenderCacher() *TxSenderCacher + SenderCacher() *core.TxSenderCacher - SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription + SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription } -// TxPoolConfig are the configuration parameters of the transaction pool. -type TxPoolConfig struct { +// Config are the configuration parameters of the transaction pool. +type Config struct { Locals []common.Address // Addresses that should be treated by default as local NoLocals bool // Whether local transaction handling should be disabled Journal string // Journal of local transactions to survive node restarts @@ -182,9 +193,9 @@ type TxPoolConfig struct { Lifetime time.Duration // Maximum amount of time non-executable transaction are queued } -// DefaultTxPoolConfig contains the default configurations for the transaction +// DefaultConfig contains the default configurations for the transaction // pool. -var DefaultTxPoolConfig = TxPoolConfig{ +var DefaultConfig = Config{ Journal: "transactions.rlp", Rejournal: time.Hour, @@ -201,39 +212,39 @@ var DefaultTxPoolConfig = TxPoolConfig{ // sanitize checks the provided user configurations and changes anything that's // unreasonable or unworkable. 
-func (config *TxPoolConfig) sanitize() TxPoolConfig { +func (config *Config) sanitize() Config { conf := *config if conf.Rejournal < time.Second { log.Warn("Sanitizing invalid txpool journal time", "provided", conf.Rejournal, "updated", time.Second) conf.Rejournal = time.Second } if conf.PriceLimit < 1 { - log.Warn("Sanitizing invalid txpool price limit", "provided", conf.PriceLimit, "updated", DefaultTxPoolConfig.PriceLimit) - conf.PriceLimit = DefaultTxPoolConfig.PriceLimit + log.Warn("Sanitizing invalid txpool price limit", "provided", conf.PriceLimit, "updated", DefaultConfig.PriceLimit) + conf.PriceLimit = DefaultConfig.PriceLimit } if conf.PriceBump < 1 { - log.Warn("Sanitizing invalid txpool price bump", "provided", conf.PriceBump, "updated", DefaultTxPoolConfig.PriceBump) - conf.PriceBump = DefaultTxPoolConfig.PriceBump + log.Warn("Sanitizing invalid txpool price bump", "provided", conf.PriceBump, "updated", DefaultConfig.PriceBump) + conf.PriceBump = DefaultConfig.PriceBump } if conf.AccountSlots < 1 { - log.Warn("Sanitizing invalid txpool account slots", "provided", conf.AccountSlots, "updated", DefaultTxPoolConfig.AccountSlots) - conf.AccountSlots = DefaultTxPoolConfig.AccountSlots + log.Warn("Sanitizing invalid txpool account slots", "provided", conf.AccountSlots, "updated", DefaultConfig.AccountSlots) + conf.AccountSlots = DefaultConfig.AccountSlots } if conf.GlobalSlots < 1 { - log.Warn("Sanitizing invalid txpool global slots", "provided", conf.GlobalSlots, "updated", DefaultTxPoolConfig.GlobalSlots) - conf.GlobalSlots = DefaultTxPoolConfig.GlobalSlots + log.Warn("Sanitizing invalid txpool global slots", "provided", conf.GlobalSlots, "updated", DefaultConfig.GlobalSlots) + conf.GlobalSlots = DefaultConfig.GlobalSlots } if conf.AccountQueue < 1 { - log.Warn("Sanitizing invalid txpool account queue", "provided", conf.AccountQueue, "updated", DefaultTxPoolConfig.AccountQueue) - conf.AccountQueue = DefaultTxPoolConfig.AccountQueue + log.Warn("Sanitizing 
invalid txpool account queue", "provided", conf.AccountQueue, "updated", DefaultConfig.AccountQueue) + conf.AccountQueue = DefaultConfig.AccountQueue } if conf.GlobalQueue < 1 { - log.Warn("Sanitizing invalid txpool global queue", "provided", conf.GlobalQueue, "updated", DefaultTxPoolConfig.GlobalQueue) - conf.GlobalQueue = DefaultTxPoolConfig.GlobalQueue + log.Warn("Sanitizing invalid txpool global queue", "provided", conf.GlobalQueue, "updated", DefaultConfig.GlobalQueue) + conf.GlobalQueue = DefaultConfig.GlobalQueue } if conf.Lifetime < 1 { - log.Warn("Sanitizing invalid txpool lifetime", "provided", conf.Lifetime, "updated", DefaultTxPoolConfig.Lifetime) - conf.Lifetime = DefaultTxPoolConfig.Lifetime + log.Warn("Sanitizing invalid txpool lifetime", "provided", conf.Lifetime, "updated", DefaultConfig.Lifetime) + conf.Lifetime = DefaultConfig.Lifetime } return conf } @@ -246,7 +257,7 @@ func (config *TxPoolConfig) sanitize() TxPoolConfig { // current state) and future transactions. Transactions move between those // two states over time as they are received and processed. type TxPool struct { - config TxPoolConfig + config Config chainconfig *params.ChainConfig chain blockChain gasPrice *big.Int @@ -258,10 +269,10 @@ type TxPool struct { signer types.Signer mu sync.RWMutex - istanbul bool // Fork indicator whether we are in the istanbul stage. - eip2718 bool // Fork indicator whether we are using EIP-2718 type transactions. - eip1559 bool // Fork indicator whether we are using EIP-1559 type transactions. - eip3860 bool // Fork indicator whether EIP-3860 is activated. (activated in Shanghai Upgrade in Ethereum) + rules atomic.Pointer[params.Rules] // Rules for the currentHead + eip2718 atomic.Bool // Fork indicator whether we are using EIP-2718 type transactions. + eip1559 atomic.Bool // Fork indicator whether we are using EIP-1559 type transactions. + eip3860 atomic.Bool // Fork indicator whether EIP-3860 is activated. 
(activated in Shanghai Upgrade in Ethereum) currentHead *types.Header // [currentState] is the state of the blockchain head. It is reset whenever @@ -271,19 +282,19 @@ type TxPool struct { // and balances during reorgs and gossip handling. currentStateLock sync.Mutex - pendingNonces *txNoncer // Pending state tracking virtual nonces - currentMaxGas uint64 // Current gas limit for transaction caps + pendingNonces *noncer // Pending state tracking virtual nonces + currentMaxGas atomic.Uint64 // Current gas limit for transaction caps locals *accountSet // Set of local transaction to exempt from eviction rules - journal *txJournal // Journal of local transaction to back up to disk + journal *journal // Journal of local transaction to back up to disk - pending map[common.Address]*txList // All currently processable transactions - queue map[common.Address]*txList // Queued but non-processable transactions + pending map[common.Address]*list // All currently processable transactions + queue map[common.Address]*list // Queued but non-processable transactions beats map[common.Address]time.Time // Last heartbeat from each known account - all *txLookup // All transactions to allow lookups - priced *txPricedList // All transactions sorted by price + all *lookup // All transactions to allow lookups + priced *pricedList // All transactions sorted by price - chainHeadCh chan ChainHeadEvent + chainHeadCh chan core.ChainHeadEvent chainHeadSub event.Subscription reqResetCh chan *txpoolResetRequest reqPromoteCh chan *accountSet @@ -304,7 +315,7 @@ type txpoolResetRequest struct { // NewTxPool creates a new transaction pool to gather, sort and filter inbound // transactions from the network. 
-func NewTxPool(config TxPoolConfig, chainconfig *params.ChainConfig, chain blockChain) *TxPool { +func NewTxPool(config Config, chainconfig *params.ChainConfig, chain blockChain) *TxPool { // Sanitize the input to ensure no vulnerable gas prices are set config = (&config).sanitize() @@ -314,11 +325,11 @@ func NewTxPool(config TxPoolConfig, chainconfig *params.ChainConfig, chain block chainconfig: chainconfig, chain: chain, signer: types.LatestSigner(chainconfig), - pending: make(map[common.Address]*txList), - queue: make(map[common.Address]*txList), + pending: make(map[common.Address]*list), + queue: make(map[common.Address]*list), beats: make(map[common.Address]time.Time), - all: newTxLookup(), - chainHeadCh: make(chan ChainHeadEvent, chainHeadChanSize), + all: newLookup(), + chainHeadCh: make(chan core.ChainHeadEvent, chainHeadChanSize), reqResetCh: make(chan *txpoolResetRequest), reqPromoteCh: make(chan *accountSet), queueTxEventCh: make(chan *types.Transaction), @@ -333,8 +344,8 @@ func NewTxPool(config TxPoolConfig, chainconfig *params.ChainConfig, chain block log.Info("Setting new local account", "address", addr) pool.locals.add(addr) } - pool.priced = newTxPricedList(pool.all) - pool.reset(nil, chain.CurrentBlock().Header()) + pool.priced = newPricedList(pool.all) + pool.reset(nil, chain.CurrentBlock()) // Start the reorg loop early so it can handle requests generated during journal loading. pool.wg.Add(1) @@ -388,9 +399,9 @@ func (pool *TxPool) loop() { // Handle ChainHeadEvent case ev := <-pool.chainHeadCh: if ev.Block != nil { - pool.requestReset(head.Header(), ev.Block.Header()) - head = ev.Block - pool.headFeed.Send(NewTxPoolHeadEvent{Block: head}) + pool.requestReset(head, ev.Block.Header()) + head = ev.Block.Header() + pool.headFeed.Send(core.NewTxPoolHeadEvent{Head: head}) } // System shutdown. 
@@ -403,7 +414,7 @@ func (pool *TxPool) loop() { pool.mu.RLock() pending, queued := pool.stats() pool.mu.RUnlock() - stales := int(atomic.LoadInt64(&pool.priced.stales)) + stales := int(pool.priced.stales.Load()) if pending != prevPending || queued != prevQueued || stales != prevStales { log.Debug("Transaction pool status report", "executable", pending, "queued", queued, "stales", stales) @@ -460,19 +471,19 @@ func (pool *TxPool) Stop() { // SubscribeNewTxsEvent registers a subscription of NewTxsEvent and // starts sending event to the given channel. -func (pool *TxPool) SubscribeNewTxsEvent(ch chan<- NewTxsEvent) event.Subscription { +func (pool *TxPool) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription { return pool.scope.Track(pool.txFeed.Subscribe(ch)) } // SubscribeNewHeadEvent registers a subscription of NewHeadEvent and // starts sending event to the given channel. -func (pool *TxPool) SubscribeNewHeadEvent(ch chan<- NewTxPoolHeadEvent) event.Subscription { +func (pool *TxPool) SubscribeNewHeadEvent(ch chan<- core.NewTxPoolHeadEvent) event.Subscription { return pool.scope.Track(pool.headFeed.Subscribe(ch)) } // SubscribeNewReorgEvent registers a subscription of NewReorgEvent and // starts sending event to the given channel. 
-func (pool *TxPool) SubscribeNewReorgEvent(ch chan<- NewTxPoolReorgEvent) event.Subscription { +func (pool *TxPool) SubscribeNewReorgEvent(ch chan<- core.NewTxPoolReorgEvent) event.Subscription { return pool.scope.Track(pool.reorgFeed.Subscribe(ch)) } @@ -550,11 +561,11 @@ func (pool *TxPool) Content() (map[common.Address]types.Transactions, map[common pool.mu.Lock() defer pool.mu.Unlock() - pending := make(map[common.Address]types.Transactions) + pending := make(map[common.Address]types.Transactions, len(pool.pending)) for addr, list := range pool.pending { pending[addr] = list.Flatten() } - queued := make(map[common.Address]types.Transactions) + queued := make(map[common.Address]types.Transactions, len(pool.queue)) for addr, list := range pool.queue { queued[addr] = list.Flatten() } @@ -586,17 +597,26 @@ func (pool *TxPool) ContentFrom(addr common.Address) (types.Transactions, types. // transactions and only return those whose **effective** tip is large enough in // the next pending execution environment. func (pool *TxPool) Pending(enforceTips bool) map[common.Address]types.Transactions { + return pool.PendingWithBaseFee(enforceTips, nil) +} + +// If baseFee is nil, then pool.priced.urgent.baseFee is used. 
+func (pool *TxPool) PendingWithBaseFee(enforceTips bool, baseFee *big.Int) map[common.Address]types.Transactions { pool.mu.Lock() defer pool.mu.Unlock() - pending := make(map[common.Address]types.Transactions) + if baseFee == nil { + baseFee = pool.priced.urgent.baseFee + } + + pending := make(map[common.Address]types.Transactions, len(pool.pending)) for addr, list := range pool.pending { txs := list.Flatten() // If the miner requests tip enforcement, cap the lists now if enforceTips && !pool.locals.contains(addr) { for i, tx := range txs { - if tx.EffectiveGasTipIntCmp(pool.gasPrice, pool.priced.urgent.baseFee) < 0 { + if tx.EffectiveGasTipIntCmp(pool.gasPrice, baseFee) < 0 { txs = txs[:i] break } @@ -610,8 +630,12 @@ func (pool *TxPool) Pending(enforceTips bool) map[common.Address]types.Transacti } // PendingSize returns the number of pending txs in the tx pool. -func (pool *TxPool) PendingSize() int { - pending := pool.Pending(true) +// +// The enforceTips parameter can be used to do an extra filtering on the pending +// transactions and only return those whose **effective** tip is large enough in +// the next pending execution environment. +func (pool *TxPool) PendingSize(enforceTips bool) int { + pending := pool.Pending(enforceTips) count := 0 for _, txs := range pending { count += len(txs) @@ -619,6 +643,21 @@ func (pool *TxPool) PendingSize() int { return count } +// IteratePending iterates over [pool.pending] until [f] returns false. +// The caller must not modify [tx]. +func (pool *TxPool) IteratePending(f func(tx *types.Transaction) bool) { + pool.mu.RLock() + defer pool.mu.RUnlock() + + for _, list := range pool.pending { + for _, tx := range list.txs.items { + if !f(tx) { + return + } + } + } +} + // Locals retrieves the accounts currently considered local by the pool. 
func (pool *TxPool) Locals() []common.Address { pool.mu.Lock() @@ -648,37 +687,59 @@ func (pool *TxPool) checkTxState(from common.Address, tx *types.Transaction) err pool.currentStateLock.Lock() defer pool.currentStateLock.Unlock() - // cost == V + GP * GL - if balance, cost := pool.currentState.GetBalance(from), tx.Cost(); balance.Cmp(cost) < 0 { - return fmt.Errorf("%w: address %s have (%d) want (%d)", ErrInsufficientFunds, from.Hex(), balance, cost) - } - txNonce := tx.Nonce() // Ensure the transaction adheres to nonce ordering if currentNonce := pool.currentState.GetNonce(from); currentNonce > txNonce { return fmt.Errorf("%w: address %s current nonce (%d) > tx nonce (%d)", - ErrNonceTooLow, from.Hex(), currentNonce, txNonce) + core.ErrNonceTooLow, from.Hex(), currentNonce, txNonce) + } + + // cost == V + GP * GL + balance := pool.currentState.GetBalance(from) + if balance.Cmp(tx.Cost()) < 0 { + return fmt.Errorf("%w: address %s have (%d) want (%d)", core.ErrInsufficientFunds, from.Hex(), balance, tx.Cost()) + } + + // Verify that replacing transactions will not result in overdraft + list := pool.pending[from] + if list != nil { // Sender already has pending txs + sum := new(big.Int).Add(tx.Cost(), list.totalcost) + if repl := list.txs.Get(tx.Nonce()); repl != nil { + // Deduct the cost of a transaction replaced by this + sum.Sub(sum, repl.Cost()) + } + if balance.Cmp(sum) < 0 { + log.Trace("Replacing transactions would overdraft", "sender", from, "balance", pool.currentState.GetBalance(from), "required", sum) + return ErrOverdraft + } } + return nil } -// validateTx checks whether a transaction is valid according to the consensus -// rules and adheres to some heuristic limits of the local node (price and size). -func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error { +// validateTxBasics checks whether a transaction is valid according to the consensus +// rules, but does not check state-dependent validation such as sufficient balance. 
+// This check is meant as an early check which only needs to be performed once, +// and does not require the pool mutex to be held. +func (pool *TxPool) validateTxBasics(tx *types.Transaction, local bool) error { // Accept only legacy transactions until EIP-2718/2930 activates. - if !pool.eip2718 && tx.Type() != types.LegacyTxType { - return ErrTxTypeNotSupported + if !pool.eip2718.Load() && tx.Type() != types.LegacyTxType { + return core.ErrTxTypeNotSupported } // Reject dynamic fee transactions until EIP-1559 activates. - if !pool.eip1559 && tx.Type() == types.DynamicFeeTxType { - return ErrTxTypeNotSupported + if !pool.eip1559.Load() && tx.Type() == types.DynamicFeeTxType { + return core.ErrTxTypeNotSupported + } + // Reject blob transactions forever, those will have their own pool. + if tx.Type() == types.BlobTxType { + return core.ErrTxTypeNotSupported } // Reject transactions over defined size to prevent DOS attacks - if txSize := uint64(tx.Size()); txSize > txMaxSize { - return fmt.Errorf("%w tx size %d > max size %d", ErrOversizedData, txSize, txMaxSize) + if tx.Size() > txMaxSize { + return fmt.Errorf("%w tx size %d > max size %d", ErrOversizedData, tx.Size(), txMaxSize) } // Check whether the init code size has been exceeded. - if pool.eip3860 && tx.To() == nil && len(tx.Data()) > params.MaxInitCodeSize { + if pool.eip3860.Load() && tx.To() == nil && len(tx.Data()) > params.MaxInitCodeSize { return fmt.Errorf("%w: code size %v limit %v", vmerrs.ErrMaxInitCodeSizeExceeded, len(tx.Data()), params.MaxInitCodeSize) } // Transactions can't be negative. This may never happen using RLP decoded @@ -687,19 +748,24 @@ func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error { return ErrNegativeValue } // Ensure the transaction doesn't exceed the current block limit gas. 
- if txGas := tx.Gas(); pool.currentMaxGas < txGas { - return fmt.Errorf("%w: tx gas (%d) > current max gas (%d)", ErrGasLimit, txGas, pool.currentMaxGas) + if txGas := tx.Gas(); pool.currentMaxGas.Load() < txGas { + return fmt.Errorf( + "%w: tx gas (%d) > current max gas (%d)", + ErrGasLimit, + txGas, + pool.currentMaxGas.Load(), + ) } // Sanity check for extremely large numbers if tx.GasFeeCap().BitLen() > 256 { - return ErrFeeCapVeryHigh + return core.ErrFeeCapVeryHigh } if tx.GasTipCap().BitLen() > 256 { - return ErrTipVeryHigh + return core.ErrTipVeryHigh } // Ensure gasFeeCap is greater than or equal to gasTipCap. if tx.GasFeeCapIntCmp(tx.GasTipCap()) < 0 { - return ErrTipAboveFeeCap + return core.ErrTipAboveFeeCap } // Make sure the transaction is signed properly. from, err := types.Sender(pool.signer, tx) @@ -710,31 +776,32 @@ func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error { if !local && tx.GasTipCapIntCmp(pool.gasPrice) < 0 { return fmt.Errorf("%w: address %s have gas tip cap (%d) < pool gas tip cap (%d)", ErrUnderpriced, from.Hex(), tx.GasTipCap(), pool.gasPrice) } + // Ensure the transaction has more gas than the basic tx fee. + intrGas, err := core.IntrinsicGas(tx.Data(), tx.AccessList(), tx.To() == nil, *pool.rules.Load()) + if err != nil { + return err + } + if txGas := tx.Gas(); txGas < intrGas { + return fmt.Errorf("%w: address %v tx gas (%v) < intrinsic gas (%v)", core.ErrIntrinsicGas, from.Hex(), tx.Gas(), intrGas) + } + return nil +} + +// validateTx checks whether a transaction is valid according to the consensus +// rules and adheres to some heuristic limits of the local node (price and size). +func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error { + // Signature has been checked already, this cannot error. 
+ from, _ := types.Sender(pool.signer, tx) // Drop the transaction if the gas fee cap is below the pool's minimum fee if pool.minimumFee != nil && tx.GasFeeCapIntCmp(pool.minimumFee) < 0 { return fmt.Errorf("%w: address %s have gas fee cap (%d) < pool minimum fee cap (%d)", ErrUnderpriced, from.Hex(), tx.GasFeeCap(), pool.minimumFee) } // Ensure the transaction adheres to nonce ordering - if err := pool.checkTxState(from, tx); err != nil { - return err - } // Transactor should have enough funds to cover the costs - - // Flare and Songbird specific checks - err = validateTxData(tx) - if err != nil { - return err - } - - // Ensure the transaction has more gas than the basic tx fee. - intrGas, err := IntrinsicGas(tx.Data(), tx.AccessList(), tx.To() == nil, true, pool.istanbul, pool.eip3860) - if err != nil { + if err := pool.checkTxState(from, tx); err != nil { return err } - if txGas := tx.Gas(); txGas < intrGas { - return fmt.Errorf("%w: address %v tx gas (%v) < intrinsic gas (%v)", ErrIntrinsicGas, from.Hex(), tx.Gas(), intrGas) - } return nil } @@ -763,6 +830,10 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err e invalidTxMeter.Mark(1) return false, err } + + // already validated by this point + from, _ := types.Sender(pool.signer, tx) + // If the transaction pool is full, discard underpriced transactions if uint64(pool.all.Slots()+numSlots(tx)) > pool.config.GlobalSlots+pool.config.GlobalQueue { // If the new transaction is underpriced, don't accept it @@ -771,6 +842,7 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err e underpricedTxMeter.Mark(1) return false, ErrUnderpriced } + // We're about to replace a transaction. The reorg does a more thorough // analysis of what to remove and how, but it runs async. 
We don't want to // do too many replacements between reorg-runs, so we cap the number of @@ -791,18 +863,38 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err e overflowedTxMeter.Mark(1) return false, ErrTxPoolOverflow } - // Bump the counter of rejections-since-reorg - pool.changesSinceReorg += len(drop) + + // If the new transaction is a future transaction it should never churn pending transactions + if !isLocal && pool.isGapped(from, tx) { + var replacesPending bool + for _, dropTx := range drop { + dropSender, _ := types.Sender(pool.signer, dropTx) + if list := pool.pending[dropSender]; list != nil && list.Contains(dropTx.Nonce()) { + replacesPending = true + break + } + } + // Add all transactions back to the priced queue + if replacesPending { + for _, dropTx := range drop { + pool.priced.Put(dropTx, false) + } + log.Trace("Discarding future transaction replacing pending tx", "hash", hash) + return false, ErrFutureReplacePending + } + } + // Kick out the underpriced remote transactions. for _, tx := range drop { log.Trace("Discarding freshly underpriced transaction", "hash", tx.Hash(), "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap()) underpricedTxMeter.Mark(1) - pool.removeTx(tx.Hash(), false) + dropped := pool.removeTx(tx.Hash(), false) + pool.changesSinceReorg += dropped } } + // Try to replace an existing transaction in the pending pool - from, _ := types.Sender(pool.signer, tx) // already validated - if list := pool.pending[from]; list != nil && list.Overlaps(tx) { + if list := pool.pending[from]; list != nil && list.Contains(tx.Nonce()) { // Nonce already pending, check if required price bump is met inserted, old := list.Add(tx, pool.config.PriceBump) if !inserted { @@ -845,6 +937,28 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err e return replaced, nil } +// isGapped reports whether the given transaction is immediately executable. 
+func (pool *TxPool) isGapped(from common.Address, tx *types.Transaction) bool { + // Short circuit if transaction matches pending nonce and can be promoted + // to pending list as an executable transaction. + next := pool.pendingNonces.get(from) + if tx.Nonce() == next { + return false + } + // The transaction has a nonce gap with pending list, it's only considered + // as executable if transactions in queue can fill up the nonce gap. + queue, ok := pool.queue[from] + if !ok { + return true + } + for nonce := next; nonce < tx.Nonce(); nonce++ { + if !queue.Contains(nonce) { + return true // txs in queue can't fill up the nonce gap + } + } + return false +} + // enqueueTx inserts a new transaction into the non-executable transaction queue. // // Note, this method assumes the pool lock is held! @@ -852,7 +966,7 @@ func (pool *TxPool) enqueueTx(hash common.Hash, tx *types.Transaction, local boo // Try to insert the transaction into the future queue from, _ := types.Sender(pool.signer, tx) // already validated if pool.queue[from] == nil { - pool.queue[from] = newTxList(false) + pool.queue[from] = newList(false) } inserted, old := pool.queue[from].Add(tx, pool.config.PriceBump) if !inserted { @@ -904,7 +1018,7 @@ func (pool *TxPool) journalTx(from common.Address, tx *types.Transaction) { func (pool *TxPool) promoteTx(addr common.Address, hash common.Hash, tx *types.Transaction) bool { // Try to insert the transaction into the pending queue if pool.pending[addr] == nil { - pool.pending[addr] = newTxList(true) + pool.pending[addr] = newList(true) } list := pool.pending[addr] @@ -943,7 +1057,7 @@ func (pool *TxPool) AddLocals(txs []*types.Transaction) []error { } // AddLocal enqueues a single local transaction into the pool if it is valid. This is -// a convenience wrapper aroundd AddLocals. +// a convenience wrapper around AddLocals. 
func (pool *TxPool) AddLocal(tx *types.Transaction) error { errs := pool.AddLocals([]*types.Transaction{tx}) return errs[0] @@ -958,7 +1072,7 @@ func (pool *TxPool) AddRemotes(txs []*types.Transaction) []error { return pool.addTxs(txs, false, false) } -// This is like AddRemotes, but waits for pool reorganization. Tests use this method. +// AddRemotesSync is like AddRemotes, but waits for pool reorganization. Tests use this method. func (pool *TxPool) AddRemotesSync(txs []*types.Transaction) []error { return pool.addTxs(txs, false, true) } @@ -992,12 +1106,12 @@ func (pool *TxPool) addTxs(txs []*types.Transaction, local, sync bool) []error { knownTxMeter.Mark(1) continue } - // Exclude transactions with invalid signatures as soon as - // possible and cache senders in transactions before - // obtaining lock - _, err := types.Sender(pool.signer, tx) - if err != nil { - errs[i] = ErrInvalidSender + // Exclude transactions with basic errors, e.g invalid signatures and + // insufficient intrinsic gas as soon as possible and cache senders + // in transactions before obtaining lock + + if err := pool.validateTxBasics(tx, local); err != nil { + errs[i] = err invalidTxMeter.Mark(1) continue } @@ -1013,7 +1127,7 @@ func (pool *TxPool) addTxs(txs []*types.Transaction, local, sync bool) []error { newErrs, dirtyAddrs := pool.addTxsLocked(news, local) pool.mu.Unlock() - nilSlot := 0 + var nilSlot = 0 for _, err := range newErrs { for errs[nilSlot] != nil { nilSlot++ @@ -1085,6 +1199,8 @@ func (pool *TxPool) HasLocal(hash common.Hash) bool { return pool.all.GetLocal(hash) != nil } +// RemoveTx removes a single transaction from the queue, moving all subsequent +// transactions back to the future queue. func (pool *TxPool) RemoveTx(hash common.Hash) { pool.mu.Lock() defer pool.mu.Unlock() @@ -1094,11 +1210,12 @@ func (pool *TxPool) RemoveTx(hash common.Hash) { // removeTx removes a single transaction from the queue, moving all subsequent // transactions back to the future queue. 
-func (pool *TxPool) removeTx(hash common.Hash, outofbound bool) { +// Returns the number of transactions removed from the pending queue. +func (pool *TxPool) removeTx(hash common.Hash, outofbound bool) int { // Fetch the transaction we wish to delete tx := pool.all.Get(hash) if tx == nil { - return + return 0 } addr, _ := types.Sender(pool.signer, tx) // already validated during insertion @@ -1126,7 +1243,7 @@ func (pool *TxPool) removeTx(hash common.Hash, outofbound bool) { pool.pendingNonces.setIfLower(addr, tx.Nonce()) // Reduce the pending counter pendingGauge.Dec(int64(1 + len(invalids))) - return + return 1 + len(invalids) } } // Transaction is in the future queue @@ -1140,6 +1257,7 @@ func (pool *TxPool) removeTx(hash common.Hash, outofbound bool) { delete(pool.beats, addr) } } + return 0 } // requestReset requests a pool reset to the new head block. @@ -1184,7 +1302,7 @@ func (pool *TxPool) scheduleReorgLoop() { launchNextRun bool reset *txpoolResetRequest dirtyAccounts *accountSet - queuedEvents = make(map[common.Address]*txSortedMap) + queuedEvents = make(map[common.Address]*sortedMap) ) for { // Launch next background reorg if needed @@ -1197,7 +1315,7 @@ func (pool *TxPool) scheduleReorgLoop() { launchNextRun = false reset, dirtyAccounts = nil, nil - queuedEvents = make(map[common.Address]*txSortedMap) + queuedEvents = make(map[common.Address]*sortedMap) } select { @@ -1226,7 +1344,7 @@ func (pool *TxPool) scheduleReorgLoop() { // request one later if they want the events sent. addr, _ := types.Sender(pool.signer, tx) if _, ok := queuedEvents[addr]; !ok { - queuedEvents[addr] = newTxSortedMap() + queuedEvents[addr] = newSortedMap() } queuedEvents[addr].Put(tx) @@ -1245,7 +1363,7 @@ func (pool *TxPool) scheduleReorgLoop() { } // runReorg runs reset and promoteExecutables on behalf of scheduleReorgLoop. 
-func (pool *TxPool) runReorg(done chan struct{}, reset *txpoolResetRequest, dirtyAccounts *accountSet, events map[common.Address]*txSortedMap) { +func (pool *TxPool) runReorg(done chan struct{}, reset *txpoolResetRequest, dirtyAccounts *accountSet, events map[common.Address]*sortedMap) { defer func(t0 time.Time) { reorgDurationTimer.Update(time.Since(t0)) }(time.Now()) @@ -1284,7 +1402,7 @@ func (pool *TxPool) runReorg(done chan struct{}, reset *txpoolResetRequest, dirt // because of another transaction (e.g. higher gas price). if reset != nil { pool.demoteUnexecutables() - if reset.newHead != nil && pool.chainconfig.IsApricotPhase3(new(big.Int).SetUint64(reset.newHead.Time)) { + if reset.newHead != nil && pool.chainconfig.IsApricotPhase3(reset.newHead.Time) { _, baseFeeEstimate, err := dummy.EstimateNextBaseFee(pool.chainconfig, reset.newHead, uint64(time.Now().Unix())) if err == nil { pool.priced.SetBaseFee(baseFeeEstimate) @@ -1308,14 +1426,14 @@ func (pool *TxPool) runReorg(done chan struct{}, reset *txpoolResetRequest, dirt pool.mu.Unlock() if reset != nil && reset.newHead != nil { - pool.reorgFeed.Send(NewTxPoolReorgEvent{reset.newHead}) + pool.reorgFeed.Send(core.NewTxPoolReorgEvent{Head: reset.newHead}) } // Notify subsystems for newly added transactions for _, tx := range promoted { addr, _ := types.Sender(pool.signer, tx) if _, ok := events[addr]; !ok { - events[addr] = newTxSortedMap() + events[addr] = newSortedMap() } events[addr].Put(tx) } @@ -1324,7 +1442,7 @@ func (pool *TxPool) runReorg(done chan struct{}, reset *txpoolResetRequest, dirt for _, set := range events { txs = append(txs, set.Flatten()...) } - pool.txFeed.Send(NewTxsEvent{txs}) + pool.txFeed.Send(core.NewTxsEvent{Txs: txs}) } } @@ -1351,7 +1469,7 @@ func (pool *TxPool) reset(oldHead, newHead *types.Header) { if rem == nil { // This can happen if a setHead is performed, where we simply discard the old // head from the chain. 
- // If that is the case, we don't have the lost transactions any more, and + // If that is the case, we don't have the lost transactions anymore, and // there's nothing to add if newNum >= oldNum { // If we reorged to a same or higher number, then it's not a case of setHead @@ -1396,7 +1514,7 @@ func (pool *TxPool) reset(oldHead, newHead *types.Header) { } // Initialize the internal state to the current head if newHead == nil { - newHead = pool.chain.CurrentBlock().Header() // Special case during testing + newHead = pool.chain.CurrentBlock() // Special case during testing } statedb, err := pool.chain.StateAt(newHead.Root) if err != nil { @@ -1407,8 +1525,8 @@ func (pool *TxPool) reset(oldHead, newHead *types.Header) { pool.currentStateLock.Lock() pool.currentState = statedb pool.currentStateLock.Unlock() - pool.pendingNonces = newTxNoncer(statedb) - pool.currentMaxGas = newHead.GasLimit + pool.pendingNonces = newNoncer(statedb) + pool.currentMaxGas.Store(newHead.GasLimit) // Inject any transactions discarded due to reorgs log.Debug("Reinjecting stale transactions", "count", len(reinject)) @@ -1417,12 +1535,12 @@ func (pool *TxPool) reset(oldHead, newHead *types.Header) { // Update all fork indicator by next pending block number. 
next := new(big.Int).Add(newHead.Number, big.NewInt(1)) - pool.istanbul = pool.chainconfig.IsIstanbul(next) + rules := pool.chainconfig.AvalancheRules(next, newHead.Time) - timestamp := new(big.Int).SetUint64(newHead.Time) - pool.eip2718 = pool.chainconfig.IsApricotPhase2(timestamp) - pool.eip1559 = pool.chainconfig.IsApricotPhase3(timestamp) - pool.eip3860 = pool.chainconfig.IsDUpgrade(timestamp) + pool.rules.Store(&rules) + pool.eip2718.Store(rules.IsApricotPhase2) + pool.eip1559.Store(rules.IsApricotPhase3) + pool.eip3860.Store(rules.IsDurango) } // promoteExecutables moves transactions that have become processable from the @@ -1449,7 +1567,7 @@ func (pool *TxPool) promoteExecutables(accounts []common.Address) []*types.Trans } log.Trace("Removed old queued transactions", "count", len(forwards)) // Drop all transactions that are too costly (low balance or out of gas) - drops, _ := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas) + drops, _ := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas.Load()) for _, tx := range drops { hash := tx.Hash() pool.all.Remove(hash) @@ -1508,7 +1626,7 @@ func (pool *TxPool) truncatePending() { pendingBeforeCap := pending // Assemble a spam order to penalize large transactors first - spammers := prque.New(nil) + spammers := prque.New[int64, common.Address](nil) for addr, list := range pool.pending { // Only evict transactions from high rollers if !pool.locals.contains(addr) && uint64(list.Len()) > pool.config.AccountSlots { @@ -1520,12 +1638,12 @@ func (pool *TxPool) truncatePending() { for pending > pool.config.GlobalSlots && !spammers.Empty() { // Retrieve the next offender if not local address offender, _ := spammers.Pop() - offenders = append(offenders, offender.(common.Address)) + offenders = append(offenders, offender) // Equalize balances until all the same or below threshold if len(offenders) > 1 { // Calculate the equalization threshold for all current offenders - threshold := 
pool.pending[offender.(common.Address)].Len() + threshold := pool.pending[offender].Len() // Iteratively reduce all offenders until below limit or threshold reached for pending > pool.config.GlobalSlots && pool.pending[offenders[len(offenders)-2]].Len() > threshold { @@ -1649,7 +1767,7 @@ func (pool *TxPool) demoteUnexecutables() { log.Trace("Removed old pending transaction", "hash", hash) } // Drop all transactions that are too costly (low balance or out of gas), and queue any invalids back for later - drops, invalids := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas) + drops, invalids := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas.Load()) for _, tx := range drops { hash := tx.Hash() log.Trace("Removed unpayable pending transaction", "hash", hash) @@ -1678,7 +1796,6 @@ func (pool *TxPool) demoteUnexecutables() { // Internal shuffle shouldn't touch the lookup set. pool.enqueueTx(hash, tx, false, false) } - // This might happen in a reorg, so log it to the metering pendingGauge.Dec(int64(len(gapped))) } // Delete the entire pending entry if it became empty. @@ -1695,7 +1812,7 @@ func (pool *TxPool) startPeriodicFeeUpdate() { // Call updateBaseFee here to ensure that there is not a [baseFeeUpdateInterval] delay // when starting up in ApricotPhase3 before the base fee is updated. 
- if time.Now().After(time.Unix(pool.chainconfig.ApricotPhase3BlockTimestamp.Int64(), 0)) { + if time.Now().After(utils.Uint64ToTime(pool.chainconfig.ApricotPhase3BlockTimestamp)) { pool.updateBaseFee() } @@ -1708,7 +1825,7 @@ func (pool *TxPool) periodicBaseFeeUpdate() { // Sleep until its time to start the periodic base fee update or the tx pool is shutting down select { - case <-time.After(time.Until(time.Unix(pool.chainconfig.ApricotPhase3BlockTimestamp.Int64(), 0))): + case <-time.After(time.Until(utils.Uint64ToTime(pool.chainconfig.ApricotPhase3BlockTimestamp))): case <-pool.generalShutdownChan: return // Return early if shutting down } @@ -1737,79 +1854,6 @@ func (pool *TxPool) updateBaseFee() { } } -// Flare and Songbird specific checks -func validateTxData(tx *types.Transaction) error { - to := tx.To() - if to == nil { - return nil - } - - // Check if transaction is being sent to the prioritized submitter contract - if *to == prioritisedSubmitterContractAddress { - isValidDaemon := checkDataPrefix(tx.Data(), submitterDataPrefixes) - additionalAllowedMethodIdentifiers := [][4]byte{ - {0x67, 0xfc, 0x40, 0x29}, // cancelGovernanceCall - {0x5f, 0xf2, 0x70, 0x79}, // executeGovernanceCall - {0xf8, 0xae, 0x8a, 0x2f}, // initNewVotingRound - {0xef, 0x88, 0xbf, 0x13}, // initialise - {0x94, 0x18, 0x77, 0xd0}, // setSubmit3MethodEnabled - {0x9e, 0xe7, 0xfe, 0x4d}, // setSubmitAndPassData - {0x6c, 0x53, 0x2f, 0xae}, // submit1 - {0x9d, 0x00, 0xc9, 0xfd}, // submit2 - {0xe1, 0xb1, 0x57, 0xe7}, // submit3 - {0x83, 0x3b, 0xf6, 0xc0}, // submitAndPass - {0x57, 0xee, 0xd5, 0x80}, // submitSignatures - {0xf5, 0xa9, 0x83, 0x83}, // switchToProductionMode - {0xb0, 0x0c, 0x0b, 0x76}, // updateContractAddresses - } - isValidAdditional := checkDataPrefix(tx.Data(), additionalAllowedMethodIdentifiers) - if !(isValidDaemon || isValidAdditional) { - return fmt.Errorf("invalid transaction data on prioritised submitter contract") - } - } - // Check if transaction is on allow list 
for FTSO v1 contract - if *to == prioritisedFTSOContractAddress && (tx.ChainId().Cmp(params.FlareChainID) == 0 || tx.ChainId().Cmp(params.CostwoChainID) == 0 || tx.ChainId().Cmp(params.LocalFlareChainID) == 0) { - isValidDaemon := checkDataPrefix(tx.Data(), prioritisedFTSOContractDataPrefixesFlareNetworks) - additionalAllowedMethodIdentifiers := [][4]byte{ - {0x67, 0xfc, 0x40, 0x29}, // cancelGovernanceCall - {0x5f, 0xf2, 0x70, 0x79}, // executeGovernanceCall - {0xc9, 0xf9, 0x60, 0xeb}, // initialiseFixedAddress - {0xe2, 0xdb, 0x5a, 0x52}, // revealPrices - {0xae, 0xa3, 0x6b, 0x53}, // setAddressUpdater - {0x9e, 0xc2, 0xb5, 0x81}, // setTrustedAddresses - {0x8f, 0xc6, 0xf6, 0x67}, // submitHash - {0xf5, 0xa9, 0x83, 0x83}, // switchToProductionMode - {0xb0, 0x0c, 0x0b, 0x76}, // updateContractAddresses - {0x9d, 0x98, 0x6f, 0x91}, // voterWhitelisted - {0x76, 0x79, 0x4e, 0xfb}, // votersRemovedFromWhitelist - } - isValidAdditional := checkDataPrefix(tx.Data(), additionalAllowedMethodIdentifiers) - if !(isValidDaemon || isValidAdditional) { - return fmt.Errorf("invalid transaction data on prioritised FTSO contract Flare") - } - } - if *to == prioritisedFTSOContractAddress && (tx.ChainId().Cmp(params.SongbirdChainID) == 0 || tx.ChainId().Cmp(params.CostonChainID) == 0 || tx.ChainId().Cmp(params.LocalChainID) == 0) { - isValidDaemon := checkDataPrefix(tx.Data(), prioritisedFTSOContractDataPrefixesSongbirdNetworks) - additionalAllowedMethodIdentifiers := [][4]byte{ - {0x5d, 0x36, 0xb1, 0x90}, // claimGovernance - {0xc9, 0xf9, 0x60, 0xeb}, // initialiseFixedAddress - {0xc3, 0x73, 0xa0, 0x8e}, // proposeGovernance - {0x60, 0x84, 0x8b, 0x44}, // revealPrices - {0x8a, 0xb6, 0x33, 0x80}, // setContractAddresses - {0x9e, 0xc2, 0xb5, 0x81}, // setTrustedAddresses - {0xc5, 0xad, 0xc5, 0x39}, // submitPriceHashes - {0xd3, 0x8b, 0xff, 0xf4}, // transferGovernance - {0x9d, 0x98, 0x6f, 0x91}, // voterWhitelisted - {0x76, 0x79, 0x4e, 0xfb}, // votersRemovedFromWhitelist - } - 
isValidAdditional := checkDataPrefix(tx.Data(), additionalAllowedMethodIdentifiers) - if !(isValidDaemon || isValidAdditional) { - return fmt.Errorf("invalid transaction data on prioritised FTSO contract Songbird") - } - } - return nil -} - // addressByHeartbeat is an account address tagged with its last activity timestamp. type addressByHeartbeat struct { address common.Address @@ -1834,7 +1878,7 @@ type accountSet struct { // derivations. func newAccountSet(signer types.Signer, addrs ...common.Address) *accountSet { as := &accountSet{ - accounts: make(map[common.Address]struct{}), + accounts: make(map[common.Address]struct{}, len(addrs)), signer: signer, } for _, addr := range addrs { @@ -1892,7 +1936,7 @@ func (as *accountSet) merge(other *accountSet) { as.cache = nil } -// txLookup is used internally by TxPool to track transactions while allowing +// lookup is used internally by TxPool to track transactions while allowing // lookup without mutex contention. // // Note, although this type is properly protected against concurrent access, it @@ -1904,16 +1948,16 @@ func (as *accountSet) merge(other *accountSet) { // // This lookup set combines the notion of "local transactions", which is useful // to build upper-level structure. -type txLookup struct { +type lookup struct { slots int lock sync.RWMutex locals map[common.Hash]*types.Transaction remotes map[common.Hash]*types.Transaction } -// newTxLookup returns a new txLookup structure. -func newTxLookup() *txLookup { - return &txLookup{ +// newLookup returns a new lookup structure. +func newLookup() *lookup { + return &lookup{ locals: make(map[common.Hash]*types.Transaction), remotes: make(map[common.Hash]*types.Transaction), } @@ -1922,7 +1966,7 @@ func newTxLookup() *txLookup { // Range calls f on each key and value present in the map. The callback passed // should return the indicator whether the iteration needs to be continued. // Callers need to specify which set (or both) to be iterated. 
-func (t *txLookup) Range(f func(hash common.Hash, tx *types.Transaction, local bool) bool, local bool, remote bool) { +func (t *lookup) Range(f func(hash common.Hash, tx *types.Transaction, local bool) bool, local bool, remote bool) { t.lock.RLock() defer t.lock.RUnlock() @@ -1943,7 +1987,7 @@ func (t *txLookup) Range(f func(hash common.Hash, tx *types.Transaction, local b } // Get returns a transaction if it exists in the lookup, or nil if not found. -func (t *txLookup) Get(hash common.Hash) *types.Transaction { +func (t *lookup) Get(hash common.Hash) *types.Transaction { t.lock.RLock() defer t.lock.RUnlock() @@ -1954,7 +1998,7 @@ func (t *txLookup) Get(hash common.Hash) *types.Transaction { } // GetLocal returns a transaction if it exists in the lookup, or nil if not found. -func (t *txLookup) GetLocal(hash common.Hash) *types.Transaction { +func (t *lookup) GetLocal(hash common.Hash) *types.Transaction { t.lock.RLock() defer t.lock.RUnlock() @@ -1962,7 +2006,7 @@ func (t *txLookup) GetLocal(hash common.Hash) *types.Transaction { } // GetRemote returns a transaction if it exists in the lookup, or nil if not found. -func (t *txLookup) GetRemote(hash common.Hash) *types.Transaction { +func (t *lookup) GetRemote(hash common.Hash) *types.Transaction { t.lock.RLock() defer t.lock.RUnlock() @@ -1970,7 +2014,7 @@ func (t *txLookup) GetRemote(hash common.Hash) *types.Transaction { } // Count returns the current number of transactions in the lookup. -func (t *txLookup) Count() int { +func (t *lookup) Count() int { t.lock.RLock() defer t.lock.RUnlock() @@ -1978,7 +2022,7 @@ func (t *txLookup) Count() int { } // LocalCount returns the current number of local transactions in the lookup. -func (t *txLookup) LocalCount() int { +func (t *lookup) LocalCount() int { t.lock.RLock() defer t.lock.RUnlock() @@ -1986,7 +2030,7 @@ func (t *txLookup) LocalCount() int { } // RemoteCount returns the current number of remote transactions in the lookup. 
-func (t *txLookup) RemoteCount() int { +func (t *lookup) RemoteCount() int { t.lock.RLock() defer t.lock.RUnlock() @@ -1994,7 +2038,7 @@ func (t *txLookup) RemoteCount() int { } // Slots returns the current number of slots used in the lookup. -func (t *txLookup) Slots() int { +func (t *lookup) Slots() int { t.lock.RLock() defer t.lock.RUnlock() @@ -2002,7 +2046,7 @@ func (t *txLookup) Slots() int { } // Add adds a transaction to the lookup. -func (t *txLookup) Add(tx *types.Transaction, local bool) { +func (t *lookup) Add(tx *types.Transaction, local bool) { t.lock.Lock() defer t.lock.Unlock() @@ -2017,7 +2061,7 @@ func (t *txLookup) Add(tx *types.Transaction, local bool) { } // Remove removes a transaction from the lookup. -func (t *txLookup) Remove(hash common.Hash) { +func (t *lookup) Remove(hash common.Hash) { t.lock.Lock() defer t.lock.Unlock() @@ -2038,7 +2082,7 @@ func (t *txLookup) Remove(hash common.Hash) { // RemoteToLocals migrates the transactions belongs to the given locals to locals // set. The assumption is held the locals set is thread-safe to be used. -func (t *txLookup) RemoteToLocals(locals *accountSet) int { +func (t *lookup) RemoteToLocals(locals *accountSet) int { t.lock.Lock() defer t.lock.Unlock() @@ -2054,7 +2098,7 @@ func (t *txLookup) RemoteToLocals(locals *accountSet) int { } // RemotesBelowTip finds all remote transactions below the given tip threshold. -func (t *txLookup) RemotesBelowTip(threshold *big.Int) types.Transactions { +func (t *lookup) RemotesBelowTip(threshold *big.Int) types.Transactions { found := make(types.Transactions, 0, 128) t.Range(func(hash common.Hash, tx *types.Transaction, local bool) bool { if tx.GasTipCapIntCmp(threshold) < 0 { diff --git a/coreth/core/txpool/txpool2_test.go b/coreth/core/txpool/txpool2_test.go new file mode 100644 index 00000000..330ed60a --- /dev/null +++ b/coreth/core/txpool/txpool2_test.go @@ -0,0 +1,246 @@ +// (c) 2023, Ava Labs, Inc. 
+// +// This file is a derived work, based on the go-ethereum library whose original +// notices appear below. +// +// It is distributed under a license compatible with the licensing terms of the +// original code from which it is derived. +// +// Much love to the original authors for their work. +// ********** +// Copyright 2023 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+package txpool + +import ( + "crypto/ecdsa" + "math/big" + "testing" + + "github.com/ava-labs/coreth/core/rawdb" + "github.com/ava-labs/coreth/core/state" + "github.com/ava-labs/coreth/core/types" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/event" +) + +func pricedValuedTransaction(nonce uint64, value int64, gaslimit uint64, gasprice *big.Int, key *ecdsa.PrivateKey) *types.Transaction { + tx, _ := types.SignTx(types.NewTransaction(nonce, common.Address{}, big.NewInt(value), gaslimit, gasprice, nil), types.HomesteadSigner{}, key) + return tx +} + +func count(t *testing.T, pool *TxPool) (pending int, queued int) { + t.Helper() + pending, queued = pool.stats() + if err := validatePoolInternals(pool); err != nil { + t.Fatalf("pool internal state corrupted: %v", err) + } + return pending, queued +} + +func fillPool(t testing.TB, pool *TxPool) { + t.Helper() + // Create a number of test accounts, fund them and make transactions + executableTxs := types.Transactions{} + nonExecutableTxs := types.Transactions{} + for i := 0; i < 384; i++ { + key, _ := crypto.GenerateKey() + pool.currentState.AddBalance(crypto.PubkeyToAddress(key.PublicKey), big.NewInt(10000000000)) + // Add executable ones + for j := 0; j < int(pool.config.AccountSlots); j++ { + executableTxs = append(executableTxs, pricedTransaction(uint64(j), 100000, big.NewInt(300), key)) + } + } + // Import the batch and verify that limits have been enforced + pool.AddRemotesSync(executableTxs) + pool.AddRemotesSync(nonExecutableTxs) + pending, queued := pool.Stats() + slots := pool.all.Slots() + // sanity-check that the test prerequisites are ok (pending full) + if have, want := pending, slots; have != want { + t.Fatalf("have %d, want %d", have, want) + } + if have, want := queued, 0; have != want { + t.Fatalf("have %d, want %d", have, want) + } + + t.Logf("pool.config: GlobalSlots=%d, GlobalQueue=%d\n", pool.config.GlobalSlots, 
pool.config.GlobalQueue) + t.Logf("pending: %d queued: %d, all: %d\n", pending, queued, slots) +} + +// Tests that if a batch high-priced of non-executables arrive, they do not kick out +// executable transactions +func TestTransactionFutureAttack(t *testing.T) { + t.Parallel() + + // Create the pool to test the limit enforcement with + statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + blockchain := newTestBlockChain(1000000, statedb, new(event.Feed)) + config := testTxPoolConfig + config.GlobalQueue = 100 + config.GlobalSlots = 100 + pool := NewTxPool(config, eip1559Config, blockchain) + defer pool.Stop() + fillPool(t, pool) + pending, _ := pool.Stats() + // Now, future transaction attack starts, let's add a bunch of expensive non-executables, and see if the pending-count drops + { + key, _ := crypto.GenerateKey() + pool.currentState.AddBalance(crypto.PubkeyToAddress(key.PublicKey), big.NewInt(100000000000)) + futureTxs := types.Transactions{} + for j := 0; j < int(pool.config.GlobalSlots+pool.config.GlobalQueue); j++ { + futureTxs = append(futureTxs, pricedTransaction(1000+uint64(j), 100000, big.NewInt(500), key)) + } + for i := 0; i < 5; i++ { + pool.AddRemotesSync(futureTxs) + newPending, newQueued := count(t, pool) + t.Logf("pending: %d queued: %d, all: %d\n", newPending, newQueued, pool.all.Slots()) + } + } + newPending, _ := pool.Stats() + // Pending should not have been touched + if have, want := newPending, pending; have < want { + t.Errorf("wrong pending-count, have %d, want %d (GlobalSlots: %d)", + have, want, pool.config.GlobalSlots) + } +} + +// Tests that if a batch high-priced of non-executables arrive, they do not kick out +// executable transactions +func TestTransactionFuture1559(t *testing.T) { + t.Parallel() + // Create the pool to test the pricing enforcement with + statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + blockchain := 
newTestBlockChain(1000000, statedb, new(event.Feed)) + pool := NewTxPool(testTxPoolConfig, eip1559Config, blockchain) + defer pool.Stop() + + // Create a number of test accounts, fund them and make transactions + fillPool(t, pool) + pending, _ := pool.Stats() + + // Now, future transaction attack starts, let's add a bunch of expensive non-executables, and see if the pending-count drops + { + key, _ := crypto.GenerateKey() + pool.currentState.AddBalance(crypto.PubkeyToAddress(key.PublicKey), big.NewInt(100000000000)) + futureTxs := types.Transactions{} + for j := 0; j < int(pool.config.GlobalSlots+pool.config.GlobalQueue); j++ { + futureTxs = append(futureTxs, dynamicFeeTx(1000+uint64(j), 100000, big.NewInt(200), big.NewInt(101), key)) + } + pool.AddRemotesSync(futureTxs) + } + newPending, _ := pool.Stats() + // Pending should not have been touched + if have, want := newPending, pending; have != want { + t.Errorf("Wrong pending-count, have %d, want %d (GlobalSlots: %d)", + have, want, pool.config.GlobalSlots) + } +} + +// Tests that if a batch of balance-overdraft txs arrive, they do not kick out +// executable transactions +func TestTransactionZAttack(t *testing.T) { + t.Parallel() + // Create the pool to test the pricing enforcement with + statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + blockchain := newTestBlockChain(1000000, statedb, new(event.Feed)) + pool := NewTxPool(testTxPoolConfig, eip1559Config, blockchain) + defer pool.Stop() + // Create a number of test accounts, fund them and make transactions + fillPool(t, pool) + + countInvalidPending := func() int { + t.Helper() + var ivpendingNum int + pendingtxs, _ := pool.Content() + for account, txs := range pendingtxs { + cur_balance := new(big.Int).Set(pool.currentState.GetBalance(account)) + for _, tx := range txs { + if cur_balance.Cmp(tx.Value()) <= 0 { + ivpendingNum++ + } else { + cur_balance.Sub(cur_balance, tx.Value()) + } + } + } + if err := 
validatePoolInternals(pool); err != nil { + t.Fatalf("pool internal state corrupted: %v", err) + } + return ivpendingNum + } + ivPending := countInvalidPending() + t.Logf("invalid pending: %d\n", ivPending) + + // Now, DETER-Z attack starts, let's add a bunch of expensive non-executables (from N accounts) along with balance-overdraft txs (from one account), and see if the pending-count drops + for j := 0; j < int(pool.config.GlobalQueue); j++ { + futureTxs := types.Transactions{} + key, _ := crypto.GenerateKey() + pool.currentState.AddBalance(crypto.PubkeyToAddress(key.PublicKey), big.NewInt(100000000000)) + futureTxs = append(futureTxs, pricedTransaction(1000+uint64(j), 21000, big.NewInt(500), key)) + pool.AddRemotesSync(futureTxs) + } + + overDraftTxs := types.Transactions{} + { + key, _ := crypto.GenerateKey() + pool.currentState.AddBalance(crypto.PubkeyToAddress(key.PublicKey), big.NewInt(100000000000)) + for j := 0; j < int(pool.config.GlobalSlots); j++ { + overDraftTxs = append(overDraftTxs, pricedValuedTransaction(uint64(j), 600000000000, 21000, big.NewInt(500), key)) + } + } + pool.AddRemotesSync(overDraftTxs) + pool.AddRemotesSync(overDraftTxs) + pool.AddRemotesSync(overDraftTxs) + pool.AddRemotesSync(overDraftTxs) + pool.AddRemotesSync(overDraftTxs) + + newPending, newQueued := count(t, pool) + newIvPending := countInvalidPending() + t.Logf("pool.all.Slots(): %d\n", pool.all.Slots()) + t.Logf("pending: %d queued: %d, all: %d\n", newPending, newQueued, pool.all.Slots()) + t.Logf("invalid pending: %d\n", newIvPending) + + // Pending should not have been touched + if newIvPending != ivPending { + t.Errorf("Wrong invalid pending-count, have %d, want %d (GlobalSlots: %d, queued: %d)", + newIvPending, ivPending, pool.config.GlobalSlots, newQueued) + } +} + +func BenchmarkFutureAttack(b *testing.B) { + // Create the pool to test the limit enforcement with + statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + 
blockchain := newTestBlockChain(1000000, statedb, new(event.Feed)) + config := testTxPoolConfig + config.GlobalQueue = 100 + config.GlobalSlots = 100 + pool := NewTxPool(config, eip1559Config, blockchain) + defer pool.Stop() + fillPool(b, pool) + + key, _ := crypto.GenerateKey() + pool.currentState.AddBalance(crypto.PubkeyToAddress(key.PublicKey), big.NewInt(100000000000)) + futureTxs := types.Transactions{} + + for n := 0; n < b.N; n++ { + futureTxs = append(futureTxs, pricedTransaction(1000+uint64(n), 100000, big.NewInt(500), key)) + } + b.ResetTimer() + for i := 0; i < 5; i++ { + pool.AddRemotesSync(futureTxs) + } +} diff --git a/coreth/core/tx_pool_test.go b/coreth/core/txpool/txpool_test.go similarity index 86% rename from coreth/core/tx_pool_test.go rename to coreth/core/txpool/txpool_test.go index a185bc7f..d6baefaa 100644 --- a/coreth/core/tx_pool_test.go +++ b/coreth/core/txpool/txpool_test.go @@ -24,10 +24,11 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . -package core +package txpool import ( "crypto/ecdsa" + crand "crypto/rand" "errors" "fmt" "math/big" @@ -35,14 +36,17 @@ import ( "os" "strings" "sync" + "sync/atomic" "testing" "time" + "github.com/ava-labs/coreth/core" "github.com/ava-labs/coreth/core/rawdb" "github.com/ava-labs/coreth/core/state" "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/params" "github.com/ava-labs/coreth/trie" + "github.com/ava-labs/coreth/utils" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/event" @@ -51,35 +55,33 @@ import ( var ( // testTxPoolConfig is a transaction pool configuration without stateful disk // sideeffects used during testing. - testTxPoolConfig TxPoolConfig + testTxPoolConfig Config // eip1559Config is a chain config with EIP-1559 enabled at block 0. 
eip1559Config *params.ChainConfig ) func init() { - testTxPoolConfig = DefaultTxPoolConfig + testTxPoolConfig = DefaultConfig testTxPoolConfig.Journal = "" cpy := *params.TestChainConfig eip1559Config = &cpy - eip1559Config.ApricotPhase2BlockTimestamp = common.Big0 - eip1559Config.ApricotPhase3BlockTimestamp = common.Big0 + eip1559Config.ApricotPhase2BlockTimestamp = utils.NewUint64(0) + eip1559Config.ApricotPhase3BlockTimestamp = utils.NewUint64(0) } type testBlockChain struct { statedb *state.StateDB - gasLimit uint64 + gasLimit atomic.Uint64 chainHeadFeed *event.Feed lock sync.Mutex } -func newTestBlockchain(statedb *state.StateDB, gasLimit uint64, chainHeadFeed *event.Feed) *testBlockChain { - return &testBlockChain{ - statedb: statedb, - gasLimit: gasLimit, - chainHeadFeed: chainHeadFeed, - } +func newTestBlockChain(gasLimit uint64, statedb *state.StateDB, chainHeadFeed *event.Feed) *testBlockChain { + bc := testBlockChain{statedb: statedb, chainHeadFeed: chainHeadFeed} + bc.gasLimit.Store(gasLimit) + return &bc } func (bc *testBlockChain) reset(statedb *state.StateDB, gasLimit uint64, chainHeadFeed *event.Feed) { @@ -87,21 +89,22 @@ func (bc *testBlockChain) reset(statedb *state.StateDB, gasLimit uint64, chainHe defer bc.lock.Unlock() bc.statedb = statedb - bc.gasLimit = gasLimit + bc.gasLimit.Store(gasLimit) bc.chainHeadFeed = chainHeadFeed } -func (bc *testBlockChain) CurrentBlock() *types.Block { +func (bc *testBlockChain) CurrentBlock() *types.Header { bc.lock.Lock() defer bc.lock.Unlock() - return types.NewBlock(&types.Header{ - GasLimit: bc.gasLimit, - }, nil, nil, nil, trie.NewStackTrie(nil), nil, true) + return &types.Header{ + Number: new(big.Int), + GasLimit: bc.gasLimit.Load(), + } } func (bc *testBlockChain) GetBlock(hash common.Hash, number uint64) *types.Block { - return bc.CurrentBlock() + return types.NewBlock(bc.CurrentBlock(), nil, nil, nil, trie.NewStackTrie(nil)) } func (bc *testBlockChain) StateAt(common.Hash) (*state.StateDB, error) { @@ 
-111,15 +114,16 @@ func (bc *testBlockChain) StateAt(common.Hash) (*state.StateDB, error) { return bc.statedb, nil } -func (bc *testBlockChain) SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription { +func (bc *testBlockChain) SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription { bc.lock.Lock() defer bc.lock.Unlock() return bc.chainHeadFeed.Subscribe(ch) } -func (bc *testBlockChain) SenderCacher() *TxSenderCacher { - return newTxSenderCacher(1) +func (bc *testBlockChain) SenderCacher() *core.TxSenderCacher { + // Zero threads avoids starting goroutines. + return core.NewTxSenderCacher(0) } func transaction(nonce uint64, gaslimit uint64, key *ecdsa.PrivateKey) *types.Transaction { @@ -133,7 +137,7 @@ func pricedTransaction(nonce uint64, gaslimit uint64, gasprice *big.Int, key *ec func pricedDataTransaction(nonce uint64, gaslimit uint64, gasprice *big.Int, key *ecdsa.PrivateKey, bytes uint64) *types.Transaction { data := make([]byte, bytes) - rand.Read(data) + crand.Read(data) tx, _ := types.SignTx(types.NewTransaction(nonce, common.Address{}, big.NewInt(0), gaslimit, gasprice, data), types.HomesteadSigner{}, key) return tx @@ -154,13 +158,13 @@ func dynamicFeeTx(nonce uint64, gaslimit uint64, gasFee *big.Int, tip *big.Int, return tx } -func setupTxPool() (*TxPool, *ecdsa.PrivateKey) { - return setupTxPoolWithConfig(params.TestChainConfig) +func setupPool() (*TxPool, *ecdsa.PrivateKey) { + return setupPoolWithConfig(params.TestChainConfig) } -func setupTxPoolWithConfig(config *params.ChainConfig) (*TxPool, *ecdsa.PrivateKey) { - statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := newTestBlockchain(statedb, 10000000, new(event.Feed)) +func setupPoolWithConfig(config *params.ChainConfig) (*TxPool, *ecdsa.PrivateKey) { + statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + blockchain := newTestBlockChain(10000000, statedb, 
new(event.Feed)) key, _ := crypto.GenerateKey() pool := NewTxPool(testTxPoolConfig, config, blockchain) @@ -170,8 +174,8 @@ func setupTxPoolWithConfig(config *params.ChainConfig) (*TxPool, *ecdsa.PrivateK return pool, key } -// validateTxPoolInternals checks various consistency invariants within the pool. -func validateTxPoolInternals(pool *TxPool) error { +// validatePoolInternals checks various consistency invariants within the pool. +func validatePoolInternals(pool *TxPool) error { pool.mu.RLock() defer pool.mu.RUnlock() @@ -197,13 +201,16 @@ func validateTxPoolInternals(pool *TxPool) error { if nonce := pool.pendingNonces.get(addr); nonce != last+1 { return fmt.Errorf("pending nonce mismatch: have %v, want %v", nonce, last+1) } + if txs.totalcost.Cmp(common.Big0) < 0 { + return fmt.Errorf("totalcost went negative: %v", txs.totalcost) + } } return nil } // validateEvents checks that the correct number of transaction addition events // were fired on the pool's event feed. -func validateEvents(events chan NewTxsEvent, count int) error { +func validateEvents(events chan core.NewTxsEvent, count int) error { var received []*types.Transaction for len(received) < count { @@ -248,7 +255,7 @@ func (c *testChain) State() (*state.StateDB, error) { // a state change between those fetches. stdb := c.statedb if *c.trigger { - c.statedb, _ = state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + c.statedb, _ = state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) // simulate that the new head block included tx0 and tx1 c.statedb.SetNonce(c.address, 2) c.statedb.SetBalance(c.address, new(big.Int).SetUint64(params.Ether)) @@ -260,19 +267,19 @@ func (c *testChain) State() (*state.StateDB, error) { // This test simulates a scenario where a new block is imported during a // state reset and tests whether the pending state is in sync with the // block head event that initiated the resetState(). 
-func TestStateChangeDuringTransactionPoolReset(t *testing.T) { +func TestStateChangeDuringReset(t *testing.T) { t.Parallel() var ( key, _ = crypto.GenerateKey() address = crypto.PubkeyToAddress(key.PublicKey) - statedb, _ = state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + statedb, _ = state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) trigger = false ) // setup pool with 2 transaction in it statedb.SetBalance(address, new(big.Int).SetUint64(params.Ether)) - blockchain := &testChain{newTestBlockchain(statedb, 1000000000, new(event.Feed)), address, &trigger} + blockchain := &testChain{newTestBlockChain(1000000000, statedb, new(event.Feed)), address, &trigger} tx0 := transaction(0, 100000, key) tx1 := transaction(1, 100000, key) @@ -317,44 +324,45 @@ func testSetNonce(pool *TxPool, addr common.Address, nonce uint64) { func TestInvalidTransactions(t *testing.T) { t.Parallel() - pool, key := setupTxPool() + pool, key := setupPool() defer pool.Stop() tx := transaction(0, 100, key) from, _ := deriveSender(tx) + // Intrinsic gas too low testAddBalance(pool, from, big.NewInt(1)) - if err := pool.AddRemote(tx); !errors.Is(err, ErrInsufficientFunds) { - t.Error("expected", ErrInsufficientFunds) + if err, want := pool.AddRemote(tx), core.ErrIntrinsicGas; !errors.Is(err, want) { + t.Errorf("want %v have %v", want, err) } - balance := new(big.Int).Add(tx.Value(), new(big.Int).Mul(new(big.Int).SetUint64(tx.Gas()), tx.GasPrice())) - testAddBalance(pool, from, balance) - if err := pool.AddRemote(tx); !errors.Is(err, ErrIntrinsicGas) { - t.Error("expected", ErrIntrinsicGas, "got", err) + // Insufficient funds + tx = transaction(0, 100000, key) + if err, want := pool.AddRemote(tx), core.ErrInsufficientFunds; !errors.Is(err, want) { + t.Errorf("want %v have %v", want, err) } testSetNonce(pool, from, 1) testAddBalance(pool, from, big.NewInt(0xffffffffffffff)) tx = transaction(0, 100000, key) - if err := pool.AddRemote(tx); 
!errors.Is(err, ErrNonceTooLow) { - t.Error("expected", ErrNonceTooLow) + if err, want := pool.AddRemote(tx), core.ErrNonceTooLow; !errors.Is(err, want) { + t.Errorf("want %v have %v", want, err) } tx = transaction(1, 100000, key) pool.gasPrice = big.NewInt(1000) - if err := pool.AddRemote(tx); !strings.Contains(err.Error(), ErrUnderpriced.Error()) { - t.Error("expected error to contain", ErrUnderpriced, "got", err) + if err, want := pool.AddRemote(tx), ErrUnderpriced; !errors.Is(err, want) { + t.Errorf("want %v have %v", want, err) } if err := pool.AddLocal(tx); err != nil { t.Error("expected", nil, "got", err) } } -func TestTransactionQueue(t *testing.T) { +func TestQueue(t *testing.T) { t.Parallel() - pool, key := setupTxPool() + pool, key := setupPool() defer pool.Stop() tx := transaction(0, 100, key) @@ -382,10 +390,10 @@ func TestTransactionQueue(t *testing.T) { } } -func TestTransactionQueue2(t *testing.T) { +func TestQueue2(t *testing.T) { t.Parallel() - pool, key := setupTxPool() + pool, key := setupPool() defer pool.Stop() tx1 := transaction(0, 100, key) @@ -408,10 +416,10 @@ func TestTransactionQueue2(t *testing.T) { } } -func TestTransactionNegativeValue(t *testing.T) { +func TestNegativeValue(t *testing.T) { t.Parallel() - pool, key := setupTxPool() + pool, key := setupPool() defer pool.Stop() tx, _ := types.SignTx(types.NewTransaction(0, common.Address{}, big.NewInt(-1), 100, big.NewInt(1), nil), types.HomesteadSigner{}, key) @@ -422,48 +430,48 @@ func TestTransactionNegativeValue(t *testing.T) { } } -func TestTransactionTipAboveFeeCap(t *testing.T) { +func TestTipAboveFeeCap(t *testing.T) { t.Parallel() - pool, key := setupTxPoolWithConfig(eip1559Config) + pool, key := setupPoolWithConfig(eip1559Config) defer pool.Stop() tx := dynamicFeeTx(0, 100, big.NewInt(1), big.NewInt(2), key) - if err := pool.AddRemote(tx); err != ErrTipAboveFeeCap { - t.Error("expected", ErrTipAboveFeeCap, "got", err) + if err := pool.AddRemote(tx); err != 
core.ErrTipAboveFeeCap { + t.Error("expected", core.ErrTipAboveFeeCap, "got", err) } } -func TestTransactionVeryHighValues(t *testing.T) { +func TestVeryHighValues(t *testing.T) { t.Parallel() - pool, key := setupTxPoolWithConfig(eip1559Config) + pool, key := setupPoolWithConfig(eip1559Config) defer pool.Stop() veryBigNumber := big.NewInt(1) veryBigNumber.Lsh(veryBigNumber, 300) tx := dynamicFeeTx(0, 100, big.NewInt(1), veryBigNumber, key) - if err := pool.AddRemote(tx); err != ErrTipVeryHigh { - t.Error("expected", ErrTipVeryHigh, "got", err) + if err := pool.AddRemote(tx); err != core.ErrTipVeryHigh { + t.Error("expected", core.ErrTipVeryHigh, "got", err) } tx2 := dynamicFeeTx(0, 100, veryBigNumber, big.NewInt(1), key) - if err := pool.AddRemote(tx2); err != ErrFeeCapVeryHigh { - t.Error("expected", ErrFeeCapVeryHigh, "got", err) + if err := pool.AddRemote(tx2); err != core.ErrFeeCapVeryHigh { + t.Error("expected", core.ErrFeeCapVeryHigh, "got", err) } } -func TestTransactionChainFork(t *testing.T) { +func TestChainFork(t *testing.T) { t.Parallel() - pool, key := setupTxPool() + pool, key := setupPool() defer pool.Stop() addr := crypto.PubkeyToAddress(key.PublicKey) resetState := func() { - statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) statedb.AddBalance(addr, big.NewInt(100000000000000)) pool.chain.(*testBlockChain).reset(statedb, 1000000, new(event.Feed)) @@ -484,15 +492,15 @@ func TestTransactionChainFork(t *testing.T) { } } -func TestTransactionDoubleNonce(t *testing.T) { +func TestDoubleNonce(t *testing.T) { t.Parallel() - pool, key := setupTxPool() + pool, key := setupPool() defer pool.Stop() addr := crypto.PubkeyToAddress(key.PublicKey) resetState := func() { - statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + statedb, _ := state.New(types.EmptyRootHash, 
state.NewDatabase(rawdb.NewMemoryDatabase()), nil) statedb.AddBalance(addr, big.NewInt(100000000000000)) pool.chain.(*testBlockChain).reset(statedb, 1000000, new(event.Feed)) @@ -535,10 +543,10 @@ func TestTransactionDoubleNonce(t *testing.T) { } } -func TestTransactionMissingNonce(t *testing.T) { +func TestMissingNonce(t *testing.T) { t.Parallel() - pool, key := setupTxPool() + pool, key := setupPool() defer pool.Stop() addr := crypto.PubkeyToAddress(key.PublicKey) @@ -558,11 +566,11 @@ func TestTransactionMissingNonce(t *testing.T) { } } -func TestTransactionNonceRecovery(t *testing.T) { +func TestNonceRecovery(t *testing.T) { t.Parallel() const n = 10 - pool, key := setupTxPool() + pool, key := setupPool() defer pool.Stop() addr := crypto.PubkeyToAddress(key.PublicKey) @@ -584,11 +592,11 @@ func TestTransactionNonceRecovery(t *testing.T) { // Tests that if an account runs out of funds, any pending and queued transactions // are dropped. -func TestTransactionDropping(t *testing.T) { +func TestDropping(t *testing.T) { t.Parallel() // Create a test account and fund it - pool, key := setupTxPool() + pool, key := setupPool() defer pool.Stop() account := crypto.PubkeyToAddress(key.PublicKey) @@ -689,12 +697,12 @@ func TestTransactionDropping(t *testing.T) { // Tests that if a transaction is dropped from the current pending pool (e.g. out // of fund), all consecutive (still valid, but not executable) transactions are // postponed back into the future queue to prevent broadcasting them. 
-func TestTransactionPostponing(t *testing.T) { +func TestPostponing(t *testing.T) { t.Parallel() // Create the pool to test the postponing with - statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := newTestBlockchain(statedb, 1000000, new(event.Feed)) + statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + blockchain := newTestBlockChain(1000000, statedb, new(event.Feed)) pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain) defer pool.Stop() @@ -802,18 +810,18 @@ func TestTransactionPostponing(t *testing.T) { // Tests that if the transaction pool has both executable and non-executable // transactions from an origin account, filling the nonce gap moves all queued // ones into the pending pool. -func TestTransactionGapFilling(t *testing.T) { +func TestGapFilling(t *testing.T) { t.Parallel() // Create a test account and fund it - pool, key := setupTxPool() + pool, key := setupPool() defer pool.Stop() account := crypto.PubkeyToAddress(key.PublicKey) testAddBalance(pool, account, big.NewInt(1000000)) // Keep track of transaction events to ensure all executables get announced - events := make(chan NewTxsEvent, testTxPoolConfig.AccountQueue+5) + events := make(chan core.NewTxsEvent, testTxPoolConfig.AccountQueue+5) sub := pool.txFeed.Subscribe(events) defer sub.Unsubscribe() @@ -832,7 +840,7 @@ func TestTransactionGapFilling(t *testing.T) { if err := validateEvents(events, 1); err != nil { t.Fatalf("original event firing failed: %v", err) } - if err := validateTxPoolInternals(pool); err != nil { + if err := validatePoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } // Fill the nonce gap and ensure all transactions become pending @@ -849,18 +857,18 @@ func TestTransactionGapFilling(t *testing.T) { if err := validateEvents(events, 2); err != nil { t.Fatalf("gap-filling event firing failed: %v", err) } - if err := 
validateTxPoolInternals(pool); err != nil { + if err := validatePoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } } // Tests that if the transaction count belonging to a single account goes above // some threshold, the higher transactions are dropped to prevent DOS attacks. -func TestTransactionQueueAccountLimiting(t *testing.T) { +func TestQueueAccountLimiting(t *testing.T) { t.Parallel() // Create a test account and fund it - pool, key := setupTxPool() + pool, key := setupPool() defer pool.Stop() account := crypto.PubkeyToAddress(key.PublicKey) @@ -894,19 +902,19 @@ func TestTransactionQueueAccountLimiting(t *testing.T) { // // This logic should not hold for local transactions, unless the local tracking // mechanism is disabled. -func TestTransactionQueueGlobalLimiting(t *testing.T) { - testTransactionQueueGlobalLimiting(t, false) +func TestQueueGlobalLimiting(t *testing.T) { + testQueueGlobalLimiting(t, false) } -func TestTransactionQueueGlobalLimitingNoLocals(t *testing.T) { - testTransactionQueueGlobalLimiting(t, true) +func TestQueueGlobalLimitingNoLocals(t *testing.T) { + testQueueGlobalLimiting(t, true) } -func testTransactionQueueGlobalLimiting(t *testing.T, nolocals bool) { +func testQueueGlobalLimiting(t *testing.T, nolocals bool) { t.Parallel() // Create the pool to test the limit enforcement with - statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := newTestBlockchain(statedb, 1000000, new(event.Feed)) + statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + blockchain := newTestBlockChain(1000000, statedb, new(event.Feed)) config := testTxPoolConfig config.NoLocals = nolocals @@ -984,21 +992,21 @@ func testTransactionQueueGlobalLimiting(t *testing.T, nolocals bool) { // // This logic should not hold for local transactions, unless the local tracking // mechanism is disabled. 
-func TestTransactionQueueTimeLimiting(t *testing.T) { - testTransactionQueueTimeLimiting(t, false) +func TestQueueTimeLimiting(t *testing.T) { + testQueueTimeLimiting(t, false) } -func TestTransactionQueueTimeLimitingNoLocals(t *testing.T) { - testTransactionQueueTimeLimiting(t, true) +func TestQueueTimeLimitingNoLocals(t *testing.T) { + testQueueTimeLimiting(t, true) } -func testTransactionQueueTimeLimiting(t *testing.T, nolocals bool) { +func testQueueTimeLimiting(t *testing.T, nolocals bool) { // Reduce the eviction interval to a testable amount defer func(old time.Duration) { evictionInterval = old }(evictionInterval) evictionInterval = time.Millisecond * 100 // Create the pool to test the non-expiration enforcement - statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := newTestBlockchain(statedb, 1000000, new(event.Feed)) + statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + blockchain := newTestBlockChain(1000000, statedb, new(event.Feed)) config := testTxPoolConfig config.Lifetime = time.Second @@ -1028,7 +1036,7 @@ func testTransactionQueueTimeLimiting(t *testing.T, nolocals bool) { if queued != 2 { t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 2) } - if err := validateTxPoolInternals(pool); err != nil { + if err := validatePoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } @@ -1043,7 +1051,7 @@ func testTransactionQueueTimeLimiting(t *testing.T, nolocals bool) { if queued != 2 { t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 2) } - if err := validateTxPoolInternals(pool); err != nil { + if err := validatePoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } @@ -1063,7 +1071,7 @@ func testTransactionQueueTimeLimiting(t *testing.T, nolocals bool) { t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 1) } } - if err := 
validateTxPoolInternals(pool); err != nil { + if err := validatePoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } @@ -1080,7 +1088,7 @@ func testTransactionQueueTimeLimiting(t *testing.T, nolocals bool) { if queued != 0 { t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0) } - if err := validateTxPoolInternals(pool); err != nil { + if err := validatePoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } @@ -1110,7 +1118,7 @@ func testTransactionQueueTimeLimiting(t *testing.T, nolocals bool) { if queued != 2 { t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 3) } - if err := validateTxPoolInternals(pool); err != nil { + if err := validatePoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } @@ -1129,7 +1137,7 @@ func testTransactionQueueTimeLimiting(t *testing.T, nolocals bool) { t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 1) } } - if err := validateTxPoolInternals(pool); err != nil { + if err := validatePoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } } @@ -1137,18 +1145,18 @@ func testTransactionQueueTimeLimiting(t *testing.T, nolocals bool) { // Tests that even if the transaction count belonging to a single account goes // above some threshold, as long as the transactions are executable, they are // accepted. 
-func TestTransactionPendingLimiting(t *testing.T) { +func TestPendingLimiting(t *testing.T) { t.Parallel() // Create a test account and fund it - pool, key := setupTxPool() + pool, key := setupPool() defer pool.Stop() account := crypto.PubkeyToAddress(key.PublicKey) - testAddBalance(pool, account, big.NewInt(1000000)) + testAddBalance(pool, account, big.NewInt(1000000000000)) // Keep track of transaction events to ensure all executables get announced - events := make(chan NewTxsEvent, testTxPoolConfig.AccountQueue+5) + events := make(chan core.NewTxsEvent, testTxPoolConfig.AccountQueue+5) sub := pool.txFeed.Subscribe(events) defer sub.Unsubscribe() @@ -1170,7 +1178,7 @@ func TestTransactionPendingLimiting(t *testing.T) { if err := validateEvents(events, int(testTxPoolConfig.AccountQueue+5)); err != nil { t.Fatalf("event firing failed: %v", err) } - if err := validateTxPoolInternals(pool); err != nil { + if err := validatePoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } } @@ -1178,12 +1186,12 @@ func TestTransactionPendingLimiting(t *testing.T) { // Tests that if the transaction count belonging to multiple accounts go above // some hard threshold, the higher transactions are dropped to prevent DOS // attacks. 
-func TestTransactionPendingGlobalLimiting(t *testing.T) { +func TestPendingGlobalLimiting(t *testing.T) { t.Parallel() // Create the pool to test the limit enforcement with - statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := newTestBlockchain(statedb, 1000000, new(event.Feed)) + statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + blockchain := newTestBlockChain(1000000, statedb, new(event.Feed)) config := testTxPoolConfig config.GlobalSlots = config.AccountSlots * 10 @@ -1218,7 +1226,7 @@ func TestTransactionPendingGlobalLimiting(t *testing.T) { if pending > int(config.GlobalSlots) { t.Fatalf("total pending transactions overflow allowance: %d > %d", pending, config.GlobalSlots) } - if err := validateTxPoolInternals(pool); err != nil { + if err := validatePoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } } @@ -1226,11 +1234,11 @@ func TestTransactionPendingGlobalLimiting(t *testing.T) { // Test the limit on transaction size is enforced correctly. // This test verifies every transaction having allowed size // is added to the pool, and longer transactions are rejected. -func TestTransactionAllowedTxSize(t *testing.T) { +func TestAllowedTxSize(t *testing.T) { t.Parallel() // Create a test account and fund it - pool, key := setupTxPool() + pool, key := setupPool() defer pool.Stop() account := crypto.PubkeyToAddress(key.PublicKey) @@ -1248,22 +1256,22 @@ func TestTransactionAllowedTxSize(t *testing.T) { // All those fields are summed up to at most 213 bytes. 
baseSize := uint64(213) dataSize := txMaxSize - baseSize - + maxGas := pool.currentMaxGas.Load() // Try adding a transaction with maximal allowed size - tx := pricedDataTransaction(0, pool.currentMaxGas, big.NewInt(1), key, dataSize) + tx := pricedDataTransaction(0, maxGas, big.NewInt(1), key, dataSize) if err := pool.addRemoteSync(tx); err != nil { t.Fatalf("failed to add transaction of size %d, close to maximal: %v", int(tx.Size()), err) } // Try adding a transaction with random allowed size - if err := pool.addRemoteSync(pricedDataTransaction(1, pool.currentMaxGas, big.NewInt(1), key, uint64(rand.Intn(int(dataSize))))); err != nil { + if err := pool.addRemoteSync(pricedDataTransaction(1, maxGas, big.NewInt(1), key, uint64(rand.Intn(int(dataSize))))); err != nil { t.Fatalf("failed to add transaction of random allowed size: %v", err) } // Try adding a transaction of minimal not allowed size - if err := pool.addRemoteSync(pricedDataTransaction(2, pool.currentMaxGas, big.NewInt(1), key, txMaxSize)); err == nil { + if err := pool.addRemoteSync(pricedDataTransaction(2, maxGas, big.NewInt(1), key, txMaxSize)); err == nil { t.Fatalf("expected rejection on slightly oversize transaction") } // Try adding a transaction of random not allowed size - if err := pool.addRemoteSync(pricedDataTransaction(2, pool.currentMaxGas, big.NewInt(1), key, dataSize+1+uint64(rand.Intn(10*txMaxSize)))); err == nil { + if err := pool.addRemoteSync(pricedDataTransaction(2, maxGas, big.NewInt(1), key, dataSize+1+uint64(rand.Intn(10*txMaxSize)))); err == nil { t.Fatalf("expected rejection on oversize transaction") } // Run some sanity checks on the pool internals @@ -1274,18 +1282,18 @@ func TestTransactionAllowedTxSize(t *testing.T) { if queued != 0 { t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0) } - if err := validateTxPoolInternals(pool); err != nil { + if err := validatePoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } } // Tests 
that if transactions start being capped, transactions are also removed from 'all' -func TestTransactionCapClearsFromAll(t *testing.T) { +func TestCapClearsFromAll(t *testing.T) { t.Parallel() // Create the pool to test the limit enforcement with - statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := newTestBlockchain(statedb, 1000000, new(event.Feed)) + statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + blockchain := newTestBlockChain(1000000, statedb, new(event.Feed)) config := testTxPoolConfig config.AccountSlots = 2 @@ -1306,7 +1314,7 @@ func TestTransactionCapClearsFromAll(t *testing.T) { } // Import the batch and verify that limits have been enforced pool.AddRemotes(txs) - if err := validateTxPoolInternals(pool); err != nil { + if err := validatePoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } } @@ -1314,12 +1322,12 @@ func TestTransactionCapClearsFromAll(t *testing.T) { // Tests that if the transaction count belonging to multiple accounts go above // some hard threshold, if they are under the minimum guaranteed slot count then // the transactions are still kept. 
-func TestTransactionPendingMinimumAllowance(t *testing.T) { +func TestPendingMinimumAllowance(t *testing.T) { t.Parallel() // Create the pool to test the limit enforcement with - statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := newTestBlockchain(statedb, 1000000, new(event.Feed)) + statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + blockchain := newTestBlockChain(1000000, statedb, new(event.Feed)) config := testTxPoolConfig config.GlobalSlots = 1 @@ -1352,7 +1360,7 @@ func TestTransactionPendingMinimumAllowance(t *testing.T) { t.Errorf("addr %x: total pending transactions mismatch: have %d, want %d", addr, list.Len(), config.AccountSlots) } } - if err := validateTxPoolInternals(pool); err != nil { + if err := validatePoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } } @@ -1362,18 +1370,18 @@ func TestTransactionPendingMinimumAllowance(t *testing.T) { // from the pending pool to the queue. // // Note, local transactions are never allowed to be dropped. 
-func TestTransactionPoolRepricing(t *testing.T) { +func TestRepricing(t *testing.T) { t.Parallel() // Create the pool to test the pricing enforcement with - statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := newTestBlockchain(statedb, 1000000, new(event.Feed)) + statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + blockchain := newTestBlockChain(1000000, statedb, new(event.Feed)) pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain) defer pool.Stop() // Keep track of transaction events to ensure all executables get announced - events := make(chan NewTxsEvent, 32) + events := make(chan core.NewTxsEvent, 32) sub := pool.txFeed.Subscribe(events) defer sub.Unsubscribe() @@ -1414,7 +1422,7 @@ func TestTransactionPoolRepricing(t *testing.T) { if err := validateEvents(events, 7); err != nil { t.Fatalf("original event firing failed: %v", err) } - if err := validateTxPoolInternals(pool); err != nil { + if err := validatePoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } // Reprice the pool and check that underpriced transactions get dropped @@ -1430,7 +1438,7 @@ func TestTransactionPoolRepricing(t *testing.T) { if err := validateEvents(events, 0); err != nil { t.Fatalf("reprice event firing failed: %v", err) } - if err := validateTxPoolInternals(pool); err != nil { + if err := validatePoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } // Check that we can't add the old transactions back @@ -1446,7 +1454,7 @@ func TestTransactionPoolRepricing(t *testing.T) { if err := validateEvents(events, 0); err != nil { t.Fatalf("post-reprice event firing failed: %v", err) } - if err := validateTxPoolInternals(pool); err != nil { + if err := validatePoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } // However we can add local underpriced transactions @@ -1460,7 
+1468,7 @@ func TestTransactionPoolRepricing(t *testing.T) { if err := validateEvents(events, 1); err != nil { t.Fatalf("post-reprice local event firing failed: %v", err) } - if err := validateTxPoolInternals(pool); err != nil { + if err := validatePoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } // And we can fill gaps with properly priced transactions @@ -1476,7 +1484,7 @@ func TestTransactionPoolRepricing(t *testing.T) { if err := validateEvents(events, 5); err != nil { t.Fatalf("post-reprice event firing failed: %v", err) } - if err := validateTxPoolInternals(pool); err != nil { + if err := validatePoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } } @@ -1486,15 +1494,15 @@ func TestTransactionPoolRepricing(t *testing.T) { // gapped transactions back from the pending pool to the queue. // // Note, local transactions are never allowed to be dropped. -func TestTransactionPoolRepricingDynamicFee(t *testing.T) { +func TestRepricingDynamicFee(t *testing.T) { t.Parallel() // Create the pool to test the pricing enforcement with - pool, _ := setupTxPoolWithConfig(eip1559Config) + pool, _ := setupPoolWithConfig(eip1559Config) defer pool.Stop() // Keep track of transaction events to ensure all executables get announced - events := make(chan NewTxsEvent, 32) + events := make(chan core.NewTxsEvent, 32) sub := pool.txFeed.Subscribe(events) defer sub.Unsubscribe() @@ -1535,7 +1543,7 @@ func TestTransactionPoolRepricingDynamicFee(t *testing.T) { if err := validateEvents(events, 7); err != nil { t.Fatalf("original event firing failed: %v", err) } - if err := validateTxPoolInternals(pool); err != nil { + if err := validatePoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } // Reprice the pool and check that underpriced transactions get dropped @@ -1551,7 +1559,7 @@ func TestTransactionPoolRepricingDynamicFee(t *testing.T) { if err := validateEvents(events, 0); err != nil 
{ t.Fatalf("reprice event firing failed: %v", err) } - if err := validateTxPoolInternals(pool); err != nil { + if err := validatePoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } // Check that we can't add the old transactions back @@ -1570,7 +1578,7 @@ func TestTransactionPoolRepricingDynamicFee(t *testing.T) { if err := validateEvents(events, 0); err != nil { t.Fatalf("post-reprice event firing failed: %v", err) } - if err := validateTxPoolInternals(pool); err != nil { + if err := validatePoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } // However we can add local underpriced transactions @@ -1584,7 +1592,7 @@ func TestTransactionPoolRepricingDynamicFee(t *testing.T) { if err := validateEvents(events, 1); err != nil { t.Fatalf("post-reprice local event firing failed: %v", err) } - if err := validateTxPoolInternals(pool); err != nil { + if err := validatePoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } // And we can fill gaps with properly priced transactions @@ -1603,19 +1611,19 @@ func TestTransactionPoolRepricingDynamicFee(t *testing.T) { if err := validateEvents(events, 5); err != nil { t.Fatalf("post-reprice event firing failed: %v", err) } - if err := validateTxPoolInternals(pool); err != nil { + if err := validatePoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } } // Tests that setting the transaction pool gas price to a higher value does not // remove local transactions (legacy & dynamic fee). 
-func TestTransactionPoolRepricingKeepsLocals(t *testing.T) { +func TestRepricingKeepsLocals(t *testing.T) { t.Parallel() // Create the pool to test the pricing enforcement with - statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := newTestBlockchain(statedb, 1000000, new(event.Feed)) + statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + blockchain := newTestBlockChain(1000000, statedb, new(event.Feed)) pool := NewTxPool(testTxPoolConfig, eip1559Config, blockchain) defer pool.Stop() @@ -1624,7 +1632,7 @@ func TestTransactionPoolRepricingKeepsLocals(t *testing.T) { keys := make([]*ecdsa.PrivateKey, 3) for i := 0; i < len(keys); i++ { keys[i], _ = crypto.GenerateKey() - testAddBalance(pool, crypto.PubkeyToAddress(keys[i].PublicKey), big.NewInt(1000*1000000)) + testAddBalance(pool, crypto.PubkeyToAddress(keys[i].PublicKey), big.NewInt(100000*1000000)) } // Create transaction (both pending and queued) with a linearly growing gasprice for i := uint64(0); i < 500; i++ { @@ -1661,7 +1669,7 @@ func TestTransactionPoolRepricingKeepsLocals(t *testing.T) { t.Fatalf("queued transactions mismatched: have %d, want %d", queued, expQueued) } - if err := validateTxPoolInternals(pool); err != nil { + if err := validatePoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } } @@ -1683,12 +1691,12 @@ func TestTransactionPoolRepricingKeepsLocals(t *testing.T) { // pending transactions are moved into the queue. // // Note, local transactions are never allowed to be dropped. 
-func TestTransactionPoolUnderpricing(t *testing.T) { +func TestUnderpricing(t *testing.T) { t.Parallel() // Create the pool to test the pricing enforcement with - statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := newTestBlockchain(statedb, 1000000, new(event.Feed)) + statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + blockchain := newTestBlockChain(1000000, statedb, new(event.Feed)) config := testTxPoolConfig config.GlobalSlots = 2 @@ -1698,12 +1706,12 @@ func TestTransactionPoolUnderpricing(t *testing.T) { defer pool.Stop() // Keep track of transaction events to ensure all executables get announced - events := make(chan NewTxsEvent, 32) + events := make(chan core.NewTxsEvent, 32) sub := pool.txFeed.Subscribe(events) defer sub.Unsubscribe() // Create a number of test accounts and fund them - keys := make([]*ecdsa.PrivateKey, 4) + keys := make([]*ecdsa.PrivateKey, 5) for i := 0; i < len(keys); i++ { keys[i], _ = crypto.GenerateKey() testAddBalance(pool, crypto.PubkeyToAddress(keys[i].PublicKey), big.NewInt(1000000)) @@ -1732,13 +1740,17 @@ func TestTransactionPoolUnderpricing(t *testing.T) { if err := validateEvents(events, 3); err != nil { t.Fatalf("original event firing failed: %v", err) } - if err := validateTxPoolInternals(pool); err != nil { + if err := validatePoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } // Ensure that adding an underpriced transaction on block limit fails if err := pool.addRemoteSync(pricedTransaction(0, 100000, big.NewInt(1), keys[1])); err != ErrUnderpriced { t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, ErrUnderpriced) } + // Replace a future transaction with a future transaction + if err := pool.addRemoteSync(pricedTransaction(1, 100000, big.NewInt(2), keys[1])); err != nil { // +K1:1 => -K1:1 => Pend K0:0, K0:1, K2:0; Que K1:1 + t.Fatalf("failed to add 
well priced transaction: %v", err) + } // Ensure that adding high priced transactions drops cheap ones, but not own if err := pool.addRemoteSync(pricedTransaction(0, 100000, big.NewInt(3), keys[1])); err != nil { // +K1:0 => -K1:1 => Pend K0:0, K0:1, K1:0, K2:0; Que - t.Fatalf("failed to add well priced transaction: %v", err) @@ -1749,6 +1761,10 @@ func TestTransactionPoolUnderpricing(t *testing.T) { if err := pool.addRemoteSync(pricedTransaction(3, 100000, big.NewInt(5), keys[1])); err != nil { // +K1:3 => -K0:1 => Pend K1:0, K2:0; Que K1:2 K1:3 t.Fatalf("failed to add well priced transaction: %v", err) } + // Ensure that replacing a pending transaction with a future transaction fails + if err := pool.addRemoteSync(pricedTransaction(5, 100000, big.NewInt(6), keys[1])); err != ErrFutureReplacePending { + t.Fatalf("adding future replace transaction error mismatch: have %v, want %v", err, ErrFutureReplacePending) + } pending, queued = pool.Stats() if pending != 2 { t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2) @@ -1756,10 +1772,10 @@ func TestTransactionPoolUnderpricing(t *testing.T) { if queued != 2 { t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 2) } - if err := validateEvents(events, 1); err != nil { + if err := validateEvents(events, 2); err != nil { t.Fatalf("additional event firing failed: %v", err) } - if err := validateTxPoolInternals(pool); err != nil { + if err := validatePoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } // Ensure that adding local transactions can push out even higher priced ones @@ -1781,7 +1797,7 @@ func TestTransactionPoolUnderpricing(t *testing.T) { if err := validateEvents(events, 2); err != nil { t.Fatalf("local event firing failed: %v", err) } - if err := validateTxPoolInternals(pool); err != nil { + if err := validatePoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } } @@ -1789,12 +1805,12 @@ func 
TestTransactionPoolUnderpricing(t *testing.T) { // Tests that more expensive transactions push out cheap ones from the pool, but // without producing instability by creating gaps that start jumping transactions // back and forth between queued/pending. -func TestTransactionPoolStableUnderpricing(t *testing.T) { +func TestStableUnderpricing(t *testing.T) { t.Parallel() // Create the pool to test the pricing enforcement with - statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := newTestBlockchain(statedb, 1000000, new(event.Feed)) + statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + blockchain := newTestBlockChain(1000000, statedb, new(event.Feed)) config := testTxPoolConfig config.GlobalSlots = 128 @@ -1804,7 +1820,7 @@ func TestTransactionPoolStableUnderpricing(t *testing.T) { defer pool.Stop() // Keep track of transaction events to ensure all executables get announced - events := make(chan NewTxsEvent, 32) + events := make(chan core.NewTxsEvent, 32) sub := pool.txFeed.Subscribe(events) defer sub.Unsubscribe() @@ -1831,7 +1847,7 @@ func TestTransactionPoolStableUnderpricing(t *testing.T) { if err := validateEvents(events, int(config.GlobalSlots)); err != nil { t.Fatalf("original event firing failed: %v", err) } - if err := validateTxPoolInternals(pool); err != nil { + if err := validatePoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } // Ensure that adding high priced transactions drops a cheap, but doesn't produce a gap @@ -1848,7 +1864,7 @@ func TestTransactionPoolStableUnderpricing(t *testing.T) { if err := validateEvents(events, 1); err != nil { t.Fatalf("additional event firing failed: %v", err) } - if err := validateTxPoolInternals(pool); err != nil { + if err := validatePoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } } @@ -1858,17 +1874,17 @@ func 
TestTransactionPoolStableUnderpricing(t *testing.T) { // expensive ones and any gapped pending transactions are moved into the queue. // // Note, local transactions are never allowed to be dropped. -func TestTransactionPoolUnderpricingDynamicFee(t *testing.T) { +func TestUnderpricingDynamicFee(t *testing.T) { t.Parallel() - pool, _ := setupTxPoolWithConfig(eip1559Config) + pool, _ := setupPoolWithConfig(eip1559Config) defer pool.Stop() pool.config.GlobalSlots = 2 pool.config.GlobalQueue = 2 // Keep track of transaction events to ensure all executables get announced - events := make(chan NewTxsEvent, 32) + events := make(chan core.NewTxsEvent, 32) sub := pool.txFeed.Subscribe(events) defer sub.Unsubscribe() @@ -1902,7 +1918,7 @@ func TestTransactionPoolUnderpricingDynamicFee(t *testing.T) { if err := validateEvents(events, 3); err != nil { t.Fatalf("original event firing failed: %v", err) } - if err := validateTxPoolInternals(pool); err != nil { + if err := validatePoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } @@ -1918,11 +1934,11 @@ func TestTransactionPoolUnderpricingDynamicFee(t *testing.T) { t.Fatalf("failed to add well priced transaction: %v", err) } - tx = pricedTransaction(2, 100000, big.NewInt(3), keys[1]) + tx = pricedTransaction(1, 100000, big.NewInt(3), keys[1]) if err := pool.addRemoteSync(tx); err != nil { // +K1:2, -K0:1 => Pend K0:0 K1:0, K2:0; Que K1:2 t.Fatalf("failed to add well priced transaction: %v", err) } - tx = dynamicFeeTx(3, 100000, big.NewInt(4), big.NewInt(1), keys[1]) + tx = dynamicFeeTx(2, 100000, big.NewInt(4), big.NewInt(1), keys[1]) if err := pool.addRemoteSync(tx); err != nil { // +K1:3, -K1:0 => Pend K0:0 K2:0; Que K1:2 K1:3 t.Fatalf("failed to add well priced transaction: %v", err) } @@ -1933,10 +1949,10 @@ func TestTransactionPoolUnderpricingDynamicFee(t *testing.T) { if queued != 2 { t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 2) } - if err := 
validateEvents(events, 1); err != nil { + if err := validateEvents(events, 2); err != nil { t.Fatalf("additional event firing failed: %v", err) } - if err := validateTxPoolInternals(pool); err != nil { + if err := validatePoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } // Ensure that adding local transactions can push out even higher priced ones @@ -1958,7 +1974,7 @@ func TestTransactionPoolUnderpricingDynamicFee(t *testing.T) { if err := validateEvents(events, 2); err != nil { t.Fatalf("local event firing failed: %v", err) } - if err := validateTxPoolInternals(pool); err != nil { + if err := validatePoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } } @@ -1968,7 +1984,7 @@ func TestTransactionPoolUnderpricingDynamicFee(t *testing.T) { func TestDualHeapEviction(t *testing.T) { t.Parallel() - pool, _ := setupTxPoolWithConfig(eip1559Config) + pool, _ := setupPoolWithConfig(eip1559Config) defer pool.Stop() pool.config.GlobalSlots = 10 @@ -2017,18 +2033,18 @@ func TestDualHeapEviction(t *testing.T) { check(highTip, "effective tip") } - if err := validateTxPoolInternals(pool); err != nil { + if err := validatePoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } } // Tests that the pool rejects duplicate transactions. 
-func TestTransactionDeduplication(t *testing.T) { +func TestDeduplication(t *testing.T) { t.Parallel() // Create the pool to test the pricing enforcement with - statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := newTestBlockchain(statedb, 1000000, new(event.Feed)) + statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + blockchain := newTestBlockChain(1000000, statedb, new(event.Feed)) pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain) defer pool.Stop() @@ -2082,25 +2098,25 @@ func TestTransactionDeduplication(t *testing.T) { if queued != 0 { t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0) } - if err := validateTxPoolInternals(pool); err != nil { + if err := validatePoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } } // Tests that the pool rejects replacement transactions that don't meet the minimum // price bump required. 
-func TestTransactionReplacement(t *testing.T) { +func TestReplacement(t *testing.T) { t.Parallel() // Create the pool to test the pricing enforcement with - statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := newTestBlockchain(statedb, 1000000, new(event.Feed)) + statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + blockchain := newTestBlockChain(1000000, statedb, new(event.Feed)) pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain) defer pool.Stop() // Keep track of transaction events to ensure all executables get announced - events := make(chan NewTxsEvent, 32) + events := make(chan core.NewTxsEvent, 32) sub := pool.txFeed.Subscribe(events) defer sub.Unsubscribe() @@ -2162,23 +2178,23 @@ func TestTransactionReplacement(t *testing.T) { if err := validateEvents(events, 0); err != nil { t.Fatalf("queued replacement event firing failed: %v", err) } - if err := validateTxPoolInternals(pool); err != nil { + if err := validatePoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } } // Tests that the pool rejects replacement dynamic fee transactions that don't // meet the minimum price bump required. 
-func TestTransactionReplacementDynamicFee(t *testing.T) { +func TestReplacementDynamicFee(t *testing.T) { t.Parallel() // Create the pool to test the pricing enforcement with - pool, key := setupTxPoolWithConfig(eip1559Config) + pool, key := setupPoolWithConfig(eip1559Config) defer pool.Stop() testAddBalance(pool, crypto.PubkeyToAddress(key.PublicKey), big.NewInt(1000000000)) // Keep track of transaction events to ensure all executables get announced - events := make(chan NewTxsEvent, 32) + events := make(chan core.NewTxsEvent, 32) sub := pool.txFeed.Subscribe(events) defer sub.Unsubscribe() @@ -2203,7 +2219,7 @@ func TestTransactionReplacementDynamicFee(t *testing.T) { stages := []string{"pending", "queued"} for _, stage := range stages { // Since state is empty, 0 nonce txs are "executable" and can go - // into pending immediately. 2 nonce txs are "happed + // into pending immediately. 2 nonce txs are "gapped" nonce := uint64(0) if stage == "queued" { nonce = 2 @@ -2272,17 +2288,17 @@ func TestTransactionReplacementDynamicFee(t *testing.T) { } } - if err := validateTxPoolInternals(pool); err != nil { + if err := validatePoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } } // Tests that local transactions are journaled to disk, but remote transactions // get discarded between restarts. 
-func TestTransactionJournaling(t *testing.T) { testTransactionJournaling(t, false) } -func TestTransactionJournalingNoLocals(t *testing.T) { testTransactionJournaling(t, true) } +func TestJournaling(t *testing.T) { testJournaling(t, false) } +func TestJournalingNoLocals(t *testing.T) { testJournaling(t, true) } -func testTransactionJournaling(t *testing.T, nolocals bool) { +func testJournaling(t *testing.T, nolocals bool) { t.Parallel() // Create a temporary file for the journal @@ -2298,8 +2314,8 @@ func testTransactionJournaling(t *testing.T, nolocals bool) { os.Remove(journal) // Create the original pool to inject transaction into the journal - statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := newTestBlockchain(statedb, 1000000, new(event.Feed)) + statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + blockchain := newTestBlockChain(1000000, statedb, new(event.Feed)) config := testTxPoolConfig config.NoLocals = nolocals @@ -2335,13 +2351,13 @@ func testTransactionJournaling(t *testing.T, nolocals bool) { if queued != 0 { t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0) } - if err := validateTxPoolInternals(pool); err != nil { + if err := validatePoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } // Terminate the old pool, bump the local nonce, create a new pool and ensure relevant transaction survive pool.Stop() statedb.SetNonce(crypto.PubkeyToAddress(local.PublicKey), 1) - blockchain = newTestBlockchain(statedb, 1000000, new(event.Feed)) + blockchain = newTestBlockChain(1000000, statedb, new(event.Feed)) pool = NewTxPool(config, params.TestChainConfig, blockchain) @@ -2358,7 +2374,7 @@ func testTransactionJournaling(t *testing.T, nolocals bool) { t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2) } } - if err := validateTxPoolInternals(pool); err != nil { + if err := 
validatePoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } // Bump the nonce temporarily and ensure the newly invalidated transaction is removed @@ -2368,7 +2384,7 @@ func testTransactionJournaling(t *testing.T, nolocals bool) { pool.Stop() statedb.SetNonce(crypto.PubkeyToAddress(local.PublicKey), 1) - blockchain = newTestBlockchain(statedb, 1000000, new(event.Feed)) + blockchain = newTestBlockChain(1000000, statedb, new(event.Feed)) pool = NewTxPool(config, params.TestChainConfig, blockchain) pending, queued = pool.Stats() @@ -2384,20 +2400,20 @@ func testTransactionJournaling(t *testing.T, nolocals bool) { t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 1) } } - if err := validateTxPoolInternals(pool); err != nil { + if err := validatePoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } pool.Stop() } -// TestTransactionStatusCheck tests that the pool can correctly retrieve the +// TestStatusCheck tests that the pool can correctly retrieve the // pending status of individual transactions. 
-func TestTransactionStatusCheck(t *testing.T) { +func TestStatusCheck(t *testing.T) { t.Parallel() // Create the pool to test the status retrievals with - statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := newTestBlockchain(statedb, 1000000, new(event.Feed)) + statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + blockchain := newTestBlockChain(1000000, statedb, new(event.Feed)) pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain) defer pool.Stop() @@ -2426,7 +2442,7 @@ func TestTransactionStatusCheck(t *testing.T) { if queued != 2 { t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 2) } - if err := validateTxPoolInternals(pool); err != nil { + if err := validatePoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } // Retrieve the status of each transaction and validate them @@ -2447,7 +2463,7 @@ func TestTransactionStatusCheck(t *testing.T) { } // Test the transaction slots consumption is computed correctly -func TestTransactionSlotCount(t *testing.T) { +func TestSlotCount(t *testing.T) { t.Parallel() key, _ := crypto.GenerateKey() @@ -2472,7 +2488,7 @@ func BenchmarkPendingDemotion10000(b *testing.B) { benchmarkPendingDemotion(b, 1 func benchmarkPendingDemotion(b *testing.B, size int) { // Add a batch of transactions to a pool one by one - pool, key := setupTxPool() + pool, key := setupPool() defer pool.Stop() account := crypto.PubkeyToAddress(key.PublicKey) @@ -2497,7 +2513,7 @@ func BenchmarkFuturePromotion10000(b *testing.B) { benchmarkFuturePromotion(b, 1 func benchmarkFuturePromotion(b *testing.B, size int) { // Add a batch of transactions to a pool one by one - pool, key := setupTxPool() + pool, key := setupPool() defer pool.Stop() account := crypto.PubkeyToAddress(key.PublicKey) @@ -2515,21 +2531,21 @@ func benchmarkFuturePromotion(b *testing.B, size int) { } // Benchmarks the speed of 
batched transaction insertion. -func BenchmarkPoolBatchInsert100(b *testing.B) { benchmarkPoolBatchInsert(b, 100, false) } -func BenchmarkPoolBatchInsert1000(b *testing.B) { benchmarkPoolBatchInsert(b, 1000, false) } -func BenchmarkPoolBatchInsert10000(b *testing.B) { benchmarkPoolBatchInsert(b, 10000, false) } +func BenchmarkBatchInsert100(b *testing.B) { benchmarkBatchInsert(b, 100, false) } +func BenchmarkBatchInsert1000(b *testing.B) { benchmarkBatchInsert(b, 1000, false) } +func BenchmarkBatchInsert10000(b *testing.B) { benchmarkBatchInsert(b, 10000, false) } -func BenchmarkPoolBatchLocalInsert100(b *testing.B) { benchmarkPoolBatchInsert(b, 100, true) } -func BenchmarkPoolBatchLocalInsert1000(b *testing.B) { benchmarkPoolBatchInsert(b, 1000, true) } -func BenchmarkPoolBatchLocalInsert10000(b *testing.B) { benchmarkPoolBatchInsert(b, 10000, true) } +func BenchmarkBatchLocalInsert100(b *testing.B) { benchmarkBatchInsert(b, 100, true) } +func BenchmarkBatchLocalInsert1000(b *testing.B) { benchmarkBatchInsert(b, 1000, true) } +func BenchmarkBatchLocalInsert10000(b *testing.B) { benchmarkBatchInsert(b, 10000, true) } -func benchmarkPoolBatchInsert(b *testing.B, size int, local bool) { +func benchmarkBatchInsert(b *testing.B, size int, local bool) { // Generate a batch of transactions to enqueue into the pool - pool, key := setupTxPool() + pool, key := setupPool() defer pool.Stop() account := crypto.PubkeyToAddress(key.PublicKey) - testAddBalance(pool, account, big.NewInt(1000000)) + testAddBalance(pool, account, big.NewInt(1000000000000000000)) batches := make([]types.Transactions, b.N) for i := 0; i < b.N; i++ { @@ -2569,7 +2585,7 @@ func BenchmarkInsertRemoteWithAllLocals(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { b.StopTimer() - pool, _ := setupTxPool() + pool, _ := setupPool() testAddBalance(pool, account, big.NewInt(100000000)) for _, local := range locals { pool.AddLocal(local) @@ -2585,9 +2601,9 @@ func BenchmarkInsertRemoteWithAllLocals(b 
*testing.B) { } // Benchmarks the speed of batch transaction insertion in case of multiple accounts. -func BenchmarkPoolMultiAccountBatchInsert(b *testing.B) { +func BenchmarkMultiAccountBatchInsert(b *testing.B) { // Generate a batch of transactions to enqueue into the pool - pool, _ := setupTxPool() + pool, _ := setupPool() defer pool.Stop() b.ReportAllocs() batches := make(types.Transactions, b.N) diff --git a/coreth/core/types.go b/coreth/core/types.go index aa8d9873..77e6dd4d 100644 --- a/coreth/core/types.go +++ b/coreth/core/types.go @@ -44,14 +44,6 @@ type Validator interface { ValidateState(block *types.Block, state *state.StateDB, receipts types.Receipts, usedGas uint64) error } -// Prefetcher is an interface for pre-caching transaction signatures and state. -type Prefetcher interface { - // Prefetch processes the state changes according to the Ethereum rules by running - // the transaction messages using the statedb, but any changes are discarded. The - // only goal is to pre-cache transaction signatures and state trie nodes. - Prefetch(block *types.Block, statedb *state.StateDB, cfg vm.Config, interrupt *uint32) -} - // Processor is an interface for processing blocks using a given initial state. 
type Processor interface { // Process processes the state changes according to the Ethereum rules by running diff --git a/coreth/core/types/block.go b/coreth/core/types/block.go index caed1144..7a4be251 100644 --- a/coreth/core/types/block.go +++ b/coreth/core/types/block.go @@ -34,20 +34,11 @@ import ( "reflect" "sync/atomic" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/rlp" ) -var ( - EmptyRootHash = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") - EmptyCodeHash = common.BytesToHash(crypto.Keccak256(nil)) - EmptyUncleHash = rlpHash([]*Header(nil)) - EmptyExtDataHash = rlpHash([]byte(nil)) -) - // A BlockNonce is a 64-bit hash which proves (combined with the // mix-hash) that a sufficient amount of computation has been carried // out on a block. @@ -110,6 +101,9 @@ type Header struct { // BlockGasCost was added by Apricot Phase 4 and is ignored in legacy // headers. BlockGasCost *big.Int `json:"blockGasCost" rlp:"optional"` + + // ExcessDataGas was added by EIP-4844 and is ignored in legacy headers. + ExcessDataGas *big.Int `json:"excessDataGas" rlp:"optional"` } // field type overrides for gencodec @@ -137,18 +131,22 @@ var headerSize = common.StorageSize(reflect.TypeOf(Header{}).Size()) // Size returns the approximate memory used by all internal contents. It is used // to approximate and limit the memory consumption of various caches. func (h *Header) Size() common.StorageSize { - return headerSize + common.StorageSize(len(h.Extra)+(h.Difficulty.BitLen()+h.Number.BitLen())/8) + var baseFeeBits int + if h.BaseFee != nil { + baseFeeBits = h.BaseFee.BitLen() + } + return headerSize + common.StorageSize(len(h.Extra)+(h.Difficulty.BitLen()+h.Number.BitLen()+baseFeeBits)/8) } // EmptyBody returns true if there is no additional 'body' to complete the header // that is: no transactions and no uncles. 
func (h *Header) EmptyBody() bool { - return h.TxHash == EmptyRootHash && h.UncleHash == EmptyUncleHash + return h.TxHash == EmptyTxsHash && h.UncleHash == EmptyUncleHash } // EmptyReceipts returns true if there are no receipts for this header/block. func (h *Header) EmptyReceipts() bool { - return h.ReceiptHash == EmptyRootHash + return h.ReceiptHash == EmptyReceiptsHash } // Body is a simple (mutable, non-safe) data container for storing and moving @@ -192,14 +190,13 @@ type extblock struct { // are ignored and set to values derived from the given txs, uncles // and receipts. func NewBlock( - header *Header, txs []*Transaction, uncles []*Header, receipts []*Receipt, - hasher TrieHasher, extdata []byte, recalc bool, + header *Header, txs []*Transaction, uncles []*Header, receipts []*Receipt, hasher TrieHasher, ) *Block { b := &Block{header: CopyHeader(header)} // TODO: panic if len(txs) != len(receipts) if len(txs) == 0 { - b.header.TxHash = EmptyRootHash + b.header.TxHash = EmptyTxsHash } else { b.header.TxHash = DeriveSha(Transactions(txs), hasher) b.transactions = make(Transactions, len(txs)) @@ -207,7 +204,7 @@ func NewBlock( } if len(receipts) == 0 { - b.header.ReceiptHash = EmptyRootHash + b.header.ReceiptHash = EmptyReceiptsHash } else { b.header.ReceiptHash = DeriveSha(Receipts(receipts), hasher) b.header.Bloom = CreateBloom(receipts) @@ -223,7 +220,6 @@ func NewBlock( } } - b.setExtData(extdata, recalc) return b } @@ -268,42 +264,10 @@ func (b *Block) DecodeRLP(s *rlp.Stream) error { return err } b.header, b.uncles, b.transactions, b.version, b.extdata = eb.Header, eb.Uncles, eb.Txs, eb.Version, eb.ExtData - b.size.Store(common.StorageSize(rlp.ListSize(size))) + b.size.Store(rlp.ListSize(size)) return nil } -func (b *Block) setExtDataHelper(data *[]byte, recalc bool) { - if data == nil { - b.setExtData(nil, recalc) - return - } - b.setExtData(*data, recalc) -} - -func (b *Block) setExtData(data []byte, recalc bool) { - _data := make([]byte, len(data)) - 
b.extdata = &_data - copy(*b.extdata, data) - if recalc { - b.header.ExtDataHash = CalcExtDataHash(*b.extdata) - } -} - -func (b *Block) ExtData() []byte { - if b.extdata == nil { - return nil - } - return *b.extdata -} - -func (b *Block) SetVersion(ver uint32) { - b.version = ver -} - -func (b *Block) Version() uint32 { - return b.version -} - // EncodeRLP serializes b into the Ethereum RLP block format. func (b *Block) EncodeRLP(w io.Writer) error { return rlp.Encode(w, extblock{ @@ -334,7 +298,7 @@ func (b *Block) GasLimit() uint64 { return b.header.GasLimit } func (b *Block) GasUsed() uint64 { return b.header.GasUsed } func (b *Block) Difficulty() *big.Int { return new(big.Int).Set(b.header.Difficulty) } func (b *Block) Time() uint64 { return b.header.Time } -func (b *Block) Timestamp() *big.Int { return new(big.Int).SetUint64(b.header.Time) } +func (b *Block) Timestamp() uint64 { return b.header.Time } func (b *Block) NumberU64() uint64 { return b.header.Number.Uint64() } func (b *Block) MixDigest() common.Hash { return b.header.MixDigest } @@ -355,13 +319,6 @@ func (b *Block) BaseFee() *big.Int { return new(big.Int).Set(b.header.BaseFee) } -func (b *Block) ExtDataGasUsed() *big.Int { - if b.header.ExtDataGasUsed == nil { - return nil - } - return new(big.Int).Set(b.header.ExtDataGasUsed) -} - func (b *Block) BlockGasCost() *big.Int { if b.header.BlockGasCost == nil { return nil @@ -376,30 +333,23 @@ func (b *Block) Body() *Body { return &Body{b.transactions, b.uncles, b.version, // Size returns the true RLP encoded storage size of the block, either by encoding // and returning it, or returning a previously cached value. 
-func (b *Block) Size() common.StorageSize { +func (b *Block) Size() uint64 { if size := b.size.Load(); size != nil { - return size.(common.StorageSize) + return size.(uint64) } c := writeCounter(0) rlp.Encode(&c, b) - b.size.Store(common.StorageSize(c)) - return common.StorageSize(c) + b.size.Store(uint64(c)) + return uint64(c) } -type writeCounter common.StorageSize +type writeCounter uint64 func (c *writeCounter) Write(b []byte) (int, error) { *c += writeCounter(len(b)) return len(b), nil } -func CalcExtDataHash(extdata []byte) common.Hash { - if len(extdata) == 0 { - return EmptyExtDataHash - } - return rlpHash(extdata) -} - func CalcUncleHash(uncles []*Header) common.Hash { if len(uncles) == 0 { return EmptyUncleHash @@ -420,18 +370,16 @@ func (b *Block) WithSeal(header *Header) *Block { } // WithBody returns a new block with the given transaction and uncle contents. -func (b *Block) WithBody(transactions []*Transaction, uncles []*Header, version uint32, extdata *[]byte) *Block { +func (b *Block) WithBody(transactions []*Transaction, uncles []*Header) *Block { block := &Block{ header: CopyHeader(b.header), transactions: make([]*Transaction, len(transactions)), uncles: make([]*Header, len(uncles)), - version: version, } copy(block.transactions, transactions) for i := range uncles { block.uncles[i] = CopyHeader(uncles[i]) } - block.setExtDataHelper(extdata, false) return block } diff --git a/coreth/core/types/block_ext.go b/coreth/core/types/block_ext.go new file mode 100644 index 00000000..5ebb7aeb --- /dev/null +++ b/coreth/core/types/block_ext.go @@ -0,0 +1,67 @@ +// (c) 2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package types + +import ( + "math/big" + + "github.com/ethereum/go-ethereum/common" +) + +func (b *Block) WithExtData(version uint32, extdata *[]byte) *Block { + b.version = version + b.setExtDataHelper(extdata, false) + return b +} + +func (b *Block) setExtDataHelper(data *[]byte, recalc bool) { + if data == nil { + b.setExtData(nil, recalc) + return + } + b.setExtData(*data, recalc) +} + +func (b *Block) setExtData(data []byte, recalc bool) { + _data := make([]byte, len(data)) + b.extdata = &_data + copy(*b.extdata, data) + if recalc { + b.header.ExtDataHash = CalcExtDataHash(*b.extdata) + } +} + +func (b *Block) ExtData() []byte { + if b.extdata == nil { + return nil + } + return *b.extdata +} + +func (b *Block) Version() uint32 { + return b.version +} + +func (b *Block) ExtDataGasUsed() *big.Int { + if b.header.ExtDataGasUsed == nil { + return nil + } + return new(big.Int).Set(b.header.ExtDataGasUsed) +} + +func CalcExtDataHash(extdata []byte) common.Hash { + if len(extdata) == 0 { + return EmptyExtDataHash + } + return rlpHash(extdata) +} + +func NewBlockWithExtData( + header *Header, txs []*Transaction, uncles []*Header, receipts []*Receipt, + hasher TrieHasher, extdata []byte, recalc bool, +) *Block { + b := NewBlock(header, txs, uncles, receipts, hasher) + b.setExtData(extdata, recalc) + return b +} diff --git a/coreth/core/types/block_test.go b/coreth/core/types/block_test.go index 892e5c4c..19920e52 100644 --- a/coreth/core/types/block_test.go +++ b/coreth/core/types/block_test.go @@ -75,7 +75,7 @@ func TestBlockEncoding(t *testing.T) { check("ExtDataGasUsed", block.ExtDataGasUsed(), (*big.Int)(nil)) check("BlockGasCost", block.BlockGasCost(), (*big.Int)(nil)) - check("Size", block.Size(), common.StorageSize(len(blockEnc))) + check("Size", block.Size(), uint64(len(blockEnc))) check("BlockHash", block.Hash(), common.HexToHash("0608e5d5e13c337f226b621a0b08b3d50470f1961329826fd59f5a241d1df49e")) txHash := 
common.HexToHash("f5a60149da2ea4e97061a9f47c66036ee843fa76cd1f9ce5a71eb55ff90b2e0e") @@ -112,7 +112,7 @@ func TestEIP1559BlockEncoding(t *testing.T) { check("Hash", block.Hash(), common.HexToHash("2aefaa81ae43541bf2d608e2bb26a157212394abad4d219c06163be0d5d010f8")) check("Nonce", block.Nonce(), uint64(0xa13a5a8c8f2bb1c4)) check("Time", block.Time(), uint64(1426516743)) - check("Size", block.Size(), common.StorageSize(len(blockEnc))) + check("Size", block.Size(), uint64(len(blockEnc))) check("BaseFee", block.BaseFee(), new(big.Int).SetUint64(1_000_000_000)) check("ExtDataGasUsed", block.ExtDataGasUsed(), (*big.Int)(nil)) check("BlockGasCost", block.BlockGasCost(), (*big.Int)(nil)) @@ -177,7 +177,7 @@ func TestEIP2718BlockEncoding(t *testing.T) { check("Root", block.Root(), common.HexToHash("ef1552a40b7165c3cd773806b9e0c165b75356e0314bf0706f279c729f51e017")) check("Nonce", block.Nonce(), uint64(0xa13a5a8c8f2bb1c4)) check("Time", block.Time(), uint64(1426516743)) - check("Size", block.Size(), common.StorageSize(len(blockEnc))) + check("Size", block.Size(), uint64(len(blockEnc))) check("ExtDataHash", block.header.ExtDataHash, common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")) check("BaseFee", block.BaseFee(), (*big.Int)(nil)) check("ExtDataGasUsed", block.ExtDataGasUsed(), (*big.Int)(nil)) @@ -207,11 +207,11 @@ func TestEIP2718BlockEncoding(t *testing.T) { }) sig2 := common.Hex2Bytes("3dbacc8d0259f2508625e97fdfc57cd85fdd16e5821bc2c10bdd1a52649e8335476e10695b183a87b0aa292a7f4b78ef0c3fbe62aa2c42c84e1d9c3da159ef1401") tx2, _ = tx2.WithSignature(NewEIP2930Signer(big.NewInt(1)), sig2) - check("Transactions[1].Type()", block.Transactions()[1].Type(), uint8(AccessListTxType)) check("len(Transactions)", len(block.Transactions()), 2) check("Transactions[0].Hash", block.Transactions()[0].Hash(), tx1.Hash()) check("Transactions[1].Hash", block.Transactions()[1].Hash(), tx2.Hash()) + check("Transactions[1].Type()", 
block.Transactions()[1].Type(), uint8(AccessListTxType)) if !bytes.Equal(block.ExtData(), []byte{}) { t.Errorf("Block ExtraData field mismatch, expected empty byte array, but found 0x%x", block.ExtData()) @@ -258,7 +258,7 @@ func TestBlockEncodingWithExtraData(t *testing.T) { check("ExtDataGasUsed", block.ExtDataGasUsed(), (*big.Int)(nil)) check("BlockGasCost", block.BlockGasCost(), (*big.Int)(nil)) - check("Size", block.Size(), common.StorageSize(len(blockEnc))) + check("Size", block.Size(), uint64(len(blockEnc))) check("BlockHash", block.Hash(), common.HexToHash("4504ee98a94d16dbd70a35370501a3cb00c2965b012672085fbd328a72962902")) check("len(Transactions)", len(block.Transactions()), 0) @@ -315,9 +315,10 @@ func (h *testHasher) Reset() { h.hasher.Reset() } -func (h *testHasher) Update(key, val []byte) { +func (h *testHasher) Update(key, val []byte) error { h.hasher.Write(key) h.hasher.Write(val) + return nil } func (h *testHasher) Hash() common.Hash { @@ -362,7 +363,7 @@ func makeBenchBlock() *Block { Extra: []byte("benchmark uncle"), } } - return NewBlock(header, txs, uncles, receipts, newHasher(), nil, true) + return NewBlock(header, txs, uncles, receipts, newHasher()) } func TestAP4BlockEncoding(t *testing.T) { @@ -388,7 +389,7 @@ func TestAP4BlockEncoding(t *testing.T) { check("Hash", block.Hash(), common.HexToHash("0xc41340f5d2af79a12373bc8d6f0f05f9f98b240834608f428da171449e8a1468")) check("Nonce", block.Nonce(), uint64(0xa13a5a8c8f2bb1c4)) check("Time", block.Time(), uint64(1426516743)) - check("Size", block.Size(), common.StorageSize(len(blockEnc))) + check("Size", block.Size(), uint64(len(blockEnc))) check("BaseFee", block.BaseFee(), big.NewInt(1_000_000_000)) check("ExtDataGasUsed", block.ExtDataGasUsed(), big.NewInt(25_000)) check("BlockGasCost", block.BlockGasCost(), big.NewInt(1_000_000)) diff --git a/coreth/core/types/gen_access_tuple.go b/coreth/core/types/gen_access_tuple.go index fc48a84c..d740b709 100644 --- a/coreth/core/types/gen_access_tuple.go 
+++ b/coreth/core/types/gen_access_tuple.go @@ -12,8 +12,8 @@ import ( // MarshalJSON marshals as JSON. func (a AccessTuple) MarshalJSON() ([]byte, error) { type AccessTuple struct { - Address common.Address `json:"address" gencodec:"required"` - StorageKeys []common.Hash `json:"storageKeys" gencodec:"required"` + Address common.Address `json:"address" gencodec:"required"` + StorageKeys []common.Hash `json:"storageKeys" gencodec:"required"` } var enc AccessTuple enc.Address = a.Address @@ -24,8 +24,8 @@ func (a AccessTuple) MarshalJSON() ([]byte, error) { // UnmarshalJSON unmarshals from JSON. func (a *AccessTuple) UnmarshalJSON(input []byte) error { type AccessTuple struct { - Address *common.Address `json:"address" gencodec:"required"` - StorageKeys []common.Hash `json:"storageKeys" gencodec:"required"` + Address *common.Address `json:"address" gencodec:"required"` + StorageKeys []common.Hash `json:"storageKeys" gencodec:"required"` } var dec AccessTuple if err := json.Unmarshal(input, &dec); err != nil { diff --git a/coreth/core/types/gen_header_json.go b/coreth/core/types/gen_header_json.go index 26f934c5..8ee37232 100644 --- a/coreth/core/types/gen_header_json.go +++ b/coreth/core/types/gen_header_json.go @@ -35,6 +35,7 @@ func (h Header) MarshalJSON() ([]byte, error) { BaseFee *hexutil.Big `json:"baseFeePerGas" rlp:"optional"` ExtDataGasUsed *hexutil.Big `json:"extDataGasUsed" rlp:"optional"` BlockGasCost *hexutil.Big `json:"blockGasCost" rlp:"optional"` + ExcessDataGas *big.Int `json:"excessDataGas" rlp:"optional"` Hash common.Hash `json:"hash"` } var enc Header @@ -57,6 +58,7 @@ func (h Header) MarshalJSON() ([]byte, error) { enc.BaseFee = (*hexutil.Big)(h.BaseFee) enc.ExtDataGasUsed = (*hexutil.Big)(h.ExtDataGasUsed) enc.BlockGasCost = (*hexutil.Big)(h.BlockGasCost) + enc.ExcessDataGas = h.ExcessDataGas enc.Hash = h.Hash() return json.Marshal(&enc) } @@ -83,6 +85,7 @@ func (h *Header) UnmarshalJSON(input []byte) error { BaseFee *hexutil.Big 
`json:"baseFeePerGas" rlp:"optional"` ExtDataGasUsed *hexutil.Big `json:"extDataGasUsed" rlp:"optional"` BlockGasCost *hexutil.Big `json:"blockGasCost" rlp:"optional"` + ExcessDataGas *big.Int `json:"excessDataGas" rlp:"optional"` } var dec Header if err := json.Unmarshal(input, &dec); err != nil { @@ -159,5 +162,8 @@ func (h *Header) UnmarshalJSON(input []byte) error { if dec.BlockGasCost != nil { h.BlockGasCost = (*big.Int)(dec.BlockGasCost) } + if dec.ExcessDataGas != nil { + h.ExcessDataGas = dec.ExcessDataGas + } return nil } diff --git a/coreth/core/types/gen_header_rlp.go b/coreth/core/types/gen_header_rlp.go index 1dea7fce..74f0feb7 100644 --- a/coreth/core/types/gen_header_rlp.go +++ b/coreth/core/types/gen_header_rlp.go @@ -44,7 +44,8 @@ func (obj *Header) EncodeRLP(_w io.Writer) error { _tmp1 := obj.BaseFee != nil _tmp2 := obj.ExtDataGasUsed != nil _tmp3 := obj.BlockGasCost != nil - if _tmp1 || _tmp2 || _tmp3 { + _tmp4 := obj.ExcessDataGas != nil + if _tmp1 || _tmp2 || _tmp3 || _tmp4 { if obj.BaseFee == nil { w.Write(rlp.EmptyString) } else { @@ -54,7 +55,7 @@ func (obj *Header) EncodeRLP(_w io.Writer) error { w.WriteBigInt(obj.BaseFee) } } - if _tmp2 || _tmp3 { + if _tmp2 || _tmp3 || _tmp4 { if obj.ExtDataGasUsed == nil { w.Write(rlp.EmptyString) } else { @@ -64,7 +65,7 @@ func (obj *Header) EncodeRLP(_w io.Writer) error { w.WriteBigInt(obj.ExtDataGasUsed) } } - if _tmp3 { + if _tmp3 || _tmp4 { if obj.BlockGasCost == nil { w.Write(rlp.EmptyString) } else { @@ -74,6 +75,16 @@ func (obj *Header) EncodeRLP(_w io.Writer) error { w.WriteBigInt(obj.BlockGasCost) } } + if _tmp4 { + if obj.ExcessDataGas == nil { + w.Write(rlp.EmptyString) + } else { + if obj.ExcessDataGas.Sign() == -1 { + return rlp.ErrNegativeBigInt + } + w.WriteBigInt(obj.ExcessDataGas) + } + } w.ListEnd(_tmp0) return w.Flush() } diff --git a/coreth/core/types/gen_receipt_json.go b/coreth/core/types/gen_receipt_json.go index bb892f85..d83be144 100644 --- 
a/coreth/core/types/gen_receipt_json.go +++ b/coreth/core/types/gen_receipt_json.go @@ -25,6 +25,7 @@ func (r Receipt) MarshalJSON() ([]byte, error) { TxHash common.Hash `json:"transactionHash" gencodec:"required"` ContractAddress common.Address `json:"contractAddress"` GasUsed hexutil.Uint64 `json:"gasUsed" gencodec:"required"` + EffectiveGasPrice *hexutil.Big `json:"effectiveGasPrice"` BlockHash common.Hash `json:"blockHash,omitempty"` BlockNumber *hexutil.Big `json:"blockNumber,omitempty"` TransactionIndex hexutil.Uint `json:"transactionIndex"` @@ -39,6 +40,7 @@ func (r Receipt) MarshalJSON() ([]byte, error) { enc.TxHash = r.TxHash enc.ContractAddress = r.ContractAddress enc.GasUsed = hexutil.Uint64(r.GasUsed) + enc.EffectiveGasPrice = (*hexutil.Big)(r.EffectiveGasPrice) enc.BlockHash = r.BlockHash enc.BlockNumber = (*hexutil.Big)(r.BlockNumber) enc.TransactionIndex = hexutil.Uint(r.TransactionIndex) @@ -57,6 +59,7 @@ func (r *Receipt) UnmarshalJSON(input []byte) error { TxHash *common.Hash `json:"transactionHash" gencodec:"required"` ContractAddress *common.Address `json:"contractAddress"` GasUsed *hexutil.Uint64 `json:"gasUsed" gencodec:"required"` + EffectiveGasPrice *hexutil.Big `json:"effectiveGasPrice"` BlockHash *common.Hash `json:"blockHash,omitempty"` BlockNumber *hexutil.Big `json:"blockNumber,omitempty"` TransactionIndex *hexutil.Uint `json:"transactionIndex"` @@ -97,6 +100,9 @@ func (r *Receipt) UnmarshalJSON(input []byte) error { return errors.New("missing required field 'gasUsed' for Receipt") } r.GasUsed = uint64(*dec.GasUsed) + if dec.EffectiveGasPrice != nil { + r.EffectiveGasPrice = (*big.Int)(dec.EffectiveGasPrice) + } if dec.BlockHash != nil { r.BlockHash = *dec.BlockHash } diff --git a/coreth/core/types/hashes.go b/coreth/core/types/hashes.go new file mode 100644 index 00000000..41631821 --- /dev/null +++ b/coreth/core/types/hashes.go @@ -0,0 +1,52 @@ +// (c) 2023, Ava Labs, Inc. 
+// +// This file is a derived work, based on the go-ethereum library whose original +// notices appear below. +// +// It is distributed under a license compatible with the licensing terms of the +// original code from which it is derived. +// +// Much love to the original authors for their work. +// ********** +// Copyright 2023 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package types + +import ( + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" +) + +var ( + // EmptyRootHash is the known root hash of an empty trie. + EmptyRootHash = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") + + // EmptyUncleHash is the known hash of the empty uncle set. + EmptyUncleHash = rlpHash([]*Header(nil)) // 1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347 + + // EmptyCodeHash is the known hash of the empty EVM bytecode. + EmptyCodeHash = crypto.Keccak256Hash(nil) // c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470 + + // EmptyTxsHash is the known hash of the empty transaction set. + EmptyTxsHash = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") + + // EmptyReceiptsHash is the known hash of the empty receipt set. 
+ EmptyReceiptsHash = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") + + // EmptyExtDataHash is the known hash of empty extdata bytes. + EmptyExtDataHash = rlpHash([]byte(nil)) +) diff --git a/coreth/core/types/hashing.go b/coreth/core/types/hashing.go index 2d909733..a82c18a9 100644 --- a/coreth/core/types/hashing.go +++ b/coreth/core/types/hashing.go @@ -72,7 +72,7 @@ func prefixedRlpHash(prefix byte, x interface{}) (h common.Hash) { // This is internal, do not use. type TrieHasher interface { Reset() - Update([]byte, []byte) + Update([]byte, []byte) error Hash() common.Hash } @@ -93,7 +93,7 @@ func encodeForDerive(list DerivableList, i int, buf *bytes.Buffer) []byte { return common.CopyBytes(buf.Bytes()) } -// DeriveSha creates the tree hashes of transactions and receipts in a block header. +// DeriveSha creates the tree hashes of transactions, receipts, and withdrawals in a block header. func DeriveSha(list DerivableList, hasher TrieHasher) common.Hash { hasher.Reset() @@ -103,6 +103,9 @@ func DeriveSha(list DerivableList, hasher TrieHasher) common.Hash { // StackTrie requires values to be inserted in increasing hash order, which is not the // order that `list` provides hashes in. This insertion sequence ensures that the // order is correct. + // + // The error returned by hasher is omitted because hasher will produce an incorrect + // hash in case any error occurs. 
var indexBuf []byte for i := 1; i < list.Len() && i <= 0x7f; i++ { indexBuf = rlp.AppendUint64(indexBuf[:0], uint64(i)) diff --git a/coreth/core/types/hashing_test.go b/coreth/core/types/hashing_test.go index 9b727cba..b60467bb 100644 --- a/coreth/core/types/hashing_test.go +++ b/coreth/core/types/hashing_test.go @@ -229,9 +229,10 @@ func (d *hashToHumanReadable) Reset() { d.data = make([]byte, 0) } -func (d *hashToHumanReadable) Update(i []byte, i2 []byte) { +func (d *hashToHumanReadable) Update(i []byte, i2 []byte) error { l := fmt.Sprintf("%x %x\n", i, i2) d.data = append(d.data, []byte(l)...) + return nil } func (d *hashToHumanReadable) Hash() common.Hash { diff --git a/coreth/core/types/log.go b/coreth/core/types/log.go index 131ef859..926327bb 100644 --- a/coreth/core/types/log.go +++ b/coreth/core/types/log.go @@ -74,24 +74,13 @@ type logMarshaling struct { //go:generate go run github.com/ethereum/go-ethereum/rlp/rlpgen -type rlpLog -out gen_log_rlp.go +// rlpLog is used to RLP-encode both the consensus and storage formats. type rlpLog struct { Address common.Address Topics []common.Hash Data []byte } -// legacyRlpStorageLog is the previous storage encoding of a log including some redundant fields. -type legacyRlpStorageLog struct { - Address common.Address - Topics []common.Hash - Data []byte - BlockNumber uint64 - TxHash common.Hash - TxIndex uint - BlockHash common.Hash - Index uint -} - // EncodeRLP implements rlp.Encoder. func (l *Log) EncodeRLP(w io.Writer) error { rl := rlpLog{Address: l.Address, Topics: l.Topics, Data: l.Data} @@ -108,47 +97,6 @@ func (l *Log) DecodeRLP(s *rlp.Stream) error { return err } -// LogForStorage is a wrapper around a Log that handles -// backward compatibility with prior storage formats. -type LogForStorage Log - -// EncodeRLP implements rlp.Encoder. 
-func (l *LogForStorage) EncodeRLP(w io.Writer) error { - rl := rlpLog{Address: l.Address, Topics: l.Topics, Data: l.Data} - return rlp.Encode(w, &rl) -} - -// DecodeRLP implements rlp.Decoder. -// -// Note some redundant fields(e.g. block number, tx hash etc) will be assembled later. -func (l *LogForStorage) DecodeRLP(s *rlp.Stream) error { - blob, err := s.Raw() - if err != nil { - return err - } - var dec rlpLog - err = rlp.DecodeBytes(blob, &dec) - if err == nil { - *l = LogForStorage{ - Address: dec.Address, - Topics: dec.Topics, - Data: dec.Data, - } - } else { - // Try to decode log with previous definition. - var dec legacyRlpStorageLog - err = rlp.DecodeBytes(blob, &dec) - if err == nil { - *l = LogForStorage{ - Address: dec.Address, - Topics: dec.Topics, - Data: dec.Data, - } - } - } - return err -} - // FlattenLogs converts a nested array of logs to a single array of logs. func FlattenLogs(list [][]*Log) []*Log { var flat []*Log diff --git a/coreth/core/types/receipt.go b/coreth/core/types/receipt.go index 9cb704a9..beca87e9 100644 --- a/coreth/core/types/receipt.go +++ b/coreth/core/types/receipt.go @@ -69,10 +69,10 @@ type Receipt struct { Logs []*Log `json:"logs" gencodec:"required"` // Implementation fields: These fields are added by geth when processing a transaction. - // They are stored in the chain database. - TxHash common.Hash `json:"transactionHash" gencodec:"required"` - ContractAddress common.Address `json:"contractAddress"` - GasUsed uint64 `json:"gasUsed" gencodec:"required"` + TxHash common.Hash `json:"transactionHash" gencodec:"required"` + ContractAddress common.Address `json:"contractAddress"` + GasUsed uint64 `json:"gasUsed" gencodec:"required"` + EffectiveGasPrice *big.Int `json:"effectiveGasPrice"` // required, but tag omitted for backwards compatibility // Inclusion information: These fields provide information about the inclusion of the // transaction corresponding to this receipt. 
@@ -87,6 +87,7 @@ type receiptMarshaling struct { Status hexutil.Uint64 CumulativeGasUsed hexutil.Uint64 GasUsed hexutil.Uint64 + EffectiveGasPrice *hexutil.Big BlockNumber *hexutil.Big TransactionIndex hexutil.Uint } @@ -103,28 +104,7 @@ type receiptRLP struct { type storedReceiptRLP struct { PostStateOrStatus []byte CumulativeGasUsed uint64 - Logs []*LogForStorage -} - -// v4StoredReceiptRLP is the storage encoding of a receipt used in database version 4. -type v4StoredReceiptRLP struct { - PostStateOrStatus []byte - CumulativeGasUsed uint64 - TxHash common.Hash - ContractAddress common.Address - Logs []*LogForStorage - GasUsed uint64 -} - -// v3StoredReceiptRLP is the original storage encoding of a receipt including some unnecessary fields. -type v3StoredReceiptRLP struct { - PostStateOrStatus []byte - CumulativeGasUsed uint64 - Bloom Bloom - TxHash common.Hash - ContractAddress common.Address - Logs []*LogForStorage - GasUsed uint64 + Logs []*Log } // NewReceipt creates a barebone transaction receipt, copying the init fields. @@ -302,82 +282,20 @@ func (r *ReceiptForStorage) EncodeRLP(_w io.Writer) error { // DecodeRLP implements rlp.Decoder, and loads both consensus and implementation // fields of a receipt from an RLP stream. func (r *ReceiptForStorage) DecodeRLP(s *rlp.Stream) error { - // Retrieve the entire receipt blob as we need to try multiple decoders - blob, err := s.Raw() - if err != nil { - return err - } - // Try decoding from the newest format for future proofness, then the older one - // for old nodes that just upgraded. V4 was an intermediate unreleased format so - // we do need to decode it, but it's not common (try last). 
- if err := decodeStoredReceiptRLP(r, blob); err == nil { - return nil - } - if err := decodeV3StoredReceiptRLP(r, blob); err == nil { - return nil - } - return decodeV4StoredReceiptRLP(r, blob) -} - -func decodeStoredReceiptRLP(r *ReceiptForStorage, blob []byte) error { var stored storedReceiptRLP - if err := rlp.DecodeBytes(blob, &stored); err != nil { + if err := s.Decode(&stored); err != nil { return err } if err := (*Receipt)(r).setStatus(stored.PostStateOrStatus); err != nil { return err } r.CumulativeGasUsed = stored.CumulativeGasUsed - r.Logs = make([]*Log, len(stored.Logs)) - for i, log := range stored.Logs { - r.Logs[i] = (*Log)(log) - } + r.Logs = stored.Logs r.Bloom = CreateBloom(Receipts{(*Receipt)(r)}) return nil } -func decodeV4StoredReceiptRLP(r *ReceiptForStorage, blob []byte) error { - var stored v4StoredReceiptRLP - if err := rlp.DecodeBytes(blob, &stored); err != nil { - return err - } - if err := (*Receipt)(r).setStatus(stored.PostStateOrStatus); err != nil { - return err - } - r.CumulativeGasUsed = stored.CumulativeGasUsed - r.TxHash = stored.TxHash - r.ContractAddress = stored.ContractAddress - r.GasUsed = stored.GasUsed - r.Logs = make([]*Log, len(stored.Logs)) - for i, log := range stored.Logs { - r.Logs[i] = (*Log)(log) - } - r.Bloom = CreateBloom(Receipts{(*Receipt)(r)}) - - return nil -} - -func decodeV3StoredReceiptRLP(r *ReceiptForStorage, blob []byte) error { - var stored v3StoredReceiptRLP - if err := rlp.DecodeBytes(blob, &stored); err != nil { - return err - } - if err := (*Receipt)(r).setStatus(stored.PostStateOrStatus); err != nil { - return err - } - r.CumulativeGasUsed = stored.CumulativeGasUsed - r.Bloom = stored.Bloom - r.TxHash = stored.TxHash - r.ContractAddress = stored.ContractAddress - r.GasUsed = stored.GasUsed - r.Logs = make([]*Log, len(stored.Logs)) - for i, log := range stored.Logs { - r.Logs[i] = (*Log)(log) - } - return nil -} - // Receipts implements DerivableList for receipts. 
type Receipts []*Receipt @@ -406,8 +324,8 @@ func (rs Receipts) EncodeIndex(i int, w *bytes.Buffer) { // DeriveFields fills the receipts with their computed fields based on consensus // data and contextual infos like containing block and transactions. -func (rs Receipts) DeriveFields(config *params.ChainConfig, hash common.Hash, number uint64, time uint64, txs Transactions) error { - signer := MakeSigner(config, new(big.Int).SetUint64(number), new(big.Int).SetUint64(time)) +func (rs Receipts) DeriveFields(config *params.ChainConfig, hash common.Hash, number uint64, time uint64, baseFee *big.Int, txs []*Transaction) error { + signer := MakeSigner(config, new(big.Int).SetUint64(number), time) logIndex := uint(0) if len(txs) != len(rs) { @@ -418,6 +336,8 @@ func (rs Receipts) DeriveFields(config *params.ChainConfig, hash common.Hash, nu rs[i].Type = txs[i].Type() rs[i].TxHash = txs[i].Hash() + rs[i].EffectiveGasPrice = txs[i].inner.effectiveGasPrice(new(big.Int), baseFee) + // block location fields rs[i].BlockHash = hash rs[i].BlockNumber = new(big.Int).SetUint64(number) @@ -428,13 +348,17 @@ func (rs Receipts) DeriveFields(config *params.ChainConfig, hash common.Hash, nu // Deriving the signer is expensive, only do if it's actually needed from, _ := Sender(signer, txs[i]) rs[i].ContractAddress = crypto.CreateAddress(from, txs[i].Nonce()) + } else { + rs[i].ContractAddress = common.Address{} } + // The used gas can be calculated based on previous r if i == 0 { rs[i].GasUsed = rs[i].CumulativeGasUsed } else { rs[i].GasUsed = rs[i].CumulativeGasUsed - rs[i-1].CumulativeGasUsed } + // The derived log fields can simply be set from the block and transaction for j := 0; j < len(rs[i].Logs); j++ { rs[i].Logs[j].BlockNumber = number diff --git a/coreth/core/types/receipt_test.go b/coreth/core/types/receipt_test.go index a34a7742..b73fd997 100644 --- a/coreth/core/types/receipt_test.go +++ b/coreth/core/types/receipt_test.go @@ -28,6 +28,7 @@ package types import ( "bytes" + 
"encoding/json" "math" "math/big" "reflect" @@ -35,8 +36,9 @@ import ( "github.com/ava-labs/coreth/params" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/rlp" + "github.com/holiman/uint256" + "github.com/kylelemons/godebug/diff" ) var ( @@ -90,263 +92,281 @@ var ( }, Type: DynamicFeeTxType, } -) - -func TestDecodeEmptyTypedReceipt(t *testing.T) { - input := []byte{0x80} - var r Receipt - err := rlp.DecodeBytes(input, &r) - if err != errShortTypedReceipt { - t.Fatal("wrong error:", err) - } -} - -func TestLegacyReceiptDecoding(t *testing.T) { - tests := []struct { - name string - encode func(*Receipt) ([]byte, error) - }{ - { - "StoredReceiptRLP", - encodeAsStoredReceiptRLP, - }, - { - "V4StoredReceiptRLP", - encodeAsV4StoredReceiptRLP, - }, - { - "V3StoredReceiptRLP", - encodeAsV3StoredReceiptRLP, - }, - } - tx := NewTransaction(1, common.HexToAddress("0x1"), big.NewInt(1), 1, big.NewInt(1), nil) - receipt := &Receipt{ - Status: ReceiptStatusFailed, - CumulativeGasUsed: 1, - Logs: []*Log{ - { - Address: common.BytesToAddress([]byte{0x11}), - Topics: []common.Hash{common.HexToHash("dead"), common.HexToHash("beef")}, - Data: []byte{0x01, 0x00, 0xff}, - }, - { - Address: common.BytesToAddress([]byte{0x01, 0x11}), - Topics: []common.Hash{common.HexToHash("dead"), common.HexToHash("beef")}, - Data: []byte{0x01, 0x00, 0xff}, - }, - }, - TxHash: tx.Hash(), - ContractAddress: common.BytesToAddress([]byte{0x01, 0x11, 0x11}), - GasUsed: 111111, - } - receipt.Bloom = CreateBloom(Receipts{receipt}) - - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - enc, err := tc.encode(receipt) - if err != nil { - t.Fatalf("Error encoding receipt: %v", err) - } - var dec ReceiptForStorage - if err := rlp.DecodeBytes(enc, &dec); err != nil { - t.Fatalf("Error decoding RLP receipt: %v", err) - } - // Check whether all consensus fields are correct. 
- if dec.Status != receipt.Status { - t.Fatalf("Receipt status mismatch, want %v, have %v", receipt.Status, dec.Status) - } - if dec.CumulativeGasUsed != receipt.CumulativeGasUsed { - t.Fatalf("Receipt CumulativeGasUsed mismatch, want %v, have %v", receipt.CumulativeGasUsed, dec.CumulativeGasUsed) - } - if dec.Bloom != receipt.Bloom { - t.Fatalf("Bloom data mismatch, want %v, have %v", receipt.Bloom, dec.Bloom) - } - if len(dec.Logs) != len(receipt.Logs) { - t.Fatalf("Receipt log number mismatch, want %v, have %v", len(receipt.Logs), len(dec.Logs)) - } - for i := 0; i < len(dec.Logs); i++ { - if dec.Logs[i].Address != receipt.Logs[i].Address { - t.Fatalf("Receipt log %d address mismatch, want %v, have %v", i, receipt.Logs[i].Address, dec.Logs[i].Address) - } - if !reflect.DeepEqual(dec.Logs[i].Topics, receipt.Logs[i].Topics) { - t.Fatalf("Receipt log %d topics mismatch, want %v, have %v", i, receipt.Logs[i].Topics, dec.Logs[i].Topics) - } - if !bytes.Equal(dec.Logs[i].Data, receipt.Logs[i].Data) { - t.Fatalf("Receipt log %d data mismatch, want %v, have %v", i, receipt.Logs[i].Data, dec.Logs[i].Data) - } - } - }) - } -} - -func encodeAsStoredReceiptRLP(want *Receipt) ([]byte, error) { - stored := &storedReceiptRLP{ - PostStateOrStatus: want.statusEncoding(), - CumulativeGasUsed: want.CumulativeGasUsed, - Logs: make([]*LogForStorage, len(want.Logs)), - } - for i, log := range want.Logs { - stored.Logs[i] = (*LogForStorage)(log) - } - return rlp.EncodeToBytes(stored) -} - -func encodeAsV4StoredReceiptRLP(want *Receipt) ([]byte, error) { - stored := &v4StoredReceiptRLP{ - PostStateOrStatus: want.statusEncoding(), - CumulativeGasUsed: want.CumulativeGasUsed, - TxHash: want.TxHash, - ContractAddress: want.ContractAddress, - Logs: make([]*LogForStorage, len(want.Logs)), - GasUsed: want.GasUsed, - } - for i, log := range want.Logs { - stored.Logs[i] = (*LogForStorage)(log) - } - return rlp.EncodeToBytes(stored) -} - -func encodeAsV3StoredReceiptRLP(want *Receipt) ([]byte, 
error) { - stored := &v3StoredReceiptRLP{ - PostStateOrStatus: want.statusEncoding(), - CumulativeGasUsed: want.CumulativeGasUsed, - Bloom: want.Bloom, - TxHash: want.TxHash, - ContractAddress: want.ContractAddress, - Logs: make([]*LogForStorage, len(want.Logs)), - GasUsed: want.GasUsed, - } - for i, log := range want.Logs { - stored.Logs[i] = (*LogForStorage)(log) - } - return rlp.EncodeToBytes(stored) -} - -// Tests that receipt data can be correctly derived from the contextual infos -func TestDeriveFields(t *testing.T) { // Create a few transactions to have receipts for - to2 := common.HexToAddress("0x2") - to3 := common.HexToAddress("0x3") - txs := Transactions{ + to2 = common.HexToAddress("0x2") + to3 = common.HexToAddress("0x3") + to4 = common.HexToAddress("0x4") + to5 = common.HexToAddress("0x5") + to6 = common.HexToAddress("0x6") + to7 = common.HexToAddress("0x7") + txs = Transactions{ NewTx(&LegacyTx{ Nonce: 1, Value: big.NewInt(1), Gas: 1, - GasPrice: big.NewInt(1), + GasPrice: big.NewInt(11), }), NewTx(&LegacyTx{ To: &to2, Nonce: 2, Value: big.NewInt(2), Gas: 2, - GasPrice: big.NewInt(2), + GasPrice: big.NewInt(22), }), NewTx(&AccessListTx{ To: &to3, Nonce: 3, Value: big.NewInt(3), Gas: 3, - GasPrice: big.NewInt(3), + GasPrice: big.NewInt(33), + }), + // EIP-1559 transactions. + NewTx(&DynamicFeeTx{ + To: &to4, + Nonce: 4, + Value: big.NewInt(4), + Gas: 4, + GasTipCap: big.NewInt(44), + GasFeeCap: big.NewInt(1044), + }), + NewTx(&DynamicFeeTx{ + To: &to5, + Nonce: 5, + Value: big.NewInt(5), + Gas: 5, + GasTipCap: big.NewInt(55), + GasFeeCap: big.NewInt(1055), + }), + // EIP-4844 transactions. 
+ NewTx(&BlobTx{ + To: &to6, + Nonce: 6, + Value: uint256.NewInt(6), + Gas: 6, + GasTipCap: uint256.NewInt(66), + GasFeeCap: uint256.NewInt(1066), + BlobFeeCap: uint256.NewInt(100066), + }), + NewTx(&BlobTx{ + To: &to7, + Nonce: 7, + Value: uint256.NewInt(7), + Gas: 7, + GasTipCap: uint256.NewInt(77), + GasFeeCap: uint256.NewInt(1077), + BlobFeeCap: uint256.NewInt(100077), }), } + + blockNumber = big.NewInt(1) + blockTime = uint64(2) + blockHash = common.BytesToHash([]byte{0x03, 0x14}) + // Create the corresponding receipts - receipts := Receipts{ + receipts = Receipts{ &Receipt{ Status: ReceiptStatusFailed, CumulativeGasUsed: 1, Logs: []*Log{ - {Address: common.BytesToAddress([]byte{0x11})}, - {Address: common.BytesToAddress([]byte{0x01, 0x11})}, + { + Address: common.BytesToAddress([]byte{0x11}), + Topics: []common.Hash{common.HexToHash("dead"), common.HexToHash("beef")}, + // derived fields: + BlockNumber: blockNumber.Uint64(), + TxHash: txs[0].Hash(), + TxIndex: 0, + BlockHash: blockHash, + Index: 0, + }, + { + Address: common.BytesToAddress([]byte{0x01, 0x11}), + Topics: []common.Hash{common.HexToHash("dead"), common.HexToHash("beef")}, + // derived fields: + BlockNumber: blockNumber.Uint64(), + TxHash: txs[0].Hash(), + TxIndex: 0, + BlockHash: blockHash, + Index: 1, + }, }, - TxHash: txs[0].Hash(), - ContractAddress: common.BytesToAddress([]byte{0x01, 0x11, 0x11}), - GasUsed: 1, + // derived fields: + TxHash: txs[0].Hash(), + ContractAddress: common.HexToAddress("0x5a443704dd4b594b382c22a083e2bd3090a6fef3"), + GasUsed: 1, + EffectiveGasPrice: big.NewInt(11), + BlockHash: blockHash, + BlockNumber: blockNumber, + TransactionIndex: 0, }, &Receipt{ PostState: common.Hash{2}.Bytes(), CumulativeGasUsed: 3, Logs: []*Log{ - {Address: common.BytesToAddress([]byte{0x22})}, - {Address: common.BytesToAddress([]byte{0x02, 0x22})}, + { + Address: common.BytesToAddress([]byte{0x22}), + Topics: []common.Hash{common.HexToHash("dead"), common.HexToHash("beef")}, + // derived 
fields: + BlockNumber: blockNumber.Uint64(), + TxHash: txs[1].Hash(), + TxIndex: 1, + BlockHash: blockHash, + Index: 2, + }, + { + Address: common.BytesToAddress([]byte{0x02, 0x22}), + Topics: []common.Hash{common.HexToHash("dead"), common.HexToHash("beef")}, + // derived fields: + BlockNumber: blockNumber.Uint64(), + TxHash: txs[1].Hash(), + TxIndex: 1, + BlockHash: blockHash, + Index: 3, + }, }, - TxHash: txs[1].Hash(), - ContractAddress: common.BytesToAddress([]byte{0x02, 0x22, 0x22}), - GasUsed: 2, + // derived fields: + TxHash: txs[1].Hash(), + GasUsed: 2, + EffectiveGasPrice: big.NewInt(22), + BlockHash: blockHash, + BlockNumber: blockNumber, + TransactionIndex: 1, }, &Receipt{ Type: AccessListTxType, PostState: common.Hash{3}.Bytes(), CumulativeGasUsed: 6, - Logs: []*Log{ - {Address: common.BytesToAddress([]byte{0x33})}, - {Address: common.BytesToAddress([]byte{0x03, 0x33})}, - }, - TxHash: txs[2].Hash(), - ContractAddress: common.BytesToAddress([]byte{0x03, 0x33, 0x33}), - GasUsed: 3, + Logs: []*Log{}, + // derived fields: + TxHash: txs[2].Hash(), + GasUsed: 3, + EffectiveGasPrice: big.NewInt(33), + BlockHash: blockHash, + BlockNumber: blockNumber, + TransactionIndex: 2, + }, + &Receipt{ + Type: DynamicFeeTxType, + PostState: common.Hash{4}.Bytes(), + CumulativeGasUsed: 10, + Logs: []*Log{}, + // derived fields: + TxHash: txs[3].Hash(), + GasUsed: 4, + EffectiveGasPrice: big.NewInt(1044), + BlockHash: blockHash, + BlockNumber: blockNumber, + TransactionIndex: 3, + }, + &Receipt{ + Type: DynamicFeeTxType, + PostState: common.Hash{5}.Bytes(), + CumulativeGasUsed: 15, + Logs: []*Log{}, + // derived fields: + TxHash: txs[4].Hash(), + GasUsed: 5, + EffectiveGasPrice: big.NewInt(1055), + BlockHash: blockHash, + BlockNumber: blockNumber, + TransactionIndex: 4, + }, + &Receipt{ + Type: BlobTxType, + PostState: common.Hash{6}.Bytes(), + CumulativeGasUsed: 21, + Logs: []*Log{}, + // derived fields: + TxHash: txs[5].Hash(), + GasUsed: 6, + EffectiveGasPrice: 
big.NewInt(1066), + BlockHash: blockHash, + BlockNumber: blockNumber, + TransactionIndex: 5, + }, + &Receipt{ + Type: BlobTxType, + PostState: common.Hash{7}.Bytes(), + CumulativeGasUsed: 28, + Logs: []*Log{}, + // derived fields: + TxHash: txs[6].Hash(), + GasUsed: 7, + EffectiveGasPrice: big.NewInt(1077), + BlockHash: blockHash, + BlockNumber: blockNumber, + TransactionIndex: 6, }, } - // Clear all the computed fields and re-derive them - number := big.NewInt(1) - hash := common.BytesToHash([]byte{0x03, 0x14}) +) - clearComputedFieldsOnReceipts(t, receipts) - if err := receipts.DeriveFields(params.TestChainConfig, hash, number.Uint64(), number.Uint64(), txs); err != nil { +func TestDecodeEmptyTypedReceipt(t *testing.T) { + input := []byte{0x80} + var r Receipt + err := rlp.DecodeBytes(input, &r) + if err != errShortTypedReceipt { + t.Fatal("wrong error:", err) + } +} + +// Tests that receipt data can be correctly derived from the contextual infos +func TestDeriveFields(t *testing.T) { + // Re-derive receipts. + basefee := big.NewInt(1000) + derivedReceipts := clearComputedFieldsOnReceipts(receipts) + err := Receipts(derivedReceipts).DeriveFields(params.TestChainConfig, blockHash, blockNumber.Uint64(), blockTime, basefee, txs) + if err != nil { t.Fatalf("DeriveFields(...) = %v, want ", err) } - // Iterate over all the computed fields and check that they're correct - signer := MakeSigner(params.TestChainConfig, number, number) - logIndex := uint(0) + // Check diff of receipts against derivedReceipts. + r1, err := json.MarshalIndent(receipts, "", " ") + if err != nil { + t.Fatal("error marshaling input receipts:", err) + } + + r2, err := json.MarshalIndent(derivedReceipts, "", " ") + if err != nil { + t.Fatal("error marshaling derived receipts:", err) + } + d := diff.Diff(string(r1), string(r2)) + if d != "" { + t.Fatal("receipts differ:", d) + } +} + +// Test that we can marshal/unmarshal receipts to/from json without errors. 
+// This also confirms that our test receipts contain all the required fields. +func TestReceiptJSON(t *testing.T) { for i := range receipts { - if receipts[i].Type != txs[i].Type() { - t.Errorf("receipts[%d].Type = %d, want %d", i, receipts[i].Type, txs[i].Type()) - } - if receipts[i].TxHash != txs[i].Hash() { - t.Errorf("receipts[%d].TxHash = %s, want %s", i, receipts[i].TxHash.String(), txs[i].Hash().String()) - } - if receipts[i].BlockHash != hash { - t.Errorf("receipts[%d].BlockHash = %s, want %s", i, receipts[i].BlockHash.String(), hash.String()) - } - if receipts[i].BlockNumber.Cmp(number) != 0 { - t.Errorf("receipts[%c].BlockNumber = %s, want %s", i, receipts[i].BlockNumber.String(), number.String()) - } - if receipts[i].TransactionIndex != uint(i) { - t.Errorf("receipts[%d].TransactionIndex = %d, want %d", i, receipts[i].TransactionIndex, i) - } - if receipts[i].GasUsed != txs[i].Gas() { - t.Errorf("receipts[%d].GasUsed = %d, want %d", i, receipts[i].GasUsed, txs[i].Gas()) + b, err := receipts[i].MarshalJSON() + if err != nil { + t.Fatal("error marshaling receipt to json:", err) } - if txs[i].To() != nil && receipts[i].ContractAddress != (common.Address{}) { - t.Errorf("receipts[%d].ContractAddress = %s, want %s", i, receipts[i].ContractAddress.String(), (common.Address{}).String()) - } - from, _ := Sender(signer, txs[i]) - contractAddress := crypto.CreateAddress(from, txs[i].Nonce()) - if txs[i].To() == nil && receipts[i].ContractAddress != contractAddress { - t.Errorf("receipts[%d].ContractAddress = %s, want %s", i, receipts[i].ContractAddress.String(), contractAddress.String()) - } - for j := range receipts[i].Logs { - if receipts[i].Logs[j].BlockNumber != number.Uint64() { - t.Errorf("receipts[%d].Logs[%d].BlockNumber = %d, want %d", i, j, receipts[i].Logs[j].BlockNumber, number.Uint64()) - } - if receipts[i].Logs[j].BlockHash != hash { - t.Errorf("receipts[%d].Logs[%d].BlockHash = %s, want %s", i, j, receipts[i].Logs[j].BlockHash.String(), 
hash.String()) - } - if receipts[i].Logs[j].TxHash != txs[i].Hash() { - t.Errorf("receipts[%d].Logs[%d].TxHash = %s, want %s", i, j, receipts[i].Logs[j].TxHash.String(), txs[i].Hash().String()) - } - if receipts[i].Logs[j].TxIndex != uint(i) { - t.Errorf("receipts[%d].Logs[%d].TransactionIndex = %d, want %d", i, j, receipts[i].Logs[j].TxIndex, i) - } - if receipts[i].Logs[j].Index != logIndex { - t.Errorf("receipts[%d].Logs[%d].Index = %d, want %d", i, j, receipts[i].Logs[j].Index, logIndex) - } - logIndex++ + r := Receipt{} + err = r.UnmarshalJSON(b) + if err != nil { + t.Fatal("error unmarshaling receipt from json:", err) } } } +// Test we can still parse receipt without EffectiveGasPrice for backwards compatibility, even +// though it is required per the spec. +func TestEffectiveGasPriceNotRequired(t *testing.T) { + r := *receipts[0] + r.EffectiveGasPrice = nil + b, err := r.MarshalJSON() + if err != nil { + t.Fatal("error marshaling receipt to json:", err) + } + r2 := Receipt{} + err = r2.UnmarshalJSON(b) + if err != nil { + t.Fatal("error unmarshaling receipt from json:", err) + } +} + // TestTypedReceiptEncodingDecoding reproduces a flaw that existed in the receipt // rlp decoder, which failed due to a shadowing error. 
func TestTypedReceiptEncodingDecoding(t *testing.T) { @@ -474,41 +494,36 @@ func TestReceiptUnmarshalBinary(t *testing.T) { } } -func clearComputedFieldsOnReceipts(t *testing.T, receipts Receipts) { - t.Helper() - - for _, receipt := range receipts { - clearComputedFieldsOnReceipt(t, receipt) +func clearComputedFieldsOnReceipts(receipts []*Receipt) []*Receipt { + r := make([]*Receipt, len(receipts)) + for i, receipt := range receipts { + r[i] = clearComputedFieldsOnReceipt(receipt) } + return r } -func clearComputedFieldsOnReceipt(t *testing.T, receipt *Receipt) { - t.Helper() - - receipt.TxHash = common.Hash{} - receipt.BlockHash = common.Hash{} - receipt.BlockNumber = big.NewInt(math.MaxUint32) - receipt.TransactionIndex = math.MaxUint32 - receipt.ContractAddress = common.Address{} - receipt.GasUsed = 0 - - clearComputedFieldsOnLogs(t, receipt.Logs) +func clearComputedFieldsOnReceipt(receipt *Receipt) *Receipt { + cpy := *receipt + cpy.TxHash = common.Hash{0xff, 0xff, 0x11} + cpy.BlockHash = common.Hash{0xff, 0xff, 0x22} + cpy.BlockNumber = big.NewInt(math.MaxUint32) + cpy.TransactionIndex = math.MaxUint32 + cpy.ContractAddress = common.Address{0xff, 0xff, 0x33} + cpy.GasUsed = 0xffffffff + cpy.Logs = clearComputedFieldsOnLogs(receipt.Logs) + return &cpy } -func clearComputedFieldsOnLogs(t *testing.T, logs []*Log) { - t.Helper() - - for _, log := range logs { - clearComputedFieldsOnLog(t, log) - } -} - -func clearComputedFieldsOnLog(t *testing.T, log *Log) { - t.Helper() - - log.BlockNumber = math.MaxUint32 - log.BlockHash = common.Hash{} - log.TxHash = common.Hash{} - log.TxIndex = math.MaxUint32 - log.Index = math.MaxUint32 +func clearComputedFieldsOnLogs(logs []*Log) []*Log { + l := make([]*Log, len(logs)) + for i, log := range logs { + cpy := *log + cpy.BlockNumber = math.MaxUint32 + cpy.BlockHash = common.Hash{} + cpy.TxHash = common.Hash{} + cpy.TxIndex = math.MaxUint32 + cpy.Index = math.MaxUint32 + l[i] = &cpy + } + return l } diff --git 
a/coreth/core/types/transaction.go b/coreth/core/types/transaction.go index 3ddbd4b6..34c185a7 100644 --- a/coreth/core/types/transaction.go +++ b/coreth/core/types/transaction.go @@ -52,9 +52,10 @@ var ( // Transaction types. const ( - LegacyTxType = iota - AccessListTxType - DynamicFeeTxType + LegacyTxType = 0x00 + AccessListTxType = 0x01 + DynamicFeeTxType = 0x02 + BlobTxType = 0x03 ) // Transaction is an Ethereum transaction. @@ -92,9 +93,20 @@ type TxData interface { value() *big.Int nonce() uint64 to() *common.Address + blobGas() uint64 + blobGasFeeCap() *big.Int + blobHashes() []common.Hash rawSignatureValues() (v, r, s *big.Int) setSignatureValues(chainID, v, r, s *big.Int) + + // effectiveGasPrice computes the gas price paid by the transaction, given + // the inclusion block baseFee. + // + // Unlike other TxData methods, the returned *big.Int should be an independent + // copy of the computed value, i.e. callers are allowed to mutate the result. + // Method implementations can use 'dst' to store the result. + effectiveGasPrice(dst *big.Int, baseFee *big.Int) *big.Int } // EncodeRLP implements rlp.Encoder @@ -141,7 +153,7 @@ func (tx *Transaction) DecodeRLP(s *rlp.Stream) error { var inner LegacyTx err := s.Decode(&inner) if err == nil { - tx.setDecoded(&inner, int(rlp.ListSize(size))) + tx.setDecoded(&inner, rlp.ListSize(size)) } return err default: @@ -152,7 +164,7 @@ func (tx *Transaction) DecodeRLP(s *rlp.Stream) error { } inner, err := tx.decodeTyped(b) if err == nil { - tx.setDecoded(inner, len(b)) + tx.setDecoded(inner, uint64(len(b))) } return err } @@ -168,7 +180,7 @@ func (tx *Transaction) UnmarshalBinary(b []byte) error { if err != nil { return err } - tx.setDecoded(&data, len(b)) + tx.setDecoded(&data, uint64(len(b))) return nil } // It's an EIP2718 typed transaction envelope. 
@@ -176,7 +188,7 @@ func (tx *Transaction) UnmarshalBinary(b []byte) error {
 	if err != nil {
 		return err
 	}
-	tx.setDecoded(inner, len(b))
+	tx.setDecoded(inner, uint64(len(b)))
 	return nil
 }
 
@@ -194,17 +206,21 @@ func (tx *Transaction) decodeTyped(b []byte) (TxData, error) {
 		var inner DynamicFeeTx
 		err := rlp.DecodeBytes(b[1:], &inner)
 		return &inner, err
+	case BlobTxType:
+		var inner BlobTx
+		err := rlp.DecodeBytes(b[1:], &inner)
+		return &inner, err
 	default:
 		return nil, ErrTxTypeNotSupported
 	}
 }
 
 // setDecoded sets the inner transaction and size after decoding.
-func (tx *Transaction) setDecoded(inner TxData, size int) {
+func (tx *Transaction) setDecoded(inner TxData, size uint64) {
 	tx.inner = inner
 	tx.time = time.Now()
 	if size > 0 {
-		tx.size.Store(common.StorageSize(size))
+		tx.size.Store(size)
 	}
 }
 
@@ -283,6 +299,15 @@ func (tx *Transaction) GasTipCap() *big.Int { return new(big.Int).Set(tx.inner.g
 // GasFeeCap returns the fee cap per gas of the transaction.
 func (tx *Transaction) GasFeeCap() *big.Int { return new(big.Int).Set(tx.inner.gasFeeCap()) }
 
+// BlobGas returns the data gas limit of the transaction for blob transactions, 0 otherwise.
+func (tx *Transaction) BlobGas() uint64 { return tx.inner.blobGas() }
+
+// BlobGasFeeCap returns the data gas fee cap per data gas of the transaction for blob transactions, nil otherwise.
+func (tx *Transaction) BlobGasFeeCap() *big.Int { return tx.inner.blobGasFeeCap() }
+
+// BlobHashes returns the hashes of the blob commitments for blob transactions, nil otherwise.
+func (tx *Transaction) BlobHashes() []common.Hash { return tx.inner.blobHashes() }
+
 // Value returns the ether amount of the transaction.
 func (tx *Transaction) Value() *big.Int { return new(big.Int).Set(tx.inner.value()) }
 
@@ -295,9 +320,12 @@ func (tx *Transaction) To() *common.Address {
 	return copyAddressPtr(tx.inner.to())
 }
 
-// Cost returns gas * gasPrice + value.
+// Cost returns (gas * gasPrice) + (blobGas * blobGasPrice) + value.
func (tx *Transaction) Cost() *big.Int { total := new(big.Int).Mul(tx.GasPrice(), new(big.Int).SetUint64(tx.Gas())) + if tx.Type() == BlobTxType { + total.Add(total, new(big.Int).Mul(tx.BlobGasFeeCap(), new(big.Int).SetUint64(tx.BlobGas()))) + } total.Add(total, tx.Value()) return total } @@ -366,6 +394,16 @@ func (tx *Transaction) EffectiveGasTipIntCmp(other *big.Int, baseFee *big.Int) i return tx.EffectiveGasTipValue(baseFee).Cmp(other) } +// BlobGasFeeCapCmp compares the blob fee cap of two transactions. +func (tx *Transaction) BlobGasFeeCapCmp(other *Transaction) int { + return tx.inner.blobGasFeeCap().Cmp(other.inner.blobGasFeeCap()) +} + +// BlobGasFeeCapIntCmp compares the blob fee cap of the transaction against the given blob fee cap. +func (tx *Transaction) BlobGasFeeCapIntCmp(other *big.Int) int { + return tx.inner.blobGasFeeCap().Cmp(other) +} + // Hash returns the transaction hash. func (tx *Transaction) Hash() common.Hash { if hash := tx.hash.Load(); hash != nil { @@ -382,16 +420,21 @@ func (tx *Transaction) Hash() common.Hash { return h } -// Size returns the true RLP encoded storage size of the transaction, either by -// encoding and returning it, or returning a previously cached value. -func (tx *Transaction) Size() common.StorageSize { +// Size returns the true encoded storage size of the transaction, either by encoding +// and returning it, or returning a previously cached value. +func (tx *Transaction) Size() uint64 { if size := tx.size.Load(); size != nil { - return size.(common.StorageSize) + return size.(uint64) } c := writeCounter(0) rlp.Encode(&c, &tx.inner) - tx.size.Store(common.StorageSize(c)) - return common.StorageSize(c) + + size := uint64(c) + if tx.Type() != LegacyTxType { + size += 1 // type byte + } + tx.size.Store(size) + return size } // WithSignature returns a new transaction with the given signature. 
@@ -523,6 +566,7 @@ func (s *TxByPriceAndTime) Pop() interface{} { old := *s n := len(old) x := old[n-1] + old[n-1] = nil *s = old[0 : n-1] return x } @@ -595,74 +639,6 @@ func (t *TransactionsByPriceAndNonce) Pop() { heap.Pop(&t.heads) } -// Message is a fully derived transaction and implements core.Message -// -// NOTE: In a future PR this will be removed. -type Message struct { - to *common.Address - from common.Address - nonce uint64 - amount *big.Int - gasLimit uint64 - gasPrice *big.Int - gasFeeCap *big.Int - gasTipCap *big.Int - data []byte - accessList AccessList - isFake bool -} - -func NewMessage(from common.Address, to *common.Address, nonce uint64, amount *big.Int, gasLimit uint64, gasPrice, gasFeeCap, gasTipCap *big.Int, data []byte, accessList AccessList, isFake bool) Message { - return Message{ - from: from, - to: to, - nonce: nonce, - amount: amount, - gasLimit: gasLimit, - gasPrice: gasPrice, - gasFeeCap: gasFeeCap, - gasTipCap: gasTipCap, - data: data, - accessList: accessList, - isFake: isFake, - } -} - -// AsMessage returns the transaction as a core.Message. -func (tx *Transaction) AsMessage(s Signer, baseFee *big.Int) (Message, error) { - msg := Message{ - nonce: tx.Nonce(), - gasLimit: tx.Gas(), - gasPrice: new(big.Int).Set(tx.GasPrice()), - gasFeeCap: new(big.Int).Set(tx.GasFeeCap()), - gasTipCap: new(big.Int).Set(tx.GasTipCap()), - to: tx.To(), - amount: tx.Value(), - data: tx.Data(), - accessList: tx.AccessList(), - isFake: false, - } - // If baseFee provided, set gasPrice to effectiveGasPrice. 
- if baseFee != nil { - msg.gasPrice = math.BigMin(msg.gasPrice.Add(msg.gasTipCap, baseFee), msg.gasFeeCap) - } - var err error - msg.from, err = Sender(s, tx) - return msg, err -} - -func (m Message) From() common.Address { return m.from } -func (m Message) To() *common.Address { return m.to } -func (m Message) GasPrice() *big.Int { return m.gasPrice } -func (m Message) GasFeeCap() *big.Int { return m.gasFeeCap } -func (m Message) GasTipCap() *big.Int { return m.gasTipCap } -func (m Message) Value() *big.Int { return m.amount } -func (m Message) Gas() uint64 { return m.gasLimit } -func (m Message) Nonce() uint64 { return m.nonce } -func (m Message) Data() []byte { return m.data } -func (m Message) AccessList() AccessList { return m.accessList } -func (m Message) IsFake() bool { return m.isFake } - // copyAddressPtr copies an address. func copyAddressPtr(a *common.Address) *common.Address { if a == nil { diff --git a/coreth/core/types/transaction_marshalling.go b/coreth/core/types/transaction_marshalling.go index b5eaf017..4d0dd233 100644 --- a/coreth/core/types/transaction_marshalling.go +++ b/coreth/core/types/transaction_marshalling.go @@ -33,83 +33,101 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/holiman/uint256" ) // txJSON is the JSON representation of transactions. 
type txJSON struct { Type hexutil.Uint64 `json:"type"` - // Common transaction fields: + ChainID *hexutil.Big `json:"chainId,omitempty"` Nonce *hexutil.Uint64 `json:"nonce"` + To *common.Address `json:"to"` + Gas *hexutil.Uint64 `json:"gas"` GasPrice *hexutil.Big `json:"gasPrice"` MaxPriorityFeePerGas *hexutil.Big `json:"maxPriorityFeePerGas"` MaxFeePerGas *hexutil.Big `json:"maxFeePerGas"` - Gas *hexutil.Uint64 `json:"gas"` + MaxFeePerDataGas *hexutil.Big `json:"maxFeePerDataGas,omitempty"` Value *hexutil.Big `json:"value"` - Data *hexutil.Bytes `json:"input"` + Input *hexutil.Bytes `json:"input"` + AccessList *AccessList `json:"accessList,omitempty"` + BlobVersionedHashes []common.Hash `json:"blobVersionedHashes,omitempty"` V *hexutil.Big `json:"v"` R *hexutil.Big `json:"r"` S *hexutil.Big `json:"s"` - To *common.Address `json:"to"` - - // Access list transaction fields: - ChainID *hexutil.Big `json:"chainId,omitempty"` - AccessList *AccessList `json:"accessList,omitempty"` // Only used for encoding: Hash common.Hash `json:"hash"` } // MarshalJSON marshals as JSON with a hash. -func (t *Transaction) MarshalJSON() ([]byte, error) { +func (tx *Transaction) MarshalJSON() ([]byte, error) { var enc txJSON // These are set for all tx types. - enc.Hash = t.Hash() - enc.Type = hexutil.Uint64(t.Type()) + enc.Hash = tx.Hash() + enc.Type = hexutil.Uint64(tx.Type()) // Other fields are set conditionally depending on tx type. 
- switch tx := t.inner.(type) { + switch itx := tx.inner.(type) { case *LegacyTx: - enc.Nonce = (*hexutil.Uint64)(&tx.Nonce) - enc.Gas = (*hexutil.Uint64)(&tx.Gas) - enc.GasPrice = (*hexutil.Big)(tx.GasPrice) - enc.Value = (*hexutil.Big)(tx.Value) - enc.Data = (*hexutil.Bytes)(&tx.Data) - enc.To = t.To() - enc.V = (*hexutil.Big)(tx.V) - enc.R = (*hexutil.Big)(tx.R) - enc.S = (*hexutil.Big)(tx.S) + enc.Nonce = (*hexutil.Uint64)(&itx.Nonce) + enc.To = tx.To() + enc.Gas = (*hexutil.Uint64)(&itx.Gas) + enc.GasPrice = (*hexutil.Big)(itx.GasPrice) + enc.Value = (*hexutil.Big)(itx.Value) + enc.Input = (*hexutil.Bytes)(&itx.Data) + enc.V = (*hexutil.Big)(itx.V) + enc.R = (*hexutil.Big)(itx.R) + enc.S = (*hexutil.Big)(itx.S) + case *AccessListTx: - enc.ChainID = (*hexutil.Big)(tx.ChainID) - enc.AccessList = &tx.AccessList - enc.Nonce = (*hexutil.Uint64)(&tx.Nonce) - enc.Gas = (*hexutil.Uint64)(&tx.Gas) - enc.GasPrice = (*hexutil.Big)(tx.GasPrice) - enc.Value = (*hexutil.Big)(tx.Value) - enc.Data = (*hexutil.Bytes)(&tx.Data) - enc.To = t.To() - enc.V = (*hexutil.Big)(tx.V) - enc.R = (*hexutil.Big)(tx.R) - enc.S = (*hexutil.Big)(tx.S) + enc.ChainID = (*hexutil.Big)(itx.ChainID) + enc.Nonce = (*hexutil.Uint64)(&itx.Nonce) + enc.To = tx.To() + enc.Gas = (*hexutil.Uint64)(&itx.Gas) + enc.GasPrice = (*hexutil.Big)(itx.GasPrice) + enc.Value = (*hexutil.Big)(itx.Value) + enc.Input = (*hexutil.Bytes)(&itx.Data) + enc.AccessList = &itx.AccessList + enc.V = (*hexutil.Big)(itx.V) + enc.R = (*hexutil.Big)(itx.R) + enc.S = (*hexutil.Big)(itx.S) + case *DynamicFeeTx: - enc.ChainID = (*hexutil.Big)(tx.ChainID) - enc.AccessList = &tx.AccessList - enc.Nonce = (*hexutil.Uint64)(&tx.Nonce) - enc.Gas = (*hexutil.Uint64)(&tx.Gas) - enc.MaxFeePerGas = (*hexutil.Big)(tx.GasFeeCap) - enc.MaxPriorityFeePerGas = (*hexutil.Big)(tx.GasTipCap) - enc.Value = (*hexutil.Big)(tx.Value) - enc.Data = (*hexutil.Bytes)(&tx.Data) - enc.To = t.To() - enc.V = (*hexutil.Big)(tx.V) - enc.R = (*hexutil.Big)(tx.R) - 
enc.S = (*hexutil.Big)(tx.S) + enc.ChainID = (*hexutil.Big)(itx.ChainID) + enc.Nonce = (*hexutil.Uint64)(&itx.Nonce) + enc.To = tx.To() + enc.Gas = (*hexutil.Uint64)(&itx.Gas) + enc.MaxFeePerGas = (*hexutil.Big)(itx.GasFeeCap) + enc.MaxPriorityFeePerGas = (*hexutil.Big)(itx.GasTipCap) + enc.Value = (*hexutil.Big)(itx.Value) + enc.Input = (*hexutil.Bytes)(&itx.Data) + enc.AccessList = &itx.AccessList + enc.V = (*hexutil.Big)(itx.V) + enc.R = (*hexutil.Big)(itx.R) + enc.S = (*hexutil.Big)(itx.S) + + case *BlobTx: + enc.ChainID = (*hexutil.Big)(itx.ChainID.ToBig()) + enc.Nonce = (*hexutil.Uint64)(&itx.Nonce) + enc.Gas = (*hexutil.Uint64)(&itx.Gas) + enc.MaxFeePerGas = (*hexutil.Big)(itx.GasFeeCap.ToBig()) + enc.MaxPriorityFeePerGas = (*hexutil.Big)(itx.GasTipCap.ToBig()) + enc.MaxFeePerDataGas = (*hexutil.Big)(itx.BlobFeeCap.ToBig()) + enc.Value = (*hexutil.Big)(itx.Value.ToBig()) + enc.Input = (*hexutil.Bytes)(&itx.Data) + enc.AccessList = &itx.AccessList + enc.BlobVersionedHashes = itx.BlobHashes + enc.To = tx.To() + enc.V = (*hexutil.Big)(itx.V.ToBig()) + enc.R = (*hexutil.Big)(itx.R.ToBig()) + enc.S = (*hexutil.Big)(itx.S.ToBig()) } return json.Marshal(&enc) } // UnmarshalJSON unmarshals from JSON. 
-func (t *Transaction) UnmarshalJSON(input []byte) error { +func (tx *Transaction) UnmarshalJSON(input []byte) error { var dec txJSON if err := json.Unmarshal(input, &dec); err != nil { return err @@ -121,29 +139,29 @@ func (t *Transaction) UnmarshalJSON(input []byte) error { case LegacyTxType: var itx LegacyTx inner = &itx - if dec.To != nil { - itx.To = dec.To - } if dec.Nonce == nil { return errors.New("missing required field 'nonce' in transaction") } itx.Nonce = uint64(*dec.Nonce) - if dec.GasPrice == nil { - return errors.New("missing required field 'gasPrice' in transaction") + if dec.To != nil { + itx.To = dec.To } - itx.GasPrice = (*big.Int)(dec.GasPrice) if dec.Gas == nil { return errors.New("missing required field 'gas' in transaction") } itx.Gas = uint64(*dec.Gas) + if dec.GasPrice == nil { + return errors.New("missing required field 'gasPrice' in transaction") + } + itx.GasPrice = (*big.Int)(dec.GasPrice) if dec.Value == nil { return errors.New("missing required field 'value' in transaction") } itx.Value = (*big.Int)(dec.Value) - if dec.Data == nil { + if dec.Input == nil { return errors.New("missing required field 'input' in transaction") } - itx.Data = *dec.Data + itx.Data = *dec.Input if dec.V == nil { return errors.New("missing required field 'v' in transaction") } @@ -166,40 +184,39 @@ func (t *Transaction) UnmarshalJSON(input []byte) error { case AccessListTxType: var itx AccessListTx inner = &itx - // Access list is optional for now. 
- if dec.AccessList != nil { - itx.AccessList = *dec.AccessList - } if dec.ChainID == nil { return errors.New("missing required field 'chainId' in transaction") } itx.ChainID = (*big.Int)(dec.ChainID) - if dec.To != nil { - itx.To = dec.To - } if dec.Nonce == nil { return errors.New("missing required field 'nonce' in transaction") } itx.Nonce = uint64(*dec.Nonce) - if dec.GasPrice == nil { - return errors.New("missing required field 'gasPrice' in transaction") + if dec.To != nil { + itx.To = dec.To } - itx.GasPrice = (*big.Int)(dec.GasPrice) if dec.Gas == nil { return errors.New("missing required field 'gas' in transaction") } itx.Gas = uint64(*dec.Gas) + if dec.GasPrice == nil { + return errors.New("missing required field 'gasPrice' in transaction") + } + itx.GasPrice = (*big.Int)(dec.GasPrice) if dec.Value == nil { return errors.New("missing required field 'value' in transaction") } itx.Value = (*big.Int)(dec.Value) - if dec.Data == nil { + if dec.Input == nil { return errors.New("missing required field 'input' in transaction") } - itx.Data = *dec.Data + itx.Data = *dec.Input if dec.V == nil { return errors.New("missing required field 'v' in transaction") } + if dec.AccessList != nil { + itx.AccessList = *dec.AccessList + } itx.V = (*big.Int)(dec.V) if dec.R == nil { return errors.New("missing required field 'r' in transaction") @@ -219,21 +236,21 @@ func (t *Transaction) UnmarshalJSON(input []byte) error { case DynamicFeeTxType: var itx DynamicFeeTx inner = &itx - // Access list is optional for now. 
- if dec.AccessList != nil { - itx.AccessList = *dec.AccessList - } if dec.ChainID == nil { return errors.New("missing required field 'chainId' in transaction") } itx.ChainID = (*big.Int)(dec.ChainID) - if dec.To != nil { - itx.To = dec.To - } if dec.Nonce == nil { return errors.New("missing required field 'nonce' in transaction") } itx.Nonce = uint64(*dec.Nonce) + if dec.To != nil { + itx.To = dec.To + } + if dec.Gas == nil { + return errors.New("missing required field 'gas' for txdata") + } + itx.Gas = uint64(*dec.Gas) if dec.MaxPriorityFeePerGas == nil { return errors.New("missing required field 'maxPriorityFeePerGas' for txdata") } @@ -242,21 +259,20 @@ func (t *Transaction) UnmarshalJSON(input []byte) error { return errors.New("missing required field 'maxFeePerGas' for txdata") } itx.GasFeeCap = (*big.Int)(dec.MaxFeePerGas) - if dec.Gas == nil { - return errors.New("missing required field 'gas' for txdata") - } - itx.Gas = uint64(*dec.Gas) if dec.Value == nil { return errors.New("missing required field 'value' in transaction") } itx.Value = (*big.Int)(dec.Value) - if dec.Data == nil { + if dec.Input == nil { return errors.New("missing required field 'input' in transaction") } - itx.Data = *dec.Data + itx.Data = *dec.Input if dec.V == nil { return errors.New("missing required field 'v' in transaction") } + if dec.AccessList != nil { + itx.AccessList = *dec.AccessList + } itx.V = (*big.Int)(dec.V) if dec.R == nil { return errors.New("missing required field 'r' in transaction") @@ -273,12 +289,76 @@ func (t *Transaction) UnmarshalJSON(input []byte) error { } } + case BlobTxType: + var itx BlobTx + inner = &itx + if dec.ChainID == nil { + return errors.New("missing required field 'chainId' in transaction") + } + itx.ChainID = uint256.MustFromBig((*big.Int)(dec.ChainID)) + if dec.Nonce == nil { + return errors.New("missing required field 'nonce' in transaction") + } + itx.Nonce = uint64(*dec.Nonce) + if dec.To != nil { + itx.To = dec.To + } + if dec.Gas == nil { + 
return errors.New("missing required field 'gas' for txdata") + } + itx.Gas = uint64(*dec.Gas) + if dec.MaxPriorityFeePerGas == nil { + return errors.New("missing required field 'maxPriorityFeePerGas' for txdata") + } + itx.GasTipCap = uint256.MustFromBig((*big.Int)(dec.MaxPriorityFeePerGas)) + if dec.MaxFeePerGas == nil { + return errors.New("missing required field 'maxFeePerGas' for txdata") + } + itx.GasFeeCap = uint256.MustFromBig((*big.Int)(dec.MaxFeePerGas)) + if dec.MaxFeePerDataGas == nil { + return errors.New("missing required field 'maxFeePerDataGas' for txdata") + } + itx.BlobFeeCap = uint256.MustFromBig((*big.Int)(dec.MaxFeePerDataGas)) + if dec.Value == nil { + return errors.New("missing required field 'value' in transaction") + } + itx.Value = uint256.MustFromBig((*big.Int)(dec.Value)) + if dec.Input == nil { + return errors.New("missing required field 'input' in transaction") + } + itx.Data = *dec.Input + if dec.V == nil { + return errors.New("missing required field 'v' in transaction") + } + if dec.AccessList != nil { + itx.AccessList = *dec.AccessList + } + if dec.BlobVersionedHashes == nil { + return errors.New("missing required field 'blobVersionedHashes' in transaction") + } + itx.BlobHashes = dec.BlobVersionedHashes + itx.V = uint256.MustFromBig((*big.Int)(dec.V)) + if dec.R == nil { + return errors.New("missing required field 'r' in transaction") + } + itx.R = uint256.MustFromBig((*big.Int)(dec.R)) + if dec.S == nil { + return errors.New("missing required field 's' in transaction") + } + itx.S = uint256.MustFromBig((*big.Int)(dec.S)) + withSignature := itx.V.Sign() != 0 || itx.R.Sign() != 0 || itx.S.Sign() != 0 + if withSignature { + if err := sanityCheckSignature(itx.V.ToBig(), itx.R.ToBig(), itx.S.ToBig(), false); err != nil { + return err + } + } + default: return ErrTxTypeNotSupported } // Now set the inner transaction. - t.setDecoded(inner, 0) + tx.setDecoded(inner, 0) // TODO: check hash here? 
return nil diff --git a/coreth/core/types/transaction_signing.go b/coreth/core/types/transaction_signing.go index a717749b..09f6c974 100644 --- a/coreth/core/types/transaction_signing.go +++ b/coreth/core/types/transaction_signing.go @@ -47,8 +47,10 @@ type sigCache struct { } // MakeSigner returns a Signer based on the given chain config and block number or time. -func MakeSigner(config *params.ChainConfig, blockNumber *big.Int, blockTime *big.Int) Signer { +func MakeSigner(config *params.ChainConfig, blockNumber *big.Int, blockTime uint64) Signer { switch { + case config.IsCancun(blockTime): + return NewCancunSigner(config.ChainID) case config.IsApricotPhase3(blockTime): return NewLondonSigner(config.ChainID) case config.IsApricotPhase2(blockTime): @@ -71,6 +73,9 @@ func MakeSigner(config *params.ChainConfig, blockNumber *big.Int, blockTime *big // have the current block number available, use MakeSigner instead. func LatestSigner(config *params.ChainConfig) Signer { if config.ChainID != nil { + if config.CancunTime != nil { + return NewCancunSigner(config.ChainID) + } if config.ApricotPhase3BlockTimestamp != nil { return NewLondonSigner(config.ChainID) } @@ -95,7 +100,7 @@ func LatestSignerForChainID(chainID *big.Int) Signer { if chainID == nil { return HomesteadSigner{} } - return NewLondonSigner(chainID) + return NewCancunSigner(chainID) } // SignTx signs the transaction using the given signer and private key. @@ -178,6 +183,75 @@ type Signer interface { Equal(Signer) bool } +type cancunSigner struct{ londonSigner } + +// NewCancunSigner returns a signer that accepts +// - EIP-4844 blob transactions +// - EIP-1559 dynamic fee transactions +// - EIP-2930 access list transactions, +// - EIP-155 replay protected transactions, and +// - legacy Homestead transactions. 
+func NewCancunSigner(chainId *big.Int) Signer { + return cancunSigner{londonSigner{eip2930Signer{NewEIP155Signer(chainId)}}} +} + +func (s cancunSigner) Sender(tx *Transaction) (common.Address, error) { + if tx.Type() != BlobTxType { + return s.londonSigner.Sender(tx) + } + V, R, S := tx.RawSignatureValues() + // Blob txs are defined to use 0 and 1 as their recovery + // id, add 27 to become equivalent to unprotected Homestead signatures. + V = new(big.Int).Add(V, big.NewInt(27)) + if tx.ChainId().Cmp(s.chainId) != 0 { + return common.Address{}, fmt.Errorf("%w: have %d want %d", ErrInvalidChainId, tx.ChainId(), s.chainId) + } + return recoverPlain(s.Hash(tx), R, S, V, true) +} + +func (s cancunSigner) Equal(s2 Signer) bool { + x, ok := s2.(cancunSigner) + return ok && x.chainId.Cmp(s.chainId) == 0 +} + +func (s cancunSigner) SignatureValues(tx *Transaction, sig []byte) (R, S, V *big.Int, err error) { + txdata, ok := tx.inner.(*BlobTx) + if !ok { + return s.londonSigner.SignatureValues(tx, sig) + } + // Check that chain ID of tx matches the signer. We also accept ID zero here, + // because it indicates that the chain ID was not specified in the tx. + if txdata.ChainID.Sign() != 0 && txdata.ChainID.ToBig().Cmp(s.chainId) != 0 { + return nil, nil, nil, fmt.Errorf("%w: have %d want %d", ErrInvalidChainId, txdata.ChainID, s.chainId) + } + R, S, _ = decodeSignature(sig) + V = big.NewInt(int64(sig[64])) + return R, S, V, nil +} + +// Hash returns the hash to be signed by the sender. +// It does not uniquely identify the transaction. 
+func (s cancunSigner) Hash(tx *Transaction) common.Hash { + if tx.Type() != BlobTxType { + return s.londonSigner.Hash(tx) + } + return prefixedRlpHash( + tx.Type(), + []interface{}{ + s.chainId, + tx.Nonce(), + tx.GasTipCap(), + tx.GasFeeCap(), + tx.Gas(), + tx.To(), + tx.Value(), + tx.Data(), + tx.AccessList(), + tx.BlobGasFeeCap(), + tx.BlobHashes(), + }) +} + type londonSigner struct{ eip2930Signer } // NewLondonSigner returns a signer that accepts @@ -198,7 +272,7 @@ func (s londonSigner) Sender(tx *Transaction) (common.Address, error) { // id, add 27 to become equivalent to unprotected Homestead signatures. V = new(big.Int).Add(V, big.NewInt(27)) if tx.ChainId().Cmp(s.chainId) != 0 { - return common.Address{}, ErrInvalidChainId + return common.Address{}, fmt.Errorf("%w: have %d want %d", ErrInvalidChainId, tx.ChainId(), s.chainId) } return recoverPlain(s.Hash(tx), R, S, V, true) } @@ -216,7 +290,7 @@ func (s londonSigner) SignatureValues(tx *Transaction, sig []byte) (R, S, V *big // Check that chain ID of tx matches the signer. We also accept ID zero here, // because it indicates that the chain ID was not specified in the tx. if txdata.ChainID.Sign() != 0 && txdata.ChainID.Cmp(s.chainId) != 0 { - return nil, nil, nil, ErrInvalidChainId + return nil, nil, nil, fmt.Errorf("%w: have %d want %d", ErrInvalidChainId, txdata.ChainID, s.chainId) } R, S, _ = decodeSignature(sig) V = big.NewInt(int64(sig[64])) @@ -278,7 +352,7 @@ func (s eip2930Signer) Sender(tx *Transaction) (common.Address, error) { return common.Address{}, ErrTxTypeNotSupported } if tx.ChainId().Cmp(s.chainId) != 0 { - return common.Address{}, ErrInvalidChainId + return common.Address{}, fmt.Errorf("%w: have %d want %d", ErrInvalidChainId, tx.ChainId(), s.chainId) } return recoverPlain(s.Hash(tx), R, S, V, true) } @@ -291,7 +365,7 @@ func (s eip2930Signer) SignatureValues(tx *Transaction, sig []byte) (R, S, V *bi // Check that chain ID of tx matches the signer. 
We also accept ID zero here, // because it indicates that the chain ID was not specified in the tx. if txdata.ChainID.Sign() != 0 && txdata.ChainID.Cmp(s.chainId) != 0 { - return nil, nil, nil, ErrInvalidChainId + return nil, nil, nil, fmt.Errorf("%w: have %d want %d", ErrInvalidChainId, txdata.ChainID, s.chainId) } R, S, _ = decodeSignature(sig) V = big.NewInt(int64(sig[64])) @@ -372,7 +446,7 @@ func (s EIP155Signer) Sender(tx *Transaction) (common.Address, error) { return HomesteadSigner{}.Sender(tx) } if tx.ChainId().Cmp(s.chainId) != 0 { - return common.Address{}, ErrInvalidChainId + return common.Address{}, fmt.Errorf("%w: have %d want %d", ErrInvalidChainId, tx.ChainId(), s.chainId) } V, R, S := tx.RawSignatureValues() V = new(big.Int).Sub(V, s.chainIdMul) @@ -408,7 +482,7 @@ func (s EIP155Signer) Hash(tx *Transaction) common.Hash { }) } -// HomesteadTransaction implements TransactionInterface using the +// HomesteadSigner implements Signer interface using the // homestead rules. type HomesteadSigner struct{ FrontierSigner } @@ -435,6 +509,8 @@ func (hs HomesteadSigner) Sender(tx *Transaction) (common.Address, error) { return recoverPlain(hs.Hash(tx), r, s, v, true) } +// FrontierSigner implements Signer interface using the +// frontier rules. 
type FrontierSigner struct{} func (s FrontierSigner) ChainID() *big.Int { diff --git a/coreth/core/types/transaction_signing_test.go b/coreth/core/types/transaction_signing_test.go index 674173dc..0976f597 100644 --- a/coreth/core/types/transaction_signing_test.go +++ b/coreth/core/types/transaction_signing_test.go @@ -27,6 +27,7 @@ package types import ( + "errors" "math/big" "testing" @@ -139,8 +140,8 @@ func TestChainId(t *testing.T) { } _, err = Sender(NewEIP155Signer(big.NewInt(2)), tx) - if err != ErrInvalidChainId { - t.Error("expected error:", ErrInvalidChainId) + if !errors.Is(err, ErrInvalidChainId) { + t.Error("expected error:", ErrInvalidChainId, err) } _, err = Sender(NewEIP155Signer(big.NewInt(1)), tx) diff --git a/coreth/core/types/transaction_test.go b/coreth/core/types/transaction_test.go index 4a737558..995afbb9 100644 --- a/coreth/core/types/transaction_test.go +++ b/coreth/core/types/transaction_test.go @@ -30,6 +30,7 @@ import ( "bytes" "crypto/ecdsa" "encoding/json" + "errors" "fmt" "math/big" "math/rand" @@ -180,14 +181,14 @@ func TestEIP2930Signer(t *testing.T) { t.Errorf("test %d: wrong sig hash: got %x, want %x", i, sigHash, test.wantSignerHash) } sender, err := Sender(test.signer, test.tx) - if err != test.wantSenderErr { + if !errors.Is(err, test.wantSenderErr) { t.Errorf("test %d: wrong Sender error %q", i, err) } if err == nil && sender != keyAddr { t.Errorf("test %d: wrong sender address %x", i, sender) } signedTx, err := SignTx(test.tx, test.signer, key) - if err != test.wantSignErr { + if !errors.Is(err, test.wantSignErr) { t.Fatalf("test %d: wrong SignTx error %q", i, err) } if signedTx != nil { @@ -540,3 +541,71 @@ func assertEqual(orig *Transaction, cpy *Transaction) error { } return nil } + +func TestTransactionSizes(t *testing.T) { + signer := NewLondonSigner(big.NewInt(123)) + key, _ := crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + to := common.HexToAddress("0x01") + for i, txdata := 
range []TxData{ + &AccessListTx{ + ChainID: big.NewInt(123), + Nonce: 0, + To: nil, + Value: big.NewInt(1000), + Gas: 21000, + GasPrice: big.NewInt(100000), + }, + &LegacyTx{ + Nonce: 1, + GasPrice: big.NewInt(500), + Gas: 1000000, + To: &to, + Value: big.NewInt(1), + }, + &AccessListTx{ + ChainID: big.NewInt(123), + Nonce: 1, + GasPrice: big.NewInt(500), + Gas: 1000000, + To: &to, + Value: big.NewInt(1), + AccessList: AccessList{ + AccessTuple{ + Address: common.HexToAddress("0x01"), + StorageKeys: []common.Hash{common.HexToHash("0x01")}, + }}, + }, + &DynamicFeeTx{ + ChainID: big.NewInt(123), + Nonce: 1, + Gas: 1000000, + To: &to, + Value: big.NewInt(1), + GasTipCap: big.NewInt(500), + GasFeeCap: big.NewInt(500), + }, + } { + tx, err := SignNewTx(key, signer, txdata) + if err != nil { + t.Fatalf("test %d: %v", i, err) + } + bin, _ := tx.MarshalBinary() + + // Check initial calc + if have, want := int(tx.Size()), len(bin); have != want { + t.Errorf("test %d: size wrong, have %d want %d", i, have, want) + } + // Check cached version too + if have, want := int(tx.Size()), len(bin); have != want { + t.Errorf("test %d: (cached) size wrong, have %d want %d", i, have, want) + } + // Check unmarshalled version too + utx := new(Transaction) + if err := utx.UnmarshalBinary(bin); err != nil { + t.Fatalf("test %d: failed to unmarshal tx: %v", i, err) + } + if have, want := int(utx.Size()), len(bin); have != want { + t.Errorf("test %d: (unmarshalled) size wrong, have %d want %d", i, have, want) + } + } +} diff --git a/coreth/core/types/access_list_tx.go b/coreth/core/types/tx_access_list.go similarity index 72% rename from coreth/core/types/access_list_tx.go rename to coreth/core/types/tx_access_list.go index 51a6bd01..e233410d 100644 --- a/coreth/core/types/access_list_tx.go +++ b/coreth/core/types/tx_access_list.go @@ -39,8 +39,8 @@ type AccessList []AccessTuple // AccessTuple is the element type of an access list. 
type AccessTuple struct { - Address common.Address `json:"address" gencodec:"required"` - StorageKeys []common.Hash `json:"storageKeys" gencodec:"required"` + Address common.Address `json:"address" gencodec:"required"` + StorageKeys []common.Hash `json:"storageKeys" gencodec:"required"` } // StorageKeys returns the total number of storage keys in the access list. @@ -104,17 +104,24 @@ func (tx *AccessListTx) copy() TxData { } // accessors for innerTx. -func (tx *AccessListTx) txType() byte { return AccessListTxType } -func (tx *AccessListTx) chainID() *big.Int { return tx.ChainID } -func (tx *AccessListTx) accessList() AccessList { return tx.AccessList } -func (tx *AccessListTx) data() []byte { return tx.Data } -func (tx *AccessListTx) gas() uint64 { return tx.Gas } -func (tx *AccessListTx) gasPrice() *big.Int { return tx.GasPrice } -func (tx *AccessListTx) gasTipCap() *big.Int { return tx.GasPrice } -func (tx *AccessListTx) gasFeeCap() *big.Int { return tx.GasPrice } -func (tx *AccessListTx) value() *big.Int { return tx.Value } -func (tx *AccessListTx) nonce() uint64 { return tx.Nonce } -func (tx *AccessListTx) to() *common.Address { return tx.To } +func (tx *AccessListTx) txType() byte { return AccessListTxType } +func (tx *AccessListTx) chainID() *big.Int { return tx.ChainID } +func (tx *AccessListTx) accessList() AccessList { return tx.AccessList } +func (tx *AccessListTx) data() []byte { return tx.Data } +func (tx *AccessListTx) gas() uint64 { return tx.Gas } +func (tx *AccessListTx) gasPrice() *big.Int { return tx.GasPrice } +func (tx *AccessListTx) gasTipCap() *big.Int { return tx.GasPrice } +func (tx *AccessListTx) gasFeeCap() *big.Int { return tx.GasPrice } +func (tx *AccessListTx) value() *big.Int { return tx.Value } +func (tx *AccessListTx) nonce() uint64 { return tx.Nonce } +func (tx *AccessListTx) to() *common.Address { return tx.To } +func (tx *AccessListTx) blobGas() uint64 { return 0 } +func (tx *AccessListTx) blobGasFeeCap() *big.Int { return nil } 
+func (tx *AccessListTx) blobHashes() []common.Hash { return nil } + +func (tx *AccessListTx) effectiveGasPrice(dst *big.Int, baseFee *big.Int) *big.Int { + return dst.Set(tx.GasPrice) +} func (tx *AccessListTx) rawSignatureValues() (v, r, s *big.Int) { return tx.V, tx.R, tx.S diff --git a/coreth/core/types/tx_blob.go b/coreth/core/types/tx_blob.go new file mode 100644 index 00000000..3141749b --- /dev/null +++ b/coreth/core/types/tx_blob.go @@ -0,0 +1,132 @@ +// Copyright 2023 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package types + +import ( + "math/big" + + "github.com/ava-labs/coreth/params" + "github.com/ethereum/go-ethereum/common" + "github.com/holiman/uint256" +) + +// BlobTx represents an EIP-4844 transaction. +type BlobTx struct { + ChainID *uint256.Int + Nonce uint64 + GasTipCap *uint256.Int // a.k.a. maxPriorityFeePerGas + GasFeeCap *uint256.Int // a.k.a. maxFeePerGas + Gas uint64 + To *common.Address `rlp:"nil"` // nil means contract creation + Value *uint256.Int + Data []byte + AccessList AccessList + BlobFeeCap *uint256.Int // a.k.a. 
maxFeePerDataGas + BlobHashes []common.Hash + + // Signature values + V *uint256.Int `json:"v" gencodec:"required"` + R *uint256.Int `json:"r" gencodec:"required"` + S *uint256.Int `json:"s" gencodec:"required"` +} + +// copy creates a deep copy of the transaction data and initializes all fields. +func (tx *BlobTx) copy() TxData { + cpy := &BlobTx{ + Nonce: tx.Nonce, + To: copyAddressPtr(tx.To), + Data: common.CopyBytes(tx.Data), + Gas: tx.Gas, + // These are copied below. + AccessList: make(AccessList, len(tx.AccessList)), + BlobHashes: make([]common.Hash, len(tx.BlobHashes)), + Value: new(uint256.Int), + ChainID: new(uint256.Int), + GasTipCap: new(uint256.Int), + GasFeeCap: new(uint256.Int), + BlobFeeCap: new(uint256.Int), + V: new(uint256.Int), + R: new(uint256.Int), + S: new(uint256.Int), + } + copy(cpy.AccessList, tx.AccessList) + copy(cpy.BlobHashes, tx.BlobHashes) + + if tx.Value != nil { + cpy.Value.Set(tx.Value) + } + if tx.ChainID != nil { + cpy.ChainID.Set(tx.ChainID) + } + if tx.GasTipCap != nil { + cpy.GasTipCap.Set(tx.GasTipCap) + } + if tx.GasFeeCap != nil { + cpy.GasFeeCap.Set(tx.GasFeeCap) + } + if tx.BlobFeeCap != nil { + cpy.BlobFeeCap.Set(tx.BlobFeeCap) + } + if tx.V != nil { + cpy.V.Set(tx.V) + } + if tx.R != nil { + cpy.R.Set(tx.R) + } + if tx.S != nil { + cpy.S.Set(tx.S) + } + return cpy +} + +// accessors for innerTx. 
+func (tx *BlobTx) txType() byte { return BlobTxType } +func (tx *BlobTx) chainID() *big.Int { return tx.ChainID.ToBig() } +func (tx *BlobTx) accessList() AccessList { return tx.AccessList } +func (tx *BlobTx) data() []byte { return tx.Data } +func (tx *BlobTx) gas() uint64 { return tx.Gas } +func (tx *BlobTx) gasFeeCap() *big.Int { return tx.GasFeeCap.ToBig() } +func (tx *BlobTx) gasTipCap() *big.Int { return tx.GasTipCap.ToBig() } +func (tx *BlobTx) gasPrice() *big.Int { return tx.GasFeeCap.ToBig() } +func (tx *BlobTx) value() *big.Int { return tx.Value.ToBig() } +func (tx *BlobTx) nonce() uint64 { return tx.Nonce } +func (tx *BlobTx) to() *common.Address { return tx.To } +func (tx *BlobTx) blobGas() uint64 { return params.BlobTxDataGasPerBlob * uint64(len(tx.BlobHashes)) } +func (tx *BlobTx) blobGasFeeCap() *big.Int { return tx.BlobFeeCap.ToBig() } +func (tx *BlobTx) blobHashes() []common.Hash { return tx.BlobHashes } + +func (tx *BlobTx) effectiveGasPrice(dst *big.Int, baseFee *big.Int) *big.Int { + if baseFee == nil { + return dst.Set(tx.GasFeeCap.ToBig()) + } + tip := dst.Sub(tx.GasFeeCap.ToBig(), baseFee) + if tip.Cmp(tx.GasTipCap.ToBig()) > 0 { + tip.Set(tx.GasTipCap.ToBig()) + } + return tip.Add(tip, baseFee) +} + +func (tx *BlobTx) rawSignatureValues() (v, r, s *big.Int) { + return tx.V.ToBig(), tx.R.ToBig(), tx.S.ToBig() +} + +func (tx *BlobTx) setSignatureValues(chainID, v, r, s *big.Int) { + tx.ChainID.SetFromBig(chainID) + tx.V.SetFromBig(v) + tx.R.SetFromBig(r) + tx.S.SetFromBig(s) +} diff --git a/coreth/core/types/dynamic_fee_tx.go b/coreth/core/types/tx_dynamic_fee.go similarity index 68% rename from coreth/core/types/dynamic_fee_tx.go rename to coreth/core/types/tx_dynamic_fee.go index c4ec28c5..bf6dbb74 100644 --- a/coreth/core/types/dynamic_fee_tx.go +++ b/coreth/core/types/tx_dynamic_fee.go @@ -32,6 +32,7 @@ import ( "github.com/ethereum/go-ethereum/common" ) +// DynamicFeeTx represents an EIP-1559 transaction. 
type DynamicFeeTx struct { ChainID *big.Int Nonce uint64 @@ -92,17 +93,31 @@ func (tx *DynamicFeeTx) copy() TxData { } // accessors for innerTx. -func (tx *DynamicFeeTx) txType() byte { return DynamicFeeTxType } -func (tx *DynamicFeeTx) chainID() *big.Int { return tx.ChainID } -func (tx *DynamicFeeTx) accessList() AccessList { return tx.AccessList } -func (tx *DynamicFeeTx) data() []byte { return tx.Data } -func (tx *DynamicFeeTx) gas() uint64 { return tx.Gas } -func (tx *DynamicFeeTx) gasFeeCap() *big.Int { return tx.GasFeeCap } -func (tx *DynamicFeeTx) gasTipCap() *big.Int { return tx.GasTipCap } -func (tx *DynamicFeeTx) gasPrice() *big.Int { return tx.GasFeeCap } -func (tx *DynamicFeeTx) value() *big.Int { return tx.Value } -func (tx *DynamicFeeTx) nonce() uint64 { return tx.Nonce } -func (tx *DynamicFeeTx) to() *common.Address { return tx.To } +func (tx *DynamicFeeTx) txType() byte { return DynamicFeeTxType } +func (tx *DynamicFeeTx) chainID() *big.Int { return tx.ChainID } +func (tx *DynamicFeeTx) accessList() AccessList { return tx.AccessList } +func (tx *DynamicFeeTx) data() []byte { return tx.Data } +func (tx *DynamicFeeTx) gas() uint64 { return tx.Gas } +func (tx *DynamicFeeTx) gasFeeCap() *big.Int { return tx.GasFeeCap } +func (tx *DynamicFeeTx) gasTipCap() *big.Int { return tx.GasTipCap } +func (tx *DynamicFeeTx) gasPrice() *big.Int { return tx.GasFeeCap } +func (tx *DynamicFeeTx) value() *big.Int { return tx.Value } +func (tx *DynamicFeeTx) nonce() uint64 { return tx.Nonce } +func (tx *DynamicFeeTx) to() *common.Address { return tx.To } +func (tx *DynamicFeeTx) blobGas() uint64 { return 0 } +func (tx *DynamicFeeTx) blobGasFeeCap() *big.Int { return nil } +func (tx *DynamicFeeTx) blobHashes() []common.Hash { return nil } + +func (tx *DynamicFeeTx) effectiveGasPrice(dst *big.Int, baseFee *big.Int) *big.Int { + if baseFee == nil { + return dst.Set(tx.GasFeeCap) + } + tip := dst.Sub(tx.GasFeeCap, baseFee) + if tip.Cmp(tx.GasTipCap) > 0 { + 
tip.Set(tx.GasTipCap) + } + return tip.Add(tip, baseFee) +} func (tx *DynamicFeeTx) rawSignatureValues() (v, r, s *big.Int) { return tx.V, tx.R, tx.S diff --git a/coreth/core/types/legacy_tx.go b/coreth/core/types/tx_legacy.go similarity index 74% rename from coreth/core/types/legacy_tx.go rename to coreth/core/types/tx_legacy.go index a32340cb..dc05354d 100644 --- a/coreth/core/types/legacy_tx.go +++ b/coreth/core/types/tx_legacy.go @@ -32,7 +32,7 @@ import ( "github.com/ethereum/go-ethereum/common" ) -// LegacyTx is the transaction data of regular Ethereum transactions. +// LegacyTx is the transaction data of the original Ethereum transactions. type LegacyTx struct { Nonce uint64 // nonce of sender account GasPrice *big.Int // wei per gas @@ -101,17 +101,24 @@ func (tx *LegacyTx) copy() TxData { } // accessors for innerTx. -func (tx *LegacyTx) txType() byte { return LegacyTxType } -func (tx *LegacyTx) chainID() *big.Int { return deriveChainId(tx.V) } -func (tx *LegacyTx) accessList() AccessList { return nil } -func (tx *LegacyTx) data() []byte { return tx.Data } -func (tx *LegacyTx) gas() uint64 { return tx.Gas } -func (tx *LegacyTx) gasPrice() *big.Int { return tx.GasPrice } -func (tx *LegacyTx) gasTipCap() *big.Int { return tx.GasPrice } -func (tx *LegacyTx) gasFeeCap() *big.Int { return tx.GasPrice } -func (tx *LegacyTx) value() *big.Int { return tx.Value } -func (tx *LegacyTx) nonce() uint64 { return tx.Nonce } -func (tx *LegacyTx) to() *common.Address { return tx.To } +func (tx *LegacyTx) txType() byte { return LegacyTxType } +func (tx *LegacyTx) chainID() *big.Int { return deriveChainId(tx.V) } +func (tx *LegacyTx) accessList() AccessList { return nil } +func (tx *LegacyTx) data() []byte { return tx.Data } +func (tx *LegacyTx) gas() uint64 { return tx.Gas } +func (tx *LegacyTx) gasPrice() *big.Int { return tx.GasPrice } +func (tx *LegacyTx) gasTipCap() *big.Int { return tx.GasPrice } +func (tx *LegacyTx) gasFeeCap() *big.Int { return tx.GasPrice } +func (tx 
*LegacyTx) value() *big.Int { return tx.Value } +func (tx *LegacyTx) nonce() uint64 { return tx.Nonce } +func (tx *LegacyTx) to() *common.Address { return tx.To } +func (tx *LegacyTx) blobGas() uint64 { return 0 } +func (tx *LegacyTx) blobGasFeeCap() *big.Int { return nil } +func (tx *LegacyTx) blobHashes() []common.Hash { return nil } + +func (tx *LegacyTx) effectiveGasPrice(dst *big.Int, baseFee *big.Int) *big.Int { + return dst.Set(tx.GasPrice) +} func (tx *LegacyTx) rawSignatureValues() (v, r, s *big.Int) { return tx.V, tx.R, tx.S diff --git a/coreth/core/vm/analysis.go b/coreth/core/vm/analysis.go index cd1dc542..d35fd628 100644 --- a/coreth/core/vm/analysis.go +++ b/coreth/core/vm/analysis.go @@ -73,7 +73,7 @@ func (bits *bitvec) codeSegment(pos uint64) bool { // codeBitmap collects data locations in code. func codeBitmap(code []byte) bitvec { // The bitmap is 4 bytes longer than necessary, in case the code - // ends with a PUSH32, the algorithm will push zeroes onto the + // ends with a PUSH32, the algorithm will set bits on the // bitvector outside the bounds of the actual code. 
bits := make(bitvec, len(code)/8+1+4) return codeBitmapInternal(code, bits) diff --git a/coreth/core/vm/contracts.go b/coreth/core/vm/contracts.go index 578bf0c5..3fd6fc12 100644 --- a/coreth/core/vm/contracts.go +++ b/coreth/core/vm/contracts.go @@ -33,9 +33,9 @@ import ( "fmt" "math/big" - "github.com/ava-labs/coreth/constants" "github.com/ava-labs/coreth/params" - "github.com/ava-labs/coreth/precompile" + "github.com/ava-labs/coreth/precompile/contract" + "github.com/ava-labs/coreth/precompile/modules" "github.com/ava-labs/coreth/vmerrs" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/math" @@ -43,7 +43,6 @@ import ( "github.com/ethereum/go-ethereum/crypto/blake2b" "github.com/ethereum/go-ethereum/crypto/bls12381" "github.com/ethereum/go-ethereum/crypto/bn256" - big2 "github.com/holiman/big" "golang.org/x/crypto/ripemd160" ) @@ -57,7 +56,7 @@ type PrecompiledContract interface { // PrecompiledContractsHomestead contains the default set of pre-compiled Ethereum // contracts used in the Frontier and Homestead releases. -var PrecompiledContractsHomestead = map[common.Address]precompile.StatefulPrecompiledContract{ +var PrecompiledContractsHomestead = map[common.Address]contract.StatefulPrecompiledContract{ common.BytesToAddress([]byte{1}): newWrappedPrecompiledContract(&ecrecover{}), common.BytesToAddress([]byte{2}): newWrappedPrecompiledContract(&sha256hash{}), common.BytesToAddress([]byte{3}): newWrappedPrecompiledContract(&ripemd160hash{}), @@ -66,7 +65,7 @@ var PrecompiledContractsHomestead = map[common.Address]precompile.StatefulPrecom // PrecompiledContractsByzantium contains the default set of pre-compiled Ethereum // contracts used in the Byzantium release. 
-var PrecompiledContractsByzantium = map[common.Address]precompile.StatefulPrecompiledContract{ +var PrecompiledContractsByzantium = map[common.Address]contract.StatefulPrecompiledContract{ common.BytesToAddress([]byte{1}): newWrappedPrecompiledContract(&ecrecover{}), common.BytesToAddress([]byte{2}): newWrappedPrecompiledContract(&sha256hash{}), common.BytesToAddress([]byte{3}): newWrappedPrecompiledContract(&ripemd160hash{}), @@ -79,7 +78,7 @@ var PrecompiledContractsByzantium = map[common.Address]precompile.StatefulPrecom // PrecompiledContractsIstanbul contains the default set of pre-compiled Ethereum // contracts used in the Istanbul release. -var PrecompiledContractsIstanbul = map[common.Address]precompile.StatefulPrecompiledContract{ +var PrecompiledContractsIstanbul = map[common.Address]contract.StatefulPrecompiledContract{ common.BytesToAddress([]byte{1}): newWrappedPrecompiledContract(&ecrecover{}), common.BytesToAddress([]byte{2}): newWrappedPrecompiledContract(&sha256hash{}), common.BytesToAddress([]byte{3}): newWrappedPrecompiledContract(&ripemd160hash{}), @@ -93,7 +92,7 @@ var PrecompiledContractsIstanbul = map[common.Address]precompile.StatefulPrecomp // PrecompiledContractsApricotPhase2 contains the default set of pre-compiled Ethereum // contracts used in the Apricot Phase 2 release. 
-var PrecompiledContractsApricotPhase2 = map[common.Address]precompile.StatefulPrecompiledContract{ +var PrecompiledContractsApricotPhase2 = map[common.Address]contract.StatefulPrecompiledContract{ common.BytesToAddress([]byte{1}): newWrappedPrecompiledContract(&ecrecover{}), common.BytesToAddress([]byte{2}): newWrappedPrecompiledContract(&sha256hash{}), common.BytesToAddress([]byte{3}): newWrappedPrecompiledContract(&ripemd160hash{}), @@ -110,7 +109,7 @@ var PrecompiledContractsApricotPhase2 = map[common.Address]precompile.StatefulPr // PrecompiledContractsApricotPhasePre6 contains the default set of pre-compiled Ethereum // contracts used in the PrecompiledContractsApricotPhasePre6 release. -var PrecompiledContractsApricotPhasePre6 = map[common.Address]precompile.StatefulPrecompiledContract{ +var PrecompiledContractsApricotPhasePre6 = map[common.Address]contract.StatefulPrecompiledContract{ common.BytesToAddress([]byte{1}): newWrappedPrecompiledContract(&ecrecover{}), common.BytesToAddress([]byte{2}): newWrappedPrecompiledContract(&sha256hash{}), common.BytesToAddress([]byte{3}): newWrappedPrecompiledContract(&ripemd160hash{}), @@ -127,7 +126,7 @@ var PrecompiledContractsApricotPhasePre6 = map[common.Address]precompile.Statefu // PrecompiledContractsApricotPhase6 contains the default set of pre-compiled Ethereum // contracts used in the Apricot Phase 6 release. 
-var PrecompiledContractsApricotPhase6 = map[common.Address]precompile.StatefulPrecompiledContract{ +var PrecompiledContractsApricotPhase6 = map[common.Address]contract.StatefulPrecompiledContract{ common.BytesToAddress([]byte{1}): newWrappedPrecompiledContract(&ecrecover{}), common.BytesToAddress([]byte{2}): newWrappedPrecompiledContract(&sha256hash{}), common.BytesToAddress([]byte{3}): newWrappedPrecompiledContract(&ripemd160hash{}), @@ -144,7 +143,7 @@ var PrecompiledContractsApricotPhase6 = map[common.Address]precompile.StatefulPr // PrecompiledContractsBanff contains the default set of pre-compiled Ethereum // contracts used in the Banff release. -var PrecompiledContractsBanff = map[common.Address]precompile.StatefulPrecompiledContract{ +var PrecompiledContractsBanff = map[common.Address]contract.StatefulPrecompiledContract{ common.BytesToAddress([]byte{1}): newWrappedPrecompiledContract(&ecrecover{}), common.BytesToAddress([]byte{2}): newWrappedPrecompiledContract(&sha256hash{}), common.BytesToAddress([]byte{3}): newWrappedPrecompiledContract(&ripemd160hash{}), @@ -208,24 +207,10 @@ func init() { // Ensure that this package will panic during init if there is a conflict present with the declared // precompile addresses. 
- for _, k := range precompile.UsedAddresses { - if _, ok := PrecompileAllNativeAddresses[k]; ok { - panic(fmt.Errorf("precompile address collides with existing native address: %s", k)) - } - if k == constants.BlackholeAddr { - panic(fmt.Errorf("cannot use address %s for stateful precompile - overlaps with blackhole address", k)) - } - - // check that [k] belongs to at least one ReservedRange - found := false - for _, reservedRange := range precompile.ReservedRanges { - if reservedRange.Contains(k) { - found = true - break - } - } - if !found { - panic(fmt.Errorf("address %s used for stateful precompile but not specified in any reserved range", k)) + for _, module := range modules.RegisteredModules() { + address := module.Address + if _, ok := PrecompileAllNativeAddresses[address]; ok { + panic(fmt.Errorf("precompile address collides with existing native address: %s", address)) } } } @@ -341,7 +326,7 @@ func (c *dataCopy) RequiredGas(input []byte) uint64 { return uint64(len(input)+31)/32*params.IdentityPerWordGas + params.IdentityBaseGas } func (c *dataCopy) Run(in []byte) ([]byte, error) { - return in, nil + return common.CopyBytes(in), nil } // bigModExp implements a native big integer exponential modular operation. 
@@ -483,9 +468,9 @@ func (c *bigModExp) Run(input []byte) ([]byte, error) { } // Retrieve the operands and execute the exponentiation var ( - base = new(big2.Int).SetBytes(getData(input, 0, baseLen)) - exp = new(big2.Int).SetBytes(getData(input, baseLen, expLen)) - mod = new(big2.Int).SetBytes(getData(input, baseLen+expLen, modLen)) + base = new(big.Int).SetBytes(getData(input, 0, baseLen)) + exp = new(big.Int).SetBytes(getData(input, baseLen, expLen)) + mod = new(big.Int).SetBytes(getData(input, baseLen+expLen, modLen)) v []byte ) switch { @@ -1048,7 +1033,7 @@ func (c *bls12381Pairing) Run(input []byte) ([]byte, error) { return nil, errBLS12381G2PointSubgroup } - // Update pairing engine with G1 and G2 ponits + // Update pairing engine with G1 and G2 points e.AddPair(p1, p2) } // Prepare 32 byte output diff --git a/coreth/core/vm/contracts_stateful.go b/coreth/core/vm/contracts_stateful.go index 096026e5..64a34ffc 100644 --- a/coreth/core/vm/contracts_stateful.go +++ b/coreth/core/vm/contracts_stateful.go @@ -4,25 +4,8 @@ package vm import ( - "fmt" - "math/big" - - "github.com/ava-labs/coreth/precompile" - "github.com/ava-labs/coreth/vmerrs" + "github.com/ava-labs/coreth/precompile/contract" "github.com/ethereum/go-ethereum/common" - "github.com/holiman/uint256" -) - -// PrecompiledContractsApricot contains the default set of pre-compiled Ethereum -// contracts used in the Istanbul release and the stateful precompiled contracts -// added for the Avalanche Apricot release. 
-// Apricot is incompatible with the YoloV3 Release since it does not include the -// BLS12-381 Curve Operations added to the set of precompiled contracts - -var ( - genesisContractAddr = common.HexToAddress("0x0100000000000000000000000000000000000000") - NativeAssetBalanceAddr = common.HexToAddress("0x0100000000000000000000000000000000000001") - NativeAssetCallAddr = common.HexToAddress("0x0100000000000000000000000000000000000002") ) // wrappedPrecompiledContract implements StatefulPrecompiledContract by wrapping stateless native precompiled contracts @@ -31,103 +14,18 @@ type wrappedPrecompiledContract struct { p PrecompiledContract } -func newWrappedPrecompiledContract(p PrecompiledContract) precompile.StatefulPrecompiledContract { +// newWrappedPrecompiledContract returns a wrapped version of [PrecompiledContract] to be executed according to the StatefulPrecompiledContract +// interface. +func newWrappedPrecompiledContract(p PrecompiledContract) contract.StatefulPrecompiledContract { return &wrappedPrecompiledContract{p: p} } // Run implements the StatefulPrecompiledContract interface -func (w *wrappedPrecompiledContract) Run(accessibleState precompile.PrecompileAccessibleState, caller common.Address, addr common.Address, input []byte, suppliedGas uint64, readOnly bool) (ret []byte, remainingGas uint64, err error) { +func (w *wrappedPrecompiledContract) Run(accessibleState contract.AccessibleState, caller common.Address, addr common.Address, input []byte, suppliedGas uint64, readOnly bool) (ret []byte, remainingGas uint64, err error) { return RunPrecompiledContract(w.p, input, suppliedGas) } // RunStatefulPrecompiledContract confirms runs [precompile] with the specified parameters. 
-func RunStatefulPrecompiledContract(precompile precompile.StatefulPrecompiledContract, accessibleState precompile.PrecompileAccessibleState, caller common.Address, addr common.Address, input []byte, suppliedGas uint64, readOnly bool) (ret []byte, remainingGas uint64, err error) { +func RunStatefulPrecompiledContract(precompile contract.StatefulPrecompiledContract, accessibleState contract.AccessibleState, caller common.Address, addr common.Address, input []byte, suppliedGas uint64, readOnly bool) (ret []byte, remainingGas uint64, err error) { return precompile.Run(accessibleState, caller, addr, input, suppliedGas, readOnly) } - -// nativeAssetBalance is a precompiled contract used to retrieve the native asset balance -type nativeAssetBalance struct { - gasCost uint64 -} - -// PackNativeAssetBalanceInput packs the arguments into the required input data for a transaction to be passed into -// the native asset balance precompile. -func PackNativeAssetBalanceInput(address common.Address, assetID common.Hash) []byte { - input := make([]byte, 52) - copy(input, address.Bytes()) - copy(input[20:], assetID.Bytes()) - return input -} - -// UnpackNativeAssetBalanceInput attempts to unpack [input] into the arguments to the native asset balance precompile -func UnpackNativeAssetBalanceInput(input []byte) (common.Address, common.Hash, error) { - if len(input) != 52 { - return common.Address{}, common.Hash{}, fmt.Errorf("native asset balance input had unexpcted length %d", len(input)) - } - address := common.BytesToAddress(input[:20]) - assetID := common.Hash{} - assetID.SetBytes(input[20:52]) - return address, assetID, nil -} - -// Run implements StatefulPrecompiledContract -func (b *nativeAssetBalance) Run(accessibleState precompile.PrecompileAccessibleState, caller common.Address, addr common.Address, input []byte, suppliedGas uint64, readOnly bool) (ret []byte, remainingGas uint64, err error) { - // input: encodePacked(address 20 bytes, assetID 32 bytes) - if suppliedGas < 
b.gasCost { - return nil, 0, vmerrs.ErrOutOfGas - } - remainingGas = suppliedGas - b.gasCost - - address, assetID, err := UnpackNativeAssetBalanceInput(input) - if err != nil { - return nil, remainingGas, vmerrs.ErrExecutionReverted - } - - res, overflow := uint256.FromBig(accessibleState.GetStateDB().GetBalanceMultiCoin(address, assetID)) - if overflow { - return nil, remainingGas, vmerrs.ErrExecutionReverted - } - return common.LeftPadBytes(res.Bytes(), 32), remainingGas, nil -} - -// nativeAssetCall atomically transfers a native asset to a recipient address as well as calling that -// address -type nativeAssetCall struct { - gasCost uint64 -} - -// PackNativeAssetCallInput packs the arguments into the required input data for a transaction to be passed into -// the native asset precompile. -// Assumes that [assetAmount] is non-nil. -func PackNativeAssetCallInput(address common.Address, assetID common.Hash, assetAmount *big.Int, callData []byte) []byte { - input := make([]byte, 84+len(callData)) - copy(input[0:20], address.Bytes()) - copy(input[20:52], assetID.Bytes()) - assetAmount.FillBytes(input[52:84]) - copy(input[84:], callData) - return input -} - -// UnpackNativeAssetCallInput attempts to unpack [input] into the arguments to the native asset call precompile -func UnpackNativeAssetCallInput(input []byte) (common.Address, common.Hash, *big.Int, []byte, error) { - if len(input) < 84 { - return common.Address{}, common.Hash{}, nil, nil, fmt.Errorf("native asset call input had unexpected length %d", len(input)) - } - to := common.BytesToAddress(input[:20]) - assetID := common.BytesToHash(input[20:52]) - assetAmount := new(big.Int).SetBytes(input[52:84]) - callData := input[84:] - return to, assetID, assetAmount, callData, nil -} - -// Run implements StatefulPrecompiledContract -func (c *nativeAssetCall) Run(accessibleState precompile.PrecompileAccessibleState, caller common.Address, addr common.Address, input []byte, suppliedGas uint64, readOnly bool) (ret 
[]byte, remainingGas uint64, err error) { - // input: encodePacked(address 20 bytes, assetID 32 bytes, assetAmount 32 bytes, callData variable length bytes) - return accessibleState.NativeAssetCall(caller, input, suppliedGas, c.gasCost, readOnly) -} - -type deprecatedContract struct{} - -func (*deprecatedContract) Run(accessibleState precompile.PrecompileAccessibleState, caller common.Address, addr common.Address, input []byte, suppliedGas uint64, readOnly bool) (ret []byte, remainingGas uint64, err error) { - return nil, suppliedGas, vmerrs.ErrExecutionReverted -} diff --git a/coreth/core/vm/contracts_stateful_native_asset.go b/coreth/core/vm/contracts_stateful_native_asset.go new file mode 100644 index 00000000..6791efe2 --- /dev/null +++ b/coreth/core/vm/contracts_stateful_native_asset.go @@ -0,0 +1,113 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package vm + +import ( + "fmt" + "math/big" + + "github.com/ava-labs/coreth/precompile/contract" + "github.com/ava-labs/coreth/vmerrs" + "github.com/ethereum/go-ethereum/common" + "github.com/holiman/uint256" +) + +// PrecompiledContractsApricot contains the default set of pre-compiled Ethereum +// contracts used in the Istanbul release and the stateful precompiled contracts +// added for the Avalanche Apricot release. 
+// Apricot is incompatible with the YoloV3 Release since it does not include the +// BLS12-381 Curve Operations added to the set of precompiled contracts + +var ( + genesisContractAddr = common.HexToAddress("0x0100000000000000000000000000000000000000") + NativeAssetBalanceAddr = common.HexToAddress("0x0100000000000000000000000000000000000001") + NativeAssetCallAddr = common.HexToAddress("0x0100000000000000000000000000000000000002") +) + +// nativeAssetBalance is a precompiled contract used to retrieve the native asset balance +type nativeAssetBalance struct { + gasCost uint64 +} + +// PackNativeAssetBalanceInput packs the arguments into the required input data for a transaction to be passed into +// the native asset balance contract. +func PackNativeAssetBalanceInput(address common.Address, assetID common.Hash) []byte { + input := make([]byte, 52) + copy(input, address.Bytes()) + copy(input[20:], assetID.Bytes()) + return input +} + +// UnpackNativeAssetBalanceInput attempts to unpack [input] into the arguments to the native asset balance precompile +func UnpackNativeAssetBalanceInput(input []byte) (common.Address, common.Hash, error) { + if len(input) != 52 { + return common.Address{}, common.Hash{}, fmt.Errorf("native asset balance input had unexpcted length %d", len(input)) + } + address := common.BytesToAddress(input[:20]) + assetID := common.Hash{} + assetID.SetBytes(input[20:52]) + return address, assetID, nil +} + +// Run implements StatefulPrecompiledContract +func (b *nativeAssetBalance) Run(accessibleState contract.AccessibleState, caller common.Address, addr common.Address, input []byte, suppliedGas uint64, readOnly bool) (ret []byte, remainingGas uint64, err error) { + // input: encodePacked(address 20 bytes, assetID 32 bytes) + if suppliedGas < b.gasCost { + return nil, 0, vmerrs.ErrOutOfGas + } + remainingGas = suppliedGas - b.gasCost + + address, assetID, err := UnpackNativeAssetBalanceInput(input) + if err != nil { + return nil, remainingGas, 
vmerrs.ErrExecutionReverted + } + + res, overflow := uint256.FromBig(accessibleState.GetStateDB().GetBalanceMultiCoin(address, assetID)) + if overflow { + return nil, remainingGas, vmerrs.ErrExecutionReverted + } + return common.LeftPadBytes(res.Bytes(), 32), remainingGas, nil +} + +// nativeAssetCall atomically transfers a native asset to a recipient address as well as calling that +// address +type nativeAssetCall struct { + gasCost uint64 +} + +// PackNativeAssetCallInput packs the arguments into the required input data for a transaction to be passed into +// the native asset contract. +// Assumes that [assetAmount] is non-nil. +func PackNativeAssetCallInput(address common.Address, assetID common.Hash, assetAmount *big.Int, callData []byte) []byte { + input := make([]byte, 84+len(callData)) + copy(input[0:20], address.Bytes()) + copy(input[20:52], assetID.Bytes()) + assetAmount.FillBytes(input[52:84]) + copy(input[84:], callData) + return input +} + +// UnpackNativeAssetCallInput attempts to unpack [input] into the arguments to the native asset call precompile +func UnpackNativeAssetCallInput(input []byte) (common.Address, common.Hash, *big.Int, []byte, error) { + if len(input) < 84 { + return common.Address{}, common.Hash{}, nil, nil, fmt.Errorf("native asset call input had unexpected length %d", len(input)) + } + to := common.BytesToAddress(input[:20]) + assetID := common.BytesToHash(input[20:52]) + assetAmount := new(big.Int).SetBytes(input[52:84]) + callData := input[84:] + return to, assetID, assetAmount, callData, nil +} + +// Run implements StatefulPrecompiledContract +func (c *nativeAssetCall) Run(accessibleState contract.AccessibleState, caller common.Address, addr common.Address, input []byte, suppliedGas uint64, readOnly bool) (ret []byte, remainingGas uint64, err error) { + // input: encodePacked(address 20 bytes, assetID 32 bytes, assetAmount 32 bytes, callData variable length bytes) + return accessibleState.NativeAssetCall(caller, input, 
suppliedGas, c.gasCost, readOnly) +} + +type deprecatedContract struct{} + +func (*deprecatedContract) Run(accessibleState contract.AccessibleState, caller common.Address, addr common.Address, input []byte, suppliedGas uint64, readOnly bool) (ret []byte, remainingGas uint64, err error) { + return nil, suppliedGas, vmerrs.ErrExecutionReverted +} diff --git a/coreth/core/vm/contracts_stateful_test.go b/coreth/core/vm/contracts_stateful_test.go index 589fdc4f..ef3eb24a 100644 --- a/coreth/core/vm/contracts_stateful_test.go +++ b/coreth/core/vm/contracts_stateful_test.go @@ -73,7 +73,7 @@ func TestPackNativeAssetCallInput(t *testing.T) { func TestStatefulPrecompile(t *testing.T) { vmCtx := BlockContext{ BlockNumber: big.NewInt(0), - Time: big.NewInt(0), + Time: 0, CanTransfer: CanTransfer, CanTransferMC: CanTransferMC, Transfer: Transfer, diff --git a/coreth/core/vm/eips.go b/coreth/core/vm/eips.go index dfcd548d..f7e3f7e3 100644 --- a/coreth/core/vm/eips.go +++ b/coreth/core/vm/eips.go @@ -31,6 +31,8 @@ import ( "sort" "github.com/ava-labs/coreth/params" + "github.com/ava-labs/coreth/vmerrs" + "github.com/ethereum/go-ethereum/common" "github.com/holiman/uint256" ) @@ -42,6 +44,7 @@ var activators = map[int]func(*JumpTable){ 2200: enable2200, 1884: enable1884, 1344: enable1344, + 1153: enable1153, } // EnableEIP enables the given EIP on the config. 
@@ -184,6 +187,45 @@ func enable3198(jt *JumpTable) { } } +// enable1153 applies EIP-1153 "Transient Storage" +// - Adds TLOAD that reads from transient storage +// - Adds TSTORE that writes to transient storage +func enable1153(jt *JumpTable) { + jt[TLOAD] = &operation{ + execute: opTload, + constantGas: params.WarmStorageReadCostEIP2929, + minStack: minStack(1, 1), + maxStack: maxStack(1, 1), + } + + jt[TSTORE] = &operation{ + execute: opTstore, + constantGas: params.WarmStorageReadCostEIP2929, + minStack: minStack(2, 0), + maxStack: maxStack(2, 0), + } +} + +// opTload implements TLOAD opcode +func opTload(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + loc := scope.Stack.peek() + hash := common.Hash(loc.Bytes32()) + val := interpreter.evm.StateDB.GetTransientState(scope.Contract.Address(), hash) + loc.SetBytes(val.Bytes()) + return nil, nil +} + +// opTstore implements TSTORE opcode +func opTstore(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + if interpreter.readOnly { + return nil, vmerrs.ErrWriteProtection + } + loc := scope.Stack.pop() + val := scope.Stack.pop() + interpreter.evm.StateDB.SetTransientState(scope.Contract.Address(), loc.Bytes32(), val.Bytes32()) + return nil, nil +} + // opBaseFee implements BASEFEE opcode func opBaseFee(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { baseFee, _ := uint256.FromBig(interpreter.evm.Context.BaseFee) diff --git a/coreth/core/vm/evm.go b/coreth/core/vm/evm.go index d0bf6279..9c66640e 100644 --- a/coreth/core/vm/evm.go +++ b/coreth/core/vm/evm.go @@ -31,9 +31,13 @@ import ( "sync/atomic" "time" + "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/coreth/constants" "github.com/ava-labs/coreth/params" - "github.com/ava-labs/coreth/precompile" + "github.com/ava-labs/coreth/precompile/contract" + "github.com/ava-labs/coreth/precompile/modules" + "github.com/ava-labs/coreth/precompile/precompileconfig" + 
"github.com/ava-labs/coreth/predicate" "github.com/ava-labs/coreth/vmerrs" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" @@ -41,22 +45,18 @@ import ( ) var ( - _ precompile.PrecompileAccessibleState = &EVM{} - _ precompile.BlockContext = &BlockContext{} + _ contract.AccessibleState = &EVM{} + _ contract.BlockContext = &BlockContext{} ) -// IsProhibited returns true if [addr] is the blackhole address or is -// with a range reserved for precompiled contracts. +// IsProhibited returns true if [addr] is in the prohibited list of addresses which should +// not be allowed as an EOA or newly created contract address. func IsProhibited(addr common.Address) bool { if addr == constants.BlackholeAddr { return true } - for _, reservedRange := range precompile.ReservedRanges { - if reservedRange.Contains(addr) { - return true - } - } - return false + + return modules.ReservedAddress(addr) } // emptyCodeHash is used by create to ensure deployment is disallowed to already @@ -75,8 +75,8 @@ type ( GetHashFunc func(uint64) common.Hash ) -func (evm *EVM) precompile(addr common.Address) (precompile.StatefulPrecompiledContract, bool) { - var precompiles map[common.Address]precompile.StatefulPrecompiledContract +func (evm *EVM) precompile(addr common.Address) (contract.StatefulPrecompiledContract, bool) { + var precompiles map[common.Address]contract.StatefulPrecompiledContract switch { case evm.chainRules.IsBanff: precompiles = PrecompiledContractsBanff @@ -101,8 +101,12 @@ func (evm *EVM) precompile(addr common.Address) (precompile.StatefulPrecompiledC } // Otherwise, check the chain rules for the additionally configured precompiles. - p, ok = evm.chainRules.Precompiles[addr] - return p, ok + if _, ok = evm.chainRules.ActivePrecompiles[addr]; ok { + module, ok := modules.GetPrecompileModuleByAddress(addr) + return module.Contract, ok + } + + return nil, false } // BlockContext provides the EVM with auxiliary information. 
Once provided @@ -120,12 +124,15 @@ type BlockContext struct { TransferMultiCoin TransferMCFunc // GetHash returns the hash corresponding to n GetHash GetHashFunc + // PredicateResults are the results of predicate verification available throughout the EVM's execution. + // PredicateResults may be nil if it is not encoded in the block's header. + PredicateResults *predicate.Results // Block information Coinbase common.Address // Provides information for COINBASE GasLimit uint64 // Provides information for GASLIMIT BlockNumber *big.Int // Provides information for NUMBER - Time *big.Int // Provides information for TIME + Time uint64 // Provides information for TIME Difficulty *big.Int // Provides information for DIFFICULTY BaseFee *big.Int // Provides information for BASEFEE } @@ -134,10 +141,17 @@ func (b *BlockContext) Number() *big.Int { return b.BlockNumber } -func (b *BlockContext) Timestamp() *big.Int { +func (b *BlockContext) Timestamp() uint64 { return b.Time } +func (b *BlockContext) GetPredicateResults(txHash common.Hash, address common.Address) []byte { + if b.PredicateResults == nil { + return nil + } + return b.PredicateResults.GetResults(txHash, address) +} + // TxContext provides the EVM with information about a transaction. // All fields can change between transactions. type TxContext struct { @@ -175,8 +189,7 @@ type EVM struct { // used throughout the execution of the tx. interpreter *EVMInterpreter // abort is used to abort the EVM calling operations - // NOTE: must be set atomically - abort int32 + abort atomic.Bool // callGasTemp holds the gas available for the current call. This is needed because the // available gas is calculated in gasCall* according to the 63/64 rule and later // applied in opCall*. 
@@ -194,7 +207,7 @@ func NewEVM(blockCtx BlockContext, txCtx TxContext, statedb StateDB, chainConfig chainConfig: chainConfig, chainRules: chainConfig.AvalancheRules(blockCtx.BlockNumber, blockCtx.Time), } - evm.interpreter = NewEVMInterpreter(evm, config) + evm.interpreter = NewEVMInterpreter(evm) return evm } @@ -208,21 +221,26 @@ func (evm *EVM) Reset(txCtx TxContext, statedb StateDB) { // Cancel cancels any running EVM operation. This may be called concurrently and // it's safe to be called multiple times. func (evm *EVM) Cancel() { - atomic.StoreInt32(&evm.abort, 1) + evm.abort.Store(true) } // Cancelled returns true if Cancel has been called func (evm *EVM) Cancelled() bool { - return atomic.LoadInt32(&evm.abort) == 1 + return evm.abort.Load() +} + +// GetSnowContext returns the evm's snow.Context. +func (evm *EVM) GetSnowContext() *snow.Context { + return evm.chainConfig.SnowCtx } // GetStateDB returns the evm's StateDB -func (evm *EVM) GetStateDB() precompile.StateDB { +func (evm *EVM) GetStateDB() contract.StateDB { return evm.StateDB } // GetBlockContext returns the evm's BlockContext -func (evm *EVM) GetBlockContext() precompile.BlockContext { +func (evm *EVM) GetBlockContext() contract.BlockContext { return &evm.Context } @@ -231,19 +249,23 @@ func (evm *EVM) Interpreter() *EVMInterpreter { return evm.interpreter } +// SetBlockContext updates the block context of the EVM. +func (evm *EVM) SetBlockContext(blockCtx BlockContext) { + evm.Context = blockCtx + num := blockCtx.BlockNumber + evm.chainRules = evm.chainConfig.AvalancheRules(num, blockCtx.Time) +} + // DaemonCall separates a regular call from taking a snapshot and reverting to it in case of error. // The function returns the snapshot in order to permit another opportunity for reverting to the // snapshot in the event that the subsequent call to mint() in coreth/core/daemon.go fails. 
func (evm *EVM) DaemonCall(caller ContractRef, addr common.Address, input []byte, gas uint64) (snapshot int, ret []byte, leftOverGas uint64, err error) { // Temporarily disable EVM debugging - oldDebug := evm.Config.Debug - oldInterpreterDebug := evm.interpreter.cfg.Debug + oldTracer := evm.Config.Tracer defer func() { - evm.Config.Debug = oldDebug - evm.interpreter.cfg.Debug = oldInterpreterDebug + evm.Config.Tracer = oldTracer }() - evm.Config.Debug = false - evm.interpreter.cfg.Debug = false + evm.Config.Tracer = nil value := big.NewInt(0) // Fail if we're trying to execute above the call depth limit @@ -283,10 +305,10 @@ func (evm *EVM) CallWithoutSnapshot(caller ContractRef, addr common.Address, inp if !evm.StateDB.Exist(addr) { if !isPrecompile && evm.chainRules.IsEIP158 && value.Sign() == 0 { // Calling a non existing account, don't do anything, but ping the tracer - if evm.Config.Debug { + if evm.Config.Tracer != nil { if evm.depth == 0 { evm.Config.Tracer.CaptureStart(evm, caller.Address(), addr, false, input, gas, value) - evm.Config.Tracer.CaptureEnd(ret, 0, 0, nil) + evm.Config.Tracer.CaptureEnd(ret, 0, nil) } else { evm.Config.Tracer.CaptureEnter(CALL, caller.Address(), addr, input, gas, value) evm.Config.Tracer.CaptureExit(ret, 0, nil) @@ -299,11 +321,11 @@ func (evm *EVM) CallWithoutSnapshot(caller ContractRef, addr common.Address, inp evm.Context.Transfer(evm.StateDB, caller.Address(), addr, value) // Capture the tracer start/end events in debug mode - if evm.Config.Debug { + if evm.Config.Tracer != nil { if evm.depth == 0 { evm.Config.Tracer.CaptureStart(evm, caller.Address(), addr, false, input, gas, value) defer func(startGas uint64, startTime time.Time) { // Lazy evaluation of the parameters - evm.Config.Tracer.CaptureEnd(ret, startGas-gas, time.Since(startTime), err) + evm.Config.Tracer.CaptureEnd(ret, startGas-gas, err) }(gas, time.Now()) } else { // Handle tracer events for entering and exiting a call frame @@ -353,14 +375,15 @@ func (evm 
*EVM) Call(caller ContractRef, addr common.Address, input []byte, gas } snapshot := evm.StateDB.Snapshot() p, isPrecompile := evm.precompile(addr) + debug := evm.Config.Tracer != nil if !evm.StateDB.Exist(addr) { if !isPrecompile && evm.chainRules.IsEIP158 && value.Sign() == 0 { // Calling a non existing account, don't do anything, but ping the tracer - if evm.Config.Debug { + if debug { if evm.depth == 0 { evm.Config.Tracer.CaptureStart(evm, caller.Address(), addr, false, input, gas, value) - evm.Config.Tracer.CaptureEnd(ret, 0, 0, nil) + evm.Config.Tracer.CaptureEnd(ret, 0, nil) } else { evm.Config.Tracer.CaptureEnter(CALL, caller.Address(), addr, input, gas, value) evm.Config.Tracer.CaptureExit(ret, 0, nil) @@ -373,12 +396,12 @@ func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas evm.Context.Transfer(evm.StateDB, caller.Address(), addr, value) // Capture the tracer start/end events in debug mode - if evm.Config.Debug { + if debug { if evm.depth == 0 { evm.Config.Tracer.CaptureStart(evm, caller.Address(), addr, false, input, gas, value) - defer func(startGas uint64, startTime time.Time) { // Lazy evaluation of the parameters - evm.Config.Tracer.CaptureEnd(ret, startGas-gas, time.Since(startTime), err) - }(gas, time.Now()) + defer func(startGas uint64) { // Lazy evaluation of the parameters + evm.Config.Tracer.CaptureEnd(ret, startGas-gas, err) + }(gas) } else { // Handle tracer events for entering and exiting a call frame evm.Config.Tracer.CaptureEnter(CALL, caller.Address(), addr, input, gas, value) @@ -458,10 +481,11 @@ func (evm *EVM) CallExpert(caller ContractRef, addr common.Address, input []byte evm.Context.TransferMultiCoin(evm.StateDB, caller.Address(), addr, coinID, value2) // Capture the tracer start/end events in debug mode - if evm.Config.Debug && evm.depth == 0 { + debug := evm.Config.Tracer != nil + if debug && evm.depth == 0 { evm.Config.Tracer.CaptureStart(evm, caller.Address(), addr, false, input, gas, value) defer 
func(startGas uint64, startTime time.Time) { // Lazy evaluation of the parameters - evm.Config.Tracer.CaptureEnd(ret, startGas-gas, time.Since(startTime), err) + evm.Config.Tracer.CaptureEnd(ret, startGas-gas, err) }(gas, time.Now()) } @@ -523,7 +547,7 @@ func (evm *EVM) CallCode(caller ContractRef, addr common.Address, input []byte, var snapshot = evm.StateDB.Snapshot() // Invoke tracer hooks that signal entering/exiting a call frame - if evm.Config.Debug { + if evm.Config.Tracer != nil { evm.Config.Tracer.CaptureEnter(CALLCODE, caller.Address(), addr, input, gas, value) defer func(startGas uint64) { evm.Config.Tracer.CaptureExit(ret, startGas-gas, err) @@ -564,8 +588,12 @@ func (evm *EVM) DelegateCall(caller ContractRef, addr common.Address, input []by var snapshot = evm.StateDB.Snapshot() // Invoke tracer hooks that signal entering/exiting a call frame - if evm.Config.Debug { - evm.Config.Tracer.CaptureEnter(DELEGATECALL, caller.Address(), addr, input, gas, nil) + if evm.Config.Tracer != nil { + // NOTE: caller must, at all times be a contract. It should never happen + // that caller is something other than a Contract. 
+ parent := caller.(*Contract) + // DELEGATECALL inherits value from parent call + evm.Config.Tracer.CaptureEnter(DELEGATECALL, caller.Address(), addr, input, gas, parent.value) defer func(startGas uint64) { evm.Config.Tracer.CaptureExit(ret, startGas-gas, err) }(gas) @@ -614,7 +642,7 @@ func (evm *EVM) StaticCall(caller ContractRef, addr common.Address, input []byte evm.StateDB.AddBalance(addr, big0) // Invoke tracer hooks that signal entering/exiting a call frame - if evm.Config.Debug { + if evm.Config.Tracer != nil { evm.Config.Tracer.CaptureEnter(STATICCALL, caller.Address(), addr, input, gas, nil) defer func(startGas uint64) { evm.Config.Tracer.CaptureExit(ret, startGas-gas, err) @@ -705,7 +733,7 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64, contract := NewContract(caller, AccountRef(address), value, gas) contract.SetCodeOptionalHash(&address, codeAndHash) - if evm.Config.Debug { + if evm.Config.Tracer != nil { if evm.depth == 0 { evm.Config.Tracer.CaptureStart(evm, caller.Address(), address, true, codeAndHash.code, gas, value) } else { @@ -713,8 +741,6 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64, } } - start := time.Now() - ret, err := evm.interpreter.Run(contract, nil, false) // Check whether the max code size has been exceeded, assign err if the case. 
@@ -750,9 +776,9 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64, } } - if evm.Config.Debug { + if evm.Config.Tracer != nil { if evm.depth == 0 { - evm.Config.Tracer.CaptureEnd(ret, gas-contract.Gas, time.Since(start), err) + evm.Config.Tracer.CaptureEnd(ret, gas-contract.Gas, err) } else { evm.Config.Tracer.CaptureExit(ret, gas-contract.Gas, err) } @@ -779,6 +805,9 @@ func (evm *EVM) Create2(caller ContractRef, code []byte, gas uint64, endowment * // ChainConfig returns the environment's chain configuration func (evm *EVM) ChainConfig() *params.ChainConfig { return evm.chainConfig } +// GetChainConfig implements AccessibleState +func (evm *EVM) GetChainConfig() precompileconfig.ChainConfig { return evm.chainConfig } + func (evm *EVM) NativeAssetCall(caller common.Address, input []byte, suppliedGas uint64, gasCost uint64, readOnly bool) (ret []byte, remainingGas uint64, err error) { if suppliedGas < gasCost { return nil, 0, vmerrs.ErrOutOfGas diff --git a/coreth/core/vm/evm_test.go b/coreth/core/vm/evm_test.go index 021dc570..5a7e72f3 100644 --- a/coreth/core/vm/evm_test.go +++ b/coreth/core/vm/evm_test.go @@ -16,9 +16,20 @@ func TestIsProhibited(t *testing.T) { assert.True(t, IsProhibited(common.HexToAddress("0x0100000000000000000000000000000000000010"))) assert.True(t, IsProhibited(common.HexToAddress("0x01000000000000000000000000000000000000f0"))) assert.True(t, IsProhibited(common.HexToAddress("0x01000000000000000000000000000000000000ff"))) + assert.True(t, IsProhibited(common.HexToAddress("0x0200000000000000000000000000000000000000"))) + assert.True(t, IsProhibited(common.HexToAddress("0x0200000000000000000000000000000000000010"))) + assert.True(t, IsProhibited(common.HexToAddress("0x02000000000000000000000000000000000000f0"))) + assert.True(t, IsProhibited(common.HexToAddress("0x02000000000000000000000000000000000000ff"))) + // reserved addresses (custom precompiles) + assert.True(t, 
IsProhibited(common.HexToAddress("0x0300000000000000000000000000000000000000"))) + assert.True(t, IsProhibited(common.HexToAddress("0x0300000000000000000000000000000000000010"))) + assert.True(t, IsProhibited(common.HexToAddress("0x03000000000000000000000000000000000000f0"))) + assert.True(t, IsProhibited(common.HexToAddress("0x03000000000000000000000000000000000000ff"))) // allowed for use assert.False(t, IsProhibited(common.HexToAddress("0x00000000000000000000000000000000000000ff"))) + assert.False(t, IsProhibited(common.HexToAddress("0x00ffffffffffffffffffffffffffffffffffffff"))) assert.False(t, IsProhibited(common.HexToAddress("0x0100000000000000000000000000000000000100"))) - assert.False(t, IsProhibited(common.HexToAddress("0x0200000000000000000000000000000000000000"))) + assert.False(t, IsProhibited(common.HexToAddress("0x0200000000000000000000000000000000000100"))) + assert.False(t, IsProhibited(common.HexToAddress("0x0300000000000000000000000000000000000100"))) } diff --git a/coreth/core/vm/gas_table.go b/coreth/core/vm/gas_table.go index 86e6fdef..7201c577 100644 --- a/coreth/core/vm/gas_table.go +++ b/coreth/core/vm/gas_table.go @@ -174,7 +174,7 @@ func gasSStore(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySi return params.NetSstoreDirtyGas, nil } -// Here come the EIP220 rules: +// Here come the EIP2200 rules: // // (0.) If *gasleft* is less than or equal to 2300, fail the current call. // (1.) If current value equals new value (this is a no-op), SLOAD_GAS is deducted. 
diff --git a/coreth/core/vm/gas_table_test.go b/coreth/core/vm/gas_table_test.go index e2388c2f..a6ae777d 100644 --- a/coreth/core/vm/gas_table_test.go +++ b/coreth/core/vm/gas_table_test.go @@ -35,6 +35,7 @@ import ( "github.com/ava-labs/coreth/core/rawdb" "github.com/ava-labs/coreth/core/state" + "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/params" "github.com/ava-labs/coreth/vmerrs" "github.com/ethereum/go-ethereum/common" @@ -94,7 +95,7 @@ func TestEIP2200(t *testing.T) { for i, tt := range eip2200Tests { address := common.BytesToAddress([]byte("contract")) - statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) statedb.CreateAccount(address) statedb.SetCode(address, hexutil.MustDecode(tt.input)) statedb.SetState(address, common.Hash{}, common.BytesToHash([]byte{tt.original})) @@ -146,7 +147,7 @@ func TestCreateGas(t *testing.T) { var gasUsed = uint64(0) doCheck := func(testGas int) bool { address := common.BytesToAddress([]byte("contract")) - statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) statedb.CreateAccount(address) statedb.SetCode(address, hexutil.MustDecode(tt.code)) statedb.Finalise(true) @@ -160,7 +161,9 @@ func TestCreateGas(t *testing.T) { config.ExtraEips = []int{3860} } - vmenv := NewEVM(vmctx, TxContext{}, statedb, params.TestChainConfig, config) + // Note: we use Cortina instead of AllEthashProtocolChanges (upstream) + // because it is the last fork before the activation of EIP-3860 + vmenv := NewEVM(vmctx, TxContext{}, statedb, params.TestCortinaChainConfig, config) var startGas = uint64(testGas) ret, gas, err := vmenv.Call(AccountRef(common.Address{}), address, nil, startGas, new(big.Int)) if err != nil { diff --git a/coreth/core/vm/instructions.go 
b/coreth/core/vm/instructions.go index e79f059d..fb549dc3 100644 --- a/coreth/core/vm/instructions.go +++ b/coreth/core/vm/instructions.go @@ -28,14 +28,12 @@ package vm import ( "errors" - "sync/atomic" - "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/params" "github.com/ava-labs/coreth/vmerrs" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" "github.com/holiman/uint256" - "golang.org/x/crypto/sha3" ) func opAdd(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { @@ -250,7 +248,7 @@ func opKeccak256(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ( data := scope.Memory.GetPtr(int64(offset.Uint64()), int64(size.Uint64())) if interpreter.hasher == nil { - interpreter.hasher = sha3.NewLegacyKeccak256().(keccakState) + interpreter.hasher = crypto.NewKeccakState() } else { interpreter.hasher.Reset() } @@ -483,8 +481,7 @@ func opCoinbase(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([ } func opTimestamp(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { - v, _ := uint256.FromBig(interpreter.evm.Context.Time) - scope.Stack.push(v) + scope.Stack.push(new(uint256.Int).SetUint64(interpreter.evm.Context.Time)) return nil, nil } @@ -544,13 +541,12 @@ func opSstore(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]b } loc := scope.Stack.pop() val := scope.Stack.pop() - interpreter.evm.StateDB.SetState(scope.Contract.Address(), - loc.Bytes32(), val.Bytes32()) + interpreter.evm.StateDB.SetState(scope.Contract.Address(), loc.Bytes32(), val.Bytes32()) return nil, nil } func opJump(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { - if atomic.LoadInt32(&interpreter.evm.abort) != 0 { + if interpreter.evm.abort.Load() { return nil, errStopToken } pos := scope.Stack.pop() @@ -562,7 +558,7 @@ func opJump(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byt } func opJumpi(pc *uint64, 
interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { - if atomic.LoadInt32(&interpreter.evm.abort) != 0 { + if interpreter.evm.abort.Load() { return nil, errStopToken } pos, cond := scope.Stack.pop(), scope.Stack.pop() @@ -712,7 +708,6 @@ func opCall(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byt } stack.push(&temp) if err == nil || err == vmerrs.ErrExecutionReverted { - ret = common.CopyBytes(ret) scope.Memory.Set(retOffset.Uint64(), retSize.Uint64(), ret) } scope.Contract.Gas += returnGas @@ -721,6 +716,7 @@ func opCall(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byt return ret, nil } +// Note: opCallExpert was de-activated in ApricotPhase2. func opCallExpert(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { stack := scope.Stack // Pop gas. The actual gas in interpreter.evm.callGasTemp. @@ -734,6 +730,8 @@ func opCallExpert(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) // Get the arguments from the memory. args := scope.Memory.GetPtr(int64(inOffset.Uint64()), int64(inSize.Uint64())) + // Note: this code fails to check that value2 is zero, which was a bug when CALLEX was active. + // The CALLEX opcode was de-activated in ApricotPhase2 resolving this issue. 
if interpreter.readOnly && !value.IsZero() { return nil, vmerrs.ErrWriteProtection } @@ -762,7 +760,6 @@ func opCallExpert(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) } stack.push(&temp) if err == nil || err == vmerrs.ErrExecutionReverted { - ret = common.CopyBytes(ret) scope.Memory.Set(retOffset.Uint64(), retSize.Uint64(), ret) } scope.Contract.Gas += returnGas @@ -797,7 +794,6 @@ func opCallCode(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([ } stack.push(&temp) if err == nil || err == vmerrs.ErrExecutionReverted { - ret = common.CopyBytes(ret) scope.Memory.Set(retOffset.Uint64(), retSize.Uint64(), ret) } scope.Contract.Gas += returnGas @@ -826,7 +822,6 @@ func opDelegateCall(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext } stack.push(&temp) if err == nil || err == vmerrs.ErrExecutionReverted { - ret = common.CopyBytes(ret) scope.Memory.Set(retOffset.Uint64(), retSize.Uint64(), ret) } scope.Contract.Gas += returnGas @@ -855,7 +850,6 @@ func opStaticCall(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) } stack.push(&temp) if err == nil || err == vmerrs.ErrExecutionReverted { - ret = common.CopyBytes(ret) scope.Memory.Set(retOffset.Uint64(), retSize.Uint64(), ret) } scope.Contract.Gas += returnGas @@ -895,9 +889,9 @@ func opSelfdestruct(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext balance := interpreter.evm.StateDB.GetBalance(scope.Contract.Address()) interpreter.evm.StateDB.AddBalance(beneficiary.Bytes20(), balance) interpreter.evm.StateDB.Suicide(scope.Contract.Address()) - if interpreter.cfg.Debug { - interpreter.cfg.Tracer.CaptureEnter(SELFDESTRUCT, scope.Contract.Address(), beneficiary.Bytes20(), []byte{}, 0, balance) - interpreter.cfg.Tracer.CaptureExit([]byte{}, 0, nil) + if tracer := interpreter.evm.Config.Tracer; tracer != nil { + tracer.CaptureEnter(SELFDESTRUCT, scope.Contract.Address(), beneficiary.Bytes20(), []byte{}, 0, balance) + tracer.CaptureExit([]byte{}, 0, nil) } 
return nil, errStopToken } @@ -919,14 +913,14 @@ func makeLog(size int) executionFunc { } d := scope.Memory.GetCopy(int64(mStart.Uint64()), int64(mSize.Uint64())) - interpreter.evm.StateDB.AddLog(&types.Log{ - Address: scope.Contract.Address(), - Topics: topics, - Data: d, + interpreter.evm.StateDB.AddLog( + scope.Contract.Address(), + topics, + d, // This is a non-consensus field, but assigned here because // core/state doesn't know the current block number. - BlockNumber: interpreter.evm.Context.BlockNumber.Uint64(), - }) + interpreter.evm.Context.BlockNumber.Uint64(), + ) return nil, nil } diff --git a/coreth/core/vm/instructions_test.go b/coreth/core/vm/instructions_test.go index 71e5224f..187617eb 100644 --- a/coreth/core/vm/instructions_test.go +++ b/coreth/core/vm/instructions_test.go @@ -34,6 +34,9 @@ import ( "os" "testing" + "github.com/ava-labs/coreth/core/rawdb" + "github.com/ava-labs/coreth/core/state" + "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/params" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" @@ -55,6 +58,14 @@ var alphabetSoup = "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffff var commonParams []*twoOperandParams var twoOpMethods map[string]executionFunc +type contractRef struct { + addr common.Address +} + +func (c contractRef) Address() common.Address { + return c.addr +} + func init() { // Params is a list of common edgecases that should be used for some common tests params := []string{ @@ -203,7 +214,7 @@ func TestAddMod(t *testing.T) { var ( env = NewEVM(BlockContext{}, TxContext{}, nil, params.TestChainConfig, Config{}) stack = newstack() - evmInterpreter = NewEVMInterpreter(env, env.Config) + evmInterpreter = NewEVMInterpreter(env) pc = uint64(0) ) tests := []struct { @@ -293,7 +304,7 @@ func opBenchmark(bench *testing.B, op executionFunc, args ...string) { env = NewEVM(BlockContext{}, TxContext{}, nil, params.TestChainConfig, Config{}) stack = newstack() scope = 
&ScopeContext{nil, stack, nil} - evmInterpreter = NewEVMInterpreter(env, env.Config) + evmInterpreter = NewEVMInterpreter(env) ) env.interpreter = evmInterpreter @@ -534,7 +545,7 @@ func TestOpMstore(t *testing.T) { env = NewEVM(BlockContext{}, TxContext{}, nil, params.TestChainConfig, Config{}) stack = newstack() mem = NewMemory() - evmInterpreter = NewEVMInterpreter(env, env.Config) + evmInterpreter = NewEVMInterpreter(env) ) env.interpreter = evmInterpreter @@ -560,7 +571,7 @@ func BenchmarkOpMstore(bench *testing.B) { env = NewEVM(BlockContext{}, TxContext{}, nil, params.TestChainConfig, Config{}) stack = newstack() mem = NewMemory() - evmInterpreter = NewEVMInterpreter(env, env.Config) + evmInterpreter = NewEVMInterpreter(env) ) env.interpreter = evmInterpreter @@ -577,12 +588,55 @@ func BenchmarkOpMstore(bench *testing.B) { } } +func TestOpTstore(t *testing.T) { + var ( + statedb, _ = state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + env = NewEVM(BlockContext{}, TxContext{}, statedb, params.TestChainConfig, Config{}) + stack = newstack() + mem = NewMemory() + evmInterpreter = NewEVMInterpreter(env) + caller = common.Address{} + to = common.Address{1} + contractRef = contractRef{caller} + contract = NewContract(contractRef, AccountRef(to), new(big.Int), 0) + scopeContext = ScopeContext{mem, stack, contract} + value = common.Hex2Bytes("abcdef00000000000000abba000000000deaf000000c0de00100000000133700") + ) + + // Add a stateObject for the caller and the contract being called + statedb.CreateAccount(caller) + statedb.CreateAccount(to) + + env.interpreter = evmInterpreter + pc := uint64(0) + // push the value to the stack + stack.push(new(uint256.Int).SetBytes(value)) + // push the location to the stack + stack.push(new(uint256.Int)) + opTstore(&pc, evmInterpreter, &scopeContext) + // there should be no elements on the stack after TSTORE + if stack.len() != 0 { + t.Fatal("stack wrong size") + } + // push the location to the stack 
+ stack.push(new(uint256.Int)) + opTload(&pc, evmInterpreter, &scopeContext) + // there should be one element on the stack after TLOAD + if stack.len() != 1 { + t.Fatal("stack wrong size") + } + val := stack.peek() + if !bytes.Equal(val.Bytes(), value) { + t.Fatal("incorrect element read from transient storage") + } +} + func BenchmarkOpKeccak256(bench *testing.B) { var ( env = NewEVM(BlockContext{}, TxContext{}, nil, params.TestChainConfig, Config{}) stack = newstack() mem = NewMemory() - evmInterpreter = NewEVMInterpreter(env, env.Config) + evmInterpreter = NewEVMInterpreter(env) ) env.interpreter = evmInterpreter mem.Resize(32) diff --git a/coreth/core/vm/interface.go b/coreth/core/vm/interface.go index bde4b08e..1362a4e3 100644 --- a/coreth/core/vm/interface.go +++ b/coreth/core/vm/interface.go @@ -30,6 +30,7 @@ import ( "math/big" "github.com/ava-labs/coreth/core/types" + "github.com/ava-labs/coreth/params" "github.com/ethereum/go-ethereum/common" ) @@ -62,6 +63,9 @@ type StateDB interface { GetState(common.Address, common.Hash) common.Hash SetState(common.Address, common.Hash, common.Hash) + GetTransientState(addr common.Address, key common.Hash) common.Hash + SetTransientState(addr common.Address, key, value common.Hash) + Suicide(common.Address) bool HasSuicided(common.Address) bool @@ -72,7 +76,6 @@ type StateDB interface { // is defined according to EIP161 (balance = nonce = code = 0). Empty(common.Address) bool - PrepareAccessList(sender common.Address, dest *common.Address, precompiles []common.Address, txAccesses types.AccessList) AddressInAccessList(addr common.Address) bool SlotInAccessList(addr common.Address, slot common.Hash) (addressOk bool, slotOk bool) // AddAddressToAccessList adds the given address to the access list. This operation is safe to perform @@ -81,26 +84,30 @@ type StateDB interface { // AddSlotToAccessList adds the given (address,slot) to the access list. 
This operation is safe to perform // even if the feature/fork is not active yet AddSlotToAccessList(addr common.Address, slot common.Hash) + Prepare(rules params.Rules, sender, coinbase common.Address, dest *common.Address, precompiles []common.Address, txAccesses types.AccessList) RevertToSnapshot(int) Snapshot() int - AddLog(*types.Log) - AddPreimage(common.Hash, []byte) + AddLog(addr common.Address, topics []common.Hash, data []byte, blockNumber uint64) + GetLogData() (topics [][]common.Hash, data [][]byte) + GetPredicateStorageSlots(address common.Address, index int) ([]byte, bool) + SetPredicateStorageSlots(address common.Address, predicates [][]byte) - ForEachStorage(common.Address, func(common.Hash, common.Hash) bool) error + GetTxHash() common.Hash + + AddPreimage(common.Hash, []byte) } // CallContext provides a basic interface for the EVM calling conventions. The EVM // depends on this context being implemented for doing subcalls and initialising new EVM contracts. type CallContext interface { - // Call another contract + // Call calls another contract. 
Call(env *EVM, me ContractRef, addr common.Address, data []byte, gas, value *big.Int) ([]byte, error) - CallExpert(env *EVM, me ContractRef, addr common.Address, data []byte, gas, value *big.Int, coinID *common.Hash, value2 *big.Int) ([]byte, error) - // Take another's contract code and execute within our own context + // CallCode takes another contracts code and execute within our own context CallCode(env *EVM, me ContractRef, addr common.Address, data []byte, gas, value *big.Int) ([]byte, error) - // Same as CallCode except sender and value is propagated from parent to child scope + // DelegateCall is same as CallCode except sender and value is propagated from parent to child scope DelegateCall(env *EVM, me ContractRef, addr common.Address, data []byte, gas *big.Int) ([]byte, error) - // Create a new contract + // Create creates a new contract Create(env *EVM, me ContractRef, data []byte, gas, value *big.Int) ([]byte, common.Address, error) } diff --git a/coreth/core/vm/interpreter.go b/coreth/core/vm/interpreter.go index b9ce3f46..9f04c32a 100644 --- a/coreth/core/vm/interpreter.go +++ b/coreth/core/vm/interpreter.go @@ -27,34 +27,24 @@ package vm import ( - "hash" - "github.com/ava-labs/coreth/vmerrs" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/math" + "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" ) -var ( - BuiltinAddr = common.Address{ - 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - } -) +var BuiltinAddr = common.Address{ + 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +} // Config are the configuration options for the Interpreter type Config struct { - Debug bool // Enables debugging Tracer EVMLogger // Opcode logger NoBaseFee bool // Forces the EIP-1559 baseFee to 0 (needed for 0 price calls) EnablePreimageRecording bool // Enables recording of SHA3/keccak preimages - - JumpTable *JumpTable // EVM instruction table, automatically populated if unset - - 
ExtraEips []int // Additional EIPS that are to be enabled - - // AllowUnfinalizedQueries allow unfinalized queries - AllowUnfinalizedQueries bool + ExtraEips []int // Additional EIPS that are to be enabled } // ScopeContext contains the things that are per-call, such as stack and memory, @@ -65,72 +55,61 @@ type ScopeContext struct { Contract *Contract } -// keccakState wraps sha3.state. In addition to the usual hash methods, it also supports -// Read to get a variable amount of data from the hash state. Read is faster than Sum -// because it doesn't copy the internal state, but also modifies the internal state. -type keccakState interface { - hash.Hash - Read([]byte) (int, error) -} - // EVMInterpreter represents an EVM interpreter type EVMInterpreter struct { - evm *EVM - cfg Config + evm *EVM + table *JumpTable - hasher keccakState // Keccak256 hasher instance shared across opcodes - hasherBuf common.Hash // Keccak256 hasher result array shared aross opcodes + hasher crypto.KeccakState // Keccak256 hasher instance shared across opcodes + hasherBuf common.Hash // Keccak256 hasher result array shared aross opcodes readOnly bool // Whether to throw on stateful modifications returnData []byte // Last CALL's return data for subsequent reuse } // NewEVMInterpreter returns a new instance of the Interpreter. -func NewEVMInterpreter(evm *EVM, cfg Config) *EVMInterpreter { +func NewEVMInterpreter(evm *EVM) *EVMInterpreter { // If jump table was not initialised we set the default one. 
- if cfg.JumpTable == nil { - switch { - case evm.chainRules.IsDUpgrade: - cfg.JumpTable = &dUpgradeInstructionSet - case evm.chainRules.IsApricotPhase3: - cfg.JumpTable = &apricotPhase3InstructionSet - case evm.chainRules.IsApricotPhase2: - cfg.JumpTable = &apricotPhase2InstructionSet - case evm.chainRules.IsApricotPhase1: - cfg.JumpTable = &apricotPhase1InstructionSet - case evm.chainRules.IsIstanbul: - cfg.JumpTable = &istanbulInstructionSet - case evm.chainRules.IsConstantinople: - cfg.JumpTable = &constantinopleInstructionSet - case evm.chainRules.IsByzantium: - cfg.JumpTable = &byzantiumInstructionSet - case evm.chainRules.IsEIP158: - cfg.JumpTable = &spuriousDragonInstructionSet - case evm.chainRules.IsEIP150: - cfg.JumpTable = &tangerineWhistleInstructionSet - case evm.chainRules.IsHomestead: - cfg.JumpTable = &homesteadInstructionSet - default: - cfg.JumpTable = &frontierInstructionSet - } - // TODO: update this to the new go-ethereum code when the relevant code is merged. - if len(evm.Config.ExtraEips) > 0 { - // Deep-copy jumptable to prevent modification of opcodes in other tables - cfg.JumpTable = copyJumpTable(cfg.JumpTable) - } - for i, eip := range cfg.ExtraEips { - if err := EnableEIP(eip, cfg.JumpTable); err != nil { - // Disable it, so caller can check if it's activated or not - cfg.ExtraEips = append(cfg.ExtraEips[:i], cfg.ExtraEips[i+1:]...) 
- log.Error("EIP activation failed", "eip", eip, "error", err) - } - } + var table *JumpTable + switch { + case evm.chainRules.IsDurango: + table = &durangoInstructionSet + case evm.chainRules.IsApricotPhase3: + table = &apricotPhase3InstructionSet + case evm.chainRules.IsApricotPhase2: + table = &apricotPhase2InstructionSet + case evm.chainRules.IsApricotPhase1: + table = &apricotPhase1InstructionSet + case evm.chainRules.IsIstanbul: + table = &istanbulInstructionSet + case evm.chainRules.IsConstantinople: + table = &constantinopleInstructionSet + case evm.chainRules.IsByzantium: + table = &byzantiumInstructionSet + case evm.chainRules.IsEIP158: + table = &spuriousDragonInstructionSet + case evm.chainRules.IsEIP150: + table = &tangerineWhistleInstructionSet + case evm.chainRules.IsHomestead: + table = &homesteadInstructionSet + default: + table = &frontierInstructionSet } - - return &EVMInterpreter{ - evm: evm, - cfg: cfg, + var extraEips []int + if len(evm.Config.ExtraEips) > 0 { + // Deep-copy jumptable to prevent modification of opcodes in other tables + table = copyJumpTable(table) + } + for _, eip := range evm.Config.ExtraEips { + if err := EnableEIP(eip, table); err != nil { + // Disable it, so caller can check if it's activated or not + log.Error("EIP activation failed", "eip", eip, "error", err) + } else { + extraEips = append(extraEips, eip) + } } + evm.Config.ExtraEips = extraEips + return &EVMInterpreter{evm: evm, table: table} } // Run loops and evaluates the contract's code with the given input data and returns @@ -193,6 +172,7 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) ( gasCopy uint64 // for EVMLogger to log gas remaining before execution logged bool // deferred EVMLogger should ignore already logged steps res []byte // result of the opcode execution function + debug = in.evm.Config.Tracer != nil ) // Don't move this deferred function, it's placed before the capturestate-deferred method, @@ -203,13 +183,13 @@ 
func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) ( }() contract.Input = input - if in.cfg.Debug { + if debug { defer func() { if err != nil { if !logged { - in.cfg.Tracer.CaptureState(pcCopy, op, gasCopy, cost, callContext, in.returnData, in.evm.depth, err) + in.evm.Config.Tracer.CaptureState(pcCopy, op, gasCopy, cost, callContext, in.returnData, in.evm.depth, err) } else { - in.cfg.Tracer.CaptureFault(pcCopy, op, gasCopy, cost, callContext, in.evm.depth, err) + in.evm.Config.Tracer.CaptureFault(pcCopy, op, gasCopy, cost, callContext, in.evm.depth, err) } } }() @@ -219,14 +199,14 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) ( // the execution of one of the operations or until the done flag is set by the // parent context. for { - if in.cfg.Debug { + if debug { // Capture pre-execution values for tracing. logged, pcCopy, gasCopy = false, pc, contract.Gas } // Get the operation from the jump table and validate the stack to ensure there are // enough stack items available to perform the operation. 
op = contract.GetOp(pc) - operation := in.cfg.JumpTable[op] + operation := in.table[op] cost = operation.constantGas // For tracing // Validate stack if sLen := stack.len(); sLen < operation.minStack { @@ -265,15 +245,15 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) ( return nil, vmerrs.ErrOutOfGas } // Do tracing before memory expansion - if in.cfg.Debug { - in.cfg.Tracer.CaptureState(pc, op, gasCopy, cost, callContext, in.returnData, in.evm.depth, err) + if debug { + in.evm.Config.Tracer.CaptureState(pc, op, gasCopy, cost, callContext, in.returnData, in.evm.depth, err) logged = true } if memorySize > 0 { mem.Resize(memorySize) } - } else if in.cfg.Debug { - in.cfg.Tracer.CaptureState(pc, op, gasCopy, cost, callContext, in.returnData, in.evm.depth, err) + } else if debug { + in.evm.Config.Tracer.CaptureState(pc, op, gasCopy, cost, callContext, in.returnData, in.evm.depth, err) logged = true } diff --git a/coreth/core/vm/interpreter_test.go b/coreth/core/vm/interpreter_test.go index b8a3b485..018d7af1 100644 --- a/coreth/core/vm/interpreter_test.go +++ b/coreth/core/vm/interpreter_test.go @@ -33,6 +33,7 @@ import ( "github.com/ava-labs/coreth/core/rawdb" "github.com/ava-labs/coreth/core/state" + "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/params" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/math" @@ -52,7 +53,7 @@ func TestLoopInterrupt(t *testing.T) { } for i, tt := range loopInterruptTests { - statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) statedb.CreateAccount(address) statedb.SetCode(address, common.Hex2Bytes(tt)) statedb.Finalise(true) diff --git a/coreth/core/vm/jump_table.go b/coreth/core/vm/jump_table.go index 6e51d9b4..27029f7d 100644 --- a/coreth/core/vm/jump_table.go +++ b/coreth/core/vm/jump_table.go @@ -65,7 +65,7 @@ 
var ( apricotPhase1InstructionSet = newApricotPhase1InstructionSet() apricotPhase2InstructionSet = newApricotPhase2InstructionSet() apricotPhase3InstructionSet = newApricotPhase3InstructionSet() - dUpgradeInstructionSet = newDUpgradeInstructionSet() + durangoInstructionSet = newDurangoInstructionSet() ) // JumpTable contains the EVM opcodes supported at a given fork. @@ -89,7 +89,9 @@ func validate(jt JumpTable) JumpTable { return jt } -func newDUpgradeInstructionSet() JumpTable { +// newDurangoInstructionSet returns the frontier, homestead, byzantium, +// constantinople, istanbul, petersburg, subnet-evm, durango instructions. +func newDurangoInstructionSet() JumpTable { instructionSet := newApricotPhase3InstructionSet() enable3855(&instructionSet) // PUSH0 instruction enable3860(&instructionSet) // Limit and meter initcode @@ -97,7 +99,7 @@ func newDUpgradeInstructionSet() JumpTable { } // newApricotPhase3InstructionSet returns the frontier, homestead, byzantium, -// contantinople, istanbul, petersburg, apricotPhase1, 2, and 3 instructions. +// constantinople, istanbul, petersburg, apricotPhase1, 2, and 3 instructions. func newApricotPhase3InstructionSet() JumpTable { instructionSet := newApricotPhase2InstructionSet() enable3198(&instructionSet) // Base fee opcode https://eips.ethereum.org/EIPS/eip-3198 @@ -105,7 +107,7 @@ func newApricotPhase3InstructionSet() JumpTable { } // newApricotPhase1InstructionSet returns the frontier, -// homestead, byzantium, contantinople petersburg, +// homestead, byzantium, constantinople petersburg, // istanbul, and apricotPhase1 instructions. func newApricotPhase2InstructionSet() JumpTable { instructionSet := newApricotPhase1InstructionSet() @@ -117,7 +119,7 @@ func newApricotPhase2InstructionSet() JumpTable { } // newApricotPhase1InstructionSet returns the frontier, -// homestead, byzantium, contantinople petersburg, +// homestead, byzantium, constantinople petersburg, // and istanbul instructions. 
func newApricotPhase1InstructionSet() JumpTable { instructionSet := newIstanbulInstructionSet() @@ -128,7 +130,7 @@ func newApricotPhase1InstructionSet() JumpTable { } // newIstanbulInstructionSet returns the frontier, -// homestead, byzantium, contantinople and petersburg instructions. +// homestead, byzantium, constantinople and petersburg instructions. func newIstanbulInstructionSet() JumpTable { instructionSet := newConstantinopleInstructionSet() @@ -140,7 +142,7 @@ func newIstanbulInstructionSet() JumpTable { } // newConstantinopleInstructionSet returns the frontier, homestead, -// byzantium and contantinople instructions. +// byzantium and constantinople instructions. func newConstantinopleInstructionSet() JumpTable { instructionSet := newByzantiumInstructionSet() instructionSet[SHL] = &operation{ diff --git a/coreth/core/vm/jump_table_export.go b/coreth/core/vm/jump_table_export.go new file mode 100644 index 00000000..f5bedf61 --- /dev/null +++ b/coreth/core/vm/jump_table_export.go @@ -0,0 +1,71 @@ +// Copyright 2023 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package vm + +import ( + "github.com/ava-labs/coreth/params" +) + +// LookupInstructionSet returns the instructionset for the fork configured by +// the rules. 
+func LookupInstructionSet(rules params.Rules) (JumpTable, error) { + switch { + case rules.IsDurango: + return newDurangoInstructionSet(), nil + case rules.IsApricotPhase3, rules.IsApricotPhase4, + rules.IsApricotPhase5, rules.IsApricotPhasePre6, + rules.IsApricotPhase6, rules.IsApricotPhasePost6, + rules.IsBanff, rules.IsCortina: + return newApricotPhase3InstructionSet(), nil + case rules.IsApricotPhase2: + return newApricotPhase2InstructionSet(), nil + case rules.IsApricotPhase1: + return newApricotPhase1InstructionSet(), nil + case rules.IsIstanbul: + return newIstanbulInstructionSet(), nil + case rules.IsConstantinople: + return newConstantinopleInstructionSet(), nil + case rules.IsByzantium: + return newByzantiumInstructionSet(), nil + case rules.IsEIP158: + return newSpuriousDragonInstructionSet(), nil + case rules.IsEIP150: + return newTangerineWhistleInstructionSet(), nil + case rules.IsHomestead: + return newHomesteadInstructionSet(), nil + } + return newFrontierInstructionSet(), nil +} + +// Stack returns the minimum and maximum stack requirements. +func (op *operation) Stack() (int, int) { + return op.minStack, op.maxStack +} + +// HasCost returns true if the opcode has a cost. Opcodes which do _not_ have +// a cost assigned are one of two things: +// - undefined, a.k.a invalid opcodes, +// - the STOP opcode. +// This method can thus be used to check if an opcode is "Invalid (or STOP)". +func (op *operation) HasCost() bool { + // Ideally, we'd check this: + // return op.execute == opUndefined + // However, go-lang does not allow that. So we'll just check some other + // 'indicators' that this is an invalid op.
Alas, STOP is impossible to + // filter out + return op.dynamicGas != nil || op.constantGas != 0 +} diff --git a/coreth/ethdb/leveldb/leveldb_test.go b/coreth/core/vm/jump_table_test.go similarity index 65% rename from coreth/ethdb/leveldb/leveldb_test.go rename to coreth/core/vm/jump_table_test.go index 8498a5a6..6e838337 100644 --- a/coreth/ethdb/leveldb/leveldb_test.go +++ b/coreth/core/vm/jump_table_test.go @@ -1,4 +1,4 @@ -// (c) 2021-2022, Ava Labs, Inc. +// (c) 2023, Ava Labs, Inc. // // This file is a derived work, based on the go-ethereum library whose original // notices appear below. @@ -8,7 +8,7 @@ // // Much love to the original authors for their work. // ********** -// Copyright 2019 The go-ethereum Authors +// Copyright 2022 The go-ethereum Authors // This file is part of the go-ethereum library. // // The go-ethereum library is free software: you can redistribute it and/or modify @@ -24,27 +24,22 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . 
-package leveldb +package vm import ( "testing" - "github.com/ava-labs/coreth/ethdb" - "github.com/ava-labs/coreth/ethdb/dbtest" - "github.com/syndtr/goleveldb/leveldb" - "github.com/syndtr/goleveldb/leveldb/storage" + "github.com/stretchr/testify/require" ) -func TestLevelDB(t *testing.T) { - t.Run("DatabaseSuite", func(t *testing.T) { - dbtest.TestDatabaseSuite(t, func() ethdb.KeyValueStore { - db, err := leveldb.Open(storage.NewMemStorage(), nil) - if err != nil { - t.Fatal(err) - } - return &Database{ - db: db, - } - }) - }) +// TestJumpTableCopy tests that a deep copy is necessary to prevent modifying the shared jump table +func TestJumpTableCopy(t *testing.T) { + tbl := newDurangoInstructionSet() + require.Equal(t, uint64(0), tbl[SLOAD].constantGas) + + // a deep copy won't modify the shared jump table + deepCopy := copyJumpTable(&tbl) + deepCopy[SLOAD].constantGas = 100 + require.Equal(t, uint64(100), deepCopy[SLOAD].constantGas) + require.Equal(t, uint64(0), tbl[SLOAD].constantGas) } diff --git a/coreth/core/vm/logger.go b/coreth/core/vm/logger.go index b376739c..397aff70 100644 --- a/coreth/core/vm/logger.go +++ b/coreth/core/vm/logger.go @@ -28,7 +28,6 @@ package vm import ( "math/big" - "time" "github.com/ethereum/go-ethereum/common" ) @@ -44,7 +43,7 @@ type EVMLogger interface { CaptureTxEnd(restGas uint64) // Top call frame CaptureStart(env *EVM, from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) - CaptureEnd(output []byte, gasUsed uint64, t time.Duration, err error) + CaptureEnd(output []byte, gasUsed uint64, err error) // Rest of call frames CaptureEnter(typ OpCode, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) CaptureExit(output []byte, gasUsed uint64, err error) diff --git a/coreth/core/vm/opcodes.go b/coreth/core/vm/opcodes.go index 82edbf38..f8a86784 100644 --- a/coreth/core/vm/opcodes.go +++ b/coreth/core/vm/opcodes.go @@ -35,11 +35,7 @@ type OpCode byte // IsPush specifies
if an opcode is a PUSH opcode. func (op OpCode) IsPush() bool { - switch op { - case PUSH1, PUSH2, PUSH3, PUSH4, PUSH5, PUSH6, PUSH7, PUSH8, PUSH9, PUSH10, PUSH11, PUSH12, PUSH13, PUSH14, PUSH15, PUSH16, PUSH17, PUSH18, PUSH19, PUSH20, PUSH21, PUSH22, PUSH23, PUSH24, PUSH25, PUSH26, PUSH27, PUSH28, PUSH29, PUSH30, PUSH31, PUSH32: - return true - } - return false + return PUSH1 <= op && op <= PUSH32 } // 0x0 range - arithmetic ops. @@ -216,6 +212,12 @@ const ( LOG4 ) +// 0xb0 range. +const ( + TLOAD OpCode = 0xb3 + TSTORE OpCode = 0xb4 +) + const ( BALANCEMC = 0xcd CALLEX = 0xcf @@ -302,9 +304,7 @@ var opCodeToString = map[OpCode]string{ BASEFEE: "BASEFEE", // 0x50 range - 'storage' and execution. - POP: "POP", - //DUP: "DUP", - //SWAP: "SWAP", + POP: "POP", MLOAD: "MLOAD", MSTORE: "MSTORE", MSTORE8: "MSTORE8", @@ -318,7 +318,7 @@ var opCodeToString = map[OpCode]string{ JUMPDEST: "JUMPDEST", PUSH0: "PUSH0", - // 0x60 range - push. + // 0x60 range - pushes. PUSH1: "PUSH1", PUSH2: "PUSH2", PUSH3: "PUSH3", @@ -352,6 +352,7 @@ var opCodeToString = map[OpCode]string{ PUSH31: "PUSH31", PUSH32: "PUSH32", + // 0x80 - dups. DUP1: "DUP1", DUP2: "DUP2", DUP3: "DUP3", @@ -369,6 +370,7 @@ var opCodeToString = map[OpCode]string{ DUP15: "DUP15", DUP16: "DUP16", + // 0x90 - swaps. SWAP1: "SWAP1", SWAP2: "SWAP2", SWAP3: "SWAP3", @@ -385,13 +387,19 @@ var opCodeToString = map[OpCode]string{ SWAP14: "SWAP14", SWAP15: "SWAP15", SWAP16: "SWAP16", - LOG0: "LOG0", - LOG1: "LOG1", - LOG2: "LOG2", - LOG3: "LOG3", - LOG4: "LOG4", - // 0xf0 range. + // 0xa0 range - logging ops. + LOG0: "LOG0", + LOG1: "LOG1", + LOG2: "LOG2", + LOG3: "LOG3", + LOG4: "LOG4", + + // 0xb0 range. + TLOAD: "TLOAD", + TSTORE: "TSTORE", + + // 0xf0 range - closures. 
CREATE: "CREATE", CALL: "CALL", CALLEX: "CALLEX", @@ -552,6 +560,8 @@ var stringToOp = map[string]OpCode{ "LOG2": LOG2, "LOG3": LOG3, "LOG4": LOG4, + "TLOAD": TLOAD, + "TSTORE": TSTORE, "CREATE": CREATE, "CREATE2": CREATE2, "CALL": CALL, diff --git a/coreth/core/vm/runtime/runtime.go b/coreth/core/vm/runtime/runtime.go index 74d5499f..81ea6792 100644 --- a/coreth/core/vm/runtime/runtime.go +++ b/coreth/core/vm/runtime/runtime.go @@ -29,10 +29,10 @@ package runtime import ( "math" "math/big" - "time" "github.com/ava-labs/coreth/core/rawdb" "github.com/ava-labs/coreth/core/state" + "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/core/vm" "github.com/ava-labs/coreth/params" "github.com/ethereum/go-ethereum/common" @@ -47,7 +47,7 @@ type Config struct { Origin common.Address Coinbase common.Address BlockNumber *big.Int - Time *big.Int + Time uint64 GasLimit uint64 GasPrice *big.Int Value *big.Int @@ -68,7 +68,6 @@ func setDefaults(cfg *Config) { DAOForkBlock: new(big.Int), DAOForkSupport: false, EIP150Block: new(big.Int), - EIP150Hash: common.Hash{}, EIP155Block: new(big.Int), EIP158Block: new(big.Int), ByzantiumBlock: new(big.Int), @@ -76,19 +75,16 @@ func setDefaults(cfg *Config) { PetersburgBlock: new(big.Int), IstanbulBlock: new(big.Int), MuirGlacierBlock: new(big.Int), - ApricotPhase1BlockTimestamp: new(big.Int), - ApricotPhase2BlockTimestamp: new(big.Int), - ApricotPhase3BlockTimestamp: new(big.Int), - ApricotPhase4BlockTimestamp: new(big.Int), + ApricotPhase1BlockTimestamp: new(uint64), + ApricotPhase2BlockTimestamp: new(uint64), + ApricotPhase3BlockTimestamp: new(uint64), + ApricotPhase4BlockTimestamp: new(uint64), } } if cfg.Difficulty == nil { cfg.Difficulty = new(big.Int) } - if cfg.Time == nil { - cfg.Time = big.NewInt(time.Now().Unix()) - } if cfg.GasLimit == 0 { cfg.GasLimit = math.MaxUint64 } @@ -123,16 +119,19 @@ func Execute(code, input []byte, cfg *Config) ([]byte, *state.StateDB, error) { setDefaults(cfg) if cfg.State == nil { - 
cfg.State, _ = state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + cfg.State, _ = state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) } var ( address = common.BytesToAddress([]byte("contract")) vmenv = NewEnv(cfg) sender = vm.AccountRef(cfg.Origin) + rules = cfg.ChainConfig.AvalancheRules(vmenv.Context.BlockNumber, vmenv.Context.Time) ) - if rules := cfg.ChainConfig.AvalancheRules(vmenv.Context.BlockNumber, vmenv.Context.Time); rules.IsApricotPhase2 { - cfg.State.PrepareAccessList(cfg.Origin, &address, vm.ActivePrecompiles(rules), nil) - } + // Execute the preparatory steps for state transition which includes: + // - prepare accessList(post-berlin/ApricotPhase2) + // - reset transient storage(eip 1153) + cfg.State.Prepare(rules, cfg.Origin, cfg.Coinbase, &address, vm.ActivePrecompiles(rules), nil) + cfg.State.CreateAccount(address) // set the receiver's (the executing contract) code for execution. cfg.State.SetCode(address, code) @@ -144,7 +143,6 @@ func Execute(code, input []byte, cfg *Config) ([]byte, *state.StateDB, error) { cfg.GasLimit, cfg.Value, ) - return ret, cfg.State, err } @@ -156,15 +154,18 @@ func Create(input []byte, cfg *Config) ([]byte, common.Address, uint64, error) { setDefaults(cfg) if cfg.State == nil { - cfg.State, _ = state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + cfg.State, _ = state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) } var ( vmenv = NewEnv(cfg) sender = vm.AccountRef(cfg.Origin) + rules = cfg.ChainConfig.AvalancheRules(vmenv.Context.BlockNumber, vmenv.Context.Time) ) - if rules := cfg.ChainConfig.AvalancheRules(vmenv.Context.BlockNumber, vmenv.Context.Time); rules.IsApricotPhase2 { - cfg.State.PrepareAccessList(cfg.Origin, nil, vm.ActivePrecompiles(rules), nil) - } + // Execute the preparatory steps for state transition which includes: + // - prepare accessList(post-berlin/ApricotPhase2) + // - reset transient 
storage(eip 1153) + cfg.State.Prepare(rules, cfg.Origin, cfg.Coinbase, nil, vm.ActivePrecompiles(rules), nil) + // Call the code with the given configuration. code, address, leftOverGas, err := vmenv.Create( sender, @@ -183,14 +184,17 @@ func Create(input []byte, cfg *Config) ([]byte, common.Address, uint64, error) { func Call(address common.Address, input []byte, cfg *Config) ([]byte, uint64, error) { setDefaults(cfg) - vmenv := NewEnv(cfg) - - sender := cfg.State.GetOrNewStateObject(cfg.Origin) - statedb := cfg.State + var ( + vmenv = NewEnv(cfg) + sender = cfg.State.GetOrNewStateObject(cfg.Origin) + statedb = cfg.State + rules = cfg.ChainConfig.AvalancheRules(vmenv.Context.BlockNumber, vmenv.Context.Time) + ) + // Execute the preparatory steps for state transition which includes: + // - prepare accessList(post-berlin/ApricotPhase2) + // - reset transient storage(eip 1153) + statedb.Prepare(rules, cfg.Origin, cfg.Coinbase, &address, vm.ActivePrecompiles(rules), nil) - if rules := cfg.ChainConfig.AvalancheRules(vmenv.Context.BlockNumber, vmenv.Context.Time); rules.IsApricotPhase2 { - statedb.PrepareAccessList(cfg.Origin, &address, vm.ActivePrecompiles(rules), nil) - } // Call the code with the given configuration. 
ret, leftOverGas, err := vmenv.Call( sender, diff --git a/coreth/core/vm/runtime/runtime_test.go b/coreth/core/vm/runtime/runtime_test.go index 58daa614..5203bb3f 100644 --- a/coreth/core/vm/runtime/runtime_test.go +++ b/coreth/core/vm/runtime/runtime_test.go @@ -58,9 +58,6 @@ func TestDefaults(t *testing.T) { t.Error("expected difficulty to be non nil") } - if cfg.Time == nil { - t.Error("expected time to be non nil") - } if cfg.GasLimit == 0 { t.Error("didn't expect gaslimit to be zero") } @@ -116,7 +113,7 @@ func TestExecute(t *testing.T) { } func TestCall(t *testing.T) { - state, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + state, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) address := common.HexToAddress("0x0a") state.SetCode(address, []byte{ byte(vm.PUSH1), 10, @@ -172,7 +169,7 @@ func BenchmarkCall(b *testing.B) { } func benchmarkEVM_Create(bench *testing.B, code string) { var ( - statedb, _ = state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + statedb, _ = state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) sender = common.BytesToAddress([]byte("sender")) receiver = common.BytesToAddress([]byte("receiver")) ) @@ -184,7 +181,7 @@ func benchmarkEVM_Create(bench *testing.B, code string) { State: statedb, GasLimit: 10000000, Difficulty: big.NewInt(0x200000), - Time: new(big.Int).SetUint64(0), + Time: 0, Coinbase: common.Address{}, BlockNumber: new(big.Int).SetUint64(1), ChainConfig: ¶ms.ChainConfig{ @@ -340,15 +337,14 @@ func TestBlockhash(t *testing.T) { func benchmarkNonModifyingCode(gas uint64, code []byte, name string, tracerCode string, b *testing.B) { cfg := new(Config) setDefaults(cfg) - cfg.State, _ = state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + cfg.State, _ = state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) cfg.GasLimit = gas if len(tracerCode) > 0 { - 
tracer, err := tracers.New(tracerCode, new(tracers.Context), nil) + tracer, err := tracers.DefaultDirectory.New(tracerCode, new(tracers.Context), nil) if err != nil { b.Fatal(err) } cfg.EVMConfig = vm.Config{ - Debug: true, Tracer: tracer, } } @@ -524,7 +520,6 @@ func TestEip2929Cases(t *testing.T) { code, ops) Execute(code, nil, &Config{ EVMConfig: vm.Config{ - Debug: true, Tracer: logger.NewMarkdownLogger(nil, os.Stdout), ExtraEips: []int{2929}, }, @@ -678,7 +673,6 @@ func TestColdAccountAccessCost(t *testing.T) { tracer := logger.NewStructLogger(nil) Execute(tc.code, nil, &Config{ EVMConfig: vm.Config{ - Debug: true, Tracer: tracer, }, }) @@ -695,30 +689,30 @@ func TestColdAccountAccessCost(t *testing.T) { func TestRuntimeJSTracer(t *testing.T) { jsTracers := []string{ `{enters: 0, exits: 0, enterGas: 0, gasUsed: 0, steps:0, - step: function() { this.steps++}, - fault: function() {}, - result: function() { - return [this.enters, this.exits,this.enterGas,this.gasUsed, this.steps].join(",") - }, - enter: function(frame) { - this.enters++; + step: function() { this.steps++}, + fault: function() {}, + result: function() { + return [this.enters, this.exits,this.enterGas,this.gasUsed, this.steps].join(",") + }, + enter: function(frame) { + this.enters++; this.enterGas = frame.getGas(); - }, - exit: function(res) { - this.exits++; + }, + exit: function(res) { + this.exits++; this.gasUsed = res.getGasUsed(); }}`, `{enters: 0, exits: 0, enterGas: 0, gasUsed: 0, steps:0, - fault: function() {}, - result: function() { - return [this.enters, this.exits,this.enterGas,this.gasUsed, this.steps].join(",") - }, - enter: function(frame) { - this.enters++; + fault: function() {}, + result: function() { + return [this.enters, this.exits,this.enterGas,this.gasUsed, this.steps].join(",") + }, + enter: function(frame) { + this.enters++; this.enterGas = frame.getGas(); - }, - exit: function(res) { - this.exits++; + }, + exit: function(res) { + this.exits++; this.gasUsed = 
res.getGasUsed(); }}`} tests := []struct { @@ -834,7 +828,7 @@ func TestRuntimeJSTracer(t *testing.T) { main := common.HexToAddress("0xaa") for i, jsTracer := range jsTracers { for j, tc := range tests { - statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) statedb.SetCode(main, tc.code) statedb.SetCode(common.HexToAddress("0xbb"), calleeCode) statedb.SetCode(common.HexToAddress("0xcc"), calleeCode) @@ -842,7 +836,7 @@ func TestRuntimeJSTracer(t *testing.T) { statedb.SetCode(common.HexToAddress("0xee"), calleeCode) statedb.SetCode(common.HexToAddress("0xff"), depressedCode) - tracer, err := tracers.New(jsTracer, new(tracers.Context), nil) + tracer, err := tracers.DefaultDirectory.New(jsTracer, new(tracers.Context), nil) if err != nil { t.Fatal(err) } @@ -850,7 +844,6 @@ func TestRuntimeJSTracer(t *testing.T) { GasLimit: 1000000, State: statedb, EVMConfig: vm.Config{ - Debug: true, Tracer: tracer, }}) if err != nil { @@ -877,15 +870,14 @@ func TestJSTracerCreateTx(t *testing.T) { exit: function(res) { this.exits++ }}` code := []byte{byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, byte(vm.RETURN)} - statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - tracer, err := tracers.New(jsTracer, new(tracers.Context), nil) + statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + tracer, err := tracers.DefaultDirectory.New(jsTracer, new(tracers.Context), nil) if err != nil { t.Fatal(err) } _, _, _, err = Create(code, &Config{ State: statedb, EVMConfig: vm.Config{ - Debug: true, Tracer: tracer, }}) if err != nil { diff --git a/coreth/eth/api.go b/coreth/eth/api.go index 17ba6525..8aa03fa1 100644 --- a/coreth/eth/api.go +++ b/coreth/eth/api.go @@ -96,7 +96,7 @@ func (api *AdminAPI) ExportChain(file string, first *uint64, last *uint64) (bool return false, errors.New("location 
would overwrite an existing file") } // Make sure we can create the file to export into - out, err := os.OpenFile(file, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm) + out, err := os.OpenFile(file, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) if err != nil { return false, err } @@ -195,16 +195,20 @@ func (api *DebugAPI) DumpBlock(blockNr rpc.BlockNumber) (state.Dump, error) { OnlyWithAddresses: true, Max: AccountRangeMaxResults, // Sanity limit over RPC } - var block *types.Block + var header *types.Header if blockNr.IsAccepted() { - block = api.eth.LastAcceptedBlock() + header = api.eth.LastAcceptedBlock().Header() } else { - block = api.eth.blockchain.GetBlockByNumber(uint64(blockNr)) + block := api.eth.blockchain.GetBlockByNumber(uint64(blockNr)) + if block == nil { + return state.Dump{}, fmt.Errorf("block #%d not found", blockNr) + } + header = block.Header() } - if block == nil { + if header == nil { return state.Dump{}, fmt.Errorf("block #%d not found", blockNr) } - stateDb, err := api.eth.BlockChain().StateAt(block.Root()) + stateDb, err := api.eth.BlockChain().StateAt(header.Root) if err != nil { return state.Dump{}, err } @@ -235,16 +239,20 @@ func (api *DebugAPI) AccountRange(blockNrOrHash rpc.BlockNumberOrHash, start hex var err error if number, ok := blockNrOrHash.Number(); ok { - var block *types.Block + var header *types.Header if number.IsAccepted() { - block = api.eth.LastAcceptedBlock() + header = api.eth.LastAcceptedBlock().Header() } else { - block = api.eth.blockchain.GetBlockByNumber(uint64(number)) + block := api.eth.blockchain.GetBlockByNumber(uint64(number)) + if block == nil { + return state.IteratorDump{}, fmt.Errorf("block #%d not found", number) + } + header = block.Header() } - if block == nil { + if header == nil { return state.IteratorDump{}, fmt.Errorf("block #%d not found", number) } - stateDb, err = api.eth.BlockChain().StateAt(block.Root()) + stateDb, err = api.eth.BlockChain().StateAt(header.Root) if err != nil { return 
state.IteratorDump{}, err } @@ -288,19 +296,22 @@ type storageEntry struct { } // StorageRangeAt returns the storage at the given block height and transaction index. -func (api *DebugAPI) StorageRangeAt(blockHash common.Hash, txIndex int, contractAddress common.Address, keyStart hexutil.Bytes, maxResult int) (StorageRangeResult, error) { +func (api *DebugAPI) StorageRangeAt(ctx context.Context, blockHash common.Hash, txIndex int, contractAddress common.Address, keyStart hexutil.Bytes, maxResult int) (StorageRangeResult, error) { // Retrieve the block block := api.eth.blockchain.GetBlockByHash(blockHash) if block == nil { return StorageRangeResult{}, fmt.Errorf("block %#x not found", blockHash) } - _, _, statedb, release, err := api.eth.stateAtTransaction(block, txIndex, 0) + _, _, statedb, release, err := api.eth.stateAtTransaction(ctx, block, txIndex, 0) if err != nil { return StorageRangeResult{}, err } defer release() - st := statedb.StorageTrie(contractAddress) + st, err := statedb.StorageTrie(contractAddress) + if err != nil { + return StorageRangeResult{}, err + } if st == nil { return StorageRangeResult{}, fmt.Errorf("account %x doesn't exist", contractAddress) } @@ -391,11 +402,11 @@ func (api *DebugAPI) getModifiedAccounts(startBlock, endBlock *types.Block) ([]c } triedb := api.eth.BlockChain().StateCache().TrieDB() - oldTrie, err := trie.NewStateTrie(common.Hash{}, startBlock.Root(), triedb) + oldTrie, err := trie.NewStateTrie(trie.StateTrieID(startBlock.Root()), triedb) if err != nil { return nil, err } - newTrie, err := trie.NewStateTrie(common.Hash{}, endBlock.Root(), triedb) + newTrie, err := trie.NewStateTrie(trie.StateTrieID(endBlock.Root()), triedb) if err != nil { return nil, err } @@ -424,9 +435,9 @@ func (api *DebugAPI) GetAccessibleState(from, to rpc.BlockNumber) (uint64, error if num.Int64() < 0 { block := api.eth.blockchain.CurrentBlock() if block == nil { - return 0, fmt.Errorf("current block missing") + return 0, errors.New("current block 
missing") } - return block.NumberU64(), nil + return block.Number.Uint64(), nil } return uint64(num.Int64()), nil } @@ -444,7 +455,7 @@ func (api *DebugAPI) GetAccessibleState(from, to rpc.BlockNumber) (uint64, error return 0, err } if start == end { - return 0, fmt.Errorf("from and to needs to be different") + return 0, errors.New("from and to needs to be different") } if start > end { delta = -1 diff --git a/coreth/eth/api_backend.go b/coreth/eth/api_backend.go index b387407c..f8854809 100644 --- a/coreth/eth/api_backend.go +++ b/coreth/eth/api_backend.go @@ -43,10 +43,10 @@ import ( "github.com/ava-labs/coreth/core/vm" "github.com/ava-labs/coreth/eth/gasprice" "github.com/ava-labs/coreth/eth/tracers" - "github.com/ava-labs/coreth/ethdb" "github.com/ava-labs/coreth/params" "github.com/ava-labs/coreth/rpc" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/event" ) @@ -57,6 +57,7 @@ type EthAPIBackend struct { extRPCEnabled bool allowUnprotectedTxs bool allowUnprotectedTxHashes map[common.Hash]struct{} // Invariant: read-only after creation. 
+ allowUnfinalizedQueries bool eth *Ethereum gpo *gasprice.Oracle } @@ -66,11 +67,15 @@ func (b *EthAPIBackend) ChainConfig() *params.ChainConfig { return b.eth.blockchain.Config() } -func (b *EthAPIBackend) GetVMConfig() *vm.Config { - return b.eth.blockchain.GetVMConfig() +func (b *EthAPIBackend) IsAllowUnfinalizedQueries() bool { + return b.allowUnfinalizedQueries } -func (b *EthAPIBackend) CurrentBlock() *types.Block { +func (b *EthAPIBackend) SetAllowUnfinalizedQueries(allow bool) { + b.allowUnfinalizedQueries = allow +} + +func (b *EthAPIBackend) CurrentBlock() *types.Header { return b.eth.blockchain.CurrentBlock() } @@ -89,7 +94,7 @@ func (b *EthAPIBackend) HeaderByNumber(ctx context.Context, number rpc.BlockNumb return acceptedBlock.Header(), nil } - if !b.GetVMConfig().AllowUnfinalizedQueries && acceptedBlock != nil { + if !b.IsAllowUnfinalizedQueries() && acceptedBlock != nil { if number.Int64() > acceptedBlock.Number().Int64() { return nil, ErrUnfinalizedData } @@ -113,7 +118,7 @@ func (b *EthAPIBackend) HeaderByHash(ctx context.Context, hash common.Hash) (*ty } acceptedBlock := b.eth.LastAcceptedBlock() - if !b.GetVMConfig().AllowUnfinalizedQueries && acceptedBlock != nil { + if !b.IsAllowUnfinalizedQueries() && acceptedBlock != nil { if header.Number.Cmp(acceptedBlock.Number()) > 0 { return nil, ErrUnfinalizedData } @@ -149,7 +154,7 @@ func (b *EthAPIBackend) BlockByNumber(ctx context.Context, number rpc.BlockNumbe return acceptedBlock, nil } - if !b.GetVMConfig().AllowUnfinalizedQueries && acceptedBlock != nil { + if !b.IsAllowUnfinalizedQueries() && acceptedBlock != nil { if number.Int64() > acceptedBlock.Number().Int64() { return nil, ErrUnfinalizedData } @@ -174,7 +179,7 @@ func (b *EthAPIBackend) BlockByHash(ctx context.Context, hash common.Hash) (*typ } acceptedBlock := b.eth.LastAcceptedBlock() - if !b.GetVMConfig().AllowUnfinalizedQueries && acceptedBlock != nil { + if !b.IsAllowUnfinalizedQueries() && acceptedBlock != nil { if 
number.Cmp(acceptedBlock.Number()) > 0 { return nil, ErrUnfinalizedData } @@ -182,6 +187,17 @@ func (b *EthAPIBackend) BlockByHash(ctx context.Context, hash common.Hash) (*typ return block, nil } +// GetBody returns body of a block. It does not resolve special block numbers. +func (b *EthAPIBackend) GetBody(ctx context.Context, hash common.Hash, number rpc.BlockNumber) (*types.Body, error) { + if number < 0 || hash == (common.Hash{}) { + return nil, errors.New("invalid arguments; expect hash and no special block numbers") + } + if body := b.eth.blockchain.GetBody(hash); body != nil { + return body, nil + } + return nil, errors.New("block body not found") +} + func (b *EthAPIBackend) BlockByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*types.Block, error) { if blockNr, ok := blockNrOrHash.Number(); ok { return b.BlockByNumber(ctx, blockNr) @@ -254,14 +270,18 @@ func (b *EthAPIBackend) GetLogs(ctx context.Context, hash common.Hash, number ui return b.eth.blockchain.GetLogs(hash, number), nil } -func (b *EthAPIBackend) GetEVM(ctx context.Context, msg core.Message, state *state.StateDB, header *types.Header, vmConfig *vm.Config) (*vm.EVM, func() error, error) { - vmError := func() error { return nil } +func (b *EthAPIBackend) GetEVM(ctx context.Context, msg *core.Message, state *state.StateDB, header *types.Header, vmConfig *vm.Config, blockCtx *vm.BlockContext) (*vm.EVM, func() error) { if vmConfig == nil { vmConfig = b.eth.blockchain.GetVMConfig() } txContext := core.NewEVMTxContext(msg) - context := core.NewEVMBlockContext(header, b.eth.BlockChain(), nil) - return vm.NewEVM(context, txContext, state, b.eth.blockchain.Config(), *vmConfig), vmError, nil + var context vm.BlockContext + if blockCtx != nil { + context = *blockCtx + } else { + context = core.NewEVMBlockContext(header, b.eth.BlockChain(), nil) + } + return vm.NewEVM(context, txContext, state, b.eth.blockchain.Config(), *vmConfig), state.Error } func (b *EthAPIBackend) 
SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription { @@ -330,7 +350,7 @@ func (b *EthAPIBackend) GetTransaction(ctx context.Context, txHash common.Hash) // expectations with clients (expect an empty response when a transaction // does not exist). acceptedBlock := b.eth.LastAcceptedBlock() - if !b.GetVMConfig().AllowUnfinalizedQueries && acceptedBlock != nil && tx != nil { + if !b.IsAllowUnfinalizedQueries() && acceptedBlock != nil && tx != nil { if blockNumber > acceptedBlock.NumberU64() { return nil, common.Hash{}, 0, 0, nil } @@ -371,7 +391,7 @@ func (b *EthAPIBackend) SuggestGasTipCap(ctx context.Context) (*big.Int, error) return b.gpo.SuggestTipCap(ctx) } -func (b *EthAPIBackend) FeeHistory(ctx context.Context, blockCount int, lastBlock rpc.BlockNumber, rewardPercentiles []float64) (firstBlock *big.Int, reward [][]*big.Int, baseFee []*big.Int, gasUsedRatio []float64, err error) { +func (b *EthAPIBackend) FeeHistory(ctx context.Context, blockCount uint64, lastBlock rpc.BlockNumber, rewardPercentiles []float64) (firstBlock *big.Int, reward [][]*big.Int, baseFee []*big.Int, gasUsedRatio []float64, err error) { return b.gpo.FeeHistory(ctx, blockCount, lastBlock, rewardPercentiles) } @@ -447,11 +467,15 @@ func (b *EthAPIBackend) GetMaxBlocksPerRequest() int64 { } func (b *EthAPIBackend) StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, readOnly bool, preferDisk bool) (*state.StateDB, tracers.StateReleaseFunc, error) { - return b.eth.StateAtBlock(block, reexec, base, readOnly, preferDisk) + return b.eth.StateAtBlock(ctx, block, reexec, base, readOnly, preferDisk) +} + +func (b *EthAPIBackend) StateAtNextBlock(ctx context.Context, parent, nextBlock *types.Block, reexec uint64, base *state.StateDB, readOnly bool, preferDisk bool) (*state.StateDB, tracers.StateReleaseFunc, error) { + return b.eth.StateAtNextBlock(ctx, parent, nextBlock, reexec, base, readOnly, preferDisk) } -func (b *EthAPIBackend) 
StateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (core.Message, vm.BlockContext, *state.StateDB, tracers.StateReleaseFunc, error) { - return b.eth.stateAtTransaction(block, txIndex, reexec) +func (b *EthAPIBackend) StateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (*core.Message, vm.BlockContext, *state.StateDB, tracers.StateReleaseFunc, error) { + return b.eth.stateAtTransaction(ctx, block, txIndex, reexec) } func (b *EthAPIBackend) MinRequiredTip(ctx context.Context, header *types.Header) (*big.Int, error) { diff --git a/coreth/eth/backend.go b/coreth/eth/backend.go index db74165f..55d674d9 100644 --- a/coreth/eth/backend.go +++ b/coreth/eth/backend.go @@ -41,13 +41,13 @@ import ( "github.com/ava-labs/coreth/core/bloombits" "github.com/ava-labs/coreth/core/rawdb" "github.com/ava-labs/coreth/core/state/pruner" + "github.com/ava-labs/coreth/core/txpool" "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/core/vm" "github.com/ava-labs/coreth/eth/ethconfig" "github.com/ava-labs/coreth/eth/filters" "github.com/ava-labs/coreth/eth/gasprice" "github.com/ava-labs/coreth/eth/tracers" - "github.com/ava-labs/coreth/ethdb" "github.com/ava-labs/coreth/internal/ethapi" "github.com/ava-labs/coreth/internal/shutdowncheck" "github.com/ava-labs/coreth/miner" @@ -55,6 +55,7 @@ import ( "github.com/ava-labs/coreth/params" "github.com/ava-labs/coreth/rpc" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/log" ) @@ -74,7 +75,7 @@ type Ethereum struct { config *Config // Handlers - txPool *core.TxPool + txPool *txpool.TxPool blockchain *core.BlockChain // DB interfaces @@ -116,7 +117,7 @@ func roundUpCacheSize(input int, allocSize int) int { func New( stack *node.Node, config *Config, - cb *dummy.ConsensusCallbacks, + cb dummy.ConsensusCallbacks, chainDb ethdb.Database, settings Settings, 
lastAcceptedHash common.Hash, @@ -144,7 +145,7 @@ func New( // Since RecoverPruning will only continue a pruning run that already began, we do not need to ensure that // reprocessState has already been called and completed successfully. To ensure this, we must maintain // that Prune is only run after reprocessState has finished successfully. - if err := pruner.RecoverPruning(config.OfflinePruningDataDirectory, chainDb); err != nil { + if err := pruner.RecoverPruning(config.OfflinePruningDataDirectory, chainDb, config.TrieCleanJournal); err != nil { log.Error("Failed to recover state", "error", err) } @@ -153,7 +154,7 @@ func New( chainDb: chainDb, eventMux: new(event.TypeMux), accountManager: stack.AccountManager(), - engine: dummy.NewDummyEngine(cb), + engine: dummy.NewFakerWithClock(cb, clock), closeBloomHandler: make(chan struct{}), networkID: config.NetworkId, etherbase: config.Miner.Etherbase, @@ -181,7 +182,6 @@ func New( var ( vmConfig = vm.Config{ EnablePreimageRecording: config.EnablePreimageRecording, - AllowUnfinalizedQueries: config.AllowUnfinalizedQueries, } cacheConfig = &core.CacheConfig{ TrieCleanLimit: config.TrieCleanCache, @@ -189,6 +189,7 @@ func New( TrieCleanRejournal: config.TrieCleanRejournal, TrieDirtyLimit: config.TrieDirtyCache, TrieDirtyCommitTarget: config.TrieDirtyCommitTarget, + TriePrefetcherParallelism: config.TriePrefetcherParallelism, Pruning: config.Pruning, AcceptorQueueLimit: config.AcceptorQueueLimit, CommitInterval: config.CommitInterval, @@ -197,12 +198,13 @@ func New( AllowMissingTries: config.AllowMissingTries, SnapshotDelayInit: config.SnapshotDelayInit, SnapshotLimit: config.SnapshotCache, - SnapshotAsync: config.SnapshotAsync, + SnapshotWait: config.SnapshotWait, SnapshotVerify: config.SnapshotVerify, - SkipSnapshotRebuild: config.SkipSnapshotRebuild, + SnapshotNoBuild: config.SkipSnapshotRebuild, Preimages: config.Preimages, AcceptedCacheSize: config.AcceptedCacheSize, TxLookupLimit: config.TxLookupLimit, + 
SkipTxIndexing: config.SkipTxIndexing, } ) @@ -223,7 +225,7 @@ func New( eth.bloomIndexer.Start(eth.blockchain) config.TxPool.Journal = "" - eth.txPool = core.NewTxPool(config.TxPool, eth.blockchain.Config(), eth.blockchain) + eth.txPool = txpool.NewTxPool(config.TxPool, eth.blockchain.Config(), eth.blockchain) eth.miner = miner.New(eth, &config.Miner, eth.blockchain.Config(), eth.EventMux(), eth.engine, clock) @@ -236,6 +238,7 @@ func New( extRPCEnabled: stack.Config().ExtRPCEnabled(), allowUnprotectedTxs: config.AllowUnprotectedTxs, allowUnprotectedTxHashes: allowUnprotectedTxHashes, + allowUnfinalizedQueries: config.AllowUnfinalizedQueries, eth: eth, } if config.AllowUnprotectedTxs { @@ -282,7 +285,7 @@ func (s *Ethereum) APIs() []rpc.API { Name: "eth", }, { Namespace: "eth", - Service: filters.NewFilterAPI(filterSystem, false /* isLightClient */), + Service: filters.NewFilterAPI(filterSystem), Name: "eth-filter", }, { Namespace: "admin", @@ -308,18 +311,6 @@ func (s *Ethereum) Etherbase() (eb common.Address, err error) { if etherbase != (common.Address{}) { return etherbase, nil } - if wallets := s.AccountManager().Wallets(); len(wallets) > 0 { - if accounts := wallets[0].Accounts(); len(accounts) > 0 { - etherbase := accounts[0].Address - - s.lock.Lock() - s.etherbase = etherbase - s.lock.Unlock() - - log.Info("Etherbase automatically configured", "address", etherbase) - return etherbase, nil - } - } return common.Address{}, fmt.Errorf("etherbase must be explicitly specified") } @@ -336,7 +327,7 @@ func (s *Ethereum) Miner() *miner.Miner { return s.miner } func (s *Ethereum) AccountManager() *accounts.Manager { return s.accountManager } func (s *Ethereum) BlockChain() *core.BlockChain { return s.blockchain } -func (s *Ethereum) TxPool() *core.TxPool { return s.txPool } +func (s *Ethereum) TxPool() *txpool.TxPool { return s.txPool } func (s *Ethereum) EventMux() *event.TypeMux { return s.eventMux } func (s *Ethereum) Engine() consensus.Engine { return s.engine 
} func (s *Ethereum) ChainDb() ethdb.Database { return s.chainDb } @@ -367,9 +358,12 @@ func (s *Ethereum) Stop() error { // Clean shutdown marker as the last thing before closing db s.shutdownTracker.Stop() + log.Info("Stopped shutdownTracker") s.chainDb.Close() + log.Info("Closed chaindb") s.eventMux.Stop() + log.Info("Stopped EventMux") return nil } @@ -439,7 +433,13 @@ func (s *Ethereum) handleOfflinePruning(cacheConfig *core.CacheConfig, gspec *co s.blockchain.Stop() s.blockchain = nil log.Info("Starting offline pruning", "dataDir", s.config.OfflinePruningDataDirectory, "bloomFilterSize", s.config.OfflinePruningBloomFilterSize) - pruner, err := pruner.NewPruner(s.chainDb, s.config.OfflinePruningDataDirectory, s.config.OfflinePruningBloomFilterSize) + prunerConfig := pruner.Config{ + BloomSize: s.config.OfflinePruningBloomFilterSize, + Cachedir: s.config.TrieCleanJournal, + Datadir: s.config.OfflinePruningDataDirectory, + } + + pruner, err := pruner.NewPruner(s.chainDb, prunerConfig) if err != nil { return fmt.Errorf("failed to create new pruner with data directory: %s, size: %d, due to: %w", s.config.OfflinePruningDataDirectory, s.config.OfflinePruningBloomFilterSize, err) } diff --git a/coreth/eth/ethconfig/config.go b/coreth/eth/ethconfig/config.go index 740cc5c3..51d88aee 100644 --- a/coreth/eth/ethconfig/config.go +++ b/coreth/eth/ethconfig/config.go @@ -32,6 +32,7 @@ import ( "time" "github.com/ava-labs/coreth/core" + "github.com/ava-labs/coreth/core/txpool" "github.com/ava-labs/coreth/eth/gasprice" "github.com/ava-labs/coreth/miner" "github.com/ethereum/go-ethereum/common" @@ -77,18 +78,19 @@ func init() { func NewDefaultConfig() Config { return Config{ - NetworkId: 1, - TrieCleanCache: 512, - TrieDirtyCache: 256, - TrieDirtyCommitTarget: 20, - SnapshotCache: 256, - AcceptedCacheSize: 32, - Miner: miner.Config{}, - TxPool: core.DefaultTxPoolConfig, - RPCGasCap: 25000000, - RPCEVMTimeout: 5 * time.Second, - GPO: DefaultFullGPOConfig, - RPCTxFeeCap: 1, // 
1 AVAX + NetworkId: 1, + TrieCleanCache: 512, + TrieDirtyCache: 256, + TrieDirtyCommitTarget: 20, + TriePrefetcherParallelism: 16, + SnapshotCache: 256, + AcceptedCacheSize: 32, + Miner: miner.Config{}, + TxPool: txpool.DefaultConfig, + RPCGasCap: 25000000, + RPCEVMTimeout: 5 * time.Second, + GPO: DefaultFullGPOConfig, + RPCTxFeeCap: 1, // 1 AVAX } } @@ -101,7 +103,7 @@ func NewDefaultSgbConfig() Config { SnapshotCache: 256, AcceptedCacheSize: 32, Miner: miner.Config{}, - TxPool: core.DefaultTxPoolConfig, + TxPool: txpool.DefaultConfig, RPCGasCap: 25000000, RPCEVMTimeout: 5 * time.Second, GPO: DefaultFullGPOSgbConfig, @@ -127,7 +129,7 @@ type Config struct { PopulateMissingTriesParallelism int // Number of concurrent readers to use when re-populating missing tries on startup. AllowMissingTries bool // Whether to allow an archival node to run with pruning enabled and corrupt a complete index. SnapshotDelayInit bool // Whether snapshot tree should be initialized on startup or delayed until explicit call - SnapshotAsync bool // Whether to generate the initial snapshot in async mode + SnapshotWait bool // Whether to wait for the initial snapshot generation SnapshotVerify bool // Whether to verify generated snapshots SkipSnapshotRebuild bool // Whether to skip rebuilding the snapshot in favor of returning an error (only set to true for tests) @@ -135,13 +137,14 @@ type Config struct { SkipBcVersionCheck bool `toml:"-"` // TrieDB and snapshot options - TrieCleanCache int - TrieCleanJournal string - TrieCleanRejournal time.Duration - TrieDirtyCache int - TrieDirtyCommitTarget int - SnapshotCache int - Preimages bool + TrieCleanCache int + TrieCleanJournal string + TrieCleanRejournal time.Duration + TrieDirtyCache int + TrieDirtyCommitTarget int + TriePrefetcherParallelism int + SnapshotCache int + Preimages bool // AcceptedCacheSize is the depth of accepted headers cache and accepted // logs cache at the accepted tip. 
@@ -151,7 +154,7 @@ type Config struct { Miner miner.Config // Transaction pool options - TxPool core.TxPoolConfig + TxPool txpool.Config // Gas Price Oracle options GPO gasprice.Config @@ -197,4 +200,9 @@ type Config struct { // * 0: means no limit // * N: means N block limit [HEAD-N+1, HEAD] and delete extra indexes TxLookupLimit uint64 + + // SkipTxIndexing skips indexing transactions. + // This is useful for validators that don't need to index transactions. + // TxLookupLimit can be still used to control unindexing old transactions. + SkipTxIndexing bool } diff --git a/coreth/eth/filters/api.go b/coreth/eth/filters/api.go index 1c30597d..c9610789 100644 --- a/coreth/eth/filters/api.go +++ b/coreth/eth/filters/api.go @@ -37,25 +37,33 @@ import ( "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/interfaces" + "github.com/ava-labs/coreth/internal/ethapi" "github.com/ava-labs/coreth/rpc" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/event" ) +var ( + errInvalidTopic = errors.New("invalid topic(s)") + errFilterNotFound = errors.New("filter not found") +) + // filter is a helper struct that holds meta information over the filter type // and associated subscription in the event system. type filter struct { typ Type deadline *time.Timer // filter is inactive when deadline triggers hashes []common.Hash + fullTx bool + txs []*types.Transaction crit FilterCriteria logs []*types.Log s *Subscription // associated subscription in event system } // FilterAPI offers support to create and manage filters. This will allow external clients to retrieve various -// information related to the Ethereum protocol such als blocks, transactions and logs. +// information related to the Ethereum protocol such as blocks, transactions and logs. type FilterAPI struct { sys *FilterSystem events *EventSystem @@ -65,10 +73,10 @@ type FilterAPI struct { } // NewFilterAPI returns a new FilterAPI instance. 
-func NewFilterAPI(system *FilterSystem, lightMode bool) *FilterAPI { +func NewFilterAPI(system *FilterSystem) *FilterAPI { api := &FilterAPI{ sys: system, - events: NewEventSystem(system, lightMode), + events: NewEventSystem(system), filters: make(map[rpc.ID]*filter), timeout: system.cfg.Timeout, } @@ -107,28 +115,28 @@ func (api *FilterAPI) timeoutLoop(timeout time.Duration) { } } -// NewPendingTransactionFilter creates a filter that fetches pending transaction hashes +// NewPendingTransactionFilter creates a filter that fetches pending transactions // as transactions enter the pending state. // // It is part of the filter package because this filter can be used through the // `eth_getFilterChanges` polling method that is also used for log filters. -func (api *FilterAPI) NewPendingTransactionFilter() rpc.ID { +func (api *FilterAPI) NewPendingTransactionFilter(fullTx *bool) rpc.ID { var ( - pendingTxs = make(chan []common.Hash) + pendingTxs = make(chan []*types.Transaction) pendingTxSub = api.events.SubscribePendingTxs(pendingTxs) ) api.filtersMu.Lock() - api.filters[pendingTxSub.ID] = &filter{typ: PendingTransactionsSubscription, deadline: time.NewTimer(api.timeout), hashes: make([]common.Hash, 0), s: pendingTxSub} + api.filters[pendingTxSub.ID] = &filter{typ: PendingTransactionsSubscription, fullTx: fullTx != nil && *fullTx, deadline: time.NewTimer(api.timeout), txs: make([]*types.Transaction, 0), s: pendingTxSub} api.filtersMu.Unlock() go func() { for { select { - case ph := <-pendingTxs: + case pTx := <-pendingTxs: api.filtersMu.Lock() if f, found := api.filters[pendingTxSub.ID]; found { - f.hashes = append(f.hashes, ph...) + f.txs = append(f.txs, pTx...) 
} api.filtersMu.Unlock() case <-pendingTxSub.Err(): @@ -143,9 +151,10 @@ func (api *FilterAPI) NewPendingTransactionFilter() rpc.ID { return pendingTxSub.ID } -// NewPendingTransactions creates a subscription that is triggered each time a transaction -// enters the transaction pool and was signed from one of the transactions this nodes manages. -func (api *FilterAPI) NewPendingTransactions(ctx context.Context) (*rpc.Subscription, error) { +// NewPendingTransactions creates a subscription that is triggered each time a +// transaction enters the transaction pool. If fullTx is true the full tx is +// sent to the client, otherwise the hash is sent. +func (api *FilterAPI) NewPendingTransactions(ctx context.Context, fullTx *bool) (*rpc.Subscription, error) { notifier, supported := rpc.NotifierFromContext(ctx) if !supported { return &rpc.Subscription{}, rpc.ErrNotificationsUnsupported @@ -154,16 +163,23 @@ func (api *FilterAPI) NewPendingTransactions(ctx context.Context) (*rpc.Subscrip rpcSub := notifier.CreateSubscription() go func() { - txHashes := make(chan []common.Hash, 128) - pendingTxSub := api.events.SubscribePendingTxs(txHashes) + txs := make(chan []*types.Transaction, 128) + pendingTxSub := api.events.SubscribePendingTxs(txs) + chainConfig := api.sys.backend.ChainConfig() for { select { - case hashes := <-txHashes: + case txs := <-txs: // To keep the original behaviour, send a single tx hash in one notification. 
// TODO(rjl493456442) Send a batch of tx hashes in one notification - for _, h := range hashes { - notifier.Notify(rpcSub.ID, h) + latest := api.sys.backend.CurrentHeader() + for _, tx := range txs { + if fullTx != nil && *fullTx { + rpcTx := ethapi.NewRPCTransaction(tx, latest, latest.BaseFee, chainConfig) + notifier.Notify(rpcSub.ID, rpcTx) + } else { + notifier.Notify(rpcSub.ID, tx.Hash()) + } } case <-rpcSub.Err(): pendingTxSub.Unsubscribe() @@ -178,8 +194,10 @@ func (api *FilterAPI) NewPendingTransactions(ctx context.Context) (*rpc.Subscrip return rpcSub, nil } -// NewAcceptedTransactions creates a subscription that is triggered each time a transaction is accepted. -func (api *FilterAPI) NewAcceptedTransactions(ctx context.Context) (*rpc.Subscription, error) { +// NewAcceptedTransactions creates a subscription that is triggered each time a +// transaction is accepted. If fullTx is true the full tx is +// sent to the client, otherwise the hash is sent. +func (api *FilterAPI) NewAcceptedTransactions(ctx context.Context, fullTx *bool) (*rpc.Subscription, error) { notifier, supported := rpc.NotifierFromContext(ctx) if !supported { return &rpc.Subscription{}, rpc.ErrNotificationsUnsupported @@ -188,14 +206,23 @@ func (api *FilterAPI) NewAcceptedTransactions(ctx context.Context) (*rpc.Subscri rpcSub := notifier.CreateSubscription() go func() { - txHashes := make(chan []common.Hash, 128) - acceptedTxSub := api.events.SubscribeAcceptedTxs(txHashes) + txs := make(chan []*types.Transaction, 128) + acceptedTxSub := api.events.SubscribeAcceptedTxs(txs) + chainConfig := api.sys.backend.ChainConfig() for { select { - case hashes := <-txHashes: - for _, h := range hashes { - notifier.Notify(rpcSub.ID, h) + case txs := <-txs: + // To keep the original behaviour, send a single tx hash in one notification. 
+ // TODO(rjl493456442) Send a batch of tx hashes in one notification + latest := api.sys.backend.LastAcceptedBlock().Header() + for _, tx := range txs { + if fullTx != nil && *fullTx { + rpcTx := ethapi.NewRPCTransaction(tx, latest, latest.BaseFee, chainConfig) + notifier.Notify(rpcSub.ID, rpcTx) + } else { + notifier.Notify(rpcSub.ID, tx.Hash()) + } } case <-rpcSub.Err(): acceptedTxSub.Unsubscribe() @@ -218,7 +245,7 @@ func (api *FilterAPI) NewBlockFilter() rpc.ID { headerSub *Subscription ) - if api.sys.backend.GetVMConfig().AllowUnfinalizedQueries { + if api.sys.backend.IsAllowUnfinalizedQueries() { headerSub = api.events.SubscribeNewHeads(headers) } else { headerSub = api.events.SubscribeAcceptedHeads(headers) @@ -264,7 +291,7 @@ func (api *FilterAPI) NewHeads(ctx context.Context) (*rpc.Subscription, error) { headersSub event.Subscription ) - if api.sys.backend.GetVMConfig().AllowUnfinalizedQueries { + if api.sys.backend.IsAllowUnfinalizedQueries() { headersSub = api.events.SubscribeNewHeads(headers) } else { headersSub = api.events.SubscribeAcceptedHeads(headers) @@ -301,7 +328,7 @@ func (api *FilterAPI) Logs(ctx context.Context, crit FilterCriteria) (*rpc.Subsc err error ) - if api.sys.backend.GetVMConfig().AllowUnfinalizedQueries { + if api.sys.backend.IsAllowUnfinalizedQueries() { logsSub, err = api.events.SubscribeLogs(interfaces.FilterQuery(crit), matchedLogs) if err != nil { return nil, err @@ -356,7 +383,7 @@ func (api *FilterAPI) NewFilter(crit FilterCriteria) (rpc.ID, error) { err error ) - if api.sys.backend.GetVMConfig().AllowUnfinalizedQueries { + if api.sys.backend.IsAllowUnfinalizedQueries() { logsSub, err = api.events.SubscribeLogs(interfaces.FilterQuery(crit), logs) if err != nil { return rpc.ID(""), err @@ -449,7 +476,7 @@ func (api *FilterAPI) GetFilterLogs(ctx context.Context, id rpc.ID) ([]*types.Lo api.filtersMu.Unlock() if !found || f.typ != LogsSubscription { - return nil, fmt.Errorf("filter not found") + return nil, errFilterNotFound } 
var filter *Filter @@ -493,6 +520,14 @@ func (api *FilterAPI) GetFilterChanges(id rpc.ID) (interface{}, error) { api.filtersMu.Lock() defer api.filtersMu.Unlock() + chainConfig := api.sys.backend.ChainConfig() + latest := api.sys.backend.CurrentHeader() + + var baseFee *big.Int + if latest != nil { + baseFee = latest.BaseFee + } + if f, found := api.filters[id]; found { if !f.deadline.Stop() { // timer expired but filter is not yet removed in timeout loop @@ -502,10 +537,26 @@ func (api *FilterAPI) GetFilterChanges(id rpc.ID) (interface{}, error) { f.deadline.Reset(api.timeout) switch f.typ { - case PendingTransactionsSubscription, BlocksSubscription, AcceptedBlocksSubscription, AcceptedTransactionsSubscription: + case BlocksSubscription, AcceptedBlocksSubscription: hashes := f.hashes f.hashes = nil return returnHashes(hashes), nil + case PendingTransactionsSubscription, AcceptedTransactionsSubscription: + if f.fullTx { + txs := make([]*ethapi.RPCTransaction, 0, len(f.txs)) + for _, tx := range f.txs { + txs = append(txs, ethapi.NewRPCTransaction(tx, latest, baseFee, chainConfig)) + } + f.txs = nil + return txs, nil + } else { + hashes := make([]common.Hash, 0, len(f.txs)) + for _, tx := range f.txs { + hashes = append(hashes, tx.Hash()) + } + f.txs = nil + return hashes, nil + } case LogsSubscription, AcceptedLogsSubscription, MinedAndPendingLogsSubscription: logs := f.logs f.logs = nil @@ -513,7 +564,7 @@ func (api *FilterAPI) GetFilterChanges(id rpc.ID) (interface{}, error) { } } - return []interface{}{}, fmt.Errorf("filter not found") + return []interface{}{}, errFilterNotFound } // returnHashes is a helper that will return an empty hash array case the given hash array is nil, @@ -552,7 +603,7 @@ func (args *FilterCriteria) UnmarshalJSON(data []byte) error { if raw.BlockHash != nil { if raw.FromBlock != nil || raw.ToBlock != nil { // BlockHash is mutually exclusive with FromBlock/ToBlock criteria - return fmt.Errorf("cannot specify both BlockHash and 
FromBlock/ToBlock, choose one or the other") + return errors.New("cannot specify both BlockHash and FromBlock/ToBlock, choose one or the other") } args.BlockHash = raw.BlockHash } else { @@ -625,11 +676,11 @@ func (args *FilterCriteria) UnmarshalJSON(data []byte) error { } args.Topics[i] = append(args.Topics[i], parsed) } else { - return fmt.Errorf("invalid topic(s)") + return errInvalidTopic } } default: - return fmt.Errorf("invalid topic(s)") + return errInvalidTopic } } } diff --git a/coreth/eth/filters/api_test.go b/coreth/eth/filters/api_test.go index 48a09d34..72838b4d 100644 --- a/coreth/eth/filters/api_test.go +++ b/coreth/eth/filters/api_test.go @@ -56,7 +56,7 @@ func TestUnmarshalJSONNewFilterArgs(t *testing.T) { // from, to block number var test1 FilterCriteria - vector := fmt.Sprintf(`{"fromBlock":"%#x","toBlock":"%#x"}`, fromBlock, toBlock) + vector := fmt.Sprintf(`{"fromBlock":"%v","toBlock":"%v"}`, fromBlock, toBlock) if err := json.Unmarshal([]byte(vector), &test1); err != nil { t.Fatal(err) } diff --git a/coreth/eth/filters/bench_test.go b/coreth/eth/filters/bench_test.go index 7100e9fc..de3f9b26 100644 --- a/coreth/eth/filters/bench_test.go +++ b/coreth/eth/filters/bench_test.go @@ -35,9 +35,9 @@ import ( "github.com/ava-labs/coreth/core/bloombits" "github.com/ava-labs/coreth/core/rawdb" "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/ethdb" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/bitutil" + "github.com/ethereum/go-ethereum/ethdb" "github.com/stretchr/testify/require" ) diff --git a/coreth/eth/filters/filter.go b/coreth/eth/filters/filter.go index 17c7b104..c873ea1f 100644 --- a/coreth/eth/filters/filter.go +++ b/coreth/eth/filters/filter.go @@ -54,7 +54,7 @@ type Filter struct { // NewRangeFilter creates a new filter which uses a bloom filter on blocks to // figure out whether a particular block is interesting or not. 
func (sys *FilterSystem) NewRangeFilter(begin, end int64, addresses []common.Address, topics [][]common.Hash) (*Filter, error) { - allowUnfinalizedQueries := sys.backend.GetVMConfig().AllowUnfinalizedQueries + allowUnfinalizedQueries := sys.backend.IsAllowUnfinalizedQueries() acceptedBlock := sys.backend.LastAcceptedBlock() // Flatten the address and topic filter clauses into a single bloombits filter @@ -128,7 +128,7 @@ func (f *Filter) Logs(ctx context.Context) ([]*types.Log, error) { if header == nil { return nil, errors.New("unknown block") } - return f.blockLogs(ctx, header, false) + return f.blockLogs(ctx, header) } // Short-cut if all we care about is pending logs if f.begin == rpc.PendingBlockNumber.Int64() { @@ -171,7 +171,7 @@ func (f *Filter) Logs(ctx context.Context) ([]*types.Log, error) { // If the requested range of blocks exceeds the maximum number of blocks allowed by the backend // return an error instead of searching for the logs. - if maxBlocks := f.sys.backend.GetMaxBlocksPerRequest(); int64(end)-f.begin > maxBlocks && maxBlocks > 0 { + if maxBlocks := f.sys.backend.GetMaxBlocksPerRequest(); int64(end)-f.begin >= maxBlocks && maxBlocks > 0 { return nil, fmt.Errorf("requested too many blocks from %d to %d, maximum is set to %d", f.begin, int64(end), maxBlocks) } // Gather all indexed logs, and finish with non indexed ones @@ -229,7 +229,7 @@ func (f *Filter) indexedLogs(ctx context.Context, end uint64) ([]*types.Log, err if header == nil || err != nil { return logs, err } - found, err := f.blockLogs(ctx, header, true) + found, err := f.checkMatches(ctx, header) if err != nil { return logs, err } @@ -247,11 +247,14 @@ func (f *Filter) unindexedLogs(ctx context.Context, end uint64) ([]*types.Log, e var logs []*types.Log for ; f.begin <= int64(end); f.begin++ { + if f.begin%10 == 0 && ctx.Err() != nil { + return logs, ctx.Err() + } header, err := f.sys.backend.HeaderByNumber(ctx, rpc.BlockNumber(f.begin)) if header == nil || err != nil { return 
logs, err } - found, err := f.blockLogs(ctx, header, false) + found, err := f.blockLogs(ctx, header) if err != nil { return logs, err } @@ -261,15 +264,8 @@ func (f *Filter) unindexedLogs(ctx context.Context, end uint64) ([]*types.Log, e } // blockLogs returns the logs matching the filter criteria within a single block. -func (f *Filter) blockLogs(ctx context.Context, header *types.Header, skipBloom bool) ([]*types.Log, error) { - // Fast track: no filtering criteria - if len(f.addresses) == 0 && len(f.topics) == 0 { - list, err := f.sys.getLogs(ctx, header.Hash(), header.Number.Uint64()) - if err != nil { - return nil, err - } - return types.FlattenLogs(list), nil - } else if skipBloom || bloomFilter(header.Bloom, f.addresses, f.topics) { +func (f *Filter) blockLogs(ctx context.Context, header *types.Header) ([]*types.Log, error) { + if bloomFilter(header.Bloom, f.addresses, f.topics) { return f.checkMatches(ctx, header) } return nil, nil diff --git a/coreth/eth/filters/filter_system.go b/coreth/eth/filters/filter_system.go index 1b5e33fc..10cdd24e 100644 --- a/coreth/eth/filters/filter_system.go +++ b/coreth/eth/filters/filter_system.go @@ -30,19 +30,19 @@ package filters import ( "context" + "errors" "fmt" "sync" "time" "github.com/ava-labs/coreth/core" "github.com/ava-labs/coreth/core/bloombits" - "github.com/ava-labs/coreth/core/rawdb" "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/core/vm" - "github.com/ava-labs/coreth/ethdb" "github.com/ava-labs/coreth/interfaces" + "github.com/ava-labs/coreth/params" "github.com/ava-labs/coreth/rpc" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/log" ) @@ -63,9 +63,12 @@ type Backend interface { ChainDb() ethdb.Database HeaderByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*types.Header, error) HeaderByHash(ctx context.Context, blockHash common.Hash) (*types.Header, error) + GetBody(ctx 
context.Context, hash common.Hash, number rpc.BlockNumber) (*types.Body, error) GetReceipts(ctx context.Context, blockHash common.Hash) (types.Receipts, error) GetLogs(ctx context.Context, blockHash common.Hash, number uint64) ([][]*types.Log, error) + CurrentHeader() *types.Header + ChainConfig() *params.ChainConfig SubscribeNewTxsEvent(chan<- core.NewTxsEvent) event.Subscription SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription SubscribeChainAcceptedEvent(ch chan<- core.ChainEvent) event.Subscription @@ -81,13 +84,15 @@ type Backend interface { ServiceFilter(ctx context.Context, session *bloombits.MatcherSession) // Added to the backend interface to support limiting of logs requests - GetVMConfig() *vm.Config + IsAllowUnfinalizedQueries() bool LastAcceptedBlock() *types.Block GetMaxBlocksPerRequest() int64 } // FilterSystem holds resources shared by all filters. type FilterSystem struct { + // Note: go-ethereum uses an LRU cache for logs, + // instead we cache logs on the blockchain object itself. backend Backend cfg *Config } @@ -129,16 +134,16 @@ const ( PendingLogsSubscription // MinedAndPendingLogsSubscription queries for logs in mined and pending blocks. 
MinedAndPendingLogsSubscription - // PendingTransactionsSubscription queries tx hashes for pending - // transactions entering the pending state + // PendingTransactionsSubscription queries for pending transactions entering + // the pending state PendingTransactionsSubscription - // AcceptedTransactionsSubscription queries tx hashes for accepted transactions + // AcceptedTransactionsSubscription queries for accepted transactions AcceptedTransactionsSubscription // BlocksSubscription queries hashes for blocks that are imported BlocksSubscription // AcceptedBlocksSubscription queries hashes for blocks that are accepted AcceptedBlocksSubscription - // LastSubscription keeps track of the last index + // LastIndexSubscription keeps track of the last index LastIndexSubscription ) @@ -160,7 +165,7 @@ type subscription struct { created time.Time logsCrit interfaces.FilterQuery logs chan []*types.Log - hashes chan []common.Hash + txs chan []*types.Transaction headers chan *types.Header installed chan struct{} // closed when the filter is installed err chan error // closed when the filter is uninstalled @@ -169,10 +174,8 @@ type subscription struct { // EventSystem creates subscriptions, processes events and broadcasts them to the // subscription which match the subscription criteria. type EventSystem struct { - backend Backend - sys *FilterSystem - lightMode bool - lastHead *types.Header + backend Backend + sys *FilterSystem // Subscriptions txsSub event.Subscription // Subscription for new transaction event @@ -203,11 +206,10 @@ type EventSystem struct { // // The returned manager has a loop that needs to be stopped with the Stop function // or by stopping the given mux. 
-func NewEventSystem(sys *FilterSystem, lightMode bool) *EventSystem { +func NewEventSystem(sys *FilterSystem) *EventSystem { m := &EventSystem{ sys: sys, backend: sys.backend, - lightMode: lightMode, install: make(chan *subscription), uninstall: make(chan *subscription), txsCh: make(chan core.NewTxsEvent, txChanSize), @@ -265,7 +267,7 @@ func (sub *Subscription) Unsubscribe() { case sub.es.uninstall <- sub.f: break uninstallLoop case <-sub.f.logs: - case <-sub.f.hashes: + case <-sub.f.txs: case <-sub.f.headers: } } @@ -320,7 +322,7 @@ func (es *EventSystem) SubscribeLogs(crit interfaces.FilterQuery, logs chan []*t if from >= 0 && to == rpc.LatestBlockNumber { return es.subscribeLogs(crit, logs), nil } - return nil, fmt.Errorf("invalid from and to block combination: from > to") + return nil, errors.New("invalid from and to block combination: from > to") } func (es *EventSystem) SubscribeAcceptedLogs(crit interfaces.FilterQuery, logs chan []*types.Log) (*Subscription, error) { @@ -355,7 +357,7 @@ func (es *EventSystem) subscribeAcceptedLogs(crit interfaces.FilterQuery, logs c logsCrit: crit, created: time.Now(), logs: logs, - hashes: make(chan []common.Hash), + txs: make(chan []*types.Transaction), headers: make(chan *types.Header), installed: make(chan struct{}), err: make(chan error), @@ -372,7 +374,7 @@ func (es *EventSystem) subscribeMinedPendingLogs(crit interfaces.FilterQuery, lo logsCrit: crit, created: time.Now(), logs: logs, - hashes: make(chan []common.Hash), + txs: make(chan []*types.Transaction), headers: make(chan *types.Header), installed: make(chan struct{}), err: make(chan error), @@ -389,7 +391,7 @@ func (es *EventSystem) subscribeLogs(crit interfaces.FilterQuery, logs chan []*t logsCrit: crit, created: time.Now(), logs: logs, - hashes: make(chan []common.Hash), + txs: make(chan []*types.Transaction), headers: make(chan *types.Header), installed: make(chan struct{}), err: make(chan error), @@ -406,7 +408,7 @@ func (es *EventSystem) 
subscribePendingLogs(crit interfaces.FilterQuery, logs ch logsCrit: crit, created: time.Now(), logs: logs, - hashes: make(chan []common.Hash), + txs: make(chan []*types.Transaction), headers: make(chan *types.Header), installed: make(chan struct{}), err: make(chan error), @@ -422,7 +424,7 @@ func (es *EventSystem) SubscribeNewHeads(headers chan *types.Header) *Subscripti typ: BlocksSubscription, created: time.Now(), logs: make(chan []*types.Log), - hashes: make(chan []common.Hash), + txs: make(chan []*types.Transaction), headers: headers, installed: make(chan struct{}), err: make(chan error), @@ -438,7 +440,7 @@ func (es *EventSystem) SubscribeAcceptedHeads(headers chan *types.Header) *Subsc typ: AcceptedBlocksSubscription, created: time.Now(), logs: make(chan []*types.Log), - hashes: make(chan []common.Hash), + txs: make(chan []*types.Transaction), headers: headers, installed: make(chan struct{}), err: make(chan error), @@ -446,15 +448,15 @@ func (es *EventSystem) SubscribeAcceptedHeads(headers chan *types.Header) *Subsc return es.subscribe(sub) } -// SubscribePendingTxs creates a subscription that writes transaction hashes for +// SubscribePendingTxs creates a subscription that writes transactions for // transactions that enter the transaction pool. 
-func (es *EventSystem) SubscribePendingTxs(hashes chan []common.Hash) *Subscription { +func (es *EventSystem) SubscribePendingTxs(txs chan []*types.Transaction) *Subscription { sub := &subscription{ id: rpc.NewID(), typ: PendingTransactionsSubscription, created: time.Now(), logs: make(chan []*types.Log), - hashes: hashes, + txs: txs, headers: make(chan *types.Header), installed: make(chan struct{}), err: make(chan error), @@ -462,15 +464,15 @@ func (es *EventSystem) SubscribePendingTxs(hashes chan []common.Hash) *Subscript return es.subscribe(sub) } -// SubscribeAcceptedTxs creates a subscription that writes transaction hashes for +// SubscribeAcceptedTxs creates a subscription that writes transactions for // transactions have been accepted. -func (es *EventSystem) SubscribeAcceptedTxs(hashes chan []common.Hash) *Subscription { +func (es *EventSystem) SubscribeAcceptedTxs(txs chan []*types.Transaction) *Subscription { sub := &subscription{ id: rpc.NewID(), typ: AcceptedTransactionsSubscription, created: time.Now(), logs: make(chan []*types.Log), - hashes: hashes, + txs: txs, headers: make(chan *types.Header), installed: make(chan struct{}), err: make(chan error), @@ -526,16 +528,12 @@ func (es *EventSystem) handleRemovedLogs(filters filterIndex, ev core.RemovedLog } func (es *EventSystem) handleTxsEvent(filters filterIndex, ev core.NewTxsEvent, accepted bool) { - hashes := make([]common.Hash, 0, len(ev.Txs)) - for _, tx := range ev.Txs { - hashes = append(hashes, tx.Hash()) - } for _, f := range filters[PendingTransactionsSubscription] { - f.hashes <- hashes + f.txs <- ev.Txs } if accepted { for _, f := range filters[AcceptedTransactionsSubscription] { - f.hashes <- hashes + f.txs <- ev.Txs } } } @@ -544,103 +542,12 @@ func (es *EventSystem) handleChainEvent(filters filterIndex, ev core.ChainEvent) for _, f := range filters[BlocksSubscription] { f.headers <- ev.Block.Header() } - if es.lightMode && len(filters[LogsSubscription]) > 0 { - 
es.lightFilterNewHead(ev.Block.Header(), func(header *types.Header, remove bool) { - for _, f := range filters[LogsSubscription] { - if matchedLogs := es.lightFilterLogs(header, f.logsCrit.Addresses, f.logsCrit.Topics, remove); len(matchedLogs) > 0 { - f.logs <- matchedLogs - } - } - }) - } } func (es *EventSystem) handleChainAcceptedEvent(filters filterIndex, ev core.ChainEvent) { for _, f := range filters[AcceptedBlocksSubscription] { f.headers <- ev.Block.Header() } - if es.lightMode && len(filters[LogsSubscription]) > 0 { - es.lightFilterNewHead(ev.Block.Header(), func(header *types.Header, remove bool) { - for _, f := range filters[LogsSubscription] { - if matchedLogs := es.lightFilterLogs(header, f.logsCrit.Addresses, f.logsCrit.Topics, remove); len(matchedLogs) > 0 { - f.logs <- matchedLogs - } - } - }) - } -} - -func (es *EventSystem) lightFilterNewHead(newHeader *types.Header, callBack func(*types.Header, bool)) { - oldh := es.lastHead - es.lastHead = newHeader - if oldh == nil { - return - } - newh := newHeader - // find common ancestor, create list of rolled back and new block hashes - var oldHeaders, newHeaders []*types.Header - for oldh.Hash() != newh.Hash() { - if oldh.Number.Uint64() >= newh.Number.Uint64() { - oldHeaders = append(oldHeaders, oldh) - oldh = rawdb.ReadHeader(es.backend.ChainDb(), oldh.ParentHash, oldh.Number.Uint64()-1) - } - if oldh.Number.Uint64() < newh.Number.Uint64() { - newHeaders = append(newHeaders, newh) - newh = rawdb.ReadHeader(es.backend.ChainDb(), newh.ParentHash, newh.Number.Uint64()-1) - if newh == nil { - // happens when CHT syncing, nothing to do - newh = oldh - } - } - } - // roll back old blocks - for _, h := range oldHeaders { - callBack(h, true) - } - // check new blocks (array is in reverse order) - for i := len(newHeaders) - 1; i >= 0; i-- { - callBack(newHeaders[i], false) - } -} - -// filter logs of a single header in light client mode -func (es *EventSystem) lightFilterLogs(header *types.Header, addresses 
[]common.Address, topics [][]common.Hash, remove bool) []*types.Log { - if bloomFilter(header.Bloom, addresses, topics) { - // Get the logs of the block - ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) - defer cancel() - logsList, err := es.sys.getLogs(ctx, header.Hash(), header.Number.Uint64()) - if err != nil { - return nil - } - var unfiltered []*types.Log - for _, logs := range logsList { - for _, log := range logs { - logcopy := *log - logcopy.Removed = remove - unfiltered = append(unfiltered, &logcopy) - } - } - logs := filterLogs(unfiltered, nil, nil, addresses, topics) - if len(logs) > 0 && logs[0].TxHash == (common.Hash{}) { - // We have matching but non-derived logs - receipts, err := es.backend.GetReceipts(ctx, header.Hash()) - if err != nil { - return nil - } - unfiltered = unfiltered[:0] - for _, receipt := range receipts { - for _, log := range receipt.Logs { - logcopy := *log - logcopy.Removed = remove - unfiltered = append(unfiltered, &logcopy) - } - } - logs = filterLogs(unfiltered, nil, nil, addresses, topics) - } - return logs - } - return nil } // eventLoop (un)installs filters and processes mux events. 
diff --git a/coreth/eth/filters/filter_system_test.go b/coreth/eth/filters/filter_system_test.go index 5b708e77..131c5b27 100644 --- a/coreth/eth/filters/filter_system_test.go +++ b/coreth/eth/filters/filter_system_test.go @@ -28,6 +28,7 @@ package filters import ( "context" + "errors" "fmt" "math/big" "math/rand" @@ -41,33 +42,43 @@ import ( "github.com/ava-labs/coreth/core/bloombits" "github.com/ava-labs/coreth/core/rawdb" "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/core/vm" - "github.com/ava-labs/coreth/ethdb" "github.com/ava-labs/coreth/interfaces" + "github.com/ava-labs/coreth/internal/ethapi" "github.com/ava-labs/coreth/params" "github.com/ava-labs/coreth/rpc" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/event" "github.com/stretchr/testify/require" ) type testBackend struct { - db ethdb.Database - sections uint64 - txFeed event.Feed - acceptedTxFeed event.Feed - logsFeed event.Feed - rmLogsFeed event.Feed - pendingLogsFeed event.Feed - chainFeed event.Feed + db ethdb.Database + sections uint64 + txFeed event.Feed + acceptedTxFeed event.Feed + logsFeed event.Feed + rmLogsFeed event.Feed + pendingLogsFeed event.Feed + chainFeed event.Feed + chainAcceptedFeed event.Feed +} + +func (b *testBackend) ChainConfig() *params.ChainConfig { + return params.TestChainConfig +} + +func (b *testBackend) CurrentHeader() *types.Header { + hdr, _ := b.HeaderByNumber(context.TODO(), rpc.LatestBlockNumber) + return hdr } func (b *testBackend) ChainDb() ethdb.Database { return b.db } -func (b *testBackend) GetVMConfig() *vm.Config { - return &vm.Config{AllowUnfinalizedQueries: true} +func (b *testBackend) IsAllowUnfinalizedQueries() bool { + return true } func (b *testBackend) GetMaxBlocksPerRequest() int64 { @@ -83,14 +94,15 @@ func (b *testBackend) HeaderByNumber(ctx context.Context, blockNr rpc.BlockNumbe hash common.Hash num uint64 ) - if blockNr == rpc.LatestBlockNumber { + 
switch blockNr { + case rpc.LatestBlockNumber, rpc.AcceptedBlockNumber: hash = rawdb.ReadHeadBlockHash(b.db) number := rawdb.ReadHeaderNumber(b.db, hash) if number == nil { return nil, nil } num = *number - } else { + default: num = uint64(blockNr) hash = rawdb.ReadCanonicalHash(b.db, num) } @@ -105,9 +117,18 @@ func (b *testBackend) HeaderByHash(ctx context.Context, hash common.Hash) (*type return rawdb.ReadHeader(b.db, hash, *number), nil } +func (b *testBackend) GetBody(ctx context.Context, hash common.Hash, number rpc.BlockNumber) (*types.Body, error) { + if body := rawdb.ReadBody(b.db, hash, uint64(number)); body != nil { + return body, nil + } + return nil, errors.New("block body not found") +} + func (b *testBackend) GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error) { if number := rawdb.ReadHeaderNumber(b.db, hash); number != nil { - return rawdb.ReadReceipts(b.db, hash, *number, params.TestChainConfig), nil + if header := rawdb.ReadHeader(b.db, hash, *number); header != nil { + return rawdb.ReadReceipts(b.db, hash, *number, header.Time, params.TestChainConfig), nil + } } return nil, nil } @@ -146,7 +167,7 @@ func (b *testBackend) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subsc } func (b *testBackend) SubscribeChainAcceptedEvent(ch chan<- core.ChainEvent) event.Subscription { - return b.chainFeed.Subscribe(ch) + return b.chainAcceptedFeed.Subscribe(ch) } func (b *testBackend) BloomStatus() (uint64, uint64) { @@ -198,10 +219,10 @@ func TestBlockSubscription(t *testing.T) { var ( db = rawdb.NewMemoryDatabase() backend, sys = newTestFilterSystem(t, db, Config{}) - api = NewFilterAPI(sys, false) + api = NewFilterAPI(sys) genesis = &core.Genesis{ Config: params.TestChainConfig, - BaseFee: big.NewInt(params.ApricotPhase4MinBaseFee), + BaseFee: big.NewInt(1), } _, chain, _, _ = core.GenerateChainWithGenesis(genesis, dummy.NewFaker(), 10, 10, func(i int, b *core.BlockGen) {}) chainEvents = []core.ChainEvent{} @@ -253,7 +274,7 @@ 
func TestPendingTxFilter(t *testing.T) { var ( db = rawdb.NewMemoryDatabase() backend, sys = newTestFilterSystem(t, db, Config{}) - api = NewFilterAPI(sys, false) + api = NewFilterAPI(sys) transactions = []*types.Transaction{ types.NewTransaction(0, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil), @@ -266,7 +287,7 @@ func TestPendingTxFilter(t *testing.T) { hashes []common.Hash ) - fid0 := api.NewPendingTransactionFilter() + fid0 := api.NewPendingTransactionFilter(nil) time.Sleep(1 * time.Second) backend.txFeed.Send(core.NewTxsEvent{Txs: transactions}) @@ -302,13 +323,70 @@ func TestPendingTxFilter(t *testing.T) { } } +// TestPendingTxFilterFullTx tests whether pending tx filters retrieve all pending transactions that are posted to the event mux. +func TestPendingTxFilterFullTx(t *testing.T) { + t.Parallel() + + var ( + db = rawdb.NewMemoryDatabase() + backend, sys = newTestFilterSystem(t, db, Config{}) + api = NewFilterAPI(sys) + + transactions = []*types.Transaction{ + types.NewTransaction(0, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil), + types.NewTransaction(1, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil), + types.NewTransaction(2, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil), + types.NewTransaction(3, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil), + types.NewTransaction(4, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil), + } + + txs []*ethapi.RPCTransaction + ) + + fullTx := true + fid0 := api.NewPendingTransactionFilter(&fullTx) + + time.Sleep(1 * time.Second) + backend.txFeed.Send(core.NewTxsEvent{Txs: transactions}) + + timeout := time.Now().Add(1 * time.Second) + for { + results, err := api.GetFilterChanges(fid0) + if err 
!= nil { + t.Fatalf("Unable to retrieve logs: %v", err) + } + + tx := results.([]*ethapi.RPCTransaction) + txs = append(txs, tx...) + if len(txs) >= len(transactions) { + break + } + // check timeout + if time.Now().After(timeout) { + break + } + + time.Sleep(100 * time.Millisecond) + } + + if len(txs) != len(transactions) { + t.Errorf("invalid number of transactions, want %d transactions(s), got %d", len(transactions), len(txs)) + return + } + for i := range txs { + if txs[i].Hash != transactions[i].Hash() { + t.Errorf("hashes[%d] invalid, want %x, got %x", i, transactions[i].Hash(), txs[i].Hash) + } + } +} + // TestLogFilterCreation test whether a given filter criteria makes sense. // If not it must return an error. func TestLogFilterCreation(t *testing.T) { var ( db = rawdb.NewMemoryDatabase() _, sys = newTestFilterSystem(t, db, Config{}) - api = NewFilterAPI(sys, false) + api = NewFilterAPI(sys) testCases = []struct { crit FilterCriteria @@ -355,7 +433,7 @@ func TestInvalidLogFilterCreation(t *testing.T) { var ( db = rawdb.NewMemoryDatabase() _, sys = newTestFilterSystem(t, db, Config{}) - api = NewFilterAPI(sys, false) + api = NewFilterAPI(sys) ) // different situations where log filter creation should fail. 
@@ -377,7 +455,7 @@ func TestInvalidGetLogsRequest(t *testing.T) { var ( db = rawdb.NewMemoryDatabase() _, sys = newTestFilterSystem(t, db, Config{}) - api = NewFilterAPI(sys, false) + api = NewFilterAPI(sys) blockHash = common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111") ) @@ -402,7 +480,7 @@ func TestLogFilter(t *testing.T) { var ( db = rawdb.NewMemoryDatabase() backend, sys = newTestFilterSystem(t, db, Config{}) - api = NewFilterAPI(sys, false) + api = NewFilterAPI(sys) firstAddr = common.HexToAddress("0x1111111111111111111111111111111111111111") secondAddr = common.HexToAddress("0x2222222222222222222222222222222222222222") @@ -516,7 +594,7 @@ func TestPendingLogsSubscription(t *testing.T) { var ( db = rawdb.NewMemoryDatabase() backend, sys = newTestFilterSystem(t, db, Config{}) - api = NewFilterAPI(sys, false) + api = NewFilterAPI(sys) firstAddr = common.HexToAddress("0x1111111111111111111111111111111111111111") secondAddr = common.HexToAddress("0x2222222222222222222222222222222222222222") @@ -700,7 +778,7 @@ func TestPendingTxFilterDeadlock(t *testing.T) { var ( db = rawdb.NewMemoryDatabase() backend, sys = newTestFilterSystem(t, db, Config{Timeout: timeout}) - api = NewFilterAPI(sys, false) + api = NewFilterAPI(sys) done = make(chan struct{}) ) @@ -724,7 +802,7 @@ func TestPendingTxFilterDeadlock(t *testing.T) { // timeout either in 100ms or 200ms fids := make([]rpc.ID, 20) for i := 0; i < len(fids); i++ { - fid := api.NewPendingTransactionFilter() + fid := api.NewPendingTransactionFilter(nil) fids[i] = fid // Wait for at least one tx to arrive in filter for { @@ -769,7 +847,7 @@ func TestGetLogsRegression(t *testing.T) { var ( db = rawdb.NewMemoryDatabase() _, sys = newSectionedTestFilterSystem(t, db, Config{}, 4096) - api = NewFilterAPI(sys, false) + api = NewFilterAPI(sys) genesis = &core.Genesis{ Config: params.TestChainConfig, } diff --git a/coreth/eth/filters/filter_test.go b/coreth/eth/filters/filter_test.go index 
a49ecd45..bae301f1 100644 --- a/coreth/eth/filters/filter_test.go +++ b/coreth/eth/filters/filter_test.go @@ -29,6 +29,7 @@ package filters import ( "context" "math/big" + "reflect" "testing" "github.com/ava-labs/coreth/consensus/dummy" @@ -36,6 +37,7 @@ import ( "github.com/ava-labs/coreth/core/rawdb" "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/params" + "github.com/ava-labs/coreth/rpc" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" "github.com/stretchr/testify/require" @@ -63,11 +65,10 @@ func BenchmarkFilters(b *testing.B) { gspec = &core.Genesis{ Config: params.TestChainConfig, Alloc: core.GenesisAlloc{addr1: {Balance: big.NewInt(1000000)}}, - BaseFee: big.NewInt(params.ApricotPhase3InitialBaseFee), + BaseFee: big.NewInt(1), } ) defer db.Close() - _, chain, receipts, err := core.GenerateChainWithGenesis(gspec, dummy.NewFaker(), 100010, 10, func(i int, gen *core.BlockGen) { switch i { case 2403: @@ -101,10 +102,11 @@ func BenchmarkFilters(b *testing.B) { } b.ResetTimer() - filter, err := sys.NewRangeFilter(0, -1, []common.Address{addr1, addr2, addr3, addr4}, nil) + filter, err := sys.NewRangeFilter(0, int64(rpc.LatestBlockNumber), []common.Address{addr1, addr2, addr3, addr4}, nil) require.NoError(b, err) for i := 0; i < b.N; i++ { + filter.begin = 0 logs, _ := filter.Logs(context.Background()) if len(logs) != 4 { b.Fatal("expected 4 logs, got", len(logs)) @@ -127,7 +129,7 @@ func TestFilters(t *testing.T) { gspec = &core.Genesis{ Config: params.TestChainConfig, Alloc: core.GenesisAlloc{addr: {Balance: big.NewInt(1000000)}}, - BaseFee: big.NewInt(params.ApricotPhase3InitialBaseFee), + BaseFee: big.NewInt(1), } ) defer db.Close() @@ -189,67 +191,80 @@ func TestFilters(t *testing.T) { rawdb.WriteReceipts(db, block.Hash(), block.NumberU64(), receipts[i]) } - filter, err := sys.NewRangeFilter(0, -1, []common.Address{addr}, [][]common.Hash{{hash1, hash2, hash3, hash4}}) + filter, err := sys.NewRangeFilter(0, 
int64(rpc.LatestBlockNumber), []common.Address{addr}, [][]common.Hash{{hash1, hash2, hash3, hash4}}) require.NoError(t, err) - logs, _ := filter.Logs(context.Background()) if len(logs) != 4 { t.Error("expected 4 log, got", len(logs)) } - filter, err = sys.NewRangeFilter(900, 999, []common.Address{addr}, [][]common.Hash{{hash3}}) - require.NoError(t, err) - - logs, _ = filter.Logs(context.Background()) - if len(logs) != 1 { - t.Error("expected 1 log, got", len(logs)) - } - if len(logs) > 0 && logs[0].Topics[0] != hash3 { - t.Errorf("expected log[0].Topics[0] to be %x, got %x", hash3, logs[0].Topics[0]) - } - - filter, err = sys.NewRangeFilter(990, -1, []common.Address{addr}, [][]common.Hash{{hash3}}) - require.NoError(t, err) - - logs, _ = filter.Logs(context.Background()) - if len(logs) != 1 { - t.Error("expected 1 log, got", len(logs)) - } - if len(logs) > 0 && logs[0].Topics[0] != hash3 { - t.Errorf("expected log[0].Topics[0] to be %x, got %x", hash3, logs[0].Topics[0]) - } - - filter, err = sys.NewRangeFilter(1, 10, nil, [][]common.Hash{{hash1, hash2}}) - require.NoError(t, err) - - logs, _ = filter.Logs(context.Background()) - if len(logs) != 2 { - t.Error("expected 2 log, got", len(logs)) - } - - failHash := common.BytesToHash([]byte("fail")) - filter, err = sys.NewRangeFilter(0, -1, nil, [][]common.Hash{{failHash}}) - require.NoError(t, err) - - logs, _ = filter.Logs(context.Background()) - if len(logs) != 0 { - t.Error("expected 0 log, got", len(logs)) - } - - failAddr := common.BytesToAddress([]byte("failmenow")) - filter, err = sys.NewRangeFilter(0, -1, []common.Address{failAddr}, nil) - require.NoError(t, err) - - logs, _ = filter.Logs(context.Background()) - if len(logs) != 0 { - t.Error("expected 0 log, got", len(logs)) + for i, tc := range []struct { + f *Filter + wantHashes []common.Hash + }{ + { + mustNewRangeFilter(t, sys, 900, 999, []common.Address{addr}, [][]common.Hash{{hash3}}), + []common.Hash{hash3}, + }, { + mustNewRangeFilter(t, sys, 990, 
int64(rpc.LatestBlockNumber), []common.Address{addr}, [][]common.Hash{{hash3}}), + []common.Hash{hash3}, + }, { + mustNewRangeFilter(t, sys, 1, 10, nil, [][]common.Hash{{hash1, hash2}}), + []common.Hash{hash1, hash2}, + }, { + mustNewRangeFilter(t, sys, 0, int64(rpc.LatestBlockNumber), nil, [][]common.Hash{{common.BytesToHash([]byte("fail"))}}), + nil, + }, { + mustNewRangeFilter(t, sys, 0, int64(rpc.LatestBlockNumber), []common.Address{common.BytesToAddress([]byte("failmenow"))}, nil), + nil, + }, { + mustNewRangeFilter(t, sys, 0, int64(rpc.LatestBlockNumber), nil, [][]common.Hash{{common.BytesToHash([]byte("fail"))}, {hash1}}), + nil, + }, { + mustNewRangeFilter(t, sys, int64(rpc.LatestBlockNumber), int64(rpc.LatestBlockNumber), nil, nil), []common.Hash{hash4}, + }, { + // Note: modified from go-ethereum since we don't have FinalizedBlock + mustNewRangeFilter(t, sys, int64(rpc.AcceptedBlockNumber), int64(rpc.LatestBlockNumber), nil, nil), []common.Hash{hash4}, + }, { + // Note: modified from go-ethereum since we don't have FinalizedBlock + mustNewRangeFilter(t, sys, int64(rpc.AcceptedBlockNumber), int64(rpc.AcceptedBlockNumber), nil, nil), []common.Hash{hash4}, + }, { + // Note: modified from go-ethereum since we don't have FinalizedBlock + mustNewRangeFilter(t, sys, int64(rpc.LatestBlockNumber), -3, nil, nil), []common.Hash{hash4}, + }, { + // Note: modified from go-ethereum since we don't have SafeBlock + mustNewRangeFilter(t, sys, int64(rpc.AcceptedBlockNumber), int64(rpc.LatestBlockNumber), nil, nil), []common.Hash{hash4}, + }, { + // Note: modified from go-ethereum since we don't have SafeBlock + mustNewRangeFilter(t, sys, int64(rpc.AcceptedBlockNumber), int64(rpc.AcceptedBlockNumber), nil, nil), []common.Hash{hash4}, + }, { + // Note: modified from go-ethereum since we don't have SafeBlock + mustNewRangeFilter(t, sys, int64(rpc.LatestBlockNumber), int64(rpc.AcceptedBlockNumber), nil, nil), []common.Hash{hash4}, + }, + { + mustNewRangeFilter(t, sys, 
int64(rpc.PendingBlockNumber), int64(rpc.PendingBlockNumber), nil, nil), nil, + }, + } { + logs, _ := tc.f.Logs(context.Background()) + var haveHashes []common.Hash + for _, l := range logs { + haveHashes = append(haveHashes, l.Topics[0]) + } + if have, want := len(haveHashes), len(tc.wantHashes); have != want { + t.Fatalf("test %d, have %d logs, want %d", i, have, want) + } + if len(haveHashes) == 0 { + continue + } + if !reflect.DeepEqual(tc.wantHashes, haveHashes) { + t.Fatalf("test %d, have %v want %v", i, haveHashes, tc.wantHashes) + } } +} - filter, err = sys.NewRangeFilter(0, -1, nil, [][]common.Hash{{failHash}, {hash1}}) +func mustNewRangeFilter(t *testing.T, sys *FilterSystem, begin, end int64, addresses []common.Address, topics [][]common.Hash) *Filter { + t.Helper() + f, err := sys.NewRangeFilter(begin, end, addresses, topics) require.NoError(t, err) - - logs, _ = filter.Logs(context.Background()) - if len(logs) != 0 { - t.Error("expected 0 log, got", len(logs)) - } + return f } diff --git a/coreth/eth/gasprice/feehistory.go b/coreth/eth/gasprice/feehistory.go index 39f61840..8940640a 100644 --- a/coreth/eth/gasprice/feehistory.go +++ b/coreth/eth/gasprice/feehistory.go @@ -124,7 +124,7 @@ func (sb *slimBlock) processPercentiles(percentiles []float64) ([]*big.Int, *big // enforcing backend specific limitations. // Note: an error is only returned if retrieving the head header has failed. If there are no // retrievable blocks in the specified range then zero block count is returned with no error. 
-func (oracle *Oracle) resolveBlockRange(ctx context.Context, lastBlock rpc.BlockNumber, blocks int) (uint64, int, error) { +func (oracle *Oracle) resolveBlockRange(ctx context.Context, lastBlock rpc.BlockNumber, blocks uint64) (uint64, uint64, error) { // Query either pending block or head header and set headBlock if lastBlock == rpc.PendingBlockNumber { // Pending block not supported by backend, process until latest block @@ -149,12 +149,12 @@ func (oracle *Oracle) resolveBlockRange(ctx context.Context, lastBlock rpc.Block } // Ensure not trying to retrieve before genesis if rpc.BlockNumber(blocks) > lastBlock+1 { - blocks = int(lastBlock + 1) + blocks = uint64(lastBlock + 1) } // Truncate blocks range if extending past [oracle.maxBlockHistory] oldestQueriedIndex := lastBlock - rpc.BlockNumber(blocks) + 1 if queryDepth := lastAcceptedBlock - oldestQueriedIndex; queryDepth > maxQueryDepth { - overage := int(queryDepth - maxQueryDepth) + overage := uint64(queryDepth - maxQueryDepth) blocks -= overage } // It is not possible that [blocks] could be <= 0 after @@ -177,7 +177,7 @@ func (oracle *Oracle) resolveBlockRange(ctx context.Context, lastBlock rpc.Block // // Note: baseFee includes the next block after the newest of the returned range, because this // value can be derived from the newest block. 
-func (oracle *Oracle) FeeHistory(ctx context.Context, blocks int, unresolvedLastBlock rpc.BlockNumber, rewardPercentiles []float64) (*big.Int, [][]*big.Int, []*big.Int, []float64, error) { +func (oracle *Oracle) FeeHistory(ctx context.Context, blocks uint64, unresolvedLastBlock rpc.BlockNumber, rewardPercentiles []float64) (*big.Int, [][]*big.Int, []*big.Int, []float64, error) { if blocks < 1 { return common.Big0, nil, nil, nil, nil // returning with no data and no error means there are no retrievable blocks } @@ -197,7 +197,7 @@ func (oracle *Oracle) FeeHistory(ctx context.Context, blocks int, unresolvedLast if err != nil || blocks == 0 { return common.Big0, nil, nil, nil, err } - oldestBlock := lastBlock + 1 - uint64(blocks) + oldestBlock := lastBlock + 1 - blocks var ( reward = make([][]*big.Int, blocks) @@ -206,16 +206,16 @@ func (oracle *Oracle) FeeHistory(ctx context.Context, blocks int, unresolvedLast firstMissing = blocks ) - for blockNumber := oldestBlock; blockNumber < oldestBlock+uint64(blocks); blockNumber++ { + for blockNumber := oldestBlock; blockNumber < oldestBlock+blocks; blockNumber++ { // Check if the context has errored if err := ctx.Err(); err != nil { return common.Big0, nil, nil, nil, err } - i := int(blockNumber - oldestBlock) + i := blockNumber - oldestBlock var sb *slimBlock - if sbRaw, ok := oracle.historyCache.Get(blockNumber); ok { - sb = sbRaw.(*slimBlock) + if sbCache, ok := oracle.historyCache.Get(blockNumber); ok { + sb = sbCache } else { block, err := oracle.backend.BlockByNumber(ctx, rpc.BlockNumber(blockNumber)) if err != nil { diff --git a/coreth/eth/gasprice/feehistory_test.go b/coreth/eth/gasprice/feehistory_test.go index 7a90a37b..f8234d1d 100644 --- a/coreth/eth/gasprice/feehistory_test.go +++ b/coreth/eth/gasprice/feehistory_test.go @@ -44,9 +44,9 @@ import ( func TestFeeHistory(t *testing.T) { var cases = []struct { pending bool - maxCallBlock int - maxBlock int - count int + maxCallBlock uint64 + maxBlock uint64 + count 
uint64 last rpc.BlockNumber percent []float64 expFirst uint64 @@ -110,7 +110,7 @@ func TestFeeHistory(t *testing.T) { require.NoError(t, err) first, reward, baseFee, ratio, err := oracle.FeeHistory(context.Background(), c.count, c.last, c.percent) - + backend.teardown() expReward := c.expCount if len(c.percent) == 0 { expReward = 0 diff --git a/coreth/eth/gasprice/gasprice.go b/coreth/eth/gasprice/gasprice.go index 85ec33b0..2873d73d 100644 --- a/coreth/eth/gasprice/gasprice.go +++ b/coreth/eth/gasprice/gasprice.go @@ -39,26 +39,26 @@ import ( "github.com/ava-labs/coreth/params" "github.com/ava-labs/coreth/rpc" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/lru" "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/log" - lru "github.com/hashicorp/golang-lru" ) const ( // DefaultMaxCallBlockHistory is the number of blocks that can be fetched in // a single call to eth_feeHistory. - DefaultMaxCallBlockHistory int = 2048 + DefaultMaxCallBlockHistory = 2048 // DefaultMaxBlockHistory is the number of blocks from the last accepted // block that can be fetched in eth_feeHistory. // // DefaultMaxBlockHistory is chosen to be a value larger than the required // fee lookback window that MetaMask uses (20k blocks). - DefaultMaxBlockHistory int = 25_000 + DefaultMaxBlockHistory = 25_000 // DefaultFeeHistoryCacheSize is chosen to be some value larger than // [DefaultMaxBlockHistory] to ensure all block lookups can be cached when // serving a fee history query. - DefaultFeeHistoryCacheSize int = 30_000 + DefaultFeeHistoryCacheSize = 30_000 ) var ( @@ -81,10 +81,10 @@ type Config struct { MaxLookbackSeconds uint64 // MaxCallBlockHistory specifies the maximum number of blocks that can be // fetched in a single eth_feeHistory call. 
- MaxCallBlockHistory int + MaxCallBlockHistory uint64 // MaxBlockHistory specifies the furthest back behind the last accepted block that can // be requested by fee history. - MaxBlockHistory int + MaxBlockHistory uint64 MaxPrice *big.Int `toml:",omitempty"` MinPrice *big.Int `toml:",omitempty"` MinGasUsed *big.Int `toml:",omitempty"` @@ -123,9 +123,9 @@ type Oracle struct { checkBlocks, percentile int maxLookbackSeconds uint64 - maxCallBlockHistory int - maxBlockHistory int - historyCache *lru.Cache + maxCallBlockHistory uint64 + maxBlockHistory uint64 + historyCache *lru.Cache[uint64, *slimBlock] feeInfoProvider *feeInfoProvider } @@ -180,7 +180,7 @@ func NewOracle(backend OracleBackend, config Config) (*Oracle, error) { log.Warn("Sanitizing invalid gasprice oracle max block history", "provided", config.MaxBlockHistory, "updated", maxBlockHistory) } - cache, _ := lru.New(DefaultFeeHistoryCacheSize) + cache := lru.NewCache[uint64, *slimBlock](DefaultFeeHistoryCacheSize) headEvent := make(chan core.ChainHeadEvent, 1) backend.SubscribeChainHeadEvent(headEvent) go func() { diff --git a/coreth/eth/gasprice/gasprice_test.go b/coreth/eth/gasprice/gasprice_test.go index 376b6933..82941c01 100644 --- a/coreth/eth/gasprice/gasprice_test.go +++ b/coreth/eth/gasprice/gasprice_test.go @@ -59,14 +59,14 @@ type testBackend struct { func (b *testBackend) HeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Header, error) { if number == rpc.LatestBlockNumber { - return b.chain.CurrentBlock().Header(), nil + return b.chain.CurrentBlock(), nil } return b.chain.GetHeaderByNumber(uint64(number)), nil } func (b *testBackend) BlockByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Block, error) { if number == rpc.LatestBlockNumber { - return b.chain.CurrentBlock(), nil + number = rpc.BlockNumber(b.chain.CurrentBlock().Number.Uint64()) } return b.chain.GetBlockByNumber(uint64(number)), nil } @@ -88,6 +88,10 @@ func (b *testBackend) 
SubscribeChainAcceptedEvent(ch chan<- core.ChainEvent) eve return nil } +func (b *testBackend) teardown() { + b.chain.Stop() +} + func newTestBackendFakerEngine(t *testing.T, config *params.ChainConfig, numBlocks int, extDataGasUsage *big.Int, genBlocks func(i int, b *core.BlockGen)) *testBackend { var gspec = &core.Genesis{ Config: config, @@ -113,13 +117,15 @@ func newTestBackendFakerEngine(t *testing.T, config *params.ChainConfig, numBloc return &testBackend{chain: chain} } +// newTestBackend creates a test backend. OBS: don't forget to invoke tearDown +// after use, otherwise the blockchain instance will mem-leak via goroutines. func newTestBackend(t *testing.T, config *params.ChainConfig, numBlocks int, extDataGasUsage *big.Int, genBlocks func(i int, b *core.BlockGen)) *testBackend { var gspec = &core.Genesis{ Config: config, Alloc: core.GenesisAlloc{addr: core.GenesisAccount{Balance: bal}}, } - engine := dummy.NewDummyEngine(&dummy.ConsensusCallbacks{ + engine := dummy.NewFakerWithCallbacks(dummy.ConsensusCallbacks{ OnFinalizeAndAssemble: func(header *types.Header, state *state.StateDB, txs []*types.Transaction) ([]byte, *big.Int, *big.Int, error) { return nil, common.Big0, extDataGasUsage, nil }, @@ -153,7 +159,11 @@ func (b *testBackend) CurrentHeader() *types.Header { } func (b *testBackend) LastAcceptedBlock() *types.Block { - return b.chain.CurrentBlock() + current := b.chain.CurrentBlock() + if current == nil { + return nil + } + return b.chain.GetBlockByNumber(current.Number.Uint64()) } func (b *testBackend) GetBlockByNumber(number uint64) *types.Block { @@ -199,6 +209,7 @@ func applyGasPriceTest(t *testing.T, test suggestTipCapTest, config Config) { oracle.clock.Set(time.Unix(20, 0)) got, err := oracle.SuggestTipCap(context.Background()) + backend.teardown() require.NoError(t, err) if got.Cmp(test.expectedTip) != 0 { @@ -354,6 +365,8 @@ func TestSuggestGasPricePreAP3(t *testing.T) { b.AddTx(tx) } }) + defer backend.teardown() + oracle, err := 
NewOracle(backend, config) require.NoError(t, err) diff --git a/coreth/eth/state_accessor.go b/coreth/eth/state_accessor.go index 7197e8e6..1a416152 100644 --- a/coreth/eth/state_accessor.go +++ b/coreth/eth/state_accessor.go @@ -27,9 +27,9 @@ package eth import ( + "context" "errors" "fmt" - "math/big" "time" "github.com/ava-labs/coreth/core" @@ -67,7 +67,8 @@ var noopReleaser = tracers.StateReleaseFunc(func() {}) // - preferDisk: this arg can be used by the caller to signal that even though the 'base' is // provided, it would be preferable to start from a fresh state, if we have it // on disk. -func (eth *Ethereum) StateAtBlock(block *types.Block, reexec uint64, base *state.StateDB, readOnly bool, preferDisk bool) (statedb *state.StateDB, release tracers.StateReleaseFunc, err error) { +func (eth *Ethereum) StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, readOnly bool, preferDisk bool) (statedb *state.StateDB, release tracers.StateReleaseFunc, err error) { + reexec = 0 // Do not support re-executing historical blocks to grab state var ( current *types.Block database state.Database @@ -122,6 +123,9 @@ func (eth *Ethereum) StateAtBlock(block *types.Block, reexec uint64, base *state } // Database does not have the state for the given block, try to regenerate for i := uint64(0); i < reexec; i++ { + if err := ctx.Err(); err != nil { + return nil, nil, err + } if current.NumberU64() == 0 { return nil, nil, errors.New("genesis state is missing") } @@ -153,6 +157,9 @@ func (eth *Ethereum) StateAtBlock(block *types.Block, reexec uint64, base *state parent common.Hash ) for current.NumberU64() < origin { + if err := ctx.Err(); err != nil { + return nil, nil, err + } // Print progress logs if long enough time elapsed if time.Since(logged) > 8*time.Second && report { log.Info("Regenerating historical state", "block", current.NumberU64()+1, "target", origin, "remaining", origin-current.NumberU64()-1, "elapsed", time.Since(start)) @@ 
-193,7 +200,7 @@ func (eth *Ethereum) StateAtBlock(block *types.Block, reexec uint64, base *state } // stateAtTransaction returns the execution environment of a certain transaction. -func (eth *Ethereum) stateAtTransaction(block *types.Block, txIndex int, reexec uint64) (core.Message, vm.BlockContext, *state.StateDB, tracers.StateReleaseFunc, error) { +func (eth *Ethereum) stateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (*core.Message, vm.BlockContext, *state.StateDB, tracers.StateReleaseFunc, error) { // Short circuit if it's genesis block. if block.NumberU64() == 0 { return nil, vm.BlockContext{}, nil, nil, errors.New("no transaction in genesis") @@ -205,7 +212,7 @@ func (eth *Ethereum) stateAtTransaction(block *types.Block, txIndex int, reexec } // Lookup the statedb of parent block from the live database, // otherwise regenerate it on the flight. - statedb, release, err := eth.StateAtBlock(parent, reexec, nil, true, false) + statedb, release, err := eth.StateAtNextBlock(ctx, parent, block, reexec, nil, true, false) if err != nil { return nil, vm.BlockContext{}, nil, nil, err } @@ -213,10 +220,10 @@ func (eth *Ethereum) stateAtTransaction(block *types.Block, txIndex int, reexec return nil, vm.BlockContext{}, statedb, release, nil } // Recompute transactions up to the target index. 
- signer := types.MakeSigner(eth.blockchain.Config(), block.Number(), new(big.Int).SetUint64(block.Time())) + signer := types.MakeSigner(eth.blockchain.Config(), block.Number(), block.Time()) for idx, tx := range block.Transactions() { // Assemble the transaction call message and return if the requested offset - msg, _ := tx.AsMessage(signer, block.BaseFee()) + msg, _ := core.TransactionToMessage(tx, signer, block.BaseFee()) txContext := core.NewEVMTxContext(msg) context := core.NewEVMBlockContext(block.Header(), eth.blockchain, nil) if idx == txIndex { @@ -224,7 +231,7 @@ func (eth *Ethereum) stateAtTransaction(block *types.Block, txIndex int, reexec } // Not yet the searched for transaction, execute on top of the current state vmenv := vm.NewEVM(context, txContext, statedb, eth.blockchain.Config(), vm.Config{}) - statedb.Prepare(tx.Hash(), idx) + statedb.SetTxContext(tx.Hash(), idx) if _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(tx.Gas())); err != nil { return nil, vm.BlockContext{}, nil, nil, fmt.Errorf("transaction %#x failed: %v", tx.Hash(), err) } @@ -234,3 +241,25 @@ func (eth *Ethereum) stateAtTransaction(block *types.Block, txIndex int, reexec } return nil, vm.BlockContext{}, nil, nil, fmt.Errorf("transaction index %d out of range for block %#x", txIndex, block.Hash()) } + +// StateAtNextBlock is a helper function that returns the state at the next block. +// It wraps StateAtBlock and handles the case where Upgrades are applied to the +// next block. +// This is different than using StateAtBlock with [nextBlock] because it will +// apply the upgrades to the [parent] state before returning it. 
+func (eth *Ethereum) StateAtNextBlock(ctx context.Context, parent *types.Block, nextBlock *types.Block, reexec uint64, base *state.StateDB, readOnly bool, preferDisk bool) (*state.StateDB, tracers.StateReleaseFunc, error) { + // Get state for [parent] + statedb, release, err := eth.StateAtBlock(ctx, parent, reexec, base, readOnly, preferDisk) + if err != nil { + return nil, nil, err + } + + // Apply upgrades here for the [nextBlock] + err = core.ApplyUpgrades(eth.blockchain.Config(), &parent.Header().Time, nextBlock, statedb) + if err != nil { + release() + return nil, nil, err + } + + return statedb, release, nil +} diff --git a/coreth/eth/tracers/api.go b/coreth/eth/tracers/api.go index 0e0f7b2f..d835f7c9 100644 --- a/coreth/eth/tracers/api.go +++ b/coreth/eth/tracers/api.go @@ -33,7 +33,6 @@ import ( "encoding/json" "errors" "fmt" - "math/big" "os" "runtime" "sync" @@ -45,12 +44,12 @@ import ( "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/core/vm" "github.com/ava-labs/coreth/eth/tracers/logger" - "github.com/ava-labs/coreth/ethdb" "github.com/ava-labs/coreth/internal/ethapi" "github.com/ava-labs/coreth/params" "github.com/ava-labs/coreth/rpc" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rlp" ) @@ -71,8 +70,15 @@ const ( // For non-archive nodes, this limit _will_ be overblown, as disk-backed tries // will only be found every ~15K blocks or so. defaultTracechainMemLimit = common.StorageSize(500 * 1024 * 1024) + + // maximumPendingTraceStates is the maximum number of states allowed waiting + // for tracing. The creation of trace state will be paused if the unused + // trace states exceed this limit. 
+ maximumPendingTraceStates = 128 ) +var errTxNotFound = errors.New("transaction not found") + // StateReleaseFunc is used to deallocate resources held by constructing a // historical state for tracing purposes. type StateReleaseFunc func() @@ -91,7 +97,8 @@ type Backend interface { Engine() consensus.Engine ChainDb() ethdb.Database StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, readOnly bool, preferDisk bool) (*state.StateDB, StateReleaseFunc, error) - StateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (core.Message, vm.BlockContext, *state.StateDB, StateReleaseFunc, error) + StateAtNextBlock(ctx context.Context, parent, block *types.Block, reexec uint64, base *state.StateDB, readOnly bool, preferDisk bool) (*state.StateDB, StateReleaseFunc, error) + StateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (*core.Message, vm.BlockContext, *state.StateDB, StateReleaseFunc, error) } // baseAPI holds the collection of common methods for API and FileTracerAPI. @@ -121,34 +128,10 @@ func NewFileTracerAPI(backend Backend) *FileTracerAPI { return &FileTracerAPI{baseAPI{backend: backend}} } -type chainContext struct { - api *baseAPI - ctx context.Context -} - -func (context *chainContext) Engine() consensus.Engine { - return context.api.backend.Engine() -} - -func (context *chainContext) GetHeader(hash common.Hash, number uint64) *types.Header { - header, err := context.api.backend.HeaderByNumber(context.ctx, rpc.BlockNumber(number)) - if err != nil { - return nil - } - if header.Hash() == hash { - return header - } - header, err = context.api.backend.HeaderByHash(context.ctx, hash) - if err != nil { - return nil - } - return header -} - // chainContext constructs the context reader which is used by the evm for reading // the necessary chain context. 
func (api *baseAPI) chainContext(ctx context.Context) core.ChainContext { - return &chainContext{api: api, ctx: ctx} + return ethapi.NewChainContext(ctx, api.backend) } // blockByNumber is the wrapper of the chain access function offered by the backend. @@ -221,6 +204,7 @@ type StdTraceConfig struct { // txTraceResult is the result of a single transaction trace. type txTraceResult struct { + TxHash common.Hash `json:"txHash"` // transaction hash Result interface{} `json:"result,omitempty"` // Trace results produced by the tracer Error string `json:"error,omitempty"` // Trace failure produced by the tracer } @@ -283,30 +267,6 @@ func (api *API) TraceChain(ctx context.Context, start, end rpc.BlockNumber, conf return sub, nil } -// releaser is a helper tool responsible for caching the release -// callbacks of tracing state. -type releaser struct { - releases []StateReleaseFunc - lock sync.Mutex -} - -func (r *releaser) add(release StateReleaseFunc) { - r.lock.Lock() - defer r.lock.Unlock() - - r.releases = append(r.releases, release) -} - -func (r *releaser) call() { - r.lock.Lock() - defer r.lock.Unlock() - - for _, release := range r.releases { - release() - } - r.releases = r.releases[:0] -} - // traceChain configures a new tracer according to the provided configuration, and // executes all the transactions contained within. The tracing chain range includes // the end block but excludes the start one. 
The return value will be one item per @@ -323,11 +283,11 @@ func (api *API) traceChain(start, end *types.Block, config *TraceConfig, closed threads = blocks } var ( - pend = new(sync.WaitGroup) - ctx = context.Background() - taskCh = make(chan *blockTraceTask, threads) - resCh = make(chan *blockTraceTask, threads) - reler = new(releaser) + pend = new(sync.WaitGroup) + ctx = context.Background() + taskCh = make(chan *blockTraceTask, threads) + resCh = make(chan *blockTraceTask, threads) + tracker = newStateTracker(maximumPendingTraceStates, start.NumberU64()) ) for th := 0; th < threads; th++ { pend.Add(1) @@ -337,29 +297,32 @@ func (api *API) traceChain(start, end *types.Block, config *TraceConfig, closed // Fetch and execute the block trace taskCh for task := range taskCh { var ( - signer = types.MakeSigner(api.backend.ChainConfig(), task.block.Number(), task.block.Timestamp()) + signer = types.MakeSigner(api.backend.ChainConfig(), task.block.Number(), task.block.Time()) blockCtx = core.NewEVMBlockContext(task.block.Header(), api.chainContext(ctx), nil) ) // Trace all the transactions contained within for i, tx := range task.block.Transactions() { - msg, _ := tx.AsMessage(signer, task.block.BaseFee()) + msg, _ := core.TransactionToMessage(tx, signer, task.block.BaseFee()) txctx := &Context{ - BlockHash: task.block.Hash(), - TxIndex: i, - TxHash: tx.Hash(), + BlockHash: task.block.Hash(), + BlockNumber: task.block.Number(), + TxIndex: i, + TxHash: tx.Hash(), } res, err := api.traceTx(ctx, msg, txctx, blockCtx, task.statedb, config) if err != nil { - task.results[i] = &txTraceResult{Error: err.Error()} + task.results[i] = &txTraceResult{TxHash: tx.Hash(), Error: err.Error()} log.Warn("Tracing failed", "hash", tx.Hash(), "block", task.block.NumberU64(), "err", err) break } // Only delete empty objects if EIP158/161 (a.k.a Spurious Dragon) is in effect task.statedb.Finalise(api.backend.ChainConfig().IsEIP158(task.block.Number())) - task.results[i] = 
&txTraceResult{Result: res} + task.results[i] = &txTraceResult{TxHash: tx.Hash(), Result: res} } - // Tracing state is used up, queue it for de-referencing - reler.add(task.release) + // Tracing state is used up, queue it for de-referencing. Note the + // state is the parent state of trace block, use block.number-1 as + // the state number. + tracker.releaseState(task.block.NumberU64()-1, task.release) // Stream the result back to the result catcher or abort on teardown select { @@ -386,8 +349,8 @@ func (api *API) traceChain(start, end *types.Block, config *TraceConfig, closed close(taskCh) pend.Wait() - // Clean out any pending derefs. - reler.call() + // Clean out any pending release functions of trace states. + tracker.callReleases() // Log the chain result switch { @@ -424,6 +387,13 @@ func (api *API) traceChain(start, end *types.Block, config *TraceConfig, closed failed = err break } + // Make sure the state creator doesn't go too far. Too many unprocessed + // trace state may cause the oldest state to become stale(e.g. in + // path-based scheme). + if err = tracker.wait(number); err != nil { + failed = err + break + } // Prepare the statedb for tracing. Don't use the live database for // tracing to avoid persisting state junks into the database. Switch // over to `preferDisk` mode only if the memory usage exceeds the @@ -434,23 +404,23 @@ func (api *API) traceChain(start, end *types.Block, config *TraceConfig, closed s1, s2 := statedb.Database().TrieDB().Size() preferDisk = s1+s2 > defaultTracechainMemLimit } - statedb, release, err = api.backend.StateAtBlock(ctx, block, reexec, statedb, false, preferDisk) + statedb, release, err = api.backend.StateAtNextBlock(ctx, block, next, reexec, statedb, false, preferDisk) if err != nil { failed = err break } - // Clean out any pending derefs. 
Note this step must be done after - // constructing tracing state, because the tracing state of block - // next depends on the parent state and construction may fail if - // we release too early. - reler.call() + // Clean out any pending release functions of trace state. Note this + // step must be done after constructing tracing state, because the + // tracing state of block next depends on the parent state and construction + // may fail if we release too early. + tracker.callReleases() // Send the block over to the concurrent tracers (if not in the fast-forward phase) txs := next.Transactions() select { case taskCh <- &blockTraceTask{statedb: statedb.Copy(), block: next, release: release, results: make([]*txTraceResult, len(txs))}: case <-closed: - reler.add(release) + tracker.releaseState(number, release) return } traced += uint64(len(txs)) @@ -581,7 +551,7 @@ func (api *API) IntermediateRoots(ctx context.Context, hash common.Hash, config if config != nil && config.Reexec != nil { reexec = *config.Reexec } - statedb, release, err := api.backend.StateAtBlock(ctx, parent, reexec, nil, true, false) + statedb, release, err := api.backend.StateAtNextBlock(ctx, parent, block, reexec, nil, true, false) if err != nil { return nil, err } @@ -589,19 +559,22 @@ func (api *API) IntermediateRoots(ctx context.Context, hash common.Hash, config var ( roots []common.Hash - signer = types.MakeSigner(api.backend.ChainConfig(), block.Number(), new(big.Int).SetUint64(block.Time())) + signer = types.MakeSigner(api.backend.ChainConfig(), block.Number(), block.Time()) chainConfig = api.backend.ChainConfig() vmctx = core.NewEVMBlockContext(block.Header(), api.chainContext(ctx), nil) deleteEmptyObjects = chainConfig.IsEIP158(block.Number()) ) for i, tx := range block.Transactions() { + if err := ctx.Err(); err != nil { + return nil, err + } var ( - msg, _ = tx.AsMessage(signer, block.BaseFee()) + msg, _ = core.TransactionToMessage(tx, signer, block.BaseFee()) txContext = 
core.NewEVMTxContext(msg) vmenv = vm.NewEVM(vmctx, txContext, statedb, chainConfig, vm.Config{}) ) - statedb.Prepare(tx.Hash(), i) - if _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(msg.Gas())); err != nil { + statedb.SetTxContext(tx.Hash(), i) + if _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(msg.GasLimit)); err != nil { log.Warn("Tracing intermediate roots did not complete", "txindex", i, "txhash", tx.Hash(), "err", err) // We intentionally don't return the error here: if we do, then the RPC server will not // return the roots. Most likely, the caller already knows that a certain transaction fails to @@ -646,6 +619,7 @@ func (api *baseAPI) traceBlock(ctx context.Context, block *types.Block, config * if block.NumberU64() == 0 { return nil, errors.New("genesis is not traceable") } + // Prepare base state parent, err := api.blockByNumberAndHash(ctx, rpc.BlockNumber(block.NumberU64()-1), block.ParentHash()) if err != nil { return nil, err @@ -654,68 +628,117 @@ func (api *baseAPI) traceBlock(ctx context.Context, block *types.Block, config * if config != nil && config.Reexec != nil { reexec = *config.Reexec } - statedb, release, err := api.backend.StateAtBlock(ctx, parent, reexec, nil, true, false) + statedb, release, err := api.backend.StateAtNextBlock(ctx, parent, block, reexec, nil, true, false) if err != nil { return nil, err } defer release() - // Execute all the transaction contained within the block concurrently + // JS tracers have high overhead. In this case run a parallel + // process that generates states in one thread and traces txes + // in separate worker threads. 
+ if config != nil && config.Tracer != nil && *config.Tracer != "" { + if isJS := DefaultDirectory.IsJS(*config.Tracer); isJS { + return api.traceBlockParallel(ctx, block, statedb, config) + } + } + // Native tracers have low overhead var ( - signer = types.MakeSigner(api.backend.ChainConfig(), block.Number(), new(big.Int).SetUint64(block.Time())) - txs = block.Transactions() - results = make([]*txTraceResult, len(txs)) + txs = block.Transactions() + blockHash = block.Hash() + is158 = api.backend.ChainConfig().IsEIP158(block.Number()) + blockCtx = core.NewEVMBlockContext(block.Header(), api.chainContext(ctx), nil) + signer = types.MakeSigner(api.backend.ChainConfig(), block.Number(), block.Time()) + results = make([]*txTraceResult, len(txs)) + ) + for i, tx := range txs { + // Generate the next state snapshot fast without tracing + msg, _ := core.TransactionToMessage(tx, signer, block.BaseFee()) + txctx := &Context{ + BlockHash: blockHash, + BlockNumber: block.Number(), + TxIndex: i, + TxHash: tx.Hash(), + } + res, err := api.traceTx(ctx, msg, txctx, blockCtx, statedb, config) + if err != nil { + return nil, err + } + results[i] = &txTraceResult{TxHash: tx.Hash(), Result: res} + // Finalize the state so any modifications are written to the trie + // Only delete empty objects if EIP158/161 (a.k.a Spurious Dragon) is in effect + statedb.Finalise(is158) + } + return results, nil +} - pend = new(sync.WaitGroup) - jobs = make(chan *txTraceTask, len(txs)) +// traceBlockParallel is for tracers that have a high overhead (read JS tracers). One thread +// runs along and executes txes without tracing enabled to generate their prestate. +// Worker threads take the tasks and the prestate and trace them. 
+func (api *baseAPI) traceBlockParallel(ctx context.Context, block *types.Block, statedb *state.StateDB, config *TraceConfig) ([]*txTraceResult, error) { + // Execute all the transaction contained within the block concurrently + var ( + txs = block.Transactions() + blockHash = block.Hash() + blockCtx = core.NewEVMBlockContext(block.Header(), api.chainContext(ctx), nil) + signer = types.MakeSigner(api.backend.ChainConfig(), block.Number(), block.Time()) + results = make([]*txTraceResult, len(txs)) + pend sync.WaitGroup ) threads := runtime.NumCPU() if threads > len(txs) { threads = len(txs) } - blockHash := block.Hash() + jobs := make(chan *txTraceTask, threads) for th := 0; th < threads; th++ { pend.Add(1) go func() { defer pend.Done() - - blockCtx := core.NewEVMBlockContext(block.Header(), api.chainContext(ctx), nil) // Fetch and execute the next transaction trace tasks for task := range jobs { - msg, _ := txs[task.index].AsMessage(signer, block.BaseFee()) + msg, _ := core.TransactionToMessage(txs[task.index], signer, block.BaseFee()) txctx := &Context{ - BlockHash: blockHash, - TxIndex: task.index, - TxHash: txs[task.index].Hash(), + BlockHash: blockHash, + BlockNumber: block.Number(), + TxIndex: task.index, + TxHash: txs[task.index].Hash(), } res, err := api.traceTx(ctx, msg, txctx, blockCtx, task.statedb, config) if err != nil { - results[task.index] = &txTraceResult{Error: err.Error()} + results[task.index] = &txTraceResult{TxHash: txs[task.index].Hash(), Error: err.Error()} continue } - results[task.index] = &txTraceResult{Result: res} + results[task.index] = &txTraceResult{TxHash: txs[task.index].Hash(), Result: res} } }() } + // Feed the transactions into the tracers and return var failed error - blockCtx := core.NewEVMBlockContext(block.Header(), api.chainContext(ctx), nil) +txloop: for i, tx := range txs { // Send the trace task over for execution - jobs <- &txTraceTask{statedb: statedb.Copy(), index: i} + task := &txTraceTask{statedb: statedb.Copy(), 
index: i} + select { + case <-ctx.Done(): + failed = ctx.Err() + break txloop + case jobs <- task: + } // Generate the next state snapshot fast without tracing - msg, _ := tx.AsMessage(signer, block.BaseFee()) - statedb.Prepare(tx.Hash(), i) + msg, _ := core.TransactionToMessage(tx, signer, block.BaseFee()) + statedb.SetTxContext(tx.Hash(), i) vmenv := vm.NewEVM(blockCtx, core.NewEVMTxContext(msg), statedb, api.backend.ChainConfig(), vm.Config{}) - if _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(msg.Gas())); err != nil { + if _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(msg.GasLimit)); err != nil { failed = err - break + break txloop } // Finalize the state so any modifications are written to the trie // Only delete empty objects if EIP158/161 (a.k.a Spurious Dragon) is in effect statedb.Finalise(vmenv.ChainConfig().IsEIP158(block.Number())) } + close(jobs) pend.Wait() @@ -747,7 +770,7 @@ func (api *FileTracerAPI) standardTraceBlockToFile(ctx context.Context, block *t if config != nil && config.Reexec != nil { reexec = *config.Reexec } - statedb, release, err := api.backend.StateAtBlock(ctx, parent, reexec, nil, true, false) + statedb, release, err := api.backend.StateAtNextBlock(ctx, parent, block, reexec, nil, true, false) if err != nil { return nil, err } @@ -767,7 +790,7 @@ func (api *FileTracerAPI) standardTraceBlockToFile(ctx context.Context, block *t // Execute transaction, either tracing all or just the requested one var ( dumps []string - signer = types.MakeSigner(api.backend.ChainConfig(), block.Number(), block.Timestamp()) + signer = types.MakeSigner(api.backend.ChainConfig(), block.Number(), block.Time()) chainConfig = api.backend.ChainConfig() vmctx = core.NewEVMBlockContext(block.Header(), api.chainContext(ctx), nil) canon = true @@ -778,12 +801,13 @@ func (api *FileTracerAPI) standardTraceBlockToFile(ctx context.Context, block *t // in order to obtain the state. 
// Therefore, it's perfectly valid to specify `"futureForkBlock": 0`, to enable `futureFork` if config != nil && config.Overrides != nil { + // Note: This copies the config, to not screw up the main config chainConfig, canon = overrideConfig(chainConfig, config.Overrides) } for i, tx := range block.Transactions() { // Prepare the transaction for un-traced execution var ( - msg, _ = tx.AsMessage(signer, block.BaseFee()) + msg, _ = core.TransactionToMessage(tx, signer, block.BaseFee()) txContext = core.NewEVMTxContext(msg) vmConf vm.Config dump *os.File @@ -806,15 +830,14 @@ func (api *FileTracerAPI) standardTraceBlockToFile(ctx context.Context, block *t // Swap out the noop logger to the standard tracer writer = bufio.NewWriter(dump) vmConf = vm.Config{ - Debug: true, Tracer: logger.NewJSONLogger(&logConfig, writer), EnablePreimageRecording: true, } } // Execute the transaction and flush any traces to disk vmenv := vm.NewEVM(vmctx, txContext, statedb, chainConfig, vmConf) - statedb.Prepare(tx.Hash(), i) - _, err = core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(msg.Gas())) + statedb.SetTxContext(tx.Hash(), i) + _, err = core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(msg.GasLimit)) if writer != nil { writer.Flush() } @@ -851,10 +874,14 @@ func containsTx(block *types.Block, hash common.Hash) bool { // TraceTransaction returns the structured logs created during the execution of EVM // and returns them as a JSON object. func (api *API) TraceTransaction(ctx context.Context, hash common.Hash, config *TraceConfig) (interface{}, error) { - _, blockHash, blockNumber, index, err := api.backend.GetTransaction(ctx, hash) + tx, blockHash, blockNumber, index, err := api.backend.GetTransaction(ctx, hash) if err != nil { return nil, err } + // Only mined txes are supported + if tx == nil { + return nil, errTxNotFound + } // It shouldn't happen in practice. 
if blockNumber == 0 { return nil, errors.New("genesis is not traceable") @@ -874,9 +901,10 @@ func (api *API) TraceTransaction(ctx context.Context, hash common.Hash, config * defer release() txctx := &Context{ - BlockHash: blockHash, - TxIndex: int(index), - TxHash: hash, + BlockHash: blockHash, + BlockNumber: block.Number(), + TxIndex: int(index), + TxHash: hash, } return api.traceTx(ctx, msg, txctx, vmctx, statedb, config) } @@ -922,10 +950,18 @@ func (api *API) TraceCall(ctx context.Context, args ethapi.TransactionArgs, bloc vmctx := core.NewEVMBlockContext(block.Header(), api.chainContext(ctx), nil) // Apply the customization rules if required. if config != nil { + originalTime := block.Time() + config.BlockOverrides.Apply(&vmctx) + // Apply all relevant upgrades from [originalTime] to the block time set in the override. + // Should be applied before the state overrides. + err = core.ApplyUpgrades(api.backend.ChainConfig(), &originalTime, &vmctx, statedb) + if err != nil { + return nil, err + } + if err := config.StateOverrides.Apply(statedb); err != nil { return nil, err } - config.BlockOverrides.Apply(&vmctx) } // Execute the trace msg, err := args.ToMessage(api.backend.RPCGasCap(), block.BaseFee()) @@ -935,12 +971,7 @@ func (api *API) TraceCall(ctx context.Context, args ethapi.TransactionArgs, bloc var traceConfig *TraceConfig if config != nil { - traceConfig = &TraceConfig{ - Config: config.Config, - Tracer: config.Tracer, - Timeout: config.Timeout, - Reexec: config.Reexec, - } + traceConfig = &config.TraceConfig } return api.traceTx(ctx, msg, new(Context), vmctx, statedb, traceConfig) } @@ -948,7 +979,7 @@ func (api *API) TraceCall(ctx context.Context, args ethapi.TransactionArgs, bloc // traceTx configures a new tracer according to the provided configuration, and // executes the given message in the provided environment. The return value will // be tracer dependent. 
-func (api *baseAPI) traceTx(ctx context.Context, message core.Message, txctx *Context, vmctx vm.BlockContext, statedb *state.StateDB, config *TraceConfig) (interface{}, error) { +func (api *baseAPI) traceTx(ctx context.Context, message *core.Message, txctx *Context, vmctx vm.BlockContext, statedb *state.StateDB, config *TraceConfig) (interface{}, error) { var ( tracer Tracer err error @@ -961,11 +992,13 @@ func (api *baseAPI) traceTx(ctx context.Context, message core.Message, txctx *Co // Default tracer is the struct logger tracer = logger.NewStructLogger(config.Config) if config.Tracer != nil { - tracer, err = New(*config.Tracer, txctx, config.TracerConfig) + tracer, err = DefaultDirectory.New(*config.Tracer, txctx, config.TracerConfig) if err != nil { return nil, err } } + vmenv := vm.NewEVM(vmctx, txContext, statedb, api.backend.ChainConfig(), vm.Config{Tracer: tracer, NoBaseFee: true}) + // Define a meaningful timeout of a single transaction trace if config.Timeout != nil { if timeout, err = time.ParseDuration(*config.Timeout); err != nil { @@ -977,15 +1010,15 @@ func (api *baseAPI) traceTx(ctx context.Context, message core.Message, txctx *Co <-deadlineCtx.Done() if errors.Is(deadlineCtx.Err(), context.DeadlineExceeded) { tracer.Stop(errors.New("execution timeout")) + // Stop evm execution. Note cancellation is not necessarily immediate. + vmenv.Cancel() } }() defer cancel() - // Run the transaction with tracing enabled. 
- vmenv := vm.NewEVM(vmctx, txContext, statedb, api.backend.ChainConfig(), vm.Config{Debug: true, Tracer: tracer, NoBaseFee: true}) // Call Prepare to clear out the statedb access list - statedb.Prepare(txctx.TxHash, txctx.TxIndex) - if _, err = core.ApplyMessage(vmenv, message, new(core.GasPool).AddGas(message.Gas())); err != nil { + statedb.SetTxContext(txctx.TxHash, txctx.TxIndex) + if _, err = core.ApplyMessage(vmenv, message, new(core.GasPool).AddGas(message.GasLimit)); err != nil { return nil, fmt.Errorf("tracing failed: %w", err) } return tracer.GetResult() @@ -1053,8 +1086,12 @@ func overrideConfig(original *params.ChainConfig, override *params.ChainConfig) copy.CortinaBlockTimestamp = timestamp canon = false } - if timestamp := override.DUpgradeBlockTimestamp; timestamp != nil { - copy.DUpgradeBlockTimestamp = timestamp + if timestamp := override.DurangoBlockTimestamp; timestamp != nil { + copy.DurangoBlockTimestamp = timestamp + canon = false + } + if timestamp := override.CancunTime; timestamp != nil { + copy.CancunTime = timestamp canon = false } diff --git a/coreth/eth/tracers/api_test.go b/coreth/eth/tracers/api_test.go index be367340..d33dea62 100644 --- a/coreth/eth/tracers/api_test.go +++ b/coreth/eth/tracers/api_test.go @@ -47,19 +47,18 @@ import ( "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/core/vm" "github.com/ava-labs/coreth/eth/tracers/logger" - "github.com/ava-labs/coreth/ethdb" "github.com/ava-labs/coreth/internal/ethapi" "github.com/ava-labs/coreth/params" "github.com/ava-labs/coreth/rpc" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethdb" ) var ( - errStateNotFound = errors.New("state not found") - errBlockNotFound = errors.New("block not found") - errTransactionNotFound = errors.New("transaction not found") + errStateNotFound = errors.New("state not found") + errBlockNotFound = errors.New("block 
not found") ) type testBackend struct { @@ -72,6 +71,8 @@ type testBackend struct { relHook func() // Hook is invoked when the requested state is released } +// testBackend creates a new test backend. OBS: After test is done, teardown must be +// invoked in order to release associated resources. func newTestBackend(t *testing.T, n int, gspec *core.Genesis, generator func(i int, b *core.BlockGen)) *testBackend { backend := &testBackend{ chainConfig: gspec.Config, @@ -86,10 +87,11 @@ func newTestBackend(t *testing.T, n int, gspec *core.Genesis, generator func(i i // Import the canonical chain cacheConfig := &core.CacheConfig{ - TrieCleanLimit: 256, - TrieDirtyLimit: 256, - SnapshotLimit: 128, - Pruning: false, // Archive mode + TrieCleanLimit: 256, + TrieDirtyLimit: 256, + TriePrefetcherParallelism: 4, + SnapshotLimit: 128, + Pruning: false, // Archive mode } chain, err := core.NewBlockChain(backend.chaindb, cacheConfig, gspec, backend.engine, vm.Config{}, common.Hash{}, false) if err != nil { @@ -125,7 +127,7 @@ func (b *testBackend) BlockByHash(ctx context.Context, hash common.Hash) (*types func (b *testBackend) BlockByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Block, error) { if number == rpc.PendingBlockNumber || number == rpc.LatestBlockNumber { - return b.chain.CurrentBlock(), nil + return b.chain.GetBlockByNumber(b.chain.CurrentBlock().Number.Uint64()), nil } return b.chain.GetBlockByNumber(uint64(number)), nil } @@ -134,9 +136,6 @@ func (b *testBackend) BadBlocks() ([]*types.Block, []*core.BadBlockReason) { ret func (b *testBackend) GetTransaction(ctx context.Context, txHash common.Hash) (*types.Transaction, common.Hash, uint64, uint64, error) { tx, hash, blockNumber, index := rawdb.ReadTransaction(b.chaindb, txHash) - if tx == nil { - return nil, common.Hash{}, 0, 0, errTransactionNotFound - } return tx, hash, blockNumber, index, nil } @@ -156,6 +155,11 @@ func (b *testBackend) ChainDb() ethdb.Database { return b.chaindb } +// teardown 
releases the associated resources. +func (b *testBackend) teardown() { + b.chain.Stop() +} + func (b *testBackend) StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, readOnly bool, preferDisk bool) (*state.StateDB, StateReleaseFunc, error) { statedb, err := b.chain.StateAt(block.Root()) if err != nil { @@ -172,22 +176,37 @@ func (b *testBackend) StateAtBlock(ctx context.Context, block *types.Block, reex return statedb, release, nil } -func (b *testBackend) StateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (core.Message, vm.BlockContext, *state.StateDB, StateReleaseFunc, error) { +func (b *testBackend) StateAtNextBlock(ctx context.Context, parent, nextBlock *types.Block, reexec uint64, base *state.StateDB, readOnly bool, preferDisk bool) (*state.StateDB, StateReleaseFunc, error) { + statedb, release, err := b.StateAtBlock(ctx, parent, reexec, base, readOnly, preferDisk) + if err != nil { + return nil, nil, err + } + // Apply upgrades to the parent state + err = core.ApplyUpgrades(b.chainConfig, &parent.Header().Time, nextBlock, statedb) + if err != nil { + release() + return nil, nil, err + } + + return statedb, release, nil +} + +func (b *testBackend) StateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (*core.Message, vm.BlockContext, *state.StateDB, StateReleaseFunc, error) { parent := b.chain.GetBlock(block.ParentHash(), block.NumberU64()-1) if parent == nil { return nil, vm.BlockContext{}, nil, nil, errBlockNotFound } - statedb, release, err := b.StateAtBlock(ctx, parent, reexec, nil, true, false) + statedb, release, err := b.StateAtNextBlock(ctx, parent, block, reexec, nil, true, false) if err != nil { - return nil, vm.BlockContext{}, nil, nil, errStateNotFound + return nil, vm.BlockContext{}, nil, nil, err } if txIndex == 0 && len(block.Transactions()) == 0 { return nil, vm.BlockContext{}, statedb, release, nil } // Recompute transactions up to the 
target index. - signer := types.MakeSigner(b.chainConfig, block.Number(), new(big.Int).SetUint64(block.Time())) + signer := types.MakeSigner(b.chainConfig, block.Number(), block.Time()) for idx, tx := range block.Transactions() { - msg, _ := tx.AsMessage(signer, block.BaseFee()) + msg, _ := core.TransactionToMessage(tx, signer, block.BaseFee()) txContext := core.NewEVMTxContext(msg) context := core.NewEVMBlockContext(block.Header(), b.chain, nil) if idx == txIndex { @@ -213,16 +232,19 @@ func TestTraceCall(t *testing.T) { accounts[0].addr: {Balance: big.NewInt(params.Ether)}, accounts[1].addr: {Balance: big.NewInt(params.Ether)}, accounts[2].addr: {Balance: big.NewInt(params.Ether)}, - }} + }, + } genBlocks := 10 signer := types.HomesteadSigner{} - api := NewAPI(newTestBackend(t, genBlocks, genesis, func(i int, b *core.BlockGen) { + backend := newTestBackend(t, genBlocks, genesis, func(i int, b *core.BlockGen) { // Transfer from account[0] to account[1] // value: 1000 wei // fee: 0 wei tx, _ := types.SignTx(types.NewTransaction(uint64(i), accounts[1].addr, big.NewInt(1000), params.TxGas, b.BaseFee(), nil), signer, accounts[0].key) b.AddTx(tx) - })) + }) + defer backend.teardown() + api := NewAPI(backend) var testSuite = []struct { blockNumber rpc.BlockNumber call ethapi.TransactionArgs @@ -348,14 +370,16 @@ func TestTraceTransaction(t *testing.T) { } target := common.Hash{} signer := types.HomesteadSigner{} - api := NewAPI(newTestBackend(t, 1, genesis, func(i int, b *core.BlockGen) { + backend := newTestBackend(t, 1, genesis, func(i int, b *core.BlockGen) { // Transfer from account[0] to account[1] // value: 1000 wei // fee: 0 wei tx, _ := types.SignTx(types.NewTransaction(uint64(i), accounts[1].addr, big.NewInt(1000), params.TxGas, new(big.Int).Add(b.BaseFee(), big.NewInt(int64(500*params.GWei))), nil), signer, accounts[0].key) b.AddTx(tx) target = tx.Hash() - })) + }) + defer backend.chain.Stop() + api := NewAPI(backend) result, err := 
api.TraceTransaction(context.Background(), target, nil) if err != nil { t.Errorf("Failed to trace transaction %v", err) @@ -373,6 +397,12 @@ func TestTraceTransaction(t *testing.T) { if !reflect.DeepEqual(have, expected) { t.Errorf("Transaction tracing result is different: have %v want %v", have, expected) } + + // Test non-existent transaction + _, err = api.TraceTransaction(context.Background(), common.Hash{42}, nil) + if !errors.Is(err, errTxNotFound) { + t.Fatalf("want %v, have %v", errTxNotFound, err) + } } func TestTraceBlock(t *testing.T) { @@ -390,13 +420,17 @@ func TestTraceBlock(t *testing.T) { } genBlocks := 10 signer := types.HomesteadSigner{} - api := NewAPI(newTestBackend(t, genBlocks, genesis, func(i int, b *core.BlockGen) { + var txHash common.Hash + backend := newTestBackend(t, genBlocks, genesis, func(i int, b *core.BlockGen) { // Transfer from account[0] to account[1] // value: 1000 wei // fee: 0 wei - tx, _ := types.SignTx(types.NewTransaction(uint64(i), accounts[1].addr, big.NewInt(1000), params.TxGas, new(big.Int).Add(b.BaseFee(), big.NewInt(int64(500*params.GWei))), nil), signer, accounts[0].key) + tx, _ := types.SignTx(types.NewTransaction(uint64(i), accounts[1].addr, big.NewInt(1000), params.TxGas, b.BaseFee(), nil), signer, accounts[0].key) b.AddTx(tx) - })) + txHash = tx.Hash() + }) + defer backend.chain.Stop() + api := NewAPI(backend) var testSuite = []struct { blockNumber rpc.BlockNumber @@ -412,7 +446,7 @@ func TestTraceBlock(t *testing.T) { // Trace head block { blockNumber: rpc.BlockNumber(genBlocks), - want: `[{"result":{"gas":21000,"failed":false,"returnValue":"","structLogs":[]}}]`, + want: fmt.Sprintf(`[{"txHash":"%v","result":{"gas":21000,"failed":false,"returnValue":"","structLogs":[]}}]`, txHash), }, // Trace non-existent block { @@ -422,12 +456,12 @@ func TestTraceBlock(t *testing.T) { // Trace latest block { blockNumber: rpc.LatestBlockNumber, - want: 
`[{"result":{"gas":21000,"failed":false,"returnValue":"","structLogs":[]}}]`, + want: fmt.Sprintf(`[{"txHash":"%v","result":{"gas":21000,"failed":false,"returnValue":"","structLogs":[]}}]`, txHash), }, // Trace pending block { blockNumber: rpc.PendingBlockNumber, - want: `[{"result":{"gas":21000,"failed":false,"returnValue":"","structLogs":[]}}]`, + want: fmt.Sprintf(`[{"txHash":"%v","result":{"gas":21000,"failed":false,"returnValue":"","structLogs":[]}}]`, txHash), }, } for i, tc := range testSuite { @@ -458,23 +492,34 @@ func TestTracingWithOverrides(t *testing.T) { t.Parallel() // Initialize test accounts accounts := newAccounts(3) + storageAccount := common.Address{0x13, 37} genesis := &core.Genesis{ - Config: params.TestBanffChainConfig, // TODO: go-ethereum has not enabled Shanghai yet, so we use Banff here so tests pass. + Config: params.TestCortinaChainConfig, // TODO: go-ethereum has not enabled Shanghai yet, so we use Cortina here so tests pass. Alloc: core.GenesisAlloc{ accounts[0].addr: {Balance: big.NewInt(params.Ether)}, accounts[1].addr: {Balance: big.NewInt(params.Ether)}, accounts[2].addr: {Balance: big.NewInt(params.Ether)}, + // An account with existing storage + storageAccount: { + Balance: new(big.Int), + Storage: map[common.Hash]common.Hash{ + common.HexToHash("0x03"): common.HexToHash("0x33"), + common.HexToHash("0x04"): common.HexToHash("0x44"), + }, + }, }, } genBlocks := 10 signer := types.HomesteadSigner{} - api := NewAPI(newTestBackend(t, genBlocks, genesis, func(i int, b *core.BlockGen) { + backend := newTestBackend(t, genBlocks, genesis, func(i int, b *core.BlockGen) { // Transfer from account[0] to account[1] // value: 1000 wei // fee: 0 wei tx, _ := types.SignTx(types.NewTransaction(uint64(i), accounts[1].addr, big.NewInt(1000), params.TxGas, b.BaseFee(), nil), signer, accounts[0].key) b.AddTx(tx) - })) + }) + defer backend.chain.Stop() + api := NewAPI(backend) randomAccounts := newAccounts(3) type res struct { Gas int @@ -581,6 
+626,164 @@ func TestTracingWithOverrides(t *testing.T) { }, want: `{"gas":72666,"failed":false,"returnValue":"000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"}`, }, + /* + pragma solidity =0.8.12; + + contract Test { + uint private x; + + function test2() external { + x = 1337; + revert(); + } + + function test() external returns (uint) { + x = 1; + try this.test2() {} catch (bytes memory) {} + return x; + } + } + */ + { // First with only code override, not storage override + blockNumber: rpc.LatestBlockNumber, + call: ethapi.TransactionArgs{ + From: &randomAccounts[0].addr, + To: &randomAccounts[2].addr, + Data: newRPCBytes(common.Hex2Bytes("f8a8fd6d")), // + }, + config: &TraceCallConfig{ + StateOverrides: ðapi.StateOverride{ + randomAccounts[2].addr: ethapi.OverrideAccount{ + Code: newRPCBytes(common.Hex2Bytes("6080604052348015600f57600080fd5b506004361060325760003560e01c806366e41cb7146037578063f8a8fd6d14603f575b600080fd5b603d6057565b005b60456062565b60405190815260200160405180910390f35b610539600090815580fd5b60006001600081905550306001600160a01b03166366e41cb76040518163ffffffff1660e01b8152600401600060405180830381600087803b15801560a657600080fd5b505af192505050801560b6575060015b60e9573d80801560e1576040519150601f19603f3d011682016040523d82523d6000602084013e60e6565b606091505b50505b506000549056fea26469706673582212205ce45de745a5308f713cb2f448589177ba5a442d1a2eff945afaa8915961b4d064736f6c634300080c0033")), + }, + }, + }, + want: `{"gas":44100,"failed":false,"returnValue":"0000000000000000000000000000000000000000000000000000000000000001"}`, + }, + { // Same again, this time with storage override + blockNumber: rpc.LatestBlockNumber, + call: ethapi.TransactionArgs{ + From: &randomAccounts[0].addr, + To: &randomAccounts[2].addr, + Data: newRPCBytes(common.Hex2Bytes("f8a8fd6d")), // + }, + config: &TraceCallConfig{ + StateOverrides: 
ðapi.StateOverride{ + randomAccounts[2].addr: ethapi.OverrideAccount{ + Code: newRPCBytes(common.Hex2Bytes("6080604052348015600f57600080fd5b506004361060325760003560e01c806366e41cb7146037578063f8a8fd6d14603f575b600080fd5b603d6057565b005b60456062565b60405190815260200160405180910390f35b610539600090815580fd5b60006001600081905550306001600160a01b03166366e41cb76040518163ffffffff1660e01b8152600401600060405180830381600087803b15801560a657600080fd5b505af192505050801560b6575060015b60e9573d80801560e1576040519150601f19603f3d011682016040523d82523d6000602084013e60e6565b606091505b50505b506000549056fea26469706673582212205ce45de745a5308f713cb2f448589177ba5a442d1a2eff945afaa8915961b4d064736f6c634300080c0033")), + State: newStates([]common.Hash{{}}, []common.Hash{{}}), + }, + }, + }, + //want: `{"gas":46900,"failed":false,"returnValue":"0000000000000000000000000000000000000000000000000000000000000539"}`, + want: `{"gas":44100,"failed":false,"returnValue":"0000000000000000000000000000000000000000000000000000000000000001"}`, + }, + { // No state override + blockNumber: rpc.LatestBlockNumber, + call: ethapi.TransactionArgs{ + From: &randomAccounts[0].addr, + To: &storageAccount, + Data: newRPCBytes(common.Hex2Bytes("f8a8fd6d")), // + }, + config: &TraceCallConfig{ + StateOverrides: ðapi.StateOverride{ + storageAccount: ethapi.OverrideAccount{ + Code: newRPCBytes([]byte{ + // SLOAD(3) + SLOAD(4) (which is 0x77) + byte(vm.PUSH1), 0x04, + byte(vm.SLOAD), + byte(vm.PUSH1), 0x03, + byte(vm.SLOAD), + byte(vm.ADD), + // 0x77 -> MSTORE(0) + byte(vm.PUSH1), 0x00, + byte(vm.MSTORE), + // RETURN (0, 32) + byte(vm.PUSH1), 32, + byte(vm.PUSH1), 00, + byte(vm.RETURN), + }), + }, + }, + }, + want: `{"gas":25288,"failed":false,"returnValue":"0000000000000000000000000000000000000000000000000000000000000077"}`, + }, + { // Full state override + // The original storage is + // 3: 0x33 + // 4: 0x44 + // With a full override, where we set 3:0x11, the slot 4 should be + // removed. 
So SLOT(3)+SLOT(4) should be 0x11. + blockNumber: rpc.LatestBlockNumber, + call: ethapi.TransactionArgs{ + From: &randomAccounts[0].addr, + To: &storageAccount, + Data: newRPCBytes(common.Hex2Bytes("f8a8fd6d")), // + }, + config: &TraceCallConfig{ + StateOverrides: ðapi.StateOverride{ + storageAccount: ethapi.OverrideAccount{ + Code: newRPCBytes([]byte{ + // SLOAD(3) + SLOAD(4) (which is now 0x11 + 0x00) + byte(vm.PUSH1), 0x04, + byte(vm.SLOAD), + byte(vm.PUSH1), 0x03, + byte(vm.SLOAD), + byte(vm.ADD), + // 0x11 -> MSTORE(0) + byte(vm.PUSH1), 0x00, + byte(vm.MSTORE), + // RETURN (0, 32) + byte(vm.PUSH1), 32, + byte(vm.PUSH1), 00, + byte(vm.RETURN), + }), + State: newStates( + []common.Hash{common.HexToHash("0x03")}, + []common.Hash{common.HexToHash("0x11")}), + }, + }, + }, + want: `{"gas":25288,"failed":false,"returnValue":"0000000000000000000000000000000000000000000000000000000000000011"}`, + }, + { // Partial state override + // The original storage is + // 3: 0x33 + // 4: 0x44 + // With a partial override, where we set 3:0x11, the slot 4 as before. + // So SLOT(3)+SLOT(4) should be 0x55. 
+ blockNumber: rpc.LatestBlockNumber, + call: ethapi.TransactionArgs{ + From: &randomAccounts[0].addr, + To: &storageAccount, + Data: newRPCBytes(common.Hex2Bytes("f8a8fd6d")), // + }, + config: &TraceCallConfig{ + StateOverrides: ðapi.StateOverride{ + storageAccount: ethapi.OverrideAccount{ + Code: newRPCBytes([]byte{ + // SLOAD(3) + SLOAD(4) (which is now 0x11 + 0x44) + byte(vm.PUSH1), 0x04, + byte(vm.SLOAD), + byte(vm.PUSH1), 0x03, + byte(vm.SLOAD), + byte(vm.ADD), + // 0x55 -> MSTORE(0) + byte(vm.PUSH1), 0x00, + byte(vm.MSTORE), + // RETURN (0, 32) + byte(vm.PUSH1), 32, + byte(vm.PUSH1), 00, + byte(vm.RETURN), + }), + StateDiff: &map[common.Hash]common.Hash{ + common.HexToHash("0x03"): common.HexToHash("0x11"), + }, + }, + }, + }, + want: `{"gas":25288,"failed":false,"returnValue":"0000000000000000000000000000000000000000000000000000000000000055"}`, + }, } for i, tc := range testSuite { result, err := api.TraceCall(context.Background(), tc.call, rpc.BlockNumberOrHash{BlockNumber: &tc.blockNumber}, tc.config) @@ -607,7 +810,8 @@ func TestTracingWithOverrides(t *testing.T) { json.Unmarshal(resBytes, &have) json.Unmarshal([]byte(tc.want), &want) if !reflect.DeepEqual(have, want) { - t.Errorf("test %d, result mismatch, have\n%v\n, want\n%v\n", i, string(resBytes), want) + t.Logf("result: %v\n", string(resBytes)) + t.Errorf("test %d, result mismatch, have\n%v\n, want\n%v\n", i, have, want) } } } @@ -670,8 +874,8 @@ func TestTraceChain(t *testing.T) { signer := types.HomesteadSigner{} var ( - ref uint32 // total refs has made - rel uint32 // total rels has made + ref atomic.Uint32 // total refs has made + rel atomic.Uint32 // total rels has made nonce uint64 ) backend := newTestBackend(t, genBlocks, genesis, func(i int, b *core.BlockGen) { @@ -684,11 +888,11 @@ func TestTraceChain(t *testing.T) { nonce += 1 } }) - backend.refHook = func() { atomic.AddUint32(&ref, 1) } - backend.relHook = func() { atomic.AddUint32(&rel, 1) } + backend.refHook = func() { ref.Add(1) } + 
backend.relHook = func() { rel.Add(1) } api := NewAPI(backend) - single := `{"result":{"gas":21000,"failed":false,"returnValue":"","structLogs":[]}}` + single := `{"txHash":"0x0000000000000000000000000000000000000000000000000000000000000000","result":{"gas":21000,"failed":false,"returnValue":"","structLogs":[]}}` var cases = []struct { start uint64 end uint64 @@ -698,7 +902,8 @@ func TestTraceChain(t *testing.T) { {10, 20, nil}, // the middle chain range, blocks [11, 20] } for _, c := range cases { - ref, rel = 0, 0 // clean up the counters + ref.Store(0) + rel.Store(0) from, _ := api.blockByNumber(context.Background(), rpc.BlockNumber(c.start)) to, _ := api.blockByNumber(context.Background(), rpc.BlockNumber(c.end)) @@ -706,16 +911,17 @@ func TestTraceChain(t *testing.T) { next := c.start + 1 for result := range resCh { - if next != uint64(result.Block) { - t.Error("Unexpected tracing block") + if have, want := uint64(result.Block), next; have != want { + t.Fatalf("unexpected tracing block, have %d want %d", have, want) } - if len(result.Traces) != int(next) { - t.Error("Unexpected tracing result") + if have, want := len(result.Traces), int(next); have != want { + t.Fatalf("unexpected result length, have %d want %d", have, want) } for _, trace := range result.Traces { + trace.TxHash = common.Hash{} blob, _ := json.Marshal(trace) - if string(blob) != single { - t.Error("Unexpected tracing result") + if have, want := string(blob), single; have != want { + t.Fatalf("unexpected tracing result, have\n%v\nwant:\n%v", have, want) } } next += 1 @@ -723,8 +929,9 @@ func TestTraceChain(t *testing.T) { if next != c.end+1 { t.Error("Missing tracing block") } - if ref != rel { - t.Errorf("Ref and deref actions are not equal, ref %d rel %d", ref, rel) + + if nref, nrel := ref.Load(), rel.Load(); nref != nrel { + t.Errorf("Ref and deref actions are not equal, ref %d rel %d", nref, nrel) } } } diff --git a/coreth/eth/tracers/internal/tracetest/calltrace_test.go 
b/coreth/eth/tracers/internal/tracetest/calltrace_test.go index 1964c420..c3886364 100644 --- a/coreth/eth/tracers/internal/tracetest/calltrace_test.go +++ b/coreth/eth/tracers/internal/tracetest/calltrace_test.go @@ -31,10 +31,8 @@ import ( "math/big" "os" "path/filepath" - "reflect" "strings" "testing" - "unicode" "github.com/ava-labs/coreth/core" "github.com/ava-labs/coreth/core/rawdb" @@ -46,64 +44,9 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/math" - "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/rlp" - - // Force-load native and js packages to trigger registration - _ "github.com/ava-labs/coreth/eth/tracers/js" - _ "github.com/ava-labs/coreth/eth/tracers/native" ) -// To generate a new callTracer test, copy paste the makeTest method below into -// a Geth console and call it with a transaction hash you which to export. - -/* -// makeTest generates a callTracer test by running a prestate reassembled and a -// call trace run, assembling all the gathered information into a test case. 
-var makeTest = function(tx, rewind) { - // Generate the genesis block from the block, transaction and prestate data - var block = eth.getBlock(eth.getTransaction(tx).blockHash); - var genesis = eth.getBlock(block.parentHash); - - delete genesis.gasUsed; - delete genesis.logsBloom; - delete genesis.parentHash; - delete genesis.receiptsRoot; - delete genesis.sha3Uncles; - delete genesis.size; - delete genesis.transactions; - delete genesis.transactionsRoot; - delete genesis.uncles; - - genesis.gasLimit = genesis.gasLimit.toString(); - genesis.number = genesis.number.toString(); - genesis.timestamp = genesis.timestamp.toString(); - - genesis.alloc = debug.traceTransaction(tx, {tracer: "prestateTracer", rewind: rewind}); - for (var key in genesis.alloc) { - genesis.alloc[key].nonce = genesis.alloc[key].nonce.toString(); - } - genesis.config = admin.nodeInfo.protocols.eth.config; - - // Generate the call trace and produce the test input - var result = debug.traceTransaction(tx, {tracer: "callTracer", rewind: rewind}); - delete result.time; - - console.log(JSON.stringify({ - genesis: genesis, - context: { - number: block.number.toString(), - difficulty: block.difficulty, - timestamp: block.timestamp.toString(), - gasLimit: block.gasLimit.toString(), - miner: block.miner, - }, - input: eth.getRawTransaction(tx), - result: result, - }, null, 2)); -} -*/ - type callContext struct { Number math.HexOrDecimal64 `json:"number"` Difficulty *math.HexOrDecimal256 `json:"difficulty"` @@ -112,18 +55,28 @@ type callContext struct { Miner common.Address `json:"miner"` } +// callLog is the result of LOG opCode +type callLog struct { + Address common.Address `json:"address"` + Topics []common.Hash `json:"topics"` + Data hexutil.Bytes `json:"data"` +} + // callTrace is the result of a callTracer run. 
type callTrace struct { - Type string `json:"type"` - From common.Address `json:"from"` - To common.Address `json:"to"` - Input hexutil.Bytes `json:"input"` - Output hexutil.Bytes `json:"output"` - Gas *hexutil.Uint64 `json:"gas,omitempty"` - GasUsed *hexutil.Uint64 `json:"gasUsed,omitempty"` - Value *hexutil.Big `json:"value,omitempty"` - Error string `json:"error,omitempty"` - Calls []callTrace `json:"calls,omitempty"` + From common.Address `json:"from"` + Gas *hexutil.Uint64 `json:"gas"` + GasUsed *hexutil.Uint64 `json:"gasUsed"` + To *common.Address `json:"to,omitempty"` + Input hexutil.Bytes `json:"input"` + Output hexutil.Bytes `json:"output,omitempty"` + Error string `json:"error,omitempty"` + RevertReason string `json:"revertReason,omitempty"` + Calls []callTrace `json:"calls,omitempty"` + Logs []callLog `json:"logs,omitempty"` + Value *hexutil.Big `json:"value,omitempty"` + // Gencodec adds overridden fields at the end + Type string `json:"type"` } // callTracerTest defines a single test to check the call tracer against. 
@@ -145,7 +98,12 @@ func TestCallTracerNative(t *testing.T) { testCallTracer("callTracer", "call_tracer", t) } +func TestCallTracerNativeWithLog(t *testing.T) { + testCallTracer("callTracer", "call_tracer_withLog", t) +} + func testCallTracer(tracerName string, dirPath string, t *testing.T) { + isLegacy := strings.HasSuffix(dirPath, "_legacy") files, err := os.ReadDir(filepath.Join("testdata", dirPath)) if err != nil { t.Fatalf("failed to retrieve tracer test suite: %v", err) @@ -168,12 +126,12 @@ func testCallTracer(tracerName string, dirPath string, t *testing.T) { } else if err := json.Unmarshal(blob, test); err != nil { t.Fatalf("failed to parse testcase: %v", err) } - if err := rlp.DecodeBytes(common.FromHex(test.Input), tx); err != nil { + if err := tx.UnmarshalBinary(common.FromHex(test.Input)); err != nil { t.Fatalf("failed to parse testcase input: %v", err) } // Configure a blockchain with the given prestate var ( - signer = types.MakeSigner(test.Genesis.Config, new(big.Int).SetUint64(uint64(test.Context.Number)), new(big.Int).SetUint64(uint64(test.Context.Time))) + signer = types.MakeSigner(test.Genesis.Config, new(big.Int).SetUint64(uint64(test.Context.Number)), uint64(test.Context.Time)) origin, _ = signer.Sender(tx) txContext = vm.TxContext{ Origin: origin, @@ -184,73 +142,62 @@ func testCallTracer(tracerName string, dirPath string, t *testing.T) { Transfer: core.Transfer, Coinbase: test.Context.Miner, BlockNumber: new(big.Int).SetUint64(uint64(test.Context.Number)), - Time: new(big.Int).SetUint64(uint64(test.Context.Time)), + Time: uint64(test.Context.Time), Difficulty: (*big.Int)(test.Context.Difficulty), GasLimit: uint64(test.Context.GasLimit), + BaseFee: test.Genesis.BaseFee, } _, statedb = tests.MakePreState(rawdb.NewMemoryDatabase(), test.Genesis.Alloc, false) ) - tracer, err := tracers.New(tracerName, new(tracers.Context), test.TracerConfig) + tracer, err := tracers.DefaultDirectory.New(tracerName, new(tracers.Context), test.TracerConfig) if err 
!= nil { t.Fatalf("failed to create call tracer: %v", err) } - evm := vm.NewEVM(context, txContext, statedb, test.Genesis.Config, vm.Config{Debug: true, Tracer: tracer}) - msg, err := tx.AsMessage(signer, nil) + evm := vm.NewEVM(context, txContext, statedb, test.Genesis.Config, vm.Config{Tracer: tracer}) + msg, err := core.TransactionToMessage(tx, signer, nil) if err != nil { t.Fatalf("failed to prepare transaction for tracing: %v", err) } - st := core.NewStateTransition(evm, msg, new(core.GasPool).AddGas(tx.Gas())) - if _, err = st.TransitionDb(); err != nil { + vmRet, err := core.ApplyMessage(evm, msg, new(core.GasPool).AddGas(tx.Gas())) + if err != nil { t.Fatalf("failed to execute transaction: %v", err) } - // Retrieve the trace result and compare against the etalon + // Retrieve the trace result and compare against the expected. res, err := tracer.GetResult() if err != nil { t.Fatalf("failed to retrieve trace result: %v", err) } - ret := new(callTrace) - if err := json.Unmarshal(res, ret); err != nil { - t.Fatalf("failed to unmarshal trace result: %v", err) + // The legacy javascript calltracer marshals json in js, which + // is not deterministic (as opposed to the golang json encoder). + if isLegacy { + // This is a tweak to make it deterministic. Can be removed when + // we remove the legacy tracer. 
+ var x callTrace + json.Unmarshal(res, &x) + res, _ = json.Marshal(x) } - - if !jsonEqual(ret, test.Result) { - // uncomment this for easier debugging - //have, _ := json.MarshalIndent(ret, "", " ") - //want, _ := json.MarshalIndent(test.Result, "", " ") - //t.Fatalf("trace mismatch: \nhave %+v\nwant %+v", string(have), string(want)) - t.Fatalf("trace mismatch: \nhave %+v\nwant %+v", ret, test.Result) + want, err := json.Marshal(test.Result) + if err != nil { + t.Fatalf("failed to marshal test: %v", err) + } + if string(want) != string(res) { + t.Fatalf("trace mismatch\n have: %v\n want: %v\n", string(res), string(want)) + } + // Sanity check: compare top call's gas used against vm result + type simpleResult struct { + GasUsed hexutil.Uint64 + } + var topCall simpleResult + if err := json.Unmarshal(res, &topCall); err != nil { + t.Fatalf("failed to unmarshal top calls gasUsed: %v", err) + } + if uint64(topCall.GasUsed) != vmRet.UsedGas { + t.Fatalf("top call has invalid gasUsed. have: %d want: %d", topCall.GasUsed, vmRet.UsedGas) } }) } } -// jsonEqual is similar to reflect.DeepEqual, but does a 'bounce' via json prior to -// comparison -func jsonEqual(x, y interface{}) bool { - xTrace := new(callTrace) - yTrace := new(callTrace) - if xj, err := json.Marshal(x); err == nil { - json.Unmarshal(xj, xTrace) - } else { - return false - } - if yj, err := json.Marshal(y); err == nil { - json.Unmarshal(yj, yTrace) - } else { - return false - } - return reflect.DeepEqual(xTrace, yTrace) -} - -// camel converts a snake cased input string into a camel cased output. 
-func camel(str string) string { - pieces := strings.Split(str, "_") - for i := 1; i < len(pieces); i++ { - pieces[i] = string(unicode.ToUpper(rune(pieces[i][0]))) + pieces[i][1:] - } - return strings.Join(pieces, "") -} - func BenchmarkTracers(b *testing.B) { files, err := os.ReadDir(filepath.Join("testdata", "call_tracer")) if err != nil { @@ -281,8 +228,8 @@ func benchTracer(tracerName string, test *callTracerTest, b *testing.B) { if err := rlp.DecodeBytes(common.FromHex(test.Input), tx); err != nil { b.Fatalf("failed to parse testcase input: %v", err) } - signer := types.MakeSigner(test.Genesis.Config, new(big.Int).SetUint64(uint64(test.Context.Number)), new(big.Int).SetUint64(uint64(test.Context.Time))) - msg, err := tx.AsMessage(signer, nil) + signer := types.MakeSigner(test.Genesis.Config, new(big.Int).SetUint64(uint64(test.Context.Number)), uint64(test.Context.Time)) + msg, err := core.TransactionToMessage(tx, signer, nil) if err != nil { b.Fatalf("failed to prepare transaction for tracing: %v", err) } @@ -296,7 +243,7 @@ func benchTracer(tracerName string, test *callTracerTest, b *testing.B) { Transfer: core.Transfer, Coinbase: test.Context.Miner, BlockNumber: new(big.Int).SetUint64(uint64(test.Context.Number)), - Time: new(big.Int).SetUint64(uint64(test.Context.Time)), + Time: uint64(test.Context.Time), Difficulty: (*big.Int)(test.Context.Difficulty), GasLimit: uint64(test.Context.GasLimit), } @@ -305,11 +252,11 @@ func benchTracer(tracerName string, test *callTracerTest, b *testing.B) { b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { - tracer, err := tracers.New(tracerName, new(tracers.Context), nil) + tracer, err := tracers.DefaultDirectory.New(tracerName, new(tracers.Context), nil) if err != nil { b.Fatalf("failed to create call tracer: %v", err) } - evm := vm.NewEVM(context, txContext, statedb, test.Genesis.Config, vm.Config{Debug: true, Tracer: tracer}) + evm := vm.NewEVM(context, txContext, statedb, test.Genesis.Config, vm.Config{Tracer: 
tracer}) snap := statedb.Snapshot() st := core.NewStateTransition(evm, msg, new(core.GasPool).AddGas(tx.Gas())) if _, err = st.TransitionDb(); err != nil { @@ -322,81 +269,143 @@ func benchTracer(tracerName string, test *callTracerTest, b *testing.B) { } } -// TestZeroValueToNotExitCall tests the calltracer(s) on the following: -// Tx to A, A calls B with zero value. B does not already exist. -// Expected: that enter/exit is invoked and the inner call is shown in the result -func TestZeroValueToNotExitCall(t *testing.T) { - var to = common.HexToAddress("0x00000000000000000000000000000000deadbeef") - privkey, err := crypto.HexToECDSA("0000000000000000deadbeef00000000000000000000000000000000deadbeef") - if err != nil { - t.Fatalf("err %v", err) - } - signer := types.NewEIP155Signer(big.NewInt(1)) - tx, err := types.SignNewTx(privkey, signer, &types.LegacyTx{ - GasPrice: big.NewInt(0), - Gas: 50000, - To: &to, - }) - if err != nil { - t.Fatalf("err %v", err) - } - origin, _ := signer.Sender(tx) - txContext := vm.TxContext{ - Origin: origin, - GasPrice: big.NewInt(1), - } - context := vm.BlockContext{ - CanTransfer: core.CanTransfer, - Transfer: core.Transfer, - Coinbase: common.Address{}, - BlockNumber: new(big.Int).SetUint64(8000000), - Time: new(big.Int).SetUint64(5), - Difficulty: big.NewInt(0x30000), - GasLimit: uint64(6000000), - } - var code = []byte{ - byte(vm.PUSH1), 0x0, byte(vm.DUP1), byte(vm.DUP1), byte(vm.DUP1), // in and outs zero - byte(vm.DUP1), byte(vm.PUSH1), 0xff, byte(vm.GAS), // value=0,address=0xff, gas=GAS - byte(vm.CALL), +func TestInternals(t *testing.T) { + var ( + to = common.HexToAddress("0x00000000000000000000000000000000deadbeef") + origin = common.HexToAddress("0x00000000000000000000000000000000feed") + txContext = vm.TxContext{ + Origin: origin, + GasPrice: big.NewInt(1), + } + context = vm.BlockContext{ + CanTransfer: core.CanTransfer, + Transfer: core.Transfer, + Coinbase: common.Address{}, + BlockNumber: 
new(big.Int).SetUint64(8000000), + Time: 5, + Difficulty: big.NewInt(0x30000), + GasLimit: uint64(6000000), + } + ) + mkTracer := func(name string, cfg json.RawMessage) tracers.Tracer { + tr, err := tracers.DefaultDirectory.New(name, nil, cfg) + if err != nil { + t.Fatalf("failed to create call tracer: %v", err) + } + return tr } - var alloc = core.GenesisAlloc{ - to: core.GenesisAccount{ - Nonce: 1, - Code: code, + + for _, tc := range []struct { + name string + code []byte + tracer tracers.Tracer + want string + }{ + { + // TestZeroValueToNotExitCall tests the calltracer(s) on the following: + // Tx to A, A calls B with zero value. B does not already exist. + // Expected: that enter/exit is invoked and the inner call is shown in the result + name: "ZeroValueToNotExitCall", + code: []byte{ + byte(vm.PUSH1), 0x0, byte(vm.DUP1), byte(vm.DUP1), byte(vm.DUP1), // in and outs zero + byte(vm.DUP1), byte(vm.PUSH1), 0xff, byte(vm.GAS), // value=0,address=0xff, gas=GAS + byte(vm.CALL), + }, + tracer: mkTracer("callTracer", nil), + want: `{"from":"0x000000000000000000000000000000000000feed","gas":"0x13880","gasUsed":"0x54d8","to":"0x00000000000000000000000000000000deadbeef","input":"0x","calls":[{"from":"0x00000000000000000000000000000000deadbeef","gas":"0xe01a","gasUsed":"0x0","to":"0x00000000000000000000000000000000000000ff","input":"0x","value":"0x0","type":"CALL"}],"value":"0x0","type":"CALL"}`, }, - origin: core.GenesisAccount{ - Nonce: 0, - Balance: big.NewInt(500000000000000), + { + name: "Stack depletion in LOG0", + code: []byte{byte(vm.LOG3)}, + tracer: mkTracer("callTracer", json.RawMessage(`{ "withLog": true }`)), + want: `{"from":"0x000000000000000000000000000000000000feed","gas":"0x13880","gasUsed":"0x13880","to":"0x00000000000000000000000000000000deadbeef","input":"0x","error":"stack underflow (0 \u003c=\u003e 5)","value":"0x0","type":"CALL"}`, }, - } - _, statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), alloc, false) - // Create the tracer, the EVM 
environment and run it - tracer, err := tracers.New("callTracer", nil, nil) - if err != nil { - t.Fatalf("failed to create call tracer: %v", err) - } - evm := vm.NewEVM(context, txContext, statedb, params.AvalancheMainnetChainConfig, vm.Config{Debug: true, Tracer: tracer}) - msg, err := tx.AsMessage(signer, nil) - if err != nil { - t.Fatalf("failed to prepare transaction for tracing: %v", err) - } - st := core.NewStateTransition(evm, msg, new(core.GasPool).AddGas(tx.Gas())) - if _, err = st.TransitionDb(); err != nil { - t.Fatalf("failed to execute transaction: %v", err) - } - // Retrieve the trace result and compare against the etalon - res, err := tracer.GetResult() - if err != nil { - t.Fatalf("failed to retrieve trace result: %v", err) - } - have := new(callTrace) - if err := json.Unmarshal(res, have); err != nil { - t.Fatalf("failed to unmarshal trace result: %v", err) - } - wantStr := `{"type":"CALL","from":"0x682a80a6f560eec50d54e63cbeda1c324c5f8d1b","to":"0x00000000000000000000000000000000deadbeef","value":"0x0","gas":"0x7148","gasUsed":"0x2d0","input":"0x","output":"0x","calls":[{"type":"CALL","from":"0x00000000000000000000000000000000deadbeef","to":"0x00000000000000000000000000000000000000ff","value":"0x0","gas":"0x6cbf","gasUsed":"0x0","input":"0x","output":"0x"}]}` - want := new(callTrace) - json.Unmarshal([]byte(wantStr), want) - if !jsonEqual(have, want) { - t.Error("have != want") + { + name: "Mem expansion in LOG0", + code: []byte{ + byte(vm.PUSH1), 0x1, + byte(vm.PUSH1), 0x0, + byte(vm.MSTORE), + byte(vm.PUSH1), 0xff, + byte(vm.PUSH1), 0x0, + byte(vm.LOG0), + }, + tracer: mkTracer("callTracer", json.RawMessage(`{ "withLog": true }`)), + want: 
`{"from":"0x000000000000000000000000000000000000feed","gas":"0x13880","gasUsed":"0x5b9e","to":"0x00000000000000000000000000000000deadbeef","input":"0x","logs":[{"address":"0x00000000000000000000000000000000deadbeef","topics":[],"data":"0x000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"}],"value":"0x0","type":"CALL"}`, + }, + { + // Leads to OOM on the prestate tracer + name: "Prestate-tracer - CREATE2 OOM", + code: []byte{ + byte(vm.PUSH1), 0x1, + byte(vm.PUSH1), 0x0, + byte(vm.MSTORE), + byte(vm.PUSH1), 0x1, + byte(vm.PUSH5), 0xff, 0xff, 0xff, 0xff, 0xff, + byte(vm.PUSH1), 0x1, + byte(vm.PUSH1), 0x0, + byte(vm.CREATE2), + byte(vm.PUSH1), 0xff, + byte(vm.PUSH1), 0x0, + byte(vm.LOG0), + }, + tracer: mkTracer("prestateTracer", nil), + want: `{"0x0000000000000000000000000000000000000000":{"balance":"0x0"},"0x000000000000000000000000000000000000feed":{"balance":"0x1c6bf52647880"},"0x00000000000000000000000000000000deadbeef":{"balance":"0x0","code":"0x6001600052600164ffffffffff60016000f560ff6000a0"}}`, + }, + { + // CREATE2 which requires padding memory by prestate tracer + name: "Prestate-tracer - CREATE2 Memory padding", + code: []byte{ + byte(vm.PUSH1), 0x1, + byte(vm.PUSH1), 0x0, + byte(vm.MSTORE), + byte(vm.PUSH1), 0x1, + byte(vm.PUSH1), 0xff, + byte(vm.PUSH1), 0x1, + byte(vm.PUSH1), 0x0, + byte(vm.CREATE2), + byte(vm.PUSH1), 0xff, + byte(vm.PUSH1), 0x0, + byte(vm.LOG0), + }, + tracer: mkTracer("prestateTracer", nil), + want: 
`{"0x0000000000000000000000000000000000000000":{"balance":"0x0"},"0x000000000000000000000000000000000000feed":{"balance":"0x1c6bf52647880"},"0x00000000000000000000000000000000deadbeef":{"balance":"0x0","code":"0x6001600052600160ff60016000f560ff6000a0"},"0x91ff9a805d36f54e3e272e230f3e3f5c1b330804":{"balance":"0x0"}}`, + }, + } { + t.Run(tc.name, func(t *testing.T) { + _, statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), + core.GenesisAlloc{ + to: core.GenesisAccount{ + Code: tc.code, + }, + origin: core.GenesisAccount{ + Balance: big.NewInt(500000000000000), + }, + }, false) + + evm := vm.NewEVM(context, txContext, statedb, params.AvalancheMainnetChainConfig, vm.Config{Tracer: tc.tracer}) + msg := &core.Message{ + To: &to, + From: origin, + Value: big.NewInt(0), + GasLimit: 80000, + GasPrice: big.NewInt(0), + GasFeeCap: big.NewInt(0), + GasTipCap: big.NewInt(0), + SkipAccountChecks: false, + } + st := core.NewStateTransition(evm, msg, new(core.GasPool).AddGas(msg.GasLimit)) + if _, err := st.TransitionDb(); err != nil { + t.Fatalf("test %v: failed to execute transaction: %v", tc.name, err) + } + // Retrieve the trace result and compare against the expected + res, err := tc.tracer.GetResult() + if err != nil { + t.Fatalf("test %v: failed to retrieve trace result: %v", tc.name, err) + } + if string(res) != tc.want { + t.Errorf("test %v: trace mismatch\n have: %v\n want: %v\n", tc.name, string(res), tc.want) + } + }) } } diff --git a/coreth/eth/tracers/internal/tracetest/flat_calltrace_test.go b/coreth/eth/tracers/internal/tracetest/flat_calltrace_test.go new file mode 100644 index 00000000..c7690f04 --- /dev/null +++ b/coreth/eth/tracers/internal/tracetest/flat_calltrace_test.go @@ -0,0 +1,222 @@ +// (c) 2023, Ava Labs, Inc. +// +// This file is a derived work, based on the go-ethereum library whose original +// notices appear below. +// +// It is distributed under a license compatible with the licensing terms of the +// original code from which it is derived. 
+// +// Much love to the original authors for their work. +package tracetest + +import ( + "encoding/json" + "fmt" + "math/big" + "os" + "path/filepath" + "reflect" + "strings" + "testing" + + "github.com/ava-labs/coreth/core" + "github.com/ava-labs/coreth/core/rawdb" + "github.com/ava-labs/coreth/core/types" + "github.com/ava-labs/coreth/core/vm" + "github.com/ava-labs/coreth/tests" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/rlp" + + // Force-load the native, to trigger registration + "github.com/ava-labs/coreth/eth/tracers" +) + +// flatCallTrace is the result of a callTracerParity run. +type flatCallTrace struct { + Action flatCallTraceAction `json:"action"` + BlockHash common.Hash `json:"-"` + BlockNumber uint64 `json:"-"` + Error string `json:"error,omitempty"` + Result flatCallTraceResult `json:"result,omitempty"` + Subtraces int `json:"subtraces"` + TraceAddress []int `json:"traceAddress"` + TransactionHash common.Hash `json:"-"` + TransactionPosition uint64 `json:"-"` + Type string `json:"type"` + Time string `json:"-"` +} + +type flatCallTraceAction struct { + Author common.Address `json:"author,omitempty"` + RewardType string `json:"rewardType,omitempty"` + SelfDestructed common.Address `json:"address,omitempty"` + Balance hexutil.Big `json:"balance,omitempty"` + CallType string `json:"callType,omitempty"` + CreationMethod string `json:"creationMethod,omitempty"` + From common.Address `json:"from,omitempty"` + Gas hexutil.Uint64 `json:"gas,omitempty"` + Init hexutil.Bytes `json:"init,omitempty"` + Input hexutil.Bytes `json:"input,omitempty"` + RefundAddress common.Address `json:"refundAddress,omitempty"` + To common.Address `json:"to,omitempty"` + Value hexutil.Big `json:"value,omitempty"` +} + +type flatCallTraceResult struct { + Address common.Address `json:"address,omitempty"` + Code hexutil.Bytes `json:"code,omitempty"` + GasUsed hexutil.Uint64 
`json:"gasUsed,omitempty"` + Output hexutil.Bytes `json:"output,omitempty"` +} + +// flatCallTracerTest defines a single test to check the call tracer against. +type flatCallTracerTest struct { + Genesis core.Genesis `json:"genesis"` + Context callContext `json:"context"` + Input string `json:"input"` + TracerConfig json.RawMessage `json:"tracerConfig"` + Result []flatCallTrace `json:"result"` +} + +func flatCallTracerTestRunner(tracerName string, filename string, dirPath string, t testing.TB) error { + // Call tracer test found, read if from disk + blob, err := os.ReadFile(filepath.Join("testdata", dirPath, filename)) + if err != nil { + return fmt.Errorf("failed to read testcase: %v", err) + } + test := new(flatCallTracerTest) + if err := json.Unmarshal(blob, test); err != nil { + return fmt.Errorf("failed to parse testcase: %v", err) + } + // Configure a blockchain with the given prestate + tx := new(types.Transaction) + if err := rlp.DecodeBytes(common.FromHex(test.Input), tx); err != nil { + return fmt.Errorf("failed to parse testcase input: %v", err) + } + signer := types.MakeSigner(test.Genesis.Config, new(big.Int).SetUint64(uint64(test.Context.Number)), uint64(test.Context.Time)) + origin, _ := signer.Sender(tx) + txContext := vm.TxContext{ + Origin: origin, + GasPrice: tx.GasPrice(), + } + context := vm.BlockContext{ + CanTransfer: core.CanTransfer, + Transfer: core.Transfer, + Coinbase: test.Context.Miner, + BlockNumber: new(big.Int).SetUint64(uint64(test.Context.Number)), + Time: uint64(test.Context.Time), + Difficulty: (*big.Int)(test.Context.Difficulty), + GasLimit: uint64(test.Context.GasLimit), + } + _, statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), test.Genesis.Alloc, false) + + // Create the tracer, the EVM environment and run it + tracer, err := tracers.DefaultDirectory.New(tracerName, new(tracers.Context), test.TracerConfig) + if err != nil { + return fmt.Errorf("failed to create call tracer: %v", err) + } + evm := vm.NewEVM(context, 
txContext, statedb, test.Genesis.Config, vm.Config{Tracer: tracer}) + + msg, err := core.TransactionToMessage(tx, signer, nil) + if err != nil { + return fmt.Errorf("failed to prepare transaction for tracing: %v", err) + } + st := core.NewStateTransition(evm, msg, new(core.GasPool).AddGas(tx.Gas())) + + if _, err = st.TransitionDb(); err != nil { + return fmt.Errorf("failed to execute transaction: %v", err) + } + + // Retrieve the trace result and compare against the etalon + res, err := tracer.GetResult() + if err != nil { + return fmt.Errorf("failed to retrieve trace result: %v", err) + } + ret := make([]flatCallTrace, 0) + if err := json.Unmarshal(res, &ret); err != nil { + return fmt.Errorf("failed to unmarshal trace result: %v", err) + } + if !jsonEqualFlat(ret, test.Result) { + t.Logf("tracer name: %s", tracerName) + + // uncomment this for easier debugging + // have, _ := json.MarshalIndent(ret, "", " ") + // want, _ := json.MarshalIndent(test.Result, "", " ") + // t.Logf("trace mismatch: \nhave %+v\nwant %+v", string(have), string(want)) + + // uncomment this for harder debugging <3 meowsbits + // lines := deep.Equal(ret, test.Result) + // for _, l := range lines { + // t.Logf("%s", l) + // t.FailNow() + // } + + t.Fatalf("trace mismatch: \nhave %+v\nwant %+v", ret, test.Result) + } + return nil +} + +// Iterates over all the input-output datasets in the tracer parity test harness and +// runs the Native tracer against them. 
+func TestFlatCallTracerNative(t *testing.T) { + testFlatCallTracer("flatCallTracer", "call_tracer_flat", t) +} + +func testFlatCallTracer(tracerName string, dirPath string, t *testing.T) { + files, err := os.ReadDir(filepath.Join("testdata", dirPath)) + if err != nil { + t.Fatalf("failed to retrieve tracer test suite: %v", err) + } + for _, file := range files { + if !strings.HasSuffix(file.Name(), ".json") { + continue + } + file := file // capture range variable + t.Run(camel(strings.TrimSuffix(file.Name(), ".json")), func(t *testing.T) { + t.Parallel() + + err := flatCallTracerTestRunner(tracerName, file.Name(), dirPath, t) + if err != nil { + t.Fatal(err) + } + }) + } +} + +// jsonEqual is similar to reflect.DeepEqual, but does a 'bounce' via json prior to +// comparison +func jsonEqualFlat(x, y interface{}) bool { + xTrace := new([]flatCallTrace) + yTrace := new([]flatCallTrace) + if xj, err := json.Marshal(x); err == nil { + json.Unmarshal(xj, xTrace) + } else { + return false + } + if yj, err := json.Marshal(y); err == nil { + json.Unmarshal(yj, yTrace) + } else { + return false + } + return reflect.DeepEqual(xTrace, yTrace) +} + +func BenchmarkFlatCallTracer(b *testing.B) { + files, err := filepath.Glob("testdata/call_tracer_flat/*.json") + if err != nil { + b.Fatalf("failed to read testdata: %v", err) + } + + for _, file := range files { + filename := strings.TrimPrefix(file, "testdata/call_tracer_flat/") + b.Run(camel(strings.TrimSuffix(filename, ".json")), func(b *testing.B) { + for n := 0; n < b.N; n++ { + err := flatCallTracerTestRunner("flatCallTracer", filename, "call_tracer_flat", b) + if err != nil { + b.Fatal(err) + } + } + }) + } +} diff --git a/coreth/eth/tracers/internal/tracetest/prestate_test.go b/coreth/eth/tracers/internal/tracetest/prestate_test.go new file mode 100644 index 00000000..6f95fdb7 --- /dev/null +++ b/coreth/eth/tracers/internal/tracetest/prestate_test.go @@ -0,0 +1,166 @@ +// (c) 2023, Ava Labs, Inc. 
+// +// This file is a derived work, based on the go-ethereum library whose original +// notices appear below. +// +// It is distributed under a license compatible with the licensing terms of the +// original code from which it is derived. +// +// Much love to the original authors for their work. +// ********** +// Copyright 2021 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package tracetest + +import ( + "encoding/json" + "math/big" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/ava-labs/coreth/core" + "github.com/ava-labs/coreth/core/rawdb" + "github.com/ava-labs/coreth/core/types" + "github.com/ava-labs/coreth/core/vm" + "github.com/ava-labs/coreth/eth/tracers" + "github.com/ava-labs/coreth/tests" + "github.com/ethereum/go-ethereum/common" +) + +// prestateTrace is the result of a prestateTrace run. +type prestateTrace = map[common.Address]*account + +type account struct { + Balance string `json:"balance"` + Code string `json:"code"` + Nonce uint64 `json:"nonce"` + Storage map[common.Hash]common.Hash `json:"storage"` +} + +// testcase defines a single test to check the stateDiff tracer against. 
+type testcase struct { + Genesis *core.Genesis `json:"genesis"` + Context *callContext `json:"context"` + Input string `json:"input"` + TracerConfig json.RawMessage `json:"tracerConfig"` + Result interface{} `json:"result"` +} + +func TestPrestateTracerLegacy(t *testing.T) { + testPrestateDiffTracer("prestateTracerLegacy", "prestate_tracer_legacy", t) +} + +func TestPrestateTracer(t *testing.T) { + testPrestateDiffTracer("prestateTracer", "prestate_tracer", t) +} + +func TestPrestateWithDiffModeTracer(t *testing.T) { + testPrestateDiffTracer("prestateTracer", "prestate_tracer_with_diff_mode", t) +} + +func TestPrestateWithDiffModeANTTracer(t *testing.T) { + testPrestateDiffTracer("prestateTracer", "prestate_tracer_ant", t) +} + +func testPrestateDiffTracer(tracerName string, dirPath string, t *testing.T) { + files, err := os.ReadDir(filepath.Join("testdata", dirPath)) + if err != nil { + t.Fatalf("failed to retrieve tracer test suite: %v", err) + } + for _, file := range files { + if !strings.HasSuffix(file.Name(), ".json") { + continue + } + file := file // capture range variable + t.Run(camel(strings.TrimSuffix(file.Name(), ".json")), func(t *testing.T) { + t.Parallel() + + var ( + test = new(testcase) + tx = new(types.Transaction) + ) + // Call tracer test found, read if from disk + if blob, err := os.ReadFile(filepath.Join("testdata", dirPath, file.Name())); err != nil { + t.Fatalf("failed to read testcase: %v", err) + } else if err := json.Unmarshal(blob, test); err != nil { + t.Fatalf("failed to parse testcase: %v", err) + } + if err := tx.UnmarshalBinary(common.FromHex(test.Input)); err != nil { + t.Fatalf("failed to parse testcase input: %v", err) + } + // Configure a blockchain with the given prestate + var ( + blockNumber = new(big.Int).SetUint64(uint64(test.Context.Number)) + signer = types.MakeSigner(test.Genesis.Config, blockNumber, uint64(test.Context.Time)) + origin, _ = signer.Sender(tx) + txContext = vm.TxContext{ + Origin: origin, + GasPrice: 
tx.GasPrice(), + } + context = vm.BlockContext{ + CanTransfer: core.CanTransfer, + CanTransferMC: core.CanTransferMC, + Transfer: core.Transfer, + TransferMultiCoin: core.TransferMultiCoin, + Coinbase: test.Context.Miner, + BlockNumber: blockNumber, + Time: uint64(test.Context.Time), + Difficulty: (*big.Int)(test.Context.Difficulty), + GasLimit: uint64(test.Context.GasLimit), + BaseFee: test.Genesis.BaseFee, + } + _, statedb = tests.MakePreState(rawdb.NewMemoryDatabase(), test.Genesis.Alloc, false) + ) + tracer, err := tracers.DefaultDirectory.New(tracerName, new(tracers.Context), test.TracerConfig) + if err != nil { + t.Fatalf("failed to create call tracer: %v", err) + } + evm := vm.NewEVM(context, txContext, statedb, test.Genesis.Config, vm.Config{Tracer: tracer}) + msg, err := core.TransactionToMessage(tx, signer, nil) + if err != nil { + t.Fatalf("failed to prepare transaction for tracing: %v", err) + } + st := core.NewStateTransition(evm, msg, new(core.GasPool).AddGas(tx.Gas())) + if _, err = st.TransitionDb(); err != nil { + t.Fatalf("failed to execute transaction: %v", err) + } + // Retrieve the trace result and compare against the expected + res, err := tracer.GetResult() + if err != nil { + t.Fatalf("failed to retrieve trace result: %v", err) + } + // The legacy javascript calltracer marshals json in js, which + // is not deterministic (as opposed to the golang json encoder). + if strings.HasSuffix(dirPath, "_legacy") { + // This is a tweak to make it deterministic. Can be removed when + // we remove the legacy tracer. 
+ var x prestateTrace + json.Unmarshal(res, &x) + res, _ = json.Marshal(x) + } + want, err := json.Marshal(test.Result) + if err != nil { + t.Fatalf("failed to marshal test: %v", err) + } + if string(want) != string(res) { + t.Fatalf("trace mismatch\n have: %v\n want: %v\n", string(res), string(want)) + } + }) + } +} diff --git a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer/create.json b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer/create.json index 8699bf3e..df0b2872 100644 --- a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer/create.json +++ b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer/create.json @@ -47,8 +47,8 @@ "input": "0xf907ef098504e3b29200830897be8080b9079c606060405260405160208061077c83398101604052808051906020019091905050600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff161415151561007d57600080fd5b336000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555080600160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055506001600460006101000a81548160ff02191690831515021790555050610653806101296000396000f300606060405260043610610083576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806305e4382a146100855780631c02708d146100ae5780632e1a7d4d146100c35780635114cb52146100e6578063a37dda2c146100fe578063ae200e7914610153578063b5769f70146101a8575b005b341561009057600080fd5b6100986101d1565b6040518082815260200191505060405180910390f35b34156100b957600080fd5b6100c16101d7565b005b34156100ce57600080fd5b6100e460048080359060200190919050506102eb565b005b6100fc6004808035906020019091905050610513565b005b341561010957600080fd5b6101116105d6565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390
f35b341561015e57600080fd5b6101666105fc565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34156101b357600080fd5b6101bb610621565b6040518082815260200191505060405180910390f35b60025481565b60011515600460009054906101000a900460ff1615151415156101f957600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614806102a15750600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16145b15156102ac57600080fd5b6000600460006101000a81548160ff0219169083151502179055506003543073ffffffffffffffffffffffffffffffffffffffff163103600281905550565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614806103935750600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16145b151561039e57600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16141561048357600060025411801561040757506002548111155b151561041257600080fd5b80600254036002819055506000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050151561047e57600080fd5b610510565b600060035411801561049757506003548111155b15156104a257600080fd5b8060035403600381905550600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050151561050f57600080fd5b5b50565b60011515600460009054906101000a900460ff16151514151561053557600080fd5b60008090
54906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614801561059657506003548160035401115b80156105bd575080600354013073ffffffffffffffffffffffffffffffffffffffff163110155b15156105c857600080fd5b806003540160038190555050565b600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b600354815600a165627a7a72305820c3b849e8440987ce43eae3097b77672a69234d516351368b03fe5b7de03807910029000000000000000000000000c65e620a3a55451316168d57e268f5702ef56a1129a01060f46676a5dff6f407f0f51eb6f37f5c8c54e238c70221e18e65fc29d3ea65a0557b01c50ff4ffaac8ed6e5d31237a4ecbac843ab1bfe8bb0165a0060df7c54f", "result": { "from": "0x13e4acefe6a6700604929946e70e6443e4e73447", - "gas": "0x5e106", - "gasUsed": "0x5e106", + "gas": "0x897be", + "gasUsed": "0x897be", "input": "0x606060405260405160208061077c83398101604052808051906020019091905050600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff161415151561007d57600080fd5b336000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555080600160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055506001600460006101000a81548160ff02191690831515021790555050610653806101296000396000f300606060405260043610610083576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806305e4382a146100855780631c02708d146100ae5780632e1a7d4d146100c35780635114cb52146100e6578063a37dda2c146100fe578063ae200e7914610153578063b5769f70146101a8575b005b341561009057600080fd5b6100986101d1565b6040518082815260200191505060405180910390f35b34156100b957600080fd5b6100c16101d7565b005b34156100ce57600080fd5b6100e460048080359060200190919050506102eb565b005b6100fc600480803
5906020019091905050610513565b005b341561010957600080fd5b6101116105d6565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b341561015e57600080fd5b6101666105fc565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34156101b357600080fd5b6101bb610621565b6040518082815260200191505060405180910390f35b60025481565b60011515600460009054906101000a900460ff1615151415156101f957600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614806102a15750600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16145b15156102ac57600080fd5b6000600460006101000a81548160ff0219169083151502179055506003543073ffffffffffffffffffffffffffffffffffffffff163103600281905550565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614806103935750600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16145b151561039e57600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16141561048357600060025411801561040757506002548111155b151561041257600080fd5b80600254036002819055506000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050151561047e57600080fd5b610510565b600060035411801561049757506003548111155b15156104a257600080fd5b8060035403600381905550600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673fffffffff
fffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050151561050f57600080fd5b5b50565b60011515600460009054906101000a900460ff16151514151561053557600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614801561059657506003548160035401115b80156105bd575080600354013073ffffffffffffffffffffffffffffffffffffffff163110155b15156105c857600080fd5b806003540160038190555050565b600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b600354815600a165627a7a72305820c3b849e8440987ce43eae3097b77672a69234d516351368b03fe5b7de03807910029000000000000000000000000c65e620a3a55451316168d57e268f5702ef56a11", "output": "0x606060405260043610610083576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806305e4382a146100855780631c02708d146100ae5780632e1a7d4d146100c35780635114cb52146100e6578063a37dda2c146100fe578063ae200e7914610153578063b5769f70146101a8575b005b341561009057600080fd5b6100986101d1565b6040518082815260200191505060405180910390f35b34156100b957600080fd5b6100c16101d7565b005b34156100ce57600080fd5b6100e460048080359060200190919050506102eb565b005b6100fc6004808035906020019091905050610513565b005b341561010957600080fd5b6101116105d6565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b341561015e57600080fd5b6101666105fc565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34156101b357600080fd5b6101bb610621565b6040518082815260200191505060405180910390f35b60025481565b60011515600460009054906101000a900460ff1615151415156101f957600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373fffffffffffffffffffffffffffffff
fffffffff1614806102a15750600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16145b15156102ac57600080fd5b6000600460006101000a81548160ff0219169083151502179055506003543073ffffffffffffffffffffffffffffffffffffffff163103600281905550565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614806103935750600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16145b151561039e57600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16141561048357600060025411801561040757506002548111155b151561041257600080fd5b80600254036002819055506000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050151561047e57600080fd5b610510565b600060035411801561049757506003548111155b15156104a257600080fd5b8060035403600381905550600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050151561050f57600080fd5b5b50565b60011515600460009054906101000a900460ff16151514151561053557600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614801561059657506003548160035401115b80156105bd575080600354013073ffffffffffffffffffffffffffffffffffffffff163110155b15156105c857600080fd5b806003540160038190555050565b600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b600354815600a165627
a7a72305820c3b849e8440987ce43eae3097b77672a69234d516351368b03fe5b7de03807910029", "to": "0x7dc9c9730689ff0b0fd506c67db815f12d90a448", diff --git a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer/deep_calls.json b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer/deep_calls.json index 0353d4cf..97561606 100644 --- a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer/deep_calls.json +++ b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer/deep_calls.json @@ -263,7 +263,6 @@ "gas": "0x20ee1", "gasUsed": "0x5374", "input": "0x581d5d60000000000000000000000000c212e03b9e060e36facad5fd8f4435412ca22e6b0000000000000000000000000000000000000000000000280faf689c35ac0000", - "output": "0x", "to": "0xcf00ffd997ad14939736f026006498e3f099baaf", "type": "CALL", "value": "0x0" @@ -305,7 +304,6 @@ "gas": "0x1a91d", "gasUsed": "0x12fa", "input": "0x0accce0600000000000000000000000000000000000000000000000000000000000000025842545553440000000000000000000000000000000000000000000000000000000000000000000000000000c212e03b9e060e36facad5fd8f4435412ca22e6b00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "output": "0x", "to": "0x2a98c5f40bfa3dee83431103c535f6fae9a8ad38", "type": "CALL", "value": "0x0" @@ -377,7 +375,6 @@ "gas": "0x16e62", "gasUsed": "0xebb", "input": "0x645a3b72584254555344000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002816d180e30c390000", - "output": "0x", "to": "0x2a98c5f40bfa3dee83431103c535f6fae9a8ad38", "type": "CALL", "value": "0x0" @@ -387,7 +384,6 @@ "gas": "0x283b9", "gasUsed": "0xc51c", "input": "0x949ae479000000000000000000000000c212e03b9e060e36facad5fd8f4435412ca22e6b0000000000000000000000000000000000000000000000280faf689c35ac0000", - "output": "0x", "to": "0x3e9286eafa2db8101246c2131c09b49080d00690", "type": "CALL", "value": "0x0" @@ -397,17 +393,15 @@ "gas": "0x30b4a", "gasUsed": "0xedb7", "input": 
"0x51a34eb80000000000000000000000000000000000000000000000280faf689c35ac0000", - "output": "0x", "to": "0xb4fe7aa695b326c9d219158d2ca50db77b39f99f", "type": "CALL", "value": "0x0" } ], "from": "0x70c9217d814985faef62b124420f8dfbddd96433", - "gas": "0x37b38", - "gasUsed": "0x12bb3", + "gas": "0x3d090", + "gasUsed": "0x1810b", "input": "0x51a34eb80000000000000000000000000000000000000000000000280faf689c35ac0000", - "output": "0x", "to": "0xc212e03b9e060e36facad5fd8f4435412ca22e6b", "type": "CALL", "value": "0x0" diff --git a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer/delegatecall.json b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer/delegatecall.json index f7ad6df5..6a2cda7d 100644 --- a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer/delegatecall.json +++ b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer/delegatecall.json @@ -72,7 +72,8 @@ "input": "0x7d65837a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a529806c67cc6486d4d62024471772f47f6fd672", "output": "0x0000000000000000000000000000000000000000000000000000000000000001", "to": "0x42b02b5deeb78f34cd5ac896473b63e6c99a71a2", - "type": "DELEGATECALL" + "type": "DELEGATECALL", + "value": "0x0" } ], "from": "0x269296dddce321a6bcbaa2f0181127593d732cba", @@ -86,10 +87,9 @@ } ], "from": "0xa529806c67cc6486d4d62024471772f47f6fd672", - "gas": "0x2d6e28", - "gasUsed": "0x64bd", + "gas": "0x2dc6c0", + "gasUsed": "0xbd55", "input": "0x7065cb480000000000000000000000001523e55a1ca4efbae03355775ae89f8d7699ad9e", - "output": "0x", "to": "0x269296dddce321a6bcbaa2f0181127593d732cba", "type": "CALL", "value": "0x0" diff --git a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer/inner_create_oog_outer_throw.json b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer/inner_create_oog_outer_throw.json index 9395eb40..bb16a4a4 100644 --- a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer/inner_create_oog_outer_throw.json +++ 
b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer/inner_create_oog_outer_throw.json @@ -67,8 +67,8 @@ ], "error": "invalid jump destination", "from": "0xe4a13bc304682a903e9472f469c33801dd18d9e8", - "gas": "0x435c8", - "gasUsed": "0x435c8", + "gas": "0x493e0", + "gasUsed": "0x493e0", "input": "0x3b91f506000000000000000000000000a14bdd7e5666d784dcce98ad24d383a6b1cd4182000000000000000000000000e4a13bc304682a903e9472f469c33801dd18d9e8", "to": "0x1d3ddf7caf024f253487e18bc4a15b1a360c170a", "type": "CALL", diff --git a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer/inner_instafail.json b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer/inner_instafail.json index 6e221b3c..9b45b52f 100644 --- a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer/inner_instafail.json +++ b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer/inner_instafail.json @@ -54,10 +54,8 @@ "from": "0x66fdfd05e46126a07465ad24e40cc0597bc1ef31", "to": "0x6c06b16512b332e6cd8293a2974872674716ce18", "value": "0x0", - "gas": "0x1a466", - "gasUsed": "0x1dc6", - "input": "0x2e1a7d4d00000000000000000000000000000000000000000000000014d1120d7b160000", - "output": "0x", - "calls": [] + "gas": "0x1f97e", + "gasUsed": "0x72de", + "input": "0x2e1a7d4d00000000000000000000000000000000000000000000000014d1120d7b160000" } } diff --git a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer/inner_revert_reason.json b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer/inner_revert_reason.json new file mode 100644 index 00000000..e54129d4 --- /dev/null +++ b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer/inner_revert_reason.json @@ -0,0 +1,84 @@ +{ + "genesis": { + "baseFeePerGas": "1000000000", + "difficulty": "1", + "extraData": 
"0x00000000000000000000000000000000000000000000000000000000000000003623191d4ccfbbdf09e8ebf6382a1f8257417bc10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "gasLimit": "11500000", + "hash": "0x2af138b8a06e65b8dd0999df70b9e87609e9fc91ea201f08b1cc4f25ef01fcf6", + "miner": "0x0000000000000000000000000000000000000000", + "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "nonce": "0x0000000000000000", + "number": "0", + "stateRoot": "0xa775801d572e9b79585eb131d18d79f8a0f71895455ab9a5b656911428e11708", + "timestamp": "0", + "totalDifficulty": "1", + "alloc": { + "0x3623191d4ccfbbdf09e8ebf6382a1f8257417bc1": { + "balance": "0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7" + }, + "0xd15abca351f79181dedfb6d019e382db90f3628a": { + "balance": "0x0" + } + }, + "config": { + "chainId": 1337, + "homesteadBlock": 0, + "eip150Block": 0, + "eip150Hash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "eip155Block": 0, + "eip158Block": 0, + "byzantiumBlock": 0, + "constantinopleBlock": 0, + "petersburgBlock": 0, + "istanbulBlock": 0, + "muirGlacierBlock": 0, + "berlinBlock": 0, + "londonBlock": 0, + "apricotPhase1BlockTimestamp": 0, + "apricotPhase2BlockTimestamp": 0, + "apricotPhase3BlockTimestamp": 0 + } + }, + "context": { + "number": "1", + "difficulty": "2", + "timestamp": "1665537018", + "gasLimit": "11511229", + "miner": "0x0000000000000000000000000000000000000000" + }, + "input": 
"0x02f9029d82053980849502f90085010c388d00832dc6c08080b90241608060405234801561001057600080fd5b50600060405161001f906100a2565b604051809103906000f08015801561003b573d6000803e3d6000fd5b5090508073ffffffffffffffffffffffffffffffffffffffff1663c04062266040518163ffffffff1660e01b815260040160006040518083038186803b15801561008457600080fd5b505afa158015610098573d6000803e3d6000fd5b50505050506100af565b610145806100fc83390190565b603f806100bd6000396000f3fe6080604052600080fdfea264697066735822122077f7dbd3450d6e817079cf3fe27107de5768bb3163a402b94e2206b468eb025664736f6c63430008070033608060405234801561001057600080fd5b50610125806100206000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c8063c040622614602d575b600080fd5b60336035565b005b60036002116076576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401606d906097565b60405180910390fd5b565b6000608360128360b5565b9150608c8260c6565b602082019050919050565b6000602082019050818103600083015260ae816078565b9050919050565b600082825260208201905092915050565b7f546869732063616c6c6564206661696c6564000000000000000000000000000060008201525056fea264697066735822122033f8d92e29d467e5ea08d0024eab0b36b86b8cdb3542c6e89dbaabeb8ffaa42064736f6c63430008070033c001a07566181071cabaf58b70fc41557eb813bfc7a24f5c58554e7fed0bf7c031f169a0420af50b5fe791a4d839e181a676db5250b415dfb35cb85d544db7a1475ae2cc", + "result": { + "from": "0x3623191d4ccfbbdf09e8ebf6382a1f8257417bc1", + "gas": "0x2dc6c0", + "gasUsed": "0x25590", + "input": 
"0x608060405234801561001057600080fd5b50600060405161001f906100a2565b604051809103906000f08015801561003b573d6000803e3d6000fd5b5090508073ffffffffffffffffffffffffffffffffffffffff1663c04062266040518163ffffffff1660e01b815260040160006040518083038186803b15801561008457600080fd5b505afa158015610098573d6000803e3d6000fd5b50505050506100af565b610145806100fc83390190565b603f806100bd6000396000f3fe6080604052600080fdfea264697066735822122077f7dbd3450d6e817079cf3fe27107de5768bb3163a402b94e2206b468eb025664736f6c63430008070033608060405234801561001057600080fd5b50610125806100206000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c8063c040622614602d575b600080fd5b60336035565b005b60036002116076576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401606d906097565b60405180910390fd5b565b6000608360128360b5565b9150608c8260c6565b602082019050919050565b6000602082019050818103600083015260ae816078565b9050919050565b600082825260208201905092915050565b7f546869732063616c6c6564206661696c6564000000000000000000000000000060008201525056fea264697066735822122033f8d92e29d467e5ea08d0024eab0b36b86b8cdb3542c6e89dbaabeb8ffaa42064736f6c63430008070033", + "output": "0x08c379a000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000012546869732063616c6c6564206661696c65640000000000000000000000000000", + "error": "execution reverted", + "revertReason": "This called failed", + "calls": [ + { + "from": "0xdebfb4b387033eac57af7b3de5116dd60056803b", + "gas": "0x2ba851", + "gasUsed": "0xe557", + "to": "0xd15abca351f79181dedfb6d019e382db90f3628a", + "input": 
"0x608060405234801561001057600080fd5b50610125806100206000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c8063c040622614602d575b600080fd5b60336035565b005b60036002116076576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401606d906097565b60405180910390fd5b565b6000608360128360b5565b9150608c8260c6565b602082019050919050565b6000602082019050818103600083015260ae816078565b9050919050565b600082825260208201905092915050565b7f546869732063616c6c6564206661696c6564000000000000000000000000000060008201525056fea264697066735822122033f8d92e29d467e5ea08d0024eab0b36b86b8cdb3542c6e89dbaabeb8ffaa42064736f6c63430008070033", + "output": "0x6080604052348015600f57600080fd5b506004361060285760003560e01c8063c040622614602d575b600080fd5b60336035565b005b60036002116076576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401606d906097565b60405180910390fd5b565b6000608360128360b5565b9150608c8260c6565b602082019050919050565b6000602082019050818103600083015260ae816078565b9050919050565b600082825260208201905092915050565b7f546869732063616c6c6564206661696c6564000000000000000000000000000060008201525056fea264697066735822122033f8d92e29d467e5ea08d0024eab0b36b86b8cdb3542c6e89dbaabeb8ffaa42064736f6c63430008070033", + "value": "0x0", + "type": "CREATE" + }, + { + "from": "0xdebfb4b387033eac57af7b3de5116dd60056803b", + "gas": "0x2ac548", + "gasUsed": "0x1b2", + "to": "0xd15abca351f79181dedfb6d019e382db90f3628a", + "input": "0xc0406226", + "output": "0x08c379a000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000012546869732063616c6c6564206661696c65640000000000000000000000000000", + "error": "execution reverted", + "revertReason": "This called failed", + "type": "STATICCALL" + } + ], + "value": "0x0", + "type": "CREATE" + } +} diff --git a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer/inner_throw_outer_revert.json 
b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer/inner_throw_outer_revert.json index ec2ceb42..a023ed6d 100644 --- a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer/inner_throw_outer_revert.json +++ b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer/inner_throw_outer_revert.json @@ -71,8 +71,8 @@ ], "error": "execution reverted", "from": "0xd4fcab9f0a6dc0493af47c864f6f17a8a5e2e826", - "gas": "0x78d9e", - "gasUsed": "0x76fc0", + "gas": "0x7dfa6", + "gasUsed": "0x7c1c8", "input": "0x", "to": "0x33056b5dcac09a9b4becad0e1dcf92c19bd0af76", "type": "CALL", diff --git a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer/inner_throw_outer_revert.md b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer/inner_throw_outer_revert.md new file mode 100644 index 00000000..2700578b --- /dev/null +++ b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer/inner_throw_outer_revert.md @@ -0,0 +1,19 @@ +This test tests out the trace generated by the deployment of this contract: + +```solidity +contract Revertor { + function run() public pure { + require(2 > 3, "This called failed"); + } +} + +contract Contract { + constructor() { + Revertor r = new Revertor(); + r.run(); + } +} +``` + +The trace should show a revert, with the revert reason for both the top-call as well +as the inner call. 
diff --git a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer/oog.json b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer/oog.json index de4fed6a..333bdd03 100644 --- a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer/oog.json +++ b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer/oog.json @@ -50,8 +50,8 @@ "result": { "error": "out of gas", "from": "0x94194bc2aaf494501d7880b61274a169f6502a54", - "gas": "0x7045", - "gasUsed": "0x7045", + "gas": "0xca1d", + "gasUsed": "0xca1d", "input": "0xa9059cbb000000000000000000000000e77b1ac803616503510bed0086e3a7be2627a69900000000000000000000000000000000000000000000000000000009502f9000", "to": "0x43064693d3d38ad6a7cb579e0d6d9718c8aa6b62", "type": "CALL", diff --git a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer/revert.json b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer/revert.json index 059040a1..3207a298 100644 --- a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer/revert.json +++ b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer/revert.json @@ -48,8 +48,8 @@ "result": { "error": "execution reverted", "from": "0x0f6cef2b7fbb504782e35aa82a2207e816a2b7a9", - "gas": "0x2d55e8", - "gasUsed": "0xc3", + "gas": "0x2dc6c0", + "gasUsed": "0x719b", "input": "0x73b40a5c000000000000000000000000400de2e016bda6577407dfc379faba9899bc73ef0000000000000000000000002cc31912b2b0f3075a87b3640923d45a26cef3ee000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000064d79d8e6c7265636f76657279416464726573730000000000000000000000000000000000000000000000000000000000383e3ec32dc0f66d8fe60dbdc2f6815bdf73a988383e3ec32dc0f66d8fe60dbdc2f6815bdf73a98800000000000000000000000000000000000000000000000000000000000000000000000000000000", "to": "0xabbcd5b340c80b5f1c0545c04c987b87310296ae", "type": "CALL", diff --git 
a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer/revert_reason.json b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer/revert_reason.json index 094b0446..f02e5c68 100644 --- a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer/revert_reason.json +++ b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer/revert_reason.json @@ -27,7 +27,7 @@ "byzantiumBlock": 0, "constantinopleBlock": 0, "petersburgBlock": 0, - "IstanbulBlock":1561651, + "IstanbulBlock": 1561651, "chainId": 5, "daoForkSupport": true, "eip150Block": 0, @@ -53,12 +53,13 @@ "result": { "error": "execution reverted", "from": "0xf7579c3d8a669c89d5ed246a22eb6db8f6fedbf1", - "gas": "0x2d7308", - "gasUsed": "0x588", + "gas": "0x2dc6c0", + "gasUsed": "0x5940", "input": "0x5c19a95c000000000000000000000000f7579c3d8a669c89d5ed246a22eb6db8f6fedbf1", "to": "0xf58833cf0c791881b494eb79d461e08a1f043f52", "type": "CALL", "value": "0x0", - "output": "0x08c379a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001e53656c662d64656c65676174696f6e20697320646973616c6c6f7765642e0000" + "output": "0x08c379a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001e53656c662d64656c65676174696f6e20697320646973616c6c6f7765642e0000", + "revertReason": "Self-delegation is disallowed." 
} } diff --git a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer/selfdestruct.json b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer/selfdestruct.json index dd717906..620df1d6 100644 --- a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer/selfdestruct.json +++ b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer/selfdestruct.json @@ -58,16 +58,15 @@ "gas": "0x0", "gasUsed": "0x0", "input": "0x", - "to": "0x000000000000000000000000000000000000dEaD", + "to": "0x000000000000000000000000000000000000dead", "type": "SELFDESTRUCT", "value": "0x4d87094125a369d9bd5" } ], "from": "0xb436ba50d378d4bbc8660d312a13df6af6e89dfb", - "gas": "0x10738", - "gasUsed": "0x7533", + "gas": "0x15f90", + "gasUsed": "0x6fcb", "input": "0x63e4bff40000000000000000000000000024f658a46fbb89d8ac105e98d7ac7cbbaf27c5", - "output": "0x", "to": "0x3b873a919aa0512d5a0f09e6dcceaa4a6727fafe", "type": "CALL", "value": "0x0" diff --git a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer/simple.json b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer/simple.json index 08cb7b2d..6c7d01de 100644 --- a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer/simple.json +++ b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer/simple.json @@ -69,8 +69,8 @@ } ], "from": "0xb436ba50d378d4bbc8660d312a13df6af6e89dfb", - "gas": "0x10738", - "gasUsed": "0x3ef9", + "gas": "0x15f90", + "gasUsed": "0x9751", "input": "0x63e4bff40000000000000000000000000024f658a46fbb89d8ac105e98d7ac7cbbaf27c5", "output": "0x0000000000000000000000000000000000000000000000000000000000000001", "to": "0x3b873a919aa0512d5a0f09e6dcceaa4a6727fafe", diff --git a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer/simple_onlytop.json b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer/simple_onlytop.json index ac1fef44..affb4ab0 100644 --- a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer/simple_onlytop.json +++ 
b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer/simple_onlytop.json @@ -61,8 +61,8 @@ }, "result": { "from": "0xb436ba50d378d4bbc8660d312a13df6af6e89dfb", - "gas": "0x10738", - "gasUsed": "0x3ef9", + "gas": "0x15f90", + "gasUsed": "0x9751", "input": "0x63e4bff40000000000000000000000000024f658a46fbb89d8ac105e98d7ac7cbbaf27c5", "output": "0x0000000000000000000000000000000000000000000000000000000000000001", "to": "0x3b873a919aa0512d5a0f09e6dcceaa4a6727fafe", diff --git a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer/throw.json b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer/throw.json index 09cf4497..499b449a 100644 --- a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer/throw.json +++ b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer/throw.json @@ -52,8 +52,8 @@ "result": { "error": "invalid jump destination", "from": "0x70c9217d814985faef62b124420f8dfbddd96433", - "gas": "0x37b38", - "gasUsed": "0x37b38", + "gas": "0x3d090", + "gasUsed": "0x3d090", "input": "0x51a34eb8000000000000000000000000000000000000000000000027fad02094277c0000", "to": "0xc212e03b9e060e36facad5fd8f4435412ca22e6b", "type": "CALL", diff --git a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/big_slow.json b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/big_slow.json new file mode 100644 index 00000000..617f52a1 --- /dev/null +++ b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/big_slow.json @@ -0,0 +1,64 @@ +{ + "genesis": { + "difficulty": "50486697699375", + "extraData": "0xd783010406844765746887676f312e362e32856c696e7578", + "gasLimit": "4788482", + "hash": "0xf6bbc5bbe34d5c93fd5b4712cd498d1026b8b0f586efefe7fe30231ed6b8a1a5", + "miner": "0xbcdfc35b86bedf72f0cda046a3c16829a2ef41d1", + "mixHash": "0xabca93555584c0463ee5c212251dd002bb3a93a157e06614276f93de53d4fdb8", + "nonce": "0xa64136fcb9c2d4ca", + "number": "1719576", + "stateRoot": 
"0xab5eec2177a92d633e282936af66c46e24cfa8f2fdc2b8155f33885f483d06f3", + "timestamp": "1466150166", + "totalDifficulty": "28295412423546970038", + "alloc": { + "0xf8bda96b67036ee48107f2a0695ea673479dda56": { + "balance": "0x1529e844f9ecdeec", + "nonce": "33", + "code": "0x", + "storage": {} + } + }, + "config": { + "chainId": 1, + "daoForkSupport": true, + "eip150Block": 0, + "eip150Hash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d", + "eip155Block": 3000000, + "eip158Block": 0, + "ethash": {}, + "homesteadBlock": 1150000, + "byzantiumBlock": 8772000, + "constantinopleBlock": 9573000, + "petersburgBlock": 10500839, + "istanbulBlock": 10500839 + } + }, + "context": { + "number": "1719577", + "difficulty": "50486697732143", + "timestamp": "1466150178", + "gasLimit": "4788484", + "miner": "0x2a65aca4d5fc5b5c859090a6c34d164135398226" + }, + "input": "0xf874218504a817c800832318608080a35b620186a05a131560135760016020526000565b600080601f600039601f565b6000f31ba0575fa000a1f06659a7b6d3c7877601519a4997f04293f0dfa0eee6d8cd840c77a04c52ce50719ee2ff7a0c5753f4ee69c0340666f582dbb5148845a354ca726e4a", + "result": [ + { + "action": { + "from": "0xf8bda96b67036ee48107f2a0695ea673479dda56", + "gas": "0x231860", + "init": "0x5b620186a05a131560135760016020526000565b600080601f600039601f565b6000f3", + "value": "0x0" + }, + "blockNumber": 1719577, + "result": { + "address": "0xb2e6a2546c45889427757171ab05b8b438525b42", + "code": "0x", + "gasUsed": "0x219202" + }, + "subtraces": 0, + "traceAddress": [], + "type": "create" + } + ] +} diff --git a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/callcode_precompiled_fail_hide.json b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/callcode_precompiled_fail_hide.json new file mode 100644 index 00000000..c796804a --- /dev/null +++ b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/callcode_precompiled_fail_hide.json @@ -0,0 +1,74 @@ +{ + "genesis": { + "difficulty": "4671584", + 
"extraData": "0xd683010b05846765746886676f312e3133856c696e7578", + "gasLimit": "9435026", + "hash": "0x755bd54de4b2f5a7a589a10d69888b4ead48a6311d5d69f2f69ca85ec35fbe0b", + "miner": "0x877bd459c9b7d8576b44e59e09d076c25946f443", + "mixHash": "0x3a44525624571c31344ba57780f7664098fe7cbeafe532bcdee76a23fc474ba0", + "nonce": "0x6dca647c00c72bbf", + "number": "1555278", + "stateRoot": "0x5f56d8323ee384b0c8d1de49d63e150e17283eea813483698362bc0ec9e0242a", + "timestamp": "1590795319", + "totalDifficulty": "2242614315030", + "alloc": { + "0x0000000000000000000000000000000000000004": { + "balance": "0x0", + "nonce": "0", + "code": "0x", + "storage": {} + }, + "0x877bd459c9b7d8576b44e59e09d076c25946f443": { + "balance": "0x62436e941792f02a5fb1", + "nonce": "265356", + "code": "0x", + "storage": {} + } + }, + "config": { + "chainId": 63, + "daoForkSupport": true, + "eip150Block": 0, + "eip150Hash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d", + "eip155Block": 0, + "eip158Block": 0, + "ethash": {}, + "homesteadBlock": 0, + "byzantiumBlock": 0, + "constantinopleBlock": 301243, + "petersburgBlock": 999983, + "istanbulBlock": 999983 + } + }, + "context": { + "number": "1555279", + "difficulty": "4669303", + "timestamp": "1590795340", + "gasLimit": "9444238", + "miner": "0x877bd459c9b7d8576b44e59e09d076c25946f443" + }, + "input": "0xf86f83040c8c843b9aca0083019f7880809b60206000600060006013600462030d40f26002556000516000550081a2a086ad228c89ad9664287b12a5602a635a803506904f4ce39795990ac4f945cd57a025b30ea8042d773f6c5b13d7cc1b3979f9f10ee674410b6a2112ce840d0302dc", + "result": [ + { + "type": "create", + "action": { + "from": "0x877bd459c9b7d8576b44e59e09d076c25946f443", + "value": "0x0", + "gas": "0x19f78", + "init": "0x60206000600060006013600462030d40f260025560005160005500" + }, + "result": { + "gasUsed": "0xf3bc", + "code": "0x", + "address": "0x5f8a7e007172ba80afbff1b15f800eb0b260f224" + }, + "traceAddress": [], + "subtraces": 0, + "transactionPosition": 74, + 
"transactionHash": "0x5ef60b27ac971c22a7d484e546e50093ca62300c8986d165154e47773764b6a4", + "blockNumber": 1555279, + "blockHash": "0xd6c98d1b87dfa92a210d99bad2873adaf0c9e51fe43addc63fd9cca03a5c6f46", + "time": "209.346µs" + } + ] +} diff --git a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/callcode_precompiled_oog.json b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/callcode_precompiled_oog.json new file mode 100644 index 00000000..fb29e496 --- /dev/null +++ b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/callcode_precompiled_oog.json @@ -0,0 +1,94 @@ +{ + "genesis": { + "difficulty": "4671584", + "extraData": "0xd883010b05846765746888676f312e31342e33856c696e7578", + "gasLimit": "9425823", + "hash": "0x27dd7d052dbc8a29cc5b9487e1e41d842e7a643fcaea4964caa22b834964acaf", + "miner": "0x73f26d124436b0791169d63a3af29c2ae47765a3", + "mixHash": "0xb4a050624f5d147fdf02857cbfd55da3ddc1451743acc5c163861584589c3034", + "nonce": "0x3c255875b17e0573", + "number": "1555277", + "stateRoot": "0x6290d79215a2eebc25d5e456b35876c6d78ffc1ea47bdd70e375ebb3cf325620", + "timestamp": "1590795308", + "totalDifficulty": "2242609643446", + "alloc": { + "0x0000000000000000000000000000000000000001": { + "balance": "0x0", + "nonce": "0", + "code": "0x", + "storage": {} + }, + "0x877bd459c9b7d8576b44e59e09d076c25946f443": { + "balance": "0x624329308610ab365fb1", + "nonce": "265194", + "code": "0x", + "storage": {} + } + }, + "config": { + "chainId": 63, + "daoForkSupport": true, + "eip150Block": 0, + "eip150Hash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d", + "eip155Block": 0, + "eip158Block": 0, + "ethash": {}, + "homesteadBlock": 0, + "byzantiumBlock": 0, + "constantinopleBlock": 301243, + "petersburgBlock": 999983, + "istanbulBlock": 999983 + } + }, + "context": { + "number": "1555278", + "difficulty": "4671584", + "timestamp": "1590795319", + "gasLimit": "9435026", + "miner": 
"0x877bd459c9b7d8576b44e59e09d076c25946f443" + }, + "input": "0xf8ee83040bea843b9aca008301a7588080b8997f18c547e4f7b0f325ad1e56f57e26c745b09a3e503d86e00e5255ff7f715d3d1c600052601c6020527f73b1693892219d736caba55bdb67216e485557ea6b6af75f37096c9aa6a5a75f6040527feeb940b1d03b21e36b0e47e79769f095fe2ab855bd91e3a38756b7d75a9c4549606052602060806080600060006001610bb7f260025560a060020a6080510660005560005432146001550081a1a05b9a162d84bfe84faa7c176e21c26c0083645d4dd0d566547b7be2c2da0b4259a05b37ff12a4c27634cb0da6008d9b69726d415ff4694f9bc38c7806eb1fb60ae9", + "result": [ + { + "type": "create", + "action": { + "from": "0x877bd459c9b7d8576b44e59e09d076c25946f443", + "value": "0x0", + "gas": "0x1a758", + "init": "0x7f18c547e4f7b0f325ad1e56f57e26c745b09a3e503d86e00e5255ff7f715d3d1c600052601c6020527f73b1693892219d736caba55bdb67216e485557ea6b6af75f37096c9aa6a5a75f6040527feeb940b1d03b21e36b0e47e79769f095fe2ab855bd91e3a38756b7d75a9c4549606052602060806080600060006001610bb7f260025560a060020a60805106600055600054321460015500" + }, + "result": { + "gasUsed": "0xf3e9", + "code": "0x", + "address": "0x568c19ecb14b87e4aec29b4d2d700a3ad3fd0613" + }, + "traceAddress": [], + "subtraces": 1, + "transactionPosition": 141, + "transactionHash": "0x1592cbda0d928b8d18eed98857942b91ade32d088e55b8bf63418917cb0231f1", + "blockNumber": 1555278, + "blockHash": "0x755bd54de4b2f5a7a589a10d69888b4ead48a6311d5d69f2f69ca85ec35fbe0b", + "time": "300.9µs" + }, + { + "type": "call", + "action": { + "from": "0x568c19ecb14b87e4aec29b4d2d700a3ad3fd0613", + "to": "0x0000000000000000000000000000000000000001", + "value": "0x0", + "gas": "0xbb7", + "input": "0x18c547e4f7b0f325ad1e56f57e26c745b09a3e503d86e00e5255ff7f715d3d1c000000000000000000000000000000000000000000000000000000000000001c73b1693892219d736caba55bdb67216e485557ea6b6af75f37096c9aa6a5a75feeb940b1d03b21e36b0e47e79769f095fe2ab855bd91e3a38756b7d75a9c4549", + "callType": "callcode" + }, + "error": "out of gas", + "traceAddress": [ + 0 + ], + "subtraces": 0, + 
"transactionPosition": 141, + "transactionHash": "0x1592cbda0d928b8d18eed98857942b91ade32d088e55b8bf63418917cb0231f1", + "blockNumber": 1555278, + "blockHash": "0x755bd54de4b2f5a7a589a10d69888b4ead48a6311d5d69f2f69ca85ec35fbe0b" + } + ] +} \ No newline at end of file diff --git a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/callcode_precompiled_throw.json b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/callcode_precompiled_throw.json new file mode 100644 index 00000000..3c1e370f --- /dev/null +++ b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/callcode_precompiled_throw.json @@ -0,0 +1,90 @@ +{ + "genesis": { + "difficulty": "4683014", + "extraData": "0x537465762d63676574682d76312e31312e34", + "gasLimit": "9435044", + "hash": "0x3452ca5005cb73cd60dfa488a7b124251168e564491f80eb66765e79d78cfd95", + "miner": "0x415aa6292d1db797a467b22139704956c030e62f", + "mixHash": "0x6037612618507ae70c74a72bc2580253662971db959cfbc06d3f8527d4d01575", + "nonce": "0x314fc90dee5e39a2", + "number": "1555274", + "stateRoot": "0x795751f3f96a5de1fd3944ddd78cbfe4ef10491e1086be47609869a30929d0e5", + "timestamp": "1590795228", + "totalDifficulty": "2242595605834", + "alloc": { + "0x0000000000000000000000000000000000000009": { + "balance": "0x0", + "nonce": "0", + "code": "0x", + "storage": {} + }, + "0x877bd459c9b7d8576b44e59e09d076c25946f443": { + "balance": "0x6242e3ccf48e66425fb1", + "nonce": "264981", + "code": "0x", + "storage": {} + } + }, + "config": { + "chainId": 63, + "daoForkSupport": true, + "eip150Block": 0, + "eip150Hash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d", + "eip155Block": 0, + "eip158Block": 0, + "ethash": {}, + "homesteadBlock": 0, + "byzantiumBlock": 0, + "constantinopleBlock": 301243, + "petersburgBlock": 999983, + "istanbulBlock": 999983 + } + }, + "context": { + "number": "1555275", + "difficulty": "4683014", + "timestamp": "1590795244", + "gasLimit": "9444256", + "miner": 
"0x877bd459c9b7d8576b44e59e09d076c25946f443" + }, + "input": "0xf87a83040b15843b9aca008301a0348080a636600060003760406103e8366000600060095af26001556103e851600255610408516003550081a1a0dd883fbbb489b640dadc8c1bf151767155228d0a1321f687f070f35f14374b05a02dd0ccb16a8de39bc8ee61381bbbbb54f0ab18422afd7b03c6163da1f5023934", + "result": [ + { + "type": "create", + "action": { + "from": "0x877bd459c9b7d8576b44e59e09d076c25946f443", + "value": "0x0", + "gas": "0x1a034", + "init": "0x36600060003760406103e8366000600060095af26001556103e8516002556104085160035500" + }, + "error": "out of gas", + "traceAddress": [], + "subtraces": 1, + "transactionPosition": 117, + "transactionHash": "0x7fe4dec901e1a62c1a1d96b8267bb9ff9dc1f75def43aa45b998743455eff8f9", + "blockNumber": 1555275, + "blockHash": "0x80945caaff2fc67253cbb0217d2e5a307afde943929e97d8b36e58b88cbb02fd", + "time": "332.877µs" + }, + { + "type": "call", + "action": { + "from": "0x8832ef498070145c3a5b30f47fbca71fd7b1de9f", + "to": "0x0000000000000000000000000000000000000009", + "value": "0x0", + "gas": "0xc897", + "input": "0x", + "callType": "callcode" + }, + "error": "invalid input length", + "traceAddress": [ + 0 + ], + "subtraces": 0, + "transactionPosition": 117, + "transactionHash": "0x7fe4dec901e1a62c1a1d96b8267bb9ff9dc1f75def43aa45b998743455eff8f9", + "blockNumber": 1555275, + "blockHash": "0x80945caaff2fc67253cbb0217d2e5a307afde943929e97d8b36e58b88cbb02fd" + } + ] +} \ No newline at end of file diff --git a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/create.json b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/create.json new file mode 100644 index 00000000..11bc4eae --- /dev/null +++ b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/create.json @@ -0,0 +1,67 @@ +{ + "context": { + "difficulty": "3755480783", + "gasLimit": "5401723", + "miner": "0xd049bfd667cb46aa3ef5df0da3e57db3be39e511", + "number": "2294702", + "timestamp": "1513676146" + }, + "genesis": { + 
"alloc": { + "0x13e4acefe6a6700604929946e70e6443e4e73447": { + "balance": "0xcf3e0938579f000", + "code": "0x", + "nonce": "9", + "storage": {} + }, + "0x7dc9c9730689ff0b0fd506c67db815f12d90a448": { + "balance": "0x0", + "code": "0x", + "nonce": "0", + "storage": {} + } + }, + "config": { + "byzantiumBlock": 1700000, + "chainId": 3, + "daoForkSupport": true, + "eip150Block": 0, + "eip150Hash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d", + "eip155Block": 10, + "eip158Block": 10, + "ethash": {}, + "homesteadBlock": 0 + }, + "difficulty": "3757315409", + "extraData": "0x566961425443", + "gasLimit": "5406414", + "hash": "0xae107f592eebdd9ff8d6ba00363676096e6afb0e1007a7d3d0af88173077378d", + "miner": "0xd049bfd667cb46aa3ef5df0da3e57db3be39e511", + "mixHash": "0xc927aa05a38bc3de864e95c33b3ae559d3f39c4ccd51cef6f113f9c50ba0caf1", + "nonce": "0x93363bbd2c95f410", + "number": "2294701", + "stateRoot": "0x6b6737d5bde8058990483e915866bd1578014baeff57bd5e4ed228a2bfad635c", + "timestamp": "1513676127", + "totalDifficulty": "7160808139332585" + }, + "input": 
"0xf907ef098504e3b29200830897be8080b9079c606060405260405160208061077c83398101604052808051906020019091905050600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff161415151561007d57600080fd5b336000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555080600160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055506001600460006101000a81548160ff02191690831515021790555050610653806101296000396000f300606060405260043610610083576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806305e4382a146100855780631c02708d146100ae5780632e1a7d4d146100c35780635114cb52146100e6578063a37dda2c146100fe578063ae200e7914610153578063b5769f70146101a8575b005b341561009057600080fd5b6100986101d1565b6040518082815260200191505060405180910390f35b34156100b957600080fd5b6100c16101d7565b005b34156100ce57600080fd5b6100e460048080359060200190919050506102eb565b005b6100fc6004808035906020019091905050610513565b005b341561010957600080fd5b6101116105d6565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b341561015e57600080fd5b6101666105fc565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34156101b357600080fd5b6101bb610621565b6040518082815260200191505060405180910390f35b60025481565b60011515600460009054906101000a900460ff1615151415156101f957600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614806102a15750600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16145b15156102ac5760008
0fd5b6000600460006101000a81548160ff0219169083151502179055506003543073ffffffffffffffffffffffffffffffffffffffff163103600281905550565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614806103935750600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16145b151561039e57600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16141561048357600060025411801561040757506002548111155b151561041257600080fd5b80600254036002819055506000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050151561047e57600080fd5b610510565b600060035411801561049757506003548111155b15156104a257600080fd5b8060035403600381905550600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050151561050f57600080fd5b5b50565b60011515600460009054906101000a900460ff16151514151561053557600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614801561059657506003548160035401115b80156105bd575080600354013073ffffffffffffffffffffffffffffffffffffffff163110155b15156105c857600080fd5b806003540160038190555050565b600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b600354815600a165627a7a72305820c3b849e8440987ce43eae3097b77672a69234d516351368b03fe5b7de03807910029000000000000000000000000c65e620a3a55451316168d57e268f5702ef56a1129a01060f46676a5dff6f407f0f51eb6f37f5c8c54e238c70221e18e65fc29d
3ea65a0557b01c50ff4ffaac8ed6e5d31237a4ecbac843ab1bfe8bb0165a0060df7c54f", + "result": [ + { + "action": { + "from": "0x13e4acefe6a6700604929946e70e6443e4e73447", + "gas": "0x897be", + "init": "0x606060405260405160208061077c83398101604052808051906020019091905050600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff161415151561007d57600080fd5b336000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555080600160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055506001600460006101000a81548160ff02191690831515021790555050610653806101296000396000f300606060405260043610610083576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806305e4382a146100855780631c02708d146100ae5780632e1a7d4d146100c35780635114cb52146100e6578063a37dda2c146100fe578063ae200e7914610153578063b5769f70146101a8575b005b341561009057600080fd5b6100986101d1565b6040518082815260200191505060405180910390f35b34156100b957600080fd5b6100c16101d7565b005b34156100ce57600080fd5b6100e460048080359060200190919050506102eb565b005b6100fc6004808035906020019091905050610513565b005b341561010957600080fd5b6101116105d6565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b341561015e57600080fd5b6101666105fc565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34156101b357600080fd5b6101bb610621565b6040518082815260200191505060405180910390f35b60025481565b60011515600460009054906101000a900460ff1615151415156101f957600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614806102a15750600160009054906101000a90047
3ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16145b15156102ac57600080fd5b6000600460006101000a81548160ff0219169083151502179055506003543073ffffffffffffffffffffffffffffffffffffffff163103600281905550565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614806103935750600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16145b151561039e57600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16141561048357600060025411801561040757506002548111155b151561041257600080fd5b80600254036002819055506000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050151561047e57600080fd5b610510565b600060035411801561049757506003548111155b15156104a257600080fd5b8060035403600381905550600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050151561050f57600080fd5b5b50565b60011515600460009054906101000a900460ff16151514151561053557600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614801561059657506003548160035401115b80156105bd575080600354013073ffffffffffffffffffffffffffffffffffffffff163110155b15156105c857600080fd5b806003540160038190555050565b600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b600354815600a165627a7a72305820c3b849e8440987ce43eae3097b77672a69234d516
351368b03fe5b7de03807910029000000000000000000000000c65e620a3a55451316168d57e268f5702ef56a11", + "value": "0x0" + }, + "blockNumber": 2294702, + "result": { + "address": "0x7dc9c9730689ff0b0fd506c67db815f12d90a448", + "code": "0x606060405260043610610083576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806305e4382a146100855780631c02708d146100ae5780632e1a7d4d146100c35780635114cb52146100e6578063a37dda2c146100fe578063ae200e7914610153578063b5769f70146101a8575b005b341561009057600080fd5b6100986101d1565b6040518082815260200191505060405180910390f35b34156100b957600080fd5b6100c16101d7565b005b34156100ce57600080fd5b6100e460048080359060200190919050506102eb565b005b6100fc6004808035906020019091905050610513565b005b341561010957600080fd5b6101116105d6565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b341561015e57600080fd5b6101666105fc565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34156101b357600080fd5b6101bb610621565b6040518082815260200191505060405180910390f35b60025481565b60011515600460009054906101000a900460ff1615151415156101f957600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614806102a15750600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16145b15156102ac57600080fd5b6000600460006101000a81548160ff0219169083151502179055506003543073ffffffffffffffffffffffffffffffffffffffff163103600281905550565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614806103935750600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffff
ffffffff163373ffffffffffffffffffffffffffffffffffffffff16145b151561039e57600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16141561048357600060025411801561040757506002548111155b151561041257600080fd5b80600254036002819055506000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050151561047e57600080fd5b610510565b600060035411801561049757506003548111155b15156104a257600080fd5b8060035403600381905550600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050151561050f57600080fd5b5b50565b60011515600460009054906101000a900460ff16151514151561053557600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614801561059657506003548160035401115b80156105bd575080600354013073ffffffffffffffffffffffffffffffffffffffff163110155b15156105c857600080fd5b806003540160038190555050565b600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b600354815600a165627a7a72305820c3b849e8440987ce43eae3097b77672a69234d516351368b03fe5b7de03807910029", + "gasUsed": "0x897be" + }, + "subtraces": 0, + "traceAddress": [], + "type": "create" + } + ] +} diff --git a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/deep_calls.json b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/deep_calls.json new file mode 100644 index 00000000..375a1636 --- /dev/null +++ b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/deep_calls.json @@ -0,0 +1,635 @@ +{ + "context": { + "difficulty": "117066904", + "gasLimit": "4712384", + "miner": 
"0x1977c248e1014cc103929dd7f154199c916e39ec", + "number": "25001", + "timestamp": "1479891545" + }, + "genesis": { + "alloc": { + "0x2a98c5f40bfa3dee83431103c535f6fae9a8ad38": { + "balance": "0x0", + "code": "0x606060405236156100825760e060020a600035046302d05d3f811461008a5780630accce061461009c5780631ab9075a146100c757806331ed274614610102578063645a3b7214610133578063772fdae314610155578063a7f4377914610180578063ae5f80801461019e578063c9bded21146101ea578063f905c15a14610231575b61023a610002565b61023c600054600160a060020a031681565b61023a600435602435604435606435608435600254600160a060020a03166000141561024657610002565b61023a600435600254600160a060020a03166000148015906100f8575060025433600160a060020a03908116911614155b156102f457610002565b61023a60043560243560443560643560843560a43560c435600254600160a060020a03166000141561031657610002565b61023a600435602435600254600160a060020a0316600014156103d057610002565b61023a600435602435604435606435608435600254600160a060020a03166000141561046157610002565b61023a60025433600160a060020a0390811691161461051657610002565b61023a6004356024356044356060828152600160a060020a0382169060ff8516907fa6c2f0913db6f79ff0a4365762c61718973b3413d6e40382e704782a9a5099f690602090a3505050565b61023a600435602435600160a060020a038116606090815260ff8316907fee6348a7ec70f74e3d6cba55a53e9f9110d180d7698e9117fc466ae29a43e34790602090a25050565b61023c60035481565b005b6060908152602090f35b60025460e060020a6313bc6d4b02606090815233600160a060020a0390811660645291909116906313bc6d4b906084906020906024816000876161da5a03f115610002575050604051511515905061029d57610002565b60408051858152602081018390528151600160a060020a03858116939087169260ff8a16927f5a690ecd0cb15c1c1fd6b6f8a32df0d4f56cb41a54fea7e94020f013595de796929181900390910190a45050505050565b6002805473ffffffffffffffffffffffffffffffffffffffff19168217905550565b60025460e060020a6313bc6d4b02606090815233600160a060020a0390811660645291909116906313bc6d4b906084906020906024816000876161da5a03f115610002575050604051511515905061036d57610002565b604080518681526020810186905280820
1859052606081018490529051600160a060020a03831691889160ff8b16917fd65d9ddafbad8824e2bbd6f56cc9f4ac27ba60737035c10a321ea2f681c94d47919081900360800190a450505050505050565b60025460e060020a6313bc6d4b02606090815233600160a060020a0390811660645291909116906313bc6d4b906084906020906024816000876161da5a03f115610002575050604051511515905061042757610002565b60408051828152905183917fa9c6cbc4bd352a6940479f6d802a1001550581858b310d7f68f7bea51218cda6919081900360200190a25050565b60025460e060020a6313bc6d4b02606090815233600160a060020a0390811660645291909116906313bc6d4b906084906020906024816000876161da5a03f11561000257505060405151151590506104b857610002565b80600160a060020a031684600160a060020a03168660ff167f69bdaf789251e1d3a0151259c0c715315496a7404bce9fd0b714674685c2cab78686604051808381526020018281526020019250505060405180910390a45050505050565b600254600160a060020a0316ff", + "nonce": "1", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000002": "0x0000000000000000000000002cccf5e0538493c235d1c5ef6580f77d99e91396" + } + }, + "0x2cccf5e0538493c235d1c5ef6580f77d99e91396": { + "balance": "0x0", + "code": 
"0x606060405236156100775760e060020a600035046302d05d3f811461007f57806313bc6d4b146100915780633688a877146100b95780635188f9961461012f5780637eadc976146101545780638ad79680146101d3578063a43e04d814610238578063a7f437791461025e578063e16c7d981461027c575b61029f610002565b6102a1600054600160a060020a031681565b6102be600435600160a060020a03811660009081526002602052604090205460ff165b919050565b6102d26004356040805160208181018352600080835284815260038252835190849020805460026001821615610100026000190190911604601f8101849004840283018401909552848252929390929183018282801561037d5780601f106103525761010080835404028352916020019161037d565b61029f6004356024356000805433600160a060020a039081169116146104a957610002565b61034060043560008181526001602090815260408083205481517ff905c15a0000000000000000000000000000000000000000000000000000000081529151600160a060020a03909116928392839263f905c15a92600483810193919291829003018189876161da5a03f1156100025750506040515195945050505050565b60408051602060248035600481810135601f810185900485028601850190965285855261029f9581359591946044949293909201918190840183828082843750949650505050505050600054600160a060020a0390811633909116146104f657610002565b61029f6004355b600080548190600160a060020a0390811633909116146105a457610002565b61029f60005433600160a060020a0390811691161461072957610002565b6102a1600435600081815260016020526040902054600160a060020a03166100b4565b005b60408051600160a060020a03929092168252519081900360200190f35b604080519115158252519081900360200190f35b60405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600f02600301f150905090810190601f1680156103325780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b60408051918252519081900360200190f35b820191906000526020600020905b81548152906001019060200180831161036057829003601f168201915b505050505090506100b4565b506000828152600160208181526040808420805473ffffffffffffffffffffffffffffffffffffffff191686179055600160a060020a038581168086526002909352818520805460ff191690941790935580517f1ab9075a000000000
0000000000000000000000000000000000000000000000081523090931660048401525184939192631ab9075a926024828101939192829003018183876161da5a03f11561000257505060408051602081018690528082019290925243606083015260808083526003908301527f414444000000000000000000000000000000000000000000000000000000000060a0830152517f8ac68d4e97d65912f220b4c5f87978b8186320a5e378c1369850b5b5f90323d39181900360c00190a15b505050565b600083815260016020526040902054600160a060020a03838116911614156104d0576104a4565b600083815260016020526040812054600160a060020a031614610389576103898361023f565b600082815260036020908152604082208054845182855293839020919360026001831615610100026000190190921691909104601f90810184900483019391929186019083901061056a57805160ff19168380011785555b5061059a9291505b808211156105a05760008155600101610556565b8280016001018555821561054e579182015b8281111561054e57825182600050559160200191906001019061057c565b50505050565b5090565b600083815260016020526040812054600160a060020a031614156105c757610002565b50506000818152600160205260408082205481517fa7f437790000000000000000000000000000000000000000000000000000000081529151600160a060020a0391909116928392839263a7f4377992600483810193919291829003018183876161da5a03f11561000257505050600160005060008460001916815260200190815260200160002060006101000a815490600160a060020a0302191690556002600050600083600160a060020a0316815260200190815260200160002060006101000a81549060ff02191690557f8ac68d4e97d65912f220b4c5f87978b8186320a5e378c1369850b5b5f90323d383834360405180806020018560001916815260200184600160a060020a03168152602001838152602001828103825260038152602001807f44454c000000000000000000000000000000000000000000000000000000000081526020015060200194505050505060405180910390a1505050565b600054600160a060020a0316ff", + "nonce": "1", + "storage": { + "0x0684ac65a9fa32414dda56996f4183597d695987fdb82b145d722743891a6fe8": "0x0000000000000000000000003e9286eafa2db8101246c2131c09b49080d00690", + "0x1cd76f78169a420d99346e3501dd3e541622c38a226f9b63e01cfebc69879dc7": 
"0x000000000000000000000000b4fe7aa695b326c9d219158d2ca50db77b39f99f", + "0x8e54a4494fe5da016bfc01363f4f6cdc91013bb5434bd2a4a3359f13a23afa2f": "0x000000000000000000000000cf00ffd997ad14939736f026006498e3f099baaf", + "0x94edf7f600ba56655fd65fca1f1424334ce369326c1dc3e53151dcd1ad06bc13": "0x0000000000000000000000000000000000000000000000000000000000000001", + "0xbbee47108b275f55f98482c6800f6372165e88b0330d3f5dae6419df4734366c": "0x0000000000000000000000002a98c5f40bfa3dee83431103c535f6fae9a8ad38", + "0xd38c0c4e84de118cfdcc775130155d83b8bbaaf23dc7f3c83a626b10473213bd": "0x0000000000000000000000000000000000000000000000000000000000000001", + "0xfb3aa5c655c2ec9d40609401f88d505d1da61afaa550e36ef5da0509ada257ba": "0x0000000000000000000000007986bad81f4cbd9317f5a46861437dae58d69113" + } + }, + "0x3e9286eafa2db8101246c2131c09b49080d00690": { + "balance": "0x0", + "code": "0x606060405236156100cf5760e060020a600035046302d05d3f81146100d7578063056d4470146100e957806316c66cc61461010c5780631ab9075a146101935780633ae1005c146101ce57806358541662146101fe5780635ed61af014610231578063644e3b791461025457806384dbac3b146102db578063949ae479146102fd5780639859387b14610321578063a7f4377914610340578063ab03fc261461035e578063e8161b7814610385578063e964d4e114610395578063f905c15a146103a5578063f92eb774146103ae575b6103be610002565b6103c0600054600160a060020a031681565b6103be6004356002546000908190600160a060020a031681141561040357610002565b6103dd60043560006108365b6040805160025460e360020a631c2d8fb30282527f636f6e747261637464620000000000000000000000000000000000000000000060048301529151600092600160a060020a03169163e16c7d98916024828101926020929190829003018187876161da5a03f1156100025750506040515191506104e29050565b6103be600435600254600160a060020a03166000148015906101c4575060025433600160a060020a03908116911614155b1561088d57610002565b6103be600435602435604435606435600254600090819081908190600160a060020a03168114156108af57610002565b6103c0600435602435604435606435608435600254600090819081908190600160a060020a03168114156110e857610002565b6103b
e6004356002546000908190600160a060020a03168114156115ec57610002565b6103c06004356000611b635b6040805160025460e360020a631c2d8fb30282527f6d61726b6574646200000000000000000000000000000000000000000000000060048301529151600092600160a060020a03169163e16c7d98916024828101926020929190829003018187876161da5a03f1156100025750506040515191506104e29050565b6103be600435602435600254600160a060020a031660001415611bb557610002565b6103be600435602435600254600090600160a060020a0316811415611d2e57610002565b6103be600435600254600160a060020a031660001415611fc657610002565b6103be60025433600160a060020a0390811691161461207e57610002565b6103be600435602435604435600254600090600160a060020a031681141561208c57610002565b6103dd60043560006124b8610260565b6103c0600435600061250a610118565b6103f160035481565b6103f16004356000612561610260565b005b60408051600160a060020a03929092168252519081900360200190f35b604080519115158252519081900360200190f35b60408051918252519081900360200190f35b6040805160025460e060020a6313bc6d4b02825233600160a060020a03908116600484015292519216916313bc6d4b9160248181019260209290919082900301816000876161da5a03f115610002575050604051511515905061046557610002565b8291506104e55b6040805160025460e360020a631c2d8fb30282527f63706f6f6c00000000000000000000000000000000000000000000000000000060048301529151600092600160a060020a03169163e16c7d98916024828101926020929190829003018187876161da5a03f115610002575050604051519150505b90565b600160a060020a031663b2206e6d83600160a060020a0316632e94420f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f1156100025750506040805180517fb2206e6d0000000000000000000000000000000000000000000000000000000082526004820152600160a060020a038816602482015290516044808301935060209282900301816000876161da5a03f11561000257505060405151915061059b90506106ba565b600160a060020a031663d5b205ce83600160a060020a03166336da44686040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060408051805160e160020a636ad902e7028252600160a060020a0390811660048301526024820187905288166044820152905160648281019
350600092829003018183876161da5a03f115610002575050506107355b6040805160025460e360020a631c2d8fb30282527f6c6f676d6772000000000000000000000000000000000000000000000000000060048301529151600092600160a060020a03169163e16c7d98916024828101926020929190829003018187876161da5a03f1156100025750506040515191506104e29050565b50826120ee5b6040805160025460e360020a631c2d8fb30282527f6163636f756e7463746c0000000000000000000000000000000000000000000060048301529151600092600160a060020a03169163e16c7d98916024828101926020929190829003018187876161da5a03f1156100025750506040515191506104e29050565b600160a060020a0316630accce06600684600160a060020a0316632e94420f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060408051805160e360020a6306db488d02825291519192899290916336da446891600482810192602092919082900301816000876161da5a03f1156100025750505060405180519060200150866040518660e060020a028152600401808681526020018560001916815260200184600160a060020a0316815260200183600160a060020a03168152602001828152602001955050505050506000604051808303816000876161da5a03f11561000257505050505050565b600160a060020a03166316c66cc6836040518260e060020a0281526004018082600160a060020a031681526020019150506020604051808303816000876161da5a03f115610002575050604051519150505b919050565b6002805473ffffffffffffffffffffffffffffffffffffffff19168217905550565b6040805160025460e060020a6313bc6d4b02825233600160a060020a03908116600484015292519216916313bc6d4b9160248181019260209290919082900301816000876161da5a03f115610002575050604051511515905061091157610002565b87935061091c610260565b600160a060020a031663bdbdb08685600160a060020a0316632e94420f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f1156100025750506040805180517fbdbdb0860000000000000000000000000000000000000000000000000000000082526004820152602481018a905290516044808301935060209282900301816000876161da5a03f1156100025750506040515193506109ca90506106ba565b600160a060020a03166381982a7a8885876040518460e060020a0281526004018084600160a060020a031681526020018381526020018260016
0a060020a0316815260200193505050506000604051808303816000876161da5a03f11561000257505050610a3661046c565b600160a060020a03166308636bdb85600160a060020a0316632e94420f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f1156100025750506040805180517f08636bdb000000000000000000000000000000000000000000000000000000008252600482015260248101889052604481019290925251606482810192602092919082900301816000876161da5a03f11561000257505060408051805160e160020a630a5d50db028252600482018190529151919450600160a060020a03871692506314baa1b6916024828101926000929190829003018183876161da5a03f11561000257505050610b3561046c565b600160a060020a0316630a3b6ede85600160a060020a0316632e94420f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060408051805160e160020a63051db76f0282526004820152600160a060020a038d16602482015290516044808301935060209282900301816000876161da5a03f115610002575050604051519150610bd590506106ba565b600160a060020a031663d5b205ce87838b6040518460e060020a0281526004018084600160a060020a0316815260200183815260200182600160a060020a0316815260200193505050506000604051808303816000876161da5a03f11561000257505050610c41610118565b600160a060020a031663988db79c888a6040518360e060020a0281526004018083600160a060020a0316815260200182600160a060020a03168152602001925050506000604051808303816000876161da5a03f11561000257505050610ca5610260565b600160a060020a031663f4f2821b896040518260e060020a0281526004018082600160a060020a031681526020019150506000604051808303816000876161da5a03f11561000257505050610d6f5b6040805160025460e360020a631c2d8fb30282527f747261646564620000000000000000000000000000000000000000000000000060048301529151600092600160a060020a03169163e16c7d98916024828101926020929190829003018187876161da5a03f1156100025750506040515191506104e29050565b600160a060020a0316635f539d69896040518260e060020a0281526004018082600160a060020a031681526020019150506000604051808303816000876161da5a03f11561000257505050610dc2610639565b600160a060020a0316630accce06600386600160a060020a0316632e94420f6040518160e060020a028
1526004018090506020604051808303816000876161da5a03f11561000257505060408051805160e360020a6315b1ea01028252915191928e928e9263ad8f500891600482810192602092919082900301816000876161da5a03f11561000257505050604051805190602001506040518660e060020a028152600401808681526020018560001916815260200184600160a060020a0316815260200183600160a060020a03168152602001828152602001955050505050506000604051808303816000876161da5a03f11561000257505050610ec5610639565b600160a060020a0316630accce06600386600160a060020a0316632e94420f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060408051805160e360020a6315b1ea01028252915191928e928d9263ad8f500891600482810192602092919082900301816000876161da5a03f11561000257505050604051805190602001506040518660e060020a028152600401808681526020018560001916815260200184600160a060020a0316815260200183600160a060020a03168152602001828152602001955050505050506000604051808303816000876161da5a03f11561000257505050610fc8610639565b600160a060020a031663645a3b7285600160a060020a0316632e94420f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060405151905061101e610260565b600160a060020a031663f92eb77488600160a060020a0316632e94420f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060408051805160e260020a633e4baddd028252600482015290516024828101935060209282900301816000876161da5a03f11561000257505060408051805160e060020a86028252600482019490945260248101939093525160448381019360009350829003018183876161da5a03f115610002575050505050505050505050565b6040805160025460e060020a6313bc6d4b02825233600160a060020a03908116600484015292519216916313bc6d4b9160248181019260209290919082900301816000876161da5a03f115610002575050604051511515905061114a57610002565b604051600254600160a060020a0316908a908a908a908a908a90611579806125b38339018087600160a060020a0316815260200186600160a060020a03168152602001856000191681526020018481526020018381526020018281526020019650505050505050604051809103906000f092506111c5610118565b600160a060020a031663b9858a288
a856040518360e060020a0281526004018083600160a060020a0316815260200182600160a060020a03168152602001925050506000604051808303816000876161da5a03f11561000257505050611229610260565b600160a060020a0316635188f99689856040518360e060020a028152600401808360001916815260200182600160a060020a03168152602001925050506000604051808303816000876161da5a03f11561000257505050611288610260565b600160a060020a031663bdbdb08689896040518360e060020a0281526004018083600019168152602001828152602001925050506020604051808303816000876161da5a03f1156100025750506040515192506112e590506106ba565b600160a060020a03166346d88e7d8a858a6040518460e060020a0281526004018084600160a060020a0316815260200183600160a060020a0316815260200182815260200193505050506000604051808303816000876161da5a03f115610002575050506113516106ba565b600160a060020a03166381982a7a8a84866040518460e060020a0281526004018084600160a060020a0316815260200183815260200182600160a060020a0316815260200193505050506000604051808303816000876161da5a03f115610002575050506113bd61046c565b600160a060020a0316632b58469689856040518360e060020a028152600401808360001916815260200182600160a060020a03168152602001925050506000604051808303816000876161da5a03f1156100025750505061141c61046c565b600160a060020a03166308636bdb8984866040518460e060020a028152600401808460001916815260200183815260200182600160a060020a0316815260200193505050506020604051808303816000876161da5a03f11561000257505060408051805160e160020a630a5d50db028252600482018190529151919350600160a060020a03861692506314baa1b6916024828101926000929190829003018183876161da5a03f115610002575050506114d3610639565b6040805160e160020a630566670302815260016004820152602481018b9052600160a060020a0386811660448301528c811660648301526000608483018190529251931692630accce069260a480840193919291829003018183876161da5a03f11561000257505050611544610639565b600160a060020a031663645a3b728961155b610260565b600160a060020a031663f92eb7748c6040518260e060020a02815260040180826000191681526020019150506020604051808303816000876161da5a03f11561000257505060408051805160e060020a860282526004820194909452602481019
39093525160448084019360009350829003018183876161da5a03f1156100025750939a9950505050505050505050565b6040805160025460e060020a6313bc6d4b02825233600160a060020a03908116600484015292519216916313bc6d4b9160248181019260209290919082900301816000876161da5a03f115610002575050604051511515905061164e57610002565b82915061165961046c565b600160a060020a0316630a3b6ede83600160a060020a0316632e94420f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060408051805160e160020a63051db76f0282526004820152600160a060020a038816602482015290516044808301935060209282900301816000876161da5a03f1156100025750506040515191506116f990506106ba565b600160a060020a031663d5b205ce83600160a060020a03166336da44686040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060408051805160e160020a636ad902e7028252600160a060020a0390811660048301526024820187905288166044820152905160648281019350600092829003018183876161da5a03f1156100025750505061179b6106ba565b600160a060020a031663d653078983600160a060020a03166336da44686040518160e060020a0281526004018090506020604051808303816000876161da5a03f1156100025750506040805180517ff1ff78a0000000000000000000000000000000000000000000000000000000008252915191929163f1ff78a09160048181019260209290919082900301816000876161da5a03f1156100025750505060405180519060200150866040518460e060020a0281526004018084600160a060020a0316815260200183815260200182600160a060020a0316815260200193505050506000604051808303816000876161da5a03f1156100025750505061189f610260565b600160a060020a031663f4f2821b846040518260e060020a0281526004018082600160a060020a031681526020019150506000604051808303816000876161da5a03f115610002575050506118f2610118565b600160a060020a031663f4f2821b846040518260e060020a0281526004018082600160a060020a031681526020019150506000604051808303816000876161da5a03f11561000257505050611945610639565b600160a060020a0316630accce06600484600160a060020a0316632e94420f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060408051805160e360020a6306db488d028252915
19192899290916336da44689181870191602091908190038801816000876161da5a03f115610002575050506040518051906020015060006040518660e060020a028152600401808681526020018560001916815260200184600160a060020a0316815260200183600160a060020a03168152602001828152602001955050505050506000604051808303816000876161da5a03f11561000257505050611a48610639565b600160a060020a031663645a3b7283600160a060020a0316632e94420f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f115610002575050604051519050611a9e610260565b600160a060020a031663f92eb77486600160a060020a0316632e94420f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060408051805160e260020a633e4baddd028252600482015290516024828101935060209282900301816000876161da5a03f11561000257505060408051805160e060020a86028252600482019490945260248101939093525160448381019360009350829003018183876161da5a03f11561000257505050505050565b600160a060020a03166381738c59836040518260e060020a02815260040180826000191681526020019150506020604051808303816000876161da5a03f1156100025750506040515191506108889050565b6040805160025460e060020a6313bc6d4b02825233600160a060020a03908116600484015292519216916313bc6d4b9160248181019260209290919082900301816000876161da5a03f1156100025750506040515115159050611c1757610002565b611c1f610260565b600160a060020a03166338a699a4836040518260e060020a02815260040180826000191681526020019150506020604051808303816000876161da5a03f11561000257505060405151159050611c7457610002565b611c7c610260565b600160a060020a0316632243118a836040518260e060020a02815260040180826000191681526020019150506000604051808303816000876161da5a03f11561000257505050611cca610639565b600160a060020a031663ae5f8080600184846040518460e060020a028152600401808481526020018360001916815260200182600160a060020a0316815260200193505050506000604051808303816000876161da5a03f115610002575050505050565b6040805160025460e060020a6313bc6d4b02825233600160a060020a03908116600484015292519216916313bc6d4b9160248181019260209290919082900301816000876161da5a03f1156100025750506040515115159050611d90576
10002565b5081611d9a610260565b600160a060020a031663581d5d6084846040518360e060020a0281526004018083600160a060020a03168152602001828152602001925050506000604051808303816000876161da5a03f11561000257505050611df5610639565b600160a060020a0316630accce06600283600160a060020a0316632e94420f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060408051805160e160020a630566670302825260048201949094526024810193909352600160a060020a038816604484015260006064840181905260848401819052905160a4808501949293509091829003018183876161da5a03f11561000257505050611eab610639565b600160a060020a031663645a3b7282600160a060020a0316632e94420f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f115610002575050604051519050611f01610260565b600160a060020a031663f92eb77485600160a060020a0316632e94420f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060408051805160e260020a633e4baddd028252600482015290516024828101935060209282900301816000876161da5a03f11561000257505060408051805160e060020a86028252600482019490945260248101939093525160448381019360009350829003018183876161da5a03f11561000257505050505050565b6040805160025460e060020a6313bc6d4b02825233600160a060020a03908116600484015292519216916313bc6d4b9160248181019260209290919082900301816000876161da5a03f115610002575050604051511515905061202857610002565b612030610118565b600160a060020a0316639859387b826040518260e060020a0281526004018082600160a060020a031681526020019150506000604051808303816000876161da5a03f1156100025750505050565b600254600160a060020a0316ff5b6040805160025460e060020a6313bc6d4b02825233600160a060020a03908116600484015292519216916313bc6d4b9160248181019260209290919082900301816000876161da5a03f11561000257505060405151151590506106b457610002565b600160a060020a031663d65307898383600160a060020a031663f1ff78a06040518160e060020a0281526004018090506020604051808303816000876161da5a03f1156100025750506040805180517fd6530789000000000000000000000000000000000000000000000000000000008252600160a060020a0394851660048301526024820
15292891660448401525160648381019360009350829003018183876161da5a03f115610002575050506121a5610118565b600160a060020a031663f4f2821b856040518260e060020a0281526004018082600160a060020a031681526020019150506000604051808303816000876161da5a03f115610002575050506121f8610cf4565b600160a060020a031663f4f2821b856040518260e060020a0281526004018082600160a060020a031681526020019150506000604051808303816000876161da5a03f1156100025750505061224b610639565b600160a060020a0316630accce06600583600160a060020a0316632e94420f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060408051805160e360020a6306db488d028252915191928a9290916336da446891600482810192602092919082900301816000876161da5a03f1156100025750505060405180519060200150886040518660e060020a028152600401808681526020018560001916815260200184600160a060020a0316815260200183600160a060020a03168152602001828152602001955050505050506000604051808303816000876161da5a03f1156100025750505080600160a060020a031663ea71b02d6040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060405151600160a060020a031660001490506124b25761239f610639565b600160a060020a0316630accce06600583600160a060020a0316632e94420f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f1156100025750506040805180517fea71b02d000000000000000000000000000000000000000000000000000000008252915191928a92909163ea71b02d91600482810192602092919082900301816000876161da5a03f1156100025750505060405180519060200150886040518660e060020a028152600401808681526020018560001916815260200184600160a060020a0316815260200183600160a060020a03168152602001828152602001955050505050506000604051808303816000876161da5a03f115610002575050505b50505050565b600160a060020a03166338a699a4836040518260e060020a02815260040180826000191681526020019150506020604051808303816000876161da5a03f1156100025750506040515191506108889050565b600160a060020a031663213fe2b7836040518260e060020a0281526004018082600160a060020a031681526020019150506020604051808303816000876161da5a03f115610002575050604051519150610
8889050565b600160a060020a031663f92eb774836040518260e060020a02815260040180826000191681526020019150506020604051808303816000876161da5a03f115610002575050604051519150610888905056606060405260405160c08061157983396101206040819052825160805160a051935160e0516101005160008054600160a060020a03199081163317909155600180546005805484168817905560048a90556006869055600b8590556008849055909116861760a060020a60ff02191690554360038190556002558686526101408390526101608190529396929594919390929091600160a060020a033016917f76885d242fb71c6f74a7e717416e42eff4d96faf54f6de75c6a0a6bbd8890c6b91a230600160a060020a03167fa609f6bd4ad0b4f419ddad4ac9f0d02c2b9295c5e6891469055cf73c2b568fff600b600050546040518082815260200191505060405180910390a250505050505061145e8061011b6000396000f3606060405236156101745760e060020a600035046302d05d3f811461017c57806304a7fdbc1461018e5780630e90f957146101fb5780630fb5a6b41461021257806314baa1b61461021b57806317fc45e21461023a5780632b096926146102435780632e94420f1461025b578063325a19f11461026457806336da44681461026d5780633f81a2c01461027f5780633fc306821461029757806345ecd3d7146102d45780634665096d146102dd5780634e71d92d146102e657806351a34eb8146103085780636111bb951461032d5780636f265b93146103445780637e9014e11461034d57806390ba009114610360578063927df5e014610393578063a7f437791461046c578063ad8f50081461046e578063bc6d909414610477578063bdec3ad114610557578063c19d93fb1461059a578063c9503fe2146105ad578063e0a73a93146105b6578063ea71b02d146105bf578063ea8a1af0146105d1578063ee4a96f9146105f3578063f1ff78a01461065c575b61046c610002565b610665600054600160a060020a031681565b6040805160c081810190925261046c9160049160c4918390600690839083908082843760408051808301909152929750909561018495509193509091908390839080828437509095505050505050600554600090600160a060020a0390811633909116146106a857610002565b61068260015460a060020a900460ff166000145b90565b61069660085481565b61046c600435600154600160a060020a03166000141561072157610002565b610696600d5481565b610696600435600f8160068110156100025750015481565b61069660045481565b61069660035481565b61066560055460016
0a060020a031681565b61069660043560158160068110156100025750015481565b6106966004355b600b54600f5460009160028202808203928083039290810191018386101561078357601054840186900394505b50505050919050565b61069660025481565b61069660095481565b61046c600554600090600160a060020a03908116339091161461085857610002565b61046c600435600554600090600160a060020a03908116339091161461092e57610002565b6106826001805460a060020a900460ff161461020f565b610696600b5481565b61068260075460a060020a900460ff1681565b6106966004355b600b54601554600091600282028082039280830392908101910183861015610a6c5760165494506102cb565b61046c6004356024356044356040805160015460e360020a631c2d8fb302825260b260020a691858d8dbdd5b9d18dd1b02600483015291516000928392600160a060020a03919091169163e16c7d9891602481810192602092909190829003018187876161da5a03f1156100025750505060405180519060200150905080600160a060020a031663c4b0c96a336040518260e060020a0281526004018082600160a060020a031681526020019150506020604051808303816000876161da5a03f1156100025750506040515115159050610b4657610002565b005b610696600a5481565b61046c60006000600060006000600160009054906101000a9004600160a060020a0316600160a060020a031663e16c7d986040518160e060020a028152600401808060b260020a691858d8dbdd5b9d18dd1b0281526020015060200190506020604051808303816000876161da5a03f1156100025750505060405180519060200150905080600160a060020a031663c4b0c96a336040518260e060020a0281526004018082600160a060020a031681526020019150506020604051808303816000876161da5a03f1156100025750506040515115159050610f1757610002565b61046c5b60015b60058160ff16101561071e57600f6001820160ff166006811015610002578101549060ff83166006811015610002570154101561129057610002565b61069660015460a060020a900460ff1681565b61069660065481565b610696600c5481565b610665600754600160a060020a031681565b61046c600554600090600160a060020a0390811633909116146112c857610002565b6040805160c081810190925261046c9160049160c4918390600690839083908082843760408051808301909152929750909561018495509193509091908390839080828437509095505050505050600154600090600160a060020a03168114156113fb57610002565b610
696600e5481565b60408051600160a060020a03929092168252519081900360200190f35b604080519115158252519081900360200190f35b60408051918252519081900360200190f35b5060005b60068160ff16101561070857828160ff166006811015610002576020020151600f60ff831660068110156100025701558160ff82166006811015610002576020020151601560ff831660068110156100025701556001016106ac565b61071061055b565b505050565b600e8054820190555b50565b6040805160015460e060020a6313bc6d4b02825233600160a060020a03908116600484015292519216916313bc6d4b9160248181019260209290919082900301816000876161da5a03f115610002575050604051511515905061071557610002565b83861015801561079257508286105b156107b457600f546010546011548689039082030291909104900394506102cb565b8286101580156107c55750600b5486105b156107e757600f546011546012548589039082030291909104900394506102cb565b600b5486108015906107f857508186105b1561081d57600b54600f546012546013549289039281039290920204900394506102cb565b81861015801561082c57508086105b1561084e57600f546013546014548489039082030291909104900394506102cb565b60145494506102cb565b60015460a060020a900460ff1660001461087157610002565b600254600a01431161088257610002565b6040805160015460e360020a631c2d8fb302825260a860020a6a636f6e74726163746170690260048301529151600160a060020a03929092169163e16c7d989160248181019260209290919082900301816000876161da5a03f1156100025750505060405180519060200150905080600160a060020a031663771d50e16040518160e060020a0281526004018090506000604051808303816000876161da5a03f1156100025750505050565b60015460a060020a900460ff1660001461094757610002565b600254600a01431161095857610002565b6040805160015460e360020a631c2d8fb302825260a860020a6a636f6e74726163746170690260048301529151600160a060020a03929092169163e16c7d989160248181019260209290919082900301816000876161da5a03f1156100025750506040805180517f51a34eb8000000000000000000000000000000000000000000000000000000008252600482018690529151919350600160a060020a03841692506351a34eb8916024808301926000929190829003018183876161da5a03f11561000257505050600b8290554360025560408051838152905130600160a060020a0316917fa609f6bd4ad0b4f
419ddad4ac9f0d02c2b9295c5e6891469055cf73c2b568fff919081900360200190a25050565b838610158015610a7b57508286105b15610a9d576015546016546017548689039082900302919091040194506102cb565b828610158015610aae5750600b5486105b15610ad0576015546017546018548589039082900302919091040194506102cb565b600b548610801590610ae157508186105b15610b0657600b546015546018546019549289039281900392909202040194506102cb565b818610158015610b1557508086105b15610b3757601554601954601a548489039082900302919091040194506102cb565b601a54860181900394506102cb565b60015460a060020a900460ff16600014610b5f57610002565b6001805460a060020a60ff02191660a060020a17908190556040805160e360020a631c2d8fb302815260a860020a6a636f6e74726163746170690260048201529051600160a060020a03929092169163e16c7d989160248181019260209290919082900301816000876161da5a03f1156100025750506040805180516004805460e260020a633e4baddd028452908301529151919450600160a060020a038516925063f92eb77491602482810192602092919082900301816000876161da5a03f115610002575050604080518051600a556005547ffebf661200000000000000000000000000000000000000000000000000000000825233600160a060020a03908116600484015216602482015260448101879052905163febf661291606480820192600092909190829003018183876161da5a03f115610002575050508215610cc7576007805473ffffffffffffffffffffffffffffffffffffffff191633179055610dbb565b6040805160055460065460e060020a63599efa6b028352600160a060020a039182166004840152602483015291519184169163599efa6b91604481810192600092909190829003018183876161da5a03f115610002575050604080516006547f56ccb6f000000000000000000000000000000000000000000000000000000000825233600160a060020a03166004830152602482015290516356ccb6f091604480820192600092909190829003018183876161da5a03f115610002575050600580546007805473ffffffffffffffffffffffffffffffffffffffff19908116600160a060020a038416179091551633179055505b6007805460a060020a60ff02191660a060020a87810291909117918290556008544301600955900460ff1615610df757600a54610e039061029e565b600a54610e0b90610367565b600c55610e0f565b600c555b600c54670de0b6b3a7640000850204600d55600754600554604080517f759
297bb000000000000000000000000000000000000000000000000000000008152600160a060020a039384166004820152918316602483015260448201879052519184169163759297bb91606481810192600092909190829003018183876161da5a03f11561000257505060408051600754600a54600d54600554600c5460a060020a850460ff161515865260208601929092528486019290925260608401529251600160a060020a0391821694509281169230909116917f3b3d1986083d191be01d28623dc19604728e29ae28bdb9ba52757fdee1a18de2919081900360800190a45050505050565b600954431015610f2657610002565b6001805460a060020a900460ff1614610f3e57610002565b6001805460a060020a60ff0219167402000000000000000000000000000000000000000017908190556040805160e360020a631c2d8fb302815260a860020a6a636f6e74726163746170690260048201529051600160a060020a03929092169163e16c7d989160248181019260209290919082900301816000876161da5a03f1156100025750506040805180516004805460e260020a633e4baddd028452908301529151919750600160a060020a038816925063f92eb77491602482810192602092919082900301816000876161da5a03f115610002575050604051516007549095506000945060a060020a900460ff1615905061105c57600a5484111561105757600a54600d54670de0b6b3a7640000918603020492505b61107e565b600a5484101561107e57600a54600d54670de0b6b3a764000091869003020492505b60065483111561108e5760065492505b6006548390039150600083111561111857604080516005546007547f5928d37f000000000000000000000000000000000000000000000000000000008352600160a060020a0391821660048401528116602483015260448201869052915191871691635928d37f91606481810192600092909190829003018183876161da5a03f115610002575050505b600082111561117a576040805160055460e060020a63599efa6b028252600160a060020a0390811660048301526024820185905291519187169163599efa6b91604481810192600092909190829003018183876161da5a03f115610002575050505b6040805185815260208101849052808201859052905130600160a060020a0316917f89e690b1d5aaae14f3e85f108dc92d9ab3763a58d45aed8b59daedbbae8fe794919081900360600190a260008311156112285784600160a060020a0316634cc927d785336040518360e060020a0281526004018083815260200182600160a060020a03168152602001925050506000604051808303816000876
161da5a03f11561000257505050611282565b84600160a060020a0316634cc927d7600a60005054336040518360e060020a0281526004018083815260200182600160a060020a03168152602001925050506000604051808303816000876161da5a03f115610002575050505b600054600160a060020a0316ff5b60156001820160ff166006811015610002578101549060ff8316600681101561000257015411156112c057610002565b60010161055e565b60015460a060020a900460ff166000146112e157610002565b600254600a0143116112f257610002565b6001546040805160e360020a631c2d8fb302815260a860020a6a636f6e74726163746170690260048201529051600160a060020a03929092169163e16c7d989160248181019260209290919082900301816000876161da5a03f11561000257505060408051805160055460065460e060020a63599efa6b028452600160a060020a03918216600485015260248401529251909450918416925063599efa6b916044808301926000929190829003018183876161da5a03f1156100025750505080600160a060020a0316632b68bb2d6040518160e060020a0281526004018090506000604051808303816000876161da5a03f115610002575050600054600160a060020a03169050ff5b6001546040805160e060020a6313bc6d4b02815233600160a060020a039081166004830152915191909216916313bc6d4b91602480830192602092919082900301816000876161da5a03f11561000257505060405151151590506106a85761000256", + "nonce": "16", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000002": "0x0000000000000000000000002cccf5e0538493c235d1c5ef6580f77d99e91396" + } + }, + "0x70c9217d814985faef62b124420f8dfbddd96433": { + "balance": "0x4ef436dcbda6cd4a", + "code": "0x", + "nonce": "1634", + "storage": {} + }, + "0x7986bad81f4cbd9317f5a46861437dae58d69113": { + "balance": "0x0", + "code": 
"0x6060604052361561008d5760e060020a600035046302d05d3f811461009557806316c66cc6146100a75780631ab9075a146100d7578063213fe2b7146101125780639859387b1461013f578063988db79c1461015e578063a7f4377914610180578063b9858a281461019e578063c8e40fbf146101c0578063f4f2821b146101e8578063f905c15a14610209575b610212610002565b610214600054600160a060020a031681565b600160a060020a0360043581811660009081526005602052604081205461023193168114610257575060016101e3565b610212600435600254600160a060020a0316600014801590610108575060025433600160a060020a03908116911614155b1561025f57610002565b610214600435600160a060020a03811660009081526004602052604081205460ff16151561027557610002565b610212600435600254600160a060020a03166000141561029b57610002565b610212600435602435600254600160a060020a03166000141561050457610002565b61021260025433600160a060020a0390811691161461056757610002565b610212600435602435600254600160a060020a03166000141561057557610002565b610231600435600160a060020a03811660009081526004602052604090205460ff165b919050565b610212600435600254600090600160a060020a031681141561072057610002565b61024560035481565b005b60408051600160a060020a03929092168252519081900360200190f35b604080519115158252519081900360200190f35b60408051918252519081900360200190f35b5060006101e3565b60028054600160a060020a031916821790555b50565b50600160a060020a038181166000908152600460205260409020546101009004166101e3565b6002546040805160e060020a6313bc6d4b02815233600160a060020a039081166004830152915191909216916313bc6d4b91602482810192602092919082900301816000876161da5a03f11561000257505060405151151590506102fe57610002565b600160a060020a03811660009081526004602052604090205460ff161515610272576040516104028061092e833901809050604051809103906000f06004600050600083600160a060020a0316815260200190815260200160002060005060000160016101000a815481600160a060020a030219169083021790555060016004600050600083600160a060020a0316815260200190815260200160002060005060000160006101000a81548160ff0219169083021790555050565b600160a060020a03821660009081526004602052604090205460ff1615156104725760405161040280610d308
33901809050604051809103906000f06004600050600084600160a060020a0316815260200190815260200160002060005060000160016101000a815481600160a060020a030219169083021790555060016004600050600084600160a060020a0316815260200190815260200160002060005060000160006101000a81548160ff021916908302179055505b600160a060020a03828116600090815260046020819052604080518184205460e060020a630a3b0a4f02825286861693820193909352905161010090920490931692630a3b0a4f926024828101939192829003018183876161da5a03f11561000257505050600160a060020a03811660009081526006602052604090208054600160a060020a031916831790555b5050565b6002546040805160e060020a6313bc6d4b02815233600160a060020a039081166004830152915191909216916313bc6d4b91602482810192602092919082900301816000876161da5a03f11561000257505060405151151590506103b957610002565b600254600160a060020a0316ff5b6002546040805160e060020a6313bc6d4b02815233600160a060020a039081166004830152915191909216916313bc6d4b91602482810192602092919082900301816000876161da5a03f11561000257505060405151151590506105d857610002565b600160a060020a03821660009081526004602052604090205460ff1615156106915760405161040280611132833901809050604051809103906000f06004600050600084600160a060020a0316815260200190815260200160002060005060000160016101000a815481600160a060020a030219169083021790555060016004600050600084600160a060020a0316815260200190815260200160002060005060000160006101000a81548160ff021916908302179055505b600160a060020a03828116600090815260046020819052604080518184205460e060020a630a3b0a4f02825286861693820193909352905161010090920490931692630a3b0a4f926024828101939192829003018183876161da5a03f11561000257505050600160a060020a031660009081526005602052604090208054600160a060020a0319169091179055565b6002546040805160e060020a6313bc6d4b02815233600160a060020a039081166004830152915191909216916313bc6d4b91602482810192602092919082900301816000876161da5a03f115610002575050604051511515905061078357610002565b50600160a060020a0381811660009081526005602090815260408083205490931680835260049091529190205460ff161561080f576040600081812054825160e260020a632e72bafd028
152600160a060020a03868116600483015293516101009092049093169263b9caebf4926024828101939192829003018183876161da5a03f115610002575050505b600160a060020a03828116600090815260056020526040812054909116146108545760406000908120600160a060020a0384169091528054600160a060020a03191690555b50600160a060020a0381811660009081526006602090815260408083205490931680835260049091529190205460ff16156108e657600160a060020a038181166000908152604080518183205460e260020a632e72bafd028252868516600483015291516101009092049093169263b9caebf4926024828101939192829003018183876161da5a03f115610002575050505b600160a060020a03828116600090815260066020526040812054909116146105005760406000908120600160a060020a0384169091528054600160a060020a0319169055505056606060405260008054600160a060020a031916331790556103de806100246000396000f3606060405236156100615760e060020a600035046302d05d3f81146100695780630a3b0a4f1461007b5780630d327fa7146100f6578063524d81d314610109578063a7f4377914610114578063b9caebf414610132578063bbec3bae14610296575b6102ce610002565b6102d0600054600160a060020a031681565b6102ce600435600254600090600160a060020a03168114156102ed5760028054600160a060020a03199081168417808355600160a060020a03808616855260036020526040852060018101805493831694909316939093179091559154815461010060a860020a031916921661010002919091179055610372565b6102d0600254600160a060020a03165b90565b6102e3600154610106565b6102ce60005433600160a060020a039081169116146103c657610002565b6102ce600435600160a060020a038116600090815260036020526040812054819060ff16801561016457506001548190115b1561029157506040808220600180820154915461010090819004600160a060020a039081168087528587209093018054600160a060020a031916948216948517905583865293909420805461010060a860020a03191694820294909417909355600254909190811690841614156101e85760028054600160a060020a031916821790555b600254600160a060020a0390811690841614156102105760028054600160a060020a03191690555b6003600050600084600160a060020a0316815260200190815260200160002060006000820160006101000a81549060ff02191690556000820160016101000a815490600160a060020a030219169055600182016
0006101000a815490600160a060020a03021916905550506001600081815054809291906001900391905055505b505050565b600160a060020a036004358181166000908152600360205260408120600101546002546102d09491821691168114156103d4576103d8565b005b600160a060020a03166060908152602090f35b6060908152602090f35b60028054600160a060020a03908116835260036020526040808420805461010060a860020a0319808216610100808a029190911790935590829004841680875283872060019081018054600160a060020a03199081168b179091559654868a168952949097209687018054949095169390951692909217909255835416908202179091555b60016003600050600084600160a060020a0316815260200190815260200160002060005060000160006101000a81548160ff0219169083021790555060016000818150548092919060010191905055505050565b600054600160a060020a0316ff5b8091505b5091905056606060405260008054600160a060020a031916331790556103de806100246000396000f3606060405236156100615760e060020a600035046302d05d3f81146100695780630a3b0a4f1461007b5780630d327fa7146100f6578063524d81d314610109578063a7f4377914610114578063b9caebf414610132578063bbec3bae14610296575b6102ce610002565b6102d0600054600160a060020a031681565b6102ce600435600254600090600160a060020a03168114156102ed5760028054600160a060020a03199081168417808355600160a060020a03808616855260036020526040852060018101805493831694909316939093179091559154815461010060a860020a031916921661010002919091179055610372565b6102d0600254600160a060020a03165b90565b6102e3600154610106565b6102ce60005433600160a060020a039081169116146103c657610002565b6102ce600435600160a060020a038116600090815260036020526040812054819060ff16801561016457506001548190115b1561029157506040808220600180820154915461010090819004600160a060020a039081168087528587209093018054600160a060020a031916948216948517905583865293909420805461010060a860020a03191694820294909417909355600254909190811690841614156101e85760028054600160a060020a031916821790555b600254600160a060020a0390811690841614156102105760028054600160a060020a03191690555b6003600050600084600160a060020a0316815260200190815260200160002060006000820160006101000a81549060ff0219169055600082016
0016101000a815490600160a060020a0302191690556001820160006101000a815490600160a060020a03021916905550506001600081815054809291906001900391905055505b505050565b600160a060020a036004358181166000908152600360205260408120600101546002546102d09491821691168114156103d4576103d8565b005b600160a060020a03166060908152602090f35b6060908152602090f35b60028054600160a060020a03908116835260036020526040808420805461010060a860020a0319808216610100808a029190911790935590829004841680875283872060019081018054600160a060020a03199081168b179091559654868a168952949097209687018054949095169390951692909217909255835416908202179091555b60016003600050600084600160a060020a0316815260200190815260200160002060005060000160006101000a81548160ff0219169083021790555060016000818150548092919060010191905055505050565b600054600160a060020a0316ff5b8091505b5091905056606060405260008054600160a060020a031916331790556103de806100246000396000f3606060405236156100615760e060020a600035046302d05d3f81146100695780630a3b0a4f1461007b5780630d327fa7146100f6578063524d81d314610109578063a7f4377914610114578063b9caebf414610132578063bbec3bae14610296575b6102ce610002565b6102d0600054600160a060020a031681565b6102ce600435600254600090600160a060020a03168114156102ed5760028054600160a060020a03199081168417808355600160a060020a03808616855260036020526040852060018101805493831694909316939093179091559154815461010060a860020a031916921661010002919091179055610372565b6102d0600254600160a060020a03165b90565b6102e3600154610106565b6102ce60005433600160a060020a039081169116146103c657610002565b6102ce600435600160a060020a038116600090815260036020526040812054819060ff16801561016457506001548190115b1561029157506040808220600180820154915461010090819004600160a060020a039081168087528587209093018054600160a060020a031916948216948517905583865293909420805461010060a860020a03191694820294909417909355600254909190811690841614156101e85760028054600160a060020a031916821790555b600254600160a060020a0390811690841614156102105760028054600160a060020a03191690555b6003600050600084600160a060020a031681526020019081526020016000206
0006000820160006101000a81549060ff02191690556000820160016101000a815490600160a060020a0302191690556001820160006101000a815490600160a060020a03021916905550506001600081815054809291906001900391905055505b505050565b600160a060020a036004358181166000908152600360205260408120600101546002546102d09491821691168114156103d4576103d8565b005b600160a060020a03166060908152602090f35b6060908152602090f35b60028054600160a060020a03908116835260036020526040808420805461010060a860020a0319808216610100808a029190911790935590829004841680875283872060019081018054600160a060020a03199081168b179091559654868a168952949097209687018054949095169390951692909217909255835416908202179091555b60016003600050600084600160a060020a0316815260200190815260200160002060005060000160006101000a81548160ff0219169083021790555060016000818150548092919060010191905055505050565b600054600160a060020a0316ff5b8091505b5091905056", + "nonce": "7", + "storage": { + "0xffc4df2d4f3d2cffad590bed6296406ab7926ca9e74784f74a95191fa069a174": "0x00000000000000000000000070c9217d814985faef62b124420f8dfbddd96433" + } + }, + "0xb4fe7aa695b326c9d219158d2ca50db77b39f99f": { + "balance": "0x0", + "code": 
"0x606060405236156100ae5760e060020a600035046302d05d3f81146100b65780631ab9075a146100c85780632b68bb2d146101035780634cc927d7146101c557806351a34eb81461028e57806356ccb6f0146103545780635928d37f1461041d578063599efa6b146104e9578063759297bb146105b2578063771d50e11461067e578063a7f4377914610740578063f905c15a1461075e578063f92eb77414610767578063febf661214610836575b610902610002565b610904600054600160a060020a031681565b610902600435600254600160a060020a03166000148015906100f9575060025433600160a060020a03908116911614155b1561092057610002565b60025460e360020a631c2d8fb302606090815260aa60020a6a18dbdb9d1c9858dd18dd1b02606452610902916000918291600160a060020a03169063e16c7d989060849060209060248187876161da5a03f1156100025750505060405180519060200150905080600160a060020a03166316c66cc6336040518260e060020a0281526004018082600160a060020a031681526020019150506020604051808303816000876161da5a03f115610002575050604051511515905061094257610002565b61090260043560243560025460e360020a631c2d8fb302606090815260aa60020a6a18dbdb9d1c9858dd18dd1b026064526000918291600160a060020a039091169063e16c7d989060849060209060248187876161da5a03f1156100025750505060405180519060200150905080600160a060020a03166316c66cc6336040518260e060020a0281526004018082600160a060020a031681526020019150506020604051808303816000876161da5a03f1156100025750506040515115159050610a0d57610002565b61090260043560025460e360020a631c2d8fb302606090815260aa60020a6a18dbdb9d1c9858dd18dd1b026064526000918291600160a060020a039091169063e16c7d989060849060209060248187876161da5a03f1156100025750505060405180519060200150905080600160a060020a03166316c66cc6336040518260e060020a0281526004018082600160a060020a031681526020019150506020604051808303816000876161da5a03f1156100025750506040515115159050610ae957610002565b61090260043560243560025460e360020a631c2d8fb302606090815260aa60020a6a18dbdb9d1c9858dd18dd1b026064526000918291600160a060020a039091169063e16c7d989060849060209060248187876161da5a03f1156100025750505060405180519060200150905080600160a060020a03166316c66cc6336040518260e060020a0281526004018082600160a
060020a031681526020019150506020604051808303816000876161da5a03f1156100025750506040515115159050610bbc57610002565b61090260043560243560443560025460e360020a631c2d8fb302606090815260aa60020a6a18dbdb9d1c9858dd18dd1b026064526000918291600160a060020a039091169063e16c7d989060849060209060248187876161da5a03f1156100025750505060405180519060200150905080600160a060020a03166316c66cc6336040518260e060020a0281526004018082600160a060020a031681526020019150506020604051808303816000876161da5a03f1156100025750506040515115159050610c9657610002565b61090260043560243560025460e360020a631c2d8fb302606090815260aa60020a6a18dbdb9d1c9858dd18dd1b026064526000918291600160a060020a039091169063e16c7d989060849060209060248187876161da5a03f1156100025750505060405180519060200150905080600160a060020a03166316c66cc6336040518260e060020a0281526004018082600160a060020a031681526020019150506020604051808303816000876161da5a03f1156100025750506040515115159050610de057610002565b61090260043560243560443560025460e360020a631c2d8fb302606090815260aa60020a6a18dbdb9d1c9858dd18dd1b026064526000918291600160a060020a039091169063e16c7d989060849060209060248187876161da5a03f1156100025750505060405180519060200150905080600160a060020a03166316c66cc6336040518260e060020a0281526004018082600160a060020a031681526020019150506020604051808303816000876161da5a03f1156100025750506040515115159050610ebb57610002565b60025460e360020a631c2d8fb302606090815260aa60020a6a18dbdb9d1c9858dd18dd1b02606452610902916000918291600160a060020a03169063e16c7d989060849060209060248187876161da5a03f1156100025750505060405180519060200150905080600160a060020a03166316c66cc6336040518260e060020a0281526004018082600160a060020a031681526020019150506020604051808303816000876161da5a03f1156100025750506040515115159050610f9e57610002565b61090260025433600160a060020a0390811691161461106957610002565b61090e60035481565b61090e60043560025460e360020a631c2d8fb302606090815260aa60020a6a18dbdb9d1c9858dd18dd1b026064526000918291600160a060020a039091169063e16c7d989060849060209060248187876161da5a03f1156100025750506040805180517ff92eb
774000000000000000000000000000000000000000000000000000000008252600482018790529151919350600160a060020a038416925063f92eb774916024828101926020929190829003018188876161da5a03f11561000257505060405151949350505050565b61090260043560243560443560025460e360020a631c2d8fb302606090815260aa60020a6a18dbdb9d1c9858dd18dd1b026064526000918291600160a060020a039091169063e16c7d989060849060209060248187876161da5a03f1156100025750505060405180519060200150905080600160a060020a03166316c66cc6336040518260e060020a0281526004018082600160a060020a031681526020019150506020604051808303816000876161da5a03f115610002575050604051511515905061107757610002565b005b6060908152602090f35b60408051918252519081900360200190f35b6002805473ffffffffffffffffffffffffffffffffffffffff19168217905550565b6040805160025460e360020a631c2d8fb302825260aa60020a6a18dbdb9d1c9858dd18dd1b0260048301529151600160a060020a03929092169163e16c7d9891602481810192602092909190829003018188876161da5a03f1156100025750506040805180517f5ed61af000000000000000000000000000000000000000000000000000000000825233600160a060020a039081166004840152925190959286169350635ed61af092602483810193919291829003018183876161da5a03f115610002575050505050565b6040805160025460e360020a631c2d8fb302825260aa60020a6a18dbdb9d1c9858dd18dd1b0260048301529151600160a060020a03929092169163e16c7d9891602481810192602092909190829003018188876161da5a03f1156100025750506040805180517fab03fc2600000000000000000000000000000000000000000000000000000000825233600160a060020a03908116600484015260248301899052808816604484015292519095928616935063ab03fc2692606483810193919291829003018183876161da5a03f1156100025750505050505050565b6040805160025460e360020a631c2d8fb302825260aa60020a6a18dbdb9d1c9858dd18dd1b0260048301529151600160a060020a03929092169163e16c7d9891602481810192602092909190829003018188876161da5a03f1156100025750506040805180517f949ae47900000000000000000000000000000000000000000000000000000000825233600160a060020a0390811660048401526024830188905292519095928616935063949ae47992604483810193919291829003018183876161da5a03f11561000257505
050505050565b6040805160025460e360020a631c2d8fb302825260b260020a691858d8dbdd5b9d18dd1b0260048301529151600160a060020a03929092169163e16c7d9891602481810192602092909190829003018188876161da5a03f1156100025750506040805180517f46d88e7d000000000000000000000000000000000000000000000000000000008252600160a060020a0380891660048401523381166024840152604483018890529251909592861693506346d88e7d92606483810193919291829003018183876161da5a03f1156100025750505050505050565b6040805160025460e360020a631c2d8fb302825260b260020a691858d8dbdd5b9d18dd1b0260048301529151600160a060020a03929092169163e16c7d9891602481810192602092909190829003018188876161da5a03f1156100025750506040805180517f5315cdde00000000000000000000000000000000000000000000000000000000825233600160a060020a039081166004840152808a16602484015260448301889052925190959286169350635315cdde92606483810193919291829003018183876161da5a03f115610002575050604080517f5928d37f00000000000000000000000000000000000000000000000000000000815233600160a060020a03908116600483015287166024820152604481018690529051635928d37f91606481810192600092909190829003018183876161da5a03f115610002575050505050505050565b6040805160025460e360020a631c2d8fb302825260b260020a691858d8dbdd5b9d18dd1b0260048301529151600160a060020a03929092169163e16c7d9891602481810192602092909190829003018188876161da5a03f1156100025750506040805180517fe68e401c00000000000000000000000000000000000000000000000000000000825233600160a060020a03908116600484015280891660248401526044830188905292519095928616935063e68e401c92606483810193919291829003018183876161da5a03f1156100025750505050505050565b6040805160025460e360020a631c2d8fb302825260b260020a691858d8dbdd5b9d18dd1b0260048301529151600160a060020a03929092169163e16c7d9891602481810192602092909190829003018188876161da5a03f1156100025750506040805180517f5152f381000000000000000000000000000000000000000000000000000000008252600160a060020a03808a1660048401528089166024840152604483018890523381166064840152925190959286169350635152f38192608483810193919291829003018183876161da5a03f115610002575050505050505050565
b6040805160025460e360020a631c2d8fb302825260aa60020a6a18dbdb9d1c9858dd18dd1b0260048301529151600160a060020a03929092169163e16c7d9891602481810192602092909190829003018188876161da5a03f1156100025750506040805180517f056d447000000000000000000000000000000000000000000000000000000000825233600160a060020a03908116600484015292519095928616935063056d447092602483810193919291829003018183876161da5a03f115610002575050505050565b600254600160a060020a0316ff5b6040805160025460e360020a631c2d8fb302825260aa60020a6a18dbdb9d1c9858dd18dd1b0260048301529151600160a060020a03929092169163e16c7d9891602481810192602092909190829003018188876161da5a03f1156100025750506040805180517f3ae1005c00000000000000000000000000000000000000000000000000000000825233600160a060020a039081166004840152808a166024840152808916604484015260648301889052925190959286169350633ae1005c92608483810193919291829003018183876161da5a03f11561000257505050505050505056", + "nonce": "1", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000002": "0x0000000000000000000000002cccf5e0538493c235d1c5ef6580f77d99e91396" + } + }, + "0xc212e03b9e060e36facad5fd8f4435412ca22e6b": { + "balance": "0x0", + "code": 
"0x606060405236156101745760e060020a600035046302d05d3f811461017c57806304a7fdbc1461018e5780630e90f957146101fb5780630fb5a6b41461021257806314baa1b61461021b57806317fc45e21461023a5780632b096926146102435780632e94420f1461025b578063325a19f11461026457806336da44681461026d5780633f81a2c01461027f5780633fc306821461029757806345ecd3d7146102d45780634665096d146102dd5780634e71d92d146102e657806351a34eb8146103085780636111bb951461032d5780636f265b93146103445780637e9014e11461034d57806390ba009114610360578063927df5e014610393578063a7f437791461046c578063ad8f50081461046e578063bc6d909414610477578063bdec3ad114610557578063c19d93fb1461059a578063c9503fe2146105ad578063e0a73a93146105b6578063ea71b02d146105bf578063ea8a1af0146105d1578063ee4a96f9146105f3578063f1ff78a01461065c575b61046c610002565b610665600054600160a060020a031681565b6040805160c081810190925261046c9160049160c4918390600690839083908082843760408051808301909152929750909561018495509193509091908390839080828437509095505050505050600554600090600160a060020a0390811633909116146106a857610002565b61068260015460a060020a900460ff166000145b90565b61069660085481565b61046c600435600154600160a060020a03166000141561072157610002565b610696600d5481565b610696600435600f8160068110156100025750015481565b61069660045481565b61069660035481565b610665600554600160a060020a031681565b61069660043560158160068110156100025750015481565b6106966004355b600b54600f5460009160028202808203928083039290810191018386101561078357601054840186900394505b50505050919050565b61069660025481565b61069660095481565b61046c600554600090600160a060020a03908116339091161461085857610002565b61046c600435600554600090600160a060020a03908116339091161461092e57610002565b6106826001805460a060020a900460ff161461020f565b610696600b5481565b61068260075460a060020a900460ff1681565b6106966004355b600b54601554600091600282028082039280830392908101910183861015610a6c5760165494506102cb565b61046c6004356024356044356040805160015460e360020a631c2d8fb302825260b260020a691858d8dbdd5b9d18dd1b02600483015291516000928392600160a060020a03919091169163e16c7d989160248
1810192602092909190829003018187876161da5a03f1156100025750505060405180519060200150905080600160a060020a031663c4b0c96a336040518260e060020a0281526004018082600160a060020a031681526020019150506020604051808303816000876161da5a03f1156100025750506040515115159050610b4657610002565b005b610696600a5481565b61046c60006000600060006000600160009054906101000a9004600160a060020a0316600160a060020a031663e16c7d986040518160e060020a028152600401808060b260020a691858d8dbdd5b9d18dd1b0281526020015060200190506020604051808303816000876161da5a03f1156100025750505060405180519060200150905080600160a060020a031663c4b0c96a336040518260e060020a0281526004018082600160a060020a031681526020019150506020604051808303816000876161da5a03f1156100025750506040515115159050610f1757610002565b61046c5b60015b60058160ff16101561071e57600f6001820160ff166006811015610002578101549060ff83166006811015610002570154101561129057610002565b61069660015460a060020a900460ff1681565b61069660065481565b610696600c5481565b610665600754600160a060020a031681565b61046c600554600090600160a060020a0390811633909116146112c857610002565b6040805160c081810190925261046c9160049160c4918390600690839083908082843760408051808301909152929750909561018495509193509091908390839080828437509095505050505050600154600090600160a060020a03168114156113fb57610002565b610696600e5481565b60408051600160a060020a03929092168252519081900360200190f35b604080519115158252519081900360200190f35b60408051918252519081900360200190f35b5060005b60068160ff16101561070857828160ff166006811015610002576020020151600f60ff831660068110156100025701558160ff82166006811015610002576020020151601560ff831660068110156100025701556001016106ac565b61071061055b565b505050565b600e8054820190555b50565b6040805160015460e060020a6313bc6d4b02825233600160a060020a03908116600484015292519216916313bc6d4b9160248181019260209290919082900301816000876161da5a03f115610002575050604051511515905061071557610002565b83861015801561079257508286105b156107b457600f546010546011548689039082030291909104900394506102cb565b8286101580156107c55750600b5486105b156107e757600f546
011546012548589039082030291909104900394506102cb565b600b5486108015906107f857508186105b1561081d57600b54600f546012546013549289039281039290920204900394506102cb565b81861015801561082c57508086105b1561084e57600f546013546014548489039082030291909104900394506102cb565b60145494506102cb565b60015460a060020a900460ff1660001461087157610002565b600254600a01431161088257610002565b6040805160015460e360020a631c2d8fb302825260a860020a6a636f6e74726163746170690260048301529151600160a060020a03929092169163e16c7d989160248181019260209290919082900301816000876161da5a03f1156100025750505060405180519060200150905080600160a060020a031663771d50e16040518160e060020a0281526004018090506000604051808303816000876161da5a03f1156100025750505050565b60015460a060020a900460ff1660001461094757610002565b600254600a01431161095857610002565b6040805160015460e360020a631c2d8fb302825260a860020a6a636f6e74726163746170690260048301529151600160a060020a03929092169163e16c7d989160248181019260209290919082900301816000876161da5a03f1156100025750506040805180517f51a34eb8000000000000000000000000000000000000000000000000000000008252600482018690529151919350600160a060020a03841692506351a34eb8916024808301926000929190829003018183876161da5a03f11561000257505050600b8290554360025560408051838152905130600160a060020a0316917fa609f6bd4ad0b4f419ddad4ac9f0d02c2b9295c5e6891469055cf73c2b568fff919081900360200190a25050565b838610158015610a7b57508286105b15610a9d576015546016546017548689039082900302919091040194506102cb565b828610158015610aae5750600b5486105b15610ad0576015546017546018548589039082900302919091040194506102cb565b600b548610801590610ae157508186105b15610b0657600b546015546018546019549289039281900392909202040194506102cb565b818610158015610b1557508086105b15610b3757601554601954601a548489039082900302919091040194506102cb565b601a54860181900394506102cb565b60015460a060020a900460ff16600014610b5f57610002565b6001805460a060020a60ff02191660a060020a17908190556040805160e360020a631c2d8fb302815260a860020a6a636f6e74726163746170690260048201529051600160a060020a03929092169163e16c7d9891602
48181019260209290919082900301816000876161da5a03f1156100025750506040805180516004805460e260020a633e4baddd028452908301529151919450600160a060020a038516925063f92eb77491602482810192602092919082900301816000876161da5a03f115610002575050604080518051600a556005547ffebf661200000000000000000000000000000000000000000000000000000000825233600160a060020a03908116600484015216602482015260448101879052905163febf661291606480820192600092909190829003018183876161da5a03f115610002575050508215610cc7576007805473ffffffffffffffffffffffffffffffffffffffff191633179055610dbb565b6040805160055460065460e060020a63599efa6b028352600160a060020a039182166004840152602483015291519184169163599efa6b91604481810192600092909190829003018183876161da5a03f115610002575050604080516006547f56ccb6f000000000000000000000000000000000000000000000000000000000825233600160a060020a03166004830152602482015290516356ccb6f091604480820192600092909190829003018183876161da5a03f115610002575050600580546007805473ffffffffffffffffffffffffffffffffffffffff19908116600160a060020a038416179091551633179055505b6007805460a060020a60ff02191660a060020a87810291909117918290556008544301600955900460ff1615610df757600a54610e039061029e565b600a54610e0b90610367565b600c55610e0f565b600c555b600c54670de0b6b3a7640000850204600d55600754600554604080517f759297bb000000000000000000000000000000000000000000000000000000008152600160a060020a039384166004820152918316602483015260448201879052519184169163759297bb91606481810192600092909190829003018183876161da5a03f11561000257505060408051600754600a54600d54600554600c5460a060020a850460ff161515865260208601929092528486019290925260608401529251600160a060020a0391821694509281169230909116917f3b3d1986083d191be01d28623dc19604728e29ae28bdb9ba52757fdee1a18de2919081900360800190a45050505050565b600954431015610f2657610002565b6001805460a060020a900460ff1614610f3e57610002565b6001805460a060020a60ff0219167402000000000000000000000000000000000000000017908190556040805160e360020a631c2d8fb302815260a860020a6a636f6e74726163746170690260048201529051600160a060020a03929092169
163e16c7d989160248181019260209290919082900301816000876161da5a03f1156100025750506040805180516004805460e260020a633e4baddd028452908301529151919750600160a060020a038816925063f92eb77491602482810192602092919082900301816000876161da5a03f115610002575050604051516007549095506000945060a060020a900460ff1615905061105c57600a5484111561105757600a54600d54670de0b6b3a7640000918603020492505b61107e565b600a5484101561107e57600a54600d54670de0b6b3a764000091869003020492505b60065483111561108e5760065492505b6006548390039150600083111561111857604080516005546007547f5928d37f000000000000000000000000000000000000000000000000000000008352600160a060020a0391821660048401528116602483015260448201869052915191871691635928d37f91606481810192600092909190829003018183876161da5a03f115610002575050505b600082111561117a576040805160055460e060020a63599efa6b028252600160a060020a0390811660048301526024820185905291519187169163599efa6b91604481810192600092909190829003018183876161da5a03f115610002575050505b6040805185815260208101849052808201859052905130600160a060020a0316917f89e690b1d5aaae14f3e85f108dc92d9ab3763a58d45aed8b59daedbbae8fe794919081900360600190a260008311156112285784600160a060020a0316634cc927d785336040518360e060020a0281526004018083815260200182600160a060020a03168152602001925050506000604051808303816000876161da5a03f11561000257505050611282565b84600160a060020a0316634cc927d7600a60005054336040518360e060020a0281526004018083815260200182600160a060020a03168152602001925050506000604051808303816000876161da5a03f115610002575050505b600054600160a060020a0316ff5b60156001820160ff166006811015610002578101549060ff8316600681101561000257015411156112c057610002565b60010161055e565b60015460a060020a900460ff166000146112e157610002565b600254600a0143116112f257610002565b6001546040805160e360020a631c2d8fb302815260a860020a6a636f6e74726163746170690260048201529051600160a060020a03929092169163e16c7d989160248181019260209290919082900301816000876161da5a03f11561000257505060408051805160055460065460e060020a63599efa6b028452600160a060020a0391821660048501526024840152925190945
0918416925063599efa6b916044808301926000929190829003018183876161da5a03f1156100025750505080600160a060020a0316632b68bb2d6040518160e060020a0281526004018090506000604051808303816000876161da5a03f115610002575050600054600160a060020a03169050ff5b6001546040805160e060020a6313bc6d4b02815233600160a060020a039081166004830152915191909216916313bc6d4b91602480830192602092919082900301816000876161da5a03f11561000257505060405151151590506106a85761000256", + "nonce": "1", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000001": "0x0000000000000000000000002cccf5e0538493c235d1c5ef6580f77d99e91396", + "0x0000000000000000000000000000000000000000000000000000000000000002": "0x0000000000000000000000000000000000000000000000000000000000006195", + "0x0000000000000000000000000000000000000000000000000000000000000004": "0x5842545553440000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000005": "0x00000000000000000000000070c9217d814985faef62b124420f8dfbddd96433", + "0x0000000000000000000000000000000000000000000000000000000000000006": "0x0000000000000000000000000000000000000000000000008ac7230489e80000", + "0x000000000000000000000000000000000000000000000000000000000000000b": "0x0000000000000000000000000000000000000000000000283c7b9181eca20000" + } + }, + "0xcf00ffd997ad14939736f026006498e3f099baaf": { + "balance": "0x0", + "code": 
"0x606060405236156100cf5760e060020a600035046302d05d3f81146100d7578063031e7f5d146100e95780631ab9075a1461010b5780632243118a1461014657806327aad68a1461016557806338a699a4146101da5780635188f996146101f8578063581d5d601461021e57806381738c5914610246578063977da54014610269578063a07421ce14610288578063a7f43779146102be578063bdbdb086146102dc578063e1c7111914610303578063f4f2821b14610325578063f905c15a1461034a578063f92eb77414610353575b610387610002565b610389600054600160a060020a031681565b610387600435602435600254600160a060020a0316600014156103a857610002565b610387600435600254600160a060020a031660001480159061013c575060025433600160a060020a03908116911614155b1561042957610002565b610387600435600254600160a060020a03166000141561044b57610002565b6102ac60043560008181526004602081815260408320547f524d81d3000000000000000000000000000000000000000000000000000000006060908152610100909104600160a060020a031692839263524d81d3926064928188876161da5a03f1156100025750506040515192506103819050565b61039c60043560008181526004602052604090205460ff165b919050565b6103876004356024356002546000908190600160a060020a031681141561079457610002565b61038760043560243560025460009081908190600160a060020a031681141561080457610002565b61038960043560008181526004602052604081205460ff1615156109e357610002565b610387600435600254600160a060020a0316600014156109fb57610002565b600435600090815260096020526040902054670de0b6b3a764000090810360243502045b60408051918252519081900360200190f35b61038760025433600160a060020a03908116911614610a9257610002565b600435600090815260086020526040902054670de0b6b3a7640000602435909102046102ac565b610387600435602435600254600160a060020a031660001415610aa057610002565b61038760043560025460009081908190600160a060020a0316811415610b3657610002565b6102ac60035481565b6102ac600435600081815260076020908152604080832054600690925290912054670de0b6b3a76400000204805b50919050565b005b600160a060020a03166060908152602090f35b15156060908152602090f35b60025460e060020a6313bc6d4b02606090815233600160a060020a03908116606452909116906313bc6d4b906084906020906024816000876161da5a03f
11561000257505060405151151590506103fe57610002565b60008281526004602052604090205460ff16151561041b57610002565b600860205260406000205550565b6002805473ffffffffffffffffffffffffffffffffffffffff19168217905550565b60025460e060020a6313bc6d4b02606090815233600160a060020a03908116606452909116906313bc6d4b906084906020906024816000876161da5a03f11561000257505060405151151590506104a157610002565b604080516000838152600460205291909120805460ff1916600117905561040280610de2833901809050604051809103906000f0600460005060008360001916815260200190815260200160002060005060000160016101000a815481600160a060020a030219169083021790555066470de4df8200006008600050600083600019168152602001908152602001600020600050819055506703782dace9d9000060096000506000836000191681526020019081526020016000206000508190555050565b600460005060008560001916815260200190815260200160002060005060000160019054906101000a9004600160a060020a0316915081600160a060020a031663524d81d36040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060405151821415905061060057838152600660209081526040808320839055600790915281208190555b81600160a060020a0316630a3b0a4f846040518260e060020a0281526004018082600160a060020a031681526020019150506000604051808303816000876161da5a03f11561000257505050600160a060020a038316808252600560209081526040808420879055805160e160020a6364a81ff102815290518694670de0b6b3a7640000949363c9503fe29360048181019492939183900301908290876161da5a03f11561000257505060408051805160e060020a636f265b930282529151919291636f265b939160048181019260209290919082900301816000876161da5a03f11561000257505050604051805190602001500204600660005060008660001916815260200190815260200160002060008282825054019250508190555080600160a060020a031663c9503fe26040518160e060020a0281526004018090506020604051808303816000876161da5a03f115610002575050506040518051906020015060076000506000866000191681526020019081526020016000206000828282505401925050819055505b50505050565b60025460e060020a6313bc6d4b02606090815233600160a060020a03908116606452909116906313bc6d4b9060849060209060248187876161d
a5a03f11561000257505060405151151590506107e957610002565b8381526004602052604081205460ff16151561056657610002565b60025460e060020a6313bc6d4b02606090815233600160a060020a03908116606452909116906313bc6d4b9060849060209060248187876161da5a03f115610002575050604051511515905061085957610002565b849250670de0b6b3a764000083600160a060020a031663c9503fe26040518160e060020a0281526004018090506020604051808303816000876161da5a03f115610002575060408051805160e160020a6364a81ff102825291519189028590049650600481810192602092909190829003018188876161da5a03f11561000257505060408051805160e060020a636f265b930282529151919291636f265b9391600481810192602092909190829003018189876161da5a03f115610002575050506040518051906020015002049050806006600050600085600160a060020a0316632e94420f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f1156100025750604080518051855260208681528286208054989098039097557f2e94420f00000000000000000000000000000000000000000000000000000000815290518896600483810193919291829003018187876161da5a03f115610002575050604080515183526020939093525020805490910190555050505050565b60409020546101009004600160a060020a03166101f3565b60025460e060020a6313bc6d4b02606090815233600160a060020a03908116606452909116906313bc6d4b906084906020906024816000876161da5a03f1156100025750506040515115159050610a5157610002565b60008181526004602052604090205460ff161515610a6e57610002565b6040600020805474ffffffffffffffffffffffffffffffffffffffffff1916905550565b600254600160a060020a0316ff5b60025460e060020a6313bc6d4b02606090815233600160a060020a03908116606452909116906313bc6d4b906084906020906024816000876161da5a03f1156100025750506040515115159050610af657610002565b60008281526004602052604090205460ff161515610b1357610002565b670de0b6b3a7640000811115610b2857610002565b600960205260406000205550565b60025460e060020a6313bc6d4b02606090815233600160a060020a03908116606452909116906313bc6d4b9060849060209060248187876161da5a03f1156100025750506040515115159050610b8b57610002565b600160a060020a038416815260056020908152604080832054808452600490925282205490935060ff161
515610bc057610002565b600460005060008460001916815260200190815260200160002060005060000160019054906101000a9004600160a060020a0316915081600160a060020a031663b9caebf4856040518260e060020a0281526004018082600160a060020a031681526020019150506000604051808303816000876161da5a03f115610002575050506005600050600085600160a060020a0316815260200190815260200160002060005060009055839050600082600160a060020a031663524d81d36040518160e060020a0281526004018090506020604051808303816000876161da5a03f115610002575050604051519190911115905061078e57670de0b6b3a764000081600160a060020a031663c9503fe26040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060408051805160e060020a636f265b930282529151919291636f265b939160048181019260209290919082900301816000876161da5a03f11561000257505050604051805190602001500204600660005060008560001916815260200190815260200160002060008282825054039250508190555080600160a060020a031663c9503fe26040518160e060020a0281526004018090506020604051808303816000876161da5a03f115610002575050506040518051906020015060076000506000856000191681526020019081526020016000206000828282505403925050819055505050505056606060405260008054600160a060020a031916331790556103de806100246000396000f3606060405236156100615760e060020a600035046302d05d3f81146100695780630a3b0a4f1461007b5780630d327fa7146100f6578063524d81d314610109578063a7f4377914610114578063b9caebf414610132578063bbec3bae14610296575b6102ce610002565b6102d0600054600160a060020a031681565b6102ce600435600254600090600160a060020a03168114156102ed5760028054600160a060020a03199081168417808355600160a060020a03808616855260036020526040852060018101805493831694909316939093179091559154815461010060a860020a031916921661010002919091179055610372565b6102d0600254600160a060020a03165b90565b6102e3600154610106565b6102ce60005433600160a060020a039081169116146103c657610002565b6102ce600435600160a060020a038116600090815260036020526040812054819060ff16801561016457506001548190115b1561029157506040808220600180820154915461010090819004600160a060020a039081168087528587209093018054600160a
060020a031916948216948517905583865293909420805461010060a860020a03191694820294909417909355600254909190811690841614156101e85760028054600160a060020a031916821790555b600254600160a060020a0390811690841614156102105760028054600160a060020a03191690555b6003600050600084600160a060020a0316815260200190815260200160002060006000820160006101000a81549060ff02191690556000820160016101000a815490600160a060020a0302191690556001820160006101000a815490600160a060020a03021916905550506001600081815054809291906001900391905055505b505050565b600160a060020a036004358181166000908152600360205260408120600101546002546102d09491821691168114156103d4576103d8565b005b600160a060020a03166060908152602090f35b6060908152602090f35b60028054600160a060020a03908116835260036020526040808420805461010060a860020a0319808216610100808a029190911790935590829004841680875283872060019081018054600160a060020a03199081168b179091559654868a168952949097209687018054949095169390951692909217909255835416908202179091555b60016003600050600084600160a060020a0316815260200190815260200160002060005060000160006101000a81548160ff0219169083021790555060016000818150548092919060010191905055505050565b600054600160a060020a0316ff5b8091505b5091905056", + "nonce": "3", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000002": "0x0000000000000000000000002cccf5e0538493c235d1c5ef6580f77d99e91396", + "0x3571d73f14f31a1463bd0a2f92f7fde1653d4e1ead7aedf4b0a5df02f16092ab": "0x0000000000000000000000000000000000000000000007d634e4c55188be0000", + "0x4e64fe2d1b72d95a0a31945cc6e4f4e524ac5ad56d6bd44a85ec7bc9cc0462c0": "0x000000000000000000000000000000000000000000000002b5e3af16b1880000" + } + } + }, + "config": { + "byzantiumBlock": 1700000, + "chainId": 3, + "daoForkSupport": true, + "eip150Block": 0, + "eip150Hash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d", + "eip155Block": 10, + "eip158Block": 10, + "ethash": {}, + "homesteadBlock": 0 + }, + "difficulty": "117124093", + "extraData": 
"0xd5830105008650617269747986312e31322e31826d61", + "gasLimit": "4707788", + "hash": "0xad325e4c49145fb7a4058a68ac741cc8607a71114e23fc88083c7e881dd653e7", + "miner": "0x00714b9ac97fd6bd9325a059a70c9b9fa94ce050", + "mixHash": "0x0af918f65cb4af04b608fc1f14a849707696986a0e7049e97ef3981808bcc65f", + "nonce": "0x38dee147326a8d40", + "number": "25000", + "stateRoot": "0xc5d6bbcd46236fcdcc80b332ffaaa5476b980b01608f9708408cfef01b58bd5b", + "timestamp": "1479891517", + "totalDifficulty": "1895410389427" + }, + "input": "0xf88b8206628504a817c8008303d09094c212e03b9e060e36facad5fd8f4435412ca22e6b80a451a34eb80000000000000000000000000000000000000000000000280faf689c35ac00002aa0a7ee5b7877811bf671d121b40569462e722657044808dc1d6c4f1e4233ec145ba0417e7543d52b65738d9df419cbe40a708424f4d54b0fc145c0a64545a2bb1065", + "result": [ + { + "action": { + "callType": "call", + "from": "0x70c9217d814985faef62b124420f8dfbddd96433", + "gas": "0x3d090", + "input": "0x51a34eb80000000000000000000000000000000000000000000000280faf689c35ac0000", + "to": "0xc212e03b9e060e36facad5fd8f4435412ca22e6b", + "value": "0x0" + }, + "blockNumber": 25001, + "result": { + "gasUsed": "0x1810b", + "output": "0x" + }, + "subtraces": 2, + "traceAddress": [], + "type": "call" + }, + { + "action": { + "callType": "call", + "from": "0xc212e03b9e060e36facad5fd8f4435412ca22e6b", + "gas": "0x31217", + "input": "0xe16c7d98636f6e7472616374617069000000000000000000000000000000000000000000", + "to": "0x2cccf5e0538493c235d1c5ef6580f77d99e91396", + "value": "0x0" + }, + "blockNumber": 0, + "result": { + "gasUsed": "0x334", + "output": "0x000000000000000000000000b4fe7aa695b326c9d219158d2ca50db77b39f99f" + }, + "subtraces": 0, + "traceAddress": [0], + "type": "call" + }, + { + "action": { + "callType": "call", + "from": "0xc212e03b9e060e36facad5fd8f4435412ca22e6b", + "gas": "0x30b4a", + "input": "0x51a34eb80000000000000000000000000000000000000000000000280faf689c35ac0000", + "to": "0xb4fe7aa695b326c9d219158d2ca50db77b39f99f", + 
"value": "0x0" + }, + "blockNumber": 0, + "result": { + "gasUsed": "0xedb7", + "output": "0x" + }, + "subtraces": 4, + "traceAddress": [1], + "type": "call" + }, + { + "action": { + "callType": "call", + "from": "0xb4fe7aa695b326c9d219158d2ca50db77b39f99f", + "gas": "0x2a68d", + "input": "0xe16c7d98636f6e747261637463746c000000000000000000000000000000000000000000", + "to": "0x2cccf5e0538493c235d1c5ef6580f77d99e91396", + "value": "0x0" + }, + "blockNumber": 0, + "result": { + "gasUsed": "0x334", + "output": "0x0000000000000000000000003e9286eafa2db8101246c2131c09b49080d00690" + }, + "subtraces": 0, + "traceAddress": [1, 0], + "type": "call" + }, + { + "action": { + "callType": "call", + "from": "0xb4fe7aa695b326c9d219158d2ca50db77b39f99f", + "gas": "0x29f35", + "input": "0x16c66cc6000000000000000000000000c212e03b9e060e36facad5fd8f4435412ca22e6b", + "to": "0x3e9286eafa2db8101246c2131c09b49080d00690", + "value": "0x0" + }, + "blockNumber": 0, + "result": { + "gasUsed": "0xf8d", + "output": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + "subtraces": 2, + "traceAddress": [1, 1], + "type": "call" + }, + { + "action": { + "callType": "call", + "from": "0x3e9286eafa2db8101246c2131c09b49080d00690", + "gas": "0x23ac9", + "input": "0xe16c7d98636f6e7472616374646200000000000000000000000000000000000000000000", + "to": "0x2cccf5e0538493c235d1c5ef6580f77d99e91396", + "value": "0x0" + }, + "blockNumber": 0, + "result": { + "gasUsed": "0x334", + "output": "0x0000000000000000000000007986bad81f4cbd9317f5a46861437dae58d69113" + }, + "subtraces": 0, + "traceAddress": [1, 1, 0], + "type": "call" + }, + { + "action": { + "callType": "call", + "from": "0x3e9286eafa2db8101246c2131c09b49080d00690", + "gas": "0x23366", + "input": "0x16c66cc6000000000000000000000000c212e03b9e060e36facad5fd8f4435412ca22e6b", + "to": "0x7986bad81f4cbd9317f5a46861437dae58d69113", + "value": "0x0" + }, + "blockNumber": 0, + "result": { + "gasUsed": "0x273", + "output": 
"0x0000000000000000000000000000000000000000000000000000000000000001" + }, + "subtraces": 0, + "traceAddress": [1, 1, 1], + "type": "call" + }, + { + "action": { + "callType": "call", + "from": "0xb4fe7aa695b326c9d219158d2ca50db77b39f99f", + "gas": "0x28a9e", + "input": "0xe16c7d98636f6e747261637463746c000000000000000000000000000000000000000000", + "to": "0x2cccf5e0538493c235d1c5ef6580f77d99e91396", + "value": "0x0" + }, + "blockNumber": 0, + "result": { + "gasUsed": "0x334", + "output": "0x0000000000000000000000003e9286eafa2db8101246c2131c09b49080d00690" + }, + "subtraces": 0, + "traceAddress": [1, 2], + "type": "call" + }, + { + "action": { + "callType": "call", + "from": "0xb4fe7aa695b326c9d219158d2ca50db77b39f99f", + "gas": "0x283b9", + "input": "0x949ae479000000000000000000000000c212e03b9e060e36facad5fd8f4435412ca22e6b0000000000000000000000000000000000000000000000280faf689c35ac0000", + "to": "0x3e9286eafa2db8101246c2131c09b49080d00690", + "value": "0x0" + }, + "blockNumber": 0, + "result": { + "gasUsed": "0xc51c", + "output": "0x" + }, + "subtraces": 12, + "traceAddress": [1, 3], + "type": "call" + }, + { + "action": { + "callType": "call", + "from": "0x3e9286eafa2db8101246c2131c09b49080d00690", + "gas": "0x21d79", + "input": "0x13bc6d4b000000000000000000000000b4fe7aa695b326c9d219158d2ca50db77b39f99f", + "to": "0x2cccf5e0538493c235d1c5ef6580f77d99e91396", + "value": "0x0" + }, + "blockNumber": 0, + "result": { + "gasUsed": "0x24d", + "output": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + "subtraces": 0, + "traceAddress": [1, 3, 0], + "type": "call" + }, + { + "action": { + "callType": "call", + "from": "0x3e9286eafa2db8101246c2131c09b49080d00690", + "gas": "0x2165b", + "input": "0xe16c7d986d61726b65746462000000000000000000000000000000000000000000000000", + "to": "0x2cccf5e0538493c235d1c5ef6580f77d99e91396", + "value": "0x0" + }, + "blockNumber": 0, + "result": { + "gasUsed": "0x334", + "output": 
"0x000000000000000000000000cf00ffd997ad14939736f026006498e3f099baaf" + }, + "subtraces": 0, + "traceAddress": [1, 3, 1], + "type": "call" + }, + { + "action": { + "callType": "call", + "from": "0x3e9286eafa2db8101246c2131c09b49080d00690", + "gas": "0x20ee1", + "input": "0x581d5d60000000000000000000000000c212e03b9e060e36facad5fd8f4435412ca22e6b0000000000000000000000000000000000000000000000280faf689c35ac0000", + "to": "0xcf00ffd997ad14939736f026006498e3f099baaf", + "value": "0x0" + }, + "blockNumber": 0, + "result": { + "gasUsed": "0x5374", + "output": "0x" + }, + "subtraces": 6, + "traceAddress": [1, 3, 2], + "type": "call" + }, + { + "action": { + "callType": "call", + "from": "0xcf00ffd997ad14939736f026006498e3f099baaf", + "gas": "0x1a8e8", + "input": "0x13bc6d4b0000000000000000000000003e9286eafa2db8101246c2131c09b49080d00690", + "to": "0x2cccf5e0538493c235d1c5ef6580f77d99e91396", + "value": "0x0" + }, + "blockNumber": 0, + "result": { + "gasUsed": "0x24d", + "output": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + "subtraces": 0, + "traceAddress": [1, 3, 2, 0], + "type": "call" + }, + { + "action": { + "callType": "call", + "from": "0xcf00ffd997ad14939736f026006498e3f099baaf", + "gas": "0x1a2c6", + "input": "0xc9503fe2", + "to": "0xc212e03b9e060e36facad5fd8f4435412ca22e6b", + "value": "0x0" + }, + "blockNumber": 0, + "result": { + "gasUsed": "0x3cb", + "output": "0x0000000000000000000000000000000000000000000000008ac7230489e80000" + }, + "subtraces": 0, + "traceAddress": [1, 3, 2, 1], + "type": "call" + }, + { + "action": { + "callType": "call", + "from": "0xcf00ffd997ad14939736f026006498e3f099baaf", + "gas": "0x19b72", + "input": "0xc9503fe2", + "to": "0xc212e03b9e060e36facad5fd8f4435412ca22e6b", + "value": "0x0" + }, + "blockNumber": 0, + "result": { + "gasUsed": "0x3cb", + "output": "0x0000000000000000000000000000000000000000000000008ac7230489e80000" + }, + "subtraces": 0, + "traceAddress": [1, 3, 2, 2], + "type": "call" + }, + { + 
"action": { + "callType": "call", + "from": "0xcf00ffd997ad14939736f026006498e3f099baaf", + "gas": "0x19428", + "input": "0x6f265b93", + "to": "0xc212e03b9e060e36facad5fd8f4435412ca22e6b", + "value": "0x0" + }, + "blockNumber": 0, + "result": { + "gasUsed": "0x305", + "output": "0x0000000000000000000000000000000000000000000000283c7b9181eca20000" + }, + "subtraces": 0, + "traceAddress": [1, 3, 2, 3], + "type": "call" + }, + { + "action": { + "callType": "call", + "from": "0xcf00ffd997ad14939736f026006498e3f099baaf", + "gas": "0x18d45", + "input": "0x2e94420f", + "to": "0xc212e03b9e060e36facad5fd8f4435412ca22e6b", + "value": "0x0" + }, + "blockNumber": 0, + "result": { + "gasUsed": "0x229", + "output": "0x5842545553440000000000000000000000000000000000000000000000000000" + }, + "subtraces": 0, + "traceAddress": [1, 3, 2, 4], + "type": "call" + }, + { + "action": { + "callType": "call", + "from": "0xcf00ffd997ad14939736f026006498e3f099baaf", + "gas": "0x1734e", + "input": "0x2e94420f", + "to": "0xc212e03b9e060e36facad5fd8f4435412ca22e6b", + "value": "0x0" + }, + "blockNumber": 0, + "result": { + "gasUsed": "0x229", + "output": "0x5842545553440000000000000000000000000000000000000000000000000000" + }, + "subtraces": 0, + "traceAddress": [1, 3, 2, 5], + "type": "call" + }, + { + "action": { + "callType": "call", + "from": "0x3e9286eafa2db8101246c2131c09b49080d00690", + "gas": "0x1b6c1", + "input": "0xe16c7d986c6f676d67720000000000000000000000000000000000000000000000000000", + "to": "0x2cccf5e0538493c235d1c5ef6580f77d99e91396", + "value": "0x0" + }, + "blockNumber": 0, + "result": { + "gasUsed": "0x334", + "output": "0x0000000000000000000000002a98c5f40bfa3dee83431103c535f6fae9a8ad38" + }, + "subtraces": 0, + "traceAddress": [1, 3, 3], + "type": "call" + }, + { + "action": { + "callType": "call", + "from": "0x3e9286eafa2db8101246c2131c09b49080d00690", + "gas": "0x1af69", + "input": "0x2e94420f", + "to": "0xc212e03b9e060e36facad5fd8f4435412ca22e6b", + "value": "0x0" + }, + 
"blockNumber": 0, + "result": { + "gasUsed": "0x229", + "output": "0x5842545553440000000000000000000000000000000000000000000000000000" + }, + "subtraces": 0, + "traceAddress": [1, 3, 4], + "type": "call" + }, + { + "action": { + "callType": "call", + "from": "0x3e9286eafa2db8101246c2131c09b49080d00690", + "gas": "0x1a91d", + "input": "0x0accce0600000000000000000000000000000000000000000000000000000000000000025842545553440000000000000000000000000000000000000000000000000000000000000000000000000000c212e03b9e060e36facad5fd8f4435412ca22e6b00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "to": "0x2a98c5f40bfa3dee83431103c535f6fae9a8ad38", + "value": "0x0" + }, + "blockNumber": 0, + "result": { + "gasUsed": "0x12fa", + "output": "0x" + }, + "subtraces": 1, + "traceAddress": [1, 3, 5], + "type": "call" + }, + { + "action": { + "callType": "call", + "from": "0x2a98c5f40bfa3dee83431103c535f6fae9a8ad38", + "gas": "0x143a5", + "input": "0x13bc6d4b0000000000000000000000003e9286eafa2db8101246c2131c09b49080d00690", + "to": "0x2cccf5e0538493c235d1c5ef6580f77d99e91396", + "value": "0x0" + }, + "blockNumber": 0, + "result": { + "gasUsed": "0x24d", + "output": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + "subtraces": 0, + "traceAddress": [1, 3, 5, 0], + "type": "call" + }, + { + "action": { + "callType": "call", + "from": "0x3e9286eafa2db8101246c2131c09b49080d00690", + "gas": "0x19177", + "input": "0xe16c7d986c6f676d67720000000000000000000000000000000000000000000000000000", + "to": "0x2cccf5e0538493c235d1c5ef6580f77d99e91396", + "value": "0x0" + }, + "blockNumber": 0, + "result": { + "gasUsed": "0x334", + "output": "0x0000000000000000000000002a98c5f40bfa3dee83431103c535f6fae9a8ad38" + }, + "subtraces": 0, + "traceAddress": [1, 3, 6], + "type": "call" + }, + { + "action": { + "callType": "call", + "from": "0x3e9286eafa2db8101246c2131c09b49080d00690", + "gas": "0x18a22", + 
"input": "0x2e94420f", + "to": "0xc212e03b9e060e36facad5fd8f4435412ca22e6b", + "value": "0x0" + }, + "blockNumber": 0, + "result": { + "gasUsed": "0x229", + "output": "0x5842545553440000000000000000000000000000000000000000000000000000" + }, + "subtraces": 0, + "traceAddress": [1, 3, 7], + "type": "call" + }, + { + "action": { + "callType": "call", + "from": "0x3e9286eafa2db8101246c2131c09b49080d00690", + "gas": "0x18341", + "input": "0xe16c7d986d61726b65746462000000000000000000000000000000000000000000000000", + "to": "0x2cccf5e0538493c235d1c5ef6580f77d99e91396", + "value": "0x0" + }, + "blockNumber": 0, + "result": { + "gasUsed": "0x334", + "output": "0x000000000000000000000000cf00ffd997ad14939736f026006498e3f099baaf" + }, + "subtraces": 0, + "traceAddress": [1, 3, 8], + "type": "call" + }, + { + "action": { + "callType": "call", + "from": "0x3e9286eafa2db8101246c2131c09b49080d00690", + "gas": "0x17bec", + "input": "0x2e94420f", + "to": "0xc212e03b9e060e36facad5fd8f4435412ca22e6b", + "value": "0x0" + }, + "blockNumber": 0, + "result": { + "gasUsed": "0x229", + "output": "0x5842545553440000000000000000000000000000000000000000000000000000" + }, + "subtraces": 0, + "traceAddress": [1, 3, 9], + "type": "call" + }, + { + "action": { + "callType": "call", + "from": "0x3e9286eafa2db8101246c2131c09b49080d00690", + "gas": "0x1764e", + "input": "0xf92eb7745842545553440000000000000000000000000000000000000000000000000000", + "to": "0xcf00ffd997ad14939736f026006498e3f099baaf", + "value": "0x0" + }, + "blockNumber": 0, + "result": { + "gasUsed": "0x45c", + "output": "0x00000000000000000000000000000000000000000000002816d180e30c390000" + }, + "subtraces": 0, + "traceAddress": [1, 3, 10], + "type": "call" + }, + { + "action": { + "callType": "call", + "from": "0x3e9286eafa2db8101246c2131c09b49080d00690", + "gas": "0x16e62", + "input": "0x645a3b72584254555344000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002816d180e30c390000", + "to": 
"0x2a98c5f40bfa3dee83431103c535f6fae9a8ad38", + "value": "0x0" + }, + "blockNumber": 0, + "result": { + "gasUsed": "0xebb", + "output": "0x" + }, + "subtraces": 1, + "traceAddress": [1, 3, 11], + "type": "call" + }, + { + "action": { + "callType": "call", + "from": "0x2a98c5f40bfa3dee83431103c535f6fae9a8ad38", + "gas": "0x108ba", + "input": "0x13bc6d4b0000000000000000000000003e9286eafa2db8101246c2131c09b49080d00690", + "to": "0x2cccf5e0538493c235d1c5ef6580f77d99e91396", + "value": "0x0" + }, + "blockNumber": 0, + "result": { + "gasUsed": "0x24d", + "output": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + "subtraces": 0, + "traceAddress": [1, 3, 11, 0], + "type": "call" + } + ] +} diff --git a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/delegatecall.json b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/delegatecall.json new file mode 100644 index 00000000..e5a37cbf --- /dev/null +++ b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/delegatecall.json @@ -0,0 +1,120 @@ +{ + "context": { + "difficulty": "31927752", + "gasLimit": "4707788", + "miner": "0x5659922ce141eedbc2733678f9806c77b4eebee8", + "number": "11495", + "timestamp": "1479735917" + }, + "genesis": { + "alloc": { + "0x13204f5d64c28326fd7bd05fd4ea855302d7f2ff": { + "balance": "0x0", + "code": 
"0x606060405236156100825760e060020a60003504630a0313a981146100875780630a3b0a4f146101095780630cd40fea1461021257806329092d0e1461021f5780634cd06a5f146103295780635dbe47e8146103395780637a9e5410146103d9578063825db5f7146103e6578063a820b44d146103f3578063efa52fb31461047a575b610002565b34610002576104fc600435600060006000507342b02b5deeb78f34cd5ac896473b63e6c99a71a26333556e849091846000604051602001526040518360e060020a028152600401808381526020018281526020019250505060206040518083038186803b156100025760325a03f415610002575050604051519150505b919050565b346100025761051060043560006000507342b02b5deeb78f34cd5ac896473b63e6c99a71a2637d65837a9091336000604051602001526040518360e060020a0281526004018083815260200182600160a060020a031681526020019250505060206040518083038186803b156100025760325a03f4156100025750506040515115905061008257604080517f21ce24d4000000000000000000000000000000000000000000000000000000008152600060048201819052600160a060020a038416602483015291517342b02b5deeb78f34cd5ac896473b63e6c99a71a2926321ce24d49260448082019391829003018186803b156100025760325a03f415610002575050505b50565b3461000257610512600181565b346100025761051060043560006000507342b02b5deeb78f34cd5ac896473b63e6c99a71a2637d65837a9091336000604051602001526040518360e060020a0281526004018083815260200182600160a060020a031681526020019250505060206040518083038186803b156100025760325a03f4156100025750506040515115905061008257604080517f89489a87000000000000000000000000000000000000000000000000000000008152600060048201819052600160a060020a038416602483015291517342b02b5deeb78f34cd5ac896473b63e6c99a71a2926389489a879260448082019391829003018186803b156100025760325a03f4156100025750505061020f565b3461000257610528600435610403565b34610002576104fc600435604080516000602091820181905282517f7d65837a00000000000000000000000000000000000000000000000000000000815260048101829052600160a060020a0385166024820152925190927342b02b5deeb78f34cd5ac896473b63e6c99a71a292637d65837a92604480840193829003018186803b156100025760325a03f4156100025750506040515191506101049050565b3461000257610512600c81565
b3461000257610512600081565b3461000257610528600061055660005b600060006000507342b02b5deeb78f34cd5ac896473b63e6c99a71a263685a1f3c9091846000604051602001526040518360e060020a028152600401808381526020018281526020019250505060206040518083038186803b156100025760325a03f4156100025750506040515191506101049050565b346100025761053a600435600060006000507342b02b5deeb78f34cd5ac896473b63e6c99a71a263f775b6b59091846000604051602001526040518360e060020a028152600401808381526020018281526020019250505060206040518083038186803b156100025760325a03f4156100025750506040515191506101049050565b604080519115158252519081900360200190f35b005b6040805160ff9092168252519081900360200190f35b60408051918252519081900360200190f35b60408051600160a060020a039092168252519081900360200190f35b90509056", + "nonce": "1", + "storage": { + "0x4d140b25abf3c71052885c66f73ce07cff141c1afabffdaf5cba04d625b7ebcc": "0x0000000000000000000000000000000000000000000000000000000000000001" + } + }, + "0x269296dddce321a6bcbaa2f0181127593d732cba": { + "balance": "0x0", + "code": 
"0x606060405236156101275760e060020a60003504630cd40fea811461012c578063173825d9146101395780631849cb5a146101c7578063285791371461030f5780632a58b3301461033f5780632cb0d48a146103565780632f54bf6e1461036a578063332b9f061461039d5780633ca8b002146103c55780633df4ddf4146103d557806341c0e1b5146103f457806347799da81461040557806362a51eee1461042457806366907d13146104575780637065cb48146104825780637a9e541014610496578063825db5f7146104a3578063949d225d146104b0578063a51687df146104c7578063b4da4e37146104e6578063b4e6850b146104ff578063bd7474ca14610541578063e75623d814610541578063e9938e1114610555578063f5d241d314610643575b610002565b3461000257610682600181565b34610002576106986004356106ff335b60006001600a9054906101000a9004600160a060020a0316600160a060020a0316635dbe47e8836000604051602001526040518260e060020a0281526004018082600160a060020a03168152602001915050602060405180830381600087803b156100025760325a03f1156100025750506040515191506103989050565b3461000257604080516101008082018352600080835260208084018290528385018290526060808501839052608080860184905260a080870185905260c080880186905260e09788018690526001605060020a0360043581168752600586529589902089519788018a528054808816808a52605060020a91829004600160a060020a0316978a01889052600183015463ffffffff8082169d8c018e905264010000000082048116988c01899052604060020a90910416958a018690526002830154948a01859052600390920154808916938a01849052049096169690970186905293969495949293604080516001605060020a03998a16815297891660208901529590971686860152600160a060020a03909316606086015263ffffffff9182166080860152811660a08501521660c083015260e08201929092529051908190036101000190f35b346100025761069a60043560018054600091829160ff60f060020a909104161515141561063d5761072833610376565b34610002576106ae6004546001605060020a031681565b34610002576106986004356108b333610149565b346100025761069a6004355b600160a060020a03811660009081526002602052604090205460ff1615156001145b919050565b34610002576106986001805460ff60f060020a9091041615151415610913576108ed33610376565b346100025761069a600435610149565b34610002576106ae60035460016050600
20a03605060020a9091041681565b346100025761069861091533610149565b34610002576106ae6003546001605060020a0360a060020a9091041681565b346100025761069a60043560243560018054600091829160ff60f060020a909104161515141561095e5761092633610376565b34610002576106986004356001805460ff60f060020a909104161515141561072557610a8b33610376565b3461000257610698600435610aa533610149565b3461000257610682600c81565b3461000257610682600081565b34610002576106ae6003546001605060020a031681565b34610002576106ca600154600160a060020a03605060020a9091041681565b346100025761069a60015460ff60f060020a9091041681565b346100025761069a60043560243560443560643560843560a43560c43560018054600091829160ff60f060020a9091041615151415610b5857610ad233610376565b3461000257610698600435610bd633610149565b34610002576106e6600435604080516101008181018352600080835260208084018290528385018290526060808501839052608080860184905260a080870185905260c080880186905260e09788018690526001605060020a03808b168752600586529589902089519788018a5280548088168952600160a060020a03605060020a918290041696890196909652600181015463ffffffff8082169b8a019b909b5264010000000081048b1695890195909552604060020a90940490981691860182905260028301549086015260039091015480841696850196909652940416918101919091525b50919050565b346100025761069a60043560243560443560643560843560a43560018054600091829160ff60f060020a9091041615151415610c8e57610bfb33610376565b6040805160ff9092168252519081900360200190f35b005b604080519115158252519081900360200190f35b604080516001605060020a039092168252519081900360200190f35b60408051600160a060020a039092168252519081900360200190f35b6040805163ffffffff9092168252519081900360200190f35b1561012757600160a060020a0381166000908152600260205260409020805460ff191690555b50565b1561063d57506001605060020a0380831660009081526005602052604090208054909116151561075b576000915061063d565b604080516101008101825282546001605060020a038082168352600160a060020a03605060020a92839004166020840152600185015463ffffffff80821695850195909552640100000000810485166060850152604060020a90049093166080830152600284015460a083015260038401548
0841660c08401520490911660e0820152610817905b8051600354600090819060016001605060020a0390911611610c995760038054605060020a60f060020a0319169055610ddf565b600380546001605060020a031981166000196001605060020a03928316011782558416600090815260056020526040812080547fffff000000000000000000000000000000000000000000000000000000000000168155600181810180546bffffffffffffffffffffffff191690556002820192909255909101805473ffffffffffffffffffffffffffffffffffffffff19169055915061063d565b1561012757600180547fff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1660f060020a8302179055610725565b1561091357600480546001605060020a031981166001605060020a039091166001011790555b565b156101275733600160a060020a0316ff5b1561095e57506001605060020a03808416600090815260056020526040902080549091161515610965576000915061095e565b600191505b5092915050565b60038101546001605060020a0384811691161415610986576001915061095e565b604080516101008101825282546001605060020a038082168352600160a060020a03605060020a92839004166020840152600185015463ffffffff80821695850195909552640100000000810485166060850152604060020a90049093166080830152600284015460a0830152600384015480841660c08401520490911660e0820152610a12906107e3565b61095983825b80546003546001605060020a0391821691600091161515610de55760038054605060020a60a060020a031916605060020a84021760a060020a69ffffffffffffffffffff02191660a060020a84021781558301805473ffffffffffffffffffffffffffffffffffffffff19169055610ddf565b1561072557600480546001605060020a0319168217905550565b1561012757600160a060020a0381166000908152600260205260409020805460ff19166001179055610725565b15610b5857506001605060020a038088166000908152600560205260409020805490911615610b645760009150610b58565b6004546001605060020a0390811690891610610b3057600480546001605060020a03191660018a011790555b6003805460016001605060020a03821681016001605060020a03199092169190911790915591505b50979650505050505050565b80546001605060020a0319168817605060020a60f060020a031916605060020a880217815560018101805463ffffffff1916871767ffffffff0000000019166401000000008702176bffffffff000000000
00000001916604060020a860217905560028101839055610b048982610a18565b156101275760018054605060020a60f060020a031916605060020a8302179055610725565b15610c8e57506001605060020a03808816600090815260056020526040902080549091161515610c2e5760009150610c8e565b8054605060020a60f060020a031916605060020a88021781556001808201805463ffffffff1916881767ffffffff0000000019166401000000008802176bffffffff00000000000000001916604060020a87021790556002820184905591505b509695505050505050565b6003546001605060020a03848116605060020a909204161415610d095760e084015160038054605060020a928302605060020a60a060020a031990911617808255919091046001605060020a031660009081526005602052604090200180546001605060020a0319169055610ddf565b6003546001605060020a0384811660a060020a909204161415610d825760c08401516003805460a060020a92830260a060020a69ffffffffffffffffffff021990911617808255919091046001605060020a03166000908152600560205260409020018054605060020a60a060020a0319169055610ddf565b505060c082015160e08301516001605060020a0380831660009081526005602052604080822060039081018054605060020a60a060020a031916605060020a8702179055928416825290200180546001605060020a031916831790555b50505050565b6001605060020a0384161515610e6457600380546001605060020a03605060020a9182900481166000908152600560205260409020830180546001605060020a0319908116871790915583548785018054918590049093168402605060020a60a060020a03199182161790911690915582549185029116179055610ddf565b506001605060020a038381166000908152600560205260409020600390810180549185018054605060020a60a060020a0319908116605060020a94859004909516808502959095176001605060020a0319168817909155815416918402919091179055801515610ef4576003805460a060020a69ffffffffffffffffffff02191660a060020a8402179055610ddf565b6003808401546001605060020a03605060020a9091041660009081526005602052604090200180546001605060020a031916831790555050505056", + "nonce": "1", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000001": "0x000113204f5d64c28326fd7bd05fd4ea855302d7f2ff00000000000000000000" + } + }, + 
"0x42b02b5deeb78f34cd5ac896473b63e6c99a71a2": { + "balance": "0x0", + "code": "0x6504032353da7150606060405236156100695760e060020a60003504631bf7509d811461006e57806321ce24d41461008157806333556e84146100ec578063685a1f3c146101035780637d65837a1461011757806389489a8714610140578063f775b6b5146101fc575b610007565b61023460043560006100fd82600061010d565b610246600435602435600160a060020a03811660009081526020839052604081205415156102cb57826001016000508054806001018281815481835581811511610278576000838152602090206102789181019083015b808211156102d057600081556001016100d8565b610248600435602435600182015481105b92915050565b6102346004356024355b60018101906100fd565b610248600435602435600160a060020a03811660009081526020839052604090205415156100fd565b61024660043560243580600160a060020a031632600160a060020a03161415156101f857600160a060020a038116600090815260208390526040902054156101f857600160a060020a038116600090815260208390526040902054600183018054909160001901908110156100075760009182526020808320909101805473ffffffffffffffffffffffffffffffffffffffff19169055600160a060020a038316825283905260408120556002820180546000190190555b5050565b61025c60043560243560008260010160005082815481101561000757600091825260209091200154600160a060020a03169392505050565b60408051918252519081900360200190f35b005b604080519115158252519081900360200190f35b60408051600160a060020a039092168252519081900360200190f35b50505060009283526020808420909201805473ffffffffffffffffffffffffffffffffffffffff191686179055600160a060020a0385168352908590526040909120819055600284018054600101905590505b505050565b509056", + "nonce": "1", + "storage": {} + }, + "0xa529806c67cc6486d4d62024471772f47f6fd672": { + "balance": "0x67820e39ac8fe9800", + "code": "0x", + "nonce": "68", + "storage": {} + } + }, + "config": { + "byzantiumBlock": 1700000, + "chainId": 3, + "daoForkSupport": true, + "eip150Block": 0, + "eip150Hash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d", + "eip155Block": 10, + "eip158Block": 10, + "ethash": {}, + "homesteadBlock": 0 + }, + 
"difficulty": "31912170", + "extraData": "0xd783010502846765746887676f312e372e33856c696e7578", + "gasLimit": "4712388", + "hash": "0x0855914bdc581bccdc62591fd438498386ffb59ea4d5361ed5c3702e26e2c72f", + "miner": "0x334391aa808257952a462d1475562ee2106a6c90", + "mixHash": "0x64bb70b8ca883cadb8fbbda2c70a861612407864089ed87b98e5de20acceada6", + "nonce": "0x684129f283aaef18", + "number": "11494", + "stateRoot": "0x7057f31fe3dab1d620771adad35224aae43eb70e94861208bc84c557ff5b9d10", + "timestamp": "1479735912", + "totalDifficulty": "90744064339" + }, + "input": "0xf889448504a817c800832dc6c094269296dddce321a6bcbaa2f0181127593d732cba80a47065cb480000000000000000000000001523e55a1ca4efbae03355775ae89f8d7699ad9e29a080ed81e4c5e9971a730efab4885566e2c868cd80bd4166d0ed8c287fdf181650a069d7c49215e3d4416ad239cd09dbb71b9f04c16b33b385d14f40b618a7a65115", + "result": [ + { + "action": { + "callType": "call", + "from": "0xa529806c67cc6486d4d62024471772f47f6fd672", + "gas": "0x2dc6c0", + "input": "0x7065cb480000000000000000000000001523e55a1ca4efbae03355775ae89f8d7699ad9e", + "to": "0x269296dddce321a6bcbaa2f0181127593d732cba", + "value": "0x0" + }, + "blockNumber": 11495, + "result": { + "gasUsed": "0xbd55", + "output": "0x" + }, + "subtraces": 1, + "traceAddress": [], + "type": "call" + }, + { + "action": { + "callType": "call", + "from": "0x269296dddce321a6bcbaa2f0181127593d732cba", + "gas": "0x2cae73", + "input": "0x5dbe47e8000000000000000000000000a529806c67cc6486d4d62024471772f47f6fd672", + "to": "0x13204f5d64c28326fd7bd05fd4ea855302d7f2ff", + "value": "0x0" + }, + "blockNumber": 0, + "result": { + "gasUsed": "0xa9d", + "output": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + "subtraces": 1, + "traceAddress": [0], + "type": "call" + }, + { + "action": { + "callType": "delegatecall", + "from": "0x13204f5d64c28326fd7bd05fd4ea855302d7f2ff", + "gas": "0x2bf459", + "input": 
"0x7d65837a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a529806c67cc6486d4d62024471772f47f6fd672", + "to": "0x42b02b5deeb78f34cd5ac896473b63e6c99a71a2", + "value": "0x0" + }, + "blockNumber": 0, + "result": { + "gasUsed": "0x2aa", + "output": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + "subtraces": 0, + "traceAddress": [0, 0], + "type": "call" + } + ] +} diff --git a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/delegatecall_parent_value.json b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/delegatecall_parent_value.json new file mode 100644 index 00000000..17791242 --- /dev/null +++ b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/delegatecall_parent_value.json @@ -0,0 +1,103 @@ +{ + "genesis": { + "number": "566098", + "hash": "0xba134562590a59291892395a29c5088899c2c64d720135dad88f7f076cf55f5f", + "nonce": "0x4b281be9594e3eb3", + "mixHash": "0xdb4ec386166d9c0dc9ba147755ecbb87af9f0a22563cbda02c799efa4e29db6e", + "stateRoot": "0xfc01993ad96a8fb8790a093cea4f505f8db1b0e1143c5f57bb1d173db0baa9e3", + "miner": "0x877bd459c9b7d8576b44e59e09d076c25946f443", + "difficulty": "1926740", + "totalDifficulty": "482216286599", + "extraData": "0xd883010906846765746888676f312e31332e35856c696e7578", + "gasLimit": "19388354", + "timestamp": "1577558314", + "alloc": { + "0x6ab9dd83108698b9ca8d03af3c7eb91c0e54c3fc": { + "balance": "0x0", + "nonce": "0", + "code": "0x", + "storage": {} + }, + "0x877bd459c9b7d8576b44e59e09d076c25946f443": { + "balance": "0xcbd5b9b25d1c38c2aad", + "nonce": "134969", + "code": "0x", + "storage": {} + }, + "0x91765918420bcb5ad22ee0997abed04056705798": { + "balance": "0x0", + "nonce": "1", + "code": "0x366000803760206000366000736ab9dd83108698b9ca8d03af3c7eb91c0e54c3fc60325a03f41560015760206000f3", + "storage": {} + } + }, + "config": { + "chainId": 63, + "daoForkSupport": true, + "eip150Block": 0, + "eip150Hash": 
"0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d", + "eip155Block": 0, + "eip158Block": 0, + "ethash": {}, + "homesteadBlock": 0, + "byzantiumBlock": 0, + "constantinopleBlock": 301243, + "petersburgBlock": 999983, + "istanbulBlock": 999983 + } + }, + "context": { + "number": "566099", + "difficulty": "1927680", + "timestamp": "1577558317", + "gasLimit": "19369422", + "miner": "0x774c398d763161f55b66a646f17edda4addad2ca" + }, + "input": "0xf87983020f3985746a52880083015f909491765918420bcb5ad22ee0997abed04056705798888ac7230489e80000884e45375a4741394181a1a04b7260723fd02830754916b3bdf1537b6a851a7ae27c7e9296cfe1fc8275ec08a049d32158988eb717d61b4503b27c7583037c067daba1eb56f4bdfafc1b0045f6", + "result": [ + { + "action": { + "callType": "call", + "from": "0x877bd459c9b7d8576b44e59e09d076c25946f443", + "gas": "0x15f90", + "input": "0x4e45375a47413941", + "to": "0x91765918420bcb5ad22ee0997abed04056705798", + "value": "0x8ac7230489e80000" + }, + "blockHash": "0xb05cc5c8f11df2b5d53ced342ee79e2805785f04c2f40add4539f27bd349f74e", + "blockNumber": 566099, + "result": { + "gasUsed": "0x5721", + "output": "0x4e45375a47413941000000000000000000000000000000000000000000000000" + }, + "subtraces": 1, + "traceAddress": [], + "transactionHash": "0x6e26dffe2f66186f03a2c36a16a4cd9724d07622c83746f1e35f988515713d4b", + "transactionPosition": 10, + "type": "call" + }, + { + "action": { + "callType": "delegatecall", + "from": "0x91765918420bcb5ad22ee0997abed04056705798", + "gas": "0x10463", + "input": "0x4e45375a47413941", + "to": "0x6ab9dd83108698b9ca8d03af3c7eb91c0e54c3fc", + "value": "0x8ac7230489e80000" + }, + "blockHash": "0xb05cc5c8f11df2b5d53ced342ee79e2805785f04c2f40add4539f27bd349f74e", + "blockNumber": 566099, + "result": { + "gasUsed": "0x0", + "output": "0x" + }, + "subtraces": 0, + "traceAddress": [ + 0 + ], + "transactionHash": "0x6e26dffe2f66186f03a2c36a16a4cd9724d07622c83746f1e35f988515713d4b", + "transactionPosition": 10, + "type": "call" + } + ] +} \ No 
newline at end of file diff --git a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/gas.json b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/gas.json new file mode 100644 index 00000000..d977dbe3 --- /dev/null +++ b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/gas.json @@ -0,0 +1,95 @@ +{ + "genesis": { + "difficulty": "4683014", + "extraData": "0x537465762d63676574682d76312e31312e34", + "gasLimit": "9435044", + "hash": "0x3452ca5005cb73cd60dfa488a7b124251168e564491f80eb66765e79d78cfd95", + "miner": "0x415aa6292d1db797a467b22139704956c030e62f", + "mixHash": "0x6037612618507ae70c74a72bc2580253662971db959cfbc06d3f8527d4d01575", + "nonce": "0x314fc90dee5e39a2", + "number": "1555274", + "stateRoot": "0x795751f3f96a5de1fd3944ddd78cbfe4ef10491e1086be47609869a30929d0e5", + "timestamp": "1590795228", + "totalDifficulty": "2242595605834", + "alloc": { + "0x0000000000000000000000000000000000000001": { + "balance": "0x0", + "nonce": "0", + "code": "0x", + "storage": {} + }, + "0x877bd459c9b7d8576b44e59e09d076c25946f443": { + "balance": "0x6242e3ccf48e66425fb1", + "nonce": "264882", + "code": "0x", + "storage": {} + } + }, + "config": { + "chainId": 63, + "daoForkSupport": true, + "eip150Block": 0, + "eip150Hash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d", + "eip155Block": 0, + "eip158Block": 0, + "ethash": {}, + "homesteadBlock": 0, + "byzantiumBlock": 0, + "constantinopleBlock": 301243, + "petersburgBlock": 999983, + "istanbulBlock": 999983 + } + }, + "context": { + "number": "1555275", + "difficulty": "4683014", + "timestamp": "1590795244", + "gasLimit": "9444256", + "miner": "0x877bd459c9b7d8576b44e59e09d076c25946f443" + }, + "input": 
"0xf9011583040ab2843b9aca008301a9c88080b8c0601b565b6000555b005b630badf00d6003565b63c001f00d6003565b7319e7e376e7c213b7e7e7e46cc70a5dd086daff2a7f22ae6da6b482f9b1b19b0b897c3fd43884180a1c5ee361e1107a1bc635649dda600052601b603f537f16433dce375ce6dc8151d3f0a22728bc4a1d9fd6ed39dfd18b4609331937367f6040527f306964c0cf5d74f04129fdc60b54d35b596dde1bf89ad92cb4123318f4c0e40060605260206080607f60006000600161fffff2156007576080511460125760095681a1a07682fc43dbe1fb13c6474f5e70e121c826dd996168d8bb1d8ca7a63470127b46a00a25b308ba417b7770899e8f98a3f0c14aa9bf7db0edacfe4e78d00dbbd3c31e", + "result": [ + { + "type": "create", + "action": { + "from": "0x877bd459c9b7d8576b44e59e09d076c25946f443", + "value": "0x0", + "gas": "0x1a9c8", + "init": "0x601b565b6000555b005b630badf00d6003565b63c001f00d6003565b7319e7e376e7c213b7e7e7e46cc70a5dd086daff2a7f22ae6da6b482f9b1b19b0b897c3fd43884180a1c5ee361e1107a1bc635649dda600052601b603f537f16433dce375ce6dc8151d3f0a22728bc4a1d9fd6ed39dfd18b4609331937367f6040527f306964c0cf5d74f04129fdc60b54d35b596dde1bf89ad92cb4123318f4c0e40060605260206080607f60006000600161fffff21560075760805114601257600956" + }, + "result": { + "gasUsed": "0x137e5", + "code": "0x", + "address": "0x1a05d76017ca02010533a470e05e8925a0380d8f" + }, + "traceAddress": [], + "subtraces": 1, + "transactionPosition": 18, + "transactionHash": "0xc1c42a325856d513523aec464811923b2e2926f54015c7ba37877064cf889803", + "blockNumber": 1555275, + "blockHash": "0x80945caaff2fc67253cbb0217d2e5a307afde943929e97d8b36e58b88cbb02fd", + "time": "453.925µs" + }, + { + "type": "call", + "action": { + "from": "0x1a05d76017ca02010533a470e05e8925a0380d8f", + "to": "0x0000000000000000000000000000000000000001", + "value": "0x0", + "gas": "0xc8c6", + "input": "0x22ae6da6b482f9b1b19b0b897c3fd43884180a1c5ee361e1107a1bc635649dda000000000000000000000000000000000000000000000000000000000000001b16433dce375ce6dc8151d3f0a22728bc4a1d9fd6ed39dfd18b4609331937367f306964c0cf5d74f04129fdc60b54d35b596dde1bf89ad92cb4123318f4c0e4", + "callType": 
"callcode" + }, + "result": { + "gasUsed": "0xbb8", + "output": "0x00000000000000000000000019e7e376e7c213b7e7e7e46cc70a5dd086daff2a" + }, + "traceAddress": [0], + "subtraces": 0, + "transactionPosition": 18, + "transactionHash": "0xc1c42a325856d513523aec464811923b2e2926f54015c7ba37877064cf889803", + "blockNumber": 1555275, + "blockHash": "0x80945caaff2fc67253cbb0217d2e5a307afde943929e97d8b36e58b88cbb02fd" + } + ] +} diff --git a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/include_precompiled.json b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/include_precompiled.json new file mode 100644 index 00000000..0f28c07a --- /dev/null +++ b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/include_precompiled.json @@ -0,0 +1,832 @@ +{ + "genesis": { + "number": "559197", + "hash": "0x0742a2bfab0452e2c634f3685b7e49ceb065c7000609b2b73f086e01fd1dfb58", + "nonce": "0x3060ad521440e1c2", + "mixHash": "0x59e7d4ae6cc3c38d23dac3f869b21984c7ba8f38070f4116a4941d9c403b6299", + "stateRoot": "0x68418fb5cf4afa9b807dc079e8cdde0e148ac2c8afb378e675465b5bed1fbd02", + "miner": "0x877bd459c9b7d8576b44e59e09d076c25946f443", + "difficulty": "1813945", + "totalDifficulty": "469107641961", + "extraData": "0xd883010906846765746888676f312e31332e35856c696e7578", + "gasLimit": "6321166", + "timestamp": "1577471202", + "alloc": { + "0x877bd459c9b7d8576b44e59e09d076c25946f443": { + "balance": "0xc5e6fdae52af83f7e28", + "nonce": "77947", + "code": "0x", + "storage": {} + }, + "0x774c398d763161f55b66a646f17edda4addad2ca": { + "balance": "0xf09ef316eff819ee488", + "nonce": "0", + "code": "0x", + "storage": {} + }, + "0x6cc68eb482a757c690dd151d2bd5e774ada38bdc": { + "balance": "0x0", + "nonce": "1", + "code": 
"0x60006121df537c01000000000000000000000000000000000000000000000000000000006000350463b041b2858114156100d257600435604052780100000000000000000000000000000000000000000000000060606060599059016000905260038152604051816020015260008160400152809050205404606052606051151561008f57600060a052602060a0f35b604051601c604459905901600090520163e0e9a17b601c82035260605160048201526020610100602483600030602d5a03f1506101005190501460c052602060c0f35b632cce81aa81141561019957600435610120526001610120511280156100f85780610143565b78010000000000000000000000000000000000000000000000006060606059905901600090526003815266040000000000025481602001526000816040015280905020540461012051135b905015610157576000610180526020610180f35b601c604459905901600090520163e0e9a17b601c82035261012051600482015260206101c0602483600030602d5a03f1506101c05190506101a05260206101a0f35b63e0e9a17b8114156102e957600435610120526604000000000002546101e0526007610200525b610120517801000000000000000000000000000000000000000000000000606060605990590160009052600381526101e05181602001526000816040015280905020540413156102da575b6102005160050a610120517801000000000000000000000000000000000000000000000000606060605990590160009052600381526101e051816020015260008160400152809050205404031215610269576000610200511361026c565b60005b1561028157600161020051036102005261020b565b7c01000000000000000000000000000000000000000000000000000000006102005160200260020a606060605990590160009052600381526101e05181602001526001816040015280905020540204546101e0526101c0565b6101e051610280526020610280f35b63cef887b08114156103e757365990590160009052366004823760043560208201016102c0526024356102e052506060601c61014c5990590160009052016390fa337d601c8203526102c0516020601f602083035101046020026020018360048401526020820360648401528060c8840152808401935050506102e051602482015233604482015281600401599059016000905260648160648460006004601cf161039057fe5b60648101925060c882015180808582606487015160006004600a8705601201f16103b657fe5b5080840193505080830360206103a08284600030602d5a03f1506103a0519050905090509050610300526020610300f
35b6390fa337d81141561065f57365990590160009052366004823760043560208201016102c0526024356102e0526044356103e052505a610400526020601c608c599059016000905201632b861629601c8203526102c0516020601f6020830351010460200260200183600484015260208203602484015280604884015280840193505050816004015990590160009052602481602484600060046015f161048a57fe5b602481019250604882015180808582602487015160006004600a8705601201f16104b057fe5b5080840193505080830360206104408284600030602d5a03f15061044051905090509050905061042052610420511561065e576102c05160208103516020599059016000905260208183856000600287604801f150805190509050905061046052602059905901600090526020816020610460600060026068f1508051905060005b6020811215610552578181601f031a816105400153600181019050610532565b5050610540516101e0526102e0516c010000000000000000000000006103e0510217606060605990590160009052600381526101e05181602001526003816040015280905020555a61058052700100000000000000000000000000000000660400000000000154046105a0526104006105a0516103ff02056105c0526104006105a05161040102056105e0526105c0513a12156105f6576105c05161060052610615565b6105e0513a131561060e576105e05161060052610614565b3a610600525b5b6105805161040051036106005160020202610620526106205170010000000000000000000000000000000061060051021766040000000000015561042051610640526020610640f35b5b63d467ae0381141561073257600435604052602435610660526106605134121515610725576000341315610718576c01000000000000000000000000606060605990590160009052600381526040518160200152600381604001528090502054046103e0526000600060006000346103e051611388f115156106dd57fe5b601c60405990590160009052013481526103e0517f15e746bf513b8a58e4265cc1162d7fc445da5c9b1928d7cfcde2582735d4677f602083a2505b60016106a05260206106a0f35b60006106c05260206106c0f35b63ea4971ee811415610851576004356101e0526024356102e0526044356103e052601c606459905901600090520163d467ae03601c8203526101e05160048201526604000000000001546fffffffffffffffffffffffffffffffff16602482015260206106e060448334306123555a03f1506106e051905015156107bd576000610700526020610700f35b606060605990590160009052600381526
101e05181602001526003816040015280905020546bffffffffffffffffffffffff166102e0511215610844576102e0516c010000000000000000000000006103e0510217606060605990590160009052600381526101e05181602001526003816040015280905020556001610760526020610760f35b6000610780526020610780f35b6387def0818114156108a3576004356101e0526c01000000000000000000000000606060605990590160009052600381526101e0518160200152600381604001528090502054046107a05260206107a0f35b630aece23c8114156108f4576004356101e052606060605990590160009052600381526101e05181602001526003816040015280905020546bffffffffffffffffffffffff166107e05260206107e0f35b63fa14df6b811415610926576604000000000001546fffffffffffffffffffffffffffffffff16610820526020610820f35b63b8c48f8c811415610b1b576004356101e0526024356108405260443561086052600066040000000000035414151561096a576000610880526020610880f3610976565b60016604000000000003555b6101e051660400000000000255606060605990590160009052600381526101e05181602001526000816040015280905020546108a0526108a0610840518060181a82538060191a600183015380601a1a600283015380601b1a600383015380601c1a600483015380601d1a600583015380601e1a600683015380601f1a600783015350506108a051606060605990590160009052600381526101e0518160200152600081604001528090502055606060605990590160009052600381526101e051816020015260008160400152809050205461094052601061094001610860518060101a82538060111a60018301538060121a60028301538060131a60038301538060141a60048301538060151a60058301538060161a60068301538060171a60078301538060181a60088301538060191a600983015380601a1a600a83015380601b1a600b83015380601c1a600c83015380601d1a600d83015380601e1a600e83015380601f1a600f830153505061094051606060605990590160009052600381526101e051816020015260008160400152809050205560016109e05260206109e0f35b632b86162981141561179457365990590160009052366004823760043560208201016102c0525060483560005b6020811215610b68578181601f031a81610a600153600181019050610b48565b5050610a6051610a00526102c05160208103516020599059016000905260208183856000600287604801f1508051905090509050610a8052602059905901600090526020816020610a806000600
26068f1508051905060005b6020811215610be1578181601f031a81610b600153600181019050610bc1565b5050610b60516101e05270010000000000000000000000000000000070010000000000000000000000000000000060606060599059016000905260038152610a005181602001526000816040015280905020540204610b8052610b80511515610c8b57601c602059905901600090520161272e6101e0517f055e4f8dd3a534789b3feb8e0681afa2aee8713fdd6472f25b2c30dc7bf4e0f4600084a3506000610bc0526020610bc0f35b700100000000000000000000000000000000700100000000000000000000000000000000606060605990590160009052600381526101e05181602001526000816040015280905020540204610be0526000610be051141515610d2e57601c60205990590160009052016127386101e0517f055e4f8dd3a534789b3feb8e0681afa2aee8713fdd6472f25b2c30dc7bf4e0f4600084a3506000610c20526020610c20f35b608c35610c40526301000000610c405160031a0262010000610c405160021a02610100610c405160011a02610c405160001a010101610c60526301000000610c605104610ca05262ffffff610c605116610cc0526003610ca051036101000a610cc05102610c805260006101e0511315610db057610c80516101e05112610db3565b60005b1561174d57780100000000000000000000000000000000000000000000000060606060599059016000905260038152610a00518160200152600081604001528090502054046001016101205260806080599059016000905260038152610a005181602001526002816040015260008160600152809050206002810154610d405250610d405160081a610d405160091a61010002610d4051600a1a6201000002610d4051600b1a630100000002010101610d005260006107e0610120510614158015610e7e5780610e8b565b6001660400000000000054145b905015610f0257610d0051610c6051141515610eae576000610d00511415610eb1565b60005b15610efd57601c602059905901600090520161271a6101e0517f055e4f8dd3a534789b3feb8e0681afa2aee8713fdd6472f25b2c30dc7bf4e0f4600084a3506000610da0526020610da0f35b6111b4565b6301000000610d005104610de05262ffffff610d005116610e00526003610de051036101000a610e005102610dc05260806080599059016000905260038152610a005181602001526002816040015260008160600152809050206002810154610e605250610e605160041a610e605160051a61010002610e605160061a6201000002610e605160071a630100000002010101610e2052601c6044599
05901600090520163e0e9a17b601c8203526107e0610120510360048201526020610ec0602483600030602d5a03f150610ec0519050610ea05260806080599059016000905260038152610ea05181602001526002816040015260008160600152809050206002810154610f205250610f205160041a610f205160051a61010002610f205160061a6201000002610f205160071a630100000002010101610ee052610ee051610e20510362049d408112156110595762049d4090505b6249d40081131561106b576249d40090505b62127500610dc0518202047bffffffffffffffffffffffffffffffffffffffffffffffffffffffff8113156110ba577bffffffffffffffffffffffffffffffffffffffffffffffffffffffff90505b600860076000835b80156110d9576002810490506001820191506110c2565b5080905001046000600382131515611103578160030360080260020a62ffffff841602905061111a565b6003820360080260020a8304905062ffffff811690505b6280000081161561113357610100810490506001820191505b6301000000820281179050905090509050610f6052610f6051610c6051141515611164576000610f60511415611167565b60005b156111b357601c60205990590160009052016127246101e0517f055e4f8dd3a534789b3feb8e0681afa2aee8713fdd6472f25b2c30dc7bf4e0f4600084a3506000611040526020611040f35b5b6101e0516101e0516101e05166040000000000005455606060605990590160009052600381526101e0518160200152600081604001528090502054611060526008611060016604000000000000548060181a82538060191a600183015380601a1a600283015380601b1a600383015380601c1a600483015380601d1a600583015380601e1a600683015380601f1a6007830153505061106051606060605990590160009052600381526101e0518160200152600081604001528090502055600166040000000000005401660400000000000055606060605990590160009052600381526101e0518160200152600081604001528090502054611100526111006001780100000000000000000000000000000000000000000000000060606060599059016000905260038152610a0051816020015260008160400152809050205404018060181a82538060191a600183015380601a1a600283015380601b1a600383015380601c1a600483015380601d1a600583015380601e1a600683015380601f1a6007830153505061110051606060605990590160009052600381526101e051816020015260008160400152809050205560006111c052780100000000000000000000000000000000000000000000000
06801000000000000000060606060599059016000905260038152610a0051816020015260008160400152809050205402046111e0526111c06111e05180601c1a825380601d1a600183015380601e1a600283015380601f1a600383015350506001611260525b6008611260511215611515576112605160050a611280526001611280517801000000000000000000000000000000000000000000000000606060605990590160009052600381526101e05181602001526000816040015280905020540407141561148757611260516004026111c0016111e05180601c1a825380601d1a600183015380601e1a600283015380601f1a60038301535050611505565b611260516004026111c0017c01000000000000000000000000000000000000000000000000000000006112605160200260020a60606060599059016000905260038152610a00518160200152600181604001528090502054020480601c1a825380601d1a600183015380601e1a600283015380601f1a600383015350505b60016112605101611260526113ec565b6111c051606060605990590160009052600381526101e05181602001526001816040015280905020555050608060805990590160009052600381526101e051816020015260028160400152600081606001528090502060005b600281121561159057806020026102c05101518282015560018101905061156e565b700100000000000000000000000000000000600003816020026102c051015116828201555050610c80517bffff0000000000000000000000000000000000000000000000000000056113e0526113e051610b805101610be052606060605990590160009052600381526101e051816020015260008160400152809050205461140052601061140001610be0518060101a82538060111a60018301538060121a60028301538060131a60038301538060141a60048301538060151a60058301538060161a60068301538060171a60078301538060181a60088301538060191a600983015380601a1a600a83015380601b1a600b83015380601c1a600c83015380601d1a600d83015380601e1a600e83015380601f1a600f830153505061140051606060605990590160009052600381526101e0518160200152600081604001528090502055660400000000000354610be051121515611703576101e051660400000000000255610be0516604000000000003555b601c6020599059016000905201610120516101e0517f055e4f8dd3a534789b3feb8e0681afa2aee8713fdd6472f25b2c30dc7bf4e0f4600084a350610120516114a05260206114a0f35b601c602059905901600090520161276a6101e0517f055e4f8dd3a534789b3feb8
e0681afa2aee8713fdd6472f25b2c30dc7bf4e0f4600084a35060006114c05260206114c0f35b630f5995ce8114156119a157365990590160009052366004823760043560208201016114e05260243561150052604435602082010161152052606435604052506114e05160208103516020599059016000905260208183856000600287604801f150805190509050905061156052602059905901600090526020816020611560600060026068f1508051905060005b6020811215611843578181601f031a816116400153600181019050611823565b50506116405161154052604060206114e051035114156118a457601c6020599059016000905201614e52611540517fd008620948a1ed10f4fed82dc43cf79acad36dc6b7c2c924e27c9813193b83ad600084a3506000611660526020611660f35b6080601c6101ac59905901600090520163bd136cb3601c8203526115405160048201526115005160248201526115205160208103516020026020018360448401526020820360c48401528061014884015280840193505050604051606482015281600401599059016000905260848160848460006004601ff161192357fe5b6084810192506101488201518080858260c487015160006004600a8705601201f161194a57fe5b508084019350508083036020611680828434306123555a03f15061168051905090509050905061042052600161042051141561199357611540516116a05260206116a0f36119a0565b60006116c05260206116c0f35b5b63bd136cb3811415611d8c573659905901600090523660048237600435611540526024356115005260443560208201016115205260643560405250601c606459905901600090520163d467ae03601c82035260405160048201526060606059905901600090526003815260405181602001526003816040015280905020546bffffffffffffffffffffffff166024820152602061170060448334306123555a03f1506117005190501515611a9757601c6020599059016000905201614e2a611540517fd008620948a1ed10f4fed82dc43cf79acad36dc6b7c2c924e27c9813193b83ad600084a350614e2a611720526020611720f35b601c6044599059016000905201633d73b705601c82035260405160048201526020611740602483600030602d5a03f15061174051905015611b1a57601c6020599059016000905201614e34611540517fd008620948a1ed10f4fed82dc43cf79acad36dc6b7c2c924e27c9813193b83ad600084a350614e34611760526020611760f35b601c604459905901600090520163b041b285601c82035260405160048201526020611780602483600030602d5a03f1506117805190501515611b9e5
7601c6020599059016000905201614e3e611540517fd008620948a1ed10f4fed82dc43cf79acad36dc6b7c2c924e27c9813193b83ad600084a350614e3e6117a05260206117a0f35b6060601c61014c59905901600090520163b7129afb601c8203526115405160048201526115005160248201526115205160208103516020026020018360448401526020820360a4840152806101088401528084019350505081600401599059016000905260648160648460006004601cf1611c1557fe5b6064810192506101088201518080858260a487015160006004600a8705601201f1611c3c57fe5b5080840193505080830360206117e08284600030602d5a03f1506117e05190509050905090506117c0526080608059905901600090526003815260405181602001526002816040015260008160600152809050207c01000000000000000000000000000000000000000000000000000000006002820154046401000000006001830154020160005b6020811215611ce4578181601f031a816118a00153600181019050611cc4565b50506118a051905061180052611800516117c0511415611d4457601c60205990590160009052016001611540517fd008620948a1ed10f4fed82dc43cf79acad36dc6b7c2c924e27c9813193b83ad600084a35060016118c05260206118c0f35b601c6020599059016000905201614e48611540517fd008620948a1ed10f4fed82dc43cf79acad36dc6b7c2c924e27c9813193b83ad600084a350614e486118e05260206118e0f35b63318a3fee81141561205657365990590160009052366004823760043560208201016114e0526024356115005260443560208201016115205260643560405260843561190052506080601c6101ac599059016000905201630f5995ce601c8203526114e0516020601f6020830351010460200260200183600484015260208203608484015280610108840152808401935050506115005160248201526115205160208103516020026020018360448401526020820360c48401528061014884015280840193505050604051606482015281600401599059016000905260848160848460006004601ff1611e7b57fe5b60848101925061010882015180808582608487015160006004600a8705601201f1611ea257fe5b508084019350506101488201518080858260c487015160006004600a8705601201f1611eca57fe5b508084019350508083036020611920828434306123555a03f15061192051905090509050905061154052600061154051141515612010576040601c60ec599059016000905201631c0b6367601c8203526114e0516020601f6020830351010460200260200183600484015260208203604484015
280608884015280840193505050611540516024820152816004015990590160009052604481604484600060046018f1611f7357fe5b604481019250608882015180808582604487015160006004600a8705601201f1611f9957fe5b5080840193505080830360206119608284600061190051602d5a03f15061196051905090509050905061194052601c602059905901600090520161194051611540517f2d0d11d0f27e21fab56a8712078721096066b7faaa8540a3ea566e70b97de2d4600084a35061194051611980526020611980f35b601c602059905901600090520161753a60007f2d0d11d0f27e21fab56a8712078721096066b7faaa8540a3ea566e70b97de2d4600084a35061753a6119a05260206119a0f35b6309dd0e81811415612076576604000000000002546119c05260206119c0f35b63023948728114156120d2577801000000000000000000000000000000000000000000000000606060605990590160009052600381526604000000000002548160200152600081604001528090502054046119e05260206119e0f35b632c181929811415612139577001000000000000000000000000000000007001000000000000000000000000000000006060606059905901600090526003815266040000000000025481602001526000816040015280905020540204611a20526020611a20f35b637ca823d58114156122af576604000000000002546101e052700100000000000000000000000000000000700100000000000000000000000000000000606060605990590160009052600381526101e05181602001526000816040015280905020540204611a60526000611260525b600a61126051121561224c57608060805990590160009052600381526101e05181602001526002816040015260008160600152809050207c01000000000000000000000000000000000000000000000000000000006001820154046401000000008254020160005b6020811215612230578181601f031a81611b200153600181019050612210565b5050611b205190506101e05260016112605101611260526121a8565b700100000000000000000000000000000000700100000000000000000000000000000000606060605990590160009052600381526101e05181602001526000816040015280905020540204611b4052611b4051611a605103611b80526020611b80f35b63b7129afb81141561246a57365990590160009052366004823760043561154052602435611500526044356020820101611520525061154051611ba0526020611520510351611bc0526000611260525b611bc05161126051121561245b5761126051602002611520510151611be052600261150051076
11c00526001611c0051141561234a57611be051611c2052611ba051611c4052612368565b6000611c0051141561236757611ba051611c2052611be051611c40525b5b60405990590160009052611c205160005b6020811215612399578181601f031a81611ca00153600181019050612379565b5050611ca0518152611c405160005b60208112156123c8578181601f031a81611d2001536001810190506123a8565b5050611d2051602082015260205990590160009052602081604084600060026088f15080519050611d4052602059905901600090526020816020611d40600060026068f1508051905060005b6020811215612434578181601f031a81611de00153600181019050612414565b5050611de0519050611ba052600261150051056115005260016112605101611260526122ff565b611ba051611e00526020611e00f35b633d73b70581141561255b576004356040526604000000000002546101e0526000611260525b600661126051121561254e576101e05160405114156124b6576001611e20526020611e20f35b608060805990590160009052600381526101e05181602001526002816040015260008160600152809050207c01000000000000000000000000000000000000000000000000000000006001820154046401000000008254020160005b6020811215612532578181601f031a81611ec00153600181019050612512565b5050611ec05190506101e0526001611260510161126052612490565b6000611ee0526020611ee0f35b631f794436811415612737576004356101e052601c606459905901600090520163d467ae03601c8203526101e0516004820152606060605990590160009052600381526101e05181602001526003816040015280905020546bffffffffffffffffffffffff1660248201526020611f2060448334306123555a03f150611f20519050151561265657601c602059905901600090520160006101e0517f60ab231f060fa320acea170017564b7ee77f477e6465a8c964380cffb270aaf4600084a350602159905901600090526001815260006020820152602081019050602060408203526020601f6020830351604001010460200260408203f3505b601c602059905901600090520160016101e0517f60ab231f060fa320acea170017564b7ee77f477e6465a8c964380cffb270aaf4600084a350608060805990590160009052600381526101e0518160200152600281604001526000816060015280905020607059905901600090526050815260208101905060005b60028112156126f05780830154816020028301526001810190506126d1565b70010000000000000000000000000000000060000381840154168160200
283015281905090509050602060408203526020601f6020830351604001010460200260408203f3505b6313f955e18114156128ca573659905901600090523660048237600435602082010161204052602435612060525060506120805260006120a052612080516120c0526000611260525b612060516112605112156128bb576120a051806120c051038080602001599059016000905281815260208101905090508180828286612040510160006004600a8705601201f16127cc57fe5b50809050905090506120e0526020601c608c599059016000905201632b861629601c8203526120e0516020601f6020830351010460200260200183600484015260208203602484015280604884015280840193505050816004015990590160009052602481602484600060046015f161284157fe5b602481019250604882015180808582602487015160006004600a8705601201f161286757fe5b5080840193505080830360206121a08284600030602d5a03f1506121a051905090509050905061042052612080516120a051016120a052612080516120c051016120c0526001611260510161126052612780565b610420516121c05260206121c0f35b50", + "storage": { + "0x292b7a8d467a95cffd303c7edd99875892cdb3eaee87e5ca29057dc88a09ffbd": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x4d2fcf8ac901ad7dcf5b1c3979801430d9979c87157230ae066a0276984c6ac7": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0xdf951a5d1d9283b06d4f1de58542f1e1e310d8d17aada46586ddb9598bc42894": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x9c8d09d387f3ba5dd4733e24c63e4d549864a7cd57a1bdf1fdd831a2a0184815": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x4ab3b783bb170e11b0932a5ce8f5f343f67058b3925da271001a75ae498bd655": "0x0000000000000000000000000000000000000000000000000000000000000000" + } + }, + "0x0000000000000000000000000000000000000004": { + "balance": "0x0", + "nonce": "0", + "code": "0x", + "storage": {} + }, + "0x0000000000000000000000000000000000000002": { + "balance": "0x0", + "nonce": "0", + "code": "0x", + "storage": {} + } + }, + "config": { + "chainId": 63, + "daoForkSupport": true, + "eip150Block": 0, + "eip150Hash": 
"0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d", + "eip155Block": 0, + "eip158Block": 0, + "ethash": {}, + "homesteadBlock": 0, + "byzantiumBlock": 0, + "constantinopleBlock": 301243, + "petersburgBlock": 999983, + "istanbulBlock": 999983 + } + }, + "context": { + "number": "559198", + "difficulty": "1814830", + "timestamp": "1577471205", + "gasLimit": "6327338", + "miner": "0x774c398d763161f55b66a646f17edda4addad2ca" + }, + "tracerConfig": { + "includePrecompiles": true + }, + "input": "0xf9026f8301307b85746a52880083124f80946cc68eb482a757c690dd151d2bd5e774ada38bdc80b9020413f955e100000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000005000000000000000000000000000000000000000000000000000000000000019004000000afbe013b4a83b2f91f3d9b6627cf382394c4914fd2b7510700000000000000008621196eb526a0e02430b6dd5c72fd368e768977f3a8364861e5a471a8ae61a1028f745609c40b185f537a67040000005b53875b0f1381589859adcf938980f4a8fb0af4c8845007000000000000000075289d1c48c8f71deee521a76c8d92948cbe14343991998dfaea6b08596d97dcc891745609c40b18ae825ae704000000abbacd8711f647ab97c6c9b9658eb9bef081e2cedb630f010000000000000000549bcab22422baef6c34af382b227e4b1a27bec3312e04dbb62fc315203c67f30f9d745609c40b180fdfc30304000000e93433dde5128942e47e8722d37ec4dcc1c8a78cf9c4a4030000000000000000bf92c09e8e37b2c8ffbb4b9cadfccc563e474c4feae6997f52d56236fedafce20a9f745609c40b1840cc27de04000000f2e372a0b5b837116eee8f968840393d85975a1531346807000000000000000076bc91399edda1de98976ee0774e2ad3b21dd38ad9f5f34d2c816a832747fe7f4c9e745609c40b18e290e9e00000000000000000000000000000000081a1a01c9e9d742c8e69daba2a026ccafdde618f2e44c96db281c2209c22f183ad03a2a049a61d267d22226896d4c065525819c238784c439dc2afa7d17fce76595730d1", + "result": [ + { + "action": { + "author": "0x0000000000000000000000000000000000000000", + "address": "0x0000000000000000000000000000000000000000", + "balance": "0x0", + "callType": "call", + "from": 
"0x877bd459c9b7d8576b44e59e09d076c25946f443", + "gas": "0x124f80", + "input": "0x13f955e100000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000005000000000000000000000000000000000000000000000000000000000000019004000000afbe013b4a83b2f91f3d9b6627cf382394c4914fd2b7510700000000000000008621196eb526a0e02430b6dd5c72fd368e768977f3a8364861e5a471a8ae61a1028f745609c40b185f537a67040000005b53875b0f1381589859adcf938980f4a8fb0af4c8845007000000000000000075289d1c48c8f71deee521a76c8d92948cbe14343991998dfaea6b08596d97dcc891745609c40b18ae825ae704000000abbacd8711f647ab97c6c9b9658eb9bef081e2cedb630f010000000000000000549bcab22422baef6c34af382b227e4b1a27bec3312e04dbb62fc315203c67f30f9d745609c40b180fdfc30304000000e93433dde5128942e47e8722d37ec4dcc1c8a78cf9c4a4030000000000000000bf92c09e8e37b2c8ffbb4b9cadfccc563e474c4feae6997f52d56236fedafce20a9f745609c40b1840cc27de04000000f2e372a0b5b837116eee8f968840393d85975a1531346807000000000000000076bc91399edda1de98976ee0774e2ad3b21dd38ad9f5f34d2c816a832747fe7f4c9e745609c40b18e290e9e000000000000000000000000000000000", + "refundAddress": "0x0000000000000000000000000000000000000000", + "to": "0x6cc68eb482a757c690dd151d2bd5e774ada38bdc", + "value": "0x0" + }, + "result": { + "address": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x1c6ff", + "output": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + "subtraces": 20, + "traceAddress": [], + "type": "call" + }, + { + "action": { + "author": "0x0000000000000000000000000000000000000000", + "address": "0x0000000000000000000000000000000000000000", + "balance": "0x0", + "callType": "call", + "from": "0x6cc68eb482a757c690dd151d2bd5e774ada38bdc", + "gas": "0x1a", + "input": "0x04000000afbe013b4a83b2f91f3d9b6627cf382394c4914fd2b7510700000000000000008621196eb526a0e02430b6dd5c72fd368e768977f3a8364861e5a471a8ae61a1028f745609c40b185f537a67", + "refundAddress": "0x0000000000000000000000000000000000000000", + 
"to": "0x0000000000000000000000000000000000000004", + "value": "0x0" + }, + "result": { + "address": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x18", + "output": "0x04000000afbe013b4a83b2f91f3d9b6627cf382394c4914fd2b7510700000000000000008621196eb526a0e02430b6dd5c72fd368e768977f3a8364861e5a471a8ae61a1028f745609c40b185f537a67" + }, + "subtraces": 0, + "traceAddress": [ + 0 + ], + "type": "call" + }, + { + "action": { + "author": "0x0000000000000000000000000000000000000000", + "address": "0x0000000000000000000000000000000000000000", + "balance": "0x0", + "callType": "call", + "from": "0x6cc68eb482a757c690dd151d2bd5e774ada38bdc", + "gas": "0x15", + "input": "0x2b8616290000000000000000000000000000000000000000000000000000000000000020", + "refundAddress": "0x0000000000000000000000000000000000000000", + "to": "0x0000000000000000000000000000000000000004", + "value": "0x0" + }, + "result": { + "address": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x15", + "output": "0x2b8616290000000000000000000000000000000000000000000000000000000000000020" + }, + "subtraces": 0, + "traceAddress": [ + 1 + ], + "type": "call" + }, + { + "action": { + "author": "0x0000000000000000000000000000000000000000", + "address": "0x0000000000000000000000000000000000000000", + "balance": "0x0", + "callType": "call", + "from": "0x6cc68eb482a757c690dd151d2bd5e774ada38bdc", + "gas": "0x1e", + "input": "0x000000000000000000000000000000000000000000000000000000000000005004000000afbe013b4a83b2f91f3d9b6627cf382394c4914fd2b7510700000000000000008621196eb526a0e02430b6dd5c72fd368e768977f3a8364861e5a471a8ae61a1028f745609c40b185f537a6700000000000000000000000000000000", + "refundAddress": "0x0000000000000000000000000000000000000000", + "to": "0x0000000000000000000000000000000000000004", + "value": "0x0" + }, + "result": { + "address": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x1b", + "output": 
"0x000000000000000000000000000000000000000000000000000000000000005004000000afbe013b4a83b2f91f3d9b6627cf382394c4914fd2b7510700000000000000008621196eb526a0e02430b6dd5c72fd368e768977f3a8364861e5a471a8ae61a1028f745609c40b185f537a6700000000000000000000000000000000" + }, + "subtraces": 0, + "traceAddress": [ + 2 + ], + "type": "call" + }, + { + "action": { + "author": "0x0000000000000000000000000000000000000000", + "address": "0x0000000000000000000000000000000000000000", + "balance": "0x0", + "callType": "call", + "from": "0x6cc68eb482a757c690dd151d2bd5e774ada38bdc", + "gas": "0x114243", + "input": "0x2b8616290000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000005004000000afbe013b4a83b2f91f3d9b6627cf382394c4914fd2b7510700000000000000008621196eb526a0e02430b6dd5c72fd368e768977f3a8364861e5a471a8ae61a1028f745609c40b185f537a6700000000000000000000000000000000", + "refundAddress": "0x0000000000000000000000000000000000000000", + "to": "0x6cc68eb482a757c690dd151d2bd5e774ada38bdc", + "value": "0x0" + }, + "result": { + "address": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x27c3", + "output": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + "subtraces": 2, + "traceAddress": [ + 3 + ], + "type": "call" + }, + { + "action": { + "author": "0x0000000000000000000000000000000000000000", + "address": "0x0000000000000000000000000000000000000000", + "balance": "0x0", + "callType": "call", + "from": "0x6cc68eb482a757c690dd151d2bd5e774ada38bdc", + "gas": "0x98", + "input": "0x04000000afbe013b4a83b2f91f3d9b6627cf382394c4914fd2b7510700000000000000008621196eb526a0e02430b6dd5c72fd368e768977f3a8364861e5a471a8ae61a1028f745609c40b185f537a67", + "refundAddress": "0x0000000000000000000000000000000000000000", + "to": "0x0000000000000000000000000000000000000002", + "value": "0x0" + }, + "result": { + "address": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x60", + "output": 
"0xb099ea4048830027371dc31039920ae4fd19a641a7cbe57c198edd19d60f158a" + }, + "subtraces": 0, + "traceAddress": [ + 3, + 0 + ], + "type": "call" + }, + { + "action": { + "author": "0x0000000000000000000000000000000000000000", + "address": "0x0000000000000000000000000000000000000000", + "balance": "0x0", + "callType": "call", + "from": "0x6cc68eb482a757c690dd151d2bd5e774ada38bdc", + "gas": "0x68", + "input": "0xb099ea4048830027371dc31039920ae4fd19a641a7cbe57c198edd19d60f158a", + "refundAddress": "0x0000000000000000000000000000000000000000", + "to": "0x0000000000000000000000000000000000000002", + "value": "0x0" + }, + "result": { + "address": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x48", + "output": "0x5b53875b0f1381589859adcf938980f4a8fb0af4c88450070000000000000000" + }, + "subtraces": 0, + "traceAddress": [ + 3, + 1 + ], + "type": "call" + }, + { + "action": { + "author": "0x0000000000000000000000000000000000000000", + "address": "0x0000000000000000000000000000000000000000", + "balance": "0x0", + "callType": "call", + "from": "0x6cc68eb482a757c690dd151d2bd5e774ada38bdc", + "gas": "0x1a", + "input": "0x040000005b53875b0f1381589859adcf938980f4a8fb0af4c8845007000000000000000075289d1c48c8f71deee521a76c8d92948cbe14343991998dfaea6b08596d97dcc891745609c40b18ae825ae7", + "refundAddress": "0x0000000000000000000000000000000000000000", + "to": "0x0000000000000000000000000000000000000004", + "value": "0x0" + }, + "result": { + "address": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x18", + "output": "0x040000005b53875b0f1381589859adcf938980f4a8fb0af4c8845007000000000000000075289d1c48c8f71deee521a76c8d92948cbe14343991998dfaea6b08596d97dcc891745609c40b18ae825ae7" + }, + "subtraces": 0, + "traceAddress": [ + 4 + ], + "type": "call" + }, + { + "action": { + "author": "0x0000000000000000000000000000000000000000", + "address": "0x0000000000000000000000000000000000000000", + "balance": "0x0", + "callType": "call", + "from": 
"0x6cc68eb482a757c690dd151d2bd5e774ada38bdc", + "gas": "0x15", + "input": "0x2b8616290000000000000000000000000000000000000000000000000000000000000020", + "refundAddress": "0x0000000000000000000000000000000000000000", + "to": "0x0000000000000000000000000000000000000004", + "value": "0x0" + }, + "result": { + "address": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x15", + "output": "0x2b8616290000000000000000000000000000000000000000000000000000000000000020" + }, + "subtraces": 0, + "traceAddress": [ + 5 + ], + "type": "call" + }, + { + "action": { + "author": "0x0000000000000000000000000000000000000000", + "address": "0x0000000000000000000000000000000000000000", + "balance": "0x0", + "callType": "call", + "from": "0x6cc68eb482a757c690dd151d2bd5e774ada38bdc", + "gas": "0x1e", + "input": "0x0000000000000000000000000000000000000000000000000000000000000050040000005b53875b0f1381589859adcf938980f4a8fb0af4c8845007000000000000000075289d1c48c8f71deee521a76c8d92948cbe14343991998dfaea6b08596d97dcc891745609c40b18ae825ae700000000000000000000000000000000", + "refundAddress": "0x0000000000000000000000000000000000000000", + "to": "0x0000000000000000000000000000000000000004", + "value": "0x0" + }, + "result": { + "address": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x1b", + "output": "0x0000000000000000000000000000000000000000000000000000000000000050040000005b53875b0f1381589859adcf938980f4a8fb0af4c8845007000000000000000075289d1c48c8f71deee521a76c8d92948cbe14343991998dfaea6b08596d97dcc891745609c40b18ae825ae700000000000000000000000000000000" + }, + "subtraces": 0, + "traceAddress": [ + 6 + ], + "type": "call" + }, + { + "action": { + "author": "0x0000000000000000000000000000000000000000", + "address": "0x0000000000000000000000000000000000000000", + "balance": "0x0", + "callType": "call", + "from": "0x6cc68eb482a757c690dd151d2bd5e774ada38bdc", + "gas": "0x110d3b", + "input": 
"0x2b86162900000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000050040000005b53875b0f1381589859adcf938980f4a8fb0af4c8845007000000000000000075289d1c48c8f71deee521a76c8d92948cbe14343991998dfaea6b08596d97dcc891745609c40b18ae825ae700000000000000000000000000000000", + "refundAddress": "0x0000000000000000000000000000000000000000", + "to": "0x6cc68eb482a757c690dd151d2bd5e774ada38bdc", + "value": "0x0" + }, + "result": { + "address": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x27c3", + "output": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + "subtraces": 2, + "traceAddress": [ + 7 + ], + "type": "call" + }, + { + "action": { + "author": "0x0000000000000000000000000000000000000000", + "address": "0x0000000000000000000000000000000000000000", + "balance": "0x0", + "callType": "call", + "from": "0x6cc68eb482a757c690dd151d2bd5e774ada38bdc", + "gas": "0x98", + "input": "0x040000005b53875b0f1381589859adcf938980f4a8fb0af4c8845007000000000000000075289d1c48c8f71deee521a76c8d92948cbe14343991998dfaea6b08596d97dcc891745609c40b18ae825ae7", + "refundAddress": "0x0000000000000000000000000000000000000000", + "to": "0x0000000000000000000000000000000000000002", + "value": "0x0" + }, + "result": { + "address": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x60", + "output": "0xa0c6939b58a99b0d940f4435ab7db7d54d6b7786e68e00d9ff3890d69f95565d" + }, + "subtraces": 0, + "traceAddress": [ + 7, + 0 + ], + "type": "call" + }, + { + "action": { + "author": "0x0000000000000000000000000000000000000000", + "address": "0x0000000000000000000000000000000000000000", + "balance": "0x0", + "callType": "call", + "from": "0x6cc68eb482a757c690dd151d2bd5e774ada38bdc", + "gas": "0x68", + "input": "0xa0c6939b58a99b0d940f4435ab7db7d54d6b7786e68e00d9ff3890d69f95565d", + "refundAddress": "0x0000000000000000000000000000000000000000", + "to": "0x0000000000000000000000000000000000000002", + 
"value": "0x0" + }, + "result": { + "address": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x48", + "output": "0xabbacd8711f647ab97c6c9b9658eb9bef081e2cedb630f010000000000000000" + }, + "subtraces": 0, + "traceAddress": [ + 7, + 1 + ], + "type": "call" + }, + { + "action": { + "author": "0x0000000000000000000000000000000000000000", + "address": "0x0000000000000000000000000000000000000000", + "balance": "0x0", + "callType": "call", + "from": "0x6cc68eb482a757c690dd151d2bd5e774ada38bdc", + "gas": "0x1a", + "input": "0x04000000abbacd8711f647ab97c6c9b9658eb9bef081e2cedb630f010000000000000000549bcab22422baef6c34af382b227e4b1a27bec3312e04dbb62fc315203c67f30f9d745609c40b180fdfc303", + "refundAddress": "0x0000000000000000000000000000000000000000", + "to": "0x0000000000000000000000000000000000000004", + "value": "0x0" + }, + "result": { + "address": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x18", + "output": "0x04000000abbacd8711f647ab97c6c9b9658eb9bef081e2cedb630f010000000000000000549bcab22422baef6c34af382b227e4b1a27bec3312e04dbb62fc315203c67f30f9d745609c40b180fdfc303" + }, + "subtraces": 0, + "traceAddress": [ + 8 + ], + "type": "call" + }, + { + "action": { + "author": "0x0000000000000000000000000000000000000000", + "address": "0x0000000000000000000000000000000000000000", + "balance": "0x0", + "callType": "call", + "from": "0x6cc68eb482a757c690dd151d2bd5e774ada38bdc", + "gas": "0x15", + "input": "0x2b8616290000000000000000000000000000000000000000000000000000000000000020", + "refundAddress": "0x0000000000000000000000000000000000000000", + "to": "0x0000000000000000000000000000000000000004", + "value": "0x0" + }, + "result": { + "address": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x15", + "output": "0x2b8616290000000000000000000000000000000000000000000000000000000000000020" + }, + "subtraces": 0, + "traceAddress": [ + 9 + ], + "type": "call" + }, + { + "action": { + "author": "0x0000000000000000000000000000000000000000", 
+ "address": "0x0000000000000000000000000000000000000000", + "balance": "0x0", + "callType": "call", + "from": "0x6cc68eb482a757c690dd151d2bd5e774ada38bdc", + "gas": "0x1e", + "input": "0x000000000000000000000000000000000000000000000000000000000000005004000000abbacd8711f647ab97c6c9b9658eb9bef081e2cedb630f010000000000000000549bcab22422baef6c34af382b227e4b1a27bec3312e04dbb62fc315203c67f30f9d745609c40b180fdfc30300000000000000000000000000000000", + "refundAddress": "0x0000000000000000000000000000000000000000", + "to": "0x0000000000000000000000000000000000000004", + "value": "0x0" + }, + "result": { + "address": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x1b", + "output": "0x000000000000000000000000000000000000000000000000000000000000005004000000abbacd8711f647ab97c6c9b9658eb9bef081e2cedb630f010000000000000000549bcab22422baef6c34af382b227e4b1a27bec3312e04dbb62fc315203c67f30f9d745609c40b180fdfc30300000000000000000000000000000000" + }, + "subtraces": 0, + "traceAddress": [ + 10 + ], + "type": "call" + }, + { + "action": { + "author": "0x0000000000000000000000000000000000000000", + "address": "0x0000000000000000000000000000000000000000", + "balance": "0x0", + "callType": "call", + "from": "0x6cc68eb482a757c690dd151d2bd5e774ada38bdc", + "gas": "0x10d833", + "input": "0x2b8616290000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000005004000000abbacd8711f647ab97c6c9b9658eb9bef081e2cedb630f010000000000000000549bcab22422baef6c34af382b227e4b1a27bec3312e04dbb62fc315203c67f30f9d745609c40b180fdfc30300000000000000000000000000000000", + "refundAddress": "0x0000000000000000000000000000000000000000", + "to": "0x6cc68eb482a757c690dd151d2bd5e774ada38bdc", + "value": "0x0" + }, + "result": { + "address": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x27c3", + "output": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + "subtraces": 2, + "traceAddress": [ + 11 + ], 
+ "type": "call" + }, + { + "action": { + "author": "0x0000000000000000000000000000000000000000", + "address": "0x0000000000000000000000000000000000000000", + "balance": "0x0", + "callType": "call", + "from": "0x6cc68eb482a757c690dd151d2bd5e774ada38bdc", + "gas": "0x98", + "input": "0x04000000abbacd8711f647ab97c6c9b9658eb9bef081e2cedb630f010000000000000000549bcab22422baef6c34af382b227e4b1a27bec3312e04dbb62fc315203c67f30f9d745609c40b180fdfc303", + "refundAddress": "0x0000000000000000000000000000000000000000", + "to": "0x0000000000000000000000000000000000000002", + "value": "0x0" + }, + "result": { + "address": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x60", + "output": "0x6defff59ba277fa4511f8675ca98ca7d9c237c7433684490cf1ce09a9249e32f" + }, + "subtraces": 0, + "traceAddress": [ + 11, + 0 + ], + "type": "call" + }, + { + "action": { + "author": "0x0000000000000000000000000000000000000000", + "address": "0x0000000000000000000000000000000000000000", + "balance": "0x0", + "callType": "call", + "from": "0x6cc68eb482a757c690dd151d2bd5e774ada38bdc", + "gas": "0x68", + "input": "0x6defff59ba277fa4511f8675ca98ca7d9c237c7433684490cf1ce09a9249e32f", + "refundAddress": "0x0000000000000000000000000000000000000000", + "to": "0x0000000000000000000000000000000000000002", + "value": "0x0" + }, + "result": { + "address": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x48", + "output": "0xe93433dde5128942e47e8722d37ec4dcc1c8a78cf9c4a4030000000000000000" + }, + "subtraces": 0, + "traceAddress": [ + 11, + 1 + ], + "type": "call" + }, + { + "action": { + "author": "0x0000000000000000000000000000000000000000", + "address": "0x0000000000000000000000000000000000000000", + "balance": "0x0", + "callType": "call", + "from": "0x6cc68eb482a757c690dd151d2bd5e774ada38bdc", + "gas": "0x1a", + "input": "0x04000000e93433dde5128942e47e8722d37ec4dcc1c8a78cf9c4a4030000000000000000bf92c09e8e37b2c8ffbb4b9cadfccc563e474c4feae6997f52d56236fedafce20a9f745609c40b1840cc27de", 
+ "refundAddress": "0x0000000000000000000000000000000000000000", + "to": "0x0000000000000000000000000000000000000004", + "value": "0x0" + }, + "result": { + "address": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x18", + "output": "0x04000000e93433dde5128942e47e8722d37ec4dcc1c8a78cf9c4a4030000000000000000bf92c09e8e37b2c8ffbb4b9cadfccc563e474c4feae6997f52d56236fedafce20a9f745609c40b1840cc27de" + }, + "subtraces": 0, + "traceAddress": [ + 12 + ], + "type": "call" + }, + { + "action": { + "author": "0x0000000000000000000000000000000000000000", + "address": "0x0000000000000000000000000000000000000000", + "balance": "0x0", + "callType": "call", + "from": "0x6cc68eb482a757c690dd151d2bd5e774ada38bdc", + "gas": "0x15", + "input": "0x2b8616290000000000000000000000000000000000000000000000000000000000000020", + "refundAddress": "0x0000000000000000000000000000000000000000", + "to": "0x0000000000000000000000000000000000000004", + "value": "0x0" + }, + "result": { + "address": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x15", + "output": "0x2b8616290000000000000000000000000000000000000000000000000000000000000020" + }, + "subtraces": 0, + "traceAddress": [ + 13 + ], + "type": "call" + }, + { + "action": { + "author": "0x0000000000000000000000000000000000000000", + "address": "0x0000000000000000000000000000000000000000", + "balance": "0x0", + "callType": "call", + "from": "0x6cc68eb482a757c690dd151d2bd5e774ada38bdc", + "gas": "0x1e", + "input": "0x000000000000000000000000000000000000000000000000000000000000005004000000e93433dde5128942e47e8722d37ec4dcc1c8a78cf9c4a4030000000000000000bf92c09e8e37b2c8ffbb4b9cadfccc563e474c4feae6997f52d56236fedafce20a9f745609c40b1840cc27de00000000000000000000000000000000", + "refundAddress": "0x0000000000000000000000000000000000000000", + "to": "0x0000000000000000000000000000000000000004", + "value": "0x0" + }, + "result": { + "address": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x1b", + "output": 
"0x000000000000000000000000000000000000000000000000000000000000005004000000e93433dde5128942e47e8722d37ec4dcc1c8a78cf9c4a4030000000000000000bf92c09e8e37b2c8ffbb4b9cadfccc563e474c4feae6997f52d56236fedafce20a9f745609c40b1840cc27de00000000000000000000000000000000" + }, + "subtraces": 0, + "traceAddress": [ + 14 + ], + "type": "call" + }, + { + "action": { + "author": "0x0000000000000000000000000000000000000000", + "address": "0x0000000000000000000000000000000000000000", + "balance": "0x0", + "callType": "call", + "from": "0x6cc68eb482a757c690dd151d2bd5e774ada38bdc", + "gas": "0x10a328", + "input": "0x2b8616290000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000005004000000e93433dde5128942e47e8722d37ec4dcc1c8a78cf9c4a4030000000000000000bf92c09e8e37b2c8ffbb4b9cadfccc563e474c4feae6997f52d56236fedafce20a9f745609c40b1840cc27de00000000000000000000000000000000", + "refundAddress": "0x0000000000000000000000000000000000000000", + "to": "0x6cc68eb482a757c690dd151d2bd5e774ada38bdc", + "value": "0x0" + }, + "result": { + "address": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x27c3", + "output": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + "subtraces": 2, + "traceAddress": [ + 15 + ], + "type": "call" + }, + { + "action": { + "author": "0x0000000000000000000000000000000000000000", + "address": "0x0000000000000000000000000000000000000000", + "balance": "0x0", + "callType": "call", + "from": "0x6cc68eb482a757c690dd151d2bd5e774ada38bdc", + "gas": "0x98", + "input": "0x04000000e93433dde5128942e47e8722d37ec4dcc1c8a78cf9c4a4030000000000000000bf92c09e8e37b2c8ffbb4b9cadfccc563e474c4feae6997f52d56236fedafce20a9f745609c40b1840cc27de", + "refundAddress": "0x0000000000000000000000000000000000000000", + "to": "0x0000000000000000000000000000000000000002", + "value": "0x0" + }, + "result": { + "address": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x60", + 
"output": "0x996652142ffecd9cc272f376ca0e8228871a903772996289f847a6dbe2ce2698" + }, + "subtraces": 0, + "traceAddress": [ + 15, + 0 + ], + "type": "call" + }, + { + "action": { + "author": "0x0000000000000000000000000000000000000000", + "address": "0x0000000000000000000000000000000000000000", + "balance": "0x0", + "callType": "call", + "from": "0x6cc68eb482a757c690dd151d2bd5e774ada38bdc", + "gas": "0x68", + "input": "0x996652142ffecd9cc272f376ca0e8228871a903772996289f847a6dbe2ce2698", + "refundAddress": "0x0000000000000000000000000000000000000000", + "to": "0x0000000000000000000000000000000000000002", + "value": "0x0" + }, + "result": { + "address": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x48", + "output": "0xf2e372a0b5b837116eee8f968840393d85975a15313468070000000000000000" + }, + "subtraces": 0, + "traceAddress": [ + 15, + 1 + ], + "type": "call" + }, + { + "action": { + "author": "0x0000000000000000000000000000000000000000", + "address": "0x0000000000000000000000000000000000000000", + "balance": "0x0", + "callType": "call", + "from": "0x6cc68eb482a757c690dd151d2bd5e774ada38bdc", + "gas": "0x1a", + "input": "0x04000000f2e372a0b5b837116eee8f968840393d85975a1531346807000000000000000076bc91399edda1de98976ee0774e2ad3b21dd38ad9f5f34d2c816a832747fe7f4c9e745609c40b18e290e9e0", + "refundAddress": "0x0000000000000000000000000000000000000000", + "to": "0x0000000000000000000000000000000000000004", + "value": "0x0" + }, + "result": { + "address": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x18", + "output": "0x04000000f2e372a0b5b837116eee8f968840393d85975a1531346807000000000000000076bc91399edda1de98976ee0774e2ad3b21dd38ad9f5f34d2c816a832747fe7f4c9e745609c40b18e290e9e0" + }, + "subtraces": 0, + "traceAddress": [ + 16 + ], + "type": "call" + }, + { + "action": { + "author": "0x0000000000000000000000000000000000000000", + "address": "0x0000000000000000000000000000000000000000", + "balance": "0x0", + "callType": "call", + "from": 
"0x6cc68eb482a757c690dd151d2bd5e774ada38bdc", + "gas": "0x15", + "input": "0x2b8616290000000000000000000000000000000000000000000000000000000000000020", + "refundAddress": "0x0000000000000000000000000000000000000000", + "to": "0x0000000000000000000000000000000000000004", + "value": "0x0" + }, + "result": { + "address": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x15", + "output": "0x2b8616290000000000000000000000000000000000000000000000000000000000000020" + }, + "subtraces": 0, + "traceAddress": [ + 17 + ], + "type": "call" + }, + { + "action": { + "author": "0x0000000000000000000000000000000000000000", + "address": "0x0000000000000000000000000000000000000000", + "balance": "0x0", + "callType": "call", + "from": "0x6cc68eb482a757c690dd151d2bd5e774ada38bdc", + "gas": "0x1e", + "input": "0x000000000000000000000000000000000000000000000000000000000000005004000000f2e372a0b5b837116eee8f968840393d85975a1531346807000000000000000076bc91399edda1de98976ee0774e2ad3b21dd38ad9f5f34d2c816a832747fe7f4c9e745609c40b18e290e9e000000000000000000000000000000000", + "refundAddress": "0x0000000000000000000000000000000000000000", + "to": "0x0000000000000000000000000000000000000004", + "value": "0x0" + }, + "result": { + "address": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x1b", + "output": "0x000000000000000000000000000000000000000000000000000000000000005004000000f2e372a0b5b837116eee8f968840393d85975a1531346807000000000000000076bc91399edda1de98976ee0774e2ad3b21dd38ad9f5f34d2c816a832747fe7f4c9e745609c40b18e290e9e000000000000000000000000000000000" + }, + "subtraces": 0, + "traceAddress": [ + 18 + ], + "type": "call" + }, + { + "action": { + "author": "0x0000000000000000000000000000000000000000", + "address": "0x0000000000000000000000000000000000000000", + "balance": "0x0", + "callType": "call", + "from": "0x6cc68eb482a757c690dd151d2bd5e774ada38bdc", + "gas": "0x106e1d", + "input": 
"0x2b8616290000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000005004000000f2e372a0b5b837116eee8f968840393d85975a1531346807000000000000000076bc91399edda1de98976ee0774e2ad3b21dd38ad9f5f34d2c816a832747fe7f4c9e745609c40b18e290e9e000000000000000000000000000000000", + "refundAddress": "0x0000000000000000000000000000000000000000", + "to": "0x6cc68eb482a757c690dd151d2bd5e774ada38bdc", + "value": "0x0" + }, + "result": { + "address": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x27c3", + "output": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + "subtraces": 2, + "traceAddress": [ + 19 + ], + "type": "call" + }, + { + "action": { + "author": "0x0000000000000000000000000000000000000000", + "address": "0x0000000000000000000000000000000000000000", + "balance": "0x0", + "callType": "call", + "from": "0x6cc68eb482a757c690dd151d2bd5e774ada38bdc", + "gas": "0x98", + "input": "0x04000000f2e372a0b5b837116eee8f968840393d85975a1531346807000000000000000076bc91399edda1de98976ee0774e2ad3b21dd38ad9f5f34d2c816a832747fe7f4c9e745609c40b18e290e9e0", + "refundAddress": "0x0000000000000000000000000000000000000000", + "to": "0x0000000000000000000000000000000000000002", + "value": "0x0" + }, + "result": { + "address": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x60", + "output": "0xe57cf1c1d6132b9cfd9e90f54f907c038b47941b2a7f3800783af26e852ec116" + }, + "subtraces": 0, + "traceAddress": [ + 19, + 0 + ], + "type": "call" + }, + { + "action": { + "author": "0x0000000000000000000000000000000000000000", + "address": "0x0000000000000000000000000000000000000000", + "balance": "0x0", + "callType": "call", + "from": "0x6cc68eb482a757c690dd151d2bd5e774ada38bdc", + "gas": "0x68", + "input": "0xe57cf1c1d6132b9cfd9e90f54f907c038b47941b2a7f3800783af26e852ec116", + "refundAddress": "0x0000000000000000000000000000000000000000", + "to": "0x0000000000000000000000000000000000000002", 
+ "value": "0x0" + }, + "result": { + "address": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x48", + "output": "0x8d5b6fafc6216500f9ef1ab16b30a59df9122d7de0f4910a0000000000000000" + }, + "subtraces": 0, + "traceAddress": [ + 19, + 1 + ], + "type": "call" + } + ] +} \ No newline at end of file diff --git a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/inner_create_oog_outer_throw.json b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/inner_create_oog_outer_throw.json new file mode 100644 index 00000000..6c4ce180 --- /dev/null +++ b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/inner_create_oog_outer_throw.json @@ -0,0 +1,88 @@ +{ + "context": { + "difficulty": "3451177886", + "gasLimit": "4709286", + "miner": "0x1585936b53834b021f68cc13eeefdec2efc8e724", + "number": "2290744", + "timestamp": "1513616439" + }, + "genesis": { + "alloc": { + "0x1d3ddf7caf024f253487e18bc4a15b1a360c170a": { + "balance": "0x0", + "code": "0x606060405263ffffffff60e060020a6000350416633b91f50681146100505780635bb47808146100715780635f51fca01461008c578063bc7647a9146100ad578063f1bd0d7a146100c8575b610000565b346100005761006f600160a060020a03600435811690602435166100e9565b005b346100005761006f600160a060020a0360043516610152565b005b346100005761006f600160a060020a036004358116906024351661019c565b005b346100005761006f600160a060020a03600435166101fa565b005b346100005761006f600160a060020a0360043581169060243516610db8565b005b600160a060020a038083166000908152602081905260408120549091908116903316811461011657610000565b839150600160a060020a038316151561012d573392505b6101378284610e2e565b6101418284610db8565b61014a826101fa565b5b5b50505050565b600154600160a060020a03908116903316811461016e57610000565b6002805473ffffffffffffffffffffffffffffffffffffffff1916600160a060020a0384161790555b5b5050565b600254600160a060020a0390811690331681146101b857610000565b600160a060020a038381166000908152602081905260409020805473ffffffffffffffffffffffffffffffffffffffff1916918416919091179055
5b5b505050565b6040805160e260020a631a481fc102815260016024820181905260026044830152606482015262093a8060848201819052600060a4830181905260c06004840152601e60c48401527f736574456e7469747953746174757328616464726573732c75696e743829000060e484015292519091600160a060020a038516916369207f049161010480820192879290919082900301818387803b156100005760325a03f1156100005750506040805160e260020a63379938570281526000602482018190526001604483015260606004830152602360648301527f626567696e506f6c6c28616464726573732c75696e7436342c626f6f6c2c626f60848301527f6f6c29000000000000000000000000000000000000000000000000000000000060a48301529151600160a060020a038716935063de64e15c9260c48084019391929182900301818387803b156100005760325a03f1156100005750506040805160e260020a631a481fc102815260016024820181905260026044830152606482015267ffffffffffffffff8416608482015260ff851660a482015260c06004820152601960c48201527f61646453746f636b28616464726573732c75696e74323536290000000000000060e48201529051600160a060020a03861692506369207f04916101048082019260009290919082900301818387803b156100005760325a03f1156100005750506040805160e260020a631a481fc102815260016024820181905260026044830152606482015267ffffffffffffffff8416608482015260ff851660a482015260c06004820152601960c48201527f697373756553746f636b2875696e74382c75696e74323536290000000000000060e48201529051600160a060020a03861692506369207f04916101048082019260009290919082900301818387803b156100005760325a03f1156100005750506040805160e260020a63379938570281526002602482015260006044820181905260606004830152602160648301527f6772616e7453746f636b2875696e74382c75696e743235362c61646472657373608483015260f860020a60290260a48301529151600160a060020a038716935063de64e15c9260c48084019391929182900301818387803b156100005760325a03f115610000575050604080517f010555b8000000000000000000000000000000000000000000000000000000008152600160a060020a03338116602483015260006044830181905260606004840152603c60648401527f6772616e7456657374656453746f636b2875696e74382c75696e743235362c6160848401527f6464726573732c75696e7436342c75696e7436342c75696e74363429
0000000060a48401529251908716935063010555b89260c48084019391929182900301818387803b156100005760325a03f1156100005750506040805160e260020a631a481fc102815260016024820181905260026044830152606482015267ffffffffffffffff8416608482015260ff851660a482015260c06004820152601260c48201527f626567696e53616c65286164647265737329000000000000000000000000000060e48201529051600160a060020a03861692506369207f04916101048082019260009290919082900301818387803b156100005760325a03f1156100005750506040805160e260020a63379938570281526002602482015260006044820181905260606004830152601a60648301527f7472616e7366657253616c6546756e64732875696e743235362900000000000060848301529151600160a060020a038716935063de64e15c9260a48084019391929182900301818387803b156100005760325a03f1156100005750506040805160e260020a631a481fc102815260016024820181905260026044830152606482015267ffffffffffffffff8416608482015260ff851660a482015260c06004820152602d60c48201527f7365744163636f756e74696e6753657474696e67732875696e743235362c756960e48201527f6e7436342c75696e7432353629000000000000000000000000000000000000006101048201529051600160a060020a03861692506369207f04916101248082019260009290919082900301818387803b156100005760325a03f1156100005750506040805160e260020a63379938570281526002602482015260006044820181905260606004830152603460648301527f637265617465526563757272696e6752657761726428616464726573732c756960848301527f6e743235362c75696e7436342c737472696e672900000000000000000000000060a48301529151600160a060020a038716935063de64e15c9260c48084019391929182900301818387803b156100005760325a03f1156100005750506040805160e260020a63379938570281526002602482015260006044820181905260606004830152601b60648301527f72656d6f7665526563757272696e675265776172642875696e7429000000000060848301529151600160a060020a038716935063de64e15c9260a48084019391929182900301818387803b156100005760325a03f1156100005750506040805160e260020a63379938570281526002602482015260006044820181905260606004830152602360648301527f697373756552657761726428616464726573732c75696e743235362c7374726960848301527f6e6729000000000000000000
000000000000000000000000000000000000000060a48301529151600160a060020a038716935063de64e15c9260c48084019391929182900301818387803b156100005760325a03f1156100005750506040805160e260020a6337993857028152600160248201819052604482015260606004820152602260648201527f61737369676e53746f636b2875696e74382c616464726573732c75696e743235608482015260f060020a6136290260a48201529051600160a060020a038616925063de64e15c9160c48082019260009290919082900301818387803b156100005760325a03f1156100005750506040805160e260020a6337993857028152600160248201819052604482015260606004820152602260648201527f72656d6f766553746f636b2875696e74382c616464726573732c75696e743235608482015260f060020a6136290260a48201529051600160a060020a038616925063de64e15c9160c48082019260009290919082900301818387803b156100005760325a03f1156100005750506040805160e260020a631a481fc102815260026024808301919091526003604483015260006064830181905267ffffffffffffffff8616608484015260ff871660a484015260c0600484015260c48301919091527f7365744164647265737342796c617728737472696e672c616464726573732c6260e48301527f6f6f6c29000000000000000000000000000000000000000000000000000000006101048301529151600160a060020a03871693506369207f04926101248084019391929182900301818387803b156100005760325a03f1156100005750506040805160e260020a631a481fc1028152600260248201526003604482015260006064820181905267ffffffffffffffff8516608483015260ff861660a483015260c06004830152602160c48301527f73657453746174757342796c617728737472696e672c75696e74382c626f6f6c60e483015260f860020a6029026101048301529151600160a060020a03871693506369207f04926101248084019391929182900301818387803b156100005760325a03f1156100005750506040805160e260020a631a481fc1028152600260248201526003604482015260006064820181905267ffffffffffffffff8516608483015260ff861660a483015260c06004830152603860c48301527f736574566f74696e6742796c617728737472696e672c75696e743235362c756960e48301527f6e743235362c626f6f6c2c75696e7436342c75696e74382900000000000000006101048301529151600160a060020a03871693506369207f04926101248084019391929182900301818387803b156100005760325a03f115
610000575050505b505050565b604080517f225553a4000000000000000000000000000000000000000000000000000000008152600160a060020a0383811660048301526002602483015291519184169163225553a49160448082019260009290919082900301818387803b156100005760325a03f115610000575050505b5050565b600082604051611fd280610f488339600160a060020a03909216910190815260405190819003602001906000f0801561000057905082600160a060020a03166308b027418260016040518363ffffffff1660e060020a0281526004018083600160a060020a0316600160a060020a0316815260200182815260200192505050600060405180830381600087803b156100005760325a03f115610000575050604080517fa14e3ee300000000000000000000000000000000000000000000000000000000815260006004820181905260016024830152600160a060020a0386811660448401529251928716935063a14e3ee39260648084019382900301818387803b156100005760325a03f115610000575050505b5050505600606060405234620000005760405160208062001fd283398101604052515b805b600a8054600160a060020a031916600160a060020a0383161790555b506001600d819055600e81905560408051808201909152600c8082527f566f74696e672053746f636b00000000000000000000000000000000000000006020928301908152600b805460008290528251601860ff1990911617825590947f0175b7a638427703f0dbe7bb9bbf987a2551717b34e79f33b5b1008d1fa01db9600291831615610100026000190190921604601f0193909304830192906200010c565b828001600101855582156200010c579182015b828111156200010c578251825591602001919060010190620000ef565b5b50620001309291505b808211156200012c576000815560010162000116565b5090565b50506040805180820190915260038082527f43565300000000000000000000000000000000000000000000000000000000006020928301908152600c805460008290528251600660ff1990911617825590937fdf6966c971051c3d54ec59162606531493a51404a002842f56009d7e5cf4a8c760026001841615610100026000190190931692909204601f010481019291620001f7565b82800160010185558215620001f7579182015b82811115620001f7578251825591602001919060010190620001da565b5b506200021b9291505b808211156200012c576000815560010162000116565b5090565b50505b505b611da280620002306000396000f3006060604052361561019a5763ffffffff60e060020a600035041662e1
986d811461019f57806302a72a4c146101d657806306eb4e421461020157806306fdde0314610220578063095ea7b3146102ad578063158ccb99146102dd57806318160ddd146102f85780631cf65a781461031757806323b872dd146103365780632c71e60a1461036c57806333148fd6146103ca578063435ebc2c146103f55780635eeb6e451461041e578063600e85b71461043c5780636103d70b146104a157806362c1e46a146104b05780636c182e99146104ba578063706dc87c146104f057806370a082311461052557806377174f851461055057806395d89b411461056f578063a7771ee3146105fc578063a9059cbb14610629578063ab377daa14610659578063b25dbb5e14610685578063b89a73cb14610699578063ca5eb5e1146106c6578063cbcf2e5a146106e1578063d21f05ba1461070e578063d347c2051461072d578063d96831e114610765578063dd62ed3e14610777578063df3c211b146107a8578063e2982c21146107d6578063eb944e4c14610801575b610000565b34610000576101d4600160a060020a036004351660243567ffffffffffffffff6044358116906064358116906084351661081f565b005b34610000576101ef600160a060020a0360043516610a30565b60408051918252519081900360200190f35b34610000576101ef610a4f565b60408051918252519081900360200190f35b346100005761022d610a55565b604080516020808252835181830152835191928392908301918501908083838215610273575b80518252602083111561027357601f199092019160209182019101610253565b505050905090810190601f16801561029f5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34610000576102c9600160a060020a0360043516602435610ae3565b604080519115158252519081900360200190f35b34610000576101d4600160a060020a0360043516610b4e565b005b34610000576101ef610b89565b60408051918252519081900360200190f35b34610000576101ef610b8f565b60408051918252519081900360200190f35b34610000576102c9600160a060020a0360043581169060243516604435610b95565b604080519115158252519081900360200190f35b3461000057610388600160a060020a0360043516602435610bb7565b60408051600160a060020a039096168652602086019490945267ffffffffffffffff928316858501529082166060850152166080830152519081900360a00190f35b34610000576101ef600160a060020a0360043516610c21565b60408051918252519081900360200190f35b3461000057610402610c40565b60
408051600160a060020a039092168252519081900360200190f35b34610000576101d4600160a060020a0360043516602435610c4f565b005b3461000057610458600160a060020a0360043516602435610cc9565b60408051600160a060020a03909716875260208701959095528585019390935267ffffffffffffffff9182166060860152811660808501521660a0830152519081900360c00190f35b34610000576101d4610d9e565b005b6101d4610e1e565b005b34610000576104d3600160a060020a0360043516610e21565b6040805167ffffffffffffffff9092168252519081900360200190f35b3461000057610402600160a060020a0360043516610ead565b60408051600160a060020a039092168252519081900360200190f35b34610000576101ef600160a060020a0360043516610ef9565b60408051918252519081900360200190f35b34610000576101ef610f18565b60408051918252519081900360200190f35b346100005761022d610f1e565b604080516020808252835181830152835191928392908301918501908083838215610273575b80518252602083111561027357601f199092019160209182019101610253565b505050905090810190601f16801561029f5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34610000576102c9600160a060020a0360043516610fac565b604080519115158252519081900360200190f35b34610000576102c9600160a060020a0360043516602435610fc2565b604080519115158252519081900360200190f35b3461000057610402600435610fe2565b60408051600160a060020a039092168252519081900360200190f35b34610000576101d46004351515610ffd565b005b34610000576102c9600160a060020a036004351661104c565b604080519115158252519081900360200190f35b34610000576101d4600160a060020a0360043516611062565b005b34610000576102c9600160a060020a0360043516611070565b604080519115158252519081900360200190f35b34610000576101ef6110f4565b60408051918252519081900360200190f35b34610000576101ef600160a060020a036004351667ffffffffffffffff602435166110fa565b60408051918252519081900360200190f35b34610000576101d4600435611121565b005b34610000576101ef600160a060020a03600435811690602435166111c6565b60408051918252519081900360200190f35b34610000576101ef6004356024356044356064356084356111f3565b60408051918252519081900360200190f35b34610000576101ef600160a060020a036004351661
128c565b60408051918252519081900360200190f35b34610000576101d4600160a060020a036004351660243561129e565b005b6040805160a08101825260008082526020820181905291810182905260608101829052608081019190915267ffffffffffffffff848116908416101561086457610000565b8367ffffffffffffffff168267ffffffffffffffff16101561088557610000565b8267ffffffffffffffff168267ffffffffffffffff1610156108a657610000565b506040805160a081018252600160a060020a033381168252602080830188905267ffffffffffffffff80871684860152858116606085015287166080840152908816600090815260039091529190912080546001810180835582818380158290116109615760030281600302836000526020600020918201910161096191905b8082111561095d578054600160a060020a031916815560006001820155600281018054600160c060020a0319169055600301610926565b5090565b5b505050916000526020600020906003020160005b5082518154600160a060020a031916600160a060020a03909116178155602083015160018201556040830151600290910180546060850151608086015167ffffffffffffffff1990921667ffffffffffffffff948516176fffffffffffffffff00000000000000001916604060020a918516919091021777ffffffffffffffff000000000000000000000000000000001916608060020a939091169290920291909117905550610a268686610fc2565b505b505050505050565b600160a060020a0381166000908152600360205260409020545b919050565b60055481565b600b805460408051602060026001851615610100026000190190941693909304601f81018490048402820184019092528181529291830182828015610adb5780601f10610ab057610100808354040283529160200191610adb565b820191906000526020600020905b815481529060010190602001808311610abe57829003601f168201915b505050505081565b600160a060020a03338116600081815260026020908152604080832094871680845294825280832086905580518681529051929493927f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925929181900390910190a35060015b92915050565b600a5433600160a060020a03908116911614610b6957610000565b600a8054600160a060020a031916600160a060020a0383161790555b5b50565b60005481565b60005b90565b6000610ba2848484611600565b610bad8484846116e2565b90505b9392505050565b60036020528160005260406000208181548110156100005790600052
6020600020906003020160005b5080546001820154600290920154600160a060020a03909116935090915067ffffffffffffffff80821691604060020a8104821691608060020a9091041685565b600160a060020a0381166000908152600860205260409020545b919050565b600a54600160a060020a031681565b600a5433600160a060020a03908116911614610c6a57610000565b610c7660005482611714565b6000908155600160a060020a038316815260016020526040902054610c9b9082611714565b600160a060020a038316600090815260016020526040812091909155610cc390839083611600565b5b5b5050565b6000600060006000600060006000600360008a600160a060020a0316600160a060020a0316815260200190815260200160002088815481101561000057906000526020600020906003020160005b508054600182015460028301546040805160a081018252600160a060020a039094168085526020850184905267ffffffffffffffff808416928601839052604060020a8404811660608701819052608060020a9094041660808601819052909c50929a509197509095509350909150610d90904261172d565b94505b509295509295509295565b33600160a060020a038116600090815260066020526040902054801515610dc457610000565b8030600160a060020a0316311015610ddb57610000565b600160a060020a0382166000818152600660205260408082208290555183156108fc0291849190818181858888f193505050501515610cc357610000565b5b5050565b5b565b600160a060020a03811660009081526003602052604081205442915b81811015610ea557600160a060020a03841660009081526003602052604090208054610e9a9190839081101561000057906000526020600020906003020160005b5060020154604060020a900467ffffffffffffffff168461177d565b92505b600101610e3d565b5b5050919050565b600160a060020a0380821660009081526007602052604081205490911615610eef57600160a060020a0380831660009081526007602052604090205416610ef1565b815b90505b919050565b600160a060020a0381166000908152600160205260409020545b919050565b600d5481565b600c805460408051602060026001851615610100026000190190941693909304601f81018490048402820184019092528181529291830182828015610adb5780601f10610ab057610100808354040283529160200191610adb565b820191906000526020600020905b815481529060010190602001808311610abe57829003601f168201915b505050505081565b60006000610fb983610c21565b1190
505b919050565b6000610fcf338484611600565b610fd983836117ac565b90505b92915050565b600460205260009081526040902054600160a060020a031681565b8015801561101a575061100f33610ef9565b61101833610c21565b115b1561102457610000565b33600160a060020a03166000908152600960205260409020805460ff19168215151790555b50565b60006000610fb983610ef9565b1190505b919050565b610b8533826117dc565b5b50565b600a54604080516000602091820181905282517fcbcf2e5a000000000000000000000000000000000000000000000000000000008152600160a060020a03868116600483015293519194939093169263cbcf2e5a92602480830193919282900301818787803b156100005760325a03f115610000575050604051519150505b919050565b600e5481565b6000610fd961110984846118b2565b61111385856119b6565b611a05565b90505b92915050565b600a5433600160a060020a0390811691161461113c57610000565b61114860005482611a1f565b600055600554600190101561116c57600a5461116c90600160a060020a0316611a47565b5b600a54600160a060020a03166000908152600160205260409020546111929082611a1f565b600a8054600160a060020a039081166000908152600160205260408120939093559054610b8592911683611600565b5b5b50565b600160a060020a038083166000908152600260209081526040808320938516835292905220545b92915050565b6000600060008487101561120a5760009250611281565b8387111561121a57879250611281565b61123f6112308961122b888a611714565b611a90565b61123a8689611714565b611abc565b915081925061124e8883611714565b905061127e8361127961126a8461122b8c8b611714565b611a90565b61123a888b611714565b611abc565b611a1f565b92505b505095945050505050565b60066020526000908152604090205481565b600160a060020a03821660009081526003602052604081208054829190849081101561000057906000526020600020906003020160005b50805490925033600160a060020a039081169116146112f357610000565b6040805160a0810182528354600160a060020a0316815260018401546020820152600284015467ffffffffffffffff80821693830193909352604060020a810483166060830152608060020a900490911660808201526113539042611af9565b600160a060020a0385166000908152600360205260409020805491925090849081101561000057906000526020600020906003020160005b508054600160a060020a0319168155600060018201819055
60029091018054600160c060020a0319169055600160a060020a0385168152600360205260409020805460001981019081101561000057906000526020600020906003020160005b50600160a060020a03851660009081526003602052604090208054859081101561000057906000526020600020906003020160005b5081548154600160a060020a031916600160a060020a03918216178255600180840154908301556002928301805493909201805467ffffffffffffffff191667ffffffffffffffff948516178082558354604060020a908190048616026fffffffffffffffff000000000000000019909116178082559254608060020a9081900490941690930277ffffffffffffffff00000000000000000000000000000000199092169190911790915584166000908152600360205260409020805460001981018083559190829080158290116115485760030281600302836000526020600020918201910161154891905b8082111561095d578054600160a060020a031916815560006001820155600281018054600160c060020a0319169055600301610926565b5090565b5b505050600160a060020a033316600090815260016020526040902054611570915082611a1f565b600160a060020a03338116600090815260016020526040808220939093559086168152205461159f9082611714565b600160a060020a038086166000818152600160209081526040918290209490945580518581529051339093169391927fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef929181900390910190a35b50505050565b600160a060020a0383161561166e576116466008600061161f86610ead565b600160a060020a0316600160a060020a031681526020019081526020016000205482611714565b6008600061165386610ead565b600160a060020a031681526020810191909152604001600020555b600160a060020a038216156116dc576116b46008600061168d85610ead565b600160a060020a0316600160a060020a031681526020019081526020016000205482611a1f565b600860006116c185610ead565b600160a060020a031681526020810191909152604001600020555b5b505050565b600083826116f082426110fa565b8111156116fc57610000565b611707868686611b1b565b92505b5b50509392505050565b600061172283831115611b4d565b508082035b92915050565b6000610fd983602001518367ffffffffffffffff16856080015167ffffffffffffffff16866040015167ffffffffffffffff16876060015167ffffffffffffffff166111f3565b90505b92915050565b60008167ffffffffffffffff1683
67ffffffffffffffff1610156117a15781610fd9565b825b90505b92915050565b600033826117ba82426110fa565b8111156117c657610000565b6117d08585611b5d565b92505b5b505092915050565b6117e582610ef9565b6117ee83610c21565b11156117f957610000565b600160a060020a03811660009081526009602052604090205460ff16158015611834575081600160a060020a031681600160a060020a031614155b1561183e57610000565b61184782611070565b1561185157610000565b611864828261185f85610ef9565b611600565b600160a060020a0382811660009081526007602052604090208054600160a060020a031916918316918217905561189a82610ead565b600160a060020a031614610cc357610000565b5b5050565b600160a060020a038216600090815260036020526040812054815b818110156119885761197d836112796003600089600160a060020a0316600160a060020a0316815260200190815260200160002084815481101561000057906000526020600020906003020160005b506040805160a0810182528254600160a060020a031681526001830154602082015260029092015467ffffffffffffffff80821692840192909252604060020a810482166060840152608060020a900416608082015287611af9565b611a1f565b92505b6001016118cd565b600160a060020a0385166000908152600160205260409020546117d09084611714565b92505b505092915050565b600060006119c384611070565b80156119d157506000600d54115b90506119fb816119e9576119e485610ef9565b6119ec565b60005b6111138686611b7b565b611a05565b91505b5092915050565b60008183106117a15781610fd9565b825b90505b92915050565b6000828201611a3c848210801590611a375750838210155b611b4d565b8091505b5092915050565b611a508161104c565b15611a5a57610b85565b6005805460009081526004602052604090208054600160a060020a031916600160a060020a038416179055805460010190555b50565b6000828202611a3c841580611a37575083858381156100005704145b611b4d565b8091505b5092915050565b60006000611acc60008411611b4d565b8284811561000057049050611a3c838581156100005706828502018514611b4d565b8091505b5092915050565b6000610fd98360200151611b0d858561172d565b611714565b90505b92915050565b60008382611b2982426110fa565b811115611b3557610000565b611707868686611b8f565b92505b5b50509392505050565b801515610b8557610000565b5b50565b6000611b6883611a47565b610fd98383611c92565b90
505b92915050565b6000610fd983610ef9565b90505b92915050565b600160a060020a038084166000908152600260209081526040808320338516845282528083205493861683526001909152812054909190611bd09084611a1f565b600160a060020a038086166000908152600160205260408082209390935590871681522054611bff9084611714565b600160a060020a038616600090815260016020526040902055611c228184611714565b600160a060020a038087166000818152600260209081526040808320338616845282529182902094909455805187815290519288169391927fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef929181900390910190a3600191505b509392505050565b60003382611ca082426110fa565b811115611cac57610000565b6117d08585611cc2565b92505b5b505092915050565b600160a060020a033316600090815260016020526040812054611ce59083611714565b600160a060020a033381166000908152600160205260408082209390935590851681522054611d149083611a1f565b600160a060020a038085166000818152600160209081526040918290209490945580518681529051919333909316927fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef92918290030190a35060015b929150505600a165627a7a72305820bfa5ddd3fecf3f43aed25385ec7ec3ef79638c2e58d99f85d9a3cc494183bf160029a165627a7a723058200e78a5f7e0f91739035d0fbf5eca02f79377210b722f63431f29a22e2880b3bd0029", + "nonce": "789", + "storage": { + "0xfe9ec0542a1c009be8b1f3acf43af97100ffff42eb736850fb038fa1151ad4d9": "0x000000000000000000000000e4a13bc304682a903e9472f469c33801dd18d9e8" + } + }, + "0x5cb4a6b902fcb21588c86c3517e797b07cdaadb9": { + "balance": "0x0", + "code": "0x", + "nonce": "0", + "storage": {} + }, + "0xe4a13bc304682a903e9472f469c33801dd18d9e8": { + "balance": "0x33c763c929f62c4f", + "code": "0x", + "nonce": "14", + "storage": {} + } + }, + "config": { + "byzantiumBlock": 1700000, + "chainId": 3, + "daoForkSupport": true, + "eip150Block": 0, + "eip150Hash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d", + "eip155Block": 10, + "eip158Block": 10, + "ethash": {}, + "homesteadBlock": 0 + }, + "difficulty": "3451177886", + "extraData": 
"0x4554482e45544846414e532e4f52472d4641313738394444", + "gasLimit": "4713874", + "hash": "0x5d52a672417cd1269bf4f7095e25dcbf837747bba908cd5ef809dc1bd06144b5", + "miner": "0xbbf5029fd710d227630c8b7d338051b8e76d50b3", + "mixHash": "0x01a12845ed546b94a038a7a03e8df8d7952024ed41ccb3db7a7ade4abc290ce1", + "nonce": "0x28c446f1cb9748c1", + "number": "2290743", + "stateRoot": "0x4898aceede76739daef76448a367d10015a2c022c9e7909b99a10fbf6fb16708", + "timestamp": "1513616414", + "totalDifficulty": "7146523769022564" + }, + "input": "0xf8aa0e8509502f9000830493e0941d3ddf7caf024f253487e18bc4a15b1a360c170a80b8443b91f506000000000000000000000000a14bdd7e5666d784dcce98ad24d383a6b1cd4182000000000000000000000000e4a13bc304682a903e9472f469c33801dd18d9e829a0524564944fa419f5c189b5074044f89210c6d6b2d77ee8f7f12a927d59b636dfa0015b28986807a424b18b186ee6642d76739df36cad802d20e8c00e79a61d7281", + "result": [ + { + "action": { + "callType": "call", + "from": "0xe4a13bc304682a903e9472f469c33801dd18d9e8", + "gas": "0x493e0", + "input": "0x3b91f506000000000000000000000000a14bdd7e5666d784dcce98ad24d383a6b1cd4182000000000000000000000000e4a13bc304682a903e9472f469c33801dd18d9e8", + "to": "0x1d3ddf7caf024f253487e18bc4a15b1a360c170a", + "value": "0x0" + }, + "blockNumber": 2290744, + "error": "invalid jump destination", + "result": {}, + "subtraces": 1, + "traceAddress": [], + "type": "call" + }, + { + "action": { + "from": "0x1d3ddf7caf024f253487e18bc4a15b1a360c170a", + "gas": "0x39ff0", + "init": 
"0x606060405234620000005760405160208062001fd283398101604052515b805b600a8054600160a060020a031916600160a060020a0383161790555b506001600d819055600e81905560408051808201909152600c8082527f566f74696e672053746f636b00000000000000000000000000000000000000006020928301908152600b805460008290528251601860ff1990911617825590947f0175b7a638427703f0dbe7bb9bbf987a2551717b34e79f33b5b1008d1fa01db9600291831615610100026000190190921604601f0193909304830192906200010c565b828001600101855582156200010c579182015b828111156200010c578251825591602001919060010190620000ef565b5b50620001309291505b808211156200012c576000815560010162000116565b5090565b50506040805180820190915260038082527f43565300000000000000000000000000000000000000000000000000000000006020928301908152600c805460008290528251600660ff1990911617825590937fdf6966c971051c3d54ec59162606531493a51404a002842f56009d7e5cf4a8c760026001841615610100026000190190931692909204601f010481019291620001f7565b82800160010185558215620001f7579182015b82811115620001f7578251825591602001919060010190620001da565b5b506200021b9291505b808211156200012c576000815560010162000116565b5090565b50505b505b611da280620002306000396000f3006060604052361561019a5763ffffffff60e060020a600035041662e1986d811461019f57806302a72a4c146101d657806306eb4e421461020157806306fdde0314610220578063095ea7b3146102ad578063158ccb99146102dd57806318160ddd146102f85780631cf65a781461031757806323b872dd146103365780632c71e60a1461036c57806333148fd6146103ca578063435ebc2c146103f55780635eeb6e451461041e578063600e85b71461043c5780636103d70b146104a157806362c1e46a146104b05780636c182e99146104ba578063706dc87c146104f057806370a082311461052557806377174f851461055057806395d89b411461056f578063a7771ee3146105fc578063a9059cbb14610629578063ab377daa14610659578063b25dbb5e14610685578063b89a73cb14610699578063ca5eb5e1146106c6578063cbcf2e5a146106e1578063d21f05ba1461070e578063d347c2051461072d578063d96831e114610765578063dd62ed3e14610777578063df3c211b146107a8578063e2982c21146107d6578063eb944e4c14610801575b610000565b34610000576101d4600160a060020a036004351660243
567ffffffffffffffff6044358116906064358116906084351661081f565b005b34610000576101ef600160a060020a0360043516610a30565b60408051918252519081900360200190f35b34610000576101ef610a4f565b60408051918252519081900360200190f35b346100005761022d610a55565b604080516020808252835181830152835191928392908301918501908083838215610273575b80518252602083111561027357601f199092019160209182019101610253565b505050905090810190601f16801561029f5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34610000576102c9600160a060020a0360043516602435610ae3565b604080519115158252519081900360200190f35b34610000576101d4600160a060020a0360043516610b4e565b005b34610000576101ef610b89565b60408051918252519081900360200190f35b34610000576101ef610b8f565b60408051918252519081900360200190f35b34610000576102c9600160a060020a0360043581169060243516604435610b95565b604080519115158252519081900360200190f35b3461000057610388600160a060020a0360043516602435610bb7565b60408051600160a060020a039096168652602086019490945267ffffffffffffffff928316858501529082166060850152166080830152519081900360a00190f35b34610000576101ef600160a060020a0360043516610c21565b60408051918252519081900360200190f35b3461000057610402610c40565b60408051600160a060020a039092168252519081900360200190f35b34610000576101d4600160a060020a0360043516602435610c4f565b005b3461000057610458600160a060020a0360043516602435610cc9565b60408051600160a060020a03909716875260208701959095528585019390935267ffffffffffffffff9182166060860152811660808501521660a0830152519081900360c00190f35b34610000576101d4610d9e565b005b6101d4610e1e565b005b34610000576104d3600160a060020a0360043516610e21565b6040805167ffffffffffffffff9092168252519081900360200190f35b3461000057610402600160a060020a0360043516610ead565b60408051600160a060020a039092168252519081900360200190f35b34610000576101ef600160a060020a0360043516610ef9565b60408051918252519081900360200190f35b34610000576101ef610f18565b60408051918252519081900360200190f35b346100005761022d610f1e565b6040805160208082528351818301528351919283929083019185019080838382156
10273575b80518252602083111561027357601f199092019160209182019101610253565b505050905090810190601f16801561029f5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34610000576102c9600160a060020a0360043516610fac565b604080519115158252519081900360200190f35b34610000576102c9600160a060020a0360043516602435610fc2565b604080519115158252519081900360200190f35b3461000057610402600435610fe2565b60408051600160a060020a039092168252519081900360200190f35b34610000576101d46004351515610ffd565b005b34610000576102c9600160a060020a036004351661104c565b604080519115158252519081900360200190f35b34610000576101d4600160a060020a0360043516611062565b005b34610000576102c9600160a060020a0360043516611070565b604080519115158252519081900360200190f35b34610000576101ef6110f4565b60408051918252519081900360200190f35b34610000576101ef600160a060020a036004351667ffffffffffffffff602435166110fa565b60408051918252519081900360200190f35b34610000576101d4600435611121565b005b34610000576101ef600160a060020a03600435811690602435166111c6565b60408051918252519081900360200190f35b34610000576101ef6004356024356044356064356084356111f3565b60408051918252519081900360200190f35b34610000576101ef600160a060020a036004351661128c565b60408051918252519081900360200190f35b34610000576101d4600160a060020a036004351660243561129e565b005b6040805160a08101825260008082526020820181905291810182905260608101829052608081019190915267ffffffffffffffff848116908416101561086457610000565b8367ffffffffffffffff168267ffffffffffffffff16101561088557610000565b8267ffffffffffffffff168267ffffffffffffffff1610156108a657610000565b506040805160a081018252600160a060020a033381168252602080830188905267ffffffffffffffff80871684860152858116606085015287166080840152908816600090815260039091529190912080546001810180835582818380158290116109615760030281600302836000526020600020918201910161096191905b8082111561095d578054600160a060020a031916815560006001820155600281018054600160c060020a0319169055600301610926565b5090565b5b505050916000526020600020906003020160005b5082518154600160a060020a0319166
00160a060020a03909116178155602083015160018201556040830151600290910180546060850151608086015167ffffffffffffffff1990921667ffffffffffffffff948516176fffffffffffffffff00000000000000001916604060020a918516919091021777ffffffffffffffff000000000000000000000000000000001916608060020a939091169290920291909117905550610a268686610fc2565b505b505050505050565b600160a060020a0381166000908152600360205260409020545b919050565b60055481565b600b805460408051602060026001851615610100026000190190941693909304601f81018490048402820184019092528181529291830182828015610adb5780601f10610ab057610100808354040283529160200191610adb565b820191906000526020600020905b815481529060010190602001808311610abe57829003601f168201915b505050505081565b600160a060020a03338116600081815260026020908152604080832094871680845294825280832086905580518681529051929493927f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925929181900390910190a35060015b92915050565b600a5433600160a060020a03908116911614610b6957610000565b600a8054600160a060020a031916600160a060020a0383161790555b5b50565b60005481565b60005b90565b6000610ba2848484611600565b610bad8484846116e2565b90505b9392505050565b600360205281600052604060002081815481101561000057906000526020600020906003020160005b5080546001820154600290920154600160a060020a03909116935090915067ffffffffffffffff80821691604060020a8104821691608060020a9091041685565b600160a060020a0381166000908152600860205260409020545b919050565b600a54600160a060020a031681565b600a5433600160a060020a03908116911614610c6a57610000565b610c7660005482611714565b6000908155600160a060020a038316815260016020526040902054610c9b9082611714565b600160a060020a038316600090815260016020526040812091909155610cc390839083611600565b5b5b5050565b6000600060006000600060006000600360008a600160a060020a0316600160a060020a0316815260200190815260200160002088815481101561000057906000526020600020906003020160005b508054600182015460028301546040805160a081018252600160a060020a039094168085526020850184905267ffffffffffffffff808416928601839052604060020a8404811660608701819052608060020a9094041
660808601819052909c50929a509197509095509350909150610d90904261172d565b94505b509295509295509295565b33600160a060020a038116600090815260066020526040902054801515610dc457610000565b8030600160a060020a0316311015610ddb57610000565b600160a060020a0382166000818152600660205260408082208290555183156108fc0291849190818181858888f193505050501515610cc357610000565b5b5050565b5b565b600160a060020a03811660009081526003602052604081205442915b81811015610ea557600160a060020a03841660009081526003602052604090208054610e9a9190839081101561000057906000526020600020906003020160005b5060020154604060020a900467ffffffffffffffff168461177d565b92505b600101610e3d565b5b5050919050565b600160a060020a0380821660009081526007602052604081205490911615610eef57600160a060020a0380831660009081526007602052604090205416610ef1565b815b90505b919050565b600160a060020a0381166000908152600160205260409020545b919050565b600d5481565b600c805460408051602060026001851615610100026000190190941693909304601f81018490048402820184019092528181529291830182828015610adb5780601f10610ab057610100808354040283529160200191610adb565b820191906000526020600020905b815481529060010190602001808311610abe57829003601f168201915b505050505081565b60006000610fb983610c21565b1190505b919050565b6000610fcf338484611600565b610fd983836117ac565b90505b92915050565b600460205260009081526040902054600160a060020a031681565b8015801561101a575061100f33610ef9565b61101833610c21565b115b1561102457610000565b33600160a060020a03166000908152600960205260409020805460ff19168215151790555b50565b60006000610fb983610ef9565b1190505b919050565b610b8533826117dc565b5b50565b600a54604080516000602091820181905282517fcbcf2e5a000000000000000000000000000000000000000000000000000000008152600160a060020a03868116600483015293519194939093169263cbcf2e5a92602480830193919282900301818787803b156100005760325a03f115610000575050604051519150505b919050565b600e5481565b6000610fd961110984846118b2565b61111385856119b6565b611a05565b90505b92915050565b600a5433600160a060020a0390811691161461113c57610000565b61114860005482611a1f565b600055600554600190101561116
c57600a5461116c90600160a060020a0316611a47565b5b600a54600160a060020a03166000908152600160205260409020546111929082611a1f565b600a8054600160a060020a039081166000908152600160205260408120939093559054610b8592911683611600565b5b5b50565b600160a060020a038083166000908152600260209081526040808320938516835292905220545b92915050565b6000600060008487101561120a5760009250611281565b8387111561121a57879250611281565b61123f6112308961122b888a611714565b611a90565b61123a8689611714565b611abc565b915081925061124e8883611714565b905061127e8361127961126a8461122b8c8b611714565b611a90565b61123a888b611714565b611abc565b611a1f565b92505b505095945050505050565b60066020526000908152604090205481565b600160a060020a03821660009081526003602052604081208054829190849081101561000057906000526020600020906003020160005b50805490925033600160a060020a039081169116146112f357610000565b6040805160a0810182528354600160a060020a0316815260018401546020820152600284015467ffffffffffffffff80821693830193909352604060020a810483166060830152608060020a900490911660808201526113539042611af9565b600160a060020a0385166000908152600360205260409020805491925090849081101561000057906000526020600020906003020160005b508054600160a060020a031916815560006001820181905560029091018054600160c060020a0319169055600160a060020a0385168152600360205260409020805460001981019081101561000057906000526020600020906003020160005b50600160a060020a03851660009081526003602052604090208054859081101561000057906000526020600020906003020160005b5081548154600160a060020a031916600160a060020a03918216178255600180840154908301556002928301805493909201805467ffffffffffffffff191667ffffffffffffffff948516178082558354604060020a908190048616026fffffffffffffffff000000000000000019909116178082559254608060020a9081900490941690930277ffffffffffffffff00000000000000000000000000000000199092169190911790915584166000908152600360205260409020805460001981018083559190829080158290116115485760030281600302836000526020600020918201910161154891905b8082111561095d578054600160a060020a031916815560006001820155600281018054600160c060020a0319169055600
301610926565b5090565b5b505050600160a060020a033316600090815260016020526040902054611570915082611a1f565b600160a060020a03338116600090815260016020526040808220939093559086168152205461159f9082611714565b600160a060020a038086166000818152600160209081526040918290209490945580518581529051339093169391927fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef929181900390910190a35b50505050565b600160a060020a0383161561166e576116466008600061161f86610ead565b600160a060020a0316600160a060020a031681526020019081526020016000205482611714565b6008600061165386610ead565b600160a060020a031681526020810191909152604001600020555b600160a060020a038216156116dc576116b46008600061168d85610ead565b600160a060020a0316600160a060020a031681526020019081526020016000205482611a1f565b600860006116c185610ead565b600160a060020a031681526020810191909152604001600020555b5b505050565b600083826116f082426110fa565b8111156116fc57610000565b611707868686611b1b565b92505b5b50509392505050565b600061172283831115611b4d565b508082035b92915050565b6000610fd983602001518367ffffffffffffffff16856080015167ffffffffffffffff16866040015167ffffffffffffffff16876060015167ffffffffffffffff166111f3565b90505b92915050565b60008167ffffffffffffffff168367ffffffffffffffff1610156117a15781610fd9565b825b90505b92915050565b600033826117ba82426110fa565b8111156117c657610000565b6117d08585611b5d565b92505b5b505092915050565b6117e582610ef9565b6117ee83610c21565b11156117f957610000565b600160a060020a03811660009081526009602052604090205460ff16158015611834575081600160a060020a031681600160a060020a031614155b1561183e57610000565b61184782611070565b1561185157610000565b611864828261185f85610ef9565b611600565b600160a060020a0382811660009081526007602052604090208054600160a060020a031916918316918217905561189a82610ead565b600160a060020a031614610cc357610000565b5b5050565b600160a060020a038216600090815260036020526040812054815b818110156119885761197d836112796003600089600160a060020a0316600160a060020a0316815260200190815260200160002084815481101561000057906000526020600020906003020160005b506040805160a081018
2528254600160a060020a031681526001830154602082015260029092015467ffffffffffffffff80821692840192909252604060020a810482166060840152608060020a900416608082015287611af9565b611a1f565b92505b6001016118cd565b600160a060020a0385166000908152600160205260409020546117d09084611714565b92505b505092915050565b600060006119c384611070565b80156119d157506000600d54115b90506119fb816119e9576119e485610ef9565b6119ec565b60005b6111138686611b7b565b611a05565b91505b5092915050565b60008183106117a15781610fd9565b825b90505b92915050565b6000828201611a3c848210801590611a375750838210155b611b4d565b8091505b5092915050565b611a508161104c565b15611a5a57610b85565b6005805460009081526004602052604090208054600160a060020a031916600160a060020a038416179055805460010190555b50565b6000828202611a3c841580611a37575083858381156100005704145b611b4d565b8091505b5092915050565b60006000611acc60008411611b4d565b8284811561000057049050611a3c838581156100005706828502018514611b4d565b8091505b5092915050565b6000610fd98360200151611b0d858561172d565b611714565b90505b92915050565b60008382611b2982426110fa565b811115611b3557610000565b611707868686611b8f565b92505b5b50509392505050565b801515610b8557610000565b5b50565b6000611b6883611a47565b610fd98383611c92565b90505b92915050565b6000610fd983610ef9565b90505b92915050565b600160a060020a038084166000908152600260209081526040808320338516845282528083205493861683526001909152812054909190611bd09084611a1f565b600160a060020a038086166000908152600160205260408082209390935590871681522054611bff9084611714565b600160a060020a038616600090815260016020526040902055611c228184611714565b600160a060020a038087166000818152600260209081526040808320338616845282529182902094909455805187815290519288169391927fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef929181900390910190a3600191505b509392505050565b60003382611ca082426110fa565b811115611cac57610000565b6117d08585611cc2565b92505b5b505092915050565b600160a060020a033316600090815260016020526040812054611ce59083611714565b600160a060020a033381166000908152600160205260408082209390935590851681522054611d149
083611a1f565b600160a060020a038085166000818152600160209081526040918290209490945580518681529051919333909316927fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef92918290030190a35060015b929150505600a165627a7a72305820bfa5ddd3fecf3f43aed25385ec7ec3ef79638c2e58d99f85d9a3cc494183bf160029000000000000000000000000a14bdd7e5666d784dcce98ad24d383a6b1cd4182", + "value": "0x0" + }, + "blockNumber": 0, + "error": "contract creation code storage out of gas", + "result": {}, + "subtraces": 0, + "traceAddress": [0], + "type": "create" + } + ] +} diff --git a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/inner_instafail.json b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/inner_instafail.json new file mode 100644 index 00000000..4de08f2c --- /dev/null +++ b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/inner_instafail.json @@ -0,0 +1,72 @@ +{ + "genesis": { + "difficulty": "117067574", + "extraData": "0xd783010502846765746887676f312e372e33856c696e7578", + "gasLimit": "4712380", + "hash": "0xe05db05eeb3f288041ecb10a787df121c0ed69499355716e17c307de313a4486", + "miner": "0x0c062b329265c965deef1eede55183b3acb8f611", + "mixHash": "0xb669ae39118a53d2c65fd3b1e1d3850dd3f8c6842030698ed846a2762d68b61d", + "nonce": "0x2b469722b8e28c45", + "number": "24973", + "stateRoot": "0x532a5c3f75453a696428db078e32ae283c85cb97e4d8560dbdf022adac6df369", + "timestamp": "1479891145", + "totalDifficulty": "1892250259406", + "alloc": { + "0x6c06b16512b332e6cd8293a2974872674716ce18": { + "balance": "0x0", + "nonce": "1", + "code": "0x60606040526000357c0100000000000000000000000000000000000000000000000000000000900480632e1a7d4d146036575b6000565b34600057604e60048080359060200190919050506050565b005b3373ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051809050600060405180830381858888f19350505050505b5056", + "storage": {} + }, + "0x66fdfd05e46126a07465ad24e40cc0597bc1ef31": { + "balance": "0x229ebbb36c3e0f20", + "nonce": "3", + "code": 
"0x", + "storage": {} + } + }, + "config": { + "chainId": 3, + "homesteadBlock": 0, + "daoForkSupport": true, + "eip150Block": 0, + "eip150Hash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d", + "eip155Block": 10, + "eip158Block": 10, + "byzantiumBlock": 1700000, + "constantinopleBlock": 4230000, + "petersburgBlock": 4939394, + "istanbulBlock": 6485846, + "muirGlacierBlock": 7117117, + "ethash": {} + } + }, + "context": { + "number": "24974", + "difficulty": "117067574", + "timestamp": "1479891162", + "gasLimit": "4712388", + "miner": "0xc822ef32e6d26e170b70cf761e204c1806265914" + }, + "input": "0xf889038504a81557008301f97e946c06b16512b332e6cd8293a2974872674716ce1880a42e1a7d4d00000000000000000000000000000000000000000000000014d1120d7b1600002aa0e2a6558040c5d72bc59f2fb62a38993a314c849cd22fb393018d2c5af3112095a01bdb6d7ba32263ccc2ecc880d38c49d9f0c5a72d8b7908e3122b31356d349745", + "result": [ + { + "action": { + "callType": "call", + "from": "0x66fdfd05e46126a07465ad24e40cc0597bc1ef31", + "gas": "0x1f97e", + "input": "0x2e1a7d4d00000000000000000000000000000000000000000000000014d1120d7b160000", + "to": "0x6c06b16512b332e6cd8293a2974872674716ce18", + "value": "0x0" + }, + "blockNumber": 24974, + "result": { + "gasUsed": "0x72de", + "output": "0x" + }, + "subtraces": 0, + "traceAddress": [], + "type": "call" + } + ] +} diff --git a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/inner_precompiled_wrong_gas.json b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/inner_precompiled_wrong_gas.json new file mode 100644 index 00000000..70442fdb --- /dev/null +++ b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/inner_precompiled_wrong_gas.json @@ -0,0 +1,219 @@ +{ + "genesis": { + "number": "559197", + "hash": "0x0742a2bfab0452e2c634f3685b7e49ceb065c7000609b2b73f086e01fd1dfb58", + "nonce": "0x3060ad521440e1c2", + "mixHash": "0x59e7d4ae6cc3c38d23dac3f869b21984c7ba8f38070f4116a4941d9c403b6299", + "stateRoot": 
"0x68418fb5cf4afa9b807dc079e8cdde0e148ac2c8afb378e675465b5bed1fbd02", + "miner": "0x877bd459c9b7d8576b44e59e09d076c25946f443", + "difficulty": "1813945", + "totalDifficulty": "469107641961", + "extraData": "0xd883010906846765746888676f312e31332e35856c696e7578", + "gasLimit": "6321166", + "timestamp": "1577471202", + "alloc": { + "0x877bd459c9b7d8576b44e59e09d076c25946f443": { + "balance": "0xc5e6fdae52af83f7e28", + "nonce": "77947", + "code": "0x", + "storage": {} + }, + "0x774c398d763161f55b66a646f17edda4addad2ca": { + "balance": "0xf09ef316eff819ee488", + "nonce": "0", + "code": "0x", + "storage": {} + }, + "0x6cc68eb482a757c690dd151d2bd5e774ada38bdc": { + "balance": "0x0", + "nonce": "1", + "code": "0x60006121df537c01000000000000000000000000000000000000000000000000000000006000350463b041b2858114156100d257600435604052780100000000000000000000000000000000000000000000000060606060599059016000905260038152604051816020015260008160400152809050205404606052606051151561008f57600060a052602060a0f35b604051601c604459905901600090520163e0e9a17b601c82035260605160048201526020610100602483600030602d5a03f1506101005190501460c052602060c0f35b632cce81aa81141561019957600435610120526001610120511280156100f85780610143565b78010000000000000000000000000000000000000000000000006060606059905901600090526003815266040000000000025481602001526000816040015280905020540461012051135b905015610157576000610180526020610180f35b601c604459905901600090520163e0e9a17b601c82035261012051600482015260206101c0602483600030602d5a03f1506101c05190506101a05260206101a0f35b63e0e9a17b8114156102e957600435610120526604000000000002546101e0526007610200525b610120517801000000000000000000000000000000000000000000000000606060605990590160009052600381526101e05181602001526000816040015280905020540413156102da575b6102005160050a610120517801000000000000000000000000000000000000000000000000606060605990590160009052600381526101e051816020015260008160400152809050205404031215610269576000610200511361026c565b60005b1561028157600161020051036102005261020b565b7c
01000000000000000000000000000000000000000000000000000000006102005160200260020a606060605990590160009052600381526101e05181602001526001816040015280905020540204546101e0526101c0565b6101e051610280526020610280f35b63cef887b08114156103e757365990590160009052366004823760043560208201016102c0526024356102e052506060601c61014c5990590160009052016390fa337d601c8203526102c0516020601f602083035101046020026020018360048401526020820360648401528060c8840152808401935050506102e051602482015233604482015281600401599059016000905260648160648460006004601cf161039057fe5b60648101925060c882015180808582606487015160006004600a8705601201f16103b657fe5b5080840193505080830360206103a08284600030602d5a03f1506103a0519050905090509050610300526020610300f35b6390fa337d81141561065f57365990590160009052366004823760043560208201016102c0526024356102e0526044356103e052505a610400526020601c608c599059016000905201632b861629601c8203526102c0516020601f6020830351010460200260200183600484015260208203602484015280604884015280840193505050816004015990590160009052602481602484600060046015f161048a57fe5b602481019250604882015180808582602487015160006004600a8705601201f16104b057fe5b5080840193505080830360206104408284600030602d5a03f15061044051905090509050905061042052610420511561065e576102c05160208103516020599059016000905260208183856000600287604801f150805190509050905061046052602059905901600090526020816020610460600060026068f1508051905060005b6020811215610552578181601f031a816105400153600181019050610532565b5050610540516101e0526102e0516c010000000000000000000000006103e0510217606060605990590160009052600381526101e05181602001526003816040015280905020555a61058052700100000000000000000000000000000000660400000000000154046105a0526104006105a0516103ff02056105c0526104006105a05161040102056105e0526105c0513a12156105f6576105c05161060052610615565b6105e0513a131561060e576105e05161060052610614565b3a610600525b5b6105805161040051036106005160020202610620526106205170010000000000000000000000000000000061060051021766040000000000015561042051610640526020610640f35b5b63d467ae03811415610732
57600435604052602435610660526106605134121515610725576000341315610718576c01000000000000000000000000606060605990590160009052600381526040518160200152600381604001528090502054046103e0526000600060006000346103e051611388f115156106dd57fe5b601c60405990590160009052013481526103e0517f15e746bf513b8a58e4265cc1162d7fc445da5c9b1928d7cfcde2582735d4677f602083a2505b60016106a05260206106a0f35b60006106c05260206106c0f35b63ea4971ee811415610851576004356101e0526024356102e0526044356103e052601c606459905901600090520163d467ae03601c8203526101e05160048201526604000000000001546fffffffffffffffffffffffffffffffff16602482015260206106e060448334306123555a03f1506106e051905015156107bd576000610700526020610700f35b606060605990590160009052600381526101e05181602001526003816040015280905020546bffffffffffffffffffffffff166102e0511215610844576102e0516c010000000000000000000000006103e0510217606060605990590160009052600381526101e05181602001526003816040015280905020556001610760526020610760f35b6000610780526020610780f35b6387def0818114156108a3576004356101e0526c01000000000000000000000000606060605990590160009052600381526101e0518160200152600381604001528090502054046107a05260206107a0f35b630aece23c8114156108f4576004356101e052606060605990590160009052600381526101e05181602001526003816040015280905020546bffffffffffffffffffffffff166107e05260206107e0f35b63fa14df6b811415610926576604000000000001546fffffffffffffffffffffffffffffffff16610820526020610820f35b63b8c48f8c811415610b1b576004356101e0526024356108405260443561086052600066040000000000035414151561096a576000610880526020610880f3610976565b60016604000000000003555b6101e051660400000000000255606060605990590160009052600381526101e05181602001526000816040015280905020546108a0526108a0610840518060181a82538060191a600183015380601a1a600283015380601b1a600383015380601c1a600483015380601d1a600583015380601e1a600683015380601f1a600783015350506108a051606060605990590160009052600381526101e0518160200152600081604001528090502055606060605990590160009052600381526101e051816020015260008160400152809050205461094052601061094001
610860518060101a82538060111a60018301538060121a60028301538060131a60038301538060141a60048301538060151a60058301538060161a60068301538060171a60078301538060181a60088301538060191a600983015380601a1a600a83015380601b1a600b83015380601c1a600c83015380601d1a600d83015380601e1a600e83015380601f1a600f830153505061094051606060605990590160009052600381526101e051816020015260008160400152809050205560016109e05260206109e0f35b632b86162981141561179457365990590160009052366004823760043560208201016102c0525060483560005b6020811215610b68578181601f031a81610a600153600181019050610b48565b5050610a6051610a00526102c05160208103516020599059016000905260208183856000600287604801f1508051905090509050610a8052602059905901600090526020816020610a80600060026068f1508051905060005b6020811215610be1578181601f031a81610b600153600181019050610bc1565b5050610b60516101e05270010000000000000000000000000000000070010000000000000000000000000000000060606060599059016000905260038152610a005181602001526000816040015280905020540204610b8052610b80511515610c8b57601c602059905901600090520161272e6101e0517f055e4f8dd3a534789b3feb8e0681afa2aee8713fdd6472f25b2c30dc7bf4e0f4600084a3506000610bc0526020610bc0f35b700100000000000000000000000000000000700100000000000000000000000000000000606060605990590160009052600381526101e05181602001526000816040015280905020540204610be0526000610be051141515610d2e57601c60205990590160009052016127386101e0517f055e4f8dd3a534789b3feb8e0681afa2aee8713fdd6472f25b2c30dc7bf4e0f4600084a3506000610c20526020610c20f35b608c35610c40526301000000610c405160031a0262010000610c405160021a02610100610c405160011a02610c405160001a010101610c60526301000000610c605104610ca05262ffffff610c605116610cc0526003610ca051036101000a610cc05102610c805260006101e0511315610db057610c80516101e05112610db3565b60005b1561174d57780100000000000000000000000000000000000000000000000060606060599059016000905260038152610a00518160200152600081604001528090502054046001016101205260806080599059016000905260038152610a005181602001526002816040015260008160600152809050206002810154610d405250610d40516008
1a610d405160091a61010002610d4051600a1a6201000002610d4051600b1a630100000002010101610d005260006107e0610120510614158015610e7e5780610e8b565b6001660400000000000054145b905015610f0257610d0051610c6051141515610eae576000610d00511415610eb1565b60005b15610efd57601c602059905901600090520161271a6101e0517f055e4f8dd3a534789b3feb8e0681afa2aee8713fdd6472f25b2c30dc7bf4e0f4600084a3506000610da0526020610da0f35b6111b4565b6301000000610d005104610de05262ffffff610d005116610e00526003610de051036101000a610e005102610dc05260806080599059016000905260038152610a005181602001526002816040015260008160600152809050206002810154610e605250610e605160041a610e605160051a61010002610e605160061a6201000002610e605160071a630100000002010101610e2052601c604459905901600090520163e0e9a17b601c8203526107e0610120510360048201526020610ec0602483600030602d5a03f150610ec0519050610ea05260806080599059016000905260038152610ea05181602001526002816040015260008160600152809050206002810154610f205250610f205160041a610f205160051a61010002610f205160061a6201000002610f205160071a630100000002010101610ee052610ee051610e20510362049d408112156110595762049d4090505b6249d40081131561106b576249d40090505b62127500610dc0518202047bffffffffffffffffffffffffffffffffffffffffffffffffffffffff8113156110ba577bffffffffffffffffffffffffffffffffffffffffffffffffffffffff90505b600860076000835b80156110d9576002810490506001820191506110c2565b5080905001046000600382131515611103578160030360080260020a62ffffff841602905061111a565b6003820360080260020a8304905062ffffff811690505b6280000081161561113357610100810490506001820191505b6301000000820281179050905090509050610f6052610f6051610c6051141515611164576000610f60511415611167565b60005b156111b357601c60205990590160009052016127246101e0517f055e4f8dd3a534789b3feb8e0681afa2aee8713fdd6472f25b2c30dc7bf4e0f4600084a3506000611040526020611040f35b5b6101e0516101e0516101e05166040000000000005455606060605990590160009052600381526101e0518160200152600081604001528090502054611060526008611060016604000000000000548060181a82538060191a600183015380601a1a600283015380601b1a60038301
5380601c1a600483015380601d1a600583015380601e1a600683015380601f1a6007830153505061106051606060605990590160009052600381526101e0518160200152600081604001528090502055600166040000000000005401660400000000000055606060605990590160009052600381526101e0518160200152600081604001528090502054611100526111006001780100000000000000000000000000000000000000000000000060606060599059016000905260038152610a0051816020015260008160400152809050205404018060181a82538060191a600183015380601a1a600283015380601b1a600383015380601c1a600483015380601d1a600583015380601e1a600683015380601f1a6007830153505061110051606060605990590160009052600381526101e051816020015260008160400152809050205560006111c05278010000000000000000000000000000000000000000000000006801000000000000000060606060599059016000905260038152610a0051816020015260008160400152809050205402046111e0526111c06111e05180601c1a825380601d1a600183015380601e1a600283015380601f1a600383015350506001611260525b6008611260511215611515576112605160050a611280526001611280517801000000000000000000000000000000000000000000000000606060605990590160009052600381526101e05181602001526000816040015280905020540407141561148757611260516004026111c0016111e05180601c1a825380601d1a600183015380601e1a600283015380601f1a60038301535050611505565b611260516004026111c0017c01000000000000000000000000000000000000000000000000000000006112605160200260020a60606060599059016000905260038152610a00518160200152600181604001528090502054020480601c1a825380601d1a600183015380601e1a600283015380601f1a600383015350505b60016112605101611260526113ec565b6111c051606060605990590160009052600381526101e05181602001526001816040015280905020555050608060805990590160009052600381526101e051816020015260028160400152600081606001528090502060005b600281121561159057806020026102c05101518282015560018101905061156e565b700100000000000000000000000000000000600003816020026102c051015116828201555050610c80517bffff0000000000000000000000000000000000000000000000000000056113e0526113e051610b805101610be052606060605990590160009052600381526101e05181602001526000816040015280
9050205461140052601061140001610be0518060101a82538060111a60018301538060121a60028301538060131a60038301538060141a60048301538060151a60058301538060161a60068301538060171a60078301538060181a60088301538060191a600983015380601a1a600a83015380601b1a600b83015380601c1a600c83015380601d1a600d83015380601e1a600e83015380601f1a600f830153505061140051606060605990590160009052600381526101e0518160200152600081604001528090502055660400000000000354610be051121515611703576101e051660400000000000255610be0516604000000000003555b601c6020599059016000905201610120516101e0517f055e4f8dd3a534789b3feb8e0681afa2aee8713fdd6472f25b2c30dc7bf4e0f4600084a350610120516114a05260206114a0f35b601c602059905901600090520161276a6101e0517f055e4f8dd3a534789b3feb8e0681afa2aee8713fdd6472f25b2c30dc7bf4e0f4600084a35060006114c05260206114c0f35b630f5995ce8114156119a157365990590160009052366004823760043560208201016114e05260243561150052604435602082010161152052606435604052506114e05160208103516020599059016000905260208183856000600287604801f150805190509050905061156052602059905901600090526020816020611560600060026068f1508051905060005b6020811215611843578181601f031a816116400153600181019050611823565b50506116405161154052604060206114e051035114156118a457601c6020599059016000905201614e52611540517fd008620948a1ed10f4fed82dc43cf79acad36dc6b7c2c924e27c9813193b83ad600084a3506000611660526020611660f35b6080601c6101ac59905901600090520163bd136cb3601c8203526115405160048201526115005160248201526115205160208103516020026020018360448401526020820360c48401528061014884015280840193505050604051606482015281600401599059016000905260848160848460006004601ff161192357fe5b6084810192506101488201518080858260c487015160006004600a8705601201f161194a57fe5b508084019350508083036020611680828434306123555a03f15061168051905090509050905061042052600161042051141561199357611540516116a05260206116a0f36119a0565b60006116c05260206116c0f35b5b63bd136cb3811415611d8c573659905901600090523660048237600435611540526024356115005260443560208201016115205260643560405250601c606459905901600090520163d467ae03601c8203
5260405160048201526060606059905901600090526003815260405181602001526003816040015280905020546bffffffffffffffffffffffff166024820152602061170060448334306123555a03f1506117005190501515611a9757601c6020599059016000905201614e2a611540517fd008620948a1ed10f4fed82dc43cf79acad36dc6b7c2c924e27c9813193b83ad600084a350614e2a611720526020611720f35b601c6044599059016000905201633d73b705601c82035260405160048201526020611740602483600030602d5a03f15061174051905015611b1a57601c6020599059016000905201614e34611540517fd008620948a1ed10f4fed82dc43cf79acad36dc6b7c2c924e27c9813193b83ad600084a350614e34611760526020611760f35b601c604459905901600090520163b041b285601c82035260405160048201526020611780602483600030602d5a03f1506117805190501515611b9e57601c6020599059016000905201614e3e611540517fd008620948a1ed10f4fed82dc43cf79acad36dc6b7c2c924e27c9813193b83ad600084a350614e3e6117a05260206117a0f35b6060601c61014c59905901600090520163b7129afb601c8203526115405160048201526115005160248201526115205160208103516020026020018360448401526020820360a4840152806101088401528084019350505081600401599059016000905260648160648460006004601cf1611c1557fe5b6064810192506101088201518080858260a487015160006004600a8705601201f1611c3c57fe5b5080840193505080830360206117e08284600030602d5a03f1506117e05190509050905090506117c0526080608059905901600090526003815260405181602001526002816040015260008160600152809050207c01000000000000000000000000000000000000000000000000000000006002820154046401000000006001830154020160005b6020811215611ce4578181601f031a816118a00153600181019050611cc4565b50506118a051905061180052611800516117c0511415611d4457601c60205990590160009052016001611540517fd008620948a1ed10f4fed82dc43cf79acad36dc6b7c2c924e27c9813193b83ad600084a35060016118c05260206118c0f35b601c6020599059016000905201614e48611540517fd008620948a1ed10f4fed82dc43cf79acad36dc6b7c2c924e27c9813193b83ad600084a350614e486118e05260206118e0f35b63318a3fee81141561205657365990590160009052366004823760043560208201016114e0526024356115005260443560208201016115205260643560405260843561190052506080601c6101ac
599059016000905201630f5995ce601c8203526114e0516020601f6020830351010460200260200183600484015260208203608484015280610108840152808401935050506115005160248201526115205160208103516020026020018360448401526020820360c48401528061014884015280840193505050604051606482015281600401599059016000905260848160848460006004601ff1611e7b57fe5b60848101925061010882015180808582608487015160006004600a8705601201f1611ea257fe5b508084019350506101488201518080858260c487015160006004600a8705601201f1611eca57fe5b508084019350508083036020611920828434306123555a03f15061192051905090509050905061154052600061154051141515612010576040601c60ec599059016000905201631c0b6367601c8203526114e0516020601f6020830351010460200260200183600484015260208203604484015280608884015280840193505050611540516024820152816004015990590160009052604481604484600060046018f1611f7357fe5b604481019250608882015180808582604487015160006004600a8705601201f1611f9957fe5b5080840193505080830360206119608284600061190051602d5a03f15061196051905090509050905061194052601c602059905901600090520161194051611540517f2d0d11d0f27e21fab56a8712078721096066b7faaa8540a3ea566e70b97de2d4600084a35061194051611980526020611980f35b601c602059905901600090520161753a60007f2d0d11d0f27e21fab56a8712078721096066b7faaa8540a3ea566e70b97de2d4600084a35061753a6119a05260206119a0f35b6309dd0e81811415612076576604000000000002546119c05260206119c0f35b63023948728114156120d2577801000000000000000000000000000000000000000000000000606060605990590160009052600381526604000000000002548160200152600081604001528090502054046119e05260206119e0f35b632c181929811415612139577001000000000000000000000000000000007001000000000000000000000000000000006060606059905901600090526003815266040000000000025481602001526000816040015280905020540204611a20526020611a20f35b637ca823d58114156122af576604000000000002546101e052700100000000000000000000000000000000700100000000000000000000000000000000606060605990590160009052600381526101e05181602001526000816040015280905020540204611a60526000611260525b600a61126051121561224c576080608059905901600090526003
81526101e05181602001526002816040015260008160600152809050207c01000000000000000000000000000000000000000000000000000000006001820154046401000000008254020160005b6020811215612230578181601f031a81611b200153600181019050612210565b5050611b205190506101e05260016112605101611260526121a8565b700100000000000000000000000000000000700100000000000000000000000000000000606060605990590160009052600381526101e05181602001526000816040015280905020540204611b4052611b4051611a605103611b80526020611b80f35b63b7129afb81141561246a57365990590160009052366004823760043561154052602435611500526044356020820101611520525061154051611ba0526020611520510351611bc0526000611260525b611bc05161126051121561245b5761126051602002611520510151611be05260026115005107611c00526001611c0051141561234a57611be051611c2052611ba051611c4052612368565b6000611c0051141561236757611ba051611c2052611be051611c40525b5b60405990590160009052611c205160005b6020811215612399578181601f031a81611ca00153600181019050612379565b5050611ca0518152611c405160005b60208112156123c8578181601f031a81611d2001536001810190506123a8565b5050611d2051602082015260205990590160009052602081604084600060026088f15080519050611d4052602059905901600090526020816020611d40600060026068f1508051905060005b6020811215612434578181601f031a81611de00153600181019050612414565b5050611de0519050611ba052600261150051056115005260016112605101611260526122ff565b611ba051611e00526020611e00f35b633d73b70581141561255b576004356040526604000000000002546101e0526000611260525b600661126051121561254e576101e05160405114156124b6576001611e20526020611e20f35b608060805990590160009052600381526101e05181602001526002816040015260008160600152809050207c01000000000000000000000000000000000000000000000000000000006001820154046401000000008254020160005b6020811215612532578181601f031a81611ec00153600181019050612512565b5050611ec05190506101e0526001611260510161126052612490565b6000611ee0526020611ee0f35b631f794436811415612737576004356101e052601c606459905901600090520163d467ae03601c8203526101e0516004820152606060605990590160009052600381526101e0518160200152600381
6040015280905020546bffffffffffffffffffffffff1660248201526020611f2060448334306123555a03f150611f20519050151561265657601c602059905901600090520160006101e0517f60ab231f060fa320acea170017564b7ee77f477e6465a8c964380cffb270aaf4600084a350602159905901600090526001815260006020820152602081019050602060408203526020601f6020830351604001010460200260408203f3505b601c602059905901600090520160016101e0517f60ab231f060fa320acea170017564b7ee77f477e6465a8c964380cffb270aaf4600084a350608060805990590160009052600381526101e0518160200152600281604001526000816060015280905020607059905901600090526050815260208101905060005b60028112156126f05780830154816020028301526001810190506126d1565b70010000000000000000000000000000000060000381840154168160200283015281905090509050602060408203526020601f6020830351604001010460200260408203f3505b6313f955e18114156128ca573659905901600090523660048237600435602082010161204052602435612060525060506120805260006120a052612080516120c0526000611260525b612060516112605112156128bb576120a051806120c051038080602001599059016000905281815260208101905090508180828286612040510160006004600a8705601201f16127cc57fe5b50809050905090506120e0526020601c608c599059016000905201632b861629601c8203526120e0516020601f6020830351010460200260200183600484015260208203602484015280604884015280840193505050816004015990590160009052602481602484600060046015f161284157fe5b602481019250604882015180808582602487015160006004600a8705601201f161286757fe5b5080840193505080830360206121a08284600030602d5a03f1506121a051905090509050905061042052612080516120a051016120a052612080516120c051016120c0526001611260510161126052612780565b610420516121c05260206121c0f35b50", + "storage": { + "0x292b7a8d467a95cffd303c7edd99875892cdb3eaee87e5ca29057dc88a09ffbd": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x4d2fcf8ac901ad7dcf5b1c3979801430d9979c87157230ae066a0276984c6ac7": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0xdf951a5d1d9283b06d4f1de58542f1e1e310d8d17aada46586ddb9598bc42894": 
"0x0000000000000000000000000000000000000000000000000000000000000000", + "0x9c8d09d387f3ba5dd4733e24c63e4d549864a7cd57a1bdf1fdd831a2a0184815": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x4ab3b783bb170e11b0932a5ce8f5f343f67058b3925da271001a75ae498bd655": "0x0000000000000000000000000000000000000000000000000000000000000000" + } + }, + "0x0000000000000000000000000000000000000004": { + "balance": "0x0", + "nonce": "0", + "code": "0x", + "storage": {} + }, + "0x0000000000000000000000000000000000000002": { + "balance": "0x0", + "nonce": "0", + "code": "0x", + "storage": {} + } + }, + "config": { + "chainId": 63, + "daoForkSupport": true, + "eip150Block": 0, + "eip150Hash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d", + "eip155Block": 0, + "eip158Block": 0, + "ethash": {}, + "homesteadBlock": 0, + "byzantiumBlock": 0, + "constantinopleBlock": 301243, + "petersburgBlock": 999983, + "istanbulBlock": 999983 + } + }, + "context": { + "number": "559198", + "difficulty": "1814830", + "timestamp": "1577471205", + "gasLimit": "6327338", + "miner": "0x774c398d763161f55b66a646f17edda4addad2ca" + }, + "input": 
"0xf9026f8301307b85746a52880083124f80946cc68eb482a757c690dd151d2bd5e774ada38bdc80b9020413f955e100000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000005000000000000000000000000000000000000000000000000000000000000019004000000afbe013b4a83b2f91f3d9b6627cf382394c4914fd2b7510700000000000000008621196eb526a0e02430b6dd5c72fd368e768977f3a8364861e5a471a8ae61a1028f745609c40b185f537a67040000005b53875b0f1381589859adcf938980f4a8fb0af4c8845007000000000000000075289d1c48c8f71deee521a76c8d92948cbe14343991998dfaea6b08596d97dcc891745609c40b18ae825ae704000000abbacd8711f647ab97c6c9b9658eb9bef081e2cedb630f010000000000000000549bcab22422baef6c34af382b227e4b1a27bec3312e04dbb62fc315203c67f30f9d745609c40b180fdfc30304000000e93433dde5128942e47e8722d37ec4dcc1c8a78cf9c4a4030000000000000000bf92c09e8e37b2c8ffbb4b9cadfccc563e474c4feae6997f52d56236fedafce20a9f745609c40b1840cc27de04000000f2e372a0b5b837116eee8f968840393d85975a1531346807000000000000000076bc91399edda1de98976ee0774e2ad3b21dd38ad9f5f34d2c816a832747fe7f4c9e745609c40b18e290e9e00000000000000000000000000000000081a1a01c9e9d742c8e69daba2a026ccafdde618f2e44c96db281c2209c22f183ad03a2a049a61d267d22226896d4c065525819c238784c439dc2afa7d17fce76595730d1", + "result": [ + { + "action": { + "author": "0x0000000000000000000000000000000000000000", + "address": "0x0000000000000000000000000000000000000000", + "balance": "0x0", + "callType": "call", + "from": "0x877bd459c9b7d8576b44e59e09d076c25946f443", + "gas": "0x124f80", + "input": 
"0x13f955e100000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000005000000000000000000000000000000000000000000000000000000000000019004000000afbe013b4a83b2f91f3d9b6627cf382394c4914fd2b7510700000000000000008621196eb526a0e02430b6dd5c72fd368e768977f3a8364861e5a471a8ae61a1028f745609c40b185f537a67040000005b53875b0f1381589859adcf938980f4a8fb0af4c8845007000000000000000075289d1c48c8f71deee521a76c8d92948cbe14343991998dfaea6b08596d97dcc891745609c40b18ae825ae704000000abbacd8711f647ab97c6c9b9658eb9bef081e2cedb630f010000000000000000549bcab22422baef6c34af382b227e4b1a27bec3312e04dbb62fc315203c67f30f9d745609c40b180fdfc30304000000e93433dde5128942e47e8722d37ec4dcc1c8a78cf9c4a4030000000000000000bf92c09e8e37b2c8ffbb4b9cadfccc563e474c4feae6997f52d56236fedafce20a9f745609c40b1840cc27de04000000f2e372a0b5b837116eee8f968840393d85975a1531346807000000000000000076bc91399edda1de98976ee0774e2ad3b21dd38ad9f5f34d2c816a832747fe7f4c9e745609c40b18e290e9e000000000000000000000000000000000", + "refundAddress": "0x0000000000000000000000000000000000000000", + "to": "0x6cc68eb482a757c690dd151d2bd5e774ada38bdc", + "value": "0x0" + }, + "result": { + "address": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x1c6ff", + "output": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + "subtraces": 5, + "traceAddress": [], + "type": "call" + }, + { + "action": { + "author": "0x0000000000000000000000000000000000000000", + "address": "0x0000000000000000000000000000000000000000", + "balance": "0x0", + "callType": "call", + "from": "0x6cc68eb482a757c690dd151d2bd5e774ada38bdc", + "gas": "0x114243", + "input": 
"0x2b8616290000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000005004000000afbe013b4a83b2f91f3d9b6627cf382394c4914fd2b7510700000000000000008621196eb526a0e02430b6dd5c72fd368e768977f3a8364861e5a471a8ae61a1028f745609c40b185f537a6700000000000000000000000000000000", + "refundAddress": "0x0000000000000000000000000000000000000000", + "to": "0x6cc68eb482a757c690dd151d2bd5e774ada38bdc", + "value": "0x0" + }, + "result": { + "address": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x27c3", + "output": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + "subtraces": 0, + "traceAddress": [ + 0 + ], + "type": "call" + }, + { + "action": { + "author": "0x0000000000000000000000000000000000000000", + "address": "0x0000000000000000000000000000000000000000", + "balance": "0x0", + "callType": "call", + "from": "0x6cc68eb482a757c690dd151d2bd5e774ada38bdc", + "gas": "0x110d3b", + "input": "0x2b86162900000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000050040000005b53875b0f1381589859adcf938980f4a8fb0af4c8845007000000000000000075289d1c48c8f71deee521a76c8d92948cbe14343991998dfaea6b08596d97dcc891745609c40b18ae825ae700000000000000000000000000000000", + "refundAddress": "0x0000000000000000000000000000000000000000", + "to": "0x6cc68eb482a757c690dd151d2bd5e774ada38bdc", + "value": "0x0" + }, + "result": { + "address": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x27c3", + "output": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + "subtraces": 0, + "traceAddress": [ + 1 + ], + "type": "call" + }, + { + "action": { + "author": "0x0000000000000000000000000000000000000000", + "address": "0x0000000000000000000000000000000000000000", + "balance": "0x0", + "callType": "call", + "from": "0x6cc68eb482a757c690dd151d2bd5e774ada38bdc", + "gas": "0x10d833", + "input": 
"0x2b8616290000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000005004000000abbacd8711f647ab97c6c9b9658eb9bef081e2cedb630f010000000000000000549bcab22422baef6c34af382b227e4b1a27bec3312e04dbb62fc315203c67f30f9d745609c40b180fdfc30300000000000000000000000000000000", + "refundAddress": "0x0000000000000000000000000000000000000000", + "to": "0x6cc68eb482a757c690dd151d2bd5e774ada38bdc", + "value": "0x0" + }, + "result": { + "address": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x27c3", + "output": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + "subtraces": 0, + "traceAddress": [ + 2 + ], + "type": "call" + }, + { + "action": { + "author": "0x0000000000000000000000000000000000000000", + "address": "0x0000000000000000000000000000000000000000", + "balance": "0x0", + "callType": "call", + "from": "0x6cc68eb482a757c690dd151d2bd5e774ada38bdc", + "gas": "0x10a328", + "input": "0x2b8616290000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000005004000000e93433dde5128942e47e8722d37ec4dcc1c8a78cf9c4a4030000000000000000bf92c09e8e37b2c8ffbb4b9cadfccc563e474c4feae6997f52d56236fedafce20a9f745609c40b1840cc27de00000000000000000000000000000000", + "refundAddress": "0x0000000000000000000000000000000000000000", + "to": "0x6cc68eb482a757c690dd151d2bd5e774ada38bdc", + "value": "0x0" + }, + "result": { + "address": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x27c3", + "output": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + "subtraces": 0, + "traceAddress": [ + 3 + ], + "type": "call" + }, + { + "action": { + "author": "0x0000000000000000000000000000000000000000", + "address": "0x0000000000000000000000000000000000000000", + "balance": "0x0", + "callType": "call", + "from": "0x6cc68eb482a757c690dd151d2bd5e774ada38bdc", + "gas": "0x106e1d", + "input": 
"0x2b8616290000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000005004000000f2e372a0b5b837116eee8f968840393d85975a1531346807000000000000000076bc91399edda1de98976ee0774e2ad3b21dd38ad9f5f34d2c816a832747fe7f4c9e745609c40b18e290e9e000000000000000000000000000000000", + "refundAddress": "0x0000000000000000000000000000000000000000", + "to": "0x6cc68eb482a757c690dd151d2bd5e774ada38bdc", + "value": "0x0" + }, + "result": { + "address": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x27c3", + "output": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + "subtraces": 0, + "traceAddress": [ + 4 + ], + "type": "call" + } + ] +} \ No newline at end of file diff --git a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/inner_throw_outer_revert.json b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/inner_throw_outer_revert.json new file mode 100644 index 00000000..bc947087 --- /dev/null +++ b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/inner_throw_outer_revert.json @@ -0,0 +1,95 @@ +{ + "context": { + "difficulty": "3956606365", + "gasLimit": "5413248", + "miner": "0x00d8ae40d9a06d0e7a2877b62e32eb959afbe16d", + "number": "2295104", + "timestamp": "1513681256" + }, + "genesis": { + "alloc": { + "0x33056b5dcac09a9b4becad0e1dcf92c19bd0af76": { + "balance": "0x0", + "code": 
"0x60606040526004361061015e576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff1680625b4487146101a257806311df9995146101cb578063278ecde11461022057806330adce0e146102435780633197cbb61461026c5780634bb278f3146102955780636103d70b146102aa57806363a599a4146102bf5780636a2d1cb8146102d457806375f12b21146102fd57806378e979251461032a578063801db9cc1461035357806386d1a69f1461037c5780638da5cb5b146103915780638ef26a71146103e65780639890220b1461040f5780639b39caef14610424578063b85dfb801461044d578063be9a6555146104a1578063ccb07cef146104b6578063d06c91e4146104e3578063d669e1d414610538578063df40503c14610561578063e2982c2114610576578063f02e030d146105c3578063f2fde38b146105d8578063f3283fba14610611575b600060149054906101000a900460ff1615151561017a57600080fd5b60075442108061018b575060085442115b15151561019757600080fd5b6101a03361064a565b005b34156101ad57600080fd5b6101b5610925565b6040518082815260200191505060405180910390f35b34156101d657600080fd5b6101de61092b565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b341561022b57600080fd5b6102416004808035906020019091905050610951565b005b341561024e57600080fd5b610256610c48565b6040518082815260200191505060405180910390f35b341561027757600080fd5b61027f610c4e565b6040518082815260200191505060405180910390f35b34156102a057600080fd5b6102a8610c54565b005b34156102b557600080fd5b6102bd610f3e565b005b34156102ca57600080fd5b6102d261105d565b005b34156102df57600080fd5b6102e76110d5565b6040518082815260200191505060405180910390f35b341561030857600080fd5b6103106110e1565b604051808215151515815260200191505060405180910390f35b341561033557600080fd5b61033d6110f4565b6040518082815260200191505060405180910390f35b341561035e57600080fd5b6103666110fa565b6040518082815260200191505060405180910390f35b341561038757600080fd5b61038f611104565b005b341561039c57600080fd5b6103a4611196565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35
b34156103f157600080fd5b6103f96111bb565b6040518082815260200191505060405180910390f35b341561041a57600080fd5b6104226111c1565b005b341561042f57600080fd5b610437611296565b6040518082815260200191505060405180910390f35b341561045857600080fd5b610484600480803573ffffffffffffffffffffffffffffffffffffffff1690602001909190505061129c565b604051808381526020018281526020019250505060405180910390f35b34156104ac57600080fd5b6104b46112c0565b005b34156104c157600080fd5b6104c9611341565b604051808215151515815260200191505060405180910390f35b34156104ee57600080fd5b6104f6611354565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b341561054357600080fd5b61054b61137a565b6040518082815260200191505060405180910390f35b341561056c57600080fd5b610574611385565b005b341561058157600080fd5b6105ad600480803573ffffffffffffffffffffffffffffffffffffffff169060200190919050506116c3565b6040518082815260200191505060405180910390f35b34156105ce57600080fd5b6105d66116db565b005b34156105e357600080fd5b61060f600480803573ffffffffffffffffffffffffffffffffffffffff16906020019091905050611829565b005b341561061c57600080fd5b610648600480803573ffffffffffffffffffffffffffffffffffffffff169060200190919050506118fe565b005b600080670de0b6b3a7640000341015151561066457600080fd5b61069b610696670de0b6b3a7640000610688610258346119d990919063ffffffff16565b611a0c90919063ffffffff16565b611a27565b9150660221b262dd80006106ba60065484611a7e90919063ffffffff16565b111515156106c757600080fd5b600a60008473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000209050600360009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1663a9059cbb84846000604051602001526040518363ffffffff167c0100000000000000000000000000000000000000000000000000000000028152600401808373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200182815260200192505050602060405180830381600087803b1515610
7d557600080fd5b6102c65a03f115156107e657600080fd5b5050506040518051905050610808828260010154611a7e90919063ffffffff16565b8160010181905550610827348260000154611a7e90919063ffffffff16565b816000018190555061084434600554611a7e90919063ffffffff16565b60058190555061085f82600654611a7e90919063ffffffff16565b6006819055503373ffffffffffffffffffffffffffffffffffffffff167ff3c1c7c0eb1328ddc834c4c9e579c06d35f443bf1102b034653624a239c7a40c836040518082815260200191505060405180910390a27fd1dc370699ae69fb860ed754789a4327413ec1cd379b93f2cbedf449a26b0e8583600554604051808373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020018281526020019250505060405180910390a1505050565b60025481565b600360009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b600060085442108061096b5750651b48eb57e00060065410155b15151561097757600080fd5b600a60003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060010154821415156109c757600080fd5b600360009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166323b872dd3330856000604051602001526040518463ffffffff167c0100000000000000000000000000000000000000000000000000000000028152600401808473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020018373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020018281526020019350505050602060405180830381600087803b1515610ac857600080fd5b6102c65a03f11515610ad957600080fd5b5050506040518051905050600360009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166342966c68836000604051602001526040518263ffffffff167c010000000000000000000000000000000000000000000000000000000002815260040180828152602001915050602060405180830381600087803b1515610b7d57600080fd5b6102c65a03f11515610b8e57600080fd5b505050604051805190501515610ba357600080fd5b600a60003373fffffffffffffffffffffffff
fffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000206000015490506000600a60003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020600001819055506000811115610c4457610c433382611a9c565b5b5050565b60055481565b60085481565b60008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16141515610cb157600080fd5b600854421015610cd357660221b262dd8000600654141515610cd257600080fd5b5b651b48eb57e000600654108015610cf057506213c6806008540142105b151515610cfc57600080fd5b600460009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166108fc3073ffffffffffffffffffffffffffffffffffffffff16319081150290604051600060405180830381858888f193505050501515610d7557600080fd5b600360009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166370a08231306000604051602001526040518263ffffffff167c0100000000000000000000000000000000000000000000000000000000028152600401808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001915050602060405180830381600087803b1515610e3a57600080fd5b6102c65a03f11515610e4b57600080fd5b5050506040518051905090506000811115610f2057600360009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166342966c68826000604051602001526040518263ffffffff167c010000000000000000000000000000000000000000000000000000000002815260040180828152602001915050602060405180830381600087803b1515610ef957600080fd5b6102c65a03f11515610f0a57600080fd5b505050604051805190501515610f1f57600080fd5b5b6001600960006101000a81548160ff02191690831515021790555050565b600080339150600160008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054905060008114151515610f9657600080fd5b803073fffffffff
fffffffffffffffffffffffffffffff163110151515610fbc57600080fd5b610fd181600254611b5090919063ffffffff16565b6002819055506000600160008473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020819055508173ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050151561105957fe5b5050565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff161415156110b857600080fd5b6001600060146101000a81548160ff021916908315150217905550565b670de0b6b3a764000081565b600060149054906101000a900460ff1681565b60075481565b651b48eb57e00081565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614151561115f57600080fd5b600060149054906101000a900460ff16151561117a57600080fd5b60008060146101000a81548160ff021916908315150217905550565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b60065481565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614151561121c57600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166108fc3073ffffffffffffffffffffffffffffffffffffffff16319081150290604051600060405180830381858888f19350505050151561129457600080fd5b565b61025881565b600a6020528060005260406000206000915090508060000154908060010154905082565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614151561131b57600080fd5b600060075414151561132c57600080fd5b4260078190555062278d004201600881905550565b600960009054906101000a900460ff1681565b600460009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b660221b262dd800081565b6
0008060008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff161415156113e557600080fd5b600654660221b262dd800003925061142b670de0b6b3a764000061141c610258670de0b6b3a76400006119d990919063ffffffff16565b81151561142557fe5b04611a27565b915081831115151561143c57600080fd5b600a60008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000209050600360009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1663a9059cbb6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff16856000604051602001526040518363ffffffff167c0100000000000000000000000000000000000000000000000000000000028152600401808373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200182815260200192505050602060405180830381600087803b151561158c57600080fd5b6102c65a03f1151561159d57600080fd5b50505060405180519050506115bf838260010154611a7e90919063ffffffff16565b81600101819055506115dc83600654611a7e90919063ffffffff16565b6006819055503073ffffffffffffffffffffffffffffffffffffffff167ff3c1c7c0eb1328ddc834c4c9e579c06d35f443bf1102b034653624a239c7a40c846040518082815260200191505060405180910390a27fd1dc370699ae69fb860ed754789a4327413ec1cd379b93f2cbedf449a26b0e856000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff16600554604051808373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020018281526020019250505060405180910390a1505050565b60016020528060005260406000206000915090505481565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614151561173657600080fd5b600360009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673fffffffffffffffffffffffffffffffffffffff
f1663f2fde38b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff166040518263ffffffff167c0100000000000000000000000000000000000000000000000000000000028152600401808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001915050600060405180830381600087803b151561181357600080fd5b6102c65a03f1151561182457600080fd5b505050565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614151561188457600080fd5b600073ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff161415156118fb57806000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055505b50565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614151561195957600080fd5b600073ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff161415151561199557600080fd5b80600460006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555050565b600080828402905060008414806119fa57508284828115156119f757fe5b04145b1515611a0257fe5b8091505092915050565b6000808284811515611a1a57fe5b0490508091505092915050565b6000611a416202a300600754611a7e90919063ffffffff16565b421015611a7557611a6e611a5f600584611a0c90919063ffffffff16565b83611a7e90919063ffffffff16565b9050611a79565b8190505b919050565b6000808284019050838110151515611a9257fe5b8091505092915050565b611aee81600160008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054611a7e90919063ffffffff16565b600160008473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002081905550611b4681600254611a7e90919063ffffffff16565b6002819055505050565b60008282111
51515611b5e57fe5b8183039050929150505600a165627a7a72305820ec0d82a406896ccf20989b3d6e650abe4dc104e400837f1f58e67ef499493ae90029", + "nonce": "1", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000000": "0x0000000000000000000000008d69d00910d0b2afb2a99ed6c16c8129fa8e1751", + "0x0000000000000000000000000000000000000000000000000000000000000003": "0x000000000000000000000000e819f024b41358d2c08e3a868a5c5dd0566078d4", + "0x0000000000000000000000000000000000000000000000000000000000000007": "0x000000000000000000000000000000000000000000000000000000005a388981", + "0x0000000000000000000000000000000000000000000000000000000000000008": "0x000000000000000000000000000000000000000000000000000000005a3b38e6" + } + }, + "0xd4fcab9f0a6dc0493af47c864f6f17a8a5e2e826": { + "balance": "0x2a2dd979a35cf000", + "code": "0x", + "nonce": "0", + "storage": {} + }, + "0xe819f024b41358d2c08e3a868a5c5dd0566078d4": { + "balance": "0x0", + "code": "0x6060604052600436106100ba576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806306fdde03146100bf578063095ea7b31461014d57806318160ddd146101a757806323b872dd146101d0578063313ce5671461024957806342966c681461027257806370a08231146102ad5780638da5cb5b146102fa57806395d89b411461034f578063a9059cbb146103dd578063dd62ed3e14610437578063f2fde38b146104a3575b600080fd5b34156100ca57600080fd5b6100d26104dc565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156101125780820151818401526020810190506100f7565b50505050905090810190601f16801561013f5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b341561015857600080fd5b61018d600480803573ffffffffffffffffffffffffffffffffffffffff16906020019091908035906020019091905050610515565b604051808215151515815260200191505060405180910390f35b34156101b257600080fd5b6101ba61069c565b6040518082815260200191505060405180910390f35b34156101db57600080fd5b61022f600480803573ffffffffffffffffffffffffffffffffffffffff1690602001909190803573ffffffffffff
ffffffffffffffffffffffffffff169060200190919080359060200190919050506106a2565b604051808215151515815260200191505060405180910390f35b341561025457600080fd5b61025c610952565b6040518082815260200191505060405180910390f35b341561027d57600080fd5b6102936004808035906020019091905050610957565b604051808215151515815260200191505060405180910390f35b34156102b857600080fd5b6102e4600480803573ffffffffffffffffffffffffffffffffffffffff16906020019091905050610abe565b6040518082815260200191505060405180910390f35b341561030557600080fd5b61030d610b07565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b341561035a57600080fd5b610362610b2d565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156103a2578082015181840152602081019050610387565b50505050905090810190601f1680156103cf5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34156103e857600080fd5b61041d600480803573ffffffffffffffffffffffffffffffffffffffff16906020019091908035906020019091905050610b66565b604051808215151515815260200191505060405180910390f35b341561044257600080fd5b61048d600480803573ffffffffffffffffffffffffffffffffffffffff1690602001909190803573ffffffffffffffffffffffffffffffffffffffff16906020019091905050610d01565b6040518082815260200191505060405180910390f35b34156104ae57600080fd5b6104da600480803573ffffffffffffffffffffffffffffffffffffffff16906020019091905050610d88565b005b6040805190810160405280600b81526020017f416c6c436f6465436f696e00000000000000000000000000000000000000000081525081565b6000808214806105a157506000600260003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054145b15156105ac57600080fd5b81600260003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060008573ff
ffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020819055508273ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff167f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925846040518082815260200191505060405180910390a36001905092915050565b60005481565b600080600260008673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054905061077683600160008773ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054610e5f90919063ffffffff16565b600160008673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000208190555061080b83600160008873ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054610e7d90919063ffffffff16565b600160008773ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020819055506108618382610e7d90919063ffffffff16565b600260008773ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020819055508373ffffffffffffffffffffffffffffffffffffffff168573ffffffffffffffffffffffffffffffffffffffff167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef856040518082815260200191505060405180910390a360019150509392505050565b600681565b6000600360009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff161415156109b557600080fd5b610a0782600160003373ffffffffffffffffffffffffffffffffffffffff1673ffffffff
ffffffffffffffffffffffffffffffff16815260200190815260200160002054610e7d90919063ffffffff16565b600160003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002081905550610a5f82600054610e7d90919063ffffffff16565b60008190555060003373ffffffffffffffffffffffffffffffffffffffff167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef846040518082815260200191505060405180910390a360019050919050565b6000600160008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020549050919050565b600360009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b6040805190810160405280600481526020017f414c4c430000000000000000000000000000000000000000000000000000000081525081565b6000610bba82600160003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054610e7d90919063ffffffff16565b600160003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002081905550610c4f82600160008673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054610e5f90919063ffffffff16565b600160008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020819055508273ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef846040518082815260200191505060405180910390a36001905092915050565b6000600260008473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054905092915050565b600360009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16
3373ffffffffffffffffffffffffffffffffffffffff16141515610de457600080fd5b600073ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff16141515610e5c5780600360006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055505b50565b6000808284019050838110151515610e7357fe5b8091505092915050565b6000828211151515610e8b57fe5b8183039050929150505600a165627a7a7230582059f3ea3df0b054e9ab711f37969684ba83fe38f255ffe2c8d850d951121c51100029", + "nonce": "1", + "storage": {} + } + }, + "config": { + "byzantiumBlock": 1700000, + "chainId": 3, + "daoForkSupport": true, + "eip150Block": 0, + "eip150Hash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d", + "eip155Block": 10, + "eip158Block": 10, + "ethash": {}, + "homesteadBlock": 0 + }, + "difficulty": "3956606365", + "extraData": "0x566961425443", + "gasLimit": "5418523", + "hash": "0x6f37eb930a25da673ea1bb80fd9e32ddac19cdf7cd4bb2eac62cc13598624077", + "miner": "0xd049bfd667cb46aa3ef5df0da3e57db3be39e511", + "mixHash": "0x10971cde68c587c750c23b8589ae868ce82c2c646636b97e7d9856470c5297c7", + "nonce": "0x810f923ff4b450a1", + "number": "2295103", + "stateRoot": "0xff403612573d76dfdaf4fea2429b77dbe9764021ae0e38dc8ac79a3cf551179e", + "timestamp": "1513681246", + "totalDifficulty": "7162347056825919" + }, + "input": "0xf86d808504e3b292008307dfa69433056b5dcac09a9b4becad0e1dcf92c19bd0af76880e92596fd62900008029a0e5f27bb66431f7081bb7f1f242003056d7f3f35414c352cd3d1848b52716dac2a07d0be78980edb0bd2a0678fc53aa90ea9558ce346b0d947967216918ac74ccea", + "result": [ + { + "action": { + "callType": "call", + "from": "0xd4fcab9f0a6dc0493af47c864f6f17a8a5e2e826", + "gas": "0x7dfa6", + "input": "0x", + "to": "0x33056b5dcac09a9b4becad0e1dcf92c19bd0af76", + "value": "0xe92596fd6290000" + }, + "blockNumber": 2295104, + "error": "execution reverted", + "result": { + "gasUsed": "0x7c1c8" + }, + "subtraces": 1, + "traceAddress": [], + "type": "call" + }, + { + 
"action": { + "callType": "call", + "from": "0x33056b5dcac09a9b4becad0e1dcf92c19bd0af76", + "gas": "0x75fe3", + "input": "0xa9059cbb000000000000000000000000d4fcab9f0a6dc0493af47c864f6f17a8a5e2e82600000000000000000000000000000000000000000000000000000000000002f4", + "to": "0xe819f024b41358d2c08e3a868a5c5dd0566078d4", + "value": "0x0" + }, + "blockNumber": 0, + "error": "invalid opcode: INVALID", + "result": {}, + "subtraces": 0, + "traceAddress": [0], + "type": "call" + } + ] +} diff --git a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/nested_create.json b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/nested_create.json new file mode 100644 index 00000000..3fcc61fc --- /dev/null +++ b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/nested_create.json @@ -0,0 +1,94 @@ +{ + "genesis": { + "difficulty": "1808543", + "extraData": "0xd883010906846765746888676f312e31332e35856c696e7578", + "gasLimit": "4875092", + "hash": "0x3851fdc18bd5f2314cf0c90439356f9a1fe157d7fb06c20e20b77954da903671", + "miner": "0x877bd459c9b7d8576b44e59e09d076c25946f443", + "mixHash": "0x3d4e702d6058acf94c9547560f05536d45d515bd4f9014564ec41b5b4ff9578b", + "nonce": "0x1695153e7b16c1e7", + "number": "555461", + "stateRoot": "0xba8272acd0dfeb5f04376328e8bfc5b276b177697000c204a060f6f7b629ae32", + "timestamp": "1577423350", + "totalDifficulty": "462222992438", + "alloc": { + "0xcf5b3467dfa45cdc8e5358a7a1ba4deb02e5faed": { + "balance": "0x0", + "nonce": "0", + "code": "0x", + "storage": {} + }, + "0x877bd459c9b7d8576b44e59e09d076c25946f443": { + "balance": "0x16c102a3b09c02abdace", + "nonce": "19049", + "code": "0x", + "storage": {} + } + }, + "config": { + "chainId": 63, + "daoForkSupport": true, + "eip150Block": 0, + "eip150Hash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d", + "eip155Block": 0, + "eip158Block": 0, + "ethash": {}, + "homesteadBlock": 0, + "byzantiumBlock": 0, + "constantinopleBlock": 301243, + 
"petersburgBlock": 999983, + "istanbulBlock": 999983 + } + }, + "context": { + "number": "555462", + "difficulty": "1808543", + "timestamp": "1577423360", + "gasLimit": "4873701", + "miner": "0x877bd459c9b7d8576b44e59e09d076c25946f443" + }, + "input": "0xf90451824a6985746a52880083053e908080b903fb60606040525b60405161015b806102a0833901809050604051809103906000f0600160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908302179055505b610247806100596000396000f30060606040526000357c0100000000000000000000000000000000000000000000000000000000900480632ef9db1314610044578063e37678761461007157610042565b005b61005b6004803590602001803590602001506100ad565b6040518082815260200191505060405180910390f35b61008860048035906020018035906020015061008a565b005b8060006000506000848152602001908152602001600020600050819055505b5050565b6000600060008484604051808381526020018281526020019250505060405180910390209150610120600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff167f6164640000000000000000000000000000000000000000000000000000000000846101e3565b9050600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681868660405180807f616464000000000000000000000000000000000000000000000000000000000081526020015060200184815260200183815260200182815260200193505050506000604051808303816000866161da5a03f191505050600060005060008281526020019081526020016000206000505492506101db565b505092915050565b60004340848484604051808581526020018473ffffffffffffffffffffffffffffffffffffffff166c0100000000000000000000000002815260140183815260200182815260200194505050505060405180910390209050610240565b9392505050566060604052610148806100136000396000f30060606040526000357c010000000000000000000000000000000000000000000000000000000090048063471407e614610044578063e37678761461007757610042565b005b6100616004803590602001803590602001803590602001506100b3565b6040518082815260200191505060405180910390f35b61008e600480359060200180359060200150610090565b005b8060006000506000848152602001908
152602001600020600050819055505b5050565b6000818301905080506100c684826100d5565b8090506100ce565b9392505050565b3373ffffffffffffffffffffffffffffffffffffffff16828260405180807f7265676973746572496e74000000000000000000000000000000000000000000815260200150602001838152602001828152602001925050506000604051808303816000866161da5a03f1915050505b50505681a1a0b9a85df655d3b6aa081e52d8c3db52c50c2bf97d9d993a980113b2262649c125a00d51e63880ca8ef4705914a71e7ff906834a9cdcff0cbd063ff4e43a5905890d", + "result": [ + { + "type": "create", + "action": { + "from": "0x877bd459c9b7d8576b44e59e09d076c25946f443", + "value": "0x0", + "gas": "0x53e90", + "init": "0x60606040525b60405161015b806102a0833901809050604051809103906000f0600160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908302179055505b610247806100596000396000f30060606040526000357c0100000000000000000000000000000000000000000000000000000000900480632ef9db1314610044578063e37678761461007157610042565b005b61005b6004803590602001803590602001506100ad565b6040518082815260200191505060405180910390f35b61008860048035906020018035906020015061008a565b005b8060006000506000848152602001908152602001600020600050819055505b5050565b6000600060008484604051808381526020018281526020019250505060405180910390209150610120600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff167f6164640000000000000000000000000000000000000000000000000000000000846101e3565b9050600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681868660405180807f616464000000000000000000000000000000000000000000000000000000000081526020015060200184815260200183815260200182815260200193505050506000604051808303816000866161da5a03f191505050600060005060008281526020019081526020016000206000505492506101db565b505092915050565b60004340848484604051808581526020018473ffffffffffffffffffffffffffffffffffffffff166c0100000000000000000000000002815260140183815260200182815260200194505050505060405180910390209050610240565b93925050505660606040526101488061001
36000396000f30060606040526000357c010000000000000000000000000000000000000000000000000000000090048063471407e614610044578063e37678761461007757610042565b005b6100616004803590602001803590602001803590602001506100b3565b6040518082815260200191505060405180910390f35b61008e600480359060200180359060200150610090565b005b8060006000506000848152602001908152602001600020600050819055505b5050565b6000818301905080506100c684826100d5565b8090506100ce565b9392505050565b3373ffffffffffffffffffffffffffffffffffffffff16828260405180807f7265676973746572496e74000000000000000000000000000000000000000000815260200150602001838152602001828152602001925050506000604051808303816000866161da5a03f1915050505b505056" + }, + "result": { + "gasUsed": "0x53e90", + "code": "0x60606040526000357c0100000000000000000000000000000000000000000000000000000000900480632ef9db1314610044578063e37678761461007157610042565b005b61005b6004803590602001803590602001506100ad565b6040518082815260200191505060405180910390f35b61008860048035906020018035906020015061008a565b005b8060006000506000848152602001908152602001600020600050819055505b5050565b6000600060008484604051808381526020018281526020019250505060405180910390209150610120600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff167f6164640000000000000000000000000000000000000000000000000000000000846101e3565b9050600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681868660405180807f616464000000000000000000000000000000000000000000000000000000000081526020015060200184815260200183815260200182815260200193505050506000604051808303816000866161da5a03f191505050600060005060008281526020019081526020016000206000505492506101db565b505092915050565b60004340848484604051808581526020018473ffffffffffffffffffffffffffffffffffffffff166c0100000000000000000000000002815260140183815260200182815260200194505050505060405180910390209050610240565b939250505056", + "address": "0x9db7a1baf185a865ffee3824946ccd8958191e5e" + }, + "traceAddress": [], + "subtraces": 
1, + "transactionPosition": 23, + "transactionHash": "0xe267552ce8437a5bc7081385c99f912de5723ad34b958db215dbc41abd5f6c03", + "blockNumber": 555462, + "blockHash": "0x38bba9e3965b57205097ea5ec53fc403cf3941bec2e4c933faae244de5ca4ba1", + "time": "1.147715ms" + }, + { + "type": "create", + "action": { + "from": "0x9db7a1baf185a865ffee3824946ccd8958191e5e", + "value": "0x0", + "gas": "0x30b34", + "init": "0x6060604052610148806100136000396000f30060606040526000357c010000000000000000000000000000000000000000000000000000000090048063471407e614610044578063e37678761461007757610042565b005b6100616004803590602001803590602001803590602001506100b3565b6040518082815260200191505060405180910390f35b61008e600480359060200180359060200150610090565b005b8060006000506000848152602001908152602001600020600050819055505b5050565b6000818301905080506100c684826100d5565b8090506100ce565b9392505050565b3373ffffffffffffffffffffffffffffffffffffffff16828260405180807f7265676973746572496e74000000000000000000000000000000000000000000815260200150602001838152602001828152602001925050506000604051808303816000866161da5a03f1915050505b505056" + }, + "result": { + "gasUsed": "0x1009d", + "code": "0x60606040526000357c010000000000000000000000000000000000000000000000000000000090048063471407e614610044578063e37678761461007757610042565b005b6100616004803590602001803590602001803590602001506100b3565b6040518082815260200191505060405180910390f35b61008e600480359060200180359060200150610090565b005b8060006000506000848152602001908152602001600020600050819055505b5050565b6000818301905080506100c684826100d5565b8090506100ce565b9392505050565b3373ffffffffffffffffffffffffffffffffffffffff16828260405180807f7265676973746572496e74000000000000000000000000000000000000000000815260200150602001838152602001828152602001925050506000604051808303816000866161da5a03f1915050505b505056", + "address": "0xcf5b3467dfa45cdc8e5358a7a1ba4deb02e5faed" + }, + "traceAddress": [0], + "subtraces": 0, + "transactionPosition": 23, + "transactionHash": 
"0xe267552ce8437a5bc7081385c99f912de5723ad34b958db215dbc41abd5f6c03", + "blockNumber": 555462, + "blockHash": "0x38bba9e3965b57205097ea5ec53fc403cf3941bec2e4c933faae244de5ca4ba1" + } + ] +} diff --git a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/nested_create2_action_gas.json b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/nested_create2_action_gas.json new file mode 100644 index 00000000..0eaa3f86 --- /dev/null +++ b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/nested_create2_action_gas.json @@ -0,0 +1,94 @@ +{ + "genesis": { + "difficulty": "4635413", + "extraData": "0xd683010b05846765746886676f312e3133856c696e7578", + "gasLimit": "9289294", + "hash": "0x359775cf1a2ae2400e26ec68bf33bcfe38b7979c76b7e616f42c4ca7e7605e39", + "miner": "0x877bd459c9b7d8576b44e59e09d076c25946f443", + "mixHash": "0x4b2a0ef121a9c7d732fa0fbd4166a0e1041d2da2b8cb677c61edabf8b7183b64", + "nonce": "0x2a8a64ad9757be55", + "number": "1555160", + "stateRoot": "0x95067c12148e2362fcd4a89df286ff0b1739ef097a40ca42ae7f698af9a9d913", + "timestamp": "1590793999", + "totalDifficulty": "2242063623471", + "alloc": { + "0x8785e369f0ef0a4e5c5a5f929680427dc75273a5": { + "balance": "0x0", + "nonce": "0", + "code": "0x", + "storage": {} + }, + "0x877bd459c9b7d8576b44e59e09d076c25946f443": { + "balance": "0x623145b285b3f551fa3f", + "nonce": "260617", + "code": "0x", + "storage": {} + } + }, + "config": { + "chainId": 63, + "daoForkSupport": true, + "eip150Block": 0, + "eip150Hash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d", + "eip155Block": 0, + "eip158Block": 0, + "ethash": {}, + "homesteadBlock": 0, + "byzantiumBlock": 0, + "constantinopleBlock": 301243, + "petersburgBlock": 999983, + "istanbulBlock": 999983 + } + }, + "context": { + "number": "1555161", + "difficulty": "4633150", + "timestamp": "1590794020", + "gasLimit": "9298364", + "miner": "0x877bd459c9b7d8576b44e59e09d076c25946f443" + }, + "input": 
"0xf85e8303fa09843b9aca0083019ed880808a6000600060006000f50081a2a0485ea410e210740eef8e6f6de11c530f46f8da80eecb02afbb6c5f61749ac015a068d72f1b0f1d3cb4e214d5def79b49a73e6ee91db2df83499a54c656c144600f", + "result": [ + { + "type": "create", + "action": { + "from": "0x877bd459c9b7d8576b44e59e09d076c25946f443", + "value": "0x0", + "gas": "0x19ed8", + "init": "0x6000600060006000f500" + }, + "result": { + "gasUsed": "0x14c78", + "code": "0x", + "address": "0x2e8eded627eead210cb6143eb39ef7a3e44e4f00" + }, + "traceAddress": [], + "subtraces": 1, + "transactionPosition": 31, + "transactionHash": "0x1257b698c5833c54ce786734087002b097275abc3877af082b5c2a538e894a41", + "blockNumber": 1555161, + "blockHash": "0xb0793dd508dd106a19794b8ce1dfc0ff8d98c76aab61bf32a11799854149a171", + "time": "889.048µs" + }, + { + "type": "create", + "action": { + "from": "0x2e8eded627eead210cb6143eb39ef7a3e44e4f00", + "value": "0x0", + "gas": "0x5117", + "init": "0x" + }, + "result": { + "gasUsed": "0x0", + "code": "0x", + "address": "0x8785e369f0ef0a4e5c5a5f929680427dc75273a5" + }, + "traceAddress": [0], + "subtraces": 0, + "transactionPosition": 31, + "transactionHash": "0x1257b698c5833c54ce786734087002b097275abc3877af082b5c2a538e894a41", + "blockNumber": 1555161, + "blockHash": "0xb0793dd508dd106a19794b8ce1dfc0ff8d98c76aab61bf32a11799854149a171" + } + ] +} diff --git a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/nested_create_action_gas.json b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/nested_create_action_gas.json new file mode 100644 index 00000000..132b84df --- /dev/null +++ b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/nested_create_action_gas.json @@ -0,0 +1,90 @@ +{ + "genesis": { + "difficulty": "4639933", + "extraData": "0xd883010b05846765746888676f312e31342e33856c696e7578", + "gasLimit": "9280188", + "hash": "0x9a5f3a98eb1c60f6e3f450658a9cea190157e7021d04f927b752ad6482cf9194", + "miner": 
"0x73f26d124436b0791169d63a3af29c2ae47765a3", + "mixHash": "0x6b6f8fcaa54b8565c4c1ae7cf0a020e938a53007f4561e758b17bc05c9044d78", + "nonce": "0x773aba50dc51b462", + "number": "1555169", + "stateRoot": "0xc4b9703de3e59ff795baae2c3afa010cf039c37244a7a6af7f3f491a10601348", + "timestamp": "1590794111", + "totalDifficulty": "2242105342155", + "alloc": { + "0x5ac5599fc9df172c89ee7ec55ad9104ccbfed40d": { + "balance": "0x0", + "nonce": "0", + "code": "0x", + "storage": {} + }, + "0x877bd459c9b7d8576b44e59e09d076c25946f443": { + "balance": "0x62325b40cbbd0915c4b9", + "nonce": "260875", + "code": "0x", + "storage": {} + } + }, + "config": { + "chainId": 63, + "daoForkSupport": true, + "eip150Block": 0, + "eip150Hash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d", + "eip155Block": 0, + "eip158Block": 0, + "ethash": {}, + "homesteadBlock": 0, + "byzantiumBlock": 0, + "constantinopleBlock": 301243, + "petersburgBlock": 999983, + "istanbulBlock": 999983 + } + }, + "context": { + "number": "1555170", + "difficulty": "4642198", + "timestamp": "1590794112", + "gasLimit": "9289249", + "miner": "0x877bd459c9b7d8576b44e59e09d076c25946f443" + }, + "input": "0xf8658303fb0b843b9aca0083019ee48080915a600055600060006000f0505a6001550081a2a01a7deb3a16d967b766459ef486b00656c6581e5ad58968184a33701e27e0eb8aa07162ccdfe2018d64360a605310a62c399dd586c7282dd42a88c54f02f51d451f", + "result": [ + { + "type": "create", + "action": { + "from": "0x877bd459c9b7d8576b44e59e09d076c25946f443", + "value": "0x0", + "gas": "0x19ee4", + "init": "0x5a600055600060006000f0505a60015500" + }, + "error": "out of gas", + "traceAddress": [], + "subtraces": 1, + "transactionPosition": 63, + "transactionHash": "0x60e881fae3884657b5430925c5d0053535b45cce0b8188f2a6be1feee8bcc650", + "blockNumber": 1555170, + "blockHash": "0xea46fbf941d51bf1e4180fbf26d22fda3896f49c7f371d109c226de95dd7b02e", + "time": "952.736µs" + }, + { + "type": "create", + "action": { + "from": 
"0x9c5cfe45b15eaff4ad617af4250189e26024a4f8", + "value": "0x0", + "gas": "0x3cb", + "init": "0x" + }, + "result": { + "gasUsed": "0x0", + "code": "0x", + "address": "0x5ac5599fc9df172c89ee7ec55ad9104ccbfed40d" + }, + "traceAddress": [0], + "subtraces": 0, + "transactionPosition": 63, + "transactionHash": "0x60e881fae3884657b5430925c5d0053535b45cce0b8188f2a6be1feee8bcc650", + "blockNumber": 1555170, + "blockHash": "0xea46fbf941d51bf1e4180fbf26d22fda3896f49c7f371d109c226de95dd7b02e" + } + ] +} diff --git a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/nested_create_inerror.json b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/nested_create_inerror.json new file mode 100644 index 00000000..28e96684 --- /dev/null +++ b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/nested_create_inerror.json @@ -0,0 +1,81 @@ +{ + "genesis": { + "difficulty": "3244991", + "extraData": "0x", + "gasLimit": "7968787", + "hash": "0x62bbf18c203068a8793af8d8360d054f95a63bc62b87ade550861ed490af3f15", + "miner": "0x9f2659ffe7b3b467e46dcec3623392cf51635079", + "mixHash": "0xc8dec711fd1e03972b6a279a09dc0cd29c5171b60f42c4ce37c7c51ff445f776", + "nonce": "0x40b1bbcc25ddb804", + "number": "839246", + "stateRoot": "0x4bb3b02ec70b837651233957fb61a6ea3fc6a4244c1f55df7a713c154829ec0a", + "timestamp": "1581179375", + "totalDifficulty": "1023985623933", + "alloc": { + "0x76554b33410b6d90b7dc889bfed0451ad195f27e": { + "balance": "0x0", + "nonce": "1", + "code": 
"0x6080604052348015600f57600080fd5b506004361060505760003560e01c8063391521f414605557806355313dea14605d5780636d3d14161460655780638da5cb5b14606d578063b9d1e5aa1460b5575b600080fd5b605b60bd565b005b606360c8565b005b606b60ca565b005b607360cf565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b60bb60f4565b005b6020610123600af050565b005b600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565bfefea165627a7a723058202094d5aa5dbbd493e9a2c64c50b62eba4b109b2a12d2bb73a5d0d54982651fc80029", + "storage": {} + }, + "0xed69ab7145a9bae7152406d062c077c6ecc6ae18": { + "balance": "0x0", + "nonce": "0", + "code": "0x", + "storage": {} + }, + "0xa3b31cbd5168d3c99756660d4b7625d679e12573": { + "balance": "0x569bc6535d3083fce", + "nonce": "26", + "code": "0x", + "storage": {} + } + }, + "config": { + "chainId": 63, + "daoForkSupport": true, + "eip150Block": 0, + "eip150Hash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d", + "eip155Block": 0, + "eip158Block": 0, + "ethash": {}, + "homesteadBlock": 0, + "byzantiumBlock": 0, + "constantinopleBlock": 301243, + "petersburgBlock": 999983, + "istanbulBlock": 999983 + } + }, + "context": { + "number": "839247", + "difficulty": "3213311", + "timestamp": "1581179571", + "gasLimit": "7961006", + "miner": "0x9f2659ffe7b3b467e46dcec3623392cf51635079" + }, + "input": "0xf86a1a8509502f9000830334509476554b33410b6d90b7dc889bfed0451ad195f27e8084391521f481a2a02e4ff0d171a860c8c7de2283978e2f225f9ba3ed4dec446b773c6b2d73ef22dea02a6a517528b491cb71b204f534db11a1c8059035f54d5bae347d1cab536bde2c", + "result": [ + { + "type": "call", + "action": { + "from": "0xa3b31cbd5168d3c99756660d4b7625d679e12573", + "to": "0x76554b33410b6d90b7dc889bfed0451ad195f27e", + "value": "0x0", + "gas": "0x33450", + "input": "0x391521f4", + "callType": "call" + }, + "result": { + "gasUsed": "0xd0b5", + "output": "0x" + }, + "traceAddress": [], + "subtraces": 0, + 
"transactionPosition": 26, + "transactionHash": "0xcb1090fa85d2a3da8326b75333e92b3dca89963c895d9c981bfdaa64643135e4", + "blockNumber": 839247, + "blockHash": "0xce7ff7d84ca97f0f89d6065e2c12409a795c9f607cdb14aef0713cad5d7e311c", + "time": "182.267µs" + } + ] +} \ No newline at end of file diff --git a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/nested_pointer_issue.json b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/nested_pointer_issue.json new file mode 100644 index 00000000..c3191d61 --- /dev/null +++ b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/nested_pointer_issue.json @@ -0,0 +1,189 @@ +{ + "genesis": { + "number": "13535", + "hash": "0x6f706fe8026edb51577b57685574dc152dba4e2ebfc8a50bb63a8c95a4f8818d", + "nonce": "0x0000000000000000", + "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "stateRoot": "0x7f54db248a004ca182fe87fdfa6efda97163908b4f0cc84b36a6d60699d5d1be", + "miner": "0x0000000000000000000000000000000000000000", + "difficulty": "1", + "totalDifficulty": "24766", + "extraData": "0xf09f928e20407072796c616273206e6f64652d3020f09f928e000000000000001d32ac3baf238e163e18ed6d77b67b0b54b08ad9781dc4ffd93c5ede1ca12c5f21b36ac39c7ebb88dff65da91f5b9461f19873a02602230b931ba388a809119f00", + "gasLimit": "8000000", + "timestamp": "1549153003", + "alloc": { + "0x0b1ba0af832d7c05fd64161e0db78e85978e8082": { + "balance": "0x0", + "nonce": "1", + "code": 
"0x6080604052600436106100ae5763ffffffff7c010000000000000000000000000000000000000000000000000000000060003504166306fdde0381146100b8578063095ea7b31461014257806318160ddd1461018757806323b872dd146101ae5780632e1a7d4d146101e5578063313ce567146101fd57806370a082311461022857806395d89b4114610256578063a9059cbb1461026b578063d0e30db0146100ae578063dd62ed3e1461029c575b6100b66102d0565b005b3480156100c457600080fd5b506100cd61031f565b6040805160208082528351818301528351919283929083019185019080838360005b838110156101075781810151838201526020016100ef565b50505050905090810190601f1680156101345780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34801561014e57600080fd5b5061017373ffffffffffffffffffffffffffffffffffffffff600435166024356103cb565b604080519115158252519081900360200190f35b34801561019357600080fd5b5061019c61043e565b60408051918252519081900360200190f35b3480156101ba57600080fd5b5061017373ffffffffffffffffffffffffffffffffffffffff60043581169060243516604435610443565b3480156101f157600080fd5b506100b66004356105e3565b34801561020957600080fd5b50610212610678565b6040805160ff9092168252519081900360200190f35b34801561023457600080fd5b5061019c73ffffffffffffffffffffffffffffffffffffffff60043516610681565b34801561026257600080fd5b506100cd610693565b34801561027757600080fd5b5061017373ffffffffffffffffffffffffffffffffffffffff6004351660243561070b565b3480156102a857600080fd5b5061019c73ffffffffffffffffffffffffffffffffffffffff6004358116906024351661071f565b33600081815260036020908152604091829020805434908101909155825190815291517fe1fffcc4923d04b559f4d29a8bfc6cda04eb5b0d3c460751c2402c5c5cc9109c9281900390910190a2565b6000805460408051602060026001851615610100027fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0190941693909304601f810184900484028201840190925281815292918301828280156103c35780601f10610398576101008083540402835291602001916103c3565b820191906000526020600020905b8154815290600101906020018083116103a657829003601f168201915b505050505081565b33600081815260046020908152604080832073fffffffffffffff
fffffffffffffffffffffffff8716808552908352818420869055815186815291519394909390927f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925928290030190a350600192915050565b303190565b73ffffffffffffffffffffffffffffffffffffffff831660009081526003602052604081205482111561047557600080fd5b73ffffffffffffffffffffffffffffffffffffffff841633148015906104eb575073ffffffffffffffffffffffffffffffffffffffff841660009081526004602090815260408083203384529091529020547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff14155b156105655773ffffffffffffffffffffffffffffffffffffffff8416600090815260046020908152604080832033845290915290205482111561052d57600080fd5b73ffffffffffffffffffffffffffffffffffffffff841660009081526004602090815260408083203384529091529020805483900390555b73ffffffffffffffffffffffffffffffffffffffff808516600081815260036020908152604080832080548890039055938716808352918490208054870190558351868152935191937fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef929081900390910190a35060019392505050565b336000908152600360205260409020548111156105ff57600080fd5b33600081815260036020526040808220805485900390555183156108fc0291849190818181858888f1935050505015801561063e573d6000803e3d6000fd5b5060408051828152905133917f7fcf532c15f0a6db0bd6d0e038bea71d30d808c7d98cb3bf7268a95bf5081b65919081900360200190a250565b60025460ff1681565b60036020526000908152604090205481565b60018054604080516020600284861615610100027fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0190941693909304601f810184900484028201840190925281815292918301828280156103c35780601f10610398576101008083540402835291602001916103c3565b6000610718338484610443565b9392505050565b6004602090815260009283526040808420909152908252902054815600a165627a7a72305820228981f11f47ad9630080069b0a81423fcfba5aa8e0f478a579c4bc080ba7e820029", + "storage": { + "0xbe8a6e3827dad84a671edac41a02b0f5b47b9d0339adb1e9411b9ba4e2118738": "0x0000000000000000000000000000000000000000000000000000000000000000" + } + }, + 
"0x48bacb9266a570d521063ef5dd96e61686dbe788": { + "balance": "0x0", + "nonce": "1", + "code": "0x6080604052600436106101b65763ffffffff7c0100000000000000000000000000000000000000000000000000000000600035041663288cdc9181146101bb578063297bb70b146101f15780632ac126221461021e5780633683ef8e1461024b5780633c28d8611461026d5780633e228bae1461029a5780633fd3c997146102ba5780634ac14782146102e75780634d0ae546146103075780634f9559b11461032757806350dde190146103475780636070410814610367578063642f2eaf1461039457806364a3bc15146103b457806377fcce68146103d45780637b8e3514146103f45780637e1d9808146104145780637e9d74dc1461043457806382c174d0146104615780638da5cb5b146104815780639363470214610496578063a3e20380146104b6578063b4be83d5146104d6578063bfc8bfce146104f6578063c585bb9314610516578063c75e0a8114610536578063d46b02c314610563578063d9bfa73e14610583578063db123b1a146105a3578063dd1c7d18146105c5578063e306f779146105e5578063e5fa431b146105fa578063eea086ba1461061a578063f2fde38b1461062f578063ffa1ad741461064f575b600080fd5b3480156101c757600080fd5b506101db6101d63660046148ee565b610664565b6040516101e89190615513565b60405180910390f35b3480156101fd57600080fd5b5061021161020c366004614811565b610676565b6040516101e891906157ed565b34801561022a57600080fd5b5061023e6102393660046148ee565b6107a1565b6040516101e89190615505565b34801561025757600080fd5b5061026b61026636600461492b565b6107b6565b005b34801561027957600080fd5b5061028d610288366004614a5f565b6108a3565b6040516101e891906157fb565b3480156102a657600080fd5b506102116102b5366004614b1f565b610a3a565b3480156102c657600080fd5b506102da6102d53660046149ee565b610a90565b6040516101e891906155cf565b3480156102f357600080fd5b5061026b6103023660046147dc565b610ab8565b34801561031357600080fd5b50610211610322366004614811565b610b85565b34801561033357600080fd5b5061026b6103423660046148ee565b610c75565b34801561035357600080fd5b50610211610362366004614811565b610e2a565b34801561037357600080fd5b506103876103823660046149ee565b610ebe565b6040516101e89190615425565b3480156103a057600080fd5b5061023e6103af3660046148ee565b610f0c565b34801
56103c057600080fd5b506102116103cf366004614b1f565b610f21565b3480156103e057600080fd5b5061026b6103ef3660046147ac565b610fcc565b34801561040057600080fd5b5061023e61040f366004614772565b611106565b34801561042057600080fd5b5061021161042f3660046148a5565b611126565b34801561044057600080fd5b5061045461044f3660046147dc565b61128a565b6040516101e891906154f4565b34801561046d57600080fd5b5061023e61047c36600461490c565b61131f565b34801561048d57600080fd5b5061038761133f565b3480156104a257600080fd5b5061023e6104b1366004614993565b61135b565b3480156104c257600080fd5b506102116104d13660046148a5565b6118de565b3480156104e257600080fd5b506102116104f1366004614b1f565b6119f1565b34801561050257600080fd5b5061026b610511366004614b68565b611a6c565b34801561052257600080fd5b5061026b610531366004614754565b611d05565b34801561054257600080fd5b50610556610551366004614a2a565b611f30565b6040516101e8919061580a565b34801561056f57600080fd5b5061026b61057e366004614a2a565b61202a565b34801561058f57600080fd5b506101db61059e366004614772565b6120c6565b3480156105af57600080fd5b506105b86120e3565b6040516101e891906155be565b3480156105d157600080fd5b506102116105e03660046148a5565b61218e565b3480156105f157600080fd5b506101db612263565b34801561060657600080fd5b506102116106153660046148a5565b612269565b34801561062657600080fd5b506103876123db565b34801561063b57600080fd5b5061026b61064a366004614754565b6123f7565b34801561065b57600080fd5b506105b86124a8565b60046020526000908152604090205481565b61067e614386565b600080610689614386565b60005460ff16156106cf576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016106c69061576d565b60405180910390fd5b600080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001660011781558751935091505b81831461076f57610758878381518110151561071957fe5b90602001906020020151878481518110151561073157fe5b90602001906020020151878581518110151561074957fe5b906020019060200201516124df565b9050610764848261257d565b600190910190610701565b5050600080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0016905550939250505
0565b60056020526000908152604090205460ff1681565b73ffffffffffffffffffffffffffffffffffffffff831633146108465761080e848484848080601f0160208091040260200160405190810160405280939291908181526020018383808284375061135b945050505050565b1515610846576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016106c69061569d565b5050600091825260076020908152604080842073ffffffffffffffffffffffffffffffffffffffff9093168452919052902080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00166001179055565b6108ab6143af565b6108b36143de565b6108bb6143de565b6000805460ff16156108f9576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016106c69061576d565b600080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0016600117905561016080890151610140808a01919091528901519088015261094588611f30565b925061095087611f30565b915061095a6125df565b905061096888848389612611565b61097487838388612611565b61097e88886127a9565b610992888885604001518560400151612809565b8051602081015190519195506109ad918a9186918190612990565b6020808501519081015190516109c99189918591908190612990565b6109e28882856020015186604001518860000151612aa9565b6109fb8782846020015185604001518860200151612aa9565b610a0788888387612b55565b5050600080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0016905550949350505050565b610a42614386565b6060610a4f858585612d2d565b9050608081825160208401305af48015610a8657815183526020820151602084015260408201516040840152606082015160608401525b505b509392505050565b600b6020526000908152604090205473ffffffffffffffffffffffffffffffffffffffff1681565b60008054819060ff1615610af8576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016106c69061576d565b5050600080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001660011781558151905b808214610b5857610b508382815181101515610b4157fe5b90602001906020020151612eff565b600101610b29565b5050600080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0016905550565b610
b8d614386565b600080610b98614386565b60005460ff1615610bd5576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016106c69061576d565b600080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001660011781558751935091505b81831461076f57610c5e8783815181101515610c1f57fe5b906020019060200201518784815181101515610c3757fe5b906020019060200201518785815181101515610c4f57fe5b90602001906020020151612f2a565b9050610c6a848261257d565b600190910190610c07565b6000805481908190819060ff1615610cb9576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016106c69061576d565b600080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00166001179055610cec6125df565b935073ffffffffffffffffffffffffffffffffffffffff84163314610d115733610d14565b60005b73ffffffffffffffffffffffffffffffffffffffff8086166000908152600660209081526040808320938516835292905220549093506001860192509050808211610d8b576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016106c69061572d565b73ffffffffffffffffffffffffffffffffffffffff80851660008181526006602090815260408083209488168084529490915290819020859055517f82af639571738f4ebd4268fb0363d8957ebe1bbb9e78dba5ebd69eed39b154f090610df3908690615513565b60405180910390a35050600080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00169055505050565b610e32614386565b600080610e3d614386565b86519250600091505b818314610eb457610e9d8783815181101515610e5e57fe5b906020019060200201518784815181101515610e7657fe5b906020019060200201518785815181101515610e8e57fe5b90602001906020020151610a3a565b9050610ea9848261257d565b600190910190610e46565b5050509392505050565b7fffffffff0000000000000000000000000000000000000000000000000000000081166000908152600b602052604090205473ffffffffffffffffffffffffffffffffffffffff165b919050565b60096020526000908152604090205460ff1681565b610f29614386565b60005460ff1615610f66576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016106c69061576d565b600080547ffffffff
fffffffffffffffffffffffffffffffffffffffffffffffffffffff00166001179055610f9c848484612f2a565b600080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00169055949350505050565b6000805460ff161561100a576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016106c69061576d565b600080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0016600117905561103d6125df565b73ffffffffffffffffffffffffffffffffffffffff8181166000818152600860209081526040808320948916808452949091529081902080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00168715151790555192935090917fa8656e308026eeabce8f0bc18048433252318ab80ac79da0b3d3d8697dfba891906110d1908690615505565b60405180910390a35050600080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0016905550565b600860209081526000928352604080842090915290825290205460ff1681565b61112e614386565b6060600080600061113d614386565b60005460ff161561117a576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016106c69061576d565b600080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0016600117815589518a919081106111b257fe5b906020019060200201516101600151945088519350600092505b828414611255578489848151811015156111e257fe5b906020019060200201516101600181905250611202888760200151612f7d565b915061122e898481518110151561121557fe5b9060200190602002015183898681518110151561074957fe5b905061123a868261257d565b6020860151881161124a57611255565b6001909201916111cc565b5050600080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00169055509195945050505050565b606060006060600084519250826040519080825280602002602001820160405280156112d057816020015b6112bd6143de565b8152602001906001900390816112b55790505b509150600090505b808314610a88576112ff85828151811015156112f057fe5b90602001906020020151611f30565b828281518110151561130d57fe5b602090810290910101526001016112d8565b600760209081526000928352604080842090915290825290205460ff1681565b60035473fffffffffffffffffffffffffffffffffffffff
f1681565b600080600080600080600080600089511115156113a4576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016106c69061571d565b6113ad89612fc4565b7f010000000000000000000000000000000000000000000000000000000000000090049650600760ff88161061140f576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016106c69061563d565b8660ff16600781111561141e57fe5b9550600086600781111561142e57fe5b1415611466576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016106c69061570d565b600186600781111561147457fe5b14156114bc578851156114b3576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016106c6906157dd565b600097506118d0565b60028660078111156114ca57fe5b141561160557885160411461150b576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016106c6906155dd565b88600081518110151561151a57fe5b01602001517f010000000000000000000000000000000000000000000000000000000000000090819004810204945061155a89600163ffffffff61308816565b935061156d89602163ffffffff61308816565b925060018b86868660405160008152602001604052604051611592949392919061556e565b60206040516020810390808403906000865af11580156115b6573d6000803e3d6000fd5b50506040517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0015173ffffffffffffffffffffffffffffffffffffffff8c811690821614995092506118d09050565b600386600781111561161357fe5b14156117b9578851604114611654576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016106c6906155dd565b88600081518110151561166357fe5b01602001517f01000000000000000000000000000000000000000000000000000000000000009081900481020494506116a389600163ffffffff61308816565b93506116b689602163ffffffff61308816565b925060018b60405160200180807f19457468657265756d205369676e6564204d6573736167653a0a333200000000815250601c0182600019166000191681526020019150506040516020818303038152906040526040518082805190602001908083835b6020831061175757805182527ffffffffffffffffffffffffffffffffffffffffffffffff
fffffffffffffffe0909201916020918201910161171a565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040805192909401829003822060008352910192839052611592945092508991899150889061556e565b60048660078111156117c757fe5b14156117df576117d88b8b8b6130d3565b97506118d0565b60058660078111156117ed57fe5b1415611850576117fc89613228565b73ffffffffffffffffffffffffffffffffffffffff808c1660009081526008602090815260408083209385168352929052205490915060ff16151561184457600097506118d0565b6117d8818c8c8c6132a1565b600686600781111561185e57fe5b141561189e5760008b815260076020908152604080832073ffffffffffffffffffffffffffffffffffffffff8e16845290915290205460ff1697506118d0565b6040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016106c69061563d565b505050505050509392505050565b6118e6614386565b60606000806000806118f6614386565b89600081518110151561190557fe5b906020019060200201516101400151955089519450600093505b8385146119e457858a8581518110151561193557fe5b6020908102909101015161014001528651611951908a90612f7d565b92506119948a8581518110151561196457fe5b9060200190602002015160a001518b8681518110151561198057fe5b9060200190602002015160800151856133fd565b91506119c08a858151811015156119a757fe5b90602001906020020151838a87815181101515610e8e57fe5b90506119cc878261257d565b865189116119d9576119e4565b60019093019261191f565b5050505050509392505050565b6119f9614386565b60005460ff1615611a36576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016106c69061576d565b600080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00166001179055610f9c8484846124df565b600a5460009073ffffffffffffffffffffffffffffffffffffffff1615611abf576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016106c69061576d565b611b02611afd888888888080601f01602080910402602001604051908101604052809392919081815260200183838082843750613453945050505050565b613694565b60008181526009602052604090205490915060ff1615611b4e576040517f08c379a000000
00000000000000000000000000000000000000000000000000081526004016106c69061568d565b73ffffffffffffffffffffffffffffffffffffffff86163314611c1f57611ba6818785858080601f0160208091040260200160405190810160405280939291908181526020018383808284375061135b945050505050565b1515611bde576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016106c6906157cd565b600a80547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff88161790555b6000818152600960205260409081902080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001660011790555130908690869080838380828437820191505092505050600060405180830381855af49150501515611cb6576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016106c6906156bd565b73ffffffffffffffffffffffffffffffffffffffff86163314611cfc57600a80547fffffffffffffffffffffffff00000000000000000000000000000000000000001690555b50505050505050565b6003546000908190819073ffffffffffffffffffffffffffffffffffffffff163314611d5d576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016106c69061577d565b8392508273ffffffffffffffffffffffffffffffffffffffff1663ae25532e6040518163ffffffff167c0100000000000000000000000000000000000000000000000000000000028152600401602060405180830381600087803b158015611dc457600080fd5b505af1158015611dd8573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250611dfc9190810190614a0c565b7fffffffff0000000000000000000000000000000000000000000000000000000081166000908152600b602052604090205490925073ffffffffffffffffffffffffffffffffffffffff1690508015611e81576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016106c69061561d565b7fffffffff0000000000000000000000000000000000000000000000000000000082166000908152600b60205260409081902080547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff8616179055517fd2c6b762299c609bdb96520b58a49bfb80186934d4f71a86a3675
71a15c0319490611f2290849087906155a3565b60405180910390a150505050565b611f386143de565b611f41826136d1565b6020808301829052600091825260049052604090819020549082015260808201511515611f755760015b60ff168152610f07565b60a08201511515611f87576002611f6b565b60a0820151604082015110611f9d576005611f6b565b6101008201514210611fb0576004611f6b565b60208082015160009081526005909152604090205460ff1615611fd4576006611f6b565b610120820151825173ffffffffffffffffffffffffffffffffffffffff90811660009081526006602090815260408083206060880151909416835292905220541115612021576006611f6b565b60038152919050565b60005460ff1615612067576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016106c69061576d565b600080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0016600117905561209b81612eff565b50600080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00169055565b600660209081526000928352604080842090915290825290205481565b60018054604080516020600284861615610100027fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0190941693909304601f810184900484028201840190925281815292918301828280156121865780601f1061215b57610100808354040283529160200191612186565b820191906000526020600020905b81548152906001019060200180831161216957829003601f168201915b505050505081565b612196614386565b606060008060006121a5614386565b8860008151811015156121b457fe5b906020019060200201516101600151945088519350600092505b828414612257578489848151811015156121e457fe5b906020019060200201516101600181905250612204888760200151612f7d565b9150612230898481518110151561221757fe5b90602001906020020151838986815181101515610e8e57fe5b905061223c868261257d565b6020860151881161224c57612257565b6001909201916121ce565b50505050509392505050565b60025481565b612271614386565b6060600080600080612281614386565b60005460ff16156122be576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016106c69061576d565b600080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001660011781558a518b919081106122f657fe5b9
06020019060200201516101400151955089519450600093505b8385146123a557858a8581518110151561232657fe5b6020908102909101015161014001528651612342908a90612f7d565b92506123558a8581518110151561196457fe5b91506123818a8581518110151561236857fe5b90602001906020020151838a8781518110151561074957fe5b905061238d878261257d565b8651891161239a576123a5565b600190930192612310565b5050600080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0016905550929695505050505050565b600a5473ffffffffffffffffffffffffffffffffffffffff1681565b60035473ffffffffffffffffffffffffffffffffffffffff163314612448576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016106c69061577d565b73ffffffffffffffffffffffffffffffffffffffff8116156124a557600380547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83161790555b50565b60408051808201909152600581527f322e302e30000000000000000000000000000000000000000000000000000000602082015281565b6124e7614386565b6124ef6143de565b60008060006124fd88611f30565b93506125076125df565b925061251588858589612611565b6125278860a001518560400151612f7d565b915061253387836136df565b9050612546888589848960000151612990565b61255088826136f5565b945061256788848660200151876040015189612aa9565b612572888487613756565b505050509392505050565b8151815161258b9190613864565b8252602080830151908201516125a19190613864565b6020830152604080830151908201516125ba9190613864565b6040830152606080830151908201516125d39190613864565b60609092019190915250565b600a5460009073ffffffffffffffffffffffffffffffffffffffff16818115612608578161260a565b335b9392505050565b825160ff1660031461264f576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016106c69061579d565b606084015173ffffffffffffffffffffffffffffffffffffffff16156126c257606084015173ffffffffffffffffffffffffffffffffffffffff1633146126c2576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016106c6906157ad565b602084015173ffffffffffffffffffffffffffffffffffffffff16156
1274d578173ffffffffffffffffffffffffffffffffffffffff16846020015173ffffffffffffffffffffffffffffffffffffffff1614151561274d576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016106c6906155ed565b604083015115156127a35761276b836020015185600001518361135b565b15156127a3576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016106c69061565d565b50505050565b6127bb8260a001518260a001516138ae565b6127cd836080015183608001516138ae565b1015612805576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016106c6906157bd565b5050565b6128116143af565b6000806000806128258960a0015188612f7d565b935061283a89608001518a60a0015186613909565b925061284a8860a0015187612f7d565b915061285f88608001518960a0015184613909565b90508084106128a25760208086018051839052805182018490525151865182015260808a015160a08b015187519092015161289a9290613909565b8551526128df565b845183905284516020908101859052855181015190860180519190915260a089015160808a01519151516128d69290613986565b60208087015101525b84515160208087015101516128f49190612f7d565b604086015284515160808a015160c08b0151612911929190613909565b85516040015284516020015160a08a015160e08b0151612932929190613909565b855160600152602085015151608089015160c08a0151612953929190613909565b8560200151604001818152505061297b8560200151602001518960a001518a60e00151613909565b60208601516060015250505050949350505050565b8215156129c9576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016106c6906156dd565b82821115612a03576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016106c6906156cd565b8460a00151612a16856040015184613864565b1115612a4e576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016106c6906155fd565b612a5c8560800151836138ae565b612a6a828760a001516138ae565b1115612aa2576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016106c69061575d565b5050505050565b612ab7828260200151613864565b6000848152600460209081526040918
29020929092558681015187518451938501518584015160608701516101408c01516101608d015196518b9873ffffffffffffffffffffffffffffffffffffffff9788169897909616967f0bcc4c97732e47d9946f229edb95f5b6323f601300e4690de719993f3c37112996612b46968f96339692959194909390615433565b60405180910390a45050505050565b60018054604080516020601f60027fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff6101008789161502019095169490940493840181900481028201810190925282815260609390929091830182828015612bfe5780601f10612bd357610100808354040283529160200191612bfe565b820191906000526020600020905b815481529060010190602001808311612be157829003601f168201915b50505050509050612c2685610140015186600001518660000151856020015160200151613a23565b61014084015184518651845160200151612c4293929190613a23565b612c5b8561014001518660000151858560400151613a23565b612c778186600001518760400151856000015160400151613a23565b612c938185600001518660400151856020015160400151613a23565b836040015173ffffffffffffffffffffffffffffffffffffffff16856040015173ffffffffffffffffffffffffffffffffffffffff161415612cfd57612cf881848760400151612cf3866000015160600151876020015160600151613864565b613a23565b612aa2565b612d1581848760400151856000015160600151613a23565b612aa281848660400151856020015160600151613a23565b604080517fb4be83d5000000000000000000000000000000000000000000000000000000006020808301919091526060602483018181528751608485019081528884015160a48601529488015160c48501529087015160e4840152608087015161010484015260a087015161012484015260c087015161014484015260e08701516101648401526101008701516101848401526101208701516101a4840152610140870180516101c485019081526101608901516101e4860152610180905251805161020485018190529394919384936044870192849261022489019291820191601f82010460005b81811015612e34578351855260209485019490930192600101612e16565b50505050818103610160808401919091528a0151805180835260209283019291820191601f82010460005b81811015612e7d578351855260209485019490930192600101612e5f565b50505089845250848103602093840190815288518083529093918201918981019190601f82010460005b81811015612ec55
78351855260209485019490930192600101612ea7565b5050507fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08883030188525060405250505050509392505050565b612f076143de565b612f1082611f30565b9050612f1c8282613bed565b612805828260200151613d04565b612f32614386565b612f3d8484846124df565b6020810151909150831461260a576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016106c69061574d565b600082821115612fb9576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016106c69061560d565b508082035b92915050565b6000808251111515613002576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016106c6906156fd565b815182907fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff810190811061303257fe5b016020015182517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01909252507f0100000000000000000000000000000000000000000000000000000000000000908190040290565b6000816020018351101515156130ca576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016106c69061562d565b50016020015190565b6040516000906060907f1626ba7e000000000000000000000000000000000000000000000000000000009061310e908790869060240161554e565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0818403018152919052602080820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167fffffffff00000000000000000000000000000000000000000000000000000000909416939093178352815191935090829081885afa8080156131ab576001811461321c57612572565b7f08c379a0000000000000000000000000000000000000000000000000000000006000527c20000000000000000000000000000000000000000000000000000000006020527c0c57414c4c45545f4552524f5200000000000000000000000000000000604052600060605260646000fd5b50505195945050505050565b60006014825110151515613268576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016106c69061578d565b613276826014845103613dab565b82517ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
fffec019092525090565b6040516000906060907f9363470200000000000000000000000000000000000000000000000000000000906132de90879087908790602401615521565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0818403018152919052602080820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167fffffffff000000000000000000000000000000000000000000000000000000009094169390931783528151919350908290818a5afa80801561337b57600181146133ec576133f1565b7f08c379a0000000000000000000000000000000000000000000000000000000006000527c20000000000000000000000000000000000000000000000000000000006020527c0f56414c494441544f525f4552524f5200000000000000000000000000604052600060605260646000fd5b825194505b50505050949350505050565b6000808311613438576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016106c69061564d565b61344b61344585846138ae565b84613e0c565b949350505050565b604080517f5a65726f45785472616e73616374696f6e2800000000000000000000000000006020808301919091527f75696e743235362073616c742c0000000000000000000000000000000000000060328301527f61646472657373207369676e6572416464726573732c00000000000000000000603f8301527f627974657320646174610000000000000000000000000000000000000000000060558301527f2900000000000000000000000000000000000000000000000000000000000000605f830152825180830384018152606090920192839052815160009384938493909282918401908083835b6020831061357c57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0909201916020918201910161353f565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff018019909216911617905260405191909301819003812089519097508995509093508392850191508083835b6020831061361257805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe090920191602091820191016135d5565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040805192909401829003822097825281019a909a525073ffffffffffffffffffffffffffffffffffffffff97909716968801969
096525050606085015250506080909120919050565b600280546040517f190100000000000000000000000000000000000000000000000000000000000081529182015260228101919091526042902090565b6000612fbe611afd83613e23565b60008183106136ee578161260a565b5090919050565b6136fd614386565b6020810182905260a08301516080840151613719918491613909565b808252608084015160c0850151613731929190613909565b604082015260a083015160e084015161374b918491613909565b606082015292915050565b60018054604080516020601f60027fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff61010087891615020190951694909404938401819004810282018101909252828152606093909290918301828280156137ff5780601f106137d4576101008083540402835291602001916137ff565b820191906000526020600020905b8154815290600101906020018083116137e257829003601f168201915b5050505050905061381f8461014001518560000151858560000151613a23565b6138388461016001518486600001518560200151613a23565b61385081856000015186604001518560400151613a23565b6127a3818486604001518560600151613a23565b6000828201838110156138a3576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016106c69061567d565b8091505b5092915050565b6000808315156138c157600091506138a7565b508282028284828115156138d157fe5b04146138a3576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016106c69061567d565b6000808311613944576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016106c69061564d565b61394f84848461427c565b15613438576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016106c6906156ad565b60008083116139c1576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016106c69061564d565b6139cc848484614301565b15613a03576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016106c6906156ad565b61344b613445613a1386856138ae565b613a1e866001612f7d565b613864565b600080600083118015613a6257508373ffffffffffffffffffffffffffffffffffffffff168573ffffffffffffffffffffffffffffffffffffffff1614155b15613be55
78551600310613aa2576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016106c69061573d565b50506020848101517fffffffff00000000000000000000000000000000000000000000000000000000166000818152600b90925260409091205473ffffffffffffffffffffffffffffffffffffffff16801515613b2b576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016106c6906156ed565b604051660fffffffffffe0603f885101168060840182017fa85e59e40000000000000000000000000000000000000000000000000000000083526080600484015273ffffffffffffffffffffffffffffffffffffffff8816602484015273ffffffffffffffffffffffffffffffffffffffff87166044840152856064840152608483015b81811015613bc757895181526020998a019901613baf565b61020084858403866000895af1801515613bdf573d85fd5b50505050505b505050505050565b805160009060ff16600314613c2e576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016106c69061579d565b606083015173ffffffffffffffffffffffffffffffffffffffff1615613ca157606083015173ffffffffffffffffffffffffffffffffffffffff163314613ca1576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016106c6906157ad565b613ca96125df565b835190915073ffffffffffffffffffffffffffffffffffffffff808316911614613cff576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016106c69061566d565b505050565b6000818152600560205260409081902080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001660011790558281015183516101408501516101608601519351859473ffffffffffffffffffffffffffffffffffffffff9485169493909316927fdc47b3613d9fe400085f6dbdc99453462279057e6207385042827ed6b1a62cf792613d9f923392906154b7565b60405180910390a45050565b600081601401835110151515613ded576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016106c69061578d565b50016014015173ffffffffffffffffffffffffffffffffffffffff1690565b6000808284811515613e1a57fe5b04949350505050565b604080517f4f726465722800000000000000000000000000000000000000000000000000006020808301919
091527f61646472657373206d616b6572416464726573732c000000000000000000000060268301527f616464726573732074616b6572416464726573732c0000000000000000000000603b8301527f6164647265737320666565526563697069656e74416464726573732c0000000060508301527f616464726573732073656e646572416464726573732c00000000000000000000606c8301527f75696e74323536206d616b65724173736574416d6f756e742c0000000000000060828301527f75696e743235362074616b65724173736574416d6f756e742c00000000000000609b8301527f75696e74323536206d616b65724665652c00000000000000000000000000000060b48301527f75696e743235362074616b65724665652c00000000000000000000000000000060c58301527f75696e743235362065787069726174696f6e54696d655365636f6e64732c000060d68301527f75696e743235362073616c742c0000000000000000000000000000000000000060f48301527f6279746573206d616b65724173736574446174612c00000000000000000000006101018301527f62797465732074616b65724173736574446174610000000000000000000000006101168301527f290000000000000000000000000000000000000000000000000000000000000061012a830152825161010b81840301815261012b90920192839052815160009384938493849391929182918401908083835b602083106140ab57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0909201916020918201910161406e565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930181900381206101408b0151805191995095509093508392850191508083835b6020831061414657805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101614109565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930181900381206101608b0151805191985095509093508392850191508083835b602083106141e157805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe090920191602091820191016141a4565b5181516020939093036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff018019909116921691909117905260405192018290039091207ffffffffffffffffffffffffffff
fffffffffffffffffffffffffffffffffffe0890180516101408b018051610160909c0180519a84529881529288526101a0822091529890525050509190525090919050565b6000808084116142b8576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016106c69061564d565b8215806142c3575084155b156142d15760009150610a88565b838015156142db57fe5b85840990506142ea85846138ae565b6142f66103e8836138ae565b101595945050505050565b60008080841161433d576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016106c69061564d565b821580614348575084155b156143565760009150610a88565b8380151561436057fe5b8584099050836143708583612f7d565b81151561437957fe5b0690506142ea85846138ae565b608060405190810160405280600081526020016000815260200160008152602001600081525090565b610120604051908101604052806143c4614386565b81526020016143d1614386565b8152602001600081525090565b604080516060810182526000808252602082018190529181019190915290565b600061260a82356158b0565b6000601f8201831361441b57600080fd5b813561442e6144298261583f565b615818565b81815260209384019390925082018360005b8381101561446c578135860161445688826145bc565b8452506020928301929190910190600101614440565b5050505092915050565b6000601f8201831361448757600080fd5b81356144956144298261583f565b81815260209384019390925082018360005b8381101561446c57813586016144bd888261460b565b84525060209283019291909101906001016144a7565b6000601f820183136144e457600080fd5b81356144f26144298261583f565b9150818183526020840193506020810190508385602084028201111561451757600080fd5b60005b8381101561446c578161452d888261454f565b845250602092830192919091019060010161451a565b600061260a82356158c9565b600061260a82356158ce565b600061260a82356158d1565b600061260a82516158d1565b600080601f8301841361458557600080fd5b50813567ffffffffffffffff81111561459d57600080fd5b6020830191508360018202830111156145b557600080fd5b9250929050565b6000601f820183136145cd57600080fd5b81356145db61442982615860565b915080825260208301602083018583830111156145f757600080fd5b614602838284615907565b50505092915050565b6000610180828403121561461e57600080fd5b6
14629610180615818565b9050600061463784846143fe565b8252506020614648848483016143fe565b602083015250604061465c848285016143fe565b6040830152506060614670848285016143fe565b60608301525060806146848482850161454f565b60808301525060a06146988482850161454f565b60a08301525060c06146ac8482850161454f565b60c08301525060e06146c08482850161454f565b60e0830152506101006146d58482850161454f565b610100830152506101206146eb8482850161454f565b6101208301525061014082013567ffffffffffffffff81111561470d57600080fd5b614719848285016145bc565b6101408301525061016082013567ffffffffffffffff81111561473b57600080fd5b614747848285016145bc565b6101608301525092915050565b60006020828403121561476657600080fd5b600061344b84846143fe565b6000806040838503121561478557600080fd5b600061479185856143fe565b92505060206147a2858286016143fe565b9150509250929050565b600080604083850312156147bf57600080fd5b60006147cb85856143fe565b92505060206147a285828601614543565b6000602082840312156147ee57600080fd5b813567ffffffffffffffff81111561480557600080fd5b61344b84828501614476565b60008060006060848603121561482657600080fd5b833567ffffffffffffffff81111561483d57600080fd5b61484986828701614476565b935050602084013567ffffffffffffffff81111561486657600080fd5b614872868287016144d3565b925050604084013567ffffffffffffffff81111561488f57600080fd5b61489b8682870161440a565b9150509250925092565b6000806000606084860312156148ba57600080fd5b833567ffffffffffffffff8111156148d157600080fd5b6148dd86828701614476565b93505060206148728682870161454f565b60006020828403121561490057600080fd5b600061344b848461454f565b6000806040838503121561491f57600080fd5b6000614791858561454f565b6000806000806060858703121561494157600080fd5b600061494d878761454f565b945050602061495e878288016143fe565b935050604085013567ffffffffffffffff81111561497b57600080fd5b61498787828801614573565b95989497509550505050565b6000806000606084860312156149a857600080fd5b60006149b4868661454f565b93505060206149c5868287016143fe565b925050604084013567ffffffffffffffff8111156149e257600080fd5b61489b868287016145bc565b600060208284031215614a0057600080fd5b600061344b848
461455b565b600060208284031215614a1e57600080fd5b600061344b8484614567565b600060208284031215614a3c57600080fd5b813567ffffffffffffffff811115614a5357600080fd5b61344b8482850161460b565b60008060008060808587031215614a7557600080fd5b843567ffffffffffffffff811115614a8c57600080fd5b614a988782880161460b565b945050602085013567ffffffffffffffff811115614ab557600080fd5b614ac18782880161460b565b935050604085013567ffffffffffffffff811115614ade57600080fd5b614aea878288016145bc565b925050606085013567ffffffffffffffff811115614b0757600080fd5b614b13878288016145bc565b91505092959194509250565b600080600060608486031215614b3457600080fd5b833567ffffffffffffffff811115614b4b57600080fd5b614b578682870161460b565b93505060206149c58682870161454f565b60008060008060008060808789031215614b8157600080fd5b6000614b8d898961454f565b9650506020614b9e89828a016143fe565b955050604087013567ffffffffffffffff811115614bbb57600080fd5b614bc789828a01614573565b9450945050606087013567ffffffffffffffff811115614be657600080fd5b614bf289828a01614573565b92509250509295509295509295565b614c0a816158b0565b82525050565b6000614c1b826158ac565b808452602084019350614c2d836158a6565b60005b82811015614c5d57614c438683516153e5565b614c4c826158a6565b606096909601959150600101614c30565b5093949350505050565b614c0a816158c9565b614c0a816158ce565b614c0a816158d1565b6000614c8d826158ac565b808452614ca1816020860160208601615913565b614caa8161593f565b9093016020019392505050565b614c0a816158fc565b601281527f4c454e4754485f36355f52455155495245440000000000000000000000000000602082015260400190565b600d81527f494e56414c49445f54414b455200000000000000000000000000000000000000602082015260400190565b600e81527f4f524445525f4f56455246494c4c000000000000000000000000000000000000602082015260400190565b601181527f55494e543235365f554e444552464c4f57000000000000000000000000000000602082015260400190565b601a81527f41535345545f50524f58595f414c52454144595f455849535453000000000000602082015260400190565b602681527f475245415445525f4f525f455155414c5f544f5f33325f4c454e4754485f524560208201527f515549524544000000000000000000000000000
0000000000000000000000000604082015260600190565b601581527f5349474e41545552455f554e535550504f525445440000000000000000000000602082015260400190565b601081527f4449564953494f4e5f42595f5a45524f00000000000000000000000000000000602082015260400190565b601781527f494e56414c49445f4f524445525f5349474e4154555245000000000000000000602082015260400190565b600d81527f494e56414c49445f4d414b455200000000000000000000000000000000000000602082015260400190565b601081527f55494e543235365f4f564552464c4f5700000000000000000000000000000000602082015260400190565b600f81527f494e56414c49445f54585f484153480000000000000000000000000000000000602082015260400190565b601181527f494e56414c49445f5349474e4154555245000000000000000000000000000000602082015260400190565b600e81527f524f554e44494e475f4552524f52000000000000000000000000000000000000602082015260400190565b601081527f4641494c45445f455845435554494f4e00000000000000000000000000000000602082015260400190565b600d81527f54414b45525f4f56455250415900000000000000000000000000000000000000602082015260400190565b601481527f494e56414c49445f54414b45525f414d4f554e54000000000000000000000000602082015260400190565b601a81527f41535345545f50524f58595f444f45535f4e4f545f4558495354000000000000602082015260400190565b602181527f475245415445525f5448414e5f5a45524f5f4c454e4754485f5245515549524560208201527f4400000000000000000000000000000000000000000000000000000000000000604082015260600190565b601181527f5349474e41545552455f494c4c4547414c000000000000000000000000000000602082015260400190565b601e81527f4c454e4754485f475245415445525f5448414e5f305f52455155495245440000602082015260400190565b601781527f494e56414c49445f4e45575f4f524445525f45504f4348000000000000000000602082015260400190565b601e81527f4c454e4754485f475245415445525f5448414e5f335f52455155495245440000602082015260400190565b601481527f434f4d504c4554455f46494c4c5f4641494c4544000000000000000000000000602082015260400190565b601281527f494e56414c49445f46494c4c5f50524943450000000000000000000000000000602082015260400190565b601281527f5245454e5452414e43595f494c4c4547414c0000000
000000000000000000000602082015260400190565b601381527f4f4e4c595f434f4e54524143545f4f574e455200000000000000000000000000602082015260400190565b602681527f475245415445525f4f525f455155414c5f544f5f32305f4c454e4754485f524560208201527f5155495245440000000000000000000000000000000000000000000000000000604082015260600190565b601081527f4f524445525f554e46494c4c41424c4500000000000000000000000000000000602082015260400190565b600e81527f494e56414c49445f53454e444552000000000000000000000000000000000000602082015260400190565b601881527f4e454741544956455f5350524541445f52455155495245440000000000000000602082015260400190565b601481527f494e56414c49445f54585f5349474e4154555245000000000000000000000000602082015260400190565b601181527f4c454e4754485f305f5245515549524544000000000000000000000000000000602082015260400190565b805160808301906153738482614c70565b5060208201516153866020850182614c70565b5060408201516153996040850182614c70565b5060608201516127a36060850182614c70565b80516101208301906153be8482615362565b5060208201516153d16080850182615362565b5060408201516127a3610100850182614c70565b805160608301906153f6848261541c565b5060208201516154096020850182614c70565b5060408201516127a36040850182614c70565b614c0a816158f6565b60208101612fbe8284614c01565b6101008101615442828b614c01565b61544f602083018a614c01565b61545c6040830189614c70565b6154696060830188614c70565b6154766080830187614c70565b61548360a0830186614c70565b81810360c08301526154958185614c82565b905081810360e08301526154a98184614c82565b9a9950505050505050505050565b606081016154c58286614c01565b81810360208301526154d78185614c82565b905081810360408301526154eb8184614c82565b95945050505050565b6020808252810161260a8184614c10565b60208101612fbe8284614c67565b60208101612fbe8284614c70565b6060810161552f8286614c70565b61553c6020830185614c01565b81810360408301526154eb8184614c82565b6040810161555c8285614c70565b818103602083015261344b8184614c82565b6080810161557c8287614c70565b615589602083018661541c565b6155966040830185614c70565b6154eb6060830184614c70565b604081016155b18285614c79565b61260a6020830184614c01565b6
020808252810161260a8184614c82565b60208101612fbe8284614cb7565b60208082528101612fbe81614cc0565b60208082528101612fbe81614cf0565b60208082528101612fbe81614d20565b60208082528101612fbe81614d50565b60208082528101612fbe81614d80565b60208082528101612fbe81614db0565b60208082528101612fbe81614e06565b60208082528101612fbe81614e36565b60208082528101612fbe81614e66565b60208082528101612fbe81614e96565b60208082528101612fbe81614ec6565b60208082528101612fbe81614ef6565b60208082528101612fbe81614f26565b60208082528101612fbe81614f56565b60208082528101612fbe81614f86565b60208082528101612fbe81614fb6565b60208082528101612fbe81614fe6565b60208082528101612fbe81615016565b60208082528101612fbe81615046565b60208082528101612fbe8161509c565b60208082528101612fbe816150cc565b60208082528101612fbe816150fc565b60208082528101612fbe8161512c565b60208082528101612fbe8161515c565b60208082528101612fbe8161518c565b60208082528101612fbe816151bc565b60208082528101612fbe816151ec565b60208082528101612fbe8161521c565b60208082528101612fbe81615272565b60208082528101612fbe816152a2565b60208082528101612fbe816152d2565b60208082528101612fbe81615302565b60208082528101612fbe81615332565b60808101612fbe8284615362565b6101208101612fbe82846153ac565b60608101612fbe82846153e5565b60405181810167ffffffffffffffff8111828210171561583757600080fd5b604052919050565b600067ffffffffffffffff82111561585657600080fd5b5060209081020190565b600067ffffffffffffffff82111561587757600080fd5b506020601f919091017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0160190565b60200190565b5190565b73ffffffffffffffffffffffffffffffffffffffff1690565b151590565b90565b7fffffffff000000000000000000000000000000000000000000000000000000001690565b60ff1690565b6000612fbe826158b0565b82818337506000910152565b60005b8381101561592e578181015183820152602001615916565b838111156127a35750506000910152565b601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016905600a265627a7a72305820d41ee66f45c4d1637cb6e5f109447c6d5d7fef3204a685dc442151c0f029b7da6c6578706572696d656e74616cf50037", + 
"storage": { + "0x1458d05345aa0372fb580f207529f32cbb6e9242890d36a93225785d4496083e": "0x0000000000000000000000001dc4c1cefef38a777b15aa20260a54e584b16c48" + } + }, + "0x5409ed021d9299bf6814279a6a1411a7e866a631": { + "balance": "0xac6bd1cc338c2000", + "nonce": "22", + "code": "0x", + "storage": {} + }, + "0x871dd7c2b4b25e1aa18728e9d5f2af4c4e431f5c": { + "balance": "0x0", + "nonce": "1", + "code": "0x606060405236156100965763ffffffff7c010000000000000000000000000000000000000000000000000000000060003504166306fdde038114610098578063095ea7b31461014657806318160ddd1461018657806323b872dd146101a8578063313ce567146101ee57806370a082311461021457806395d89b411461024f578063a9059cbb146102fd578063dd62ed3e1461033d575bfe5b34156100a057fe5b6100a861037e565b60408051602080825283518183015283519192839290830191850190808383821561010c575b80518252602083111561010c577fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe090920191602091820191016100ce565b505050905090810190601f1680156101385780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b341561014e57fe5b61017273ffffffffffffffffffffffffffffffffffffffff600435166024356103b5565b604080519115158252519081900360200190f35b341561018e57fe5b61019661042d565b60408051918252519081900360200190f35b34156101b057fe5b61017273ffffffffffffffffffffffffffffffffffffffff60043581169060243516604435610433565b604080519115158252519081900360200190f35b34156101f657fe5b6101fe6105d4565b6040805160ff9092168252519081900360200190f35b341561021c57fe5b61019673ffffffffffffffffffffffffffffffffffffffff600435166105d9565b60408051918252519081900360200190f35b341561025757fe5b6100a8610605565b60408051602080825283518183015283519192839290830191850190808383821561010c575b80518252602083111561010c577fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe090920191602091820191016100ce565b505050905090810190601f1680156101385780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b341561030557fe5b61017273ffffffffffffffffffffffffffffffffffffffff600
4351660243561063c565b604080519115158252519081900360200190f35b341561034557fe5b61019673ffffffffffffffffffffffffffffffffffffffff60043581169060243516610727565b60408051918252519081900360200190f35b60408051808201909152601181527f30782050726f746f636f6c20546f6b656e000000000000000000000000000000602082015281565b73ffffffffffffffffffffffffffffffffffffffff338116600081815260016020908152604080832094871680845294825280832086905580518681529051929493927f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925929181900390910190a35060015b92915050565b60035481565b73ffffffffffffffffffffffffffffffffffffffff808416600081815260016020908152604080832033909516835293815283822054928252819052918220548390108015906104835750828110155b80156104b6575073ffffffffffffffffffffffffffffffffffffffff841660009081526020819052604090205483810110155b156105c65773ffffffffffffffffffffffffffffffffffffffff808516600090815260208190526040808220805487019055918716815220805484900390557fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8110156105585773ffffffffffffffffffffffffffffffffffffffff808616600090815260016020908152604080832033909416835292905220805484900390555b8373ffffffffffffffffffffffffffffffffffffffff168573ffffffffffffffffffffffffffffffffffffffff167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef856040518082815260200191505060405180910390a3600191506105cb565b600091505b5b509392505050565b601281565b73ffffffffffffffffffffffffffffffffffffffff81166000908152602081905260409020545b919050565b60408051808201909152600381527f5a52580000000000000000000000000000000000000000000000000000000000602082015281565b73ffffffffffffffffffffffffffffffffffffffff3316600090815260208190526040812054829010801590610699575073ffffffffffffffffffffffffffffffffffffffff831660009081526020819052604090205482810110155b156107185773ffffffffffffffffffffffffffffffffffffffff33811660008181526020818152604080832080548890039055938716808352918490208054870190558351868152935191937fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef9
29081900390910190a3506001610427565b506000610427565b5b92915050565b73ffffffffffffffffffffffffffffffffffffffff8083166000908152600160209081526040808320938516835292905220545b929150505600a165627a7a723058201b5b70cf82a73dec658c2e60ab9a0f8e2ba01a74b66a6f5b0402f56d2ea0ffcf0029", + "storage": { + "0xd37b858806ebf992fe75c1dd1a61cc7625ea52328d19005ba6b8b62506ae5306": "0x0000000000000000000000000000000000000000000000000000000000000000" + } + } + }, + "config": { + "chainId": 5, + "supportedProtocolVersions": [ + 67, + 66 + ], + "homesteadBlock": 0, + "daoForkSupport": true, + "eip150Block": 0, + "eip150Hash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "eip155Block": 0, + "eip158Block": 0, + "byzantiumBlock": 0, + "constantinopleBlock": 0, + "petersburgBlock": 0, + "istanbulBlock": 1561651, + "berlinBlock": 4460644, + "londonBlock": 5062605, + "terminalTotalDifficulty": 10790000, + "terminalTotalDifficultyPassed": true, + "clique": { + "period": 15, + "epoch": 30000 + }, + "trustedCheckpoint": { + "sectionIndex": 210, + "sectionHead": "0xbb11eaf551a6c06f74a6c7bbfe1699cbf64b8f248b64691da916dd443176db2f", + "chtRoot": "0x9934ae326d00d9c7de2e074c0e51689efb7fa7fcba18929ff4279c27259c45e6", + "bloomRoot": "0x7fe3bd4fd45194aa8a5cfe5ac590edff1f870d3d98d3c310494e7f67613a87ff" + }, + "trustedCheckpointOracle": { + "address": "0x18ca0e045f0d772a851bc7e48357bcaab0a0795d", + "signers": [ + "0x4769bcad07e3b938b7f43eb7d278bc7cb9effb38", + "0x78d1ad571a1a09d60d9bbf25894b44e4c8859595", + "0x286834935f4a8cfb4ff4c77d5770c2775ae2b0e7", + "0xb86e2b0ab5a4b1373e40c51a7c712c70ba2f9f8e", + "0x0df8fa387c602ae62559cc4afa4972a7045d6707" + ], + "threshold": 2 + } + } + }, + "context": { + "number": "13536", + "difficulty": "1", + "timestamp": "1549153018", + "gasLimit": "8000000", + "miner": "0x0000000000000000000000000000000000000000", + "transactionHash": "0x6974f745a004f030bebb1c01d4595edbda2fafcf01c0bfbd5d335711e2a7b04e" + }, + "input": 
"0xf92e9e1684ee6b2800832c8c7f8080b92e4c60806040523480156200001157600080fd5b5060405162002d2c38038062002d2c83398101806040526200003791908101906200051d565b6000805433600160a060020a031991821617825560018054909116600160a060020a0386161790558251849084908490849081906200007e906004906020870190620003d0565b50825162000094906005906020860190620003d0565b50620000b0836010640100000000620019476200036f82021704565b9150620000cd846010640100000000620019476200036f82021704565b60028054600160a060020a03948516600160a060020a031991821617909155600380549285169290911691909117905550600154604080517f4552433230546f6b656e28616464726573732900000000000000000000000000815290519081900360130181207f6070410800000000000000000000000000000000000000000000000000000000825291909216945063607041089350620001739250906004016200068e565b602060405180830381600087803b1580156200018e57600080fd5b505af1158015620001a3573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250620001c99190810190620004f4565b9050600160a060020a038116151562000219576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016200021090620006b0565b60405180910390fd5b6002546040517f095ea7b3000000000000000000000000000000000000000000000000000000008152600160a060020a039091169063095ea7b39062000268908490600019906004016200066f565b602060405180830381600087803b1580156200028357600080fd5b505af115801562000298573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250620002be9190810190620005a1565b506003546040517f095ea7b3000000000000000000000000000000000000000000000000000000008152600160a060020a039091169063095ea7b3906200030e908490600019906004016200066f565b602060405180830381600087803b1580156200032957600080fd5b505af11580156200033e573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250620003649190810190620005a1565b50505050506200077a565b600081601401835110151515620003b4576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040162000210906200069e565b506014818301810151910190600160a060020a03165b92915050565b8280546
00181600116156101000203166002900490600052602060002090601f016020900481019282601f106200041357805160ff191683800117855562000443565b8280016001018555821562000443579182015b828111156200044357825182559160200191906001019062000426565b506200045192915062000455565b5090565b6200047291905b808211156200045157600081556001016200045c565b90565b600062000483825162000711565b9392505050565b600062000483825162000742565b6000601f82018313620004aa57600080fd5b8151620004c1620004bb82620006e9565b620006c2565b91508082526020830160208301858383011115620004de57600080fd5b620004eb83828462000747565b50505092915050565b6000602082840312156200050757600080fd5b600062000515848462000475565b949350505050565b6000806000606084860312156200053357600080fd5b600062000541868662000475565b93505060208401516001604060020a038111156200055e57600080fd5b6200056c8682870162000498565b92505060408401516001604060020a038111156200058957600080fd5b620005978682870162000498565b9150509250925092565b600060208284031215620005b457600080fd5b60006200051584846200048a565b620005cd8162000711565b82525050565b620005cd816200071d565b602681527f475245415445525f4f525f455155414c5f544f5f32305f4c454e4754485f524560208201527f5155495245440000000000000000000000000000000000000000000000000000604082015260600190565b601881527f554e524547495354455245445f41535345545f50524f58590000000000000000602082015260400190565b620005cd8162000472565b604081016200067f8285620005c2565b62000483602083018462000664565b60208101620003ca8284620005d3565b60208082528101620003ca81620005de565b60208082528101620003ca8162000634565b6040518181016001604060020a0381118282101715620006e157600080fd5b604052919050565b60006001604060020a038211156200070057600080fd5b506020601f91909101601f19160190565b600160a060020a031690565b7fffffffff000000000000000000000000000000000000000000000000000000001690565b151590565b60005b83811015620007645781810151838201526020016200074a565b8381111562000774576000848401525b50505050565b6125a2806200078a6000396000f30060806040526004361061006c5763ffffffff7c010000000000000000000000000000000000000000000000000000000060003
504166318978e8281146100c8578063630f1e6c146100f25780638da5cb5b146101125780639395525c14610134578063f2fde38b14610147575b60025473ffffffffffffffffffffffffffffffffffffffff1633146100c6576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd90612388565b60405180910390fd5b005b6100db6100d6366004611df1565b610167565b6040516100e9929190612488565b60405180910390f35b3480156100fe57600080fd5b506100c661010d366004611eec565b6102f7565b34801561011e57600080fd5b50610127610388565b6040516100e99190612337565b6100db610142366004611d0b565b6103a4565b34801561015357600080fd5b506100c6610162366004611ce5565b61050a565b61016f6119fa565b6101776119fa565b6000806101826105bb565b60048054604080516020601f60027fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff610100600188161502019095169490940493840181900481028201810190925282815261025c939092909183018282801561022d5780601f106102025761010080835404028352916020019161022d565b820191906000526020600020905b81548152906001019060200180831161021057829003601f168201915b50505050508c600081518110151561024157fe5b6020908102909101015161014001519063ffffffff61069616565b156102875761026c8b8b8b6107c3565b935061028084600001518560600151610ac1565b90506102ae565b6102928b8b8b610b03565b9350836060015191506102a68883896107c3565b845190935090505b6102c2846020015184602001518888610d15565b6102e98b60008151811015156102d457fe5b90602001906020020151610140015182610f29565b505097509795505050505050565b60005473ffffffffffffffffffffffffffffffffffffffff163314610348576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd90612438565b61038383838080601f01602080910402602001604051908101604052809392919081815260200183838082843750879450610f299350505050565b505050565b60005473ffffffffffffffffffffffffffffffffffffffff1681565b6103ac6119fa565b6103b46119fa565b60008060006103c16105bb565b60048054604080516020601f60027fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff61010060018816150201909516949094049384018190048102820181019092528281526104419390929
09183018282801561022d5780601f106102025761010080835404028352916020019161022d565b156104925761046a670de0b6b3a7640000610464670de0b6b3a76400008a611045565b3461108f565b92506104778b848c6110e7565b945061048b85600001518660600151610ac1565b90506104d6565b6104ad670d2f13f7789f0000670de0b6b3a76400003461108f565b92506104ba8b848c6110e7565b9450846060015191506104ce89838a6107c3565b855190945090505b6104ea856020015185602001518989610d15565b6104fc8b60008151811015156102d457fe5b505050965096945050505050565b60005473ffffffffffffffffffffffffffffffffffffffff16331461055b576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd90612438565b73ffffffffffffffffffffffffffffffffffffffff8116156105b857600080547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83161790555b50565b600034116105f5576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd90612398565b600260009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1663d0e30db0346040518263ffffffff167c01000000000000000000000000000000000000000000000000000000000281526004016000604051808303818588803b15801561067b57600080fd5b505af115801561068f573d6000803e3d6000fd5b5050505050565b6000815183511480156107ba5750816040518082805190602001908083835b602083106106f257805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe090920191602091820191016106b5565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0180199092169116179052604051919093018190038120885190955088945090928392508401908083835b6020831061078757805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0909201916020918201910161074a565b6001836020036101000a038019825116818451168082178552505050505050905001915050604051809103902060001916145b90505b92915050565b6107cb6119fa565b60608060008060008060006107de6119fa565b8a15156107ea57610ab2565b6004805460408051602060026001851615610100027ffff
fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0190941693909304601f8101849004840282018401909252818152929183018282801561088e5780601f106108635761010080835404028352916020019161088e565b820191906000526020600020905b81548152906001019060200180831161087157829003601f168201915b505060058054604080516020601f60027fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff6101006001881615020190951694909404938401819004810282018101909252828152969e509194509250840190508282801561093d5780601f106109125761010080835404028352916020019161093d565b820191906000526020600020905b81548152906001019060200180831161092057829003601f168201915b50505050509650600095508b519450600093505b838514610a7857878c8581518110151561096757fe5b6020908102909101015161014001528b5187908d908690811061098657fe5b60209081029091010151610160015261099f8b87610ac1565b9250610a068c858151811015156109b257fe5b9060200190602002015160a00151610a008e878151811015156109d157fe5b90602001906020020151608001518f888151811015156109ed57fe5b9060200190602002015160e00151610ac1565b8561128b565b9150610a418c85815181101515610a1957fe5b90602001906020020151838c87815181101515610a3257fe5b906020019060200201516112e6565b9050610a4d898261135e565b610a5f89600001518a60600151610ac1565b95508a8610610a6d57610a78565b600190930192610951565b8a861015610ab2576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd90612418565b50505050505050509392505050565b600082821115610afd576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd906123b8565b50900390565b610b0b6119fa565b606080600080600080610b1c6119fa565b60008b6000815181101515610b2d57fe5b6020908102919091018101516101400151600580546040805160026001841615610100027fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0190931692909204601f8101869004860283018601909152808252929b5092909190830182828015610be55780601f10610bba57610100808354040283529160200191610be5565b820191906000526020600020905b815481529060010190602001808311610bc857829003601f168201915b505050505
096508b519550600094505b848614610cdb57878c86815181101515610c0b57fe5b6020908102909101015161014001528b5187908d9087908110610c2a57fe5b6020908102909101015161016001528851610c46908c90610ac1565b9350610c898c86815181101515610c5957fe5b9060200190602002015160a001518d87815181101515610c7557fe5b90602001906020020151608001518661128b565b9250610cb58c86815181101515610c9c57fe5b90602001906020020151848c88815181101515610a3257fe5b9150610cc1898361135e565b5087518a8110610cd057610cdb565b600190940193610bf5565b8a811015610ab2576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd90612418565b600080808066b1a2bc2ec50000861115610d5b576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd90612448565b610d658888611045565b935034841115610da1576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd906123a8565b610dab3485610ac1565b9250610dc086670de0b6b3a76400008a61108f565b915082821115610dfc576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd90612428565b6000831115610f1f576002546040517f2e1a7d4d00000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff90911690632e1a7d4d90610e5b9086906004016124a4565b600060405180830381600087803b158015610e7557600080fd5b505af1158015610e89573d6000803e3d6000fd5b505050506000821115610edb5760405173ffffffffffffffffffffffffffffffffffffffff86169083156108fc029084906000818181858888f19350505050158015610ed9573d6000803e3d6000fd5b505b610ee58383610ac1565b90506000811115610f1f57604051339082156108fc029083906000818181858888f19350505050158015610f1d573d6000803e3d6000fd5b505b5050505050505050565b6000610f3b838263ffffffff6113c016565b604080517f4552433230546f6b656e28616464726573732900000000000000000000000000815290519081900360130190209091507fffffffff0000000000000000000000000000000000000000000000000000000080831691161415610fab57610fa6838361142d565b610383565b604080517f455243373231546f6b656e28616464726573732c75696e743235362900000000815
2905190819003601c0190207fffffffff000000000000000000000000000000000000000000000000000000008281169116141561101357610fa6838361161b565b6040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd906123f8565b600082820183811015611084576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd906123e8565b8091505b5092915050565b60008083116110ca576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd906123d8565b6110dd6110d78584611703565b8461175e565b90505b9392505050565b6110ef6119fa565b60608060008060006110ff6119fa565b89600081518110151561110e57fe5b6020908102919091018101516101400151600580546040805160026001841615610100027fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0190931692909204601f8101869004860283018601909152808252929950929091908301828280156111c65780601f1061119b576101008083540402835291602001916111c6565b820191906000526020600020905b8154815290600101906020018083116111a957829003601f168201915b5050505050945089519350600092505b82841461127e57858a848151811015156111ec57fe5b602090810290910101516101400152895185908b908590811061120b57fe5b90602001906020020151610160018190525061122b898860200151610ac1565b91506112578a8481518110151561123e57fe5b90602001906020020151838a86815181101515610a3257fe5b9050611263878261135e565b602087015189116112735761127e565b6001909201916111d6565b5050505050509392505050565b60008083116112c6576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd906123d8565b6110dd6110d76112d68685611703565b6112e1866001610ac1565b611045565b6112ee6119fa565b606060006112fd868686611775565b600154815191935073ffffffffffffffffffffffffffffffffffffffff1691506080908390602082016000855af1801561135457825184526020830151602085015260408301516040850152606083015160608501525b5050509392505050565b8151815161136c9190611045565b8252602080830151908201516113829190611045565b60208301526040808301519082015161139b9190611045565b6040830152606080830151908201516113b49190611045565b60609092019
190915250565b600081600401835110151515611402576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd90612468565b5001602001517fffffffff000000000000000000000000000000000000000000000000000000001690565b60008061144184601063ffffffff61194716565b604080517f7472616e7366657228616464726573732c75696e7432353629000000000000008152905190819003601901812091935073ffffffffffffffffffffffffffffffffffffffff8416919061149f903390879060240161236d565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08184030181529181526020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167fffffffff000000000000000000000000000000000000000000000000000000009094169390931783525181519192909182919080838360005b8381101561154357818101518382015260200161152b565b50505050905090810190601f1680156115705780820380516001836020036101000a031916815260200191505b509150506000604051808303816000865af1925050508015156115bf576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd90612408565b3d156115dc575060003d602014156115dc5760206000803e506000515b801515611615576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd90612408565b50505050565b60008060018314611658576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd90612478565b61166984601063ffffffff61194716565b915061167c84602463ffffffff6119a816565b6040517f23b872dd00000000000000000000000000000000000000000000000000000000815290915073ffffffffffffffffffffffffffffffffffffffff8316906323b872dd906116d590309033908690600401612345565b600060405180830381600087803b1580156116ef57600080fd5b505af1158015610f1f573d6000803e3d6000fd5b6000808315156117165760009150611088565b5082820282848281151561172657fe5b0414611084576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd906123e8565b600080828481151561176c57fe5b04949350505050565b604080517fb4be83d500000000000000000000000000000000000000000000000000000000602080830191909
1526060602483018181528751608485019081528884015160a48601529488015160c48501529087015160e4840152608087015161010484015260a087015161012484015260c087015161014484015260e08701516101648401526101008701516101848401526101208701516101a4840152610140870180516101c485019081526101608901516101e4860152610180905251805161020485018190529394919384936044870192849261022489019291820191601f82010460005b8181101561187c57835185526020948501949093019260010161185e565b50505050818103610160808401919091528a0151805180835260209283019291820191601f82010460005b818110156118c55783518552602094850194909301926001016118a7565b50505089845250848103602093840190815288518083529093918201918981019190601f82010460005b8181101561190d5783518552602094850194909301926001016118ef565b5050507fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08883030188525060405250505050509392505050565b600081601401835110151515611989576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd90612458565b50016014015173ffffffffffffffffffffffffffffffffffffffff1690565b60006107ba83836000816020018351101515156119f1576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd906123c8565b50016020015190565b608060405190810160405280600081526020016000815260200160008152602001600081525090565b60006107ba8235612540565b6000601f82018313611a4057600080fd5b8135611a53611a4e826124d9565b6124b2565b81815260209384019390925082018360005b83811015611a915781358601611a7b8882611b41565b8452506020928301929190910190600101611a65565b5050505092915050565b6000601f82018313611aac57600080fd5b8135611aba611a4e826124d9565b81815260209384019390925082018360005b83811015611a915781358601611ae28882611b90565b8452506020928301929190910190600101611acc565b600080601f83018413611b0a57600080fd5b50813567ffffffffffffffff811115611b2257600080fd5b602083019150836001820283011115611b3a57600080fd5b9250929050565b6000601f82018313611b5257600080fd5b8135611b60611a4e826124fa565b91508082526020830160208301858383011115611b7c57600080fd5b611b8783828461255c565b50505
092915050565b60006101808284031215611ba357600080fd5b611bae6101806124b2565b90506000611bbc8484611a23565b8252506020611bcd84848301611a23565b6020830152506040611be184828501611a23565b6040830152506060611bf584828501611a23565b6060830152506080611c0984828501611cd9565b60808301525060a0611c1d84828501611cd9565b60a08301525060c0611c3184828501611cd9565b60c08301525060e0611c4584828501611cd9565b60e083015250610100611c5a84828501611cd9565b61010083015250610120611c7084828501611cd9565b6101208301525061014082013567ffffffffffffffff811115611c9257600080fd5b611c9e84828501611b41565b6101408301525061016082013567ffffffffffffffff811115611cc057600080fd5b611ccc84828501611b41565b6101608301525092915050565b60006107ba8235612559565b600060208284031215611cf757600080fd5b6000611d038484611a23565b949350505050565b60008060008060008060c08789031215611d2457600080fd5b863567ffffffffffffffff811115611d3b57600080fd5b611d4789828a01611a9b565b965050602087013567ffffffffffffffff811115611d6457600080fd5b611d7089828a01611a2f565b955050604087013567ffffffffffffffff811115611d8d57600080fd5b611d9989828a01611a9b565b945050606087013567ffffffffffffffff811115611db657600080fd5b611dc289828a01611a2f565b9350506080611dd389828a01611cd9565b92505060a0611de489828a01611a23565b9150509295509295509295565b600080600080600080600060e0888a031215611e0c57600080fd5b873567ffffffffffffffff811115611e2357600080fd5b611e2f8a828b01611a9b565b9750506020611e408a828b01611cd9565b965050604088013567ffffffffffffffff811115611e5d57600080fd5b611e698a828b01611a2f565b955050606088013567ffffffffffffffff811115611e8657600080fd5b611e928a828b01611a9b565b945050608088013567ffffffffffffffff811115611eaf57600080fd5b611ebb8a828b01611a2f565b93505060a0611ecc8a828b01611cd9565b92505060c0611edd8a828b01611a23565b91505092959891949750929550565b600080600060408486031215611f0157600080fd5b833567ffffffffffffffff811115611f1857600080fd5b611f2486828701611af8565b93509350506020611f3786828701611cd9565b9150509250925092565b611f4a81612540565b82525050565b602381527f44454641554c545f46554e4354494f4e5f574554485f434f4e5452414
3545f4f60208201527f4e4c590000000000000000000000000000000000000000000000000000000000604082015260600190565b601181527f494e56414c49445f4d53475f56414c5545000000000000000000000000000000602082015260400190565b600d81527f4f564552534f4c445f5745544800000000000000000000000000000000000000602082015260400190565b601181527f55494e543235365f554e444552464c4f57000000000000000000000000000000602082015260400190565b602681527f475245415445525f4f525f455155414c5f544f5f33325f4c454e4754485f524560208201527f5155495245440000000000000000000000000000000000000000000000000000604082015260600190565b601081527f4449564953494f4e5f42595f5a45524f00000000000000000000000000000000602082015260400190565b601081527f55494e543235365f4f564552464c4f5700000000000000000000000000000000602082015260400190565b601781527f554e535550504f525445445f41535345545f50524f5859000000000000000000602082015260400190565b600f81527f5452414e534645525f4641494c45440000000000000000000000000000000000602082015260400190565b601481527f434f4d504c4554455f46494c4c5f4641494c4544000000000000000000000000602082015260400190565b601a81527f494e53554646494349454e545f4554485f52454d41494e494e47000000000000602082015260400190565b601381527f4f4e4c595f434f4e54524143545f4f574e455200000000000000000000000000602082015260400190565b601881527f4645455f50455243454e544147455f544f4f5f4c415247450000000000000000602082015260400190565b602681527f475245415445525f4f525f455155414c5f544f5f32305f4c454e4754485f524560208201527f5155495245440000000000000000000000000000000000000000000000000000604082015260600190565b602581527f475245415445525f4f525f455155414c5f544f5f345f4c454e4754485f52455160208201527f5549524544000000000000000000000000000000000000000000000000000000604082015260600190565b600e81527f494e56414c49445f414d4f554e54000000000000000000000000000000000000602082015260400190565b805160808301906122f9848261232e565b50602082015161230c602085018261232e565b50604082015161231f604085018261232e565b50606082015161161560608501825b611f4a81612559565b602081016107bd8284611f41565b606081016123538286611f41565b6123606020830
185611f41565b611d03604083018461232e565b6040810161237b8285611f41565b6110e0602083018461232e565b602080825281016107bd81611f50565b602080825281016107bd81611fa6565b602080825281016107bd81611fd6565b602080825281016107bd81612006565b602080825281016107bd81612036565b602080825281016107bd8161208c565b602080825281016107bd816120bc565b602080825281016107bd816120ec565b602080825281016107bd8161211c565b602080825281016107bd8161214c565b602080825281016107bd8161217c565b602080825281016107bd816121ac565b602080825281016107bd816121dc565b602080825281016107bd8161220c565b602080825281016107bd81612262565b602080825281016107bd816122b8565b610100810161249782856122e8565b6110e060808301846122e8565b602081016107bd828461232e565b60405181810167ffffffffffffffff811182821017156124d157600080fd5b604052919050565b600067ffffffffffffffff8211156124f057600080fd5b5060209081020190565b600067ffffffffffffffff82111561251157600080fd5b506020601f919091017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0160190565b73ffffffffffffffffffffffffffffffffffffffff1690565b90565b828183375060009101525600a265627a7a72305820d9f418f11e0f91f06f6f9d22924be0add925495eeb76a6388b5417adb505eeb36c6578706572696d656e74616cf5003700000000000000000000000048bacb9266a570d521063ef5dd96e61686dbe788000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000c00000000000000000000000000000000000000000000000000000000000000024f47261b0000000000000000000000000871dd7c2b4b25e1aa18728e9d5f2af4c4e431f5c000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000024f47261b00000000000000000000000000b1ba0af832d7c05fd64161e0db78e85978e8082000000000000000000000000000000000000000000000000000000001ba0a7c6b0c9a5cb47eb4a8449556851a943353640d4fe93a64eb89eff56245c27f1a00e0d13877bfb8842dc394fd206d041b1f76be95a371eff128c8c34812a1b24c8", + "result": [ + { + "action": { + "from": "0x5409ed021d9299bf6814279a6a1411a7e866a631", + "gas": "0x2c8c7f", + "init": 
"0x60806040523480156200001157600080fd5b5060405162002d2c38038062002d2c83398101806040526200003791908101906200051d565b6000805433600160a060020a031991821617825560018054909116600160a060020a0386161790558251849084908490849081906200007e906004906020870190620003d0565b50825162000094906005906020860190620003d0565b50620000b0836010640100000000620019476200036f82021704565b9150620000cd846010640100000000620019476200036f82021704565b60028054600160a060020a03948516600160a060020a031991821617909155600380549285169290911691909117905550600154604080517f4552433230546f6b656e28616464726573732900000000000000000000000000815290519081900360130181207f6070410800000000000000000000000000000000000000000000000000000000825291909216945063607041089350620001739250906004016200068e565b602060405180830381600087803b1580156200018e57600080fd5b505af1158015620001a3573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250620001c99190810190620004f4565b9050600160a060020a038116151562000219576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016200021090620006b0565b60405180910390fd5b6002546040517f095ea7b3000000000000000000000000000000000000000000000000000000008152600160a060020a039091169063095ea7b39062000268908490600019906004016200066f565b602060405180830381600087803b1580156200028357600080fd5b505af115801562000298573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250620002be9190810190620005a1565b506003546040517f095ea7b3000000000000000000000000000000000000000000000000000000008152600160a060020a039091169063095ea7b3906200030e908490600019906004016200066f565b602060405180830381600087803b1580156200032957600080fd5b505af11580156200033e573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250620003649190810190620005a1565b50505050506200077a565b600081601401835110151515620003b4576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040162000210906200069e565b506014818301810151910190600160a060020a03165b92915050565b8280546001816001161561010002031660029004906
00052602060002090601f016020900481019282601f106200041357805160ff191683800117855562000443565b8280016001018555821562000443579182015b828111156200044357825182559160200191906001019062000426565b506200045192915062000455565b5090565b6200047291905b808211156200045157600081556001016200045c565b90565b600062000483825162000711565b9392505050565b600062000483825162000742565b6000601f82018313620004aa57600080fd5b8151620004c1620004bb82620006e9565b620006c2565b91508082526020830160208301858383011115620004de57600080fd5b620004eb83828462000747565b50505092915050565b6000602082840312156200050757600080fd5b600062000515848462000475565b949350505050565b6000806000606084860312156200053357600080fd5b600062000541868662000475565b93505060208401516001604060020a038111156200055e57600080fd5b6200056c8682870162000498565b92505060408401516001604060020a038111156200058957600080fd5b620005978682870162000498565b9150509250925092565b600060208284031215620005b457600080fd5b60006200051584846200048a565b620005cd8162000711565b82525050565b620005cd816200071d565b602681527f475245415445525f4f525f455155414c5f544f5f32305f4c454e4754485f524560208201527f5155495245440000000000000000000000000000000000000000000000000000604082015260600190565b601881527f554e524547495354455245445f41535345545f50524f58590000000000000000602082015260400190565b620005cd8162000472565b604081016200067f8285620005c2565b62000483602083018462000664565b60208101620003ca8284620005d3565b60208082528101620003ca81620005de565b60208082528101620003ca8162000634565b6040518181016001604060020a0381118282101715620006e157600080fd5b604052919050565b60006001604060020a038211156200070057600080fd5b506020601f91909101601f19160190565b600160a060020a031690565b7fffffffff000000000000000000000000000000000000000000000000000000001690565b151590565b60005b83811015620007645781810151838201526020016200074a565b8381111562000774576000848401525b50505050565b6125a2806200078a6000396000f30060806040526004361061006c5763ffffffff7c010000000000000000000000000000000000000000000000000000000060003504166318978e8281146100c8578063630f1
e6c146100f25780638da5cb5b146101125780639395525c14610134578063f2fde38b14610147575b60025473ffffffffffffffffffffffffffffffffffffffff1633146100c6576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd90612388565b60405180910390fd5b005b6100db6100d6366004611df1565b610167565b6040516100e9929190612488565b60405180910390f35b3480156100fe57600080fd5b506100c661010d366004611eec565b6102f7565b34801561011e57600080fd5b50610127610388565b6040516100e99190612337565b6100db610142366004611d0b565b6103a4565b34801561015357600080fd5b506100c6610162366004611ce5565b61050a565b61016f6119fa565b6101776119fa565b6000806101826105bb565b60048054604080516020601f60027fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff610100600188161502019095169490940493840181900481028201810190925282815261025c939092909183018282801561022d5780601f106102025761010080835404028352916020019161022d565b820191906000526020600020905b81548152906001019060200180831161021057829003601f168201915b50505050508c600081518110151561024157fe5b6020908102909101015161014001519063ffffffff61069616565b156102875761026c8b8b8b6107c3565b935061028084600001518560600151610ac1565b90506102ae565b6102928b8b8b610b03565b9350836060015191506102a68883896107c3565b845190935090505b6102c2846020015184602001518888610d15565b6102e98b60008151811015156102d457fe5b90602001906020020151610140015182610f29565b505097509795505050505050565b60005473ffffffffffffffffffffffffffffffffffffffff163314610348576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd90612438565b61038383838080601f01602080910402602001604051908101604052809392919081815260200183838082843750879450610f299350505050565b505050565b60005473ffffffffffffffffffffffffffffffffffffffff1681565b6103ac6119fa565b6103b46119fa565b60008060006103c16105bb565b60048054604080516020601f60027fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff6101006001881615020190951694909404938401819004810282018101909252828152610441939092909183018282801561022d5780601f1061020
25761010080835404028352916020019161022d565b156104925761046a670de0b6b3a7640000610464670de0b6b3a76400008a611045565b3461108f565b92506104778b848c6110e7565b945061048b85600001518660600151610ac1565b90506104d6565b6104ad670d2f13f7789f0000670de0b6b3a76400003461108f565b92506104ba8b848c6110e7565b9450846060015191506104ce89838a6107c3565b855190945090505b6104ea856020015185602001518989610d15565b6104fc8b60008151811015156102d457fe5b505050965096945050505050565b60005473ffffffffffffffffffffffffffffffffffffffff16331461055b576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd90612438565b73ffffffffffffffffffffffffffffffffffffffff8116156105b857600080547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83161790555b50565b600034116105f5576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd90612398565b600260009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1663d0e30db0346040518263ffffffff167c01000000000000000000000000000000000000000000000000000000000281526004016000604051808303818588803b15801561067b57600080fd5b505af115801561068f573d6000803e3d6000fd5b5050505050565b6000815183511480156107ba5750816040518082805190602001908083835b602083106106f257805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe090920191602091820191016106b5565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0180199092169116179052604051919093018190038120885190955088945090928392508401908083835b6020831061078757805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0909201916020918201910161074a565b6001836020036101000a038019825116818451168082178552505050505050905001915050604051809103902060001916145b90505b92915050565b6107cb6119fa565b60608060008060008060006107de6119fa565b8a15156107ea57610ab2565b6004805460408051602060026001851615610100027ffffffffffffffffffffffffffffffffffffffff
fffffffffffffffffffffffff0190941693909304601f8101849004840282018401909252818152929183018282801561088e5780601f106108635761010080835404028352916020019161088e565b820191906000526020600020905b81548152906001019060200180831161087157829003601f168201915b505060058054604080516020601f60027fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff6101006001881615020190951694909404938401819004810282018101909252828152969e509194509250840190508282801561093d5780601f106109125761010080835404028352916020019161093d565b820191906000526020600020905b81548152906001019060200180831161092057829003601f168201915b50505050509650600095508b519450600093505b838514610a7857878c8581518110151561096757fe5b6020908102909101015161014001528b5187908d908690811061098657fe5b60209081029091010151610160015261099f8b87610ac1565b9250610a068c858151811015156109b257fe5b9060200190602002015160a00151610a008e878151811015156109d157fe5b90602001906020020151608001518f888151811015156109ed57fe5b9060200190602002015160e00151610ac1565b8561128b565b9150610a418c85815181101515610a1957fe5b90602001906020020151838c87815181101515610a3257fe5b906020019060200201516112e6565b9050610a4d898261135e565b610a5f89600001518a60600151610ac1565b95508a8610610a6d57610a78565b600190930192610951565b8a861015610ab2576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd90612418565b50505050505050509392505050565b600082821115610afd576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd906123b8565b50900390565b610b0b6119fa565b606080600080600080610b1c6119fa565b60008b6000815181101515610b2d57fe5b6020908102919091018101516101400151600580546040805160026001841615610100027fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0190931692909204601f8101869004860283018601909152808252929b5092909190830182828015610be55780601f10610bba57610100808354040283529160200191610be5565b820191906000526020600020905b815481529060010190602001808311610bc857829003601f168201915b505050505096508b519550600094505b848614610cdb5
7878c86815181101515610c0b57fe5b6020908102909101015161014001528b5187908d9087908110610c2a57fe5b6020908102909101015161016001528851610c46908c90610ac1565b9350610c898c86815181101515610c5957fe5b9060200190602002015160a001518d87815181101515610c7557fe5b90602001906020020151608001518661128b565b9250610cb58c86815181101515610c9c57fe5b90602001906020020151848c88815181101515610a3257fe5b9150610cc1898361135e565b5087518a8110610cd057610cdb565b600190940193610bf5565b8a811015610ab2576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd90612418565b600080808066b1a2bc2ec50000861115610d5b576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd90612448565b610d658888611045565b935034841115610da1576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd906123a8565b610dab3485610ac1565b9250610dc086670de0b6b3a76400008a61108f565b915082821115610dfc576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd90612428565b6000831115610f1f576002546040517f2e1a7d4d00000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff90911690632e1a7d4d90610e5b9086906004016124a4565b600060405180830381600087803b158015610e7557600080fd5b505af1158015610e89573d6000803e3d6000fd5b505050506000821115610edb5760405173ffffffffffffffffffffffffffffffffffffffff86169083156108fc029084906000818181858888f19350505050158015610ed9573d6000803e3d6000fd5b505b610ee58383610ac1565b90506000811115610f1f57604051339082156108fc029083906000818181858888f19350505050158015610f1d573d6000803e3d6000fd5b505b5050505050505050565b6000610f3b838263ffffffff6113c016565b604080517f4552433230546f6b656e28616464726573732900000000000000000000000000815290519081900360130190209091507fffffffff0000000000000000000000000000000000000000000000000000000080831691161415610fab57610fa6838361142d565b610383565b604080517f455243373231546f6b656e28616464726573732c75696e7432353629000000008152905190819003601c0190207fffffffff000
000000000000000000000000000000000000000000000000000008281169116141561101357610fa6838361161b565b6040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd906123f8565b600082820183811015611084576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd906123e8565b8091505b5092915050565b60008083116110ca576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd906123d8565b6110dd6110d78584611703565b8461175e565b90505b9392505050565b6110ef6119fa565b60608060008060006110ff6119fa565b89600081518110151561110e57fe5b6020908102919091018101516101400151600580546040805160026001841615610100027fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0190931692909204601f8101869004860283018601909152808252929950929091908301828280156111c65780601f1061119b576101008083540402835291602001916111c6565b820191906000526020600020905b8154815290600101906020018083116111a957829003601f168201915b5050505050945089519350600092505b82841461127e57858a848151811015156111ec57fe5b602090810290910101516101400152895185908b908590811061120b57fe5b90602001906020020151610160018190525061122b898860200151610ac1565b91506112578a8481518110151561123e57fe5b90602001906020020151838a86815181101515610a3257fe5b9050611263878261135e565b602087015189116112735761127e565b6001909201916111d6565b5050505050509392505050565b60008083116112c6576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd906123d8565b6110dd6110d76112d68685611703565b6112e1866001610ac1565b611045565b6112ee6119fa565b606060006112fd868686611775565b600154815191935073ffffffffffffffffffffffffffffffffffffffff1691506080908390602082016000855af1801561135457825184526020830151602085015260408301516040850152606083015160608501525b5050509392505050565b8151815161136c9190611045565b8252602080830151908201516113829190611045565b60208301526040808301519082015161139b9190611045565b6040830152606080830151908201516113b49190611045565b60609092019190915250565b60008160040183511015151
5611402576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd90612468565b5001602001517fffffffff000000000000000000000000000000000000000000000000000000001690565b60008061144184601063ffffffff61194716565b604080517f7472616e7366657228616464726573732c75696e7432353629000000000000008152905190819003601901812091935073ffffffffffffffffffffffffffffffffffffffff8416919061149f903390879060240161236d565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08184030181529181526020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167fffffffff000000000000000000000000000000000000000000000000000000009094169390931783525181519192909182919080838360005b8381101561154357818101518382015260200161152b565b50505050905090810190601f1680156115705780820380516001836020036101000a031916815260200191505b509150506000604051808303816000865af1925050508015156115bf576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd90612408565b3d156115dc575060003d602014156115dc5760206000803e506000515b801515611615576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd90612408565b50505050565b60008060018314611658576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd90612478565b61166984601063ffffffff61194716565b915061167c84602463ffffffff6119a816565b6040517f23b872dd00000000000000000000000000000000000000000000000000000000815290915073ffffffffffffffffffffffffffffffffffffffff8316906323b872dd906116d590309033908690600401612345565b600060405180830381600087803b1580156116ef57600080fd5b505af1158015610f1f573d6000803e3d6000fd5b6000808315156117165760009150611088565b5082820282848281151561172657fe5b0414611084576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd906123e8565b600080828481151561176c57fe5b04949350505050565b604080517fb4be83d500000000000000000000000000000000000000000000000000000000602080830191909152606060248301818152875160848501908
1528884015160a48601529488015160c48501529087015160e4840152608087015161010484015260a087015161012484015260c087015161014484015260e08701516101648401526101008701516101848401526101208701516101a4840152610140870180516101c485019081526101608901516101e4860152610180905251805161020485018190529394919384936044870192849261022489019291820191601f82010460005b8181101561187c57835185526020948501949093019260010161185e565b50505050818103610160808401919091528a0151805180835260209283019291820191601f82010460005b818110156118c55783518552602094850194909301926001016118a7565b50505089845250848103602093840190815288518083529093918201918981019190601f82010460005b8181101561190d5783518552602094850194909301926001016118ef565b5050507fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08883030188525060405250505050509392505050565b600081601401835110151515611989576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd90612458565b50016014015173ffffffffffffffffffffffffffffffffffffffff1690565b60006107ba83836000816020018351101515156119f1576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd906123c8565b50016020015190565b608060405190810160405280600081526020016000815260200160008152602001600081525090565b60006107ba8235612540565b6000601f82018313611a4057600080fd5b8135611a53611a4e826124d9565b6124b2565b81815260209384019390925082018360005b83811015611a915781358601611a7b8882611b41565b8452506020928301929190910190600101611a65565b5050505092915050565b6000601f82018313611aac57600080fd5b8135611aba611a4e826124d9565b81815260209384019390925082018360005b83811015611a915781358601611ae28882611b90565b8452506020928301929190910190600101611acc565b600080601f83018413611b0a57600080fd5b50813567ffffffffffffffff811115611b2257600080fd5b602083019150836001820283011115611b3a57600080fd5b9250929050565b6000601f82018313611b5257600080fd5b8135611b60611a4e826124fa565b91508082526020830160208301858383011115611b7c57600080fd5b611b8783828461255c565b50505092915050565b60006101808284031215611
ba357600080fd5b611bae6101806124b2565b90506000611bbc8484611a23565b8252506020611bcd84848301611a23565b6020830152506040611be184828501611a23565b6040830152506060611bf584828501611a23565b6060830152506080611c0984828501611cd9565b60808301525060a0611c1d84828501611cd9565b60a08301525060c0611c3184828501611cd9565b60c08301525060e0611c4584828501611cd9565b60e083015250610100611c5a84828501611cd9565b61010083015250610120611c7084828501611cd9565b6101208301525061014082013567ffffffffffffffff811115611c9257600080fd5b611c9e84828501611b41565b6101408301525061016082013567ffffffffffffffff811115611cc057600080fd5b611ccc84828501611b41565b6101608301525092915050565b60006107ba8235612559565b600060208284031215611cf757600080fd5b6000611d038484611a23565b949350505050565b60008060008060008060c08789031215611d2457600080fd5b863567ffffffffffffffff811115611d3b57600080fd5b611d4789828a01611a9b565b965050602087013567ffffffffffffffff811115611d6457600080fd5b611d7089828a01611a2f565b955050604087013567ffffffffffffffff811115611d8d57600080fd5b611d9989828a01611a9b565b945050606087013567ffffffffffffffff811115611db657600080fd5b611dc289828a01611a2f565b9350506080611dd389828a01611cd9565b92505060a0611de489828a01611a23565b9150509295509295509295565b600080600080600080600060e0888a031215611e0c57600080fd5b873567ffffffffffffffff811115611e2357600080fd5b611e2f8a828b01611a9b565b9750506020611e408a828b01611cd9565b965050604088013567ffffffffffffffff811115611e5d57600080fd5b611e698a828b01611a2f565b955050606088013567ffffffffffffffff811115611e8657600080fd5b611e928a828b01611a9b565b945050608088013567ffffffffffffffff811115611eaf57600080fd5b611ebb8a828b01611a2f565b93505060a0611ecc8a828b01611cd9565b92505060c0611edd8a828b01611a23565b91505092959891949750929550565b600080600060408486031215611f0157600080fd5b833567ffffffffffffffff811115611f1857600080fd5b611f2486828701611af8565b93509350506020611f3786828701611cd9565b9150509250925092565b611f4a81612540565b82525050565b602381527f44454641554c545f46554e4354494f4e5f574554485f434f4e54524143545f4f60208201527f4e4c5900000000000
00000000000000000000000000000000000000000000000604082015260600190565b601181527f494e56414c49445f4d53475f56414c5545000000000000000000000000000000602082015260400190565b600d81527f4f564552534f4c445f5745544800000000000000000000000000000000000000602082015260400190565b601181527f55494e543235365f554e444552464c4f57000000000000000000000000000000602082015260400190565b602681527f475245415445525f4f525f455155414c5f544f5f33325f4c454e4754485f524560208201527f5155495245440000000000000000000000000000000000000000000000000000604082015260600190565b601081527f4449564953494f4e5f42595f5a45524f00000000000000000000000000000000602082015260400190565b601081527f55494e543235365f4f564552464c4f5700000000000000000000000000000000602082015260400190565b601781527f554e535550504f525445445f41535345545f50524f5859000000000000000000602082015260400190565b600f81527f5452414e534645525f4641494c45440000000000000000000000000000000000602082015260400190565b601481527f434f4d504c4554455f46494c4c5f4641494c4544000000000000000000000000602082015260400190565b601a81527f494e53554646494349454e545f4554485f52454d41494e494e47000000000000602082015260400190565b601381527f4f4e4c595f434f4e54524143545f4f574e455200000000000000000000000000602082015260400190565b601881527f4645455f50455243454e544147455f544f4f5f4c415247450000000000000000602082015260400190565b602681527f475245415445525f4f525f455155414c5f544f5f32305f4c454e4754485f524560208201527f5155495245440000000000000000000000000000000000000000000000000000604082015260600190565b602581527f475245415445525f4f525f455155414c5f544f5f345f4c454e4754485f52455160208201527f5549524544000000000000000000000000000000000000000000000000000000604082015260600190565b600e81527f494e56414c49445f414d4f554e54000000000000000000000000000000000000602082015260400190565b805160808301906122f9848261232e565b50602082015161230c602085018261232e565b50604082015161231f604085018261232e565b50606082015161161560608501825b611f4a81612559565b602081016107bd8284611f41565b606081016123538286611f41565b6123606020830185611f41565b611d03604083018461232e5
65b6040810161237b8285611f41565b6110e0602083018461232e565b602080825281016107bd81611f50565b602080825281016107bd81611fa6565b602080825281016107bd81611fd6565b602080825281016107bd81612006565b602080825281016107bd81612036565b602080825281016107bd8161208c565b602080825281016107bd816120bc565b602080825281016107bd816120ec565b602080825281016107bd8161211c565b602080825281016107bd8161214c565b602080825281016107bd8161217c565b602080825281016107bd816121ac565b602080825281016107bd816121dc565b602080825281016107bd8161220c565b602080825281016107bd81612262565b602080825281016107bd816122b8565b610100810161249782856122e8565b6110e060808301846122e8565b602081016107bd828461232e565b60405181810167ffffffffffffffff811182821017156124d157600080fd5b604052919050565b600067ffffffffffffffff8211156124f057600080fd5b5060209081020190565b600067ffffffffffffffff82111561251157600080fd5b506020601f919091017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0160190565b73ffffffffffffffffffffffffffffffffffffffff1690565b90565b828183375060009101525600a265627a7a72305820d9f418f11e0f91f06f6f9d22924be0add925495eeb76a6388b5417adb505eeb36c6578706572696d656e74616cf5003700000000000000000000000048bacb9266a570d521063ef5dd96e61686dbe788000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000c00000000000000000000000000000000000000000000000000000000000000024f47261b0000000000000000000000000871dd7c2b4b25e1aa18728e9d5f2af4c4e431f5c000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000024f47261b00000000000000000000000000b1ba0af832d7c05fd64161e0db78e85978e808200000000000000000000000000000000000000000000000000000000", + "value": "0x0" + }, + "blockHash": "0x6456fbd35a3a69a1709c324fad114d68507d2c8ab391e9adb128f9734c8e4ae8", + "blockNumber": 13536, + "result": { + "address": "0x6000eca38b8b5bba64986182fe2a69c57f6b5414", + "code": 
"0x60806040526004361061006c5763ffffffff7c010000000000000000000000000000000000000000000000000000000060003504166318978e8281146100c8578063630f1e6c146100f25780638da5cb5b146101125780639395525c14610134578063f2fde38b14610147575b60025473ffffffffffffffffffffffffffffffffffffffff1633146100c6576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd90612388565b60405180910390fd5b005b6100db6100d6366004611df1565b610167565b6040516100e9929190612488565b60405180910390f35b3480156100fe57600080fd5b506100c661010d366004611eec565b6102f7565b34801561011e57600080fd5b50610127610388565b6040516100e99190612337565b6100db610142366004611d0b565b6103a4565b34801561015357600080fd5b506100c6610162366004611ce5565b61050a565b61016f6119fa565b6101776119fa565b6000806101826105bb565b60048054604080516020601f60027fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff610100600188161502019095169490940493840181900481028201810190925282815261025c939092909183018282801561022d5780601f106102025761010080835404028352916020019161022d565b820191906000526020600020905b81548152906001019060200180831161021057829003601f168201915b50505050508c600081518110151561024157fe5b6020908102909101015161014001519063ffffffff61069616565b156102875761026c8b8b8b6107c3565b935061028084600001518560600151610ac1565b90506102ae565b6102928b8b8b610b03565b9350836060015191506102a68883896107c3565b845190935090505b6102c2846020015184602001518888610d15565b6102e98b60008151811015156102d457fe5b90602001906020020151610140015182610f29565b505097509795505050505050565b60005473ffffffffffffffffffffffffffffffffffffffff163314610348576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd90612438565b61038383838080601f01602080910402602001604051908101604052809392919081815260200183838082843750879450610f299350505050565b505050565b60005473ffffffffffffffffffffffffffffffffffffffff1681565b6103ac6119fa565b6103b46119fa565b60008060006103c16105bb565b60048054604080516020601f60027ffffffffffffffffffffffffffffffffffffffffffff
fffffffffffffffffffff6101006001881615020190951694909404938401819004810282018101909252828152610441939092909183018282801561022d5780601f106102025761010080835404028352916020019161022d565b156104925761046a670de0b6b3a7640000610464670de0b6b3a76400008a611045565b3461108f565b92506104778b848c6110e7565b945061048b85600001518660600151610ac1565b90506104d6565b6104ad670d2f13f7789f0000670de0b6b3a76400003461108f565b92506104ba8b848c6110e7565b9450846060015191506104ce89838a6107c3565b855190945090505b6104ea856020015185602001518989610d15565b6104fc8b60008151811015156102d457fe5b505050965096945050505050565b60005473ffffffffffffffffffffffffffffffffffffffff16331461055b576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd90612438565b73ffffffffffffffffffffffffffffffffffffffff8116156105b857600080547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff83161790555b50565b600034116105f5576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd90612398565b600260009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1663d0e30db0346040518263ffffffff167c01000000000000000000000000000000000000000000000000000000000281526004016000604051808303818588803b15801561067b57600080fd5b505af115801561068f573d6000803e3d6000fd5b5050505050565b6000815183511480156107ba5750816040518082805190602001908083835b602083106106f257805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe090920191602091820191016106b5565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0180199092169116179052604051919093018190038120885190955088945090928392508401908083835b6020831061078757805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0909201916020918201910161074a565b6001836020036101000a038019825116818451168082178552505050505050905001915050604051809103902060001916145b90505b92915050565b6107cb6119fa565b60608
060008060008060006107de6119fa565b8a15156107ea57610ab2565b6004805460408051602060026001851615610100027fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0190941693909304601f8101849004840282018401909252818152929183018282801561088e5780601f106108635761010080835404028352916020019161088e565b820191906000526020600020905b81548152906001019060200180831161087157829003601f168201915b505060058054604080516020601f60027fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff6101006001881615020190951694909404938401819004810282018101909252828152969e509194509250840190508282801561093d5780601f106109125761010080835404028352916020019161093d565b820191906000526020600020905b81548152906001019060200180831161092057829003601f168201915b50505050509650600095508b519450600093505b838514610a7857878c8581518110151561096757fe5b6020908102909101015161014001528b5187908d908690811061098657fe5b60209081029091010151610160015261099f8b87610ac1565b9250610a068c858151811015156109b257fe5b9060200190602002015160a00151610a008e878151811015156109d157fe5b90602001906020020151608001518f888151811015156109ed57fe5b9060200190602002015160e00151610ac1565b8561128b565b9150610a418c85815181101515610a1957fe5b90602001906020020151838c87815181101515610a3257fe5b906020019060200201516112e6565b9050610a4d898261135e565b610a5f89600001518a60600151610ac1565b95508a8610610a6d57610a78565b600190930192610951565b8a861015610ab2576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd90612418565b50505050505050509392505050565b600082821115610afd576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd906123b8565b50900390565b610b0b6119fa565b606080600080600080610b1c6119fa565b60008b6000815181101515610b2d57fe5b6020908102919091018101516101400151600580546040805160026001841615610100027fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0190931692909204601f8101869004860283018601909152808252929b5092909190830182828015610be55780601f10610bba576101008083540402835291602001916
10be5565b820191906000526020600020905b815481529060010190602001808311610bc857829003601f168201915b505050505096508b519550600094505b848614610cdb57878c86815181101515610c0b57fe5b6020908102909101015161014001528b5187908d9087908110610c2a57fe5b6020908102909101015161016001528851610c46908c90610ac1565b9350610c898c86815181101515610c5957fe5b9060200190602002015160a001518d87815181101515610c7557fe5b90602001906020020151608001518661128b565b9250610cb58c86815181101515610c9c57fe5b90602001906020020151848c88815181101515610a3257fe5b9150610cc1898361135e565b5087518a8110610cd057610cdb565b600190940193610bf5565b8a811015610ab2576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd90612418565b600080808066b1a2bc2ec50000861115610d5b576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd90612448565b610d658888611045565b935034841115610da1576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd906123a8565b610dab3485610ac1565b9250610dc086670de0b6b3a76400008a61108f565b915082821115610dfc576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd90612428565b6000831115610f1f576002546040517f2e1a7d4d00000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff90911690632e1a7d4d90610e5b9086906004016124a4565b600060405180830381600087803b158015610e7557600080fd5b505af1158015610e89573d6000803e3d6000fd5b505050506000821115610edb5760405173ffffffffffffffffffffffffffffffffffffffff86169083156108fc029084906000818181858888f19350505050158015610ed9573d6000803e3d6000fd5b505b610ee58383610ac1565b90506000811115610f1f57604051339082156108fc029083906000818181858888f19350505050158015610f1d573d6000803e3d6000fd5b505b5050505050505050565b6000610f3b838263ffffffff6113c016565b604080517f4552433230546f6b656e28616464726573732900000000000000000000000000815290519081900360130190209091507fffffffff0000000000000000000000000000000000000000000000000000000080831691161415610fab57610
fa6838361142d565b610383565b604080517f455243373231546f6b656e28616464726573732c75696e7432353629000000008152905190819003601c0190207fffffffff000000000000000000000000000000000000000000000000000000008281169116141561101357610fa6838361161b565b6040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd906123f8565b600082820183811015611084576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd906123e8565b8091505b5092915050565b60008083116110ca576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd906123d8565b6110dd6110d78584611703565b8461175e565b90505b9392505050565b6110ef6119fa565b60608060008060006110ff6119fa565b89600081518110151561110e57fe5b6020908102919091018101516101400151600580546040805160026001841615610100027fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0190931692909204601f8101869004860283018601909152808252929950929091908301828280156111c65780601f1061119b576101008083540402835291602001916111c6565b820191906000526020600020905b8154815290600101906020018083116111a957829003601f168201915b5050505050945089519350600092505b82841461127e57858a848151811015156111ec57fe5b602090810290910101516101400152895185908b908590811061120b57fe5b90602001906020020151610160018190525061122b898860200151610ac1565b91506112578a8481518110151561123e57fe5b90602001906020020151838a86815181101515610a3257fe5b9050611263878261135e565b602087015189116112735761127e565b6001909201916111d6565b5050505050509392505050565b60008083116112c6576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd906123d8565b6110dd6110d76112d68685611703565b6112e1866001610ac1565b611045565b6112ee6119fa565b606060006112fd868686611775565b600154815191935073ffffffffffffffffffffffffffffffffffffffff1691506080908390602082016000855af1801561135457825184526020830151602085015260408301516040850152606083015160608501525b5050509392505050565b8151815161136c9190611045565b8252602080830151908201516113829190611045565b6020830
1526040808301519082015161139b9190611045565b6040830152606080830151908201516113b49190611045565b60609092019190915250565b600081600401835110151515611402576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd90612468565b5001602001517fffffffff000000000000000000000000000000000000000000000000000000001690565b60008061144184601063ffffffff61194716565b604080517f7472616e7366657228616464726573732c75696e7432353629000000000000008152905190819003601901812091935073ffffffffffffffffffffffffffffffffffffffff8416919061149f903390879060240161236d565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08184030181529181526020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167fffffffff000000000000000000000000000000000000000000000000000000009094169390931783525181519192909182919080838360005b8381101561154357818101518382015260200161152b565b50505050905090810190601f1680156115705780820380516001836020036101000a031916815260200191505b509150506000604051808303816000865af1925050508015156115bf576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd90612408565b3d156115dc575060003d602014156115dc5760206000803e506000515b801515611615576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd90612408565b50505050565b60008060018314611658576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd90612478565b61166984601063ffffffff61194716565b915061167c84602463ffffffff6119a816565b6040517f23b872dd00000000000000000000000000000000000000000000000000000000815290915073ffffffffffffffffffffffffffffffffffffffff8316906323b872dd906116d590309033908690600401612345565b600060405180830381600087803b1580156116ef57600080fd5b505af1158015610f1f573d6000803e3d6000fd5b6000808315156117165760009150611088565b5082820282848281151561172657fe5b0414611084576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd906123e8565b600080828481151561176c57fe5b049
49350505050565b604080517fb4be83d5000000000000000000000000000000000000000000000000000000006020808301919091526060602483018181528751608485019081528884015160a48601529488015160c48501529087015160e4840152608087015161010484015260a087015161012484015260c087015161014484015260e08701516101648401526101008701516101848401526101208701516101a4840152610140870180516101c485019081526101608901516101e4860152610180905251805161020485018190529394919384936044870192849261022489019291820191601f82010460005b8181101561187c57835185526020948501949093019260010161185e565b50505050818103610160808401919091528a0151805180835260209283019291820191601f82010460005b818110156118c55783518552602094850194909301926001016118a7565b50505089845250848103602093840190815288518083529093918201918981019190601f82010460005b8181101561190d5783518552602094850194909301926001016118ef565b5050507fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08883030188525060405250505050509392505050565b600081601401835110151515611989576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd90612458565b50016014015173ffffffffffffffffffffffffffffffffffffffff1690565b60006107ba83836000816020018351101515156119f1576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd906123c8565b50016020015190565b608060405190810160405280600081526020016000815260200160008152602001600081525090565b60006107ba8235612540565b6000601f82018313611a4057600080fd5b8135611a53611a4e826124d9565b6124b2565b81815260209384019390925082018360005b83811015611a915781358601611a7b8882611b41565b8452506020928301929190910190600101611a65565b5050505092915050565b6000601f82018313611aac57600080fd5b8135611aba611a4e826124d9565b81815260209384019390925082018360005b83811015611a915781358601611ae28882611b90565b8452506020928301929190910190600101611acc565b600080601f83018413611b0a57600080fd5b50813567ffffffffffffffff811115611b2257600080fd5b602083019150836001820283011115611b3a57600080fd5b9250929050565b6000601f82018313611b5257600080fd5b8135611
b60611a4e826124fa565b91508082526020830160208301858383011115611b7c57600080fd5b611b8783828461255c565b50505092915050565b60006101808284031215611ba357600080fd5b611bae6101806124b2565b90506000611bbc8484611a23565b8252506020611bcd84848301611a23565b6020830152506040611be184828501611a23565b6040830152506060611bf584828501611a23565b6060830152506080611c0984828501611cd9565b60808301525060a0611c1d84828501611cd9565b60a08301525060c0611c3184828501611cd9565b60c08301525060e0611c4584828501611cd9565b60e083015250610100611c5a84828501611cd9565b61010083015250610120611c7084828501611cd9565b6101208301525061014082013567ffffffffffffffff811115611c9257600080fd5b611c9e84828501611b41565b6101408301525061016082013567ffffffffffffffff811115611cc057600080fd5b611ccc84828501611b41565b6101608301525092915050565b60006107ba8235612559565b600060208284031215611cf757600080fd5b6000611d038484611a23565b949350505050565b60008060008060008060c08789031215611d2457600080fd5b863567ffffffffffffffff811115611d3b57600080fd5b611d4789828a01611a9b565b965050602087013567ffffffffffffffff811115611d6457600080fd5b611d7089828a01611a2f565b955050604087013567ffffffffffffffff811115611d8d57600080fd5b611d9989828a01611a9b565b945050606087013567ffffffffffffffff811115611db657600080fd5b611dc289828a01611a2f565b9350506080611dd389828a01611cd9565b92505060a0611de489828a01611a23565b9150509295509295509295565b600080600080600080600060e0888a031215611e0c57600080fd5b873567ffffffffffffffff811115611e2357600080fd5b611e2f8a828b01611a9b565b9750506020611e408a828b01611cd9565b965050604088013567ffffffffffffffff811115611e5d57600080fd5b611e698a828b01611a2f565b955050606088013567ffffffffffffffff811115611e8657600080fd5b611e928a828b01611a9b565b945050608088013567ffffffffffffffff811115611eaf57600080fd5b611ebb8a828b01611a2f565b93505060a0611ecc8a828b01611cd9565b92505060c0611edd8a828b01611a23565b91505092959891949750929550565b600080600060408486031215611f0157600080fd5b833567ffffffffffffffff811115611f1857600080fd5b611f2486828701611af8565b93509350506020611f3786828701611cd9565b9150509250925
092565b611f4a81612540565b82525050565b602381527f44454641554c545f46554e4354494f4e5f574554485f434f4e54524143545f4f60208201527f4e4c590000000000000000000000000000000000000000000000000000000000604082015260600190565b601181527f494e56414c49445f4d53475f56414c5545000000000000000000000000000000602082015260400190565b600d81527f4f564552534f4c445f5745544800000000000000000000000000000000000000602082015260400190565b601181527f55494e543235365f554e444552464c4f57000000000000000000000000000000602082015260400190565b602681527f475245415445525f4f525f455155414c5f544f5f33325f4c454e4754485f524560208201527f5155495245440000000000000000000000000000000000000000000000000000604082015260600190565b601081527f4449564953494f4e5f42595f5a45524f00000000000000000000000000000000602082015260400190565b601081527f55494e543235365f4f564552464c4f5700000000000000000000000000000000602082015260400190565b601781527f554e535550504f525445445f41535345545f50524f5859000000000000000000602082015260400190565b600f81527f5452414e534645525f4641494c45440000000000000000000000000000000000602082015260400190565b601481527f434f4d504c4554455f46494c4c5f4641494c4544000000000000000000000000602082015260400190565b601a81527f494e53554646494349454e545f4554485f52454d41494e494e47000000000000602082015260400190565b601381527f4f4e4c595f434f4e54524143545f4f574e455200000000000000000000000000602082015260400190565b601881527f4645455f50455243454e544147455f544f4f5f4c415247450000000000000000602082015260400190565b602681527f475245415445525f4f525f455155414c5f544f5f32305f4c454e4754485f524560208201527f5155495245440000000000000000000000000000000000000000000000000000604082015260600190565b602581527f475245415445525f4f525f455155414c5f544f5f345f4c454e4754485f52455160208201527f5549524544000000000000000000000000000000000000000000000000000000604082015260600190565b600e81527f494e56414c49445f414d4f554e54000000000000000000000000000000000000602082015260400190565b805160808301906122f9848261232e565b50602082015161230c602085018261232e565b50604082015161231f604085018261232e565b5060608201516
1161560608501825b611f4a81612559565b602081016107bd8284611f41565b606081016123538286611f41565b6123606020830185611f41565b611d03604083018461232e565b6040810161237b8285611f41565b6110e0602083018461232e565b602080825281016107bd81611f50565b602080825281016107bd81611fa6565b602080825281016107bd81611fd6565b602080825281016107bd81612006565b602080825281016107bd81612036565b602080825281016107bd8161208c565b602080825281016107bd816120bc565b602080825281016107bd816120ec565b602080825281016107bd8161211c565b602080825281016107bd8161214c565b602080825281016107bd8161217c565b602080825281016107bd816121ac565b602080825281016107bd816121dc565b602080825281016107bd8161220c565b602080825281016107bd81612262565b602080825281016107bd816122b8565b610100810161249782856122e8565b6110e060808301846122e8565b602081016107bd828461232e565b60405181810167ffffffffffffffff811182821017156124d157600080fd5b604052919050565b600067ffffffffffffffff8211156124f057600080fd5b5060209081020190565b600067ffffffffffffffff82111561251157600080fd5b506020601f919091017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0160190565b73ffffffffffffffffffffffffffffffffffffffff1690565b90565b828183375060009101525600a265627a7a72305820d9f418f11e0f91f06f6f9d22924be0add925495eeb76a6388b5417adb505eeb36c6578706572696d656e74616cf50037", + "gasUsed": "0x2c8c7f" + }, + "subtraces": 3, + "traceAddress": [], + "transactionHash": "0x6974f745a004f030bebb1c01d4595edbda2fafcf01c0bfbd5d335711e2a7b04e", + "transactionPosition": 0, + "type": "create" + }, + { + "action": { + "callType": "call", + "from": "0x6000eca38b8b5bba64986182fe2a69c57f6b5414", + "gas": "0x1dba84", + "input": "0x60704108f47261b000000000000000000000000000000000000000000000000000000000", + "to": "0x48bacb9266a570d521063ef5dd96e61686dbe788", + "value": "0x0" + }, + "blockHash": "0x6456fbd35a3a69a1709c324fad114d68507d2c8ab391e9adb128f9734c8e4ae8", + "blockNumber": 13536, + "result": { + "gasUsed": "0x3d9", + "output": "0x0000000000000000000000001dc4c1cefef38a777b15aa20260a54e584b16c48" + }, 
+ "subtraces": 0, + "traceAddress": [ + 0 + ], + "transactionHash": "0x6974f745a004f030bebb1c01d4595edbda2fafcf01c0bfbd5d335711e2a7b04e", + "transactionPosition": 0, + "type": "call" + }, + { + "action": { + "callType": "call", + "from": "0x6000eca38b8b5bba64986182fe2a69c57f6b5414", + "gas": "0x1dad2e", + "input": "0x095ea7b30000000000000000000000001dc4c1cefef38a777b15aa20260a54e584b16c48ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "to": "0x0b1ba0af832d7c05fd64161e0db78e85978e8082", + "value": "0x0" + }, + "blockHash": "0x6456fbd35a3a69a1709c324fad114d68507d2c8ab391e9adb128f9734c8e4ae8", + "blockNumber": 13536, + "result": { + "gasUsed": "0x56c8", + "output": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + "subtraces": 0, + "traceAddress": [ + 1 + ], + "transactionHash": "0x6974f745a004f030bebb1c01d4595edbda2fafcf01c0bfbd5d335711e2a7b04e", + "transactionPosition": 0, + "type": "call" + }, + { + "action": { + "callType": "call", + "from": "0x6000eca38b8b5bba64986182fe2a69c57f6b5414", + "gas": "0x1d4ee1", + "input": "0x095ea7b30000000000000000000000001dc4c1cefef38a777b15aa20260a54e584b16c48ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "to": "0x871dd7c2b4b25e1aa18728e9d5f2af4c4e431f5c", + "value": "0x0" + }, + "blockHash": "0x6456fbd35a3a69a1709c324fad114d68507d2c8ab391e9adb128f9734c8e4ae8", + "blockNumber": 13536, + "result": { + "gasUsed": "0x56ca", + "output": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + "subtraces": 0, + "traceAddress": [ + 2 + ], + "transactionHash": "0x6974f745a004f030bebb1c01d4595edbda2fafcf01c0bfbd5d335711e2a7b04e", + "transactionPosition": 0, + "type": "call" + } + ] +} \ No newline at end of file diff --git a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/oog.json b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/oog.json new file mode 100644 index 00000000..bd6059fa --- /dev/null +++ 
b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/oog.json @@ -0,0 +1,68 @@ +{ + "context": { + "difficulty": "3699098917", + "gasLimit": "5258985", + "miner": "0xd049bfd667cb46aa3ef5df0da3e57db3be39e511", + "number": "2294631", + "timestamp": "1513675366" + }, + "genesis": { + "alloc": { + "0x43064693d3d38ad6a7cb579e0d6d9718c8aa6b62": { + "balance": "0x0", + "code": "0x6060604052600436106100ba576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806306fdde03146100bf578063095ea7b31461014d57806318160ddd146101a757806323b872dd146101d0578063313ce5671461024957806342966c68146102785780635a3b7e42146102b357806370a082311461034157806379cc67901461038e57806395d89b41146103e8578063a9059cbb14610476578063dd62ed3e146104b8575b600080fd5b34156100ca57600080fd5b6100d2610524565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156101125780820151818401526020810190506100f7565b50505050905090810190601f16801561013f5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b341561015857600080fd5b61018d600480803573ffffffffffffffffffffffffffffffffffffffff1690602001909190803590602001909190505061055d565b604051808215151515815260200191505060405180910390f35b34156101b257600080fd5b6101ba6105ea565b6040518082815260200191505060405180910390f35b34156101db57600080fd5b61022f600480803573ffffffffffffffffffffffffffffffffffffffff1690602001909190803573ffffffffffffffffffffffffffffffffffffffff169060200190919080359060200190919050506105f0565b604051808215151515815260200191505060405180910390f35b341561025457600080fd5b61025c610910565b604051808260ff1660ff16815260200191505060405180910390f35b341561028357600080fd5b6102996004808035906020019091905050610915565b604051808215151515815260200191505060405180910390f35b34156102be57600080fd5b6102c6610a18565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156103065780820151818401526020810190506102eb565b50505050905090810190601f1680156103335780820380516001836020036
101000a031916815260200191505b509250505060405180910390f35b341561034c57600080fd5b610378600480803573ffffffffffffffffffffffffffffffffffffffff16906020019091905050610a51565b6040518082815260200191505060405180910390f35b341561039957600080fd5b6103ce600480803573ffffffffffffffffffffffffffffffffffffffff16906020019091908035906020019091905050610a69565b604051808215151515815260200191505060405180910390f35b34156103f357600080fd5b6103fb610bf8565b6040518080602001828103825283818151815260200191508051906020019080838360005b8381101561043b578082015181840152602081019050610420565b50505050905090810190601f1680156104685780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b341561048157600080fd5b6104b6600480803573ffffffffffffffffffffffffffffffffffffffff16906020019091908035906020019091905050610c31565b005b34156104c357600080fd5b61050e600480803573ffffffffffffffffffffffffffffffffffffffff1690602001909190803573ffffffffffffffffffffffffffffffffffffffff16906020019091905050610e34565b6040518082815260200191505060405180910390f35b6040805190810160405280600881526020017f446f70616d696e6500000000000000000000000000000000000000000000000081525081565b600081600260003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020819055506001905092915050565b60005481565b6000808373ffffffffffffffffffffffffffffffffffffffff161415151561061757600080fd5b81600160008673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020541015151561066557600080fd5b600160008473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000205482600160008673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000205401101515156106f157fe5b600260008573ffffffffffffffffffffffffffffffffffffffff167
3ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054821115151561077c57600080fd5b81600160008673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000206000828254039250508190555081600160008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000206000828254019250508190555081600260008673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020600082825403925050819055508273ffffffffffffffffffffffffffffffffffffffff168473ffffffffffffffffffffffffffffffffffffffff167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef846040518082815260200191505060405180910390a3600190509392505050565b601281565b600081600160003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020541015151561096557600080fd5b81600160003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020600082825403925050819055508160008082825403925050819055503373ffffffffffffffffffffffffffffffffffffffff167fcc16f5dbb4873280815c1ee09dbd06736cffcc184412cf7a71a0fdb75d397ca5836040518082815260200191505060405180910390a260019050919050565b6040805190810160405280600981526020017f446f706d6e20302e32000000000000000000000000000000000000000000000081525081565b60016020528060005260406000206000915090505481565b600081600160008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000205410151515610ab957600080fd5b600260008473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200
160002060003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020548211151515610b4457600080fd5b81600160008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020600082825403925050819055508160008082825403925050819055508273ffffffffffffffffffffffffffffffffffffffff167fcc16f5dbb4873280815c1ee09dbd06736cffcc184412cf7a71a0fdb75d397ca5836040518082815260200191505060405180910390a26001905092915050565b6040805190810160405280600581526020017f444f504d4e00000000000000000000000000000000000000000000000000000081525081565b60008273ffffffffffffffffffffffffffffffffffffffff1614151515610c5757600080fd5b80600160003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000205410151515610ca557600080fd5b600160008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000205481600160008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020540110151515610d3157fe5b80600160003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000206000828254039250508190555080600160008473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020600082825401925050819055508173ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef836040518082815260200191505060405180910390a35050565b60026020528160005260406000206020528060005260406000206000915091505054815600a165627a7a723058206d93424f4e7b11929b8276a269038402c10c0ddf21800e999916ddd9dff4a7630029", + "nonce": "1", + "storage": { + "0x296b66049cc4f9c8bf3d4f14752add261d1a980b39bdd194a7897baf39ac7579": "0x0000000000000000000000000000000000000000033b2e3c9fc9653f9e72b1e0" + 
} + }, + "0x94194bc2aaf494501d7880b61274a169f6502a54": { + "balance": "0xea8c39a876d19888d", + "code": "0x", + "nonce": "265", + "storage": {} + } + }, + "config": { + "byzantiumBlock": 1700000, + "chainId": 3, + "daoForkSupport": true, + "eip150Block": 0, + "eip150Hash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d", + "eip155Block": 10, + "eip158Block": 10, + "ethash": {}, + "homesteadBlock": 0 + }, + "difficulty": "3699098917", + "extraData": "0x4554482e45544846414e532e4f52472d4641313738394444", + "gasLimit": "5263953", + "hash": "0x03a0f62a8106793dafcfae7b75fd2654322062d585a19cea568314d7205790dc", + "miner": "0xbbf5029fd710d227630c8b7d338051b8e76d50b3", + "mixHash": "0x15482cc64b7c00a947f5bf015dfc010db1a6a668c74df61974d6a7848c174408", + "nonce": "0xd1bdb150f6fd170e", + "number": "2294630", + "stateRoot": "0x1ab1a534e84cc787cda1db21e0d5920ab06017948075b759166cfea7274657a1", + "timestamp": "1513675347", + "totalDifficulty": "7160543502214733" + }, + "input": "0xf8ab820109855d21dba00082ca1d9443064693d3d38ad6a7cb579e0d6d9718c8aa6b6280b844a9059cbb000000000000000000000000e77b1ac803616503510bed0086e3a7be2627a69900000000000000000000000000000000000000000000000000000009502f90001ba0ce3ad83f5530136467b7c2bb225f406bd170f4ad59c254e5103c34eeabb5bd69a0455154527224a42ab405cacf0fe92918a75641ce4152f8db292019a5527aa956", + "result": [ + { + "action": { + "callType": "call", + "from": "0x94194bc2aaf494501d7880b61274a169f6502a54", + "gas": "0xca1d", + "input": "0xa9059cbb000000000000000000000000e77b1ac803616503510bed0086e3a7be2627a69900000000000000000000000000000000000000000000000000000009502f9000", + "to": "0x43064693d3d38ad6a7cb579e0d6d9718c8aa6b62", + "value": "0x0" + }, + "blockNumber": 2294631, + "error": "out of gas", + "result": {}, + "subtraces": 0, + "traceAddress": [], + "type": "call" + } + ] +} diff --git a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/option_convert_parity_errors.json 
b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/option_convert_parity_errors.json new file mode 100644 index 00000000..8888d3e6 --- /dev/null +++ b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/option_convert_parity_errors.json @@ -0,0 +1,71 @@ +{ + "context": { + "difficulty": "3699098917", + "gasLimit": "5258985", + "miner": "0xd049bfd667cb46aa3ef5df0da3e57db3be39e511", + "number": "2294631", + "timestamp": "1513675366" + }, + "genesis": { + "alloc": { + "0x43064693d3d38ad6a7cb579e0d6d9718c8aa6b62": { + "balance": "0x0", + "code": "0x6060604052600436106100ba576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806306fdde03146100bf578063095ea7b31461014d57806318160ddd146101a757806323b872dd146101d0578063313ce5671461024957806342966c68146102785780635a3b7e42146102b357806370a082311461034157806379cc67901461038e57806395d89b41146103e8578063a9059cbb14610476578063dd62ed3e146104b8575b600080fd5b34156100ca57600080fd5b6100d2610524565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156101125780820151818401526020810190506100f7565b50505050905090810190601f16801561013f5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b341561015857600080fd5b61018d600480803573ffffffffffffffffffffffffffffffffffffffff1690602001909190803590602001909190505061055d565b604051808215151515815260200191505060405180910390f35b34156101b257600080fd5b6101ba6105ea565b6040518082815260200191505060405180910390f35b34156101db57600080fd5b61022f600480803573ffffffffffffffffffffffffffffffffffffffff1690602001909190803573ffffffffffffffffffffffffffffffffffffffff169060200190919080359060200190919050506105f0565b604051808215151515815260200191505060405180910390f35b341561025457600080fd5b61025c610910565b604051808260ff1660ff16815260200191505060405180910390f35b341561028357600080fd5b6102996004808035906020019091905050610915565b604051808215151515815260200191505060405180910390f35b34156102be57600080fd5b6102c6610a18565b
6040518080602001828103825283818151815260200191508051906020019080838360005b838110156103065780820151818401526020810190506102eb565b50505050905090810190601f1680156103335780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b341561034c57600080fd5b610378600480803573ffffffffffffffffffffffffffffffffffffffff16906020019091905050610a51565b6040518082815260200191505060405180910390f35b341561039957600080fd5b6103ce600480803573ffffffffffffffffffffffffffffffffffffffff16906020019091908035906020019091905050610a69565b604051808215151515815260200191505060405180910390f35b34156103f357600080fd5b6103fb610bf8565b6040518080602001828103825283818151815260200191508051906020019080838360005b8381101561043b578082015181840152602081019050610420565b50505050905090810190601f1680156104685780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b341561048157600080fd5b6104b6600480803573ffffffffffffffffffffffffffffffffffffffff16906020019091908035906020019091905050610c31565b005b34156104c357600080fd5b61050e600480803573ffffffffffffffffffffffffffffffffffffffff1690602001909190803573ffffffffffffffffffffffffffffffffffffffff16906020019091905050610e34565b6040518082815260200191505060405180910390f35b6040805190810160405280600881526020017f446f70616d696e6500000000000000000000000000000000000000000000000081525081565b600081600260003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020819055506001905092915050565b60005481565b6000808373ffffffffffffffffffffffffffffffffffffffff161415151561061757600080fd5b81600160008673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020541015151561066557600080fd5b600160008473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000205482600160008673ffff
ffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000205401101515156106f157fe5b600260008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054821115151561077c57600080fd5b81600160008673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000206000828254039250508190555081600160008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000206000828254019250508190555081600260008673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020600082825403925050819055508273ffffffffffffffffffffffffffffffffffffffff168473ffffffffffffffffffffffffffffffffffffffff167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef846040518082815260200191505060405180910390a3600190509392505050565b601281565b600081600160003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020541015151561096557600080fd5b81600160003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020600082825403925050819055508160008082825403925050819055503373ffffffffffffffffffffffffffffffffffffffff167fcc16f5dbb4873280815c1ee09dbd06736cffcc184412cf7a71a0fdb75d397ca5836040518082815260200191505060405180910390a260019050919050565b6040805190810160405280600981526020017f446f706d6e20302e32000000000000000000000000000000000000000000000081525081565b60016020528060005260406000206000915090505481565b600081600160008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffff
ffffffffffff1681526020019081526020016000205410151515610ab957600080fd5b600260008473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020548211151515610b4457600080fd5b81600160008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020600082825403925050819055508160008082825403925050819055508273ffffffffffffffffffffffffffffffffffffffff167fcc16f5dbb4873280815c1ee09dbd06736cffcc184412cf7a71a0fdb75d397ca5836040518082815260200191505060405180910390a26001905092915050565b6040805190810160405280600581526020017f444f504d4e00000000000000000000000000000000000000000000000000000081525081565b60008273ffffffffffffffffffffffffffffffffffffffff1614151515610c5757600080fd5b80600160003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000205410151515610ca557600080fd5b600160008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000205481600160008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020540110151515610d3157fe5b80600160003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000206000828254039250508190555080600160008473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020600082825401925050819055508173ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef836040518082815260200191505060405180910390a35050565b60026020528160005260406000206020528060005260406000206000915091505054815600a165627a7a723058206d93424f4e7b11929b8276a269038402c10c0ddf21800e999916ddd9
dff4a7630029", + "nonce": "1", + "storage": { + "0x296b66049cc4f9c8bf3d4f14752add261d1a980b39bdd194a7897baf39ac7579": "0x0000000000000000000000000000000000000000033b2e3c9fc9653f9e72b1e0" + } + }, + "0x94194bc2aaf494501d7880b61274a169f6502a54": { + "balance": "0xea8c39a876d19888d", + "code": "0x", + "nonce": "265", + "storage": {} + } + }, + "config": { + "byzantiumBlock": 1700000, + "chainId": 3, + "daoForkSupport": true, + "eip150Block": 0, + "eip150Hash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d", + "eip155Block": 10, + "eip158Block": 10, + "ethash": {}, + "homesteadBlock": 0 + }, + "difficulty": "3699098917", + "extraData": "0x4554482e45544846414e532e4f52472d4641313738394444", + "gasLimit": "5263953", + "hash": "0x03a0f62a8106793dafcfae7b75fd2654322062d585a19cea568314d7205790dc", + "miner": "0xbbf5029fd710d227630c8b7d338051b8e76d50b3", + "mixHash": "0x15482cc64b7c00a947f5bf015dfc010db1a6a668c74df61974d6a7848c174408", + "nonce": "0xd1bdb150f6fd170e", + "number": "2294630", + "stateRoot": "0x1ab1a534e84cc787cda1db21e0d5920ab06017948075b759166cfea7274657a1", + "timestamp": "1513675347", + "totalDifficulty": "7160543502214733" + }, + "tracerConfig": { + "convertParityErrors": true + }, + "input": "0xf8ab820109855d21dba00082ca1d9443064693d3d38ad6a7cb579e0d6d9718c8aa6b6280b844a9059cbb000000000000000000000000e77b1ac803616503510bed0086e3a7be2627a69900000000000000000000000000000000000000000000000000000009502f90001ba0ce3ad83f5530136467b7c2bb225f406bd170f4ad59c254e5103c34eeabb5bd69a0455154527224a42ab405cacf0fe92918a75641ce4152f8db292019a5527aa956", + "result": [ + { + "action": { + "callType": "call", + "from": "0x94194bc2aaf494501d7880b61274a169f6502a54", + "gas": "0xca1d", + "input": "0xa9059cbb000000000000000000000000e77b1ac803616503510bed0086e3a7be2627a69900000000000000000000000000000000000000000000000000000009502f9000", + "to": "0x43064693d3d38ad6a7cb579e0d6d9718c8aa6b62", + "value": "0x0" + }, + "blockNumber": 2294631, + "error": "Out of 
gas", + "result": {}, + "subtraces": 0, + "traceAddress": [], + "type": "call" + } + ] +} diff --git a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/result_output.json b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/result_output.json new file mode 100644 index 00000000..62baf333 --- /dev/null +++ b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/result_output.json @@ -0,0 +1,111 @@ +{ + "genesis": { + "difficulty": "1911202", + "extraData": "0xd883010906846765746888676f312e31332e35856c696e7578", + "gasLimit": "7842876", + "hash": "0x4d7bc82e0d56307094378e1a8fbfa6260986f621de95b5fe68a95248b3ba8efe", + "miner": "0x877bd459c9b7d8576b44e59e09d076c25946f443", + "mixHash": "0xc102ad52677c391edab82cc895ca7a7e9fff3eed4fa966ecf7fb61ec1e84bb6b", + "nonce": "0x39f5b074e3437f3f", + "number": "553415", + "stateRoot": "0x8f89e79109c19fa00e72b400502448540dc4773ad92dddd341dbba20c710a3b5", + "timestamp": "1577396195", + "totalDifficulty": "458361299240", + "alloc": { + "0x531f76bad925f6a925474996c7d738c1008045f6": { + "balance": "0x0", + "nonce": "1", + "code": 
"0x6060604052361561008a576000357c01000000000000000000000000000000000000000000000000000000009004806301cb3b20146102bf57806329dcb0cf146102cc57806338af3eed146102ed5780636e66f6e9146103245780637a3a0e841461035b5780637b3e5e7b1461037c578063a035b1fe1461039d578063dc0d3dff146103be5761008a565b6102bd5b60003490506040604051908101604052803381526020018281526020015060066000506006600050805480919060010190908154818355818115116101365760020281600202836000526020600020918201910161013591906100ec565b808211156101315760006000820160006101000a81549073ffffffffffffffffffffffffffffffffffffffff02191690556001820160005060009055506001016100ec565b5090565b5b505050815481101561000257906000526020600020906002020160005060008201518160000160006101000a81548173ffffffffffffffffffffffffffffffffffffffff0219169083021790555060208201518160010160005055905050806002600082828250540192505081905550600560009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166390b98a11336004600050548404604051837c0100000000000000000000000000000000000000000000000000000000028152600401808373ffffffffffffffffffffffffffffffffffffffff168152602001828152602001925050506020604051808303816000876161da5a03f1156100025750505060405151507fe842aea7a5f1b01049d752008c53c52890b1a6daf660cf39e8eec506112bbdf633826001604051808473ffffffffffffffffffffffffffffffffffffffff168152602001838152602001828152602001935050505060405180910390a15b50565b005b6102ca6004506104c8565b005b6102d760045061043a565b6040518082815260200191505060405180910390f35b6102f8600450610402565b604051808273ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b61032f60045061044c565b604051808273ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b610366600450610428565b6040518082815260200191505060405180910390f35b610387600450610431565b6040518082815260200191505060405180910390f35b6103a8600450610443565b6040518082815260200191505060405180910390f35b6103cf600480359060200150610472565b604051808373fffffffffffffffffffffffffffffffff
fffffff1681526020018281526020019250505060405180910390f35b600060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b60016000505481565b60026000505481565b60036000505481565b60046000505481565b600560009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b60066000508181548110156100025790600052602060002090600202016000915090508060000160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff16908060010160005054905082565b6000600360005054421015156107d8576001600050546002600050541015156105cf57600060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166000600260005054604051809050600060405180830381858888f19350505050507fe842aea7a5f1b01049d752008c53c52890b1a6daf660cf39e8eec506112bbdf6600060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff166002600050546000604051808473ffffffffffffffffffffffffffffffffffffffff168152602001838152602001828152602001935050505060405180910390a161079d565b7fe842aea7a5f1b01049d752008c53c52890b1a6daf660cf39e8eec506112bbdf66000600b600060405180848152602001838152602001828152602001935050505060405180910390a1600090505b60066000505481101561079c57600660005081815481101561000257906000526020600020906002020160005060000160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166000600660005083815481101561000257906000526020600020906002020160005060010160005054604051809050600060405180830381858888f19350505050507fe842aea7a5f1b01049d752008c53c52890b1a6daf660cf39e8eec506112bbdf6600660005082815481101561000257906000526020600020906002020160005060000160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff166006600050838154811015610002579060005260206000209060020201600050600101600050546000604051808473ffffffffffffffffffffffffffffffffffffffff168152602001838152602001828152602001935050505060405180910390a15b806001019050805061061e565b5b600060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673fffffffffffffffffffffffff
fffffffffffffff16ff5b5b5056", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000006": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0xf652222313e28459528d920b65115c16c04f3efc82aaedc97be59f3f377c0d3f": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0xf652222313e28459528d920b65115c16c04f3efc82aaedc97be59f3f377c0d40": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000002": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000005": "0x000000000000000000000000b49180d443dc4ca6028de0031ac09337891fd8ce", + "0x0000000000000000000000000000000000000000000000000000000000000004": "0x0000000000000000000000000000000000000000000000000de0b6b3a7640000" + } + }, + "0xb49180d443dc4ca6028de0031ac09337891fd8ce": { + "balance": "0x0", + "nonce": "0", + "code": "0x", + "storage": {} + }, + "0x877bd459c9b7d8576b44e59e09d076c25946f443": { + "balance": "0x193e9986e2e3f0c58988", + "nonce": "2585", + "code": "0x", + "storage": {} + } + }, + "config": { + "chainId": 63, + "daoForkSupport": true, + "eip150Block": 0, + "eip150Hash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d", + "eip155Block": 0, + "eip158Block": 0, + "ethash": {}, + "homesteadBlock": 0, + "byzantiumBlock": 0, + "constantinopleBlock": 301243, + "petersburgBlock": 999983, + "istanbulBlock": 999983 + } + }, + "context": { + "number": "553416", + "difficulty": "1909336", + "timestamp": "1577396224", + "gasLimit": "7835218", + "miner": "0x877bd459c9b7d8576b44e59e09d076c25946f443" + }, + "input": "0xf870820a1985e8d4a5100083040b2894531f76bad925f6a925474996c7d738c1008045f6880de0b6b3a76400008081a2a08693170f040d9501b831b404d9e40fba040c5aef4b8974aedc20b3844aea7c32a0476861058ff9b8030c58bcba8be320acc855e4694a633c493fb50fbdb9455489", + "result": [ + { + "type": "call", 
+ "action": { + "from": "0x877bd459c9b7d8576b44e59e09d076c25946f443", + "to": "0x531f76bad925f6a925474996c7d738c1008045f6", + "value": "0xde0b6b3a7640000", + "gas": "0x40b28", + "input": "0x", + "callType": "call" + }, + "result": { + "gasUsed": "0x19c3e", + "output": "0x" + }, + "traceAddress": [], + "subtraces": 1, + "transactionPosition": 5, + "transactionHash": "0x04d2029a5cbbed30969cdc0a2ca9e9fc6b719e323af0802b52466f07ee0ecada", + "blockNumber": 553416, + "blockHash": "0x8df024322173d225a09681d35edeaa528aca60743a11a70f854c158862bf5282", + "time": "617.42µs" + }, + { + "type": "call", + "action": { + "from": "0x531f76bad925f6a925474996c7d738c1008045f6", + "to": "0xb49180d443dc4ca6028de0031ac09337891fd8ce", + "value": "0x0", + "gas": "0x2164e", + "input": "0x90b98a11000000000000000000000000877bd459c9b7d8576b44e59e09d076c25946f4430000000000000000000000000000000000000000000000000000000000000001", + "callType": "call" + }, + "result": { + "gasUsed": "0x0", + "output": "0x" + }, + "traceAddress": [ + 0 + ], + "subtraces": 0, + "transactionPosition": 5, + "transactionHash": "0x04d2029a5cbbed30969cdc0a2ca9e9fc6b719e323af0802b52466f07ee0ecada", + "blockNumber": 553416, + "blockHash": "0x8df024322173d225a09681d35edeaa528aca60743a11a70f854c158862bf5282" + } + ] +} \ No newline at end of file diff --git a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/revert.json b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/revert.json new file mode 100644 index 00000000..b0346d86 --- /dev/null +++ b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/revert.json @@ -0,0 +1,68 @@ +{ + "context": { + "difficulty": "3665057456", + "gasLimit": "5232723", + "miner": "0xf4d8e706cfb25c0decbbdd4d2e2cc10c66376a3f", + "number": "2294501", + "timestamp": "1513673601" + }, + "genesis": { + "alloc": { + "0x0f6cef2b7fbb504782e35aa82a2207e816a2b7a9": { + "balance": "0x2a3fc32bcc019283", + "code": "0x", + "nonce": "10", + "storage": {} + }, + 
"0xabbcd5b340c80b5f1c0545c04c987b87310296ae": { + "balance": "0x0", + "code": "0x606060405236156100755763ffffffff7c01000000000000000000000000000000000000000000000000000000006000350416632d0335ab811461007a578063548db174146100ab5780637f649783146100fc578063b092145e1461014d578063c3f44c0a14610186578063c47cf5de14610203575b600080fd5b341561008557600080fd5b610099600160a060020a0360043516610270565b60405190815260200160405180910390f35b34156100b657600080fd5b6100fa600460248135818101908301358060208181020160405190810160405280939291908181526020018383602002808284375094965061028f95505050505050565b005b341561010757600080fd5b6100fa600460248135818101908301358060208181020160405190810160405280939291908181526020018383602002808284375094965061029e95505050505050565b005b341561015857600080fd5b610172600160a060020a03600435811690602435166102ad565b604051901515815260200160405180910390f35b341561019157600080fd5b6100fa6004803560ff1690602480359160443591606435600160a060020a0316919060a49060843590810190830135806020601f8201819004810201604051908101604052818152929190602084018383808284375094965050509235600160a060020a031692506102cd915050565b005b341561020e57600080fd5b61025460046024813581810190830135806020601f8201819004810201604051908101604052818152929190602084018383808284375094965061056a95505050505050565b604051600160a060020a03909116815260200160405180910390f35b600160a060020a0381166000908152602081905260409020545b919050565b61029a816000610594565b5b50565b61029a816001610594565b5b50565b600160209081526000928352604080842090915290825290205460ff1681565b60008080600160a060020a038416158061030d5750600160a060020a038085166000908152600160209081526040808320339094168352929052205460ff165b151561031857600080fd5b6103218561056a565b600160a060020a038116600090815260208190526040808220549295507f19000000000000000000000000000000000000000000000000000000000000009230918891908b908b90517fff000000000000000000000000000000000000000000000000000000000000008089168252871660018201526c01000000000000000000000000600160a060020a0380881682026002840152868116820260168
40152602a8301869052841602604a820152605e810182805190602001908083835b6020831061040057805182525b601f1990920191602091820191016103e0565b6001836020036101000a0380198251168184511617909252505050919091019850604097505050505050505051809103902091506001828a8a8a6040516000815260200160405260006040516020015260405193845260ff90921660208085019190915260408085019290925260608401929092526080909201915160208103908084039060008661646e5a03f1151561049957600080fd5b5050602060405103519050600160a060020a03838116908216146104bc57600080fd5b600160a060020a0380841660009081526020819052604090819020805460010190559087169086905180828051906020019080838360005b8381101561050d5780820151818401525b6020016104f4565b50505050905090810190601f16801561053a5780820380516001836020036101000a031916815260200191505b5091505060006040518083038160008661646e5a03f1915050151561055e57600080fd5b5b505050505050505050565b600060248251101561057e5750600061028a565b600160a060020a0360248301511690505b919050565b60005b825181101561060157600160a060020a033316600090815260016020526040812083918584815181106105c657fe5b90602001906020020151600160a060020a031681526020810191909152604001600020805460ff19169115159190911790555b600101610597565b5b5050505600a165627a7a723058200027e8b695e9d2dea9f3629519022a69f3a1d23055ce86406e686ea54f31ee9c0029", + "nonce": "1", + "storage": {} + } + }, + "config": { + "byzantiumBlock": 1700000, + "chainId": 3, + "daoForkSupport": true, + "eip150Block": 0, + "eip150Hash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d", + "eip155Block": 10, + "eip158Block": 10, + "ethash": {}, + "homesteadBlock": 0 + }, + "difficulty": "3672229776", + "extraData": "0x4554482e45544846414e532e4f52472d4641313738394444", + "gasLimit": "5227619", + "hash": "0xa07b3d6c6bf63f5f981016db9f2d1d93033833f2c17e8bf7209e85f1faf08076", + "miner": "0xbbf5029fd710d227630c8b7d338051b8e76d50b3", + "mixHash": "0x806e151ce2817be922e93e8d5921fa0f0d0fd213d6b2b9a3fa17458e74a163d0", + "nonce": "0xbc5d43adc2c30c7d", + "number": "2294500", + "stateRoot": 
"0xca645b335888352ef9d8b1ef083e9019648180b259026572e3139717270de97d", + "timestamp": "1513673552", + "totalDifficulty": "7160066586979149" + }, + "input": "0xf9018b0a8505d21dba00832dc6c094abbcd5b340c80b5f1c0545c04c987b87310296ae80b9012473b40a5c000000000000000000000000400de2e016bda6577407dfc379faba9899bc73ef0000000000000000000000002cc31912b2b0f3075a87b3640923d45a26cef3ee000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000064d79d8e6c7265636f76657279416464726573730000000000000000000000000000000000000000000000000000000000383e3ec32dc0f66d8fe60dbdc2f6815bdf73a988383e3ec32dc0f66d8fe60dbdc2f6815bdf73a988000000000000000000000000000000000000000000000000000000000000000000000000000000001ba0fd659d76a4edbd2a823e324c93f78ad6803b30ff4a9c8bce71ba82798975c70ca06571eecc0b765688ec6c78942c5ee8b585e00988c0141b518287e9be919bc48a", + "result": [ + { + "action": { + "callType": "call", + "from": "0x0f6cef2b7fbb504782e35aa82a2207e816a2b7a9", + "gas": "0x2dc6c0", + "input": "0x73b40a5c000000000000000000000000400de2e016bda6577407dfc379faba9899bc73ef0000000000000000000000002cc31912b2b0f3075a87b3640923d45a26cef3ee000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000064d79d8e6c7265636f76657279416464726573730000000000000000000000000000000000000000000000000000000000383e3ec32dc0f66d8fe60dbdc2f6815bdf73a988383e3ec32dc0f66d8fe60dbdc2f6815bdf73a98800000000000000000000000000000000000000000000000000000000000000000000000000000000", + "to": "0xabbcd5b340c80b5f1c0545c04c987b87310296ae", + "value": "0x0" + }, + "blockNumber": 2294501, + "error": "execution reverted", + "result": { + "gasUsed": "0x719b" + }, + "subtraces": 0, + "traceAddress": [], + "type": "call" + } + ] +} diff --git 
a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/revert_reason.json b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/revert_reason.json new file mode 100644 index 00000000..6759b05e --- /dev/null +++ b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/revert_reason.json @@ -0,0 +1,74 @@ +{ + "context": { + "difficulty": "2", + "gasLimit": "8000000", + "miner": "0x0000000000000000000000000000000000000000", + "number": "3212651", + "timestamp": "1597246515" + }, + "genesis": { + "alloc": { + "0xf58833cf0c791881b494eb79d461e08a1f043f52": { + "balance": "0x0", + "code": "0x608060405234801561001057600080fd5b50600436106100a5576000357c010000000000000000000000000000000000000000000000000000000090048063609ff1bd11610078578063609ff1bd146101af5780639e7b8d61146101cd578063a3ec138d14610211578063e2ba53f0146102ae576100a5565b80630121b93f146100aa578063013cf08b146100d85780632e4176cf146101215780635c19a95c1461016b575b600080fd5b6100d6600480360360208110156100c057600080fd5b81019080803590602001909291905050506102cc565b005b610104600480360360208110156100ee57600080fd5b8101908080359060200190929190505050610469565b604051808381526020018281526020019250505060405180910390f35b61012961049a565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b6101ad6004803603602081101561018157600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff1690602001909291905050506104bf565b005b6101b76108db565b6040518082815260200191505060405180910390f35b61020f600480360360208110156101e357600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050610952565b005b6102536004803603602081101561022757600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050610b53565b60405180858152602001841515151581526020018373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200182815260200194505050505060405180910390f35b61
02b6610bb0565b6040518082815260200191505060405180910390f35b6000600160003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020905060008160000154141561038a576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260148152602001807f486173206e6f20726967687420746f20766f746500000000000000000000000081525060200191505060405180910390fd5b8060010160009054906101000a900460ff161561040f576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252600e8152602001807f416c726561647920766f7465642e00000000000000000000000000000000000081525060200191505060405180910390fd5b60018160010160006101000a81548160ff02191690831515021790555081816002018190555080600001546002838154811061044757fe5b9060005260206000209060020201600101600082825401925050819055505050565b6002818154811061047657fe5b90600052602060002090600202016000915090508060000154908060010154905082565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b6000600160003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002090508060010160009054906101000a900460ff1615610587576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260128152602001807f596f7520616c726561647920766f7465642e000000000000000000000000000081525060200191505060405180910390fd5b3373ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff161415610629576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252601e8152602001807f53656c662d64656c65676174696f6e20697320646973616c6c6f7765642e000081525060200191505060405180910390fd5b5b600073ffffffffffffffffffffffffffffffffffffffff16600160008473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060010160019054906101000a900473ffffffffffffff
ffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16146107cc57600160008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060010160019054906101000a900473ffffffffffffffffffffffffffffffffffffffff1691503373ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff1614156107c7576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260198152602001807f466f756e64206c6f6f7020696e2064656c65676174696f6e2e0000000000000081525060200191505060405180910390fd5b61062a565b60018160010160006101000a81548160ff021916908315150217905550818160010160016101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055506000600160008473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002090508060010160009054906101000a900460ff16156108bf578160000154600282600201548154811061089c57fe5b9060005260206000209060020201600101600082825401925050819055506108d6565b816000015481600001600082825401925050819055505b505050565b6000806000905060008090505b60028054905081101561094d57816002828154811061090357fe5b9060005260206000209060020201600101541115610940576002818154811061092857fe5b90600052602060002090600202016001015491508092505b80806001019150506108e8565b505090565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16146109f7576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401808060200182810382526028815260200180610bde6028913960400191505060405180910390fd5b600160008273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060010160009054906101000a900460ff1615610aba576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252
60188152602001807f54686520766f74657220616c726561647920766f7465642e000000000000000081525060200191505060405180910390fd5b6000600160008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000206000015414610b0957600080fd5b60018060008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000206000018190555050565b60016020528060005260406000206000915090508060000154908060010160009054906101000a900460ff16908060010160019054906101000a900473ffffffffffffffffffffffffffffffffffffffff16908060020154905084565b60006002610bbc6108db565b81548110610bc657fe5b90600052602060002090600202016000015490509056fe4f6e6c79206368616972706572736f6e2063616e206769766520726967687420746f20766f74652ea26469706673582212201d282819f8f06fed792100d60a8b08809b081a34a1ecd225e83a4b41122165ed64736f6c63430006060033", + "nonce": "1", + "storage": { + "0x6200beec95762de01ce05f2a0e58ce3299dbb53c68c9f3254a242121223cdf58": "0x0000000000000000000000000000000000000000000000000000000000000000" + } + }, + "0xf7579c3d8a669c89d5ed246a22eb6db8f6fedbf1": { + "balance": "0x57af9d6b3df812900", + "code": "0x", + "nonce": "6", + "storage": {} + } + }, + "config": { + "byzantiumBlock": 0, + "constantinopleBlock": 0, + "petersburgBlock": 0, + "IstanbulBlock": 1561651, + "chainId": 5, + "daoForkSupport": true, + "eip150Block": 0, + "eip150Hash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "eip155Block": 10, + "eip158Block": 10, + "ethash": {}, + "homesteadBlock": 0 + }, + "difficulty": "3509749784", + "extraData": "0x4554482e45544846414e532e4f52472d4641313738394444", + "gasLimit": "4727564", + "hash": "0x609948ac3bd3c00b7736b933248891d6c901ee28f066241bddb28f4e00a9f440", + "miner": "0xbbf5029fd710d227630c8b7d338051b8e76d50b3", + "mixHash": "0xb131e4507c93c7377de00e7c271bf409ec7492767142ff0f45c882f8068c2ada", + "nonce": "0x4eb12e19c16d43da", + "number": "2289805", + "stateRoot": 
"0xc7f10f352bff82fac3c2999d3085093d12652e19c7fd32591de49dc5d91b4f1f", + "timestamp": "1513601261", + "totalDifficulty": "7143276353481064" + }, + "input": "0xf888068449504f80832dc6c094f58833cf0c791881b494eb79d461e08a1f043f5280a45c19a95c000000000000000000000000f7579c3d8a669c89d5ed246a22eb6db8f6fedbf12da0264664db3e71fae1dbdaf2f53954be149ad3b7ba8a5054b4d89c70febfacc8b1a0212e8398757963f419681839ae8c5a54b411e252473c82d93dda68405ca63294", + "result": [ + { + "action": { + "callType": "call", + "from": "0xf7579c3d8a669c89d5ed246a22eb6db8f6fedbf1", + "gas": "0x2dc6c0", + "input": "0x5c19a95c000000000000000000000000f7579c3d8a669c89d5ed246a22eb6db8f6fedbf1", + "to": "0xf58833cf0c791881b494eb79d461e08a1f043f52", + "value": "0x0" + }, + "blockNumber": 3212651, + "error": "execution reverted", + "result": { + "gasUsed": "0x5940", + "output": "0x08c379a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001e53656c662d64656c65676174696f6e20697320646973616c6c6f7765642e0000" + }, + "subtraces": 0, + "traceAddress": [], + "type": "call" + } + ] +} diff --git a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/selfdestruct.json b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/selfdestruct.json new file mode 100644 index 00000000..74fd87cc --- /dev/null +++ b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/selfdestruct.json @@ -0,0 +1,91 @@ +{ + "genesis": { + "difficulty": "4628640", + "extraData": "0xd883010b05846765746888676f312e31342e33856c696e7578", + "gasLimit": "9244120", + "hash": "0x5a1f551897cc91265225b0453136ad8c7eef1c1c8b06139da4f2e6e710c1f4df", + "miner": "0x73f26d124436b0791169d63a3af29c2ae47765a3", + "mixHash": "0xd6735e63f8937fe0c5491e0d5836ec28467363be7ada5a2f979f9d107e2c831e", + "nonce": "0x7c35e34d2e328d7d", + "number": "1555145", + "stateRoot": "0x565873b05f71b98595133e37a52d79c3476ce820c05ebedaddd35541b0e894a3", + "timestamp": "1590793819", + 
"totalDifficulty": "2241994078605", + "alloc": { + "0x119f569a45e9d0089d51d7f9529f5ea9bf5785e2": { + "balance": "0x0", + "nonce": "0", + "code": "0x", + "storage": {} + }, + "0x877bd459c9b7d8576b44e59e09d076c25946f443": { + "balance": "0x622e8fced69d43eb8d97", + "nonce": "260140", + "code": "0x", + "storage": {} + } + }, + "config": { + "chainId": 63, + "daoForkSupport": true, + "eip150Block": 0, + "eip150Hash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d", + "eip155Block": 0, + "eip158Block": 0, + "ethash": {}, + "homesteadBlock": 0, + "byzantiumBlock": 0, + "constantinopleBlock": 301243, + "petersburgBlock": 999983, + "istanbulBlock": 999983 + } + }, + "context": { + "number": "1555146", + "difficulty": "4630900", + "timestamp": "1590793820", + "gasLimit": "9253146", + "miner": "0x877bd459c9b7d8576b44e59e09d076c25946f443" + }, + "input": "0xf8628303f82c843b9aca0083019ecc80808e605a600053600160006001f0ff0081a2a077f539ae2a58746bbfa6370fc423f946870efa32753d697d3729d361a428623aa0384ef9a5650d6630f5c1ddef616bffa5fc72a95a9314361d0918de066aa4475a", + "result": [ + { + "type": "create", + "action": { + "from": "0x877bd459c9b7d8576b44e59e09d076c25946f443", + "value": "0x0", + "gas": "0x19ecc", + "init": "0x605a600053600160006001f0ff00" + }, + "result": { + "gasUsed": "0x102a1", + "code": "0x", + "address": "0x1d99a1a3efa9181f540f9e24fa6e4e08eb7844ca" + }, + "traceAddress": [], + "subtraces": 1, + "transactionPosition": 14, + "transactionHash": "0xdd76f02407e2f8329303ba688e111cae4f7008ad0d14d6e42c5698424ea36d79", + "blockNumber": 1555146, + "blockHash": "0xafb4f1dd27b9054c805acb81a88ed04384788cb31d84164c21874935c81e5c7e", + "time": "187.145µs" + }, + { + "type": "suicide", + "action": { + "address": "0x1d99a1a3efa9181f540f9e24fa6e4e08eb7844ca", + "refundAddress": "0x0000000000000000000000000000000000000000", + "balance": "0x0" + }, + "result": null, + "traceAddress": [ + 0 + ], + "subtraces": 0, + "transactionPosition": 14, + "transactionHash": 
"0xdd76f02407e2f8329303ba688e111cae4f7008ad0d14d6e42c5698424ea36d79", + "blockNumber": 1555146, + "blockHash": "0xafb4f1dd27b9054c805acb81a88ed04384788cb31d84164c21874935c81e5c7e" + } + ] +} \ No newline at end of file diff --git a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/simple.json b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/simple.json new file mode 100644 index 00000000..a7244e97 --- /dev/null +++ b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/simple.json @@ -0,0 +1,97 @@ +{ + "context": { + "difficulty": "3502894804", + "gasLimit": "4722976", + "miner": "0x1585936b53834b021f68cc13eeefdec2efc8e724", + "number": "2289806", + "timestamp": "1513601314" + }, + "genesis": { + "alloc": { + "0x0024f658a46fbb89d8ac105e98d7ac7cbbaf27c5": { + "balance": "0x0", + "code": "0x", + "nonce": "22", + "storage": {} + }, + "0x3b873a919aa0512d5a0f09e6dcceaa4a6727fafe": { + "balance": "0x4d87094125a369d9bd5", + "code": "0x606060405236156100935763ffffffff60e060020a60003504166311ee8382811461009c57806313af4035146100be5780631f5e8f4c146100ee57806324daddc5146101125780634921a91a1461013b57806363e4bff414610157578063764978f91461017f578063893d20e8146101a1578063ba40aaa1146101cd578063cebc9a82146101f4578063e177246e14610216575b61009a5b5b565b005b34156100a457fe5b6100ac61023d565b60408051918252519081900360200190f35b34156100c657fe5b6100da600160a060020a0360043516610244565b604080519115158252519081900360200190f35b34156100f657fe5b6100da610307565b604080519115158252519081900360200190f35b341561011a57fe5b6100da6004351515610318565b604080519115158252519081900360200190f35b6100da6103d6565b604080519115158252519081900360200190f35b6100da600160a060020a0360043516610420565b604080519115158252519081900360200190f35b341561018757fe5b6100ac61046c565b60408051918252519081900360200190f35b34156101a957fe5b6101b1610473565b60408051600160a060020a039092168252519081900360200190f35b34156101d557fe5b6100da600435610483565b604080519115158252519081900360200190f35b34156101fc57fe
5b6100ac61050d565b60408051918252519081900360200190f35b341561021e57fe5b6100da600435610514565b604080519115158252519081900360200190f35b6003545b90565b60006000610250610473565b600160a060020a031633600160a060020a03161415156102705760006000fd5b600160a060020a03831615156102865760006000fd5b50600054600160a060020a0390811690831681146102fb57604051600160a060020a0380851691908316907ffcf23a92150d56e85e3a3d33b357493246e55783095eb6a733eb8439ffc752c890600090a360008054600160a060020a031916600160a060020a03851617905560019150610300565b600091505b5b50919050565b60005460a060020a900460ff165b90565b60006000610324610473565b600160a060020a031633600160a060020a03161415156103445760006000fd5b5060005460a060020a900460ff16801515831515146102fb576000546040805160a060020a90920460ff1615158252841515602083015280517fe6cd46a119083b86efc6884b970bfa30c1708f53ba57b86716f15b2f4551a9539281900390910190a16000805460a060020a60ff02191660a060020a8515150217905560019150610300565b600091505b5b50919050565b60006103e0610307565b801561040557506103ef610473565b600160a060020a031633600160a060020a031614155b156104105760006000fd5b610419336105a0565b90505b5b90565b600061042a610307565b801561044f5750610439610473565b600160a060020a031633600160a060020a031614155b1561045a5760006000fd5b610463826105a0565b90505b5b919050565b6001545b90565b600054600160a060020a03165b90565b6000600061048f610473565b600160a060020a031633600160a060020a03161415156104af5760006000fd5b506001548281146102fb57604080518281526020810185905281517f79a3746dde45672c9e8ab3644b8bb9c399a103da2dc94b56ba09777330a83509929181900390910190a160018381559150610300565b600091505b5b50919050565b6002545b90565b60006000610520610473565b600160a060020a031633600160a060020a03161415156105405760006000fd5b506002548281146102fb57604080518281526020810185905281517ff6991a728965fedd6e927fdf16bdad42d8995970b4b31b8a2bf88767516e2494929181900390910190a1600283905560019150610300565b600091505b5b50919050565b60006000426105ad61023d565b116102fb576105c46105bd61050d565b4201610652565b6105cc61046c565b604051909150600160a060020a03841690829060008181
8185876187965a03f1925050501561063d57604080518281529051600160a060020a038516917f9bca65ce52fdef8a470977b51f247a2295123a4807dfa9e502edf0d30722da3b919081900360200190a260019150610300565b6102fb42610652565b5b600091505b50919050565b60038190555b505600a165627a7a72305820f3c973c8b7ed1f62000b6701bd5b708469e19d0f1d73fde378a56c07fd0b19090029", + "nonce": "1", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000000": "0x000000000000000000000001b436ba50d378d4bbc8660d312a13df6af6e89dfb", + "0x0000000000000000000000000000000000000000000000000000000000000001": "0x00000000000000000000000000000000000000000000000006f05b59d3b20000", + "0x0000000000000000000000000000000000000000000000000000000000000002": "0x000000000000000000000000000000000000000000000000000000000000003c", + "0x0000000000000000000000000000000000000000000000000000000000000003": "0x000000000000000000000000000000000000000000000000000000005a37b834" + } + }, + "0xb436ba50d378d4bbc8660d312a13df6af6e89dfb": { + "balance": "0x1780d77678137ac1b775", + "code": "0x", + "nonce": "29072", + "storage": {} + } + }, + "config": { + "byzantiumBlock": 1700000, + "chainId": 3, + "daoForkSupport": true, + "eip150Block": 0, + "eip150Hash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d", + "eip155Block": 10, + "eip158Block": 10, + "ethash": {}, + "homesteadBlock": 0 + }, + "difficulty": "3509749784", + "extraData": "0x4554482e45544846414e532e4f52472d4641313738394444", + "gasLimit": "4727564", + "hash": "0x609948ac3bd3c00b7736b933248891d6c901ee28f066241bddb28f4e00a9f440", + "miner": "0xbbf5029fd710d227630c8b7d338051b8e76d50b3", + "mixHash": "0xb131e4507c93c7377de00e7c271bf409ec7492767142ff0f45c882f8068c2ada", + "nonce": "0x4eb12e19c16d43da", + "number": "2289805", + "stateRoot": "0xc7f10f352bff82fac3c2999d3085093d12652e19c7fd32591de49dc5d91b4f1f", + "timestamp": "1513601261", + "totalDifficulty": "7143276353481064" + }, + "input": 
"0xf88b8271908506fc23ac0083015f90943b873a919aa0512d5a0f09e6dcceaa4a6727fafe80a463e4bff40000000000000000000000000024f658a46fbb89d8ac105e98d7ac7cbbaf27c52aa0bdce0b59e8761854e857fe64015f06dd08a4fbb7624f6094893a79a72e6ad6bea01d9dde033cff7bb235a3163f348a6d7ab8d6b52bc0963a95b91612e40ca766a4", + "result": [ + { + "action": { + "callType": "call", + "from": "0xb436ba50d378d4bbc8660d312a13df6af6e89dfb", + "gas": "0x15f90", + "input": "0x63e4bff40000000000000000000000000024f658a46fbb89d8ac105e98d7ac7cbbaf27c5", + "to": "0x3b873a919aa0512d5a0f09e6dcceaa4a6727fafe", + "value": "0x0" + }, + "blockNumber": 2289806, + "result": { + "gasUsed": "0x9751", + "output": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + "subtraces": 1, + "traceAddress": [], + "type": "call" + }, + { + "action": { + "callType": "call", + "from": "0x3b873a919aa0512d5a0f09e6dcceaa4a6727fafe", + "gas": "0x6d05", + "input": "0x", + "to": "0x0024f658a46fbb89d8ac105e98d7ac7cbbaf27c5", + "value": "0x6f05b59d3b20000" + }, + "blockNumber": 0, + "result": { + "gasUsed": "0x0", + "output": "0x" + }, + "subtraces": 0, + "traceAddress": [0], + "type": "call" + } + ] +} diff --git a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/simple_onlytop.json b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/simple_onlytop.json new file mode 100644 index 00000000..5fbdf55d --- /dev/null +++ b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/simple_onlytop.json @@ -0,0 +1,100 @@ +{ + "context": { + "difficulty": "3502894804", + "gasLimit": "4722976", + "miner": "0x1585936b53834b021f68cc13eeefdec2efc8e724", + "number": "2289806", + "timestamp": "1513601314" + }, + "genesis": { + "alloc": { + "0x0024f658a46fbb89d8ac105e98d7ac7cbbaf27c5": { + "balance": "0x0", + "code": "0x", + "nonce": "22", + "storage": {} + }, + "0x3b873a919aa0512d5a0f09e6dcceaa4a6727fafe": { + "balance": "0x4d87094125a369d9bd5", + "code": 
"0x606060405236156100935763ffffffff60e060020a60003504166311ee8382811461009c57806313af4035146100be5780631f5e8f4c146100ee57806324daddc5146101125780634921a91a1461013b57806363e4bff414610157578063764978f91461017f578063893d20e8146101a1578063ba40aaa1146101cd578063cebc9a82146101f4578063e177246e14610216575b61009a5b5b565b005b34156100a457fe5b6100ac61023d565b60408051918252519081900360200190f35b34156100c657fe5b6100da600160a060020a0360043516610244565b604080519115158252519081900360200190f35b34156100f657fe5b6100da610307565b604080519115158252519081900360200190f35b341561011a57fe5b6100da6004351515610318565b604080519115158252519081900360200190f35b6100da6103d6565b604080519115158252519081900360200190f35b6100da600160a060020a0360043516610420565b604080519115158252519081900360200190f35b341561018757fe5b6100ac61046c565b60408051918252519081900360200190f35b34156101a957fe5b6101b1610473565b60408051600160a060020a039092168252519081900360200190f35b34156101d557fe5b6100da600435610483565b604080519115158252519081900360200190f35b34156101fc57fe5b6100ac61050d565b60408051918252519081900360200190f35b341561021e57fe5b6100da600435610514565b604080519115158252519081900360200190f35b6003545b90565b60006000610250610473565b600160a060020a031633600160a060020a03161415156102705760006000fd5b600160a060020a03831615156102865760006000fd5b50600054600160a060020a0390811690831681146102fb57604051600160a060020a0380851691908316907ffcf23a92150d56e85e3a3d33b357493246e55783095eb6a733eb8439ffc752c890600090a360008054600160a060020a031916600160a060020a03851617905560019150610300565b600091505b5b50919050565b60005460a060020a900460ff165b90565b60006000610324610473565b600160a060020a031633600160a060020a03161415156103445760006000fd5b5060005460a060020a900460ff16801515831515146102fb576000546040805160a060020a90920460ff1615158252841515602083015280517fe6cd46a119083b86efc6884b970bfa30c1708f53ba57b86716f15b2f4551a9539281900390910190a16000805460a060020a60ff02191660a060020a8515150217905560019150610300565b600091505b5b50919050565b60006103e0610307565b80156104055
7506103ef610473565b600160a060020a031633600160a060020a031614155b156104105760006000fd5b610419336105a0565b90505b5b90565b600061042a610307565b801561044f5750610439610473565b600160a060020a031633600160a060020a031614155b1561045a5760006000fd5b610463826105a0565b90505b5b919050565b6001545b90565b600054600160a060020a03165b90565b6000600061048f610473565b600160a060020a031633600160a060020a03161415156104af5760006000fd5b506001548281146102fb57604080518281526020810185905281517f79a3746dde45672c9e8ab3644b8bb9c399a103da2dc94b56ba09777330a83509929181900390910190a160018381559150610300565b600091505b5b50919050565b6002545b90565b60006000610520610473565b600160a060020a031633600160a060020a03161415156105405760006000fd5b506002548281146102fb57604080518281526020810185905281517ff6991a728965fedd6e927fdf16bdad42d8995970b4b31b8a2bf88767516e2494929181900390910190a1600283905560019150610300565b600091505b5b50919050565b60006000426105ad61023d565b116102fb576105c46105bd61050d565b4201610652565b6105cc61046c565b604051909150600160a060020a038416908290600081818185876187965a03f1925050501561063d57604080518281529051600160a060020a038516917f9bca65ce52fdef8a470977b51f247a2295123a4807dfa9e502edf0d30722da3b919081900360200190a260019150610300565b6102fb42610652565b5b600091505b50919050565b60038190555b505600a165627a7a72305820f3c973c8b7ed1f62000b6701bd5b708469e19d0f1d73fde378a56c07fd0b19090029", + "nonce": "1", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000000": "0x000000000000000000000001b436ba50d378d4bbc8660d312a13df6af6e89dfb", + "0x0000000000000000000000000000000000000000000000000000000000000001": "0x00000000000000000000000000000000000000000000000006f05b59d3b20000", + "0x0000000000000000000000000000000000000000000000000000000000000002": "0x000000000000000000000000000000000000000000000000000000000000003c", + "0x0000000000000000000000000000000000000000000000000000000000000003": "0x000000000000000000000000000000000000000000000000000000005a37b834" + } + }, + 
"0xb436ba50d378d4bbc8660d312a13df6af6e89dfb": { + "balance": "0x1780d77678137ac1b775", + "code": "0x", + "nonce": "29072", + "storage": {} + } + }, + "config": { + "byzantiumBlock": 1700000, + "chainId": 3, + "daoForkSupport": true, + "eip150Block": 0, + "eip150Hash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d", + "eip155Block": 10, + "eip158Block": 10, + "ethash": {}, + "homesteadBlock": 0 + }, + "difficulty": "3509749784", + "extraData": "0x4554482e45544846414e532e4f52472d4641313738394444", + "gasLimit": "4727564", + "hash": "0x609948ac3bd3c00b7736b933248891d6c901ee28f066241bddb28f4e00a9f440", + "miner": "0xbbf5029fd710d227630c8b7d338051b8e76d50b3", + "mixHash": "0xb131e4507c93c7377de00e7c271bf409ec7492767142ff0f45c882f8068c2ada", + "nonce": "0x4eb12e19c16d43da", + "number": "2289805", + "stateRoot": "0xc7f10f352bff82fac3c2999d3085093d12652e19c7fd32591de49dc5d91b4f1f", + "timestamp": "1513601261", + "totalDifficulty": "7143276353481064" + }, + "input": "0xf88b8271908506fc23ac0083015f90943b873a919aa0512d5a0f09e6dcceaa4a6727fafe80a463e4bff40000000000000000000000000024f658a46fbb89d8ac105e98d7ac7cbbaf27c52aa0bdce0b59e8761854e857fe64015f06dd08a4fbb7624f6094893a79a72e6ad6bea01d9dde033cff7bb235a3163f348a6d7ab8d6b52bc0963a95b91612e40ca766a4", + "tracerConfig": { + "onlyTopCall": true + }, + "result": [ + { + "action": { + "callType": "call", + "from": "0xb436ba50d378d4bbc8660d312a13df6af6e89dfb", + "gas": "0x15f90", + "input": "0x63e4bff40000000000000000000000000024f658a46fbb89d8ac105e98d7ac7cbbaf27c5", + "to": "0x3b873a919aa0512d5a0f09e6dcceaa4a6727fafe", + "value": "0x0" + }, + "blockNumber": 2289806, + "result": { + "gasUsed": "0x9751", + "output": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + "subtraces": 1, + "traceAddress": [], + "type": "call" + }, + { + "action": { + "callType": "call", + "from": "0x3b873a919aa0512d5a0f09e6dcceaa4a6727fafe", + "gas": "0x6d05", + "input": "0x", + "to": 
"0x0024f658a46fbb89d8ac105e98d7ac7cbbaf27c5", + "value": "0x6f05b59d3b20000" + }, + "blockNumber": 0, + "result": { + "gasUsed": "0x0", + "output": "0x" + }, + "subtraces": 0, + "traceAddress": [0], + "type": "call" + } + ] +} diff --git a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/skip_no_balance_error.json b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/skip_no_balance_error.json new file mode 100644 index 00000000..96060d55 --- /dev/null +++ b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/skip_no_balance_error.json @@ -0,0 +1,70 @@ +{ + "genesis": { + "difficulty": "4673862", + "extraData": "0xd683010b05846765746886676f312e3133856c696e7578", + "gasLimit": "9471919", + "hash": "0x7f072150c5905c214966e3432d418910badcdbe510aceaac295b1d7059cc0ffc", + "miner": "0x877bd459c9b7d8576b44e59e09d076c25946f443", + "mixHash": "0x113ced8fedb939fdc862008da7bdddde726f997c0e6dfba0e55613994757b489", + "nonce": "0x0f411a2e5552c5b7", + "number": "1555284", + "stateRoot": "0x9fe125b361b72d5479b24ad9be9964b74228c73a2dfb0065060a79b4a6dfaa1e", + "timestamp": "1590795374", + "totalDifficulty": "2242642335405", + "alloc": { + "0xe85df1413eebe1b191c26260e19783a274a6b041": { + "balance": "0x0", + "nonce": "0", + "code": "0x", + "storage": {} + }, + "0x877bd459c9b7d8576b44e59e09d076c25946f443": { + "balance": "0x6244c985ef1e48e84531", + "nonce": "265775", + "code": "0x", + "storage": {} + } + }, + "config": { + "chainId": 63, + "daoForkSupport": true, + "eip150Block": 0, + "eip150Hash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d", + "eip155Block": 0, + "eip158Block": 0, + "ethash": {}, + "homesteadBlock": 0, + "byzantiumBlock": 0, + "constantinopleBlock": 301243, + "petersburgBlock": 999983, + "istanbulBlock": 999983 + } + }, + "context": { + "number": "1555285", + "difficulty": "4676144", + "timestamp": "1590795378", + "gasLimit": "9481167", + "miner": "0x877bd459c9b7d8576b44e59e09d076c25946f443" + }, + 
"input": "0xf9014083040e2f843b9aca008301aab08080b8eb7f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b57f000000000000000000000000000000000000000000000000000000000000c3507f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b5547f000000000000000000000000000000000000000000000000000000000000c3507f000000000000000000000000000000000000000000000000000000000000c3507f00000000000000000000000000000000000000000000000000000000000000007f000000000000000000000000000000000000000000000000000000000000000037f05581a2a09db45e7846f193471f6d897fb6ff58b7ec41a9c6f63d10aca47d821c365981cba052ec320875625e16141a1a9e8b7993de863698fb699f93ae2cab26149bbb144f", + "result": [ + { + "type": "create", + "action": { + "from": "0x877bd459c9b7d8576b44e59e09d076c25946f443", + "value": "0x0", + "gas": "0x1aab0", + "init": "0x7f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b57f000000000000000000000000000000000000000000000000000000000000c3507f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b5547f000000000000000000000000000000000000000000000000000000000000c3507f000000000000000000000000000000000000000000000000000000000000c3507f00000000000000000000000000000000000000000000000000000000000000007f000000000000000000000000000000000000000000000000000000000000000037f055" + }, + "error": "out of gas", + "traceAddress": [], + "subtraces": 0, + "transactionPosition": 16, + "transactionHash": "0x384487e5ae8d2997aece8e28403d393cb9752425e6de358891bed981c5af1c05", + "blockNumber": 1555285, + "blockHash": "0x93231d8e9662adb4c5c703583a92c7b3112cd5448f43ab4fa1f0f00a0183ed3f", + "time": "665.278µs" + } + ] +} \ No newline at end of file diff --git a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/staticcall_precompiled.json b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/staticcall_precompiled.json new file mode 100644 index 00000000..45ffbe2d --- /dev/null +++ 
b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/staticcall_precompiled.json @@ -0,0 +1,83 @@ +{ + "genesis": { + "difficulty": "2028219", + "extraData": "0xd883010906846765746888676f312e31332e35856c696e7578", + "gasLimit": "23481547", + "hash": "0x3c06114e88c26b52decfe4e5f6d4d51cfaaea0317b646017fac32fadbe7df9f5", + "miner": "0x2a1442b4fbabf7b5507c13ccf076a547abfaeb1b", + "mixHash": "0x46108f74220c5ab23651f93912b14fea37ed1380d22e10639a1f5651c98cb949", + "nonce": "0x426a5267e0b636fe", + "number": "567687", + "stateRoot": "0x7b4b193fe73ef87101c7c325954681861cc240c299d03459784b2b11c9c522ae", + "timestamp": "1577578008", + "totalDifficulty": "485254950048", + "alloc": { + "0x8521f13dd5e4bc3dab3cf0f01a195a5af899e851": { + "balance": "0x0", + "nonce": "1", + "code": "0x608060405260043610610251576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806301ffc9a7146102565780630519ce79146102c857806306fdde031461031f578063095ea7b3146103af5780630a0f81681461040a5780631155dfe51461046157806318160ddd1461048c5780631b57cd44146104b7578063200b1e641461050657806327d7874c146105cb5780632ba73c151461061c5780633108e4d71461066d578063317676bf146106bc5780633f4ba83a1461071557806342842e0e1461072c57806346cb96fa146107a75780634e0a3379146107f65780635501d42d146108475780635c975abb146108a05780635fd8c710146108cf5780636352211e146108e65780636af04a571461096157806370a08231146109b85780637158798814610a1d5780637866928014610a6e5780638456cb5914610ae95780638462151c14610b0057806385ac788214610ba657806395787d2614610c2c57806395d89b4114610c6e57806396b5d99214610cfe578063990581b614610d795780639db797f014610e2d578063ab8f933a14610e80578063ad84202814610eab578063b047fb5014610ed6578063b355752214610f2d578063b9db15b414610f7c578063bc4006f514610fd2578063ca083be214611029578063cdd22c9314611082578063cec21acb146110d1578063e078d8b114611136578063e17b25af14611182578063e52ab74b146111d3578063f010432314611222578063fac9c51f1461129d578063fdb33429146112ec578063fffb147914611367575b600080fd5b34801561026
257600080fd5b506102ae6004803603602081101561027957600080fd5b8101908080357bffffffffffffffffffffffffffffffffffffffffffffffffffffffff191690602001909291905050506113e2565b604051808215151515815260200191505060405180910390f35b3480156102d457600080fd5b506102dd6116cb565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34801561032b57600080fd5b506103346116f1565b6040518080602001828103825283818151815260200191508051906020019080838360005b83811015610374578082015181840152602081019050610359565b50505050905090810190601f1680156103a15780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b3480156103bb57600080fd5b50610408600480360360408110156103d257600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff1690602001909291908035906020019092919050505061172a565b005b34801561041657600080fd5b5061041f6117c4565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34801561046d57600080fd5b506104766117e9565b6040518082815260200191505060405180910390f35b34801561049857600080fd5b506104a16117f6565b6040518082815260200191505060405180910390f35b3480156104c357600080fd5b506104f0600480360360208110156104da57600080fd5b8101908080359060200190929190505050611806565b6040518082815260200191505060405180910390f35b34801561051257600080fd5b506105b5600480360360a081101561052957600080fd5b81019080803590602001909291908035906020019064010000000081111561055057600080fd5b82018360208201111561056257600080fd5b8035906020019184600183028401116401000000008311171561058457600080fd5b9091929391929390803560ff169060200190929190803590602001909291908035906020019092919050505061181e565b6040518082815260200191505060405180910390f35b3480156105d757600080fd5b5061061a600480360360208110156105ee57600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050611cac565b005b34801561062857600080fd5b5061066b6004803603602081101561063f57600080fd5b810190808
03573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050611d86565b005b34801561067957600080fd5b506106a66004803603602081101561069057600080fd5b8101908080359060200190929190505050611e61565b6040518082815260200191505060405180910390f35b3480156106c857600080fd5b506106ff600480360360408110156106df57600080fd5b810190808035906020019092919080359060200190929190505050611e79565b6040518082815260200191505060405180910390f35b34801561072157600080fd5b5061072a611ea9565b005b34801561073857600080fd5b506107a56004803603606081101561074f57600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190803573ffffffffffffffffffffffffffffffffffffffff16906020019092919080359060200190929190505050611f86565b005b3480156107b357600080fd5b506107e0600480360360208110156107ca57600080fd5b8101908080359060200190929190505050612053565b6040518082815260200191505060405180910390f35b34801561080257600080fd5b506108456004803603602081101561081957600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff16906020019092919050505061206b565b005b34801561085357600080fd5b5061088a6004803603604081101561086a57600080fd5b810190808035906020019092919080359060200190929190505050612146565b6040518082815260200191505060405180910390f35b3480156108ac57600080fd5b506108b5612176565b604051808215151515815260200191505060405180910390f35b3480156108db57600080fd5b506108e4612189565b005b3480156108f257600080fd5b5061091f6004803603602081101561090957600080fd5b810190808035906020019092919050505061226d565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34801561096d57600080fd5b506109766122e6565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b3480156109c457600080fd5b50610a07600480360360208110156109db57600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff16906020019092919050505061230c565b6040518082815260200191505060405180910390f35b348015610a2957600080fd5b50610
a6c60048036036020811015610a4057600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050612355565b005b348015610a7a57600080fd5b50610aa760048036036020811015610a9157600080fd5b8101908080359060200190929190505050612472565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b348015610af557600080fd5b50610afe6124a5565b005b348015610b0c57600080fd5b50610b4f60048036036020811015610b2357600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff1690602001909291905050506125e9565b6040518080602001828103825283818151815260200191508051906020019060200280838360005b83811015610b92578082015181840152602081019050610b77565b505050509050019250505060405180910390f35b348015610bb257600080fd5b50610c16600480360360c0811015610bc957600080fd5b810190808035906020019092919080359060200190929190803515159060200190929190803560ff1690602001909291908035906020019092919080359060200190929190505050612737565b6040518082815260200191505060405180910390f35b610c5860048036036020811015610c4257600080fd5b8101908080359060200190929190505050612c0c565b6040518082815260200191505060405180910390f35b348015610c7a57600080fd5b50610c8361304b565b6040518080602001828103825283818151815260200191508051906020019080838360005b83811015610cc3578082015181840152602081019050610ca8565b50505050905090810190601f168015610cf05780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b348015610d0a57600080fd5b50610d3760048036036020811015610d2157600080fd5b8101908080359060200190929190505050613084565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b348015610d8557600080fd5b50610db260048036036020811015610d9c57600080fd5b81019080803590602001909291905050506130b7565b6040518080602001828103825283818151815260200191508051906020019080838360005b83811015610df2578082015181840152602081019050610dd7565b50505050905090810190601f168015610e1f5780820380516001836020036101000
a031916815260200191505b509250505060405180910390f35b348015610e3957600080fd5b50610e6660048036036020811015610e5057600080fd5b810190808035906020019092919050505061317b565b604051808215151515815260200191505060405180910390f35b348015610e8c57600080fd5b50610e956131b3565b6040518082815260200191505060405180910390f35b348015610eb757600080fd5b50610ec06131b9565b6040518082815260200191505060405180910390f35b348015610ee257600080fd5b50610eeb6131bf565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b348015610f3957600080fd5b50610f6660048036036020811015610f5057600080fd5b81019080803590602001909291905050506131e5565b6040518082815260200191505060405180910390f35b348015610f8857600080fd5b50610fb560048036036020811015610f9f57600080fd5b81019080803590602001909291905050506131fd565b604051808381526020018281526020019250505060405180910390f35b348015610fde57600080fd5b50610fe7613235565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34801561103557600080fd5b5061106c6004803603604081101561104c57600080fd5b81019080803590602001909291908035906020019092919050505061325b565b6040518082815260200191505060405180910390f35b34801561108e57600080fd5b506110bb600480360360208110156110a557600080fd5b810190808035906020019092919050505061328b565b6040518082815260200191505060405180910390f35b3480156110dd57600080fd5b50611120600480360360208110156110f457600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff1690602001909291905050506132ab565b6040518082815260200191505060405180910390f35b61116c6004803603604081101561114c57600080fd5b8101908080359060200190929190803590602001909291905050506132c3565b6040518082815260200191505060405180910390f35b34801561118e57600080fd5b506111d1600480360360208110156111a557600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff1690602001909291905050506134ee565b005b3480156111df57600080fd5b5061120c600480360360208110156111f657600080fd5b81019080803
5906020019092919050505061358d565b6040518082815260200191505060405180910390f35b34801561122e57600080fd5b5061125b6004803603602081101561124557600080fd5b81019080803590602001909291905050506135ad565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b3480156112a957600080fd5b506112d6600480360360208110156112c057600080fd5b81019080803590602001909291905050506135e0565b6040518082815260200191505060405180910390f35b3480156112f857600080fd5b506113256004803603602081101561130f57600080fd5b81019080803590602001909291905050506135f8565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34801561137357600080fd5b506113a06004803603602081101561138a57600080fd5b810190808035906020019092919050505061362b565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b600060405180807f737570706f727473496e74657266616365286279746573342900000000000000815250601901905060405180910390207bffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916827bffffffffffffffffffffffffffffffffffffffffffffffffffffffff19161480611610575060405180807f746f6b656e734f664f776e6572286164647265737329000000000000000000008152506016019050604051809103902060405180807f736166655472616e7366657246726f6d28616464726573732c6164647265737381526020017f2c75696e743235362900000000000000000000000000000000000000000000008152506029019050604051809103902060405180807f617070726f766528616464726573732c75696e743235362900000000000000008152506018019050604051809103902060405180807f6f776e65724f662875696e7432353629000000000000000000000000000000008152506010019050604051809103902060405180807f62616c616e63654f6628616464726573732900000000000000000000000000008152506012019050604051809103902060405180807f746f74616c537570706c79282900000000000000000000000000000000000000815250600d019050604051809103902018181818187bfffffffffffffffffffffffffffffffffffffff
fffffffffffffffff1916827bffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916145b806116c4575060405180807f73796d626f6c28290000000000000000000000000000000000000000000000008152506008019050604051809103902060405180807f6e616d652829000000000000000000000000000000000000000000000000000081525060060190506040518091039020187bffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916827bffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916145b9050919050565b600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b6040805190810160405280600781526020017f426974766965770000000000000000000000000000000000000000000000000081525081565b600260149054906101000a900460ff1615151561174657600080fd5b611750338261365e565b151561175b57600080fd5b61176581836136ca565b808273ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff167f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b92560405160405180910390a45050565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b6000600480549050905090565b6000600160048054905003905090565b60166020528060005260406000206000915090505481565b600085858080601f016020809104026020016040519081016040528093929190818152602001838380828437600081840152601f19601f820116905080830192505050505050506101006000825111801561187a575080825111155b151561188557600080fd5b33896005600082815260200190815260200160002060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff16141515156118f557600080fd5b6000878760405160200180838152602001828152602001925050506040516020818303038152906040528051906020012090506000600e6000838152602001908152602001600020541415156119b3576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260168152602001807f5369676e617475726520416c726561647920557365640000000000000000000081525060200191505060405180910390fd5b600560008d815260200190815260200160002060009054906
101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1660018d60405160200180828152602001915050604051602081830303815290604052805190602001208b8b8b60405160008152602001604052604051808581526020018460ff1660ff1681526020018381526020018281526020019450505050506020604051602081039080840390855afa158015611a80573d6000803e3d6000fd5b5050506020604051035173ffffffffffffffffffffffffffffffffffffffff16141515611aac57600080fd5b611ab4613a21565b6020604051908101604052808d8d8080601f016020809104026020016040519081016040528093929190818152602001838380828437600081840152601f19601f82011690508083019250505050505050815250905060006001600a839080600181540180825580915050906001820390600052602060002001600090919290919091506000820151816000019080519060200190611b54929190613a35565b5050500390508063ffffffff1681141515611b6e57600080fd5b7fe819187a0cf517f3c23c7bd6e6b11a3aec56ec3f2784dc69ac56ebac668748ee3382604051808373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020018281526020019250505060405180910390a133600b600083815260200190815260200160002060006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055508d600c600083815260200190815260200160002081905550600860008f81526020019081526020016000208190806001815401808255809150509060018203906000526020600020016000909192909190915055508d600e600085815260200190815260200160002081905550809750505050505050509695505050505050565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16141515611d0757600080fd5b600073ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff1614151515611d4357600080fd5b806000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555050565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673fffffffffffffffffffff
fffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16141515611de157600080fd5b600073ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff1614151515611e1d57600080fd5b80600260006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555050565b60116020528060005260406000206000915090505481565b600860205281600052604060002081815481101515611e9457fe5b90600052602060002001600091509150505481565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16141515611f0457600080fd5b600260149054906101000a900460ff161515611f1f57600080fd5b600073ffffffffffffffffffffffffffffffffffffffff16601860009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16141515611f7c57600080fd5b611f84613720565b565b600260149054906101000a900460ff16151515611fa257600080fd5b600073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff1614151515611fde57600080fd5b3073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff161415151561201957600080fd5b61202333826137b3565b151561202e57600080fd5b612038838261365e565b151561204357600080fd5b61204e83838361381f565b505050565b600e6020528060005260406000206000915090505481565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff161415156120c657600080fd5b600073ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff161415151561210257600080fd5b80600160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555050565b600d6020528160005260406000208181548110151561216157fe5b90600052602060002001600091509150505481565b600260149054906101000a900460ff1681565b600160009054906101000a900473fffffffff
fffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff161415156121e557600080fd5b60003073ffffffffffffffffffffffffffffffffffffffff16319050600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050158015612269573d6000803e3d6000fd5b5050565b60006005600083815260200190815260200160002060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff169050600073ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff16141515156122e157600080fd5b919050565b601860009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b6000600660008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020549050919050565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff161415156123b057600080fd5b600260149054906101000a900460ff1615156123cb57600080fd5b80601860006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055507f450db8da6efbe9c22f2347f7c2021231df1fc58d3ae9a2fa75d39fa44619930581604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390a150565b600b6020528060005260406000206000915054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b600260009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16148061254d57506000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16145b806125a55750600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673fffffffffffffffffffffffff
fffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16145b15156125b057600080fd5b600260149054906101000a900460ff161515156125cc57600080fd5b6001600260146101000a81548160ff021916908315150217905550565b606060006125f68361230c565b9050600081141561263a5760006040519080825280602002602001820160405280156126315781602001602082028038833980820191505090505b50915050612732565b60608160405190808252806020026020018201604052801561266b5781602001602082028038833980820191505090505b50905060006126786117f6565b905060008090506000600190505b8281111515612729578673ffffffffffffffffffffffffffffffffffffffff166005600083815260200190815260200160002060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16141561271c5780848381518110151561270557fe5b906020019060200201818152505081806001019250505b8080600101915050612686565b83955050505050505b919050565b600033876005600082815260200190815260200160002060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff16141515156127a957600080fd5b60008585604051602001808381526020018281526020019250505060405160208183030381529060405280519060200120905060008911156128715788601260008381526020019081526020016000205414151515612870576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260168152602001807f5369676e617475726520416c726561647920557365640000000000000000000081525060200191505060405180910390fd5b5b600560008b815260200190815260200160002060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1660018b604051602001808281526020019150506040516020818303038152906040528051906020012089898960405160008152602001604052604051808581526020018460ff1660ff1681526020018381526020018281526020019450505050506020604051602081039080840390855afa15801561293e573d6000803e3d6000fd5b5050506020604051035173ffffffffffffffffffffffffffffffffffffffff1614151561296a57600080fd5b600
0339050600073ffffffffffffffffffffffffffffffffffffffff16600b60008c815260200190815260200160002060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16141515156129de57600080fd5b8073ffffffffffffffffffffffffffffffffffffffff16600b60008c815260200190815260200160002060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1614151515612a4c57600080fd5b612a54613ab5565b6020604051908101604052808b1515815250905060006001600f8390806001815401808255809150509060018203906000526020600020016000909192909190915060008201518160000160006101000a81548160ff02191690831515021790555050500390508063ffffffff1681141515612acf57600080fd5b7fa10f25ef783c24056e27eb55eb6c0ac1c4863cd5eab7e657cd067926b3dce0648382604051808373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020018281526020019250505060405180910390a1826010600083815260200190815260200160002060006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550600d60008d81526020019081526020016000208190806001815401808255809150509060018203906000526020600020016000909192909190915055508b60116000838152602001908152602001600020819055508b60126000868152602001908152602001600020819055508096505050505050509695505050505050565b600034601354808210151515612c2157600080fd5b60003390506000600b600087815260200190815260200160002060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff169050600073ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff1614151515612c9a57600080fd5b8173ffffffffffffffffffffffffffffffffffffffff16600b600088815260200190815260200160002060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1614151515612d0857600080fd5b612d10613acb565b602060405190810160405280348152509050600060016014839080600181540180825580915050906001820390600052602060002001600090919290919091506000820
151816000015550500390508063ffffffff1681141515612d7357600080fd5b836015600083815260200190815260200160002060006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055508760166000838152602001908152602001600020819055506000606434604602811515612dee57fe5b0490506000600d60008b815260200190815260200160002080549050823403811515612e1657fe5b049050600b60008b815260200190815260200160002060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166108fc839081150290604051600060405180830381858888f19350505050158015612e92573d6000803e3d6000fd5b5060008090505b600d60008c815260200190815260200160002080549050811015612fcf5760106000600d60008e815260200190815260200160002083815481101515612edb57fe5b9060005260206000200154815260200190815260200160002060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166108fc839081150290604051600060405180830381858888f19350505050158015612f5a573d6000803e3d6000fd5b5060176000858152602001908152602001600020600d60008d815260200190815260200160002082815481101515612f8e57fe5b906000526020600020015490806001815401808255809150509060018203906000526020600020016000909192909190915055508080600101915050612e99565b507f6ea1e5e03071ff9bad53b614eafcc00d29db646e9c351fcc00d45a4118d7c51a8684604051808373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020018281526020019250505060405180910390a18298505050505050505050919050565b6040805190810160405280600281526020017f425600000000000000000000000000000000000000000000000000000000000081525081565b60056020528060005260406000206000915054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b60606000600a838154811015156130ca57fe5b906000526020600020019050806000018054600181600116156101000203166002900480601f01602080910402602001604051908101604052809291908181526020018280546001816001161561010002031660029004801561316e5780601f10613143576101008083540402835291602
0019161316e565b820191906000526020600020905b81548152906001019060200180831161315157829003601f168201915b5050505050915050919050565b600080600f8381548110151561318d57fe5b9060005260206000200190508060000160009054906101000a900460ff16915050919050565b60035481565b60135481565b600260009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b60126020528060005260406000206000915090505481565b600080600060048481548110151561321157fe5b90600052602060002090600202019050806000015492508060010154915050915091565b600960009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b60176020528160005260406000208181548110151561327657fe5b90600052602060002001600091509150505481565b600060086000838152602001908152602001600020805490509050919050565b60066020528060005260406000206000915090505481565b6000346003548082101515156132d857600080fd5b8460007f01000000000000000000000000000000000000000000000000000000000000000281600060208110151561330c57fe5b1a7f0100000000000000000000000000000000000000000000000000000000000000027effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff19161415151561335e57600080fd5b8460007f01000000000000000000000000000000000000000000000000000000000000000281600060208110151561339257fe5b1a7f0100000000000000000000000000000000000000000000000000000000000000027effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916141515156133e457600080fd5b60003390506133f1613adf565b60408051908101604052808a81526020018981525090506000600160048390806001815401808255809150509060018203906000526020600020906002020160009091929091909150600082015181600001556020820151816001015550500390508063ffffffff168114151561346757600080fd5b7f982bb66d9aa60573bc0a2066122e1466ecbc4c179a5e7c1c5b589345008ce69a8382604051808373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020018281526020019250505060405180910390a16134de6000848361381f565b8097505050505050505092915050565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673fffffffffffffffffffffffffffffffffffff
fff163373ffffffffffffffffffffffffffffffffffffffff1614151561354957600080fd5b80600960006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555050565b6000600d6000838152602001908152602001600020805490509050919050565b60156020528060005260406000206000915054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b600c6020528060005260406000206000915090505481565b60076020528060005260406000206000915054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b60106020528060005260406000206000915054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b60008273ffffffffffffffffffffffffffffffffffffffff166005600084815260200190815260200160002060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1614905092915050565b806007600084815260200190815260200160002060006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055505050565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614151561377b57600080fd5b600260149054906101000a900460ff16151561379657600080fd5b6000600260146101000a81548160ff021916908315150217905550565b60008273ffffffffffffffffffffffffffffffffffffffff166007600084815260200190815260200160002060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1614905092915050565b600660008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060008154809291906001019190505550816005600083815260200190815260200160002060006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550600073ffffffffffffffffffffffffffffffffffffffff168373ffffffffffffffffffffffffffffffffffffffff1614151561397d57600660008473fffffffffffffffffffffffffffffff
fffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060008154809291906001900391905055506007600082815260200190815260200160002060006101000a81549073ffffffffffffffffffffffffffffffffffffffff02191690555b7f70a295484349ac4c2073cdca8ba026869fff31e0d35e268f820e44c9d25f4a2e838383604051808473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020018373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001828152602001935050505060405180910390a1505050565b602060405190810160405280606081525090565b828054600181600116156101000203166002900490600052602060002090601f016020900481019282601f10613a7657805160ff1916838001178555613aa4565b82800160010185558215613aa4579182015b82811115613aa3578251825591602001919060010190613a88565b5b509050613ab19190613aff565b5090565b6020604051908101604052806000151581525090565b602060405190810160405280600081525090565b604080519081016040528060008019168152602001600080191681525090565b613b2191905b80821115613b1d576000816000905550600101613b05565b5090565b9056fea165627a7a72305820b73bf81476c95567782e45ebae5220573d46c55a9004c11243c470bc91f2d26d0029", + "storage": { + "0x05b8ccbb9d4d8fb16ea74ce3c29a41f1b461fbdaff4714a0d9a8eb05499746bc": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0xa54c2b4154b4f221d71d6d5bc0ec905c931a021bb6fb138fc0495bb0373e2276": "0x0000000000000000000000000000000000000000000000000000000000000000" + } + }, + "0x0000000000000000000000000000000000000001": { + "balance": "0x0", + "nonce": "0", + "code": "0x", + "storage": {} + }, + "0x877bd459c9b7d8576b44e59e09d076c25946f443": { + "balance": "0xcec3d4daf44926cc41e", + "nonce": "147795", + "code": "0x", + "storage": {} + } + }, + "config": { + "chainId": 63, + "daoForkSupport": true, + "eip150Block": 0, + "eip150Hash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d", + "eip155Block": 0, + "eip158Block": 0, + "ethash": {}, + "homesteadBlock": 0, + 
"byzantiumBlock": 0, + "constantinopleBlock": 301243, + "petersburgBlock": 999983, + "istanbulBlock": 999983 + } + }, + "context": { + "number": "567688", + "difficulty": "2028219", + "timestamp": "1577578023", + "gasLimit": "23504477", + "miner": "0x877bd459c9b7d8576b44e59e09d076c25946f443" + }, + "input": "0xf9018f8302415385746a52880083048196948521f13dd5e4bc3dab3cf0f01a195a5af899e85180b90124200b1e64000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000001b9af799918107e9a339eba0584b8b60b35aae6f087c74f6bfc00c9301849b204d094ed65e09c76c2597f5516f9440aad2921e50dde096e7caaa65a536d4d9265e00000000000000000000000000000000000000000000000000000000000000504269747669657720697320616e20616d617a696e6720776562736974652e20596f752073686f756c6420646566696e6974656c792061646420796f75722070726f6475637420746f2069742e20e282bf0000000000000000000000000000000081a2a0686e4a69e1fa6cac6b4f751a3935ca5a371d720c34d3a7136988aa017a528ed5a07d993e607b665c24557d0eae166c21fe744e618ed3430902ac6206c63a331dc0", + "result": [ + { + "action": { + "author": "0x0000000000000000000000000000000000000000", + "address": "0x0000000000000000000000000000000000000000", + "balance": "0x0", + "callType": "call", + "from": "0x877bd459c9b7d8576b44e59e09d076c25946f443", + "gas": "0x48196", + "input": "0x200b1e64000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000001b9af799918107e9a339eba0584b8b60b35aae6f087c74f6bfc00c9301849b204d094ed65e09c76c2597f5516f9440aad2921e50dde096e7caaa65a536d4d9265e00000000000000000000000000000000000000000000000000000000000000504269747669657720697320616e20616d617a696e6720776562736974652e20596f752073686f756c6420646566696e6974656c792061646420796f75722070726f6475637420746f2069742e20e282bf00000000000000000000000000000000", + 
"refundAddress": "0x0000000000000000000000000000000000000000", + "to": "0x8521f13dd5e4bc3dab3cf0f01a195a5af899e851", + "value": "0x0" + }, + "error": "execution reverted", + "result": { + "gasUsed": "0x947c" + }, + "subtraces": 0, + "traceAddress": [], + "type": "call" + } + ] +} \ No newline at end of file diff --git a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/suicide.json b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/suicide.json new file mode 100644 index 00000000..16d43767 --- /dev/null +++ b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/suicide.json @@ -0,0 +1,92 @@ +{ + "genesis": { + "number": "553153", + "hash": "0x88bde20840880a1f3fba92121912a3cc0d3b26d76e4d914fbd85fc2e43da3b3f", + "nonce": "0x7be554ffe4b82fc2", + "mixHash": "0xf73d2ff3c16599c3b8a24b9ebde6c09583b5ee3f747d3cd37845d564f4c8d87a", + "stateRoot": "0x40b5f53d610108947688a04fb68838ff9c0aa0dd6e54156b682537192171ff5c", + "miner": "0x774c398d763161f55b66a646f17edda4addad2ca", + "difficulty": "1928226", + "totalDifficulty": "457857582215", + "extraData": "0xd983010907846765746888676f312e31332e358664617277696e", + "gasLimit": "7999473", + "timestamp": "1577392669", + "alloc": { + "0x877bd459c9b7d8576b44e59e09d076c25946f443": { + "balance": "0x19bb4ac611ca7a1fc881", + "nonce": "701", + "code": "0x", + "storage": {} + }, + "0x8ee79c5b3f6e1d214d2c4fcf7ea4092a32e26e91": { + "balance": "0x0", + "nonce": "1", + "code": 
"0x60606040526000357c01000000000000000000000000000000000000000000000000000000009004806341c0e1b514610044578063cfae32171461005157610042565b005b61004f6004506100ca565b005b61005c60045061015e565b60405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156100bc5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b600060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16141561015b57600060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16ff5b5b565b60206040519081016040528060008152602001506001600050805480601f016020809104026020016040519081016040528092919081815260200182805480156101cd57820191906000526020600020905b8154815290600101906020018083116101b057829003601f168201915b505050505090506101d9565b9056", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000000": "0x000000000000000000000000877bd459c9b7d8576b44e59e09d076c25946f443" + } + } + }, + "config": { + "chainId": 63, + "daoForkSupport": true, + "eip150Block": 0, + "eip150Hash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d", + "eip155Block": 0, + "eip158Block": 0, + "ethash": {}, + "homesteadBlock": 0, + "byzantiumBlock": 0, + "constantinopleBlock": 301243, + "petersburgBlock": 999983, + "istanbulBlock": 999983 + } + }, + "context": { + "number": "553154", + "difficulty": "1929167", + "timestamp": "1577392670", + "gasLimit": "8000000", + "miner": "0x877bd459c9b7d8576b44e59e09d076c25946f443" + }, + "input": "0xf86c8202bd850ee6b280008344aa20948ee79c5b3f6e1d214d2c4fcf7ea4092a32e26e91808441c0e1b581a2a03f95ca5cdf7fd727630341c4c6aa1b64ccd9949bd9ecc72cfdd7ce17a2013a69a06d34795ef7fb0108a6dbee4ae0a1bdc48dcd2a4ee53bb6a33d45515af07bb9a8", + "result": [ + { + "action": { + "callType": "call", + "from": 
"0x877bd459c9b7d8576b44e59e09d076c25946f443", + "gas": "0x44aa20", + "input": "0x41c0e1b5", + "to": "0x8ee79c5b3f6e1d214d2c4fcf7ea4092a32e26e91", + "value": "0x0" + }, + "blockHash": "0xf641c3b0f82b07cd3a528adb9927dd83eeb4f1682e2bd523ed36888e0d82c9a9", + "blockNumber": 553154, + "result": { + "gasUsed": "0x347a", + "output": "0x" + }, + "subtraces": 1, + "traceAddress": [], + "transactionHash": "0x6af0a5c3188ffacae4d340d4a17e14fdb5a54187683a80ef241bde248189882b", + "transactionPosition": 15, + "type": "call" + }, + { + "action": { + "address": "0x8ee79c5b3f6e1d214d2c4fcf7ea4092a32e26e91", + "balance": "0x0", + "refundAddress": "0x877bd459c9b7d8576b44e59e09d076c25946f443" + }, + "blockHash": "0xf641c3b0f82b07cd3a528adb9927dd83eeb4f1682e2bd523ed36888e0d82c9a9", + "blockNumber": 553154, + "subtraces": 0, + "traceAddress": [ + 0 + ], + "transactionHash": "0x6af0a5c3188ffacae4d340d4a17e14fdb5a54187683a80ef241bde248189882b", + "transactionPosition": 15, + "type": "suicide" + } + ] +} \ No newline at end of file diff --git a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/throw.json b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/throw.json new file mode 100644 index 00000000..a001178a --- /dev/null +++ b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_flat/throw.json @@ -0,0 +1,70 @@ +{ + "context": { + "difficulty": "117009631", + "gasLimit": "4712388", + "miner": "0x294e5d6c39a36ce38af1dca70c1060f78dee8070", + "number": "25009", + "timestamp": "1479891666" + }, + "genesis": { + "alloc": { + "0x70c9217d814985faef62b124420f8dfbddd96433": { + "balance": "0x4ecd70668f5d854a", + "code": "0x", + "nonce": "1638", + "storage": {} + }, + "0xc212e03b9e060e36facad5fd8f4435412ca22e6b": { + "balance": "0x0", + "code": 
"0x606060405236156101745760e060020a600035046302d05d3f811461017c57806304a7fdbc1461018e5780630e90f957146101fb5780630fb5a6b41461021257806314baa1b61461021b57806317fc45e21461023a5780632b096926146102435780632e94420f1461025b578063325a19f11461026457806336da44681461026d5780633f81a2c01461027f5780633fc306821461029757806345ecd3d7146102d45780634665096d146102dd5780634e71d92d146102e657806351a34eb8146103085780636111bb951461032d5780636f265b93146103445780637e9014e11461034d57806390ba009114610360578063927df5e014610393578063a7f437791461046c578063ad8f50081461046e578063bc6d909414610477578063bdec3ad114610557578063c19d93fb1461059a578063c9503fe2146105ad578063e0a73a93146105b6578063ea71b02d146105bf578063ea8a1af0146105d1578063ee4a96f9146105f3578063f1ff78a01461065c575b61046c610002565b610665600054600160a060020a031681565b6040805160c081810190925261046c9160049160c4918390600690839083908082843760408051808301909152929750909561018495509193509091908390839080828437509095505050505050600554600090600160a060020a0390811633909116146106a857610002565b61068260015460a060020a900460ff166000145b90565b61069660085481565b61046c600435600154600160a060020a03166000141561072157610002565b610696600d5481565b610696600435600f8160068110156100025750015481565b61069660045481565b61069660035481565b610665600554600160a060020a031681565b61069660043560158160068110156100025750015481565b6106966004355b600b54600f5460009160028202808203928083039290810191018386101561078357601054840186900394505b50505050919050565b61069660025481565b61069660095481565b61046c600554600090600160a060020a03908116339091161461085857610002565b61046c600435600554600090600160a060020a03908116339091161461092e57610002565b6106826001805460a060020a900460ff161461020f565b610696600b5481565b61068260075460a060020a900460ff1681565b6106966004355b600b54601554600091600282028082039280830392908101910183861015610a6c5760165494506102cb565b61046c6004356024356044356040805160015460e360020a631c2d8fb302825260b260020a691858d8dbdd5b9d18dd1b02600483015291516000928392600160a060020a03919091169163e16c7d989160248
1810192602092909190829003018187876161da5a03f1156100025750505060405180519060200150905080600160a060020a031663c4b0c96a336040518260e060020a0281526004018082600160a060020a031681526020019150506020604051808303816000876161da5a03f1156100025750506040515115159050610b4657610002565b005b610696600a5481565b61046c60006000600060006000600160009054906101000a9004600160a060020a0316600160a060020a031663e16c7d986040518160e060020a028152600401808060b260020a691858d8dbdd5b9d18dd1b0281526020015060200190506020604051808303816000876161da5a03f1156100025750505060405180519060200150905080600160a060020a031663c4b0c96a336040518260e060020a0281526004018082600160a060020a031681526020019150506020604051808303816000876161da5a03f1156100025750506040515115159050610f1757610002565b61046c5b60015b60058160ff16101561071e57600f6001820160ff166006811015610002578101549060ff83166006811015610002570154101561129057610002565b61069660015460a060020a900460ff1681565b61069660065481565b610696600c5481565b610665600754600160a060020a031681565b61046c600554600090600160a060020a0390811633909116146112c857610002565b6040805160c081810190925261046c9160049160c4918390600690839083908082843760408051808301909152929750909561018495509193509091908390839080828437509095505050505050600154600090600160a060020a03168114156113fb57610002565b610696600e5481565b60408051600160a060020a03929092168252519081900360200190f35b604080519115158252519081900360200190f35b60408051918252519081900360200190f35b5060005b60068160ff16101561070857828160ff166006811015610002576020020151600f60ff831660068110156100025701558160ff82166006811015610002576020020151601560ff831660068110156100025701556001016106ac565b61071061055b565b505050565b600e8054820190555b50565b6040805160015460e060020a6313bc6d4b02825233600160a060020a03908116600484015292519216916313bc6d4b9160248181019260209290919082900301816000876161da5a03f115610002575050604051511515905061071557610002565b83861015801561079257508286105b156107b457600f546010546011548689039082030291909104900394506102cb565b8286101580156107c55750600b5486105b156107e757600f546
011546012548589039082030291909104900394506102cb565b600b5486108015906107f857508186105b1561081d57600b54600f546012546013549289039281039290920204900394506102cb565b81861015801561082c57508086105b1561084e57600f546013546014548489039082030291909104900394506102cb565b60145494506102cb565b60015460a060020a900460ff1660001461087157610002565b600254600a01431161088257610002565b6040805160015460e360020a631c2d8fb302825260a860020a6a636f6e74726163746170690260048301529151600160a060020a03929092169163e16c7d989160248181019260209290919082900301816000876161da5a03f1156100025750505060405180519060200150905080600160a060020a031663771d50e16040518160e060020a0281526004018090506000604051808303816000876161da5a03f1156100025750505050565b60015460a060020a900460ff1660001461094757610002565b600254600a01431161095857610002565b6040805160015460e360020a631c2d8fb302825260a860020a6a636f6e74726163746170690260048301529151600160a060020a03929092169163e16c7d989160248181019260209290919082900301816000876161da5a03f1156100025750506040805180517f51a34eb8000000000000000000000000000000000000000000000000000000008252600482018690529151919350600160a060020a03841692506351a34eb8916024808301926000929190829003018183876161da5a03f11561000257505050600b8290554360025560408051838152905130600160a060020a0316917fa609f6bd4ad0b4f419ddad4ac9f0d02c2b9295c5e6891469055cf73c2b568fff919081900360200190a25050565b838610158015610a7b57508286105b15610a9d576015546016546017548689039082900302919091040194506102cb565b828610158015610aae5750600b5486105b15610ad0576015546017546018548589039082900302919091040194506102cb565b600b548610801590610ae157508186105b15610b0657600b546015546018546019549289039281900392909202040194506102cb565b818610158015610b1557508086105b15610b3757601554601954601a548489039082900302919091040194506102cb565b601a54860181900394506102cb565b60015460a060020a900460ff16600014610b5f57610002565b6001805460a060020a60ff02191660a060020a17908190556040805160e360020a631c2d8fb302815260a860020a6a636f6e74726163746170690260048201529051600160a060020a03929092169163e16c7d9891602
48181019260209290919082900301816000876161da5a03f1156100025750506040805180516004805460e260020a633e4baddd028452908301529151919450600160a060020a038516925063f92eb77491602482810192602092919082900301816000876161da5a03f115610002575050604080518051600a556005547ffebf661200000000000000000000000000000000000000000000000000000000825233600160a060020a03908116600484015216602482015260448101879052905163febf661291606480820192600092909190829003018183876161da5a03f115610002575050508215610cc7576007805473ffffffffffffffffffffffffffffffffffffffff191633179055610dbb565b6040805160055460065460e060020a63599efa6b028352600160a060020a039182166004840152602483015291519184169163599efa6b91604481810192600092909190829003018183876161da5a03f115610002575050604080516006547f56ccb6f000000000000000000000000000000000000000000000000000000000825233600160a060020a03166004830152602482015290516356ccb6f091604480820192600092909190829003018183876161da5a03f115610002575050600580546007805473ffffffffffffffffffffffffffffffffffffffff19908116600160a060020a038416179091551633179055505b6007805460a060020a60ff02191660a060020a87810291909117918290556008544301600955900460ff1615610df757600a54610e039061029e565b600a54610e0b90610367565b600c55610e0f565b600c555b600c54670de0b6b3a7640000850204600d55600754600554604080517f759297bb000000000000000000000000000000000000000000000000000000008152600160a060020a039384166004820152918316602483015260448201879052519184169163759297bb91606481810192600092909190829003018183876161da5a03f11561000257505060408051600754600a54600d54600554600c5460a060020a850460ff161515865260208601929092528486019290925260608401529251600160a060020a0391821694509281169230909116917f3b3d1986083d191be01d28623dc19604728e29ae28bdb9ba52757fdee1a18de2919081900360800190a45050505050565b600954431015610f2657610002565b6001805460a060020a900460ff1614610f3e57610002565b6001805460a060020a60ff0219167402000000000000000000000000000000000000000017908190556040805160e360020a631c2d8fb302815260a860020a6a636f6e74726163746170690260048201529051600160a060020a03929092169
163e16c7d989160248181019260209290919082900301816000876161da5a03f1156100025750506040805180516004805460e260020a633e4baddd028452908301529151919750600160a060020a038816925063f92eb77491602482810192602092919082900301816000876161da5a03f115610002575050604051516007549095506000945060a060020a900460ff1615905061105c57600a5484111561105757600a54600d54670de0b6b3a7640000918603020492505b61107e565b600a5484101561107e57600a54600d54670de0b6b3a764000091869003020492505b60065483111561108e5760065492505b6006548390039150600083111561111857604080516005546007547f5928d37f000000000000000000000000000000000000000000000000000000008352600160a060020a0391821660048401528116602483015260448201869052915191871691635928d37f91606481810192600092909190829003018183876161da5a03f115610002575050505b600082111561117a576040805160055460e060020a63599efa6b028252600160a060020a0390811660048301526024820185905291519187169163599efa6b91604481810192600092909190829003018183876161da5a03f115610002575050505b6040805185815260208101849052808201859052905130600160a060020a0316917f89e690b1d5aaae14f3e85f108dc92d9ab3763a58d45aed8b59daedbbae8fe794919081900360600190a260008311156112285784600160a060020a0316634cc927d785336040518360e060020a0281526004018083815260200182600160a060020a03168152602001925050506000604051808303816000876161da5a03f11561000257505050611282565b84600160a060020a0316634cc927d7600a60005054336040518360e060020a0281526004018083815260200182600160a060020a03168152602001925050506000604051808303816000876161da5a03f115610002575050505b600054600160a060020a0316ff5b60156001820160ff166006811015610002578101549060ff8316600681101561000257015411156112c057610002565b60010161055e565b60015460a060020a900460ff166000146112e157610002565b600254600a0143116112f257610002565b6001546040805160e360020a631c2d8fb302815260a860020a6a636f6e74726163746170690260048201529051600160a060020a03929092169163e16c7d989160248181019260209290919082900301816000876161da5a03f11561000257505060408051805160055460065460e060020a63599efa6b028452600160a060020a0391821660048501526024840152925190945
0918416925063599efa6b916044808301926000929190829003018183876161da5a03f1156100025750505080600160a060020a0316632b68bb2d6040518160e060020a0281526004018090506000604051808303816000876161da5a03f115610002575050600054600160a060020a03169050ff5b6001546040805160e060020a6313bc6d4b02815233600160a060020a039081166004830152915191909216916313bc6d4b91602480830192602092919082900301816000876161da5a03f11561000257505060405151151590506106a85761000256", + "nonce": "1", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000001": "0x0000000000000000000000002cccf5e0538493c235d1c5ef6580f77d99e91396", + "0x0000000000000000000000000000000000000000000000000000000000000002": "0x00000000000000000000000000000000000000000000000000000000000061a9", + "0x0000000000000000000000000000000000000000000000000000000000000005": "0x00000000000000000000000070c9217d814985faef62b124420f8dfbddd96433" + } + } + }, + "config": { + "byzantiumBlock": 1700000, + "chainId": 3, + "daoForkSupport": true, + "eip150Block": 0, + "eip150Hash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d", + "eip155Block": 10, + "eip158Block": 10, + "ethash": {}, + "homesteadBlock": 0 + }, + "difficulty": "117066792", + "extraData": "0xd783010502846765746887676f312e372e33856c696e7578", + "gasLimit": "4712388", + "hash": "0xe23e8d4562a1045b70cbc99fefb20c101a8f0fc8559a80d65fea8896e2f1d46e", + "miner": "0x71842f946b98800fe6feb49f0ae4e253259031c9", + "mixHash": "0x0aada9d6e93dd4db0d09c0488dc0a048fca2ccdc1f3fc7b83ba2a8d393a3a4ff", + "nonce": "0x70849d5838dee2e9", + "number": "25008", + "stateRoot": "0x1e01d2161794768c5b917069e73d86e8dca80cd7f3168c0597de420ab93a3b7b", + "timestamp": "1479891641", + "totalDifficulty": "1896347038589" + }, + "input": 
"0xf88b8206668504a817c8008303d09094c212e03b9e060e36facad5fd8f4435412ca22e6b80a451a34eb8000000000000000000000000000000000000000000000027fad02094277c000029a0692a3b4e7b2842f8dd7832e712c21e09f451f416c8976d5b8d02e8c0c2b4bea9a07645e90fc421b63dd755767fd93d3c03b4ec0c4d8fafa059558d08cf11d59750", + "result": [ + { + "action": { + "callType": "call", + "from": "0x70c9217d814985faef62b124420f8dfbddd96433", + "gas": "0x3d090", + "input": "0x51a34eb8000000000000000000000000000000000000000000000027fad02094277c0000", + "to": "0xc212e03b9e060e36facad5fd8f4435412ca22e6b", + "value": "0x0" + }, + "blockNumber": 25009, + "error": "invalid jump destination", + "result": {}, + "subtraces": 0, + "traceAddress": [], + "type": "call" + } + ] +} diff --git a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/create.json b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/create.json index 8699bf3e..df0b2872 100644 --- a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/create.json +++ b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/create.json @@ -47,8 +47,8 @@ "input": 
"0xf907ef098504e3b29200830897be8080b9079c606060405260405160208061077c83398101604052808051906020019091905050600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff161415151561007d57600080fd5b336000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555080600160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055506001600460006101000a81548160ff02191690831515021790555050610653806101296000396000f300606060405260043610610083576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806305e4382a146100855780631c02708d146100ae5780632e1a7d4d146100c35780635114cb52146100e6578063a37dda2c146100fe578063ae200e7914610153578063b5769f70146101a8575b005b341561009057600080fd5b6100986101d1565b6040518082815260200191505060405180910390f35b34156100b957600080fd5b6100c16101d7565b005b34156100ce57600080fd5b6100e460048080359060200190919050506102eb565b005b6100fc6004808035906020019091905050610513565b005b341561010957600080fd5b6101116105d6565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b341561015e57600080fd5b6101666105fc565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34156101b357600080fd5b6101bb610621565b6040518082815260200191505060405180910390f35b60025481565b60011515600460009054906101000a900460ff1615151415156101f957600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614806102a15750600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16145b15156102ac5760008
0fd5b6000600460006101000a81548160ff0219169083151502179055506003543073ffffffffffffffffffffffffffffffffffffffff163103600281905550565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614806103935750600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16145b151561039e57600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16141561048357600060025411801561040757506002548111155b151561041257600080fd5b80600254036002819055506000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050151561047e57600080fd5b610510565b600060035411801561049757506003548111155b15156104a257600080fd5b8060035403600381905550600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050151561050f57600080fd5b5b50565b60011515600460009054906101000a900460ff16151514151561053557600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614801561059657506003548160035401115b80156105bd575080600354013073ffffffffffffffffffffffffffffffffffffffff163110155b15156105c857600080fd5b806003540160038190555050565b600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b600354815600a165627a7a72305820c3b849e8440987ce43eae3097b77672a69234d516351368b03fe5b7de03807910029000000000000000000000000c65e620a3a55451316168d57e268f5702ef56a1129a01060f46676a5dff6f407f0f51eb6f37f5c8c54e238c70221e18e65fc29d
3ea65a0557b01c50ff4ffaac8ed6e5d31237a4ecbac843ab1bfe8bb0165a0060df7c54f", "result": { "from": "0x13e4acefe6a6700604929946e70e6443e4e73447", - "gas": "0x5e106", - "gasUsed": "0x5e106", + "gas": "0x897be", + "gasUsed": "0x897be", "input": "0x606060405260405160208061077c83398101604052808051906020019091905050600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff161415151561007d57600080fd5b336000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555080600160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055506001600460006101000a81548160ff02191690831515021790555050610653806101296000396000f300606060405260043610610083576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806305e4382a146100855780631c02708d146100ae5780632e1a7d4d146100c35780635114cb52146100e6578063a37dda2c146100fe578063ae200e7914610153578063b5769f70146101a8575b005b341561009057600080fd5b6100986101d1565b6040518082815260200191505060405180910390f35b34156100b957600080fd5b6100c16101d7565b005b34156100ce57600080fd5b6100e460048080359060200190919050506102eb565b005b6100fc6004808035906020019091905050610513565b005b341561010957600080fd5b6101116105d6565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b341561015e57600080fd5b6101666105fc565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34156101b357600080fd5b6101bb610621565b6040518082815260200191505060405180910390f35b60025481565b60011515600460009054906101000a900460ff1615151415156101f957600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffff
ff1614806102a15750600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16145b15156102ac57600080fd5b6000600460006101000a81548160ff0219169083151502179055506003543073ffffffffffffffffffffffffffffffffffffffff163103600281905550565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614806103935750600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16145b151561039e57600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16141561048357600060025411801561040757506002548111155b151561041257600080fd5b80600254036002819055506000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050151561047e57600080fd5b610510565b600060035411801561049757506003548111155b15156104a257600080fd5b8060035403600381905550600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050151561050f57600080fd5b5b50565b60011515600460009054906101000a900460ff16151514151561053557600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614801561059657506003548160035401115b80156105bd575080600354013073ffffffffffffffffffffffffffffffffffffffff163110155b15156105c857600080fd5b806003540160038190555050565b600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b600354815600a165627a7a7230
5820c3b849e8440987ce43eae3097b77672a69234d516351368b03fe5b7de03807910029000000000000000000000000c65e620a3a55451316168d57e268f5702ef56a11", "output": "0x606060405260043610610083576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806305e4382a146100855780631c02708d146100ae5780632e1a7d4d146100c35780635114cb52146100e6578063a37dda2c146100fe578063ae200e7914610153578063b5769f70146101a8575b005b341561009057600080fd5b6100986101d1565b6040518082815260200191505060405180910390f35b34156100b957600080fd5b6100c16101d7565b005b34156100ce57600080fd5b6100e460048080359060200190919050506102eb565b005b6100fc6004808035906020019091905050610513565b005b341561010957600080fd5b6101116105d6565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b341561015e57600080fd5b6101666105fc565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34156101b357600080fd5b6101bb610621565b6040518082815260200191505060405180910390f35b60025481565b60011515600460009054906101000a900460ff1615151415156101f957600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614806102a15750600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16145b15156102ac57600080fd5b6000600460006101000a81548160ff0219169083151502179055506003543073ffffffffffffffffffffffffffffffffffffffff163103600281905550565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614806103935750600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16145b151561039e576000
80fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16141561048357600060025411801561040757506002548111155b151561041257600080fd5b80600254036002819055506000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050151561047e57600080fd5b610510565b600060035411801561049757506003548111155b15156104a257600080fd5b8060035403600381905550600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050151561050f57600080fd5b5b50565b60011515600460009054906101000a900460ff16151514151561053557600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614801561059657506003548160035401115b80156105bd575080600354013073ffffffffffffffffffffffffffffffffffffffff163110155b15156105c857600080fd5b806003540160038190555050565b600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b600354815600a165627a7a72305820c3b849e8440987ce43eae3097b77672a69234d516351368b03fe5b7de03807910029", "to": "0x7dc9c9730689ff0b0fd506c67db815f12d90a448", diff --git a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/deep_calls.json b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/deep_calls.json index 0353d4cf..80fc0b0a 100644 --- a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/deep_calls.json +++ b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/deep_calls.json @@ -404,8 +404,8 @@ } ], "from": "0x70c9217d814985faef62b124420f8dfbddd96433", - "gas": "0x37b38", - "gasUsed": "0x12bb3", + "gas": "0x3d090", + "gasUsed": "0x1810b", 
"input": "0x51a34eb80000000000000000000000000000000000000000000000280faf689c35ac0000", "output": "0x", "to": "0xc212e03b9e060e36facad5fd8f4435412ca22e6b", diff --git a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/delegatecall.json b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/delegatecall.json index f7ad6df5..2cd28bac 100644 --- a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/delegatecall.json +++ b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/delegatecall.json @@ -86,8 +86,8 @@ } ], "from": "0xa529806c67cc6486d4d62024471772f47f6fd672", - "gas": "0x2d6e28", - "gasUsed": "0x64bd", + "gas": "0x2dc6c0", + "gasUsed": "0xbd55", "input": "0x7065cb480000000000000000000000001523e55a1ca4efbae03355775ae89f8d7699ad9e", "output": "0x", "to": "0x269296dddce321a6bcbaa2f0181127593d732cba", diff --git a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/inner_create_oog_outer_throw.json b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/inner_create_oog_outer_throw.json index 72152e27..07fda21d 100644 --- a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/inner_create_oog_outer_throw.json +++ b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/inner_create_oog_outer_throw.json @@ -67,8 +67,8 @@ ], "error": "invalid jump destination", "from": "0xe4a13bc304682a903e9472f469c33801dd18d9e8", - "gas": "0x435c8", - "gasUsed": "0x435c8", + "gas": "0x493e0", + "gasUsed": "0x493e0", "input": "0x3b91f506000000000000000000000000a14bdd7e5666d784dcce98ad24d383a6b1cd4182000000000000000000000000e4a13bc304682a903e9472f469c33801dd18d9e8", "to": "0x1d3ddf7caf024f253487e18bc4a15b1a360c170a", "type": "CALL", diff --git a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/inner_instafail.json b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/inner_instafail.json index 86070d13..16e41362 100644 --- 
a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/inner_instafail.json +++ b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/inner_instafail.json @@ -54,8 +54,8 @@ "from": "0x66fdfd05e46126a07465ad24e40cc0597bc1ef31", "to": "0x6c06b16512b332e6cd8293a2974872674716ce18", "value": "0x0", - "gas": "0x1a466", - "gasUsed": "0x1dc6", + "gas": "0x1f97e", + "gasUsed": "0x72de", "input": "0x2e1a7d4d00000000000000000000000000000000000000000000000014d1120d7b160000", "output": "0x", "calls": [ @@ -64,7 +64,7 @@ "from": "0x6c06b16512b332e6cd8293a2974872674716ce18", "to": "0x66fdfd05e46126a07465ad24e40cc0597bc1ef31", "value": "0x14d1120d7b160000", - "error":"internal failure", + "error": "internal failure", "input": "0x" } ] diff --git a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/inner_throw_outer_revert.json b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/inner_throw_outer_revert.json index ec2ceb42..a023ed6d 100644 --- a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/inner_throw_outer_revert.json +++ b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/inner_throw_outer_revert.json @@ -71,8 +71,8 @@ ], "error": "execution reverted", "from": "0xd4fcab9f0a6dc0493af47c864f6f17a8a5e2e826", - "gas": "0x78d9e", - "gasUsed": "0x76fc0", + "gas": "0x7dfa6", + "gasUsed": "0x7c1c8", "input": "0x", "to": "0x33056b5dcac09a9b4becad0e1dcf92c19bd0af76", "type": "CALL", diff --git a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/oog.json b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/oog.json index de4fed6a..333bdd03 100644 --- a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/oog.json +++ b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/oog.json @@ -50,8 +50,8 @@ "result": { "error": "out of gas", "from": "0x94194bc2aaf494501d7880b61274a169f6502a54", - "gas": "0x7045", - "gasUsed": "0x7045", + "gas": 
"0xca1d", + "gasUsed": "0xca1d", "input": "0xa9059cbb000000000000000000000000e77b1ac803616503510bed0086e3a7be2627a69900000000000000000000000000000000000000000000000000000009502f9000", "to": "0x43064693d3d38ad6a7cb579e0d6d9718c8aa6b62", "type": "CALL", diff --git a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/revert.json b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/revert.json index 059040a1..3207a298 100644 --- a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/revert.json +++ b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/revert.json @@ -48,8 +48,8 @@ "result": { "error": "execution reverted", "from": "0x0f6cef2b7fbb504782e35aa82a2207e816a2b7a9", - "gas": "0x2d55e8", - "gasUsed": "0xc3", + "gas": "0x2dc6c0", + "gasUsed": "0x719b", "input": "0x73b40a5c000000000000000000000000400de2e016bda6577407dfc379faba9899bc73ef0000000000000000000000002cc31912b2b0f3075a87b3640923d45a26cef3ee000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000064d79d8e6c7265636f76657279416464726573730000000000000000000000000000000000000000000000000000000000383e3ec32dc0f66d8fe60dbdc2f6815bdf73a988383e3ec32dc0f66d8fe60dbdc2f6815bdf73a98800000000000000000000000000000000000000000000000000000000000000000000000000000000", "to": "0xabbcd5b340c80b5f1c0545c04c987b87310296ae", "type": "CALL", diff --git a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/revert_reason.json b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/revert_reason.json index 094b0446..5c7e5629 100644 --- a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/revert_reason.json +++ b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/revert_reason.json @@ -53,8 +53,8 @@ "result": { "error": "execution reverted", "from": 
"0xf7579c3d8a669c89d5ed246a22eb6db8f6fedbf1", - "gas": "0x2d7308", - "gasUsed": "0x588", + "gas": "0x2dc6c0", + "gasUsed": "0x5940", "input": "0x5c19a95c000000000000000000000000f7579c3d8a669c89d5ed246a22eb6db8f6fedbf1", "to": "0xf58833cf0c791881b494eb79d461e08a1f043f52", "type": "CALL", diff --git a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/selfdestruct.json b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/selfdestruct.json index 132cefa1..11b23a99 100644 --- a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/selfdestruct.json +++ b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/selfdestruct.json @@ -62,8 +62,8 @@ } ], "from": "0xb436ba50d378d4bbc8660d312a13df6af6e89dfb", - "gas": "0x10738", - "gasUsed": "0x7533", + "gas": "0x15f90", + "gasUsed": "0x6fcb", "input": "0x63e4bff40000000000000000000000000024f658a46fbb89d8ac105e98d7ac7cbbaf27c5", "output": "0x", "to": "0x3b873a919aa0512d5a0f09e6dcceaa4a6727fafe", diff --git a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/simple.json b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/simple.json index b4643212..37723f17 100644 --- a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/simple.json +++ b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/simple.json @@ -67,8 +67,8 @@ } ], "from": "0xb436ba50d378d4bbc8660d312a13df6af6e89dfb", - "gas": "0x10738", - "gasUsed": "0x3ef9", + "gas": "0x15f90", + "gasUsed": "0x9751", "input": "0x63e4bff40000000000000000000000000024f658a46fbb89d8ac105e98d7ac7cbbaf27c5", "output": "0x0000000000000000000000000000000000000000000000000000000000000001", "to": "0x3b873a919aa0512d5a0f09e6dcceaa4a6727fafe", diff --git a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/throw.json b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/throw.json index 09cf4497..499b449a 100644 --- 
a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/throw.json +++ b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/throw.json @@ -52,8 +52,8 @@ "result": { "error": "invalid jump destination", "from": "0x70c9217d814985faef62b124420f8dfbddd96433", - "gas": "0x37b38", - "gasUsed": "0x37b38", + "gas": "0x3d090", + "gasUsed": "0x3d090", "input": "0x51a34eb8000000000000000000000000000000000000000000000027fad02094277c0000", "to": "0xc212e03b9e060e36facad5fd8f4435412ca22e6b", "type": "CALL", diff --git a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/calldata.json b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/calldata.json new file mode 100644 index 00000000..9264f1e2 --- /dev/null +++ b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/calldata.json @@ -0,0 +1,115 @@ +{ + "genesis": { + "difficulty": "11934798510088", + "extraData": "0xd983010302844765746887676f312e342e328777696e646f7773", + "gasLimit": "3141592", + "hash": "0xfc543a4a551afbd4a6c5d6d49041371e6bb58b1108c12aaec7f487ce656bb97f", + "miner": "0xf8b483dba2c3b7176a3da549ad41a48bb3121069", + "mixHash": "0xa6a1e67fc68da76b8d9cc3ce1c45d5e1f4bbd96b5dcfddbe0017d7fa99903ead", + "nonce": "0x5f00c600268b4659", + "number": "995200", + "stateRoot": "0x3579328470dd2aef5b9da69f5480cbe0d375e653b530ab3c1aee0da5e1ff4c94", + "timestamp": "1455322761", + "totalDifficulty": "7077231809278509672", + "alloc": { + "0x200edd17f30485a8735878661960cd7a9a95733f": { + "balance": "0x0", + "code": "0x3660008037602060003660003473273930d21e01ee25e4c219b63259d214872220a261235a5a03f21560015760206000f3", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000000": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000001": "0x0000000000000000000000000000000000000000000000000000000000000002", + 
"0x0000000000000000000000000000000000000000000000000000000000000104": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x4c0be60200faa20559308cb7b5a1bb3255c16cb1cab91f525b5ae7a03d02fabe": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x8ba1097eb3abe3dc1b51faa48445d593bf968f722e20b67bb62a87495836bf04": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x8ba1097eb3abe3dc1b51faa48445d593bf968f722e20b67bb62a87495836bf05": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x8ba1097eb3abe3dc1b51faa48445d593bf968f722e20b67bb62a87495836bf06": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0xa611e7c895a426c0477bc9e280db9c3b1e456dc6310ffcf23926ef5186c1facc": "0x0000000000000000000000000000000000000000000000000000000000000002", + "0xac682d343707aadf06c2c4c3692831d9e7ba711099ef36f9efb8bb29be8c410e": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0xac682d343707aadf06c2c4c3692831d9e7ba711099ef36f9efb8bb29be8c410f": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0xac682d343707aadf06c2c4c3692831d9e7ba711099ef36f9efb8bb29be8c4110": "0x0000000000000000000000000000000000000000000000000000000000000000" + } + }, + "0x273930d21e01ee25e4c219b63259d214872220a2": { + "balance": "0x0", + "code": 
"0x606060405236156100da5760e060020a6000350463173825d9811461012c5780632f54bf6e146101875780634123cb6b146101af57806352375093146101b857806354fd4d50146101c25780635c52c2f5146101cc578063659010e7146101fd5780637065cb4814610207578063746c91711461023b578063797af62714610244578063b20d30a914610257578063b61d27f61461028b578063b75c7dc6146102ac578063ba51a6df146102db578063c2cf73261461030f578063cbf0b0c01461034d578063f00d4b5d14610381578063f1736d86146103ba575b6103c4600034111561012a5760408051600160a060020a033216815234602082015281517fe1fffcc4923d04b559f4d29a8bfc6cda04eb5b0d3c460751c2402c5c5cc9109c929181900390910190a15b565b6103c46004356000600036436040518084848082843750505090910190815260405190819003602001902090506106c9815b600160a060020a03321660009081526101026020526040812054818082811415610c3f57610d97565b6103c66004355b600160a060020a03811660009081526101026020526040812054115b919050565b6103c660015481565b6103c66101075481565b6103c66101085481565b6103c46000364360405180848480828437505050909101908152604051908190036020019020905061081a8161015e565b6103c66101065481565b6103c4600435600036436040518084848082843750505090910190815260405190819003602001902090506106418161015e565b6103c660005481565b6103c66004355b600081610a7d8161015e565b6103c46004356000364360405180848480828437505050909101908152604051908190036020019020905061080e8161015e565b6103c66004803590602480359160443591820191013560006108393261018e565b6103c4600435600160a060020a033216600090815261010260205260408120549080828114156103d857610457565b6103c4600435600036436040518084848082843750505090910190815260405190819003602001902090506107888161015e565b6103c6600435602435600082815261010360209081526040808320600160a060020a038516845261010290925282205482818114156107e157610805565b6103c4600435600036436040518084848082843750505090910190815260405190819003602001902090506108288161015e565b6103c46004356024356000600036436040518084848082843750505090910190815260405190819003602001902090506104e28161015e565b6103c66101055481565b005b60408051918252519081900360200190f35b505060008281526101036020526
04081206001810154600284900a9290831611156104575780546001828101805492909101835590839003905560408051600160a060020a03321681526020810186905281517fc7fb647e59b18047309aa15aad418e5d7ca96d173ad704f1031a2c3d7591734b929181900390910190a15b50505050565b600160a060020a03831660028361010081101561000257508301819055600160a060020a03851660008181526101026020908152604080832083905584835291829020869055815192835282019290925281517fb532073b38c83145e3e5135377a08bf9aab55bc0fd7c1179cd4fb995d2a5159c929181900390910190a1505b505050565b15610457576104f08361018e565b156104fb57506104dd565b600160a060020a03841660009081526101026020526040812054925082141561052457506104dd565b61045d5b6101045460005b81811015610ee457610104805461010991600091849081101561000257600080516020610f9f83398151915201548252506020918252604081208054600160a060020a0319168155600181018290556002810180548382559083528383209193610f6992601f9290920104810190610a65565b60018054810190819055600160a060020a038316906002906101008110156100025790900160005081905550600160005054610102600050600084600160a060020a03168152602001908152602001600020600050819055507f994a936646fe87ffe4f1e469d3d6aa417d6b855598397f323de5b449f765f0c3826040518082600160a060020a0316815260200191505060405180910390a15b505b50565b1561063c5761064f8261018e565b1561065a575061063e565b610662610528565b60015460fa90106106775761067561068c565b505b60015460fa90106105a2575061063e565b6107465b600060015b600154811015610a79575b600154811080156106bc5750600281610100811015610002570154600014155b15610d9f5760010161069c565b156104dd57600160a060020a0383166000908152610102602052604081205492508214156106f7575061063c565b6001600160005054036000600050541115610712575061063c565b600060028361010081101561000257508301819055600160a060020a03841681526101026020526040812055610688610528565b5060408051600160a060020a038516815290517f58619076adf5bb0943d100ef88d52d7c3fd691b19d3a9071b555b651fbf418da9181900360200190a1505050565b1561063c5760015482111561079d575061063e565b60008290556107aa610528565b6040805183815290517facbdb084c721332ac59f9b8e392196c9eb0e4932862da8eb9be
af0dad4f550da9181900360200190a15050565b506001820154600282900a908116600014156108005760009350610805565b600193505b50505092915050565b1561063c575061010555565b1561063e5760006101065550565b1561063c5781600160a060020a0316ff5b15610a555761084d846000610e793261018e565b15610909577f92ca3a80853e6663fa31fa10b99225f18d4902939b4c53a9caae9043f6efd00432858786866040518086600160a060020a0316815260200185815260200184600160a060020a031681526020018060200182810382528484828181526020019250808284378201915050965050505050505060405180910390a184600160a060020a03168484846040518083838082843750505090810191506000908083038185876185025a03f15060009350610a5592505050565b6000364360405180848480828437505050909101908152604051908190036020019020915061093990508161024b565b15801561095c575060008181526101096020526040812054600160a060020a0316145b15610a555760008181526101096020908152604082208054600160a060020a03191688178155600181018790556002018054858255818452928290209092601f01919091048101908490868215610a5d579182015b82811115610a5d5782358260005055916020019190600101906109b1565b50507f1733cbb53659d713b79580f79f3f9ff215f78a7c7aa45890f3b89fc5cddfbf328132868887876040518087815260200186600160a060020a0316815260200185815260200184600160a060020a03168152602001806020018281038252848482818152602001925080828437820191505097505050505050505060405180910390a15b949350505050565b506109cf9291505b80821115610a795760008155600101610a65565b5090565b15610c2c5760008381526101096020526040812054600160a060020a031614610c2c5760408051600091909120805460018201546002929092018054600160a060020a0392909216939091819083908015610afd57820191906000526020600020905b815481529060010190602001808311610ae057829003601f168201915b505091505060006040518083038185876185025a03f150505060008481526101096020908152604080519281902080546001820154600160a060020a033281811688529587018b905293860181905292166060850181905260a06080860181815260029390930180549187018290527fe7c957c06e9a662c1a6c77366179f5b702b97651dc28eee7d5bf1dff6e40bb4a975094958a959293909160c083019084908015610bcf57820191906000526020600020905b8154815
29060010190602001808311610bb257829003601f168201915b5050965050505050505060405180910390a160008381526101096020908152604082208054600160a060020a031916815560018101839055600281018054848255908452828420919392610c3292601f9290920104810190610a65565b50919050565b50505060019150506101aa565b60008581526101036020526040812080549093501415610cc7576000805483556001838101919091556101048054918201808255828015829011610c9657818360005260206000209182019101610c969190610a65565b50505060028301819055610104805487929081101561000257600091909152600080516020610f9f83398151915201555b506001810154600283900a90811660001415610d975760408051600160a060020a03321681526020810187905281517fe1c52dc63b719ade82e8bea94cc41a0d5d28e4aaf536adb5e9cccc9ff8c1aeda929181900390910190a1815460019011610d84576000858152610103602052604090206002015461010480549091908110156100025760406000908120600080516020610f9f8339815191529290920181905580825560018083018290556002909201559450610d979050565b8154600019018255600182018054821790555b505050919050565b5b60018054118015610dc257506001546002906101008110156100025701546000145b15610dd65760018054600019019055610da0565b60015481108015610df95750600154600290610100811015610002570154600014155b8015610e1357506002816101008110156100025701546000145b15610e7457600154600290610100811015610002578101549082610100811015610002578101919091558190610102906000908361010081101561000257810154825260209290925260408120929092556001546101008110156100025701555b610691565b156101aa5761010754610e8f5b62015180420490565b1115610ea857600061010655610ea3610e86565b610107555b6101065480830110801590610ec65750610106546101055490830111155b15610edc575061010680548201905560016101aa565b5060006101aa565b61063c6101045460005b81811015610f745761010480548290811015610002576000918252600080516020610f9f833981519152015414610f6157610104805461010391600091849081101561000257600080516020610f9f83398151915201548252506020919091526040812081815560018101829055600201555b600101610eee565b50505060010161052f565b61010480546000808355919091526104dd90600080516020610f9f83398151915290810190610a65560
04c0be60200faa20559308cb7b5a1bb3255c16cb1cab91f525b5ae7a03d02fabe" + }, + "0x4f5777744b500616697cb655dcb02ee6cd51deb5": { + "balance": "0xb0983f1b83eec290", + "nonce": "2" + }, + "0xf8b483dba2c3b7176a3da549ad41a48bb3121069": { + "balance": "0x16969a0ba2c2d384d07", + "nonce": "67521" + } + }, + "config": { + "chainId": 1, + "homesteadBlock": 1150000, + "daoForkBlock": 1920000, + "daoForkSupport": true, + "eip150Block": 2463000, + "eip150Hash": "0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0", + "eip155Block": 2675000, + "eip158Block": 2675000, + "byzantiumBlock": 4370000, + "constantinopleBlock": 7280000, + "petersburgBlock": 7280000, + "istanbulBlock": 9069000, + "muirGlacierBlock": 9200000, + "berlinBlock": 12244000, + "londonBlock": 12965000, + "arrowGlacierBlock": 13773000, + "grayGlacierBlock": 15050000, + "terminalTotalDifficultyPassed": true, + "ethash": {} + } + }, + "context": { + "number": "995201", + "difficulty": "11940626048551", + "timestamp": "1455322773", + "gasLimit": "3141592", + "miner": "0xf8b483dba2c3b7176a3da549ad41a48bb3121069" + }, + "input": "0xf89102850a954d522e8303308594200edd17f30485a8735878661960cd7a9a95733f888ac7230489e80000a4ba51a6df00000000000000000000000000000000000000000000000000000000000000001ca04f2cc45b96f965296382b2e9b657e90808301d5179035a5d91a2de7b912def20a056e19271ea4e19e4e034f38e925e312beed4d300c267160eeb2f565c42deb578", + "tracerConfig": { + "withLog": true + }, + "result": { + "from": "0x4f5777744b500616697cb655dcb02ee6cd51deb5", + "gas": "0x33085", + "gasUsed": "0x1a9e5", + "to": "0x200edd17f30485a8735878661960cd7a9a95733f", + "input": "0xba51a6df0000000000000000000000000000000000000000000000000000000000000000", + "output": "0xba51a6df00000000000000000000000000000000000000000000000000000000", + "calls": [ + { + "from": "0x200edd17f30485a8735878661960cd7a9a95733f", + "gas": "0x2c263", + "gasUsed": "0x1b0e4", + "to": "0x273930d21e01ee25e4c219b63259d214872220a2", + "input": 
"0xba51a6df0000000000000000000000000000000000000000000000000000000000000000", + "logs": [ + { + "address": "0x200edd17f30485a8735878661960cd7a9a95733f", + "topics": [ + "0xe1c52dc63b719ade82e8bea94cc41a0d5d28e4aaf536adb5e9cccc9ff8c1aeda" + ], + "data": "0x0000000000000000000000004f5777744b500616697cb655dcb02ee6cd51deb5be96016bb57376da7a6d296e0a405ee1501778227dfa604df0a81cb1ae018598" + }, + { + "address": "0x200edd17f30485a8735878661960cd7a9a95733f", + "topics": [ + "0xacbdb084c721332ac59f9b8e392196c9eb0e4932862da8eb9beaf0dad4f550da" + ], + "data": "0x0000000000000000000000000000000000000000000000000000000000000000" + } + ], + "value": "0x8ac7230489e80000", + "type": "CALLCODE" + } + ], + "value": "0x8ac7230489e80000", + "type": "CALL" + } +} diff --git a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/delegatecall.json b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/delegatecall.json new file mode 100644 index 00000000..f63dbd47 --- /dev/null +++ b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/delegatecall.json @@ -0,0 +1,408 @@ +{ + "genesis": { + "difficulty": "80344740444880", + "extraData": "0x7777772e62772e636f6d", + "gasLimit": "1498600", + "hash": "0xf5d85a80bdbc5d28a16b8eb0d1b9dd18316ddc3655c7d5c901b67acdb7700037", + "miner": "0xbcdfc35b86bedf72f0cda046a3c16829a2ef41d1", + "mixHash": "0x433ae590edf0e7ba9aac698bb7d3be2300e3e79d175db13528ff3e79a3f93910", + "nonce": "0x084adce0020c6fd8", + "number": "2340152", + "stateRoot": "0x38295a2634c9c62d48bcbf2ef2ae83768b9055c1f5e6469d17a5d1bcb052072e", + "timestamp": "1475034708", + "totalDifficulty": "66488249547380413902", + "alloc": { + "0x01e60b511fced1eb2b5b40991eb1dfd171a6df42": { + "balance": "0x0", + "code": 
"0x6060604052361561008d5760e060020a600035046306fdde03811461008f578063095ea7b3146100a557806318160ddd1461012457806323b872dd1461012f578063313ce567146101dc578063475a9fa9146101f057806370a0823114610215578063721a37d21461024357806395d89b411461008f578063a9059cbb14610268578063dd62ed3e146102e7575b005b61031d6040805160208101909152600081525b90565b61038b60043560243560007319ee743d2e356d5f0e4d97cc09b96d06e933d0db63c6605267600160005085856040518460e060020a0281526004018084815260200183600160a060020a0316815260200182815260200193505050506020604051808303818660325a03f4156100025750506040515191506103179050565b6102316003546100a2565b61038b60043560243560443560008054604080517fa00bfa1100000000000000000000000000000000000000000000000000000000815260016004820152600160a060020a038781166024830152868116604483015260648201869052929092166084830152517319ee743d2e356d5f0e4d97cc09b96d06e933d0db9163a00bfa119160a482810192602092919082900301818660325a03f4156100025750506040515195945050505050565b604080516000815290519081900360200190f35b61038b6004356024356000805433600160a060020a0390811691161461039f57610002565b600160a060020a03600435166000908152600160205260409020545b60408051918252519081900360200190f35b61038b6004356024356000805433600160a060020a039081169116146103ce57610002565b61038b60043560243560007319ee743d2e356d5f0e4d97cc09b96d06e933d0db6388d5fecb600160005085856040518460e060020a0281526004018084815260200183600160a060020a0316815260200182815260200193505050506020604051808303818660325a03f4156100025750506040515191506103179050565b610231600435602435600160a060020a038281166000908152600260209081526040808320938516835292905220545b92915050565b60405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f16801561037d5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b604080519115158252519081900360200190f35b50600160a060020a03821660009081526001602081905260409091208054830190556003805483019055610317565b600160a060020a038316600090815260016020526040902
054821161040a57506040600020805482900390556003805482900390556001610317565b50600061031756", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000000": "0x0000000000000000000000005aae5c59d642e5fd45b427df6ed478b49d55fefd", + "0x0000000000000000000000000000000000000000000000000000000000000003": "0x0000000000000000000000000000000000000000000000012098a4651fb262f7", + "0xfae22198212900725daa5db635d1fda7b0fa195adaabdc806a7267959c3d8ae4": "0x00000000000000000000000000000000000000000000000026cbcbc35aaa62f7" + } + }, + "0x19ee743d2e356d5f0e4d97cc09b96d06e933d0db": { + "balance": "0x0", + "code": "0x6503060000000050606060405260e060020a600035046388d5fecb811461003c578063a00bfa11146100e3578063c6605267146102dc575b610007565b610356600435602435604435600160a060020a0333166000908152602084905260408120548290108015906100715750600082115b1561036a57600160a060020a0333811660008181526020878152604080832080548890039055938716808352918490208054870190558351868152935191937fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef929081900390910190a350600161034f565b610356600435602435604435606435608435600160a060020a03841660009081526020869052604081205483901080159061011e5750600083115b80156101bb5750600160a060020a0385811660009081526001880160209081526040808320339094168352929052205483901015806101bb575081600160a060020a0316631934d55a86336040518360e060020a0281526004018083600160a060020a0316815260200182600160a060020a03168152602001925050506020604051808303816000876161da5a03f1156100075750506040515190505b1561037257600160a060020a038481166000908152602088815260408083208054880190558884168084528184208054899003905581517f1934d55a00000000000000000000000000000000000000000000000000000000815260048101919091523385166024820152905193861693631934d55a936044838101949383900301908290876161da5a03f115610007575050604051511515905061028957600160a060020a038581166000908152600188016020908152604080832033909416835292905220805484900390555b83600160a060020a031685600160a060020a03167fddf252ad1be2c89b69c2b068fc378daa952b
a7f163c4a11628f55a4df523b3ef856040518082815260200191505060405180910390a3506001610376565b610356600435602435604435600160a060020a033381166000818152600186016020908152604080832094871680845294825280832086905580518681529051929493927f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925929181900390910190a35060015b9392505050565b604080519115158252519081900360200190f35b50600061034f565b5060005b9594505050505056" + }, + "0x3de712784baf97260455ae25fb74f574ec9c1add": { + "balance": "0x23c8352f33854625", + "nonce": "80" + }, + "0x5aae5c59d642e5fd45b427df6ed478b49d55fefd": { + "balance": "0x0", + "nonce": "29", + "code": "0x606060405236156100cf5760e060020a600035046307d5b82681146100d157806315e812ad146101775780631934d55a1461018d5780631d007f5f146101c65780631f0c1e0c146101ee5780633983d5c41461022b5780634025b29314610243578063428d64bd1461030f578063481b659d146104b557806357bcccb6146104f45780638c172fa21461052f5780639ba5b4e9146105ea578063a4a7cf5c146106ca578063b11e3b82146106ed578063c51cf179146107a6578063d6911046146107c2578063eff6be2f146109cb575b005b6109f2600435602435600082815260016020908152604080832060049081015482517f23b872dd00000000000000000000000000000000000000000000000000000000815233600160a060020a0390811693820193909352308316602482015260448101879052925185948594859493909316926323b872dd9260648281019392829003018187876161da5a03f1156100025750506040515115159050610a6d57610002565b6004545b60408051918252519081900360200190f35b6109f2600435602435600160a060020a0382811660009081526003602090815260408083209385168352929052205460ff165b92915050565b6109f2600435600080546101009004600160a060020a039081163390911614610be757610002565b610a066004356024356000828152600160205260408120600901805483908110156100025750815260209020810154600160a060020a03166101c0565b61017b6004355b600454620f4240908202045b919050565b6109f26004356024356000805b600084815260016020526040902060090154811015610c13576040600090812090859052600160205260090180548290811015610002576000918252604080516020808520909301547f721a37d2000000000000000000000000000000000
000000000000000000000008252600160a060020a03338116600484015260248301899052925192169363721a37d293604483810194919391929183900301908290876161da5a03f1156100025750506040515115159050610c8d57610002565b604080516024803560048181013560208181028087018201909752818652610a2396833596939560449501929182919085019084908082843750949650505050505050604080516020818101835260008083528351918201909352828152909190819081905b8551831015610c9f57600091505b600160005060008785815181101561000257602090810290910181015182528101919091526040016000206009015460ff831610156104a957600060016000506000888681518110156100025760209081029091018101518252810191909152604001600020600901805460ff85169081101561000257906000526020600020900160009054906101000a9004600160a060020a0316600160a060020a03166370a08231896040518260e060020a0281526004018082600160a060020a031681526020019150506020604051808303816000876161da5a03f1156100025750506040515191909111159050610f59576001600050600087858151811015610002576020908102909101810151825281019190915260400160002060090154909301600201925b60019290920191610375565b6109f260043533600160a060020a039081166000908152600360209081526040808320938516835292905220805460ff1916600190811790915561023e565b6109f260043533600160a060020a039081166000908152600360209081526040808320938516835292905220805460ff19169055600161023e565b60048035600090815260016020818152604092839020600981015481548551968301546002840154600385015460088601546005870154600688015499880154600790980154958c52600160a060020a03888116998d019990995260a060020a90970460ff90811615158c8c015260608c019390935260808b019190915260a08a019490945290851660c08901529290931660e087015261010086019390935216151561012084015261014083015251908190036101600190f35b60408051600480358082013560208181028086018201909652818552610a23959394602494909385019291829190850190849080828437509496505050505050506040805160208181018352600080835283519182019093528281529091908190815b8551831015610f93576000600260005060008886815181101561000257602090810290910181015182528101919091526040016000205411156106be576002600050600087858151811
015610002576020908102909101810151825281019190915260400160002054909301600201925b6001929092019161064d565b61017b6004356000805481908190819081908190819060ff161561115757610002565b6040805160e4356004818101356020818102808601820190965281855261017b95833595602480359660443596606435966084359660a4359660c4359693956101049501929182919085019084908082843750949650505050505050600080808080808d81148061076657508c801561076657508a8c12155b80610774575060028a60ff16105b80610788575087600160a060020a03166000145b8061079c575088600160a060020a03166000145b1561177157611760565b61017b600435600454620f42409081039082020481900361023e565b60408051600480358082013560208181028086018201909652818552610a23959394602494909385019291829190850190849080828437509496505093359350506044359150506064356040805160208181018352600080835283519182019093528281529091908190815b8851831015611cee576000600102600160005060008b8681518110156100025760209081029091018101518252810191909152604001600020541180156108c7575087600160a060020a0316600014806108c7575087600160a060020a0316600160005060008b868151811015610002576020908102909101810151825281019190915260400160002060050154600160a060020a0316145b8015610925575086600160a060020a031660001480610925575086600160a060020a0316600160005060008b868151811015610002576020908102909101810151825281019190915260400160002060040154600160a060020a0316145b8015610983575085600160a060020a031660001480610983575085600160a060020a0316600160005060008b868151811015610002576020908102909101810151825281019190915260400160002060010154600160a060020a0316145b156109bf57600160005060008a858151811015610002576020908102909101810151825281019190915260400160002060090154909301600c01925b6001929092019161082e565b6109f26004356000805433600160a060020a03908116610100909204161461234c57610002565b604080519115158252519081900360200190f35b60408051600160a060020a03929092168252519081900360200190f35b60405180806020018281038252838181518152602001915080519060200190602002808383829060006004602084601f0104600302600f01f1509050019250505060405180910390f35b610a7685610232565b92508285039150600
083118015610b00575060008681526001602090815260408083206004908101548251855460e060020a63a9059cbb0282526101009004600160a060020a039081169382019390935260248101899052925191169363a9059cbb936044848101949193929183900301908290876161da5a03f115610002575050604051511590505b15610b0a57610002565b5060005b60008681526001602052604090206009015460ff82161015610bd35760406000908120908790526001602052600901805460ff831690811015610002576000918252604080516020808520909301547f475a9fa9000000000000000000000000000000000000000000000000000000008252600160a060020a03338116600484015260248301889052925192169363475a9fa993604483810194919391929183900301908290876161da5a03f1156100025750506040515115159050610bdf57610002565b50600195945050505050565b600101610b0e565b506000805474ffffffffffffffffffffffffffffffffffffffff0019166101008302179055600161023e565b6000848152600160209081526040808320600490810154825160e060020a63a9059cbb028152600160a060020a033381169382019390935260248101899052925191169363a9059cbb936044848101949193929183900301908290876161da5a03f1156100025750506040515115159050610c9557610002565b600101610250565b5060019392505050565b83604051805910610cad5750595b908082528060200260200182016040528015610cc4575b506000945084935090505b8551831015610f6557600091505b600160005060008785815181101561000257602090810290910181015182528101919091526040016000206009015460ff83161015610f7b57600060016000506000888681518110156100025760209081029091018101518252810191909152604001600020600901805460ff85169081101561000257906000526020600020900160009054906101000a9004600160a060020a0316600160a060020a03166370a08231896040518260e060020a0281526004018082600160a060020a031681526020019150506020604051808303816000876161da5a03f1156100025750506040515191909111159050610f8757858381518110156100025790602001906020020151600190048185815181101561000257602090810290910101528551600190600090889086908110156100025760209081029091018101518252810191909152604001600020600901548151829060018701908110156100025760209081029091010152600091505b60016000506000878581518110156100025760209081029091018101518
2528101919091526040016000206009015460ff83161015610f6f5760016000506000878581518110156100025760209081029091018101518252810191909152604001600020600901805460ff84169081101561000257906000526020600020900160009054906101000a9004600160a060020a0316600160a060020a03166370a08231886040518260e060020a0281526004018082600160a060020a031681526020019150506020604051808303816000876161da5a03f115610002575050604051518251909150829060ff8516870160020190811015610002576020908102909101015260019190910190610e49565b60019190910190610383565b9695505050505050565b6002820160ff16909301925b60019290920191610ccf565b60019190910190610cdd565b83604051805910610fa15750595b908082528060200260200182016040528015610fb8575b506000945084935091505b85518310156111125760006002600050600088868151811015610002576020908102909101810151825281019190915260400160002054111561114b578583815181101561000257906020019060200201516001900482858151811015610002576020908102909101015285516002906000908890869081101561000257602090810290910181015182528101919091526040016000205482518390600187019081101561000257602090810290910101525060005b600260005060008785815181101561000257602090810290910181015182528101919091526040016000205481101561111b5760026000506000878581518110156100025760209081029091018101518252810191909152604001600020805482908110156100025760009182526020909120015482518390868401600201908110156100025760209081029091010152600101611079565b50949350505050565b60026000506000878581518110156100025750506020858102890181015182528290526040902054909401909301925b60019290920191610fc3565b6000805460ff191660019081178255898252602052604090206007015460ff1615156112e85760406000818120600581015483516006909201547f5101770200000000000000000000000000000000000000000000000000000000835260048301529251600160a060020a0393909316926351017702926024838101936020939290839003909101908290876161da5a03f115610002575050604051511515905061120457611338611347565b6000888152600160209081526040808320815160058201546006909201547f5d1a3b820000000000000000000000000000000000000000000000000000000082526004820152915160016
0a060020a039190911693635d1a3b82936024808501949193929183900301908290876161da5a03f1156100025750505060405180519060200150600160005060008a600019168152602001908152602001600020600050600801600050819055506001600160005060008a60001916815260200190815260200160002060005060070160006101000a81548160ff021916908302179055505b6000888152600160208190526040909120015460a060020a900460ff16156113535760406000908120908990526001602052600281015460089091015412156115435760009450611598565b8596505b505050505050919050565b6113345b6000805460ff19169055565b6000888152600160205260409020600981018054600890920154909181101561000257600091825260208083206040805193909101547f70a08231000000000000000000000000000000000000000000000000000000008452600160a060020a03338116600486015291519116936370a082319360248181019493929183900301908290876161da5a03f115610002575050604051519650505b600091505b60008881526001602052604090206009015460ff831610156116d65760406000908120908990526001602052600901805460ff84169081101561000257906000526020600020900160009054906101000a9004600160a060020a0316600160a060020a03166370a08231336040518260e060020a0281526004018082600160a060020a031681526020019150506020604051808303816000876161da5a03f115610002575050604080515160008b81526001602052919091206009018054919350915060ff84169081101561000257906000526020600020900160009054906101000a9004600160a060020a0316600160a060020a031663721a37d233836040518360e060020a0281526004018083600160a060020a03168152602001828152602001925050506020604051808303816000876161da5a03f115610002575050604051511515905061175057610002565b60008881526001602052604090206003810154600890910154131561156c576127109450611598565b600088815260016020526040902060028101546003820154600890920154918190039103612710020594505b6000888152600160208190526040909120600901805461271088810361ffff16975087810396509286929181101561000257906000526020600020900160009054906101000a9004600160a060020a0316600160a060020a03166370a08231336040518260e060020a0281526004018082600160a060020a031681526020019150506020604051808303816000876161da5a03f1156100025750506040805
15160008d815260016020529182206009018054919094029389935091908110156100025790815260208120909054906101000a9004600160a060020a0316600160a060020a03166370a08231336040518260e060020a0281526004018082600160a060020a031681526020019150506020604051808303816000876161da5a03f1156100025750505060405180519060200150020104955085506113ed565b6000888152600160209081526040808320600490810154825160e060020a63a9059cbb028152600160a060020a0333811693820193909352602481018c9052925191169363a9059cbb936044848101949193929183900301908290876161da5a03f115610002575050604051511515905061134357610002565b600191909101906113f2565b8495505b505050505098975050505050505050565b8d8d8d8d8d8d8d8d604051808960001916815260200188151560f860020a0281526001018781526020018681526020018560ff1660f860020a02815260010184600160a060020a03166c0100000000000000000000000002815260140183600160a060020a03166c010000000000000000000000000281526014018280519060200190602002808383829060006004602084601f0104600302600f01f1509050019850505050505050505060405180910390209450600060010260016000506000876000191681526020019081526020016000206000506000016000505460001916111561185e57611760565b87600160a060020a031663c91d7e9c886040518260e060020a02815260040180806020018281038252838181518152602001915080519060200190602002808383829060006004602084601f0104600302600f01f150905001925050506040604051808303816000876161da5a03f1156100025750506040518051602091909101519095509350506000841180156119bd575082600160a060020a03166323b872dd3330876040518460e060020a0281526004018084600160a060020a0316815260200183600160a060020a0316815260200182815260200193505050506020604051808303816000876161da5a03f11561000257505060405151159050806119bd575082600160a060020a031663095ea7b389866040518360e060020a0281526004018083600160a060020a03168152602001828152602001925050506020604051808303816000876161da5a03f115610002575050604051511590505b156119c757610002565b87600160a060020a031663c1b06513886040518260e060020a02815260040180806020018281038252838181518152602001915080519060200190602002808383829060006004602084601f0104600302600f01f150905
001925050506020604051808303816000876161da5a03f115610002575050604051519250506000821415611a5257610002565b60008e81526002602052604090208054600181018083558281838015829011611a9e57818360005260206000209182019101611a9e91905b80821115611c975760008155600101611a8a565b50505091909060005260206000209001600087909190915055508d60016000506000876000191681526020019081526020016000206000506000016000508190555087600160005060008760001916815260200190815260200160002060005060050160006101000a815481600160a060020a0302191690830217905550816001600050600087600019168152602001908152602001600020600050600601600050819055508c600160005060008760001916815260200190815260200160002060005060010160146101000a81548160ff021916908302179055508b6001600050600087600019168152602001908152602001600020600050600201600050819055508a60016000506000876000191681526020019081526020016000206000506003016000508190555088600160005060008760001916815260200190815260200160002060005060040160006101000a815481600160a060020a030219169083021790555033600160005060008760001916815260200190815260200160002060005060010160006101000a815481600160a060020a0302191690830217905550600090505b8960ff168160ff16101561175c57600085815260016020819052604090912060090180549182018082559091908281838015829011611c9b57600083815260209020611c9b918101908301611a8a565b5090565b5050509190906000526020600020900160006040516104368061236c833901809050604051809103906000f0825473ffffffffffffffffffffffffffffffffffffffff1916179091555050600101611c47565b83604051805910611cfc5750595b908082528060200260200182016040528015611d13575b506000945084935091505b8851831015611fa2576000600102600160005060008b868151811015610002576020908102909101810151825281019190915260400160002054118015611db7575087600160a060020a031660001480611db7575087600160a060020a0316600160005060008b868151811015610002576020908102909101810151825281019190915260400160002060050154600160a060020a0316145b8015611e15575086600160a060020a031660001480611e15575086600160a060020a0316600160005060008b86815181101561000257602090810290910181015182528101919091526040016000206
0040154600160a060020a0316145b8015611e73575085600160a060020a031660001480611e73575085600160a060020a0316600160005060008b868151811015610002576020908102909101810151825281019190915260400160002060010154600160a060020a0316145b15611fe5578883815181101561000257906020019060200201516001900482858151811015610002576020908102909101015288516001906000908b908690811015610002576020908102909101810151825281019190915260400160002054825183906001870190811015610002576020908102909101015288516001906000908b9086908110156100025760209081029091018101518252810191909152604001600020600101548251600160a060020a03919091169083906002870190811015610002576020908102909101015288516001906000908b90869081101561000257602090810290910181015182528101919091526040016000206001015460a060020a900460ff1615611ff157600182856003018151811015610002576020908102909101015261200c565b50979650505050505050565b600160005060008a858151811015610002576020908102909101810151825281019190915260400160002060090154909301600c01925b60019290920191611d1e565b60008285600301815181101561000257602090810290910101525b600160005060008a858151811015610002576020908102909101810151825281019190915260400160002060020154825183906004870190811015610002576020908102909101015288516001906000908b908690811015610002576020908102909101810151825281019190915260400160002060030154825183906005870190811015610002576020908102909101015288516001906000908b9086908110156100025760209081029091018101518252810191909152604001600020600401548251600160a060020a03919091169083906006870190811015610002576020908102909101015288516001906000908b9086908110156100025760209081029091018101518252810191909152604001600020600501548251600160a060020a03919091169083906007870190811015610002576020908102909101015288516001906000908b908690811015610002576020908102909101810151825281019190915260400160002060060154825183906008870190811015610002576020908102909101015288516001906000908b90869081101561000257602090810290910181015182528101919091526040016000206007015460ff16156121ee576001828560090181518110156100025760209081029091010152612209565b6
0008285600901815181101561000257602090810290910101525b600160005060008a85815181101561000257602090810290910181015182528101919091526040016000206008015482518390600a870190811015610002576020908102909101015288516001906000908b90869081101561000257602090810290910181015182528101919091526040016000206009015482518390600b87019081101561000257602090810290910101525060005b600160005060008a858151811015610002576020908102909101810151825281019190915260400160002060090154811015611fae57600160005060008a858151811015610002576020908102909101810151825281019190915260400160002060090180548290811015610002576000918252602090912001548251600160a060020a0391909116908390868401600c019081101561000257602090810290910101526001016122a0565b620f424082101561236457506004819055600161023e565b50600061023e56606060405260008054600160a060020a03191633179055610412806100246000396000f36060604052361561008d5760e060020a600035046306fdde03811461008f578063095ea7b3146100a557806318160ddd1461012457806323b872dd1461012f578063313ce567146101dc578063475a9fa9146101f057806370a0823114610215578063721a37d21461024357806395d89b411461008f578063a9059cbb14610268578063dd62ed3e146102e7575b005b61031d6040805160208101909152600081525b90565b61038b60043560243560007319ee743d2e356d5f0e4d97cc09b96d06e933d0db63c6605267600160005085856040518460e060020a0281526004018084815260200183600160a060020a0316815260200182815260200193505050506020604051808303818660325a03f4156100025750506040515191506103179050565b6102316003546100a2565b61038b60043560243560443560008054604080517fa00bfa1100000000000000000000000000000000000000000000000000000000815260016004820152600160a060020a038781166024830152868116604483015260648201869052929092166084830152517319ee743d2e356d5f0e4d97cc09b96d06e933d0db9163a00bfa119160a482810192602092919082900301818660325a03f4156100025750506040515195945050505050565b604080516000815290519081900360200190f35b61038b6004356024356000805433600160a060020a0390811691161461039f57610002565b600160a060020a03600435166000908152600160205260409020545b60408051918252519081900360200190f35b61038b6
004356024356000805433600160a060020a039081169116146103ce57610002565b61038b60043560243560007319ee743d2e356d5f0e4d97cc09b96d06e933d0db6388d5fecb600160005085856040518460e060020a0281526004018084815260200183600160a060020a0316815260200182815260200193505050506020604051808303818660325a03f4156100025750506040515191506103179050565b610231600435602435600160a060020a038281166000908152600260209081526040808320938516835292905220545b92915050565b60405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f16801561037d5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b604080519115158252519081900360200190f35b50600160a060020a03821660009081526001602081905260409091208054830190556003805483019055610317565b600160a060020a038316600090815260016020526040902054821161040a57506040600020805482900390556003805482900390556001610317565b50600061031756", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000000": "0x0000000000000000000000950ca4a06c78934a148b7a3ff3ea8fc366f77a0600", + "0x0000000000000000000000000000000000000000000000000000000000000004": "0x00000000000000000000000000000000000000000000000000000000000007d0", + "0x6b8ad191d0fa8204d4eafca22ce4ec42425fde2eecf25ce484ecc76765b9a937": "0x00000000000000000000000001e60b511fced1eb2b5b40991eb1dfd171a6df42", + "0x6b8ad191d0fa8204d4eafca22ce4ec42425fde2eecf25ce484ecc76765b9a938": "0x000000000000000000000000f4cbd7e037b80c2e67b80512d482685f15b1fb28", + "0x71dbd1e5cfc57324881ede454ea48ef3502c5c0b0454ccd622624a7061c2e854": "0x446374989d279847d0dbc6708a9c76a419fe9831d42c78bc89473f559a00d915", + "0x71dbd1e5cfc57324881ede454ea48ef3502c5c0b0454ccd622624a7061c2e855": "0x00000000000000000000000061d76c05cd2aa9ed5135e21e52fff188b02089d4", + "0x71dbd1e5cfc57324881ede454ea48ef3502c5c0b0454ccd622624a7061c2e856": "0x0000000000000000000000000000000000000000000000000000000000000000", + 
"0x71dbd1e5cfc57324881ede454ea48ef3502c5c0b0454ccd622624a7061c2e857": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x71dbd1e5cfc57324881ede454ea48ef3502c5c0b0454ccd622624a7061c2e858": "0x00000000000000000000000092f1dbea03ce08225e31e95cc926ddbe0198e6f2", + "0x71dbd1e5cfc57324881ede454ea48ef3502c5c0b0454ccd622624a7061c2e859": "0x000000000000000000000000529c4cb814029b8bb32acb516ea3a4b07fdae350", + "0x71dbd1e5cfc57324881ede454ea48ef3502c5c0b0454ccd622624a7061c2e85a": "0x846fd373887ade3ab7703750294876afa61cf56303f5f014a4d80d04f508a1f1", + "0x71dbd1e5cfc57324881ede454ea48ef3502c5c0b0454ccd622624a7061c2e85b": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x71dbd1e5cfc57324881ede454ea48ef3502c5c0b0454ccd622624a7061c2e85c": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x71dbd1e5cfc57324881ede454ea48ef3502c5c0b0454ccd622624a7061c2e85d": "0x0000000000000000000000000000000000000000000000000000000000000002" + } + }, + "0x61c808d82a3ac53231750dadc13c777b59310bd9": { + "balance": "0x90a7af5d4755984561", + "nonce": "197408" + }, + "0x6ca7f214ab2ddbb9a8e1a1e2c8550e3164e9dba5": { + "balance": "0x0", + "code": 
"0x606060405236156100a35760e060020a6000350463031d973e81146100a557806316181bb7146100da5780635aa97eeb146103b1578063674cc1f5146104f75780636da84ec0146105d7578063929e626e146105fe578063a0bde7e8146106ac578063bbd4f8541461078b578063c1fd43391461098e578063c3c95c7b14610a88578063db833e3a14610afe578063df6c13c314610cfe578063ebb7119414610d13575b005b610d4960043560008181526020819052604081206004015481908390600160a060020a039081163390911614610dcb57610002565b610d0160043560243560443560643560008481526020819052604080822054815160e160020a63460b97d1028152600481018290529151909183918291829182916000805160206123dd83398151915291638c172fa29160248181019261016092909190829003018187876161da5a03f1156100025750506040805160a081015160c08201517fc51cf179000000000000000000000000000000000000000000000000000000008352600483018d90529251909750919550600160a060020a038616926323b872dd92339230929163c51cf1799160248181019260209290919082900301818b876161da5a03f11561000257505060408051805160e060020a6323b872dd028252600160a060020a039586166004830152939094166024850152918d01604484015250516064828101926020929190829003018187876161da5a03f11561000257505060405151159050806102e8575082600160a060020a031663095ea7b36000805160206123dd8339815191526000805160206123dd833981519152600160a060020a031663c51cf1798c6040518260e060020a028152600401808281526020019150506020604051808303816000876161da5a03f11561000257505060408051805160e060020a63095ea7b30282526004820193909352918d0160248301525160448281019350602092829003018187876161da5a03f115610002575050604051511590505b806103a757506000805160206123dd833981519152600160a060020a03166307d5b826866000805160206123dd833981519152600160a060020a031663c51cf1798c6040518260e060020a028152600401808281526020019150506020604051808303816000876161da5a03f11561000257505060408051805160e160020a6303eadc130282526004820194909452928d016024840152516044838101936020935082900301816000876161da5a03f115610002575050604051511590505b15610fcd57610002565b60408051600480358082013560208181028086018201909652818552610d5d9593946024949093850192918291908501908490808
284375094965050933593505050506040805160208181018352600080835283519182019093528281529091908190815b86518310156112c757600060010260006000506000898681518110156100025760209081029091018101518252810191909152604001600020541180156104af575085600160a060020a0316600014806104af575085600160a060020a03166000600050600089868151811015610002576020908102909101810151825281019190915260400160002060040154600160a060020a0316145b156104eb576000600050600088858151811015610002576020908102909101810151825281019190915260400160002060070154909301600901925b60019290920191610416565b60408051600480358082013560208181028086018201909652818552610d5d959394602494909385019291829190850190849080828437509496505050505050506040805160208181018352600080835283519182019093528281529091908190815b8551831015611713576000600160005060008886815181101561000257602090810290910181015182528101919091526040016000205411156105cb576001600050600087858151811015610002576020908102909101810151825281019190915260400160002054909301600201925b6001929092019161055a565b610d016004356024355b60009182526020829052604090912060010154620f424091020490565b610da760043561200060405190810160405280610100905b6000815260200190600190039081610616575050604080516120008101909152610100815b600081526020019060019003908161063b5750600090505b60008481526020819052604090206007015460ff821610156118d8576040600020600701805460ff8316908110156100025760009182526020909120810154908390610100811015610002576020020152600101610653565b610d5d600435604080516020818101835260008083528351808301855281815285825291819052835193812060070154929391929091600191909101908059106106f35750595b90808252806020026020018201604052801561070a575b509150428260008151811015610002576020919091019190915290505b60008481526020819052604090206007015460ff821610156118d8576040600020600701805460ff83169081101561000257906000526020600020900160005054828260010160ff1681518110156100025760209081029091010152600101610727565b610d0160043560243560443560643560008481526020819052604080822054815160e160020a63460b97d102815260048101919091529051829182918291829182916
000805160206123dd83398151915291638c172fa29160248181019261016092909190829003018187876161da5a03f1156100025750505060405180519060200180519060200180519060200180519060200180519060200180519060200180519060200180519060200180519060200180519060200180519060200150505050509a50505050505050600060005060008b60001916815260200190815260200160002060005060050160009054906101000a9004600160a060020a0316600160a060020a0316630439978d8b600060005060008e60001916815260200190815260200160002060005060030160005054600060005060008f6000191681526020019081526020016000206000506007016000508d8d6040518660e060020a0281526004018086600019168152602001858152602001806020018460ff168152602001838152602001828103825285818154815260200191508054801561095657602002820191906000526020600020905b81600050548152602001906001019080831161093f575b505096505050505050506020604051808303816000876161da5a03f1156100025750506040515194505060008414156118e357610fc0565b610d01600435602435604435606435600060006000600060006000805160206123dd833981519152600160a060020a0316638c172fa28a6040518260e060020a0281526004018082600019168152602001915050610160604051808303816000876161da5a03f1156100025750506040805160a081015160c08201518d83526c01000000000000000000000000600160a060020a033381168202602086810191909152908d16909102603485015284516048948190039490940190932080875292869052928520600301549097509195509350821415905080610a7357506207a12088115b80610a7e5750836000145b15611cc857611cbc565b60048035600090815260208181526040918290206002810154815484516001840154600385015497850154600586015460069096015493835295820152808601929092526060820195909552600160a060020a039283166080820152911660a082015260c0810192909252519081900360e00190f35b610d0160043560243560443560643560008481526020819052604080822054815160e160020a63460b97d10281526004810191909152905182918291829182916000805160206123dd83398151915291638c172fa291602482810192610160929190829003018187876161da5a03f11561000257505050604051805190602001805190602001805190602001805190602001805190602001805190602001805190602001805190602001805190602001805190602001805
19060200150505050509950505050505050600060005060008a60001916815260200190815260200160002060005060050160009054906101000a9004600160a060020a0316600160a060020a031663f47cd6718a600060005060008d60001916815260200190815260200160002060005060030160005054600060005060008e6000191681526020019081526020016000206000506007016000508c8c6040518660e060020a0281526004018086600019168152602001858152602001806020018460ff1681526020018381526020018281038252858181548152602001915080548015610cc657602002820191906000526020600020905b816000505481526020019060010190808311610caf575b505096505050505050506020604051808303816000876161da5a03f11561000257505060405151935050600083141561201957611cbc565b60005b60408051918252519081900360200190f35b610d0160043560008181526020819052604081206004015481908190849033600160a060020a039081169116146122df57610002565b604080519115158252519081900360200190f35b60405180806020018281038252838181518152602001915080519060200190602002808383829060006004602084601f0104600302600f01f1509050019250505060405180910390f35b6040518082612000808381846000600461030ff15090500191505060405180910390f35b600091505b60008481526020819052604090206007015460ff83161015610eed576040600081812086825260208281528351915460e260020a6307c30783028352600483015260ff8616602483015292516000805160206123dd83398151915293631f0c1e0c9360448481019492939283900301908290876161da5a03f115610002575050604080515160008781526020819052919091206007018054600160a060020a0392909216925063a9059cbb9133919060ff871690811015610002579060005260206000209001600050546040518360e060020a0281526004018083600160a060020a03168152602001828152602001925050506020604051808303816000876161da5a03f1156100025750506040515115159050610f7357610002565b60406000908120602082815282825560018201839055600282018390556003820183905560048201805473ffffffffffffffffffffffffffffffffffffffff19908116909155600583018054909116905560068201839055600782018054848255908452908320919291610fa7918101905b80821115610fb45760008155600101610f5f565b6000848152602081905260408120600701805460ff8516908110156100025790825260208220015560019
190910190610dd0565b5060019695505050505050565b5090565b818385010195505b5050505050949350505050565b6040805160e260020a6307c307830281526004810187905260ff8b16602482015290516000805160206123dd83398151915291631f0c1e0c91604482810192602092919082900301816000876161da5a03f11561000257505060408051805160e060020a63095ea7b302825230600160a060020a039081166004840152602483018d905292519216925063095ea7b391604482810192602092919082900301816000876161da5a03f115610002575050604051511515905061108e57610002565b604080517fdb833e3a000000000000000000000000000000000000000000000000000000008152600481018c905260ff8b166024820152604481018a905260648101899052905130600160a060020a03169163db833e3a91608482810192602092919082900301816000876161da5a03f11561000257505060405151925050600082141561111b57610002565b5060005b838160ff1610156111f75760ff808a169082161461125b576040805160e260020a6307c307830281526004810187905260ff8316602482015290516000805160206123dd83398151915291631f0c1e0c91604482810192602092919082900301816000876161da5a03f11561000257505060408051805160e060020a63a9059cbb028252600160a060020a033381166004840152602483018d905292519216925063a9059cbb91604482810192602092919082900301816000876161da5a03f115610002575050604051511515905061125b57610002565b82600160a060020a031663a9059cbb33846040518360e060020a0281526004018083600160a060020a03168152602001828152602001925050506020604051808303816000876161da5a03f115610002575050604051511515905061126357610002565b60010161111f565b816000805160206123dd833981519152600160a060020a031663c51cf1798a6040518260e060020a028152600401808281526020019150506020604051808303816000876161da5a03f115610002575050604051518a01919091039650610fc09050565b836040518059106112d55750595b9080825280602002602001820160405280156112ec575b506000945084935091505b86518310156116c65760006001026000600050600089868151811015610002576020908102909101810151825281019190915260400160002054118015611390575085600160a060020a031660001480611390575085600160a060020a031660006000506000898681518110156100025760209081029091018101518252810191909152604001600020600401546
00160a060020a0316145b1561170757868381518110156100025790602001906020020151600190048285815181101561000257602090810290910101528651600090819089908690811015610002576020908102909101810151825281019190915260400160002054825183906001870190811015610002576020908102909101015286516000908190899086908110156100025760209081029091018101518252810191909152604001600020600101548251839060028701908110156100025760209081029091010152865160009081908990869081101561000257602090810290910181015182528101919091526040016000206002015482518390600387019081101561000257602090810290910101528651600090819089908690811015610002576020908102909101810151825281019190915260400160002060030154825183906004870190811015610002576020908102909101015286516000908190899086908110156100025760209081029091018101518252810191909152604001600020600401548251600160a060020a03919091169083906005870190811015610002576020908102909101015286516000908190899086908110156100025760209081029091018101518252810191909152604001600020600501548251600160a060020a03919091169083906006870190811015610002576020908102909101015286516000908190899086908110156100025760209081029091018101518252810191909152604001600020600601548251839060078701908110156100025760209081029091010152865160009081908990869081101561000257602090810290910181015182528101919091526040016000206007015482518390600887019081101561000257602090810290910101525060005b60006000506000888581518110156100025760209081029091018101518252810191909152604001600020600701548110156116d0576000600050600088858151811015610002576020908102909101810151825281019190915260400160002060070180548290811015610002579060005260206000209001600050548282866009010181518110156100025760209081029091010152600101611626565b5095945050505050565b6000600050600088858151811015610002576020908102909101810151825281019190915260400160002060070154909301600901925b600192909201916112f7565b836040518059106117215750595b908082528060200260200182016040528015611738575b506000945084935091505b8551831015611892576000600160005060008886815181101561000257602090810290910181015182528
101919091526040016000205411156118cc578583815181101561000257906020019060200201516001900482858151811015610002576020908102909101015285516001906000908890869081101561000257602090810290910181015182528101919091526040016000205482518390600187019081101561000257602090810290910101525060005b600160005060008785815181101561000257602090810290910181015182528101919091526040016000205481101561189b57600160005060008785815181101561000257602090810290910181015182528101919091526040016000208054829081101561000257600091825260209091200154825183908684016002019081101561000257602090810290910101526001016117f9565b50949350505050565b6001600050600087858151811015610002575050602085810289018101518252919091526040902054909301600201925b60019290920191611743565b8192505b5050919050565b6118ed8a856105e1565b92506000805160206123dd833981519152600160a060020a031663c51cf179896040518260e060020a028152600401808281526020019150506020604051808303816000876161da5a03f1156100025750506040515192505083830182018790111561195857610fc0565b84600160a060020a03166323b872dd333085878901016040518460e060020a0281526004018084600160a060020a0316815260200183600160a060020a0316815260200182815260200193505050506020604051808303816000876161da5a03f1156100025750506040515115905080611a3557506040805160e060020a63095ea7b30281526000805160206123dd833981519152600482015285840160248201529051600160a060020a0387169163095ea7b391604482810192602092919082900301816000876161da5a03f115610002575050604051511590505b80611aa9575060008a81526020818152604080832054815160e160020a6303eadc130281526004810191909152878601602482015290516000805160206123dd833981519152936307d5b826936044848101949193929183900301908290876161da5a03f115610002575050604051511590505b15611ab357610002565b5060005b60008a81526020819052604090206007015460ff82161015611b06576040600020600701805485919060ff84169081101561000257600091825260209091200180549091019055600101611ab7565b604060009081208b8252602091909152600701805460ff8b169081101561000257600091825260209091200154881115611b3f57610002565b60008a815260208190526040902060028101805485019
055600701805489919060ff8c1690811015610002579060005260206000209001600050805491909103905560008a81526020818152604080832054815160e260020a6307c30783028152600481019190915260ff8d16602482015290516000805160206123dd83398151915293631f0c1e0c936044848101949193929183900301908290876161da5a03f11561000257505060408051805160e060020a63a9059cbb028252600160a060020a033381166004840152602483018d905292519216925063a9059cbb91604482810192602092919082900301816000876161da5a03f1156100025750506040515115159050610fb857610002565b505050600092835250602080832090910184905583825281905260409020600181018990556003810188905589815560048101805473ffffffffffffffffffffffffffffffffffffffff199081163317909155600582018054909116881790554360069091015590935083905b50505050949350505050565b82600160a060020a03166323b872dd33306000805160206123dd833981519152600160a060020a031663c51cf1798c6040518260e060020a028152600401808281526020019150506020604051808303816000876161da5a03f11561000257505060408051805160e060020a6323b872dd028252600160a060020a039586166004830152939094166024850152918c0160448401525051606482810192602092919082900301816000876161da5a03f1156100025750506040515115905080611e45575082600160a060020a031663095ea7b36000805160206123dd8339815191526000805160206123dd833981519152600160a060020a031663c51cf1798b6040518260e060020a028152600401808281526020019150506020604051808303816000876161da5a03f11561000257505060408051805160e060020a63095ea7b30282526004820193909352918c016024830152516044828101935060209282900301816000876161da5a03f115610002575050604051511590505b80611f0457506000805160206123dd833981519152600160a060020a03166307d5b8268a6000805160206123dd833981519152600160a060020a031663c51cf1798b6040518260e060020a028152600401808281526020019150506020604051808303816000876161da5a03f11561000257505060408051805160e160020a6303eadc130282526004820194909452928c016024840152516044838101936020935082900301816000876161da5a03f115610002575050604051511590505b15611f0e57610002565b83604051805910611f1c5750595b908082528060200260200182016040528015611f33575b506000838152602081815
260408220600701805484518083558285529383902091949082019392018215611f86579160200282015b82811115611f86578251826000505591602001919060010190611f68565b50611f92929150610f5f565b5050600090505b838160ff161015611fda576000828152602081905260409020600701805488919060ff84169081101561000257600091825260209091200155600101611f99565b600089815260016020819052604090912080549182018082559091908281838015829011611c4f57600083815260209020611c4f918101908301610f5f565b61202389846105e1565b915085828403101561203457611cbc565b60008981526020818152604080832054815160e260020a6307c30783028152600481019190915260ff8c16602482015290516000805160206123dd83398151915293631f0c1e0c936044848101949193929183900301908290876161da5a03f11561000257505060408051805160e060020a6323b872dd028252600160a060020a0333811660048401523081166024840152604483018c90529251921692506323b872dd91606482810192602092919082900301816000876161da5a03f115610002575050604051511590508061218d57506000805160206123dd833981519152600160a060020a0316634025b293600060005060008c60001916815260200190815260200160002060005060000160005054856040518360e060020a0281526004018083600019168152602001828152602001925050506020604051808303816000876161da5a03f115610002575050604051511590505b1561219757610002565b6000898152602081905260409020600701805488919060ff8b16908110156100025760009182526020822001805490920190915590505b60008981526020819052604090206007015460ff82161015612254576040600020600701805484919060ff84169081101561000257600091825260209091200154106122d0576000898152602081905260409020600701805484919060ff84169081101561000257906000526020600020900160005080549190910390556001016121ce565b600089815260208181526040808320600201805486019055805160e060020a63a9059cbb028152600160a060020a033381166004830152868803602483015291519188169363a9059cbb93604483810194919391929183900301908290876161da5a03f11561000257505060405151151590506122d557610002565b610002565b8183039450611cbc565b60008581526020819052604080822054815160e160020a63460b97d1028152600481019190915290516000805160206123dd83398151915292638c172fa292602481810193610
160939092839003909101908290876161da5a03f1156100025750506040805160c001516000888152602081905291822060020180549083905590955093508311905080156123ca575082600160a060020a031663a9059cbb33846040518360e060020a0281526004018083600160a060020a03168152602001828152602001925050506020604051808303816000876161da5a03f115610002575050604051511590505b156123d457610002565b819350506118dc560000000000000000000000005aae5c59d642e5fd45b427df6ed478b49d55fefd", + "storage": { + "0x50ff25f5e9a51687bca1c50f3544d5eef8202f228d3de791691a137aecb6b360": "0x00000000000000000000000000000000000000000000000026566ea1ec2f6a9b", + "0x50ff25f5e9a51687bca1c50f3544d5eef8202f228d3de791691a137aecb6b361": "0x00000000000000000000000000000000000000000000000072aa5b7e04d56a9b", + "0x642f3c12d3cd25d9b946d8c2ec97f080f4efcff18301a6dcade5a6be0c5ed86c": "0xd9a4ffe21d19763887176173d08241e8393c1dfd208f29193dfecdf854b664ac", + "0x642f3c12d3cd25d9b946d8c2ec97f080f4efcff18301a6dcade5a6be0c5ed86d": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x642f3c12d3cd25d9b946d8c2ec97f080f4efcff18301a6dcade5a6be0c5ed86e": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x642f3c12d3cd25d9b946d8c2ec97f080f4efcff18301a6dcade5a6be0c5ed86f": "0x0000000000000000000000000000000000000000000000004563918244f40000", + "0x642f3c12d3cd25d9b946d8c2ec97f080f4efcff18301a6dcade5a6be0c5ed871": "0x0000000000000000000000008695e5e79dab06fbbb05f445316fa4edb0da30f0", + "0x642f3c12d3cd25d9b946d8c2ec97f080f4efcff18301a6dcade5a6be0c5ed873": "0x0000000000000000000000000000000000000000000000000000000000000002" + } + }, + "0x8695e5e79dab06fbbb05f445316fa4edb0da30f0": { + "balance": "0x0", + "code": 
"0x606060405260e060020a60003504630439978d811461003157806308f028d514610115578063f47cd6711461026e575b005b60408051604435600481810135602081810285810182019096528185526103509583359560248035966064959294910192829185019084908082843750949650509335935050608435915050600060006040604051908101604052806002905b60008152602001906001900390816100915790505060006000600061271073ef3487d24a0702703e04a26cef479e313c8fc7ae6324d4e90a604060020a8c51026040518260e060020a028152600401808281526020019150506020604051808303818660325a03f41561000257505060405151919091049550610398905089610157565b60408051600480358082013560208181028086018201909652818552610362959394602494909385019291829190850190849080828437509496505050505050505b6040604051908101604052806002905b6000815260200190600190039081610167575050604080518082019091526002815b600081526020019060019003908161018957905050600083600081518110156100025760209081029091010151825283518490600090811015610002576020908102909101810151908301525060005b83518160ff1610156104a7578351825190859060ff8416908110156100025790602001906020020151101561022357838160ff168151811015610002576020908102909101015182525b60208201518451859060ff8416908110156100025790602001906020020151111561026657838160ff168151811015610002576020908102909101810151908301525b6001016101d9565b60408051604435600481810135602081810285810182019096528185526103509583359560248035966064959294910192829185019084908082843750949650509335935050608435915050600060006040604051908101604052806002905b60008152602001906001900390816102ce579050506000600061271073ef3487d24a0702703e04a26cef479e313c8fc7ae6324d4e90a604060020a8b51026040518260e060020a028152600401808281526020019150506020604051808303818660325a03f415610002575050604051519190910494506104ae905088610157565b60408051918252519081900360200190f35b60408051908190839080838184600060046015f15090500191505060405180910390f35b8095505b505050505095945050505050565b935061044d85858b8d5b6000806127108304815b85518160ff16101561051d5773ef3487d24a0702703e04a26cef479e313c8fc7ae63872fb2b589848a6000909060200201518a8660ff1681518
110156100025760209081029091010151038b600060200201518c600190906020020151030304026040518260e060020a028152600401808281526020019150506020604051808303818660325a03f4156100025750506040515190930192506001016103ac565b925086898960ff16815181101561000257602090810290910101805191909103905261047b85858b8d6103a2565b915050604060020a620186a0620186a28484036127108d0402020404868111156103865786955061038a565b5092915050565b6020810180518801905292506104c684848a8c6103a2565b915085888860ff16815181101561000257602090810290910101805190910190526104f384848a8c6103a2565b9050604060020a620186a06127108b04838503026201869e02040494505050505095945050505050565b87604060020a73ef3487d24a0702703e04a26cef479e313c8fc7ae6324d4e90a866040518260e060020a028152600401808281526020019150506020604051808303818660325a03f4156100025750506040515190910291909104999850505050505050505056" + }, + "0x92f1dbea03ce08225e31e95cc926ddbe0198e6f2": { + "balance": "0xa6e361612cc228000", + "code": "0x6060604052361561008d5760e060020a600035046306fdde03811461008f578063095ea7b3146100ed57806318160ddd1461016257806323b872dd1461016b578063313ce567146102565780636c11bcd31461026257806370a08231146102d057806395d89b41146102f5578063a9059cbb14610353578063d0febe4c146103f8578063dd62ed3e14610439575b005b6040805160038054602060026001831615610100026000190190921691909104601f810182900482028401820190945283835261046d939083018282801561052e5780601f106105035761010080835404028352916020019161052e565b61042560043560243533600160a060020a03908116600081815260016020908152604080832094871680845294825280832086905580518681529051929493927f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925929181900390910190a35060015b92915050565b6104db60025481565b610425600435602435604435600160a060020a0383166000908152602081905260408120548290108015906101be575060016020908152604080832033600160a060020a03168452909152812054829010155b80156101ca5750600082115b1561053657600160a060020a038381166000818152602081815260408083208054880190558885168084528184208054899003905560018352818420339096168452948252918290208
0548790039055815186815291519293927fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef9281900390910190a350600161053a565b6104ed60055460ff1681565b61042560043533600160a060020a0316600090815260208190526040812054821161054157604081208054839003905560028054839003905580821180156102c6575060405133600160a060020a0316908290849082818181858883f19350505050155b1561054957610002565b6104db600435600160a060020a0381166000908152602081905260409020545b919050565b61046d6004805460408051602060026000196101006001871615020190941693909304601f8101849004840282018401909252818152929183018282801561052e5780601f106105035761010080835404028352916020019161052e565b61042560043560243533600160a060020a03166000908152602081905260408120548290108015906103855750600082115b156105515733600160a060020a0390811660008181526020818152604080832080548890039055938716808352918490208054870190558351868152935191937fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef929081900390910190a350600161015c565b33600160a060020a0316600090815260208190526040902080543490810190915560028054909101905560015b604080519115158252519081900360200190f35b6104db600435602435600160a060020a0382811660009081526001602090815260408083209385168352929052205461015c565b60405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156104cd5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b60408051918252519081900360200190f35b6040805160ff9092168252519081900360200190f35b820191906000526020600020905b81548152906001019060200180831161051157829003601f168201915b505050505081565b5060005b9392505050565b5060006102f0565b5060016102f0565b50600061015c56", + "storage": { + "0x3830062b39ca7888048a385f112e36aef7258a27d84eb6e31312c298e5954da3": "0x0000000000000000000000000000000000000000000000035fe3763f1973ab3b", + "0x527b1dd758d53f706730e0fb37a8de5c38d8b4cd17fbe1cfa285480a00f55bf4": "0x000000000000000000000000000000000000000000000003ab97b2fc29ad66c6", + 
"0x52cb6de4baff82acfb6977b64d52b9ac011f8af34631d933997b7649a84d716f": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x8f0cfa08792bcd3de052a3bb7bd54f8a62c44b02ba16ff336e9a881c348cca21": "0x000000000000000000000000000000000000000000046ba103abb9d1301f1b2e", + "0xa29249eda6f9f8d0c67b7a4f954f6ba7a9f1bb3f216b2fedc6db8def03c47746": "0x00000000000000000000000000000000000000000000000007a93ebd870d6684", + "0xbe1e23f4b08159a01ee61379749e9b484f5947aaeeb008ce7c97d1c56d3eeb8b": "0x0000000000000000000000000000000000000000000000000dfecc50c6f7d5cd" + } + }, + "0xef3487d24a0702703e04a26cef479e313c8fc7ae": { + "balance": "0x0", + "code": "0x6503060000000050606060405260e060020a600035046324d4e90a8114610031578063872fb2b514610078575b610007565b61013c6004356000680171547652b82fe177818080808061014e886000604060020a82048160bf605f5b6001830182146103c4578060020a8410156103ce579050806103d2565b61013c600435604060020a67b17217f7d1cf79ac81830281900482810460020a680100000016aee6e8ef67b172182739bc0e46858406908102869004673d7f78a624cfb9b582800288900490810288900491909101670e359bcfeb6e45319183028890049182028890040167027601df2fc048dc91830288900491820288900401665808a728816ee89183028890049182028890040166095dedef350bc991830288900491820297909704969096019190910182810295905b505050505050919050565b60408051918252519081900360200190f35b94508460020a88049350604060020a9250604060020a600a029150819050604060020a83680443b9c5adb08cc45f0204810390508050604060020a8484020492508250604060020a83680f0a52590f17c71a3f0204810190508050604060020a8484020492508250604060020a83682478f22e787502b0230204810390508050604060020a8484020492508250604060020a836848c6de1480526b8d4c0204810190508050604060020a8484020492508250604060020a836870c18cae824656408c0204810390508050604060020a8484020492508250604060020a8368883c81ec0ce7abebb20204810190508050604060020a8484020492508250604060020a836881814da94fe52ca9f50204810390508050604060020a8484020492508250604060020a8368616361924625d1acf50204810190508050604060020a8484020492508250604060020a836839f9a16
fb9292a608d0204810390508050604060020a8484020492508250604060020a83681b3049a5740b21d65f0204810190508050604060020a8484020492508250604060020a836809ee1408bd5ad96f3e0204810390508050604060020a8484020492508250604060020a836802c465c91703b7a7f40204810190508050604060020a8484020492508250604060020a8367918d2d5f045a4d630204810390508050604060020a8484020492508250604060020a836714ca095145f44f780204810190508050604060020a8484020492508250604060020a836701d806fc412c1b990204810390508050604060020a8484020492508250604060020a836613950b4e1e89cc020481019050805085604060020a8383604060020a8902010302049650610131565b5090949350505050565b9150815b5060028282010461005b56" + }, + "0xf4cbd7e037b80c2e67b80512d482685f15b1fb28": { + "balance": "0x0", + "code": "0x6060604052361561008d5760e060020a600035046306fdde03811461008f578063095ea7b3146100a557806318160ddd1461012457806323b872dd1461012f578063313ce567146101dc578063475a9fa9146101f057806370a0823114610215578063721a37d21461024357806395d89b411461008f578063a9059cbb14610268578063dd62ed3e146102e7575b005b61031d6040805160208101909152600081525b90565b61038b60043560243560007319ee743d2e356d5f0e4d97cc09b96d06e933d0db63c6605267600160005085856040518460e060020a0281526004018084815260200183600160a060020a0316815260200182815260200193505050506020604051808303818660325a03f4156100025750506040515191506103179050565b6102316003546100a2565b61038b60043560243560443560008054604080517fa00bfa1100000000000000000000000000000000000000000000000000000000815260016004820152600160a060020a038781166024830152868116604483015260648201869052929092166084830152517319ee743d2e356d5f0e4d97cc09b96d06e933d0db9163a00bfa119160a482810192602092919082900301818660325a03f4156100025750506040515195945050505050565b604080516000815290519081900360200190f35b61038b6004356024356000805433600160a060020a0390811691161461039f57610002565b600160a060020a03600435166000908152600160205260409020545b60408051918252519081900360200190f35b61038b6004356024356000805433600160a060020a039081169116146103ce57610002565b61038b60043560243560007319ee743d2e356d5
f0e4d97cc09b96d06e933d0db6388d5fecb600160005085856040518460e060020a0281526004018084815260200183600160a060020a0316815260200182815260200193505050506020604051808303818660325a03f4156100025750506040515191506103179050565b610231600435602435600160a060020a038281166000908152600260209081526040808320938516835292905220545b92915050565b60405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f16801561037d5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b604080519115158252519081900360200190f35b50600160a060020a03821660009081526001602081905260409091208054830190556003805483019055610317565b600160a060020a038316600090815260016020526040902054821161040a57506040600020805482900390556003805482900390556001610317565b50600061031756", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000000": "0x0000000000000000000000005aae5c59d642e5fd45b427df6ed478b49d55fefd", + "0x0000000000000000000000000000000000000000000000000000000000000003": "0x0000000000000000000000000000000000000000000000012098a4651fb262f7", + "0xfae22198212900725daa5db635d1fda7b0fa195adaabdc806a7267959c3d8ae4": "0x000000000000000000000000000000000000000000000000731fb89f735062f7", + "0xfd73dc2251dc113619c6fcc1c142e797f06e77a178cc37fe300a56823b741ef7": "0x0000000000000000000000000000000000000000000000008ac7230489e80000" + } + } + }, + "config": { + "chainId": 1, + "homesteadBlock": 1150000, + "daoForkBlock": 1920000, + "daoForkSupport": true, + "eip150Block": 2463000, + "eip150Hash": "0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0", + "eip155Block": 2675000, + "eip158Block": 2675000, + "byzantiumBlock": 4370000, + "constantinopleBlock": 7280000, + "petersburgBlock": 7280000, + "istanbulBlock": 9069000, + "muirGlacierBlock": 9200000, + "berlinBlock": 12244000, + "londonBlock": 12965000, + "arrowGlacierBlock": 13773000, + "grayGlacierBlock": 15050000, + "terminalTotalDifficultyPassed": true, + 
"ethash": {} + } + }, + "context": { + "number": "2340153", + "difficulty": "80383973372327", + "timestamp": "1475034716", + "gasLimit": "1500062", + "miner": "0x61c808d82a3ac53231750dadc13c777b59310bd9" + }, + "input": "0xf8ea508504a817c80083084398946ca7f214ab2ddbb9a8e1a1e2c8550e3164e9dba580b884bbd4f854e9efd3ab89acad6a3edf9828c3b00ed1c4a74e974d05d32d3b2fb15aa16fc3770000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000de0b6b3a7640000000000000000000000000000000000000000000000000000080d29fa5cccfadac1ba0690ce7a4cf8590c636a1799ebf2cc52229714c47da72ee406fb9bd7d29e52440a017b6ce39e8876965afa2a1c579a592eb1af146506ccdbfc2c9ea422b13dca438", + "tracerConfig": { + "withLog": true + }, + "result": { + "from": "0x3de712784baf97260455ae25fb74f574ec9c1add", + "gas": "0x84398", + "gasUsed": "0x27ec3", + "to": "0x6ca7f214ab2ddbb9a8e1a1e2c8550e3164e9dba5", + "input": "0xbbd4f854e9efd3ab89acad6a3edf9828c3b00ed1c4a74e974d05d32d3b2fb15aa16fc3770000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000de0b6b3a7640000000000000000000000000000000000000000000000000000080d29fa5cccfadac", + "output": "0x00000000000000000000000000000000000000000000000080d29fa5cccfadac", + "calls": [ + { + "from": "0x6ca7f214ab2ddbb9a8e1a1e2c8550e3164e9dba5", + "gas": "0x77e82", + "gasUsed": "0x54c", + "to": "0x5aae5c59d642e5fd45b427df6ed478b49d55fefd", + "input": "0x8c172fa2d9a4ffe21d19763887176173d08241e8393c1dfd208f29193dfecdf854b664ac", + "output": 
"0x446374989d279847d0dbc6708a9c76a419fe9831d42c78bc89473f559a00d91500000000000000000000000061d76c05cd2aa9ed5135e21e52fff188b02089d4000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000092f1dbea03ce08225e31e95cc926ddbe0198e6f2000000000000000000000000529c4cb814029b8bb32acb516ea3a4b07fdae350846fd373887ade3ab7703750294876afa61cf56303f5f014a4d80d04f508a1f100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x6ca7f214ab2ddbb9a8e1a1e2c8550e3164e9dba5", + "gas": "0x7737b", + "gasUsed": "0x3fe1", + "to": "0x8695e5e79dab06fbbb05f445316fa4edb0da30f0", + "input": "0x0439978de9efd3ab89acad6a3edf9828c3b00ed1c4a74e974d05d32d3b2fb15aa16fc3770000000000000000000000000000000000000000000000004563918244f4000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000de0b6b3a76400000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000026566ea1ec2f6a9b00000000000000000000000000000000000000000000000072aa5b7e04d56a9b", + "output": "0x0000000000000000000000000000000000000000000000008060b57e2e0c99aa", + "calls": [ + { + "from": "0x8695e5e79dab06fbbb05f445316fa4edb0da30f0", + "gas": "0x770ef", + "gasUsed": "0xc24", + "to": "0xef3487d24a0702703e04a26cef479e313c8fc7ae", + "input": "0x24d4e90a0000000000000000000000000000000000000000000000020000000000000000", + "output": "0x000000000000000000000000000000000000000000000000b17217f7d1cf79ab", + "type": "DELEGATECALL", + "value": "0x0" + }, + { + "from": "0x8695e5e79dab06fbbb05f445316fa4edb0da30f0", + "gas": "0x75eb2", + 
"gasUsed": "0x265", + "to": "0xef3487d24a0702703e04a26cef479e313c8fc7ae", + "input": "0x872fb2b5000000000000000000000000000000000000000000000000c330b3f7006420b8", + "output": "0x00000000000000000000000000000000000000000000000224bf7df2c80f0878", + "type": "DELEGATECALL", + "value": "0x0" + }, + { + "from": "0x8695e5e79dab06fbbb05f445316fa4edb0da30f0", + "gas": "0x75aad", + "gasUsed": "0x25b", + "to": "0xef3487d24a0702703e04a26cef479e313c8fc7ae", + "input": "0x872fb2b50000000000000000000000000000000000000000000000000000000000000000", + "output": "0x00000000000000000000000000000000000000000000000100000016aee6e8ef", + "type": "DELEGATECALL", + "value": "0x0" + }, + { + "from": "0x8695e5e79dab06fbbb05f445316fa4edb0da30f0", + "gas": "0x75737", + "gasUsed": "0xc24", + "to": "0xef3487d24a0702703e04a26cef479e313c8fc7ae", + "input": "0x24d4e90a00000000000000000000000000000000000000000000000324bf7e0976f5f167", + "output": "0x0000000000000000000000000000000000000000000000012535c5e5f87ee0d2", + "type": "DELEGATECALL", + "value": "0x0" + }, + { + "from": "0x8695e5e79dab06fbbb05f445316fa4edb0da30f0", + "gas": "0x748c7", + "gasUsed": "0x265", + "to": "0xef3487d24a0702703e04a26cef479e313c8fc7ae", + "input": "0x872fb2b5000000000000000000000000000000000000000000000000c330b3f7006420b8", + "output": "0x00000000000000000000000000000000000000000000000224bf7df2c80f0878", + "type": "DELEGATECALL", + "value": "0x0" + }, + { + "from": "0x8695e5e79dab06fbbb05f445316fa4edb0da30f0", + "gas": "0x744c2", + "gasUsed": "0x265", + "to": "0xef3487d24a0702703e04a26cef479e313c8fc7ae", + "input": "0x872fb2b500000000000000000000000000000000000000000000000237d37fe5d297a500", + "output": "0x0000000000000000000000000000000000000000000000093088c407fcbbce38", + "type": "DELEGATECALL", + "value": "0x0" + }, + { + "from": "0x8695e5e79dab06fbbb05f445316fa4edb0da30f0", + "gas": "0x74142", + "gasUsed": "0xc99", + "to": "0xef3487d24a0702703e04a26cef479e313c8fc7ae", + "input": 
"0x24d4e90a00000000000000000000000000000000000000000000000b554841fac4cad6b0", + "output": "0x0000000000000000000000000000000000000000000000026d7fc130d6a74cbe", + "type": "DELEGATECALL", + "value": "0x0" + } + ], + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x6ca7f214ab2ddbb9a8e1a1e2c8550e3164e9dba5", + "gas": "0x731be", + "gasUsed": "0x241", + "to": "0x5aae5c59d642e5fd45b427df6ed478b49d55fefd", + "input": "0xc51cf179000000000000000000000000000000000000000000000000de0b6b3a76400000", + "output": "0x0000000000000000000000000000000000000000000000000071ea279ec31402", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x6ca7f214ab2ddbb9a8e1a1e2c8550e3164e9dba5", + "gas": "0x72df4", + "gasUsed": "0x468b", + "to": "0x92f1dbea03ce08225e31e95cc926ddbe0198e6f2", + "input": "0x23b872dd0000000000000000000000003de712784baf97260455ae25fb74f574ec9c1add0000000000000000000000006ca7f214ab2ddbb9a8e1a1e2c8550e3164e9dba500000000000000000000000000000000000000000000000080d29fa5cccfadac", + "output": "0x0000000000000000000000000000000000000000000000000000000000000001", + "logs": [ + { + "address": "0x92f1dbea03ce08225e31e95cc926ddbe0198e6f2", + "topics": [ + "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef", + "0x0000000000000000000000003de712784baf97260455ae25fb74f574ec9c1add", + "0x0000000000000000000000006ca7f214ab2ddbb9a8e1a1e2c8550e3164e9dba5" + ], + "data": "0x00000000000000000000000000000000000000000000000080d29fa5cccfadac" + } + ], + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x6ca7f214ab2ddbb9a8e1a1e2c8550e3164e9dba5", + "gas": "0x6e627", + "gasUsed": "0x56d6", + "to": "0x92f1dbea03ce08225e31e95cc926ddbe0198e6f2", + "input": "0x095ea7b30000000000000000000000005aae5c59d642e5fd45b427df6ed478b49d55fefd00000000000000000000000000000000000000000000000080d29fa5cccfadac", + "output": "0x0000000000000000000000000000000000000000000000000000000000000001", + "logs": [ + { + "address": "0x92f1dbea03ce08225e31e95cc926ddbe0198e6f2", + "topics": [ + 
"0x8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925", + "0x0000000000000000000000006ca7f214ab2ddbb9a8e1a1e2c8550e3164e9dba5", + "0x0000000000000000000000005aae5c59d642e5fd45b427df6ed478b49d55fefd" + ], + "data": "0x00000000000000000000000000000000000000000000000080d29fa5cccfadac" + } + ], + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x6ca7f214ab2ddbb9a8e1a1e2c8550e3164e9dba5", + "gas": "0x68dae", + "gasUsed": "0xd6f0", + "to": "0x5aae5c59d642e5fd45b427df6ed478b49d55fefd", + "input": "0x07d5b826d9a4ffe21d19763887176173d08241e8393c1dfd208f29193dfecdf854b664ac00000000000000000000000000000000000000000000000080d29fa5cccfadac", + "output": "0x0000000000000000000000000000000000000000000000000000000000000001", + "calls": [ + { + "from": "0x5aae5c59d642e5fd45b427df6ed478b49d55fefd", + "gas": "0x629ff", + "gasUsed": "0x468b", + "to": "0x92f1dbea03ce08225e31e95cc926ddbe0198e6f2", + "input": "0x23b872dd0000000000000000000000006ca7f214ab2ddbb9a8e1a1e2c8550e3164e9dba50000000000000000000000005aae5c59d642e5fd45b427df6ed478b49d55fefd00000000000000000000000000000000000000000000000080d29fa5cccfadac", + "output": "0x0000000000000000000000000000000000000000000000000000000000000001", + "logs": [ + { + "address": "0x92f1dbea03ce08225e31e95cc926ddbe0198e6f2", + "topics": [ + "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef", + "0x0000000000000000000000006ca7f214ab2ddbb9a8e1a1e2c8550e3164e9dba5", + "0x0000000000000000000000005aae5c59d642e5fd45b427df6ed478b49d55fefd" + ], + "data": "0x00000000000000000000000000000000000000000000000080d29fa5cccfadac" + } + ], + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x5aae5c59d642e5fd45b427df6ed478b49d55fefd", + "gas": "0x5e0df", + "gasUsed": "0x31af", + "to": "0x92f1dbea03ce08225e31e95cc926ddbe0198e6f2", + "input": "0xa9059cbb000000000000000000000000950ca4a06c78934a148b7a3ff3ea8fc366f77a060000000000000000000000000000000000000000000000000041f50e27d56848", + "output": 
"0x0000000000000000000000000000000000000000000000000000000000000001", + "logs": [ + { + "address": "0x92f1dbea03ce08225e31e95cc926ddbe0198e6f2", + "topics": [ + "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef", + "0x0000000000000000000000005aae5c59d642e5fd45b427df6ed478b49d55fefd", + "0x000000000000000000000000950ca4a06c78934a148b7a3ff3ea8fc366f77a06" + ], + "data": "0x0000000000000000000000000000000000000000000000000041f50e27d56848" + } + ], + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x5aae5c59d642e5fd45b427df6ed478b49d55fefd", + "gas": "0x5ac6b", + "gasUsed": "0x29ae", + "to": "0x01e60b511fced1eb2b5b40991eb1dfd171a6df42", + "input": "0x475a9fa90000000000000000000000006ca7f214ab2ddbb9a8e1a1e2c8550e3164e9dba50000000000000000000000000000000000000000000000008090aa97a4fa4564", + "output": "0x0000000000000000000000000000000000000000000000000000000000000001", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x5aae5c59d642e5fd45b427df6ed478b49d55fefd", + "gas": "0x57fed", + "gasUsed": "0x29ae", + "to": "0xf4cbd7e037b80c2e67b80512d482685f15b1fb28", + "input": "0x475a9fa90000000000000000000000006ca7f214ab2ddbb9a8e1a1e2c8550e3164e9dba50000000000000000000000000000000000000000000000008090aa97a4fa4564", + "output": "0x0000000000000000000000000000000000000000000000000000000000000001", + "value": "0x0", + "type": "CALL" + } + ], + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x6ca7f214ab2ddbb9a8e1a1e2c8550e3164e9dba5", + "gas": "0x56030", + "gasUsed": "0x265", + "to": "0x5aae5c59d642e5fd45b427df6ed478b49d55fefd", + "input": "0x1f0c1e0cd9a4ffe21d19763887176173d08241e8393c1dfd208f29193dfecdf854b664ac0000000000000000000000000000000000000000000000000000000000000001", + "output": "0x000000000000000000000000f4cbd7e037b80c2e67b80512d482685f15b1fb28", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x6ca7f214ab2ddbb9a8e1a1e2c8550e3164e9dba5", + "gas": "0x55cc3", + "gasUsed": "0x339f", + "to": 
"0xf4cbd7e037b80c2e67b80512d482685f15b1fb28", + "input": "0xa9059cbb0000000000000000000000003de712784baf97260455ae25fb74f574ec9c1add000000000000000000000000000000000000000000000000de0b6b3a76400000", + "output": "0x0000000000000000000000000000000000000000000000000000000000000001", + "calls": [ + { + "from": "0xf4cbd7e037b80c2e67b80512d482685f15b1fb28", + "gas": "0x55a8a", + "gasUsed": "0x30f7", + "to": "0x19ee743d2e356d5f0e4d97cc09b96d06e933d0db", + "input": "0x88d5fecb00000000000000000000000000000000000000000000000000000000000000010000000000000000000000003de712784baf97260455ae25fb74f574ec9c1add000000000000000000000000000000000000000000000000de0b6b3a76400000", + "output": "0x0000000000000000000000000000000000000000000000000000000000000001", + "logs": [ + { + "address": "0xf4cbd7e037b80c2e67b80512d482685f15b1fb28", + "topics": [ + "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef", + "0x0000000000000000000000006ca7f214ab2ddbb9a8e1a1e2c8550e3164e9dba5", + "0x0000000000000000000000003de712784baf97260455ae25fb74f574ec9c1add" + ], + "data": "0x000000000000000000000000000000000000000000000000de0b6b3a76400000" + } + ], + "type": "DELEGATECALL", + "value": "0x0" + } + ], + "value": "0x0", + "type": "CALL" + } + ], + "value": "0x0", + "type": "CALL" + } +} diff --git a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/multi_contracts.json b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/multi_contracts.json new file mode 100644 index 00000000..5e5d9538 --- /dev/null +++ b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/multi_contracts.json @@ -0,0 +1,2295 @@ +{ + "genesis": { + "difficulty": "59917798787272", + "extraData": "0xe4b883e5bda9e7a59ee4bb99e9b1bc", + "gasLimit": "4712380", + "hash": "0xae82afe3630b001a34ad4c51695dacb17872ebee4dadd2de88b1a16671871da4", + "miner": "0x61c808d82a3ac53231750dadc13c777b59310bd9", + "mixHash": "0x23c2289cdee8a397cf36db9ffa3419503bed54eb09e988b3c7a3587a090e6fc1", + 
"nonce": "0x94dc83e0044f49c8", + "number": "1881283", + "stateRoot": "0x6e3832bc2e4e66170a1e716449083e08fbb70e7b2a9f1f34e0c57e66ce40c50f", + "timestamp": "1468467284", + "totalDifficulty": "37186898441932102239", + "alloc": { + "0x0000000000000000000000000000000000000004": { + "balance": "0x0" + }, + "0x03e3d4561a8f8e975fdcd798d32857a20cf25e7e": { + "balance": "0x0", + "code": "0x606060405236156100f05760e060020a600035046303afc23581146100f257806313af4035146101145780631838e26614610136578063186ef9621461014d57806327df8c501461016f578063295d5866146101915780634162169f146101b35780634dfc3db6146101c55780636637b882146101e8578063839d3f7f1461020a57806386c9b5361461021d5780638da5cb5b1461022f5780639093a5e714610241578063b199efb514610263578063b262b9ae14610275578063b9f34aa114610297578063be9a6555146102a9578063d1c3c84a146102c7578063e26fc92b146102d9578063e8d9f074146102eb575b005b6100f0600435600054600160a060020a03908116339091161461034057610002565b6100f0600435600054600160a060020a03908116339091161461035557610002565b6102fd60006000600060006000600061036a6101c9565b6100f0600435600054600160a060020a0390811633909116146108a157610002565b6100f0600435600054600160a060020a0390811633909116146108b657610002565b6100f0600435600054600160a060020a0390811633909116146108cb57610002565b61030f600154600160a060020a031681565b6102fd5b60008054600160a060020a0390811633909116146108e0575060015b90565b6100f0600435600054600160a060020a03908116339091161461098857610002565b61032c60075460a060020a900460ff1681565b61030f600454600160a060020a031681565b61030f600054600160a060020a031681565b6100f0600435600054600160a060020a03908116339091161461099d57610002565b61030f600254600160a060020a031681565b6100f0600435600054600160a060020a0390811633909116146109b257610002565b61030f600754600160a060020a031681565b6100f060005433600160a060020a03908116911614610a1a57610002565b61030f600354600160a060020a031681565b61030f600554600160a060020a031681565b61030f600654600160a060020a031681565b60408051918252519081900360200190f35b60408051600160a060020a039290921682525190819003602
00190f35b604080519115158252519081900360200190f35b60048054600160a060020a0319168217905550565b60008054600160a060020a0319168217905550565b945060008514610380578495505b505050505090565b6002546040805160015460e060020a634162169f0282529151600160a060020a039283169390921691634162169f9160048181019260209290919082900301816000876161da5a03f11561000257505060405151600160a060020a031690911490506103ef5760649550610378565b600260009054906101000a9004600160a060020a0316600160a060020a0316634dfc3db66040518160e060020a0281526004018090506020604051808303816000876161da5a03f115610002575050604051519450506000841461045857836064019550610378565b6040805160015460035460e060020a634162169f0283529251600160a060020a039182169390911691634162169f91600482810192602092919082900301816000876161da5a03f11561000257505060405151600160a060020a031690911490506104c65760c89550610378565b600360009054906101000a9004600160a060020a0316600160a060020a0316634dfc3db66040518160e060020a0281526004018090506020604051808303816000876161da5a03f115610002575050604051519350506000831461052f578260c8019550610378565b604080516001546004805460e060020a634162169f0284529351600160a060020a039283169490921692634162169f928183019260209282900301816000876161da5a03f11561000257505060405151600160a060020a0316909114905061059b5761012c9550610378565b60408051600480547f4dfc3db60000000000000000000000000000000000000000000000000000000083529251600160a060020a039390931692634dfc3db692808301926020929182900301816000876161da5a03f1156100025750506040515192505060008214610613578161012c019550610378565b6040805160015460055460e060020a634162169f0283529251600160a060020a039182169390911691634162169f91600482810192602092919082900301816000876161da5a03f11561000257505060405151600160a060020a03169091149050610682576101909550610378565b600560009054906101000a9004600160a060020a0316600160a060020a0316634dfc3db66040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060405151915050600081146106ec5780610190019550610378565b6040805160015460065460e060020a634162169f0283529251600160a060020a0391821
69390911691634162169f91600482810192602092919082900301816000876161da5a03f11561000257505060405151600160a060020a0316909114905061075b576101f49550610378565b6040805160065460e060020a638da5cb5b028252915130600160a060020a03908116931691638da5cb5b91600482810192602092919082900301816000876161da5a03f11561000257505060405151600160a060020a031690911490506107c6576101f59550610378565b6040805160075460015460e060020a634162169f0283529251600160a060020a03938416939190911691634162169f91600482810192602092919082900301816000876161da5a03f11561000257505060405151600160a060020a03169091149050610836576102589550610378565b6040805160075460e060020a638da5cb5b028252915130600160a060020a03908116931691638da5cb5b91600482810192602092919082900301816000876161da5a03f11561000257505060405151600160a060020a03169091149050610378576102599550610378565b60038054600160a060020a0319168217905550565b60058054600160a060020a0319168217905550565b60068054600160a060020a0319168217905550565b600154600160a060020a0316600014156108fc575060026101e5565b600654600160a060020a031660001415610918575060036101e5565b600754600160a060020a031660001415610934575060046101e5565b600254600160a060020a031660001415610950575060056101e5565b600354600160a060020a03166000141561096c575060066101e5565b600454600160a060020a0316600014156101e5575060076101e5565b60018054600160a060020a0319168217905550565b60078054600160a060020a0319168217905550565b60028054600160a060020a0319168217905550565b600260009054906101000a9004600160a060020a0316600160a060020a031663975057e76040518160e060020a0281526004018090506000604051808303816000876161da5a03f115610002575050505b565b610a8a600154604080517f4b6753bc0000000000000000000000000000000000000000000000000000000081529051600092600160a060020a031691634b6753bc916004828101926020929190829003018187876161da5a03f11561000257505060405151421191506101e59050565b1515610a9557610a18565b60075460a060020a900460ff1615610e93576111556040805160015460065460e060020a6370a08231028352600160a060020a039081166004840152925160009384939216916370a08231916024808301926020929190829003018187876161da5a0
3f1156100025750506040515191909111159050610c61576040805160065460025460015460e060020a6370a08231028452600160a060020a0392831660048501819052945163a9059cbb949284169391909116916370a0823191602482810192602092919082900301818a876161da5a03f11561000257505060408051805160e060020a63a9059cbb028252600482019490945260248101939093525160448084019360209350829003018187876161da5a03f1156100025750506040805160025460e060020a63a8618f71028252600160a060020a031660048201819052915191925063a8618f71916024828101926020929190829003018187876161da5a03f11561000257505060405151159050610c6157600260009054906101000a9004600160a060020a0316600160a060020a031663975057e76040518160e060020a0281526004018090506000604051808303816000876161da5a03f11561000257506001925050505b6001546007546040805160e060020a6370a08231028152600160a060020a0392831660048201529051600093909216916370a0823191602481810192602092909190829003018187876161da5a03f1156100025750506040515191909111159050610e1b576040805160075460025460015460e060020a6370a08231028452600160a060020a0392831660048501819052945163a9059cbb949284169391909116916370a0823191602482810192602092919082900301816000876161da5a03f11561000257505060408051805160e060020a63a9059cbb02825260048201949094526024810193909352516044838101936020935082900301816000876161da5a03f1156100025750506040805160025460e060020a63a8618f71028252600160a060020a031660048201819052915191925063a8618f7191602482810192602092919082900301816000876161da5a03f11561000257505060405151159050610e1b57600260009054906101000a9004600160a060020a0316600160a060020a031663975057e76040518160e060020a0281526004018090506000604051808303816000876161da5a03f11561000257506001925050505b8015610e7257600260009054906101000a9004600160a060020a0316600160a060020a0316632e64cec16040518160e060020a0281526004018090506000604051808303816000876161da5a03f115610002575050505b6007805474ff00000000000000000000000000000000000000001916905550565b600260009054906101000a9004600160a060020a0316600160a060020a0316632e64cec16040518160e060020a0281526004018090506000604051808303816000876161da5a03f1156100025
75050505b60048054604080517ffc3407160000000000000000000000000000000000000000000000000000000081529051600160a060020a03929092169263fc340716928282019260009290829003018183876161da5a03f115610002575050600354604080517fd95f98ce0000000000000000000000000000000000000000000000000000000081529051600160a060020a0392909216925063d95f98ce916004828101926000929190829003018183876161da5a03f11561000257505050620f42405a11156109c7576109c76001546005546040805160e060020a6370a08231028152600160a060020a039283166004820152905192909116916370a082319160248181019260209290919082900301816000876161da5a03f1156100025750506040515160001415905061108357604080516002546005547fd0679d34000000000000000000000000000000000000000000000000000000008352600160a060020a03908116600484015260016024840152925192169163d0679d349160448181019260209290919082900301816000876161da5a03f115610002575050505b5b600554604080517f400e39490000000000000000000000000000000000000000000000000000000081529051600a92600160a060020a03169163400e394991600482810192602092919082900301816000876161da5a03f1156100025750506040515191909110905080156110fb5750620aae605a115b15610a1857600560009054906101000a9004600160a060020a0316600160a060020a031663ff2f4bd26040518160e060020a0281526004018090506000604051808303816000876161da5a03f11561000257505050611084565b610ee456", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000000": "0x000000000000000000000000be3ae5cb97c253dda67181c6e34e43f5c275e08b", + "0x0000000000000000000000000000000000000000000000000000000000000001": "0x000000000000000000000000304a554a310c7e546dfe434669c62820b7d83490", + "0x0000000000000000000000000000000000000000000000000000000000000002": "0x0000000000000000000000004fd27b205895e698fa350f7ea57cec8a21927fcd", + "0x0000000000000000000000000000000000000000000000000000000000000003": "0x0000000000000000000000006dbfc63479ffc031f23e94dc91befa38bec2c25f", + "0x0000000000000000000000000000000000000000000000000000000000000004": "0x0000000000000000000000006e715ab4f598eacf0016b9b35ef33e4141844ccc", + 
"0x0000000000000000000000000000000000000000000000000000000000000005": "0x0000000000000000000000007498bb5749c9801f1f7e490baf5f966dbfe4e97b", + "0x0000000000000000000000000000000000000000000000000000000000000006": "0x000000000000000000000000c0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89", + "0x0000000000000000000000000000000000000000000000000000000000000007": "0x000000000000000000000001f835a0247b0063c04ef22006ebe57c5f11977cc4" + } + }, + "0x304a554a310c7e546dfe434669c62820b7d83490": { + "balance": "0x3034f5ca7d45e17df1d83", + "nonce": "3", + "code": "0x6060604052361561020e5760e060020a6000350463013cf08b8114610247578063095ea7b3146102d05780630c3b7b96146103455780630e7082031461034e578063149acf9a1461036057806318160ddd146103725780631f2dc5ef1461037b57806321b5b8dd1461039b578063237e9492146103ad57806323b872dd1461040e5780632632bf2014610441578063341458081461047257806339d1f9081461047b5780634b6753bc146104935780634df6d6cc1461049c5780634e10c3ee146104b7578063590e1ae3146104ca578063612e45a3146104db578063643f7cdd1461057a578063674ed066146105925780636837ff1e1461059b57806370a08231146105e5578063749f98891461060b57806378524b2e1461062457806381f03fcb1461067e57806382661dc41461069657806382bf6464146106b75780638b15a605146106c95780638d7af473146106d257806396d7f3f5146106e1578063a1da2fb9146106ea578063a3912ec814610704578063a9059cbb1461070f578063b7bc2c841461073f578063baac53001461074b578063be7c29c1146107b1578063c9d27afe14610817578063cc9ae3f61461082d578063cdef91d014610841578063dbde198814610859578063dd62ed3e1461087e578063e33734fd146108b2578063e5962195146108c6578063e66f53b7146108de578063eceb2945146108f0578063f8c80d261461094f575b610966600f546000906234bc000142108015610239575060125433600160a060020a03908116911614155b156109785761098033610752565b6109866004356000805482908110156100025750808052600e8202600080516020612a3683398151915201905060038101546004820154600683015460018401548454600786015460058701546009880154600a890154600d8a0154600160a060020a039586169b509599600201989760ff81811698610100909204811697949691951693168c565b6109666
0043560243533600160a060020a03908116600081815260156020908152604080832094871680845294825280832086905580518681529051929493927f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925929181900390910190a35060015b92915050565b61096660105481565b610a7d600754600160a060020a031681565b610a7d600e54600160a060020a031681565b61096660165481565b6109665b60004262127500600f60005054031115610de557506014610983565b610a7d601254600160a060020a031681565b60408051602060248035600481810135601f810185900485028601850190965285855261096695813595919460449492939092019181908401838280828437509496505050505050506000600060006000600060003411156116a857610002565b6109666004356024356044355b60115460009060ff1680156104315750600f5442115b80156124e957506124e78461044b565b6109666000610980335b600160a060020a0381166000908152600b602052604081205481908114156129cb57610b99565b61096660065481565b6109665b600d5430600160a060020a03163103610983565b610966600f5481565b61096660043560046020526000908152604090205460ff1681565b61096660043560243560006124cb610831565b610a9a6000341115610ba457610002565b604080516020604435600481810135601f8101849004840285018401909552848452610966948135946024803595939460649492939101918190840183828082843750506040805160209735808a0135601f81018a90048a0283018a01909352828252969897608497919650602491909101945090925082915084018382808284375094965050933593505060a435915050600060006110c1336105ec565b61096660043560096020526000908152604090205481565b61096660015481565b610a9a60043530600160a060020a031633600160a060020a03161415806105db5750600160a060020a03811660009081526004602052604090205460ff16155b156121cb576121c8565b6109666004355b600160a060020a0381166000908152601460205260409020545b919050565b6109666004356024356000600034111561259957610002565b610966600062e6b680420360026000505410806106505750600354600160a060020a0390811633909116145b80156106645750600254621274ff19420190105b156126145750426002908155600180549091028155610983565b610966600435600a6020526000908152604090205481565b610966600435602435600060006000600060006000341115611ba157610002565b610a7d600
854600160a060020a031681565b610966600c5481565b61096660005460001901610983565b61096660025481565b61096660043560006000600060003411156121fc57610002565b6109665b6001610983565b6109666004356024355b60115460009060ff16801561072f5750600f5442115b801561248757506124853361044b565b61096660115460ff1681565b6109666004355b60006000600f600050544210801561076a5750600034115b80156107a457506011546101009004600160a060020a0316600014806107a457506011546101009004600160a060020a0390811633909116145b15610b9f57610a9c61037f565b610a7d600435600060006000508281548110156100025750508080527f290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e56b600e83020180548290811015610002575081526020902060030154600160a060020a0316610606565b61096660043560243560006000610e1b336105ec565b6109665b6000600034111561247c57610002565b61096660043560056020526000908152604090205481565b610966600435602435604435600061252f845b6000600060003411156127ac57610002565b610966600435602435600160a060020a0382811660009081526015602090815260408083209385168352929052205461033f565b610a9a600435600034111561254557610002565b610966600435600b6020526000908152604090205481565b610a7d600354600160a060020a031681565b604080516020606435600481810135601f81018490048402850184019095528484526109669481359460248035956044359560849492019190819084018382808284375094965050505050505060006000600034111561103257610002565b610a7d6011546101009004600160a060020a031681565b60408051918252519081900360200190f35b610980610708565b90505b90565b604051808d600160a060020a031681526020018c8152602001806020018b81526020018a815260200189815260200188815260200187815260200186815260200185815260200184815260200183600160a060020a0316815260200182810382528c818154600181600116156101000203166002900481526020019150805460018160011615610100020316600290048015610a635780601f10610a3857610100808354040283529160200191610a63565b820191906000526020600020905b815481529060010190602001808311610a4657829003601f168201915b50509d505050505050505050505050505060405180910390f35b60408051600160a060020a03929092168252519081900360200190f35b005b604051601254601
434908102939093049350600160a060020a03169183900390600081818185876185025a03f150505050600160a060020a038316600081815260146020908152604080832080548601905560168054860190556013825291829020805434019055815184815291517fdbccb92686efceafb9bb7e0394df7f58f71b954061b81afb57109bf247d3d75a9281900390910190a260105460165410801590610b4c575060115460ff16155b15610b94576011805460ff1916600117905560165460408051918252517ff381a3e2428fdda36615919e8d9c35878d9eb0cf85ac6edf575088e80e4c147e9181900360200190a15b600191505b50919050565b610002565b600f5442118015610bb8575060115460ff16155b15610de357601260009054906101000a9004600160a060020a0316600160a060020a031663d2cc718f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f1156100025750506040516012549051600160a060020a039190911631109050610cc9576040805160125460e060020a63d2cc718f0282529151600160a060020a039290921691630221038a913091849163d2cc718f91600482810192602092919082900301816000876161da5a03f11561000257505060408051805160e160020a63011081c5028252600160a060020a039490941660048201526024810193909352516044838101936020935082900301816000876161da5a03f115610002575050505b33600160a060020a0316600081815260136020526040808220549051909181818185876185025a03f19250505015610de35733600160a060020a03167fbb28353e4598c3b9199101a66e0989549b659a59a54d2c27fbb183f1932c8e6d6013600050600033600160a060020a03168152602001908152602001600020600050546040518082815260200191505060405180910390a26014600050600033600160a060020a0316815260200190815260200160002060005054601660008282825054039250508190555060006014600050600033600160a060020a031681526020019081526020016000206000508190555060006013600050600033600160a060020a03168152602001908152602001600020600050819055505b565b4262054600600f60005054031115610e13576201518062127500600f60005054034203046014019050610983565b50601e610983565b60001415610e2857610002565b6000341115610e3657610002565b6000805485908110156100025750600160a060020a03331681527f290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e56e600e8602908101602052604090912054600080516020612a36833
98151915291909101915060ff1680610eb05750600c810160205260406000205460ff165b80610ebf575060038101544210155b15610ec957610002565b8215610f0f5733600160a060020a03166000908152601460209081526040808320546009850180549091019055600b84019091529020805460ff19166001179055610f4b565b33600160a060020a0316600090815260146020908152604080832054600a850180549091019055600c84019091529020805460ff191660011790555b33600160a060020a03166000908152600b60205260408120541415610f77576040600020849055610feb565b33600160a060020a03166000908152600b60205260408120548154811015610002579080527f290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e566600e909102015460038201541115610feb5733600160a060020a03166000908152600b602052604090208490555b60408051848152905133600160a060020a03169186917f86abfce99b7dd908bec0169288797f85049ec73cbe046ed9de818fab3a497ae09181900360200190a35092915050565b6000805487908110156100025750808052600e8702600080516020612a3683398151915201905090508484846040518084600160a060020a0316606060020a0281526014018381526020018280519060200190808383829060006004602084601f0104600f02600301f15090500193505050506040518091039020816005016000505414915050949350505050565b600014156110ce57610002565b82801561111857508660001415806110e857508451600014155b806111005750600354600160a060020a038981169116145b8061110b5750600034115b80611118575062093a8084105b1561112257610002565b8215801561114257506111348861115c565b158061114257506212750084105b156111fe57610002565b83546118e590600160a060020a03165b600160a060020a03811660009081526004602052604081205460ff16806111f15750601254600160a060020a039081169083161480156111f15750601260009054906101000a9004600160a060020a0316600160a060020a031663d2cc718f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f115610002575050604051516006541190505b156129a157506001610606565b6249d40084111561120e57610002565b60115460ff1615806112215750600f5442105b806112365750600c5434108015611236575082155b1561124057610002565b42844201101561124f57610002565b30600160a060020a031633600160a060020a0316141561126e57610002565b60008054600
181018083559091908280158290116112a557600e0281600e0283600052602060002091820191016112a5919061136a565b505060008054929450918491508110156100025750808052600e8302600080516020612a368339815191520190508054600160a060020a031916891781556001818101899055875160028084018054600082815260209081902096975091959481161561010002600019011691909104601f908101829004840193918b019083901061146257805160ff19168380011785555b5061149292915061144a565b5050600060098201819055600a820155600d81018054600160a060020a03191690556001015b8082111561145e578054600160a060020a03191681556000600182810182905560028084018054848255909281161561010002600019011604601f81901061143057505b506000600383018190556004808401805461ffff19169055600584018290556006840182905560078401805460ff191690556008840180548382559083526020909220611344929091028101905b8082111561145e57600080825560018201818155600283019190915560039091018054600160a060020a03191690556113fc565b601f0160209004906000526020600020908101906113ae91905b8082111561145e576000815560010161144a565b5090565b82800160010185558215611338579182015b82811115611338578251826000505591602001919060010190611474565b50508787866040518084600160a060020a0316606060020a0281526014018381526020018280519060200190808383829060006004602084601f0104600f02600301f150905001935050505060405180910390208160050160005081905550834201816003016000508190555060018160040160006101000a81548160ff02191690830217905550828160070160006101000a81548160ff02191690830217905550821561157857600881018054600181018083559091908280158290116115735760040281600402836000526020600020918201910161157391906113fc565b505050505b600d8082018054600160a060020a031916331790553460068301819055815401905560408051600160a060020a038a16815260208181018a9052918101859052608060608201818152895191830191909152885185937f5790de2c279e58269b93b12828f56fd5f2bc8ad15e61ce08572585c81a38756f938d938d938a938e93929160a084019185810191908190849082908590600090600490601f850104600f02600301f150905090810190601f1680156116485780820380516001836020036101000a031916815260200191505b509550505050505060405180910390a25096955
05050505050565b6040805186815260208101839052815189927fdfc78bdca8e3e0b18c16c5c99323c6cb9eb5e00afde190b4e7273f5158702b07928290030190a25b5050505092915050565b6000805488908110156100025750808052600e8802600080516020612a36833981519152019050600781015490945060ff166116e757620d2f006116ec565b622398805b600485015490935060ff16801561170857506003840154830142115b15611716576117b887611890565b600384015442108061172d5750600484015460ff16155b806117ae57508360000160009054906101000a9004600160a060020a03168460010160005054876040518084600160a060020a0316606060020a0281526014018381526020018280519060200190808383829060006004602084601f0104600f02600301f15090500193505050506040518091039020846005016000505414155b1561114c57610002565b61169e565b60048401805461ff001916610100179055835460019550600160a060020a03908116309091161480159061180157508354600754600160a060020a03908116911614155b801561181d57506008548454600160a060020a03908116911614155b801561183957508354601254600160a060020a03908116911614155b801561185557506003548454600160a060020a03908116911614155b1561188b5760018401805430600160a060020a031660009081526005602052604090208054919091019055546006805490910190555b611663875b6000600060005082815481101561000257908052600e02600080516020612a36833981519152018150600481015490915060ff16156118d757600d80546006830154900390555b600401805460ff1916905550565b15156118f45761190087611890565b6001915061193161047f565b604051600d8501546006860154600160a060020a0391909116916000919082818181858883f193505050505061169e565b6001850154111561194157600091505b50600a8301546009840154865191019060049010801590611986575085600081518110156100025790602001015160f860020a900460f860020a02606860f860020a02145b80156119b6575085600181518110156100025790602001015160f860020a900460f860020a02603760f860020a02145b80156119e6575085600281518110156100025790602001015160f860020a900460f860020a0260ff60f860020a02145b8015611a16575085600381518110156100025790602001015160f860020a900460f860020a02601e60f860020a02145b8015611a45575030600160a060020a0316600090815260056020526040902054611a4290611a5d61047f565b811
05b15611a4f57600091505b6001840154611a8090611a5f565b015b30600160a060020a03166000908152600560205260408120546129a961047f565b8110611ad457604051600d8501546006860154600160a060020a0391909116916000919082818181858883f193505050501515611abc57610002565b4260025560165460059004811115611ad45760056001555b6001840154611ae290611a5f565b8110158015611af85750600a8401546009850154115b8015611b015750815b1561188b578360000160009054906101000a9004600160a060020a0316600160a060020a0316846001016000505487604051808280519060200190808383829060006004602084601f0104600f02600301f150905090810190601f168015611b7d5780820380516001836020036101000a031916815260200191505b5091505060006040518083038185876185025a03f19250505015156117bd57610002565b611baa336105ec565b60001415611bb757610002565b60008054889081101561000257508052600e87027f290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e566810154600080516020612a36833981519152919091019450421080611c1957506003840154622398800142115b80611c3257508354600160a060020a0390811690871614155b80611c425750600784015460ff16155b80611c68575033600160a060020a03166000908152600b8501602052604090205460ff16155b80611c9c575033600160a060020a03166000908152600b60205260409020548714801590611c9c5750604060009081205414155b15611ca657610002565b600884018054600090811015610002579081526020812060030154600160a060020a03161415611e1257611efc86604051600090600160a060020a038316907f9046fefd66f538ab35263248a44217dcb70e2eb2cd136629e141b8b8f9f03b60908390a260408051600e547fe2faf044000000000000000000000000000000000000000000000000000000008252600160a060020a03858116600484015260248301859052604483018590526223988042016064840152925192169163e2faf04491608480820192602092909190829003018187876161da5a03f1156100025750506040515191506106069050565b6008850180546000908110156100025781815260208082209390935530600160a060020a031681526005909252604082205481549092908110156100025790815260208120905060020155601654600885018054600090811015610002579081526020812090506001015560048401805461ff0019166101001790555b600884018054600090811015610002578154828252602082206
0010154929190811015610002579081526020812090505433600160a060020a031660009081526014602052604081205460088801805493909102939093049550908110156100025790815260208120905060030154604080517fbaac530000000000000000000000000000000000000000000000000000000000815233600160a060020a0390811660048301529151929091169163baac53009186916024808301926020929190829003018185886185025a03f11561000257505060405151600014159150611f78905057610002565b60088501805460009081101561000257818152602081206003018054600160a060020a03191690931790925580549091908110156100025790815260208120905060030154600160a060020a031660001415611f5757610002565b600d5430600160a060020a0316311015611f7057610002565b611d9561047f565b6008840180546000908110156100025781548282526020822060010154929190811015610002579081526020812090506002015433600160a060020a0390811660009081526014602090815260408083205430909416835260058083528184205460099093529083205460088b018054969095029690960497509487020494508593929091908290811015610002575260208120815060030154600160a060020a0390811682526020828101939093526040918201600090812080549095019094553016835260059091529020548290101561205357610002565b30600160a060020a031660009081526005602052604081208054849003905560088501805483926009929091829081101561000257508152602080822060030154600160a060020a039081168352929052604080822080549094019093553090911681522054819010156120c657610002565b30600160a060020a0390811660009081526009602090815260408083208054869003905533909316808352601482528383205484519081529351929390927fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef929181900390910190a36121383361086c565b5033600160a060020a03166000908152601460209081526040808320805460168054919091039055839055600a9091528120556001945061169e565b30600160a060020a0390811660008181526005602090815260408083208054958716808552828520805490970190965584845283905560099091528082208054948352908220805490940190935590815290555b50565b604051600160a060020a0382811691309091163190600081818185876185025a03f192505050151561217457610002565b33600160a060020a0381811660009081526009602090815
2604080832054815160065460085460e060020a63d2cc718f028352935197995091969195929092169363d2cc718f936004848101949193929183900301908290876161da5a03f11561000257505050604051805190602001506005600050600033600160a060020a03168152602001908152602001600020600050540204101561229d57610002565b600160a060020a03338116600090815260096020908152604080832054815160065460085460e060020a63d2cc718f02835293519296909593169363d2cc718f93600483810194929383900301908290876161da5a03f11561000257505050604051805190602001506005600050600033600160a060020a0316815260200190815260200160002060005054020403905083156123ec57600860009054906101000a9004600160a060020a0316600160a060020a0316630221038a83600160a060020a0316630e7082036040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060408051805160e160020a63011081c5028252600160a060020a031660048201526024810186905290516044808301935060209282900301816000876161da5a03f115610002575050604051511515905061245457610002565b6040805160085460e160020a63011081c5028252600160a060020a038581166004840152602483018590529251921691630221038a9160448082019260209290919082900301816000876161da5a03f115610002575050604051511515905061245457610002565b600160a060020a03331660009081526009602052604090208054909101905550600192915050565b6109803361086c565b155b80156124a257506124a23384845b6000600061293a856105ec565b80156124be57506124be83836000600034111561261c57610002565b15610b9f5750600161033f565b15156124d657610002565b6124e08383610719565b905061033f565b155b80156124fb57506124fb848484612495565b80156125185750612518848484600060003411156126c157610002565b15610b9f57506001612528565b90505b9392505050565b151561253a57610002565b61252584848461041b565b30600160a060020a031633600160a060020a031614158061258a575030600160a060020a031660009081526005602052604090205460649061258561047f565b010481115b1561259457610002565b600c55565b600354600160a060020a0390811633909116146125b557610002565b600160a060020a038316600081815260046020908152604091829020805460ff191686179055815185815291517f73ad2a153c8b67991df9459024950b318a609782cee8c7eed
a47b905f9baa91f9281900390910190a250600161033f565b506000610983565b33600160a060020a03166000908152601460205260409020548290108015906126455750600082115b156126b957600160a060020a03338116600081815260146020908152604080832080548890039055938716808352918490208054870190558351868152935191937fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef929081900390910190a350600161033f565b50600061033f565b600160a060020a03841660009081526014602052604090205482901080159061270a5750601560209081526040600081812033600160a060020a03168252909252902054829010155b80156127165750600082115b156127a457600160a060020a03838116600081815260146020908152604080832080548801905588851680845281842080548990039055601583528184203390961684529482529182902080548790039055815186815291519293927fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef9281900390910190a3506001612528565b506000612528565b600160a060020a038381166000908152600a6020908152604080832054601654600754835160e060020a63d2cc718f02815293519296919591169363d2cc718f9360048181019492939183900301908290876161da5a03f11561000257505060405151905061281a866105ec565b0204101561282757610002565b600160a060020a038381166000908152600a6020908152604080832054601654600754835160e060020a63d2cc718f02815293519296919591169363d2cc718f9360048181019492939183900301908290876161da5a03f115610002575050604051519050612895866105ec565b0204039050600760009054906101000a9004600160a060020a0316600160a060020a0316630221038a84836040518360e060020a0281526004018083600160a060020a03168152602001828152602001925050506020604051808303816000876161da5a03f115610002575050604051511515905061291357610002565b600160a060020a0383166000908152600a6020526040902080548201905560019150610b99565b600160a060020a0386166000908152600a602052604090205480850291909104915081111561296857610002565b600160a060020a038581166000908152600a60205260408082208054859003905591861681522080548201905560019150509392505050565b506000610606565b0160030260166000505483020460016000505460166000505404019050610606565b600160a060020a0383166000908152600b602052604081205
4815481101561000257818052600e02600080516020612a368339815191520190506003810154909150421115610b9457600160a060020a0383166000908152600b602052604081208190559150610b9956290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000000": "0x0000000000000000000000000000000000000000000000000000000000000001", + "0x0000000000000000000000000000000000000000000000000000000000000003": "0x000000000000000000000000b656b2a9c3b2416437a811e07466ca712f5a5b5a", + "0x0000000000000000000000000000000000000000000000000000000000000007": "0x000000000000000000000000ad3ecf23c0c8983b07163708be6d763b5f056193", + "0x000000000000000000000000000000000000000000000000000000000000000c": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x000000000000000000000000000000000000000000000000000000000000000d": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x000000000000000000000000000000000000000000000000000000000000000f": "0x0000000000000000000000000000000000000000000000000000000057870858", + "0x0000000000000000000000000000000000000000000000000000000000000011": "0x0000000000000000000000bb9bc244d798123fde783fcc1c72d3bb8c18941301", + "0x0000000000000000000000000000000000000000000000000000000000000016": "0x00000000000000000000000000000000000000000003034f5ca7d45e17df199b", + "0x0421a2c4dbea98e8df669bb77238b62677daa210c5fbc46600627f90c03d0f08": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e571": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e572": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e573": "0x0000000000000000000000000000000000000000000000000000000000000000", + 
"0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e574": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e575": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e576": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e577": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e578": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e579": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e57e": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e57f": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e580": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e581": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e582": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e583": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e584": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e585": "0x0000000000000000000000000000000000000000000000000000000000000000", + 
"0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e586": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e587": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e58c": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e58d": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e58e": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e58f": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e590": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e591": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e592": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e593": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e594": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e595": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e59a": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e59b": "0x0000000000000000000000000000000000000000000000000000000000000000", + 
"0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e59c": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e59d": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e59e": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e59f": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e5a0": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e5a1": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e5a2": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e5a3": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e5a8": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e5a9": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e5aa": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e5ab": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e5ac": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e5ad": "0x0000000000000000000000000000000000000000000000000000000000000000", + 
"0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e5ae": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e5af": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e5b0": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e5b1": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e5b6": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e5b7": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e5b8": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e5b9": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e5ba": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e5bb": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e5bc": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e5bd": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e5be": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e5bf": "0x0000000000000000000000000000000000000000000000000000000000000000", + 
"0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e5c4": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e5c5": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e5c6": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e5c7": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e5c8": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e5c9": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e5ca": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e5cb": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e5cc": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e5cd": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e5d2": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e5d3": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e5d4": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e5d5": "0x0000000000000000000000000000000000000000000000000000000000000000", + 
"0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e5d6": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e5d7": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e5d8": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e5d9": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e5da": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e5db": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e5e0": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e5e1": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e5e2": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e5e3": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e5e4": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e5e5": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e5e6": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e5e7": "0x0000000000000000000000000000000000000000000000000000000000000000", + 
"0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e5e8": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e5e9": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e5ee": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e5ef": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e5f0": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e5f1": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e5f2": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e5f3": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e5f4": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e5f5": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e5f6": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e5f7": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e5fc": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x330b9432081afd3b64172d5df1f72ca72fc17e7e729ceb8b7529f91eee8b3f23": "0x0000000000000000000000000000000000000000000000000000000000000000", + 
"0x33f9bdb745e7edb1789dd1d68f40f693940aa8313b4f6bdc543be443dbc85e63": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x4830270ad35536baba417a92ea24656430586a37c90999b53c4d72ef1090cc9d": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x4b16ba88f291613070529c10c8bdc41e973e2e2aa412ed92254cdca71ccfbc89": "0x00000000000000000000000000000000000000000001819451f999d617dafa76", + "0x6546a4760869a51d07a75a31f00531836c32152c06dc88ac342da52fac5d939e": "0x000000000000000000000000000000000000000000000026b8b4a0b1e8292492", + "0x6796d25b854f17a11b940a9ff2822335f7d9bd1b85fbcd9d1f5cf742099d477a": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x711886c99bc7a6e316551823dca012bd5b4381b57cec388f72c4b8105c1ed4ad": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x74024021ec74dc59b0fa1b66e9f430163a5e1128785ec9495f9686628ca7cc2b": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x79a0e9ff42282e7cbcb539e164f024ab90021633de05f600fff6d16305888d26": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x81ffe0a69ee20c37e3f3ba834da8b20475846fcde1f4a39fdfc628a2560076aa": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x8f85b96a91f601f62149f5dd6a35d6168f6de5bc047a18e3cf7b97a3843c6ffd": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x946f68a04a200ebe87f2f896f7f6c08f4e22813db910c8a6d6abf17611ce3ffb": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x9c1ad2f16775f94ffd360e8bc716f86016a3fcf90992b5b4f3312353efd1bd61": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0xa66ae63934365a757bf33d70ca0a28352da4c2fe6cb147bf29d69fbea3a706e0": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0xa7653edcf1403f7ce2f75784d5f34ca5f57ff110bd0c3abbdcc5a84f101dc83a": "0x00000000000000000000000000000000000000000001819451f999d617dafa93", + 
"0xa87317e3ffd5ed16f357bd31427bd97cbb35fc51ad1e00feec89bdfe82c5dba4": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0xaa535eb427f7963e49601f9032ee6b62a9f72b6b3c610a5f11faf8dc68a97e2a": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0xaade287f2b81ac58dcc6ee0c381cde85b6aa7a9a769be73605f1af9453a340a0": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0xb56a086d82c3921c13c13d1d53f18bbbe36d9d1c4862be8339a5171feb94c164": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0xb6ab9f1541f42dc4feba15ccd18bc3af7c8f99cafb184ab65539883a68c7a1a9": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0xbad9e5f7dc3001078ea6433993a2f145c2ef9af1c5137a35e9c173c208990249": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0xc319152db8781ef1f12090aad94325d650e39c8a20285c7e02959817118f3f28": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0xde65b6d76ea4a5547af9707e6e099bba6f16dbc7b5cf97fb8fedc82583b38de0": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0xdf71c8506c3cf85e2e677b60ec28fe60eb820775001bdce289e3253f304f22e8": "0x0000000000000000000000000000000000000000000000000000000000000000" + } + }, + "0x4fd27b205895e698fa350f7ea57cec8a21927fcd": { + "balance": "0x0", + "nonce": "11", + "code": 
"0x606060405236156100f05760e060020a600035046303afc23581146100f257806313af403514610114578063186ef962146101365780632e64cec11461015857806331962cdc146101d2578063365a86fc146101f45780634162169f146102065780634dfc3db61461021857806365f13792146102585780636637b88214610441578063715d832f146104625780637452c2e61461048457806386c9b536146105005780638da5cb5b14610512578063975057e714610524578063a8618f711461059f578063d0679d341461061b578063d1c3c84a14610698578063d9d35966146106aa578063f3273907146106c9575b005b6100f0600435600054600160a060020a03908116339091161461072e57610002565b6100f0600435600054600160a060020a03908116339091161461074457610002565b6100f0600435600054600160a060020a03908116339091161461075957610002565b6100f06000805481908190600160a060020a0390811633909116148015906101905750600154600160a060020a039081163390911614155b80156101ac5750600254600160a060020a039081163390911614155b80156101c85750600354600160a060020a039081163390911614155b1561076e57610002565b6100f0600435600054600160a060020a039081163390911614610a5d57610002565b6106eb600154600160a060020a031681565b6106eb600454600160a060020a031681565b61070860008054600160a060020a03908116339091161480159061024c5750600154600160a060020a039081163390911614155b15610a72575060015b90565b6107086004355b600060006000600060006000600460009054906101000a9004600160a060020a0316600160a060020a0316630e7082036040518160e060020a0281526004018090506020604051808303816000876161da5a03f1156100025750505060405180519060200150945084600160a060020a031663d2cc718f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f115610002575050604080518051600480547f81f03fcb000000000000000000000000000000000000000000000000000000008452600160a060020a038d81169285019290925293519198509290921692506381f03fcb916024828101926020929190829003018187876161da5a03f115610002575050604080518051600480547f18160ddd0000000000000000000000000000000000000000000000000000000084529351919750600160a060020a039390931693506318160ddd92828101926020929190829003018187876161da5a03f1156100025750506040805180516004805460e060020a6370a08
231028452600160a060020a038d81169285019290925293519196509290921692506370a08231916024828101926020929190829003018187876161da5a03f11561000257505060405151909402919091049695505050505050565b6100f060043560005433600160a060020a03908116911614610ad957610002565b6100f06004356000805433600160a060020a03908116911614610aff57610002565b61071a600435600080548190819033600160a060020a039081169116148015906104be5750600154600160a060020a039081163390911614155b80156104da5750600254600160a060020a039081163390911614155b80156104f65750600354600160a060020a039081163390911614155b15610be657610002565b6106eb600354600160a060020a031681565b6106eb600054600160a060020a031681565b6100f06000805481908190819033600160a060020a0390811691161480159061055d5750600154600160a060020a039081163390911614155b80156105795750600254600160a060020a039081163390911614155b80156105955750600354600160a060020a039081163390911614155b15610c4457610002565b61071a6004355b60006000600460009054906101000a9004600160a060020a0316600160a060020a03166381f03fcb846040518260e060020a0281526004018082600160a060020a031681526020019150506020604051808303816000876161da5a03f115610002575050604051519150610da490508361025f565b61071a60043560243560008054819033600160a060020a039081169116148015906106565750600154600160a060020a039081163390911614155b80156106725750600254600160a060020a039081163390911614155b801561068e5750600354600160a060020a039081163390911614155b15610dac57610002565b6106eb600254600160a060020a031681565b6100f06000805433600160a060020a03908116911614610ec457610002565b6106eb6004356000805433600160a060020a03908116911614610fd357610002565b60408051600160a060020a03929092168252519081900360200190f35b60408051918252519081900360200190f35b604080519115158252519081900360200190f35b60038054600160a060020a031916821790555b50565b60008054600160a060020a0319168217905550565b60028054600160a060020a0319168217905550565b30925061077a836105a6565b1561082557600480546006546040805160e060020a6370a08231028152600160a060020a038881169582019590955290519284169363a9059cbb9392169184916370a0823191602482810192602092919082900
301816000876161da5a03f11561000257505060408051805160e060020a63a9059cbb02825260048201949094526024810193909352516044838101936020935082900301816000876161da5a03f115610002575050505b600091505b6005548210156108f45760058054600454600160a060020a0316916370a0823191859081101561000257600091825260408051600080516020611089833981519152929092015460e060020a6370a08231028352600160a060020a0316600483015251602482810193602093839003909101908290876161da5a03f115610002575050604051519150506000811115610a51576108f96005600050838154811015610002576000919091526000805160206110898339815191520154600160a060020a03166105a6565b505050565b15156109c25760058054600454600160a060020a0316916323b872dd918590811015610002575060009081526040805160008051602061108983398151915287015460e060020a6323b872dd028252600160a060020a03908116600483015230166024820152604481018690529051606482810193602093839003909101908290876161da5a03f1156100025750506040805183815290517f92da44f6982cd1ca7a9c851f8c39b26c80c235d7bb9fd59bce334fa634a1728b92509081900360200190a1610a51565b60058054600454600160a060020a0316916323b872dd918590811015610002575060009081526006546040805160008051602061108983398151915288015460e060020a6323b872dd028252600160a060020a0390811660048301529290921660248301526044820186905251606482810193602093839003909101908290876161da5a03f115610002575050505b6001919091019061082a565b60018054600160a060020a0319168217905550565b600454600160a060020a031660001415610a8e57506002610255565b600354600160a060020a031660001415610aaa57506003610255565b600254600160a060020a031660001415610ac657506004610255565b6005546000141561025557506005610255565b6005546000901115610aea57610002565b60048054600160a060020a0319168217905550565b5060005b81811015610b5a57600580546001818101808455930192909190828015829011610b5e576000839052610b5e906000805160206110898339815191529081019083015b80821115610bd65760008155600101610b46565b5050565b5050604051600454600160a060020a0316925090506082806110078339018082600160a060020a03168152602001915050604051809103906000f0600580546000198101908110156100025760009190915260008051602
0611089833981519152018054600160a060020a0319169091179055610b03565b5090565b600092505b5050919050565b600091505b600554821015610bda57600580548390811015610002576000919091526000805160206110898339815191520154600160a060020a0390811691508416811415610c385760019250610bdf565b60019190910190610beb565b600460009054906101000a9004600160a060020a0316600160a060020a03166370a08231306040518260e060020a0281526004018082600160a060020a031681526020019150506020604051808303816000876161da5a03f115610002575050604051519450506000841415610cbb575b50505050565b600554600093506004900460010191505b81831015610cb557506005805460045442850182900692600160a060020a03919091169163a9059cbb9190849081101561000257600091825260408051600080516020611089833981519152929092015460e060020a63a9059cbb028352600160a060020a03166004830152868904602483015251604482810193602093839003909101908290876161da5a03f11561000257505060408051848704815290517fc6d8c0af6d21f291e7c359603aa97e0ed500f04db6e983b9fce75a91c6b8da6b92509081900360200190a160019290920191610ccc565b901192915050565b600460009054906101000a9004600160a060020a0316600160a060020a03166370a08231306040518260e060020a0281526004018082600160a060020a031681526020019150506020604051808303816000876161da5a03f1156100025750506040515191505082811015610e5f57604080516020810185905281517f703690365b2d63b5b9ec4471a919cdd5924f745170399a5d24927fd07d81a04d929181900390910190a1600091505b5092915050565b604080516004805460e060020a63a9059cbb028352600160a060020a038881169284019290925260248301879052925192169163a9059cbb9160448082019260209290919082900301816000876161da5a03f115610002575060019350610e58915050565b600480546006546040805160e060020a6370a08231028152600160a060020a0392831694810194909452519116916370a0823191602482810192602092919082900301816000876161da5a03f11561000257505060405151915050600081111561074157604080516004805460065460e060020a6323b872dd028452600160a060020a039081169284019290925230821660248401526044830185905292519216916323b872dd9160648181019260209290919082900301816000876161da5a03f1156100025750505060405180519060200150507f32e95f9
21f72e9e736ccad1cc1c0ef6e3c3c08204eb74e9ee4ae8f98e195e3f0816040518082815260200191505060405180910390a150565b600580548390811015610002576000919091526000805160206110898339815191520154600160a060020a03169291505056006060604052604051602080608283396080604081905291517f095ea7b3000000000000000000000000000000000000000000000000000000008352600160a060020a0333811660845260001960a4819052919384939184169163095ea7b39160c491906044816000876161da5a03f115600257505033600160a060020a03169050ff036b6384b5eca791c62761152d0c79bb0604c104a5fb6f4eb0703f3154bb3db0", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000000": "0x000000000000000000000000be3ae5cb97c253dda67181c6e34e43f5c275e08b", + "0x0000000000000000000000000000000000000000000000000000000000000001": "0x00000000000000000000000003e3d4561a8f8e975fdcd798d32857a20cf25e7e", + "0x0000000000000000000000000000000000000000000000000000000000000002": "0x0000000000000000000000006dbfc63479ffc031f23e94dc91befa38bec2c25f", + "0x0000000000000000000000000000000000000000000000000000000000000003": "0x0000000000000000000000006e715ab4f598eacf0016b9b35ef33e4141844ccc", + "0x0000000000000000000000000000000000000000000000000000000000000004": "0x000000000000000000000000304a554a310c7e546dfe434669c62820b7d83490", + "0x0000000000000000000000000000000000000000000000000000000000000005": "0x000000000000000000000000000000000000000000000000000000000000000a", + "0x036b6384b5eca791c62761152d0c79bb0604c104a5fb6f4eb0703f3154bb3db6": "0x0000000000000000000000007ccbc69292c7a6d7b538c91f3b283de97906cf30", + "0x036b6384b5eca791c62761152d0c79bb0604c104a5fb6f4eb0703f3154bb3db7": "0x0000000000000000000000001b9ec8ba24630b75a7a958153ffff56dd6d4b6a2", + "0x036b6384b5eca791c62761152d0c79bb0604c104a5fb6f4eb0703f3154bb3db8": "0x000000000000000000000000c3a2c744ad1f5253c736875b93bacce5b01b060b" + } + }, + "0x6dbfc63479ffc031f23e94dc91befa38bec2c25f": { + "balance": "0x0", + "code": 
"0x606060405236156100da5760e060020a600035046303afc23581146100dc57806309f180f9146100fe5780630a39ce021461016c57806310aa1caa146101a057806313af40351461021e57806331962cdc14610240578063365a86fc146102625780634162169f146102745780634dfc3db6146102865780636637b882146102c65780636de45dee146102e85780638da5cb5b146103285780639137c1a71461033a578063b199efb51461035c578063b3a69f861461036e578063d5d7ff3c1461040b578063d95f98ce1461044b578063fe39084c146104b5575b005b6100da600435600054600160a060020a0390811633909116146104f657610002565b6104c76004355b6002546040805160e060020a6381f03fcb028152600160a060020a0384811660048301529151600093849384939116916381f03fcb91602481810192602092909190829003018187876161da5a03f1156100025750506040515192506105199050846101a7565b6104d960043560068054829081101561000257506000526000805160206110cf8339815191520154600160a060020a031681565b6104c76004355b604080516003547f65f13792000000000000000000000000000000000000000000000000000000008252600160a060020a038481166004840152925160009391909116916365f13792916024828101926020929190829003018187876161da5a03f115610002575050604051516001019392505050565b6100da600435600054600160a060020a03908116339091161461052c57610002565b6100da600435600054600160a060020a03908116339091161461054157610002565b6104d9600154600160a060020a031681565b6104d9600254600160a060020a031681565b6104c760008054600160a060020a0390811633909116148015906102ba5750600154600160a060020a039081163390911614155b15610556575060015b90565b6100da600435600054600160a060020a03908116339091161461063c57610002565b6100da600435600054600160a060020a03908116339091161480159061031e5750600154600160a060020a039081163390911614155b1561065157610002565b6104d9600054600160a060020a031681565b6100da600435600054600160a060020a03908116339091161461081b57610002565b6104d9600354600160a060020a031681565b6104c75b6000805b6006548110156108175760068054600254600160a060020a0316916370a08231918490811015610002576000918252604080516000805160206110cf833981519152929092015460e060020a6370a08231028352600160a060020a03166004830152516024828101936020938390039
09101908290876161da5a03f11561000257505060405151929092019150600101610376565b6100da6004356000805433600160a060020a039081169116148015906104415750600154600160a060020a039081163390911614155b1561083057610002565b6100da60006000600060006000600060006000600060006000600060009054906101000a9004600160a060020a0316600160a060020a031633600160a060020a0316141580156104ab5750600154600160a060020a039081163390911614155b1561092d57610002565b6104d9600454600160a060020a031681565b60408051918252519081900360200190f35b60408051600160a060020a03929092168252519081900360200190f35b60048054600160a060020a031916821790555b50565b81810392505b5050919050565b90508082111561050c5760009250610512565b60008054600160a060020a0319168217905550565b60018054600160a060020a0319168217905550565b600254600160a060020a031660001415610572575060026102c3565b600354600160a060020a03166000141561058e575060036102c3565b600354604080517fd1c3c84a000000000000000000000000000000000000000000000000000000008152905130600160a060020a0390811693169163d1c3c84a91600482810192602092919082900301816000876161da5a03f11561000257505060405151600160a060020a0316909114905061060d575060046102c3565b60065460001415610620575060056102c3565b600454600160a060020a0316600014156102c3575060066102c3565b60028054600160a060020a0319168217905550565b6106ab816000805b6006548110156110bc5782600160a060020a03166006600050828154811015610002576000919091526000805160206110cf8339815191520154600160a060020a031614156110c757600191506110c1565b156106b557610509565b600354604080517f7452c2e6000000000000000000000000000000000000000000000000000000008152600160a060020a03848116600483015291519290911691637452c2e69160248181019260209290919082900301816000876161da5a03f1156100025750506040515115905061072d57610509565b30600160a060020a031681600160a060020a0316148061075b5750600354600160a060020a03908116908216145b806107745750600154600160a060020a03908116908216145b1561077e57610509565b60068054600181018083559091908280158290116107bf578183600052602060002091820191016107bf91905b8082111561081757600081556001016107ab565b50506006805484935090915060001
9810190811015610002575080546000919091527ff652222313e28459528d920b65115c16c04f3efc82aaedc97be59f3f377c0d3e018054600160a060020a031916909117905550565b5090565b60038054600160a060020a0319168217905550565b5060005b6006548110156109215781600160a060020a03166006600050828154811015610002576000919091526000805160206110cf8339815191520154600160a060020a0316141561092557600680546000198101908110156100025760009182526000805160206110cf83398151915201909054906101000a9004600160a060020a03166006600050828154811015610002576000805160206110cf833981519152018054600160a060020a0319169092179091558054600019810180835590919082801582901161091c5761091c906000805160206110cf8339815191529081019083016107ab565b505050505b5050565b600101610834565b6002546040805160e060020a6381f03fcb02815230600160a060020a0381811660048401529251909e5092909116916381f03fcb9160248181019260209290919082900301816000876161da5a03f115610002575050604051519a505060008a14156109c1576040517f044c61dab36644651a1f82d87d6494a3a6450a6edde20b9baf45e374fb2d0bb990600090a1610e04565b6109c9610372565b6040805160025460e060020a6370a08231028252600160a060020a038f811660048401529251939c50909116916370a082319160248181019260209290919082900301816000876161da5a03f115610002575050604051519850606497505086881015610ade57604080516003547fd0679d34000000000000000000000000000000000000000000000000000000008252600160a060020a038e811660048401528b8b036024840152925192169163d0679d349160448082019260209290919082900301816000876161da5a03f1156100025750506040515115159050610ad8576040517f044c61dab36644651a1f82d87d6494a3a6450a6edde20b9baf45e374fb2d0bb990600090a1610e04565b86975087505b600095505b600654861015610b2457610d8e6006600050878154811015610002576000919091526000805160206110cf8339815191520154600160a060020a0316610105565b6040805160025460e060020a6381f03fcb028252600160a060020a038e8116600484015292519216916381f03fcb9160248181019260209290919082900301816000876161da5a03f11561000257505050604051805190602001509250600260009054906101000a9004600160a060020a0316600160a060020a03166370a082318c6040518260e060020a0281526004018
082600160a060020a031681526020019150506020604051808303816000876161da5a03f11561000257505060405151600097509250505b600654861015610e045760068054600254600160a060020a0316916370a082319189908110156100025760009182526000805160206110cf83398151915201546040805160e060020a6370a08231028152600160a060020a0392909216600483015251602482810193602093839003909101908290876161da5a03f115610002575050604051518084028b900495509150506000841115610d82577ff340c079d598119636d42046c6a2d2faf7a68c04aecee516f0e0b8a9e79b86666006600050878154811015610002576000919091526000805160206110cf833981519152015460408051600160a060020a03929092168252602082018790528386048c900482820152519081900360600190a160025460068054600160a060020a03929092169163a9059cbb919089908110156100025760009182526000805160206110cf83398151915201546040805160e060020a63a9059cbb028152600160a060020a039290921660048301526024820189905251604482810193602093839003909101908290876161da5a03f115610002575050505b60019590950194610bed565b9450898589020493508760001415610e11577fdb0f19c627ca59a2db73b1e1e8c4853f34a58afa92b29331e56c75144fa0c84c6006600050878154811015610002576000919091526000805160206110cf833981519152015460408051600160a060020a03929092168252519081900360200190a15b5050505050505050505050565b87841115610e84577f211d59fc569e166e12f7ca82135d85b1f178f636fefe40d168f0113cf07f818f6006600050878154811015610002576000919091526000805160206110cf833981519152015460408051600160a060020a03929092168252519081900360200190a1879350610ee8565b7f4b0bc4f25f8d0b92d2e12b686ba96cd75e4e69325e6cf7b1f3119d14eaf2cbdf6006600050878154811015610002576000919091526000805160206110cf833981519152015460408051600160a060020a03929092168252519081900360200190a15b60008411156110b05760068054998501997ff340c079d598119636d42046c6a2d2faf7a68c04aecee516f0e0b8a9e79b86669190889081101561000257600091909152604080516000805160206110cf8339815191529290920154600160a060020a0316825260208201879052818101889052519081900360600190a160025460068054600160a060020a03929092169163a9059cbb91908990811015610002576000918252604080516000805160206110cf83398
1519152929092015460e060020a63a9059cbb028352600160a060020a031660048301526024820189905251604482810193602093839003909101908290876161da5a03f1156100025750506040805160025460e060020a6370a08231028252600160a060020a038f811660048401529251921692506370a0823191602482810192602092919082900301816000876161da5a03f115610002575050506040518051906020015097508750600260009054906101000a9004600160a060020a0316600160a060020a03166381f03fcb8c6040518260e060020a0281526004018082600160a060020a031681526020019150506020604051808303816000876161da5a03f115610002575050604051519a50505b60019590950194610ae3565b600091505b50919050565b60010161065956f652222313e28459528d920b65115c16c04f3efc82aaedc97be59f3f377c0d3f", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000000": "0x000000000000000000000000be3ae5cb97c253dda67181c6e34e43f5c275e08b", + "0x0000000000000000000000000000000000000000000000000000000000000001": "0x00000000000000000000000003e3d4561a8f8e975fdcd798d32857a20cf25e7e", + "0x0000000000000000000000000000000000000000000000000000000000000002": "0x000000000000000000000000304a554a310c7e546dfe434669c62820b7d83490", + "0x0000000000000000000000000000000000000000000000000000000000000003": "0x0000000000000000000000004fd27b205895e698fa350f7ea57cec8a21927fcd", + "0x0000000000000000000000000000000000000000000000000000000000000006": "0x0000000000000000000000000000000000000000000000000000000000000001", + "0xf652222313e28459528d920b65115c16c04f3efc82aaedc97be59f3f377c0d3f": "0x000000000000000000000000da4a4626d3e16e094de3225a751aab7128e96526" + } + }, + "0x6e715ab4f598eacf0016b9b35ef33e4141844ccc": { + "balance": "0x4563918244f400000", + "code": 
"0x606060405236156100da5760e060020a600035046313af40358114610145578063186ef9621461016757806331962cdc14610189578063365a86fc146101ab5780634162169f146101bd57806348c981e2146101cf5780634dfc3db61461020f57806361bc221a146102505780636637b882146102595780636c0e29601461027b5780638da5cb5b146104795780638f2b29a71461048b5780639137c1a714610602578063b199efb514610624578063b826c4fd14610636578063d1c3c84a1461063f578063d2f0ad9214610651578063fc340716146106bf575b6107236002546040805160e060020a630e7082030281529051600092600160a060020a031691630e708203916004828101926020929190829003018187876161da5a03f1156100025750506040515133600160a060020a039081169116149050610737575060015b90565b610861600435600054600160a060020a03908116339091161461089257610002565b610861600435600054600160a060020a0390811633909116146108a757610002565b610861600435600054600160a060020a0390811633909116146108bc57610002565b610863600154600160a060020a031681565b610863600254600160a060020a031681565b610861600435600054600160a060020a0390811633909116148015906102055750600154600160a060020a039081163390911614155b156108d157610002565b61088060008054600160a060020a0390811633909116148015906102435750600154600160a060020a039081163390911614155b156108fa57506001610142565b61088060055481565b610861600435600054600160a060020a0390811633909116146109e857610002565b6108805b6000600060006000600060006000600060006000600260009054906101000a9004600160a060020a0316600160a060020a0316630e7082036040518160e060020a0281526004018090506020604051808303816000876161da5a03f1156100025750505060405180519060200150985088600160a060020a03163130600160a060020a03163101975088600160a060020a031663d2cc718f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060408051805160025460035460e060020a6381f03fcb028452600160a060020a0390811660048501529351919b5090921692506381f03fcb916024828101926020929190829003018187876161da5a03f11561000257505060408051805160025460e060020a6318160ddd0283529251909950600160a060020a039290921692506318160ddd916004828101926020929190829003018187876161da5a03f1156100025
7505060408051805160025460035460e060020a6370a08231028452600160a060020a039081166004850152935191995090921692506370a08231916024828101926020929190829003018187876161da5a03f115610002575050506040518051906020015093508784860202925088600160a060020a03163188880103840291508585029050808210156109fd57610a05565b610863600054600160a060020a031681565b61088060043560006000600060006000600260009054906101000a9004600160a060020a0316600160a060020a0316630e7082036040518160e060020a0281526004018090506020604051808303816000876161da5a03f1156100025750505060405180519060200150935083600160a060020a031663d2cc718f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060408051600254815160035460e060020a6381f03fcb028452600160a060020a0390811660048501529351909750921692506381f03fcb916024808301926020929190829003018187876161da5a03f11561000257505060408051805160025460e060020a6318160ddd0283529251909550600160a060020a039290921692506318160ddd916004828101926020929190829003018187876161da5a03f115610002575050506040518051906020015090508581038183020486820387850204039450845084945050505050919050565b610861600435600054600160a060020a039081163390911614610a1157610002565b610863600354600160a060020a031681565b61088060065481565b610863600454600160a060020a031681565b6108806004355b6040805160025460035460e060020a6370a08231028352600160a060020a039081166004840152925160009384939216916370a08231916024828101926020929190829003018187876161da5a03f115610002575050604051519093046001019392505050565b61086160006000600060006000600060006000600060009054906101000a9004600160a060020a0316600160a060020a031633600160a060020a0316141580156107195750600154600160a060020a039081163390911614155b15610cbc57610002565b604080519115158252519081900360200190f35b600654600554600019909101901115610803576040805160025460035460e060020a6370a0823102835230600160a060020a03908116600485015293519184169363a9059cbb9391169160019185916370a082319160248082019260209290919082900301816000876161da5a03f11561000257505060408051805160e060020a63a9059cbb028252600482019590955260001994909
401602485015251604480850194602094509192509082900301816000876161da5a03f115610002575060019250610142915050565b6005805460010190556040805160025460e160020a63664d71fb0282529151600160a060020a03929092169163cc9ae3f69160048181019260209290919082900301816000876161da5a03f115610002575060019250610142915050565b005b60408051600160a060020a03929092168252519081900360200190f35b60408051918252519081900360200190f35b60008054600160a060020a0319168217905550565b60048054600160a060020a0319168217905550565b60018054600160a060020a0319168217905550565b604051600160a060020a0382811691309091163190600081818185876185025a03f15050505050565b600254600160a060020a03166000141561091657506002610142565b600354600160a060020a03166000141561093257506003610142565b600454600160a060020a03166000141561094e57506004610142565b600354604080517f86c9b536000000000000000000000000000000000000000000000000000000008152905130600160a060020a039081169316916386c9b53691600482810192602092919082900301816000876161da5a03f11561000257505060405151600160a060020a031690911490506109cd57506005610142565b30600160a060020a0316316000141561014257506006610142565b60028054600160a060020a0319168217905550565b808203830499505b50505050505050505090565b60038054600160a060020a0319168217905550565b6006819055600354604080517fd0679d34000000000000000000000000000000000000000000000000000000008152600160a060020a038981166004830152938b04602482018190529151919750919092169163d0679d349160448181019260209290919082900301816000876161da5a03f11561000257505060408051600160055530600160a060020a031631815290517f7027eecbd2a688fc1fa281702b311ed7168571514adfd17014a55d828cb4338292509081900360200190a1604051600160a060020a0389811691309091163190600081818185876185025a03f15050604080517fd2cc718f000000000000000000000000000000000000000000000000000000008152905163d2cc718f9250600482810192602092919082900301816000876161da5a03f11561000257505060408051805160025460e060020a6370a08231028352600160a060020a038a81166004850152935191975090921692506370a0823191602482810192602092919082900301816000876161da5a03f115610002575050604080518051600
25460e060020a6381f03fcb028352600160a060020a038a81166004850152935191965090921692506381f03fcb91602482810192602092919082900301816000876161da5a03f11561000257505060408051805160025460e160020a63664d71fb0283529251909450600160a060020a0392909216925063cc9ae3f691600482810192602092919082900301816000876161da5a03f115610002575050604080516002546004805460e060020a63a9059cbb028452600160a060020a03908116918401919091526001602484015292519216925063a9059cbb91604482810192602092919082900301816000876161da5a03f115610002575050505b5050505050505050565b6002546040805160e060020a630e7082030281529051600160a060020a0390921691630e7082039160048181019260209290919082900301816000876161da5a03f115610002575050604051519850610d15905061027f565b60408051600160a060020a038b1631815290519198507f07cf7e805770612a8b2ee8e0bcbba8aa908df5f85fbc4f9e2ef384cf75315038919081900360200190a187600160a060020a03163130600160a060020a0316310195508660001480610d7e5750856000145b15610db1576040517f30090d86c52e12fbc1213c1ecf7e193d6ce4a5c838c8c41d06c1a9daea8a2cec90600090a1610cb2565b309450610a268761065856", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000000": "0x000000000000000000000000be3ae5cb97c253dda67181c6e34e43f5c275e08b", + "0x0000000000000000000000000000000000000000000000000000000000000001": "0x00000000000000000000000003e3d4561a8f8e975fdcd798d32857a20cf25e7e", + "0x0000000000000000000000000000000000000000000000000000000000000002": "0x000000000000000000000000304a554a310c7e546dfe434669c62820b7d83490", + "0x0000000000000000000000000000000000000000000000000000000000000003": "0x0000000000000000000000004fd27b205895e698fa350f7ea57cec8a21927fcd", + "0x0000000000000000000000000000000000000000000000000000000000000004": "0x0000000000000000000000006dbfc63479ffc031f23e94dc91befa38bec2c25f", + "0x0000000000000000000000000000000000000000000000000000000000000005": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000006": 
"0x0000000000000000000000000000000000000000000000000000000000000000" + } + }, + "0x7498bb5749c9801f1f7e490baf5f966dbfe4e97b": { + "balance": "0x0", + "code": "0x606060405236156100985760e060020a6000350463013cf08b811461009a57806313af4035146100d757806331962cdc146100f9578063365a86fc1461011b578063400e39491461012d5780634162169f146101375780634dfc3db6146101495780636637b8821461018a5780638da5cb5b146101ac578063e66f53b7146101be578063e90956cf146101d0578063ff2f4bd2146101f2575b005b61024460043560048054829081101561000257506000527f8a35acfbc15ff81a39ae7d344fd709f28e8600b4aa8c65c6b64bfe7fe36bd19b015481565b610098600435600054600160a060020a03908116339091161461026157610002565b610098600435600054600160a060020a03908116339091161461027657610002565b61024e600154600160a060020a031681565b6102446004545b90565b61024e600254600160a060020a031681565b61024460008054600160a060020a03908116339091161480159061017d5750600154600160a060020a039081163390911614155b1561028b57506001610134565b610098600435600054600160a060020a0390811633909116146102c157610002565b61024e600054600160a060020a031681565b61024e600354600160a060020a031681565b610098600435600054600160a060020a0390811633909116146102d657610002565b6100986000606081815260a06040526080828152825491929091819033600160a060020a0390811691161480159061023a5750600154600160a060020a039081163390911614155b1561031857610002565b6060908152602090f35b600160a060020a03166060908152602090f35b60008054600160a060020a0319168217905550565b60018054600160a060020a0319168217905550565b600254600160a060020a03168114156102a657506002610134565b600354600160a060020a031681141561013457506003610134565b60028054600160a060020a0319168217905550565b60038054600160a060020a0319168217905550565b50508054839250600019810190811015610002579060005260206000209001600050819055505b50505050565b6002547f70a082310000000000000000000000000000000000000000000000000000000060a090815230600160a060020a0390811660a45291909116906370a082319060c49060209060248187876161da5a03f11561000257505060405151821415905061038557610312565b60006040518059106103945750595b90808
25280602002602001820160405250935062093a809150600260009054906101000a9004600160a060020a0316600160a060020a031663612e45a3600360009054906101000a9004600160a060020a0316600086888760016040518760e060020a0281526004018087600160a060020a03168152602001868152602001806020018060200185815260200184151581526020018381038352878181518152602001915080519060200190808383829060006004602084601f0104600f02600301f150905090810190601f1680156104815780820380516001836020036101000a031916815260200191505b508381038252868181518152602001915080519060200190808383829060006004602084601f0104600f02600301f150905090810190601f1680156104da5780820380516001836020036101000a031916815260200191505b50985050505050505050506020604051808303816000876161da5a03f1156100025750506040515160048054600181018083559294509250908280158290116102eb578183600052602060002091820191016102eb91905b808211156105465760008155600101610532565b509056", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000000": "0x000000000000000000000000be3ae5cb97c253dda67181c6e34e43f5c275e08b", + "0x0000000000000000000000000000000000000000000000000000000000000001": "0x00000000000000000000000003e3d4561a8f8e975fdcd798d32857a20cf25e7e", + "0x0000000000000000000000000000000000000000000000000000000000000002": "0x000000000000000000000000304a554a310c7e546dfe434669c62820b7d83490", + "0x0000000000000000000000000000000000000000000000000000000000000003": "0x000000000000000000000000be3ae5cb97c253dda67181c6e34e43f5c275e08b", + "0x0000000000000000000000000000000000000000000000000000000000000004": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x8a35acfbc15ff81a39ae7d344fd709f28e8600b4aa8c65c6b64bfe7fe36bd19b": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x8a35acfbc15ff81a39ae7d344fd709f28e8600b4aa8c65c6b64bfe7fe36bd19c": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x8a35acfbc15ff81a39ae7d344fd709f28e8600b4aa8c65c6b64bfe7fe36bd19d": 
"0x0000000000000000000000000000000000000000000000000000000000000000", + "0x8a35acfbc15ff81a39ae7d344fd709f28e8600b4aa8c65c6b64bfe7fe36bd19e": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x8a35acfbc15ff81a39ae7d344fd709f28e8600b4aa8c65c6b64bfe7fe36bd19f": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x8a35acfbc15ff81a39ae7d344fd709f28e8600b4aa8c65c6b64bfe7fe36bd1a0": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x8a35acfbc15ff81a39ae7d344fd709f28e8600b4aa8c65c6b64bfe7fe36bd1a1": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x8a35acfbc15ff81a39ae7d344fd709f28e8600b4aa8c65c6b64bfe7fe36bd1a2": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x8a35acfbc15ff81a39ae7d344fd709f28e8600b4aa8c65c6b64bfe7fe36bd1a3": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x8a35acfbc15ff81a39ae7d344fd709f28e8600b4aa8c65c6b64bfe7fe36bd1a4": "0x0000000000000000000000000000000000000000000000000000000000000000" + } + }, + "0xad3ecf23c0c8983b07163708be6d763b5f056193": { + "balance": "0x0", + "code": "0x606060405236156100405760e060020a60003504630221038a811461004d57806318bdc79a146100aa5780638da5cb5b146100be578063d2cc718f146100d0575b6100d96001805434019055565b6100db6004356024356000805433600160a060020a0390811691161415806100755750600034115b806100a05750805460a060020a900460ff1680156100a057508054600160a060020a03848116911614155b156100f757610002565b6100db60005460ff60a060020a9091041681565b6100ed600054600160a060020a031681565b6100db60015481565b005b60408051918252519081900360200190f35b6060908152602090f35b600160a060020a0383168260608381818185876185025a03f1925050501561015c57604080518381529051600160a060020a038516917f9735b0cb909f3d21d5c16bbcccd272d85fa11446f6d679f6ecb170d2dabfecfc919081900360200190a25060015b9291505056", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000000": 
"0x000000000000000000000000304a554a310c7e546dfe434669c62820b7d83490", + "0x0000000000000000000000000000000000000000000000000000000000000001": "0x0000000000000000000000000000000000000000000000000000000000000000" + } + }, + "0xbe3ae5cb97c253dda67181c6e34e43f5c275e08b": { + "balance": "0x167d285b38143c04f", + "nonce": "68" + }, + "0xc0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89": { + "balance": "0x9651c71936", + "code": "0x606060405236156100b95760e060020a600035046313af4035811461019e57806326f5a8c9146101c1578063371fa854146101ca5780634162169f146101d35780634c8fe526146101e55780635970c915146101f757806361bc221a14610209578063625e847d146102125780636637b882146102325780637f9f519f146102555780638da5cb5b14610278578063a9059cbb1461028a578063c4463c80146102b0578063c9d27afe146102df578063e66f53b714610305575b6103176002547f0e708203000000000000000000000000000000000000000000000000000000006060908152600091600160a060020a031690630e7082039060649060209060048187876161da5a03f1156100025750506040515133600160a060020a039081169116149050610329576040805133600160a060020a03166020820152818152600f818301527f636f6e73747563746f72206661696c0000000000000000000000000000000000606082015290517fa6af7265d7ede5fbf0ee375956b52b362800d4f92e268809bef5fdf2a57924b89181900360800190a15060015b90565b61031760043560008054600160a060020a03908116339091161461049257610002565b61047560055481565b61047560045481565b61047f600254600160a060020a031681565b61047f600654600160a060020a031681565b61047f600754600160a060020a031681565b61047560035481565b61031760008054600160a060020a0390811633909116146104ef57610002565b61031760043560008054600160a060020a03908116339091161461057a57610002565b61031760043560008054600160a060020a0390811633909116146105d757610002565b61047f600054600160a060020a031681565b61031760043560243560008054600160a060020a03908116339091161461060f57610002565b61031760043560243560443560643560843560008054600160a060020a0390811633909116146106a657610002565b61031760043560243560008054600160a060020a0390811633909116146107bb57610002565b61047f600154600160a060020a0316815
65b60408051918252519081900360200190f35b60055460035460001990910190111561040257604080516002546006547f70a0823100000000000000000000000000000000000000000000000000000000835230600160a060020a03908116600485015293519184169363a9059cbb9391169184916370a0823191602480830192602092919082900301818a876161da5a03f11561000257505060408051805160e060020a63a9059cbb028252600482019490945260248101939093525160448084019360209350829003018187876161da5a03f11561000257505060016003819055915061019b9050565b6040805160038054600190810190915560025460048054925460e260020a632099877102855290840192909252600160a060020a03918216602484015292519216916382661dc491604480820192602092909190829003018187876161da5a03f11561000257506001925061019b915050565b6060908152602090f35b600160a060020a03166060908152602090f35b600160a060020a03821660609081527f3edd90e7770f06fafde38004653b33870066c33bfc923ff6102acd601f85dfbc90602090a181600060006101000a815481600160a060020a0302191690830217905550600190505b919050565b6001600355600754600160a060020a03908116908290301631606082818181858883f15050604080516002546001546004805460e260020a632099877102855290840152600160a060020a0390811660248401529251921694506382661dc493506044808201935060209291829003018187876161da5a03f11561000257506001925061019b915050565b6002805473ffffffffffffffffffffffffffffffffffffffff1916831790819055600160a060020a031660609081527fce6a5015a40a2ec38ce912a63bca374d85386207c6927d284292449f1431082290602090a15060016104ea565b600582905560608281527fbab6859bc098da798dbdc4860f0fee7467d703dadd975799e8c258b46a37d3de90602090a15060016104ea565b60025460e060020a63a9059cbb026060908152600160a060020a0385811660645260848590529091169063a9059cbb9060a49060209060448187876161da5a03f11561000257505060408051600160a060020a03861681526020810185905281517f69ca02dd4edd7bf0a4abb9ed3b7af3f14778db5d61921c7dc7cd545266326de293509081900390910190a15060015b92915050565b6006805473ffffffffffffffffffffffffffffffffffffffff1990811686179091556001600381905580548216871790556004879055600584905560078054909116831790819055600160a060020a03908116908290301
631606082818181858883f15050604080516002546004805460015460e260020a632099877102855291840152600160a060020a0390811660248401529251921694506382661dc493506044808201935060209291829003018187876161da5a03f11561000257505060408051600454600654908252600160a060020a0316602082015281517fa1ab731770d71027cd294cc0af5c8f5ec3c2ff5dbe6b75d68963d17192f8377b93509081900390910190a150600195945050505050565b6002547fc9d27afe0000000000000000000000000000000000000000000000000000000060609081526064859052831515608452600160a060020a039091169063c9d27afe9060a49060209060448187876161da5a03f11561000257505060408051858152841515602082015281517f8bfa1f40665434b48e7becc865cc0586ce3d6d2388521c05d4db87536ac8279993509081900390910190a15060016106a056", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000000": "0x00000000000000000000000003e3d4561a8f8e975fdcd798d32857a20cf25e7e", + "0x0000000000000000000000000000000000000000000000000000000000000002": "0x000000000000000000000000304a554a310c7e546dfe434669c62820b7d83490" + } + }, + "0xea674fdde714fd979de3edf0f56aa9716b898ec8": { + "balance": "0x4ab3566739e7b24371", + "nonce": "286339" + }, + "0xf835a0247b0063c04ef22006ebe57c5f11977cc4": { + "balance": "0x9645db5736", + "code": 
"0x606060405236156100b95760e060020a600035046313af4035811461019e57806326f5a8c9146101c1578063371fa854146101ca5780634162169f146101d35780634c8fe526146101e55780635970c915146101f757806361bc221a14610209578063625e847d146102125780636637b882146102325780637f9f519f146102555780638da5cb5b14610278578063a9059cbb1461028a578063c4463c80146102b0578063c9d27afe146102df578063e66f53b714610305575b6103176002547f0e708203000000000000000000000000000000000000000000000000000000006060908152600091600160a060020a031690630e7082039060649060209060048187876161da5a03f1156100025750506040515133600160a060020a039081169116149050610329576040805133600160a060020a03166020820152818152600f818301527f636f6e73747563746f72206661696c0000000000000000000000000000000000606082015290517fa6af7265d7ede5fbf0ee375956b52b362800d4f92e268809bef5fdf2a57924b89181900360800190a15060015b90565b61031760043560008054600160a060020a03908116339091161461049257610002565b61047560055481565b61047560045481565b61047f600254600160a060020a031681565b61047f600654600160a060020a031681565b61047f600754600160a060020a031681565b61047560035481565b61031760008054600160a060020a0390811633909116146104ef57610002565b61031760043560008054600160a060020a03908116339091161461057a57610002565b61031760043560008054600160a060020a0390811633909116146105d757610002565b61047f600054600160a060020a031681565b61031760043560243560008054600160a060020a03908116339091161461060f57610002565b61031760043560243560443560643560843560008054600160a060020a0390811633909116146106a657610002565b61031760043560243560008054600160a060020a0390811633909116146107bb57610002565b61047f600154600160a060020a031681565b60408051918252519081900360200190f35b60055460035460001990910190111561040257604080516002546006547f70a0823100000000000000000000000000000000000000000000000000000000835230600160a060020a03908116600485015293519184169363a9059cbb9391169184916370a0823191602480830192602092919082900301818a876161da5a03f11561000257505060408051805160e060020a63a9059cbb028252600482019490945260248101939093525160448084019360209350829003018187876
161da5a03f11561000257505060016003819055915061019b9050565b6040805160038054600190810190915560025460048054925460e260020a632099877102855290840192909252600160a060020a03918216602484015292519216916382661dc491604480820192602092909190829003018187876161da5a03f11561000257506001925061019b915050565b6060908152602090f35b600160a060020a03166060908152602090f35b600160a060020a03821660609081527f3edd90e7770f06fafde38004653b33870066c33bfc923ff6102acd601f85dfbc90602090a181600060006101000a815481600160a060020a0302191690830217905550600190505b919050565b6001600355600754600160a060020a03908116908290301631606082818181858883f15050604080516002546001546004805460e260020a632099877102855290840152600160a060020a0390811660248401529251921694506382661dc493506044808201935060209291829003018187876161da5a03f11561000257506001925061019b915050565b6002805473ffffffffffffffffffffffffffffffffffffffff1916831790819055600160a060020a031660609081527fce6a5015a40a2ec38ce912a63bca374d85386207c6927d284292449f1431082290602090a15060016104ea565b600582905560608281527fbab6859bc098da798dbdc4860f0fee7467d703dadd975799e8c258b46a37d3de90602090a15060016104ea565b60025460e060020a63a9059cbb026060908152600160a060020a0385811660645260848590529091169063a9059cbb9060a49060209060448187876161da5a03f11561000257505060408051600160a060020a03861681526020810185905281517f69ca02dd4edd7bf0a4abb9ed3b7af3f14778db5d61921c7dc7cd545266326de293509081900390910190a15060015b92915050565b6006805473ffffffffffffffffffffffffffffffffffffffff1990811686179091556001600381905580548216871790556004879055600584905560078054909116831790819055600160a060020a03908116908290301631606082818181858883f15050604080516002546004805460015460e260020a632099877102855291840152600160a060020a0390811660248401529251921694506382661dc493506044808201935060209291829003018187876161da5a03f11561000257505060408051600454600654908252600160a060020a0316602082015281517fa1ab731770d71027cd294cc0af5c8f5ec3c2ff5dbe6b75d68963d17192f8377b93509081900390910190a150600195945050505050565b6002547fc9d27afe000000000000000000000
0000000000000000000000000000000000060609081526064859052831515608452600160a060020a039091169063c9d27afe9060a49060209060448187876161da5a03f11561000257505060408051858152841515602082015281517f8bfa1f40665434b48e7becc865cc0586ce3d6d2388521c05d4db87536ac8279993509081900390910190a15060016106a056", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000000": "0x00000000000000000000000003e3d4561a8f8e975fdcd798d32857a20cf25e7e", + "0x0000000000000000000000000000000000000000000000000000000000000002": "0x000000000000000000000000304a554a310c7e546dfe434669c62820b7d83490" + } + } + }, + "config": { + "chainId": 1, + "homesteadBlock": 1150000, + "daoForkBlock": 1920000, + "daoForkSupport": true, + "eip150Block": 2463000, + "eip150Hash": "0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0", + "eip155Block": 2675000, + "eip158Block": 2675000, + "byzantiumBlock": 4370000, + "constantinopleBlock": 7280000, + "petersburgBlock": 7280000, + "istanbulBlock": 9069000, + "muirGlacierBlock": 9200000, + "berlinBlock": 12244000, + "londonBlock": 12965000, + "arrowGlacierBlock": 13773000, + "grayGlacierBlock": 15050000, + "terminalTotalDifficultyPassed": true, + "ethash": {} + } + }, + "context": { + "number": "1881284", + "difficulty": "59917798852808", + "timestamp": "1468467296", + "gasLimit": "4712388", + "miner": "0xea674fdde714fd979de3edf0f56aa9716b898ec8" + }, + "input": "0xf869448505d21dba00833567e09403e3d4561a8f8e975fdcd798d32857a20cf25e7e8084be9a65551ba0d4dd5fff30e83fbe630bb0fd67eeefe9e3aad0c3ee870a2b6e80fc40191bc7d4a074f93b546bfad60f3cae8e4aafef835237095d6618334154a24df4b4d49d9359", + "tracerConfig": { + "withLog": true + }, + "result": { + "from": "0xbe3ae5cb97c253dda67181c6e34e43f5c275e08b", + "gas": "0x3567e0", + "gasUsed": "0x26e1ef", + "to": "0x03e3d4561a8f8e975fdcd798d32857a20cf25e7e", + "input": "0xbe9a6555", + "calls": [ + { + "from": "0x03e3d4561a8f8e975fdcd798d32857a20cf25e7e", + "gas": "0x34affa", + "gasUsed": "0x1ef", + "to": 
"0x304a554a310c7e546dfe434669c62820b7d83490", + "input": "0x4b6753bc", + "output": "0x0000000000000000000000000000000000000000000000000000000057870858", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x03e3d4561a8f8e975fdcd798d32857a20cf25e7e", + "gas": "0x34abef", + "gasUsed": "0x314", + "to": "0x304a554a310c7e546dfe434669c62820b7d83490", + "input": "0x70a08231000000000000000000000000c0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89", + "output": "0x00000000000000000000000000000000000000000001819451f999d617dafa93", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x03e3d4561a8f8e975fdcd798d32857a20cf25e7e", + "gas": "0x34a705", + "gasUsed": "0x314", + "to": "0x304a554a310c7e546dfe434669c62820b7d83490", + "input": "0x70a08231000000000000000000000000c0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89", + "output": "0x00000000000000000000000000000000000000000001819451f999d617dafa93", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x03e3d4561a8f8e975fdcd798d32857a20cf25e7e", + "gas": "0x34a31a", + "gasUsed": "0xa2f3", + "to": "0xc0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89", + "input": "0xa9059cbb0000000000000000000000004fd27b205895e698fa350f7ea57cec8a21927fcd00000000000000000000000000000000000000000001819451f999d617dafa93", + "output": "0x0000000000000000000000000000000000000000000000000000000000000001", + "calls": [ + { + "from": "0xc0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89", + "gas": "0x343e8c", + "gasUsed": "0x9a62", + "to": "0x304a554a310c7e546dfe434669c62820b7d83490", + "input": "0xa9059cbb0000000000000000000000004fd27b205895e698fa350f7ea57cec8a21927fcd00000000000000000000000000000000000000000001819451f999d617dafa93", + "output": "0x0000000000000000000000000000000000000000000000000000000000000001", + "logs": [ + { + "address": "0x304a554a310c7e546dfe434669c62820b7d83490", + "topics": [ + "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef", + "0x000000000000000000000000c0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89", + 
"0x0000000000000000000000004fd27b205895e698fa350f7ea57cec8a21927fcd" + ], + "data": "0x00000000000000000000000000000000000000000001819451f999d617dafa93" + } + ], + "value": "0x0", + "type": "CALL" + } + ], + "logs": [ + { + "address": "0xc0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89", + "topics": [ + "0x69ca02dd4edd7bf0a4abb9ed3b7af3f14778db5d61921c7dc7cd545266326de2" + ], + "data": "0x0000000000000000000000004fd27b205895e698fa350f7ea57cec8a21927fcd00000000000000000000000000000000000000000001819451f999d617dafa93" + } + ], + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x03e3d4561a8f8e975fdcd798d32857a20cf25e7e", + "gas": "0x33ff04", + "gasUsed": "0x168e", + "to": "0x4fd27b205895e698fa350f7ea57cec8a21927fcd", + "input": "0xa8618f710000000000000000000000004fd27b205895e698fa350f7ea57cec8a21927fcd", + "output": "0x0000000000000000000000000000000000000000000000000000000000000000", + "calls": [ + { + "from": "0x4fd27b205895e698fa350f7ea57cec8a21927fcd", + "gas": "0x339a3b", + "gasUsed": "0x329", + "to": "0x304a554a310c7e546dfe434669c62820b7d83490", + "input": "0x81f03fcb0000000000000000000000004fd27b205895e698fa350f7ea57cec8a21927fcd", + "output": "0x0000000000000000000000000000000000000000000000000000000000000000", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x4fd27b205895e698fa350f7ea57cec8a21927fcd", + "gas": "0x3395a4", + "gasUsed": "0x15f", + "to": "0x304a554a310c7e546dfe434669c62820b7d83490", + "input": "0x0e708203", + "output": "0x000000000000000000000000ad3ecf23c0c8983b07163708be6d763b5f056193", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x4fd27b205895e698fa350f7ea57cec8a21927fcd", + "gas": "0x339363", + "gasUsed": "0x113", + "to": "0xad3ecf23c0c8983b07163708be6d763b5f056193", + "input": "0xd2cc718f", + "output": "0x0000000000000000000000000000000000000000000000000000000000000000", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x4fd27b205895e698fa350f7ea57cec8a21927fcd", + "gas": "0x339129", + "gasUsed": "0x329", + "to": 
"0x304a554a310c7e546dfe434669c62820b7d83490", + "input": "0x81f03fcb0000000000000000000000004fd27b205895e698fa350f7ea57cec8a21927fcd", + "output": "0x0000000000000000000000000000000000000000000000000000000000000000", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x4fd27b205895e698fa350f7ea57cec8a21927fcd", + "gas": "0x338cfa", + "gasUsed": "0x13f", + "to": "0x304a554a310c7e546dfe434669c62820b7d83490", + "input": "0x18160ddd", + "output": "0x00000000000000000000000000000000000000000003034f5ca7d45e17df199b", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x4fd27b205895e698fa350f7ea57cec8a21927fcd", + "gas": "0x338a75", + "gasUsed": "0x314", + "to": "0x304a554a310c7e546dfe434669c62820b7d83490", + "input": "0x70a082310000000000000000000000004fd27b205895e698fa350f7ea57cec8a21927fcd", + "output": "0x00000000000000000000000000000000000000000001819451f999d617dafa93", + "value": "0x0", + "type": "CALL" + } + ], + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x03e3d4561a8f8e975fdcd798d32857a20cf25e7e", + "gas": "0x33e6f2", + "gasUsed": "0x314", + "to": "0x304a554a310c7e546dfe434669c62820b7d83490", + "input": "0x70a08231000000000000000000000000f835a0247b0063c04ef22006ebe57c5f11977cc4", + "output": "0x00000000000000000000000000000000000000000001819451f999d617dafa76", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x03e3d4561a8f8e975fdcd798d32857a20cf25e7e", + "gas": "0x33e208", + "gasUsed": "0x314", + "to": "0x304a554a310c7e546dfe434669c62820b7d83490", + "input": "0x70a08231000000000000000000000000f835a0247b0063c04ef22006ebe57c5f11977cc4", + "output": "0x00000000000000000000000000000000000000000001819451f999d617dafa76", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x03e3d4561a8f8e975fdcd798d32857a20cf25e7e", + "gas": "0x33de20", + "gasUsed": "0x685b", + "to": "0xf835a0247b0063c04ef22006ebe57c5f11977cc4", + "input": 
"0xa9059cbb0000000000000000000000004fd27b205895e698fa350f7ea57cec8a21927fcd00000000000000000000000000000000000000000001819451f999d617dafa76", + "output": "0x0000000000000000000000000000000000000000000000000000000000000001", + "calls": [ + { + "from": "0xf835a0247b0063c04ef22006ebe57c5f11977cc4", + "gas": "0x337992", + "gasUsed": "0x5fca", + "to": "0x304a554a310c7e546dfe434669c62820b7d83490", + "input": "0xa9059cbb0000000000000000000000004fd27b205895e698fa350f7ea57cec8a21927fcd00000000000000000000000000000000000000000001819451f999d617dafa76", + "output": "0x0000000000000000000000000000000000000000000000000000000000000001", + "logs": [ + { + "address": "0x304a554a310c7e546dfe434669c62820b7d83490", + "topics": [ + "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef", + "0x000000000000000000000000f835a0247b0063c04ef22006ebe57c5f11977cc4", + "0x0000000000000000000000004fd27b205895e698fa350f7ea57cec8a21927fcd" + ], + "data": "0x00000000000000000000000000000000000000000001819451f999d617dafa76" + } + ], + "value": "0x0", + "type": "CALL" + } + ], + "logs": [ + { + "address": "0xf835a0247b0063c04ef22006ebe57c5f11977cc4", + "topics": [ + "0x69ca02dd4edd7bf0a4abb9ed3b7af3f14778db5d61921c7dc7cd545266326de2" + ], + "data": "0x0000000000000000000000004fd27b205895e698fa350f7ea57cec8a21927fcd00000000000000000000000000000000000000000001819451f999d617dafa76" + } + ], + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x03e3d4561a8f8e975fdcd798d32857a20cf25e7e", + "gas": "0x3374a2", + "gasUsed": "0x168e", + "to": "0x4fd27b205895e698fa350f7ea57cec8a21927fcd", + "input": "0xa8618f710000000000000000000000004fd27b205895e698fa350f7ea57cec8a21927fcd", + "output": "0x0000000000000000000000000000000000000000000000000000000000000000", + "calls": [ + { + "from": "0x4fd27b205895e698fa350f7ea57cec8a21927fcd", + "gas": "0x330fd9", + "gasUsed": "0x329", + "to": "0x304a554a310c7e546dfe434669c62820b7d83490", + "input": 
"0x81f03fcb0000000000000000000000004fd27b205895e698fa350f7ea57cec8a21927fcd", + "output": "0x0000000000000000000000000000000000000000000000000000000000000000", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x4fd27b205895e698fa350f7ea57cec8a21927fcd", + "gas": "0x330b42", + "gasUsed": "0x15f", + "to": "0x304a554a310c7e546dfe434669c62820b7d83490", + "input": "0x0e708203", + "output": "0x000000000000000000000000ad3ecf23c0c8983b07163708be6d763b5f056193", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x4fd27b205895e698fa350f7ea57cec8a21927fcd", + "gas": "0x330901", + "gasUsed": "0x113", + "to": "0xad3ecf23c0c8983b07163708be6d763b5f056193", + "input": "0xd2cc718f", + "output": "0x0000000000000000000000000000000000000000000000000000000000000000", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x4fd27b205895e698fa350f7ea57cec8a21927fcd", + "gas": "0x3306c7", + "gasUsed": "0x329", + "to": "0x304a554a310c7e546dfe434669c62820b7d83490", + "input": "0x81f03fcb0000000000000000000000004fd27b205895e698fa350f7ea57cec8a21927fcd", + "output": "0x0000000000000000000000000000000000000000000000000000000000000000", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x4fd27b205895e698fa350f7ea57cec8a21927fcd", + "gas": "0x330298", + "gasUsed": "0x13f", + "to": "0x304a554a310c7e546dfe434669c62820b7d83490", + "input": "0x18160ddd", + "output": "0x00000000000000000000000000000000000000000003034f5ca7d45e17df199b", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x4fd27b205895e698fa350f7ea57cec8a21927fcd", + "gas": "0x330013", + "gasUsed": "0x314", + "to": "0x304a554a310c7e546dfe434669c62820b7d83490", + "input": "0x70a082310000000000000000000000004fd27b205895e698fa350f7ea57cec8a21927fcd", + "output": "0x000000000000000000000000000000000000000000030328a3f333ac2fb5f509", + "value": "0x0", + "type": "CALL" + } + ], + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x03e3d4561a8f8e975fdcd798d32857a20cf25e7e", + "gas": "0x33490b", + "gasUsed": "0x3f781", + 
"to": "0x6e715ab4f598eacf0016b9b35ef33e4141844ccc", + "input": "0xfc340716", + "calls": [ + { + "from": "0x6e715ab4f598eacf0016b9b35ef33e4141844ccc", + "gas": "0x32e30d", + "gasUsed": "0x15f", + "to": "0x304a554a310c7e546dfe434669c62820b7d83490", + "input": "0x0e708203", + "output": "0x000000000000000000000000ad3ecf23c0c8983b07163708be6d763b5f056193", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x6e715ab4f598eacf0016b9b35ef33e4141844ccc", + "gas": "0x32e037", + "gasUsed": "0x15f", + "to": "0x304a554a310c7e546dfe434669c62820b7d83490", + "input": "0x0e708203", + "output": "0x000000000000000000000000ad3ecf23c0c8983b07163708be6d763b5f056193", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x6e715ab4f598eacf0016b9b35ef33e4141844ccc", + "gas": "0x32dd7b", + "gasUsed": "0x113", + "to": "0xad3ecf23c0c8983b07163708be6d763b5f056193", + "input": "0xd2cc718f", + "output": "0x0000000000000000000000000000000000000000000000000000000000000000", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x6e715ab4f598eacf0016b9b35ef33e4141844ccc", + "gas": "0x32daf9", + "gasUsed": "0x329", + "to": "0x304a554a310c7e546dfe434669c62820b7d83490", + "input": "0x81f03fcb0000000000000000000000004fd27b205895e698fa350f7ea57cec8a21927fcd", + "output": "0x0000000000000000000000000000000000000000000000000000000000000000", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x6e715ab4f598eacf0016b9b35ef33e4141844ccc", + "gas": "0x32d6ab", + "gasUsed": "0x13f", + "to": "0x304a554a310c7e546dfe434669c62820b7d83490", + "input": "0x18160ddd", + "output": "0x00000000000000000000000000000000000000000003034f5ca7d45e17df199b", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x6e715ab4f598eacf0016b9b35ef33e4141844ccc", + "gas": "0x32d400", + "gasUsed": "0x314", + "to": "0x304a554a310c7e546dfe434669c62820b7d83490", + "input": "0x70a082310000000000000000000000004fd27b205895e698fa350f7ea57cec8a21927fcd", + "output": 
"0x000000000000000000000000000000000000000000030328a3f333ac2fb5f509", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x6e715ab4f598eacf0016b9b35ef33e4141844ccc", + "gas": "0x32c975", + "gasUsed": "0x314", + "to": "0x304a554a310c7e546dfe434669c62820b7d83490", + "input": "0x70a082310000000000000000000000004fd27b205895e698fa350f7ea57cec8a21927fcd", + "output": "0x000000000000000000000000000000000000000000030328a3f333ac2fb5f509", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x6e715ab4f598eacf0016b9b35ef33e4141844ccc", + "gas": "0x3276d3", + "gasUsed": "0xa49d", + "to": "0x4fd27b205895e698fa350f7ea57cec8a21927fcd", + "input": "0xd0679d340000000000000000000000006e715ab4f598eacf0016b9b35ef33e4141844ccc0000000000000000000000000000000000000000000181a7ae53ea2f0bef8ccd", + "output": "0x0000000000000000000000000000000000000000000000000000000000000001", + "calls": [ + { + "from": "0x4fd27b205895e698fa350f7ea57cec8a21927fcd", + "gas": "0x320fe1", + "gasUsed": "0x314", + "to": "0x304a554a310c7e546dfe434669c62820b7d83490", + "input": "0x70a082310000000000000000000000004fd27b205895e698fa350f7ea57cec8a21927fcd", + "output": "0x000000000000000000000000000000000000000000030328a3f333ac2fb5f509", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x4fd27b205895e698fa350f7ea57cec8a21927fcd", + "gas": "0x320b5b", + "gasUsed": "0x9a62", + "to": "0x304a554a310c7e546dfe434669c62820b7d83490", + "input": "0xa9059cbb0000000000000000000000006e715ab4f598eacf0016b9b35ef33e4141844ccc0000000000000000000000000000000000000000000181a7ae53ea2f0bef8ccd", + "output": "0x0000000000000000000000000000000000000000000000000000000000000001", + "logs": [ + { + "address": "0x304a554a310c7e546dfe434669c62820b7d83490", + "topics": [ + "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef", + "0x0000000000000000000000004fd27b205895e698fa350f7ea57cec8a21927fcd", + "0x0000000000000000000000006e715ab4f598eacf0016b9b35ef33e4141844ccc" + ], + "data": 
"0x0000000000000000000000000000000000000000000181a7ae53ea2f0bef8ccd" + } + ], + "value": "0x0", + "type": "CALL" + } + ], + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x6e715ab4f598eacf0016b9b35ef33e4141844ccc", + "gas": "0x3164e1", + "gasUsed": "0x4e91", + "to": "0xad3ecf23c0c8983b07163708be6d763b5f056193", + "input": "0x", + "value": "0x4563918244f400000", + "type": "CALL" + }, + { + "from": "0x6e715ab4f598eacf0016b9b35ef33e4141844ccc", + "gas": "0x3115cc", + "gasUsed": "0x113", + "to": "0xad3ecf23c0c8983b07163708be6d763b5f056193", + "input": "0xd2cc718f", + "output": "0x000000000000000000000000000000000000000000000004563918244f400000", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x6e715ab4f598eacf0016b9b35ef33e4141844ccc", + "gas": "0x311382", + "gasUsed": "0x314", + "to": "0x304a554a310c7e546dfe434669c62820b7d83490", + "input": "0x70a082310000000000000000000000006e715ab4f598eacf0016b9b35ef33e4141844ccc", + "output": "0x0000000000000000000000000000000000000000000181a7ae53ea2f0bef8ccd", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x6e715ab4f598eacf0016b9b35ef33e4141844ccc", + "gas": "0x310f37", + "gasUsed": "0x329", + "to": "0x304a554a310c7e546dfe434669c62820b7d83490", + "input": "0x81f03fcb0000000000000000000000006e715ab4f598eacf0016b9b35ef33e4141844ccc", + "output": "0x0000000000000000000000000000000000000000000000000000000000000000", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x6e715ab4f598eacf0016b9b35ef33e4141844ccc", + "gas": "0x310ae9", + "gasUsed": "0x1446e", + "to": "0x304a554a310c7e546dfe434669c62820b7d83490", + "input": "0xcc9ae3f6", + "output": "0x0000000000000000000000000000000000000000000000000000000000000001", + "calls": [ + { + "from": "0x304a554a310c7e546dfe434669c62820b7d83490", + "gas": "0x30a397", + "gasUsed": "0x113", + "to": "0xad3ecf23c0c8983b07163708be6d763b5f056193", + "input": "0xd2cc718f", + "output": "0x000000000000000000000000000000000000000000000004563918244f400000", + "value": "0x0", + 
"type": "CALL" + }, + { + "from": "0x304a554a310c7e546dfe434669c62820b7d83490", + "gas": "0x309fc1", + "gasUsed": "0x113", + "to": "0xad3ecf23c0c8983b07163708be6d763b5f056193", + "input": "0xd2cc718f", + "output": "0x000000000000000000000000000000000000000000000004563918244f400000", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x304a554a310c7e546dfe434669c62820b7d83490", + "gas": "0x309c45", + "gasUsed": "0x122af", + "to": "0xad3ecf23c0c8983b07163708be6d763b5f056193", + "input": "0x0221038a0000000000000000000000006e715ab4f598eacf0016b9b35ef33e4141844ccc0000000000000000000000000000000000000000000000022b1c8c12279fffff", + "output": "0x0000000000000000000000000000000000000000000000000000000000000001", + "calls": [ + { + "from": "0xad3ecf23c0c8983b07163708be6d763b5f056193", + "gas": "0x301e6f", + "gasUsed": "0x10068", + "to": "0x6e715ab4f598eacf0016b9b35ef33e4141844ccc", + "input": "0x", + "output": "0x0000000000000000000000000000000000000000000000000000000000000001", + "calls": [ + { + "from": "0x6e715ab4f598eacf0016b9b35ef33e4141844ccc", + "gas": "0x2fbb97", + "gasUsed": "0x15f", + "to": "0x304a554a310c7e546dfe434669c62820b7d83490", + "input": "0x0e708203", + "output": "0x000000000000000000000000ad3ecf23c0c8983b07163708be6d763b5f056193", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x6e715ab4f598eacf0016b9b35ef33e4141844ccc", + "gas": "0x2fa477", + "gasUsed": "0xe7b6", + "to": "0x304a554a310c7e546dfe434669c62820b7d83490", + "input": "0xcc9ae3f6", + "output": "0x0000000000000000000000000000000000000000000000000000000000000001", + "calls": [ + { + "from": "0x304a554a310c7e546dfe434669c62820b7d83490", + "gas": "0x2f3d25", + "gasUsed": "0x113", + "to": "0xad3ecf23c0c8983b07163708be6d763b5f056193", + "input": "0xd2cc718f", + "output": "0x000000000000000000000000000000000000000000000004563918244f400000", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x304a554a310c7e546dfe434669c62820b7d83490", + "gas": "0x2f394f", + "gasUsed": "0x113", + "to": 
"0xad3ecf23c0c8983b07163708be6d763b5f056193", + "input": "0xd2cc718f", + "output": "0x000000000000000000000000000000000000000000000004563918244f400000", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x304a554a310c7e546dfe434669c62820b7d83490", + "gas": "0x2f35d3", + "gasUsed": "0x8b5f", + "to": "0xad3ecf23c0c8983b07163708be6d763b5f056193", + "input": "0x0221038a0000000000000000000000006e715ab4f598eacf0016b9b35ef33e4141844ccc0000000000000000000000000000000000000000000000022b1c8c12279fffff", + "output": "0x0000000000000000000000000000000000000000000000000000000000000001", + "calls": [ + { + "from": "0xad3ecf23c0c8983b07163708be6d763b5f056193", + "gas": "0x2eb7fd", + "gasUsed": "0x6918", + "to": "0x6e715ab4f598eacf0016b9b35ef33e4141844ccc", + "input": "0x", + "output": "0x0000000000000000000000000000000000000000000000000000000000000001", + "calls": [ + { + "from": "0x6e715ab4f598eacf0016b9b35ef33e4141844ccc", + "gas": "0x2e5525", + "gasUsed": "0x15f", + "to": "0x304a554a310c7e546dfe434669c62820b7d83490", + "input": "0x0e708203", + "output": "0x000000000000000000000000ad3ecf23c0c8983b07163708be6d763b5f056193", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x6e715ab4f598eacf0016b9b35ef33e4141844ccc", + "gas": "0x2e5168", + "gasUsed": "0x314", + "to": "0x304a554a310c7e546dfe434669c62820b7d83490", + "input": "0x70a082310000000000000000000000006e715ab4f598eacf0016b9b35ef33e4141844ccc", + "output": "0x0000000000000000000000000000000000000000000181a7ae53ea2f0bef8ccd", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x6e715ab4f598eacf0016b9b35ef33e4141844ccc", + "gas": "0x2e4d69", + "gasUsed": "0x5fca", + "to": "0x304a554a310c7e546dfe434669c62820b7d83490", + "input": "0xa9059cbb0000000000000000000000004fd27b205895e698fa350f7ea57cec8a21927fcd0000000000000000000000000000000000000000000181a7ae53ea2f0bef8ccc", + "output": "0x0000000000000000000000000000000000000000000000000000000000000001", + "logs": [ + { + "address": 
"0x304a554a310c7e546dfe434669c62820b7d83490", + "topics": [ + "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef", + "0x0000000000000000000000006e715ab4f598eacf0016b9b35ef33e4141844ccc", + "0x0000000000000000000000004fd27b205895e698fa350f7ea57cec8a21927fcd" + ], + "data": "0x0000000000000000000000000000000000000000000181a7ae53ea2f0bef8ccc" + } + ], + "value": "0x0", + "type": "CALL" + } + ], + "value": "0x22b1c8c12279fffff", + "type": "CALL" + } + ], + "logs": [ + { + "address": "0xad3ecf23c0c8983b07163708be6d763b5f056193", + "topics": [ + "0x9735b0cb909f3d21d5c16bbcccd272d85fa11446f6d679f6ecb170d2dabfecfc", + "0x0000000000000000000000006e715ab4f598eacf0016b9b35ef33e4141844ccc" + ], + "data": "0x0000000000000000000000000000000000000000000000022b1c8c12279fffff" + } + ], + "value": "0x0", + "type": "CALL" + } + ], + "value": "0x0", + "type": "CALL" + } + ], + "value": "0x22b1c8c12279fffff", + "type": "CALL" + } + ], + "logs": [ + { + "address": "0xad3ecf23c0c8983b07163708be6d763b5f056193", + "topics": [ + "0x9735b0cb909f3d21d5c16bbcccd272d85fa11446f6d679f6ecb170d2dabfecfc", + "0x0000000000000000000000006e715ab4f598eacf0016b9b35ef33e4141844ccc" + ], + "data": "0x0000000000000000000000000000000000000000000000022b1c8c12279fffff" + } + ], + "value": "0x0", + "type": "CALL" + } + ], + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x6e715ab4f598eacf0016b9b35ef33e4141844ccc", + "gas": "0x2fc505", + "gasUsed": "0xd4fa", + "to": "0x304a554a310c7e546dfe434669c62820b7d83490", + "input": "0xa9059cbb0000000000000000000000006dbfc63479ffc031f23e94dc91befa38bec2c25f0000000000000000000000000000000000000000000000000000000000000001", + "output": "0x0000000000000000000000000000000000000000000000000000000000000001", + "logs": [ + { + "address": "0x304a554a310c7e546dfe434669c62820b7d83490", + "topics": [ + "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef", + "0x0000000000000000000000006e715ab4f598eacf0016b9b35ef33e4141844ccc", + 
"0x0000000000000000000000006dbfc63479ffc031f23e94dc91befa38bec2c25f" + ], + "data": "0x0000000000000000000000000000000000000000000000000000000000000001" + } + ], + "value": "0x0", + "type": "CALL" + } + ], + "logs": [ + { + "address": "0x6e715ab4f598eacf0016b9b35ef33e4141844ccc", + "topics": [ + "0x07cf7e805770612a8b2ee8e0bcbba8aa908df5f85fbc4f9e2ef384cf75315038" + ], + "data": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "address": "0x6e715ab4f598eacf0016b9b35ef33e4141844ccc", + "topics": [ + "0x7027eecbd2a688fc1fa281702b311ed7168571514adfd17014a55d828cb43382" + ], + "data": "0x000000000000000000000000000000000000000000000004563918244f400000" + } + ], + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x03e3d4561a8f8e975fdcd798d32857a20cf25e7e", + "gas": "0x2f5092", + "gasUsed": "0x14e37", + "to": "0x6dbfc63479ffc031f23e94dc91befa38bec2c25f", + "input": "0xd95f98ce", + "calls": [ + { + "from": "0x6dbfc63479ffc031f23e94dc91befa38bec2c25f", + "gas": "0x2eea7c", + "gasUsed": "0x329", + "to": "0x304a554a310c7e546dfe434669c62820b7d83490", + "input": "0x81f03fcb0000000000000000000000006dbfc63479ffc031f23e94dc91befa38bec2c25f", + "output": "0x000000000000000000000000000000000000000000000004563918244f3ffffe", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x6dbfc63479ffc031f23e94dc91befa38bec2c25f", + "gas": "0x2ee4cb", + "gasUsed": "0x314", + "to": "0x304a554a310c7e546dfe434669c62820b7d83490", + "input": "0x70a08231000000000000000000000000da4a4626d3e16e094de3225a751aab7128e96526", + "output": "0x000000000000000000000000000000000000000000000026b8b4a0b1e8292492", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x6dbfc63479ffc031f23e94dc91befa38bec2c25f", + "gas": "0x2edfff", + "gasUsed": "0x314", + "to": "0x304a554a310c7e546dfe434669c62820b7d83490", + "input": "0x70a082310000000000000000000000006dbfc63479ffc031f23e94dc91befa38bec2c25f", + "output": "0x0000000000000000000000000000000000000000000000000000000000000001", + 
"value": "0x0", + "type": "CALL" + }, + { + "from": "0x6dbfc63479ffc031f23e94dc91befa38bec2c25f", + "gas": "0x2edb9a", + "gasUsed": "0x6994", + "to": "0x4fd27b205895e698fa350f7ea57cec8a21927fcd", + "input": "0xd0679d340000000000000000000000006dbfc63479ffc031f23e94dc91befa38bec2c25f0000000000000000000000000000000000000000000000000000000000000063", + "output": "0x0000000000000000000000000000000000000000000000000000000000000001", + "calls": [ + { + "from": "0x4fd27b205895e698fa350f7ea57cec8a21927fcd", + "gas": "0x2e7519", + "gasUsed": "0x314", + "to": "0x304a554a310c7e546dfe434669c62820b7d83490", + "input": "0x70a082310000000000000000000000004fd27b205895e698fa350f7ea57cec8a21927fcd", + "output": "0x000000000000000000000000000000000000000000030328a3f333ac2fb5f508", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x4fd27b205895e698fa350f7ea57cec8a21927fcd", + "gas": "0x2e7093", + "gasUsed": "0x5fca", + "to": "0x304a554a310c7e546dfe434669c62820b7d83490", + "input": "0xa9059cbb0000000000000000000000006dbfc63479ffc031f23e94dc91befa38bec2c25f0000000000000000000000000000000000000000000000000000000000000063", + "output": "0x0000000000000000000000000000000000000000000000000000000000000001", + "logs": [ + { + "address": "0x304a554a310c7e546dfe434669c62820b7d83490", + "topics": [ + "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef", + "0x0000000000000000000000004fd27b205895e698fa350f7ea57cec8a21927fcd", + "0x0000000000000000000000006dbfc63479ffc031f23e94dc91befa38bec2c25f" + ], + "data": "0x0000000000000000000000000000000000000000000000000000000000000063" + } + ], + "value": "0x0", + "type": "CALL" + } + ], + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x6dbfc63479ffc031f23e94dc91befa38bec2c25f", + "gas": "0x2e6f59", + "gasUsed": "0x329", + "to": "0x304a554a310c7e546dfe434669c62820b7d83490", + "input": "0x81f03fcb000000000000000000000000da4a4626d3e16e094de3225a751aab7128e96526", + "output": 
"0x0000000000000000000000000000000000000000000000000000000000000000", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x6dbfc63479ffc031f23e94dc91befa38bec2c25f", + "gas": "0x2e6afa", + "gasUsed": "0x1113", + "to": "0x4fd27b205895e698fa350f7ea57cec8a21927fcd", + "input": "0x65f13792000000000000000000000000da4a4626d3e16e094de3225a751aab7128e96526", + "output": "0x0000000000000000000000000000000000000000000000000037bc5737aa7ba8", + "calls": [ + { + "from": "0x4fd27b205895e698fa350f7ea57cec8a21927fcd", + "gas": "0x2e06f9", + "gasUsed": "0x15f", + "to": "0x304a554a310c7e546dfe434669c62820b7d83490", + "input": "0x0e708203", + "output": "0x000000000000000000000000ad3ecf23c0c8983b07163708be6d763b5f056193", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x4fd27b205895e698fa350f7ea57cec8a21927fcd", + "gas": "0x2e04b8", + "gasUsed": "0x113", + "to": "0xad3ecf23c0c8983b07163708be6d763b5f056193", + "input": "0xd2cc718f", + "output": "0x000000000000000000000000000000000000000000000004563918244f400000", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x4fd27b205895e698fa350f7ea57cec8a21927fcd", + "gas": "0x2e027b", + "gasUsed": "0x329", + "to": "0x304a554a310c7e546dfe434669c62820b7d83490", + "input": "0x81f03fcb000000000000000000000000da4a4626d3e16e094de3225a751aab7128e96526", + "output": "0x0000000000000000000000000000000000000000000000000000000000000000", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x4fd27b205895e698fa350f7ea57cec8a21927fcd", + "gas": "0x2dfe4c", + "gasUsed": "0x13f", + "to": "0x304a554a310c7e546dfe434669c62820b7d83490", + "input": "0x18160ddd", + "output": "0x00000000000000000000000000000000000000000003034f5ca7d45e17df199b", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x4fd27b205895e698fa350f7ea57cec8a21927fcd", + "gas": "0x2dfbc7", + "gasUsed": "0x314", + "to": "0x304a554a310c7e546dfe434669c62820b7d83490", + "input": "0x70a08231000000000000000000000000da4a4626d3e16e094de3225a751aab7128e96526", + "output": 
"0x000000000000000000000000000000000000000000000026b8b4a0b1e8292492", + "value": "0x0", + "type": "CALL" + } + ], + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x6dbfc63479ffc031f23e94dc91befa38bec2c25f", + "gas": "0x2e5281", + "gasUsed": "0x329", + "to": "0x304a554a310c7e546dfe434669c62820b7d83490", + "input": "0x81f03fcb0000000000000000000000006dbfc63479ffc031f23e94dc91befa38bec2c25f", + "output": "0x000000000000000000000000000000000000000000000004563918244f3ffffe", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x6dbfc63479ffc031f23e94dc91befa38bec2c25f", + "gas": "0x2e4dcc", + "gasUsed": "0x314", + "to": "0x304a554a310c7e546dfe434669c62820b7d83490", + "input": "0x70a082310000000000000000000000006dbfc63479ffc031f23e94dc91befa38bec2c25f", + "output": "0x0000000000000000000000000000000000000000000000000000000000000064", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x6dbfc63479ffc031f23e94dc91befa38bec2c25f", + "gas": "0x2e4857", + "gasUsed": "0x314", + "to": "0x304a554a310c7e546dfe434669c62820b7d83490", + "input": "0x70a08231000000000000000000000000da4a4626d3e16e094de3225a751aab7128e96526", + "output": "0x000000000000000000000000000000000000000000000026b8b4a0b1e8292492", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x6dbfc63479ffc031f23e94dc91befa38bec2c25f", + "gas": "0x2e3bae", + "gasUsed": "0x9a62", + "to": "0x304a554a310c7e546dfe434669c62820b7d83490", + "input": "0xa9059cbb000000000000000000000000da4a4626d3e16e094de3225a751aab7128e965260000000000000000000000000000000000000000000000000000000000000064", + "output": "0x0000000000000000000000000000000000000000000000000000000000000001", + "logs": [ + { + "address": "0x304a554a310c7e546dfe434669c62820b7d83490", + "topics": [ + "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef", + "0x0000000000000000000000006dbfc63479ffc031f23e94dc91befa38bec2c25f", + "0x000000000000000000000000da4a4626d3e16e094de3225a751aab7128e96526" + ], + "data": 
"0x0000000000000000000000000000000000000000000000000000000000000064" + } + ], + "value": "0x0", + "type": "CALL" + } + ], + "logs": [ + { + "address": "0x6dbfc63479ffc031f23e94dc91befa38bec2c25f", + "topics": [ + "0x4b0bc4f25f8d0b92d2e12b686ba96cd75e4e69325e6cf7b1f3119d14eaf2cbdf" + ], + "data": "0x000000000000000000000000da4a4626d3e16e094de3225a751aab7128e96526" + }, + { + "address": "0x6dbfc63479ffc031f23e94dc91befa38bec2c25f", + "topics": [ + "0xf340c079d598119636d42046c6a2d2faf7a68c04aecee516f0e0b8a9e79b8666" + ], + "data": "0x000000000000000000000000da4a4626d3e16e094de3225a751aab7128e9652600000000000000000000000000000000000000000000000000000000000000640000000000000000000000000000000000000000000000000000000000000000" + } + ], + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x03e3d4561a8f8e975fdcd798d32857a20cf25e7e", + "gas": "0x2e00dc", + "gasUsed": "0x314", + "to": "0x304a554a310c7e546dfe434669c62820b7d83490", + "input": "0x70a082310000000000000000000000007498bb5749c9801f1f7e490baf5f966dbfe4e97b", + "output": "0x0000000000000000000000000000000000000000000000000000000000000000", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x03e3d4561a8f8e975fdcd798d32857a20cf25e7e", + "gas": "0x2dfc58", + "gasUsed": "0xa3bb", + "to": "0x4fd27b205895e698fa350f7ea57cec8a21927fcd", + "input": "0xd0679d340000000000000000000000007498bb5749c9801f1f7e490baf5f966dbfe4e97b0000000000000000000000000000000000000000000000000000000000000001", + "output": "0x0000000000000000000000000000000000000000000000000000000000000001", + "calls": [ + { + "from": "0x4fd27b205895e698fa350f7ea57cec8a21927fcd", + "gas": "0x2d9648", + "gasUsed": "0x314", + "to": "0x304a554a310c7e546dfe434669c62820b7d83490", + "input": "0x70a082310000000000000000000000004fd27b205895e698fa350f7ea57cec8a21927fcd", + "output": "0x000000000000000000000000000000000000000000030328a3f333ac2fb5f4a5", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x4fd27b205895e698fa350f7ea57cec8a21927fcd", + "gas": 
"0x2d91c2", + "gasUsed": "0x9a62", + "to": "0x304a554a310c7e546dfe434669c62820b7d83490", + "input": "0xa9059cbb0000000000000000000000007498bb5749c9801f1f7e490baf5f966dbfe4e97b0000000000000000000000000000000000000000000000000000000000000001", + "output": "0x0000000000000000000000000000000000000000000000000000000000000001", + "logs": [ + { + "address": "0x304a554a310c7e546dfe434669c62820b7d83490", + "topics": [ + "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef", + "0x0000000000000000000000004fd27b205895e698fa350f7ea57cec8a21927fcd", + "0x0000000000000000000000007498bb5749c9801f1f7e490baf5f966dbfe4e97b" + ], + "data": "0x0000000000000000000000000000000000000000000000000000000000000001" + } + ], + "value": "0x0", + "type": "CALL" + } + ], + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x03e3d4561a8f8e975fdcd798d32857a20cf25e7e", + "gas": "0x2d57a6", + "gasUsed": "0x112", + "to": "0x7498bb5749c9801f1f7e490baf5f966dbfe4e97b", + "input": "0x400e3949", + "output": "0x0000000000000000000000000000000000000000000000000000000000000000", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x03e3d4561a8f8e975fdcd798d32857a20cf25e7e", + "gas": "0x2d5515", + "gasUsed": "0x3478d", + "to": "0x7498bb5749c9801f1f7e490baf5f966dbfe4e97b", + "input": "0xff2f4bd2", + "calls": [ + { + "from": "0x7498bb5749c9801f1f7e490baf5f966dbfe4e97b", + "gas": "0x2ceffb", + "gasUsed": "0x314", + "to": "0x304a554a310c7e546dfe434669c62820b7d83490", + "input": "0x70a082310000000000000000000000007498bb5749c9801f1f7e490baf5f966dbfe4e97b", + "output": "0x0000000000000000000000000000000000000000000000000000000000000001", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x7498bb5749c9801f1f7e490baf5f966dbfe4e97b", + "gas": "0x3", + "gasUsed": "0x3", + "to": "0x0000000000000000000000000000000000000004", + "input": "0x", + "error": "out of gas", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x7498bb5749c9801f1f7e490baf5f966dbfe4e97b", + "gas": "0x3", + "gasUsed": 
"0x3", + "to": "0x0000000000000000000000000000000000000004", + "input": "0x", + "error": "out of gas", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x7498bb5749c9801f1f7e490baf5f966dbfe4e97b", + "gas": "0x2ce86e", + "gasUsed": "0x29e8d", + "to": "0x304a554a310c7e546dfe434669c62820b7d83490", + "input": "0x612e45a3000000000000000000000000be3ae5cb97c253dda67181c6e34e43f5c275e08b000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000e00000000000000000000000000000000000000000000000000000000000093a80000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "output": "0x0000000000000000000000000000000000000000000000000000000000000001", + "calls": [ + { + "from": "0x304a554a310c7e546dfe434669c62820b7d83490", + "gas": "0x3", + "gasUsed": "0x3", + "to": "0x0000000000000000000000000000000000000004", + "input": "0x", + "error": "out of gas", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x304a554a310c7e546dfe434669c62820b7d83490", + "gas": "0x3", + "gasUsed": "0x3", + "to": "0x0000000000000000000000000000000000000004", + "input": "0x", + "error": "out of gas", + "value": "0x0", + "type": "CALL" + } + ], + "logs": [ + { + "address": "0x304a554a310c7e546dfe434669c62820b7d83490", + "topics": [ + "0x5790de2c279e58269b93b12828f56fd5f2bc8ad15e61ce08572585c81a38756f", + "0x0000000000000000000000000000000000000000000000000000000000000001" + ], + "data": "0x000000000000000000000000be3ae5cb97c253dda67181c6e34e43f5c275e08b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000" + } + ], + "value": 
"0x0", + "type": "CALL" + } + ], + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x03e3d4561a8f8e975fdcd798d32857a20cf25e7e", + "gas": "0x2a0c87", + "gasUsed": "0x112", + "to": "0x7498bb5749c9801f1f7e490baf5f966dbfe4e97b", + "input": "0x400e3949", + "output": "0x0000000000000000000000000000000000000000000000000000000000000001", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x03e3d4561a8f8e975fdcd798d32857a20cf25e7e", + "gas": "0x2a09f6", + "gasUsed": "0x30cf5", + "to": "0x7498bb5749c9801f1f7e490baf5f966dbfe4e97b", + "input": "0xff2f4bd2", + "calls": [ + { + "from": "0x7498bb5749c9801f1f7e490baf5f966dbfe4e97b", + "gas": "0x29a4dc", + "gasUsed": "0x314", + "to": "0x304a554a310c7e546dfe434669c62820b7d83490", + "input": "0x70a082310000000000000000000000007498bb5749c9801f1f7e490baf5f966dbfe4e97b", + "output": "0x0000000000000000000000000000000000000000000000000000000000000001", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x7498bb5749c9801f1f7e490baf5f966dbfe4e97b", + "gas": "0x3", + "gasUsed": "0x3", + "to": "0x0000000000000000000000000000000000000004", + "input": "0x", + "error": "out of gas", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x7498bb5749c9801f1f7e490baf5f966dbfe4e97b", + "gas": "0x3", + "gasUsed": "0x3", + "to": "0x0000000000000000000000000000000000000004", + "input": "0x", + "error": "out of gas", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x7498bb5749c9801f1f7e490baf5f966dbfe4e97b", + "gas": "0x299d4f", + "gasUsed": "0x29e8d", + "to": "0x304a554a310c7e546dfe434669c62820b7d83490", + "input": 
"0x612e45a3000000000000000000000000be3ae5cb97c253dda67181c6e34e43f5c275e08b000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000e00000000000000000000000000000000000000000000000000000000000093a80000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "output": "0x0000000000000000000000000000000000000000000000000000000000000002", + "calls": [ + { + "from": "0x304a554a310c7e546dfe434669c62820b7d83490", + "gas": "0x3", + "gasUsed": "0x3", + "to": "0x0000000000000000000000000000000000000004", + "input": "0x", + "error": "out of gas", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x304a554a310c7e546dfe434669c62820b7d83490", + "gas": "0x3", + "gasUsed": "0x3", + "to": "0x0000000000000000000000000000000000000004", + "input": "0x", + "error": "out of gas", + "value": "0x0", + "type": "CALL" + } + ], + "logs": [ + { + "address": "0x304a554a310c7e546dfe434669c62820b7d83490", + "topics": [ + "0x5790de2c279e58269b93b12828f56fd5f2bc8ad15e61ce08572585c81a38756f", + "0x0000000000000000000000000000000000000000000000000000000000000002" + ], + "data": "0x000000000000000000000000be3ae5cb97c253dda67181c6e34e43f5c275e08b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000" + } + ], + "value": "0x0", + "type": "CALL" + } + ], + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x03e3d4561a8f8e975fdcd798d32857a20cf25e7e", + "gas": "0x26fc00", + "gasUsed": "0x112", + "to": "0x7498bb5749c9801f1f7e490baf5f966dbfe4e97b", + "input": "0x400e3949", + "output": 
"0x0000000000000000000000000000000000000000000000000000000000000002", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x03e3d4561a8f8e975fdcd798d32857a20cf25e7e", + "gas": "0x26f96f", + "gasUsed": "0x30cf5", + "to": "0x7498bb5749c9801f1f7e490baf5f966dbfe4e97b", + "input": "0xff2f4bd2", + "calls": [ + { + "from": "0x7498bb5749c9801f1f7e490baf5f966dbfe4e97b", + "gas": "0x269455", + "gasUsed": "0x314", + "to": "0x304a554a310c7e546dfe434669c62820b7d83490", + "input": "0x70a082310000000000000000000000007498bb5749c9801f1f7e490baf5f966dbfe4e97b", + "output": "0x0000000000000000000000000000000000000000000000000000000000000001", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x7498bb5749c9801f1f7e490baf5f966dbfe4e97b", + "gas": "0x3", + "gasUsed": "0x3", + "to": "0x0000000000000000000000000000000000000004", + "input": "0x", + "error": "out of gas", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x7498bb5749c9801f1f7e490baf5f966dbfe4e97b", + "gas": "0x3", + "gasUsed": "0x3", + "to": "0x0000000000000000000000000000000000000004", + "input": "0x", + "error": "out of gas", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x7498bb5749c9801f1f7e490baf5f966dbfe4e97b", + "gas": "0x268cc8", + "gasUsed": "0x29e8d", + "to": "0x304a554a310c7e546dfe434669c62820b7d83490", + "input": "0x612e45a3000000000000000000000000be3ae5cb97c253dda67181c6e34e43f5c275e08b000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000e00000000000000000000000000000000000000000000000000000000000093a80000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "output": "0x0000000000000000000000000000000000000000000000000000000000000003", + "calls": [ + { + "from": "0x304a554a310c7e546dfe434669c62820b7d83490", + "gas": 
"0x3", + "gasUsed": "0x3", + "to": "0x0000000000000000000000000000000000000004", + "input": "0x", + "error": "out of gas", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x304a554a310c7e546dfe434669c62820b7d83490", + "gas": "0x3", + "gasUsed": "0x3", + "to": "0x0000000000000000000000000000000000000004", + "input": "0x", + "error": "out of gas", + "value": "0x0", + "type": "CALL" + } + ], + "logs": [ + { + "address": "0x304a554a310c7e546dfe434669c62820b7d83490", + "topics": [ + "0x5790de2c279e58269b93b12828f56fd5f2bc8ad15e61ce08572585c81a38756f", + "0x0000000000000000000000000000000000000000000000000000000000000003" + ], + "data": "0x000000000000000000000000be3ae5cb97c253dda67181c6e34e43f5c275e08b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000" + } + ], + "value": "0x0", + "type": "CALL" + } + ], + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x03e3d4561a8f8e975fdcd798d32857a20cf25e7e", + "gas": "0x23eb79", + "gasUsed": "0x112", + "to": "0x7498bb5749c9801f1f7e490baf5f966dbfe4e97b", + "input": "0x400e3949", + "output": "0x0000000000000000000000000000000000000000000000000000000000000003", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x03e3d4561a8f8e975fdcd798d32857a20cf25e7e", + "gas": "0x23e8e8", + "gasUsed": "0x30cf5", + "to": "0x7498bb5749c9801f1f7e490baf5f966dbfe4e97b", + "input": "0xff2f4bd2", + "calls": [ + { + "from": "0x7498bb5749c9801f1f7e490baf5f966dbfe4e97b", + "gas": "0x2383ce", + "gasUsed": "0x314", + "to": "0x304a554a310c7e546dfe434669c62820b7d83490", + "input": "0x70a082310000000000000000000000007498bb5749c9801f1f7e490baf5f966dbfe4e97b", + "output": "0x0000000000000000000000000000000000000000000000000000000000000001", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x7498bb5749c9801f1f7e490baf5f966dbfe4e97b", + 
"gas": "0x3", + "gasUsed": "0x3", + "to": "0x0000000000000000000000000000000000000004", + "input": "0x", + "error": "out of gas", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x7498bb5749c9801f1f7e490baf5f966dbfe4e97b", + "gas": "0x3", + "gasUsed": "0x3", + "to": "0x0000000000000000000000000000000000000004", + "input": "0x", + "error": "out of gas", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x7498bb5749c9801f1f7e490baf5f966dbfe4e97b", + "gas": "0x237c41", + "gasUsed": "0x29e8d", + "to": "0x304a554a310c7e546dfe434669c62820b7d83490", + "input": "0x612e45a3000000000000000000000000be3ae5cb97c253dda67181c6e34e43f5c275e08b000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000e00000000000000000000000000000000000000000000000000000000000093a80000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "output": "0x0000000000000000000000000000000000000000000000000000000000000004", + "calls": [ + { + "from": "0x304a554a310c7e546dfe434669c62820b7d83490", + "gas": "0x3", + "gasUsed": "0x3", + "to": "0x0000000000000000000000000000000000000004", + "input": "0x", + "error": "out of gas", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x304a554a310c7e546dfe434669c62820b7d83490", + "gas": "0x3", + "gasUsed": "0x3", + "to": "0x0000000000000000000000000000000000000004", + "input": "0x", + "error": "out of gas", + "value": "0x0", + "type": "CALL" + } + ], + "logs": [ + { + "address": "0x304a554a310c7e546dfe434669c62820b7d83490", + "topics": [ + "0x5790de2c279e58269b93b12828f56fd5f2bc8ad15e61ce08572585c81a38756f", + "0x0000000000000000000000000000000000000000000000000000000000000004" + ], + "data": 
"0x000000000000000000000000be3ae5cb97c253dda67181c6e34e43f5c275e08b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000" + } + ], + "value": "0x0", + "type": "CALL" + } + ], + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x03e3d4561a8f8e975fdcd798d32857a20cf25e7e", + "gas": "0x20daf2", + "gasUsed": "0x112", + "to": "0x7498bb5749c9801f1f7e490baf5f966dbfe4e97b", + "input": "0x400e3949", + "output": "0x0000000000000000000000000000000000000000000000000000000000000004", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x03e3d4561a8f8e975fdcd798d32857a20cf25e7e", + "gas": "0x20d861", + "gasUsed": "0x30cf5", + "to": "0x7498bb5749c9801f1f7e490baf5f966dbfe4e97b", + "input": "0xff2f4bd2", + "calls": [ + { + "from": "0x7498bb5749c9801f1f7e490baf5f966dbfe4e97b", + "gas": "0x207347", + "gasUsed": "0x314", + "to": "0x304a554a310c7e546dfe434669c62820b7d83490", + "input": "0x70a082310000000000000000000000007498bb5749c9801f1f7e490baf5f966dbfe4e97b", + "output": "0x0000000000000000000000000000000000000000000000000000000000000001", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x7498bb5749c9801f1f7e490baf5f966dbfe4e97b", + "gas": "0x3", + "gasUsed": "0x3", + "to": "0x0000000000000000000000000000000000000004", + "input": "0x", + "error": "out of gas", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x7498bb5749c9801f1f7e490baf5f966dbfe4e97b", + "gas": "0x3", + "gasUsed": "0x3", + "to": "0x0000000000000000000000000000000000000004", + "input": "0x", + "error": "out of gas", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x7498bb5749c9801f1f7e490baf5f966dbfe4e97b", + "gas": "0x206bba", + "gasUsed": "0x29e8d", + "to": "0x304a554a310c7e546dfe434669c62820b7d83490", + "input": 
"0x612e45a3000000000000000000000000be3ae5cb97c253dda67181c6e34e43f5c275e08b000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000e00000000000000000000000000000000000000000000000000000000000093a80000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "output": "0x0000000000000000000000000000000000000000000000000000000000000005", + "calls": [ + { + "from": "0x304a554a310c7e546dfe434669c62820b7d83490", + "gas": "0x3", + "gasUsed": "0x3", + "to": "0x0000000000000000000000000000000000000004", + "input": "0x", + "error": "out of gas", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x304a554a310c7e546dfe434669c62820b7d83490", + "gas": "0x3", + "gasUsed": "0x3", + "to": "0x0000000000000000000000000000000000000004", + "input": "0x", + "error": "out of gas", + "value": "0x0", + "type": "CALL" + } + ], + "logs": [ + { + "address": "0x304a554a310c7e546dfe434669c62820b7d83490", + "topics": [ + "0x5790de2c279e58269b93b12828f56fd5f2bc8ad15e61ce08572585c81a38756f", + "0x0000000000000000000000000000000000000000000000000000000000000005" + ], + "data": "0x000000000000000000000000be3ae5cb97c253dda67181c6e34e43f5c275e08b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000" + } + ], + "value": "0x0", + "type": "CALL" + } + ], + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x03e3d4561a8f8e975fdcd798d32857a20cf25e7e", + "gas": "0x1dca6b", + "gasUsed": "0x112", + "to": "0x7498bb5749c9801f1f7e490baf5f966dbfe4e97b", + "input": "0x400e3949", + "output": 
"0x0000000000000000000000000000000000000000000000000000000000000005", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x03e3d4561a8f8e975fdcd798d32857a20cf25e7e", + "gas": "0x1dc7da", + "gasUsed": "0x30cf5", + "to": "0x7498bb5749c9801f1f7e490baf5f966dbfe4e97b", + "input": "0xff2f4bd2", + "calls": [ + { + "from": "0x7498bb5749c9801f1f7e490baf5f966dbfe4e97b", + "gas": "0x1d62c0", + "gasUsed": "0x314", + "to": "0x304a554a310c7e546dfe434669c62820b7d83490", + "input": "0x70a082310000000000000000000000007498bb5749c9801f1f7e490baf5f966dbfe4e97b", + "output": "0x0000000000000000000000000000000000000000000000000000000000000001", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x7498bb5749c9801f1f7e490baf5f966dbfe4e97b", + "gas": "0x3", + "gasUsed": "0x3", + "to": "0x0000000000000000000000000000000000000004", + "input": "0x", + "error": "out of gas", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x7498bb5749c9801f1f7e490baf5f966dbfe4e97b", + "gas": "0x3", + "gasUsed": "0x3", + "to": "0x0000000000000000000000000000000000000004", + "input": "0x", + "error": "out of gas", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x7498bb5749c9801f1f7e490baf5f966dbfe4e97b", + "gas": "0x1d5b33", + "gasUsed": "0x29e8d", + "to": "0x304a554a310c7e546dfe434669c62820b7d83490", + "input": "0x612e45a3000000000000000000000000be3ae5cb97c253dda67181c6e34e43f5c275e08b000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000e00000000000000000000000000000000000000000000000000000000000093a80000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "output": "0x0000000000000000000000000000000000000000000000000000000000000006", + "calls": [ + { + "from": "0x304a554a310c7e546dfe434669c62820b7d83490", + "gas": 
"0x3", + "gasUsed": "0x3", + "to": "0x0000000000000000000000000000000000000004", + "input": "0x", + "error": "out of gas", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x304a554a310c7e546dfe434669c62820b7d83490", + "gas": "0x3", + "gasUsed": "0x3", + "to": "0x0000000000000000000000000000000000000004", + "input": "0x", + "error": "out of gas", + "value": "0x0", + "type": "CALL" + } + ], + "logs": [ + { + "address": "0x304a554a310c7e546dfe434669c62820b7d83490", + "topics": [ + "0x5790de2c279e58269b93b12828f56fd5f2bc8ad15e61ce08572585c81a38756f", + "0x0000000000000000000000000000000000000000000000000000000000000006" + ], + "data": "0x000000000000000000000000be3ae5cb97c253dda67181c6e34e43f5c275e08b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000" + } + ], + "value": "0x0", + "type": "CALL" + } + ], + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x03e3d4561a8f8e975fdcd798d32857a20cf25e7e", + "gas": "0x1ab9e4", + "gasUsed": "0x112", + "to": "0x7498bb5749c9801f1f7e490baf5f966dbfe4e97b", + "input": "0x400e3949", + "output": "0x0000000000000000000000000000000000000000000000000000000000000006", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x03e3d4561a8f8e975fdcd798d32857a20cf25e7e", + "gas": "0x1ab753", + "gasUsed": "0x30cf5", + "to": "0x7498bb5749c9801f1f7e490baf5f966dbfe4e97b", + "input": "0xff2f4bd2", + "calls": [ + { + "from": "0x7498bb5749c9801f1f7e490baf5f966dbfe4e97b", + "gas": "0x1a5239", + "gasUsed": "0x314", + "to": "0x304a554a310c7e546dfe434669c62820b7d83490", + "input": "0x70a082310000000000000000000000007498bb5749c9801f1f7e490baf5f966dbfe4e97b", + "output": "0x0000000000000000000000000000000000000000000000000000000000000001", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x7498bb5749c9801f1f7e490baf5f966dbfe4e97b", + 
"gas": "0x3", + "gasUsed": "0x3", + "to": "0x0000000000000000000000000000000000000004", + "input": "0x", + "error": "out of gas", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x7498bb5749c9801f1f7e490baf5f966dbfe4e97b", + "gas": "0x3", + "gasUsed": "0x3", + "to": "0x0000000000000000000000000000000000000004", + "input": "0x", + "error": "out of gas", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x7498bb5749c9801f1f7e490baf5f966dbfe4e97b", + "gas": "0x1a4aac", + "gasUsed": "0x29e8d", + "to": "0x304a554a310c7e546dfe434669c62820b7d83490", + "input": "0x612e45a3000000000000000000000000be3ae5cb97c253dda67181c6e34e43f5c275e08b000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000e00000000000000000000000000000000000000000000000000000000000093a80000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "output": "0x0000000000000000000000000000000000000000000000000000000000000007", + "calls": [ + { + "from": "0x304a554a310c7e546dfe434669c62820b7d83490", + "gas": "0x3", + "gasUsed": "0x3", + "to": "0x0000000000000000000000000000000000000004", + "input": "0x", + "error": "out of gas", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x304a554a310c7e546dfe434669c62820b7d83490", + "gas": "0x3", + "gasUsed": "0x3", + "to": "0x0000000000000000000000000000000000000004", + "input": "0x", + "error": "out of gas", + "value": "0x0", + "type": "CALL" + } + ], + "logs": [ + { + "address": "0x304a554a310c7e546dfe434669c62820b7d83490", + "topics": [ + "0x5790de2c279e58269b93b12828f56fd5f2bc8ad15e61ce08572585c81a38756f", + "0x0000000000000000000000000000000000000000000000000000000000000007" + ], + "data": 
"0x000000000000000000000000be3ae5cb97c253dda67181c6e34e43f5c275e08b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000" + } + ], + "value": "0x0", + "type": "CALL" + } + ], + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x03e3d4561a8f8e975fdcd798d32857a20cf25e7e", + "gas": "0x17a95d", + "gasUsed": "0x112", + "to": "0x7498bb5749c9801f1f7e490baf5f966dbfe4e97b", + "input": "0x400e3949", + "output": "0x0000000000000000000000000000000000000000000000000000000000000007", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x03e3d4561a8f8e975fdcd798d32857a20cf25e7e", + "gas": "0x17a6cc", + "gasUsed": "0x30cf5", + "to": "0x7498bb5749c9801f1f7e490baf5f966dbfe4e97b", + "input": "0xff2f4bd2", + "calls": [ + { + "from": "0x7498bb5749c9801f1f7e490baf5f966dbfe4e97b", + "gas": "0x1741b2", + "gasUsed": "0x314", + "to": "0x304a554a310c7e546dfe434669c62820b7d83490", + "input": "0x70a082310000000000000000000000007498bb5749c9801f1f7e490baf5f966dbfe4e97b", + "output": "0x0000000000000000000000000000000000000000000000000000000000000001", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x7498bb5749c9801f1f7e490baf5f966dbfe4e97b", + "gas": "0x3", + "gasUsed": "0x3", + "to": "0x0000000000000000000000000000000000000004", + "input": "0x", + "error": "out of gas", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x7498bb5749c9801f1f7e490baf5f966dbfe4e97b", + "gas": "0x3", + "gasUsed": "0x3", + "to": "0x0000000000000000000000000000000000000004", + "input": "0x", + "error": "out of gas", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x7498bb5749c9801f1f7e490baf5f966dbfe4e97b", + "gas": "0x173a25", + "gasUsed": "0x29e8d", + "to": "0x304a554a310c7e546dfe434669c62820b7d83490", + "input": 
"0x612e45a3000000000000000000000000be3ae5cb97c253dda67181c6e34e43f5c275e08b000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000e00000000000000000000000000000000000000000000000000000000000093a80000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "output": "0x0000000000000000000000000000000000000000000000000000000000000008", + "calls": [ + { + "from": "0x304a554a310c7e546dfe434669c62820b7d83490", + "gas": "0x3", + "gasUsed": "0x3", + "to": "0x0000000000000000000000000000000000000004", + "input": "0x", + "error": "out of gas", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x304a554a310c7e546dfe434669c62820b7d83490", + "gas": "0x3", + "gasUsed": "0x3", + "to": "0x0000000000000000000000000000000000000004", + "input": "0x", + "error": "out of gas", + "value": "0x0", + "type": "CALL" + } + ], + "logs": [ + { + "address": "0x304a554a310c7e546dfe434669c62820b7d83490", + "topics": [ + "0x5790de2c279e58269b93b12828f56fd5f2bc8ad15e61ce08572585c81a38756f", + "0x0000000000000000000000000000000000000000000000000000000000000008" + ], + "data": "0x000000000000000000000000be3ae5cb97c253dda67181c6e34e43f5c275e08b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000" + } + ], + "value": "0x0", + "type": "CALL" + } + ], + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x03e3d4561a8f8e975fdcd798d32857a20cf25e7e", + "gas": "0x1498d6", + "gasUsed": "0x112", + "to": "0x7498bb5749c9801f1f7e490baf5f966dbfe4e97b", + "input": "0x400e3949", + "output": 
"0x0000000000000000000000000000000000000000000000000000000000000008", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x03e3d4561a8f8e975fdcd798d32857a20cf25e7e", + "gas": "0x149645", + "gasUsed": "0x30cf5", + "to": "0x7498bb5749c9801f1f7e490baf5f966dbfe4e97b", + "input": "0xff2f4bd2", + "calls": [ + { + "from": "0x7498bb5749c9801f1f7e490baf5f966dbfe4e97b", + "gas": "0x14312b", + "gasUsed": "0x314", + "to": "0x304a554a310c7e546dfe434669c62820b7d83490", + "input": "0x70a082310000000000000000000000007498bb5749c9801f1f7e490baf5f966dbfe4e97b", + "output": "0x0000000000000000000000000000000000000000000000000000000000000001", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x7498bb5749c9801f1f7e490baf5f966dbfe4e97b", + "gas": "0x3", + "gasUsed": "0x3", + "to": "0x0000000000000000000000000000000000000004", + "input": "0x", + "error": "out of gas", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x7498bb5749c9801f1f7e490baf5f966dbfe4e97b", + "gas": "0x3", + "gasUsed": "0x3", + "to": "0x0000000000000000000000000000000000000004", + "input": "0x", + "error": "out of gas", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x7498bb5749c9801f1f7e490baf5f966dbfe4e97b", + "gas": "0x14299e", + "gasUsed": "0x29e8d", + "to": "0x304a554a310c7e546dfe434669c62820b7d83490", + "input": "0x612e45a3000000000000000000000000be3ae5cb97c253dda67181c6e34e43f5c275e08b000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000e00000000000000000000000000000000000000000000000000000000000093a80000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "output": "0x0000000000000000000000000000000000000000000000000000000000000009", + "calls": [ + { + "from": "0x304a554a310c7e546dfe434669c62820b7d83490", + "gas": 
"0x3", + "gasUsed": "0x3", + "to": "0x0000000000000000000000000000000000000004", + "input": "0x", + "error": "out of gas", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x304a554a310c7e546dfe434669c62820b7d83490", + "gas": "0x3", + "gasUsed": "0x3", + "to": "0x0000000000000000000000000000000000000004", + "input": "0x", + "error": "out of gas", + "value": "0x0", + "type": "CALL" + } + ], + "logs": [ + { + "address": "0x304a554a310c7e546dfe434669c62820b7d83490", + "topics": [ + "0x5790de2c279e58269b93b12828f56fd5f2bc8ad15e61ce08572585c81a38756f", + "0x0000000000000000000000000000000000000000000000000000000000000009" + ], + "data": "0x000000000000000000000000be3ae5cb97c253dda67181c6e34e43f5c275e08b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000" + } + ], + "value": "0x0", + "type": "CALL" + } + ], + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x03e3d4561a8f8e975fdcd798d32857a20cf25e7e", + "gas": "0x11884f", + "gasUsed": "0x112", + "to": "0x7498bb5749c9801f1f7e490baf5f966dbfe4e97b", + "input": "0x400e3949", + "output": "0x0000000000000000000000000000000000000000000000000000000000000009", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x03e3d4561a8f8e975fdcd798d32857a20cf25e7e", + "gas": "0x1185be", + "gasUsed": "0x30cf5", + "to": "0x7498bb5749c9801f1f7e490baf5f966dbfe4e97b", + "input": "0xff2f4bd2", + "calls": [ + { + "from": "0x7498bb5749c9801f1f7e490baf5f966dbfe4e97b", + "gas": "0x1120a4", + "gasUsed": "0x314", + "to": "0x304a554a310c7e546dfe434669c62820b7d83490", + "input": "0x70a082310000000000000000000000007498bb5749c9801f1f7e490baf5f966dbfe4e97b", + "output": "0x0000000000000000000000000000000000000000000000000000000000000001", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x7498bb5749c9801f1f7e490baf5f966dbfe4e97b", + 
"gas": "0x3", + "gasUsed": "0x3", + "to": "0x0000000000000000000000000000000000000004", + "input": "0x", + "error": "out of gas", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x7498bb5749c9801f1f7e490baf5f966dbfe4e97b", + "gas": "0x3", + "gasUsed": "0x3", + "to": "0x0000000000000000000000000000000000000004", + "input": "0x", + "error": "out of gas", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x7498bb5749c9801f1f7e490baf5f966dbfe4e97b", + "gas": "0x111917", + "gasUsed": "0x29e8d", + "to": "0x304a554a310c7e546dfe434669c62820b7d83490", + "input": "0x612e45a3000000000000000000000000be3ae5cb97c253dda67181c6e34e43f5c275e08b000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000e00000000000000000000000000000000000000000000000000000000000093a80000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "output": "0x000000000000000000000000000000000000000000000000000000000000000a", + "calls": [ + { + "from": "0x304a554a310c7e546dfe434669c62820b7d83490", + "gas": "0x3", + "gasUsed": "0x3", + "to": "0x0000000000000000000000000000000000000004", + "input": "0x", + "error": "out of gas", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x304a554a310c7e546dfe434669c62820b7d83490", + "gas": "0x3", + "gasUsed": "0x3", + "to": "0x0000000000000000000000000000000000000004", + "input": "0x", + "error": "out of gas", + "value": "0x0", + "type": "CALL" + } + ], + "logs": [ + { + "address": "0x304a554a310c7e546dfe434669c62820b7d83490", + "topics": [ + "0x5790de2c279e58269b93b12828f56fd5f2bc8ad15e61ce08572585c81a38756f", + "0x000000000000000000000000000000000000000000000000000000000000000a" + ], + "data": 
"0x000000000000000000000000be3ae5cb97c253dda67181c6e34e43f5c275e08b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000" + } + ], + "value": "0x0", + "type": "CALL" + } + ], + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x03e3d4561a8f8e975fdcd798d32857a20cf25e7e", + "gas": "0xe77c8", + "gasUsed": "0x112", + "to": "0x7498bb5749c9801f1f7e490baf5f966dbfe4e97b", + "input": "0x400e3949", + "output": "0x000000000000000000000000000000000000000000000000000000000000000a", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x03e3d4561a8f8e975fdcd798d32857a20cf25e7e", + "gas": "0xe7537", + "gasUsed": "0x1eafd", + "to": "0x4fd27b205895e698fa350f7ea57cec8a21927fcd", + "input": "0x975057e7", + "calls": [ + { + "from": "0x4fd27b205895e698fa350f7ea57cec8a21927fcd", + "gas": "0xe0f53", + "gasUsed": "0x314", + "to": "0x304a554a310c7e546dfe434669c62820b7d83490", + "input": "0x70a082310000000000000000000000004fd27b205895e698fa350f7ea57cec8a21927fcd", + "output": "0x000000000000000000000000000000000000000000030328a3f333ac2fb5f4a4", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x4fd27b205895e698fa350f7ea57cec8a21927fcd", + "gas": "0xe096d", + "gasUsed": "0x9a62", + "to": "0x304a554a310c7e546dfe434669c62820b7d83490", + "input": "0xa9059cbb0000000000000000000000007ccbc69292c7a6d7b538c91f3b283de97906cf3000000000000000000000000000000000000000000001010d8bfbbbe40fe7518c", + "output": "0x0000000000000000000000000000000000000000000000000000000000000001", + "logs": [ + { + "address": "0x304a554a310c7e546dfe434669c62820b7d83490", + "topics": [ + "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef", + "0x0000000000000000000000004fd27b205895e698fa350f7ea57cec8a21927fcd", + "0x0000000000000000000000007ccbc69292c7a6d7b538c91f3b283de97906cf30" + ], + "data": 
"0x00000000000000000000000000000000000000000001010d8bfbbbe40fe7518c" + } + ], + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x4fd27b205895e698fa350f7ea57cec8a21927fcd", + "gas": "0xd6871", + "gasUsed": "0x9a62", + "to": "0x304a554a310c7e546dfe434669c62820b7d83490", + "input": "0xa9059cbb0000000000000000000000001b9ec8ba24630b75a7a958153ffff56dd6d4b6a200000000000000000000000000000000000000000001010d8bfbbbe40fe7518c", + "output": "0x0000000000000000000000000000000000000000000000000000000000000001", + "logs": [ + { + "address": "0x304a554a310c7e546dfe434669c62820b7d83490", + "topics": [ + "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef", + "0x0000000000000000000000004fd27b205895e698fa350f7ea57cec8a21927fcd", + "0x0000000000000000000000001b9ec8ba24630b75a7a958153ffff56dd6d4b6a2" + ], + "data": "0x00000000000000000000000000000000000000000001010d8bfbbbe40fe7518c" + } + ], + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x4fd27b205895e698fa350f7ea57cec8a21927fcd", + "gas": "0xcc775", + "gasUsed": "0x9a62", + "to": "0x304a554a310c7e546dfe434669c62820b7d83490", + "input": "0xa9059cbb000000000000000000000000c3a2c744ad1f5253c736875b93bacce5b01b060b00000000000000000000000000000000000000000001010d8bfbbbe40fe7518c", + "output": "0x0000000000000000000000000000000000000000000000000000000000000001", + "logs": [ + { + "address": "0x304a554a310c7e546dfe434669c62820b7d83490", + "topics": [ + "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef", + "0x0000000000000000000000004fd27b205895e698fa350f7ea57cec8a21927fcd", + "0x000000000000000000000000c3a2c744ad1f5253c736875b93bacce5b01b060b" + ], + "data": "0x00000000000000000000000000000000000000000001010d8bfbbbe40fe7518c" + } + ], + "value": "0x0", + "type": "CALL" + } + ], + "logs": [ + { + "address": "0x4fd27b205895e698fa350f7ea57cec8a21927fcd", + "topics": [ + "0xc6d8c0af6d21f291e7c359603aa97e0ed500f04db6e983b9fce75a91c6b8da6b" + ], + "data": 
"0x00000000000000000000000000000000000000000001010d8bfbbbe40fe7518c" + }, + { + "address": "0x4fd27b205895e698fa350f7ea57cec8a21927fcd", + "topics": [ + "0xc6d8c0af6d21f291e7c359603aa97e0ed500f04db6e983b9fce75a91c6b8da6b" + ], + "data": "0x00000000000000000000000000000000000000000001010d8bfbbbe40fe7518c" + }, + { + "address": "0x4fd27b205895e698fa350f7ea57cec8a21927fcd", + "topics": [ + "0xc6d8c0af6d21f291e7c359603aa97e0ed500f04db6e983b9fce75a91c6b8da6b" + ], + "data": "0x00000000000000000000000000000000000000000001010d8bfbbbe40fe7518c" + } + ], + "value": "0x0", + "type": "CALL" + } + ], + "value": "0x0", + "type": "CALL" + } +} diff --git a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/multilogs.json b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/multilogs.json new file mode 100644 index 00000000..1ffffd24 --- /dev/null +++ b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/multilogs.json @@ -0,0 +1,530 @@ +{ + "genesis": { + "difficulty": "7507253814130", + "extraData": "0xd783010400844765746887676f312e352e31856c696e7578", + "gasLimit": "3141592", + "hash": "0x3d9d19618f67bbb7708403fe9bda131fbade0449d2ac12bf3b140b4269112826", + "miner": "0x63a9975ba31b0b9626b34300f7f627147df1f526", + "mixHash": "0x50aaa8973eadd4bbfc7f5b59d5be52f6a1be2d38f40b5a0786a24b90257520da", + "nonce": "0x3547956c62c256b9", + "number": "595531", + "stateRoot": "0x79d00dd270bffc48d89fa55842f63f840981121378da8c6de4d479535f25ed6a", + "timestamp": "1448471472", + "totalDifficulty": "3448100174991667199", + "alloc": { + "0x2a65aca4d5fc5b5c859090a6c34d164135398226": { + "balance": "0x44dc051cccdfd2e132", + "nonce": "39602" + }, + "0x350e0ffc780a6a75b44cc52e1ff9092870668945": { + "balance": "0xe37111b7c79406c0", + "code": 
"0x606060405236156100f05760e060020a60003504631ff6c70581146100f257806347980c0d146100fd57806353ba9c2f146101085780635ea8cd12146101c957806369d640fd146101f05780637ce3489b146102405780637d1bb97a1461026b5780637fd6f15c146103e55780638bf50628146103f057806390a248f814610411578063a8f37bb214610438578063b019e0171461046a578063b4c70cea1461059b578063cf955f34146106a1578063d229b54b146106bd578063d54b4a04146106e4578063e021fadb146106f1578063e45be8eb14610858578063eddfa7c814610863578063f2a75fe41461095d575b005b6109a5621e84845481565b6109a5621e84865481565b6109b76004356024356000808080806003876103e881101561000257506107d0880201866103e8811015610002579090600202016000505461ffff168152602081019190915260400160002054600160a060020a031692506003856103e881101561000257506107d0860201846103e88110156100025790906002020160005054620100009004600390810b9250856103e881101561000257506107d0860201846103e8811015610002579090600202016000506001015490509250925092565b6100f0600435621e848354600160a060020a0390811633909116141561026857621e848755565b6109e36004356024356003826103e881101561000257506107d0830201816103e88110156100025790906002020160005080546001919091015461ffff821693506201000090910460030b915083565b6100f0600435621e848354600160a060020a0390811633909116141561026857621e84858190555b50565b610a0a600435617d00604051908101604052806103e8905b600081526020019060019003908161028357505060408051617d0081019091526103e8815b60008152602001906001900390816102a857505060408051617d0081019091526103e8815b60008152602001906001900390816102cd5750600090505b6103e861ffff82161015610d09576000806003836103e8811015610002576107d002018150876103e881101561000257600202016000505461ffff168152602081019190915260400160002054600160a060020a031684826103e8811015610002575050602082028501526003816103e8811015610002576107d00201600050856103e8811015610002579090600202016000505462010000900460030b83826103e8811015610002575050600390810b60208302850152816103e8811015610002576107d00201600050856103e8811015610002579090600202016000506001015482826103e8811015610002575050602082028301526001016102e5565b6
109a5621e84855481565b610a59600435600060208190529081526040902054600160a060020a031681565b6100f0600435621e848354600160a060020a0390811633909116141561026857621e848655565b6100f060043560243560443560643560843560a435610a8e868684866101000288620100000286607f02010101610870565b604080516004803580820135602081810280860182019096528185526100f09593946024949093850192918291908501908490808284375050604080518735808a013560208181028085018201909552818452989a996044999398509190910195509350839250850190849080828437505060408051606435808a013560208181028085018201909552818452989a9935999860849850929650602491909101945092508291908501908490808284375050604080519635808901356020818102808b018201909452818a5297999860a4989097506024929092019550935083925085019084908082843750949650505050505050621e848354600090600160a060020a03908116339091161415610a8e575b8551811015610a8e57606060405190810160405280610d1186610ebc565b60408051602060248035600481810135601f81018590048502860185019096528585526109a5958135959194604494929390920191819084018382808284375094965050933593505050505b6000831515610699577ffd33e90d0eac940755277aa91045b95664988beeeafc4ed7d1281a6d83afbc003384846040518084600160a060020a03168152602001806020018381526020018281038252848181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156106895780820380516001836020036101000a031916815260200191505b5094505050505060405180910390a15b509192915050565b610a7660043560016020526000908152604090205461ffff1681565b6100f0600435621e848354600160a060020a0390811633909116141561026857621e848455565b610a7660025461ffff1681565b604080516004803580820135602081810280860182019096528185526109a59593946024949093850192918291908501908490808284375050604080518735808a013560208181028085018201909552818452989a9960449993985091909101955093508392508501908490808284375050604080519635808901356020818102808b018201909452818a529799986064989097506024929092019550935083925085019084908082843750506040805196358089013560208181028a81018201909452818a529799986084989097506024929092019550935083925
0850190849080828437509496505050505050506000600060006000610ad68751895114606060405190810160405280602381526020017f446966666572656e74206e756d626572206f66207856616c732061732079566181526020017f6c732e00000000000000000000000000000000000000000000000000000000008152602001508a516105e7565b6109a5621e84875481565b6100f06004356024356044355b6000610a96848484345b6000808080808060038a6103e8811015610002576107d00201896103e88110156100025760020201805461ffff16825260208290526040822054621e8484546001830154621e848654939850600160a060020a03928316975060649181028290049650929092029190910492503316841415610f4d57610fbc82341015606060405190810160405280602e81526020017f4368616e67696e6720796f7572206f776e20706978656c20636f73747320313081526020017f25206f66206974732076616c7565000000000000000000000000000000000000815260200150846105e7565b6100f0621e848354600160a060020a039081163390911614156109a357604051621e848354600160a060020a03908116916000913016319082818181858883f150505050505b565b60408051918252519081900360200190f35b60408051600160a060020a0394909416845260039290920b602084015282820152519081900360600190f35b6040805161ffff94909416845260039290920b602084015282820152519081900360600190f35b6040518084617d008083818460006004610bc7f150918201918591508083818460006004610bc7f15061fa00840192508491508083818460006004610bc7f15062017700965092945050505050f35b60408051600160a060020a03929092168252519081900360200190f35b6040805161ffff929092168252519081900360200190f35b505050505050565b90506000811115610ac25760405133600160a060020a031690600090839082818181858883f150505050505b50505050565b93505b505050949350505050565b1580610b4e5750610b4c8651895114606060405190810160405280602481526020017f446966666572656e74206e756d626572206f66207856616c7320617320636f6c81526020017f6f72732e000000000000000000000000000000000000000000000000000000008152602001508a516105e7565b155b80610bc55750610bc38551895114606060405190810160405280602481526020017f446966666572656e74206e756d626572206f66207856616c732061732070726981526020017f6365732e0000000000000000000000000000000000000000000000000000000
08152602001508a516105e7565b155b15610bd35760009350610acb565b5034915060009050805b8751811015610c63578481815181101561000257602090810290910101519092039160008310610d0157610cfb88828151811015610002579060200190602002015188838151811015610002579060200190602002015188848151811015610002579060200190602002015188858151811015610002579060200190602002015161087a565b6000821115610c8d5760405133600160a060020a031690600090849082818181858883f150505050505b610ac86000841015606060405190810160405280602181526020017f56616c756520776173206c657373207468616e2073756d206f6620707269636581526020017f7300000000000000000000000000000000000000000000000000000000000000815260200150856105e7565b91909101905b600101610bdd565b509193909250565b8152602001848381518110156100025790602001906020020151815260200183838151811015610002579060200190602002015181526020015060036000508783815181101561000257906020019060200201516103e8811015610002576107d002016000508683815181101561000257906020019060200201516103e8811015610002576002020160005081518154602084015160e060020a90810204620100000261ffff199190911690911765ffffffff00001916178155604091909101516001919091015560010161057d565b8454600061ffff919091161115610e225750604051621e84855460649088020490600160a060020a03851690600090838a039082818181858883f150505050505b845460018601546040805161ffff8e811682528d166020820152600160a060020a038881168284015262010000909404600390810b810b606083015260808201939093523390931660a0840152908a900b60c083015260e08201899052517fcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42918190036101000190a1606060405190810160405280611143335b600160a060020a03811660009081526001602052604081205461ffff1690811415610f485750604060008181206002805461ffff1981811661ffff928316600190810191821790945591821685526020858152958520805473ffffffffffffffffffffffffffffffffffffffff191688179055600160a060020a03871690945293528054909116821790555b919050565b60408051621e848754606082018352602182527f4d696e696d756d20706978656c2070726963652069732035302066696e6e657960208301527f2e0000000000000000000000000000000000000000000
0000000000000000000928201929092526110c29134101590896105e7565b1515610fca578695506110b5565b33600160a060020a031684600160a060020a03161415610de157604080518654600188015461ffff8e811684528d166020840152600160a060020a03881683850181905262010000909204600390810b810b60608501526080840182905260a0840192909252908b900b60c083015260e082015290517fcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42918190036101000190a18760038b6103e8811015610002576107d002018a6103e88110156100025760020201805465ffffffff0000191660e060020a92830292909204620100000291909117905581870395505b5050505050949350505050565b15806111365750610fbc83341015606060405190810160405280603281526020017f56616c7565206d7573742062652031302520686967686572207468616e20637581526020017f7272656e7420706978656c2070726963652e0000000000000000000000000000815260200150856105e7565b15610fca578695506110b5565b8152602081018a905260400188905260038b6103e8811015610002576107d002018a6103e8811015610002576002020160005081518154602084015160e060020a90810204620100000261ffff199190911690911765ffffffff000019161781556040919091015160019190910155600095506110b556", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000175901": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000175902": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000175903": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000175904": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x00000000000000000000000000000000000000000000000000000000001760c7": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x00000000000000000000000000000000000000000000000000000000001760c8": "0x0000000000000000000000000000000000000000000000000000000000000000", + 
"0x00000000000000000000000000000000000000000000000000000000001760c9": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x00000000000000000000000000000000000000000000000000000000001760ca": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x00000000000000000000000000000000000000000000000000000000001760cb": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x00000000000000000000000000000000000000000000000000000000001760cc": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x00000000000000000000000000000000000000000000000000000000001760cd": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x00000000000000000000000000000000000000000000000000000000001760ce": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x00000000000000000000000000000000000000000000000000000000001760cf": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x00000000000000000000000000000000000000000000000000000000001760d0": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x00000000000000000000000000000000000000000000000000000000001760d1": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x00000000000000000000000000000000000000000000000000000000001760d2": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x00000000000000000000000000000000000000000000000000000000001760d3": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x00000000000000000000000000000000000000000000000000000000001760d4": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x00000000000000000000000000000000000000000000000000000000001760d5": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x00000000000000000000000000000000000000000000000000000000001760d6": "0x0000000000000000000000000000000000000000000000000000000000000000", + 
"0x00000000000000000000000000000000000000000000000000000000001760d7": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x00000000000000000000000000000000000000000000000000000000001760d8": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x00000000000000000000000000000000000000000000000000000000001760d9": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x00000000000000000000000000000000000000000000000000000000001760da": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x00000000000000000000000000000000000000000000000000000000001760db": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x00000000000000000000000000000000000000000000000000000000001760dc": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x00000000000000000000000000000000000000000000000000000000001760dd": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x00000000000000000000000000000000000000000000000000000000001760de": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x00000000000000000000000000000000000000000000000000000000001760df": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x00000000000000000000000000000000000000000000000000000000001760e0": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000176897": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000176898": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000176899": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x000000000000000000000000000000000000000000000000000000000017689a": "0x0000000000000000000000000000000000000000000000000000000000000000", + 
"0x000000000000000000000000000000000000000000000000000000000017689b": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x000000000000000000000000000000000000000000000000000000000017689c": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x000000000000000000000000000000000000000000000000000000000017689d": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x000000000000000000000000000000000000000000000000000000000017689e": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x000000000000000000000000000000000000000000000000000000000017689f": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x00000000000000000000000000000000000000000000000000000000001768a0": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x00000000000000000000000000000000000000000000000000000000001768a7": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x00000000000000000000000000000000000000000000000000000000001768a8": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x00000000000000000000000000000000000000000000000000000000001768a9": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x00000000000000000000000000000000000000000000000000000000001768aa": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x00000000000000000000000000000000000000000000000000000000001768ab": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x00000000000000000000000000000000000000000000000000000000001768ac": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x00000000000000000000000000000000000000000000000000000000001768ad": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x00000000000000000000000000000000000000000000000000000000001768ae": "0x0000000000000000000000000000000000000000000000000000000000000000", + 
"0x00000000000000000000000000000000000000000000000000000000001768af": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x00000000000000000000000000000000000000000000000000000000001768b0": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000196c37": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000196c38": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000196c39": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000196c3a": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000196c3b": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000196c3c": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000196c3d": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000196c3e": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000196c3f": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000196c40": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000196c45": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000196c46": "0x0000000000000000000000000000000000000000000000000000000000000000", + 
"0x0000000000000000000000000000000000000000000000000000000000196c47": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000196c48": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000196c49": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000196c4a": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000196c4b": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000196c4c": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000196c4d": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000196c4e": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000196c4f": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000196c50": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000197407": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000197408": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000197409": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x000000000000000000000000000000000000000000000000000000000019740a": "0x0000000000000000000000000000000000000000000000000000000000000000", + 
"0x000000000000000000000000000000000000000000000000000000000019740b": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x000000000000000000000000000000000000000000000000000000000019740c": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x000000000000000000000000000000000000000000000000000000000019740d": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x000000000000000000000000000000000000000000000000000000000019740e": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x000000000000000000000000000000000000000000000000000000000019740f": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000197410": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000197411": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000197412": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000197413": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000197414": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000197415": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000197416": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000197417": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000197418": "0x0000000000000000000000000000000000000000000000000000000000000000", + 
"0x0000000000000000000000000000000000000000000000000000000000197419": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x000000000000000000000000000000000000000000000000000000000019741a": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x000000000000000000000000000000000000000000000000000000000019741b": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x000000000000000000000000000000000000000000000000000000000019741c": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x000000000000000000000000000000000000000000000000000000000019741d": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x000000000000000000000000000000000000000000000000000000000019741e": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x000000000000000000000000000000000000000000000000000000000019741f": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000197420": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000197be3": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000197be4": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x00000000000000000000000000000000000000000000000000000000001e8484": "0x000000000000000000000000000000000000000000000000000000000000006e", + "0x00000000000000000000000000000000000000000000000000000000001e8486": "0x0000000000000000000000000000000000000000000000000000000000000001", + "0x00000000000000000000000000000000000000000000000000000000001e8487": "0x0000000000000000000000000000000000000000000000000011c37937e08000", + "0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5": "0x0000000000000000000000000000000000000000000000000000000000000000", + 
"0xe1723559c995b1804c0512df6fe6d061eeb47aff37a3ced3b93f0c1bef247540": "0x0000000000000000000000000000000000000000000000000000000000000007" + } + }, + "0x3fcb0342353c541e210013aaddc2e740b9a33d08": { + "balance": "0x6a0e4be198f18400", + "nonce": "17" + } + }, + "config": { + "chainId": 1, + "homesteadBlock": 1150000, + "daoForkBlock": 1920000, + "daoForkSupport": true, + "eip150Block": 2463000, + "eip150Hash": "0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0", + "eip155Block": 2675000, + "eip158Block": 2675000, + "byzantiumBlock": 4370000, + "constantinopleBlock": 7280000, + "petersburgBlock": 7280000, + "istanbulBlock": 9069000, + "muirGlacierBlock": 9200000, + "berlinBlock": 12244000, + "londonBlock": 12965000, + "arrowGlacierBlock": 13773000, + "grayGlacierBlock": 15050000, + "terminalTotalDifficultyPassed": true, + "ethash": {} + } + }, + "context": { + "number": "595532", + "difficulty": "7503588162862", + "timestamp": "1448471495", + "gasLimit": "3141592", + "miner": "0x2a65aca4d5fc5b5c859090a6c34d164135398226" + }, + "input": 
"0xf91a7311850ba43b7400832dc6c094350e0ffc780a6a75b44cc52e1ff90928706689458803782dace9d90000b91a04e021fadb000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000006e00000000000000000000000000000000000000000000000000000000000000d4000000000000000000000000000000000000000000000000000000000000013a00000000000000000000000000000000000000000000000000000000000000032000000000000000000000000000000000000000000000000000000000000034100000000000000000000000000000000000000000000000000000000000002ff000000000000000000000000000000000000000000000000000000000000034100000000000000000000000000000000000000000000000000000000000002ff000000000000000000000000000000000000000000000000000000000000034100000000000000000000000000000000000000000000000000000000000002ff000000000000000000000000000000000000000000000000000000000000034100000000000000000000000000000000000000000000000000000000000002ff000000000000000000000000000000000000000000000000000000000000034100000000000000000000000000000000000000000000000000000000000002ff000000000000000000000000000000000000000000000000000000000000034100000000000000000000000000000000000000000000000000000000000002ff000000000000000000000000000000000000000000000000000000000000034100000000000000000000000000000000000000000000000000000000000002ff000000000000000000000000000000000000000000000000000000000000034100000000000000000000000000000000000000000000000000000000000002ff000000000000000000000000000000000000000000000000000000000000034100000000000000000000000000000000000000000000000000000000000002ff000000000000000000000000000000000000000000000000000000000000034100000000000000000000000000000000000000000000000000000000000002ff000000000000000000000000000000000000000000000000000000000000034100000000000000000000000000000000000000000000000000000000000002fe000000000000000000000000000000000000000000000000000000000000034200000000000000000000000000000000000000000000000000000000000002fe000000000000000000000000000000000000000
000000000000000000000034200000000000000000000000000000000000000000000000000000000000002fe000000000000000000000000000000000000000000000000000000000000034200000000000000000000000000000000000000000000000000000000000002fe000000000000000000000000000000000000000000000000000000000000034200000000000000000000000000000000000000000000000000000000000002fe000000000000000000000000000000000000000000000000000000000000034200000000000000000000000000000000000000000000000000000000000002fe000000000000000000000000000000000000000000000000000000000000034200000000000000000000000000000000000000000000000000000000000002fe000000000000000000000000000000000000000000000000000000000000034200000000000000000000000000000000000000000000000000000000000002fe000000000000000000000000000000000000000000000000000000000000034200000000000000000000000000000000000000000000000000000000000002fe000000000000000000000000000000000000000000000000000000000000034200000000000000000000000000000000000000000000000000000000000002fe000000000000000000000000000000000000000000000000000000000000034200000000000000000000000000000000000000000000000000000000000002fe000000000000000000000000000000000000000000000000000000000000034200000000000000000000000000000000000000000000000000000000000002fe000000000000000000000000000000000000000000000000000000000000034200000000000000000000000000000000000000000000000000000000000002fe000000000000000000000000000000000000000000000000000000000000034200000000000000000000000000000000000000000000000000000000000002fd000000000000000000000000000000000000000000000000000000000000034300000000000000000000000000000000000000000000000000000000000002fd0000000000000000000000000000000000000000000000000000000000000032000000000000000000000000000000000000000000000000000000000000003900000000000000000000000000000000000000000000000000000000000000360000000000000000000000000000000000000000000000000000000000000036000000000000000000000000000000000000000000000000000000000000003a0000000000000000000000000000000000000000000000000000000
00000003a00000000000000000000000000000000000000000000000000000000000000350000000000000000000000000000000000000000000000000000000000000035000000000000000000000000000000000000000000000000000000000000003b000000000000000000000000000000000000000000000000000000000000003b00000000000000000000000000000000000000000000000000000000000000340000000000000000000000000000000000000000000000000000000000000034000000000000000000000000000000000000000000000000000000000000003c000000000000000000000000000000000000000000000000000000000000003c00000000000000000000000000000000000000000000000000000000000000330000000000000000000000000000000000000000000000000000000000000033000000000000000000000000000000000000000000000000000000000000003d000000000000000000000000000000000000000000000000000000000000003d00000000000000000000000000000000000000000000000000000000000000320000000000000000000000000000000000000000000000000000000000000032000000000000000000000000000000000000000000000000000000000000003e000000000000000000000000000000000000000000000000000000000000003e00000000000000000000000000000000000000000000000000000000000000380000000000000000000000000000000000000000000000000000000000000038000000000000000000000000000000000000000000000000000000000000003700000000000000000000000000000000000000000000000000000000000000370000000000000000000000000000000000000000000000000000000000000039000000000000000000000000000000000000000000000000000000000000003900000000000000000000000000000000000000000000000000000000000000360000000000000000000000000000000000000000000000000000000000000036000000000000000000000000000000000000000000000000000000000000003a000000000000000000000000000000000000000000000000000000000000003a00000000000000000000000000000000000000000000000000000000000000350000000000000000000000000000000000000000000000000000000000000035000000000000000000000000000000000000000000000000000000000000003b000000000000000000000000000000000000000000000000000000000000003b00000000000000000000000000000000000000000000000000000000000000340000000
000000000000000000000000000000000000000000000000000000034000000000000000000000000000000000000000000000000000000000000003c000000000000000000000000000000000000000000000000000000000000003c00000000000000000000000000000000000000000000000000000000000000330000000000000000000000000000000000000000000000000000000000000033000000000000000000000000000000000000000000000000000000000000003d000000000000000000000000000000000000000000000000000000000000003d00000000000000000000000000000000000000000000000000000000000000320000000000000000000000000000000000000000000000000000000000000032000000000000000000000000000000000000000000000000000000000000003e000000000000000000000000000000000000000000000000000000000000003e0000000000000000000000000000000000000000000000000000000000000038000000000000000000000000000000000000000000000000000000000000003800000000000000000000000000000000000000000000000000000000000000370000000000000000000000000000000000000000000000000000000000000032fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffefefefffffffffffffffffffffffffffffffffffffffffffffffffffffffffffdfdfdffffffffffffffffffffffffffffffffffffffffffffffffffffffffffebebebffffffffffffffffffffffffffffffffffffffffffffffffffffffffff888888ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffb3b3b3fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffcfcfcffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe3e3e3ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff3e3e3effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffdbdbdbfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff4f4f4fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffbfbfbffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffb0b0b0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffefefeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffa0a0a0ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff5b5b5bffffffffffffffffffffffffffffffffffffffffffffffffffffffffffbababaffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffeaeaeaffffffffffffffffffffffffffffffffffffffffffffffffffffffffffa9a9a9ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffb9b9b9fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffbfbfbfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffefefefffffffffffffffffffffffffffffffffffffffffffffffffffffffffffefefeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffbababaffffffffffffffffffffffffffffffffffffffffffffffffffffffffff636363fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff9f9f9ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffeaeaeaffffffffffffffffffffffffffffffffffffffffffffffffffffffffff9c9c9cfffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8f8f8fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffdfdfdfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffcfcfcfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffdfdfdfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
fffffffffffffffffff4d4e53ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff4f494b00000000000000000000000000000000000000000000000000000000000000320000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37
937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080001ca0e8a879dd98a39d735b866ff64d84e9c144a17bcab106cf2f1327b1272db06aaca02ab279a2459b5e30dfea0bc8a888c7d2a190740090352b4a7aded30c45490af9", + "tracerConfig": { + "withLog": true + }, + "result": { + "from": "0x3fcb0342353c541e210013aaddc2e740b9a33d08", + "gas": "0x2dc6c0", + "gasUsed": "0x2570bf", + "to": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", + "input": 
"0xe021fadb000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000006e00000000000000000000000000000000000000000000000000000000000000d4000000000000000000000000000000000000000000000000000000000000013a00000000000000000000000000000000000000000000000000000000000000032000000000000000000000000000000000000000000000000000000000000034100000000000000000000000000000000000000000000000000000000000002ff000000000000000000000000000000000000000000000000000000000000034100000000000000000000000000000000000000000000000000000000000002ff000000000000000000000000000000000000000000000000000000000000034100000000000000000000000000000000000000000000000000000000000002ff000000000000000000000000000000000000000000000000000000000000034100000000000000000000000000000000000000000000000000000000000002ff000000000000000000000000000000000000000000000000000000000000034100000000000000000000000000000000000000000000000000000000000002ff000000000000000000000000000000000000000000000000000000000000034100000000000000000000000000000000000000000000000000000000000002ff000000000000000000000000000000000000000000000000000000000000034100000000000000000000000000000000000000000000000000000000000002ff000000000000000000000000000000000000000000000000000000000000034100000000000000000000000000000000000000000000000000000000000002ff000000000000000000000000000000000000000000000000000000000000034100000000000000000000000000000000000000000000000000000000000002ff000000000000000000000000000000000000000000000000000000000000034100000000000000000000000000000000000000000000000000000000000002ff000000000000000000000000000000000000000000000000000000000000034100000000000000000000000000000000000000000000000000000000000002fe000000000000000000000000000000000000000000000000000000000000034200000000000000000000000000000000000000000000000000000000000002fe000000000000000000000000000000000000000000000000000000000000034200000000000000000000000000000000000000000000000000000000000002fe00000
0000000000000000000000000000000000000000000000000000000034200000000000000000000000000000000000000000000000000000000000002fe000000000000000000000000000000000000000000000000000000000000034200000000000000000000000000000000000000000000000000000000000002fe000000000000000000000000000000000000000000000000000000000000034200000000000000000000000000000000000000000000000000000000000002fe000000000000000000000000000000000000000000000000000000000000034200000000000000000000000000000000000000000000000000000000000002fe000000000000000000000000000000000000000000000000000000000000034200000000000000000000000000000000000000000000000000000000000002fe000000000000000000000000000000000000000000000000000000000000034200000000000000000000000000000000000000000000000000000000000002fe000000000000000000000000000000000000000000000000000000000000034200000000000000000000000000000000000000000000000000000000000002fe000000000000000000000000000000000000000000000000000000000000034200000000000000000000000000000000000000000000000000000000000002fe000000000000000000000000000000000000000000000000000000000000034200000000000000000000000000000000000000000000000000000000000002fe000000000000000000000000000000000000000000000000000000000000034200000000000000000000000000000000000000000000000000000000000002fe000000000000000000000000000000000000000000000000000000000000034200000000000000000000000000000000000000000000000000000000000002fd000000000000000000000000000000000000000000000000000000000000034300000000000000000000000000000000000000000000000000000000000002fd0000000000000000000000000000000000000000000000000000000000000032000000000000000000000000000000000000000000000000000000000000003900000000000000000000000000000000000000000000000000000000000000360000000000000000000000000000000000000000000000000000000000000036000000000000000000000000000000000000000000000000000000000000003a000000000000000000000000000000000000000000000000000000000000003a0000000000000000000000000000000000000000000000000000000000000035000000000000000000000
0000000000000000000000000000000000000000035000000000000000000000000000000000000000000000000000000000000003b000000000000000000000000000000000000000000000000000000000000003b00000000000000000000000000000000000000000000000000000000000000340000000000000000000000000000000000000000000000000000000000000034000000000000000000000000000000000000000000000000000000000000003c000000000000000000000000000000000000000000000000000000000000003c00000000000000000000000000000000000000000000000000000000000000330000000000000000000000000000000000000000000000000000000000000033000000000000000000000000000000000000000000000000000000000000003d000000000000000000000000000000000000000000000000000000000000003d00000000000000000000000000000000000000000000000000000000000000320000000000000000000000000000000000000000000000000000000000000032000000000000000000000000000000000000000000000000000000000000003e000000000000000000000000000000000000000000000000000000000000003e00000000000000000000000000000000000000000000000000000000000000380000000000000000000000000000000000000000000000000000000000000038000000000000000000000000000000000000000000000000000000000000003700000000000000000000000000000000000000000000000000000000000000370000000000000000000000000000000000000000000000000000000000000039000000000000000000000000000000000000000000000000000000000000003900000000000000000000000000000000000000000000000000000000000000360000000000000000000000000000000000000000000000000000000000000036000000000000000000000000000000000000000000000000000000000000003a000000000000000000000000000000000000000000000000000000000000003a00000000000000000000000000000000000000000000000000000000000000350000000000000000000000000000000000000000000000000000000000000035000000000000000000000000000000000000000000000000000000000000003b000000000000000000000000000000000000000000000000000000000000003b000000000000000000000000000000000000000000000000000000000000003400000000000000000000000000000000000000000000000000000000000000340000000000000000000000000000000000000
00000000000000000000000003c000000000000000000000000000000000000000000000000000000000000003c00000000000000000000000000000000000000000000000000000000000000330000000000000000000000000000000000000000000000000000000000000033000000000000000000000000000000000000000000000000000000000000003d000000000000000000000000000000000000000000000000000000000000003d00000000000000000000000000000000000000000000000000000000000000320000000000000000000000000000000000000000000000000000000000000032000000000000000000000000000000000000000000000000000000000000003e000000000000000000000000000000000000000000000000000000000000003e0000000000000000000000000000000000000000000000000000000000000038000000000000000000000000000000000000000000000000000000000000003800000000000000000000000000000000000000000000000000000000000000370000000000000000000000000000000000000000000000000000000000000032fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffefefefffffffffffffffffffffffffffffffffffffffffffffffffffffffffffdfdfdffffffffffffffffffffffffffffffffffffffffffffffffffffffffffebebebffffffffffffffffffffffffffffffffffffffffffffffffffffffffff888888ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffb3b3b3fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffcfcfcffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe3e3e3ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff3e3e3effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffdbdbdbfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff4f4f4fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffbfbfbffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffb0b0b0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffefefeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffa0a0a0ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff5b5b5bffffffffffffffffffffffffffffffffffffffffffffffffffffffffffbababaffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffeaeaeaffffffffffffffffffffffffffffffffffffffffffffffffffffffffffa9a9a9ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffb9b9b9fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffbfbfbfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffefefefffffffffffffffffffffffffffffffffffffffffffffffffffffffffffefefeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffbababaffffffffffffffffffffffffffffffffffffffffffffffffffffffffff636363fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff9f9f9ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffeaeaeaffffffffffffffffffffffffffffffffffffffffffffffffffffffffff9c9c9cfffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8f8f8fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffdfdfdfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffcfcfcfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffdfdfdffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff4d4e53fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
fffffffffffffffffffffffffffffffffffffffffffffffffffff4f494b00000000000000000000000000000000000000000000000000000000000000320000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e08000000000000000000000000
0000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e080000000000000000000000000000000000000000000000000000011c37937e08000", + "output": "0x0000000000000000000000000000000000000000000000000000000000000001", + "logs": [ + { + "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", + "topics": [ + "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" + ], + "data": 
"0x000000000000000000000000000000000000000000000000000000000000034100000000000000000000000000000000000000000000000000000000000000390000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffefefe0000000000000000000000000000000000000000000000000011c37937e08000" + }, + { + "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", + "topics": [ + "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" + ], + "data": "0x00000000000000000000000000000000000000000000000000000000000002ff00000000000000000000000000000000000000000000000000000000000000360000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffdfdfd0000000000000000000000000000000000000000000000000011c37937e08000" + }, + { + "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", + "topics": [ + "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" + ], + "data": "0x000000000000000000000000000000000000000000000000000000000000034100000000000000000000000000000000000000000000000000000000000000360000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffebebeb0000000000000000000000000000000000000000000000000011c37937e08000" + }, + { + "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", + "topics": [ + 
"0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" + ], + "data": "0x00000000000000000000000000000000000000000000000000000000000002ff000000000000000000000000000000000000000000000000000000000000003a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8888880000000000000000000000000000000000000000000000000011c37937e08000" + }, + { + "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", + "topics": [ + "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" + ], + "data": "0x0000000000000000000000000000000000000000000000000000000000000341000000000000000000000000000000000000000000000000000000000000003a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffb3b3b30000000000000000000000000000000000000000000000000011c37937e08000" + }, + { + "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", + "topics": [ + "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" + ], + "data": "0x00000000000000000000000000000000000000000000000000000000000002ff00000000000000000000000000000000000000000000000000000000000000350000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffcfcfc0000000000000000000000000000000000000000000000000011c37937e08000" + }, + { + "address": 
"0x350e0ffc780a6a75b44cc52e1ff9092870668945", + "topics": [ + "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" + ], + "data": "0x000000000000000000000000000000000000000000000000000000000000034100000000000000000000000000000000000000000000000000000000000000350000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000011c37937e08000" + }, + { + "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", + "topics": [ + "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" + ], + "data": "0x00000000000000000000000000000000000000000000000000000000000002ff000000000000000000000000000000000000000000000000000000000000003b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe3e3e30000000000000000000000000000000000000000000000000011c37937e08000" + }, + { + "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", + "topics": [ + "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" + ], + "data": 
"0x0000000000000000000000000000000000000000000000000000000000000341000000000000000000000000000000000000000000000000000000000000003b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff3e3e3e0000000000000000000000000000000000000000000000000011c37937e08000" + }, + { + "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", + "topics": [ + "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" + ], + "data": "0x00000000000000000000000000000000000000000000000000000000000002ff00000000000000000000000000000000000000000000000000000000000000340000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000011c37937e08000" + }, + { + "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", + "topics": [ + "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" + ], + "data": "0x000000000000000000000000000000000000000000000000000000000000034100000000000000000000000000000000000000000000000000000000000000340000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000011c37937e08000" + }, + { + "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", + "topics": [ + 
"0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" + ], + "data": "0x00000000000000000000000000000000000000000000000000000000000002ff000000000000000000000000000000000000000000000000000000000000003c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000011c37937e08000" + }, + { + "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", + "topics": [ + "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" + ], + "data": "0x0000000000000000000000000000000000000000000000000000000000000341000000000000000000000000000000000000000000000000000000000000003c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffdbdbdb0000000000000000000000000000000000000000000000000011c37937e08000" + }, + { + "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", + "topics": [ + "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" + ], + "data": "0x00000000000000000000000000000000000000000000000000000000000002ff00000000000000000000000000000000000000000000000000000000000000330000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000011c37937e08000" + }, + { + "address": 
"0x350e0ffc780a6a75b44cc52e1ff9092870668945", + "topics": [ + "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" + ], + "data": "0x000000000000000000000000000000000000000000000000000000000000034100000000000000000000000000000000000000000000000000000000000000330000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08fffffffffffffffffffffffffffffffffffffffffffffffffffffffffff4f4f40000000000000000000000000000000000000000000000000011c37937e08000" + }, + { + "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", + "topics": [ + "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" + ], + "data": "0x00000000000000000000000000000000000000000000000000000000000002ff000000000000000000000000000000000000000000000000000000000000003d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000011c37937e08000" + }, + { + "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", + "topics": [ + "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" + ], + "data": 
"0x0000000000000000000000000000000000000000000000000000000000000341000000000000000000000000000000000000000000000000000000000000003d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000011c37937e08000" + }, + { + "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", + "topics": [ + "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" + ], + "data": "0x00000000000000000000000000000000000000000000000000000000000002ff00000000000000000000000000000000000000000000000000000000000000320000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000011c37937e08000" + }, + { + "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", + "topics": [ + "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" + ], + "data": "0x000000000000000000000000000000000000000000000000000000000000034100000000000000000000000000000000000000000000000000000000000000320000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffbfbfb0000000000000000000000000000000000000000000000000011c37937e08000" + }, + { + "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", + "topics": [ + 
"0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" + ], + "data": "0x00000000000000000000000000000000000000000000000000000000000002ff000000000000000000000000000000000000000000000000000000000000003e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000011c37937e08000" + }, + { + "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", + "topics": [ + "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" + ], + "data": "0x0000000000000000000000000000000000000000000000000000000000000341000000000000000000000000000000000000000000000000000000000000003e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000011c37937e08000" + }, + { + "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", + "topics": [ + "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" + ], + "data": "0x00000000000000000000000000000000000000000000000000000000000002fe00000000000000000000000000000000000000000000000000000000000000380000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffb0b0b00000000000000000000000000000000000000000000000000011c37937e08000" + }, + { + "address": 
"0x350e0ffc780a6a75b44cc52e1ff9092870668945", + "topics": [ + "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" + ], + "data": "0x000000000000000000000000000000000000000000000000000000000000034200000000000000000000000000000000000000000000000000000000000000380000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffefefe0000000000000000000000000000000000000000000000000011c37937e08000" + }, + { + "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", + "topics": [ + "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" + ], + "data": "0x00000000000000000000000000000000000000000000000000000000000002fe00000000000000000000000000000000000000000000000000000000000000370000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffa0a0a00000000000000000000000000000000000000000000000000011c37937e08000" + }, + { + "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", + "topics": [ + "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" + ], + "data": 
"0x000000000000000000000000000000000000000000000000000000000000034200000000000000000000000000000000000000000000000000000000000000370000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff5b5b5b0000000000000000000000000000000000000000000000000011c37937e08000" + }, + { + "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", + "topics": [ + "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" + ], + "data": "0x00000000000000000000000000000000000000000000000000000000000002fe00000000000000000000000000000000000000000000000000000000000000390000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffbababa0000000000000000000000000000000000000000000000000011c37937e08000" + }, + { + "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", + "topics": [ + "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" + ], + "data": "0x000000000000000000000000000000000000000000000000000000000000034200000000000000000000000000000000000000000000000000000000000000390000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000011c37937e08000" + }, + { + "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", + "topics": [ + 
"0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" + ], + "data": "0x00000000000000000000000000000000000000000000000000000000000002fe00000000000000000000000000000000000000000000000000000000000000360000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffeaeaea0000000000000000000000000000000000000000000000000011c37937e08000" + }, + { + "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", + "topics": [ + "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" + ], + "data": "0x000000000000000000000000000000000000000000000000000000000000034200000000000000000000000000000000000000000000000000000000000000360000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffa9a9a90000000000000000000000000000000000000000000000000011c37937e08000" + }, + { + "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", + "topics": [ + "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" + ], + "data": "0x00000000000000000000000000000000000000000000000000000000000002fe000000000000000000000000000000000000000000000000000000000000003a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffb9b9b90000000000000000000000000000000000000000000000000011c37937e08000" + }, + { + "address": 
"0x350e0ffc780a6a75b44cc52e1ff9092870668945", + "topics": [ + "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" + ], + "data": "0x0000000000000000000000000000000000000000000000000000000000000342000000000000000000000000000000000000000000000000000000000000003a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffbfbfb0000000000000000000000000000000000000000000000000011c37937e08000" + }, + { + "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", + "topics": [ + "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" + ], + "data": "0x00000000000000000000000000000000000000000000000000000000000002fe00000000000000000000000000000000000000000000000000000000000000350000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffefefe0000000000000000000000000000000000000000000000000011c37937e08000" + }, + { + "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", + "topics": [ + "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" + ], + "data": 
"0x000000000000000000000000000000000000000000000000000000000000034200000000000000000000000000000000000000000000000000000000000000350000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffefefe0000000000000000000000000000000000000000000000000011c37937e08000" + }, + { + "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", + "topics": [ + "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" + ], + "data": "0x00000000000000000000000000000000000000000000000000000000000002fe000000000000000000000000000000000000000000000000000000000000003b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffbababa0000000000000000000000000000000000000000000000000011c37937e08000" + }, + { + "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", + "topics": [ + "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" + ], + "data": "0x0000000000000000000000000000000000000000000000000000000000000342000000000000000000000000000000000000000000000000000000000000003b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff6363630000000000000000000000000000000000000000000000000011c37937e08000" + }, + { + "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", + "topics": [ + 
"0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" + ], + "data": "0x00000000000000000000000000000000000000000000000000000000000002fe00000000000000000000000000000000000000000000000000000000000000340000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000011c37937e08000" + }, + { + "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", + "topics": [ + "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" + ], + "data": "0x000000000000000000000000000000000000000000000000000000000000034200000000000000000000000000000000000000000000000000000000000000340000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08fffffffffffffffffffffffffffffffffffffffffffffffffffffffffff9f9f90000000000000000000000000000000000000000000000000011c37937e08000" + }, + { + "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", + "topics": [ + "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" + ], + "data": "0x00000000000000000000000000000000000000000000000000000000000002fe000000000000000000000000000000000000000000000000000000000000003c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffeaeaea0000000000000000000000000000000000000000000000000011c37937e08000" + }, + { + "address": 
"0x350e0ffc780a6a75b44cc52e1ff9092870668945", + "topics": [ + "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" + ], + "data": "0x0000000000000000000000000000000000000000000000000000000000000342000000000000000000000000000000000000000000000000000000000000003c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff9c9c9c0000000000000000000000000000000000000000000000000011c37937e08000" + }, + { + "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", + "topics": [ + "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" + ], + "data": "0x00000000000000000000000000000000000000000000000000000000000002fe00000000000000000000000000000000000000000000000000000000000000330000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08fffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8f8f80000000000000000000000000000000000000000000000000011c37937e08000" + }, + { + "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", + "topics": [ + "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" + ], + "data": 
"0x000000000000000000000000000000000000000000000000000000000000034200000000000000000000000000000000000000000000000000000000000000330000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000011c37937e08000" + }, + { + "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", + "topics": [ + "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" + ], + "data": "0x00000000000000000000000000000000000000000000000000000000000002fe000000000000000000000000000000000000000000000000000000000000003d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffdfdfd0000000000000000000000000000000000000000000000000011c37937e08000" + }, + { + "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", + "topics": [ + "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" + ], + "data": "0x0000000000000000000000000000000000000000000000000000000000000342000000000000000000000000000000000000000000000000000000000000003d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000011c37937e08000" + }, + { + "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", + "topics": [ + 
"0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" + ], + "data": "0x00000000000000000000000000000000000000000000000000000000000002fe00000000000000000000000000000000000000000000000000000000000000320000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000011c37937e08000" + }, + { + "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", + "topics": [ + "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" + ], + "data": "0x000000000000000000000000000000000000000000000000000000000000034200000000000000000000000000000000000000000000000000000000000000320000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffcfcfc0000000000000000000000000000000000000000000000000011c37937e08000" + }, + { + "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", + "topics": [ + "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" + ], + "data": "0x00000000000000000000000000000000000000000000000000000000000002fe000000000000000000000000000000000000000000000000000000000000003e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffdfdfd0000000000000000000000000000000000000000000000000011c37937e08000" + }, + { + "address": 
"0x350e0ffc780a6a75b44cc52e1ff9092870668945", + "topics": [ + "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" + ], + "data": "0x0000000000000000000000000000000000000000000000000000000000000342000000000000000000000000000000000000000000000000000000000000003e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000011c37937e08000" + }, + { + "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", + "topics": [ + "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" + ], + "data": "0x00000000000000000000000000000000000000000000000000000000000002fd00000000000000000000000000000000000000000000000000000000000000380000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff4d4e530000000000000000000000000000000000000000000000000011c37937e08000" + }, + { + "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", + "topics": [ + "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" + ], + "data": 
"0x000000000000000000000000000000000000000000000000000000000000034300000000000000000000000000000000000000000000000000000000000000380000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000011c37937e08000" + }, + { + "address": "0x350e0ffc780a6a75b44cc52e1ff9092870668945", + "topics": [ + "0xcacb62d8acea4678658eb5dc4aaa889b34d893b967c96a5f8c066e6549fa3f42" + ], + "data": "0x00000000000000000000000000000000000000000000000000000000000002fd00000000000000000000000000000000000000000000000000000000000000370000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003fcb0342353c541e210013aaddc2e740b9a33d08ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff4f494b0000000000000000000000000000000000000000000000000011c37937e08000" + } + ], + "value": "0x3782dace9d90000", + "type": "CALL" + } +} diff --git a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/notopic.json b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/notopic.json new file mode 100644 index 00000000..116606b3 --- /dev/null +++ b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/notopic.json @@ -0,0 +1,286 @@ +{ + "genesis": { + "difficulty": "45944156141275", + "extraData": "0xd783010406844765746887676f312e342e32856c696e7578", + "gasLimit": "4714680", + "hash": "0x3c41811ab60f232565db6cfafb939d96255b9f678a203181c6f537d6c22d7e6f", + "miner": "0x52bc44d5378309ee2abf1539bf71de1b7d7be3b5", + "mixHash": "0x8b736c63e05d381ae593d584b63fef5c31b04a3cea72bd5a3c92f95f4f7040e8", + "nonce": 
"0xce8ffb5c1ad942ec", + "number": "1725115", + "stateRoot": "0xca08a341c1f95fcba0821c4a27662ef162d39e1f9f5722717531f510d54112b0", + "timestamp": "1466232982", + "totalDifficulty": "28554024908214037524", + "alloc": { + "0x0000000000000000000000000000000000000004": { + "balance": "0x0" + }, + "0x1d3b2638a7cc9f2cb3d298a3da7a90b67e5506ed": { + "balance": "0x0", + "code": "0x606060405260e060020a600035046338cc483181146038578063767800de14604f578063a6f9dae1146060578063d1d80fdf14607e575b005b600054600160a060020a03165b6060908152602090f35b6045600054600160a060020a031681565b603660043560015433600160a060020a03908116911614609c576002565b603660043560015433600160a060020a0390811691161460be576002565b6001805473ffffffffffffffffffffffffffffffffffffffff19168217905550565b6000805473ffffffffffffffffffffffffffffffffffffffff1916821790555056", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000000": "0x00000000000000000000000088e1315687aec48a72786c6b3b3f075208b62713" + } + }, + "0x50739060a2c32dc076e507ae1a893aab28ecfe68": { + "balance": "0x6a8ecefb09f7c4141", + "code": 
"0x606060405236156101745760e060020a6000350463058aace1811461017f578063061e494f146101905780630d1fce421461021e57806311610c251461029157806312253a6c146102b5578063132ae5e9146102d357806316d190e3146102dc57806329e206bd146102e5578063337b68ba1461030a57806338bbfa50146103135780633f683b6a146104115780634dc6b523146104245780634e69d5601461042d57806366d16cc31461044a578063724ae9d014610453578063758971e81461046f5780637cf0ffcb146104965780638ca17995146104a35780639619367d146104b7578063a96a5a5b146104c0578063adc2c98a146104c9578063b70d0b3b146104d2578063bc99cc37146104db578063c4bc5da5146104e4578063cafb220214610502578063d28442ef1461050b578063d4c80edf14610514578063df06f9061461051d578063e8b5e51f14610527578063f738e5ca14610546578063f8b2cb4f14610553578063fa968eea14610594575b610661610663610295565b6106616000341115610eab57610002565b61066560043560006000600060006000600f6000508054905086101561021657600f8054879081101561000257505050507f8d1108e10bcb7c27dddfc02ed9d693a074039d026cf4ea4240b40f7d581ac80284015490819052600e602052604090912080546001820154600283015460039390930154600160a060020a03929092169450925b509193509193565b6106965b601254601354601154600c5460009391019091010330600160a060020a0316318190101561028957604080517f62616e6b726f6c6c5f6d69736d61746368000000000000000000000000000000815290519081900360110190a05030600160a060020a0316315b8091505b5090565b6106615b600060006000600d60149054906101000a900460ff16156106ef57610002565b610661600d5433600160a060020a03908116911614610fd657610002565b610696600a5481565b61069660045481565b6106616004355b600d5460009033600160a060020a0390811691161461101c57610002565b61069660125481565b60408051602060248035600481810135601f81018590048502860185019096528585526106619581359591946044949293909201918190840183828082843750506040805160209735808a0135601f81018a90048a0283018a0190935282825296989760649791965060249190910194509092508291508401838280828437509496505050505050506000600060006000610a18600080546040805160e060020a6338cc483102815290518392600160a060020a0316916338cc4831916004828101926020929190829003018187876161da5
a03f11561000257505060405151915050600160a060020a0381168214156114635761140b6000610939565b610696600d5460a060020a900460ff1681565b61069660085481565b6106a8600060006000600060006000600060006000610f8d610222565b61069660115481565b6106965b600a54600654600091829182911015610ef857610f33565b6106616004355b600d54600090819033600160a060020a0390811691161461108657610002565b61066161066360006102ec565b6106616004356000341115610e7957610002565b61069660055481565b61069660025481565b61069660035481565b61069660075481565b61069660065481565b610661600d5433600160a060020a03908116911614610ffc57610002565b610696600c5481565b61069660135481565b61069660105481565b610696600f545b90565b610661600d54600090819060a060020a900460ff1615610c8057610002565b6106616106636000610476565b6106966004355b600160a060020a0381166000908152600b602052604081205481901180156105845750600c548190115b15610ebd57600c54610ec6610222565b610696600080546040805160e060020a6338cc483102815290518392600160a060020a0316916338cc4831916004828101926020929190829003018187876161da5a03f11561000257505060408051805160e260020a630bbceb33028252620249f06024830152600482018390526003604483015260ea60020a621554930260648301529151600160a060020a03929092169250632ef3accc916084828101926020929190829003018187876161da5a03f1156100025750506040515160055481019350915061028d9050565b005b565b60408051600160a060020a039590951685526020850193909352838301919091526060830152519081900360800190f35b60408051918252519081900360200190f35b60408051998a5260208a0198909852888801969096526060880194909452608087019290925260a086015260c085015260e084015261010083015251908190036101200190f35b600060009054906101000a9004600160a060020a0316600160a060020a03166338cc48316040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060408051805160e260020a630bbceb33028252620249f06024830152600482018390526003604483015260ea60020a621554930260648301529151600160a060020a03929092169250632ef3accc91608480830192602092919082900301816000876161da5a03f1156100025750506040515193505034839010156107c257610002565b82340391506127106107d26
10222565b600460005054020460026000505460026000505460036000505461271003038402041115801561080457506005548210155b1561095a576040805180820182526003815260ea60020a62155493026020828101919091528251608081018452604381527f6a736f6e2868747470733a2f2f6170692e72616e646f6d2e6f72672f6a736f6e818301527f2d7270632f312f696e766f6b65292e726573756c742e72616e646f6d2e646174818501527f612e30000000000000000000000000000000000000000000000000000000000060608201528351610160810190945261012c80855261095f94919261175690830139620249f0600060006000600060009054906101000a9004600160a060020a0316600160a060020a03166338cc48316040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060405151915050600160a060020a03811682141561118c5761113460005b600060006115ef731d3b2638a7cc9f2cb3d298a3da7a90b67e5506ed5b3b90565b610002565b6040805160808101825233815260208181018681526000838501818152606085018a8152878352600e90945293519490208054600160a060020a031916909417845551600184810191909155915160028401555160039290920191909155600f8054918201808255929350918281838015829011610a0057818360005260206000209182019101610a0091905b8082111561028d57600081556001016109ec565b5050506000928352506020909120018190555b505050565b600160a060020a031633600160a060020a0316141515610a3757610002565b6000878152600e6020526040812060018101549095501115610c5257600d5460a060020a900460ff166000148015610aa15750612710610a75610222565b600460005054020460026000505460026000505460036000505461271003038660010160005054020411155b15610b4957610b968660006114e28260006040805160208101909152600090819052828180805b8351811015610b3e57603060f860020a02848281518110156100025790602001015160f860020a900460f860020a0210158015610b295750603960f860020a02848281518110156100025790602001015160f860020a900460f860020a0211155b156116cb578115611722578560001415611719575b509095945050505050565b60018401548454610c5291600160a060020a0391909116905b604051600160a060020a038316906161a89083906000818181858888f193505050501515610d005760138054820190555050565b92506001831080610ba8575061271083115b15610bc75783546001850154610
c5291600160a060020a031690610b62565b6000878152600e6020526040902060029081018490555460001984011015610c5b57506002546003546001850154855461271092909203029190910490610c7190600160a060020a031682610b62565b60018401546000190191505b601380546007546127109085020590810190915560118054918403909101905560018401546010805490910190555b50505050505050565b8354610c1790600160a060020a03166001610b62565b60018401548190039150610c23565b33600160a060020a03166000908152600b60205260408120541115610ca757610cc5610cab565b610d045b6011546012546000918291829114610a13576114e9610222565b33600160a060020a03166000908152600b6020908152604080832054835260099091529020600101805434908101909155600c805490910190555b5050565b600a5460065460009350901015610d6557600a80546001019081905591505b600082111561095a576000828152600960205260408120600101541115610deb576040600020805460019190910154610dc591600160a060020a031690610e7f565b5060015b600a548111610d23576000818152600960205260409020600101543490108015610db457508160001480610db457506040600081812060019081015485835292822001549083905290105b15610dbd579050805b600101610d69565b600082815260096020908152604080832054600160a060020a03168352600b9091528120555b600082815260096020526040812060010154148015610e2357506040600081812054600160a060020a03168152600b60205290812054145b1561095a5760008281526009602090815260408083208054600160a060020a03191633908117825534600192909201829055600c8054909201909155600160a060020a03168352600b9091529020829055610d00565b610ea833825b600160a060020a0382166000908152600b602052604081205481901115610a1357611578610cab565b50565b61066333610eb83361055a565b610e7f565b5060005b919050565b600160a060020a0384166000908152600b60209081526040808320548352600990915290206001015402049050610ec1565b5060015b600a548111610f38578160001480610f5b5750600082815260096020526040902054610f6c90600160a060020a031661055a565b92505b505090565b600082815260096020526040902054610f3090600160a060020a031661055a565b105b15610f64579050805b600101610efc565b600082815260096020526040902054610f5990600160a060020a031661055a565b601154600254600354600454600554601
054939492939192909190610fb0610457565b600f60005080549050985098509850985098509850985098509850909192939495969798565b600d805474ff0000000000000000000000000000000000000000191660a060020a179055565b600d805474ff000000000000000000000000000000000000000019169055565b5060015b600a54811161104e5760008181526009602052604090205461107e90600160a060020a0316610eb88161055a565b8115610d0057604051600d54600160a060020a03908116916000913016319082818181858883f150505050505050565b600101611020565b82156110bb57506000905060015b600a5481116110e55760008181526009602052604090206001015490910190600101611094565b601354604051600d54600160a060020a03169160009182818181858883f150505060135550505050565b816000148015611101575060135430600160a060020a03163114155b1561112f57604051600d54600160a060020a03908116916000913016319082818181858883f1505050601355505b610a13565b50600060009054906101000a9004600160a060020a0316600160a060020a03166338cc48316040518160e060020a0281526004018090506020604051808303816000876161da5a03f115610002575050604051519150505b60018054600160a060020a0319168217908190556040805160e260020a630bbceb330281526024810187905260048181019283528a5160448301528a51600160a060020a039490941693632ef3accc938c938a939192839260649290920191602087810192918291859183918691600091601f850104600f02600301f150905090810190601f1680156112335780820380516001836020036101000a031916815260200191505b5093505050506020604051808303816000876161da5a03f115610002575050604051519250503a8402670de0b6b3a76400000182111561127c5750600091505b50949350505050565b600160009054906101000a9004600160a060020a0316600160a060020a03166385dee34c8360008a8a8a8a6040518760e060020a028152600401808681526020018060200180602001806020018581526020018481038452888181518152602001915080519060200190808383829060006004602084601f0104600f02600301f150905090810190601f1680156113275780820380516001836020036101000a031916815260200191505b508481038352878181518152602001915080519060200190808383829060006004602084601f0104600f02600301f150905090810190601f1680156113805780820380516001836020036101000a031916815260200191505b50848103825
2868181518152602001915080519060200190808383829060006004602084601f0104600f02600301f150905090810190601f1680156113d95780820380516001836020036101000a031916815260200191505b509850505050505050505060206040518083038185886185025a03f11561000257505060405151945061127392505050565b50600060009054906101000a9004600160a060020a0316600160a060020a03166338cc48316040518160e060020a0281526004018090506020604051808303816000876161da5a03f115610002575050604051519150505b60018054600160a060020a031916821790819055604080517fc281d19e0000000000000000000000000000000000000000000000000000000081529051600160a060020a03929092169163c281d19e9160048181019260209290919082900301816000876161da5a03f115610002575050604051519250610524915050565b9050610ec1565b9150600190505b600a54811161151a5760008181526009602052604090205461155990600160a060020a031661055a565b600c8390558282148015906115325750600a54600090115b1561154e5760138054848403908101909155600c805490910190555b601154601255505050565b60008281526009602052604090206001908101829055930192016114f0565b6115818361055a565b821115611594576115918361055a565b91505b50600160a060020a0382166000908152600b602090815260408083205483526009909152902060010180548290039055600c8054829003905560085460138054612710928402929092049182019055610a1383828403610b62565b1115611623575060008054600160a060020a031916731d3b2638a7cc9f2cb3d298a3da7a90b67e5506ed1790556001610ec1565b6000611642739efbea6358bed926b293d2ce63a730d6d98d43dd610956565b1115611678575060008054739efbea6358bed926b293d2ce63a730d6d98d43dd600160a060020a03199091161790556001610ec1565b60006116977320e12a1f859b3feae5fb2a0a32c18f5a65555bbf610956565b1115610ebd575060008054600160a060020a0319167320e12a1f859b3feae5fb2a0a32c18f5a65555bbf1790556001610ec1565b8381815181101561000257016020015160f860020a90819004027f2e00000000000000000000000000000000000000000000000000000000000000141561171157600191505b600101610ac8565b60001995909501945b600a83029250825060308482815181101561000257016020015160f860020a90819004810204909301602f19019250611711564244584a68725670424a35336f3243786c4a526c51745a4a4
b5a714c5974354951652b37335944533448744e6a5335486f64624942337476666f773755717579416b303835566b4c6e4c3945704b67777157517a375a4c64477673516c526432734b78496f6c4e673944626e6650737047714c684c62625953566e4e38437776736a7041586353536f33632b34634e774339307946346f4e69626b764433797461706f5a37676f5453796f5559546677536a6e773374692b484a5648374e332b633069774f43715a6a4464734751556358336d33532f494857624f4f5151356f734f344c626a33476730783155644e7466557a5943465937396e7a596757495145464375524249306e364e42764251573732372b4f73445259304a2f392f676a74387563696248576963303d", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000000": "0x0000000000000000000000001d3b2638a7cc9f2cb3d298a3da7a90b67e5506ed", + "0x0000000000000000000000000000000000000000000000000000000000000001": "0x00000000000000000000000088e1315687aec48a72786c6b3b3f075208b62713", + "0x0000000000000000000000000000000000000000000000000000000000000002": "0x00000000000000000000000000000000000000000000000000000000000009c4", + "0x0000000000000000000000000000000000000000000000000000000000000003": "0x00000000000000000000000000000000000000000000000000000000000000be", + "0x0000000000000000000000000000000000000000000000000000000000000004": "0x0000000000000000000000000000000000000000000000000000000000000064", + "0x0000000000000000000000000000000000000000000000000000000000000005": "0x00000000000000000000000000000000000000000000000002c68af0bb140000", + "0x000000000000000000000000000000000000000000000000000000000000000c": "0x000000000000000000000000000000000000000000000006ad2ff8ba84afdcdc", + "0x000000000000000000000000000000000000000000000000000000000000000d": "0x000000000000000000000000a1b5f95be71ffa2f86adefcaa0028c46fe825161", + "0x000000000000000000000000000000000000000000000000000000000000000f": "0x0000000000000000000000000000000000000000000000000000000000000022", + "0x0000000000000000000000000000000000000000000000000000000000000011": "0xffffffffffffffffffffffffffffffffffffffffffffffffd14ae0a37b4cc1d4", + 
"0x0000000000000000000000000000000000000000000000000000000000000012": "0xffffffffffffffffffffffffffffffffffffffffffffffffd5ab72be30cb5f50", + "0x0000000000000000000000000000000000000000000000000000000000000013": "0xffffffffffffffffd5bbd8ce9d1eb44232ca20eb5b4319ac5e1982d2c94bc3cb", + "0x8d1108e10bcb7c27dddfc02ed9d693a074039d026cf4ea4240b40f7d581ac824": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0xe950f1be9a49788ef79ea4e854ed56155a7f60661724f41e3af5f799203a1eb9": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0xe950f1be9a49788ef79ea4e854ed56155a7f60661724f41e3af5f799203a1eba": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0xe950f1be9a49788ef79ea4e854ed56155a7f60661724f41e3af5f799203a1ebb": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0xe950f1be9a49788ef79ea4e854ed56155a7f60661724f41e3af5f799203a1ebc": "0x0000000000000000000000000000000000000000000000000000000000000000" + } + }, + "0x61c808d82a3ac53231750dadc13c777b59310bd9": { + "balance": "0x12f621ea72fef44f848", + "nonce": "51830" + }, + "0x6412becf35cc7e2a9e7e47966e443f295e1e4f4a": { + "balance": "0xfb5dbfc0d448e70", + "nonce": "6" + }, + "0x88e1315687aec48a72786c6b3b3f075208b62713": { + "balance": "0x24b9f2c5dc266dc6", + "code": 
"0x606060405236156101535760e060020a60003504630f825673811461018f57806323dc42e7146102135780632ef3accc146102ad578063453629781461033b578063480a434d146103d5578063524f3889146103de5780635c242c591461043f57806360f66701146104de57806362b3b8331461056757806368742da6146105eb578063688dcfd71461062b578063757004371461065857806377228659146106f25780637d242ae5146107cd5780637e1c42051461085357806381ade3071461033b57806385dee34c14610932578063a2ec191a14610a0c578063adf59f9914610213578063ae81584314610658578063b5bfdd7314610a64578063bf1fe42014610af2578063c281d19e14610b32578063c51be90f14610b44578063ca6ad1e414610bdd578063d959701614610bff578063db37e42f14610cb6578063de4b326214610d6d578063e839e65e14610daf575b61065660025433600160a060020a039081169116148015906101855750600154600160a060020a039081163390911614155b15610e8a57610002565b6106566004808035906020019082018035906020019191908080601f01602080910402602001604051908101604052809392919081815260200183838082843750949650505050505050600254600160a060020a0390811633909116148015906102095750600154600160a060020a039081163390911614155b15610ebb57610002565b60408051602060248035600481810135601f8101859004850286018501909652858552610e8c9581359591946044949293909201918190840183828082843750506040805160209735808a0135601f81018a90048a0283018a0190935282825296989760649791965060249190910194509092508291508401838280828437509496505050505050506000610f2084848462030d406104cb565b610e8c6004808035906020019082018035906020019191908080601f0160208091040260200160405190810160405280939291908181526020018383808284375094965050933593505050506000610f288383335b6000600062030d40841115801561032d5750600160a060020a03831681526020819052604081205481145b1561184e5760009150611846565b610e8c6004808035906020019082018035906020019191908080601f01602080910402602001604051908101604052809392919081815260200183838082843750506040805160208835808b0135601f81018390048302840183019094528383529799986044989297509190910194509092508291508401838280828437509496505050505050506000610f286000848462030d406104cb565b610e8c60085481565b610e8c600480803
5906020019082018035906020019191908080601f016020809104026020016040519081016040528093929190818152602001838380828437509496505050505050506000610f2f82336000610f288362030d4084610302565b60408051602060248035600481810135601f8101859004850286018501909652858552610e8c9581359591946044949293909201918190840183828082843750506040805160209735808a0135601f81018a90048a0283018a01909352828252969897606497919650602491909101945090925082915084018382808284375094965050933593505050505b600083826000600061113d848433610302565b6106566004808035906020019082018035906020019191908080601f0160208091040260200160405190810160405280939291908181526020018383808284375094965050505050505080604051808280519060200190808383829060006004602084601f0104600f02600301f150905001915050604051809103902060046000508190555050565b6106566004808035906020019082018035906020019191908080601f01602080910402602001604051908101604052809392919081815260200183838082843750949650505050505050600254600160a060020a0390811633909116148015906105e15750600154600160a060020a039081163390911614155b1561119757610002565b610656600435600254600160a060020a0390811633909116148015906106215750600154600160a060020a039081163390911614155b156111f957610002565b600160a060020a0333166000908152600660205260409020805460ff191660f860020a600435041790555b005b60408051602060248035600481810135601f8101859004850286018501909652858552610e8c9581359591946044949293909201918190840183828082843750506040805160209735808a0135601f81018a90048a0283018a01909352828252969897606497919650602491909101945090925082915084018382808284375094965050933593505050505b6000610f1d858585856104cb565b60408051602060248035600481810135601f8101859004850286018501909652858552610e8c9581359591946044949293909201918190840183828082843750506040805160209735808a0135601f81018a90048a0283018a019093528282529698976064979196506024919091019450909250829150840183828082843750506040805160209735808a0135601f81018a90048a0283018a0190935282825296989760849791965060249190910194509092508291508401838280828437509496505050505050506000610f1d8585858562030d4061091f565b6
0408051602060248035600481810135601f81018590048502860185019096528585526106569581359591946044949293909201918190840183828082843750949650505050505050600254600090600160a060020a0390811633909116148015906108495750600154600160a060020a039081163390911614155b1561121f57610002565b60408051602060248035600481810135601f8101859004850286018501909652858552610e8c9581359591946044949293909201918190840183828082843750506040805160209735808a0135601f81018a90048a0283018a019093528282529698976064979196506024919091019450909250829150840183828082843750506040805160209735808a0135601f81018a90048a0283018a01909352828252969897608497919650602491909101945090925082915084018382808284375094965050933593505050505b6000848260006000611516848433610302565b60408051602060248035600481810135601f8101859004850286018501909652858552610e8c9581359591946044949293909201918190840183828082843750506040805160209735808a0135601f81018a90048a0283018a019093528282529698976064979196506024919091019450909250829150840183828082843750506040805160209735808a0135601f81018a90048a0283018a0190935282825296989760849791965060249190910194509092508291508401838280828437509496505093359350505050600061156b868686868661091f565b6106566004808035906020019082018035906020019191908080601f01602080910402602001604051908101604052809392919081815260200183838082843750949650509335935050505061157282600083610ab5565b6106566004808035906020019082018035906020019191908080601f016020809104026020016040519081016040528093929190818152602001838380828437509496505093359350506044359150505b600254600090600160a060020a039081163390911614801590610ae85750600154600160a060020a039081163390911614155b1561157657610002565b610656600435600254600160a060020a039081163390911614801590610b285750600154600160a060020a039081163390911614155b1561162f57610002565b610e9e600154600160a060020a031681565b60408051602060248035600481810135601f8101859004850286018501909652858552610e8c9581359591946044949293909201918190840183828082843750506040805160209735808a0135601f81018a90048a0283018a0190935282825296989760649791965060249190910194509
0925082915084018382808284375094965050933593505050506000610f1d858585856106e4565b600160a060020a03331660009081526007602052604090206004359055610656565b604080516004803580820135602081810285810182019096528185526106569593946024949093850192918291908501908490808284375050604080518735808a013560208181028085018201909552818452989a99604499939850919091019550935083925085019084908082843750949650505050505050600254600090600160a060020a039081163390911614801590610cac5750600154600160a060020a039081163390911614155b1561163457610002565b604080516004803580820135602081810285810182019096528185526106569593946024949093850192918291908501908490808284375050604080518735808a013560208181028085018201909552818452989a99604499939850919091019550935083925085019084908082843750949650505050505050600254600090600160a060020a039081163390911614801590610d635750600154600160a060020a039081163390911614155b1561168f57610002565b61065660043560025460009033600160a060020a03908116911614801590610da55750600154600160a060020a039081163390911614155b1561170557610002565b610e8c6004808035906020019082018035906020019191908080601f01602080910402602001604051908101604052809392919081815260200183838082843750506040805160208835808b0135601f8101839004830284018301909452838352979998604498929750919091019450909250829150840183828082843750506040805160209735808a0135601f81018a90048a0283018a0190935282825296989760649791965060249190910194509092508291508401838280828437509496505050505050506000610f20600085858562030d4061091f565b565b60408051918252519081900360200190f35b60408051600160a060020a03929092168252519081900360200190f35b60006003600050600083604051808280519060200190808383829060006004602084601f0104600f02600301f1509050019150506040518091039020815260200190815260200160002060006101000a81548160ff0219169083021790555050565b90505b949350505050565b9392505050565b92915050565b6000600050600033600160a060020a031681526020019081526020016000206000505433600160a060020a031630600160a060020a03160101604051808281526020019150506040518091039020945084506000600050600033600160a060020a031681526020019
081526020016000206000818150548092919060010191905055507fb76d0edd90c6a07aa3ff7a222d7f5933e29c6acc660c059c97837f05c4ca1a8433868b8b8b8b6006600050600033600160a060020a0316815260200190815260200160002060009054906101000a900460f860020a026007600050600033600160a060020a03168152602001908152602001600020600050546040518089600160a060020a0316815260200188815260200187815260200180602001806020018681526020018581526020018481526020018381038352888181518152602001915080519060200190808383829060006004602084601f0104600f02600301f150905090810190601f1680156110c35780820380516001836020036101000a031916815260200191505b508381038252878181518152602001915080519060200190808383829060006004602084601f0104600f02600301f150905090810190601f16801561111c5780820380516001836020036101000a031916815260200191505b509a505050505050505050505060405180910390a150505050949350505050565b91503482901061119257813403905060008111156111765760405133600160a060020a031690600090839082818181858883f150505050505b42624f1a000189118061118857504586115b15610f3557610002565b610002565b60016003600050600083604051808280519060200190808383829060006004602084601f0104600f02600301f1509050019150506040518091039020815260200190815260200160002060006101000a81548160ff0219169083021790555050565b604051600160a060020a03828116916000913016319082818181858883f1505050505050565b50600882905560005b600b548110156112a757600b8054600a91600091849081101561000257508054600080516020611883833981519152850154835260209390935260408220548602926009929190859081101561000257908252600080516020611883833981519152018150548152602081019190915260400160002055600101611228565b505050565b6000600050600033600160a060020a031681526020019081526020016000206000505433600160a060020a031630600160a060020a03160101604051808281526020019150506040518091039020945084506000600050600033600160a060020a031681526020019081526020016000206000818150548092919060010191905055507faf30e4d66b2f1f23e63ef4591058a897f67e6867233e33ca3508b982dcc4129b33868c8c8c8c8c6006600050600033600160a060020a0316815260200190815260200160002060009054906101000a900460f860020
a026007600050600033600160a060020a0316815260200190815260200160002060005054604051808a600160a060020a0316815260200189815260200188815260200180602001806020018060200187815260200186815260200185815260200184810384528a8181518152602001915080519060200190808383829060006004602084601f0104600f02600301f150905090810190601f16801561143f5780820380516001836020036101000a031916815260200191505b508481038352898181518152602001915080519060200190808383829060006004602084601f0104600f02600301f150905090810190601f1680156114985780820380516001836020036101000a031916815260200191505b508481038252888181518152602001915080519060200190808383829060006004602084601f0104600f02600301f150905090810190601f1680156114f15780820380516001836020036101000a031916815260200191505b509c5050505050505050505050505060405180910390a1505050505b95945050505050565b915034829010611192578134039050600081111561154f5760405133600160a060020a031690600090839082818181858883f150505050505b42624f1a00018a118061156157504586115b156112ac57610002565b905061150d565b5050565b8383604051808380519060200190808383829060006004602084601f0104600f02600301f150905001828152600101925050506040518091039020905080600b600050600b600050805480919060010190908154818355818115116115fe578183600052602060002091820191016115fe91905b8082111561162b57600081556001016115ea565b5050508154811015610002576000918252602080832090910192909255918252600a905260409020555050565b5090565b600555565b5060005b81518110156112a7578281815181101561000257906020019060200201516007600050600084848151811015610002576020908102909101810151600160a060020a03168252919091526040902055600101611638565b5060005b81518110156112a75782818151811015610002579060200190602002015160f860020a026006600050600084848151811015610002576020908102909101810151600160a060020a031682529190915260409020805460f860020a90920460ff19909216919091179055600101611693565b50600881905560005b600b5481101561157257600b8054600a916000918490811015610002576000805160206118838339815191520154825260209290925260408120548254908502926009929185908110156100025760008051602061188383398151915201548
25250602091909152604090205560010161170e565b60096000506000866006600050600087600160a060020a0316815260200190815260200160002060009054906101000a900460f860020a02604051808380519060200190808383829060006004602084601f0104600f02600301f150905001828152600101925050506040518091039020815260200190815260200160002060005054915081506007600050600084600160a060020a03168152602001908152602001600020600050549050806000141561183d57506005545b83810291909101905b509392505050565b600454600014801590611875575060045460009081526003602052604090205460ff166001145b156117835760009150611846560175b7a638427703f0dbe7bb9bbf987a2551717b34e79f33b5b1008d1fa01db9", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000004": "0xbb130806898f085471286ecb4f3966fcbe090ba29e4f9d194ee9e9062f6b61ae", + "0x0000000000000000000000000000000000000000000000000000000000000005": "0x00000000000000000000000000000000000000000000000000000004a817c800", + "0x797fdd0f6c82412493cfa2aacdc9999c10e5d0c9aa3f05a8a289b1b3918c6db8": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x8d90a37db271d62339ebfe84641d1ebdaf56fd5d50861d795eacb410dbb57630": "0x000000000000000000000000000000000000000000000000000cf4e712e8d654", + "0x9864048b6d6c99ecd7fcaecf663fbe1036a6e1fc00cec0a3eb25684dd08184c2": "0x0000000000000000000000000000000000000000000000000000000000000011", + "0xca9ea8077ddc97a21c029df4b19819e51903e11d4bfc7564a622a192cefd6356": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0xf34e44a0672ef76b852374cc47d9772eb4e5e41fa79fba61dcfc9cf7d50418d5": "0x0000000000000000000000000000000000000000000000000000000000000022" + } + } + }, + "config": { + "chainId": 1, + "homesteadBlock": 1150000, + "daoForkBlock": 1920000, + "daoForkSupport": true, + "eip150Block": 2463000, + "eip150Hash": "0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0", + "eip155Block": 2675000, + "eip158Block": 2675000, + "byzantiumBlock": 4370000, + "constantinopleBlock": 7280000, + 
"petersburgBlock": 7280000, + "istanbulBlock": 9069000, + "muirGlacierBlock": 9200000, + "berlinBlock": 12244000, + "londonBlock": 12965000, + "arrowGlacierBlock": 13773000, + "grayGlacierBlock": 15050000, + "terminalTotalDifficultyPassed": true, + "ethash": {} + } + }, + "context": { + "number": "1725116", + "difficulty": "45966589844033", + "timestamp": "1466232988", + "gasLimit": "4716972", + "miner": "0x61c808d82a3ac53231750dadc13c777b59310bd9" + }, + "input": "0xf86d068504e3b2920083030d409450739060a2c32dc076e507ae1a893aab28ecfe68880429d069189e0000801ca04e403b46022c2098e41d3a0e561881ac368cd330637239da85759c1b4f44ab24a072a88235d98959283c00af411bd663b0da8703e05a94d3673aca37d0a39b7e07", + "tracerConfig": { + "withLog": true + }, + "result": { + "from": "0x6412becf35cc7e2a9e7e47966e443f295e1e4f4a", + "gas": "0x30d40", + "gasUsed": "0x249eb", + "to": "0x50739060a2c32dc076e507ae1a893aab28ecfe68", + "input": "0x", + "calls": [ + { + "from": "0x50739060a2c32dc076e507ae1a893aab28ecfe68", + "gas": "0x257af", + "gasUsed": "0xbc", + "to": "0x1d3b2638a7cc9f2cb3d298a3da7a90b67e5506ed", + "input": "0x38cc4831", + "output": "0x00000000000000000000000088e1315687aec48a72786c6b3b3f075208b62713", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x50739060a2c32dc076e507ae1a893aab28ecfe68", + "gas": "0x255a1", + "gasUsed": "0x73a", + "to": "0x88e1315687aec48a72786c6b3b3f075208b62713", + "input": "0x2ef3accc000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000249f0000000000000000000000000000000000000000000000000000000000000000355524c0000000000000000000000000000000000000000000000000000000000", + "output": "0x00000000000000000000000000000000000000000000000000179d63013c5654", + "calls": [ + { + "from": "0x88e1315687aec48a72786c6b3b3f075208b62713", + "gas": "0x12", + "gasUsed": "0x12", + "to": "0x0000000000000000000000000000000000000004", + "input": "0x55524c", + "output": "0x55524c", + "value": "0x0", + "type": 
"CALL" + } + ], + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x50739060a2c32dc076e507ae1a893aab28ecfe68", + "gas": "0x24680", + "gasUsed": "0xbc", + "to": "0x1d3b2638a7cc9f2cb3d298a3da7a90b67e5506ed", + "input": "0x38cc4831", + "output": "0x00000000000000000000000088e1315687aec48a72786c6b3b3f075208b62713", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x50739060a2c32dc076e507ae1a893aab28ecfe68", + "gas": "0x12", + "gasUsed": "0x12", + "to": "0x0000000000000000000000000000000000000004", + "input": "0x55524c", + "output": "0x55524c", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x50739060a2c32dc076e507ae1a893aab28ecfe68", + "gas": "0x22f3b", + "gasUsed": "0x73a", + "to": "0x88e1315687aec48a72786c6b3b3f075208b62713", + "input": "0x2ef3accc000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000249f0000000000000000000000000000000000000000000000000000000000000000355524c0000000000000000000000000000000000000000000000000000000000", + "output": "0x00000000000000000000000000000000000000000000000000179d63013c5654", + "calls": [ + { + "from": "0x88e1315687aec48a72786c6b3b3f075208b62713", + "gas": "0x12", + "gasUsed": "0x12", + "to": "0x0000000000000000000000000000000000000004", + "input": "0x55524c", + "output": "0x55524c", + "value": "0x0", + "type": "CALL" + } + ], + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x50739060a2c32dc076e507ae1a893aab28ecfe68", + "gas": "0x12", + "gasUsed": "0x12", + "to": "0x0000000000000000000000000000000000000004", + "input": "0x55524c", + "output": "0x55524c", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x50739060a2c32dc076e507ae1a893aab28ecfe68", + "gas": "0x30", + "gasUsed": "0x18", + "to": "0x0000000000000000000000000000000000000004", + "input": "0x6a736f6e2868747470733a2f2f6170692e72616e646f6d2e6f72672f6a736f6e2d7270632f312f696e766f6b65292e726573756c742e72616e646f6d2e646174612e30", + "output": 
"0x6a736f6e2868747470733a2f2f6170692e72616e646f6d2e6f72672f6a736f6e2d7270632f312f696e766f6b65292e726573756c742e72616e646f6d2e646174612e30", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x50739060a2c32dc076e507ae1a893aab28ecfe68", + "gas": "0x99", + "gasUsed": "0x2d", + "to": "0x0000000000000000000000000000000000000004", + "input": "0x4244584a68725670424a35336f3243786c4a526c51745a4a4b5a714c5974354951652b37335944533448744e6a5335486f64624942337476666f773755717579416b303835566b4c6e4c3945704b67777157517a375a4c64477673516c526432734b78496f6c4e673944626e6650737047714c684c62625953566e4e38437776736a7041586353536f33632b34634e774339307946346f4e69626b764433797461706f5a37676f5453796f5559546677536a6e773374692b484a5648374e332b633069774f43715a6a4464734751556358336d33532f494857624f4f5151356f734f344c626a33476730783155644e7466557a5943465937396e7a596757495145464375524249306e364e42764251573732372b4f73445259304a2f392f676a74387563696248576963303d", + "output": "0x4244584a68725670424a35336f3243786c4a526c51745a4a4b5a714c5974354951652b37335944533448744e6a5335486f64624942337476666f773755717579416b303835566b4c6e4c3945704b67777157517a375a4c64477673516c526432734b78496f6c4e673944626e6650737047714c684c62625953566e4e38437776736a7041586353536f33632b34634e774339307946346f4e69626b764433797461706f5a37676f5453796f5559546677536a6e773374692b484a5648374e332b633069774f43715a6a4464734751556358336d33532f494857624f4f5151356f734f344c626a33476730783155644e7466557a5943465937396e7a596757495145464375524249306e364e42764251573732372b4f73445259304a2f392f676a74387563696248576963303d", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x50739060a2c32dc076e507ae1a893aab28ecfe68", + "gas": "0x2083e", + "gasUsed": "0x4417", + "to": "0x88e1315687aec48a72786c6b3b3f075208b62713", + "input": 
"0x85dee34c000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000e0000000000000000000000000000000000000000000000000000000000000016000000000000000000000000000000000000000000000000000000000000249f0000000000000000000000000000000000000000000000000000000000000000355524c000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000436a736f6e2868747470733a2f2f6170692e72616e646f6d2e6f72672f6a736f6e2d7270632f312f696e766f6b65292e726573756c742e72616e646f6d2e646174612e300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000012c4244584a68725670424a35336f3243786c4a526c51745a4a4b5a714c5974354951652b37335944533448744e6a5335486f64624942337476666f773755717579416b303835566b4c6e4c3945704b67777157517a375a4c64477673516c526432734b78496f6c4e673944626e6650737047714c684c62625953566e4e38437776736a7041586353536f33632b34634e774339307946346f4e69626b764433797461706f5a37676f5453796f5559546677536a6e773374692b484a5648374e332b633069774f43715a6a4464734751556358336d33532f494857624f4f5151356f734f344c626a33476730783155644e7466557a5943465937396e7a596757495145464375524249306e364e42764251573732372b4f73445259304a2f392f676a74387563696248576963303d0000000000000000000000000000000000000000", + "output": "0xd1b13c1538a940417bf0e73b2498634436753c854c7fb971224d971bd2ae3e88", + "calls": [ + { + "from": "0x88e1315687aec48a72786c6b3b3f075208b62713", + "gas": "0x12", + "gasUsed": "0x12", + "to": "0x0000000000000000000000000000000000000004", + "input": "0x55524c", + "output": "0x55524c", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x88e1315687aec48a72786c6b3b3f075208b62713", + "gas": "0x12", + "gasUsed": "0x12", + "to": "0x0000000000000000000000000000000000000004", + "input": "0x55524c", + "output": "0x55524c", + "value": "0x0", + "type": "CALL" + }, + { 
+ "from": "0x88e1315687aec48a72786c6b3b3f075208b62713", + "gas": "0x30", + "gasUsed": "0x18", + "to": "0x0000000000000000000000000000000000000004", + "input": "0x6a736f6e2868747470733a2f2f6170692e72616e646f6d2e6f72672f6a736f6e2d7270632f312f696e766f6b65292e726573756c742e72616e646f6d2e646174612e30", + "output": "0x6a736f6e2868747470733a2f2f6170692e72616e646f6d2e6f72672f6a736f6e2d7270632f312f696e766f6b65292e726573756c742e72616e646f6d2e646174612e30", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x88e1315687aec48a72786c6b3b3f075208b62713", + "gas": "0x99", + "gasUsed": "0x2d", + "to": "0x0000000000000000000000000000000000000004", + "input": "0x4244584a68725670424a35336f3243786c4a526c51745a4a4b5a714c5974354951652b37335944533448744e6a5335486f64624942337476666f773755717579416b303835566b4c6e4c3945704b67777157517a375a4c64477673516c526432734b78496f6c4e673944626e6650737047714c684c62625953566e4e38437776736a7041586353536f33632b34634e774339307946346f4e69626b764433797461706f5a37676f5453796f5559546677536a6e773374692b484a5648374e332b633069774f43715a6a4464734751556358336d33532f494857624f4f5151356f734f344c626a33476730783155644e7466557a5943465937396e7a596757495145464375524249306e364e42764251573732372b4f73445259304a2f392f676a74387563696248576963303d", + "output": "0x4244584a68725670424a35336f3243786c4a526c51745a4a4b5a714c5974354951652b37335944533448744e6a5335486f64624942337476666f773755717579416b303835566b4c6e4c3945704b67777157517a375a4c64477673516c526432734b78496f6c4e673944626e6650737047714c684c62625953566e4e38437776736a7041586353536f33632b34634e774339307946346f4e69626b764433797461706f5a37676f5453796f5559546677536a6e773374692b484a5648374e332b633069774f43715a6a4464734751556358336d33532f494857624f4f5151356f734f344c626a33476730783155644e7466557a5943465937396e7a596757495145464375524249306e364e42764251573732372b4f73445259304a2f392f676a74387563696248576963303d", + "value": "0x0", + "type": "CALL" + } + ], + "logs": [ + { + "address": "0x88e1315687aec48a72786c6b3b3f075208b62713", + 
"topics": [ + "0xaf30e4d66b2f1f23e63ef4591058a897f67e6867233e33ca3508b982dcc4129b" + ], + "data": "0x00000000000000000000000050739060a2c32dc076e507ae1a893aab28ecfe68d1b13c1538a940417bf0e73b2498634436753c854c7fb971224d971bd2ae3e8800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000120000000000000000000000000000000000000000000000000000000000000016000000000000000000000000000000000000000000000000000000000000001e000000000000000000000000000000000000000000000000000000000000249f011000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000355524c000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000436a736f6e2868747470733a2f2f6170692e72616e646f6d2e6f72672f6a736f6e2d7270632f312f696e766f6b65292e726573756c742e72616e646f6d2e646174612e300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000012c4244584a68725670424a35336f3243786c4a526c51745a4a4b5a714c5974354951652b37335944533448744e6a5335486f64624942337476666f773755717579416b303835566b4c6e4c3945704b67777157517a375a4c64477673516c526432734b78496f6c4e673944626e6650737047714c684c62625953566e4e38437776736a7041586353536f33632b34634e774339307946346f4e69626b764433797461706f5a37676f5453796f5559546677536a6e773374692b484a5648374e332b633069774f43715a6a4464734751556358336d33532f494857624f4f5151356f734f344c626a33476730783155644e7466557a5943465937396e7a596757495145464375524249306e364e42764251573732372b4f73445259304a2f392f676a74387563696248576963303d0000000000000000000000000000000000000000" + } + ], + "value": "0x179d63013c5654", + "type": "CALL" + } + ], + "logs": [ + { + "address": "0x50739060a2c32dc076e507ae1a893aab28ecfe68", + "topics": [], + "data": "0x62616e6b726f6c6c5f6d69736d61746368" + } + ], + "value": 
"0x429d069189e0000", + "type": "CALL" + } +} diff --git a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/simple.json b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/simple.json new file mode 100644 index 00000000..30f17770 --- /dev/null +++ b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/simple.json @@ -0,0 +1,84 @@ +{ + "genesis": { + "difficulty": "8430028481555", + "extraData": "0xd783010302844765746887676f312e352e31856c696e7578", + "gasLimit": "3141592", + "hash": "0xde66937783697293f2e529d2034887c531535d78afa8c9051511ae12ba48fbea", + "miner": "0x2a65aca4d5fc5b5c859090a6c34d164135398226", + "mixHash": "0xba28a43bfbca4a2effbb76bb70d03482a8a0c92e2883ff36cbac3d7c6dbb7df5", + "nonce": "0xa3827ec0a82fe823", + "number": "765824", + "stateRoot": "0x8d96cb027a29f8ca0ccd6d31f9ea0656136ec8030ecda70bb9231849ed6f41a2", + "timestamp": "1451389443", + "totalDifficulty": "4838314986494741271", + "alloc": { + "0xd1220a0cf47c7b9be7a2e6ba89f429762e7b9adb": { + "balance": "0x14203bee2ea6fbe8c", + "nonce": "34" + }, + "0xe2fe6b13287f28e193333fdfe7fedf2f6df6124a": { + "balance": "0x2717a9c870a286f4350" + }, + "0xf4eced2f682ce333f96f2d8966c613ded8fc95dd": { + "balance": "0x0", + "code": 
"0x606060405260e060020a600035046306fdde038114610047578063313ce567146100a457806370a08231146100b057806395d89b41146100c8578063a9059cbb14610123575b005b61015260008054602060026001831615610100026000190190921691909104601f810182900490910260809081016040526060828152929190828280156101f55780601f106101ca576101008083540402835291602001916101f5565b6101c060025460ff1681565b6101c060043560036020526000908152604090205481565b610152600180546020601f6002600019610100858716150201909316929092049182018190040260809081016040526060828152929190828280156101f55780601f106101ca576101008083540402835291602001916101f5565b610045600435602435600160a060020a033316600090815260036020526040902054819010156101fd57610002565b60405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156101b25780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b6060908152602090f35b820191906000526020600020905b8154815290600101906020018083116101d857829003601f168201915b505050505081565b600160a060020a03821660009081526040902054808201101561021f57610002565b806003600050600033600160a060020a03168152602001908152602001600020600082828250540392505081905550806003600050600084600160a060020a0316815260200190815260200160002060008282825054019250508190555081600160a060020a031633600160a060020a03167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef836040518082815260200191505060405180910390a3505056", + "storage": { + "0x1dae8253445d3a5edbe8200da9fc39bc4f11db9362181dc1b640d08c3c2fb4d6": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x8ba52aac7f255d80a49abcf003d6af4752aba5a9531cae94fde7ac8d72191d67": "0x000000000000000000000000000000000000000000000000000000000178e460" + } + } + }, + "config": { + "chainId": 1, + "homesteadBlock": 1150000, + "daoForkBlock": 1920000, + "daoForkSupport": true, + "eip150Block": 2463000, + "eip150Hash": "0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0", + "eip155Block": 2675000, 
+ "eip158Block": 2675000, + "byzantiumBlock": 4370000, + "constantinopleBlock": 7280000, + "petersburgBlock": 7280000, + "istanbulBlock": 9069000, + "muirGlacierBlock": 9200000, + "berlinBlock": 12244000, + "londonBlock": 12965000, + "arrowGlacierBlock": 13773000, + "grayGlacierBlock": 15050000, + "terminalTotalDifficultyPassed": true, + "ethash": {} + } + }, + "context": { + "number": "765825", + "difficulty": "8425912256743", + "timestamp": "1451389488", + "gasLimit": "3141592", + "miner": "0xe2fe6b13287f28e193333fdfe7fedf2f6df6124a" + }, + "input": "0xf8aa22850ba43b740083024d4594f4eced2f682ce333f96f2d8966c613ded8fc95dd80b844a9059cbb000000000000000000000000dbf03b407c01e7cd3cbea99509d93f8dddc8c6fb00000000000000000000000000000000000000000000000000000000009896801ca067da548a2e0f381a957b9b51f086073375d6bfc7312cbc9540b3647ccab7db11a042c6e5b34bc7ba821e9c25b166fa13d82ad4b0d044d16174d5587d4f04ecfcd1", + "tracerConfig": { + "withLog": true + }, + "result": { + "from": "0xd1220a0cf47c7b9be7a2e6ba89f429762e7b9adb", + "gas": "0x24d45", + "gasUsed": "0xc6a5", + "to": "0xf4eced2f682ce333f96f2d8966c613ded8fc95dd", + "input": "0xa9059cbb000000000000000000000000dbf03b407c01e7cd3cbea99509d93f8dddc8c6fb0000000000000000000000000000000000000000000000000000000000989680", + "logs": [ + { + "address": "0xf4eced2f682ce333f96f2d8966c613ded8fc95dd", + "topics": [ + "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef", + "0x000000000000000000000000d1220a0cf47c7b9be7a2e6ba89f429762e7b9adb", + "0x000000000000000000000000dbf03b407c01e7cd3cbea99509d93f8dddc8c6fb" + ], + "data": "0x0000000000000000000000000000000000000000000000000000000000989680" + } + ], + "value": "0x0", + "type": "CALL" + } +} diff --git a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/tx_failed.json b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/tx_failed.json new file mode 100644 index 00000000..30346d07 --- /dev/null +++ 
b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/tx_failed.json @@ -0,0 +1,244 @@ +{ + "genesis": { + "difficulty": "56311715121637", + "extraData": "0x7777772e62772e636f6d", + "gasLimit": "4712388", + "hash": "0x20d3b8daa046f2f10564d84ccbe6d0a8842d8d52bc6d623e23c38050a8f73776", + "miner": "0xbcdfc35b86bedf72f0cda046a3c16829a2ef41d1", + "mixHash": "0x75029f90d7de3f9e3d5eac4a25019f9ac5d0041641d1ef17e7759e45699d4224", + "nonce": "0x54ff3b34fa1d9c97", + "number": "1968179", + "stateRoot": "0x6420003b1779cca3bcdab698c239bbc63623c0a7e4deeedbdb8190b9e7fd7520", + "timestamp": "1469713675", + "totalDifficulty": "42284028928878034360", + "alloc": { + "0x10abb5efecdc09581f8b7cb95791fe2936790b4e": { + "balance": "0x81f158e2814b4ab624c", + "code": "0x6060604052361561020e5760e060020a6000350463013cf08b8114610247578063095ea7b3146102d05780630c3b7b96146103455780630e7082031461034e578063149acf9a1461036057806318160ddd146103725780631f2dc5ef1461037b57806321b5b8dd1461039b578063237e9492146103ad57806323b872dd1461040e5780632632bf2014610441578063341458081461047257806339d1f9081461047b5780634b6753bc146104935780634df6d6cc1461049c5780634e10c3ee146104b7578063590e1ae3146104ca578063612e45a3146104db578063643f7cdd1461057a578063674ed066146105925780636837ff1e1461059b57806370a08231146105e5578063749f98891461060b57806378524b2e1461062457806381f03fcb1461067e57806382661dc41461069657806382bf6464146106b75780638b15a605146106c95780638d7af473146106d257806396d7f3f5146106e1578063a1da2fb9146106ea578063a3912ec814610704578063a9059cbb1461070f578063b7bc2c841461073f578063baac53001461074b578063be7c29c1146107b1578063c9d27afe14610817578063cc9ae3f61461082d578063cdef91d014610841578063dbde198814610859578063dd62ed3e1461087e578063e33734fd146108b2578063e5962195146108c6578063e66f53b7146108de578063eceb2945146108f0578063f8c80d261461094f575b610966600f546000906234bc000142108015610239575060125433600160a060020a03908116911614155b156109785761098033610752565b6109866004356000805482908110156100025750808052600e8202600080516
020612a3683398151915201905060038101546004820154600683015460018401548454600786015460058701546009880154600a890154600d8a0154600160a060020a039586169b509599600201989760ff81811698610100909204811697949691951693168c565b61096660043560243533600160a060020a03908116600081815260156020908152604080832094871680845294825280832086905580518681529051929493927f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925929181900390910190a35060015b92915050565b61096660105481565b610a7d600754600160a060020a031681565b610a7d600e54600160a060020a031681565b61096660165481565b6109665b60004262127500600f60005054031115610de557506014610983565b610a7d601254600160a060020a031681565b60408051602060248035600481810135601f810185900485028601850190965285855261096695813595919460449492939092019181908401838280828437509496505050505050506000600060006000600060003411156116a857610002565b6109666004356024356044355b60115460009060ff1680156104315750600f5442115b80156124e957506124e78461044b565b6109666000610980335b600160a060020a0381166000908152600b602052604081205481908114156129cb57610b99565b61096660065481565b6109665b600d5430600160a060020a03163103610983565b610966600f5481565b61096660043560046020526000908152604090205460ff1681565b61096660043560243560006124cb610831565b610a9a6000341115610ba457610002565b604080516020604435600481810135601f8101849004840285018401909552848452610966948135946024803595939460649492939101918190840183828082843750506040805160209735808a0135601f81018a90048a0283018a01909352828252969897608497919650602491909101945090925082915084018382808284375094965050933593505060a435915050600060006110c1336105ec565b61096660043560096020526000908152604090205481565b61096660015481565b610a9a60043530600160a060020a031633600160a060020a03161415806105db5750600160a060020a03811660009081526004602052604090205460ff16155b156121cb576121c8565b6109666004355b600160a060020a0381166000908152601460205260409020545b919050565b6109666004356024356000600034111561259957610002565b610966600062e6b680420360026000505410806106505750600354600160a060020a03908116339091161
45b80156106645750600254621274ff19420190105b156126145750426002908155600180549091028155610983565b610966600435600a6020526000908152604090205481565b610966600435602435600060006000600060006000341115611ba157610002565b610a7d600854600160a060020a031681565b610966600c5481565b61096660005460001901610983565b61096660025481565b61096660043560006000600060003411156121fc57610002565b6109665b6001610983565b6109666004356024355b60115460009060ff16801561072f5750600f5442115b801561248757506124853361044b565b61096660115460ff1681565b6109666004355b60006000600f600050544210801561076a5750600034115b80156107a457506011546101009004600160a060020a0316600014806107a457506011546101009004600160a060020a0390811633909116145b15610b9f57610a9c61037f565b610a7d600435600060006000508281548110156100025750508080527f290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e56b600e83020180548290811015610002575081526020902060030154600160a060020a0316610606565b61096660043560243560006000610e1b336105ec565b6109665b6000600034111561247c57610002565b61096660043560056020526000908152604090205481565b610966600435602435604435600061252f845b6000600060003411156127ac57610002565b610966600435602435600160a060020a0382811660009081526015602090815260408083209385168352929052205461033f565b610a9a600435600034111561254557610002565b610966600435600b6020526000908152604090205481565b610a7d600354600160a060020a031681565b604080516020606435600481810135601f81018490048402850184019095528484526109669481359460248035956044359560849492019190819084018382808284375094965050505050505060006000600034111561103257610002565b610a7d6011546101009004600160a060020a031681565b60408051918252519081900360200190f35b610980610708565b90505b90565b604051808d600160a060020a031681526020018c8152602001806020018b81526020018a815260200189815260200188815260200187815260200186815260200185815260200184815260200183600160a060020a0316815260200182810382528c818154600181600116156101000203166002900481526020019150805460018160011615610100020316600290048015610a635780601f10610a3857610100808354040283529160200191610a635
65b820191906000526020600020905b815481529060010190602001808311610a4657829003601f168201915b50509d505050505050505050505050505060405180910390f35b60408051600160a060020a03929092168252519081900360200190f35b005b604051601254601434908102939093049350600160a060020a03169183900390600081818185876185025a03f150505050600160a060020a038316600081815260146020908152604080832080548601905560168054860190556013825291829020805434019055815184815291517fdbccb92686efceafb9bb7e0394df7f58f71b954061b81afb57109bf247d3d75a9281900390910190a260105460165410801590610b4c575060115460ff16155b15610b94576011805460ff1916600117905560165460408051918252517ff381a3e2428fdda36615919e8d9c35878d9eb0cf85ac6edf575088e80e4c147e9181900360200190a15b600191505b50919050565b610002565b600f5442118015610bb8575060115460ff16155b15610de357601260009054906101000a9004600160a060020a0316600160a060020a031663d2cc718f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f1156100025750506040516012549051600160a060020a039190911631109050610cc9576040805160125460e060020a63d2cc718f0282529151600160a060020a039290921691630221038a913091849163d2cc718f91600482810192602092919082900301816000876161da5a03f11561000257505060408051805160e160020a63011081c5028252600160a060020a039490941660048201526024810193909352516044838101936020935082900301816000876161da5a03f115610002575050505b33600160a060020a0316600081815260136020526040808220549051909181818185876185025a03f19250505015610de35733600160a060020a03167fbb28353e4598c3b9199101a66e0989549b659a59a54d2c27fbb183f1932c8e6d6013600050600033600160a060020a03168152602001908152602001600020600050546040518082815260200191505060405180910390a26014600050600033600160a060020a0316815260200190815260200160002060005054601660008282825054039250508190555060006014600050600033600160a060020a031681526020019081526020016000206000508190555060006013600050600033600160a060020a03168152602001908152602001600020600050819055505b565b4262054600600f60005054031115610e13576201518062127500600f60005054034203046014019050610983565b50601e610983565b6000141
5610e2857610002565b6000341115610e3657610002565b6000805485908110156100025750600160a060020a03331681527f290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e56e600e8602908101602052604090912054600080516020612a3683398151915291909101915060ff1680610eb05750600c810160205260406000205460ff165b80610ebf575060038101544210155b15610ec957610002565b8215610f0f5733600160a060020a03166000908152601460209081526040808320546009850180549091019055600b84019091529020805460ff19166001179055610f4b565b33600160a060020a0316600090815260146020908152604080832054600a850180549091019055600c84019091529020805460ff191660011790555b33600160a060020a03166000908152600b60205260408120541415610f77576040600020849055610feb565b33600160a060020a03166000908152600b60205260408120548154811015610002579080527f290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e566600e909102015460038201541115610feb5733600160a060020a03166000908152600b602052604090208490555b60408051848152905133600160a060020a03169186917f86abfce99b7dd908bec0169288797f85049ec73cbe046ed9de818fab3a497ae09181900360200190a35092915050565b6000805487908110156100025750808052600e8702600080516020612a3683398151915201905090508484846040518084600160a060020a0316606060020a0281526014018381526020018280519060200190808383829060006004602084601f0104600f02600301f15090500193505050506040518091039020816005016000505414915050949350505050565b600014156110ce57610002565b82801561111857508660001415806110e857508451600014155b806111005750600354600160a060020a038981169116145b8061110b5750600034115b80611118575062093a8084105b1561112257610002565b8215801561114257506111348861115c565b158061114257506212750084105b156111fe57610002565b83546118e590600160a060020a03165b600160a060020a03811660009081526004602052604081205460ff16806111f15750601254600160a060020a039081169083161480156111f15750601260009054906101000a9004600160a060020a0316600160a060020a031663d2cc718f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f115610002575050604051516006541190505b156129a157506001610606565b6249d400841115611
20e57610002565b60115460ff1615806112215750600f5442105b806112365750600c5434108015611236575082155b1561124057610002565b42844201101561124f57610002565b30600160a060020a031633600160a060020a0316141561126e57610002565b60008054600181018083559091908280158290116112a557600e0281600e0283600052602060002091820191016112a5919061136a565b505060008054929450918491508110156100025750808052600e8302600080516020612a368339815191520190508054600160a060020a031916891781556001818101899055875160028084018054600082815260209081902096975091959481161561010002600019011691909104601f908101829004840193918b019083901061146257805160ff19168380011785555b5061149292915061144a565b5050600060098201819055600a820155600d81018054600160a060020a03191690556001015b8082111561145e578054600160a060020a03191681556000600182810182905560028084018054848255909281161561010002600019011604601f81901061143057505b506000600383018190556004808401805461ffff19169055600584018290556006840182905560078401805460ff191690556008840180548382559083526020909220611344929091028101905b8082111561145e57600080825560018201818155600283019190915560039091018054600160a060020a03191690556113fc565b601f0160209004906000526020600020908101906113ae91905b8082111561145e576000815560010161144a565b5090565b82800160010185558215611338579182015b82811115611338578251826000505591602001919060010190611474565b50508787866040518084600160a060020a0316606060020a0281526014018381526020018280519060200190808383829060006004602084601f0104600f02600301f150905001935050505060405180910390208160050160005081905550834201816003016000508190555060018160040160006101000a81548160ff02191690830217905550828160070160006101000a81548160ff02191690830217905550821561157857600881018054600181018083559091908280158290116115735760040281600402836000526020600020918201910161157391906113fc565b505050505b600d8082018054600160a060020a031916331790553460068301819055815401905560408051600160a060020a038a16815260208181018a9052918101859052608060608201818152895191830191909152885185937f5790de2c279e58269b93b12828f56fd5f2bc8ad15e61ce08572585c81a38756
f938d938d938a938e93929160a084019185810191908190849082908590600090600490601f850104600f02600301f150905090810190601f1680156116485780820380516001836020036101000a031916815260200191505b509550505050505060405180910390a2509695505050505050565b6040805186815260208101839052815189927fdfc78bdca8e3e0b18c16c5c99323c6cb9eb5e00afde190b4e7273f5158702b07928290030190a25b5050505092915050565b6000805488908110156100025750808052600e8802600080516020612a36833981519152019050600781015490945060ff166116e757620d2f006116ec565b622398805b600485015490935060ff16801561170857506003840154830142115b15611716576117b887611890565b600384015442108061172d5750600484015460ff16155b806117ae57508360000160009054906101000a9004600160a060020a03168460010160005054876040518084600160a060020a0316606060020a0281526014018381526020018280519060200190808383829060006004602084601f0104600f02600301f15090500193505050506040518091039020846005016000505414155b1561114c57610002565b61169e565b60048401805461ff001916610100179055835460019550600160a060020a03908116309091161480159061180157508354600754600160a060020a03908116911614155b801561181d57506008548454600160a060020a03908116911614155b801561183957508354601254600160a060020a03908116911614155b801561185557506003548454600160a060020a03908116911614155b1561188b5760018401805430600160a060020a031660009081526005602052604090208054919091019055546006805490910190555b611663875b6000600060005082815481101561000257908052600e02600080516020612a36833981519152018150600481015490915060ff16156118d757600d80546006830154900390555b600401805460ff1916905550565b15156118f45761190087611890565b6001915061193161047f565b604051600d8501546006860154600160a060020a0391909116916000919082818181858883f193505050505061169e565b6001850154111561194157600091505b50600a8301546009840154865191019060049010801590611986575085600081518110156100025790602001015160f860020a900460f860020a02606860f860020a02145b80156119b6575085600181518110156100025790602001015160f860020a900460f860020a02603760f860020a02145b80156119e6575085600281518110156100025790602001015160f860020a90046
0f860020a0260ff60f860020a02145b8015611a16575085600381518110156100025790602001015160f860020a900460f860020a02601e60f860020a02145b8015611a45575030600160a060020a0316600090815260056020526040902054611a4290611a5d61047f565b81105b15611a4f57600091505b6001840154611a8090611a5f565b015b30600160a060020a03166000908152600560205260408120546129a961047f565b8110611ad457604051600d8501546006860154600160a060020a0391909116916000919082818181858883f193505050501515611abc57610002565b4260025560165460059004811115611ad45760056001555b6001840154611ae290611a5f565b8110158015611af85750600a8401546009850154115b8015611b015750815b1561188b578360000160009054906101000a9004600160a060020a0316600160a060020a0316846001016000505487604051808280519060200190808383829060006004602084601f0104600f02600301f150905090810190601f168015611b7d5780820380516001836020036101000a031916815260200191505b5091505060006040518083038185876185025a03f19250505015156117bd57610002565b611baa336105ec565b60001415611bb757610002565b60008054889081101561000257508052600e87027f290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e566810154600080516020612a36833981519152919091019450421080611c1957506003840154622398800142115b80611c3257508354600160a060020a0390811690871614155b80611c425750600784015460ff16155b80611c68575033600160a060020a03166000908152600b8501602052604090205460ff16155b80611c9c575033600160a060020a03166000908152600b60205260409020548714801590611c9c5750604060009081205414155b15611ca657610002565b600884018054600090811015610002579081526020812060030154600160a060020a03161415611e1257611efc86604051600090600160a060020a038316907f9046fefd66f538ab35263248a44217dcb70e2eb2cd136629e141b8b8f9f03b60908390a260408051600e547fe2faf044000000000000000000000000000000000000000000000000000000008252600160a060020a03858116600484015260248301859052604483018590526223988042016064840152925192169163e2faf04491608480820192602092909190829003018187876161da5a03f1156100025750506040515191506106069050565b6008850180546000908110156100025781815260208082209390935530600160a060020a03168152600
5909252604082205481549092908110156100025790815260208120905060020155601654600885018054600090811015610002579081526020812090506001015560048401805461ff0019166101001790555b6008840180546000908110156100025781548282526020822060010154929190811015610002579081526020812090505433600160a060020a031660009081526014602052604081205460088801805493909102939093049550908110156100025790815260208120905060030154604080517fbaac530000000000000000000000000000000000000000000000000000000000815233600160a060020a0390811660048301529151929091169163baac53009186916024808301926020929190829003018185886185025a03f11561000257505060405151600014159150611f78905057610002565b60088501805460009081101561000257818152602081206003018054600160a060020a03191690931790925580549091908110156100025790815260208120905060030154600160a060020a031660001415611f5757610002565b600d5430600160a060020a0316311015611f7057610002565b611d9561047f565b6008840180546000908110156100025781548282526020822060010154929190811015610002579081526020812090506002015433600160a060020a0390811660009081526014602090815260408083205430909416835260058083528184205460099093529083205460088b018054969095029690960497509487020494508593929091908290811015610002575260208120815060030154600160a060020a0390811682526020828101939093526040918201600090812080549095019094553016835260059091529020548290101561205357610002565b30600160a060020a031660009081526005602052604081208054849003905560088501805483926009929091829081101561000257508152602080822060030154600160a060020a039081168352929052604080822080549094019093553090911681522054819010156120c657610002565b30600160a060020a0390811660009081526009602090815260408083208054869003905533909316808352601482528383205484519081529351929390927fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef929181900390910190a36121383361086c565b5033600160a060020a03166000908152601460209081526040808320805460168054919091039055839055600a9091528120556001945061169e565b30600160a060020a0390811660008181526005602090815260408083208054958716808552828520805490970190965584845
283905560099091528082208054948352908220805490940190935590815290555b50565b604051600160a060020a0382811691309091163190600081818185876185025a03f192505050151561217457610002565b33600160a060020a03818116600090815260096020908152604080832054815160065460085460e060020a63d2cc718f028352935197995091969195929092169363d2cc718f936004848101949193929183900301908290876161da5a03f11561000257505050604051805190602001506005600050600033600160a060020a03168152602001908152602001600020600050540204101561229d57610002565b600160a060020a03338116600090815260096020908152604080832054815160065460085460e060020a63d2cc718f02835293519296909593169363d2cc718f93600483810194929383900301908290876161da5a03f11561000257505050604051805190602001506005600050600033600160a060020a0316815260200190815260200160002060005054020403905083156123ec57600860009054906101000a9004600160a060020a0316600160a060020a0316630221038a83600160a060020a0316630e7082036040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060408051805160e160020a63011081c5028252600160a060020a031660048201526024810186905290516044808301935060209282900301816000876161da5a03f115610002575050604051511515905061245457610002565b6040805160085460e160020a63011081c5028252600160a060020a038581166004840152602483018590529251921691630221038a9160448082019260209290919082900301816000876161da5a03f115610002575050604051511515905061245457610002565b600160a060020a03331660009081526009602052604090208054909101905550600192915050565b6109803361086c565b155b80156124a257506124a23384845b6000600061293a856105ec565b80156124be57506124be83836000600034111561261c57610002565b15610b9f5750600161033f565b15156124d657610002565b6124e08383610719565b905061033f565b155b80156124fb57506124fb848484612495565b80156125185750612518848484600060003411156126c157610002565b15610b9f57506001612528565b90505b9392505050565b151561253a57610002565b61252584848461041b565b30600160a060020a031633600160a060020a031614158061258a575030600160a060020a031660009081526005602052604090205460649061258561047f565b010481115b156125945
7610002565b600c55565b600354600160a060020a0390811633909116146125b557610002565b600160a060020a038316600081815260046020908152604091829020805460ff191686179055815185815291517f73ad2a153c8b67991df9459024950b318a609782cee8c7eeda47b905f9baa91f9281900390910190a250600161033f565b506000610983565b33600160a060020a03166000908152601460205260409020548290108015906126455750600082115b156126b957600160a060020a03338116600081815260146020908152604080832080548890039055938716808352918490208054870190558351868152935191937fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef929081900390910190a350600161033f565b50600061033f565b600160a060020a03841660009081526014602052604090205482901080159061270a5750601560209081526040600081812033600160a060020a03168252909252902054829010155b80156127165750600082115b156127a457600160a060020a03838116600081815260146020908152604080832080548801905588851680845281842080548990039055601583528184203390961684529482529182902080548790039055815186815291519293927fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef9281900390910190a3506001612528565b506000612528565b600160a060020a038381166000908152600a6020908152604080832054601654600754835160e060020a63d2cc718f02815293519296919591169363d2cc718f9360048181019492939183900301908290876161da5a03f11561000257505060405151905061281a866105ec565b0204101561282757610002565b600160a060020a038381166000908152600a6020908152604080832054601654600754835160e060020a63d2cc718f02815293519296919591169363d2cc718f9360048181019492939183900301908290876161da5a03f115610002575050604051519050612895866105ec565b0204039050600760009054906101000a9004600160a060020a0316600160a060020a0316630221038a84836040518360e060020a0281526004018083600160a060020a03168152602001828152602001925050506020604051808303816000876161da5a03f115610002575050604051511515905061291357610002565b600160a060020a0383166000908152600a6020526040902080548201905560019150610b99565b600160a060020a0386166000908152600a602052604090205480850291909104915081111561296857610002565b600160a060020a038581166000908
152600a60205260408082208054859003905591861681522080548201905560019150509392505050565b506000610606565b0160030260166000505483020460016000505460166000505404019050610606565b600160a060020a0383166000908152600b6020526040812054815481101561000257818052600e02600080516020612a368339815191520190506003810154909150421115610b9457600160a060020a0383166000908152600b602052604081208190559150610b9956290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563", + "nonce": "3", + "storage": { + "0x000000000000000000000000000000000000000000000000000000000000000f": "0x0000000000000000000000000000000000000000000000000000000057bda071", + "0x0000000000000000000000000000000000000000000000000000000000000010": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000011": "0x0000000000000000000000bb9bc244d798123fde783fcc1c72d3bb8c18941301", + "0x0000000000000000000000000000000000000000000000000000000000000012": "0x000000000000000000000000fde8d5f77ef48bb7bf5766c7404691b9ee1dfca7", + "0x0000000000000000000000000000000000000000000000000000000000000016": "0x00000000000000000000000000000000000000000000081f158e2814b4ab624c", + "0x7ffc832d0c7f56b16d03bf3ff14bc4dd6a6cb1ec75841f7397362f4a9be4d392": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0xaccfa2662c944e8eae80b7720d9d232eb6809c18f6c8da65189acbb38069d869": "0x0000000000000000000000000000000000000000000000000000000000000000" + } + }, + "0x630a0cd35d5bd57e61410fda76fea850225cda18": { + "balance": "0x0", + "code": 
"0x6060604052361561006c5760e060020a60003504630121b93f81146100e15780636637b882146101615780636dbf2fa0146101935780638da5cb5b1461026a578063a6f9dae11461027c578063beabacc8146102ae578063d979f5aa14610322578063e1fa763814610354575b61050b600060006000600460005054111561051d576004805460001901905560015460035460055460e260020a6320998771026060908152606492909252600160a060020a03908116608452909116906382661dc49060a49060209060448187876161da5a03f11561000257506105c3915050565b6105cb60043560005433600160a060020a039081169116141561015e57600180547fc9d27afe0000000000000000000000000000000000000000000000000000000060609081526064849052608492909252600160a060020a03169063c9d27afe9060a4906020906044816000876161da5a03f115610002575050505b50565b6105cb60043560005433600160a060020a039081169116141561015e5760018054600160a060020a0319168217905550565b60806020604435600481810135601f8101849004909302840160405260608381526105cb9482359460248035956064949391019190819083828082843750949650505050505050600054600160a060020a039081163390911614156102655782600160a060020a03168282604051808280519060200190808383829060006004602084601f0104600f02600301f150905090810190601f16801561024b5780820380516001836020036101000a031916815260200191505b5091505060006040518083038185876185025a03f1505050505b505050565b6105cd600054600160a060020a031681565b6105cb60043560005433600160a060020a039081169116141561015e5760008054600160a060020a0319168217905550565b6105cb6004356024356044356000805433600160a060020a039081169116141561031c5760e060020a63a9059cbb026060908152600160a060020a03848116606452608484905285929083169163a9059cbb9160a4916020916044908290876161da5a03f115610002575050505b50505050565b6105cb60043560005433600160a060020a039081169116141561015e5760028054600160a060020a0319168217905550565b6105cb60043560243560005433600160a060020a03908116911614156105075760015460e060020a6370a0823102606090815230600160a060020a0390811660645291909116906370a08231906084906020906024816000876161da5a03f1156100025750506040805180516006556002546001547f1a69523000000000000000000000000000000000000000000000000
0000000008352600160a060020a039081166004840152925192169250631a695230916024828101926000929190829003018183876161da5a03f1156100025750505060048181556003839055600154604080517f013cf08b00000000000000000000000000000000000000000000000000000000815292830185905251600160a060020a03919091169163013cf08b91602482810192602092919082900301816000876161da5a03f11561000257505060408051805160058054600160a060020a0319169091179081905560015460035460e260020a63209987710284526004840152600160a060020a0391821660248401529251921692506382661dc491604482810192602092919082900301816000876161da5a03f115610002575050505b5050565b60408051918252519081900360200190f35b60015460e060020a6370a0823102606090815230600160a060020a0390811660645291909116906370a082319060849060209060248187876161da5a03f11561000257505060408051805160015460025460e060020a63a9059cbb028452600160a060020a039081166004850152602484018390529351919550909216925063a9059cbb916044828101926020929190829003018188876161da5a03f115610002575050505b600191505090565b005b6060908152602090f3", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000000": "0x000000000000000000000000e6002189a74b43e6868b20c1311bc108e38aac57", + "0x0000000000000000000000000000000000000000000000000000000000000001": "0x000000000000000000000000bb9bc244d798123fde783fcc1c72d3bb8c189413", + "0x0000000000000000000000000000000000000000000000000000000000000002": "0x0000000000000000000000006e073c0e1bd5af550239025dffcfb37175acedd3", + "0x0000000000000000000000000000000000000000000000000000000000000003": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000004": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000005": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000006": 
"0x0000000000000000000000000000000000000000000000000000000000000000" + } + }, + "0x6e073c0e1bd5af550239025dffcfb37175acedd3": { + "balance": "0x0", + "code": "0x606060405260e060020a60003504631a69523081146100475780636dbf2fa01461006d5780638da5cb5b14610144578063a6f9dae114610156578063beabacc814610196575b005b610045600435600080548190819032600160a060020a0390811691161461022957610002565b60806020604435600481810135601f8101849004909302840160405260608381526100459482359460248035956064949391019190819083828082843750949650505050505050600054600160a060020a0390811633909116141561013f5782600160a060020a03168282604051808280519060200190808383829060006004602084601f0104600f02600301f150905090810190601f1680156101255780820380516001836020036101000a031916815260200191505b5091505060006040518083038185876185025a03f1505050505b505050565b61021f600054600160a060020a031681565b61004560043560005433600160a060020a0390811691161415610193576000805473ffffffffffffffffffffffffffffffffffffffff1916821790555b50565b6100456004356024356044356000805433600160a060020a0390811691161415610343577fa9059cbb000000000000000000000000000000000000000000000000000000006060908152600160a060020a03808516606452608484905285929083169163a9059cbb9160a4916020916044908290876161da5a03f1156100025750505050505050565b6060908152602090f35b7f70a0823100000000000000000000000000000000000000000000000000000000606090815230600160a060020a039081166064528594508416906370a082319060849060209060248187876161da5a03f1156100025750506040805180517f18160ddd00000000000000000000000000000000000000000000000000000000825291519194506318160ddd916004828101926020929190829003018187876161da5a03f11561000257505050604051805190602001509050808211156102ee579050805b82600160a060020a031663a9059cbb33846040518360e060020a0281526004018083600160a060020a03168152602001828152602001925050506020604051808303816000876161da5a03f115610002575050505b5050505056", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000000": 
"0x000000000000000000000000e6002189a74b43e6868b20c1311bc108e38aac57" + } + }, + "0xbb9bc244d798123fde783fcc1c72d3bb8c189413": { + "balance": "0x53d2c8df046dd3db5", + "code": "0x6060604052361561020e5760e060020a6000350463013cf08b8114610247578063095ea7b3146102d05780630c3b7b96146103455780630e7082031461034e578063149acf9a1461036057806318160ddd146103725780631f2dc5ef1461037b57806321b5b8dd1461039b578063237e9492146103ad57806323b872dd1461040e5780632632bf2014610441578063341458081461047257806339d1f9081461047b5780634b6753bc146104935780634df6d6cc1461049c5780634e10c3ee146104b7578063590e1ae3146104ca578063612e45a3146104db578063643f7cdd1461057a578063674ed066146105925780636837ff1e1461059b57806370a08231146105e5578063749f98891461060b57806378524b2e1461062457806381f03fcb1461067e57806382661dc41461069657806382bf6464146106b75780638b15a605146106c95780638d7af473146106d257806396d7f3f5146106e1578063a1da2fb9146106ea578063a3912ec814610704578063a9059cbb1461070f578063b7bc2c841461073f578063baac53001461074b578063be7c29c1146107b1578063c9d27afe14610817578063cc9ae3f61461082d578063cdef91d014610841578063dbde198814610859578063dd62ed3e1461087e578063e33734fd146108b2578063e5962195146108c6578063e66f53b7146108de578063eceb2945146108f0578063f8c80d261461094f575b610966600f546000906234bc000142108015610239575060125433600160a060020a03908116911614155b156109785761098033610752565b6109866004356000805482908110156100025750808052600e8202600080516020612a3683398151915201905060038101546004820154600683015460018401548454600786015460058701546009880154600a890154600d8a0154600160a060020a039586169b509599600201989760ff81811698610100909204811697949691951693168c565b61096660043560243533600160a060020a03908116600081815260156020908152604080832094871680845294825280832086905580518681529051929493927f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925929181900390910190a35060015b92915050565b61096660105481565b610a7d600754600160a060020a031681565b610a7d600e54600160a060020a031681565b61096660165481565b6109665b60004262127500600f6000505403111
5610de557506014610983565b610a7d601254600160a060020a031681565b60408051602060248035600481810135601f810185900485028601850190965285855261096695813595919460449492939092019181908401838280828437509496505050505050506000600060006000600060003411156116a857610002565b6109666004356024356044355b60115460009060ff1680156104315750600f5442115b80156124e957506124e78461044b565b6109666000610980335b600160a060020a0381166000908152600b602052604081205481908114156129cb57610b99565b61096660065481565b6109665b600d5430600160a060020a03163103610983565b610966600f5481565b61096660043560046020526000908152604090205460ff1681565b61096660043560243560006124cb610831565b610a9a6000341115610ba457610002565b604080516020604435600481810135601f8101849004840285018401909552848452610966948135946024803595939460649492939101918190840183828082843750506040805160209735808a0135601f81018a90048a0283018a01909352828252969897608497919650602491909101945090925082915084018382808284375094965050933593505060a435915050600060006110c1336105ec565b61096660043560096020526000908152604090205481565b61096660015481565b610a9a60043530600160a060020a031633600160a060020a03161415806105db5750600160a060020a03811660009081526004602052604090205460ff16155b156121cb576121c8565b6109666004355b600160a060020a0381166000908152601460205260409020545b919050565b6109666004356024356000600034111561259957610002565b610966600062e6b680420360026000505410806106505750600354600160a060020a0390811633909116145b80156106645750600254621274ff19420190105b156126145750426002908155600180549091028155610983565b610966600435600a6020526000908152604090205481565b610966600435602435600060006000600060006000341115611ba157610002565b610a7d600854600160a060020a031681565b610966600c5481565b61096660005460001901610983565b61096660025481565b61096660043560006000600060003411156121fc57610002565b6109665b6001610983565b6109666004356024355b60115460009060ff16801561072f5750600f5442115b801561248757506124853361044b565b61096660115460ff1681565b6109666004355b60006000600f600050544210801561076a5750600034115b80156107a4575060115461010
09004600160a060020a0316600014806107a457506011546101009004600160a060020a0390811633909116145b15610b9f57610a9c61037f565b610a7d600435600060006000508281548110156100025750508080527f290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e56b600e83020180548290811015610002575081526020902060030154600160a060020a0316610606565b61096660043560243560006000610e1b336105ec565b6109665b6000600034111561247c57610002565b61096660043560056020526000908152604090205481565b610966600435602435604435600061252f845b6000600060003411156127ac57610002565b610966600435602435600160a060020a0382811660009081526015602090815260408083209385168352929052205461033f565b610a9a600435600034111561254557610002565b610966600435600b6020526000908152604090205481565b610a7d600354600160a060020a031681565b604080516020606435600481810135601f81018490048402850184019095528484526109669481359460248035956044359560849492019190819084018382808284375094965050505050505060006000600034111561103257610002565b610a7d6011546101009004600160a060020a031681565b60408051918252519081900360200190f35b610980610708565b90505b90565b604051808d600160a060020a031681526020018c8152602001806020018b81526020018a815260200189815260200188815260200187815260200186815260200185815260200184815260200183600160a060020a0316815260200182810382528c818154600181600116156101000203166002900481526020019150805460018160011615610100020316600290048015610a635780601f10610a3857610100808354040283529160200191610a63565b820191906000526020600020905b815481529060010190602001808311610a4657829003601f168201915b50509d505050505050505050505050505060405180910390f35b60408051600160a060020a03929092168252519081900360200190f35b005b604051601254601434908102939093049350600160a060020a03169183900390600081818185876185025a03f150505050600160a060020a038316600081815260146020908152604080832080548601905560168054860190556013825291829020805434019055815184815291517fdbccb92686efceafb9bb7e0394df7f58f71b954061b81afb57109bf247d3d75a9281900390910190a260105460165410801590610b4c575060115460ff16155b15610b94576011805460ff191660011790556
0165460408051918252517ff381a3e2428fdda36615919e8d9c35878d9eb0cf85ac6edf575088e80e4c147e9181900360200190a15b600191505b50919050565b610002565b600f5442118015610bb8575060115460ff16155b15610de357601260009054906101000a9004600160a060020a0316600160a060020a031663d2cc718f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f1156100025750506040516012549051600160a060020a039190911631109050610cc9576040805160125460e060020a63d2cc718f0282529151600160a060020a039290921691630221038a913091849163d2cc718f91600482810192602092919082900301816000876161da5a03f11561000257505060408051805160e160020a63011081c5028252600160a060020a039490941660048201526024810193909352516044838101936020935082900301816000876161da5a03f115610002575050505b33600160a060020a0316600081815260136020526040808220549051909181818185876185025a03f19250505015610de35733600160a060020a03167fbb28353e4598c3b9199101a66e0989549b659a59a54d2c27fbb183f1932c8e6d6013600050600033600160a060020a03168152602001908152602001600020600050546040518082815260200191505060405180910390a26014600050600033600160a060020a0316815260200190815260200160002060005054601660008282825054039250508190555060006014600050600033600160a060020a031681526020019081526020016000206000508190555060006013600050600033600160a060020a03168152602001908152602001600020600050819055505b565b4262054600600f60005054031115610e13576201518062127500600f60005054034203046014019050610983565b50601e610983565b60001415610e2857610002565b6000341115610e3657610002565b6000805485908110156100025750600160a060020a03331681527f290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e56e600e8602908101602052604090912054600080516020612a3683398151915291909101915060ff1680610eb05750600c810160205260406000205460ff165b80610ebf575060038101544210155b15610ec957610002565b8215610f0f5733600160a060020a03166000908152601460209081526040808320546009850180549091019055600b84019091529020805460ff19166001179055610f4b565b33600160a060020a0316600090815260146020908152604080832054600a850180549091019055600c84019091529020805460ff1916600
11790555b33600160a060020a03166000908152600b60205260408120541415610f77576040600020849055610feb565b33600160a060020a03166000908152600b60205260408120548154811015610002579080527f290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e566600e909102015460038201541115610feb5733600160a060020a03166000908152600b602052604090208490555b60408051848152905133600160a060020a03169186917f86abfce99b7dd908bec0169288797f85049ec73cbe046ed9de818fab3a497ae09181900360200190a35092915050565b6000805487908110156100025750808052600e8702600080516020612a3683398151915201905090508484846040518084600160a060020a0316606060020a0281526014018381526020018280519060200190808383829060006004602084601f0104600f02600301f15090500193505050506040518091039020816005016000505414915050949350505050565b600014156110ce57610002565b82801561111857508660001415806110e857508451600014155b806111005750600354600160a060020a038981169116145b8061110b5750600034115b80611118575062093a8084105b1561112257610002565b8215801561114257506111348861115c565b158061114257506212750084105b156111fe57610002565b83546118e590600160a060020a03165b600160a060020a03811660009081526004602052604081205460ff16806111f15750601254600160a060020a039081169083161480156111f15750601260009054906101000a9004600160a060020a0316600160a060020a031663d2cc718f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f115610002575050604051516006541190505b156129a157506001610606565b6249d40084111561120e57610002565b60115460ff1615806112215750600f5442105b806112365750600c5434108015611236575082155b1561124057610002565b42844201101561124f57610002565b30600160a060020a031633600160a060020a0316141561126e57610002565b60008054600181018083559091908280158290116112a557600e0281600e0283600052602060002091820191016112a5919061136a565b505060008054929450918491508110156100025750808052600e8302600080516020612a368339815191520190508054600160a060020a031916891781556001818101899055875160028084018054600082815260209081902096975091959481161561010002600019011691909104601f908101829004840193918b019083901061146257805160f
f19168380011785555b5061149292915061144a565b5050600060098201819055600a820155600d81018054600160a060020a03191690556001015b8082111561145e578054600160a060020a03191681556000600182810182905560028084018054848255909281161561010002600019011604601f81901061143057505b506000600383018190556004808401805461ffff19169055600584018290556006840182905560078401805460ff191690556008840180548382559083526020909220611344929091028101905b8082111561145e57600080825560018201818155600283019190915560039091018054600160a060020a03191690556113fc565b601f0160209004906000526020600020908101906113ae91905b8082111561145e576000815560010161144a565b5090565b82800160010185558215611338579182015b82811115611338578251826000505591602001919060010190611474565b50508787866040518084600160a060020a0316606060020a0281526014018381526020018280519060200190808383829060006004602084601f0104600f02600301f150905001935050505060405180910390208160050160005081905550834201816003016000508190555060018160040160006101000a81548160ff02191690830217905550828160070160006101000a81548160ff02191690830217905550821561157857600881018054600181018083559091908280158290116115735760040281600402836000526020600020918201910161157391906113fc565b505050505b600d8082018054600160a060020a031916331790553460068301819055815401905560408051600160a060020a038a16815260208181018a9052918101859052608060608201818152895191830191909152885185937f5790de2c279e58269b93b12828f56fd5f2bc8ad15e61ce08572585c81a38756f938d938d938a938e93929160a084019185810191908190849082908590600090600490601f850104600f02600301f150905090810190601f1680156116485780820380516001836020036101000a031916815260200191505b509550505050505060405180910390a2509695505050505050565b6040805186815260208101839052815189927fdfc78bdca8e3e0b18c16c5c99323c6cb9eb5e00afde190b4e7273f5158702b07928290030190a25b5050505092915050565b6000805488908110156100025750808052600e8802600080516020612a36833981519152019050600781015490945060ff166116e757620d2f006116ec565b622398805b600485015490935060ff16801561170857506003840154830142115b15611716576117b887611890565b6
00384015442108061172d5750600484015460ff16155b806117ae57508360000160009054906101000a9004600160a060020a03168460010160005054876040518084600160a060020a0316606060020a0281526014018381526020018280519060200190808383829060006004602084601f0104600f02600301f15090500193505050506040518091039020846005016000505414155b1561114c57610002565b61169e565b60048401805461ff001916610100179055835460019550600160a060020a03908116309091161480159061180157508354600754600160a060020a03908116911614155b801561181d57506008548454600160a060020a03908116911614155b801561183957508354601254600160a060020a03908116911614155b801561185557506003548454600160a060020a03908116911614155b1561188b5760018401805430600160a060020a031660009081526005602052604090208054919091019055546006805490910190555b611663875b6000600060005082815481101561000257908052600e02600080516020612a36833981519152018150600481015490915060ff16156118d757600d80546006830154900390555b600401805460ff1916905550565b15156118f45761190087611890565b6001915061193161047f565b604051600d8501546006860154600160a060020a0391909116916000919082818181858883f193505050505061169e565b6001850154111561194157600091505b50600a8301546009840154865191019060049010801590611986575085600081518110156100025790602001015160f860020a900460f860020a02606860f860020a02145b80156119b6575085600181518110156100025790602001015160f860020a900460f860020a02603760f860020a02145b80156119e6575085600281518110156100025790602001015160f860020a900460f860020a0260ff60f860020a02145b8015611a16575085600381518110156100025790602001015160f860020a900460f860020a02601e60f860020a02145b8015611a45575030600160a060020a0316600090815260056020526040902054611a4290611a5d61047f565b81105b15611a4f57600091505b6001840154611a8090611a5f565b015b30600160a060020a03166000908152600560205260408120546129a961047f565b8110611ad457604051600d8501546006860154600160a060020a0391909116916000919082818181858883f193505050501515611abc57610002565b4260025560165460059004811115611ad45760056001555b6001840154611ae290611a5f565b8110158015611af85750600a8401546009850154115b8015611b01575
0815b1561188b578360000160009054906101000a9004600160a060020a0316600160a060020a0316846001016000505487604051808280519060200190808383829060006004602084601f0104600f02600301f150905090810190601f168015611b7d5780820380516001836020036101000a031916815260200191505b5091505060006040518083038185876185025a03f19250505015156117bd57610002565b611baa336105ec565b60001415611bb757610002565b60008054889081101561000257508052600e87027f290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e566810154600080516020612a36833981519152919091019450421080611c1957506003840154622398800142115b80611c3257508354600160a060020a0390811690871614155b80611c425750600784015460ff16155b80611c68575033600160a060020a03166000908152600b8501602052604090205460ff16155b80611c9c575033600160a060020a03166000908152600b60205260409020548714801590611c9c5750604060009081205414155b15611ca657610002565b600884018054600090811015610002579081526020812060030154600160a060020a03161415611e1257611efc86604051600090600160a060020a038316907f9046fefd66f538ab35263248a44217dcb70e2eb2cd136629e141b8b8f9f03b60908390a260408051600e547fe2faf044000000000000000000000000000000000000000000000000000000008252600160a060020a03858116600484015260248301859052604483018590526223988042016064840152925192169163e2faf04491608480820192602092909190829003018187876161da5a03f1156100025750506040515191506106069050565b6008850180546000908110156100025781815260208082209390935530600160a060020a031681526005909252604082205481549092908110156100025790815260208120905060020155601654600885018054600090811015610002579081526020812090506001015560048401805461ff0019166101001790555b6008840180546000908110156100025781548282526020822060010154929190811015610002579081526020812090505433600160a060020a031660009081526014602052604081205460088801805493909102939093049550908110156100025790815260208120905060030154604080517fbaac530000000000000000000000000000000000000000000000000000000000815233600160a060020a0390811660048301529151929091169163baac53009186916024808301926020929190829003018185886185025a03f11561000257505
060405151600014159150611f78905057610002565b60088501805460009081101561000257818152602081206003018054600160a060020a03191690931790925580549091908110156100025790815260208120905060030154600160a060020a031660001415611f5757610002565b600d5430600160a060020a0316311015611f7057610002565b611d9561047f565b6008840180546000908110156100025781548282526020822060010154929190811015610002579081526020812090506002015433600160a060020a0390811660009081526014602090815260408083205430909416835260058083528184205460099093529083205460088b018054969095029690960497509487020494508593929091908290811015610002575260208120815060030154600160a060020a0390811682526020828101939093526040918201600090812080549095019094553016835260059091529020548290101561205357610002565b30600160a060020a031660009081526005602052604081208054849003905560088501805483926009929091829081101561000257508152602080822060030154600160a060020a039081168352929052604080822080549094019093553090911681522054819010156120c657610002565b30600160a060020a0390811660009081526009602090815260408083208054869003905533909316808352601482528383205484519081529351929390927fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef929181900390910190a36121383361086c565b5033600160a060020a03166000908152601460209081526040808320805460168054919091039055839055600a9091528120556001945061169e565b30600160a060020a0390811660008181526005602090815260408083208054958716808552828520805490970190965584845283905560099091528082208054948352908220805490940190935590815290555b50565b604051600160a060020a0382811691309091163190600081818185876185025a03f192505050151561217457610002565b33600160a060020a03818116600090815260096020908152604080832054815160065460085460e060020a63d2cc718f028352935197995091969195929092169363d2cc718f936004848101949193929183900301908290876161da5a03f11561000257505050604051805190602001506005600050600033600160a060020a03168152602001908152602001600020600050540204101561229d57610002565b600160a060020a03338116600090815260096020908152604080832054815160065460085460e060020a63d2cc718f02835
293519296909593169363d2cc718f93600483810194929383900301908290876161da5a03f11561000257505050604051805190602001506005600050600033600160a060020a0316815260200190815260200160002060005054020403905083156123ec57600860009054906101000a9004600160a060020a0316600160a060020a0316630221038a83600160a060020a0316630e7082036040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060408051805160e160020a63011081c5028252600160a060020a031660048201526024810186905290516044808301935060209282900301816000876161da5a03f115610002575050604051511515905061245457610002565b6040805160085460e160020a63011081c5028252600160a060020a038581166004840152602483018590529251921691630221038a9160448082019260209290919082900301816000876161da5a03f115610002575050604051511515905061245457610002565b600160a060020a03331660009081526009602052604090208054909101905550600192915050565b6109803361086c565b155b80156124a257506124a23384845b6000600061293a856105ec565b80156124be57506124be83836000600034111561261c57610002565b15610b9f5750600161033f565b15156124d657610002565b6124e08383610719565b905061033f565b155b80156124fb57506124fb848484612495565b80156125185750612518848484600060003411156126c157610002565b15610b9f57506001612528565b90505b9392505050565b151561253a57610002565b61252584848461041b565b30600160a060020a031633600160a060020a031614158061258a575030600160a060020a031660009081526005602052604090205460649061258561047f565b010481115b1561259457610002565b600c55565b600354600160a060020a0390811633909116146125b557610002565b600160a060020a038316600081815260046020908152604091829020805460ff191686179055815185815291517f73ad2a153c8b67991df9459024950b318a609782cee8c7eeda47b905f9baa91f9281900390910190a250600161033f565b506000610983565b33600160a060020a03166000908152601460205260409020548290108015906126455750600082115b156126b957600160a060020a03338116600081815260146020908152604080832080548890039055938716808352918490208054870190558351868152935191937fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef929081900390910190a350600161033
f565b50600061033f565b600160a060020a03841660009081526014602052604090205482901080159061270a5750601560209081526040600081812033600160a060020a03168252909252902054829010155b80156127165750600082115b156127a457600160a060020a03838116600081815260146020908152604080832080548801905588851680845281842080548990039055601583528184203390961684529482529182902080548790039055815186815291519293927fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef9281900390910190a3506001612528565b506000612528565b600160a060020a038381166000908152600a6020908152604080832054601654600754835160e060020a63d2cc718f02815293519296919591169363d2cc718f9360048181019492939183900301908290876161da5a03f11561000257505060405151905061281a866105ec565b0204101561282757610002565b600160a060020a038381166000908152600a6020908152604080832054601654600754835160e060020a63d2cc718f02815293519296919591169363d2cc718f9360048181019492939183900301908290876161da5a03f115610002575050604051519050612895866105ec565b0204039050600760009054906101000a9004600160a060020a0316600160a060020a0316630221038a84836040518360e060020a0281526004018083600160a060020a03168152602001828152602001925050506020604051808303816000876161da5a03f115610002575050604051511515905061291357610002565b600160a060020a0383166000908152600a6020526040902080548201905560019150610b99565b600160a060020a0386166000908152600a602052604090205480850291909104915081111561296857610002565b600160a060020a038581166000908152600a60205260408082208054859003905591861681522080548201905560019150509392505050565b506000610606565b0160030260166000505483020460016000505460166000505404019050610606565b600160a060020a0383166000908152600b6020526040812054815481101561000257818052600e02600080516020612a368339815191520190506003810154909150421115610b9457600160a060020a0383166000908152600b602052604081208190559150610b9956290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563", + "nonce": "3", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000000": 
"0x0000000000000000000000000000000000000000000000000000000000000120", + "0x000000000000000000000000000000000000000000000000000000000000000f": "0x0000000000000000000000000000000000000000000000000000000057495e10", + "0x0000000000000000000000000000000000000000000000000000000000000011": "0x0000000000000000000000000000000000000000000000000000000000000001", + "0x0000000000000000000000000000000000000000000000000000000000000016": "0x000000000000000000000000000000000000000000098b4d3b425f8c368391b2", + "0x29066f14bd0b438bb3db8771a65febf0be7574be7528f87e7ae11aafc2b2c3ac": "0x000000000000000000000000000000000000000000000025d57ab057892050fc", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3f443": "0x000000000000000000000000b3b10eff47b9c0b3e5579bf1c25872111667e650", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3f444": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3f445": "0x0000000000000000000000000000000000000000000000000000000000000093", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3f446": "0x00000000000000000000000000000000000000000000000000000000579a07ea", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3f447": "0x0000000000000000000000000000000000000000000000000000000000000101", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3f448": "0x63c103e1feea47a9bf6c0dce1349da1a95b96532661d43063ab8e52b3e2a844b", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3f449": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3f44a": "0x0000000000000000000000000000000000000000000000000000000000000001", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3f44b": "0x0000000000000000000000000000000000000000000000000000000000000001", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3f44c": 
"0x00000000000000000000000000000000000000000000000001620725a3de2009", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3f44d": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3f450": "0x000000000000000000000000b3b10eff47b9c0b3e5579bf1c25872111667e650", + "0x3987ba2457a57cc6778cce06d8c58970029977d834f0de345c7a495612cbb060": "0x00000000000000000000000000000000000000000000081f2acc2a62590de041", + "0x3987ba2457a57cc6778cce06d8c58970029977d834f0de345c7a495612cbb061": "0x000000000000000000000000000000000000000000098b4d3b425f8c368391b2", + "0x3987ba2457a57cc6778cce06d8c58970029977d834f0de345c7a495612cbb062": "0x00000000000000000000000000000000000000000000003635c9adc5dea00000", + "0x3987ba2457a57cc6778cce06d8c58970029977d834f0de345c7a495612cbb063": "0x00000000000000000000000010abb5efecdc09581f8b7cb95791fe2936790b4e", + "0x6f125332c6f598e8798f0c277f4b1052ac80cd02ff2eebe0c7f362d63b6959ef": "0x000000000000000000000000000000000000000000000000008dc9007b27b5a9", + "0x793bebaf0ea12c858c08547e9aa88b849bba94bb6933c7bdb0fecbb707ecf5c7": "0x00000000000000000000000000000000000000000000076d52eebfbfbfc172e5", + "0xaccfa2662c944e8eae80b7720d9d232eb6809c18f6c8da65189acbb38069d869": "0x000000000000000000000000000000000000000000000000000289739e60e3e2", + "0xb6e4d5c52e0c64fb49c5a97cacdbcf8bd94b5bd4d490590326a19d27eaf543ae": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0xbe273e24e8bd646e29d1fb5a924a12a8585095b9f45a317fc708165a127fdd70": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0xc34fc4bc1a730d3f836c9ac5124865056e88f3776b63662e34976bdb47549077": "0x000000000000000000000000000000000000000000000036353be4c563784a57", + "0xe2112d92b8a1d00a569b85fbe7a384a5c9f74f5ff8478647397cb58dde254ffa": "0x53706c697420666f722070656f706c652077686f2073656e74206d6f6e657920", + "0xe2112d92b8a1d00a569b85fbe7a384a5c9f74f5ff8478647397cb58dde254ffb": 
"0x746f207468652044414f20616674657220746865204861726420466f726b2062", + "0xe2112d92b8a1d00a569b85fbe7a384a5c9f74f5ff8478647397cb58dde254ffc": "0x79206d697374616b650000000000000000000000000000000000000000000000", + "0xf60322aa1a2e769d412b36e4a9def4300f7540bf1bc9e0f4691786a9100145fa": "0x0000000000000000000000000000000000000000000000000000000062188dd2", + "0xf735edeea40e4ec771f49da7f7b854b398a1ad43f8a9617d43e53d3093e9fdc0": "0x0000000000000000000000000000000000000000000000000000000000000001", + "0xf7905fa5d54027d5d59f4678dda481331babad2d3d0fdefd552afbce2e74c07e": "0x0000000000000000000000000000000000000000000000000000000000000110" + } + }, + "0xe6002189a74b43e6868b20c1311bc108e38aac57": { + "balance": "0x29129264d1ae4848b", + "nonce": "45" + }, + "0xea674fdde714fd979de3edf0f56aa9716b898ec8": { + "balance": "0x1601bbe4c58ec73210", + "nonce": "337736" + }, + "0xfde8d5f77ef48bb7bf5766c7404691b9ee1dfca7": { + "balance": "0x0", + "code": "0x606060405236156100405760e060020a60003504630221038a811461004d57806318bdc79a146100aa5780638da5cb5b146100be578063d2cc718f146100d0575b6100d96001805434019055565b6100db6004356024356000805433600160a060020a0390811691161415806100755750600034115b806100a05750805460a060020a900460ff1680156100a057508054600160a060020a03848116911614155b156100f757610002565b6100db60005460ff60a060020a9091041681565b6100ed600054600160a060020a031681565b6100db60015481565b005b60408051918252519081900360200190f35b6060908152602090f35b600160a060020a0383168260608381818185876185025a03f1925050501561015c57604080518381529051600160a060020a038516917f9735b0cb909f3d21d5c16bbcccd272d85fa11446f6d679f6ecb170d2dabfecfc919081900360200190a25060015b9291505056", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000001": "0x0000000000000000000000000000000000000000000000000000000000000000" + } + } + }, + "config": { + "chainId": 1, + "homesteadBlock": 1150000, + "daoForkBlock": 1920000, + "daoForkSupport": true, + "eip150Block": 2463000, + "eip150Hash": 
"0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0", + "eip155Block": 2675000, + "eip158Block": 2675000, + "byzantiumBlock": 4370000, + "constantinopleBlock": 7280000, + "petersburgBlock": 7280000, + "istanbulBlock": 9069000, + "muirGlacierBlock": 9200000, + "berlinBlock": 12244000, + "londonBlock": 12965000, + "arrowGlacierBlock": 13773000, + "grayGlacierBlock": 15050000, + "terminalTotalDifficultyPassed": true, + "ethash": {} + } + }, + "context": { + "number": "1968180", + "difficulty": "56311715252709", + "timestamp": "1469713694", + "gasLimit": "4712388", + "miner": "0xea674fdde714fd979de3edf0f56aa9716b898ec8" + }, + "input": "0xf8aa2d850c2b6f9f7e830aae6094630a0cd35d5bd57e61410fda76fea850225cda1880b844e1fa7638000000000000000000000000000000000000000000000000000000000000011000000000000000000000000000000000000000000000000000000000000000001ba0563f81ca66b2c618bf4be9470fab88fff1b44eb5c33a9c73a68e8b26fbaa7c8da041464789c49fee77d2e053ff0705bc845fe2a78a35e478132371f294bb594021", + "tracerConfig": { + "withLog": true + }, + "result": { + "from": "0xe6002189a74b43e6868b20c1311bc108e38aac57", + "gas": "0xaae60", + "gasUsed": "0xaae60", + "to": "0x630a0cd35d5bd57e61410fda76fea850225cda18", + "input": "0xe1fa763800000000000000000000000000000000000000000000000000000000000001100000000000000000000000000000000000000000000000000000000000000000", + "error": "invalid jump destination", + "calls": [ + { + "from": "0x630a0cd35d5bd57e61410fda76fea850225cda18", + "gas": "0x9f5a0", + "gasUsed": "0x314", + "to": "0xbb9bc244d798123fde783fcc1c72d3bb8c189413", + "input": "0x70a08231000000000000000000000000630a0cd35d5bd57e61410fda76fea850225cda18", + "output": "0x000000000000000000000000000000000000000000000000000289739e60e3e2", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x630a0cd35d5bd57e61410fda76fea850225cda18", + "gas": "0x9a327", + "gasUsed": "0x67b0", + "to": "0x6e073c0e1bd5af550239025dffcfb37175acedd3", + "input": 
"0x1a695230000000000000000000000000bb9bc244d798123fde783fcc1c72d3bb8c189413", + "calls": [ + { + "from": "0x6e073c0e1bd5af550239025dffcfb37175acedd3", + "gas": "0x93ff6", + "gasUsed": "0x314", + "to": "0xbb9bc244d798123fde783fcc1c72d3bb8c189413", + "input": "0x70a082310000000000000000000000006e073c0e1bd5af550239025dffcfb37175acedd3", + "output": "0x000000000000000000000000000000000000000000000025d57ab057892050fc", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x6e073c0e1bd5af550239025dffcfb37175acedd3", + "gas": "0x93c42", + "gasUsed": "0x13f", + "to": "0xbb9bc244d798123fde783fcc1c72d3bb8c189413", + "input": "0x18160ddd", + "output": "0x000000000000000000000000000000000000000000098b4d3b425f8c368391b2", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x6e073c0e1bd5af550239025dffcfb37175acedd3", + "gas": "0x939ba", + "gasUsed": "0x5fca", + "to": "0xbb9bc244d798123fde783fcc1c72d3bb8c189413", + "input": "0xa9059cbb000000000000000000000000630a0cd35d5bd57e61410fda76fea850225cda18000000000000000000000000000000000000000000000025d57ab057892050fc", + "output": "0x0000000000000000000000000000000000000000000000000000000000000001", + "value": "0x0", + "type": "CALL" + } + ], + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x630a0cd35d5bd57e61410fda76fea850225cda18", + "gas": "0x8d8b6", + "gasUsed": "0x7be", + "to": "0xbb9bc244d798123fde783fcc1c72d3bb8c189413", + "input": "0x013cf08b0000000000000000000000000000000000000000000000000000000000000110", + "output": 
"0x000000000000000000000000b3b10eff47b9c0b3e5579bf1c25872111667e6500000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000018000000000000000000000000000000000000000000000000000000000579a07ea0000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000163c103e1feea47a9bf6c0dce1349da1a95b96532661d43063ab8e52b3e2a844b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000001620725a3de20090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000b3b10eff47b9c0b3e5579bf1c25872111667e650000000000000000000000000000000000000000000000000000000000000004953706c697420666f722070656f706c652077686f2073656e74206d6f6e657920746f207468652044414f20616674657220746865204861726420466f726b206279206d697374616b650000000000000000000000000000000000000000000000", + "value": "0x0", + "type": "CALL" + }, + { + "from": "0x630a0cd35d5bd57e61410fda76fea850225cda18", + "gas": "0x880f8", + "gasUsed": "0x880f8", + "to": "0xbb9bc244d798123fde783fcc1c72d3bb8c189413", + "input": "0x82661dc40000000000000000000000000000000000000000000000000000000000000110000000000000000000000000b3b10eff47b9c0b3e5579bf1c25872111667e650", + "error": "invalid jump destination", + "calls": [ + { + "from": "0xbb9bc244d798123fde783fcc1c72d3bb8c189413", + "gas": "0x7f910", + "gasUsed": "0xd20f", + "to": "0x10abb5efecdc09581f8b7cb95791fe2936790b4e", + "input": "0xbaac5300000000000000000000000000630a0cd35d5bd57e61410fda76fea850225cda18", + "output": "0x0000000000000000000000000000000000000000000000000000000000000001", + "calls": [ + { + "from": "0x10abb5efecdc09581f8b7cb95791fe2936790b4e", + "gas": "0x76e12", + "gasUsed": "0x13f9", + "to": "0xfde8d5f77ef48bb7bf5766c7404691b9ee1dfca7", + "input": "0x", + "value": "0x0", + "type": "CALL" + } + ], + 
"value": "0x20320625e3126cb0", + "type": "CALL" + } + ], + "value": "0x0", + "type": "CALL" + } + ], + "value": "0x0", + "type": "CALL" + } +} diff --git a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/tx_partial_failed.json b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/tx_partial_failed.json new file mode 100644 index 00000000..eb251442 --- /dev/null +++ b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/tx_partial_failed.json @@ -0,0 +1,107 @@ +{ + "genesis": { + "difficulty": "45372803248884", + "extraData": "0x65746865726d696e652e6f7267202855533129", + "gasLimit": "4712388", + "hash": "0xa2b18cc64ec062676680f2bb2d880205dcd372f4396722f2294d3fceece96193", + "miner": "0xea674fdde714fd979de3edf0f56aa9716b898ec8", + "mixHash": "0xce7c26a9238b249edcdcd51f0ea1ad0e632e872daf9a09f039d918bcaeb7194f", + "nonce": "0x849d49e634e93bb5", + "number": "1646451", + "stateRoot": "0x2bd193b9911caf43204960cc7661ce864bf0bac7f9b60191aa02bbff24f061fb", + "timestamp": "1465103859", + "totalDifficulty": "24813742796574158431", + "alloc": { + "0x01115b41bd2731353dd3e6abf44818fdc035aaf1": { + "balance": "0x16d99e16e809000", + "nonce": "23" + }, + "0x61c808d82a3ac53231750dadc13c777b59310bd9": { + "balance": "0x6a636960e34bd696f4", + "nonce": "36888" + }, + "0xbb9bc244d798123fde783fcc1c72d3bb8c189413": { + "balance": "0x9b37460cdbcba74181f81", + "code": 
"0x6060604052361561020e5760e060020a6000350463013cf08b8114610247578063095ea7b3146102d05780630c3b7b96146103455780630e7082031461034e578063149acf9a1461036057806318160ddd146103725780631f2dc5ef1461037b57806321b5b8dd1461039b578063237e9492146103ad57806323b872dd1461040e5780632632bf2014610441578063341458081461047257806339d1f9081461047b5780634b6753bc146104935780634df6d6cc1461049c5780634e10c3ee146104b7578063590e1ae3146104ca578063612e45a3146104db578063643f7cdd1461057a578063674ed066146105925780636837ff1e1461059b57806370a08231146105e5578063749f98891461060b57806378524b2e1461062457806381f03fcb1461067e57806382661dc41461069657806382bf6464146106b75780638b15a605146106c95780638d7af473146106d257806396d7f3f5146106e1578063a1da2fb9146106ea578063a3912ec814610704578063a9059cbb1461070f578063b7bc2c841461073f578063baac53001461074b578063be7c29c1146107b1578063c9d27afe14610817578063cc9ae3f61461082d578063cdef91d014610841578063dbde198814610859578063dd62ed3e1461087e578063e33734fd146108b2578063e5962195146108c6578063e66f53b7146108de578063eceb2945146108f0578063f8c80d261461094f575b610966600f546000906234bc000142108015610239575060125433600160a060020a03908116911614155b156109785761098033610752565b6109866004356000805482908110156100025750808052600e8202600080516020612a3683398151915201905060038101546004820154600683015460018401548454600786015460058701546009880154600a890154600d8a0154600160a060020a039586169b509599600201989760ff81811698610100909204811697949691951693168c565b61096660043560243533600160a060020a03908116600081815260156020908152604080832094871680845294825280832086905580518681529051929493927f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925929181900390910190a35060015b92915050565b61096660105481565b610a7d600754600160a060020a031681565b610a7d600e54600160a060020a031681565b61096660165481565b6109665b60004262127500600f60005054031115610de557506014610983565b610a7d601254600160a060020a031681565b60408051602060248035600481810135601f81018590048502860185019096528585526109669581359591946044949293909201918190840
1838280828437509496505050505050506000600060006000600060003411156116a857610002565b6109666004356024356044355b60115460009060ff1680156104315750600f5442115b80156124e957506124e78461044b565b6109666000610980335b600160a060020a0381166000908152600b602052604081205481908114156129cb57610b99565b61096660065481565b6109665b600d5430600160a060020a03163103610983565b610966600f5481565b61096660043560046020526000908152604090205460ff1681565b61096660043560243560006124cb610831565b610a9a6000341115610ba457610002565b604080516020604435600481810135601f8101849004840285018401909552848452610966948135946024803595939460649492939101918190840183828082843750506040805160209735808a0135601f81018a90048a0283018a01909352828252969897608497919650602491909101945090925082915084018382808284375094965050933593505060a435915050600060006110c1336105ec565b61096660043560096020526000908152604090205481565b61096660015481565b610a9a60043530600160a060020a031633600160a060020a03161415806105db5750600160a060020a03811660009081526004602052604090205460ff16155b156121cb576121c8565b6109666004355b600160a060020a0381166000908152601460205260409020545b919050565b6109666004356024356000600034111561259957610002565b610966600062e6b680420360026000505410806106505750600354600160a060020a0390811633909116145b80156106645750600254621274ff19420190105b156126145750426002908155600180549091028155610983565b610966600435600a6020526000908152604090205481565b610966600435602435600060006000600060006000341115611ba157610002565b610a7d600854600160a060020a031681565b610966600c5481565b61096660005460001901610983565b61096660025481565b61096660043560006000600060003411156121fc57610002565b6109665b6001610983565b6109666004356024355b60115460009060ff16801561072f5750600f5442115b801561248757506124853361044b565b61096660115460ff1681565b6109666004355b60006000600f600050544210801561076a5750600034115b80156107a457506011546101009004600160a060020a0316600014806107a457506011546101009004600160a060020a0390811633909116145b15610b9f57610a9c61037f565b610a7d600435600060006000508281548110156100025750508080527
f290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e56b600e83020180548290811015610002575081526020902060030154600160a060020a0316610606565b61096660043560243560006000610e1b336105ec565b6109665b6000600034111561247c57610002565b61096660043560056020526000908152604090205481565b610966600435602435604435600061252f845b6000600060003411156127ac57610002565b610966600435602435600160a060020a0382811660009081526015602090815260408083209385168352929052205461033f565b610a9a600435600034111561254557610002565b610966600435600b6020526000908152604090205481565b610a7d600354600160a060020a031681565b604080516020606435600481810135601f81018490048402850184019095528484526109669481359460248035956044359560849492019190819084018382808284375094965050505050505060006000600034111561103257610002565b610a7d6011546101009004600160a060020a031681565b60408051918252519081900360200190f35b610980610708565b90505b90565b604051808d600160a060020a031681526020018c8152602001806020018b81526020018a815260200189815260200188815260200187815260200186815260200185815260200184815260200183600160a060020a0316815260200182810382528c818154600181600116156101000203166002900481526020019150805460018160011615610100020316600290048015610a635780601f10610a3857610100808354040283529160200191610a63565b820191906000526020600020905b815481529060010190602001808311610a4657829003601f168201915b50509d505050505050505050505050505060405180910390f35b60408051600160a060020a03929092168252519081900360200190f35b005b604051601254601434908102939093049350600160a060020a03169183900390600081818185876185025a03f150505050600160a060020a038316600081815260146020908152604080832080548601905560168054860190556013825291829020805434019055815184815291517fdbccb92686efceafb9bb7e0394df7f58f71b954061b81afb57109bf247d3d75a9281900390910190a260105460165410801590610b4c575060115460ff16155b15610b94576011805460ff1916600117905560165460408051918252517ff381a3e2428fdda36615919e8d9c35878d9eb0cf85ac6edf575088e80e4c147e9181900360200190a15b600191505b50919050565b610002565b600f5442118015610bb8575060115460ff1
6155b15610de357601260009054906101000a9004600160a060020a0316600160a060020a031663d2cc718f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f1156100025750506040516012549051600160a060020a039190911631109050610cc9576040805160125460e060020a63d2cc718f0282529151600160a060020a039290921691630221038a913091849163d2cc718f91600482810192602092919082900301816000876161da5a03f11561000257505060408051805160e160020a63011081c5028252600160a060020a039490941660048201526024810193909352516044838101936020935082900301816000876161da5a03f115610002575050505b33600160a060020a0316600081815260136020526040808220549051909181818185876185025a03f19250505015610de35733600160a060020a03167fbb28353e4598c3b9199101a66e0989549b659a59a54d2c27fbb183f1932c8e6d6013600050600033600160a060020a03168152602001908152602001600020600050546040518082815260200191505060405180910390a26014600050600033600160a060020a0316815260200190815260200160002060005054601660008282825054039250508190555060006014600050600033600160a060020a031681526020019081526020016000206000508190555060006013600050600033600160a060020a03168152602001908152602001600020600050819055505b565b4262054600600f60005054031115610e13576201518062127500600f60005054034203046014019050610983565b50601e610983565b60001415610e2857610002565b6000341115610e3657610002565b6000805485908110156100025750600160a060020a03331681527f290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e56e600e8602908101602052604090912054600080516020612a3683398151915291909101915060ff1680610eb05750600c810160205260406000205460ff165b80610ebf575060038101544210155b15610ec957610002565b8215610f0f5733600160a060020a03166000908152601460209081526040808320546009850180549091019055600b84019091529020805460ff19166001179055610f4b565b33600160a060020a0316600090815260146020908152604080832054600a850180549091019055600c84019091529020805460ff191660011790555b33600160a060020a03166000908152600b60205260408120541415610f77576040600020849055610feb565b33600160a060020a03166000908152600b60205260408120548154811015610002579080527f2
90decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e566600e909102015460038201541115610feb5733600160a060020a03166000908152600b602052604090208490555b60408051848152905133600160a060020a03169186917f86abfce99b7dd908bec0169288797f85049ec73cbe046ed9de818fab3a497ae09181900360200190a35092915050565b6000805487908110156100025750808052600e8702600080516020612a3683398151915201905090508484846040518084600160a060020a0316606060020a0281526014018381526020018280519060200190808383829060006004602084601f0104600f02600301f15090500193505050506040518091039020816005016000505414915050949350505050565b600014156110ce57610002565b82801561111857508660001415806110e857508451600014155b806111005750600354600160a060020a038981169116145b8061110b5750600034115b80611118575062093a8084105b1561112257610002565b8215801561114257506111348861115c565b158061114257506212750084105b156111fe57610002565b83546118e590600160a060020a03165b600160a060020a03811660009081526004602052604081205460ff16806111f15750601254600160a060020a039081169083161480156111f15750601260009054906101000a9004600160a060020a0316600160a060020a031663d2cc718f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f115610002575050604051516006541190505b156129a157506001610606565b6249d40084111561120e57610002565b60115460ff1615806112215750600f5442105b806112365750600c5434108015611236575082155b1561124057610002565b42844201101561124f57610002565b30600160a060020a031633600160a060020a0316141561126e57610002565b60008054600181018083559091908280158290116112a557600e0281600e0283600052602060002091820191016112a5919061136a565b505060008054929450918491508110156100025750808052600e8302600080516020612a368339815191520190508054600160a060020a031916891781556001818101899055875160028084018054600082815260209081902096975091959481161561010002600019011691909104601f908101829004840193918b019083901061146257805160ff19168380011785555b5061149292915061144a565b5050600060098201819055600a820155600d81018054600160a060020a03191690556001015b8082111561145e578054600160a060020a031916815560006001828
10182905560028084018054848255909281161561010002600019011604601f81901061143057505b506000600383018190556004808401805461ffff19169055600584018290556006840182905560078401805460ff191690556008840180548382559083526020909220611344929091028101905b8082111561145e57600080825560018201818155600283019190915560039091018054600160a060020a03191690556113fc565b601f0160209004906000526020600020908101906113ae91905b8082111561145e576000815560010161144a565b5090565b82800160010185558215611338579182015b82811115611338578251826000505591602001919060010190611474565b50508787866040518084600160a060020a0316606060020a0281526014018381526020018280519060200190808383829060006004602084601f0104600f02600301f150905001935050505060405180910390208160050160005081905550834201816003016000508190555060018160040160006101000a81548160ff02191690830217905550828160070160006101000a81548160ff02191690830217905550821561157857600881018054600181018083559091908280158290116115735760040281600402836000526020600020918201910161157391906113fc565b505050505b600d8082018054600160a060020a031916331790553460068301819055815401905560408051600160a060020a038a16815260208181018a9052918101859052608060608201818152895191830191909152885185937f5790de2c279e58269b93b12828f56fd5f2bc8ad15e61ce08572585c81a38756f938d938d938a938e93929160a084019185810191908190849082908590600090600490601f850104600f02600301f150905090810190601f1680156116485780820380516001836020036101000a031916815260200191505b509550505050505060405180910390a2509695505050505050565b6040805186815260208101839052815189927fdfc78bdca8e3e0b18c16c5c99323c6cb9eb5e00afde190b4e7273f5158702b07928290030190a25b5050505092915050565b6000805488908110156100025750808052600e8802600080516020612a36833981519152019050600781015490945060ff166116e757620d2f006116ec565b622398805b600485015490935060ff16801561170857506003840154830142115b15611716576117b887611890565b600384015442108061172d5750600484015460ff16155b806117ae57508360000160009054906101000a9004600160a060020a03168460010160005054876040518084600160a060020a0316606060020a0281526014018
381526020018280519060200190808383829060006004602084601f0104600f02600301f15090500193505050506040518091039020846005016000505414155b1561114c57610002565b61169e565b60048401805461ff001916610100179055835460019550600160a060020a03908116309091161480159061180157508354600754600160a060020a03908116911614155b801561181d57506008548454600160a060020a03908116911614155b801561183957508354601254600160a060020a03908116911614155b801561185557506003548454600160a060020a03908116911614155b1561188b5760018401805430600160a060020a031660009081526005602052604090208054919091019055546006805490910190555b611663875b6000600060005082815481101561000257908052600e02600080516020612a36833981519152018150600481015490915060ff16156118d757600d80546006830154900390555b600401805460ff1916905550565b15156118f45761190087611890565b6001915061193161047f565b604051600d8501546006860154600160a060020a0391909116916000919082818181858883f193505050505061169e565b6001850154111561194157600091505b50600a8301546009840154865191019060049010801590611986575085600081518110156100025790602001015160f860020a900460f860020a02606860f860020a02145b80156119b6575085600181518110156100025790602001015160f860020a900460f860020a02603760f860020a02145b80156119e6575085600281518110156100025790602001015160f860020a900460f860020a0260ff60f860020a02145b8015611a16575085600381518110156100025790602001015160f860020a900460f860020a02601e60f860020a02145b8015611a45575030600160a060020a0316600090815260056020526040902054611a4290611a5d61047f565b81105b15611a4f57600091505b6001840154611a8090611a5f565b015b30600160a060020a03166000908152600560205260408120546129a961047f565b8110611ad457604051600d8501546006860154600160a060020a0391909116916000919082818181858883f193505050501515611abc57610002565b4260025560165460059004811115611ad45760056001555b6001840154611ae290611a5f565b8110158015611af85750600a8401546009850154115b8015611b015750815b1561188b578360000160009054906101000a9004600160a060020a0316600160a060020a0316846001016000505487604051808280519060200190808383829060006004602084601f0104600f02600301f150905
090810190601f168015611b7d5780820380516001836020036101000a031916815260200191505b5091505060006040518083038185876185025a03f19250505015156117bd57610002565b611baa336105ec565b60001415611bb757610002565b60008054889081101561000257508052600e87027f290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e566810154600080516020612a36833981519152919091019450421080611c1957506003840154622398800142115b80611c3257508354600160a060020a0390811690871614155b80611c425750600784015460ff16155b80611c68575033600160a060020a03166000908152600b8501602052604090205460ff16155b80611c9c575033600160a060020a03166000908152600b60205260409020548714801590611c9c5750604060009081205414155b15611ca657610002565b600884018054600090811015610002579081526020812060030154600160a060020a03161415611e1257611efc86604051600090600160a060020a038316907f9046fefd66f538ab35263248a44217dcb70e2eb2cd136629e141b8b8f9f03b60908390a260408051600e547fe2faf044000000000000000000000000000000000000000000000000000000008252600160a060020a03858116600484015260248301859052604483018590526223988042016064840152925192169163e2faf04491608480820192602092909190829003018187876161da5a03f1156100025750506040515191506106069050565b6008850180546000908110156100025781815260208082209390935530600160a060020a031681526005909252604082205481549092908110156100025790815260208120905060020155601654600885018054600090811015610002579081526020812090506001015560048401805461ff0019166101001790555b6008840180546000908110156100025781548282526020822060010154929190811015610002579081526020812090505433600160a060020a031660009081526014602052604081205460088801805493909102939093049550908110156100025790815260208120905060030154604080517fbaac530000000000000000000000000000000000000000000000000000000000815233600160a060020a0390811660048301529151929091169163baac53009186916024808301926020929190829003018185886185025a03f11561000257505060405151600014159150611f78905057610002565b60088501805460009081101561000257818152602081206003018054600160a060020a0319169093179092558054909190811015610002579081526020812090506
0030154600160a060020a031660001415611f5757610002565b600d5430600160a060020a0316311015611f7057610002565b611d9561047f565b6008840180546000908110156100025781548282526020822060010154929190811015610002579081526020812090506002015433600160a060020a0390811660009081526014602090815260408083205430909416835260058083528184205460099093529083205460088b018054969095029690960497509487020494508593929091908290811015610002575260208120815060030154600160a060020a0390811682526020828101939093526040918201600090812080549095019094553016835260059091529020548290101561205357610002565b30600160a060020a031660009081526005602052604081208054849003905560088501805483926009929091829081101561000257508152602080822060030154600160a060020a039081168352929052604080822080549094019093553090911681522054819010156120c657610002565b30600160a060020a0390811660009081526009602090815260408083208054869003905533909316808352601482528383205484519081529351929390927fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef929181900390910190a36121383361086c565b5033600160a060020a03166000908152601460209081526040808320805460168054919091039055839055600a9091528120556001945061169e565b30600160a060020a0390811660008181526005602090815260408083208054958716808552828520805490970190965584845283905560099091528082208054948352908220805490940190935590815290555b50565b604051600160a060020a0382811691309091163190600081818185876185025a03f192505050151561217457610002565b33600160a060020a03818116600090815260096020908152604080832054815160065460085460e060020a63d2cc718f028352935197995091969195929092169363d2cc718f936004848101949193929183900301908290876161da5a03f11561000257505050604051805190602001506005600050600033600160a060020a03168152602001908152602001600020600050540204101561229d57610002565b600160a060020a03338116600090815260096020908152604080832054815160065460085460e060020a63d2cc718f02835293519296909593169363d2cc718f93600483810194929383900301908290876161da5a03f11561000257505050604051805190602001506005600050600033600160a060020a031681526020019081526020016000206
0005054020403905083156123ec57600860009054906101000a9004600160a060020a0316600160a060020a0316630221038a83600160a060020a0316630e7082036040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060408051805160e160020a63011081c5028252600160a060020a031660048201526024810186905290516044808301935060209282900301816000876161da5a03f115610002575050604051511515905061245457610002565b6040805160085460e160020a63011081c5028252600160a060020a038581166004840152602483018590529251921691630221038a9160448082019260209290919082900301816000876161da5a03f115610002575050604051511515905061245457610002565b600160a060020a03331660009081526009602052604090208054909101905550600192915050565b6109803361086c565b155b80156124a257506124a23384845b6000600061293a856105ec565b80156124be57506124be83836000600034111561261c57610002565b15610b9f5750600161033f565b15156124d657610002565b6124e08383610719565b905061033f565b155b80156124fb57506124fb848484612495565b80156125185750612518848484600060003411156126c157610002565b15610b9f57506001612528565b90505b9392505050565b151561253a57610002565b61252584848461041b565b30600160a060020a031633600160a060020a031614158061258a575030600160a060020a031660009081526005602052604090205460649061258561047f565b010481115b1561259457610002565b600c55565b600354600160a060020a0390811633909116146125b557610002565b600160a060020a038316600081815260046020908152604091829020805460ff191686179055815185815291517f73ad2a153c8b67991df9459024950b318a609782cee8c7eeda47b905f9baa91f9281900390910190a250600161033f565b506000610983565b33600160a060020a03166000908152601460205260409020548290108015906126455750600082115b156126b957600160a060020a03338116600081815260146020908152604080832080548890039055938716808352918490208054870190558351868152935191937fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef929081900390910190a350600161033f565b50600061033f565b600160a060020a03841660009081526014602052604090205482901080159061270a5750601560209081526040600081812033600160a060020a03168252909252902054829010155b8015612
7165750600082115b156127a457600160a060020a03838116600081815260146020908152604080832080548801905588851680845281842080548990039055601583528184203390961684529482529182902080548790039055815186815291519293927fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef9281900390910190a3506001612528565b506000612528565b600160a060020a038381166000908152600a6020908152604080832054601654600754835160e060020a63d2cc718f02815293519296919591169363d2cc718f9360048181019492939183900301908290876161da5a03f11561000257505060405151905061281a866105ec565b0204101561282757610002565b600160a060020a038381166000908152600a6020908152604080832054601654600754835160e060020a63d2cc718f02815293519296919591169363d2cc718f9360048181019492939183900301908290876161da5a03f115610002575050604051519050612895866105ec565b0204039050600760009054906101000a9004600160a060020a0316600160a060020a0316630221038a84836040518360e060020a0281526004018083600160a060020a03168152602001828152602001925050506020604051808303816000876161da5a03f115610002575050604051511515905061291357610002565b600160a060020a0383166000908152600a6020526040902080548201905560019150610b99565b600160a060020a0386166000908152600a602052604090205480850291909104915081111561296857610002565b600160a060020a038581166000908152600a60205260408082208054859003905591861681522080548201905560019150509392505050565b506000610606565b0160030260166000505483020460016000505460166000505404019050610606565b600160a060020a0383166000908152600b6020526040812054815481101561000257818052600e02600080516020612a368339815191520190506003810154909150421115610b9457600160a060020a0383166000908152600b602052604081208190559150610b9956290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563", + "nonce": "3", + "storage": { + "0x000000000000000000000000000000000000000000000000000000000000000f": "0x0000000000000000000000000000000000000000000000000000000057495e10", + "0x0000000000000000000000000000000000000000000000000000000000000012": "0x000000000000000000000000807640a13483f8ac783c557fcdf27be11ea4ac7a" + } 
+ }, + "0xcf1476387d780169410d4e936d75a206fda2a68c": { + "balance": "0x15fd0ad66ea7000", + "code": "0x606060405236156100b95760e060020a6000350463173825d9811461010b5780632f54bf6e1461015f5780634123cb6b146101875780635c52c2f5146101905780637065cb48146101ba578063746c9171146101e7578063797af627146101f0578063b20d30a914610203578063b61d27f614610230578063b75c7dc614610251578063ba51a6df14610280578063c2cf7326146102ad578063cbf0b0c0146102eb578063f00d4b5d14610318578063f1736d861461034a575b61035460003411156101095760408051600160a060020a033316815234602082015281517fe1fffcc4923d04b559f4d29a8bfc6cda04eb5b0d3c460751c2402c5c5cc9109c929181900390910190a15b565b610354600435600060003660405180838380828437820191505092505050604051809103902061064a815b600160a060020a03331660009081526101026020526040812054818082811415610c6657610dbf565b6103566004355b600160a060020a03811660009081526101026020526040812054115b919050565b61035660015481565b61035460003660405180838380828437820191505092505050604051809103902061078b81610136565b6103546004356000366040518083838082843782019150509250505060405180910390206105c681610136565b61035660005481565b6103566004355b600081610a2781610136565b61035460043560003660405180838380828437820191505092505050604051809103902061077f81610136565b6103566004803590602480359160443591820191013560006107aa33610166565b610354600435600160a060020a03331660009081526101026020526040812054908082811415610368576103e7565b61035460043560003660405180838380828437820191505092505050604051809103902061070881610136565b610356600435602435600082815261010360209081526040808320600160a060020a0385168452610102909252822054828181141561076157610776565b61035460043560003660405180838380828437820191505092505050604051809103902061079981610136565b610354600435602435600060003660405180838380828437820191505092505050604051809103902061047281610136565b6103566101055481565b005b60408051918252519081900360200190f35b50506000828152610103602052604081206001810154600284900a9290831611156103e75780546001828101805492909101835590839003905560408051600160a060020a03331681526020
810186905281517fc7fb647e59b18047309aa15aad418e5d7ca96d173ad704f1031a2c3d7591734b929181900390910190a15b50505050565b600160a060020a03831660028361010081101561000257508301819055600160a060020a03851660008181526101026020908152604080832083905584835291829020869055815192835282019290925281517fb532073b38c83145e3e5135377a08bf9aab55bc0fd7c1179cd4fb995d2a5159c929181900390910190a1505b505050565b156103e75761048083610166565b1561048b575061046d565b600160a060020a0384166000908152610102602052604081205492508214156104b4575061046d565b6103ed5b6101045460005b81811015610f0b57610104805461010891600091849081101561000257600080516020610fd88339815191520154825250602091909152604081208054600160a060020a0319168155600181810183905560028281018054858255939493909281161561010002600019011604601f819010610f9057505b5050506001016104bf565b60018054810190819055600160a060020a038316906002906101008110156100025790900160005055600154600160a060020a03831660008181526101026020908152604091829020939093558051918252517f994a936646fe87ffe4f1e469d3d6aa417d6b855598397f323de5b449f765f0c3929181900390910190a15b505b50565b156105c1576105d482610166565b156105df57506105c3565b6105e76104b8565b60015460fa90106105fa576105fa61060f565b60015460fa901061054257506105c3565b6106c75b60015b6001548110156105c3575b6001548110801561063d5750600281610100811015610002570154600014155b15610dc75760010161061d565b1561046d57600160a060020a03831660009081526101026020526040812054925082141561067857506105c1565b600160016000505403600060005054111561069357506105c1565b600060028361010081101561000257508301819055600160a060020a0384168152610102602052604081205561060b6104b8565b60408051600160a060020a038516815290517f58619076adf5bb0943d100ef88d52d7c3fd691b19d3a9071b555b651fbf418da9181900360200190a1505050565b156105c15760015482111561071d57506105c3565b600082905561072a6104b8565b6040805183815290517facbdb084c721332ac59f9b8e392196c9eb0e4932862da8eb9beaf0dad4f550da9181900360200190a15050565b506001820154600282900a9081166000141593505b50505092915050565b156105c1575061010555565b156105c35760006101065550565b156105
c15781600160a060020a0316ff5b156109eb576107be846000610ea133610166565b1561087d577f92ca3a80853e6663fa31fa10b99225f18d4902939b4c53a9caae9043f6efd00433858786866040518086600160a060020a0316815260200185815260200184600160a060020a031681526020018060200182810382528484828181526020019250808284378201915050965050505050505060405180910390a184600160a060020a03168484846040518083838082843782019150509250505060006040518083038185876185025a03f150600093506109eb92505050565b6000364360405180848480828437820191505082815260200193505050506040518091039020905080506108b0816101f7565b1580156108d3575060008181526101086020526040812054600160a060020a0316145b156109eb5760008181526101086020908152604082208054600160a060020a0319168817815560018181018890556002918201805481865294849020909491821615610100026000190190911691909104601f9081019290920481019185919087908390106109f35760ff198135168380011785555b506109659291505b80821115610a235760008155600101610951565b50507f1733cbb53659d713b79580f79f3f9ff215f78a7c7aa45890f3b89fc5cddfbf328133868887876040518087815260200186600160a060020a0316815260200185815260200184600160a060020a03168152602001806020018281038252848482818152602001925080828437820191505097505050505050505060405180910390a15b949350505050565b82800160010185558215610949579182015b82811115610949578235826000505591602001919060010190610a05565b5090565b15610aaa5760008381526101086020526040812054600160a060020a031614610aaa5760408051600091909120805460018281015460029384018054600160a060020a0394909416959194909391928392859291811615610100026000190116048015610adb5780601f10610ab057610100808354040283529160200191610adb565b50919050565b820191906000526020600020905b815481529060010190602001808311610abe57829003601f168201915b505091505060006040518083038185876185025a03f1505050600084815261010860209081526040918290208054600180830154855133600160a060020a0381811683529682018c9052968101829052929094166060830181905260a06080840181815260029586018054948516156101000260001901909416959095049084018190527fe7c957c06e9a662c1a6c77366179f5b702b97651dc28eee7d5bf1dff6e40bb4a97508a95
949193919060c083019084908015610bdd5780601f10610bb257610100808354040283529160200191610bdd565b820191906000526020600020905b815481529060010190602001808311610bc057829003601f168201915b5050965050505050505060405180910390a16000838152610108602052604081208054600160a060020a0319168155600181810183905560028281018054858255939493909281161561010002600019011604601f819010610c4857505b5050506001915050610182565b601f016020900490600052602060002090810190610c3b9190610951565b60008581526101036020526040812080549093501415610cee576000805483556001838101919091556101048054918201808255828015829011610cbd57818360005260206000209182019101610cbd9190610951565b50505060028301819055610104805487929081101561000257600091909152600080516020610fd883398151915201555b506001810154600283900a90811660001415610dbf5760408051600160a060020a03331681526020810187905281517fe1c52dc63b719ade82e8bea94cc41a0d5d28e4aaf536adb5e9cccc9ff8c1aeda929181900390910190a1815460019011610dac576000858152610103602052604090206002015461010480549091908110156100025760406000908120600080516020610fd8833981519152929092018190558082556001828101829055600292909201559450610dbf9050565b8154600019018255600182018054821790555b505050919050565b5b60018054118015610dea57506001546002906101008110156100025701546000145b15610dfe5760018054600019019055610dc8565b60015481108015610e215750600154600290610100811015610002570154600014155b8015610e3b57506002816101008110156100025701546000145b15610e9c57600154600290610100811015610002578101549082610100811015610002579090016000505580610102600060028361010081101561000257810154825260209290925260408120929092556001546101008110156100025701555b610612565b156101825761010754610eb75b62015180420490565b1115610ed057600061010655610ecb610eae565b610107555b6101065480830110801590610eed57506101055461010654830111155b15610f0357506101068054820190556001610182565b506000610182565b6105c16101045460005b81811015610fae5761010480548290811015610002576000918252600080516020610fd8833981519152015414610f8857610104805461010391600091849081101561000257600080516020610fd88339815191520154
8252506020919091526040812081815560018101829055600201555b600101610f15565b601f0160209004906000526020600020908101906105379190610951565b610104805460008083559190915261046d90600080516020610fd883398151915290810190610951564c0be60200faa20559308cb7b5a1bb3255c16cb1cab91f525b5ae7a03d02fabe", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000105": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000106": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000107": "0x000000000000000000000000000000000000000000000000000000000000423d", + "0xcabd288dcb1ace4f49c34e8ac2d843772952b4226b3c832bdb4ac1ddca0f7c05": "0x0000000000000000000000000000000000000000000000000000000000000002" + } + } + }, + "config": { + "chainId": 1, + "homesteadBlock": 1150000, + "daoForkBlock": 1920000, + "daoForkSupport": true, + "eip150Block": 2463000, + "eip150Hash": "0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0", + "eip155Block": 2675000, + "eip158Block": 2675000, + "byzantiumBlock": 4370000, + "constantinopleBlock": 7280000, + "petersburgBlock": 7280000, + "istanbulBlock": 9069000, + "muirGlacierBlock": 9200000, + "berlinBlock": 12244000, + "londonBlock": 12965000, + "arrowGlacierBlock": 13773000, + "grayGlacierBlock": 15050000, + "terminalTotalDifficultyPassed": true, + "ethash": {} + } + }, + "context": { + "number": "1646452", + "difficulty": "45328493887096", + "timestamp": "1465103894", + "gasLimit": "4712388", + "miner": "0x61c808d82a3ac53231750dadc13c777b59310bd9" + }, + "input": 
"0xf9018b178504a817c80083030d4094cf1476387d780169410d4e936d75a206fda2a68c80b90124b61d27f6000000000000000000000000bb9bc244d798123fde783fcc1c72d3bb8c189413000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000088613930353963626230303030303030303030303030303030303030303030303039306433633138313264653236363962663830376264373735386365623165333439376163376534303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030316336626635323633343030300000000000000000000000000000000000000000000000001ca0f1ae5ea07b1d00eb5e06fc854124ee0234ec61c8b393147f9d030804a75c98daa01d045d7633012cca74e30e975c3d00d11b4243dd8648f2e78d652f3a8aaafceb", + "tracerConfig": { + "withLog": true + }, + "result": { + "from": "0x01115b41bd2731353dd3e6abf44818fdc035aaf1", + "gas": "0x30d40", + "gasUsed": "0x288c9", + "to": "0xcf1476387d780169410d4e936d75a206fda2a68c", + "input": "0xb61d27f6000000000000000000000000bb9bc244d798123fde783fcc1c72d3bb8c18941300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000008861393035396362623030303030303030303030303030303030303030303030303930643363313831326465323636396266383037626437373538636562316533343937616337653430303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303031633662663532363334303030000000000000000000000000000000000000000000000000", + "output": "0x0000000000000000000000000000000000000000000000000000000000000000", + "calls": [ + { + "from": "0xcf1476387d780169410d4e936d75a206fda2a68c", + "gas": "0x1e30b", + "gasUsed": "0x1e30b", + "to": "0xbb9bc244d798123fde783fcc1c72d3bb8c189413", + "input": 
"0x61393035396362623030303030303030303030303030303030303030303030303930643363313831326465323636396266383037626437373538636562316533343937616337653430303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303031633662663532363334303030", + "error": "invalid jump destination", + "value": "0x0", + "type": "CALL" + } + ], + "logs": [ + { + "address": "0xcf1476387d780169410d4e936d75a206fda2a68c", + "topics": [ + "0x92ca3a80853e6663fa31fa10b99225f18d4902939b4c53a9caae9043f6efd004" + ], + "data": "0x00000000000000000000000001115b41bd2731353dd3e6abf44818fdc035aaf10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000bb9bc244d798123fde783fcc1c72d3bb8c1894130000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000008861393035396362623030303030303030303030303030303030303030303030303930643363313831326465323636396266383037626437373538636562316533343937616337653430303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303031633662663532363334303030" + } + ], + "value": "0x0", + "type": "CALL" + } +} diff --git a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/with_onlyTopCall.json b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/with_onlyTopCall.json new file mode 100644 index 00000000..e7308110 --- /dev/null +++ b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/with_onlyTopCall.json @@ -0,0 +1,89 @@ +{ + "genesis": { + "difficulty": "11934798510088", + "extraData": "0xd983010302844765746887676f312e342e328777696e646f7773", + "gasLimit": "3141592", + "hash": "0xfc543a4a551afbd4a6c5d6d49041371e6bb58b1108c12aaec7f487ce656bb97f", + "miner": "0xf8b483dba2c3b7176a3da549ad41a48bb3121069", + "mixHash": "0xa6a1e67fc68da76b8d9cc3ce1c45d5e1f4bbd96b5dcfddbe0017d7fa99903ead", + "nonce": "0x5f00c600268b4659", + "number": "995200", + "stateRoot": 
"0x3579328470dd2aef5b9da69f5480cbe0d375e653b530ab3c1aee0da5e1ff4c94", + "timestamp": "1455322761", + "totalDifficulty": "7077231809278509672", + "alloc": { + "0x200edd17f30485a8735878661960cd7a9a95733f": { + "balance": "0x0", + "code": "0x3660008037602060003660003473273930d21e01ee25e4c219b63259d214872220a261235a5a03f21560015760206000f3", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000000": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000001": "0x0000000000000000000000000000000000000000000000000000000000000002", + "0x0000000000000000000000000000000000000000000000000000000000000104": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x4c0be60200faa20559308cb7b5a1bb3255c16cb1cab91f525b5ae7a03d02fabe": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x8ba1097eb3abe3dc1b51faa48445d593bf968f722e20b67bb62a87495836bf04": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x8ba1097eb3abe3dc1b51faa48445d593bf968f722e20b67bb62a87495836bf05": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x8ba1097eb3abe3dc1b51faa48445d593bf968f722e20b67bb62a87495836bf06": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0xa611e7c895a426c0477bc9e280db9c3b1e456dc6310ffcf23926ef5186c1facc": "0x0000000000000000000000000000000000000000000000000000000000000002", + "0xac682d343707aadf06c2c4c3692831d9e7ba711099ef36f9efb8bb29be8c410e": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0xac682d343707aadf06c2c4c3692831d9e7ba711099ef36f9efb8bb29be8c410f": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0xac682d343707aadf06c2c4c3692831d9e7ba711099ef36f9efb8bb29be8c4110": "0x0000000000000000000000000000000000000000000000000000000000000000" + } + }, + "0x273930d21e01ee25e4c219b63259d214872220a2": { + "balance": "0x0", + 
"code": "0x606060405236156100da5760e060020a6000350463173825d9811461012c5780632f54bf6e146101875780634123cb6b146101af57806352375093146101b857806354fd4d50146101c25780635c52c2f5146101cc578063659010e7146101fd5780637065cb4814610207578063746c91711461023b578063797af62714610244578063b20d30a914610257578063b61d27f61461028b578063b75c7dc6146102ac578063ba51a6df146102db578063c2cf73261461030f578063cbf0b0c01461034d578063f00d4b5d14610381578063f1736d86146103ba575b6103c4600034111561012a5760408051600160a060020a033216815234602082015281517fe1fffcc4923d04b559f4d29a8bfc6cda04eb5b0d3c460751c2402c5c5cc9109c929181900390910190a15b565b6103c46004356000600036436040518084848082843750505090910190815260405190819003602001902090506106c9815b600160a060020a03321660009081526101026020526040812054818082811415610c3f57610d97565b6103c66004355b600160a060020a03811660009081526101026020526040812054115b919050565b6103c660015481565b6103c66101075481565b6103c66101085481565b6103c46000364360405180848480828437505050909101908152604051908190036020019020905061081a8161015e565b6103c66101065481565b6103c4600435600036436040518084848082843750505090910190815260405190819003602001902090506106418161015e565b6103c660005481565b6103c66004355b600081610a7d8161015e565b6103c46004356000364360405180848480828437505050909101908152604051908190036020019020905061080e8161015e565b6103c66004803590602480359160443591820191013560006108393261018e565b6103c4600435600160a060020a033216600090815261010260205260408120549080828114156103d857610457565b6103c4600435600036436040518084848082843750505090910190815260405190819003602001902090506107888161015e565b6103c6600435602435600082815261010360209081526040808320600160a060020a038516845261010290925282205482818114156107e157610805565b6103c4600435600036436040518084848082843750505090910190815260405190819003602001902090506108288161015e565b6103c46004356024356000600036436040518084848082843750505090910190815260405190819003602001902090506104e28161015e565b6103c66101055481565b005b60408051918252519081900360200190f35b5050600082815261010
3602052604081206001810154600284900a9290831611156104575780546001828101805492909101835590839003905560408051600160a060020a03321681526020810186905281517fc7fb647e59b18047309aa15aad418e5d7ca96d173ad704f1031a2c3d7591734b929181900390910190a15b50505050565b600160a060020a03831660028361010081101561000257508301819055600160a060020a03851660008181526101026020908152604080832083905584835291829020869055815192835282019290925281517fb532073b38c83145e3e5135377a08bf9aab55bc0fd7c1179cd4fb995d2a5159c929181900390910190a1505b505050565b15610457576104f08361018e565b156104fb57506104dd565b600160a060020a03841660009081526101026020526040812054925082141561052457506104dd565b61045d5b6101045460005b81811015610ee457610104805461010991600091849081101561000257600080516020610f9f83398151915201548252506020918252604081208054600160a060020a0319168155600181018290556002810180548382559083528383209193610f6992601f9290920104810190610a65565b60018054810190819055600160a060020a038316906002906101008110156100025790900160005081905550600160005054610102600050600084600160a060020a03168152602001908152602001600020600050819055507f994a936646fe87ffe4f1e469d3d6aa417d6b855598397f323de5b449f765f0c3826040518082600160a060020a0316815260200191505060405180910390a15b505b50565b1561063c5761064f8261018e565b1561065a575061063e565b610662610528565b60015460fa90106106775761067561068c565b505b60015460fa90106105a2575061063e565b6107465b600060015b600154811015610a79575b600154811080156106bc5750600281610100811015610002570154600014155b15610d9f5760010161069c565b156104dd57600160a060020a0383166000908152610102602052604081205492508214156106f7575061063c565b6001600160005054036000600050541115610712575061063c565b600060028361010081101561000257508301819055600160a060020a03841681526101026020526040812055610688610528565b5060408051600160a060020a038516815290517f58619076adf5bb0943d100ef88d52d7c3fd691b19d3a9071b555b651fbf418da9181900360200190a1505050565b1561063c5760015482111561079d575061063e565b60008290556107aa610528565b6040805183815290517facbdb084c721332ac59f9b8e392196c9eb0e4932862
da8eb9beaf0dad4f550da9181900360200190a15050565b506001820154600282900a908116600014156108005760009350610805565b600193505b50505092915050565b1561063c575061010555565b1561063e5760006101065550565b1561063c5781600160a060020a0316ff5b15610a555761084d846000610e793261018e565b15610909577f92ca3a80853e6663fa31fa10b99225f18d4902939b4c53a9caae9043f6efd00432858786866040518086600160a060020a0316815260200185815260200184600160a060020a031681526020018060200182810382528484828181526020019250808284378201915050965050505050505060405180910390a184600160a060020a03168484846040518083838082843750505090810191506000908083038185876185025a03f15060009350610a5592505050565b6000364360405180848480828437505050909101908152604051908190036020019020915061093990508161024b565b15801561095c575060008181526101096020526040812054600160a060020a0316145b15610a555760008181526101096020908152604082208054600160a060020a03191688178155600181018790556002018054858255818452928290209092601f01919091048101908490868215610a5d579182015b82811115610a5d5782358260005055916020019190600101906109b1565b50507f1733cbb53659d713b79580f79f3f9ff215f78a7c7aa45890f3b89fc5cddfbf328132868887876040518087815260200186600160a060020a0316815260200185815260200184600160a060020a03168152602001806020018281038252848482818152602001925080828437820191505097505050505050505060405180910390a15b949350505050565b506109cf9291505b80821115610a795760008155600101610a65565b5090565b15610c2c5760008381526101096020526040812054600160a060020a031614610c2c5760408051600091909120805460018201546002929092018054600160a060020a0392909216939091819083908015610afd57820191906000526020600020905b815481529060010190602001808311610ae057829003601f168201915b505091505060006040518083038185876185025a03f150505060008481526101096020908152604080519281902080546001820154600160a060020a033281811688529587018b905293860181905292166060850181905260a06080860181815260029390930180549187018290527fe7c957c06e9a662c1a6c77366179f5b702b97651dc28eee7d5bf1dff6e40bb4a975094958a959293909160c083019084908015610bcf57820191906000526020600020905
b815481529060010190602001808311610bb257829003601f168201915b5050965050505050505060405180910390a160008381526101096020908152604082208054600160a060020a031916815560018101839055600281018054848255908452828420919392610c3292601f9290920104810190610a65565b50919050565b50505060019150506101aa565b60008581526101036020526040812080549093501415610cc7576000805483556001838101919091556101048054918201808255828015829011610c9657818360005260206000209182019101610c969190610a65565b50505060028301819055610104805487929081101561000257600091909152600080516020610f9f83398151915201555b506001810154600283900a90811660001415610d975760408051600160a060020a03321681526020810187905281517fe1c52dc63b719ade82e8bea94cc41a0d5d28e4aaf536adb5e9cccc9ff8c1aeda929181900390910190a1815460019011610d84576000858152610103602052604090206002015461010480549091908110156100025760406000908120600080516020610f9f8339815191529290920181905580825560018083018290556002909201559450610d979050565b8154600019018255600182018054821790555b505050919050565b5b60018054118015610dc257506001546002906101008110156100025701546000145b15610dd65760018054600019019055610da0565b60015481108015610df95750600154600290610100811015610002570154600014155b8015610e1357506002816101008110156100025701546000145b15610e7457600154600290610100811015610002578101549082610100811015610002578101919091558190610102906000908361010081101561000257810154825260209290925260408120929092556001546101008110156100025701555b610691565b156101aa5761010754610e8f5b62015180420490565b1115610ea857600061010655610ea3610e86565b610107555b6101065480830110801590610ec65750610106546101055490830111155b15610edc575061010680548201905560016101aa565b5060006101aa565b61063c6101045460005b81811015610f745761010480548290811015610002576000918252600080516020610f9f833981519152015414610f6157610104805461010391600091849081101561000257600080516020610f9f83398151915201548252506020919091526040812081815560018101829055600201555b600101610eee565b50505060010161052f565b61010480546000808355919091526104dd90600080516020610f9f833981519152908101906
10a6556004c0be60200faa20559308cb7b5a1bb3255c16cb1cab91f525b5ae7a03d02fabe" + }, + "0x4f5777744b500616697cb655dcb02ee6cd51deb5": { + "balance": "0xb0983f1b83eec290", + "nonce": "2" + }, + "0xf8b483dba2c3b7176a3da549ad41a48bb3121069": { + "balance": "0x16969a0ba2c2d384d07", + "nonce": "67521" + } + }, + "config": { + "chainId": 1, + "homesteadBlock": 1150000, + "daoForkBlock": 1920000, + "daoForkSupport": true, + "eip150Block": 2463000, + "eip150Hash": "0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0", + "eip155Block": 2675000, + "eip158Block": 2675000, + "byzantiumBlock": 4370000, + "constantinopleBlock": 7280000, + "petersburgBlock": 7280000, + "istanbulBlock": 9069000, + "muirGlacierBlock": 9200000, + "berlinBlock": 12244000, + "londonBlock": 12965000, + "arrowGlacierBlock": 13773000, + "grayGlacierBlock": 15050000, + "terminalTotalDifficultyPassed": true, + "ethash": {} + } + }, + "context": { + "number": "995201", + "difficulty": "11940626048551", + "timestamp": "1455322773", + "gasLimit": "3141592", + "miner": "0xf8b483dba2c3b7176a3da549ad41a48bb3121069" + }, + "input": "0xf89102850a954d522e8303308594200edd17f30485a8735878661960cd7a9a95733f888ac7230489e80000a4ba51a6df00000000000000000000000000000000000000000000000000000000000000001ca04f2cc45b96f965296382b2e9b657e90808301d5179035a5d91a2de7b912def20a056e19271ea4e19e4e034f38e925e312beed4d300c267160eeb2f565c42deb578", + "tracerConfig": { + "withLog": true, + "onlyTopCall": true + }, + "result": { + "from": "0x4f5777744b500616697cb655dcb02ee6cd51deb5", + "gas": "0x33085", + "gasUsed": "0x1a9e5", + "to": "0x200edd17f30485a8735878661960cd7a9a95733f", + "input": "0xba51a6df0000000000000000000000000000000000000000000000000000000000000000", + "output": "0xba51a6df00000000000000000000000000000000000000000000000000000000", + "value": "0x8ac7230489e80000", + "type": "CALL" + } +} diff --git a/coreth/eth/tracers/internal/tracetest/testdata/prestate_tracer/create_existing_contract.json 
b/coreth/eth/tracers/internal/tracetest/testdata/prestate_tracer/create_existing_contract.json new file mode 100644 index 00000000..a34d3b75 --- /dev/null +++ b/coreth/eth/tracers/internal/tracetest/testdata/prestate_tracer/create_existing_contract.json @@ -0,0 +1,85 @@ +{ + "genesis": { + "difficulty": "6217248151198", + "extraData": "0xd783010103844765746887676f312e342e32856c696e7578", + "gasLimit": "3141592", + "hash": "0xe8bff55fe3e61936ef321cf3afaeb1ba2f7234e1e89535fa8ae39963caebe9c3", + "miner": "0x52bc44d5378309ee2abf1539bf71de1b7d7be3b5", + "mixHash": "0x03da00d5a15a064e5ebddf53cd0aaeb9a8aff0f40c0fb031a74f463d11ec83b8", + "nonce": "0x6575fe08c4167044", + "number": "243825", + "stateRoot": "0x47182fe2e6e740b8a76f82fe5c527d6ad548f805274f21792cf4047235b24fbf", + "timestamp": "1442424328", + "totalDifficulty": "1035061827427752845", + "alloc": { + "0x082d4cdf07f386ffa9258f52a5c49db4ac321ec6": { + "balance": "0xc820f93200f4000", + "nonce": "0x5E", + "code": "0x" + }, + "0x332b656504f4eabb44c8617a42af37461a34e9dc": { + "balance": "0x11faea4f35e5af80000", + "code": "0x" + }, + "0x52bc44d5378309ee2abf1539bf71de1b7d7be3b5": { + "balance": "0xbf681825be002ac452", + "nonce": "0x70FA", + "code": "0x" + }, + "0x82effbaaaf28614e55b2ba440fb198e0e5789b0f": { + "balance": "0xb3d0ac5cb94df6f6b0", + "nonce": "0x1", + "code": "0x" + } + }, + "config": { + "chainId": 1, + "homesteadBlock": 1150000, + "daoForkBlock": 1920000, + "daoForkSupport": true, + "eip150Block": 2463000, + "eip150Hash": "0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0", + "eip155Block": 2675000, + "eip158Block": 2675000, + "byzantiumBlock": 4370000, + "constantinopleBlock": 7280000, + "petersburgBlock": 7280000, + "istanbulBlock": 9069000, + "muirGlacierBlock": 9200000, + "berlinBlock": 12244000, + "londonBlock": 12965000, + "arrowGlacierBlock": 13773000, + "grayGlacierBlock": 15050000, + "terminalTotalDifficultyPassed": true, + "ethash": {} + } + }, + "context": { + "number": "243826", 
+ "difficulty": "6214212385501", + "timestamp": "1442424353", + "gasLimit": "3141592", + "miner": "0x52bc44d5378309ee2abf1539bf71de1b7d7be3b5" + }, + "input": "0xf8e85e850ba43b7400830f42408080b89660606040527382effbaaaf28614e55b2ba440fb198e0e5789b0f600060006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908302179055505b600060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16ff5b600a80608c6000396000f30060606040526008565b001ca0340b21661e5bb85a46319a15f33a362e5c0f02faa7cdbf9c5808b2134da968eaa0226e6788f8c20e211d436ab7f6298ef32fa4c23a509eeeaac0880d115c17bc3f", + "result": { + "0x082d4cdf07f386ffa9258f52a5c49db4ac321ec6": { + "balance": "0xc820f93200f4000", + "nonce": 94 + }, + "0x332b656504f4eabb44c8617a42af37461a34e9dc": { + "balance": "0x11faea4f35e5af80000", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000000": "0x0000000000000000000000000000000000000000000000000000000000000000" + } + }, + "0x52bc44d5378309ee2abf1539bf71de1b7d7be3b5": { + "balance": "0xbf681825be002ac452", + "nonce": 28922 + }, + "0x82effbaaaf28614e55b2ba440fb198e0e5789b0f": { + "balance": "0xb3d0ac5cb94df6f6b0", + "nonce": 1 + } + } +} diff --git a/coreth/eth/tracers/internal/tracetest/testdata/prestate_tracer/simple.json b/coreth/eth/tracers/internal/tracetest/testdata/prestate_tracer/simple.json new file mode 100644 index 00000000..7204bfcb --- /dev/null +++ b/coreth/eth/tracers/internal/tracetest/testdata/prestate_tracer/simple.json @@ -0,0 +1,83 @@ +{ + "context": { + "difficulty": "3502894804", + "gasLimit": "4722976", + "miner": "0x1585936b53834b021f68cc13eeefdec2efc8e724", + "number": "2289806", + "timestamp": "1513601314" + }, + "genesis": { + "alloc": { + "0x0024f658a46fbb89d8ac105e98d7ac7cbbaf27c5": { + "balance": "0x0", + "code": "0x", + "nonce": "22", + "storage": {} + }, + "0x3b873a919aa0512d5a0f09e6dcceaa4a6727fafe": { + "balance": "0x4d87094125a369d9bd5", + "code": 
"0x606060405236156100935763ffffffff60e060020a60003504166311ee8382811461009c57806313af4035146100be5780631f5e8f4c146100ee57806324daddc5146101125780634921a91a1461013b57806363e4bff414610157578063764978f91461017f578063893d20e8146101a1578063ba40aaa1146101cd578063cebc9a82146101f4578063e177246e14610216575b61009a5b5b565b005b34156100a457fe5b6100ac61023d565b60408051918252519081900360200190f35b34156100c657fe5b6100da600160a060020a0360043516610244565b604080519115158252519081900360200190f35b34156100f657fe5b6100da610307565b604080519115158252519081900360200190f35b341561011a57fe5b6100da6004351515610318565b604080519115158252519081900360200190f35b6100da6103d6565b604080519115158252519081900360200190f35b6100da600160a060020a0360043516610420565b604080519115158252519081900360200190f35b341561018757fe5b6100ac61046c565b60408051918252519081900360200190f35b34156101a957fe5b6101b1610473565b60408051600160a060020a039092168252519081900360200190f35b34156101d557fe5b6100da600435610483565b604080519115158252519081900360200190f35b34156101fc57fe5b6100ac61050d565b60408051918252519081900360200190f35b341561021e57fe5b6100da600435610514565b604080519115158252519081900360200190f35b6003545b90565b60006000610250610473565b600160a060020a031633600160a060020a03161415156102705760006000fd5b600160a060020a03831615156102865760006000fd5b50600054600160a060020a0390811690831681146102fb57604051600160a060020a0380851691908316907ffcf23a92150d56e85e3a3d33b357493246e55783095eb6a733eb8439ffc752c890600090a360008054600160a060020a031916600160a060020a03851617905560019150610300565b600091505b5b50919050565b60005460a060020a900460ff165b90565b60006000610324610473565b600160a060020a031633600160a060020a03161415156103445760006000fd5b5060005460a060020a900460ff16801515831515146102fb576000546040805160a060020a90920460ff1615158252841515602083015280517fe6cd46a119083b86efc6884b970bfa30c1708f53ba57b86716f15b2f4551a9539281900390910190a16000805460a060020a60ff02191660a060020a8515150217905560019150610300565b600091505b5b50919050565b60006103e0610307565b80156104055
7506103ef610473565b600160a060020a031633600160a060020a031614155b156104105760006000fd5b610419336105a0565b90505b5b90565b600061042a610307565b801561044f5750610439610473565b600160a060020a031633600160a060020a031614155b1561045a5760006000fd5b610463826105a0565b90505b5b919050565b6001545b90565b600054600160a060020a03165b90565b6000600061048f610473565b600160a060020a031633600160a060020a03161415156104af5760006000fd5b506001548281146102fb57604080518281526020810185905281517f79a3746dde45672c9e8ab3644b8bb9c399a103da2dc94b56ba09777330a83509929181900390910190a160018381559150610300565b600091505b5b50919050565b6002545b90565b60006000610520610473565b600160a060020a031633600160a060020a03161415156105405760006000fd5b506002548281146102fb57604080518281526020810185905281517ff6991a728965fedd6e927fdf16bdad42d8995970b4b31b8a2bf88767516e2494929181900390910190a1600283905560019150610300565b600091505b5b50919050565b60006000426105ad61023d565b116102fb576105c46105bd61050d565b4201610652565b6105cc61046c565b604051909150600160a060020a038416908290600081818185876187965a03f1925050501561063d57604080518281529051600160a060020a038516917f9bca65ce52fdef8a470977b51f247a2295123a4807dfa9e502edf0d30722da3b919081900360200190a260019150610300565b6102fb42610652565b5b600091505b50919050565b60038190555b505600a165627a7a72305820f3c973c8b7ed1f62000b6701bd5b708469e19d0f1d73fde378a56c07fd0b19090029", + "nonce": "1", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000000": "0x000000000000000000000001b436ba50d378d4bbc8660d312a13df6af6e89dfb", + "0x0000000000000000000000000000000000000000000000000000000000000001": "0x00000000000000000000000000000000000000000000000006f05b59d3b20000", + "0x0000000000000000000000000000000000000000000000000000000000000002": "0x000000000000000000000000000000000000000000000000000000000000003c", + "0x0000000000000000000000000000000000000000000000000000000000000003": "0x000000000000000000000000000000000000000000000000000000005a37b834" + } + }, + 
"0xb436ba50d378d4bbc8660d312a13df6af6e89dfb": { + "balance": "0x1780d77678137ac1b775", + "code": "0x", + "nonce": "29072", + "storage": {} + } + }, + "config": { + "byzantiumBlock": 1700000, + "chainId": 3, + "daoForkSupport": true, + "eip150Block": 0, + "eip150Hash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d", + "eip155Block": 10, + "eip158Block": 10, + "ethash": {}, + "homesteadBlock": 0 + }, + "difficulty": "3509749784", + "extraData": "0x4554482e45544846414e532e4f52472d4641313738394444", + "gasLimit": "4727564", + "hash": "0x609948ac3bd3c00b7736b933248891d6c901ee28f066241bddb28f4e00a9f440", + "miner": "0xbbf5029fd710d227630c8b7d338051b8e76d50b3", + "mixHash": "0xb131e4507c93c7377de00e7c271bf409ec7492767142ff0f45c882f8068c2ada", + "nonce": "0x4eb12e19c16d43da", + "number": "2289805", + "stateRoot": "0xc7f10f352bff82fac3c2999d3085093d12652e19c7fd32591de49dc5d91b4f1f", + "timestamp": "1513601261", + "totalDifficulty": "7143276353481064" + }, + "input": "0xf88b8271908506fc23ac0083015f90943b873a919aa0512d5a0f09e6dcceaa4a6727fafe80a463e4bff40000000000000000000000000024f658a46fbb89d8ac105e98d7ac7cbbaf27c52aa0bdce0b59e8761854e857fe64015f06dd08a4fbb7624f6094893a79a72e6ad6bea01d9dde033cff7bb235a3163f348a6d7ab8d6b52bc0963a95b91612e40ca766a4", + "result": { + "0x0024f658a46fbb89d8ac105e98d7ac7cbbaf27c5": { + "balance": "0x0", + "nonce": 22 + }, + "0x3b873a919aa0512d5a0f09e6dcceaa4a6727fafe": { + "balance": "0x4d87094125a369d9bd5", + "nonce": 1, + "code": 
"0x606060405236156100935763ffffffff60e060020a60003504166311ee8382811461009c57806313af4035146100be5780631f5e8f4c146100ee57806324daddc5146101125780634921a91a1461013b57806363e4bff414610157578063764978f91461017f578063893d20e8146101a1578063ba40aaa1146101cd578063cebc9a82146101f4578063e177246e14610216575b61009a5b5b565b005b34156100a457fe5b6100ac61023d565b60408051918252519081900360200190f35b34156100c657fe5b6100da600160a060020a0360043516610244565b604080519115158252519081900360200190f35b34156100f657fe5b6100da610307565b604080519115158252519081900360200190f35b341561011a57fe5b6100da6004351515610318565b604080519115158252519081900360200190f35b6100da6103d6565b604080519115158252519081900360200190f35b6100da600160a060020a0360043516610420565b604080519115158252519081900360200190f35b341561018757fe5b6100ac61046c565b60408051918252519081900360200190f35b34156101a957fe5b6101b1610473565b60408051600160a060020a039092168252519081900360200190f35b34156101d557fe5b6100da600435610483565b604080519115158252519081900360200190f35b34156101fc57fe5b6100ac61050d565b60408051918252519081900360200190f35b341561021e57fe5b6100da600435610514565b604080519115158252519081900360200190f35b6003545b90565b60006000610250610473565b600160a060020a031633600160a060020a03161415156102705760006000fd5b600160a060020a03831615156102865760006000fd5b50600054600160a060020a0390811690831681146102fb57604051600160a060020a0380851691908316907ffcf23a92150d56e85e3a3d33b357493246e55783095eb6a733eb8439ffc752c890600090a360008054600160a060020a031916600160a060020a03851617905560019150610300565b600091505b5b50919050565b60005460a060020a900460ff165b90565b60006000610324610473565b600160a060020a031633600160a060020a03161415156103445760006000fd5b5060005460a060020a900460ff16801515831515146102fb576000546040805160a060020a90920460ff1615158252841515602083015280517fe6cd46a119083b86efc6884b970bfa30c1708f53ba57b86716f15b2f4551a9539281900390910190a16000805460a060020a60ff02191660a060020a8515150217905560019150610300565b600091505b5b50919050565b60006103e0610307565b80156104055
7506103ef610473565b600160a060020a031633600160a060020a031614155b156104105760006000fd5b610419336105a0565b90505b5b90565b600061042a610307565b801561044f5750610439610473565b600160a060020a031633600160a060020a031614155b1561045a5760006000fd5b610463826105a0565b90505b5b919050565b6001545b90565b600054600160a060020a03165b90565b6000600061048f610473565b600160a060020a031633600160a060020a03161415156104af5760006000fd5b506001548281146102fb57604080518281526020810185905281517f79a3746dde45672c9e8ab3644b8bb9c399a103da2dc94b56ba09777330a83509929181900390910190a160018381559150610300565b600091505b5b50919050565b6002545b90565b60006000610520610473565b600160a060020a031633600160a060020a03161415156105405760006000fd5b506002548281146102fb57604080518281526020810185905281517ff6991a728965fedd6e927fdf16bdad42d8995970b4b31b8a2bf88767516e2494929181900390910190a1600283905560019150610300565b600091505b5b50919050565b60006000426105ad61023d565b116102fb576105c46105bd61050d565b4201610652565b6105cc61046c565b604051909150600160a060020a038416908290600081818185876187965a03f1925050501561063d57604080518281529051600160a060020a038516917f9bca65ce52fdef8a470977b51f247a2295123a4807dfa9e502edf0d30722da3b919081900360200190a260019150610300565b6102fb42610652565b5b600091505b50919050565b60038190555b505600a165627a7a72305820f3c973c8b7ed1f62000b6701bd5b708469e19d0f1d73fde378a56c07fd0b19090029", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000000": "0x000000000000000000000001b436ba50d378d4bbc8660d312a13df6af6e89dfb", + "0x0000000000000000000000000000000000000000000000000000000000000001": "0x00000000000000000000000000000000000000000000000006f05b59d3b20000", + "0x0000000000000000000000000000000000000000000000000000000000000002": "0x000000000000000000000000000000000000000000000000000000000000003c", + "0x0000000000000000000000000000000000000000000000000000000000000003": "0x000000000000000000000000000000000000000000000000000000005a37b834" + } + }, + "0xb436ba50d378d4bbc8660d312a13df6af6e89dfb": { + 
"balance": "0x1780d77678137ac1b775", + "nonce": 29072 + }, + "0x1585936b53834b021f68cc13eeefdec2efc8e724": { + "balance": "0x0" + } + } +} diff --git a/coreth/eth/tracers/internal/tracetest/testdata/prestate_tracer_ant/sload.json b/coreth/eth/tracers/internal/tracetest/testdata/prestate_tracer_ant/sload.json new file mode 100644 index 00000000..c779265c --- /dev/null +++ b/coreth/eth/tracers/internal/tracetest/testdata/prestate_tracer_ant/sload.json @@ -0,0 +1,69 @@ +{ + "context": { + "difficulty": "3755480783", + "gasLimit": "5401723", + "miner": "0xd049bfd667cb46aa3ef5df0da3e57db3be39e511", + "number": "2294702", + "timestamp": "1513676146" + }, + "genesis": { + "alloc": { + "0xe296F389F90f3cF5EdeFb63690eb7a6257B69203": { + "balance": "0xcf3e0938579f00000", + "code": "0x", + "nonce": "0", + "storage": {} + }, + "0x7dc9c9730689ff0b0fd506c67db815f12d90a448": { + "balance": "0x0", + "code": "0x600154", + "nonce": "0", + "storage": {} + } + }, + "config": { + "chainId": 1, + "homesteadBlock": 0, + "eip150Block": 0, + "eip150Hash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "eip155Block": 0, + "eip158Block": 0, + "byzantiumBlock": 0, + "constantinopleBlock": 0, + "petersburgBlock": 0, + "istanbulBlock": 0, + "muirGlacierBlock": 0, + "apricotPhase1BlockTimestamp": 0, + "apricotPhase2BlockTimestamp": 0 + }, + "difficulty": "3757315409", + "extraData": "0x566961425443", + "gasLimit": "5406414", + "hash": "0xae107f592eebdd9ff8d6ba00363676096e6afb0e1007a7d3d0af88173077378d", + "miner": "0xd049bfd667cb46aa3ef5df0da3e57db3be39e511", + "mixHash": "0xc927aa05a38bc3de864e95c33b3ae559d3f39c4ccd51cef6f113f9c50ba0caf1", + "nonce": "0x93363bbd2c95f410", + "number": "2294701", + "stateRoot": "0x6b6737d5bde8058990483e915866bd1578014baeff57bd5e4ed228a2bfad635c", + "timestamp": "1513676127", + "totalDifficulty": "7160808139332585" + }, + "input": 
"0xf8ba808534630b8a00834c4b4094010000000000000000000000000000000000000280b8547dc9c9730689ff0b0fd506c67db815f12d90a4480000000000000000000000007dc9c9730689ff0b0fd506c67db815f12d90a448000000000000000000000000000000000000000000000000000000000000000026a0b441b733b17156dee3ff2107978a4238bfcb5874a36d008a1bcc5b22617a3232a04212c6b26069068d604be380ace25133dab677f9d18c2010d6f92cc7491fef86", + "tracerConfig": { + "onlyTopCall": false, + "diffMode": true + }, + "result": { + "post": { + "0xe296f389f90f3cf5edefb63690eb7a6257b69203": { + "balance": "0xcf3bd7819714fea00", + "nonce": 1 + } + }, + "pre": { + "0xe296f389f90f3cf5edefb63690eb7a6257b69203": { + "balance": "0xcf3e0938579f00000" + } + } + } +} diff --git a/coreth/eth/tracers/internal/tracetest/testdata/prestate_tracer_legacy/simple.json b/coreth/eth/tracers/internal/tracetest/testdata/prestate_tracer_legacy/simple.json new file mode 100644 index 00000000..44b1f08d --- /dev/null +++ b/coreth/eth/tracers/internal/tracetest/testdata/prestate_tracer_legacy/simple.json @@ -0,0 +1,84 @@ +{ + "context": { + "difficulty": "3502894804", + "gasLimit": "4722976", + "miner": "0x1585936b53834b021f68cc13eeefdec2efc8e724", + "number": "2289806", + "timestamp": "1513601314" + }, + "genesis": { + "alloc": { + "0x0024f658a46fbb89d8ac105e98d7ac7cbbaf27c5": { + "balance": "0x0", + "code": "0x", + "nonce": "22", + "storage": {} + }, + "0x3b873a919aa0512d5a0f09e6dcceaa4a6727fafe": { + "balance": "0x4d87094125a369d9bd5", + "code": 
"0x606060405236156100935763ffffffff60e060020a60003504166311ee8382811461009c57806313af4035146100be5780631f5e8f4c146100ee57806324daddc5146101125780634921a91a1461013b57806363e4bff414610157578063764978f91461017f578063893d20e8146101a1578063ba40aaa1146101cd578063cebc9a82146101f4578063e177246e14610216575b61009a5b5b565b005b34156100a457fe5b6100ac61023d565b60408051918252519081900360200190f35b34156100c657fe5b6100da600160a060020a0360043516610244565b604080519115158252519081900360200190f35b34156100f657fe5b6100da610307565b604080519115158252519081900360200190f35b341561011a57fe5b6100da6004351515610318565b604080519115158252519081900360200190f35b6100da6103d6565b604080519115158252519081900360200190f35b6100da600160a060020a0360043516610420565b604080519115158252519081900360200190f35b341561018757fe5b6100ac61046c565b60408051918252519081900360200190f35b34156101a957fe5b6101b1610473565b60408051600160a060020a039092168252519081900360200190f35b34156101d557fe5b6100da600435610483565b604080519115158252519081900360200190f35b34156101fc57fe5b6100ac61050d565b60408051918252519081900360200190f35b341561021e57fe5b6100da600435610514565b604080519115158252519081900360200190f35b6003545b90565b60006000610250610473565b600160a060020a031633600160a060020a03161415156102705760006000fd5b600160a060020a03831615156102865760006000fd5b50600054600160a060020a0390811690831681146102fb57604051600160a060020a0380851691908316907ffcf23a92150d56e85e3a3d33b357493246e55783095eb6a733eb8439ffc752c890600090a360008054600160a060020a031916600160a060020a03851617905560019150610300565b600091505b5b50919050565b60005460a060020a900460ff165b90565b60006000610324610473565b600160a060020a031633600160a060020a03161415156103445760006000fd5b5060005460a060020a900460ff16801515831515146102fb576000546040805160a060020a90920460ff1615158252841515602083015280517fe6cd46a119083b86efc6884b970bfa30c1708f53ba57b86716f15b2f4551a9539281900390910190a16000805460a060020a60ff02191660a060020a8515150217905560019150610300565b600091505b5b50919050565b60006103e0610307565b80156104055
7506103ef610473565b600160a060020a031633600160a060020a031614155b156104105760006000fd5b610419336105a0565b90505b5b90565b600061042a610307565b801561044f5750610439610473565b600160a060020a031633600160a060020a031614155b1561045a5760006000fd5b610463826105a0565b90505b5b919050565b6001545b90565b600054600160a060020a03165b90565b6000600061048f610473565b600160a060020a031633600160a060020a03161415156104af5760006000fd5b506001548281146102fb57604080518281526020810185905281517f79a3746dde45672c9e8ab3644b8bb9c399a103da2dc94b56ba09777330a83509929181900390910190a160018381559150610300565b600091505b5b50919050565b6002545b90565b60006000610520610473565b600160a060020a031633600160a060020a03161415156105405760006000fd5b506002548281146102fb57604080518281526020810185905281517ff6991a728965fedd6e927fdf16bdad42d8995970b4b31b8a2bf88767516e2494929181900390910190a1600283905560019150610300565b600091505b5b50919050565b60006000426105ad61023d565b116102fb576105c46105bd61050d565b4201610652565b6105cc61046c565b604051909150600160a060020a038416908290600081818185876187965a03f1925050501561063d57604080518281529051600160a060020a038516917f9bca65ce52fdef8a470977b51f247a2295123a4807dfa9e502edf0d30722da3b919081900360200190a260019150610300565b6102fb42610652565b5b600091505b50919050565b60038190555b505600a165627a7a72305820f3c973c8b7ed1f62000b6701bd5b708469e19d0f1d73fde378a56c07fd0b19090029", + "nonce": "1", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000000": "0x000000000000000000000001b436ba50d378d4bbc8660d312a13df6af6e89dfb", + "0x0000000000000000000000000000000000000000000000000000000000000001": "0x00000000000000000000000000000000000000000000000006f05b59d3b20000", + "0x0000000000000000000000000000000000000000000000000000000000000002": "0x000000000000000000000000000000000000000000000000000000000000003c", + "0x0000000000000000000000000000000000000000000000000000000000000003": "0x000000000000000000000000000000000000000000000000000000005a37b834" + } + }, + 
"0xb436ba50d378d4bbc8660d312a13df6af6e89dfb": { + "balance": "0x1780d77678137ac1b775", + "code": "0x", + "nonce": "29072", + "storage": {} + } + }, + "config": { + "byzantiumBlock": 1700000, + "chainId": 3, + "daoForkSupport": true, + "eip150Block": 0, + "eip150Hash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d", + "eip155Block": 10, + "eip158Block": 10, + "ethash": {}, + "homesteadBlock": 0 + }, + "difficulty": "3509749784", + "extraData": "0x4554482e45544846414e532e4f52472d4641313738394444", + "gasLimit": "4727564", + "hash": "0x609948ac3bd3c00b7736b933248891d6c901ee28f066241bddb28f4e00a9f440", + "miner": "0xbbf5029fd710d227630c8b7d338051b8e76d50b3", + "mixHash": "0xb131e4507c93c7377de00e7c271bf409ec7492767142ff0f45c882f8068c2ada", + "nonce": "0x4eb12e19c16d43da", + "number": "2289805", + "stateRoot": "0xc7f10f352bff82fac3c2999d3085093d12652e19c7fd32591de49dc5d91b4f1f", + "timestamp": "1513601261", + "totalDifficulty": "7143276353481064" + }, + "input": "0xf88b8271908506fc23ac0083015f90943b873a919aa0512d5a0f09e6dcceaa4a6727fafe80a463e4bff40000000000000000000000000024f658a46fbb89d8ac105e98d7ac7cbbaf27c52aa0bdce0b59e8761854e857fe64015f06dd08a4fbb7624f6094893a79a72e6ad6bea01d9dde033cff7bb235a3163f348a6d7ab8d6b52bc0963a95b91612e40ca766a4", + "result": { + "0x0024f658a46fbb89d8ac105e98d7ac7cbbaf27c5": { + "balance": "0x0", + "code": "0x", + "nonce": 22, + "storage": {} + }, + "0x3b873a919aa0512d5a0f09e6dcceaa4a6727fafe": { + "balance": "0x4d87094125a369d9bd5", + "code": 
"0x606060405236156100935763ffffffff60e060020a60003504166311ee8382811461009c57806313af4035146100be5780631f5e8f4c146100ee57806324daddc5146101125780634921a91a1461013b57806363e4bff414610157578063764978f91461017f578063893d20e8146101a1578063ba40aaa1146101cd578063cebc9a82146101f4578063e177246e14610216575b61009a5b5b565b005b34156100a457fe5b6100ac61023d565b60408051918252519081900360200190f35b34156100c657fe5b6100da600160a060020a0360043516610244565b604080519115158252519081900360200190f35b34156100f657fe5b6100da610307565b604080519115158252519081900360200190f35b341561011a57fe5b6100da6004351515610318565b604080519115158252519081900360200190f35b6100da6103d6565b604080519115158252519081900360200190f35b6100da600160a060020a0360043516610420565b604080519115158252519081900360200190f35b341561018757fe5b6100ac61046c565b60408051918252519081900360200190f35b34156101a957fe5b6101b1610473565b60408051600160a060020a039092168252519081900360200190f35b34156101d557fe5b6100da600435610483565b604080519115158252519081900360200190f35b34156101fc57fe5b6100ac61050d565b60408051918252519081900360200190f35b341561021e57fe5b6100da600435610514565b604080519115158252519081900360200190f35b6003545b90565b60006000610250610473565b600160a060020a031633600160a060020a03161415156102705760006000fd5b600160a060020a03831615156102865760006000fd5b50600054600160a060020a0390811690831681146102fb57604051600160a060020a0380851691908316907ffcf23a92150d56e85e3a3d33b357493246e55783095eb6a733eb8439ffc752c890600090a360008054600160a060020a031916600160a060020a03851617905560019150610300565b600091505b5b50919050565b60005460a060020a900460ff165b90565b60006000610324610473565b600160a060020a031633600160a060020a03161415156103445760006000fd5b5060005460a060020a900460ff16801515831515146102fb576000546040805160a060020a90920460ff1615158252841515602083015280517fe6cd46a119083b86efc6884b970bfa30c1708f53ba57b86716f15b2f4551a9539281900390910190a16000805460a060020a60ff02191660a060020a8515150217905560019150610300565b600091505b5b50919050565b60006103e0610307565b80156104055
7506103ef610473565b600160a060020a031633600160a060020a031614155b156104105760006000fd5b610419336105a0565b90505b5b90565b600061042a610307565b801561044f5750610439610473565b600160a060020a031633600160a060020a031614155b1561045a5760006000fd5b610463826105a0565b90505b5b919050565b6001545b90565b600054600160a060020a03165b90565b6000600061048f610473565b600160a060020a031633600160a060020a03161415156104af5760006000fd5b506001548281146102fb57604080518281526020810185905281517f79a3746dde45672c9e8ab3644b8bb9c399a103da2dc94b56ba09777330a83509929181900390910190a160018381559150610300565b600091505b5b50919050565b6002545b90565b60006000610520610473565b600160a060020a031633600160a060020a03161415156105405760006000fd5b506002548281146102fb57604080518281526020810185905281517ff6991a728965fedd6e927fdf16bdad42d8995970b4b31b8a2bf88767516e2494929181900390910190a1600283905560019150610300565b600091505b5b50919050565b60006000426105ad61023d565b116102fb576105c46105bd61050d565b4201610652565b6105cc61046c565b604051909150600160a060020a038416908290600081818185876187965a03f1925050501561063d57604080518281529051600160a060020a038516917f9bca65ce52fdef8a470977b51f247a2295123a4807dfa9e502edf0d30722da3b919081900360200190a260019150610300565b6102fb42610652565b5b600091505b50919050565b60038190555b505600a165627a7a72305820f3c973c8b7ed1f62000b6701bd5b708469e19d0f1d73fde378a56c07fd0b19090029", + "nonce": 1, + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000000": "0x000000000000000000000001b436ba50d378d4bbc8660d312a13df6af6e89dfb", + "0x0000000000000000000000000000000000000000000000000000000000000001": "0x00000000000000000000000000000000000000000000000006f05b59d3b20000", + "0x0000000000000000000000000000000000000000000000000000000000000002": "0x000000000000000000000000000000000000000000000000000000000000003c", + "0x0000000000000000000000000000000000000000000000000000000000000003": "0x000000000000000000000000000000000000000000000000000000005a37b834" + } + }, + 
"0xb436ba50d378d4bbc8660d312a13df6af6e89dfb": { + "balance": "0x1780d77678137ac1b775", + "code": "0x", + "nonce": 29072, + "storage": {} + } + } +} diff --git a/coreth/eth/tracers/internal/tracetest/testdata/prestate_tracer_with_diff_mode/create.json b/coreth/eth/tracers/internal/tracetest/testdata/prestate_tracer_with_diff_mode/create.json new file mode 100644 index 00000000..415d6302 --- /dev/null +++ b/coreth/eth/tracers/internal/tracetest/testdata/prestate_tracer_with_diff_mode/create.json @@ -0,0 +1,95 @@ +{ + "genesis": { + "difficulty": "13756228101629", + "extraData": "0xd983010302844765746887676f312e342e328777696e646f7773", + "gasLimit": "3141592", + "hash": "0x58b7a87b6ba10b46b4e251d64ebc3d9822dd82218eaf24dff6796f6f1f687251", + "miner": "0xf8b483dba2c3b7176a3da549ad41a48bb3121069", + "mixHash": "0x5984b9a316116bd890e6e5f4c52d655184b0d7aa74821e1382d7760f9803c1dd", + "nonce": "0xea4bb4997242c681", + "number": "1061221", + "stateRoot": "0x5402c04d481414248d824c3b61e924e0c9307adbc9fbaae774a74cce30a4163d", + "timestamp": "1456458069", + "totalDifficulty": "7930751135586064334", + "alloc": { + "0x2a65aca4d5fc5b5c859090a6c34d164135398226": { + "balance": "0x9fb6b81e112638b886", + "nonce": "217865", + "code": "0x" + }, + "0xf0c5cef39b17c213cfe090a46b8c7760ffb7928a": { + "balance": "0x15b6828e22bb12188", + "nonce": "747", + "code": "0x" + } + }, + "config": { + "chainId": 1, + "homesteadBlock": 1150000, + "daoForkBlock": 1920000, + "daoForkSupport": true, + "eip150Block": 2463000, + "eip150Hash": "0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0", + "eip155Block": 2675000, + "eip158Block": 2675000, + "byzantiumBlock": 4370000, + "constantinopleBlock": 7280000, + "petersburgBlock": 7280000, + "istanbulBlock": 9069000, + "muirGlacierBlock": 9200000, + "berlinBlock": 12244000, + "londonBlock": 12965000, + "arrowGlacierBlock": 13773000, + "grayGlacierBlock": 15050000, + "ethash": {} + } + }, + "context": { + "number": "1061222", + "difficulty": 
"13749511193633", + "timestamp": "1456458097", + "gasLimit": "3141592", + "miner": "0x2a65aca4d5fc5b5c859090a6c34d164135398226" + }, + "input": "0xf905498202eb850ba43b7400830f42408080b904f460606040526040516102b43803806102b48339016040526060805160600190602001505b5b33600060006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908302179055505b806001600050908051906020019082805482825590600052602060002090601f01602090048101928215609e579182015b82811115609d5782518260005055916020019190600101906081565b5b50905060c5919060a9565b8082111560c1576000818150600090555060010160a9565b5090565b50505b506101dc806100d86000396000f30060606040526000357c01000000000000000000000000000000000000000000000000000000009004806341c0e1b514610044578063cfae32171461005157610042565b005b61004f6004506100ca565b005b61005c60045061015e565b60405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156100bc5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b600060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16141561015b57600060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16ff5b5b565b60206040519081016040528060008152602001506001600050805480601f016020809104026020016040519081016040528092919081815260200182805480156101cd57820191906000526020600020905b8154815290600101906020018083116101b057829003601f168201915b505050505090506101d9565b9056000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000001ee7b225f6964223a225a473466784a7245323639384866623839222c22666f726d5f736f75726365223a22434c54523031222c22636f6d6d69746d656e745f64617465223a22222c22626f72726f7765725f6e616d65223a22222c22626f72726f7765725f616464726573735f6c696e6531223a22222c22626f72726f7765725f616464726573735f6c696e6532223a22222c22626f7
2726f7765725f636f6e74616374223a22222c22626f72726f7765725f7374617465223a22222c22626f72726f7765725f74797065223a22222c2270726f70657274795f61646472657373223a22222c226c6f616e5f616d6f756e745f7772697474656e223a22222c226c6f616e5f616d6f756e74223a22222c224c54565f7772697474656e223a22222c224c5456223a22222c2244534352223a22222c2270726f70657274795f74797065223a22222c2270726f70657274795f6465736372697074696f6e223a22222c226c656e646572223a22222c2267756172616e746f7273223a22222c226c696d69746564223a22222c226361705f616d6f756e74223a22222c226361705f70657263656e745f7772697474656e223a22222c226361705f70657263656e74616765223a22222c227465726d5f7772697474656e223a22222c227465726d223a22222c22657874656e64223a22227d0000000000000000000000000000000000001ba027d54712289af34f0ec0f06092745104d68e5801cd17097bc1104111f855258da070ec9f1c942d9bedf89f9660a684d3bb8cd9c2ac7f6dd883cb3e26a193180244", + "tracerConfig": { + "diffMode": true + }, + "result": { + "pre": { + "0xf0c5cef39b17c213cfe090a46b8c7760ffb7928a": { + "balance": "0x15b6828e22bb12188", + "nonce": 747 + } + }, + "post": { + "0x40f2f445da6c9047554683fb382fba6769717116": { + "code": 
"0x60606040526000357c01000000000000000000000000000000000000000000000000000000009004806341c0e1b514610044578063cfae32171461005157610042565b005b61004f6004506100ca565b005b61005c60045061015e565b60405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156100bc5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b600060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16141561015b57600060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16ff5b5b565b60206040519081016040528060008152602001506001600050805480601f016020809104026020016040519081016040528092919081815260200182805480156101cd57820191906000526020600020905b8154815290600101906020018083116101b057829003601f168201915b505050505090506101d9565b9056", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000000": "0x000000000000000000000000f0c5cef39b17c213cfe090a46b8c7760ffb7928a", + "0x0000000000000000000000000000000000000000000000000000000000000001": "0x00000000000000000000000000000000000000000000000000000000000001ee", + "0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf6": "0x7b225f6964223a225a473466784a7245323639384866623839222c22666f726d", + "0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf7": "0x5f736f75726365223a22434c54523031222c22636f6d6d69746d656e745f6461", + "0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf8": "0x7465223a22222c22626f72726f7765725f6e616d65223a22222c22626f72726f", + "0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf9": "0x7765725f616464726573735f6c696e6531223a22222c22626f72726f7765725f", + "0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cfa": "0x616464726573735f6c696e6532223a22222c22626f72726f7765725f636f6e74", + 
"0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cfb": "0x616374223a22222c22626f72726f7765725f7374617465223a22222c22626f72", + "0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cfc": "0x726f7765725f74797065223a22222c2270726f70657274795f61646472657373", + "0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cfd": "0x223a22222c226c6f616e5f616d6f756e745f7772697474656e223a22222c226c", + "0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cfe": "0x6f616e5f616d6f756e74223a22222c224c54565f7772697474656e223a22222c", + "0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cff": "0x224c5456223a22222c2244534352223a22222c2270726f70657274795f747970", + "0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0d00": "0x65223a22222c2270726f70657274795f6465736372697074696f6e223a22222c", + "0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0d01": "0x226c656e646572223a22222c2267756172616e746f7273223a22222c226c696d", + "0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0d02": "0x69746564223a22222c226361705f616d6f756e74223a22222c226361705f7065", + "0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0d03": "0x7263656e745f7772697474656e223a22222c226361705f70657263656e746167", + "0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0d04": "0x65223a22222c227465726d5f7772697474656e223a22222c227465726d223a22", + "0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0d05": "0x222c22657874656e64223a22227d000000000000000000000000000000000000" + } + }, + "0xf0c5cef39b17c213cfe090a46b8c7760ffb7928a": { + "balance": "0x15b058920efcc5188", + "nonce": 748 + } + } + } +} diff --git a/coreth/eth/tracers/internal/tracetest/testdata/prestate_tracer_with_diff_mode/create_failed.json b/coreth/eth/tracers/internal/tracetest/testdata/prestate_tracer_with_diff_mode/create_failed.json new file mode 100644 index 00000000..b4c77165 --- /dev/null +++ 
b/coreth/eth/tracers/internal/tracetest/testdata/prestate_tracer_with_diff_mode/create_failed.json @@ -0,0 +1,89 @@ +{ + "genesis": { + "baseFeePerGas": "51088069741", + "difficulty": "14315558652874667", + "extraData": "0xd883010a10846765746888676f312e31362e35856c696e7578", + "gasLimit": "30058590", + "hash": "0xdf6b95183f99054fb6541e3b482c0109c9f6be40553cff24efa3ac76736adbf5", + "miner": "0xb7e390864a90b7b923c9f9310c6f98aafe43f707", + "mixHash": "0x8d76b0d32e42ab277dbf00836eabef76674cd70ae2bb53718175069ad6b6147e", + "nonce": "0x8d3a1c010ad2c687", + "number": "14707767", + "stateRoot": "0x8a50c896a6f7eb1f3479337db981fa10ce316281cb4dd2f07487be9ca27dae6b", + "timestamp": "1651623275", + "alloc": { + "0x0000000000000000000000000000000000000000": { + "balance": "0x268fd0b894b8c4f6d1f" + }, + "0x13b152c9f50878ffaf3de41e192653bda545d889": { + "balance": "0x0", + "nonce": "1", + "code": "0x363d3d373d3d3d363d73059ffafdc6ef594230de44f824e2bd0a51ca5ded5af43d82803e903d91602b57fd5bf3" + }, + "0x808b4da0be6c9512e948521452227efc619bea52": { + "balance": "0x2cdb96c56db040b43", + "nonce": "1223932" + }, + "0x8f03f1a3f10c05e7cccf75c1fd10168e06659be7": { + "balance": "0x38079b28689d40240e", + "nonce": "44" + }, + "0xffa397285ce46fb78c588a9e993286aac68c37cd": { + "balance": "0x0", + "nonce": "747319", + "code": 
"0x608060405234801561001057600080fd5b50600436106100365760003560e01c8063b97a23191461003b578063fb90b3201461006f575b600080fd5b6100436100bd565b604051808273ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b6100bb6004803603604081101561008557600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190803590602001909291905050506100e1565b005b60008054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b60008282604051602001808373ffffffffffffffffffffffffffffffffffffffff1660601b815260140182815260200192505050604051602081830303815290604052805190602001209050600061015960008054906101000a900473ffffffffffffffffffffffffffffffffffffffff168361024d565b90508073ffffffffffffffffffffffffffffffffffffffff166319ab453c856040518263ffffffff1660e01b8152600401808273ffffffffffffffffffffffffffffffffffffffff168152602001915050600060405180830381600087803b1580156101c457600080fd5b505af11580156101d8573d6000803e3d6000fd5b505050507fa35ea2cc726861482a50a162c72aad60965cc64641d419cd4d675036238b52048185604051808373ffffffffffffffffffffffffffffffffffffffff1681526020018273ffffffffffffffffffffffffffffffffffffffff1681526020019250505060405180910390a150505050565b6000808360601b90506040517f3d602d80600a3d3981f3363d3d373d3d3d363d7300000000000000000000000081528160148201527f5af43d82803e903d91602b57fd5bf300000000000000000000000000000000006028820152836037826000f5925050509291505056fea2646970667358221220c87b2492828fdd7dad3175a32a98ff07fc0eedf106536f2eddd9a016971c56a764736f6c63430007050033", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000000": "0x000000000000000000000000059ffafdc6ef594230de44f824e2bd0a51ca5ded" + } + } + }, + "config": { + "chainId": 1, + "homesteadBlock": 1150000, + "daoForkBlock": 1920000, + "daoForkSupport": true, + "eip150Block": 2463000, + "eip150Hash": "0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0", + "eip155Block": 2675000, + "eip158Block": 2675000, + "byzantiumBlock": 4370000, + 
"constantinopleBlock": 7280000, + "petersburgBlock": 7280000, + "istanbulBlock": 9069000, + "muirGlacierBlock": 9200000, + "berlinBlock": 12244000, + "londonBlock": 12965000, + "arrowGlacierBlock": 13773000, + "grayGlacierBlock": 15050000, + "terminalTotalDifficultyPassed": true, + "apricotPhase1BlockTimestamp": 0, + "apricotPhase2BlockTimestamp": 0, + "apricotPhase3BlockTimestamp": 0 + } + }, + "context": { + "number": "14707768", + "difficulty": "14322823549655084", + "timestamp": "1651623279", + "gasLimit": "30029237", + "miner": "0x8f03f1a3f10c05e7cccf75c1fd10168e06659be7" + }, + "input": "0x02f8b4018312acfc8459682f00851a46bcf47a8302b1a194ffa397285ce46fb78c588a9e993286aac68c37cd80b844fb90b3200000000000000000000000002a549b4af9ec39b03142da6dc32221fc390b553300000000000000000000000000000000000000000000000000000000000cb3d5c001a03002079d2873f7963c4278200c43aa71efad262b2150bc8524480acfc38b5faaa077d44aa09d56b9cf99443c7f55aaad1bbae9cfb5bbb9de31eaf7a8f9e623e980", + "tracerConfig": { + "diffMode": true + }, + "result": { + "pre": { + "0x808b4da0be6c9512e948521452227efc619bea52": { + "balance": "0x2cdb96c56db040b43", + "nonce": 1223932 + } + }, + "post": { + "0x808b4da0be6c9512e948521452227efc619bea52": { + "balance": "0x2cd72a36dd031f089", + "nonce": 1223933 + } + } + } +} diff --git a/coreth/eth/tracers/internal/tracetest/testdata/prestate_tracer_with_diff_mode/create_suicide.json b/coreth/eth/tracers/internal/tracetest/testdata/prestate_tracer_with_diff_mode/create_suicide.json new file mode 100644 index 00000000..01d57153 --- /dev/null +++ b/coreth/eth/tracers/internal/tracetest/testdata/prestate_tracer_with_diff_mode/create_suicide.json @@ -0,0 +1,97 @@ +{ + "genesis": { + "difficulty": "6217248151198", + "extraData": "0xd783010103844765746887676f312e342e32856c696e7578", + "gasLimit": "3141592", + "hash": "0xe8bff55fe3e61936ef321cf3afaeb1ba2f7234e1e89535fa8ae39963caebe9c3", + "miner": "0x52bc44d5378309ee2abf1539bf71de1b7d7be3b5", + "mixHash": 
"0x03da00d5a15a064e5ebddf53cd0aaeb9a8aff0f40c0fb031a74f463d11ec83b8", + "nonce": "0x6575fe08c4167044", + "number": "243825", + "stateRoot": "0x47182fe2e6e740b8a76f82fe5c527d6ad548f805274f21792cf4047235b24fbf", + "timestamp": "1442424328", + "totalDifficulty": "1035061827427752845", + "alloc": { + "0x082d4cdf07f386ffa9258f52a5c49db4ac321ec6": { + "balance": "0xc820f93200f4000", + "nonce": "0x5E", + "code": "0x" + }, + "0x332b656504f4eabb44c8617a42af37461a34e9dc": { + "balance": "0x11faea4f35e5af80000", + "code": "0x", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000000": "0x0000000000000000000000000000000000000000000000000000000000000000" + } + }, + "0x52bc44d5378309ee2abf1539bf71de1b7d7be3b5": { + "balance": "0xbf681825be002ac452", + "nonce": "0x70FA", + "code": "0x" + }, + "0x82effbaaaf28614e55b2ba440fb198e0e5789b0f": { + "balance": "0xb3d0ac5cb94df6f6b0", + "nonce": "0x1", + "code": "0x" + } + }, + "config": { + "chainId": 1, + "homesteadBlock": 1150000, + "daoForkBlock": 1920000, + "daoForkSupport": true, + "eip150Block": 2463000, + "eip150Hash": "0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0", + "eip155Block": 2675000, + "eip158Block": 2675000, + "byzantiumBlock": 4370000, + "constantinopleBlock": 7280000, + "petersburgBlock": 7280000, + "istanbulBlock": 9069000, + "muirGlacierBlock": 9200000, + "berlinBlock": 12244000, + "londonBlock": 12965000, + "arrowGlacierBlock": 13773000, + "grayGlacierBlock": 15050000, + "ethash": {} + } + }, + "context": { + "number": "243826", + "difficulty": "6214212385501", + "timestamp": "1442424353", + "gasLimit": "3141592", + "miner": "0x52bc44d5378309ee2abf1539bf71de1b7d7be3b5" + }, + "input": 
"0xf8e85e850ba43b7400830f42408080b89660606040527382effbaaaf28614e55b2ba440fb198e0e5789b0f600060006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908302179055505b600060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16ff5b600a80608c6000396000f30060606040526008565b001ca0340b21661e5bb85a46319a15f33a362e5c0f02faa7cdbf9c5808b2134da968eaa0226e6788f8c20e211d436ab7f6298ef32fa4c23a509eeeaac0880d115c17bc3f", + "tracerConfig": { + "diffMode": true + }, + "result": { + "pre": { + "0x082d4cdf07f386ffa9258f52a5c49db4ac321ec6": { + "balance": "0xc820f93200f4000", + "nonce": 94 + }, + "0x332b656504f4eabb44c8617a42af37461a34e9dc": { + "balance": "0x11faea4f35e5af80000", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000000": "0x0000000000000000000000000000000000000000000000000000000000000000" + } + }, + "0x82effbaaaf28614e55b2ba440fb198e0e5789b0f": { + "balance": "0xb3d0ac5cb94df6f6b0", + "nonce": 1 + } + }, + "post": { + "0x082d4cdf07f386ffa9258f52a5c49db4ac321ec6": { + "balance": "0xc7d4d88af8b4c00", + "nonce": 95 + }, + "0x82effbaaaf28614e55b2ba440fb198e0e5789b0f": { + "balance": "0x1d37f515017a8eef6b0" + } + } + } +} diff --git a/coreth/eth/tracers/internal/tracetest/testdata/prestate_tracer_with_diff_mode/inner_create.json b/coreth/eth/tracers/internal/tracetest/testdata/prestate_tracer_with_diff_mode/inner_create.json new file mode 100644 index 00000000..fad27a4f --- /dev/null +++ b/coreth/eth/tracers/internal/tracetest/testdata/prestate_tracer_with_diff_mode/inner_create.json @@ -0,0 +1,305 @@ +{ + "genesis": { + "difficulty": "13707196986889", + "extraData": "0xd983010302844765746887676f312e342e328777696e646f7773", + "gasLimit": "3141592", + "hash": "0x607b38fe7e94427ee8f3b9a62375c67f953f8d49e05dbfd0145f9d3bac142193", + "miner": "0xf8b483dba2c3b7176a3da549ad41a48bb3121069", + "mixHash": "0x98c74c9e76fd0078157e1696e4334a7e787396459693a84536d8b96414dafd5d", + "nonce": 
"0x77a5a0a73ad8745e", + "number": "1062502", + "stateRoot": "0x1df615df5fdbc8d5397bf3574f462f6d9696428eb8796d8e9252bccc8e3a8996", + "timestamp": "1456480432", + "totalDifficulty": "7948153536501153741", + "alloc": { + "0x0000000000000000000000000000000000000004": { + "balance": "0x0", + "code": "0x" + }, + "0x1deeda36e15ec9e80f3d7414d67a4803ae45fc80": { + "balance": "0x0", + "code": "0x650200d2f18c7350606060405236156100c15760e060020a60003504630bd295e681146100c65780630fd1f94e1461017d5780630fee183d1461018c578063349501b7146101ad5780635054d98a146101c75780637c0278fc146101ef5780637e92656214610287578063a0943154146102f6578063a1873db61461030e578063a9d2293d14610355578063b5d0f16e146103ad578063c17e6817146103ce578063cc3471af1461046a578063da46be0a1461047a578063f55627531461052a575b610007565b6105d36004356024356044355b60006000600030915081600160a060020a0316630a16697a6040518160e060020a0281526004018090506020604051808303816000876161da5a03f1156100075750506040515191505080841080610173575081600160a060020a031663a06db7dc6040518160e060020a0281526004018090506020604051808303816000876161da5a03f115610007575050506040518051906020015060ff16810184115b1561100d57610007565b6105d3600060f0610f6d61046e565b6105d3600435602435604435606435600081600202831015610ff657610fee565b6105d36004355b600081600014156109115750600161098f565b6105d36004355b6008810154600090819062010000900460ff16156105f257600691506105ec565b60408051602060248035600481810135601f81018590048502860185019096528585526105e5958135959194604494929390920191819084018382808284375094965050505050505060006004825103836001016000508181546001816001161561010002031660029004825481601f106108005782601f1061083a575b826008026101000360020a80910402828001178355610851565b6105e5600435602435604051600090600160a060020a038316907f398bd6b21ae4164ec322fb0eb8c2eb6277f36fd41903fbbed594dfe125591281908390a26007830154819010610e415760078301546005840154610e3f9162010000909104600160a060020a0316906103d8565b6105d3600435602435600060006000611064856101ce565b6105d3600435602435604435600483015460009081903
0908410156110e4577f4e4f545f454e4f5547485f47415300000000000000000000000000000000000091506112dd565b6105d35b60006000309050600a81600160a060020a0316630a16697a6040518160e060020a0281526004018090506020604051808303816000876161da5a03f1156100075750506040515160091901935050505b5090565b6105d36004356024355b60008282111561099e578183606402049050610998565b6105d36004356024355b600030600160a060020a0316318211156103fa57600160a060020a0330163191505b6000821115610994577389efe605e9ecbe22849cd85d5449cc946c26f8f36312c82bcc84846040518360e060020a0281526004018083600160a060020a031681526020018281526020019250505060206040518083038160008760325a03f2156100075750839250610998915050565b6105d35b6000600f610f6d610359565b6105e560043560243560443560643560843560088501805461ff00191661010017905584543090600090819081908190819060a060020a900460e060020a02811480156104db575060018b8101546002918116156101000260001901160481145b156109b3578a5460028c0154600160a060020a039190911690895a60405191900391906000818181858888f193505050508b60080160006101000a81548160ff02191690830217905550610bfa565b6105d36004355b6000600060006000309250600a83600160a060020a0316630a16697a6040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000757505060405151600919019350505081851115610eb05782600160a060020a031663c6502da86040518160e060020a0281526004018090506020604051808303816000876161da5a03f115610007575050604051519450610ea89050565b60408051918252519081900360200190f35b005b600291505b50919050565b6008830154610100900460ff161561060d57600591506105ec565b30905080600160a060020a0316630a16697a6040518160e060020a0281526004018090506020604051808303816000876161da5a03f1156100075750506040515143610109011015905061066457600091506105ec565b80600160a060020a0316630a16697a6040518160e060020a0281526004018090506020604051808303816000876161da5a03f1156100075750506040515143600a01101590506106d3576005830154620100009004600160a060020a0316600014156105e757600191506105ec565b80600160a060020a0316630a16697a6040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000757505
060405151431015905061072357600391506105ec565b80600160a060020a031663a06db7dc6040518160e060020a0281526004018090506020604051808303816000876161da5a03f115610007575050506040518051906020015060ff1681600160a060020a0316630a16697a6040518160e060020a0281526004018090506020604051808303816000876161da5a03f115610007575050604051519190910143101590506107bf57600491506105ec565b600791506105ec565b5081800160010183558181151161085157601f016020900481601f016020900483600052602060002091820191016108519190610826565b82601f106107c8575082600052602060002080549082601f016020900481019061090691905b808211156103a95760008155600101610826565b60ff19168360005260206000205581800160010183555b5050505060048251111561090c575060005b6001838101546002918116156101000260001901160481101561090c57818160040181518110156100075790602001015160f860020a900460f860020a02836001016000508281546001816001161561010002031660029004811015610007578154600116156108e25790600052602060002090602091828204019190065b601f036101000a81548160ff0219169060f860020a84040217905550600101610863565b5061026d565b505050565b604080517f5f5f6469672875696e74323536290000000000000000000000000000000000008152815190819003600e01812060e060020a9081900481028190049081028252600019850160048301529151600160a060020a03301692916102bc86029160248281019260009291908290030181838887f19450505050505b919050565b5060005b92915050565b818360020203836064020460c8039050610998565b8a5460a060020a900460e060020a0260001415610a23578a5460028c0154600160a060020a039190911690895a03908d6001016000506040518082805460018160011615610100020316600290048015610ae55780601f10610aba57610100808354040283529160200191610ae5565b60018b8101546002918116156101000260001901160460001415610b1a578a5460028c0154600160a060020a039190911690895a03908d60000160149054906101000a900460e060020a0260e060020a900491906040518360e060020a028152600401809050600060405180830381858988f19450505050508b60080160006101000a81548160ff02191690830217905550610bfa565b820191906000526020600020905b815481529060010190602001808311610ac857829003601f168201915b5050915050600060405180830381858
888f193505050508b60080160006101000a81548160ff02191690830217905550610bfa565b8a5460028c0154600160a060020a039190911690895a03908d60000160149054906101000a900460e060020a0260e060020a900491908e6001016000506040518460e060020a0281526004018082805460018160011615610100020316600290048015610bc85780601f10610b9d57610100808354040283529160200191610bc8565b820191906000526020600020905b815481529060010190602001808311610bab57829003601f168201915b5050915050600060405180830381858988f19450505050508b60080160006101000a81548160ff021916908302179055505b85600160a060020a031663938b5f326040518160e060020a0281526004018090506020604051808303816000876161da5a03f1156100075750505060405180519060200150600160a060020a031660405180807f75706461746544656661756c745061796d656e742829000000000000000000008152602001506016019050604051809103902060e060020a8091040260e060020a90046040518160e060020a0281526004018090506000604051808303816000876161da5a03f15050505060038b0154610cc8903a6103b7565b60058c0154909550620100009004600160a060020a03908116908a161415610cf65760068b01549350610d38565b85600160a060020a031663c6502da86040518160e060020a0281526004018090506020604051808303816000876161da5a03f115610007575050604051519450505b6064858502048b6007016000505401925060648587600160a060020a031663625cc4656040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000757505050604051805190602001500204915060008b60070160005081905550865a8b03013a029050610db7898285016103d8565b9250610dd773d3cda913deb6f67967b99d67acdfa1712c293601836103d8565b6040805160088e01548482526020820187905281830184905260ff1660608201529051919350600160a060020a038b16917f4538b7ec91dae8fada01e66a052482086d3e690c3db5a80457fbcd55457b4ae19181900360800190a25050505050505050505050565b505b309050610e8c81600160a060020a031663ae45850b6040518160e060020a0281526004018090506020604051808303816000876161da5a03f1156100075750506040515190316103d8565b505050600801805462ff0000191662010000179055565b600093505b505050919050565b600e19919091019081851115610f075782600160a060020a031663c6502da86040518160e060020a028152600
4018090506020604051808303816000876161da5a03f115610007575050604051519450610ea89050565b60ef19919091019081851115610ea357818503905060f08184600160a060020a031663c6502da86040518160e060020a0281526004018090506020604051808303816000876161da5a03f115610007575050506040518051906020015002049350610ea8565b03905090565b6006860181905560058601805475ffffffffffffffffffffffffffffffffffffffff000019166201000087021790556007860184905560408051600160a060020a0387168152602081019290925280517fd8138f8a3f377c5259ca548e70e4c2de94f129f5a11036a15b69513cba2b426a9281900390910190a15b949350505050565b610f7343610531565b600192505b50509392505050565b60108185031015610fff576005860154620100009004600160a060020a03166000148061105057506005860154620100009004600160a060020a03908116908616145b9250611004565b600092505b505092915050565b91503090506000821480156110c4575080600160a060020a031663ae45850b6040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000757505060405151600160a060020a039081169086161490505b156110d2576001925061105c565b6007821415611057576001925061105c565b6008860154610100900460ff161561111e577f414c52454144595f43414c4c454400000000000000000000000000000000000091506112dd565b80600160a060020a0316630a16697a6040518160e060020a0281526004018090506020604051808303816000876161da5a03f115610007575050604051514310905080611206575080600160a060020a031663a06db7dc6040518160e060020a0281526004018090506020604051808303816000876161da5a03f1156100075750506040805180517f0a16697a000000000000000000000000000000000000000000000000000000008252915160ff9092169291630a16697a9160048181019260209290919082900301816000876161da5a03f1156100075750506040515191909101431190505b15611233577f4e4f545f494e5f43414c4c5f57494e444f57000000000000000000000000000091506112dd565b61123e8686436100d3565b151561126c577f4e4f545f415554484f52495a454400000000000000000000000000000000000091506112dd565b6005860154600061ffff91909116118015611299575032600160a060020a031685600160a060020a031614155b80156112b4575060058601546112b29061ffff166101b4565b155b156112dd577f535441434b5f544f4f5f4
445455000000000000000000000000000000000000091505b60008214610fff5760408051600160a060020a03871681526020810184905281517fdcb278834ca505ad219cf8e4b5d11f026080abef6ec68e249ea5e4d9bb3dc7b2929181900390910190a16000925061100456" + }, + "0x2a65aca4d5fc5b5c859090a6c34d164135398226": { + "balance": "0x98e1c608601c2496b2", + "nonce": "218916", + "code": "0x" + }, + "0x651913977e8140c323997fce5e03c19e0015eebf": { + "balance": "0x0", + "code": "0x", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000000": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000001": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000002": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000003": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000004": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000005": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000006": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000007": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000008": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x000000000000000000000000000000000000000000000000000000000000000c": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x000000000000000000000000000000000000000000000000000000000000000d": 
"0x0000000000000000000000000000000000000000000000000000000000000000", + "0x000000000000000000000000000000000000000000000000000000000000000e": "0x0000000000000000000000000000000000000000000000000000000000000000" + } + }, + "0x6c8f2a135f6ed072de4503bd7c4999a1a17f824b": { + "balance": "0x0", + "nonce": "237", + "code": "0x6060604052361561027c5760e060020a600035046301991313811461027e57806303d22885146102ca5780630450991814610323578063049ae734146103705780630ce46c43146103c35780630e85023914610602578063112e39a8146106755780631b4fa6ab146106c25780631e74a2d3146106d057806326a7985a146106fd5780633017fe2414610753578063346cabbc1461075c578063373a1bc3146107d55780633a9e74331461081e5780633c2c21a01461086e5780633d9ce89b146108ba578063480b70bd1461092f578063481078431461097e57806348f0518714610a0e5780634c471cde14610a865780634db3da8314610b09578063523ccfa814610b4f578063586a69fa14610be05780635a9f2def14610c3657806364ee49fe14610caf57806367beaccb14610d055780636840246014610d74578063795b9a6f14610dca5780637b55c8b514610e415780637c73f84614610ee15780638c0e156d14610f145780638c1d01c814610f605780638e46afa914610f69578063938c430714610fc0578063971c803f146111555780639772c982146111ac57806398c9cdf41461122857806398e00e541461127f5780639f927be7146112d5578063a00aede914611383578063a1c0539d146113d3578063aff21c6514611449578063b152f19e14611474578063b549793d146114cb578063b5b33eda1461154b578063bbc6eb1f1461159b578063c0f68859146115ab578063c3a2c0c314611601578063c43d05751461164b578063d8e5c04814611694578063dbfef71014611228578063e29fb547146116e7578063e6470fbe1461173a578063ea27a8811461174c578063ee77fe86146117d1578063f158458c14611851575b005b611882600435602435604435600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e41160016000503387876020604051908101604052806000815260200150612225610f6d565b61188260043560243560443560643560843560a43560c435600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e4116001600050338b8a6020604051908101604052806000815260200150896125196106c6565b611882600435600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e411600
16000503385600060e060020a026020604051908101604052806000815260200150611e4a610f6d565b611882600435602435604435606435608435600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e41160016000503389896020604051908101604052806000815260200150886124e86106c6565b604080516020604435600481810135601f8101849004840285018401909552848452611882948135946024803595939460649492939101918190840183828082843750506040805160a08082019092529597963596608435969095506101449450925060a491506005908390839080828437509095505050505050604080518082018252600160a060020a03338116825288166020820152815160c0810190925260009173e54d323f9ef17c1f0dede47ecc86a9718fe5ea349163e3042c0f91600191908a908a9089908b90808b8b9090602002015181526020018b60016005811015610002579090602002015181526020018b60026005811015610002579090602002015181526020018b60036005811015610002579090602002015181526020018b6004600581101561000257909060200201518152602001348152602001506040518860e060020a02815260040180888152602001876002602002808383829060006004602084601f0104600f02600301f150905001868152602001806020018560ff1681526020018461ffff168152602001836006602002808383829060006004602084601f0104600f02600301f1509050018281038252868181518152602001915080519060200190808383829060006004602084601f0104600f02600301f150905090810190601f1680156105d25780820380516001836020036101000a031916815260200191505b509850505050505050505060206040518083038160008760325a03f2156100025750506040515191506124cd9050565b60408051602060248035600481810135601f81018590048502860185019096528585526118829581359591946044949293909201918190840183828082843750949650505050505050600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e411600133808787611e64610f6d565b611882600435600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e41160016000503333600060e060020a026020604051908101604052806000815260200150611d28610f6d565b61189f5b6000611bf8611159565b6118b7600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea3463ea27a881600060005054611a9561159f565b6118b7600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346326a7985a6040518160e060020a0281526004018090506
0206040518083038160008760325a03f2156100025750506040515191506107599050565b6118b760075b90565b604080516020606435600481810135601f8101849004840285018401909552848452611882948135946024803595604435956084949201919081908401838280828437509496505093359350505050600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e41160013389898861224b610f6d565b611882600435602435600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e41160016000503386866020604051908101604052806000815260200150611e64610f6d565b611882600435602435604435606435600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e41160016000503333896020604051908101604052806000815260200150886123bc6106c6565b611882600435602435604435600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e41160016000503387866020604051908101604052806000815260200150611f8d610f6d565b60408051602060248035600481810135601f810185900485028601850190965285855261188295813595919460449492939092019181908401838280828437509496505093359350505050600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e411600133808888612225610f6d565b611882600435602435604435606435600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e41160016000503388886020604051908101604052806000815260200150612388610f6d565b611882600435604080517fc4144b2600000000000000000000000000000000000000000000000000000000815260016004820152600160a060020a03831660248201529051600091737c1eb207c07e7ab13cf245585bd03d0fa478d0349163c4144b26916044818101926020929091908290030181878760325a03f215610002575050604051519150611b409050565b604080516020604435600481810135601f81018490048402850184019095528484526118829481359460248035959394606494929391019181908401838280828437509496505093359350505050600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e411600133888888612238610f6d565b604080516020604435600481810135601f810184900484028501840190955284845261188294813594602480359593946064949293910191819084018382808284375094965050933593505060843591505060a43560c435600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e4116001338b8b8b896126536106c6565b611882600435600073e54d323f9
ef17c1f0dede47ecc86a9718fe5ea346350d4e41160016000503333866020604051908101604052806000815260200150611e4a610f6d565b6118b76004355b604080517fed5bd7ea00000000000000000000000000000000000000000000000000000000815260016004820152600160a060020a03831660248201529051600091737c1eb207c07e7ab13cf245585bd03d0fa478d0349163ed5bd7ea916044818101926020929091908290030181878760325a03f215610002575050604051519150611b409050565b61189f600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea3463586a69fa6040518160e060020a02815260040180905060206040518083038160008760325a03f2156100025750506040515191506107599050565b60408051602060248035600481810135601f81018590048502860185019096528585526118829581359591946044949293909201918190840183828082843750949650509335935050606435915050600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e411600133808989612388610f6d565b61188260043560243560443560643560843560a435600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e4116001600050338a896020604051908101604052806000815260200150886124d76106c6565b6040805160206004803580820135601f8101849004840285018401909552848452611882949193602493909291840191908190840183828082843750949650505050505050600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e411600133808587611e4a610f6d565b61188260043560243560443560643560843560a435600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e4116001600050338a8a60206040519081016040528060008152602001508961262d6106c6565b604080516020606435600481810135601f810184900484028501840190955284845261188294813594602480359560443595608494920191908190840183828082843750949650505050505050600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e4116001338888876120c7610f6d565b604080516020604435600481810135601f81018490048402850184019095528484526118829481359460248035959394606494929391019181908401838280828437505060408051608080820190925295979635969561010495509350608492508591508390839080828437509095505050505050600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e4116001338989898961263a6106c6565b6118b7600435602435604435600073e54d323f9ef17c1f0dede47ecc86a
9718fe5ea3463ea27a881858585611ba361122c565b611882600435602435604435600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e4116001600050333388602060405190810160405280600081526020015061236e610f6d565b6118b760005481565b6118c95b600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea34638e46afa96040518160e060020a02815260040180905060206040518083038160008760325a03f2156100025750506040515191506107599050565b60408051602060248035600481810135601f8101859004850286018501909652858552611882958135959194604494929390920191819084018382808284375094965050933593505060643591505060843560a43560c43560e43561010435600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e411600160005033338e8e8d8f8e8e8e8e8e346040518e60e060020a028152600401808e81526020018d600160a060020a031681526020018c600160a060020a031681526020018b8152602001806020018a60ff1681526020018961ffff16815260200188815260200187815260200186815260200185815260200184815260200183815260200182810382528b8181518152602001915080519060200190808383829060006004602084601f0104600f02600301f150905090810190601f1680156111195780820380516001836020036101000a031916815260200191505b509e50505050505050505050505050505060206040518083038160008760325a03f215610002575050604051519b9a5050505050505050505050565b61189f5b600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea3463971c803f6040518160e060020a02815260040180905060206040518083038160008760325a03f2156100025750506040515191506107599050565b604080516020604435600481810135601f8101849004840285018401909552848452611882948135946024803595939460649492939101918190840183828082843750949650509335935050608435915050600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e4116001338989896123a2610f6d565b6118b75b600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346398c9cdf46040518160e060020a02815260040180905060206040518083038160008760325a03f2156100025750506040515191506107599050565b6118b7600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346398e00e546040518160e060020a02815260040180905060206040518083038160008760325a03f2156100025750506040515191506107599050565b611882600435604080517fe6ce3a6a00000
0000000000000000000000000000000000000000000000000008152600160048201527f3e3d0000000000000000000000000000000000000000000000000000000000006024820152604481018390529051600091737c1eb207c07e7ab13cf245585bd03d0fa478d0349163e6ce3a6a916064818101926020929091908290030181878760325a03f215610002575050604051519150611b409050565b611882600435602435600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e41160016000503385600060e060020a0260206040519081016040528060008152602001506121ef610f6d565b604080516020604435600481810135601f8101849004840285018401909552848452611882948135946024803595939460649492939101918190840183828082843750949650505050505050600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e4116001338787876120b5610f6d565b6118b7600435600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea3463ea27a88183611b4561159f565b6118b75b600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea3463b152f19e6040518160e060020a02815260040180905060206040518083038160008760325a03f2156100025750506040515191506107599050565b60408051602060248035600481810135601f8101859004850286018501909652858552611882958135959194604494929390920191819084018382808284375094965050933593505060643591505060843560a435600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e411600133808b8b8961262d6106c6565b611882600435602435600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e41160016000503386600060e060020a026020604051908101604052806000815260200150612200610f6d565b6118b75b60005460649004610759565b6118b7600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea3463c0f688596040518160e060020a02815260040180905060206040518083038160008760325a03f2156100025750506040515191506107599050565b611882600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e41160016000503333600060e060020a026020604051908101604052806000815260200150611bff610f6d565b611882600435602435600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e41160016000503333876020604051908101604052806000815260200150612200610f6d565b611882600435602435604435600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e41160016000503387600060e060020a026020604
051908101604052806000815260200150612213610f6d565b611882600435602435604435606435608435600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e411600160005033338a60206040519081016040528060008152602001508961250c6106c6565b61027c6000600060006118e033610b56565b6118b7600435602435604435606435600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea3463ea27a881868686866040518560e060020a0281526004018085815260200184815260200183815260200182815260200194505050505060206040518083038160008760325a03f215610002575050604051519150505b949350505050565b604080516020604435600481810135601f810184900484028501840190955284845261188294813594602480359593946064949293910191819084018382808284375094965050933593505060843591505060a435600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e4116001338a8a8a886124fa6106c6565b6118b7600435602435600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea3463ea27a88184846000611b4f61122c565b60408051600160a060020a03929092168252519081900360200190f35b6040805161ffff929092168252519081900360200190f35b60408051918252519081900360200190f35b6040805160ff929092168252519081900360200190f35b15611a905733925082600160a060020a031663c6502da86040518160e060020a0281526004018090506020604051808303816000876161da5a03f1156100025750506040805180517fc6803622000000000000000000000000000000000000000000000000000000008252915191945063c680362291600482810192602092919082900301816000876161da5a03f11561000257505060405151905080156119d1575082600160a060020a031663d379be236040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060405151600160a060020a03166000141590505b80156119dd5750600082115b80156119ec5750600054600190115b15611a90578183600160a060020a031663830953ab6040518160e060020a0281526004018090506020604051808303816000876161da5a03f1156100025750506040515160640291909104915050604281118015611a4d5750600054829011155b15611a675760008054612710612711909102049055611a90565b602181108015611a7a5750600054829010155b15611a90576000805461271061270f9091020490555b505050565b6000611a9f61122c565b6040518560e060020a02815260040180858152602001848
15260200183815260200182815260200194505050505060206040518083038160008760325a03f2156100025750506040515191506107599050565b6040518560e060020a0281526004018085815260200184815260200183815260200182815260200194505050505060206040518083038160008760325a03f215610002575050604051519150505b919050565b6000611af261122c565b6040518560e060020a0281526004018085815260200184815260200183815260200182815260200194505050505060206040518083038160008760325a03f215610002575050604051519150505b92915050565b6040518560e060020a0281526004018085815260200184815260200183815260200182815260200194505050505060206040518083038160008760325a03f215610002575050604051519150505b9392505050565b9050610759565b611c076106c6565b6000611c11611478565b611c1961122c565b600054611c2461159f565b346040518e60e060020a028152600401808e81526020018d600160a060020a031681526020018c600160a060020a031681526020018b8152602001806020018a60ff1681526020018961ffff16815260200188815260200187815260200186815260200185815260200184815260200183815260200182810382528b8181518152602001915080519060200190808383829060006004602084601f0104600f02600301f150905090810190601f168015611cf25780820380516001836020036101000a031916815260200191505b509e50505050505050505050505050505060206040518083038160008760325a03f2156100025750506040515191506107599050565b611d306106c6565b60008b611d3b61122c565b600054611d4661159f565b346040518e60e060020a028152600401808e81526020018d600160a060020a031681526020018c600160a060020a031681526020018b8152602001806020018a60ff1681526020018961ffff16815260200188815260200187815260200186815260200185815260200184815260200183815260200182810382528b8181518152602001915080519060200190808383829060006004602084601f0104600f02600301f150905090810190601f168015611e145780820380516001836020036101000a031916815260200191505b509e50505050505050505050505050505060206040518083038160008760325a03f215610002575050604051519150611b409050565b611e526106c6565b6000611e5c611478565b611d3b61122c565b611e6c6106c6565b6000611e76611478565b611e7e61122c565b600054611e8961159f565b346040518e60e060020a028152600401808e8152602
0018d600160a060020a031681526020018c600160a060020a031681526020018b8152602001806020018a60ff1681526020018961ffff16815260200188815260200187815260200186815260200185815260200184815260200183815260200182810382528b8181518152602001915080519060200190808383829060006004602084601f0104600f02600301f150905090810190601f168015611f575780820380516001836020036101000a031916815260200191505b509e50505050505050505050505050505060206040518083038160008760325a03f215610002575050604051519150611b9d9050565b611f956106c6565b8b611f9e611478565b611fa661122c565b600054611fb161159f565b346040518e60e060020a028152600401808e81526020018d600160a060020a031681526020018c600160a060020a031681526020018b8152602001806020018a60ff1681526020018961ffff16815260200188815260200187815260200186815260200185815260200184815260200183815260200182810382528b8181518152602001915080519060200190808383829060006004602084601f0104600f02600301f150905090810190601f16801561207f5780820380516001836020036101000a031916815260200191505b509e50505050505050505050505050505060206040518083038160008760325a03f215610002575050604051519150611bf19050565b6120bd6106c6565b6000611f9e611478565b6120cf6106c6565b8b6120d8611478565b6120e061122c565b6000546120eb61159f565b346040518e60e060020a028152600401808e81526020018d600160a060020a031681526020018c600160a060020a031681526020018b8152602001806020018a60ff1681526020018961ffff16815260200188815260200187815260200186815260200185815260200184815260200183815260200182810382528b8181518152602001915080519060200190808383829060006004602084601f0104600f02600301f150905090810190601f1680156121b95780820380516001836020036101000a031916815260200191505b509e50505050505050505050505050505060206040518083038160008760325a03f2156100025750506040515191506117c99050565b6121f76106c6565b8b611e76611478565b6122086106c6565b60008b611e7e61122c565b61221b6106c6565b8a8c611fa661122c565b61222d6106c6565b60008b611fa661122c565b6122406106c6565b60008b6120e061122c565b6122536106c6565b8c8b61225d61122c565b60005461226861159f565b346040518e60e060020a028152600401808e81526020018d600160a060020
a031681526020018c600160a060020a031681526020018b8152602001806020018a60ff1681526020018961ffff16815260200188815260200187815260200186815260200185815260200184815260200183815260200182810382528b8181518152602001915080519060200190808383829060006004602084601f0104600f02600301f150905090810190601f1680156123365780820380516001836020036101000a031916815260200191505b509e50505050505050505050505050505060206040518083038160008760325a03f21561000257505060405151979650505050505050565b6123766106c6565b60008c8c600060005054611fb161159f565b6123906106c6565b60008c8c6000600050546120eb61159f565b6123aa6106c6565b60008c8c60006000505461226861159f565b60008d8d6000600050546120eb61159f565b346040518e60e060020a028152600401808e81526020018d600160a060020a031681526020018c600160a060020a031681526020018b8152602001806020018a60ff1681526020018961ffff16815260200188815260200187815260200186815260200185815260200184815260200183815260200182810382528b8181518152602001915080519060200190808383829060006004602084601f0104600f02600301f150905090810190601f16801561249c5780820380516001836020036101000a031916815260200191505b509e50505050505050505050505050505060206040518083038160008760325a03f215610002575050604051519150505b9695505050505050565b8e8d8d6000600050546123ce61159f565b60008d8d60006000505461226861159f565b60008d8d6000600050546123ce61159f565b60008e8e8d61226861159f565b8f8e8e8d61252561159f565b346040518e60e060020a028152600401808e81526020018d600160a060020a031681526020018c600160a060020a031681526020018b8152602001806020018a60ff1681526020018961ffff16815260200188815260200187815260200186815260200185815260200184815260200183815260200182810382528b8181518152602001915080519060200190808383829060006004602084601f0104600f02600301f150905090810190601f1680156125f35780820380516001836020036101000a031916815260200191505b509e50505050505050505050505050505060206040518083038160008760325a03f215610002575050604051519998505050505050505050565b60008e8e8d6123ce61159f565b8a5160208c015160408d015160608e015161226861159f565b60008e8e8d61252561159f56", + "storage": { + 
"0x0000000000000000000000000000000000000000000000000000000000000000": "0x0000000000000000000000000000000000000000000000000011f8119429ed3a", + "0x0000000000000000000000000000000000000000000000000000000000000001": "0x000000000000000000000000f5d861791e76fa01433e0d7421aee565290e4afe", + "0x031b9ec274101cc3ccff4d6d98ef4513742dadbaadba538bff48b88403253234": "0x0000000000000000000000000000000000000000000000000000000000000001", + "0x20ef51bb8ea9e8e8d5e2c17d28e47285698893c1017db4b4e40b792358a3dbc7": "0x0000000000000000000000000000000000000000000000000000000000000004", + "0x26cba0705aade77fa0f9275b68d01fb71206a44abd3a4f5a838f7241efbc8abd": "0x000000000000000000000000c9a2bfd279fe57e7651e5d9f29bb1793c9a1cf01", + "0x26cba0705aade77fa0f9275b68d01fb71206a44abd3a4f5a838f7241efbc8abf": "0x00000000000000000000000042e69cd0a17ae9992f9ad93d136c4bb0d95e3230", + "0x26cba0705aade77fa0f9275b68d01fb71206a44abd3a4f5a838f7241efbc8ac2": "0x0000000000000000000000000000000000000000000000000000000000000001", + "0x37a551428681c06e6f97b79bb6c8c325935dc1a51b31a982594f40f2dd794dfb": "0x000000000000000000000000f5d861791e76fa01433e0d7421aee565290e4afe", + "0x37a551428681c06e6f97b79bb6c8c325935dc1a51b31a982594f40f2dd794dfc": "0x00000000000000000000000000000000000000000000000000000000000f6897", + "0x37a551428681c06e6f97b79bb6c8c325935dc1a51b31a982594f40f2dd794dfd": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x37a551428681c06e6f97b79bb6c8c325935dc1a51b31a982594f40f2dd794dfe": "0x0000000000000000000000002859ddf2877c46d54e67b6becdb1cafb8ef4a458", + "0x37a551428681c06e6f97b79bb6c8c325935dc1a51b31a982594f40f2dd794dff": "0x000000000000000000000000b7df3c43a8b13ecf45777c267404e15c7cdb04c9", + "0x37a551428681c06e6f97b79bb6c8c325935dc1a51b31a982594f40f2dd794e00": "0x0000000000000000000000000000000000000000000000000000000000000008", + "0x3b20a4b931bc4ae9450774ee52b8f5da1b248d23e61cd20c09b25662f73894fd": "0x0000000000000000000000000000000000000000000000000000000000000006", + 
"0x3b99aee1e3090227401ac2055c861246ca6ec62f426b4b4d74df88510f841b89": "0x0000000000000000000000000000000000000000000000000000000000000007", + "0x49f03a2c2f4fd666a32141fb324283b6f84a1d07b5fa435669fdb55766aef711": "0x000000000000000000000000a4d91b341f0e9a7000be916a668408b463f4c38c", + "0x49f03a2c2f4fd666a32141fb324283b6f84a1d07b5fa435669fdb55766aef712": "0x0000000000000000000000000000000000000000000000000000000000102ce9", + "0x49f03a2c2f4fd666a32141fb324283b6f84a1d07b5fa435669fdb55766aef713": "0x000000000000000000000000fd97a0d81cc92eecd52452831930b27889925ef0", + "0x49f03a2c2f4fd666a32141fb324283b6f84a1d07b5fa435669fdb55766aef714": "0x00000000000000000000000016917c151bb1399852a0741eb7b317b443e2cfa3", + "0x49f03a2c2f4fd666a32141fb324283b6f84a1d07b5fa435669fdb55766aef715": "0x000000000000000000000000d7b0e93fa8386b17fb5d1cf934076203dcc122f3", + "0x49f03a2c2f4fd666a32141fb324283b6f84a1d07b5fa435669fdb55766aef716": "0x0000000000000000000000000000000000000000000000000000000000000004", + "0x5d866e5ddc53cb4c50f232302c51f03204d70c867baf663c9211cc229676a3fe": "0x000000000000000000000000c5ef24ec3bf0e3522cfc8e53f3e076b043547ce1", + "0x5d866e5ddc53cb4c50f232302c51f03204d70c867baf663c9211cc229676a3ff": "0x00000000000000000000000000000000000000000000000000000000000fff67", + "0x5d866e5ddc53cb4c50f232302c51f03204d70c867baf663c9211cc229676a400": "0x000000000000000000000000b7df3c43a8b13ecf45777c267404e15c7cdb04c9", + "0x5d866e5ddc53cb4c50f232302c51f03204d70c867baf663c9211cc229676a401": "0x00000000000000000000000010fc2e8ba5f40336c3576ffaa25177f1cdedf836", + "0x5d866e5ddc53cb4c50f232302c51f03204d70c867baf663c9211cc229676a402": "0x000000000000000000000000fd97a0d81cc92eecd52452831930b27889925ef0", + "0x5d866e5ddc53cb4c50f232302c51f03204d70c867baf663c9211cc229676a403": "0x0000000000000000000000000000000000000000000000000000000000000006", + "0x95e05d02b91af970cb4998107e8613455258880676e00b819c12d675e60de5ba": "0x00000000000000000000000042e69cd0a17ae9992f9ad93d136c4bb0d95e3230", + 
"0x95e05d02b91af970cb4998107e8613455258880676e00b819c12d675e60de5bb": "0x000000000000000000000000000000000000000000000000000000000010347b", + "0x95e05d02b91af970cb4998107e8613455258880676e00b819c12d675e60de5bc": "0x000000000000000000000000d7b0e93fa8386b17fb5d1cf934076203dcc122f3", + "0x95e05d02b91af970cb4998107e8613455258880676e00b819c12d675e60de5bd": "0x000000000000000000000000c9a2bfd279fe57e7651e5d9f29bb1793c9a1cf01", + "0x95e05d02b91af970cb4998107e8613455258880676e00b819c12d675e60de5be": "0x000000000000000000000000741467b251fca923d6229c4b439078b55dca233b", + "0x95e05d02b91af970cb4998107e8613455258880676e00b819c12d675e60de5bf": "0x0000000000000000000000000000000000000000000000000000000000000002", + "0x99d5294a34e2d6d560a223237786adc8b5651c09094b9ecd56e6ae7abc2a2751": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x99d5294a34e2d6d560a223237786adc8b5651c09094b9ecd56e6ae7abc2a2752": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x99d5294a34e2d6d560a223237786adc8b5651c09094b9ecd56e6ae7abc2a2753": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x99d5294a34e2d6d560a223237786adc8b5651c09094b9ecd56e6ae7abc2a2754": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x99d5294a34e2d6d560a223237786adc8b5651c09094b9ecd56e6ae7abc2a2755": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x99d5294a34e2d6d560a223237786adc8b5651c09094b9ecd56e6ae7abc2a2756": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0xa9e249fecbfa0518be95c32972ad551c71206081844335006bb2a349490826a7": "0x000000000000000000000000b7df3c43a8b13ecf45777c267404e15c7cdb04c9", + "0xa9e249fecbfa0518be95c32972ad551c71206081844335006bb2a349490826a8": "0x00000000000000000000000000000000000000000000000000000000000fe13d", + "0xa9e249fecbfa0518be95c32972ad551c71206081844335006bb2a349490826a9": "0x000000000000000000000000f5d861791e76fa01433e0d7421aee565290e4afe", + 
"0xa9e249fecbfa0518be95c32972ad551c71206081844335006bb2a349490826aa": "0x00000000000000000000000063110531142fb314118164ff579ba52746504408", + "0xa9e249fecbfa0518be95c32972ad551c71206081844335006bb2a349490826ab": "0x000000000000000000000000c5ef24ec3bf0e3522cfc8e53f3e076b043547ce1", + "0xa9e249fecbfa0518be95c32972ad551c71206081844335006bb2a349490826ac": "0x0000000000000000000000000000000000000000000000000000000000000007", + "0xac33ff75c19e70fe83507db0d683fd3465c996598dc972688b7ace676c890780": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0xccd2cbc946692be8ade97db99353304e3af0fa6202f93649d4e185ad8b1f385c": "0x0000000000000000000000000000000000000000000000000000000000000004", + "0xd3a5582b3eff6ef8ee90f3962e9d598a3f4b7d07840356c9b8fd7b494879b4ef": "0x000000000000000000000000d7b0e93fa8386b17fb5d1cf934076203dcc122f3", + "0xd3a5582b3eff6ef8ee90f3962e9d598a3f4b7d07840356c9b8fd7b494879b4f0": "0x00000000000000000000000000000000000000000000000000000000001030b3", + "0xd3a5582b3eff6ef8ee90f3962e9d598a3f4b7d07840356c9b8fd7b494879b4f1": "0x000000000000000000000000a4d91b341f0e9a7000be916a668408b463f4c38c", + "0xd3a5582b3eff6ef8ee90f3962e9d598a3f4b7d07840356c9b8fd7b494879b4f2": "0x000000000000000000000000dd87a67740c2acf48a31829783a095a81c3628d9", + "0xd3a5582b3eff6ef8ee90f3962e9d598a3f4b7d07840356c9b8fd7b494879b4f3": "0x00000000000000000000000042e69cd0a17ae9992f9ad93d136c4bb0d95e3230", + "0xd3a5582b3eff6ef8ee90f3962e9d598a3f4b7d07840356c9b8fd7b494879b4f4": "0x0000000000000000000000000000000000000000000000000000000000000003", + "0xdabde47554d6a6cfcff3c968abb145f298585fafa9e24c10fc526269794bd626": "0x0000000000000000000000000000000000000000000000000000000000000003", + "0xf7518490c515b9fc8e7fe713b647fe88eacefc92d616fa9454e61fe9aab64db7": "0x000000000000000000000000741467b251fca923d6229c4b439078b55dca233b", + "0xf7518490c515b9fc8e7fe713b647fe88eacefc92d616fa9454e61fe9aab64db8": "0x000000000000000000000000000000000000000000000000000000000010365c", + 
"0xf7518490c515b9fc8e7fe713b647fe88eacefc92d616fa9454e61fe9aab64db9": "0x00000000000000000000000042e69cd0a17ae9992f9ad93d136c4bb0d95e3230", + "0xf7518490c515b9fc8e7fe713b647fe88eacefc92d616fa9454e61fe9aab64dba": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0xf7518490c515b9fc8e7fe713b647fe88eacefc92d616fa9454e61fe9aab64dbb": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0xf7518490c515b9fc8e7fe713b647fe88eacefc92d616fa9454e61fe9aab64dbc": "0x0000000000000000000000000000000000000000000000000000000000000001", + "0xfbba286dd5525a6ed3322411df4f261c98e43b123fef71777adc2b44d705bdec": "0x000000000000000000000000fd97a0d81cc92eecd52452831930b27889925ef0", + "0xfbba286dd5525a6ed3322411df4f261c98e43b123fef71777adc2b44d705bded": "0x0000000000000000000000000000000000000000000000000000000000101dc2", + "0xfbba286dd5525a6ed3322411df4f261c98e43b123fef71777adc2b44d705bdee": "0x000000000000000000000000c5ef24ec3bf0e3522cfc8e53f3e076b043547ce1", + "0xfbba286dd5525a6ed3322411df4f261c98e43b123fef71777adc2b44d705bdef": "0x000000000000000000000000173243e117a6382211b1ac91eeb262f4a7021c16", + "0xfbba286dd5525a6ed3322411df4f261c98e43b123fef71777adc2b44d705bdf0": "0x000000000000000000000000a4d91b341f0e9a7000be916a668408b463f4c38c", + "0xfbba286dd5525a6ed3322411df4f261c98e43b123fef71777adc2b44d705bdf1": "0x0000000000000000000000000000000000000000000000000000000000000005" + } + }, + "0x741467b251fca923d6229c4b439078b55dca233b": { + "balance": "0x29c613529e8218f8", + "code": 
"0x606060405236156101a05760e060020a60003504630924120081146101c25780630a16697a146101cf5780630fd1f94e146101d8578063137c638b1461022e57806321835af61461023b57806324032866146102545780632f95b833146102d65780633017fe24146102e55780633233c686146102ef57806337f4c00e146102fa5780634500054f146103055780634e417a98146103785780634e71d92d146103e15780634f059a43146103f35780636146195414610451578063625cc4651461046157806367ce940d1461046a5780637d298ee314610477578063830953ab146104f9578063938b5f321461050457806395ee122114610516578063974654f41461052a578063a06db7dc14610535578063a9d2293d14610541578063ae45850b14610597578063b0f07e44146105a9578063c19d93fb146105cb578063c6502da81461062e578063c680362214610637578063ca94692d1461064a578063cc3471af14610673578063d379be23146106c9578063d62457f6146106e3578063ea8a1af0146106ee578063f5562753146107f3578063f6b4dfb414610854575b610868600080548190600160a060020a03908116339091161461087a57610994565b610868600b5460ff165b90565b610868600d5481565b6108686000731deeda36e15ec9e80f3d7414d67a4803ae45fc80630fd1f94e6040518160e060020a02815260040180905060206040518083038160008760325a03f2156100025750506040515191506101cc9050565b6108685b62012cc86101cc565b61086860043560008160001415610dc65750600161084f565b6108686004356024356000731deeda36e15ec9e80f3d7414d67a4803ae45fc80630bd295e6600360005085856040518460e060020a0281526004018084815260200183600160a060020a03168152602001828152602001935050505060206040518083038160008760325a03f215610002575050604051519150505b92915050565b61099860085461ffff166101cc565b61086860026101cc565b610868600a546101cc565b6108686006546101cc565b6108686000731deeda36e15ec9e80f3d7414d67a4803ae45fc8063a09431546003600050336040518360e060020a0281526004018083815260200182600160a060020a031681526020019250505060206040518083038160008760325a03f2156100025750506040515191506101cc9050565b6109af60408051602081810183526000825282516004805460026001821615610100026000190190911604601f81018490048402830184019095528482529293909291830182828015610a7d5780601f10610a5257610100808354040283529160200191610a7d565b610868600
06000600180610b7b6105cf565b6108686000731deeda36e15ec9e80f3d7414d67a4803ae45fc8063f5562753436040518260e060020a0281526004018082815260200191505060206040518083038160008760325a03f2156100025750506040515191506101cc9050565b610a1d6000600480610c986105cf565b61086860025481565b6108685b620186a06101cc565b6108686004356024355b6000731deeda36e15ec9e80f3d7414d67a4803ae45fc8063a1873db6600360005085856040518460e060020a0281526004018084815260200183600160a060020a03168152602001828152602001935050505060206040518083038160008760325a03f2156100025750506040515191506102d09050565b6108686009546101cc565b610a1f600c54600160a060020a031681565b610868600b5462010000900460ff166101cc565b6108686007546101cc565b610a3c600e5460ff1681565b6108686000731deeda36e15ec9e80f3d7414d67a4803ae45fc8063a9d2293d6040518160e060020a02815260040180905060206040518083038160008760325a03f2156100025750506040515191506101cc9050565b610a1f600054600160a060020a031681565b610868600080548190600160a060020a039081163390911614610a8957610994565b6108685b6000731deeda36e15ec9e80f3d7414d67a4803ae45fc80635054d98a60036000506040518260e060020a0281526004018082815260200191505060206040518083038160008760325a03f2156100025750506040515191506101cc9050565b61086860015481565b610868600b54610100900460ff166101cc565b61086860035474010000000000000000000000000000000000000000900460e060020a026101cc565b6108686000731deeda36e15ec9e80f3d7414d67a4803ae45fc8063cc3471af6040518160e060020a02815260040180905060206040518083038160008760325a03f2156100025750506040515191506101cc9050565b610a1f600854620100009004600160a060020a03166101cc565b6108686005546101cc565b610a1d604080517fa09431540000000000000000000000000000000000000000000000000000000081526003600482015233600160a060020a031660248201529051731deeda36e15ec9e80f3d7414d67a4803ae45fc809163a0943154916044808301926020929190829003018160008760325a03f215610002575050604051511590506107f157604080517f7e9265620000000000000000000000000000000000000000000000000000000081526003600482015233600160a060020a031660248201529051731deeda36e15ec9e80f3d7414d67a4803ae45fc8091637e9
265629160448083019260009291908290030181838760325a03f215610002575050505b565b6108686004356000731deeda36e15ec9e80f3d7414d67a4803ae45fc8063f5562753836040518260e060020a0281526004018082815260200191505060206040518083038160008760325a03f215610002575050604051519150505b919050565b610a1f600354600160a060020a03166101cc565b60408051918252519081900360200190f35b60045460006002600183161561010002600019019092169190910411156108a45760009150610994565b6108ac6105cf565b9050600081141580156108c0575060018114155b80156108cd575060028114155b156108db5760009150610994565b600480546000828152602060026001841615610100026000190190931692909204601f908101929092047f8a35acfbc15ff81a39ae7d344fd709f28e8600b4aa8c65c6b64bfe7fe36bd19b9081019236929083901061095d5782800160ff198235161785555b5061098d9291505b808211156109945760008155600101610949565b82800160010185558215610941579182015b8281111561094157823582600050559160200191906001019061096f565b5050600191505b5090565b6040805161ffff9092168252519081900360200190f35b60405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600f02600301f150905090810190601f168015610a0f5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b005b60408051600160a060020a03929092168252519081900360200190f35b6040805160ff9092168252519081900360200190f35b820191906000526020600020905b815481529060010190602001808311610a6057829003601f168201915b505050505090506101cc565b6004546000600260018316156101000260001901909216919091041115610ab35760009150610994565b610abb6105cf565b905060008114158015610acf575060018114155b8015610adc575060028114155b15610aea5760009150610994565b604080517f7c0278fc000000000000000000000000000000000000000000000000000000008152600360048201818152602483019384523660448401819052731deeda36e15ec9e80f3d7414d67a4803ae45fc8094637c0278fc946000939190606401848480828437820191505094505050505060006040518083038160008760325a03f215610002575050505090565b1415610c8557604080516001547f0fee183d000000000000000000000000000000000000000000000000000000008252600360048301523360016
0a060020a0316602483015234604483015260648201529051731deeda36e15ec9e80f3d7414d67a4803ae45fc8091630fee183d916084828101926020929190829003018160008760325a03f21561000257505060405151925050811515610c8a577389efe605e9ecbe22849cd85d5449cc946c26f8f36312c82bcc33346040518360e060020a0281526004018083600160a060020a031681526020018281526020019250505060206040518083038160008760325a03f2156100025750506040515115159050610c8a57610002565b505090565b81925050610994565b505b50565b1415610c93575a9150610cab3383610481565b1515610cb75750610c95565b731deeda36e15ec9e80f3d7414d67a4803ae45fc8063da46be0a60038433610cdd61046e565b610ce5610232565b6040518660e060020a0281526004018086815260200185815260200184600160a060020a031681526020018381526020018281526020019550505050505060006040518083038160008760325a03f21561000257505050610c933360408051600080547fc17e6817000000000000000000000000000000000000000000000000000000008352600160a060020a03908116600484015230163160248301529151731deeda36e15ec9e80f3d7414d67a4803ae45fc809263c17e68179260448082019360209390928390039091019082908760325a03f2156100025750505050565b30600160a060020a031660405180807f5f5f6469672875696e7432353629000000000000000000000000000000000000815260200150600e019050604051809103902060e060020a8091040260e060020a9004600184036040518260e060020a0281526004018082815260200191505060006040518083038160008760325a03f292505050151561084f5761000256", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000000": "0x0000000000000000000000007dd677b54fc954824a7bc49bd26cbdfa12c75adf", + "0x0000000000000000000000000000000000000000000000000000000000000001": "0x0000000000000000000000000000000000000000000000000011f79bd42b0c7c", + "0x0000000000000000000000000000000000000000000000000000000000000002": "0x00000000000000000000000000000000000000000000000000002dfeff8fca5d", + "0x0000000000000000000000000000000000000000000000000000000000000003": "0x00000000000000003defb9627dd677b54fc954824a7bc49bd26cbdfa12c75adf", + 
"0x0000000000000000000000000000000000000000000000000000000000000004": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000005": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000006": "0x0000000000000000000000000000000000000000000000000000000ba43b7400", + "0x0000000000000000000000000000000000000000000000000000000000000007": "0x00000000000000000000000000000000000000000000000000000000001e8480", + "0x0000000000000000000000000000000000000000000000000000000000000008": "0x000000000000000000000000000000000000000000000000000000000000000a", + "0x000000000000000000000000000000000000000000000000000000000000000a": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x000000000000000000000000000000000000000000000000000000000000000b": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x000000000000000000000000000000000000000000000000000000000000000c": "0x0000000000000000000000006c8f2a135f6ed072de4503bd7c4999a1a17f824b", + "0x000000000000000000000000000000000000000000000000000000000000000d": "0x000000000000000000000000000000000000000000000000000000000010365c", + "0x000000000000000000000000000000000000000000000000000000000000000e": "0x00000000000000000000000000000000000000000000000000000000000000ff" + } + }, + "0x7c1eb207c07e7ab13cf245585bd03d0fa478d034": { + "balance": "0x0", + "code": 
"0x650200d2f18c7350606060405236156100a05760e060020a60003504630e9f1a3c81146100a55780632b4096b4146100c95780636ec13982146100eb578063a3119e571461010d578063a749f19b1461012f578063ab7366f714610151578063bacd69581461017f578063bfdf87c0146101c2578063c4144b26146101e1578063caa46c9c1461023c578063e6ce3a6a14610297578063ed5bd7ea146102b6575b610007565b6102d960043560243560008181526001830160205260409020600401545b92915050565b6102d960043560243560008181526001830160205260409020600301546100c3565b6102d960043560243560008181526001830160205260409020600201546100c3565b6102d960043560243560008181526001838101602052604090912001546100c3565b6102d960043560243560008181526001830160205260409020600501546100c3565b6102eb6004356024355b600081815260018301602052604081208054829182918291908614610790576101b9565b6102eb600435602435604435600082815260018401602052604081205481908190819086141561068a576040812060010154851415610680575b50505050505050565b6102d960043560243560008181526001830160205260409020546100c3565b6102d96004356024355b6040805160c08101825260008082526020828101829052828401829052606083018290526080830182905260a08301829052848252600186019052918220805490919083908114156102fb576102f2565b6102d96004356024355b6040805160c08101825260008082526020828101829052828401829052606083018290526080830182905260a08301829052848252600186019052918220805490919083908114156104c0576102f2565b6102d960043560243560443582546000908181811415610a6557610a8c565b6102d96004356024356000818152600183016020526040812060050154116100c3565b60408051918252519081900360200190f35b005b815193505b50505092915050565b60048301546000146103d257600483810154600090815260018881016020908152604092839020835160c081018552815481529281015491830191909152600281015492820192909252600382015460608201529181015460808301526005015460a082015291505b60608201516000146102ed57606091820151600090815260018781016020908152604092839020835160c081018552815481529281015491830191909152600281015492820192909252600382015493810193909352600481015460808401526005015460a0830152610364565b600283015460001461045b57505060028101546
00081815260018681016020908152604092839020835160c081018552865481529286015491830191909152918101929092526003830154606083015260048301546080830152600583015460a08301525b81516003820154141561044d57805493506102f2565b600281015460001415610464575b600093506102f2565b6040805160c08101825282548152600183810154602083810191909152600285015483850181905260038601546060850152600486015460808501526005959095015460a0840152600094855290890190529120909150610437565b600383015460001461059757600383810154600090815260018881016020908152604092839020835160c081018552815481529281015491830191909152600281015492820192909252918101546060830152600481015460808301526005015460a082015291505b60808201516000146102ed57608091820151600090815260018781016020908152604092839020835160c081018552815481529281015491830191909152600281015492820192909252600382015460608201526004820154938101939093526005015460a0830152610529565b600283015460001461045b5750506002810154600081815260018681016020908152604092839020835160c081018552865481529286015491830191909152918101929092526003830154606083015260048301546080830152600583015460a08301525b81516004820154141561061257805493506102f2565b6002810154600014156106245761045b565b6040805160c08101825282548152600183810154602083810191909152600285015483850181905260038601546060850152600486015460808501526005959095015460a08401526000948552908901905291209091506105fc565b61068a878761015b565b86546000925082141561069b578587555b508554600090815260018701602052604090205b8054600014156107255785815560028101829055600181018590556101b987875b60008181526001830160205260408120905b8154610d8e9085905b60008181526001830160205260408082206004810154835281832060059081015460038301548552929093209092015403905b5092915050565b60018101548154925085126107625760048101546000141561074957600481018690555b60040154600090815260018701602052604090206106af565b60038101546000141561077757600381018690555b60030154600090815260018701602052604090206106af565b600381015460001415806107a957506004810154600014155b156107cf576003810154600014610826578054600188019060009061083b908a906102465
65b6002810154600014610a285760028101546000908152600188016020526040902060038101548254919550141561080857600060038501555b60048401548154141561081d57600060048501555b83549150610a2d565b80546001880190600090610852908a906101eb565b815260208101919091526040016000209450610865565b8152602081019190915260400160002094505b600285015460009081526001880160205260409020600381015486549195509092508214156108b9576004850154600385018190556000146108b95760048501546000908152604090208454600282015592505b60048401548554141561091357600385015460048501819055600014610913578660010160005060008660030160005054815260200190815260200160002060005092508250836000016000505483600201600050819055505b60028082015490860181905560001461098457866001016000506000826002016000505481526020019081526020016000206000509350835080600001600050548460030160005054141561096a57845460038501555b60048401548154141561097f57845460048501555b610989565b845487555b6003818101549086018190556000146109d6578660010160005060008260030160005054815260200190815260200160002060005092508250846000016000505483600201600050819055505b600481810154908601819055600014610a23578660010160005060008260040160005054815260200190815260200160002060005092508250846000016000505483600201600050819055505b610a2d565b600087555b6000808255600182018190556002820181905560038201819055600482018190556005820181905582146101b9576101b987836106d0565b50600081815260018601602052604090205b6001810154610a95908686610ad4565b805492505b50509392505050565b15610b915760fa60020a600f02851480610ab6575060f060020a613c3d0285145b15610af157600481015460001415610b3a5780549250610a8c565b86865b600060f960020a601f02831415610ce357508083135b9392505050565b60f960020a601f02851480610b0d575060f060020a613e3d0285145b80610b1f575060f060020a613d3d0285145b15610b9157600381015460001415610bc85780549250610a8c565b610b73610ad1878360040160005054600081815260018301602052604081205b600381015460001415610d61576001810154915061071e565b15610a87576004015460009081526001860160205260409020610a77565b60fa60020a600f02851480610bad575060f060020a613c3d0285145b15610c1f576003810
15460001415610c565760009250610a8c565b610c01610ad1878360030160005054600081815260018301602052604081205b600481015460001415610d48576001810154915061071e565b15610a87576003015460009081526001860160205260409020610a77565b60f960020a601f02851480610c3b575060f060020a613e3d0285145b15610c6f57600481015460001415610ca25760009250610a8c565b6003015460009081526001860160205260409020610a77565b60f060020a613d3d02851415610cde57600181015484901215610cbb57600481015460001415610ca25760009250610a8c565b6004015460009081526001860160205260409020610a77565b600181015484901315610cde57600381015460001415610c565760009250610a8c565b610a77565b60fa60020a600f02831415610cfb5750808312610aea565b60f060020a613e3d02831415610d15575080831215610aea565b60f060020a613c3d02831415610d2f575080831315610aea565b60f060020a613d3d028314156100a05750828114610aea565b6004015460009081526001840160205260409020610be8565b6003015460009081526001840160205260409020610b5a565b600282015460001415610fbd575b50505050565b90508060021415610e2657610daa8483600301600050546106eb565b6000191415610dc457610dc4848360030160005054610dfe565b8154610e269085905b60008181526001830160205260408120600381015490919081908190811415610ffb57610007565b8154610e5a9085905b60008181526001830160205260408120600481015490919081908190811415610e7f57610007565b806001191415610e5a57610e418483600401600050546106eb565b60011415610df557610df5848360040160005054610dcd565b8060001913158015610e6d575060018113155b15610d7a578154610d7a908590610f7a565b6004840180546000908152600188016020526040812060028088015490820181905592829055945014610f0f57856001016000506000856002016000505481526020019081526020016000206000509150836000016000505482600301600050541415610efa57826000016000505482600301600050819055505b835460048301541415610f0f57825460048301555b6003830154600014610f40575060038201546000908152600186016020526040902080546004850155835460028201555b82546002808601919091558454600385015583015460001415610f7157826000016000505486600001600050819055505b8354610fe69087905b60008181526001830160205260408082206003810154835281832060059081015460048
30154855292842001549092610fd99291908183106110fa5750816100c3565b60029091015460009081526001840160205260409020906106e2565b6001016005820155505050565b8254610ff3908790610f7a565b505050505050565b600384018054600090815260018801602052604081206002808801549082018190559282905594501461108b5785600101600050600085600201600050548152602001908152602001600020600050915083600001600050548260030160005054141561107657826000016000505482600301600050819055505b83546004830154141561108b57825460048301555b60048301546000146110bd57506004820154600081815260018701602052604090206003850191909155835460028201555b82546002808601919091558454600485015583015460001415610f7157826000016000505486600001600050819055508354610fe6908790610f7a565b50806100c356" + }, + "0x7dd677b54fc954824a7bc49bd26cbdfa12c75adf": { + "balance": "0xd7a58f5b73b4b6c4", + "code": "0x606060405236156100985760e060020a60003504633896002781146100e15780633defb962146100ea5780633f4be8891461010c5780634136aa351461011f5780634a420138146101a057806369c1a7121461028c5780638129fc1c146102955780638da5cb5b146102a6578063ae45850b146102b8578063af3309d8146102cc578063ea8a1af0146102d5578063ead50da3146102f4575b610308671bc16d674ec8000030600160a060020a03163110156100df57600554604051600160a060020a03918216916000913091909116319082818181858883f150505050505b565b61030a60005481565b610308671bc16d674ec8000030600160a060020a031631101561040f576100df565b61031c600454600160a060020a03165b90565b61030a5b600080548190118015610199575060408051600480547f0a16697a0000000000000000000000000000000000000000000000000000000083529251600160a060020a039390931692630a16697a928083019260209291829003018187876161da5a03f1156100025750506040515160ff01431090505b905061011c565b6103085b600354600554604080517f8c0e156d0000000000000000000000000000000000000000000000000000000081527f3defb96200000000000000000000000000000000000000000000000000000000600482015260a060020a90920461ffff1643016024830152621e8480604483015251600092600160a060020a031691638c0e156d916729a2241af62c000091606481810192602092909190829003018185886185025a03f1156100025750
506040515192600160a060020a0384161491506102899050576004805473ffffffffffffffffffffffffffffffffffffffff1916821790555b50565b61030a60015481565b61030860008054146103f2576100df565b61031c600554600160a060020a031681565b61031c600354600160a060020a031661011c565b61030a60025481565b610308600554600160a060020a03908116339091161461035157610002565b61033960055460a060020a900461ffff1681565b005b60408051918252519081900360200190f35b60408051600160a060020a03929092168252519081900360200190f35b6040805161ffff929092168252519081900360200190f35b6004546000600160a060020a03919091163111156103c75760408051600480547fea8a1af00000000000000000000000000000000000000000000000000000000083529251600160a060020a03939093169263ea8a1af0928083019260009291829003018183876161da5a03f115610002575050505b600554604051600160a060020a03918216916000913091909116319082818181858883f15050505050565b426000556100df6101a4565b600280546001908101909155429055565b600454600160a060020a03908116339091161461042b576100df565b610433610123565b151561043e576100df565b6103fe6101a456", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000000": "0x0000000000000000000000000000000000000000000000000000000056be5b99", + "0x0000000000000000000000000000000000000000000000000000000000000001": "0x0000000000000000000000000000000000000000000000000000000056d0009b", + "0x0000000000000000000000000000000000000000000000000000000000000002": "0x000000000000000000000000000000000000000000000000000000000000008b", + "0x0000000000000000000000000000000000000000000000000000000000000003": "0x0000000000000000000000006c8f2a135f6ed072de4503bd7c4999a1a17f824b", + "0x0000000000000000000000000000000000000000000000000000000000000004": "0x000000000000000000000000741467b251fca923d6229c4b439078b55dca233b", + "0x0000000000000000000000000000000000000000000000000000000000000005": "0x0000000000000000000001e0d3cda913deb6f67967b99d67acdfa1712c293601" + } + }, + "0x89efe605e9ecbe22849cd85d5449cc946c26f8f3": { + "balance": "0x0", + "code": 
"0x650200d2f18c73506060604052361561007f5760e060020a600035046312c82bcc81146100845780635548c837146100a55780635c54305e146101015780636b103966146101555780637fcf532c14610189578063b1df3d80146101d5578063b5bc6dbb146101ee578063c6ab451414610225578063e62af6c114610293575b610007565b6102c56004356024356000620186a05a10156103855761030083835a610232565b6102d760043560243560443581600160a060020a031683600160a060020a03167f47a08955ce2b7f21ea62ff0024e1ea0ad87430953554a87e6bc65d777f18e639836040518082815260200191505060405180910390a3505050565b6102d760043560243560443560408051838152602081018390528151600160a060020a038616927f9b24879829bed3003de08d5c5d7e18dcbb8dc76faebd95cafc5d4dec8c61a3a5928290030190a2505050565b6102d76004356024356044355b600160a060020a03821660009081526020849052604090205480820110156102d957610007565b6102d7600435602435604080518281529051600160a060020a038416917fd0c5cf41ee8ebf084ad0bce53de7cbc6e4693d9b53a4019ca36a2f91cdc20b3a919081900360200190a25050565b6102c560043560243560443560006102fc848484610162565b6102c5600435602435604435600160a060020a03821660009081526020849052604081205482901061032b576103338484846102a0565b6102c56004356024356044355b60006000831180156102605750604051600160a060020a038516908290859082818181858883f19350505050155b156102fc57604051600160a060020a03851690839085906000818181858888f1935050505015156102fc57506000610300565b6102d76004356024356044355b600160a060020a03821660009081526020849052604090205481111561030757610007565b60408051918252519081900360200190f35b005b600160a060020a0382166000908152602084905260409020805482019055505050565b5060015b9392505050565b600160a060020a038216600090815260208490526040902080548290039055505050565b506000610300565b604051600160a060020a03841690600090849082818181858883f1935050505015156102fc57604051600160a060020a038416908390600081818185876185025a03f19250505015156102fc57610007565b6103008383620186a061023256" + }, + "0xb834e3edfc1a927bdcecb67a9d0eccbd752a5bb3": { + "balance": "0xffe9b09a5c474dca", + "nonce": "975", + "code": "0x" + }, + 
"0xd3cda913deb6f67967b99d67acdfa1712c293601": { + "balance": "0x4f5807198e238f13e", + "nonce": "283", + "code": "0x" + }, + "0xe54d323f9ef17c1f0dede47ecc86a9718fe5ea34": { + "balance": "0x0", + "code": "0x650200d2f18c7350606060405236156100ab5760e060020a600035046326a7985a81146100b057806350d4e411146100be57806354fd4d501461023d578063586a69fa1461025d5780638e46afa91461026857806396cff3df14610272578063971c803f1461029657806398c9cdf4146102a157806398e00e54146102ae578063b152f19e146102b8578063c0f68859146102c4578063e3042c0f146102cf578063ea27a88114610461575b610007565b6102845b60006104cb6102a5565b604080516020601f60843560048181013592830184900484028501840190955281845261047f948035946024803595604435956064359560a494930191819084018382808284375094965050933593505060c43591505060e435610104356101243561014435610164356101843560006101806040519081016040528060008152602001600081526020016000815260200160206040519081016040528060008152602001508152602001600081526020016000815260200160008152602001600081526020016000815260200160008152602001600081526020016000815260200150610180604051908101604052808f81526020018e81526020018d81526020018c81526020018981526020018b81526020018a81526020018881526020018781526020018681526020018581526020018481526020015090506104d48f825b600060006000600a43018460e0015110156105de577f544f4f5f534f4f4e0000000000000000000000000000000000000000000000009150610524565b604080516000808252600760208301528183015290519081900360600190f35b61049c5b6103e85b90565b6104b460ff610265565b62030d403a0260026024356004350102015b60408051918252519081900360200190f35b61049c5b600a610265565b6102845b62030d40610265565b6102846010610265565b61028443600a01610265565b6102845b6020610265565b60408051808201825261047f916004803592909160649190602490600290839083908082843780516020601f608435808c01359182018390048302840183019094528083529499983598975060a49650909450910191908190840183828082843750506040805160c0818101909252959796359660c435969095506101a49450925060e49150600690839083908082843750909550505050505060408051610180818101835260008083526020838101829
0528385018290528451908101855281815260608401526080830181905260a0830181905260c0830181905260e0830181905261010083018190526101208301819052610140830181905261016083018190528351918201909352808984505181526020018960015060209081015182528101899052604081018890526060018484505181526020810187905260408101869052606001846001506020908101518252018460025060400151815260200184600350606001518152602001846004506080015181526020018460055060a00151905290506104e78982610200565b6102846004356024356044356064355b3a0291909201600202010190565b60408051600160a060020a03929092168252519081900360200190f35b6040805161ffff929092168252519081900360200190f35b6040805160ff929092168252519081900360200190f35b45039050610265565b9f9e505050505050505050505050505050565b9998505050505050505050565b8461016001511015610524577f494e53554646494349454e545f46554e4453000000000000000000000000000091505b600082146106ed576040805185518482529151600160a060020a0392909216917f513485fc54ef019ef1bc1ea683ef7d5d522f2865224ae10871ff992749c0ba4f9181900360200190a27389efe605e9ecbe22849cd85d5449cc946c26f8f36312c82bcc85600001518661016001516040518360e060020a0281526004018083600160a060020a031681526020018281526020019250505060206040518083038160008760325a03f215610007575050505b505092915050565b8360c0015161ffff166105ef61029a565b61ffff1611806106115750610602610261565b61ffff168460c0015161ffff16115b1561063e577f535441434b5f434845434b5f4f55545f4f465f52414e474500000000000000009150610524565b6106466102c8565b8460a0015160ff16101561067c577f47524143455f544f4f5f53484f525400000000000000000000000000000000009150610524565b6106846102a5565b84610100015110806106a157506106996100b4565b846101000151115b156106ce577f52455155495245445f4741535f4f55545f4f465f52414e4745000000000000009150610524565b6104f48461012001518561014001518660800151876101000151610471565b83610160015184600001518560e001518660a001518760200151886040015189606001518a608001518b61010001518c60c001518d61012001518e6101400151604051611078806108fa833901808c600160a060020a031681526020018b81526020018a60ff16815260200189600160a060020a031681526020018
88152602001806020018781526020018681526020018561ffff1681526020018481526020018381526020018281038252888181518152602001915080519060200190808383829060006004602084601f0104600f02600301f150905090810190601f1680156107ec5780820380516001836020036101000a031916815260200191505b509c505050505050505050505050506040518091039082f090509050737c1eb207c07e7ab13cf245585bd03d0fa478d03463bacd69588683600160a060020a031660010284600160a060020a0316630a16697a6040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000757505050604051805190602001506040518460e060020a02815260040180848152602001838152602001828152602001935050505060006040518083038160008760325a03f21561000757505060408051600160a060020a038416815290517f2b05d346f0b0b9fd470024751c52d3b5dac5c37796f077c1a66241f2eada44b792509081900360200190a18092506105d656606060405260405161107838038061107883398101604052805160805160a05160c05160e05161010051610120516101405161016051610180516101a051999a98999798969795969490940194929391929091908a84848a8a8a8a88886101008051600c8054600160a060020a031990811633179091556000805482168d1781556001868155600286815560078e90556008805461ffff19168e1790553a600655600380547c01000000000000000000000000000000000000000000000000000000008d04740100000000000000000000000000000000000000000260a060020a63ffffffff0219919096168e17169490941790935588516004805493819052956020601f9385161590910260001901909316939093048101919091047f8a35acfbc15ff81a39ae7d344fd709f28e8600b4aa8c65c6b64bfe7fe36bd19b908101939091608091909101908390106101ee57805160ff19168380011785555b5061017a9291505b8082111561021e5760008155600101610166565b5050826003600050600201600050819055505050505050505050508a600060006101000a815481600160a060020a030219169083021790555089600d6000508190555088600e60006101000a81548160ff021916908302179055505050505050505050505050610e56806102226000396000f35b8280016001018555821561015e579182015b8281111561015e578251826000505591602001919060010190610200565b509056606060405236156101a05760e060020a60003504630924120081146101c25780630a16697a146101cf5780630fd1f94e146101d8578
063137c638b1461022e57806321835af61461023b57806324032866146102545780632f95b833146102d65780633017fe24146102e55780633233c686146102ef57806337f4c00e146102fa5780634500054f146103055780634e417a98146103785780634e71d92d146103e15780634f059a43146103f35780636146195414610451578063625cc4651461046157806367ce940d1461046a5780637d298ee314610477578063830953ab146104f9578063938b5f321461050457806395ee122114610516578063974654f41461052a578063a06db7dc14610535578063a9d2293d14610541578063ae45850b14610597578063b0f07e44146105a9578063c19d93fb146105cb578063c6502da81461062e578063c680362214610637578063ca94692d1461064a578063cc3471af14610673578063d379be23146106c9578063d62457f6146106e3578063ea8a1af0146106ee578063f5562753146107f3578063f6b4dfb414610854575b610868600080548190600160a060020a03908116339091161461087a57610994565b610868600b5460ff165b90565b610868600d5481565b6108686000731deeda36e15ec9e80f3d7414d67a4803ae45fc80630fd1f94e6040518160e060020a02815260040180905060206040518083038160008760325a03f2156100025750506040515191506101cc9050565b6108685b62012cc86101cc565b61086860043560008160001415610dc65750600161084f565b6108686004356024356000731deeda36e15ec9e80f3d7414d67a4803ae45fc80630bd295e6600360005085856040518460e060020a0281526004018084815260200183600160a060020a03168152602001828152602001935050505060206040518083038160008760325a03f215610002575050604051519150505b92915050565b61099860085461ffff166101cc565b61086860026101cc565b610868600a546101cc565b6108686006546101cc565b6108686000731deeda36e15ec9e80f3d7414d67a4803ae45fc8063a09431546003600050336040518360e060020a0281526004018083815260200182600160a060020a031681526020019250505060206040518083038160008760325a03f2156100025750506040515191506101cc9050565b6109af60408051602081810183526000825282516004805460026001821615610100026000190190911604601f81018490048402830184019095528482529293909291830182828015610a7d5780601f10610a5257610100808354040283529160200191610a7d565b61086860006000600180610b7b6105cf565b6108686000731deeda36e15ec9e80f3d7414d67a4803ae45fc8063f5562753436040518260e060020a0
281526004018082815260200191505060206040518083038160008760325a03f2156100025750506040515191506101cc9050565b610a1d6000600480610c986105cf565b61086860025481565b6108685b620186a06101cc565b6108686004356024355b6000731deeda36e15ec9e80f3d7414d67a4803ae45fc8063a1873db6600360005085856040518460e060020a0281526004018084815260200183600160a060020a03168152602001828152602001935050505060206040518083038160008760325a03f2156100025750506040515191506102d09050565b6108686009546101cc565b610a1f600c54600160a060020a031681565b610868600b5462010000900460ff166101cc565b6108686007546101cc565b610a3c600e5460ff1681565b6108686000731deeda36e15ec9e80f3d7414d67a4803ae45fc8063a9d2293d6040518160e060020a02815260040180905060206040518083038160008760325a03f2156100025750506040515191506101cc9050565b610a1f600054600160a060020a031681565b610868600080548190600160a060020a039081163390911614610a8957610994565b6108685b6000731deeda36e15ec9e80f3d7414d67a4803ae45fc80635054d98a60036000506040518260e060020a0281526004018082815260200191505060206040518083038160008760325a03f2156100025750506040515191506101cc9050565b61086860015481565b610868600b54610100900460ff166101cc565b61086860035474010000000000000000000000000000000000000000900460e060020a026101cc565b6108686000731deeda36e15ec9e80f3d7414d67a4803ae45fc8063cc3471af6040518160e060020a02815260040180905060206040518083038160008760325a03f2156100025750506040515191506101cc9050565b610a1f600854620100009004600160a060020a03166101cc565b6108686005546101cc565b610a1d604080517fa09431540000000000000000000000000000000000000000000000000000000081526003600482015233600160a060020a031660248201529051731deeda36e15ec9e80f3d7414d67a4803ae45fc809163a0943154916044808301926020929190829003018160008760325a03f215610002575050604051511590506107f157604080517f7e9265620000000000000000000000000000000000000000000000000000000081526003600482015233600160a060020a031660248201529051731deeda36e15ec9e80f3d7414d67a4803ae45fc8091637e9265629160448083019260009291908290030181838760325a03f215610002575050505b565b6108686004356000731deeda36e15ec9e80
f3d7414d67a4803ae45fc8063f5562753836040518260e060020a0281526004018082815260200191505060206040518083038160008760325a03f215610002575050604051519150505b919050565b610a1f600354600160a060020a03166101cc565b60408051918252519081900360200190f35b60045460006002600183161561010002600019019092169190910411156108a45760009150610994565b6108ac6105cf565b9050600081141580156108c0575060018114155b80156108cd575060028114155b156108db5760009150610994565b600480546000828152602060026001841615610100026000190190931692909204601f908101929092047f8a35acfbc15ff81a39ae7d344fd709f28e8600b4aa8c65c6b64bfe7fe36bd19b9081019236929083901061095d5782800160ff198235161785555b5061098d9291505b808211156109945760008155600101610949565b82800160010185558215610941579182015b8281111561094157823582600050559160200191906001019061096f565b5050600191505b5090565b6040805161ffff9092168252519081900360200190f35b60405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600f02600301f150905090810190601f168015610a0f5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b005b60408051600160a060020a03929092168252519081900360200190f35b6040805160ff9092168252519081900360200190f35b820191906000526020600020905b815481529060010190602001808311610a6057829003601f168201915b505050505090506101cc565b6004546000600260018316156101000260001901909216919091041115610ab35760009150610994565b610abb6105cf565b905060008114158015610acf575060018114155b8015610adc575060028114155b15610aea5760009150610994565b604080517f7c0278fc000000000000000000000000000000000000000000000000000000008152600360048201818152602483019384523660448401819052731deeda36e15ec9e80f3d7414d67a4803ae45fc8094637c0278fc946000939190606401848480828437820191505094505050505060006040518083038160008760325a03f215610002575050505090565b1415610c8557604080516001547f0fee183d0000000000000000000000000000000000000000000000000000000082526003600483015233600160a060020a0316602483015234604483015260648201529051731deeda36e15ec9e80f3d7414d67a4803ae45fc8091630fee183d9160848
28101926020929190829003018160008760325a03f21561000257505060405151925050811515610c8a577389efe605e9ecbe22849cd85d5449cc946c26f8f36312c82bcc33346040518360e060020a0281526004018083600160a060020a031681526020018281526020019250505060206040518083038160008760325a03f2156100025750506040515115159050610c8a57610002565b505090565b81925050610994565b505b50565b1415610c93575a9150610cab3383610481565b1515610cb75750610c95565b731deeda36e15ec9e80f3d7414d67a4803ae45fc8063da46be0a60038433610cdd61046e565b610ce5610232565b6040518660e060020a0281526004018086815260200185815260200184600160a060020a031681526020018381526020018281526020019550505050505060006040518083038160008760325a03f21561000257505050610c933360408051600080547fc17e6817000000000000000000000000000000000000000000000000000000008352600160a060020a03908116600484015230163160248301529151731deeda36e15ec9e80f3d7414d67a4803ae45fc809263c17e68179260448082019360209390928390039091019082908760325a03f2156100025750505050565b30600160a060020a031660405180807f5f5f6469672875696e7432353629000000000000000000000000000000000000815260200150600e019050604051809103902060e060020a8091040260e060020a9004600184036040518260e060020a0281526004018082815260200191505060006040518083038160008760325a03f292505050151561084f5761000256" + } + }, + "config": { + "chainId": 1, + "homesteadBlock": 1150000, + "daoForkBlock": 1920000, + "daoForkSupport": true, + "eip150Block": 2463000, + "eip150Hash": "0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0", + "eip155Block": 2675000, + "eip158Block": 2675000, + "byzantiumBlock": 4370000, + "constantinopleBlock": 7280000, + "petersburgBlock": 7280000, + "istanbulBlock": 9069000, + "muirGlacierBlock": 9200000, + "berlinBlock": 12244000, + "londonBlock": 12965000, + "arrowGlacierBlock": 13773000, + "grayGlacierBlock": 15050000, + "ethash": {} + } + }, + "context": { + "number": "1062503", + "difficulty": "13700504019867", + "timestamp": "1456480446", + "gasLimit": "3141592", + "miner": "0x2a65aca4d5fc5b5c859090a6c34d164135398226" + 
}, + "input": "0xf86b8203cf850ba43b740083200b2094741467b251fca923d6229c4b439078b55dca233b8084614619541ca078293714f69a810356f1ee29dc686ec2ca3a0e5448e1ef6322c77369ebdd26c2a01c3836fa363548959554ee5360361be9db4aea9eb7c31f61550f0e9a10138adf", + "tracerConfig": { + "diffMode": true + }, + "result": { + "pre": { + "0x6c8f2a135f6ed072de4503bd7c4999a1a17f824b": { + "balance": "0x0", + "nonce": 237, + "code": "0x6060604052361561027c5760e060020a600035046301991313811461027e57806303d22885146102ca5780630450991814610323578063049ae734146103705780630ce46c43146103c35780630e85023914610602578063112e39a8146106755780631b4fa6ab146106c25780631e74a2d3146106d057806326a7985a146106fd5780633017fe2414610753578063346cabbc1461075c578063373a1bc3146107d55780633a9e74331461081e5780633c2c21a01461086e5780633d9ce89b146108ba578063480b70bd1461092f578063481078431461097e57806348f0518714610a0e5780634c471cde14610a865780634db3da8314610b09578063523ccfa814610b4f578063586a69fa14610be05780635a9f2def14610c3657806364ee49fe14610caf57806367beaccb14610d055780636840246014610d74578063795b9a6f14610dca5780637b55c8b514610e415780637c73f84614610ee15780638c0e156d14610f145780638c1d01c814610f605780638e46afa914610f69578063938c430714610fc0578063971c803f146111555780639772c982146111ac57806398c9cdf41461122857806398e00e541461127f5780639f927be7146112d5578063a00aede914611383578063a1c0539d146113d3578063aff21c6514611449578063b152f19e14611474578063b549793d146114cb578063b5b33eda1461154b578063bbc6eb1f1461159b578063c0f68859146115ab578063c3a2c0c314611601578063c43d05751461164b578063d8e5c04814611694578063dbfef71014611228578063e29fb547146116e7578063e6470fbe1461173a578063ea27a8811461174c578063ee77fe86146117d1578063f158458c14611851575b005b611882600435602435604435600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e41160016000503387876020604051908101604052806000815260200150612225610f6d565b61188260043560243560443560643560843560a43560c435600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e4116001600050338b8a60206040519081016040528060008152602001508961
25196106c6565b611882600435600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e41160016000503385600060e060020a026020604051908101604052806000815260200150611e4a610f6d565b611882600435602435604435606435608435600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e41160016000503389896020604051908101604052806000815260200150886124e86106c6565b604080516020604435600481810135601f8101849004840285018401909552848452611882948135946024803595939460649492939101918190840183828082843750506040805160a08082019092529597963596608435969095506101449450925060a491506005908390839080828437509095505050505050604080518082018252600160a060020a03338116825288166020820152815160c0810190925260009173e54d323f9ef17c1f0dede47ecc86a9718fe5ea349163e3042c0f91600191908a908a9089908b90808b8b9090602002015181526020018b60016005811015610002579090602002015181526020018b60026005811015610002579090602002015181526020018b60036005811015610002579090602002015181526020018b6004600581101561000257909060200201518152602001348152602001506040518860e060020a02815260040180888152602001876002602002808383829060006004602084601f0104600f02600301f150905001868152602001806020018560ff1681526020018461ffff168152602001836006602002808383829060006004602084601f0104600f02600301f1509050018281038252868181518152602001915080519060200190808383829060006004602084601f0104600f02600301f150905090810190601f1680156105d25780820380516001836020036101000a031916815260200191505b509850505050505050505060206040518083038160008760325a03f2156100025750506040515191506124cd9050565b60408051602060248035600481810135601f81018590048502860185019096528585526118829581359591946044949293909201918190840183828082843750949650505050505050600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e411600133808787611e64610f6d565b611882600435600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e41160016000503333600060e060020a026020604051908101604052806000815260200150611d28610f6d565b61189f5b6000611bf8611159565b6118b7600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea3463ea27a881600060005054611a9561159f565b6118b7600073e5
4d323f9ef17c1f0dede47ecc86a9718fe5ea346326a7985a6040518160e060020a02815260040180905060206040518083038160008760325a03f2156100025750506040515191506107599050565b6118b760075b90565b604080516020606435600481810135601f8101849004840285018401909552848452611882948135946024803595604435956084949201919081908401838280828437509496505093359350505050600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e41160013389898861224b610f6d565b611882600435602435600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e41160016000503386866020604051908101604052806000815260200150611e64610f6d565b611882600435602435604435606435600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e41160016000503333896020604051908101604052806000815260200150886123bc6106c6565b611882600435602435604435600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e41160016000503387866020604051908101604052806000815260200150611f8d610f6d565b60408051602060248035600481810135601f810185900485028601850190965285855261188295813595919460449492939092019181908401838280828437509496505093359350505050600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e411600133808888612225610f6d565b611882600435602435604435606435600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e41160016000503388886020604051908101604052806000815260200150612388610f6d565b611882600435604080517fc4144b2600000000000000000000000000000000000000000000000000000000815260016004820152600160a060020a03831660248201529051600091737c1eb207c07e7ab13cf245585bd03d0fa478d0349163c4144b26916044818101926020929091908290030181878760325a03f215610002575050604051519150611b409050565b604080516020604435600481810135601f81018490048402850184019095528484526118829481359460248035959394606494929391019181908401838280828437509496505093359350505050600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e411600133888888612238610f6d565b604080516020604435600481810135601f810184900484028501840190955284845261188294813594602480359593946064949293910191819084018382808284375094965050933593505060843591505060a43560c435600073e54d323f9ef17c1f0dede4
7ecc86a9718fe5ea346350d4e4116001338b8b8b896126536106c6565b611882600435600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e41160016000503333866020604051908101604052806000815260200150611e4a610f6d565b6118b76004355b604080517fed5bd7ea00000000000000000000000000000000000000000000000000000000815260016004820152600160a060020a03831660248201529051600091737c1eb207c07e7ab13cf245585bd03d0fa478d0349163ed5bd7ea916044818101926020929091908290030181878760325a03f215610002575050604051519150611b409050565b61189f600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea3463586a69fa6040518160e060020a02815260040180905060206040518083038160008760325a03f2156100025750506040515191506107599050565b60408051602060248035600481810135601f81018590048502860185019096528585526118829581359591946044949293909201918190840183828082843750949650509335935050606435915050600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e411600133808989612388610f6d565b61188260043560243560443560643560843560a435600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e4116001600050338a896020604051908101604052806000815260200150886124d76106c6565b6040805160206004803580820135601f8101849004840285018401909552848452611882949193602493909291840191908190840183828082843750949650505050505050600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e411600133808587611e4a610f6d565b61188260043560243560443560643560843560a435600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e4116001600050338a8a60206040519081016040528060008152602001508961262d6106c6565b604080516020606435600481810135601f810184900484028501840190955284845261188294813594602480359560443595608494920191908190840183828082843750949650505050505050600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e4116001338888876120c7610f6d565b604080516020604435600481810135601f81018490048402850184019095528484526118829481359460248035959394606494929391019181908401838280828437505060408051608080820190925295979635969561010495509350608492508591508390839080828437509095505050505050600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e4116001
338989898961263a6106c6565b6118b7600435602435604435600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea3463ea27a881858585611ba361122c565b611882600435602435604435600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e4116001600050333388602060405190810160405280600081526020015061236e610f6d565b6118b760005481565b6118c95b600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea34638e46afa96040518160e060020a02815260040180905060206040518083038160008760325a03f2156100025750506040515191506107599050565b60408051602060248035600481810135601f8101859004850286018501909652858552611882958135959194604494929390920191819084018382808284375094965050933593505060643591505060843560a43560c43560e43561010435600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e411600160005033338e8e8d8f8e8e8e8e8e346040518e60e060020a028152600401808e81526020018d600160a060020a031681526020018c600160a060020a031681526020018b8152602001806020018a60ff1681526020018961ffff16815260200188815260200187815260200186815260200185815260200184815260200183815260200182810382528b8181518152602001915080519060200190808383829060006004602084601f0104600f02600301f150905090810190601f1680156111195780820380516001836020036101000a031916815260200191505b509e50505050505050505050505050505060206040518083038160008760325a03f215610002575050604051519b9a5050505050505050505050565b61189f5b600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea3463971c803f6040518160e060020a02815260040180905060206040518083038160008760325a03f2156100025750506040515191506107599050565b604080516020604435600481810135601f8101849004840285018401909552848452611882948135946024803595939460649492939101918190840183828082843750949650509335935050608435915050600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e4116001338989896123a2610f6d565b6118b75b600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346398c9cdf46040518160e060020a02815260040180905060206040518083038160008760325a03f2156100025750506040515191506107599050565b6118b7600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346398e00e546040518160e060020a028152600401809050602060405180830381600087
60325a03f2156100025750506040515191506107599050565b611882600435604080517fe6ce3a6a000000000000000000000000000000000000000000000000000000008152600160048201527f3e3d0000000000000000000000000000000000000000000000000000000000006024820152604481018390529051600091737c1eb207c07e7ab13cf245585bd03d0fa478d0349163e6ce3a6a916064818101926020929091908290030181878760325a03f215610002575050604051519150611b409050565b611882600435602435600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e41160016000503385600060e060020a0260206040519081016040528060008152602001506121ef610f6d565b604080516020604435600481810135601f8101849004840285018401909552848452611882948135946024803595939460649492939101918190840183828082843750949650505050505050600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e4116001338787876120b5610f6d565b6118b7600435600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea3463ea27a88183611b4561159f565b6118b75b600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea3463b152f19e6040518160e060020a02815260040180905060206040518083038160008760325a03f2156100025750506040515191506107599050565b60408051602060248035600481810135601f8101859004850286018501909652858552611882958135959194604494929390920191819084018382808284375094965050933593505060643591505060843560a435600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e411600133808b8b8961262d6106c6565b611882600435602435600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e41160016000503386600060e060020a026020604051908101604052806000815260200150612200610f6d565b6118b75b60005460649004610759565b6118b7600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea3463c0f688596040518160e060020a02815260040180905060206040518083038160008760325a03f2156100025750506040515191506107599050565b611882600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e41160016000503333600060e060020a026020604051908101604052806000815260200150611bff610f6d565b611882600435602435600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e41160016000503333876020604051908101604052806000815260200150612200610f6d565b611882600435602435604435600073e5
4d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e41160016000503387600060e060020a026020604051908101604052806000815260200150612213610f6d565b611882600435602435604435606435608435600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e411600160005033338a60206040519081016040528060008152602001508961250c6106c6565b61027c6000600060006118e033610b56565b6118b7600435602435604435606435600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea3463ea27a881868686866040518560e060020a0281526004018085815260200184815260200183815260200182815260200194505050505060206040518083038160008760325a03f215610002575050604051519150505b949350505050565b604080516020604435600481810135601f810184900484028501840190955284845261188294813594602480359593946064949293910191819084018382808284375094965050933593505060843591505060a435600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e4116001338a8a8a886124fa6106c6565b6118b7600435602435600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea3463ea27a88184846000611b4f61122c565b60408051600160a060020a03929092168252519081900360200190f35b6040805161ffff929092168252519081900360200190f35b60408051918252519081900360200190f35b6040805160ff929092168252519081900360200190f35b15611a905733925082600160a060020a031663c6502da86040518160e060020a0281526004018090506020604051808303816000876161da5a03f1156100025750506040805180517fc6803622000000000000000000000000000000000000000000000000000000008252915191945063c680362291600482810192602092919082900301816000876161da5a03f11561000257505060405151905080156119d1575082600160a060020a031663d379be236040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060405151600160a060020a03166000141590505b80156119dd5750600082115b80156119ec5750600054600190115b15611a90578183600160a060020a031663830953ab6040518160e060020a0281526004018090506020604051808303816000876161da5a03f1156100025750506040515160640291909104915050604281118015611a4d5750600054829011155b15611a675760008054612710612711909102049055611a90565b602181108015611a7a5750600054829010155b15611a90576000805461271061270f909102
0490555b505050565b6000611a9f61122c565b6040518560e060020a0281526004018085815260200184815260200183815260200182815260200194505050505060206040518083038160008760325a03f2156100025750506040515191506107599050565b6040518560e060020a0281526004018085815260200184815260200183815260200182815260200194505050505060206040518083038160008760325a03f215610002575050604051519150505b919050565b6000611af261122c565b6040518560e060020a0281526004018085815260200184815260200183815260200182815260200194505050505060206040518083038160008760325a03f215610002575050604051519150505b92915050565b6040518560e060020a0281526004018085815260200184815260200183815260200182815260200194505050505060206040518083038160008760325a03f215610002575050604051519150505b9392505050565b9050610759565b611c076106c6565b6000611c11611478565b611c1961122c565b600054611c2461159f565b346040518e60e060020a028152600401808e81526020018d600160a060020a031681526020018c600160a060020a031681526020018b8152602001806020018a60ff1681526020018961ffff16815260200188815260200187815260200186815260200185815260200184815260200183815260200182810382528b8181518152602001915080519060200190808383829060006004602084601f0104600f02600301f150905090810190601f168015611cf25780820380516001836020036101000a031916815260200191505b509e50505050505050505050505050505060206040518083038160008760325a03f2156100025750506040515191506107599050565b611d306106c6565b60008b611d3b61122c565b600054611d4661159f565b346040518e60e060020a028152600401808e81526020018d600160a060020a031681526020018c600160a060020a031681526020018b8152602001806020018a60ff1681526020018961ffff16815260200188815260200187815260200186815260200185815260200184815260200183815260200182810382528b8181518152602001915080519060200190808383829060006004602084601f0104600f02600301f150905090810190601f168015611e145780820380516001836020036101000a031916815260200191505b509e50505050505050505050505050505060206040518083038160008760325a03f215610002575050604051519150611b409050565b611e526106c6565b6000611e5c611478565b611d3b61122c565b611e6c6106c6565b6000611e76611478
565b611e7e61122c565b600054611e8961159f565b346040518e60e060020a028152600401808e81526020018d600160a060020a031681526020018c600160a060020a031681526020018b8152602001806020018a60ff1681526020018961ffff16815260200188815260200187815260200186815260200185815260200184815260200183815260200182810382528b8181518152602001915080519060200190808383829060006004602084601f0104600f02600301f150905090810190601f168015611f575780820380516001836020036101000a031916815260200191505b509e50505050505050505050505050505060206040518083038160008760325a03f215610002575050604051519150611b9d9050565b611f956106c6565b8b611f9e611478565b611fa661122c565b600054611fb161159f565b346040518e60e060020a028152600401808e81526020018d600160a060020a031681526020018c600160a060020a031681526020018b8152602001806020018a60ff1681526020018961ffff16815260200188815260200187815260200186815260200185815260200184815260200183815260200182810382528b8181518152602001915080519060200190808383829060006004602084601f0104600f02600301f150905090810190601f16801561207f5780820380516001836020036101000a031916815260200191505b509e50505050505050505050505050505060206040518083038160008760325a03f215610002575050604051519150611bf19050565b6120bd6106c6565b6000611f9e611478565b6120cf6106c6565b8b6120d8611478565b6120e061122c565b6000546120eb61159f565b346040518e60e060020a028152600401808e81526020018d600160a060020a031681526020018c600160a060020a031681526020018b8152602001806020018a60ff1681526020018961ffff16815260200188815260200187815260200186815260200185815260200184815260200183815260200182810382528b8181518152602001915080519060200190808383829060006004602084601f0104600f02600301f150905090810190601f1680156121b95780820380516001836020036101000a031916815260200191505b509e50505050505050505050505050505060206040518083038160008760325a03f2156100025750506040515191506117c99050565b6121f76106c6565b8b611e76611478565b6122086106c6565b60008b611e7e61122c565b61221b6106c6565b8a8c611fa661122c565b61222d6106c6565b60008b611fa661122c565b6122406106c6565b60008b6120e061122c565b6122536106c6565b8c8b61225d61122c56
5b60005461226861159f565b346040518e60e060020a028152600401808e81526020018d600160a060020a031681526020018c600160a060020a031681526020018b8152602001806020018a60ff1681526020018961ffff16815260200188815260200187815260200186815260200185815260200184815260200183815260200182810382528b8181518152602001915080519060200190808383829060006004602084601f0104600f02600301f150905090810190601f1680156123365780820380516001836020036101000a031916815260200191505b509e50505050505050505050505050505060206040518083038160008760325a03f21561000257505060405151979650505050505050565b6123766106c6565b60008c8c600060005054611fb161159f565b6123906106c6565b60008c8c6000600050546120eb61159f565b6123aa6106c6565b60008c8c60006000505461226861159f565b60008d8d6000600050546120eb61159f565b346040518e60e060020a028152600401808e81526020018d600160a060020a031681526020018c600160a060020a031681526020018b8152602001806020018a60ff1681526020018961ffff16815260200188815260200187815260200186815260200185815260200184815260200183815260200182810382528b8181518152602001915080519060200190808383829060006004602084601f0104600f02600301f150905090810190601f16801561249c5780820380516001836020036101000a031916815260200191505b509e50505050505050505050505050505060206040518083038160008760325a03f215610002575050604051519150505b9695505050505050565b8e8d8d6000600050546123ce61159f565b60008d8d60006000505461226861159f565b60008d8d6000600050546123ce61159f565b60008e8e8d61226861159f565b8f8e8e8d61252561159f565b346040518e60e060020a028152600401808e81526020018d600160a060020a031681526020018c600160a060020a031681526020018b8152602001806020018a60ff1681526020018961ffff16815260200188815260200187815260200186815260200185815260200184815260200183815260200182810382528b8181518152602001915080519060200190808383829060006004602084601f0104600f02600301f150905090810190601f1680156125f35780820380516001836020036101000a031916815260200191505b509e50505050505050505050505050505060206040518083038160008760325a03f215610002575050604051519998505050505050505050565b60008e8e8d6123ce61159f565b8a5160208c015160408d
015160608e015161226861159f565b60008e8e8d61252561159f56", + "storage": { + "0x26cba0705aade77fa0f9275b68d01fb71206a44abd3a4f5a838f7241efbc8abf": "0x00000000000000000000000042e69cd0a17ae9992f9ad93d136c4bb0d95e3230", + "0x49f03a2c2f4fd666a32141fb324283b6f84a1d07b5fa435669fdb55766aef715": "0x000000000000000000000000d7b0e93fa8386b17fb5d1cf934076203dcc122f3", + "0x95e05d02b91af970cb4998107e8613455258880676e00b819c12d675e60de5bc": "0x000000000000000000000000d7b0e93fa8386b17fb5d1cf934076203dcc122f3", + "0x95e05d02b91af970cb4998107e8613455258880676e00b819c12d675e60de5bd": "0x000000000000000000000000c9a2bfd279fe57e7651e5d9f29bb1793c9a1cf01", + "0x95e05d02b91af970cb4998107e8613455258880676e00b819c12d675e60de5bf": "0x0000000000000000000000000000000000000000000000000000000000000002", + "0xd3a5582b3eff6ef8ee90f3962e9d598a3f4b7d07840356c9b8fd7b494879b4f1": "0x000000000000000000000000a4d91b341f0e9a7000be916a668408b463f4c38c", + "0xd3a5582b3eff6ef8ee90f3962e9d598a3f4b7d07840356c9b8fd7b494879b4f3": "0x00000000000000000000000042e69cd0a17ae9992f9ad93d136c4bb0d95e3230", + "0xd3a5582b3eff6ef8ee90f3962e9d598a3f4b7d07840356c9b8fd7b494879b4f4": "0x0000000000000000000000000000000000000000000000000000000000000003", + "0xf7518490c515b9fc8e7fe713b647fe88eacefc92d616fa9454e61fe9aab64dbc": "0x0000000000000000000000000000000000000000000000000000000000000001" + } + }, + "0x741467b251fca923d6229c4b439078b55dca233b": { + "balance": "0x29c613529e8218f8", + "code": 
"0x606060405236156101a05760e060020a60003504630924120081146101c25780630a16697a146101cf5780630fd1f94e146101d8578063137c638b1461022e57806321835af61461023b57806324032866146102545780632f95b833146102d65780633017fe24146102e55780633233c686146102ef57806337f4c00e146102fa5780634500054f146103055780634e417a98146103785780634e71d92d146103e15780634f059a43146103f35780636146195414610451578063625cc4651461046157806367ce940d1461046a5780637d298ee314610477578063830953ab146104f9578063938b5f321461050457806395ee122114610516578063974654f41461052a578063a06db7dc14610535578063a9d2293d14610541578063ae45850b14610597578063b0f07e44146105a9578063c19d93fb146105cb578063c6502da81461062e578063c680362214610637578063ca94692d1461064a578063cc3471af14610673578063d379be23146106c9578063d62457f6146106e3578063ea8a1af0146106ee578063f5562753146107f3578063f6b4dfb414610854575b610868600080548190600160a060020a03908116339091161461087a57610994565b610868600b5460ff165b90565b610868600d5481565b6108686000731deeda36e15ec9e80f3d7414d67a4803ae45fc80630fd1f94e6040518160e060020a02815260040180905060206040518083038160008760325a03f2156100025750506040515191506101cc9050565b6108685b62012cc86101cc565b61086860043560008160001415610dc65750600161084f565b6108686004356024356000731deeda36e15ec9e80f3d7414d67a4803ae45fc80630bd295e6600360005085856040518460e060020a0281526004018084815260200183600160a060020a03168152602001828152602001935050505060206040518083038160008760325a03f215610002575050604051519150505b92915050565b61099860085461ffff166101cc565b61086860026101cc565b610868600a546101cc565b6108686006546101cc565b6108686000731deeda36e15ec9e80f3d7414d67a4803ae45fc8063a09431546003600050336040518360e060020a0281526004018083815260200182600160a060020a031681526020019250505060206040518083038160008760325a03f2156100025750506040515191506101cc9050565b6109af60408051602081810183526000825282516004805460026001821615610100026000190190911604601f81018490048402830184019095528482529293909291830182828015610a7d5780601f10610a5257610100808354040283529160200191610a7d565b610868600
06000600180610b7b6105cf565b6108686000731deeda36e15ec9e80f3d7414d67a4803ae45fc8063f5562753436040518260e060020a0281526004018082815260200191505060206040518083038160008760325a03f2156100025750506040515191506101cc9050565b610a1d6000600480610c986105cf565b61086860025481565b6108685b620186a06101cc565b6108686004356024355b6000731deeda36e15ec9e80f3d7414d67a4803ae45fc8063a1873db6600360005085856040518460e060020a0281526004018084815260200183600160a060020a03168152602001828152602001935050505060206040518083038160008760325a03f2156100025750506040515191506102d09050565b6108686009546101cc565b610a1f600c54600160a060020a031681565b610868600b5462010000900460ff166101cc565b6108686007546101cc565b610a3c600e5460ff1681565b6108686000731deeda36e15ec9e80f3d7414d67a4803ae45fc8063a9d2293d6040518160e060020a02815260040180905060206040518083038160008760325a03f2156100025750506040515191506101cc9050565b610a1f600054600160a060020a031681565b610868600080548190600160a060020a039081163390911614610a8957610994565b6108685b6000731deeda36e15ec9e80f3d7414d67a4803ae45fc80635054d98a60036000506040518260e060020a0281526004018082815260200191505060206040518083038160008760325a03f2156100025750506040515191506101cc9050565b61086860015481565b610868600b54610100900460ff166101cc565b61086860035474010000000000000000000000000000000000000000900460e060020a026101cc565b6108686000731deeda36e15ec9e80f3d7414d67a4803ae45fc8063cc3471af6040518160e060020a02815260040180905060206040518083038160008760325a03f2156100025750506040515191506101cc9050565b610a1f600854620100009004600160a060020a03166101cc565b6108686005546101cc565b610a1d604080517fa09431540000000000000000000000000000000000000000000000000000000081526003600482015233600160a060020a031660248201529051731deeda36e15ec9e80f3d7414d67a4803ae45fc809163a0943154916044808301926020929190829003018160008760325a03f215610002575050604051511590506107f157604080517f7e9265620000000000000000000000000000000000000000000000000000000081526003600482015233600160a060020a031660248201529051731deeda36e15ec9e80f3d7414d67a4803ae45fc8091637e9
265629160448083019260009291908290030181838760325a03f215610002575050505b565b6108686004356000731deeda36e15ec9e80f3d7414d67a4803ae45fc8063f5562753836040518260e060020a0281526004018082815260200191505060206040518083038160008760325a03f215610002575050604051519150505b919050565b610a1f600354600160a060020a03166101cc565b60408051918252519081900360200190f35b60045460006002600183161561010002600019019092169190910411156108a45760009150610994565b6108ac6105cf565b9050600081141580156108c0575060018114155b80156108cd575060028114155b156108db5760009150610994565b600480546000828152602060026001841615610100026000190190931692909204601f908101929092047f8a35acfbc15ff81a39ae7d344fd709f28e8600b4aa8c65c6b64bfe7fe36bd19b9081019236929083901061095d5782800160ff198235161785555b5061098d9291505b808211156109945760008155600101610949565b82800160010185558215610941579182015b8281111561094157823582600050559160200191906001019061096f565b5050600191505b5090565b6040805161ffff9092168252519081900360200190f35b60405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600f02600301f150905090810190601f168015610a0f5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b005b60408051600160a060020a03929092168252519081900360200190f35b6040805160ff9092168252519081900360200190f35b820191906000526020600020905b815481529060010190602001808311610a6057829003601f168201915b505050505090506101cc565b6004546000600260018316156101000260001901909216919091041115610ab35760009150610994565b610abb6105cf565b905060008114158015610acf575060018114155b8015610adc575060028114155b15610aea5760009150610994565b604080517f7c0278fc000000000000000000000000000000000000000000000000000000008152600360048201818152602483019384523660448401819052731deeda36e15ec9e80f3d7414d67a4803ae45fc8094637c0278fc946000939190606401848480828437820191505094505050505060006040518083038160008760325a03f215610002575050505090565b1415610c8557604080516001547f0fee183d000000000000000000000000000000000000000000000000000000008252600360048301523360016
0a060020a0316602483015234604483015260648201529051731deeda36e15ec9e80f3d7414d67a4803ae45fc8091630fee183d916084828101926020929190829003018160008760325a03f21561000257505060405151925050811515610c8a577389efe605e9ecbe22849cd85d5449cc946c26f8f36312c82bcc33346040518360e060020a0281526004018083600160a060020a031681526020018281526020019250505060206040518083038160008760325a03f2156100025750506040515115159050610c8a57610002565b505090565b81925050610994565b505b50565b1415610c93575a9150610cab3383610481565b1515610cb75750610c95565b731deeda36e15ec9e80f3d7414d67a4803ae45fc8063da46be0a60038433610cdd61046e565b610ce5610232565b6040518660e060020a0281526004018086815260200185815260200184600160a060020a031681526020018381526020018281526020019550505050505060006040518083038160008760325a03f21561000257505050610c933360408051600080547fc17e6817000000000000000000000000000000000000000000000000000000008352600160a060020a03908116600484015230163160248301529151731deeda36e15ec9e80f3d7414d67a4803ae45fc809263c17e68179260448082019360209390928390039091019082908760325a03f2156100025750505050565b30600160a060020a031660405180807f5f5f6469672875696e7432353629000000000000000000000000000000000000815260200150600e019050604051809103902060e060020a8091040260e060020a9004600184036040518260e060020a0281526004018082815260200191505060006040518083038160008760325a03f292505050151561084f5761000256" + }, + "0x7dd677b54fc954824a7bc49bd26cbdfa12c75adf": { + "balance": "0xd7a58f5b73b4b6c4", + "code": 
"0x606060405236156100985760e060020a60003504633896002781146100e15780633defb962146100ea5780633f4be8891461010c5780634136aa351461011f5780634a420138146101a057806369c1a7121461028c5780638129fc1c146102955780638da5cb5b146102a6578063ae45850b146102b8578063af3309d8146102cc578063ea8a1af0146102d5578063ead50da3146102f4575b610308671bc16d674ec8000030600160a060020a03163110156100df57600554604051600160a060020a03918216916000913091909116319082818181858883f150505050505b565b61030a60005481565b610308671bc16d674ec8000030600160a060020a031631101561040f576100df565b61031c600454600160a060020a03165b90565b61030a5b600080548190118015610199575060408051600480547f0a16697a0000000000000000000000000000000000000000000000000000000083529251600160a060020a039390931692630a16697a928083019260209291829003018187876161da5a03f1156100025750506040515160ff01431090505b905061011c565b6103085b600354600554604080517f8c0e156d0000000000000000000000000000000000000000000000000000000081527f3defb96200000000000000000000000000000000000000000000000000000000600482015260a060020a90920461ffff1643016024830152621e8480604483015251600092600160a060020a031691638c0e156d916729a2241af62c000091606481810192602092909190829003018185886185025a03f1156100025750506040515192600160a060020a0384161491506102899050576004805473ffffffffffffffffffffffffffffffffffffffff1916821790555b50565b61030a60015481565b61030860008054146103f2576100df565b61031c600554600160a060020a031681565b61031c600354600160a060020a031661011c565b61030a60025481565b610308600554600160a060020a03908116339091161461035157610002565b61033960055460a060020a900461ffff1681565b005b60408051918252519081900360200190f35b60408051600160a060020a03929092168252519081900360200190f35b6040805161ffff929092168252519081900360200190f35b6004546000600160a060020a03919091163111156103c75760408051600480547fea8a1af00000000000000000000000000000000000000000000000000000000083529251600160a060020a03939093169263ea8a1af0928083019260009291829003018183876161da5a03f115610002575050505b600554604051600160a060020a03918216916000913091909116319082818
181858883f15050505050565b426000556100df6101a4565b600280546001908101909155429055565b600454600160a060020a03908116339091161461042b576100df565b610433610123565b151561043e576100df565b6103fe6101a456", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000001": "0x0000000000000000000000000000000000000000000000000000000056d0009b", + "0x0000000000000000000000000000000000000000000000000000000000000002": "0x000000000000000000000000000000000000000000000000000000000000008b", + "0x0000000000000000000000000000000000000000000000000000000000000004": "0x000000000000000000000000741467b251fca923d6229c4b439078b55dca233b" + } + }, + "0xb834e3edfc1a927bdcecb67a9d0eccbd752a5bb3": { + "balance": "0xffe9b09a5c474dca", + "nonce": 975 + }, + "0xd3cda913deb6f67967b99d67acdfa1712c293601": { + "balance": "0x4f5807198e238f13e", + "nonce": 283 + } + }, + "post": { + "0x651913977e8140c323997fce5e03c19e0015eebf": { + "balance": "0x29a2241af62c0000", + "code": "0x606060405236156101a05760e060020a60003504630924120081146101c25780630a16697a146101cf5780630fd1f94e146101d8578063137c638b1461022e57806321835af61461023b57806324032866146102545780632f95b833146102d65780633017fe24146102e55780633233c686146102ef57806337f4c00e146102fa5780634500054f146103055780634e417a98146103785780634e71d92d146103e15780634f059a43146103f35780636146195414610451578063625cc4651461046157806367ce940d1461046a5780637d298ee314610477578063830953ab146104f9578063938b5f321461050457806395ee122114610516578063974654f41461052a578063a06db7dc14610535578063a9d2293d14610541578063ae45850b14610597578063b0f07e44146105a9578063c19d93fb146105cb578063c6502da81461062e578063c680362214610637578063ca94692d1461064a578063cc3471af14610673578063d379be23146106c9578063d62457f6146106e3578063ea8a1af0146106ee578063f5562753146107f3578063f6b4dfb414610854575b610868600080548190600160a060020a03908116339091161461087a57610994565b610868600b5460ff165b90565b610868600d5481565b6108686000731deeda36e15ec9e80f3d7414d67a4803ae45fc80630fd1f94e6040518160e060020a028152
60040180905060206040518083038160008760325a03f2156100025750506040515191506101cc9050565b6108685b62012cc86101cc565b61086860043560008160001415610dc65750600161084f565b6108686004356024356000731deeda36e15ec9e80f3d7414d67a4803ae45fc80630bd295e6600360005085856040518460e060020a0281526004018084815260200183600160a060020a03168152602001828152602001935050505060206040518083038160008760325a03f215610002575050604051519150505b92915050565b61099860085461ffff166101cc565b61086860026101cc565b610868600a546101cc565b6108686006546101cc565b6108686000731deeda36e15ec9e80f3d7414d67a4803ae45fc8063a09431546003600050336040518360e060020a0281526004018083815260200182600160a060020a031681526020019250505060206040518083038160008760325a03f2156100025750506040515191506101cc9050565b6109af60408051602081810183526000825282516004805460026001821615610100026000190190911604601f81018490048402830184019095528482529293909291830182828015610a7d5780601f10610a5257610100808354040283529160200191610a7d565b61086860006000600180610b7b6105cf565b6108686000731deeda36e15ec9e80f3d7414d67a4803ae45fc8063f5562753436040518260e060020a0281526004018082815260200191505060206040518083038160008760325a03f2156100025750506040515191506101cc9050565b610a1d6000600480610c986105cf565b61086860025481565b6108685b620186a06101cc565b6108686004356024355b6000731deeda36e15ec9e80f3d7414d67a4803ae45fc8063a1873db6600360005085856040518460e060020a0281526004018084815260200183600160a060020a03168152602001828152602001935050505060206040518083038160008760325a03f2156100025750506040515191506102d09050565b6108686009546101cc565b610a1f600c54600160a060020a031681565b610868600b5462010000900460ff166101cc565b6108686007546101cc565b610a3c600e5460ff1681565b6108686000731deeda36e15ec9e80f3d7414d67a4803ae45fc8063a9d2293d6040518160e060020a02815260040180905060206040518083038160008760325a03f2156100025750506040515191506101cc9050565b610a1f600054600160a060020a031681565b610868600080548190600160a060020a039081163390911614610a8957610994565b6108685b6000731deeda36e15ec9e80f3d7414d67a4803ae45fc80635054d98a
60036000506040518260e060020a0281526004018082815260200191505060206040518083038160008760325a03f2156100025750506040515191506101cc9050565b61086860015481565b610868600b54610100900460ff166101cc565b61086860035474010000000000000000000000000000000000000000900460e060020a026101cc565b6108686000731deeda36e15ec9e80f3d7414d67a4803ae45fc8063cc3471af6040518160e060020a02815260040180905060206040518083038160008760325a03f2156100025750506040515191506101cc9050565b610a1f600854620100009004600160a060020a03166101cc565b6108686005546101cc565b610a1d604080517fa09431540000000000000000000000000000000000000000000000000000000081526003600482015233600160a060020a031660248201529051731deeda36e15ec9e80f3d7414d67a4803ae45fc809163a0943154916044808301926020929190829003018160008760325a03f215610002575050604051511590506107f157604080517f7e9265620000000000000000000000000000000000000000000000000000000081526003600482015233600160a060020a031660248201529051731deeda36e15ec9e80f3d7414d67a4803ae45fc8091637e9265629160448083019260009291908290030181838760325a03f215610002575050505b565b6108686004356000731deeda36e15ec9e80f3d7414d67a4803ae45fc8063f5562753836040518260e060020a0281526004018082815260200191505060206040518083038160008760325a03f215610002575050604051519150505b919050565b610a1f600354600160a060020a03166101cc565b60408051918252519081900360200190f35b60045460006002600183161561010002600019019092169190910411156108a45760009150610994565b6108ac6105cf565b9050600081141580156108c0575060018114155b80156108cd575060028114155b156108db5760009150610994565b600480546000828152602060026001841615610100026000190190931692909204601f908101929092047f8a35acfbc15ff81a39ae7d344fd709f28e8600b4aa8c65c6b64bfe7fe36bd19b9081019236929083901061095d5782800160ff198235161785555b5061098d9291505b808211156109945760008155600101610949565b82800160010185558215610941579182015b8281111561094157823582600050559160200191906001019061096f565b5050600191505b5090565b6040805161ffff9092168252519081900360200190f35b604051808060200182810382528381815181526020019150805190602001908083838290
60006004602084601f0104600f02600301f150905090810190601f168015610a0f5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b005b60408051600160a060020a03929092168252519081900360200190f35b6040805160ff9092168252519081900360200190f35b820191906000526020600020905b815481529060010190602001808311610a6057829003601f168201915b505050505090506101cc565b6004546000600260018316156101000260001901909216919091041115610ab35760009150610994565b610abb6105cf565b905060008114158015610acf575060018114155b8015610adc575060028114155b15610aea5760009150610994565b604080517f7c0278fc000000000000000000000000000000000000000000000000000000008152600360048201818152602483019384523660448401819052731deeda36e15ec9e80f3d7414d67a4803ae45fc8094637c0278fc946000939190606401848480828437820191505094505050505060006040518083038160008760325a03f215610002575050505090565b1415610c8557604080516001547f0fee183d0000000000000000000000000000000000000000000000000000000082526003600483015233600160a060020a0316602483015234604483015260648201529051731deeda36e15ec9e80f3d7414d67a4803ae45fc8091630fee183d916084828101926020929190829003018160008760325a03f21561000257505060405151925050811515610c8a577389efe605e9ecbe22849cd85d5449cc946c26f8f36312c82bcc33346040518360e060020a0281526004018083600160a060020a031681526020018281526020019250505060206040518083038160008760325a03f2156100025750506040515115159050610c8a57610002565b505090565b81925050610994565b505b50565b1415610c93575a9150610cab3383610481565b1515610cb75750610c95565b731deeda36e15ec9e80f3d7414d67a4803ae45fc8063da46be0a60038433610cdd61046e565b610ce5610232565b6040518660e060020a0281526004018086815260200185815260200184600160a060020a031681526020018381526020018281526020019550505050505060006040518083038160008760325a03f21561000257505050610c933360408051600080547fc17e6817000000000000000000000000000000000000000000000000000000008352600160a060020a03908116600484015230163160248301529151731deeda36e15ec9e80f3d7414d67a4803ae45fc809263c17e68179260448082019360209390928390039091019082908760325a03f2
156100025750505050565b30600160a060020a031660405180807f5f5f6469672875696e7432353629000000000000000000000000000000000000815260200150600e019050604051809103902060e060020a8091040260e060020a9004600184036040518260e060020a0281526004018082815260200191505060006040518083038160008760325a03f292505050151561084f5761000256", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000000": "0x0000000000000000000000007dd677b54fc954824a7bc49bd26cbdfa12c75adf", + "0x0000000000000000000000000000000000000000000000000000000000000001": "0x0000000000000000000000000000000000000000000000000011f8119429ed3a", + "0x0000000000000000000000000000000000000000000000000000000000000002": "0x00000000000000000000000000000000000000000000000000002e002d006b55", + "0x0000000000000000000000000000000000000000000000000000000000000003": "0x00000000000000003defb9627dd677b54fc954824a7bc49bd26cbdfa12c75adf", + "0x0000000000000000000000000000000000000000000000000000000000000006": "0x0000000000000000000000000000000000000000000000000000000ba43b7400", + "0x0000000000000000000000000000000000000000000000000000000000000007": "0x00000000000000000000000000000000000000000000000000000000001e8480", + "0x0000000000000000000000000000000000000000000000000000000000000008": "0x000000000000000000000000000000000000000000000000000000000000000a", + "0x000000000000000000000000000000000000000000000000000000000000000c": "0x0000000000000000000000006c8f2a135f6ed072de4503bd7c4999a1a17f824b", + "0x000000000000000000000000000000000000000000000000000000000000000d": "0x0000000000000000000000000000000000000000000000000000000000103847", + "0x000000000000000000000000000000000000000000000000000000000000000e": "0x00000000000000000000000000000000000000000000000000000000000000ff" + } + }, + "0x6c8f2a135f6ed072de4503bd7c4999a1a17f824b": { + "nonce": 238, + "storage": { + "0x26cba0705aade77fa0f9275b68d01fb71206a44abd3a4f5a838f7241efbc8abf": "0x000000000000000000000000d7b0e93fa8386b17fb5d1cf934076203dcc122f3", + 
"0x49f03a2c2f4fd666a32141fb324283b6f84a1d07b5fa435669fdb55766aef715": "0x00000000000000000000000042e69cd0a17ae9992f9ad93d136c4bb0d95e3230", + "0x95e05d02b91af970cb4998107e8613455258880676e00b819c12d675e60de5bc": "0x000000000000000000000000a4d91b341f0e9a7000be916a668408b463f4c38c", + "0x95e05d02b91af970cb4998107e8613455258880676e00b819c12d675e60de5bd": "0x000000000000000000000000d7b0e93fa8386b17fb5d1cf934076203dcc122f3", + "0x95e05d02b91af970cb4998107e8613455258880676e00b819c12d675e60de5bf": "0x0000000000000000000000000000000000000000000000000000000000000003", + "0x99d5294a34e2d6d560a223237786adc8b5651c09094b9ecd56e6ae7abc2a2751": "0x000000000000000000000000651913977e8140c323997fce5e03c19e0015eebf", + "0x99d5294a34e2d6d560a223237786adc8b5651c09094b9ecd56e6ae7abc2a2752": "0x0000000000000000000000000000000000000000000000000000000000103847", + "0x99d5294a34e2d6d560a223237786adc8b5651c09094b9ecd56e6ae7abc2a2753": "0x000000000000000000000000741467b251fca923d6229c4b439078b55dca233b", + "0x99d5294a34e2d6d560a223237786adc8b5651c09094b9ecd56e6ae7abc2a2756": "0x0000000000000000000000000000000000000000000000000000000000000001", + "0xd3a5582b3eff6ef8ee90f3962e9d598a3f4b7d07840356c9b8fd7b494879b4f1": "0x00000000000000000000000042e69cd0a17ae9992f9ad93d136c4bb0d95e3230", + "0xd3a5582b3eff6ef8ee90f3962e9d598a3f4b7d07840356c9b8fd7b494879b4f3": "0x000000000000000000000000c9a2bfd279fe57e7651e5d9f29bb1793c9a1cf01", + "0xd3a5582b3eff6ef8ee90f3962e9d598a3f4b7d07840356c9b8fd7b494879b4f4": "0x0000000000000000000000000000000000000000000000000000000000000002", + "0xf7518490c515b9fc8e7fe713b647fe88eacefc92d616fa9454e61fe9aab64dbb": "0x000000000000000000000000651913977e8140c323997fce5e03c19e0015eebf", + "0xf7518490c515b9fc8e7fe713b647fe88eacefc92d616fa9454e61fe9aab64dbc": "0x0000000000000000000000000000000000000000000000000000000000000002" + } + }, + "0x741467b251fca923d6229c4b439078b55dca233b": { + "balance": "0x0", + "storage": { + 
"0x000000000000000000000000000000000000000000000000000000000000000b": "0x0000000000000000000000000000000000000000000000000000000000000101" + } + }, + "0x7dd677b54fc954824a7bc49bd26cbdfa12c75adf": { + "balance": "0xd6c5f42b8502a0e3", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000001": "0x0000000000000000000000000000000000000000000000000000000056d020be", + "0x0000000000000000000000000000000000000000000000000000000000000002": "0x000000000000000000000000000000000000000000000000000000000000008c", + "0x0000000000000000000000000000000000000000000000000000000000000004": "0x000000000000000000000000651913977e8140c323997fce5e03c19e0015eebf" + } + }, + "0xb834e3edfc1a927bdcecb67a9d0eccbd752a5bb3": { + "balance": "0x10002e64ebd492a46", + "nonce": 976 + }, + "0xd3cda913deb6f67967b99d67acdfa1712c293601": { + "balance": "0x4f5809f97e1c8bb9b" + } + } + } +} diff --git a/coreth/eth/tracers/internal/tracetest/testdata/prestate_tracer_with_diff_mode/simple.json b/coreth/eth/tracers/internal/tracetest/testdata/prestate_tracer_with_diff_mode/simple.json new file mode 100644 index 00000000..3b89ccd8 --- /dev/null +++ b/coreth/eth/tracers/internal/tracetest/testdata/prestate_tracer_with_diff_mode/simple.json @@ -0,0 +1,97 @@ +{ + "context": { + "difficulty": "3502894804", + "gasLimit": "4722976", + "miner": "0x1585936b53834b021f68cc13eeefdec2efc8e724", + "number": "2289806", + "timestamp": "1513601314" + }, + "genesis": { + "alloc": { + "0x0024f658a46fbb89d8ac105e98d7ac7cbbaf27c5": { + "balance": "0x0", + "code": "0x", + "nonce": "22", + "storage": {} + }, + "0x3b873a919aa0512d5a0f09e6dcceaa4a6727fafe": { + "balance": "0x4d87094125a369d9bd5", + "code": 
"0x606060405236156100935763ffffffff60e060020a60003504166311ee8382811461009c57806313af4035146100be5780631f5e8f4c146100ee57806324daddc5146101125780634921a91a1461013b57806363e4bff414610157578063764978f91461017f578063893d20e8146101a1578063ba40aaa1146101cd578063cebc9a82146101f4578063e177246e14610216575b61009a5b5b565b005b34156100a457fe5b6100ac61023d565b60408051918252519081900360200190f35b34156100c657fe5b6100da600160a060020a0360043516610244565b604080519115158252519081900360200190f35b34156100f657fe5b6100da610307565b604080519115158252519081900360200190f35b341561011a57fe5b6100da6004351515610318565b604080519115158252519081900360200190f35b6100da6103d6565b604080519115158252519081900360200190f35b6100da600160a060020a0360043516610420565b604080519115158252519081900360200190f35b341561018757fe5b6100ac61046c565b60408051918252519081900360200190f35b34156101a957fe5b6101b1610473565b60408051600160a060020a039092168252519081900360200190f35b34156101d557fe5b6100da600435610483565b604080519115158252519081900360200190f35b34156101fc57fe5b6100ac61050d565b60408051918252519081900360200190f35b341561021e57fe5b6100da600435610514565b604080519115158252519081900360200190f35b6003545b90565b60006000610250610473565b600160a060020a031633600160a060020a03161415156102705760006000fd5b600160a060020a03831615156102865760006000fd5b50600054600160a060020a0390811690831681146102fb57604051600160a060020a0380851691908316907ffcf23a92150d56e85e3a3d33b357493246e55783095eb6a733eb8439ffc752c890600090a360008054600160a060020a031916600160a060020a03851617905560019150610300565b600091505b5b50919050565b60005460a060020a900460ff165b90565b60006000610324610473565b600160a060020a031633600160a060020a03161415156103445760006000fd5b5060005460a060020a900460ff16801515831515146102fb576000546040805160a060020a90920460ff1615158252841515602083015280517fe6cd46a119083b86efc6884b970bfa30c1708f53ba57b86716f15b2f4551a9539281900390910190a16000805460a060020a60ff02191660a060020a8515150217905560019150610300565b600091505b5b50919050565b60006103e0610307565b80156104055
7506103ef610473565b600160a060020a031633600160a060020a031614155b156104105760006000fd5b610419336105a0565b90505b5b90565b600061042a610307565b801561044f5750610439610473565b600160a060020a031633600160a060020a031614155b1561045a5760006000fd5b610463826105a0565b90505b5b919050565b6001545b90565b600054600160a060020a03165b90565b6000600061048f610473565b600160a060020a031633600160a060020a03161415156104af5760006000fd5b506001548281146102fb57604080518281526020810185905281517f79a3746dde45672c9e8ab3644b8bb9c399a103da2dc94b56ba09777330a83509929181900390910190a160018381559150610300565b600091505b5b50919050565b6002545b90565b60006000610520610473565b600160a060020a031633600160a060020a03161415156105405760006000fd5b506002548281146102fb57604080518281526020810185905281517ff6991a728965fedd6e927fdf16bdad42d8995970b4b31b8a2bf88767516e2494929181900390910190a1600283905560019150610300565b600091505b5b50919050565b60006000426105ad61023d565b116102fb576105c46105bd61050d565b4201610652565b6105cc61046c565b604051909150600160a060020a038416908290600081818185876187965a03f1925050501561063d57604080518281529051600160a060020a038516917f9bca65ce52fdef8a470977b51f247a2295123a4807dfa9e502edf0d30722da3b919081900360200190a260019150610300565b6102fb42610652565b5b600091505b50919050565b60038190555b505600a165627a7a72305820f3c973c8b7ed1f62000b6701bd5b708469e19d0f1d73fde378a56c07fd0b19090029", + "nonce": "1", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000000": "0x000000000000000000000001b436ba50d378d4bbc8660d312a13df6af6e89dfb", + "0x0000000000000000000000000000000000000000000000000000000000000001": "0x00000000000000000000000000000000000000000000000006f05b59d3b20000", + "0x0000000000000000000000000000000000000000000000000000000000000002": "0x000000000000000000000000000000000000000000000000000000000000003c", + "0x0000000000000000000000000000000000000000000000000000000000000003": "0x000000000000000000000000000000000000000000000000000000005a37b834" + } + }, + 
"0xb436ba50d378d4bbc8660d312a13df6af6e89dfb": { + "balance": "0x1780d77678137ac1b775", + "code": "0x", + "nonce": "29072", + "storage": {} + } + }, + "config": { + "byzantiumBlock": 1700000, + "chainId": 3, + "daoForkSupport": true, + "eip150Block": 0, + "eip150Hash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d", + "eip155Block": 10, + "eip158Block": 10, + "ethash": {}, + "homesteadBlock": 0 + }, + "difficulty": "3509749784", + "extraData": "0x4554482e45544846414e532e4f52472d4641313738394444", + "gasLimit": "4727564", + "hash": "0x609948ac3bd3c00b7736b933248891d6c901ee28f066241bddb28f4e00a9f440", + "miner": "0xbbf5029fd710d227630c8b7d338051b8e76d50b3", + "mixHash": "0xb131e4507c93c7377de00e7c271bf409ec7492767142ff0f45c882f8068c2ada", + "nonce": "0x4eb12e19c16d43da", + "number": "2289805", + "stateRoot": "0xc7f10f352bff82fac3c2999d3085093d12652e19c7fd32591de49dc5d91b4f1f", + "timestamp": "1513601261", + "totalDifficulty": "7143276353481064" + }, + "input": "0xf88b8271908506fc23ac0083015f90943b873a919aa0512d5a0f09e6dcceaa4a6727fafe80a463e4bff40000000000000000000000000024f658a46fbb89d8ac105e98d7ac7cbbaf27c52aa0bdce0b59e8761854e857fe64015f06dd08a4fbb7624f6094893a79a72e6ad6bea01d9dde033cff7bb235a3163f348a6d7ab8d6b52bc0963a95b91612e40ca766a4", + "tracerConfig": { + "diffMode": true + }, + "result": { + "pre": { + "0x0024f658a46fbb89d8ac105e98d7ac7cbbaf27c5": { + "balance": "0x0", + "nonce": 22 + }, + "0x3b873a919aa0512d5a0f09e6dcceaa4a6727fafe": { + "balance": "0x4d87094125a369d9bd5", + "nonce": 1, + "code": 
"0x606060405236156100935763ffffffff60e060020a60003504166311ee8382811461009c57806313af4035146100be5780631f5e8f4c146100ee57806324daddc5146101125780634921a91a1461013b57806363e4bff414610157578063764978f91461017f578063893d20e8146101a1578063ba40aaa1146101cd578063cebc9a82146101f4578063e177246e14610216575b61009a5b5b565b005b34156100a457fe5b6100ac61023d565b60408051918252519081900360200190f35b34156100c657fe5b6100da600160a060020a0360043516610244565b604080519115158252519081900360200190f35b34156100f657fe5b6100da610307565b604080519115158252519081900360200190f35b341561011a57fe5b6100da6004351515610318565b604080519115158252519081900360200190f35b6100da6103d6565b604080519115158252519081900360200190f35b6100da600160a060020a0360043516610420565b604080519115158252519081900360200190f35b341561018757fe5b6100ac61046c565b60408051918252519081900360200190f35b34156101a957fe5b6101b1610473565b60408051600160a060020a039092168252519081900360200190f35b34156101d557fe5b6100da600435610483565b604080519115158252519081900360200190f35b34156101fc57fe5b6100ac61050d565b60408051918252519081900360200190f35b341561021e57fe5b6100da600435610514565b604080519115158252519081900360200190f35b6003545b90565b60006000610250610473565b600160a060020a031633600160a060020a03161415156102705760006000fd5b600160a060020a03831615156102865760006000fd5b50600054600160a060020a0390811690831681146102fb57604051600160a060020a0380851691908316907ffcf23a92150d56e85e3a3d33b357493246e55783095eb6a733eb8439ffc752c890600090a360008054600160a060020a031916600160a060020a03851617905560019150610300565b600091505b5b50919050565b60005460a060020a900460ff165b90565b60006000610324610473565b600160a060020a031633600160a060020a03161415156103445760006000fd5b5060005460a060020a900460ff16801515831515146102fb576000546040805160a060020a90920460ff1615158252841515602083015280517fe6cd46a119083b86efc6884b970bfa30c1708f53ba57b86716f15b2f4551a9539281900390910190a16000805460a060020a60ff02191660a060020a8515150217905560019150610300565b600091505b5b50919050565b60006103e0610307565b80156104055
7506103ef610473565b600160a060020a031633600160a060020a031614155b156104105760006000fd5b610419336105a0565b90505b5b90565b600061042a610307565b801561044f5750610439610473565b600160a060020a031633600160a060020a031614155b1561045a5760006000fd5b610463826105a0565b90505b5b919050565b6001545b90565b600054600160a060020a03165b90565b6000600061048f610473565b600160a060020a031633600160a060020a03161415156104af5760006000fd5b506001548281146102fb57604080518281526020810185905281517f79a3746dde45672c9e8ab3644b8bb9c399a103da2dc94b56ba09777330a83509929181900390910190a160018381559150610300565b600091505b5b50919050565b6002545b90565b60006000610520610473565b600160a060020a031633600160a060020a03161415156105405760006000fd5b506002548281146102fb57604080518281526020810185905281517ff6991a728965fedd6e927fdf16bdad42d8995970b4b31b8a2bf88767516e2494929181900390910190a1600283905560019150610300565b600091505b5b50919050565b60006000426105ad61023d565b116102fb576105c46105bd61050d565b4201610652565b6105cc61046c565b604051909150600160a060020a038416908290600081818185876187965a03f1925050501561063d57604080518281529051600160a060020a038516917f9bca65ce52fdef8a470977b51f247a2295123a4807dfa9e502edf0d30722da3b919081900360200190a260019150610300565b6102fb42610652565b5b600091505b50919050565b60038190555b505600a165627a7a72305820f3c973c8b7ed1f62000b6701bd5b708469e19d0f1d73fde378a56c07fd0b19090029", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000003": "0x000000000000000000000000000000000000000000000000000000005a37b834" + } + }, + "0xb436ba50d378d4bbc8660d312a13df6af6e89dfb": { + "balance": "0x1780d77678137ac1b775", + "nonce": 29072 + } + }, + "post": { + "0x0024f658a46fbb89d8ac105e98d7ac7cbbaf27c5": { + "balance": "0x6f05b59d3b20000" + }, + "0x3b873a919aa0512d5a0f09e6dcceaa4a6727fafe": { + "balance": "0x4d869a3b70062eb9bd5", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000003": "0x000000000000000000000000000000000000000000000000000000005a37b95e" + } + }, + 
"0xb436ba50d378d4bbc8660d312a13df6af6e89dfb": { + "balance": "0x1780d7725724a9044b75", + "nonce": 29073 + } + } + } +} diff --git a/coreth/eth/tracers/internal/tracetest/testdata/prestate_tracer_with_diff_mode/suicide.json b/coreth/eth/tracers/internal/tracetest/testdata/prestate_tracer_with_diff_mode/suicide.json new file mode 100644 index 00000000..6567ae42 --- /dev/null +++ b/coreth/eth/tracers/internal/tracetest/testdata/prestate_tracer_with_diff_mode/suicide.json @@ -0,0 +1,100 @@ +{ + "genesis": { + "difficulty": "5697691613344", + "extraData": "0xd783010202844765746887676f312e342e32856c696e7578", + "gasLimit": "3141592", + "hash": "0x2004021ae3545cf8abba1ec97a7e401157cee9e847131e2f4c75ce38610040cc", + "miner": "0x52bc44d5378309ee2abf1539bf71de1b7d7be3b5", + "mixHash": "0x651f01d13fb801c602e1544ab80b3bc32888ea40ef298efa52ec3df983b558ee", + "nonce": "0xdf23f0da925518a6", + "number": "422908", + "stateRoot": "0xd914c6440edf9f4a6f997a9b3ecb6e1a9ca2310f74b0b6890c0d0d4a3c28e4d3", + "timestamp": "1445530335", + "totalDifficulty": "2148894717741690476", + "alloc": { + "0x2861bf89b6c640c79040d357c1e9513693ef5d3f": { + "balance": "0x0", + "code": 
"0x606060405236156100825760e060020a600035046312055e8f8114610084578063185061da146100b157806322beb9b9146100d5578063245a03ec146101865780633fa4f245146102a657806341c0e1b5146102af578063890eba68146102cb578063b29f0835146102de578063d6b4485914610308578063dd012a15146103b9575b005b6001805474ff0000000000000000000000000000000000000000191660a060020a60043502179055610082565b6100826001805475ff00000000000000000000000000000000000000000019169055565b61008260043560015460e060020a6352afbc3302606090815230600160a060020a039081166064527fb29f0835000000000000000000000000000000000000000000000000000000006084527fc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a47060a45243840160c490815260ff60a060020a85041660e452600061010481905291909316926352afbc339261012492918183876161da5a03f1156100025750505050565b6100826004356024356001547fb0f07e440000000000000000000000000000000000000000000000000000000060609081526064839052600160a060020a039091169063b0f07e449060849060009060248183876161da5a03f150604080516001547f73657449742875696e74323536290000000000000000000000000000000000008252825191829003600e018220878352835192839003602001832060e060020a6352afbc33028452600160a060020a03308116600486015260e060020a9283900490920260248501526044840152438901606484015260a060020a820460ff1694830194909452600060a483018190529251931694506352afbc33935060c48181019391829003018183876161da5a03f115610002575050505050565b6103c460025481565b61008260005433600160a060020a039081169116146103ce575b565b6103c460015460a860020a900460ff1681565b6100826001805475ff000000000000000000000000000000000000000000191660a860020a179055565b61008260043560015460e060020a6352afbc3302606090815230600160a060020a039081166064527f185061da000000000000000000000000000000000000000000000000000000006084527fc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a47060a45243840160c490815260ff60a060020a85041660e452600061010481905291909316926352afbc339261012492918183876161da5a03f1156100025750505050565b600435600255610082565b6060908152602090f35b6001547f6ff96d170000000000000000000000000000000
00000000000000000000000006060908152600160a060020a0330811660645290911690632e1a7d4d908290636ff96d17906084906020906024816000876161da5a03f1156100025750506040805180517f2e1a7d4d0000000000000000000000000000000000000000000000000000000082526004820152905160248281019350600092829003018183876161da5a03f115610002575050600054600160a060020a03169050ff", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000000": "0x000000000000000000000000d3cda913deb6f67967b99d67acdfa1712c293601", + "0x0000000000000000000000000000000000000000000000000000000000000001": "0x0000000000000000000000ff30c9e568f133adce1f1ea91e189613223fc461b9" + } + }, + "0x2a65aca4d5fc5b5c859090a6c34d164135398226": { + "balance": "0x326601cc6cf364f6b9", + "nonce": "12122", + "code": "0x" + }, + "0x30c9e568f133adce1f1ea91e189613223fc461b9": { + "balance": "0x8b83c417dd78000", + "nonce": "2", + "code": "0x606060405236156102ea5760e060020a6000350463022bc71f81146102f757806303d6d7b61461037f578063086ae9e4146103ec57806309c975df146104595780631145a20f146104c657806312d67c5f146104e75780631302188c146104f15780631ae460e5146104fc57806323306ed614610573578063234917d4146105ca57806329917954146106375780632a472ae81461071d5780632e1a7d4d1461078a578063306b031d1461087f57806333613cbe1461089d57806334c19b93146108c257806335b281531461092f5780633664a0ea146109b85780633c941423146109c35780633cbfed7414610a3b57806350a3bd3914610a4957806352afbc3314610a735780635539d40014610c2a5780635a5383ac14610c3e57806360b831e514610cb55780636164947214610d7f578063685c234a14610d8a5780636ffc089614610de0578063741b3c3914610e4d5780637542861514610ed25780637772a38014610f5557806377b19cd514610ff057806378bc64601461105d5780638b37e656146110ca5780638baced64146111375780638dd5e298146111b157806393423e9c146111de57806394d2b21b1461120257806394f3f81d1461121657806398e00e54146112665780639f927be7146112bc578063a502aae81461136a578063a6c01cfd146113e8578063a9743c68146113fa578063aa4cc01f14611467578063b010d94a146114d4578063b0171fa41461154e578063b0ac4c8c146115cc578063b0f07e441461163
5578063b35594601461171c578063c0f6885914611739578063c3daab961461178f578063c630f92b146117bb578063c831391d146117e5578063cd062734146117f0578063d0e30db01461185d578063db681e5414611865578063e40986551461190c578063e850f3ae14611979578063ed2b8e0b146119e6578063f340fa01146119f1578063f828c3fa14611ae8578063f8b1185314611b07578063f9f447eb14611b24578063fc30052214611b91578063fcf3691814611bfe575b6112645b611c86336119f8565b611c88600435604080517fc4144b260000000000000000000000000000000000000000000000000000000081526010600482015260248101839052905160009173ce642b6a82e72147ceade0e72c786ba8eaeb31d79163c4144b26916044818101926020929091908290030181878760325a03f2156100025750506040515191506108989050565b611c8860043560007327b1b436e4699a012cc8698e33c8f3e1c035c28b637d613b346000600050846040518360e060020a028152600401808381526020018281526020019250505060206040518083038160008760325a03f2156100025750506040515191506108989050565b611c8860043560007327b1b436e4699a012cc8698e33c8f3e1c035c28b63da40fd616000600050846040518360e060020a028152600401808381526020018281526020019250505060206040518083038160008760325a03f2156100025750506040515191506108989050565b611c9a60043560007327b1b436e4699a012cc8698e33c8f3e1c035c28b63c68efc486000600050846040518360e060020a028152600401808381526020018281526020019250505060206040518083038160008760325a03f2156100025750506040515191506108989050565b61126460043560243560443560643560843561200185858585856000610a89565b611c886004545b90565b611c886005546104ee565b611c886040805160e160020a6333f8a36702815260066004820152600160a060020a0333166024820152905160009173c895c144d0b0f88417cf9e14e03e6abc82c0af3f916367f146ce916044818101926020929091908290030181878760325a03f2156100025750506040515191506104ee9050565b611c885b60007327b1b436e4699a012cc8698e33c8f3e1c035c28b6323306ed66040518160e060020a02815260040180905060206040518083038160008760325a03f2156100025750506040515191506104ee9050565b611c8860043560007327b1b436e4699a012cc8698e33c8f3e1c035c28b63e99a66856000600050846040518360e060020a028152600401808381526020018281526020019250505060206
040518083038160008760325a03f2156100025750506040515191506108989050565b611264604080517f317c152d00000000000000000000000000000000000000000000000000000000815260066004820152600160a060020a0333166024820152905160009173c895c144d0b0f88417cf9e14e03e6abc82c0af3f9163317c152d916044818101926020929091908290030181878760325a03f2156100025750506040805180517ff1173928000000000000000000000000000000000000000000000000000000008252600160a060020a0333166004830152602482018190529151919363f1173928926044838101938290030181838760325a03f2156100025750505050565b611c8860043560007327b1b436e4699a012cc8698e33c8f3e1c035c28b63707378396000600050846040518360e060020a028152600401808381526020018281526020019250505060206040518083038160008760325a03f2156100025750506040515191506108989050565b611264600435604080517fb5bc6dbb00000000000000000000000000000000000000000000000000000000815260126004820152600160a060020a033316602482015260448101839052905173d3cb18959b0435864ff33010fa83be60afc04b229163b5bc6dbb916064828101926020929190829003018160008760325a03f21561000257505060405151159050611d255773d3cb18959b0435864ff33010fa83be60afc04b22637fcf532c33836040518360e060020a0281526004018083600160a060020a031681526020018281526020019250505060006040518083038160008760325a03f21561000257505050611ae5565b611c886004356000818152600e60205260409020600201545b919050565b611c886004355b600160a060020a0381166000908152600f6020526040902054610898565b611c8860043560007327b1b436e4699a012cc8698e33c8f3e1c035c28b63fc4730126000600050846040518360e060020a028152600401808381526020018281526020019250505060206040518083038160008760325a03f2156100025750506040515191506108989050565b611264600435604080517fa95d3e76000000000000000000000000000000000000000000000000000000008152600060048201819052600160a060020a0384811660248401523316604483015291517327b1b436e4699a012cc8698e33c8f3e1c035c28b9263a95d3e7692606481810193918290030181838760325a03f2156100025750505050565b611c886002546104ee565b611c9a60043560243560007327b1b436e4699a012cc8698e33c8f3e1c035c28b6398213db6600060005085856040518460e060020a028152600
40180848152602001838152602001828152602001935050505060206040518083038160008760325a03f215610002575050604051519150610dda9050565b611c886000611e0a336108a4565b611264600073c895c144d0b0f88417cf9e14e03e6abc82c0af3f635748147e600633611ec2610577565b61126460043560243560443560643560843560a4355b604080517ff1924efb000000000000000000000000000000000000000000000000000000008152600060048201819052600160a060020a03338116602484015289166044830152606482018890526084820187905260a4820186905260ff851660c483015260e48201849052915182917327b1b436e4699a012cc8698e33c8f3e1c035c28b9163f1924efb91610104818101926020929091908290030181878760325a03f2156100025750506040805180517f5a1230bf000000000000000000000000000000000000000000000000000000008252600160a060020a0333811660048401528c166024830152604482018b9052606482018a90526084820189905260ff881660a483015260c482018790529151919450635a1230bf9160e48083019260209291908290030181878760325a03f215610002575050604051519183149050612008577327b1b436e4699a012cc8698e33c8f3e1c035c28b6318b753ab82846040518360e060020a028152600401808381526020018281526020019250505060006040518083038160008760325a03f21561000257505050612056565b611c9a600154600160a060020a03166104ee565b611c886040805160e560020a6304b47bb902815260066004820152600160a060020a0333166024820152905160009173c895c144d0b0f88417cf9e14e03e6abc82c0af3f9163968f7720916044818101926020929091908290030181878760325a03f2156100025750506040515191506104ee9050565b6112646004357327b1b436e4699a012cc8698e33c8f3e1c035c28b637e853f3d600060005083336040518460e060020a0281526004018084815260200183815260200182600160a060020a03168152602001935050505060206040518083038160008760325a03f21561000257505060405151159050611ae5577327b1b436e4699a012cc8698e33c8f3e1c035c28b63ab2af349826040518260e060020a0281526004018082815260200191505060006040518083038160008760325a03f2156100025750505050565b611c886008546104ee565b611c88600435602435604080516c01000000000000000000000000600160a060020a03858116820283528416026014820152815160289181900391909101902060009081526015602052205460ff165b92915050565b611c88600
43560007327b1b436e4699a012cc8698e33c8f3e1c035c28b63b506054f6000600050846040518360e060020a028152600401808381526020018281526020019250505060206040518083038160008760325a03f2156100025750506040515191506108989050565b611264604080517f068e3ef100000000000000000000000000000000000000000000000000000000815260066004820152600160a060020a0333166024820152346044820152905173c895c144d0b0f88417cf9e14e03e6abc82c0af3f9163068e3ef19160648281019260009291908290030181838760325a03f21561000257505050565b611cb76004356040805160208181018352600080835284815260138252838120600d0154815260148252835190849020805460026001821615610100026000190190911604601f81018490048402830184019095528482529293909291830182828015611fb85780601f10611f8d57610100808354040283529160200191611fb8565b611c886004356024355b604080517fa163a32500000000000000000000000000000000000000000000000000000000815260066004820152600160a060020a038416602482015260448101839052905160009173c895c144d0b0f88417cf9e14e03e6abc82c0af3f9163a163a325916064818101926020929091908290030181878760325a03f215610002575050604051519150610dda9050565b611c8860043560007327b1b436e4699a012cc8698e33c8f3e1c035c28b63775f20f96000600050846040518360e060020a028152600401808381526020018281526020019250505060206040518083038160008760325a03f2156100025750506040515191506108989050565b611c8860043560007327b1b436e4699a012cc8698e33c8f3e1c035c28b637517a7c96000600050846040518360e060020a028152600401808381526020018281526020019250505060206040518083038160008760325a03f2156100025750506040515191506108989050565b611c9a60043560007327b1b436e4699a012cc8698e33c8f3e1c035c28b63250687836000600050846040518360e060020a028152600401808381526020018281526020019250505060206040518083038160008760325a03f2156100025750506040515191506108989050565b611c886004356040805160e160020a6333f8a36702815260066004820152600160a060020a0383166024820152905160009173c895c144d0b0f88417cf9e14e03e6abc82c0af3f916367f146ce916044818101926020929091908290030181878760325a03f2156100025750506040515191506108989050565b611c88600435600073c895c144d0b0f88417cf9e14e03e6abc82c0a
f3f6354e37911600684611e6d610577565b611c88600435600160a060020a038116600090815260126020526040902054610898565b611c9a600054600160a060020a03166104ee565b604080516c01000000000000000000000000600435600160a060020a0390811682028352331602601482015281516028918190039190910190206000908152601560205220805460ff191690555b005b611c8860007327b1b436e4699a012cc8698e33c8f3e1c035c28b6398e00e546040518160e060020a02815260040180905060206040518083038160008760325a03f2156100025750506040515191506104ee9050565b611c88600435604080517fe6ce3a6a000000000000000000000000000000000000000000000000000000008152601060048201527f3e3d000000000000000000000000000000000000000000000000000000000000602482015260448101839052905160009173ce642b6a82e72147ceade0e72c786ba8eaeb31d79163e6ce3a6a916064818101926020929091908290030181878760325a03f2156100025750506040515191506108989050565b611c88604080517f8f00e61a00000000000000000000000000000000000000000000000000000000815260066004820152905160009173c895c144d0b0f88417cf9e14e03e6abc82c0af3f91638f00e61a916024818101926020929091908290030181878760325a03f2156100025750506040515191506104ee9050565b611c886004356000611e113383610f5f565b611c8860043560007327b1b436e4699a012cc8698e33c8f3e1c035c28b63dd382dd36000600050846040518360e060020a028152600401808381526020018281526020019250505060206040518083038160008760325a03f2156100025750506040515191506108989050565b611c8860043560007327b1b436e4699a012cc8698e33c8f3e1c035c28b63aebd65476000600050846040518360e060020a028152600401808381526020018281526020019250505060206040518083038160008760325a03f2156100025750506040515191506108989050565b611c886004356040805160e560020a6304b47bb902815260066004820152600160a060020a0383166024820152905160009173c895c144d0b0f88417cf9e14e03e6abc82c0af3f9163968f7720916044818101926020929091908290030181878760325a03f2156100025750506040515191506108989050565b611c88604080517fc75e8f8800000000000000000000000000000000000000000000000000000000815260066004820152905160009173c895c144d0b0f88417cf9e14e03e6abc82c0af3f9163c75e8f889160248181019260209290919082900301818787603
25a03f2156100025750506040515191506104ee9050565b611cb760408051602081810183526000825282516003805460026000196001831615610100020190911604601f81018490048402830184019095528482529293909291830182828015611fef5780601f10611fc457610100808354040283529160200191611fef565b611264604080517fa89713750000000000000000000000000000000000000000000000000000000081526000600482018181526024830193845236604484018190527327b1b436e4699a012cc8698e33c8f3e1c035c28b9463a89713759484939190606401848480828437820191505094505050505060006040518083038160008760325a03f215610002575050604080516005547f321f45840000000000000000000000000000000000000000000000000000000082526004820152905163321f4584916024818101926000929091908290030181838760325a03f21561000257505050565b611c886004356000818152600e6020526040902060030154610898565b611c8860007327b1b436e4699a012cc8698e33c8f3e1c035c28b63c0f688596040518160e060020a02815260040180905060206040518083038160008760325a03f2156100025750506040515191506104ee9050565b61126460043573c895c144d0b0f88417cf9e14e03e6abc82c0af3f63dd8abb6c60063384611db7610577565b611c88600073c895c144d0b0f88417cf9e14e03e6abc82c0af3f6354e37911600633611e18610577565b611c886007546104ee565b611c8860043560007327b1b436e4699a012cc8698e33c8f3e1c035c28b63125935846000600050846040518360e060020a028152600401808381526020018281526020019250505060206040518083038160008760325a03f2156100025750506040515191506108989050565b6112646102ee565b611c886004356000818152601360209081526040805181842060038101546004828101547f38f4c9eb0000000000000000000000000000000000000000000000000000000085526006918501919091526024840182905260ff160160448301529151919273c895c144d0b0f88417cf9e14e03e6abc82c0af3f926338f4c9eb9260648181019392918290030181888760325a03f21561000257505060405151949350505050565b611c8860043560007327b1b436e4699a012cc8698e33c8f3e1c035c28b63fae644646000600050846040518360e060020a028152600401808381526020018281526020019250505060206040518083038160008760325a03f2156100025750506040515191506108989050565b611c8860043560007327b1b436e4699a012cc8698e33c8f3e1c035c28b63b3a5e255600
0600050846040518360e060020a028152600401808381526020018281526020019250505060206040518083038160008760325a03f2156100025750506040515191506108989050565b611c886006546104ee565b6112646004355b604080517fb1df3d8000000000000000000000000000000000000000000000000000000000815260126004820152600160a060020a0383166024820152346044820152905173d3cb18959b0435864ff33010fa83be60afc04b229163b1df3d80916064828101926020929190829003018160008760325a03f215610002575050604080517f5548c837000000000000000000000000000000000000000000000000000000008152600160a060020a033381166004830152841660248201523460448201529051635548c837916064818101926000929091908290030181838760325a03f215610002575050505b50565b611264600435602435604435606435611ffb8484848460ff6000610a89565b611c886004356000818152600e6020526040902060010154610898565b611c8860043560007327b1b436e4699a012cc8698e33c8f3e1c035c28b63c9abdb7c6000600050846040518360e060020a028152600401808381526020018281526020019250505060206040518083038160008760325a03f2156100025750506040515191506108989050565b611c8860043560007327b1b436e4699a012cc8698e33c8f3e1c035c28b6386b0aac96000600050846040518360e060020a028152600401808381526020018281526020019250505060206040518083038160008760325a03f2156100025750506040515191506108989050565b611264600435604080517f25fea09900000000000000000000000000000000000000000000000000000000815260006004820181905260248201849052600160a060020a033316604483015291517327b1b436e4699a012cc8698e33c8f3e1c035c28b926325fea09992606481810193918290030181838760325a03f2156100025750505050565b565b60408051918252519081900360200190f35b60408051600160a060020a03929092168252519081900360200190f35b60405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f168015611d175780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b600160a060020a0333166000818152601260205260408051818320547f5c54305e00000000000000000000000000000000000000000000000000000000825260048201949094526024810185905260448101939093525173d3cb18959b04358
64ff33010fa83be60afc04b2292635c54305e9260648281019391928290030181838760325a03f2156100025750505050565b6040518560e060020a0281526004018085815260200184600160a060020a0316815260200183815260200182815260200194505050505060006040518083038160008760325a03f2156100025750505050565b90506104ee565b9050610898565b6040518460e060020a0281526004018084815260200183600160a060020a03168152602001828152602001935050505060206040518083038160008760325a03f2156100025750506040515191506104ee9050565b6040518460e060020a0281526004018084815260200183600160a060020a03168152602001828152602001935050505060206040518083038160008760325a03f2156100025750506040515191506108989050565b6040518460e060020a0281526004018084815260200183600160a060020a03168152602001828152602001935050505060206040518083038160008760325a03f2156100025750506040805180517f6a704d7b000000000000000000000000000000000000000000000000000000008252600160a060020a033316600483015260248201819052915191935073c895c144d0b0f88417cf9e14e03e6abc82c0af3f9250636a704d7b9160448281019260009291908290030181838760325a03f2156100025750505050565b820191906000526020600020905b815481529060010190602001808311611f9b57829003601f168201915b50505050509050610898565b820191906000526020600020905b815481529060010190602001808311611fd257829003601f168201915b505050505090506104ee565b50505050565b5050505050565b7327b1b436e4699a012cc8698e33c8f3e1c035c28b635ca1bad5826040518260e060020a0281526004018082815260200191505060006040518083038160008760325a03f215610002575050505b505050505050505056", + "storage": { + "0x18b039f13c5f33908f0960616cb3e44029c716366508c54d555096d6e1fa5145": "0x00000000000000000000000000000000000000000000000008b83c417dd78000" + } + }, + "0xd3cb18959b0435864ff33010fa83be60afc04b22": { + "balance": "0x0", + "code": 
"0x650105e11e10f850606060405236156100695760e060020a60003504635548c837811461006e5780635c54305e146100ca5780636b1039661461011e5780637fcf532c14610152578063b1df3d801461019e578063b5bc6dbb146101b7578063e62af6c1146101ee575b610007565b61022060043560243560443581600160a060020a031683600160a060020a03167f47a08955ce2b7f21ea62ff0024e1ea0ad87430953554a87e6bc65d777f18e639836040518082815260200191505060405180910390a3505050565b61022060043560243560443560408051838152602081018390528151600160a060020a038616927f9b24879829bed3003de08d5c5d7e18dcbb8dc76faebd95cafc5d4dec8c61a3a5928290030190a2505050565b6102206004356024356044355b600160a060020a038216600090815260208490526040902054808201101561023457610007565b610220600435602435604080518281529051600160a060020a038416917fd0c5cf41ee8ebf084ad0bce53de7cbc6e4693d9b53a4019ca36a2f91cdc20b3a919081900360200190a25050565b610222600435602435604435600061025784848461012b565b610222600435602435604435600160a060020a0382166000908152602084905260408120548290106102865761028e8484846101fb565b6102206004356024356044355b600160a060020a03821660009081526020849052604090205481111561026257610007565b005b60408051918252519081900360200190f35b600160a060020a0382166000908152602084905260409020805482019055505050565b5060015b9392505050565b600160a060020a038216600090815260208490526040902080548290039055505050565b50600061025b565b604051600160a060020a03841690600090849082818181858883f19350505050151561025757604051600160a060020a038416908390600081818185876185025a03f19250505015156102575761000756" + }, + "0xd3cda913deb6f67967b99d67acdfa1712c293601": { + "balance": "0x1ff0509d9d6821e26", + "nonce": "138", + "code": "0x" + } + }, + "config": { + "chainId": 1, + "homesteadBlock": 1150000, + "daoForkBlock": 1920000, + "daoForkSupport": true, + "eip150Block": 2463000, + "eip150Hash": "0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0", + "eip155Block": 2675000, + "eip158Block": 2675000, + "byzantiumBlock": 4370000, + "constantinopleBlock": 7280000, + "petersburgBlock": 7280000, + "istanbulBlock": 
9069000, + "muirGlacierBlock": 9200000, + "berlinBlock": 12244000, + "londonBlock": 12965000, + "arrowGlacierBlock": 13773000, + "grayGlacierBlock": 15050000, + "ethash": {} + } + }, + "context": { + "number": "422909", + "difficulty": "5694909537365", + "timestamp": "1445530357", + "gasLimit": "3141592", + "miner": "0x2a65aca4d5fc5b5c859090a6c34d164135398226" + }, + "input": "0xf86a818a850ba43b7400832d8a40942861bf89b6c640c79040d357c1e9513693ef5d3f808441c0e1b51ca0b8de64a9a04d699f5938efa5431ca7c80500f6accb329da43aadabd4eab84f17a035b969c198f694be991a2a5b287250e19e852efd0ccba30bd50707277bfbc9aa", + "tracerConfig": { + "diffMode": true + }, + "result": { + "pre": { + "0x2861bf89b6c640c79040d357c1e9513693ef5d3f": { + "balance": "0x0", + "code": "0x606060405236156100825760e060020a600035046312055e8f8114610084578063185061da146100b157806322beb9b9146100d5578063245a03ec146101865780633fa4f245146102a657806341c0e1b5146102af578063890eba68146102cb578063b29f0835146102de578063d6b4485914610308578063dd012a15146103b9575b005b6001805474ff0000000000000000000000000000000000000000191660a060020a60043502179055610082565b6100826001805475ff00000000000000000000000000000000000000000019169055565b61008260043560015460e060020a6352afbc3302606090815230600160a060020a039081166064527fb29f0835000000000000000000000000000000000000000000000000000000006084527fc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a47060a45243840160c490815260ff60a060020a85041660e452600061010481905291909316926352afbc339261012492918183876161da5a03f1156100025750505050565b6100826004356024356001547fb0f07e440000000000000000000000000000000000000000000000000000000060609081526064839052600160a060020a039091169063b0f07e449060849060009060248183876161da5a03f150604080516001547f73657449742875696e74323536290000000000000000000000000000000000008252825191829003600e018220878352835192839003602001832060e060020a6352afbc33028452600160a060020a03308116600486015260e060020a9283900490920260248501526044840152438901606484015260a060020a820460ff1694830194909
452600060a483018190529251931694506352afbc33935060c48181019391829003018183876161da5a03f115610002575050505050565b6103c460025481565b61008260005433600160a060020a039081169116146103ce575b565b6103c460015460a860020a900460ff1681565b6100826001805475ff000000000000000000000000000000000000000000191660a860020a179055565b61008260043560015460e060020a6352afbc3302606090815230600160a060020a039081166064527f185061da000000000000000000000000000000000000000000000000000000006084527fc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a47060a45243840160c490815260ff60a060020a85041660e452600061010481905291909316926352afbc339261012492918183876161da5a03f1156100025750505050565b600435600255610082565b6060908152602090f35b6001547f6ff96d17000000000000000000000000000000000000000000000000000000006060908152600160a060020a0330811660645290911690632e1a7d4d908290636ff96d17906084906020906024816000876161da5a03f1156100025750506040805180517f2e1a7d4d0000000000000000000000000000000000000000000000000000000082526004820152905160248281019350600092829003018183876161da5a03f115610002575050600054600160a060020a03169050ff", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000000": "0x000000000000000000000000d3cda913deb6f67967b99d67acdfa1712c293601", + "0x0000000000000000000000000000000000000000000000000000000000000001": "0x0000000000000000000000ff30c9e568f133adce1f1ea91e189613223fc461b9" + } + }, + "0xd3cda913deb6f67967b99d67acdfa1712c293601": { + "balance": "0x1ff0509d9d6821e26", + "nonce": 138 + } + }, + "post": { + "0xd3cda913deb6f67967b99d67acdfa1712c293601": { + "balance": "0x1ff01e7e76afa4226", + "nonce": 139 + } + } + } +} diff --git a/coreth/eth/tracers/internal/tracetest/util.go b/coreth/eth/tracers/internal/tracetest/util.go new file mode 100644 index 00000000..3cecbf84 --- /dev/null +++ b/coreth/eth/tracers/internal/tracetest/util.go @@ -0,0 +1,81 @@ +// (c) 2023, Ava Labs, Inc. 
+// +// This file is a derived work, based on the go-ethereum library whose original +// notices appear below. +// +// It is distributed under a license compatible with the licensing terms of the +// original code from which it is derived. +// +// Much love to the original authors for their work. +package tracetest + +import ( + "strings" + "unicode" + + // Force-load native and js packages, to trigger registration + _ "github.com/ava-labs/coreth/eth/tracers/js" + _ "github.com/ava-labs/coreth/eth/tracers/native" +) + +// To generate a new callTracer test, copy paste the makeTest method below into +// a Geth console and call it with a transaction hash you which to export. + +/* +// makeTest generates a callTracer test by running a prestate reassembled and a +// call trace run, assembling all the gathered information into a test case. +var makeTest = function(tx, rewind) { + // Generate the genesis block from the block, transaction and prestate data + var block = eth.getBlock(eth.getTransaction(tx).blockHash); + var genesis = eth.getBlock(block.parentHash); + + delete genesis.gasUsed; + delete genesis.logsBloom; + delete genesis.parentHash; + delete genesis.receiptsRoot; + delete genesis.sha3Uncles; + delete genesis.size; + delete genesis.transactions; + delete genesis.transactionsRoot; + delete genesis.uncles; + + genesis.gasLimit = genesis.gasLimit.toString(); + genesis.number = genesis.number.toString(); + genesis.timestamp = genesis.timestamp.toString(); + + genesis.alloc = debug.traceTransaction(tx, {tracer: "prestateTracer", rewind: rewind}); + for (var key in genesis.alloc) { + var nonce = genesis.alloc[key].nonce; + if (nonce) { + genesis.alloc[key].nonce = nonce.toString(); + } + } + genesis.config = admin.nodeInfo.protocols.eth.config; + + // Generate the call trace and produce the test input + var result = debug.traceTransaction(tx, {tracer: "callTracer", rewind: rewind}); + delete result.time; + + console.log(JSON.stringify({ + genesis: genesis, + 
context: { + number: block.number.toString(), + difficulty: block.difficulty, + timestamp: block.timestamp.toString(), + gasLimit: block.gasLimit.toString(), + miner: block.miner, + }, + input: eth.getRawTransaction(tx), + result: result, + }, null, 2)); +} +*/ + +// camel converts a snake cased input string into a camel cased output. +func camel(str string) string { + pieces := strings.Split(str, "_") + for i := 1; i < len(pieces); i++ { + pieces[i] = string(unicode.ToUpper(rune(pieces[i][0]))) + pieces[i][1:] + } + return strings.Join(pieces, "") +} diff --git a/coreth/eth/tracers/js/goja.go b/coreth/eth/tracers/js/goja.go index 4a55f06d..b82bfea3 100644 --- a/coreth/eth/tracers/js/goja.go +++ b/coreth/eth/tracers/js/goja.go @@ -31,7 +31,6 @@ import ( "errors" "fmt" "math/big" - "time" "github.com/dop251/goja" @@ -52,7 +51,16 @@ func init() { if err != nil { panic(err) } - tracers.RegisterLookup(true, newJsTracer) + type ctorFn = func(*tracers.Context, json.RawMessage) (tracers.Tracer, error) + lookup := func(code string) ctorFn { + return func(ctx *tracers.Context, cfg json.RawMessage) (tracers.Tracer, error) { + return newJsTracer(code, ctx, cfg) + } + } + for name, code := range assetTracers { + tracers.DefaultDirectory.Register(name, lookup(code), true) + } + tracers.DefaultDirectory.RegisterJSEval(newJsTracer) } // bigIntProgram is compiled once and the exported function mostly invoked to convert @@ -91,7 +99,7 @@ func fromBuf(vm *goja.Runtime, bufType goja.Value, buf goja.Value, allowString b b := obj.Get("buffer").Export().(goja.ArrayBuffer).Bytes() return b, nil } - return nil, fmt.Errorf("invalid buffer type") + return nil, errors.New("invalid buffer type") } // jsTracer is an implementation of the Tracer interface which evaluates @@ -129,16 +137,14 @@ type jsTracer struct { frameResultValue goja.Value } -// newJsTracer instantiates a new JS tracer instance. 
code is either -// the name of a built-in JS tracer or a Javascript snippet which -// evaluates to an expression returning an object with certain methods. +// newJsTracer instantiates a new JS tracer instance. code is a +// Javascript snippet which evaluates to an expression returning +// an object with certain methods: +// // The methods `result` and `fault` are required to be present. // The methods `step`, `enter`, and `exit` are optional, but note that // `enter` and `exit` always go together. func newJsTracer(code string, ctx *tracers.Context, cfg json.RawMessage) (tracers.Tracer, error) { - if c, ok := assetTracers[code]; ok { - code = c - } vm := goja.New() // By default field names are exported to JS as is, i.e. capitalized. vm.SetFieldNameMapper(goja.UncapFieldNameMapper()) @@ -220,9 +226,11 @@ func (t *jsTracer) CaptureTxStart(gasLimit uint64) { t.gasLimit = gasLimit } -// CaptureTxStart implements the Tracer interface and is invoked at the end of +// CaptureTxEnd implements the Tracer interface and is invoked at the end of // transaction processing. -func (t *jsTracer) CaptureTxEnd(restGas uint64) {} +func (t *jsTracer) CaptureTxEnd(restGas uint64) { + t.ctx["gasUsed"] = t.vm.ToValue(t.gasLimit - restGas) +} // CaptureStart implements the Tracer interface to initialize the tracing operation. 
func (t *jsTracer) CaptureStart(env *vm.EVM, from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) { @@ -237,7 +245,7 @@ func (t *jsTracer) CaptureStart(env *vm.EVM, from common.Address, to common.Addr t.ctx["from"] = t.vm.ToValue(from.Bytes()) t.ctx["to"] = t.vm.ToValue(to.Bytes()) t.ctx["input"] = t.vm.ToValue(input) - t.ctx["gas"] = t.vm.ToValue(gas) + t.ctx["gas"] = t.vm.ToValue(t.gasLimit) t.ctx["gasPrice"] = t.vm.ToValue(env.TxContext.GasPrice) valueBig, err := t.toBig(t.vm, value.String()) if err != nil { @@ -249,7 +257,6 @@ func (t *jsTracer) CaptureStart(env *vm.EVM, from common.Address, to common.Addr // Update list of precompiles based on current block rules := env.ChainConfig().AvalancheRules(env.Context.BlockNumber, env.Context.Time) t.activePrecompiles = vm.ActivePrecompiles(rules) - t.ctx["intrinsicGas"] = t.vm.ToValue(t.gasLimit - gas) } // CaptureState implements the Tracer interface to trace a single step of VM execution. @@ -290,10 +297,8 @@ func (t *jsTracer) CaptureFault(pc uint64, op vm.OpCode, gas, cost uint64, scope } // CaptureEnd is called after the call finishes to finalize the tracing. 
-func (t *jsTracer) CaptureEnd(output []byte, gasUsed uint64, duration time.Duration, err error) { +func (t *jsTracer) CaptureEnd(output []byte, gasUsed uint64, err error) { t.ctx["output"] = t.vm.ToValue(output) - t.ctx["time"] = t.vm.ToValue(duration.String()) - t.ctx["gasUsed"] = t.vm.ToValue(gasUsed) if err != nil { t.ctx["error"] = t.vm.ToValue(err.Error()) } @@ -572,10 +577,11 @@ func (mo *memoryObj) slice(begin, end int64) ([]byte, error) { if end < begin || begin < 0 { return nil, fmt.Errorf("tracer accessed out of bound memory: offset %d, end %d", begin, end) } - if mo.memory.Len() < int(end) { - return nil, fmt.Errorf("tracer accessed out of bound memory: available %d, offset %d, size %d", mo.memory.Len(), begin, end-begin) + slice, err := tracers.GetMemoryCopyPadded(mo.memory, begin, end-begin) + if err != nil { + return nil, err } - return mo.memory.GetCopy(begin, end-begin), nil + return slice, nil } func (mo *memoryObj) GetUint(addr int64) goja.Value { diff --git a/coreth/eth/tracers/js/internal/tracers/call_tracer_legacy.js b/coreth/eth/tracers/js/internal/tracers/call_tracer_legacy.js index 3ca73777..451a644b 100644 --- a/coreth/eth/tracers/js/internal/tracers/call_tracer_legacy.js +++ b/coreth/eth/tracers/js/internal/tracers/call_tracer_legacy.js @@ -204,7 +204,6 @@ gasUsed: '0x' + bigInt(ctx.gasUsed).toString(16), input: toHex(ctx.input), output: toHex(ctx.output), - time: ctx.time, }; if (this.callstack[0].calls !== undefined) { result.calls = this.callstack[0].calls; @@ -234,7 +233,6 @@ input: call.input, output: call.output, error: call.error, - time: call.time, calls: call.calls, } for (var key in sorted) { diff --git a/coreth/eth/tracers/js/internal/tracers/prestate_tracer_legacy.js b/coreth/eth/tracers/js/internal/tracers/prestate_tracer_legacy.js index 77f25209..2757b8b1 100644 --- a/coreth/eth/tracers/js/internal/tracers/prestate_tracer_legacy.js +++ b/coreth/eth/tracers/js/internal/tracers/prestate_tracer_legacy.js @@ -62,7 +62,7 @@ var 
toBal = bigInt(this.prestate[toHex(ctx.to)].balance.slice(2), 16); this.prestate[toHex(ctx.to)].balance = '0x'+toBal.subtract(ctx.value).toString(16); - this.prestate[toHex(ctx.from)].balance = '0x'+fromBal.add(ctx.value).add((ctx.gasUsed + ctx.intrinsicGas) * ctx.gasPrice).toString(16); + this.prestate[toHex(ctx.from)].balance = '0x'+fromBal.add(ctx.value).add(ctx.gasUsed * ctx.gasPrice).toString(16); // Decrement the caller's nonce, and remove empty create targets this.prestate[toHex(ctx.from)].nonce--; diff --git a/coreth/eth/tracers/js/tracer_test.go b/coreth/eth/tracers/js/tracer_test.go index 435fcf07..b0bc34ba 100644 --- a/coreth/eth/tracers/js/tracer_test.go +++ b/coreth/eth/tracers/js/tracer_test.go @@ -70,22 +70,25 @@ func testCtx() *vmContext { return &vmContext{blockCtx: vm.BlockContext{BlockNumber: big.NewInt(1)}, txCtx: vm.TxContext{GasPrice: big.NewInt(100000)}} } -func runTrace(tracer tracers.Tracer, vmctx *vmContext, chaincfg *params.ChainConfig) (json.RawMessage, error) { +func runTrace(tracer tracers.Tracer, vmctx *vmContext, chaincfg *params.ChainConfig, contractCode []byte) (json.RawMessage, error) { var ( - env = vm.NewEVM(vmctx.blockCtx, vmctx.txCtx, &dummyStatedb{}, chaincfg, vm.Config{Debug: true, Tracer: tracer}) + env = vm.NewEVM(vmctx.blockCtx, vmctx.txCtx, &dummyStatedb{}, chaincfg, vm.Config{Tracer: tracer}) gasLimit uint64 = 31000 startGas uint64 = 10000 value = big.NewInt(0) contract = vm.NewContract(account{}, account{}, value, startGas) ) contract.Code = []byte{byte(vm.PUSH1), 0x1, byte(vm.PUSH1), 0x1, 0x0} + if contractCode != nil { + contract.Code = contractCode + } tracer.CaptureTxStart(gasLimit) tracer.CaptureStart(env, contract.Caller(), contract.Address(), false, []byte{}, startGas, value) ret, err := env.Interpreter().Run(contract, []byte{}, false) - tracer.CaptureEnd(ret, startGas-contract.Gas, 1, err) + tracer.CaptureEnd(ret, startGas-contract.Gas, err) // Rest gas assumes no refund - tracer.CaptureTxEnd(startGas - 
contract.Gas) + tracer.CaptureTxEnd(contract.Gas) if err != nil { return nil, err } @@ -93,35 +96,36 @@ func runTrace(tracer tracers.Tracer, vmctx *vmContext, chaincfg *params.ChainCon } func TestTracer(t *testing.T) { - execTracer := func(code string) ([]byte, string) { + execTracer := func(code string, contract []byte) ([]byte, string) { t.Helper() tracer, err := newJsTracer(code, nil, nil) if err != nil { t.Fatal(err) } - ret, err := runTrace(tracer, testCtx(), params.TestChainConfig) + ret, err := runTrace(tracer, testCtx(), params.TestChainConfig, contract) if err != nil { return nil, err.Error() // Stringify to allow comparison without nil checks } return ret, "" } for i, tt := range []struct { - code string - want string - fail string + code string + want string + fail string + contract []byte }{ { // tests that we don't panic on bad arguments to memory access code: "{depths: [], step: function(log) { this.depths.push(log.memory.slice(-1,-2)); }, fault: function() {}, result: function() { return this.depths; }}", want: ``, - fail: "tracer accessed out of bound memory: offset -1, end -2 at step (:1:53(15)) in server-side tracer function 'step'", + fail: "tracer accessed out of bound memory: offset -1, end -2 at step (:1:53(13)) in server-side tracer function 'step'", }, { // tests that we don't panic on bad arguments to stack peeks code: "{depths: [], step: function(log) { this.depths.push(log.stack.peek(-1)); }, fault: function() {}, result: function() { return this.depths; }}", want: ``, - fail: "tracer accessed out of bound stack: size 0, index -1 at step (:1:53(13)) in server-side tracer function 'step'", + fail: "tracer accessed out of bound stack: size 0, index -1 at step (:1:53(11)) in server-side tracer function 'step'", }, { // tests that we don't panic on bad arguments to memory getUint code: "{ depths: [], step: function(log, db) { this.depths.push(log.memory.getUint(-64));}, fault: function() {}, result: function() { return this.depths; }}", want: 
``, - fail: "tracer accessed out of bound memory: available 0, offset -64, size 32 at step (:1:58(13)) in server-side tracer function 'step'", + fail: "tracer accessed out of bound memory: available 0, offset -64, size 32 at step (:1:58(11)) in server-side tracer function 'step'", }, { // tests some general counting code: "{count: 0, step: function() { this.count += 1; }, fault: function() {}, result: function() { return this.count; }}", want: `3`, @@ -134,9 +138,9 @@ func TestTracer(t *testing.T) { }, { // tests to-string of opcodes code: "{opcodes: [], step: function(log) { this.opcodes.push(log.op.toString()); }, fault: function() {}, result: function() { return this.opcodes; }}", want: `["PUSH1","PUSH1","STOP"]`, - }, { // tests intrinsic gas - code: "{depths: [], step: function() {}, fault: function() {}, result: function(ctx) { return ctx.gasPrice+'.'+ctx.gasUsed+'.'+ctx.intrinsicGas; }}", - want: `"100000.6.21000"`, + }, { // tests gasUsed + code: "{depths: [], step: function() {}, fault: function() {}, result: function(ctx) { return ctx.gasPrice+'.'+ctx.gasUsed; }}", + want: `"100000.21006"`, }, { code: "{res: null, step: function(log) {}, fault: function() {}, result: function() { return toWord('0xffaa') }}", want: `{"0":0,"1":0,"2":0,"3":0,"4":0,"5":0,"6":0,"7":0,"8":0,"9":0,"10":0,"11":0,"12":0,"13":0,"14":0,"15":0,"16":0,"17":0,"18":0,"19":0,"20":0,"21":0,"22":0,"23":0,"24":0,"25":0,"26":0,"27":0,"28":0,"29":0,"30":255,"31":170}`, @@ -149,10 +153,19 @@ func TestTracer(t *testing.T) { }, { code: "{res: null, step: function(log) { var address = Array.prototype.slice.call(log.contract.getAddress()); this.res = toAddress(address); }, fault: function() {}, result: function() { return this.res }}", want: `{"0":0,"1":0,"2":0,"3":0,"4":0,"5":0,"6":0,"7":0,"8":0,"9":0,"10":0,"11":0,"12":0,"13":0,"14":0,"15":0,"16":0,"17":0,"18":0,"19":0}`, + }, { + code: "{res: [], step: function(log) { var op = log.op.toString(); if (op === 'MSTORE8' || op === 'STOP') { 
this.res.push(log.memory.slice(0, 2)) } }, fault: function() {}, result: function() { return this.res }}", + want: `[{"0":0,"1":0},{"0":255,"1":0}]`, + contract: []byte{byte(vm.PUSH1), byte(0xff), byte(vm.PUSH1), byte(0x00), byte(vm.MSTORE8), byte(vm.STOP)}, + }, { + code: "{res: [], step: function(log) { if (log.op.toString() === 'STOP') { this.res.push(log.memory.slice(5, 1025 * 1024)) } }, fault: function() {}, result: function() { return this.res }}", + want: "", + fail: "reached limit for padding memory slice: 1049568 at step (:1:83(20)) in server-side tracer function 'step'", + contract: []byte{byte(vm.PUSH1), byte(0xff), byte(vm.PUSH1), byte(0x00), byte(vm.MSTORE8), byte(vm.STOP)}, }, } { - if have, err := execTracer(tt.code); tt.want != string(have) || tt.fail != err { - t.Errorf("testcase %d: expected return value to be '%s' got '%s', error to be '%s' got '%s'\n\tcode: %v", i, tt.want, string(have), tt.fail, err, tt.code) + if have, err := execTracer(tt.code, tt.contract); tt.want != string(have) || tt.fail != err { + t.Errorf("testcase %d: expected return value to be \n'%s'\n\tgot\n'%s'\nerror to be\n'%s'\n\tgot\n'%s'\n\tcode: %v", i, tt.want, string(have), tt.fail, err, tt.code) } } } @@ -167,7 +180,7 @@ func TestHalt(t *testing.T) { time.Sleep(1 * time.Second) tracer.Stop(timeout) }() - if _, err = runTrace(tracer, testCtx(), params.TestChainConfig); !strings.Contains(err.Error(), "stahp") { + if _, err = runTrace(tracer, testCtx(), params.TestChainConfig, nil); !strings.Contains(err.Error(), "stahp") { t.Errorf("Expected timeout error, got %v", err) } } @@ -177,7 +190,7 @@ func TestHaltBetweenSteps(t *testing.T) { if err != nil { t.Fatal(err) } - env := vm.NewEVM(vm.BlockContext{BlockNumber: big.NewInt(1)}, vm.TxContext{GasPrice: big.NewInt(1)}, &dummyStatedb{}, params.TestChainConfig, vm.Config{Debug: true, Tracer: tracer}) + env := vm.NewEVM(vm.BlockContext{BlockNumber: big.NewInt(1)}, vm.TxContext{GasPrice: big.NewInt(1)}, &dummyStatedb{}, 
params.TestChainConfig, vm.Config{Tracer: tracer}) scope := &vm.ScopeContext{ Contract: vm.NewContract(&account{}, &account{}, big.NewInt(0), 0), } @@ -201,9 +214,9 @@ func TestNoStepExec(t *testing.T) { if err != nil { t.Fatal(err) } - env := vm.NewEVM(vm.BlockContext{BlockNumber: big.NewInt(1)}, vm.TxContext{GasPrice: big.NewInt(100)}, &dummyStatedb{}, params.TestChainConfig, vm.Config{Debug: true, Tracer: tracer}) + env := vm.NewEVM(vm.BlockContext{BlockNumber: big.NewInt(1)}, vm.TxContext{GasPrice: big.NewInt(100)}, &dummyStatedb{}, params.TestChainConfig, vm.Config{Tracer: tracer}) tracer.CaptureStart(env, common.Address{}, common.Address{}, false, []byte{}, 1000, big.NewInt(0)) - tracer.CaptureEnd(nil, 0, 1, nil) + tracer.CaptureEnd(nil, 0, nil) ret, err := tracer.GetResult() if err != nil { t.Fatal(err) @@ -226,7 +239,7 @@ func TestNoStepExec(t *testing.T) { } func TestIsPrecompile(t *testing.T) { - chaincfg := ¶ms.ChainConfig{ChainID: big.NewInt(1), HomesteadBlock: big.NewInt(0), DAOForkBlock: nil, DAOForkSupport: false, EIP150Block: big.NewInt(0), EIP150Hash: common.Hash{}, EIP155Block: big.NewInt(0), EIP158Block: big.NewInt(0), ByzantiumBlock: big.NewInt(100), ConstantinopleBlock: big.NewInt(0), PetersburgBlock: big.NewInt(0), IstanbulBlock: big.NewInt(200), MuirGlacierBlock: big.NewInt(0)} + chaincfg := ¶ms.ChainConfig{ChainID: big.NewInt(1), HomesteadBlock: big.NewInt(0), DAOForkBlock: nil, DAOForkSupport: false, EIP150Block: big.NewInt(0), EIP155Block: big.NewInt(0), EIP158Block: big.NewInt(0), ByzantiumBlock: big.NewInt(100), ConstantinopleBlock: big.NewInt(0), PetersburgBlock: big.NewInt(0), IstanbulBlock: big.NewInt(200), MuirGlacierBlock: big.NewInt(0)} chaincfg.ByzantiumBlock = big.NewInt(100) chaincfg.IstanbulBlock = big.NewInt(200) txCtx := vm.TxContext{GasPrice: big.NewInt(100000)} @@ -236,7 +249,7 @@ func TestIsPrecompile(t *testing.T) { } blockCtx := vm.BlockContext{BlockNumber: big.NewInt(150)} - res, err := runTrace(tracer, 
&vmContext{blockCtx, txCtx}, chaincfg) + res, err := runTrace(tracer, &vmContext{blockCtx, txCtx}, chaincfg, nil) if err != nil { t.Error(err) } @@ -246,7 +259,7 @@ func TestIsPrecompile(t *testing.T) { tracer, _ = newJsTracer("{addr: toAddress('0000000000000000000000000000000000000009'), res: null, step: function() { this.res = isPrecompiled(this.addr); }, fault: function() {}, result: function() { return this.res; }}", nil, nil) blockCtx = vm.BlockContext{BlockNumber: big.NewInt(250)} - res, err = runTrace(tracer, &vmContext{blockCtx, txCtx}, chaincfg) + res, err = runTrace(tracer, &vmContext{blockCtx, txCtx}, chaincfg, nil) if err != nil { t.Error(err) } diff --git a/coreth/eth/tracers/logger/access_list_tracer.go b/coreth/eth/tracers/logger/access_list_tracer.go index 862d5e4b..e2bb12f2 100644 --- a/coreth/eth/tracers/logger/access_list_tracer.go +++ b/coreth/eth/tracers/logger/access_list_tracer.go @@ -18,7 +18,6 @@ package logger import ( "math/big" - "time" "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/core/vm" @@ -162,7 +161,7 @@ func (a *AccessListTracer) CaptureState(pc uint64, op vm.OpCode, gas, cost uint6 func (*AccessListTracer) CaptureFault(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, depth int, err error) { } -func (*AccessListTracer) CaptureEnd(output []byte, gasUsed uint64, t time.Duration, err error) {} +func (*AccessListTracer) CaptureEnd(output []byte, gasUsed uint64, err error) {} func (*AccessListTracer) CaptureEnter(typ vm.OpCode, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) { } diff --git a/coreth/eth/tracers/logger/logger.go b/coreth/eth/tracers/logger/logger.go index 6624ce0d..2173db79 100644 --- a/coreth/eth/tracers/logger/logger.go +++ b/coreth/eth/tracers/logger/logger.go @@ -24,7 +24,6 @@ import ( "math/big" "strings" "sync/atomic" - "time" "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/core/vm" @@ -118,8 +117,8 @@ type StructLogger 
struct { gasLimit uint64 usedGas uint64 - interrupt uint32 // Atomic flag to signal execution interruption - reason error // Textual reason for the interruption + interrupt atomic.Bool // Atomic flag to signal execution interruption + reason error // Textual reason for the interruption } // NewStructLogger returns a new logger @@ -151,8 +150,7 @@ func (l *StructLogger) CaptureStart(env *vm.EVM, from common.Address, to common. // CaptureState also tracks SLOAD/SSTORE ops to track storage change. func (l *StructLogger) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, depth int, err error) { // If tracing was interrupted, set the error and stop - if atomic.LoadUint32(&l.interrupt) > 0 { - l.env.Cancel() + if l.interrupt.Load() { return } // check if already accumulated the specified number of logs @@ -220,7 +218,7 @@ func (l *StructLogger) CaptureFault(pc uint64, op vm.OpCode, gas, cost uint64, s } // CaptureEnd is called after the call finishes to finalize the tracing. -func (l *StructLogger) CaptureEnd(output []byte, gasUsed uint64, t time.Duration, err error) { +func (l *StructLogger) CaptureEnd(output []byte, gasUsed uint64, err error) { l.output = output l.err = err if l.cfg.Debug { @@ -260,7 +258,7 @@ func (l *StructLogger) GetResult() (json.RawMessage, error) { // Stop terminates execution of the tracer at the first opportune moment. 
func (l *StructLogger) Stop(err error) { l.reason = err - atomic.StoreUint32(&l.interrupt, 1) + l.interrupt.Store(true) } func (l *StructLogger) CaptureTxStart(gasLimit uint64) { @@ -386,7 +384,7 @@ func (t *mdLogger) CaptureFault(pc uint64, op vm.OpCode, gas, cost uint64, scope fmt.Fprintf(t.out, "\nError: at pc=%d, op=%v: %v\n", pc, op, err) } -func (t *mdLogger) CaptureEnd(output []byte, gasUsed uint64, tm time.Duration, err error) { +func (t *mdLogger) CaptureEnd(output []byte, gasUsed uint64, err error) { fmt.Fprintf(t.out, "\nOutput: `%#x`\nConsumed gas: `%d`\nError: `%v`\n", output, gasUsed, err) } diff --git a/coreth/eth/tracers/logger/logger_json.go b/coreth/eth/tracers/logger/logger_json.go index 92c13c0c..3b2b7832 100644 --- a/coreth/eth/tracers/logger/logger_json.go +++ b/coreth/eth/tracers/logger/logger_json.go @@ -20,7 +20,6 @@ import ( "encoding/json" "io" "math/big" - "time" "github.com/ava-labs/coreth/core/vm" "github.com/ethereum/go-ethereum/common" @@ -80,18 +79,17 @@ func (l *JSONLogger) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, sco } // CaptureEnd is triggered at end of execution. 
-func (l *JSONLogger) CaptureEnd(output []byte, gasUsed uint64, t time.Duration, err error) { +func (l *JSONLogger) CaptureEnd(output []byte, gasUsed uint64, err error) { type endLog struct { Output string `json:"output"` GasUsed math.HexOrDecimal64 `json:"gasUsed"` - Time time.Duration `json:"time"` Err string `json:"error,omitempty"` } var errMsg string if err != nil { errMsg = err.Error() } - l.encoder.Encode(endLog{common.Bytes2Hex(output), math.HexOrDecimal64(gasUsed), t, errMsg}) + l.encoder.Encode(endLog{common.Bytes2Hex(output), math.HexOrDecimal64(gasUsed), errMsg}) } func (l *JSONLogger) CaptureEnter(typ vm.OpCode, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) { diff --git a/coreth/eth/tracers/logger/logger_test.go b/coreth/eth/tracers/logger/logger_test.go index c5fb4cd6..2d70cc5f 100644 --- a/coreth/eth/tracers/logger/logger_test.go +++ b/coreth/eth/tracers/logger/logger_test.go @@ -18,10 +18,11 @@ package logger import ( "encoding/json" - "fmt" + "errors" "math/big" "testing" + "github.com/ava-labs/coreth/core/rawdb" "github.com/ava-labs/coreth/core/state" "github.com/ava-labs/coreth/core/vm" "github.com/ava-labs/coreth/params" @@ -44,23 +45,17 @@ func (d *dummyContractRef) SetBalance(*big.Int) {} func (d *dummyContractRef) SetNonce(uint64) {} func (d *dummyContractRef) Balance() *big.Int { return new(big.Int) } -type dummyStatedb struct { - state.StateDB -} - -func (*dummyStatedb) GetRefund() uint64 { return 1337 } -func (*dummyStatedb) GetState(_ common.Address, _ common.Hash) common.Hash { return common.Hash{} } -func (*dummyStatedb) SetState(_ common.Address, _ common.Hash, _ common.Hash) {} - func TestStoreCapture(t *testing.T) { var ( - logger = NewStructLogger(nil) - env = vm.NewEVM(vm.BlockContext{}, vm.TxContext{}, &dummyStatedb{}, params.TestChainConfig, vm.Config{Debug: true, Tracer: logger}) - contract = vm.NewContract(&dummyContractRef{}, &dummyContractRef{}, new(big.Int), 100000) + logger = 
NewStructLogger(nil) + statedb, _ = state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + env = vm.NewEVM(vm.BlockContext{}, vm.TxContext{}, statedb, params.TestChainConfig, vm.Config{Tracer: logger}) + contract = vm.NewContract(&dummyContractRef{}, &dummyContractRef{}, new(big.Int), 100000) ) contract.Code = []byte{byte(vm.PUSH1), 0x1, byte(vm.PUSH1), 0x0, byte(vm.SSTORE)} var index common.Hash logger.CaptureStart(env, common.Address{}, contract.Address(), false, nil, 0, nil) + statedb.Prepare(params.TestRules, common.Address{}, common.Address{}, nil, nil, nil) _, err := env.Interpreter().Run(contract, []byte{}, false) if err != nil { t.Fatal(err) @@ -85,7 +80,7 @@ func TestStructLogMarshalingOmitEmpty(t *testing.T) { }{ {"empty err and no fields", &StructLog{}, `{"pc":0,"op":0,"gas":"0x0","gasCost":"0x0","memSize":0,"stack":null,"depth":0,"refund":0,"opName":"STOP"}`}, - {"with err", &StructLog{Err: fmt.Errorf("this failed")}, + {"with err", &StructLog{Err: errors.New("this failed")}, `{"pc":0,"op":0,"gas":"0x0","gasCost":"0x0","memSize":0,"stack":null,"depth":0,"refund":0,"opName":"STOP","error":"this failed"}`}, {"with mem", &StructLog{Memory: make([]byte, 2), MemorySize: 2}, `{"pc":0,"op":0,"gas":"0x0","gasCost":"0x0","memory":"0x0000","memSize":2,"stack":null,"depth":0,"refund":0,"opName":"STOP"}`}, diff --git a/coreth/eth/tracers/native/4byte.go b/coreth/eth/tracers/native/4byte.go index 5ea51f2d..5bcabd58 100644 --- a/coreth/eth/tracers/native/4byte.go +++ b/coreth/eth/tracers/native/4byte.go @@ -31,7 +31,6 @@ import ( "math/big" "strconv" "sync/atomic" - "time" "github.com/ava-labs/coreth/core/vm" "github.com/ava-labs/coreth/eth/tracers" @@ -39,7 +38,7 @@ import ( ) func init() { - register("4byteTracer", newFourByteTracer) + tracers.DefaultDirectory.Register("4byteTracer", newFourByteTracer, false) } // fourByteTracer searches for 4byte-identifiers, and collects them for post-processing. 
@@ -57,9 +56,9 @@ func init() { // 0xc281d19e-0: 1 // } type fourByteTracer struct { - env *vm.EVM + noopTracer ids map[string]int // ids aggregates the 4byte ids found - interrupt uint32 // Atomic flag to signal execution interruption + interrupt atomic.Bool // Atomic flag to signal execution interruption reason error // Textual reason for the interruption activePrecompiles []common.Address // Updated on CaptureStart based on given rules } @@ -91,8 +90,6 @@ func (t *fourByteTracer) store(id []byte, size int) { // CaptureStart implements the EVMLogger interface to initialize the tracing operation. func (t *fourByteTracer) CaptureStart(env *vm.EVM, from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) { - t.env = env - // Update list of precompiles based on current block rules := env.ChainConfig().AvalancheRules(env.Context.BlockNumber, env.Context.Time) t.activePrecompiles = vm.ActivePrecompiles(rules) @@ -103,15 +100,10 @@ func (t *fourByteTracer) CaptureStart(env *vm.EVM, from common.Address, to commo } } -// CaptureState implements the EVMLogger interface to trace a single step of VM execution. -func (t *fourByteTracer) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, depth int, err error) { -} - // CaptureEnter is called when EVM enters a new scope (via call, create or selfdestruct). func (t *fourByteTracer) CaptureEnter(op vm.OpCode, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) { // Skip if tracing was interrupted - if atomic.LoadUint32(&t.interrupt) > 0 { - t.env.Cancel() + if t.interrupt.Load() { return } if len(input) < 4 { @@ -129,23 +121,6 @@ func (t *fourByteTracer) CaptureEnter(op vm.OpCode, from common.Address, to comm t.store(input[0:4], len(input)-4) } -// CaptureExit is called when EVM exits a scope, even if the scope didn't -// execute any code. 
-func (t *fourByteTracer) CaptureExit(output []byte, gasUsed uint64, err error) { -} - -// CaptureFault implements the EVMLogger interface to trace an execution fault. -func (t *fourByteTracer) CaptureFault(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, depth int, err error) { -} - -// CaptureEnd is called after the call finishes to finalize the tracing. -func (t *fourByteTracer) CaptureEnd(output []byte, gasUsed uint64, _ time.Duration, err error) { -} - -func (*fourByteTracer) CaptureTxStart(gasLimit uint64) {} - -func (*fourByteTracer) CaptureTxEnd(restGas uint64) {} - // GetResult returns the json-encoded nested list of call traces, and any // error arising from the encoding or forceful termination (via `Stop`). func (t *fourByteTracer) GetResult() (json.RawMessage, error) { @@ -159,5 +134,9 @@ func (t *fourByteTracer) GetResult() (json.RawMessage, error) { // Stop terminates execution of the tracer at the first opportune moment. func (t *fourByteTracer) Stop(err error) { t.reason = err - atomic.StoreUint32(&t.interrupt, 1) + t.interrupt.Store(true) +} + +func bytesToHex(s []byte) string { + return "0x" + common.Bytes2Hex(s) } diff --git a/coreth/eth/tracers/native/call.go b/coreth/eth/tracers/native/call.go index bc01277b..95fd7c64 100644 --- a/coreth/eth/tracers/native/call.go +++ b/coreth/eth/tracers/native/call.go @@ -30,43 +30,97 @@ import ( "encoding/json" "errors" "math/big" - "strconv" - "strings" "sync/atomic" - "time" + "github.com/ava-labs/coreth/accounts/abi" "github.com/ava-labs/coreth/core/vm" "github.com/ava-labs/coreth/eth/tracers" + "github.com/ava-labs/coreth/vmerrs" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/log" ) +//go:generate go run github.com/fjl/gencodec -type callFrame -field-override callFrameMarshaling -out gen_callframe_json.go + func init() { - register("callTracer", newCallTracer) + 
tracers.DefaultDirectory.Register("callTracer", newCallTracer, false) +} + +type callLog struct { + Address common.Address `json:"address"` + Topics []common.Hash `json:"topics"` + Data hexutil.Bytes `json:"data"` } type callFrame struct { - Type string `json:"type"` - From string `json:"from"` - To string `json:"to,omitempty"` - Value string `json:"value,omitempty"` - Gas string `json:"gas"` - GasUsed string `json:"gasUsed"` - Input string `json:"input"` - Output string `json:"output,omitempty"` - Error string `json:"error,omitempty"` - Calls []callFrame `json:"calls,omitempty"` + Type vm.OpCode `json:"-"` + From common.Address `json:"from"` + Gas uint64 `json:"gas"` + GasUsed uint64 `json:"gasUsed"` + To *common.Address `json:"to,omitempty" rlp:"optional"` + Input []byte `json:"input" rlp:"optional"` + Output []byte `json:"output,omitempty" rlp:"optional"` + Error string `json:"error,omitempty" rlp:"optional"` + RevertReason string `json:"revertReason,omitempty"` + Calls []callFrame `json:"calls,omitempty" rlp:"optional"` + Logs []callLog `json:"logs,omitempty" rlp:"optional"` + // Placed at end on purpose. The RLP will be decoded to 0 instead of + // nil if there are non-empty elements after in the struct. 
+ Value *big.Int `json:"value,omitempty" rlp:"optional"` +} + +func (f callFrame) TypeString() string { + return f.Type.String() +} + +func (f callFrame) failed() bool { + return len(f.Error) > 0 +} + +func (f *callFrame) processOutput(output []byte, err error) { + output = common.CopyBytes(output) + if err == nil { + f.Output = output + return + } + f.Error = err.Error() + if f.Type == vm.CREATE || f.Type == vm.CREATE2 { + f.To = nil + } + if !errors.Is(err, vmerrs.ErrExecutionReverted) || len(output) == 0 { + return + } + f.Output = output + if len(output) < 4 { + return + } + if unpacked, err := abi.UnpackRevert(output); err == nil { + f.RevertReason = unpacked + } +} + +type callFrameMarshaling struct { + TypeString string `json:"type"` + Gas hexutil.Uint64 + GasUsed hexutil.Uint64 + Value *hexutil.Big + Input hexutil.Bytes + Output hexutil.Bytes } type callTracer struct { - env *vm.EVM + noopTracer callstack []callFrame config callTracerConfig - interrupt uint32 // Atomic flag to signal execution interruption - reason error // Textual reason for the interruption + gasLimit uint64 + interrupt atomic.Bool // Atomic flag to signal execution interruption + reason error // Textual reason for the interruption } type callTracerConfig struct { OnlyTopCall bool `json:"onlyTopCall"` // If true, call tracer won't collect any subcalls + WithLog bool `json:"withLog"` // If true, call tracer will collect event logs } // newCallTracer returns a native go tracer which tracks @@ -85,39 +139,69 @@ func newCallTracer(ctx *tracers.Context, cfg json.RawMessage) (tracers.Tracer, e // CaptureStart implements the EVMLogger interface to initialize the tracing operation. 
func (t *callTracer) CaptureStart(env *vm.EVM, from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) { - t.env = env + toCopy := to t.callstack[0] = callFrame{ - Type: "CALL", - From: addrToHex(from), - To: addrToHex(to), - Input: bytesToHex(input), - Gas: uintToHex(gas), - Value: bigToHex(value), + Type: vm.CALL, + From: from, + To: &toCopy, + Input: common.CopyBytes(input), + Gas: t.gasLimit, + Value: value, } if create { - t.callstack[0].Type = "CREATE" + t.callstack[0].Type = vm.CREATE } } // CaptureEnd is called after the call finishes to finalize the tracing. -func (t *callTracer) CaptureEnd(output []byte, gasUsed uint64, _ time.Duration, err error) { - t.callstack[0].GasUsed = uintToHex(gasUsed) - if err != nil { - t.callstack[0].Error = err.Error() - if err.Error() == "execution reverted" && len(output) > 0 { - t.callstack[0].Output = bytesToHex(output) - } - } else { - t.callstack[0].Output = bytesToHex(output) - } +func (t *callTracer) CaptureEnd(output []byte, gasUsed uint64, err error) { + t.callstack[0].processOutput(output, err) } // CaptureState implements the EVMLogger interface to trace a single step of VM execution. func (t *callTracer) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, depth int, err error) { -} + // skip if the previous op caused an error + if err != nil { + return + } + // Only logs need to be captured via opcode processing + if !t.config.WithLog { + return + } + // Avoid processing nested calls when only caring about top call + if t.config.OnlyTopCall && depth > 0 { + return + } + // Skip if tracing was interrupted + if t.interrupt.Load() { + return + } + switch op { + case vm.LOG0, vm.LOG1, vm.LOG2, vm.LOG3, vm.LOG4: + size := int(op - vm.LOG0) -// CaptureFault implements the EVMLogger interface to trace an execution fault. 
-func (t *callTracer) CaptureFault(pc uint64, op vm.OpCode, gas, cost uint64, _ *vm.ScopeContext, depth int, err error) { + stack := scope.Stack + stackData := stack.Data() + + // Don't modify the stack + mStart := stackData[len(stackData)-1] + mSize := stackData[len(stackData)-2] + topics := make([]common.Hash, size) + for i := 0; i < size; i++ { + topic := stackData[len(stackData)-2-(i+1)] + topics[i] = common.Hash(topic.Bytes32()) + } + + data, err := tracers.GetMemoryCopyPadded(scope.Memory, int64(mStart.Uint64()), int64(mSize.Uint64())) + if err != nil { + // mSize was unrealistically large + log.Warn("failed to copy CREATE2 input", "err", err, "tracer", "callTracer", "offset", mStart, "size", mSize) + return + } + + log := callLog{Address: scope.Contract.Address(), Topics: topics, Data: hexutil.Bytes(data)} + t.callstack[len(t.callstack)-1].Logs = append(t.callstack[len(t.callstack)-1].Logs, log) + } } // CaptureEnter is called when EVM enters a new scope (via call, create or selfdestruct). @@ -126,18 +210,18 @@ func (t *callTracer) CaptureEnter(typ vm.OpCode, from common.Address, to common. 
return } // Skip if tracing was interrupted - if atomic.LoadUint32(&t.interrupt) > 0 { - t.env.Cancel() + if t.interrupt.Load() { return } + toCopy := to call := callFrame{ - Type: typ.String(), - From: addrToHex(from), - To: addrToHex(to), - Input: bytesToHex(input), - Gas: uintToHex(gas), - Value: bigToHex(value), + Type: typ, + From: from, + To: &toCopy, + Input: common.CopyBytes(input), + Gas: gas, + Value: value, } t.callstack = append(t.callstack, call) } @@ -157,21 +241,22 @@ func (t *callTracer) CaptureExit(output []byte, gasUsed uint64, err error) { t.callstack = t.callstack[:size-1] size -= 1 - call.GasUsed = uintToHex(gasUsed) - if err == nil { - call.Output = bytesToHex(output) - } else { - call.Error = err.Error() - if call.Type == "CREATE" || call.Type == "CREATE2" { - call.To = "" - } - } + call.GasUsed = gasUsed + call.processOutput(output, err) t.callstack[size-1].Calls = append(t.callstack[size-1].Calls, call) } -func (*callTracer) CaptureTxStart(gasLimit uint64) {} +func (t *callTracer) CaptureTxStart(gasLimit uint64) { + t.gasLimit = gasLimit +} -func (*callTracer) CaptureTxEnd(restGas uint64) {} +func (t *callTracer) CaptureTxEnd(restGas uint64) { + t.callstack[0].GasUsed = t.gasLimit - restGas + if t.config.WithLog { + // Logs are not emitted when the call fails + clearFailedLogs(&t.callstack[0], false) + } +} // GetResult returns the json-encoded nested list of call traces, and any // error arising from the encoding or forceful termination (via `Stop`). @@ -179,6 +264,7 @@ func (t *callTracer) GetResult() (json.RawMessage, error) { if len(t.callstack) != 1 { return nil, errors.New("incorrect number of top-level calls") } + res, err := json.Marshal(t.callstack[0]) if err != nil { return nil, err @@ -189,24 +275,18 @@ func (t *callTracer) GetResult() (json.RawMessage, error) { // Stop terminates execution of the tracer at the first opportune moment. 
func (t *callTracer) Stop(err error) { t.reason = err - atomic.StoreUint32(&t.interrupt, 1) + t.interrupt.Store(true) } -func bytesToHex(s []byte) string { - return "0x" + common.Bytes2Hex(s) -} - -func bigToHex(n *big.Int) string { - if n == nil { - return "" +// clearFailedLogs clears the logs of a callframe and all its children +// in case of execution failure. +func clearFailedLogs(cf *callFrame, parentFailed bool) { + failed := cf.failed() || parentFailed + // Clear own logs + if failed { + cf.Logs = nil + } + for i := range cf.Calls { + clearFailedLogs(&cf.Calls[i], failed) } - return "0x" + n.Text(16) -} - -func uintToHex(n uint64) string { - return "0x" + strconv.FormatUint(n, 16) -} - -func addrToHex(a common.Address) string { - return strings.ToLower(a.Hex()) } diff --git a/coreth/eth/tracers/native/call_flat.go b/coreth/eth/tracers/native/call_flat.go new file mode 100644 index 00000000..36704ee4 --- /dev/null +++ b/coreth/eth/tracers/native/call_flat.go @@ -0,0 +1,392 @@ +// (c) 2023, Ava Labs, Inc. +// +// This file is a derived work, based on the go-ethereum library whose original +// notices appear below. +// +// It is distributed under a license compatible with the licensing terms of the +// original code from which it is derived. +// +// Much love to the original authors for their work. +// ********** +// Copyright 2022 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package native + +import ( + "encoding/json" + "errors" + "fmt" + "math/big" + "strings" + + "github.com/ava-labs/coreth/core/vm" + "github.com/ava-labs/coreth/eth/tracers" + "github.com/ava-labs/coreth/vmerrs" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" +) + +//go:generate go run github.com/fjl/gencodec -type flatCallAction -field-override flatCallActionMarshaling -out gen_flatcallaction_json.go +//go:generate go run github.com/fjl/gencodec -type flatCallResult -field-override flatCallResultMarshaling -out gen_flatcallresult_json.go + +func init() { + tracers.DefaultDirectory.Register("flatCallTracer", newFlatCallTracer, false) +} + +var parityErrorMapping = map[string]string{ + "contract creation code storage out of gas": "Out of gas", + "out of gas": "Out of gas", + "gas uint64 overflow": "Out of gas", + "max code size exceeded": "Out of gas", + "invalid jump destination": "Bad jump destination", + "execution reverted": "Reverted", + "return data out of bounds": "Out of bounds", + "stack limit reached 1024 (1023)": "Out of stack", + "precompiled failed": "Built-in failed", + "invalid input length": "Built-in failed", +} + +var parityErrorMappingStartingWith = map[string]string{ + "invalid opcode:": "Bad instruction", + "stack underflow": "Stack underflow", +} + +// flatCallFrame is a standalone callframe. 
+type flatCallFrame struct { + Action flatCallAction `json:"action"` + BlockHash *common.Hash `json:"blockHash"` + BlockNumber uint64 `json:"blockNumber"` + Error string `json:"error,omitempty"` + Result *flatCallResult `json:"result,omitempty"` + Subtraces int `json:"subtraces"` + TraceAddress []int `json:"traceAddress"` + TransactionHash *common.Hash `json:"transactionHash"` + TransactionPosition uint64 `json:"transactionPosition"` + Type string `json:"type"` +} + +type flatCallAction struct { + Author *common.Address `json:"author,omitempty"` + RewardType string `json:"rewardType,omitempty"` + SelfDestructed *common.Address `json:"address,omitempty"` + Balance *big.Int `json:"balance,omitempty"` + CallType string `json:"callType,omitempty"` + CreationMethod string `json:"creationMethod,omitempty"` + From *common.Address `json:"from,omitempty"` + Gas *uint64 `json:"gas,omitempty"` + Init *[]byte `json:"init,omitempty"` + Input *[]byte `json:"input,omitempty"` + RefundAddress *common.Address `json:"refundAddress,omitempty"` + To *common.Address `json:"to,omitempty"` + Value *big.Int `json:"value,omitempty"` +} + +type flatCallActionMarshaling struct { + Balance *hexutil.Big + Gas *hexutil.Uint64 + Init *hexutil.Bytes + Input *hexutil.Bytes + Value *hexutil.Big +} + +type flatCallResult struct { + Address *common.Address `json:"address,omitempty"` + Code *[]byte `json:"code,omitempty"` + GasUsed *uint64 `json:"gasUsed,omitempty"` + Output *[]byte `json:"output,omitempty"` +} + +type flatCallResultMarshaling struct { + Code *hexutil.Bytes + GasUsed *hexutil.Uint64 + Output *hexutil.Bytes +} + +// flatCallTracer reports call frame information of a tx in a flat format, i.e. +// as opposed to the nested format of `callTracer`. 
+type flatCallTracer struct { + tracer *callTracer + config flatCallTracerConfig + ctx *tracers.Context // Holds tracer context data + reason error // Textual reason for the interruption + activePrecompiles []common.Address // Updated on CaptureStart based on given rules +} + +type flatCallTracerConfig struct { + ConvertParityErrors bool `json:"convertParityErrors"` // If true, call tracer converts errors to parity format + IncludePrecompiles bool `json:"includePrecompiles"` // If true, call tracer includes calls to precompiled contracts +} + +// newFlatCallTracer returns a new flatCallTracer. +func newFlatCallTracer(ctx *tracers.Context, cfg json.RawMessage) (tracers.Tracer, error) { + var config flatCallTracerConfig + if cfg != nil { + if err := json.Unmarshal(cfg, &config); err != nil { + return nil, err + } + } + + // Create inner call tracer with default configuration, don't forward + // the OnlyTopCall or WithLog to inner for now + tracer, err := tracers.DefaultDirectory.New("callTracer", ctx, nil) + if err != nil { + return nil, err + } + t, ok := tracer.(*callTracer) + if !ok { + return nil, errors.New("internal error: embedded tracer has wrong type") + } + + return &flatCallTracer{tracer: t, ctx: ctx, config: config}, nil +} + +// CaptureStart implements the EVMLogger interface to initialize the tracing operation. +func (t *flatCallTracer) CaptureStart(env *vm.EVM, from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) { + t.tracer.CaptureStart(env, from, to, create, input, gas, value) + // Update list of precompiles based on current block + rules := env.ChainConfig().AvalancheRules(env.Context.BlockNumber, env.Context.Timestamp()) + t.activePrecompiles = vm.ActivePrecompiles(rules) +} + +// CaptureEnd is called after the call finishes to finalize the tracing. 
+func (t *flatCallTracer) CaptureEnd(output []byte, gasUsed uint64, err error) { + t.tracer.CaptureEnd(output, gasUsed, err) +} + +// CaptureState implements the EVMLogger interface to trace a single step of VM execution. +func (t *flatCallTracer) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, depth int, err error) { + t.tracer.CaptureState(pc, op, gas, cost, scope, rData, depth, err) +} + +// CaptureFault implements the EVMLogger interface to trace an execution fault. +func (t *flatCallTracer) CaptureFault(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, depth int, err error) { + t.tracer.CaptureFault(pc, op, gas, cost, scope, depth, err) +} + +// CaptureEnter is called when EVM enters a new scope (via call, create or selfdestruct). +func (t *flatCallTracer) CaptureEnter(typ vm.OpCode, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) { + t.tracer.CaptureEnter(typ, from, to, input, gas, value) + + // Child calls must have a value, even if it's zero. + // Practically speaking, only STATICCALL has nil value. Set it to zero. + if t.tracer.callstack[len(t.tracer.callstack)-1].Value == nil && value == nil { + t.tracer.callstack[len(t.tracer.callstack)-1].Value = big.NewInt(0) + } +} + +// CaptureExit is called when EVM exits a scope, even if the scope didn't +// execute any code. +func (t *flatCallTracer) CaptureExit(output []byte, gasUsed uint64, err error) { + t.tracer.CaptureExit(output, gasUsed, err) + + // Parity traces don't include CALL/STATICCALLs to precompiles. + // By default we remove them from the callstack. 
+ if t.config.IncludePrecompiles { + return + } + var ( + // call has been nested in parent + parent = t.tracer.callstack[len(t.tracer.callstack)-1] + call = parent.Calls[len(parent.Calls)-1] + typ = call.Type + to = call.To + ) + if typ == vm.CALL || typ == vm.STATICCALL { + if t.isPrecompiled(*to) { + t.tracer.callstack[len(t.tracer.callstack)-1].Calls = parent.Calls[:len(parent.Calls)-1] + } + } +} + +func (t *flatCallTracer) CaptureTxStart(gasLimit uint64) { + t.tracer.CaptureTxStart(gasLimit) +} + +func (t *flatCallTracer) CaptureTxEnd(restGas uint64) { + t.tracer.CaptureTxEnd(restGas) +} + +// GetResult returns an empty json object. +func (t *flatCallTracer) GetResult() (json.RawMessage, error) { + if len(t.tracer.callstack) < 1 { + return nil, errors.New("invalid number of calls") + } + + flat, err := flatFromNested(&t.tracer.callstack[0], []int{}, t.config.ConvertParityErrors, t.ctx) + if err != nil { + return nil, err + } + + res, err := json.Marshal(flat) + if err != nil { + return nil, err + } + return res, t.reason +} + +// Stop terminates execution of the tracer at the first opportune moment. +func (t *flatCallTracer) Stop(err error) { + t.tracer.Stop(err) +} + +// isPrecompiled returns whether the addr is a precompile. 
+func (t *flatCallTracer) isPrecompiled(addr common.Address) bool { + for _, p := range t.activePrecompiles { + if p == addr { + return true + } + } + return false +} + +func flatFromNested(input *callFrame, traceAddress []int, convertErrs bool, ctx *tracers.Context) (output []flatCallFrame, err error) { + var frame *flatCallFrame + switch input.Type { + case vm.CREATE, vm.CREATE2: + frame = newFlatCreate(input) + case vm.SELFDESTRUCT: + frame = newFlatSuicide(input) + case vm.CALL, vm.STATICCALL, vm.CALLCODE, vm.DELEGATECALL: + frame = newFlatCall(input) + default: + return nil, fmt.Errorf("unrecognized call frame type: %s", input.Type) + } + + frame.TraceAddress = traceAddress + frame.Error = input.Error + frame.Subtraces = len(input.Calls) + fillCallFrameFromContext(frame, ctx) + if convertErrs { + convertErrorToParity(frame) + } + + // Revert output contains useful information (revert reason). + // Otherwise discard result. + if input.Error != "" && input.Error != vmerrs.ErrExecutionReverted.Error() { + frame.Result = nil + } + + output = append(output, *frame) + if len(input.Calls) > 0 { + for i, childCall := range input.Calls { + childAddr := childTraceAddress(traceAddress, i) + childCallCopy := childCall + flat, err := flatFromNested(&childCallCopy, childAddr, convertErrs, ctx) + if err != nil { + return nil, err + } + output = append(output, flat...) 
+ } + } + + return output, nil +} + +func newFlatCreate(input *callFrame) *flatCallFrame { + var ( + actionInit = input.Input[:] + resultCode = input.Output[:] + ) + + return &flatCallFrame{ + Type: strings.ToLower(vm.CREATE.String()), + Action: flatCallAction{ + From: &input.From, + Gas: &input.Gas, + Value: input.Value, + Init: &actionInit, + }, + Result: &flatCallResult{ + GasUsed: &input.GasUsed, + Address: input.To, + Code: &resultCode, + }, + } +} + +func newFlatCall(input *callFrame) *flatCallFrame { + var ( + actionInput = input.Input[:] + resultOutput = input.Output[:] + ) + + return &flatCallFrame{ + Type: strings.ToLower(vm.CALL.String()), + Action: flatCallAction{ + From: &input.From, + To: input.To, + Gas: &input.Gas, + Value: input.Value, + CallType: strings.ToLower(input.Type.String()), + Input: &actionInput, + }, + Result: &flatCallResult{ + GasUsed: &input.GasUsed, + Output: &resultOutput, + }, + } +} + +func newFlatSuicide(input *callFrame) *flatCallFrame { + return &flatCallFrame{ + Type: "suicide", + Action: flatCallAction{ + SelfDestructed: &input.From, + Balance: input.Value, + RefundAddress: input.To, + }, + } +} + +func fillCallFrameFromContext(callFrame *flatCallFrame, ctx *tracers.Context) { + if ctx == nil { + return + } + if ctx.BlockHash != (common.Hash{}) { + callFrame.BlockHash = &ctx.BlockHash + } + if ctx.BlockNumber != nil { + callFrame.BlockNumber = ctx.BlockNumber.Uint64() + } + if ctx.TxHash != (common.Hash{}) { + callFrame.TransactionHash = &ctx.TxHash + } + callFrame.TransactionPosition = uint64(ctx.TxIndex) +} + +func convertErrorToParity(call *flatCallFrame) { + if call.Error == "" { + return + } + + if parityError, ok := parityErrorMapping[call.Error]; ok { + call.Error = parityError + } else { + for gethError, parityError := range parityErrorMappingStartingWith { + if strings.HasPrefix(call.Error, gethError) { + call.Error = parityError + } + } + } +} + +func childTraceAddress(a []int, i int) []int { + child := make([]int, 
0, len(a)+1) + child = append(child, a...) + child = append(child, i) + return child +} diff --git a/coreth/eth/tracers/native/gen_account_json.go b/coreth/eth/tracers/native/gen_account_json.go new file mode 100644 index 00000000..4c39cbc3 --- /dev/null +++ b/coreth/eth/tracers/native/gen_account_json.go @@ -0,0 +1,56 @@ +// Code generated by github.com/fjl/gencodec. DO NOT EDIT. + +package native + +import ( + "encoding/json" + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" +) + +var _ = (*accountMarshaling)(nil) + +// MarshalJSON marshals as JSON. +func (a account) MarshalJSON() ([]byte, error) { + type account struct { + Balance *hexutil.Big `json:"balance,omitempty"` + Code hexutil.Bytes `json:"code,omitempty"` + Nonce uint64 `json:"nonce,omitempty"` + Storage map[common.Hash]common.Hash `json:"storage,omitempty"` + } + var enc account + enc.Balance = (*hexutil.Big)(a.Balance) + enc.Code = a.Code + enc.Nonce = a.Nonce + enc.Storage = a.Storage + return json.Marshal(&enc) +} + +// UnmarshalJSON unmarshals from JSON. +func (a *account) UnmarshalJSON(input []byte) error { + type account struct { + Balance *hexutil.Big `json:"balance,omitempty"` + Code *hexutil.Bytes `json:"code,omitempty"` + Nonce *uint64 `json:"nonce,omitempty"` + Storage map[common.Hash]common.Hash `json:"storage,omitempty"` + } + var dec account + if err := json.Unmarshal(input, &dec); err != nil { + return err + } + if dec.Balance != nil { + a.Balance = (*big.Int)(dec.Balance) + } + if dec.Code != nil { + a.Code = *dec.Code + } + if dec.Nonce != nil { + a.Nonce = *dec.Nonce + } + if dec.Storage != nil { + a.Storage = dec.Storage + } + return nil +} diff --git a/coreth/eth/tracers/native/gen_callframe_json.go b/coreth/eth/tracers/native/gen_callframe_json.go new file mode 100644 index 00000000..15e9f096 --- /dev/null +++ b/coreth/eth/tracers/native/gen_callframe_json.go @@ -0,0 +1,107 @@ +// Code generated by github.com/fjl/gencodec. 
DO NOT EDIT. + +package native + +import ( + "encoding/json" + "math/big" + + "github.com/ava-labs/coreth/core/vm" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" +) + +var _ = (*callFrameMarshaling)(nil) + +// MarshalJSON marshals as JSON. +func (c callFrame) MarshalJSON() ([]byte, error) { + type callFrame0 struct { + Type vm.OpCode `json:"-"` + From common.Address `json:"from"` + Gas hexutil.Uint64 `json:"gas"` + GasUsed hexutil.Uint64 `json:"gasUsed"` + To *common.Address `json:"to,omitempty" rlp:"optional"` + Input hexutil.Bytes `json:"input" rlp:"optional"` + Output hexutil.Bytes `json:"output,omitempty" rlp:"optional"` + Error string `json:"error,omitempty" rlp:"optional"` + RevertReason string `json:"revertReason,omitempty"` + Calls []callFrame `json:"calls,omitempty" rlp:"optional"` + Logs []callLog `json:"logs,omitempty" rlp:"optional"` + Value *hexutil.Big `json:"value,omitempty" rlp:"optional"` + TypeString string `json:"type"` + } + var enc callFrame0 + enc.Type = c.Type + enc.From = c.From + enc.Gas = hexutil.Uint64(c.Gas) + enc.GasUsed = hexutil.Uint64(c.GasUsed) + enc.To = c.To + enc.Input = c.Input + enc.Output = c.Output + enc.Error = c.Error + enc.RevertReason = c.RevertReason + enc.Calls = c.Calls + enc.Logs = c.Logs + enc.Value = (*hexutil.Big)(c.Value) + enc.TypeString = c.TypeString() + return json.Marshal(&enc) +} + +// UnmarshalJSON unmarshals from JSON. 
+func (c *callFrame) UnmarshalJSON(input []byte) error { + type callFrame0 struct { + Type *vm.OpCode `json:"-"` + From *common.Address `json:"from"` + Gas *hexutil.Uint64 `json:"gas"` + GasUsed *hexutil.Uint64 `json:"gasUsed"` + To *common.Address `json:"to,omitempty" rlp:"optional"` + Input *hexutil.Bytes `json:"input" rlp:"optional"` + Output *hexutil.Bytes `json:"output,omitempty" rlp:"optional"` + Error *string `json:"error,omitempty" rlp:"optional"` + RevertReason *string `json:"revertReason,omitempty"` + Calls []callFrame `json:"calls,omitempty" rlp:"optional"` + Logs []callLog `json:"logs,omitempty" rlp:"optional"` + Value *hexutil.Big `json:"value,omitempty" rlp:"optional"` + } + var dec callFrame0 + if err := json.Unmarshal(input, &dec); err != nil { + return err + } + if dec.Type != nil { + c.Type = *dec.Type + } + if dec.From != nil { + c.From = *dec.From + } + if dec.Gas != nil { + c.Gas = uint64(*dec.Gas) + } + if dec.GasUsed != nil { + c.GasUsed = uint64(*dec.GasUsed) + } + if dec.To != nil { + c.To = dec.To + } + if dec.Input != nil { + c.Input = *dec.Input + } + if dec.Output != nil { + c.Output = *dec.Output + } + if dec.Error != nil { + c.Error = *dec.Error + } + if dec.RevertReason != nil { + c.RevertReason = *dec.RevertReason + } + if dec.Calls != nil { + c.Calls = dec.Calls + } + if dec.Logs != nil { + c.Logs = dec.Logs + } + if dec.Value != nil { + c.Value = (*big.Int)(dec.Value) + } + return nil +} diff --git a/coreth/eth/tracers/native/gen_flatcallaction_json.go b/coreth/eth/tracers/native/gen_flatcallaction_json.go new file mode 100644 index 00000000..c0756069 --- /dev/null +++ b/coreth/eth/tracers/native/gen_flatcallaction_json.go @@ -0,0 +1,110 @@ +// Code generated by github.com/fjl/gencodec. DO NOT EDIT. 
+ +package native + +import ( + "encoding/json" + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" +) + +var _ = (*flatCallActionMarshaling)(nil) + +// MarshalJSON marshals as JSON. +func (f flatCallAction) MarshalJSON() ([]byte, error) { + type flatCallAction struct { + Author *common.Address `json:"author,omitempty"` + RewardType string `json:"rewardType,omitempty"` + SelfDestructed *common.Address `json:"address,omitempty"` + Balance *hexutil.Big `json:"balance,omitempty"` + CallType string `json:"callType,omitempty"` + CreationMethod string `json:"creationMethod,omitempty"` + From *common.Address `json:"from,omitempty"` + Gas *hexutil.Uint64 `json:"gas,omitempty"` + Init *hexutil.Bytes `json:"init,omitempty"` + Input *hexutil.Bytes `json:"input,omitempty"` + RefundAddress *common.Address `json:"refundAddress,omitempty"` + To *common.Address `json:"to,omitempty"` + Value *hexutil.Big `json:"value,omitempty"` + } + var enc flatCallAction + enc.Author = f.Author + enc.RewardType = f.RewardType + enc.SelfDestructed = f.SelfDestructed + enc.Balance = (*hexutil.Big)(f.Balance) + enc.CallType = f.CallType + enc.CreationMethod = f.CreationMethod + enc.From = f.From + enc.Gas = (*hexutil.Uint64)(f.Gas) + enc.Init = (*hexutil.Bytes)(f.Init) + enc.Input = (*hexutil.Bytes)(f.Input) + enc.RefundAddress = f.RefundAddress + enc.To = f.To + enc.Value = (*hexutil.Big)(f.Value) + return json.Marshal(&enc) +} + +// UnmarshalJSON unmarshals from JSON. 
+func (f *flatCallAction) UnmarshalJSON(input []byte) error { + type flatCallAction struct { + Author *common.Address `json:"author,omitempty"` + RewardType *string `json:"rewardType,omitempty"` + SelfDestructed *common.Address `json:"address,omitempty"` + Balance *hexutil.Big `json:"balance,omitempty"` + CallType *string `json:"callType,omitempty"` + CreationMethod *string `json:"creationMethod,omitempty"` + From *common.Address `json:"from,omitempty"` + Gas *hexutil.Uint64 `json:"gas,omitempty"` + Init *hexutil.Bytes `json:"init,omitempty"` + Input *hexutil.Bytes `json:"input,omitempty"` + RefundAddress *common.Address `json:"refundAddress,omitempty"` + To *common.Address `json:"to,omitempty"` + Value *hexutil.Big `json:"value,omitempty"` + } + var dec flatCallAction + if err := json.Unmarshal(input, &dec); err != nil { + return err + } + if dec.Author != nil { + f.Author = dec.Author + } + if dec.RewardType != nil { + f.RewardType = *dec.RewardType + } + if dec.SelfDestructed != nil { + f.SelfDestructed = dec.SelfDestructed + } + if dec.Balance != nil { + f.Balance = (*big.Int)(dec.Balance) + } + if dec.CallType != nil { + f.CallType = *dec.CallType + } + if dec.CreationMethod != nil { + f.CreationMethod = *dec.CreationMethod + } + if dec.From != nil { + f.From = dec.From + } + if dec.Gas != nil { + f.Gas = (*uint64)(dec.Gas) + } + if dec.Init != nil { + f.Init = (*[]byte)(dec.Init) + } + if dec.Input != nil { + f.Input = (*[]byte)(dec.Input) + } + if dec.RefundAddress != nil { + f.RefundAddress = dec.RefundAddress + } + if dec.To != nil { + f.To = dec.To + } + if dec.Value != nil { + f.Value = (*big.Int)(dec.Value) + } + return nil +} diff --git a/coreth/eth/tracers/native/gen_flatcallresult_json.go b/coreth/eth/tracers/native/gen_flatcallresult_json.go new file mode 100644 index 00000000..e9fa5e44 --- /dev/null +++ b/coreth/eth/tracers/native/gen_flatcallresult_json.go @@ -0,0 +1,55 @@ +// Code generated by github.com/fjl/gencodec. DO NOT EDIT. 
+ +package native + +import ( + "encoding/json" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" +) + +var _ = (*flatCallResultMarshaling)(nil) + +// MarshalJSON marshals as JSON. +func (f flatCallResult) MarshalJSON() ([]byte, error) { + type flatCallResult struct { + Address *common.Address `json:"address,omitempty"` + Code *hexutil.Bytes `json:"code,omitempty"` + GasUsed *hexutil.Uint64 `json:"gasUsed,omitempty"` + Output *hexutil.Bytes `json:"output,omitempty"` + } + var enc flatCallResult + enc.Address = f.Address + enc.Code = (*hexutil.Bytes)(f.Code) + enc.GasUsed = (*hexutil.Uint64)(f.GasUsed) + enc.Output = (*hexutil.Bytes)(f.Output) + return json.Marshal(&enc) +} + +// UnmarshalJSON unmarshals from JSON. +func (f *flatCallResult) UnmarshalJSON(input []byte) error { + type flatCallResult struct { + Address *common.Address `json:"address,omitempty"` + Code *hexutil.Bytes `json:"code,omitempty"` + GasUsed *hexutil.Uint64 `json:"gasUsed,omitempty"` + Output *hexutil.Bytes `json:"output,omitempty"` + } + var dec flatCallResult + if err := json.Unmarshal(input, &dec); err != nil { + return err + } + if dec.Address != nil { + f.Address = dec.Address + } + if dec.Code != nil { + f.Code = (*[]byte)(dec.Code) + } + if dec.GasUsed != nil { + f.GasUsed = (*uint64)(dec.GasUsed) + } + if dec.Output != nil { + f.Output = (*[]byte)(dec.Output) + } + return nil +} diff --git a/coreth/eth/tracers/native/mux.go b/coreth/eth/tracers/native/mux.go new file mode 100644 index 00000000..02a606e2 --- /dev/null +++ b/coreth/eth/tracers/native/mux.go @@ -0,0 +1,148 @@ +// (c) 2023, Ava Labs, Inc. +// +// This file is a derived work, based on the go-ethereum library whose original +// notices appear below. +// +// It is distributed under a license compatible with the licensing terms of the +// original code from which it is derived. +// +// Much love to the original authors for their work. 
+// ********** +// Copyright 2022 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package native + +import ( + "encoding/json" + "math/big" + + "github.com/ava-labs/coreth/core/vm" + "github.com/ava-labs/coreth/eth/tracers" + "github.com/ethereum/go-ethereum/common" +) + +func init() { + tracers.DefaultDirectory.Register("muxTracer", newMuxTracer, false) +} + +// muxTracer is a go implementation of the Tracer interface which +// runs multiple tracers in one go. +type muxTracer struct { + names []string + tracers []tracers.Tracer +} + +// newMuxTracer returns a new mux tracer. +func newMuxTracer(ctx *tracers.Context, cfg json.RawMessage) (tracers.Tracer, error) { + var config map[string]json.RawMessage + if cfg != nil { + if err := json.Unmarshal(cfg, &config); err != nil { + return nil, err + } + } + objects := make([]tracers.Tracer, 0, len(config)) + names := make([]string, 0, len(config)) + for k, v := range config { + t, err := tracers.DefaultDirectory.New(k, ctx, v) + if err != nil { + return nil, err + } + objects = append(objects, t) + names = append(names, k) + } + + return &muxTracer{names: names, tracers: objects}, nil +} + +// CaptureStart implements the EVMLogger interface to initialize the tracing operation. 
+func (t *muxTracer) CaptureStart(env *vm.EVM, from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) { + for _, t := range t.tracers { + t.CaptureStart(env, from, to, create, input, gas, value) + } +} + +// CaptureEnd is called after the call finishes to finalize the tracing. +func (t *muxTracer) CaptureEnd(output []byte, gasUsed uint64, err error) { + for _, t := range t.tracers { + t.CaptureEnd(output, gasUsed, err) + } +} + +// CaptureState implements the EVMLogger interface to trace a single step of VM execution. +func (t *muxTracer) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, depth int, err error) { + for _, t := range t.tracers { + t.CaptureState(pc, op, gas, cost, scope, rData, depth, err) + } +} + +// CaptureFault implements the EVMLogger interface to trace an execution fault. +func (t *muxTracer) CaptureFault(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, depth int, err error) { + for _, t := range t.tracers { + t.CaptureFault(pc, op, gas, cost, scope, depth, err) + } +} + +// CaptureEnter is called when EVM enters a new scope (via call, create or selfdestruct). +func (t *muxTracer) CaptureEnter(typ vm.OpCode, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) { + for _, t := range t.tracers { + t.CaptureEnter(typ, from, to, input, gas, value) + } +} + +// CaptureExit is called when EVM exits a scope, even if the scope didn't +// execute any code. +func (t *muxTracer) CaptureExit(output []byte, gasUsed uint64, err error) { + for _, t := range t.tracers { + t.CaptureExit(output, gasUsed, err) + } +} + +func (t *muxTracer) CaptureTxStart(gasLimit uint64) { + for _, t := range t.tracers { + t.CaptureTxStart(gasLimit) + } +} + +func (t *muxTracer) CaptureTxEnd(restGas uint64) { + for _, t := range t.tracers { + t.CaptureTxEnd(restGas) + } +} + +// GetResult returns an empty json object. 
+func (t *muxTracer) GetResult() (json.RawMessage, error) { + resObject := make(map[string]json.RawMessage) + for i, tt := range t.tracers { + r, err := tt.GetResult() + if err != nil { + return nil, err + } + resObject[t.names[i]] = r + } + res, err := json.Marshal(resObject) + if err != nil { + return nil, err + } + return res, nil +} + +// Stop terminates execution of the tracer at the first opportune moment. +func (t *muxTracer) Stop(err error) { + for _, t := range t.tracers { + t.Stop(err) + } +} diff --git a/coreth/eth/tracers/native/noop.go b/coreth/eth/tracers/native/noop.go index 23884900..11daa94a 100644 --- a/coreth/eth/tracers/native/noop.go +++ b/coreth/eth/tracers/native/noop.go @@ -29,7 +29,6 @@ package native import ( "encoding/json" "math/big" - "time" "github.com/ava-labs/coreth/core/vm" "github.com/ava-labs/coreth/eth/tracers" @@ -37,7 +36,7 @@ import ( ) func init() { - register("noopTracer", newNoopTracer) + tracers.DefaultDirectory.Register("noopTracer", newNoopTracer, false) } // noopTracer is a go implementation of the Tracer interface which @@ -54,7 +53,7 @@ func (t *noopTracer) CaptureStart(env *vm.EVM, from common.Address, to common.Ad } // CaptureEnd is called after the call finishes to finalize the tracing. -func (t *noopTracer) CaptureEnd(output []byte, gasUsed uint64, _ time.Duration, err error) { +func (t *noopTracer) CaptureEnd(output []byte, gasUsed uint64, err error) { } // CaptureState implements the EVMLogger interface to trace a single step of VM execution. 
diff --git a/coreth/eth/tracers/native/prestate.go b/coreth/eth/tracers/native/prestate.go index 30a2bc74..5c800646 100644 --- a/coreth/eth/tracers/native/prestate.go +++ b/coreth/eth/tracers/native/prestate.go @@ -27,44 +27,76 @@ package native import ( + "bytes" "encoding/json" "math/big" "sync/atomic" - "time" "github.com/ava-labs/coreth/core/vm" "github.com/ava-labs/coreth/eth/tracers" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/log" ) +//go:generate go run github.com/fjl/gencodec -type account -field-override accountMarshaling -out gen_account_json.go + func init() { - register("prestateTracer", newPrestateTracer) + tracers.DefaultDirectory.Register("prestateTracer", newPrestateTracer, false) } -type prestate = map[common.Address]*account +type state = map[common.Address]*account + type account struct { - Balance string `json:"balance"` - Nonce uint64 `json:"nonce"` - Code string `json:"code"` - Storage map[common.Hash]common.Hash `json:"storage"` + Balance *big.Int `json:"balance,omitempty"` + Code []byte `json:"code,omitempty"` + Nonce uint64 `json:"nonce,omitempty"` + Storage map[common.Hash]common.Hash `json:"storage,omitempty"` +} + +func (a *account) exists() bool { + return a.Nonce > 0 || len(a.Code) > 0 || len(a.Storage) > 0 || (a.Balance != nil && a.Balance.Sign() != 0) +} + +type accountMarshaling struct { + Balance *hexutil.Big + Code hexutil.Bytes } type prestateTracer struct { + noopTracer env *vm.EVM - prestate prestate + pre state + post state create bool to common.Address gasLimit uint64 // Amount of gas bought for the whole tx - interrupt uint32 // Atomic flag to signal execution interruption - reason error // Textual reason for the interruption + config prestateTracerConfig + interrupt atomic.Bool // Atomic flag to signal execution interruption + reason error // Textual reason for the interruption + created 
map[common.Address]bool + deleted map[common.Address]bool +} + +type prestateTracerConfig struct { + DiffMode bool `json:"diffMode"` // If true, this tracer will return state modifications } -func newPrestateTracer(ctx *tracers.Context, _ json.RawMessage) (tracers.Tracer, error) { - // First callframe contains tx context info - // and is populated on start and end. - return &prestateTracer{prestate: prestate{}}, nil +func newPrestateTracer(ctx *tracers.Context, cfg json.RawMessage) (tracers.Tracer, error) { + var config prestateTracerConfig + if cfg != nil { + if err := json.Unmarshal(cfg, &config); err != nil { + return nil, err + } + } + return &prestateTracer{ + pre: state{}, + post: state{}, + config: config, + created: make(map[common.Address]bool), + deleted: make(map[common.Address]bool), + }, nil } // CaptureStart implements the EVMLogger interface to initialize the tracing operation. @@ -75,82 +107,168 @@ func (t *prestateTracer) CaptureStart(env *vm.EVM, from common.Address, to commo t.lookupAccount(from) t.lookupAccount(to) + t.lookupAccount(env.Context.Coinbase) // The recipient balance includes the value transferred. - toBal := hexutil.MustDecodeBig(t.prestate[to].Balance) - toBal = new(big.Int).Sub(toBal, value) - t.prestate[to].Balance = hexutil.EncodeBig(toBal) + toBal := new(big.Int).Sub(t.pre[to].Balance, value) + t.pre[to].Balance = toBal // The sender balance is after reducing: value and gasLimit. // We need to re-add them to get the pre-tx balance. 
- fromBal := hexutil.MustDecodeBig(t.prestate[from].Balance) + fromBal := new(big.Int).Set(t.pre[from].Balance) gasPrice := env.TxContext.GasPrice consumedGas := new(big.Int).Mul(gasPrice, new(big.Int).SetUint64(t.gasLimit)) fromBal.Add(fromBal, new(big.Int).Add(value, consumedGas)) - t.prestate[from].Balance = hexutil.EncodeBig(fromBal) - t.prestate[from].Nonce-- + t.pre[from].Balance = fromBal + t.pre[from].Nonce-- + + if create && t.config.DiffMode { + t.created[to] = true + } } // CaptureEnd is called after the call finishes to finalize the tracing. -func (t *prestateTracer) CaptureEnd(output []byte, gasUsed uint64, _ time.Duration, err error) { +func (t *prestateTracer) CaptureEnd(output []byte, gasUsed uint64, err error) { + if t.config.DiffMode { + return + } + if t.create { - // Exclude created contract. - delete(t.prestate, t.to) + // Keep existing account prior to contract creation at that address + if s := t.pre[t.to]; s != nil && !s.exists() { + // Exclude newly created contract. + delete(t.pre, t.to) + } } } // CaptureState implements the EVMLogger interface to trace a single step of VM execution. 
func (t *prestateTracer) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, depth int, err error) { + if err != nil { + return + } + // Skip if tracing was interrupted + if t.interrupt.Load() { + return + } stack := scope.Stack stackData := stack.Data() stackLen := len(stackData) + caller := scope.Contract.Address() switch { case stackLen >= 1 && (op == vm.SLOAD || op == vm.SSTORE): slot := common.Hash(stackData[stackLen-1].Bytes32()) - t.lookupStorage(scope.Contract.Address(), slot) + t.lookupStorage(caller, slot) case stackLen >= 1 && (op == vm.EXTCODECOPY || op == vm.EXTCODEHASH || op == vm.EXTCODESIZE || op == vm.BALANCE || op == vm.SELFDESTRUCT): addr := common.Address(stackData[stackLen-1].Bytes20()) t.lookupAccount(addr) + if op == vm.SELFDESTRUCT { + t.deleted[caller] = true + } case stackLen >= 5 && (op == vm.DELEGATECALL || op == vm.CALL || op == vm.STATICCALL || op == vm.CALLCODE): addr := common.Address(stackData[stackLen-2].Bytes20()) t.lookupAccount(addr) case op == vm.CREATE: - addr := scope.Contract.Address() - nonce := t.env.StateDB.GetNonce(addr) - t.lookupAccount(crypto.CreateAddress(addr, nonce)) + nonce := t.env.StateDB.GetNonce(caller) + addr := crypto.CreateAddress(caller, nonce) + t.lookupAccount(addr) + t.created[addr] = true case stackLen >= 4 && op == vm.CREATE2: offset := stackData[stackLen-2] size := stackData[stackLen-3] - init := scope.Memory.GetCopy(int64(offset.Uint64()), int64(size.Uint64())) + init, err := tracers.GetMemoryCopyPadded(scope.Memory, int64(offset.Uint64()), int64(size.Uint64())) + if err != nil { + log.Warn("failed to copy CREATE2 input", "err", err, "tracer", "prestateTracer", "offset", offset, "size", size) + return + } inithash := crypto.Keccak256(init) salt := stackData[stackLen-4] - t.lookupAccount(crypto.CreateAddress2(scope.Contract.Address(), salt.Bytes32(), inithash)) + addr := crypto.CreateAddress2(caller, salt.Bytes32(), inithash) + t.lookupAccount(addr) + 
t.created[addr] = true } } -// CaptureFault implements the EVMLogger interface to trace an execution fault. -func (t *prestateTracer) CaptureFault(pc uint64, op vm.OpCode, gas, cost uint64, _ *vm.ScopeContext, depth int, err error) { -} - -// CaptureEnter is called when EVM enters a new scope (via call, create or selfdestruct). -func (t *prestateTracer) CaptureEnter(typ vm.OpCode, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) { -} - func (t *prestateTracer) CaptureTxStart(gasLimit uint64) { t.gasLimit = gasLimit } -func (t *prestateTracer) CaptureTxEnd(restGas uint64) {} +func (t *prestateTracer) CaptureTxEnd(restGas uint64) { + if !t.config.DiffMode { + return + } -// CaptureExit is called when EVM exits a scope, even if the scope didn't -// execute any code. -func (t *prestateTracer) CaptureExit(output []byte, gasUsed uint64, err error) { + for addr, state := range t.pre { + // The deleted account's state is pruned from `post` but kept in `pre` + if _, ok := t.deleted[addr]; ok { + continue + } + modified := false + postAccount := &account{Storage: make(map[common.Hash]common.Hash)} + newBalance := t.env.StateDB.GetBalance(addr) + newNonce := t.env.StateDB.GetNonce(addr) + newCode := t.env.StateDB.GetCode(addr) + + if newBalance.Cmp(t.pre[addr].Balance) != 0 { + modified = true + postAccount.Balance = newBalance + } + if newNonce != t.pre[addr].Nonce { + modified = true + postAccount.Nonce = newNonce + } + if !bytes.Equal(newCode, t.pre[addr].Code) { + modified = true + postAccount.Code = newCode + } + + for key, val := range state.Storage { + // don't include the empty slot + if val == (common.Hash{}) { + delete(t.pre[addr].Storage, key) + } + + newVal := t.env.StateDB.GetState(addr, key) + if val == newVal { + // Omit unchanged slots + delete(t.pre[addr].Storage, key) + } else { + modified = true + if newVal != (common.Hash{}) { + postAccount.Storage[key] = newVal + } + } + } + + if modified { + t.post[addr] = postAccount + } 
else { + // if state is not modified, then no need to include into the pre state + delete(t.pre, addr) + } + } + // the new created contracts' prestate were empty, so delete them + for a := range t.created { + // the created contract maybe exists in statedb before the creating tx + if s := t.pre[a]; s != nil && !s.exists() { + delete(t.pre, a) + } + } } // GetResult returns the json-encoded nested list of call traces, and any // error arising from the encoding or forceful termination (via `Stop`). func (t *prestateTracer) GetResult() (json.RawMessage, error) { - res, err := json.Marshal(t.prestate) + var res []byte + var err error + if t.config.DiffMode { + res, err = json.Marshal(struct { + Post state `json:"post"` + Pre state `json:"pre"` + }{t.post, t.pre}) + } else { + res, err = json.Marshal(t.pre) + } if err != nil { return nil, err } @@ -160,19 +278,20 @@ func (t *prestateTracer) GetResult() (json.RawMessage, error) { // Stop terminates execution of the tracer at the first opportune moment. func (t *prestateTracer) Stop(err error) { t.reason = err - atomic.StoreUint32(&t.interrupt, 1) + t.interrupt.Store(true) } // lookupAccount fetches details of an account and adds it to the prestate // if it doesn't exist there. func (t *prestateTracer) lookupAccount(addr common.Address) { - if _, ok := t.prestate[addr]; ok { + if _, ok := t.pre[addr]; ok { return } - t.prestate[addr] = &account{ - Balance: bigToHex(t.env.StateDB.GetBalance(addr)), + + t.pre[addr] = &account{ + Balance: t.env.StateDB.GetBalance(addr), Nonce: t.env.StateDB.GetNonce(addr), - Code: bytesToHex(t.env.StateDB.GetCode(addr)), + Code: t.env.StateDB.GetCode(addr), Storage: make(map[common.Hash]common.Hash), } } @@ -181,8 +300,16 @@ func (t *prestateTracer) lookupAccount(addr common.Address) { // it to the prestate of the given contract. It assumes `lookupAccount` // has been performed on the contract before. 
func (t *prestateTracer) lookupStorage(addr common.Address, key common.Hash) { - if _, ok := t.prestate[addr].Storage[key]; ok { + // lookupStorage assumes that lookupAccount has already been called. + // This assumption is violated for some historical blocks by the NativeAssetCall + // precompile. To fix this, we perform an extra call to lookupAccount here to ensure + // that the pre-state account is populated before attempting to read from the Storage + // map. When the invariant is maintained properly (since de-activation of the precompile), + // lookupAccount is a no-op. When the invariant is broken by the precompile, this avoids + // the panic and correctly captures the account prestate before the next opcode is executed. + t.lookupAccount(addr) + if _, ok := t.pre[addr].Storage[key]; ok { return } - t.prestate[addr].Storage[key] = t.env.StateDB.GetState(addr, key) + t.pre[addr].Storage[key] = t.env.StateDB.GetState(addr, key) } diff --git a/coreth/eth/tracers/native/revertreason.go b/coreth/eth/tracers/native/revertreason.go deleted file mode 100644 index 5056ae7c..00000000 --- a/coreth/eth/tracers/native/revertreason.go +++ /dev/null @@ -1,119 +0,0 @@ -// (c) 2022, Ava Labs, Inc. -// -// This file is a derived work, based on the go-ethereum library whose original -// notices appear below. -// -// It is distributed under a license compatible with the licensing terms of the -// original code from which it is derived. -// -// Much love to the original authors for their work. -// ********** -// Copyright 2022 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
-// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package native - -import ( - "bytes" - "encoding/json" - "math/big" - "sync/atomic" - "time" - - "github.com/ava-labs/coreth/accounts/abi" - "github.com/ava-labs/coreth/core/vm" - "github.com/ava-labs/coreth/eth/tracers" - "github.com/ava-labs/coreth/vmerrs" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/crypto" -) - -func init() { - register("revertReasonTracer", newRevertReasonTracer) -} - -var revertSelector = crypto.Keccak256([]byte("Error(string)"))[:4] - -// revertReasonTracer is a go implementation of the Tracer interface which -// track the error message or revert reason return by the contract. -type revertReasonTracer struct { - env *vm.EVM - revertReason string // The revert reason return from the tx, if tx success, empty string return - interrupt uint32 // Atomic flag to signal execution interruption - reason error // Textual reason for the interruption -} - -// newRevertReasonTracer returns a new revert reason tracer. -func newRevertReasonTracer(_ *tracers.Context, _ json.RawMessage) (tracers.Tracer, error) { - return &revertReasonTracer{}, nil -} - -// CaptureStart implements the EVMLogger interface to initialize the tracing operation. -func (t *revertReasonTracer) CaptureStart(env *vm.EVM, _ common.Address, _ common.Address, _ bool, _ []byte, _ uint64, _ *big.Int) { - t.env = env -} - -// CaptureEnd is called after the call finishes to finalize the tracing. 
-func (t *revertReasonTracer) CaptureEnd(output []byte, _ uint64, _ time.Duration, err error) { - if err != nil { - if err == vmerrs.ErrExecutionReverted && len(output) > 4 && bytes.Equal(output[:4], revertSelector) { - errMsg, _ := abi.UnpackRevert(output) - t.revertReason = err.Error() + ": " + errMsg - } else { - t.revertReason = err.Error() - } - } -} - -// CaptureState implements the EVMLogger interface to trace a single step of VM execution. -func (t *revertReasonTracer) CaptureState(_ uint64, _ vm.OpCode, _, _ uint64, _ *vm.ScopeContext, _ []byte, _ int, _ error) { -} - -// CaptureFault implements the EVMLogger interface to trace an execution fault. -func (t *revertReasonTracer) CaptureFault(_ uint64, _ vm.OpCode, _, _ uint64, _ *vm.ScopeContext, _ int, _ error) { -} - -// CaptureEnter is called when EVM enters a new scope (via call, create or selfdestruct). -func (t *revertReasonTracer) CaptureEnter(_ vm.OpCode, _ common.Address, _ common.Address, _ []byte, _ uint64, _ *big.Int) { - // Skip if tracing was interrupted - if atomic.LoadUint32(&t.interrupt) > 0 { - t.env.Cancel() - return - } -} - -// CaptureExit is called when EVM exits a scope, even if the scope didn't -// execute any code. -func (t *revertReasonTracer) CaptureExit(_ []byte, _ uint64, _ error) {} - -func (t *revertReasonTracer) CaptureTxStart(_ uint64) {} - -func (t *revertReasonTracer) CaptureTxEnd(_ uint64) {} - -// GetResult returns an error message json object. -func (t *revertReasonTracer) GetResult() (json.RawMessage, error) { - res, err := json.Marshal(t.revertReason) - if err != nil { - return nil, err - } - return res, t.reason -} - -// Stop terminates execution of the tracer at the first opportune moment. 
-func (t *revertReasonTracer) Stop(err error) { - t.reason = err - atomic.StoreUint32(&t.interrupt, 1) -} diff --git a/coreth/eth/tracers/native/tracer.go b/coreth/eth/tracers/native/tracer.go deleted file mode 100644 index ed6f62df..00000000 --- a/coreth/eth/tracers/native/tracer.go +++ /dev/null @@ -1,89 +0,0 @@ -// (c) 2020-2021, Ava Labs, Inc. -// -// This file is a derived work, based on the go-ethereum library whose original -// notices appear below. -// -// It is distributed under a license compatible with the licensing terms of the -// original code from which it is derived. -// -// Much love to the original authors for their work. -// ********** -// Copyright 2021 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -// Package native is a collection of tracers written in go. -// -// In order to add a native tracer and have it compiled into the binary, a new -// file needs to be added to this folder, containing an implementation of the -// `eth.tracers.Tracer` interface. -// -// Aside from implementing the tracer, it also needs to register itself, using the -// `register` method -- and this needs to be done in the package initialization. 
-// -// Example: -// -// func init() { -// register("noopTracerNative", newNoopTracer) -// } -package native - -import ( - "encoding/json" - "errors" - - "github.com/ava-labs/coreth/eth/tracers" -) - -// init registers itself this packages as a lookup for tracers. -func init() { - tracers.RegisterLookup(false, lookup) -} - -// ctorFn is the constructor signature of a native tracer. -type ctorFn = func(*tracers.Context, json.RawMessage) (tracers.Tracer, error) - -/* -ctors is a map of package-local tracer constructors. - -We cannot be certain about the order of init-functions within a package, -The go spec (https://golang.org/ref/spec#Package_initialization) says - -> To ensure reproducible initialization behavior, build systems -> are encouraged to present multiple files belonging to the same -> package in lexical file name order to a compiler. - -Hence, we cannot make the map in init, but must make it upon first use. -*/ -var ctors map[string]ctorFn - -// register is used by native tracers to register their presence. -func register(name string, ctor ctorFn) { - if ctors == nil { - ctors = make(map[string]ctorFn) - } - ctors[name] = ctor -} - -// lookup returns a tracer, if one can be matched to the given name. -func lookup(name string, ctx *tracers.Context, cfg json.RawMessage) (tracers.Tracer, error) { - if ctors == nil { - ctors = make(map[string]ctorFn) - } - if ctor, ok := ctors[name]; ok { - return ctor(ctx, cfg) - } - return nil, errors.New("no tracer found") -} diff --git a/coreth/eth/tracers/tracers.go b/coreth/eth/tracers/tracers.go index c8e1b5a2..7a754fa8 100644 --- a/coreth/eth/tracers/tracers.go +++ b/coreth/eth/tracers/tracers.go @@ -20,6 +20,8 @@ package tracers import ( "encoding/json" "errors" + "fmt" + "math/big" "github.com/ava-labs/coreth/core/vm" "github.com/ethereum/go-ethereum/common" @@ -28,9 +30,10 @@ import ( // Context contains some contextual infos for a transaction execution that is not // available from within the EVM object. 
type Context struct { - BlockHash common.Hash // Hash of the block the tx is contained within (zero if dangling tx or call) - TxIndex int // Index of the transaction within a block (zero if dangling tx or call) - TxHash common.Hash // Hash of the transaction being traced (zero if dangling call) + BlockHash common.Hash // Hash of the block the tx is contained within (zero if dangling tx or call) + BlockNumber *big.Int // Number of the block the tx is contained within (zero if dangling tx or call) + TxIndex int // Index of the transaction within a block (zero if dangling tx or call) + TxHash common.Hash // Hash of the transaction being traced (zero if dangling call) } // Tracer interface extends vm.EVMLogger and additionally @@ -42,31 +45,79 @@ type Tracer interface { Stop(err error) } -type lookupFunc func(string, *Context, json.RawMessage) (Tracer, error) +type ctorFn func(*Context, json.RawMessage) (Tracer, error) +type jsCtorFn func(string, *Context, json.RawMessage) (Tracer, error) -var ( - lookups []lookupFunc -) +type elem struct { + ctor ctorFn + isJS bool +} -// RegisterLookup registers a method as a lookup for tracers, meaning that -// users can invoke a named tracer through that lookup. If 'wildcard' is true, -// then the lookup will be placed last. This is typically meant for interpreted -// engines (js) which can evaluate dynamic user-supplied code. -func RegisterLookup(wildcard bool, lookup lookupFunc) { - if wildcard { - lookups = append(lookups, lookup) - } else { - lookups = append([]lookupFunc{lookup}, lookups...) - } +// DefaultDirectory is the collection of tracers bundled by default. +var DefaultDirectory = directory{elems: make(map[string]elem)} + +// directory provides functionality to lookup a tracer by name +// and a function to instantiate it. It falls back to a JS code evaluator +// if no tracer of the given name exists. 
+type directory struct { + elems map[string]elem + jsEval jsCtorFn +} + +// Register registers a method as a lookup for tracers, meaning that +// users can invoke a named tracer through that lookup. +func (d *directory) Register(name string, f ctorFn, isJS bool) { + d.elems[name] = elem{ctor: f, isJS: isJS} +} + +// RegisterJSEval registers a tracer that is able to parse +// dynamic user-provided JS code. +func (d *directory) RegisterJSEval(f jsCtorFn) { + d.jsEval = f } // New returns a new instance of a tracer, by iterating through the -// registered lookups. -func New(code string, ctx *Context, cfg json.RawMessage) (Tracer, error) { - for _, lookup := range lookups { - if tracer, err := lookup(code, ctx, cfg); err == nil { - return tracer, nil - } +// registered lookups. Name is either name of an existing tracer +// or an arbitrary JS code. +func (d *directory) New(name string, ctx *Context, cfg json.RawMessage) (Tracer, error) { + if elem, ok := d.elems[name]; ok { + return elem.ctor(ctx, cfg) + } + // Assume JS code + return d.jsEval(name, ctx, cfg) +} + +// IsJS will return true if the given tracer will evaluate +// JS code. Because code evaluation has high overhead, this +// info will be used in determining fast and slow code paths. +func (d *directory) IsJS(name string) bool { + if elem, ok := d.elems[name]; ok { + return elem.isJS + } + // JS eval will execute JS code + return true +} + +const ( + memoryPadLimit = 1024 * 1024 +) + +// GetMemoryCopyPadded returns offset + size as a new slice. +// It zero-pads the slice if it extends beyond memory bounds. 
+func GetMemoryCopyPadded(m *vm.Memory, offset, size int64) ([]byte, error) { + if offset < 0 || size < 0 { + return nil, errors.New("offset or size must not be negative") + } + if int(offset+size) < m.Len() { // slice fully inside memory + return m.GetCopy(offset, size), nil + } + paddingNeeded := int(offset+size) - m.Len() + if paddingNeeded > memoryPadLimit { + return nil, fmt.Errorf("reached limit for padding memory slice: %d", paddingNeeded) + } + cpy := make([]byte, size) + if overlap := int64(m.Len()) - offset; overlap > 0 { + copy(cpy, m.GetPtr(offset, overlap)) } - return nil, errors.New("tracer not found") + return cpy, nil } diff --git a/coreth/eth/tracers/tracers_test.go b/coreth/eth/tracers/tracers_test.go index 72a226fe..ff7200ac 100644 --- a/coreth/eth/tracers/tracers_test.go +++ b/coreth/eth/tracers/tracers_test.go @@ -66,7 +66,7 @@ func BenchmarkTransactionTrace(b *testing.B) { Transfer: core.Transfer, Coinbase: common.Address{}, BlockNumber: new(big.Int).SetUint64(uint64(5)), - Time: new(big.Int).SetUint64(uint64(5)), + Time: 5, Difficulty: big.NewInt(0xffffffff), GasLimit: gas, BaseFee: big.NewInt(8), @@ -97,8 +97,8 @@ func BenchmarkTransactionTrace(b *testing.B) { //EnableMemory: false, //EnableReturnData: false, }) - evm := vm.NewEVM(context, txContext, statedb, params.TestChainConfig, vm.Config{Debug: true, Tracer: tracer}) - msg, err := tx.AsMessage(signer, nil) + evm := vm.NewEVM(context, txContext, statedb, params.TestChainConfig, vm.Config{Tracer: tracer}) + msg, err := core.TransactionToMessage(tx, signer, nil) if err != nil { b.Fatalf("failed to prepare transaction for tracing: %v", err) } @@ -119,3 +119,41 @@ func BenchmarkTransactionTrace(b *testing.B) { tracer.Reset() } } + +func TestMemCopying(t *testing.T) { + for i, tc := range []struct { + memsize int64 + offset int64 + size int64 + wantErr string + wantSize int + }{ + {0, 0, 100, "", 100}, // Should pad up to 100 + {0, 100, 0, "", 0}, // No need to pad (0 size) + {100, 50, 100, 
"", 100}, // Should pad 100-150 + {100, 50, 5, "", 5}, // Wanted range fully within memory + {100, -50, 0, "offset or size must not be negative", 0}, // Errror + {0, 1, 1024*1024 + 1, "reached limit for padding memory slice: 1048578", 0}, // Errror + {10, 0, 1024*1024 + 100, "reached limit for padding memory slice: 1048666", 0}, // Errror + + } { + mem := vm.NewMemory() + mem.Resize(uint64(tc.memsize)) + cpy, err := GetMemoryCopyPadded(mem, tc.offset, tc.size) + if want := tc.wantErr; want != "" { + if err == nil { + t.Fatalf("test %d: want '%v' have no error", i, want) + } + if have := err.Error(); want != have { + t.Fatalf("test %d: want '%v' have '%v'", i, want, have) + } + continue + } + if err != nil { + t.Fatalf("test %d: unexpected error: %v", i, err) + } + if want, have := tc.wantSize, len(cpy); have != want { + t.Fatalf("test %d: want %v have %v", i, want, have) + } + } +} diff --git a/coreth/eth/tracers/tracker.go b/coreth/eth/tracers/tracker.go new file mode 100644 index 00000000..ead14cf3 --- /dev/null +++ b/coreth/eth/tracers/tracker.go @@ -0,0 +1,119 @@ +// (c) 2023, Ava Labs, Inc. +// +// This file is a derived work, based on the go-ethereum library whose original +// notices appear below. +// +// It is distributed under a license compatible with the licensing terms of the +// original code from which it is derived. +// +// Much love to the original authors for their work. +// ********** +// Copyright 2022 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package tracers + +import ( + "fmt" + "sync" +) + +// stateTracker is an auxiliary tool used to cache the release functions of all +// used trace states, and to determine whether the creation of trace state needs +// to be paused in case there are too many states waiting for tracing. +type stateTracker struct { + limit int // Maximum number of states allowed waiting for tracing + oldest uint64 // The number of the oldest state which is still using for trace + used []bool // List of flags indicating whether the trace state has been used up + releases []StateReleaseFunc // List of trace state release functions waiting to be called + cond *sync.Cond + lock *sync.RWMutex +} + +// newStateTracker initializes the tracker with provided state limits and +// the number of the first state that will be used. +func newStateTracker(limit int, oldest uint64) *stateTracker { + lock := new(sync.RWMutex) + return &stateTracker{ + limit: limit, + oldest: oldest, + used: make([]bool, limit), + cond: sync.NewCond(lock), + lock: lock, + } +} + +// releaseState marks the state specified by the number as released and caches +// the corresponding release functions internally. +func (t *stateTracker) releaseState(number uint64, release StateReleaseFunc) { + t.lock.Lock() + defer t.lock.Unlock() + + // Set the state as used, the corresponding flag is indexed by + // the distance between the specified state and the oldest state + // which is still using for trace. 
+ t.used[int(number-t.oldest)] = true + + // If the oldest state is used up, update the oldest marker by moving + // it to the next state which is not used up. + if number == t.oldest { + var count int + for _, used := range t.used { + if !used { + break + } + count += 1 + } + t.oldest += uint64(count) + copy(t.used, t.used[count:]) + + // Clean up the array tail since they are useless now. + for i := t.limit - count; i < t.limit; i++ { + t.used[i] = false + } + // Fire the signal to all waiters that oldest marker is updated. + t.cond.Broadcast() + } + t.releases = append(t.releases, release) +} + +// callReleases invokes all cached release functions. +func (t *stateTracker) callReleases() { + t.lock.Lock() + defer t.lock.Unlock() + + for _, release := range t.releases { + release() + } + t.releases = t.releases[:0] +} + +// wait blocks until the accumulated trace states are less than the limit. +func (t *stateTracker) wait(number uint64) error { + t.lock.Lock() + defer t.lock.Unlock() + + for { + if number < t.oldest { + return fmt.Errorf("invalid state number %d head %d", number, t.oldest) + } + if number < t.oldest+uint64(t.limit) { + // number is now within limit, wait over + return nil + } + t.cond.Wait() + } +} diff --git a/coreth/eth/tracers/tracker_test.go b/coreth/eth/tracers/tracker_test.go new file mode 100644 index 00000000..cd0ce3a8 --- /dev/null +++ b/coreth/eth/tracers/tracker_test.go @@ -0,0 +1,181 @@ +// (c) 2023, Ava Labs, Inc. +// +// This file is a derived work, based on the go-ethereum library whose original +// notices appear below. +// +// It is distributed under a license compatible with the licensing terms of the +// original code from which it is derived. +// +// Much love to the original authors for their work. +// ********** +// Copyright 2022 The go-ethereum Authors +// This file is part of the go-ethereum library. 
+// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see + +package tracers + +import ( + "reflect" + "testing" + "time" +) + +func TestTracker(t *testing.T) { + var cases = []struct { + limit int + calls []uint64 + expHead uint64 + }{ + // Release in order + { + limit: 3, + calls: []uint64{0, 1, 2}, + expHead: 3, + }, + { + limit: 3, + calls: []uint64{0, 1, 2, 3, 4, 5}, + expHead: 6, + }, + + // Release out of order + { + limit: 3, + calls: []uint64{1, 2, 0}, + expHead: 3, + }, + { + limit: 3, + calls: []uint64{1, 2, 0, 5, 4, 3}, + expHead: 6, + }, + } + for _, c := range cases { + tracker := newStateTracker(c.limit, 0) + for _, call := range c.calls { + tracker.releaseState(call, func() {}) + } + tracker.lock.RLock() + head := tracker.oldest + tracker.lock.RUnlock() + + if head != c.expHead { + t.Fatalf("Unexpected head want %d got %d", c.expHead, head) + } + } + + var calls = []struct { + number uint64 + expUsed []bool + expHead uint64 + }{ + // Release the first one, update the oldest flag + { + number: 0, + expUsed: []bool{false, false, false, false, false}, + expHead: 1, + }, + // Release the second one, oldest shouldn't be updated + { + number: 2, + expUsed: []bool{false, true, false, false, false}, + expHead: 1, + }, + // Release the forth one, oldest shouldn't be updated + { + number: 4, + expUsed: []bool{false, true, false, true, false}, + 
expHead: 1, + }, + // Release the first one, the first two should all be cleaned, + // and the remaining flags should all be left-shifted. + { + number: 1, + expUsed: []bool{false, true, false, false, false}, + expHead: 3, + }, + // Release the first one, the first two should all be cleaned + { + number: 3, + expUsed: []bool{false, false, false, false, false}, + expHead: 5, + }, + } + tracker := newStateTracker(5, 0) // limit = 5, oldest = 0 + for _, call := range calls { + tracker.releaseState(call.number, nil) + tracker.lock.RLock() + if !reflect.DeepEqual(tracker.used, call.expUsed) { + t.Fatalf("Unexpected used array") + } + if tracker.oldest != call.expHead { + t.Fatalf("Unexpected head") + } + tracker.lock.RUnlock() + } +} + +func TestTrackerWait(t *testing.T) { + var ( + tracker = newStateTracker(5, 0) // limit = 5, oldest = 0 + result = make(chan error, 1) + doCall = func(number uint64) { + go func() { + result <- tracker.wait(number) + }() + } + checkNoWait = func() { + select { + case <-result: + return + case <-time.NewTimer(time.Second).C: + t.Fatal("No signal fired") + } + } + checkWait = func() { + select { + case <-result: + t.Fatal("Unexpected signal") + case <-time.NewTimer(time.Millisecond * 100).C: + } + } + ) + // States [0, 5) should all be available + doCall(0) + checkNoWait() + + doCall(4) + checkNoWait() + + // State 5 is not available + doCall(5) + checkWait() + + // States [1, 6) are available + tracker.releaseState(0, nil) + checkNoWait() + + // States [1, 6) are available + doCall(7) + checkWait() + + // States [2, 7) are available + tracker.releaseState(1, nil) + checkWait() + + // States [3, 8) are available + tracker.releaseState(2, nil) + checkNoWait() +} diff --git a/coreth/ethclient/ethclient.go b/coreth/ethclient/ethclient.go index ee7a7b58..8804c7f9 100644 --- a/coreth/ethclient/ethclient.go +++ b/coreth/ethclient/ethclient.go @@ -67,11 +67,13 @@ var ( // Client defines interface for typed wrappers for the Ethereum RPC API. 
type Client interface { + Client() *rpc.Client Close() ChainID(context.Context) (*big.Int, error) BlockByHash(context.Context, common.Hash) (*types.Block, error) BlockByNumber(context.Context, *big.Int) (*types.Block, error) BlockNumber(context.Context) (uint64, error) + BlockReceipts(context.Context, rpc.BlockNumberOrHash) ([]*types.Receipt, error) HeaderByHash(context.Context, common.Hash) (*types.Header, error) HeaderByNumber(context.Context, *big.Int) (*types.Header, error) TransactionByHash(context.Context, common.Hash) (tx *types.Transaction, isPending bool, err error) @@ -114,6 +116,7 @@ func Dial(rawurl string) (Client, error) { return DialContext(context.Background(), rawurl) } +// DialContext connects a client to the given URL with context. func DialContext(ctx context.Context, rawurl string) (Client, error) { c, err := rpc.DialContext(ctx, rawurl) if err != nil { @@ -127,10 +130,16 @@ func NewClient(c *rpc.Client) Client { return &client{c} } +// Close closes the underlying RPC connection. func (ec *client) Close() { ec.c.Close() } +// Client gets the underlying RPC client. +func (ec *client) Client() *rpc.Client { + return ec.c +} + // Blockchain Access // ChainID retrieves the current chain ID for transaction replay protection. @@ -167,6 +176,16 @@ func (ec *client) BlockNumber(ctx context.Context) (uint64, error) { return uint64(result), err } +// BlockReceipts returns the receipts of a given block number or hash. 
+func (ec *client) BlockReceipts(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) ([]*types.Receipt, error) { + var r []*types.Receipt + err := ec.c.CallContext(ctx, &r, "eth_getBlockReceipts", blockNrOrHash.String()) + if err == nil && r == nil { + return nil, interfaces.NotFound + } + return r, err +} + type rpcBlock struct { Hash common.Hash `json:"hash"` Transactions []rpcTransaction `json:"transactions"` @@ -180,30 +199,34 @@ func (ec *client) getBlock(ctx context.Context, method string, args ...interface err := ec.c.CallContext(ctx, &raw, method, args...) if err != nil { return nil, err - } else if len(raw) == 0 { - return nil, interfaces.NotFound } + // Decode header and transactions. var head *types.Header - var body rpcBlock if err := json.Unmarshal(raw, &head); err != nil { return nil, err } + // When the block is not found, the API returns JSON null. + if head == nil { + return nil, interfaces.NotFound + } + + var body rpcBlock if err := json.Unmarshal(raw, &body); err != nil { return nil, err } // Quick-verify transaction and uncle lists. This mostly helps with debugging the server. 
if head.UncleHash == types.EmptyUncleHash && len(body.UncleHashes) > 0 { - return nil, fmt.Errorf("server returned non-empty uncle list but block header indicates no uncles") + return nil, errors.New("server returned non-empty uncle list but block header indicates no uncles") } if head.UncleHash != types.EmptyUncleHash && len(body.UncleHashes) == 0 { - return nil, fmt.Errorf("server returned empty uncle list but block header indicates uncles") + return nil, errors.New("server returned empty uncle list but block header indicates uncles") } - if head.TxHash == types.EmptyRootHash && len(body.Transactions) > 0 { - return nil, fmt.Errorf("server returned non-empty transaction list but block header indicates no transactions") + if head.TxHash == types.EmptyTxsHash && len(body.Transactions) > 0 { + return nil, errors.New("server returned non-empty transaction list but block header indicates no transactions") } - if head.TxHash != types.EmptyRootHash && len(body.Transactions) == 0 { - return nil, fmt.Errorf("server returned empty transaction list but block header indicates transactions") + if head.TxHash != types.EmptyTxsHash && len(body.Transactions) == 0 { + return nil, errors.New("server returned empty transaction list but block header indicates transactions") } // Load uncles because they are not included in the block response. var uncles []*types.Header @@ -237,7 +260,7 @@ func (ec *client) getBlock(ctx context.Context, method string, args ...interface } txs[i] = tx.tx } - return types.NewBlockWithHeader(head).WithBody(txs, uncles, body.Version, (*[]byte)(body.BlockExtraData)), nil + return types.NewBlockWithHeader(head).WithBody(txs, uncles).WithExtData(body.Version, (*[]byte)(body.BlockExtraData)), nil } // HeaderByHash returns the block header with the given hash. 
@@ -288,7 +311,7 @@ func (ec *client) TransactionByHash(ctx context.Context, hash common.Hash) (tx * } else if json == nil { return nil, false, interfaces.NotFound } else if _, r, _ := json.tx.RawSignatureValues(); r == nil { - return nil, false, fmt.Errorf("server returned transaction without signature") + return nil, false, errors.New("server returned transaction without signature") } if json.From != nil && json.BlockHash != nil { setSenderFromServer(json.tx, *json.From, *json.BlockHash) @@ -340,7 +363,7 @@ func (ec *client) TransactionInBlock(ctx context.Context, blockHash common.Hash, if json == nil { return nil, interfaces.NotFound } else if _, r, _ := json.tx.RawSignatureValues(); r == nil { - return nil, fmt.Errorf("server returned transaction without signature") + return nil, errors.New("server returned transaction without signature") } if json.From != nil && json.BlockHash != nil { setSenderFromServer(json.tx, *json.From, *json.BlockHash) @@ -381,23 +404,44 @@ func (ec *client) SyncProgress(ctx context.Context) error { // SubscribeNewAcceptedTransactions subscribes to notifications about the accepted transaction hashes on the given channel. func (ec *client) SubscribeNewAcceptedTransactions(ctx context.Context, ch chan<- *common.Hash) (interfaces.Subscription, error) { - return ec.c.EthSubscribe(ctx, ch, "newAcceptedTransactions") + sub, err := ec.c.EthSubscribe(ctx, ch, "newAcceptedTransactions") + if err != nil { + // Defensively prefer returning nil interface explicitly on error-path, instead + // of letting default golang behavior wrap it with non-nil interface that stores + // nil concrete type value. + return nil, err + } + return sub, nil } -// SubscribeNewAcceptedTransactions subscribes to notifications about the accepted transaction hashes on the given channel. +// SubscribeNewPendingTransactions subscribes to notifications about the pending transaction hashes on the given channel. 
func (ec *client) SubscribeNewPendingTransactions(ctx context.Context, ch chan<- *common.Hash) (interfaces.Subscription, error) { - return ec.c.EthSubscribe(ctx, ch, "newPendingTransactions") + sub, err := ec.c.EthSubscribe(ctx, ch, "newPendingTransactions") + if err != nil { + // Defensively prefer returning nil interface explicitly on error-path, instead + // of letting default golang behavior wrap it with non-nil interface that stores + // nil concrete type value. + return nil, err + } + return sub, nil } // SubscribeNewHead subscribes to notifications about the current blockchain head // on the given channel. func (ec *client) SubscribeNewHead(ctx context.Context, ch chan<- *types.Header) (interfaces.Subscription, error) { - return ec.c.EthSubscribe(ctx, ch, "newHeads") + sub, err := ec.c.EthSubscribe(ctx, ch, "newHeads") + if err != nil { + // Defensively prefer returning nil interface explicitly on error-path, instead + // of letting default golang behavior wrap it with non-nil interface that stores + // nil concrete type value. + return nil, err + } + return sub, nil } // State Access -// NetworkID returns the network ID (also known as the chain ID) for this chain. +// NetworkID returns the network ID for this client. func (ec *client) NetworkID(ctx context.Context) (*big.Int, error) { version := new(big.Int) var ver string @@ -469,7 +513,14 @@ func (ec *client) SubscribeFilterLogs(ctx context.Context, q interfaces.FilterQu if err != nil { return nil, err } - return ec.c.EthSubscribe(ctx, ch, "logs", arg) + sub, err := ec.c.EthSubscribe(ctx, ch, "logs", arg) + if err != nil { + // Defensively prefer returning nil interface explicitly on error-path, instead + // of letting default golang behavior wrap it with non-nil interface that stores + // nil concrete type value. 
+ return nil, err + } + return sub, nil } func toFilterArg(q interfaces.FilterQuery) (interface{}, error) { @@ -480,7 +531,7 @@ func toFilterArg(q interfaces.FilterQuery) (interface{}, error) { if q.BlockHash != nil { arg["blockHash"] = *q.BlockHash if q.FromBlock != nil || q.ToBlock != nil { - return nil, fmt.Errorf("cannot specify both BlockHash and FromBlock/ToBlock") + return nil, errors.New("cannot specify both BlockHash and FromBlock/ToBlock") } } else { if q.FromBlock == nil { @@ -628,21 +679,18 @@ func (ec *client) SendTransaction(ctx context.Context, tx *types.Transaction) er } func ToBlockNumArg(number *big.Int) string { - // The Ethereum implementation uses a different mapping from - // negative numbers to special strings (latest, pending) then is - // used on its server side. See rpc/types.go for the comparison. - // In Coreth, latest, pending, and accepted are all treated the same - // therefore, if [number] is nil or a negative number in [-4, -1] - // we want the latest accepted block if number == nil { return "latest" } - low := big.NewInt(-4) - high := big.NewInt(-1) - if number.Cmp(low) >= 0 && number.Cmp(high) <= 0 { - return "latest" + if number.Sign() >= 0 { + return hexutil.EncodeBig(number) + } + // It's negative. + if number.IsInt64() { + return rpc.BlockNumber(number.Int64()).String() } - return hexutil.EncodeBig(number) + // It's negative and large, which is invalid. + return fmt.Sprintf("", number) } func toCallArg(msg interfaces.CallMsg) interface{} { diff --git a/coreth/ethdb/batch.go b/coreth/ethdb/batch.go deleted file mode 100644 index be4f52d8..00000000 --- a/coreth/ethdb/batch.go +++ /dev/null @@ -1,84 +0,0 @@ -// (c) 2020-2021, Ava Labs, Inc. -// -// This file is a derived work, based on the go-ethereum library whose original -// notices appear below. -// -// It is distributed under a license compatible with the licensing terms of the -// original code from which it is derived. 
-// -// Much love to the original authors for their work. -// ********** -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package ethdb - -// IdealBatchSize defines the size of the data batches should ideally add in one -// write. -const IdealBatchSize = 100 * 1024 - -// Batch is a write-only database that commits changes to its host database -// when Write is called. A batch cannot be used concurrently. -type Batch interface { - KeyValueWriter - - // ValueSize retrieves the amount of data queued up for writing. - ValueSize() int - - // Write flushes any accumulated data to disk. - Write() error - - // Reset resets the batch for reuse. - Reset() - - // Replay replays the batch contents. - Replay(w KeyValueWriter) error -} - -// Batcher wraps the NewBatch method of a backing data store. -type Batcher interface { - // NewBatch creates a write-only database that buffers changes to its host db - // until a final write is called. - NewBatch() Batch - - // NewBatchWithSize creates a write-only database batch with pre-allocated buffer. - NewBatchWithSize(size int) Batch -} - -// HookedBatch wraps an arbitrary batch where each operation may be hooked into -// to monitor from black box code. 
-type HookedBatch struct { - Batch - - OnPut func(key []byte, value []byte) // Callback if a key is inserted - OnDelete func(key []byte) // Callback if a key is deleted -} - -// Put inserts the given value into the key-value data store. -func (b HookedBatch) Put(key []byte, value []byte) error { - if b.OnPut != nil { - b.OnPut(key, value) - } - return b.Batch.Put(key, value) -} - -// Delete removes the key from the key-value data store. -func (b HookedBatch) Delete(key []byte) error { - if b.OnDelete != nil { - b.OnDelete(key) - } - return b.Batch.Delete(key) -} diff --git a/coreth/ethdb/database.go b/coreth/ethdb/database.go deleted file mode 100644 index 2cdac31c..00000000 --- a/coreth/ethdb/database.go +++ /dev/null @@ -1,100 +0,0 @@ -// (c) 2020-2021, Ava Labs, Inc. -// -// This file is a derived work, based on the go-ethereum library whose original -// notices appear below. -// -// It is distributed under a license compatible with the licensing terms of the -// original code from which it is derived. -// -// Much love to the original authors for their work. -// ********** -// Copyright 2014 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -// Package ethdb defines the interfaces for an Ethereum data store. 
-package ethdb - -import "io" - -// KeyValueReader wraps the Has and Get method of a backing data store. -type KeyValueReader interface { - // Has retrieves if a key is present in the key-value data store. - Has(key []byte) (bool, error) - - // Get retrieves the given key if it's present in the key-value data store. - Get(key []byte) ([]byte, error) -} - -// KeyValueWriter wraps the Put method of a backing data store. -type KeyValueWriter interface { - // Put inserts the given value into the key-value data store. - Put(key []byte, value []byte) error - - // Delete removes the key from the key-value data store. - Delete(key []byte) error -} - -// Stater wraps the Stat method of a backing data store. -type Stater interface { - // Stat returns a particular internal stat of the database. - Stat(property string) (string, error) -} - -// Compacter wraps the Compact method of a backing data store. -type Compacter interface { - // Compact flattens the underlying data store for the given key range. In essence, - // deleted and overwritten versions are discarded, and the data is rearranged to - // reduce the cost of operations needed to access them. - // - // A nil start is treated as a key before all keys in the data store; a nil limit - // is treated as a key after all keys in the data store. If both is nil then it - // will compact entire data store. - Compact(start []byte, limit []byte) error -} - -// KeyValueStore contains all the methods required to allow handling different -// key-value data stores backing the high level database. -type KeyValueStore interface { - KeyValueReader - KeyValueWriter - Batcher - Iteratee - Stater - Compacter - io.Closer -} - -// Reader contains the methods required to read data from key-value storage. -type Reader interface { - KeyValueReader -} - -// Writer contains the methods required to write data to key-value storage. 
-type Writer interface { - KeyValueWriter -} - -// Database contains all the methods required by the high level database to not -// only access the key-value data store but also the chain freezer. -type Database interface { - Reader - Writer - Batcher - Iteratee - Stater - Compacter - io.Closer -} diff --git a/coreth/ethdb/dbtest/testsuite.go b/coreth/ethdb/dbtest/testsuite.go deleted file mode 100644 index 14a34fc9..00000000 --- a/coreth/ethdb/dbtest/testsuite.go +++ /dev/null @@ -1,335 +0,0 @@ -// (c) 2020-2021, Ava Labs, Inc. -// -// This file is a derived work, based on the go-ethereum library whose original -// notices appear below. -// -// It is distributed under a license compatible with the licensing terms of the -// original code from which it is derived. -// -// Much love to the original authors for their work. -// ********** -// Copyright 2019 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package dbtest - -import ( - "bytes" - "reflect" - "sort" - "testing" - - "github.com/ava-labs/coreth/ethdb" -) - -// TestDatabaseSuite runs a suite of tests against a KeyValueStore database -// implementation. 
-func TestDatabaseSuite(t *testing.T, New func() ethdb.KeyValueStore) { - t.Run("Iterator", func(t *testing.T) { - tests := []struct { - content map[string]string - prefix string - start string - order []string - }{ - // Empty databases should be iterable - {map[string]string{}, "", "", nil}, - {map[string]string{}, "non-existent-prefix", "", nil}, - - // Single-item databases should be iterable - {map[string]string{"key": "val"}, "", "", []string{"key"}}, - {map[string]string{"key": "val"}, "k", "", []string{"key"}}, - {map[string]string{"key": "val"}, "l", "", nil}, - - // Multi-item databases should be fully iterable - { - map[string]string{"k1": "v1", "k5": "v5", "k2": "v2", "k4": "v4", "k3": "v3"}, - "", "", - []string{"k1", "k2", "k3", "k4", "k5"}, - }, - { - map[string]string{"k1": "v1", "k5": "v5", "k2": "v2", "k4": "v4", "k3": "v3"}, - "k", "", - []string{"k1", "k2", "k3", "k4", "k5"}, - }, - { - map[string]string{"k1": "v1", "k5": "v5", "k2": "v2", "k4": "v4", "k3": "v3"}, - "l", "", - nil, - }, - // Multi-item databases should be prefix-iterable - { - map[string]string{ - "ka1": "va1", "ka5": "va5", "ka2": "va2", "ka4": "va4", "ka3": "va3", - "kb1": "vb1", "kb5": "vb5", "kb2": "vb2", "kb4": "vb4", "kb3": "vb3", - }, - "ka", "", - []string{"ka1", "ka2", "ka3", "ka4", "ka5"}, - }, - { - map[string]string{ - "ka1": "va1", "ka5": "va5", "ka2": "va2", "ka4": "va4", "ka3": "va3", - "kb1": "vb1", "kb5": "vb5", "kb2": "vb2", "kb4": "vb4", "kb3": "vb3", - }, - "kc", "", - nil, - }, - // Multi-item databases should be prefix-iterable with start position - { - map[string]string{ - "ka1": "va1", "ka5": "va5", "ka2": "va2", "ka4": "va4", "ka3": "va3", - "kb1": "vb1", "kb5": "vb5", "kb2": "vb2", "kb4": "vb4", "kb3": "vb3", - }, - "ka", "3", - []string{"ka3", "ka4", "ka5"}, - }, - { - map[string]string{ - "ka1": "va1", "ka5": "va5", "ka2": "va2", "ka4": "va4", "ka3": "va3", - "kb1": "vb1", "kb5": "vb5", "kb2": "vb2", "kb4": "vb4", "kb3": "vb3", - }, - "ka", "8", - nil, 
- }, - } - for i, tt := range tests { - // Create the key-value data store - db := New() - for key, val := range tt.content { - if err := db.Put([]byte(key), []byte(val)); err != nil { - t.Fatalf("test %d: failed to insert item %s:%s into database: %v", i, key, val, err) - } - } - // Iterate over the database with the given configs and verify the results - it, idx := db.NewIterator([]byte(tt.prefix), []byte(tt.start)), 0 - for it.Next() { - if len(tt.order) <= idx { - t.Errorf("test %d: prefix=%q more items than expected: checking idx=%d (key %q), expecting len=%d", i, tt.prefix, idx, it.Key(), len(tt.order)) - break - } - if !bytes.Equal(it.Key(), []byte(tt.order[idx])) { - t.Errorf("test %d: item %d: key mismatch: have %s, want %s", i, idx, string(it.Key()), tt.order[idx]) - } - if !bytes.Equal(it.Value(), []byte(tt.content[tt.order[idx]])) { - t.Errorf("test %d: item %d: value mismatch: have %s, want %s", i, idx, string(it.Value()), tt.content[tt.order[idx]]) - } - idx++ - } - if err := it.Error(); err != nil { - t.Errorf("test %d: iteration failed: %v", i, err) - } - if idx != len(tt.order) { - t.Errorf("test %d: iteration terminated prematurely: have %d, want %d", i, idx, len(tt.order)) - } - db.Close() - } - }) - - t.Run("IteratorWith", func(t *testing.T) { - db := New() - defer db.Close() - - keys := []string{"1", "2", "3", "4", "6", "10", "11", "12", "20", "21", "22"} - sort.Strings(keys) // 1, 10, 11, etc - - for _, k := range keys { - if err := db.Put([]byte(k), nil); err != nil { - t.Fatal(err) - } - } - - { - it := db.NewIterator(nil, nil) - got, want := iterateKeys(it), keys - if err := it.Error(); err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(got, want) { - t.Errorf("Iterator: got: %s; want: %s", got, want) - } - } - - { - it := db.NewIterator([]byte("1"), nil) - got, want := iterateKeys(it), []string{"1", "10", "11", "12"} - if err := it.Error(); err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(got, want) { - 
t.Errorf("IteratorWith(1,nil): got: %s; want: %s", got, want) - } - } - - { - it := db.NewIterator([]byte("5"), nil) - got, want := iterateKeys(it), []string{} - if err := it.Error(); err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(got, want) { - t.Errorf("IteratorWith(5,nil): got: %s; want: %s", got, want) - } - } - - { - it := db.NewIterator(nil, []byte("2")) - got, want := iterateKeys(it), []string{"2", "20", "21", "22", "3", "4", "6"} - if err := it.Error(); err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(got, want) { - t.Errorf("IteratorWith(nil,2): got: %s; want: %s", got, want) - } - } - - { - it := db.NewIterator(nil, []byte("5")) - got, want := iterateKeys(it), []string{"6"} - if err := it.Error(); err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(got, want) { - t.Errorf("IteratorWith(nil,5): got: %s; want: %s", got, want) - } - } - }) - - t.Run("KeyValueOperations", func(t *testing.T) { - db := New() - defer db.Close() - - key := []byte("foo") - - if got, err := db.Has(key); err != nil { - t.Error(err) - } else if got { - t.Errorf("wrong value: %t", got) - } - - value := []byte("hello world") - if err := db.Put(key, value); err != nil { - t.Error(err) - } - - if got, err := db.Has(key); err != nil { - t.Error(err) - } else if !got { - t.Errorf("wrong value: %t", got) - } - - if got, err := db.Get(key); err != nil { - t.Error(err) - } else if !bytes.Equal(got, value) { - t.Errorf("wrong value: %q", got) - } - - if err := db.Delete(key); err != nil { - t.Error(err) - } - - if got, err := db.Has(key); err != nil { - t.Error(err) - } else if got { - t.Errorf("wrong value: %t", got) - } - }) - - t.Run("Batch", func(t *testing.T) { - db := New() - defer db.Close() - - b := db.NewBatch() - for _, k := range []string{"1", "2", "3", "4"} { - if err := b.Put([]byte(k), nil); err != nil { - t.Fatal(err) - } - } - - if has, err := db.Has([]byte("1")); err != nil { - t.Fatal(err) - } else if has { - t.Error("db contains element before batch 
write") - } - - if err := b.Write(); err != nil { - t.Fatal(err) - } - - { - it := db.NewIterator(nil, nil) - if got, want := iterateKeys(it), []string{"1", "2", "3", "4"}; !reflect.DeepEqual(got, want) { - t.Errorf("got: %s; want: %s", got, want) - } - } - - b.Reset() - - // Mix writes and deletes in batch - b.Put([]byte("5"), nil) - b.Delete([]byte("1")) - b.Put([]byte("6"), nil) - b.Delete([]byte("3")) - b.Put([]byte("3"), nil) - - if err := b.Write(); err != nil { - t.Fatal(err) - } - - { - it := db.NewIterator(nil, nil) - if got, want := iterateKeys(it), []string{"2", "3", "4", "5", "6"}; !reflect.DeepEqual(got, want) { - t.Errorf("got: %s; want: %s", got, want) - } - } - }) - - t.Run("BatchReplay", func(t *testing.T) { - db := New() - defer db.Close() - - want := []string{"1", "2", "3", "4"} - b := db.NewBatch() - for _, k := range want { - if err := b.Put([]byte(k), nil); err != nil { - t.Fatal(err) - } - } - - b2 := db.NewBatch() - if err := b.Replay(b2); err != nil { - t.Fatal(err) - } - - if err := b2.Replay(db); err != nil { - t.Fatal(err) - } - - it := db.NewIterator(nil, nil) - if got := iterateKeys(it); !reflect.DeepEqual(got, want) { - t.Errorf("got: %s; want: %s", got, want) - } - }) -} - -func iterateKeys(it ethdb.Iterator) []string { - keys := []string{} - for it.Next() { - keys = append(keys, string(it.Key())) - } - sort.Strings(keys) - it.Release() - return keys -} diff --git a/coreth/ethdb/iterator.go b/coreth/ethdb/iterator.go deleted file mode 100644 index 54b10838..00000000 --- a/coreth/ethdb/iterator.go +++ /dev/null @@ -1,71 +0,0 @@ -// (c) 2020-2021, Ava Labs, Inc. -// -// This file is a derived work, based on the go-ethereum library whose original -// notices appear below. -// -// It is distributed under a license compatible with the licensing terms of the -// original code from which it is derived. -// -// Much love to the original authors for their work. 
-// ********** -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package ethdb - -// Iterator iterates over a database's key/value pairs in ascending key order. -// -// When it encounters an error any seek will return false and will yield no key/ -// value pairs. The error can be queried by calling the Error method. Calling -// Release is still necessary. -// -// An iterator must be released after use, but it is not necessary to read an -// iterator until exhaustion. An iterator is not safe for concurrent use, but it -// is safe to use multiple iterators concurrently. -type Iterator interface { - // Next moves the iterator to the next key/value pair. It returns whether the - // iterator is exhausted. - Next() bool - - // Error returns any accumulated error. Exhausting all the key/value pairs - // is not considered to be an error. - Error() error - - // Key returns the key of the current key/value pair, or nil if done. The caller - // should not modify the contents of the returned slice, and its contents may - // change on the next call to Next. - Key() []byte - - // Value returns the value of the current key/value pair, or nil if done. 
The - // caller should not modify the contents of the returned slice, and its contents - // may change on the next call to Next. - Value() []byte - - // Release releases associated resources. Release should always succeed and can - // be called multiple times without causing error. - Release() -} - -// Iteratee wraps the NewIterator methods of a backing data store. -type Iteratee interface { - // NewIterator creates a binary-alphabetical iterator over a subset - // of database content with a particular key prefix, starting at a particular - // initial key (or after, if it does not exist). - // - // Note: This method assumes that the prefix is NOT part of the start, so there's - // no need for the caller to prepend the prefix to the start - NewIterator(prefix []byte, start []byte) Iterator -} diff --git a/coreth/ethdb/leveldb/leveldb.go b/coreth/ethdb/leveldb/leveldb.go deleted file mode 100644 index 7876ae60..00000000 --- a/coreth/ethdb/leveldb/leveldb.go +++ /dev/null @@ -1,540 +0,0 @@ -// (c) 2021-2022, Ava Labs, Inc. -// -// This file is a derived work, based on the go-ethereum library whose original -// notices appear below. -// -// It is distributed under a license compatible with the licensing terms of the -// original code from which it is derived. -// -// Much love to the original authors for their work. -// ********** -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. 
-// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -//go:build !js -// +build !js - -// Package leveldb implements the key-value database layer based on LevelDB. -package leveldb - -import ( - "fmt" - "strconv" - "strings" - "sync" - "time" - - "github.com/ava-labs/coreth/ethdb" - "github.com/ava-labs/coreth/metrics" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/log" - "github.com/syndtr/goleveldb/leveldb" - "github.com/syndtr/goleveldb/leveldb/errors" - "github.com/syndtr/goleveldb/leveldb/filter" - "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/util" -) - -const ( - // degradationWarnInterval specifies how often warning should be printed if the - // leveldb database cannot keep up with requested writes. - degradationWarnInterval = time.Minute - - // minCache is the minimum amount of memory in megabytes to allocate to leveldb - // read and write caching, split half and half. - minCache = 16 - - // minHandles is the minimum number of files handles to allocate to the open - // database files. - minHandles = 16 - - // metricsGatheringInterval specifies the interval to retrieve leveldb database - // compaction, io and pause stats to report to the user. - metricsGatheringInterval = 3 * time.Second -) - -// Database is a persistent key-value store. Apart from basic data storage -// functionality it also supports batch writes and iterating over the keyspace in -// binary-alphabetical order. 
-type Database struct { - fn string // filename for reporting - db *leveldb.DB // LevelDB instance - - compTimeMeter metrics.Meter // Meter for measuring the total time spent in database compaction - compReadMeter metrics.Meter // Meter for measuring the data read during compaction - compWriteMeter metrics.Meter // Meter for measuring the data written during compaction - writeDelayNMeter metrics.Meter // Meter for measuring the write delay number due to database compaction - writeDelayMeter metrics.Meter // Meter for measuring the write delay duration due to database compaction - diskSizeGauge metrics.Gauge // Gauge for tracking the size of all the levels in the database - diskReadMeter metrics.Meter // Meter for measuring the effective amount of data read - diskWriteMeter metrics.Meter // Meter for measuring the effective amount of data written - memCompGauge metrics.Gauge // Gauge for tracking the number of memory compaction - level0CompGauge metrics.Gauge // Gauge for tracking the number of table compaction in level0 - nonlevel0CompGauge metrics.Gauge // Gauge for tracking the number of table compaction in non0 level - seekCompGauge metrics.Gauge // Gauge for tracking the number of table compaction caused by read opt - - quitLock sync.Mutex // Mutex protecting the quit channel access - quitChan chan chan error // Quit channel to stop the metrics collection before closing the database - - log log.Logger // Contextual logger tracking the database path -} - -// New returns a wrapped LevelDB object. The namespace is the prefix that the -// metrics reporting should use for surfacing internal stats. 
-func New(file string, cache int, handles int, namespace string, readonly bool) (*Database, error) { - return NewCustom(file, namespace, func(options *opt.Options) { - // Ensure we have some minimal caching and file guarantees - if cache < minCache { - cache = minCache - } - if handles < minHandles { - handles = minHandles - } - // Set default options - options.OpenFilesCacheCapacity = handles - options.BlockCacheCapacity = cache / 2 * opt.MiB - options.WriteBuffer = cache / 4 * opt.MiB // Two of these are used internally - if readonly { - options.ReadOnly = true - } - }) -} - -// NewCustom returns a wrapped LevelDB object. The namespace is the prefix that the -// metrics reporting should use for surfacing internal stats. -// The customize function allows the caller to modify the leveldb options. -func NewCustom(file string, namespace string, customize func(options *opt.Options)) (*Database, error) { - options := configureOptions(customize) - logger := log.New("database", file) - usedCache := options.GetBlockCacheCapacity() + options.GetWriteBuffer()*2 - logCtx := []interface{}{"cache", common.StorageSize(usedCache), "handles", options.GetOpenFilesCacheCapacity()} - if options.ReadOnly { - logCtx = append(logCtx, "readonly", "true") - } - logger.Info("Allocated cache and file handles", logCtx...) 
- - // Open the db and recover any potential corruptions - db, err := leveldb.OpenFile(file, options) - if _, corrupted := err.(*errors.ErrCorrupted); corrupted { - db, err = leveldb.RecoverFile(file, nil) - } - if err != nil { - return nil, err - } - // Assemble the wrapper with all the registered metrics - ldb := &Database{ - fn: file, - db: db, - log: logger, - quitChan: make(chan chan error), - } - ldb.compTimeMeter = metrics.NewRegisteredMeter(namespace+"compact/time", nil) - ldb.compReadMeter = metrics.NewRegisteredMeter(namespace+"compact/input", nil) - ldb.compWriteMeter = metrics.NewRegisteredMeter(namespace+"compact/output", nil) - ldb.diskSizeGauge = metrics.NewRegisteredGauge(namespace+"disk/size", nil) - ldb.diskReadMeter = metrics.NewRegisteredMeter(namespace+"disk/read", nil) - ldb.diskWriteMeter = metrics.NewRegisteredMeter(namespace+"disk/write", nil) - ldb.writeDelayMeter = metrics.NewRegisteredMeter(namespace+"compact/writedelay/duration", nil) - ldb.writeDelayNMeter = metrics.NewRegisteredMeter(namespace+"compact/writedelay/counter", nil) - ldb.memCompGauge = metrics.NewRegisteredGauge(namespace+"compact/memory", nil) - ldb.level0CompGauge = metrics.NewRegisteredGauge(namespace+"compact/level0", nil) - ldb.nonlevel0CompGauge = metrics.NewRegisteredGauge(namespace+"compact/nonlevel0", nil) - ldb.seekCompGauge = metrics.NewRegisteredGauge(namespace+"compact/seek", nil) - - // Start up the metrics gathering and return - go ldb.meter(metricsGatheringInterval) - return ldb, nil -} - -// configureOptions sets some default options, then runs the provided setter. 
-func configureOptions(customizeFn func(*opt.Options)) *opt.Options { - // Set default options - options := &opt.Options{ - Filter: filter.NewBloomFilter(10), - DisableSeeksCompaction: true, - } - // Allow caller to make custom modifications to the options - if customizeFn != nil { - customizeFn(options) - } - return options -} - -// Close stops the metrics collection, flushes any pending data to disk and closes -// all io accesses to the underlying key-value store. -func (db *Database) Close() error { - db.quitLock.Lock() - defer db.quitLock.Unlock() - - if db.quitChan != nil { - errc := make(chan error) - db.quitChan <- errc - if err := <-errc; err != nil { - db.log.Error("Metrics collection failed", "err", err) - } - db.quitChan = nil - } - return db.db.Close() -} - -// Has retrieves if a key is present in the key-value store. -func (db *Database) Has(key []byte) (bool, error) { - return db.db.Has(key, nil) -} - -// Get retrieves the given key if it's present in the key-value store. -func (db *Database) Get(key []byte) ([]byte, error) { - dat, err := db.db.Get(key, nil) - if err != nil { - return nil, err - } - return dat, nil -} - -// Put inserts the given value into the key-value store. -func (db *Database) Put(key []byte, value []byte) error { - return db.db.Put(key, value, nil) -} - -// Delete removes the key from the key-value store. -func (db *Database) Delete(key []byte) error { - return db.db.Delete(key, nil) -} - -// NewBatch creates a write-only key-value store that buffers changes to its host -// database until a final write is called. -func (db *Database) NewBatch() ethdb.Batch { - return &batch{ - db: db.db, - b: new(leveldb.Batch), - } -} - -// NewBatchWithSize creates a write-only database batch with pre-allocated buffer. 
-func (db *Database) NewBatchWithSize(size int) ethdb.Batch { - return &batch{ - db: db.db, - b: leveldb.MakeBatch(size), - } -} - -// NewIterator creates a binary-alphabetical iterator over a subset -// of database content with a particular key prefix, starting at a particular -// initial key (or after, if it does not exist). -func (db *Database) NewIterator(prefix []byte, start []byte) ethdb.Iterator { - return db.db.NewIterator(bytesPrefixRange(prefix, start), nil) -} - -// Stat returns a particular internal stat of the database. -func (db *Database) Stat(property string) (string, error) { - return db.db.GetProperty(property) -} - -// Compact flattens the underlying data store for the given key range. In essence, -// deleted and overwritten versions are discarded, and the data is rearranged to -// reduce the cost of operations needed to access them. -// -// A nil start is treated as a key before all keys in the data store; a nil limit -// is treated as a key after all keys in the data store. If both is nil then it -// will compact entire data store. -func (db *Database) Compact(start []byte, limit []byte) error { - return db.db.CompactRange(util.Range{Start: start, Limit: limit}) -} - -// Path returns the path to the database directory. -func (db *Database) Path() string { - return db.fn -} - -// meter periodically retrieves internal leveldb counters and reports them to -// the metrics subsystem. 
-// -// This is how a LevelDB stats table looks like (currently): -// -// Compactions -// Level | Tables | Size(MB) | Time(sec) | Read(MB) | Write(MB) -// -------+------------+---------------+---------------+---------------+--------------- -// 0 | 0 | 0.00000 | 1.27969 | 0.00000 | 12.31098 -// 1 | 85 | 109.27913 | 28.09293 | 213.92493 | 214.26294 -// 2 | 523 | 1000.37159 | 7.26059 | 66.86342 | 66.77884 -// 3 | 570 | 1113.18458 | 0.00000 | 0.00000 | 0.00000 -// -// This is how the write delay look like (currently): -// DelayN:5 Delay:406.604657ms Paused: false -// -// This is how the iostats look like (currently): -// Read(MB):3895.04860 Write(MB):3654.64712 -func (db *Database) meter(refresh time.Duration) { - // Create the counters to store current and previous compaction values - compactions := make([][]float64, 2) - for i := 0; i < 2; i++ { - compactions[i] = make([]float64, 4) - } - // Create storage for iostats. - var iostats [2]float64 - - // Create storage and warning log tracer for write delay. 
- var ( - delaystats [2]int64 - lastWritePaused time.Time - ) - - var ( - errc chan error - merr error - ) - - timer := time.NewTimer(refresh) - defer timer.Stop() - - // Iterate ad infinitum and collect the stats - for i := 1; errc == nil && merr == nil; i++ { - // Retrieve the database stats - stats, err := db.db.GetProperty("leveldb.stats") - if err != nil { - db.log.Error("Failed to read database stats", "err", err) - merr = err - continue - } - // Find the compaction table, skip the header - lines := strings.Split(stats, "\n") - for len(lines) > 0 && strings.TrimSpace(lines[0]) != "Compactions" { - lines = lines[1:] - } - if len(lines) <= 3 { - db.log.Error("Compaction leveldbTable not found") - merr = errors.New("compaction leveldbTable not found") - continue - } - lines = lines[3:] - - // Iterate over all the leveldbTable rows, and accumulate the entries - for j := 0; j < len(compactions[i%2]); j++ { - compactions[i%2][j] = 0 - } - for _, line := range lines { - parts := strings.Split(line, "|") - if len(parts) != 6 { - break - } - for idx, counter := range parts[2:] { - value, err := strconv.ParseFloat(strings.TrimSpace(counter), 64) - if err != nil { - db.log.Error("Compaction entry parsing failed", "err", err) - merr = err - continue - } - compactions[i%2][idx] += value - } - } - // Update all the requested meters - if db.diskSizeGauge != nil { - db.diskSizeGauge.Update(int64(compactions[i%2][0] * 1024 * 1024)) - } - if db.compTimeMeter != nil { - db.compTimeMeter.Mark(int64((compactions[i%2][1] - compactions[(i-1)%2][1]) * 1000 * 1000 * 1000)) - } - if db.compReadMeter != nil { - db.compReadMeter.Mark(int64((compactions[i%2][2] - compactions[(i-1)%2][2]) * 1024 * 1024)) - } - if db.compWriteMeter != nil { - db.compWriteMeter.Mark(int64((compactions[i%2][3] - compactions[(i-1)%2][3]) * 1024 * 1024)) - } - // Retrieve the write delay statistic - writedelay, err := db.db.GetProperty("leveldb.writedelay") - if err != nil { - db.log.Error("Failed to read 
database write delay statistic", "err", err) - merr = err - continue - } - var ( - delayN int64 - delayDuration string - duration time.Duration - paused bool - ) - if n, err := fmt.Sscanf(writedelay, "DelayN:%d Delay:%s Paused:%t", &delayN, &delayDuration, &paused); n != 3 || err != nil { - db.log.Error("Write delay statistic not found") - merr = err - continue - } - duration, err = time.ParseDuration(delayDuration) - if err != nil { - db.log.Error("Failed to parse delay duration", "err", err) - merr = err - continue - } - if db.writeDelayNMeter != nil { - db.writeDelayNMeter.Mark(delayN - delaystats[0]) - } - if db.writeDelayMeter != nil { - db.writeDelayMeter.Mark(duration.Nanoseconds() - delaystats[1]) - } - // If a warning that db is performing compaction has been displayed, any subsequent - // warnings will be withheld for one minute not to overwhelm the user. - if paused && delayN-delaystats[0] == 0 && duration.Nanoseconds()-delaystats[1] == 0 && - time.Now().After(lastWritePaused.Add(degradationWarnInterval)) { - db.log.Warn("Database compacting, degraded performance") - lastWritePaused = time.Now() - } - delaystats[0], delaystats[1] = delayN, duration.Nanoseconds() - - // Retrieve the database iostats. 
- ioStats, err := db.db.GetProperty("leveldb.iostats") - if err != nil { - db.log.Error("Failed to read database iostats", "err", err) - merr = err - continue - } - var nRead, nWrite float64 - parts := strings.Split(ioStats, " ") - if len(parts) < 2 { - db.log.Error("Bad syntax of ioStats", "ioStats", ioStats) - merr = fmt.Errorf("bad syntax of ioStats %s", ioStats) - continue - } - if n, err := fmt.Sscanf(parts[0], "Read(MB):%f", &nRead); n != 1 || err != nil { - db.log.Error("Bad syntax of read entry", "entry", parts[0]) - merr = err - continue - } - if n, err := fmt.Sscanf(parts[1], "Write(MB):%f", &nWrite); n != 1 || err != nil { - db.log.Error("Bad syntax of write entry", "entry", parts[1]) - merr = err - continue - } - if db.diskReadMeter != nil { - db.diskReadMeter.Mark(int64((nRead - iostats[0]) * 1024 * 1024)) - } - if db.diskWriteMeter != nil { - db.diskWriteMeter.Mark(int64((nWrite - iostats[1]) * 1024 * 1024)) - } - iostats[0], iostats[1] = nRead, nWrite - - compCount, err := db.db.GetProperty("leveldb.compcount") - if err != nil { - db.log.Error("Failed to read database iostats", "err", err) - merr = err - continue - } - - var ( - memComp uint32 - level0Comp uint32 - nonLevel0Comp uint32 - seekComp uint32 - ) - if n, err := fmt.Sscanf(compCount, "MemComp:%d Level0Comp:%d NonLevel0Comp:%d SeekComp:%d", &memComp, &level0Comp, &nonLevel0Comp, &seekComp); n != 4 || err != nil { - db.log.Error("Compaction count statistic not found") - merr = err - continue - } - db.memCompGauge.Update(int64(memComp)) - db.level0CompGauge.Update(int64(level0Comp)) - db.nonlevel0CompGauge.Update(int64(nonLevel0Comp)) - db.seekCompGauge.Update(int64(seekComp)) - - // Sleep a bit, then repeat the stats collection - select { - case errc = <-db.quitChan: - // Quit requesting, stop hammering the database - case <-timer.C: - timer.Reset(refresh) - // Timeout, gather a new set of stats - } - } - - if errc == nil { - errc = <-db.quitChan - } - errc <- merr -} - -// batch is a 
write-only leveldb batch that commits changes to its host database -// when Write is called. A batch cannot be used concurrently. -type batch struct { - db *leveldb.DB - b *leveldb.Batch - size int -} - -// Put inserts the given value into the batch for later committing. -func (b *batch) Put(key, value []byte) error { - b.b.Put(key, value) - b.size += len(value) - return nil -} - -// Delete inserts the a key removal into the batch for later committing. -func (b *batch) Delete(key []byte) error { - b.b.Delete(key) - b.size += len(key) - return nil -} - -// ValueSize retrieves the amount of data queued up for writing. -func (b *batch) ValueSize() int { - return b.size -} - -// Write flushes any accumulated data to disk. -func (b *batch) Write() error { - return b.db.Write(b.b, nil) -} - -// Reset resets the batch for reuse. -func (b *batch) Reset() { - b.b.Reset() - b.size = 0 -} - -// Replay replays the batch contents. -func (b *batch) Replay(w ethdb.KeyValueWriter) error { - return b.b.Replay(&replayer{writer: w}) -} - -// replayer is a small wrapper to implement the correct replay methods. -type replayer struct { - writer ethdb.KeyValueWriter - failure error -} - -// Put inserts the given value into the key-value data store. -func (r *replayer) Put(key, value []byte) { - // If the replay already failed, stop executing ops - if r.failure != nil { - return - } - r.failure = r.writer.Put(key, value) -} - -// Delete removes the key from the key-value data store. -func (r *replayer) Delete(key []byte) { - // If the replay already failed, stop executing ops - if r.failure != nil { - return - } - r.failure = r.writer.Delete(key) -} - -// bytesPrefixRange returns key range that satisfy -// - the given prefix, and -// - the given seek position -func bytesPrefixRange(prefix, start []byte) *util.Range { - r := util.BytesPrefix(prefix) - r.Start = append(r.Start, start...) 
- return r -} diff --git a/coreth/ethdb/memorydb/memorydb.go b/coreth/ethdb/memorydb/memorydb.go deleted file mode 100644 index dbc9adac..00000000 --- a/coreth/ethdb/memorydb/memorydb.go +++ /dev/null @@ -1,330 +0,0 @@ -// (c) 2020-2021, Ava Labs, Inc. -// -// This file is a derived work, based on the go-ethereum library whose original -// notices appear below. -// -// It is distributed under a license compatible with the licensing terms of the -// original code from which it is derived. -// -// Much love to the original authors for their work. -// ********** -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -// Package memorydb implements the key-value database layer based on memory maps. -package memorydb - -import ( - "errors" - "sort" - "strings" - "sync" - - "github.com/ava-labs/coreth/ethdb" - "github.com/ethereum/go-ethereum/common" -) - -var ( - // errMemorydbClosed is returned if a memory database was already closed at the - // invocation of a data access operation. - errMemorydbClosed = errors.New("database closed") - - // errMemorydbNotFound is returned if a key is requested that is not found in - // the provided memory database. - errMemorydbNotFound = errors.New("not found") -) - -// Database is an ephemeral key-value store. 
Apart from basic data storage -// functionality it also supports batch writes and iterating over the keyspace in -// binary-alphabetical order. -type Database struct { - db map[string][]byte - lock sync.RWMutex -} - -// New returns a wrapped map with all the required database interface methods -// implemented. -func New() *Database { - return &Database{ - db: make(map[string][]byte), - } -} - -// NewWithCap returns a wrapped map pre-allocated to the provided capacity with -// all the required database interface methods implemented. -func NewWithCap(size int) *Database { - return &Database{ - db: make(map[string][]byte, size), - } -} - -// Close deallocates the internal map and ensures any consecutive data access op -// fails with an error. -func (db *Database) Close() error { - db.lock.Lock() - defer db.lock.Unlock() - - db.db = nil - return nil -} - -// Has retrieves if a key is present in the key-value store. -func (db *Database) Has(key []byte) (bool, error) { - db.lock.RLock() - defer db.lock.RUnlock() - - if db.db == nil { - return false, errMemorydbClosed - } - _, ok := db.db[string(key)] - return ok, nil -} - -// Get retrieves the given key if it's present in the key-value store. -func (db *Database) Get(key []byte) ([]byte, error) { - db.lock.RLock() - defer db.lock.RUnlock() - - if db.db == nil { - return nil, errMemorydbClosed - } - if entry, ok := db.db[string(key)]; ok { - return common.CopyBytes(entry), nil - } - return nil, errMemorydbNotFound -} - -// Put inserts the given value into the key-value store. -func (db *Database) Put(key []byte, value []byte) error { - db.lock.Lock() - defer db.lock.Unlock() - - if db.db == nil { - return errMemorydbClosed - } - db.db[string(key)] = common.CopyBytes(value) - return nil -} - -// Delete removes the key from the key-value store. 
-func (db *Database) Delete(key []byte) error { - db.lock.Lock() - defer db.lock.Unlock() - - if db.db == nil { - return errMemorydbClosed - } - delete(db.db, string(key)) - return nil -} - -// NewBatch creates a write-only key-value store that buffers changes to its host -// database until a final write is called. -func (db *Database) NewBatch() ethdb.Batch { - return &batch{ - db: db, - } -} - -// NewBatchWithSize creates a write-only database batch with pre-allocated buffer. -func (db *Database) NewBatchWithSize(size int) ethdb.Batch { - return &batch{ - db: db, - } -} - -// NewIterator creates a binary-alphabetical iterator over a subset -// of database content with a particular key prefix, starting at a particular -// initial key (or after, if it does not exist). -func (db *Database) NewIterator(prefix []byte, start []byte) ethdb.Iterator { - db.lock.RLock() - defer db.lock.RUnlock() - - var ( - pr = string(prefix) - st = string(append(prefix, start...)) - keys = make([]string, 0, len(db.db)) - values = make([][]byte, 0, len(db.db)) - ) - // Collect the keys from the memory database corresponding to the given prefix - // and start - for key := range db.db { - if !strings.HasPrefix(key, pr) { - continue - } - if key >= st { - keys = append(keys, key) - } - } - // Sort the items and retrieve the associated values - sort.Strings(keys) - for _, key := range keys { - values = append(values, db.db[key]) - } - return &iterator{ - index: -1, - keys: keys, - values: values, - } -} - -// Stat returns a particular internal stat of the database. -func (db *Database) Stat(property string) (string, error) { - return "", errors.New("unknown property") -} - -// Compact is not supported on a memory database, but there's no need either as -// a memory database doesn't waste space anyway. -func (db *Database) Compact(start []byte, limit []byte) error { - return nil -} - -// Len returns the number of entries currently present in the memory database. 
-// -// Note, this method is only used for testing (i.e. not public in general) and -// does not have explicit checks for closed-ness to allow simpler testing code. -func (db *Database) Len() int { - db.lock.RLock() - defer db.lock.RUnlock() - - return len(db.db) -} - -// keyvalue is a key-value tuple tagged with a deletion field to allow creating -// memory-database write batches. -type keyvalue struct { - key []byte - value []byte - delete bool -} - -// batch is a write-only memory batch that commits changes to its host -// database when Write is called. A batch cannot be used concurrently. -type batch struct { - db *Database - writes []keyvalue - size int -} - -// Put inserts the given value into the batch for later committing. -func (b *batch) Put(key, value []byte) error { - b.writes = append(b.writes, keyvalue{common.CopyBytes(key), common.CopyBytes(value), false}) - b.size += len(value) - return nil -} - -// Delete inserts the a key removal into the batch for later committing. -func (b *batch) Delete(key []byte) error { - b.writes = append(b.writes, keyvalue{common.CopyBytes(key), nil, true}) - b.size += len(key) - return nil -} - -// ValueSize retrieves the amount of data queued up for writing. -func (b *batch) ValueSize() int { - return b.size -} - -// Write flushes any accumulated data to the memory database. -func (b *batch) Write() error { - b.db.lock.Lock() - defer b.db.lock.Unlock() - - for _, keyvalue := range b.writes { - if keyvalue.delete { - delete(b.db.db, string(keyvalue.key)) - continue - } - b.db.db[string(keyvalue.key)] = keyvalue.value - } - return nil -} - -// Reset resets the batch for reuse. -func (b *batch) Reset() { - b.writes = b.writes[:0] - b.size = 0 -} - -// Replay replays the batch contents. 
-func (b *batch) Replay(w ethdb.KeyValueWriter) error { - for _, keyvalue := range b.writes { - if keyvalue.delete { - if err := w.Delete(keyvalue.key); err != nil { - return err - } - continue - } - if err := w.Put(keyvalue.key, keyvalue.value); err != nil { - return err - } - } - return nil -} - -// iterator can walk over the (potentially partial) keyspace of a memory key -// value store. Internally it is a deep copy of the entire iterated state, -// sorted by keys. -type iterator struct { - index int - keys []string - values [][]byte -} - -// Next moves the iterator to the next key/value pair. It returns whether the -// iterator is exhausted. -func (it *iterator) Next() bool { - // Short circuit if iterator is already exhausted in the forward direction. - if it.index >= len(it.keys) { - return false - } - it.index += 1 - return it.index < len(it.keys) -} - -// Error returns any accumulated error. Exhausting all the key/value pairs -// is not considered to be an error. A memory iterator cannot encounter errors. -func (it *iterator) Error() error { - return nil -} - -// Key returns the key of the current key/value pair, or nil if done. The caller -// should not modify the contents of the returned slice, and its contents may -// change on the next call to Next. -func (it *iterator) Key() []byte { - // Short circuit if iterator is not in a valid position - if it.index < 0 || it.index >= len(it.keys) { - return nil - } - return []byte(it.keys[it.index]) -} - -// Value returns the value of the current key/value pair, or nil if done. The -// caller should not modify the contents of the returned slice, and its contents -// may change on the next call to Next. -func (it *iterator) Value() []byte { - // Short circuit if iterator is not in a valid position - if it.index < 0 || it.index >= len(it.keys) { - return nil - } - return it.values[it.index] -} - -// Release releases associated resources. 
Release should always succeed and can -// be called multiple times without causing error. -func (it *iterator) Release() { - it.index, it.keys, it.values = -1, nil, nil -} diff --git a/coreth/go.mod b/coreth/go.mod index 5285877b..bcdd60e8 100644 --- a/coreth/go.mod +++ b/coreth/go.mod @@ -4,97 +4,117 @@ go 1.21 require ( github.com/VictoriaMetrics/fastcache v1.10.0 - github.com/ava-labs/avalanchego v1.9.16 + github.com/ava-labs/avalanchego v1.11.0-rc.1.0.20240207163634-e248179ae759 github.com/cespare/cp v0.1.0 github.com/davecgh/go-spew v1.1.1 - github.com/deckarep/golang-set v1.8.0 - github.com/dop251/goja v0.0.0-20220405120441-9037c2b61cbf - github.com/ethereum/go-ethereum v1.10.26 + github.com/deckarep/golang-set/v2 v2.1.0 + github.com/dop251/goja v0.0.0-20230605162241-28ee0ee714f3 + github.com/ethereum/go-ethereum v1.12.0 github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 github.com/fsnotify/fsnotify v1.6.0 github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 - github.com/google/uuid v1.2.0 + github.com/google/uuid v1.3.0 github.com/gorilla/rpc v1.2.0 github.com/gorilla/websocket v1.4.2 github.com/hashicorp/go-bexpr v0.1.10 github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d - github.com/holiman/big v0.0.0-20221017200358-a027dc42d04e github.com/holiman/bloomfilter/v2 v2.0.3 - github.com/holiman/uint256 v1.2.0 - github.com/mattn/go-colorable v0.1.12 - github.com/mattn/go-isatty v0.0.14 + github.com/holiman/uint256 v1.2.2-0.20230321075855-87b91420868c + github.com/kylelemons/godebug v1.1.0 + github.com/mattn/go-colorable v0.1.13 + github.com/mattn/go-isatty v0.0.16 github.com/olekukonko/tablewriter v0.0.5 - github.com/prometheus/client_golang v1.13.0 - github.com/prometheus/client_model v0.2.0 - github.com/rjeczalik/notify v0.9.3 + github.com/prometheus/client_golang v1.14.0 + github.com/prometheus/client_model v0.3.0 github.com/shirou/gopsutil v3.21.11+incompatible github.com/spf13/cast v1.5.0 github.com/spf13/pflag 
v1.0.5 github.com/spf13/viper v1.12.0 - github.com/status-im/keycard-go v0.0.0-20200402102358-957c09536969 - github.com/stretchr/testify v1.8.1 - github.com/syndtr/goleveldb v1.0.1-0.20220614013038-64ee5596c38a - github.com/tyler-smith/go-bip39 v1.0.2 - github.com/urfave/cli/v2 v2.10.2 - golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d - golang.org/x/sync v0.1.0 - golang.org/x/sys v0.5.0 - golang.org/x/text v0.7.0 - golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac - gopkg.in/urfave/cli.v1 v1.20.0 + github.com/status-im/keycard-go v0.2.0 + github.com/stretchr/testify v1.8.4 + github.com/tyler-smith/go-bip39 v1.1.0 + github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa + go.uber.org/goleak v1.2.1 + go.uber.org/mock v0.4.0 + golang.org/x/crypto v0.17.0 + golang.org/x/exp v0.0.0-20231127185646-65229373498e + golang.org/x/sync v0.5.0 + golang.org/x/sys v0.15.0 + golang.org/x/text v0.14.0 + golang.org/x/time v0.0.0-20220922220347-f3bd1da661af + google.golang.org/protobuf v1.31.0 + gopkg.in/natefinch/lumberjack.v2 v2.0.0 ) require ( + github.com/DataDog/zstd v1.5.2 // indirect github.com/beorn7/perks v1.0.1 // indirect + github.com/bits-and-blooms/bitset v1.7.0 // indirect github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect github.com/btcsuite/btcd/btcutil v1.1.3 // indirect github.com/cenkalti/backoff/v4 v4.1.3 // indirect - github.com/cespare/xxhash/v2 v2.1.2 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/cockroachdb/errors v1.9.1 // indirect + github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect + github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811 // indirect + github.com/cockroachdb/redact v1.1.3 // indirect + github.com/consensys/bavard v0.1.13 // indirect + github.com/consensys/gnark-crypto v0.12.1 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect + github.com/crate-crypto/go-kzg-4844 v0.2.0 // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 // indirect 
github.com/deepmap/oapi-codegen v1.8.2 // indirect - github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91 // indirect - github.com/edsrzf/mmap-go v1.0.0 // indirect - github.com/go-logr/logr v1.2.3 // indirect + github.com/dlclark/regexp2 v1.7.0 // indirect + github.com/ethereum/c-kzg-4844 v0.2.0 // indirect + github.com/getsentry/sentry-go v0.18.0 // indirect + github.com/go-logr/logr v1.3.0 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.2.6 // indirect github.com/go-sourcemap/sourcemap v2.1.3+incompatible // indirect - github.com/go-stack/stack v1.8.0 // indirect + github.com/go-stack/stack v1.8.1 // indirect + github.com/gofrs/flock v0.8.1 // indirect + github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt/v4 v4.3.0 // indirect - github.com/golang/mock v1.6.0 // indirect - github.com/golang/protobuf v1.5.2 // indirect - github.com/golang/snappy v0.0.4 // indirect + github.com/golang/protobuf v1.5.3 // indirect + github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect + github.com/google/pprof v0.0.0-20230207041349-798e818bf904 // indirect + github.com/google/renameio/v2 v2.0.0 // indirect github.com/graph-gophers/graphql-go v1.3.0 // indirect github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.12.0 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/huin/goupnp v1.0.3 // indirect - github.com/influxdata/influxdb v1.8.3 // indirect github.com/influxdata/influxdb-client-go/v2 v2.4.0 // indirect + github.com/influxdata/influxdb1-client v0.0.0-20220302092344-a9ab5670611c // indirect github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097 // indirect github.com/jackpal/go-nat-pmp v1.0.2 // indirect + github.com/klauspost/compress v1.15.15 // indirect + github.com/kr/pretty v0.3.1 // indirect + github.com/kr/text v0.2.0 // indirect github.com/magiconair/properties v1.8.6 // indirect 
github.com/mattn/go-runewidth v0.0.9 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/mitchellh/pointerstructure v1.2.0 // indirect + github.com/mmcloughlin/addchain v0.4.0 // indirect github.com/mr-tron/base58 v1.2.0 // indirect github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d // indirect github.com/opentracing/opentracing-go v1.1.0 // indirect github.com/pelletier/go-toml v1.9.5 // indirect - github.com/pelletier/go-toml/v2 v2.0.1 // indirect + github.com/pelletier/go-toml/v2 v2.0.5 // indirect github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/common v0.37.0 // indirect - github.com/prometheus/procfs v0.8.0 // indirect - github.com/prometheus/tsdb v0.10.0 // indirect + github.com/prometheus/common v0.39.0 // indirect + github.com/prometheus/procfs v0.9.0 // indirect + github.com/rogpeppe/go-internal v1.10.0 // indirect github.com/rs/cors v1.7.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/spf13/afero v1.8.2 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/subosito/gotenv v1.3.0 // indirect - github.com/supranational/blst v0.3.11-0.20220920110316-f72618070295 // indirect + github.com/supranational/blst v0.3.11 // indirect + github.com/syndtr/goleveldb v1.0.1-0.20220614013038-64ee5596c38a // indirect github.com/tklauser/go-sysconf v0.3.5 // indirect github.com/tklauser/numcpus v0.2.2 // indirect github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect @@ -107,19 +127,18 @@ require ( go.opentelemetry.io/otel/sdk v1.11.0 // indirect go.opentelemetry.io/otel/trace v1.11.0 // indirect go.opentelemetry.io/proto/otlp v0.19.0 // indirect - 
go.uber.org/atomic v1.10.0 // indirect - go.uber.org/multierr v1.8.0 // indirect - go.uber.org/zap v1.24.0 // indirect - golang.org/x/exp v0.0.0-20220426173459-3bcf042a4bf5 // indirect - golang.org/x/net v0.7.0 // indirect - golang.org/x/term v0.5.0 // indirect + go.uber.org/multierr v1.10.0 // indirect + go.uber.org/zap v1.26.0 // indirect + golang.org/x/net v0.19.0 // indirect + golang.org/x/term v0.15.0 // indirect gonum.org/v1/gonum v0.11.0 // indirect - google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c // indirect - google.golang.org/grpc v1.50.1 // indirect - google.golang.org/protobuf v1.28.1 // indirect - gopkg.in/ini.v1 v1.66.4 // indirect - gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect + google.golang.org/genproto v0.0.0-20230711160842-782d3b101e98 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 // indirect + google.golang.org/grpc v1.58.3 // indirect + gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect + rsc.io/tmplfunc v0.0.3 // indirect ) diff --git a/coreth/go.sum b/coreth/go.sum index 9712f713..2a868512 100644 --- a/coreth/go.sum +++ b/coreth/go.sum @@ -1,14 +1,12 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.43.0/go.mod h1:BOSR3VbTLkk6FDC/TcffxP4NF/FFBGA5ku+jvKOP7pg= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod 
h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw= cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= @@ -25,7 +23,6 @@ cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvf cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/bigtable v1.2.0/go.mod h1:JcVAOl45lrTmQfLj7T6TxyMzIN/3FGGcFm+2xVAli2o= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= @@ -38,36 +35,34 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= -collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= 
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/toml v1.1.0 h1:ksErzDEI1khOiGPgpwuI7x2ebx/uXQNw7xJpn9Eq1+I= +github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak= +github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= +github.com/CloudyKit/fastprinter v0.0.0-20200109182630-33d98a066a53/go.mod h1:+3IMCy2vIlbG1XG/0ggNQv0SvxCAIpPM5b1nCz56Xno= +github.com/CloudyKit/jet/v3 v3.0.0/go.mod h1:HKQPgSJmdK8hdoAbKUUWajkHyHo4RaU5rMdUywE7VMo= +github.com/DataDog/zstd v1.5.2 h1:vUG4lAyuPCXO0TLbXvPv7EB7cNK1QV/luu55UHLrrn8= +github.com/DataDog/zstd v1.5.2/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= +github.com/Joker/hpp v1.0.0/go.mod h1:8x5n+M1Hp5hC0g8okX3sR3vFQwynaX/UgSOM9MeBKzY= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0= github.com/VictoriaMetrics/fastcache v1.10.0 h1:5hDJnLsKLpnUEToub7ETuRu8RCkb40woBZAUiKonXzY= github.com/VictoriaMetrics/fastcache v1.10.0/go.mod h1:tjiYeEfYXCqacuvYw/7UoDIeJaNxq6132xHICNP77w8= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= -github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= 
-github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY= github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8= github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= -github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db/go.mod h1:VTxUBvSJ3s3eHAg65PNgrsn5BtqCRPdmyXh6rAfdxN0= -github.com/ava-labs/avalanchego v1.9.16 h1:JarxIn7gy4V9f1dBgUxubRRO6CrqY2MprOLGqEmk+Vg= -github.com/ava-labs/avalanchego v1.9.16/go.mod h1:Unm7ruhAvLSRP+7gIfwyHNf+wEehWLsFhY9yp10nDbw= -github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/ava-labs/avalanchego v1.11.0-rc.1.0.20240207163634-e248179ae759 h1:WpjLYFWarM6GzHL5NwJqs+SGJUJljLSK5BpL8C1jFMM= +github.com/ava-labs/avalanchego v1.11.0-rc.1.0.20240207163634-e248179ae759/go.mod h1:PZUfF751H3p3P4HK+sCI8glzEa3WJ2GgdVH6SQGkA5g= +github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod 
h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40/go.mod h1:8rLXio+WjiTceGBHIoTvn60HIbs7Hm7bcHjyrSqYB9c= -github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= +github.com/bits-and-blooms/bitset v1.7.0 h1:YjAGVd3XmtK9ktAbX8Zg2g2PwLIMjGREZJHlV4j7NEo= +github.com/bits-and-blooms/bitset v1.7.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= github.com/btcsuite/btcd v0.22.0-beta.0.20220111032746-97732e52810c/go.mod h1:tjmYdS6MLJ5/s0Fj4DbLgSbDHbEqLJrtnHecBFkdz5M= github.com/btcsuite/btcd v0.23.0 h1:V2/ZgjfDFIygAX3ZapeigkVBoVUtOJKSwrhZdlpSvaA= @@ -92,7 +87,6 @@ github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= -github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34= github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -100,11 +94,15 @@ github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk= github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= github.com/cespare/xxhash/v2 
v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/logex v1.2.0/go.mod h1:9+9sk7u7pGNWYMkh0hdiL++6OeibzJccyQU4p4MedaY= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/readline v1.5.0/go.mod h1:x22KAscuvRqlLoK9CsoYsmxoXZMMFVyOl86cAH8qUic= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/chzyer/test v0.0.0-20210722231415-061457976a23/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= @@ -114,17 +112,38 @@ github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWH github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cockroachdb/datadriven v1.0.2 h1:H9MtNqVoVhvd9nCBwOyDjUEdZCREqbIdCJD93PBm/jA= +github.com/cockroachdb/datadriven v1.0.2/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= +github.com/cockroachdb/errors v1.9.1 h1:yFVvsI0VxmRShfawbt/laCIDy/mtTqqnvoNgiy5bEV8= +github.com/cockroachdb/errors v1.9.1/go.mod h1:2sxOtL2WIc096WSZqZ5h8fa17rdDq9HZOZLBCor4mBk= 
+github.com/cockroachdb/logtags v0.0.0-20211118104740-dabe8e521a4f/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= +github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE= +github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= +github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811 h1:ytcWPaNPhNoGMWEhDvS3zToKcDpRsLuRolQJBVGdozk= +github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811/go.mod h1:Nb5lgvnQ2+oGlE/EyZy4+2/CxRh9KfvCXnag1vtpxVM= +github.com/cockroachdb/redact v1.1.3 h1:AKZds10rFSIj7qADf0g46UixK8NNLwWTNdCIGS5wfSQ= +github.com/cockroachdb/redact v1.1.3/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= +github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= +github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ= +github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI= +github.com/consensys/gnark-crypto v0.12.1 h1:lHH39WuuFgVHONRl3J0LRBtuYdQTumFSDtJF7HpyG8M= +github.com/consensys/gnark-crypto v0.12.1/go.mod h1:v2Gy7L/4ZRosZ7Ivs+9SfUDr0f5UlG+EM5t7MPHiLuY= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/crate-crypto/go-kzg-4844 v0.2.0 h1:UVuHOE+5tIWrim4zf/Xaa43+MIsDCPyW76QhUpiMGj4= +github.com/crate-crypto/go-kzg-4844 v0.2.0/go.mod 
h1:SBP7ikXEgDnUPONgm33HtuDZEDtWa3L4QtN1ocJSEQ4= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4= -github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg= github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/deckarep/golang-set v1.8.0 h1:sk9/l/KqpunDwP7pSjUg0keiOOLEnOBHzykLrsPppp4= -github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS383rP6+o6qqo= +github.com/deckarep/golang-set/v2 v2.1.0 h1:g47V4Or+DUdzbs8FxCCmgb6VYd+ptPAngjM6dtGktsI= +github.com/deckarep/golang-set/v2 v2.1.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4= github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0= github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs= @@ -134,83 +153,98 @@ github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0 github.com/deepmap/oapi-codegen v1.6.0/go.mod h1:ryDa9AgbELGeB+YEXE1dR53yAjHwFvE9iAUlWl9Al3M= github.com/deepmap/oapi-codegen v1.8.2 h1:SegyeYGcdi0jLLrpbCMoJxnUUn8GBXHsvr4rbzjuhfU= github.com/deepmap/oapi-codegen v1.8.2/go.mod h1:YLgSKSDv/bZQB7N4ws6luhozi3cEdRktEqrX88CvjIw= +github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgryski/go-bitstream 
v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ= -github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91 h1:Izz0+t1Z5nI16/II7vuEo/nHjodOg0p7+OiDpjX5t1E= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc= +github.com/dlclark/regexp2 v1.7.0 h1:7lJfhqlPssTb1WQx4yvTHN0uElPEv52sbaECrAQxjAo= +github.com/dlclark/regexp2 v1.7.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= github.com/docker/docker v1.6.2 h1:HlFGsy+9/xrgMmhmN+NGhCc5SHGJ7I+kHosRR1xc/aI= -github.com/dop251/goja v0.0.0-20220405120441-9037c2b61cbf h1:Yt+4K30SdjOkRoRRm3vYNQgR+/ZIy0RmeUDZo7Y8zeQ= -github.com/dop251/goja v0.0.0-20220405120441-9037c2b61cbf/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk= +github.com/docker/docker v1.6.2/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/dop251/goja v0.0.0-20211022113120-dc8c55024d06/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk= +github.com/dop251/goja v0.0.0-20230605162241-28ee0ee714f3 h1:+3HCtB74++ClLy8GgjUQYeC8R4ILzVcIe8+5edAJJnE= +github.com/dop251/goja v0.0.0-20230605162241-28ee0ee714f3/go.mod h1:QMWlm50DNe14hD7t24KEqZuUdC9sOTy8W6XbCU1mlw4= github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7/go.mod h1:hn7BA7c8pLvoGndExHudxTDKZ84Pyvv+90pbBjbTz0Y= -github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts= -github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw= -github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= +github.com/dop251/goja_nodejs v0.0.0-20211022123610-8dd9abb0616d/go.mod h1:DngW8aVqWbuLRMHItjPUyqdj+HWPvnQe8V8y1nDpIbM= +github.com/dustin/go-humanize 
v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZifjYj7uP3BG/gKcuzL9xWVV/Y+cK33KM= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/ethereum/go-ethereum v1.10.26 h1:i/7d9RBBwiXCEuyduBQzJw/mKmnvzsN14jqBmytw72s= -github.com/ethereum/go-ethereum v1.10.26/go.mod h1:EYFyF19u3ezGLD4RqOkLq+ZCXzYbLoNDdZlMt7kyKFg= +github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw= +github.com/ethereum/c-kzg-4844 v0.2.0 h1:+cUvymlnoDDQgMInp25Bo3OmLajmmY8mLJ/tLjqd77Q= +github.com/ethereum/c-kzg-4844 v0.2.0/go.mod h1:WI2Nd82DMZAAZI1wV2neKGost9EKjvbpQR9OqE5Qqa8= +github.com/ethereum/go-ethereum v1.12.0 h1:bdnhLPtqETd4m3mS8BGMNvBTf36bO5bx/hxE2zljOa0= +github.com/ethereum/go-ethereum v1.12.0/go.mod h1:/oo2X/dZLJjf2mJ6YT9wcWxa4nNJDBKDBU6sFIpx1Gs= +github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod 
h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8= +github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c= github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= -github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= -github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= +github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= +github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc= github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 h1:f6D9Hr8xV8uYKlyuj8XIruxlh9WjVjdh1gIicAS7ays= github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww= github.com/getkin/kin-openapi v0.53.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= github.com/getkin/kin-openapi v0.61.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= +github.com/getsentry/sentry-go v0.12.0/go.mod h1:NSap0JBYWzHND8oMbyi0+XZhUalc1TBdRL1M71JZW2c= +github.com/getsentry/sentry-go v0.18.0 h1:MtBW5H9QgdcJabtZcuJG80BMOwaBpkRDZkxRkNC1sN0= +github.com/getsentry/sentry-go v0.18.0/go.mod h1:Kgon4Mby+FJ7ZWHFUAZgVaIa8sxHtnRJRLTXZr51aKQ= 
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= -github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= +github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s= +github.com/gin-gonic/gin v1.4.0/go.mod h1:OW2EZn3DO8Ln9oIKOvM++LBO+5UPHJJDH72/q/3rZdM= +github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= github.com/go-chi/chi/v5 v5.0.0/go.mod h1:BBug9lr0cqtdAhsu6R4AAdvufI0/XBzAQSsUqJpoZOs= +github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= +github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= +github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0 h1:wDJmvq38kDhkVxi50ni9ykkdUr1PKgqKOoi01fa0Mdk= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= -github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt 
v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA= -github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= -github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= +github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8= github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-sourcemap/sourcemap v2.1.3+incompatible h1:W1iEw64niKVGogNgBN3ePyLFfuisuzeidWPMPWmECqU= github.com/go-sourcemap/sourcemap v2.1.3+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg= -github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= -github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw= +github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4= github.com/go-task/slim-sprig 
v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= -github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= +github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= +github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= +github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= +github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= +github.com/gogo/googleapis v0.0.0-20180223154316-0cd9801be74a/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= +github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4= +github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/gogo/status v1.1.0/go.mod h1:BFv9nrluPLmrS0EmGVvLaPNmRosr9KapBYd5/hpY1WM= +github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= github.com/golang-jwt/jwt/v4 v4.3.0 h1:kHL1vqdqWNfATmA0FNMdmZNMyZI1U6O31X4rlIPoBog= github.com/golang-jwt/jwt/v4 v4.3.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= -github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= -github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= 
-github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= +github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -221,8 +255,6 @@ github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= -github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= -github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -238,16 +270,16 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod 
h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk= +github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y= +github.com/gomodule/redigo v1.7.1-0.20190724094224-574c33c3df38/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/flatbuffers v1.11.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -259,7 +291,9 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 
-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= @@ -275,10 +309,15 @@ github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20230207041349-798e818bf904 h1:4/hN5RUoecvl+RmJRE2YxKWtnnQls6rQjjW5oV7qg2U= +github.com/google/pprof v0.0.0-20230207041349-798e818bf904/go.mod h1:uglQLonpP8qtYCYyzA+8c/9qtqgA3qsXGYqCPKARAFg= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/renameio/v2 v2.0.0 h1:UifI23ZTGY8Tt29JbYFiuyIU3eX+RNFtUwefq9qAhxg= +github.com/google/renameio/v2 v2.0.0/go.mod h1:BtmJXm5YlszgC+TD4HOEEUFgkJP3nLxehU6hfe7jRt4= +github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs= -github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= 
+github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= @@ -286,6 +325,7 @@ github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORR github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/rpc v1.2.0 h1:WvvdC2lNeT1SP32zrIce5l0ECBfbAlmrmSBsuc57wfk= github.com/gorilla/rpc v1.2.0/go.mod h1:V4h9r+4sF5HnzqbwIez0fKSpANP0zlYd3qR7p36jkTQ= +github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/graph-gophers/graphql-go v1.3.0 h1:Eb9x/q6MFpCLz7jBCiP/WTxjSDrYLR1QY41SORZyNJ0= @@ -298,129 +338,140 @@ github.com/grpc-ecosystem/grpc-gateway/v2 v2.12.0 h1:kr3j8iIMR4ywO/O0rvksXaJvauG github.com/grpc-ecosystem/grpc-gateway/v2 v2.12.0/go.mod h1:ummNFgdgLhhX7aIiy35vVmQNS0rWXknfPE0qe6fmFXg= github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpxn4uE= github.com/hashicorp/go-bexpr v0.1.10/go.mod h1:oxlubA2vC/gFVfX1A6JGp7ls7uCDlfJn732ehYYg+g0= +github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuWpEqDnvIw251EVy4zlP8gWbsGj4BsUKCRpYs= github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d/go.mod 
h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/holiman/big v0.0.0-20221017200358-a027dc42d04e h1:pIYdhNkDh+YENVNi3gto8n9hAmRxKxoar0iE6BLucjw= -github.com/holiman/big v0.0.0-20221017200358-a027dc42d04e/go.mod h1:j9cQbcqHQujT0oKJ38PylVfqohClLr3CvDC+Qcg+lhU= github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao= github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA= -github.com/holiman/uint256 v1.2.0 h1:gpSYcPLWGv4sG43I2mVLiDZCNDh/EpGjSk8tmtxitHM= -github.com/holiman/uint256 v1.2.0/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw= +github.com/holiman/uint256 v1.2.2-0.20230321075855-87b91420868c h1:DZfsyhDK1hnSS5lH8l+JggqzEleHteTYfutAiVlSUM8= +github.com/holiman/uint256 v1.2.2-0.20230321075855-87b91420868c/go.mod h1:SC8Ryt4n+UBbPbIBKaG9zbbDlp4jOru9xFZmPzLUTxw= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huin/goupnp v1.0.3 h1:N8No57ls+MnjlB+JPiCVSOyy/ot7MJTqlo7rn+NYSqQ= github.com/huin/goupnp v1.0.3/go.mod h1:ZxNlw5WqJj6wSsRK5+YfflQGXYfccj5VgQsMNixHM7Y= github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o= +github.com/hydrogen18/memlistener v0.0.0-20200120041712-dcc25e7acd91/go.mod h1:qEIFzExnS6016fRpRfxrExeVn2gbClQA99gQhnIcdhE= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20220319035150-800ac71e25c2/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= +github.com/imkira/go-interpol v1.1.0/go.mod h1:z0h2/2T3XF8kyEPpRgJ3kmNv+C43p+I/CoI+jC3w2iA= 
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/influxdata/flux v0.65.1/go.mod h1:J754/zds0vvpfwuq7Gc2wRdVwEodfpCFM7mYlOw2LqY= -github.com/influxdata/influxdb v1.8.3 h1:WEypI1BQFTT4teLM+1qkEcvUi0dAvopAI/ir0vAiBg8= -github.com/influxdata/influxdb v1.8.3/go.mod h1:JugdFhsvvI8gadxOI6noqNeeBHvWNTbfYGtiAn+2jhI= github.com/influxdata/influxdb-client-go/v2 v2.4.0 h1:HGBfZYStlx3Kqvsv1h2pJixbCl/jhnFtxpKFAv9Tu5k= github.com/influxdata/influxdb-client-go/v2 v2.4.0/go.mod h1:vLNHdxTJkIf2mSLvGrpj8TCcISApPoXkaxP8g9uRlW8= -github.com/influxdata/influxql v1.1.1-0.20200828144457-65d3ef77d385/go.mod h1:gHp9y86a/pxhjJ+zMjNXiQAA197Xk9wLxaz+fGG+kWk= -github.com/influxdata/line-protocol v0.0.0-20180522152040-32c6aa80de5e/go.mod h1:4kt73NQhadE3daL3WhR5EJ/J2ocX0PZzwxQ0gXJ7oFE= +github.com/influxdata/influxdb1-client v0.0.0-20220302092344-a9ab5670611c h1:qSHzRbhzK8RdXOsAdfDgO49TtqC1oZ+acxPrkfTxcCs= +github.com/influxdata/influxdb1-client v0.0.0-20220302092344-a9ab5670611c/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo= github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097 h1:vilfsDSy7TDxedi9gyBkMvAirat/oRcL0lFdJBf6tdM= github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo= -github.com/influxdata/promql/v2 v2.12.0/go.mod h1:fxOPu+DY0bqCTCECchSRtWfc+0X19ybifQhZoQNF5D8= -github.com/influxdata/roaring v0.4.13-0.20180809181101-fc520f41fab6/go.mod h1:bSgUQ7q5ZLSO+bKBGqJiCBGAl+9DxyW63zLTujjUlOE= -github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9/go.mod h1:Js0mqiSBE6Ffsg94weZZ2c+v/ciT8QRHFOap7EKDrR0= -github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po= +github.com/iris-contrib/blackfriday v2.0.0+incompatible/go.mod 
h1:UzZ2bDEoaSGPbkg6SAB4att1aAwTmVIx/5gCVqeyUdI= +github.com/iris-contrib/go.uuid v2.0.0+incompatible/go.mod h1:iz2lgM/1UnEf1kP0L/+fafWORmlnuysV2EMP8MW+qe0= +github.com/iris-contrib/jade v1.1.3/go.mod h1:H/geBymxJhShH5kecoiOCSssPX7QWYH7UaeZTSWddIk= +github.com/iris-contrib/pongo2 v0.0.1/go.mod h1:Ssh+00+3GAZqSQb30AvBRNxBx7rf0GqwkjqxNd0u65g= +github.com/iris-contrib/schema v0.0.1/go.mod h1:urYA3uvUNG1TIIjOSCzHr9/LmbQo8LrOcOqfqxa4hXw= github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/jsternberg/zap-logfmt v1.0.0/go.mod h1:uvPs/4X51zdkcm5jXl5SYoN+4RK21K8mysFmDaM/h+o= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= 
github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= -github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= -github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef/go.mod h1:Ct9fl0F6iIOGgxJ5npU/IUOhOhqlVrGjyIZc8/MagT0= -github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k= +github.com/kataras/golog v0.0.10/go.mod h1:yJ8YKCmyL+nWjERB90Qwn+bdyBZsaQwU3bTVFgkFIp8= +github.com/kataras/iris/v12 v12.1.8/go.mod h1:LMYy4VlP67TQ3Zgriz8RE2h2kMZV2SgMYbq3UhfoFmE= +github.com/kataras/neffos v0.0.14/go.mod h1:8lqADm8PnbeFfL7CLXh1WHw53dG27MC3pgi2R1rmoTE= +github.com/kataras/pio v0.0.2/go.mod h1:hAoW0t9UmXi4R5Oyq5Z4irTbaTsOemSrDGUtaTl7Dro= +github.com/kataras/sitemap v0.0.5/go.mod h1:KY2eugMKiPwsJgx7+U103YZehfvNGOXURubcGyk0Bz8= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= -github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= -github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= -github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg= -github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences 
v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/klauspost/compress v1.8.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.9.7/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.15.15 h1:EF27CXIuDsYJ6mmvtBRlEuB2UVOqHG1tAXgZ7yIO+lw= +github.com/klauspost/compress v1.15.15/go.mod h1:ZcK2JAFqKOpnBlxcLsJzYfrS9X1akm9fHZNnD9+Vo/4= +github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/labstack/echo/v4 v4.2.1/go.mod h1:AA49e0DZ8kk5jTOOCKNuPR6oTnBS0dYiM4FW1e6jwpg= +github.com/labstack/echo/v4 v4.5.0/go.mod h1:czIriw4a0C1dFun+ObrXp7ok03xON0N1awStJ6ArI7Y= github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k= -github.com/lib/pq v1.0.0/go.mod 
h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= +github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo= github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= -github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= -github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-colorable v0.1.11/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.9/go.mod 
h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= -github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= -github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= -github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= +github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= +github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/mediocregopher/radix/v3 v3.4.2/go.mod h1:8FL3F6UQRXHXIBSPUs5h0RybMF8i4n7wVopoX3x7Bv8= +github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v1.1.2/go.mod 
h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A= github.com/mitchellh/pointerstructure v1.2.0/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8ohIXc3tViBH44KcwB2g4= +github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY= +github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU= +github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/moul/http2curl v1.0.0/go.mod h1:8UbvGypXm98wA/IqH45anm5Y2Z6ep6O31QGOAZ3H0fQ= github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= -github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/nats-io/jwt 
v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= +github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= +github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d h1:AREM5mwr4u1ORQBMvzfzBgpsctsbQikCVpvC+tX285E= github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= -github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= @@ -433,98 +484,78 @@ github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7J github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= -github.com/onsi/gomega v1.24.0 
h1:+0glovB9Jd6z3VR+ScSwQqXVTIfJcGA9UBM8yzQxhqg= -github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/opentracing/opentracing-go v1.0.3-0.20180606204148-bd9c31933947/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/onsi/gomega v1.29.0 h1:KIA/t2t5UBzoirT4H9tsML45GEbo3ouUnBHsCfD2tVg= +github.com/onsi/gomega v1.29.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/paulbellamy/ratecounter v0.2.0/go.mod h1:Hfx1hDpSGoqxkVVpBi/IlYD7kChlfo5C6hzIHwPqfFE= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= -github.com/pelletier/go-toml/v2 v2.0.1 h1:8e3L2cCQzLFi2CR4g7vGFuFxX7Jl1kKX8gW+iV0GUKU= -github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo= -github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc= +github.com/pelletier/go-toml/v2 v2.0.5 h1:ipoSadvV8oGUjnUbMub59IDPPwfxF694nG/jwbMiyQg= +github.com/pelletier/go-toml/v2 v2.0.5/go.mod h1:OMHamSCAODeSsVrwwvcJOaoN0LIUIaFVNZzmWyNfXas= github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 h1:oYW+YCJ1pachXTQmzR3rNLYGGz4g/UgFcjb28p/viDM= github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0= -github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= -github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 
+github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= +github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= -github.com/pkg/term v0.0.0-20180730021639-bffc007b7fd5/go.mod h1:eCbImbZ95eXtAUIbLAuAVnBnwf83mjf6QIVH8SHYwqQ= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= -github.com/prometheus/client_golang v1.13.0 h1:b71QUfeo5M8gq2+evJdTPfZhYMAU0uKPkyPJ7TPsloU= -github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= +github.com/prometheus/client_golang v1.14.0/go.mod 
h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= -github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE= -github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= -github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= -github.com/prometheus/tsdb v0.10.0 h1:If5rVCMTp6W2SiRAQFlbpJNgVlgMEd+U2GZckwK38ic= -github.com/prometheus/tsdb v0.10.0/go.mod h1:oi49uRhEe9dPUTlS3JRZOwJuVi6tmh10QSgwXEyGCt4= -github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc= -github.com/rjeczalik/notify 
v0.9.3 h1:6rJAzHTGKXGj76sbRgDiDcYj/HniypXmSJo1SWakZeY= -github.com/rjeczalik/notify v0.9.3/go.mod h1:gF3zSOrafR9DQEWSE8TjfI9NkooDxbyT4UgRGKZA0lc= +github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= +github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= +github.com/prometheus/common v0.39.0 h1:oOyhkDq05hPZKItWVBkJ6g6AtGxi+fy7F4JvUV8uhsI= +github.com/prometheus/common v0.39.0/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y= +github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI= +github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= -github.com/segmentio/kafka-go 
v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= +github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/sanity-io/litter v1.5.1 h1:dwnrSypP6q56o3lFxTU+t2fwQ9A+U5qrXVO4Qg9KwVU= +github.com/sanity-io/litter v1.5.1/go.mod h1:5Z71SvaYy5kcGtyglXOC9rrUi3c1E8CamFWjQsazTh0= +github.com/schollz/closestmatch v2.1.0+incompatible/go.mod h1:RtP1ddjLong6gTkbtmuhtR2uUrrJOpYzYRvbcPAid+g= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI= github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.8.2 h1:xehSyVa0YnHWsJ49JFljMpg1HX19V6NDZ1fkm1Xznbo= github.com/spf13/afero v1.8.2/go.mod h1:CtAatgMJh6bJEIs48Ay/FOnkljP3WeGUG0MC1RfAqwo= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= -github.com/spf13/cobra v0.0.3/go.mod 
h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/spf13/viper v1.12.0 h1:CZ7eSOd3kZoaYDLbXnmzgQI5RlciuXBMA+18HwHRfZQ= github.com/spf13/viper v1.12.0/go.mod h1:b6COn30jlNxbm/V2IqWiNWkJ+vZNiMNksliPCiuKtSI= -github.com/status-im/keycard-go v0.0.0-20200402102358-957c09536969 h1:Oo2KZNP70KE0+IUJSidPj/BFS/RXNHmKIJOdckzml2E= -github.com/status-im/keycard-go v0.0.0-20200402102358-957c09536969/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q= +github.com/status-im/keycard-go v0.2.0 h1:QDLFswOQu1r5jsycloeQh3bVU8n/NatHHaZobtDnDzA= +github.com/status-im/keycard-go v0.2.0/go.mod h1:wlp8ZLbsmrF6g6WjugPAx+IzoLrkdf9+mHxBEeo3Hbg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify 
v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -533,36 +564,51 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/subosito/gotenv v1.3.0 h1:mjC+YW8QpAdXibNi+vNWgzmgBH4+5l5dCXv8cNysBLI= github.com/subosito/gotenv v1.3.0/go.mod h1:YzJjq/33h7nrwdY+iHMhEOEEbW0ovIz0tB6t6PwAXzs= -github.com/supranational/blst v0.3.11-0.20220920110316-f72618070295 h1:rVKS9JjtqE4/PscoIsP46sRnJhfq8YFbjlk0fUJTRnY= -github.com/supranational/blst v0.3.11-0.20220920110316-f72618070295/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= +github.com/supranational/blst v0.3.11 h1:LyU6FolezeWAhvQk0k6O/d49jqgO52MSDDfYgbeoEm4= +github.com/supranational/blst v0.3.11/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= github.com/syndtr/goleveldb v1.0.1-0.20220614013038-64ee5596c38a h1:1ur3QoCqvE5fl+nylMaIr9PVV1w343YRDtsy+Rwu7XI= github.com/syndtr/goleveldb v1.0.1-0.20220614013038-64ee5596c38a/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48= -github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= +github.com/thepudds/fzgen v0.4.2 h1:HlEHl5hk2/cqEomf2uK5SA/FeJc12s/vIHmOG+FbACw= +github.com/thepudds/fzgen v0.4.2/go.mod h1:kHCWdsv5tdnt32NIHYDdgq083m6bMtaY0M+ipiO9xWE= 
github.com/tklauser/go-sysconf v0.3.5 h1:uu3Xl4nkLzQfXNsWn15rPc/HQCJKObbt1dKJeWp3vU4= github.com/tklauser/go-sysconf v0.3.5/go.mod h1:MkWzOF4RMCshBAMXuhXJs64Rte09mITnppBXY/rYEFI= github.com/tklauser/numcpus v0.2.2 h1:oyhllyrScuYI6g+h/zUvNXNp1wy7x8qQy3t/piefldA= github.com/tklauser/numcpus v0.2.2/go.mod h1:x3qojaO3uyYt0i56EW/VUYs7uBvdl2fkfZFu0T9wgjM= -github.com/tyler-smith/go-bip39 v1.0.2 h1:+t3w+KwLXO6154GNJY+qUtIxLTmFjfUmpguQT1OlOT8= -github.com/tyler-smith/go-bip39 v1.0.2/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs= -github.com/urfave/cli/v2 v2.10.2 h1:x3p8awjp/2arX+Nl/G2040AZpOCHS/eMJJ1/a+mye4Y= -github.com/urfave/cli/v2 v2.10.2/go.mod h1:f8iq5LtQ/bLxafbdBSLPPNsgaW0l/2fYYEHhAyPlwvo= +github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8= +github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= +github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa h1:5SqCsI/2Qya2bCzK15ozrqo2sZxkh0FHynJZOTVoV6Q= +github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa/go.mod h1:1CNUng3PtjQMtRzJO4FMXBQvkGtuYRxxiR9xMa7jMwI= +github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasthttp v1.6.0/go.mod h1:FstJa9V+Pj9vQ7OJie2qMHdwemEDaDiSdBnvPM1Su9w= github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= -github.com/willf/bitset v1.1.3/go.mod 
h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= -github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= +github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU= github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8= +github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0/go.mod h1:/LWChgwKmvncFJFHJ7Gvn9wZArjbV5/FppcK2fKk/tI= +github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg= +github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM= +github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yusufpapurcu/wmi v1.2.2 
h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg= github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= @@ -588,34 +634,32 @@ go.opentelemetry.io/otel/trace v1.11.0/go.mod h1:nyYjis9jy0gytE9LXGU+/m1sHTKbRY0 go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw= go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= -go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= -go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8= -go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= -go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= -go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= +go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= +go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= +go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU= +go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc= +go.uber.org/multierr v1.10.0 h1:S0h4aNzvfcFsC3dRF1jLoaov7oRaKqRGC/pUEJ2yvPQ= +go.uber.org/multierr v1.10.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.26.0 
h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= +go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191227163750-53104e6ec876/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d h1:sK3txAijHtOK88l68nt020reeT1ZdKLIYetKl95FzVY= 
-golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= +golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= @@ -625,9 +669,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20220426173459-3bcf042a4bf5 h1:rxKZ2gOnYxjfmakvUUqh9Gyb6KXfrj7JWTxORTYqb0E= -golang.org/x/exp v0.0.0-20220426173459-3bcf042a4bf5/go.mod h1:lgLbSvA5ygNOMpwM/9anMpWVlVJ7Z+cHWq/eFuinpGE= -golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= +golang.org/x/exp v0.0.0-20231127185646-65229373498e h1:Gvh4YaCaXNs6dKTlfgismwWZKyjVZXwOPfIyUaqU3No= +golang.org/x/exp v0.0.0-20231127185646-65229373498e/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI= golang.org/x/image 
v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -641,6 +684,7 @@ golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRu golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= @@ -652,22 +696,24 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190327091125-710a502c58a2/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -692,12 +738,12 
@@ golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= -golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20211008194852-3b03d305991f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= -golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= +golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -707,9 +753,7 @@ golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod 
h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -721,22 +765,21 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= +golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys 
v0.0.0-20180926160741-c2ed4eda69e7/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -747,8 +790,6 @@ golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200107162124-548cf772de50/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -763,8 +804,6 @@ golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -779,26 +818,30 @@ golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210316164454-77fc1eacc6aa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220405052023-b1e9470b6e64/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.5.0 h1:n2a8QNdAb0sZNpU9R1ALUXBbY+w51fCQDN+7EdxNBsY= -golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4= +golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -808,24 +851,24 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text 
v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs= -golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/time v0.0.0-20220922220347-f3bd1da661af h1:Yx9k8YCG3dvF87UAn2tu2HQLf2dt/eR1bXxpLMWeH+Y= +golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181221001348-537d06c36207/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools 
v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190327201419-c70d86f8b7cf/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= @@ -843,7 +886,6 @@ golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200108203644-89082a384178/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= @@ -859,6 +901,7 @@ golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod 
h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= @@ -868,22 +911,18 @@ golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod 
h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= -gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= -gonum.org/v1/gonum v0.6.0/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU= gonum.org/v1/gonum v0.11.0 h1:f1IJhK4Km5tBJmaiJXtk/PkL4cdVX6J+tGiM187uT5E= gonum.org/v1/gonum v0.11.0/go.mod h1:fSG4YDCxxUZQJ7rKsQrj0gMOg00Il0Z96/qMA4bVQhA= -gonum.org/v1/netlib v0.0.0-20181029234149-ec6d1f5cefe6/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= -gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= -gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -910,12 +949,12 @@ google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180518175338-11a468237815/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto 
v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= @@ -923,7 +962,6 @@ google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvx google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200108215221-bd8f9a0ef82f/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= @@ -949,9 +987,15 @@ google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto 
v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c h1:QgY/XxIAIeccR+Ca/rDdKubLIU9rcJ3xfy1DC/Wd2Oo= -google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= +google.golang.org/genproto v0.0.0-20230711160842-782d3b101e98 h1:Z0hjGZePRE0ZBWotvtrwxFNrNE9CUAGtplaDK5NNI/g= +google.golang.org/genproto v0.0.0-20230711160842-782d3b101e98/go.mod h1:S7mY02OqCJTD0E1OiQy1F72PWFB4bZJ87cAtLPYgDR0= +google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98 h1:FmF5cCW94Ij59cfpoLiwTgodWmm60eEV0CjlsVg2fuw= +google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 h1:bVf09lpb+OJbByTj913DRJioFFAjf/ZGxEz7MajTp2U= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM= +google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -970,10 +1014,11 @@ google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc 
v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.50.1 h1:DS/BukOZWp8s6p4Dt/tOaJaTQyPyOoCcrjroHuCeLzY= -google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.58.3 h1:BjnpXut1btbtgN/6sp+brB2Kbm2LjNXnidYujAVbSoQ= +google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -987,9 +1032,8 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= -google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -997,24 +1041,26 
@@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntN gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/ini.v1 v1.66.4 h1:SsAcf+mM7mRZo2nJNGt8mZCjG8ZRaNGMURJw7BsIST4= -gopkg.in/ini.v1 v1.66.4/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= +gopkg.in/go-playground/validator.v8 v8.18.2/go.mod h1:RX2a/7Ha8BgOhfk7j780h4/u/RRjR0eouCJSH80/M2Y= +gopkg.in/ini.v1 v1.51.1/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU= gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/urfave/cli.v1 v1.20.0 h1:NdAVW6RYxDif9DhDHaAortIu956m2c0v+09AZBPTbE0= -gopkg.in/urfave/cli.v1 v1.20.0/go.mod h1:vuBzUtMdQeixQj8LVd+/98pzhxNGQoyuPBlsXHOQNO0= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20191120175047-4206685974f2/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= @@ -1027,6 +1073,7 @@ honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU= +rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA= diff --git a/coreth/internal/debug/flags.go b/coreth/internal/debug/flags.go index 3c7a33dc..a8c7bb24 100644 --- a/coreth/internal/debug/flags.go +++ b/coreth/internal/debug/flags.go @@ -30,83 +30,152 @@ import ( "fmt" "io" "net/http" - _ "net/http/pprof" // nolint: gosec + _ "net/http/pprof" "os" + "path/filepath" "runtime" + "github.com/ava-labs/coreth/internal/flags" 
"github.com/ethereum/go-ethereum/log" "github.com/fjl/memsize/memsizeui" "github.com/mattn/go-colorable" "github.com/mattn/go-isatty" - "gopkg.in/urfave/cli.v1" + "github.com/urfave/cli/v2" + "gopkg.in/natefinch/lumberjack.v2" ) var Memsize memsizeui.Handler var ( - verbosityFlag = cli.IntFlag{ - Name: "verbosity", - Usage: "Logging verbosity: 0=silent, 1=error, 2=warn, 3=info, 4=debug, 5=detail", - Value: 3, - } - vmoduleFlag = cli.StringFlag{ - Name: "vmodule", - Usage: "Per-module verbosity: comma-separated list of = (e.g. eth/*=5,p2p=4)", - Value: "", - } - logjsonFlag = cli.BoolFlag{ - Name: "log.json", - Usage: "Format logs with JSON", - } - backtraceAtFlag = cli.StringFlag{ - Name: "log.backtrace", - Usage: "Request a stack trace at a specific logging statement (e.g. \"block.go:271\")", - Value: "", - } - debugFlag = cli.BoolFlag{ - Name: "log.debug", - Usage: "Prepends log messages with call-site location (file and line number)", - } - pprofFlag = cli.BoolFlag{ - Name: "pprof", - Usage: "Enable the pprof HTTP server", - } - pprofPortFlag = cli.IntFlag{ - Name: "pprof.port", - Usage: "pprof HTTP server listening port", - Value: 6060, - } - pprofAddrFlag = cli.StringFlag{ - Name: "pprof.addr", - Usage: "pprof HTTP server listening interface", - Value: "127.0.0.1", - } - memprofilerateFlag = cli.IntFlag{ - Name: "pprof.memprofilerate", - Usage: "Turn on memory profiling with the given rate", - Value: runtime.MemProfileRate, - } - blockprofilerateFlag = cli.IntFlag{ - Name: "pprof.blockprofilerate", - Usage: "Turn on block profiling with the given rate", - } - cpuprofileFlag = cli.StringFlag{ - Name: "pprof.cpuprofile", - Usage: "Write CPU profile to the given file", - } - traceFlag = cli.StringFlag{ - Name: "trace", - Usage: "Write execution trace to the given file", + verbosityFlag = &cli.IntFlag{ + Name: "verbosity", + Usage: "Logging verbosity: 0=silent, 1=error, 2=warn, 3=info, 4=debug, 5=detail", + Value: 3, + Category: flags.LoggingCategory, + } + 
logVmoduleFlag = &cli.StringFlag{ + Name: "log.vmodule", + Usage: "Per-module verbosity: comma-separated list of = (e.g. eth/*=5,p2p=4)", + Value: "", + Category: flags.LoggingCategory, + } + vmoduleFlag = &cli.StringFlag{ + Name: "vmodule", + Usage: "Per-module verbosity: comma-separated list of = (e.g. eth/*=5,p2p=4)", + Value: "", + Hidden: true, + Category: flags.LoggingCategory, + } + logjsonFlag = &cli.BoolFlag{ + Name: "log.json", + Usage: "Format logs with JSON", + Hidden: true, + Category: flags.LoggingCategory, + } + logFormatFlag = &cli.StringFlag{ + Name: "log.format", + Usage: "Log format to use (json|logfmt|terminal)", + Category: flags.LoggingCategory, + } + logFileFlag = &cli.StringFlag{ + Name: "log.file", + Usage: "Write logs to a file", + Category: flags.LoggingCategory, + } + backtraceAtFlag = &cli.StringFlag{ + Name: "log.backtrace", + Usage: "Request a stack trace at a specific logging statement (e.g. \"block.go:271\")", + Value: "", + Category: flags.LoggingCategory, + } + debugFlag = &cli.BoolFlag{ + Name: "log.debug", + Usage: "Prepends log messages with call-site location (file and line number)", + Category: flags.LoggingCategory, + } + logRotateFlag = &cli.BoolFlag{ + Name: "log.rotate", + Usage: "Enables log file rotation", + } + logMaxSizeMBsFlag = &cli.IntFlag{ + Name: "log.maxsize", + Usage: "Maximum size in MBs of a single log file", + Value: 100, + Category: flags.LoggingCategory, + } + logMaxBackupsFlag = &cli.IntFlag{ + Name: "log.maxbackups", + Usage: "Maximum number of log files to retain", + Value: 10, + Category: flags.LoggingCategory, + } + logMaxAgeFlag = &cli.IntFlag{ + Name: "log.maxage", + Usage: "Maximum number of days to retain a log file", + Value: 30, + Category: flags.LoggingCategory, + } + logCompressFlag = &cli.BoolFlag{ + Name: "log.compress", + Usage: "Compress the log files", + Value: false, + Category: flags.LoggingCategory, + } + pprofFlag = &cli.BoolFlag{ + Name: "pprof", + Usage: "Enable the pprof HTTP 
server", + Category: flags.LoggingCategory, + } + pprofPortFlag = &cli.IntFlag{ + Name: "pprof.port", + Usage: "pprof HTTP server listening port", + Value: 6060, + Category: flags.LoggingCategory, + } + pprofAddrFlag = &cli.StringFlag{ + Name: "pprof.addr", + Usage: "pprof HTTP server listening interface", + Value: "127.0.0.1", + Category: flags.LoggingCategory, + } + memprofilerateFlag = &cli.IntFlag{ + Name: "pprof.memprofilerate", + Usage: "Turn on memory profiling with the given rate", + Value: runtime.MemProfileRate, + Category: flags.LoggingCategory, + } + blockprofilerateFlag = &cli.IntFlag{ + Name: "pprof.blockprofilerate", + Usage: "Turn on block profiling with the given rate", + Category: flags.LoggingCategory, + } + cpuprofileFlag = &cli.StringFlag{ + Name: "pprof.cpuprofile", + Usage: "Write CPU profile to the given file", + Category: flags.LoggingCategory, + } + traceFlag = &cli.StringFlag{ + Name: "trace", + Usage: "Write execution trace to the given file", + Category: flags.LoggingCategory, } ) // Flags holds all command-line flags required for debugging. var Flags = []cli.Flag{ verbosityFlag, + logVmoduleFlag, vmoduleFlag, - logjsonFlag, backtraceAtFlag, debugFlag, + logjsonFlag, + logFormatFlag, + logFileFlag, + logRotateFlag, + logMaxSizeMBsFlag, + logMaxBackupsFlag, + logMaxAgeFlag, + logCompressFlag, pprofFlag, pprofAddrFlag, pprofPortFlag, @@ -116,7 +185,10 @@ var Flags = []cli.Flag{ traceFlag, } -var glogger *log.GlogHandler +var ( + glogger *log.GlogHandler + logOutputStream log.Handler +) func init() { glogger = log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false))) @@ -127,66 +199,129 @@ func init() { // Setup initializes profiling and logging based on the CLI flags. // It should be called as early as possible in the program. 
func Setup(ctx *cli.Context) error { - var ostream log.Handler - output := io.Writer(os.Stderr) - if ctx.GlobalBool(logjsonFlag.Name) { - ostream = log.StreamHandler(output, log.JSONFormat()) - } else { - usecolor := (isatty.IsTerminal(os.Stderr.Fd()) || isatty.IsCygwinTerminal(os.Stderr.Fd())) && os.Getenv("TERM") != "dumb" - if usecolor { + var ( + logfmt log.Format + output = io.Writer(os.Stderr) + logFmtFlag = ctx.String(logFormatFlag.Name) + ) + switch { + case ctx.Bool(logjsonFlag.Name): + // Retain backwards compatibility with `--log.json` flag if `--log.format` not set + defer log.Warn("The flag '--log.json' is deprecated, please use '--log.format=json' instead") + logfmt = log.JSONFormat() + case logFmtFlag == "json": + logfmt = log.JSONFormat() + case logFmtFlag == "logfmt": + logfmt = log.LogfmtFormat() + case logFmtFlag == "", logFmtFlag == "terminal": + useColor := (isatty.IsTerminal(os.Stderr.Fd()) || isatty.IsCygwinTerminal(os.Stderr.Fd())) && os.Getenv("TERM") != "dumb" + if useColor { output = colorable.NewColorableStderr() } - ostream = log.StreamHandler(output, log.TerminalFormat(usecolor)) + logfmt = log.TerminalFormat(useColor) + default: + // Unknown log format specified + return fmt.Errorf("unknown log format: %v", ctx.String(logFormatFlag.Name)) + } + var ( + stdHandler = log.StreamHandler(output, logfmt) + ostream = stdHandler + logFile = ctx.String(logFileFlag.Name) + rotation = ctx.Bool(logRotateFlag.Name) + ) + if len(logFile) > 0 { + if err := validateLogLocation(filepath.Dir(logFile)); err != nil { + return fmt.Errorf("failed to initiatilize file logger: %v", err) + } + } + context := []interface{}{"rotate", rotation} + if len(logFmtFlag) > 0 { + context = append(context, "format", logFmtFlag) + } else { + context = append(context, "format", "terminal") + } + if rotation { + // Lumberjack uses -lumberjack.log in is.TempDir() if empty. 
+ // so typically /tmp/geth-lumberjack.log on linux + if len(logFile) > 0 { + context = append(context, "location", logFile) + } else { + context = append(context, "location", filepath.Join(os.TempDir(), "geth-lumberjack.log")) + } + ostream = log.MultiHandler(log.StreamHandler(&lumberjack.Logger{ + Filename: logFile, + MaxSize: ctx.Int(logMaxSizeMBsFlag.Name), + MaxBackups: ctx.Int(logMaxBackupsFlag.Name), + MaxAge: ctx.Int(logMaxAgeFlag.Name), + Compress: ctx.Bool(logCompressFlag.Name), + }, logfmt), stdHandler) + } else if logFile != "" { + if logOutputStream, err := log.FileHandler(logFile, logfmt); err != nil { + return err + } else { + ostream = log.MultiHandler(logOutputStream, stdHandler) + context = append(context, "location", logFile) + } } glogger.SetHandler(ostream) // logging - verbosity := ctx.GlobalInt(verbosityFlag.Name) + verbosity := ctx.Int(verbosityFlag.Name) glogger.Verbosity(log.Lvl(verbosity)) - vmodule := ctx.GlobalString(vmoduleFlag.Name) + vmodule := ctx.String(logVmoduleFlag.Name) + if vmodule == "" { + // Retain backwards compatibility with `--vmodule` flag if `--log.vmodule` not set + vmodule = ctx.String(vmoduleFlag.Name) + if vmodule != "" { + defer log.Warn("The flag '--vmodule' is deprecated, please use '--log.vmodule' instead") + } + } glogger.Vmodule(vmodule) - debug := ctx.GlobalBool(debugFlag.Name) - if ctx.GlobalIsSet(debugFlag.Name) { - debug = ctx.GlobalBool(debugFlag.Name) + debug := ctx.Bool(debugFlag.Name) + if ctx.IsSet(debugFlag.Name) { + debug = ctx.Bool(debugFlag.Name) } log.PrintOrigins(debug) - backtrace := ctx.GlobalString(backtraceAtFlag.Name) + backtrace := ctx.String(backtraceAtFlag.Name) glogger.BacktraceAt(backtrace) log.Root().SetHandler(glogger) // profiling, tracing runtime.MemProfileRate = memprofilerateFlag.Value - if ctx.GlobalIsSet(memprofilerateFlag.Name) { - runtime.MemProfileRate = ctx.GlobalInt(memprofilerateFlag.Name) + if ctx.IsSet(memprofilerateFlag.Name) { + runtime.MemProfileRate = 
ctx.Int(memprofilerateFlag.Name) } - blockProfileRate := ctx.GlobalInt(blockprofilerateFlag.Name) + blockProfileRate := ctx.Int(blockprofilerateFlag.Name) Handler.SetBlockProfileRate(blockProfileRate) - if traceFile := ctx.GlobalString(traceFlag.Name); traceFile != "" { + if traceFile := ctx.String(traceFlag.Name); traceFile != "" { if err := Handler.StartGoTrace(traceFile); err != nil { return err } } - if cpuFile := ctx.GlobalString(cpuprofileFlag.Name); cpuFile != "" { + if cpuFile := ctx.String(cpuprofileFlag.Name); cpuFile != "" { if err := Handler.StartCPUProfile(cpuFile); err != nil { return err } } // pprof server - if ctx.GlobalBool(pprofFlag.Name) { - listenHost := ctx.GlobalString(pprofAddrFlag.Name) + if ctx.Bool(pprofFlag.Name) { + listenHost := ctx.String(pprofAddrFlag.Name) - port := ctx.GlobalInt(pprofPortFlag.Name) + port := ctx.Int(pprofPortFlag.Name) address := fmt.Sprintf("%s:%d", listenHost, port) StartPProf(address) } + if len(logFile) > 0 || rotation { + log.Info("Logging configured", context...) + } return nil } @@ -205,4 +340,21 @@ func StartPProf(address string) { func Exit() { Handler.StopCPUProfile() Handler.StopGoTrace() + if closer, ok := logOutputStream.(io.Closer); ok { + closer.Close() + } +} + +func validateLogLocation(path string) error { + if err := os.MkdirAll(path, os.ModePerm); err != nil { + return fmt.Errorf("error creating the directory: %w", err) + } + // Check if the path is writable by trying to create a temporary file + tmp := filepath.Join(path, "tmp") + if f, err := os.Create(tmp); err != nil { + return err + } else { + f.Close() + } + return os.Remove(tmp) } diff --git a/coreth/internal/debug/trace.go b/coreth/internal/debug/trace.go index 7ecef70a..7c51228b 100644 --- a/coreth/internal/debug/trace.go +++ b/coreth/internal/debug/trace.go @@ -58,7 +58,7 @@ func (h *HandlerT) StartGoTrace(file string) error { return nil } -// StopTrace stops an ongoing trace. +// StopGoTrace stops an ongoing trace. 
func (h *HandlerT) StopGoTrace() error { h.mu.Lock() defer h.mu.Unlock() diff --git a/coreth/internal/ethapi/api.go b/coreth/internal/ethapi/api.go index 79d8ddf5..b41105e1 100644 --- a/coreth/internal/ethapi/api.go +++ b/coreth/internal/ethapi/api.go @@ -28,9 +28,11 @@ package ethapi import ( "context" + "encoding/hex" "errors" "fmt" "math/big" + "strings" "time" "github.com/ava-labs/avalanchego/ids" @@ -38,6 +40,7 @@ import ( "github.com/ava-labs/coreth/accounts/abi" "github.com/ava-labs/coreth/accounts/keystore" "github.com/ava-labs/coreth/accounts/scwallet" + "github.com/ava-labs/coreth/consensus" "github.com/ava-labs/coreth/core" "github.com/ava-labs/coreth/core/state" "github.com/ava-labs/coreth/core/types" @@ -102,8 +105,8 @@ type feeHistoryResult struct { } // FeeHistory returns the fee market history. -func (s *EthereumAPI) FeeHistory(ctx context.Context, blockCount rpc.DecimalOrHex, lastBlock rpc.BlockNumber, rewardPercentiles []float64) (*feeHistoryResult, error) { - oldest, reward, baseFee, gasUsed, err := s.b.FeeHistory(ctx, int(blockCount), lastBlock, rewardPercentiles) +func (s *EthereumAPI) FeeHistory(ctx context.Context, blockCount math.HexOrDecimal64, lastBlock rpc.BlockNumber, rewardPercentiles []float64) (*feeHistoryResult, error) { + oldest, reward, baseFee, gasUsed, err := s.b.FeeHistory(ctx, uint64(blockCount), lastBlock, rewardPercentiles) if err != nil { return nil, err } @@ -138,7 +141,7 @@ func (s *EthereumAPI) Syncing() (interface{}, error) { return false, nil } -// TxPoolAPI offers and API for the transaction pool. It only operates on data that is non confidential. +// TxPoolAPI offers and API for the transaction pool. It only operates on data that is non-confidential. 
type TxPoolAPI struct { b Backend } @@ -161,7 +164,7 @@ func (s *TxPoolAPI) Content() map[string]map[string]map[string]*RPCTransaction { for account, txs := range pending { dump := make(map[string]*RPCTransaction) for _, tx := range txs { - dump[fmt.Sprintf("%d", tx.Nonce())] = newRPCPendingTransaction(tx, curHeader, estimatedBaseFee, s.b.ChainConfig()) + dump[fmt.Sprintf("%d", tx.Nonce())] = NewRPCTransaction(tx, curHeader, estimatedBaseFee, s.b.ChainConfig()) } content["pending"][account.Hex()] = dump } @@ -169,7 +172,7 @@ func (s *TxPoolAPI) Content() map[string]map[string]map[string]*RPCTransaction { for account, txs := range queue { dump := make(map[string]*RPCTransaction) for _, tx := range txs { - dump[fmt.Sprintf("%d", tx.Nonce())] = newRPCPendingTransaction(tx, curHeader, estimatedBaseFee, s.b.ChainConfig()) + dump[fmt.Sprintf("%d", tx.Nonce())] = NewRPCTransaction(tx, curHeader, estimatedBaseFee, s.b.ChainConfig()) } content["queued"][account.Hex()] = dump } @@ -186,14 +189,14 @@ func (s *TxPoolAPI) ContentFrom(addr common.Address) map[string]map[string]*RPCT // Build the pending transactions dump := make(map[string]*RPCTransaction, len(pending)) for _, tx := range pending { - dump[fmt.Sprintf("%d", tx.Nonce())] = newRPCPendingTransaction(tx, curHeader, estimatedBaseFee, s.b.ChainConfig()) + dump[fmt.Sprintf("%d", tx.Nonce())] = NewRPCTransaction(tx, curHeader, estimatedBaseFee, s.b.ChainConfig()) } content["pending"] = dump // Build the queued transactions dump = make(map[string]*RPCTransaction, len(queue)) for _, tx := range queue { - dump[fmt.Sprintf("%d", tx.Nonce())] = newRPCPendingTransaction(tx, curHeader, estimatedBaseFee, s.b.ChainConfig()) + dump[fmt.Sprintf("%d", tx.Nonce())] = NewRPCTransaction(tx, curHeader, estimatedBaseFee, s.b.ChainConfig()) } content["queued"] = dump @@ -327,7 +330,7 @@ func (s *PersonalAccountAPI) OpenWallet(url string, passphrase *string) error { return wallet.Open(pass) } -// DeriveAccount requests a HD wallet to 
derive a new account, optionally pinning +// DeriveAccount requests an HD wallet to derive a new account, optionally pinning // it for later reuse. func (s *PersonalAccountAPI) DeriveAccount(url string, path string, pin *bool) (accounts.Account, error) { wallet, err := s.am.Wallet(url) @@ -345,19 +348,20 @@ func (s *PersonalAccountAPI) DeriveAccount(url string, path string, pin *bool) ( } // NewAccount will create a new account and returns the address for the new account. -func (s *PersonalAccountAPI) NewAccount(password string) (common.Address, error) { +func (s *PersonalAccountAPI) NewAccount(password string) (common.AddressEIP55, error) { ks, err := fetchKeystore(s.am) if err != nil { - return common.Address{}, err + return common.AddressEIP55{}, err } acc, err := ks.NewAccount(password) if err == nil { - log.Info("Your new key was generated", "address", acc.Address) + addrEIP55 := common.AddressEIP55(acc.Address) + log.Info("Your new key was generated", "address", addrEIP55.String()) log.Warn("Please backup your key file!", "path", acc.URL.Path) log.Warn("Please remember your password!") - return acc.Address, nil + return addrEIP55, nil } - return common.Address{}, err + return common.AddressEIP55{}, err } // fetchKeystore retrieves the encrypted keystore from the account manager. @@ -447,7 +451,7 @@ func (s *PersonalAccountAPI) signTransaction(ctx context.Context, args *Transact // passwd isn't able to decrypt the key it fails. func (s *PersonalAccountAPI) SendTransaction(ctx context.Context, args TransactionArgs, passwd string) (common.Hash, error) { if args.Nonce == nil { - // Hold the addresse's mutex around signing to prevent concurrent assignment of + // Hold the mutex around signing to prevent concurrent assignment of // the same nonce to multiple accounts. 
s.nonceLock.LockAddr(args.from()) defer s.nonceLock.UnlockAddr(args.from()) @@ -468,16 +472,16 @@ func (s *PersonalAccountAPI) SignTransaction(ctx context.Context, args Transacti // No need to obtain the noncelock mutex, since we won't be sending this // tx into the transaction pool, but right back to the user if args.From == nil { - return nil, fmt.Errorf("sender not specified") + return nil, errors.New("sender not specified") } if args.Gas == nil { - return nil, fmt.Errorf("gas not specified") + return nil, errors.New("gas not specified") } if args.GasPrice == nil && (args.MaxFeePerGas == nil || args.MaxPriorityFeePerGas == nil) { - return nil, fmt.Errorf("missing gasPrice or maxFeePerGas/maxPriorityFeePerGas") + return nil, errors.New("missing gasPrice or maxFeePerGas/maxPriorityFeePerGas") } if args.Nonce == nil { - return nil, fmt.Errorf("nonce not specified") + return nil, errors.New("nonce not specified") } // Before actually signing the transaction, ensure the transaction fee is reasonable. 
tx := args.toTransaction() @@ -538,7 +542,7 @@ func (s *PersonalAccountAPI) EcRecover(ctx context.Context, data, sig hexutil.By return common.Address{}, fmt.Errorf("signature must be %d bytes long", crypto.SignatureLength) } if sig[crypto.RecoveryIDOffset] != 27 && sig[crypto.RecoveryIDOffset] != 28 { - return common.Address{}, fmt.Errorf("invalid Ethereum signature (V is not 27 or 28)") + return common.Address{}, errors.New("invalid Ethereum signature (V is not 27 or 28)") } sig[crypto.RecoveryIDOffset] -= 27 // Transform yellow paper V from 27/28 to 0/1 @@ -572,7 +576,7 @@ func (s *PersonalAccountAPI) InitializeWallet(ctx context.Context, url string) ( case *scwallet.Wallet: return mnemonic, wallet.Initialize(seed) default: - return "", fmt.Errorf("specified wallet does not support initialization") + return "", errors.New("specified wallet does not support initialization") } } @@ -587,7 +591,7 @@ func (s *PersonalAccountAPI) Unpair(ctx context.Context, url string, pin string) case *scwallet.Wallet: return wallet.Unpair([]byte(pin)) default: - return fmt.Errorf("specified wallet does not support pairing") + return errors.New("specified wallet does not support pairing") } } @@ -667,8 +671,10 @@ func (s *BlockChainAPI) GetProof(ctx context.Context, address common.Address, st if state == nil || err != nil { return nil, err } - - storageTrie := state.StorageTrie(address) + storageTrie, err := state.StorageTrie(address) + if err != nil { + return nil, err + } storageHash := types.EmptyRootHash codeHash := state.GetCodeHash(address) storageProof := make([]StorageResult, len(storageKeys)) @@ -682,15 +688,19 @@ func (s *BlockChainAPI) GetProof(ctx context.Context, address common.Address, st } // create the proof for the storageKeys - for i, key := range storageKeys { + for i, hexKey := range storageKeys { + key, err := decodeHash(hexKey) + if err != nil { + return nil, err + } if storageTrie != nil { - proof, storageError := state.GetStorageProof(address, 
common.HexToHash(key)) + proof, storageError := state.GetStorageProof(address, key) if storageError != nil { return nil, storageError } - storageProof[i] = StorageResult{key, (*hexutil.Big)(state.GetState(address, common.HexToHash(key)).Big()), toHexSlice(proof)} + storageProof[i] = StorageResult{hexKey, (*hexutil.Big)(state.GetState(address, key).Big()), toHexSlice(proof)} } else { - storageProof[i] = StorageResult{key, &hexutil.Big{}, []string{}} + storageProof[i] = StorageResult{hexKey, &hexutil.Big{}, []string{}} } } @@ -711,6 +721,25 @@ func (s *BlockChainAPI) GetProof(ctx context.Context, address common.Address, st }, state.Error() } +// decodeHash parses a hex-encoded 32-byte hash. The input may optionally +// be prefixed by 0x and can have a byte length up to 32. +func decodeHash(s string) (common.Hash, error) { + if strings.HasPrefix(s, "0x") || strings.HasPrefix(s, "0X") { + s = s[2:] + } + if (len(s) & 1) > 0 { + s = "0" + s + } + b, err := hex.DecodeString(s) + if err != nil { + return common.Hash{}, errors.New("hex string invalid") + } + if len(b) > 32 { + return common.Hash{}, errors.New("hex string too long, want at most 32 bytes") + } + return common.BytesToHash(b), nil +} + // GetHeaderByNumber returns the requested canonical block header. // * When blockNr is -1 the chain head is returned. // * When blockNr is -2 the pending chain head is returned. @@ -831,15 +860,47 @@ func (s *BlockChainAPI) GetCode(ctx context.Context, address common.Address, blo // GetStorageAt returns the storage from the state at the given address, key and // block number. The rpc.LatestBlockNumber and rpc.PendingBlockNumber meta block // numbers are also allowed. 
-func (s *BlockChainAPI) GetStorageAt(ctx context.Context, address common.Address, key string, blockNrOrHash rpc.BlockNumberOrHash) (hexutil.Bytes, error) { +func (s *BlockChainAPI) GetStorageAt(ctx context.Context, address common.Address, hexKey string, blockNrOrHash rpc.BlockNumberOrHash) (hexutil.Bytes, error) { state, _, err := s.b.StateAndHeaderByNumberOrHash(ctx, blockNrOrHash) if state == nil || err != nil { return nil, err } - res := state.GetState(address, common.HexToHash(key)) + key, err := decodeHash(hexKey) + if err != nil { + return nil, fmt.Errorf("unable to decode storage key: %s", err) + } + res := state.GetState(address, key) return res[:], state.Error() } +// GetBlockReceipts returns the block receipts for the given block hash or number or tag. +func (s *BlockChainAPI) GetBlockReceipts(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) ([]map[string]interface{}, error) { + block, err := s.b.BlockByNumberOrHash(ctx, blockNrOrHash) + if block == nil || err != nil { + // When the block doesn't exist, the RPC method should return JSON null + // as per specification. + return nil, nil + } + receipts, err := s.b.GetReceipts(ctx, block.Hash()) + if err != nil { + return nil, err + } + txs := block.Transactions() + if len(txs) != len(receipts) { + return nil, fmt.Errorf("receipts length mismatch: %d vs %d", len(txs), len(receipts)) + } + + // Derive the sender. + signer := types.MakeSigner(s.b.ChainConfig(), block.Number(), block.Time()) + + result := make([]map[string]interface{}, len(receipts)) + for i, receipt := range receipts { + result[i] = marshalReceipt(receipt, block.Hash(), block.NumberU64(), signer, txs[i], i) + } + + return result, nil +} + // OverrideAccount indicates the overriding fields of account during the execution // of a message call. // Note, state and stateDiff can't be specified at the same time. 
If state is @@ -889,6 +950,10 @@ func (diff *StateOverride) Apply(state *state.StateDB) error { } } } + // Now finalize the changes. Finalize is normally performed between transactions. + // By using finalize, the overrides are semantically behaving as + // if they were created in a transaction just before the tracing occur. + state.Finalise(false) return nil } @@ -896,7 +961,7 @@ func (diff *StateOverride) Apply(state *state.StateDB) error { type BlockOverrides struct { Number *hexutil.Big Difficulty *hexutil.Big - Time *hexutil.Big + Time *hexutil.Uint64 GasLimit *hexutil.Uint64 Coinbase *common.Address BaseFee *hexutil.Big @@ -914,7 +979,7 @@ func (diff *BlockOverrides) Apply(blockCtx *vm.BlockContext) { blockCtx.Difficulty = diff.Difficulty.ToInt() } if diff.Time != nil { - blockCtx.Time = diff.Time.ToInt() + blockCtx.Time = uint64(*diff.Time) } if diff.GasLimit != nil { blockCtx.GasLimit = uint64(*diff.GasLimit) @@ -927,7 +992,39 @@ func (diff *BlockOverrides) Apply(blockCtx *vm.BlockContext) { } } -func DoCall(ctx context.Context, b Backend, args TransactionArgs, blockNrOrHash rpc.BlockNumberOrHash, overrides *StateOverride, timeout time.Duration, globalGasCap uint64) (*core.ExecutionResult, error) { +// ChainContextBackend provides methods required to implement ChainContext. +type ChainContextBackend interface { + Engine() consensus.Engine + HeaderByNumber(context.Context, rpc.BlockNumber) (*types.Header, error) +} + +// ChainContext is an implementation of core.ChainContext. It's main use-case +// is instantiating a vm.BlockContext without having access to the BlockChain object. +type ChainContext struct { + b ChainContextBackend + ctx context.Context +} + +// NewChainContext creates a new ChainContext object. 
+func NewChainContext(ctx context.Context, backend ChainContextBackend) *ChainContext { + return &ChainContext{ctx: ctx, b: backend} +} + +func (context *ChainContext) Engine() consensus.Engine { + return context.b.Engine() +} + +func (context *ChainContext) GetHeader(hash common.Hash, number uint64) *types.Header { + // This method is called to get the hash for a block number when executing the BLOCKHASH + // opcode. Hence no need to search for non-canonical blocks. + header, err := context.b.HeaderByNumber(context.ctx, rpc.BlockNumber(number)) + if err != nil || header.Hash() != hash { + return nil + } + return header +} + +func DoCall(ctx context.Context, b Backend, args TransactionArgs, blockNrOrHash rpc.BlockNumberOrHash, overrides *StateOverride, blockOverrides *BlockOverrides, timeout time.Duration, globalGasCap uint64) (*core.ExecutionResult, error) { defer func(start time.Time) { log.Debug("Executing EVM call finished", "runtime", time.Since(start)) }(time.Now()) state, header, err := b.StateAndHeaderByNumberOrHash(ctx, blockNrOrHash) @@ -972,10 +1069,12 @@ func DoCall(ctx context.Context, b Backend, args TransactionArgs, blockNrOrHash if err != nil { return nil, err } - evm, vmError, err := b.GetEVM(ctx, msg, state, header, &vm.Config{NoBaseFee: true}) - if err != nil { - return nil, err + blockCtx := core.NewEVMBlockContext(header, NewChainContext(ctx, b), nil) + if blockOverrides != nil { + blockOverrides.Apply(&blockCtx) } + evm, vmError := b.GetEVM(ctx, msg, state, header, &vm.Config{NoBaseFee: true}, &blockCtx) + // Wait for the context to be done and cancel the evm. 
Even if the // EVM has finished, cancelling may be done (repeatedly) go func() { @@ -995,7 +1094,7 @@ func DoCall(ctx context.Context, b Backend, args TransactionArgs, blockNrOrHash return nil, fmt.Errorf("execution aborted (timeout = %v)", timeout) } if err != nil { - return result, fmt.Errorf("err: %w (supplied gas %d)", err, msg.Gas()) + return result, fmt.Errorf("err: %w (supplied gas %d)", err, msg.GasLimit) } return result, nil } @@ -1039,7 +1138,7 @@ type ExecutionResult struct { // CallDetailed performs the same call as Call, but returns the full context func (s *BlockChainAPI) CallDetailed(ctx context.Context, args TransactionArgs, blockNrOrHash rpc.BlockNumberOrHash, overrides *StateOverride) (*ExecutionResult, error) { - result, err := DoCall(ctx, s.b, args, blockNrOrHash, overrides, s.b.RPCEVMTimeout(), s.b.RPCGasCap()) + result, err := DoCall(ctx, s.b, args, blockNrOrHash, overrides, nil, s.b.RPCEVMTimeout(), s.b.RPCGasCap()) if err != nil { return nil, err } @@ -1069,8 +1168,8 @@ func (s *BlockChainAPI) CallDetailed(ctx context.Context, args TransactionArgs, // // Note, this function doesn't make and changes in the state/blockchain and is // useful to execute and retrieve values. 
-func (s *BlockChainAPI) Call(ctx context.Context, args TransactionArgs, blockNrOrHash rpc.BlockNumberOrHash, overrides *StateOverride) (hexutil.Bytes, error) { - result, err := DoCall(ctx, s.b, args, blockNrOrHash, overrides, s.b.RPCEVMTimeout(), s.b.RPCGasCap()) +func (s *BlockChainAPI) Call(ctx context.Context, args TransactionArgs, blockNrOrHash rpc.BlockNumberOrHash, overrides *StateOverride, blockOverrides *BlockOverrides) (hexutil.Bytes, error) { + result, err := DoCall(ctx, s.b, args, blockNrOrHash, overrides, blockOverrides, s.b.RPCEVMTimeout(), s.b.RPCGasCap()) if err != nil { return nil, err } @@ -1127,7 +1226,7 @@ func DoEstimateGas(ctx context.Context, b Backend, args TransactionArgs, blockNr available := new(big.Int).Set(balance) if args.Value != nil { if args.Value.ToInt().Cmp(available) >= 0 { - return 0, errors.New("insufficient funds for transfer") + return 0, core.ErrInsufficientFundsForTransfer } available.Sub(available, args.Value.ToInt()) } @@ -1139,14 +1238,14 @@ func DoEstimateGas(ctx context.Context, b Backend, args TransactionArgs, blockNr if transfer == nil { transfer = new(hexutil.Big) } - log.Warn("Gas estimation capped by limited funds", "original", hi, "balance", balance, + log.Info("Gas estimation capped by limited funds", "original", hi, "balance", balance, "sent", transfer.ToInt(), "maxFeePerGas", feeCap, "fundable", allowance) hi = allowance.Uint64() } } // Recap the highest gas allowance with specified gascap. 
if gasCap != 0 && hi > gasCap { - log.Warn("Caller gas above allowance, capping", "requested", hi, "cap", gasCap) + log.Info("Caller gas above allowance, capping", "requested", hi, "cap", gasCap) hi = gasCap } cap = hi @@ -1155,7 +1254,7 @@ func DoEstimateGas(ctx context.Context, b Backend, args TransactionArgs, blockNr executable := func(gas uint64) (bool, *core.ExecutionResult, error) { args.Gas = (*hexutil.Uint64)(&gas) - result, err := DoCall(ctx, b, args, blockNrOrHash, nil, 0, gasCap) + result, err := DoCall(ctx, b, args, blockNrOrHash, nil, nil, 0, gasCap) if err != nil { if errors.Is(err, core.ErrIntrinsicGas) { return true, nil, nil // Special case, raise gas limit @@ -1204,7 +1303,7 @@ func DoEstimateGas(ctx context.Context, b Backend, args TransactionArgs, blockNr // EstimateGas returns an estimate of the amount of gas needed to execute the // given transaction against the current pending block. func (s *BlockChainAPI) EstimateGas(ctx context.Context, args TransactionArgs, blockNrOrHash *rpc.BlockNumberOrHash) (hexutil.Uint64, error) { - bNrOrHash := rpc.BlockNumberOrHashWithNumber(rpc.PendingBlockNumber) + bNrOrHash := rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber) if blockNrOrHash != nil { bNrOrHash = *blockNrOrHash } @@ -1256,21 +1355,18 @@ func RPCMarshalBlock(block *types.Block, inclTx bool, fullTx bool, config *param fields["blockExtraData"] = hexutil.Bytes(block.ExtData()) if inclTx { - formatTx := func(tx *types.Transaction) (interface{}, error) { - return tx.Hash(), nil + formatTx := func(idx int, tx *types.Transaction) interface{} { + return tx.Hash() } if fullTx { - formatTx = func(tx *types.Transaction) (interface{}, error) { - return newRPCTransactionFromBlockHash(block, tx.Hash(), config), nil + formatTx = func(idx int, tx *types.Transaction) interface{} { + return newRPCTransactionFromBlockIndex(block, uint64(idx), config) } } txs := block.Transactions() transactions := make([]interface{}, len(txs)) - var err error for i, tx := 
range txs { - if transactions[i], err = formatTx(tx); err != nil { - return nil, err - } + transactions[i] = formatTx(i, tx) } fields["transactions"] = transactions } @@ -1334,8 +1430,8 @@ type RPCTransaction struct { // newRPCTransaction returns a transaction that will serialize to the RPC // representation, with the given location metadata set (if available). -func newRPCTransaction(tx *types.Transaction, blockHash common.Hash, blockNumber uint64, blockTimestamp uint64, index uint64, baseFee *big.Int, config *params.ChainConfig) *RPCTransaction { - signer := types.MakeSigner(config, new(big.Int).SetUint64(blockNumber), new(big.Int).SetUint64(blockTimestamp)) +func newRPCTransaction(tx *types.Transaction, blockHash common.Hash, blockNumber uint64, blockTime uint64, index uint64, baseFee *big.Int, config *params.ChainConfig) *RPCTransaction { + signer := types.MakeSigner(config, new(big.Int).SetUint64(blockNumber), blockTime) from, _ := types.Sender(signer, tx) v, r, s := tx.RawSignatureValues() result := &RPCTransaction{ @@ -1385,15 +1481,19 @@ func newRPCTransaction(tx *types.Transaction, blockHash common.Hash, blockNumber return result } -// newRPCPendingTransaction returns a pending transaction that will serialize to the RPC representation -func newRPCPendingTransaction(tx *types.Transaction, current *types.Header, baseFee *big.Int, config *params.ChainConfig) *RPCTransaction { - blockNumber := uint64(0) - blockTimestamp := uint64(0) +// NewRPCTransaction returns a pending transaction that will serialize to the RPC representation +// Note: in go-ethereum this function is called NewRPCPendingTransaction. +// In coreth, we have renamed it to NewRPCTransaction as it is used for accepted transactions as well. 
+func NewRPCTransaction(tx *types.Transaction, current *types.Header, baseFee *big.Int, config *params.ChainConfig) *RPCTransaction { + var ( + blockNumber = uint64(0) + blockTime = uint64(0) + ) if current != nil { blockNumber = current.Number.Uint64() - blockTimestamp = current.Time + blockTime = current.Time } - return newRPCTransaction(tx, common.Hash{}, blockNumber, blockTimestamp, 0, baseFee, config) + return newRPCTransaction(tx, common.Hash{}, blockNumber, blockTime, 0, baseFee, config) } // newRPCTransactionFromBlockIndex returns a transaction that will serialize to the RPC representation. @@ -1415,18 +1515,8 @@ func newRPCRawTransactionFromBlockIndex(b *types.Block, index uint64) hexutil.By return blob } -// newRPCTransactionFromBlockHash returns a transaction that will serialize to the RPC representation. -func newRPCTransactionFromBlockHash(b *types.Block, hash common.Hash, config *params.ChainConfig) *RPCTransaction { - for idx, tx := range b.Transactions() { - if tx.Hash() == hash { - return newRPCTransactionFromBlockIndex(b, uint64(idx), config) - } - } - return nil -} - // accessListResult returns an optional accesslist -// Its the result of the `debug_createAccessList` RPC call. +// It's the result of the `debug_createAccessList` RPC call. // It contains an error if the transaction itself failed. type accessListResult struct { Accesslist *types.AccessList `json:"accessList"` @@ -1434,7 +1524,7 @@ type accessListResult struct { GasUsed hexutil.Uint64 `json:"gasUsed"` } -// CreateAccessList creates a EIP-2930 type AccessList for the given transaction. +// CreateAccessList creates an EIP-2930 type AccessList for the given transaction. // Reexec and BlockNrOrHash can be specified to create the accessList on top of a certain state. 
func (s *BlockChainAPI) CreateAccessList(ctx context.Context, args TransactionArgs, blockNrOrHash *rpc.BlockNumberOrHash) (*accessListResult, error) { bNrOrHash := rpc.BlockNumberOrHashWithNumber(rpc.PendingBlockNumber) @@ -1478,7 +1568,7 @@ func AccessList(ctx context.Context, b Backend, blockNrOrHash rpc.BlockNumberOrH to = crypto.CreateAddress(args.from(), uint64(*args.Nonce)) } // Retrieve the precompiles since they don't need to be added to the access list - precompiles := vm.ActivePrecompiles(b.ChainConfig().AvalancheRules(header.Number, new(big.Int).SetUint64(header.Time))) + precompiles := vm.ActivePrecompiles(b.ChainConfig().AvalancheRules(header.Number, header.Time)) // Create an initial tracer prevTracer := logger.NewAccessListTracer(nil, args.from(), to, precompiles) @@ -1502,12 +1592,9 @@ func AccessList(ctx context.Context, b Backend, blockNrOrHash rpc.BlockNumberOrH // Apply the transaction with the access list tracer tracer := logger.NewAccessListTracer(accessList, args.from(), to, precompiles) - config := vm.Config{Tracer: tracer, Debug: true, NoBaseFee: true} - vmenv, _, err := b.GetEVM(ctx, msg, statedb, header, &config) - if err != nil { - return nil, 0, nil, err - } - res, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(msg.Gas())) + config := vm.Config{Tracer: tracer, NoBaseFee: true} + vmenv, _ := b.GetEVM(ctx, msg, statedb, header, &config, nil) + res, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(msg.GasLimit)) if err != nil { return nil, 0, nil, fmt.Errorf("failed to apply transaction: %v err: %v", args.toTransaction().Hash(), err) } @@ -1662,7 +1749,7 @@ func (s *TransactionAPI) GetTransactionByHash(ctx context.Context, hash common.H // No finalized transaction, try to retrieve it from the pool if tx := s.b.GetPoolTransaction(hash); tx != nil { estimatedBaseFee, _ := s.b.EstimateBaseFee(ctx) - return newRPCPendingTransaction(tx, s.b.CurrentHeader(), estimatedBaseFee, s.b.ChainConfig()), nil + return 
NewRPCTransaction(tx, s.b.CurrentHeader(), estimatedBaseFee, s.b.ChainConfig()), nil } // Transaction unknown, return as such @@ -1698,6 +1785,7 @@ func (s *TransactionAPI) GetTransactionReceipt(ctx context.Context, hash common. if err != nil { return nil, err } + receipts, err := s.b.GetReceipts(ctx, blockHash) if err != nil { return nil, err @@ -1708,16 +1796,19 @@ func (s *TransactionAPI) GetTransactionReceipt(ctx context.Context, hash common. receipt := receipts[index] // Derive the sender. - bigblock := new(big.Int).SetUint64(blockNumber) - timestamp := new(big.Int).SetUint64(header.Time) - signer := types.MakeSigner(s.b.ChainConfig(), bigblock, timestamp) + signer := types.MakeSigner(s.b.ChainConfig(), header.Number, header.Time) + return marshalReceipt(receipt, blockHash, blockNumber, signer, tx, int(index)), nil +} + +// marshalReceipt marshals a transaction receipt into a JSON object. +func marshalReceipt(receipt *types.Receipt, blockHash common.Hash, blockNumber uint64, signer types.Signer, tx *types.Transaction, txIndex int) map[string]interface{} { from, _ := types.Sender(signer, tx) fields := map[string]interface{}{ "blockHash": blockHash, "blockNumber": hexutil.Uint64(blockNumber), - "transactionHash": hash, - "transactionIndex": hexutil.Uint64(index), + "transactionHash": tx.Hash(), + "transactionIndex": hexutil.Uint64(txIndex), "from": from, "to": tx.To(), "gasUsed": hexutil.Uint64(receipt.GasUsed), @@ -1726,13 +1817,7 @@ func (s *TransactionAPI) GetTransactionReceipt(ctx context.Context, hash common. 
"logs": receipt.Logs, "logsBloom": receipt.Bloom, "type": hexutil.Uint(tx.Type()), - } - // Assign the effective gas price paid - if !s.b.ChainConfig().IsApricotPhase3(timestamp) { - fields["effectiveGasPrice"] = hexutil.Uint64(tx.GasPrice().Uint64()) - } else { - gasPrice := new(big.Int).Add(header.BaseFee, tx.EffectiveGasTipValue(header.BaseFee)) - fields["effectiveGasPrice"] = hexutil.Uint64(gasPrice.Uint64()) + "effectiveGasPrice": (*hexutil.Big)(receipt.EffectiveGasPrice), } // Assign receipt status or post state. if len(receipt.PostState) > 0 { @@ -1743,11 +1828,12 @@ func (s *TransactionAPI) GetTransactionReceipt(ctx context.Context, hash common. if receipt.Logs == nil { fields["logs"] = []*types.Log{} } + // If the ContractAddress is 20 0x0 bytes, assume it is not a contract creation if receipt.ContractAddress != (common.Address{}) { fields["contractAddress"] = receipt.ContractAddress } - return fields, nil + return fields } // sign is a helper function that signs a transaction with the private key of the given address. @@ -1778,8 +1864,8 @@ func SubmitTransaction(ctx context.Context, b Backend, tx *types.Transaction) (c return common.Hash{}, err } // Print a log with full tx details for manual investigations and interventions - currentBlock := b.CurrentBlock() - signer := types.MakeSigner(b.ChainConfig(), currentBlock.Number(), new(big.Int).SetUint64(currentBlock.Time())) + head := b.CurrentBlock() + signer := types.MakeSigner(b.ChainConfig(), head.Number, head.Time) from, err := types.Sender(signer, tx) if err != nil { return common.Hash{}, err @@ -1806,7 +1892,7 @@ func (s *TransactionAPI) SendTransaction(ctx context.Context, args TransactionAr } if args.Nonce == nil { - // Hold the addresse's mutex around signing to prevent concurrent assignment of + // Hold the mutex around signing to prevent concurrent assignment of // the same nonce to multiple accounts. 
s.nonceLock.LockAddr(args.from()) defer s.nonceLock.UnlockAddr(args.from()) @@ -1889,13 +1975,13 @@ type SignTransactionResult struct { // the given from address and it needs to be unlocked. func (s *TransactionAPI) SignTransaction(ctx context.Context, args TransactionArgs) (*SignTransactionResult, error) { if args.Gas == nil { - return nil, fmt.Errorf("gas not specified") + return nil, errors.New("gas not specified") } if args.GasPrice == nil && (args.MaxPriorityFeePerGas == nil || args.MaxFeePerGas == nil) { - return nil, fmt.Errorf("missing gasPrice or maxFeePerGas/maxPriorityFeePerGas") + return nil, errors.New("missing gasPrice or maxFeePerGas/maxPriorityFeePerGas") } if args.Nonce == nil { - return nil, fmt.Errorf("nonce not specified") + return nil, errors.New("nonce not specified") } if err := args.setDefaults(ctx, s.b); err != nil { return nil, err @@ -1935,7 +2021,7 @@ func (s *TransactionAPI) PendingTransactions() ([]*RPCTransaction, error) { from, _ := types.Sender(s.signer, tx) if _, exists := accounts[from]; exists { estimatedBaseFee, _ := s.b.EstimateBaseFee(context.Background()) - transactions = append(transactions, newRPCPendingTransaction(tx, curHeader, estimatedBaseFee, s.b.ChainConfig())) + transactions = append(transactions, NewRPCTransaction(tx, curHeader, estimatedBaseFee, s.b.ChainConfig())) } } return transactions, nil @@ -1945,7 +2031,7 @@ func (s *TransactionAPI) PendingTransactions() ([]*RPCTransaction, error) { // the given transaction from the pool and reinsert it with the new gas price and limit. 
func (s *TransactionAPI) Resend(ctx context.Context, sendArgs TransactionArgs, gasPrice *hexutil.Big, gasLimit *hexutil.Uint64) (common.Hash, error) { if sendArgs.Nonce == nil { - return common.Hash{}, fmt.Errorf("missing transaction nonce in transaction spec") + return common.Hash{}, errors.New("missing transaction nonce in transaction spec") } if err := sendArgs.setDefaults(ctx, s.b); err != nil { return common.Hash{}, err diff --git a/coreth/internal/ethapi/api_test.go b/coreth/internal/ethapi/api_test.go new file mode 100644 index 00000000..b90c3b65 --- /dev/null +++ b/coreth/internal/ethapi/api_test.go @@ -0,0 +1,924 @@ +// (c) 2023, Ava Labs, Inc. +// +// This file is a derived work, based on the go-ethereum library whose original +// notices appear below. +// +// It is distributed under a license compatible with the licensing terms of the +// original code from which it is derived. +// +// Much love to the original authors for their work. +// ********** +// Copyright 2023 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package ethapi + +import ( + "bytes" + "context" + "crypto/ecdsa" + "encoding/json" + "errors" + "fmt" + "hash" + "math/big" + "reflect" + "sort" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/coreth/accounts" + "github.com/ava-labs/coreth/consensus" + "github.com/ava-labs/coreth/consensus/dummy" + "github.com/ava-labs/coreth/core" + "github.com/ava-labs/coreth/core/bloombits" + "github.com/ava-labs/coreth/core/rawdb" + "github.com/ava-labs/coreth/core/state" + "github.com/ava-labs/coreth/core/types" + "github.com/ava-labs/coreth/core/vm" + "github.com/ava-labs/coreth/params" + "github.com/ava-labs/coreth/rpc" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/event" + "golang.org/x/crypto/sha3" +) + +func TestTransaction_RoundTripRpcJSON(t *testing.T) { + var ( + config = params.TestChainConfig + signer = types.LatestSigner(config) + key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + tests = allTransactionTypes(common.Address{0xde, 0xad}, config) + ) + t.Parallel() + for i, tt := range tests { + var tx2 types.Transaction + tx, err := types.SignNewTx(key, signer, tt) + if err != nil { + t.Fatalf("test %d: signing failed: %v", i, err) + } + // Regular transaction + if data, err := json.Marshal(tx); err != nil { + t.Fatalf("test %d: marshalling failed; %v", i, err) + } else if err = tx2.UnmarshalJSON(data); err != nil { + t.Fatalf("test %d: sunmarshal failed: %v", i, err) + } else if want, have := tx.Hash(), tx2.Hash(); want != have { + t.Fatalf("test %d: stx changed, want %x have %x", i, want, have) + } + + // rpcTransaction + rpcTx := newRPCTransaction(tx, common.Hash{}, 0, 0, 0, nil, config) + if data, err := json.Marshal(rpcTx); err != nil { + t.Fatalf("test %d: marshalling failed; %v", i, err) + } else if err = 
tx2.UnmarshalJSON(data); err != nil { + t.Fatalf("test %d: unmarshal failed: %v", i, err) + } else if want, have := tx.Hash(), tx2.Hash(); want != have { + t.Fatalf("test %d: tx changed, want %x have %x", i, want, have) + } + } +} + +func allTransactionTypes(addr common.Address, config *params.ChainConfig) []types.TxData { + return []types.TxData{ + &types.LegacyTx{ + Nonce: 5, + GasPrice: big.NewInt(6), + Gas: 7, + To: &addr, + Value: big.NewInt(8), + Data: []byte{0, 1, 2, 3, 4}, + V: big.NewInt(9), + R: big.NewInt(10), + S: big.NewInt(11), + }, + &types.LegacyTx{ + Nonce: 5, + GasPrice: big.NewInt(6), + Gas: 7, + To: nil, + Value: big.NewInt(8), + Data: []byte{0, 1, 2, 3, 4}, + V: big.NewInt(32), + R: big.NewInt(10), + S: big.NewInt(11), + }, + &types.AccessListTx{ + ChainID: config.ChainID, + Nonce: 5, + GasPrice: big.NewInt(6), + Gas: 7, + To: &addr, + Value: big.NewInt(8), + Data: []byte{0, 1, 2, 3, 4}, + AccessList: types.AccessList{ + types.AccessTuple{ + Address: common.Address{0x2}, + StorageKeys: []common.Hash{types.EmptyRootHash}, + }, + }, + V: big.NewInt(32), + R: big.NewInt(10), + S: big.NewInt(11), + }, + &types.AccessListTx{ + ChainID: config.ChainID, + Nonce: 5, + GasPrice: big.NewInt(6), + Gas: 7, + To: nil, + Value: big.NewInt(8), + Data: []byte{0, 1, 2, 3, 4}, + AccessList: types.AccessList{ + types.AccessTuple{ + Address: common.Address{0x2}, + StorageKeys: []common.Hash{types.EmptyRootHash}, + }, + }, + V: big.NewInt(32), + R: big.NewInt(10), + S: big.NewInt(11), + }, + &types.DynamicFeeTx{ + ChainID: config.ChainID, + Nonce: 5, + GasTipCap: big.NewInt(6), + GasFeeCap: big.NewInt(9), + Gas: 7, + To: &addr, + Value: big.NewInt(8), + Data: []byte{0, 1, 2, 3, 4}, + AccessList: types.AccessList{ + types.AccessTuple{ + Address: common.Address{0x2}, + StorageKeys: []common.Hash{types.EmptyRootHash}, + }, + }, + V: big.NewInt(32), + R: big.NewInt(10), + S: big.NewInt(11), + }, + &types.DynamicFeeTx{ + ChainID: config.ChainID, + Nonce: 5, + GasTipCap: 
big.NewInt(6), + GasFeeCap: big.NewInt(9), + Gas: 7, + To: nil, + Value: big.NewInt(8), + Data: []byte{0, 1, 2, 3, 4}, + AccessList: types.AccessList{}, + V: big.NewInt(32), + R: big.NewInt(10), + S: big.NewInt(11), + }, + } +} + +type testBackend struct { + db ethdb.Database + chain *core.BlockChain +} + +func newTestBackend(t *testing.T, n int, gspec *core.Genesis, generator func(i int, b *core.BlockGen)) *testBackend { + var ( + engine = dummy.NewCoinbaseFaker() + backend = &testBackend{ + db: rawdb.NewMemoryDatabase(), + } + cacheConfig = &core.CacheConfig{ + TrieCleanLimit: 256, + TrieDirtyLimit: 256, + SnapshotLimit: 0, + Pruning: false, // Archive mode + } + ) + // Generate blocks for testing + _, blocks, _, _ := core.GenerateChainWithGenesis(gspec, engine, n, 10, generator) + chain, err := core.NewBlockChain(backend.db, cacheConfig, gspec, engine, vm.Config{}, common.Hash{}, false) + if err != nil { + t.Fatalf("failed to create tester chain: %v", err) + } + if n, err := chain.InsertChain(blocks); err != nil { + t.Fatalf("block %d: failed to insert into chain: %v", n, err) + } + backend.chain = chain + return backend +} + +func (b testBackend) SuggestGasTipCap(ctx context.Context) (*big.Int, error) { + return big.NewInt(0), nil +} +func (b testBackend) FeeHistory(ctx context.Context, blockCount uint64, lastBlock rpc.BlockNumber, rewardPercentiles []float64) (*big.Int, [][]*big.Int, []*big.Int, []float64, error) { + return nil, nil, nil, nil, nil +} +func (b testBackend) ChainDb() ethdb.Database { return b.db } +func (b testBackend) AccountManager() *accounts.Manager { return nil } +func (b testBackend) ExtRPCEnabled() bool { return false } +func (b testBackend) RPCGasCap() uint64 { return 10000000 } +func (b testBackend) RPCEVMTimeout() time.Duration { return time.Second } +func (b testBackend) RPCTxFeeCap() float64 { return 0 } +func (b testBackend) UnprotectedAllowed(*types.Transaction) bool { return false } +func (b testBackend) SetHead(number uint64) {} 
+func (b testBackend) HeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Header, error) { + if number == rpc.LatestBlockNumber { + return b.chain.CurrentBlock(), nil + } + return b.chain.GetHeaderByNumber(uint64(number)), nil +} +func (b testBackend) HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) { + return b.chain.GetHeaderByHash(hash), nil +} +func (b testBackend) HeaderByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*types.Header, error) { + panic("implement me") +} +func (b testBackend) CurrentHeader() *types.Header { panic("implement me") } +func (b testBackend) CurrentBlock() *types.Header { panic("implement me") } +func (b testBackend) BlockByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Block, error) { + if number == rpc.LatestBlockNumber { + head := b.chain.CurrentBlock() + return b.chain.GetBlock(head.Hash(), head.Number.Uint64()), nil + } + return b.chain.GetBlockByNumber(uint64(number)), nil +} +func (b testBackend) BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) { + return b.chain.GetBlockByHash(hash), nil +} +func (b testBackend) BlockByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*types.Block, error) { + if blockNr, ok := blockNrOrHash.Number(); ok { + return b.BlockByNumber(ctx, blockNr) + } + if blockHash, ok := blockNrOrHash.Hash(); ok { + return b.BlockByHash(ctx, blockHash) + } + panic("unknown type rpc.BlockNumberOrHash") +} +func (b testBackend) GetBody(ctx context.Context, hash common.Hash, number rpc.BlockNumber) (*types.Body, error) { + return b.chain.GetBlock(hash, uint64(number.Int64())).Body(), nil +} +func (b testBackend) StateAndHeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*state.StateDB, *types.Header, error) { + if number == rpc.PendingBlockNumber { + panic("pending state not implemented") + } + header, err := b.HeaderByNumber(ctx, number) + if err != nil { + return nil, nil, err + } 
+ if header == nil { + return nil, nil, errors.New("header not found") + } + stateDb, err := b.chain.StateAt(header.Root) + return stateDb, header, err +} +func (b testBackend) StateAndHeaderByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*state.StateDB, *types.Header, error) { + if blockNr, ok := blockNrOrHash.Number(); ok { + return b.StateAndHeaderByNumber(ctx, blockNr) + } + panic("only implemented for number") +} +func (b testBackend) PendingBlockAndReceipts() (*types.Block, types.Receipts) { panic("implement me") } +func (b testBackend) GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error) { + header, err := b.HeaderByHash(ctx, hash) + if header == nil || err != nil { + return nil, err + } + receipts := rawdb.ReadReceipts(b.db, hash, header.Number.Uint64(), header.Time, b.chain.Config()) + return receipts, nil +} +func (b testBackend) GetTd(ctx context.Context, hash common.Hash) *big.Int { panic("implement me") } +func (b testBackend) GetEVM(ctx context.Context, msg *core.Message, state *state.StateDB, header *types.Header, vmConfig *vm.Config, blockContext *vm.BlockContext) (*vm.EVM, func() error) { + vmError := func() error { return nil } + if vmConfig == nil { + vmConfig = b.chain.GetVMConfig() + } + txContext := core.NewEVMTxContext(msg) + context := core.NewEVMBlockContext(header, b.chain, nil) + if blockContext != nil { + context = *blockContext + } + return vm.NewEVM(context, txContext, state, b.chain.Config(), *vmConfig), vmError +} +func (b testBackend) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription { + panic("implement me") +} +func (b testBackend) SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription { + panic("implement me") +} +func (b testBackend) SubscribeChainSideEvent(ch chan<- core.ChainSideEvent) event.Subscription { + panic("implement me") +} +func (b testBackend) SendTx(ctx context.Context, signedTx *types.Transaction) error { + panic("implement me") +} 
+func (b testBackend) GetTransaction(ctx context.Context, txHash common.Hash) (*types.Transaction, common.Hash, uint64, uint64, error) { + panic("implement me") +} +func (b testBackend) GetPoolTransactions() (types.Transactions, error) { panic("implement me") } +func (b testBackend) GetPoolTransaction(txHash common.Hash) *types.Transaction { panic("implement me") } +func (b testBackend) GetPoolNonce(ctx context.Context, addr common.Address) (uint64, error) { + panic("implement me") +} +func (b testBackend) Stats() (pending int, queued int) { panic("implement me") } +func (b testBackend) TxPoolContent() (map[common.Address]types.Transactions, map[common.Address]types.Transactions) { + panic("implement me") +} +func (b testBackend) TxPoolContentFrom(addr common.Address) (types.Transactions, types.Transactions) { + panic("implement me") +} +func (b testBackend) SubscribeNewTxsEvent(events chan<- core.NewTxsEvent) event.Subscription { + panic("implement me") +} +func (b testBackend) ChainConfig() *params.ChainConfig { return b.chain.Config() } +func (b testBackend) Engine() consensus.Engine { return b.chain.Engine() } +func (b testBackend) GetLogs(ctx context.Context, blockHash common.Hash, number uint64) ([][]*types.Log, error) { + panic("implement me") +} +func (b testBackend) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription { + panic("implement me") +} +func (b testBackend) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription { + panic("implement me") +} +func (b testBackend) SubscribePendingLogsEvent(ch chan<- []*types.Log) event.Subscription { + panic("implement me") +} +func (b testBackend) BloomStatus() (uint64, uint64) { panic("implement me") } +func (b testBackend) ServiceFilter(ctx context.Context, session *bloombits.MatcherSession) { + panic("implement me") +} +func (b testBackend) BadBlocks() ([]*types.Block, []*core.BadBlockReason) { return nil, nil } +func (b testBackend) EstimateBaseFee(ctx context.Context) 
(*big.Int, error) { + panic("implement me") +} +func (b testBackend) LastAcceptedBlock() *types.Block { panic("implement me") } +func (b testBackend) SuggestPrice(ctx context.Context) (*big.Int, error) { + panic("implement me") +} + +func TestEstimateGas(t *testing.T) { + t.Parallel() + // Initialize test accounts + var ( + accounts = newAccounts(2) + genesis = &core.Genesis{ + Config: params.TestChainConfig, + Alloc: core.GenesisAlloc{ + accounts[0].addr: {Balance: big.NewInt(params.Ether)}, + accounts[1].addr: {Balance: big.NewInt(params.Ether)}, + }, + } + genBlocks = 10 + signer = types.HomesteadSigner{} + randomAccounts = newAccounts(2) + ) + api := NewBlockChainAPI(newTestBackend(t, genBlocks, genesis, func(i int, b *core.BlockGen) { + // Transfer from account[0] to account[1] + // value: 1000 wei + // fee: 0 wei + tx, _ := types.SignTx(types.NewTx(&types.LegacyTx{Nonce: uint64(i), To: &accounts[1].addr, Value: big.NewInt(1000), Gas: params.TxGas, GasPrice: b.BaseFee(), Data: nil}), signer, accounts[0].key) + b.AddTx(tx) + })) + var testSuite = []struct { + blockNumber rpc.BlockNumber + call TransactionArgs + expectErr error + want uint64 + }{ + // simple transfer on latest block + { + blockNumber: rpc.LatestBlockNumber, + call: TransactionArgs{ + From: &accounts[0].addr, + To: &accounts[1].addr, + Value: (*hexutil.Big)(big.NewInt(1000)), + }, + expectErr: nil, + want: 21000, + }, + // simple transfer with insufficient funds on latest block + { + blockNumber: rpc.LatestBlockNumber, + call: TransactionArgs{ + From: &randomAccounts[0].addr, + To: &accounts[1].addr, + Value: (*hexutil.Big)(big.NewInt(1000)), + }, + expectErr: core.ErrInsufficientFunds, + want: 21000, + }, + // empty create + { + blockNumber: rpc.LatestBlockNumber, + call: TransactionArgs{}, + expectErr: nil, + want: 53000, + }, + } + for i, tc := range testSuite { + result, err := api.EstimateGas(context.Background(), tc.call, &rpc.BlockNumberOrHash{BlockNumber: &tc.blockNumber}) + if 
tc.expectErr != nil { + if err == nil { + t.Errorf("test %d: want error %v, have nothing", i, tc.expectErr) + continue + } + if !errors.Is(err, tc.expectErr) { + t.Errorf("test %d: error mismatch, want %v, have %v", i, tc.expectErr, err) + } + continue + } + if err != nil { + t.Errorf("test %d: want no error, have %v", i, err) + continue + } + if uint64(result) != tc.want { + t.Errorf("test %d, result mismatch, have\n%v\n, want\n%v\n", i, uint64(result), tc.want) + } + } +} + +func TestCall(t *testing.T) { + t.Parallel() + // Initialize test accounts + var ( + accounts = newAccounts(3) + genesis = &core.Genesis{ + Config: params.TestChainConfig, + Alloc: core.GenesisAlloc{ + accounts[0].addr: {Balance: big.NewInt(params.Ether)}, + accounts[1].addr: {Balance: big.NewInt(params.Ether)}, + accounts[2].addr: {Balance: big.NewInt(params.Ether)}, + }, + } + genBlocks = 10 + signer = types.HomesteadSigner{} + ) + api := NewBlockChainAPI(newTestBackend(t, genBlocks, genesis, func(i int, b *core.BlockGen) { + // Transfer from account[0] to account[1] + // value: 1000 wei + // fee: 0 wei + tx, _ := types.SignTx(types.NewTx(&types.LegacyTx{Nonce: uint64(i), To: &accounts[1].addr, Value: big.NewInt(1000), Gas: params.TxGas, GasPrice: b.BaseFee(), Data: nil}), signer, accounts[0].key) + b.AddTx(tx) + })) + randomAccounts := newAccounts(3) + var testSuite = []struct { + blockNumber rpc.BlockNumber + overrides StateOverride + call TransactionArgs + blockOverrides BlockOverrides + expectErr error + want string + }{ + // transfer on genesis + { + blockNumber: rpc.BlockNumber(0), + call: TransactionArgs{ + From: &accounts[0].addr, + To: &accounts[1].addr, + Value: (*hexutil.Big)(big.NewInt(1000)), + }, + expectErr: nil, + want: "0x", + }, + // transfer on the head + { + blockNumber: rpc.BlockNumber(genBlocks), + call: TransactionArgs{ + From: &accounts[0].addr, + To: &accounts[1].addr, + Value: (*hexutil.Big)(big.NewInt(1000)), + }, + expectErr: nil, + want: "0x", + }, + // transfer 
on a non-existent block, error expects + { + blockNumber: rpc.BlockNumber(genBlocks + 1), + call: TransactionArgs{ + From: &accounts[0].addr, + To: &accounts[1].addr, + Value: (*hexutil.Big)(big.NewInt(1000)), + }, + expectErr: errors.New("header not found"), + }, + // transfer on the latest block + { + blockNumber: rpc.LatestBlockNumber, + call: TransactionArgs{ + From: &accounts[0].addr, + To: &accounts[1].addr, + Value: (*hexutil.Big)(big.NewInt(1000)), + }, + expectErr: nil, + want: "0x", + }, + // Call which can only succeed if state is state overridden + { + blockNumber: rpc.LatestBlockNumber, + call: TransactionArgs{ + From: &randomAccounts[0].addr, + To: &randomAccounts[1].addr, + Value: (*hexutil.Big)(big.NewInt(1000)), + }, + overrides: StateOverride{ + randomAccounts[0].addr: OverrideAccount{Balance: newRPCBalance(new(big.Int).Mul(big.NewInt(1), big.NewInt(params.Ether)))}, + }, + want: "0x", + }, + // Invalid call without state overriding + { + blockNumber: rpc.LatestBlockNumber, + call: TransactionArgs{ + From: &randomAccounts[0].addr, + To: &randomAccounts[1].addr, + Value: (*hexutil.Big)(big.NewInt(1000)), + }, + expectErr: core.ErrInsufficientFunds, + }, + // Successful simple contract call + // + // // SPDX-License-Identifier: GPL-3.0 + // + // pragma solidity >=0.7.0 <0.8.0; + // + // /** + // * @title Storage + // * @dev Store & retrieve value in a variable + // */ + // contract Storage { + // uint256 public number; + // constructor() { + // number = block.number; + // } + // } + { + blockNumber: rpc.LatestBlockNumber, + call: TransactionArgs{ + From: &randomAccounts[0].addr, + To: &randomAccounts[2].addr, + Data: hex2Bytes("8381f58a"), // call number() + }, + overrides: StateOverride{ + randomAccounts[2].addr: OverrideAccount{ + Code: 
hex2Bytes("6080604052348015600f57600080fd5b506004361060285760003560e01c80638381f58a14602d575b600080fd5b60336049565b6040518082815260200191505060405180910390f35b6000548156fea2646970667358221220eab35ffa6ab2adfe380772a48b8ba78e82a1b820a18fcb6f59aa4efb20a5f60064736f6c63430007040033"), + StateDiff: &map[common.Hash]common.Hash{{}: common.BigToHash(big.NewInt(123))}, + }, + }, + want: "0x000000000000000000000000000000000000000000000000000000000000007b", + }, + // Block overrides should work + { + blockNumber: rpc.LatestBlockNumber, + call: TransactionArgs{ + From: &accounts[1].addr, + Input: &hexutil.Bytes{ + 0x43, // NUMBER + 0x60, 0x00, 0x52, // MSTORE offset 0 + 0x60, 0x20, 0x60, 0x00, 0xf3, + }, + }, + blockOverrides: BlockOverrides{Number: (*hexutil.Big)(big.NewInt(11))}, + want: "0x000000000000000000000000000000000000000000000000000000000000000b", + }, + } + for i, tc := range testSuite { + result, err := api.Call(context.Background(), tc.call, rpc.BlockNumberOrHash{BlockNumber: &tc.blockNumber}, &tc.overrides, &tc.blockOverrides) + if tc.expectErr != nil { + if err == nil { + t.Errorf("test %d: want error %v, have nothing", i, tc.expectErr) + continue + } + if !errors.Is(err, tc.expectErr) { + // Second try + if !reflect.DeepEqual(err, tc.expectErr) { + t.Errorf("test %d: error mismatch, want %v, have %v", i, tc.expectErr, err) + } + } + continue + } + if err != nil { + t.Errorf("test %d: want no error, have %v", i, err) + continue + } + if !reflect.DeepEqual(result.String(), tc.want) { + t.Errorf("test %d, result mismatch, have\n%v\n, want\n%v\n", i, result.String(), tc.want) + } + } +} + +type Account struct { + key *ecdsa.PrivateKey + addr common.Address +} + +type Accounts []Account + +func (a Accounts) Len() int { return len(a) } +func (a Accounts) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a Accounts) Less(i, j int) bool { return bytes.Compare(a[i].addr.Bytes(), a[j].addr.Bytes()) < 0 } + +func newAccounts(n int) (accounts Accounts) { + for i := 0; i < 
n; i++ { + key, _ := crypto.GenerateKey() + addr := crypto.PubkeyToAddress(key.PublicKey) + accounts = append(accounts, Account{key: key, addr: addr}) + } + sort.Sort(accounts) + return accounts +} + +func newRPCBalance(balance *big.Int) **hexutil.Big { + rpcBalance := (*hexutil.Big)(balance) + return &rpcBalance +} + +func hex2Bytes(str string) *hexutil.Bytes { + rpcBytes := hexutil.Bytes(common.Hex2Bytes(str)) + return &rpcBytes +} + +// testHasher is the helper tool for transaction/receipt list hashing. +// The original hasher is trie, in order to get rid of import cycle, +// use the testing hasher instead. +type testHasher struct { + hasher hash.Hash +} + +func newHasher() *testHasher { + return &testHasher{hasher: sha3.NewLegacyKeccak256()} +} + +func (h *testHasher) Reset() { + h.hasher.Reset() +} + +func (h *testHasher) Update(key, val []byte) error { + h.hasher.Write(key) + h.hasher.Write(val) + return nil +} + +func (h *testHasher) Hash() common.Hash { + return common.BytesToHash(h.hasher.Sum(nil)) +} + +func TestRPCMarshalBlock(t *testing.T) { + var ( + txs []*types.Transaction + to = common.BytesToAddress([]byte{0x11}) + ) + for i := uint64(1); i <= 4; i++ { + var tx *types.Transaction + if i%2 == 0 { + tx = types.NewTx(&types.LegacyTx{ + Nonce: i, + GasPrice: big.NewInt(11111), + Gas: 1111, + To: &to, + Value: big.NewInt(111), + Data: []byte{0x11, 0x11, 0x11}, + }) + } else { + tx = types.NewTx(&types.AccessListTx{ + ChainID: big.NewInt(1337), + Nonce: i, + GasPrice: big.NewInt(11111), + Gas: 1111, + To: &to, + Value: big.NewInt(111), + Data: []byte{0x11, 0x11, 0x11}, + }) + } + txs = append(txs, tx) + } + block := types.NewBlock(&types.Header{Number: big.NewInt(100)}, txs, nil, nil, newHasher()) + + var testSuite = []struct { + inclTx bool + fullTx bool + want string + }{ + // without txs + { + inclTx: false, + fullTx: false, + want: 
`{"blockExtraData":"0x","difficulty":"0x0","extDataHash":"0x0000000000000000000000000000000000000000000000000000000000000000","extraData":"0x","gasLimit":"0x0","gasUsed":"0x0","hash":"0xed74541829e559a9256f4810c2358498c7fe41287cb57f4b8b8334ea81560757","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","miner":"0x0000000000000000000000000000000000000000","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","nonce":"0x0000000000000000","number":"0x64","parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000","receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","sha3Uncles":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","size":"0x2b9","stateRoot":"0x0000000000000000000000000000000000000000000000000000000000000000","timestamp":"0x0","transactionsRoot":"0x661a9febcfa8f1890af549b874faf9fa274aede26ef489d9db0b25daa569450e","uncles":[]}`, + }, + // only tx hashes + { + inclTx: true, + fullTx: false, + want: 
`{"blockExtraData":"0x","difficulty":"0x0","extDataHash":"0x0000000000000000000000000000000000000000000000000000000000000000","extraData":"0x","gasLimit":"0x0","gasUsed":"0x0","hash":"0xed74541829e559a9256f4810c2358498c7fe41287cb57f4b8b8334ea81560757","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","miner":"0x0000000000000000000000000000000000000000","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","nonce":"0x0000000000000000","number":"0x64","parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000","receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","sha3Uncles":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","size":"0x2b9","stateRoot":"0x0000000000000000000000000000000000000000000000000000000000000000","timestamp":"0x0","transactions":["0x7d39df979e34172322c64983a9ad48302c2b889e55bda35324afecf043a77605","0x9bba4c34e57c875ff57ac8d172805a26ae912006985395dc1bdf8f44140a7bf4","0x98909ea1ff040da6be56bc4231d484de1414b3c1dac372d69293a4beb9032cb5","0x12e1f81207b40c3bdcc13c0ee18f5f86af6d31754d57a0ea1b0d4cfef21abef1"],"transactionsRoot":"0x661a9febcfa8f1890af549b874faf9fa274aede26ef489d9db0b25daa569450e","uncles":[]}`, + }, + + // full tx details + { + inclTx: true, + fullTx: true, + want: 
`{"blockExtraData":"0x","difficulty":"0x0","extDataHash":"0x0000000000000000000000000000000000000000000000000000000000000000","extraData":"0x","gasLimit":"0x0","gasUsed":"0x0","hash":"0xed74541829e559a9256f4810c2358498c7fe41287cb57f4b8b8334ea81560757","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","miner":"0x0000000000000000000000000000000000000000","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","nonce":"0x0000000000000000","number":"0x64","parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000","receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","sha3Uncles":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","size":"0x2b9","stateRoot":"0x0000000000000000000000000000000000000000000000000000000000000000","timestamp":"0x0","transactions":[{"blockHash":"0xed74541829e559a9256f4810c2358498c7fe41287cb57f4b8b8334ea81560757","blockNumber":"0x64","from":"0x0000000000000000000000000000000000000000","gas":"0x457","gasPrice":"0x2b67","hash":"0x7d39df979e34172322c64983a9ad48302c2b889e55bda35324afecf043a77605","input":"0x111111","nonce":"0x1","to":"0x0000000000000000000000000000000000000011","transactionIndex":"0x0","value":"0x6f","type":"0x1","accessList":[],"chainId":"0x539","v":"0x0","r":"0x0","s":"0x0"},{"blockHash":"0xed74541829e559a9256f4810c2358498c7fe41287cb57f4b8b8334ea81560757","blockNumber":"0x64","from":"0x0000000000000000000000000000000000000000","gas":"0x457","gasPrice":"0x2b67","hash":"0
x9bba4c34e57c875ff57ac8d172805a26ae912006985395dc1bdf8f44140a7bf4","input":"0x111111","nonce":"0x2","to":"0x0000000000000000000000000000000000000011","transactionIndex":"0x1","value":"0x6f","type":"0x0","chainId":"0x7fffffffffffffee","v":"0x0","r":"0x0","s":"0x0"},{"blockHash":"0xed74541829e559a9256f4810c2358498c7fe41287cb57f4b8b8334ea81560757","blockNumber":"0x64","from":"0x0000000000000000000000000000000000000000","gas":"0x457","gasPrice":"0x2b67","hash":"0x98909ea1ff040da6be56bc4231d484de1414b3c1dac372d69293a4beb9032cb5","input":"0x111111","nonce":"0x3","to":"0x0000000000000000000000000000000000000011","transactionIndex":"0x2","value":"0x6f","type":"0x1","accessList":[],"chainId":"0x539","v":"0x0","r":"0x0","s":"0x0"},{"blockHash":"0xed74541829e559a9256f4810c2358498c7fe41287cb57f4b8b8334ea81560757","blockNumber":"0x64","from":"0x0000000000000000000000000000000000000000","gas":"0x457","gasPrice":"0x2b67","hash":"0x12e1f81207b40c3bdcc13c0ee18f5f86af6d31754d57a0ea1b0d4cfef21abef1","input":"0x111111","nonce":"0x4","to":"0x0000000000000000000000000000000000000011","transactionIndex":"0x3","value":"0x6f","type":"0x0","chainId":"0x7fffffffffffffee","v":"0x0","r":"0x0","s":"0x0"}],"transactionsRoot":"0x661a9febcfa8f1890af549b874faf9fa274aede26ef489d9db0b25daa569450e","uncles":[]}`, + }, + } + + for i, tc := range testSuite { + resp, err := RPCMarshalBlock(block, tc.inclTx, tc.fullTx, params.TestChainConfig) + if err != nil { + t.Errorf("test %d: got error %v", i, err) + continue + } + out, err := json.Marshal(resp) + if err != nil { + t.Errorf("test %d: json marshal error: %v", i, err) + continue + } + if have := string(out); have != tc.want { + t.Errorf("test %d: want: %s have: %s", i, tc.want, have) + } + } +} + +func setupReceiptBackend(t *testing.T, genBlocks int) (*testBackend, []common.Hash) { + // Initialize test accounts + var ( + acc1Key, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") + acc2Key, _ = 
crypto.HexToECDSA("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee") + acc1Addr = crypto.PubkeyToAddress(acc1Key.PublicKey) + acc2Addr = crypto.PubkeyToAddress(acc2Key.PublicKey) + contract = common.HexToAddress("0000000000000000000000000000000000031ec7") + genesis = &core.Genesis{ + Config: params.TestChainConfig, + Alloc: core.GenesisAlloc{ + acc1Addr: {Balance: big.NewInt(params.Ether)}, + acc2Addr: {Balance: big.NewInt(params.Ether)}, + // // SPDX-License-Identifier: GPL-3.0 + // pragma solidity >=0.7.0 <0.9.0; + // + // contract Token { + // event Transfer(address indexed from, address indexed to, uint256 value); + // function transfer(address to, uint256 value) public returns (bool) { + // emit Transfer(msg.sender, to, value); + // return true; + // } + // } + contract: {Balance: big.NewInt(params.Ether), Code: common.FromHex("0x608060405234801561001057600080fd5b506004361061002b5760003560e01c8063a9059cbb14610030575b600080fd5b61004a6004803603810190610045919061016a565b610060565b60405161005791906101c5565b60405180910390f35b60008273ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef846040516100bf91906101ef565b60405180910390a36001905092915050565b600080fd5b600073ffffffffffffffffffffffffffffffffffffffff82169050919050565b6000610101826100d6565b9050919050565b610111816100f6565b811461011c57600080fd5b50565b60008135905061012e81610108565b92915050565b6000819050919050565b61014781610134565b811461015257600080fd5b50565b6000813590506101648161013e565b92915050565b60008060408385031215610181576101806100d1565b5b600061018f8582860161011f565b92505060206101a085828601610155565b9150509250929050565b60008115159050919050565b6101bf816101aa565b82525050565b60006020820190506101da60008301846101b6565b92915050565b6101e981610134565b82525050565b600060208201905061020460008301846101e0565b9291505056fea2646970667358221220b469033f4b77b9565ee84e0a2f04d496b18160d26034d54f9487e57788fd36d564736f
6c63430008120033")}, + }, + } + signer = types.LatestSignerForChainID(params.TestChainConfig.ChainID) + txHashes = make([]common.Hash, genBlocks) + ) + backend := newTestBackend(t, genBlocks, genesis, func(i int, b *core.BlockGen) { + var ( + tx *types.Transaction + err error + ) + switch i { + case 0: + // transfer 1000wei + tx, err = types.SignTx(types.NewTx(&types.LegacyTx{Nonce: uint64(i), To: &acc2Addr, Value: big.NewInt(1000), Gas: params.TxGas, GasPrice: b.BaseFee(), Data: nil}), types.HomesteadSigner{}, acc1Key) + case 1: + // create contract + tx, err = types.SignTx(types.NewTx(&types.LegacyTx{Nonce: uint64(i), To: nil, Gas: 53100, GasPrice: b.BaseFee(), Data: common.FromHex("0x60806040")}), signer, acc1Key) + case 2: + // with logs + // transfer(address to, uint256 value) + data := fmt.Sprintf("0xa9059cbb%s%s", common.HexToHash(common.BigToAddress(big.NewInt(int64(i + 1))).Hex()).String()[2:], common.BytesToHash([]byte{byte(i + 11)}).String()[2:]) + tx, err = types.SignTx(types.NewTx(&types.LegacyTx{Nonce: uint64(i), To: &contract, Gas: 60000, GasPrice: b.BaseFee(), Data: common.FromHex(data)}), signer, acc1Key) + case 3: + // dynamic fee with logs + // transfer(address to, uint256 value) + data := fmt.Sprintf("0xa9059cbb%s%s", common.HexToHash(common.BigToAddress(big.NewInt(int64(i + 1))).Hex()).String()[2:], common.BytesToHash([]byte{byte(i + 11)}).String()[2:]) + fee := big.NewInt(500) + fee.Add(fee, b.BaseFee()) + tx, err = types.SignTx(types.NewTx(&types.DynamicFeeTx{Nonce: uint64(i), To: &contract, Gas: 60000, Value: big.NewInt(1), GasTipCap: big.NewInt(500), GasFeeCap: fee, Data: common.FromHex(data)}), signer, acc1Key) + case 4: + // access list with contract create + accessList := types.AccessList{{ + Address: contract, + StorageKeys: []common.Hash{{0}}, + }} + tx, err = types.SignTx(types.NewTx(&types.AccessListTx{Nonce: uint64(i), To: nil, Gas: 58100, GasPrice: b.BaseFee(), Data: common.FromHex("0x60806040"), AccessList: accessList}), signer, 
acc1Key) + } + if err != nil { + t.Errorf("failed to sign tx: %v", err) + } + if tx != nil { + b.AddTx(tx) + txHashes[i] = tx.Hash() + } + }) + return backend, txHashes +} + +func TestRPCGetBlockReceipts(t *testing.T) { + t.Parallel() + + var ( + genBlocks = 5 + backend, _ = setupReceiptBackend(t, genBlocks) + api = NewBlockChainAPI(backend) + ) + blockHashes := make([]common.Hash, genBlocks+1) + ctx := context.Background() + for i := 0; i <= genBlocks; i++ { + header, err := backend.HeaderByNumber(ctx, rpc.BlockNumber(i)) + if err != nil { + t.Errorf("failed to get block: %d err: %v", i, err) + } + blockHashes[i] = header.Hash() + } + + var testSuite = []struct { + test rpc.BlockNumberOrHash + want string + }{ + // 0. block without any txs(hash) + { + test: rpc.BlockNumberOrHashWithHash(blockHashes[0], false), + want: `[]`, + }, + // 1. block without any txs(number) + { + test: rpc.BlockNumberOrHashWithNumber(0), + want: `[]`, + }, + // 2. earliest tag + { + test: rpc.BlockNumberOrHashWithNumber(rpc.EarliestBlockNumber), + want: `[]`, + }, + // 3. 
latest tag + { + test: rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber), + want: `[{"blockHash":"0x87489b491ec07c0f4f2b0cabe17e14c7fee090fc34f10051459a1470d39705e0","blockNumber":"0x5","contractAddress":"0xfdaa97661a584d977b4d3abb5370766ff5b86a18","cumulativeGasUsed":"0xe01c","effectiveGasPrice":"0x2ecde015a8","from":"0x703c4b2bd70c169f5717101caee543299fc946c7","gasUsed":"0xe01c","logs":[],"logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","status":"0x1","to":null,"transactionHash":"0xa9616380994fd7502d0350ee57882bb6e95d6678fa6c4782f179c9f5f3529c48","transactionIndex":"0x0","type":"0x1"}]`, + }, + // 4. 
block with legacy transfer tx(hash) + { + test: rpc.BlockNumberOrHashWithHash(blockHashes[1], false), + want: `[{"blockHash":"0xd3e0bbe89c00b832d4a608c30c8627eabc8cbfe4f60b5eb0bf00b10b85a7d7fb","blockNumber":"0x1","contractAddress":null,"cumulativeGasUsed":"0x5208","effectiveGasPrice":"0x34630b8a00","from":"0x703c4b2bd70c169f5717101caee543299fc946c7","gasUsed":"0x5208","logs":[],"logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","status":"0x1","to":"0x0d3ab14bbad3d99f4203bd7a11acb94882050e7e","transactionHash":"0x09220a8629fd020cbb341ab146e6acb4dc4811ab5fdf021bec3d3219c5a29ab3","transactionIndex":"0x0","type":"0x0"}]`, + }, + // 5. 
block with contract create tx(number) + { + test: rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(2)), + want: `[{"blockHash":"0x9c2343a191ef3ea558ee4563770b15cfcecd5d24e98d93f2b04306f66271b001","blockNumber":"0x2","contractAddress":"0xae9bea628c4ce503dcfd7e305cab4e29e7476592","cumulativeGasUsed":"0xcf50","effectiveGasPrice":"0x32ee841b80","from":"0x703c4b2bd70c169f5717101caee543299fc946c7","gasUsed":"0xcf50","logs":[],"logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","status":"0x1","to":null,"transactionHash":"0x517f3174bd4501d55f0f93589ef0102152ab808f51bf595f2779461f04871a32","transactionIndex":"0x0","type":"0x0"}]`, + }, + // 6. 
block with legacy contract call tx(hash) + { + test: rpc.BlockNumberOrHashWithHash(blockHashes[3], false), + want: `[{"blockHash":"0x86579d9985828b26ff5593b162dfed399de868102d376a1f2363ba1312e29b32","blockNumber":"0x3","contractAddress":null,"cumulativeGasUsed":"0x5e28","effectiveGasPrice":"0x318455c568","from":"0x703c4b2bd70c169f5717101caee543299fc946c7","gasUsed":"0x5e28","logs":[{"address":"0x0000000000000000000000000000000000031ec7","topics":["0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef","0x000000000000000000000000703c4b2bd70c169f5717101caee543299fc946c7","0x0000000000000000000000000000000000000000000000000000000000000003"],"data":"0x000000000000000000000000000000000000000000000000000000000000000d","blockNumber":"0x3","transactionHash":"0x0e9c460065fee166157eaadf702a01fb6ac1ce27b651e32850a8b09f71f93937","transactionIndex":"0x0","blockHash":"0x86579d9985828b26ff5593b162dfed399de868102d376a1f2363ba1312e29b32","logIndex":"0x0","removed":false}],"logsBloom":"0x00000000000000000000008000000000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000800000000000000008000000000000000000000000000000000020000000080000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000400000000002000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000","status":"0x1","to":"0x0000000000000000000000000000000000031ec7","transactionHash":"0x0e9c460065fee166157eaadf702a01fb6ac1ce27b651e32850a8b09f71f93937","transactionIndex":"0x0","type":"0x0"}]`, + }, + // 7. 
block with dynamic fee tx(number) + { + test: rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(4)), + want: `[{"blockHash":"0x252b4b92ff1caafbaa857f9b6277d81e3570056c138292aa92494e51f44d7448","blockNumber":"0x4","contractAddress":null,"cumulativeGasUsed":"0x538d","effectiveGasPrice":"0x302436f3a8","from":"0x703c4b2bd70c169f5717101caee543299fc946c7","gasUsed":"0x538d","logs":[],"logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","status":"0x0","to":"0x0000000000000000000000000000000000031ec7","transactionHash":"0xcdd1122456f8ea113309e2ba5ecc8f389bbdc2e6bcced8eb103c6fdef201bf1a","transactionIndex":"0x0","type":"0x2"}]`, + }, + // 8. block is empty + { + test: rpc.BlockNumberOrHashWithHash(common.Hash{}, false), + want: `null`, + }, + // 9. block is not found + { + test: rpc.BlockNumberOrHashWithHash(common.HexToHash("deadbeef"), false), + want: `null`, + }, + // 10. 
block is not found + { + test: rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(genBlocks + 1)), + want: `null`, + }, + } + + for i, tt := range testSuite { + var ( + result interface{} + err error + ) + result, err = api.GetBlockReceipts(context.Background(), tt.test) + if err != nil { + t.Errorf("test %d: want no error, have %v", i, err) + continue + } + data, err := json.Marshal(result) + if err != nil { + t.Errorf("test %d: json marshal error", i) + continue + } + want, have := tt.want, string(data) + require.JSONEqf(t, want, have, "test %d: json not match, want: %s, have: %s", i, want, have) + } +} diff --git a/coreth/internal/ethapi/backend.go b/coreth/internal/ethapi/backend.go index 98a28def..78c95ce1 100644 --- a/coreth/internal/ethapi/backend.go +++ b/coreth/internal/ethapi/backend.go @@ -35,14 +35,14 @@ import ( "github.com/ava-labs/coreth/accounts" "github.com/ava-labs/coreth/consensus" "github.com/ava-labs/coreth/core" + "github.com/ava-labs/coreth/core/bloombits" "github.com/ava-labs/coreth/core/state" "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/core/vm" - "github.com/ava-labs/coreth/eth/filters" - "github.com/ava-labs/coreth/ethdb" "github.com/ava-labs/coreth/params" "github.com/ava-labs/coreth/rpc" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/event" ) @@ -53,7 +53,7 @@ type Backend interface { EstimateBaseFee(ctx context.Context) (*big.Int, error) SuggestPrice(ctx context.Context) (*big.Int, error) SuggestGasTipCap(ctx context.Context) (*big.Int, error) - FeeHistory(ctx context.Context, blockCount int, lastBlock rpc.BlockNumber, rewardPercentiles []float64) (*big.Int, [][]*big.Int, []*big.Int, []float64, error) + FeeHistory(ctx context.Context, blockCount uint64, lastBlock rpc.BlockNumber, rewardPercentiles []float64) (*big.Int, [][]*big.Int, []*big.Int, []float64, error) ChainDb() ethdb.Database AccountManager() *accounts.Manager ExtRPCEnabled() bool @@ 
-67,14 +67,14 @@ type Backend interface { HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) HeaderByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*types.Header, error) CurrentHeader() *types.Header - CurrentBlock() *types.Block + CurrentBlock() *types.Header BlockByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Block, error) BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) BlockByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*types.Block, error) StateAndHeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*state.StateDB, *types.Header, error) StateAndHeaderByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*state.StateDB, *types.Header, error) GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error) - GetEVM(ctx context.Context, msg core.Message, state *state.StateDB, header *types.Header, vmConfig *vm.Config) (*vm.EVM, func() error, error) + GetEVM(ctx context.Context, msg *core.Message, state *state.StateDB, header *types.Header, vmConfig *vm.Config, blockCtx *vm.BlockContext) (*vm.EVM, func() error) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription SubscribeChainSideEvent(ch chan<- core.ChainSideEvent) event.Subscription @@ -95,9 +95,16 @@ type Backend interface { Engine() consensus.Engine LastAcceptedBlock() *types.Block + // This is copied from filters.Backend // eth/filters needs to be initialized from this backend type, so methods needed by // it must also be included here. 
- filters.Backend + GetBody(ctx context.Context, hash common.Hash, number rpc.BlockNumber) (*types.Body, error) + GetLogs(ctx context.Context, blockHash common.Hash, number uint64) ([][]*types.Log, error) + SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription + SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription + SubscribePendingLogsEvent(ch chan<- []*types.Log) event.Subscription + BloomStatus() (uint64, uint64) + ServiceFilter(ctx context.Context, session *bloombits.MatcherSession) } func GetAPIs(apiBackend Backend) []rpc.API { diff --git a/coreth/internal/ethapi/transaction_args.go b/coreth/internal/ethapi/transaction_args.go index 8d1876c4..375766a2 100644 --- a/coreth/internal/ethapi/transaction_args.go +++ b/coreth/internal/ethapi/transaction_args.go @@ -33,6 +33,7 @@ import ( "fmt" "math/big" + "github.com/ava-labs/coreth/core" "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/params" "github.com/ava-labs/coreth/rpc" @@ -167,7 +168,7 @@ func (args *TransactionArgs) setFeeDefaults(ctx context.Context, b feeBackend) e } // Now attempt to fill in default value depending on whether London is active or not. head := b.CurrentHeader() - if b.ChainConfig().IsApricotPhase3(new(big.Int).SetUint64(head.Time)) { + if b.ChainConfig().IsApricotPhase3(head.Time) { // London is active, set maxPriorityFeePerGas and maxFeePerGas. if err := args.setApricotPhase3FeeDefault(ctx, head, b); err != nil { return err @@ -218,10 +219,10 @@ func (args *TransactionArgs) setApricotPhase3FeeDefault(ctx context.Context, hea // ToMessage converts the transaction arguments to the Message type used by the // core evm. This method is used in calls and traces that do not require a real // live transaction. 
-func (args *TransactionArgs) ToMessage(globalGasCap uint64, baseFee *big.Int) (types.Message, error) { +func (args *TransactionArgs) ToMessage(globalGasCap uint64, baseFee *big.Int) (*core.Message, error) { // Reject invalid combinations of pre- and post-1559 fee styles if args.GasPrice != nil && (args.MaxFeePerGas != nil || args.MaxPriorityFeePerGas != nil) { - return types.Message{}, errors.New("both gasPrice and (maxFeePerGas or maxPriorityFeePerGas) specified") + return nil, errors.New("both gasPrice and (maxFeePerGas or maxPriorityFeePerGas) specified") } // Set sender address or use zero address if none specified. addr := args.from() @@ -235,7 +236,7 @@ func (args *TransactionArgs) ToMessage(globalGasCap uint64, baseFee *big.Int) (t gas = uint64(*args.Gas) } if globalGasCap != 0 && globalGasCap < gas { - log.Warn("Caller gas above allowance, capping", "requested", gas, "cap", globalGasCap) + log.Info("Caller gas above allowance, capping", "requested", gas, "cap", globalGasCap) gas = globalGasCap } var ( @@ -282,7 +283,18 @@ func (args *TransactionArgs) ToMessage(globalGasCap uint64, baseFee *big.Int) (t if args.AccessList != nil { accessList = *args.AccessList } - msg := types.NewMessage(addr, args.To, 0, value, gas, gasPrice, gasFeeCap, gasTipCap, data, accessList, true) + msg := &core.Message{ + From: addr, + To: args.To, + Value: value, + GasLimit: gas, + GasPrice: gasPrice, + GasFeeCap: gasFeeCap, + GasTipCap: gasTipCap, + Data: data, + AccessList: accessList, + SkipAccountChecks: true, + } return msg, nil } diff --git a/coreth/internal/ethapi/transaction_args_test.go b/coreth/internal/ethapi/transaction_args_test.go index e29098ac..c77fa7b9 100644 --- a/coreth/internal/ethapi/transaction_args_test.go +++ b/coreth/internal/ethapi/transaction_args_test.go @@ -28,13 +28,14 @@ package ethapi import ( "context" - "fmt" + "errors" "math/big" "reflect" "testing" "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/params" + 
"github.com/ava-labs/coreth/utils" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" ) @@ -139,28 +140,28 @@ func TestSetFeeDefaults(t *testing.T) { false, &TransactionArgs{MaxFeePerGas: maxFee}, nil, - fmt.Errorf("maxFeePerGas and maxPriorityFeePerGas are not valid before London is active"), + errors.New("maxFeePerGas and maxPriorityFeePerGas are not valid before London is active"), }, { "dynamic fee tx pre-London, priorityFee set", false, &TransactionArgs{MaxPriorityFeePerGas: fortytwo}, nil, - fmt.Errorf("maxFeePerGas and maxPriorityFeePerGas are not valid before London is active"), + errors.New("maxFeePerGas and maxPriorityFeePerGas are not valid before London is active"), }, { "dynamic fee tx, maxFee < priorityFee", true, &TransactionArgs{MaxFeePerGas: maxFee, MaxPriorityFeePerGas: (*hexutil.Big)(big.NewInt(1000))}, nil, - fmt.Errorf("maxFeePerGas (0x3e) < maxPriorityFeePerGas (0x3e8)"), + errors.New("maxFeePerGas (0x3e) < maxPriorityFeePerGas (0x3e8)"), }, { "dynamic fee tx, maxFee < priorityFee while setting default", true, &TransactionArgs{MaxFeePerGas: (*hexutil.Big)(big.NewInt(7))}, nil, - fmt.Errorf("maxFeePerGas (0x7) < maxPriorityFeePerGas (0x2a)"), + errors.New("maxFeePerGas (0x7) < maxPriorityFeePerGas (0x2a)"), }, // Misc @@ -169,21 +170,21 @@ func TestSetFeeDefaults(t *testing.T) { false, &TransactionArgs{GasPrice: fortytwo, MaxFeePerGas: maxFee, MaxPriorityFeePerGas: fortytwo}, nil, - fmt.Errorf("both gasPrice and (maxFeePerGas or maxPriorityFeePerGas) specified"), + errors.New("both gasPrice and (maxFeePerGas or maxPriorityFeePerGas) specified"), }, { "set gas price and maxPriorityFee", false, &TransactionArgs{GasPrice: fortytwo, MaxPriorityFeePerGas: fortytwo}, nil, - fmt.Errorf("both gasPrice and (maxFeePerGas or maxPriorityFeePerGas) specified"), + errors.New("both gasPrice and (maxFeePerGas or maxPriorityFeePerGas) specified"), }, { "set gas price and maxFee", true, &TransactionArgs{GasPrice: fortytwo, 
MaxFeePerGas: maxFee}, nil, - fmt.Errorf("both gasPrice and (maxFeePerGas or maxPriorityFeePerGas) specified"), + errors.New("both gasPrice and (maxFeePerGas or maxPriorityFeePerGas) specified"), }, } @@ -227,7 +228,7 @@ func newBackendMock() *backendMock { PetersburgBlock: big.NewInt(0), IstanbulBlock: big.NewInt(0), MuirGlacierBlock: big.NewInt(0), - ApricotPhase3BlockTimestamp: big.NewInt(1000), + ApricotPhase3BlockTimestamp: utils.NewUint64(1000), } return &backendMock{ current: &types.Header{ diff --git a/coreth/internal/flags/categories.go b/coreth/internal/flags/categories.go new file mode 100644 index 00000000..02d063a6 --- /dev/null +++ b/coreth/internal/flags/categories.go @@ -0,0 +1,53 @@ +// (c) 2023, Ava Labs, Inc. +// +// This file is a derived work, based on the go-ethereum library whose original +// notices appear below. +// +// It is distributed under a license compatible with the licensing terms of the +// original code from which it is derived. +// +// Much love to the original authors for their work. +// ********** +// Copyright 2022 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package flags + +import "github.com/urfave/cli/v2" + +const ( + EthCategory = "ETHEREUM" + LightCategory = "LIGHT CLIENT" + DevCategory = "DEVELOPER CHAIN" + EthashCategory = "ETHASH" + TxPoolCategory = "TRANSACTION POOL" + PerfCategory = "PERFORMANCE TUNING" + AccountCategory = "ACCOUNT" + APICategory = "API AND CONSOLE" + NetworkingCategory = "NETWORKING" + MinerCategory = "MINER" + GasPriceCategory = "GAS PRICE ORACLE" + VMCategory = "VIRTUAL MACHINE" + LoggingCategory = "LOGGING AND DEBUGGING" + MetricsCategory = "METRICS AND STATS" + MiscCategory = "MISC" + DeprecatedCategory = "ALIASED (deprecated)" +) + +func init() { + cli.HelpFlag.(*cli.BoolFlag).Category = MiscCategory + cli.VersionFlag.(*cli.BoolFlag).Category = MiscCategory +} diff --git a/coreth/internal/flags/helpers.go b/coreth/internal/flags/helpers.go index cc935c0f..e1fe1886 100644 --- a/coreth/internal/flags/helpers.go +++ b/coreth/internal/flags/helpers.go @@ -30,17 +30,19 @@ import ( "fmt" "strings" + "github.com/ava-labs/coreth/internal/version" "github.com/ava-labs/coreth/params" "github.com/urfave/cli/v2" ) // NewApp creates an app with sane defaults. -func NewApp(gitCommit, gitDate, usage string) *cli.App { +func NewApp(usage string) *cli.App { + git, _ := version.VCS() app := cli.NewApp() app.EnableBashCompletion = true - app.Version = params.VersionWithCommit(gitCommit, gitDate) + app.Version = params.VersionWithCommit(git.Commit, git.Date) app.Usage = usage - app.Copyright = "Copyright 2013-2022 The go-ethereum Authors" + app.Copyright = "Copyright 2013-2023 The go-ethereum Authors" app.Before = func(ctx *cli.Context) error { MigrateGlobalFlags(ctx) return nil @@ -102,10 +104,34 @@ func MigrateGlobalFlags(ctx *cli.Context) { } func doMigrateFlags(ctx *cli.Context) { + // Figure out if there are any aliases of commands. If there are, we want + // to ignore them when iterating over the flags. 
+ var aliases = make(map[string]bool) + for _, fl := range ctx.Command.Flags { + for _, alias := range fl.Names()[1:] { + aliases[alias] = true + } + } for _, name := range ctx.FlagNames() { for _, parent := range ctx.Lineage()[1:] { if parent.IsSet(name) { - ctx.Set(name, parent.String(name)) + // When iterating across the lineage, we will be served both + // the 'canon' and alias formats of all commmands. In most cases, + // it's fine to set it in the ctx multiple times (one for each + // name), however, the Slice-flags are not fine. + // The slice-flags accumulate, so if we set it once as + // "foo" and once as alias "F", then both will be present in the slice. + if _, isAlias := aliases[name]; isAlias { + continue + } + // If it is a string-slice, we need to set it as + // "alfa, beta, gamma" instead of "[alfa beta gamma]", in order + // for the backing StringSlice to parse it properly. + if result := parent.StringSlice(name); len(result) > 0 { + ctx.Set(name, strings.Join(result, ",")) + } else { + ctx.Set(name, parent.String(name)) + } break } } diff --git a/coreth/internal/shutdowncheck/shutdown_tracker.go b/coreth/internal/shutdowncheck/shutdown_tracker.go index 8395da42..35382305 100644 --- a/coreth/internal/shutdowncheck/shutdown_tracker.go +++ b/coreth/internal/shutdowncheck/shutdown_tracker.go @@ -30,8 +30,8 @@ import ( "time" "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/ethdb" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" ) diff --git a/coreth/node/defaults.go b/coreth/internal/version/vcs.go similarity index 51% rename from coreth/node/defaults.go rename to coreth/internal/version/vcs.go index e4c826b9..70164589 100644 --- a/coreth/node/defaults.go +++ b/coreth/internal/version/vcs.go @@ -1,4 +1,4 @@ -// (c) 2019-2020, Ava Labs, Inc. +// (c) 2023, Ava Labs, Inc. 
// // This file is a derived work, based on the go-ethereum library whose original // notices appear below. @@ -8,7 +8,7 @@ // // Much love to the original authors for their work. // ********** -// Copyright 2016 The go-ethereum Authors +// Copyright 2022 The go-ethereum Authors // This file is part of the go-ethereum library. // // The go-ethereum library is free software: you can redistribute it and/or modify @@ -24,28 +24,39 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . -package node +package version import ( - "github.com/ava-labs/coreth/rpc" + "runtime/debug" + "time" ) +// In go 1.18 and beyond, the go tool embeds VCS information into the build. + const ( - DefaultHTTPHost = "localhost" // Default host interface for the HTTP RPC server - DefaultHTTPPort = 8545 // Default TCP port for the HTTP RPC server - DefaultWSHost = "localhost" // Default host interface for the websocket RPC server - DefaultWSPort = 8546 // Default TCP port for the websocket RPC server - DefaultGraphQLHost = "localhost" // Default host interface for the GraphQL server - DefaultGraphQLPort = 8547 // Default TCP port for the GraphQL server + govcsTimeLayout = "2006-01-02T15:04:05Z" + ourTimeLayout = "20060102" ) -// DefaultConfig contains reasonable default settings. -var DefaultConfig = Config{ - HTTPPort: DefaultHTTPPort, - HTTPModules: []string{"net", "web3"}, - HTTPVirtualHosts: []string{"localhost"}, - HTTPTimeouts: rpc.DefaultHTTPTimeouts, - WSPort: DefaultWSPort, - WSModules: []string{"net", "web3"}, - GraphQLVirtualHosts: []string{"localhost"}, +// buildInfoVCS returns VCS information of the build. 
+func buildInfoVCS(info *debug.BuildInfo) (s VCSInfo, ok bool) { + for _, v := range info.Settings { + switch v.Key { + case "vcs.revision": + s.Commit = v.Value + case "vcs.modified": + if v.Value == "true" { + s.Dirty = true + } + case "vcs.time": + t, err := time.Parse(govcsTimeLayout, v.Value) + if err == nil { + s.Date = t.Format(ourTimeLayout) + } + } + } + if s.Commit != "" && s.Date != "" { + ok = true + } + return } diff --git a/coreth/internal/version/version.go b/coreth/internal/version/version.go new file mode 100644 index 00000000..8b32ed31 --- /dev/null +++ b/coreth/internal/version/version.go @@ -0,0 +1,151 @@ +// (c) 2023, Ava Labs, Inc. +// +// This file is a derived work, based on the go-ethereum library whose original +// notices appear below. +// +// It is distributed under a license compatible with the licensing terms of the +// original code from which it is derived. +// +// Much love to the original authors for their work. +// ********** +// Copyright 2022 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +// Package version implements reading of build version information. 
+package version + +import ( + "fmt" + "runtime" + "runtime/debug" + "strings" + + "github.com/ava-labs/coreth/params" +) + +const ourPath = "github.com/ava-labs/coreth" // Path to our module + +// These variables are set at build-time by the linker when the build is +// done by build/ci.go. +var gitCommit, gitDate string + +// VCSInfo represents the git repository state. +type VCSInfo struct { + Commit string // head commit hash + Date string // commit time in YYYYMMDD format + Dirty bool +} + +// VCS returns version control information of the current executable. +func VCS() (VCSInfo, bool) { + if gitCommit != "" { + // Use information set by the build script if present. + return VCSInfo{Commit: gitCommit, Date: gitDate}, true + } + if buildInfo, ok := debug.ReadBuildInfo(); ok { + if buildInfo.Main.Path == ourPath { + return buildInfoVCS(buildInfo) + } + } + return VCSInfo{}, false +} + +// ClientName creates a software name/version identifier according to common +// conventions in the Ethereum p2p network. +func ClientName(clientIdentifier string) string { + git, _ := VCS() + return fmt.Sprintf("%s/v%v/%v-%v/%v", + strings.Title(clientIdentifier), + params.VersionWithCommit(git.Commit, git.Date), + runtime.GOOS, runtime.GOARCH, + runtime.Version(), + ) +} + +// runtimeInfo returns build and platform information about the current binary. +// +// If the package that is currently executing is a prefixed by our go-ethereum +// module path, it will print out commit and date VCS information. Otherwise, +// it will assume it's imported by a third-party and will return the imported +// version and whether it was replaced by another module. 
+func Info() (version, vcs string) { + version = params.VersionWithMeta + buildInfo, ok := debug.ReadBuildInfo() + if !ok { + return version, "" + } + version = versionInfo(buildInfo) + if status, ok := VCS(); ok { + modified := "" + if status.Dirty { + modified = " (dirty)" + } + commit := status.Commit + if len(commit) > 8 { + commit = commit[:8] + } + vcs = commit + "-" + status.Date + modified + } + return version, vcs +} + +// versionInfo returns version information for the currently executing +// implementation. +// +// Depending on how the code is instantiated, it returns different amounts of +// information. If it is unable to determine which module is related to our +// package it falls back to the hardcoded values in the params package. +func versionInfo(info *debug.BuildInfo) string { + // If the main package is from our repo, prefix version with "geth". + if strings.HasPrefix(info.Path, ourPath) { + return fmt.Sprintf("geth %s", info.Main.Version) + } + // Not our main package, so explicitly print out the module path and + // version. + var version string + if info.Main.Path != "" && info.Main.Version != "" { + // These can be empty when invoked with "go run". + version = fmt.Sprintf("%s@%s ", info.Main.Path, info.Main.Version) + } + mod := findModule(info, ourPath) + if mod == nil { + // If our module path wasn't imported, it's unclear which + // version of our code they are running. Fallback to hardcoded + // version. + return version + fmt.Sprintf("geth %s", params.VersionWithMeta) + } + // Our package is a dependency for the main module. Return path and + // version data for both. + version += fmt.Sprintf("%s@%s", mod.Path, mod.Version) + if mod.Replace != nil { + // If our package was replaced by something else, also note that. + version += fmt.Sprintf(" (replaced by %s@%s)", mod.Replace.Path, mod.Replace.Version) + } + return version +} + +// findModule returns the module at path. 
+func findModule(info *debug.BuildInfo, path string) *debug.Module { + if info.Path == ourPath { + return &info.Main + } + for _, mod := range info.Deps { + if mod.Path == path { + return mod + } + } + return nil +} diff --git a/coreth/metrics/README.md b/coreth/metrics/README.md index 0fbaabe4..cf153c80 100644 --- a/coreth/metrics/README.md +++ b/coreth/metrics/README.md @@ -5,7 +5,7 @@ go-metrics Go port of Coda Hale's Metrics library: . -Documentation: . +Documentation: . Usage ----- diff --git a/coreth/metrics/counter.go b/coreth/metrics/counter.go index 2f78c90d..55e1c595 100644 --- a/coreth/metrics/counter.go +++ b/coreth/metrics/counter.go @@ -38,13 +38,13 @@ func NewCounter() Counter { if !Enabled { return NilCounter{} } - return &StandardCounter{0} + return &StandardCounter{} } // NewCounterForced constructs a new StandardCounter and returns it no matter if // the global switch is enabled or not. func NewCounterForced() Counter { - return &StandardCounter{0} + return &StandardCounter{} } // NewRegisteredCounter constructs and registers a new StandardCounter. @@ -115,27 +115,27 @@ func (NilCounter) Snapshot() Counter { return NilCounter{} } // StandardCounter is the standard implementation of a Counter and uses the // sync/atomic package to manage a single int64 value. type StandardCounter struct { - count int64 + count atomic.Int64 } // Clear sets the counter to zero. func (c *StandardCounter) Clear() { - atomic.StoreInt64(&c.count, 0) + c.count.Store(0) } // Count returns the current count. func (c *StandardCounter) Count() int64 { - return atomic.LoadInt64(&c.count) + return c.count.Load() } // Dec decrements the counter by the given amount. func (c *StandardCounter) Dec(i int64) { - atomic.AddInt64(&c.count, -i) + c.count.Add(-i) } // Inc increments the counter by the given amount. func (c *StandardCounter) Inc(i int64) { - atomic.AddInt64(&c.count, i) + c.count.Add(i) } // Snapshot returns a read-only copy of the counter. 
diff --git a/coreth/metrics/counter_float64.go b/coreth/metrics/counter_float64.go new file mode 100644 index 00000000..d1197bb8 --- /dev/null +++ b/coreth/metrics/counter_float64.go @@ -0,0 +1,155 @@ +package metrics + +import ( + "math" + "sync/atomic" +) + +// CounterFloat64 holds a float64 value that can be incremented and decremented. +type CounterFloat64 interface { + Clear() + Count() float64 + Dec(float64) + Inc(float64) + Snapshot() CounterFloat64 +} + +// GetOrRegisterCounterFloat64 returns an existing CounterFloat64 or constructs and registers +// a new StandardCounterFloat64. +func GetOrRegisterCounterFloat64(name string, r Registry) CounterFloat64 { + if nil == r { + r = DefaultRegistry + } + return r.GetOrRegister(name, NewCounterFloat64).(CounterFloat64) +} + +// GetOrRegisterCounterFloat64Forced returns an existing CounterFloat64 or constructs and registers a +// new CounterFloat64 no matter the global switch is enabled or not. +// Be sure to unregister the counter from the registry once it is of no use to +// allow for garbage collection. +func GetOrRegisterCounterFloat64Forced(name string, r Registry) CounterFloat64 { + if nil == r { + r = DefaultRegistry + } + return r.GetOrRegister(name, NewCounterFloat64Forced).(CounterFloat64) +} + +// NewCounterFloat64 constructs a new StandardCounterFloat64. +func NewCounterFloat64() CounterFloat64 { + if !Enabled { + return NilCounterFloat64{} + } + return &StandardCounterFloat64{} +} + +// NewCounterFloat64Forced constructs a new StandardCounterFloat64 and returns it no matter if +// the global switch is enabled or not. +func NewCounterFloat64Forced() CounterFloat64 { + return &StandardCounterFloat64{} +} + +// NewRegisteredCounterFloat64 constructs and registers a new StandardCounterFloat64. 
+func NewRegisteredCounterFloat64(name string, r Registry) CounterFloat64 { + c := NewCounterFloat64() + if nil == r { + r = DefaultRegistry + } + r.Register(name, c) + return c +} + +// NewRegisteredCounterFloat64Forced constructs and registers a new StandardCounterFloat64 +// and launches a goroutine no matter the global switch is enabled or not. +// Be sure to unregister the counter from the registry once it is of no use to +// allow for garbage collection. +func NewRegisteredCounterFloat64Forced(name string, r Registry) CounterFloat64 { + c := NewCounterFloat64Forced() + if nil == r { + r = DefaultRegistry + } + r.Register(name, c) + return c +} + +// CounterFloat64Snapshot is a read-only copy of another CounterFloat64. +type CounterFloat64Snapshot float64 + +// Clear panics. +func (CounterFloat64Snapshot) Clear() { + panic("Clear called on a CounterFloat64Snapshot") +} + +// Count returns the value at the time the snapshot was taken. +func (c CounterFloat64Snapshot) Count() float64 { return float64(c) } + +// Dec panics. +func (CounterFloat64Snapshot) Dec(float64) { + panic("Dec called on a CounterFloat64Snapshot") +} + +// Inc panics. +func (CounterFloat64Snapshot) Inc(float64) { + panic("Inc called on a CounterFloat64Snapshot") +} + +// Snapshot returns the snapshot. +func (c CounterFloat64Snapshot) Snapshot() CounterFloat64 { return c } + +// NilCounterFloat64 is a no-op CounterFloat64. +type NilCounterFloat64 struct{} + +// Clear is a no-op. +func (NilCounterFloat64) Clear() {} + +// Count is a no-op. +func (NilCounterFloat64) Count() float64 { return 0.0 } + +// Dec is a no-op. +func (NilCounterFloat64) Dec(i float64) {} + +// Inc is a no-op. +func (NilCounterFloat64) Inc(i float64) {} + +// Snapshot is a no-op. +func (NilCounterFloat64) Snapshot() CounterFloat64 { return NilCounterFloat64{} } + +// StandardCounterFloat64 is the standard implementation of a CounterFloat64 and uses the +// atomic to manage a single float64 value. 
+type StandardCounterFloat64 struct { + floatBits atomic.Uint64 +} + +// Clear sets the counter to zero. +func (c *StandardCounterFloat64) Clear() { + c.floatBits.Store(0) +} + +// Count returns the current value. +func (c *StandardCounterFloat64) Count() float64 { + return math.Float64frombits(c.floatBits.Load()) +} + +// Dec decrements the counter by the given amount. +func (c *StandardCounterFloat64) Dec(v float64) { + atomicAddFloat(&c.floatBits, -v) +} + +// Inc increments the counter by the given amount. +func (c *StandardCounterFloat64) Inc(v float64) { + atomicAddFloat(&c.floatBits, v) +} + +// Snapshot returns a read-only copy of the counter. +func (c *StandardCounterFloat64) Snapshot() CounterFloat64 { + return CounterFloat64Snapshot(c.Count()) +} + +func atomicAddFloat(fbits *atomic.Uint64, v float64) { + for { + loadedBits := fbits.Load() + newBits := math.Float64bits(math.Float64frombits(loadedBits) + v) + if fbits.CompareAndSwap(loadedBits, newBits) { + break + } + } +} diff --git a/coreth/metrics/counter_float_64_test.go b/coreth/metrics/counter_float_64_test.go new file mode 100644 index 00000000..f17aca33 --- /dev/null +++ b/coreth/metrics/counter_float_64_test.go @@ -0,0 +1,99 @@ +package metrics + +import ( + "sync" + "testing" +) + +func BenchmarkCounterFloat64(b *testing.B) { + c := NewCounterFloat64() + b.ResetTimer() + for i := 0; i < b.N; i++ { + c.Inc(1.0) + } +} + +func BenchmarkCounterFloat64Parallel(b *testing.B) { + c := NewCounterFloat64() + b.ResetTimer() + var wg sync.WaitGroup + for i := 0; i < 10; i++ { + wg.Add(1) + go func() { + for i := 0; i < b.N; i++ { + c.Inc(1.0) + } + wg.Done() + }() + } + wg.Wait() + if have, want := c.Count(), 10.0*float64(b.N); have != want { + b.Fatalf("have %f want %f", have, want) + } +} + +func TestCounterFloat64Clear(t *testing.T) { + c := NewCounterFloat64() + c.Inc(1.0) + c.Clear() + if count := c.Count(); count != 0 { + t.Errorf("c.Count(): 0 != %v\n", count) + } +} + +func 
TestCounterFloat64Dec1(t *testing.T) { + c := NewCounterFloat64() + c.Dec(1.0) + if count := c.Count(); count != -1.0 { + t.Errorf("c.Count(): -1.0 != %v\n", count) + } +} + +func TestCounterFloat64Dec2(t *testing.T) { + c := NewCounterFloat64() + c.Dec(2.0) + if count := c.Count(); count != -2.0 { + t.Errorf("c.Count(): -2.0 != %v\n", count) + } +} + +func TestCounterFloat64Inc1(t *testing.T) { + c := NewCounterFloat64() + c.Inc(1.0) + if count := c.Count(); count != 1.0 { + t.Errorf("c.Count(): 1.0 != %v\n", count) + } +} + +func TestCounterFloat64Inc2(t *testing.T) { + c := NewCounterFloat64() + c.Inc(2.0) + if count := c.Count(); count != 2.0 { + t.Errorf("c.Count(): 2.0 != %v\n", count) + } +} + +func TestCounterFloat64Snapshot(t *testing.T) { + c := NewCounterFloat64() + c.Inc(1.0) + snapshot := c.Snapshot() + c.Inc(1.0) + if count := snapshot.Count(); count != 1.0 { + t.Errorf("c.Count(): 1.0 != %v\n", count) + } +} + +func TestCounterFloat64Zero(t *testing.T) { + c := NewCounterFloat64() + if count := c.Count(); count != 0 { + t.Errorf("c.Count(): 0 != %v\n", count) + } +} + +func TestGetOrRegisterCounterFloat64(t *testing.T) { + r := NewRegistry() + NewRegisteredCounterFloat64("foo", r).Inc(47.0) + if c := GetOrRegisterCounterFloat64("foo", r); c.Count() != 47.0 { + t.Fatal(c) + } +} diff --git a/coreth/metrics/cpu.go b/coreth/metrics/cpu.go index 6bd560b3..472a1a42 100644 --- a/coreth/metrics/cpu.go +++ b/coreth/metrics/cpu.go @@ -27,8 +27,9 @@ package metrics // CPUStats is the system and process CPU stats. +// All values are in seconds. 
type CPUStats struct { - GlobalTime int64 // Time spent by the CPU working on all processes - GlobalWait int64 // Time spent by waiting on disk for all processes - LocalTime int64 // Time spent by the CPU working on this process + GlobalTime float64 // Time spent by the CPU working on all processes + GlobalWait float64 // Time spent by waiting on disk for all processes + LocalTime float64 // Time spent by the CPU working on this process } diff --git a/coreth/metrics/cpu_enabled.go b/coreth/metrics/cpu_enabled.go index b04b432f..7b5fe4d2 100644 --- a/coreth/metrics/cpu_enabled.go +++ b/coreth/metrics/cpu_enabled.go @@ -48,7 +48,7 @@ func ReadCPUStats(stats *CPUStats) { } // requesting all cpu times will always return an array with only one time stats entry timeStat := timeStats[0] - stats.GlobalTime = int64((timeStat.User + timeStat.Nice + timeStat.System) * cpu.ClocksPerSec) - stats.GlobalWait = int64((timeStat.Iowait) * cpu.ClocksPerSec) + stats.GlobalTime = timeStat.User + timeStat.Nice + timeStat.System + stats.GlobalWait = timeStat.Iowait stats.LocalTime = getProcessCPUTime() } diff --git a/coreth/metrics/cputime_nop.go b/coreth/metrics/cputime_nop.go index 995a68fe..275b9837 100644 --- a/coreth/metrics/cputime_nop.go +++ b/coreth/metrics/cputime_nop.go @@ -31,6 +31,6 @@ package metrics // getProcessCPUTime returns 0 on Windows as there is no system call to resolve // the actual process' CPU time. -func getProcessCPUTime() int64 { +func getProcessCPUTime() float64 { return 0 } diff --git a/coreth/metrics/cputime_unix.go b/coreth/metrics/cputime_unix.go index 95f60ed8..5a479d8a 100644 --- a/coreth/metrics/cputime_unix.go +++ b/coreth/metrics/cputime_unix.go @@ -36,11 +36,11 @@ import ( ) // getProcessCPUTime retrieves the process' CPU time since program startup. 
-func getProcessCPUTime() int64 { +func getProcessCPUTime() float64 { var usage syscall.Rusage if err := syscall.Getrusage(syscall.RUSAGE_SELF, &usage); err != nil { log.Warn("Failed to retrieve CPU time", "err", err) return 0 } - return int64(usage.Utime.Sec+usage.Stime.Sec)*100 + int64(usage.Utime.Usec+usage.Stime.Usec)/10000 //nolint:unconvert + return float64(usage.Utime.Sec+usage.Stime.Sec) + float64(usage.Utime.Usec+usage.Stime.Usec)/1000000 //nolint:unconvert } diff --git a/coreth/metrics/ewma.go b/coreth/metrics/ewma.go index 03928649..ed95cba1 100644 --- a/coreth/metrics/ewma.go +++ b/coreth/metrics/ewma.go @@ -75,7 +75,7 @@ func (NilEWMA) Update(n int64) {} // of uncounted events and processes them on each tick. It uses the // sync/atomic package to manage uncounted events. type StandardEWMA struct { - uncounted int64 // /!\ this should be the first member to ensure 64-bit alignment + uncounted atomic.Int64 alpha float64 rate float64 init bool @@ -97,8 +97,8 @@ func (a *StandardEWMA) Snapshot() EWMA { // Tick ticks the clock to update the moving average. It assumes it is called // every five seconds. func (a *StandardEWMA) Tick() { - count := atomic.LoadInt64(&a.uncounted) - atomic.AddInt64(&a.uncounted, -count) + count := a.uncounted.Load() + a.uncounted.Add(-count) instantRate := float64(count) / float64(5*time.Second) a.mutex.Lock() defer a.mutex.Unlock() @@ -112,5 +112,5 @@ func (a *StandardEWMA) Tick() { // Update adds n uncounted events. func (a *StandardEWMA) Update(n int64) { - atomic.AddInt64(&a.uncounted, n) + a.uncounted.Add(n) } diff --git a/coreth/metrics/gauge.go b/coreth/metrics/gauge.go index b6b2758b..81137d7f 100644 --- a/coreth/metrics/gauge.go +++ b/coreth/metrics/gauge.go @@ -25,7 +25,7 @@ func NewGauge() Gauge { if !Enabled { return NilGauge{} } - return &StandardGauge{0} + return &StandardGauge{} } // NewRegisteredGauge constructs and registers a new StandardGauge. 
@@ -101,7 +101,7 @@ func (NilGauge) Value() int64 { return 0 } // StandardGauge is the standard implementation of a Gauge and uses the // sync/atomic package to manage a single int64 value. type StandardGauge struct { - value int64 + value atomic.Int64 } // Snapshot returns a read-only copy of the gauge. @@ -111,22 +111,22 @@ func (g *StandardGauge) Snapshot() Gauge { // Update updates the gauge's value. func (g *StandardGauge) Update(v int64) { - atomic.StoreInt64(&g.value, v) + g.value.Store(v) } // Value returns the gauge's current value. func (g *StandardGauge) Value() int64 { - return atomic.LoadInt64(&g.value) + return g.value.Load() } // Dec decrements the gauge's current value by the given amount. func (g *StandardGauge) Dec(i int64) { - atomic.AddInt64(&g.value, -i) + g.value.Add(-i) } // Inc increments the gauge's current value by the given amount. func (g *StandardGauge) Inc(i int64) { - atomic.AddInt64(&g.value, i) + g.value.Add(i) } // FunctionalGauge returns value from given function diff --git a/coreth/metrics/gauge_float64.go b/coreth/metrics/gauge_float64.go index 66819c95..237ff803 100644 --- a/coreth/metrics/gauge_float64.go +++ b/coreth/metrics/gauge_float64.go @@ -1,6 +1,9 @@ package metrics -import "sync" +import ( + "math" + "sync/atomic" +) // GaugeFloat64s hold a float64 value that can be set arbitrarily. type GaugeFloat64 interface { @@ -23,9 +26,7 @@ func NewGaugeFloat64() GaugeFloat64 { if !Enabled { return NilGaugeFloat64{} } - return &StandardGaugeFloat64{ - value: 0.0, - } + return &StandardGaugeFloat64{} } // NewRegisteredGaugeFloat64 constructs and registers a new StandardGaugeFloat64. @@ -83,10 +84,9 @@ func (NilGaugeFloat64) Update(v float64) {} func (NilGaugeFloat64) Value() float64 { return 0.0 } // StandardGaugeFloat64 is the standard implementation of a GaugeFloat64 and uses -// sync.Mutex to manage a single float64 value. +// atomic to manage a single float64 value. 
type StandardGaugeFloat64 struct { - mutex sync.Mutex - value float64 + floatBits atomic.Uint64 } // Snapshot returns a read-only copy of the gauge. @@ -96,16 +96,12 @@ func (g *StandardGaugeFloat64) Snapshot() GaugeFloat64 { // Update updates the gauge's value. func (g *StandardGaugeFloat64) Update(v float64) { - g.mutex.Lock() - defer g.mutex.Unlock() - g.value = v + g.floatBits.Store(math.Float64bits(v)) } // Value returns the gauge's current value. func (g *StandardGaugeFloat64) Value() float64 { - g.mutex.Lock() - defer g.mutex.Unlock() - return g.value + return math.Float64frombits(g.floatBits.Load()) } // FunctionalGaugeFloat64 returns value from given function diff --git a/coreth/metrics/gauge_float64_test.go b/coreth/metrics/gauge_float64_test.go index 7b854d23..647d0900 100644 --- a/coreth/metrics/gauge_float64_test.go +++ b/coreth/metrics/gauge_float64_test.go @@ -1,6 +1,9 @@ package metrics -import "testing" +import ( + "sync" + "testing" +) func BenchmarkGaugeFloat64(b *testing.B) { g := NewGaugeFloat64() @@ -10,6 +13,24 @@ func BenchmarkGaugeFloat64(b *testing.B) { } } +func BenchmarkGaugeFloat64Parallel(b *testing.B) { + c := NewGaugeFloat64() + var wg sync.WaitGroup + for i := 0; i < 10; i++ { + wg.Add(1) + go func() { + for i := 0; i < b.N; i++ { + c.Update(float64(i)) + } + wg.Done() + }() + } + wg.Wait() + if have, want := c.Value(), float64(b.N-1); have != want { + b.Fatalf("have %f want %f", have, want) + } +} + func TestGaugeFloat64(t *testing.T) { g := NewGaugeFloat64() g.Update(47.0) diff --git a/coreth/metrics/graphite.go b/coreth/metrics/graphite.go index 142eec86..29f72b0c 100644 --- a/coreth/metrics/graphite.go +++ b/coreth/metrics/graphite.go @@ -67,6 +67,8 @@ func graphite(c *GraphiteConfig) error { switch metric := i.(type) { case Counter: fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, metric.Count(), now) + case CounterFloat64: + fmt.Fprintf(w, "%s.%s.count %f %d\n", c.Prefix, name, metric.Count(), now) case Gauge: 
fmt.Fprintf(w, "%s.%s.value %d %d\n", c.Prefix, name, metric.Value(), now) case GaugeFloat64: diff --git a/coreth/metrics/log.go b/coreth/metrics/log.go index 0c8ea7c9..d1ce627a 100644 --- a/coreth/metrics/log.go +++ b/coreth/metrics/log.go @@ -24,6 +24,9 @@ func LogScaled(r Registry, freq time.Duration, scale time.Duration, l Logger) { case Counter: l.Printf("counter %s\n", name) l.Printf(" count: %9d\n", metric.Count()) + case CounterFloat64: + l.Printf("counter %s\n", name) + l.Printf(" count: %f\n", metric.Count()) case Gauge: l.Printf("gauge %s\n", name) l.Printf(" value: %9d\n", metric.Value()) diff --git a/coreth/metrics/meter.go b/coreth/metrics/meter.go index 60ae919d..e8564d6a 100644 --- a/coreth/metrics/meter.go +++ b/coreth/metrics/meter.go @@ -101,11 +101,7 @@ func NewRegisteredMeterForced(name string, r Registry) Meter { // MeterSnapshot is a read-only copy of another Meter. type MeterSnapshot struct { - // WARNING: The `temp` field is accessed atomically. - // On 32 bit platforms, only 64-bit aligned fields can be atomic. The struct is - // guaranteed to be so aligned, so take advantage of that. For more information, - // see https://golang.org/pkg/sync/atomic/#pkg-note-BUG. - temp int64 + temp atomic.Int64 count int64 rate1, rate5, rate15, rateMean float64 } @@ -173,7 +169,7 @@ type StandardMeter struct { snapshot *MeterSnapshot a1, a5, a15 EWMA startTime time.Time - stopped uint32 + stopped atomic.Bool } func newStandardMeter() *StandardMeter { @@ -188,8 +184,8 @@ func newStandardMeter() *StandardMeter { // Stop stops the meter, Mark() will be a no-op if you use it after being stopped. func (m *StandardMeter) Stop() { - stopped := atomic.SwapUint32(&m.stopped, 1) - if stopped != 1 { + stopped := m.stopped.Swap(true) + if !stopped { arbiter.Lock() delete(arbiter.meters, m) arbiter.Unlock() @@ -207,7 +203,7 @@ func (m *StandardMeter) Count() int64 { // Mark records the occurrence of n events. 
func (m *StandardMeter) Mark(n int64) { - atomic.AddInt64(&m.snapshot.temp, n) + m.snapshot.temp.Add(n) } // Rate1 returns the one-minute moving average rate of events per second. @@ -241,7 +237,14 @@ func (m *StandardMeter) RateMean() float64 { // Snapshot returns a read-only copy of the meter. func (m *StandardMeter) Snapshot() Meter { m.lock.RLock() - snapshot := *m.snapshot + snapshot := MeterSnapshot{ + count: m.snapshot.count, + rate1: m.snapshot.rate1, + rate5: m.snapshot.rate5, + rate15: m.snapshot.rate15, + rateMean: m.snapshot.rateMean, + } + snapshot.temp.Store(m.snapshot.temp.Load()) m.lock.RUnlock() return &snapshot } @@ -257,7 +260,7 @@ func (m *StandardMeter) updateSnapshot() { func (m *StandardMeter) updateMeter() { // should only run with write lock held on m.lock - n := atomic.SwapInt64(&m.snapshot.temp, 0) + n := m.snapshot.temp.Swap(0) m.snapshot.count += n m.a1.Update(n) m.a5.Update(n) diff --git a/coreth/metrics/metrics_test.go b/coreth/metrics/metrics_test.go index 029c9987..faa74ee0 100644 --- a/coreth/metrics/metrics_test.go +++ b/coreth/metrics/metrics_test.go @@ -2,8 +2,6 @@ package metrics import ( "fmt" - "io" - "log" "sync" "testing" "time" @@ -11,22 +9,16 @@ import ( const FANOUT = 128 -// Stop the compiler from complaining during debugging. 
-var ( - _ = io.Discard - _ = log.LstdFlags -) - func BenchmarkMetrics(b *testing.B) { r := NewRegistry() c := NewRegisteredCounter("counter", r) + cf := NewRegisteredCounterFloat64("counterfloat64", r) g := NewRegisteredGauge("gauge", r) gf := NewRegisteredGaugeFloat64("gaugefloat64", r) h := NewRegisteredHistogram("histogram", r, NewUniformSample(100)) m := NewRegisteredMeter("meter", r) t := NewRegisteredTimer("timer", r) RegisterDebugGCStats(r) - RegisterRuntimeMemStats(r) b.ResetTimer() ch := make(chan bool) @@ -48,24 +40,6 @@ func BenchmarkMetrics(b *testing.B) { }() //*/ - wgR := &sync.WaitGroup{} - //* - wgR.Add(1) - go func() { - defer wgR.Done() - //log.Println("go CaptureRuntimeMemStats") - for { - select { - case <-ch: - //log.Println("done CaptureRuntimeMemStats") - return - default: - CaptureRuntimeMemStatsOnce(r) - } - } - }() - //*/ - wgW := &sync.WaitGroup{} /* wgW.Add(1) @@ -92,6 +66,7 @@ func BenchmarkMetrics(b *testing.B) { //log.Println("go", i) for i := 0; i < b.N; i++ { c.Inc(1) + cf.Inc(1.0) g.Update(int64(i)) gf.Update(float64(i)) h.Update(int64(i)) @@ -104,7 +79,6 @@ func BenchmarkMetrics(b *testing.B) { wg.Wait() close(ch) wgD.Wait() - wgR.Wait() wgW.Wait() } diff --git a/coreth/metrics/opentsdb.go b/coreth/metrics/opentsdb.go index 3fde5545..c9fd2e75 100644 --- a/coreth/metrics/opentsdb.go +++ b/coreth/metrics/opentsdb.go @@ -71,6 +71,8 @@ func openTSDB(c *OpenTSDBConfig) error { switch metric := i.(type) { case Counter: fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, metric.Count(), shortHostname) + case CounterFloat64: + fmt.Fprintf(w, "put %s.%s.count %d %f host=%s\n", c.Prefix, name, now, metric.Count(), shortHostname) case Gauge: fmt.Fprintf(w, "put %s.%s.value %d %d host=%s\n", c.Prefix, name, now, metric.Value(), shortHostname) case GaugeFloat64: diff --git a/coreth/metrics/prometheus/prometheus.go b/coreth/metrics/prometheus/prometheus.go index 46d7c5bc..910b03db 100644 --- 
a/coreth/metrics/prometheus/prometheus.go +++ b/coreth/metrics/prometheus/prometheus.go @@ -50,6 +50,17 @@ func (g gatherer) Gather() ([]*dto.MetricFamily, error) { }, }}, }) + case metrics.CounterFloat64: + val := m.Snapshot().Count() + mfs = append(mfs, &dto.MetricFamily{ + Name: &name, + Type: dto.MetricType_COUNTER.Enum(), + Metric: []*dto.Metric{{ + Counter: &dto.Counter{ + Value: &val, + }, + }}, + }) case metrics.Gauge: val := m.Snapshot().Value() valFloat := float64(val) diff --git a/coreth/metrics/registry.go b/coreth/metrics/registry.go index c5435adf..ec6e37c5 100644 --- a/coreth/metrics/registry.go +++ b/coreth/metrics/registry.go @@ -45,21 +45,17 @@ type Registry interface { // Unregister the metric with the given name. Unregister(string) - - // Unregister all metrics. (Mostly for testing.) - UnregisterAll() } -// The standard implementation of a Registry is a mutex-protected map +// The standard implementation of a Registry uses sync.map // of names to metrics. type StandardRegistry struct { - metrics map[string]interface{} - mutex sync.Mutex + metrics sync.Map } // Create a new registry. func NewRegistry() Registry { - return &StandardRegistry{metrics: make(map[string]interface{})} + return &StandardRegistry{} } // Call the given function for each registered metric. @@ -71,9 +67,8 @@ func (r *StandardRegistry) Each(f func(string, interface{})) { // Get the metric by the given name or nil if none is registered. func (r *StandardRegistry) Get(name string) interface{} { - r.mutex.Lock() - defer r.mutex.Unlock() - return r.metrics[name] + item, _ := r.metrics.Load(name) + return item } // Gets an existing metric or creates and registers a new one. Threadsafe @@ -81,35 +76,48 @@ func (r *StandardRegistry) Get(name string) interface{} { // The interface can be the metric to register if not found in registry, // or a function returning the metric for lazy instantiation. 
func (r *StandardRegistry) GetOrRegister(name string, i interface{}) interface{} { - r.mutex.Lock() - defer r.mutex.Unlock() - if metric, ok := r.metrics[name]; ok { - return metric + // fast path + cached, ok := r.metrics.Load(name) + if ok { + return cached } if v := reflect.ValueOf(i); v.Kind() == reflect.Func { i = v.Call(nil)[0].Interface() } - r.register(name, i) - return i + item, _, ok := r.loadOrRegister(name, i) + if !ok { + return i + } + return item } // Register the given metric under the given name. Returns a DuplicateMetric // if a metric by the given name is already registered. func (r *StandardRegistry) Register(name string, i interface{}) error { - r.mutex.Lock() - defer r.mutex.Unlock() - return r.register(name, i) + // fast path + _, ok := r.metrics.Load(name) + if ok { + return DuplicateMetric(name) + } + + if v := reflect.ValueOf(i); v.Kind() == reflect.Func { + i = v.Call(nil)[0].Interface() + } + _, loaded, _ := r.loadOrRegister(name, i) + if loaded { + return DuplicateMetric(name) + } + return nil } // Run all registered healthchecks. func (r *StandardRegistry) RunHealthchecks() { - r.mutex.Lock() - defer r.mutex.Unlock() - for _, i := range r.metrics { - if h, ok := i.(Healthcheck); ok { + r.metrics.Range(func(key, value any) bool { + if h, ok := value.(Healthcheck); ok { h.Check() } - } + return true + }) } // GetAll metrics in the Registry @@ -120,6 +128,8 @@ func (r *StandardRegistry) GetAll() map[string]map[string]interface{} { switch metric := i.(type) { case Counter: values["count"] = metric.Count() + case CounterFloat64: + values["count"] = metric.Count() case Gauge: values["value"] = metric.Value() case GaugeFloat64: @@ -175,45 +185,31 @@ func (r *StandardRegistry) GetAll() map[string]map[string]interface{} { // Unregister the metric with the given name. func (r *StandardRegistry) Unregister(name string) { - r.mutex.Lock() - defer r.mutex.Unlock() r.stop(name) - delete(r.metrics, name) -} - -// Unregister all metrics. 
(Mostly for testing.) -func (r *StandardRegistry) UnregisterAll() { - r.mutex.Lock() - defer r.mutex.Unlock() - for name := range r.metrics { - r.stop(name) - delete(r.metrics, name) - } + r.metrics.LoadAndDelete(name) } -func (r *StandardRegistry) register(name string, i interface{}) error { - if _, ok := r.metrics[name]; ok { - return DuplicateMetric(name) - } +func (r *StandardRegistry) loadOrRegister(name string, i interface{}) (interface{}, bool, bool) { switch i.(type) { - case Counter, Gauge, GaugeFloat64, Healthcheck, Histogram, Meter, Timer, ResettingTimer: - r.metrics[name] = i + case Counter, CounterFloat64, Gauge, GaugeFloat64, Healthcheck, Histogram, Meter, Timer, ResettingTimer: + default: + return nil, false, false } - return nil + item, loaded := r.metrics.LoadOrStore(name, i) + return item, loaded, true } func (r *StandardRegistry) registered() map[string]interface{} { - r.mutex.Lock() - defer r.mutex.Unlock() - metrics := make(map[string]interface{}, len(r.metrics)) - for name, i := range r.metrics { - metrics[name] = i - } + metrics := make(map[string]interface{}) + r.metrics.Range(func(key, value any) bool { + metrics[key.(string)] = value + return true + }) return metrics } func (r *StandardRegistry) stop(name string) { - if i, ok := r.metrics[name]; ok { + if i, ok := r.metrics.Load(name); ok { if s, ok := i.(Stoppable); ok { s.Stop() } @@ -306,11 +302,6 @@ func (r *PrefixedRegistry) Unregister(name string) { r.underlying.Unregister(realName) } -// Unregister all metrics. (Mostly for testing.) 
-func (r *PrefixedRegistry) UnregisterAll() { - r.underlying.UnregisterAll() -} - var ( DefaultRegistry = NewRegistry() EphemeralRegistry = NewRegistry() diff --git a/coreth/metrics/registry_test.go b/coreth/metrics/registry_test.go index d277ae5c..7cc5cf14 100644 --- a/coreth/metrics/registry_test.go +++ b/coreth/metrics/registry_test.go @@ -1,6 +1,7 @@ package metrics import ( + "sync" "testing" ) @@ -13,6 +14,30 @@ func BenchmarkRegistry(b *testing.B) { } } +func BenchmarkRegistryGetOrRegisterParallel_8(b *testing.B) { + benchmarkRegistryGetOrRegisterParallel(b, 8) +} + +func BenchmarkRegistryGetOrRegisterParallel_32(b *testing.B) { + benchmarkRegistryGetOrRegisterParallel(b, 32) +} + +func benchmarkRegistryGetOrRegisterParallel(b *testing.B, amount int) { + r := NewRegistry() + b.ResetTimer() + var wg sync.WaitGroup + for i := 0; i < amount; i++ { + wg.Add(1) + go func() { + for i := 0; i < b.N; i++ { + r.GetOrRegister("foo", NewMeter) + } + wg.Done() + }() + } + wg.Wait() +} + func TestRegistry(t *testing.T) { r := NewRegistry() r.Register("foo", NewCounter()) diff --git a/coreth/metrics/runtime.go b/coreth/metrics/runtime.go deleted file mode 100644 index 9450c479..00000000 --- a/coreth/metrics/runtime.go +++ /dev/null @@ -1,212 +0,0 @@ -package metrics - -import ( - "runtime" - "runtime/pprof" - "time" -) - -var ( - memStats runtime.MemStats - runtimeMetrics struct { - MemStats struct { - Alloc Gauge - BuckHashSys Gauge - DebugGC Gauge - EnableGC Gauge - Frees Gauge - HeapAlloc Gauge - HeapIdle Gauge - HeapInuse Gauge - HeapObjects Gauge - HeapReleased Gauge - HeapSys Gauge - LastGC Gauge - Lookups Gauge - Mallocs Gauge - MCacheInuse Gauge - MCacheSys Gauge - MSpanInuse Gauge - MSpanSys Gauge - NextGC Gauge - NumGC Gauge - GCCPUFraction GaugeFloat64 - PauseNs Histogram - PauseTotalNs Gauge - StackInuse Gauge - StackSys Gauge - Sys Gauge - TotalAlloc Gauge - } - NumCgoCall Gauge - NumGoroutine Gauge - NumThread Gauge - ReadMemStats Timer - } - frees uint64 - 
lookups uint64 - mallocs uint64 - numGC uint32 - numCgoCalls int64 - - threadCreateProfile = pprof.Lookup("threadcreate") -) - -// Capture new values for the Go runtime statistics exported in -// runtime.MemStats. This is designed to be called as a goroutine. -func CaptureRuntimeMemStats(r Registry, d time.Duration) { - for range time.Tick(d) { - CaptureRuntimeMemStatsOnce(r) - } -} - -// Capture new values for the Go runtime statistics exported in -// runtime.MemStats. This is designed to be called in a background -// goroutine. Giving a registry which has not been given to -// RegisterRuntimeMemStats will panic. -// -// Be very careful with this because runtime.ReadMemStats calls the C -// functions runtime·semacquire(&runtime·worldsema) and runtime·stoptheworld() -// and that last one does what it says on the tin. -func CaptureRuntimeMemStatsOnce(r Registry) { - t := time.Now() - runtime.ReadMemStats(&memStats) // This takes 50-200us. - runtimeMetrics.ReadMemStats.UpdateSince(t) - - runtimeMetrics.MemStats.Alloc.Update(int64(memStats.Alloc)) - runtimeMetrics.MemStats.BuckHashSys.Update(int64(memStats.BuckHashSys)) - if memStats.DebugGC { - runtimeMetrics.MemStats.DebugGC.Update(1) - } else { - runtimeMetrics.MemStats.DebugGC.Update(0) - } - if memStats.EnableGC { - runtimeMetrics.MemStats.EnableGC.Update(1) - } else { - runtimeMetrics.MemStats.EnableGC.Update(0) - } - - runtimeMetrics.MemStats.Frees.Update(int64(memStats.Frees - frees)) - runtimeMetrics.MemStats.HeapAlloc.Update(int64(memStats.HeapAlloc)) - runtimeMetrics.MemStats.HeapIdle.Update(int64(memStats.HeapIdle)) - runtimeMetrics.MemStats.HeapInuse.Update(int64(memStats.HeapInuse)) - runtimeMetrics.MemStats.HeapObjects.Update(int64(memStats.HeapObjects)) - runtimeMetrics.MemStats.HeapReleased.Update(int64(memStats.HeapReleased)) - runtimeMetrics.MemStats.HeapSys.Update(int64(memStats.HeapSys)) - runtimeMetrics.MemStats.LastGC.Update(int64(memStats.LastGC)) - 
runtimeMetrics.MemStats.Lookups.Update(int64(memStats.Lookups - lookups)) - runtimeMetrics.MemStats.Mallocs.Update(int64(memStats.Mallocs - mallocs)) - runtimeMetrics.MemStats.MCacheInuse.Update(int64(memStats.MCacheInuse)) - runtimeMetrics.MemStats.MCacheSys.Update(int64(memStats.MCacheSys)) - runtimeMetrics.MemStats.MSpanInuse.Update(int64(memStats.MSpanInuse)) - runtimeMetrics.MemStats.MSpanSys.Update(int64(memStats.MSpanSys)) - runtimeMetrics.MemStats.NextGC.Update(int64(memStats.NextGC)) - runtimeMetrics.MemStats.NumGC.Update(int64(memStats.NumGC - numGC)) - runtimeMetrics.MemStats.GCCPUFraction.Update(gcCPUFraction(&memStats)) - - // - i := numGC % uint32(len(memStats.PauseNs)) - ii := memStats.NumGC % uint32(len(memStats.PauseNs)) - if memStats.NumGC-numGC >= uint32(len(memStats.PauseNs)) { - for i = 0; i < uint32(len(memStats.PauseNs)); i++ { - runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i])) - } - } else { - if i > ii { - for ; i < uint32(len(memStats.PauseNs)); i++ { - runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i])) - } - i = 0 - } - for ; i < ii; i++ { - runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i])) - } - } - frees = memStats.Frees - lookups = memStats.Lookups - mallocs = memStats.Mallocs - numGC = memStats.NumGC - - runtimeMetrics.MemStats.PauseTotalNs.Update(int64(memStats.PauseTotalNs)) - runtimeMetrics.MemStats.StackInuse.Update(int64(memStats.StackInuse)) - runtimeMetrics.MemStats.StackSys.Update(int64(memStats.StackSys)) - runtimeMetrics.MemStats.Sys.Update(int64(memStats.Sys)) - runtimeMetrics.MemStats.TotalAlloc.Update(int64(memStats.TotalAlloc)) - - currentNumCgoCalls := numCgoCall() - runtimeMetrics.NumCgoCall.Update(currentNumCgoCalls - numCgoCalls) - numCgoCalls = currentNumCgoCalls - - runtimeMetrics.NumGoroutine.Update(int64(runtime.NumGoroutine())) - - runtimeMetrics.NumThread.Update(int64(threadCreateProfile.Count())) -} - -// Register runtimeMetrics for the Go runtime statistics 
exported in runtime and -// specifically runtime.MemStats. The runtimeMetrics are named by their -// fully-qualified Go symbols, i.e. runtime.MemStats.Alloc. -func RegisterRuntimeMemStats(r Registry) { - runtimeMetrics.MemStats.Alloc = NewGauge() - runtimeMetrics.MemStats.BuckHashSys = NewGauge() - runtimeMetrics.MemStats.DebugGC = NewGauge() - runtimeMetrics.MemStats.EnableGC = NewGauge() - runtimeMetrics.MemStats.Frees = NewGauge() - runtimeMetrics.MemStats.HeapAlloc = NewGauge() - runtimeMetrics.MemStats.HeapIdle = NewGauge() - runtimeMetrics.MemStats.HeapInuse = NewGauge() - runtimeMetrics.MemStats.HeapObjects = NewGauge() - runtimeMetrics.MemStats.HeapReleased = NewGauge() - runtimeMetrics.MemStats.HeapSys = NewGauge() - runtimeMetrics.MemStats.LastGC = NewGauge() - runtimeMetrics.MemStats.Lookups = NewGauge() - runtimeMetrics.MemStats.Mallocs = NewGauge() - runtimeMetrics.MemStats.MCacheInuse = NewGauge() - runtimeMetrics.MemStats.MCacheSys = NewGauge() - runtimeMetrics.MemStats.MSpanInuse = NewGauge() - runtimeMetrics.MemStats.MSpanSys = NewGauge() - runtimeMetrics.MemStats.NextGC = NewGauge() - runtimeMetrics.MemStats.NumGC = NewGauge() - runtimeMetrics.MemStats.GCCPUFraction = NewGaugeFloat64() - runtimeMetrics.MemStats.PauseNs = NewHistogram(NewExpDecaySample(1028, 0.015)) - runtimeMetrics.MemStats.PauseTotalNs = NewGauge() - runtimeMetrics.MemStats.StackInuse = NewGauge() - runtimeMetrics.MemStats.StackSys = NewGauge() - runtimeMetrics.MemStats.Sys = NewGauge() - runtimeMetrics.MemStats.TotalAlloc = NewGauge() - runtimeMetrics.NumCgoCall = NewGauge() - runtimeMetrics.NumGoroutine = NewGauge() - runtimeMetrics.NumThread = NewGauge() - runtimeMetrics.ReadMemStats = NewTimer() - - r.Register("runtime.MemStats.Alloc", runtimeMetrics.MemStats.Alloc) - r.Register("runtime.MemStats.BuckHashSys", runtimeMetrics.MemStats.BuckHashSys) - r.Register("runtime.MemStats.DebugGC", runtimeMetrics.MemStats.DebugGC) - r.Register("runtime.MemStats.EnableGC", 
runtimeMetrics.MemStats.EnableGC) - r.Register("runtime.MemStats.Frees", runtimeMetrics.MemStats.Frees) - r.Register("runtime.MemStats.HeapAlloc", runtimeMetrics.MemStats.HeapAlloc) - r.Register("runtime.MemStats.HeapIdle", runtimeMetrics.MemStats.HeapIdle) - r.Register("runtime.MemStats.HeapInuse", runtimeMetrics.MemStats.HeapInuse) - r.Register("runtime.MemStats.HeapObjects", runtimeMetrics.MemStats.HeapObjects) - r.Register("runtime.MemStats.HeapReleased", runtimeMetrics.MemStats.HeapReleased) - r.Register("runtime.MemStats.HeapSys", runtimeMetrics.MemStats.HeapSys) - r.Register("runtime.MemStats.LastGC", runtimeMetrics.MemStats.LastGC) - r.Register("runtime.MemStats.Lookups", runtimeMetrics.MemStats.Lookups) - r.Register("runtime.MemStats.Mallocs", runtimeMetrics.MemStats.Mallocs) - r.Register("runtime.MemStats.MCacheInuse", runtimeMetrics.MemStats.MCacheInuse) - r.Register("runtime.MemStats.MCacheSys", runtimeMetrics.MemStats.MCacheSys) - r.Register("runtime.MemStats.MSpanInuse", runtimeMetrics.MemStats.MSpanInuse) - r.Register("runtime.MemStats.MSpanSys", runtimeMetrics.MemStats.MSpanSys) - r.Register("runtime.MemStats.NextGC", runtimeMetrics.MemStats.NextGC) - r.Register("runtime.MemStats.NumGC", runtimeMetrics.MemStats.NumGC) - r.Register("runtime.MemStats.GCCPUFraction", runtimeMetrics.MemStats.GCCPUFraction) - r.Register("runtime.MemStats.PauseNs", runtimeMetrics.MemStats.PauseNs) - r.Register("runtime.MemStats.PauseTotalNs", runtimeMetrics.MemStats.PauseTotalNs) - r.Register("runtime.MemStats.StackInuse", runtimeMetrics.MemStats.StackInuse) - r.Register("runtime.MemStats.StackSys", runtimeMetrics.MemStats.StackSys) - r.Register("runtime.MemStats.Sys", runtimeMetrics.MemStats.Sys) - r.Register("runtime.MemStats.TotalAlloc", runtimeMetrics.MemStats.TotalAlloc) - r.Register("runtime.NumCgoCall", runtimeMetrics.NumCgoCall) - r.Register("runtime.NumGoroutine", runtimeMetrics.NumGoroutine) - r.Register("runtime.NumThread", runtimeMetrics.NumThread) - 
r.Register("runtime.ReadMemStats", runtimeMetrics.ReadMemStats) -} diff --git a/coreth/metrics/runtime_cgo.go b/coreth/metrics/runtime_cgo.go deleted file mode 100644 index 4307ebdb..00000000 --- a/coreth/metrics/runtime_cgo.go +++ /dev/null @@ -1,10 +0,0 @@ -//go:build cgo && !appengine && !js -// +build cgo,!appengine,!js - -package metrics - -import "runtime" - -func numCgoCall() int64 { - return runtime.NumCgoCall() -} diff --git a/coreth/metrics/runtime_gccpufraction.go b/coreth/metrics/runtime_gccpufraction.go deleted file mode 100644 index 28cd4475..00000000 --- a/coreth/metrics/runtime_gccpufraction.go +++ /dev/null @@ -1,10 +0,0 @@ -//go:build go1.5 -// +build go1.5 - -package metrics - -import "runtime" - -func gcCPUFraction(memStats *runtime.MemStats) float64 { - return memStats.GCCPUFraction -} diff --git a/coreth/metrics/runtime_no_cgo.go b/coreth/metrics/runtime_no_cgo.go deleted file mode 100644 index 1799bef6..00000000 --- a/coreth/metrics/runtime_no_cgo.go +++ /dev/null @@ -1,8 +0,0 @@ -//go:build !cgo || appengine || js -// +build !cgo appengine js - -package metrics - -func numCgoCall() int64 { - return 0 -} diff --git a/coreth/metrics/runtime_no_gccpufraction.go b/coreth/metrics/runtime_no_gccpufraction.go deleted file mode 100644 index af1a4b63..00000000 --- a/coreth/metrics/runtime_no_gccpufraction.go +++ /dev/null @@ -1,10 +0,0 @@ -//go:build !go1.5 -// +build !go1.5 - -package metrics - -import "runtime" - -func gcCPUFraction(memStats *runtime.MemStats) float64 { - return 0 -} diff --git a/coreth/metrics/runtime_test.go b/coreth/metrics/runtime_test.go deleted file mode 100644 index e011bf59..00000000 --- a/coreth/metrics/runtime_test.go +++ /dev/null @@ -1,98 +0,0 @@ -package metrics - -import ( - "os" - "runtime" - "testing" - "time" -) - -func BenchmarkRuntimeMemStats(b *testing.B) { - r := NewRegistry() - RegisterRuntimeMemStats(r) - b.ResetTimer() - for i := 0; i < b.N; i++ { - CaptureRuntimeMemStatsOnce(r) - } -} - -func 
TestRuntimeMemStats(t *testing.T) { - if os.Getenv("RUN_FLAKY_TESTS") != "true" { - t.Skip("FLAKY") - } - r := NewRegistry() - RegisterRuntimeMemStats(r) - CaptureRuntimeMemStatsOnce(r) - zero := runtimeMetrics.MemStats.PauseNs.Count() // Get a "zero" since GC may have run before these tests. - runtime.GC() - CaptureRuntimeMemStatsOnce(r) - if count := runtimeMetrics.MemStats.PauseNs.Count(); count-zero != 1 { - t.Fatal(count - zero) - } - runtime.GC() - runtime.GC() - CaptureRuntimeMemStatsOnce(r) - if count := runtimeMetrics.MemStats.PauseNs.Count(); count-zero != 3 { - t.Fatal(count - zero) - } - for i := 0; i < 256; i++ { - runtime.GC() - } - CaptureRuntimeMemStatsOnce(r) - if count := runtimeMetrics.MemStats.PauseNs.Count(); count-zero != 259 { - t.Fatal(count - zero) - } - for i := 0; i < 257; i++ { - runtime.GC() - } - CaptureRuntimeMemStatsOnce(r) - if count := runtimeMetrics.MemStats.PauseNs.Count(); count-zero != 515 { // We lost one because there were too many GCs between captures. 
- t.Fatal(count - zero) - } -} - -func TestRuntimeMemStatsNumThread(t *testing.T) { - if os.Getenv("RUN_FLAKY_TESTS") != "true" { - t.Skip("FLAKY") - } - r := NewRegistry() - RegisterRuntimeMemStats(r) - CaptureRuntimeMemStatsOnce(r) - - if value := runtimeMetrics.NumThread.Value(); value < 1 { - t.Fatalf("got NumThread: %d, wanted at least 1", value) - } -} - -func TestRuntimeMemStatsBlocking(t *testing.T) { - if os.Getenv("RUN_FLAKY_TESTS") != "true" { - t.Skip("FLAKY") - } - if g := runtime.GOMAXPROCS(0); g < 2 { - t.Skipf("skipping TestRuntimeMemStatsBlocking with GOMAXPROCS=%d\n", g) - } - ch := make(chan int) - go testRuntimeMemStatsBlocking(ch) - var memStats runtime.MemStats - t0 := time.Now() - runtime.ReadMemStats(&memStats) - t1 := time.Now() - t.Log("i++ during runtime.ReadMemStats:", <-ch) - go testRuntimeMemStatsBlocking(ch) - d := t1.Sub(t0) - t.Log(d) - time.Sleep(d) - t.Log("i++ during time.Sleep:", <-ch) -} - -func testRuntimeMemStatsBlocking(ch chan int) { - i := 0 - for { - select { - case ch <- i: - return - default: - i++ - } - } -} diff --git a/coreth/metrics/sample.go b/coreth/metrics/sample.go index fa2bfb27..afcaa211 100644 --- a/coreth/metrics/sample.go +++ b/coreth/metrics/sample.go @@ -41,6 +41,7 @@ type ExpDecaySample struct { reservoirSize int t0, t1 time.Time values *expDecaySampleHeap + rand *rand.Rand } // NewExpDecaySample constructs a new exponentially-decaying sample with the @@ -59,6 +60,12 @@ func NewExpDecaySample(reservoirSize int, alpha float64) Sample { return s } +// SetRand sets the random source (useful in tests) +func (s *ExpDecaySample) SetRand(prng *rand.Rand) Sample { + s.rand = prng + return s +} + // Clear clears all samples. 
func (s *ExpDecaySample) Clear() { s.mutex.Lock() @@ -168,8 +175,14 @@ func (s *ExpDecaySample) update(t time.Time, v int64) { if s.values.Size() == s.reservoirSize { s.values.Pop() } + var f64 float64 + if s.rand != nil { + f64 = s.rand.Float64() + } else { + f64 = rand.Float64() + } s.values.Push(expDecaySample{ - k: math.Exp(t.Sub(s.t0).Seconds()*s.alpha) / rand.Float64(), + k: math.Exp(t.Sub(s.t0).Seconds()*s.alpha) / f64, v: v, }) if t.After(s.t1) { @@ -402,6 +415,7 @@ type UniformSample struct { mutex sync.Mutex reservoirSize int values []int64 + rand *rand.Rand } // NewUniformSample constructs a new uniform sample with the given reservoir @@ -416,6 +430,12 @@ func NewUniformSample(reservoirSize int) Sample { } } +// SetRand sets the random source (useful in tests) +func (s *UniformSample) SetRand(prng *rand.Rand) Sample { + s.rand = prng + return s +} + // Clear clears all samples. func (s *UniformSample) Clear() { s.mutex.Lock() @@ -511,7 +531,12 @@ func (s *UniformSample) Update(v int64) { if len(s.values) < s.reservoirSize { s.values = append(s.values, v) } else { - r := rand.Int63n(s.count) + var r int64 + if s.rand != nil { + r = s.rand.Int63n(s.count) + } else { + r = rand.Int63n(s.count) + } if r < int64(len(s.values)) { s.values[int(r)] = v } diff --git a/coreth/metrics/sample_test.go b/coreth/metrics/sample_test.go index c9168d3e..3ae128d5 100644 --- a/coreth/metrics/sample_test.go +++ b/coreth/metrics/sample_test.go @@ -80,7 +80,6 @@ func BenchmarkUniformSample1028(b *testing.B) { } func TestExpDecaySample10(t *testing.T) { - rand.Seed(1) s := NewExpDecaySample(100, 0.99) for i := 0; i < 10; i++ { s.Update(int64(i)) @@ -102,7 +101,6 @@ func TestExpDecaySample10(t *testing.T) { } func TestExpDecaySample100(t *testing.T) { - rand.Seed(1) s := NewExpDecaySample(1000, 0.01) for i := 0; i < 100; i++ { s.Update(int64(i)) @@ -124,7 +122,6 @@ func TestExpDecaySample100(t *testing.T) { } func TestExpDecaySample1000(t *testing.T) { - rand.Seed(1) s := 
NewExpDecaySample(100, 0.99) for i := 0; i < 1000; i++ { s.Update(int64(i)) @@ -150,7 +147,6 @@ func TestExpDecaySample1000(t *testing.T) { // The priority becomes +Inf quickly after starting if this is done, // effectively freezing the set of samples until a rescale step happens. func TestExpDecaySampleNanosecondRegression(t *testing.T) { - rand.Seed(1) s := NewExpDecaySample(100, 0.99) for i := 0; i < 100; i++ { s.Update(10) @@ -183,8 +179,7 @@ func TestExpDecaySampleRescale(t *testing.T) { func TestExpDecaySampleSnapshot(t *testing.T) { now := time.Now() - rand.Seed(1) - s := NewExpDecaySample(100, 0.99) + s := NewExpDecaySample(100, 0.99).(*ExpDecaySample).SetRand(rand.New(rand.NewSource(1))) for i := 1; i <= 10000; i++ { s.(*ExpDecaySample).update(now.Add(time.Duration(i)), int64(i)) } @@ -195,8 +190,7 @@ func TestExpDecaySampleSnapshot(t *testing.T) { func TestExpDecaySampleStatistics(t *testing.T) { now := time.Now() - rand.Seed(1) - s := NewExpDecaySample(100, 0.99) + s := NewExpDecaySample(100, 0.99).(*ExpDecaySample).SetRand(rand.New(rand.NewSource(1))) for i := 1; i <= 10000; i++ { s.(*ExpDecaySample).update(now.Add(time.Duration(i)), int64(i)) } @@ -204,7 +198,6 @@ func TestExpDecaySampleStatistics(t *testing.T) { } func TestUniformSample(t *testing.T) { - rand.Seed(1) s := NewUniformSample(100) for i := 0; i < 1000; i++ { s.Update(int64(i)) @@ -226,7 +219,6 @@ func TestUniformSample(t *testing.T) { } func TestUniformSampleIncludesTail(t *testing.T) { - rand.Seed(1) s := NewUniformSample(100) max := 100 for i := 0; i < max; i++ { @@ -244,7 +236,7 @@ func TestUniformSampleIncludesTail(t *testing.T) { } func TestUniformSampleSnapshot(t *testing.T) { - s := NewUniformSample(100) + s := NewUniformSample(100).(*UniformSample).SetRand(rand.New(rand.NewSource(1))) for i := 1; i <= 10000; i++ { s.Update(int64(i)) } @@ -254,8 +246,7 @@ func TestUniformSampleSnapshot(t *testing.T) { } func TestUniformSampleStatistics(t *testing.T) { - rand.Seed(1) - s := 
NewUniformSample(100) + s := NewUniformSample(100).(*UniformSample).SetRand(rand.New(rand.NewSource(1))) for i := 1; i <= 10000; i++ { s.Update(int64(i)) } diff --git a/coreth/metrics/syslog.go b/coreth/metrics/syslog.go index 551a2bd0..f23b07e1 100644 --- a/coreth/metrics/syslog.go +++ b/coreth/metrics/syslog.go @@ -17,6 +17,8 @@ func Syslog(r Registry, d time.Duration, w *syslog.Writer) { switch metric := i.(type) { case Counter: w.Info(fmt.Sprintf("counter %s: count: %d", name, metric.Count())) + case CounterFloat64: + w.Info(fmt.Sprintf("counter %s: count: %f", name, metric.Count())) case Gauge: w.Info(fmt.Sprintf("gauge %s: value: %d", name, metric.Value())) case GaugeFloat64: diff --git a/coreth/metrics/validate.sh b/coreth/metrics/validate.sh index c4ae91e6..d4e69f88 100755 --- a/coreth/metrics/validate.sh +++ b/coreth/metrics/validate.sh @@ -3,8 +3,8 @@ set -e # check there are no formatting issues -GOFMT_LINES=`gofmt -l . | wc -l | xargs` -test $GOFMT_LINES -eq 0 || echo "gofmt needs to be run, ${GOFMT_LINES} files have issues" +GOFMT_LINES=$(gofmt -l . | wc -l | xargs) +test "$GOFMT_LINES" -eq 0 || echo "gofmt needs to be run, ${GOFMT_LINES} files have issues" # run the tests for the root package go test -race . 
diff --git a/coreth/metrics/writer.go b/coreth/metrics/writer.go index 88521a80..256fbd14 100644 --- a/coreth/metrics/writer.go +++ b/coreth/metrics/writer.go @@ -29,6 +29,9 @@ func WriteOnce(r Registry, w io.Writer) { case Counter: fmt.Fprintf(w, "counter %s\n", namedMetric.name) fmt.Fprintf(w, " count: %9d\n", metric.Count()) + case CounterFloat64: + fmt.Fprintf(w, "counter %s\n", namedMetric.name) + fmt.Fprintf(w, " count: %f\n", metric.Count()) case Gauge: fmt.Fprintf(w, "gauge %s\n", namedMetric.name) fmt.Fprintf(w, " value: %9d\n", metric.Value()) diff --git a/coreth/miner/miner.go b/coreth/miner/miner.go index 6a9979fd..b6d02fed 100644 --- a/coreth/miner/miner.go +++ b/coreth/miner/miner.go @@ -31,8 +31,10 @@ import ( "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/coreth/consensus" "github.com/ava-labs/coreth/core" + "github.com/ava-labs/coreth/core/txpool" "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/params" + "github.com/ava-labs/coreth/precompile/precompileconfig" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/event" ) @@ -40,12 +42,12 @@ import ( // Backend wraps all methods required for mining. type Backend interface { BlockChain() *core.BlockChain - TxPool() *core.TxPool + TxPool() *txpool.TxPool } // Config is the configuration parameters of mining. 
type Config struct { - Etherbase common.Address `toml:",omitempty"` // Public address for block mining rewards (default = first account) + Etherbase common.Address `toml:",omitempty"` // Public address for block mining rewards } type Miner struct { @@ -62,8 +64,8 @@ func (miner *Miner) SetEtherbase(addr common.Address) { miner.worker.setEtherbase(addr) } -func (miner *Miner) GenerateBlock() (*types.Block, error) { - return miner.worker.commitNewWork() +func (miner *Miner) GenerateBlock(predicateContext *precompileconfig.PredicateContext) (*types.Block, error) { + return miner.worker.commitNewWork(predicateContext) } // SubscribePendingLogs starts delivering logs from pending transactions diff --git a/coreth/miner/worker.go b/coreth/miner/worker.go index 5c7013bf..d11a4917 100644 --- a/coreth/miner/worker.go +++ b/coreth/miner/worker.go @@ -43,7 +43,10 @@ import ( "github.com/ava-labs/coreth/core" "github.com/ava-labs/coreth/core/state" "github.com/ava-labs/coreth/core/types" + "github.com/ava-labs/coreth/core/vm" "github.com/ava-labs/coreth/params" + "github.com/ava-labs/coreth/precompile/precompileconfig" + "github.com/ava-labs/coreth/predicate" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/log" @@ -67,7 +70,15 @@ type environment struct { header *types.Header txs []*types.Transaction receipts []*types.Receipt - size common.StorageSize + size uint64 + + rules params.Rules + predicateContext *precompileconfig.PredicateContext + // predicateResults contains the results of checking the predicates for each transaction in the miner. + // The results are accumulated as transactions are executed by the miner and set on the BlockContext. + // If a transaction is dropped, its results must explicitly be removed from predicateResults in the same + // way that the gas pool and state is reset. 
+ predicateResults *predicate.Results start time.Time // Time that block building began } @@ -98,8 +109,9 @@ func newWorker(config *Config, chainConfig *params.ChainConfig, engine consensus chainConfig: chainConfig, engine: engine, eth: eth, - mux: mux, chain: eth.BlockChain(), + mux: mux, + coinbase: config.Etherbase, clock: clock, } @@ -114,61 +126,64 @@ func (w *worker) setEtherbase(addr common.Address) { } // commitNewWork generates several new sealing tasks based on the parent block. -func (w *worker) commitNewWork() (*types.Block, error) { +func (w *worker) commitNewWork(predicateContext *precompileconfig.PredicateContext) (*types.Block, error) { w.mu.RLock() defer w.mu.RUnlock() tstart := w.clock.Time() - timestamp := tstart.Unix() + timestamp := uint64(tstart.Unix()) parent := w.chain.CurrentBlock() // Note: in order to support asynchronous block production, blocks are allowed to have // the same timestamp as their parent. This allows more than one block to be produced // per second. - if parent.Time() >= uint64(timestamp) { - timestamp = int64(parent.Time()) + if parent.Time >= timestamp { + timestamp = parent.Time } var gasLimit uint64 - if w.chainConfig.IsCortina(big.NewInt(timestamp)) { + if w.chainConfig.IsCortina(timestamp) { gasLimit = params.CortinaGasLimit } else { + // The gas limit is set in phase1 to ApricotPhase1GasLimit because the ceiling and floor were set to the same value + // such that the gas limit converged to it. Since this is hardbaked now, we remove the ability to configure it. 
+ gasLimit = core.CalcGasLimit(parent.GasUsed, parent.GasLimit, params.ApricotPhase1GasLimit, params.ApricotPhase1GasLimit) if w.chainConfig.IsSongbirdCode() { - if w.chainConfig.IsSongbirdTransition(big.NewInt(timestamp)) { + if w.chainConfig.IsSongbirdTransition(timestamp) { gasLimit = params.SgbTransitionGasLimit - } else if w.chainConfig.IsApricotPhase5(big.NewInt(timestamp)) { + } else if w.chainConfig.IsApricotPhase5(timestamp) { gasLimit = params.SgbApricotPhase5GasLimit - } else if w.chainConfig.IsApricotPhase1(big.NewInt(timestamp)) { + } else if w.chainConfig.IsApricotPhase1(timestamp) { gasLimit = params.ApricotPhase1GasLimit } else { - gasLimit = core.CalcGasLimit(parent.GasUsed(), parent.GasLimit(), params.ApricotPhase1GasLimit, params.ApricotPhase1GasLimit) + gasLimit = core.CalcGasLimit(parent.GasUsed, parent.GasLimit, params.ApricotPhase1GasLimit, params.ApricotPhase1GasLimit) } } else { - if w.chainConfig.IsApricotPhase1(big.NewInt(timestamp)) { + if w.chainConfig.IsApricotPhase1(timestamp) { gasLimit = params.ApricotPhase1GasLimit } else { // The gas limit is set in phase1 to ApricotPhase1GasLimit because the ceiling and floor were set to the same value // such that the gas limit converged to it. Since this is hardbaked now, we remove the ability to configure it. 
- gasLimit = core.CalcGasLimit(parent.GasUsed(), parent.GasLimit(), params.ApricotPhase1GasLimit, params.ApricotPhase1GasLimit) + gasLimit = core.CalcGasLimit(parent.GasUsed, parent.GasLimit, params.ApricotPhase1GasLimit, params.ApricotPhase1GasLimit) } } } - num := parent.Number() header := &types.Header{ ParentHash: parent.Hash(), - Number: num.Add(num, common.Big1), + Number: new(big.Int).Add(parent.Number, common.Big1), GasLimit: gasLimit, Extra: nil, - Time: uint64(timestamp), + Time: timestamp, } + // Set BaseFee and Extra data field if we are post ApricotPhase3 - bigTimestamp := big.NewInt(timestamp) - if w.chainConfig.IsApricotPhase3(bigTimestamp) { + if w.chainConfig.IsApricotPhase3(timestamp) { var err error - header.Extra, header.BaseFee, err = dummy.CalcBaseFee(w.chainConfig, parent.Header(), uint64(timestamp)) + header.Extra, header.BaseFee, err = dummy.CalcBaseFee(w.chainConfig, parent, timestamp) if err != nil { return nil, fmt.Errorf("failed to calculate new base fee: %w", err) } } + if w.coinbase == (common.Address{}) { return nil, errors.New("cannot mine without etherbase") } @@ -177,15 +192,26 @@ func (w *worker) commitNewWork() (*types.Block, error) { return nil, fmt.Errorf("failed to prepare header for mining: %w", err) } - env, err := w.createCurrentEnvironment(parent, header, tstart) + env, err := w.createCurrentEnvironment(predicateContext, parent, header, tstart) if err != nil { return nil, fmt.Errorf("failed to create new current environment: %w", err) } - // Configure any stateful precompiles that should go into effect during this block. - w.chainConfig.CheckConfigurePrecompiles(new(big.Int).SetUint64(parent.Time()), types.NewBlockWithHeader(header), env.state) + // Ensure we always stop prefetcher after block building is complete. + defer func() { + if env.state == nil { + return + } + env.state.StopPrefetcher() + }() + // Configure any upgrades that should go into effect during this block. 
+ err = core.ApplyUpgrades(w.chainConfig, &parent.Time, types.NewBlockWithHeader(header), env.state) + if err != nil { + log.Error("failed to configure precompiles mining new block", "parent", parent.Hash(), "number", header.Number, "timestamp", header.Time, "err", err) + return nil, err + } // Fill the block with all available pending transactions. - pending := w.eth.TxPool().Pending(true) + pending := w.eth.TxPool().PendingWithBaseFee(true, header.BaseFee) // Split the pending transactions into locals and remotes localTxs := make(map[common.Address]types.Transactions) @@ -208,28 +234,51 @@ func (w *worker) commitNewWork() (*types.Block, error) { return w.commit(env) } -func (w *worker) createCurrentEnvironment(parent *types.Block, header *types.Header, tstart time.Time) (*environment, error) { - state, err := w.chain.StateAt(parent.Root()) +func (w *worker) createCurrentEnvironment(predicateContext *precompileconfig.PredicateContext, parent *types.Header, header *types.Header, tstart time.Time) (*environment, error) { + state, err := w.chain.StateAt(parent.Root) if err != nil { return nil, err } + state.StartPrefetcher("miner", w.eth.BlockChain().CacheConfig().TriePrefetcherParallelism) return &environment{ - signer: types.MakeSigner(w.chainConfig, header.Number, new(big.Int).SetUint64(header.Time)), - state: state, - parent: parent.Header(), - header: header, - tcount: 0, - gasPool: new(core.GasPool).AddGas(header.GasLimit), - start: tstart, + signer: types.MakeSigner(w.chainConfig, header.Number, header.Time), + state: state, + parent: parent, + header: header, + tcount: 0, + gasPool: new(core.GasPool).AddGas(header.GasLimit), + rules: w.chainConfig.AvalancheRules(header.Number, header.Time), + predicateContext: predicateContext, + predicateResults: predicate.NewResults(), + start: tstart, }, nil } func (w *worker) commitTransaction(env *environment, tx *types.Transaction, coinbase common.Address) ([]*types.Log, error) { - snap := env.state.Snapshot() + var ( + 
snap = env.state.Snapshot() + gp = env.gasPool.Gas() + blockContext vm.BlockContext + ) + + if env.rules.IsDurango { + results, err := core.CheckPredicates(env.rules, env.predicateContext, tx) + if err != nil { + log.Debug("Transaction predicate failed verification in miner", "tx", tx.Hash(), "err", err) + return nil, err + } + env.predicateResults.SetTxResults(tx.Hash(), results) + + blockContext = core.NewEVMBlockContextWithPredicateResults(env.header, w.chain, &coinbase, env.predicateResults) + } else { + blockContext = core.NewEVMBlockContext(env.header, w.chain, &coinbase) + } - receipt, err := core.ApplyTransaction(w.chainConfig, w.chain, &coinbase, env.gasPool, env.state, env.header, tx, &env.header.GasUsed, *w.chain.GetVMConfig()) + receipt, err := core.ApplyTransaction(w.chainConfig, w.chain, blockContext, env.gasPool, env.state, env.header, tx, &env.header.GasUsed, *w.chain.GetVMConfig()) if err != nil { env.state.RevertToSnapshot(snap) + env.gasPool.SetGas(gp) + env.predicateResults.DeleteTxResults(tx.Hash()) return nil, err } env.txs = append(env.txs, tx) @@ -241,12 +290,12 @@ func (w *worker) commitTransaction(env *environment, tx *types.Transaction, coin func (w *worker) commitTransactions(env *environment, txs *types.TransactionsByPriceAndNonce, coinbase common.Address) { for { - // If we don't have enough gas for any further transactions then we're done + // If we don't have enough gas for any further transactions then we're done. if env.gasPool.Gas() < params.TxGas { log.Trace("Not enough gas for further transactions", "have", env.gasPool, "want", params.TxGas) break } - // Retrieve the next transaction and abort if all done + // Retrieve the next transaction and abort if all done. tx := txs.Peek() if tx == nil { break @@ -261,9 +310,8 @@ func (w *worker) commitTransactions(env *environment, txs *types.TransactionsByP } // Error may be ignored here. The error has already been checked // during transaction acceptance is the transaction pool. 
- // - // We use the eip155 signer regardless of the current hf. from, _ := types.Sender(env.signer, tx) + // Check whether the tx is replay protected. If we're not in the EIP155 hf // phase, start ignoring the sender until we do. if tx.Protected() && !w.chainConfig.IsEIP155(env.header.Number) { @@ -273,39 +321,24 @@ func (w *worker) commitTransactions(env *environment, txs *types.TransactionsByP continue } // Start executing the transaction - env.state.Prepare(tx.Hash(), env.tcount) + env.state.SetTxContext(tx.Hash(), env.tcount) _, err := w.commitTransaction(env, tx, coinbase) switch { - case errors.Is(err, core.ErrGasLimitReached): - // Pop the current out-of-gas transaction without shifting in the next from the account - log.Trace("Gas limit exceeded for current block", "sender", from) - txs.Pop() - case errors.Is(err, core.ErrNonceTooLow): // New head notification data race between the transaction pool and miner, shift log.Trace("Skipping transaction with low nonce", "sender", from, "nonce", tx.Nonce()) txs.Shift() - case errors.Is(err, core.ErrNonceTooHigh): - // Reorg notification data race between the transaction pool and miner, skip account = - log.Trace("Skipping account with high nonce", "sender", from, "nonce", tx.Nonce()) - txs.Pop() - case errors.Is(err, nil): env.tcount++ txs.Shift() - case errors.Is(err, core.ErrTxTypeNotSupported): - // Pop the unsupported transaction without shifting in the next from the account - log.Trace("Skipping unsupported transaction type", "sender", from, "type", tx.Type()) - txs.Pop() - default: - // Strange error, discard the transaction and get the next in line (note, the - // nonce-too-high clause will prevent us from executing in vain). + // Transaction is regarded as invalid, drop all consecutive transactions from + // the same sender because of `nonce-too-high` clause. 
log.Debug("Transaction failed, account skipped", "hash", tx.Hash(), "err", err) - txs.Shift() + txs.Pop() } } } @@ -313,6 +346,13 @@ func (w *worker) commitTransactions(env *environment, txs *types.TransactionsByP // commit runs any post-transaction state modifications, assembles the final block // and commits new work if consensus engine is running. func (w *worker) commit(env *environment) (*types.Block, error) { + if env.rules.IsDurango { + predicateResultsBytes, err := env.predicateResults.Bytes() + if err != nil { + return nil, fmt.Errorf("failed to marshal predicate results: %w", err) + } + env.header.Extra = append(env.header.Extra, predicateResultsBytes...) + } // Deep copy receipts here to avoid interaction between different tasks. receipts := copyReceipts(env.receipts) block, err := w.engine.FinalizeAndAssemble(w.chain, env.header, env.parent, env.state, env.txs, nil, receipts) @@ -355,9 +395,12 @@ func (w *worker) handleResult(env *environment, block *types.Block, createdAt ti } logs = append(logs, receipt.Logs...) } - - log.Info("Commit new mining work", "number", block.Number(), "hash", hash, "uncles", 0, "txs", env.tcount, - "gas", block.GasUsed(), "fees", totalFees(block, receipts), "elapsed", common.PrettyDuration(time.Since(env.start))) + fees := totalFees(block, receipts) + feesInEther := new(big.Float).Quo(new(big.Float).SetInt(fees), big.NewFloat(params.Ether)) + log.Info("Commit new mining work", "number", block.Number(), "hash", hash, + "uncles", 0, "txs", env.tcount, + "gas", block.GasUsed(), "fees", feesInEther, + "elapsed", common.PrettyDuration(time.Since(env.start))) // Note: the miner no longer emits a NewMinedBlock event. Instead the caller // is responsible for running any additional verification and then inserting @@ -375,11 +418,19 @@ func copyReceipts(receipts []*types.Receipt) []*types.Receipt { return result } -// totalFees computes total consumed fees in ETH. Block transactions and receipts have to have the same order. 
-func totalFees(block *types.Block, receipts []*types.Receipt) *big.Float { +// totalFees computes total consumed miner fees in Wei. Block transactions and receipts have to have the same order. +func totalFees(block *types.Block, receipts []*types.Receipt) *big.Int { feesWei := new(big.Int) for i, tx := range block.Transactions() { - feesWei.Add(feesWei, new(big.Int).Mul(new(big.Int).SetUint64(receipts[i].GasUsed), tx.GasPrice())) + var minerFee *big.Int + if baseFee := block.BaseFee(); baseFee != nil { + // Note in coreth the coinbase payment is (baseFee + effectiveGasTip) * gasUsed + minerFee = new(big.Int).Add(baseFee, tx.EffectiveGasTipValue(baseFee)) + } else { + // Prior to activation of EIP-1559, the coinbase payment was gasPrice * gasUsed + minerFee = tx.GasPrice() + } + feesWei.Add(feesWei, new(big.Int).Mul(new(big.Int).SetUint64(receipts[i].GasUsed), minerFee)) } - return new(big.Float).Quo(new(big.Float).SetInt(feesWei), new(big.Float).SetInt(big.NewInt(params.Ether))) + return feesWei } diff --git a/coreth/node/config.go b/coreth/node/config.go index b5664964..83addddb 100644 --- a/coreth/node/config.go +++ b/coreth/node/config.go @@ -34,7 +34,6 @@ import ( "github.com/ava-labs/coreth/accounts" "github.com/ava-labs/coreth/accounts/external" "github.com/ava-labs/coreth/accounts/keystore" - "github.com/ava-labs/coreth/rpc" "github.com/ethereum/go-ethereum/log" ) @@ -51,7 +50,7 @@ type Config struct { // is created by New and destroyed when the node is stopped. KeyStoreDir string `toml:",omitempty"` - // ExternalSigner specifies an external URI for a clef-type signer + // ExternalSigner specifies an external URI for a clef-type signer. ExternalSigner string `toml:",omitempty"` // UseLightweightKDF lowers the memory and CPU requirements of the key store @@ -61,115 +60,14 @@ type Config struct { // InsecureUnlockAllowed allows user to unlock accounts in unsafe http environment. 
InsecureUnlockAllowed bool `toml:",omitempty"` - // HTTPHost is the host interface on which to start the HTTP RPC server. If this - // field is empty, no HTTP API endpoint will be started. - HTTPHost string - - // HTTPPort is the TCP port number on which to start the HTTP RPC server. The - // default zero value is/ valid and will pick a port number randomly (useful - // for ephemeral nodes). - HTTPPort int `toml:",omitempty"` - - // HTTPCors is the Cross-Origin Resource Sharing header to send to requesting - // clients. Please be aware that CORS is a browser enforced security, it's fully - // useless for custom HTTP clients. - HTTPCors []string `toml:",omitempty"` - - // HTTPVirtualHosts is the list of virtual hostnames which are allowed on incoming requests. - // This is by default {'localhost'}. Using this prevents attacks like - // DNS rebinding, which bypasses SOP by simply masquerading as being within the same - // origin. These attacks do not utilize CORS, since they are not cross-domain. - // By explicitly checking the Host-header, the server will not allow requests - // made against the server with a malicious host domain. - // Requests using ip address directly are not affected - HTTPVirtualHosts []string `toml:",omitempty"` - - // HTTPModules is a list of API modules to expose via the HTTP RPC interface. - // If the module list is empty, all RPC API endpoints designated public will be - // exposed. - HTTPModules []string - - // HTTPTimeouts allows for customization of the timeout values used by the HTTP RPC - // interface. - HTTPTimeouts rpc.HTTPTimeouts - - // WSHost is the host interface on which to start the websocket RPC server. If - // this field is empty, no websocket API endpoint will be started. - WSHost string - - // WSPort is the TCP port number on which to start the websocket RPC server. The - // default zero value is/ valid and will pick a port number randomly (useful for - // ephemeral nodes). 
- WSPort int `toml:",omitempty"` - - // WSOrigins is the list of domain to accept websocket requests from. Please be - // aware that the server can only act upon the HTTP request the client sends and - // cannot verify the validity of the request header. - WSOrigins []string `toml:",omitempty"` - - // WSModules is a list of API modules to expose via the websocket RPC interface. - // If the module list is empty, all RPC API endpoints designated public will be - // exposed. - WSModules []string - - // WSExposeAll exposes all API modules via the WebSocket RPC interface rather - // than just the public ones. - // - // *WARNING* Only set this if the node is running in a trusted network, exposing - // private APIs to untrusted users is a major security risk. - WSExposeAll bool `toml:",omitempty"` - - // GraphQLCors is the Cross-Origin Resource Sharing header to send to requesting - // clients. Please be aware that CORS is a browser enforced security, it's fully - // useless for custom HTTP clients. - GraphQLCors []string `toml:",omitempty"` - - // GraphQLVirtualHosts is the list of virtual hostnames which are allowed on incoming requests. - // This is by default {'localhost'}. Using this prevents attacks like - // DNS rebinding, which bypasses SOP by simply masquerading as being within the same - // origin. These attacks do not utilize CORS, since they are not cross-domain. - // By explicitly checking the Host-header, the server will not allow requests - // made against the server with a malicious host domain. - // Requests using ip address directly are not affected - GraphQLVirtualHosts []string `toml:",omitempty"` - CorethVersion string } -// HTTPEndpoint resolves an HTTP endpoint based on the configured host interface -// and port parameters. -func (c *Config) HTTPEndpoint() string { - if c.HTTPHost == "" { - return "" - } - return fmt.Sprintf("%s:%d", c.HTTPHost, c.HTTPPort) -} - -// DefaultHTTPEndpoint returns the HTTP endpoint used by default. 
-func DefaultHTTPEndpoint() string { - config := &Config{HTTPHost: DefaultHTTPHost, HTTPPort: DefaultHTTPPort} - return config.HTTPEndpoint() -} - -// WSEndpoint resolves a websocket endpoint based on the configured host interface -// and port parameters. -func (c *Config) WSEndpoint() string { - if c.WSHost == "" { - return "" - } - return fmt.Sprintf("%s:%d", c.WSHost, c.WSPort) -} - -// DefaultWSEndpoint returns the websocket endpoint used by default. -func DefaultWSEndpoint() string { - config := &Config{WSHost: DefaultWSHost, WSPort: DefaultWSPort} - return config.WSEndpoint() -} - // ExtRPCEnabled returns the indicator whether node enables the external // RPC(http, ws or graphql). func (c *Config) ExtRPCEnabled() bool { - return c.HTTPHost != "" || c.WSHost != "" + // In avalanche, we always disable the external RPC. + return false } // KeyDirConfig determines the settings for keydirectory @@ -187,10 +85,10 @@ func (c *Config) KeyDirConfig() (string, error) { return keydir, err } -// getKeyStoreDir retrieves the key directory and will create +// GetKeyStoreDir retrieves the key directory and will create // and ephemeral one if necessary. 
-func getKeyStoreDir(conf *Config) (string, bool, error) { - keydir, err := conf.KeyDirConfig() +func (c *Config) GetKeyStoreDir() (string, bool, error) { + keydir, err := c.KeyDirConfig() if err != nil { return "", false, err } @@ -219,7 +117,7 @@ func makeAccountManager(conf *Config) (*accounts.Manager, error) { scryptP = keystore.LightScryptP } - keydir, _, err := getKeyStoreDir(conf) + keydir, _, err := conf.GetKeyStoreDir() if err != nil { return nil, err } diff --git a/coreth/params/avalanche_params.go b/coreth/params/avalanche_params.go index 61a71ff0..c71f5af8 100644 --- a/coreth/params/avalanche_params.go +++ b/coreth/params/avalanche_params.go @@ -37,22 +37,18 @@ const ( SgbApricotPhase5TargetGas uint64 = 150_000_000 ApricotPhase5BaseFeeChangeDenominator uint64 = 36 + DynamicFeeExtraDataSize = 80 + RollupWindow uint64 = 10 + // The base cost to charge per atomic transaction. Added in Apricot Phase 5. AtomicTxBaseCost uint64 = 10_000 ) -// Constants for message sizes -const ( - MaxCodeHashesPerRequest = 5 -) - -var ( - // The atomic gas limit specifies the maximum amount of gas that can be consumed by the atomic - // transactions included in a block and is enforced as of ApricotPhase5. Prior to ApricotPhase5, - // a block included a single atomic transaction. As of ApricotPhase5, each block can include a set - // of atomic transactions where the cumulative atomic gas consumed is capped by the atomic gas limit, - // similar to the block gas limit. - // - // This value must always remain <= MaxUint64. - AtomicGasLimit *big.Int = big.NewInt(100_000) -) +// The atomic gas limit specifies the maximum amount of gas that can be consumed by the atomic +// transactions included in a block and is enforced as of ApricotPhase5. Prior to ApricotPhase5, +// a block included a single atomic transaction. 
As of ApricotPhase5, each block can include a set +// of atomic transactions where the cumulative atomic gas consumed is capped by the atomic gas limit, +// similar to the block gas limit. +// +// This value must always remain <= MaxUint64. +var AtomicGasLimit *big.Int = big.NewInt(100_000) diff --git a/coreth/params/config.go b/coreth/params/config.go index 36566f50..25e2a610 100644 --- a/coreth/params/config.go +++ b/coreth/params/config.go @@ -32,7 +32,10 @@ import ( "math/big" "time" - "github.com/ava-labs/coreth/precompile" + "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/coreth/constants" + "github.com/ava-labs/coreth/precompile/modules" + "github.com/ava-labs/coreth/precompile/precompileconfig" "github.com/ava-labs/coreth/utils" "github.com/ethereum/go-ethereum/common" ) @@ -51,7 +54,6 @@ var ( CostonChainID = big.NewInt(16) // https://github.com/ethereum-lists/chains/blob/master/_data/chains/eip155-16.json SongbirdChainID = big.NewInt(19) // https://github.com/ethereum-lists/chains/blob/master/_data/chains/eip155-19.json CostwoChainID = big.NewInt(114) // TO-DO: Register with https://github.com/ethereum-lists - StagingChainID = big.NewInt(161) LocalFlareChainID = big.NewInt(162) LocalChainID = big.NewInt(4294967295) @@ -60,13 +62,28 @@ var ( var ( // AvalancheMainnetChainConfig is the configuration for Avalanche Main Network - AvalancheMainnetChainConfig = &ChainConfig{ - ChainID: AvalancheMainnetChainID, + AvalancheMainnetChainConfig = getChainConfig(constants.MainnetID, AvalancheMainnetChainID) + + // AvalancheLocalChainConfig is the configuration for the Avalanche Local Network + AvalancheLocalChainConfig = getChainConfig(constants.LocalID, AvalancheLocalChainID) + + // Configuration for Flare main, test (Costwo) and local networks + FlareChainConfig = getChainConfig(constants.FlareID, FlareChainID) + CostwoChainConfig = getChainConfig(constants.CostwoID, CostwoChainID) + LocalFlareChainConfig = getChainConfig(constants.LocalFlareID, 
LocalFlareChainID) + + // Configuration for Songbird main, test (Coston) and local networks + SongbirdChainConfig = getChainConfig(constants.SongbirdID, SongbirdChainID) + CostonChainConfig = getChainConfig(constants.CostonID, CostonChainID) + LocalChainConfig = getChainConfig(constants.LocalID, LocalChainID) + + TestChainConfig = &ChainConfig{ + AvalancheContext: AvalancheContext{utils.TestSnowContext()}, + ChainID: big.NewInt(1), HomesteadBlock: big.NewInt(0), - DAOForkBlock: big.NewInt(0), - DAOForkSupport: true, + DAOForkBlock: nil, + DAOForkSupport: false, EIP150Block: big.NewInt(0), - EIP150Hash: common.HexToHash("0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0"), EIP155Block: big.NewInt(0), EIP158Block: big.NewInt(0), ByzantiumBlock: big.NewInt(0), @@ -74,27 +91,26 @@ var ( PetersburgBlock: big.NewInt(0), IstanbulBlock: big.NewInt(0), MuirGlacierBlock: big.NewInt(0), - ApricotPhase1BlockTimestamp: big.NewInt(time.Date(2021, time.March, 31, 14, 0, 0, 0, time.UTC).Unix()), - ApricotPhase2BlockTimestamp: big.NewInt(time.Date(2021, time.May, 10, 11, 0, 0, 0, time.UTC).Unix()), - ApricotPhase3BlockTimestamp: big.NewInt(time.Date(2021, time.August, 24, 14, 0, 0, 0, time.UTC).Unix()), - ApricotPhase4BlockTimestamp: big.NewInt(time.Date(2021, time.September, 22, 21, 0, 0, 0, time.UTC).Unix()), - ApricotPhase5BlockTimestamp: big.NewInt(time.Date(2021, time.December, 2, 18, 0, 0, 0, time.UTC).Unix()), - ApricotPhasePre6BlockTimestamp: big.NewInt(time.Date(2022, time.September, 5, 1, 30, 0, 0, time.UTC).Unix()), - ApricotPhase6BlockTimestamp: big.NewInt(time.Date(2022, time.September, 6, 20, 0, 0, 0, time.UTC).Unix()), - ApricotPhasePost6BlockTimestamp: big.NewInt(time.Date(2022, time.September, 7, 3, 0, 0, 0, time.UTC).Unix()), - BanffBlockTimestamp: big.NewInt(time.Date(2022, time.October, 18, 16, 0, 0, 0, time.UTC).Unix()), - CortinaBlockTimestamp: big.NewInt(time.Date(2023, time.April, 25, 15, 0, 0, 0, time.UTC).Unix()), - // TODO Add DUpgrade 
timestamp - } - - // AvalancheFujiChainConfig is the configuration for the Fuji Test Network - AvalancheFujiChainConfig = &ChainConfig{ - ChainID: AvalancheFujiChainID, + ApricotPhase1BlockTimestamp: utils.NewUint64(0), + ApricotPhase2BlockTimestamp: utils.NewUint64(0), + ApricotPhase3BlockTimestamp: utils.NewUint64(0), + ApricotPhase4BlockTimestamp: utils.NewUint64(0), + ApricotPhase5BlockTimestamp: utils.NewUint64(0), + ApricotPhasePre6BlockTimestamp: utils.NewUint64(0), + ApricotPhase6BlockTimestamp: utils.NewUint64(0), + ApricotPhasePost6BlockTimestamp: utils.NewUint64(0), + BanffBlockTimestamp: utils.NewUint64(0), + CortinaBlockTimestamp: utils.NewUint64(0), + DurangoBlockTimestamp: utils.NewUint64(0), + } + + TestLaunchConfig = &ChainConfig{ + AvalancheContext: AvalancheContext{utils.TestSnowContext()}, + ChainID: big.NewInt(1), HomesteadBlock: big.NewInt(0), - DAOForkBlock: big.NewInt(0), - DAOForkSupport: true, + DAOForkBlock: nil, + DAOForkSupport: false, EIP150Block: big.NewInt(0), - EIP150Hash: common.HexToHash("0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0"), EIP155Block: big.NewInt(0), EIP158Block: big.NewInt(0), ByzantiumBlock: big.NewInt(0), @@ -102,27 +118,26 @@ var ( PetersburgBlock: big.NewInt(0), IstanbulBlock: big.NewInt(0), MuirGlacierBlock: big.NewInt(0), - ApricotPhase1BlockTimestamp: big.NewInt(time.Date(2021, time.March, 26, 14, 0, 0, 0, time.UTC).Unix()), - ApricotPhase2BlockTimestamp: big.NewInt(time.Date(2021, time.May, 5, 14, 0, 0, 0, time.UTC).Unix()), - ApricotPhase3BlockTimestamp: big.NewInt(time.Date(2021, time.August, 16, 19, 0, 0, 0, time.UTC).Unix()), - ApricotPhase4BlockTimestamp: big.NewInt(time.Date(2021, time.September, 16, 21, 0, 0, 0, time.UTC).Unix()), - ApricotPhase5BlockTimestamp: big.NewInt(time.Date(2021, time.November, 24, 15, 0, 0, 0, time.UTC).Unix()), - ApricotPhasePre6BlockTimestamp: big.NewInt(time.Date(2022, time.September, 6, 20, 0, 0, 0, time.UTC).Unix()), - ApricotPhase6BlockTimestamp: 
big.NewInt(time.Date(2022, time.September, 6, 20, 0, 0, 0, time.UTC).Unix()), - ApricotPhasePost6BlockTimestamp: big.NewInt(time.Date(2022, time.September, 7, 6, 0, 0, 0, time.UTC).Unix()), - BanffBlockTimestamp: big.NewInt(time.Date(2022, time.October, 3, 14, 0, 0, 0, time.UTC).Unix()), - CortinaBlockTimestamp: big.NewInt(time.Date(2023, time.April, 6, 15, 0, 0, 0, time.UTC).Unix()), - // TODO Add DUpgrade timestamp + ApricotPhase1BlockTimestamp: nil, + ApricotPhase2BlockTimestamp: nil, + ApricotPhase3BlockTimestamp: nil, + ApricotPhase4BlockTimestamp: nil, + ApricotPhase5BlockTimestamp: nil, + ApricotPhasePre6BlockTimestamp: nil, + ApricotPhase6BlockTimestamp: nil, + ApricotPhasePost6BlockTimestamp: nil, + BanffBlockTimestamp: nil, + CortinaBlockTimestamp: nil, + DurangoBlockTimestamp: nil, } - // AvalancheLocalChainConfig is the configuration for the Avalanche Local Network - AvalancheLocalChainConfig = &ChainConfig{ - ChainID: AvalancheLocalChainID, + TestApricotPhase1Config = &ChainConfig{ + AvalancheContext: AvalancheContext{utils.TestSnowContext()}, + ChainID: big.NewInt(1), HomesteadBlock: big.NewInt(0), - DAOForkBlock: big.NewInt(0), - DAOForkSupport: true, + DAOForkBlock: nil, + DAOForkSupport: false, EIP150Block: big.NewInt(0), - EIP150Hash: common.HexToHash("0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0"), EIP155Block: big.NewInt(0), EIP158Block: big.NewInt(0), ByzantiumBlock: big.NewInt(0), @@ -130,25 +145,26 @@ var ( PetersburgBlock: big.NewInt(0), IstanbulBlock: big.NewInt(0), MuirGlacierBlock: big.NewInt(0), - ApricotPhase1BlockTimestamp: big.NewInt(0), - ApricotPhase2BlockTimestamp: big.NewInt(0), - ApricotPhase3BlockTimestamp: big.NewInt(0), - ApricotPhase4BlockTimestamp: big.NewInt(0), - ApricotPhase5BlockTimestamp: big.NewInt(0), - ApricotPhasePre6BlockTimestamp: big.NewInt(0), - ApricotPhase6BlockTimestamp: big.NewInt(0), - ApricotPhasePost6BlockTimestamp: big.NewInt(0), - BanffBlockTimestamp: big.NewInt(0), - 
CortinaBlockTimestamp: big.NewInt(0), - } - - FlareChainConfig = &ChainConfig{ - ChainID: FlareChainID, + ApricotPhase1BlockTimestamp: utils.NewUint64(0), + ApricotPhase2BlockTimestamp: nil, + ApricotPhase3BlockTimestamp: nil, + ApricotPhase4BlockTimestamp: nil, + ApricotPhase5BlockTimestamp: nil, + ApricotPhasePre6BlockTimestamp: nil, + ApricotPhase6BlockTimestamp: nil, + ApricotPhasePost6BlockTimestamp: nil, + BanffBlockTimestamp: nil, + CortinaBlockTimestamp: nil, + DurangoBlockTimestamp: nil, + } + + TestApricotPhase2Config = &ChainConfig{ + AvalancheContext: AvalancheContext{utils.TestSnowContext()}, + ChainID: big.NewInt(1), HomesteadBlock: big.NewInt(0), - DAOForkBlock: big.NewInt(0), - DAOForkSupport: true, + DAOForkBlock: nil, + DAOForkSupport: false, EIP150Block: big.NewInt(0), - EIP150Hash: common.HexToHash("0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0"), EIP155Block: big.NewInt(0), EIP158Block: big.NewInt(0), ByzantiumBlock: big.NewInt(0), @@ -156,25 +172,26 @@ var ( PetersburgBlock: big.NewInt(0), IstanbulBlock: big.NewInt(0), MuirGlacierBlock: big.NewInt(0), - ApricotPhase1BlockTimestamp: big.NewInt(0), - ApricotPhase2BlockTimestamp: big.NewInt(0), - ApricotPhase3BlockTimestamp: big.NewInt(0), - ApricotPhase4BlockTimestamp: big.NewInt(0), - ApricotPhase5BlockTimestamp: big.NewInt(0), - ApricotPhasePre6BlockTimestamp: big.NewInt(time.Date(2024, time.December, 17, 12, 0, 0, 0, time.UTC).Unix()), - ApricotPhase6BlockTimestamp: big.NewInt(time.Date(2024, time.December, 17, 13, 0, 0, 0, time.UTC).Unix()), - ApricotPhasePost6BlockTimestamp: big.NewInt(time.Date(2024, time.December, 17, 14, 0, 0, 0, time.UTC).Unix()), - BanffBlockTimestamp: big.NewInt(time.Date(2024, time.December, 17, 15, 0, 0, 0, time.UTC).Unix()), - CortinaBlockTimestamp: big.NewInt(time.Date(2025, time.May, 13, 12, 0, 0, 0, time.UTC).Unix()), - } - - CostwoChainConfig = &ChainConfig{ - ChainID: CostwoChainID, + ApricotPhase1BlockTimestamp: utils.NewUint64(0), + 
ApricotPhase2BlockTimestamp: utils.NewUint64(0), + ApricotPhase3BlockTimestamp: nil, + ApricotPhase4BlockTimestamp: nil, + ApricotPhase5BlockTimestamp: nil, + ApricotPhasePre6BlockTimestamp: nil, + ApricotPhase6BlockTimestamp: nil, + ApricotPhasePost6BlockTimestamp: nil, + BanffBlockTimestamp: nil, + CortinaBlockTimestamp: nil, + DurangoBlockTimestamp: nil, + } + + TestApricotPhase3Config = &ChainConfig{ + AvalancheContext: AvalancheContext{utils.TestSnowContext()}, + ChainID: big.NewInt(1), HomesteadBlock: big.NewInt(0), - DAOForkBlock: big.NewInt(0), - DAOForkSupport: true, + DAOForkBlock: nil, + DAOForkSupport: false, EIP150Block: big.NewInt(0), - EIP150Hash: common.HexToHash("0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0"), EIP155Block: big.NewInt(0), EIP158Block: big.NewInt(0), ByzantiumBlock: big.NewInt(0), @@ -182,25 +199,26 @@ var ( PetersburgBlock: big.NewInt(0), IstanbulBlock: big.NewInt(0), MuirGlacierBlock: big.NewInt(0), - ApricotPhase1BlockTimestamp: big.NewInt(0), - ApricotPhase2BlockTimestamp: big.NewInt(0), - ApricotPhase3BlockTimestamp: big.NewInt(0), - ApricotPhase4BlockTimestamp: big.NewInt(0), - ApricotPhase5BlockTimestamp: big.NewInt(0), - ApricotPhasePre6BlockTimestamp: big.NewInt(time.Date(2024, time.November, 26, 12, 0, 0, 0, time.UTC).Unix()), - ApricotPhase6BlockTimestamp: big.NewInt(time.Date(2024, time.November, 26, 13, 0, 0, 0, time.UTC).Unix()), - ApricotPhasePost6BlockTimestamp: big.NewInt(time.Date(2024, time.November, 26, 14, 0, 0, 0, time.UTC).Unix()), - BanffBlockTimestamp: big.NewInt(time.Date(2024, time.November, 26, 15, 0, 0, 0, time.UTC).Unix()), - CortinaBlockTimestamp: big.NewInt(time.Date(2025, time.April, 8, 12, 0, 0, 0, time.UTC).Unix()), - } - - StagingChainConfig = &ChainConfig{ - ChainID: StagingChainID, + ApricotPhase1BlockTimestamp: utils.NewUint64(0), + ApricotPhase2BlockTimestamp: utils.NewUint64(0), + ApricotPhase3BlockTimestamp: utils.NewUint64(0), + ApricotPhase4BlockTimestamp: nil, + 
ApricotPhase5BlockTimestamp: nil, + ApricotPhasePre6BlockTimestamp: nil, + ApricotPhase6BlockTimestamp: nil, + ApricotPhasePost6BlockTimestamp: nil, + BanffBlockTimestamp: nil, + CortinaBlockTimestamp: nil, + DurangoBlockTimestamp: nil, + } + + TestApricotPhase4Config = &ChainConfig{ + AvalancheContext: AvalancheContext{utils.TestSnowContext()}, + ChainID: big.NewInt(1), HomesteadBlock: big.NewInt(0), - DAOForkBlock: big.NewInt(0), - DAOForkSupport: true, + DAOForkBlock: nil, + DAOForkSupport: false, EIP150Block: big.NewInt(0), - EIP150Hash: common.HexToHash("0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0"), EIP155Block: big.NewInt(0), EIP158Block: big.NewInt(0), ByzantiumBlock: big.NewInt(0), @@ -208,25 +226,26 @@ var ( PetersburgBlock: big.NewInt(0), IstanbulBlock: big.NewInt(0), MuirGlacierBlock: big.NewInt(0), - ApricotPhase1BlockTimestamp: big.NewInt(0), - ApricotPhase2BlockTimestamp: big.NewInt(0), - ApricotPhase3BlockTimestamp: big.NewInt(0), - ApricotPhase4BlockTimestamp: big.NewInt(0), - ApricotPhase5BlockTimestamp: big.NewInt(0), - ApricotPhasePre6BlockTimestamp: big.NewInt(0), - ApricotPhase6BlockTimestamp: big.NewInt(0), - ApricotPhasePost6BlockTimestamp: big.NewInt(0), - BanffBlockTimestamp: big.NewInt(0), - CortinaBlockTimestamp: big.NewInt(0), - } - - LocalFlareChainConfig = &ChainConfig{ - ChainID: LocalFlareChainID, + ApricotPhase1BlockTimestamp: utils.NewUint64(0), + ApricotPhase2BlockTimestamp: utils.NewUint64(0), + ApricotPhase3BlockTimestamp: utils.NewUint64(0), + ApricotPhase4BlockTimestamp: utils.NewUint64(0), + ApricotPhase5BlockTimestamp: nil, + ApricotPhasePre6BlockTimestamp: nil, + ApricotPhase6BlockTimestamp: nil, + ApricotPhasePost6BlockTimestamp: nil, + BanffBlockTimestamp: nil, + CortinaBlockTimestamp: nil, + DurangoBlockTimestamp: nil, + } + + TestApricotPhase5Config = &ChainConfig{ + AvalancheContext: AvalancheContext{utils.TestSnowContext()}, + ChainID: big.NewInt(1), HomesteadBlock: big.NewInt(0), - 
DAOForkBlock: big.NewInt(0), - DAOForkSupport: true, + DAOForkBlock: nil, + DAOForkSupport: false, EIP150Block: big.NewInt(0), - EIP150Hash: common.HexToHash("0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0"), EIP155Block: big.NewInt(0), EIP158Block: big.NewInt(0), ByzantiumBlock: big.NewInt(0), @@ -234,26 +253,26 @@ var ( PetersburgBlock: big.NewInt(0), IstanbulBlock: big.NewInt(0), MuirGlacierBlock: big.NewInt(0), - ApricotPhase1BlockTimestamp: big.NewInt(0), - ApricotPhase2BlockTimestamp: big.NewInt(0), - ApricotPhase3BlockTimestamp: big.NewInt(0), - ApricotPhase4BlockTimestamp: big.NewInt(0), - ApricotPhase5BlockTimestamp: big.NewInt(0), - ApricotPhasePre6BlockTimestamp: big.NewInt(0), - ApricotPhase6BlockTimestamp: big.NewInt(0), - ApricotPhasePost6BlockTimestamp: big.NewInt(0), - BanffBlockTimestamp: big.NewInt(0), - CortinaBlockTimestamp: big.NewInt(0), - } - - // CostonChainConfig is the configuration for the Coston test network. - CostonChainConfig = &ChainConfig{ - ChainID: CostonChainID, + ApricotPhase1BlockTimestamp: utils.NewUint64(0), + ApricotPhase2BlockTimestamp: utils.NewUint64(0), + ApricotPhase3BlockTimestamp: utils.NewUint64(0), + ApricotPhase4BlockTimestamp: utils.NewUint64(0), + ApricotPhase5BlockTimestamp: utils.NewUint64(0), + ApricotPhasePre6BlockTimestamp: nil, + ApricotPhase6BlockTimestamp: nil, + ApricotPhasePost6BlockTimestamp: nil, + BanffBlockTimestamp: nil, + CortinaBlockTimestamp: nil, + DurangoBlockTimestamp: nil, + } + + TestApricotPhasePre6Config = &ChainConfig{ + AvalancheContext: AvalancheContext{utils.TestSnowContext()}, + ChainID: big.NewInt(1), HomesteadBlock: big.NewInt(0), - DAOForkBlock: big.NewInt(0), - DAOForkSupport: true, + DAOForkBlock: nil, + DAOForkSupport: false, EIP150Block: big.NewInt(0), - EIP150Hash: common.HexToHash("0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0"), EIP155Block: big.NewInt(0), EIP158Block: big.NewInt(0), ByzantiumBlock: big.NewInt(0), @@ -261,27 +280,26 
@@ var ( PetersburgBlock: big.NewInt(0), IstanbulBlock: big.NewInt(0), MuirGlacierBlock: big.NewInt(0), - ApricotPhase1BlockTimestamp: big.NewInt(time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC).Unix()), - ApricotPhase2BlockTimestamp: big.NewInt(time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC).Unix()), - ApricotPhase3BlockTimestamp: big.NewInt(time.Date(2022, time.February, 25, 14, 0, 0, 0, time.UTC).Unix()), - ApricotPhase4BlockTimestamp: big.NewInt(time.Date(2022, time.February, 25, 15, 0, 0, 0, time.UTC).Unix()), - ApricotPhase5BlockTimestamp: big.NewInt(time.Date(2022, time.February, 25, 16, 0, 0, 0, time.UTC).Unix()), - SongbirdTransitionTimestamp: big.NewInt(time.Date(2024, time.July, 23, 12, 0, 0, 0, time.UTC).Unix()), - ApricotPhasePre6BlockTimestamp: big.NewInt(time.Date(2025, time.January, 7, 12, 0, 0, 0, time.UTC).Unix()), - ApricotPhase6BlockTimestamp: big.NewInt(time.Date(2025, time.January, 7, 13, 0, 0, 0, time.UTC).Unix()), - ApricotPhasePost6BlockTimestamp: big.NewInt(time.Date(2025, time.January, 7, 14, 0, 0, 0, time.UTC).Unix()), - BanffBlockTimestamp: big.NewInt(time.Date(2025, time.January, 7, 15, 0, 0, 0, time.UTC).Unix()), - CortinaBlockTimestamp: big.NewInt(time.Date(2025, time.March, 27, 13, 0, 0, 0, time.UTC).Unix()), - } - - // LocalChainConfig is the configuration for the Songbird Local network. 
- LocalChainConfig = &ChainConfig{ - ChainID: LocalChainID, + ApricotPhase1BlockTimestamp: utils.NewUint64(0), + ApricotPhase2BlockTimestamp: utils.NewUint64(0), + ApricotPhase3BlockTimestamp: utils.NewUint64(0), + ApricotPhase4BlockTimestamp: utils.NewUint64(0), + ApricotPhase5BlockTimestamp: utils.NewUint64(0), + ApricotPhasePre6BlockTimestamp: utils.NewUint64(0), + ApricotPhase6BlockTimestamp: nil, + ApricotPhasePost6BlockTimestamp: nil, + BanffBlockTimestamp: nil, + CortinaBlockTimestamp: nil, + DurangoBlockTimestamp: nil, + } + + TestApricotPhase6Config = &ChainConfig{ + AvalancheContext: AvalancheContext{utils.TestSnowContext()}, + ChainID: big.NewInt(1), HomesteadBlock: big.NewInt(0), - DAOForkBlock: big.NewInt(0), - DAOForkSupport: true, + DAOForkBlock: nil, + DAOForkSupport: false, EIP150Block: big.NewInt(0), - EIP150Hash: common.HexToHash("0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0"), EIP155Block: big.NewInt(0), EIP158Block: big.NewInt(0), ByzantiumBlock: big.NewInt(0), @@ -289,27 +307,136 @@ var ( PetersburgBlock: big.NewInt(0), IstanbulBlock: big.NewInt(0), MuirGlacierBlock: big.NewInt(0), - ApricotPhase1BlockTimestamp: big.NewInt(0), - ApricotPhase2BlockTimestamp: big.NewInt(0), - ApricotPhase3BlockTimestamp: big.NewInt(0), - ApricotPhase4BlockTimestamp: big.NewInt(0), - ApricotPhase5BlockTimestamp: big.NewInt(0), - SongbirdTransitionTimestamp: big.NewInt(0), - ApricotPhasePre6BlockTimestamp: big.NewInt(0), - ApricotPhase6BlockTimestamp: big.NewInt(0), - ApricotPhasePost6BlockTimestamp: big.NewInt(0), - BanffBlockTimestamp: big.NewInt(0), - CortinaBlockTimestamp: big.NewInt(0), - } - - // SongbirdChainConfig is the configuration for the Songbird canary network. 
- SongbirdChainConfig = &ChainConfig{ - ChainID: SongbirdChainID, + ApricotPhase1BlockTimestamp: utils.NewUint64(0), + ApricotPhase2BlockTimestamp: utils.NewUint64(0), + ApricotPhase3BlockTimestamp: utils.NewUint64(0), + ApricotPhase4BlockTimestamp: utils.NewUint64(0), + ApricotPhase5BlockTimestamp: utils.NewUint64(0), + ApricotPhasePre6BlockTimestamp: utils.NewUint64(0), + ApricotPhase6BlockTimestamp: utils.NewUint64(0), + ApricotPhasePost6BlockTimestamp: nil, + BanffBlockTimestamp: nil, + CortinaBlockTimestamp: nil, + DurangoBlockTimestamp: nil, + } + + TestApricotPhasePost6Config = &ChainConfig{ + AvalancheContext: AvalancheContext{utils.TestSnowContext()}, + ChainID: big.NewInt(1), + HomesteadBlock: big.NewInt(0), + DAOForkBlock: nil, + DAOForkSupport: false, + EIP150Block: big.NewInt(0), + EIP155Block: big.NewInt(0), + EIP158Block: big.NewInt(0), + ByzantiumBlock: big.NewInt(0), + ConstantinopleBlock: big.NewInt(0), + PetersburgBlock: big.NewInt(0), + IstanbulBlock: big.NewInt(0), + MuirGlacierBlock: big.NewInt(0), + ApricotPhase1BlockTimestamp: utils.NewUint64(0), + ApricotPhase2BlockTimestamp: utils.NewUint64(0), + ApricotPhase3BlockTimestamp: utils.NewUint64(0), + ApricotPhase4BlockTimestamp: utils.NewUint64(0), + ApricotPhase5BlockTimestamp: utils.NewUint64(0), + ApricotPhasePre6BlockTimestamp: utils.NewUint64(0), + ApricotPhase6BlockTimestamp: utils.NewUint64(0), + ApricotPhasePost6BlockTimestamp: utils.NewUint64(0), + BanffBlockTimestamp: nil, + CortinaBlockTimestamp: nil, + DurangoBlockTimestamp: nil, + } + + TestBanffChainConfig = &ChainConfig{ + AvalancheContext: AvalancheContext{utils.TestSnowContext()}, + ChainID: big.NewInt(1), + HomesteadBlock: big.NewInt(0), + DAOForkBlock: nil, + DAOForkSupport: false, + EIP150Block: big.NewInt(0), + EIP155Block: big.NewInt(0), + EIP158Block: big.NewInt(0), + ByzantiumBlock: big.NewInt(0), + ConstantinopleBlock: big.NewInt(0), + PetersburgBlock: big.NewInt(0), + IstanbulBlock: big.NewInt(0), + MuirGlacierBlock: 
big.NewInt(0), + ApricotPhase1BlockTimestamp: utils.NewUint64(0), + ApricotPhase2BlockTimestamp: utils.NewUint64(0), + ApricotPhase3BlockTimestamp: utils.NewUint64(0), + ApricotPhase4BlockTimestamp: utils.NewUint64(0), + ApricotPhase5BlockTimestamp: utils.NewUint64(0), + ApricotPhasePre6BlockTimestamp: utils.NewUint64(0), + ApricotPhase6BlockTimestamp: utils.NewUint64(0), + ApricotPhasePost6BlockTimestamp: utils.NewUint64(0), + BanffBlockTimestamp: utils.NewUint64(0), + CortinaBlockTimestamp: nil, + DurangoBlockTimestamp: nil, + } + + TestCortinaChainConfig = &ChainConfig{ + AvalancheContext: AvalancheContext{utils.TestSnowContext()}, + ChainID: big.NewInt(1), + HomesteadBlock: big.NewInt(0), + DAOForkBlock: nil, + DAOForkSupport: false, + EIP150Block: big.NewInt(0), + EIP155Block: big.NewInt(0), + EIP158Block: big.NewInt(0), + ByzantiumBlock: big.NewInt(0), + ConstantinopleBlock: big.NewInt(0), + PetersburgBlock: big.NewInt(0), + IstanbulBlock: big.NewInt(0), + MuirGlacierBlock: big.NewInt(0), + ApricotPhase1BlockTimestamp: utils.NewUint64(0), + ApricotPhase2BlockTimestamp: utils.NewUint64(0), + ApricotPhase3BlockTimestamp: utils.NewUint64(0), + ApricotPhase4BlockTimestamp: utils.NewUint64(0), + ApricotPhase5BlockTimestamp: utils.NewUint64(0), + ApricotPhasePre6BlockTimestamp: utils.NewUint64(0), + ApricotPhase6BlockTimestamp: utils.NewUint64(0), + ApricotPhasePost6BlockTimestamp: utils.NewUint64(0), + BanffBlockTimestamp: utils.NewUint64(0), + CortinaBlockTimestamp: utils.NewUint64(0), + DurangoBlockTimestamp: nil, + } + + TestDurangoChainConfig = &ChainConfig{ + AvalancheContext: AvalancheContext{utils.TestSnowContext()}, + ChainID: big.NewInt(1), + HomesteadBlock: big.NewInt(0), + DAOForkBlock: nil, + DAOForkSupport: false, + EIP150Block: big.NewInt(0), + EIP155Block: big.NewInt(0), + EIP158Block: big.NewInt(0), + ByzantiumBlock: big.NewInt(0), + ConstantinopleBlock: big.NewInt(0), + PetersburgBlock: big.NewInt(0), + IstanbulBlock: big.NewInt(0), + 
MuirGlacierBlock: big.NewInt(0), + ApricotPhase1BlockTimestamp: utils.NewUint64(0), + ApricotPhase2BlockTimestamp: utils.NewUint64(0), + ApricotPhase3BlockTimestamp: utils.NewUint64(0), + ApricotPhase4BlockTimestamp: utils.NewUint64(0), + ApricotPhase5BlockTimestamp: utils.NewUint64(0), + ApricotPhasePre6BlockTimestamp: utils.NewUint64(0), + ApricotPhase6BlockTimestamp: utils.NewUint64(0), + ApricotPhasePost6BlockTimestamp: utils.NewUint64(0), + BanffBlockTimestamp: utils.NewUint64(0), + CortinaBlockTimestamp: utils.NewUint64(0), + } + + TestRules = TestChainConfig.AvalancheRules(new(big.Int), 0) +) + +func getChainConfig(networkID uint32, chainID *big.Int) *ChainConfig { + return &ChainConfig{ + ChainID: chainID, HomesteadBlock: big.NewInt(0), DAOForkBlock: big.NewInt(0), DAOForkSupport: true, EIP150Block: big.NewInt(0), - EIP150Hash: common.HexToHash("0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0"), EIP155Block: big.NewInt(0), EIP158Block: big.NewInt(0), ByzantiumBlock: big.NewInt(0), @@ -317,35 +444,42 @@ var ( PetersburgBlock: big.NewInt(0), IstanbulBlock: big.NewInt(0), MuirGlacierBlock: big.NewInt(0), - ApricotPhase1BlockTimestamp: big.NewInt(time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC).Unix()), - ApricotPhase2BlockTimestamp: big.NewInt(time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC).Unix()), - ApricotPhase3BlockTimestamp: big.NewInt(time.Date(2022, time.March, 7, 14, 0, 0, 0, time.UTC).Unix()), - ApricotPhase4BlockTimestamp: big.NewInt(time.Date(2022, time.March, 7, 15, 0, 0, 0, time.UTC).Unix()), - ApricotPhase5BlockTimestamp: big.NewInt(time.Date(2022, time.March, 7, 16, 0, 0, 0, time.UTC).Unix()), - SongbirdTransitionTimestamp: big.NewInt(time.Date(2024, time.October, 29, 12, 0, 0, 0, time.UTC).Unix()), - ApricotPhasePre6BlockTimestamp: big.NewInt(time.Date(2025, time.January, 28, 12, 0, 0, 0, time.UTC).Unix()), - ApricotPhase6BlockTimestamp: big.NewInt(time.Date(2025, time.January, 28, 13, 0, 0, 0, time.UTC).Unix()), 
- ApricotPhasePost6BlockTimestamp: big.NewInt(time.Date(2025, time.January, 28, 14, 0, 0, 0, time.UTC).Unix()), - BanffBlockTimestamp: big.NewInt(time.Date(2025, time.January, 28, 15, 0, 0, 0, time.UTC).Unix()), - CortinaBlockTimestamp: big.NewInt(time.Date(2025, time.May, 6, 12, 0, 0, 0, time.UTC).Unix()), - } - - TestChainConfig = &ChainConfig{AvalancheContext{common.Hash{1}}, big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0)} - TestLaunchConfig = &ChainConfig{AvalancheContext{common.Hash{1}}, big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil} - TestApricotPhase1Config = &ChainConfig{AvalancheContext{common.Hash{1}}, big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil} - TestApricotPhase2Config = &ChainConfig{AvalancheContext{common.Hash{1}}, big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, nil, nil, nil, nil, nil, nil, nil} - TestApricotPhase3Config = &ChainConfig{AvalancheContext{common.Hash{1}}, big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, 
nil, nil, nil, nil, nil, nil, nil, nil} - TestApricotPhase4Config = &ChainConfig{AvalancheContext{common.Hash{1}}, big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, nil, nil, nil, nil, nil} - TestApricotPhase5Config = &ChainConfig{AvalancheContext{common.Hash{1}}, big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, nil, nil, nil, nil} - TestSgbTransitionChainConfig = &ChainConfig{AvalancheContext{common.Hash{1}}, big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, nil, nil, nil} - TestApricotPhasePre6Config = &ChainConfig{AvalancheContext{common.Hash{1}}, big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, nil, nil} - TestApricotPhase6Config = &ChainConfig{AvalancheContext{common.Hash{1}}, big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, nil} - TestApricotPhasePost6Config = &ChainConfig{AvalancheContext{common.Hash{1}}, 
big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil} - TestBanffChainConfig = &ChainConfig{AvalancheContext{common.Hash{1}}, big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil} - TestCortinaChainConfig = &ChainConfig{AvalancheContext{common.Hash{1}}, big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil} - TestDUpgradeChainConfig = &ChainConfig{AvalancheContext{common.Hash{1}}, big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0)} - TestRules = TestChainConfig.AvalancheRules(new(big.Int), new(big.Int)) -) + ApricotPhase1BlockTimestamp: getUpgradeTime(networkID, ApricotPhase1Times), + ApricotPhase2BlockTimestamp: getUpgradeTime(networkID, ApricotPhase2Times), + ApricotPhase3BlockTimestamp: getUpgradeTime(networkID, ApricotPhase3Times), + ApricotPhase4BlockTimestamp: getUpgradeTime(networkID, ApricotPhase4Times), + 
ApricotPhase5BlockTimestamp: getUpgradeTime(networkID, ApricotPhase5Times), + SongbirdTransitionTimestamp: getUpgradeTime(networkID, SongbirdTransitionTimes), + ApricotPhasePre6BlockTimestamp: getUpgradeTime(networkID, ApricotPhasePre6Times), + ApricotPhase6BlockTimestamp: getUpgradeTime(networkID, ApricotPhase6Times), + ApricotPhasePost6BlockTimestamp: getUpgradeTime(networkID, ApricotPhasePost6Times), + BanffBlockTimestamp: getUpgradeTime(networkID, BanffTimes), + CortinaBlockTimestamp: getUpgradeTime(networkID, CortinaTimes), + DurangoBlockTimestamp: getUpgradeTime(networkID, DurangoTimes), + } +} + +func getUpgradeTime(networkID uint32, upgradeTimes map[uint32]time.Time) *uint64 { + if upgradeTime, ok := upgradeTimes[networkID]; ok { + return utils.TimeToNewUint64(upgradeTime) + } + // If the upgrade time isn't specified, default being enabled in the + // genesis. + return utils.NewUint64(0) +} + +// UpgradeConfig includes the following configs that may be specified in upgradeBytes: +// - Timestamps that enable avalanche network upgrades, +// - Enabling or disabling precompiles as network upgrades. +type UpgradeConfig struct { + // Config for enabling and disabling precompiles as network upgrades. + PrecompileUpgrades []PrecompileUpgrade `json:"precompileUpgrades,omitempty"` +} + +// AvalancheContext provides Avalanche specific context directly into the EVM. +type AvalancheContext struct { + SnowCtx *snow.Context +} // ChainConfig is the core config which determines the blockchain settings. 
// @@ -363,9 +497,7 @@ type ChainConfig struct { DAOForkSupport bool `json:"daoForkSupport,omitempty"` // Whether the nodes supports or opposes the DAO hard-fork // EIP150 implements the Gas price changes (https://github.com/ethereum/EIPs/issues/150) - EIP150Block *big.Int `json:"eip150Block,omitempty"` // EIP150 HF block (nil = no fork) - EIP150Hash common.Hash `json:"eip150Hash,omitempty"` // EIP150 HF hash (needed for header only clients as only gas pricing changed) - + EIP150Block *big.Int `json:"eip150Block,omitempty"` // EIP150 HF block (nil = no fork) EIP155Block *big.Int `json:"eip155Block,omitempty"` // EIP155 HF block EIP158Block *big.Int `json:"eip158Block,omitempty"` // EIP158 HF block @@ -376,39 +508,40 @@ type ChainConfig struct { MuirGlacierBlock *big.Int `json:"muirGlacierBlock,omitempty"` // Eip-2384 (bomb delay) switch block (nil = no fork, 0 = already activated) // Avalanche Network Upgrades - ApricotPhase1BlockTimestamp *big.Int `json:"apricotPhase1BlockTimestamp,omitempty"` // Apricot Phase 1 Block Timestamp (nil = no fork, 0 = already activated) + ApricotPhase1BlockTimestamp *uint64 `json:"apricotPhase1BlockTimestamp,omitempty"` // Apricot Phase 1 Block Timestamp (nil = no fork, 0 = already activated) // Apricot Phase 2 Block Timestamp (nil = no fork, 0 = already activated) // Apricot Phase 2 includes a modified version of the Berlin Hard Fork from Ethereum - ApricotPhase2BlockTimestamp *big.Int `json:"apricotPhase2BlockTimestamp,omitempty"` + ApricotPhase2BlockTimestamp *uint64 `json:"apricotPhase2BlockTimestamp,omitempty"` // Apricot Phase 3 introduces dynamic fees and a modified version of the London Hard Fork from Ethereum (nil = no fork, 0 = already activated) - ApricotPhase3BlockTimestamp *big.Int `json:"apricotPhase3BlockTimestamp,omitempty"` + ApricotPhase3BlockTimestamp *uint64 `json:"apricotPhase3BlockTimestamp,omitempty"` // Apricot Phase 4 introduces the notion of a block fee to the dynamic fee algorithm (nil = no fork, 0 = already 
activated) - ApricotPhase4BlockTimestamp *big.Int `json:"apricotPhase4BlockTimestamp,omitempty"` + ApricotPhase4BlockTimestamp *uint64 `json:"apricotPhase4BlockTimestamp,omitempty"` // Apricot Phase 5 introduces a batch of atomic transactions with a maximum atomic gas limit per block. (nil = no fork, 0 = already activated) - ApricotPhase5BlockTimestamp *big.Int `json:"apricotPhase5BlockTimestamp,omitempty"` + ApricotPhase5BlockTimestamp *uint64 `json:"apricotPhase5BlockTimestamp,omitempty"` // When export/import transactions will be allowed on songbird code (Songbird, Coston, Local) - SongbirdTransitionTimestamp *big.Int `json:"songbirdTransitionTimestamp,omitempty"` + SongbirdTransitionTimestamp *uint64 `json:"songbirdTransitionTimestamp,omitempty"` // Apricot Phase Pre-6 deprecates the NativeAssetCall precompile (soft). (nil = no fork, 0 = already activated) - ApricotPhasePre6BlockTimestamp *big.Int `json:"apricotPhasePre6BlockTimestamp,omitempty"` + ApricotPhasePre6BlockTimestamp *uint64 `json:"apricotPhasePre6BlockTimestamp,omitempty"` // Apricot Phase 6 deprecates the NativeAssetBalance and NativeAssetCall precompiles. (nil = no fork, 0 = already activated) - ApricotPhase6BlockTimestamp *big.Int `json:"apricotPhase6BlockTimestamp,omitempty"` + ApricotPhase6BlockTimestamp *uint64 `json:"apricotPhase6BlockTimestamp,omitempty"` // Apricot Phase Post-6 deprecates the NativeAssetCall precompile (soft). (nil = no fork, 0 = already activated) - ApricotPhasePost6BlockTimestamp *big.Int `json:"apricotPhasePost6BlockTimestamp,omitempty"` + ApricotPhasePost6BlockTimestamp *uint64 `json:"apricotPhasePost6BlockTimestamp,omitempty"` // Banff restricts import/export transactions to AVAX. (nil = no fork, 0 = already activated) - BanffBlockTimestamp *big.Int `json:"banffBlockTimestamp,omitempty"` + BanffBlockTimestamp *uint64 `json:"banffBlockTimestamp,omitempty"` // Cortina increases the block gas limit to 15M. 
(nil = no fork, 0 = already activated) - CortinaBlockTimestamp *big.Int `json:"cortinaBlockTimestamp,omitempty"` - // DUpgrade activates the Shanghai upgrade from Ethereum. (nil = no fork, 0 = already activated) - DUpgradeBlockTimestamp *big.Int `json:"dUpgradeBlockTimestamp,omitempty"` + CortinaBlockTimestamp *uint64 `json:"cortinaBlockTimestamp,omitempty"` + // Durango activates the Shanghai Execution Spec Upgrade from Ethereum (https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/shanghai.md#included-eips) + // and Avalanche Warp Messaging. (nil = no fork, 0 = already activated) + // Note: EIP-4895 is excluded since withdrawals are not relevant to the Avalanche C-Chain or Subnets running the EVM. + DurangoBlockTimestamp *uint64 `json:"durangoBlockTimestamp,omitempty"` + // Cancun activates the Cancun upgrade from Ethereum. (nil = no fork, 0 = already activated) + CancunTime *uint64 `json:"cancunTime,omitempty"` + + UpgradeConfig `json:"-"` // Config specified in upgradeBytes (avalanche network upgrades or enable/disabling precompiles). Skip encoding/decoding directly into ChainConfig. } -// AvalancheContext provides Avalanche specific context directly into the EVM. -type AvalancheContext struct { - BlockchainID common.Hash -} - -// String implements the fmt.Stringer interface. -func (c *ChainConfig) String() string { +// Description returns a human-readable description of ChainConfig. +func (c *ChainConfig) Description() string { var banner string banner += fmt.Sprintf("Chain ID: %v\n", c.ChainID) @@ -418,32 +551,33 @@ func (c *ChainConfig) String() string { // makes sense for mainnet should be optional at printing to avoid bloating // the output for testnets and private networks. 
banner += "Hard Forks:\n" - banner += fmt.Sprintf(" - Homestead: %-8v (https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/homestead.md)\n", c.HomesteadBlock) + banner += fmt.Sprintf(" - Homestead: #%-8v (https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/homestead.md)\n", c.HomesteadBlock) if c.DAOForkBlock != nil { - banner += fmt.Sprintf(" - DAO Fork: %-8v (https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/dao-fork.md)\n", c.DAOForkBlock) - } - banner += fmt.Sprintf(" - Tangerine Whistle (EIP 150): %-8v (https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/tangerine-whistle.md)\n", c.EIP150Block) - banner += fmt.Sprintf(" - Spurious Dragon/1 (EIP 155): %-8v (https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/spurious-dragon.md)\n", c.EIP155Block) - banner += fmt.Sprintf(" - Spurious Dragon/2 (EIP 158): %-8v (https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/spurious-dragon.md)\n", c.EIP155Block) - banner += fmt.Sprintf(" - Byzantium: %-8v (https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/byzantium.md)\n", c.ByzantiumBlock) - banner += fmt.Sprintf(" - Constantinople: %-8v (https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/constantinople.md)\n", c.ConstantinopleBlock) - banner += fmt.Sprintf(" - Petersburg: %-8v (https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/petersburg.md)\n", c.PetersburgBlock) - banner += fmt.Sprintf(" - Istanbul: %-8v (https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/istanbul.md)\n", c.IstanbulBlock) + banner += fmt.Sprintf(" - DAO Fork: #%-8v (https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/dao-fork.md)\n", 
c.DAOForkBlock) + } + banner += fmt.Sprintf(" - Tangerine Whistle (EIP 150): #%-8v (https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/tangerine-whistle.md)\n", c.EIP150Block) + banner += fmt.Sprintf(" - Spurious Dragon/1 (EIP 155): #%-8v (https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/spurious-dragon.md)\n", c.EIP155Block) + banner += fmt.Sprintf(" - Spurious Dragon/2 (EIP 158): #%-8v (https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/spurious-dragon.md)\n", c.EIP155Block) + banner += fmt.Sprintf(" - Byzantium: #%-8v (https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/byzantium.md)\n", c.ByzantiumBlock) + banner += fmt.Sprintf(" - Constantinople: #%-8v (https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/constantinople.md)\n", c.ConstantinopleBlock) + banner += fmt.Sprintf(" - Petersburg: #%-8v (https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/petersburg.md)\n", c.PetersburgBlock) + banner += fmt.Sprintf(" - Istanbul: #%-8v (https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/istanbul.md)\n", c.IstanbulBlock) if c.MuirGlacierBlock != nil { - banner += fmt.Sprintf(" - Muir Glacier: %-8v (https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/muir-glacier.md)\n", c.MuirGlacierBlock) + banner += fmt.Sprintf(" - Muir Glacier: #%-8v (https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/muir-glacier.md)\n", c.MuirGlacierBlock) } - banner += fmt.Sprintf(" - Apricot Phase 1 Timestamp: %-8v (https://github.com/ava-labs/avalanchego/releases/tag/v1.3.0)\n", c.ApricotPhase1BlockTimestamp) - banner += fmt.Sprintf(" - Apricot Phase 2 Timestamp: %-8v (https://github.com/ava-labs/avalanchego/releases/tag/v1.4.0)\n", 
c.ApricotPhase2BlockTimestamp) - banner += fmt.Sprintf(" - Apricot Phase 3 Timestamp: %-8v (https://github.com/ava-labs/avalanchego/releases/tag/v1.5.0)\n", c.ApricotPhase3BlockTimestamp) - banner += fmt.Sprintf(" - Apricot Phase 4 Timestamp: %-8v (https://github.com/ava-labs/avalanchego/releases/tag/v1.6.0)\n", c.ApricotPhase4BlockTimestamp) - banner += fmt.Sprintf(" - Apricot Phase 5 Timestamp: %-8v (https://github.com/ava-labs/avalanchego/releases/tag/v1.7.0)\n", c.ApricotPhase5BlockTimestamp) + banner += fmt.Sprintf(" - Apricot Phase 1 Timestamp: @%-10v (https://github.com/ava-labs/avalanchego/releases/tag/v1.3.0)\n", ptrToString(c.ApricotPhase1BlockTimestamp)) + banner += fmt.Sprintf(" - Apricot Phase 2 Timestamp: @%-10v (https://github.com/ava-labs/avalanchego/releases/tag/v1.4.0)\n", ptrToString(c.ApricotPhase2BlockTimestamp)) + banner += fmt.Sprintf(" - Apricot Phase 3 Timestamp: @%-10v (https://github.com/ava-labs/avalanchego/releases/tag/v1.5.0)\n", ptrToString(c.ApricotPhase3BlockTimestamp)) + banner += fmt.Sprintf(" - Apricot Phase 4 Timestamp: @%-10v (https://github.com/ava-labs/avalanchego/releases/tag/v1.6.0)\n", ptrToString(c.ApricotPhase4BlockTimestamp)) + banner += fmt.Sprintf(" - Apricot Phase 5 Timestamp: @%-10v (https://github.com/ava-labs/avalanchego/releases/tag/v1.7.0)\n", ptrToString(c.ApricotPhase5BlockTimestamp)) banner += fmt.Sprintf(" - Songbird Transition Timestamp: %-8v\n", c.SongbirdTransitionTimestamp) - banner += fmt.Sprintf(" - Apricot Phase P6 Timestamp %-8v (https://github.com/ava-labs/avalanchego/releases/tag/v1.8.0)\n", c.ApricotPhasePre6BlockTimestamp) - banner += fmt.Sprintf(" - Apricot Phase 6 Timestamp: %-8v (https://github.com/ava-labs/avalanchego/releases/tag/v1.8.0)\n", c.ApricotPhase6BlockTimestamp) - banner += fmt.Sprintf(" - Apricot Phase Post-6 Timestamp: %-8v (https://github.com/ava-labs/avalanchego/releases/tag/v1.8.0\n", c.ApricotPhasePost6BlockTimestamp) - banner += fmt.Sprintf(" - Banff Timestamp: %-8v 
(https://github.com/ava-labs/avalanchego/releases/tag/v1.9.0)\n", c.BanffBlockTimestamp) - banner += fmt.Sprintf(" - Cortina Timestamp: %-8v (https://github.com/ava-labs/avalanchego/releases/tag/v1.10.0)\n", c.CortinaBlockTimestamp) - banner += fmt.Sprintf(" - DUpgrade Timestamp %-8v (https://github.com/ava-labs/avalanchego/releases/tag/v1.11.0)\n", c.DUpgradeBlockTimestamp) + banner += fmt.Sprintf(" - Apricot Phase P6 Timestamp @%-10v (https://github.com/ava-labs/avalanchego/releases/tag/v1.8.0)\n", ptrToString(c.ApricotPhasePre6BlockTimestamp)) + banner += fmt.Sprintf(" - Apricot Phase 6 Timestamp: @%-10v (https://github.com/ava-labs/avalanchego/releases/tag/v1.8.0)\n", ptrToString(c.ApricotPhase6BlockTimestamp)) + banner += fmt.Sprintf(" - Apricot Phase Post-6 Timestamp: @%-10v (https://github.com/ava-labs/avalanchego/releases/tag/v1.8.0\n", ptrToString(c.ApricotPhasePost6BlockTimestamp)) + banner += fmt.Sprintf(" - Banff Timestamp: @%-10v (https://github.com/ava-labs/avalanchego/releases/tag/v1.9.0)\n", ptrToString(c.BanffBlockTimestamp)) + banner += fmt.Sprintf(" - Cortina Timestamp: @%-10v (https://github.com/ava-labs/avalanchego/releases/tag/v1.10.0)\n", ptrToString(c.CortinaBlockTimestamp)) + banner += fmt.Sprintf(" - Durango Timestamp: @%-10v (https://github.com/ava-labs/avalanchego/releases/tag/v1.11.0)\n", ptrToString(c.DurangoBlockTimestamp)) + banner += fmt.Sprintf(" - Cancun Timestamp: @%-10v (https://github.com/ava-labs/avalanchego/releases/tag/v1.12.0)\n", ptrToString(c.CancunTime)) banner += "\n" return banner } @@ -455,156 +589,194 @@ func (c *ChainConfig) IsSongbirdCode() bool { // IsHomestead returns whether num is either equal to the homestead block or greater. func (c *ChainConfig) IsHomestead(num *big.Int) bool { - return utils.IsForked(c.HomesteadBlock, num) + return utils.IsBlockForked(c.HomesteadBlock, num) } // IsDAOFork returns whether num is either equal to the DAO fork block or greater. 
func (c *ChainConfig) IsDAOFork(num *big.Int) bool { - return utils.IsForked(c.DAOForkBlock, num) + return utils.IsBlockForked(c.DAOForkBlock, num) } // IsEIP150 returns whether num is either equal to the EIP150 fork block or greater. func (c *ChainConfig) IsEIP150(num *big.Int) bool { - return utils.IsForked(c.EIP150Block, num) + return utils.IsBlockForked(c.EIP150Block, num) } // IsEIP155 returns whether num is either equal to the EIP155 fork block or greater. func (c *ChainConfig) IsEIP155(num *big.Int) bool { - return utils.IsForked(c.EIP155Block, num) + return utils.IsBlockForked(c.EIP155Block, num) } // IsEIP158 returns whether num is either equal to the EIP158 fork block or greater. func (c *ChainConfig) IsEIP158(num *big.Int) bool { - return utils.IsForked(c.EIP158Block, num) + return utils.IsBlockForked(c.EIP158Block, num) } // IsByzantium returns whether num is either equal to the Byzantium fork block or greater. func (c *ChainConfig) IsByzantium(num *big.Int) bool { - return utils.IsForked(c.ByzantiumBlock, num) + return utils.IsBlockForked(c.ByzantiumBlock, num) } // IsConstantinople returns whether num is either equal to the Constantinople fork block or greater. func (c *ChainConfig) IsConstantinople(num *big.Int) bool { - return utils.IsForked(c.ConstantinopleBlock, num) + return utils.IsBlockForked(c.ConstantinopleBlock, num) } // IsMuirGlacier returns whether num is either equal to the Muir Glacier (EIP-2384) fork block or greater. 
func (c *ChainConfig) IsMuirGlacier(num *big.Int) bool { - return utils.IsForked(c.MuirGlacierBlock, num) + return utils.IsBlockForked(c.MuirGlacierBlock, num) } // IsPetersburg returns whether num is either // - equal to or greater than the PetersburgBlock fork block, // - OR is nil, and Constantinople is active func (c *ChainConfig) IsPetersburg(num *big.Int) bool { - return utils.IsForked(c.PetersburgBlock, num) || c.PetersburgBlock == nil && utils.IsForked(c.ConstantinopleBlock, num) + return utils.IsBlockForked(c.PetersburgBlock, num) || c.PetersburgBlock == nil && utils.IsBlockForked(c.ConstantinopleBlock, num) } // IsIstanbul returns whether num is either equal to the Istanbul fork block or greater. func (c *ChainConfig) IsIstanbul(num *big.Int) bool { - return utils.IsForked(c.IstanbulBlock, num) + return utils.IsBlockForked(c.IstanbulBlock, num) } // Avalanche Upgrades: -// IsApricotPhase1 returns whether [blockTimestamp] represents a block +// IsApricotPhase1 returns whether [time] represents a block // with a timestamp after the Apricot Phase 1 upgrade time. -func (c *ChainConfig) IsApricotPhase1(blockTimestamp *big.Int) bool { - return utils.IsForked(c.ApricotPhase1BlockTimestamp, blockTimestamp) +func (c *ChainConfig) IsApricotPhase1(time uint64) bool { + return utils.IsTimestampForked(c.ApricotPhase1BlockTimestamp, time) } -// IsApricotPhase2 returns whether [blockTimestamp] represents a block +// IsApricotPhase2 returns whether [time] represents a block // with a timestamp after the Apricot Phase 2 upgrade time. 
-func (c *ChainConfig) IsApricotPhase2(blockTimestamp *big.Int) bool { - return utils.IsForked(c.ApricotPhase2BlockTimestamp, blockTimestamp) +func (c *ChainConfig) IsApricotPhase2(time uint64) bool { + return utils.IsTimestampForked(c.ApricotPhase2BlockTimestamp, time) } -// IsApricotPhase3 returns whether [blockTimestamp] represents a block +// IsApricotPhase3 returns whether [time] represents a block // with a timestamp after the Apricot Phase 3 upgrade time. -func (c *ChainConfig) IsApricotPhase3(blockTimestamp *big.Int) bool { - return utils.IsForked(c.ApricotPhase3BlockTimestamp, blockTimestamp) +func (c *ChainConfig) IsApricotPhase3(time uint64) bool { + return utils.IsTimestampForked(c.ApricotPhase3BlockTimestamp, time) } -// IsApricotPhase4 returns whether [blockTimestamp] represents a block +// IsApricotPhase4 returns whether [time] represents a block // with a timestamp after the Apricot Phase 4 upgrade time. -func (c *ChainConfig) IsApricotPhase4(blockTimestamp *big.Int) bool { - return utils.IsForked(c.ApricotPhase4BlockTimestamp, blockTimestamp) +func (c *ChainConfig) IsApricotPhase4(time uint64) bool { + return utils.IsTimestampForked(c.ApricotPhase4BlockTimestamp, time) } -// IsApricotPhase5 returns whether [blockTimestamp] represents a block +// IsApricotPhase5 returns whether [time] represents a block // with a timestamp after the Apricot Phase 5 upgrade time. -func (c *ChainConfig) IsApricotPhase5(blockTimestamp *big.Int) bool { - return utils.IsForked(c.ApricotPhase5BlockTimestamp, blockTimestamp) +func (c *ChainConfig) IsApricotPhase5(time uint64) bool { + return utils.IsTimestampForked(c.ApricotPhase5BlockTimestamp, time) } -// IsApricotPhasePre6 returns whether [blockTimestamp] represents a block +// IsApricotPhasePre6 returns whether [time] represents a block // with a timestamp after the Apricot Phase Pre 6 upgrade time. 
-func (c *ChainConfig) IsApricotPhasePre6(blockTimestamp *big.Int) bool { - return utils.IsForked(c.ApricotPhasePre6BlockTimestamp, blockTimestamp) +func (c *ChainConfig) IsApricotPhasePre6(time uint64) bool { + return utils.IsTimestampForked(c.ApricotPhasePre6BlockTimestamp, time) } -// IsApricotPhase6 returns whether [blockTimestamp] represents a block +// IsApricotPhase6 returns whether [time] represents a block // with a timestamp after the Apricot Phase 6 upgrade time. -func (c *ChainConfig) IsApricotPhase6(blockTimestamp *big.Int) bool { - return utils.IsForked(c.ApricotPhase6BlockTimestamp, blockTimestamp) +func (c *ChainConfig) IsApricotPhase6(time uint64) bool { + return utils.IsTimestampForked(c.ApricotPhase6BlockTimestamp, time) } -// IsApricotPhasePost6 returns whether [blockTimestamp] represents a block +// IsApricotPhasePost6 returns whether [time] represents a block // with a timestamp after the Apricot Phase 6 Post upgrade time. -func (c *ChainConfig) IsApricotPhasePost6(blockTimestamp *big.Int) bool { - return utils.IsForked(c.ApricotPhasePost6BlockTimestamp, blockTimestamp) +func (c *ChainConfig) IsApricotPhasePost6(time uint64) bool { + return utils.IsTimestampForked(c.ApricotPhasePost6BlockTimestamp, time) } // IsSongbirdTransition returns whether [blockTimestamp] represents a block // with a timestamp after the Songbird code transition time. -func (c *ChainConfig) IsSongbirdTransition(blockTimestamp *big.Int) bool { - return utils.IsForked(c.SongbirdTransitionTimestamp, blockTimestamp) +func (c *ChainConfig) IsSongbirdTransition(time uint64) bool { + return utils.IsTimestampForked(c.SongbirdTransitionTimestamp, time) } -// IsBanff returns whether [blockTimestamp] represents a block +// IsBanff returns whether [time] represents a block // with a timestamp after the Banff upgrade time. 
-func (c *ChainConfig) IsBanff(blockTimestamp *big.Int) bool { - return utils.IsForked(c.BanffBlockTimestamp, blockTimestamp) +func (c *ChainConfig) IsBanff(time uint64) bool { + return utils.IsTimestampForked(c.BanffBlockTimestamp, time) } -// IsCortina returns whether [blockTimestamp] represents a block +// IsCortina returns whether [time] represents a block // with a timestamp after the Cortina upgrade time. -func (c *ChainConfig) IsCortina(blockTimestamp *big.Int) bool { - return utils.IsForked(c.CortinaBlockTimestamp, blockTimestamp) +func (c *ChainConfig) IsCortina(time uint64) bool { + return utils.IsTimestampForked(c.CortinaBlockTimestamp, time) } -// IsDUpgrade returns whether [blockTimestamp] represents a block -// with a timestamp after the DUpgrade upgrade time. -func (c *ChainConfig) IsDUpgrade(blockTimestamp *big.Int) bool { - return utils.IsForked(c.DUpgradeBlockTimestamp, blockTimestamp) +// IsDurango returns whether [time] represents a block +// with a timestamp after the Durango upgrade time. +func (c *ChainConfig) IsDurango(time uint64) bool { + return utils.IsTimestampForked(c.DurangoBlockTimestamp, time) +} + +// IsCancun returns whether [time] represents a block +// with a timestamp after the Cancun upgrade time. +func (c *ChainConfig) IsCancun(time uint64) bool { + return utils.IsTimestampForked(c.CancunTime, time) +} + +func (r *Rules) PredicatersExist() bool { + return len(r.Predicaters) > 0 +} + +func (r *Rules) PredicaterExists(addr common.Address) bool { + _, PredicaterExists := r.Predicaters[addr] + return PredicaterExists +} + +// IsPrecompileEnabled returns whether precompile with [address] is enabled at [timestamp]. +func (c *ChainConfig) IsPrecompileEnabled(address common.Address, timestamp uint64) bool { + config := c.getActivePrecompileConfig(address, timestamp) + return config != nil && !config.IsDisabled() } // CheckCompatible checks whether scheduled fork transitions have been imported // with a mismatching chain configuration. 
-func (c *ChainConfig) CheckCompatible(newcfg *ChainConfig, height uint64, timestamp uint64) *ConfigCompatError { - bNumber := new(big.Int).SetUint64(height) - bTimestamp := new(big.Int).SetUint64(timestamp) - +func (c *ChainConfig) CheckCompatible(newcfg *ChainConfig, height uint64, time uint64) *ConfigCompatError { + var ( + bhead = new(big.Int).SetUint64(height) + btime = time + ) // Iterate checkCompatible to find the lowest conflict. var lasterr *ConfigCompatError for { - err := c.checkCompatible(newcfg, bNumber, bTimestamp) - if err == nil || (lasterr != nil && err.RewindTo == lasterr.RewindTo) { + err := c.checkCompatible(newcfg, bhead, btime) + if err == nil || (lasterr != nil && err.RewindToBlock == lasterr.RewindToBlock && err.RewindToTime == lasterr.RewindToTime) { break } lasterr = err - bNumber.SetUint64(err.RewindTo) + + if err.RewindToTime > 0 { + btime = err.RewindToTime + } else { + bhead.SetUint64(err.RewindToBlock) + } } return lasterr } +// Verify verifies chain config and returns error +func (c *ChainConfig) Verify() error { + // Verify the precompile upgrades are internally consistent given the existing chainConfig. 
+ if err := c.verifyPrecompileUpgrades(); err != nil { + return fmt.Errorf("invalid precompile upgrades: %w", err) + } + + return nil +} + // CheckConfigForkOrder checks that we don't "skip" any forks, geth isn't pluggable enough // to guarantee that forks can be implemented in a different order than on official networks func (c *ChainConfig) CheckConfigForkOrder() error { type fork struct { - name string - block *big.Int - optional bool // if true, the fork may be nil and next fork is still allowed + name string + block *big.Int // some go-ethereum forks use block numbers + timestamp *uint64 // Avalanche forks use timestamps + optional bool // if true, the fork may be nil and next fork is still allowed } var lastFork fork for _, cur := range []fork{ @@ -649,33 +821,34 @@ func (c *ChainConfig) CheckConfigForkOrder() error { // Instead, we check only that Apricot Phases are enabled in order. lastFork = fork{} for _, cur := range []fork{ - {name: "apricotPhase1BlockTimestamp", block: c.ApricotPhase1BlockTimestamp}, - {name: "apricotPhase2BlockTimestamp", block: c.ApricotPhase2BlockTimestamp}, - {name: "apricotPhase3BlockTimestamp", block: c.ApricotPhase3BlockTimestamp}, - {name: "apricotPhase4BlockTimestamp", block: c.ApricotPhase4BlockTimestamp}, - {name: "apricotPhase5BlockTimestamp", block: c.ApricotPhase5BlockTimestamp}, - {name: "apricotPhasePre6BlockTimestamp", block: c.ApricotPhasePre6BlockTimestamp}, - {name: "apricotPhase6BlockTimestamp", block: c.ApricotPhase6BlockTimestamp}, - {name: "apricotPhasePost6BlockTimestamp", block: c.ApricotPhasePost6BlockTimestamp}, - {name: "banffBlockTimestamp", block: c.BanffBlockTimestamp}, - {name: "cortinaBlockTimestamp", block: c.CortinaBlockTimestamp}, - {name: "dUpgradeBlockTimestamp", block: c.DUpgradeBlockTimestamp}, + {name: "apricotPhase1BlockTimestamp", timestamp: c.ApricotPhase1BlockTimestamp}, + {name: "apricotPhase2BlockTimestamp", timestamp: c.ApricotPhase2BlockTimestamp}, + {name: 
"apricotPhase3BlockTimestamp", timestamp: c.ApricotPhase3BlockTimestamp}, + {name: "apricotPhase4BlockTimestamp", timestamp: c.ApricotPhase4BlockTimestamp}, + {name: "apricotPhase5BlockTimestamp", timestamp: c.ApricotPhase5BlockTimestamp}, + {name: "apricotPhasePre6BlockTimestamp", timestamp: c.ApricotPhasePre6BlockTimestamp}, + {name: "apricotPhase6BlockTimestamp", timestamp: c.ApricotPhase6BlockTimestamp}, + {name: "apricotPhasePost6BlockTimestamp", timestamp: c.ApricotPhasePost6BlockTimestamp}, + {name: "banffBlockTimestamp", timestamp: c.BanffBlockTimestamp}, + {name: "cortinaBlockTimestamp", timestamp: c.CortinaBlockTimestamp}, + {name: "durangoBlockTimestamp", timestamp: c.DurangoBlockTimestamp}, + {name: "cancunTime", timestamp: c.CancunTime}, } { if lastFork.name != "" { // Next one must be higher number - if lastFork.block == nil && cur.block != nil { + if lastFork.timestamp == nil && cur.timestamp != nil { return fmt.Errorf("unsupported fork ordering: %v not enabled, but %v enabled at %v", - lastFork.name, cur.name, cur.block) + lastFork.name, cur.name, cur.timestamp) } - if lastFork.block != nil && cur.block != nil { - if lastFork.block.Cmp(cur.block) > 0 { + if lastFork.timestamp != nil && cur.timestamp != nil { + if *lastFork.timestamp > *cur.timestamp { return fmt.Errorf("unsupported fork ordering: %v enabled at %v, but %v enabled at %v", - lastFork.name, lastFork.block, cur.name, cur.block) + lastFork.name, lastFork.timestamp, cur.name, cur.timestamp) } } } // If it was optional and not set, then ignore it - if !cur.optional || cur.block != nil { + if !cur.optional || cur.timestamp != nil { lastFork = cur } } @@ -686,90 +859,94 @@ func (c *ChainConfig) CheckConfigForkOrder() error { return nil } -func (c *ChainConfig) checkCompatible(newcfg *ChainConfig, lastHeight *big.Int, lastTimestamp *big.Int) *ConfigCompatError { - if isForkIncompatible(c.HomesteadBlock, newcfg.HomesteadBlock, lastHeight) { - return newCompatError("Homestead fork block", 
c.HomesteadBlock, newcfg.HomesteadBlock) +func (c *ChainConfig) checkCompatible(newcfg *ChainConfig, height *big.Int, time uint64) *ConfigCompatError { + if isForkBlockIncompatible(c.HomesteadBlock, newcfg.HomesteadBlock, height) { + return newBlockCompatError("Homestead fork block", c.HomesteadBlock, newcfg.HomesteadBlock) } - if isForkIncompatible(c.DAOForkBlock, newcfg.DAOForkBlock, lastHeight) { - return newCompatError("DAO fork block", c.DAOForkBlock, newcfg.DAOForkBlock) + if isForkBlockIncompatible(c.DAOForkBlock, newcfg.DAOForkBlock, height) { + return newBlockCompatError("DAO fork block", c.DAOForkBlock, newcfg.DAOForkBlock) } - if c.IsDAOFork(lastHeight) && c.DAOForkSupport != newcfg.DAOForkSupport { - return newCompatError("DAO fork support flag", c.DAOForkBlock, newcfg.DAOForkBlock) + if c.IsDAOFork(height) && c.DAOForkSupport != newcfg.DAOForkSupport { + return newBlockCompatError("DAO fork support flag", c.DAOForkBlock, newcfg.DAOForkBlock) } - if isForkIncompatible(c.EIP150Block, newcfg.EIP150Block, lastHeight) { - return newCompatError("EIP150 fork block", c.EIP150Block, newcfg.EIP150Block) + if isForkBlockIncompatible(c.EIP150Block, newcfg.EIP150Block, height) { + return newBlockCompatError("EIP150 fork block", c.EIP150Block, newcfg.EIP150Block) } - if isForkIncompatible(c.EIP155Block, newcfg.EIP155Block, lastHeight) { - return newCompatError("EIP155 fork block", c.EIP155Block, newcfg.EIP155Block) + if isForkBlockIncompatible(c.EIP155Block, newcfg.EIP155Block, height) { + return newBlockCompatError("EIP155 fork block", c.EIP155Block, newcfg.EIP155Block) } - if isForkIncompatible(c.EIP158Block, newcfg.EIP158Block, lastHeight) { - return newCompatError("EIP158 fork block", c.EIP158Block, newcfg.EIP158Block) + if isForkBlockIncompatible(c.EIP158Block, newcfg.EIP158Block, height) { + return newBlockCompatError("EIP158 fork block", c.EIP158Block, newcfg.EIP158Block) } - if c.IsEIP158(lastHeight) && !configNumEqual(c.ChainID, newcfg.ChainID) { - return 
newCompatError("EIP158 chain ID", c.EIP158Block, newcfg.EIP158Block) + if c.IsEIP158(height) && !configBlockEqual(c.ChainID, newcfg.ChainID) { + return newBlockCompatError("EIP158 chain ID", c.EIP158Block, newcfg.EIP158Block) } - if isForkIncompatible(c.ByzantiumBlock, newcfg.ByzantiumBlock, lastHeight) { - return newCompatError("Byzantium fork block", c.ByzantiumBlock, newcfg.ByzantiumBlock) + if isForkBlockIncompatible(c.ByzantiumBlock, newcfg.ByzantiumBlock, height) { + return newBlockCompatError("Byzantium fork block", c.ByzantiumBlock, newcfg.ByzantiumBlock) } - if isForkIncompatible(c.ConstantinopleBlock, newcfg.ConstantinopleBlock, lastHeight) { - return newCompatError("Constantinople fork block", c.ConstantinopleBlock, newcfg.ConstantinopleBlock) + if isForkBlockIncompatible(c.ConstantinopleBlock, newcfg.ConstantinopleBlock, height) { + return newBlockCompatError("Constantinople fork block", c.ConstantinopleBlock, newcfg.ConstantinopleBlock) } - if isForkIncompatible(c.PetersburgBlock, newcfg.PetersburgBlock, lastHeight) { + if isForkBlockIncompatible(c.PetersburgBlock, newcfg.PetersburgBlock, height) { // the only case where we allow Petersburg to be set in the past is if it is equal to Constantinople // mainly to satisfy fork ordering requirements which state that Petersburg fork be set if Constantinople fork is set - if isForkIncompatible(c.ConstantinopleBlock, newcfg.PetersburgBlock, lastHeight) { - return newCompatError("Petersburg fork block", c.PetersburgBlock, newcfg.PetersburgBlock) + if isForkBlockIncompatible(c.ConstantinopleBlock, newcfg.PetersburgBlock, height) { + return newBlockCompatError("Petersburg fork block", c.PetersburgBlock, newcfg.PetersburgBlock) } } - if isForkIncompatible(c.IstanbulBlock, newcfg.IstanbulBlock, lastHeight) { - return newCompatError("Istanbul fork block", c.IstanbulBlock, newcfg.IstanbulBlock) + if isForkBlockIncompatible(c.IstanbulBlock, newcfg.IstanbulBlock, height) { + return newBlockCompatError("Istanbul fork 
block", c.IstanbulBlock, newcfg.IstanbulBlock) + } + if isForkBlockIncompatible(c.MuirGlacierBlock, newcfg.MuirGlacierBlock, height) { + return newBlockCompatError("Muir Glacier fork block", c.MuirGlacierBlock, newcfg.MuirGlacierBlock) } - if isForkIncompatible(c.MuirGlacierBlock, newcfg.MuirGlacierBlock, lastHeight) { - return newCompatError("Muir Glacier fork block", c.MuirGlacierBlock, newcfg.MuirGlacierBlock) + if isForkTimestampIncompatible(c.ApricotPhase1BlockTimestamp, newcfg.ApricotPhase1BlockTimestamp, time) { + return newTimestampCompatError("ApricotPhase1 fork block timestamp", c.ApricotPhase1BlockTimestamp, newcfg.ApricotPhase1BlockTimestamp) } - if isForkIncompatible(c.ApricotPhase1BlockTimestamp, newcfg.ApricotPhase1BlockTimestamp, lastTimestamp) { - return newCompatError("ApricotPhase1 fork block timestamp", c.ApricotPhase1BlockTimestamp, newcfg.ApricotPhase1BlockTimestamp) + if isForkTimestampIncompatible(c.ApricotPhase2BlockTimestamp, newcfg.ApricotPhase2BlockTimestamp, time) { + return newTimestampCompatError("ApricotPhase2 fork block timestamp", c.ApricotPhase2BlockTimestamp, newcfg.ApricotPhase2BlockTimestamp) } - if isForkIncompatible(c.ApricotPhase2BlockTimestamp, newcfg.ApricotPhase2BlockTimestamp, lastTimestamp) { - return newCompatError("ApricotPhase2 fork block timestamp", c.ApricotPhase2BlockTimestamp, newcfg.ApricotPhase2BlockTimestamp) + if isForkTimestampIncompatible(c.ApricotPhase3BlockTimestamp, newcfg.ApricotPhase3BlockTimestamp, time) { + return newTimestampCompatError("ApricotPhase3 fork block timestamp", c.ApricotPhase3BlockTimestamp, newcfg.ApricotPhase3BlockTimestamp) } - if isForkIncompatible(c.ApricotPhase3BlockTimestamp, newcfg.ApricotPhase3BlockTimestamp, lastTimestamp) { - return newCompatError("ApricotPhase3 fork block timestamp", c.ApricotPhase3BlockTimestamp, newcfg.ApricotPhase3BlockTimestamp) + if isForkTimestampIncompatible(c.ApricotPhase4BlockTimestamp, newcfg.ApricotPhase4BlockTimestamp, time) { + return 
newTimestampCompatError("ApricotPhase4 fork block timestamp", c.ApricotPhase4BlockTimestamp, newcfg.ApricotPhase4BlockTimestamp) } - if isForkIncompatible(c.ApricotPhase4BlockTimestamp, newcfg.ApricotPhase4BlockTimestamp, lastTimestamp) { - return newCompatError("ApricotPhase4 fork block timestamp", c.ApricotPhase4BlockTimestamp, newcfg.ApricotPhase4BlockTimestamp) + if isForkTimestampIncompatible(c.ApricotPhase5BlockTimestamp, newcfg.ApricotPhase5BlockTimestamp, time) { + return newTimestampCompatError("ApricotPhase5 fork block timestamp", c.ApricotPhase5BlockTimestamp, newcfg.ApricotPhase5BlockTimestamp) } - if isForkIncompatible(c.ApricotPhase5BlockTimestamp, newcfg.ApricotPhase5BlockTimestamp, lastTimestamp) { - return newCompatError("ApricotPhase5 fork block timestamp", c.ApricotPhase5BlockTimestamp, newcfg.ApricotPhase5BlockTimestamp) + if isForkTimestampIncompatible(c.ApricotPhasePre6BlockTimestamp, newcfg.ApricotPhasePre6BlockTimestamp, time) { + return newTimestampCompatError("ApricotPhasePre6 fork block timestamp", c.ApricotPhasePre6BlockTimestamp, newcfg.ApricotPhasePre6BlockTimestamp) } - if isForkIncompatible(c.ApricotPhasePre6BlockTimestamp, newcfg.ApricotPhasePre6BlockTimestamp, lastTimestamp) { - return newCompatError("ApricotPhasePre6 fork block timestamp", c.ApricotPhasePre6BlockTimestamp, newcfg.ApricotPhasePre6BlockTimestamp) + if isForkTimestampIncompatible(c.ApricotPhase6BlockTimestamp, newcfg.ApricotPhase6BlockTimestamp, time) { + return newTimestampCompatError("ApricotPhase6 fork block timestamp", c.ApricotPhase6BlockTimestamp, newcfg.ApricotPhase6BlockTimestamp) } - if isForkIncompatible(c.ApricotPhase6BlockTimestamp, newcfg.ApricotPhase6BlockTimestamp, lastTimestamp) { - return newCompatError("ApricotPhase6 fork block timestamp", c.ApricotPhase6BlockTimestamp, newcfg.ApricotPhase6BlockTimestamp) + if isForkTimestampIncompatible(c.ApricotPhasePost6BlockTimestamp, newcfg.ApricotPhasePost6BlockTimestamp, time) { + return 
newTimestampCompatError("ApricotPhasePost6 fork block timestamp", c.ApricotPhasePost6BlockTimestamp, newcfg.ApricotPhasePost6BlockTimestamp) } - if isForkIncompatible(c.ApricotPhasePost6BlockTimestamp, newcfg.ApricotPhasePost6BlockTimestamp, lastTimestamp) { - return newCompatError("ApricotPhasePost6 fork block timestamp", c.ApricotPhasePost6BlockTimestamp, newcfg.ApricotPhasePost6BlockTimestamp) + if isForkTimestampIncompatible(c.BanffBlockTimestamp, newcfg.BanffBlockTimestamp, time) { + return newTimestampCompatError("Banff fork block timestamp", c.BanffBlockTimestamp, newcfg.BanffBlockTimestamp) } - if isForkIncompatible(c.BanffBlockTimestamp, newcfg.BanffBlockTimestamp, lastTimestamp) { - return newCompatError("Banff fork block timestamp", c.BanffBlockTimestamp, newcfg.BanffBlockTimestamp) + if isForkTimestampIncompatible(c.CortinaBlockTimestamp, newcfg.CortinaBlockTimestamp, time) { + return newTimestampCompatError("Cortina fork block timestamp", c.CortinaBlockTimestamp, newcfg.CortinaBlockTimestamp) } - if isForkIncompatible(c.CortinaBlockTimestamp, newcfg.CortinaBlockTimestamp, lastTimestamp) { - return newCompatError("Cortina fork block timestamp", c.CortinaBlockTimestamp, newcfg.CortinaBlockTimestamp) + if isForkTimestampIncompatible(c.DurangoBlockTimestamp, newcfg.DurangoBlockTimestamp, time) { + return newTimestampCompatError("Durango fork block timestamp", c.DurangoBlockTimestamp, newcfg.DurangoBlockTimestamp) } - if isForkIncompatible(c.DUpgradeBlockTimestamp, newcfg.DUpgradeBlockTimestamp, lastTimestamp) { - return newCompatError("DUpgrade fork block timestamp", c.DUpgradeBlockTimestamp, newcfg.DUpgradeBlockTimestamp) + if isForkTimestampIncompatible(c.CancunTime, newcfg.CancunTime, time) { + return newTimestampCompatError("Cancun fork block timestamp", c.CancunTime, newcfg.CancunTime) } + return nil } -// isForkIncompatible returns true if a fork scheduled at s1 cannot be rescheduled to +// isForkBlockIncompatible returns true if a fork scheduled at 
s1 cannot be rescheduled to // block s2 because head is already past the fork. -func isForkIncompatible(s1, s2, head *big.Int) bool { - return (utils.IsForked(s1, head) || utils.IsForked(s2, head)) && !configNumEqual(s1, s2) +func isForkBlockIncompatible(s1, s2, head *big.Int) bool { + return (utils.IsBlockForked(s1, head) || utils.IsBlockForked(s2, head)) && !configBlockEqual(s1, s2) } -func configNumEqual(x, y *big.Int) bool { +func configBlockEqual(x, y *big.Int) bool { if x == nil { return y == nil } @@ -779,17 +956,41 @@ func configNumEqual(x, y *big.Int) bool { return x.Cmp(y) == 0 } +// isForkTimestampIncompatible returns true if a fork scheduled at timestamp s1 +// cannot be rescheduled to timestamp s2 because head is already past the fork. +func isForkTimestampIncompatible(s1, s2 *uint64, head uint64) bool { + return (utils.IsTimestampForked(s1, head) || utils.IsTimestampForked(s2, head)) && !configTimestampEqual(s1, s2) +} + +func configTimestampEqual(x, y *uint64) bool { + if x == nil { + return y == nil + } + if y == nil { + return x == nil + } + return *x == *y +} + // ConfigCompatError is raised if the locally-stored blockchain is initialised with a // ChainConfig that would alter the past. 
type ConfigCompatError struct { What string - // block numbers of the stored and new configurations - StoredConfig, NewConfig *big.Int + + // block numbers of the stored and new configurations if block based forking + StoredBlock, NewBlock *big.Int + + // timestamps of the stored and new configurations if time based forking + StoredTime, NewTime *uint64 + // the block number to which the local chain must be rewound to correct the error - RewindTo uint64 + RewindToBlock uint64 + + // the timestamp to which the local chain must be rewound to correct the error + RewindToTime uint64 } -func newCompatError(what string, storedblock, newblock *big.Int) *ConfigCompatError { +func newBlockCompatError(what string, storedblock, newblock *big.Int) *ConfigCompatError { var rew *big.Int switch { case storedblock == nil: @@ -799,15 +1000,52 @@ func newCompatError(what string, storedblock, newblock *big.Int) *ConfigCompatEr default: rew = newblock } - err := &ConfigCompatError{what, storedblock, newblock, 0} + err := &ConfigCompatError{ + What: what, + StoredBlock: storedblock, + NewBlock: newblock, + RewindToBlock: 0, + } if rew != nil && rew.Sign() > 0 { - err.RewindTo = rew.Uint64() - 1 + err.RewindToBlock = rew.Uint64() - 1 + } + return err +} + +func newTimestampCompatError(what string, storedtime, newtime *uint64) *ConfigCompatError { + var rew *uint64 + switch { + case storedtime == nil: + rew = newtime + case newtime == nil || *storedtime < *newtime: + rew = storedtime + default: + rew = newtime + } + err := &ConfigCompatError{ + What: what, + StoredTime: storedtime, + NewTime: newtime, + RewindToTime: 0, + } + if rew != nil && *rew > 0 { + err.RewindToTime = *rew - 1 } return err } func (err *ConfigCompatError) Error() string { - return fmt.Sprintf("mismatching %s in database (have %d, want %d, rewindto %d)", err.What, err.StoredConfig, err.NewConfig, err.RewindTo) + if err.StoredBlock != nil { + return fmt.Sprintf("mismatching %s in database (have block %d, want block 
%d, rewindto block %d)", err.What, err.StoredBlock, err.NewBlock, err.RewindToBlock) + } + return fmt.Sprintf("mismatching %s in database (have timestamp %s, want timestamp %s, rewindto timestamp %d)", err.What, ptrToString(err.StoredTime), ptrToString(err.NewTime), err.RewindToTime) +} + +func ptrToString(val *uint64) string { + if val == nil { + return "nil" + } + return fmt.Sprintf("%d", *val) } // Rules wraps ChainConfig and is merely syntactic sugar or can be used for functions @@ -819,27 +1057,40 @@ type Rules struct { ChainID *big.Int IsHomestead, IsEIP150, IsEIP155, IsEIP158 bool IsByzantium, IsConstantinople, IsPetersburg, IsIstanbul bool + IsCancun bool // Rules for Avalanche releases IsApricotPhase1, IsApricotPhase2, IsApricotPhase3, IsApricotPhase4, IsApricotPhase5 bool IsApricotPhasePre6, IsApricotPhase6, IsApricotPhasePost6 bool IsBanff bool IsCortina bool - IsDUpgrade bool + IsDurango bool // Songbird (coston, local) IsSongbirdCode bool IsSongbirdTransition bool - // Precompiles maps addresses to stateful precompiled contracts that are enabled + // ActivePrecompiles maps addresses to stateful precompiled contracts that are enabled // for this rule set. // Note: none of these addresses should conflict with the address space used by // any existing precompiles. - Precompiles map[common.Address]precompile.StatefulPrecompiledContract + ActivePrecompiles map[common.Address]precompileconfig.Config + // Predicaters maps addresses to stateful precompile Predicaters + // that are enabled for this rule set. + Predicaters map[common.Address]precompileconfig.Predicater + // AccepterPrecompiles map addresses to stateful precompile accepter functions + // that are enabled for this rule set. + AccepterPrecompiles map[common.Address]precompileconfig.Accepter +} + +// IsPrecompileEnabled returns true if the precompile at [addr] is enabled for this rule set. 
+func (r *Rules) IsPrecompileEnabled(addr common.Address) bool { + _, ok := r.ActivePrecompiles[addr] + return ok } // Rules ensures c's ChainID is not nil. -func (c *ChainConfig) rules(num *big.Int) Rules { +func (c *ChainConfig) rules(num *big.Int, timestamp uint64) Rules { chainID := c.ChainID if chainID == nil { chainID = new(big.Int) @@ -855,57 +1106,43 @@ func (c *ChainConfig) rules(num *big.Int) Rules { IsPetersburg: c.IsPetersburg(num), IsIstanbul: c.IsIstanbul(num), IsSongbirdCode: c.IsSongbirdCode(), + IsCancun: c.IsCancun(timestamp), } } // AvalancheRules returns the Avalanche modified rules to support Avalanche // network upgrades -func (c *ChainConfig) AvalancheRules(blockNum, blockTimestamp *big.Int) Rules { - rules := c.rules(blockNum) - - rules.IsApricotPhase1 = c.IsApricotPhase1(blockTimestamp) - rules.IsApricotPhase2 = c.IsApricotPhase2(blockTimestamp) - rules.IsApricotPhase3 = c.IsApricotPhase3(blockTimestamp) - rules.IsApricotPhase4 = c.IsApricotPhase4(blockTimestamp) - rules.IsApricotPhase5 = c.IsApricotPhase5(blockTimestamp) - rules.IsApricotPhasePre6 = c.IsApricotPhasePre6(blockTimestamp) - rules.IsApricotPhase6 = c.IsApricotPhase6(blockTimestamp) - rules.IsApricotPhasePost6 = c.IsApricotPhasePost6(blockTimestamp) - rules.IsSongbirdTransition = c.IsSongbirdTransition(blockTimestamp) - rules.IsBanff = c.IsBanff(blockTimestamp) - rules.IsCortina = c.IsCortina(blockTimestamp) - rules.IsDUpgrade = c.IsDUpgrade(blockTimestamp) +func (c *ChainConfig) AvalancheRules(blockNum *big.Int, timestamp uint64) Rules { + rules := c.rules(blockNum, timestamp) + + rules.IsApricotPhase1 = c.IsApricotPhase1(timestamp) + rules.IsApricotPhase2 = c.IsApricotPhase2(timestamp) + rules.IsApricotPhase3 = c.IsApricotPhase3(timestamp) + rules.IsApricotPhase4 = c.IsApricotPhase4(timestamp) + rules.IsApricotPhase5 = c.IsApricotPhase5(timestamp) + rules.IsApricotPhasePre6 = c.IsApricotPhasePre6(timestamp) + rules.IsApricotPhase6 = c.IsApricotPhase6(timestamp) + 
rules.IsApricotPhasePost6 = c.IsApricotPhasePost6(timestamp) + rules.IsSongbirdTransition = c.IsSongbirdTransition(timestamp) + rules.IsBanff = c.IsBanff(timestamp) + rules.IsCortina = c.IsCortina(timestamp) + rules.IsDurango = c.IsDurango(timestamp) // Initialize the stateful precompiles that should be enabled at [blockTimestamp]. - rules.Precompiles = make(map[common.Address]precompile.StatefulPrecompiledContract) - for _, config := range c.enabledStatefulPrecompiles() { - if utils.IsForked(config.Timestamp(), blockTimestamp) { - rules.Precompiles[config.Address()] = config.Contract() + rules.ActivePrecompiles = make(map[common.Address]precompileconfig.Config) + rules.Predicaters = make(map[common.Address]precompileconfig.Predicater) + rules.AccepterPrecompiles = make(map[common.Address]precompileconfig.Accepter) + for _, module := range modules.RegisteredModules() { + if config := c.getActivePrecompileConfig(module.Address, timestamp); config != nil && !config.IsDisabled() { + rules.ActivePrecompiles[module.Address] = config + if predicater, ok := config.(precompileconfig.Predicater); ok { + rules.Predicaters[module.Address] = predicater + } + if precompileAccepter, ok := config.(precompileconfig.Accepter); ok { + rules.AccepterPrecompiles[module.Address] = precompileAccepter + } } } return rules } - -// enabledStatefulPrecompiles returns a list of stateful precompile configs in the order that they are enabled -// by block timestamp. -// Note: the return value does not include the native precompiles [nativeAssetCall] and [nativeAssetBalance]. -// These are handled in [evm.precompile] directly. 
-func (c *ChainConfig) enabledStatefulPrecompiles() []precompile.StatefulPrecompileConfig { - statefulPrecompileConfigs := make([]precompile.StatefulPrecompileConfig, 0) - - return statefulPrecompileConfigs -} - -// CheckConfigurePrecompiles checks if any of the precompiles specified in the chain config are enabled by the block -// transition from [parentTimestamp] to the timestamp set in [blockContext]. If this is the case, it calls [Configure] -// to apply the necessary state transitions for the upgrade. -// This function is called: -// - within genesis setup to configure the starting state for precompiles enabled at genesis, -// - during block processing to update the state before processing the given block. -func (c *ChainConfig) CheckConfigurePrecompiles(parentTimestamp *big.Int, blockContext precompile.BlockContext, statedb precompile.StateDB) { - // Iterate the enabled stateful precompiles and configure them if needed - for _, config := range c.enabledStatefulPrecompiles() { - precompile.CheckConfigure(c, parentTimestamp, blockContext, config, statedb) - } -} diff --git a/coreth/params/config_test.go b/coreth/params/config_test.go index 0f242adc..cdc771de 100644 --- a/coreth/params/config_test.go +++ b/coreth/params/config_test.go @@ -27,112 +27,136 @@ package params import ( + "math" "math/big" "reflect" "testing" + "time" + + "github.com/ava-labs/coreth/utils" ) func TestCheckCompatible(t *testing.T) { type test struct { - stored, new *ChainConfig - blockHeight, blockTimestamp uint64 - wantErr *ConfigCompatError + stored, new *ChainConfig + headBlock uint64 + headTimestamp uint64 + wantErr *ConfigCompatError } tests := []test{ - {stored: TestChainConfig, new: TestChainConfig, blockHeight: 0, blockTimestamp: 0, wantErr: nil}, - {stored: TestChainConfig, new: TestChainConfig, blockHeight: 100, blockTimestamp: 1000, wantErr: nil}, + {stored: TestChainConfig, new: TestChainConfig, headBlock: 0, headTimestamp: 0, wantErr: nil}, + {stored: TestChainConfig, new: 
TestChainConfig, headBlock: 0, headTimestamp: uint64(time.Now().Unix()), wantErr: nil}, + {stored: TestChainConfig, new: TestChainConfig, headBlock: 100, wantErr: nil}, { - stored: &ChainConfig{EIP150Block: big.NewInt(10)}, - new: &ChainConfig{EIP150Block: big.NewInt(20)}, - blockHeight: 9, - blockTimestamp: 90, - wantErr: nil, + stored: &ChainConfig{EIP150Block: big.NewInt(10)}, + new: &ChainConfig{EIP150Block: big.NewInt(20)}, + headBlock: 9, + headTimestamp: 90, + wantErr: nil, }, { - stored: TestChainConfig, - new: &ChainConfig{HomesteadBlock: nil}, - blockHeight: 3, - blockTimestamp: 30, + stored: TestChainConfig, + new: &ChainConfig{HomesteadBlock: nil}, + headBlock: 3, + headTimestamp: 30, wantErr: &ConfigCompatError{ - What: "Homestead fork block", - StoredConfig: big.NewInt(0), - NewConfig: nil, - RewindTo: 0, + What: "Homestead fork block", + StoredBlock: big.NewInt(0), + NewBlock: nil, + RewindToBlock: 0, }, }, { - stored: TestChainConfig, - new: &ChainConfig{HomesteadBlock: big.NewInt(1)}, - blockHeight: 3, - blockTimestamp: 30, + stored: TestChainConfig, + new: &ChainConfig{HomesteadBlock: big.NewInt(1)}, + headBlock: 3, + headTimestamp: 30, wantErr: &ConfigCompatError{ - What: "Homestead fork block", - StoredConfig: big.NewInt(0), - NewConfig: big.NewInt(1), - RewindTo: 0, + What: "Homestead fork block", + StoredBlock: big.NewInt(0), + NewBlock: big.NewInt(1), + RewindToBlock: 0, }, }, { - stored: &ChainConfig{HomesteadBlock: big.NewInt(30), EIP150Block: big.NewInt(10)}, - new: &ChainConfig{HomesteadBlock: big.NewInt(25), EIP150Block: big.NewInt(20)}, - blockHeight: 25, - blockTimestamp: 250, + stored: &ChainConfig{HomesteadBlock: big.NewInt(30), EIP150Block: big.NewInt(10)}, + new: &ChainConfig{HomesteadBlock: big.NewInt(25), EIP150Block: big.NewInt(20)}, + headBlock: 25, + headTimestamp: 250, wantErr: &ConfigCompatError{ - What: "EIP150 fork block", - StoredConfig: big.NewInt(10), - NewConfig: big.NewInt(20), - RewindTo: 9, + What: "EIP150 fork 
block", + StoredBlock: big.NewInt(10), + NewBlock: big.NewInt(20), + RewindToBlock: 9, }, }, { - stored: &ChainConfig{ConstantinopleBlock: big.NewInt(30)}, - new: &ChainConfig{ConstantinopleBlock: big.NewInt(30), PetersburgBlock: big.NewInt(30)}, - blockHeight: 40, - blockTimestamp: 400, - wantErr: nil, + stored: &ChainConfig{ConstantinopleBlock: big.NewInt(30)}, + new: &ChainConfig{ConstantinopleBlock: big.NewInt(30), PetersburgBlock: big.NewInt(30)}, + headBlock: 40, + headTimestamp: 400, + wantErr: nil, }, { - stored: &ChainConfig{ConstantinopleBlock: big.NewInt(30)}, - new: &ChainConfig{ConstantinopleBlock: big.NewInt(30), PetersburgBlock: big.NewInt(31)}, - blockHeight: 40, - blockTimestamp: 400, + stored: &ChainConfig{ConstantinopleBlock: big.NewInt(30)}, + new: &ChainConfig{ConstantinopleBlock: big.NewInt(30), PetersburgBlock: big.NewInt(31)}, + headBlock: 40, + headTimestamp: 400, wantErr: &ConfigCompatError{ - What: "Petersburg fork block", - StoredConfig: nil, - NewConfig: big.NewInt(31), - RewindTo: 30, + What: "Petersburg fork block", + StoredBlock: nil, + NewBlock: big.NewInt(31), + RewindToBlock: 30, }, }, { - stored: TestChainConfig, - new: TestApricotPhase4Config, - blockHeight: 0, - blockTimestamp: 0, + stored: TestChainConfig, + new: TestApricotPhase4Config, + headBlock: 0, + headTimestamp: 0, wantErr: &ConfigCompatError{ What: "ApricotPhase5 fork block timestamp", - StoredConfig: big.NewInt(0), - NewConfig: nil, - RewindTo: 0, + StoredTime: utils.NewUint64(0), + NewTime: nil, + RewindToTime: 0, }, }, { - stored: TestChainConfig, - new: TestApricotPhase4Config, - blockHeight: 10, - blockTimestamp: 100, + stored: TestChainConfig, + new: TestApricotPhase4Config, + headBlock: 10, + headTimestamp: 100, wantErr: &ConfigCompatError{ What: "ApricotPhase5 fork block timestamp", - StoredConfig: big.NewInt(0), - NewConfig: nil, - RewindTo: 0, + StoredTime: utils.NewUint64(0), + NewTime: nil, + RewindToTime: 0, }, }, } for _, test := range tests { - err := 
test.stored.CheckCompatible(test.new, test.blockHeight, test.blockTimestamp) + err := test.stored.CheckCompatible(test.new, test.headBlock, test.headTimestamp) if !reflect.DeepEqual(err, test.wantErr) { - t.Errorf("error mismatch:\nstored: %v\nnew: %v\nblockHeight: %v\nerr: %v\nwant: %v", test.stored, test.new, test.blockHeight, err, test.wantErr) + t.Errorf("error mismatch:\nstored: %v\nnew: %v\nblockHeight: %v\nerr: %v\nwant: %v", test.stored, test.new, test.headBlock, err, test.wantErr) } } } + +func TestConfigRules(t *testing.T) { + c := &ChainConfig{ + CortinaBlockTimestamp: utils.NewUint64(500), + } + var stamp uint64 + if r := c.AvalancheRules(big.NewInt(0), stamp); r.IsCortina { + t.Errorf("expected %v to not be cortina", stamp) + } + stamp = 500 + if r := c.AvalancheRules(big.NewInt(0), stamp); !r.IsCortina { + t.Errorf("expected %v to be cortina", stamp) + } + stamp = math.MaxInt64 + if r := c.AvalancheRules(big.NewInt(0), stamp); !r.IsCortina { + t.Errorf("expected %v to be cortina", stamp) + } +} diff --git a/coreth/params/constants.go b/coreth/params/constants.go new file mode 100644 index 00000000..79fbfd92 --- /dev/null +++ b/coreth/params/constants.go @@ -0,0 +1,96 @@ +package params + +import ( + "time" + + "github.com/ava-labs/coreth/constants" +) + +// Fork times: copied from avalanchego/version/constants.go +// There is an "import cycle" between coreth and avalanchego on Avalanche GitHub repository which lacks +// times for Flare and Songbird networks. 
+var ( + ApricotPhase1Times = map[uint32]time.Time{ + constants.MainnetID: time.Date(2021, time.March, 31, 14, 0, 0, 0, time.UTC), + constants.CostonID: time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC), + constants.SongbirdID: time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC), + } + + ApricotPhase2Times = map[uint32]time.Time{ + constants.MainnetID: time.Date(2021, time.May, 10, 11, 0, 0, 0, time.UTC), + constants.CostonID: time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC), + constants.SongbirdID: time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC), + } + + ApricotPhase3Times = map[uint32]time.Time{ + constants.MainnetID: time.Date(2021, time.August, 24, 14, 0, 0, 0, time.UTC), + constants.CostonID: time.Date(2022, time.February, 25, 14, 0, 0, 0, time.UTC), + constants.SongbirdID: time.Date(2022, time.March, 7, 14, 0, 0, 0, time.UTC), + } + + ApricotPhase4Times = map[uint32]time.Time{ + constants.MainnetID: time.Date(2021, time.September, 22, 21, 0, 0, 0, time.UTC), + constants.CostonID: time.Date(2022, time.February, 25, 15, 0, 0, 0, time.UTC), + constants.SongbirdID: time.Date(2022, time.March, 7, 15, 0, 0, 0, time.UTC), + } + + ApricotPhase5Times = map[uint32]time.Time{ + constants.MainnetID: time.Date(2021, time.December, 2, 18, 0, 0, 0, time.UTC), + constants.CostonID: time.Date(2022, time.February, 25, 16, 0, 0, 0, time.UTC), + constants.SongbirdID: time.Date(2022, time.March, 7, 16, 0, 0, 0, time.UTC), + } + + SongbirdTransitionTimes = map[uint32]time.Time{ + constants.SongbirdID: time.Date(2024, time.October, 29, 12, 0, 0, 0, time.UTC), + constants.CostonID: time.Date(2024, time.July, 23, 12, 0, 0, 0, time.UTC), + } + + ApricotPhasePre6Times = map[uint32]time.Time{ + constants.MainnetID: time.Date(2022, time.September, 5, 1, 30, 0, 0, time.UTC), + constants.FlareID: time.Date(2024, time.December, 17, 12, 0, 0, 0, time.UTC), + constants.CostwoID: time.Date(2024, time.November, 26, 12, 0, 0, 0, time.UTC), + constants.CostonID: 
time.Date(2025, time.January, 7, 12, 0, 0, 0, time.UTC), + constants.SongbirdID: time.Date(2025, time.January, 28, 12, 0, 0, 0, time.UTC), + } + + ApricotPhase6Times = map[uint32]time.Time{ + constants.MainnetID: time.Date(2022, time.September, 6, 20, 0, 0, 0, time.UTC), + constants.FlareID: time.Date(2024, time.December, 17, 13, 0, 0, 0, time.UTC), + constants.CostwoID: time.Date(2024, time.November, 26, 13, 0, 0, 0, time.UTC), + constants.CostonID: time.Date(2025, time.January, 7, 13, 0, 0, 0, time.UTC), + constants.SongbirdID: time.Date(2025, time.January, 28, 13, 0, 0, 0, time.UTC), + } + + ApricotPhasePost6Times = map[uint32]time.Time{ + constants.MainnetID: time.Date(2022, time.September, 7, 3, 0, 0, 0, time.UTC), + constants.FlareID: time.Date(2024, time.December, 17, 14, 0, 0, 0, time.UTC), + constants.CostwoID: time.Date(2024, time.November, 26, 14, 0, 0, 0, time.UTC), + constants.CostonID: time.Date(2025, time.January, 7, 14, 0, 0, 0, time.UTC), + constants.SongbirdID: time.Date(2025, time.January, 28, 14, 0, 0, 0, time.UTC), + } + + BanffTimes = map[uint32]time.Time{ + constants.MainnetID: time.Date(2022, time.October, 18, 16, 0, 0, 0, time.UTC), + constants.FlareID: time.Date(2024, time.December, 17, 15, 0, 0, 0, time.UTC), + constants.CostwoID: time.Date(2024, time.November, 26, 15, 0, 0, 0, time.UTC), + constants.CostonID: time.Date(2025, time.January, 7, 15, 0, 0, 0, time.UTC), + constants.SongbirdID: time.Date(2025, time.January, 28, 15, 0, 0, 0, time.UTC), + } + + CortinaTimes = map[uint32]time.Time{ + constants.MainnetID: time.Date(2023, time.April, 25, 15, 0, 0, 0, time.UTC), + constants.FlareID: time.Date(2025, time.May, 13, 12, 0, 0, 0, time.UTC), + constants.CostwoID: time.Date(2025, time.April, 8, 12, 0, 0, 0, time.UTC), + constants.CostonID: time.Date(2025, time.March, 27, 13, 0, 0, 0, time.UTC), + constants.SongbirdID: time.Date(2025, time.May, 6, 12, 0, 0, 0, time.UTC), + } + + DurangoTimes = map[uint32]time.Time{ + constants.MainnetID: 
time.Date(2024, time.March, 6, 16, 0, 0, 0, time.UTC), + constants.FlareID: time.Date(2025, time.August, 5, 12, 0, 0, 0, time.UTC), + constants.CostwoID: time.Date(2025, time.June, 24, 12, 0, 0, 0, time.UTC), + constants.CostonID: time.Date(2025, time.July, 1, 12, 0, 0, 0, time.UTC), + constants.SongbirdID: time.Date(2025, time.July, 22, 12, 0, 0, 0, time.UTC), + constants.LocalID: time.Date(10000, time.December, 1, 0, 0, 0, 0, time.UTC), + } +) diff --git a/coreth/params/precompile_upgrade.go b/coreth/params/precompile_upgrade.go new file mode 100644 index 00000000..8130db5d --- /dev/null +++ b/coreth/params/precompile_upgrade.go @@ -0,0 +1,234 @@ +// (c) 2023 Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package params + +import ( + "encoding/json" + "errors" + "fmt" + + "github.com/ava-labs/coreth/precompile/modules" + "github.com/ava-labs/coreth/precompile/precompileconfig" + "github.com/ava-labs/coreth/utils" + "github.com/ethereum/go-ethereum/common" +) + +var errNoKey = errors.New("PrecompileUpgrade cannot be empty") + +// PrecompileUpgrade is a helper struct embedded in UpgradeConfig. +// It is used to unmarshal the json into the correct precompile config type +// based on the key. Keys are defined in each precompile module, and registered in +// precompile/registry/registry.go. +type PrecompileUpgrade struct { + precompileconfig.Config +} + +// UnmarshalJSON unmarshals the json into the correct precompile config type +// based on the key. Keys are defined in each precompile module, and registered in +// precompile/registry/registry.go. 
+// Ex: {"feeManagerConfig": {...}} where "feeManagerConfig" is the key +func (u *PrecompileUpgrade) UnmarshalJSON(data []byte) error { + raw := make(map[string]json.RawMessage) + if err := json.Unmarshal(data, &raw); err != nil { + return err + } + if len(raw) == 0 { + return errNoKey + } + if len(raw) > 1 { + return fmt.Errorf("PrecompileUpgrade must have exactly one key, got %d", len(raw)) + } + for key, value := range raw { + module, ok := modules.GetPrecompileModule(key) + if !ok { + return fmt.Errorf("unknown precompile config: %s", key) + } + config := module.MakeConfig() + if err := json.Unmarshal(value, config); err != nil { + return err + } + u.Config = config + } + return nil +} + +// MarshalJSON marshal the precompile config into json based on the precompile key. +// Ex: {"feeManagerConfig": {...}} where "feeManagerConfig" is the key +func (u *PrecompileUpgrade) MarshalJSON() ([]byte, error) { + res := make(map[string]precompileconfig.Config) + res[u.Key()] = u.Config + return json.Marshal(res) +} + +// verifyPrecompileUpgrades checks [c.PrecompileUpgrades] is well formed: +// - [upgrades] must specify exactly one key per PrecompileUpgrade +// - the specified blockTimestamps must monotonically increase +// - the specified blockTimestamps must be compatible with those +// specified in the chainConfig by genesis. +// - check a precompile is disabled before it is re-enabled +func (c *ChainConfig) verifyPrecompileUpgrades() error { + // Store this struct to keep track of the last upgrade for each precompile key. + // Required for timestamp and disabled checks. + type lastUpgradeData struct { + blockTimestamp uint64 + disabled bool + } + + lastPrecompileUpgrades := make(map[string]lastUpgradeData) + + // next range over upgrades to verify correct use of disabled and blockTimestamps. + // previousUpgradeTimestamp is used to verify monotonically increasing timestamps. 
+ var previousUpgradeTimestamp *uint64 + for i, upgrade := range c.PrecompileUpgrades { + key := upgrade.Key() + + // lastUpgradeByKey is the previous processed upgrade for this precompile key. + lastUpgradeByKey, ok := lastPrecompileUpgrades[key] + var ( + disabled bool + lastTimestamp *uint64 + ) + if !ok { + disabled = true + lastTimestamp = nil + } else { + disabled = lastUpgradeByKey.disabled + lastTimestamp = utils.NewUint64(lastUpgradeByKey.blockTimestamp) + } + upgradeTimestamp := upgrade.Timestamp() + + if upgradeTimestamp == nil { + return fmt.Errorf("PrecompileUpgrade (%s) at [%d]: block timestamp cannot be nil ", key, i) + } + // Verify specified timestamps are monotonically increasing across all precompile keys. + // Note: It is OK for multiple configs of DIFFERENT keys to specify the same timestamp. + if previousUpgradeTimestamp != nil && *upgradeTimestamp < *previousUpgradeTimestamp { + return fmt.Errorf("PrecompileUpgrade (%s) at [%d]: config block timestamp (%v) < previous timestamp (%v)", key, i, *upgradeTimestamp, *previousUpgradeTimestamp) + } + + if disabled == upgrade.IsDisabled() { + return fmt.Errorf("PrecompileUpgrade (%s) at [%d]: disable should be [%v]", key, i, !disabled) + } + // Verify specified timestamps are monotonically increasing across same precompile keys. + // Note: It is NOT OK for multiple configs of the SAME key to specify the same timestamp. 
+ if lastTimestamp != nil && *upgradeTimestamp <= *lastTimestamp { + return fmt.Errorf("PrecompileUpgrade (%s) at [%d]: config block timestamp (%v) <= previous timestamp (%v) of same key", key, i, *upgradeTimestamp, *lastTimestamp) + } + + if err := upgrade.Verify(c); err != nil { + return err + } + + lastPrecompileUpgrades[key] = lastUpgradeData{ + disabled: upgrade.IsDisabled(), + blockTimestamp: *upgradeTimestamp, + } + + previousUpgradeTimestamp = upgradeTimestamp + } + + return nil +} + +// getActivePrecompileConfig returns the most recent precompile config corresponding to [address]. +// If none have occurred, returns nil. +func (c *ChainConfig) getActivePrecompileConfig(address common.Address, timestamp uint64) precompileconfig.Config { + configs := c.GetActivatingPrecompileConfigs(address, nil, timestamp, c.PrecompileUpgrades) + if len(configs) == 0 { + return nil + } + return configs[len(configs)-1] // return the most recent config +} + +// GetActivatingPrecompileConfigs returns all precompile upgrades configured to activate during the +// state transition from a block with timestamp [from] to a block with timestamp [to]. +func (c *ChainConfig) GetActivatingPrecompileConfigs(address common.Address, from *uint64, to uint64, upgrades []PrecompileUpgrade) []precompileconfig.Config { + // Get key from address. + module, ok := modules.GetPrecompileModuleByAddress(address) + if !ok { + return nil + } + configs := make([]precompileconfig.Config, 0) + key := module.ConfigKey + // Loop over all upgrades checking for the requested precompile config. + for _, upgrade := range upgrades { + if upgrade.Key() == key { + // Check if the precompile activates in the specified range. + if utils.IsForkTransition(upgrade.Timestamp(), from, to) { + configs = append(configs, upgrade.Config) + } + } + } + return configs +} + +// CheckPrecompilesCompatible checks if [precompileUpgrades] are compatible with [c] at [headTimestamp]. 
+// Returns a ConfigCompatError if upgrades already activated at [headTimestamp] are missing from +// [precompileUpgrades]. Upgrades not already activated may be modified or absent from [precompileUpgrades]. +// Returns nil if [precompileUpgrades] is compatible with [c]. +// Assumes given timestamp is the last accepted block timestamp. +// This ensures that as long as the node has not accepted a block with a different rule set it will allow a +// new upgrade to be applied as long as it activates after the last accepted block. +func (c *ChainConfig) CheckPrecompilesCompatible(precompileUpgrades []PrecompileUpgrade, time uint64) *ConfigCompatError { + for _, module := range modules.RegisteredModules() { + if err := c.checkPrecompileCompatible(module.Address, precompileUpgrades, time); err != nil { + return err + } + } + + return nil +} + +// checkPrecompileCompatible verifies that the precompile specified by [address] is compatible between [c] +// and [precompileUpgrades] at [headTimestamp]. +// Returns an error if upgrades already activated at [headTimestamp] are missing from [precompileUpgrades]. +// Upgrades that have already gone into effect cannot be modified or absent from [precompileUpgrades]. +func (c *ChainConfig) checkPrecompileCompatible(address common.Address, precompileUpgrades []PrecompileUpgrade, time uint64) *ConfigCompatError { + // All active upgrades (from nil to [lastTimestamp]) must match. + activeUpgrades := c.GetActivatingPrecompileConfigs(address, nil, time, c.PrecompileUpgrades) + newUpgrades := c.GetActivatingPrecompileConfigs(address, nil, time, precompileUpgrades) + + // Check activated upgrades are still present. + for i, upgrade := range activeUpgrades { + if len(newUpgrades) <= i { + // missing upgrade + return newTimestampCompatError( + fmt.Sprintf("missing PrecompileUpgrade[%d]", i), + upgrade.Timestamp(), + nil, + ) + } + // All upgrades that have activated must be identical. 
+ if !upgrade.Equal(newUpgrades[i]) { + return newTimestampCompatError( + fmt.Sprintf("PrecompileUpgrade[%d]", i), + upgrade.Timestamp(), + newUpgrades[i].Timestamp(), + ) + } + } + // then, make sure newUpgrades does not have additional upgrades + // that are already activated. (cannot perform retroactive upgrade) + if len(newUpgrades) > len(activeUpgrades) { + return newTimestampCompatError( + fmt.Sprintf("cannot retroactively enable PrecompileUpgrade[%d]", len(activeUpgrades)), + nil, + newUpgrades[len(activeUpgrades)].Timestamp(), // this indexes to the first element in newUpgrades after the end of activeUpgrades + ) + } + + return nil +} + +// EnabledStatefulPrecompiles returns current stateful precompile configs that are enabled at [blockTimestamp]. +func (c *ChainConfig) EnabledStatefulPrecompiles(blockTimestamp uint64) Precompiles { + statefulPrecompileConfigs := make(Precompiles) + for _, module := range modules.RegisteredModules() { + if config := c.getActivePrecompileConfig(module.Address, blockTimestamp); config != nil && !config.IsDisabled() { + statefulPrecompileConfigs[module.ConfigKey] = config + } + } + + return statefulPrecompileConfigs +} diff --git a/coreth/params/precompiles.go b/coreth/params/precompiles.go new file mode 100644 index 00000000..9b47b219 --- /dev/null +++ b/coreth/params/precompiles.go @@ -0,0 +1,36 @@ +// (c) 2023 Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package params + +import ( + "encoding/json" + + "github.com/ava-labs/coreth/precompile/modules" + "github.com/ava-labs/coreth/precompile/precompileconfig" +) + +type Precompiles map[string]precompileconfig.Config + +// UnmarshalJSON parses the JSON-encoded data into the ChainConfigPrecompiles. +// ChainConfigPrecompiles is a map of precompile module keys to their +// configuration. 
+func (ccp *Precompiles) UnmarshalJSON(data []byte) error { + raw := make(map[string]json.RawMessage) + if err := json.Unmarshal(data, &raw); err != nil { + return err + } + + *ccp = make(Precompiles) + for _, module := range modules.RegisteredModules() { + key := module.ConfigKey + if value, ok := raw[key]; ok { + conf := module.MakeConfig() + if err := json.Unmarshal(value, conf); err != nil { + return err + } + (*ccp)[key] = conf + } + } + return nil +} diff --git a/coreth/params/protocol_params.go b/coreth/params/protocol_params.go index 44839d6d..8c598d05 100644 --- a/coreth/params/protocol_params.go +++ b/coreth/params/protocol_params.go @@ -163,6 +163,10 @@ const ( Bls12381MapG1Gas uint64 = 5500 // Gas price for BLS12-381 mapping field element to G1 operation Bls12381MapG2Gas uint64 = 110000 // Gas price for BLS12-381 mapping field element to G2 operation + BlobTxDataGasPerBlob = 1 << 17 // Gas consumption of a single data blob (== blob byte size) + BlobTxMinDataGasprice = 1 // Minimum gas price for data blobs + BlobTxDataGaspriceUpdateFraction = 2225652 // Controls the maximum rate of change for data gas price + // Avalanche Stateful Precompile Params // Gas price for native asset balance lookup. Based on the cost of an SLOAD operation since native // asset balances are kept in state storage. 
diff --git a/coreth/params/version.go b/coreth/params/version.go index dbc62d1c..b63bd113 100644 --- a/coreth/params/version.go +++ b/coreth/params/version.go @@ -31,10 +31,10 @@ import ( ) const ( - VersionMajor = 1 // Major version component of the current release - VersionMinor = 11 // Minor version component of the current release - VersionPatch = 0 // Patch version component of the current release - VersionMeta = "unstable" // Version metadata to append to the version string + VersionMajor = 1 // Major version component of the current release + VersionMinor = 12 // Minor version component of the current release + VersionPatch = 0 // Patch version component of the current release + VersionMeta = "stable" // Version metadata to append to the version string ) // Version holds the textual version string. diff --git a/coreth/peer/client.go b/coreth/peer/client.go index 6a002d0e..41ebc7d6 100644 --- a/coreth/peer/client.go +++ b/coreth/peer/client.go @@ -4,6 +4,7 @@ package peer import ( + "context" "errors" "github.com/ava-labs/avalanchego/ids" @@ -23,15 +24,15 @@ type NetworkClient interface { // node version greater than or equal to minVersion. // Returns response bytes, the ID of the chosen peer, and ErrRequestFailed if // the request should be retried. - SendAppRequestAny(minVersion *version.Application, request []byte) ([]byte, ids.NodeID, error) + SendAppRequestAny(ctx context.Context, minVersion *version.Application, request []byte) ([]byte, ids.NodeID, error) // SendAppRequest synchronously sends request to the selected nodeID // Returns response bytes, and ErrRequestFailed if the request should be retried. - SendAppRequest(nodeID ids.NodeID, request []byte) ([]byte, error) + SendAppRequest(ctx context.Context, nodeID ids.NodeID, request []byte) ([]byte, error) // SendCrossChainRequest sends a request to a specific blockchain running on this node. // Returns response bytes, and ErrRequestFailed if the request failed. 
- SendCrossChainRequest(chainID ids.ID, request []byte) ([]byte, error) + SendCrossChainRequest(ctx context.Context, chainID ids.ID, request []byte) ([]byte, error) // Gossip sends given gossip message to peers Gossip(gossip []byte) error @@ -59,45 +60,34 @@ func NewNetworkClient(network Network) NetworkClient { // node version greater than or equal to minVersion. // Returns response bytes, the ID of the chosen peer, and ErrRequestFailed if // the request should be retried. -func (c *client) SendAppRequestAny(minVersion *version.Application, request []byte) ([]byte, ids.NodeID, error) { +func (c *client) SendAppRequestAny(ctx context.Context, minVersion *version.Application, request []byte) ([]byte, ids.NodeID, error) { waitingHandler := newWaitingResponseHandler() - nodeID, err := c.network.SendAppRequestAny(minVersion, request, waitingHandler) + nodeID, err := c.network.SendAppRequestAny(ctx, minVersion, request, waitingHandler) if err != nil { return nil, nodeID, err } - response := <-waitingHandler.responseChan - if waitingHandler.failed { - return nil, nodeID, ErrRequestFailed - } - return response, nodeID, nil + response, err := waitingHandler.WaitForResult(ctx) + return response, nodeID, err } // SendAppRequest synchronously sends request to the specified nodeID // Returns response bytes and ErrRequestFailed if the request should be retried. 
-func (c *client) SendAppRequest(nodeID ids.NodeID, request []byte) ([]byte, error) { +func (c *client) SendAppRequest(ctx context.Context, nodeID ids.NodeID, request []byte) ([]byte, error) { waitingHandler := newWaitingResponseHandler() - if err := c.network.SendAppRequest(nodeID, request, waitingHandler); err != nil { + if err := c.network.SendAppRequest(ctx, nodeID, request, waitingHandler); err != nil { return nil, err } - response := <-waitingHandler.responseChan - if waitingHandler.failed { - return nil, ErrRequestFailed - } - return response, nil + return waitingHandler.WaitForResult(ctx) } // SendCrossChainRequest synchronously sends request to the specified chainID // Returns response bytes and ErrRequestFailed if the request should be retried. -func (c *client) SendCrossChainRequest(chainID ids.ID, request []byte) ([]byte, error) { +func (c *client) SendCrossChainRequest(ctx context.Context, chainID ids.ID, request []byte) ([]byte, error) { waitingHandler := newWaitingResponseHandler() - if err := c.network.SendCrossChainRequest(chainID, request, waitingHandler); err != nil { + if err := c.network.SendCrossChainRequest(ctx, chainID, request, waitingHandler); err != nil { return nil, err } - response := <-waitingHandler.responseChan - if waitingHandler.failed { - return nil, ErrRequestFailed - } - return response, nil + return waitingHandler.WaitForResult(ctx) } func (c *client) Gossip(gossip []byte) error { diff --git a/coreth/peer/network.go b/coreth/peer/network.go index 4a88da28..1def7f8b 100644 --- a/coreth/peer/network.go +++ b/coreth/peer/network.go @@ -16,8 +16,10 @@ import ( "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/network/p2p" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/version" @@ -44,16 +46,16 @@ 
type Network interface { // node version greater than or equal to minVersion. // Returns the ID of the chosen peer, and an error if the request could not // be sent to a peer with the desired [minVersion]. - SendAppRequestAny(minVersion *version.Application, message []byte, handler message.ResponseHandler) (ids.NodeID, error) + SendAppRequestAny(ctx context.Context, minVersion *version.Application, message []byte, handler message.ResponseHandler) (ids.NodeID, error) // SendAppRequest sends message to given nodeID, notifying handler when there's a response or timeout - SendAppRequest(nodeID ids.NodeID, message []byte, handler message.ResponseHandler) error + SendAppRequest(ctx context.Context, nodeID ids.NodeID, message []byte, handler message.ResponseHandler) error // Gossip sends given gossip message to peers Gossip(gossip []byte) error // SendCrossChainRequest sends a message to given chainID notifying handler when there's a response or timeout - SendCrossChainRequest(chainID ids.ID, message []byte, handler message.ResponseHandler) error + SendCrossChainRequest(ctx context.Context, chainID ids.ID, message []byte, handler message.ResponseHandler) error // Shutdown stops all peer channel listeners and marks the node to have stopped // n.Start() can be called again but the peers will have to be reconnected @@ -75,6 +77,11 @@ type Network interface { // TrackBandwidth should be called for each valid request with the bandwidth // (length of response divided by request time), and with 0 if the response is invalid. 
TrackBandwidth(nodeID ids.NodeID, bandwidth float64) + + // NewClient returns a client to send messages with for the given protocol + NewClient(protocol uint64, options ...p2p.ClientOption) *p2p.Client + // AddHandler registers a server handler for an application protocol + AddHandler(protocol uint64, handler p2p.Handler) error } // network is an implementation of Network that processes message requests for @@ -86,18 +93,29 @@ type network struct { outstandingRequestHandlers map[uint32]message.ResponseHandler // maps avalanchego requestID => message.ResponseHandler activeAppRequests *semaphore.Weighted // controls maximum number of active outbound requests activeCrossChainRequests *semaphore.Weighted // controls maximum number of active outbound cross chain requests - appSender common.AppSender // avalanchego AppSender for sending messages - codec codec.Manager // Codec used for parsing messages - crossChainCodec codec.Manager // Codec used for parsing cross chain messages - appRequestHandler message.RequestHandler // maps request type => handler - crossChainRequestHandler message.CrossChainRequestHandler // maps cross chain request type => handler - gossipHandler message.GossipHandler // maps gossip type => handler - peers *peerTracker // tracking of peers & bandwidth - appStats stats.RequestHandlerStats // Provide request handler metrics - crossChainStats stats.RequestHandlerStats // Provide cross chain request handler metrics + p2pNetwork *p2p.Network + appSender common.AppSender // avalanchego AppSender for sending messages + codec codec.Manager // Codec used for parsing messages + crossChainCodec codec.Manager // Codec used for parsing cross chain messages + appRequestHandler message.RequestHandler // maps request type => handler + crossChainRequestHandler message.CrossChainRequestHandler // maps cross chain request type => handler + gossipHandler message.GossipHandler // maps gossip type => handler + peers *peerTracker // tracking of peers & bandwidth + 
appStats stats.RequestHandlerStats // Provide request handler metrics + crossChainStats stats.RequestHandlerStats // Provide cross chain request handler metrics + + // Set to true when Shutdown is called, after which all operations on this + // struct are no-ops. + // + // Invariant: Even though `closed` is an atomic, `lock` is required to be + // held when sending requests to guarantee that the network isn't closed + // during these calls. This is because closing the network cancels all + // outstanding requests, which means we must guarantee never to register a + // request that will never be fulfilled or cancelled. + closed utils.Atomic[bool] } -func NewNetwork(appSender common.AppSender, codec codec.Manager, crossChainCodec codec.Manager, self ids.NodeID, maxActiveAppRequests int64, maxActiveCrossChainRequests int64) Network { +func NewNetwork(p2pNetwork *p2p.Network, appSender common.AppSender, codec codec.Manager, crossChainCodec codec.Manager, self ids.NodeID, maxActiveAppRequests int64, maxActiveCrossChainRequests int64) Network { return &network{ appSender: appSender, codec: codec, @@ -106,6 +124,7 @@ func NewNetwork(appSender common.AppSender, codec codec.Manager, crossChainCodec outstandingRequestHandlers: make(map[uint32]message.ResponseHandler), activeAppRequests: semaphore.NewWeighted(maxActiveAppRequests), activeCrossChainRequests: semaphore.NewWeighted(maxActiveCrossChainRequests), + p2pNetwork: p2pNetwork, gossipHandler: message.NoopMempoolGossipHandler{}, appRequestHandler: message.NoopRequestHandler{}, crossChainRequestHandler: message.NoopCrossChainRequestHandler{}, @@ -120,16 +139,16 @@ func NewNetwork(appSender common.AppSender, codec codec.Manager, crossChainCodec // the request will be sent to any peer regardless of their version. // Returns the ID of the chosen peer, and an error if the request could not // be sent to a peer with the desired [minVersion]. 
-func (n *network) SendAppRequestAny(minVersion *version.Application, request []byte, handler message.ResponseHandler) (ids.NodeID, error) { +func (n *network) SendAppRequestAny(ctx context.Context, minVersion *version.Application, request []byte, handler message.ResponseHandler) (ids.NodeID, error) { // Take a slot from total [activeAppRequests] and block until a slot becomes available. - if err := n.activeAppRequests.Acquire(context.Background(), 1); err != nil { + if err := n.activeAppRequests.Acquire(ctx, 1); err != nil { return ids.EmptyNodeID, errAcquiringSemaphore } n.lock.Lock() defer n.lock.Unlock() if nodeID, ok := n.peers.GetAnyPeer(minVersion); ok { - return nodeID, n.sendAppRequest(nodeID, request, handler) + return nodeID, n.sendAppRequest(ctx, nodeID, request, handler) } n.activeAppRequests.Release(1) @@ -137,20 +156,20 @@ func (n *network) SendAppRequestAny(minVersion *version.Application, request []b } // SendAppRequest sends request message bytes to specified nodeID, notifying the responseHandler on response or failure -func (n *network) SendAppRequest(nodeID ids.NodeID, request []byte, responseHandler message.ResponseHandler) error { +func (n *network) SendAppRequest(ctx context.Context, nodeID ids.NodeID, request []byte, responseHandler message.ResponseHandler) error { if nodeID == ids.EmptyNodeID { return fmt.Errorf("cannot send request to empty nodeID, nodeID=%s, requestLen=%d", nodeID, len(request)) } // Take a slot from total [activeAppRequests] and block until a slot becomes available. 
- if err := n.activeAppRequests.Acquire(context.Background(), 1); err != nil { + if err := n.activeAppRequests.Acquire(ctx, 1); err != nil { return errAcquiringSemaphore } n.lock.Lock() defer n.lock.Unlock() - return n.sendAppRequest(nodeID, request, responseHandler) + return n.sendAppRequest(ctx, nodeID, request, responseHandler) } // sendAppRequest sends request message bytes to specified nodeID and adds [responseHandler] to [outstandingRequestHandlers] @@ -159,14 +178,16 @@ func (n *network) SendAppRequest(nodeID ids.NodeID, request []byte, responseHand // Releases active requests semaphore if there was an error in sending the request // Returns an error if [appSender] is unable to make the request. // Assumes write lock is held -func (n *network) sendAppRequest(nodeID ids.NodeID, request []byte, responseHandler message.ResponseHandler) error { +func (n *network) sendAppRequest(ctx context.Context, nodeID ids.NodeID, request []byte, responseHandler message.ResponseHandler) error { + if n.closed.Get() { + n.activeAppRequests.Release(1) + return nil + } + log.Debug("sending request to peer", "nodeID", nodeID, "requestLen", len(request)) n.peers.TrackPeer(nodeID) - // generate requestID - requestID := n.requestIDGen - n.requestIDGen++ - + requestID := n.nextRequestID() n.outstandingRequestHandlers[requestID] = responseHandler nodeIDs := set.NewSet[ids.NodeID](1) @@ -174,7 +195,7 @@ func (n *network) sendAppRequest(nodeID ids.NodeID, request []byte, responseHand // Send app request to [nodeID]. 
// On failure, release the slot from [activeAppRequests] and delete request from [outstandingRequestHandlers] - if err := n.appSender.SendAppRequest(context.TODO(), nodeIDs, requestID, request); err != nil { + if err := n.appSender.SendAppRequest(ctx, nodeIDs, requestID, request); err != nil { n.activeAppRequests.Release(1) delete(n.outstandingRequestHandlers, requestID) return err @@ -187,24 +208,26 @@ func (n *network) sendAppRequest(nodeID ids.NodeID, request []byte, responseHand // SendCrossChainRequest sends request message bytes to specified chainID and adds [handler] to [outstandingRequestHandlers] // so that it can be invoked when the network receives either a response or failure message. // Returns an error if [appSender] is unable to make the request. -func (n *network) SendCrossChainRequest(chainID ids.ID, request []byte, handler message.ResponseHandler) error { +func (n *network) SendCrossChainRequest(ctx context.Context, chainID ids.ID, request []byte, handler message.ResponseHandler) error { // Take a slot from total [activeCrossChainRequests] and block until a slot becomes available. - if err := n.activeCrossChainRequests.Acquire(context.Background(), 1); err != nil { + if err := n.activeCrossChainRequests.Acquire(ctx, 1); err != nil { return errAcquiringSemaphore } n.lock.Lock() defer n.lock.Unlock() - // generate requestID - requestID := n.requestIDGen - n.requestIDGen++ + if n.closed.Get() { + n.activeCrossChainRequests.Release(1) + return nil + } + requestID := n.nextRequestID() n.outstandingRequestHandlers[requestID] = handler // Send cross chain request to [chainID]. // On failure, release the slot from [activeCrossChainRequests] and delete request from [outstandingRequestHandlers]. 
- if err := n.appSender.SendCrossChainAppRequest(context.TODO(), chainID, requestID, request); err != nil { + if err := n.appSender.SendCrossChainAppRequest(ctx, chainID, requestID, request); err != nil { n.activeCrossChainRequests.Release(1) delete(n.outstandingRequestHandlers, requestID) return err @@ -218,6 +241,10 @@ func (n *network) SendCrossChainRequest(chainID ids.ID, request []byte, handler // Send a CrossChainAppResponse to [chainID] in response to a valid message using the same // [requestID] before the deadline. func (n *network) CrossChainAppRequest(ctx context.Context, requestingChainID ids.ID, requestID uint32, deadline time.Time, request []byte) error { + if n.closed.Get() { + return nil + } + log.Debug("received CrossChainAppRequest from chain", "requestingChainID", requestingChainID, "requestID", requestID, "requestLen", len(request)) var req message.CrossChainRequest @@ -254,16 +281,13 @@ func (n *network) CrossChainAppRequest(ctx context.Context, requestingChainID id // - request times out before a response is provided // If [requestID] is not known, this function will emit a log and return a nil error. // If the response handler returns an error it is propagated as a fatal error. -func (n *network) CrossChainAppRequestFailed(ctx context.Context, respondingChainID ids.ID, requestID uint32) error { - n.lock.Lock() - defer n.lock.Unlock() - +func (n *network) CrossChainAppRequestFailed(ctx context.Context, respondingChainID ids.ID, requestID uint32, _ *common.AppError) error { log.Debug("received CrossChainAppRequestFailed from chain", "respondingChainID", respondingChainID, "requestID", requestID) handler, exists := n.markRequestFulfilled(requestID) if !exists { - // Should never happen since the engine should be managing outstanding requests - log.Error("received CrossChainAppRequestFailed to unknown request", "respondingChainID", respondingChainID, "requestID", requestID) + // Can happen after the network has been closed. 
+ log.Debug("received CrossChainAppRequestFailed to unknown request", "respondingChainID", respondingChainID, "requestID", requestID) return nil } @@ -278,15 +302,12 @@ func (n *network) CrossChainAppRequestFailed(ctx context.Context, respondingChai // If [requestID] is not known, this function will emit a log and return a nil error. // If the response handler returns an error it is propagated as a fatal error. func (n *network) CrossChainAppResponse(ctx context.Context, respondingChainID ids.ID, requestID uint32, response []byte) error { - n.lock.Lock() - defer n.lock.Unlock() - log.Debug("received CrossChainAppResponse from responding chain", "respondingChainID", respondingChainID, "requestID", requestID) handler, exists := n.markRequestFulfilled(requestID) if !exists { - // Should never happen since the engine should be managing outstanding requests - log.Error("received CrossChainAppResponse to unknown request", "respondingChainID", respondingChainID, "requestID", requestID, "responseLen", len(response)) + // Can happen after the network has been closed. 
+ log.Debug("received CrossChainAppResponse to unknown request", "respondingChainID", respondingChainID, "requestID", requestID, "responseLen", len(response)) return nil } @@ -302,12 +323,16 @@ func (n *network) CrossChainAppResponse(ctx context.Context, respondingChainID i // sends a response back to the sender if length of response returned by the handler is >0 // expects the deadline to not have been passed func (n *network) AppRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, deadline time.Time, request []byte) error { + if n.closed.Get() { + return nil + } + log.Debug("received AppRequest from node", "nodeID", nodeID, "requestID", requestID, "requestLen", len(request)) var req message.Request if _, err := n.codec.Unmarshal(request, &req); err != nil { - log.Debug("failed to unmarshal app request", "nodeID", nodeID, "requestID", requestID, "requestLen", len(request), "err", err) - return nil + log.Debug("forwarding AppRequest to SDK network", "nodeID", nodeID, "requestID", requestID, "requestLen", len(request), "err", err) + return n.p2pNetwork.AppRequest(ctx, nodeID, requestID, deadline, request) } bufferedDeadline, err := calculateTimeUntilDeadline(deadline, n.appStats) @@ -337,17 +362,13 @@ func (n *network) AppRequest(ctx context.Context, nodeID ids.NodeID, requestID u // Error returned by this function is expected to be treated as fatal by the engine // If [requestID] is not known, this function will emit a log and return a nil error. // If the response handler returns an error it is propagated as a fatal error. 
-func (n *network) AppResponse(_ context.Context, nodeID ids.NodeID, requestID uint32, response []byte) error { - n.lock.Lock() - defer n.lock.Unlock() - +func (n *network) AppResponse(ctx context.Context, nodeID ids.NodeID, requestID uint32, response []byte) error { log.Debug("received AppResponse from peer", "nodeID", nodeID, "requestID", requestID) handler, exists := n.markRequestFulfilled(requestID) if !exists { - // Should never happen since the engine should be managing outstanding requests - log.Error("received AppResponse to unknown request", "nodeID", nodeID, "requestID", requestID, "responseLen", len(response)) - return nil + log.Debug("forwarding AppResponse to SDK network", "nodeID", nodeID, "requestID", requestID, "responseLen", len(response)) + return n.p2pNetwork.AppResponse(ctx, nodeID, requestID, response) } // We must release the slot @@ -362,17 +383,13 @@ func (n *network) AppResponse(_ context.Context, nodeID ids.NodeID, requestID ui // - request times out before a response is provided // error returned by this function is expected to be treated as fatal by the engine // returns error only when the response handler returns an error -func (n *network) AppRequestFailed(_ context.Context, nodeID ids.NodeID, requestID uint32) error { - n.lock.Lock() - defer n.lock.Unlock() - +func (n *network) AppRequestFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32, appErr *common.AppError) error { log.Debug("received AppRequestFailed from peer", "nodeID", nodeID, "requestID", requestID) handler, exists := n.markRequestFulfilled(requestID) if !exists { - // Should never happen since the engine should be managing outstanding requests - log.Error("received AppRequestFailed to unknown request", "nodeID", nodeID, "requestID", requestID) - return nil + log.Debug("forwarding AppRequestFailed to SDK network", "nodeID", nodeID, "requestID", requestID) + return n.p2pNetwork.AppRequestFailed(ctx, nodeID, requestID, appErr) } // We must release the slot @@ 
-405,8 +422,11 @@ func calculateTimeUntilDeadline(deadline time.Time, stats stats.RequestHandlerSt // markRequestFulfilled fetches the handler for [requestID] and marks the request with [requestID] as having been fulfilled. // This is called by either [AppResponse] or [AppRequestFailed]. -// Assumes that the write lock is held. +// Assumes that the write lock is not held. func (n *network) markRequestFulfilled(requestID uint32) (message.ResponseHandler, bool) { + n.lock.Lock() + defer n.lock.Unlock() + handler, exists := n.outstandingRequestHandlers[requestID] if !exists { return nil, false @@ -419,17 +439,21 @@ func (n *network) markRequestFulfilled(requestID uint32) (message.ResponseHandle // Gossip sends given gossip message to peers func (n *network) Gossip(gossip []byte) error { + if n.closed.Get() { + return nil + } + return n.appSender.SendAppGossip(context.TODO(), gossip) } -// AppGossip is called by avalanchego -> VM when there is an incoming AppGossip from a peer -// error returned by this function is expected to be treated as fatal by the engine -// returns error if request could not be parsed as message.Request or when the requestHandler returns an error -func (n *network) AppGossip(_ context.Context, nodeID ids.NodeID, gossipBytes []byte) error { +// AppGossip is called by avalanchego -> VM when there is an incoming AppGossip +// from a peer. An error returned by this function is treated as fatal by the +// engine. 
+func (n *network) AppGossip(ctx context.Context, nodeID ids.NodeID, gossipBytes []byte) error { var gossipMsg message.GossipMessage if _, err := n.codec.Unmarshal(gossipBytes, &gossipMsg); err != nil { - log.Debug("could not parse app gossip", "nodeID", nodeID, "gossipLen", len(gossipBytes), "err", err) - return nil + log.Debug("forwarding AppGossip to SDK network", "nodeID", nodeID, "gossipLen", len(gossipBytes), "err", err) + return n.p2pNetwork.AppGossip(ctx, nodeID, gossipBytes) } log.Debug("processing AppGossip from node", "nodeID", nodeID, "msg", gossipMsg) @@ -437,29 +461,37 @@ func (n *network) AppGossip(_ context.Context, nodeID ids.NodeID, gossipBytes [] } // Connected adds the given nodeID to the peer list so that it can receive messages -func (n *network) Connected(_ context.Context, nodeID ids.NodeID, nodeVersion *version.Application) error { +func (n *network) Connected(ctx context.Context, nodeID ids.NodeID, nodeVersion *version.Application) error { log.Debug("adding new peer", "nodeID", nodeID) n.lock.Lock() defer n.lock.Unlock() + if n.closed.Get() { + return nil + } + if nodeID == n.self { log.Debug("skipping registering self as peer") return nil } n.peers.Connected(nodeID, nodeVersion) - return nil + return n.p2pNetwork.Connected(ctx, nodeID, nodeVersion) } // Disconnected removes given [nodeID] from the peer list -func (n *network) Disconnected(_ context.Context, nodeID ids.NodeID) error { +func (n *network) Disconnected(ctx context.Context, nodeID ids.NodeID) error { log.Debug("disconnecting peer", "nodeID", nodeID) n.lock.Lock() defer n.lock.Unlock() + if n.closed.Get() { + return nil + } + n.peers.Disconnected(nodeID) - return nil + return n.p2pNetwork.Disconnected(ctx, nodeID) } // Shutdown disconnects all peers @@ -468,12 +500,13 @@ func (n *network) Shutdown() { defer n.lock.Unlock() // clean up any pending requests - for requestID := range n.outstandingRequestHandlers { + for requestID, handler := range n.outstandingRequestHandlers { + _ 
= handler.OnFailure() // make sure all waiting threads are unblocked delete(n.outstandingRequestHandlers, requestID) } - // reset peers - n.peers = NewPeerTracker() + n.peers = NewPeerTracker() // reset peers + n.closed.Set(true) // mark network as closed } func (n *network) SetGossipHandler(handler message.GossipHandler) { @@ -510,3 +543,23 @@ func (n *network) TrackBandwidth(nodeID ids.NodeID, bandwidth float64) { n.peers.TrackBandwidth(nodeID, bandwidth) } + +func (n *network) NewClient(protocol uint64, options ...p2p.ClientOption) *p2p.Client { + return n.p2pNetwork.NewClient(protocol, options...) +} + +func (n *network) AddHandler(protocol uint64, handler p2p.Handler) error { + return n.p2pNetwork.AddHandler(protocol, handler) +} + +// invariant: peer/network must use explicitly even request ids. +// for this reason, [n.requestID] is initialized as zero and incremented by 2. +// This is for backwards-compatibility while the SDK router exists with the +// legacy coreth handlers to avoid a (very) narrow edge case where request ids +// can overlap, resulting in a dropped timeout. 
+func (n *network) nextRequestID() uint32 { + next := n.requestIDGen + n.requestIDGen += 2 + + return next +} diff --git a/coreth/peer/network_test.go b/coreth/peer/network_test.go index 3c173115..d04e9420 100644 --- a/coreth/peer/network_test.go +++ b/coreth/peer/network_test.go @@ -12,13 +12,16 @@ import ( "testing" "time" + "github.com/ava-labs/avalanchego/network/p2p" "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/set" ethcommon "github.com/ethereum/go-ethereum/common" + "github.com/prometheus/client_golang/prometheus" "github.com/ava-labs/coreth/plugin/evm/message" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" @@ -48,11 +51,15 @@ var ( _ message.CrossChainRequest = &ExampleCrossChainRequest{} _ message.CrossChainRequestHandler = &testCrossChainHandler{} + + _ p2p.Handler = &testSDKHandler{} ) func TestNetworkDoesNotConnectToItself(t *testing.T) { selfNodeID := ids.GenerateTestNodeID() - n := NewNetwork(nil, nil, nil, selfNodeID, 1, 1) + p2pNetwork, err := p2p.NewNetwork(logging.NoLog{}, nil, prometheus.NewRegistry(), "") + require.NoError(t, err) + n := NewNetwork(p2pNetwork, nil, nil, nil, selfNodeID, 1, 1) assert.NoError(t, n.Connected(context.Background(), selfNodeID, defaultPeerVersion)) assert.EqualValues(t, 0, n.Size()) } @@ -62,7 +69,7 @@ func TestRequestAnyRequestsRoutingAndResponse(t *testing.T) { senderWg := &sync.WaitGroup{} var net Network sender := testAppSender{ - sendAppRequestFn: func(nodes set.Set[ids.NodeID], requestID uint32, requestBytes []byte) error { + sendAppRequestFn: func(_ context.Context, nodes set.Set[ids.NodeID], requestID uint32, requestBytes []byte) error { nodeID, _ := nodes.Pop() senderWg.Add(1) go func() { @@ -88,7 +95,9 @@ func TestRequestAnyRequestsRoutingAndResponse(t *testing.T) { codecManager := 
buildCodec(t, HelloRequest{}, HelloResponse{}) crossChainCodecManager := buildCodec(t, ExampleCrossChainRequest{}, ExampleCrossChainResponse{}) - net = NewNetwork(sender, codecManager, crossChainCodecManager, ids.EmptyNodeID, 16, 16) + p2pNetwork, err := p2p.NewNetwork(logging.NoLog{}, nil, prometheus.NewRegistry(), "") + require.NoError(t, err) + net = NewNetwork(p2pNetwork, sender, codecManager, crossChainCodecManager, ids.EmptyNodeID, 16, 16) net.SetRequestHandler(&HelloGreetingRequestHandler{codec: codecManager}) client := NewNetworkClient(net) nodeID := ids.GenerateTestNodeID() @@ -110,7 +119,7 @@ func TestRequestAnyRequestsRoutingAndResponse(t *testing.T) { defer wg.Done() requestBytes, err := message.RequestToBytes(codecManager, requestMessage) assert.NoError(t, err) - responseBytes, _, err := client.SendAppRequestAny(defaultPeerVersion, requestBytes) + responseBytes, _, err := client.SendAppRequestAny(context.Background(), defaultPeerVersion, requestBytes) assert.NoError(t, err) assert.NotNil(t, responseBytes) @@ -127,6 +136,37 @@ func TestRequestAnyRequestsRoutingAndResponse(t *testing.T) { assert.Equal(t, totalCalls, int(atomic.LoadUint32(&callNum))) } +func TestAppRequestOnCtxCancellation(t *testing.T) { + codecManager := buildCodec(t, HelloRequest{}, HelloResponse{}) + crossChainCodecManager := buildCodec(t, ExampleCrossChainRequest{}, ExampleCrossChainResponse{}) + + sender := testAppSender{ + sendAppRequestFn: func(_ context.Context, nodes set.Set[ids.NodeID], requestID uint32, requestBytes []byte) error { + return nil + }, + sendAppResponseFn: func(nodeID ids.NodeID, requestID uint32, responseBytes []byte) error { + return nil + }, + } + + p2pNetwork, err := p2p.NewNetwork(logging.NoLog{}, nil, prometheus.NewRegistry(), "") + require.NoError(t, err) + net := NewNetwork(p2pNetwork, sender, codecManager, crossChainCodecManager, ids.EmptyNodeID, 1, 1) + net.SetRequestHandler(&HelloGreetingRequestHandler{codec: codecManager}) + + requestMessage := 
HelloRequest{Message: "this is a request"} + requestBytes, err := message.RequestToBytes(codecManager, requestMessage) + assert.NoError(t, err) + + nodeID := ids.GenerateTestNodeID() + ctx, cancel := context.WithCancel(context.Background()) + // cancel context prior to sending + cancel() + client := NewNetworkClient(net) + _, err = client.SendAppRequest(ctx, nodeID, requestBytes) + assert.ErrorIs(t, err, context.Canceled) +} + func TestRequestRequestsRoutingAndResponse(t *testing.T) { callNum := uint32(0) senderWg := &sync.WaitGroup{} @@ -134,7 +174,7 @@ func TestRequestRequestsRoutingAndResponse(t *testing.T) { var lock sync.Mutex contactedNodes := make(map[ids.NodeID]struct{}) sender := testAppSender{ - sendAppRequestFn: func(nodes set.Set[ids.NodeID], requestID uint32, requestBytes []byte) error { + sendAppRequestFn: func(_ context.Context, nodes set.Set[ids.NodeID], requestID uint32, requestBytes []byte) error { nodeID, _ := nodes.Pop() lock.Lock() contactedNodes[nodeID] = struct{}{} @@ -163,7 +203,9 @@ func TestRequestRequestsRoutingAndResponse(t *testing.T) { codecManager := buildCodec(t, HelloRequest{}, HelloResponse{}) crossChainCodecManager := buildCodec(t, ExampleCrossChainRequest{}, ExampleCrossChainResponse{}) - net = NewNetwork(sender, codecManager, crossChainCodecManager, ids.EmptyNodeID, 16, 16) + p2pNetwork, err := p2p.NewNetwork(logging.NoLog{}, nil, prometheus.NewRegistry(), "") + require.NoError(t, err) + net = NewNetwork(p2pNetwork, sender, codecManager, crossChainCodecManager, ids.EmptyNodeID, 16, 16) net.SetRequestHandler(&HelloGreetingRequestHandler{codec: codecManager}) client := NewNetworkClient(net) @@ -195,7 +237,7 @@ func TestRequestRequestsRoutingAndResponse(t *testing.T) { defer wg.Done() requestBytes, err := message.RequestToBytes(codecManager, requestMessage) assert.NoError(t, err) - responseBytes, err := client.SendAppRequest(nodeID, requestBytes) + responseBytes, err := client.SendAppRequest(context.Background(), nodeID, 
requestBytes) assert.NoError(t, err) assert.NotNil(t, responseBytes) @@ -217,11 +259,134 @@ func TestRequestRequestsRoutingAndResponse(t *testing.T) { } // ensure empty nodeID is not allowed - _, err := client.SendAppRequest(ids.EmptyNodeID, []byte("hello there")) + _, err = client.SendAppRequest(context.Background(), ids.EmptyNodeID, []byte("hello there")) assert.Error(t, err) assert.Contains(t, err.Error(), "cannot send request to empty nodeID") } +func TestAppRequestOnShutdown(t *testing.T) { + var ( + net Network + wg sync.WaitGroup + called bool + ) + sender := testAppSender{ + sendAppRequestFn: func(_ context.Context, nodes set.Set[ids.NodeID], requestID uint32, requestBytes []byte) error { + wg.Add(1) + go func() { + called = true + // shutdown the network here to ensure any outstanding requests are handled as failed + net.Shutdown() + wg.Done() + }() // this is on a goroutine to avoid a deadlock since calling Shutdown takes the lock. + return nil + }, + } + + codecManager := buildCodec(t, HelloRequest{}, HelloResponse{}) + crossChainCodecManager := buildCodec(t, ExampleCrossChainRequest{}, ExampleCrossChainResponse{}) + p2pNetwork, err := p2p.NewNetwork(logging.NoLog{}, nil, prometheus.NewRegistry(), "") + require.NoError(t, err) + net = NewNetwork(p2pNetwork, sender, codecManager, crossChainCodecManager, ids.EmptyNodeID, 1, 1) + client := NewNetworkClient(net) + nodeID := ids.GenerateTestNodeID() + require.NoError(t, net.Connected(context.Background(), nodeID, defaultPeerVersion)) + + requestMessage := HelloRequest{Message: "this is a request"} + require.NoError(t, net.Connected(context.Background(), nodeID, defaultPeerVersion)) + + wg.Add(1) + go func() { + defer wg.Done() + requestBytes, err := message.RequestToBytes(codecManager, requestMessage) + require.NoError(t, err) + responseBytes, _, err := client.SendAppRequestAny(context.Background(), defaultPeerVersion, requestBytes) + require.Error(t, err, ErrRequestFailed) + require.Nil(t, responseBytes) + 
}() + wg.Wait() + require.True(t, called) +} + +func TestAppRequestAnyOnCtxCancellation(t *testing.T) { + codecManager := buildCodec(t, HelloRequest{}, HelloResponse{}) + crossChainCodecManager := buildCodec(t, ExampleCrossChainRequest{}, ExampleCrossChainResponse{}) + + type reqInfo struct { + nodeID ids.NodeID + requestID uint32 + } + sentAppRequest := make(chan reqInfo, 1) + + sender := testAppSender{ + sendAppRequestFn: func(ctx context.Context, nodes set.Set[ids.NodeID], requestID uint32, requestBytes []byte) error { + if err := ctx.Err(); err != nil { + return err + } + + assert.Len(t, nodes, 1) + sentAppRequest <- reqInfo{ + nodeID: nodes.List()[0], + requestID: requestID, + } + return nil + }, + sendAppResponseFn: func(nodeID ids.NodeID, requestID uint32, responseBytes []byte) error { + return nil + }, + } + + p2pNetwork, err := p2p.NewNetwork(logging.NoLog{}, nil, prometheus.NewRegistry(), "") + require.NoError(t, err) + net := NewNetwork(p2pNetwork, sender, codecManager, crossChainCodecManager, ids.EmptyNodeID, 1, 1) + net.SetRequestHandler(&HelloGreetingRequestHandler{codec: codecManager}) + assert.NoError(t, + net.Connected( + context.Background(), + ids.GenerateTestNodeID(), + version.CurrentApp, + ), + ) + + requestMessage := HelloRequest{Message: "this is a request"} + requestBytes, err := message.RequestToBytes(codecManager, requestMessage) + assert.NoError(t, err) + + // cancel context prior to sending + ctx, cancel := context.WithCancel(context.Background()) + cancel() + client := NewNetworkClient(net) + _, _, err = client.SendAppRequestAny(ctx, defaultPeerVersion, requestBytes) + assert.ErrorIs(t, err, context.Canceled) + // Assert we didn't send anything + select { + case <-sentAppRequest: + assert.FailNow(t, "should not have sent request") + default: + } + + // Cancel context after sending + assert.Empty(t, net.(*network).outstandingRequestHandlers) // no outstanding requests + ctx, cancel = context.WithCancel(context.Background()) + doneChan 
:= make(chan struct{}) + go func() { + _, _, err = client.SendAppRequestAny(ctx, defaultPeerVersion, requestBytes) + assert.ErrorIs(t, err, context.Canceled) + close(doneChan) + }() + // Wait until we've "sent" the app request over the network + // before cancelling context. + sentAppRequestInfo := <-sentAppRequest + assert.Len(t, net.(*network).outstandingRequestHandlers, 1) + cancel() + <-doneChan + // Should still be able to process a response after cancelling. + assert.Len(t, net.(*network).outstandingRequestHandlers, 1) // context cancellation SendAppRequestAny failure doesn't clear + err = net.AppResponse(context.Background(), sentAppRequestInfo.nodeID, sentAppRequestInfo.requestID, []byte{}) + assert.NoError(t, err) + assert.Empty(t, net.(*network).outstandingRequestHandlers) // Received response +} + func TestRequestMinVersion(t *testing.T) { callNum := uint32(0) nodeID := ids.GenerateTestNodeID() @@ -230,7 +395,7 @@ func TestRequestMinVersion(t *testing.T) { var net Network sender := testAppSender{ - sendAppRequestFn: func(nodes set.Set[ids.NodeID], reqID uint32, messageBytes []byte) error { + sendAppRequestFn: func(_ context.Context, nodes set.Set[ids.NodeID], reqID uint32, messageBytes []byte) error { atomic.AddUint32(&callNum, 1) assert.True(t, nodes.Contains(nodeID), "request nodes should contain expected nodeID") assert.Len(t, nodes, 1, "request nodes should contain exactly one node") @@ -250,7 +415,9 @@ func TestRequestMinVersion(t *testing.T) { } // passing nil as codec works because the net.AppRequest is never called - net = NewNetwork(sender, codecManager, crossChainCodecManager, ids.EmptyNodeID, 1, 16) + p2pNetwork, err := p2p.NewNetwork(logging.NoLog{}, nil, prometheus.NewRegistry(), "") + require.NoError(t, err) + net = NewNetwork(p2pNetwork, sender, codecManager, crossChainCodecManager, ids.EmptyNodeID, 1, 16) client := NewNetworkClient(net) requestMessage := TestMessage{Message: "this is a request"} requestBytes, err := 
message.RequestToBytes(codecManager, requestMessage) @@ -260,6 +427,7 @@ func TestRequestMinVersion(t *testing.T) { context.Background(), nodeID, &version.Application{ + Name: version.Client, Major: 1, Minor: 7, Patch: 1, @@ -269,18 +437,20 @@ func TestRequestMinVersion(t *testing.T) { // ensure version does not match responseBytes, _, err := client.SendAppRequestAny( + context.Background(), &version.Application{ + Name: version.Client, Major: 2, Minor: 0, Patch: 0, }, requestBytes, ) - assert.Equal(t, err.Error(), "no peers found matching version avalanche/2.0.0 out of 1 peers") + assert.Equal(t, err.Error(), "no peers found matching version avalanchego/2.0.0 out of 1 peers") assert.Nil(t, responseBytes) // ensure version matches and the request goes through - responseBytes, _, err = client.SendAppRequestAny(defaultPeerVersion, requestBytes) + responseBytes, _, err = client.SendAppRequestAny(context.Background(), defaultPeerVersion, requestBytes) assert.NoError(t, err) var response TestMessage @@ -294,7 +464,7 @@ func TestOnRequestHonoursDeadline(t *testing.T) { var net Network responded := false sender := testAppSender{ - sendAppRequestFn: func(nodes set.Set[ids.NodeID], reqID uint32, message []byte) error { + sendAppRequestFn: func(_ context.Context, nodes set.Set[ids.NodeID], reqID uint32, message []byte) error { return nil }, sendAppResponseFn: func(nodeID ids.NodeID, reqID uint32, message []byte) error { @@ -313,7 +483,9 @@ func TestOnRequestHonoursDeadline(t *testing.T) { processingDuration: 500 * time.Millisecond, } - net = NewNetwork(sender, codecManager, crossChainCodecManager, ids.EmptyNodeID, 1, 1) + p2pNetwork, err := p2p.NewNetwork(logging.NoLog{}, nil, prometheus.NewRegistry(), "") + require.NoError(t, err) + net = NewNetwork(p2pNetwork, sender, codecManager, crossChainCodecManager, ids.EmptyNodeID, 1, 1) net.SetRequestHandler(requestHandler) nodeID := ids.GenerateTestNodeID() @@ -353,7 +525,9 @@ func TestGossip(t *testing.T) { } gossipHandler := 
&testGossipHandler{} - clientNetwork = NewNetwork(sender, codecManager, crossChainCodecManager, ids.EmptyNodeID, 1, 1) + p2pNetwork, err := p2p.NewNetwork(logging.NoLog{}, nil, prometheus.NewRegistry(), "") + require.NoError(t, err) + clientNetwork = NewNetwork(p2pNetwork, sender, codecManager, crossChainCodecManager, ids.EmptyNodeID, 1, 1) clientNetwork.SetGossipHandler(gossipHandler) assert.NoError(t, clientNetwork.Connected(context.Background(), nodeID, defaultPeerVersion)) @@ -380,7 +554,9 @@ func TestHandleInvalidMessages(t *testing.T) { requestID := uint32(1) sender := testAppSender{} - clientNetwork := NewNetwork(sender, codecManager, crossChainCodecManager, ids.EmptyNodeID, 1, 1) + p2pNetwork, err := p2p.NewNetwork(logging.NoLog{}, nil, prometheus.NewRegistry(), "") + require.NoError(t, err) + clientNetwork := NewNetwork(p2pNetwork, sender, codecManager, crossChainCodecManager, ids.EmptyNodeID, 1, 1) clientNetwork.SetGossipHandler(message.NoopMempoolGossipHandler{}) clientNetwork.SetRequestHandler(&testRequestHandler{}) @@ -414,12 +590,11 @@ func TestHandleInvalidMessages(t *testing.T) { assert.NoError(t, clientNetwork.AppRequest(context.Background(), nodeID, requestID, time.Now().Add(time.Second), garbageResponse)) assert.NoError(t, clientNetwork.AppRequest(context.Background(), nodeID, requestID, time.Now().Add(time.Second), emptyResponse)) assert.NoError(t, clientNetwork.AppRequest(context.Background(), nodeID, requestID, time.Now().Add(time.Second), nilResponse)) - assert.NoError(t, clientNetwork.AppResponse(context.Background(), nodeID, requestID, gossipMsg)) - assert.NoError(t, clientNetwork.AppResponse(context.Background(), nodeID, requestID, requestMessage)) - assert.NoError(t, clientNetwork.AppResponse(context.Background(), nodeID, requestID, garbageResponse)) - assert.NoError(t, clientNetwork.AppResponse(context.Background(), nodeID, requestID, emptyResponse)) - assert.NoError(t, clientNetwork.AppResponse(context.Background(), nodeID, requestID, 
nilResponse)) - assert.NoError(t, clientNetwork.AppRequestFailed(context.Background(), nodeID, requestID)) + assert.ErrorIs(t, p2p.ErrUnrequestedResponse, clientNetwork.AppResponse(context.Background(), nodeID, requestID, gossipMsg)) + assert.ErrorIs(t, p2p.ErrUnrequestedResponse, clientNetwork.AppResponse(context.Background(), nodeID, requestID, requestMessage)) + assert.ErrorIs(t, p2p.ErrUnrequestedResponse, clientNetwork.AppResponse(context.Background(), nodeID, requestID, garbageResponse)) + assert.ErrorIs(t, p2p.ErrUnrequestedResponse, clientNetwork.AppResponse(context.Background(), nodeID, requestID, emptyResponse)) + assert.ErrorIs(t, p2p.ErrUnrequestedResponse, clientNetwork.AppResponse(context.Background(), nodeID, requestID, nilResponse)) } func TestNetworkPropagatesRequestHandlerError(t *testing.T) { @@ -430,7 +605,9 @@ func TestNetworkPropagatesRequestHandlerError(t *testing.T) { requestID := uint32(1) sender := testAppSender{} - clientNetwork := NewNetwork(sender, codecManager, crossChainCodecManager, ids.EmptyNodeID, 1, 1) + p2pNetwork, err := p2p.NewNetwork(logging.NoLog{}, nil, prometheus.NewRegistry(), "") + require.NoError(t, err) + clientNetwork := NewNetwork(p2pNetwork, sender, codecManager, crossChainCodecManager, ids.EmptyNodeID, 1, 1) clientNetwork.SetGossipHandler(message.NoopMempoolGossipHandler{}) clientNetwork.SetRequestHandler(&testRequestHandler{err: errors.New("fail")}) // Return an error from the request handler @@ -470,7 +647,9 @@ func TestCrossChainAppRequest(t *testing.T) { }, } - net = NewNetwork(sender, codecManager, crossChainCodecManager, ids.EmptyNodeID, 1, 1) + p2pNetwork, err := p2p.NewNetwork(logging.NoLog{}, nil, prometheus.NewRegistry(), "") + require.NoError(t, err) + net = NewNetwork(p2pNetwork, sender, codecManager, crossChainCodecManager, ids.EmptyNodeID, 1, 1) net.SetCrossChainRequestHandler(&testCrossChainHandler{codec: crossChainCodecManager}) client := NewNetworkClient(net) @@ -482,7 +661,7 @@ func 
TestCrossChainAppRequest(t *testing.T) { assert.NoError(t, err) chainID := ids.ID(ethcommon.BytesToHash([]byte{1, 2, 3, 4, 5})) - responseBytes, err := client.SendCrossChainRequest(chainID, crossChainRequest) + responseBytes, err := client.SendCrossChainRequest(context.Background(), chainID, crossChainRequest) assert.NoError(t, err) var response ExampleCrossChainResponse @@ -492,6 +671,40 @@ func TestCrossChainAppRequest(t *testing.T) { assert.Equal(t, "this is an example response", response.Response) } +func TestCrossChainAppRequestOnCtxCancellation(t *testing.T) { + codecManager := buildCodec(t, TestMessage{}) + crossChainCodecManager := buildCodec(t, ExampleCrossChainRequest{}, ExampleCrossChainResponse{}) + + sender := testAppSender{ + sendCrossChainAppRequestFn: func(requestingChainID ids.ID, requestID uint32, requestBytes []byte) error { + return nil + }, + sendCrossChainAppResponseFn: func(respondingChainID ids.ID, requestID uint32, responseBytes []byte) error { + return nil + }, + } + + p2pNetwork, err := p2p.NewNetwork(logging.NoLog{}, nil, prometheus.NewRegistry(), "") + require.NoError(t, err) + net := NewNetwork(p2pNetwork, sender, codecManager, crossChainCodecManager, ids.EmptyNodeID, 1, 1) + net.SetCrossChainRequestHandler(&testCrossChainHandler{codec: crossChainCodecManager}) + + exampleCrossChainRequest := ExampleCrossChainRequest{ + Message: "hello this is an example request", + } + + crossChainRequest, err := buildCrossChainRequest(crossChainCodecManager, exampleCrossChainRequest) + assert.NoError(t, err) + + chainID := ids.ID(ethcommon.BytesToHash([]byte{1, 2, 3, 4, 5})) + ctx, cancel := context.WithCancel(context.Background()) + // cancel context prior to sending + cancel() + client := NewNetworkClient(net) + _, err = client.SendCrossChainRequest(ctx, chainID, crossChainRequest) + assert.ErrorIs(t, err, context.Canceled) +} + func TestCrossChainRequestRequestsRoutingAndResponse(t *testing.T) { var ( callNum uint32 @@ -525,7 +738,9 @@ func 
TestCrossChainRequestRequestsRoutingAndResponse(t *testing.T) { codecManager := buildCodec(t, TestMessage{}) crossChainCodecManager := buildCodec(t, ExampleCrossChainRequest{}, ExampleCrossChainResponse{}) - net = NewNetwork(sender, codecManager, crossChainCodecManager, ids.EmptyNodeID, 1, 1) + p2pNetwork, err := p2p.NewNetwork(logging.NoLog{}, nil, prometheus.NewRegistry(), "") + require.NoError(t, err) + net = NewNetwork(p2pNetwork, sender, codecManager, crossChainCodecManager, ids.EmptyNodeID, 1, 1) net.SetCrossChainRequestHandler(&testCrossChainHandler{codec: crossChainCodecManager}) client := NewNetworkClient(net) @@ -548,7 +763,7 @@ func TestCrossChainRequestRequestsRoutingAndResponse(t *testing.T) { defer requestWg.Done() crossChainRequest, err := buildCrossChainRequest(crossChainCodecManager, exampleCrossChainRequest) assert.NoError(t, err) - responseBytes, err := client.SendCrossChainRequest(chainID, crossChainRequest) + responseBytes, err := client.SendCrossChainRequest(context.Background(), chainID, crossChainRequest) assert.NoError(t, err) assert.NotNil(t, responseBytes) @@ -565,9 +780,114 @@ func TestCrossChainRequestRequestsRoutingAndResponse(t *testing.T) { assert.Equal(t, totalCalls, int(atomic.LoadUint32(&callNum))) } +func TestCrossChainRequestOnShutdown(t *testing.T) { + var ( + net Network + wg sync.WaitGroup + called bool + ) + sender := testAppSender{ + sendCrossChainAppRequestFn: func(requestingChainID ids.ID, requestID uint32, requestBytes []byte) error { + wg.Add(1) + go func() { + called = true + // shutdown the network here to ensure any outstanding requests are handled as failed + net.Shutdown() + wg.Done() + }() // this is on a goroutine to avoid a deadlock since calling Shutdown takes the lock. 
+ return nil + }, + } + codecManager := buildCodec(t, TestMessage{}) + crossChainCodecManager := buildCodec(t, ExampleCrossChainRequest{}, ExampleCrossChainResponse{}) + p2pNetwork, err := p2p.NewNetwork(logging.NoLog{}, nil, prometheus.NewRegistry(), "") + require.NoError(t, err) + net = NewNetwork(p2pNetwork, sender, codecManager, crossChainCodecManager, ids.EmptyNodeID, 1, 1) + client := NewNetworkClient(net) + + exampleCrossChainRequest := ExampleCrossChainRequest{ + Message: "hello this is an example request", + } + chainID := ids.ID(ethcommon.BytesToHash([]byte{1, 2, 3, 4, 5})) + + wg.Add(1) + go func() { + defer wg.Done() + crossChainRequest, err := buildCrossChainRequest(crossChainCodecManager, exampleCrossChainRequest) + require.NoError(t, err) + responseBytes, err := client.SendCrossChainRequest(context.Background(), chainID, crossChainRequest) + require.ErrorIs(t, err, ErrRequestFailed) + require.Nil(t, responseBytes) + }() + wg.Wait() + require.True(t, called) +} + +func TestNetworkAppRequestAfterShutdown(t *testing.T) { + require := require.New(t) + + net := NewNetwork(nil, nil, nil, nil, ids.EmptyNodeID, 1, 0) + net.Shutdown() + + require.NoError(net.SendAppRequest(context.Background(), ids.GenerateTestNodeID(), nil, nil)) + require.NoError(net.SendAppRequest(context.Background(), ids.GenerateTestNodeID(), nil, nil)) +} + +func TestNetworkCrossChainAppRequestAfterShutdown(t *testing.T) { + require := require.New(t) + + net := NewNetwork(nil, nil, nil, nil, ids.EmptyNodeID, 0, 1) + net.Shutdown() + + require.NoError(net.SendCrossChainRequest(context.Background(), ids.GenerateTestID(), nil, nil)) + require.NoError(net.SendCrossChainRequest(context.Background(), ids.GenerateTestID(), nil, nil)) +} + +func TestNetworkRouting(t *testing.T) { + require := require.New(t) + sender := &testAppSender{ + sendAppRequestFn: func(_ context.Context, s set.Set[ids.NodeID], u uint32, bytes []byte) error { + return nil + }, + sendAppResponseFn: func(id ids.NodeID, u 
uint32, bytes []byte) error { + return nil + }, + } + protocol := 0 + handler := &testSDKHandler{} + p2pNetwork, err := p2p.NewNetwork(logging.NoLog{}, sender, prometheus.NewRegistry(), "") + require.NoError(err) + require.NoError(p2pNetwork.AddHandler(uint64(protocol), handler)) + + networkCodec := codec.NewManager(0) + crossChainCodec := codec.NewManager(0) + + network := NewNetwork( + p2pNetwork, + nil, + networkCodec, + crossChainCodec, + ids.EmptyNodeID, + 1, + 1, + ) + + nodeID := ids.GenerateTestNodeID() + foobar := append([]byte{byte(protocol)}, []byte("foobar")...) + err = network.AppRequest(context.Background(), nodeID, 0, time.Time{}, foobar) + require.NoError(err) + require.True(handler.appRequested) + + err = network.AppResponse(context.Background(), ids.GenerateTestNodeID(), 0, foobar) + require.ErrorIs(err, p2p.ErrUnrequestedResponse) + + err = network.AppRequestFailed(context.Background(), nodeID, 0, common.ErrTimeout) + require.ErrorIs(err, p2p.ErrUnrequestedResponse) +} + func buildCodec(t *testing.T, types ...interface{}) codec.Manager { codecManager := codec.NewDefaultManager() - c := linearcodec.NewDefault() + c := linearcodec.NewDefault(time.Time{}) for _, typ := range types { assert.NoError(t, c.RegisterType(typ)) } @@ -592,7 +912,7 @@ func buildCrossChainRequest(codec codec.Manager, msg message.CrossChainRequest) type testAppSender struct { sendCrossChainAppRequestFn func(ids.ID, uint32, []byte) error sendCrossChainAppResponseFn func(ids.ID, uint32, []byte) error - sendAppRequestFn func(set.Set[ids.NodeID], uint32, []byte) error + sendAppRequestFn func(context.Context, set.Set[ids.NodeID], uint32, []byte) error sendAppResponseFn func(ids.NodeID, uint32, []byte) error sendAppGossipFn func([]byte) error } @@ -609,8 +929,8 @@ func (t testAppSender) SendAppGossipSpecific(context.Context, set.Set[ids.NodeID panic("not implemented") } -func (t testAppSender) SendAppRequest(_ context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32, message 
[]byte) error { - return t.sendAppRequestFn(nodeIDs, requestID, message) +func (t testAppSender) SendAppRequest(ctx context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32, message []byte) error { + return t.sendAppRequestFn(ctx, nodeIDs, requestID, message) } func (t testAppSender) SendAppResponse(_ context.Context, nodeID ids.NodeID, requestID uint32, message []byte) error { @@ -766,3 +1086,22 @@ type testCrossChainHandler struct { func (t *testCrossChainHandler) HandleCrossChainRequest(ctx context.Context, requestingChainID ids.ID, requestID uint32, exampleRequest message.CrossChainRequest) ([]byte, error) { return t.codec.Marshal(message.Version, ExampleCrossChainResponse{Response: "this is an example response"}) } + +type testSDKHandler struct { + appRequested bool +} + +func (t *testSDKHandler) AppGossip(ctx context.Context, nodeID ids.NodeID, gossipBytes []byte) { + // TODO implement me + panic("implement me") +} + +func (t *testSDKHandler) AppRequest(ctx context.Context, nodeID ids.NodeID, deadline time.Time, requestBytes []byte) ([]byte, error) { + t.appRequested = true + return nil, nil +} + +func (t *testSDKHandler) CrossChainAppRequest(ctx context.Context, chainID ids.ID, deadline time.Time, requestBytes []byte) ([]byte, error) { + // TODO implement me + panic("implement me") +} diff --git a/coreth/peer/peer_tracker_test.go b/coreth/peer/peer_tracker_test.go new file mode 100644 index 00000000..f4a510fc --- /dev/null +++ b/coreth/peer/peer_tracker_test.go @@ -0,0 +1,88 @@ +// (c) 2019-2022, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package peer + +import ( + "testing" + + "github.com/ava-labs/avalanchego/ids" + "github.com/stretchr/testify/require" +) + +func TestPeerTracker(t *testing.T) { + require := require.New(t) + p := NewPeerTracker() + + // Connect some peers + numExtraPeers := 10 + numPeers := desiredMinResponsivePeers + numExtraPeers + peerIDs := make([]ids.NodeID, numPeers) + + for i := range peerIDs { + peerIDs[i] = ids.GenerateTestNodeID() + p.Connected(peerIDs[i], defaultPeerVersion) + } + + responsivePeers := make(map[ids.NodeID]bool) + + // Expect requests to go to new peers until we have desiredMinResponsivePeers responsive peers. + for i := 0; i < desiredMinResponsivePeers+numExtraPeers/2; i++ { + peer, ok := p.GetAnyPeer(nil) + require.True(ok) + require.NotNil(peer) + + _, exists := responsivePeers[peer] + require.Falsef(exists, "expected connecting to a new peer, but got the same peer twice: peer %s iteration %d", peer, i) + responsivePeers[peer] = true + + p.TrackPeer(peer) // mark the peer as having a message sent to it + } + + // Mark some peers as responsive and others as not responsive + i := 0 + for peer := range responsivePeers { + if i < desiredMinResponsivePeers { + p.TrackBandwidth(peer, 10) + } else { + responsivePeers[peer] = false // remember which peers were not responsive + p.TrackBandwidth(peer, 0) + } + i++ + } + + // Expect requests to go to responsive or new peers, so long as they are available + numRequests := 50 + for i := 0; i < numRequests; i++ { + peer, ok := p.GetAnyPeer(nil) + require.True(ok) + require.NotNil(peer) + + responsive, ok := responsivePeers[peer] + if ok { + require.Truef(responsive, "expected connecting to a responsive peer, but got a peer that was not responsive: peer %s iteration %d", peer, i) + p.TrackBandwidth(peer, 10) + } else { + responsivePeers[peer] = false // remember that we connected to this peer + p.TrackPeer(peer) // mark the peer as having a message sent to it + p.TrackBandwidth(peer, 0) // mark the peer as 
non-responsive + } + } + + // Disconnect from peers that were previously responsive and ones we didn't connect to yet. + for _, peer := range peerIDs { + responsive, ok := responsivePeers[peer] + if ok && responsive || !ok { + p.Disconnected(peer) + } + } + + // Requests should fall back on non-responsive peers when no other choice is left + peer, ok := p.GetAnyPeer(nil) + require.True(ok) + require.NotNil(peer) + + responsive, ok := responsivePeers[peer] + require.True(ok) + require.Falsef(responsive, "expected connecting to a non-responsive peer, but got a peer that was responsive: peer %s", peer) +} diff --git a/coreth/peer/waiting_handler.go b/coreth/peer/waiting_handler.go index 64c209d1..cf625131 100644 --- a/coreth/peer/waiting_handler.go +++ b/coreth/peer/waiting_handler.go @@ -4,6 +4,8 @@ package peer import ( + "context" + "github.com/ava-labs/coreth/plugin/evm/message" ) @@ -18,6 +20,16 @@ type waitingResponseHandler struct { failed bool // whether the original request is failed } +// newWaitingResponseHandler returns new instance of the waitingResponseHandler +func newWaitingResponseHandler() *waitingResponseHandler { + return &waitingResponseHandler{ + // Make buffer length 1 so that OnResponse can complete + // even if no goroutine is waiting on the channel (i.e. + // the context of a request is cancelled.) 
+ responseChan: make(chan []byte, 1), + } +} + // OnResponse passes the response bytes to the responseChan and closes the channel func (w *waitingResponseHandler) OnResponse(response []byte) error { w.responseChan <- response @@ -32,7 +44,14 @@ func (w *waitingResponseHandler) OnFailure() error { return nil } -// newWaitingResponseHandler returns new instance of the waitingResponseHandler -func newWaitingResponseHandler() *waitingResponseHandler { - return &waitingResponseHandler{responseChan: make(chan []byte)} +func (waitingHandler *waitingResponseHandler) WaitForResult(ctx context.Context) ([]byte, error) { + select { + case <-ctx.Done(): + return nil, ctx.Err() + case response := <-waitingHandler.responseChan: + if waitingHandler.failed { + return nil, ErrRequestFailed + } + return response, nil + } } diff --git a/coreth/plugin/evm/ExampleWarp.abi b/coreth/plugin/evm/ExampleWarp.abi new file mode 100644 index 00000000..9d4b442c --- /dev/null +++ b/coreth/plugin/evm/ExampleWarp.abi @@ -0,0 +1,105 @@ +[ + { + "inputs": [ + { + "internalType": "bytes", + "name": "payload", + "type": "bytes" + } + ], + "name": "sendWarpMessage", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "bytes32", + "name": "blockchainID", + "type": "bytes32" + } + ], + "name": "validateGetBlockchainID", + "outputs": [], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint32", + "name": "index", + "type": "uint32" + } + ], + "name": "validateInvalidWarpBlockHash", + "outputs": [], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint32", + "name": "index", + "type": "uint32" + } + ], + "name": "validateInvalidWarpMessage", + "outputs": [], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint32", + "name": "index", + "type": "uint32" + }, + { + "internalType": "bytes32", + "name": 
"sourceChainID", + "type": "bytes32" + }, + { + "internalType": "bytes32", + "name": "blockHash", + "type": "bytes32" + } + ], + "name": "validateWarpBlockHash", + "outputs": [], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint32", + "name": "index", + "type": "uint32" + }, + { + "internalType": "bytes32", + "name": "sourceChainID", + "type": "bytes32" + }, + { + "internalType": "address", + "name": "originSenderAddress", + "type": "address" + }, + { + "internalType": "bytes", + "name": "payload", + "type": "bytes" + } + ], + "name": "validateWarpMessage", + "outputs": [], + "stateMutability": "view", + "type": "function" + } +] \ No newline at end of file diff --git a/coreth/plugin/evm/ExampleWarp.bin b/coreth/plugin/evm/ExampleWarp.bin new file mode 100644 index 00000000..c5963ac7 --- /dev/null +++ b/coreth/plugin/evm/ExampleWarp.bin @@ -0,0 +1 @@ +60806040527302000000000000000000000000000000000000055f806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550348015610062575f80fd5b50610d15806100705f395ff3fe608060405234801561000f575f80fd5b5060043610610060575f3560e01c806315f0c959146100645780635bd05f061461008057806377ca84db1461009c578063e519286f146100b8578063ee5b48eb146100d4578063f25ec06a146100f0575b5f80fd5b61007e60048036038101906100799190610658565b61010c565b005b61009a60048036038101906100959190610777565b6101a5565b005b6100b660048036038101906100b191906107fb565b6102cd565b005b6100d260048036038101906100cd9190610826565b61039a565b005b6100ee60048036038101906100e99190610876565b610464565b005b61010a600480360381019061010591906107fb565b6104ef565b005b5f8054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16634213cf786040518163ffffffff1660e01b8152600401602060405180830381865afa158015610174573d5f803e3d5ffd5b505050506040513d601f19601f8201168201806040525081019061019891906108d5565b81146101a2575f80fd5b50565b5f805f80549061010
00a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16636f825350886040518263ffffffff1660e01b81526004016101ff919061090f565b5f60405180830381865afa158015610219573d5f803e3d5ffd5b505050506040513d5f823e3d601f19601f820116820180604052508101906102419190610b48565b915091508061024e575f80fd5b85825f01511461025c575f80fd5b8473ffffffffffffffffffffffffffffffffffffffff16826020015173ffffffffffffffffffffffffffffffffffffffff1614610297575f80fd5b83836040516102a7929190610bde565b6040518091039020826040015180519060200120146102c4575f80fd5b50505050505050565b5f805f8054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1663ce7f5929846040518263ffffffff1660e01b8152600401610327919061090f565b606060405180830381865afa158015610342573d5f803e3d5ffd5b505050506040513d601f19601f820116820180604052508101906103669190610c43565b915091508015610374575f80fd5b5f801b825f015114610384575f80fd5b5f801b826020015114610395575f80fd5b505050565b5f805f8054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1663ce7f5929866040518263ffffffff1660e01b81526004016103f4919061090f565b606060405180830381865afa15801561040f573d5f803e3d5ffd5b505050506040513d601f19601f820116820180604052508101906104339190610c43565b9150915080610440575f80fd5b83825f01511461044e575f80fd5b8282602001511461045d575f80fd5b5050505050565b5f8054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1663ee5b48eb83836040518363ffffffff1660e01b81526004016104be929190610cbd565b5f604051808303815f87803b1580156104d5575f80fd5b505af11580156104e7573d5f803e3d5ffd5b505050505050565b5f805f8054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16636f825350846040518263ffffffff1660e01b8152600401610549919061090f565b5f60405180830381865afa158015610563573d5f803e3d5ffd5b505050506040513d5f823e3d601f19601f8201168201806040525081019061058b9190610b48565b915091508015610599575
f80fd5b5f801b825f0151146105a9575f80fd5b5f73ffffffffffffffffffffffffffffffffffffffff16826020015173ffffffffffffffffffffffffffffffffffffffff16146105e4575f80fd5b60405180602001604052805f815250805190602001208260400151805190602001201461060f575f80fd5b505050565b5f604051905090565b5f80fd5b5f80fd5b5f819050919050565b61063781610625565b8114610641575f80fd5b50565b5f813590506106528161062e565b92915050565b5f6020828403121561066d5761066c61061d565b5b5f61067a84828501610644565b91505092915050565b5f63ffffffff82169050919050565b61069b81610683565b81146106a5575f80fd5b50565b5f813590506106b681610692565b92915050565b5f73ffffffffffffffffffffffffffffffffffffffff82169050919050565b5f6106e5826106bc565b9050919050565b6106f5816106db565b81146106ff575f80fd5b50565b5f81359050610710816106ec565b92915050565b5f80fd5b5f80fd5b5f80fd5b5f8083601f84011261073757610736610716565b5b8235905067ffffffffffffffff8111156107545761075361071a565b5b6020830191508360018202830111156107705761076f61071e565b5b9250929050565b5f805f805f608086880312156107905761078f61061d565b5b5f61079d888289016106a8565b95505060206107ae88828901610644565b94505060406107bf88828901610702565b935050606086013567ffffffffffffffff8111156107e0576107df610621565b5b6107ec88828901610722565b92509250509295509295909350565b5f602082840312156108105761080f61061d565b5b5f61081d848285016106a8565b91505092915050565b5f805f6060848603121561083d5761083c61061d565b5b5f61084a868287016106a8565b935050602061085b86828701610644565b925050604061086c86828701610644565b9150509250925092565b5f806020838503121561088c5761088b61061d565b5b5f83013567ffffffffffffffff8111156108a9576108a8610621565b5b6108b585828601610722565b92509250509250929050565b5f815190506108cf8161062e565b92915050565b5f602082840312156108ea576108e961061d565b5b5f6108f7848285016108c1565b91505092915050565b61090981610683565b82525050565b5f6020820190506109225f830184610900565b92915050565b5f80fd5b5f601f19601f8301169050919050565b7f4e487b71000000000000000000000000000000000000000000000000000000005f52604160045260245ffd5b6109728261092c565b810181811067fffffffffff
fffff821117156109915761099061093c565b5b80604052505050565b5f6109a3610614565b90506109af8282610969565b919050565b5f80fd5b5f815190506109c6816106ec565b92915050565b5f80fd5b5f67ffffffffffffffff8211156109ea576109e961093c565b5b6109f38261092c565b9050602081019050919050565b5f5b83811015610a1d578082015181840152602081019050610a02565b5f8484015250505050565b5f610a3a610a35846109d0565b61099a565b905082815260208101848484011115610a5657610a556109cc565b5b610a61848285610a00565b509392505050565b5f82601f830112610a7d57610a7c610716565b5b8151610a8d848260208601610a28565b91505092915050565b5f60608284031215610aab57610aaa610928565b5b610ab5606061099a565b90505f610ac4848285016108c1565b5f830152506020610ad7848285016109b8565b602083015250604082015167ffffffffffffffff811115610afb57610afa6109b4565b5b610b0784828501610a69565b60408301525092915050565b5f8115159050919050565b610b2781610b13565b8114610b31575f80fd5b50565b5f81519050610b4281610b1e565b92915050565b5f8060408385031215610b5e57610b5d61061d565b5b5f83015167ffffffffffffffff811115610b7b57610b7a610621565b5b610b8785828601610a96565b9250506020610b9885828601610b34565b9150509250929050565b5f81905092915050565b828183375f83830152505050565b5f610bc58385610ba2565b9350610bd2838584610bac565b82840190509392505050565b5f610bea828486610bba565b91508190509392505050565b5f60408284031215610c0b57610c0a610928565b5b610c15604061099a565b90505f610c24848285016108c1565b5f830152506020610c37848285016108c1565b60208301525092915050565b5f8060608385031215610c5957610c5861061d565b5b5f610c6685828601610bf6565b9250506040610c7785828601610b34565b9150509250929050565b5f82825260208201905092915050565b5f610c9c8385610c81565b9350610ca9838584610bac565b610cb28361092c565b840190509392505050565b5f6020820190508181035f830152610cd6818486610c91565b9050939250505056fea2646970667358221220d2f09e48f2e77361389456025f7337767127dc73767d50ff2f46bc5273493cec64736f6c63430008150033 \ No newline at end of file diff --git a/coreth/plugin/evm/admin.go b/coreth/plugin/evm/admin.go index 0028398e..fd8d7f8d 100644 --- a/coreth/plugin/evm/admin.go 
+++ b/coreth/plugin/evm/admin.go @@ -29,6 +29,9 @@ func NewAdminService(vm *VM, performanceDir string) *Admin { func (p *Admin) StartCPUProfiler(_ *http.Request, _ *struct{}, _ *api.EmptyReply) error { log.Info("Admin: StartCPUProfiler called") + p.vm.ctx.Lock.Lock() + defer p.vm.ctx.Lock.Unlock() + return p.profiler.StartCPUProfiler() } @@ -36,6 +39,9 @@ func (p *Admin) StartCPUProfiler(_ *http.Request, _ *struct{}, _ *api.EmptyReply func (p *Admin) StopCPUProfiler(r *http.Request, _ *struct{}, _ *api.EmptyReply) error { log.Info("Admin: StopCPUProfiler called") + p.vm.ctx.Lock.Lock() + defer p.vm.ctx.Lock.Unlock() + return p.profiler.StopCPUProfiler() } @@ -43,6 +49,9 @@ func (p *Admin) StopCPUProfiler(r *http.Request, _ *struct{}, _ *api.EmptyReply) func (p *Admin) MemoryProfile(_ *http.Request, _ *struct{}, _ *api.EmptyReply) error { log.Info("Admin: MemoryProfile called") + p.vm.ctx.Lock.Lock() + defer p.vm.ctx.Lock.Unlock() + return p.profiler.MemoryProfile() } @@ -50,6 +59,9 @@ func (p *Admin) MemoryProfile(_ *http.Request, _ *struct{}, _ *api.EmptyReply) e func (p *Admin) LockProfile(_ *http.Request, _ *struct{}, _ *api.EmptyReply) error { log.Info("Admin: LockProfile called") + p.vm.ctx.Lock.Lock() + defer p.vm.ctx.Lock.Unlock() + return p.profiler.LockProfile() } @@ -59,6 +71,10 @@ type SetLogLevelArgs struct { func (p *Admin) SetLogLevel(_ *http.Request, args *SetLogLevelArgs, reply *api.EmptyReply) error { log.Info("EVM: SetLogLevel called", "logLevel", args.Level) + + p.vm.ctx.Lock.Lock() + defer p.vm.ctx.Lock.Unlock() + if err := p.vm.logger.SetLogLevel(args.Level); err != nil { return fmt.Errorf("failed to parse log level: %w ", err) } diff --git a/coreth/plugin/evm/atomic_backend.go b/coreth/plugin/evm/atomic_backend.go index 76a6ffb5..2bb500fc 100644 --- a/coreth/plugin/evm/atomic_backend.go +++ b/coreth/plugin/evm/atomic_backend.go @@ -15,6 +15,7 @@ import ( "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/ids" 
"github.com/ava-labs/avalanchego/utils/wrappers" + "github.com/ava-labs/coreth/core/types" syncclient "github.com/ava-labs/coreth/sync/client" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" @@ -57,7 +58,7 @@ type AtomicBackend interface { // Syncer creates and returns a new Syncer object that can be used to sync the // state of the atomic trie from peers - Syncer(client syncclient.LeafClient, targetRoot common.Hash, targetHeight uint64) (Syncer, error) + Syncer(client syncclient.LeafClient, targetRoot common.Hash, targetHeight uint64, requestSize uint16) (Syncer, error) // SetLastAccepted is used after state-sync to reset the last accepted block. SetLastAccepted(lastAcceptedHash common.Hash) @@ -88,13 +89,37 @@ func NewAtomicBackend( bonusBlocks map[uint64]ids.ID, repo AtomicTxRepository, lastAcceptedHeight uint64, lastAcceptedHash common.Hash, commitInterval uint64, ) (AtomicBackend, error) { + atomicBacked, _, err := NewAtomicBackendWithBonusBlockRepair( + db, sharedMemory, bonusBlocks, nil, repo, + lastAcceptedHeight, lastAcceptedHash, commitInterval, + ) + return atomicBacked, err +} + +func NewAtomicBackendWithBonusBlockRepair( + db *versiondb.Database, sharedMemory atomic.SharedMemory, + bonusBlocks map[uint64]ids.ID, bonusBlocksParsed map[uint64]*types.Block, + repo AtomicTxRepository, + lastAcceptedHeight uint64, lastAcceptedHash common.Hash, commitInterval uint64, +) (AtomicBackend, int, error) { atomicTrieDB := prefixdb.New(atomicTrieDBPrefix, db) metadataDB := prefixdb.New(atomicTrieMetaDBPrefix, db) codec := repo.Codec() atomicTrie, err := newAtomicTrie(atomicTrieDB, metadataDB, codec, lastAcceptedHeight, commitInterval) if err != nil { - return nil, err + return nil, 0, err + } + var heightsRepaired int + if len(bonusBlocksParsed) > 0 { + if heightsRepaired, err = atomicTrie.repairAtomicTrie(bonusBlocks, bonusBlocksParsed); err != nil { + return nil, 0, err + } + if heightsRepaired > 0 { + if err := db.Commit(); err != 
nil { + return nil, 0, err + } + } } atomicBackend := &atomicBackend{ codec: codec, @@ -113,9 +138,9 @@ func NewAtomicBackend( // return an atomic trie that is out of sync with shared memory. // In normal operation, the cursor is not set, such that this call will be a no-op. if err := atomicBackend.ApplyToSharedMemory(lastAcceptedHeight); err != nil { - return nil, err + return nil, 0, err } - return atomicBackend, atomicBackend.initialize(lastAcceptedHeight) + return atomicBackend, heightsRepaired, atomicBackend.initialize(lastAcceptedHeight) } // initializes the atomic trie using the atomic repository height index. @@ -161,17 +186,12 @@ func (a *atomicBackend) initialize(lastAcceptedHeight uint64) error { return err } - if _, found := a.bonusBlocks[height]; found { - // If [height] is a bonus block, do not index the atomic operations into the trie - continue - } + // Note: The atomic trie canonically contains the duplicate operations + // from any bonus blocks. if err := a.atomicTrie.UpdateTrie(tr, height, combinedOps); err != nil { return err } - root, nodes, err := tr.Commit(false) - if err != nil { - return err - } + root, nodes := tr.Commit(false) if err := a.atomicTrie.InsertTrie(nodes, root); err != nil { return err } @@ -184,6 +204,11 @@ func (a *atomicBackend) initialize(lastAcceptedHeight uint64) error { return err } } + // Trie must be re-opened after committing (not safe for re-use after commit) + tr, err = a.atomicTrie.OpenTrie(root) + if err != nil { + return err + } heightsIndexed++ if time.Since(lastUpdate) > progressLogFrequency { @@ -232,8 +257,10 @@ func (a *atomicBackend) ApplyToSharedMemory(lastAcceptedBlock uint64) error { return err } + lastHeight := binary.BigEndian.Uint64(sharedMemoryCursor[:wrappers.LongLen]) + lastCommittedRoot, _ := a.atomicTrie.LastCommitted() - log.Info("applying atomic operations to shared memory", "root", lastCommittedRoot, "lastAcceptedBlock", lastAcceptedBlock, "startHeight", 
binary.BigEndian.Uint64(sharedMemoryCursor[:wrappers.LongLen])) + log.Info("applying atomic operations to shared memory", "root", lastCommittedRoot, "lastAcceptedBlock", lastAcceptedBlock, "startHeight", lastHeight) it, err := a.atomicTrie.Iterator(lastCommittedRoot, sharedMemoryCursor) if err != nil { @@ -251,20 +278,34 @@ func (a *atomicBackend) ApplyToSharedMemory(lastAcceptedBlock uint64) error { // specifying the last atomic operation that was applied to shared memory. // To avoid applying the same operation twice, we call [it.Next()] in the // latter case. + var lastBlockchainID ids.ID if len(sharedMemoryCursor) > wrappers.LongLen { + lastBlockchainID, err = ids.ToID(sharedMemoryCursor[wrappers.LongLen:]) + if err != nil { + return err + } + it.Next() } batchOps := make(map[ids.ID]*atomic.Requests) for it.Next() { height := it.BlockNumber() - atomicOps := it.AtomicOps() - if height > lastAcceptedBlock { log.Warn("Found height above last accepted block while applying operations to shared memory", "height", height, "lastAcceptedBlock", lastAcceptedBlock) break } + // If [height] is a bonus block, do not apply the atomic operations to shared memory + if _, found := a.bonusBlocks[height]; found { + log.Debug( + "skipping bonus block in applying atomic ops from atomic trie to shared memory", + "height", height, + ) + continue + } + + atomicOps := it.AtomicOps() putRequests += len(atomicOps.PutRequests) removeRequests += len(atomicOps.RemoveRequests) totalPutRequests += len(atomicOps.PutRequests) @@ -273,7 +314,9 @@ func (a *atomicBackend) ApplyToSharedMemory(lastAcceptedBlock uint64) error { log.Info("atomic trie iteration", "height", height, "puts", totalPutRequests, "removes", totalRemoveRequests) lastUpdate = time.Now() } - mergeAtomicOpsToMap(batchOps, it.BlockchainID(), atomicOps) + + blockchainID := it.BlockchainID() + mergeAtomicOpsToMap(batchOps, blockchainID, atomicOps) if putRequests+removeRequests > sharedMemoryApplyBatchSize { // Update the cursor to 
the key of the atomic operation being executed on shared memory. @@ -288,8 +331,14 @@ func (a *atomicBackend) ApplyToSharedMemory(lastAcceptedBlock uint64) error { } // calling [sharedMemory.Apply] updates the last applied pointer atomically with the shared memory operation. if err = a.sharedMemory.Apply(batchOps, batch); err != nil { - return err + return fmt.Errorf("failed committing shared memory operations between %d:%s and %d:%s with: %w", + lastHeight, lastBlockchainID, + height, blockchainID, + err, + ) } + lastHeight = height + lastBlockchainID = blockchainID putRequests, removeRequests = 0, 0 batchOps = make(map[ids.ID]*atomic.Requests) } @@ -306,7 +355,11 @@ func (a *atomicBackend) ApplyToSharedMemory(lastAcceptedBlock uint64) error { return err } if err = a.sharedMemory.Apply(batchOps, batch); err != nil { - return err + return fmt.Errorf("failed committing shared memory operations between %d:%s and %d with: %w", + lastHeight, lastBlockchainID, + lastAcceptedBlock, + err, + ) } log.Info("finished applying atomic operations", "puts", totalPutRequests, "removes", totalRemoveRequests) return nil @@ -325,8 +378,8 @@ func (a *atomicBackend) MarkApplyToSharedMemoryCursor(previousLastAcceptedHeight // Syncer creates and returns a new Syncer object that can be used to sync the // state of the atomic trie from peers -func (a *atomicBackend) Syncer(client syncclient.LeafClient, targetRoot common.Hash, targetHeight uint64) (Syncer, error) { - return newAtomicSyncer(client, a, targetRoot, targetHeight) +func (a *atomicBackend) Syncer(client syncclient.LeafClient, targetRoot common.Hash, targetHeight uint64, requestSize uint16) (Syncer, error) { + return newAtomicSyncer(client, a, targetRoot, targetHeight, requestSize) } func (a *atomicBackend) GetVerifiedAtomicState(blockHash common.Hash) (AtomicState, error) { @@ -375,7 +428,10 @@ func (a *atomicBackend) InsertTxs(blockHash common.Hash, blockHeight uint64, par return common.Hash{}, err } - // update the atomic trie 
+ // Insert the operations into the atomic trie + // + // Note: The atomic trie canonically contains the duplicate operations from + // any bonus blocks. atomicOps, err := mergeAtomicOps(txs) if err != nil { return common.Hash{}, err @@ -390,10 +446,7 @@ func (a *atomicBackend) InsertTxs(blockHash common.Hash, blockHeight uint64, par } // get the new root and pin the atomic trie changes in memory. - root, nodes, err := tr.Commit(false) - if err != nil { - return common.Hash{}, err - } + root, nodes := tr.Commit(false) if err := a.atomicTrie.InsertTrie(nodes, root); err != nil { return common.Hash{}, err } diff --git a/coreth/plugin/evm/atomic_state.go b/coreth/plugin/evm/atomic_state.go index 0cb7d2fe..667e4c25 100644 --- a/coreth/plugin/evm/atomic_state.go +++ b/coreth/plugin/evm/atomic_state.go @@ -25,7 +25,7 @@ type AtomicState interface { Root() common.Hash // Accept applies the state change to VM's persistent storage // Changes are persisted atomically along with the provided [commitBatch]. - Accept(commitBatch database.Batch) error + Accept(commitBatch database.Batch, requests map[ids.ID]*atomic.Requests) error // Reject frees memory associated with the state change. Reject() error } @@ -46,7 +46,11 @@ func (a *atomicState) Root() common.Hash { } // Accept applies the state change to VM's persistent storage. -func (a *atomicState) Accept(commitBatch database.Batch) error { +func (a *atomicState) Accept(commitBatch database.Batch, requests map[ids.ID]*atomic.Requests) error { + // Add the new requests to the batch to be accepted + for chainID, requests := range requests { + mergeAtomicOpsToMap(a.atomicOps, chainID, requests) + } // Update the atomic tx repository. Note it is necessary to invoke // the correct method taking bonus blocks into consideration. 
if a.backend.IsBonus(a.blockHeight, a.blockHash) { diff --git a/coreth/plugin/evm/atomic_syncer.go b/coreth/plugin/evm/atomic_syncer.go index 54430587..b3af770b 100644 --- a/coreth/plugin/evm/atomic_syncer.go +++ b/coreth/plugin/evm/atomic_syncer.go @@ -37,9 +37,9 @@ type atomicSyncer struct { // syncer is used to sync leaves from the network. syncer *syncclient.CallbackLeafSyncer - // nextHeight is the height which key / values - // are being inserted into [atomicTrie] for - nextHeight uint64 + // lastHeight is the greatest height for which key / values + // were last inserted into the [atomicTrie] + lastHeight uint64 } // addZeros adds [common.HashLenth] zeros to [height] and returns the result as []byte @@ -50,7 +50,7 @@ func addZeroes(height uint64) []byte { return packer.Bytes } -func newAtomicSyncer(client syncclient.LeafClient, atomicBackend *atomicBackend, targetRoot common.Hash, targetHeight uint64) (*atomicSyncer, error) { +func newAtomicSyncer(client syncclient.LeafClient, atomicBackend *atomicBackend, targetRoot common.Hash, targetHeight uint64, requestSize uint16) (*atomicSyncer, error) { atomicTrie := atomicBackend.AtomicTrie() lastCommittedRoot, lastCommit := atomicTrie.LastCommitted() trie, err := atomicTrie.OpenTrie(lastCommittedRoot) @@ -64,12 +64,12 @@ func newAtomicSyncer(client syncclient.LeafClient, atomicBackend *atomicBackend, trie: trie, targetRoot: targetRoot, targetHeight: targetHeight, - nextHeight: lastCommit + 1, + lastHeight: lastCommit, } tasks := make(chan syncclient.LeafSyncTask, 1) tasks <- &atomicSyncerLeafTask{atomicSyncer: atomicSyncer} close(tasks) - atomicSyncer.syncer = syncclient.NewCallbackLeafSyncer(client, tasks) + atomicSyncer.syncer = syncclient.NewCallbackLeafSyncer(client, tasks, requestSize) return atomicSyncer, nil } @@ -81,27 +81,22 @@ func (s *atomicSyncer) Start(ctx context.Context) error { // onLeafs is the callback for the leaf syncer, which will insert the key-value pairs into the trie. 
func (s *atomicSyncer) onLeafs(keys [][]byte, values [][]byte) error { - _, lastCommittedHeight := s.atomicTrie.LastCommitted() - lastHeight := lastCommittedHeight // track heights so we calculate roots after each height for i, key := range keys { if len(key) != atomicKeyLength { return fmt.Errorf("unexpected key len (%d) in atomic trie sync", len(key)) } // key = height + blockchainID height := binary.BigEndian.Uint64(key[:wrappers.LongLen]) - if height > lastHeight { + if height > s.lastHeight { // If this key belongs to a new height, we commit // the trie at the previous height before adding this key. - root, nodes, err := s.trie.Commit(false) - if err != nil { - return err - } + root, nodes := s.trie.Commit(false) if err := s.atomicTrie.InsertTrie(nodes, root); err != nil { return err } // AcceptTrie commits the trieDB and returns [isCommit] as true // if we have reached or crossed a commit interval. - isCommit, err := s.atomicTrie.AcceptTrie(lastHeight, root) + isCommit, err := s.atomicTrie.AcceptTrie(s.lastHeight, root) if err != nil { return err } @@ -112,10 +107,16 @@ func (s *atomicSyncer) onLeafs(keys [][]byte, values [][]byte) error { return err } } - lastHeight = height + // Trie must be re-opened after committing (not safe for re-use after commit) + trie, err := s.atomicTrie.OpenTrie(root) + if err != nil { + return err + } + s.trie = trie + s.lastHeight = height } - if err := s.trie.TryUpdate(key, values[i]); err != nil { + if err := s.trie.Update(key, values[i]); err != nil { return err } } @@ -126,10 +127,7 @@ func (s *atomicSyncer) onLeafs(keys [][]byte, values [][]byte) error { // commit the trie to disk and perform the final checks that we synced the target root correctly. 
func (s *atomicSyncer) onFinish() error { // commit the trie on finish - root, nodes, err := s.trie.Commit(false) - if err != nil { - return err - } + root, nodes := s.trie.Commit(false) if err := s.atomicTrie.InsertTrie(nodes, root); err != nil { return err } @@ -161,7 +159,7 @@ type atomicSyncerLeafTask struct { atomicSyncer *atomicSyncer } -func (a *atomicSyncerLeafTask) Start() []byte { return addZeroes(a.atomicSyncer.nextHeight) } +func (a *atomicSyncerLeafTask) Start() []byte { return addZeroes(a.atomicSyncer.lastHeight + 1) } func (a *atomicSyncerLeafTask) End() []byte { return nil } func (a *atomicSyncerLeafTask) NodeType() message.NodeType { return message.AtomicTrieNode } func (a *atomicSyncerLeafTask) OnFinish(context.Context) error { return a.atomicSyncer.onFinish() } diff --git a/coreth/plugin/evm/atomic_syncer_test.go b/coreth/plugin/evm/atomic_syncer_test.go index 8cab412d..49c365b4 100644 --- a/coreth/plugin/evm/atomic_syncer_test.go +++ b/coreth/plugin/evm/atomic_syncer_test.go @@ -4,6 +4,7 @@ package evm import ( + "bytes" "context" "fmt" "math/rand" @@ -11,14 +12,16 @@ import ( "github.com/stretchr/testify/assert" + "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/database/versiondb" - "github.com/ava-labs/coreth/ethdb/memorydb" + "github.com/ava-labs/coreth/core/rawdb" "github.com/ava-labs/coreth/plugin/evm/message" syncclient "github.com/ava-labs/coreth/sync/client" "github.com/ava-labs/coreth/sync/handlers" handlerstats "github.com/ava-labs/coreth/sync/handlers/stats" + "github.com/ava-labs/coreth/sync/syncutils" "github.com/ava-labs/coreth/trie" "github.com/ethereum/go-ethereum/common" ) @@ -47,7 +50,7 @@ func testAtomicSyncer(t *testing.T, serverTrieDB *trie.Database, targetHeight ui ) clientDB := versiondb.New(memdb.New()) - repo, err := NewAtomicTxRepository(clientDB, message.Codec, 0, nil, nil, nil) + repo, err := NewAtomicTxRepository(clientDB, message.Codec, 0, 
nil) if err != nil { t.Fatal("could not initialize atomix tx repository", err) } @@ -55,13 +58,12 @@ func testAtomicSyncer(t *testing.T, serverTrieDB *trie.Database, targetHeight ui if err != nil { t.Fatal("could not initialize atomic backend", err) } - atomicTrie := atomicBackend.AtomicTrie() // For each checkpoint, replace the leafsIntercept to shut off the syncer at the correct point and force resume from the checkpoint's // next trie. for i, checkpoint := range checkpoints { // Create syncer targeting the current [syncTrie]. - syncer, err := atomicBackend.Syncer(mockClient, targetRoot, targetHeight) + syncer, err := atomicBackend.Syncer(mockClient, targetRoot, targetHeight, defaultStateSyncRequestSize) if err != nil { t.Fatal(err) } @@ -88,7 +90,7 @@ func testAtomicSyncer(t *testing.T, serverTrieDB *trie.Database, targetHeight ui } // Create syncer targeting the current [targetRoot]. - syncer, err := atomicBackend.Syncer(mockClient, targetRoot, targetHeight) + syncer, err := atomicBackend.Syncer(mockClient, targetRoot, targetHeight, defaultStateSyncRequestSize) if err != nil { t.Fatal(err) } @@ -109,22 +111,46 @@ func testAtomicSyncer(t *testing.T, serverTrieDB *trie.Database, targetHeight ui // we re-initialise trie DB for asserting the trie to make sure any issues with unflushed writes // are caught here as this will only pass if all trie nodes have been written to the underlying DB + atomicTrie := atomicBackend.AtomicTrie() clientTrieDB := atomicTrie.TrieDB() - trie.AssertTrieConsistency(t, targetRoot, serverTrieDB, clientTrieDB, nil) + syncutils.AssertTrieConsistency(t, targetRoot, serverTrieDB, clientTrieDB, nil) + + // check all commit heights are created correctly + hasher := trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase())) + assert.NoError(t, err) + + serverTrie, err := trie.New(trie.TrieID(targetRoot), serverTrieDB) + assert.NoError(t, err) + addAllKeysWithPrefix := func(prefix []byte) error { + it := 
trie.NewIterator(serverTrie.NodeIterator(prefix)) + for it.Next() { + if !bytes.HasPrefix(it.Key, prefix) { + return it.Err + } + err := hasher.Update(it.Key, it.Value) + assert.NoError(t, err) + } + return it.Err + } - // check all commit heights are created - for height := uint64(commitInterval); height <= targetHeight; height += commitInterval { - root, err := atomicTrie.Root(height) + for height := uint64(0); height <= targetHeight; height++ { + err := addAllKeysWithPrefix(database.PackUInt64(height)) assert.NoError(t, err) - assert.NotZero(t, root) + + if height%commitInterval == 0 { + expected := hasher.Hash() + root, err := atomicTrie.Root(height) + assert.NoError(t, err) + assert.Equal(t, expected, root) + } } } func TestAtomicSyncer(t *testing.T) { rand.Seed(1) targetHeight := 10 * uint64(commitInterval) - serverTrieDB := trie.NewDatabase(memorydb.New()) - root, _, _ := trie.GenerateTrie(t, serverTrieDB, int(targetHeight), atomicKeyLength) + serverTrieDB := trie.NewDatabase(rawdb.NewMemoryDatabase()) + root, _, _ := syncutils.GenerateTrie(t, serverTrieDB, int(targetHeight), atomicKeyLength) testAtomicSyncer(t, serverTrieDB, targetHeight, root, nil, int64(targetHeight)) } @@ -132,9 +158,9 @@ func TestAtomicSyncer(t *testing.T) { func TestAtomicSyncerResume(t *testing.T) { rand.Seed(1) targetHeight := 10 * uint64(commitInterval) - serverTrieDB := trie.NewDatabase(memorydb.New()) + serverTrieDB := trie.NewDatabase(rawdb.NewMemoryDatabase()) numTrieKeys := int(targetHeight) - 1 // no atomic ops for genesis - root, _, _ := trie.GenerateTrie(t, serverTrieDB, numTrieKeys, atomicKeyLength) + root, _, _ := syncutils.GenerateTrie(t, serverTrieDB, numTrieKeys, atomicKeyLength) testAtomicSyncer(t, serverTrieDB, targetHeight, root, []atomicSyncTestCheckpoint{ { @@ -149,14 +175,14 @@ func TestAtomicSyncerResume(t *testing.T) { func TestAtomicSyncerResumeNewRootCheckpoint(t *testing.T) { rand.Seed(1) targetHeight1 := 10 * uint64(commitInterval) - serverTrieDB := 
trie.NewDatabase(memorydb.New()) + serverTrieDB := trie.NewDatabase(rawdb.NewMemoryDatabase()) numTrieKeys1 := int(targetHeight1) - 1 // no atomic ops for genesis - root1, _, _ := trie.GenerateTrie(t, serverTrieDB, numTrieKeys1, atomicKeyLength) + root1, _, _ := syncutils.GenerateTrie(t, serverTrieDB, numTrieKeys1, atomicKeyLength) rand.Seed(1) // seed rand again to get the same leafs in GenerateTrie targetHeight2 := 20 * uint64(commitInterval) numTrieKeys2 := int(targetHeight2) - 1 // no atomic ops for genesis - root2, _, _ := trie.GenerateTrie(t, serverTrieDB, numTrieKeys2, atomicKeyLength) + root2, _, _ := syncutils.GenerateTrie(t, serverTrieDB, numTrieKeys2, atomicKeyLength) testAtomicSyncer(t, serverTrieDB, targetHeight1, root1, []atomicSyncTestCheckpoint{ { diff --git a/coreth/plugin/evm/atomic_trie.go b/coreth/plugin/evm/atomic_trie.go index 3e67a682..daf764bd 100644 --- a/coreth/plugin/evm/atomic_trie.go +++ b/coreth/plugin/evm/atomic_trie.go @@ -4,7 +4,6 @@ package evm import ( - "encoding/binary" "fmt" "time" @@ -16,10 +15,12 @@ import ( "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/coreth/core" + "github.com/ava-labs/coreth/core/rawdb" "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/ethdb" "github.com/ava-labs/coreth/trie" + "github.com/ava-labs/coreth/trie/trienode" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" ) @@ -36,6 +37,7 @@ var ( _ AtomicTrie = &atomicTrie{} lastCommittedKey = []byte("atomicTrieLastCommittedBlock") appliedSharedMemoryCursorKey = []byte("atomicTrieLastAppliedToSharedMemory") + heightMapRepairKey = []byte("atomicTrieHeightMapRepair") ) // AtomicTrie maintains an index of atomic operations by blockchainIDs for every block @@ -73,7 +75,7 @@ type AtomicTrie interface { // InsertTrie updates the trieDB with the provided node set and adds a reference // to root in the trieDB. 
Once InsertTrie is called, it is expected either // AcceptTrie or RejectTrie be called for the same root. - InsertTrie(nodes *trie.NodeSet, root common.Hash) error + InsertTrie(nodes *trienode.NodeSet, root common.Hash) error // AcceptTrie marks root as the last accepted atomic trie root, and // commits the trie to persistent storage if height is divisible by @@ -82,6 +84,10 @@ type AtomicTrie interface { // RejectTrie dereferences root from the trieDB, freeing memory. RejectTrie(root common.Hash) error + + // RepairHeightMap repairs the height map of the atomic trie by iterating + // over all leaves in the trie and committing the trie at every commit interval. + RepairHeightMap(to uint64) (bool, error) } // AtomicTrieIterator is a stateful iterator that iterates the leafs of an AtomicTrie @@ -94,6 +100,9 @@ type AtomicTrieIterator interface { // returned []byte can be freely modified Key() []byte + // Value returns the current database value that the iterator is iterating + Value() []byte + // BlockNumber returns the current block number BlockNumber() uint64 @@ -146,7 +155,7 @@ func newAtomicTrie( } trieDB := trie.NewDatabaseWithConfig( - Database{atomicTrieDB}, + rawdb.NewDatabase(Database{atomicTrieDB}), &trie.Config{ Cache: 64, // Allocate 64MB of memory for clean cache }, @@ -181,10 +190,13 @@ func lastCommittedRootIfExists(db database.Database) (common.Hash, uint64, error return common.Hash{}, 0, nil case err != nil: return common.Hash{}, 0, err - case len(lastCommittedHeightBytes) != wrappers.LongLen: - return common.Hash{}, 0, fmt.Errorf("expected value of lastCommittedKey to be %d but was %d", wrappers.LongLen, len(lastCommittedHeightBytes)) } - height := binary.BigEndian.Uint64(lastCommittedHeightBytes) + + height, err := database.ParseUInt64(lastCommittedHeightBytes) + if err != nil { + return common.Hash{}, 0, fmt.Errorf("expected value at lastCommittedKey to be a valid uint64: %w", err) + } + hash, err := db.Get(lastCommittedHeightBytes) if err != nil 
{ return common.Hash{}, 0, fmt.Errorf("committed hash does not exist for committed height: %d: %w", height, err) @@ -198,12 +210,12 @@ func nearestCommitHeight(blockNumber uint64, commitInterval uint64) uint64 { } func (a *atomicTrie) OpenTrie(root common.Hash) (*trie.Trie, error) { - return trie.New(common.Hash{}, root, a.trieDB) + return trie.New(trie.TrieID(root), a.trieDB) } // commit calls commit on the underlying trieDB and updates metadata pointers. func (a *atomicTrie) commit(height uint64, root common.Hash) error { - if err := a.trieDB.Commit(root, false, nil); err != nil { + if err := a.trieDB.Commit(root, false); err != nil { return err } log.Info("committed atomic trie", "root", root.String(), "height", height) @@ -223,7 +235,7 @@ func (a *atomicTrie) UpdateTrie(trie *trie.Trie, height uint64, atomicOps map[id keyPacker := wrappers.Packer{Bytes: make([]byte, atomicKeyLength)} keyPacker.PackLong(height) keyPacker.PackFixedBytes(blockchainID[:]) - if err := trie.TryUpdate(keyPacker.Bytes, valueBytes); err != nil { + if err := trie.Update(keyPacker.Bytes, valueBytes); err != nil { return err } } @@ -239,8 +251,7 @@ func (a *atomicTrie) LastCommitted() (common.Hash, uint64) { // updateLastCommitted adds [height] -> [root] to the index and marks it as the last committed // root/height pair. func (a *atomicTrie) updateLastCommitted(root common.Hash, height uint64) error { - heightBytes := make([]byte, wrappers.LongLen) - binary.BigEndian.PutUint64(heightBytes, height) + heightBytes := database.PackUInt64(height) // now save the trie hash against the height it was committed at if err := a.metadataDB.Put(heightBytes, root[:]); err != nil { @@ -260,7 +271,7 @@ func (a *atomicTrie) updateLastCommitted(root common.Hash, height uint64) error // Iterator returns a types.AtomicTrieIterator that iterates the trie from the given // atomic trie root, starting at the specified [cursor]. 
func (a *atomicTrie) Iterator(root common.Hash, cursor []byte) (AtomicTrieIterator, error) { - t, err := trie.New(common.Hash{}, root, a.trieDB) + t, err := trie.New(trie.TrieID(root), a.trieDB) if err != nil { return nil, err } @@ -290,9 +301,7 @@ func getRoot(metadataDB database.Database, height uint64) (common.Hash, error) { return types.EmptyRootHash, nil } - heightBytes := make([]byte, wrappers.LongLen) - binary.BigEndian.PutUint64(heightBytes, height) - + heightBytes := database.PackUInt64(height) hash, err := metadataDB.Get(heightBytes) switch { case err == database.ErrNotFound: @@ -307,9 +316,9 @@ func (a *atomicTrie) LastAcceptedRoot() common.Hash { return a.lastAcceptedRoot } -func (a *atomicTrie) InsertTrie(nodes *trie.NodeSet, root common.Hash) error { +func (a *atomicTrie) InsertTrie(nodes *trienode.NodeSet, root common.Hash) error { if nodes != nil { - if err := a.trieDB.Update(trie.NewWithNodeSet(nodes)); err != nil { + if err := a.trieDB.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)); err != nil { return err } } @@ -330,12 +339,10 @@ func (a *atomicTrie) InsertTrie(nodes *trie.NodeSet, root common.Hash) error { // AcceptTrie commits the triedb at [root] if needed and returns true if a commit // was performed. func (a *atomicTrie) AcceptTrie(height uint64, root common.Hash) (bool, error) { - // Check whether we have crossed over a commitHeight. - // If so, make a commit with the last accepted root. hasCommitted := false - commitHeight := nearestCommitHeight(height, a.commitInterval) - for commitHeight > a.lastCommittedHeight && height > commitHeight { - nextCommitHeight := a.lastCommittedHeight + a.commitInterval + // Because we do not accept the trie at every height, we may need to + // populate roots at prior commit heights that were skipped. 
+ for nextCommitHeight := a.lastCommittedHeight + a.commitInterval; nextCommitHeight < height; nextCommitHeight += a.commitInterval { if err := a.commit(nextCommitHeight, a.lastAcceptedRoot); err != nil { return false, err } @@ -349,7 +356,7 @@ func (a *atomicTrie) AcceptTrie(height uint64, root common.Hash) (bool, error) { a.tipBuffer.Insert(root) // Commit this root if we have reached the [commitInterval]. - if commitHeight == height { + if height%a.commitInterval == 0 { if err := a.commit(height, root); err != nil { return false, err } diff --git a/coreth/plugin/evm/atomic_trie_height_map_repair.go b/coreth/plugin/evm/atomic_trie_height_map_repair.go new file mode 100644 index 00000000..2096853d --- /dev/null +++ b/coreth/plugin/evm/atomic_trie_height_map_repair.go @@ -0,0 +1,133 @@ +// (c) 2020-2021, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package evm + +import ( + "errors" + "fmt" + "math" + "time" + + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/coreth/core/types" + "github.com/ava-labs/coreth/trie/trienode" + "github.com/ethereum/go-ethereum/log" +) + +const ( + repairDone = math.MaxUint64 // used as a marker for when the height map is repaired + + iterationsPerDelay = 1000 // after this many iterations, pause for [iterationDelay] + iterationDelay = 100 * time.Millisecond // delay between iterations of the repair loop +) + +func (a *atomicTrie) RepairHeightMap(to uint64) (bool, error) { + repairFrom, err := database.GetUInt64(a.metadataDB, heightMapRepairKey) + switch { + case errors.Is(err, database.ErrNotFound): + repairFrom = 0 // height map not repaired yet, start at 0 + case err != nil: + return false, err + case repairFrom == repairDone: + // height map already repaired, nothing to do + return false, nil + } + return true, a.repairHeightMap(repairFrom, to) +} + +func (a *atomicTrie) repairHeightMap(from, to uint64) error { + // open the atomic trie at the last known root with correct 
height map + // correspondance + fromRoot, err := getRoot(a.metadataDB, from) + if err != nil { + return fmt.Errorf("could not get root at height %d: %w", from, err) + } + hasher, err := a.OpenTrie(fromRoot) + if err != nil { + return fmt.Errorf("could not open atomic trie at root %s: %w", fromRoot, err) + } + + // hashes values inserted in [hasher], and stores the result in the height + // map at [commitHeight]. Additionally, it updates the resume marker and + // re-opens [hasher] to respect the trie's no use after commit invariant. + var ( + lastLog = time.Now() + logEach = 90 * time.Second + ) + commitRepairedHeight := func(commitHeight uint64) error { + root, nodes := hasher.Commit(false) + if nodes != nil { + err := a.trieDB.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)) + if err != nil { + return err + } + err = a.trieDB.Commit(root, false) + if err != nil { + return err + } + } + err = a.metadataDB.Put(database.PackUInt64(commitHeight), root[:]) + if err != nil { + return err + } + err = database.PutUInt64(a.metadataDB, heightMapRepairKey, commitHeight) + if err != nil { + return err + } + if time.Since(lastLog) > logEach { + log.Info("repairing atomic trie height map", "height", commitHeight, "root", root) + lastLog = time.Now() + } + hasher, err = a.OpenTrie(root) + return err + } + + // iterate over all leaves in the current atomic trie + root, _ := a.LastCommitted() + it, err := a.Iterator(root, database.PackUInt64(from+1)) + if err != nil { + return fmt.Errorf("could not create iterator for atomic trie at root %s: %w", root, err) + } + + var height uint64 + lastCommit := from + numIterations := 0 + for it.Next() { + height = it.BlockNumber() + if height > to { + break + } + + for next := lastCommit + a.commitInterval; next < height; next += a.commitInterval { + if err := commitRepairedHeight(next); err != nil { + return err + } + lastCommit = next + } + + if err := hasher.Update(it.Key(), it.Value()); err != nil { + return 
fmt.Errorf("could not update atomic trie at root %s: %w", root, err) + } + + numIterations++ + if numIterations%iterationsPerDelay == 0 { + time.Sleep(iterationDelay) // pause to avoid putting a spike of load on the disk + } + } + if err := it.Error(); err != nil { + return fmt.Errorf("error iterating atomic trie: %w", err) + } + for next := lastCommit + a.commitInterval; next <= to; next += a.commitInterval { + if err := commitRepairedHeight(next); err != nil { + return err + } + } + + // mark height map as repaired + if err := database.PutUInt64(a.metadataDB, heightMapRepairKey, repairDone); err != nil { + return err + } + log.Info("atomic trie height map repair complete", "height", height, "root", root) + return nil +} diff --git a/coreth/plugin/evm/atomic_trie_height_map_repair_test.go b/coreth/plugin/evm/atomic_trie_height_map_repair_test.go new file mode 100644 index 00000000..0b95b252 --- /dev/null +++ b/coreth/plugin/evm/atomic_trie_height_map_repair_test.go @@ -0,0 +1,116 @@ +// (c) 2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package evm + +import ( + "testing" + + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/database/memdb" + "github.com/ava-labs/avalanchego/database/versiondb" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +func TestAtomicTrieRepairHeightMap(t *testing.T) { + for name, test := range map[string]testAtomicTrieRepairHeightMap{ + "last accepted after commit interval": { + lastAccepted: 3*testCommitInterval + 5, + skipAtomicTxs: func(height uint64) bool { return false }, + }, + "last accepted exactly a commit interval": { + lastAccepted: 3 * testCommitInterval, + skipAtomicTxs: func(height uint64) bool { return false }, + }, + "no atomic txs in a commit interval": { + lastAccepted: 3 * testCommitInterval, + skipAtomicTxs: func(height uint64) bool { return height > testCommitInterval && height <= 2*testCommitInterval }, + }, + "no atomic txs in the most recent commit intervals": { + lastAccepted: 3 * testCommitInterval, + skipAtomicTxs: func(height uint64) bool { return height > testCommitInterval+1 }, + }, + } { + t.Run(name, func(t *testing.T) { test.run(t) }) + } +} + +type testAtomicTrieRepairHeightMap struct { + lastAccepted uint64 + skipAtomicTxs func(height uint64) bool +} + +func (test testAtomicTrieRepairHeightMap) run(t *testing.T) { + require := require.New(t) + + db := versiondb.New(memdb.New()) + repo, err := NewAtomicTxRepository(db, testTxCodec(), 0, nil) + require.NoError(err) + atomicBackend, err := NewAtomicBackend(db, testSharedMemory(), nil, repo, 0, common.Hash{}, testCommitInterval) + require.NoError(err) + atomicTrie := atomicBackend.AtomicTrie().(*atomicTrie) + + heightMap := make(map[uint64]common.Hash) + for height := uint64(1); height <= test.lastAccepted; height++ { + atomicRequests := testDataImportTx().mustAtomicOps() + if test.skipAtomicTxs(height) { + atomicRequests = nil + } + err := indexAtomicTxs(atomicTrie, height, atomicRequests) + require.NoError(err) + if 
height%testCommitInterval == 0 { + root, _ := atomicTrie.LastCommitted() + heightMap[height] = root + } + } + + // Verify that [atomicTrie] can access each of the expected roots + verifyRoots := func(expectZero bool) { + for height, hash := range heightMap { + root, err := atomicTrie.Root(height) + require.NoError(err) + if expectZero { + require.Zero(root) + } else { + require.Equal(hash, root) + } + } + } + verifyRoots(false) + + // destroy the height map + for height := range heightMap { + err := atomicTrie.metadataDB.Delete(database.PackUInt64(height)) + require.NoError(err) + } + require.NoError(db.Commit()) + verifyRoots(true) + + // repair the height map + repaired, err := atomicTrie.RepairHeightMap(test.lastAccepted) + require.NoError(err) + verifyRoots(false) + require.True(repaired) + + // partially destroy the height map + _, lastHeight := atomicTrie.LastCommitted() + err = atomicTrie.metadataDB.Delete(database.PackUInt64(lastHeight)) + require.NoError(err) + err = atomicTrie.metadataDB.Put( + heightMapRepairKey, + database.PackUInt64(lastHeight-testCommitInterval), + ) + require.NoError(err) + + // repair the height map + repaired, err = atomicTrie.RepairHeightMap(test.lastAccepted) + require.NoError(err) + verifyRoots(false) + require.True(repaired) + + // try to repair the height map again + repaired, err = atomicTrie.RepairHeightMap(test.lastAccepted) + require.NoError(err) + require.False(repaired) +} diff --git a/coreth/plugin/evm/atomic_trie_iterator.go b/coreth/plugin/evm/atomic_trie_iterator.go index 394af477..2bdf90b5 100644 --- a/coreth/plugin/evm/atomic_trie_iterator.go +++ b/coreth/plugin/evm/atomic_trie_iterator.go @@ -112,3 +112,8 @@ func (a *atomicTrieIterator) AtomicOps() *atomic.Requests { func (a *atomicTrieIterator) Key() []byte { return a.key } + +// Value returns the current database value that the iterator is iterating +func (a *atomicTrieIterator) Value() []byte { + return a.trieIterator.Value +} diff --git 
a/coreth/plugin/evm/atomic_trie_iterator_test.go b/coreth/plugin/evm/atomic_trie_iterator_test.go index 944d2c47..6d8c98cf 100644 --- a/coreth/plugin/evm/atomic_trie_iterator_test.go +++ b/coreth/plugin/evm/atomic_trie_iterator_test.go @@ -25,7 +25,7 @@ func TestIteratorCanIterate(t *testing.T) { lastAcceptedHeight := uint64(1000) db := versiondb.New(memdb.New()) codec := testTxCodec() - repo, err := NewAtomicTxRepository(db, codec, lastAcceptedHeight, nil, nil, nil) + repo, err := NewAtomicTxRepository(db, codec, lastAcceptedHeight, nil) assert.NoError(t, err) // create state with multiple transactions @@ -65,7 +65,7 @@ func TestIteratorHandlesInvalidData(t *testing.T) { lastAcceptedHeight := uint64(1000) db := versiondb.New(memdb.New()) codec := testTxCodec() - repo, err := NewAtomicTxRepository(db, codec, lastAcceptedHeight, nil, nil, nil) + repo, err := NewAtomicTxRepository(db, codec, lastAcceptedHeight, nil) require.NoError(err) // create state with multiple transactions @@ -92,10 +92,9 @@ func TestIteratorHandlesInvalidData(t *testing.T) { // handles an error when it runs into an unexpected key-value pair in the trie. atomicTrieSnapshot, err := atomicTrie.OpenTrie(lastCommittedHash) require.NoError(err) - require.NoError(atomicTrieSnapshot.TryUpdate(utils.RandomBytes(50), utils.RandomBytes(50))) + require.NoError(atomicTrieSnapshot.Update(utils.RandomBytes(50), utils.RandomBytes(50))) - nextRoot, nodes, err := atomicTrieSnapshot.Commit(false) - require.NoError(err) + nextRoot, nodes := atomicTrieSnapshot.Commit(false) err = atomicTrie.InsertTrie(nodes, nextRoot) require.NoError(err) isCommit, err := atomicTrie.AcceptTrie(lastCommittedHeight+commitInterval, nextRoot) diff --git a/coreth/plugin/evm/atomic_trie_repair.go b/coreth/plugin/evm/atomic_trie_repair.go new file mode 100644 index 00000000..83bcc6f1 --- /dev/null +++ b/coreth/plugin/evm/atomic_trie_repair.go @@ -0,0 +1,91 @@ +// (c) 2020-2021, Ava Labs, Inc. All rights reserved. 
+// See the file LICENSE for licensing terms. + +package evm + +import ( + "fmt" + + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/coreth/core/types" + "github.com/ava-labs/coreth/trie/trienode" + "github.com/ethereum/go-ethereum/log" +) + +var repairedKey = []byte("atomicTrieHasBonusBlocks") + +// TODO: Remove this after the Durango +// repairAtomicTrie applies the bonus blocks to the atomic trie so all nodes +// can have a canonical atomic trie. +// Initially, bonus blocks were not indexed into the atomic trie. However, a +// regression caused some nodes to index these blocks. +// Returns the number of heights repaired. +func (a *atomicTrie) repairAtomicTrie(bonusBlockIDs map[uint64]ids.ID, bonusBlocks map[uint64]*types.Block) (int, error) { + done, err := a.metadataDB.Has(repairedKey) + if err != nil { + return 0, err + } + if done { + return 0, nil + } + + root, lastCommitted := a.LastCommitted() + tr, err := a.OpenTrie(root) + if err != nil { + return 0, err + } + + heightsRepaired := 0 + puts, removes := 0, 0 + for height, block := range bonusBlocks { + if height > lastCommitted { + // Avoid applying the repair to heights not yet committed + continue + } + + blockID, ok := bonusBlockIDs[height] + if !ok { + // Should not happen since we enforce the keys of bonusBlockIDs + // to be the same as the keys of bonusBlocks on init. 
+ return 0, fmt.Errorf("missing block ID for height %d", height) + } + txs, err := ExtractAtomicTxs(block.ExtData(), false, a.codec) + if err != nil { + return 0, fmt.Errorf("failed to extract atomic txs from bonus block at height %d: %w", height, err) + } + log.Info("repairing atomic trie", "height", height, "block", blockID, "txs", len(txs)) + combinedOps, err := mergeAtomicOps(txs) + if err != nil { + return 0, err + } + if err := a.UpdateTrie(tr, height, combinedOps); err != nil { + return 0, err + } + for _, op := range combinedOps { + puts += len(op.PutRequests) + removes += len(op.RemoveRequests) + } + heightsRepaired++ + } + newRoot, nodes := tr.Commit(false) + if nodes != nil { + if err := a.trieDB.Update(newRoot, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)); err != nil { + return 0, err + } + if err := a.commit(lastCommitted, newRoot); err != nil { + return 0, err + } + } + + // Putting either true or false are both considered repaired since we check + // for the presence of the key to skip the repair. + if database.PutBool(a.metadataDB, repairedKey, true); err != nil { + return 0, err + } + log.Info( + "repaired atomic trie", "originalRoot", root, "newRoot", newRoot, + "heightsRepaired", heightsRepaired, "puts", puts, "removes", removes, + ) + return heightsRepaired, nil +} diff --git a/coreth/plugin/evm/atomic_trie_repair_test.go b/coreth/plugin/evm/atomic_trie_repair_test.go new file mode 100644 index 00000000..e0e27750 --- /dev/null +++ b/coreth/plugin/evm/atomic_trie_repair_test.go @@ -0,0 +1,140 @@ +// (c) 2020-2021, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package evm + +import ( + "slices" + "testing" + + "github.com/ava-labs/avalanchego/database/memdb" + "github.com/ava-labs/avalanchego/database/versiondb" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/coreth/core/types" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" + "golang.org/x/exp/maps" +) + +type atomicTrieRepairTest struct { + setup func(a *atomicTrie, db *versiondb.Database) + expectedHeightsRepaired int +} + +func TestAtomicTrieRepair(t *testing.T) { + require := require.New(t) + for name, test := range map[string]atomicTrieRepairTest{ + "needs repair": { + setup: func(a *atomicTrie, db *versiondb.Database) {}, + expectedHeightsRepaired: len(mainnetBonusBlocksParsed), + }, + "should not be repaired twice": { + setup: func(a *atomicTrie, db *versiondb.Database) { + _, err := a.repairAtomicTrie(bonusBlockMainnetHeights, mainnetBonusBlocksParsed) + require.NoError(err) + require.NoError(db.Commit()) + }, + expectedHeightsRepaired: 0, + }, + "did not need repair": { + setup: func(a *atomicTrie, db *versiondb.Database) { + // simulates a node that has the bonus blocks in the atomic trie + // but has not yet run the repair + _, err := a.repairAtomicTrie(bonusBlockMainnetHeights, mainnetBonusBlocksParsed) + require.NoError(err) + require.NoError(a.metadataDB.Delete(repairedKey)) + require.NoError(db.Commit()) + }, + expectedHeightsRepaired: len(mainnetBonusBlocksParsed), + }, + } { + t.Run(name, test.test) + } +} + +func (test atomicTrieRepairTest) test(t *testing.T) { + require := require.New(t) + commitInterval := uint64(4096) + + // create an unrepaired atomic trie for setup + db := versiondb.New(memdb.New()) + repo, err := NewAtomicTxRepository(db, Codec, 0, nil) + require.NoError(err) + atomicBackend, err := NewAtomicBackend(db, testSharedMemory(), nil, repo, 0, common.Hash{}, commitInterval) + require.NoError(err) + a := atomicBackend.AtomicTrie().(*atomicTrie) + + // make a commit at a height larger 
than all bonus blocks + maxBonusBlockHeight := slices.Max(maps.Keys(mainnetBonusBlocksParsed)) + commitHeight := nearestCommitHeight(maxBonusBlockHeight, commitInterval) + commitInterval + err = a.commit(commitHeight, types.EmptyRootHash) + require.NoError(err) + require.NoError(db.Commit()) + + // perform additional setup + test.setup(a, db) + + // recreate the trie with the repair constructor to test the repair + var heightsRepaired int + atomicBackend, heightsRepaired, err = NewAtomicBackendWithBonusBlockRepair( + db, testSharedMemory(), bonusBlockMainnetHeights, mainnetBonusBlocksParsed, + repo, commitHeight, common.Hash{}, commitInterval, + ) + require.NoError(err) + require.Equal(test.expectedHeightsRepaired, heightsRepaired) + + // call Abort to make sure the repair has called Commit + db.Abort() + // verify the trie is repaired + verifyAtomicTrieIsAlreadyRepaired(require, db, repo, commitHeight, commitInterval) +} + +func verifyAtomicTrieIsAlreadyRepaired( + require *require.Assertions, db *versiondb.Database, repo *atomicTxRepository, + commitHeight uint64, commitInterval uint64, +) { + // create a map to track the expected items in the atomic trie. 
+ // note we serialize the atomic ops to bytes so we can compare nil + // and empty slices as equal + expectedKeys := 0 + expected := make(map[uint64]map[ids.ID][]byte) + for height, block := range mainnetBonusBlocksParsed { + txs, err := ExtractAtomicTxs(block.ExtData(), false, Codec) + require.NoError(err) + + requests := make(map[ids.ID][]byte) + ops, err := mergeAtomicOps(txs) + require.NoError(err) + for id, op := range ops { + bytes, err := Codec.Marshal(codecVersion, op) + require.NoError(err) + requests[id] = bytes + expectedKeys++ + } + expected[height] = requests + } + + atomicBackend, heightsRepaired, err := NewAtomicBackendWithBonusBlockRepair( + db, testSharedMemory(), bonusBlockMainnetHeights, mainnetBonusBlocksParsed, + repo, commitHeight, common.Hash{}, commitInterval, + ) + require.NoError(err) + a := atomicBackend.AtomicTrie().(*atomicTrie) + require.NoError(err) + require.Zero(heightsRepaired) // migration should not run a second time + + // iterate over the trie and check it contains the expected items + root, err := a.Root(commitHeight) + require.NoError(err) + it, err := a.Iterator(root, nil) + require.NoError(err) + + foundKeys := 0 + for it.Next() { + bytes, err := a.codec.Marshal(codecVersion, it.AtomicOps()) + require.NoError(err) + require.Equal(expected[it.BlockNumber()][it.BlockchainID()], bytes) + foundKeys++ + } + require.Equal(expectedKeys, foundKeys) +} diff --git a/coreth/plugin/evm/atomic_trie_test.go b/coreth/plugin/evm/atomic_trie_test.go index 05e94cc7..ede6ca1b 100644 --- a/coreth/plugin/evm/atomic_trie_test.go +++ b/coreth/plugin/evm/atomic_trie_test.go @@ -44,10 +44,7 @@ func indexAtomicTxs(tr AtomicTrie, height uint64, atomicOps map[ids.ID]*atomic.R if err := tr.UpdateTrie(snapshot, height, atomicOps); err != nil { return err } - root, nodes, err := snapshot.Commit(false) - if err != nil { - return err - } + root, nodes := snapshot.Commit(false) if err := tr.InsertTrie(nodes, root); err != nil { return err } @@ -139,7 
+136,7 @@ func TestAtomicTrieInitialize(t *testing.T) { t.Run(name, func(t *testing.T) { db := versiondb.New(memdb.New()) codec := testTxCodec() - repo, err := NewAtomicTxRepository(db, codec, test.lastAcceptedHeight, nil, nil, nil) + repo, err := NewAtomicTxRepository(db, codec, test.lastAcceptedHeight, nil) if err != nil { t.Fatal(err) } @@ -228,7 +225,7 @@ func TestIndexerInitializesOnlyOnce(t *testing.T) { lastAcceptedHeight := uint64(25) db := versiondb.New(memdb.New()) codec := testTxCodec() - repo, err := NewAtomicTxRepository(db, codec, lastAcceptedHeight, nil, nil, nil) + repo, err := NewAtomicTxRepository(db, codec, lastAcceptedHeight, nil) assert.NoError(t, err) operationsMap := make(map[uint64]map[ids.ID]*atomic.Requests) writeTxs(t, repo, 1, lastAcceptedHeight+1, constTxsPerHeight(2), nil, operationsMap) @@ -261,7 +258,7 @@ func TestIndexerInitializesOnlyOnce(t *testing.T) { func newTestAtomicTrie(t *testing.T) AtomicTrie { db := versiondb.New(memdb.New()) - repo, err := NewAtomicTxRepository(db, testTxCodec(), 0, nil, nil, nil) + repo, err := NewAtomicTxRepository(db, testTxCodec(), 0, nil) if err != nil { t.Fatal(err) } @@ -296,7 +293,7 @@ func TestIndexerWriteAndRead(t *testing.T) { assert.Len(t, blockRootMap, 3) hash, height := atomicTrie.LastCommitted() - assert.EqualValues(t, lastCommittedBlockHeight, height, "expected %d was %d", 200, lastCommittedBlockHeight) + assert.EqualValues(t, lastCommittedBlockHeight, height) assert.Equal(t, lastCommittedBlockHash, hash) // Verify that [atomicTrie] can access each of the expected roots @@ -332,14 +329,14 @@ func TestAtomicOpsAreNotTxOrderDependent(t *testing.T) { assert.Equal(t, root1, root2) } -func TestAtomicTrieSkipsBonusBlocks(t *testing.T) { +func TestAtomicTrieDoesNotSkipBonusBlocks(t *testing.T) { lastAcceptedHeight := uint64(100) numTxsPerBlock := 3 commitInterval := uint64(10) expectedCommitHeight := uint64(100) db := versiondb.New(memdb.New()) codec := testTxCodec() - repo, err := 
NewAtomicTxRepository(db, codec, lastAcceptedHeight, nil, nil, nil) + repo, err := NewAtomicTxRepository(db, codec, lastAcceptedHeight, nil) if err != nil { t.Fatal(err) } @@ -362,10 +359,7 @@ func TestAtomicTrieSkipsBonusBlocks(t *testing.T) { assert.EqualValues(t, expectedCommitHeight, commitHeight) assert.NotEqual(t, common.Hash{}, rootHash) - // Verify the operations are as expected with the bonus block heights removed from the operations map - for height := range bonusBlocks { - delete(operationsMap, height) - } + // Verify the operations are as expected verifyOperations(t, atomicTrie, codec, rootHash, 1, expectedCommitHeight, operationsMap) } @@ -487,6 +481,7 @@ func TestApplyToSharedMemory(t *testing.T) { commitInterval, lastAcceptedHeight uint64 setMarker func(*atomicBackend) error expectOpsApplied func(height uint64) bool + bonusBlockHeights map[uint64]ids.ID } for name, test := range map[string]test{ @@ -496,6 +491,18 @@ func TestApplyToSharedMemory(t *testing.T) { setMarker: func(a *atomicBackend) error { return a.MarkApplyToSharedMemoryCursor(10) }, expectOpsApplied: func(height uint64) bool { return height > 10 && height <= 20 }, }, + "marker is set to height, should skip bonus blocks": { + commitInterval: 10, + lastAcceptedHeight: 25, + setMarker: func(a *atomicBackend) error { return a.MarkApplyToSharedMemoryCursor(10) }, + bonusBlockHeights: map[uint64]ids.ID{15: {}}, + expectOpsApplied: func(height uint64) bool { + if height == 15 { + return false + } + return height > 10 && height <= 20 + }, + }, "marker is set to height + blockchain ID": { commitInterval: 10, lastAcceptedHeight: 25, @@ -517,7 +524,7 @@ func TestApplyToSharedMemory(t *testing.T) { t.Run(name, func(t *testing.T) { db := versiondb.New(memdb.New()) codec := testTxCodec() - repo, err := NewAtomicTxRepository(db, codec, test.lastAcceptedHeight, nil, nil, nil) + repo, err := NewAtomicTxRepository(db, codec, test.lastAcceptedHeight, nil) assert.NoError(t, err) operationsMap := 
make(map[uint64]map[ids.ID]*atomic.Requests) writeTxs(t, repo, 1, test.lastAcceptedHeight+1, constTxsPerHeight(2), nil, operationsMap) @@ -525,7 +532,7 @@ func TestApplyToSharedMemory(t *testing.T) { // Initialize atomic repository m := atomic.NewMemory(db) sharedMemories := newSharedMemories(m, testCChainID, blockChainID) - backend, err := NewAtomicBackend(db, sharedMemories.thisChain, nil, repo, test.lastAcceptedHeight, common.Hash{}, test.commitInterval) + backend, err := NewAtomicBackend(db, sharedMemories.thisChain, test.bonusBlockHeights, repo, test.lastAcceptedHeight, common.Hash{}, test.commitInterval) assert.NoError(t, err) atomicTrie := backend.AtomicTrie().(*atomicTrie) @@ -588,7 +595,7 @@ func BenchmarkAtomicTrieInit(b *testing.B) { lastAcceptedHeight := uint64(25000) // add 25000 * 3 = 75000 transactions - repo, err := NewAtomicTxRepository(db, codec, lastAcceptedHeight, nil, nil, nil) + repo, err := NewAtomicTxRepository(db, codec, lastAcceptedHeight, nil) assert.NoError(b, err) writeTxs(b, repo, 1, lastAcceptedHeight, constTxsPerHeight(3), nil, operationsMap) @@ -623,7 +630,7 @@ func BenchmarkAtomicTrieIterate(b *testing.B) { lastAcceptedHeight := uint64(25_000) // add 25000 * 3 = 75000 transactions - repo, err := NewAtomicTxRepository(db, codec, lastAcceptedHeight, nil, nil, nil) + repo, err := NewAtomicTxRepository(db, codec, lastAcceptedHeight, nil) assert.NoError(b, err) writeTxs(b, repo, 1, lastAcceptedHeight, constTxsPerHeight(3), nil, operationsMap) @@ -700,7 +707,7 @@ func benchmarkApplyToSharedMemory(b *testing.B, disk database.Database, blocks u sharedMemory := testSharedMemory() lastAcceptedHeight := blocks - repo, err := NewAtomicTxRepository(db, codec, lastAcceptedHeight, nil, nil, nil) + repo, err := NewAtomicTxRepository(db, codec, lastAcceptedHeight, nil) assert.NoError(b, err) backend, err := NewAtomicBackend(db, sharedMemory, nil, repo, 0, common.Hash{}, 5000) diff --git a/coreth/plugin/evm/atomic_tx_repository.go 
b/coreth/plugin/evm/atomic_tx_repository.go index 6bb863d5..f20eb331 100644 --- a/coreth/plugin/evm/atomic_tx_repository.go +++ b/coreth/plugin/evm/atomic_tx_repository.go @@ -5,9 +5,7 @@ package evm import ( "encoding/binary" - "errors" "fmt" - "sort" "time" "github.com/ethereum/go-ethereum/common" @@ -18,6 +16,7 @@ import ( "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/utils/wrappers" ) @@ -31,7 +30,9 @@ var ( atomicHeightTxDBPrefix = []byte("atomicHeightTxDB") atomicRepoMetadataDBPrefix = []byte("atomicRepoMetadataDB") maxIndexedHeightKey = []byte("maxIndexedAtomicTxHeight") - bonusBlocksRepairedKey = []byte("bonusBlocksRepaired") + + // Historically used to track the completion of a migration + // bonusBlocksRepairedKey = []byte("bonusBlocksRepaired") ) // AtomicTxRepository defines an entity that manages storage and indexing of @@ -68,7 +69,6 @@ type atomicTxRepository struct { func NewAtomicTxRepository( db *versiondb.Database, codec codec.Manager, lastAcceptedHeight uint64, - bonusBlocks map[uint64]ids.ID, canonicalBlocks []uint64, getAtomicTxFromBlockByHeight func(height uint64) (*Tx, error), ) (*atomicTxRepository, error) { repo := &atomicTxRepository{ @@ -81,13 +81,6 @@ func NewAtomicTxRepository( if err := repo.initializeHeightIndex(lastAcceptedHeight); err != nil { return nil, err } - - // TODO: remove post banff as all network participants will have applied the repair script. 
- repairHeights := getAtomicRepositoryRepairHeights(bonusBlocks, canonicalBlocks) - if err := repo.RepairForBonusBlocks(repairHeights, getAtomicTxFromBlockByHeight); err != nil { - return nil, fmt.Errorf("failed to repair atomic repository: %w", err) - } - return repo, nil } @@ -272,7 +265,7 @@ func (a *atomicTxRepository) write(height uint64, txs []*Tx, bonus bool) error { // with txs initialized from the txID index. copyTxs := make([]*Tx, len(txs)) copy(copyTxs, txs) - sort.Slice(copyTxs, func(i, j int) bool { return copyTxs[i].ID().Hex() < copyTxs[j].ID().Hex() }) + utils.Sort(copyTxs) txs = copyTxs } heightBytes := make([]byte, wrappers.LongLen) @@ -373,85 +366,3 @@ func (a *atomicTxRepository) IterateByHeight(height uint64) database.Iterator { func (a *atomicTxRepository) Codec() codec.Manager { return a.codec } - -func (a *atomicTxRepository) isBonusBlocksRepaired() (bool, error) { - return a.atomicRepoMetadataDB.Has(bonusBlocksRepairedKey) -} - -func (a *atomicTxRepository) markBonusBlocksRepaired(repairedEntries uint64) error { - val := make([]byte, wrappers.LongLen) - binary.BigEndian.PutUint64(val, repairedEntries) - return a.atomicRepoMetadataDB.Put(bonusBlocksRepairedKey, val) -} - -// RepairForBonusBlocks ensures that atomic txs that were processed on more than one block -// (canonical block + a number of bonus blocks) are indexed to the first height they were -// processed on (canonical block). [sortedHeights] should include all canonical block and -// bonus block heights in ascending order, and will only be passed as non-empty on mainnet. 
-func (a *atomicTxRepository) RepairForBonusBlocks( - sortedHeights []uint64, getAtomicTxFromBlockByHeight func(height uint64) (*Tx, error), -) error { - done, err := a.isBonusBlocksRepaired() - if err != nil { - return err - } - if done { - return nil - } - repairedEntries := uint64(0) - seenTxs := make(map[ids.ID][]uint64) - for _, height := range sortedHeights { - // get atomic tx from block - tx, err := getAtomicTxFromBlockByHeight(height) - if err != nil { - return err - } - if tx == nil { - continue - } - - // get the tx by txID and update it, the first time we encounter - // a given [txID], overwrite the previous [txID] => [height] - // mapping. This provides a canonical mapping across nodes. - heights, seen := seenTxs[tx.ID()] - _, foundHeight, err := a.GetByTxID(tx.ID()) - if err != nil && !errors.Is(err, database.ErrNotFound) { - return err - } - if !seen { - if err := a.Write(height, []*Tx{tx}); err != nil { - return err - } - } else { - if err := a.WriteBonus(height, []*Tx{tx}); err != nil { - return err - } - } - if foundHeight != height && !seen { - repairedEntries++ - } - seenTxs[tx.ID()] = append(heights, height) - } - if err := a.markBonusBlocksRepaired(repairedEntries); err != nil { - return err - } - log.Info("atomic tx repository RepairForBonusBlocks complete", "repairedEntries", repairedEntries) - return a.db.Commit() -} - -// getAtomicRepositoryRepairHeights returns a slice containing heights from bonus blocks and -// canonical blocks sorted by height. 
-func getAtomicRepositoryRepairHeights(bonusBlocks map[uint64]ids.ID, canonicalBlocks []uint64) []uint64 { - repairHeights := make([]uint64, 0, len(bonusBlocks)+len(canonicalBlocks)) - for height := range bonusBlocks { - repairHeights = append(repairHeights, height) - } - for _, height := range canonicalBlocks { - // avoid appending duplicates - if _, exists := bonusBlocks[height]; !exists { - repairHeights = append(repairHeights, height) - } - } - sort.Slice(repairHeights, func(i, j int) bool { return repairHeights[i] < repairHeights[j] }) - return repairHeights -} diff --git a/coreth/plugin/evm/atomic_tx_repository_test.go b/coreth/plugin/evm/atomic_tx_repository_test.go index 614d9c9e..d74066e1 100644 --- a/coreth/plugin/evm/atomic_tx_repository_test.go +++ b/coreth/plugin/evm/atomic_tx_repository_test.go @@ -5,14 +5,13 @@ package evm import ( "encoding/binary" - "fmt" - "sort" "testing" "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/database/versiondb" + "github.com/ava-labs/avalanchego/utils" "github.com/ethereum/go-ethereum/common" "github.com/ava-labs/avalanchego/codec" @@ -98,17 +97,12 @@ func writeTxs(t testing.TB, repo AtomicTxRepository, fromHeight uint64, toHeight // verifyTxs asserts [repo] can find all txs in [txMap] by height and txID func verifyTxs(t testing.TB, repo AtomicTxRepository, txMap map[uint64][]*Tx) { // We should be able to fetch indexed txs by height: - getComparator := func(txs []*Tx) func(int, int) bool { - return func(i, j int) bool { - return txs[i].ID().Hex() < txs[j].ID().Hex() - } - } for height, expectedTxs := range txMap { txs, err := repo.GetByHeight(height) assert.NoErrorf(t, err, "unexpected error on GetByHeight at height=%d", height) assert.Lenf(t, txs, len(expectedTxs), "wrong len of txs at height=%d", height) // txs should be stored in order of txID - sort.Slice(expectedTxs, 
getComparator(expectedTxs)) + utils.Sort(expectedTxs) txIDs := set.Set[ids.ID]{} for i := 0; i < len(txs); i++ { @@ -189,7 +183,7 @@ func verifyOperations(t testing.TB, atomicTrie AtomicTrie, codec codec.Manager, func TestAtomicRepositoryReadWriteSingleTx(t *testing.T) { db := versiondb.New(memdb.New()) codec := testTxCodec() - repo, err := NewAtomicTxRepository(db, codec, 0, nil, nil, nil) + repo, err := NewAtomicTxRepository(db, codec, 0, nil) if err != nil { t.Fatal(err) } @@ -202,7 +196,7 @@ func TestAtomicRepositoryReadWriteSingleTx(t *testing.T) { func TestAtomicRepositoryReadWriteMultipleTxs(t *testing.T) { db := versiondb.New(memdb.New()) codec := testTxCodec() - repo, err := NewAtomicTxRepository(db, codec, 0, nil, nil, nil) + repo, err := NewAtomicTxRepository(db, codec, 0, nil) if err != nil { t.Fatal(err) } @@ -225,7 +219,7 @@ func TestAtomicRepositoryPreAP5Migration(t *testing.T) { // Ensure the atomic repository can correctly migrate the transactions // from the old accepted atomic tx DB to add the height index. - repo, err := NewAtomicTxRepository(db, codec, 100, nil, nil, nil) + repo, err := NewAtomicTxRepository(db, codec, 100, nil) if err != nil { t.Fatal(err) } @@ -251,7 +245,7 @@ func TestAtomicRepositoryPostAP5Migration(t *testing.T) { // Ensure the atomic repository can correctly migrate the transactions // from the old accepted atomic tx DB to add the height index. 
- repo, err := NewAtomicTxRepository(db, codec, 200, nil, nil, nil) + repo, err := NewAtomicTxRepository(db, codec, 200, nil) if err != nil { t.Fatal(err) } @@ -273,7 +267,7 @@ func benchAtomicRepositoryIndex10_000(b *testing.B, maxHeight uint64, txsPerHeig if err := db.Commit(); err != nil { b.Fatal(err) } - repo, err := NewAtomicTxRepository(db, codec, maxHeight, nil, nil, nil) + repo, err := NewAtomicTxRepository(db, codec, maxHeight, nil) if err != nil { b.Fatal(err) } @@ -292,64 +286,3 @@ func BenchmarkAtomicRepositoryIndex_10kBlocks_10Tx(b *testing.B) { benchAtomicRepositoryIndex10_000(b, 10_000, 10) } } - -func TestRepairAtomicRepositoryForBonusBlockTxs(t *testing.T) { - db := versiondb.New(memdb.New()) - atomicTxRepository, err := NewAtomicTxRepository(db, testTxCodec(), 0, nil, nil, nil) - if err != nil { - t.Fatal(err) - } - - // check completion flag is set - done, err := atomicTxRepository.isBonusBlocksRepaired() - assert.NoError(t, err) - assert.True(t, done) - - // delete the key so we can simulate an unrepaired repository - atomicTxRepository.atomicRepoMetadataDB.Delete(bonusBlocksRepairedKey) - - tx := newTestTx() - // write the same tx to 3 heights. 
- canonical, bonus1, bonus2 := uint64(10), uint64(20), uint64(30) - atomicTxRepository.Write(canonical, []*Tx{tx}) - atomicTxRepository.Write(bonus1, []*Tx{tx}) - atomicTxRepository.Write(bonus2, []*Tx{tx}) - db.Commit() - - _, foundHeight, err := atomicTxRepository.GetByTxID(tx.ID()) - assert.NoError(t, err) - assert.Equal(t, bonus2, foundHeight) - - allHeights := []uint64{canonical, bonus1, bonus2} - if err := atomicTxRepository.RepairForBonusBlocks( - allHeights, - func(height uint64) (*Tx, error) { - if height == 10 || height == 20 || height == 30 { - return tx, nil - } - return nil, fmt.Errorf("unexpected height %d", height) - }, - ); err != nil { - t.Fatal(err) - } - - // check canonical height is indexed against txID - _, foundHeight, err = atomicTxRepository.GetByTxID(tx.ID()) - assert.NoError(t, err) - assert.Equal(t, canonical, foundHeight) - - // check tx can be found with any of the heights - for _, height := range allHeights { - txs, err := atomicTxRepository.GetByHeight(height) - if err != nil { - t.Fatal(err) - } - assert.Len(t, txs, 1) - assert.Equal(t, tx.ID(), txs[0].ID()) - } - - // check completion flag is set - done, err = atomicTxRepository.isBonusBlocksRepaired() - assert.NoError(t, err) - assert.True(t, done) -} diff --git a/coreth/plugin/evm/block.go b/coreth/plugin/evm/block.go index f4b53081..41ad543c 100644 --- a/coreth/plugin/evm/block.go +++ b/coreth/plugin/evm/block.go @@ -4,33 +4,49 @@ package evm import ( + "bytes" "context" + _ "embed" + "encoding/json" "errors" "fmt" - "math/big" "time" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rlp" + "github.com/ava-labs/coreth/core" + "github.com/ava-labs/coreth/core/rawdb" "github.com/ava-labs/coreth/core/types" + "github.com/ava-labs/coreth/params" + "github.com/ava-labs/coreth/precompile/precompileconfig" + "github.com/ava-labs/coreth/predicate" "github.com/ava-labs/avalanchego/ids" 
"github.com/ava-labs/avalanchego/snow/choices" + "github.com/ava-labs/avalanchego/snow/consensus/snowman" + "github.com/ava-labs/avalanchego/snow/engine/snowman/block" +) + +var ( + _ snowman.Block = (*Block)(nil) + _ block.WithVerifyContext = (*Block)(nil) ) var ( bonusBlockMainnetHeights = make(map[uint64]ids.ID) - // first height that processed a TX included on a - // bonus block is the canonical height for that TX. - canonicalBlockMainnetHeights = []uint64{ - 102928, 103035, 103038, 103114, 103193, - 103234, 103338, 103444, 103480, 103491, - 103513, 103533, 103535, 103538, 103541, - 103546, 103571, 103572, 103619, - 103287, 103624, 103591, - } + + //go:embed bonus_blocks.json + mainnetBonusBlocksJson []byte + + // mainnetBonusBlocksParsed is a map of bonus block numbers to the parsed + // data. These blocks are hardcoded so nodes that do not have these blocks + // can add their atomic operations to the atomic trie so all nodes have a + // canonical atomic trie. + // Initially, bonus blocks were not indexed into the atomic trie. However, a + // regression caused some nodes to index these blocks.
+ mainnetBonusBlocksParsed map[uint64]*types.Block = make(map[uint64]*types.Block) errMissingUTXOs = errors.New("missing UTXOs") ) @@ -103,6 +119,30 @@ func init() { } bonusBlockMainnetHeights[height] = blkID } + + var rlpMap map[uint64]string + err := json.Unmarshal(mainnetBonusBlocksJson, &rlpMap) + if err != nil { + panic(err) + } + for height, rlpHex := range rlpMap { + expectedHash, ok := bonusBlockMainnetHeights[height] + if !ok { + panic(fmt.Sprintf("missing bonus block at height %d", height)) + } + var ethBlock types.Block + if err := rlp.DecodeBytes(common.Hex2Bytes(rlpHex), &ethBlock); err != nil { + panic(fmt.Sprintf("failed to decode bonus block at height %d: %s", height, err)) + } + if ids.ID(ethBlock.Hash()) != expectedHash { + panic(fmt.Sprintf("block ID mismatch at (%s != %s)", ids.ID(ethBlock.Hash()), expectedHash)) + } + + mainnetBonusBlocksParsed[height] = &ethBlock + } + if len(mainnetBonusBlocksParsed) != len(bonusBlockMainnetHeights) { + panic("mismatched bonus block heights") + } } // Block implements the snowman.Block interface @@ -116,7 +156,7 @@ type Block struct { // newBlock returns a new Block wrapping the ethBlock type and implementing the snowman.Block interface func (vm *VM) newBlock(ethBlock *types.Block) (*Block, error) { - isApricotPhase5 := vm.chainConfig.IsApricotPhase5(new(big.Int).SetUint64(ethBlock.Time())) + isApricotPhase5 := vm.chainConfig.IsApricotPhase5(ethBlock.Time()) atomicTxs, err := ExtractAtomicTxs(ethBlock.ExtData(), isApricotPhase5, vm.codec) if err != nil { return nil, err @@ -143,9 +183,21 @@ func (b *Block) Accept(context.Context) error { b.status = choices.Accepted log.Debug(fmt.Sprintf("Accepting block %s (%s) at height %d", b.ID().Hex(), b.ID(), b.Height())) + + // Call Accept for relevant precompile logs. Note we do this prior to + // calling Accept on the blockChain so any side effects (eg warp signatures) + // take place before the accepted log is emitted to subscribers.
Use of the + // sharedMemoryWriter ensures shared memory requests generated by + // precompiles are committed atomically with the vm's lastAcceptedKey. + rules := b.vm.chainConfig.AvalancheRules(b.ethBlock.Number(), b.ethBlock.Timestamp()) + sharedMemoryWriter := NewSharedMemoryWriter() + if err := b.handlePrecompileAccept(rules, sharedMemoryWriter); err != nil { + return err + } if err := vm.blockChain.Accept(b.ethBlock); err != nil { return fmt.Errorf("chain could not accept %s: %w", b.ID(), err) } + if err := vm.acceptedBlockDB.Put(lastAcceptedKey, b.id[:]); err != nil { return fmt.Errorf("failed to put %s as the last accepted block: %w", b.ID(), err) } @@ -162,11 +214,54 @@ func (b *Block) Accept(context.Context) error { // should never occur since [b] must be verified before calling Accept return err } - commitBatch, err := b.vm.db.CommitBatch() + // Get pending operations on the vm's versionDB so we can apply them atomically + // with the shared memory requests. + vdbBatch, err := b.vm.db.CommitBatch() if err != nil { return fmt.Errorf("could not create commit batch processing block[%s]: %w", b.ID(), err) } - return atomicState.Accept(commitBatch) + + // Apply any shared memory requests that accumulated from processing the logs + // of the accepted block (generated by precompiles) atomically with other pending + // changes to the vm's versionDB. + return atomicState.Accept(vdbBatch, sharedMemoryWriter.requests) +} + +// handlePrecompileAccept calls Accept on any logs generated with an active precompile address that implements +// contract.Accepter +// This function assumes that the Accept function will ONLY operate on state maintained in the VM's versiondb. +// This ensures that any DB operations are performed atomically with marking the block as accepted. 
+func (b *Block) handlePrecompileAccept(rules params.Rules, sharedMemoryWriter *sharedMemoryWriter) error { + // Short circuit early if there are no precompile accepters to execute + if len(rules.AccepterPrecompiles) == 0 { + return nil + } + + // Read receipts from disk + receipts := rawdb.ReadReceipts(b.vm.chaindb, b.ethBlock.Hash(), b.ethBlock.NumberU64(), b.ethBlock.Time(), b.vm.chainConfig) + // If there are no receipts, ReadReceipts may be nil, so we check the length and confirm the ReceiptHash + // is empty to ensure that missing receipts results in an error on accept. + if len(receipts) == 0 && b.ethBlock.ReceiptHash() != types.EmptyRootHash { + return fmt.Errorf("failed to fetch receipts for accepted block with non-empty root hash (%s) (Block: %s, Height: %d)", b.ethBlock.ReceiptHash(), b.ethBlock.Hash(), b.ethBlock.NumberU64()) + } + acceptCtx := &precompileconfig.AcceptContext{ + SnowCtx: b.vm.ctx, + SharedMemory: sharedMemoryWriter, + Warp: b.vm.warpBackend, + } + for _, receipt := range receipts { + for logIdx, log := range receipt.Logs { + accepter, ok := rules.AccepterPrecompiles[log.Address] + if !ok { + continue + } + if err := accepter.Accept(acceptCtx, log.BlockHash, log.BlockNumber, log.TxHash, logIdx, log.Topics, log.Data); err != nil { + return err + } + } + } + + return nil } // Reject implements the snowman.Block interface @@ -176,7 +271,7 @@ func (b *Block) Reject(context.Context) error { log.Debug(fmt.Sprintf("Rejecting block %s (%s) at height %d", b.ID().Hex(), b.ID(), b.Height())) for _, tx := range b.atomicTxs { b.vm.mempool.RemoveTx(tx) - if err := b.vm.issueTx(tx, false /* set local to false when re-issuing */); err != nil { + if err := b.vm.mempool.AddTx(tx); err != nil { log.Debug("Failed to re-issue transaction in rejected block", "txID", tx.ID(), "err", err) } } @@ -222,16 +317,58 @@ func (b *Block) syntacticVerify() error { } header := b.ethBlock.Header() - rules := b.vm.chainConfig.AvalancheRules(header.Number, 
new(big.Int).SetUint64(header.Time)) + rules := b.vm.chainConfig.AvalancheRules(header.Number, header.Time) return b.vm.syntacticBlockValidator.SyntacticVerify(b, rules) } // Verify implements the snowman.Block interface func (b *Block) Verify(context.Context) error { - return b.verify(true) + return b.verify(&precompileconfig.PredicateContext{ + SnowCtx: b.vm.ctx, + ProposerVMBlockCtx: nil, + }, true) } -func (b *Block) verify(writes bool) error { +// ShouldVerifyWithContext implements the block.WithVerifyContext interface +func (b *Block) ShouldVerifyWithContext(context.Context) (bool, error) { + predicates := b.vm.chainConfig.AvalancheRules(b.ethBlock.Number(), b.ethBlock.Timestamp()).Predicaters + // Short circuit early if there are no predicates to verify + if len(predicates) == 0 { + return false, nil + } + + // Check if any of the transactions in the block specify a precompile that enforces a predicate, which requires + // the ProposerVMBlockCtx. + for _, tx := range b.ethBlock.Transactions() { + for _, accessTuple := range tx.AccessList() { + if _, ok := predicates[accessTuple.Address]; ok { + log.Debug("Block verification requires proposerVM context", "block", b.ID(), "height", b.Height()) + return true, nil + } + } + } + + log.Debug("Block verification does not require proposerVM context", "block", b.ID(), "height", b.Height()) + return false, nil +} + +// VerifyWithContext implements the block.WithVerifyContext interface +func (b *Block) VerifyWithContext(ctx context.Context, proposerVMBlockCtx *block.Context) error { + return b.verify(&precompileconfig.PredicateContext{ + SnowCtx: b.vm.ctx, + ProposerVMBlockCtx: proposerVMBlockCtx, + }, true) +} + +// Verify the block is valid. +// Enforces that the predicates are valid within [predicateContext]. +// Writes the block details to disk and the state to the trie manager iff writes=true. 
+func (b *Block) verify(predicateContext *precompileconfig.PredicateContext, writes bool) error { + if predicateContext.ProposerVMBlockCtx != nil { + log.Debug("Verifying block with context", "block", b.ID(), "height", b.Height()) + } else { + log.Debug("Verifying block without context", "block", b.ID(), "height", b.Height()) + } if err := b.syntacticVerify(); err != nil { return fmt.Errorf("syntactic block verification failed: %w", err) } @@ -241,6 +378,25 @@ func (b *Block) verify(writes bool) error { return err } + // Only enforce predicates if the chain has already bootstrapped. + // If the chain is still bootstrapping, we can assume that all blocks we are verifying have + // been accepted by the network (so the predicate was validated by the network when the + // block was originally verified). + if b.vm.bootstrapped { + if err := b.verifyPredicates(predicateContext); err != nil { + return fmt.Errorf("failed to verify predicates: %w", err) + } + } + + // The engine may call VerifyWithContext multiple times on the same block with different contexts. + // Since the engine will only call Accept/Reject once, we should only call InsertBlockManual once. + // Additionally, if a block is already in processing, then it has already passed verification and + // at this point we have checked the predicates are still valid in the different context so we + // can return nil. + if b.vm.State.IsProcessing(b.id) { + return nil + } + err := b.vm.blockChain.InsertBlockManual(b.ethBlock, writes) if err != nil || !writes { // if an error occurred inserting the block into the chain @@ -253,6 +409,41 @@ func (b *Block) verify(writes bool) error { return err } +// verifyPredicates verifies the predicates in the block are valid according to predicateContext. 
+func (b *Block) verifyPredicates(predicateContext *precompileconfig.PredicateContext) error { + rules := b.vm.chainConfig.AvalancheRules(b.ethBlock.Number(), b.ethBlock.Timestamp()) + + switch { + case !rules.IsDurango && rules.PredicatersExist(): + return errors.New("cannot enable predicates before Durango activation") + case !rules.IsDurango: + return nil + } + + predicateResults := predicate.NewResults() + for _, tx := range b.ethBlock.Transactions() { + results, err := core.CheckPredicates(rules, predicateContext, tx) + if err != nil { + return err + } + predicateResults.SetTxResults(tx.Hash(), results) + } + // TODO: document required gas constraints to ensure marshalling predicate results does not error + predicateResultsBytes, err := predicateResults.Bytes() + if err != nil { + return fmt.Errorf("failed to marshal predicate results: %w", err) + } + extraData := b.ethBlock.Extra() + headerPredicateResultsBytes, ok := predicate.GetPredicateResultBytes(extraData) + if !ok { + return fmt.Errorf("failed to find predicate results in extra data: %x", extraData) + } + if !bytes.Equal(headerPredicateResultsBytes, predicateResultsBytes) { + return fmt.Errorf("%w (remote: %x local: %x)", errInvalidHeaderPredicateResults, headerPredicateResultsBytes, predicateResultsBytes) + } + return nil +} + // verifyUTXOsPresent returns an error if any of the atomic transactions name UTXOs that // are not present in shared memory. 
func (b *Block) verifyUTXOsPresent() error { diff --git a/coreth/plugin/evm/block_builder.go b/coreth/plugin/evm/block_builder.go index 381c3e7a..522c911a 100644 --- a/coreth/plugin/evm/block_builder.go +++ b/coreth/plugin/evm/block_builder.go @@ -8,25 +8,16 @@ import ( "time" "github.com/ava-labs/avalanchego/utils/timer" + "github.com/ava-labs/coreth/core" + "github.com/ava-labs/coreth/core/txpool" "github.com/ava-labs/coreth/params" "github.com/ava-labs/avalanchego/snow" commonEng "github.com/ava-labs/avalanchego/snow/engine/common" - "github.com/ava-labs/coreth/core" "github.com/ethereum/go-ethereum/log" ) const ( - // waitBlockTime is the amount of time to wait for BuildBlock to be - // called by the engine before deciding whether or not to gossip the - // transaction that triggered the PendingTxs message to the engine. - // - // This is done to reduce contention in the network when there is no - // preferred producer. If we did not wait here, we may gossip a new - // transaction to a peer while building a block that will conflict with - // whatever the peer makes. - waitBlockTime = 100 * time.Millisecond - // Minimum amount of time to wait after building a block before attempting to build a block // a second time without changing the contents of the mempool. minBlockBuildingRetryDelay = 500 * time.Millisecond @@ -36,7 +27,7 @@ type blockBuilder struct { ctx *snow.Context chainConfig *params.ChainConfig - txPool *core.TxPool + txPool *txpool.TxPool mempool *Mempool gossiper Gossiper @@ -111,7 +102,7 @@ func (b *blockBuilder) handleGenerateBlock() { // needToBuild returns true if there are outstanding transactions to be issued // into a block. 
func (b *blockBuilder) needToBuild() bool { - size := b.txPool.PendingSize() + size := b.txPool.PendingSize(true) return size > 0 || b.mempool.Len() > 0 } @@ -168,9 +159,6 @@ func (b *blockBuilder) awaitSubmittedTxs() { b.signalTxsReady() if b.gossiper != nil && len(ethTxsEvent.Txs) > 0 { - // Give time for this node to build a block before attempting to - // gossip - time.Sleep(waitBlockTime) // [GossipEthTxs] will block unless [gossiper.ethTxsToGossipChan] (an // unbuffered channel) is listened on if err := b.gossiper.GossipEthTxs(ethTxsEvent.Txs); err != nil { @@ -186,9 +174,6 @@ func (b *blockBuilder) awaitSubmittedTxs() { newTxs := b.mempool.GetNewTxs() if b.gossiper != nil && len(newTxs) > 0 { - // Give time for this node to build a block before attempting to - // gossip - time.Sleep(waitBlockTime) if err := b.gossiper.GossipAtomicTxs(newTxs); err != nil { log.Warn( "failed to gossip new atomic transactions", diff --git a/coreth/plugin/evm/block_builder_test.go b/coreth/plugin/evm/block_builder_test.go deleted file mode 100644 index ce65c0c0..00000000 --- a/coreth/plugin/evm/block_builder_test.go +++ /dev/null @@ -1,52 +0,0 @@ -// (c) 2019-2021, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package evm - -import ( - "sync" - "testing" - "time" - - "github.com/ava-labs/coreth/params" - - "github.com/ava-labs/avalanchego/snow" -) - -func TestBlockBuilderShutsDown(t *testing.T) { - shutdownChan := make(chan struct{}) - wg := &sync.WaitGroup{} - builder := &blockBuilder{ - ctx: snow.DefaultContextTest(), - chainConfig: params.TestChainConfig, - shutdownChan: shutdownChan, - shutdownWg: wg, - } - - builder.handleBlockBuilding() - // Close [shutdownChan] and ensure that the wait group finishes in a reasonable - // amount of time. 
- close(shutdownChan) - attemptAwait(t, wg, 5*time.Second) -} - -func TestBlockBuilderSkipsTimerInitialization(t *testing.T) { - shutdownChan := make(chan struct{}) - wg := &sync.WaitGroup{} - builder := &blockBuilder{ - ctx: snow.DefaultContextTest(), - chainConfig: params.TestChainConfig, - shutdownChan: shutdownChan, - shutdownWg: wg, - } - - builder.handleBlockBuilding() - - if builder.buildBlockTimer == nil { - t.Fatal("expected block timer to be non-nil") - } - - // The wait group should finish immediately since no goroutine - // should be created when all prices should be set from the start - attemptAwait(t, wg, time.Millisecond) -} diff --git a/coreth/plugin/evm/block_verification.go b/coreth/plugin/evm/block_verification.go index a868225f..6847d6f5 100644 --- a/coreth/plugin/evm/block_verification.go +++ b/coreth/plugin/evm/block_verification.go @@ -4,6 +4,7 @@ package evm import ( + "errors" "fmt" "math/big" @@ -84,6 +85,7 @@ func (v blockValidator) SyntacticVerify(b *Block, rules params.Rules) error { } } + // Perform block and header sanity checks if !ethHeader.Number.IsUint64() { return fmt.Errorf("invalid block number: %v", ethHeader.Number) } @@ -91,8 +93,12 @@ func (v blockValidator) SyntacticVerify(b *Block, rules params.Rules) error { return fmt.Errorf("invalid difficulty: %d", ethHeader.Difficulty) } if ethHeader.Nonce.Uint64() != 0 { - return fmt.Errorf("invalid block nonce: %v", ethHeader.Nonce) + return fmt.Errorf( + "expected nonce to be 0 but got %d: %w", + ethHeader.Nonce.Uint64(), errInvalidNonce, + ) } + if ethHeader.MixDigest != (common.Hash{}) { return fmt.Errorf("invalid mix digest: %v", ethHeader.MixDigest) } @@ -122,6 +128,13 @@ func (v blockValidator) SyntacticVerify(b *Block, rules params.Rules) error { ) } + } else if rules.IsApricotPhase1 { + if ethHeader.GasLimit != params.ApricotPhase1GasLimit { + return fmt.Errorf( + "expected gas limit to be %d after apricot phase 1 but got %d", + params.ApricotPhase1GasLimit, 
ethHeader.GasLimit, + ) + } } } else { if rules.IsApricotPhase1 { @@ -136,13 +149,20 @@ func (v blockValidator) SyntacticVerify(b *Block, rules params.Rules) error { } // Check that the size of the header's Extra data field is correct for [rules]. - headerExtraDataSize := uint64(len(ethHeader.Extra)) + headerExtraDataSize := len(ethHeader.Extra) switch { + case rules.IsDurango: + if headerExtraDataSize < params.DynamicFeeExtraDataSize { + return fmt.Errorf( + "expected header ExtraData to be len >= %d but got %d", + params.DynamicFeeExtraDataSize, len(ethHeader.Extra), + ) + } case rules.IsApricotPhase3: - if headerExtraDataSize != params.ApricotPhase3ExtraDataSize { + if headerExtraDataSize != params.DynamicFeeExtraDataSize { return fmt.Errorf( - "expected header ExtraData to be %d but got %d", - params.ApricotPhase3ExtraDataSize, headerExtraDataSize, + "expected header ExtraData to be len %d but got %d", + params.DynamicFeeExtraDataSize, headerExtraDataSize, ) } case rules.IsApricotPhase1: @@ -153,7 +173,7 @@ func (v blockValidator) SyntacticVerify(b *Block, rules params.Rules) error { ) } default: - if headerExtraDataSize > params.MaximumExtraDataSize { + if uint64(headerExtraDataSize) > params.MaximumExtraDataSize { return fmt.Errorf( "expected header ExtraData to be <= %d but got %d", params.MaximumExtraDataSize, headerExtraDataSize, @@ -166,7 +186,7 @@ func (v blockValidator) SyntacticVerify(b *Block, rules params.Rules) error { } // Check that the tx hash in the header matches the body - txsHash := types.DeriveSha(b.ethBlock.Transactions(), new(trie.Trie)) + txsHash := types.DeriveSha(b.ethBlock.Transactions(), trie.NewStackTrie(nil)) if txsHash != ethHeader.TxHash { return fmt.Errorf("invalid txs hash %v does not match calculated txs hash %v", ethHeader.TxHash, txsHash) } @@ -269,5 +289,13 @@ func (v blockValidator) SyntacticVerify(b *Block, rules params.Rules) error { } } + // Verify the existence / non-existence of excessDataGas + if rules.IsCancun && 
ethHeader.ExcessDataGas == nil { + return errors.New("missing excessDataGas") + } + if !rules.IsCancun && ethHeader.ExcessDataGas != nil { + return fmt.Errorf("invalid excessDataGas: have %d, expected nil", ethHeader.ExcessDataGas) + } + return nil } diff --git a/coreth/plugin/evm/bonus_blocks.json b/coreth/plugin/evm/bonus_blocks.json new file mode 100644 index 00000000..85a9ce89 --- /dev/null +++ b/coreth/plugin/evm/bonus_blocks.json @@ -0,0 +1,59 @@ +{ + "102972": "f9038ef90252a0c74ee094c31c5b32bbeec1c11121f24aa7d8b8a2619decccfd50fd81acb40f23a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940100000000000000000000000000000000000000a0429317f06493a5b62345b352dc89886349be1516fabb5c7ced0c89e525b5db2ca056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000018301923c837a120080846024074bb839d883010916846765746888676f312e31352e35856c696e75786493289b101f4e5351891648eed78102ce04454e3c76e2add98359008418dd5aa00000000000000000000000000000000000000000000000000000000000000000880000000000000000a00000000000000000000000000000000000000000000000000000000000000000c0c080b90133000000000000000000010427d4b22a2a78bcddd456742caf91b56badbff985ee19aef14573e7343fd652ed5f38341e436e5d46e2bb00b45d62ae97d1b050c64bc634ae10626739e35c4b000000013b2118b491af375375e9eadee2de46c97ff6feae4f668731db688d607220caa70000000121e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff000000050000000005f5e10
00000000100000000000000014feaaf65a52f8888d9e1feedd98ddee6ba3a792e0000000005f5e10021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff0000000100000009000000013e810da674b8d93e36edfe860bdb7173ebd6590b69f9eb64668fcb002ac482bd087e6148cb23d60848fd1fb849a8ef6feafff82ee4bbd67e21e5b118f899787700", + "103105": "f9038ef90252a0513e505367361711aca9cdecdd9230d66e21b358916a002ef9d4f68ea99a278da01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940100000000000000000000000000000000000000a0fa9462076426eabbd24888f52f0f779fdfafc77f243b2a513460ce5f43389c19a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001830192c1837a1200808460240a49b839d883010916846765746888676f312e31352e35856c696e757887c4816af18045303899777b8c3b7e645cbc3ec0ac67f32e690e78361ec2a8c7a00000000000000000000000000000000000000000000000000000000000000000880000000000000000a00000000000000000000000000000000000000000000000000000000000000000c0c080b90133000000000000000000010427d4b22a2a78bcddd456742caf91b56badbff985ee19aef14573e7343fd652ed5f38341e436e5d46e2bb00b45d62ae97d1b050c64bc634ae10626739e35c4b000000019deb549fc00b363feb9bce660a8dd38e8bf952a8579d38815ee1999b1c6a55df0000000021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff0000000500000000842139d00000000100000000000000017280dc9990d02cb3f19769bd00a7d56b1a89de6a00000000842139d021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff0000000100000
0090000000165dee5ddda561b966e23cc0a809e179718bb1e70557adb7fe3b7b5af75da17d41d533ee6ac393757a24bf52c19bed7475b0b4ee0bf152bc71d42e568dc52ceca01", + "103143": "f9038ef90252a0a642e1966e72099899dcf3eda04acd8b8955c6842a615844548558a8bcd5f1d1a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940100000000000000000000000000000000000000a006d4b510e5341d9510162cb81e37dbbe8085c8492d230a1b6fdc70b63c8575b5a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001830192e7837a1200808460240b6bb839d883010916846765746888676f312e31352e35856c696e75784860cf90de8a10a46cd2be1b50babd42b67b43ec05f73b6cd2876ad2c0921f50a00000000000000000000000000000000000000000000000000000000000000000880000000000000000a00000000000000000000000000000000000000000000000000000000000000000c0c080b90133000000000000000000010427d4b22a2a78bcddd456742caf91b56badbff985ee19aef14573e7343fd652ed5f38341e436e5d46e2bb00b45d62ae97d1b050c64bc634ae10626739e35c4b000000019deb549fc00b363feb9bce660a8dd38e8bf952a8579d38815ee1999b1c6a55df0000000021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff0000000500000000842139d00000000100000000000000017280dc9990d02cb3f19769bd00a7d56b1a89de6a00000000842139d021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff00000001000000090000000165dee5ddda561b966e23cc0a809e179718bb1e70557adb7fe3b7b5af75da17d41d533ee6ac393757a24bf52c19bed7475b0b4ee0bf152bc71d42e568dc52ceca01", + "103183": 
"f9038ef90252a00911cb3d5e4e070f361510acbebc2ed1110231e9a614a604e2e5e06e57d8fbc3a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940100000000000000000000000000000000000000a05bc744c960fef22f67d6961ec6b78105401aec6946a174dfaf822d0962f3280ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000018301930f837a1200808460240cb8b839d883010916846765746888676f312e31352e35856c696e7578f92d1df32fb3cfe8dc1ff561cf50adfc7ccf74eaff9ec6c65a83b07a9a211f76a00000000000000000000000000000000000000000000000000000000000000000880000000000000000a00000000000000000000000000000000000000000000000000000000000000000c0c080b90133000000000000000000010427d4b22a2a78bcddd456742caf91b56badbff985ee19aef14573e7343fd652ed5f38341e436e5d46e2bb00b45d62ae97d1b050c64bc634ae10626739e35c4b00000001e7a32122bb09e70840529012e98983201f950327c0956b24df03fb8a49673b8f0000000021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff0000000500000000766f36c000000001000000000000000112c9f2c04673b4920f8591eb3d161e4fab9cea4400000000766f36c021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff000000010000000900000001fae39f1db76d1eee3cb63c6233b4456bea63858906c0120ffff7b42e34faf9be0402a1d4ed205120f93592fd09f0992e7cc4f67ab98be7d05d16762a42d78e6f01", + "103197": 
"f9038ef90252a04c667e7d1e720742a74bb95fc6df73fb157c7b3570ba97eddfc3708ad846bd71a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940100000000000000000000000000000000000000a0328f1bd8d86f31d3f3429d86604c6866b99e55da124e07c178bae5d961c33d19a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000018301931d837a1200808460240d08b839d883010916846765746888676f312e31352e35856c696e75789d976d0ab20c29126bad2ef1e691c900a92950bbf46e803e19331379ebfeb860a00000000000000000000000000000000000000000000000000000000000000000880000000000000000a00000000000000000000000000000000000000000000000000000000000000000c0c080b90133000000000000000000010427d4b22a2a78bcddd456742caf91b56badbff985ee19aef14573e7343fd652ed5f38341e436e5d46e2bb00b45d62ae97d1b050c64bc634ae10626739e35c4b0000000193278e877a4df0a9b9acb9fce5131d257256a5c38091bb6080538cd2e66921f90000000021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff0000000500000000473a40c00000000100000000000000016847030511f7f93716659ab560bd9dc15918947500000000473a40c021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff000000010000000900000001c7e08e0addec2636f1bc6554e3f61593a0dd560c8296f9f187cdfaacd1f4e3b25d0c35787fc3cfe25ee2a2b50a4ed44cf852acb5d8acfb629acff6032681c63d00", + "103203": 
"f9038ef90252a0bd7a542da0830f94cb0fec58d73549eab3707cbc4b0fc72a08712d6b8d4bb335a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940100000000000000000000000000000000000000a0f675a0c150f35f58d311c0f8f3c08ea13edf89f6db0c34aa5dacec72cfb6e1d7a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000183019323837a1200808460240d1db839d883010916846765746888676f312e31352e35856c696e75784296d76298666d75f9838d4ccab5801c096e03d787955dd28582adcb1e335704a00000000000000000000000000000000000000000000000000000000000000000880000000000000000a00000000000000000000000000000000000000000000000000000000000000000c0c080b90133000000000000000000010427d4b22a2a78bcddd456742caf91b56badbff985ee19aef14573e7343fd652ed5f38341e436e5d46e2bb00b45d62ae97d1b050c64bc634ae10626739e35c4b000000013547216bd5757e5815c212450cccd97f1bdc1e7f9d1d0770cdf9f7f72703ed6a0000000021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff000000050000000ec60f26aa0000000100000000000000010a99c32affaec0a697d4cb4bf660eeddcf432c210000000ec60f26aa21e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff00000001000000090000000161a837760af37f5f376b4efc05309c7247bb0d1309fd0514f95cfde01e9211b15376eea97d96e95a4a9265ced221c94dd1a3804ad88ca93e49ba964f01df7e9d01", + "103208": 
"f9038ef90252a0a2fda7cdc8b17dd75c5c80617c065081f6616fdc7eb0967cdd753b7fb4081d5aa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940100000000000000000000000000000000000000a0daf621b199444bc4e863979c7214e1da42d43272ecaa078667894b7fc3ab4eb9a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000183019328837a1200808460240d4db839d883010916846765746888676f312e31352e35856c696e7578643a244fc51171894f5f4b264c39dd2c93ec4d984547e75f35c62e890dfde9b7a00000000000000000000000000000000000000000000000000000000000000000880000000000000000a00000000000000000000000000000000000000000000000000000000000000000c0c080b90133000000000000000000010427d4b22a2a78bcddd456742caf91b56badbff985ee19aef14573e7343fd652ed5f38341e436e5d46e2bb00b45d62ae97d1b050c64bc634ae10626739e35c4b0000000193278e877a4df0a9b9acb9fce5131d257256a5c38091bb6080538cd2e66921f90000000021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff0000000500000000473a40c00000000100000000000000016847030511f7f93716659ab560bd9dc15918947500000000473a40c021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff000000010000000900000001c7e08e0addec2636f1bc6554e3f61593a0dd560c8296f9f187cdfaacd1f4e3b25d0c35787fc3cfe25ee2a2b50a4ed44cf852acb5d8acfb629acff6032681c63d00", + "103209": 
"f9038ef90252a05601f43df502887b06455cbcebe32a1b905485a878550accf9b6d9dd3a1e1cf2a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940100000000000000000000000000000000000000a06608f6e0caafe481010fb2c230530dc12834757992a7cec8bf40708a35c6a173a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000183019329837a1200808460240d50b839d883010916846765746888676f312e31352e35856c696e757854f141b17b41a6e4dd4e5309f6890e18662bd797da637188a399cbe0a32b2709a00000000000000000000000000000000000000000000000000000000000000000880000000000000000a00000000000000000000000000000000000000000000000000000000000000000c0c080b90133000000000000000000010427d4b22a2a78bcddd456742caf91b56badbff985ee19aef14573e7343fd652ed5f38341e436e5d46e2bb00b45d62ae97d1b050c64bc634ae10626739e35c4b000000019deb549fc00b363feb9bce660a8dd38e8bf952a8579d38815ee1999b1c6a55df0000000021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff0000000500000000842139d00000000100000000000000017280dc9990d02cb3f19769bd00a7d56b1a89de6a00000000842139d021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff00000001000000090000000165dee5ddda561b966e23cc0a809e179718bb1e70557adb7fe3b7b5af75da17d41d533ee6ac393757a24bf52c19bed7475b0b4ee0bf152bc71d42e568dc52ceca01", + "103259": 
"f9038ef90252a07f58dbf7f272b4f901f9a67f8578b9d219a4f3990cf9d3c177ff666006677a5ca01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940100000000000000000000000000000000000000a05c18537dcde66c462fae0e82ddb6f26302f342d301ce556f4f260de2ba7a5246a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000018301935b837a1200808460240ed0b839d883010916846765746888676f312e31352e35856c696e757818a5e0af611f0c5e68f6d7ec115955f3f7c924e061614dc723ee751fea825fb1a00000000000000000000000000000000000000000000000000000000000000000880000000000000000a00000000000000000000000000000000000000000000000000000000000000000c0c080b90133000000000000000000010427d4b22a2a78bcddd456742caf91b56badbff985ee19aef14573e7343fd652ed5f38341e436e5d46e2bb00b45d62ae97d1b050c64bc634ae10626739e35c4b000000019deb549fc00b363feb9bce660a8dd38e8bf952a8579d38815ee1999b1c6a55df0000000021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff0000000500000000842139d00000000100000000000000017280dc9990d02cb3f19769bd00a7d56b1a89de6a00000000842139d021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff00000001000000090000000165dee5ddda561b966e23cc0a809e179718bb1e70557adb7fe3b7b5af75da17d41d533ee6ac393757a24bf52c19bed7475b0b4ee0bf152bc71d42e568dc52ceca01", + "103261": 
"f9046bf90252a0f2edb380a6bd63868efdb5e90a79f7ec4db05a931901fceecb5e41b1c92d8bfea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940100000000000000000000000000000000000000a091aab77fca03b8edfdf9f8899280236067478315bb46e6a1940950d38bb6c72da056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000018301935d837a1200808460240ed9b839d883010916846765746888676f312e31352e35856c696e757868a4b00c5d80c9928a74f44e803a727afaa4fd42a9074dbc5060d62cc0985847a00000000000000000000000000000000000000000000000000000000000000000880000000000000000a00000000000000000000000000000000000000000000000000000000000000000c0c080b90210000000000000000000010427d4b22a2a78bcddd456742caf91b56badbff985ee19aef14573e7343fd652ed5f38341e436e5d46e2bb00b45d62ae97d1b050c64bc634ae10626739e35c4b000000026fdbfb3f106d90c93a0fe870d9bf7a7dfb834652ac98ef023494267da03dd3000000000121e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff000000050000000014dc93800000000100000000d7293778849781e6ddca7b0ea3e62c13e0738c27d1fa844e54775467fba908440000000121e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff000000050000000014dc9380000000010000000000000002f1c2a7833a74ae5797a9f0aae4592d21f8cdd1f20000000014dc938021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dfff1c2a7833a74ae5797a9f0aae4592d21f8cdd1f20000000014dc938021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff0000000200000009000000010c8c4ca
d5ddda1ca3d3ace0744e93d193fccf1725195e0a5e954b0751e414912495b5727fa5422fd5c3a43050520d98283369d4a10ab6a46d51e80f98c5c6c580000000009000000010c8c4cad5ddda1ca3d3ace0744e93d193fccf1725195e0a5e954b0751e414912495b5727fa5422fd5c3a43050520d98283369d4a10ab6a46d51e80f98c5c6c5800", + "103266": "f9038ef90252a0c153c78c9e1122cbf09f865dc46bdf5fac069f7a2b0160544d18c0e1dfaa02dfa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940100000000000000000000000000000000000000a0c28a2827aec4f07a50e05327be8aba6fbda8c03774456f6d76930f6dee12fad7a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000183019362837a1200808460240ef2b839d883010916846765746888676f312e31352e35856c696e7578e0a6549610da7eae126f670d52c471e90702a69fd7b037833156c4733b104114a00000000000000000000000000000000000000000000000000000000000000000880000000000000000a00000000000000000000000000000000000000000000000000000000000000000c0c080b90133000000000000000000010427d4b22a2a78bcddd456742caf91b56badbff985ee19aef14573e7343fd652ed5f38341e436e5d46e2bb00b45d62ae97d1b050c64bc634ae10626739e35c4b000000019deb549fc00b363feb9bce660a8dd38e8bf952a8579d38815ee1999b1c6a55df0000000021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff0000000500000000842139d00000000100000000000000017280dc9990d02cb3f19769bd00a7d56b1a89de6a00000000842139d021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff00000001000000090000000165dee5ddda561b966e2
3cc0a809e179718bb1e70557adb7fe3b7b5af75da17d41d533ee6ac393757a24bf52c19bed7475b0b4ee0bf152bc71d42e568dc52ceca01", + "103287": "f9038ef90252a04008dbc7200d2ccec8ca6ae6d0ab10dad8f45c1afecea73d5520a4f38688c4eda01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940100000000000000000000000000000000000000a03e2c6954e145c25b793f639ba9a165e92d05091533752f31133c2e0ddb7c1214a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000183019377837a1200808460240fa9b839d883010916846765746888676f312e31352e35856c696e7578542dd250b5cc10604111577d6b66c8cf14ae4e9573679ee3299046fa2c1d5184a00000000000000000000000000000000000000000000000000000000000000000880000000000000000a00000000000000000000000000000000000000000000000000000000000000000c0c080b90133000000000000000000010427d4b22a2a78bcddd456742caf91b56badbff985ee19aef14573e7343fd652ed5f38341e436e5d46e2bb00b45d62ae97d1b050c64bc634ae10626739e35c4b00000001d7293778849781e6ddca7b0ea3e62c13e0738c27d1fa844e54775467fba908440000000121e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff000000050000000014dc9380000000010000000000000001f1c2a7833a74ae5797a9f0aae4592d21f8cdd1f20000000014dc938021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff0000000100000009000000013ce5ec2b57265abb6fe41b0b4ec918a2e3ffbcc22e58c66571472037d12c37d15f21cdaa28c7e2313a3a3a82c096537a819cc140c6c638e1728c03b9112412d601", + "103339": 
"f9046bf90252a0c71c9be75cc061e9412a3da3d16e23a0256d9c2205d3473939421126383e95eea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940100000000000000000000000000000000000000a096db4bfd3e0198405dad9354c1873d724575204d9edec57c430cec9f4881bb10a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001830193ab837a1200808460241156b839d883010916846765746888676f312e31352e35856c696e757825ee5f976d9a915c537013161afb10a4900a9fe66e2e13e00df197ca34933100a00000000000000000000000000000000000000000000000000000000000000000880000000000000000a00000000000000000000000000000000000000000000000000000000000000000c0c080b90210000000000000000000010427d4b22a2a78bcddd456742caf91b56badbff985ee19aef14573e7343fd652ed5f38341e436e5d46e2bb00b45d62ae97d1b050c64bc634ae10626739e35c4b000000026fdbfb3f106d90c93a0fe870d9bf7a7dfb834652ac98ef023494267da03dd3000000000121e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff000000050000000014dc93800000000100000000d7293778849781e6ddca7b0ea3e62c13e0738c27d1fa844e54775467fba908440000000121e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff000000050000000014dc9380000000010000000000000002f1c2a7833a74ae5797a9f0aae4592d21f8cdd1f20000000014dc938021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dfff1c2a7833a74ae5797a9f0aae4592d21f8cdd1f20000000014dc938021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff0000000200000009000000010c8c4ca
d5ddda1ca3d3ace0744e93d193fccf1725195e0a5e954b0751e414912495b5727fa5422fd5c3a43050520d98283369d4a10ab6a46d51e80f98c5c6c580000000009000000010c8c4cad5ddda1ca3d3ace0744e93d193fccf1725195e0a5e954b0751e414912495b5727fa5422fd5c3a43050520d98283369d4a10ab6a46d51e80f98c5c6c5800", + "103346": "f9038ef90252a06191d0ce38e9f3e49ebce2fef480c4d5a2a4e1c0801abbab97906302dd5c7a4ca01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940100000000000000000000000000000000000000a0e4a7deed2779bd933d35b535ff3c22537ba465f3d066b9a538dcc20b553b9f14a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001830193b2837a12008084602411a9b839d883010916846765746888676f312e31352e35856c696e7578eb224de136de36472c04604c0496aef34c7b81ed01f59e716cc1c0a6567cbe8fa00000000000000000000000000000000000000000000000000000000000000000880000000000000000a00000000000000000000000000000000000000000000000000000000000000000c0c080b90133000000000000000000010427d4b22a2a78bcddd456742caf91b56badbff985ee19aef14573e7343fd652ed5f38341e436e5d46e2bb00b45d62ae97d1b050c64bc634ae10626739e35c4b000000013547216bd5757e5815c212450cccd97f1bdc1e7f9d1d0770cdf9f7f72703ed6a0000000021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff000000050000000ec60f26aa0000000100000000000000010a99c32affaec0a697d4cb4bf660eeddcf432c210000000ec60f26aa21e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff00000001000000090000000161a837760af37f5f376
b4efc05309c7247bb0d1309fd0514f95cfde01e9211b15376eea97d96e95a4a9265ced221c94dd1a3804ad88ca93e49ba964f01df7e9d01", + "103350": "f9046bf90252a0a9cc1323fead79be07f3272a5e15ee6bb8b58bf5a3ebcab579c7b30a94bbd3bca01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940100000000000000000000000000000000000000a08fb6e09bfe543c559d518bf73a0d5b91de15bcde4b5fda04da38e2797e49c98ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001830193b6837a12008084602411e3b839d883010916846765746888676f312e31352e35856c696e7578777eeed570c4f59dd005778d6204a3d69769809ed546dc03013e40801e940b5da00000000000000000000000000000000000000000000000000000000000000000880000000000000000a00000000000000000000000000000000000000000000000000000000000000000c0c080b90210000000000000000000010427d4b22a2a78bcddd456742caf91b56badbff985ee19aef14573e7343fd652ed5f38341e436e5d46e2bb00b45d62ae97d1b050c64bc634ae10626739e35c4b000000026fdbfb3f106d90c93a0fe870d9bf7a7dfb834652ac98ef023494267da03dd3000000000121e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff000000050000000014dc93800000000100000000d7293778849781e6ddca7b0ea3e62c13e0738c27d1fa844e54775467fba908440000000121e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff000000050000000014dc9380000000010000000000000002f1c2a7833a74ae5797a9f0aae4592d21f8cdd1f20000000014dc938021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dfff1c2a7833a74ae5797a9f0aae
4592d21f8cdd1f20000000014dc938021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff0000000200000009000000010c8c4cad5ddda1ca3d3ace0744e93d193fccf1725195e0a5e954b0751e414912495b5727fa5422fd5c3a43050520d98283369d4a10ab6a46d51e80f98c5c6c580000000009000000010c8c4cad5ddda1ca3d3ace0744e93d193fccf1725195e0a5e954b0751e414912495b5727fa5422fd5c3a43050520d98283369d4a10ab6a46d51e80f98c5c6c5800", + "103358": "f9046bf90252a0dfb66e9d79c501c38da13c7e5f731046d19bd8590007c853c4a48ab6201c4cbfa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940100000000000000000000000000000000000000a011e8cd5dceaee6d6885b68409d34f6573afd36bc67575378fa9e8a06f4086f4ba056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001830193be837a120080846024123bb839d883010916846765746888676f312e31352e35856c696e7578c76542a0a122686805bc189d1fe54c67ec547e7b4eebb90ce8320828f3c31d2ba00000000000000000000000000000000000000000000000000000000000000000880000000000000000a00000000000000000000000000000000000000000000000000000000000000000c0c080b90210000000000000000000010427d4b22a2a78bcddd456742caf91b56badbff985ee19aef14573e7343fd652ed5f38341e436e5d46e2bb00b45d62ae97d1b050c64bc634ae10626739e35c4b000000026fdbfb3f106d90c93a0fe870d9bf7a7dfb834652ac98ef023494267da03dd3000000000121e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff000000050000000014dc93800000000100000000d7293778849781e6ddca7b0ea3e62c13e0738c27d1fa8
44e54775467fba908440000000121e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff000000050000000014dc9380000000010000000000000002f1c2a7833a74ae5797a9f0aae4592d21f8cdd1f20000000014dc938021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dfff1c2a7833a74ae5797a9f0aae4592d21f8cdd1f20000000014dc938021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff0000000200000009000000010c8c4cad5ddda1ca3d3ace0744e93d193fccf1725195e0a5e954b0751e414912495b5727fa5422fd5c3a43050520d98283369d4a10ab6a46d51e80f98c5c6c580000000009000000010c8c4cad5ddda1ca3d3ace0744e93d193fccf1725195e0a5e954b0751e414912495b5727fa5422fd5c3a43050520d98283369d4a10ab6a46d51e80f98c5c6c5800", + "103437": "f9038ef90252a06abfdbe56b6255282759a7d576943d085a793437fcb2d7bb17249349a3236211a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940100000000000000000000000000000000000000a0781a4e607a3ea427b94b1038a90a1898ca89fb17b2e2207fab44cbe1bd6adb6ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000018301940d837a12008084602416cbb839d883010916846765746888676f312e31352e35856c696e7578795753f1703e0647bc2081825bb0dd48d5df22ac5175e77b08eed3a543faa9dfa00000000000000000000000000000000000000000000000000000000000000000880000000000000000a00000000000000000000000000000000000000000000000000000000000000000c0c080b90133000000000000000000010427d4b22a2a78bcddd456742caf91b56badbff985ee19aef14573e7343fd652ed5f38341
e436e5d46e2bb00b45d62ae97d1b050c64bc634ae10626739e35c4b000000010bf0a6168c7299d99c8ca3b0c9a7a80d0557b96e2274ae3e3338c43a1227272f0000000021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff0000000500000002eafd1c580000000100000000000000018600a3e3403ad0d9456bc9c7cf7922b6dc1ac9b600000002eafd1c5821e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff000000010000000900000001745dfcd3d26aa92edb94592294ff21eeee8f36ef86f33d74d7fa11d6121a89ed1f8104bfb26942642cc9c52f9eece2b76c10d6aa6074d9ca64cbeca78c20b49f00", + "103472": "f9038ef90252a0d9adcb85ea6d4eaa9856d3a508ed981abfb3dd56fc407f4f25fd0799c1c76996a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940100000000000000000000000000000000000000a06e63441952502f2f1730e5ddb707f0c191dda413fbdd8ce2bf4c6c0d4330e20fa056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000183019430837a12008084602418beb839d883010916846765746888676f312e31352e35856c696e757891cc6335d2be55ed65b34e1c04f537b12370ff6847e277a49cb905bc8ab59dc7a00000000000000000000000000000000000000000000000000000000000000000880000000000000000a00000000000000000000000000000000000000000000000000000000000000000c0c080b90133000000000000000000010427d4b22a2a78bcddd456742caf91b56badbff985ee19aef14573e7343fd652ed5f38341e436e5d46e2bb00b45d62ae97d1b050c64bc634ae10626739e35c4b000000013547216bd5757e5815c212450cccd97f1bdc1e7f9d1d0770cdf9f7f72703ed6a0000000021e67317cbc4be2aeb00677
ad6462778a8f52274b9d605df2591b23027a87dff000000050000000ec60f26aa0000000100000000000000010a99c32affaec0a697d4cb4bf660eeddcf432c210000000ec60f26aa21e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff00000001000000090000000161a837760af37f5f376b4efc05309c7247bb0d1309fd0514f95cfde01e9211b15376eea97d96e95a4a9265ced221c94dd1a3804ad88ca93e49ba964f01df7e9d01", + "103478": "f9038ef90252a0e1884ec4bf719b48f54f1ba33ecd297c87a08924f0b9f373f70a654ed2d1d5d5a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940100000000000000000000000000000000000000a08b67c01af303de5c661ccd473a121f3c9b022da75d9139ba3ce128e9c0b67705a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000183019436837a12008084602418e0b839d883010916846765746888676f312e31352e35856c696e7578d6639f3bf0f177052c62ff07796c6e9169df166d7a365bb6612669123747a143a00000000000000000000000000000000000000000000000000000000000000000880000000000000000a00000000000000000000000000000000000000000000000000000000000000000c0c080b90133000000000000000000010427d4b22a2a78bcddd456742caf91b56badbff985ee19aef14573e7343fd652ed5f38341e436e5d46e2bb00b45d62ae97d1b050c64bc634ae10626739e35c4b00000001d39342f77daf933c1909e4da0b04384974f2708841acfd831764a068b0a9eda10000000021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff00000005000000003ec381400000000100000000000000013fa223ec31d13ebeb6d514ba0bb7570f32f9b408000000003ec3814021e67317cbc4b
e2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff000000010000000900000001097fea7f7eee4808a2097caa93c0ba8d0f360cddbf6246e74ff8bd5c522aba6c0374a93ea64aca703bc16e3a93e9519d01362d2178b2631eccbc9b5dd66ca15600", + "103493": "f9038ef90252a08ece295a4f4fa38c72e3807ec8130aed38342711765b097615aa7edac621aef0a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940100000000000000000000000000000000000000a0082569e3dcec35aae869262cafe3a9d5ed097365d7af57a6a02ec2ab05cf259da056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000183019445837a1200808460241978b839d883010916846765746888676f312e31352e35856c696e7578cf7a6e008f9ec5d252a71c0bff16fe7fbee3be888c2c2d1b0b149a7e2c17f7caa00000000000000000000000000000000000000000000000000000000000000000880000000000000000a00000000000000000000000000000000000000000000000000000000000000000c0c080b90133000000000000000000010427d4b22a2a78bcddd456742caf91b56badbff985ee19aef14573e7343fd652ed5f38341e436e5d46e2bb00b45d62ae97d1b050c64bc634ae10626739e35c4b00000001f0211b9325134297437827fe9adc9c2b146880494d1cafb1b3a7cffb43e3111a0000000021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff0000000500000006fb6c91000000000100000000000000018d125687f0a9ea05e48cca928031bace3557767a00000006fb6c910021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff000000010000000900000001849be52f3a04ca0773a125ddf896c9c015d21c4278bb2d2b4132176d9fd36f1c3c4ab2492dc2a3e10cb
9957ede492b1a10fd0e2c63de12aaa2952f3e8aae0c3b01", + "103514": "f9038ef90252a0e9137ca267ed5c20158cf401943efdfe9b9e5470bf865b7e6992829a75222db3a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940100000000000000000000000000000000000000a0c651ca04575701abe6388b09035e90fa21e1f380457e85e82695973922a357e5a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000018301945a837a1200808460241adeb839d883010916846765746888676f312e31352e35856c696e7578f665575ed6570793989476baf7b247db183ffe99f5eeac4c398f8ab02a46294da00000000000000000000000000000000000000000000000000000000000000000880000000000000000a00000000000000000000000000000000000000000000000000000000000000000c0c080b90133000000000000000000010427d4b22a2a78bcddd456742caf91b56badbff985ee19aef14573e7343fd652ed5f38341e436e5d46e2bb00b45d62ae97d1b050c64bc634ae10626739e35c4b000000013547216bd5757e5815c212450cccd97f1bdc1e7f9d1d0770cdf9f7f72703ed6a0000000021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff000000050000000ec60f26aa0000000100000000000000010a99c32affaec0a697d4cb4bf660eeddcf432c210000000ec60f26aa21e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff00000001000000090000000161a837760af37f5f376b4efc05309c7247bb0d1309fd0514f95cfde01e9211b15376eea97d96e95a4a9265ced221c94dd1a3804ad88ca93e49ba964f01df7e9d01", + "103536": 
"f9038ef90252a014782480276c156c0ea08ee6b39f903b9b8521fce9ad150311e010b692c35024a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940100000000000000000000000000000000000000a0ee3691656bc7e9da7c9b091a20f87f53ae54a67bdb86207742434ae68b335ce6a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000183019470837a1200808460241bd1b839d883010916846765746888676f312e31352e35856c696e7578d0b4f387ee8fa5df7ca5b96c5a2aff4e1a702be6daa5c20f4c77e9263b233f67a00000000000000000000000000000000000000000000000000000000000000000880000000000000000a00000000000000000000000000000000000000000000000000000000000000000c0c080b90133000000000000000000010427d4b22a2a78bcddd456742caf91b56badbff985ee19aef14573e7343fd652ed5f38341e436e5d46e2bb00b45d62ae97d1b050c64bc634ae10626739e35c4b000000013547216bd5757e5815c212450cccd97f1bdc1e7f9d1d0770cdf9f7f72703ed6a0000000021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff000000050000000ec60f26aa0000000100000000000000010a99c32affaec0a697d4cb4bf660eeddcf432c210000000ec60f26aa21e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff00000001000000090000000161a837760af37f5f376b4efc05309c7247bb0d1309fd0514f95cfde01e9211b15376eea97d96e95a4a9265ced221c94dd1a3804ad88ca93e49ba964f01df7e9d01", + "103545": 
"f9038ef90252a01fb16a8ce166f8cecdfc9c768dba81d593d523849486d3c33a5790cf6b882b8fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940100000000000000000000000000000000000000a02e4f6358d003dc69348351953fa0dcc88cfd2766a71c76d02955b7184aa88cbda056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000183019479837a1200808460241c4bb839d883010916846765746888676f312e31352e35856c696e7578ea958560f491f6fd26945789cfa74caf94fd7f825f4839ee13afa7233f07016ea00000000000000000000000000000000000000000000000000000000000000000880000000000000000a00000000000000000000000000000000000000000000000000000000000000000c0c080b90133000000000000000000010427d4b22a2a78bcddd456742caf91b56badbff985ee19aef14573e7343fd652ed5f38341e436e5d46e2bb00b45d62ae97d1b050c64bc634ae10626739e35c4b000000014f086e98c63e6d3eef4f7d896ffd2bedf34524c9b2261c37c33a4e71efe329650000000121e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff0000000500000002540be400000000010000000000000001c1bf05f27cb5cda00c1dfb01c423ccd899a2c48b00000002540be40021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff0000000100000009000000015a1bd3d80a6345f3de5648e9313519d76635210845f72bfc7cba1526f2febb8a17c5603109060f8b224ee3c0e38a1e4f8f3687943a93265b26ef933afdb20fe800", + "103547": 
"f9038ef90252a08591d5623f68656af145a611a57f134dfae52789542631548265617d3d2bca85a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940100000000000000000000000000000000000000a0af0cab04723fd525a302a737e154fad796735648a9fd7838e38d55e5028ed4a0a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000018301947b837a1200808460241c6ab839d883010916846765746888676f312e31352e35856c696e7578ba06dddce9623f4d7a3d3c6f11f0ac1692f71462ea54f7f989dfa3e5023d8ebda00000000000000000000000000000000000000000000000000000000000000000880000000000000000a00000000000000000000000000000000000000000000000000000000000000000c0c080b90133000000000000000000010427d4b22a2a78bcddd456742caf91b56badbff985ee19aef14573e7343fd652ed5f38341e436e5d46e2bb00b45d62ae97d1b050c64bc634ae10626739e35c4b00000001536cfa0cb18494a7f56db284c0045a879863ccc7aa3e4524fc6284078231f2420000000121e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff000000050000000165a0bc000000000100000000000000010f2114b7a88f320b5f8b16335c35cf08fc794cb40000000165a0bc0021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff0000000100000009000000014359c4c6bce53952a54c94613bc30507f1e708b27f643591b769e7e73c9d689420dff5c69eb0de0588acf0eb1a1ff40b4685cab7092417a1e7529e9b2d04a6b300", + "103554": 
"f9038ef90252a0d0f9f9521d07b169cd36b0bebf60d584dc249514160f9bd9210616ea820468d7a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940100000000000000000000000000000000000000a04de5e67dae5381e1c8a74eed02b36b8512b3c1148fa23ed8d83169f06676741da056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000183019482837a1200808460241c8eb839d883010916846765746888676f312e31352e35856c696e7578d9727588dbc846c45c1bd9d0c64f66897236e056c0e151f48b23993fbc28c621a00000000000000000000000000000000000000000000000000000000000000000880000000000000000a00000000000000000000000000000000000000000000000000000000000000000c0c080b90133000000000000000000010427d4b22a2a78bcddd456742caf91b56badbff985ee19aef14573e7343fd652ed5f38341e436e5d46e2bb00b45d62ae97d1b050c64bc634ae10626739e35c4b000000014f086e98c63e6d3eef4f7d896ffd2bedf34524c9b2261c37c33a4e71efe329650000000121e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff0000000500000002540be400000000010000000000000001c1bf05f27cb5cda00c1dfb01c423ccd899a2c48b00000002540be40021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff0000000100000009000000015a1bd3d80a6345f3de5648e9313519d76635210845f72bfc7cba1526f2febb8a17c5603109060f8b224ee3c0e38a1e4f8f3687943a93265b26ef933afdb20fe800", + "103555": 
"f9038ef90252a013fce5dfc44e6acf34d5aa52241d83d44451b0e4a536cc2eb5712259fc79ac3fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940100000000000000000000000000000000000000a00d52f6869415d282172b54e32c91d0981691f575654454764d1e0c36033ae351a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000183019483837a1200808460241c9eb839d883010916846765746888676f312e31352e35856c696e7578c5f5540a0f50a5f81edc48d8550a5f14ed8b1d6bb5b622974345eee5d314e3c9a00000000000000000000000000000000000000000000000000000000000000000880000000000000000a00000000000000000000000000000000000000000000000000000000000000000c0c080b90133000000000000000000010427d4b22a2a78bcddd456742caf91b56badbff985ee19aef14573e7343fd652ed5f38341e436e5d46e2bb00b45d62ae97d1b050c64bc634ae10626739e35c4b000000018e5dbb04c489663d970aecd90e8c1dc9a74d1e79cedbb361c38c0c025702b2f30000000021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff00000005000000007faeeb90000000010000000000000001efa8f53bcd86fdb8268e43c8b4e28ac110073f61000000007faeeb9021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff000000010000000900000001e6ab0095f5493b6178c13f40260dc8c6f47016a2d9f44877329d0703849bd7455f16c358eafcc8515c75ab66adb600d5d2d76a3bef5825873f7515a8fe21da6901", + "103559": 
"f9038ef90252a0a661fc962551d4276affab0421026ea32a0b1f87162b44a910e9b3550a525ad6a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940100000000000000000000000000000000000000a067059f76d8a00b0e68b3d9cb3f902c195c4fab348389b2457dcc85bc97f90437a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000183019487837a1200808460241cbfb839d883010916846765746888676f312e31352e35856c696e75781f2f14a9027030359dd8313efab1583ce7bda8381a801b8559e0525d090d4a65a00000000000000000000000000000000000000000000000000000000000000000880000000000000000a00000000000000000000000000000000000000000000000000000000000000000c0c080b90133000000000000000000010427d4b22a2a78bcddd456742caf91b56badbff985ee19aef14573e7343fd652ed5f38341e436e5d46e2bb00b45d62ae97d1b050c64bc634ae10626739e35c4b000000014f086e98c63e6d3eef4f7d896ffd2bedf34524c9b2261c37c33a4e71efe329650000000121e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff0000000500000002540be400000000010000000000000001c1bf05f27cb5cda00c1dfb01c423ccd899a2c48b00000002540be40021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff0000000100000009000000015a1bd3d80a6345f3de5648e9313519d76635210845f72bfc7cba1526f2febb8a17c5603109060f8b224ee3c0e38a1e4f8f3687943a93265b26ef933afdb20fe800", + "103561": 
"f9038ef90252a0b56a157744a89979da81d9ebbd0d29cfb44651a399049b2aa8770b336a35fc80a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940100000000000000000000000000000000000000a0d84de957a9386e45f4930c4e0103221de493ef47629c6a8868c5715910097cd8a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000183019489837a1200808460241cc4b839d883010916846765746888676f312e31352e35856c696e75781608617ad9cc93120b4ecd601a83ef13d9a53b03cfff8f98c99c767a0b3d5711a00000000000000000000000000000000000000000000000000000000000000000880000000000000000a00000000000000000000000000000000000000000000000000000000000000000c0c080b90133000000000000000000010427d4b22a2a78bcddd456742caf91b56badbff985ee19aef14573e7343fd652ed5f38341e436e5d46e2bb00b45d62ae97d1b050c64bc634ae10626739e35c4b000000018e5dbb04c489663d970aecd90e8c1dc9a74d1e79cedbb361c38c0c025702b2f30000000021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff00000005000000007faeeb90000000010000000000000001efa8f53bcd86fdb8268e43c8b4e28ac110073f61000000007faeeb9021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff000000010000000900000001e6ab0095f5493b6178c13f40260dc8c6f47016a2d9f44877329d0703849bd7455f16c358eafcc8515c75ab66adb600d5d2d76a3bef5825873f7515a8fe21da6901", + "103563": 
"f9038ef90252a041d27bcdcdf57c9899da36d2eb3cda8ffc71a9c1cb1bfd5d92300cbde2e886baa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940100000000000000000000000000000000000000a0c431f97e5be97f66666d6c3d62313ccdb495851823d4e4d685b9634df79918c4a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000018301948b837a1200808460241cecb839d883010916846765746888676f312e31352e35856c696e7578e8fec876892c0ff218a3f1977a6a7d9a81ddb8fa75baf2f0381b8e6b48c98fc2a00000000000000000000000000000000000000000000000000000000000000000880000000000000000a00000000000000000000000000000000000000000000000000000000000000000c0c080b90133000000000000000000010427d4b22a2a78bcddd456742caf91b56badbff985ee19aef14573e7343fd652ed5f38341e436e5d46e2bb00b45d62ae97d1b050c64bc634ae10626739e35c4b00000001de8bcffc06b236e572194aca5b65bcca1b998b9dad5f3c15b981c812a4ab8e690000000021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff000000050000000253640b4000000001000000000000000198961c99d66d6f07283b222dc4d955b4f4fcb0590000000253640b4021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff0000000100000009000000019a9b70101aa5203ccaa5857bd42707e47c794b348b4836ab2caf43793b88c7bf12a411aa6e95c30657345b46d326b701b01f021d189d67b7bb002578c064186101", + "103564": 
"f9038ef90252a09957f50cf6a6dc652107c32aef990516fa2870265984d81b476314b74737338da01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940100000000000000000000000000000000000000a0da8adbdc3b2acc2da89a9bdeda53fbea07b3c4081f0dc897291306e7e55d3ecba056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000018301948c837a1200808460241cedb839d883010916846765746888676f312e31352e35856c696e757811060f20bfd1af1592c9f59f8d3b9e7febb6c6a9fddbb8faab0b59766469d6f3a00000000000000000000000000000000000000000000000000000000000000000880000000000000000a00000000000000000000000000000000000000000000000000000000000000000c0c080b90133000000000000000000010427d4b22a2a78bcddd456742caf91b56badbff985ee19aef14573e7343fd652ed5f38341e436e5d46e2bb00b45d62ae97d1b050c64bc634ae10626739e35c4b000000014f086e98c63e6d3eef4f7d896ffd2bedf34524c9b2261c37c33a4e71efe329650000000121e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff0000000500000002540be400000000010000000000000001c1bf05f27cb5cda00c1dfb01c423ccd899a2c48b00000002540be40021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff0000000100000009000000015a1bd3d80a6345f3de5648e9313519d76635210845f72bfc7cba1526f2febb8a17c5603109060f8b224ee3c0e38a1e4f8f3687943a93265b26ef933afdb20fe800", + "103569": 
"f9038ef90252a0c0b61fa5a766988e46511bf733071ef33bc7986b255996afc9808d99335fdeaba01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940100000000000000000000000000000000000000a0514cc69e100e5c091daf7a1b70e313453123f2d3b977db81a5549e360d99aabaa056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000183019491837a1200808460241cfdb839d883010916846765746888676f312e31352e35856c696e757853f645a18112556301b84b45a839b5640e5607af66171a678ab82873d6c0db24a00000000000000000000000000000000000000000000000000000000000000000880000000000000000a00000000000000000000000000000000000000000000000000000000000000000c0c080b90133000000000000000000010427d4b22a2a78bcddd456742caf91b56badbff985ee19aef14573e7343fd652ed5f38341e436e5d46e2bb00b45d62ae97d1b050c64bc634ae10626739e35c4b00000001536cfa0cb18494a7f56db284c0045a879863ccc7aa3e4524fc6284078231f2420000000121e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff000000050000000165a0bc000000000100000000000000010f2114b7a88f320b5f8b16335c35cf08fc794cb40000000165a0bc0021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff0000000100000009000000014359c4c6bce53952a54c94613bc30507f1e708b27f643591b769e7e73c9d689420dff5c69eb0de0588acf0eb1a1ff40b4685cab7092417a1e7529e9b2d04a6b300", + "103570": 
"f9038ef90252a0846666b4289b3947c5b1469d33cb5aa3919f04300425a4dbfb5b542b635d8d3ca01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940100000000000000000000000000000000000000a04b18add7a786777bb24ed9de242c4364a3b3ad6b1c0195832a5d1d6009aca1a5a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000183019492837a1200808460241d0db839d883010916846765746888676f312e31352e35856c696e75785bb0bbb3fee7bd537291a10743bf2718f9ccc1d5be26fa82fcca14513b4c6959a00000000000000000000000000000000000000000000000000000000000000000880000000000000000a00000000000000000000000000000000000000000000000000000000000000000c0c080b90133000000000000000000010427d4b22a2a78bcddd456742caf91b56badbff985ee19aef14573e7343fd652ed5f38341e436e5d46e2bb00b45d62ae97d1b050c64bc634ae10626739e35c4b000000013d11c512dd5faf9c42f747ddbd74b2d2a026574e0b63378b10529431f6d043c10000000021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff0000000500000000054e0840000000010000000000000001d241b974a06d64fcae4321c6b9423f62e3b3739900000000054e084021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff000000010000000900000001d915dfb3bf7fbb6640fbf1a3f30c8f8f0e5b2fa2a9204279f4eb27f1ee848dab5363ece1b1fbc4c31272536eb9a7043e3a6dd6fe2a394f451fac47f503ea776300", + "103575": 
"f9038ef90252a0d81b9334723134f9d606eebd2ebfc5391c26a038d358d3329963c7a2ed9ef138a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940100000000000000000000000000000000000000a04dd2470e948f1134e5efc970a8b1a2589516c798e3f4863f67855ded3c0cd043a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000183019497837a1200808460241d36b839d883010916846765746888676f312e31352e35856c696e7578aa5b5d2b1ed1609fd7c850988dea584195fba21f222ca28fb063f2532bb60540a00000000000000000000000000000000000000000000000000000000000000000880000000000000000a00000000000000000000000000000000000000000000000000000000000000000c0c080b90133000000000000000000010427d4b22a2a78bcddd456742caf91b56badbff985ee19aef14573e7343fd652ed5f38341e436e5d46e2bb00b45d62ae97d1b050c64bc634ae10626739e35c4b000000018e5dbb04c489663d970aecd90e8c1dc9a74d1e79cedbb361c38c0c025702b2f30000000021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff00000005000000007faeeb90000000010000000000000001efa8f53bcd86fdb8268e43c8b4e28ac110073f61000000007faeeb9021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff000000010000000900000001e6ab0095f5493b6178c13f40260dc8c6f47016a2d9f44877329d0703849bd7455f16c358eafcc8515c75ab66adb600d5d2d76a3bef5825873f7515a8fe21da6901", + "103577": 
"f9038ef90252a01c5bee8463bd836c401a83144a3d48c9749ad09716bd0d20b8bcc60a267f4adca01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940100000000000000000000000000000000000000a0b5ce30a72dacb78ef4399c9f9a2c0625a5136d6fe2a6af3262dbed3c1e33480aa056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000183019499837a1200808460241d52b839d883010916846765746888676f312e31352e35856c696e7578b81e465c51d66936be320e718e10fd702bb0435192515c9d6d764530a3e79697a00000000000000000000000000000000000000000000000000000000000000000880000000000000000a00000000000000000000000000000000000000000000000000000000000000000c0c080b90133000000000000000000010427d4b22a2a78bcddd456742caf91b56badbff985ee19aef14573e7343fd652ed5f38341e436e5d46e2bb00b45d62ae97d1b050c64bc634ae10626739e35c4b0000000114f1afd76b49e07e57ad1d93e05fc2d03437b21452647e4cbe124d5ce22c3e950000000021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff0000000500000000195d3cc0000000010000000000000001429b75791e2ca9f7b1ce16b18553b2c3db6d745d00000000195d3cc021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff000000010000000900000001c6cf346c1db640688fe01c35b6cc3dc3de5c4193b54bcac3bddb8521a6a24213278b7ad73f91b11bbc2618259da70c75dcd875e376d3b80c71d52d53bd6a2b3201", + "103578": 
"f9038ef90252a076c7a807fb6bd7cb13632e8c8b83f6050ecfffbbb917718eb013dc58a462046ba01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940100000000000000000000000000000000000000a081916e411c58f3298676e327519eed09bb8deb5a56218c0ba29392fe080c9d28a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000018301949a837a1200808460241d56b839d883010916846765746888676f312e31352e35856c696e75784fdcaa7c513127405249f7ebc010b978e870d40660fe5a8a8d31c131d070b329a00000000000000000000000000000000000000000000000000000000000000000880000000000000000a00000000000000000000000000000000000000000000000000000000000000000c0c080b90133000000000000000000010427d4b22a2a78bcddd456742caf91b56badbff985ee19aef14573e7343fd652ed5f38341e436e5d46e2bb00b45d62ae97d1b050c64bc634ae10626739e35c4b00000001de8bcffc06b236e572194aca5b65bcca1b998b9dad5f3c15b981c812a4ab8e690000000021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff000000050000000253640b4000000001000000000000000198961c99d66d6f07283b222dc4d955b4f4fcb0590000000253640b4021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff0000000100000009000000019a9b70101aa5203ccaa5857bd42707e47c794b348b4836ab2caf43793b88c7bf12a411aa6e95c30657345b46d326b701b01f021d189d67b7bb002578c064186101", + "103582": 
"f9038ef90252a0129d409ec6bf944a2b8e297ca28110738b14073fa5591ef2a53f1212b13aa372a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940100000000000000000000000000000000000000a02d1923dbd134fccc2c948e9dc4785518b809f816009ebd6d0b0bfb8d4ba9b012a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000018301949e837a1200808460241d7eb839d883010916846765746888676f312e31352e35856c696e7578064661d4095dac500c7fcfbf0eb1d4cb05d2f61008788584ad6ac5f1ab25b7eda00000000000000000000000000000000000000000000000000000000000000000880000000000000000a00000000000000000000000000000000000000000000000000000000000000000c0c080b90133000000000000000000010427d4b22a2a78bcddd456742caf91b56badbff985ee19aef14573e7343fd652ed5f38341e436e5d46e2bb00b45d62ae97d1b050c64bc634ae10626739e35c4b000000018e5dbb04c489663d970aecd90e8c1dc9a74d1e79cedbb361c38c0c025702b2f30000000021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff00000005000000007faeeb90000000010000000000000001efa8f53bcd86fdb8268e43c8b4e28ac110073f61000000007faeeb9021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff000000010000000900000001e6ab0095f5493b6178c13f40260dc8c6f47016a2d9f44877329d0703849bd7455f16c358eafcc8515c75ab66adb600d5d2d76a3bef5825873f7515a8fe21da6901", + "103587": 
"f9038ef90252a0f9c812c3b5ff0600817ffb804376c62ce5c4078ed148d4e069441f6ac7290805a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940100000000000000000000000000000000000000a0be3b0517a0776cdbfde1cd23015a13e2ef3028477c1dc6cb220ef822b2c9e4e6a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001830194a3837a1200808460241dceb839d883010916846765746888676f312e31352e35856c696e7578c782a84b497101a07fd626e199c4a376169de4323a33350164b6aafd5682e655a00000000000000000000000000000000000000000000000000000000000000000880000000000000000a00000000000000000000000000000000000000000000000000000000000000000c0c080b90133000000000000000000010427d4b22a2a78bcddd456742caf91b56badbff985ee19aef14573e7343fd652ed5f38341e436e5d46e2bb00b45d62ae97d1b050c64bc634ae10626739e35c4b000000013547216bd5757e5815c212450cccd97f1bdc1e7f9d1d0770cdf9f7f72703ed6a0000000021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff000000050000000ec60f26aa0000000100000000000000010a99c32affaec0a697d4cb4bf660eeddcf432c210000000ec60f26aa21e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff00000001000000090000000161a837760af37f5f376b4efc05309c7247bb0d1309fd0514f95cfde01e9211b15376eea97d96e95a4a9265ced221c94dd1a3804ad88ca93e49ba964f01df7e9d01", + "103590": 
"f9038ef90252a0f3b9c52c56a0aa90f995459cbe932e4a0377510daf7460844c204e1b1e995319a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940100000000000000000000000000000000000000a003acfeab6cd021d64012138c04678641d0a599b0791324ebeb3f832a6e286d96a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001830194a6837a1200808460241df7b839d883010916846765746888676f312e31352e35856c696e75787813463ce511c54bb46068ecfc27ff1340ecf3dfd56b3d759956083980a62527a00000000000000000000000000000000000000000000000000000000000000000880000000000000000a00000000000000000000000000000000000000000000000000000000000000000c0c080b90133000000000000000000010427d4b22a2a78bcddd456742caf91b56badbff985ee19aef14573e7343fd652ed5f38341e436e5d46e2bb00b45d62ae97d1b050c64bc634ae10626739e35c4b000000018e5dbb04c489663d970aecd90e8c1dc9a74d1e79cedbb361c38c0c025702b2f30000000021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff00000005000000007faeeb90000000010000000000000001efa8f53bcd86fdb8268e43c8b4e28ac110073f61000000007faeeb9021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff000000010000000900000001e6ab0095f5493b6178c13f40260dc8c6f47016a2d9f44877329d0703849bd7455f16c358eafcc8515c75ab66adb600d5d2d76a3bef5825873f7515a8fe21da6901", + "103591": 
"f9046bf90252a0dd4e7627b243f66703e26d6d6b40d7a34f7eeaa19602f9b42994d6130a29d216a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940100000000000000000000000000000000000000a0c75cbd13fe7cd7c92f36c92785b1df6272c3bd30112daafc21bc7fc8f815c480a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001830194a7837a1200808460241e02b839d883010916846765746888676f312e31352e35856c696e75782abc11df3f0e7fe66ad7a27661e0c0c4d11cbf4b2563aed8e4e1ae71cf7a882aa00000000000000000000000000000000000000000000000000000000000000000880000000000000000a00000000000000000000000000000000000000000000000000000000000000000c0c080b90210000000000000000000010427d4b22a2a78bcddd456742caf91b56badbff985ee19aef14573e7343fd652ed5f38341e436e5d46e2bb00b45d62ae97d1b050c64bc634ae10626739e35c4b000000025d9b575698319af5c171ddc4ac9a42d3cca8240b7501525160b8e52fe6c57a050000000121e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff00000005000000001dcd65000000000100000000b6d67f4207669dbc0a968390d24ab63843439bc2ba7cd18c7f941dab53f368190000000121e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff000000050000000005f5e1000000000100000000000000029abfc4d3d0fcc431042656b5c60abf986b69c9180000000005f5e10021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff9abfc4d3d0fcc431042656b5c60abf986b69c918000000001dcd650021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff0000000200000009000000018bede31
875ec6980b402f60c43e296b2c76f6be711cd2ad099e4db186e732bb122b858522e82ca618c3b244bb238da1de341cacc3576f562acbbeb5706417bfc0000000009000000018bede31875ec6980b402f60c43e296b2c76f6be711cd2ad099e4db186e732bb122b858522e82ca618c3b244bb238da1de341cacc3576f562acbbeb5706417bfc00", + "103594": "f9038ef90252a0a3c510ea6731b44947826a6e5184e1b66902dd77faa2c1556548310cbe871392a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940100000000000000000000000000000000000000a07b2c48ad6bf163275829c5a7a28dac7f5c64da44371955e892ce31baa19350e0a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001830194aa837a1200808460241e31b839d883010916846765746888676f312e31352e35856c696e757848c7467be7c456e9b126cd0fbc96bbbf657f1d1c6fa1f44ab4f76113f893e118a00000000000000000000000000000000000000000000000000000000000000000880000000000000000a00000000000000000000000000000000000000000000000000000000000000000c0c080b90133000000000000000000010427d4b22a2a78bcddd456742caf91b56badbff985ee19aef14573e7343fd652ed5f38341e436e5d46e2bb00b45d62ae97d1b050c64bc634ae10626739e35c4b00000001de8bcffc06b236e572194aca5b65bcca1b998b9dad5f3c15b981c812a4ab8e690000000021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff000000050000000253640b4000000001000000000000000198961c99d66d6f07283b222dc4d955b4f4fcb0590000000253640b4021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff0000000100000009000000019a9b70101aa5203ccaa
5857bd42707e47c794b348b4836ab2caf43793b88c7bf12a411aa6e95c30657345b46d326b701b01f021d189d67b7bb002578c064186101", + "103597": "f9038ef90252a02bbdd48087012bce43d97c33c6d12376ec11e3cccdda2cdfc673a0804be3c00ba01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940100000000000000000000000000000000000000a03696593ff66e2a8ba97fe5c3a3a047391e2dc7df1700ca3d24491d028dfbc965a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001830194ad837a1200808460241e52b839d883010916846765746888676f312e31352e35856c696e7578c59e3e13029feeec087cc0ee482410aa8e274399a5ad6d89b3bad62f2ae3c5eda00000000000000000000000000000000000000000000000000000000000000000880000000000000000a00000000000000000000000000000000000000000000000000000000000000000c0c080b90133000000000000000000010427d4b22a2a78bcddd456742caf91b56badbff985ee19aef14573e7343fd652ed5f38341e436e5d46e2bb00b45d62ae97d1b050c64bc634ae10626739e35c4b000000018e5dbb04c489663d970aecd90e8c1dc9a74d1e79cedbb361c38c0c025702b2f30000000021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff00000005000000007faeeb90000000010000000000000001efa8f53bcd86fdb8268e43c8b4e28ac110073f61000000007faeeb9021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff000000010000000900000001e6ab0095f5493b6178c13f40260dc8c6f47016a2d9f44877329d0703849bd7455f16c358eafcc8515c75ab66adb600d5d2d76a3bef5825873f7515a8fe21da6901", + "103598": 
"f9046bf90252a0aa9bf89ee1705328c25fccc492e4fcfd2943928ac0bc5236d3fd6543f1299b9ba01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940100000000000000000000000000000000000000a0b0e1a7769affe0e97a6b7dda2810dc38628f7e7b5bfa16186bb78a15b49741b7a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001830194ae837a1200808460241e56b839d883010916846765746888676f312e31352e35856c696e7578bdbeb5868403b67c3939d76f97d0767cb9c42bef0f401119ea7520055cb66935a00000000000000000000000000000000000000000000000000000000000000000880000000000000000a00000000000000000000000000000000000000000000000000000000000000000c0c080b90210000000000000000000010427d4b22a2a78bcddd456742caf91b56badbff985ee19aef14573e7343fd652ed5f38341e436e5d46e2bb00b45d62ae97d1b050c64bc634ae10626739e35c4b000000025d9b575698319af5c171ddc4ac9a42d3cca8240b7501525160b8e52fe6c57a050000000121e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff00000005000000001dcd65000000000100000000b6d67f4207669dbc0a968390d24ab63843439bc2ba7cd18c7f941dab53f368190000000121e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff000000050000000005f5e1000000000100000000000000029abfc4d3d0fcc431042656b5c60abf986b69c9180000000005f5e10021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff9abfc4d3d0fcc431042656b5c60abf986b69c918000000001dcd650021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff0000000200000009000000018bede31
875ec6980b402f60c43e296b2c76f6be711cd2ad099e4db186e732bb122b858522e82ca618c3b244bb238da1de341cacc3576f562acbbeb5706417bfc0000000009000000018bede31875ec6980b402f60c43e296b2c76f6be711cd2ad099e4db186e732bb122b858522e82ca618c3b244bb238da1de341cacc3576f562acbbeb5706417bfc00", + "103603": "f9038ef90252a064fc5aa958489c9871ffe356e9a207aa803888b7b746b098e9a4d367e4ae09b8a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940100000000000000000000000000000000000000a0531356b4753c45f96c1cf91f9cc5491dd0d36b2914edfc75693f83ee633c3a72a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001830194b3837a1200808460241e7fb839d883010916846765746888676f312e31352e35856c696e757832420f2f13fa9a6d64ffeca2da8c446d8960d48009424adcf19c957eedebeac0a00000000000000000000000000000000000000000000000000000000000000000880000000000000000a00000000000000000000000000000000000000000000000000000000000000000c0c080b90133000000000000000000010427d4b22a2a78bcddd456742caf91b56badbff985ee19aef14573e7343fd652ed5f38341e436e5d46e2bb00b45d62ae97d1b050c64bc634ae10626739e35c4b0000000114f1afd76b49e07e57ad1d93e05fc2d03437b21452647e4cbe124d5ce22c3e950000000021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff0000000500000000195d3cc0000000010000000000000001429b75791e2ca9f7b1ce16b18553b2c3db6d745d00000000195d3cc021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff000000010000000900000001c6cf346c1db640688fe
01c35b6cc3dc3de5c4193b54bcac3bddb8521a6a24213278b7ad73f91b11bbc2618259da70c75dcd875e376d3b80c71d52d53bd6a2b3201", + "103604": "f9038ef90252a0fc26d2246709cfa7b49286a0ceaeee118777cde644334e19e1b1f3ac0ae30c94a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940100000000000000000000000000000000000000a0de44a3ae89801a43838ae534aca8d26d55fa6f44cf15bb03701ab0a750d9296aa056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001830194b4837a1200808460241e88b839d883010916846765746888676f312e31352e35856c696e7578a538ae1f8854a2bcfa50c281cdc3955a555e0895e66003d0a5a770f9e46552a2a00000000000000000000000000000000000000000000000000000000000000000880000000000000000a00000000000000000000000000000000000000000000000000000000000000000c0c080b90133000000000000000000010427d4b22a2a78bcddd456742caf91b56badbff985ee19aef14573e7343fd652ed5f38341e436e5d46e2bb00b45d62ae97d1b050c64bc634ae10626739e35c4b000000018e5dbb04c489663d970aecd90e8c1dc9a74d1e79cedbb361c38c0c025702b2f30000000021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff00000005000000007faeeb90000000010000000000000001efa8f53bcd86fdb8268e43c8b4e28ac110073f61000000007faeeb9021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff000000010000000900000001e6ab0095f5493b6178c13f40260dc8c6f47016a2d9f44877329d0703849bd7455f16c358eafcc8515c75ab66adb600d5d2d76a3bef5825873f7515a8fe21da6901", + "103607": 
"f9046bf90252a059d1d51f30b0447d113402901319f7c13129aa8b5f0c3fb442e6c96d66191af3a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940100000000000000000000000000000000000000a0bcffae03bdbf93fc350f1830a0b6d2a18a1ca3be00fabe9169ccbf45f7a62742a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001830194b7837a1200808460241e97b839d883010916846765746888676f312e31352e35856c696e757843116a8aa0e7c07a3b93430e592e160fe8bfdd83f71480d393abd4c2645bdd8da00000000000000000000000000000000000000000000000000000000000000000880000000000000000a00000000000000000000000000000000000000000000000000000000000000000c0c080b90210000000000000000000010427d4b22a2a78bcddd456742caf91b56badbff985ee19aef14573e7343fd652ed5f38341e436e5d46e2bb00b45d62ae97d1b050c64bc634ae10626739e35c4b000000025d9b575698319af5c171ddc4ac9a42d3cca8240b7501525160b8e52fe6c57a050000000121e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff00000005000000001dcd65000000000100000000b6d67f4207669dbc0a968390d24ab63843439bc2ba7cd18c7f941dab53f368190000000121e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff000000050000000005f5e1000000000100000000000000029abfc4d3d0fcc431042656b5c60abf986b69c9180000000005f5e10021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff9abfc4d3d0fcc431042656b5c60abf986b69c918000000001dcd650021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff0000000200000009000000018bede31
875ec6980b402f60c43e296b2c76f6be711cd2ad099e4db186e732bb122b858522e82ca618c3b244bb238da1de341cacc3576f562acbbeb5706417bfc0000000009000000018bede31875ec6980b402f60c43e296b2c76f6be711cd2ad099e4db186e732bb122b858522e82ca618c3b244bb238da1de341cacc3576f562acbbeb5706417bfc00", + "103612": "f9038ef90252a0a09e0d8dff73950dba0444b4c86c0b498ccd06ffd464bb7b5870e6689d5293e9a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940100000000000000000000000000000000000000a026426ccd3f691f6d02b946fe143a63d222d8298d788178f437c0d53f53e69050a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001830194bc837a1200808460241ec7b839d883010916846765746888676f312e31352e35856c696e7578b232de94a208032a136001a2d8b921559cdc8deac3b5b2a763611b5b2501a960a00000000000000000000000000000000000000000000000000000000000000000880000000000000000a00000000000000000000000000000000000000000000000000000000000000000c0c080b90133000000000000000000010427d4b22a2a78bcddd456742caf91b56badbff985ee19aef14573e7343fd652ed5f38341e436e5d46e2bb00b45d62ae97d1b050c64bc634ae10626739e35c4b00000001d32c53c8886a0540795f42e97e9a9ef77dc4f9b7987222ef682c67949654a95c0000000021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff00000005000000000bd94b70000000010000000000000001fa14b5789c934c74188c13b480aaf16401e3e9c9000000000bd94b7021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff0000000100000009000000011c8578107eafbf7d543
74bd94d15eba78021c79069e877ff5dd06114c27c9e867032573223c5be048e9b5bc0c5042323a0bc507f8d8c7123b3624b4c3750420b00", + "103614": "f9038ef90252a0367d69dfb7e29a21423e93d849145815b4286d589cb38f7a26ea7fd8a41979a8a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940100000000000000000000000000000000000000a0fa56241a99023fcb0fb91243b0b57aa4bec28b7cf66e3cf76e333b66de032757a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001830194be837a1200808460241ed6b839d883010916846765746888676f312e31352e35856c696e75787286ea4ceef23f221cb9193fcbe055b3c31bf95ce3ceef2c5405611cdd9cdeeea00000000000000000000000000000000000000000000000000000000000000000880000000000000000a00000000000000000000000000000000000000000000000000000000000000000c0c080b90133000000000000000000010427d4b22a2a78bcddd456742caf91b56badbff985ee19aef14573e7343fd652ed5f38341e436e5d46e2bb00b45d62ae97d1b050c64bc634ae10626739e35c4b000000014f086e98c63e6d3eef4f7d896ffd2bedf34524c9b2261c37c33a4e71efe329650000000121e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff0000000500000002540be400000000010000000000000001c1bf05f27cb5cda00c1dfb01c423ccd899a2c48b00000002540be40021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff0000000100000009000000015a1bd3d80a6345f3de5648e9313519d76635210845f72bfc7cba1526f2febb8a17c5603109060f8b224ee3c0e38a1e4f8f3687943a93265b26ef933afdb20fe800", + "103617": 
"f9046bf90252a0f5bb99d39031841c43f442efcbc5bbca42607a42efb2a7c80a03f5e83b000158a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940100000000000000000000000000000000000000a0af205ecf627d5abb3e785aa92e01b3433e86ef5bceae56503278c9eb68a924dea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001830194c1837a1200808460241eedb839d883010916846765746888676f312e31352e35856c696e75784c0f2eabb74ce72308aa072785a8cdbfcec42d9c9e2f35de45580922cdd033dda00000000000000000000000000000000000000000000000000000000000000000880000000000000000a00000000000000000000000000000000000000000000000000000000000000000c0c080b90210000000000000000000010427d4b22a2a78bcddd456742caf91b56badbff985ee19aef14573e7343fd652ed5f38341e436e5d46e2bb00b45d62ae97d1b050c64bc634ae10626739e35c4b000000025d9b575698319af5c171ddc4ac9a42d3cca8240b7501525160b8e52fe6c57a050000000121e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff00000005000000001dcd65000000000100000000b6d67f4207669dbc0a968390d24ab63843439bc2ba7cd18c7f941dab53f368190000000121e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff000000050000000005f5e1000000000100000000000000029abfc4d3d0fcc431042656b5c60abf986b69c9180000000005f5e10021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff9abfc4d3d0fcc431042656b5c60abf986b69c918000000001dcd650021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff0000000200000009000000018bede31
875ec6980b402f60c43e296b2c76f6be711cd2ad099e4db186e732bb122b858522e82ca618c3b244bb238da1de341cacc3576f562acbbeb5706417bfc0000000009000000018bede31875ec6980b402f60c43e296b2c76f6be711cd2ad099e4db186e732bb122b858522e82ca618c3b244bb238da1de341cacc3576f562acbbeb5706417bfc00", + "103618": "f9038ef90252a04cadf984e41223df21cb74c66af1454a953c3a94d58c4ec36a3fcc4dc416aa7da01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940100000000000000000000000000000000000000a0d7bd69f787e85383688e0ced62ac51c53e2d8bc71bdde91236fe4232a63ca1c4a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001830194c2837a1200808460241ef1b839d883010916846765746888676f312e31352e35856c696e75788afd9d5856dbee12919895d2f1fcaf36514ecdb44ed2980c5a413599f3e4cd8da00000000000000000000000000000000000000000000000000000000000000000880000000000000000a00000000000000000000000000000000000000000000000000000000000000000c0c080b90133000000000000000000010427d4b22a2a78bcddd456742caf91b56badbff985ee19aef14573e7343fd652ed5f38341e436e5d46e2bb00b45d62ae97d1b050c64bc634ae10626739e35c4b0000000131c7db6b65a3c7fcecbc25f8057c8d54e2d14ce0fa34c657fdbd8a12ffd25e820000000121e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff00000005000000003b9aca00000000010000000000000001ea25ecc74b7b66cc43b36674ceb170236df66c97000000003b9aca0021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff00000001000000090000000140888820b9d36864d1c
3a81b2b8cf7af9c005e3ed4dd6065c9703ea4dafa7df526cc200e7cad620c5b23de4552fb6177e0d5230be7503e1d36cc56424996413000", + "103621": "f9038ef90252a0732e9d9f354304aa75686efd3eee1f04435eafa935944e84fe40ad6db720d819a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940100000000000000000000000000000000000000a09d28d8e2428768c8b376d641a783c838f5f711b2ea375dd1940212276c3d38aaa056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001830194c5837a1200808460241effb839d883010916846765746888676f312e31352e35856c696e757884368b3462a223f87facc0978baabf4eb91077401826000074fc229154d13b90a00000000000000000000000000000000000000000000000000000000000000000880000000000000000a00000000000000000000000000000000000000000000000000000000000000000c0c080b90133000000000000000000010427d4b22a2a78bcddd456742caf91b56badbff985ee19aef14573e7343fd652ed5f38341e436e5d46e2bb00b45d62ae97d1b050c64bc634ae10626739e35c4b000000017c8254f5d9cfdb36f45df37cd39ea168a7c1c964b367dd66208d4979c5cc6c4e0000000021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff000000050000002e9045f740000000010000000000000001e670023a06b5d72c86c7c9b8e9b192d3b06dfdc00000002e9045f74021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff0000000100000009000000011c208908937c7c7608e9c750d973cb49a1c3f6aecc3cc9674cd42d44035024416f6f5a83504313593fc9a75e4af771c78005b9f5b8c2949b9d419d4692ba6b8800", + "103623": 
"f9038ef90252a0d9541b3c77d3e78e0bc14f13282a6ab59a4a74d0ac7956b83d39ca30e3395e5ca01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940100000000000000000000000000000000000000a0b5208a536f0882fbd336c63328482dc441c83ce39620c17103ce0190fc5a2ad3a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001830194c7837a1200808460241f12b839d883010916846765746888676f312e31352e35856c696e7578267eda5bc792e05b4b5ef308320bebb407c0985725fe796cb9a3bf9b424c927fa00000000000000000000000000000000000000000000000000000000000000000880000000000000000a00000000000000000000000000000000000000000000000000000000000000000c0c080b90133000000000000000000010427d4b22a2a78bcddd456742caf91b56badbff985ee19aef14573e7343fd652ed5f38341e436e5d46e2bb00b45d62ae97d1b050c64bc634ae10626739e35c4b000000014f086e98c63e6d3eef4f7d896ffd2bedf34524c9b2261c37c33a4e71efe329650000000121e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff0000000500000002540be400000000010000000000000001c1bf05f27cb5cda00c1dfb01c423ccd899a2c48b00000002540be40021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff0000000100000009000000015a1bd3d80a6345f3de5648e9313519d76635210845f72bfc7cba1526f2febb8a17c5603109060f8b224ee3c0e38a1e4f8f3687943a93265b26ef933afdb20fe800", + "103624": 
"f9046bf90252a0f357c1d275f0275a48081546c30d58a6b261169843f1b555a8c666a24dabfc75a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940100000000000000000000000000000000000000a05693dd5a28cf783d2486cf24f9b8f537eb085a4b6db5f7b694c13f0aa0cca91fa056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001830194c8837a1200808460241f17b839d883010916846765746888676f312e31352e35856c696e75781f93c010c1d297b98199232078b1bffa2bbe591b854679ca880437415fb19553a00000000000000000000000000000000000000000000000000000000000000000880000000000000000a00000000000000000000000000000000000000000000000000000000000000000c0c080b90210000000000000000000010427d4b22a2a78bcddd456742caf91b56badbff985ee19aef14573e7343fd652ed5f38341e436e5d46e2bb00b45d62ae97d1b050c64bc634ae10626739e35c4b00000002a6e934808ee55b395d63d73cf6aa9467ab9fb43d79c2989d2312996e2d2fe4ea0000000121e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff00000005000000012a05f2000000000100000000b901fb251a9f6055f7a9f69a437c67907aa3188de21bc1987582e527141063db0000000121e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff00000005000000003b7c45800000000100000000000000024ddf610d8127567c152dc4f79a2b6c7d1fe6f535000000003b7c458021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff4ddf610d8127567c152dc4f79a2b6c7d1fe6f535000000012a05f20021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff0000000200000009000000011e4fb0d
b4e8db43e047fcdd04b23a69fb3a3e0c77da2f1973340825e1ef02d5f4a1c622b94334d440cb91ecefc50295376ac942661eb0dfbf8e4f00aaa66e0050100000009000000011e4fb0db4e8db43e047fcdd04b23a69fb3a3e0c77da2f1973340825e1ef02d5f4a1c622b94334d440cb91ecefc50295376ac942661eb0dfbf8e4f00aaa66e00501", + "103627": "f9038ef90252a014c76a84bd47f925558015c67d272f7ccf87a56651871a50d0d8dd1af7505f71a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940100000000000000000000000000000000000000a0809245be2fa1466ea96d7b7ea9f95f789d0857527d99aaa5f87234b6eebc17caa056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001830194cb837a1200808460241f27b839d883010916846765746888676f312e31352e35856c696e75788fc3100070bc9a0ee5bdad362ed263b2ef7aa14a6d476407646dbe205ec19597a00000000000000000000000000000000000000000000000000000000000000000880000000000000000a00000000000000000000000000000000000000000000000000000000000000000c0c080b90133000000000000000000010427d4b22a2a78bcddd456742caf91b56badbff985ee19aef14573e7343fd652ed5f38341e436e5d46e2bb00b45d62ae97d1b050c64bc634ae10626739e35c4b00000001ebe4e9d5b951063a6ad72b65595e2735b64a7d1db47d1be9e435427ebc1866ba0000000021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff00000005000000003af2f140000000010000000000000001dfe3209fdeb3a1720d2ac776c9e1ccaf94a94b74000000003af2f14021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff00000001000000090000000188548ccb00beb832444
8a1f1f404109552716c979136c9ba849c77e72a04fb8d2f840df824ab8acabb14da415029e036fd7970acf4c1f334cf1da5f42706c0af00", + "103628": "f9038ef90252a0748f125a9a32c54159b042358ade94d3c2d82517a8ddb91054199a58a0ad87c7a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940100000000000000000000000000000000000000a0cda4bf8231a6035c21b448aae2cc4c713d0f423134c0f6d3ea7515cf0669eee8a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001830194cc837a1200808460241f3bb839d883010916846765746888676f312e31352e35856c696e75788e992eef88ed698ed1fbc20ebcf7f9ea0a1ddfe584e9eb8f3fc16669c0fe8221a00000000000000000000000000000000000000000000000000000000000000000880000000000000000a00000000000000000000000000000000000000000000000000000000000000000c0c080b90133000000000000000000010427d4b22a2a78bcddd456742caf91b56badbff985ee19aef14573e7343fd652ed5f38341e436e5d46e2bb00b45d62ae97d1b050c64bc634ae10626739e35c4b000000018e5dbb04c489663d970aecd90e8c1dc9a74d1e79cedbb361c38c0c025702b2f30000000021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff00000005000000007faeeb90000000010000000000000001efa8f53bcd86fdb8268e43c8b4e28ac110073f61000000007faeeb9021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff000000010000000900000001e6ab0095f5493b6178c13f40260dc8c6f47016a2d9f44877329d0703849bd7455f16c358eafcc8515c75ab66adb600d5d2d76a3bef5825873f7515a8fe21da6901", + "103629": 
"f9046bf90252a052e292bffa48c5a4456dc92374c25f50d95d9f0a188684b1dbaccc4b1828c344a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940100000000000000000000000000000000000000a088488162ea5db809bdba130676da3f9fef7bd2b080cb15bca6693bc08a5ff501a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001830194cd837a1200808460241f3eb839d883010916846765746888676f312e31352e35856c696e75785e3ebc267dd2ff96d35c52c61bdd2409488267d2c7ec51cdcfa923e181f92844a00000000000000000000000000000000000000000000000000000000000000000880000000000000000a00000000000000000000000000000000000000000000000000000000000000000c0c080b90210000000000000000000010427d4b22a2a78bcddd456742caf91b56badbff985ee19aef14573e7343fd652ed5f38341e436e5d46e2bb00b45d62ae97d1b050c64bc634ae10626739e35c4b00000002a6e934808ee55b395d63d73cf6aa9467ab9fb43d79c2989d2312996e2d2fe4ea0000000121e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff00000005000000012a05f2000000000100000000b901fb251a9f6055f7a9f69a437c67907aa3188de21bc1987582e527141063db0000000121e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff00000005000000003b7c45800000000100000000000000024ddf610d8127567c152dc4f79a2b6c7d1fe6f535000000003b7c458021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff4ddf610d8127567c152dc4f79a2b6c7d1fe6f535000000012a05f20021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff0000000200000009000000011e4fb0d
b4e8db43e047fcdd04b23a69fb3a3e0c77da2f1973340825e1ef02d5f4a1c622b94334d440cb91ecefc50295376ac942661eb0dfbf8e4f00aaa66e0050100000009000000011e4fb0db4e8db43e047fcdd04b23a69fb3a3e0c77da2f1973340825e1ef02d5f4a1c622b94334d440cb91ecefc50295376ac942661eb0dfbf8e4f00aaa66e00501", + "103630": "f9046bf90252a044eecf7e5365b28507c51b7689077cc40b9a7f106a58fb78f318a2b5c65b4e87a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940100000000000000000000000000000000000000a05c96f51ae4ec3bb8408d9412b9cbe99c1e442845f59598ce2354a437b5e6fc80a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001830194ce837a1200808460241f4bb839d883010916846765746888676f312e31352e35856c696e7578425b276193226a20c8ac9a55100ac646b87df7de510dbe813de37f7a231ad1f2a00000000000000000000000000000000000000000000000000000000000000000880000000000000000a00000000000000000000000000000000000000000000000000000000000000000c0c080b90210000000000000000000010427d4b22a2a78bcddd456742caf91b56badbff985ee19aef14573e7343fd652ed5f38341e436e5d46e2bb00b45d62ae97d1b050c64bc634ae10626739e35c4b000000025d9b575698319af5c171ddc4ac9a42d3cca8240b7501525160b8e52fe6c57a050000000121e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff00000005000000001dcd65000000000100000000b6d67f4207669dbc0a968390d24ab63843439bc2ba7cd18c7f941dab53f368190000000121e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff000000050000000005f5e10000000001000
00000000000029abfc4d3d0fcc431042656b5c60abf986b69c9180000000005f5e10021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff9abfc4d3d0fcc431042656b5c60abf986b69c918000000001dcd650021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff0000000200000009000000018bede31875ec6980b402f60c43e296b2c76f6be711cd2ad099e4db186e732bb122b858522e82ca618c3b244bb238da1de341cacc3576f562acbbeb5706417bfc0000000009000000018bede31875ec6980b402f60c43e296b2c76f6be711cd2ad099e4db186e732bb122b858522e82ca618c3b244bb238da1de341cacc3576f562acbbeb5706417bfc00", + "103633": "f9038ef90252a0939682ad7f6e77b3437bab1b0bc805082cb9eabf0c608a924b252a44ba0c74ada01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940100000000000000000000000000000000000000a090bcd14d3d802afa881f185f4efff27cc50338ed5b5a097a5ea4c45ad8263d6fa056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001830194d1837a1200808460241f60b839d883010916846765746888676f312e31352e35856c696e757876a460a53ae612e88aa58a1b2a40b9544f54df3200531731324e60c92eccbc45a00000000000000000000000000000000000000000000000000000000000000000880000000000000000a00000000000000000000000000000000000000000000000000000000000000000c0c080b90133000000000000000000010427d4b22a2a78bcddd456742caf91b56badbff985ee19aef14573e7343fd652ed5f38341e436e5d46e2bb00b45d62ae97d1b050c64bc634ae10626739e35c4b000000014f086e98c63e6d3eef4f7d896ffd2bedf34524c9b2261c37c33a4e71efe3296
50000000121e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff0000000500000002540be400000000010000000000000001c1bf05f27cb5cda00c1dfb01c423ccd899a2c48b00000002540be40021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff0000000100000009000000015a1bd3d80a6345f3de5648e9313519d76635210845f72bfc7cba1526f2febb8a17c5603109060f8b224ee3c0e38a1e4f8f3687943a93265b26ef933afdb20fe800" +} \ No newline at end of file diff --git a/coreth/plugin/evm/client.go b/coreth/plugin/evm/client.go index 4e0163a2..0fa46501 100644 --- a/coreth/plugin/evm/client.go +++ b/coreth/plugin/evm/client.go @@ -7,15 +7,16 @@ import ( "context" "fmt" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" "github.com/ava-labs/avalanchego/api" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/formatting" + "github.com/ava-labs/avalanchego/utils/formatting/address" + "github.com/ava-labs/avalanchego/utils/json" "github.com/ava-labs/avalanchego/utils/rpc" - - cjson "github.com/ava-labs/avalanchego/utils/json" ) // Interface compliance @@ -23,22 +24,21 @@ var _ Client = (*client)(nil) // Client interface for interacting with EVM [chain] type Client interface { - IssueTx(ctx context.Context, txBytes []byte) (ids.ID, error) - GetAtomicTxStatus(ctx context.Context, txID ids.ID) (Status, error) - GetAtomicTx(ctx context.Context, txID ids.ID) ([]byte, error) - GetAtomicUTXOs(ctx context.Context, addrs []string, sourceChain string, limit uint32, startAddress, startUTXOID string) ([][]byte, api.Index, error) - ListAddresses(ctx context.Context, userPass api.UserPass) ([]string, error) - ExportKey(ctx context.Context, userPass api.UserPass, addr string) (*secp256k1.PrivateKey, string, error) - ImportKey(ctx context.Context, userPass api.UserPass, privateKey *secp256k1.PrivateKey) (string, error) - Import(ctx context.Context, userPass api.UserPass, to string, sourceChain string) (ids.ID, 
error) - ExportAVAX(ctx context.Context, userPass api.UserPass, amount uint64, to string) (ids.ID, error) - Export(ctx context.Context, userPass api.UserPass, amount uint64, to string, assetID string) (ids.ID, error) - StartCPUProfiler(ctx context.Context) error - StopCPUProfiler(ctx context.Context) error - MemoryProfile(ctx context.Context) error - LockProfile(ctx context.Context) error - SetLogLevel(ctx context.Context, level log.Lvl) error - GetVMConfig(ctx context.Context) (*Config, error) + IssueTx(ctx context.Context, txBytes []byte, options ...rpc.Option) (ids.ID, error) + GetAtomicTxStatus(ctx context.Context, txID ids.ID, options ...rpc.Option) (Status, error) + GetAtomicTx(ctx context.Context, txID ids.ID, options ...rpc.Option) ([]byte, error) + GetAtomicUTXOs(ctx context.Context, addrs []ids.ShortID, sourceChain string, limit uint32, startAddress ids.ShortID, startUTXOID ids.ID, options ...rpc.Option) ([][]byte, ids.ShortID, ids.ID, error) + ExportKey(ctx context.Context, userPass api.UserPass, addr common.Address, options ...rpc.Option) (*secp256k1.PrivateKey, string, error) + ImportKey(ctx context.Context, userPass api.UserPass, privateKey *secp256k1.PrivateKey, options ...rpc.Option) (common.Address, error) + Import(ctx context.Context, userPass api.UserPass, to common.Address, sourceChain string, options ...rpc.Option) (ids.ID, error) + ExportAVAX(ctx context.Context, userPass api.UserPass, amount uint64, to ids.ShortID, targetChain string, options ...rpc.Option) (ids.ID, error) + Export(ctx context.Context, userPass api.UserPass, amount uint64, to ids.ShortID, targetChain string, assetID string, options ...rpc.Option) (ids.ID, error) + StartCPUProfiler(ctx context.Context, options ...rpc.Option) error + StopCPUProfiler(ctx context.Context, options ...rpc.Option) error + MemoryProfile(ctx context.Context, options ...rpc.Option) error + LockProfile(ctx context.Context, options ...rpc.Option) error + SetLogLevel(ctx context.Context, level log.Lvl, 
options ...rpc.Option) error + GetVMConfig(ctx context.Context, options ...rpc.Option) (*Config, error) } // Client implementation for interacting with EVM [chain] @@ -61,7 +61,7 @@ func NewCChainClient(uri string) Client { } // IssueTx issues a transaction to a node and returns the TxID -func (c *client) IssueTx(ctx context.Context, txBytes []byte) (ids.ID, error) { +func (c *client) IssueTx(ctx context.Context, txBytes []byte, options ...rpc.Option) (ids.ID, error) { res := &api.JSONTxID{} txStr, err := formatting.Encode(formatting.Hex, txBytes) if err != nil { @@ -70,26 +70,26 @@ func (c *client) IssueTx(ctx context.Context, txBytes []byte) (ids.ID, error) { err = c.requester.SendRequest(ctx, "avax.issueTx", &api.FormattedTx{ Tx: txStr, Encoding: formatting.Hex, - }, res) + }, res, options...) return res.TxID, err } // GetAtomicTxStatus returns the status of [txID] -func (c *client) GetAtomicTxStatus(ctx context.Context, txID ids.ID) (Status, error) { +func (c *client) GetAtomicTxStatus(ctx context.Context, txID ids.ID, options ...rpc.Option) (Status, error) { res := &GetAtomicTxStatusReply{} err := c.requester.SendRequest(ctx, "avax.getAtomicTxStatus", &api.JSONTxID{ TxID: txID, - }, res) + }, res, options...) return res.Status, err } // GetAtomicTx returns the byte representation of [txID] -func (c *client) GetAtomicTx(ctx context.Context, txID ids.ID) ([]byte, error) { +func (c *client) GetAtomicTx(ctx context.Context, txID ids.ID, options ...rpc.Option) ([]byte, error) { res := &api.FormattedTx{} err := c.requester.SendRequest(ctx, "avax.getAtomicTx", &api.GetTxArgs{ TxID: txID, Encoding: formatting.Hex, - }, res) + }, res, options...) 
if err != nil { return nil, err } @@ -99,70 +99,71 @@ func (c *client) GetAtomicTx(ctx context.Context, txID ids.ID) ([]byte, error) { // GetAtomicUTXOs returns the byte representation of the atomic UTXOs controlled by [addresses] // from [sourceChain] -func (c *client) GetAtomicUTXOs(ctx context.Context, addrs []string, sourceChain string, limit uint32, startAddress, startUTXOID string) ([][]byte, api.Index, error) { +func (c *client) GetAtomicUTXOs(ctx context.Context, addrs []ids.ShortID, sourceChain string, limit uint32, startAddress ids.ShortID, startUTXOID ids.ID, options ...rpc.Option) ([][]byte, ids.ShortID, ids.ID, error) { res := &api.GetUTXOsReply{} err := c.requester.SendRequest(ctx, "avax.getUTXOs", &api.GetUTXOsArgs{ - Addresses: addrs, + Addresses: ids.ShortIDsToStrings(addrs), SourceChain: sourceChain, - Limit: cjson.Uint32(limit), + Limit: json.Uint32(limit), StartIndex: api.Index{ - Address: startAddress, - UTXO: startUTXOID, + Address: startAddress.String(), + UTXO: startUTXOID.String(), }, Encoding: formatting.Hex, - }, res) + }, res, options...) 
if err != nil { - return nil, api.Index{}, err + return nil, ids.ShortID{}, ids.Empty, err } utxos := make([][]byte, len(res.UTXOs)) for i, utxo := range res.UTXOs { - b, err := formatting.Decode(formatting.Hex, utxo) + utxoBytes, err := formatting.Decode(res.Encoding, utxo) if err != nil { - return nil, api.Index{}, err + return nil, ids.ShortID{}, ids.Empty, err } - utxos[i] = b + utxos[i] = utxoBytes } - return utxos, res.EndIndex, nil -} - -// ListAddresses returns all addresses on this chain controlled by [user] -func (c *client) ListAddresses(ctx context.Context, user api.UserPass) ([]string, error) { - res := &api.JSONAddresses{} - err := c.requester.SendRequest(ctx, "avax.listAddresses", &user, res) - return res.Addresses, err + endAddr, err := address.ParseToID(res.EndIndex.Address) + if err != nil { + return nil, ids.ShortID{}, ids.Empty, err + } + endUTXOID, err := ids.FromString(res.EndIndex.UTXO) + return utxos, endAddr, endUTXOID, err } // ExportKey returns the private key corresponding to [addr] controlled by [user] // in both Avalanche standard format and hex format -func (c *client) ExportKey(ctx context.Context, user api.UserPass, addr string) (*secp256k1.PrivateKey, string, error) { +func (c *client) ExportKey(ctx context.Context, user api.UserPass, addr common.Address, options ...rpc.Option) (*secp256k1.PrivateKey, string, error) { res := &ExportKeyReply{} err := c.requester.SendRequest(ctx, "avax.exportKey", &ExportKeyArgs{ UserPass: user, - Address: addr, - }, res) + Address: addr.Hex(), + }, res, options...) 
return res.PrivateKey, res.PrivateKeyHex, err } // ImportKey imports [privateKey] to [user] -func (c *client) ImportKey(ctx context.Context, user api.UserPass, privateKey *secp256k1.PrivateKey) (string, error) { +func (c *client) ImportKey(ctx context.Context, user api.UserPass, privateKey *secp256k1.PrivateKey, options ...rpc.Option) (common.Address, error) { res := &api.JSONAddress{} err := c.requester.SendRequest(ctx, "avax.importKey", &ImportKeyArgs{ UserPass: user, PrivateKey: privateKey, - }, res) - return res.Address, err + }, res, options...) + if err != nil { + return common.Address{}, err + } + return ParseEthAddress(res.Address) } // Import sends an import transaction to import funds from [sourceChain] and // returns the ID of the newly created transaction -func (c *client) Import(ctx context.Context, user api.UserPass, to, sourceChain string) (ids.ID, error) { +func (c *client) Import(ctx context.Context, user api.UserPass, to common.Address, sourceChain string, options ...rpc.Option) (ids.ID, error) { res := &api.JSONTxID{} err := c.requester.SendRequest(ctx, "avax.import", &ImportArgs{ UserPass: user, To: to, SourceChain: sourceChain, - }, res) + }, res, options...) return res.TxID, err } @@ -172,9 +173,11 @@ func (c *client) ExportAVAX( ctx context.Context, user api.UserPass, amount uint64, - to string, + to ids.ShortID, + targetChain string, + options ...rpc.Option, ) (ids.ID, error) { - return c.Export(ctx, user, amount, to, "AVAX") + return c.Export(ctx, user, amount, to, targetChain, "AVAX", options...) } // Export sends an asset from this chain to the P/C-Chain. 
@@ -184,47 +187,50 @@ func (c *client) Export( ctx context.Context, user api.UserPass, amount uint64, - to string, + to ids.ShortID, + targetChain string, assetID string, + options ...rpc.Option, ) (ids.ID, error) { res := &api.JSONTxID{} err := c.requester.SendRequest(ctx, "avax.export", &ExportArgs{ ExportAVAXArgs: ExportAVAXArgs{ - UserPass: user, - Amount: cjson.Uint64(amount), - To: to, + UserPass: user, + Amount: json.Uint64(amount), + TargetChain: targetChain, + To: to.String(), }, AssetID: assetID, - }, res) + }, res, options...) return res.TxID, err } -func (c *client) StartCPUProfiler(ctx context.Context) error { - return c.adminRequester.SendRequest(ctx, "admin.startCPUProfiler", struct{}{}, &api.EmptyReply{}) +func (c *client) StartCPUProfiler(ctx context.Context, options ...rpc.Option) error { + return c.adminRequester.SendRequest(ctx, "admin.startCPUProfiler", struct{}{}, &api.EmptyReply{}, options...) } -func (c *client) StopCPUProfiler(ctx context.Context) error { - return c.adminRequester.SendRequest(ctx, "admin.stopCPUProfiler", struct{}{}, &api.EmptyReply{}) +func (c *client) StopCPUProfiler(ctx context.Context, options ...rpc.Option) error { + return c.adminRequester.SendRequest(ctx, "admin.stopCPUProfiler", struct{}{}, &api.EmptyReply{}, options...) } -func (c *client) MemoryProfile(ctx context.Context) error { - return c.adminRequester.SendRequest(ctx, "admin.memoryProfile", struct{}{}, &api.EmptyReply{}) +func (c *client) MemoryProfile(ctx context.Context, options ...rpc.Option) error { + return c.adminRequester.SendRequest(ctx, "admin.memoryProfile", struct{}{}, &api.EmptyReply{}, options...) } -func (c *client) LockProfile(ctx context.Context) error { - return c.adminRequester.SendRequest(ctx, "admin.lockProfile", struct{}{}, &api.EmptyReply{}) +func (c *client) LockProfile(ctx context.Context, options ...rpc.Option) error { + return c.adminRequester.SendRequest(ctx, "admin.lockProfile", struct{}{}, &api.EmptyReply{}, options...) 
} // SetLogLevel dynamically sets the log level for the C Chain -func (c *client) SetLogLevel(ctx context.Context, level log.Lvl) error { +func (c *client) SetLogLevel(ctx context.Context, level log.Lvl, options ...rpc.Option) error { return c.adminRequester.SendRequest(ctx, "admin.setLogLevel", &SetLogLevelArgs{ Level: level.String(), - }, &api.EmptyReply{}) + }, &api.EmptyReply{}, options...) } // GetVMConfig returns the current config of the VM -func (c *client) GetVMConfig(ctx context.Context) (*Config, error) { +func (c *client) GetVMConfig(ctx context.Context, options ...rpc.Option) (*Config, error) { res := &ConfigReply{} - err := c.adminRequester.SendRequest(ctx, "admin.getVMConfig", struct{}{}, res) + err := c.adminRequester.SendRequest(ctx, "admin.getVMConfig", struct{}{}, res, options...) return res.Config, err } diff --git a/coreth/plugin/evm/codec.go b/coreth/plugin/evm/codec.go index b9f8de44..92179677 100644 --- a/coreth/plugin/evm/codec.go +++ b/coreth/plugin/evm/codec.go @@ -5,6 +5,7 @@ package evm import ( "fmt" + "time" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" @@ -15,29 +16,41 @@ import ( // Codec does serialization and deserialization var Codec codec.Manager -func init() { - Codec = codec.NewDefaultManager() - c := linearcodec.NewDefault() - - errs := wrappers.Errs{} +// TODO: Remove after v1.11.x has activated +// +// Invariant: InitCodec and Codec must not be accessed concurrently +func InitCodec(durangoTime time.Time) error { + var ( + lc = linearcodec.NewDefault(durangoTime) + newCodec = codec.NewDefaultManager() + errs = wrappers.Errs{} + ) errs.Add( - c.RegisterType(&UnsignedImportTx{}), - c.RegisterType(&UnsignedExportTx{}), + lc.RegisterType(&UnsignedImportTx{}), + lc.RegisterType(&UnsignedExportTx{}), ) - c.SkipRegistrations(3) + lc.SkipRegistrations(3) errs.Add( - c.RegisterType(&secp256k1fx.TransferInput{}), - c.RegisterType(&secp256k1fx.MintOutput{}), - 
c.RegisterType(&secp256k1fx.TransferOutput{}), - c.RegisterType(&secp256k1fx.MintOperation{}), - c.RegisterType(&secp256k1fx.Credential{}), - c.RegisterType(&secp256k1fx.Input{}), - c.RegisterType(&secp256k1fx.OutputOwners{}), - Codec.RegisterCodec(codecVersion, c), + lc.RegisterType(&secp256k1fx.TransferInput{}), + lc.RegisterType(&secp256k1fx.MintOutput{}), + lc.RegisterType(&secp256k1fx.TransferOutput{}), + lc.RegisterType(&secp256k1fx.MintOperation{}), + lc.RegisterType(&secp256k1fx.Credential{}), + lc.RegisterType(&secp256k1fx.Input{}), + lc.RegisterType(&secp256k1fx.OutputOwners{}), + newCodec.RegisterCodec(codecVersion, lc), ) - if errs.Errored() { - panic(errs.Err) + return errs.Err + } + + Codec = newCodec + return nil +} + +func init() { + if err := InitCodec(time.Time{}); err != nil { + panic(err) } } diff --git a/coreth/plugin/evm/config.go b/coreth/plugin/evm/config.go index 99425ad6..9bf2d0c8 100644 --- a/coreth/plugin/evm/config.go +++ b/coreth/plugin/evm/config.go @@ -8,9 +8,10 @@ import ( "fmt" "time" - "github.com/ava-labs/coreth/core" + "github.com/ava-labs/coreth/core/txpool" "github.com/ava-labs/coreth/eth" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" "github.com/spf13/cast" ) @@ -19,11 +20,12 @@ const ( defaultPruningEnabled = true defaultCommitInterval = 4096 defaultTrieCleanCache = 512 - defaultTrieDirtyCache = 256 + defaultTrieDirtyCache = 512 defaultTrieDirtyCommitTarget = 20 + defaultTriePrefetcherParallelism = 16 defaultSnapshotCache = 256 defaultSyncableCommitInterval = defaultCommitInterval * 4 - defaultSnapshotAsync = true + defaultSnapshotWait = false defaultRpcGasCap = 50_000_000 // Default to 50M Gas Limit defaultRpcTxFeeCap = 100 // 100 AVAX defaultMetricsExpensiveEnabled = true @@ -38,9 +40,9 @@ const ( defaultOfflinePruningBloomFilterSize uint64 = 512 // Default size (MB) for the offline pruner to use defaultLogLevel = "info" defaultLogJSONFormat = false - 
defaultPopulateMissingTriesParallelism = 1024 defaultMaxOutboundActiveRequests = 16 defaultMaxOutboundActiveCrossChainRequests = 64 + defaultPopulateMissingTriesParallelism = 1024 defaultStateSyncServerTrieCache = 64 // MB defaultAcceptedCacheSize = 32 // blocks @@ -51,7 +53,8 @@ const ( // time assumptions: // - normal bootstrap processing time: ~14 blocks / second // - state sync time: ~6 hrs. - defaultStateSyncMinBlocks = 300_000 + defaultStateSyncMinBlocks = 300_000 + defaultStateSyncRequestSize = 1024 // the number of key/values to ask peers for per request ) var ( @@ -77,8 +80,11 @@ type Duration struct { type Config struct { // Coreth APIs SnowmanAPIEnabled bool `json:"snowman-api-enabled"` - CorethAdminAPIEnabled bool `json:"coreth-admin-api-enabled"` - CorethAdminAPIDir string `json:"coreth-admin-api-dir"` + AdminAPIEnabled bool `json:"admin-api-enabled"` + AdminAPIDir string `json:"admin-api-dir"` + CorethAdminAPIEnabled bool `json:"coreth-admin-api-enabled"` // Deprecated: use AdminAPIEnabled instead + CorethAdminAPIDir string `json:"coreth-admin-api-dir"` // Deprecated: use AdminAPIDir instead + WarpAPIEnabled bool `json:"warp-api-enabled"` // EnabledEthAPIs is a list of Ethereum services that should be enabled // If none is specified, then we use the default list [defaultEnabledAPIs] @@ -89,21 +95,22 @@ type Config struct { ContinuousProfilerFrequency Duration `json:"continuous-profiler-frequency"` // Frequency to run continuous profiler if enabled ContinuousProfilerMaxFiles int `json:"continuous-profiler-max-files"` // Maximum number of files to maintain - // Coreth API Gas/Price Caps + // API Gas/Price Caps RPCGasCap uint64 `json:"rpc-gas-cap"` RPCTxFeeCap float64 `json:"rpc-tx-fee-cap"` // Cache settings - TrieCleanCache int `json:"trie-clean-cache"` // Size of the trie clean cache (MB) - TrieCleanJournal string `json:"trie-clean-journal"` // Directory to use to save the trie clean cache (must be populated to enable journaling the trie clean cache) 
- TrieCleanRejournal Duration `json:"trie-clean-rejournal"` // Frequency to re-journal the trie clean cache to disk (minimum 1 minute, must be populated to enable journaling the trie clean cache) - TrieDirtyCache int `json:"trie-dirty-cache"` // Size of the trie dirty cache (MB) - TrieDirtyCommitTarget int `json:"trie-dirty-commit-target"` // Memory limit to target in the dirty cache before performing a commit (MB) - SnapshotCache int `json:"snapshot-cache"` // Size of the snapshot disk layer clean cache (MB) + TrieCleanCache int `json:"trie-clean-cache"` // Size of the trie clean cache (MB) + TrieCleanJournal string `json:"trie-clean-journal"` // Directory to use to save the trie clean cache (must be populated to enable journaling the trie clean cache) + TrieCleanRejournal Duration `json:"trie-clean-rejournal"` // Frequency to re-journal the trie clean cache to disk (minimum 1 minute, must be populated to enable journaling the trie clean cache) + TrieDirtyCache int `json:"trie-dirty-cache"` // Size of the trie dirty cache (MB) + TrieDirtyCommitTarget int `json:"trie-dirty-commit-target"` // Memory limit to target in the dirty cache before performing a commit (MB) + TriePrefetcherParallelism int `json:"trie-prefetcher-parallelism"` // Max concurrent disk reads trie prefetcher should perform at once + SnapshotCache int `json:"snapshot-cache"` // Size of the snapshot disk layer clean cache (MB) // Eth Settings Preimages bool `json:"preimages-enabled"` - SnapshotAsync bool `json:"snapshot-async"` + SnapshotWait bool `json:"snapshot-wait"` SnapshotVerify bool `json:"snapshot-verification-enabled"` // Pruning Settings @@ -113,6 +120,7 @@ type Config struct { AllowMissingTries bool `json:"allow-missing-tries"` // If enabled, warnings preventing an incomplete trie index are suppressed PopulateMissingTries *uint64 `json:"populate-missing-tries,omitempty"` // Sets the starting point for re-populating missing tries. Disables re-generation if nil. 
PopulateMissingTriesParallelism int `json:"populate-missing-tries-parallelism"` // Number of concurrent readers to use when re-populating missing tries on startup. + PruneWarpDB bool `json:"prune-warp-db-enabled"` // Determines if the warpDB should be cleared on startup // Metric Settings MetricsExpensiveEnabled bool `json:"metrics-expensive-enabled"` // Debug-level metrics that might impact runtime performance @@ -143,9 +151,12 @@ type Config struct { KeystoreInsecureUnlockAllowed bool `json:"keystore-insecure-unlock-allowed"` // Gossip Settings - RemoteTxGossipOnlyEnabled bool `json:"remote-tx-gossip-only-enabled"` - TxRegossipFrequency Duration `json:"tx-regossip-frequency"` - TxRegossipMaxSize int `json:"tx-regossip-max-size"` + RemoteGossipOnlyEnabled bool `json:"remote-gossip-only-enabled"` + RegossipFrequency Duration `json:"regossip-frequency"` + RegossipMaxTxs int `json:"regossip-max-txs"` + RemoteTxGossipOnlyEnabled bool `json:"remote-tx-gossip-only-enabled"` // Deprecated: use RemoteGossipOnlyEnabled instead + TxRegossipFrequency Duration `json:"tx-regossip-frequency"` // Deprecated: use RegossipFrequency instead + TxRegossipMaxSize int `json:"tx-regossip-max-size"` // Deprecated: use RegossipMaxTxs instead // Log LogLevel string `json:"log-level"` @@ -167,6 +178,7 @@ type Config struct { StateSyncIDs string `json:"state-sync-ids"` StateSyncCommitInterval uint64 `json:"state-sync-commit-interval"` StateSyncMinBlocks uint64 `json:"state-sync-min-blocks"` + StateSyncRequestSize uint16 `json:"state-sync-request-size"` // Database Settings InspectDatabase bool `json:"inspect-database"` // Inspects the database on startup if enabled. @@ -189,6 +201,17 @@ type Config struct { // * 0: means no limit // * N: means N block limit [HEAD-N+1, HEAD] and delete extra indexes TxLookupLimit uint64 `json:"tx-lookup-limit"` + + // SkipTxIndexing skips indexing transactions. + // This is useful for validators that don't need to index transactions. 
+ // TxLookupLimit can be still used to control unindexing old transactions. + SkipTxIndexing bool `json:"skip-tx-indexing"` + + // WarpOffChainMessages encodes off-chain messages (unrelated to any on-chain event ie. block or AddressedCall) + // that the node should be willing to sign. + // Note: only supports AddressedCall payloads as defined here: + // https://github.com/ava-labs/avalanchego/tree/7623ffd4be915a5185c9ed5e11fa9be15a6e1f00/vms/platformvm/warp/payload#addressedcall + WarpOffChainMessages []hexutil.Bytes `json:"warp-off-chain-messages"` } // EthAPIs returns an array of strings representing the Eth APIs that should be enabled @@ -206,14 +229,14 @@ func (c *Config) SetDefaults() { c.RPCTxFeeCap = defaultRpcTxFeeCap c.MetricsExpensiveEnabled = defaultMetricsExpensiveEnabled - c.TxPoolJournal = core.DefaultTxPoolConfig.Journal - c.TxPoolRejournal = Duration{core.DefaultTxPoolConfig.Rejournal} - c.TxPoolPriceLimit = core.DefaultTxPoolConfig.PriceLimit - c.TxPoolPriceBump = core.DefaultTxPoolConfig.PriceBump - c.TxPoolAccountSlots = core.DefaultTxPoolConfig.AccountSlots - c.TxPoolGlobalSlots = core.DefaultTxPoolConfig.GlobalSlots - c.TxPoolAccountQueue = core.DefaultTxPoolConfig.AccountQueue - c.TxPoolGlobalQueue = core.DefaultTxPoolConfig.GlobalQueue + c.TxPoolJournal = txpool.DefaultConfig.Journal + c.TxPoolRejournal = Duration{txpool.DefaultConfig.Rejournal} + c.TxPoolPriceLimit = txpool.DefaultConfig.PriceLimit + c.TxPoolPriceBump = txpool.DefaultConfig.PriceBump + c.TxPoolAccountSlots = txpool.DefaultConfig.AccountSlots + c.TxPoolGlobalSlots = txpool.DefaultConfig.GlobalSlots + c.TxPoolAccountQueue = txpool.DefaultConfig.AccountQueue + c.TxPoolGlobalQueue = txpool.DefaultConfig.GlobalQueue c.APIMaxDuration.Duration = defaultApiMaxDuration c.WSCPURefillRate.Duration = defaultWsCpuRefillRate @@ -225,21 +248,23 @@ func (c *Config) SetDefaults() { c.TrieCleanCache = defaultTrieCleanCache c.TrieDirtyCache = defaultTrieDirtyCache c.TrieDirtyCommitTarget = 
defaultTrieDirtyCommitTarget + c.TriePrefetcherParallelism = defaultTriePrefetcherParallelism c.SnapshotCache = defaultSnapshotCache c.AcceptorQueueLimit = defaultAcceptorQueueLimit - c.SnapshotAsync = defaultSnapshotAsync - c.TxRegossipFrequency.Duration = defaultTxRegossipFrequency - c.TxRegossipMaxSize = defaultTxRegossipMaxSize + c.CommitInterval = defaultCommitInterval + c.SnapshotWait = defaultSnapshotWait + c.RegossipFrequency.Duration = defaultTxRegossipFrequency + c.RegossipMaxTxs = defaultTxRegossipMaxSize c.OfflinePruningBloomFilterSize = defaultOfflinePruningBloomFilterSize c.LogLevel = defaultLogLevel - c.PopulateMissingTriesParallelism = defaultPopulateMissingTriesParallelism c.LogJSONFormat = defaultLogJSONFormat c.MaxOutboundActiveRequests = defaultMaxOutboundActiveRequests c.MaxOutboundActiveCrossChainRequests = defaultMaxOutboundActiveCrossChainRequests + c.PopulateMissingTriesParallelism = defaultPopulateMissingTriesParallelism c.StateSyncServerTrieCache = defaultStateSyncServerTrieCache - c.CommitInterval = defaultCommitInterval c.StateSyncCommitInterval = defaultSyncableCommitInterval c.StateSyncMinBlocks = defaultStateSyncMinBlocks + c.StateSyncRequestSize = defaultStateSyncRequestSize c.AllowUnprotectedTxHashes = defaultAllowUnprotectedTxHashes c.AcceptedCacheSize = defaultAcceptedCacheSize } @@ -282,3 +307,30 @@ func (c *Config) Validate() error { return nil } + +func (c *Config) Deprecate() string { + msg := "" + // Deprecate the old config options and set the new ones. + if c.CorethAdminAPIEnabled { + msg += "coreth-admin-api-enabled is deprecated, use admin-api-enabled instead. " + c.AdminAPIEnabled = c.CorethAdminAPIEnabled + } + if c.CorethAdminAPIDir != "" { + msg += "coreth-admin-api-dir is deprecated, use admin-api-dir instead. " + c.AdminAPIDir = c.CorethAdminAPIDir + } + if c.RemoteTxGossipOnlyEnabled { + msg += "remote-tx-gossip-only-enabled is deprecated, use tx-gossip-enabled instead. 
" + c.RemoteGossipOnlyEnabled = c.RemoteTxGossipOnlyEnabled + } + if c.TxRegossipFrequency != (Duration{}) { + msg += "tx-regossip-frequency is deprecated, use regossip-frequency instead. " + c.RegossipFrequency = c.TxRegossipFrequency + } + if c.TxRegossipMaxSize != 0 { + msg += "tx-regossip-max-size is deprecated, use regossip-max-txs instead. " + c.RegossipMaxTxs = c.TxRegossipMaxSize + } + + return msg +} diff --git a/coreth/plugin/evm/database.go b/coreth/plugin/evm/database.go index 1fca7387..479c995b 100644 --- a/coreth/plugin/evm/database.go +++ b/coreth/plugin/evm/database.go @@ -4,11 +4,17 @@ package evm import ( + "errors" + "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/coreth/ethdb" + "github.com/ethereum/go-ethereum/ethdb" ) -var _ ethdb.Database = &Database{} +var ( + _ ethdb.KeyValueStore = &Database{} + + ErrSnapshotNotSupported = errors.New("snapshot is not supported") +) // Database implements ethdb.Database type Database struct{ database.Database } @@ -23,6 +29,10 @@ func (db Database) NewBatch() ethdb.Batch { return Batch{db.Database.NewBatch()} // TODO: propagate size through avalanchego Database interface func (db Database) NewBatchWithSize(size int) ethdb.Batch { return Batch{db.Database.NewBatch()} } +func (db Database) NewSnapshot() (ethdb.Snapshot, error) { + return nil, ErrSnapshotNotSupported +} + // NewIterator implements ethdb.Database // // Note: This method assumes that the prefix is NOT part of the start, so there's diff --git a/coreth/plugin/evm/export_tx.go b/coreth/plugin/evm/export_tx.go index fe83483d..110b3a94 100644 --- a/coreth/plugin/evm/export_tx.go +++ b/coreth/plugin/evm/export_tx.go @@ -17,6 +17,7 @@ import ( "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" 
"github.com/ava-labs/avalanchego/utils/hashing" @@ -124,7 +125,7 @@ func (utx *UnsignedExportTx) Verify( if !avax.IsSortedTransferableOutputs(utx.ExportedOutputs, Codec) { return errOutputsNotSorted } - if rules.IsApricotPhase1 && !IsSortedAndUniqueEVMInputs(utx.Ins) { + if rules.IsApricotPhase1 && !utils.IsSortedAndUnique(utx.Ins) { return errInputsNotSortedUnique } @@ -200,7 +201,7 @@ func (utx *UnsignedExportTx) SemanticVerify( if err != nil { return err } - txFee, err := calculateDynamicFee(gasUsed, baseFee) + txFee, err := CalculateDynamicFee(gasUsed, baseFee) if err != nil { return err } @@ -241,25 +242,25 @@ func (utx *UnsignedExportTx) SemanticVerify( } sig := cred.Sigs[0][:] - - // Verify the address recovered from the signature of the transaction hash without a prefix - // (Standard Avalanche approach, but unsupported/deprecated by most signing tools) - recoveredAddress, err := recoverAddress(vm, txHash, sig) + pubKey, err := vm.secpCache.RecoverPublicKeyFromHash(txHash, sig) if err != nil { return err } - if input.Address == recoveredAddress { + + // Verify the address recovered from the signature of the transaction hash without a prefix + // (Standard Avalanche approach, but unsupported/deprecated by most signing tools) + if input.Address == PublicKeyToEthAddress(pubKey) { continue } // Verify the address recovered from the signature of the transaction hash with the // standard Ethereum prefix (see accounts.TextHash) if rules.IsBanff { - recoveredAddress, err = recoverAddress(vm, txHashEth, sig) + pubKey, err := vm.secpCache.RecoverPublicKeyFromHash(txHashEth, sig) if err != nil { return err } - if input.Address == recoveredAddress { + if input.Address == PublicKeyToEthAddress(pubKey) { continue } } @@ -428,12 +429,3 @@ func (utx *UnsignedExportTx) EVMStateTransfer(ctx *snow.Context, state *state.St } return nil } - -// Recover the address from the signature of the transaction hash -func recoverAddress(vm *VM, txHash []byte, sig []byte) (common.Address, 
error) { - pubKey, err := vm.secpFactory.RecoverHashPublicKey(txHash, sig) - if err != nil { - return common.Address{}, err - } - return PublicKeyToEthAddress(pubKey), nil -} diff --git a/coreth/plugin/evm/export_tx_test.go b/coreth/plugin/evm/export_tx_test.go index 50b782e1..918cb711 100644 --- a/coreth/plugin/evm/export_tx_test.go +++ b/coreth/plugin/evm/export_tx_test.go @@ -59,7 +59,7 @@ func createExportTxOptions(t *testing.T, vm *VM, issuer chan engCommon.Message, t.Fatal(err) } - if err := vm.issueTx(importTx, true /*=local*/); err != nil { + if err := vm.mempool.AddLocalTx(importTx); err != nil { t.Fatal(err) } @@ -371,7 +371,7 @@ func TestExportTxEVMStateTransfer(t *testing.T) { t.Fatal(err) } - if err := vm.issueTx(tx, true /*=local*/); err != nil { + if err := vm.mempool.AddLocalTx(tx); err != nil { t.Fatal(err) } @@ -1617,7 +1617,7 @@ func TestExportTxGasCost(t *testing.T) { t.Fatalf("Expected gasUsed to be %d, but found %d", test.ExpectedGasUsed, gasUsed) } - fee, err := calculateDynamicFee(gasUsed, test.BaseFee) + fee, err := CalculateDynamicFee(gasUsed, test.BaseFee) if err != nil { t.Fatal(err) } @@ -1726,7 +1726,7 @@ func TestNewExportTx(t *testing.T) { t.Fatal(err) } - if err := vm.issueTx(tx, true /*=local*/); err != nil { + if err := vm.mempool.AddLocalTx(tx); err != nil { t.Fatal(err) } @@ -1916,7 +1916,7 @@ func TestNewExportTxMulticoin(t *testing.T) { t.Fatal(err) } - if err := vm.issueTx(tx, false); err != nil { + if err := vm.mempool.AddTx(tx); err != nil { t.Fatal(err) } diff --git a/coreth/plugin/evm/formatting.go b/coreth/plugin/evm/formatting.go index a586a455..ba9cea58 100644 --- a/coreth/plugin/evm/formatting.go +++ b/coreth/plugin/evm/formatting.go @@ -14,6 +14,17 @@ import ( "github.com/ethereum/go-ethereum/crypto" ) +// ParseServiceAddress get address ID from address string, being it either localized (using address manager, +// doing also components validations), or not localized. 
+// If both attempts fail, reports error from localized address parsing +func (vm *VM) ParseServiceAddress(addrStr string) (ids.ShortID, error) { + addr, err := ids.ShortFromString(addrStr) + if err == nil { + return addr, nil + } + return vm.ParseLocalAddress(addrStr) +} + // ParseLocalAddress takes in an address for this chain and produces the ID func (vm *VM) ParseLocalAddress(addrStr string) (ids.ShortID, error) { chainID, addr, err := vm.ParseAddress(addrStr) diff --git a/coreth/plugin/evm/gasprice_update.go b/coreth/plugin/evm/gasprice_update.go index cdac087a..04dad40a 100644 --- a/coreth/plugin/evm/gasprice_update.go +++ b/coreth/plugin/evm/gasprice_update.go @@ -9,6 +9,7 @@ import ( "time" "github.com/ava-labs/coreth/params" + "github.com/ava-labs/coreth/utils" ) type gasPriceUpdater struct { @@ -62,13 +63,13 @@ func (gpu *gasPriceUpdater) start() { // 2) If [timestamp] has already passed, update is called immediately // 3) [timestamp] is some time in the future, starts a goroutine that will call update(price) at the time // given by [timestamp]. 
-func (gpu *gasPriceUpdater) handleUpdate(update func(price *big.Int), timestamp *big.Int, price *big.Int) bool { +func (gpu *gasPriceUpdater) handleUpdate(update func(price *big.Int), timestamp *uint64, price *big.Int) bool { if timestamp == nil { return true } currentTime := time.Now() - upgradeTime := time.Unix(timestamp.Int64(), 0) + upgradeTime := utils.Uint64ToTime(timestamp) if currentTime.After(upgradeTime) { update(price) } else { diff --git a/coreth/plugin/evm/gasprice_update_test.go b/coreth/plugin/evm/gasprice_update_test.go index 24d8337f..d9ee265d 100644 --- a/coreth/plugin/evm/gasprice_update_test.go +++ b/coreth/plugin/evm/gasprice_update_test.go @@ -10,6 +10,7 @@ import ( "time" "github.com/ava-labs/coreth/params" + "github.com/ava-labs/coreth/utils" ) type mockGasPriceSetter struct { @@ -62,7 +63,7 @@ func TestUpdateGasPriceShutsDown(t *testing.T) { config := *params.TestChainConfig // Set ApricotPhase3BlockTime one hour in the future so that it will // create a goroutine waiting for an hour before updating the gas price - config.ApricotPhase3BlockTimestamp = big.NewInt(time.Now().Add(time.Hour).Unix()) + config.ApricotPhase3BlockTimestamp = utils.TimeToNewUint64(time.Now().Add(time.Hour)) gpu := &gasPriceUpdater{ setter: &mockGasPriceSetter{price: big.NewInt(1)}, chainConfig: &config, @@ -106,8 +107,8 @@ func TestUpdateGasPriceUpdatesPrice(t *testing.T) { config := *params.TestChainConfig // Set ApricotPhase3BlockTime 250ms in the future so that it will // create a goroutine waiting for the time to update the gas price - config.ApricotPhase3BlockTimestamp = big.NewInt(time.Now().Add(250 * time.Millisecond).Unix()) - config.ApricotPhase4BlockTimestamp = big.NewInt(time.Now().Add(3 * time.Second).Unix()) + config.ApricotPhase3BlockTimestamp = utils.TimeToNewUint64(time.Now().Add(250 * time.Millisecond)) + config.ApricotPhase4BlockTimestamp = utils.TimeToNewUint64(time.Now().Add(3 * time.Second)) gpu := &gasPriceUpdater{ setter: 
&mockGasPriceSetter{price: big.NewInt(1)}, chainConfig: &config, diff --git a/coreth/plugin/evm/gossip.go b/coreth/plugin/evm/gossip.go new file mode 100644 index 00000000..3664b12a --- /dev/null +++ b/coreth/plugin/evm/gossip.go @@ -0,0 +1,206 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package evm + +import ( + "context" + "fmt" + "sync" + "time" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ethereum/go-ethereum/log" + "github.com/prometheus/client_golang/prometheus" + + "github.com/ava-labs/avalanchego/network/p2p" + "github.com/ava-labs/avalanchego/network/p2p/gossip" + + "github.com/ava-labs/coreth/core" + "github.com/ava-labs/coreth/core/txpool" + "github.com/ava-labs/coreth/core/types" +) + +var ( + _ p2p.Handler = (*txGossipHandler)(nil) + + _ gossip.Gossipable = (*GossipEthTx)(nil) + _ gossip.Gossipable = (*GossipAtomicTx)(nil) + _ gossip.Marshaller[*GossipAtomicTx] = (*GossipAtomicTxMarshaller)(nil) + _ gossip.Marshaller[*GossipEthTx] = (*GossipEthTxMarshaller)(nil) + _ gossip.Set[*GossipEthTx] = (*GossipEthTxPool)(nil) +) + +func newTxGossipHandler[T gossip.Gossipable]( + log logging.Logger, + marshaller gossip.Marshaller[T], + mempool gossip.Set[T], + metrics gossip.Metrics, + maxMessageSize int, + throttlingPeriod time.Duration, + throttlingLimit int, + validators *p2p.Validators, +) txGossipHandler { + // push gossip messages can be handled from any peer + handler := gossip.NewHandler[T]( + log, + marshaller, + // Don't forward gossip to avoid double-forwarding + gossip.NoOpAccumulator[T]{}, + mempool, + metrics, + maxMessageSize, + ) + + // pull gossip requests are filtered by validators and are throttled + // to prevent spamming + validatorHandler := p2p.NewValidatorHandler( + p2p.NewThrottlerHandler( + handler, + p2p.NewSlidingWindowThrottler(throttlingPeriod, throttlingLimit), + log, + ), + validators, + log, + ) + + 
return txGossipHandler{ + appGossipHandler: handler, + appRequestHandler: validatorHandler, + } +} + +type txGossipHandler struct { + appGossipHandler p2p.Handler + appRequestHandler p2p.Handler +} + +func (t txGossipHandler) AppGossip(ctx context.Context, nodeID ids.NodeID, gossipBytes []byte) { + t.appGossipHandler.AppGossip(ctx, nodeID, gossipBytes) +} + +func (t txGossipHandler) AppRequest(ctx context.Context, nodeID ids.NodeID, deadline time.Time, requestBytes []byte) ([]byte, error) { + return t.appRequestHandler.AppRequest(ctx, nodeID, deadline, requestBytes) +} + +func (t txGossipHandler) CrossChainAppRequest(context.Context, ids.ID, time.Time, []byte) ([]byte, error) { + return nil, nil +} + +type GossipAtomicTxMarshaller struct{} + +func (g GossipAtomicTxMarshaller) MarshalGossip(tx *GossipAtomicTx) ([]byte, error) { + return tx.Tx.SignedBytes(), nil +} + +func (g GossipAtomicTxMarshaller) UnmarshalGossip(bytes []byte) (*GossipAtomicTx, error) { + tx, err := ExtractAtomicTx(bytes, Codec) + return &GossipAtomicTx{ + Tx: tx, + }, err +} + +type GossipAtomicTx struct { + Tx *Tx +} + +func (tx *GossipAtomicTx) GossipID() ids.ID { + return tx.Tx.ID() +} + +func NewGossipEthTxPool(mempool *txpool.TxPool, registerer prometheus.Registerer) (*GossipEthTxPool, error) { + bloom, err := gossip.NewBloomFilter(registerer, "eth_tx_bloom_filter", txGossipBloomMinTargetElements, txGossipBloomTargetFalsePositiveRate, txGossipBloomResetFalsePositiveRate) + if err != nil { + return nil, fmt.Errorf("failed to initialize bloom filter: %w", err) + } + + return &GossipEthTxPool{ + mempool: mempool, + pendingTxs: make(chan core.NewTxsEvent), + bloom: bloom, + }, nil +} + +type GossipEthTxPool struct { + mempool *txpool.TxPool + pendingTxs chan core.NewTxsEvent + + bloom *gossip.BloomFilter + lock sync.RWMutex +} + +func (g *GossipEthTxPool) Subscribe(ctx context.Context) { + g.mempool.SubscribeNewTxsEvent(g.pendingTxs) + + for { + select { + case <-ctx.Done(): + 
log.Debug("shutting down subscription") + return + case pendingTxs := <-g.pendingTxs: + g.lock.Lock() + optimalElements := (g.mempool.PendingSize(false) + len(pendingTxs.Txs)) * txGossipBloomChurnMultiplier + for _, pendingTx := range pendingTxs.Txs { + tx := &GossipEthTx{Tx: pendingTx} + g.bloom.Add(tx) + reset, err := gossip.ResetBloomFilterIfNeeded(g.bloom, optimalElements) + if err != nil { + log.Error("failed to reset bloom filter", "err", err) + continue + } + + if reset { + log.Debug("resetting bloom filter", "reason", "reached max filled ratio") + + g.mempool.IteratePending(func(tx *types.Transaction) bool { + g.bloom.Add(&GossipEthTx{Tx: tx}) + return true + }) + } + } + g.lock.Unlock() + } + } +} + +// Add enqueues the transaction to the mempool. Subscribe should be called +// to receive an event if tx is actually added to the mempool or not. +func (g *GossipEthTxPool) Add(tx *GossipEthTx) error { + return g.mempool.AddRemotes([]*types.Transaction{tx.Tx})[0] +} + +func (g *GossipEthTxPool) Iterate(f func(tx *GossipEthTx) bool) { + g.mempool.IteratePending(func(tx *types.Transaction) bool { + return f(&GossipEthTx{Tx: tx}) + }) +} + +func (g *GossipEthTxPool) GetFilter() ([]byte, []byte) { + g.lock.RLock() + defer g.lock.RUnlock() + + return g.bloom.Marshal() +} + +type GossipEthTxMarshaller struct{} + +func (g GossipEthTxMarshaller) MarshalGossip(tx *GossipEthTx) ([]byte, error) { + return tx.Tx.MarshalBinary() +} + +func (g GossipEthTxMarshaller) UnmarshalGossip(bytes []byte) (*GossipEthTx, error) { + tx := &GossipEthTx{ + Tx: &types.Transaction{}, + } + + return tx, tx.Tx.UnmarshalBinary(bytes) +} + +type GossipEthTx struct { + Tx *types.Transaction +} + +func (tx *GossipEthTx) GossipID() ids.ID { + return ids.ID(tx.Tx.Hash()) +} diff --git a/coreth/plugin/evm/gossip_stats.go b/coreth/plugin/evm/gossip_stats.go index ab12c607..ce15bd87 100644 --- a/coreth/plugin/evm/gossip_stats.go +++ b/coreth/plugin/evm/gossip_stats.go @@ -23,6 +23,7 @@ type 
GossipReceivedStats interface { IncAtomicGossipReceivedError() IncAtomicGossipReceivedKnown() IncAtomicGossipReceivedNew() + IncEthTxsGossipReceivedError() IncEthTxsGossipReceivedKnown() IncEthTxsGossipReceivedNew() } @@ -56,6 +57,7 @@ type gossipStats struct { atomicGossipReceivedError metrics.Counter atomicGossipReceivedKnown metrics.Counter atomicGossipReceivedNew metrics.Counter + ethTxsGossipReceivedError metrics.Counter ethTxsGossipReceivedKnown metrics.Counter ethTxsGossipReceivedNew metrics.Counter } @@ -75,6 +77,7 @@ func NewGossipStats() GossipStats { atomicGossipReceivedError: metrics.GetOrRegisterCounter("gossip_atomic_received_error", nil), atomicGossipReceivedKnown: metrics.GetOrRegisterCounter("gossip_atomic_received_known", nil), atomicGossipReceivedNew: metrics.GetOrRegisterCounter("gossip_atomic_received_new", nil), + ethTxsGossipReceivedError: metrics.GetOrRegisterCounter("gossip_eth_txs_received_error", nil), ethTxsGossipReceivedKnown: metrics.GetOrRegisterCounter("gossip_eth_txs_received_known", nil), ethTxsGossipReceivedNew: metrics.GetOrRegisterCounter("gossip_eth_txs_received_new", nil), } @@ -89,6 +92,7 @@ func (g *gossipStats) IncAtomicGossipReceivedDropped() { g.atomicGossipReceivedD func (g *gossipStats) IncAtomicGossipReceivedError() { g.atomicGossipReceivedError.Inc(1) } func (g *gossipStats) IncAtomicGossipReceivedKnown() { g.atomicGossipReceivedKnown.Inc(1) } func (g *gossipStats) IncAtomicGossipReceivedNew() { g.atomicGossipReceivedNew.Inc(1) } +func (g *gossipStats) IncEthTxsGossipReceivedError() { g.ethTxsGossipReceivedError.Inc(1) } func (g *gossipStats) IncEthTxsGossipReceivedKnown() { g.ethTxsGossipReceivedKnown.Inc(1) } func (g *gossipStats) IncEthTxsGossipReceivedNew() { g.ethTxsGossipReceivedNew.Inc(1) } diff --git a/coreth/plugin/evm/gossip_test.go b/coreth/plugin/evm/gossip_test.go new file mode 100644 index 00000000..9c924a00 --- /dev/null +++ b/coreth/plugin/evm/gossip_test.go @@ -0,0 +1,213 @@ +// (c) 2019-2021, Ava 
Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package evm + +import ( + "context" + "math/big" + "testing" + "time" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/network/p2p/gossip" + "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + "github.com/ava-labs/avalanchego/vms/components/verify" + "github.com/ava-labs/coreth/consensus/dummy" + "github.com/ava-labs/coreth/core" + "github.com/ava-labs/coreth/core/rawdb" + "github.com/ava-labs/coreth/core/txpool" + "github.com/ava-labs/coreth/core/types" + "github.com/ava-labs/coreth/core/vm" + "github.com/ava-labs/coreth/params" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/require" +) + +func TestGossipAtomicTxMarshaller(t *testing.T) { + require := require.New(t) + + want := &GossipAtomicTx{ + Tx: &Tx{ + UnsignedAtomicTx: &UnsignedImportTx{}, + Creds: []verify.Verifiable{}, + }, + } + marshaller := GossipAtomicTxMarshaller{} + + key0 := testKeys[0] + require.NoError(want.Tx.Sign(Codec, [][]*secp256k1.PrivateKey{{key0}})) + + bytes, err := marshaller.MarshalGossip(want) + require.NoError(err) + + got, err := marshaller.UnmarshalGossip(bytes) + require.NoError(err) + require.Equal(want.GossipID(), got.GossipID()) +} + +func TestAtomicMempoolIterate(t *testing.T) { + txs := []*GossipAtomicTx{ + { + Tx: &Tx{ + UnsignedAtomicTx: &TestUnsignedTx{ + IDV: ids.GenerateTestID(), + }, + }, + }, + { + Tx: &Tx{ + UnsignedAtomicTx: &TestUnsignedTx{ + IDV: ids.GenerateTestID(), + }, + }, + }, + } + + tests := []struct { + name string + add []*GossipAtomicTx + f func(tx *GossipAtomicTx) bool + possibleValues []*GossipAtomicTx + expectedLen int + }{ + { + name: "func matches nothing", + add: txs, + f: func(*GossipAtomicTx) bool { + return false + }, + possibleValues: nil, + }, + { + name: "func matches 
all", + add: txs, + f: func(*GossipAtomicTx) bool { + return true + }, + possibleValues: txs, + expectedLen: 2, + }, + { + name: "func matches subset", + add: txs, + f: func(tx *GossipAtomicTx) bool { + return tx.Tx == txs[0].Tx + }, + possibleValues: txs, + expectedLen: 1, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + m, err := NewMempool(&snow.Context{}, prometheus.NewRegistry(), 10, nil) + require.NoError(err) + + for _, add := range tt.add { + require.NoError(m.Add(add)) + } + + matches := make([]*GossipAtomicTx, 0) + f := func(tx *GossipAtomicTx) bool { + match := tt.f(tx) + + if match { + matches = append(matches, tx) + } + + return match + } + + m.Iterate(f) + + require.Len(matches, tt.expectedLen) + require.Subset(tt.possibleValues, matches) + }) + } +} + +func TestGossipEthTxMarshaller(t *testing.T) { + require := require.New(t) + + blobTx := &types.BlobTx{} + want := &GossipEthTx{Tx: types.NewTx(blobTx)} + marshaller := GossipEthTxMarshaller{} + + bytes, err := marshaller.MarshalGossip(want) + require.NoError(err) + + got, err := marshaller.UnmarshalGossip(bytes) + require.NoError(err) + require.Equal(want.GossipID(), got.GossipID()) +} + +func TestGossipSubscribe(t *testing.T) { + require := require.New(t) + key, err := crypto.GenerateKey() + require.NoError(err) + addr := crypto.PubkeyToAddress(key.PublicKey) + + require.NoError(err) + txPool := setupPoolWithConfig(t, params.TestChainConfig, addr) + defer txPool.Stop() + txPool.SetGasPrice(common.Big1) + txPool.SetMinFee(common.Big0) + + gossipTxPool, err := NewGossipEthTxPool(txPool, prometheus.NewRegistry()) + require.NoError(err) + + // use a custom bloom filter to test the bloom filter reset + gossipTxPool.bloom, err = gossip.NewBloomFilter(prometheus.NewRegistry(), "", 1, 0.01, 0.0000000000000001) // maxCount =1 + require.NoError(err) + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + go gossipTxPool.Subscribe(ctx) + 
+ // create eth txs + ethTxs := getValidEthTxs(key, 10, big.NewInt(226*params.GWei)) + + // Notify mempool about txs + errs := txPool.AddRemotesSync(ethTxs) + for _, err := range errs { + require.NoError(err, "failed adding subnet-evm tx to remote mempool") + } + + require.Eventually( + func() bool { + gossipTxPool.lock.RLock() + defer gossipTxPool.lock.RUnlock() + + for _, tx := range ethTxs { + if !gossipTxPool.bloom.Has(&GossipEthTx{Tx: tx}) { + return false + } + } + return true + }, + 10*time.Second, + 10*time.Millisecond, + "expected all transactions to eventually be in the bloom filter", + ) +} + +func setupPoolWithConfig(t *testing.T, config *params.ChainConfig, fundedAddress common.Address) *txpool.TxPool { + diskdb := rawdb.NewMemoryDatabase() + engine := dummy.NewETHFaker() + + gspec := &core.Genesis{ + Config: config, + Alloc: core.GenesisAlloc{fundedAddress: core.GenesisAccount{Balance: big.NewInt(1000000000000000000)}}, + } + chain, err := core.NewBlockChain(diskdb, core.DefaultCacheConfig, gspec, engine, vm.Config{}, common.Hash{}, false) + require.NoError(t, err) + testTxPoolConfig := txpool.DefaultConfig + testTxPoolConfig.Journal = "" + pool := txpool.NewTxPool(testTxPoolConfig, config, chain) + + return pool +} diff --git a/coreth/plugin/evm/gossiper.go b/coreth/plugin/evm/gossiper.go index e1bae8e1..b3f08d8c 100644 --- a/coreth/plugin/evm/gossiper.go +++ b/coreth/plugin/evm/gossiper.go @@ -5,11 +5,13 @@ package evm import ( "container/heap" + "context" "math/big" "sync" "time" "github.com/ava-labs/avalanchego/codec" + "github.com/ava-labs/avalanchego/network/p2p/gossip" "github.com/ava-labs/coreth/peer" @@ -24,6 +26,7 @@ import ( "github.com/ava-labs/coreth/core" "github.com/ava-labs/coreth/core/state" + "github.com/ava-labs/coreth/core/txpool" "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/plugin/evm/message" ) @@ -36,6 +39,10 @@ const ( // [ethTxsGossipInterval] is how often we attempt to gossip newly seen // transactions 
to other nodes. ethTxsGossipInterval = 500 * time.Millisecond + + // [minGossipBatchInterval] is the minimum amount of time that must pass + // before our last gossip to peers. + minGossipBatchInterval = 50 * time.Millisecond ) // Gossiper handles outgoing gossip of transactions @@ -48,14 +55,15 @@ type Gossiper interface { // pushGossiper is used to gossip transactions to the network type pushGossiper struct { - ctx *snow.Context - gossipActivationTime time.Time - config Config + ctx *snow.Context + config Config - client peer.NetworkClient - blockchain *core.BlockChain - txPool *core.TxPool - atomicMempool *Mempool + client peer.NetworkClient + blockchain *core.BlockChain + txPool *txpool.TxPool + atomicMempool *Mempool + ethTxGossiper gossip.Accumulator[*GossipEthTx] + atomicTxGossiper gossip.Accumulator[*GossipAtomicTx] // We attempt to batch transactions we need to gossip to avoid runaway // amplification of mempol chatter. @@ -76,28 +84,30 @@ type pushGossiper struct { // createGossiper constructs and returns a pushGossiper or noopGossiper // based on whether vm.chainConfig.ApricotPhase4BlockTimestamp is set -func (vm *VM) createGossiper(stats GossipStats) Gossiper { - if vm.chainConfig.ApricotPhase4BlockTimestamp == nil { - return &noopGossiper{} - } - +func (vm *VM) createGossiper( + stats GossipStats, + ethTxGossiper gossip.Accumulator[*GossipEthTx], + atomicTxGossiper gossip.Accumulator[*GossipAtomicTx], +) Gossiper { net := &pushGossiper{ - ctx: vm.ctx, - gossipActivationTime: time.Unix(vm.chainConfig.ApricotPhase4BlockTimestamp.Int64(), 0), - config: vm.config, - client: vm.client, - blockchain: vm.blockChain, - txPool: vm.txPool, - atomicMempool: vm.mempool, - ethTxsToGossipChan: make(chan []*types.Transaction), - ethTxsToGossip: make(map[common.Hash]*types.Transaction), - shutdownChan: vm.shutdownChan, - shutdownWg: &vm.shutdownWg, - recentAtomicTxs: &cache.LRU[ids.ID, interface{}]{Size: recentCacheSize}, - recentEthTxs: &cache.LRU[common.Hash, 
interface{}]{Size: recentCacheSize}, - codec: vm.networkCodec, - stats: stats, + ctx: vm.ctx, + config: vm.config, + client: vm.client, + blockchain: vm.blockChain, + txPool: vm.txPool, + atomicMempool: vm.mempool, + ethTxsToGossipChan: make(chan []*types.Transaction), + ethTxsToGossip: make(map[common.Hash]*types.Transaction), + shutdownChan: vm.shutdownChan, + shutdownWg: &vm.shutdownWg, + recentAtomicTxs: &cache.LRU[ids.ID, interface{}]{Size: recentCacheSize}, + recentEthTxs: &cache.LRU[common.Hash, interface{}]{Size: recentCacheSize}, + codec: vm.networkCodec, + stats: stats, + ethTxGossiper: ethTxGossiper, + atomicTxGossiper: atomicTxGossiper, } + net.awaitEthTxGossip() return net } @@ -108,7 +118,12 @@ func (vm *VM) createGossiper(stats GossipStats) Gossiper { // We assume that [txs] contains an array of nonce-ordered transactions for a given // account. This array of transactions can have gaps and start at a nonce lower // than the current state of an account. -func (n *pushGossiper) queueExecutableTxs(state *state.StateDB, baseFee *big.Int, txs map[common.Address]types.Transactions, maxTxs int) types.Transactions { +func (n *pushGossiper) queueExecutableTxs( + state *state.StateDB, + baseFee *big.Int, + txs map[common.Address]types.Transactions, + maxTxs int, +) types.Transactions { // Setup heap for transactions heads := make(types.TxByPriceAndTime, 0, len(txs)) for addr, accountTxs := range txs { @@ -141,7 +156,7 @@ func (n *pushGossiper) queueExecutableTxs(state *state.StateDB, baseFee *big.Int } // Don't try to regossip a transaction too frequently - if time.Since(tx.FirstSeen()) < n.config.TxRegossipFrequency.Duration { + if time.Since(tx.FirstSeen()) < n.config.RegossipFrequency.Duration { continue } @@ -189,7 +204,7 @@ func (n *pushGossiper) queueRegossipTxs() types.Transactions { // Add best transactions to be gossiped (preferring local txs) tip := n.blockchain.CurrentBlock() - state, err := n.blockchain.StateAt(tip.Root()) + state, err := 
n.blockchain.StateAt(tip.Root) if err != nil || state == nil { log.Debug( "could not get state at tip", @@ -198,14 +213,14 @@ func (n *pushGossiper) queueRegossipTxs() types.Transactions { ) return nil } - localQueued := n.queueExecutableTxs(state, tip.BaseFee(), localTxs, n.config.TxRegossipMaxSize) + localQueued := n.queueExecutableTxs(state, tip.BaseFee, localTxs, n.config.RegossipMaxTxs) localCount := len(localQueued) n.stats.IncEthTxsRegossipQueuedLocal(localCount) - if localCount >= n.config.TxRegossipMaxSize { + if localCount >= n.config.RegossipMaxTxs { n.stats.IncEthTxsRegossipQueued() return localQueued } - remoteQueued := n.queueExecutableTxs(state, tip.BaseFee(), remoteTxs, n.config.TxRegossipMaxSize-localCount) + remoteQueued := n.queueExecutableTxs(state, tip.BaseFee, remoteTxs, n.config.RegossipMaxTxs-localCount) n.stats.IncEthTxsRegossipQueuedRemote(len(remoteQueued)) if localCount+len(remoteQueued) > 0 { // only increment the regossip stat when there are any txs queued @@ -221,7 +236,7 @@ func (n *pushGossiper) awaitEthTxGossip() { go n.ctx.Log.RecoverAndPanic(func() { var ( gossipTicker = time.NewTicker(ethTxsGossipInterval) - regossipTicker = time.NewTicker(n.config.TxRegossipFrequency.Duration) + regossipTicker = time.NewTicker(n.config.RegossipFrequency.Duration) ) defer func() { gossipTicker.Stop() @@ -239,6 +254,12 @@ func (n *pushGossiper) awaitEthTxGossip() { "err", err, ) } + if err := n.ethTxGossiper.Gossip(context.TODO()); err != nil { + log.Warn( + "failed to send eth transactions", + "err", err, + ) + } case <-regossipTicker.C: for _, tx := range n.queueRegossipTxs() { n.ethTxsToGossip[tx.Hash()] = tx @@ -261,6 +282,21 @@ func (n *pushGossiper) awaitEthTxGossip() { "err", err, ) } + + gossipTxs := make([]*GossipEthTx, 0, len(txs)) + for _, tx := range txs { + gossipTxs = append(gossipTxs, &GossipEthTx{Tx: tx}) + } + + n.ethTxGossiper.Add(gossipTxs...) 
+ if err := n.ethTxGossiper.Gossip(context.TODO()); err != nil { + log.Warn( + "failed to send eth transactions", + "len(txs)", len(txs), + "err", err, + ) + } + case <-n.shutdownChan: return } @@ -269,14 +305,6 @@ func (n *pushGossiper) awaitEthTxGossip() { } func (n *pushGossiper) GossipAtomicTxs(txs []*Tx) error { - if time.Now().Before(n.gossipActivationTime) { - log.Trace( - "not gossiping atomic tx before the gossiping activation time", - "txs", txs, - ) - return nil - } - errs := wrappers.Errs{} for _, tx := range txs { errs.Add(n.gossipAtomicTx(tx)) @@ -310,6 +338,11 @@ func (n *pushGossiper) gossipAtomicTx(tx *Tx) error { "txID", txID, ) n.stats.IncAtomicGossipSent() + n.atomicTxGossiper.Add(&GossipAtomicTx{Tx: tx}) + if err := n.atomicTxGossiper.Gossip(context.TODO()); err != nil { + return err + } + return n.client.Gossip(msgBytes) } @@ -340,7 +373,7 @@ func (n *pushGossiper) sendEthTxs(txs []*types.Transaction) error { } func (n *pushGossiper) gossipEthTxs(force bool) (int, error) { - if (!force && time.Since(n.lastGossiped) < ethTxsGossipInterval) || len(n.ethTxsToGossip) == 0 { + if (!force && time.Since(n.lastGossiped) < minGossipBatchInterval) || len(n.ethTxsToGossip) == 0 { return 0, nil } n.lastGossiped = time.Now() @@ -354,11 +387,11 @@ func (n *pushGossiper) gossipEthTxs(force bool) (int, error) { for _, tx := range txs { txHash := tx.Hash() txStatus := n.txPool.Status([]common.Hash{txHash})[0] - if txStatus != core.TxStatusPending { + if txStatus != txpool.TxStatusPending { continue } - if n.config.RemoteTxGossipOnlyEnabled && n.txPool.HasLocal(txHash) { + if n.config.RemoteGossipOnlyEnabled && n.txPool.HasLocal(txHash) { continue } @@ -380,7 +413,7 @@ func (n *pushGossiper) gossipEthTxs(force bool) (int, error) { // Attempt to gossip [selectedTxs] msgTxs := make([]*types.Transaction, 0) - msgTxsSize := common.StorageSize(0) + msgTxsSize := uint64(0) for _, tx := range selectedTxs { size := tx.Size() if msgTxsSize+size > 
message.EthMsgSoftCapSize { @@ -405,14 +438,6 @@ func (n *pushGossiper) gossipEthTxs(force bool) (int, error) { // NOTE: We never return a non-nil error from this function but retain the // option to do so in case it becomes useful. func (n *pushGossiper) GossipEthTxs(txs []*types.Transaction) error { - if time.Now().Before(n.gossipActivationTime) { - log.Trace( - "not gossiping eth txs before the gossiping activation time", - "len(txs)", len(txs), - ) - return nil - } - select { case n.ethTxsToGossipChan <- txs: case <-n.shutdownChan: @@ -424,7 +449,7 @@ func (n *pushGossiper) GossipEthTxs(txs []*types.Transaction) error { type GossipHandler struct { vm *VM atomicMempool *Mempool - txPool *core.TxPool + txPool *txpool.TxPool stats GossipReceivedStats } @@ -482,12 +507,17 @@ func (h *GossipHandler) HandleAtomicTx(nodeID ids.NodeID, msg message.AtomicTxGo } h.stats.IncAtomicGossipReceivedNew() - if err := h.vm.issueTx(&tx, false /*=local*/); err != nil { + + h.vm.ctx.Lock.RLock() + defer h.vm.ctx.Lock.RUnlock() + + if err := h.vm.mempool.AddTx(&tx); err != nil { log.Trace( "AppGossip provided invalid transaction", "peerID", nodeID, "err", err, ) + h.stats.IncAtomicGossipReceivedError() } return nil @@ -527,10 +557,10 @@ func (h *GossipHandler) HandleEthTxs(nodeID ids.NodeID, msg message.EthTxsGossip "err", err, "tx", txs[i].Hash(), ) - if err == core.ErrAlreadyKnown { + if err == txpool.ErrAlreadyKnown { h.stats.IncEthTxsGossipReceivedKnown() } else { - h.stats.IncAtomicGossipReceivedError() + h.stats.IncEthTxsGossipReceivedError() } continue } @@ -538,13 +568,3 @@ func (h *GossipHandler) HandleEthTxs(nodeID ids.NodeID, msg message.EthTxsGossip } return nil } - -// noopGossiper should be used when gossip communication is not supported -type noopGossiper struct{} - -func (n *noopGossiper) GossipAtomicTxs([]*Tx) error { - return nil -} -func (n *noopGossiper) GossipEthTxs([]*types.Transaction) error { - return nil -} diff --git 
a/coreth/plugin/evm/gossiper_atomic_gossiping_test.go b/coreth/plugin/evm/gossiper_atomic_gossiping_test.go index 3f3b70c5..267aaf73 100644 --- a/coreth/plugin/evm/gossiper_atomic_gossiping_test.go +++ b/coreth/plugin/evm/gossiper_atomic_gossiping_test.go @@ -11,8 +11,8 @@ import ( "time" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/utils/set" - "github.com/stretchr/testify/assert" "github.com/ava-labs/coreth/plugin/evm/message" @@ -22,10 +22,11 @@ import ( func TestMempoolAtmTxsIssueTxAndGossiping(t *testing.T) { assert := assert.New(t) - _, vm, _, sharedMemory, sender := GenesisVM(t, true, "", "", "") + _, vm, _, sharedMemory, sender := GenesisVM(t, false, "", "", "") defer func() { assert.NoError(vm.Shutdown(context.Background())) }() + assert.NoError(vm.Connected(context.Background(), ids.GenerateTestNodeID(), nil)) // Create conflicting transactions importTxs := createImportTxOptions(t, vm, sharedMemory) @@ -56,12 +57,15 @@ func TestMempoolAtmTxsIssueTxAndGossiping(t *testing.T) { return nil } + assert.NoError(vm.SetState(context.Background(), snow.NormalOp)) + // Optimistically gossip raw tx - assert.NoError(vm.issueTx(tx, true /*=local*/)) - time.Sleep(waitBlockTime * 3) + assert.NoError(vm.mempool.AddLocalTx(tx)) + time.Sleep(500 * time.Millisecond) gossipedLock.Lock() assert.Equal(1, gossiped) gossipedLock.Unlock() + assert.True(vm.mempool.bloom.Has(&GossipAtomicTx{Tx: tx})) // Test hash on retry assert.NoError(vm.gossiper.GossipAtomicTxs([]*Tx{tx})) @@ -70,7 +74,7 @@ func TestMempoolAtmTxsIssueTxAndGossiping(t *testing.T) { gossipedLock.Unlock() // Attempt to gossip conflicting tx - assert.ErrorIs(vm.issueTx(conflictingTx, true /*=local*/), errConflictingAtomicTx) + assert.ErrorIs(vm.mempool.AddLocalTx(conflictingTx), errConflictingAtomicTx) gossipedLock.Lock() assert.Equal(1, gossiped) gossipedLock.Unlock() @@ -117,9 +121,13 @@ func TestMempoolAtmTxsAppGossipHandling(t *testing.T) { 
msgBytes, err := message.BuildGossipMessage(vm.networkCodec, msg) assert.NoError(err) + vm.ctx.Lock.Unlock() + // show that no txID is requested assert.NoError(vm.AppGossip(context.Background(), nodeID, msgBytes)) - time.Sleep(waitBlockTime * 3) + time.Sleep(500 * time.Millisecond) + + vm.ctx.Lock.Lock() assert.False(txRequested, "tx should not have been requested") txGossipedLock.Lock() @@ -127,8 +135,13 @@ func TestMempoolAtmTxsAppGossipHandling(t *testing.T) { txGossipedLock.Unlock() assert.True(vm.mempool.has(tx.ID())) + vm.ctx.Lock.Unlock() + // show that tx is not re-gossiped assert.NoError(vm.AppGossip(context.Background(), nodeID, msgBytes)) + + vm.ctx.Lock.Lock() + txGossipedLock.Lock() assert.Equal(1, txGossiped, "tx should have only been gossiped once") txGossipedLock.Unlock() @@ -139,7 +152,13 @@ func TestMempoolAtmTxsAppGossipHandling(t *testing.T) { } msgBytes, err = message.BuildGossipMessage(vm.networkCodec, msg) assert.NoError(err) + + vm.ctx.Lock.Unlock() + assert.NoError(vm.AppGossip(context.Background(), nodeID, msgBytes)) + + vm.ctx.Lock.Lock() + assert.False(txRequested, "tx should not have been requested") txGossipedLock.Lock() assert.Equal(1, txGossiped, "tx should not have been gossiped") @@ -199,7 +218,12 @@ func TestMempoolAtmTxsAppGossipHandlingDiscardedTx(t *testing.T) { msgBytes, err := message.BuildGossipMessage(vm.networkCodec, msg) assert.NoError(err) + vm.ctx.Lock.Unlock() + assert.NoError(vm.AppGossip(context.Background(), nodeID, msgBytes)) + + vm.ctx.Lock.Lock() + assert.False(txRequested, "tx shouldn't be requested") txGossipedLock.Lock() assert.Zero(txGossiped, "tx should not have been gossiped") @@ -217,8 +241,13 @@ func TestMempoolAtmTxsAppGossipHandlingDiscardedTx(t *testing.T) { msgBytes, err = message.BuildGossipMessage(vm.networkCodec, msg) assert.NoError(err) + vm.ctx.Lock.Unlock() + assert.NoError(vm.AppGossip(context.Background(), nodeID, msgBytes)) - time.Sleep(waitBlockTime * 3) + time.Sleep(500 * time.Millisecond) 
+ + vm.ctx.Lock.Lock() + assert.False(txRequested, "tx shouldn't be requested") txGossipedLock.Lock() assert.Equal(1, txGossiped, "conflicting tx should have been gossiped") diff --git a/coreth/plugin/evm/gossiper_eth_gossiping_test.go b/coreth/plugin/evm/gossiper_eth_gossiping_test.go index 4a2a3f6d..97e7b831 100644 --- a/coreth/plugin/evm/gossiper_eth_gossiping_test.go +++ b/coreth/plugin/evm/gossiper_eth_gossiping_test.go @@ -42,14 +42,7 @@ func fundAddressByGenesis(addrs []common.Address) (string, error) { } } genesis.Alloc = funds - - genesis.Config = ¶ms.ChainConfig{ - ChainID: params.AvalancheLocalChainID, - ApricotPhase1BlockTimestamp: big.NewInt(0), - ApricotPhase2BlockTimestamp: big.NewInt(0), - ApricotPhase3BlockTimestamp: big.NewInt(0), - ApricotPhase4BlockTimestamp: big.NewInt(0), - } + genesis.Config = params.TestChainConfig bytes, err := json.Marshal(genesis) return string(bytes), err @@ -59,8 +52,8 @@ func getValidEthTxs(key *ecdsa.PrivateKey, count int, gasPrice *big.Int) []*type res := make([]*types.Transaction, count) to := common.Address{} - amount := big.NewInt(10000) - gasLimit := uint64(100000) + amount := big.NewInt(0) + gasLimit := uint64(37000) for i := 0; i < count; i++ { tx, _ := types.SignTx( diff --git a/coreth/plugin/evm/import_tx.go b/coreth/plugin/evm/import_tx.go index 8f02e906..54cca09c 100644 --- a/coreth/plugin/evm/import_tx.go +++ b/coreth/plugin/evm/import_tx.go @@ -8,6 +8,7 @@ import ( "errors" "fmt" "math/big" + "slices" "github.com/ava-labs/coreth/core/state" "github.com/ava-labs/coreth/params" @@ -109,16 +110,16 @@ func (utx *UnsignedImportTx) Verify( return errImportNonAVAXInputBanff } } - if !utils.IsSortedAndUniqueSortable(utx.ImportedInputs) { + if !utils.IsSortedAndUnique(utx.ImportedInputs) { return errInputsNotSortedUnique } if rules.IsApricotPhase2 { - if !IsSortedAndUniqueEVMOutputs(utx.Outs) { + if !utils.IsSortedAndUnique(utx.Outs) { return errOutputsNotSortedUnique } } else if rules.IsApricotPhase1 { - if 
!IsSortedEVMOutputs(utx.Outs) { + if !slices.IsSortedFunc(utx.Outs, EVMOutput.Compare) { return errOutputsNotSorted } } @@ -198,7 +199,7 @@ func (utx *UnsignedImportTx) SemanticVerify( if err != nil { return err } - txFee, err := calculateDynamicFee(gasUsed, baseFee) + txFee, err := CalculateDynamicFee(gasUsed, baseFee) if err != nil { return err } @@ -379,11 +380,11 @@ func (vm *VM) newImportTxWithUTXOs( } gasUsedWithChange := gasUsedWithoutChange + EVMOutputGas - txFeeWithoutChange, err = calculateDynamicFee(gasUsedWithoutChange, baseFee) + txFeeWithoutChange, err = CalculateDynamicFee(gasUsedWithoutChange, baseFee) if err != nil { return nil, err } - txFeeWithChange, err = calculateDynamicFee(gasUsedWithChange, baseFee) + txFeeWithChange, err = CalculateDynamicFee(gasUsedWithChange, baseFee) if err != nil { return nil, err } @@ -412,7 +413,7 @@ func (vm *VM) newImportTxWithUTXOs( return nil, errNoEVMOutputs } - SortEVMOutputs(outs) + utils.Sort(outs) // Create the transaction utx := &UnsignedImportTx{ diff --git a/coreth/plugin/evm/import_tx_test.go b/coreth/plugin/evm/import_tx_test.go index d292ef95..ddcde4a8 100644 --- a/coreth/plugin/evm/import_tx_test.go +++ b/coreth/plugin/evm/import_tx_test.go @@ -116,7 +116,7 @@ func TestImportTxVerify(t *testing.T) { // Sort the inputs and outputs to ensure the transaction is canonical utils.Sort(importTx.ImportedInputs) - SortEVMOutputs(importTx.Outs) + utils.Sort(importTx.Outs) tests := map[string]atomicTxVerifyTest{ "nil tx": { @@ -449,7 +449,7 @@ func TestNewImportTx(t *testing.T) { if err != nil { t.Fatal(err) } - actualFee, err = calculateDynamicFee(actualCost, initialBaseFee) + actualFee, err = CalculateDynamicFee(actualCost, initialBaseFee) if err != nil { t.Fatal(err) } @@ -853,7 +853,7 @@ func TestImportTxGasCost(t *testing.T) { t.Fatalf("Expected gasUsed to be %d, but found %d", test.ExpectedGasUsed, gasUsed) } - fee, err := calculateDynamicFee(gasUsed, test.BaseFee) + fee, err := CalculateDynamicFee(gasUsed, 
test.BaseFee) if err != nil { t.Fatal(err) } diff --git a/coreth/plugin/evm/mempool.go b/coreth/plugin/evm/mempool.go index 25b67298..bab9bccf 100644 --- a/coreth/plugin/evm/mempool.go +++ b/coreth/plugin/evm/mempool.go @@ -10,6 +10,10 @@ import ( "github.com/ava-labs/avalanchego/cache" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/network/p2p/gossip" + "github.com/ava-labs/avalanchego/snow" + "github.com/prometheus/client_golang/prometheus" + "github.com/ava-labs/coreth/metrics" "github.com/ethereum/go-ethereum/log" ) @@ -18,7 +22,12 @@ const ( discardedTxsCacheSize = 50 ) -var errNoGasUsed = errors.New("no gas used") +var ( + errTxAlreadyKnown = errors.New("tx already known") + errNoGasUsed = errors.New("no gas used") + + _ gossip.Set[*GossipAtomicTx] = (*Mempool)(nil) +) // mempoolMetrics defines the metrics for the atomic mempool type mempoolMetrics struct { @@ -48,8 +57,7 @@ func newMempoolMetrics() *mempoolMetrics { type Mempool struct { lock sync.RWMutex - // AVAXAssetID is the fee paying currency of any atomic transaction - AVAXAssetID ids.ID + ctx *snow.Context // maxSize is the maximum number of transactions allowed to be kept in mempool maxSize int // currentTxs is the set of transactions about to be added to a block. 
@@ -69,14 +77,23 @@ type Mempool struct { txHeap *txHeap // utxoSpenders maps utxoIDs to the transaction consuming them in the mempool utxoSpenders map[ids.ID]*Tx + // bloom is a bloom filter containing the txs in the mempool + bloom *gossip.BloomFilter metrics *mempoolMetrics + + verify func(tx *Tx) error } // NewMempool returns a Mempool with [maxSize] -func NewMempool(AVAXAssetID ids.ID, maxSize int) *Mempool { +func NewMempool(ctx *snow.Context, registerer prometheus.Registerer, maxSize int, verify func(tx *Tx) error) (*Mempool, error) { + bloom, err := gossip.NewBloomFilter(registerer, "atomic_mempool_bloom_filter", txGossipBloomMinTargetElements, txGossipBloomTargetFalsePositiveRate, txGossipBloomResetFalsePositiveRate) + if err != nil { + return nil, fmt.Errorf("failed to initialize bloom filter: %w", err) + } + return &Mempool{ - AVAXAssetID: AVAXAssetID, + ctx: ctx, issuedTxs: make(map[ids.ID]*Tx), discardedTxs: &cache.LRU[ids.ID, *Tx]{Size: discardedTxsCacheSize}, currentTxs: make(map[ids.ID]*Tx), @@ -84,8 +101,10 @@ func NewMempool(AVAXAssetID ids.ID, maxSize int) *Mempool { txHeap: newTxHeap(maxSize), maxSize: maxSize, utxoSpenders: make(map[ids.ID]*Tx), + bloom: bloom, metrics: newMempoolMetrics(), - } + verify: verify, + }, nil } // Len returns the number of transactions in the mempool @@ -118,20 +137,71 @@ func (m *Mempool) atomicTxGasPrice(tx *Tx) (uint64, error) { if gasUsed == 0 { return 0, errNoGasUsed } - burned, err := tx.Burned(m.AVAXAssetID) + burned, err := tx.Burned(m.ctx.AVAXAssetID) if err != nil { return 0, err } return burned / gasUsed, nil } -// Add attempts to add [tx] to the mempool and returns an error if -// it could not be addeed to the mempool. 
+func (m *Mempool) Add(tx *GossipAtomicTx) error { + m.ctx.Lock.RLock() + defer m.ctx.Lock.RUnlock() + + m.lock.Lock() + defer m.lock.Unlock() + + err := m.addTx(tx.Tx, false) + if errors.Is(err, errTxAlreadyKnown) { + return err + } + + if err != nil { + txID := tx.Tx.ID() + m.discardedTxs.Put(txID, tx.Tx) + log.Debug("failed to issue remote tx to mempool", + "txID", txID, + "err", err, + ) + } + + return err +} + +// AddTx attempts to add [tx] to the mempool and returns an error if +// it could not be added to the mempool. func (m *Mempool) AddTx(tx *Tx) error { m.lock.Lock() defer m.lock.Unlock() - return m.addTx(tx, false) + err := m.addTx(tx, false) + if errors.Is(err, errTxAlreadyKnown) { + return nil + } + + if err != nil { + // unlike local txs, invalid remote txs are recorded as discarded + // so that they won't be requested again + txID := tx.ID() + m.discardedTxs.Put(tx.ID(), tx) + log.Debug("failed to issue remote tx to mempool", + "txID", txID, + "err", err, + ) + } + return err +} + +func (m *Mempool) AddLocalTx(tx *Tx) error { + m.lock.Lock() + defer m.lock.Unlock() + + err := m.addTx(tx, false) + if errors.Is(err, errTxAlreadyKnown) { + return nil + } + + return err } // forceAddTx forcibly adds a *Tx to the mempool and bypasses all verification. @@ -139,7 +209,12 @@ func (m *Mempool) ForceAddTx(tx *Tx) error { m.lock.Lock() defer m.lock.Unlock() - return m.addTx(tx, true) + err := m.addTx(tx, true) + if errors.Is(err, errTxAlreadyKnown) { + return nil + } + + return nil } // checkConflictTx checks for any transactions in the mempool that spend the same input UTXOs as [tx]. @@ -181,13 +256,18 @@ func (m *Mempool) addTx(tx *Tx, force bool) error { // If [txID] has already been issued or is in the currentTxs map // there's no need to add it. 
if _, exists := m.issuedTxs[txID]; exists { - return nil + return fmt.Errorf("%w: tx %s was issued previously", errTxAlreadyKnown, tx.ID()) } if _, exists := m.currentTxs[txID]; exists { - return nil + return fmt.Errorf("%w: tx %s is being built into a block", errTxAlreadyKnown, tx.ID()) } if _, exists := m.txHeap.Get(txID); exists { - return nil + return fmt.Errorf("%w: tx %s is pending", errTxAlreadyKnown, tx.ID()) + } + if !force && m.verify != nil { + if err := m.verify(tx); err != nil { + return err + } } utxoSet := tx.InputUTXOs() @@ -259,6 +339,21 @@ func (m *Mempool) addTx(tx *Tx, force bool) error { for utxoID := range utxoSet { m.utxoSpenders[utxoID] = tx } + + m.bloom.Add(&GossipAtomicTx{Tx: tx}) + reset, err := gossip.ResetBloomFilterIfNeeded(m.bloom, m.length()*txGossipBloomChurnMultiplier) + if err != nil { + return err + } + + if reset { + log.Debug("resetting bloom filter", "reason", "reached max filled ratio") + + for _, pendingTx := range m.txHeap.minHeap.items { + m.bloom.Add(&GossipAtomicTx{Tx: pendingTx.tx}) + } + } + // When adding [tx] to the mempool make sure that there is an item in Pending // to signal the VM to produce a block. Note: if the VM's buildStatus has already // been set to something other than [dontBuild], this will be ignored and won't be @@ -266,9 +361,28 @@ func (m *Mempool) addTx(tx *Tx, force bool) error { // and CancelCurrentTx. m.newTxs = append(m.newTxs, tx) m.addPending() + return nil } +func (m *Mempool) Iterate(f func(tx *GossipAtomicTx) bool) { + m.lock.RLock() + defer m.lock.RUnlock() + + for _, item := range m.txHeap.maxHeap.items { + if !f(&GossipAtomicTx{Tx: item.tx}) { + return + } + } +} + +func (m *Mempool) GetFilter() ([]byte, []byte) { + m.lock.RLock() + defer m.lock.RUnlock() + + return m.bloom.Marshal() +} + // NextTx returns a transaction to be issued from the mempool. 
func (m *Mempool) NextTx() (*Tx, bool) { m.lock.Lock() diff --git a/coreth/plugin/evm/mempool_atomic_gossiping_test.go b/coreth/plugin/evm/mempool_atomic_gossiping_test.go index 84a9dd10..741c177b 100644 --- a/coreth/plugin/evm/mempool_atomic_gossiping_test.go +++ b/coreth/plugin/evm/mempool_atomic_gossiping_test.go @@ -5,16 +5,12 @@ package evm import ( "context" + "math/big" "testing" - "github.com/ava-labs/coreth/params" - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" - "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/chain" - "github.com/ava-labs/avalanchego/vms/secp256k1fx" "github.com/stretchr/testify/assert" ) @@ -49,13 +45,13 @@ func TestMempoolAddLocallyCreateAtomicTx(t *testing.T) { conflictingTxID := conflictingTx.ID() // add a tx to the mempool - err := vm.issueTx(tx, true /*=local*/) + err := vm.mempool.AddLocalTx(tx) assert.NoError(err) has := mempool.has(txID) assert.True(has, "valid tx not recorded into mempool") // try to add a conflicting tx - err = vm.issueTx(conflictingTx, true /*=local*/) + err = vm.mempool.AddLocalTx(conflictingTx) assert.ErrorIs(err, errConflictingAtomicTx) has = mempool.has(conflictingTxID) assert.False(has, "conflicting tx in mempool") @@ -118,73 +114,16 @@ func TestMempoolMaxMempoolSizeHandling(t *testing.T) { assert.True(mempool.has(tx.ID())) } -func createImportTx(t *testing.T, vm *VM, txID ids.ID, feeAmount uint64) *Tx { - var importAmount uint64 = 10000000 - importTx := &UnsignedImportTx{ - NetworkID: testNetworkID, - BlockchainID: testCChainID, - SourceChain: testXChainID, - ImportedInputs: []*avax.TransferableInput{ - { - UTXOID: avax.UTXOID{ - TxID: txID, - OutputIndex: uint32(0), - }, - Asset: avax.Asset{ID: testAvaxAssetID}, - In: &secp256k1fx.TransferInput{ - Amt: importAmount, - Input: secp256k1fx.Input{ - SigIndices: []uint32{0}, - }, - }, - }, - { - UTXOID: 
avax.UTXOID{ - TxID: txID, - OutputIndex: uint32(1), - }, - Asset: avax.Asset{ID: testAvaxAssetID}, - In: &secp256k1fx.TransferInput{ - Amt: importAmount, - Input: secp256k1fx.Input{ - SigIndices: []uint32{0}, - }, - }, - }, - }, - Outs: []EVMOutput{ - { - Address: testEthAddrs[0], - Amount: importAmount - feeAmount, - AssetID: testAvaxAssetID, - }, - { - Address: testEthAddrs[1], - Amount: importAmount, - AssetID: testAvaxAssetID, - }, - }, - } - - // Sort the inputs and outputs to ensure the transaction is canonical - utils.Sort(importTx.ImportedInputs) - SortEVMOutputs(importTx.Outs) - - tx := &Tx{UnsignedAtomicTx: importTx} - // Sign with the correct key - if err := tx.Sign(vm.codec, [][]*secp256k1.PrivateKey{{testKeys[0]}}); err != nil { - t.Fatal(err) - } - - return tx -} - // mempool will drop transaction with the lowest fee func TestMempoolPriorityDrop(t *testing.T) { assert := assert.New(t) // we use AP3 genesis here to not trip any block fees - _, vm, _, _, _ := GenesisVM(t, true, genesisJSONApricotPhase3, "", "") + importAmount := uint64(50000000) + _, vm, _, _, _ := GenesisVMWithUTXOs(t, true, genesisJSONApricotPhase3, "", "", map[ids.ShortID]uint64{ + testShortIDAddrs[0]: importAmount, + testShortIDAddrs[1]: importAmount, + }) defer func() { err := vm.Shutdown(context.Background()) assert.NoError(err) @@ -192,14 +131,25 @@ func TestMempoolPriorityDrop(t *testing.T) { mempool := vm.mempool mempool.maxSize = 1 - tx1 := createImportTx(t, vm, ids.ID{1}, params.AvalancheAtomicTxFee) + tx1, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) + if err != nil { + t.Fatal(err) + } assert.NoError(mempool.AddTx(tx1)) assert.True(mempool.has(tx1.ID())) - tx2 := createImportTx(t, vm, ids.ID{2}, params.AvalancheAtomicTxFee) + + tx2, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[1], initialBaseFee, []*secp256k1.PrivateKey{testKeys[1]}) + if err != nil { + t.Fatal(err) + } 
assert.ErrorIs(mempool.AddTx(tx2), errInsufficientAtomicTxFee) assert.True(mempool.has(tx1.ID())) assert.False(mempool.has(tx2.ID())) - tx3 := createImportTx(t, vm, ids.ID{3}, 2*params.AvalancheAtomicTxFee) + + tx3, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[1], new(big.Int).Mul(initialBaseFee, big.NewInt(2)), []*secp256k1.PrivateKey{testKeys[1]}) + if err != nil { + t.Fatal(err) + } assert.NoError(mempool.AddTx(tx3)) assert.False(mempool.has(tx1.ID())) assert.False(mempool.has(tx2.ID())) diff --git a/coreth/plugin/evm/mempool_test.go b/coreth/plugin/evm/mempool_test.go new file mode 100644 index 00000000..a56c43bb --- /dev/null +++ b/coreth/plugin/evm/mempool_test.go @@ -0,0 +1,56 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package evm + +import ( + "testing" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" + "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/require" +) + +func TestMempoolAddTx(t *testing.T) { + require := require.New(t) + m, err := NewMempool(&snow.Context{}, prometheus.NewRegistry(), 5_000, nil) + require.NoError(err) + + txs := make([]*GossipAtomicTx, 0) + for i := 0; i < 3_000; i++ { + tx := &GossipAtomicTx{ + Tx: &Tx{ + UnsignedAtomicTx: &TestUnsignedTx{ + IDV: ids.GenerateTestID(), + }, + }, + } + + txs = append(txs, tx) + require.NoError(m.Add(tx)) + } + + for _, tx := range txs { + require.True(m.bloom.Has(tx)) + } +} + +// Add should return an error if a tx is already known +func TestMempoolAdd(t *testing.T) { + require := require.New(t) + m, err := NewMempool(&snow.Context{}, prometheus.NewRegistry(), 5_000, nil) + require.NoError(err) + + tx := &GossipAtomicTx{ + Tx: &Tx{ + UnsignedAtomicTx: &TestUnsignedTx{ + IDV: ids.GenerateTestID(), + }, + }, + } + + require.NoError(m.Add(tx)) + err = m.Add(tx) + require.ErrorIs(err, errTxAlreadyKnown) +} diff --git a/coreth/plugin/evm/message/codec.go 
b/coreth/plugin/evm/message/codec.go index a698e821..f1e2cfcb 100644 --- a/coreth/plugin/evm/message/codec.go +++ b/coreth/plugin/evm/message/codec.go @@ -4,6 +4,8 @@ package message import ( + "time" + "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" "github.com/ava-labs/avalanchego/utils/units" @@ -12,7 +14,7 @@ import ( const ( Version = uint16(0) - maxMessageSize = 1 * units.MiB + maxMessageSize = 2*units.MiB - 64*units.KiB // Subtract 64 KiB from p2p network cap to leave room for encoding overhead from AvalancheGo ) var ( @@ -22,7 +24,7 @@ var ( func init() { Codec = codec.NewManager(maxMessageSize) - c := linearcodec.NewDefault() + c := linearcodec.NewDefault(time.Time{}) errs := wrappers.Errs{} errs.Add( @@ -41,6 +43,11 @@ func init() { c.RegisterType(CodeRequest{}), c.RegisterType(CodeResponse{}), + // Warp request types + c.RegisterType(MessageSignatureRequest{}), + c.RegisterType(BlockSignatureRequest{}), + c.RegisterType(SignatureResponse{}), + Codec.RegisterCodec(Version, c), ) @@ -49,7 +56,7 @@ func init() { } CrossChainCodec = codec.NewManager(maxMessageSize) - ccc := linearcodec.NewDefault() + ccc := linearcodec.NewDefault(time.Time{}) errs = wrappers.Errs{} errs.Add( diff --git a/coreth/plugin/evm/message/cross_chain_handler.go b/coreth/plugin/evm/message/cross_chain_handler.go index 5a810d34..dc568d7a 100644 --- a/coreth/plugin/evm/message/cross_chain_handler.go +++ b/coreth/plugin/evm/message/cross_chain_handler.go @@ -9,8 +9,10 @@ import ( "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/coreth/internal/ethapi" "github.com/ava-labs/coreth/rpc" + "github.com/ethereum/go-ethereum/log" ) @@ -41,19 +43,27 @@ func (c *crossChainHandler) HandleEthCallRequest(ctx context.Context, requesting transactionArgs := ethapi.TransactionArgs{} err := json.Unmarshal(ethCallRequest.RequestArgs, &transactionArgs) if err != nil { - log.Debug("error occurred with JSON 
unmarshalling ethCallRequest.RequestArgs", "err", err) + log.Error("error occurred with JSON unmarshalling ethCallRequest.RequestArgs", "err", err) return nil, nil } - result, err := ethapi.DoCall(ctx, c.backend, transactionArgs, lastAcceptedBlockNumberOrHash, nil, c.backend.RPCEVMTimeout(), c.backend.RPCGasCap()) + result, err := ethapi.DoCall( + ctx, + c.backend, + transactionArgs, + lastAcceptedBlockNumberOrHash, + nil, + nil, + c.backend.RPCEVMTimeout(), + c.backend.RPCGasCap()) if err != nil { - log.Debug("error occurred with EthCall", "err", err, "transactionArgs", ethCallRequest.RequestArgs, "blockNumberOrHash", lastAcceptedBlockNumberOrHash) + log.Error("error occurred with EthCall", "err", err, "transactionArgs", ethCallRequest.RequestArgs, "blockNumberOrHash", lastAcceptedBlockNumberOrHash) return nil, nil } executionResult, err := json.Marshal(&result) if err != nil { - log.Debug("error occurred with JSON marshalling result", "err", err) + log.Error("error occurred with JSON marshalling result", "err", err) return nil, nil } @@ -63,7 +73,7 @@ func (c *crossChainHandler) HandleEthCallRequest(ctx context.Context, requesting responseBytes, err := c.crossChainCodec.Marshal(Version, response) if err != nil { - log.Warn("error occurred with marshalling EthCallResponse", "err", err, "EthCallResponse", response) + log.Error("error occurred with marshalling EthCallResponse", "err", err, "EthCallResponse", response) return nil, nil } diff --git a/coreth/plugin/evm/message/handler.go b/coreth/plugin/evm/message/handler.go index ad8f7665..a25ae7e8 100644 --- a/coreth/plugin/evm/message/handler.go +++ b/coreth/plugin/evm/message/handler.go @@ -36,7 +36,7 @@ func (NoopMempoolGossipHandler) HandleEthTxs(nodeID ids.NodeID, msg EthTxsGossip } // RequestHandler interface handles incoming requests from peers -// Must have methods in format of handleType(context.Context, ids.ShortID, uint32, request Type) error +// Must have methods in format of handleType(context.Context, 
ids.NodeID, uint32, request Type) error // so that the Request object of relevant Type can invoke its respective handle method // on this struct. // Also see GossipHandler for implementation style. @@ -45,6 +45,8 @@ type RequestHandler interface { HandleAtomicTrieLeafsRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, leafsRequest LeafsRequest) ([]byte, error) HandleBlockRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, request BlockRequest) ([]byte, error) HandleCodeRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, codeRequest CodeRequest) ([]byte, error) + HandleMessageSignatureRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, signatureRequest MessageSignatureRequest) ([]byte, error) + HandleBlockSignatureRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, signatureRequest BlockSignatureRequest) ([]byte, error) } // ResponseHandler handles response for a sent request @@ -74,6 +76,14 @@ func (NoopRequestHandler) HandleCodeRequest(ctx context.Context, nodeID ids.Node return nil, nil } +func (NoopRequestHandler) HandleMessageSignatureRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, signatureRequest MessageSignatureRequest) ([]byte, error) { + return nil, nil +} + +func (NoopRequestHandler) HandleBlockSignatureRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, signatureRequest BlockSignatureRequest) ([]byte, error) { + return nil, nil +} + // CrossChainRequestHandler interface handles incoming requests from another chain type CrossChainRequestHandler interface { HandleEthCallRequest(ctx context.Context, requestingchainID ids.ID, requestID uint32, ethCallRequest EthCallRequest) ([]byte, error) diff --git a/coreth/plugin/evm/message/handler_test.go b/coreth/plugin/evm/message/handler_test.go index 2c1c4a6a..a27b1f9d 100644 --- a/coreth/plugin/evm/message/handler_test.go +++ b/coreth/plugin/evm/message/handler_test.go @@ -54,9 +54,9 @@ func TestNoopHandler(t 
*testing.T) { handler := NoopMempoolGossipHandler{} - err := handler.HandleAtomicTx(ids.EmptyNodeID, AtomicTxGossip{}) + err := handler.HandleEthTxs(ids.EmptyNodeID, EthTxsGossip{}) assert.NoError(err) - err = handler.HandleEthTxs(ids.EmptyNodeID, EthTxsGossip{}) + err = handler.HandleAtomicTx(ids.EmptyNodeID, AtomicTxGossip{}) assert.NoError(err) } diff --git a/coreth/plugin/evm/message/leafs_request.go b/coreth/plugin/evm/message/leafs_request.go index 8338c4db..22629e62 100644 --- a/coreth/plugin/evm/message/leafs_request.go +++ b/coreth/plugin/evm/message/leafs_request.go @@ -12,6 +12,8 @@ import ( "github.com/ethereum/go-ethereum/log" ) +const MaxCodeHashesPerRequest = 5 + var _ Request = LeafsRequest{} // NodeType outlines the trie that a leaf node belongs to diff --git a/coreth/plugin/evm/message/leafs_request_test.go b/coreth/plugin/evm/message/leafs_request_test.go index fab4d4fa..ab6cab51 100644 --- a/coreth/plugin/evm/message/leafs_request_test.go +++ b/coreth/plugin/evm/message/leafs_request_test.go @@ -177,7 +177,9 @@ type mockHandler struct { handleStateTrieCalled, handleAtomicTrieCalled, handleBlockRequestCalled, - handleCodeRequestCalled bool + handleCodeRequestCalled, + handleMessageSignatureCalled, + handleBlockSignatureCalled bool } func (m *mockHandler) HandleStateTrieLeafsRequest(context.Context, ids.NodeID, uint32, LeafsRequest) ([]byte, error) { @@ -200,6 +202,15 @@ func (m *mockHandler) HandleCodeRequest(context.Context, ids.NodeID, uint32, Cod return nil, nil } +func (m *mockHandler) HandleMessageSignatureRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, signatureRequest MessageSignatureRequest) ([]byte, error) { + m.handleMessageSignatureCalled = true + return nil, nil +} +func (m *mockHandler) HandleBlockSignatureRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, signatureRequest BlockSignatureRequest) ([]byte, error) { + m.handleBlockSignatureCalled = true + return nil, nil +} + func (m *mockHandler) 
reset() { m.handleStateTrieCalled = false m.handleAtomicTrieCalled = false diff --git a/coreth/plugin/evm/message/message.go b/coreth/plugin/evm/message/message.go index 331074ef..c8c80a03 100644 --- a/coreth/plugin/evm/message/message.go +++ b/coreth/plugin/evm/message/message.go @@ -9,8 +9,6 @@ import ( "github.com/ava-labs/avalanchego/codec" - "github.com/ethereum/go-ethereum/common" - "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/units" ) @@ -20,7 +18,7 @@ const ( // any [EthTxsGossip] or [AtomicTxGossip] message. We do not limit inbound messages to // this size, however. Max inbound message size is enforced by the codec // (512KB). - EthMsgSoftCapSize = common.StorageSize(64 * units.KiB) + EthMsgSoftCapSize = 64 * units.KiB ) var ( diff --git a/coreth/plugin/evm/message/message_test.go b/coreth/plugin/evm/message/message_test.go index aa08b6c4..dbcdea2d 100644 --- a/coreth/plugin/evm/message/message_test.go +++ b/coreth/plugin/evm/message/message_test.go @@ -63,7 +63,7 @@ func TestEthTxsTooLarge(t *testing.T) { assert := assert.New(t) builtMsg := EthTxsGossip{ - Txs: utils.RandomBytes(1024 * units.KiB), + Txs: utils.RandomBytes(maxMessageSize), } _, err := BuildGossipMessage(Codec, builtMsg) assert.Error(err) diff --git a/coreth/plugin/evm/message/signature_request.go b/coreth/plugin/evm/message/signature_request.go new file mode 100644 index 00000000..12771661 --- /dev/null +++ b/coreth/plugin/evm/message/signature_request.go @@ -0,0 +1,49 @@ +// (c) 2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package message + +import ( + "context" + "fmt" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/crypto/bls" +) + +var ( + _ Request = MessageSignatureRequest{} + _ Request = BlockSignatureRequest{} +) + +// MessageSignatureRequest is used to request a warp message's signature. 
+type MessageSignatureRequest struct { + MessageID ids.ID `serialize:"true"` +} + +func (s MessageSignatureRequest) String() string { + return fmt.Sprintf("MessageSignatureRequest(MessageID=%s)", s.MessageID.String()) +} + +func (s MessageSignatureRequest) Handle(ctx context.Context, nodeID ids.NodeID, requestID uint32, handler RequestHandler) ([]byte, error) { + return handler.HandleMessageSignatureRequest(ctx, nodeID, requestID, s) +} + +// BlockSignatureRequest is used to request the signature of a specific block. +type BlockSignatureRequest struct { + BlockID ids.ID `serialize:"true"` +} + +func (s BlockSignatureRequest) String() string { + return fmt.Sprintf("BlockSignatureRequest(BlockID=%s)", s.BlockID.String()) +} + +func (s BlockSignatureRequest) Handle(ctx context.Context, nodeID ids.NodeID, requestID uint32, handler RequestHandler) ([]byte, error) { + return handler.HandleBlockSignatureRequest(ctx, nodeID, requestID, s) +} + +// SignatureResponse is the response to a BlockSignatureRequest or MessageSignatureRequest. +// The response contains a BLS signature of the requested message, signed by the responding node's BLS private key. +type SignatureResponse struct { + Signature [bls.SignatureLen]byte `serialize:"true"` +} diff --git a/coreth/plugin/evm/message/signature_request_test.go b/coreth/plugin/evm/message/signature_request_test.go new file mode 100644 index 00000000..59614fbb --- /dev/null +++ b/coreth/plugin/evm/message/signature_request_test.go @@ -0,0 +1,73 @@ +// (c) 2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package message + +import ( + "encoding/base64" + "encoding/hex" + "testing" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/crypto/bls" + "github.com/stretchr/testify/require" +) + +// TestMarshalMessageSignatureRequest asserts that the structure or serialization logic hasn't changed, primarily to +// ensure compatibility with the network.
+func TestMarshalMessageSignatureRequest(t *testing.T) { + signatureRequest := MessageSignatureRequest{ + MessageID: ids.ID{68, 79, 70, 65, 72, 73, 64, 107}, + } + + base64MessageSignatureRequest := "AABET0ZBSElAawAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==" + signatureRequestBytes, err := Codec.Marshal(Version, signatureRequest) + require.NoError(t, err) + require.Equal(t, base64MessageSignatureRequest, base64.StdEncoding.EncodeToString(signatureRequestBytes)) + + var s MessageSignatureRequest + _, err = Codec.Unmarshal(signatureRequestBytes, &s) + require.NoError(t, err) + require.Equal(t, signatureRequest.MessageID, s.MessageID) +} + +// TestMarshalBlockSignatureRequest asserts that the structure or serialization logic hasn't changed, primarily to +// ensure compatibility with the network. +func TestMarshalBlockSignatureRequest(t *testing.T) { + signatureRequest := BlockSignatureRequest{ + BlockID: ids.ID{68, 79, 70, 65, 72, 73, 64, 107}, + } + + base64BlockSignatureRequest := "AABET0ZBSElAawAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==" + signatureRequestBytes, err := Codec.Marshal(Version, signatureRequest) + require.NoError(t, err) + require.Equal(t, base64BlockSignatureRequest, base64.StdEncoding.EncodeToString(signatureRequestBytes)) + + var s BlockSignatureRequest + _, err = Codec.Unmarshal(signatureRequestBytes, &s) + require.NoError(t, err) + require.Equal(t, signatureRequest.BlockID, s.BlockID) +} + +// TestMarshalSignatureResponse asserts that the structure or serialization logic hasn't changed, primarily to +// ensure compatibility with the network. 
+func TestMarshalSignatureResponse(t *testing.T) { + var signature [bls.SignatureLen]byte + sig, err := hex.DecodeString("0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef") + require.NoError(t, err, "failed to decode string to hex") + + copy(signature[:], sig) + signatureResponse := SignatureResponse{ + Signature: signature, + } + + base64SignatureResponse := "AAABI0VniavN7wEjRWeJq83vASNFZ4mrze8BI0VniavN7wEjRWeJq83vASNFZ4mrze8BI0VniavN7wEjRWeJq83vASNFZ4mrze8BI0VniavN7wEjRWeJq83vASNFZ4mrze8=" + signatureResponseBytes, err := Codec.Marshal(Version, signatureResponse) + require.NoError(t, err) + require.Equal(t, base64SignatureResponse, base64.StdEncoding.EncodeToString(signatureResponseBytes)) + + var s SignatureResponse + _, err = Codec.Unmarshal(signatureResponseBytes, &s) + require.NoError(t, err) + require.Equal(t, signatureResponse.Signature, s.Signature) +} diff --git a/coreth/plugin/evm/network_handler.go b/coreth/plugin/evm/network_handler.go new file mode 100644 index 00000000..33670ed4 --- /dev/null +++ b/coreth/plugin/evm/network_handler.go @@ -0,0 +1,72 @@ +// (c) 2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package evm + +import ( + "context" + + "github.com/ava-labs/avalanchego/codec" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/coreth/metrics" + "github.com/ava-labs/coreth/plugin/evm/message" + syncHandlers "github.com/ava-labs/coreth/sync/handlers" + syncStats "github.com/ava-labs/coreth/sync/handlers/stats" + "github.com/ava-labs/coreth/trie" + "github.com/ava-labs/coreth/warp" + warpHandlers "github.com/ava-labs/coreth/warp/handlers" + "github.com/ethereum/go-ethereum/ethdb" +) + +var _ message.RequestHandler = &networkHandler{} + +type networkHandler struct { + stateTrieLeafsRequestHandler *syncHandlers.LeafsRequestHandler + atomicTrieLeafsRequestHandler *syncHandlers.LeafsRequestHandler + blockRequestHandler *syncHandlers.BlockRequestHandler + codeRequestHandler *syncHandlers.CodeRequestHandler + signatureRequestHandler *warpHandlers.SignatureRequestHandler +} + +// newNetworkHandler constructs the handler for serving network requests. +func newNetworkHandler( + provider syncHandlers.SyncDataProvider, + diskDB ethdb.KeyValueReader, + evmTrieDB *trie.Database, + atomicTrieDB *trie.Database, + warpBackend warp.Backend, + networkCodec codec.Manager, +) message.RequestHandler { + syncStats := syncStats.NewHandlerStats(metrics.Enabled) + return &networkHandler{ + stateTrieLeafsRequestHandler: syncHandlers.NewLeafsRequestHandler(evmTrieDB, provider, networkCodec, syncStats), + atomicTrieLeafsRequestHandler: syncHandlers.NewLeafsRequestHandler(atomicTrieDB, nil, networkCodec, syncStats), + blockRequestHandler: syncHandlers.NewBlockRequestHandler(provider, networkCodec, syncStats), + codeRequestHandler: syncHandlers.NewCodeRequestHandler(diskDB, networkCodec, syncStats), + signatureRequestHandler: warpHandlers.NewSignatureRequestHandler(warpBackend, networkCodec), + } +} + +func (n networkHandler) HandleStateTrieLeafsRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, leafsRequest message.LeafsRequest) ([]byte, error) { + return 
n.stateTrieLeafsRequestHandler.OnLeafsRequest(ctx, nodeID, requestID, leafsRequest) +} + +func (n networkHandler) HandleAtomicTrieLeafsRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, leafsRequest message.LeafsRequest) ([]byte, error) { + return n.atomicTrieLeafsRequestHandler.OnLeafsRequest(ctx, nodeID, requestID, leafsRequest) +} + +func (n networkHandler) HandleBlockRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, blockRequest message.BlockRequest) ([]byte, error) { + return n.blockRequestHandler.OnBlockRequest(ctx, nodeID, requestID, blockRequest) +} + +func (n networkHandler) HandleCodeRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, codeRequest message.CodeRequest) ([]byte, error) { + return n.codeRequestHandler.OnCodeRequest(ctx, nodeID, requestID, codeRequest) +} + +func (n networkHandler) HandleMessageSignatureRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, messageSignatureRequest message.MessageSignatureRequest) ([]byte, error) { + return n.signatureRequestHandler.OnMessageSignatureRequest(ctx, nodeID, requestID, messageSignatureRequest) +} + +func (n networkHandler) HandleBlockSignatureRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, blockSignatureRequest message.BlockSignatureRequest) ([]byte, error) { + return n.signatureRequestHandler.OnBlockSignatureRequest(ctx, nodeID, requestID, blockSignatureRequest) +} diff --git a/coreth/plugin/evm/service.go b/coreth/plugin/evm/service.go index 5d5af423..d079648f 100644 --- a/coreth/plugin/evm/service.go +++ b/coreth/plugin/evm/service.go @@ -86,7 +86,7 @@ type VersionReply struct { } // ClientVersion returns the version of the VM running -func (service *AvaxAPI) Version(r *http.Request, args *struct{}, reply *VersionReply) error { +func (service *AvaxAPI) Version(r *http.Request, _ *struct{}, reply *VersionReply) error { reply.Version = Version return nil } @@ -113,16 +113,16 @@ func (service *AvaxAPI) ExportKey(r 
*http.Request, args *ExportKeyArgs, reply *E return fmt.Errorf("couldn't parse %s to address: %s", args.Address, err) } + service.vm.ctx.Lock.Lock() + defer service.vm.ctx.Lock.Unlock() + db, err := service.vm.ctx.Keystore.GetDatabase(args.Username, args.Password) if err != nil { return fmt.Errorf("problem retrieving user '%s': %w", args.Username, err) } defer db.Close() - user := user{ - secpFactory: &service.vm.secpFactory, - db: db, - } + user := user{db: db} reply.PrivateKey, err = user.getKey(address) if err != nil { return fmt.Errorf("problem retrieving private key: %w", err) @@ -147,16 +147,16 @@ func (service *AvaxAPI) ImportKey(r *http.Request, args *ImportKeyArgs, reply *a reply.Address = GetEthAddress(args.PrivateKey).Hex() + service.vm.ctx.Lock.Lock() + defer service.vm.ctx.Lock.Unlock() + db, err := service.vm.ctx.Keystore.GetDatabase(args.Username, args.Password) if err != nil { return fmt.Errorf("problem retrieving data: %w", err) } defer db.Close() - user := user{ - secpFactory: &service.vm.secpFactory, - db: db, - } + user := user{db: db} if err := user.putAddress(args.PrivateKey); err != nil { return fmt.Errorf("problem saving key %w", err) } @@ -174,7 +174,7 @@ type ImportArgs struct { SourceChain string `json:"sourceChain"` // The address that will receive the imported funds - To string `json:"to"` + To common.Address `json:"to"` } // ImportAVAX is a deprecated name for Import. @@ -192,10 +192,8 @@ func (service *AvaxAPI) Import(_ *http.Request, args *ImportArgs, response *api. 
return fmt.Errorf("problem parsing chainID %q: %w", args.SourceChain, err) } - to, err := ParseEthAddress(args.To) - if err != nil { // Parse address - return fmt.Errorf("couldn't parse argument 'to' to an address: %w", err) - } + service.vm.ctx.Lock.Lock() + defer service.vm.ctx.Lock.Unlock() // Get the user's info db, err := service.vm.ctx.Keystore.GetDatabase(args.Username, args.Password) @@ -204,10 +202,7 @@ func (service *AvaxAPI) Import(_ *http.Request, args *ImportArgs, response *api. } defer db.Close() - user := user{ - secpFactory: &service.vm.secpFactory, - db: db, - } + user := user{db: db} privKeys, err := user.getKeys() if err != nil { // Get keys return fmt.Errorf("couldn't get keys controlled by the user: %w", err) @@ -224,13 +219,13 @@ func (service *AvaxAPI) Import(_ *http.Request, args *ImportArgs, response *api. baseFee = args.BaseFee.ToInt() } - tx, err := service.vm.newImportTx(chainID, to, baseFee, privKeys) + tx, err := service.vm.newImportTx(chainID, args.To, baseFee, privKeys) if err != nil { return err } response.TxID = tx.ID() - return service.vm.issueTx(tx, true /*=local*/) + return service.vm.mempool.AddLocalTx(tx) } // ExportAVAXArgs are the arguments to ExportAVAX @@ -243,8 +238,12 @@ type ExportAVAXArgs struct { // Amount of asset to send Amount json.Uint64 `json:"amount"` - // ID of the address that will receive the AVAX. This address includes the - // chainID, which is used to determine what the destination chain is. + // Chain the funds are going to. Optional. Used if To address does not + // include the chainID. + TargetChain string `json:"targetChain"` + + // ID of the address that will receive the AVAX. This address may include + // the chainID, which is used to determine what the destination chain is. To string `json:"to"` } @@ -278,11 +277,22 @@ func (service *AvaxAPI) Export(_ *http.Request, args *ExportArgs, response *api. 
return errors.New("argument 'amount' must be > 0") } + // Get the chainID and parse the to address chainID, to, err := service.vm.ParseAddress(args.To) if err != nil { - return err + chainID, err = service.vm.ctx.BCLookup.Lookup(args.TargetChain) + if err != nil { + return err + } + to, err = ids.ShortFromString(args.To) + if err != nil { + return err + } } + service.vm.ctx.Lock.Lock() + defer service.vm.ctx.Lock.Unlock() + // Get this user's data db, err := service.vm.ctx.Keystore.GetDatabase(args.Username, args.Password) if err != nil { @@ -290,10 +300,7 @@ func (service *AvaxAPI) Export(_ *http.Request, args *ExportArgs, response *api. } defer db.Close() - user := user{ - secpFactory: &service.vm.secpFactory, - db: db, - } + user := user{db: db} privKeys, err := user.getKeys() if err != nil { return fmt.Errorf("couldn't get addresses controlled by the user: %w", err) @@ -324,7 +331,7 @@ func (service *AvaxAPI) Export(_ *http.Request, args *ExportArgs, response *api. } response.TxID = tx.ID() - return service.vm.issueTx(tx, true /*=local*/) + return service.vm.mempool.AddLocalTx(tx) } // GetUTXOs gets all utxos for passed in addresses @@ -350,7 +357,7 @@ func (service *AvaxAPI) GetUTXOs(r *http.Request, args *api.GetUTXOsArgs, reply addrSet := set.Set[ids.ShortID]{} for _, addrStr := range args.Addresses { - addr, err := service.vm.ParseLocalAddress(addrStr) + addr, err := service.vm.ParseServiceAddress(addrStr) if err != nil { return fmt.Errorf("couldn't parse address %q: %w", addrStr, err) } @@ -360,7 +367,7 @@ func (service *AvaxAPI) GetUTXOs(r *http.Request, args *api.GetUTXOsArgs, reply startAddr := ids.ShortEmpty startUTXO := ids.Empty if args.StartIndex.Address != "" || args.StartIndex.UTXO != "" { - startAddr, err = service.vm.ParseLocalAddress(args.StartIndex.Address) + startAddr, err = service.vm.ParseServiceAddress(args.StartIndex.Address) if err != nil { return fmt.Errorf("couldn't parse start index address %q: %w", args.StartIndex.Address, err) } @@ 
-370,6 +377,9 @@ func (service *AvaxAPI) GetUTXOs(r *http.Request, args *api.GetUTXOsArgs, reply } } + service.vm.ctx.Lock.Lock() + defer service.vm.ctx.Lock.Unlock() + utxos, endAddr, endUTXOID, err := service.vm.GetAtomicUTXOs( sourceChain, addrSet, @@ -406,7 +416,6 @@ func (service *AvaxAPI) GetUTXOs(r *http.Request, args *api.GetUTXOsArgs, reply return nil } -// IssueTx ... func (service *AvaxAPI) IssueTx(r *http.Request, args *api.FormattedTx, response *api.JSONTxID) error { log.Info("EVM: IssueTx called") @@ -424,7 +433,11 @@ func (service *AvaxAPI) IssueTx(r *http.Request, args *api.FormattedTx, response } response.TxID = tx.ID() - return service.vm.issueTx(tx, true /*=local*/) + + service.vm.ctx.Lock.Lock() + defer service.vm.ctx.Lock.Unlock() + + return service.vm.mempool.AddLocalTx(tx) } // GetAtomicTxStatusReply defines the GetAtomicTxStatus replies returned from the API @@ -441,6 +454,9 @@ func (service *AvaxAPI) GetAtomicTxStatus(r *http.Request, args *api.JSONTxID, r return errNilTxID } + service.vm.ctx.Lock.Lock() + defer service.vm.ctx.Lock.Unlock() + _, status, height, _ := service.vm.getAtomicTx(args.TxID) reply.Status = status @@ -464,6 +480,9 @@ func (service *AvaxAPI) GetAtomicTx(r *http.Request, args *api.GetTxArgs, reply return errNilTxID } + service.vm.ctx.Lock.Lock() + defer service.vm.ctx.Lock.Unlock() + tx, status, height, err := service.vm.getAtomicTx(args.TxID) if err != nil { return err diff --git a/coreth/plugin/evm/shared_memory_writer.go b/coreth/plugin/evm/shared_memory_writer.go new file mode 100644 index 00000000..7e6de6f8 --- /dev/null +++ b/coreth/plugin/evm/shared_memory_writer.go @@ -0,0 +1,37 @@ +// (c) 2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package evm + +import ( + "github.com/ava-labs/avalanchego/chains/atomic" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/coreth/precompile/precompileconfig" +) + +var _ precompileconfig.SharedMemoryWriter = &sharedMemoryWriter{} + +type sharedMemoryWriter struct { + requests map[ids.ID]*atomic.Requests +} + +func NewSharedMemoryWriter() *sharedMemoryWriter { + return &sharedMemoryWriter{ + requests: make(map[ids.ID]*atomic.Requests), + } +} + +func (s *sharedMemoryWriter) AddSharedMemoryRequests(chainID ids.ID, requests *atomic.Requests) { + mergeAtomicOpsToMap(s.requests, chainID, requests) +} + +// mergeAtomicOps merges atomic ops for [chainID] represented by [requests] +// to the [output] map provided. +func mergeAtomicOpsToMap(output map[ids.ID]*atomic.Requests, chainID ids.ID, requests *atomic.Requests) { + if request, exists := output[chainID]; exists { + request.PutRequests = append(request.PutRequests, requests.PutRequests...) + request.RemoveRequests = append(request.RemoveRequests, requests.RemoveRequests...) 
+ } else { + output[chainID] = requests + } +} diff --git a/coreth/plugin/evm/syncervm_client.go b/coreth/plugin/evm/syncervm_client.go index 41f076e6..6bccb0f4 100644 --- a/coreth/plugin/evm/syncervm_client.go +++ b/coreth/plugin/evm/syncervm_client.go @@ -18,12 +18,12 @@ import ( "github.com/ava-labs/coreth/core/rawdb" "github.com/ava-labs/coreth/core/state/snapshot" "github.com/ava-labs/coreth/eth" - "github.com/ava-labs/coreth/ethdb" "github.com/ava-labs/coreth/params" "github.com/ava-labs/coreth/plugin/evm/message" syncclient "github.com/ava-labs/coreth/sync/client" "github.com/ava-labs/coreth/sync/statesync" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" ) @@ -42,7 +42,8 @@ type stateSyncClientConfig struct { // Specifies the number of blocks behind the latest state summary that the chain must be // in order to prefer performing state sync over falling back to the normal bootstrapping // algorithm. - stateSyncMinBlocks uint64 + stateSyncMinBlocks uint64 + stateSyncRequestSize uint16 // number of key/value pairs to ask peers for per request lastAcceptedHeight uint64 @@ -263,7 +264,7 @@ func (client *stateSyncerClient) syncBlocks(ctx context.Context, fromHash common } blocks, err := client.client.GetBlocks(ctx, nextHash, nextHeight, parentsPerRequest) if err != nil { - log.Warn("could not get blocks from peer", "err", err, "nextHash", nextHash, "remaining", i+1) + log.Error("could not get blocks from peer", "err", err, "nextHash", nextHash, "remaining", i+1) return err } for _, block := range blocks { @@ -282,7 +283,7 @@ func (client *stateSyncerClient) syncBlocks(ctx context.Context, fromHash common func (client *stateSyncerClient) syncAtomicTrie(ctx context.Context) error { log.Info("atomic tx: sync starting", "root", client.syncSummary.AtomicRoot) - atomicSyncer, err := client.atomicBackend.Syncer(client.client, client.syncSummary.AtomicRoot, client.syncSummary.BlockNumber) + 
atomicSyncer, err := client.atomicBackend.Syncer(client.client, client.syncSummary.AtomicRoot, client.syncSummary.BlockNumber, client.stateSyncRequestSize) if err != nil { return err } @@ -303,6 +304,7 @@ func (client *stateSyncerClient) syncStateTrie(ctx context.Context) error { DB: client.chaindb, MaxOutstandingCodeHashes: statesync.DefaultMaxOutstandingCodeHashes, NumCodeFetchingWorkers: statesync.DefaultNumCodeFetchingWorkers, + RequestSize: client.stateSyncRequestSize, }) if err != nil { return err diff --git a/coreth/plugin/evm/syncervm_test.go b/coreth/plugin/evm/syncervm_test.go index 156e59e2..251107fd 100644 --- a/coreth/plugin/evm/syncervm_test.go +++ b/coreth/plugin/evm/syncervm_test.go @@ -16,7 +16,8 @@ import ( "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/chains/atomic" - "github.com/ava-labs/avalanchego/database/manager" + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/choices" @@ -26,22 +27,22 @@ import ( "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/units" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/rlp" - "github.com/ava-labs/coreth/accounts/keystore" "github.com/ava-labs/coreth/consensus/dummy" "github.com/ava-labs/coreth/constants" "github.com/ava-labs/coreth/core" "github.com/ava-labs/coreth/core/rawdb" "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/ethdb" "github.com/ava-labs/coreth/metrics" "github.com/ava-labs/coreth/params" + "github.com/ava-labs/coreth/predicate" statesyncclient "github.com/ava-labs/coreth/sync/client" "github.com/ava-labs/coreth/sync/statesync" "github.com/ava-labs/coreth/trie" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/log" + 
"github.com/ethereum/go-ethereum/rlp" ) func TestSkipStateSync(t *testing.T) { @@ -52,7 +53,6 @@ func TestSkipStateSync(t *testing.T) { syncMode: block.StateSyncSkipped, } vmSetup := createSyncServerAndClientVMs(t, test) - defer vmSetup.Teardown(t) testSyncerVM(t, vmSetup, test) } @@ -65,7 +65,6 @@ func TestStateSyncFromScratch(t *testing.T) { syncMode: block.StateSyncStatic, } vmSetup := createSyncServerAndClientVMs(t, test) - defer vmSetup.Teardown(t) testSyncerVM(t, vmSetup, test) } @@ -91,7 +90,7 @@ func TestStateSyncToggleEnabledToDisabled(t *testing.T) { reqCount++ // Fail all requests after number 50 to interrupt the sync if reqCount > 50 { - if err := syncerVM.AppRequestFailed(context.Background(), nodeID, requestID); err != nil { + if err := syncerVM.AppRequestFailed(context.Background(), nodeID, requestID, commonEng.ErrTimeout); err != nil { panic(err) } cancel := syncerVM.StateSyncClient.(*stateSyncerClient).cancel @@ -107,7 +106,6 @@ func TestStateSyncToggleEnabledToDisabled(t *testing.T) { expectedErr: context.Canceled, } vmSetup := createSyncServerAndClientVMs(t, test) - defer vmSetup.Teardown(t) // Perform sync resulting in early termination. 
testSyncerVM(t, vmSetup, test) @@ -132,7 +130,7 @@ func TestStateSyncToggleEnabledToDisabled(t *testing.T) { if err := syncDisabledVM.Initialize( context.Background(), vmSetup.syncerVM.ctx, - vmSetup.syncerDBManager, + vmSetup.syncerDB, []byte(genesisJSONLatest), nil, []byte(stateSyncDisabledConfigJSON), @@ -179,7 +177,7 @@ func TestStateSyncToggleEnabledToDisabled(t *testing.T) { } } // Verify the snapshot disk layer matches the last block root - lastRoot := syncDisabledVM.blockChain.CurrentBlock().Root() + lastRoot := syncDisabledVM.blockChain.CurrentBlock().Root if err := syncDisabledVM.blockChain.Snapshots().Verify(lastRoot); err != nil { t.Fatal(err) } @@ -195,7 +193,7 @@ func TestStateSyncToggleEnabledToDisabled(t *testing.T) { if err := syncReEnabledVM.Initialize( context.Background(), vmSetup.syncerVM.ctx, - vmSetup.syncerDBManager, + vmSetup.syncerDB, []byte(genesisJSONLatest), nil, []byte(configJSON), @@ -232,61 +230,69 @@ func TestStateSyncToggleEnabledToDisabled(t *testing.T) { testSyncerVM(t, vmSetup, test) } -func createSyncServerAndClientVMs(t *testing.T, test syncTest) *syncVMSetup { +func TestVMShutdownWhileSyncing(t *testing.T) { var ( - serverVM, syncerVM *VM + lock sync.Mutex + vmSetup *syncVMSetup ) - // If there is an error shutdown the VMs if they have been instantiated - defer func() { - // If the test has not already failed, shut down the VMs since the caller - // will not get the chance to shut them down. - if !t.Failed() { - return - } + reqCount := 0 + test := syncTest{ + syncableInterval: 256, + stateSyncMinBlocks: 50, // must be less than [syncableInterval] to perform sync + syncMode: block.StateSyncStatic, + responseIntercept: func(syncerVM *VM, nodeID ids.NodeID, requestID uint32, response []byte) { + lock.Lock() + defer lock.Unlock() - // If the test already failed, shut down the VMs if they were instantiated. 
- if serverVM != nil { - log.Info("Shutting down server VM") - if err := serverVM.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } - } - if syncerVM != nil { - log.Info("Shutting down syncerVM") - if err := syncerVM.Shutdown(context.Background()); err != nil { - t.Fatal(err) + reqCount++ + // Shutdown the VM after 50 requests to interrupt the sync + if reqCount == 50 { + // Note this verifies the VM shutdown does not time out while syncing. + require.NoError(t, vmSetup.shutdownOnceSyncerVM.Shutdown(context.Background())) + } else if reqCount < 50 { + err := syncerVM.AppResponse(context.Background(), nodeID, requestID, response) + require.NoError(t, err) } - } - }() + }, + expectedErr: context.Canceled, + } + vmSetup = createSyncServerAndClientVMs(t, test) + // Perform sync resulting in early termination. + testSyncerVM(t, vmSetup, test) +} - // configure [serverVM] - importAmount := 2000000 * units.Avax // 2M avax - _, serverVM, _, serverAtomicMemory, serverAppSender := GenesisVMWithUTXOs( - t, - true, - "", - "", - "", - map[ids.ShortID]uint64{ +func createSyncServerAndClientVMs(t *testing.T, test syncTest) *syncVMSetup { + var ( + require = require.New(t) + importAmount = 2000000 * units.Avax // 2M avax + alloc = map[ids.ShortID]uint64{ testShortIDAddrs[0]: importAmount, - }, + } ) + _, serverVM, _, serverAtomicMemory, serverAppSender := GenesisVMWithUTXOs( + t, true, "", "", "", alloc, + ) + t.Cleanup(func() { + log.Info("Shutting down server VM") + require.NoError(serverVM.Shutdown(context.Background())) + }) var ( importTx, exportTx *Tx err error ) generateAndAcceptBlocks(t, serverVM, parentsToGet, func(i int, gen *core.BlockGen) { + b, err := predicate.NewResults().Bytes() + if err != nil { + t.Fatal(err) + } + gen.AppendExtra(b) switch i { case 0: // spend the UTXOs from shared memory importTx, err = serverVM.newImportTx(serverVM.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) - if err != nil { - 
t.Fatal(err) - } - if err := serverVM.issueTx(importTx, true /*=local*/); err != nil { - t.Fatal(err) - } + require.NoError(err) + require.NoError(serverVM.mempool.AddLocalTx(importTx)) case 1: // export some of the imported UTXOs to test exportTx is properly synced exportTx, err = serverVM.newExportTx( @@ -297,19 +303,13 @@ func createSyncServerAndClientVMs(t *testing.T, test syncTest) *syncVMSetup { initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}, ) - if err != nil { - t.Fatal(err) - } - if err := serverVM.issueTx(exportTx, true /*=local*/); err != nil { - t.Fatal(err) - } + require.NoError(err) + require.NoError(serverVM.mempool.AddLocalTx(exportTx)) default: // Generate simple transfer transactions. pk := testKeys[0].ToECDSA() tx := types.NewTransaction(gen.TxNonce(testEthAddrs[0]), testEthAddrs[1], common.Big1, params.TxGas, initialBaseFee, nil) signedTx, err := types.SignTx(tx, types.NewEIP155Signer(serverVM.chainID), pk) - if err != nil { - t.Fatal(t) - } + require.NoError(err) gen.AddTx(signedTx) } }) @@ -319,8 +319,8 @@ func createSyncServerAndClientVMs(t *testing.T, test syncTest) *syncVMSetup { // fetching a state summary. 
serverAtomicTrie := serverVM.atomicTrie.(*atomicTrie) serverAtomicTrie.commitInterval = test.syncableInterval - assert.NoError(t, serverAtomicTrie.commit(test.syncableInterval, serverAtomicTrie.LastAcceptedRoot())) - assert.NoError(t, serverVM.db.Commit()) + require.NoError(serverAtomicTrie.commit(test.syncableInterval, serverAtomicTrie.LastAcceptedRoot())) + require.NoError(serverVM.db.Commit()) serverSharedMemories := newSharedMemories(serverAtomicMemory, serverVM.ctx.ChainID, serverVM.ctx.XChainID) serverSharedMemories.assertOpsApplied(t, importTx.mustAtomicOps()) @@ -336,37 +336,28 @@ func createSyncServerAndClientVMs(t *testing.T, test syncTest) *syncVMSetup { lastAccepted := serverVM.blockChain.LastAcceptedBlock() patchedBlock := patchBlock(lastAccepted, root, serverVM.chaindb) blockBytes, err := rlp.EncodeToBytes(patchedBlock) - if err != nil { - t.Fatal(err) - } + require.NoError(err) internalBlock, err := serverVM.parseBlock(context.Background(), blockBytes) - if err != nil { - t.Fatal(err) - } + require.NoError(err) internalBlock.(*Block).SetStatus(choices.Accepted) - assert.NoError(t, serverVM.State.SetLastAcceptedBlock(internalBlock)) + require.NoError(serverVM.State.SetLastAcceptedBlock(internalBlock)) // patch syncableInterval for test serverVM.StateSyncServer.(*stateSyncServer).syncableInterval = test.syncableInterval // initialise [syncerVM] with blank genesis state - stateSyncEnabledJSON := fmt.Sprintf("{\"state-sync-enabled\":true, \"state-sync-min-blocks\": %d}", test.stateSyncMinBlocks) - syncerEngineChan, syncerVM, syncerDBManager, syncerAtomicMemory, syncerAppSender := GenesisVMWithUTXOs( - t, - false, - "", - stateSyncEnabledJSON, - "", - map[ids.ShortID]uint64{ - testShortIDAddrs[0]: importAmount, - }, + stateSyncEnabledJSON := fmt.Sprintf(`{"state-sync-enabled":true, "state-sync-min-blocks": %d}`, test.stateSyncMinBlocks) + syncerEngineChan, syncerVM, syncerDB, syncerAtomicMemory, syncerAppSender := GenesisVMWithUTXOs( + t, false, "", 
stateSyncEnabledJSON, "", alloc, ) - if err := syncerVM.SetState(context.Background(), snow.StateSyncing); err != nil { - t.Fatal(err) - } + shutdownOnceSyncerVM := &shutdownOnceVM{VM: syncerVM} + t.Cleanup(func() { + require.NoError(shutdownOnceSyncerVM.Shutdown(context.Background())) + }) + require.NoError(syncerVM.SetState(context.Background(), snow.StateSyncing)) enabled, err := syncerVM.StateSyncEnabled(context.Background()) - assert.NoError(t, err) - assert.True(t, enabled) + require.NoError(err) + require.True(enabled) // override [syncerVM]'s commit interval so the atomic trie works correctly. syncerVM.atomicTrie.(*atomicTrie).commitInterval = test.syncableInterval @@ -383,19 +374,20 @@ func createSyncServerAndClientVMs(t *testing.T, test syncTest) *syncVMSetup { } // connect peer to [syncerVM] - assert.NoError(t, syncerVM.Connected( - context.Background(), - serverVM.ctx.NodeID, - statesyncclient.StateSyncVersion, - )) + require.NoError( + syncerVM.Connected( + context.Background(), + serverVM.ctx.NodeID, + statesyncclient.StateSyncVersion, + ), + ) // override [syncerVM]'s SendAppRequest function to trigger AppRequest on [serverVM] syncerAppSender.SendAppRequestF = func(ctx context.Context, nodeSet set.Set[ids.NodeID], requestID uint32, request []byte) error { nodeID, hasItem := nodeSet.Pop() - if !hasItem { - t.Fatal("expected nodeSet to contain at least 1 nodeID") - } - go serverVM.AppRequest(ctx, nodeID, requestID, time.Now().Add(1*time.Second), request) + require.True(hasItem, "expected nodeSet to contain at least 1 nodeID") + err := serverVM.AppRequest(ctx, nodeID, requestID, time.Now().Add(1*time.Second), request) + require.NoError(err) return nil } @@ -406,11 +398,12 @@ func createSyncServerAndClientVMs(t *testing.T, test syncTest) *syncVMSetup { importTx, exportTx, }, - fundedAccounts: accounts, - syncerVM: syncerVM, - syncerDBManager: syncerDBManager, - syncerEngineChan: syncerEngineChan, - syncerAtomicMemory: syncerAtomicMemory, + 
fundedAccounts: accounts, + syncerVM: syncerVM, + syncerDB: syncerDB, + syncerEngineChan: syncerEngineChan, + syncerAtomicMemory: syncerAtomicMemory, + shutdownOnceSyncerVM: shutdownOnceSyncerVM, } } @@ -423,17 +416,22 @@ type syncVMSetup struct { includedAtomicTxs []*Tx fundedAccounts map[*keystore.Key]*types.StateAccount - syncerVM *VM - syncerDBManager manager.Manager - syncerEngineChan <-chan commonEng.Message - syncerAtomicMemory *atomic.Memory + syncerVM *VM + syncerDB database.Database + syncerEngineChan <-chan commonEng.Message + syncerAtomicMemory *atomic.Memory + shutdownOnceSyncerVM *shutdownOnceVM } -// Teardown shuts down both VMs and asserts that both exit without error. -// Note: assumes both serverVM and sycnerVM have been initialized. -func (s *syncVMSetup) Teardown(t *testing.T) { - assert.NoError(t, s.serverVM.Shutdown(context.Background())) - assert.NoError(t, s.syncerVM.Shutdown(context.Background())) +type shutdownOnceVM struct { + *VM + shutdownOnce sync.Once +} + +func (vm *shutdownOnceVM) Shutdown(ctx context.Context) error { + var err error + vm.shutdownOnce.Do(func() { err = vm.VM.Shutdown(ctx) }) + return err } // syncTest contains both the actual VMs as well as the parameters with the expected output. 
@@ -448,6 +446,7 @@ type syncTest struct { func testSyncerVM(t *testing.T, vmSetup *syncVMSetup, test syncTest) { t.Helper() var ( + require = require.New(t) serverVM = vmSetup.serverVM includedAtomicTxs = vmSetup.includedAtomicTxs fundedAccounts = vmSetup.fundedAccounts @@ -455,67 +454,58 @@ func testSyncerVM(t *testing.T, vmSetup *syncVMSetup, test syncTest) { syncerEngineChan = vmSetup.syncerEngineChan syncerAtomicMemory = vmSetup.syncerAtomicMemory ) - // get last summary and test related methods summary, err := serverVM.GetLastStateSummary(context.Background()) - if err != nil { - t.Fatal("error getting state sync last summary", "err", err) - } + require.NoError(err, "error getting state sync last summary") parsedSummary, err := syncerVM.ParseStateSummary(context.Background(), summary.Bytes()) - if err != nil { - t.Fatal("error getting state sync last summary", "err", err) - } + require.NoError(err, "error parsing state summary") retrievedSummary, err := serverVM.GetStateSummary(context.Background(), parsedSummary.Height()) - if err != nil { - t.Fatal("error when checking if summary is accepted", "err", err) - } - assert.Equal(t, summary, retrievedSummary) + require.NoError(err, "error getting state sync summary at height") + require.Equal(summary, retrievedSummary) syncMode, err := parsedSummary.Accept(context.Background()) - if err != nil { - t.Fatal("unexpected error accepting state summary", "err", err) - } - if syncMode != test.syncMode { - t.Fatal("unexpected value returned from accept", "expected", test.syncMode, "got", syncMode) - } + require.NoError(err, "error accepting state summary") + require.Equal(syncMode, test.syncMode) if syncMode == block.StateSyncSkipped { return } + msg := <-syncerEngineChan - assert.Equal(t, commonEng.StateSyncDone, msg) + require.Equal(commonEng.StateSyncDone, msg) // If the test is expected to error, assert the correct error is returned and finish the test. 
err = syncerVM.StateSyncClient.Error() if test.expectedErr != nil { - assert.ErrorIs(t, err, test.expectedErr) - assertSyncPerformedHeights(t, syncerVM.chaindb, map[uint64]struct{}{}) + require.ErrorIs(err, test.expectedErr) + // Note we re-open the database here to avoid a closed error when the test is for a shutdown VM. + chaindb := Database{prefixdb.NewNested(ethDBPrefix, syncerVM.db)} + assertSyncPerformedHeights(t, chaindb, map[uint64]struct{}{}) return } - if err != nil { - t.Fatal("state sync failed", err) - } + require.NoError(err, "state sync failed") // set [syncerVM] to bootstrapping and verify the last accepted block has been updated correctly // and that we can bootstrap and process some blocks. - if err := syncerVM.SetState(context.Background(), snow.Bootstrapping); err != nil { - t.Fatal(err) - } - assert.Equal(t, serverVM.LastAcceptedBlock().Height(), syncerVM.LastAcceptedBlock().Height(), "block height mismatch between syncer and server") - assert.Equal(t, serverVM.LastAcceptedBlock().ID(), syncerVM.LastAcceptedBlock().ID(), "blockID mismatch between syncer and server") - assert.True(t, syncerVM.blockChain.HasState(syncerVM.blockChain.LastAcceptedBlock().Root()), "unavailable state for last accepted block") + require.NoError(syncerVM.SetState(context.Background(), snow.Bootstrapping)) + require.Equal(serverVM.LastAcceptedBlock().Height(), syncerVM.LastAcceptedBlock().Height(), "block height mismatch between syncer and server") + require.Equal(serverVM.LastAcceptedBlock().ID(), syncerVM.LastAcceptedBlock().ID(), "blockID mismatch between syncer and server") + require.True(syncerVM.blockChain.HasState(syncerVM.blockChain.LastAcceptedBlock().Root()), "unavailable state for last accepted block") assertSyncPerformedHeights(t, syncerVM.chaindb, map[uint64]struct{}{retrievedSummary.Height(): {}}) blocksToBuild := 10 txsPerBlock := 10 - toAddress := testEthAddrs[2] // arbitrary choice + toAddress := testEthAddrs[1] // arbitrary choice 
generateAndAcceptBlocks(t, syncerVM, blocksToBuild, func(_ int, gen *core.BlockGen) { + b, err := predicate.NewResults().Bytes() + if err != nil { + t.Fatal(err) + } + gen.AppendExtra(b) i := 0 for k := range fundedAccounts { tx := types.NewTransaction(gen.TxNonce(k.Address), toAddress, big.NewInt(1), 21000, initialBaseFee, nil) - signedTx, err := types.SignTx(tx, types.NewEIP155Signer(serverVM.chainID), k.PrivateKey) - if err != nil { - t.Fatal(err) - } + signedTx, err := types.SignTx(tx, types.NewEIP155Signer(serverVM.chainConfig.ChainID), k.PrivateKey) + require.NoError(err) gen.AddTx(signedTx) i++ if i >= txsPerBlock { @@ -525,8 +515,8 @@ func testSyncerVM(t *testing.T, vmSetup *syncVMSetup, test syncTest) { }) // check we can transition to [NormalOp] state and continue to process blocks. - assert.NoError(t, syncerVM.SetState(context.Background(), snow.NormalOp)) - assert.True(t, syncerVM.bootstrapped) + require.NoError(syncerVM.SetState(context.Background(), snow.NormalOp)) + require.True(syncerVM.bootstrapped) // check atomic memory was synced properly syncerSharedMemories := newSharedMemories(syncerAtomicMemory, syncerVM.ctx.ChainID, syncerVM.ctx.XChainID) @@ -537,13 +527,16 @@ func testSyncerVM(t *testing.T, vmSetup *syncVMSetup, test syncTest) { // Generate blocks after we have entered normal consensus as well generateAndAcceptBlocks(t, syncerVM, blocksToBuild, func(_ int, gen *core.BlockGen) { + b, err := predicate.NewResults().Bytes() + if err != nil { + t.Fatal(err) + } + gen.AppendExtra(b) i := 0 for k := range fundedAccounts { tx := types.NewTransaction(gen.TxNonce(k.Address), toAddress, big.NewInt(1), 21000, initialBaseFee, nil) - signedTx, err := types.SignTx(tx, types.NewEIP155Signer(serverVM.chainID), k.PrivateKey) - if err != nil { - t.Fatal(err) - } + signedTx, err := types.SignTx(tx, types.NewEIP155Signer(serverVM.chainConfig.ChainID), k.PrivateKey) + require.NoError(err) gen.AddTx(signedTx) i++ if i >= txsPerBlock { @@ -562,7 +555,7 @@ func 
patchBlock(blk *types.Block, root common.Hash, db ethdb.Database) *types.Bl header := blk.Header() header.Root = root receipts := rawdb.ReadRawReceipts(db, blk.Hash(), blk.NumberU64()) - newBlk := types.NewBlock( + newBlk := types.NewBlockWithExtData( header, blk.Transactions(), blk.Uncles(), receipts, trie.NewStackTrie(nil), blk.ExtData(), true, ) rawdb.WriteBlock(db, newBlk) @@ -597,7 +590,7 @@ func generateAndAcceptBlocks(t *testing.T, vm *VM, numBlocks int, gen func(int, _, _, err := core.GenerateChain( vm.chainConfig, vm.blockChain.LastAcceptedBlock(), - dummy.NewDummyEngine(vm.createConsensusCallbacks()), + dummy.NewFakerWithCallbacks(vm.createConsensusCallbacks()), vm.chaindb, numBlocks, 10, diff --git a/coreth/plugin/evm/test_tx.go b/coreth/plugin/evm/test_tx.go index c057c874..458059fe 100644 --- a/coreth/plugin/evm/test_tx.go +++ b/coreth/plugin/evm/test_tx.go @@ -6,6 +6,7 @@ package evm import ( "math/big" "math/rand" + "time" "github.com/ava-labs/avalanchego/utils" @@ -77,7 +78,7 @@ func (t *TestUnsignedTx) EVMStateTransfer(ctx *snow.Context, state *state.StateD func testTxCodec() codec.Manager { codec := codec.NewDefaultManager() - c := linearcodec.NewDefault() + c := linearcodec.NewDefault(time.Time{}) errs := wrappers.Errs{} errs.Add( diff --git a/coreth/plugin/evm/tx.go b/coreth/plugin/evm/tx.go index fc7cce74..5c8497a3 100644 --- a/coreth/plugin/evm/tx.go +++ b/coreth/plugin/evm/tx.go @@ -55,6 +55,14 @@ type EVMOutput struct { AssetID ids.ID `serialize:"true" json:"assetID"` } +func (o EVMOutput) Compare(other EVMOutput) int { + addrComp := bytes.Compare(o.Address.Bytes(), other.Address.Bytes()) + if addrComp != 0 { + return addrComp + } + return bytes.Compare(o.AssetID[:], other.AssetID[:]) +} + // EVMInput defines an input created from the EVM state to fund export transactions type EVMInput struct { Address common.Address `serialize:"true" json:"address"` @@ -63,6 +71,14 @@ type EVMInput struct { Nonce uint64 `serialize:"true" json:"nonce"` } 
+func (i EVMInput) Compare(other EVMInput) int { + addrComp := bytes.Compare(i.Address.Bytes(), other.Address.Bytes()) + if addrComp != 0 { + return addrComp + } + return bytes.Compare(i.AssetID[:], other.AssetID[:]) +} + // Verify ... func (out *EVMOutput) Verify() error { switch { @@ -126,6 +142,19 @@ type Tx struct { Creds []verify.Verifiable `serialize:"true" json:"credentials"` } +func (tx *Tx) Compare(other *Tx) int { + txHex := tx.ID().Hex() + otherHex := other.ID().Hex() + switch { + case txHex < otherHex: + return -1 + case txHex > otherHex: + return 1 + default: + return 0 + } +} + // Sign this transaction with the provided signers func (tx *Tx) Sign(c codec.Manager, signers [][]*secp256k1.PrivateKey) error { unsignedBytes, err := c.Marshal(codecVersion, &tx.UnsignedAtomicTx) @@ -172,7 +201,7 @@ func (tx *Tx) BlockFeeContribution(fixedFee bool, avaxAssetID ids.ID, baseFee *b if err != nil { return nil, nil, err } - txFee, err := calculateDynamicFee(gasUsed, baseFee) + txFee, err := CalculateDynamicFee(gasUsed, baseFee) if err != nil { return nil, nil, err } @@ -217,52 +246,9 @@ func SortEVMInputsAndSigners(inputs []EVMInput, signers [][]*secp256k1.PrivateKe sort.Sort(&innerSortInputsAndSigners{inputs: inputs, signers: signers}) } -// IsSortedAndUniqueEVMInputs returns true if the EVM Inputs are sorted and unique -// based on the account addresses -func IsSortedAndUniqueEVMInputs(inputs []EVMInput) bool { - return utils.IsSortedAndUnique(&innerSortInputsAndSigners{inputs: inputs}) -} - -// innerSortEVMOutputs implements sort.Interface for EVMOutput -type innerSortEVMOutputs struct { - outputs []EVMOutput -} - -func (outs *innerSortEVMOutputs) Less(i, j int) bool { - addrComp := bytes.Compare(outs.outputs[i].Address.Bytes(), outs.outputs[j].Address.Bytes()) - if addrComp != 0 { - return addrComp < 0 - } - return bytes.Compare(outs.outputs[i].AssetID[:], outs.outputs[j].AssetID[:]) < 0 -} - -func (outs *innerSortEVMOutputs) Len() int { return 
len(outs.outputs) } - -func (outs *innerSortEVMOutputs) Swap(i, j int) { - outs.outputs[j], outs.outputs[i] = outs.outputs[i], outs.outputs[j] -} - -// SortEVMOutputs sorts the list of EVMOutputs based on the addresses and assetIDs -// of the outputs -func SortEVMOutputs(outputs []EVMOutput) { - sort.Sort(&innerSortEVMOutputs{outputs: outputs}) -} - -// IsSortedEVMOutputs returns true if the EVMOutputs are sorted -// based on the account addresses and assetIDs -func IsSortedEVMOutputs(outputs []EVMOutput) bool { - return sort.IsSorted(&innerSortEVMOutputs{outputs: outputs}) -} - -// IsSortedAndUniqueEVMOutputs returns true if the EVMOutputs are sorted -// and unique based on the account addresses and assetIDs -func IsSortedAndUniqueEVMOutputs(outputs []EVMOutput) bool { - return utils.IsSortedAndUnique(&innerSortEVMOutputs{outputs: outputs}) -} - // calculates the amount of AVAX that must be burned by an atomic transaction // that consumes [cost] at [baseFee]. -func calculateDynamicFee(cost uint64, baseFee *big.Int) (uint64, error) { +func CalculateDynamicFee(cost uint64, baseFee *big.Int) (uint64, error) { if baseFee == nil { return 0, errNilBaseFee } @@ -289,7 +275,7 @@ func mergeAtomicOps(txs []*Tx) (map[ids.ID]*atomic.Requests, error) { // with txs initialized from the txID index. copyTxs := make([]*Tx, len(txs)) copy(copyTxs, txs) - sort.Slice(copyTxs, func(i, j int) bool { return copyTxs[i].ID().Hex() < copyTxs[j].ID().Hex() }) + utils.Sort(copyTxs) txs = copyTxs } output := make(map[ids.ID]*atomic.Requests) @@ -302,14 +288,3 @@ func mergeAtomicOps(txs []*Tx) (map[ids.ID]*atomic.Requests, error) { } return output, nil } - -// mergeAtomicOps merges atomic ops for [chainID] represented by [requests] -// to the [output] map provided. 
-func mergeAtomicOpsToMap(output map[ids.ID]*atomic.Requests, chainID ids.ID, requests *atomic.Requests) { - if request, exists := output[chainID]; exists { - request.PutRequests = append(request.PutRequests, requests.PutRequests...) - request.RemoveRequests = append(request.RemoveRequests, requests.RemoveRequests...) - } else { - output[chainID] = requests - } -} diff --git a/coreth/plugin/evm/tx_gossip_test.go b/coreth/plugin/evm/tx_gossip_test.go new file mode 100644 index 00000000..6e6f6eca --- /dev/null +++ b/coreth/plugin/evm/tx_gossip_test.go @@ -0,0 +1,581 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package evm + +import ( + "context" + "encoding/binary" + "math/big" + "sync" + "testing" + "time" + + "github.com/ava-labs/avalanchego/chains/atomic" + "github.com/ava-labs/avalanchego/database/memdb" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/network/p2p" + "github.com/ava-labs/avalanchego/network/p2p/gossip" + "github.com/ava-labs/avalanchego/proto/pb/sdk" + "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/snow/validators" + agoUtils "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/set" + "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/vms/components/avax" + "github.com/ava-labs/avalanchego/vms/secp256k1fx" + + "google.golang.org/protobuf/proto" + + "github.com/ava-labs/coreth/core/types" + "github.com/ava-labs/coreth/params" + "github.com/ava-labs/coreth/utils" +) + +func TestEthTxGossip(t *testing.T) { + require := require.New(t) + ctx := context.Background() + snowCtx := utils.TestSnowContext() + validatorState := &validators.TestState{} + 
snowCtx.ValidatorState = validatorState + + pk, err := secp256k1.NewPrivateKey() + require.NoError(err) + address := GetEthAddress(pk) + genesis := newPrefundedGenesis(100_000_000_000_000_000, address) + genesisBytes, err := genesis.MarshalJSON() + require.NoError(err) + + responseSender := &common.FakeSender{ + SentAppResponse: make(chan []byte, 1), + } + vm := &VM{ + p2pSender: responseSender, + atomicTxGossipHandler: &p2p.NoOpHandler{}, + atomicTxPullGossiper: &gossip.NoOpGossiper{}, + } + + require.NoError(vm.Initialize( + ctx, + snowCtx, + memdb.New(), + genesisBytes, + nil, + nil, + make(chan common.Message), + nil, + &common.SenderTest{}, + )) + require.NoError(vm.SetState(ctx, snow.NormalOp)) + + defer func() { + require.NoError(vm.Shutdown(ctx)) + }() + + // sender for the peer requesting gossip from [vm] + peerSender := &common.FakeSender{ + SentAppRequest: make(chan []byte, 1), + } + + network, err := p2p.NewNetwork(logging.NoLog{}, peerSender, prometheus.NewRegistry(), "") + require.NoError(err) + client := network.NewClient(ethTxGossipProtocol) + + // we only accept gossip requests from validators + requestingNodeID := ids.GenerateTestNodeID() + require.NoError(vm.Network.Connected(ctx, requestingNodeID, nil)) + validatorState.GetCurrentHeightF = func(context.Context) (uint64, error) { + return 0, nil + } + validatorState.GetValidatorSetF = func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { + return map[ids.NodeID]*validators.GetValidatorOutput{requestingNodeID: nil}, nil + } + + // Ask the VM for any new transactions. We should get nothing at first. 
+ emptyBloomFilter, err := gossip.NewBloomFilter(prometheus.NewRegistry(), "", txGossipBloomMinTargetElements, txGossipBloomTargetFalsePositiveRate, txGossipBloomResetFalsePositiveRate) + require.NoError(err) + emptyBloomFilterBytes, _ := emptyBloomFilter.Marshal() + request := &sdk.PullGossipRequest{ + Filter: emptyBloomFilterBytes, + Salt: agoUtils.RandomBytes(32), + } + + requestBytes, err := proto.Marshal(request) + require.NoError(err) + + wg := &sync.WaitGroup{} + wg.Add(1) + onResponse := func(_ context.Context, nodeID ids.NodeID, responseBytes []byte, err error) { + require.NoError(err) + + response := &sdk.PullGossipResponse{} + require.NoError(proto.Unmarshal(responseBytes, response)) + require.Empty(response.Gossip) + wg.Done() + } + require.NoError(client.AppRequest(ctx, set.Of(vm.ctx.NodeID), requestBytes, onResponse)) + require.NoError(vm.AppRequest(ctx, requestingNodeID, 1, time.Time{}, <-peerSender.SentAppRequest)) + require.NoError(network.AppResponse(ctx, snowCtx.NodeID, 1, <-responseSender.SentAppResponse)) + wg.Wait() + + // Issue a tx to the VM + tx := types.NewTransaction(0, address, big.NewInt(10), 100_000, big.NewInt(params.LaunchMinGasPrice), nil) + signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm.chainID), pk.ToECDSA()) + require.NoError(err) + + errs := vm.txPool.AddLocals([]*types.Transaction{signedTx}) + require.Len(errs, 1) + require.Nil(errs[0]) + + // wait so we aren't throttled by the vm + time.Sleep(5 * time.Second) + + marshaller := GossipEthTxMarshaller{} + // Ask the VM for new transactions. We should get the newly issued tx. 
+ wg.Add(1) + onResponse = func(_ context.Context, nodeID ids.NodeID, responseBytes []byte, err error) { + require.NoError(err) + + response := &sdk.PullGossipResponse{} + require.NoError(proto.Unmarshal(responseBytes, response)) + require.Len(response.Gossip, 1) + + gotTx, err := marshaller.UnmarshalGossip(response.Gossip[0]) + require.NoError(err) + require.Equal(signedTx.Hash(), gotTx.Tx.Hash()) + + wg.Done() + } + require.NoError(client.AppRequest(ctx, set.Of(vm.ctx.NodeID), requestBytes, onResponse)) + require.NoError(vm.AppRequest(ctx, requestingNodeID, 3, time.Time{}, <-peerSender.SentAppRequest)) + require.NoError(network.AppResponse(ctx, snowCtx.NodeID, 3, <-responseSender.SentAppResponse)) + wg.Wait() +} + +func TestAtomicTxGossip(t *testing.T) { + require := require.New(t) + ctx := context.Background() + snowCtx := utils.TestSnowContext() + snowCtx.AVAXAssetID = ids.GenerateTestID() + snowCtx.XChainID = ids.GenerateTestID() + validatorState := &validators.TestState{ + GetSubnetIDF: func(context.Context, ids.ID) (ids.ID, error) { + return ids.Empty, nil + }, + } + snowCtx.ValidatorState = validatorState + memory := atomic.NewMemory(memdb.New()) + snowCtx.SharedMemory = memory.NewSharedMemory(ids.Empty) + + pk, err := secp256k1.NewPrivateKey() + require.NoError(err) + address := GetEthAddress(pk) + genesis := newPrefundedGenesis(100_000_000_000_000_000, address) + genesisBytes, err := genesis.MarshalJSON() + require.NoError(err) + + responseSender := &common.FakeSender{ + SentAppResponse: make(chan []byte, 1), + } + vm := &VM{ + p2pSender: responseSender, + ethTxGossipHandler: &p2p.NoOpHandler{}, + ethTxPullGossiper: &gossip.NoOpGossiper{}, + } + + require.NoError(vm.Initialize( + ctx, + snowCtx, + memdb.New(), + genesisBytes, + nil, + nil, + make(chan common.Message), + nil, + &common.SenderTest{}, + )) + require.NoError(vm.SetState(ctx, snow.NormalOp)) + + defer func() { + require.NoError(vm.Shutdown(ctx)) + }() + + // sender for the peer requesting 
gossip from [vm] + peerSender := &common.FakeSender{ + SentAppRequest: make(chan []byte, 1), + } + network, err := p2p.NewNetwork(logging.NoLog{}, peerSender, prometheus.NewRegistry(), "") + require.NoError(err) + client := network.NewClient(atomicTxGossipProtocol) + + // we only accept gossip requests from validators + requestingNodeID := ids.GenerateTestNodeID() + require.NoError(vm.Network.Connected(ctx, requestingNodeID, nil)) + validatorState.GetCurrentHeightF = func(context.Context) (uint64, error) { + return 0, nil + } + validatorState.GetValidatorSetF = func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { + return map[ids.NodeID]*validators.GetValidatorOutput{requestingNodeID: nil}, nil + } + + // Ask the VM for any new transactions. We should get nothing at first. + emptyBloomFilter, err := gossip.NewBloomFilter(prometheus.NewRegistry(), "", txGossipBloomMinTargetElements, txGossipBloomTargetFalsePositiveRate, txGossipBloomResetFalsePositiveRate) + require.NoError(err) + emptyBloomFilterBytes, _ := emptyBloomFilter.Marshal() + request := &sdk.PullGossipRequest{ + Filter: emptyBloomFilterBytes, + Salt: agoUtils.RandomBytes(32), + } + + requestBytes, err := proto.Marshal(request) + require.NoError(err) + + wg := &sync.WaitGroup{} + wg.Add(1) + onResponse := func(_ context.Context, nodeID ids.NodeID, responseBytes []byte, err error) { + require.NoError(err) + + response := &sdk.PullGossipResponse{} + require.NoError(proto.Unmarshal(responseBytes, response)) + require.Empty(response.Gossip) + wg.Done() + } + require.NoError(client.AppRequest(ctx, set.Of(vm.ctx.NodeID), requestBytes, onResponse)) + require.NoError(vm.AppRequest(ctx, requestingNodeID, 1, time.Time{}, <-peerSender.SentAppRequest)) + require.NoError(network.AppResponse(ctx, snowCtx.NodeID, 1, <-responseSender.SentAppResponse)) + wg.Wait() + + // Issue a tx to the VM + utxo, err := addUTXO( + memory, + snowCtx, + ids.GenerateTestID(), + 0, + 
snowCtx.AVAXAssetID, + 100_000_000_000, + pk.PublicKey().Address(), + ) + require.NoError(err) + tx, err := vm.newImportTxWithUTXOs(vm.ctx.XChainID, address, initialBaseFee, secp256k1fx.NewKeychain(pk), []*avax.UTXO{utxo}) + require.NoError(err) + require.NoError(vm.mempool.AddLocalTx(tx)) + + // wait so we aren't throttled by the vm + time.Sleep(5 * time.Second) + + // Ask the VM for new transactions. We should get the newly issued tx. + wg.Add(1) + + marshaller := GossipAtomicTxMarshaller{} + onResponse = func(_ context.Context, nodeID ids.NodeID, responseBytes []byte, err error) { + require.NoError(err) + + response := &sdk.PullGossipResponse{} + require.NoError(proto.Unmarshal(responseBytes, response)) + require.Len(response.Gossip, 1) + + gotTx, err := marshaller.UnmarshalGossip(response.Gossip[0]) + require.NoError(err) + require.Equal(tx.ID(), gotTx.GossipID()) + + wg.Done() + } + require.NoError(client.AppRequest(ctx, set.Of(vm.ctx.NodeID), requestBytes, onResponse)) + require.NoError(vm.AppRequest(ctx, requestingNodeID, 3, time.Time{}, <-peerSender.SentAppRequest)) + require.NoError(network.AppResponse(ctx, snowCtx.NodeID, 3, <-responseSender.SentAppResponse)) + wg.Wait() +} + +// Tests that a tx is gossiped when it is issued +func TestEthTxPushGossipOutbound(t *testing.T) { + require := require.New(t) + ctx := context.Background() + snowCtx := utils.TestSnowContext() + sender := &common.FakeSender{ + SentAppGossip: make(chan []byte, 1), + } + + vm := &VM{ + p2pSender: sender, + ethTxPullGossiper: gossip.NoOpGossiper{}, + atomicTxPullGossiper: gossip.NoOpGossiper{}, + } + + pk, err := secp256k1.NewPrivateKey() + require.NoError(err) + address := GetEthAddress(pk) + genesis := newPrefundedGenesis(100_000_000_000_000_000, address) + genesisBytes, err := genesis.MarshalJSON() + require.NoError(err) + + require.NoError(vm.Initialize( + ctx, + snowCtx, + memdb.New(), + genesisBytes, + nil, + nil, + make(chan common.Message), + nil, + &common.FakeSender{}, + )) 
+ require.NoError(vm.SetState(ctx, snow.NormalOp)) + + tx := types.NewTransaction(0, address, big.NewInt(10), 100_000, big.NewInt(params.LaunchMinGasPrice), nil) + signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm.chainID), pk.ToECDSA()) + require.NoError(err) + + // issue a tx + require.NoError(vm.txPool.AddLocal(signedTx)) + + sent := <-sender.SentAppGossip + got := &sdk.PushGossip{} + + // we should get a message that has the protocol prefix and the gossip + // message + require.Equal(byte(ethTxGossipProtocol), sent[0]) + require.NoError(proto.Unmarshal(sent[1:], got)) + + marshaller := GossipEthTxMarshaller{} + require.Len(got.Gossip, 1) + gossipedTx, err := marshaller.UnmarshalGossip(got.Gossip[0]) + require.NoError(err) + require.Equal(ids.ID(signedTx.Hash()), gossipedTx.GossipID()) +} + +// Tests that a gossiped tx is added to the mempool and forwarded +func TestEthTxPushGossipInbound(t *testing.T) { + require := require.New(t) + ctx := context.Background() + snowCtx := utils.TestSnowContext() + + sender := &common.FakeSender{ + SentAppGossip: make(chan []byte, 1), + } + vm := &VM{ + p2pSender: sender, + ethTxPullGossiper: gossip.NoOpGossiper{}, + atomicTxPullGossiper: gossip.NoOpGossiper{}, + } + + pk, err := secp256k1.NewPrivateKey() + require.NoError(err) + address := GetEthAddress(pk) + genesis := newPrefundedGenesis(100_000_000_000_000_000, address) + genesisBytes, err := genesis.MarshalJSON() + require.NoError(err) + + require.NoError(vm.Initialize( + ctx, + snowCtx, + memdb.New(), + genesisBytes, + nil, + nil, + make(chan common.Message), + nil, + &common.FakeSender{}, + )) + require.NoError(vm.SetState(ctx, snow.NormalOp)) + + tx := types.NewTransaction(0, address, big.NewInt(10), 100_000, big.NewInt(params.LaunchMinGasPrice), nil) + signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm.chainID), pk.ToECDSA()) + require.NoError(err) + + marshaller := GossipEthTxMarshaller{} + gossipedTx := &GossipEthTx{ + Tx: signedTx, + } + 
gossipedTxBytes, err := marshaller.MarshalGossip(gossipedTx) + require.NoError(err) + + inboundGossip := &sdk.PushGossip{ + Gossip: [][]byte{gossipedTxBytes}, + } + + inboundGossipBytes, err := proto.Marshal(inboundGossip) + require.NoError(err) + + inboundGossipMsg := append(binary.AppendUvarint(nil, ethTxGossipProtocol), inboundGossipBytes...) + require.NoError(vm.AppGossip(ctx, ids.EmptyNodeID, inboundGossipMsg)) + + forwardedMsg := &sdk.PushGossip{} + outboundGossipBytes := <-sender.SentAppGossip + + require.Equal(byte(ethTxGossipProtocol), outboundGossipBytes[0]) + require.NoError(proto.Unmarshal(outboundGossipBytes[1:], forwardedMsg)) + require.Len(forwardedMsg.Gossip, 1) + + forwardedTx, err := marshaller.UnmarshalGossip(forwardedMsg.Gossip[0]) + require.NoError(err) + require.Equal(gossipedTx.GossipID(), forwardedTx.GossipID()) + require.True(vm.txPool.Has(signedTx.Hash())) +} + +// Tests that a tx is gossiped when it is issued +func TestAtomicTxPushGossipOutbound(t *testing.T) { + require := require.New(t) + ctx := context.Background() + snowCtx := utils.TestSnowContext() + snowCtx.AVAXAssetID = ids.GenerateTestID() + snowCtx.XChainID = ids.GenerateTestID() + validatorState := &validators.TestState{ + GetSubnetIDF: func(context.Context, ids.ID) (ids.ID, error) { + return ids.Empty, nil + }, + } + snowCtx.ValidatorState = validatorState + memory := atomic.NewMemory(memdb.New()) + snowCtx.SharedMemory = memory.NewSharedMemory(ids.Empty) + + pk, err := secp256k1.NewPrivateKey() + require.NoError(err) + address := GetEthAddress(pk) + genesis := newPrefundedGenesis(100_000_000_000_000_000, address) + genesisBytes, err := genesis.MarshalJSON() + require.NoError(err) + + sender := &common.FakeSender{ + SentAppGossip: make(chan []byte, 1), + } + vm := &VM{ + p2pSender: sender, + ethTxPullGossiper: gossip.NoOpGossiper{}, + atomicTxPullGossiper: gossip.NoOpGossiper{}, + } + + require.NoError(vm.Initialize( + ctx, + snowCtx, + memdb.New(), + genesisBytes, + nil, + 
nil, + make(chan common.Message), + nil, + &common.FakeSender{}, + )) + require.NoError(vm.SetState(ctx, snow.NormalOp)) + + // Issue a tx to the VM + utxo, err := addUTXO( + memory, + snowCtx, + ids.GenerateTestID(), + 0, + snowCtx.AVAXAssetID, + 100_000_000_000, + pk.PublicKey().Address(), + ) + require.NoError(err) + tx, err := vm.newImportTxWithUTXOs(vm.ctx.XChainID, address, initialBaseFee, secp256k1fx.NewKeychain(pk), []*avax.UTXO{utxo}) + require.NoError(err) + require.NoError(vm.mempool.AddLocalTx(tx)) + + gossipedBytes := <-sender.SentAppGossip + require.Equal(byte(atomicTxGossipProtocol), gossipedBytes[0]) + + outboundGossipMsg := &sdk.PushGossip{} + require.NoError(proto.Unmarshal(gossipedBytes[1:], outboundGossipMsg)) + require.Len(outboundGossipMsg.Gossip, 1) + + marshaller := GossipAtomicTxMarshaller{} + gossipedTx, err := marshaller.UnmarshalGossip(outboundGossipMsg.Gossip[0]) + require.NoError(err) + require.Equal(tx.ID(), gossipedTx.Tx.ID()) +} + +// Tests that a tx is gossiped when it is issued +func TestAtomicTxPushGossipInbound(t *testing.T) { + require := require.New(t) + ctx := context.Background() + snowCtx := utils.TestSnowContext() + snowCtx.AVAXAssetID = ids.GenerateTestID() + snowCtx.XChainID = ids.GenerateTestID() + validatorState := &validators.TestState{ + GetSubnetIDF: func(context.Context, ids.ID) (ids.ID, error) { + return ids.Empty, nil + }, + } + snowCtx.ValidatorState = validatorState + memory := atomic.NewMemory(memdb.New()) + snowCtx.SharedMemory = memory.NewSharedMemory(ids.Empty) + + pk, err := secp256k1.NewPrivateKey() + require.NoError(err) + address := GetEthAddress(pk) + genesis := newPrefundedGenesis(100_000_000_000_000_000, address) + genesisBytes, err := genesis.MarshalJSON() + require.NoError(err) + + sender := &common.FakeSender{ + SentAppGossip: make(chan []byte, 1), + } + vm := &VM{ + p2pSender: sender, + ethTxPullGossiper: gossip.NoOpGossiper{}, + atomicTxPullGossiper: gossip.NoOpGossiper{}, + } + + 
require.NoError(vm.Initialize( + ctx, + snowCtx, + memdb.New(), + genesisBytes, + nil, + nil, + make(chan common.Message), + nil, + &common.FakeSender{}, + )) + require.NoError(vm.SetState(ctx, snow.NormalOp)) + + // issue a tx to the vm + utxo, err := addUTXO( + memory, + snowCtx, + ids.GenerateTestID(), + 0, + snowCtx.AVAXAssetID, + 100_000_000_000, + pk.PublicKey().Address(), + ) + require.NoError(err) + tx, err := vm.newImportTxWithUTXOs(vm.ctx.XChainID, address, initialBaseFee, secp256k1fx.NewKeychain(pk), []*avax.UTXO{utxo}) + require.NoError(err) + require.NoError(vm.mempool.AddLocalTx(tx)) + + marshaller := GossipAtomicTxMarshaller{} + gossipedTx := &GossipAtomicTx{ + Tx: tx, + } + gossipBytes, err := marshaller.MarshalGossip(gossipedTx) + require.NoError(err) + + inboundGossip := &sdk.PushGossip{ + Gossip: [][]byte{gossipBytes}, + } + inboundGossipBytes, err := proto.Marshal(inboundGossip) + require.NoError(err) + + inboundGossipMsg := append(binary.AppendUvarint(nil, atomicTxGossipProtocol), inboundGossipBytes...) 
+ + require.NoError(vm.AppGossip(ctx, ids.EmptyNodeID, inboundGossipMsg)) + + forwardedBytes := <-sender.SentAppGossip + require.Equal(byte(atomicTxGossipProtocol), forwardedBytes[0]) + + forwardedGossipMsg := &sdk.PushGossip{} + require.NoError(proto.Unmarshal(forwardedBytes[1:], forwardedGossipMsg)) + require.Len(forwardedGossipMsg.Gossip, 1) + + forwardedTx, err := marshaller.UnmarshalGossip(forwardedGossipMsg.Gossip[0]) + require.NoError(err) + require.Equal(tx.ID(), forwardedTx.Tx.ID()) + require.True(vm.mempool.has(tx.ID())) +} diff --git a/coreth/plugin/evm/tx_test.go b/coreth/plugin/evm/tx_test.go index 91f8ab6c..1c721200 100644 --- a/coreth/plugin/evm/tx_test.go +++ b/coreth/plugin/evm/tx_test.go @@ -9,10 +9,15 @@ import ( "strings" "testing" + "github.com/stretchr/testify/require" + + "github.com/ethereum/go-ethereum/common" + + "github.com/ava-labs/coreth/params" + "github.com/ava-labs/avalanchego/chains/atomic" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/coreth/params" - "github.com/stretchr/testify/require" ) func TestCalculateDynamicFee(t *testing.T) { @@ -36,7 +41,7 @@ func TestCalculateDynamicFee(t *testing.T) { } for _, test := range tests { - cost, err := calculateDynamicFee(test.gas, test.baseFee) + cost, err := CalculateDynamicFee(test.gas, test.baseFee) if test.expectedErr == nil { if err != nil { t.Fatalf("Unexpectedly failed to calculate dynamic fee: %s", err) @@ -145,10 +150,12 @@ func executeTxTest(t *testing.T, test atomicTxTest) { // If this test simulates processing txs during bootstrapping (where some verification is skipped), // initialize the block building goroutines normally initialized in SetState(snow.NormalOps). // This ensures that the VM can build a block correctly during the test. 
- vm.initBlockBuilding() + if err := vm.initBlockBuilding(); err != nil { + t.Fatal(err) + } } - if err := vm.issueTx(tx, true /*=local*/); err != nil { + if err := vm.mempool.AddLocalTx(tx); err != nil { t.Fatal(err) } <-issuer @@ -180,3 +187,127 @@ func executeTxTest(t *testing.T, test atomicTxTest) { test.checkState(t, vm) } } + +func TestEVMOutputCompare(t *testing.T) { + type test struct { + name string + a, b EVMOutput + expected int + } + + tests := []test{ + { + name: "address less", + a: EVMOutput{ + Address: common.BytesToAddress([]byte{0x01}), + AssetID: ids.ID{1}, + }, + b: EVMOutput{ + Address: common.BytesToAddress([]byte{0x02}), + AssetID: ids.ID{0}, + }, + expected: -1, + }, + { + name: "address greater; assetIDs equal", + a: EVMOutput{ + Address: common.BytesToAddress([]byte{0x02}), + AssetID: ids.ID{}, + }, + b: EVMOutput{ + Address: common.BytesToAddress([]byte{0x01}), + AssetID: ids.ID{}, + }, + expected: 1, + }, + { + name: "addresses equal; assetID less", + a: EVMOutput{ + Address: common.BytesToAddress([]byte{0x01}), + AssetID: ids.ID{0}, + }, + b: EVMOutput{ + Address: common.BytesToAddress([]byte{0x01}), + AssetID: ids.ID{1}, + }, + expected: -1, + }, + { + name: "equal", + a: EVMOutput{}, + b: EVMOutput{}, + expected: 0, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + + require.Equal(tt.expected, tt.a.Compare(tt.b)) + require.Equal(-tt.expected, tt.b.Compare(tt.a)) + }) + } +} + +func TestEVMInputCompare(t *testing.T) { + type test struct { + name string + a, b EVMInput + expected int + } + + tests := []test{ + { + name: "address less", + a: EVMInput{ + Address: common.BytesToAddress([]byte{0x01}), + AssetID: ids.ID{1}, + }, + b: EVMInput{ + Address: common.BytesToAddress([]byte{0x02}), + AssetID: ids.ID{0}, + }, + expected: -1, + }, + { + name: "address greater; assetIDs equal", + a: EVMInput{ + Address: common.BytesToAddress([]byte{0x02}), + AssetID: ids.ID{}, + }, + b: 
EVMInput{ + Address: common.BytesToAddress([]byte{0x01}), + AssetID: ids.ID{}, + }, + expected: 1, + }, + { + name: "addresses equal; assetID less", + a: EVMInput{ + Address: common.BytesToAddress([]byte{0x01}), + AssetID: ids.ID{0}, + }, + b: EVMInput{ + Address: common.BytesToAddress([]byte{0x01}), + AssetID: ids.ID{1}, + }, + expected: -1, + }, + { + name: "equal", + a: EVMInput{}, + b: EVMInput{}, + expected: 0, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + + require.Equal(tt.expected, tt.a.Compare(tt.b)) + require.Equal(-tt.expected, tt.b.Compare(tt.a)) + }) + } +} diff --git a/coreth/plugin/evm/user.go b/coreth/plugin/evm/user.go index 51a3fa38..330d03b0 100644 --- a/coreth/plugin/evm/user.go +++ b/coreth/plugin/evm/user.go @@ -22,7 +22,6 @@ var ( ) type user struct { - secpFactory *secp256k1.Factory // This user's database, acquired from the keystore db *encdb.Database } @@ -116,7 +115,7 @@ func (u *user) getKey(address common.Address) (*secp256k1.PrivateKey, error) { if err != nil { return nil, err } - return u.secpFactory.ToPrivateKey(bytes) + return secp256k1.ToPrivateKey(bytes) } // Return all private keys controlled by this user diff --git a/coreth/plugin/evm/version.go b/coreth/plugin/evm/version.go index 5fdcddd8..6a96eef3 100644 --- a/coreth/plugin/evm/version.go +++ b/coreth/plugin/evm/version.go @@ -11,7 +11,7 @@ var ( // GitCommit is set by the build script GitCommit string // Version is the version of Coreth - Version string = "v0.12.0" + Version string = "v0.12.11" ) func init() { diff --git a/coreth/plugin/evm/vm.go b/coreth/plugin/evm/vm.go index c7ffb490..db72c895 100644 --- a/coreth/plugin/evm/vm.go +++ b/coreth/plugin/evm/vm.go @@ -10,6 +10,7 @@ import ( "fmt" "io" "math/big" + "net/http" "os" "path/filepath" "strings" @@ -17,30 +18,37 @@ import ( "time" avalanchegoMetrics "github.com/ava-labs/avalanchego/api/metrics" + "github.com/ava-labs/avalanchego/network/p2p" + 
"github.com/ava-labs/avalanchego/network/p2p/gossip" + avalanchegoConstants "github.com/ava-labs/avalanchego/utils/constants" + "github.com/prometheus/client_golang/prometheus" "github.com/ava-labs/coreth/consensus/dummy" - corethConstants "github.com/ava-labs/coreth/constants" + "github.com/ava-labs/coreth/constants" "github.com/ava-labs/coreth/core" "github.com/ava-labs/coreth/core/rawdb" "github.com/ava-labs/coreth/core/state" + "github.com/ava-labs/coreth/core/txpool" "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/eth" "github.com/ava-labs/coreth/eth/ethconfig" - "github.com/ava-labs/coreth/ethdb" + "github.com/ava-labs/coreth/metrics" corethPrometheus "github.com/ava-labs/coreth/metrics/prometheus" "github.com/ava-labs/coreth/miner" "github.com/ava-labs/coreth/node" "github.com/ava-labs/coreth/params" "github.com/ava-labs/coreth/peer" "github.com/ava-labs/coreth/plugin/evm/message" + + warpPrecompile "github.com/ava-labs/coreth/precompile/contracts/warp" "github.com/ava-labs/coreth/rpc" statesyncclient "github.com/ava-labs/coreth/sync/client" "github.com/ava-labs/coreth/sync/client/stats" - "github.com/ava-labs/coreth/sync/handlers" - handlerstats "github.com/ava-labs/coreth/sync/handlers/stats" "github.com/ava-labs/coreth/trie" + "github.com/ava-labs/coreth/utils" + "github.com/ava-labs/coreth/warp" + warpValidators "github.com/ava-labs/coreth/warp/validators" - "github.com/prometheus/client_golang/prometheus" // Force-load tracer engine to trigger registration // // We must import this package (not referenced elsewhere) so that the native "callTracer" @@ -49,19 +57,21 @@ import ( _ "github.com/ava-labs/coreth/eth/tracers/js" _ "github.com/ava-labs/coreth/eth/tracers/native" + "github.com/ava-labs/coreth/precompile/precompileconfig" + // Force-load precompiles to trigger registration + _ "github.com/ava-labs/coreth/precompile/registry" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethdb" 
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rlp" - "github.com/ava-labs/coreth/metrics" - avalancheRPC "github.com/gorilla/rpc/v2" "github.com/ava-labs/avalanchego/cache" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/database/manager" "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/ids" @@ -69,7 +79,6 @@ import ( "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" - "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/formatting/address" "github.com/ava-labs/avalanchego/utils/logging" @@ -88,13 +97,16 @@ import ( avalancheJSON "github.com/ava-labs/avalanchego/utils/json" ) +var ( + _ block.ChainVM = &VM{} + _ block.BuildBlockWithContextChainVM = &VM{} + _ block.StateSyncableVM = &VM{} + _ statesyncclient.EthBlockParser = &VM{} +) + const ( x2cRateInt64 int64 = 1_000_000_000 x2cRateMinus1Int64 int64 = x2cRateInt64 - 1 - - // Prefixes for metrics gatherers - ethMetricsPrefix = "eth" - chainStateMetricsPrefix = "chain_state" ) var ( @@ -104,35 +116,54 @@ var ( // places on the X and P chains, but is 18 decimal places within the EVM. 
x2cRate = big.NewInt(x2cRateInt64) x2cRateMinus1 = big.NewInt(x2cRateMinus1Int64) - - _ block.ChainVM = &VM{} - _ block.StateSyncableVM = &VM{} - _ block.HeightIndexedChainVM = &VM{} - _ statesyncclient.EthBlockParser = &VM{} ) const ( // Max time from current time allowed for blocks, before they're considered future blocks // and fail verification - maxFutureBlockTime = 10 * time.Second - maxUTXOsToFetch = 1024 - defaultMempoolSize = 4096 - codecVersion = uint16(0) - secpFactoryCacheSize = 1024 + maxFutureBlockTime = 10 * time.Second + maxUTXOsToFetch = 1024 + defaultMempoolSize = 4096 + codecVersion = uint16(0) + + secpCacheSize = 1024 + decidedCacheSize = 10 * units.MiB + missingCacheSize = 50 + unverifiedCacheSize = 5 * units.MiB + bytesToIDCacheSize = 5 * units.MiB + warpSignatureCacheSize = 500 - decidedCacheSize = 100 - missingCacheSize = 50 - unverifiedCacheSize = 50 + // Prefixes for metrics gatherers + ethMetricsPrefix = "eth" + chainStateMetricsPrefix = "chain_state" targetAtomicTxsSize = 40 * units.KiB + + // p2p app protocols + ethTxGossipProtocol = 0x0 + atomicTxGossipProtocol = 0x1 + + // gossip constants + txGossipBloomMinTargetElements = 8 * 1024 + txGossipBloomTargetFalsePositiveRate = 0.01 + txGossipBloomResetFalsePositiveRate = 0.05 + txGossipBloomChurnMultiplier = 3 + txGossipTargetMessageSize = 20 * units.KiB + maxValidatorSetStaleness = time.Minute + txGossipThrottlingPeriod = 10 * time.Second + txGossipThrottlingLimit = 2 + gossipFrequency = 10 * time.Second + txGossipPollSize = 10 ) // Define the API endpoints for the VM const ( - avaxEndpoint = "/avax" - adminEndpoint = "/admin" - ethRPCEndpoint = "/rpc" - ethWSEndpoint = "/ws" + avaxEndpoint = "/avax" + adminEndpoint = "/admin" + ethRPCEndpoint = "/rpc" + ethWSEndpoint = "/ws" + ethTxGossipNamespace = "eth_tx_gossip" + atomicTxGossipNamespace = "atomic_tx_gossip" ) var ( @@ -140,6 +171,7 @@ var ( lastAcceptedKey = []byte("last_accepted_key") acceptedPrefix = []byte("snowman_accepted") 
metadataPrefix = []byte("metadata") + warpPrefix = []byte("warp") ethDBPrefix = []byte("ethdb") // Prefixes for atomic trie @@ -175,6 +207,7 @@ var ( errConflictingAtomicTx = errors.New("conflicting atomic tx present") errTooManyAtomicTx = errors.New("too many atomic tx") errMissingAtomicTxs = errors.New("cannot build a block with non-empty extra data and zero atomic transactions") + errInvalidHeaderPredicateResults = errors.New("invalid header predicate results") errImportTxsDisabled = errors.New("import transactions are disabled") errExportTxsDisabled = errors.New("export transactions are disabled") ) @@ -210,6 +243,8 @@ func init() { // VM implements the snowman.ChainVM interface type VM struct { ctx *snow.Context + // [cancel] may be nil until [snow.NormalOp] starts + cancel context.CancelFunc // *chain.State helps to implement the VM interface by wrapping blocks // with an efficient caching layer. *chain.State @@ -224,7 +259,7 @@ type VM struct { // pointers to eth constructs eth *eth.Ethereum - txPool *core.TxPool + txPool *txpool.TxPool blockChain *core.BlockChain miner *miner.Miner @@ -241,6 +276,10 @@ type VM struct { // block. 
acceptedBlockDB database.Database + // [warpDB] is used to store warp message signatures + // set to a prefixDB with the prefix [warpPrefix] + warpDB database.Database + toEngine chan<- commonEng.Message syntacticBlockValidator BlockValidator @@ -266,8 +305,8 @@ type VM struct { shutdownChan chan struct{} shutdownWg sync.WaitGroup - fx secp256k1fx.Fx - secpFactory secp256k1.Factory + fx secp256k1fx.Fx + secpCache secp256k1.RecoverCache // Continuous Profiler profiler profiler.ContinuousProfiler @@ -276,8 +315,11 @@ type VM struct { client peer.NetworkClient networkCodec codec.Manager + validators *p2p.Validators + // Metrics multiGatherer avalanchegoMetrics.MultiGatherer + sdkMetrics *prometheus.Registry bootstrapped bool IsPlugin bool @@ -286,6 +328,19 @@ type VM struct { // State sync server and client StateSyncServer StateSyncClient + + // Avalanche Warp Messaging backend + // Used to serve BLS signatures of warp messages over RPC + warpBackend warp.Backend + + // Initialize only sets these if nil so they can be overridden in tests + p2pSender commonEng.AppSender + ethTxGossipHandler p2p.Handler + atomicTxGossipHandler p2p.Handler + ethTxPullGossiper gossip.Gossiper + atomicTxPullGossiper gossip.Gossiper + ethTxPushGossiper gossip.Accumulator[*GossipEthTx] + atomicTxPushGossiper gossip.Accumulator[*GossipAtomicTx] } // Codec implements the secp256k1fx interface @@ -308,14 +363,14 @@ func (vm *VM) Logger() logging.Logger { return vm.ctx.Log } // implements SnowmanPlusPlusVM interface func (vm *VM) GetActivationTime() time.Time { - return time.Unix(vm.chainConfig.ApricotPhase4BlockTimestamp.Int64(), 0) + return utils.Uint64ToTime(vm.chainConfig.ApricotPhase4BlockTimestamp) } // Initialize implements the snowman.ChainVM interface func (vm *VM) Initialize( _ context.Context, chainCtx *snow.Context, - dbManager manager.Manager, + db database.Database, genesisBytes []byte, upgradeBytes []byte, configBytes []byte, @@ -332,12 +387,17 @@ func (vm *VM) Initialize( if err 
:= vm.config.Validate(); err != nil { return err } + // We should deprecate config flags as the first thing, before we do anything else + // because this can set old flags to new flags. log the message after we have + // initialized the logger. + deprecateMsg := vm.config.Deprecate() vm.ctx = chainCtx // Create logger alias, err := vm.ctx.BCLookup.PrimaryAlias(vm.ctx.ChainID) if err != nil { + // fallback to ChainID string instead of erroring alias = vm.ctx.ChainID.String() } @@ -354,6 +414,10 @@ func (vm *VM) Initialize( log.Info("Initializing Coreth VM", "Version", Version, "Config", vm.config) + if deprecateMsg != "" { + log.Warn("Deprecation Warning", "msg", deprecateMsg) + } + if len(fxs) > 0 { return errUnsupportedFXs } @@ -363,13 +427,16 @@ func (vm *VM) Initialize( vm.toEngine = toEngine vm.shutdownChan = make(chan struct{}, 1) - baseDB := dbManager.Current().Database // Use NewNested rather than New so that the structure of the database // remains the same regardless of the provided baseDB type. - vm.chaindb = Database{prefixdb.NewNested(ethDBPrefix, baseDB)} - vm.db = versiondb.New(baseDB) + vm.chaindb = rawdb.NewDatabase(Database{prefixdb.NewNested(ethDBPrefix, db)}) + vm.db = versiondb.New(db) vm.acceptedBlockDB = prefixdb.New(acceptedPrefix, vm.db) vm.metadataDB = prefixdb.New(metadataPrefix, vm.db) + // Note warpDB is not part of versiondb because it is not necessary + // that warp signatures are committed to the database atomically with + // the last accepted block. 
+ vm.warpDB = prefixdb.New(warpPrefix, db) if vm.config.InspectDatabase { start := time.Now() @@ -389,7 +456,8 @@ func (vm *VM) Initialize( // Set the chain config for mainnet/fuji chain IDs switch { case g.Config.ChainID.Cmp(params.AvalancheMainnetChainID) == 0: - g.Config = params.AvalancheMainnetChainConfig + config := *params.AvalancheMainnetChainConfig + g.Config = &config extDataHashes = mainnetExtDataHashes case g.Config.ChainID.Cmp(params.FlareChainID) == 0: g.Config = params.FlareChainConfig @@ -403,15 +471,19 @@ func (vm *VM) Initialize( g.Config = params.LocalFlareChainConfig case g.Config.ChainID.Cmp(params.LocalChainID) == 0: g.Config = params.LocalChainConfig - case g.Config.ChainID.Cmp(params.AvalancheFujiChainID) == 0: - g.Config = params.AvalancheFujiChainConfig - extDataHashes = fujiExtDataHashes case g.Config.ChainID.Cmp(params.AvalancheLocalChainID) == 0: - g.Config = params.AvalancheLocalChainConfig + config := *params.AvalancheLocalChainConfig + g.Config = &config + } + // If the Durango is activated, activate the Warp Precompile at the same time + if g.Config.DurangoBlockTimestamp != nil { + g.Config.PrecompileUpgrades = append(g.Config.PrecompileUpgrades, params.PrecompileUpgrade{ + Config: warpPrecompile.NewDefaultConfig(g.Config.DurangoBlockTimestamp), + }) } // Set the Avalanche Context on the ChainConfig g.Config.AvalancheContext = params.AvalancheContext{ - BlockchainID: common.Hash(chainCtx.ChainID), + SnowCtx: chainCtx, } vm.syntacticBlockValidator = NewBlockValidator(extDataHashes) @@ -439,7 +511,7 @@ func (vm *VM) Initialize( } vm.ethConfig.Genesis = g vm.ethConfig.NetworkId = vm.chainID.Uint64() - vm.genesisHash = vm.ethConfig.Genesis.ToBlock(nil).Hash() // must create genesis hash before [vm.readLastAccepted] + vm.genesisHash = vm.ethConfig.Genesis.ToBlock().Hash() // must create genesis hash before [vm.readLastAccepted] lastAcceptedHash, lastAcceptedHeight, err := vm.readLastAccepted() if err != nil { return err @@ -466,19 
+538,20 @@ func (vm *VM) Initialize( vm.ethConfig.AllowUnprotectedTxs = vm.config.AllowUnprotectedTxs vm.ethConfig.AllowUnprotectedTxHashes = vm.config.AllowUnprotectedTxHashes vm.ethConfig.Preimages = vm.config.Preimages + vm.ethConfig.Pruning = vm.config.Pruning vm.ethConfig.TrieCleanCache = vm.config.TrieCleanCache vm.ethConfig.TrieCleanJournal = vm.config.TrieCleanJournal vm.ethConfig.TrieCleanRejournal = vm.config.TrieCleanRejournal.Duration vm.ethConfig.TrieDirtyCache = vm.config.TrieDirtyCache vm.ethConfig.TrieDirtyCommitTarget = vm.config.TrieDirtyCommitTarget + vm.ethConfig.TriePrefetcherParallelism = vm.config.TriePrefetcherParallelism vm.ethConfig.SnapshotCache = vm.config.SnapshotCache - vm.ethConfig.Pruning = vm.config.Pruning vm.ethConfig.AcceptorQueueLimit = vm.config.AcceptorQueueLimit vm.ethConfig.PopulateMissingTries = vm.config.PopulateMissingTries vm.ethConfig.PopulateMissingTriesParallelism = vm.config.PopulateMissingTriesParallelism vm.ethConfig.AllowMissingTries = vm.config.AllowMissingTries vm.ethConfig.SnapshotDelayInit = vm.stateSyncEnabled(lastAcceptedHeight) - vm.ethConfig.SnapshotAsync = vm.config.SnapshotAsync + vm.ethConfig.SnapshotWait = vm.config.SnapshotWait vm.ethConfig.SnapshotVerify = vm.config.SnapshotVerify vm.ethConfig.OfflinePruning = vm.config.OfflinePruning vm.ethConfig.OfflinePruningBloomFilterSize = vm.config.OfflinePruningBloomFilterSize @@ -487,6 +560,7 @@ func (vm *VM) Initialize( vm.ethConfig.SkipUpgradeCheck = vm.config.SkipUpgradeCheck vm.ethConfig.AcceptedCacheSize = vm.config.AcceptedCacheSize vm.ethConfig.TxLookupLimit = vm.config.TxLookupLimit + vm.ethConfig.SkipTxIndexing = vm.config.SkipTxIndexing // Create directory for offline pruning if len(vm.ethConfig.OfflinePruningDataDirectory) != 0 { @@ -498,63 +572,110 @@ func (vm *VM) Initialize( vm.chainConfig = g.Config vm.networkID = vm.ethConfig.NetworkId - vm.secpFactory = secp256k1.Factory{ - Cache: cache.LRU[ids.ID, *secp256k1.PublicKey]{ - Size: 
secpFactoryCacheSize, + vm.secpCache = secp256k1.RecoverCache{ + LRU: cache.LRU[ids.ID, *secp256k1.PublicKey]{ + Size: secpCacheSize, }, } - vm.codec = Codec + if err := vm.chainConfig.Verify(); err != nil { + return fmt.Errorf("failed to verify chain config: %w", err) + } - // TODO: read size from settings - vm.mempool = NewMempool(chainCtx.AVAXAssetID, defaultMempoolSize) + vm.codec = Codec if err := vm.initializeMetrics(); err != nil { return err } + // TODO: read size from settings + vm.mempool, err = NewMempool(chainCtx, vm.sdkMetrics, defaultMempoolSize, vm.verifyTxAtTip) + if err != nil { + return fmt.Errorf("failed to initialize mempool: %w", err) + } + // initialize peer network + if vm.p2pSender == nil { + vm.p2pSender = appSender + } + + p2pNetwork, err := p2p.NewNetwork(vm.ctx.Log, vm.p2pSender, vm.sdkMetrics, "p2p") + if err != nil { + return fmt.Errorf("failed to initialize p2p network: %w", err) + } + vm.validators = p2p.NewValidators(p2pNetwork.Peers, vm.ctx.Log, vm.ctx.SubnetID, vm.ctx.ValidatorState, maxValidatorSetStaleness) vm.networkCodec = message.Codec - vm.Network = peer.NewNetwork(appSender, vm.networkCodec, message.CrossChainCodec, chainCtx.NodeID, vm.config.MaxOutboundActiveRequests, vm.config.MaxOutboundActiveCrossChainRequests) + vm.Network = peer.NewNetwork(p2pNetwork, appSender, vm.networkCodec, message.CrossChainCodec, chainCtx.NodeID, vm.config.MaxOutboundActiveRequests, vm.config.MaxOutboundActiveCrossChainRequests) vm.client = peer.NewNetworkClient(vm.Network) + // Initialize warp backend + offchainWarpMessages := make([][]byte, len(vm.config.WarpOffChainMessages)) + for i, hexMsg := range vm.config.WarpOffChainMessages { + offchainWarpMessages[i] = []byte(hexMsg) + } + vm.warpBackend, err = warp.NewBackend(vm.ctx.NetworkID, vm.ctx.ChainID, vm.ctx.WarpSigner, vm, vm.warpDB, warpSignatureCacheSize, offchainWarpMessages) + if err != nil { + return err + } + + // clear warpdb on initialization if config enabled + if 
vm.config.PruneWarpDB { + if err := vm.warpBackend.Clear(); err != nil { + return fmt.Errorf("failed to prune warpDB: %w", err) + } + } + if err := vm.initializeChain(lastAcceptedHash); err != nil { return err } // initialize bonus blocks on mainnet var ( - bonusBlockHeights map[uint64]ids.ID - canonicalBlockHeights []uint64 + bonusBlockHeights map[uint64]ids.ID + bonusBlockRepair map[uint64]*types.Block ) if vm.chainID.Cmp(params.AvalancheMainnetChainID) == 0 { bonusBlockHeights = bonusBlockMainnetHeights - canonicalBlockHeights = canonicalBlockMainnetHeights + bonusBlockRepair = mainnetBonusBlocksParsed } + defer func() { + // Free memory after VM is initialized + mainnetBonusBlocksParsed = nil + mainnetBonusBlocksJson = nil + }() // initialize atomic repository vm.atomicTxRepository, err = NewAtomicTxRepository( vm.db, vm.codec, lastAcceptedHeight, - bonusBlockHeights, canonicalBlockHeights, vm.getAtomicTxFromPreApricot5BlockByHeight, ) if err != nil { return fmt.Errorf("failed to create atomic repository: %w", err) } - vm.atomicBackend, err = NewAtomicBackend( - vm.db, vm.ctx.SharedMemory, bonusBlockHeights, vm.atomicTxRepository, lastAcceptedHeight, lastAcceptedHash, vm.config.CommitInterval, + vm.atomicBackend, _, err = NewAtomicBackendWithBonusBlockRepair( + vm.db, vm.ctx.SharedMemory, bonusBlockHeights, bonusBlockRepair, + vm.atomicTxRepository, lastAcceptedHeight, lastAcceptedHash, + vm.config.CommitInterval, ) if err != nil { return fmt.Errorf("failed to create atomic backend: %w", err) } vm.atomicTrie = vm.atomicBackend.AtomicTrie() + // Run the atomic trie height map repair in the background on mainnet/fuji + // TODO: remove after Durango + if vm.chainID.Cmp(params.AvalancheMainnetChainID) == 0 || + vm.chainID.Cmp(params.AvalancheFujiChainID) == 0 { + _, lastCommitted := vm.atomicTrie.LastCommitted() + go vm.atomicTrie.RepairHeightMap(lastCommitted) + } + go vm.ctx.Log.RecoverAndPanic(vm.startContinuousProfiler) // The Codec explicitly registers the 
types it requires from the secp256k1fx // so [vm.baseCodec] is a dummy codec use to fulfill the secp256k1fx VM // interface. The fx will register all of its types, which can be safely // ignored by the VM's codec. - vm.baseCodec = linearcodec.NewDefault() + vm.baseCodec = linearcodec.NewDefault(time.Time{}) if err := vm.fx.Initialize(vm); err != nil { return err @@ -565,6 +686,7 @@ func (vm *VM) Initialize( } func (vm *VM) initializeMetrics() error { + vm.sdkMetrics = prometheus.NewRegistry() vm.multiGatherer = avalanchegoMetrics.NewMultiGatherer() // If metrics are enabled, register the default metrics regitry if metrics.Enabled { @@ -572,6 +694,9 @@ func (vm *VM) initializeMetrics() error { if err := vm.multiGatherer.Register(ethMetricsPrefix, gatherer); err != nil { return err } + if err := vm.multiGatherer.Register("sdk", vm.sdkMetrics); err != nil { + return err + } // Register [multiGatherer] after registerers have been registered to it if err := vm.ctx.Metrics.Register(vm.multiGatherer); err != nil { return err @@ -603,7 +728,7 @@ func (vm *VM) initializeChain(lastAcceptedHash common.Hash) error { if err != nil { return err } - vm.eth.SetEtherbase(corethConstants.BlackholeAddr) + vm.eth.SetEtherbase(constants.BlackholeAddr) vm.txPool = vm.eth.TxPool() vm.blockChain = vm.eth.BlockChain() vm.miner = vm.eth.Miner() @@ -647,16 +772,17 @@ func (vm *VM) initializeStateSyncClient(lastAcceptedHeight uint64) error { IsSongbirdCode: vm.chainConfig.IsSongbirdCode(), }, ), - enabled: stateSyncEnabled, - skipResume: vm.config.StateSyncSkipResume, - stateSyncMinBlocks: vm.config.StateSyncMinBlocks, - lastAcceptedHeight: lastAcceptedHeight, // TODO clean up how this is passed around - chaindb: vm.chaindb, - metadataDB: vm.metadataDB, - acceptedBlockDB: vm.acceptedBlockDB, - db: vm.db, - atomicBackend: vm.atomicBackend, - toEngine: vm.toEngine, + enabled: stateSyncEnabled, + skipResume: vm.config.StateSyncSkipResume, + stateSyncMinBlocks: vm.config.StateSyncMinBlocks, + 
stateSyncRequestSize: vm.config.StateSyncRequestSize, + lastAcceptedHeight: lastAcceptedHeight, // TODO clean up how this is passed around + chaindb: vm.chaindb, + metadataDB: vm.metadataDB, + acceptedBlockDB: vm.acceptedBlockDB, + db: vm.db, + atomicBackend: vm.atomicBackend, + toEngine: vm.toEngine, }) // If StateSync is disabled, clear any ongoing summary so that we will not attempt to resume @@ -688,14 +814,16 @@ func (vm *VM) initChainState(lastAcceptedBlock *types.Block) error { block.status = choices.Accepted config := &chain.Config{ - DecidedCacheSize: decidedCacheSize, - MissingCacheSize: missingCacheSize, - UnverifiedCacheSize: unverifiedCacheSize, - GetBlockIDAtHeight: vm.GetBlockIDAtHeight, - GetBlock: vm.getBlock, - UnmarshalBlock: vm.parseBlock, - BuildBlock: vm.buildBlock, - LastAcceptedBlock: block, + DecidedCacheSize: decidedCacheSize, + MissingCacheSize: missingCacheSize, + UnverifiedCacheSize: unverifiedCacheSize, + BytesToIDCacheSize: bytesToIDCacheSize, + GetBlockIDAtHeight: vm.GetBlockIDAtHeight, + GetBlock: vm.getBlock, + UnmarshalBlock: vm.parseBlock, + BuildBlock: vm.buildBlock, + BuildBlockWithContext: vm.buildBlockWithContext, + LastAcceptedBlock: block, } // Register chain state metrics @@ -709,8 +837,8 @@ func (vm *VM) initChainState(lastAcceptedBlock *types.Block) error { return vm.multiGatherer.Register(chainStateMetricsPrefix, chainStateRegisterer) } -func (vm *VM) createConsensusCallbacks() *dummy.ConsensusCallbacks { - return &dummy.ConsensusCallbacks{ +func (vm *VM) createConsensusCallbacks() dummy.ConsensusCallbacks { + return dummy.ConsensusCallbacks{ OnFinalizeAndAssemble: vm.onFinalizeAndAssemble, OnExtraStateChange: vm.onExtraStateChange, } @@ -727,9 +855,10 @@ func (vm *VM) preBatchOnFinalizeAndAssemble(header *types.Header, state *state.S // Note: snapshot is taken inside the loop because you cannot revert to the same snapshot more than // once. 
snapshot := state.Snapshot() - rules := vm.chainConfig.AvalancheRules(header.Number, new(big.Int).SetUint64(header.Time)) + rules := vm.chainConfig.AvalancheRules(header.Number, header.Time) if err := vm.verifyTx(tx, header.ParentHash, header.BaseFee, state, rules); err != nil { // Discard the transaction from the mempool on failed verification. + log.Debug("discarding tx from mempool on failed verification", "txID", tx.ID(), "err", err) vm.mempool.DiscardCurrentTx(tx.ID()) state.RevertToSnapshot(snapshot) continue @@ -739,6 +868,7 @@ func (vm *VM) preBatchOnFinalizeAndAssemble(header *types.Header, state *state.S if err != nil { // Discard the transaction from the mempool and error if the transaction // cannot be marshalled. This should never happen. + log.Debug("discarding tx due to unmarshal err", "txID", tx.ID(), "err", err) vm.mempool.DiscardCurrentTx(tx.ID()) return nil, nil, nil, fmt.Errorf("failed to marshal atomic transaction %s due to %w", tx.ID(), err) } @@ -767,7 +897,7 @@ func (vm *VM) postBatchOnFinalizeAndAssemble(header *types.Header, state *state. batchAtomicUTXOs set.Set[ids.ID] batchContribution *big.Int = new(big.Int).Set(common.Big0) batchGasUsed *big.Int = new(big.Int).Set(common.Big0) - rules = vm.chainConfig.AvalancheRules(header.Number, new(big.Int).SetUint64(header.Time)) + rules = vm.chainConfig.AvalancheRules(header.Number, header.Time) size int ) @@ -810,6 +940,7 @@ func (vm *VM) postBatchOnFinalizeAndAssemble(header *types.Header, state *state. // valid, but we discard it early here based on the assumption that the proposed // block will most likely be accepted. // Discard the transaction from the mempool on failed verification. + log.Debug("discarding tx due to overlapping input utxos", "txID", tx.ID()) vm.mempool.DiscardCurrentTx(tx.ID()) continue } @@ -820,6 +951,7 @@ func (vm *VM) postBatchOnFinalizeAndAssemble(header *types.Header, state *state. // if it fails verification here. 
// Note: prior to this point, we have not modified [state] so there is no need to // revert to a snapshot if we discard the transaction prior to this point. + log.Debug("discarding tx from mempool due to failed verification", "txID", tx.ID(), "err", err) vm.mempool.DiscardCurrentTx(tx.ID()) state.RevertToSnapshot(snapshot) continue @@ -840,6 +972,7 @@ func (vm *VM) postBatchOnFinalizeAndAssemble(header *types.Header, state *state. if err != nil { // If we fail to marshal the batch of atomic transactions for any reason, // discard the entire set of current transactions. + log.Debug("discarding txs due to error marshaling atomic transactions", "err", err) vm.mempool.DiscardCurrentTxs() return nil, nil, nil, fmt.Errorf("failed to marshal batch of atomic transactions due to %w", err) } @@ -859,7 +992,7 @@ func (vm *VM) postBatchOnFinalizeAndAssemble(header *types.Header, state *state. } func (vm *VM) onFinalizeAndAssemble(header *types.Header, state *state.StateDB, txs []*types.Transaction) ([]byte, *big.Int, *big.Int, error) { - if !vm.chainConfig.IsApricotPhase5(new(big.Int).SetUint64(header.Time)) { + if !vm.chainConfig.IsApricotPhase5(header.Time) { return vm.preBatchOnFinalizeAndAssemble(header, state, txs) } return vm.postBatchOnFinalizeAndAssemble(header, state, txs) @@ -870,7 +1003,7 @@ func (vm *VM) onExtraStateChange(block *types.Block, state *state.StateDB) (*big batchContribution *big.Int = big.NewInt(0) batchGasUsed *big.Int = big.NewInt(0) header = block.Header() - rules = vm.chainConfig.AvalancheRules(header.Number, new(big.Int).SetUint64(header.Time)) + rules = vm.chainConfig.AvalancheRules(header.Number, header.Time) ) txs, err := ExtractAtomicTxs(block.ExtData(), rules.IsApricotPhase5, vm.codec) @@ -889,6 +1022,9 @@ func (vm *VM) onExtraStateChange(block *types.Block, state *state.StateDB) (*big } } // Update the atomic backend with [txs] from this block. 
+ // + // Note: The atomic trie canonically contains the duplicate operations + // from any bonus blocks. _, err := vm.atomicBackend.InsertTxs(block.Hash(), block.NumberU64(), block.ParentHash(), txs) if err != nil { return nil, nil, err @@ -940,7 +1076,9 @@ func (vm *VM) SetState(_ context.Context, state snow.State) error { return vm.fx.Bootstrapping() case snow.NormalOp: // Initialize goroutines related to block building once we enter normal operation as there is no need to handle mempool gossip before this point. - vm.initBlockBuilding() + if err := vm.initBlockBuilding(); err != nil { + return fmt.Errorf("failed to initialize block building: %w", err) + } vm.bootstrapped = true return vm.fx.Bootstrapped() default: @@ -949,13 +1087,142 @@ func (vm *VM) SetState(_ context.Context, state snow.State) error { } // initBlockBuilding starts goroutines to manage block building -func (vm *VM) initBlockBuilding() { +func (vm *VM) initBlockBuilding() error { + ctx, cancel := context.WithCancel(context.TODO()) + vm.cancel = cancel + + ethTxGossipMarshaller := GossipEthTxMarshaller{} + atomicTxGossipMarshaller := GossipAtomicTxMarshaller{} + + ethTxGossipClient := vm.Network.NewClient(ethTxGossipProtocol, p2p.WithValidatorSampling(vm.validators)) + atomicTxGossipClient := vm.Network.NewClient(atomicTxGossipProtocol, p2p.WithValidatorSampling(vm.validators)) + + ethTxGossipMetrics, err := gossip.NewMetrics(vm.sdkMetrics, ethTxGossipNamespace) + if err != nil { + return fmt.Errorf("failed to initialize eth tx gossip metrics: %w", err) + } + + atomicTxGossipMetrics, err := gossip.NewMetrics(vm.sdkMetrics, atomicTxGossipNamespace) + if err != nil { + return fmt.Errorf("failed to initialize atomic tx gossip metrics: %w", err) + } + + if vm.ethTxPushGossiper == nil { + vm.ethTxPushGossiper = gossip.NewPushGossiper[*GossipEthTx]( + ethTxGossipMarshaller, + ethTxGossipClient, + ethTxGossipMetrics, + txGossipTargetMessageSize, + ) + } + + if vm.atomicTxPushGossiper == nil { + 
vm.atomicTxPushGossiper = gossip.NewPushGossiper[*GossipAtomicTx]( + atomicTxGossipMarshaller, + atomicTxGossipClient, + atomicTxGossipMetrics, + txGossipTargetMessageSize, + ) + } + // NOTE: gossip network must be initialized first otherwise ETH tx gossip will not work. gossipStats := NewGossipStats() - vm.gossiper = vm.createGossiper(gossipStats) + vm.gossiper = vm.createGossiper(gossipStats, vm.ethTxPushGossiper, vm.atomicTxPushGossiper) vm.builder = vm.NewBlockBuilder(vm.toEngine) vm.builder.awaitSubmittedTxs() vm.Network.SetGossipHandler(NewGossipHandler(vm, gossipStats)) + + ethTxPool, err := NewGossipEthTxPool(vm.txPool, vm.sdkMetrics) + if err != nil { + return err + } + vm.shutdownWg.Add(1) + go func() { + ethTxPool.Subscribe(ctx) + vm.shutdownWg.Done() + }() + + if vm.ethTxGossipHandler == nil { + vm.ethTxGossipHandler = newTxGossipHandler[*GossipEthTx]( + vm.ctx.Log, + ethTxGossipMarshaller, + ethTxPool, + ethTxGossipMetrics, + txGossipTargetMessageSize, + txGossipThrottlingPeriod, + txGossipThrottlingLimit, + vm.validators, + ) + } + + if err := vm.Network.AddHandler(ethTxGossipProtocol, vm.ethTxGossipHandler); err != nil { + return err + } + + if vm.atomicTxGossipHandler == nil { + vm.atomicTxGossipHandler = newTxGossipHandler[*GossipAtomicTx]( + vm.ctx.Log, + atomicTxGossipMarshaller, + vm.mempool, + atomicTxGossipMetrics, + txGossipTargetMessageSize, + txGossipThrottlingPeriod, + txGossipThrottlingLimit, + vm.validators, + ) + } + + if err := vm.Network.AddHandler(atomicTxGossipProtocol, vm.atomicTxGossipHandler); err != nil { + return err + } + + if vm.ethTxPullGossiper == nil { + ethTxPullGossiper := gossip.NewPullGossiper[*GossipEthTx]( + vm.ctx.Log, + ethTxGossipMarshaller, + ethTxPool, + ethTxGossipClient, + ethTxGossipMetrics, + txGossipPollSize, + ) + + vm.ethTxPullGossiper = gossip.ValidatorGossiper{ + Gossiper: ethTxPullGossiper, + NodeID: vm.ctx.NodeID, + Validators: vm.validators, + } + } + + vm.shutdownWg.Add(1) + go func() { + 
gossip.Every(ctx, vm.ctx.Log, vm.ethTxPullGossiper, gossipFrequency) + vm.shutdownWg.Done() + }() + + if vm.atomicTxPullGossiper == nil { + atomicTxPullGossiper := gossip.NewPullGossiper[*GossipAtomicTx]( + vm.ctx.Log, + atomicTxGossipMarshaller, + vm.mempool, + atomicTxGossipClient, + atomicTxGossipMetrics, + txGossipPollSize, + ) + + vm.atomicTxPullGossiper = &gossip.ValidatorGossiper{ + Gossiper: atomicTxPullGossiper, + NodeID: vm.ctx.NodeID, + Validators: vm.validators, + } + } + + vm.shutdownWg.Add(1) + go func() { + gossip.Every(ctx, vm.ctx.Log, vm.atomicTxPullGossiper, gossipFrequency) + vm.shutdownWg.Done() + }() + + return nil } // setAppRequestHandlers sets the request handlers for the VM to serve state sync @@ -970,15 +1237,15 @@ func (vm *VM) setAppRequestHandlers() { Cache: vm.config.StateSyncServerTrieCache, }, ) - syncRequestHandler := handlers.NewSyncHandler( + networkHandler := newNetworkHandler( vm.blockChain, vm.chaindb, evmTrieDB, vm.atomicTrie.TrieDB(), + vm.warpBackend, vm.networkCodec, - handlerstats.NewHandlerStats(metrics.Enabled), ) - vm.Network.SetRequestHandler(syncRequestHandler) + vm.Network.SetRequestHandler(networkHandler) } // setCrossChainAppRequestHandler sets the request handlers for the VM to serve cross chain @@ -993,6 +1260,9 @@ func (vm *VM) Shutdown(context.Context) error { if vm.ctx == nil { return nil } + if vm.cancel != nil { + vm.cancel() + } vm.Network.Shutdown() if err := vm.StateSyncClient.Shutdown(); err != nil { log.Error("error stopping state syncer", "err", err) @@ -1004,8 +1274,22 @@ func (vm *VM) Shutdown(context.Context) error { } // buildBlock builds a block to be wrapped by ChainState -func (vm *VM) buildBlock(_ context.Context) (snowman.Block, error) { - block, err := vm.miner.GenerateBlock() +func (vm *VM) buildBlock(ctx context.Context) (snowman.Block, error) { + return vm.buildBlockWithContext(ctx, nil) +} + +func (vm *VM) buildBlockWithContext(ctx context.Context, proposerVMBlockCtx *block.Context) 
(snowman.Block, error) { + if proposerVMBlockCtx != nil { + log.Debug("Building block with context", "pChainBlockHeight", proposerVMBlockCtx.PChainHeight) + } else { + log.Debug("Building block without context") + } + predicateCtx := &precompileconfig.PredicateContext{ + SnowCtx: vm.ctx, + ProposerVMBlockCtx: proposerVMBlockCtx, + } + + block, err := vm.miner.GenerateBlock(predicateCtx) vm.builder.handleGenerateBlock() if err != nil { vm.mempool.CancelCurrentTxs() @@ -1015,12 +1299,10 @@ func (vm *VM) buildBlock(_ context.Context) (snowman.Block, error) { // Note: the status of block is set by ChainState blk, err := vm.newBlock(block) if err != nil { + log.Debug("discarding txs due to error making new block", "err", err) vm.mempool.DiscardCurrentTxs() return nil, err } - if err != nil { - return nil, err - } // Verify is called on a non-wrapped block here, such that this // does not add [blk] to the processing blocks map in ChainState. @@ -1034,7 +1316,7 @@ func (vm *VM) buildBlock(_ context.Context) (snowman.Block, error) { // We call verify without writes here to avoid generating a reference // to the blk state root in the triedb when we are going to call verify // again from the consensus engine with writes enabled. - if err := blk.verify(false /*=writes*/); err != nil { + if err := blk.verify(predicateCtx, false /*=writes*/); err != nil { vm.mempool.CancelCurrentTxs() return nil, fmt.Errorf("block failed verification due to: %w", err) } @@ -1107,8 +1389,7 @@ func (vm *VM) VerifyHeightIndex(context.Context) error { return nil } -// GetBlockAtHeight implements the HeightIndexedChainVM interface and returns the -// canonical block at [blkHeight]. +// GetBlockAtHeight returns the canonical block at [blkHeight]. // If [blkHeight] is less than the height of the last accepted block, this will return // the block accepted at that height. Otherwise, it may return a blkID that has not yet // been accepted. 
@@ -1132,26 +1413,15 @@ func (vm *VM) Version(context.Context) (string, error) { // - The handler's functionality is defined by [service] // [service] should be a gorilla RPC service (see https://www.gorillatoolkit.org/pkg/rpc/v2) // - The name of the service is [name] -// - The LockOption is the first element of [lockOption] -// By default the LockOption is WriteLock -// [lockOption] should have either 0 or 1 elements. Elements beside the first are ignored. -func newHandler(name string, service interface{}, lockOption ...commonEng.LockOption) (*commonEng.HTTPHandler, error) { +func newHandler(name string, service interface{}) (http.Handler, error) { server := avalancheRPC.NewServer() server.RegisterCodec(avalancheJSON.NewCodec(), "application/json") server.RegisterCodec(avalancheJSON.NewCodec(), "application/json;charset=UTF-8") - if err := server.RegisterService(service, name); err != nil { - return nil, err - } - - var lock commonEng.LockOption = commonEng.WriteLock - if len(lockOption) != 0 { - lock = lockOption[0] - } - return &commonEng.HTTPHandler{LockOptions: lock, Handler: server}, nil + return server, server.RegisterService(service, name) } // CreateHandlers makes new http handlers that can handle API calls -func (vm *VM) CreateHandlers(context.Context) (map[string]*commonEng.HTTPHandler, error) { +func (vm *VM) CreateHandlers(context.Context) (map[string]http.Handler, error) { handler := rpc.NewServer(vm.config.APIMaxDuration.Duration) enabledAPIs := vm.config.EthAPIs() if err := attachEthService(handler, vm.eth.APIs(), enabledAPIs); err != nil { @@ -1162,7 +1432,7 @@ func (vm *VM) CreateHandlers(context.Context) (map[string]*commonEng.HTTPHandler if err != nil { return nil, fmt.Errorf("failed to get primary alias for chain due to %w", err) } - apis := make(map[string]*commonEng.HTTPHandler) + apis := make(map[string]http.Handler) avaxAPI, err := newHandler("avax", &AvaxAPI{vm}) if err != nil { return nil, fmt.Errorf("failed to register service for AVAX 
API due to %w", err) @@ -1170,8 +1440,8 @@ func (vm *VM) CreateHandlers(context.Context) (map[string]*commonEng.HTTPHandler enabledAPIs = append(enabledAPIs, "avax") apis[avaxEndpoint] = avaxAPI - if vm.config.CorethAdminAPIEnabled { - adminAPI, err := newHandler("admin", NewAdminService(vm, os.ExpandEnv(fmt.Sprintf("%s_coreth_performance_%s", vm.config.CorethAdminAPIDir, primaryAlias)))) + if vm.config.AdminAPIEnabled { + adminAPI, err := newHandler("admin", NewAdminService(vm, os.ExpandEnv(fmt.Sprintf("%s_coreth_performance_%s", vm.config.AdminAPIDir, primaryAlias)))) if err != nil { return nil, fmt.Errorf("failed to register service for admin API due to %w", err) } @@ -1186,33 +1456,35 @@ func (vm *VM) CreateHandlers(context.Context) (map[string]*commonEng.HTTPHandler enabledAPIs = append(enabledAPIs, "snowman") } - log.Info(fmt.Sprintf("Enabled APIs: %s", strings.Join(enabledAPIs, ", "))) - apis[ethRPCEndpoint] = &commonEng.HTTPHandler{ - LockOptions: commonEng.NoLock, - Handler: handler, - } - apis[ethWSEndpoint] = &commonEng.HTTPHandler{ - LockOptions: commonEng.NoLock, - Handler: handler.WebsocketHandlerWithDuration( - []string{"*"}, - vm.config.APIMaxDuration.Duration, - vm.config.WSCPURefillRate.Duration, - vm.config.WSCPUMaxStored.Duration, - ), + if vm.config.WarpAPIEnabled { + validatorsState := warpValidators.NewState(vm.ctx) + if err := handler.RegisterName("warp", warp.NewAPI(vm.ctx.NetworkID, vm.ctx.SubnetID, vm.ctx.ChainID, validatorsState, vm.warpBackend, vm.client)); err != nil { + return nil, err + } + enabledAPIs = append(enabledAPIs, "warp") } + log.Info(fmt.Sprintf("Enabled APIs: %s", strings.Join(enabledAPIs, ", "))) + apis[ethRPCEndpoint] = handler + apis[ethWSEndpoint] = handler.WebsocketHandlerWithDuration( + []string{"*"}, + vm.config.APIMaxDuration.Duration, + vm.config.WSCPURefillRate.Duration, + vm.config.WSCPUMaxStored.Duration, + ) + return apis, nil } // CreateStaticHandlers makes new http handlers that can handle API calls -func 
(vm *VM) CreateStaticHandlers(context.Context) (map[string]*commonEng.HTTPHandler, error) { +func (vm *VM) CreateStaticHandlers(context.Context) (map[string]http.Handler, error) { handler := rpc.NewServer(0) if err := handler.RegisterName("static", &StaticService{}); err != nil { return nil, err } - return map[string]*commonEng.HTTPHandler{ - "/rpc": {LockOptions: commonEng.NoLock, Handler: handler}, + return map[string]http.Handler{ + "/rpc": handler, }, nil } @@ -1295,7 +1567,7 @@ func (vm *VM) ParseAddress(addrStr string) (ids.ID, ids.ShortID, error) { return ids.ID{}, ids.ShortID{}, err } - expectedHRP := constants.GetHRP(vm.ctx.NetworkID) + expectedHRP := avalanchegoConstants.GetHRP(vm.ctx.NetworkID) if hrp != expectedHRP { return ids.ID{}, ids.ShortID{}, fmt.Errorf("expected hrp %q but got %q", expectedHRP, hrp) @@ -1308,64 +1580,40 @@ func (vm *VM) ParseAddress(addrStr string) (ids.ID, ids.ShortID, error) { return chainID, addr, nil } -// issueTx verifies [tx] as valid to be issued on top of the currently preferred block -// and then issues [tx] into the mempool if valid. 
-func (vm *VM) issueTx(tx *Tx, local bool) error { - if err := vm.verifyTxAtTip(tx); err != nil { - if !local { - // unlike local txs, invalid remote txs are recorded as discarded - // so that they won't be requested again - txID := tx.ID() - vm.mempool.discardedTxs.Put(txID, tx) - log.Debug("failed to verify remote tx being issued to the mempool", - "txID", txID, - "err", err, - ) - return nil - } - return err +// verifyTxAtTip verifies that [tx] is valid to be issued on top of the currently preferred block +func (vm *VM) verifyTxAtTip(tx *Tx) error { + if txByteLen := len(tx.SignedBytes()); txByteLen > targetAtomicTxsSize { + return fmt.Errorf("tx size (%d) exceeds total atomic txs size target (%d)", txByteLen, targetAtomicTxsSize) } - // add to mempool and possibly re-gossip - if err := vm.mempool.AddTx(tx); err != nil { - if !local { - // unlike local txs, invalid remote txs are recorded as discarded - // so that they won't be requested again - txID := tx.ID() - vm.mempool.discardedTxs.Put(tx.ID(), tx) - log.Debug("failed to issue remote tx to mempool", - "txID", txID, - "err", err, - ) - return nil - } + gasUsed, err := tx.GasUsed(true) + if err != nil { return err } - // NOTE: Gossiping of the issued [Tx] is handled in [AddTx] - return nil -} + if new(big.Int).SetUint64(gasUsed).Cmp(params.AtomicGasLimit) > 0 { + return fmt.Errorf("tx gas usage (%d) exceeds atomic gas limit (%d)", gasUsed, params.AtomicGasLimit.Uint64()) + } -// verifyTxAtTip verifies that [tx] is valid to be issued on top of the currently preferred block -func (vm *VM) verifyTxAtTip(tx *Tx) error { // Note: we fetch the current block and then the state at that block instead of the current state directly // since we need the header of the current block below. 
preferredBlock := vm.blockChain.CurrentBlock() - preferredState, err := vm.blockChain.StateAt(preferredBlock.Root()) + preferredState, err := vm.blockChain.StateAt(preferredBlock.Root) if err != nil { return fmt.Errorf("failed to retrieve block state at tip while verifying atomic tx: %w", err) } rules := vm.currentRules() - parentHeader := preferredBlock.Header() + parentHeader := preferredBlock var nextBaseFee *big.Int - timestamp := vm.clock.Time().Unix() - bigTimestamp := big.NewInt(timestamp) - if vm.chainConfig.IsApricotPhase3(bigTimestamp) { - _, nextBaseFee, err = dummy.EstimateNextBaseFee(vm.chainConfig, parentHeader, uint64(timestamp)) + timestamp := uint64(vm.clock.Time().Unix()) + if vm.chainConfig.IsApricotPhase3(timestamp) { + _, nextBaseFee, err = dummy.EstimateNextBaseFee(vm.chainConfig, parentHeader, timestamp) if err != nil { // Return extremely detailed error since CalcBaseFee should never encounter an issue here return fmt.Errorf("failed to calculate base fee with parent timestamp (%d), parent ExtraData: (0x%x), and current timestamp (%d): %w", parentHeader.Time, parentHeader.Extra, timestamp, err) } } + // We don’t need to revert the state here in case verifyTx errors, because + // [preferredState] is thrown away either way. 
return vm.verifyTx(tx, parentHeader.Hash(), nextBaseFee, preferredState, rules) } @@ -1559,7 +1807,7 @@ func (vm *VM) GetSpendableAVAXWithFee( return nil, nil, err } - initialFee, err := calculateDynamicFee(cost, baseFee) + initialFee, err := CalculateDynamicFee(cost, baseFee) if err != nil { return nil, nil, err } @@ -1579,13 +1827,13 @@ func (vm *VM) GetSpendableAVAXWithFee( break } - prevFee, err := calculateDynamicFee(cost, baseFee) + prevFee, err := CalculateDynamicFee(cost, baseFee) if err != nil { return nil, nil, err } newCost := cost + EVMInputGas - newFee, err := calculateDynamicFee(newCost, baseFee) + newFee, err := CalculateDynamicFee(newCost, baseFee) if err != nil { return nil, nil, err } @@ -1653,7 +1901,7 @@ func (vm *VM) GetCurrentNonce(address common.Address) (uint64, error) { // currentRules returns the chain rules for the current block. func (vm *VM) currentRules() params.Rules { header := vm.eth.APIBackend.CurrentHeader() - return vm.chainConfig.AvalancheRules(header.Number, big.NewInt(int64(header.Time))) + return vm.chainConfig.AvalancheRules(header.Number, header.Time) } func (vm *VM) startContinuousProfiler() { diff --git a/coreth/plugin/evm/vm_test.go b/coreth/plugin/evm/vm_test.go index 73d99b33..5da6449e 100644 --- a/coreth/plugin/evm/vm_test.go +++ b/coreth/plugin/evm/vm_test.go @@ -12,7 +12,6 @@ import ( "math/big" "os" "path/filepath" - "sort" "strings" "testing" "time" @@ -23,17 +22,20 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rlp" + "github.com/ava-labs/coreth/eth/filters" "github.com/ava-labs/coreth/internal/ethapi" "github.com/ava-labs/coreth/metrics" "github.com/ava-labs/coreth/plugin/evm/message" "github.com/ava-labs/coreth/trie" + "github.com/ava-labs/coreth/utils" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/api/keystore" "github.com/ava-labs/avalanchego/chains/atomic" - "github.com/ava-labs/avalanchego/database/manager" + 
"github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" @@ -41,18 +43,18 @@ import ( "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils/cb58" "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/formatting" "github.com/ava-labs/avalanchego/utils/hashing" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/units" - "github.com/ava-labs/avalanchego/version" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/chain" "github.com/ava-labs/avalanchego/vms/secp256k1fx" - engCommon "github.com/ava-labs/avalanchego/snow/engine/common" + commonEng "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/coreth/consensus/dummy" "github.com/ava-labs/coreth/core" @@ -61,6 +63,7 @@ import ( "github.com/ava-labs/coreth/params" "github.com/ava-labs/coreth/rpc" + avalancheWarp "github.com/ava-labs/avalanchego/vms/platformvm/warp" "github.com/ava-labs/coreth/accounts/abi" accountKeystore "github.com/ava-labs/coreth/accounts/keystore" ) @@ -91,7 +94,8 @@ var ( genesisJSONBanff = 
"{\"config\":{\"chainId\":43111,\"homesteadBlock\":0,\"daoForkBlock\":0,\"daoForkSupport\":true,\"eip150Block\":0,\"eip150Hash\":\"0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0\",\"eip155Block\":0,\"eip158Block\":0,\"byzantiumBlock\":0,\"constantinopleBlock\":0,\"petersburgBlock\":0,\"istanbulBlock\":0,\"muirGlacierBlock\":0,\"apricotPhase1BlockTimestamp\":0,\"apricotPhase2BlockTimestamp\":0,\"apricotPhase3BlockTimestamp\":0,\"apricotPhase4BlockTimestamp\":0,\"apricotPhase5BlockTimestamp\":0,\"apricotPhasePre6BlockTimestamp\":0,\"apricotPhase6BlockTimestamp\":0,\"apricotPhasePost6BlockTimestamp\":0,\"banffBlockTimestamp\":0},\"nonce\":\"0x0\",\"timestamp\":\"0x0\",\"extraData\":\"0x00\",\"gasLimit\":\"0x5f5e100\",\"difficulty\":\"0x0\",\"mixHash\":\"0x0000000000000000000000000000000000000000000000000000000000000000\",\"coinbase\":\"0x0000000000000000000000000000000000000000\",\"alloc\":{\"0100000000000000000000000000000000000000\":{\"code\":\"0x7300000000000000000000000000000000000000003014608060405260043610603d5760003560e01c80631e010439146042578063b6510bb314606e575b600080fd5b605c60048036036020811015605657600080fd5b503560b1565b60408051918252519081900360200190f35b818015607957600080fd5b5060af60048036036080811015608e57600080fd5b506001600160a01b03813516906020810135906040810135906060013560b6565b005b30cd90565b836001600160a01b031681836108fc8690811502906040516000604051808303818888878c8acf9550505050505015801560f4573d6000803e3d6000fd5b505050505056fea26469706673582212201eebce970fe3f5cb96bf8ac6ba5f5c133fc2908ae3dcd51082cfee8f583429d064736f6c634300060a0033\",\"balance\":\"0x0\"}},\"number\":\"0x0\",\"gasUsed\":\"0x0\",\"parentHash\":\"0x0000000000000000000000000000000000000000000000000000000000000000\"}" genesisJSONCortina = 
"{\"config\":{\"chainId\":43111,\"homesteadBlock\":0,\"daoForkBlock\":0,\"daoForkSupport\":true,\"eip150Block\":0,\"eip150Hash\":\"0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0\",\"eip155Block\":0,\"eip158Block\":0,\"byzantiumBlock\":0,\"constantinopleBlock\":0,\"petersburgBlock\":0,\"istanbulBlock\":0,\"muirGlacierBlock\":0,\"apricotPhase1BlockTimestamp\":0,\"apricotPhase2BlockTimestamp\":0,\"apricotPhase3BlockTimestamp\":0,\"apricotPhase4BlockTimestamp\":0,\"apricotPhase5BlockTimestamp\":0,\"apricotPhasePre6BlockTimestamp\":0,\"apricotPhase6BlockTimestamp\":0,\"apricotPhasePost6BlockTimestamp\":0,\"banffBlockTimestamp\":0,\"cortinaBlockTimestamp\":0},\"nonce\":\"0x0\",\"timestamp\":\"0x0\",\"extraData\":\"0x00\",\"gasLimit\":\"0x5f5e100\",\"difficulty\":\"0x0\",\"mixHash\":\"0x0000000000000000000000000000000000000000000000000000000000000000\",\"coinbase\":\"0x0000000000000000000000000000000000000000\",\"alloc\":{\"0100000000000000000000000000000000000000\":{\"code\":\"0x7300000000000000000000000000000000000000003014608060405260043610603d5760003560e01c80631e010439146042578063b6510bb314606e575b600080fd5b605c60048036036020811015605657600080fd5b503560b1565b60408051918252519081900360200190f35b818015607957600080fd5b5060af60048036036080811015608e57600080fd5b506001600160a01b03813516906020810135906040810135906060013560b6565b005b30cd90565b836001600160a01b031681836108fc8690811502906040516000604051808303818888878c8acf9550505050505015801560f4573d6000803e3d6000fd5b505050505056fea26469706673582212201eebce970fe3f5cb96bf8ac6ba5f5c133fc2908ae3dcd51082cfee8f583429d064736f6c634300060a0033\",\"balance\":\"0x0\"}},\"number\":\"0x0\",\"gasUsed\":\"0x0\",\"parentHash\":\"0x0000000000000000000000000000000000000000000000000000000000000000\"}" - genesisJSONLatest = genesisJSONCortina + genesisJSONDurango = 
"{\"config\":{\"chainId\":43111,\"homesteadBlock\":0,\"daoForkBlock\":0,\"daoForkSupport\":true,\"eip150Block\":0,\"eip150Hash\":\"0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0\",\"eip155Block\":0,\"eip158Block\":0,\"byzantiumBlock\":0,\"constantinopleBlock\":0,\"petersburgBlock\":0,\"istanbulBlock\":0,\"muirGlacierBlock\":0,\"apricotPhase1BlockTimestamp\":0,\"apricotPhase2BlockTimestamp\":0,\"apricotPhase3BlockTimestamp\":0,\"apricotPhase4BlockTimestamp\":0,\"apricotPhase5BlockTimestamp\":0,\"apricotPhasePre6BlockTimestamp\":0,\"apricotPhase6BlockTimestamp\":0,\"apricotPhasePost6BlockTimestamp\":0,\"banffBlockTimestamp\":0,\"cortinaBlockTimestamp\":0,\"durangoBlockTimestamp\":0},\"nonce\":\"0x0\",\"timestamp\":\"0x0\",\"extraData\":\"0x00\",\"gasLimit\":\"0x5f5e100\",\"difficulty\":\"0x0\",\"mixHash\":\"0x0000000000000000000000000000000000000000000000000000000000000000\",\"coinbase\":\"0x0000000000000000000000000000000000000000\",\"alloc\":{\"0x99b9DEA54C48Dfea6aA9A4Ca4623633EE04ddbB5\":{\"balance\":\"0x56bc75e2d63100000\"},\"0100000000000000000000000000000000000000\":{\"code\":\"0x7300000000000000000000000000000000000000003014608060405260043610603d5760003560e01c80631e010439146042578063b6510bb314606e575b600080fd5b605c60048036036020811015605657600080fd5b503560b1565b60408051918252519081900360200190f35b818015607957600080fd5b5060af60048036036080811015608e57600080fd5b506001600160a01b03813516906020810135906040810135906060013560b6565b005b30cd90565b836001600160a01b031681836108fc8690811502906040516000604051808303818888878c8acf9550505050505015801560f4573d6000803e3d6000fd5b505050505056fea26469706673582212201eebce970fe3f5cb96bf8ac6ba5f5c133fc2908ae3dcd51082cfee8f583429d064736f6c634300060a0033\",\"balance\":\"0x0\"}},\"number\":\"0x0\",\"gasUsed\":\"0x0\",\"parentHash\":\"0x0000000000000000000000000000000000000000000000000000000000000000\"}" + genesisJSONLatest = genesisJSONDurango apricotRulesPhase0 = params.Rules{} apricotRulesPhase1 = 
params.Rules{IsApricotPhase1: true} @@ -106,7 +110,6 @@ var ( func init() { var b []byte - factory := secp256k1.Factory{} for _, key := range []string{ "24jUJ9vZexUM6expyMcT48LBx27k1m7xpraoV62oSQAHdziao5", @@ -114,13 +117,31 @@ func init() { "cxb7KpGWhDMALTjNNSJ7UQkkomPesyWAPUaWRGdyeBNzR6f35", } { b, _ = cb58.Decode(key) - pk, _ := factory.ToPrivateKey(b) + pk, _ := secp256k1.ToPrivateKey(b) testKeys = append(testKeys, pk) testEthAddrs = append(testEthAddrs, GetEthAddress(pk)) testShortIDAddrs = append(testShortIDAddrs, pk.PublicKey().Address()) } } +func newPrefundedGenesis( + balance int, + addresses ...common.Address, +) *core.Genesis { + alloc := core.GenesisAlloc{} + for _, address := range addresses { + alloc[address] = core.GenesisAccount{ + Balance: big.NewInt(int64(balance)), + } + } + + return &core.Genesis{ + Config: params.TestChainConfig, + Difficulty: big.NewInt(0), + Alloc: alloc, + } +} + // BuildGenesisTest returns the genesis bytes for Coreth VM to be used in testing func BuildGenesisTest(t *testing.T, genesisJSON string) []byte { ss := StaticService{} @@ -141,7 +162,7 @@ func BuildGenesisTest(t *testing.T, genesisJSON string) []byte { } func NewContext() *snow.Context { - ctx := snow.DefaultContextTest() + ctx := utils.TestSnowContext() ctx.NodeID = ids.GenerateTestNodeID() ctx.NetworkID = testNetworkID ctx.ChainID = testCChainID @@ -166,73 +187,74 @@ func NewContext() *snow.Context { return subnetID, nil }, } + blsSecretKey, err := bls.NewSecretKey() + if err != nil { + panic(err) + } + ctx.WarpSigner = avalancheWarp.NewSigner(blsSecretKey, ctx.NetworkID, ctx.ChainID) + ctx.PublicKey = bls.PublicFromSecretKey(blsSecretKey) return ctx } // setupGenesis sets up the genesis // If [genesisJSON] is empty, defaults to using [genesisJSONLatest] -func setupGenesis(t *testing.T, +func setupGenesis( + t *testing.T, genesisJSON string, ) (*snow.Context, - manager.Manager, + database.Database, []byte, - chan engCommon.Message, - *atomic.Memory) { + chan 
commonEng.Message, + *atomic.Memory, +) { if len(genesisJSON) == 0 { genesisJSON = genesisJSONLatest } genesisBytes := BuildGenesisTest(t, genesisJSON) ctx := NewContext() - baseDBManager := manager.NewMemDB(&version.Semantic{ - Major: 1, - Minor: 4, - Patch: 5, - }) + baseDB := memdb.New() - m := atomic.NewMemory(prefixdb.New([]byte{0}, baseDBManager.Current().Database)) - ctx.SharedMemory = m.NewSharedMemory(ctx.ChainID) + // initialize the atomic memory + atomicMemory := atomic.NewMemory(prefixdb.New([]byte{0}, baseDB)) + ctx.SharedMemory = atomicMemory.NewSharedMemory(ctx.ChainID) // NB: this lock is intentionally left locked when this function returns. // The caller of this function is responsible for unlocking. ctx.Lock.Lock() - userKeystore := keystore.New( - logging.NoLog{}, - manager.NewMemDB(&version.Semantic{ - Major: 1, - Minor: 4, - Patch: 5, - }), - ) + userKeystore := keystore.New(logging.NoLog{}, memdb.New()) if err := userKeystore.CreateUser(username, password); err != nil { t.Fatal(err) } ctx.Keystore = userKeystore.NewBlockchainKeyStore(ctx.ChainID) - issuer := make(chan engCommon.Message, 1) - prefixedDBManager := baseDBManager.NewPrefixDBManager([]byte{1}) - return ctx, prefixedDBManager, genesisBytes, issuer, m + issuer := make(chan commonEng.Message, 1) + prefixedDB := prefixdb.New([]byte{1}, baseDB) + return ctx, prefixedDB, genesisBytes, issuer, atomicMemory } // GenesisVM creates a VM instance with the genesis test bytes and returns -// the channel use to send messages to the engine, the vm, and atomic memory +// the channel use to send messages to the engine, the VM, database manager, +// sender, and atomic memory. 
// If [genesisJSON] is empty, defaults to using [genesisJSONLatest] func GenesisVM(t *testing.T, finishBootstrapping bool, genesisJSON string, configJSON string, upgradeJSON string, -) (chan engCommon.Message, - *VM, manager.Manager, +) (chan commonEng.Message, + *VM, database.Database, *atomic.Memory, - *engCommon.SenderTest) { + *commonEng.SenderTest, +) { vm := &VM{} + vm.p2pSender = &commonEng.FakeSender{} ctx, dbManager, genesisBytes, issuer, m := setupGenesis(t, genesisJSON) - appSender := &engCommon.SenderTest{T: t} + appSender := &commonEng.SenderTest{T: t} appSender.CantSendAppGossip = true appSender.SendAppGossipF = func(context.Context, []byte) error { return nil } - if err := vm.Initialize( + err := vm.Initialize( context.Background(), ctx, dbManager, @@ -240,15 +262,14 @@ func GenesisVM(t *testing.T, []byte(upgradeJSON), []byte(configJSON), issuer, - []*engCommon.Fx{}, + []*commonEng.Fx{}, appSender, - ); err != nil { - t.Fatal(err) - } + ) + require.NoError(t, err, "error initializing GenesisVM") if finishBootstrapping { - assert.NoError(t, vm.SetState(context.Background(), snow.Bootstrapping)) - assert.NoError(t, vm.SetState(context.Background(), snow.NormalOp)) + require.NoError(t, vm.SetState(context.Background(), snow.Bootstrapping)) + require.NoError(t, vm.SetState(context.Background(), snow.NormalOp)) } return issuer, vm, dbManager, m, appSender @@ -292,8 +313,8 @@ func addUTXO(sharedMemory *atomic.Memory, ctx *snow.Context, txID ids.ID, index // GenesisVMWithUTXOs creates a GenesisVM and generates UTXOs in the X-Chain Shared Memory containing AVAX based on the [utxos] map // Generates UTXOIDs by using a hash of the address in the [utxos] map such that the UTXOs will be generated deterministically. 
// If [genesisJSON] is empty, defaults to using [genesisJSONLatest] -func GenesisVMWithUTXOs(t *testing.T, finishBootstrapping bool, genesisJSON string, configJSON string, upgradeJSON string, utxos map[ids.ShortID]uint64) (chan engCommon.Message, *VM, manager.Manager, *atomic.Memory, *engCommon.SenderTest) { - issuer, vm, dbManager, sharedMemory, sender := GenesisVM(t, finishBootstrapping, genesisJSON, configJSON, upgradeJSON) +func GenesisVMWithUTXOs(t *testing.T, finishBootstrapping bool, genesisJSON string, configJSON string, upgradeJSON string, utxos map[ids.ShortID]uint64) (chan commonEng.Message, *VM, database.Database, *atomic.Memory, *commonEng.SenderTest) { + issuer, vm, db, sharedMemory, sender := GenesisVM(t, finishBootstrapping, genesisJSON, configJSON, upgradeJSON) for addr, avaxAmount := range utxos { txID, err := ids.ToID(hashing.ComputeHash256(addr.Bytes())) if err != nil { @@ -304,7 +325,7 @@ func GenesisVMWithUTXOs(t *testing.T, finishBootstrapping bool, genesisJSON stri } } - return issuer, vm, dbManager, sharedMemory, sender + return issuer, vm, db, sharedMemory, sender } func TestVMConfig(t *testing.T) { @@ -312,9 +333,9 @@ func TestVMConfig(t *testing.T) { enabledEthAPIs := []string{"debug"} configJSON := fmt.Sprintf("{\"rpc-tx-fee-cap\": %g,\"eth-apis\": %s}", txFeeCap, fmt.Sprintf("[%q]", enabledEthAPIs[0])) _, vm, _, _, _ := GenesisVM(t, false, "", configJSON, "") - assert.Equal(t, vm.config.RPCTxFeeCap, txFeeCap, "Tx Fee Cap should be set") - assert.Equal(t, vm.config.EthAPIs(), enabledEthAPIs, "EnabledEthAPIs should be set") - assert.NoError(t, vm.Shutdown(context.Background())) + require.Equal(t, vm.config.RPCTxFeeCap, txFeeCap, "Tx Fee Cap should be set") + require.Equal(t, vm.config.EthAPIs(), enabledEthAPIs, "EnabledEthAPIs should be set") + require.NoError(t, vm.Shutdown(context.Background())) } func TestCrossChainMessagestoVM(t *testing.T) { @@ -374,7 +395,7 @@ func TestCrossChainMessagestoVM(t *testing.T) { importTx, err := 
vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) require.NoError(err) - err = vm.issueTx(importTx, true /*=local*/) + err = vm.mempool.AddLocalTx(importTx) require.NoError(err) <-issuer @@ -493,8 +514,8 @@ func TestVMConfigDefaults(t *testing.T) { vmConfig.SetDefaults() vmConfig.RPCTxFeeCap = txFeeCap vmConfig.EnabledEthAPIs = enabledEthAPIs - assert.Equal(t, vmConfig, vm.config, "VM Config should match default with overrides") - assert.NoError(t, vm.Shutdown(context.Background())) + require.Equal(t, vmConfig, vm.config, "VM Config should match default with overrides") + require.NoError(t, vm.Shutdown(context.Background())) } func TestVMNilConfig(t *testing.T) { @@ -503,27 +524,27 @@ func TestVMNilConfig(t *testing.T) { // VM Config should match defaults if no config is passed in var vmConfig Config vmConfig.SetDefaults() - assert.Equal(t, vmConfig, vm.config, "VM Config should match default config") - assert.NoError(t, vm.Shutdown(context.Background())) + require.Equal(t, vmConfig, vm.config, "VM Config should match default config") + require.NoError(t, vm.Shutdown(context.Background())) } -func TestVMContinuosProfiler(t *testing.T) { +func TestVMContinuousProfiler(t *testing.T) { profilerDir := t.TempDir() profilerFrequency := 500 * time.Millisecond configJSON := fmt.Sprintf("{\"continuous-profiler-dir\": %q,\"continuous-profiler-frequency\": \"500ms\"}", profilerDir) _, vm, _, _, _ := GenesisVM(t, false, "", configJSON, "") - assert.Equal(t, vm.config.ContinuousProfilerDir, profilerDir, "profiler dir should be set") - assert.Equal(t, vm.config.ContinuousProfilerFrequency.Duration, profilerFrequency, "profiler frequency should be set") + require.Equal(t, vm.config.ContinuousProfilerDir, profilerDir, "profiler dir should be set") + require.Equal(t, vm.config.ContinuousProfilerFrequency.Duration, profilerFrequency, "profiler frequency should be set") // Sleep for twice the frequency of the profiler to give it 
time // to generate the first profile. time.Sleep(2 * time.Second) - assert.NoError(t, vm.Shutdown(context.Background())) + require.NoError(t, vm.Shutdown(context.Background())) // Check that the first profile was generated expectedFileName := filepath.Join(profilerDir, "cpu.profile.1") _, err := os.Stat(expectedFileName) - assert.NoError(t, err, "Expected continuous profiler to generate the first CPU profile at %s", expectedFileName) + require.NoError(t, err, "Expected continuous profiler to generate the first CPU profile at %s", expectedFileName) } func TestVMUpgrades(t *testing.T) { @@ -587,6 +608,11 @@ func TestVMUpgrades(t *testing.T) { genesis: genesisJSONCortina, expectedGasPrice: big.NewInt(0), }, + { + name: "Durango", + genesis: genesisJSONDurango, + expectedGasPrice: big.NewInt(0), + }, } for _, test := range genesisTests { t.Run(test.name, func(t *testing.T) { @@ -658,7 +684,7 @@ func TestImportMissingUTXOs(t *testing.T) { importTx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) require.NoError(t, err) - err = vm.issueTx(importTx, true /*=local*/) + err = vm.mempool.AddLocalTx(importTx) require.NoError(t, err) <-issuer blk, err := vm.BuildBlock(context.Background()) @@ -701,7 +727,7 @@ func TestIssueAtomicTxs(t *testing.T) { t.Fatal(err) } - if err := vm.issueTx(importTx, true /*=local*/); err != nil { + if err := vm.mempool.AddLocalTx(importTx); err != nil { t.Fatal(err) } @@ -737,13 +763,30 @@ func TestIssueAtomicTxs(t *testing.T) { } else if lastAcceptedID != blk.ID() { t.Fatalf("Expected last accepted blockID to be the accepted block: %s, but found %s", blk.ID(), lastAcceptedID) } + vm.blockChain.DrainAcceptorQueue() + filterAPI := filters.NewFilterAPI(filters.NewFilterSystem(vm.eth.APIBackend, filters.Config{ + Timeout: 5 * time.Minute, + })) + blockHash := common.Hash(blk.ID()) + logs, err := filterAPI.GetLogs(context.Background(), filters.FilterCriteria{ + BlockHash: &blockHash, + }) 
+ if err != nil { + t.Fatal(err) + } + if len(logs) != 0 { + t.Fatalf("Expected log length to be 0, but found %d", len(logs)) + } + if logs == nil { + t.Fatal("Expected logs to be non-nil") + } exportTx, err := vm.newExportTx(vm.ctx.AVAXAssetID, importAmount-(2*params.AvalancheAtomicTxFee), vm.ctx.XChainID, testShortIDAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) if err != nil { t.Fatal(err) } - if err := vm.issueTx(exportTx, true /*=local*/); err != nil { + if err := vm.mempool.AddLocalTx(exportTx); err != nil { t.Fatal(err) } @@ -810,7 +853,7 @@ func TestBuildEthTxBlock(t *testing.T) { t.Fatal(err) } - if err := vm.issueTx(importTx, true /*=local*/); err != nil { + if err := vm.mempool.AddLocalTx(importTx); err != nil { t.Fatal(err) } @@ -931,7 +974,7 @@ func TestBuildEthTxBlock(t *testing.T) { []byte(""), []byte("{\"pruning-enabled\":true}"), issuer, - []*engCommon.Fx{}, + []*commonEng.Fx{}, nil, ); err != nil { t.Fatal(err) @@ -985,7 +1028,7 @@ func testConflictingImportTxs(t *testing.T, genesis string) { t.Fatal(err) } for i, tx := range importTxs[:2] { - if err := vm.issueTx(tx, true /*=local*/); err != nil { + if err := vm.mempool.AddLocalTx(tx); err != nil { t.Fatal(err) } @@ -1019,7 +1062,7 @@ func testConflictingImportTxs(t *testing.T, genesis string) { // the VM returns an error when it attempts to issue the conflict into the mempool // and when it attempts to build a block with the conflict force added to the mempool. for i, tx := range conflictTxs[:2] { - if err := vm.issueTx(tx, true /*=local*/); err == nil { + if err := vm.mempool.AddLocalTx(tx); err == nil { t.Fatal("Expected issueTx to fail due to conflicting transaction") } // Force issue transaction directly to the mempool @@ -1041,7 +1084,7 @@ func testConflictingImportTxs(t *testing.T, genesis string) { // Generate one more valid block so that we can copy the header to create an invalid block // with modified extra data. 
This new block will be invalid for more than one reason (invalid merkle root) // so we check to make sure that the expected error is returned from block verification. - if err := vm.issueTx(importTxs[2], true); err != nil { + if err := vm.mempool.AddLocalTx(importTxs[2]); err != nil { t.Fatal(err) } <-issuer @@ -1070,7 +1113,7 @@ func testConflictingImportTxs(t *testing.T, genesis string) { t.Fatal(err) } - conflictingAtomicTxBlock := types.NewBlock( + conflictingAtomicTxBlock := types.NewBlockWithExtData( types.CopyHeader(validEthBlock.Header()), nil, nil, @@ -1106,7 +1149,7 @@ func testConflictingImportTxs(t *testing.T, genesis string) { header := types.CopyHeader(validEthBlock.Header()) header.ExtDataGasUsed.Mul(common.Big2, header.ExtDataGasUsed) - internalConflictBlock := types.NewBlock( + internalConflictBlock := types.NewBlockWithExtData( header, nil, nil, @@ -1149,10 +1192,10 @@ func TestReissueAtomicTxHigherGasPrice(t *testing.T) { t.Fatal(err) } - if err := vm.issueTx(tx1, true); err != nil { + if err := vm.mempool.AddLocalTx(tx1); err != nil { t.Fatal(err) } - if err := vm.issueTx(tx2, true); err != nil { + if err := vm.mempool.AddLocalTx(tx2); err != nil { t.Fatal(err) } @@ -1176,10 +1219,10 @@ func TestReissueAtomicTxHigherGasPrice(t *testing.T) { t.Fatal(err) } - if err := vm.issueTx(tx1, true); err != nil { + if err := vm.mempool.AddLocalTx(tx1); err != nil { t.Fatal(err) } - if err := vm.issueTx(tx2, true); err != nil { + if err := vm.mempool.AddLocalTx(tx2); err != nil { t.Fatal(err) } @@ -1209,15 +1252,15 @@ func TestReissueAtomicTxHigherGasPrice(t *testing.T) { if err != nil { t.Fatal(err) } - if err := vm.issueTx(importTx1, true /*=local*/); err != nil { + if err := vm.mempool.AddLocalTx(importTx1); err != nil { t.Fatal(err) } - if err := vm.issueTx(importTx2, true /*=local*/); err != nil { + if err := vm.mempool.AddLocalTx(importTx2); err != nil { t.Fatal(err) } - if err := vm.issueTx(reissuanceTx1, true /*=local*/); !errors.Is(err, 
errConflictingAtomicTx) { + if err := vm.mempool.AddLocalTx(reissuanceTx1); !errors.Is(err, errConflictingAtomicTx) { t.Fatalf("Expected to fail with err: %s, but found err: %s", errConflictingAtomicTx, err) } @@ -1229,7 +1272,7 @@ func TestReissueAtomicTxHigherGasPrice(t *testing.T) { if err != nil { t.Fatal(err) } - if err := vm.issueTx(reissuanceTx2, true /*=local*/); err != nil { + if err := vm.mempool.AddLocalTx(reissuanceTx2); err != nil { t.Fatal(err) } @@ -1309,7 +1352,7 @@ func TestSetPreferenceRace(t *testing.T) { t.Fatal(err) } - if err := vm1.issueTx(importTx, true /*=local*/); err != nil { + if err := vm1.mempool.AddLocalTx(importTx); err != nil { t.Fatal(err) } @@ -1558,7 +1601,7 @@ func TestConflictingTransitiveAncestryWithGap(t *testing.T) { t.Fatal(err) } - if err := vm.issueTx(importTx0A, true /*=local*/); err != nil { + if err := vm.mempool.AddLocalTx(importTx0A); err != nil { t.Fatalf("Failed to issue importTx0A: %s", err) } @@ -1616,7 +1659,7 @@ func TestConflictingTransitiveAncestryWithGap(t *testing.T) { t.Fatalf("Failed to issue importTx1 due to: %s", err) } - if err := vm.issueTx(importTx1, true /*=local*/); err != nil { + if err := vm.mempool.AddLocalTx(importTx1); err != nil { t.Fatal(err) } @@ -1635,7 +1678,7 @@ func TestConflictingTransitiveAncestryWithGap(t *testing.T) { t.Fatal(err) } - if err := vm.issueTx(importTx0B, true /*=local*/); err == nil { + if err := vm.mempool.AddLocalTx(importTx0B); err == nil { t.Fatalf("Should not have been able to issue import tx with conflict") } // Force issue transaction directly into the mempool @@ -1695,7 +1738,7 @@ func TestBonusBlocksTxs(t *testing.T) { t.Fatal(err) } - if err := vm.issueTx(importTx, true /*=local*/); err != nil { + if err := vm.mempool.AddLocalTx(importTx); err != nil { t.Fatal(err) } @@ -1788,7 +1831,7 @@ func TestReorgProtection(t *testing.T) { t.Fatal(err) } - if err := vm1.issueTx(importTx, true /*=local*/); err != nil { + if err := vm1.mempool.AddLocalTx(importTx); err != 
nil { t.Fatal(err) } @@ -1970,7 +2013,7 @@ func TestNonCanonicalAccept(t *testing.T) { t.Fatal(err) } - if err := vm1.issueTx(importTx, true /*=local*/); err != nil { + if err := vm1.mempool.AddLocalTx(importTx); err != nil { t.Fatal(err) } @@ -2064,7 +2107,7 @@ func TestNonCanonicalAccept(t *testing.T) { t.Fatal(err) } - vm1.blockChain.GetVMConfig().AllowUnfinalizedQueries = true + vm1.eth.APIBackend.SetAllowUnfinalizedQueries(true) blkBHeight := vm1BlkB.Height() blkBHash := vm1BlkB.(*chain.BlockWrapper).Block.(*Block).ethBlock.Hash() @@ -2145,7 +2188,7 @@ func TestStickyPreference(t *testing.T) { t.Fatal(err) } - if err := vm1.issueTx(importTx, true /*=local*/); err != nil { + if err := vm1.mempool.AddLocalTx(importTx); err != nil { t.Fatal(err) } @@ -2239,7 +2282,7 @@ func TestStickyPreference(t *testing.T) { t.Fatal(err) } - vm1.blockChain.GetVMConfig().AllowUnfinalizedQueries = true + vm1.eth.APIBackend.SetAllowUnfinalizedQueries(true) blkBHeight := vm1BlkB.Height() blkBHash := vm1BlkB.(*chain.BlockWrapper).Block.(*Block).ethBlock.Hash() @@ -2418,7 +2461,7 @@ func TestUncleBlock(t *testing.T) { t.Fatal(err) } - if err := vm1.issueTx(importTx, true /*=local*/); err != nil { + if err := vm1.mempool.AddLocalTx(importTx); err != nil { t.Fatal(err) } @@ -2558,12 +2601,12 @@ func TestUncleBlock(t *testing.T) { uncleBlockHeader := types.CopyHeader(blkDEthBlock.Header()) uncleBlockHeader.UncleHash = types.CalcUncleHash(uncles) - uncleEthBlock := types.NewBlock( + uncleEthBlock := types.NewBlockWithExtData( uncleBlockHeader, blkDEthBlock.Transactions(), uncles, nil, - new(trie.Trie), + trie.NewStackTrie(nil), blkDEthBlock.ExtData(), false, ) @@ -2601,7 +2644,7 @@ func TestEmptyBlock(t *testing.T) { t.Fatal(err) } - if err := vm.issueTx(importTx, true /*=local*/); err != nil { + if err := vm.mempool.AddLocalTx(importTx); err != nil { t.Fatal(err) } @@ -2615,7 +2658,7 @@ func TestEmptyBlock(t *testing.T) { // Create empty block from blkA ethBlock := 
blk.(*chain.BlockWrapper).Block.(*Block).ethBlock - emptyEthBlock := types.NewBlock( + emptyEthBlock := types.NewBlockWithExtData( types.CopyHeader(ethBlock.Header()), nil, nil, @@ -2682,7 +2725,7 @@ func TestAcceptReorg(t *testing.T) { t.Fatal(err) } - if err := vm1.issueTx(importTx, true /*=local*/); err != nil { + if err := vm1.mempool.AddLocalTx(importTx); err != nil { t.Fatal(err) } @@ -2877,7 +2920,7 @@ func TestFutureBlock(t *testing.T) { t.Fatal(err) } - if err := vm.issueTx(importTx, true /*=local*/); err != nil { + if err := vm.mempool.AddLocalTx(importTx); err != nil { t.Fatal(err) } @@ -2896,7 +2939,7 @@ func TestFutureBlock(t *testing.T) { // Set the modified time to exceed the allowed future time modifiedTime := modifiedHeader.Time + uint64(maxFutureBlockTime.Seconds()+1) modifiedHeader.Time = modifiedTime - modifiedBlock := types.NewBlock( + modifiedBlock := types.NewBlockWithExtData( modifiedHeader, nil, nil, @@ -2942,7 +2985,7 @@ func TestBuildApricotPhase1Block(t *testing.T) { t.Fatal(err) } - if err := vm.issueTx(importTx, true /*=local*/); err != nil { + if err := vm.mempool.AddLocalTx(importTx); err != nil { t.Fatal(err) } @@ -3058,7 +3101,7 @@ func TestLastAcceptedBlockNumberAllow(t *testing.T) { t.Fatal(err) } - if err := vm.issueTx(importTx, true /*=local*/); err != nil { + if err := vm.mempool.AddLocalTx(importTx); err != nil { t.Fatal(err) } @@ -3084,7 +3127,7 @@ func TestLastAcceptedBlockNumberAllow(t *testing.T) { blkHeight := blk.Height() blkHash := blk.(*chain.BlockWrapper).Block.(*Block).ethBlock.Hash() - vm.blockChain.GetVMConfig().AllowUnfinalizedQueries = true + vm.eth.APIBackend.SetAllowUnfinalizedQueries(true) ctx := context.Background() b, err := vm.eth.APIBackend.BlockByNumber(ctx, rpc.BlockNumber(blkHeight)) @@ -3095,7 +3138,7 @@ func TestLastAcceptedBlockNumberAllow(t *testing.T) { t.Fatalf("expected block at %d to have hash %s but got %s", blkHeight, blkHash.Hex(), b.Hash().Hex()) } - 
vm.blockChain.GetVMConfig().AllowUnfinalizedQueries = false + vm.eth.APIBackend.SetAllowUnfinalizedQueries(false) _, err = vm.eth.APIBackend.BlockByNumber(ctx, rpc.BlockNumber(blkHeight)) if !errors.Is(err, eth.ErrUnfinalizedData) { @@ -3136,7 +3179,7 @@ func TestReissueAtomicTx(t *testing.T) { t.Fatal(err) } - if err := vm.issueTx(importTx, true /*=local*/); err != nil { + if err := vm.mempool.AddLocalTx(importTx); err != nil { t.Fatal(err) } @@ -3238,7 +3281,7 @@ func TestAtomicTxFailsEVMStateTransferBuildBlock(t *testing.T) { exportTxs := createExportTxOptions(t, vm, issuer, sharedMemory) exportTx1, exportTx2 := exportTxs[0], exportTxs[1] - if err := vm.issueTx(exportTx1, true /*=local*/); err != nil { + if err := vm.mempool.AddLocalTx(exportTx1); err != nil { t.Fatal(err) } <-issuer @@ -3254,7 +3297,7 @@ func TestAtomicTxFailsEVMStateTransferBuildBlock(t *testing.T) { t.Fatal(err) } - if err := vm.issueTx(exportTx2, true /*=local*/); err == nil { + if err := vm.mempool.AddLocalTx(exportTx2); err == nil { t.Fatal("Should have failed to issue due to an invalid export tx") } @@ -3317,11 +3360,11 @@ func TestBuildInvalidBlockHead(t *testing.T) { // Verify that the transaction fails verification when attempting to issue // it into the atomic mempool. 
- if err := vm.issueTx(tx, true /*=local*/); err == nil { + if err := vm.mempool.AddLocalTx(tx); err == nil { t.Fatal("Should have failed to issue invalid transaction") } // Force issue the transaction directly to the mempool - if err := vm.mempool.AddTx(tx); err != nil { + if err := vm.mempool.ForceAddTx(tx); err != nil { t.Fatal(err) } @@ -3364,7 +3407,7 @@ func TestConfigureLogLevel(t *testing.T) { t.Run(test.name, func(t *testing.T) { vm := &VM{} ctx, dbManager, genesisBytes, issuer, _ := setupGenesis(t, test.genesisJSON) - appSender := &engCommon.SenderTest{T: t} + appSender := &commonEng.SenderTest{T: t} appSender.CantSendAppGossip = true appSender.SendAppGossipF = func(context.Context, []byte) error { return nil } err := vm.Initialize( @@ -3375,7 +3418,7 @@ func TestConfigureLogLevel(t *testing.T) { []byte(""), []byte(test.logConfig), issuer, - []*engCommon.Fx{}, + []*commonEng.Fx{}, appSender, ) if len(test.expectedErr) == 0 && err != nil { @@ -3465,7 +3508,7 @@ func TestBuildApricotPhase4Block(t *testing.T) { t.Fatal(err) } - if err := vm.issueTx(importTx, true /*=local*/); err != nil { + if err := vm.mempool.AddLocalTx(importTx); err != nil { t.Fatal(err) } @@ -3647,7 +3690,7 @@ func TestBuildApricotPhase5Block(t *testing.T) { t.Fatal(err) } - if err := vm.issueTx(importTx, true /*=local*/); err != nil { + if err := vm.mempool.AddLocalTx(importTx); err != nil { t.Fatal(err) } @@ -3787,7 +3830,7 @@ func TestConsecutiveAtomicTransactionsRevertSnapshot(t *testing.T) { importTxs := createImportTxOptions(t, vm, sharedMemory) // Issue the first import transaction, build, and accept the block. 
- if err := vm.issueTx(importTxs[0], true); err != nil { + if err := vm.mempool.AddLocalTx(importTxs[0]); err != nil { t.Fatal(err) } @@ -3853,7 +3896,7 @@ func TestAtomicTxBuildBlockDropsConflicts(t *testing.T) { if err != nil { t.Fatal(err) } - if err := vm.issueTx(importTx, true /*=local*/); err != nil { + if err := vm.mempool.AddLocalTx(importTx); err != nil { t.Fatal(err) } conflictSets[index].Add(importTx.ID()) @@ -3861,7 +3904,7 @@ func TestAtomicTxBuildBlockDropsConflicts(t *testing.T) { if err != nil { t.Fatal(err) } - if err := vm.issueTx(conflictTx, true /*=local*/); err == nil { + if err := vm.mempool.AddLocalTx(conflictTx); err == nil { t.Fatal("should conflict with the utxoSet in the mempool") } // force add the tx @@ -3922,7 +3965,7 @@ func TestBuildBlockDoesNotExceedAtomicGasLimit(t *testing.T) { if err != nil { t.Fatal(err) } - if err := vm.issueTx(importTx, true); err != nil { + if err := vm.mempool.AddLocalTx(importTx); err != nil { t.Fatal(err) } } @@ -3981,7 +4024,7 @@ func TestExtraStateChangeAtomicGasLimitExceeded(t *testing.T) { if err != nil { t.Fatal(err) } - if err := vm1.issueTx(importTx, true); err != nil { + if err := vm1.mempool.ForceAddTx(importTx); err != nil { t.Fatal(err) } @@ -4002,7 +4045,7 @@ func TestExtraStateChangeAtomicGasLimitExceeded(t *testing.T) { } // Construct the new block with the extra data in the new format (slice of atomic transactions). 
- ethBlk2 := types.NewBlock( + ethBlk2 := types.NewBlockWithExtData( types.CopyHeader(validEthBlock.Header()), nil, nil, @@ -4023,13 +4066,6 @@ func TestExtraStateChangeAtomicGasLimitExceeded(t *testing.T) { } } -func TestGetAtomicRepositoryRepairHeights(t *testing.T) { - mainnetHeights := getAtomicRepositoryRepairHeights(bonusBlockMainnetHeights, canonicalBlockMainnetHeights) - assert.Len(t, mainnetHeights, 76) - sorted := sort.SliceIsSorted(mainnetHeights, func(i, j int) bool { return mainnetHeights[i] < mainnetHeights[j] }) - assert.True(t, sorted) -} - func TestSkipChainConfigCheckCompatible(t *testing.T) { // Hack: registering metrics uses global variables, so we need to disable metrics here so that we can initialize the VM twice. metrics.Enabled = false @@ -4045,7 +4081,7 @@ func TestSkipChainConfigCheckCompatible(t *testing.T) { // accept one block to test the SkipUpgradeCheck functionality. importTx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) require.NoError(t, err) - require.NoError(t, vm.issueTx(importTx, true /*=local*/)) + require.NoError(t, vm.mempool.AddLocalTx(importTx)) <-issuer blk, err := vm.BuildBlock(context.Background()) @@ -4059,17 +4095,17 @@ func TestSkipChainConfigCheckCompatible(t *testing.T) { // is hardcoded to be allowed in core/genesis.go. 
genesisWithUpgrade := &core.Genesis{} require.NoError(t, json.Unmarshal([]byte(genesisJSONApricotPhase1), genesisWithUpgrade)) - genesisWithUpgrade.Config.ApricotPhase2BlockTimestamp = big.NewInt(blk.Timestamp().Unix()) + genesisWithUpgrade.Config.ApricotPhase2BlockTimestamp = utils.TimeToNewUint64(blk.Timestamp()) genesisWithUpgradeBytes, err := json.Marshal(genesisWithUpgrade) require.NoError(t, err) // this will not be allowed - err = reinitVM.Initialize(context.Background(), vm.ctx, dbManager, genesisWithUpgradeBytes, []byte{}, []byte{}, issuer, []*engCommon.Fx{}, appSender) + err = reinitVM.Initialize(context.Background(), vm.ctx, dbManager, genesisWithUpgradeBytes, []byte{}, []byte{}, issuer, []*commonEng.Fx{}, appSender) require.ErrorContains(t, err, "mismatching ApricotPhase2 fork block timestamp in database") // try again with skip-upgrade-check config := []byte("{\"skip-upgrade-check\": true}") - err = reinitVM.Initialize(context.Background(), vm.ctx, dbManager, genesisWithUpgradeBytes, []byte{}, config, issuer, []*engCommon.Fx{}, appSender) + err = reinitVM.Initialize(context.Background(), vm.ctx, dbManager, genesisWithUpgradeBytes, []byte{}, config, issuer, []*commonEng.Fx{}, appSender) require.NoError(t, err) require.NoError(t, reinitVM.Shutdown(context.Background())) } diff --git a/coreth/plugin/evm/vm_warp_test.go b/coreth/plugin/evm/vm_warp_test.go new file mode 100644 index 00000000..40dbf9a2 --- /dev/null +++ b/coreth/plugin/evm/vm_warp_test.go @@ -0,0 +1,679 @@ +// See the file LICENSE for licensing terms. 
+ +package evm + +import ( + "context" + "encoding/json" + "errors" + "math/big" + "testing" + "time" + + _ "embed" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/choices" + "github.com/ava-labs/avalanchego/snow/engine/snowman/block" + "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/crypto/bls" + "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/vms/components/chain" + avalancheWarp "github.com/ava-labs/avalanchego/vms/platformvm/warp" + "github.com/ava-labs/avalanchego/vms/platformvm/warp/payload" + "github.com/ava-labs/coreth/core/rawdb" + "github.com/ava-labs/coreth/core/types" + "github.com/ava-labs/coreth/eth/tracers" + "github.com/ava-labs/coreth/params" + "github.com/ava-labs/coreth/plugin/evm/message" + "github.com/ava-labs/coreth/precompile/contract" + "github.com/ava-labs/coreth/precompile/contracts/warp" + "github.com/ava-labs/coreth/predicate" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/stretchr/testify/require" +) + +var ( + //go:embed ExampleWarp.bin + exampleWarpBin string + //go:embed ExampleWarp.abi + exampleWarpABI string +) + +func TestSendWarpMessage(t *testing.T) { + require := require.New(t) + issuer, vm, _, _, _ := GenesisVM(t, true, genesisJSONDurango, "", "") + + defer func() { + require.NoError(vm.Shutdown(context.Background())) + }() + + acceptedLogsChan := make(chan []*types.Log, 10) + logsSub := vm.eth.APIBackend.SubscribeAcceptedLogsEvent(acceptedLogsChan) + defer logsSub.Unsubscribe() + + payloadData := utils.RandomBytes(100) + + warpSendMessageInput, err := warp.PackSendWarpMessage(payloadData) + require.NoError(err) + addressedPayload, err := payload.NewAddressedCall( + testEthAddrs[0].Bytes(), + payloadData, + ) + require.NoError(err) + expectedUnsignedMessage, err := avalancheWarp.NewUnsignedMessage( + vm.ctx.NetworkID, + 
vm.ctx.ChainID, + addressedPayload.Bytes(), + ) + require.NoError(err) + + // Submit a transaction to trigger sending a warp message + tx0 := types.NewTransaction(uint64(0), warp.ContractAddress, big.NewInt(1), 100_000, big.NewInt(params.LaunchMinGasPrice), warpSendMessageInput) + signedTx0, err := types.SignTx(tx0, types.LatestSignerForChainID(vm.chainConfig.ChainID), testKeys[0].ToECDSA()) + require.NoError(err) + + errs := vm.txPool.AddRemotesSync([]*types.Transaction{signedTx0}) + require.NoError(errs[0]) + + <-issuer + blk, err := vm.BuildBlock(context.Background()) + require.NoError(err) + + require.NoError(blk.Verify(context.Background())) + + require.Equal(choices.Processing, blk.Status()) + + // Verify that the constructed block contains the expected log with an unsigned warp message in the log data + ethBlock1 := blk.(*chain.BlockWrapper).Block.(*Block).ethBlock + require.Len(ethBlock1.Transactions(), 1) + receipts := rawdb.ReadReceipts(vm.chaindb, ethBlock1.Hash(), ethBlock1.NumberU64(), ethBlock1.Time(), vm.chainConfig) + require.Len(receipts, 1) + + require.Len(receipts[0].Logs, 1) + expectedTopics := []common.Hash{ + warp.WarpABI.Events["SendWarpMessage"].ID, + testEthAddrs[0].Hash(), + common.Hash(expectedUnsignedMessage.ID()), + } + require.Equal(expectedTopics, receipts[0].Logs[0].Topics) + logData := receipts[0].Logs[0].Data + unsignedMessage, err := warp.UnpackSendWarpEventDataToMessage(logData) + require.NoError(err) + unsignedMessageID := unsignedMessage.ID() + + // Verify the signature cannot be fetched before the block is accepted + _, err = vm.warpBackend.GetMessageSignature(unsignedMessageID) + require.Error(err) + _, err = vm.warpBackend.GetBlockSignature(blk.ID()) + require.Error(err) + + require.NoError(vm.SetPreference(context.Background(), blk.ID())) + require.NoError(blk.Accept(context.Background())) + vm.blockChain.DrainAcceptorQueue() + + // Verify the message signature after accepting the block. 
+ rawSignatureBytes, err := vm.warpBackend.GetMessageSignature(unsignedMessageID) + require.NoError(err) + blsSignature, err := bls.SignatureFromBytes(rawSignatureBytes[:]) + require.NoError(err) + + select { + case acceptedLogs := <-acceptedLogsChan: + require.Len(acceptedLogs, 1, "unexpected length of accepted logs") + require.Equal(acceptedLogs[0], receipts[0].Logs[0]) + case <-time.After(time.Second): + require.Fail("Failed to read accepted logs from subscription") + } + + // Verify the produced message signature is valid + require.True(bls.Verify(vm.ctx.PublicKey, blsSignature, unsignedMessage.Bytes())) + + // Verify the blockID will now be signed by the backend and produces a valid signature. + rawSignatureBytes, err = vm.warpBackend.GetBlockSignature(blk.ID()) + require.NoError(err) + blsSignature, err = bls.SignatureFromBytes(rawSignatureBytes[:]) + require.NoError(err) + + blockHashPayload, err := payload.NewHash(blk.ID()) + require.NoError(err) + unsignedMessage, err = avalancheWarp.NewUnsignedMessage(vm.ctx.NetworkID, vm.ctx.ChainID, blockHashPayload.Bytes()) + require.NoError(err) + + // Verify the produced message signature is valid + require.True(bls.Verify(vm.ctx.PublicKey, blsSignature, unsignedMessage.Bytes())) +} + +func TestValidateWarpMessage(t *testing.T) { + require := require.New(t) + sourceChainID := ids.GenerateTestID() + sourceAddress := common.HexToAddress("0x376c47978271565f56DEB45495afa69E59c16Ab2") + payloadData := []byte{1, 2, 3} + addressedPayload, err := payload.NewAddressedCall( + sourceAddress.Bytes(), + payloadData, + ) + require.NoError(err) + unsignedMessage, err := avalancheWarp.NewUnsignedMessage(testNetworkID, sourceChainID, addressedPayload.Bytes()) + require.NoError(err) + + exampleWarpABI := contract.ParseABI(exampleWarpABI) + exampleWarpPayload, err := exampleWarpABI.Pack( + "validateWarpMessage", + uint32(0), + sourceChainID, + sourceAddress, + payloadData, + ) + require.NoError(err) + + testWarpVMTransaction(t, 
unsignedMessage, true, exampleWarpPayload) +} + +func TestValidateInvalidWarpMessage(t *testing.T) { + require := require.New(t) + sourceChainID := ids.GenerateTestID() + sourceAddress := common.HexToAddress("0x376c47978271565f56DEB45495afa69E59c16Ab2") + payloadData := []byte{1, 2, 3} + addressedPayload, err := payload.NewAddressedCall( + sourceAddress.Bytes(), + payloadData, + ) + require.NoError(err) + unsignedMessage, err := avalancheWarp.NewUnsignedMessage(testNetworkID, sourceChainID, addressedPayload.Bytes()) + require.NoError(err) + + exampleWarpABI := contract.ParseABI(exampleWarpABI) + exampleWarpPayload, err := exampleWarpABI.Pack( + "validateInvalidWarpMessage", + uint32(0), + ) + require.NoError(err) + + testWarpVMTransaction(t, unsignedMessage, false, exampleWarpPayload) +} + +func TestValidateWarpBlockHash(t *testing.T) { + require := require.New(t) + sourceChainID := ids.GenerateTestID() + blockHash := ids.GenerateTestID() + blockHashPayload, err := payload.NewHash(blockHash) + require.NoError(err) + unsignedMessage, err := avalancheWarp.NewUnsignedMessage(testNetworkID, sourceChainID, blockHashPayload.Bytes()) + require.NoError(err) + + exampleWarpABI := contract.ParseABI(exampleWarpABI) + exampleWarpPayload, err := exampleWarpABI.Pack( + "validateWarpBlockHash", + uint32(0), + sourceChainID, + blockHash, + ) + require.NoError(err) + + testWarpVMTransaction(t, unsignedMessage, true, exampleWarpPayload) +} + +func TestValidateInvalidWarpBlockHash(t *testing.T) { + require := require.New(t) + sourceChainID := ids.GenerateTestID() + blockHash := ids.GenerateTestID() + blockHashPayload, err := payload.NewHash(blockHash) + require.NoError(err) + unsignedMessage, err := avalancheWarp.NewUnsignedMessage(testNetworkID, sourceChainID, blockHashPayload.Bytes()) + require.NoError(err) + + exampleWarpABI := contract.ParseABI(exampleWarpABI) + exampleWarpPayload, err := exampleWarpABI.Pack( + "validateInvalidWarpBlockHash", + uint32(0), + ) + 
require.NoError(err) + + testWarpVMTransaction(t, unsignedMessage, false, exampleWarpPayload) +} + +func testWarpVMTransaction(t *testing.T, unsignedMessage *avalancheWarp.UnsignedMessage, validSignature bool, txPayload []byte) { + require := require.New(t) + issuer, vm, _, _, _ := GenesisVM(t, true, genesisJSONDurango, "", "") + + defer func() { + require.NoError(vm.Shutdown(context.Background())) + }() + + acceptedLogsChan := make(chan []*types.Log, 10) + logsSub := vm.eth.APIBackend.SubscribeAcceptedLogsEvent(acceptedLogsChan) + defer logsSub.Unsubscribe() + + nodeID1 := ids.GenerateTestNodeID() + blsSecretKey1, err := bls.NewSecretKey() + require.NoError(err) + blsPublicKey1 := bls.PublicFromSecretKey(blsSecretKey1) + blsSignature1 := bls.Sign(blsSecretKey1, unsignedMessage.Bytes()) + + nodeID2 := ids.GenerateTestNodeID() + blsSecretKey2, err := bls.NewSecretKey() + require.NoError(err) + blsPublicKey2 := bls.PublicFromSecretKey(blsSecretKey2) + blsSignature2 := bls.Sign(blsSecretKey2, unsignedMessage.Bytes()) + + blsAggregatedSignature, err := bls.AggregateSignatures([]*bls.Signature{blsSignature1, blsSignature2}) + require.NoError(err) + + minimumValidPChainHeight := uint64(10) + getValidatorSetTestErr := errors.New("can't get validator set test error") + + vm.ctx.ValidatorState = &validators.TestState{ + // TODO: test both Primary Network / C-Chain and non-Primary Network + GetSubnetIDF: func(ctx context.Context, chainID ids.ID) (ids.ID, error) { + return ids.Empty, nil + }, + GetValidatorSetF: func(ctx context.Context, height uint64, subnetID ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { + if height < minimumValidPChainHeight { + return nil, getValidatorSetTestErr + } + return map[ids.NodeID]*validators.GetValidatorOutput{ + nodeID1: { + NodeID: nodeID1, + PublicKey: blsPublicKey1, + Weight: 50, + }, + nodeID2: { + NodeID: nodeID2, + PublicKey: blsPublicKey2, + Weight: 50, + }, + }, nil + }, + } + + signersBitSet := set.NewBits() + 
signersBitSet.Add(0) + signersBitSet.Add(1) + + warpSignature := &avalancheWarp.BitSetSignature{ + Signers: signersBitSet.Bytes(), + } + + blsAggregatedSignatureBytes := bls.SignatureToBytes(blsAggregatedSignature) + copy(warpSignature.Signature[:], blsAggregatedSignatureBytes) + + signedMessage, err := avalancheWarp.NewMessage( + unsignedMessage, + warpSignature, + ) + require.NoError(err) + + createTx, err := types.SignTx( + types.NewContractCreation(0, common.Big0, 7_000_000, big.NewInt(225*params.GWei), common.Hex2Bytes(exampleWarpBin)), + types.LatestSignerForChainID(vm.chainConfig.ChainID), + testKeys[0].ToECDSA(), + ) + require.NoError(err) + exampleWarpAddress := crypto.CreateAddress(testEthAddrs[0], 0) + + tx, err := types.SignTx( + predicate.NewPredicateTx( + vm.chainConfig.ChainID, + 1, + &exampleWarpAddress, + 1_000_000, + big.NewInt(225*params.GWei), + big.NewInt(params.GWei), + common.Big0, + txPayload, + types.AccessList{}, + warp.ContractAddress, + signedMessage.Bytes(), + ), + types.LatestSignerForChainID(vm.chainConfig.ChainID), + testKeys[0].ToECDSA(), + ) + require.NoError(err) + errs := vm.txPool.AddRemotesSync([]*types.Transaction{createTx, tx}) + for i, err := range errs { + require.NoError(err, "failed to add tx at index %d", i) + } + + // If [validSignature] set the signature to be considered valid at the verified height. 
+ blockCtx := &block.Context{ + PChainHeight: minimumValidPChainHeight - 1, + } + if validSignature { + blockCtx.PChainHeight = minimumValidPChainHeight + } + vm.clock.Set(vm.clock.Time().Add(2 * time.Second)) + <-issuer + + warpBlock, err := vm.BuildBlockWithContext(context.Background(), blockCtx) + require.NoError(err) + + warpBlockVerifyWithCtx, ok := warpBlock.(block.WithVerifyContext) + require.True(ok) + shouldVerifyWithCtx, err := warpBlockVerifyWithCtx.ShouldVerifyWithContext(context.Background()) + require.NoError(err) + require.True(shouldVerifyWithCtx) + require.NoError(warpBlockVerifyWithCtx.VerifyWithContext(context.Background(), blockCtx)) + require.Equal(choices.Processing, warpBlock.Status()) + require.NoError(vm.SetPreference(context.Background(), warpBlock.ID())) + require.NoError(warpBlock.Accept(context.Background())) + vm.blockChain.DrainAcceptorQueue() + + ethBlock := warpBlock.(*chain.BlockWrapper).Block.(*Block).ethBlock + verifiedMessageReceipts := vm.blockChain.GetReceiptsByHash(ethBlock.Hash()) + require.Len(verifiedMessageReceipts, 2) + for i, receipt := range verifiedMessageReceipts { + require.Equal(types.ReceiptStatusSuccessful, receipt.Status, "index: %d", i) + } + + tracerAPI := tracers.NewAPI(vm.eth.APIBackend) + txTraceResults, err := tracerAPI.TraceBlockByHash(context.Background(), ethBlock.Hash(), nil) + require.NoError(err) + require.Len(txTraceResults, 2) + blockTxTraceResultBytes, err := json.Marshal(txTraceResults[1].Result) + require.NoError(err) + unmarshalResults := make(map[string]interface{}) + require.NoError(json.Unmarshal(blockTxTraceResultBytes, &unmarshalResults)) + require.Equal("", unmarshalResults["returnValue"]) + + txTraceResult, err := tracerAPI.TraceTransaction(context.Background(), tx.Hash(), nil) + require.NoError(err) + txTraceResultBytes, err := json.Marshal(txTraceResult) + require.NoError(err) + require.JSONEq(string(txTraceResultBytes), string(blockTxTraceResultBytes)) +} + +func 
TestReceiveWarpMessage(t *testing.T) { + require := require.New(t) + issuer, vm, _, _, _ := GenesisVM(t, true, genesisJSONDurango, "", "") + + defer func() { + require.NoError(vm.Shutdown(context.Background())) + }() + + acceptedLogsChan := make(chan []*types.Log, 10) + logsSub := vm.eth.APIBackend.SubscribeAcceptedLogsEvent(acceptedLogsChan) + defer logsSub.Unsubscribe() + + payloadData := utils.RandomBytes(100) + + addressedPayload, err := payload.NewAddressedCall( + testEthAddrs[0].Bytes(), + payloadData, + ) + require.NoError(err) + unsignedMessage, err := avalancheWarp.NewUnsignedMessage( + vm.ctx.NetworkID, + vm.ctx.ChainID, + addressedPayload.Bytes(), + ) + require.NoError(err) + + nodeID1 := ids.GenerateTestNodeID() + blsSecretKey1, err := bls.NewSecretKey() + require.NoError(err) + blsPublicKey1 := bls.PublicFromSecretKey(blsSecretKey1) + blsSignature1 := bls.Sign(blsSecretKey1, unsignedMessage.Bytes()) + + nodeID2 := ids.GenerateTestNodeID() + blsSecretKey2, err := bls.NewSecretKey() + require.NoError(err) + blsPublicKey2 := bls.PublicFromSecretKey(blsSecretKey2) + blsSignature2 := bls.Sign(blsSecretKey2, unsignedMessage.Bytes()) + + blsAggregatedSignature, err := bls.AggregateSignatures([]*bls.Signature{blsSignature1, blsSignature2}) + require.NoError(err) + + minimumValidPChainHeight := uint64(10) + getValidatorSetTestErr := errors.New("can't get validator set test error") + + vm.ctx.ValidatorState = &validators.TestState{ + GetSubnetIDF: func(ctx context.Context, chainID ids.ID) (ids.ID, error) { + return ids.Empty, nil + }, + GetValidatorSetF: func(ctx context.Context, height uint64, subnetID ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { + if height < minimumValidPChainHeight { + return nil, getValidatorSetTestErr + } + return map[ids.NodeID]*validators.GetValidatorOutput{ + nodeID1: { + NodeID: nodeID1, + PublicKey: blsPublicKey1, + Weight: 50, + }, + nodeID2: { + NodeID: nodeID2, + PublicKey: blsPublicKey2, + Weight: 50, + }, + }, 
nil + }, + } + + signersBitSet := set.NewBits() + signersBitSet.Add(0) + signersBitSet.Add(1) + + warpSignature := &avalancheWarp.BitSetSignature{ + Signers: signersBitSet.Bytes(), + } + + blsAggregatedSignatureBytes := bls.SignatureToBytes(blsAggregatedSignature) + copy(warpSignature.Signature[:], blsAggregatedSignatureBytes) + + signedMessage, err := avalancheWarp.NewMessage( + unsignedMessage, + warpSignature, + ) + require.NoError(err) + + getWarpMsgInput, err := warp.PackGetVerifiedWarpMessage(0) + require.NoError(err) + getVerifiedWarpMessageTx, err := types.SignTx( + predicate.NewPredicateTx( + vm.chainConfig.ChainID, + 0, + &warp.Module.Address, + 1_000_000, + big.NewInt(225*params.GWei), + big.NewInt(params.GWei), + common.Big0, + getWarpMsgInput, + types.AccessList{}, + warp.ContractAddress, + signedMessage.Bytes(), + ), + types.LatestSignerForChainID(vm.chainConfig.ChainID), + testKeys[0].ToECDSA(), + ) + require.NoError(err) + errs := vm.txPool.AddRemotesSync([]*types.Transaction{getVerifiedWarpMessageTx}) + for i, err := range errs { + require.NoError(err, "failed to add tx at index %d", i) + } + + // Build, verify, and accept block with valid proposer context. 
+ validProposerCtx := &block.Context{ + PChainHeight: minimumValidPChainHeight, + } + vm.clock.Set(vm.clock.Time().Add(2 * time.Second)) + <-issuer + + block2, err := vm.BuildBlockWithContext(context.Background(), validProposerCtx) + require.NoError(err) + + block2VerifyWithCtx, ok := block2.(block.WithVerifyContext) + require.True(ok) + shouldVerifyWithCtx, err := block2VerifyWithCtx.ShouldVerifyWithContext(context.Background()) + require.NoError(err) + require.True(shouldVerifyWithCtx) + require.NoError(block2VerifyWithCtx.VerifyWithContext(context.Background(), validProposerCtx)) + require.Equal(choices.Processing, block2.Status()) + require.NoError(vm.SetPreference(context.Background(), block2.ID())) + + // Verify the block with another valid context with identical predicate results + require.NoError(block2VerifyWithCtx.VerifyWithContext(context.Background(), &block.Context{ + PChainHeight: minimumValidPChainHeight + 1, + })) + require.Equal(choices.Processing, block2.Status()) + + // Verify the block in a different context causing the warp message to fail verification changing + // the expected header predicate results. 
+ require.ErrorIs(block2VerifyWithCtx.VerifyWithContext(context.Background(), &block.Context{ + PChainHeight: minimumValidPChainHeight - 1, + }), errInvalidHeaderPredicateResults) + + // Accept the block after performing multiple VerifyWithContext operations + require.NoError(block2.Accept(context.Background())) + vm.blockChain.DrainAcceptorQueue() + + ethBlock := block2.(*chain.BlockWrapper).Block.(*Block).ethBlock + verifiedMessageReceipts := vm.blockChain.GetReceiptsByHash(ethBlock.Hash()) + require.Len(verifiedMessageReceipts, 1) + verifiedMessageTxReceipt := verifiedMessageReceipts[0] + require.Equal(types.ReceiptStatusSuccessful, verifiedMessageTxReceipt.Status) + + expectedOutput, err := warp.PackGetVerifiedWarpMessageOutput(warp.GetVerifiedWarpMessageOutput{ + Message: warp.WarpMessage{ + SourceChainID: common.Hash(vm.ctx.ChainID), + OriginSenderAddress: testEthAddrs[0], + Payload: payloadData, + }, + Valid: true, + }) + require.NoError(err) + + tracerAPI := tracers.NewAPI(vm.eth.APIBackend) + txTraceResults, err := tracerAPI.TraceBlockByHash(context.Background(), ethBlock.Hash(), nil) + require.NoError(err) + require.Len(txTraceResults, 1) + blockTxTraceResultBytes, err := json.Marshal(txTraceResults[0].Result) + require.NoError(err) + unmarshalResults := make(map[string]interface{}) + require.NoError(json.Unmarshal(blockTxTraceResultBytes, &unmarshalResults)) + require.Equal(common.Bytes2Hex(expectedOutput), unmarshalResults["returnValue"]) + + txTraceResult, err := tracerAPI.TraceTransaction(context.Background(), getVerifiedWarpMessageTx.Hash(), nil) + require.NoError(err) + txTraceResultBytes, err := json.Marshal(txTraceResult) + require.NoError(err) + require.JSONEq(string(txTraceResultBytes), string(blockTxTraceResultBytes)) +} + +func TestMessageSignatureRequestsToVM(t *testing.T) { + _, vm, _, _, appSender := GenesisVM(t, true, genesisJSONDurango, "", "") + + defer func() { + err := vm.Shutdown(context.Background()) + require.NoError(t, err) + }() + 
+ // Generate a new warp unsigned message and add to warp backend + warpMessage, err := avalancheWarp.NewUnsignedMessage(vm.ctx.NetworkID, vm.ctx.ChainID, []byte{1, 2, 3}) + require.NoError(t, err) + + // Add the known message and get its signature to confirm. + err = vm.warpBackend.AddMessage(warpMessage) + require.NoError(t, err) + signature, err := vm.warpBackend.GetMessageSignature(warpMessage.ID()) + require.NoError(t, err) + + tests := map[string]struct { + messageID ids.ID + expectedResponse [bls.SignatureLen]byte + }{ + "known": { + messageID: warpMessage.ID(), + expectedResponse: signature, + }, + "unknown": { + messageID: ids.GenerateTestID(), + expectedResponse: [bls.SignatureLen]byte{}, + }, + } + + for name, test := range tests { + calledSendAppResponseFn := false + appSender.SendAppResponseF = func(ctx context.Context, nodeID ids.NodeID, requestID uint32, responseBytes []byte) error { + calledSendAppResponseFn = true + var response message.SignatureResponse + _, err := message.Codec.Unmarshal(responseBytes, &response) + require.NoError(t, err) + require.Equal(t, test.expectedResponse, response.Signature) + + return nil + } + t.Run(name, func(t *testing.T) { + var signatureRequest message.Request = message.MessageSignatureRequest{ + MessageID: test.messageID, + } + + requestBytes, err := message.Codec.Marshal(message.Version, &signatureRequest) + require.NoError(t, err) + + // Send the app request and make sure we called SendAppResponseFn + deadline := time.Now().Add(60 * time.Second) + err = vm.Network.AppRequest(context.Background(), ids.GenerateTestNodeID(), 1, deadline, requestBytes) + require.NoError(t, err) + require.True(t, calledSendAppResponseFn) + }) + } +} + +func TestBlockSignatureRequestsToVM(t *testing.T) { + _, vm, _, _, appSender := GenesisVM(t, true, genesisJSONDurango, "", "") + + defer func() { + err := vm.Shutdown(context.Background()) + require.NoError(t, err) + }() + + lastAcceptedID, err := vm.LastAccepted(context.Background()) + 
require.NoError(t, err) + + signature, err := vm.warpBackend.GetBlockSignature(lastAcceptedID) + require.NoError(t, err) + + tests := map[string]struct { + blockID ids.ID + expectedResponse [bls.SignatureLen]byte + }{ + "known": { + blockID: lastAcceptedID, + expectedResponse: signature, + }, + "unknown": { + blockID: ids.GenerateTestID(), + expectedResponse: [bls.SignatureLen]byte{}, + }, + } + + for name, test := range tests { + calledSendAppResponseFn := false + appSender.SendAppResponseF = func(ctx context.Context, nodeID ids.NodeID, requestID uint32, responseBytes []byte) error { + calledSendAppResponseFn = true + var response message.SignatureResponse + _, err := message.Codec.Unmarshal(responseBytes, &response) + require.NoError(t, err) + require.Equal(t, test.expectedResponse, response.Signature) + + return nil + } + t.Run(name, func(t *testing.T) { + var signatureRequest message.Request = message.BlockSignatureRequest{ + BlockID: test.blockID, + } + + requestBytes, err := message.Codec.Marshal(message.Version, &signatureRequest) + require.NoError(t, err) + + // Send the app request and make sure we called SendAppResponseFn + deadline := time.Now().Add(60 * time.Second) + err = vm.Network.AppRequest(context.Background(), ids.GenerateTestNodeID(), 1, deadline, requestBytes) + require.NoError(t, err) + require.True(t, calledSendAppResponseFn) + }) + } +} diff --git a/coreth/precompile/contract.go b/coreth/precompile/contract.go deleted file mode 100644 index 0d60b32a..00000000 --- a/coreth/precompile/contract.go +++ /dev/null @@ -1,143 +0,0 @@ -// (c) 2019-2020, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package precompile - -import ( - "fmt" - "math/big" - - "github.com/ethereum/go-ethereum/common" -) - -const ( - selectorLen = 4 -) - -type RunStatefulPrecompileFunc func(accessibleState PrecompileAccessibleState, caller common.Address, addr common.Address, input []byte, suppliedGas uint64, readOnly bool) (ret []byte, remainingGas uint64, err error) - -// PrecompileAccessibleState defines the interface exposed to stateful precompile contracts -type PrecompileAccessibleState interface { - GetStateDB() StateDB - GetBlockContext() BlockContext - NativeAssetCall(caller common.Address, input []byte, suppliedGas uint64, gasGost uint64, readOnly bool) (ret []byte, remainingGas uint64, err error) -} - -// BlockContext defines an interface that provides information to a stateful precompile -// about the block that activates the upgrade. The precompile can access this information -// to initialize its state. -type BlockContext interface { - Number() *big.Int - Timestamp() *big.Int -} - -// ChainContext defines an interface that provides information to a stateful precompile -// about the chain configuration. The precompile can access this information to initialize -// its state. -type ChainConfig interface { - // Note: None of the existing stateful precompiles currently access chain config information - // in Configure so this interface is empty. 
-} - -// StateDB is the interface for accessing EVM state -type StateDB interface { - GetState(common.Address, common.Hash) common.Hash - SetState(common.Address, common.Hash, common.Hash) - - SetCode(common.Address, []byte) - - SetNonce(common.Address, uint64) - GetNonce(common.Address) uint64 - - GetBalance(common.Address) *big.Int - AddBalance(common.Address, *big.Int) - SubBalance(common.Address, *big.Int) - - SubBalanceMultiCoin(common.Address, common.Hash, *big.Int) - AddBalanceMultiCoin(common.Address, common.Hash, *big.Int) - GetBalanceMultiCoin(common.Address, common.Hash) *big.Int - - CreateAccount(common.Address) - Exist(common.Address) bool -} - -// StatefulPrecompiledContract is the interface for executing a precompiled contract -type StatefulPrecompiledContract interface { - // Run executes the precompiled contract. - Run(accessibleState PrecompileAccessibleState, caller common.Address, addr common.Address, input []byte, suppliedGas uint64, readOnly bool) (ret []byte, remainingGas uint64, err error) -} - -// statefulPrecompileFunction defines a function implemented by a stateful precompile -type statefulPrecompileFunction struct { - // selector is the 4 byte function selector for this function - // This should be calculated from the function signature using CalculateFunctionSelector - selector []byte - // execute is performed when this function is selected - execute RunStatefulPrecompileFunc -} - -// newStatefulPrecompileFunction creates a stateful precompile function with the given arguments -//nolint:unused,deadcode -func newStatefulPrecompileFunction(selector []byte, execute RunStatefulPrecompileFunc) *statefulPrecompileFunction { - return &statefulPrecompileFunction{ - selector: selector, - execute: execute, - } -} - -// statefulPrecompileWithFunctionSelectors implements StatefulPrecompiledContract by using 4 byte function selectors to pass -// off responsibilities to internal execution functions. 
-// Note: because we only ever read from [functions] there no lock is required to make it thread-safe. -type statefulPrecompileWithFunctionSelectors struct { - fallback *statefulPrecompileFunction - functions map[string]*statefulPrecompileFunction -} - -// newStatefulPrecompileWithFunctionSelectors generates new StatefulPrecompile using [functions] as the available functions and [fallback] -// as an optional fallback if there is no input data. Note: the selector of [fallback] will be ignored, so it is required to be left empty. -//nolint:unused,deadcode -func newStatefulPrecompileWithFunctionSelectors(fallback *statefulPrecompileFunction, functions []*statefulPrecompileFunction) StatefulPrecompiledContract { - // Ensure that if a fallback is present, it does not have a mistakenly populated function selector. - if fallback != nil && len(fallback.selector) != 0 { - panic(fmt.Errorf("fallback function cannot specify non-zero length function selector")) - } - - // Construct the contract and populate [functions]. - contract := &statefulPrecompileWithFunctionSelectors{ - fallback: fallback, - functions: make(map[string]*statefulPrecompileFunction), - } - for _, function := range functions { - _, exists := contract.functions[string(function.selector)] - if exists { - panic(fmt.Errorf("cannot create stateful precompile with duplicated function selector: %q", function.selector)) - } - contract.functions[string(function.selector)] = function - } - - return contract -} - -// Run selects the function using the 4 byte function selector at the start of the input and executes the underlying function on the -// given arguments. -func (s *statefulPrecompileWithFunctionSelectors) Run(accessibleState PrecompileAccessibleState, caller common.Address, addr common.Address, input []byte, suppliedGas uint64, readOnly bool) (ret []byte, remainingGas uint64, err error) { - // If there is no input data present, call the fallback function if present. 
- if len(input) == 0 && s.fallback != nil { - return s.fallback.execute(accessibleState, caller, addr, nil, suppliedGas, readOnly) - } - - // Otherwise, an unexpected input size will result in an error. - if len(input) < selectorLen { - return nil, suppliedGas, fmt.Errorf("missing function selector to precompile - input length (%d)", len(input)) - } - - // Use the function selector to grab the correct function - selector := input[:selectorLen] - functionInput := input[selectorLen:] - function, ok := s.functions[string(selector)] - if !ok { - return nil, suppliedGas, fmt.Errorf("invalid function selector %#x", selector) - } - - return function.execute(accessibleState, caller, addr, functionInput, suppliedGas, readOnly) -} diff --git a/coreth/precompile/contract/contract.go b/coreth/precompile/contract/contract.go new file mode 100644 index 00000000..0be76a89 --- /dev/null +++ b/coreth/precompile/contract/contract.go @@ -0,0 +1,109 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package contract + +import ( + "fmt" + + "github.com/ethereum/go-ethereum/common" +) + +const ( + SelectorLen = 4 +) + +type RunStatefulPrecompileFunc func(accessibleState AccessibleState, caller common.Address, addr common.Address, input []byte, suppliedGas uint64, readOnly bool) (ret []byte, remainingGas uint64, err error) + +// ActivationFunc defines a function that is used to determine if a function is active +// The return value is whether or not the function is active +type ActivationFunc func(AccessibleState) bool + +// StatefulPrecompileFunction defines a function implemented by a stateful precompile +type StatefulPrecompileFunction struct { + // selector is the 4 byte function selector for this function + selector []byte + // execute is performed when this function is selected + execute RunStatefulPrecompileFunc + // activation is checked before this function is executed + activation ActivationFunc +} + +func (f *StatefulPrecompileFunction) IsActivated(accessibleState AccessibleState) bool { + if f.activation == nil { + return true + } + return f.activation(accessibleState) +} + +// NewStatefulPrecompileFunction creates a stateful precompile function with the given arguments +func NewStatefulPrecompileFunction(selector []byte, execute RunStatefulPrecompileFunc) *StatefulPrecompileFunction { + return &StatefulPrecompileFunction{ + selector: selector, + execute: execute, + } +} + +func NewStatefulPrecompileFunctionWithActivator(selector []byte, execute RunStatefulPrecompileFunc, activation ActivationFunc) *StatefulPrecompileFunction { + return &StatefulPrecompileFunction{ + selector: selector, + execute: execute, + activation: activation, + } +} + +// statefulPrecompileWithFunctionSelectors implements StatefulPrecompiledContract by using 4 byte function selectors to pass +// off responsibilities to internal execution functions. +// Note: because we only ever read from [functions] there no lock is required to make it thread-safe. 
+type statefulPrecompileWithFunctionSelectors struct { + fallback RunStatefulPrecompileFunc + functions map[string]*StatefulPrecompileFunction +} + +// NewStatefulPrecompileContract generates new StatefulPrecompile using [functions] as the available functions and [fallback] +// as an optional fallback if there is no input data. Note: the selector of [fallback] will be ignored, so it is required to be left empty. +func NewStatefulPrecompileContract(fallback RunStatefulPrecompileFunc, functions []*StatefulPrecompileFunction) (StatefulPrecompiledContract, error) { + // Construct the contract and populate [functions]. + contract := &statefulPrecompileWithFunctionSelectors{ + fallback: fallback, + functions: make(map[string]*StatefulPrecompileFunction), + } + for _, function := range functions { + _, exists := contract.functions[string(function.selector)] + if exists { + return nil, fmt.Errorf("cannot create stateful precompile with duplicated function selector: %q", function.selector) + } + contract.functions[string(function.selector)] = function + } + + return contract, nil +} + +// Run selects the function using the 4 byte function selector at the start of the input and executes the underlying function on the +// given arguments. +func (s *statefulPrecompileWithFunctionSelectors) Run(accessibleState AccessibleState, caller common.Address, addr common.Address, input []byte, suppliedGas uint64, readOnly bool) (ret []byte, remainingGas uint64, err error) { + // If there is no input data present, call the fallback function if present. + if len(input) == 0 && s.fallback != nil { + return s.fallback(accessibleState, caller, addr, nil, suppliedGas, readOnly) + } + + // Otherwise, an unexpected input size will result in an error. 
+ if len(input) < SelectorLen { + return nil, suppliedGas, fmt.Errorf("missing function selector to precompile - input length (%d)", len(input)) + } + + // Use the function selector to grab the correct function + selector := input[:SelectorLen] + functionInput := input[SelectorLen:] + function, ok := s.functions[string(selector)] + if !ok { + return nil, suppliedGas, fmt.Errorf("invalid function selector %#x", selector) + } + + // Check if the function is activated + if !function.IsActivated(accessibleState) { + return nil, suppliedGas, fmt.Errorf("invalid non-activated function selector %#x", selector) + } + + return function.execute(accessibleState, caller, addr, functionInput, suppliedGas, readOnly) +} diff --git a/coreth/precompile/contract/interfaces.go b/coreth/precompile/contract/interfaces.go new file mode 100644 index 00000000..266df564 --- /dev/null +++ b/coreth/precompile/contract/interfaces.go @@ -0,0 +1,79 @@ +// (c) 2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +// Defines the interface for the configuration and execution of a precompile contract +package contract + +import ( + "math/big" + + "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/coreth/precompile/precompileconfig" + "github.com/ethereum/go-ethereum/common" +) + +// StatefulPrecompiledContract is the interface for executing a precompiled contract +type StatefulPrecompiledContract interface { + // Run executes the precompiled contract. 
+ Run(accessibleState AccessibleState, caller common.Address, addr common.Address, input []byte, suppliedGas uint64, readOnly bool) (ret []byte, remainingGas uint64, err error) +} + +// StateDB is the interface for accessing EVM state +type StateDB interface { + GetState(common.Address, common.Hash) common.Hash + SetState(common.Address, common.Hash, common.Hash) + + SetNonce(common.Address, uint64) + GetNonce(common.Address) uint64 + + GetBalance(common.Address) *big.Int + AddBalance(common.Address, *big.Int) + GetBalanceMultiCoin(common.Address, common.Hash) *big.Int + + CreateAccount(common.Address) + Exist(common.Address) bool + + AddLog(addr common.Address, topics []common.Hash, data []byte, blockNumber uint64) + GetLogData() (topics [][]common.Hash, data [][]byte) + GetPredicateStorageSlots(address common.Address, index int) ([]byte, bool) + SetPredicateStorageSlots(address common.Address, predicates [][]byte) + + GetTxHash() common.Hash + + Suicide(common.Address) bool + + Snapshot() int + RevertToSnapshot(int) +} + +// AccessibleState defines the interface exposed to stateful precompile contracts +type AccessibleState interface { + GetStateDB() StateDB + GetBlockContext() BlockContext + GetSnowContext() *snow.Context + GetChainConfig() precompileconfig.ChainConfig + NativeAssetCall(caller common.Address, input []byte, suppliedGas uint64, gasCost uint64, readOnly bool) (ret []byte, remainingGas uint64, err error) +} + +// ConfigurationBlockContext defines the interface required to configure a precompile. +type ConfigurationBlockContext interface { + Number() *big.Int + Timestamp() uint64 +} + +type BlockContext interface { + ConfigurationBlockContext + // GetResults returns an arbitrary byte array result of verifying the predicates + // of the given transaction, precompile address pair. 
+ GetPredicateResults(txHash common.Hash, precompileAddress common.Address) []byte +} + +type Configurator interface { + MakeConfig() precompileconfig.Config + Configure( + chainConfig precompileconfig.ChainConfig, + precompileconfig precompileconfig.Config, + state StateDB, + blockContext ConfigurationBlockContext, + ) error +} diff --git a/coreth/precompile/contract/mocks.go b/coreth/precompile/contract/mocks.go new file mode 100644 index 00000000..f52044a8 --- /dev/null +++ b/coreth/precompile/contract/mocks.go @@ -0,0 +1,429 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/ava-labs/coreth/precompile/contract (interfaces: BlockContext,AccessibleState,StateDB) +// +// Generated by this command: +// +// mockgen -package=contract -destination=precompile/contract/mocks.go github.com/ava-labs/coreth/precompile/contract BlockContext,AccessibleState,StateDB +// + +// Package contract is a generated GoMock package. +package contract + +import ( + big "math/big" + reflect "reflect" + + snow "github.com/ava-labs/avalanchego/snow" + precompileconfig "github.com/ava-labs/coreth/precompile/precompileconfig" + common "github.com/ethereum/go-ethereum/common" + gomock "go.uber.org/mock/gomock" +) + +// MockBlockContext is a mock of BlockContext interface. +type MockBlockContext struct { + ctrl *gomock.Controller + recorder *MockBlockContextMockRecorder +} + +// MockBlockContextMockRecorder is the mock recorder for MockBlockContext. +type MockBlockContextMockRecorder struct { + mock *MockBlockContext +} + +// NewMockBlockContext creates a new mock instance. +func NewMockBlockContext(ctrl *gomock.Controller) *MockBlockContext { + mock := &MockBlockContext{ctrl: ctrl} + mock.recorder = &MockBlockContextMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockBlockContext) EXPECT() *MockBlockContextMockRecorder { + return m.recorder +} + +// GetPredicateResults mocks base method. 
+func (m *MockBlockContext) GetPredicateResults(arg0 common.Hash, arg1 common.Address) []byte { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPredicateResults", arg0, arg1) + ret0, _ := ret[0].([]byte) + return ret0 +} + +// GetPredicateResults indicates an expected call of GetPredicateResults. +func (mr *MockBlockContextMockRecorder) GetPredicateResults(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPredicateResults", reflect.TypeOf((*MockBlockContext)(nil).GetPredicateResults), arg0, arg1) +} + +// Number mocks base method. +func (m *MockBlockContext) Number() *big.Int { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Number") + ret0, _ := ret[0].(*big.Int) + return ret0 +} + +// Number indicates an expected call of Number. +func (mr *MockBlockContextMockRecorder) Number() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Number", reflect.TypeOf((*MockBlockContext)(nil).Number)) +} + +// Timestamp mocks base method. +func (m *MockBlockContext) Timestamp() uint64 { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Timestamp") + ret0, _ := ret[0].(uint64) + return ret0 +} + +// Timestamp indicates an expected call of Timestamp. +func (mr *MockBlockContextMockRecorder) Timestamp() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Timestamp", reflect.TypeOf((*MockBlockContext)(nil).Timestamp)) +} + +// MockAccessibleState is a mock of AccessibleState interface. +type MockAccessibleState struct { + ctrl *gomock.Controller + recorder *MockAccessibleStateMockRecorder +} + +// MockAccessibleStateMockRecorder is the mock recorder for MockAccessibleState. +type MockAccessibleStateMockRecorder struct { + mock *MockAccessibleState +} + +// NewMockAccessibleState creates a new mock instance. 
+func NewMockAccessibleState(ctrl *gomock.Controller) *MockAccessibleState { + mock := &MockAccessibleState{ctrl: ctrl} + mock.recorder = &MockAccessibleStateMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockAccessibleState) EXPECT() *MockAccessibleStateMockRecorder { + return m.recorder +} + +// GetBlockContext mocks base method. +func (m *MockAccessibleState) GetBlockContext() BlockContext { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBlockContext") + ret0, _ := ret[0].(BlockContext) + return ret0 +} + +// GetBlockContext indicates an expected call of GetBlockContext. +func (mr *MockAccessibleStateMockRecorder) GetBlockContext() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlockContext", reflect.TypeOf((*MockAccessibleState)(nil).GetBlockContext)) +} + +// GetChainConfig mocks base method. +func (m *MockAccessibleState) GetChainConfig() precompileconfig.ChainConfig { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetChainConfig") + ret0, _ := ret[0].(precompileconfig.ChainConfig) + return ret0 +} + +// GetChainConfig indicates an expected call of GetChainConfig. +func (mr *MockAccessibleStateMockRecorder) GetChainConfig() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetChainConfig", reflect.TypeOf((*MockAccessibleState)(nil).GetChainConfig)) +} + +// GetSnowContext mocks base method. +func (m *MockAccessibleState) GetSnowContext() *snow.Context { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSnowContext") + ret0, _ := ret[0].(*snow.Context) + return ret0 +} + +// GetSnowContext indicates an expected call of GetSnowContext. 
+func (mr *MockAccessibleStateMockRecorder) GetSnowContext() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSnowContext", reflect.TypeOf((*MockAccessibleState)(nil).GetSnowContext)) +} + +// GetStateDB mocks base method. +func (m *MockAccessibleState) GetStateDB() StateDB { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetStateDB") + ret0, _ := ret[0].(StateDB) + return ret0 +} + +// GetStateDB indicates an expected call of GetStateDB. +func (mr *MockAccessibleStateMockRecorder) GetStateDB() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetStateDB", reflect.TypeOf((*MockAccessibleState)(nil).GetStateDB)) +} + +// NativeAssetCall mocks base method. +func (m *MockAccessibleState) NativeAssetCall(arg0 common.Address, arg1 []byte, arg2, arg3 uint64, arg4 bool) ([]byte, uint64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NativeAssetCall", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(uint64) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// NativeAssetCall indicates an expected call of NativeAssetCall. +func (mr *MockAccessibleStateMockRecorder) NativeAssetCall(arg0, arg1, arg2, arg3, arg4 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NativeAssetCall", reflect.TypeOf((*MockAccessibleState)(nil).NativeAssetCall), arg0, arg1, arg2, arg3, arg4) +} + +// MockStateDB is a mock of StateDB interface. +type MockStateDB struct { + ctrl *gomock.Controller + recorder *MockStateDBMockRecorder +} + +// MockStateDBMockRecorder is the mock recorder for MockStateDB. +type MockStateDBMockRecorder struct { + mock *MockStateDB +} + +// NewMockStateDB creates a new mock instance. 
+func NewMockStateDB(ctrl *gomock.Controller) *MockStateDB { + mock := &MockStateDB{ctrl: ctrl} + mock.recorder = &MockStateDBMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockStateDB) EXPECT() *MockStateDBMockRecorder { + return m.recorder +} + +// AddBalance mocks base method. +func (m *MockStateDB) AddBalance(arg0 common.Address, arg1 *big.Int) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "AddBalance", arg0, arg1) +} + +// AddBalance indicates an expected call of AddBalance. +func (mr *MockStateDBMockRecorder) AddBalance(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddBalance", reflect.TypeOf((*MockStateDB)(nil).AddBalance), arg0, arg1) +} + +// AddLog mocks base method. +func (m *MockStateDB) AddLog(arg0 common.Address, arg1 []common.Hash, arg2 []byte, arg3 uint64) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "AddLog", arg0, arg1, arg2, arg3) +} + +// AddLog indicates an expected call of AddLog. +func (mr *MockStateDBMockRecorder) AddLog(arg0, arg1, arg2, arg3 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddLog", reflect.TypeOf((*MockStateDB)(nil).AddLog), arg0, arg1, arg2, arg3) +} + +// CreateAccount mocks base method. +func (m *MockStateDB) CreateAccount(arg0 common.Address) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "CreateAccount", arg0) +} + +// CreateAccount indicates an expected call of CreateAccount. +func (mr *MockStateDBMockRecorder) CreateAccount(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateAccount", reflect.TypeOf((*MockStateDB)(nil).CreateAccount), arg0) +} + +// Exist mocks base method. +func (m *MockStateDB) Exist(arg0 common.Address) bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Exist", arg0) + ret0, _ := ret[0].(bool) + return ret0 +} + +// Exist indicates an expected call of Exist. 
+func (mr *MockStateDBMockRecorder) Exist(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Exist", reflect.TypeOf((*MockStateDB)(nil).Exist), arg0) +} + +// GetBalance mocks base method. +func (m *MockStateDB) GetBalance(arg0 common.Address) *big.Int { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBalance", arg0) + ret0, _ := ret[0].(*big.Int) + return ret0 +} + +// GetBalance indicates an expected call of GetBalance. +func (mr *MockStateDBMockRecorder) GetBalance(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBalance", reflect.TypeOf((*MockStateDB)(nil).GetBalance), arg0) +} + +// GetBalanceMultiCoin mocks base method. +func (m *MockStateDB) GetBalanceMultiCoin(arg0 common.Address, arg1 common.Hash) *big.Int { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBalanceMultiCoin", arg0, arg1) + ret0, _ := ret[0].(*big.Int) + return ret0 +} + +// GetBalanceMultiCoin indicates an expected call of GetBalanceMultiCoin. +func (mr *MockStateDBMockRecorder) GetBalanceMultiCoin(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBalanceMultiCoin", reflect.TypeOf((*MockStateDB)(nil).GetBalanceMultiCoin), arg0, arg1) +} + +// GetLogData mocks base method. +func (m *MockStateDB) GetLogData() ([][]common.Hash, [][]byte) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetLogData") + ret0, _ := ret[0].([][]common.Hash) + ret1, _ := ret[1].([][]byte) + return ret0, ret1 +} + +// GetLogData indicates an expected call of GetLogData. +func (mr *MockStateDBMockRecorder) GetLogData() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLogData", reflect.TypeOf((*MockStateDB)(nil).GetLogData)) +} + +// GetNonce mocks base method. 
+func (m *MockStateDB) GetNonce(arg0 common.Address) uint64 { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetNonce", arg0) + ret0, _ := ret[0].(uint64) + return ret0 +} + +// GetNonce indicates an expected call of GetNonce. +func (mr *MockStateDBMockRecorder) GetNonce(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNonce", reflect.TypeOf((*MockStateDB)(nil).GetNonce), arg0) +} + +// GetPredicateStorageSlots mocks base method. +func (m *MockStateDB) GetPredicateStorageSlots(arg0 common.Address, arg1 int) ([]byte, bool) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPredicateStorageSlots", arg0, arg1) + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(bool) + return ret0, ret1 +} + +// GetPredicateStorageSlots indicates an expected call of GetPredicateStorageSlots. +func (mr *MockStateDBMockRecorder) GetPredicateStorageSlots(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPredicateStorageSlots", reflect.TypeOf((*MockStateDB)(nil).GetPredicateStorageSlots), arg0, arg1) +} + +// GetState mocks base method. +func (m *MockStateDB) GetState(arg0 common.Address, arg1 common.Hash) common.Hash { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetState", arg0, arg1) + ret0, _ := ret[0].(common.Hash) + return ret0 +} + +// GetState indicates an expected call of GetState. +func (mr *MockStateDBMockRecorder) GetState(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetState", reflect.TypeOf((*MockStateDB)(nil).GetState), arg0, arg1) +} + +// GetTxHash mocks base method. +func (m *MockStateDB) GetTxHash() common.Hash { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetTxHash") + ret0, _ := ret[0].(common.Hash) + return ret0 +} + +// GetTxHash indicates an expected call of GetTxHash. 
+func (mr *MockStateDBMockRecorder) GetTxHash() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTxHash", reflect.TypeOf((*MockStateDB)(nil).GetTxHash)) +} + +// RevertToSnapshot mocks base method. +func (m *MockStateDB) RevertToSnapshot(arg0 int) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "RevertToSnapshot", arg0) +} + +// RevertToSnapshot indicates an expected call of RevertToSnapshot. +func (mr *MockStateDBMockRecorder) RevertToSnapshot(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RevertToSnapshot", reflect.TypeOf((*MockStateDB)(nil).RevertToSnapshot), arg0) +} + +// SetNonce mocks base method. +func (m *MockStateDB) SetNonce(arg0 common.Address, arg1 uint64) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetNonce", arg0, arg1) +} + +// SetNonce indicates an expected call of SetNonce. +func (mr *MockStateDBMockRecorder) SetNonce(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetNonce", reflect.TypeOf((*MockStateDB)(nil).SetNonce), arg0, arg1) +} + +// SetPredicateStorageSlots mocks base method. +func (m *MockStateDB) SetPredicateStorageSlots(arg0 common.Address, arg1 [][]byte) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetPredicateStorageSlots", arg0, arg1) +} + +// SetPredicateStorageSlots indicates an expected call of SetPredicateStorageSlots. +func (mr *MockStateDBMockRecorder) SetPredicateStorageSlots(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetPredicateStorageSlots", reflect.TypeOf((*MockStateDB)(nil).SetPredicateStorageSlots), arg0, arg1) +} + +// SetState mocks base method. +func (m *MockStateDB) SetState(arg0 common.Address, arg1, arg2 common.Hash) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetState", arg0, arg1, arg2) +} + +// SetState indicates an expected call of SetState. 
+func (mr *MockStateDBMockRecorder) SetState(arg0, arg1, arg2 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetState", reflect.TypeOf((*MockStateDB)(nil).SetState), arg0, arg1, arg2) +} + +// Snapshot mocks base method. +func (m *MockStateDB) Snapshot() int { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Snapshot") + ret0, _ := ret[0].(int) + return ret0 +} + +// Snapshot indicates an expected call of Snapshot. +func (mr *MockStateDBMockRecorder) Snapshot() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Snapshot", reflect.TypeOf((*MockStateDB)(nil).Snapshot)) +} + +// Suicide mocks base method. +func (m *MockStateDB) Suicide(arg0 common.Address) bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Suicide", arg0) + ret0, _ := ret[0].(bool) + return ret0 +} + +// Suicide indicates an expected call of Suicide. +func (mr *MockStateDBMockRecorder) Suicide(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Suicide", reflect.TypeOf((*MockStateDB)(nil).Suicide), arg0) +} diff --git a/coreth/precompile/contract/utils.go b/coreth/precompile/contract/utils.go new file mode 100644 index 00000000..6b8ed7c8 --- /dev/null +++ b/coreth/precompile/contract/utils.go @@ -0,0 +1,60 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package contract + +import ( + "fmt" + "regexp" + "strings" + + "github.com/ava-labs/coreth/accounts/abi" + "github.com/ava-labs/coreth/vmerrs" + "github.com/ethereum/go-ethereum/crypto" +) + +// Gas costs for stateful precompiles +const ( + WriteGasCostPerSlot = 20_000 + ReadGasCostPerSlot = 5_000 + + // Per LOG operation. + LogGas uint64 = 375 // from params/protocol_params.go + // Gas cost of single topic of the LOG. Should be multiplied by the number of topics. 
+ LogTopicGas uint64 = 375 // from params/protocol_params.go + // Per byte cost in a LOG operation's data. Should be multiplied by the byte size of the data. + LogDataGas uint64 = 8 // from params/protocol_params.go +) + +var functionSignatureRegex = regexp.MustCompile(`\w+\((\w*|(\w+,)+\w+)\)`) + +// CalculateFunctionSelector returns the 4 byte function selector that results from [functionSignature] +// Ex. the function setBalance(addr address, balance uint256) should be passed in as the string: +// "setBalance(address,uint256)" +// TODO: remove this after moving to ABI based function selectors. +func CalculateFunctionSelector(functionSignature string) []byte { + if !functionSignatureRegex.MatchString(functionSignature) { + panic(fmt.Errorf("invalid function signature: %q", functionSignature)) + } + hash := crypto.Keccak256([]byte(functionSignature)) + return hash[:4] +} + +// DeductGas checks if [suppliedGas] is sufficient against [requiredGas] and deducts [requiredGas] from [suppliedGas]. +func DeductGas(suppliedGas uint64, requiredGas uint64) (uint64, error) { + if suppliedGas < requiredGas { + return 0, vmerrs.ErrOutOfGas + } + return suppliedGas - requiredGas, nil +} + +// ParseABI parses the given ABI string and returns the parsed ABI. +// If the ABI is invalid, it panics. +func ParseABI(rawABI string) abi.ABI { + parsed, err := abi.JSON(strings.NewReader(rawABI)) + if err != nil { + panic(err) + } + + return parsed +} diff --git a/coreth/precompile/utils_test.go b/coreth/precompile/contract/utils_test.go similarity index 88% rename from coreth/precompile/utils_test.go rename to coreth/precompile/contract/utils_test.go index 8d9f6c9e..6220af95 100644 --- a/coreth/precompile/utils_test.go +++ b/coreth/precompile/contract/utils_test.go @@ -1,4 +1,7 @@ -package precompile +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package contract import ( "testing" diff --git a/coreth/precompile/contracts/warp/README.md b/coreth/precompile/contracts/warp/README.md new file mode 100644 index 00000000..10e1daaa --- /dev/null +++ b/coreth/precompile/contracts/warp/README.md @@ -0,0 +1,151 @@ +# Integrating Avalanche Warp Messaging into the EVM + +Avalanche Warp Messaging offers a basic primitive to enable Cross-Subnet communication on the Avalanche Network. + +It is intended to allow communication between arbitrary Custom Virtual Machines (including, but not limited to Subnet-EVM and Coreth). + +## How does Avalanche Warp Messaging Work? + +Avalanche Warp Messaging uses BLS Multi-Signatures with Public-Key Aggregation where every Avalanche validator registers a public key alongside its NodeID on the Avalanche P-Chain. + +Every node tracking a Subnet has read access to the Avalanche P-Chain. This provides weighted sets of BLS Public Keys that correspond to the validator sets of each Subnet on the Avalanche Network. Avalanche Warp Messaging provides a basic primitive for signing and verifying messages between Subnets: the receiving network can verify whether an aggregation of signatures from a set of source Subnet validators represents a threshold of stake large enough for the receiving network to process the message. + +For more details on Avalanche Warp Messaging, see the AvalancheGo [Warp README](https://docs.avax.network/build/cross-chain/awm/deep-dive). + +### Flow of Sending / Receiving a Warp Message within the EVM + +The Avalanche Warp Precompile enables this flow to send a message from blockchain A to blockchain B: + +1. Call the Warp Precompile `sendWarpMessage` function with the arguments for the `UnsignedMessage` +2. Warp Precompile emits an event / log containing the `UnsignedMessage` specified by the caller of `sendWarpMessage` +3. Network accepts the block containing the `UnsignedMessage` in the log, so that validators are willing to sign the message +4. 
An off-chain relayer queries the validators for their signatures of the message and aggregates the signatures to create a `SignedMessage` +5. The off-chain relayer encodes the `SignedMessage` as the [predicate](#predicate-encoding) in the AccessList of a transaction to deliver on blockchain B +6. The transaction is delivered on blockchain B, the signature is verified prior to executing the block, and the message is accessible via the Warp Precompile's `getVerifiedWarpMessage` during the execution of that transaction + +### Warp Precompile + +The Warp Precompile is broken down into three functions defined in the Solidity interface file [here](../../../contracts/contracts/interfaces/IWarpMessenger.sol). + +#### sendWarpMessage + +`sendWarpMessage` is used to send a verifiable message. Calling this function results in sending a message with the following contents: + +- `SourceChainID` - blockchainID of the sourceChain on the Avalanche P-Chain +- `SourceAddress` - `msg.sender` encoded as a 32 byte value that calls `sendWarpMessage` +- `Payload` - `payload` argument specified in the call to `sendWarpMessage` emitted as the unindexed data of the resulting log + +Calling this function will issue a `SendWarpMessage` event from the Warp Precompile. Since the EVM limits the number of topics to 4 including the EventID, this message includes only the topics that would be expected to help filter messages emitted from the Warp Precompile the most. + +Specifically, the `payload` is not emitted as a topic because each topic must be encoded as a hash. Therefore, we opt to take advantage of each possible topic to maximize the possible filtering for emitted Warp Messages. + +Additionally, the `SourceChainID` is excluded because anyone parsing the chain can be expected to already know the blockchainID. 
Therefore, the `SendWarpMessage` event includes the indexable attributes:
+
+- `sender`
+- The `messageID` of the unsigned message (sha256 of the unsigned message)
+
+The actual `message` is the entire [Avalanche Warp Unsigned Message](https://github.com/ava-labs/avalanchego/blob/master/vms/platformvm/warp/unsigned_message.go#L14) including an [AddressedCall](https://github.com/ava-labs/avalanchego/tree/master/vms/platformvm/warp/payload#readme). The unsigned message is emitted as the unindexed data in the log.
+
+#### getVerifiedWarpMessage
+
+`getVerifiedWarpMessage` is used to read the contents of the delivered Avalanche Warp Message into the expected format.
+
+It returns the message if present and a boolean indicating if a message is present.
+
+To use this function, the transaction must include the signed Avalanche Warp Message encoded in the [predicate](#predicate-encoding) of the transaction. Prior to executing a block, the VM iterates through transactions and pre-verifies all predicates. If a transaction's predicate is invalid, then it is considered invalid to include in the block and dropped.
+
+This leads to the following advantages:
+
+1. The EVM execution does not need to verify the Warp Message at runtime (no signature verification or external calls to the P-Chain)
+2. The EVM can deterministically re-execute and re-verify blocks assuming the predicate was verified by the network (e.g., in bootstrapping)
+
+This pre-verification is performed using the ProposerVM Block header during [block verification](../../../plugin/evm/block.go#L220) and [block building](../../../miner/worker.go#L200).
+
+#### getBlockchainID
+
+`getBlockchainID` returns the blockchainID of the blockchain that the VM is running on.
+
+This is different from the conventional Ethereum ChainID registered to [ChainList](https://chainlist.org/).
+
+The `blockchainID` in Avalanche refers to the txID that created the blockchain on the Avalanche P-Chain ([docs](https://docs.avax.network/specs/platform-transaction-serialization#unsigned-create-chain-tx)).
+
+### Predicate Encoding
+
+Avalanche Warp Messages are encoded as a signed Avalanche [Warp Message](https://github.com/ava-labs/avalanchego/blob/master/vms/platformvm/warp/message.go) where the [UnsignedMessage](https://github.com/ava-labs/avalanchego/blob/master/vms/platformvm/warp/unsigned_message.go)'s payload includes an [AddressedPayload](https://github.com/ava-labs/avalanchego/blob/master/vms/platformvm/warp/payload/payload.go).
+
+Since the predicate is encoded into the [Transaction Access List](https://eips.ethereum.org/EIPS/eip-2930), it is packed into 32 byte hashes intended to declare storage slots that should be pre-warmed into the cache prior to transaction execution.
+
+Therefore, we use the [Predicate Utils](https://github.com/ava-labs/coreth/blob/master/predicate/Predicate.md) package to encode the actual byte slice of size N into the access list.
+
+### Performance Optimization: C-Chain to Subnet
+
+To support C-Chain to Subnet communication, or more generally Primary Network to Subnet communication, we special case the C-Chain for two reasons:
+
+1. Every Subnet validator validates the C-Chain
+2. The Primary Network has the largest possible number of validators
+
+Since the Primary Network has the largest possible number of validators for any Subnet on Avalanche, it would also be the most expensive Subnet to receive and verify Avalanche Warp Messages from, as reaching a threshold of stake on the Primary Network would require many signatures. Luckily, we can do something much smarter.
+
+When a Subnet receives a message from a blockchain on the Primary Network, we use the validator set of the receiving Subnet instead of the entire network when validating the message.
This means that the C-Chain sending a message can be the exact same as Subnet to Subnet communication. + +However, when Subnet B receives a message from the C-Chain, it changes the semantics to the following: + +1. Read the SourceChainID of the signed message (C-Chain) +2. Look up the SubnetID that validates C-Chain: Primary Network +3. Look up the validator set of Subnet B (instead of the Primary Network) and the registered BLS Public Keys of Subnet B at the P-Chain height specified by the ProposerVM header +4. Continue Warp Message verification using the validator set of Subnet B instead of the Primary Network + +This means that C-Chain to Subnet communication only requires a threshold of stake on the receiving subnet to sign the message instead of a threshold of stake for the entire Primary Network. + +This assumes that the security of Subnet B already depends on the validators of Subnet B to behave virtuously. Therefore, requiring a threshold of stake from the receiving Subnet's validator set instead of the whole Primary Network does not meaningfully change security of the receiving Subnet. + +Note: this special case is ONLY applied during Warp Message verification. The message sent by the Primary Network will still contain the Avalanche C-Chain's blockchainID as the sourceChainID and signatures will be served by querying the C-Chain directly. + +## Design Considerations + +### Re-Processing Historical Blocks + +Avalanche Warp Messaging depends on the Avalanche P-Chain state at the P-Chain height specified by the ProposerVM block header. + +Verifying a message requires looking up the validator set of the source subnet on the P-Chain. To support this, Avalanche Warp Messaging uses the ProposerVM header, which includes the P-Chain height it was issued at as the canonical point to lookup the source subnet's validator set. 
+ +This means verifying the Warp Message and therefore the state transition on a block depends on state that is external to the blockchain itself: the P-Chain. + +The Avalanche P-Chain tracks only its current state and reverse diff layers (reversing the changes from past blocks) in order to re-calculate the validator set at a historical height. This means calculating a very old validator set that is used to verify a Warp Message in an old block may become prohibitively expensive. + +Therefore, we need a heuristic to ensure that the network can correctly re-process old blocks (note: re-processing old blocks is a requirement to perform bootstrapping and is used in some VMs to serve or verify historical data). + +As a result, we require that the block itself provides a deterministic hint which determines which Avalanche Warp Messages were considered valid/invalid during the block's execution. This ensures that we can always re-process blocks and use the hint to decide whether an Avalanche Warp Message should be treated as valid/invalid even after the P-Chain state that was used at the original execution time may no longer support fast lookups. + +To provide that hint, we've explored two designs: + +1. Include a predicate in the transaction to ensure any referenced message is valid +2. Append the results of checking whether a Warp Message is valid/invalid to the block data itself + +The current implementation uses option (1). + +The original reason for this was that the notion of predicates for precompiles was designed with Shared Memory in mind. In the case of shared memory, there is no canonical "P-Chain height" in the block which determines whether or not Avalanche Warp Messages are valid. + +Instead, the VM interprets a shared memory import operation as valid as soon as the UTXO is available in shared memory. 
This means that if it were up to the block producer to staple the valid/invalid results of whether or not an attempted atomic operation should be treated as valid, a byzantine block producer could arbitrarily report that such atomic operations were invalid and cause a griefing attack to burn the gas of users that attempted to perform an import.
+
+Therefore, a transaction-specified predicate is required to implement the shared memory precompile to prevent such a griefing attack.
+
+In contrast, Avalanche Warp Messages are validated within the context of an exact P-Chain height. Therefore, if a block producer attempted to lie about the validity of such a message, the network would interpret that block as invalid.
+
+### Guarantees Offered by Warp Precompile vs. Built on Top
+
+#### Guarantees Offered by Warp Precompile
+
+The Warp Precompile was designed with the intention of minimizing the trusted computing base for the VM as much as possible. Therefore, it makes several tradeoffs which encourage users to use protocols built ON TOP of the Warp Precompile itself as opposed to directly using the Warp Precompile.
+
+The Warp Precompile itself provides ONLY the following ability:
+
+- Emit a verifiable message from (Address A, Blockchain A) to (Address B, Blockchain B) that can be verified by the destination chain
+
+#### Explicitly Not Provided / Built on Top
+
+The Warp Precompile itself does not provide any guarantees of:
+
+- Eventual message delivery (may require re-send on blockchain A and additional assumptions about off-chain relayers and chain progress)
+- Ordering of messages (requires ordering provided by a layer above)
+- Replay protection (requires replay protection provided by a layer above)
diff --git a/coreth/precompile/contracts/warp/config.go b/coreth/precompile/contracts/warp/config.go
new file mode 100644
index 00000000..d4c49509
--- /dev/null
+++ b/coreth/precompile/contracts/warp/config.go
@@ -0,0 +1,217 @@
+// (c) 2023, Ava Labs, Inc. 
All rights reserved. +// See the file LICENSE for licensing terms. + +package warp + +import ( + "context" + "errors" + "fmt" + + "github.com/ava-labs/avalanchego/vms/platformvm/warp" + "github.com/ava-labs/avalanchego/vms/platformvm/warp/payload" + "github.com/ava-labs/coreth/precompile/precompileconfig" + "github.com/ava-labs/coreth/predicate" + warpValidators "github.com/ava-labs/coreth/warp/validators" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/math" + "github.com/ethereum/go-ethereum/log" +) + +const ( + WarpDefaultQuorumNumerator uint64 = 67 + WarpQuorumNumeratorMinimum uint64 = 33 + WarpQuorumDenominator uint64 = 100 +) + +var ( + _ precompileconfig.Config = &Config{} + _ precompileconfig.Predicater = &Config{} + _ precompileconfig.Accepter = &Config{} +) + +var ( + errOverflowSignersGasCost = errors.New("overflow calculating warp signers gas cost") + errInvalidPredicateBytes = errors.New("cannot unpack predicate bytes") + errInvalidWarpMsg = errors.New("cannot unpack warp message") + errCannotParseWarpMsg = errors.New("cannot parse warp message") + errInvalidWarpMsgPayload = errors.New("cannot unpack warp message payload") + errInvalidAddressedPayload = errors.New("cannot unpack addressed payload") + errInvalidBlockHashPayload = errors.New("cannot unpack block hash payload") + errCannotGetNumSigners = errors.New("cannot fetch num signers from warp message") + errWarpCannotBeActivated = errors.New("warp cannot be activated before Durango") + errFailedVerification = errors.New("cannot verify warp signature") +) + +// Config implements the precompileconfig.Config interface and +// adds specific configuration for Warp. +type Config struct { + precompileconfig.Upgrade + QuorumNumerator uint64 `json:"quorumNumerator"` +} + +// NewConfig returns a config for a network upgrade at [blockTimestamp] that enables +// Warp with the given quorum numerator. 
+func NewConfig(blockTimestamp *uint64, quorumNumerator uint64) *Config { + return &Config{ + Upgrade: precompileconfig.Upgrade{BlockTimestamp: blockTimestamp}, + QuorumNumerator: quorumNumerator, + } +} + +// NewDefaultConfig returns a config for a network upgrade at [blockTimestamp] that enables +// Warp with the default quorum numerator (0 denotes using the default). +func NewDefaultConfig(blockTimestamp *uint64) *Config { + return NewConfig(blockTimestamp, 0) +} + +// NewDisableConfig returns config for a network upgrade at [blockTimestamp] +// that disables Warp. +func NewDisableConfig(blockTimestamp *uint64) *Config { + return &Config{ + Upgrade: precompileconfig.Upgrade{ + BlockTimestamp: blockTimestamp, + Disable: true, + }, + } +} + +// Key returns the key for the Warp precompileconfig. +// This should be the same key as used in the precompile module. +func (*Config) Key() string { return ConfigKey } + +// Verify tries to verify Config and returns an error accordingly. +func (c *Config) Verify(chainConfig precompileconfig.ChainConfig) error { + if c.Timestamp() != nil { + // If Warp attempts to activate before Durango, fail verification + timestamp := *c.Timestamp() + if !chainConfig.IsDurango(timestamp) { + return errWarpCannotBeActivated + } + } + + if c.QuorumNumerator > WarpQuorumDenominator { + return fmt.Errorf("cannot specify quorum numerator (%d) > quorum denominator (%d)", c.QuorumNumerator, WarpQuorumDenominator) + } + // If a non-default quorum numerator is specified and it is less than the minimum, return an error + if c.QuorumNumerator != 0 && c.QuorumNumerator < WarpQuorumNumeratorMinimum { + return fmt.Errorf("cannot specify quorum numerator (%d) < min quorum numerator (%d)", c.QuorumNumerator, WarpQuorumNumeratorMinimum) + } + return nil +} + +// Equal returns true if [s] is a [*Config] and it has been configured identical to [c]. 
+func (c *Config) Equal(s precompileconfig.Config) bool { + // typecast before comparison + other, ok := (s).(*Config) + if !ok { + return false + } + equals := c.Upgrade.Equal(&other.Upgrade) + return equals && c.QuorumNumerator == other.QuorumNumerator +} + +func (c *Config) Accept(acceptCtx *precompileconfig.AcceptContext, blockHash common.Hash, blockNumber uint64, txHash common.Hash, logIndex int, topics []common.Hash, logData []byte) error { + unsignedMessage, err := UnpackSendWarpEventDataToMessage(logData) + if err != nil { + return fmt.Errorf("failed to parse warp log data into unsigned message (TxHash: %s, LogIndex: %d): %w", txHash, logIndex, err) + } + log.Info( + "Accepted warp unsigned message", + "blockHash", blockHash, + "blockNumber", blockNumber, + "txHash", txHash, + "logIndex", logIndex, + "logData", common.Bytes2Hex(logData), + "warpMessageID", unsignedMessage.ID(), + ) + if err := acceptCtx.Warp.AddMessage(unsignedMessage); err != nil { + return fmt.Errorf("failed to add warp message during accept (TxHash: %s, LogIndex: %d): %w", txHash, logIndex, err) + } + return nil +} + +// PredicateGas returns the amount of gas necessary to verify the predicate +// PredicateGas charges for: +// 1. Base cost of the message +// 2. Size of the message +// 3. Number of signers +// 4. TODO: Lookup of the validator set +// +// If the payload of the warp message fails parsing, return a non-nil error invalidating the transaction. 
+func (c *Config) PredicateGas(predicateBytes []byte) (uint64, error) { + totalGas := GasCostPerSignatureVerification + bytesGasCost, overflow := math.SafeMul(GasCostPerWarpMessageBytes, uint64(len(predicateBytes))) + if overflow { + return 0, fmt.Errorf("overflow calculating gas cost for warp message bytes of size %d", len(predicateBytes)) + } + totalGas, overflow = math.SafeAdd(totalGas, bytesGasCost) + if overflow { + return 0, fmt.Errorf("overflow adding bytes gas cost of size %d", len(predicateBytes)) + } + + unpackedPredicateBytes, err := predicate.UnpackPredicate(predicateBytes) + if err != nil { + return 0, fmt.Errorf("%w: %s", errInvalidPredicateBytes, err) + } + warpMessage, err := warp.ParseMessage(unpackedPredicateBytes) + if err != nil { + return 0, fmt.Errorf("%w: %s", errInvalidWarpMsg, err) + } + _, err = payload.Parse(warpMessage.Payload) + if err != nil { + return 0, fmt.Errorf("%w: %s", errInvalidWarpMsgPayload, err) + } + + numSigners, err := warpMessage.Signature.NumSigners() + if err != nil { + return 0, fmt.Errorf("%w: %s", errCannotGetNumSigners, err) + } + signerGas, overflow := math.SafeMul(uint64(numSigners), GasCostPerWarpSigner) + if overflow { + return 0, errOverflowSignersGasCost + } + totalGas, overflow = math.SafeAdd(totalGas, signerGas) + if overflow { + return 0, fmt.Errorf("overflow adding signer gas (PrevTotal: %d, VerificationGas: %d)", totalGas, signerGas) + } + + return totalGas, nil +} + +// VerifyPredicate returns whether the predicate described by [predicateBytes] passes verification. +func (c *Config) VerifyPredicate(predicateContext *precompileconfig.PredicateContext, predicateBytes []byte) error { + unpackedPredicateBytes, err := predicate.UnpackPredicate(predicateBytes) + if err != nil { + return fmt.Errorf("%w: %w", errInvalidPredicateBytes, err) + } + + // Note: PredicateGas should be called before VerifyPredicate, so we should never reach an error case here. 
+ warpMsg, err := warp.ParseMessage(unpackedPredicateBytes) + if err != nil { + return fmt.Errorf("%w: %w", errCannotParseWarpMsg, err) + } + + quorumNumerator := WarpDefaultQuorumNumerator + if c.QuorumNumerator != 0 { + quorumNumerator = c.QuorumNumerator + } + + log.Debug("verifying warp message", "warpMsg", warpMsg, "quorumNum", quorumNumerator, "quorumDenom", WarpQuorumDenominator) + err = warpMsg.Signature.Verify( + context.Background(), + &warpMsg.UnsignedMessage, + predicateContext.SnowCtx.NetworkID, + warpValidators.NewState(predicateContext.SnowCtx), // Wrap validators.State on the chain snow context to special case the Primary Network + predicateContext.ProposerVMBlockCtx.PChainHeight, + quorumNumerator, + WarpQuorumDenominator, + ) + + if err != nil { + log.Debug("failed to verify warp signature", "msgID", warpMsg.ID(), "err", err) + return fmt.Errorf("%w: %w", errFailedVerification, err) + } + + return nil +} diff --git a/coreth/precompile/contracts/warp/config_test.go b/coreth/precompile/contracts/warp/config_test.go new file mode 100644 index 00000000..419b9bbe --- /dev/null +++ b/coreth/precompile/contracts/warp/config_test.go @@ -0,0 +1,87 @@ +// (c) 2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package warp + +import ( + "fmt" + "testing" + + "github.com/ava-labs/coreth/precompile/precompileconfig" + "github.com/ava-labs/coreth/precompile/testutils" + "github.com/ava-labs/coreth/utils" + "go.uber.org/mock/gomock" +) + +func TestVerify(t *testing.T) { + tests := map[string]testutils.ConfigVerifyTest{ + "quorum numerator less than minimum": { + Config: NewConfig(utils.NewUint64(3), WarpQuorumNumeratorMinimum-1), + ExpectedError: fmt.Sprintf("cannot specify quorum numerator (%d) < min quorum numerator (%d)", WarpQuorumNumeratorMinimum-1, WarpQuorumNumeratorMinimum), + }, + "quorum numerator greater than quorum denominator": { + Config: NewConfig(utils.NewUint64(3), WarpQuorumDenominator+1), + ExpectedError: fmt.Sprintf("cannot specify quorum numerator (%d) > quorum denominator (%d)", WarpQuorumDenominator+1, WarpQuorumDenominator), + }, + "default quorum numerator": { + Config: NewDefaultConfig(utils.NewUint64(3)), + }, + "valid quorum numerator 1 less than denominator": { + Config: NewConfig(utils.NewUint64(3), WarpQuorumDenominator-1), + }, + "valid quorum numerator 1 more than minimum": { + Config: NewConfig(utils.NewUint64(3), WarpQuorumNumeratorMinimum+1), + }, + "invalid cannot activated before Durango activation": { + Config: NewConfig(utils.NewUint64(3), 0), + ChainConfig: func() precompileconfig.ChainConfig { + config := precompileconfig.NewMockChainConfig(gomock.NewController(t)) + config.EXPECT().IsDurango(gomock.Any()).Return(false) + return config + }(), + ExpectedError: errWarpCannotBeActivated.Error(), + }, + } + testutils.RunVerifyTests(t, tests) +} + +func TestEqualWarpConfig(t *testing.T) { + tests := map[string]testutils.ConfigEqualTest{ + "non-nil config and nil other": { + Config: NewDefaultConfig(utils.NewUint64(3)), + Other: nil, + Expected: false, + }, + + "different type": { + Config: NewDefaultConfig(utils.NewUint64(3)), + Other: precompileconfig.NewMockConfig(gomock.NewController(t)), + Expected: false, + }, + + "different 
timestamp": { + Config: NewDefaultConfig(utils.NewUint64(3)), + Other: NewDefaultConfig(utils.NewUint64(4)), + Expected: false, + }, + + "different quorum numerator": { + Config: NewConfig(utils.NewUint64(3), WarpQuorumNumeratorMinimum+1), + Other: NewConfig(utils.NewUint64(3), WarpQuorumNumeratorMinimum+2), + Expected: false, + }, + + "same default config": { + Config: NewDefaultConfig(utils.NewUint64(3)), + Other: NewDefaultConfig(utils.NewUint64(3)), + Expected: true, + }, + + "same non-default config": { + Config: NewConfig(utils.NewUint64(3), WarpQuorumNumeratorMinimum+5), + Other: NewConfig(utils.NewUint64(3), WarpQuorumNumeratorMinimum+5), + Expected: true, + }, + } + testutils.RunEqualTests(t, tests) +} diff --git a/coreth/precompile/contracts/warp/contract.abi b/coreth/precompile/contracts/warp/contract.abi new file mode 100644 index 00000000..771103ec --- /dev/null +++ b/coreth/precompile/contracts/warp/contract.abi @@ -0,0 +1,136 @@ +[ + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "sender", + "type": "address" + }, + { + "indexed": true, + "internalType": "bytes32", + "name": "messageID", + "type": "bytes32" + }, + { + "indexed": false, + "internalType": "bytes", + "name": "message", + "type": "bytes" + } + ], + "name": "SendWarpMessage", + "type": "event" + }, + { + "inputs": [], + "name": "getBlockchainID", + "outputs": [ + { + "internalType": "bytes32", + "name": "blockchainID", + "type": "bytes32" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint32", + "name": "index", + "type": "uint32" + } + ], + "name": "getVerifiedWarpBlockHash", + "outputs": [ + { + "components": [ + { + "internalType": "bytes32", + "name": "sourceChainID", + "type": "bytes32" + }, + { + "internalType": "bytes32", + "name": "blockHash", + "type": "bytes32" + } + ], + "internalType": "struct WarpBlockHash", + "name": "warpBlockHash", + "type": "tuple" + }, + { + 
"internalType": "bool", + "name": "valid", + "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint32", + "name": "index", + "type": "uint32" + } + ], + "name": "getVerifiedWarpMessage", + "outputs": [ + { + "components": [ + { + "internalType": "bytes32", + "name": "sourceChainID", + "type": "bytes32" + }, + { + "internalType": "address", + "name": "originSenderAddress", + "type": "address" + }, + { + "internalType": "bytes", + "name": "payload", + "type": "bytes" + } + ], + "internalType": "struct WarpMessage", + "name": "message", + "type": "tuple" + }, + { + "internalType": "bool", + "name": "valid", + "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "bytes", + "name": "payload", + "type": "bytes" + } + ], + "name": "sendWarpMessage", + "outputs": [ + { + "internalType": "bytes32", + "name": "messageID", + "type": "bytes32" + } + ], + "stateMutability": "nonpayable", + "type": "function" + } +] diff --git a/coreth/precompile/contracts/warp/contract.go b/coreth/precompile/contracts/warp/contract.go new file mode 100644 index 00000000..7e30f9f9 --- /dev/null +++ b/coreth/precompile/contracts/warp/contract.go @@ -0,0 +1,338 @@ +// (c) 2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+
+package warp
+
+import (
+	"errors"
+	"fmt"
+
+	"github.com/ava-labs/avalanchego/vms/platformvm/warp"
+	"github.com/ava-labs/avalanchego/vms/platformvm/warp/payload"
+	"github.com/ava-labs/coreth/accounts/abi"
+	"github.com/ava-labs/coreth/precompile/contract"
+	"github.com/ava-labs/coreth/vmerrs"
+
+	_ "embed"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/common/math"
+)
+
+const (
+	GetVerifiedWarpMessageBaseCost uint64 = 2 // Base cost of entering getVerifiedWarpMessage
+	GetBlockchainIDGasCost uint64 = 2 // Based on GasQuickStep used in existing EVM instructions
+	AddWarpMessageGasCost uint64 = 20_000 // Cost of producing and serving a BLS Signature
+	// Sum of base log gas cost, cost of producing 3 topics, and producing + serving a BLS Signature (sign + trie write)
+	// Note: using trie write for the gas cost results in a conservative overestimate since the message is stored in a
+	// flat database that can be cleaned up after a period of time instead of the EVM trie.
+
+	SendWarpMessageGasCost uint64 = contract.LogGas + 3*contract.LogTopicGas + AddWarpMessageGasCost + contract.WriteGasCostPerSlot
+	// SendWarpMessageGasCostPerByte cost accounts for producing a signed message of a given size
+	SendWarpMessageGasCostPerByte uint64 = contract.LogDataGas
+
+	GasCostPerWarpSigner uint64 = 500
+	GasCostPerWarpMessageBytes uint64 = 100
+	GasCostPerSignatureVerification uint64 = 200_000
+)
+
+var (
+	errInvalidSendInput = errors.New("invalid sendWarpMessage input")
+	errInvalidIndexInput = errors.New("invalid index to specify warp message")
+)
+
+// Singleton StatefulPrecompiledContract and signatures.
+var (
+	// WarpRawABI contains the raw ABI of Warp contract.
+	//go:embed contract.abi
+	WarpRawABI string
+
+	WarpABI = contract.ParseABI(WarpRawABI)
+
+	WarpPrecompile = createWarpPrecompile()
+)
+
+// WarpBlockHash is an auto generated low-level Go binding around a user-defined struct. 
+type WarpBlockHash struct { + SourceChainID common.Hash + BlockHash common.Hash +} + +type GetVerifiedWarpBlockHashOutput struct { + WarpBlockHash WarpBlockHash + Valid bool +} + +// WarpMessage is an auto generated low-level Go binding around an user-defined struct. +type WarpMessage struct { + SourceChainID common.Hash + OriginSenderAddress common.Address + Payload []byte +} + +type GetVerifiedWarpMessageOutput struct { + Message WarpMessage + Valid bool +} + +type SendWarpMessageEventData struct { + Message []byte +} + +// PackGetBlockchainID packs the include selector (first 4 func signature bytes). +// This function is mostly used for tests. +func PackGetBlockchainID() ([]byte, error) { + return WarpABI.Pack("getBlockchainID") +} + +// PackGetBlockchainIDOutput attempts to pack given blockchainID of type common.Hash +// to conform the ABI outputs. +func PackGetBlockchainIDOutput(blockchainID common.Hash) ([]byte, error) { + return WarpABI.PackOutput("getBlockchainID", blockchainID) +} + +// getBlockchainID returns the snow Chain Context ChainID of this blockchain. 
+func getBlockchainID(accessibleState contract.AccessibleState, caller common.Address, addr common.Address, input []byte, suppliedGas uint64, readOnly bool) (ret []byte, remainingGas uint64, err error) { + if remainingGas, err = contract.DeductGas(suppliedGas, GetBlockchainIDGasCost); err != nil { + return nil, 0, err + } + packedOutput, err := PackGetBlockchainIDOutput(common.Hash(accessibleState.GetSnowContext().ChainID)) + if err != nil { + return nil, remainingGas, err + } + + // Return the packed output and the remaining gas + return packedOutput, remainingGas, nil +} + +// UnpackGetVerifiedWarpBlockHashInput attempts to unpack [input] into the uint32 type argument +// assumes that [input] does not include selector (omits first 4 func signature bytes) +func UnpackGetVerifiedWarpBlockHashInput(input []byte) (uint32, error) { + // We don't use strict mode here because it was disabled with Durango. + // Since Warp will be deployed after Durango, we don't need to use strict mode + res, err := WarpABI.UnpackInput("getVerifiedWarpBlockHash", input, false) + if err != nil { + return 0, err + } + unpacked := *abi.ConvertType(res[0], new(uint32)).(*uint32) + return unpacked, nil +} + +// PackGetVerifiedWarpBlockHash packs [index] of type uint32 into the appropriate arguments for getVerifiedWarpBlockHash. +// the packed bytes include selector (first 4 func signature bytes). +// This function is mostly used for tests. +func PackGetVerifiedWarpBlockHash(index uint32) ([]byte, error) { + return WarpABI.Pack("getVerifiedWarpBlockHash", index) +} + +// PackGetVerifiedWarpBlockHashOutput attempts to pack given [outputStruct] of type GetVerifiedWarpBlockHashOutput +// to conform the ABI outputs. 
+func PackGetVerifiedWarpBlockHashOutput(outputStruct GetVerifiedWarpBlockHashOutput) ([]byte, error) { + return WarpABI.PackOutput("getVerifiedWarpBlockHash", + outputStruct.WarpBlockHash, + outputStruct.Valid, + ) +} + +// UnpackGetVerifiedWarpBlockHashOutput attempts to unpack [output] as GetVerifiedWarpBlockHashOutput +// assumes that [output] does not include selector (omits first 4 func signature bytes) +func UnpackGetVerifiedWarpBlockHashOutput(output []byte) (GetVerifiedWarpBlockHashOutput, error) { + outputStruct := GetVerifiedWarpBlockHashOutput{} + err := WarpABI.UnpackIntoInterface(&outputStruct, "getVerifiedWarpBlockHash", output) + + return outputStruct, err +} + +func getVerifiedWarpBlockHash(accessibleState contract.AccessibleState, caller common.Address, addr common.Address, input []byte, suppliedGas uint64, readOnly bool) (ret []byte, remainingGas uint64, err error) { + return handleWarpMessage(accessibleState, input, suppliedGas, blockHashHandler{}) +} + +// UnpackGetVerifiedWarpMessageInput attempts to unpack [input] into the uint32 type argument +// assumes that [input] does not include selector (omits first 4 func signature bytes) +func UnpackGetVerifiedWarpMessageInput(input []byte) (uint32, error) { + // We don't use strict mode here because it was disabled with Durango. + // Since Warp will be deployed after Durango, we don't need to use strict mode. + res, err := WarpABI.UnpackInput("getVerifiedWarpMessage", input, false) + if err != nil { + return 0, err + } + unpacked := *abi.ConvertType(res[0], new(uint32)).(*uint32) + return unpacked, nil +} + +// PackGetVerifiedWarpMessage packs [index] of type uint32 into the appropriate arguments for getVerifiedWarpMessage. +// the packed bytes include selector (first 4 func signature bytes). +// This function is mostly used for tests. 
+func PackGetVerifiedWarpMessage(index uint32) ([]byte, error) { + return WarpABI.Pack("getVerifiedWarpMessage", index) +} + +// PackGetVerifiedWarpMessageOutput attempts to pack given [outputStruct] of type GetVerifiedWarpMessageOutput +// to conform the ABI outputs. +func PackGetVerifiedWarpMessageOutput(outputStruct GetVerifiedWarpMessageOutput) ([]byte, error) { + return WarpABI.PackOutput("getVerifiedWarpMessage", + outputStruct.Message, + outputStruct.Valid, + ) +} + +// UnpackGetVerifiedWarpMessageOutput attempts to unpack [output] as GetVerifiedWarpMessageOutput +// assumes that [output] does not include selector (omits first 4 func signature bytes) +func UnpackGetVerifiedWarpMessageOutput(output []byte) (GetVerifiedWarpMessageOutput, error) { + outputStruct := GetVerifiedWarpMessageOutput{} + err := WarpABI.UnpackIntoInterface(&outputStruct, "getVerifiedWarpMessage", output) + + return outputStruct, err +} + +// getVerifiedWarpMessage retrieves the pre-verified warp message from the predicate storage slots and returns +// the expected ABI encoding of the message to the caller. +func getVerifiedWarpMessage(accessibleState contract.AccessibleState, caller common.Address, addr common.Address, input []byte, suppliedGas uint64, readOnly bool) (ret []byte, remainingGas uint64, err error) { + return handleWarpMessage(accessibleState, input, suppliedGas, addressedPayloadHandler{}) +} + +// UnpackSendWarpMessageInput attempts to unpack [input] as []byte +// assumes that [input] does not include selector (omits first 4 func signature bytes) +func UnpackSendWarpMessageInput(input []byte) ([]byte, error) { + // We don't use strict mode here because it was disabled with Durango. + // Since Warp will be deployed after Durango, we don't need to use strict mode. 
+ res, err := WarpABI.UnpackInput("sendWarpMessage", input, false) + if err != nil { + return []byte{}, err + } + unpacked := *abi.ConvertType(res[0], new([]byte)).(*[]byte) + return unpacked, nil +} + +// PackSendWarpMessage packs [inputStruct] of type []byte into the appropriate arguments for sendWarpMessage. +func PackSendWarpMessage(payloadData []byte) ([]byte, error) { + return WarpABI.Pack("sendWarpMessage", payloadData) +} + +// PackSendWarpMessageOutput attempts to pack given messageID of type common.Hash +// to conform the ABI outputs. +func PackSendWarpMessageOutput(messageID common.Hash) ([]byte, error) { + return WarpABI.PackOutput("sendWarpMessage", messageID) +} + +// UnpackSendWarpMessageOutput attempts to unpack given [output] into the common.Hash type output +// assumes that [output] does not include selector (omits first 4 func signature bytes) +func UnpackSendWarpMessageOutput(output []byte) (common.Hash, error) { + res, err := WarpABI.Unpack("sendWarpMessage", output) + if err != nil { + return common.Hash{}, err + } + unpacked := *abi.ConvertType(res[0], new(common.Hash)).(*common.Hash) + return unpacked, nil +} + +// sendWarpMessage constructs an Avalanche Warp Message containing an AddressedPayload and emits a log to signal validators that they should +// be willing to sign this message. +func sendWarpMessage(accessibleState contract.AccessibleState, caller common.Address, addr common.Address, input []byte, suppliedGas uint64, readOnly bool) (ret []byte, remainingGas uint64, err error) { + if remainingGas, err = contract.DeductGas(suppliedGas, SendWarpMessageGasCost); err != nil { + return nil, 0, err + } + // This gas cost includes buffer room because it is based off of the total size of the input instead of the produced payload. + // This ensures that we charge gas before we unpack the variable sized input. 
+ payloadGas, overflow := math.SafeMul(SendWarpMessageGasCostPerByte, uint64(len(input))) + if overflow { + return nil, 0, vmerrs.ErrOutOfGas + } + if remainingGas, err = contract.DeductGas(remainingGas, payloadGas); err != nil { + return nil, 0, err + } + if readOnly { + return nil, remainingGas, vmerrs.ErrWriteProtection + } + // unpack the arguments + payloadData, err := UnpackSendWarpMessageInput(input) + if err != nil { + return nil, remainingGas, fmt.Errorf("%w: %s", errInvalidSendInput, err) + } + + var ( + sourceChainID = accessibleState.GetSnowContext().ChainID + sourceAddress = caller + ) + + addressedPayload, err := payload.NewAddressedCall( + sourceAddress.Bytes(), + payloadData, + ) + if err != nil { + return nil, remainingGas, err + } + unsignedWarpMessage, err := warp.NewUnsignedMessage( + accessibleState.GetSnowContext().NetworkID, + sourceChainID, + addressedPayload.Bytes(), + ) + if err != nil { + return nil, remainingGas, err + } + + // Add a log to be handled if this action is finalized. + topics, data, err := PackSendWarpMessageEvent( + sourceAddress, + common.Hash(unsignedWarpMessage.ID()), + unsignedWarpMessage.Bytes(), + ) + if err != nil { + return nil, remainingGas, err + } + accessibleState.GetStateDB().AddLog( + ContractAddress, + topics, + data, + accessibleState.GetBlockContext().Number().Uint64(), + ) + + packed, err := PackSendWarpMessageOutput(common.Hash(unsignedWarpMessage.ID())) + if err != nil { + return nil, remainingGas, err + } + + // Return the packed message ID and the remaining gas + return packed, remainingGas, nil +} + +// PackSendWarpMessageEvent packs the given arguments into SendWarpMessage events including topics and data. 
+func PackSendWarpMessageEvent(sourceAddress common.Address, unsignedMessageID common.Hash, unsignedMessageBytes []byte) ([]common.Hash, []byte, error) { + return WarpABI.PackEvent("SendWarpMessage", sourceAddress, unsignedMessageID, unsignedMessageBytes) +} + +// UnpackSendWarpEventDataToMessage attempts to unpack event [data] as warp.UnsignedMessage. +func UnpackSendWarpEventDataToMessage(data []byte) (*warp.UnsignedMessage, error) { + event := SendWarpMessageEventData{} + err := WarpABI.UnpackIntoInterface(&event, "SendWarpMessage", data) + if err != nil { + return nil, err + } + return warp.ParseUnsignedMessage(event.Message) +} + +// createWarpPrecompile returns a StatefulPrecompiledContract with getters and setters for the precompile. +func createWarpPrecompile() contract.StatefulPrecompiledContract { + var functions []*contract.StatefulPrecompileFunction + + abiFunctionMap := map[string]contract.RunStatefulPrecompileFunc{ + "getBlockchainID": getBlockchainID, + "getVerifiedWarpBlockHash": getVerifiedWarpBlockHash, + "getVerifiedWarpMessage": getVerifiedWarpMessage, + "sendWarpMessage": sendWarpMessage, + } + + for name, function := range abiFunctionMap { + method, ok := WarpABI.Methods[name] + if !ok { + panic(fmt.Errorf("given method (%s) does not exist in the ABI", name)) + } + functions = append(functions, contract.NewStatefulPrecompileFunction(method.ID, function)) + } + // Construct the contract with no fallback function. + statefulContract, err := contract.NewStatefulPrecompileContract(nil, functions) + if err != nil { + panic(err) + } + return statefulContract +} diff --git a/coreth/precompile/contracts/warp/contract_test.go b/coreth/precompile/contracts/warp/contract_test.go new file mode 100644 index 00000000..433ad141 --- /dev/null +++ b/coreth/precompile/contracts/warp/contract_test.go @@ -0,0 +1,767 @@ +// (c) 2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package warp + +import ( + "math" + "math/big" + "testing" + + "github.com/ava-labs/avalanchego/ids" + agoUtils "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/vms/platformvm/warp" + avalancheWarp "github.com/ava-labs/avalanchego/vms/platformvm/warp" + "github.com/ava-labs/avalanchego/vms/platformvm/warp/payload" + "github.com/ava-labs/coreth/core/state" + "github.com/ava-labs/coreth/precompile/contract" + "github.com/ava-labs/coreth/precompile/testutils" + "github.com/ava-labs/coreth/predicate" + "github.com/ava-labs/coreth/utils" + "github.com/ava-labs/coreth/vmerrs" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +func TestGetBlockchainID(t *testing.T) { + callerAddr := common.HexToAddress("0x0123") + + defaultSnowCtx := utils.TestSnowContext() + blockchainID := defaultSnowCtx.ChainID + + tests := map[string]testutils.PrecompileTest{ + "getBlockchainID success": { + Caller: callerAddr, + InputFn: func(t testing.TB) []byte { + input, err := PackGetBlockchainID() + require.NoError(t, err) + + return input + }, + SuppliedGas: GetBlockchainIDGasCost, + ReadOnly: false, + ExpectedRes: func() []byte { + expectedOutput, err := PackGetBlockchainIDOutput(common.Hash(blockchainID)) + require.NoError(t, err) + + return expectedOutput + }(), + }, + "getBlockchainID readOnly": { + Caller: callerAddr, + InputFn: func(t testing.TB) []byte { + input, err := PackGetBlockchainID() + require.NoError(t, err) + + return input + }, + SuppliedGas: GetBlockchainIDGasCost, + ReadOnly: true, + ExpectedRes: func() []byte { + expectedOutput, err := PackGetBlockchainIDOutput(common.Hash(blockchainID)) + require.NoError(t, err) + + return expectedOutput + }(), + }, + "getBlockchainID insufficient gas": { + Caller: callerAddr, + InputFn: func(t testing.TB) []byte { + input, err := PackGetBlockchainID() + require.NoError(t, err) + + return input + }, + SuppliedGas: 
GetBlockchainIDGasCost - 1, + ReadOnly: false, + ExpectedErr: vmerrs.ErrOutOfGas.Error(), + }, + } + + testutils.RunPrecompileTests(t, Module, state.NewTestStateDB, tests) +} + +func TestSendWarpMessage(t *testing.T) { + callerAddr := common.HexToAddress("0x0123") + + defaultSnowCtx := utils.TestSnowContext() + blockchainID := defaultSnowCtx.ChainID + sendWarpMessagePayload := agoUtils.RandomBytes(100) + + sendWarpMessageInput, err := PackSendWarpMessage(sendWarpMessagePayload) + require.NoError(t, err) + sendWarpMessageAddressedPayload, err := payload.NewAddressedCall( + callerAddr.Bytes(), + sendWarpMessagePayload, + ) + require.NoError(t, err) + unsignedWarpMessage, err := warp.NewUnsignedMessage( + defaultSnowCtx.NetworkID, + blockchainID, + sendWarpMessageAddressedPayload.Bytes(), + ) + require.NoError(t, err) + + tests := map[string]testutils.PrecompileTest{ + "send warp message readOnly": { + Caller: callerAddr, + InputFn: func(t testing.TB) []byte { return sendWarpMessageInput }, + SuppliedGas: SendWarpMessageGasCost + uint64(len(sendWarpMessageInput[4:])*int(SendWarpMessageGasCostPerByte)), + ReadOnly: true, + ExpectedErr: vmerrs.ErrWriteProtection.Error(), + }, + "send warp message insufficient gas for first step": { + Caller: callerAddr, + InputFn: func(t testing.TB) []byte { return sendWarpMessageInput }, + SuppliedGas: SendWarpMessageGasCost - 1, + ReadOnly: false, + ExpectedErr: vmerrs.ErrOutOfGas.Error(), + }, + "send warp message insufficient gas for payload bytes": { + Caller: callerAddr, + InputFn: func(t testing.TB) []byte { return sendWarpMessageInput }, + SuppliedGas: SendWarpMessageGasCost + uint64(len(sendWarpMessageInput[4:])*int(SendWarpMessageGasCostPerByte)) - 1, + ReadOnly: false, + ExpectedErr: vmerrs.ErrOutOfGas.Error(), + }, + "send warp message invalid input": { + Caller: callerAddr, + InputFn: func(t testing.TB) []byte { + return sendWarpMessageInput[:4] // Include only the function selector, so that the input is invalid + }, + 
SuppliedGas: SendWarpMessageGasCost, + ReadOnly: false, + ExpectedErr: errInvalidSendInput.Error(), + }, + "send warp message success": { + Caller: callerAddr, + InputFn: func(t testing.TB) []byte { return sendWarpMessageInput }, + SuppliedGas: SendWarpMessageGasCost + uint64(len(sendWarpMessageInput[4:])*int(SendWarpMessageGasCostPerByte)), + ReadOnly: false, + ExpectedRes: func() []byte { + bytes, err := PackSendWarpMessageOutput(common.Hash(unsignedWarpMessage.ID())) + if err != nil { + panic(err) + } + return bytes + }(), + AfterHook: func(t testing.TB, state contract.StateDB) { + logsTopics, logsData := state.GetLogData() + require.Len(t, logsTopics, 1) + topics := logsTopics[0] + require.Len(t, topics, 3) + require.Equal(t, topics[0], WarpABI.Events["SendWarpMessage"].ID) + require.Equal(t, topics[1], callerAddr.Hash()) + require.Equal(t, topics[2], common.Hash(unsignedWarpMessage.ID())) + + require.Len(t, logsData, 1) + logData := logsData[0] + unsignedWarpMsg, err := UnpackSendWarpEventDataToMessage(logData) + require.NoError(t, err) + addressedPayload, err := payload.ParseAddressedCall(unsignedWarpMsg.Payload) + require.NoError(t, err) + + require.Equal(t, common.BytesToAddress(addressedPayload.SourceAddress), callerAddr) + require.Equal(t, unsignedWarpMsg.SourceChainID, blockchainID) + require.Equal(t, addressedPayload.Payload, sendWarpMessagePayload) + }, + }, + } + + testutils.RunPrecompileTests(t, Module, state.NewTestStateDB, tests) +} + +func TestGetVerifiedWarpMessage(t *testing.T) { + networkID := uint32(54321) + callerAddr := common.HexToAddress("0x0123") + sourceAddress := common.HexToAddress("0x456789") + sourceChainID := ids.GenerateTestID() + packagedPayloadBytes := []byte("mcsorley") + addressedPayload, err := payload.NewAddressedCall( + sourceAddress.Bytes(), + packagedPayloadBytes, + ) + require.NoError(t, err) + unsignedWarpMsg, err := avalancheWarp.NewUnsignedMessage(networkID, sourceChainID, addressedPayload.Bytes()) + require.NoError(t, 
err) + warpMessage, err := avalancheWarp.NewMessage(unsignedWarpMsg, &avalancheWarp.BitSetSignature{}) // Create message with empty signature for testing + require.NoError(t, err) + warpMessagePredicateBytes := predicate.PackPredicate(warpMessage.Bytes()) + getVerifiedWarpMsg, err := PackGetVerifiedWarpMessage(0) + require.NoError(t, err) + noFailures := set.NewBits().Bytes() + require.Len(t, noFailures, 0) + + tests := map[string]testutils.PrecompileTest{ + "get message success": { + Caller: callerAddr, + InputFn: func(t testing.TB) []byte { return getVerifiedWarpMsg }, + BeforeHook: func(t testing.TB, state contract.StateDB) { + state.SetPredicateStorageSlots(ContractAddress, [][]byte{warpMessagePredicateBytes}) + }, + SetupBlockContext: func(mbc *contract.MockBlockContext) { + mbc.EXPECT().GetPredicateResults(common.Hash{}, ContractAddress).Return(noFailures) + }, + SuppliedGas: GetVerifiedWarpMessageBaseCost + GasCostPerWarpMessageBytes*uint64(len(warpMessagePredicateBytes)), + ReadOnly: false, + ExpectedRes: func() []byte { + res, err := PackGetVerifiedWarpMessageOutput(GetVerifiedWarpMessageOutput{ + Message: WarpMessage{ + SourceChainID: common.Hash(sourceChainID), + OriginSenderAddress: sourceAddress, + Payload: packagedPayloadBytes, + }, + Valid: true, + }) + if err != nil { + panic(err) + } + return res + }(), + }, + "get message out of bounds non-zero index": { + Caller: callerAddr, + InputFn: func(t testing.TB) []byte { + input, err := PackGetVerifiedWarpMessage(1) + require.NoError(t, err) + return input + }, + BeforeHook: func(t testing.TB, state contract.StateDB) { + state.SetPredicateStorageSlots(ContractAddress, [][]byte{warpMessagePredicateBytes}) + }, + SetupBlockContext: func(mbc *contract.MockBlockContext) { + mbc.EXPECT().GetPredicateResults(common.Hash{}, ContractAddress).Return(noFailures) + }, + SuppliedGas: GetVerifiedWarpMessageBaseCost, + ReadOnly: false, + ExpectedRes: func() []byte { + res, err := 
PackGetVerifiedWarpMessageOutput(GetVerifiedWarpMessageOutput{Valid: false}) + if err != nil { + panic(err) + } + return res + }(), + }, + "get message success non-zero index": { + Caller: callerAddr, + InputFn: func(t testing.TB) []byte { + input, err := PackGetVerifiedWarpMessage(1) + require.NoError(t, err) + return input + }, + BeforeHook: func(t testing.TB, state contract.StateDB) { + state.SetPredicateStorageSlots(ContractAddress, [][]byte{{}, warpMessagePredicateBytes}) + }, + SetupBlockContext: func(mbc *contract.MockBlockContext) { + mbc.EXPECT().GetPredicateResults(common.Hash{}, ContractAddress).Return(set.NewBits(0).Bytes()) + }, + SuppliedGas: GetVerifiedWarpMessageBaseCost + GasCostPerWarpMessageBytes*uint64(len(warpMessagePredicateBytes)), + ReadOnly: false, + ExpectedRes: func() []byte { + res, err := PackGetVerifiedWarpMessageOutput(GetVerifiedWarpMessageOutput{ + Message: WarpMessage{ + SourceChainID: common.Hash(sourceChainID), + OriginSenderAddress: sourceAddress, + Payload: packagedPayloadBytes, + }, + Valid: true, + }) + if err != nil { + panic(err) + } + return res + }(), + }, + "get message failure non-zero index": { + Caller: callerAddr, + InputFn: func(t testing.TB) []byte { + input, err := PackGetVerifiedWarpMessage(1) + require.NoError(t, err) + return input + }, + BeforeHook: func(t testing.TB, state contract.StateDB) { + state.SetPredicateStorageSlots(ContractAddress, [][]byte{{}, warpMessagePredicateBytes}) + }, + SetupBlockContext: func(mbc *contract.MockBlockContext) { + mbc.EXPECT().GetPredicateResults(common.Hash{}, ContractAddress).Return(set.NewBits(0, 1).Bytes()) + }, + SuppliedGas: GetVerifiedWarpMessageBaseCost, + ReadOnly: false, + ExpectedRes: func() []byte { + res, err := PackGetVerifiedWarpMessageOutput(GetVerifiedWarpMessageOutput{Valid: false}) + if err != nil { + panic(err) + } + return res + }(), + }, + "get non-existent message": { + Caller: callerAddr, + InputFn: func(t testing.TB) []byte { return getVerifiedWarpMsg 
}, + SetupBlockContext: func(mbc *contract.MockBlockContext) { + mbc.EXPECT().GetPredicateResults(common.Hash{}, ContractAddress).Return(noFailures) + }, + SuppliedGas: GetVerifiedWarpMessageBaseCost, + ReadOnly: false, + ExpectedRes: func() []byte { + res, err := PackGetVerifiedWarpMessageOutput(GetVerifiedWarpMessageOutput{Valid: false}) + if err != nil { + panic(err) + } + return res + }(), + }, + "get message success readOnly": { + Caller: callerAddr, + InputFn: func(t testing.TB) []byte { return getVerifiedWarpMsg }, + BeforeHook: func(t testing.TB, state contract.StateDB) { + state.SetPredicateStorageSlots(ContractAddress, [][]byte{warpMessagePredicateBytes}) + }, + SetupBlockContext: func(mbc *contract.MockBlockContext) { + mbc.EXPECT().GetPredicateResults(common.Hash{}, ContractAddress).Return(noFailures) + }, + SuppliedGas: GetVerifiedWarpMessageBaseCost + GasCostPerWarpMessageBytes*uint64(len(warpMessagePredicateBytes)), + ReadOnly: true, + ExpectedRes: func() []byte { + res, err := PackGetVerifiedWarpMessageOutput(GetVerifiedWarpMessageOutput{ + Message: WarpMessage{ + SourceChainID: common.Hash(sourceChainID), + OriginSenderAddress: sourceAddress, + Payload: packagedPayloadBytes, + }, + Valid: true, + }) + if err != nil { + panic(err) + } + return res + }(), + }, + "get non-existent message readOnly": { + Caller: callerAddr, + InputFn: func(t testing.TB) []byte { return getVerifiedWarpMsg }, + SetupBlockContext: func(mbc *contract.MockBlockContext) { + mbc.EXPECT().GetPredicateResults(common.Hash{}, ContractAddress).Return(noFailures) + }, + SuppliedGas: GetVerifiedWarpMessageBaseCost, + ReadOnly: true, + ExpectedRes: func() []byte { + res, err := PackGetVerifiedWarpMessageOutput(GetVerifiedWarpMessageOutput{Valid: false}) + if err != nil { + panic(err) + } + return res + }(), + }, + "get message out of gas for base cost": { + Caller: callerAddr, + InputFn: func(t testing.TB) []byte { return getVerifiedWarpMsg }, + BeforeHook: func(t testing.TB, state 
contract.StateDB) { + state.SetPredicateStorageSlots(ContractAddress, [][]byte{warpMessagePredicateBytes}) + }, + SuppliedGas: GetVerifiedWarpMessageBaseCost - 1, + ReadOnly: false, + ExpectedErr: vmerrs.ErrOutOfGas.Error(), + }, + "get message out of gas": { + Caller: callerAddr, + InputFn: func(t testing.TB) []byte { return getVerifiedWarpMsg }, + BeforeHook: func(t testing.TB, state contract.StateDB) { + state.SetPredicateStorageSlots(ContractAddress, [][]byte{warpMessagePredicateBytes}) + }, + SetupBlockContext: func(mbc *contract.MockBlockContext) { + mbc.EXPECT().GetPredicateResults(common.Hash{}, ContractAddress).Return(noFailures) + }, + SuppliedGas: GetVerifiedWarpMessageBaseCost + GasCostPerWarpMessageBytes*uint64(len(warpMessagePredicateBytes)) - 1, + ReadOnly: false, + ExpectedErr: vmerrs.ErrOutOfGas.Error(), + }, + "get message invalid predicate packing": { + Caller: callerAddr, + InputFn: func(t testing.TB) []byte { return getVerifiedWarpMsg }, + BeforeHook: func(t testing.TB, state contract.StateDB) { + state.SetPredicateStorageSlots(ContractAddress, [][]byte{warpMessage.Bytes()}) + }, + SetupBlockContext: func(mbc *contract.MockBlockContext) { + mbc.EXPECT().GetPredicateResults(common.Hash{}, ContractAddress).Return(noFailures) + }, + SuppliedGas: GetVerifiedWarpMessageBaseCost + GasCostPerWarpMessageBytes*uint64(len(warpMessage.Bytes())), + ReadOnly: false, + ExpectedErr: errInvalidPredicateBytes.Error(), + }, + "get message invalid warp message": { + Caller: callerAddr, + InputFn: func(t testing.TB) []byte { return getVerifiedWarpMsg }, + BeforeHook: func(t testing.TB, state contract.StateDB) { + state.SetPredicateStorageSlots(ContractAddress, [][]byte{predicate.PackPredicate([]byte{1, 2, 3})}) + }, + SetupBlockContext: func(mbc *contract.MockBlockContext) { + mbc.EXPECT().GetPredicateResults(common.Hash{}, ContractAddress).Return(noFailures) + }, + SuppliedGas: GetVerifiedWarpMessageBaseCost + GasCostPerWarpMessageBytes*uint64(32), + ReadOnly: 
false, + ExpectedErr: errInvalidWarpMsg.Error(), + }, + "get message invalid addressed payload": { + Caller: callerAddr, + InputFn: func(t testing.TB) []byte { return getVerifiedWarpMsg }, + BeforeHook: func(t testing.TB, state contract.StateDB) { + unsignedMessage, err := avalancheWarp.NewUnsignedMessage(networkID, sourceChainID, []byte{1, 2, 3}) // Invalid addressed payload + require.NoError(t, err) + warpMessage, err := avalancheWarp.NewMessage(unsignedMessage, &avalancheWarp.BitSetSignature{}) + require.NoError(t, err) + + state.SetPredicateStorageSlots(ContractAddress, [][]byte{predicate.PackPredicate(warpMessage.Bytes())}) + }, + SetupBlockContext: func(mbc *contract.MockBlockContext) { + mbc.EXPECT().GetPredicateResults(common.Hash{}, ContractAddress).Return(noFailures) + }, + SuppliedGas: GetVerifiedWarpMessageBaseCost + GasCostPerWarpMessageBytes*uint64(160), + ReadOnly: false, + ExpectedErr: errInvalidAddressedPayload.Error(), + }, + "get message index invalid uint32": { + Caller: callerAddr, + InputFn: func(t testing.TB) []byte { + return append(WarpABI.Methods["getVerifiedWarpMessage"].ID, new(big.Int).SetInt64(math.MaxInt64).Bytes()...) 
+ }, + SuppliedGas: GetVerifiedWarpMessageBaseCost, + ReadOnly: false, + ExpectedErr: errInvalidIndexInput.Error(), + }, + "get message index invalid int32": { + Caller: callerAddr, + InputFn: func(t testing.TB) []byte { + res, err := PackGetVerifiedWarpMessage(math.MaxInt32 + 1) + require.NoError(t, err) + return res + }, + SuppliedGas: GetVerifiedWarpMessageBaseCost, + ReadOnly: false, + ExpectedErr: errInvalidIndexInput.Error(), + }, + "get message invalid index input bytes": { + Caller: callerAddr, + InputFn: func(t testing.TB) []byte { + res, err := PackGetVerifiedWarpMessage(1) + require.NoError(t, err) + return res[:len(res)-2] + }, + SuppliedGas: GetVerifiedWarpMessageBaseCost, + ReadOnly: false, + ExpectedErr: errInvalidIndexInput.Error(), + }, + } + + testutils.RunPrecompileTests(t, Module, state.NewTestStateDB, tests) +} + +func TestGetVerifiedWarpBlockHash(t *testing.T) { + networkID := uint32(54321) + callerAddr := common.HexToAddress("0x0123") + sourceChainID := ids.GenerateTestID() + blockHash := ids.GenerateTestID() + blockHashPayload, err := payload.NewHash(blockHash) + require.NoError(t, err) + unsignedWarpMsg, err := avalancheWarp.NewUnsignedMessage(networkID, sourceChainID, blockHashPayload.Bytes()) + require.NoError(t, err) + warpMessage, err := avalancheWarp.NewMessage(unsignedWarpMsg, &avalancheWarp.BitSetSignature{}) // Create message with empty signature for testing + require.NoError(t, err) + warpMessagePredicateBytes := predicate.PackPredicate(warpMessage.Bytes()) + getVerifiedWarpBlockHash, err := PackGetVerifiedWarpBlockHash(0) + require.NoError(t, err) + noFailures := set.NewBits().Bytes() + require.Len(t, noFailures, 0) + + tests := map[string]testutils.PrecompileTest{ + "get message success": { + Caller: callerAddr, + InputFn: func(t testing.TB) []byte { return getVerifiedWarpBlockHash }, + BeforeHook: func(t testing.TB, state contract.StateDB) { + state.SetPredicateStorageSlots(ContractAddress, [][]byte{warpMessagePredicateBytes}) + 
}, + SetupBlockContext: func(mbc *contract.MockBlockContext) { + mbc.EXPECT().GetPredicateResults(common.Hash{}, ContractAddress).Return(noFailures) + }, + SuppliedGas: GetVerifiedWarpMessageBaseCost + GasCostPerWarpMessageBytes*uint64(len(warpMessagePredicateBytes)), + ReadOnly: false, + ExpectedRes: func() []byte { + res, err := PackGetVerifiedWarpBlockHashOutput(GetVerifiedWarpBlockHashOutput{ + WarpBlockHash: WarpBlockHash{ + SourceChainID: common.Hash(sourceChainID), + BlockHash: common.Hash(blockHash), + }, + Valid: true, + }) + if err != nil { + panic(err) + } + return res + }(), + }, + "get message out of bounds non-zero index": { + Caller: callerAddr, + InputFn: func(t testing.TB) []byte { + input, err := PackGetVerifiedWarpBlockHash(1) + require.NoError(t, err) + return input + }, + BeforeHook: func(t testing.TB, state contract.StateDB) { + state.SetPredicateStorageSlots(ContractAddress, [][]byte{warpMessagePredicateBytes}) + }, + SetupBlockContext: func(mbc *contract.MockBlockContext) { + mbc.EXPECT().GetPredicateResults(common.Hash{}, ContractAddress).Return(noFailures) + }, + SuppliedGas: GetVerifiedWarpMessageBaseCost, + ReadOnly: false, + ExpectedRes: func() []byte { + res, err := PackGetVerifiedWarpBlockHashOutput(GetVerifiedWarpBlockHashOutput{Valid: false}) + if err != nil { + panic(err) + } + return res + }(), + }, + "get message success non-zero index": { + Caller: callerAddr, + InputFn: func(t testing.TB) []byte { + input, err := PackGetVerifiedWarpBlockHash(1) + require.NoError(t, err) + return input + }, + BeforeHook: func(t testing.TB, state contract.StateDB) { + state.SetPredicateStorageSlots(ContractAddress, [][]byte{{}, warpMessagePredicateBytes}) + }, + SetupBlockContext: func(mbc *contract.MockBlockContext) { + mbc.EXPECT().GetPredicateResults(common.Hash{}, ContractAddress).Return(set.NewBits(0).Bytes()) + }, + SuppliedGas: GetVerifiedWarpMessageBaseCost + GasCostPerWarpMessageBytes*uint64(len(warpMessagePredicateBytes)), + ReadOnly: 
false, + ExpectedRes: func() []byte { + res, err := PackGetVerifiedWarpBlockHashOutput(GetVerifiedWarpBlockHashOutput{ + WarpBlockHash: WarpBlockHash{ + SourceChainID: common.Hash(sourceChainID), + BlockHash: common.Hash(blockHash), + }, + Valid: true, + }) + if err != nil { + panic(err) + } + return res + }(), + }, + "get message failure non-zero index": { + Caller: callerAddr, + InputFn: func(t testing.TB) []byte { + input, err := PackGetVerifiedWarpBlockHash(1) + require.NoError(t, err) + return input + }, + BeforeHook: func(t testing.TB, state contract.StateDB) { + state.SetPredicateStorageSlots(ContractAddress, [][]byte{{}, warpMessagePredicateBytes}) + }, + SetupBlockContext: func(mbc *contract.MockBlockContext) { + mbc.EXPECT().GetPredicateResults(common.Hash{}, ContractAddress).Return(set.NewBits(0, 1).Bytes()) + }, + SuppliedGas: GetVerifiedWarpMessageBaseCost, + ReadOnly: false, + ExpectedRes: func() []byte { + res, err := PackGetVerifiedWarpBlockHashOutput(GetVerifiedWarpBlockHashOutput{Valid: false}) + if err != nil { + panic(err) + } + return res + }(), + }, + "get non-existent message": { + Caller: callerAddr, + InputFn: func(t testing.TB) []byte { return getVerifiedWarpBlockHash }, + SetupBlockContext: func(mbc *contract.MockBlockContext) { + mbc.EXPECT().GetPredicateResults(common.Hash{}, ContractAddress).Return(noFailures) + }, + SuppliedGas: GetVerifiedWarpMessageBaseCost, + ReadOnly: false, + ExpectedRes: func() []byte { + res, err := PackGetVerifiedWarpBlockHashOutput(GetVerifiedWarpBlockHashOutput{Valid: false}) + if err != nil { + panic(err) + } + return res + }(), + }, + "get message success readOnly": { + Caller: callerAddr, + InputFn: func(t testing.TB) []byte { return getVerifiedWarpBlockHash }, + BeforeHook: func(t testing.TB, state contract.StateDB) { + state.SetPredicateStorageSlots(ContractAddress, [][]byte{warpMessagePredicateBytes}) + }, + SetupBlockContext: func(mbc *contract.MockBlockContext) { + 
mbc.EXPECT().GetPredicateResults(common.Hash{}, ContractAddress).Return(noFailures) + }, + SuppliedGas: GetVerifiedWarpMessageBaseCost + GasCostPerWarpMessageBytes*uint64(len(warpMessagePredicateBytes)), + ReadOnly: true, + ExpectedRes: func() []byte { + res, err := PackGetVerifiedWarpBlockHashOutput(GetVerifiedWarpBlockHashOutput{ + WarpBlockHash: WarpBlockHash{ + SourceChainID: common.Hash(sourceChainID), + BlockHash: common.Hash(blockHash), + }, + Valid: true, + }) + if err != nil { + panic(err) + } + return res + }(), + }, + "get non-existent message readOnly": { + Caller: callerAddr, + InputFn: func(t testing.TB) []byte { return getVerifiedWarpBlockHash }, + SetupBlockContext: func(mbc *contract.MockBlockContext) { + mbc.EXPECT().GetPredicateResults(common.Hash{}, ContractAddress).Return(noFailures) + }, + SuppliedGas: GetVerifiedWarpMessageBaseCost, + ReadOnly: true, + ExpectedRes: func() []byte { + res, err := PackGetVerifiedWarpBlockHashOutput(GetVerifiedWarpBlockHashOutput{Valid: false}) + if err != nil { + panic(err) + } + return res + }(), + }, + "get message out of gas for base cost": { + Caller: callerAddr, + InputFn: func(t testing.TB) []byte { return getVerifiedWarpBlockHash }, + BeforeHook: func(t testing.TB, state contract.StateDB) { + state.SetPredicateStorageSlots(ContractAddress, [][]byte{warpMessagePredicateBytes}) + }, + SuppliedGas: GetVerifiedWarpMessageBaseCost - 1, + ReadOnly: false, + ExpectedErr: vmerrs.ErrOutOfGas.Error(), + }, + "get message out of gas": { + Caller: callerAddr, + InputFn: func(t testing.TB) []byte { return getVerifiedWarpBlockHash }, + BeforeHook: func(t testing.TB, state contract.StateDB) { + state.SetPredicateStorageSlots(ContractAddress, [][]byte{warpMessagePredicateBytes}) + }, + SetupBlockContext: func(mbc *contract.MockBlockContext) { + mbc.EXPECT().GetPredicateResults(common.Hash{}, ContractAddress).Return(noFailures) + }, + SuppliedGas: GetVerifiedWarpMessageBaseCost + 
GasCostPerWarpMessageBytes*uint64(len(warpMessagePredicateBytes)) - 1, + ReadOnly: false, + ExpectedErr: vmerrs.ErrOutOfGas.Error(), + }, + "get message invalid predicate packing": { + Caller: callerAddr, + InputFn: func(t testing.TB) []byte { return getVerifiedWarpBlockHash }, + BeforeHook: func(t testing.TB, state contract.StateDB) { + state.SetPredicateStorageSlots(ContractAddress, [][]byte{warpMessage.Bytes()}) + }, + SetupBlockContext: func(mbc *contract.MockBlockContext) { + mbc.EXPECT().GetPredicateResults(common.Hash{}, ContractAddress).Return(noFailures) + }, + SuppliedGas: GetVerifiedWarpMessageBaseCost + GasCostPerWarpMessageBytes*uint64(len(warpMessage.Bytes())), + ReadOnly: false, + ExpectedErr: errInvalidPredicateBytes.Error(), + }, + "get message invalid warp message": { + Caller: callerAddr, + InputFn: func(t testing.TB) []byte { return getVerifiedWarpBlockHash }, + BeforeHook: func(t testing.TB, state contract.StateDB) { + state.SetPredicateStorageSlots(ContractAddress, [][]byte{predicate.PackPredicate([]byte{1, 2, 3})}) + }, + SetupBlockContext: func(mbc *contract.MockBlockContext) { + mbc.EXPECT().GetPredicateResults(common.Hash{}, ContractAddress).Return(noFailures) + }, + SuppliedGas: GetVerifiedWarpMessageBaseCost + GasCostPerWarpMessageBytes*uint64(32), + ReadOnly: false, + ExpectedErr: errInvalidWarpMsg.Error(), + }, + "get message invalid block hash payload": { + Caller: callerAddr, + InputFn: func(t testing.TB) []byte { return getVerifiedWarpBlockHash }, + BeforeHook: func(t testing.TB, state contract.StateDB) { + unsignedMessage, err := avalancheWarp.NewUnsignedMessage(networkID, sourceChainID, []byte{1, 2, 3}) // Invalid block hash payload + require.NoError(t, err) + warpMessage, err := avalancheWarp.NewMessage(unsignedMessage, &avalancheWarp.BitSetSignature{}) + require.NoError(t, err) + + state.SetPredicateStorageSlots(ContractAddress, [][]byte{predicate.PackPredicate(warpMessage.Bytes())}) + }, + SetupBlockContext: func(mbc 
*contract.MockBlockContext) { + mbc.EXPECT().GetPredicateResults(common.Hash{}, ContractAddress).Return(noFailures) + }, + SuppliedGas: GetVerifiedWarpMessageBaseCost + GasCostPerWarpMessageBytes*uint64(160), + ReadOnly: false, + ExpectedErr: errInvalidBlockHashPayload.Error(), + }, + "get message index invalid uint32": { + Caller: callerAddr, + InputFn: func(t testing.TB) []byte { + return append(WarpABI.Methods["getVerifiedWarpBlockHash"].ID, new(big.Int).SetInt64(math.MaxInt64).Bytes()...) + }, + SuppliedGas: GetVerifiedWarpMessageBaseCost, + ReadOnly: false, + ExpectedErr: errInvalidIndexInput.Error(), + }, + "get message index invalid int32": { + Caller: callerAddr, + InputFn: func(t testing.TB) []byte { + res, err := PackGetVerifiedWarpBlockHash(math.MaxInt32 + 1) + require.NoError(t, err) + return res + }, + SuppliedGas: GetVerifiedWarpMessageBaseCost, + ReadOnly: false, + ExpectedErr: errInvalidIndexInput.Error(), + }, + "get message invalid index input bytes": { + Caller: callerAddr, + InputFn: func(t testing.TB) []byte { + res, err := PackGetVerifiedWarpBlockHash(1) + require.NoError(t, err) + return res[:len(res)-2] + }, + SuppliedGas: GetVerifiedWarpMessageBaseCost, + ReadOnly: false, + ExpectedErr: errInvalidIndexInput.Error(), + }, + } + + testutils.RunPrecompileTests(t, Module, state.NewTestStateDB, tests) +} + +func TestPackEvents(t *testing.T) { + sourceChainID := ids.GenerateTestID() + sourceAddress := common.HexToAddress("0x0123") + payloadData := []byte("mcsorley") + networkID := uint32(54321) + + addressedPayload, err := payload.NewAddressedCall( + sourceAddress.Bytes(), + payloadData, + ) + require.NoError(t, err) + + unsignedWarpMessage, err := warp.NewUnsignedMessage( + networkID, + sourceChainID, + addressedPayload.Bytes(), + ) + require.NoError(t, err) + + _, data, err := PackSendWarpMessageEvent( + sourceAddress, + common.Hash(unsignedMsg.ID()), + unsignedWarpMessage.Bytes(), + ) + require.NoError(t, err) + + unpacked, err := 
UnpackSendWarpEventDataToMessage(data) + require.NoError(t, err) + require.Equal(t, unsignedWarpMessage.Bytes(), unpacked.Bytes()) +} diff --git a/coreth/precompile/contracts/warp/contract_warp_handler.go b/coreth/precompile/contracts/warp/contract_warp_handler.go new file mode 100644 index 00000000..71142ed0 --- /dev/null +++ b/coreth/precompile/contracts/warp/contract_warp_handler.go @@ -0,0 +1,135 @@ +// (c) 2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package warp + +import ( + "fmt" + + "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/vms/platformvm/warp" + "github.com/ava-labs/avalanchego/vms/platformvm/warp/payload" + "github.com/ava-labs/coreth/precompile/contract" + "github.com/ava-labs/coreth/predicate" + "github.com/ava-labs/coreth/vmerrs" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/math" +) + +var ( + _ messageHandler = addressedPayloadHandler{} + _ messageHandler = blockHashHandler{} +) + +var ( + getVerifiedWarpMessageInvalidOutput []byte + getVerifiedWarpBlockHashInvalidOutput []byte +) + +func init() { + res, err := PackGetVerifiedWarpMessageOutput(GetVerifiedWarpMessageOutput{Valid: false}) + if err != nil { + panic(err) + } + getVerifiedWarpMessageInvalidOutput = res + + res, err = PackGetVerifiedWarpBlockHashOutput(GetVerifiedWarpBlockHashOutput{Valid: false}) + if err != nil { + panic(err) + } + getVerifiedWarpBlockHashInvalidOutput = res +} + +type messageHandler interface { + packFailed() []byte + handleMessage(msg *warp.Message) ([]byte, error) +} + +func handleWarpMessage(accessibleState contract.AccessibleState, input []byte, suppliedGas uint64, handler messageHandler) ([]byte, uint64, error) { + remainingGas, err := contract.DeductGas(suppliedGas, GetVerifiedWarpMessageBaseCost) + if err != nil { + return nil, remainingGas, err + } + + warpIndexInput, err := UnpackGetVerifiedWarpMessageInput(input) + if err != nil { + return 
nil, remainingGas, fmt.Errorf("%w: %s", errInvalidIndexInput, err) + } + if warpIndexInput > math.MaxInt32 { + return nil, remainingGas, fmt.Errorf("%w: larger than MaxInt32", errInvalidIndexInput) + } + warpIndex := int(warpIndexInput) // This conversion is safe even if int is 32 bits because we checked above. + state := accessibleState.GetStateDB() + predicateBytes, exists := state.GetPredicateStorageSlots(ContractAddress, warpIndex) + predicateResults := accessibleState.GetBlockContext().GetPredicateResults(state.GetTxHash(), ContractAddress) + valid := exists && !set.BitsFromBytes(predicateResults).Contains(warpIndex) + if !valid { + return handler.packFailed(), remainingGas, nil + } + + // Note: we charge for the size of the message during both predicate verification and each time the message is read during + // EVM execution because each execution incurs an additional read cost. + msgBytesGas, overflow := math.SafeMul(GasCostPerWarpMessageBytes, uint64(len(predicateBytes))) + if overflow { + return nil, 0, vmerrs.ErrOutOfGas + } + if remainingGas, err = contract.DeductGas(remainingGas, msgBytesGas); err != nil { + return nil, 0, err + } + // Note: since the predicate is verified in advance of execution, the precompile should not + // hit an error during execution. 
+ unpackedPredicateBytes, err := predicate.UnpackPredicate(predicateBytes) + if err != nil { + return nil, remainingGas, fmt.Errorf("%w: %s", errInvalidPredicateBytes, err) + } + warpMessage, err := warp.ParseMessage(unpackedPredicateBytes) + if err != nil { + return nil, remainingGas, fmt.Errorf("%w: %s", errInvalidWarpMsg, err) + } + res, err := handler.handleMessage(warpMessage) + if err != nil { + return nil, remainingGas, err + } + return res, remainingGas, nil +} + +type addressedPayloadHandler struct{} + +func (addressedPayloadHandler) packFailed() []byte { + return getVerifiedWarpMessageInvalidOutput +} + +func (addressedPayloadHandler) handleMessage(warpMessage *warp.Message) ([]byte, error) { + addressedPayload, err := payload.ParseAddressedCall(warpMessage.UnsignedMessage.Payload) + if err != nil { + return nil, fmt.Errorf("%w: %s", errInvalidAddressedPayload, err) + } + return PackGetVerifiedWarpMessageOutput(GetVerifiedWarpMessageOutput{ + Message: WarpMessage{ + SourceChainID: common.Hash(warpMessage.SourceChainID), + OriginSenderAddress: common.BytesToAddress(addressedPayload.SourceAddress), + Payload: addressedPayload.Payload, + }, + Valid: true, + }) +} + +type blockHashHandler struct{} + +func (blockHashHandler) packFailed() []byte { + return getVerifiedWarpBlockHashInvalidOutput +} + +func (blockHashHandler) handleMessage(warpMessage *warp.Message) ([]byte, error) { + blockHashPayload, err := payload.ParseHash(warpMessage.UnsignedMessage.Payload) + if err != nil { + return nil, fmt.Errorf("%w: %s", errInvalidBlockHashPayload, err) + } + return PackGetVerifiedWarpBlockHashOutput(GetVerifiedWarpBlockHashOutput{ + WarpBlockHash: WarpBlockHash{ + SourceChainID: common.Hash(warpMessage.SourceChainID), + BlockHash: common.BytesToHash(blockHashPayload.Hash[:]), + }, + Valid: true, + }) +} diff --git a/coreth/precompile/contracts/warp/module.go b/coreth/precompile/contracts/warp/module.go new file mode 100644 index 00000000..336100da --- /dev/null +++ 
b/coreth/precompile/contracts/warp/module.go @@ -0,0 +1,55 @@ +// (c) 2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package warp + +import ( + "fmt" + + "github.com/ava-labs/coreth/precompile/contract" + "github.com/ava-labs/coreth/precompile/modules" + "github.com/ava-labs/coreth/precompile/precompileconfig" + + "github.com/ethereum/go-ethereum/common" +) + +var _ contract.Configurator = &configurator{} + +// ConfigKey is the key used in json config files to specify this precompile config. +// must be unique across all precompiles. +const ConfigKey = "warpConfig" + +// ContractAddress is the address of the warp precompile contract +var ContractAddress = common.HexToAddress("0x0200000000000000000000000000000000000005") + +// Module is the precompile module. It is used to register the precompile contract. +var Module = modules.Module{ + ConfigKey: ConfigKey, + Address: ContractAddress, + Contract: WarpPrecompile, + Configurator: &configurator{}, +} + +type configurator struct{} + +func init() { + // Register the precompile module. + // Each precompile contract registers itself through [RegisterModule] function. + if err := modules.RegisterModule(Module); err != nil { + panic(err) + } +} + +// MakeConfig returns a new precompile config instance. +// This is required to Marshal/Unmarshal the precompile config. 
+func (*configurator) MakeConfig() precompileconfig.Config { + return new(Config) +} + +// Configure is a no-op for warp since it does not need to store any information in the state +func (*configurator) Configure(chainConfig precompileconfig.ChainConfig, cfg precompileconfig.Config, state contract.StateDB, _ contract.ConfigurationBlockContext) error { + if _, ok := cfg.(*Config); !ok { + return fmt.Errorf("expected config type %T, got %T: %v", &Config{}, cfg, cfg) + } + return nil +} diff --git a/coreth/precompile/contracts/warp/predicate_test.go b/coreth/precompile/contracts/warp/predicate_test.go new file mode 100644 index 00000000..b0af5644 --- /dev/null +++ b/coreth/precompile/contracts/warp/predicate_test.go @@ -0,0 +1,689 @@ +// (c) 2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package warp + +import ( + "context" + "errors" + "fmt" + "testing" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/snow/engine/snowman/block" + "github.com/ava-labs/avalanchego/snow/validators" + agoUtils "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/crypto/bls" + "github.com/ava-labs/avalanchego/utils/set" + avalancheWarp "github.com/ava-labs/avalanchego/vms/platformvm/warp" + "github.com/ava-labs/avalanchego/vms/platformvm/warp/payload" + "github.com/ava-labs/coreth/precompile/precompileconfig" + "github.com/ava-labs/coreth/precompile/testutils" + "github.com/ava-labs/coreth/predicate" + "github.com/ava-labs/coreth/utils" + "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" +) + +const pChainHeight uint64 = 1337 + +var ( + _ agoUtils.Sortable[*testValidator] = (*testValidator)(nil) + + errTest = errors.New("non-nil error") + networkID = uint32(54321) + sourceChainID = ids.GenerateTestID() + sourceSubnetID = ids.GenerateTestID() + + // valid unsigned warp message used 
throughout testing + unsignedMsg *avalancheWarp.UnsignedMessage + // valid addressed payload + addressedPayload *payload.AddressedCall + addressedPayloadBytes []byte + // blsSignatures of [unsignedMsg] from each of [testVdrs] + blsSignatures []*bls.Signature + + numTestVdrs = 10_000 + testVdrs []*testValidator + vdrs map[ids.NodeID]*validators.GetValidatorOutput + tests []signatureTest + + predicateTests = make(map[string]testutils.PredicateTest) +) + +func init() { + testVdrs = make([]*testValidator, 0, numTestVdrs) + for i := 0; i < numTestVdrs; i++ { + testVdrs = append(testVdrs, newTestValidator()) + } + agoUtils.Sort(testVdrs) + + vdrs = map[ids.NodeID]*validators.GetValidatorOutput{ + testVdrs[0].nodeID: { + NodeID: testVdrs[0].nodeID, + PublicKey: testVdrs[0].vdr.PublicKey, + Weight: testVdrs[0].vdr.Weight, + }, + testVdrs[1].nodeID: { + NodeID: testVdrs[1].nodeID, + PublicKey: testVdrs[1].vdr.PublicKey, + Weight: testVdrs[1].vdr.Weight, + }, + testVdrs[2].nodeID: { + NodeID: testVdrs[2].nodeID, + PublicKey: testVdrs[2].vdr.PublicKey, + Weight: testVdrs[2].vdr.Weight, + }, + } + + var err error + addr := ids.GenerateTestShortID() + addressedPayload, err = payload.NewAddressedCall( + addr[:], + []byte{1, 2, 3}, + ) + if err != nil { + panic(err) + } + addressedPayloadBytes = addressedPayload.Bytes() + unsignedMsg, err = avalancheWarp.NewUnsignedMessage(networkID, sourceChainID, addressedPayload.Bytes()) + if err != nil { + panic(err) + } + + for _, testVdr := range testVdrs { + blsSignature := bls.Sign(testVdr.sk, unsignedMsg.Bytes()) + blsSignatures = append(blsSignatures, blsSignature) + } + + initWarpPredicateTests() +} + +type testValidator struct { + nodeID ids.NodeID + sk *bls.SecretKey + vdr *avalancheWarp.Validator +} + +func (v *testValidator) Compare(o *testValidator) int { + return v.vdr.Compare(o.vdr) +} + +func newTestValidator() *testValidator { + sk, err := bls.NewSecretKey() + if err != nil { + panic(err) + } + + nodeID := 
ids.GenerateTestNodeID() + pk := bls.PublicFromSecretKey(sk) + return &testValidator{ + nodeID: nodeID, + sk: sk, + vdr: &avalancheWarp.Validator{ + PublicKey: pk, + PublicKeyBytes: pk.Serialize(), + Weight: 3, + NodeIDs: []ids.NodeID{nodeID}, + }, + } +} + +type signatureTest struct { + name string + stateF func(*gomock.Controller) validators.State + quorumNum uint64 + quorumDen uint64 + msgF func(*require.Assertions) *avalancheWarp.Message + err error +} + +// createWarpMessage constructs a signed warp message using the global variable [unsignedMsg] +// and the first [numKeys] signatures from [blsSignatures] +func createWarpMessage(numKeys int) *avalancheWarp.Message { + aggregateSignature, err := bls.AggregateSignatures(blsSignatures[0:numKeys]) + if err != nil { + panic(err) + } + bitSet := set.NewBits() + for i := 0; i < numKeys; i++ { + bitSet.Add(i) + } + warpSignature := &avalancheWarp.BitSetSignature{ + Signers: bitSet.Bytes(), + } + copy(warpSignature.Signature[:], bls.SignatureToBytes(aggregateSignature)) + warpMsg, err := avalancheWarp.NewMessage(unsignedMsg, warpSignature) + if err != nil { + panic(err) + } + return warpMsg +} + +// createPredicate constructs a warp message using createWarpMessage with numKeys signers +// and packs it into predicate encoding. +func createPredicate(numKeys int) []byte { + warpMsg := createWarpMessage(numKeys) + predicateBytes := predicate.PackPredicate(warpMsg.Bytes()) + return predicateBytes +} + +// validatorRange specifies a range of validators to include from [start, end), a staking weight +// to specify for each validator in that range, and whether or not to include the public key. 
+type validatorRange struct { + start int + end int + weight uint64 + publicKey bool +} + +// createSnowCtx creates a snow.Context instance with a validator state specified by the given validatorRanges +func createSnowCtx(validatorRanges []validatorRange) *snow.Context { + getValidatorsOutput := make(map[ids.NodeID]*validators.GetValidatorOutput) + + for _, validatorRange := range validatorRanges { + for i := validatorRange.start; i < validatorRange.end; i++ { + validatorOutput := &validators.GetValidatorOutput{ + NodeID: testVdrs[i].nodeID, + Weight: validatorRange.weight, + } + if validatorRange.publicKey { + validatorOutput.PublicKey = testVdrs[i].vdr.PublicKey + } + getValidatorsOutput[testVdrs[i].nodeID] = validatorOutput + } + } + + snowCtx := utils.TestSnowContext() + state := &validators.TestState{ + GetSubnetIDF: func(ctx context.Context, chainID ids.ID) (ids.ID, error) { + return sourceSubnetID, nil + }, + GetValidatorSetF: func(ctx context.Context, height uint64, subnetID ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { + return getValidatorsOutput, nil + }, + } + snowCtx.ValidatorState = state + snowCtx.NetworkID = networkID + return snowCtx +} + +func createValidPredicateTest(snowCtx *snow.Context, numKeys uint64, predicateBytes []byte) testutils.PredicateTest { + return testutils.PredicateTest{ + Config: NewDefaultConfig(utils.NewUint64(0)), + PredicateContext: &precompileconfig.PredicateContext{ + SnowCtx: snowCtx, + ProposerVMBlockCtx: &block.Context{ + PChainHeight: 1, + }, + }, + PredicateBytes: predicateBytes, + Gas: GasCostPerSignatureVerification + uint64(len(predicateBytes))*GasCostPerWarpMessageBytes + numKeys*GasCostPerWarpSigner, + GasErr: nil, + ExpectedErr: nil, + } +} + +func TestWarpMessageFromPrimaryNetwork(t *testing.T) { + require := require.New(t) + numKeys := 10 + cChainID := ids.GenerateTestID() + addressedCall, err := payload.NewAddressedCall(agoUtils.RandomBytes(20), agoUtils.RandomBytes(100)) + 
require.NoError(err) + unsignedMsg, err := avalancheWarp.NewUnsignedMessage(networkID, cChainID, addressedCall.Bytes()) + require.NoError(err) + + getValidatorsOutput := make(map[ids.NodeID]*validators.GetValidatorOutput) + blsSignatures := make([]*bls.Signature, 0, numKeys) + for i := 0; i < numKeys; i++ { + validatorOutput := &validators.GetValidatorOutput{ + NodeID: testVdrs[i].nodeID, + Weight: 20, + PublicKey: testVdrs[i].vdr.PublicKey, + } + getValidatorsOutput[testVdrs[i].nodeID] = validatorOutput + blsSignatures = append(blsSignatures, bls.Sign(testVdrs[i].sk, unsignedMsg.Bytes())) + } + aggregateSignature, err := bls.AggregateSignatures(blsSignatures) + require.NoError(err) + bitSet := set.NewBits() + for i := 0; i < numKeys; i++ { + bitSet.Add(i) + } + warpSignature := &avalancheWarp.BitSetSignature{ + Signers: bitSet.Bytes(), + } + copy(warpSignature.Signature[:], bls.SignatureToBytes(aggregateSignature)) + warpMsg, err := avalancheWarp.NewMessage(unsignedMsg, warpSignature) + require.NoError(err) + + predicateBytes := predicate.PackPredicate(warpMsg.Bytes()) + + snowCtx := utils.TestSnowContext() + snowCtx.SubnetID = ids.GenerateTestID() + snowCtx.ChainID = ids.GenerateTestID() + snowCtx.CChainID = cChainID + snowCtx.NetworkID = networkID + snowCtx.ValidatorState = &validators.TestState{ + GetSubnetIDF: func(ctx context.Context, chainID ids.ID) (ids.ID, error) { + require.Equal(chainID, cChainID) + return constants.PrimaryNetworkID, nil // Return Primary Network SubnetID + }, + GetValidatorSetF: func(ctx context.Context, height uint64, subnetID ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { + require.Equal(snowCtx.SubnetID, subnetID) + return getValidatorsOutput, nil + }, + } + + test := testutils.PredicateTest{ + Config: NewDefaultConfig(utils.NewUint64(0)), + PredicateContext: &precompileconfig.PredicateContext{ + SnowCtx: snowCtx, + ProposerVMBlockCtx: &block.Context{ + PChainHeight: 1, + }, + }, + PredicateBytes: predicateBytes, + 
Gas: GasCostPerSignatureVerification + uint64(len(predicateBytes))*GasCostPerWarpMessageBytes + uint64(numKeys)*GasCostPerWarpSigner, + GasErr: nil, + ExpectedErr: nil, + } + + test.Run(t) +} + +func TestInvalidPredicatePacking(t *testing.T) { + numKeys := 1 + snowCtx := createSnowCtx([]validatorRange{ + { + start: 0, + end: numKeys, + weight: 20, + publicKey: true, + }, + }) + predicateBytes := createPredicate(numKeys) + predicateBytes = append(predicateBytes, byte(0x01)) // Invalidate the predicate byte packing + + test := testutils.PredicateTest{ + Config: NewDefaultConfig(utils.NewUint64(0)), + PredicateContext: &precompileconfig.PredicateContext{ + SnowCtx: snowCtx, + ProposerVMBlockCtx: &block.Context{ + PChainHeight: 1, + }, + }, + PredicateBytes: predicateBytes, + Gas: GasCostPerSignatureVerification + uint64(len(predicateBytes))*GasCostPerWarpMessageBytes + uint64(numKeys)*GasCostPerWarpSigner, + GasErr: errInvalidPredicateBytes, + } + + test.Run(t) +} + +func TestInvalidWarpMessage(t *testing.T) { + numKeys := 1 + snowCtx := createSnowCtx([]validatorRange{ + { + start: 0, + end: numKeys, + weight: 20, + publicKey: true, + }, + }) + warpMsg := createWarpMessage(1) + warpMsgBytes := warpMsg.Bytes() + warpMsgBytes = append(warpMsgBytes, byte(0x01)) // Invalidate warp message packing + predicateBytes := predicate.PackPredicate(warpMsgBytes) + + test := testutils.PredicateTest{ + Config: NewDefaultConfig(utils.NewUint64(0)), + PredicateContext: &precompileconfig.PredicateContext{ + SnowCtx: snowCtx, + ProposerVMBlockCtx: &block.Context{ + PChainHeight: 1, + }, + }, + PredicateBytes: predicateBytes, + Gas: GasCostPerSignatureVerification + uint64(len(predicateBytes))*GasCostPerWarpMessageBytes + uint64(numKeys)*GasCostPerWarpSigner, + GasErr: errInvalidWarpMsg, + } + + test.Run(t) +} + +func TestInvalidAddressedPayload(t *testing.T) { + numKeys := 1 + snowCtx := createSnowCtx([]validatorRange{ + { + start: 0, + end: numKeys, + weight: 20, + publicKey: true, + 
}, + }) + aggregateSignature, err := bls.AggregateSignatures(blsSignatures[0:numKeys]) + require.NoError(t, err) + bitSet := set.NewBits() + for i := 0; i < numKeys; i++ { + bitSet.Add(i) + } + warpSignature := &avalancheWarp.BitSetSignature{ + Signers: bitSet.Bytes(), + } + copy(warpSignature.Signature[:], bls.SignatureToBytes(aggregateSignature)) + // Create an unsigned message with an invalid addressed payload + unsignedMsg, err := avalancheWarp.NewUnsignedMessage(networkID, sourceChainID, []byte{1, 2, 3}) + require.NoError(t, err) + warpMsg, err := avalancheWarp.NewMessage(unsignedMsg, warpSignature) + require.NoError(t, err) + warpMsgBytes := warpMsg.Bytes() + predicateBytes := predicate.PackPredicate(warpMsgBytes) + + test := testutils.PredicateTest{ + Config: NewDefaultConfig(utils.NewUint64(0)), + PredicateContext: &precompileconfig.PredicateContext{ + SnowCtx: snowCtx, + ProposerVMBlockCtx: &block.Context{ + PChainHeight: 1, + }, + }, + PredicateBytes: predicateBytes, + Gas: GasCostPerSignatureVerification + uint64(len(predicateBytes))*GasCostPerWarpMessageBytes + uint64(numKeys)*GasCostPerWarpSigner, + GasErr: errInvalidWarpMsgPayload, + } + + test.Run(t) +} + +func TestInvalidBitSet(t *testing.T) { + addressedCall, err := payload.NewAddressedCall(agoUtils.RandomBytes(20), agoUtils.RandomBytes(100)) + require.NoError(t, err) + unsignedMsg, err := avalancheWarp.NewUnsignedMessage( + networkID, + sourceChainID, + addressedCall.Bytes(), + ) + require.NoError(t, err) + + msg, err := avalancheWarp.NewMessage( + unsignedMsg, + &avalancheWarp.BitSetSignature{ + Signers: make([]byte, 1), + Signature: [bls.SignatureLen]byte{}, + }, + ) + require.NoError(t, err) + + numKeys := 1 + snowCtx := createSnowCtx([]validatorRange{ + { + start: 0, + end: numKeys, + weight: 20, + publicKey: true, + }, + }) + predicateBytes := predicate.PackPredicate(msg.Bytes()) + test := testutils.PredicateTest{ + Config: NewDefaultConfig(utils.NewUint64(0)), + PredicateContext: 
&precompileconfig.PredicateContext{ + SnowCtx: snowCtx, + ProposerVMBlockCtx: &block.Context{ + PChainHeight: 1, + }, + }, + PredicateBytes: predicateBytes, + Gas: GasCostPerSignatureVerification + uint64(len(predicateBytes))*GasCostPerWarpMessageBytes + uint64(numKeys)*GasCostPerWarpSigner, + GasErr: errCannotGetNumSigners, + } + + test.Run(t) +} + +func TestWarpSignatureWeightsDefaultQuorumNumerator(t *testing.T) { + snowCtx := createSnowCtx([]validatorRange{ + { + start: 0, + end: 100, + weight: 20, + publicKey: true, + }, + }) + + tests := make(map[string]testutils.PredicateTest) + for _, numSigners := range []int{ + 1, + int(WarpDefaultQuorumNumerator) - 1, + int(WarpDefaultQuorumNumerator), + int(WarpDefaultQuorumNumerator) + 1, + int(WarpQuorumDenominator) - 1, + int(WarpQuorumDenominator), + int(WarpQuorumDenominator) + 1, + } { + predicateBytes := createPredicate(numSigners) + // The predicate is valid iff the number of signers is >= the required numerator and does not exceed the denominator. 
+ var expectedErr error + if numSigners >= int(WarpDefaultQuorumNumerator) && numSigners <= int(WarpQuorumDenominator) { + expectedErr = nil + } else { + expectedErr = errFailedVerification + } + + tests[fmt.Sprintf("default quorum %d signature(s)", numSigners)] = testutils.PredicateTest{ + Config: NewDefaultConfig(utils.NewUint64(0)), + PredicateContext: &precompileconfig.PredicateContext{ + SnowCtx: snowCtx, + ProposerVMBlockCtx: &block.Context{ + PChainHeight: 1, + }, + }, + PredicateBytes: predicateBytes, + Gas: GasCostPerSignatureVerification + uint64(len(predicateBytes))*GasCostPerWarpMessageBytes + uint64(numSigners)*GasCostPerWarpSigner, + GasErr: nil, + ExpectedErr: expectedErr, + } + } + testutils.RunPredicateTests(t, tests) +} + +// multiple messages all correct, multiple messages all incorrect, mixed bag +func TestWarpMultiplePredicates(t *testing.T) { + snowCtx := createSnowCtx([]validatorRange{ + { + start: 0, + end: 100, + weight: 20, + publicKey: true, + }, + }) + + tests := make(map[string]testutils.PredicateTest) + for _, validMessageIndices := range [][]bool{ + {}, + {true, false}, + {false, true}, + {false, false}, + {true, true}, + } { + var ( + numSigners = int(WarpQuorumDenominator) + invalidPredicateBytes = createPredicate(1) + validPredicateBytes = createPredicate(numSigners) + ) + + for _, valid := range validMessageIndices { + var ( + predicate []byte + expectedGas uint64 + expectedErr error + ) + if valid { + predicate = validPredicateBytes + expectedGas = GasCostPerSignatureVerification + uint64(len(validPredicateBytes))*GasCostPerWarpMessageBytes + uint64(numSigners)*GasCostPerWarpSigner + expectedErr = nil + } else { + expectedGas = GasCostPerSignatureVerification + uint64(len(invalidPredicateBytes))*GasCostPerWarpMessageBytes + uint64(1)*GasCostPerWarpSigner + predicate = invalidPredicateBytes + expectedErr = errFailedVerification + } + + tests[fmt.Sprintf("multiple predicates %v", validMessageIndices)] = testutils.PredicateTest{ + 
Config: NewDefaultConfig(utils.NewUint64(0)), + PredicateContext: &precompileconfig.PredicateContext{ + SnowCtx: snowCtx, + ProposerVMBlockCtx: &block.Context{ + PChainHeight: 1, + }, + }, + PredicateBytes: predicate, + Gas: expectedGas, + GasErr: nil, + ExpectedErr: expectedErr, + } + } + } + testutils.RunPredicateTests(t, tests) +} + +func TestWarpSignatureWeightsNonDefaultQuorumNumerator(t *testing.T) { + snowCtx := createSnowCtx([]validatorRange{ + { + start: 0, + end: 100, + weight: 20, + publicKey: true, + }, + }) + + tests := make(map[string]testutils.PredicateTest) + nonDefaultQuorumNumerator := 50 + // Ensure this test fails if the DefaultQuroumNumerator is changed to an unexpected value during development + require.NotEqual(t, nonDefaultQuorumNumerator, int(WarpDefaultQuorumNumerator)) + // Add cases with default quorum + for _, numSigners := range []int{nonDefaultQuorumNumerator, nonDefaultQuorumNumerator + 1, 99, 100, 101} { + predicateBytes := createPredicate(numSigners) + // The predicate is valid iff the number of signers is >= the required numerator and does not exceed the denominator. 
+ var expectedErr error + if numSigners >= nonDefaultQuorumNumerator && numSigners <= int(WarpQuorumDenominator) { + expectedErr = nil + } else { + expectedErr = errFailedVerification + } + + name := fmt.Sprintf("non-default quorum %d signature(s)", numSigners) + tests[name] = testutils.PredicateTest{ + Config: NewConfig(utils.NewUint64(0), uint64(nonDefaultQuorumNumerator)), + PredicateContext: &precompileconfig.PredicateContext{ + SnowCtx: snowCtx, + ProposerVMBlockCtx: &block.Context{ + PChainHeight: 1, + }, + }, + PredicateBytes: predicateBytes, + Gas: GasCostPerSignatureVerification + uint64(len(predicateBytes))*GasCostPerWarpMessageBytes + uint64(numSigners)*GasCostPerWarpSigner, + GasErr: nil, + ExpectedErr: expectedErr, + } + } + + testutils.RunPredicateTests(t, tests) +} + +func initWarpPredicateTests() { + for _, totalNodes := range []int{10, 100, 1_000, 10_000} { + testName := fmt.Sprintf("%d signers/%d validators", totalNodes, totalNodes) + + predicateBytes := createPredicate(totalNodes) + snowCtx := createSnowCtx([]validatorRange{ + { + start: 0, + end: totalNodes, + weight: 20, + publicKey: true, + }, + }) + predicateTests[testName] = createValidPredicateTest(snowCtx, uint64(totalNodes), predicateBytes) + } + + numSigners := 10 + for _, totalNodes := range []int{100, 1_000, 10_000} { + testName := fmt.Sprintf("%d signers (heavily weighted)/%d validators", numSigners, totalNodes) + + predicateBytes := createPredicate(numSigners) + snowCtx := createSnowCtx([]validatorRange{ + { + start: 0, + end: numSigners, + weight: 10_000_000, + publicKey: true, + }, + { + start: numSigners, + end: totalNodes, + weight: 20, + publicKey: true, + }, + }) + predicateTests[testName] = createValidPredicateTest(snowCtx, uint64(numSigners), predicateBytes) + } + + for _, totalNodes := range []int{100, 1_000, 10_000} { + testName := fmt.Sprintf("%d signers (heavily weighted)/%d validators (non-signers without registered PublicKey)", numSigners, totalNodes) + + predicateBytes 
:= createPredicate(numSigners) + snowCtx := createSnowCtx([]validatorRange{ + { + start: 0, + end: numSigners, + weight: 10_000_000, + publicKey: true, + }, + { + start: numSigners, + end: totalNodes, + weight: 20, + publicKey: false, + }, + }) + predicateTests[testName] = createValidPredicateTest(snowCtx, uint64(numSigners), predicateBytes) + } + + for _, totalNodes := range []int{100, 1_000, 10_000} { + testName := fmt.Sprintf("%d validators w/ %d signers/repeated PublicKeys", totalNodes, numSigners) + + predicateBytes := createPredicate(numSigners) + getValidatorsOutput := make(map[ids.NodeID]*validators.GetValidatorOutput, totalNodes) + for i := 0; i < totalNodes; i++ { + getValidatorsOutput[testVdrs[i].nodeID] = &validators.GetValidatorOutput{ + NodeID: testVdrs[i].nodeID, + Weight: 20, + PublicKey: testVdrs[i%numSigners].vdr.PublicKey, + } + } + + snowCtx := utils.TestSnowContext() + snowCtx.NetworkID = networkID + state := &validators.TestState{ + GetSubnetIDF: func(ctx context.Context, chainID ids.ID) (ids.ID, error) { + return sourceSubnetID, nil + }, + GetValidatorSetF: func(ctx context.Context, height uint64, subnetID ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { + return getValidatorsOutput, nil + }, + } + snowCtx.ValidatorState = state + + predicateTests[testName] = createValidPredicateTest(snowCtx, uint64(numSigners), predicateBytes) + } +} + +func TestWarpPredicate(t *testing.T) { + testutils.RunPredicateTests(t, predicateTests) +} + +func BenchmarkWarpPredicate(b *testing.B) { + testutils.RunPredicateBenchmarks(b, predicateTests) +} diff --git a/coreth/precompile/contracts/warp/signature_verification_test.go b/coreth/precompile/contracts/warp/signature_verification_test.go new file mode 100644 index 00000000..dadefeb4 --- /dev/null +++ b/coreth/precompile/contracts/warp/signature_verification_test.go @@ -0,0 +1,653 @@ +// (c) 2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package warp + +import ( + "context" + "math" + "testing" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils/crypto/bls" + "github.com/ava-labs/avalanchego/utils/set" + avalancheWarp "github.com/ava-labs/avalanchego/vms/platformvm/warp" + "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" +) + +// This test copies the test coverage from https://github.com/ava-labs/avalanchego/blob/v1.10.0/vms/platformvm/warp/signature_test.go#L137. +// These tests are only expected to fail if there is a breaking change in AvalancheGo that unexpectedly changes behavior. +func TestSignatureVerification(t *testing.T) { + tests = []signatureTest{ + { + name: "can't get subnetID", + stateF: func(ctrl *gomock.Controller) validators.State { + state := validators.NewMockState(ctrl) + state.EXPECT().GetSubnetID(gomock.Any(), sourceChainID).Return(sourceSubnetID, errTest) + return state + }, + quorumNum: 1, + quorumDen: 2, + msgF: func(require *require.Assertions) *avalancheWarp.Message { + unsignedMsg, err := avalancheWarp.NewUnsignedMessage( + networkID, + sourceChainID, + addressedPayloadBytes, + ) + require.NoError(err) + + msg, err := avalancheWarp.NewMessage( + unsignedMsg, + &avalancheWarp.BitSetSignature{}, + ) + require.NoError(err) + return msg + }, + err: errTest, + }, + { + name: "can't get validator set", + stateF: func(ctrl *gomock.Controller) validators.State { + state := validators.NewMockState(ctrl) + state.EXPECT().GetSubnetID(gomock.Any(), sourceChainID).Return(sourceSubnetID, nil) + state.EXPECT().GetValidatorSet(gomock.Any(), pChainHeight, sourceSubnetID).Return(nil, errTest) + return state + }, + quorumNum: 1, + quorumDen: 2, + msgF: func(require *require.Assertions) *avalancheWarp.Message { + unsignedMsg, err := avalancheWarp.NewUnsignedMessage( + networkID, + sourceChainID, + addressedPayloadBytes, + ) + require.NoError(err) + + msg, err := avalancheWarp.NewMessage( + 
unsignedMsg, + &avalancheWarp.BitSetSignature{}, + ) + require.NoError(err) + return msg + }, + err: errTest, + }, + { + name: "weight overflow", + stateF: func(ctrl *gomock.Controller) validators.State { + state := validators.NewMockState(ctrl) + state.EXPECT().GetSubnetID(gomock.Any(), sourceChainID).Return(sourceSubnetID, nil) + state.EXPECT().GetValidatorSet(gomock.Any(), pChainHeight, sourceSubnetID).Return(map[ids.NodeID]*validators.GetValidatorOutput{ + testVdrs[0].nodeID: { + NodeID: testVdrs[0].nodeID, + PublicKey: testVdrs[0].vdr.PublicKey, + Weight: math.MaxUint64, + }, + testVdrs[1].nodeID: { + NodeID: testVdrs[1].nodeID, + PublicKey: testVdrs[1].vdr.PublicKey, + Weight: math.MaxUint64, + }, + }, nil) + return state + }, + quorumNum: 1, + quorumDen: 2, + msgF: func(require *require.Assertions) *avalancheWarp.Message { + unsignedMsg, err := avalancheWarp.NewUnsignedMessage( + networkID, + sourceChainID, + addressedPayloadBytes, + ) + require.NoError(err) + + msg, err := avalancheWarp.NewMessage( + unsignedMsg, + &avalancheWarp.BitSetSignature{ + Signers: make([]byte, 8), + }, + ) + require.NoError(err) + return msg + }, + err: avalancheWarp.ErrWeightOverflow, + }, + { + name: "invalid bit set index", + stateF: func(ctrl *gomock.Controller) validators.State { + state := validators.NewMockState(ctrl) + state.EXPECT().GetSubnetID(gomock.Any(), sourceChainID).Return(sourceSubnetID, nil) + state.EXPECT().GetValidatorSet(gomock.Any(), pChainHeight, sourceSubnetID).Return(vdrs, nil) + return state + }, + quorumNum: 1, + quorumDen: 2, + msgF: func(require *require.Assertions) *avalancheWarp.Message { + unsignedMsg, err := avalancheWarp.NewUnsignedMessage( + networkID, + sourceChainID, + addressedPayloadBytes, + ) + require.NoError(err) + + msg, err := avalancheWarp.NewMessage( + unsignedMsg, + &avalancheWarp.BitSetSignature{ + Signers: make([]byte, 1), + Signature: [bls.SignatureLen]byte{}, + }, + ) + require.NoError(err) + return msg + }, + err: 
avalancheWarp.ErrInvalidBitSet, + }, + { + name: "unknown index", + stateF: func(ctrl *gomock.Controller) validators.State { + state := validators.NewMockState(ctrl) + state.EXPECT().GetSubnetID(gomock.Any(), sourceChainID).Return(sourceSubnetID, nil) + state.EXPECT().GetValidatorSet(gomock.Any(), pChainHeight, sourceSubnetID).Return(vdrs, nil) + return state + }, + quorumNum: 1, + quorumDen: 2, + msgF: func(require *require.Assertions) *avalancheWarp.Message { + unsignedMsg, err := avalancheWarp.NewUnsignedMessage( + networkID, + sourceChainID, + addressedPayloadBytes, + ) + require.NoError(err) + + signers := set.NewBits() + signers.Add(3) // vdr oob + + msg, err := avalancheWarp.NewMessage( + unsignedMsg, + &avalancheWarp.BitSetSignature{ + Signers: signers.Bytes(), + Signature: [bls.SignatureLen]byte{}, + }, + ) + require.NoError(err) + return msg + }, + err: avalancheWarp.ErrUnknownValidator, + }, + { + name: "insufficient weight", + stateF: func(ctrl *gomock.Controller) validators.State { + state := validators.NewMockState(ctrl) + state.EXPECT().GetSubnetID(gomock.Any(), sourceChainID).Return(sourceSubnetID, nil) + state.EXPECT().GetValidatorSet(gomock.Any(), pChainHeight, sourceSubnetID).Return(vdrs, nil) + return state + }, + quorumNum: 1, + quorumDen: 1, + msgF: func(require *require.Assertions) *avalancheWarp.Message { + unsignedMsg, err := avalancheWarp.NewUnsignedMessage( + networkID, + sourceChainID, + addressedPayloadBytes, + ) + require.NoError(err) + + // [signers] has weight from [vdr[0], vdr[1]], + // which is 6, which is less than 9 + signers := set.NewBits() + signers.Add(0) + signers.Add(1) + + unsignedBytes := unsignedMsg.Bytes() + vdr0Sig := bls.Sign(testVdrs[0].sk, unsignedBytes) + vdr1Sig := bls.Sign(testVdrs[1].sk, unsignedBytes) + aggSig, err := bls.AggregateSignatures([]*bls.Signature{vdr0Sig, vdr1Sig}) + require.NoError(err) + aggSigBytes := [bls.SignatureLen]byte{} + copy(aggSigBytes[:], bls.SignatureToBytes(aggSig)) + + msg, err := 
avalancheWarp.NewMessage( + unsignedMsg, + &avalancheWarp.BitSetSignature{ + Signers: signers.Bytes(), + Signature: aggSigBytes, + }, + ) + require.NoError(err) + return msg + }, + err: avalancheWarp.ErrInsufficientWeight, + }, + { + name: "can't parse sig", + stateF: func(ctrl *gomock.Controller) validators.State { + state := validators.NewMockState(ctrl) + state.EXPECT().GetSubnetID(gomock.Any(), sourceChainID).Return(sourceSubnetID, nil) + state.EXPECT().GetValidatorSet(gomock.Any(), pChainHeight, sourceSubnetID).Return(vdrs, nil) + return state + }, + quorumNum: 1, + quorumDen: 2, + msgF: func(require *require.Assertions) *avalancheWarp.Message { + unsignedMsg, err := avalancheWarp.NewUnsignedMessage( + networkID, + sourceChainID, + addressedPayloadBytes, + ) + require.NoError(err) + + signers := set.NewBits() + signers.Add(0) + signers.Add(1) + + msg, err := avalancheWarp.NewMessage( + unsignedMsg, + &avalancheWarp.BitSetSignature{ + Signers: signers.Bytes(), + Signature: [bls.SignatureLen]byte{}, + }, + ) + require.NoError(err) + return msg + }, + err: avalancheWarp.ErrParseSignature, + }, + { + name: "no validators", + stateF: func(ctrl *gomock.Controller) validators.State { + state := validators.NewMockState(ctrl) + state.EXPECT().GetSubnetID(gomock.Any(), sourceChainID).Return(sourceSubnetID, nil) + state.EXPECT().GetValidatorSet(gomock.Any(), pChainHeight, sourceSubnetID).Return(nil, nil) + return state + }, + quorumNum: 1, + quorumDen: 2, + msgF: func(require *require.Assertions) *avalancheWarp.Message { + unsignedMsg, err := avalancheWarp.NewUnsignedMessage( + networkID, + sourceChainID, + addressedPayloadBytes, + ) + require.NoError(err) + + unsignedBytes := unsignedMsg.Bytes() + vdr0Sig := bls.Sign(testVdrs[0].sk, unsignedBytes) + aggSigBytes := [bls.SignatureLen]byte{} + copy(aggSigBytes[:], bls.SignatureToBytes(vdr0Sig)) + + msg, err := avalancheWarp.NewMessage( + unsignedMsg, + &avalancheWarp.BitSetSignature{ + Signers: nil, + Signature: 
aggSigBytes, + }, + ) + require.NoError(err) + return msg + }, + err: bls.ErrNoPublicKeys, + }, + { + name: "invalid signature (substitute)", + stateF: func(ctrl *gomock.Controller) validators.State { + state := validators.NewMockState(ctrl) + state.EXPECT().GetSubnetID(gomock.Any(), sourceChainID).Return(sourceSubnetID, nil) + state.EXPECT().GetValidatorSet(gomock.Any(), pChainHeight, sourceSubnetID).Return(vdrs, nil) + return state + }, + quorumNum: 3, + quorumDen: 5, + msgF: func(require *require.Assertions) *avalancheWarp.Message { + unsignedMsg, err := avalancheWarp.NewUnsignedMessage( + networkID, + sourceChainID, + addressedPayloadBytes, + ) + require.NoError(err) + + signers := set.NewBits() + signers.Add(0) + signers.Add(1) + + unsignedBytes := unsignedMsg.Bytes() + vdr0Sig := bls.Sign(testVdrs[0].sk, unsignedBytes) + // Give sig from vdr[2] even though the bit vector says it + // should be from vdr[1] + vdr2Sig := bls.Sign(testVdrs[2].sk, unsignedBytes) + aggSig, err := bls.AggregateSignatures([]*bls.Signature{vdr0Sig, vdr2Sig}) + require.NoError(err) + aggSigBytes := [bls.SignatureLen]byte{} + copy(aggSigBytes[:], bls.SignatureToBytes(aggSig)) + + msg, err := avalancheWarp.NewMessage( + unsignedMsg, + &avalancheWarp.BitSetSignature{ + Signers: signers.Bytes(), + Signature: aggSigBytes, + }, + ) + require.NoError(err) + return msg + }, + err: avalancheWarp.ErrInvalidSignature, + }, + { + name: "invalid signature (missing one)", + stateF: func(ctrl *gomock.Controller) validators.State { + state := validators.NewMockState(ctrl) + state.EXPECT().GetSubnetID(gomock.Any(), sourceChainID).Return(sourceSubnetID, nil) + state.EXPECT().GetValidatorSet(gomock.Any(), pChainHeight, sourceSubnetID).Return(vdrs, nil) + return state + }, + quorumNum: 3, + quorumDen: 5, + msgF: func(require *require.Assertions) *avalancheWarp.Message { + unsignedMsg, err := avalancheWarp.NewUnsignedMessage( + networkID, + sourceChainID, + addressedPayloadBytes, + ) + require.NoError(err) 
+ + signers := set.NewBits() + signers.Add(0) + signers.Add(1) + + unsignedBytes := unsignedMsg.Bytes() + vdr0Sig := bls.Sign(testVdrs[0].sk, unsignedBytes) + // Don't give the sig from vdr[1] + aggSigBytes := [bls.SignatureLen]byte{} + copy(aggSigBytes[:], bls.SignatureToBytes(vdr0Sig)) + + msg, err := avalancheWarp.NewMessage( + unsignedMsg, + &avalancheWarp.BitSetSignature{ + Signers: signers.Bytes(), + Signature: aggSigBytes, + }, + ) + require.NoError(err) + return msg + }, + err: avalancheWarp.ErrInvalidSignature, + }, + { + name: "invalid signature (extra one)", + stateF: func(ctrl *gomock.Controller) validators.State { + state := validators.NewMockState(ctrl) + state.EXPECT().GetSubnetID(gomock.Any(), sourceChainID).Return(sourceSubnetID, nil) + state.EXPECT().GetValidatorSet(gomock.Any(), pChainHeight, sourceSubnetID).Return(vdrs, nil) + return state + }, + quorumNum: 3, + quorumDen: 5, + msgF: func(require *require.Assertions) *avalancheWarp.Message { + unsignedMsg, err := avalancheWarp.NewUnsignedMessage( + networkID, + sourceChainID, + addressedPayloadBytes, + ) + require.NoError(err) + + signers := set.NewBits() + signers.Add(0) + signers.Add(1) + + unsignedBytes := unsignedMsg.Bytes() + vdr0Sig := bls.Sign(testVdrs[0].sk, unsignedBytes) + vdr1Sig := bls.Sign(testVdrs[1].sk, unsignedBytes) + // Give sig from vdr[2] even though the bit vector doesn't have + // it + vdr2Sig := bls.Sign(testVdrs[2].sk, unsignedBytes) + aggSig, err := bls.AggregateSignatures([]*bls.Signature{vdr0Sig, vdr1Sig, vdr2Sig}) + require.NoError(err) + aggSigBytes := [bls.SignatureLen]byte{} + copy(aggSigBytes[:], bls.SignatureToBytes(aggSig)) + + msg, err := avalancheWarp.NewMessage( + unsignedMsg, + &avalancheWarp.BitSetSignature{ + Signers: signers.Bytes(), + Signature: aggSigBytes, + }, + ) + require.NoError(err) + return msg + }, + err: avalancheWarp.ErrInvalidSignature, + }, + { + name: "valid signature", + stateF: func(ctrl *gomock.Controller) validators.State { + state := 
validators.NewMockState(ctrl) + state.EXPECT().GetSubnetID(gomock.Any(), sourceChainID).Return(sourceSubnetID, nil) + state.EXPECT().GetValidatorSet(gomock.Any(), pChainHeight, sourceSubnetID).Return(vdrs, nil) + return state + }, + quorumNum: 1, + quorumDen: 2, + msgF: func(require *require.Assertions) *avalancheWarp.Message { + unsignedMsg, err := avalancheWarp.NewUnsignedMessage( + networkID, + sourceChainID, + addressedPayloadBytes, + ) + require.NoError(err) + + // [signers] has weight from [vdr[1], vdr[2]], + // which is 6, which is greater than 4.5 + signers := set.NewBits() + signers.Add(1) + signers.Add(2) + + unsignedBytes := unsignedMsg.Bytes() + vdr1Sig := bls.Sign(testVdrs[1].sk, unsignedBytes) + vdr2Sig := bls.Sign(testVdrs[2].sk, unsignedBytes) + aggSig, err := bls.AggregateSignatures([]*bls.Signature{vdr1Sig, vdr2Sig}) + require.NoError(err) + aggSigBytes := [bls.SignatureLen]byte{} + copy(aggSigBytes[:], bls.SignatureToBytes(aggSig)) + + msg, err := avalancheWarp.NewMessage( + unsignedMsg, + &avalancheWarp.BitSetSignature{ + Signers: signers.Bytes(), + Signature: aggSigBytes, + }, + ) + require.NoError(err) + return msg + }, + err: nil, + }, + { + name: "valid signature (boundary)", + stateF: func(ctrl *gomock.Controller) validators.State { + state := validators.NewMockState(ctrl) + state.EXPECT().GetSubnetID(gomock.Any(), sourceChainID).Return(sourceSubnetID, nil) + state.EXPECT().GetValidatorSet(gomock.Any(), pChainHeight, sourceSubnetID).Return(vdrs, nil) + return state + }, + quorumNum: 2, + quorumDen: 3, + msgF: func(require *require.Assertions) *avalancheWarp.Message { + unsignedMsg, err := avalancheWarp.NewUnsignedMessage( + networkID, + sourceChainID, + addressedPayloadBytes, + ) + require.NoError(err) + + // [signers] has weight from [vdr[1], vdr[2]], + // which is 6, which meets the minimum 6 + signers := set.NewBits() + signers.Add(1) + signers.Add(2) + + unsignedBytes := unsignedMsg.Bytes() + vdr1Sig := bls.Sign(testVdrs[1].sk, 
unsignedBytes) + vdr2Sig := bls.Sign(testVdrs[2].sk, unsignedBytes) + aggSig, err := bls.AggregateSignatures([]*bls.Signature{vdr1Sig, vdr2Sig}) + require.NoError(err) + aggSigBytes := [bls.SignatureLen]byte{} + copy(aggSigBytes[:], bls.SignatureToBytes(aggSig)) + + msg, err := avalancheWarp.NewMessage( + unsignedMsg, + &avalancheWarp.BitSetSignature{ + Signers: signers.Bytes(), + Signature: aggSigBytes, + }, + ) + require.NoError(err) + return msg + }, + err: nil, + }, + { + name: "valid signature (missing key)", + stateF: func(ctrl *gomock.Controller) validators.State { + state := validators.NewMockState(ctrl) + state.EXPECT().GetSubnetID(gomock.Any(), sourceChainID).Return(sourceSubnetID, nil) + state.EXPECT().GetValidatorSet(gomock.Any(), pChainHeight, sourceSubnetID).Return(map[ids.NodeID]*validators.GetValidatorOutput{ + testVdrs[0].nodeID: { + NodeID: testVdrs[0].nodeID, + PublicKey: nil, + Weight: testVdrs[0].vdr.Weight, + }, + testVdrs[1].nodeID: { + NodeID: testVdrs[1].nodeID, + PublicKey: testVdrs[1].vdr.PublicKey, + Weight: testVdrs[1].vdr.Weight, + }, + testVdrs[2].nodeID: { + NodeID: testVdrs[2].nodeID, + PublicKey: testVdrs[2].vdr.PublicKey, + Weight: testVdrs[2].vdr.Weight, + }, + }, nil) + return state + }, + quorumNum: 1, + quorumDen: 3, + msgF: func(require *require.Assertions) *avalancheWarp.Message { + unsignedMsg, err := avalancheWarp.NewUnsignedMessage( + networkID, + sourceChainID, + addressedPayloadBytes, + ) + require.NoError(err) + + // [signers] has weight from [vdr2, vdr3], + // which is 6, which is greater than 3 + signers := set.NewBits() + // Note: the bits are shifted because vdr[0]'s key was zeroed + signers.Add(0) // vdr[1] + signers.Add(1) // vdr[2] + + unsignedBytes := unsignedMsg.Bytes() + vdr1Sig := bls.Sign(testVdrs[1].sk, unsignedBytes) + vdr2Sig := bls.Sign(testVdrs[2].sk, unsignedBytes) + aggSig, err := bls.AggregateSignatures([]*bls.Signature{vdr1Sig, vdr2Sig}) + require.NoError(err) + aggSigBytes := 
[bls.SignatureLen]byte{} + copy(aggSigBytes[:], bls.SignatureToBytes(aggSig)) + + msg, err := avalancheWarp.NewMessage( + unsignedMsg, + &avalancheWarp.BitSetSignature{ + Signers: signers.Bytes(), + Signature: aggSigBytes, + }, + ) + require.NoError(err) + return msg + }, + err: nil, + }, + { + name: "valid signature (duplicate key)", + stateF: func(ctrl *gomock.Controller) validators.State { + state := validators.NewMockState(ctrl) + state.EXPECT().GetSubnetID(gomock.Any(), sourceChainID).Return(sourceSubnetID, nil) + state.EXPECT().GetValidatorSet(gomock.Any(), pChainHeight, sourceSubnetID).Return(map[ids.NodeID]*validators.GetValidatorOutput{ + testVdrs[0].nodeID: { + NodeID: testVdrs[0].nodeID, + PublicKey: nil, + Weight: testVdrs[0].vdr.Weight, + }, + testVdrs[1].nodeID: { + NodeID: testVdrs[1].nodeID, + PublicKey: testVdrs[2].vdr.PublicKey, + Weight: testVdrs[1].vdr.Weight, + }, + testVdrs[2].nodeID: { + NodeID: testVdrs[2].nodeID, + PublicKey: testVdrs[2].vdr.PublicKey, + Weight: testVdrs[2].vdr.Weight, + }, + }, nil) + return state + }, + quorumNum: 2, + quorumDen: 3, + msgF: func(require *require.Assertions) *avalancheWarp.Message { + unsignedMsg, err := avalancheWarp.NewUnsignedMessage( + networkID, + sourceChainID, + addressedPayloadBytes, + ) + require.NoError(err) + + // [signers] has weight from [vdr2, vdr3], + // which is 6, which meets the minimum 6 + signers := set.NewBits() + // Note: the bits are shifted because vdr[0]'s key was zeroed + // Note: vdr[1] and vdr[2] were combined because of a shared pk + signers.Add(0) // vdr[1] + vdr[2] + + unsignedBytes := unsignedMsg.Bytes() + // Because vdr[1] and vdr[2] share a key, only one of them sign. 
+ vdr2Sig := bls.Sign(testVdrs[2].sk, unsignedBytes) + aggSigBytes := [bls.SignatureLen]byte{} + copy(aggSigBytes[:], bls.SignatureToBytes(vdr2Sig)) + + msg, err := avalancheWarp.NewMessage( + unsignedMsg, + &avalancheWarp.BitSetSignature{ + Signers: signers.Bytes(), + Signature: aggSigBytes, + }, + ) + require.NoError(err) + return msg + }, + err: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + msg := tt.msgF(require) + pChainState := tt.stateF(ctrl) + + err := msg.Signature.Verify( + context.Background(), + &msg.UnsignedMessage, + networkID, + pChainState, + pChainHeight, + tt.quorumNum, + tt.quorumDen, + ) + require.ErrorIs(err, tt.err) + }) + } +} diff --git a/coreth/precompile/modules/module.go b/coreth/precompile/modules/module.go new file mode 100644 index 00000000..fefa9fd2 --- /dev/null +++ b/coreth/precompile/modules/module.go @@ -0,0 +1,37 @@ +// (c) 2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package modules + +import ( + "bytes" + + "github.com/ava-labs/coreth/precompile/contract" + "github.com/ethereum/go-ethereum/common" +) + +type Module struct { + // ConfigKey is the key used in json config files to specify this precompile config. + ConfigKey string + // Address returns the address where the stateful precompile is accessible. + Address common.Address + // Contract returns a thread-safe singleton that can be used as the StatefulPrecompiledContract when + // this config is enabled. + Contract contract.StatefulPrecompiledContract + // Configurator is used to configure the stateful precompile when the config is enabled. 
+ contract.Configurator +} + +type moduleArray []Module + +func (u moduleArray) Len() int { + return len(u) +} + +func (u moduleArray) Swap(i, j int) { + u[i], u[j] = u[j], u[i] +} + +func (m moduleArray) Less(i, j int) bool { + return bytes.Compare(m[i].Address.Bytes(), m[j].Address.Bytes()) < 0 +} diff --git a/coreth/precompile/modules/registerer.go b/coreth/precompile/modules/registerer.go new file mode 100644 index 00000000..a7f3b92c --- /dev/null +++ b/coreth/precompile/modules/registerer.go @@ -0,0 +1,98 @@ +// (c) 2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package modules + +import ( + "fmt" + "sort" + + "github.com/ava-labs/coreth/constants" + "github.com/ava-labs/coreth/utils" + "github.com/ethereum/go-ethereum/common" +) + +var ( + // registeredModules is a list of Module to preserve order + // for deterministic iteration + registeredModules = make([]Module, 0) + + reservedRanges = []utils.AddressRange{ + { + Start: common.HexToAddress("0x0100000000000000000000000000000000000000"), + End: common.HexToAddress("0x01000000000000000000000000000000000000ff"), + }, + { + Start: common.HexToAddress("0x0200000000000000000000000000000000000000"), + End: common.HexToAddress("0x02000000000000000000000000000000000000ff"), + }, + { + Start: common.HexToAddress("0x0300000000000000000000000000000000000000"), + End: common.HexToAddress("0x03000000000000000000000000000000000000ff"), + }, + } +) + +// ReservedAddress returns true if [addr] is in a reserved range for custom precompiles +func ReservedAddress(addr common.Address) bool { + for _, reservedRange := range reservedRanges { + if reservedRange.Contains(addr) { + return true + } + } + + return false +} + +// RegisterModule registers a stateful precompile module +func RegisterModule(stm Module) error { + address := stm.Address + key := stm.ConfigKey + + if address == constants.BlackholeAddr { + return fmt.Errorf("address %s overlaps with blackhole address", address) + } 
+ if !ReservedAddress(address) { + return fmt.Errorf("address %s not in a reserved range", address) + } + + for _, registeredModule := range registeredModules { + if registeredModule.ConfigKey == key { + return fmt.Errorf("name %s already used by a stateful precompile", key) + } + if registeredModule.Address == address { + return fmt.Errorf("address %s already used by a stateful precompile", address) + } + } + // sort by address to ensure deterministic iteration + registeredModules = insertSortedByAddress(registeredModules, stm) + return nil +} + +func GetPrecompileModuleByAddress(address common.Address) (Module, bool) { + for _, stm := range registeredModules { + if stm.Address == address { + return stm, true + } + } + return Module{}, false +} + +func GetPrecompileModule(key string) (Module, bool) { + for _, stm := range registeredModules { + if stm.ConfigKey == key { + return stm, true + } + } + return Module{}, false +} + +func RegisteredModules() []Module { + return registeredModules +} + +func insertSortedByAddress(data []Module, stm Module) []Module { + data = append(data, stm) + sort.Sort(moduleArray(data)) + return data +} diff --git a/coreth/precompile/modules/registerer_test.go b/coreth/precompile/modules/registerer_test.go new file mode 100644 index 00000000..f2519046 --- /dev/null +++ b/coreth/precompile/modules/registerer_test.go @@ -0,0 +1,59 @@ +// (c) 2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package modules + +import ( + "math/big" + "testing" + + "github.com/ava-labs/coreth/constants" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +func TestInsertSortedByAddress(t *testing.T) { + data := make([]Module, 0) + // test that the module is registered in sorted order + module1 := Module{ + Address: common.BigToAddress(big.NewInt(1)), + } + data = insertSortedByAddress(data, module1) + + require.Equal(t, []Module{module1}, data) + + module0 := Module{ + Address: common.BigToAddress(big.NewInt(0)), + } + + data = insertSortedByAddress(data, module0) + require.Equal(t, []Module{module0, module1}, data) + + module3 := Module{ + Address: common.BigToAddress(big.NewInt(3)), + } + + data = insertSortedByAddress(data, module3) + require.Equal(t, []Module{module0, module1, module3}, data) + + module2 := Module{ + Address: common.BigToAddress(big.NewInt(2)), + } + + data = insertSortedByAddress(data, module2) + require.Equal(t, []Module{module0, module1, module2, module3}, data) +} + +func TestRegisterModuleInvalidAddresses(t *testing.T) { + // Test the blockhole address cannot be registered + m := Module{ + Address: constants.BlackholeAddr, + } + err := RegisterModule(m) + require.ErrorContains(t, err, "overlaps with blackhole address") + + // Test an address outside of the reserved ranges cannot be registered + m.Address = common.BigToAddress(big.NewInt(1)) + err = RegisterModule(m) + require.ErrorContains(t, err, "not in a reserved range") +} diff --git a/coreth/precompile/params.go b/coreth/precompile/params.go deleted file mode 100644 index 97c62fc5..00000000 --- a/coreth/precompile/params.go +++ /dev/null @@ -1,47 +0,0 @@ -// (c) 2019-2020, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package precompile - -import ( - "bytes" - - "github.com/ethereum/go-ethereum/common" -) - -// Gas costs for stateful precompiles -// can be added here eg. 
-// const MintGasCost = 30_000 - -// AddressRange represents a continuous range of addresses -type AddressRange struct { - Start common.Address - End common.Address -} - -// Contains returns true iff [addr] is contained within the (inclusive) -func (a *AddressRange) Contains(addr common.Address) bool { - addrBytes := addr.Bytes() - return bytes.Compare(addrBytes, a.Start[:]) >= 0 && bytes.Compare(addrBytes, a.End[:]) <= 0 -} - -// Designated addresses of stateful precompiles -// Note: it is important that none of these addresses conflict with each other or any other precompiles -// in core/vm/contracts.go. -// We start at 0x0100000000000000000000000000000000000000 and will increment by 1 from here to reduce -// the risk of conflicts. -var ( - UsedAddresses = []common.Address{ - // precompile contract addresses can be added here - } - - // ReservedRanges contains addresses ranges that are reserved - // for precompiles and cannot be used as EOA or deployed contracts. - ReservedRanges = []AddressRange{ - { - // reserved for coreth precompiles - common.HexToAddress("0x0100000000000000000000000000000000000000"), - common.HexToAddress("0x01000000000000000000000000000000000000ff"), - }, - } -) diff --git a/coreth/precompile/precompileconfig/config.go b/coreth/precompile/precompileconfig/config.go new file mode 100644 index 00000000..43365e3b --- /dev/null +++ b/coreth/precompile/precompileconfig/config.go @@ -0,0 +1,88 @@ +// (c) 2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +// Defines the stateless interface for unmarshalling an arbitrary config of a precompile +package precompileconfig + +import ( + "github.com/ava-labs/avalanchego/chains/atomic" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/snow/engine/snowman/block" + "github.com/ava-labs/avalanchego/vms/platformvm/warp" + "github.com/ethereum/go-ethereum/common" +) + +// StatefulPrecompileConfig defines the interface for a stateful precompile to +// be enabled via a network upgrade. +type Config interface { + // Key returns the unique key for the stateful precompile. + Key() string + // Timestamp returns the timestamp at which this stateful precompile should be enabled. + // 1) 0 indicates that the precompile should be enabled from genesis. + // 2) n indicates that the precompile should be enabled in the first block with timestamp >= [n]. + // 3) nil indicates that the precompile is never enabled. + Timestamp() *uint64 + // IsDisabled returns true if this network upgrade should disable the precompile. + IsDisabled() bool + // Equal returns true if the provided argument configures the same precompile with the same parameters. + Equal(Config) bool + // Verify is called on startup and an error is treated as fatal. Configure can assume the Config has passed verification. + Verify(ChainConfig) error +} + +// PredicateContext is the context passed in to the Predicater interface to verify +// a precompile predicate within a specific ProposerVM wrapper. +type PredicateContext struct { + SnowCtx *snow.Context + // ProposerVMBlockCtx defines the ProposerVM context the predicate is verified within + ProposerVMBlockCtx *block.Context +} + +// Predicater is an optional interface for StatefulPrecompileContracts to implement. +// If implemented, the predicate will be called for each predicate included in the +// access list of a transaction. 
+// PredicateGas will be called while calculating the IntrinsicGas of a transaction +// causing it to be dropped if the total gas goes above the tx gas limit. +// VerifyPredicate is used to populate a bit set of predicates verified prior to +// block execution, which can be accessed via the StateDB during execution. +// The bitset is stored in the block, so that historical blocks can be re-verified +// without calling VerifyPredicate. +type Predicater interface { + PredicateGas(predicateBytes []byte) (uint64, error) + VerifyPredicate(predicateContext *PredicateContext, predicateBytes []byte) error +} + +// SharedMemoryWriter defines an interface to allow a precompile's Accepter to write operations +// into shared memory to be committed atomically on block accept. +type SharedMemoryWriter interface { + AddSharedMemoryRequests(chainID ids.ID, requests *atomic.Requests) +} + +type WarpMessageWriter interface { + AddMessage(unsignedMessage *warp.UnsignedMessage) error +} + +// AcceptContext defines the context passed in to a precompileconfig's Accepter +type AcceptContext struct { + SnowCtx *snow.Context + SharedMemory SharedMemoryWriter + Warp WarpMessageWriter +} + +// Accepter is an optional interface for StatefulPrecompiledContracts to implement. +// If implemented, Accept will be called for every log with the address of the precompile when the block is accepted. +// WARNING: If you are implementing a custom precompile, beware that coreth +// will not maintain backwards compatibility of this interface and your code should not +// rely on this. Designed for use only by precompiles that ship with coreth. +type Accepter interface { + Accept(acceptCtx *AcceptContext, blockHash common.Hash, blockNumber uint64, txHash common.Hash, logIndex int, topics []common.Hash, logData []byte) error +} + +// ChainContext defines an interface that provides information to a stateful precompile +// about the chain configuration. 
The precompile can access this information to initialize +// its state. +type ChainConfig interface { + // IsDurango returns true if the time is after Durango. + IsDurango(time uint64) bool +} diff --git a/coreth/precompile/precompileconfig/mocks.go b/coreth/precompile/precompileconfig/mocks.go new file mode 100644 index 00000000..4be7f046 --- /dev/null +++ b/coreth/precompile/precompileconfig/mocks.go @@ -0,0 +1,236 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/ava-labs/coreth/precompile/precompileconfig (interfaces: Predicater,Config,ChainConfig,Accepter) +// +// Generated by this command: +// +// mockgen -package=precompileconfig -destination=precompile/precompileconfig/mocks.go github.com/ava-labs/coreth/precompile/precompileconfig Predicater,Config,ChainConfig,Accepter +// + +// Package precompileconfig is a generated GoMock package. +package precompileconfig + +import ( + reflect "reflect" + + common "github.com/ethereum/go-ethereum/common" + gomock "go.uber.org/mock/gomock" +) + +// MockPredicater is a mock of Predicater interface. +type MockPredicater struct { + ctrl *gomock.Controller + recorder *MockPredicaterMockRecorder +} + +// MockPredicaterMockRecorder is the mock recorder for MockPredicater. +type MockPredicaterMockRecorder struct { + mock *MockPredicater +} + +// NewMockPredicater creates a new mock instance. +func NewMockPredicater(ctrl *gomock.Controller) *MockPredicater { + mock := &MockPredicater{ctrl: ctrl} + mock.recorder = &MockPredicaterMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockPredicater) EXPECT() *MockPredicaterMockRecorder { + return m.recorder +} + +// PredicateGas mocks base method. 
+func (m *MockPredicater) PredicateGas(arg0 []byte) (uint64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PredicateGas", arg0) + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PredicateGas indicates an expected call of PredicateGas. +func (mr *MockPredicaterMockRecorder) PredicateGas(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PredicateGas", reflect.TypeOf((*MockPredicater)(nil).PredicateGas), arg0) +} + +// VerifyPredicate mocks base method. +func (m *MockPredicater) VerifyPredicate(arg0 *PredicateContext, arg1 []byte) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "VerifyPredicate", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// VerifyPredicate indicates an expected call of VerifyPredicate. +func (mr *MockPredicaterMockRecorder) VerifyPredicate(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VerifyPredicate", reflect.TypeOf((*MockPredicater)(nil).VerifyPredicate), arg0, arg1) +} + +// MockConfig is a mock of Config interface. +type MockConfig struct { + ctrl *gomock.Controller + recorder *MockConfigMockRecorder +} + +// MockConfigMockRecorder is the mock recorder for MockConfig. +type MockConfigMockRecorder struct { + mock *MockConfig +} + +// NewMockConfig creates a new mock instance. +func NewMockConfig(ctrl *gomock.Controller) *MockConfig { + mock := &MockConfig{ctrl: ctrl} + mock.recorder = &MockConfigMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockConfig) EXPECT() *MockConfigMockRecorder { + return m.recorder +} + +// Equal mocks base method. +func (m *MockConfig) Equal(arg0 Config) bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Equal", arg0) + ret0, _ := ret[0].(bool) + return ret0 +} + +// Equal indicates an expected call of Equal. 
+func (mr *MockConfigMockRecorder) Equal(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Equal", reflect.TypeOf((*MockConfig)(nil).Equal), arg0) +} + +// IsDisabled mocks base method. +func (m *MockConfig) IsDisabled() bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "IsDisabled") + ret0, _ := ret[0].(bool) + return ret0 +} + +// IsDisabled indicates an expected call of IsDisabled. +func (mr *MockConfigMockRecorder) IsDisabled() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsDisabled", reflect.TypeOf((*MockConfig)(nil).IsDisabled)) +} + +// Key mocks base method. +func (m *MockConfig) Key() string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Key") + ret0, _ := ret[0].(string) + return ret0 +} + +// Key indicates an expected call of Key. +func (mr *MockConfigMockRecorder) Key() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Key", reflect.TypeOf((*MockConfig)(nil).Key)) +} + +// Timestamp mocks base method. +func (m *MockConfig) Timestamp() *uint64 { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Timestamp") + ret0, _ := ret[0].(*uint64) + return ret0 +} + +// Timestamp indicates an expected call of Timestamp. +func (mr *MockConfigMockRecorder) Timestamp() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Timestamp", reflect.TypeOf((*MockConfig)(nil).Timestamp)) +} + +// Verify mocks base method. +func (m *MockConfig) Verify(arg0 ChainConfig) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Verify", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// Verify indicates an expected call of Verify. 
+func (mr *MockConfigMockRecorder) Verify(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Verify", reflect.TypeOf((*MockConfig)(nil).Verify), arg0) +} + +// MockChainConfig is a mock of ChainConfig interface. +type MockChainConfig struct { + ctrl *gomock.Controller + recorder *MockChainConfigMockRecorder +} + +// MockChainConfigMockRecorder is the mock recorder for MockChainConfig. +type MockChainConfigMockRecorder struct { + mock *MockChainConfig +} + +// NewMockChainConfig creates a new mock instance. +func NewMockChainConfig(ctrl *gomock.Controller) *MockChainConfig { + mock := &MockChainConfig{ctrl: ctrl} + mock.recorder = &MockChainConfigMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockChainConfig) EXPECT() *MockChainConfigMockRecorder { + return m.recorder +} + +// IsDurango mocks base method. +func (m *MockChainConfig) IsDurango(arg0 uint64) bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "IsDurango", arg0) + ret0, _ := ret[0].(bool) + return ret0 +} + +// IsDurango indicates an expected call of IsDurango. +func (mr *MockChainConfigMockRecorder) IsDurango(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsDurango", reflect.TypeOf((*MockChainConfig)(nil).IsDurango), arg0) +} + +// MockAccepter is a mock of Accepter interface. +type MockAccepter struct { + ctrl *gomock.Controller + recorder *MockAccepterMockRecorder +} + +// MockAccepterMockRecorder is the mock recorder for MockAccepter. +type MockAccepterMockRecorder struct { + mock *MockAccepter +} + +// NewMockAccepter creates a new mock instance. +func NewMockAccepter(ctrl *gomock.Controller) *MockAccepter { + mock := &MockAccepter{ctrl: ctrl} + mock.recorder = &MockAccepterMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. 
+func (m *MockAccepter) EXPECT() *MockAccepterMockRecorder { + return m.recorder +} + +// Accept mocks base method. +func (m *MockAccepter) Accept(arg0 *AcceptContext, arg1 common.Hash, arg2 uint64, arg3 common.Hash, arg4 int, arg5 []common.Hash, arg6 []byte) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Accept", arg0, arg1, arg2, arg3, arg4, arg5, arg6) + ret0, _ := ret[0].(error) + return ret0 +} + +// Accept indicates an expected call of Accept. +func (mr *MockAccepterMockRecorder) Accept(arg0, arg1, arg2, arg3, arg4, arg5, arg6 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Accept", reflect.TypeOf((*MockAccepter)(nil).Accept), arg0, arg1, arg2, arg3, arg4, arg5, arg6) +} diff --git a/coreth/precompile/precompileconfig/upgradeable.go b/coreth/precompile/precompileconfig/upgradeable.go new file mode 100644 index 00000000..d63f3105 --- /dev/null +++ b/coreth/precompile/precompileconfig/upgradeable.go @@ -0,0 +1,33 @@ +// (c) 2022 Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package precompileconfig + +import "github.com/ava-labs/coreth/utils" + +// Upgrade contains the timestamp for the upgrade along with +// a boolean [Disable]. If [Disable] is set, the upgrade deactivates +// the precompile and clears its storage. +type Upgrade struct { + BlockTimestamp *uint64 `json:"blockTimestamp"` + Disable bool `json:"disable,omitempty"` +} + +// Timestamp returns the timestamp this network upgrade goes into effect. +func (u *Upgrade) Timestamp() *uint64 { + return u.BlockTimestamp +} + +// IsDisabled returns true if the network upgrade deactivates the precompile. +func (u *Upgrade) IsDisabled() bool { + return u.Disable +} + +// Equal returns true iff [other] has the same blockTimestamp and has the +// same on value for the Disable flag. 
+func (u *Upgrade) Equal(other *Upgrade) bool { + if other == nil { + return false + } + return u.Disable == other.Disable && utils.Uint64PtrEqual(u.BlockTimestamp, other.BlockTimestamp) +} diff --git a/coreth/precompile/registry/registry.go b/coreth/precompile/registry/registry.go new file mode 100644 index 00000000..a0798ebd --- /dev/null +++ b/coreth/precompile/registry/registry.go @@ -0,0 +1,11 @@ +// (c) 2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +// Module to facilitate the registration of precompiles and their configuration. +package registry + +// Force imports of each precompile to ensure each precompile's init function runs and registers itself +// with the registry. +import ( + _ "github.com/ava-labs/coreth/precompile/contracts/warp" +) diff --git a/coreth/precompile/stateful_precompile_config.go b/coreth/precompile/stateful_precompile_config.go deleted file mode 100644 index 213d3bef..00000000 --- a/coreth/precompile/stateful_precompile_config.go +++ /dev/null @@ -1,59 +0,0 @@ -// (c) 2019-2020, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package precompile - -import ( - "math/big" - - "github.com/ethereum/go-ethereum/common" - - "github.com/ava-labs/coreth/utils" -) - -// StatefulPrecompileConfig defines the interface for a stateful precompile to -type StatefulPrecompileConfig interface { - // Address returns the address where the stateful precompile is accessible. - Address() common.Address - // Timestamp returns the timestamp at which this stateful precompile should be enabled. - // 1) 0 indicates that the precompile should be enabled from genesis. - // 2) n indicates that the precompile should be enabled in the first block with timestamp >= [n]. - // 3) nil indicates that the precompile is never enabled. - Timestamp() *big.Int - // Configure is called on the first block where the stateful precompile should be enabled. 
- // This allows the stateful precompile to configure its own state via [StateDB] as necessary. - // This function must be deterministic since it will impact the EVM state. If a change to the - // config causes a change to the state modifications made in Configure, then it cannot be safely - // made to the config after the network upgrade has gone into effect. - // - // Configure is called on the first block where the stateful precompile should be enabled. This - // provides the config the ability to set its initial state and should only modify the state within - // its own address space. - Configure(ChainConfig, StateDB, BlockContext) - // Contract returns a thread-safe singleton that can be used as the StatefulPrecompiledContract when - // this config is enabled. - Contract() StatefulPrecompiledContract -} - -// CheckConfigure checks if [config] is activated by the transition from block at [parentTimestamp] to the timestamp -// set in [blockContext]. -// If it does, then it calls Configure on [precompileConfig] to make the necessary state update to enable the StatefulPrecompile. -// Note: this function is called within genesis to configure the starting state if [precompileConfig] specifies that it should be -// configured at genesis, or happens during block processing to update the state before processing the given block. -// TODO: add ability to call Configure at different timestamps, so that developers can easily re-configure by updating the -// stateful precompile config. -// Assumes that [config] is non-nil. 
-func CheckConfigure(chainConfig ChainConfig, parentTimestamp *big.Int, blockContext BlockContext, precompileConfig StatefulPrecompileConfig, state StateDB) { - forkTimestamp := precompileConfig.Timestamp() - // If the network upgrade goes into effect within this transition, configure the stateful precompile - if utils.IsForkTransition(forkTimestamp, parentTimestamp, blockContext.Timestamp()) { - // Set the nonce of the precompile's address (as is done when a contract is created) to ensure - // that it is marked as non-empty and will not be cleaned up when the statedb is finalized. - state.SetNonce(precompileConfig.Address(), 1) - // Set the code of the precompile's address to a non-zero length byte slice to ensure that the precompile - // can be called from within Solidity contracts. Solidity adds a check before invoking a contract to ensure - // that it does not attempt to invoke a non-existent contract. - state.SetCode(precompileConfig.Address(), []byte{0x1}) - precompileConfig.Configure(chainConfig, state, blockContext) - } -} diff --git a/coreth/precompile/testutils/test_config.go b/coreth/precompile/testutils/test_config.go new file mode 100644 index 00000000..aee07c4a --- /dev/null +++ b/coreth/precompile/testutils/test_config.go @@ -0,0 +1,60 @@ +// (c) 2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package testutils + +import ( + "testing" + + "github.com/ava-labs/coreth/precompile/precompileconfig" + "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" +) + +// ConfigVerifyTest is a test case for verifying a config +type ConfigVerifyTest struct { + Config precompileconfig.Config + ChainConfig precompileconfig.ChainConfig + ExpectedError string +} + +// ConfigEqualTest is a test case for comparing two configs +type ConfigEqualTest struct { + Config precompileconfig.Config + Other precompileconfig.Config + Expected bool +} + +func RunVerifyTests(t *testing.T, tests map[string]ConfigVerifyTest) { + for name, test := range tests { + t.Run(name, func(t *testing.T) { + t.Helper() + require := require.New(t) + + chainConfig := test.ChainConfig + if chainConfig == nil { + ctrl := gomock.NewController(t) + mockChainConfig := precompileconfig.NewMockChainConfig(ctrl) + mockChainConfig.EXPECT().IsDurango(gomock.Any()).AnyTimes().Return(true) + chainConfig = mockChainConfig + } + err := test.Config.Verify(chainConfig) + if test.ExpectedError == "" { + require.NoError(err) + } else { + require.ErrorContains(err, test.ExpectedError) + } + }) + } +} + +func RunEqualTests(t *testing.T, tests map[string]ConfigEqualTest) { + for name, test := range tests { + t.Run(name, func(t *testing.T) { + t.Helper() + require := require.New(t) + + require.Equal(test.Expected, test.Config.Equal(test.Other)) + }) + } +} diff --git a/coreth/precompile/testutils/test_precompile.go b/coreth/precompile/testutils/test_precompile.go new file mode 100644 index 00000000..a0bc279f --- /dev/null +++ b/coreth/precompile/testutils/test_precompile.go @@ -0,0 +1,206 @@ +// (c) 2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package testutils + +import ( + "math/big" + "testing" + "time" + + "github.com/ava-labs/coreth/precompile/contract" + "github.com/ava-labs/coreth/precompile/modules" + "github.com/ava-labs/coreth/precompile/precompileconfig" + "github.com/ava-labs/coreth/utils" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" +) + +// PrecompileTest is a test case for a precompile +type PrecompileTest struct { + // Caller is the address of the precompile caller + Caller common.Address + // Input the raw input bytes to the precompile + Input []byte + // InputFn is a function that returns the raw input bytes to the precompile + // If specified, Input will be ignored. + InputFn func(t testing.TB) []byte + // SuppliedGas is the amount of gas supplied to the precompile + SuppliedGas uint64 + // ReadOnly is whether the precompile should be called in read only + // mode. If true, the precompile should not modify the state. + ReadOnly bool + // Config is the config to use for the precompile + // It should be the same precompile config that is used in the + // precompile's configurator. + // If nil, Configure will not be called. + Config precompileconfig.Config + // BeforeHook is called before the precompile is called. + BeforeHook func(t testing.TB, state contract.StateDB) + // SetupBlockContext sets the expected calls on MockBlockContext for the test execution. + SetupBlockContext func(*contract.MockBlockContext) + // AfterHook is called after the precompile is called. + AfterHook func(t testing.TB, state contract.StateDB) + // ExpectedRes is the expected raw byte result returned by the precompile + ExpectedRes []byte + // ExpectedErr is the expected error returned by the precompile + ExpectedErr string + // ChainConfig is the chain config to use for the precompile's block context + // If nil, the default chain config will be used. 
+ ChainConfig precompileconfig.ChainConfig +} + +type PrecompileRunparams struct { + AccessibleState contract.AccessibleState + Caller common.Address + ContractAddress common.Address + Input []byte + SuppliedGas uint64 + ReadOnly bool +} + +func (test PrecompileTest) Run(t *testing.T, module modules.Module, state contract.StateDB) { + runParams := test.setup(t, module, state) + + if runParams.Input != nil { + ret, remainingGas, err := module.Contract.Run(runParams.AccessibleState, runParams.Caller, runParams.ContractAddress, runParams.Input, runParams.SuppliedGas, runParams.ReadOnly) + if len(test.ExpectedErr) != 0 { + require.ErrorContains(t, err, test.ExpectedErr) + } else { + require.NoError(t, err) + } + require.Equal(t, uint64(0), remainingGas) + require.Equal(t, test.ExpectedRes, ret) + } + + if test.AfterHook != nil { + test.AfterHook(t, state) + } +} + +func (test PrecompileTest) setup(t testing.TB, module modules.Module, state contract.StateDB) PrecompileRunparams { + t.Helper() + contractAddress := module.Address + + ctrl := gomock.NewController(t) + + if test.BeforeHook != nil { + test.BeforeHook(t, state) + } + + chainConfig := test.ChainConfig + if chainConfig == nil { + mockChainConfig := precompileconfig.NewMockChainConfig(ctrl) + mockChainConfig.EXPECT().IsDurango(gomock.Any()).AnyTimes().Return(true) + chainConfig = mockChainConfig + } + + blockContext := contract.NewMockBlockContext(ctrl) + if test.SetupBlockContext != nil { + test.SetupBlockContext(blockContext) + } else { + blockContext.EXPECT().Number().Return(big.NewInt(0)).AnyTimes() + blockContext.EXPECT().Timestamp().Return(uint64(time.Now().Unix())).AnyTimes() + } + snowContext := utils.TestSnowContext() + + accessibleState := contract.NewMockAccessibleState(ctrl) + accessibleState.EXPECT().GetStateDB().Return(state).AnyTimes() + accessibleState.EXPECT().GetBlockContext().Return(blockContext).AnyTimes() + accessibleState.EXPECT().GetSnowContext().Return(snowContext).AnyTimes() + 
accessibleState.EXPECT().GetChainConfig().Return(chainConfig).AnyTimes() + + if test.Config != nil { + err := module.Configure(chainConfig, test.Config, state, blockContext) + require.NoError(t, err) + } + + input := test.Input + if test.InputFn != nil { + input = test.InputFn(t) + } + + return PrecompileRunparams{ + AccessibleState: accessibleState, + Caller: test.Caller, + ContractAddress: contractAddress, + Input: input, + SuppliedGas: test.SuppliedGas, + ReadOnly: test.ReadOnly, + } +} + +func (test PrecompileTest) Bench(b *testing.B, module modules.Module, state contract.StateDB) { + runParams := test.setup(b, module, state) + + if runParams.Input == nil { + b.Skip("Skipping precompile benchmark due to nil input (used for configuration tests)") + } + + stateDB := runParams.AccessibleState.GetStateDB() + snapshot := stateDB.Snapshot() + + ret, remainingGas, err := module.Contract.Run(runParams.AccessibleState, runParams.Caller, runParams.ContractAddress, runParams.Input, runParams.SuppliedGas, runParams.ReadOnly) + if len(test.ExpectedErr) != 0 { + require.ErrorContains(b, err, test.ExpectedErr) + } else { + require.NoError(b, err) + } + require.Equal(b, uint64(0), remainingGas) + require.Equal(b, test.ExpectedRes, ret) + + if test.AfterHook != nil { + test.AfterHook(b, state) + } + + b.ReportAllocs() + start := time.Now() + b.ResetTimer() + + for i := 0; i < b.N; i++ { + // Revert to the previous snapshot and take a new snapshot, so we can reset the state after execution + stateDB.RevertToSnapshot(snapshot) + snapshot = stateDB.Snapshot() + + // Ignore return values for benchmark + _, _, _ = module.Contract.Run(runParams.AccessibleState, runParams.Caller, runParams.ContractAddress, runParams.Input, runParams.SuppliedGas, runParams.ReadOnly) + } + b.StopTimer() + + elapsed := uint64(time.Since(start)) + if elapsed < 1 { + elapsed = 1 + } + gasUsed := runParams.SuppliedGas * uint64(b.N) + b.ReportMetric(float64(runParams.SuppliedGas), "gas/op") + // Keep it as 
uint64, multiply 100 to get two digit float later + mgasps := (100 * 1000 * gasUsed) / elapsed + b.ReportMetric(float64(mgasps)/100, "mgas/s") + + // Execute the test one final time to ensure that if our RevertToSnapshot logic breaks such that each run is actually failing or resulting in unexpected behavior + // the benchmark should catch the error here. + stateDB.RevertToSnapshot(snapshot) + ret, remainingGas, err = module.Contract.Run(runParams.AccessibleState, runParams.Caller, runParams.ContractAddress, runParams.Input, runParams.SuppliedGas, runParams.ReadOnly) + if len(test.ExpectedErr) != 0 { + require.ErrorContains(b, err, test.ExpectedErr) + } else { + require.NoError(b, err) + } + require.Equal(b, uint64(0), remainingGas) + require.Equal(b, test.ExpectedRes, ret) + + if test.AfterHook != nil { + test.AfterHook(b, state) + } +} + +func RunPrecompileTests(t *testing.T, module modules.Module, newStateDB func(t testing.TB) contract.StateDB, contractTests map[string]PrecompileTest) { + t.Helper() + + for name, test := range contractTests { + t.Run(name, func(t *testing.T) { + test.Run(t, module, newStateDB(t)) + }) + } +} diff --git a/coreth/precompile/testutils/test_predicate.go b/coreth/precompile/testutils/test_predicate.go new file mode 100644 index 00000000..0b280759 --- /dev/null +++ b/coreth/precompile/testutils/test_predicate.go @@ -0,0 +1,81 @@ +// (c) 2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package testutils + +import ( + "testing" + "time" + + "github.com/ava-labs/coreth/precompile/precompileconfig" + "github.com/stretchr/testify/require" +) + +// PredicateTest defines a unit test/benchmark for verifying a precompile predicate. 
+type PredicateTest struct { + Config precompileconfig.Config + + PredicateContext *precompileconfig.PredicateContext + + PredicateBytes []byte + Gas uint64 + GasErr error + ExpectedErr error +} + +func (test PredicateTest) Run(t testing.TB) { + t.Helper() + require := require.New(t) + predicate := test.Config.(precompileconfig.Predicater) + + predicateGas, predicateGasErr := predicate.PredicateGas(test.PredicateBytes) + require.ErrorIs(predicateGasErr, test.GasErr) + if test.GasErr != nil { + return + } + + require.Equal(test.Gas, predicateGas) + + predicateRes := predicate.VerifyPredicate(test.PredicateContext, test.PredicateBytes) + require.ErrorIs(predicateRes, test.ExpectedErr) +} + +func RunPredicateTests(t *testing.T, predicateTests map[string]PredicateTest) { + t.Helper() + + for name, test := range predicateTests { + t.Run(name, func(t *testing.T) { + test.Run(t) + }) + } +} + +func (test PredicateTest) RunBenchmark(b *testing.B) { + b.ReportAllocs() + start := time.Now() + b.ResetTimer() + for i := 0; i < b.N; i++ { + test.Run(b) + } + b.StopTimer() + elapsed := uint64(time.Since(start)) + if elapsed < 1 { + elapsed = 1 + } + + gasUsed := test.Gas * uint64(b.N) + b.ReportMetric(float64(test.Gas), "gas/op") + // Keep it as uint64, multiply 100 to get two digit float later + mgasps := (100 * 1000 * gasUsed) / elapsed + b.ReportMetric(float64(mgasps)/100, "mgas/s") +} + +func RunPredicateBenchmarks(b *testing.B, predicateTests map[string]PredicateTest) { + b.Helper() + + for name, test := range predicateTests { + b.Run(name, func(b *testing.B) { + test.RunBenchmark(b) + }) + } +} diff --git a/coreth/precompile/utils.go b/coreth/precompile/utils.go deleted file mode 100644 index f185b2fa..00000000 --- a/coreth/precompile/utils.go +++ /dev/null @@ -1,34 +0,0 @@ -// (c) 2019-2020, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package precompile - -import ( - "fmt" - "regexp" - - "github.com/ava-labs/coreth/vmerrs" - "github.com/ethereum/go-ethereum/crypto" -) - -var functionSignatureRegex = regexp.MustCompile(`[\w]+\(((([\w]+)?)|((([\w]+),)+([\w]+)))\)`) - -// CalculateFunctionSelector returns the 4 byte function selector that results from [functionSignature] -// Ex. the function setBalance(addr address, balance uint256) should be passed in as the string: -// "setBalance(address,uint256)" -func CalculateFunctionSelector(functionSignature string) []byte { - if !functionSignatureRegex.MatchString(functionSignature) { - panic(fmt.Errorf("invalid function signature: %q", functionSignature)) - } - hash := crypto.Keccak256([]byte(functionSignature)) - return hash[:4] -} - -// deductGas checks if [suppliedGas] is sufficient against [requiredGas] and deducts [requiredGas] from [suppliedGas]. -//nolint:unused,deadcode -func deductGas(suppliedGas uint64, requiredGas uint64) (uint64, error) { - if suppliedGas < requiredGas { - return 0, vmerrs.ErrOutOfGas - } - return suppliedGas - requiredGas, nil -} diff --git a/coreth/predicate/Predicate.md b/coreth/predicate/Predicate.md new file mode 100644 index 00000000..b35cbfd1 --- /dev/null +++ b/coreth/predicate/Predicate.md @@ -0,0 +1,11 @@ +# Predicate + +This package contains the predicate data structure and its encoding and helper functions to unpack/pack the data structure. + +## Encoding + +A byte slice of size N is encoded as: + +1. Slice of N bytes +2. Delimiter byte `0xff` +3. Appended 0s to the nearest multiple of 32 bytes diff --git a/coreth/predicate/Results.md b/coreth/predicate/Results.md new file mode 100644 index 00000000..67e64650 --- /dev/null +++ b/coreth/predicate/Results.md @@ -0,0 +1,126 @@ +# Results + +The results package defines how to encode `PredicateResults` within the block header's `Extra` data field. 
+ +For more information on the motivation for encoding the results of predicate verification within a block, see [here](../../x/warp/README.md#re-processing-historical-blocks). + +## Serialization + +Note: PredicateResults are encoded using the AvalancheGo codec, which serializes a map by serializing the length of the map as a uint32 and then serializes each key-value pair sequentially. + +PredicateResults: +``` ++---------------------+----------------------------------+-------------------+ +| codecID : uint16 | 2 bytes | ++---------------------+----------------------------------+-------------------+ +| results : map[[32]byte]TxPredicateResults | 4 + size(results) | ++---------------------+----------------------------------+-------------------+ + | 6 + size(results) | + +-------------------+ +``` + +- `codecID` is the codec version used to serialize the payload and is hardcoded to `0x0000` +- `results` is a map of transaction hashes to the corresponding `TxPredicateResults` + +TxPredicateResults +``` ++--------------------+---------------------+------------------------------------+ +| txPredicateResults : map[[20]byte][]byte | 4 + size(txPredicateResults) bytes | ++--------------------+---------------------+------------------------------------+ + | 4 + size(txPredicateResults) bytes | + +------------------------------------+ +``` + +- `txPredicateResults` is a map of precompile addresses to the corresponding byte array returned by the predicate + +### Examples + +#### Empty Predicate Results Map + +``` +// codecID +0x00, 0x00, +// results length +0x00, 0x00, 0x00, 0x00 +``` + +#### Predicate Map with a Single Transaction Result + +``` +// codecID +0x00, 0x00, +// Results length +0x00, 0x00, 0x00, 0x01, +// txHash (key in results map) +0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +// TxPredicateResults (value in 
results map) +// TxPredicateResults length +0x00, 0x00, 0x00, 0x01, +// precompile address (key in TxPredicateResults map) +0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, +// Byte array results (value in TxPredicateResults map) +// Length of bytes result +0x00, 0x00, 0x00, 0x03, +// bytes +0x01, 0x02, 0x03 +``` + +#### Predicate Map with Two Transaction Results + +``` +// codecID +0x00, 0x00, +// Results length +0x00, 0x00, 0x00, 0x02, +// txHash (key in results map) +0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +// TxPredicateResults (value in results map) +// TxPredicateResults length +0x00, 0x00, 0x00, 0x01, +// precompile address (key in TxPredicateResults map) +0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, +// Byte array results (value in TxPredicateResults map) +// Length of bytes result +0x00, 0x00, 0x00, 0x03, +// bytes +0x01, 0x02, 0x03 +// txHash2 (key in results map) +0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +// TxPredicateResults (value in results map) +// TxPredicateResults length +0x00, 0x00, 0x00, 0x01, +// precompile address (key in TxPredicateResults map) +0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, +// Byte array results (value in TxPredicateResults map) +// Length of bytes result +0x00, 0x00, 0x00, 0x03, +// bytes +0x01, 0x02, 0x03 +``` + +### Maximum Size + +Results has a maximum size of 1MB enforced by the codec. 
The actual size depends on how much data the Precompile predicates may put into the results, the gas cost they charge, and the block gas limit.
+
+The Results maximum size should comfortably exceed the maximum value that could happen in practice, so that a correct block builder will not attempt to build a block and fail to marshal the predicate results using the codec.
+
+We make this easy to reason about by assigning a minimum gas cost to the `PredicateGas` function of precompiles. In the case of Warp, the minimum gas cost is set to 200k gas, which can lead to at most 32 additional bytes being included in Results.
+
+The additional bytes come from the transaction hash (32 bytes), length of tx predicate results (4 bytes), the precompile address (20 bytes), length of the bytes result (4 bytes), and the additional byte in the results bitset (1 byte). This results in 200k gas contributing a maximum of 61 additional bytes to Result.
+
+For a block with a maximum gas limit of 100M, the block can include up to 500 validated predicates contributing to the size of Result. At 61 bytes / validated predicate, this yields ~30KB, which is well short of the 1MB cap.
diff --git a/coreth/predicate/predicate_bytes.go b/coreth/predicate/predicate_bytes.go
new file mode 100644
index 00000000..71a266e3
--- /dev/null
+++ b/coreth/predicate/predicate_bytes.go
@@ -0,0 +1,64 @@
+// (c) 2023, Ava Labs, Inc. All rights reserved.
+// See the file LICENSE for licensing terms.
+
+package predicate
+
+import (
+	"fmt"
+
+	"github.com/ava-labs/coreth/params"
+	"github.com/ethereum/go-ethereum/common"
+)
+
+// EndByte is used as a delimiter for the bytes packed into a precompile predicate.
+// Precompile predicates are encoded in the Access List of transactions in the access tuples
+// which means that its length must be a multiple of 32 (common.HashLength).
+// For messages with a length that does not comply to that, this delimiter is used to
+// append/remove padding.
+var EndByte = byte(0xff)
+
+var (
+	ErrInvalidAllZeroBytes = fmt.Errorf("predicate specified invalid all zero bytes")
+	ErrInvalidPadding      = fmt.Errorf("predicate specified invalid padding")
+	ErrInvalidEndDelimiter = fmt.Errorf("invalid end delimiter")
+	ErrorInvalidExtraData  = fmt.Errorf("header extra data too short for predicate verification")
+)
+
+// PackPredicate packs [predicateBytes] by delimiting the actual message with [EndByte]
+// and zero padding to reach a length that is a multiple of 32.
+func PackPredicate(predicateBytes []byte) []byte {
+	predicateBytes = append(predicateBytes, EndByte)
+	return common.RightPadBytes(predicateBytes, (len(predicateBytes)+31)/32*32)
+}
+
+// UnpackPredicate unpacks a predicate by stripping right padded zeroes, checking for the delimiter,
+// ensuring there is not excess padding, and returning the original message.
+// Returns an error if it finds an incorrect encoding.
+func UnpackPredicate(paddedPredicate []byte) ([]byte, error) {
+	trimmedPredicateBytes := common.TrimRightZeroes(paddedPredicate)
+	if len(trimmedPredicateBytes) == 0 {
+		return nil, fmt.Errorf("%w: 0x%x", ErrInvalidAllZeroBytes, paddedPredicate)
+	}
+
+	if expectedPaddedLength := (len(trimmedPredicateBytes) + 31) / 32 * 32; expectedPaddedLength != len(paddedPredicate) {
+		return nil, fmt.Errorf("%w: got length (%d), expected length (%d)", ErrInvalidPadding, len(paddedPredicate), expectedPaddedLength)
+	}
+
+	if trimmedPredicateBytes[len(trimmedPredicateBytes)-1] != EndByte {
+		return nil, ErrInvalidEndDelimiter
+	}
+
+	return trimmedPredicateBytes[:len(trimmedPredicateBytes)-1], nil
+}
+
+// GetPredicateResultBytes returns the predicate result bytes from the extra data and
+// true iff the predicate results bytes have non-zero length.
+func GetPredicateResultBytes(extraData []byte) ([]byte, bool) {
+	// Prior to Durango, the VM enforces the extra data is smaller than or equal to this size.
+ // After Durango, the VM pre-verifies the extra data past the dynamic fee rollup window is + // valid. + if len(extraData) <= params.DynamicFeeExtraDataSize { + return nil, false + } + return extraData[params.DynamicFeeExtraDataSize:], true +} diff --git a/coreth/predicate/predicate_bytes_test.go b/coreth/predicate/predicate_bytes_test.go new file mode 100644 index 00000000..5184afb0 --- /dev/null +++ b/coreth/predicate/predicate_bytes_test.go @@ -0,0 +1,66 @@ +// (c) 2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package predicate + +import ( + "bytes" + "testing" + + "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/coreth/params" + "github.com/stretchr/testify/require" +) + +func testPackPredicate(t testing.TB, b []byte) { + packedPredicate := PackPredicate(b) + unpackedPredicated, err := UnpackPredicate(packedPredicate) + require.NoError(t, err) + require.Equal(t, b, unpackedPredicated) +} + +func FuzzPackPredicate(f *testing.F) { + for i := 0; i < 100; i++ { + f.Add(utils.RandomBytes(i)) + } + + f.Fuzz(func(t *testing.T, b []byte) { + testPackPredicate(t, b) + }) +} + +func TestUnpackInvalidPredicate(t *testing.T) { + require := require.New(t) + // Predicate encoding requires a 0xff delimiter byte followed by padding of all zeroes, so any other + // excess padding should invalidate the predicate. + paddingCases := make([][]byte, 0, 200) + for i := 1; i < 100; i++ { + paddingCases = append(paddingCases, bytes.Repeat([]byte{0xee}, i)) + paddingCases = append(paddingCases, make([]byte, i)) + } + + for _, l := range []int{0, 1, 31, 32, 33, 63, 64, 65} { + validPredicate := PackPredicate(utils.RandomBytes(l)) + + for _, padding := range paddingCases { + invalidPredicate := append(validPredicate, padding...) 
+ _, err := UnpackPredicate(invalidPredicate) + require.Error(err, "Predicate length %d, Padding length %d (0x%x)", len(validPredicate), len(padding), invalidPredicate) + } + } +} + +func TestPredicateResultsBytes(t *testing.T) { + require := require.New(t) + dataTooShort := utils.RandomBytes(params.DynamicFeeExtraDataSize - 1) + _, ok := GetPredicateResultBytes(dataTooShort) + require.False(ok) + + preDurangoData := utils.RandomBytes(params.DynamicFeeExtraDataSize) + _, ok = GetPredicateResultBytes(preDurangoData) + require.False(ok) + postDurangoData := utils.RandomBytes(params.DynamicFeeExtraDataSize + 2) + resultBytes, ok := GetPredicateResultBytes(postDurangoData) + require.True(ok) + require.Equal(resultBytes, postDurangoData[params.DynamicFeeExtraDataSize:]) +} diff --git a/coreth/predicate/predicate_results.go b/coreth/predicate/predicate_results.go new file mode 100644 index 00000000..c28d811f --- /dev/null +++ b/coreth/predicate/predicate_results.go @@ -0,0 +1,114 @@ +// (c) 2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package predicate + +import ( + "fmt" + "strings" + "time" + + "github.com/ava-labs/avalanchego/codec" + "github.com/ava-labs/avalanchego/codec/linearcodec" + "github.com/ava-labs/avalanchego/utils/units" + "github.com/ava-labs/avalanchego/utils/wrappers" + "github.com/ethereum/go-ethereum/common" +) + +const ( + Version = uint16(0) + MaxResultsSize = units.MiB +) + +var Codec codec.Manager + +func init() { + Codec = codec.NewManager(MaxResultsSize) + + c := linearcodec.NewDefault(time.Time{}) + errs := wrappers.Errs{} + errs.Add( + c.RegisterType(Results{}), + Codec.RegisterCodec(Version, c), + ) + if errs.Errored() { + panic(errs.Err) + } +} + +// TxResults is a map of results for each precompile address to the resulting byte array. +type TxResults map[common.Address][]byte + +// Results encodes the precompile predicate results included in a block on a per transaction basis. 
+// Results is not thread-safe. +type Results struct { + Results map[common.Hash]TxResults `serialize:"true"` +} + +// NewResults returns an empty predicate results. +func NewResults() *Results { + return &Results{ + Results: make(map[common.Hash]TxResults), + } +} + +func NewResultsFromMap(results map[common.Hash]TxResults) *Results { + return &Results{ + Results: results, + } +} + +// ParseResults parses [b] into predicate results. +func ParseResults(b []byte) (*Results, error) { + res := new(Results) + parsedVersion, err := Codec.Unmarshal(b, res) + if err != nil { + return nil, fmt.Errorf("failed to unmarshal predicate results: %w", err) + } + if parsedVersion != Version { + return nil, fmt.Errorf("invalid version (found %d, expected %d)", parsedVersion, Version) + } + return res, nil +} + +// GetResults returns the byte array results for [txHash] from precompile [address] if available. +func (r *Results) GetResults(txHash common.Hash, address common.Address) []byte { + txResults, ok := r.Results[txHash] + if !ok { + return nil + } + return txResults[address] +} + +// SetTxResults sets the predicate results for the given [txHash]. Overrides results if present. +func (r *Results) SetTxResults(txHash common.Hash, txResults TxResults) { + // If there are no tx results, don't store an entry in the map + if len(txResults) == 0 { + delete(r.Results, txHash) + return + } + r.Results[txHash] = txResults +} + +// DeleteTxResults deletes the predicate results for the given [txHash]. 
+func (r *Results) DeleteTxResults(txHash common.Hash) { + delete(r.Results, txHash) +} + +// Bytes marshals the current state of predicate results +func (r *Results) Bytes() ([]byte, error) { + return Codec.Marshal(Version, r) +} + +func (r *Results) String() string { + sb := strings.Builder{} + + sb.WriteString(fmt.Sprintf("PredicateResults: (Size = %d)", len(r.Results))) + for txHash, results := range r.Results { + for address, result := range results { + sb.WriteString(fmt.Sprintf("\n%s %s: %x", txHash, address, result)) + } + } + + return sb.String() +} diff --git a/coreth/predicate/predicate_results_test.go b/coreth/predicate/predicate_results_test.go new file mode 100644 index 00000000..e3daefa7 --- /dev/null +++ b/coreth/predicate/predicate_results_test.go @@ -0,0 +1,128 @@ +// (c) 2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package predicate + +import ( + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +func TestPredicateResultsParsing(t *testing.T) { + type test struct { + results map[common.Hash]TxResults + expectedHex string + } + for name, test := range map[string]test{ + "empty": { + results: make(map[common.Hash]TxResults), + expectedHex: "000000000000", + }, + "single tx no results": { + results: map[common.Hash]TxResults{ + {1}: map[common.Address][]byte{}, + }, + expectedHex: "000000000001010000000000000000000000000000000000000000000000000000000000000000000000", + }, + "single tx single result": { + results: map[common.Hash]TxResults{ + {1}: map[common.Address][]byte{ + {2}: {1, 2, 3}, + }, + }, + expectedHex: "000000000001010000000000000000000000000000000000000000000000000000000000000000000001020000000000000000000000000000000000000000000003010203", + }, + "single tx multiple results": { + results: map[common.Hash]TxResults{ + {1}: map[common.Address][]byte{ + {2}: {1, 2, 3}, + {3}: {1, 2, 3}, + }, + }, + expectedHex: 
"000000000001010000000000000000000000000000000000000000000000000000000000000000000002020000000000000000000000000000000000000000000003010203030000000000000000000000000000000000000000000003010203", + }, + "multiple txs no result": { + results: map[common.Hash]TxResults{ + {1}: map[common.Address][]byte{}, + {2}: map[common.Address][]byte{}, + }, + expectedHex: "000000000002010000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000", + }, + "multiple txs single result": { + results: map[common.Hash]TxResults{ + {1}: map[common.Address][]byte{ + {2}: {1, 2, 3}, + }, + {2}: map[common.Address][]byte{ + {3}: {3, 2, 1}, + }, + }, + expectedHex: "000000000002010000000000000000000000000000000000000000000000000000000000000000000001020000000000000000000000000000000000000000000003010203020000000000000000000000000000000000000000000000000000000000000000000001030000000000000000000000000000000000000000000003030201", + }, + "multiple txs multiple results": { + results: map[common.Hash]TxResults{ + {1}: map[common.Address][]byte{ + {2}: {1, 2, 3}, + {3}: {3, 2, 1}, + }, + {2}: map[common.Address][]byte{ + {2}: {1, 2, 3}, + {3}: {3, 2, 1}, + }, + }, + expectedHex: "000000000002010000000000000000000000000000000000000000000000000000000000000000000002020000000000000000000000000000000000000000000003010203030000000000000000000000000000000000000000000003030201020000000000000000000000000000000000000000000000000000000000000000000002020000000000000000000000000000000000000000000003010203030000000000000000000000000000000000000000000003030201", + }, + "multiple txs mixed results": { + results: map[common.Hash]TxResults{ + {1}: map[common.Address][]byte{ + {2}: {1, 2, 3}, + }, + {2}: map[common.Address][]byte{ + {2}: {1, 2, 3}, + {3}: {3, 2, 1}, + }, + {3}: map[common.Address][]byte{}, + }, + expectedHex: 
"000000000003010000000000000000000000000000000000000000000000000000000000000000000001020000000000000000000000000000000000000000000003010203020000000000000000000000000000000000000000000000000000000000000000000002020000000000000000000000000000000000000000000003010203030000000000000000000000000000000000000000000003030201030000000000000000000000000000000000000000000000000000000000000000000000", + }, + } { + t.Run(name, func(t *testing.T) { + require := require.New(t) + predicateResults := NewResultsFromMap(test.results) + b, err := predicateResults.Bytes() + require.NoError(err) + + parsedPredicateResults, err := ParseResults(b) + require.NoError(err) + require.Equal(predicateResults, parsedPredicateResults) + require.Equal(test.expectedHex, common.Bytes2Hex(b)) + }) + } +} + +func TestPredicateResultsAccessors(t *testing.T) { + require := require.New(t) + + predicateResults := NewResults() + + txHash := common.Hash{1} + addr := common.Address{2} + predicateResult := []byte{1, 2, 3} + txPredicateResults := map[common.Address][]byte{ + addr: predicateResult, + } + + require.Empty(predicateResults.GetResults(txHash, addr)) + predicateResults.SetTxResults(txHash, txPredicateResults) + require.Equal(predicateResult, predicateResults.GetResults(txHash, addr)) + predicateResults.DeleteTxResults(txHash) + require.Empty(predicateResults.GetResults(txHash, addr)) + + // Ensure setting empty tx predicate results removes the entry + predicateResults.SetTxResults(txHash, txPredicateResults) + require.Equal(predicateResult, predicateResults.GetResults(txHash, addr)) + predicateResults.SetTxResults(txHash, map[common.Address][]byte{}) + require.Empty(predicateResults.GetResults(txHash, addr)) +} diff --git a/coreth/predicate/predicate_slots.go b/coreth/predicate/predicate_slots.go new file mode 100644 index 00000000..6eccc5bc --- /dev/null +++ b/coreth/predicate/predicate_slots.go @@ -0,0 +1,27 @@ +// (c) 2023, Ava Labs, Inc. All rights reserved. 
+// See the file LICENSE for licensing terms.
+
+package predicate
+
+import (
+	"github.com/ava-labs/coreth/core/types"
+	"github.com/ava-labs/coreth/params"
+	"github.com/ava-labs/coreth/utils"
+	"github.com/ethereum/go-ethereum/common"
+)
+
+// PreparePredicateStorageSlots populates the predicate storage slots of a transaction's access list
+// Note: if an address is specified multiple times in the access list, each storage slot for that address is
+// appended to a slice of byte slices. Each byte slice represents a predicate, making it a slice of predicates
+// for each access list address, and every predicate in the slice goes through verification.
+func PreparePredicateStorageSlots(rules params.Rules, list types.AccessList) map[common.Address][][]byte {
+	predicateStorageSlots := make(map[common.Address][][]byte)
+	for _, el := range list {
+		if !rules.PredicaterExists(el.Address) {
+			continue
+		}
+		predicateStorageSlots[el.Address] = append(predicateStorageSlots[el.Address], utils.HashSliceToBytes(el.StorageKeys))
+	}
+
+	return predicateStorageSlots
+}
diff --git a/coreth/predicate/predicate_tx.go b/coreth/predicate/predicate_tx.go
new file mode 100644
index 00000000..76bb3ce6
--- /dev/null
+++ b/coreth/predicate/predicate_tx.go
@@ -0,0 +1,44 @@
+// (c) 2023, Ava Labs, Inc. All rights reserved.
+// See the file LICENSE for licensing terms.
+
+package predicate
+
+import (
+	"math/big"
+
+	"github.com/ava-labs/coreth/core/types"
+	"github.com/ava-labs/coreth/utils"
+	"github.com/ethereum/go-ethereum/common"
+)
+
+// NewPredicateTx returns a transaction with the predicateAddress/predicateBytes tuple
+// packed and added to the access list of the transaction.
+func NewPredicateTx( + chainID *big.Int, + nonce uint64, + to *common.Address, + gas uint64, + gasFeeCap *big.Int, + gasTipCap *big.Int, + value *big.Int, + data []byte, + accessList types.AccessList, + predicateAddress common.Address, + predicateBytes []byte, +) *types.Transaction { + accessList = append(accessList, types.AccessTuple{ + Address: predicateAddress, + StorageKeys: utils.BytesToHashSlice(PackPredicate(predicateBytes)), + }) + return types.NewTx(&types.DynamicFeeTx{ + ChainID: chainID, + Nonce: nonce, + To: to, + Gas: gas, + GasFeeCap: gasFeeCap, + GasTipCap: gasTipCap, + Value: value, + Data: data, + AccessList: accessList, + }) +} diff --git a/coreth/rpc/client.go b/coreth/rpc/client.go index 68a68390..989441a6 100644 --- a/coreth/rpc/client.go +++ b/coreth/rpc/client.go @@ -52,7 +52,7 @@ var ( const ( // Timeouts defaultDialTimeout = 10 * time.Second // used if context has no deadline - subscribeTimeout = 5 * time.Second // overall timeout eth_subscribe, rpc_modules calls + subscribeTimeout = 10 * time.Second // overall timeout eth_subscribe, rpc_modules calls ) const ( @@ -88,7 +88,7 @@ type Client struct { isHTTP bool // isHTTP specifies if the client uses an HTTP connection services *serviceRegistry - idCounter uint32 + idCounter atomic.Uint32 // This function, if non-nil, is called when the connection is lost. 
reconnectFunc reconnectFunc @@ -277,7 +277,7 @@ func (c *Client) RegisterName(name string, receiver interface{}) error { } func (c *Client) nextID() json.RawMessage { - id := atomic.AddUint32(&c.idCounter, 1) + id := c.idCounter.Add(1) return strconv.AppendUint(nil, uint64(id), 10) } @@ -359,7 +359,10 @@ func (c *Client) CallContext(ctx context.Context, result interface{}, method str case len(resp.Result) == 0: return ErrNoResult default: - return json.Unmarshal(resp.Result, &result) + if result == nil { + return nil + } + return json.Unmarshal(resp.Result, result) } } @@ -541,7 +544,7 @@ func (c *Client) write(ctx context.Context, msg interface{}, retry bool) error { return err } } - err := c.writeConn.writeJSON(ctx, msg) + err := c.writeConn.writeJSON(ctx, msg, false) if err != nil { c.writeConn = nil if !retry { @@ -674,7 +677,8 @@ func (c *Client) read(codec ServerCodec) { for { msgs, batch, err := codec.readBatch() if _, ok := err.(*json.SyntaxError); ok { - codec.writeJSON(context.Background(), errorMessage(&parseError{err.Error()})) + msg := errorMessage(&parseError{err.Error()}) + codec.writeJSON(context.Background(), msg, true) } if err != nil { c.readErr <- err diff --git a/coreth/rpc/client_test.go b/coreth/rpc/client_test.go index 5bac4762..4ec0ef41 100644 --- a/coreth/rpc/client_test.go +++ b/coreth/rpc/client_test.go @@ -29,6 +29,7 @@ package rpc import ( "context" "encoding/json" + "errors" "fmt" "math/rand" "net" @@ -76,6 +77,26 @@ func TestClientResponseType(t *testing.T) { } } +// This test checks calling a method that returns 'null'. 
+func TestClientNullResponse(t *testing.T) { + server := newTestServer() + defer server.Stop() + + client := DialInProc(server) + defer client.Close() + + var result json.RawMessage + if err := client.Call(&result, "test_null"); err != nil { + t.Fatal(err) + } + if result == nil { + t.Fatal("Expected non-nil result") + } + if !reflect.DeepEqual(result, json.RawMessage("null")) { + t.Errorf("Expected null, got %s", result) + } +} + // This test checks that server-returned errors with code and data come out of Client.Call. func TestClientErrorData(t *testing.T) { server := newTestServer() @@ -156,6 +177,53 @@ func TestClientBatchRequest(t *testing.T) { } } +func TestClientBatchRequest_len(t *testing.T) { + b, err := json.Marshal([]jsonrpcMessage{ + {Version: "2.0", ID: json.RawMessage("1"), Method: "foo", Result: json.RawMessage(`"0x1"`)}, + {Version: "2.0", ID: json.RawMessage("2"), Method: "bar", Result: json.RawMessage(`"0x2"`)}, + }) + if err != nil { + t.Fatal("failed to encode jsonrpc message:", err) + } + s := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + _, err := rw.Write(b) + if err != nil { + t.Error("failed to write response:", err) + } + })) + t.Cleanup(s.Close) + + client, err := Dial(s.URL) + if err != nil { + t.Fatal("failed to dial test server:", err) + } + defer client.Close() + + t.Run("too-few", func(t *testing.T) { + batch := []BatchElem{ + {Method: "foo"}, + {Method: "bar"}, + {Method: "baz"}, + } + ctx, cancelFn := context.WithTimeout(context.Background(), time.Second) + defer cancelFn() + if err := client.BatchCallContext(ctx, batch); !errors.Is(err, ErrBadResult) { + t.Errorf("expected %q but got: %v", ErrBadResult, err) + } + }) + + t.Run("too-many", func(t *testing.T) { + batch := []BatchElem{ + {Method: "foo"}, + } + ctx, cancelFn := context.WithTimeout(context.Background(), time.Second) + defer cancelFn() + if err := client.BatchCallContext(ctx, batch); !errors.Is(err, ErrBadResult) { + 
t.Errorf("expected %q but got: %v", ErrBadResult, err) + } + }) +} + func TestClientNotify(t *testing.T) { server := newTestServer() defer server.Stop() diff --git a/coreth/rpc/context_headers.go b/coreth/rpc/context_headers.go new file mode 100644 index 00000000..442e0425 --- /dev/null +++ b/coreth/rpc/context_headers.go @@ -0,0 +1,66 @@ +// (c) 2023, Ava Labs, Inc. +// +// This file is a derived work, based on the go-ethereum library whose original +// notices appear below. +// +// It is distributed under a license compatible with the licensing terms of the +// original code from which it is derived. +// +// Much love to the original authors for their work. +// ********** +// Copyright 2022 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +package rpc + +import ( + "context" + "net/http" +) + +type mdHeaderKey struct{} + +// NewContextWithHeaders wraps the given context, adding HTTP headers. These headers will +// be applied by Client when making a request using the returned context. +func NewContextWithHeaders(ctx context.Context, h http.Header) context.Context { + if len(h) == 0 { + // This check ensures the header map set in context will never be nil.
+ return ctx + } + + var ctxh http.Header + prev, ok := ctx.Value(mdHeaderKey{}).(http.Header) + if ok { + ctxh = setHeaders(prev.Clone(), h) + } else { + ctxh = h.Clone() + } + return context.WithValue(ctx, mdHeaderKey{}, ctxh) +} + +// headersFromContext is used to extract http.Header from context. +func headersFromContext(ctx context.Context) http.Header { + source, _ := ctx.Value(mdHeaderKey{}).(http.Header) + return source +} + +// setHeaders sets all headers from src in dst. +func setHeaders(dst http.Header, src http.Header) http.Header { + for key, values := range src { + dst[http.CanonicalHeaderKey(key)] = values + } + return dst +} diff --git a/coreth/rpc/errors.go b/coreth/rpc/errors.go index 7db25ffa..4bdeb05f 100644 --- a/coreth/rpc/errors.go +++ b/coreth/rpc/errors.go @@ -70,10 +70,16 @@ var ( const ( errcodeDefault = -32000 errcodeNotificationsUnsupported = -32001 + errcodeTimeout = -32002 + errcodeResponseTooLarge = -32003 errcodePanic = -32603 errcodeMarshalError = -32603 ) +const ( + errMsgTimeout = "request timed out" +) + type methodNotFoundError struct{ method string } func (e *methodNotFoundError) ErrorCode() int { return -32601 } diff --git a/coreth/rpc/handler.go b/coreth/rpc/handler.go index e43e6028..4dad8db4 100644 --- a/coreth/rpc/handler.go +++ b/coreth/rpc/handler.go @@ -42,12 +42,6 @@ import ( ) const ( - errcodeTimeout = -32002 - errcodeResponseTooLarge = -32003 -) - -const ( - errMsgTimeout = "request timed out" errMsgResponseTooLarge = "response too large" errMsgBatchTooLarge = "batch too large" ) @@ -185,7 +179,7 @@ func (b *batchCallBuffer) write(ctx context.Context, conn jsonWriter) { b.mutex.Lock() defer b.mutex.Unlock() - b.doWrite(ctx, conn) + b.doWrite(ctx, conn, false) } // respondWithError sends the responses added so far. 
For the remaining unanswered call @@ -199,18 +193,33 @@ func (b *batchCallBuffer) respondWithError(ctx context.Context, conn jsonWriter, b.resp = append(b.resp, msg.errorResponse(err)) } } - b.doWrite(ctx, conn) + b.doWrite(ctx, conn, true) +} + +// timeout sends the responses added so far. For the remaining unanswered call +// messages, it sends a timeout error response. +func (b *batchCallBuffer) timeout(ctx context.Context, conn jsonWriter) { + b.mutex.Lock() + defer b.mutex.Unlock() + + for _, msg := range b.calls { + if !msg.isNotification() { + resp := msg.errorResponse(&internalServerError{errcodeTimeout, errMsgTimeout}) + b.resp = append(b.resp, resp) + } + } + b.doWrite(ctx, conn, true) } // doWrite actually writes the response. // This assumes b.mutex is held. -func (b *batchCallBuffer) doWrite(ctx context.Context, conn jsonWriter) { +func (b *batchCallBuffer) doWrite(ctx context.Context, conn jsonWriter, isErrorResponse bool) { if b.wrote { return } b.wrote = true // can only write once if len(b.resp) > 0 { - conn.writeJSONSkipDeadline(ctx, b.resp, true) + conn.writeJSONSkipDeadline(ctx, b.resp, isErrorResponse, true) } } @@ -231,7 +240,8 @@ func (h *handler) handleBatch(msgs []*jsonrpcMessage) { // Emit error response for empty batches: if len(msgs) == 0 { h.startCallProc(func(cp *callProc) { - h.conn.writeJSONSkipDeadline(cp.ctx, errorMessage(&invalidRequestError{"empty batch"}), h.deadlineContext > 0) + resp := errorMessage(&invalidRequestError{"empty batch"}) + h.conn.writeJSONSkipDeadline(cp.ctx, resp, true, h.deadlineContext > 0) }) return } @@ -271,8 +281,7 @@ func (h *handler) handleBatch(msgs []*jsonrpcMessage) { if timeout, ok := ContextRequestTimeout(cp.ctx); ok { timer = time.AfterFunc(timeout, func() { cancel() - err := &internalServerError{errcodeTimeout, errMsgTimeout} - callBuffer.respondWithError(cp.ctx, h.conn, err) + callBuffer.timeout(cp.ctx, h.conn) }) } @@ -288,6 +297,7 @@ func (h *handler) handleBatch(msgs []*jsonrpcMessage) { } 
resp := h.handleCallMsg(cp, msg) callBuffer.pushResponse(resp) + if resp != nil && batchResponseMaxSize != 0 { responseBytes += len(resp.Result) if responseBytes > batchResponseMaxSize { @@ -300,7 +310,6 @@ func (h *handler) handleBatch(msgs []*jsonrpcMessage) { if timer != nil { timer.Stop() } - h.addSubscriptions(cp.notifiers) callBuffer.write(cp.ctx, h.conn) for _, n := range cp.notifiers { @@ -320,7 +329,7 @@ func (h *handler) respondWithBatchTooLarge(cp *callProc, batch []*jsonrpcMessage break } } - h.conn.writeJSONSkipDeadline(cp.ctx, []*jsonrpcMessage{resp}, h.deadlineContext > 0) + h.conn.writeJSONSkipDeadline(cp.ctx, []*jsonrpcMessage{resp}, true, h.deadlineContext > 0) } // handleMsg handles a single message. @@ -329,10 +338,36 @@ func (h *handler) handleMsg(msg *jsonrpcMessage) { return } h.startCallProc(func(cp *callProc) { + var ( + responded sync.Once + timer *time.Timer + cancel context.CancelFunc + ) + cp.ctx, cancel = context.WithCancel(cp.ctx) + defer cancel() + + // Cancel the request context after timeout and send an error response. Since the + // running method might not return immediately on timeout, we must wait for the + // timeout concurrently with processing the request. 
+ if timeout, ok := ContextRequestTimeout(cp.ctx); ok { + timer = time.AfterFunc(timeout, func() { + cancel() + responded.Do(func() { + resp := msg.errorResponse(&internalServerError{errcodeTimeout, errMsgTimeout}) + h.conn.writeJSONSkipDeadline(cp.ctx, resp, true, h.deadlineContext > 0) + }) + }) + } + answer := h.handleCallMsg(cp, msg) + if timer != nil { + timer.Stop() + } h.addSubscriptions(cp.notifiers) if answer != nil { - h.conn.writeJSONSkipDeadline(cp.ctx, answer, h.deadlineContext > 0) + responded.Do(func() { + h.conn.writeJSONSkipDeadline(cp.ctx, answer, false, h.deadlineContext > 0) + }) } for _, n := range cp.notifiers { n.activate() @@ -565,7 +600,7 @@ func (h *handler) handleCallMsg(ctx *callProc, msg *jsonrpcMessage) *jsonrpcMess if resp.Error.Data != nil { ctx = append(ctx, "errdata", resp.Error.Data) } - h.log.Warn("Served "+msg.Method, ctx...) + h.log.Info("Served "+msg.Method, ctx...) } else { h.log.Debug("Served "+msg.Method, ctx...) } @@ -597,7 +632,6 @@ func (h *handler) handleCall(cp *callProc, msg *jsonrpcMessage) *jsonrpcMessage } start := time.Now() answer := h.runMethod(cp.ctx, msg, callb, args) - // Collect the statistics for RPC calls if metrics is enabled. // We only care about pure rpc call. Filter out subscription. if callb != h.unsubscribeCb { diff --git a/coreth/rpc/http.go b/coreth/rpc/http.go index a146f1b7..56fea59f 100644 --- a/coreth/rpc/http.go +++ b/coreth/rpc/http.go @@ -37,6 +37,7 @@ import ( "mime" "net/http" "net/url" + "strconv" "sync" "time" ) @@ -63,11 +64,11 @@ type httpConn struct { // and some methods don't work. The panic() stubs here exist to ensure // this special treatment is correct. 
-func (hc *httpConn) writeJSON(ctx context.Context, val interface{}) error { - return hc.writeJSONSkipDeadline(ctx, val, false) +func (hc *httpConn) writeJSON(ctx context.Context, val interface{}, isError bool) error { + return hc.writeJSONSkipDeadline(ctx, val, isError, false) } -func (hc *httpConn) writeJSONSkipDeadline(context.Context, interface{}, bool) error { +func (hc *httpConn) writeJSONSkipDeadline(context.Context, interface{}, bool, bool) error { panic("writeJSON called on httpConn") } @@ -150,6 +151,7 @@ func DialHTTPWithClient(endpoint string, client *http.Client) (*Client, error) { } var cfg clientConfig + cfg.httpClient = client fn := newClientTransportHTTP(endpoint, &cfg) return newClient(context.Background(), fn) } @@ -221,7 +223,7 @@ func (hc *httpConn) doRequest(ctx context.Context, msg interface{}) (io.ReadClos if err != nil { return nil, err } - req, err := http.NewRequestWithContext(ctx, "POST", hc.url, io.NopCloser(bytes.NewReader(body))) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, hc.url, io.NopCloser(bytes.NewReader(body))) if err != nil { return nil, err } @@ -232,6 +234,8 @@ func (hc *httpConn) doRequest(ctx context.Context, msg interface{}) (io.ReadClos hc.mu.Lock() req.Header = hc.headers.Clone() hc.mu.Unlock() + setHeaders(req.Header, headersFromContext(ctx)) + if hc.auth != nil { if err := hc.auth(req.Header); err != nil { return nil, err @@ -269,7 +273,42 @@ type httpServerConn struct { func newHTTPServerConn(r *http.Request, w http.ResponseWriter) ServerCodec { body := io.LimitReader(r.Body, maxRequestContentLength) conn := &httpServerConn{Reader: body, Writer: w, r: r} - return NewCodec(conn) + + encoder := func(v any, isErrorResponse bool) error { + if !isErrorResponse { + return json.NewEncoder(conn).Encode(v) + } + + // It's an error response and requires special treatment. + // + // In case of a timeout error, the response must be written before the HTTP + // server's write timeout occurs. 
So we need to flush the response. The + // Content-Length header also needs to be set to ensure the client knows + // when it has the full response. + encdata, err := json.Marshal(v) + if err != nil { + return err + } + w.Header().Set("content-length", strconv.Itoa(len(encdata))) + + // If this request is wrapped in a handler that might remove Content-Length (such + // as the automatic gzip we do in package node), we need to ensure the HTTP server + // doesn't perform chunked encoding. In case WriteTimeout is reached, the chunked + // encoding might not be finished correctly, and some clients do not like it when + // the final chunk is missing. + w.Header().Set("transfer-encoding", "identity") + + _, err = w.Write(encdata) + if f, ok := w.(http.Flusher); ok { + f.Flush() + } + return err + } + + dec := json.NewDecoder(conn) + dec.UseNumber() + + return NewFuncCodec(conn, encoder, dec.Decode) } // Close does nothing and always returns nil. diff --git a/coreth/rpc/http_test.go b/coreth/rpc/http_test.go index 2a22198f..d5d4ad11 100644 --- a/coreth/rpc/http_test.go +++ b/coreth/rpc/http_test.go @@ -27,6 +27,8 @@ package rpc import ( + "context" + "fmt" "net/http" "net/http/httptest" "strings" @@ -102,6 +104,7 @@ func confirmHTTPRequestYieldsStatusCode(t *testing.T, method, contentType, body if err != nil { t.Fatalf("request failed: %v", err) } + resp.Body.Close() confirmStatusCode(t, resp.StatusCode, expectedStatusCode) } @@ -208,3 +211,43 @@ func TestHTTPPeerInfo(t *testing.T) { t.Errorf("wrong HTTP.Origin %q", info.HTTP.UserAgent) } } + +func TestNewContextWithHeaders(t *testing.T) { + expectedHeaders := 0 + server := httptest.NewServer(http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) { + for i := 0; i < expectedHeaders; i++ { + key, want := fmt.Sprintf("key-%d", i), fmt.Sprintf("val-%d", i) + if have := request.Header.Get(key); have != want { + t.Errorf("wrong request headers for %s, want: %s, have: %s", key, want, have) + } + } + 
writer.WriteHeader(http.StatusOK) + _, _ = writer.Write([]byte(`{}`)) + })) + defer server.Close() + + client, err := Dial(server.URL) + if err != nil { + t.Fatalf("failed to dial: %s", err) + } + defer client.Close() + + newHdr := func(k, v string) http.Header { + header := http.Header{} + header.Set(k, v) + return header + } + ctx1 := NewContextWithHeaders(context.Background(), newHdr("key-0", "val-0")) + ctx2 := NewContextWithHeaders(ctx1, newHdr("key-1", "val-1")) + ctx3 := NewContextWithHeaders(ctx2, newHdr("key-2", "val-2")) + + expectedHeaders = 3 + if err := client.CallContext(ctx3, nil, "test"); err != ErrNoResult { + t.Error("call failed", err) + } + + expectedHeaders = 2 + if err := client.CallContext(ctx2, nil, "test"); err != ErrNoResult { + t.Error("call failed:", err) + } +} diff --git a/coreth/rpc/json.go b/coreth/rpc/json.go index 84b9fede..0f372be8 100644 --- a/coreth/rpc/json.go +++ b/coreth/rpc/json.go @@ -178,18 +178,22 @@ type ConnRemoteAddr interface { // support for parsing arguments and serializing (result) objects. type jsonCodec struct { remote string - closer sync.Once // close closed channel once - closeCh chan interface{} // closed on Close - decode func(v interface{}) error // decoder to allow multiple transports - encMu sync.Mutex // guards the encoder - encode func(v interface{}) error // encoder to allow multiple transports + closer sync.Once // close closed channel once + closeCh chan interface{} // closed on Close + decode decodeFunc // decoder to allow multiple transports + encMu sync.Mutex // guards the encoder + encode encodeFunc // encoder to allow multiple transports conn deadlineCloser } +type encodeFunc = func(v interface{}, isErrorResponse bool) error + +type decodeFunc = func(v interface{}) error + // NewFuncCodec creates a codec which uses the given functions to read and write. If conn // implements ConnRemoteAddr, log messages will use it to include the remote address of // the connection. 
-func NewFuncCodec(conn deadlineCloser, encode, decode func(v interface{}) error) ServerCodec { +func NewFuncCodec(conn deadlineCloser, encode encodeFunc, decode decodeFunc) ServerCodec { codec := &jsonCodec{ closeCh: make(chan interface{}), encode: encode, @@ -208,7 +212,11 @@ func NewCodec(conn Conn) ServerCodec { enc := json.NewEncoder(conn) dec := json.NewDecoder(conn) dec.UseNumber() - return NewFuncCodec(conn, enc.Encode, dec.Decode) + + encode := func(v interface{}, isErrorResponse bool) error { + return enc.Encode(v) + } + return NewFuncCodec(conn, encode, dec.Decode) } func (c *jsonCodec) peerInfo() PeerInfo { @@ -238,11 +246,11 @@ func (c *jsonCodec) readBatch() (messages []*jsonrpcMessage, batch bool, err err return messages, batch, nil } -func (c *jsonCodec) writeJSON(ctx context.Context, val interface{}) error { - return c.writeJSONSkipDeadline(ctx, val, false) +func (c *jsonCodec) writeJSON(ctx context.Context, val interface{}, isErrorResponse bool) error { + return c.writeJSONSkipDeadline(ctx, val, isErrorResponse, false) } -func (c *jsonCodec) writeJSONSkipDeadline(ctx context.Context, v interface{}, skip bool) error { +func (c *jsonCodec) writeJSONSkipDeadline(ctx context.Context, v interface{}, isErrorResponse bool, skip bool) error { c.encMu.Lock() defer c.encMu.Unlock() @@ -254,7 +262,7 @@ func (c *jsonCodec) writeJSONSkipDeadline(ctx context.Context, v interface{}, sk } } c.conn.SetWriteDeadline(deadline) - return c.encode(v) + return c.encode(v, isErrorResponse) } func (c *jsonCodec) close() { diff --git a/coreth/rpc/server.go b/coreth/rpc/server.go index fa79a8b5..13adf811 100644 --- a/coreth/rpc/server.go +++ b/coreth/rpc/server.go @@ -29,10 +29,10 @@ package rpc import ( "context" "io" + "sync" "sync/atomic" "time" - mapset "github.com/deckarep/golang-set" "github.com/ethereum/go-ethereum/log" ) @@ -55,9 +55,11 @@ const ( type Server struct { services serviceRegistry idgen func() ID - run int32 - codecs mapset.Set maximumDuration 
time.Duration + + mutex sync.Mutex + codecs map[ServerCodec]struct{} + run atomic.Bool } // NewServer creates a new server instance with no registered handlers. @@ -68,10 +70,10 @@ type Server struct { func NewServer(maximumDuration time.Duration) *Server { server := &Server{ idgen: randomIDGenerator(), - codecs: mapset.NewSet(), - run: 1, + codecs: make(map[ServerCodec]struct{}), maximumDuration: maximumDuration, } + server.run.Store(true) // Register the default service providing meta information about the RPC service such // as the services and methods it offers. rpcService := &RPCService{server} @@ -95,26 +97,40 @@ func (s *Server) RegisterName(name string, receiver interface{}) error { func (s *Server) ServeCodec(codec ServerCodec, options CodecOption, apiMaxDuration, refillRate, maxStored time.Duration) { defer codec.close() - // Don't serve if server is stopped. - if atomic.LoadInt32(&s.run) == 0 { + if !s.trackCodec(codec) { return } - - // Add the codec to the set so it can be closed by Stop. - s.codecs.Add(codec) - defer s.codecs.Remove(codec) + defer s.untrackCodec(codec) c := initClient(codec, s.idgen, &s.services, apiMaxDuration, refillRate, maxStored) <-codec.closed() c.Close() } +func (s *Server) trackCodec(codec ServerCodec) bool { + s.mutex.Lock() + defer s.mutex.Unlock() + + if !s.run.Load() { + return false // Don't serve if server is stopped. + } + s.codecs[codec] = struct{}{} + return true +} + +func (s *Server) untrackCodec(codec ServerCodec) { + s.mutex.Lock() + defer s.mutex.Unlock() + + delete(s.codecs, codec) +} + // serveSingleRequest reads and processes a single RPC request from the given codec. This // is used to serve HTTP connections. Subscriptions and reverse calls are not allowed in // this mode. func (s *Server) serveSingleRequest(ctx context.Context, codec ServerCodec) { // Don't serve if server is stopped. 
- if atomic.LoadInt32(&s.run) == 0 { + if !s.run.Load() { return } @@ -126,7 +142,8 @@ func (s *Server) serveSingleRequest(ctx context.Context, codec ServerCodec) { reqs, batch, err := codec.readBatch() if err != nil { if err != io.EOF { - codec.writeJSON(ctx, errorMessage(&invalidMessageError{"parse error"})) + resp := errorMessage(&invalidMessageError{"parse error"}) + codec.writeJSON(ctx, resp, true) } return } @@ -141,12 +158,14 @@ func (s *Server) serveSingleRequest(ctx context.Context, codec ServerCodec) { // requests to finish, then closes all codecs which will cancel pending requests and // subscriptions. func (s *Server) Stop() { - if atomic.CompareAndSwapInt32(&s.run, 1, 0) { + s.mutex.Lock() + defer s.mutex.Unlock() + + if s.run.CompareAndSwap(true, false) { log.Debug("RPC server shutting down") - s.codecs.Each(func(c interface{}) bool { - c.(ServerCodec).close() - return true - }) + for codec := range s.codecs { + codec.close() + } } } diff --git a/coreth/rpc/server_test.go b/coreth/rpc/server_test.go index ac70eb1c..e3b26623 100644 --- a/coreth/rpc/server_test.go +++ b/coreth/rpc/server_test.go @@ -54,7 +54,7 @@ func TestServerRegisterName(t *testing.T) { t.Fatalf("Expected service calc to be registered") } - wantCallbacks := 12 + wantCallbacks := 13 if len(svc.callbacks) != wantCallbacks { t.Errorf("Expected %d callbacks for service 'service', got %d", wantCallbacks, len(svc.callbacks)) } diff --git a/coreth/rpc/service.go b/coreth/rpc/service.go index cbdb8e30..5279ba16 100644 --- a/coreth/rpc/service.go +++ b/coreth/rpc/service.go @@ -224,19 +224,8 @@ func (c *callback) call(ctx context.Context, method string, args []reflect.Value return results[0].Interface(), nil } -// Is t context.Context or *context.Context? -func isContextType(t reflect.Type) bool { - for t.Kind() == reflect.Ptr { - t = t.Elem() - } - return t == contextType -} - // Does t satisfy the error interface? 
func isErrorType(t reflect.Type) bool { - for t.Kind() == reflect.Ptr { - t = t.Elem() - } return t.Implements(errorType) } @@ -255,7 +244,7 @@ func isPubSub(methodType reflect.Type) bool { if methodType.NumIn() < 2 || methodType.NumOut() != 2 { return false } - return isContextType(methodType.In(1)) && + return methodType.In(1) == contextType && isSubscriptionType(methodType.Out(0)) && isErrorType(methodType.Out(1)) } diff --git a/coreth/rpc/subscription.go b/coreth/rpc/subscription.go index 4e9fa323..3544a69f 100644 --- a/coreth/rpc/subscription.go +++ b/coreth/rpc/subscription.go @@ -185,11 +185,13 @@ func (n *Notifier) activate() error { func (n *Notifier) send(sub *Subscription, data json.RawMessage) error { params, _ := json.Marshal(&subscriptionResult{ID: string(sub.ID), Result: data}) ctx := context.Background() - return n.h.conn.writeJSON(ctx, &jsonrpcMessage{ + + msg := &jsonrpcMessage{ Version: vsn, Method: n.namespace + notificationMethodSuffix, Params: params, - }) + } + return n.h.conn.writeJSON(ctx, msg, false) } // A Subscription is created by a notifier and tied to that notifier. The client can use diff --git a/coreth/rpc/testservice_test.go b/coreth/rpc/testservice_test.go index c8d0d36e..c06efa98 100644 --- a/coreth/rpc/testservice_test.go +++ b/coreth/rpc/testservice_test.go @@ -88,6 +88,10 @@ func (o *MarshalErrObj) MarshalText() ([]byte, error) { func (s *testService) NoArgsRets() {} +func (s *testService) Null() any { + return nil +} + func (s *testService) Echo(str string, i int, args *echoArgs) echoResult { return echoResult{str, i, args} } diff --git a/coreth/rpc/types.go b/coreth/rpc/types.go index efb8b221..5ccaeb4f 100644 --- a/coreth/rpc/types.go +++ b/coreth/rpc/types.go @@ -31,7 +31,6 @@ import ( "encoding/json" "fmt" "math" - "strconv" "strings" "github.com/ethereum/go-ethereum/common" @@ -60,8 +59,10 @@ type ServerCodec interface { // jsonWriter can write JSON messages to its underlying connection. 
// Implementations must be safe for concurrent use. type jsonWriter interface { - writeJSON(context.Context, interface{}) error - writeJSONSkipDeadline(context.Context, interface{}, bool) error + // writeJSON writes a message to the connection. + writeJSON(ctx context.Context, msg interface{}, isError bool) error + // writeJSON writes a message to the connection with the option of skipping the deadline. + writeJSONSkipDeadline(ctx context.Context, msg interface{}, isError bool, skip bool) error // Closed returns a channel which is closed when the connection is closed. closed() <-chan interface{} // RemoteAddr returns the peer address of the connection. @@ -71,14 +72,15 @@ type jsonWriter interface { type BlockNumber int64 const ( + SafeBlockNumber = BlockNumber(-4) AcceptedBlockNumber = BlockNumber(-3) - PendingBlockNumber = BlockNumber(-2) - LatestBlockNumber = BlockNumber(-1) + LatestBlockNumber = BlockNumber(-2) + PendingBlockNumber = BlockNumber(-1) EarliestBlockNumber = BlockNumber(0) ) // UnmarshalJSON parses the given JSON fragment into a BlockNumber. It supports: -// - "accepted", "finalized", "latest", "earliest" or "pending" as string arguments +// - "accepted", "safe", "finalized", "latest", "earliest" or "pending" as string arguments // - the block number // Returned errors: // - an invalid block number error when the given argument isn't a known strings @@ -99,11 +101,13 @@ func (bn *BlockNumber) UnmarshalJSON(data []byte) error { case "pending": *bn = PendingBlockNumber return nil - // Include "finalized" and "safe" as an option for compatibility with - // FinalizedBlockNumber and SafeBlockNumber from geth. - case "accepted", "finalized", "safe": + // Include "finalized" as an option for compatibility with FinalizedBlockNumber from geth. 
+	case "accepted", "finalized": *bn = AcceptedBlockNumber return nil + case "safe": + *bn = SafeBlockNumber + return nil } blckNum, err := hexutil.DecodeUint64(input) @@ -117,31 +121,41 @@ func (bn *BlockNumber) UnmarshalJSON(data []byte) error { return nil } +// Int64 returns the block number as int64. +func (bn BlockNumber) Int64() int64 { + return (int64)(bn) +} + // MarshalText implements encoding.TextMarshaler. It marshals: // - "accepted", "latest", "earliest" or "pending" as strings // - other numbers as hex func (bn BlockNumber) MarshalText() ([]byte, error) { + return []byte(bn.String()), nil +} + +func (bn BlockNumber) String() string { switch bn { case EarliestBlockNumber: - return []byte("earliest"), nil + return "earliest" case LatestBlockNumber: - return []byte("latest"), nil + return "latest" case PendingBlockNumber: - return []byte("pending"), nil + return "pending" case AcceptedBlockNumber: - return []byte("accepted"), nil + return "accepted" + case SafeBlockNumber: + return "safe" default: - return hexutil.Uint64(bn).MarshalText() + if bn < 0 { + return fmt.Sprintf("<invalid %d>", bn) + } + return hexutil.Uint64(bn).String() } } -func (bn BlockNumber) Int64() int64 { - return (int64)(bn) -} - // IsAccepted returns true if this blockNumber should be treated as a request for the last accepted block func (bn BlockNumber) IsAccepted() bool { - return bn < EarliestBlockNumber && bn >= AcceptedBlockNumber + return bn < EarliestBlockNumber && bn >= SafeBlockNumber } type BlockNumberOrHash struct { @@ -181,12 +195,15 @@ func (bnh *BlockNumberOrHash) UnmarshalJSON(data []byte) error { bn := PendingBlockNumber bnh.BlockNumber = &bn return nil - // Include "finalized" and "safe" as an option for compatibility with - // FinalizedBlockNumber and SafeBlockNumber from geth. - case "accepted", "finalized", "safe": + // Include "finalized" as an option for compatibility with FinalizedBlockNumber from geth.
+ case "accepted", "finalized": bn := AcceptedBlockNumber bnh.BlockNumber = &bn return nil + case "safe": + bn := SafeBlockNumber + bnh.BlockNumber = &bn + return nil default: if len(input) == 66 { hash := common.Hash{} @@ -220,7 +237,7 @@ func (bnh *BlockNumberOrHash) Number() (BlockNumber, bool) { func (bnh *BlockNumberOrHash) String() string { if bnh.BlockNumber != nil { - return strconv.Itoa(int(*bnh.BlockNumber)) + return bnh.BlockNumber.String() } if bnh.BlockHash != nil { return bnh.BlockHash.String() @@ -250,24 +267,3 @@ func BlockNumberOrHashWithHash(hash common.Hash, canonical bool) BlockNumberOrHa RequireCanonical: canonical, } } - -// DecimalOrHex unmarshals a non-negative decimal or hex parameter into a uint64. -type DecimalOrHex uint64 - -// UnmarshalJSON implements json.Unmarshaler. -func (dh *DecimalOrHex) UnmarshalJSON(data []byte) error { - input := strings.TrimSpace(string(data)) - if len(input) >= 2 && input[0] == '"' && input[len(input)-1] == '"' { - input = input[1 : len(input)-1] - } - - value, err := strconv.ParseUint(input, 10, 64) - if err != nil { - value, err = hexutil.DecodeUint64(input) - } - if err != nil { - return err - } - *dh = DecimalOrHex(value) - return nil -} diff --git a/coreth/rpc/types_test.go b/coreth/rpc/types_test.go index 09041cde..a255c1e9 100644 --- a/coreth/rpc/types_test.go +++ b/coreth/rpc/types_test.go @@ -163,3 +163,24 @@ func TestBlockNumberOrHash_WithNumber_MarshalAndUnmarshal(t *testing.T) { }) } } + +func TestBlockNumberOrHash_StringAndUnmarshal(t *testing.T) { + tests := []BlockNumberOrHash{ + BlockNumberOrHashWithNumber(math.MaxInt64), + BlockNumberOrHashWithNumber(PendingBlockNumber), + BlockNumberOrHashWithNumber(LatestBlockNumber), + BlockNumberOrHashWithNumber(EarliestBlockNumber), + BlockNumberOrHashWithNumber(32), + BlockNumberOrHashWithHash(common.Hash{0xaa}, false), + } + for _, want := range tests { + marshalled, _ := json.Marshal(want.String()) + var have BlockNumberOrHash + if err := 
json.Unmarshal(marshalled, &have); err != nil { + t.Fatalf("cannot unmarshal (%v): %v", string(marshalled), err) + } + if !reflect.DeepEqual(want, have) { + t.Fatalf("wrong result: have %v, want %v", have, want) + } + } +} diff --git a/coreth/rpc/websocket.go b/coreth/rpc/websocket.go index a2031315..d753d266 100644 --- a/coreth/rpc/websocket.go +++ b/coreth/rpc/websocket.go @@ -37,7 +37,7 @@ import ( "sync" "time" - mapset "github.com/deckarep/golang-set" + mapset "github.com/deckarep/golang-set/v2" "github.com/ethereum/go-ethereum/log" "github.com/gorilla/websocket" ) @@ -48,7 +48,7 @@ const ( wsPingInterval = 30 * time.Second wsPingWriteTimeout = 5 * time.Second wsPongTimeout = 30 * time.Second - wsMessageSizeLimit = 15 * 1024 * 1024 + wsMessageSizeLimit = 32 * 1024 * 1024 ) var wsBufferPool = new(sync.Pool) @@ -83,7 +83,7 @@ func (s *Server) WebsocketHandlerWithDuration(allowedOrigins []string, apiMaxDur // websocket upgrade process. When a '*' is specified as an allowed origins all // connections are accepted. 
func wsHandshakeValidator(allowedOrigins []string) func(*http.Request) bool { - origins := mapset.NewSet() + origins := mapset.NewSet[string]() allowAllOrigins := false for _, origin := range allowedOrigins { @@ -136,10 +136,10 @@ func (e wsHandshakeError) Error() string { return s } -func originIsAllowed(allowedOrigins mapset.Set, browserOrigin string) bool { +func originIsAllowed(allowedOrigins mapset.Set[string], browserOrigin string) bool { it := allowedOrigins.Iterator() for origin := range it.C { - if ruleAllowsOrigin(origin.(string), browserOrigin) { + if ruleAllowsOrigin(origin, browserOrigin) { return true } } @@ -238,6 +238,7 @@ func newClientTransportWS(endpoint string, cfg *clientConfig) (reconnectFunc, er ReadBufferSize: wsReadBuffer, WriteBufferSize: wsWriteBuffer, WriteBufferPool: wsBufferPool, + Proxy: http.ProxyFromEnvironment, } } @@ -301,8 +302,12 @@ func newWebsocketCodec(conn *websocket.Conn, host string, req http.Header) Serve conn.SetReadDeadline(time.Time{}) return nil }) + + encode := func(v interface{}, isErrorResponse bool) error { + return conn.WriteJSON(v) + } wc := &websocketCodec{ - jsonCodec: NewFuncCodec(conn, conn.WriteJSON, conn.ReadJSON).(*jsonCodec), + jsonCodec: NewFuncCodec(conn, encode, conn.ReadJSON).(*jsonCodec), conn: conn, pingReset: make(chan struct{}, 1), info: PeerInfo{ @@ -329,12 +334,12 @@ func (wc *websocketCodec) peerInfo() PeerInfo { return wc.info } -func (wc *websocketCodec) writeJSON(ctx context.Context, v interface{}) error { - return wc.writeJSONSkipDeadline(ctx, v, false) +func (wc *websocketCodec) writeJSON(ctx context.Context, v interface{}, isError bool) error { + return wc.writeJSONSkipDeadline(ctx, v, isError, false) } -func (wc *websocketCodec) writeJSONSkipDeadline(ctx context.Context, v interface{}, skip bool) error { - err := wc.jsonCodec.writeJSONSkipDeadline(ctx, v, skip) +func (wc *websocketCodec) writeJSONSkipDeadline(ctx context.Context, v interface{}, isError bool, skip bool) error { + err := 
wc.jsonCodec.writeJSONSkipDeadline(ctx, v, isError, skip) if err == nil { // Notify pingLoop to delay the next idle ping. select { diff --git a/coreth/rpc/websocket_test.go b/coreth/rpc/websocket_test.go index 31e9847a..c9a8c439 100644 --- a/coreth/rpc/websocket_test.go +++ b/coreth/rpc/websocket_test.go @@ -29,15 +29,10 @@ package rpc import ( "context" "errors" - "io" "net" "net/http" "net/http/httptest" - "net/http/httputil" - "net/url" - "os" "strings" - "sync/atomic" "testing" "time" @@ -240,66 +235,6 @@ func TestClientWebsocketLargeMessage(t *testing.T) { } } -func TestClientWebsocketSevered(t *testing.T) { - if os.Getenv("RUN_FLAKY_TESTS") != "true" { - t.Skip("FLAKY") - } - t.Parallel() - - var ( - server = wsPingTestServer(t, nil) - ctx = context.Background() - ) - defer server.Shutdown(ctx) - - u, err := url.Parse("http://" + server.Addr) - if err != nil { - t.Fatal(err) - } - rproxy := httputil.NewSingleHostReverseProxy(u) - var severable *severableReadWriteCloser - rproxy.ModifyResponse = func(response *http.Response) error { - severable = &severableReadWriteCloser{ReadWriteCloser: response.Body.(io.ReadWriteCloser)} - response.Body = severable - return nil - } - frontendProxy := httptest.NewServer(rproxy) - defer frontendProxy.Close() - - wsURL := "ws:" + strings.TrimPrefix(frontendProxy.URL, "http:") - client, err := DialWebsocket(ctx, wsURL, "") - if err != nil { - t.Fatalf("client dial error: %v", err) - } - defer client.Close() - - resultChan := make(chan int) - sub, err := client.EthSubscribe(ctx, resultChan, "foo") - if err != nil { - t.Fatalf("client subscribe error: %v", err) - } - - // sever the connection - severable.Sever() - - // Wait for subscription error. 
- timeout := time.NewTimer(5 * wsPingInterval) - defer timeout.Stop() - for { - select { - case err := <-sub.Err(): - t.Log("client subscription error:", err) - return - case result := <-resultChan: - t.Error("unexpected result:", result) - return - case <-timeout.C: - t.Error("didn't get any error within the test timeout") - return - } - } -} - // wsPingTestServer runs a WebSocket server which accepts a single subscription request. // When a value arrives on sendPing, the server sends a ping frame, waits for a matching // pong and finally delivers a single subscription result. @@ -402,31 +337,3 @@ func wsPingTestHandler(t *testing.T, conn *websocket.Conn, shutdown, sendPing <- } } } - -// severableReadWriteCloser wraps an io.ReadWriteCloser and provides a Sever() method to drop writes and read empty. -type severableReadWriteCloser struct { - io.ReadWriteCloser - severed int32 // atomic -} - -func (s *severableReadWriteCloser) Sever() { - atomic.StoreInt32(&s.severed, 1) -} - -func (s *severableReadWriteCloser) Read(p []byte) (n int, err error) { - if atomic.LoadInt32(&s.severed) > 0 { - return 0, nil - } - return s.ReadWriteCloser.Read(p) -} - -func (s *severableReadWriteCloser) Write(p []byte) (n int, err error) { - if atomic.LoadInt32(&s.severed) > 0 { - return len(p), nil - } - return s.ReadWriteCloser.Write(p) -} - -func (s *severableReadWriteCloser) Close() error { - return s.ReadWriteCloser.Close() -} diff --git a/coreth/scripts/build.sh b/coreth/scripts/build.sh index 3a3c8279..b79d232b 100755 --- a/coreth/scripts/build.sh +++ b/coreth/scripts/build.sh @@ -49,5 +49,5 @@ fi coreth_commit=${CORETH_COMMIT:-$( git rev-list -1 HEAD )} # Build Coreth, which is run as a subprocess -echo "Building Coreth Version: $coreth_version; GitCommit: $coreth_commit" -go build -modcacherw -ldflags "-X github.com/ava-labs/coreth/plugin/evm.GitCommit=$coreth_commit -X github.com/ava-labs/coreth/plugin/evm.Version=$coreth_version" -o "$binary_path" "plugin/"*.go +echo "Building 
Coreth @ GitCommit: $coreth_commit" +go build -modcacherw -ldflags "-X github.com/ava-labs/coreth/plugin/evm.GitCommit=$coreth_commit" -o "$binary_path" "plugin/"*.go diff --git a/coreth/scripts/build_test.sh b/coreth/scripts/build_test.sh index 848d538a..f6af9434 100755 --- a/coreth/scripts/build_test.sh +++ b/coreth/scripts/build_test.sh @@ -17,4 +17,4 @@ source "$CORETH_PATH"/scripts/constants.sh # We pass in the arguments to this script directly to enable easily passing parameters such as enabling race detection, # parallelism, and test coverage. -go test -coverprofile=coverage.out -covermode=atomic -timeout="30m" ./... $@ +go test -coverprofile=coverage.out -covermode=atomic -timeout="30m" ./... "$@" diff --git a/coreth/scripts/constants.sh b/coreth/scripts/constants.sh index 7eb87f37..ffdc542f 100644 --- a/coreth/scripts/constants.sh +++ b/coreth/scripts/constants.sh @@ -1,5 +1,10 @@ #!/usr/bin/env bash +# Ignore warnings about variables appearing unused since this file is not the consumer of the variables it defines. +# shellcheck disable=SC2034 + +set -euo pipefail + # Set the PATHS GOPATH="$(go env GOPATH)" diff --git a/coreth/scripts/coverage.sh b/coreth/scripts/coverage.sh index 4de86b1c..e5975b8b 100755 --- a/coreth/scripts/coverage.sh +++ b/coreth/scripts/coverage.sh @@ -9,7 +9,7 @@ if [ ! 
-f "coverage.out" ]; then exit 0 fi -totalCoverage=`go tool cover -func=coverage.out | grep total | grep -Eo '[0-9]+\.[0-9]+'` +totalCoverage=$(go tool cover -func=coverage.out | grep total | grep -Eo '[0-9]+\.[0-9]+') echo "Current test coverage : $totalCoverage %" echo "========================================" diff --git a/coreth/scripts/geth-allowed-packages.txt b/coreth/scripts/geth-allowed-packages.txt index 95f53d07..beb384e4 100644 --- a/coreth/scripts/geth-allowed-packages.txt +++ b/coreth/scripts/geth-allowed-packages.txt @@ -3,6 +3,7 @@ "github.com/ethereum/go-ethereum/common/bitutil" "github.com/ethereum/go-ethereum/common/compiler" "github.com/ethereum/go-ethereum/common/hexutil" +"github.com/ethereum/go-ethereum/common/lru" "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/common/prque" "github.com/ethereum/go-ethereum/core/asm" @@ -10,6 +11,10 @@ "github.com/ethereum/go-ethereum/crypto/blake2b" "github.com/ethereum/go-ethereum/crypto/bls12381" "github.com/ethereum/go-ethereum/crypto/bn256" +"github.com/ethereum/go-ethereum/ethdb" +"github.com/ethereum/go-ethereum/ethdb/leveldb" +"github.com/ethereum/go-ethereum/ethdb/memorydb" +"github.com/ethereum/go-ethereum/ethdb/pebble" "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rlp" diff --git a/coreth/scripts/lint.sh b/coreth/scripts/lint.sh index f41d6b56..dcad3007 100755 --- a/coreth/scripts/lint.sh +++ b/coreth/scripts/lint.sh @@ -4,4 +4,17 @@ set -o errexit set -o nounset set -o pipefail +# Upstream is compatible with v1.54.x at time of this writing, and +# checking for this specific version is an attempt to avoid skew +# between local and CI execution. 
The latest version (v1.55.1) seems +# to cause spurious failures +KNOWN_GOOD_VERSION="v1.54" +VERSION="$(golangci-lint --version | sed -e 's+golangci-lint has version \(v1.*\)\..* built.*+\1+')" +if [[ "${VERSION}" != "${KNOWN_GOOD_VERSION}" ]]; then + echo "expected golangci-lint ${KNOWN_GOOD_VERSION}, but ${VERSION} was used" + echo "${KNOWN_GOOD_VERSION} is used in CI and should be used locally to ensure compatible results" + echo "installation command: go install github.com/golangci/golangci-lint/cmd/golangci-lint@${KNOWN_GOOD_VERSION}" + exit 255 +fi + golangci-lint run --path-prefix=. --timeout 3m diff --git a/coreth/scripts/lint_allowed_geth_imports.sh b/coreth/scripts/lint_allowed_geth_imports.sh old mode 100644 new mode 100755 index b6801485..e4281136 --- a/coreth/scripts/lint_allowed_geth_imports.sh +++ b/coreth/scripts/lint_allowed_geth_imports.sh @@ -9,7 +9,7 @@ set -o pipefail # 2. Sort the unique results # #. Print out the difference between the search results and the list of specified allowed package imports from geth. extra_imports=$(grep -r --include='*.go' '"github.com/ethereum/go-ethereum/.*"' -o -h | sort -u | comm -23 - ./scripts/geth-allowed-packages.txt) -if [ ! -z "${extra_imports}" ]; then +if [ -n "${extra_imports}" ]; then echo "new go-ethereum imports should be added to ./scripts/geth-allowed-packages.txt to prevent accidental imports:" echo "${extra_imports}" exit 1 diff --git a/coreth/scripts/mock.gen.sh b/coreth/scripts/mock.gen.sh new file mode 100755 index 00000000..87465d43 --- /dev/null +++ b/coreth/scripts/mock.gen.sh @@ -0,0 +1,44 @@ +#!/usr/bin/env bash + +set -euo pipefail + +# Root directory +CORETH_PATH=$( + cd "$(dirname "${BASH_SOURCE[0]}")" + cd .. && pwd +) + +if ! [[ "$0" =~ scripts/mock.gen.sh ]]; then + echo "must be run from repository root" + exit 255 +fi + +# https://github.com/uber-go/mock +go install -v go.uber.org/mock/mockgen@v0.4.0 + +if ! 
command -v go-license &>/dev/null; then + echo "go-license not found, installing..." + # https://github.com/palantir/go-license + go install -v github.com/palantir/go-license@v1.25.0 +fi + +# Load the versions +source "$CORETH_PATH"/scripts/versions.sh + +# Load the constants +source "$CORETH_PATH"/scripts/constants.sh + +# tuples of (source interface import path, comma-separated interface names, output file path) +input="scripts/mocks.mockgen.txt" +while IFS= read -r line; do + IFS='=' read -r src_import_path interface_name output_path <<<"${line}" + package_name=$(basename "$(dirname "$output_path")") + echo "Generating ${output_path}..." + mockgen -package="${package_name}" -destination="${output_path}" "${src_import_path}" "${interface_name}" + + go-license \ + --config=./header.yml \ + "${output_path}" +done <"$input" + +echo "SUCCESS" diff --git a/coreth/scripts/mocks.mockgen.txt b/coreth/scripts/mocks.mockgen.txt new file mode 100644 index 00000000..694343e4 --- /dev/null +++ b/coreth/scripts/mocks.mockgen.txt @@ -0,0 +1,2 @@ +github.com/ava-labs/coreth/precompile/precompileconfig=Predicater,Config,ChainConfig,Accepter=precompile/precompileconfig/mocks.go +github.com/ava-labs/coreth/precompile/contract=BlockContext,AccessibleState,StateDB=precompile/contract/mocks.go diff --git a/coreth/scripts/shellcheck.sh b/coreth/scripts/shellcheck.sh new file mode 100755 index 00000000..61fc09f9 --- /dev/null +++ b/coreth/scripts/shellcheck.sh @@ -0,0 +1,39 @@ +#!/usr/bin/env bash + +set -euo pipefail + +VERSION="v0.9.0" + +function get_version { + local target_path=$1 + if command -v "${target_path}" > /dev/null; then + echo "v$("${target_path}" --version | grep version: | awk '{print $2}')" + fi +} + +REPO_ROOT=$( cd "$( dirname "${BASH_SOURCE[0]}" )"; cd .. 
&& pwd ) + +SYSTEM_VERSION="$(get_version shellcheck)" +if [[ "${SYSTEM_VERSION}" == "${VERSION}" ]]; then + SHELLCHECK=shellcheck +else + # Try to install a local version + SHELLCHECK="${REPO_ROOT}/bin/shellcheck" + LOCAL_VERSION="$(get_version "${SHELLCHECK}")" + if [[ -z "${LOCAL_VERSION}" || "${LOCAL_VERSION}" != "${VERSION}" ]]; then + if which sw_vers &> /dev/null; then + echo "on macos, only x86_64 binaries are available so rosetta is required" + echo "to avoid using rosetta, install via homebrew: brew install shellcheck" + DIST=darwin.x86_64 + else + # Linux - binaries for common arches *should* be available + arch="$(uname -i)" + DIST="linux.${arch}" + fi + curl -s -L "https://github.com/koalaman/shellcheck/releases/download/${VERSION}/shellcheck-${VERSION}.${DIST}.tar.xz" | tar Jxv -C /tmp > /dev/null + mkdir -p "$(dirname "${SHELLCHECK}")" + cp /tmp/shellcheck-"${VERSION}"/shellcheck "${SHELLCHECK}" + fi +fi + +find "${REPO_ROOT}" -name "*.sh" -type f -print0 | xargs -0 "${SHELLCHECK}" "${@}" diff --git a/coreth/scripts/tests.e2e.sh b/coreth/scripts/tests.e2e.sh new file mode 100755 index 00000000..b7530503 --- /dev/null +++ b/coreth/scripts/tests.e2e.sh @@ -0,0 +1,51 @@ +#!/usr/bin/env bash + +set -euo pipefail + +# Run AvalancheGo e2e tests from the target version against the current state of coreth. + +# e.g., +# ./scripts/tests.e2e.sh +# AVALANCHE_VERSION=v1.10.x ./scripts/tests.e2e.sh +if ! [[ "$0" =~ scripts/tests.e2e.sh ]]; then + echo "must be run from repository root" + exit 255 +fi + +# Coreth root directory +CORETH_PATH=$( cd "$( dirname "${BASH_SOURCE[0]}" )"; cd .. 
&& pwd ) + +# Allow configuring the clone path to point to an existing clone +AVALANCHEGO_CLONE_PATH="${AVALANCHEGO_CLONE_PATH:-avalanchego}" + +# Load the version +source "$CORETH_PATH"/scripts/versions.sh + +# Always return to the coreth path on exit +function cleanup { + cd "${CORETH_PATH}" +} +trap cleanup EXIT + +echo "checking out target AvalancheGo version ${avalanche_version}" +if [[ -d "${AVALANCHEGO_CLONE_PATH}" ]]; then + echo "updating existing clone" + cd "${AVALANCHEGO_CLONE_PATH}" + git fetch +else + echo "creating new clone" + git clone https://github.com/ava-labs/avalanchego.git "${AVALANCHEGO_CLONE_PATH}" + cd "${AVALANCHEGO_CLONE_PATH}" +fi +# Branch will be reset to $avalanche_version if it already exists +git checkout -B "test-${avalanche_version}" "${avalanche_version}" + +echo "updating coreth dependency to point to ${CORETH_PATH}" +go mod edit -replace "github.com/ava-labs/coreth=${CORETH_PATH}" +go mod tidy + +echo "building avalanchego" +./scripts/build.sh -r + +echo "running AvalancheGo e2e tests" +E2E_SERIAL=1 ./scripts/tests.e2e.sh --ginkgo.label-filter='c || uses-c' diff --git a/coreth/scripts/versions.sh b/coreth/scripts/versions.sh index f2a7c6cf..25817586 100644 --- a/coreth/scripts/versions.sh +++ b/coreth/scripts/versions.sh @@ -1,6 +1,9 @@ #!/usr/bin/env bash -# Set up the versions to be used -coreth_version=${CORETH_VERSION:-'v0.12.0'} +# Ignore warnings about variables appearing unused since this file is not the consumer of the variables it defines. 
+# shellcheck disable=SC2034 + +set -euo pipefail + # Don't export them as they're used in the context of other calls -avalanche_version=${AVALANCHE_VERSION:-'v1.9.16'} +avalanche_version=${AVALANCHE_VERSION:-'e248179ae75918581fec77ba09fd1ca939bb1844'} diff --git a/coreth/sync/README.md b/coreth/sync/README.md index f473a9ac..2e6437a7 100644 --- a/coreth/sync/README.md +++ b/coreth/sync/README.md @@ -9,9 +9,9 @@ _Note: `defaultSyncableInterval` must be divisible by `CommitInterval` (= 4096). State sync is faster than bootstrapping and uses less bandwidth and computation: - Nodes joining the network do not process all the state transitions. -- The amount of data sent over the network is porportionate to the amount of state not the chain's length +- The amount of data sent over the network is proportionate to the amount of state not the chain's length -_Note: nodes joining the network thorugh state sync will not have historical state prior to the syncable block._ +_Note: nodes joining the network through state sync will not have historical state prior to the syncable block._ ## What is the chain state? The node needs the following data from its peers to continue processing blocks from a syncable block: @@ -27,8 +27,8 @@ State sync code is structured as follows: - `LeafsRequestHandler`: handles requests for trie data (leafs) - `CodeRequestHandler`: handles requests for contract code - `BlockRequestHandler`: handles requests for blocks - - _Note: There are response size and time limits in place so peers joining the network do not overload peers providing data. Additionally, the engine tracks the CPU usage of each peer for such messsages and throttles inbound requests accordingly._ -- `sync/client`: Validates reponses from peers and provides support for syncing tries. + - _Note: There are response size and time limits in place so peers joining the network do not overload peers providing data. 
Additionally, the engine tracks the CPU usage of each peer for such messages and throttles inbound requests accordingly._ +- `sync/client`: Validates responses from peers and provides support for syncing tries. - `sync/statesync`: Uses `sync/client` to sync EVM related state: Accounts, storage tries, and contract code. - `plugin/evm/atomicSyncer`: Uses `sync/client` to sync the atomic trie. - `plugin/evm/`: The engine expects the VM to implement `StateSyncableVM` interface, @@ -50,7 +50,7 @@ The above information is called a _state summary_, and each syncable block corre 1. The engine calls `StateSyncEnabled`. The VM returns `true` to initiate state sync, or `false` to start bootstrapping. In `coreth`, this is controlled by the `state-sync-enabled` flag. 1. The engine calls `GetOngoingSyncStateSummary`. If the VM has a previously interrupted sync to resume it returns that summary. Otherwise, it returns `ErrNotFound`. By default, `coreth` will resume an interrupted sync. -1. The engine samples peers for their latest available summaries, then verifies the correctness and availablility of each sampled summary with validators. The messaging flow is documented [here](https://github.com/ava-labs/avalanchego/blob/master/snow/engine/snowman/block/README.md). +1. The engine samples peers for their latest available summaries, then verifies the correctness and availability of each sampled summary with validators. The messaging flow is documented [here](https://github.com/ava-labs/avalanchego/blob/master/snow/engine/snowman/block/README.md). 1. The engine calls `Accept` on the chosen summary. The VM may return `false` to skip syncing to this summary (`coreth` skips state sync for less than `defaultStateSyncMinBlocks = 300_000` blocks). If the VM decides to perform the sync, it must return `true` without blocking and fetch the state from its peers asynchronously. 1. The VM sends `common.StateSyncDone` on the `toEngine` channel on completion. 1. 
The engine calls `VM.SetState(Bootstrapping)`. Then, blocks after the syncable block are processed one by one. @@ -106,7 +106,7 @@ Once the tries have been synced, this method: - Verifies the block the engine has received matches the expected block hash and block number in the summary, - Adds a checkpoint to the `core.ChainIndexer` (to avoid indexing missing blocks) - Resets in-memory and on disk pointers on the `core.BlockChain` struct. -- Updates VM's last accepted block, +- Updates VM's last accepted block. - Applies the atomic operations from the atomic trie to shared memory. (Note: the VM will resume applying these operations even if the VM is shutdown prior to completing this step) @@ -127,4 +127,4 @@ While state sync is faster than normal bootstrapping, the process may take sever | `state-sync-skip-resume` | `bool` | set to true to avoid resuming an ongoing sync | `false` | | `state-sync-min-blocks` | `uint64` | Minimum number of blocks the chain must be ahead of local state to prefer state sync over bootstrapping | `300,000` | | `state-sync-server-trie-cache` | `int` | Size of trie cache to serve state sync data in MB. Should be set to multiples of `64`. | `64` | -| `state-sync-ids` | `string` | a comma seperated list of `NodeID-` prefixed node IDs to sync data from. If not provided, peers are randomly selected. | | \ No newline at end of file +| `state-sync-ids` | `string` | a comma separated list of `NodeID-` prefixed node IDs to sync data from. If not provided, peers are randomly selected. 
| | diff --git a/coreth/sync/client/client.go b/coreth/sync/client/client.go index d77913d0..dd6d7ad4 100644 --- a/coreth/sync/client/client.go +++ b/coreth/sync/client/client.go @@ -13,7 +13,6 @@ import ( "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/coreth/ethdb/memorydb" "github.com/ava-labs/coreth/params" "github.com/ava-labs/coreth/sync/client/stats" @@ -24,11 +23,12 @@ import ( "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" + "github.com/ava-labs/coreth/core/rawdb" "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/ethdb" "github.com/ava-labs/coreth/peer" "github.com/ava-labs/coreth/plugin/evm/message" "github.com/ava-labs/coreth/trie" + "github.com/ethereum/go-ethereum/ethdb" ) const ( @@ -159,7 +159,7 @@ func parseLeafsResponse(codec codec.Manager, reqIntf message.Request, data []byt // Populate proof when ProofVals are present in the response. Its ok to pass it as nil to the trie.VerifyRangeProof // function as it will assert that all the leaves belonging to the specified root are present. if len(leafsResponse.ProofVals) > 0 { - proof = memorydb.New() + proof = rawdb.NewMemoryDatabase() defer proof.Close() for _, proofVal := range leafsResponse.ProofVals { proofKey := crypto.Keccak256(proofVal) @@ -337,14 +337,14 @@ func (c *client) get(ctx context.Context, request message.Request, parseFn parse if c.isSongbirdCode { minVersion = StateSyncVersionSgb } - response, nodeID, err = c.networkClient.SendAppRequestAny(minVersion, requestBytes) + response, nodeID, err = c.networkClient.SendAppRequestAny(ctx, minVersion, requestBytes) } else { // get the next nodeID using the nodeIdx offset. If we're out of nodes, loop back to 0 // we do this every attempt to ensure we get a different node each time if possible. 
nodeIdx := atomic.AddUint32(&c.stateSyncNodeIdx, 1) nodeID = c.stateSyncNodes[nodeIdx%uint32(len(c.stateSyncNodes))] - response, err = c.networkClient.SendAppRequest(nodeID, requestBytes) + response, err = c.networkClient.SendAppRequest(ctx, nodeID, requestBytes) } metric.UpdateRequestLatency(time.Since(start)) @@ -363,7 +363,7 @@ func (c *client) get(ctx context.Context, request message.Request, parseFn parse responseIntf, numElements, err = parseFn(c.codec, request, response) if err != nil { lastErr = err - log.Info("could not validate response, retrying", "nodeID", nodeID, "attempt", attempt, "request", request, "err", err) + log.Debug("could not validate response, retrying", "nodeID", nodeID, "attempt", attempt, "request", request, "err", err) c.networkClient.TrackBandwidth(nodeID, 0) metric.IncFailed() metric.IncInvalidResponse() diff --git a/coreth/sync/client/client_test.go b/coreth/sync/client/client_test.go index 4eb97e1d..bba3a408 100644 --- a/coreth/sync/client/client_test.go +++ b/coreth/sync/client/client_test.go @@ -17,13 +17,14 @@ import ( "github.com/ava-labs/coreth/consensus/dummy" "github.com/ava-labs/coreth/core" + "github.com/ava-labs/coreth/core/rawdb" "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/ethdb/memorydb" "github.com/ava-labs/coreth/params" "github.com/ava-labs/coreth/plugin/evm/message" clientstats "github.com/ava-labs/coreth/sync/client/stats" "github.com/ava-labs/coreth/sync/handlers" handlerstats "github.com/ava-labs/coreth/sync/handlers/stats" + "github.com/ava-labs/coreth/sync/syncutils" "github.com/ava-labs/coreth/trie" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" @@ -141,7 +142,7 @@ func TestGetBlocks(t *testing.T) { var gspec = &core.Genesis{ Config: params.TestChainConfig, } - memdb := memorydb.New() + memdb := rawdb.NewMemoryDatabase() genesis := gspec.MustCommit(memdb) engine := dummy.NewETHFaker() numBlocks := 110 @@ -409,9 +410,9 @@ func TestGetLeafs(t 
*testing.T) { const leafsLimit = 1024 - trieDB := trie.NewDatabase(memorydb.New()) - largeTrieRoot, largeTrieKeys, _ := trie.GenerateTrie(t, trieDB, 100_000, common.HashLength) - smallTrieRoot, _, _ := trie.GenerateTrie(t, trieDB, leafsLimit, common.HashLength) + trieDB := trie.NewDatabase(rawdb.NewMemoryDatabase()) + largeTrieRoot, largeTrieKeys, _ := syncutils.GenerateTrie(t, trieDB, 100_000, common.HashLength) + smallTrieRoot, _, _ := syncutils.GenerateTrie(t, trieDB, leafsLimit, common.HashLength) handler := handlers.NewLeafsRequestHandler(trieDB, nil, message.Codec, handlerstats.NewNoopHandlerStats()) client := NewClient(&ClientConfig{ @@ -792,8 +793,8 @@ func TestGetLeafs(t *testing.T) { func TestGetLeafsRetries(t *testing.T) { rand.Seed(1) - trieDB := trie.NewDatabase(memorydb.New()) - root, _, _ := trie.GenerateTrie(t, trieDB, 100_000, common.HashLength) + trieDB := trie.NewDatabase(rawdb.NewMemoryDatabase()) + root, _, _ := syncutils.GenerateTrie(t, trieDB, 100_000, common.HashLength) handler := handlers.NewLeafsRequestHandler(trieDB, nil, message.Codec, handlerstats.NewNoopHandlerStats()) mockNetClient := &mockNetwork{} @@ -811,7 +812,7 @@ func TestGetLeafsRetries(t *testing.T) { Root: root, Start: bytes.Repeat([]byte{0x00}, common.HashLength), End: bytes.Repeat([]byte{0xff}, common.HashLength), - Limit: defaultLeafRequestLimit, + Limit: 1024, NodeType: message.StateTrieNode, } diff --git a/coreth/sync/client/leaf_syncer.go b/coreth/sync/client/leaf_syncer.go index 754c559e..2ca82b75 100644 --- a/coreth/sync/client/leaf_syncer.go +++ b/coreth/sync/client/leaf_syncer.go @@ -9,20 +9,17 @@ import ( "errors" "fmt" + "github.com/ava-labs/coreth/plugin/evm/message" + "github.com/ava-labs/coreth/utils" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" "golang.org/x/sync/errgroup" - - "github.com/ava-labs/coreth/plugin/evm/message" - "github.com/ava-labs/coreth/utils" ) var ( errFailedToFetchLeafs = errors.New("failed to fetch leafs") 
) -const defaultLeafRequestLimit = 1024 - // LeafSyncTask represents a complete task to be completed by the leaf syncer. // Note: each LeafSyncTask is processed on its own goroutine and there will // not be concurrent calls to the callback methods. Implementations should return @@ -40,9 +37,10 @@ type LeafSyncTask interface { } type CallbackLeafSyncer struct { - client LeafClient - done chan error - tasks <-chan LeafSyncTask + client LeafClient + done chan error + tasks <-chan LeafSyncTask + requestSize uint16 } type LeafClient interface { @@ -52,11 +50,12 @@ type LeafClient interface { } // NewCallbackLeafSyncer creates a new syncer object to perform leaf sync of tries. -func NewCallbackLeafSyncer(client LeafClient, tasks <-chan LeafSyncTask) *CallbackLeafSyncer { +func NewCallbackLeafSyncer(client LeafClient, tasks <-chan LeafSyncTask, requestSize uint16) *CallbackLeafSyncer { return &CallbackLeafSyncer{ - client: client, - done: make(chan error), - tasks: tasks, + client: client, + done: make(chan error), + tasks: tasks, + requestSize: requestSize, } } @@ -102,7 +101,7 @@ func (c *CallbackLeafSyncer) syncTask(ctx context.Context, task LeafSyncTask) er Root: root, Account: task.Account(), Start: start, - Limit: defaultLeafRequestLimit, + Limit: c.requestSize, NodeType: task.NodeType(), }) if err != nil { diff --git a/coreth/sync/client/mock_network.go b/coreth/sync/client/mock_network.go index 43152cd4..b841ffae 100644 --- a/coreth/sync/client/mock_network.go +++ b/coreth/sync/client/mock_network.go @@ -4,6 +4,7 @@ package statesyncclient import ( + "context" "errors" "github.com/ava-labs/avalanchego/ids" @@ -28,7 +29,7 @@ type mockNetwork struct { nodesRequested []ids.NodeID } -func (t *mockNetwork) SendAppRequestAny(minVersion *version.Application, request []byte) ([]byte, ids.NodeID, error) { +func (t *mockNetwork) SendAppRequestAny(ctx context.Context, minVersion *version.Application, request []byte) ([]byte, ids.NodeID, error) { if len(t.response) == 0 { 
return nil, ids.EmptyNodeID, errors.New("no mocked response to return in mockNetwork") } @@ -39,7 +40,7 @@ func (t *mockNetwork) SendAppRequestAny(minVersion *version.Application, request return response, ids.EmptyNodeID, err } -func (t *mockNetwork) SendAppRequest(nodeID ids.NodeID, request []byte) ([]byte, error) { +func (t *mockNetwork) SendAppRequest(ctx context.Context, nodeID ids.NodeID, request []byte) ([]byte, error) { if len(t.response) == 0 { return nil, errors.New("no mocked response to return in mockNetwork") } @@ -77,7 +78,7 @@ func (t *mockNetwork) Gossip([]byte) error { panic("not implemented") // we don't care about this function for this test } -func (t *mockNetwork) SendCrossChainRequest(chainID ids.ID, request []byte) ([]byte, error) { +func (t *mockNetwork) SendCrossChainRequest(ctx context.Context, chainID ids.ID, request []byte) ([]byte, error) { panic("not implemented") // we don't care about this function for this test } diff --git a/coreth/sync/handlers/block_request.go b/coreth/sync/handlers/block_request.go index a631c61c..2bbb21b0 100644 --- a/coreth/sync/handlers/block_request.go +++ b/coreth/sync/handlers/block_request.go @@ -10,6 +10,7 @@ import ( "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/coreth/plugin/evm/message" "github.com/ava-labs/coreth/sync/handlers/stats" @@ -17,9 +18,12 @@ import ( "github.com/ethereum/go-ethereum/log" ) -// parentLimit specifies how many parents to retrieve and send given a starting hash -// This value overrides any specified limit in blockRequest.Parents if it is greater than this value -const parentLimit = uint16(64) +const ( + // parentLimit specifies how many parents to retrieve and send given a starting hash + // This value overrides any specified limit in blockRequest.Parents if it is greater than this value + parentLimit = uint16(64) + targetMessageByteSize = units.MiB - units.KiB // Target total 
block bytes slightly under original network codec max size of 1MB +) // BlockRequestHandler is a peer.RequestHandler for message.BlockRequest // serving requested blocks starting at specified hash @@ -52,6 +56,7 @@ func (b *BlockRequestHandler) OnBlockRequest(ctx context.Context, nodeID ids.Nod parents = parentLimit } blocks := make([][]byte, 0, parents) + totalBytes := 0 // ensure metrics are captured properly on all return paths defer func() { @@ -80,11 +85,17 @@ func (b *BlockRequestHandler) OnBlockRequest(ctx context.Context, nodeID ids.Nod buf := new(bytes.Buffer) if err := block.EncodeRLP(buf); err != nil { - log.Warn("failed to RLP encode block", "hash", block.Hash(), "height", block.NumberU64(), "err", err) + log.Error("failed to RLP encode block", "hash", block.Hash(), "height", block.NumberU64(), "err", err) return nil, nil } + if buf.Len()+totalBytes > targetMessageByteSize && len(blocks) > 0 { + log.Debug("Skipping block due to max total bytes size", "totalBlockDataSize", totalBytes, "blockSize", buf.Len(), "maxTotalBytesSize", targetMessageByteSize) + break + } + blocks = append(blocks, buf.Bytes()) + totalBytes += buf.Len() hash = block.ParentHash() height-- } @@ -100,7 +111,7 @@ func (b *BlockRequestHandler) OnBlockRequest(ctx context.Context, nodeID ids.Nod } responseBytes, err := b.codec.Marshal(message.Version, response) if err != nil { - log.Warn("failed to marshal BlockResponse, dropping request", "nodeID", nodeID, "requestID", requestID, "hash", blockRequest.Hash, "parents", blockRequest.Parents, "blocksLen", len(response.Blocks), "err", err) + log.Error("failed to marshal BlockResponse, dropping request", "nodeID", nodeID, "requestID", requestID, "hash", blockRequest.Hash, "parents", blockRequest.Parents, "blocksLen", len(response.Blocks), "err", err) return nil, nil } diff --git a/coreth/sync/handlers/block_request_test.go b/coreth/sync/handlers/block_request_test.go index 4930d3f2..55805c03 100644 --- 
a/coreth/sync/handlers/block_request_test.go +++ b/coreth/sync/handlers/block_request_test.go @@ -5,42 +5,46 @@ package handlers import ( "context" + "math/big" "testing" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/coreth/consensus/dummy" "github.com/ava-labs/coreth/core" + "github.com/ava-labs/coreth/core/rawdb" "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/ethdb/memorydb" "github.com/ava-labs/coreth/params" "github.com/ava-labs/coreth/plugin/evm/message" "github.com/ava-labs/coreth/sync/handlers/stats" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/rlp" "github.com/stretchr/testify/assert" ) -func TestBlockRequestHandler(t *testing.T) { - var gspec = &core.Genesis{ - Config: params.TestChainConfig, - } - memdb := memorydb.New() - genesis := gspec.MustCommit(memdb) - engine := dummy.NewETHFaker() - blocks, _, err := core.GenerateChain(params.TestChainConfig, genesis, engine, memdb, 96, 0, func(i int, b *core.BlockGen) {}) - if err != nil { - t.Fatal("unexpected error when generating test blockchain", err) - } +type blockRequestTest struct { + name string - assert.Len(t, blocks, 96) + // starting block, specify either Index or (hash+height) + startBlockIndex int + startBlockHash common.Hash + startBlockHeight uint64 + + requestedParents uint16 + expectedBlocks int + expectNilResponse bool + assertResponse func(t testing.TB, stats *stats.MockHandlerStats, b []byte) +} + +func executeBlockRequestTest(t testing.TB, test blockRequestTest, blocks []*types.Block) { + mockHandlerStats := &stats.MockHandlerStats{} // convert into map blocksDB := make(map[common.Hash]*types.Block, len(blocks)) for _, blk := range blocks { blocksDB[blk.Hash()] = blk } - - mockHandlerStats := &stats.MockHandlerStats{} blockProvider := &TestBlockProvider{ GetBlockFn: func(hash common.Hash, height uint64) *types.Block { blk, ok := 
blocksDB[hash] @@ -52,19 +56,64 @@ func TestBlockRequestHandler(t *testing.T) { } blockRequestHandler := NewBlockRequestHandler(blockProvider, message.Codec, mockHandlerStats) - tests := []struct { - name string + var blockRequest message.BlockRequest + if test.startBlockHash != (common.Hash{}) { + blockRequest.Hash = test.startBlockHash + blockRequest.Height = test.startBlockHeight + } else { + startingBlock := blocks[test.startBlockIndex] + blockRequest.Hash = startingBlock.Hash() + blockRequest.Height = startingBlock.NumberU64() + } + blockRequest.Parents = test.requestedParents + + responseBytes, err := blockRequestHandler.OnBlockRequest(context.Background(), ids.GenerateTestNodeID(), 1, blockRequest) + if err != nil { + t.Fatal("unexpected error during block request", err) + } + if test.assertResponse != nil { + test.assertResponse(t, mockHandlerStats, responseBytes) + } + + if test.expectNilResponse { + assert.Nil(t, responseBytes) + return + } - // starting block, specify either Index or (hash+height) - startBlockIndex int - startBlockHash common.Hash - startBlockHeight uint64 + assert.NotEmpty(t, responseBytes) - requestedParents uint16 - expectedBlocks int - expectNilResponse bool - assertResponse func(t *testing.T, response []byte) - }{ + var response message.BlockResponse + if _, err = message.Codec.Unmarshal(responseBytes, &response); err != nil { + t.Fatal("error unmarshalling", err) + } + assert.Len(t, response.Blocks, test.expectedBlocks) + + for _, blockBytes := range response.Blocks { + block := new(types.Block) + if err := rlp.DecodeBytes(blockBytes, block); err != nil { + t.Fatal("could not parse block", err) + } + assert.GreaterOrEqual(t, test.startBlockIndex, 0) + assert.Equal(t, blocks[test.startBlockIndex].Hash(), block.Hash()) + test.startBlockIndex-- + } + mockHandlerStats.Reset() +} + +func TestBlockRequestHandler(t *testing.T) { + var gspec = &core.Genesis{ + Config: params.TestChainConfig, + } + memdb := rawdb.NewMemoryDatabase() + 
genesis := gspec.MustCommit(memdb) + engine := dummy.NewETHFaker() + blocks, _, err := core.GenerateChain(params.TestChainConfig, genesis, engine, memdb, 96, 0, func(i int, b *core.BlockGen) {}) + if err != nil { + t.Fatal("unexpected error when generating test blockchain", err) + } + assert.Len(t, blocks, 96) + + tests := []blockRequestTest{ { name: "handler_returns_blocks_as_requested", startBlockIndex: 64, @@ -89,55 +138,74 @@ func TestBlockRequestHandler(t *testing.T) { startBlockHeight: 1_000_000, requestedParents: 64, expectNilResponse: true, - assertResponse: func(t *testing.T, _ []byte) { + assertResponse: func(t testing.TB, mockHandlerStats *stats.MockHandlerStats, _ []byte) { assert.Equal(t, uint32(1), mockHandlerStats.MissingBlockHashCount) }, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - var blockRequest message.BlockRequest - if test.startBlockHash != (common.Hash{}) { - blockRequest.Hash = test.startBlockHash - blockRequest.Height = test.startBlockHeight - } else { - startingBlock := blocks[test.startBlockIndex] - blockRequest.Hash = startingBlock.Hash() - blockRequest.Height = startingBlock.NumberU64() - } - blockRequest.Parents = test.requestedParents - - responseBytes, err := blockRequestHandler.OnBlockRequest(context.Background(), ids.GenerateTestNodeID(), 1, blockRequest) - if err != nil { - t.Fatal("unexpected error during block request", err) - } - if test.assertResponse != nil { - test.assertResponse(t, responseBytes) - } - - if test.expectNilResponse { - assert.Nil(t, responseBytes) - return - } + executeBlockRequestTest(t, test, blocks) + }) + } +} - assert.NotEmpty(t, responseBytes) +func TestBlockRequestHandlerLargeBlocks(t *testing.T) { + var ( + key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + addr1 = crypto.PubkeyToAddress(key1.PublicKey) + funds = big.NewInt(1000000000000000000) + gspec = &core.Genesis{ + Config: &params.ChainConfig{HomesteadBlock: new(big.Int)},
+ Alloc: core.GenesisAlloc{addr1: {Balance: funds}}, + } + signer = types.LatestSigner(gspec.Config) + ) + memdb := rawdb.NewMemoryDatabase() + genesis := gspec.MustCommit(memdb) + engine := dummy.NewETHFaker() + blocks, _, err := core.GenerateChain(gspec.Config, genesis, engine, memdb, 96, 0, func(i int, b *core.BlockGen) { + var data []byte + switch { + case i <= 32: + data = make([]byte, units.MiB) + default: + data = make([]byte, units.MiB/16) + } + tx, err := types.SignTx(types.NewTransaction(b.TxNonce(addr1), addr1, big.NewInt(10000), 4_215_304, nil, data), signer, key1) + if err != nil { + t.Fatal(err) + } + b.AddTx(tx) + }) + if err != nil { + t.Fatal("unexpected error when generating test blockchain", err) + } + assert.Len(t, blocks, 96) - var response message.BlockResponse - if _, err = message.Codec.Unmarshal(responseBytes, &response); err != nil { - t.Fatal("error unmarshalling", err) - } - assert.Len(t, response.Blocks, test.expectedBlocks) - - for _, blockBytes := range response.Blocks { - block := new(types.Block) - if err := rlp.DecodeBytes(blockBytes, block); err != nil { - t.Fatal("could not parse block", err) - } - assert.GreaterOrEqual(t, test.startBlockIndex, 0) - assert.Equal(t, blocks[test.startBlockIndex].Hash(), block.Hash()) - test.startBlockIndex-- - } - mockHandlerStats.Reset() + tests := []blockRequestTest{ + { + name: "handler_returns_blocks_as_requested", + startBlockIndex: 64, + requestedParents: 10, + expectedBlocks: 10, + }, + { + name: "handler_caps_blocks_size_limit", + startBlockIndex: 64, + requestedParents: 16, + expectedBlocks: 15, + }, + { + name: "handler_caps_blocks_size_limit_on_first_block", + startBlockIndex: 32, + requestedParents: 10, + expectedBlocks: 1, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + executeBlockRequestTest(t, test, blocks) }) } } @@ -146,7 +214,7 @@ func TestBlockRequestHandlerCtxExpires(t *testing.T) { var gspec = &core.Genesis{ Config: params.TestChainConfig, } - 
memdb := memorydb.New() + memdb := rawdb.NewMemoryDatabase() genesis := gspec.MustCommit(memdb) engine := dummy.NewETHFaker() blocks, _, err := core.GenerateChain(params.TestChainConfig, genesis, engine, memdb, 11, 0, func(i int, b *core.BlockGen) {}) diff --git a/coreth/sync/handlers/code_request.go b/coreth/sync/handlers/code_request.go index 3f9e3580..b756507f 100644 --- a/coreth/sync/handlers/code_request.go +++ b/coreth/sync/handlers/code_request.go @@ -11,11 +11,10 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/ethdb" - "github.com/ava-labs/coreth/params" "github.com/ava-labs/coreth/plugin/evm/message" "github.com/ava-labs/coreth/sync/handlers/stats" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" ) @@ -50,7 +49,7 @@ func (n *CodeRequestHandler) OnCodeRequest(_ context.Context, nodeID ids.NodeID, n.stats.UpdateCodeReadTime(time.Since(startTime)) }() - if len(codeRequest.Hashes) > params.MaxCodeHashesPerRequest { + if len(codeRequest.Hashes) > message.MaxCodeHashesPerRequest { n.stats.IncTooManyHashesRequested() log.Debug("too many hashes requested, dropping request", "nodeID", nodeID, "requestID", requestID, "numHashes", len(codeRequest.Hashes)) return nil, nil @@ -76,7 +75,7 @@ func (n *CodeRequestHandler) OnCodeRequest(_ context.Context, nodeID ids.NodeID, codeResponse := message.CodeResponse{Data: codeBytes} responseBytes, err := n.codec.Marshal(message.Version, codeResponse) if err != nil { - log.Warn("could not marshal CodeResponse, dropping request", "nodeID", nodeID, "requestID", requestID, "request", codeRequest, "err", err) + log.Error("could not marshal CodeResponse, dropping request", "nodeID", nodeID, "requestID", requestID, "request", codeRequest, "err", err) return nil, nil } n.stats.UpdateCodeBytesReturned(uint32(totalBytes)) diff --git a/coreth/sync/handlers/code_request_test.go 
b/coreth/sync/handlers/code_request_test.go index 15c20f2d..1bf5bd52 100644 --- a/coreth/sync/handlers/code_request_test.go +++ b/coreth/sync/handlers/code_request_test.go @@ -12,11 +12,11 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/ethdb/memorydb" "github.com/ava-labs/coreth/plugin/evm/message" "github.com/ava-labs/coreth/sync/handlers/stats" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethdb/memorydb" "github.com/stretchr/testify/assert" ) @@ -94,7 +94,7 @@ func TestCodeRequestHandler(t *testing.T) { responseBytes, err := codeRequestHandler.OnCodeRequest(context.Background(), ids.GenerateTestNodeID(), 1, request) assert.NoError(t, err) - // If the expected resposne is empty, assert that the handler returns an empty response and return early. + // If the expected response is empty, assert that the handler returns an empty response and return early. 
if len(expectedResponse) == 0 { assert.Len(t, responseBytes, 0, "expected response to be empty") return diff --git a/coreth/sync/handlers/handler.go b/coreth/sync/handlers/handler.go index 6254801c..71bff519 100644 --- a/coreth/sync/handlers/handler.go +++ b/coreth/sync/handlers/handler.go @@ -4,21 +4,11 @@ package handlers import ( - "context" - - "github.com/ava-labs/avalanchego/codec" - "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/coreth/core/state/snapshot" "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/ethdb" - "github.com/ava-labs/coreth/plugin/evm/message" - "github.com/ava-labs/coreth/sync/handlers/stats" - "github.com/ava-labs/coreth/trie" "github.com/ethereum/go-ethereum/common" ) -var _ message.RequestHandler = &syncHandler{} - type BlockProvider interface { GetBlock(common.Hash, uint64) *types.Block } @@ -31,43 +21,3 @@ type SyncDataProvider interface { BlockProvider SnapshotProvider } - -type syncHandler struct { - stateTrieLeafsRequestHandler *LeafsRequestHandler - atomicTrieLeafsRequestHandler *LeafsRequestHandler - blockRequestHandler *BlockRequestHandler - codeRequestHandler *CodeRequestHandler -} - -// NewSyncHandler constructs the handler for serving state sync. 
-func NewSyncHandler( - provider SyncDataProvider, - diskDB ethdb.KeyValueReader, - evmTrieDB *trie.Database, - atomicTrieDB *trie.Database, - networkCodec codec.Manager, - stats stats.HandlerStats, -) message.RequestHandler { - return &syncHandler{ - stateTrieLeafsRequestHandler: NewLeafsRequestHandler(evmTrieDB, provider, networkCodec, stats), - atomicTrieLeafsRequestHandler: NewLeafsRequestHandler(atomicTrieDB, nil, networkCodec, stats), - blockRequestHandler: NewBlockRequestHandler(provider, networkCodec, stats), - codeRequestHandler: NewCodeRequestHandler(diskDB, networkCodec, stats), - } -} - -func (s *syncHandler) HandleStateTrieLeafsRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, leafsRequest message.LeafsRequest) ([]byte, error) { - return s.stateTrieLeafsRequestHandler.OnLeafsRequest(ctx, nodeID, requestID, leafsRequest) -} - -func (s *syncHandler) HandleAtomicTrieLeafsRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, leafsRequest message.LeafsRequest) ([]byte, error) { - return s.atomicTrieLeafsRequestHandler.OnLeafsRequest(ctx, nodeID, requestID, leafsRequest) -} - -func (s *syncHandler) HandleBlockRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, blockRequest message.BlockRequest) ([]byte, error) { - return s.blockRequestHandler.OnBlockRequest(ctx, nodeID, requestID, blockRequest) -} - -func (s *syncHandler) HandleCodeRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, codeRequest message.CodeRequest) ([]byte, error) { - return s.codeRequestHandler.OnCodeRequest(ctx, nodeID, requestID, codeRequest) -} diff --git a/coreth/sync/handlers/leafs_request.go b/coreth/sync/handlers/leafs_request.go index dc91b4c8..5643605a 100644 --- a/coreth/sync/handlers/leafs_request.go +++ b/coreth/sync/handlers/leafs_request.go @@ -12,18 +12,17 @@ import ( "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/math" 
"github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/coreth/core/state/snapshot" "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/ethdb" - "github.com/ava-labs/coreth/ethdb/memorydb" "github.com/ava-labs/coreth/plugin/evm/message" "github.com/ava-labs/coreth/sync/handlers/stats" "github.com/ava-labs/coreth/sync/syncutils" "github.com/ava-labs/coreth/trie" "github.com/ava-labs/coreth/utils" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/ethdb/memorydb" "github.com/ethereum/go-ethereum/log" ) @@ -33,6 +32,10 @@ const ( // in message.LeafsRequest if it is greater than this value maxLeavesLimit = uint16(1024) + // Maximum percent of the time left to deadline to spend on optimistically + // reading the snapshot to find the response + maxSnapshotReadTimePercent = 75 + segmentLen = 64 // divide data from snapshot to segments of this size ) @@ -95,7 +98,11 @@ func (lrh *LeafsRequestHandler) OnLeafsRequest(ctx context.Context, nodeID ids.N return nil, nil } - t, err := trie.New(leafsRequest.Account, leafsRequest.Root, lrh.trieDB) + // TODO: We should know the state root that accounts correspond to, + // as this information will be necessary to access storage tries when + // the trie is path based. + stateRoot := common.Hash{} + t, err := trie.New(trie.StorageTrieID(stateRoot, leafsRequest.Account, leafsRequest.Root), lrh.trieDB) if err != nil { log.Debug("error opening trie when processing request, dropping request", "nodeID", nodeID, "requestID", requestID, "root", leafsRequest.Root, "err", err) lrh.stats.IncMissingRoot() @@ -229,7 +236,19 @@ func (rb *responseBuilder) fillFromSnapshot(ctx context.Context) (bool, error) { // modified since the requested root. If this assumption can be verified with // range proofs and data from the trie, we can skip iterating the trie as // an optimization. 
- snapKeys, snapVals, err := rb.readLeafsFromSnapshot(ctx) + // Since we are performing this read optimistically, we use a separate context + // with reduced timeout so there is enough time to read the trie if the snapshot + // read does not contain up-to-date data. + snapCtx := ctx + if deadline, ok := ctx.Deadline(); ok { + timeTillDeadline := time.Until(deadline) + bufferedDeadline := time.Now().Add(timeTillDeadline * maxSnapshotReadTimePercent / 100) + + var cancel context.CancelFunc + snapCtx, cancel = context.WithDeadline(ctx, bufferedDeadline) + defer cancel() + } + snapKeys, snapVals, err := rb.readLeafsFromSnapshot(snapCtx) // Update read snapshot time here, so that we include the case that an error occurred. rb.stats.UpdateSnapshotReadTime(time.Since(snapshotReadStart)) if err != nil { @@ -264,7 +283,7 @@ func (rb *responseBuilder) fillFromSnapshot(ctx context.Context) (bool, error) { // segments of the data and use them in the response. hasGap := false for i := 0; i < len(snapKeys); i += segmentLen { - segmentEnd := math.Min(i+segmentLen, len(snapKeys)) + segmentEnd := min(i+segmentLen, len(snapKeys)) proof, ok, _, err := rb.isRangeValid(snapKeys[i:segmentEnd], snapVals[i:segmentEnd], hasGap) if err != nil { rb.stats.IncProofError() @@ -300,7 +319,7 @@ func (rb *responseBuilder) fillFromSnapshot(ctx context.Context) (bool, error) { // all the key/vals in the segment are valid, but possibly shorten segmentEnd // here to respect limit. this is necessary in case the number of leafs we read // from the trie is more than the length of a segment which cannot be validated. limit - segmentEnd = math.Min(segmentEnd, i+int(rb.limit)-len(rb.response.Keys)) + segmentEnd = min(segmentEnd, i+int(rb.limit)-len(rb.response.Keys)) rb.response.Keys = append(rb.response.Keys, snapKeys[i:segmentEnd]...) rb.response.Vals = append(rb.response.Vals, snapVals[i:segmentEnd]...) 
diff --git a/coreth/sync/handlers/leafs_request_test.go b/coreth/sync/handlers/leafs_request_test.go index 8dad57b5..e543acf7 100644 --- a/coreth/sync/handlers/leafs_request_test.go +++ b/coreth/sync/handlers/leafs_request_test.go @@ -13,13 +13,13 @@ import ( "github.com/ava-labs/coreth/core/rawdb" "github.com/ava-labs/coreth/core/state/snapshot" "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/ethdb" - "github.com/ava-labs/coreth/ethdb/memorydb" "github.com/ava-labs/coreth/plugin/evm/message" "github.com/ava-labs/coreth/sync/handlers/stats" + "github.com/ava-labs/coreth/sync/syncutils" "github.com/ava-labs/coreth/trie" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/rlp" "github.com/stretchr/testify/assert" ) @@ -27,16 +27,20 @@ import ( func TestLeafsRequestHandler_OnLeafsRequest(t *testing.T) { rand.Seed(1) mockHandlerStats := &stats.MockHandlerStats{} - memdb := memorydb.New() + memdb := rawdb.NewMemoryDatabase() trieDB := trie.NewDatabase(memdb) - corruptedTrieRoot, _, _ := trie.GenerateTrie(t, trieDB, 100, common.HashLength) + corruptedTrieRoot, _, _ := syncutils.GenerateTrie(t, trieDB, 100, common.HashLength) + tr, err := trie.New(trie.TrieID(corruptedTrieRoot), trieDB) + if err != nil { + t.Fatal(err) + } // Corrupt [corruptedTrieRoot] - trie.CorruptTrie(t, trieDB, corruptedTrieRoot, 5) + syncutils.CorruptTrie(t, memdb, tr, 5) - largeTrieRoot, largeTrieKeys, _ := trie.GenerateTrie(t, trieDB, 10_000, common.HashLength) - smallTrieRoot, _, _ := trie.GenerateTrie(t, trieDB, 500, common.HashLength) - accountTrieRoot, accounts := trie.FillAccounts( + largeTrieRoot, largeTrieKeys, _ := syncutils.GenerateTrie(t, trieDB, 10_000, common.HashLength) + smallTrieRoot, _, _ := syncutils.GenerateTrie(t, trieDB, 500, common.HashLength) + accountTrieRoot, accounts := syncutils.FillAccounts( t, trieDB, common.Hash{}, @@ -71,6 +75,12 @@ func 
TestLeafsRequestHandler_OnLeafsRequest(t *testing.T) { } snapshotProvider := &TestSnapshotProvider{} leafsHandler := NewLeafsRequestHandler(trieDB, snapshotProvider, message.Codec, mockHandlerStats) + snapConfig := snapshot.Config{ + CacheSize: 64, + AsyncBuild: false, + NoBuild: false, + SkipVerify: true, + } tests := map[string]struct { prepareTestFn func() (context.Context, message.LeafsRequest) @@ -441,7 +451,7 @@ func TestLeafsRequestHandler_OnLeafsRequest(t *testing.T) { }, "account data served from snapshot": { prepareTestFn: func() (context.Context, message.LeafsRequest) { - snap, err := snapshot.New(memdb, trieDB, 64, common.Hash{}, accountTrieRoot, false, true, false) + snap, err := snapshot.New(snapConfig, memdb, trieDB, common.Hash{}, accountTrieRoot) if err != nil { t.Fatal(err) } @@ -468,7 +478,7 @@ func TestLeafsRequestHandler_OnLeafsRequest(t *testing.T) { }, "partial account data served from snapshot": { prepareTestFn: func() (context.Context, message.LeafsRequest) { - snap, err := snapshot.New(memdb, trieDB, 64, common.Hash{}, accountTrieRoot, false, true, false) + snap, err := snapshot.New(snapConfig, memdb, trieDB, common.Hash{}, accountTrieRoot) if err != nil { t.Fatal(err) } @@ -524,7 +534,7 @@ func TestLeafsRequestHandler_OnLeafsRequest(t *testing.T) { }, "storage data served from snapshot": { prepareTestFn: func() (context.Context, message.LeafsRequest) { - snap, err := snapshot.New(memdb, trieDB, 64, common.Hash{}, accountTrieRoot, false, true, false) + snap, err := snapshot.New(snapConfig, memdb, trieDB, common.Hash{}, accountTrieRoot) if err != nil { t.Fatal(err) } @@ -552,7 +562,7 @@ func TestLeafsRequestHandler_OnLeafsRequest(t *testing.T) { }, "partial storage data served from snapshot": { prepareTestFn: func() (context.Context, message.LeafsRequest) { - snap, err := snapshot.New(memdb, trieDB, 64, common.Hash{}, accountTrieRoot, false, true, false) + snap, err := snapshot.New(snapConfig, memdb, trieDB, common.Hash{}, accountTrieRoot) 
if err != nil { t.Fatal(err) } @@ -603,7 +613,7 @@ func TestLeafsRequestHandler_OnLeafsRequest(t *testing.T) { }, "last snapshot key removed": { prepareTestFn: func() (context.Context, message.LeafsRequest) { - snap, err := snapshot.New(memdb, trieDB, 64, common.Hash{}, accountTrieRoot, false, true, false) + snap, err := snapshot.New(snapConfig, memdb, trieDB, common.Hash{}, accountTrieRoot) if err != nil { t.Fatal(err) } @@ -639,7 +649,7 @@ func TestLeafsRequestHandler_OnLeafsRequest(t *testing.T) { }, "request last key when removed from snapshot": { prepareTestFn: func() (context.Context, message.LeafsRequest) { - snap, err := snapshot.New(memdb, trieDB, 64, common.Hash{}, accountTrieRoot, false, true, false) + snap, err := snapshot.New(snapConfig, memdb, trieDB, common.Hash{}, accountTrieRoot) if err != nil { t.Fatal(err) } @@ -705,7 +715,7 @@ func assertRangeProofIsValid(t *testing.T, request *message.LeafsRequest, respon var proof ethdb.Database if len(response.ProofVals) > 0 { - proof = memorydb.New() + proof = rawdb.NewMemoryDatabase() defer proof.Close() for _, proofVal := range response.ProofVals { proofKey := crypto.Keccak256(proofVal) diff --git a/coreth/sync/statesync/code_syncer.go b/coreth/sync/statesync/code_syncer.go index 28f47044..60c0361f 100644 --- a/coreth/sync/statesync/code_syncer.go +++ b/coreth/sync/statesync/code_syncer.go @@ -12,10 +12,10 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/ethdb" - "github.com/ava-labs/coreth/params" + "github.com/ava-labs/coreth/plugin/evm/message" statesyncclient "github.com/ava-labs/coreth/sync/client" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethdb" ) const ( @@ -140,7 +140,7 @@ func (c *codeSyncer) addCodeToFetchFromDBToQueue() error { // work fulfills any incoming requests from the producer channel by fetching code bytes from the network // and fulfilling 
them by updating the database. func (c *codeSyncer) work(ctx context.Context) error { - codeHashes := make([]common.Hash, 0, params.MaxCodeHashesPerRequest) + codeHashes := make([]common.Hash, 0, message.MaxCodeHashesPerRequest) for { select { @@ -159,7 +159,7 @@ func (c *codeSyncer) work(ctx context.Context) error { codeHashes = append(codeHashes, codeHash) // Try to wait for at least [MaxCodeHashesPerRequest] code hashes to batch into a single request // if there's more work remaining. - if len(codeHashes) < params.MaxCodeHashesPerRequest { + if len(codeHashes) < message.MaxCodeHashesPerRequest { continue } if err := c.fulfillCodeRequest(ctx, codeHashes); err != nil { diff --git a/coreth/sync/statesync/code_syncer_test.go b/coreth/sync/statesync/code_syncer_test.go index 9a3d125c..574290e2 100644 --- a/coreth/sync/statesync/code_syncer_test.go +++ b/coreth/sync/statesync/code_syncer_test.go @@ -10,13 +10,13 @@ import ( "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/ethdb/memorydb" "github.com/ava-labs/coreth/plugin/evm/message" statesyncclient "github.com/ava-labs/coreth/sync/client" "github.com/ava-labs/coreth/sync/handlers" handlerstats "github.com/ava-labs/coreth/sync/handlers/stats" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethdb/memorydb" "github.com/stretchr/testify/assert" ) @@ -44,7 +44,7 @@ func testCodeSyncer(t *testing.T, test codeSyncerTest) { mockClient := statesyncclient.NewMockClient(message.Codec, nil, codeRequestHandler, nil) mockClient.GetCodeIntercept = test.getCodeIntercept - clientDB := memorydb.New() + clientDB := rawdb.NewMemoryDatabase() codeSyncer := newCodeSyncer(CodeSyncerConfig{ MaxOutstandingCodeHashes: DefaultMaxOutstandingCodeHashes, diff --git a/coreth/sync/statesync/state_syncer.go b/coreth/sync/statesync/state_syncer.go index de56677a..df83f30a 100644 --- a/coreth/sync/statesync/state_syncer.go 
+++ b/coreth/sync/statesync/state_syncer.go @@ -9,10 +9,10 @@ import ( "sync" "github.com/ava-labs/coreth/core/state/snapshot" - "github.com/ava-labs/coreth/ethdb" syncclient "github.com/ava-labs/coreth/sync/client" "github.com/ava-labs/coreth/trie" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethdb" "golang.org/x/sync/errgroup" ) @@ -28,8 +28,9 @@ type StateSyncerConfig struct { Client syncclient.Client DB ethdb.Database BatchSize int - MaxOutstandingCodeHashes int // Maximum number of code hashes in the code syncer queue - NumCodeFetchingWorkers int // Number of code syncing threads + MaxOutstandingCodeHashes int // Maximum number of code hashes in the code syncer queue + NumCodeFetchingWorkers int // Number of code syncing threads + RequestSize uint16 // Number of leafs to request from a peer at a time } // stateSync keeps the state of the entire state sync operation. @@ -82,7 +83,7 @@ func NewStateSyncer(config *StateSyncerConfig) (*stateSync, error) { mainTrieDone: make(chan struct{}), done: make(chan error, 1), } - ss.syncer = syncclient.NewCallbackLeafSyncer(config.Client, ss.segments) + ss.syncer = syncclient.NewCallbackLeafSyncer(config.Client, ss.segments, config.RequestSize) ss.codeSyncer = newCodeSyncer(CodeSyncerConfig{ DB: config.DB, Client: config.Client, diff --git a/coreth/sync/statesync/sync_helpers.go b/coreth/sync/statesync/sync_helpers.go index f5c51d6b..dd10be27 100644 --- a/coreth/sync/statesync/sync_helpers.go +++ b/coreth/sync/statesync/sync_helpers.go @@ -7,9 +7,9 @@ import ( "github.com/ava-labs/coreth/core/rawdb" "github.com/ava-labs/coreth/core/state/snapshot" "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/ethdb" "github.com/ava-labs/coreth/trie" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethdb" ) // writeAccountSnapshot stores the account represented by [acc] to the snapshot at [accHash], using diff --git a/coreth/sync/statesync/sync_test.go 
b/coreth/sync/statesync/sync_test.go index 5f6942ec..b96734f5 100644 --- a/coreth/sync/statesync/sync_test.go +++ b/coreth/sync/statesync/sync_test.go @@ -16,15 +16,15 @@ import ( "github.com/ava-labs/coreth/core/rawdb" "github.com/ava-labs/coreth/core/state/snapshot" "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/ethdb" - "github.com/ava-labs/coreth/ethdb/memorydb" "github.com/ava-labs/coreth/plugin/evm/message" statesyncclient "github.com/ava-labs/coreth/sync/client" "github.com/ava-labs/coreth/sync/handlers" handlerstats "github.com/ava-labs/coreth/sync/handlers/stats" + "github.com/ava-labs/coreth/sync/syncutils" "github.com/ava-labs/coreth/trie" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/rlp" "github.com/stretchr/testify/assert" ) @@ -62,6 +62,7 @@ func testSync(t *testing.T, test syncTest) { BatchSize: 1000, // Use a lower batch size in order to get test coverage of batches being written early. 
NumCodeFetchingWorkers: DefaultNumCodeFetchingWorkers, MaxOutstandingCodeHashes: DefaultMaxOutstandingCodeHashes, + RequestSize: 1024, }) if err != nil { t.Fatal(err) @@ -118,17 +119,17 @@ func TestSimpleSyncCases(t *testing.T) { tests := map[string]syncTest{ "accounts": { prepareForTest: func(t *testing.T) (ethdb.Database, ethdb.Database, *trie.Database, common.Hash) { - serverDB := memorydb.New() + serverDB := rawdb.NewMemoryDatabase() serverTrieDB := trie.NewDatabase(serverDB) - root, _ := trie.FillAccounts(t, serverTrieDB, common.Hash{}, numAccounts, nil) - return memorydb.New(), serverDB, serverTrieDB, root + root, _ := syncutils.FillAccounts(t, serverTrieDB, common.Hash{}, numAccounts, nil) + return rawdb.NewMemoryDatabase(), serverDB, serverTrieDB, root }, }, "accounts with code": { prepareForTest: func(t *testing.T) (ethdb.Database, ethdb.Database, *trie.Database, common.Hash) { - serverDB := memorydb.New() + serverDB := rawdb.NewMemoryDatabase() serverTrieDB := trie.NewDatabase(serverDB) - root, _ := trie.FillAccounts(t, serverTrieDB, common.Hash{}, numAccounts, func(t *testing.T, index int, account types.StateAccount) types.StateAccount { + root, _ := syncutils.FillAccounts(t, serverTrieDB, common.Hash{}, numAccounts, func(t *testing.T, index int, account types.StateAccount) types.StateAccount { if index%3 == 0 { codeBytes := make([]byte, 256) _, err := rand.Read(codeBytes) @@ -142,45 +143,45 @@ func TestSimpleSyncCases(t *testing.T) { } return account }) - return memorydb.New(), serverDB, serverTrieDB, root + return rawdb.NewMemoryDatabase(), serverDB, serverTrieDB, root }, }, "accounts with code and storage": { prepareForTest: func(t *testing.T) (ethdb.Database, ethdb.Database, *trie.Database, common.Hash) { - serverDB := memorydb.New() + serverDB := rawdb.NewMemoryDatabase() serverTrieDB := trie.NewDatabase(serverDB) root := fillAccountsWithStorage(t, serverDB, serverTrieDB, common.Hash{}, numAccounts) - return memorydb.New(), serverDB, serverTrieDB, 
root + return rawdb.NewMemoryDatabase(), serverDB, serverTrieDB, root }, }, "accounts with storage": { prepareForTest: func(t *testing.T) (ethdb.Database, ethdb.Database, *trie.Database, common.Hash) { - serverDB := memorydb.New() + serverDB := rawdb.NewMemoryDatabase() serverTrieDB := trie.NewDatabase(serverDB) - root, _ := trie.FillAccounts(t, serverTrieDB, common.Hash{}, numAccounts, func(t *testing.T, i int, account types.StateAccount) types.StateAccount { + root, _ := syncutils.FillAccounts(t, serverTrieDB, common.Hash{}, numAccounts, func(t *testing.T, i int, account types.StateAccount) types.StateAccount { if i%5 == 0 { - account.Root, _, _ = trie.GenerateTrie(t, serverTrieDB, 16, common.HashLength) + account.Root, _, _ = syncutils.GenerateTrie(t, serverTrieDB, 16, common.HashLength) } return account }) - return memorydb.New(), serverDB, serverTrieDB, root + return rawdb.NewMemoryDatabase(), serverDB, serverTrieDB, root }, }, "accounts with overlapping storage": { prepareForTest: func(t *testing.T) (ethdb.Database, ethdb.Database, *trie.Database, common.Hash) { - serverDB := memorydb.New() + serverDB := rawdb.NewMemoryDatabase() serverTrieDB := trie.NewDatabase(serverDB) root, _ := FillAccountsWithOverlappingStorage(t, serverTrieDB, common.Hash{}, numAccounts, 3) - return memorydb.New(), serverDB, serverTrieDB, root + return rawdb.NewMemoryDatabase(), serverDB, serverTrieDB, root }, }, "failed to fetch leafs": { prepareForTest: func(t *testing.T) (ethdb.Database, ethdb.Database, *trie.Database, common.Hash) { - serverDB := memorydb.New() + serverDB := rawdb.NewMemoryDatabase() serverTrieDB := trie.NewDatabase(serverDB) - root, _ := trie.FillAccounts(t, serverTrieDB, common.Hash{}, numAccountsSmall, nil) - return memorydb.New(), serverDB, serverTrieDB, root + root, _ := syncutils.FillAccounts(t, serverTrieDB, common.Hash{}, numAccountsSmall, nil) + return rawdb.NewMemoryDatabase(), serverDB, serverTrieDB, root }, GetLeafsIntercept: func(_ 
message.LeafsRequest, _ message.LeafsResponse) (message.LeafsResponse, error) { return message.LeafsResponse{}, clientErr @@ -189,10 +190,10 @@ func TestSimpleSyncCases(t *testing.T) { }, "failed to fetch code": { prepareForTest: func(t *testing.T) (ethdb.Database, ethdb.Database, *trie.Database, common.Hash) { - serverDB := memorydb.New() + serverDB := rawdb.NewMemoryDatabase() serverTrieDB := trie.NewDatabase(serverDB) root := fillAccountsWithStorage(t, serverDB, serverTrieDB, common.Hash{}, numAccountsSmall) - return memorydb.New(), serverDB, serverTrieDB, root + return rawdb.NewMemoryDatabase(), serverDB, serverTrieDB, root }, GetCodeIntercept: func(_ []common.Hash, _ [][]byte) ([][]byte, error) { return nil, clientErr @@ -209,7 +210,7 @@ func TestSimpleSyncCases(t *testing.T) { } func TestCancelSync(t *testing.T) { - serverDB := memorydb.New() + serverDB := rawdb.NewMemoryDatabase() serverTrieDB := trie.NewDatabase(serverDB) // Create trie with 2000 accounts (more than one leaf request) root := fillAccountsWithStorage(t, serverDB, serverTrieDB, common.Hash{}, 2000) @@ -218,7 +219,7 @@ func TestCancelSync(t *testing.T) { testSync(t, syncTest{ ctx: ctx, prepareForTest: func(t *testing.T) (ethdb.Database, ethdb.Database, *trie.Database, common.Hash) { - return memorydb.New(), serverDB, serverTrieDB, root + return rawdb.NewMemoryDatabase(), serverDB, serverTrieDB, root }, expectedError: context.Canceled, GetLeafsIntercept: func(_ message.LeafsRequest, lr message.LeafsResponse) (message.LeafsResponse, error) { @@ -250,10 +251,10 @@ func (i *interruptLeafsIntercept) getLeafsIntercept(request message.LeafsRequest } func TestResumeSyncAccountsTrieInterrupted(t *testing.T) { - serverDB := memorydb.New() + serverDB := rawdb.NewMemoryDatabase() serverTrieDB := trie.NewDatabase(serverDB) root, _ := FillAccountsWithOverlappingStorage(t, serverTrieDB, common.Hash{}, 2000, 3) - clientDB := memorydb.New() + clientDB := rawdb.NewMemoryDatabase() intercept := 
&interruptLeafsIntercept{ root: root, interruptAfter: 1, @@ -276,18 +277,18 @@ func TestResumeSyncAccountsTrieInterrupted(t *testing.T) { } func TestResumeSyncLargeStorageTrieInterrupted(t *testing.T) { - serverDB := memorydb.New() + serverDB := rawdb.NewMemoryDatabase() serverTrieDB := trie.NewDatabase(serverDB) - largeStorageRoot, _, _ := trie.GenerateTrie(t, serverTrieDB, 2000, common.HashLength) - root, _ := trie.FillAccounts(t, serverTrieDB, common.Hash{}, 2000, func(t *testing.T, index int, account types.StateAccount) types.StateAccount { + largeStorageRoot, _, _ := syncutils.GenerateTrie(t, serverTrieDB, 2000, common.HashLength) + root, _ := syncutils.FillAccounts(t, serverTrieDB, common.Hash{}, 2000, func(t *testing.T, index int, account types.StateAccount) types.StateAccount { // Set the root for a single account if index == 10 { account.Root = largeStorageRoot } return account }) - clientDB := memorydb.New() + clientDB := rawdb.NewMemoryDatabase() intercept := &interruptLeafsIntercept{ root: largeStorageRoot, interruptAfter: 1, @@ -308,25 +309,25 @@ func TestResumeSyncLargeStorageTrieInterrupted(t *testing.T) { } func TestResumeSyncToNewRootAfterLargeStorageTrieInterrupted(t *testing.T) { - serverDB := memorydb.New() + serverDB := rawdb.NewMemoryDatabase() serverTrieDB := trie.NewDatabase(serverDB) - largeStorageRoot1, _, _ := trie.GenerateTrie(t, serverTrieDB, 2000, common.HashLength) - largeStorageRoot2, _, _ := trie.GenerateTrie(t, serverTrieDB, 2000, common.HashLength) - root1, _ := trie.FillAccounts(t, serverTrieDB, common.Hash{}, 2000, func(t *testing.T, index int, account types.StateAccount) types.StateAccount { + largeStorageRoot1, _, _ := syncutils.GenerateTrie(t, serverTrieDB, 2000, common.HashLength) + largeStorageRoot2, _, _ := syncutils.GenerateTrie(t, serverTrieDB, 2000, common.HashLength) + root1, _ := syncutils.FillAccounts(t, serverTrieDB, common.Hash{}, 2000, func(t *testing.T, index int, account types.StateAccount) types.StateAccount { 
// Set the root for a single account if index == 10 { account.Root = largeStorageRoot1 } return account }) - root2, _ := trie.FillAccounts(t, serverTrieDB, root1, 100, func(t *testing.T, index int, account types.StateAccount) types.StateAccount { + root2, _ := syncutils.FillAccounts(t, serverTrieDB, root1, 100, func(t *testing.T, index int, account types.StateAccount) types.StateAccount { if index == 20 { account.Root = largeStorageRoot2 } return account }) - clientDB := memorydb.New() + clientDB := rawdb.NewMemoryDatabase() intercept := &interruptLeafsIntercept{ root: largeStorageRoot1, interruptAfter: 1, @@ -349,18 +350,18 @@ func TestResumeSyncToNewRootAfterLargeStorageTrieInterrupted(t *testing.T) { } func TestResumeSyncLargeStorageTrieWithConsecutiveDuplicatesInterrupted(t *testing.T) { - serverDB := memorydb.New() + serverDB := rawdb.NewMemoryDatabase() serverTrieDB := trie.NewDatabase(serverDB) - largeStorageRoot, _, _ := trie.GenerateTrie(t, serverTrieDB, 2000, common.HashLength) - root, _ := trie.FillAccounts(t, serverTrieDB, common.Hash{}, 100, func(t *testing.T, index int, account types.StateAccount) types.StateAccount { + largeStorageRoot, _, _ := syncutils.GenerateTrie(t, serverTrieDB, 2000, common.HashLength) + root, _ := syncutils.FillAccounts(t, serverTrieDB, common.Hash{}, 100, func(t *testing.T, index int, account types.StateAccount) types.StateAccount { // Set the root for 2 successive accounts if index == 10 || index == 11 { account.Root = largeStorageRoot } return account }) - clientDB := memorydb.New() + clientDB := rawdb.NewMemoryDatabase() intercept := &interruptLeafsIntercept{ root: largeStorageRoot, interruptAfter: 1, @@ -381,17 +382,17 @@ func TestResumeSyncLargeStorageTrieWithConsecutiveDuplicatesInterrupted(t *testi } func TestResumeSyncLargeStorageTrieWithSpreadOutDuplicatesInterrupted(t *testing.T) { - serverDB := memorydb.New() + serverDB := rawdb.NewMemoryDatabase() serverTrieDB := trie.NewDatabase(serverDB) - largeStorageRoot, _, _ 
:= trie.GenerateTrie(t, serverTrieDB, 2000, common.HashLength) - root, _ := trie.FillAccounts(t, serverTrieDB, common.Hash{}, 100, func(t *testing.T, index int, account types.StateAccount) types.StateAccount { + largeStorageRoot, _, _ := syncutils.GenerateTrie(t, serverTrieDB, 2000, common.HashLength) + root, _ := syncutils.FillAccounts(t, serverTrieDB, common.Hash{}, 100, func(t *testing.T, index int, account types.StateAccount) types.StateAccount { if index == 10 || index == 90 { account.Root = largeStorageRoot } return account }) - clientDB := memorydb.New() + clientDB := rawdb.NewMemoryDatabase() intercept := &interruptLeafsIntercept{ root: largeStorageRoot, interruptAfter: 1, @@ -436,7 +437,7 @@ func TestResyncNewRootAfterDeletes(t *testing.T) { "delete intermediate storage nodes": { deleteBetweenSyncs: func(t *testing.T, root common.Hash, clientDB ethdb.Database) { clientTrieDB := trie.NewDatabase(clientDB) - tr, err := trie.New(common.Hash{}, root, clientTrieDB) + tr, err := trie.New(trie.TrieID(root), clientTrieDB) if err != nil { t.Fatal(err) } @@ -464,7 +465,11 @@ func TestResyncNewRootAfterDeletes(t *testing.T) { continue } corruptedStorageRoots[acc.Root] = struct{}{} - trie.CorruptTrie(t, clientTrieDB, acc.Root, 2) + tr, err := trie.New(trie.TrieID(acc.Root), clientTrieDB) + if err != nil { + t.Fatal(err) + } + syncutils.CorruptTrie(t, clientDB, tr, 2) } if err := it.Err; err != nil { t.Fatal(err) @@ -474,7 +479,11 @@ func TestResyncNewRootAfterDeletes(t *testing.T) { "delete intermediate account trie nodes": { deleteBetweenSyncs: func(t *testing.T, root common.Hash, clientDB ethdb.Database) { clientTrieDB := trie.NewDatabase(clientDB) - trie.CorruptTrie(t, clientTrieDB, root, 5) + tr, err := trie.New(trie.TrieID(root), clientTrieDB) + if err != nil { + t.Fatal(err) + } + syncutils.CorruptTrie(t, clientDB, tr, 5) }, }, } { @@ -486,8 +495,8 @@ func TestResyncNewRootAfterDeletes(t *testing.T) { func testSyncerSyncsToNewRoot(t *testing.T, 
deleteBetweenSyncs func(*testing.T, common.Hash, ethdb.Database)) { rand.Seed(1) - clientDB := memorydb.New() - serverDB := memorydb.New() + clientDB := rawdb.NewMemoryDatabase() + serverDB := rawdb.NewMemoryDatabase() serverTrieDB := trie.NewDatabase(serverDB) root1, _ := FillAccountsWithOverlappingStorage(t, serverTrieDB, common.Hash{}, 1000, 3) diff --git a/coreth/sync/statesync/test_sync.go b/coreth/sync/statesync/test_sync.go index 74a3686a..64135dda 100644 --- a/coreth/sync/statesync/test_sync.go +++ b/coreth/sync/statesync/test_sync.go @@ -12,10 +12,11 @@ import ( "github.com/ava-labs/coreth/core/rawdb" "github.com/ava-labs/coreth/core/state/snapshot" "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/ethdb" + "github.com/ava-labs/coreth/sync/syncutils" "github.com/ava-labs/coreth/trie" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/rlp" "github.com/stretchr/testify/assert" ) @@ -38,7 +39,7 @@ func assertDBConsistency(t testing.TB, root common.Hash, clientDB ethdb.Database } trieAccountLeaves := 0 - trie.AssertTrieConsistency(t, root, serverTrieDB, clientTrieDB, func(key, val []byte) error { + syncutils.AssertTrieConsistency(t, root, serverTrieDB, clientTrieDB, func(key, val []byte) error { trieAccountLeaves++ accHash := common.BytesToHash(key) var acc types.StateAccount @@ -73,7 +74,7 @@ func assertDBConsistency(t testing.TB, root common.Hash, clientDB ethdb.Database storageTrieLeavesCount := 0 // check storage trie and storage snapshot consistency - trie.AssertTrieConsistency(t, acc.Root, serverTrieDB, clientTrieDB, func(key, val []byte) error { + syncutils.AssertTrieConsistency(t, acc.Root, serverTrieDB, clientTrieDB, func(key, val []byte) error { storageTrieLeavesCount++ snapshotVal := rawdb.ReadStorageSnapshot(clientDB, accHash, common.BytesToHash(key)) assert.Equal(t, val, snapshotVal) @@ -89,7 +90,7 @@ func 
assertDBConsistency(t testing.TB, root common.Hash, clientDB ethdb.Database } func fillAccountsWithStorage(t *testing.T, serverDB ethdb.Database, serverTrieDB *trie.Database, root common.Hash, numAccounts int) common.Hash { - newRoot, _ := trie.FillAccounts(t, serverTrieDB, root, numAccounts, func(t *testing.T, index int, account types.StateAccount) types.StateAccount { + newRoot, _ := syncutils.FillAccounts(t, serverTrieDB, root, numAccounts, func(t *testing.T, index int, account types.StateAccount) types.StateAccount { codeBytes := make([]byte, 256) _, err := rand.Read(codeBytes) if err != nil { @@ -102,7 +103,7 @@ func fillAccountsWithStorage(t *testing.T, serverDB ethdb.Database, serverTrieDB // now create state trie numKeys := 16 - account.Root, _, _ = trie.GenerateTrie(t, serverTrieDB, numKeys, common.HashLength) + account.Root, _, _ = syncutils.GenerateTrie(t, serverTrieDB, numKeys, common.HashLength) return account }) return newRoot @@ -119,18 +120,18 @@ func FillAccountsWithOverlappingStorage( ) (common.Hash, map[*keystore.Key]*types.StateAccount) { storageRoots := make([]common.Hash, 0, numOverlappingStorageRoots) for i := 0; i < numOverlappingStorageRoots; i++ { - storageRoot, _, _ := trie.GenerateTrie(t, trieDB, 16, common.HashLength) + storageRoot, _, _ := syncutils.GenerateTrie(t, trieDB, 16, common.HashLength) storageRoots = append(storageRoots, storageRoot) } storageRootIndex := 0 - return trie.FillAccounts(t, trieDB, root, numAccounts, func(t *testing.T, i int, account types.StateAccount) types.StateAccount { + return syncutils.FillAccounts(t, trieDB, root, numAccounts, func(t *testing.T, i int, account types.StateAccount) types.StateAccount { switch i % 3 { case 0: // unmodified account case 1: // account with overlapping storage root account.Root = storageRoots[storageRootIndex%numOverlappingStorageRoots] storageRootIndex++ case 2: // account with unique storage root - account.Root, _, _ = trie.GenerateTrie(t, trieDB, 16, common.HashLength) + 
account.Root, _, _ = syncutils.GenerateTrie(t, trieDB, 16, common.HashLength) } return account diff --git a/coreth/sync/statesync/trie_queue.go b/coreth/sync/statesync/trie_queue.go index feb26b22..3ec7c87a 100644 --- a/coreth/sync/statesync/trie_queue.go +++ b/coreth/sync/statesync/trie_queue.go @@ -5,8 +5,8 @@ package statesync import ( "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/ethdb" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethdb" ) // trieQueue persists storage trie roots with their associated diff --git a/coreth/sync/statesync/trie_segments.go b/coreth/sync/statesync/trie_segments.go index 48afb6ec..de7acc3e 100644 --- a/coreth/sync/statesync/trie_segments.go +++ b/coreth/sync/statesync/trie_segments.go @@ -12,12 +12,12 @@ import ( "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/ethdb" "github.com/ava-labs/coreth/plugin/evm/message" syncclient "github.com/ava-labs/coreth/sync/client" "github.com/ava-labs/coreth/trie" "github.com/ava-labs/coreth/utils" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" ) @@ -63,13 +63,16 @@ type trieToSync struct { // NewTrieToSync initializes a trieToSync and restores any previously started segments. func NewTrieToSync(sync *stateSync, root common.Hash, account common.Hash, syncTask syncTask) (*trieToSync, error) { - batch := sync.db.NewBatch() + batch := sync.db.NewBatch() // TODO: migrate state sync to use database schemes. 
+ writeFn := func(owner common.Hash, path []byte, hash common.Hash, blob []byte) { + rawdb.WriteTrieNode(batch, owner, path, hash, blob, rawdb.HashScheme) + } trieToSync := &trieToSync{ sync: sync, root: root, account: account, batch: batch, - stackTrie: trie.NewStackTrie(batch), + stackTrie: trie.NewStackTrie(writeFn), isMainTrie: (root == sync.root), task: syncTask, segmentsDone: make(map[int]struct{}), @@ -194,7 +197,7 @@ func (t *trieToSync) segmentFinished(ctx context.Context, idx int) error { } // update the stack trie and cap the batch it writes to. value := common.CopyBytes(it.Value()) - if err := t.stackTrie.TryUpdate(it.Key(), value); err != nil { + if err := t.stackTrie.Update(it.Key(), value); err != nil { return err } if t.batch.ValueSize() > t.sync.batchSize { diff --git a/coreth/sync/statesync/trie_sync_tasks.go b/coreth/sync/statesync/trie_sync_tasks.go index 9ecb7559..0c7cad42 100644 --- a/coreth/sync/statesync/trie_sync_tasks.go +++ b/coreth/sync/statesync/trie_sync_tasks.go @@ -8,10 +8,10 @@ import ( "github.com/ava-labs/coreth/core/rawdb" "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/ethdb" "github.com/ava-labs/coreth/sync/syncutils" "github.com/ava-labs/coreth/trie" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rlp" ) @@ -110,7 +110,11 @@ func (s *storageTrieTask) IterateLeafs(seek common.Hash) ethdb.Iterator { func (s *storageTrieTask) OnStart() (bool, error) { // check if this storage root is on disk - storageTrie, err := trie.New(common.Hash{}, s.root, s.sync.trieDB) + var firstAccount common.Hash + if len(s.accounts) > 0 { + firstAccount = s.accounts[0] + } + storageTrie, err := trie.New(trie.StorageTrieID(s.sync.root, s.root, firstAccount), s.sync.trieDB) if err != nil { return false, nil } diff --git a/coreth/sync/syncutils/iterators.go b/coreth/sync/syncutils/iterators.go index 751f874f..3be6ba58 100644 --- 
a/coreth/sync/syncutils/iterators.go +++ b/coreth/sync/syncutils/iterators.go @@ -5,7 +5,7 @@ package syncutils import ( "github.com/ava-labs/coreth/core/state/snapshot" - "github.com/ava-labs/coreth/ethdb" + "github.com/ethereum/go-ethereum/ethdb" ) var ( diff --git a/coreth/trie/test_trie.go b/coreth/sync/syncutils/test_trie.go similarity index 75% rename from coreth/trie/test_trie.go rename to coreth/sync/syncutils/test_trie.go index 580ba154..1d1d6825 100644 --- a/coreth/trie/test_trie.go +++ b/coreth/sync/syncutils/test_trie.go @@ -1,7 +1,7 @@ // (c) 2021-2022, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package trie +package syncutils import ( cryptoRand "crypto/rand" @@ -13,8 +13,11 @@ import ( "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/coreth/accounts/keystore" "github.com/ava-labs/coreth/core/types" + "github.com/ava-labs/coreth/trie" + "github.com/ava-labs/coreth/trie/trienode" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/rlp" "github.com/stretchr/testify/assert" ) @@ -23,20 +26,19 @@ import ( // Returns the root of the generated trie, the slice of keys inserted into the trie in lexicographical // order, and the slice of corresponding values. 
// GenerateTrie reads from [rand] and the caller should call rand.Seed(n) for deterministic results -func GenerateTrie(t *testing.T, trieDB *Database, numKeys int, keySize int) (common.Hash, [][]byte, [][]byte) { +func GenerateTrie(t *testing.T, trieDB *trie.Database, numKeys int, keySize int) (common.Hash, [][]byte, [][]byte) { if keySize < wrappers.LongLen+1 { t.Fatal("key size must be at least 9 bytes (8 bytes for uint64 and 1 random byte)") } - testTrie := NewEmpty(trieDB) + testTrie := trie.NewEmpty(trieDB) keys, values := FillTrie(t, numKeys, keySize, testTrie) // Commit the root to [trieDB] - root, nodes, err := testTrie.Commit(false) + root, nodes := testTrie.Commit(false) + err := trieDB.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)) assert.NoError(t, err) - err = trieDB.Update(NewWithNodeSet(nodes)) - assert.NoError(t, err) - err = trieDB.Commit(root, false, nil) + err = trieDB.Commit(root, false) assert.NoError(t, err) return root, keys, values @@ -45,7 +47,7 @@ func GenerateTrie(t *testing.T, trieDB *Database, numKeys int, keySize int) (com // FillTrie fills a given trie with [numKeys] number of keys, each of size [keySize] // returns inserted keys and values // FillTrie reads from [rand] and the caller should call rand.Seed(n) for deterministic results -func FillTrie(t *testing.T, numKeys int, keySize int, testTrie *Trie) ([][]byte, [][]byte) { +func FillTrie(t *testing.T, numKeys int, keySize int, testTrie *trie.Trie) ([][]byte, [][]byte) { keys := make([][]byte, 0, numKeys) values := make([][]byte, 0, numKeys) @@ -60,9 +62,7 @@ func FillTrie(t *testing.T, numKeys int, keySize int, testTrie *Trie) ([][]byte, _, err = rand.Read(value) assert.NoError(t, err) - if err = testTrie.TryUpdate(key, value); err != nil { - t.Fatal("error updating trie", err) - } + testTrie.MustUpdate(key, value) keys = append(keys, key) values = append(values, value) @@ -72,18 +72,18 @@ func FillTrie(t *testing.T, numKeys int, keySize int, testTrie *Trie) 
([][]byte, // AssertTrieConsistency ensures given trieDB [a] and [b] both have the same // non-empty trie at [root]. (all key/value pairs must be equal) -func AssertTrieConsistency(t testing.TB, root common.Hash, a, b *Database, onLeaf func(key, val []byte) error) { - trieA, err := New(common.Hash{}, root, a) +func AssertTrieConsistency(t testing.TB, root common.Hash, a, b *trie.Database, onLeaf func(key, val []byte) error) { + trieA, err := trie.New(trie.TrieID(root), a) if err != nil { t.Fatalf("error creating trieA, root=%s, err=%v", root, err) } - trieB, err := New(common.Hash{}, root, b) + trieB, err := trie.New(trie.TrieID(root), b) if err != nil { t.Fatalf("error creating trieB, root=%s, err=%v", root, err) } - itA := NewIterator(trieA.NodeIterator(nil)) - itB := NewIterator(trieB.NodeIterator(nil)) + itA := trie.NewIterator(trieA.NodeIterator(nil)) + itB := trie.NewIterator(trieB.NodeIterator(nil)) count := 0 for itA.Next() && itB.Next() { count++ @@ -102,16 +102,11 @@ func AssertTrieConsistency(t testing.TB, root common.Hash, a, b *Database, onLea assert.Greater(t, count, 0) } -// CorruptTrie deletes every [n]th trie node from the trie given by [root] from the trieDB. -// Assumes that the trie given by root can be iterated without issue. -func CorruptTrie(t *testing.T, trieDB *Database, root common.Hash, n int) { - batch := trieDB.diskdb.NewBatch() - // next delete some trie nodes - tr, err := New(common.Hash{}, root, trieDB) - if err != nil { - t.Fatal(err) - } - +// CorruptTrie deletes every [n]th trie node from the trie given by [tr] from the underlying [db]. +// Assumes [tr] can be iterated without issue. 
+func CorruptTrie(t *testing.T, diskdb ethdb.Batcher, tr *trie.Trie, n int) { + // Delete some trie nodes + batch := diskdb.NewBatch() nodeIt := tr.NodeIterator(nil) count := 0 for nodeIt.Next(true) { @@ -135,7 +130,7 @@ func CorruptTrie(t *testing.T, trieDB *Database, root common.Hash, n int) { // [onAccount] is called if non-nil (so the caller can modify the account before it is stored in the secure trie). // returns the new trie root and a map of funded keys to StateAccount structs. func FillAccounts( - t *testing.T, trieDB *Database, root common.Hash, numAccounts int, + t *testing.T, trieDB *trie.Database, root common.Hash, numAccounts int, onAccount func(*testing.T, int, types.StateAccount) types.StateAccount, ) (common.Hash, map[*keystore.Key]*types.StateAccount) { var ( @@ -145,7 +140,7 @@ func FillAccounts( accounts = make(map[*keystore.Key]*types.StateAccount, numAccounts) ) - tr, err := NewStateTrie(common.Hash{}, root, trieDB) + tr, err := trie.NewStateTrie(trie.TrieID(root), trieDB) if err != nil { t.Fatalf("error opening trie: %v", err) } @@ -170,20 +165,15 @@ func FillAccounts( if err != nil { t.Fatal(err) } - if err = tr.TryUpdate(key.Address[:], accBytes); err != nil { - t.Fatalf("error updating trie with account, address=%s, err=%v", key.Address, err) - } + tr.MustUpdate(key.Address[:], accBytes) accounts[key] = &acc } - newRoot, nodes, err := tr.Commit(false) - if err != nil { - t.Fatalf("error committing trie: %v", err) - } - if err := trieDB.Update(NewWithNodeSet(nodes)); err != nil { + newRoot, nodes := tr.Commit(false) + if err := trieDB.Update(newRoot, root, trienode.NewWithNodeSet(nodes)); err != nil { t.Fatalf("error updating trieDB: %v", err) } - if err := trieDB.Commit(newRoot, false, nil); err != nil { + if err := trieDB.Commit(newRoot, false); err != nil { t.Fatalf("error committing trieDB: %v", err) } return newRoot, accounts diff --git a/coreth/tests/init.go b/coreth/tests/init.go index eeb50050..4d29c3d6 100644 --- 
a/coreth/tests/init.go +++ b/coreth/tests/init.go @@ -32,6 +32,7 @@ import ( "sort" "github.com/ava-labs/coreth/params" + "github.com/ava-labs/coreth/utils" ) // Forks table defines supported forks and their chain config. @@ -98,6 +99,19 @@ var Forks = map[string]*params.ChainConfig{ PetersburgBlock: big.NewInt(0), IstanbulBlock: big.NewInt(0), }, + "MuirGlacier": { + ChainID: big.NewInt(1), + HomesteadBlock: big.NewInt(0), + EIP150Block: big.NewInt(0), + EIP155Block: big.NewInt(0), + EIP158Block: big.NewInt(0), + DAOForkBlock: big.NewInt(0), + ByzantiumBlock: big.NewInt(0), + ConstantinopleBlock: big.NewInt(0), + PetersburgBlock: big.NewInt(0), + IstanbulBlock: big.NewInt(0), + MuirGlacierBlock: big.NewInt(0), + }, "FrontierToHomesteadAt5": { ChainID: big.NewInt(1), HomesteadBlock: big.NewInt(5), @@ -161,7 +175,7 @@ var Forks = map[string]*params.ChainConfig{ ConstantinopleBlock: big.NewInt(0), PetersburgBlock: big.NewInt(0), IstanbulBlock: big.NewInt(0), - ApricotPhase1BlockTimestamp: big.NewInt(0), + ApricotPhase1BlockTimestamp: utils.NewUint64(0), }, "ApricotPhase2": { ChainID: big.NewInt(1), @@ -174,8 +188,8 @@ var Forks = map[string]*params.ChainConfig{ PetersburgBlock: big.NewInt(0), IstanbulBlock: big.NewInt(0), MuirGlacierBlock: big.NewInt(0), - ApricotPhase1BlockTimestamp: big.NewInt(0), - ApricotPhase2BlockTimestamp: big.NewInt(0), + ApricotPhase1BlockTimestamp: utils.NewUint64(0), + ApricotPhase2BlockTimestamp: utils.NewUint64(0), }, "ApricotPhase3": { ChainID: big.NewInt(1), @@ -188,9 +202,9 @@ var Forks = map[string]*params.ChainConfig{ PetersburgBlock: big.NewInt(0), IstanbulBlock: big.NewInt(0), MuirGlacierBlock: big.NewInt(0), - ApricotPhase1BlockTimestamp: big.NewInt(0), - ApricotPhase2BlockTimestamp: big.NewInt(0), - ApricotPhase3BlockTimestamp: big.NewInt(0), + ApricotPhase1BlockTimestamp: utils.NewUint64(0), + ApricotPhase2BlockTimestamp: utils.NewUint64(0), + ApricotPhase3BlockTimestamp: utils.NewUint64(0), }, "ApricotPhase4": { ChainID: 
big.NewInt(1), @@ -203,10 +217,10 @@ var Forks = map[string]*params.ChainConfig{ PetersburgBlock: big.NewInt(0), IstanbulBlock: big.NewInt(0), MuirGlacierBlock: big.NewInt(0), - ApricotPhase1BlockTimestamp: big.NewInt(0), - ApricotPhase2BlockTimestamp: big.NewInt(0), - ApricotPhase3BlockTimestamp: big.NewInt(0), - ApricotPhase4BlockTimestamp: big.NewInt(0), + ApricotPhase1BlockTimestamp: utils.NewUint64(0), + ApricotPhase2BlockTimestamp: utils.NewUint64(0), + ApricotPhase3BlockTimestamp: utils.NewUint64(0), + ApricotPhase4BlockTimestamp: utils.NewUint64(0), }, "ApricotPhase5": { ChainID: big.NewInt(1), @@ -218,11 +232,11 @@ var Forks = map[string]*params.ChainConfig{ ConstantinopleBlock: big.NewInt(0), PetersburgBlock: big.NewInt(0), IstanbulBlock: big.NewInt(0), - ApricotPhase1BlockTimestamp: big.NewInt(0), - ApricotPhase2BlockTimestamp: big.NewInt(0), - ApricotPhase3BlockTimestamp: big.NewInt(0), - ApricotPhase4BlockTimestamp: big.NewInt(0), - ApricotPhase5BlockTimestamp: big.NewInt(0), + ApricotPhase1BlockTimestamp: utils.NewUint64(0), + ApricotPhase2BlockTimestamp: utils.NewUint64(0), + ApricotPhase3BlockTimestamp: utils.NewUint64(0), + ApricotPhase4BlockTimestamp: utils.NewUint64(0), + ApricotPhase5BlockTimestamp: utils.NewUint64(0), }, "Banff": { ChainID: big.NewInt(1), @@ -234,12 +248,12 @@ var Forks = map[string]*params.ChainConfig{ ConstantinopleBlock: big.NewInt(0), PetersburgBlock: big.NewInt(0), IstanbulBlock: big.NewInt(0), - ApricotPhase1BlockTimestamp: big.NewInt(0), - ApricotPhase2BlockTimestamp: big.NewInt(0), - ApricotPhase3BlockTimestamp: big.NewInt(0), - ApricotPhase4BlockTimestamp: big.NewInt(0), - ApricotPhase5BlockTimestamp: big.NewInt(0), - BanffBlockTimestamp: big.NewInt(0), + ApricotPhase1BlockTimestamp: utils.NewUint64(0), + ApricotPhase2BlockTimestamp: utils.NewUint64(0), + ApricotPhase3BlockTimestamp: utils.NewUint64(0), + ApricotPhase4BlockTimestamp: utils.NewUint64(0), + ApricotPhase5BlockTimestamp: utils.NewUint64(0), + 
BanffBlockTimestamp: utils.NewUint64(0), }, "Cortina": { ChainID: big.NewInt(1), @@ -251,15 +265,15 @@ var Forks = map[string]*params.ChainConfig{ ConstantinopleBlock: big.NewInt(0), PetersburgBlock: big.NewInt(0), IstanbulBlock: big.NewInt(0), - ApricotPhase1BlockTimestamp: big.NewInt(0), - ApricotPhase2BlockTimestamp: big.NewInt(0), - ApricotPhase3BlockTimestamp: big.NewInt(0), - ApricotPhase4BlockTimestamp: big.NewInt(0), - ApricotPhase5BlockTimestamp: big.NewInt(0), - BanffBlockTimestamp: big.NewInt(0), - CortinaBlockTimestamp: big.NewInt(0), + ApricotPhase1BlockTimestamp: utils.NewUint64(0), + ApricotPhase2BlockTimestamp: utils.NewUint64(0), + ApricotPhase3BlockTimestamp: utils.NewUint64(0), + ApricotPhase4BlockTimestamp: utils.NewUint64(0), + ApricotPhase5BlockTimestamp: utils.NewUint64(0), + BanffBlockTimestamp: utils.NewUint64(0), + CortinaBlockTimestamp: utils.NewUint64(0), }, - "DUpgrade": { + "Durango": { ChainID: big.NewInt(1), HomesteadBlock: big.NewInt(0), EIP150Block: big.NewInt(0), @@ -269,18 +283,18 @@ var Forks = map[string]*params.ChainConfig{ ConstantinopleBlock: big.NewInt(0), PetersburgBlock: big.NewInt(0), IstanbulBlock: big.NewInt(0), - ApricotPhase1BlockTimestamp: big.NewInt(0), - ApricotPhase2BlockTimestamp: big.NewInt(0), - ApricotPhase3BlockTimestamp: big.NewInt(0), - ApricotPhase4BlockTimestamp: big.NewInt(0), - ApricotPhase5BlockTimestamp: big.NewInt(0), - BanffBlockTimestamp: big.NewInt(0), - CortinaBlockTimestamp: big.NewInt(0), - DUpgradeBlockTimestamp: big.NewInt(0), + ApricotPhase1BlockTimestamp: utils.NewUint64(0), + ApricotPhase2BlockTimestamp: utils.NewUint64(0), + ApricotPhase3BlockTimestamp: utils.NewUint64(0), + ApricotPhase4BlockTimestamp: utils.NewUint64(0), + ApricotPhase5BlockTimestamp: utils.NewUint64(0), + BanffBlockTimestamp: utils.NewUint64(0), + CortinaBlockTimestamp: utils.NewUint64(0), + DurangoBlockTimestamp: utils.NewUint64(0), }, } -// Returns the set of defined fork names +// AvailableForks returns the set of 
defined fork names func AvailableForks() []string { var availableForks []string for k := range Forks { diff --git a/coreth/tests/rlp_test_util.go b/coreth/tests/rlp_test_util.go index 01f25630..5af235bc 100644 --- a/coreth/tests/rlp_test_util.go +++ b/coreth/tests/rlp_test_util.go @@ -69,7 +69,7 @@ func FromHex(s string) ([]byte, error) { func (t *RLPTest) Run() error { outb, err := FromHex(t.Out) if err != nil { - return fmt.Errorf("invalid hex in Out") + return errors.New("invalid hex in Out") } // Handle simple decoding tests with no actual In value. @@ -97,7 +97,7 @@ func checkDecodeInterface(b []byte, isValid bool) error { case isValid && err != nil: return fmt.Errorf("decoding failed: %v", err) case !isValid && err == nil: - return fmt.Errorf("decoding of invalid value succeeded") + return errors.New("decoding of invalid value succeeded") } return nil } diff --git a/coreth/tests/state_test_util.go b/coreth/tests/state_test_util.go index abe50d67..76163cb7 100644 --- a/coreth/tests/state_test_util.go +++ b/coreth/tests/state_test_util.go @@ -27,123 +27,18 @@ package tests import ( - "encoding/json" - "fmt" - "math/big" - "strconv" - "strings" - "github.com/ava-labs/coreth/core" "github.com/ava-labs/coreth/core/state" "github.com/ava-labs/coreth/core/state/snapshot" "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/core/vm" - "github.com/ava-labs/coreth/ethdb" - "github.com/ava-labs/coreth/params" + "github.com/ava-labs/coreth/trie" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/ethdb" ) -// StateTest checks transaction processing without block context. -// See https://github.com/ethereum/EIPs/issues/176 for the test format specification. -type StateTest struct { - json stJSON -} - -// StateSubtest selects a specific configuration of a General State Test. 
-type StateSubtest struct { - Fork string - Index int -} - -func (t *StateTest) UnmarshalJSON(in []byte) error { - return json.Unmarshal(in, &t.json) -} - -type stJSON struct { - Env stEnv `json:"env"` - Pre core.GenesisAlloc `json:"pre"` - Tx stTransaction `json:"transaction"` - Out hexutil.Bytes `json:"out"` - Post map[string][]stPostState `json:"post"` -} - -type stPostState struct { - Root common.UnprefixedHash `json:"hash"` - Logs common.UnprefixedHash `json:"logs"` - TxBytes hexutil.Bytes `json:"txbytes"` - ExpectException string `json:"expectException"` - Indexes struct { - Data int `json:"data"` - Gas int `json:"gas"` - Value int `json:"value"` - } -} - -//go:generate go run github.com/fjl/gencodec -type stEnv -field-override stEnvMarshaling -out gen_stenv.go -type stEnv struct { - Coinbase common.Address `json:"currentCoinbase" gencodec:"required"` - Difficulty *big.Int `json:"currentDifficulty" gencodec:"required"` - GasLimit uint64 `json:"currentGasLimit" gencodec:"required"` - Number uint64 `json:"currentNumber" gencodec:"required"` - Timestamp uint64 `json:"currentTimestamp" gencodec:"required"` - BaseFee *big.Int `json:"currentBaseFee" gencodec:"optional"` -} - -//go:generate go run github.com/fjl/gencodec -type stTransaction -field-override stTransactionMarshaling -out gen_sttransaction.go -type stTransaction struct { - GasPrice *big.Int `json:"gasPrice"` - MaxFeePerGas *big.Int `json:"maxFeePerGas"` - MaxPriorityFeePerGas *big.Int `json:"maxPriorityFeePerGas"` - Nonce uint64 `json:"nonce"` - To string `json:"to"` - Data []string `json:"data"` - AccessLists []*types.AccessList `json:"accessLists,omitempty"` - GasLimit []uint64 `json:"gasLimit"` - Value []string `json:"value"` - PrivateKey []byte `json:"secretKey"` -} - -// GetChainConfig takes a fork definition and returns a chain config. -// The fork definition can be -// - a plain forkname, e.g. `Byzantium`, -// - a fork basename, and a list of EIPs to enable; e.g. `Byzantium+1884+1283`. 
-func GetChainConfig(forkString string) (baseConfig *params.ChainConfig, eips []int, err error) { - var ( - splitForks = strings.Split(forkString, "+") - ok bool - baseName, eipsStrings = splitForks[0], splitForks[1:] - ) - if baseConfig, ok = Forks[baseName]; !ok { - return nil, nil, UnsupportedForkError{baseName} - } - for _, eip := range eipsStrings { - if eipNum, err := strconv.Atoi(eip); err != nil { - return nil, nil, fmt.Errorf("syntax error, invalid eip number %v", eipNum) - } else { - if !vm.ValidEip(eipNum) { - return nil, nil, fmt.Errorf("syntax error, invalid eip number %v", eipNum) - } - eips = append(eips, eipNum) - } - } - return baseConfig, eips, nil -} - -// Subtests returns all valid subtests of the test. -func (t *StateTest) Subtests() []StateSubtest { - var sub []StateSubtest - for fork, pss := range t.json.Post { - for i := range pss { - sub = append(sub, StateSubtest{fork, i}) - } - } - return sub -} - func MakePreState(db ethdb.Database, accounts core.GenesisAlloc, snapshotter bool) (*snapshot.Tree, *state.StateDB) { - sdb := state.NewDatabase(db) - statedb, _ := state.New(common.Hash{}, sdb, nil) + sdb := state.NewDatabaseWithConfig(db, &trie.Config{Preimages: true}) + statedb, _ := state.New(types.EmptyRootHash, sdb, nil) for addr, a := range accounts { statedb.SetCode(addr, a.Code) statedb.SetNonce(addr, a.Nonce) @@ -157,7 +52,13 @@ func MakePreState(db ethdb.Database, accounts core.GenesisAlloc, snapshotter boo var snaps *snapshot.Tree if snapshotter { - snaps, _ = snapshot.New(db, sdb.TrieDB(), 1, common.Hash{}, root, false, true, false) + snapconfig := snapshot.Config{ + CacheSize: 1, + NoBuild: false, + AsyncBuild: false, + SkipVerify: true, + } + snaps, _ = snapshot.New(snapconfig, db, sdb.TrieDB(), common.Hash{}, root) } statedb, _ = state.New(root, sdb, snaps) return snaps, statedb diff --git a/coreth/trie/committer.go b/coreth/trie/committer.go index a430834d..1cb318c4 100644 --- a/coreth/trie/committer.go +++ 
b/coreth/trie/committer.go @@ -29,47 +29,39 @@ package trie import ( "fmt" + "github.com/ava-labs/coreth/trie/trienode" "github.com/ethereum/go-ethereum/common" ) -// leaf represents a trie leaf node -type leaf struct { - blob []byte // raw blob of leaf - parent common.Hash // the hash of parent node -} - // committer is the tool used for the trie Commit operation. The committer will // capture all dirty nodes during the commit process and keep them cached in // insertion order. type committer struct { - nodes *NodeSet + nodes *trienode.NodeSet + tracer *tracer collectLeaf bool } // newCommitter creates a new committer or picks one from the pool. -func newCommitter(owner common.Hash, collectLeaf bool) *committer { +func newCommitter(nodeset *trienode.NodeSet, tracer *tracer, collectLeaf bool) *committer { return &committer{ - nodes: NewNodeSet(owner), + nodes: nodeset, + tracer: tracer, collectLeaf: collectLeaf, } } -// Commit collapses a node down into a hash node and returns it along with -// the modified nodeset. -func (c *committer) Commit(n node) (hashNode, *NodeSet, error) { - h, err := c.commit(nil, n) - if err != nil { - return nil, nil, err - } - return h.(hashNode), c.nodes, nil +// Commit collapses a node down into a hash node. +func (c *committer) Commit(n node) hashNode { + return c.commit(nil, n).(hashNode) } // commit collapses a node down into a hash node and returns it. -func (c *committer) commit(path []byte, n node) (node, error) { +func (c *committer) commit(path []byte, n node) node { // if this path is clean, use available cached data hash, dirty := n.cache() if hash != nil && !dirty { - return hash, nil + return hash } // Commit children, then parent, and remove the dirty flag. switch cn := n.(type) { @@ -80,35 +72,28 @@ func (c *committer) commit(path []byte, n node) (node, error) { // If the child is fullNode, recursively commit, // otherwise it can only be hashNode or valueNode. 
if _, ok := cn.Val.(*fullNode); ok { - childV, err := c.commit(append(path, cn.Key...), cn.Val) - if err != nil { - return nil, err - } - collapsed.Val = childV + collapsed.Val = c.commit(append(path, cn.Key...), cn.Val) } // The key needs to be copied, since we're adding it to the // modified nodeset. collapsed.Key = hexToCompact(cn.Key) hashedNode := c.store(path, collapsed) if hn, ok := hashedNode.(hashNode); ok { - return hn, nil + return hn } - return collapsed, nil + return collapsed case *fullNode: - hashedKids, err := c.commitChildren(path, cn) - if err != nil { - return nil, err - } + hashedKids := c.commitChildren(path, cn) collapsed := cn.copy() collapsed.Children = hashedKids hashedNode := c.store(path, collapsed) if hn, ok := hashedNode.(hashNode); ok { - return hn, nil + return hn } - return collapsed, nil + return collapsed case hashNode: - return cn, nil + return cn default: // nil, valuenode shouldn't be committed panic(fmt.Sprintf("%T: invalid node: %v", n, n)) @@ -116,7 +101,7 @@ func (c *committer) commit(path []byte, n node) (node, error) { } // commitChildren commits the children of the given fullnode -func (c *committer) commitChildren(path []byte, n *fullNode) ([17]node, error) { +func (c *committer) commitChildren(path []byte, n *fullNode) [17]node { var children [17]node for i := 0; i < 16; i++ { child := n.Children[i] @@ -133,17 +118,13 @@ func (c *committer) commitChildren(path []byte, n *fullNode) ([17]node, error) { // Commit the child recursively and store the "hashed" value. // Note the returned node can be some embedded nodes, so it's // possible the type is not hashNode. - hashed, err := c.commit(append(path, byte(i)), child) - if err != nil { - return children, err - } - children[i] = hashed + children[i] = c.commit(append(path, byte(i)), child) } // For the 17th child, it's possible the type is valuenode. 
if n.Children[16] != nil { children[16] = n.Children[16] } - return children, nil + return children } // store hashes the node n and adds it to the modified nodeset. If leaf collection @@ -157,21 +138,25 @@ func (c *committer) store(path []byte, n node) node { // usually is leaf node). But small value (less than 32bytes) is not // our target (leaves in account trie only). if hash == nil { + // The node is embedded in its parent, in other words, this node + // will not be stored in the database independently, mark it as + // deleted only if the node was existent in database before. + prev, ok := c.tracer.accessList[string(path)] + if ok { + c.nodes.AddNode(path, trienode.NewWithPrev(common.Hash{}, nil, prev)) + } return n } - // We have the hash already, estimate the RLP encoding-size of the node. - // The size is used for mem tracking, does not need to be exact + // Collect the dirty node to nodeset for return. var ( - size = estimateSize(n) nhash = common.BytesToHash(hash) - mnode = &memoryNode{ - hash: nhash, - node: simplifyNode(n), - size: uint16(size), - } + node = trienode.NewWithPrev( + nhash, + nodeToBytes(n), + c.tracer.accessList[string(path)], + ) ) - // Collect the dirty node to nodeset for return. - c.nodes.add(string(path), mnode) + c.nodes.AddNode(path, node) // Collect the corresponding leaf node if it's required. We don't check // full node since it's impossible to store value in fullNode. The key @@ -179,38 +164,36 @@ func (c *committer) store(path []byte, n node) node { if c.collectLeaf { if sn, ok := n.(*shortNode); ok { if val, ok := sn.Val.(valueNode); ok { - c.nodes.addLeaf(&leaf{blob: val, parent: nhash}) + c.nodes.AddLeaf(nhash, val) } } } return hash } -// estimateSize estimates the size of an rlp-encoded node, without actually -// rlp-encoding it (zero allocs). 
This method has been experimentally tried, and with a trie -// with 1000 leaves, the only errors above 1% are on small shortnodes, where this -// method overestimates by 2 or 3 bytes (e.g. 37 instead of 35) -func estimateSize(n node) int { +// mptResolver the children resolver in merkle-patricia-tree. +type mptResolver struct{} + +// ForEach implements childResolver, decodes the provided node and +// traverses the children inside. +func (resolver mptResolver) ForEach(node []byte, onChild func(common.Hash)) { + forGatherChildren(mustDecodeNodeUnsafe(nil, node), onChild) +} + +// forGatherChildren traverses the node hierarchy and invokes the callback +// for all the hashnode children. +func forGatherChildren(n node, onChild func(hash common.Hash)) { switch n := n.(type) { case *shortNode: - // A short node contains a compacted key, and a value. - return 3 + len(n.Key) + estimateSize(n.Val) + forGatherChildren(n.Val, onChild) case *fullNode: - // A full node contains up to 16 hashes (some nils), and a key - s := 3 for i := 0; i < 16; i++ { - if child := n.Children[i]; child != nil { - s += estimateSize(child) - } else { - s++ - } + forGatherChildren(n.Children[i], onChild) } - return s - case valueNode: - return 1 + len(n) case hashNode: - return 1 + len(n) + onChild(common.BytesToHash(n)) + case valueNode, nil: default: - panic(fmt.Sprintf("node type %T", n)) + panic(fmt.Sprintf("unknown node type: %T", n)) } } diff --git a/coreth/trie/database_test.go b/coreth/trie/database_test.go index 3a943a8a..685c7385 100644 --- a/coreth/trie/database_test.go +++ b/coreth/trie/database_test.go @@ -27,17 +27,19 @@ package trie import ( - "testing" - - "github.com/ava-labs/coreth/ethdb/memorydb" - "github.com/ethereum/go-ethereum/common" + "github.com/ava-labs/coreth/core/rawdb" + "github.com/ava-labs/coreth/trie/triedb/hashdb" + "github.com/ethereum/go-ethereum/ethdb" ) -// Tests that the trie database returns a missing trie node error if attempting -// to retrieve the meta 
root. -func TestDatabaseMetarootFetch(t *testing.T) { - db := NewDatabase(memorydb.New()) - if _, err := db.RawNode(common.Hash{}); err == nil { - t.Fatalf("metaroot retrieval succeeded") +// newTestDatabase initializes the trie database with specified scheme. +func newTestDatabase(diskdb ethdb.Database, scheme string) *Database { + db := prepare(diskdb, nil) + if scheme == rawdb.HashScheme { + db.backend = hashdb.New(diskdb, db.cleans, mptResolver{}) } + //} else { + // db.backend = snap.New(diskdb, db.cleans, nil) + //} + return db } diff --git a/coreth/trie/database_wrap.go b/coreth/trie/database_wrap.go new file mode 100644 index 00000000..84d449b7 --- /dev/null +++ b/coreth/trie/database_wrap.go @@ -0,0 +1,283 @@ +// Copyright 2022 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package trie + +import ( + "errors" + "runtime" + "time" + + "github.com/ava-labs/coreth/trie/triedb/hashdb" + "github.com/ava-labs/coreth/trie/trienode" + "github.com/ava-labs/coreth/utils" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/log" +) + +const ( + cacheStatsUpdateFrequency = 1000 // update trie cache stats once per 1000 ops +) + +// Config defines all necessary options for database. 
+type Config struct { + Cache int // Memory allowance (MB) to use for caching trie nodes in memory + Journal string // Journal of clean cache to survive node restarts + Preimages bool // Flag whether the preimage of trie key is recorded + StatsPrefix string // Prefix for cache stats (disabled if empty) +} + +// backend defines the methods needed to access/update trie nodes in different +// state scheme. +type backend interface { + // Scheme returns the identifier of used storage scheme. + Scheme() string + + // Initialized returns an indicator if the state data is already initialized + // according to the state scheme. + Initialized(genesisRoot common.Hash) bool + + // Size returns the current storage size of the memory cache in front of the + // persistent database layer. + Size() common.StorageSize + + // Update performs a state transition by committing dirty nodes contained + // in the given set in order to update state from the specified parent to + // the specified root. + Update(root common.Hash, parent common.Hash, nodes *trienode.MergedNodeSet) error + UpdateAndReferenceRoot(root common.Hash, parent common.Hash, nodes *trienode.MergedNodeSet) error + + // Commit writes all relevant trie nodes belonging to the specified state + // to disk. Report specifies whether logs will be displayed in info level. + Commit(root common.Hash, report bool) error + + // Close closes the trie database backend and releases all held resources. + Close() error +} + +type cache interface { + HasGet([]byte, []byte) ([]byte, bool) + Del([]byte) + Set([]byte, []byte) + SaveToFileConcurrent(dir string, threads int) error +} + +// Database is the wrapper of the underlying backend which is shared by different +// types of node backend as an entrypoint. It's responsible for all interactions +// relevant with trie nodes and node preimages. 
+type Database struct { + config *Config // Configuration for trie database + diskdb ethdb.Database // Persistent database to store the snapshot + cleans cache // Megabytes permitted using for read caches + preimages *preimageStore // The store for caching preimages + backend backend // The backend for managing trie nodes +} + +// prepare initializes the database with provided configs, but the +// database backend is still left as nil. +func prepare(diskdb ethdb.Database, config *Config) *Database { + var cleans cache + if config != nil && config.Cache > 0 { + cleans = utils.NewMeteredCache(config.Cache*1024*1024, config.Journal, config.StatsPrefix, cacheStatsUpdateFrequency) + } + var preimages *preimageStore + if config != nil && config.Preimages { + preimages = newPreimageStore(diskdb) + } + return &Database{ + config: config, + diskdb: diskdb, + cleans: cleans, + preimages: preimages, + } +} + +// NewDatabase initializes the trie database with default settings, namely +// the legacy hash-based scheme is used by default. +func NewDatabase(diskdb ethdb.Database) *Database { + return NewDatabaseWithConfig(diskdb, nil) +} + +// NewDatabaseWithConfig initializes the trie database with provided configs. +// The path-based scheme is not activated yet, always initialized with legacy +// hash-based scheme by default. +func NewDatabaseWithConfig(diskdb ethdb.Database, config *Config) *Database { + db := prepare(diskdb, config) + db.backend = hashdb.New(diskdb, db.cleans, mptResolver{}) + return db +} + +// Reader returns a reader for accessing all trie nodes with provided state root. +// Nil is returned in case the state is not available. +func (db *Database) Reader(blockRoot common.Hash) Reader { + return db.backend.(*hashdb.Database).Reader(blockRoot) +} + +// Update performs a state transition by committing dirty nodes contained in the +// given set in order to update state from the specified parent to the specified +// root. 
The held pre-images accumulated up to this point will be flushed in case +// the size exceeds the threshold. +func (db *Database) Update(root common.Hash, parent common.Hash, nodes *trienode.MergedNodeSet) error { + if db.preimages != nil { + db.preimages.commit(false) + } + return db.backend.Update(root, parent, nodes) +} + +func (db *Database) UpdateAndReferenceRoot(root common.Hash, parent common.Hash, nodes *trienode.MergedNodeSet) error { + if db.preimages != nil { + db.preimages.commit(false) + } + return db.backend.UpdateAndReferenceRoot(root, parent, nodes) +} + +// Commit iterates over all the children of a particular node, writes them out +// to disk. As a side effect, all pre-images accumulated up to this point are +// also written. +func (db *Database) Commit(root common.Hash, report bool) error { + if db.preimages != nil { + db.preimages.commit(true) + } + return db.backend.Commit(root, report) +} + +// Size returns the storage size of dirty trie nodes in front of the persistent +// database and the size of cached preimages. +func (db *Database) Size() (common.StorageSize, common.StorageSize) { + var ( + storages common.StorageSize + preimages common.StorageSize + ) + storages = db.backend.Size() + if db.preimages != nil { + preimages = db.preimages.size() + } + return storages, preimages +} + +// Initialized returns an indicator if the state data is already initialized +// according to the state scheme. +func (db *Database) Initialized(genesisRoot common.Hash) bool { + return db.backend.Initialized(genesisRoot) +} + +// Scheme returns the node scheme used in the database. +func (db *Database) Scheme() string { + return db.backend.Scheme() +} + +// Close flushes the dangling preimages to disk and closes the trie database. +// It is meant to be called when closing the blockchain object, so that all +// resources held can be released correctly. 
+func (db *Database) Close() error { + if db.preimages != nil { + db.preimages.commit(true) + } + return db.backend.Close() +} + +// saveCache saves clean state cache to given directory path +// using specified CPU cores. +func (db *Database) saveCache(dir string, threads int) error { + if db.cleans == nil { + return nil + } + log.Info("Writing clean trie cache to disk", "path", dir, "threads", threads) + + start := time.Now() + err := db.cleans.SaveToFileConcurrent(dir, threads) + if err != nil { + log.Error("Failed to persist clean trie cache", "error", err) + return err + } + log.Info("Persisted the clean trie cache", "path", dir, "elapsed", common.PrettyDuration(time.Since(start))) + return nil +} + +// SaveCache atomically saves fast cache data to the given dir using all +// available CPU cores. +func (db *Database) SaveCache(dir string) error { + return db.saveCache(dir, runtime.GOMAXPROCS(0)) +} + +// SaveCachePeriodically atomically saves fast cache data to the given dir with +// the specified interval. All dump operation will only use a single CPU core. +func (db *Database) SaveCachePeriodically(dir string, interval time.Duration, stopCh <-chan struct{}) { + ticker := time.NewTicker(interval) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + db.saveCache(dir, 1) + case <-stopCh: + return + } + } +} + +// Cap iteratively flushes old but still referenced trie nodes until the total +// memory usage goes below the given threshold. The held pre-images accumulated +// up to this point will be flushed in case the size exceeds the threshold. +// +// It's only supported by hash-based database and will return an error for others. +func (db *Database) Cap(limit common.StorageSize) error { + hdb, ok := db.backend.(*hashdb.Database) + if !ok { + return errors.New("not supported") + } + if db.preimages != nil { + db.preimages.commit(false) + } + return hdb.Cap(limit) +} + +// Reference adds a new reference from a parent node to a child node. 
This function +// is used to add reference between internal trie node and external node(e.g. storage +// trie root), all internal trie nodes are referenced together by database itself. +// +// It's only supported by hash-based database and will return an error for others. +func (db *Database) Reference(root common.Hash, parent common.Hash) error { + hdb, ok := db.backend.(*hashdb.Database) + if !ok { + return errors.New("not supported") + } + hdb.Reference(root, parent) + return nil +} + +// Dereference removes an existing reference from a root node. It's only +// supported by hash-based database and will return an error for others. +func (db *Database) Dereference(root common.Hash) error { + hdb, ok := db.backend.(*hashdb.Database) + if !ok { + return errors.New("not supported") + } + hdb.Dereference(root) + return nil +} + +// Node retrieves the rlp-encoded node blob with provided node hash. It's +// only supported by hash-based database and will return an error for others. +// Note, this function should be deprecated once ETH66 is deprecated. 
+func (db *Database) Node(hash common.Hash) ([]byte, error) { + hdb, ok := db.backend.(*hashdb.Database) + if !ok { + return nil, errors.New("not supported") + } + return hdb.Node(hash) +} diff --git a/coreth/trie/encoding_test.go b/coreth/trie/encoding_test.go index e83071bf..65cc5333 100644 --- a/coreth/trie/encoding_test.go +++ b/coreth/trie/encoding_test.go @@ -28,6 +28,7 @@ package trie import ( "bytes" + crand "crypto/rand" "encoding/hex" "math/rand" "testing" @@ -88,17 +89,17 @@ func TestHexKeybytes(t *testing.T) { } func TestHexToCompactInPlace(t *testing.T) { - for i, keyS := range []string{ + for i, key := range []string{ "00", "060a040c0f000a090b040803010801010900080d090a0a0d0903000b10", "10", } { - hexBytes, _ := hex.DecodeString(keyS) + hexBytes, _ := hex.DecodeString(key) exp := hexToCompact(hexBytes) sz := hexToCompactInPlace(hexBytes) got := hexBytes[:sz] if !bytes.Equal(exp, got) { - t.Fatalf("test %d: encoding err\ninp %v\ngot %x\nexp %x\n", i, keyS, got, exp) + t.Fatalf("test %d: encoding err\ninp %v\ngot %x\nexp %x\n", i, key, got, exp) } } } @@ -107,7 +108,7 @@ func TestHexToCompactInPlaceRandom(t *testing.T) { for i := 0; i < 10000; i++ { l := rand.Intn(128) key := make([]byte, l) - rand.Read(key) + crand.Read(key) hexBytes := keybytesToHex(key) hexOrig := []byte(string(hexBytes)) exp := hexToCompact(hexBytes) diff --git a/coreth/trie/errors.go b/coreth/trie/errors.go index d3a9af4c..b6f90132 100644 --- a/coreth/trie/errors.go +++ b/coreth/trie/errors.go @@ -32,7 +32,7 @@ import ( "github.com/ethereum/go-ethereum/common" ) -// MissingNodeError is returned by the trie functions (TryGet, TryUpdate, TryDelete) +// MissingNodeError is returned by the trie functions (Get, Update, Delete) // in the case where a trie node is not present in the local database. It contains // information necessary for retrieving the missing node. 
type MissingNodeError struct { diff --git a/coreth/trie/iterator.go b/coreth/trie/iterator.go index 8742a7a5..74c761f3 100644 --- a/coreth/trie/iterator.go +++ b/coreth/trie/iterator.go @@ -31,10 +31,17 @@ import ( "container/heap" "errors" - "github.com/ava-labs/coreth/ethdb" + "github.com/ava-labs/coreth/core/types" "github.com/ethereum/go-ethereum/common" ) +// NodeResolver is used for looking up trie nodes before reaching into the real +// persistent layer. This is not mandatory, rather is an optimization for cases +// where trie nodes can be recovered from some external mechanism without reading +// from disk. In those cases, this resolver allows short circuiting accesses and +// returning them from memory. +type NodeResolver func(owner common.Hash, path []byte, hash common.Hash) []byte + // Iterator is a key-value trie iterator that traverses a Trie. type Iterator struct { nodeIt NodeIterator @@ -117,8 +124,8 @@ type NodeIterator interface { // to the value after calling Next. LeafProof() [][]byte - // AddResolver sets an intermediate database to use for looking up trie nodes - // before reaching into the real persistent layer. + // AddResolver sets a node resolver to use for looking up trie nodes before + // reaching into the real persistent layer. // // This is not required for normal operation, rather is an optimization for // cases where trie nodes can be recovered from some external mechanism without @@ -128,7 +135,7 @@ type NodeIterator interface { // Before adding a similar mechanism to any other place in Geth, consider // making trie.Database an interface and wrapping at that level. It's a huge // refactor, but it could be worth it if another occurrence arises. 
- AddResolver(ethdb.KeyValueReader) + AddResolver(NodeResolver) } // nodeIteratorState represents the iteration state at one particular node of the @@ -147,7 +154,7 @@ type nodeIterator struct { path []byte // Path to the current node err error // Failure set in case of an internal error in the iterator - resolver ethdb.KeyValueReader // Optional intermediate resolver above the disk layer + resolver NodeResolver // optional node resolver for avoiding disk hits } // errIteratorEnd is stored in nodeIterator.err when iteration is done. @@ -164,7 +171,7 @@ func (e seekError) Error() string { } func newNodeIterator(trie *Trie, start []byte) NodeIterator { - if trie.Hash() == emptyRoot { + if trie.Hash() == types.EmptyRootHash { return &nodeIterator{ trie: trie, err: errIteratorEnd, @@ -175,7 +182,7 @@ func newNodeIterator(trie *Trie, start []byte) NodeIterator { return it } -func (it *nodeIterator) AddResolver(resolver ethdb.KeyValueReader) { +func (it *nodeIterator) AddResolver(resolver NodeResolver) { it.resolver = resolver } @@ -306,7 +313,7 @@ func (it *nodeIterator) seek(prefix []byte) error { func (it *nodeIterator) init() (*nodeIteratorState, error) { root := it.trie.Hash() state := &nodeIteratorState{node: it.trie.root, index: -1} - if root != emptyRoot { + if root != types.EmptyRootHash { state.hash = root } return state, state.resolve(it, nil) @@ -379,22 +386,39 @@ func (it *nodeIterator) peekSeek(seekKey []byte) (*nodeIteratorState, *int, []by func (it *nodeIterator) resolveHash(hash hashNode, path []byte) (node, error) { if it.resolver != nil { - if blob, err := it.resolver.Get(hash); err == nil && len(blob) > 0 { + if blob := it.resolver(it.trie.owner, path, common.BytesToHash(hash)); len(blob) > 0 { if resolved, err := decodeNode(hash, blob); err == nil { return resolved, nil } } } - return it.trie.resolveHash(hash, path) + // Retrieve the specified node from the underlying node reader. 
+ // it.trie.resolveAndTrack is not used since in that function the + // loaded blob will be tracked, while it's not required here since + // all loaded nodes won't be linked to trie at all and track nodes + // may lead to out-of-memory issue. + blob, err := it.trie.reader.node(path, common.BytesToHash(hash)) + if err != nil { + return nil, err + } + // The raw-blob format nodes are loaded either from the + // clean cache or the database, they are all in their own + // copy and safe to use unsafe decoder. + return mustDecodeNodeUnsafe(hash, blob), nil } func (it *nodeIterator) resolveBlob(hash hashNode, path []byte) ([]byte, error) { if it.resolver != nil { - if blob, err := it.resolver.Get(hash); err == nil && len(blob) > 0 { + if blob := it.resolver(it.trie.owner, path, common.BytesToHash(hash)); len(blob) > 0 { return blob, nil } } - return it.trie.resolveBlob(hash, path) + // Retrieve the specified node from the underlying node reader. + // it.trie.resolveAndTrack is not used since in that function the + // loaded blob will be tracked, while it's not required here since + // all loaded nodes won't be linked to trie at all and track nodes + // may lead to out-of-memory issue. 
+ return it.trie.reader.node(path, common.BytesToHash(hash)) } func (st *nodeIteratorState) resolve(it *nodeIterator, path []byte) error { @@ -589,7 +613,7 @@ func (it *differenceIterator) NodeBlob() []byte { return it.b.NodeBlob() } -func (it *differenceIterator) AddResolver(resolver ethdb.KeyValueReader) { +func (it *differenceIterator) AddResolver(resolver NodeResolver) { panic("not implemented") } @@ -704,7 +728,7 @@ func (it *unionIterator) NodeBlob() []byte { return (*it.items)[0].NodeBlob() } -func (it *unionIterator) AddResolver(resolver ethdb.KeyValueReader) { +func (it *unionIterator) AddResolver(resolver NodeResolver) { panic("not implemented") } diff --git a/coreth/trie/iterator_test.go b/coreth/trie/iterator_test.go index ac017cdf..d53f74a8 100644 --- a/coreth/trie/iterator_test.go +++ b/coreth/trie/iterator_test.go @@ -34,10 +34,12 @@ import ( "testing" "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/ethdb" - "github.com/ava-labs/coreth/ethdb/memorydb" + "github.com/ava-labs/coreth/core/types" + "github.com/ava-labs/coreth/trie/trienode" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/ethdb/memorydb" ) func TestEmptyIterator(t *testing.T) { @@ -68,15 +70,12 @@ func TestIterator(t *testing.T) { all := make(map[string]string) for _, val := range vals { all[val.k] = val.v - trie.Update([]byte(val.k), []byte(val.v)) + trie.MustUpdate([]byte(val.k), []byte(val.v)) } - root, nodes, err := trie.Commit(false) - if err != nil { - t.Fatalf("Failed to commit trie %v", err) - } - db.Update(NewWithNodeSet(nodes)) + root, nodes := trie.Commit(false) + db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)) - trie, _ = New(common.Hash{}, root, db) + trie, _ = New(TrieID(root), db) found := make(map[string]string) it := NewIterator(trie.NodeIterator(nil)) for it.Next() { @@ -102,8 +101,8 @@ func TestIteratorLargeData(t 
*testing.T) { for i := byte(0); i < 255; i++ { value := &kv{common.LeftPadBytes([]byte{i}, 32), []byte{i}, false} value2 := &kv{common.LeftPadBytes([]byte{10, i}, 32), []byte{i}, false} - trie.Update(value.k, value.v) - trie.Update(value2.k, value2.v) + trie.MustUpdate(value.k, value.v) + trie.MustUpdate(value2.k, value2.v) vals[string(value.k)] = value vals[string(value2.k)] = value2 } @@ -128,39 +127,61 @@ func TestIteratorLargeData(t *testing.T) { } } +type iterationElement struct { + hash common.Hash + path []byte + blob []byte +} + // Tests that the node iterator indeed walks over the entire database contents. func TestNodeIteratorCoverage(t *testing.T) { + testNodeIteratorCoverage(t, rawdb.HashScheme) + //testNodeIteratorCoverage(t, rawdb.PathScheme) +} + +func testNodeIteratorCoverage(t *testing.T, scheme string) { // Create some arbitrary test trie to iterate - db, trie, _ := makeTestTrie() + db, nodeDb, trie, _ := makeTestTrie(scheme) // Gather all the node hashes found by the iterator - hashes := make(map[common.Hash]struct{}) + var elements = make(map[common.Hash]iterationElement) for it := trie.NodeIterator(nil); it.Next(true); { if it.Hash() != (common.Hash{}) { - hashes[it.Hash()] = struct{}{} + elements[it.Hash()] = iterationElement{ + hash: it.Hash(), + path: common.CopyBytes(it.Path()), + blob: common.CopyBytes(it.NodeBlob()), + } } } // Cross check the hashes and the database itself - for hash := range hashes { - if _, err := db.RawNode(hash); err != nil { - t.Errorf("failed to retrieve reported node %x: %v", hash, err) + for _, element := range elements { + if blob, err := nodeDb.Reader(trie.Hash()).Node(common.Hash{}, element.path, element.hash); err != nil { + t.Errorf("failed to retrieve reported node %x: %v", element.hash, err) + } else if !bytes.Equal(blob, element.blob) { + t.Errorf("node blob is different, want %v got %v", element.blob, blob) } } - for hash, obj := range db.dirties { - if obj != nil && hash != (common.Hash{}) { - if _, ok 
:= hashes[hash]; !ok { - t.Errorf("state entry not reported %x", hash) - } - } - } - it := db.diskdb.NewIterator(nil, nil) + var ( + count int + it = db.NewIterator(nil, nil) + ) for it.Next() { - key := it.Key() - if _, ok := hashes[common.BytesToHash(key)]; !ok { - t.Errorf("state entry not reported %x", key) + res, _, _ := isTrieNode(nodeDb.Scheme(), it.Key(), it.Value()) + if !res { + continue + } + count += 1 + if elem, ok := elements[crypto.Keccak256Hash(it.Value())]; !ok { + t.Error("state entry not reported") + } else if !bytes.Equal(it.Value(), elem.blob) { + t.Errorf("node blob is different, want %v got %v", elem.blob, it.Value()) } } it.Release() + if count != len(elements) { + t.Errorf("state entry is mismatched %d %d", count, len(elements)) + } } type kvs struct{ k, v string } @@ -191,7 +212,7 @@ var testdata2 = []kvs{ func TestIteratorSeek(t *testing.T) { trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) for _, val := range testdata1 { - trie.Update([]byte(val.k), []byte(val.v)) + trie.MustUpdate([]byte(val.k), []byte(val.v)) } // Seek to the middle. 
@@ -233,20 +254,20 @@ func TestDifferenceIterator(t *testing.T) { dba := NewDatabase(rawdb.NewMemoryDatabase()) triea := NewEmpty(dba) for _, val := range testdata1 { - triea.Update([]byte(val.k), []byte(val.v)) + triea.MustUpdate([]byte(val.k), []byte(val.v)) } - rootA, nodesA, _ := triea.Commit(false) - dba.Update(NewWithNodeSet(nodesA)) - triea, _ = New(common.Hash{}, rootA, dba) + rootA, nodesA := triea.Commit(false) + dba.Update(rootA, types.EmptyRootHash, trienode.NewWithNodeSet(nodesA)) + triea, _ = New(TrieID(rootA), dba) dbb := NewDatabase(rawdb.NewMemoryDatabase()) trieb := NewEmpty(dbb) for _, val := range testdata2 { - trieb.Update([]byte(val.k), []byte(val.v)) + trieb.MustUpdate([]byte(val.k), []byte(val.v)) } - rootB, nodesB, _ := trieb.Commit(false) - dbb.Update(NewWithNodeSet(nodesB)) - trieb, _ = New(common.Hash{}, rootB, dbb) + rootB, nodesB := trieb.Commit(false) + dbb.Update(rootB, types.EmptyRootHash, trienode.NewWithNodeSet(nodesB)) + trieb, _ = New(TrieID(rootB), dbb) found := make(map[string]string) di, _ := NewDifferenceIterator(triea.NodeIterator(nil), trieb.NodeIterator(nil)) @@ -275,20 +296,20 @@ func TestUnionIterator(t *testing.T) { dba := NewDatabase(rawdb.NewMemoryDatabase()) triea := NewEmpty(dba) for _, val := range testdata1 { - triea.Update([]byte(val.k), []byte(val.v)) + triea.MustUpdate([]byte(val.k), []byte(val.v)) } - rootA, nodesA, _ := triea.Commit(false) - dba.Update(NewWithNodeSet(nodesA)) - triea, _ = New(common.Hash{}, rootA, dba) + rootA, nodesA := triea.Commit(false) + dba.Update(rootA, types.EmptyRootHash, trienode.NewWithNodeSet(nodesA)) + triea, _ = New(TrieID(rootA), dba) dbb := NewDatabase(rawdb.NewMemoryDatabase()) trieb := NewEmpty(dbb) for _, val := range testdata2 { - trieb.Update([]byte(val.k), []byte(val.v)) + trieb.MustUpdate([]byte(val.k), []byte(val.v)) } - rootB, nodesB, _ := trieb.Commit(false) - dbb.Update(NewWithNodeSet(nodesB)) - trieb, _ = New(common.Hash{}, rootB, dbb) + rootB, nodesB := 
trieb.Commit(false) + dbb.Update(rootB, types.EmptyRootHash, trienode.NewWithNodeSet(nodesB)) + trieb, _ = New(TrieID(rootB), dbb) di, _ := NewUnionIterator([]NodeIterator{triea.NodeIterator(nil), trieb.NodeIterator(nil)}) it := NewIterator(di) @@ -327,85 +348,104 @@ func TestUnionIterator(t *testing.T) { func TestIteratorNoDups(t *testing.T) { tr := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) for _, val := range testdata1 { - tr.Update([]byte(val.k), []byte(val.v)) + tr.MustUpdate([]byte(val.k), []byte(val.v)) } checkIteratorNoDups(t, tr.NodeIterator(nil), nil) } // This test checks that nodeIterator.Next can be retried after inserting missing trie nodes. -func TestIteratorContinueAfterErrorDisk(t *testing.T) { testIteratorContinueAfterError(t, false) } -func TestIteratorContinueAfterErrorMemonly(t *testing.T) { testIteratorContinueAfterError(t, true) } +func TestIteratorContinueAfterError(t *testing.T) { + testIteratorContinueAfterError(t, false, rawdb.HashScheme) + testIteratorContinueAfterError(t, true, rawdb.HashScheme) + // testIteratorContinueAfterError(t, false, rawdb.PathScheme) + // testIteratorContinueAfterError(t, true, rawdb.PathScheme) +} -func testIteratorContinueAfterError(t *testing.T, memonly bool) { - diskdb := memorydb.New() - triedb := NewDatabase(diskdb) +func testIteratorContinueAfterError(t *testing.T, memonly bool, scheme string) { + diskdb := rawdb.NewMemoryDatabase() + tdb := newTestDatabase(diskdb, scheme) - tr := NewEmpty(triedb) + tr := NewEmpty(tdb) for _, val := range testdata1 { - tr.Update([]byte(val.k), []byte(val.v)) + tr.MustUpdate([]byte(val.k), []byte(val.v)) } - _, nodes, _ := tr.Commit(false) - triedb.Update(NewWithNodeSet(nodes)) + root, nodes := tr.Commit(false) + tdb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)) if !memonly { - triedb.Commit(tr.Hash(), true, nil) + tdb.Commit(root, false) } + tr, _ = New(TrieID(root), tdb) wantNodeCount := checkIteratorNoDups(t, tr.NodeIterator(nil), nil) var ( - 
diskKeys [][]byte - memKeys []common.Hash + paths [][]byte + hashes []common.Hash ) if memonly { - memKeys = triedb.Nodes() + for path, n := range nodes.Nodes { + paths = append(paths, []byte(path)) + hashes = append(hashes, n.Hash) + } } else { it := diskdb.NewIterator(nil, nil) for it.Next() { - diskKeys = append(diskKeys, it.Key()) + ok, path, hash := isTrieNode(tdb.Scheme(), it.Key(), it.Value()) + if !ok { + continue + } + paths = append(paths, path) + hashes = append(hashes, hash) } it.Release() } for i := 0; i < 20; i++ { // Create trie that will load all nodes from DB. - tr, _ := New(common.Hash{}, tr.Hash(), triedb) + tr, _ := New(TrieID(tr.Hash()), tdb) // Remove a random node from the database. It can't be the root node // because that one is already loaded. var ( - rkey common.Hash - rval []byte - robj *cachedNode + rval []byte + rpath []byte + rhash common.Hash ) for { if memonly { - rkey = memKeys[rand.Intn(len(memKeys))] + rpath = paths[rand.Intn(len(paths))] + n := nodes.Nodes[string(rpath)] + if n == nil { + continue + } + rhash = n.Hash } else { - copy(rkey[:], diskKeys[rand.Intn(len(diskKeys))]) + index := rand.Intn(len(paths)) + rpath = paths[index] + rhash = hashes[index] } - if rkey != tr.Hash() { + if rhash != tr.Hash() { break } } if memonly { - robj = triedb.dirties[rkey] - delete(triedb.dirties, rkey) + tr.reader.banned = map[string]struct{}{string(rpath): {}} } else { - rval, _ = diskdb.Get(rkey[:]) - diskdb.Delete(rkey[:]) + rval = rawdb.ReadTrieNode(diskdb, common.Hash{}, rpath, rhash, tdb.Scheme()) + rawdb.DeleteTrieNode(diskdb, common.Hash{}, rpath, rhash, tdb.Scheme()) } // Iterate until the error is hit. seen := make(map[string]bool) it := tr.NodeIterator(nil) checkIteratorNoDups(t, it, seen) missing, ok := it.Error().(*MissingNodeError) - if !ok || missing.NodeHash != rkey { + if !ok || missing.NodeHash != rhash { t.Fatal("didn't hit missing node, got", it.Error()) } // Add the node back and continue iteration. 
if memonly { - triedb.dirties[rkey] = robj + delete(tr.reader.banned, string(rpath)) } else { - diskdb.Put(rkey[:], rval) + rawdb.WriteTrieNode(diskdb, common.Hash{}, rpath, rhash, rval, tdb.Scheme()) } checkIteratorNoDups(t, it, seen) if it.Error() != nil { @@ -420,42 +460,48 @@ func testIteratorContinueAfterError(t *testing.T, memonly bool) { // Similar to the test above, this one checks that failure to create nodeIterator at a // certain key prefix behaves correctly when Next is called. The expectation is that Next // should retry seeking before returning true for the first time. -func TestIteratorContinueAfterSeekErrorDisk(t *testing.T) { - testIteratorContinueAfterSeekError(t, false) -} -func TestIteratorContinueAfterSeekErrorMemonly(t *testing.T) { - testIteratorContinueAfterSeekError(t, true) +func TestIteratorContinueAfterSeekError(t *testing.T) { + testIteratorContinueAfterSeekError(t, false, rawdb.HashScheme) + testIteratorContinueAfterSeekError(t, true, rawdb.HashScheme) + // testIteratorContinueAfterSeekError(t, false, rawdb.PathScheme) + // testIteratorContinueAfterSeekError(t, true, rawdb.PathScheme) } -func testIteratorContinueAfterSeekError(t *testing.T, memonly bool) { +func testIteratorContinueAfterSeekError(t *testing.T, memonly bool, scheme string) { // Commit test trie to db, then remove the node containing "bars". 
- diskdb := memorydb.New() - triedb := NewDatabase(diskdb) - + var ( + barNodePath []byte + barNodeHash = common.HexToHash("05041990364eb72fcb1127652ce40d8bab765f2bfe53225b1170d276cc101c2e") + ) + diskdb := rawdb.NewMemoryDatabase() + triedb := newTestDatabase(diskdb, scheme) ctr := NewEmpty(triedb) for _, val := range testdata1 { - ctr.Update([]byte(val.k), []byte(val.v)) + ctr.MustUpdate([]byte(val.k), []byte(val.v)) } - root, nodes, _ := ctr.Commit(false) - triedb.Update(NewWithNodeSet(nodes)) + root, nodes := ctr.Commit(false) + for path, n := range nodes.Nodes { + if n.Hash == barNodeHash { + barNodePath = []byte(path) + break + } + } + triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)) if !memonly { - triedb.Commit(root, true, nil) + triedb.Commit(root, false) } - barNodeHash := common.HexToHash("05041990364eb72fcb1127652ce40d8bab765f2bfe53225b1170d276cc101c2e") var ( barNodeBlob []byte - barNodeObj *cachedNode ) + tr, _ := New(TrieID(root), triedb) if memonly { - barNodeObj = triedb.dirties[barNodeHash] - delete(triedb.dirties, barNodeHash) + tr.reader.banned = map[string]struct{}{string(barNodePath): {}} } else { - barNodeBlob, _ = diskdb.Get(barNodeHash[:]) - diskdb.Delete(barNodeHash[:]) + barNodeBlob = rawdb.ReadTrieNode(diskdb, common.Hash{}, barNodePath, barNodeHash, triedb.Scheme()) + rawdb.DeleteTrieNode(diskdb, common.Hash{}, barNodePath, barNodeHash, triedb.Scheme()) } // Create a new iterator that seeks to "bars". Seeking can't proceed because // the node is missing. - tr, _ := New(common.Hash{}, root, triedb) it := tr.NodeIterator([]byte("bars")) missing, ok := it.Error().(*MissingNodeError) if !ok { @@ -465,9 +511,9 @@ func testIteratorContinueAfterSeekError(t *testing.T, memonly bool) { } // Reinsert the missing node. 
if memonly { - triedb.dirties[barNodeHash] = barNodeObj + delete(tr.reader.banned, string(barNodePath)) } else { - diskdb.Put(barNodeHash[:], barNodeBlob) + rawdb.WriteTrieNode(diskdb, common.Hash{}, barNodePath, barNodeHash, barNodeBlob, triedb.Scheme()) } // Check that iteration produces the right set of values. if err := checkIteratorOrder(testdata1[2:], NewIterator(it)); err != nil { @@ -488,6 +534,11 @@ func checkIteratorNoDups(t *testing.T, it NodeIterator, seen map[string]bool) in return len(seen) } +func TestIteratorNodeBlob(t *testing.T) { + testIteratorNodeBlob(t, rawdb.HashScheme) + //testIteratorNodeBlob(t, rawdb.PathScheme) +} + type loggingDb struct { getCount uint64 backend ethdb.KeyValueStore @@ -519,10 +570,13 @@ func (l *loggingDb) NewBatchWithSize(size int) ethdb.Batch { } func (l *loggingDb) NewIterator(prefix []byte, start []byte) ethdb.Iterator { - fmt.Printf("NewIterator\n") return l.backend.NewIterator(prefix, start) } +func (l *loggingDb) NewSnapshot() (ethdb.Snapshot, error) { + return l.backend.NewSnapshot() +} + func (l *loggingDb) Stat(property string) (string, error) { return l.backend.Stat(property) } @@ -539,8 +593,8 @@ func (l *loggingDb) Close() error { func makeLargeTestTrie() (*Database, *StateTrie, *loggingDb) { // Create an empty trie logDb := &loggingDb{0, memorydb.New()} - triedb := NewDatabase(logDb) - trie, _ := NewStateTrie(common.Hash{}, common.Hash{}, triedb) + triedb := NewDatabase(rawdb.NewDatabase(logDb)) + trie, _ := NewStateTrie(TrieID(types.EmptyRootHash), triedb) // Fill it with some arbitrary data for i := 0; i < 10000; i++ { @@ -550,10 +604,10 @@ func makeLargeTestTrie() (*Database, *StateTrie, *loggingDb) { binary.BigEndian.PutUint64(val, uint64(i)) key = crypto.Keccak256(key) val = crypto.Keccak256(val) - trie.Update(key, val) + trie.MustUpdate(key, val) } - _, nodes, _ := trie.Commit(false) - triedb.Update(NewWithNodeSet(nodes)) + root, nodes := trie.Commit(false) + triedb.Update(root, types.EmptyRootHash, 
trienode.NewWithNodeSet(nodes)) // Return the generated trie return triedb, trie, logDb } @@ -572,10 +626,10 @@ func TestNodeIteratorLargeTrie(t *testing.T) { } } -func TestIteratorNodeBlob(t *testing.T) { +func testIteratorNodeBlob(t *testing.T, scheme string) { var ( - db = memorydb.New() - triedb = NewDatabase(db) + db = rawdb.NewMemoryDatabase() + triedb = newTestDatabase(db, scheme) trie = NewEmpty(triedb) ) vals := []struct{ k, v string }{ @@ -590,13 +644,14 @@ func TestIteratorNodeBlob(t *testing.T) { all := make(map[string]string) for _, val := range vals { all[val.k] = val.v - trie.Update([]byte(val.k), []byte(val.v)) + trie.MustUpdate([]byte(val.k), []byte(val.v)) } - _, nodes, _ := trie.Commit(false) - triedb.Update(NewWithNodeSet(nodes)) - triedb.Cap(0) + root, nodes := trie.Commit(false) + triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)) + triedb.Commit(root, false) - found := make(map[common.Hash][]byte) + var found = make(map[common.Hash][]byte) + trie, _ = New(TrieID(root), triedb) it := trie.NodeIterator(nil) for it.Next(true) { if it.Hash() == (common.Hash{}) { @@ -610,9 +665,13 @@ func TestIteratorNodeBlob(t *testing.T) { var count int for dbIter.Next() { - got, present := found[common.BytesToHash(dbIter.Key())] + ok, _, _ := isTrieNode(triedb.Scheme(), dbIter.Key(), dbIter.Value()) + if !ok { + continue + } + got, present := found[crypto.Keccak256Hash(dbIter.Value())] if !present { - t.Fatalf("Miss trie node %v", dbIter.Key()) + t.Fatal("Miss trie node") } if !bytes.Equal(got, dbIter.Value()) { t.Fatalf("Unexpected trie node want %v got %v", dbIter.Value(), got) @@ -623,3 +682,29 @@ func TestIteratorNodeBlob(t *testing.T) { t.Fatal("Find extra trie node via iterator") } } + +// isTrieNode is a helper function which reports if the provided +// database entry belongs to a trie node or not. Note in tests +// only single layer trie is used, namely storage trie is not +// considered at all. 
+func isTrieNode(scheme string, key, val []byte) (bool, []byte, common.Hash) { + var ( + path []byte + hash common.Hash + ) + if scheme == rawdb.HashScheme { + ok := rawdb.IsLegacyTrieNode(key, val) + if !ok { + return false, nil, common.Hash{} + } + hash = common.BytesToHash(key) + } else { + ok, remain := rawdb.IsAccountTrieNode(key) + if !ok { + return false, nil, common.Hash{} + } + path = common.CopyBytes(remain) + hash = crypto.Keccak256Hash(val) + } + return true, path, hash +} diff --git a/coreth/trie/node.go b/coreth/trie/node.go index 7b8dd73e..8a8bc3ad 100644 --- a/coreth/trie/node.go +++ b/coreth/trie/node.go @@ -109,6 +109,19 @@ func (n valueNode) fstring(ind string) string { return fmt.Sprintf("%x ", []byte(n)) } +// rawNode is a simple binary blob used to differentiate between collapsed trie +// nodes and already encoded RLP binary blobs (while at the same time store them +// in the same cache fields). +type rawNode []byte + +func (n rawNode) cache() (hashNode, bool) { panic("this should never end up in a live trie") } +func (n rawNode) fstring(ind string) string { panic("this should never end up in a live trie") } + +func (n rawNode) EncodeRLP(w io.Writer) error { + _, err := w.Write(n) + return err +} + // mustDecodeNode is a wrapper of decodeNode and panic if any error is encountered. 
func mustDecodeNode(hash, buf []byte) node { n, err := decodeNode(hash, buf) diff --git a/coreth/trie/node_enc.go b/coreth/trie/node_enc.go index 06aaeaaf..dc053e10 100644 --- a/coreth/trie/node_enc.go +++ b/coreth/trie/node_enc.go @@ -69,29 +69,6 @@ func (n valueNode) encode(w rlp.EncoderBuffer) { w.WriteBytes(n) } -func (n rawFullNode) encode(w rlp.EncoderBuffer) { - offset := w.List() - for _, c := range n { - if c != nil { - c.encode(w) - } else { - w.Write(rlp.EmptyString) - } - } - w.ListEnd(offset) -} - -func (n *rawShortNode) encode(w rlp.EncoderBuffer) { - offset := w.List() - w.WriteBytes(n.Key) - if n.Val != nil { - n.Val.encode(w) - } else { - w.Write(rlp.EmptyString) - } - w.ListEnd(offset) -} - func (n rawNode) encode(w rlp.EncoderBuffer) { w.Write(n) } diff --git a/coreth/trie/nodeset.go b/coreth/trie/nodeset.go deleted file mode 100644 index 421ad134..00000000 --- a/coreth/trie/nodeset.go +++ /dev/null @@ -1,104 +0,0 @@ -// (c) 2022, Ava Labs, Inc. -// -// This file is a derived work, based on the go-ethereum library whose original -// notices appear below. -// -// It is distributed under a license compatible with the licensing terms of the -// original code from which it is derived. -// -// Much love to the original authors for their work. -// ********** -// Copyright 2022 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. 
-// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package trie - -import ( - "fmt" - - "github.com/ethereum/go-ethereum/common" -) - -// memoryNode is all the information we know about a single cached trie node -// in the memory. -type memoryNode struct { - hash common.Hash // Node hash, computed by hashing rlp value - size uint16 // Byte size of the useful cached data - node node // Cached collapsed trie node, or raw rlp data -} - -// NodeSet contains all dirty nodes collected during the commit operation. -// Each node is keyed by path. It's not thread-safe to use. -type NodeSet struct { - owner common.Hash // the identifier of the trie - paths []string // the path of dirty nodes, sort by insertion order - nodes map[string]*memoryNode // the map of dirty nodes, keyed by node path - leaves []*leaf // the list of dirty leaves -} - -// NewNodeSet initializes an empty node set to be used for tracking dirty nodes -// from a specific account or storage trie. The owner is zero for the account -// trie and the owning account address hash for storage tries. -func NewNodeSet(owner common.Hash) *NodeSet { - return &NodeSet{ - owner: owner, - nodes: make(map[string]*memoryNode), - } -} - -// add caches node with provided path and node object. -func (set *NodeSet) add(path string, node *memoryNode) { - set.paths = append(set.paths, path) - set.nodes[path] = node -} - -// addLeaf caches the provided leaf node. -func (set *NodeSet) addLeaf(node *leaf) { - set.leaves = append(set.leaves, node) -} - -// Len returns the number of dirty nodes contained in the set. -func (set *NodeSet) Len() int { - return len(set.nodes) -} - -// MergedNodeSet represents a merged dirty node set for a group of tries. -type MergedNodeSet struct { - sets map[common.Hash]*NodeSet -} - -// NewMergedNodeSet initializes an empty merged set. 
-func NewMergedNodeSet() *MergedNodeSet { - return &MergedNodeSet{sets: make(map[common.Hash]*NodeSet)} -} - -// NewWithNodeSet constructs a merged nodeset with the provided single set. -func NewWithNodeSet(set *NodeSet) *MergedNodeSet { - merged := NewMergedNodeSet() - merged.Merge(set) - return merged -} - -// Merge merges the provided dirty nodes of a trie into the set. The assumption -// is held that no duplicated set belonging to the same trie will be merged twice. -func (set *MergedNodeSet) Merge(other *NodeSet) error { - _, present := set.sets[other.owner] - if present { - return fmt.Errorf("duplicate trie for owner %#x", other.owner) - } - set.sets[other.owner] = other - return nil -} diff --git a/coreth/trie/preimages.go b/coreth/trie/preimages.go index 4a11059e..1502372f 100644 --- a/coreth/trie/preimages.go +++ b/coreth/trie/preimages.go @@ -30,8 +30,8 @@ import ( "sync" "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/ethdb" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethdb" ) const defaultPreimagesLimit = 4 * 1024 * 1024 // 4 MB diff --git a/coreth/trie/proof.go b/coreth/trie/proof.go index a864b05b..a90d76bb 100644 --- a/coreth/trie/proof.go +++ b/coreth/trie/proof.go @@ -31,9 +31,8 @@ import ( "errors" "fmt" - "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/ethdb" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" ) @@ -70,12 +69,20 @@ func (t *Trie) Prove(key []byte, fromLevel uint, proofDb ethdb.KeyValueWriter) e key = key[1:] nodes = append(nodes, n) case hashNode: - var err error - tn, err = t.resolveHash(n, prefix) + // Retrieve the specified node from the underlying node reader. 
+ // trie.resolveAndTrack is not used since in that function the + // loaded blob will be tracked, while it's not required here since + // all loaded nodes won't be linked to trie at all and track nodes + // may lead to out-of-memory issue. + blob, err := t.reader.node(prefix, common.BytesToHash(n)) if err != nil { log.Error("Unhandled trie error in Trie.Prove", "err", err) return err } + // The raw-blob format nodes are loaded either from the + // clean cache or the database, they are all in their own + // copy and safe to use unsafe decoder. + tn = mustDecodeNodeUnsafe(n, blob) default: panic(fmt.Sprintf("%T: invalid node: %v", tn, tn)) } @@ -379,7 +386,7 @@ func unset(parent node, child node, key []byte, pos int, removeLeft bool) error if removeLeft { if bytes.Compare(cld.Key, key[pos:]) < 0 { // The key of fork shortnode is less than the path - // (it belongs to the range), unset the entrie + // (it belongs to the range), unset the entire // branch. The parent must be a fullnode. fn := parent.(*fullNode) fn.Children[key[pos-1]] = nil @@ -504,7 +511,7 @@ func VerifyRangeProof(rootHash common.Hash, firstKey []byte, lastKey []byte, key if proof == nil { tr := NewStackTrie(nil) for index, key := range keys { - tr.TryUpdate(key, values[index]) + tr.Update(key, values[index]) } if have, want := tr.Hash(), rootHash; have != want { return false, fmt.Errorf("invalid proof, want hash %x, got %x", want, have) @@ -569,12 +576,12 @@ func VerifyRangeProof(rootHash common.Hash, firstKey []byte, lastKey []byte, key } // Rebuild the trie with the leaf stream, the shape of trie // should be same with the original one. 
- tr := &Trie{root: root, db: NewDatabase(rawdb.NewMemoryDatabase())} + tr := &Trie{root: root, reader: newEmptyReader(), tracer: newTracer()} if empty { tr.root = nil } for index, key := range keys { - tr.TryUpdate(key, values[index]) + tr.Update(key, values[index]) } if tr.Hash() != rootHash { return false, fmt.Errorf("invalid proof, want hash %x, got %x", rootHash, tr.Hash()) diff --git a/coreth/trie/proof_test.go b/coreth/trie/proof_test.go index 4875dfeb..2e39d89b 100644 --- a/coreth/trie/proof_test.go +++ b/coreth/trie/proof_test.go @@ -30,19 +30,33 @@ import ( "bytes" crand "crypto/rand" "encoding/binary" + "fmt" mrand "math/rand" "sort" "testing" - "time" "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/ethdb/memorydb" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethdb/memorydb" ) -func init() { - mrand.Seed(time.Now().Unix()) +// Prng is a pseudo random number generator seeded by strong randomness. +// The randomness is printed on startup in order to make failures reproducible. +var prng = initRnd() + +func initRnd() *mrand.Rand { + var seed [8]byte + crand.Read(seed[:]) + rnd := mrand.New(mrand.NewSource(int64(binary.LittleEndian.Uint64(seed[:])))) + fmt.Printf("Seed: %x\n", seed) + return rnd +} + +func randBytes(n int) []byte { + r := make([]byte, n) + prng.Read(r) + return r } // makeProvers creates Merkle trie provers based on different implementations to @@ -399,7 +413,7 @@ func TestOneElementRangeProof(t *testing.T) { // Test the mini trie with only a single element. 
tinyTrie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) entry := &kv{randBytes(32), randBytes(20), false} - tinyTrie.Update(entry.k, entry.v) + tinyTrie.MustUpdate(entry.k, entry.v) first = common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000").Bytes() last = entry.k @@ -473,7 +487,7 @@ func TestSingleSideRangeProof(t *testing.T) { var entries entrySlice for i := 0; i < 4096; i++ { value := &kv{randBytes(32), randBytes(20), false} - trie.Update(value.k, value.v) + trie.MustUpdate(value.k, value.v) entries = append(entries, value) } sort.Sort(entries) @@ -508,7 +522,7 @@ func TestReverseSingleSideRangeProof(t *testing.T) { var entries entrySlice for i := 0; i < 4096; i++ { value := &kv{randBytes(32), randBytes(20), false} - trie.Update(value.k, value.v) + trie.MustUpdate(value.k, value.v) entries = append(entries, value) } sort.Sort(entries) @@ -615,7 +629,7 @@ func TestGappedRangeProof(t *testing.T) { var entries []*kv // Sorted entries for i := byte(0); i < 10; i++ { value := &kv{common.LeftPadBytes([]byte{i}, 32), []byte{i}, false} - trie.Update(value.k, value.v) + trie.MustUpdate(value.k, value.v) entries = append(entries, value) } first, last := 2, 8 @@ -689,7 +703,7 @@ func TestHasRightElement(t *testing.T) { var entries entrySlice for i := 0; i < 4096; i++ { value := &kv{randBytes(32), randBytes(20), false} - trie.Update(value.k, value.v) + trie.MustUpdate(value.k, value.v) entries = append(entries, value) } sort.Sort(entries) @@ -1043,25 +1057,19 @@ func randomTrie(n int) (*Trie, map[string]*kv) { for i := byte(0); i < 100; i++ { value := &kv{common.LeftPadBytes([]byte{i}, 32), []byte{i}, false} value2 := &kv{common.LeftPadBytes([]byte{i + 10}, 32), []byte{i}, false} - trie.Update(value.k, value.v) - trie.Update(value2.k, value2.v) + trie.MustUpdate(value.k, value.v) + trie.MustUpdate(value2.k, value2.v) vals[string(value.k)] = value vals[string(value2.k)] = value2 } for i := 0; i < n; i++ { value := &kv{randBytes(32), 
randBytes(20), false} - trie.Update(value.k, value.v) + trie.MustUpdate(value.k, value.v) vals[string(value.k)] = value } return trie, vals } -func randBytes(n int) []byte { - r := make([]byte, n) - crand.Read(r) - return r -} - func nonRandomTrie(n int) (*Trie, map[string]*kv) { trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) vals := make(map[string]*kv) @@ -1073,7 +1081,7 @@ func nonRandomTrie(n int) (*Trie, map[string]*kv) { binary.LittleEndian.PutUint64(value, i-max) //value := &kv{common.LeftPadBytes([]byte{i}, 32), []byte{i}, false} elem := &kv{key, value, false} - trie.Update(elem.k, elem.v) + trie.MustUpdate(elem.k, elem.v) vals[string(elem.k)] = elem } return trie, vals @@ -1090,7 +1098,7 @@ func TestRangeProofKeysWithSharedPrefix(t *testing.T) { } trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) for i, key := range keys { - trie.Update(key, vals[i]) + trie.MustUpdate(key, vals[i]) } root := trie.Hash() proof := memorydb.New() diff --git a/coreth/trie/secure_trie.go b/coreth/trie/secure_trie.go index 9927ada3..98132b8b 100644 --- a/coreth/trie/secure_trie.go +++ b/coreth/trie/secure_trie.go @@ -28,8 +28,8 @@ package trie import ( "github.com/ava-labs/coreth/core/types" + "github.com/ava-labs/coreth/trie/trienode" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rlp" ) @@ -39,8 +39,13 @@ type SecureTrie = StateTrie // NewSecure creates a new StateTrie. // Deprecated: use NewStateTrie. -func NewSecure(owner common.Hash, root common.Hash, db *Database) (*SecureTrie, error) { - return NewStateTrie(owner, root, db) +func NewSecure(stateRoot common.Hash, owner common.Hash, root common.Hash, db *Database) (*SecureTrie, error) { + id := &ID{ + StateRoot: stateRoot, + Owner: owner, + Root: root, + } + return NewStateTrie(id, db) } // StateTrie wraps a trie with key hashing. 
In a stateTrie trie, all @@ -66,40 +71,39 @@ type StateTrie struct { // If root is the zero hash or the sha3 hash of an empty string, the // trie is initially empty. Otherwise, New will panic if db is nil // and returns MissingNodeError if the root node cannot be found. -func NewStateTrie(owner common.Hash, root common.Hash, db *Database) (*StateTrie, error) { +func NewStateTrie(id *ID, db *Database) (*StateTrie, error) { if db == nil { panic("trie.NewStateTrie called without a database") } - trie, err := New(owner, root, db) + trie, err := New(id, db) if err != nil { return nil, err } return &StateTrie{trie: *trie, preimages: db.preimages}, nil } -// Get returns the value for key stored in the trie. +// MustGet returns the value for key stored in the trie. // The value bytes must not be modified by the caller. -func (t *StateTrie) Get(key []byte) []byte { - res, err := t.TryGet(key) - if err != nil { - log.Error("Unhandled trie error in StateTrie.Get", "err", err) - } - return res +// +// This function will omit any encountered error but just +// print out an error message. +func (t *StateTrie) MustGet(key []byte) []byte { + return t.trie.MustGet(t.hashKey(key)) } -// TryGet returns the value for key stored in the trie. -// The value bytes must not be modified by the caller. -// If the specified node is not in the trie, nil will be returned. +// GetStorage attempts to retrieve a storage slot with provided account address +// and slot key. The value bytes must not be modified by the caller. +// If the specified storage slot is not in the trie, nil will be returned. // If a trie node is not found in the database, a MissingNodeError is returned. -func (t *StateTrie) TryGet(key []byte) ([]byte, error) { - return t.trie.TryGet(t.hashKey(key)) +func (t *StateTrie) GetStorage(_ common.Address, key []byte) ([]byte, error) { + return t.trie.Get(t.hashKey(key)) } -// TryGetAccount attempts to retrieve an account with provided trie path. 
+// GetAccount attempts to retrieve an account with provided account address. // If the specified account is not in the trie, nil will be returned. // If a trie node is not found in the database, a MissingNodeError is returned. -func (t *StateTrie) TryGetAccount(key []byte) (*types.StateAccount, error) { - res, err := t.trie.TryGet(t.hashKey(key)) +func (t *StateTrie) GetAccount(address common.Address) (*types.StateAccount, error) { + res, err := t.trie.Get(t.hashKey(address.Bytes())) if res == nil || err != nil { return nil, err } @@ -108,11 +112,11 @@ func (t *StateTrie) TryGetAccount(key []byte) (*types.StateAccount, error) { return ret, err } -// TryGetAccountWithPreHashedKey does the same thing as TryGetAccount, however -// it expects a key that is already hashed. This constitutes an abstraction leak, -// since the client code needs to know the key format. -func (t *StateTrie) TryGetAccountWithPreHashedKey(key []byte) (*types.StateAccount, error) { - res, err := t.trie.TryGet(key) +// GetAccountByHash does the same thing as GetAccount, however it expects an +// account hash that is the hash of address. This constitutes an abstraction +// leak, since the client code needs to know the key format. +func (t *StateTrie) GetAccountByHash(addrHash common.Hash) (*types.StateAccount, error) { + res, err := t.trie.Get(addrHash.Bytes()) if res == nil || err != nil { return nil, err } @@ -121,27 +125,30 @@ func (t *StateTrie) TryGetAccountWithPreHashedKey(key []byte) (*types.StateAccou return ret, err } -// TryGetNode attempts to retrieve a trie node by compact-encoded path. It is not +// GetNode attempts to retrieve a trie node by compact-encoded path. It is not // possible to use keybyte-encoding as the path might contain odd nibbles. // If the specified trie node is not in the trie, nil will be returned. // If a trie node is not found in the database, a MissingNodeError is returned. 
-func (t *StateTrie) TryGetNode(path []byte) ([]byte, int, error) { - return t.trie.TryGetNode(path) +func (t *StateTrie) GetNode(path []byte) ([]byte, int, error) { + return t.trie.GetNode(path) } -// Update associates key with value in the trie. Subsequent calls to +// MustUpdate associates key with value in the trie. Subsequent calls to // Get will return value. If value has length zero, any existing value // is deleted from the trie and calls to Get will return nil. // // The value bytes must not be modified by the caller while they are // stored in the trie. -func (t *StateTrie) Update(key, value []byte) { - if err := t.TryUpdate(key, value); err != nil { - log.Error("Unhandled trie error in StateTrie.Update", "err", err) - } +// +// This function will omit any encountered error but just print out an +// error message. +func (t *StateTrie) MustUpdate(key, value []byte) { + hk := t.hashKey(key) + t.trie.MustUpdate(hk, value) + t.getSecKeyCache()[string(hk)] = common.CopyBytes(key) } -// TryUpdate associates key with value in the trie. Subsequent calls to +// UpdateStorage associates key with value in the trie. Subsequent calls to // Get will return value. If value has length zero, any existing value // is deleted from the trie and calls to Get will return nil. // @@ -149,9 +156,9 @@ func (t *StateTrie) Update(key, value []byte) { // stored in the trie. // // If a node is not found in the database, a MissingNodeError is returned. -func (t *StateTrie) TryUpdate(key, value []byte) error { +func (t *StateTrie) UpdateStorage(_ common.Address, key, value []byte) error { hk := t.hashKey(key) - err := t.trie.TryUpdate(hk, value) + err := t.trie.Update(hk, value) if err != nil { return err } @@ -159,42 +166,42 @@ func (t *StateTrie) TryUpdate(key, value []byte) error { return nil } -// TryUpdateAccount account will abstract the write of an account to the -// secure trie. 
-func (t *StateTrie) TryUpdateAccount(key []byte, acc *types.StateAccount) error { - hk := t.hashKey(key) +// UpdateAccount will abstract the write of an account to the secure trie. +func (t *StateTrie) UpdateAccount(address common.Address, acc *types.StateAccount) error { + hk := t.hashKey(address.Bytes()) data, err := rlp.EncodeToBytes(acc) if err != nil { return err } - if err := t.trie.TryUpdate(hk, data); err != nil { + if err := t.trie.Update(hk, data); err != nil { return err } - t.getSecKeyCache()[string(hk)] = common.CopyBytes(key) + t.getSecKeyCache()[string(hk)] = address.Bytes() return nil } -// Delete removes any existing value for key from the trie. -func (t *StateTrie) Delete(key []byte) { - if err := t.TryDelete(key); err != nil { - log.Error("Unhandled trie error in StateTrie.Delete", "err", err) - } +// MustDelete removes any existing value for key from the trie. This function +// will omit any encountered error but just print out an error message. +func (t *StateTrie) MustDelete(key []byte) { + hk := t.hashKey(key) + delete(t.getSecKeyCache(), string(hk)) + t.trie.MustDelete(hk) } -// TryDelete removes any existing value for key from the trie. +// DeleteStorage removes any existing storage slot from the trie. // If the specified trie node is not in the trie, nothing will be changed. // If a node is not found in the database, a MissingNodeError is returned. -func (t *StateTrie) TryDelete(key []byte) error { +func (t *StateTrie) DeleteStorage(_ common.Address, key []byte) error { hk := t.hashKey(key) delete(t.getSecKeyCache(), string(hk)) - return t.trie.TryDelete(hk) + return t.trie.Delete(hk) } -// TryDeleteAccount abstracts an account deletion from the trie. -func (t *StateTrie) TryDeleteAccount(key []byte) error { - hk := t.hashKey(key) +// DeleteAccount abstracts an account deletion from the trie. 
+func (t *StateTrie) DeleteAccount(address common.Address) error { + hk := t.hashKey(address.Bytes()) delete(t.getSecKeyCache(), string(hk)) - return t.trie.TryDelete(hk) + return t.trie.Delete(hk) } // GetKey returns the sha3 preimage of a hashed key that was @@ -216,7 +223,7 @@ func (t *StateTrie) GetKey(shaKey []byte) []byte { // All cached preimages will be also flushed if preimages recording is enabled. // Once the trie is committed, it's not usable anymore. A new trie must // be created with new root and updated trie database for following usage -func (t *StateTrie) Commit(collectLeaf bool) (common.Hash, *NodeSet, error) { +func (t *StateTrie) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet) { // Write all the pre-images to the actual disk database if len(t.getSecKeyCache()) > 0 { if t.preimages != nil { diff --git a/coreth/trie/secure_trie_test.go b/coreth/trie/secure_trie_test.go index a0821927..2935c3bc 100644 --- a/coreth/trie/secure_trie_test.go +++ b/coreth/trie/secure_trie_test.go @@ -33,21 +33,23 @@ import ( "sync" "testing" - "github.com/ava-labs/coreth/ethdb/memorydb" + "github.com/ava-labs/coreth/core/rawdb" + "github.com/ava-labs/coreth/core/types" + "github.com/ava-labs/coreth/trie/trienode" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" ) func newEmptySecure() *StateTrie { - trie, _ := NewStateTrie(common.Hash{}, common.Hash{}, NewDatabase(memorydb.New())) + trie, _ := NewStateTrie(TrieID(types.EmptyRootHash), NewDatabase(rawdb.NewMemoryDatabase())) return trie } // makeTestStateTrie creates a large enough secure trie for testing. 
func makeTestStateTrie() (*Database, *StateTrie, map[string][]byte) { // Create an empty trie - triedb := NewDatabase(memorydb.New()) - trie, _ := NewStateTrie(common.Hash{}, common.Hash{}, triedb) + triedb := NewDatabase(rawdb.NewMemoryDatabase()) + trie, _ := NewStateTrie(TrieID(types.EmptyRootHash), triedb) // Fill it with some arbitrary data content := make(map[string][]byte) @@ -55,28 +57,25 @@ func makeTestStateTrie() (*Database, *StateTrie, map[string][]byte) { // Map the same data under multiple keys key, val := common.LeftPadBytes([]byte{1, i}, 32), []byte{i} content[string(key)] = val - trie.Update(key, val) + trie.MustUpdate(key, val) key, val = common.LeftPadBytes([]byte{2, i}, 32), []byte{i} content[string(key)] = val - trie.Update(key, val) + trie.MustUpdate(key, val) // Add some other data to inflate the trie for j := byte(3); j < 13; j++ { key, val = common.LeftPadBytes([]byte{j, i}, 32), []byte{j, i} content[string(key)] = val - trie.Update(key, val) + trie.MustUpdate(key, val) } } - root, nodes, err := trie.Commit(false) - if err != nil { - panic(fmt.Errorf("failed to commit trie %v", err)) - } - if err := triedb.Update(NewWithNodeSet(nodes)); err != nil { + root, nodes := trie.Commit(false) + if err := triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)); err != nil { panic(fmt.Errorf("failed to commit db %v", err)) } // Re-create the trie based on the new state - trie, _ = NewSecure(common.Hash{}, root, triedb) + trie, _ = NewStateTrie(TrieID(root), triedb) return triedb, trie, content } @@ -94,9 +93,9 @@ func TestSecureDelete(t *testing.T) { } for _, val := range vals { if val.v != "" { - trie.Update([]byte(val.k), []byte(val.v)) + trie.MustUpdate([]byte(val.k), []byte(val.v)) } else { - trie.Delete([]byte(val.k)) + trie.MustDelete([]byte(val.k)) } } hash := trie.Hash() @@ -108,13 +107,13 @@ func TestSecureDelete(t *testing.T) { func TestSecureGetKey(t *testing.T) { trie := newEmptySecure() - trie.Update([]byte("foo"), 
[]byte("bar")) + trie.MustUpdate([]byte("foo"), []byte("bar")) key := []byte("foo") value := []byte("bar") seckey := crypto.Keccak256(key) - if !bytes.Equal(trie.Get(key), value) { + if !bytes.Equal(trie.MustGet(key), value) { t.Errorf("Get did not return bar") } if k := trie.GetKey(seckey); !bytes.Equal(k, key) { @@ -131,7 +130,7 @@ func TestStateTrieConcurrency(t *testing.T) { for i := 0; i < threads; i++ { tries[i] = trie.Copy() } - // Start a batch of goroutines interactng with the trie + // Start a batch of goroutines interacting with the trie pend := new(sync.WaitGroup) pend.Add(threads) for i := 0; i < threads; i++ { @@ -141,15 +140,15 @@ func TestStateTrieConcurrency(t *testing.T) { for j := byte(0); j < 255; j++ { // Map the same data under multiple keys key, val := common.LeftPadBytes([]byte{byte(index), 1, j}, 32), []byte{j} - tries[index].Update(key, val) + tries[index].MustUpdate(key, val) key, val = common.LeftPadBytes([]byte{byte(index), 2, j}, 32), []byte{j} - tries[index].Update(key, val) + tries[index].MustUpdate(key, val) // Add some other data to inflate the trie for k := byte(3); k < 13; k++ { key, val = common.LeftPadBytes([]byte{byte(index), k, j}, 32), []byte{k, j} - tries[index].Update(key, val) + tries[index].MustUpdate(key, val) } } tries[index].Commit(false) diff --git a/coreth/trie/stacktrie.go b/coreth/trie/stacktrie.go index 99773e11..e54bd61f 100644 --- a/coreth/trie/stacktrie.go +++ b/coreth/trie/stacktrie.go @@ -34,7 +34,7 @@ import ( "io" "sync" - "github.com/ava-labs/coreth/ethdb" + "github.com/ava-labs/coreth/core/types" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" ) @@ -47,10 +47,14 @@ var stPool = sync.Pool{ }, } -func stackTrieFromPool(db ethdb.KeyValueWriter, owner common.Hash) *StackTrie { +// NodeWriteFunc is used to provide all information of a dirty node for committing +// so that callers can flush nodes into database with desired scheme. 
+type NodeWriteFunc = func(owner common.Hash, path []byte, hash common.Hash, blob []byte) + +func stackTrieFromPool(writeFn NodeWriteFunc, owner common.Hash) *StackTrie { st := stPool.Get().(*StackTrie) - st.db = db st.owner = owner + st.writeFn = writeFn return st } @@ -63,41 +67,41 @@ func returnToPool(st *StackTrie) { // in order. Once it determines that a subtree will no longer be inserted // into, it will hash it and free up the memory it uses. type StackTrie struct { - owner common.Hash // the owner of the trie - nodeType uint8 // node type (as in branch, ext, leaf) - val []byte // value contained by this node if it's a leaf - key []byte // key chunk covered by this (leaf|ext) node - children [16]*StackTrie // list of children (for branch and exts) - db ethdb.KeyValueWriter // Pointer to the commit db, can be nil + owner common.Hash // the owner of the trie + nodeType uint8 // node type (as in branch, ext, leaf) + val []byte // value contained by this node if it's a leaf + key []byte // key chunk covered by this (leaf|ext) node + children [16]*StackTrie // list of children (for branch and exts) + writeFn NodeWriteFunc // function for committing nodes, can be nil } // NewStackTrie allocates and initializes an empty trie. -func NewStackTrie(db ethdb.KeyValueWriter) *StackTrie { +func NewStackTrie(writeFn NodeWriteFunc) *StackTrie { return &StackTrie{ nodeType: emptyNode, - db: db, + writeFn: writeFn, } } // NewStackTrieWithOwner allocates and initializes an empty trie, but with // the additional owner field. -func NewStackTrieWithOwner(db ethdb.KeyValueWriter, owner common.Hash) *StackTrie { +func NewStackTrieWithOwner(writeFn NodeWriteFunc, owner common.Hash) *StackTrie { return &StackTrie{ owner: owner, nodeType: emptyNode, - db: db, + writeFn: writeFn, } } // NewFromBinary initialises a serialized stacktrie with the given db. 
-func NewFromBinary(data []byte, db ethdb.KeyValueWriter) (*StackTrie, error) { +func NewFromBinary(data []byte, writeFn NodeWriteFunc) (*StackTrie, error) { var st StackTrie if err := st.UnmarshalBinary(data); err != nil { return nil, err } // If a database is used, we need to recursively add it to every child - if db != nil { - st.setDb(db) + if writeFn != nil { + st.setWriter(writeFn) } return &st, nil } @@ -150,7 +154,9 @@ func (st *StackTrie) unmarshalBinary(r io.Reader) error { Val []byte Key []byte } - gob.NewDecoder(r).Decode(&dec) + if err := gob.NewDecoder(r).Decode(&dec); err != nil { + return err + } st.owner = dec.Owner st.nodeType = dec.NodeType st.val = dec.Val @@ -164,31 +170,33 @@ func (st *StackTrie) unmarshalBinary(r io.Reader) error { continue } var child StackTrie - child.unmarshalBinary(r) + if err := child.unmarshalBinary(r); err != nil { + return err + } st.children[i] = &child } return nil } -func (st *StackTrie) setDb(db ethdb.KeyValueWriter) { - st.db = db +func (st *StackTrie) setWriter(writeFn NodeWriteFunc) { + st.writeFn = writeFn for _, child := range st.children { if child != nil { - child.setDb(db) + child.setWriter(writeFn) } } } -func newLeaf(owner common.Hash, key, val []byte, db ethdb.KeyValueWriter) *StackTrie { - st := stackTrieFromPool(db, owner) +func newLeaf(owner common.Hash, key, val []byte, writeFn NodeWriteFunc) *StackTrie { + st := stackTrieFromPool(writeFn, owner) st.nodeType = leafNode st.key = append(st.key, key...) st.val = val return st } -func newExt(owner common.Hash, key []byte, child *StackTrie, db ethdb.KeyValueWriter) *StackTrie { - st := stackTrieFromPool(db, owner) +func newExt(owner common.Hash, key []byte, child *StackTrie, writeFn NodeWriteFunc) *StackTrie { + st := stackTrieFromPool(writeFn, owner) st.nodeType = extNode st.key = append(st.key, key...) 
st.children[0] = child @@ -204,25 +212,27 @@ const ( hashedNode ) -// TryUpdate inserts a (key, value) pair into the stack trie -func (st *StackTrie) TryUpdate(key, value []byte) error { +// Update inserts a (key, value) pair into the stack trie. +func (st *StackTrie) Update(key, value []byte) error { k := keybytesToHex(key) if len(value) == 0 { panic("deletion not supported") } - st.insert(k[:len(k)-1], value) + st.insert(k[:len(k)-1], value, nil) return nil } -func (st *StackTrie) Update(key, value []byte) { - if err := st.TryUpdate(key, value); err != nil { +// MustUpdate is a wrapper of Update and will omit any encountered error but +// just print out an error message. +func (st *StackTrie) MustUpdate(key, value []byte) { + if err := st.Update(key, value); err != nil { log.Error("Unhandled trie error in StackTrie.Update", "err", err) } } func (st *StackTrie) Reset() { st.owner = common.Hash{} - st.db = nil + st.writeFn = nil st.key = st.key[:0] st.val = nil for i := range st.children { @@ -245,7 +255,7 @@ func (st *StackTrie) getDiffIndex(key []byte) int { // Helper function to that inserts a (key, value) pair into // the trie. 
-func (st *StackTrie) insert(key, value []byte) { +func (st *StackTrie) insert(key, value []byte, prefix []byte) { switch st.nodeType { case branchNode: /* Branch */ idx := int(key[0]) @@ -254,7 +264,7 @@ func (st *StackTrie) insert(key, value []byte) { for i := idx - 1; i >= 0; i-- { if st.children[i] != nil { if st.children[i].nodeType != hashedNode { - st.children[i].hash() + st.children[i].hash(append(prefix, byte(i))) } break } @@ -262,9 +272,9 @@ func (st *StackTrie) insert(key, value []byte) { // Add new child if st.children[idx] == nil { - st.children[idx] = newLeaf(st.owner, key[1:], value, st.db) + st.children[idx] = newLeaf(st.owner, key[1:], value, st.writeFn) } else { - st.children[idx].insert(key[1:], value) + st.children[idx].insert(key[1:], value, append(prefix, key[0])) } case extNode: /* Ext */ @@ -279,7 +289,7 @@ func (st *StackTrie) insert(key, value []byte) { if diffidx == len(st.key) { // Ext key and key segment are identical, recurse into // the child node. - st.children[0].insert(key[diffidx:], value) + st.children[0].insert(key[diffidx:], value, append(prefix, key[:diffidx]...)) return } // Save the original part. Depending if the break is @@ -288,14 +298,19 @@ func (st *StackTrie) insert(key, value []byte) { // node directly. var n *StackTrie if diffidx < len(st.key)-1 { - n = newExt(st.owner, st.key[diffidx+1:], st.children[0], st.db) + // Break on the non-last byte, insert an intermediate + // extension. The path prefix of the newly-inserted + // extension should also contain the different byte. + n = newExt(st.owner, st.key[diffidx+1:], st.children[0], st.writeFn) + n.hash(append(prefix, st.key[:diffidx+1]...)) } else { // Break on the last byte, no need to insert - // an extension node: reuse the current node + // an extension node: reuse the current node. + // The path prefix of the original part should + // still be same. 
n = st.children[0] + n.hash(append(prefix, st.key...)) } - // Convert to hash - n.hash() var p *StackTrie if diffidx == 0 { // the break is on the first byte, so @@ -308,12 +323,12 @@ func (st *StackTrie) insert(key, value []byte) { // the common prefix is at least one byte // long, insert a new intermediate branch // node. - st.children[0] = stackTrieFromPool(st.db, st.owner) + st.children[0] = stackTrieFromPool(st.writeFn, st.owner) st.children[0].nodeType = branchNode p = st.children[0] } // Create a leaf for the inserted part - o := newLeaf(st.owner, key[diffidx+1:], value, st.db) + o := newLeaf(st.owner, key[diffidx+1:], value, st.writeFn) // Insert both child leaves where they belong: origIdx := st.key[diffidx] @@ -349,7 +364,7 @@ func (st *StackTrie) insert(key, value []byte) { // Convert current node into an ext, // and insert a child branch node. st.nodeType = extNode - st.children[0] = NewStackTrieWithOwner(st.db, st.owner) + st.children[0] = NewStackTrieWithOwner(st.writeFn, st.owner) st.children[0].nodeType = branchNode p = st.children[0] } @@ -358,11 +373,11 @@ func (st *StackTrie) insert(key, value []byte) { // value and another containing the new value. The child leaf // is hashed directly in order to free up some memory. origIdx := st.key[diffidx] - p.children[origIdx] = newLeaf(st.owner, st.key[diffidx+1:], st.val, st.db) - p.children[origIdx].hash() + p.children[origIdx] = newLeaf(st.owner, st.key[diffidx+1:], st.val, st.writeFn) + p.children[origIdx].hash(append(prefix, st.key[:diffidx+1]...)) newIdx := key[diffidx] - p.children[newIdx] = newLeaf(st.owner, key[diffidx+1:], value, st.db) + p.children[newIdx] = newLeaf(st.owner, key[diffidx+1:], value, st.writeFn) // Finally, cut off the key part that has been passed // over to the children. @@ -393,14 +408,14 @@ func (st *StackTrie) insert(key, value []byte) { // - And the 'st.type' will be 'hashedNode' AGAIN // // This method also sets 'st.type' to hashedNode, and clears 'st.key'. 
-func (st *StackTrie) hash() { +func (st *StackTrie) hash(path []byte) { h := newHasher(false) defer returnHasherToPool(h) - st.hashRec(h) + st.hashRec(h, path) } -func (st *StackTrie) hashRec(hasher *hasher) { +func (st *StackTrie) hashRec(hasher *hasher, path []byte) { // The switch below sets this to the RLP-encoding of this node. var encodedNode []byte @@ -409,24 +424,23 @@ func (st *StackTrie) hashRec(hasher *hasher) { return case emptyNode: - st.val = emptyRoot.Bytes() + st.val = types.EmptyRootHash.Bytes() st.key = st.key[:0] st.nodeType = hashedNode return case branchNode: - var nodes rawFullNode + var nodes fullNode for i, child := range st.children { if child == nil { - nodes[i] = nilValueNode + nodes.Children[i] = nilValueNode continue } - - child.hashRec(hasher) + child.hashRec(hasher, append(path, byte(i))) if len(child.val) < 32 { - nodes[i] = rawNode(child.val) + nodes.Children[i] = rawNode(child.val) } else { - nodes[i] = hashNode(child.val) + nodes.Children[i] = hashNode(child.val) } // Release child back to pool. @@ -438,10 +452,9 @@ func (st *StackTrie) hashRec(hasher *hasher) { encodedNode = hasher.encodedBytes() case extNode: - st.children[0].hashRec(hasher) + st.children[0].hashRec(hasher, append(path, st.key...)) - sz := hexToCompactInPlace(st.key) - n := rawShortNode{Key: st.key[:sz]} + n := shortNode{Key: hexToCompact(st.key)} if len(st.children[0].val) < 32 { n.Val = rawNode(st.children[0].val) } else { @@ -457,8 +470,7 @@ func (st *StackTrie) hashRec(hasher *hasher) { case leafNode: st.key = append(st.key, byte(16)) - sz := hexToCompactInPlace(st.key) - n := rawShortNode{Key: st.key[:sz], Val: valueNode(st.val)} + n := shortNode{Key: hexToCompact(st.key), Val: valueNode(st.val)} n.encode(hasher.encbuf) encodedNode = hasher.encodedBytes() @@ -477,10 +489,8 @@ func (st *StackTrie) hashRec(hasher *hasher) { // Write the hash to the 'val'. 
We allocate a new val here to not mutate // input values st.val = hasher.hashData(encodedNode) - if st.db != nil { - // TODO! Is it safe to Put the slice here? - // Do all db implementations copy the value provided? - st.db.Put(st.val, encodedNode) + if st.writeFn != nil { + st.writeFn(st.owner, path, common.BytesToHash(st.val), encodedNode) } } @@ -489,12 +499,11 @@ func (st *StackTrie) Hash() (h common.Hash) { hasher := newHasher(false) defer returnHasherToPool(hasher) - st.hashRec(hasher) + st.hashRec(hasher, nil) if len(st.val) == 32 { copy(h[:], st.val) return h } - // If the node's RLP isn't 32 bytes long, the node will not // be hashed, and instead contain the rlp-encoding of the // node. For the top level node, we need to force the hashing. @@ -504,7 +513,7 @@ func (st *StackTrie) Hash() (h common.Hash) { return h } -// Commit will firstly hash the entrie trie if it's still not hashed +// Commit will firstly hash the entire trie if it's still not hashed // and then commit all nodes to the associated database. Actually most // of the trie nodes MAY have been committed already. The main purpose // here is to commit the root node. @@ -512,25 +521,24 @@ func (st *StackTrie) Hash() (h common.Hash) { // The associated database is expected, otherwise the whole commit // functionality should be disabled. func (st *StackTrie) Commit() (h common.Hash, err error) { - if st.db == nil { + if st.writeFn == nil { return common.Hash{}, ErrCommitDisabled } - hasher := newHasher(false) defer returnHasherToPool(hasher) - st.hashRec(hasher) + st.hashRec(hasher, nil) if len(st.val) == 32 { copy(h[:], st.val) return h, nil } - // If the node's RLP isn't 32 bytes long, the node will not - // be hashed (and committed), and instead contain the rlp-encoding of the + // be hashed (and committed), and instead contain the rlp-encoding of the // node. For the top level node, we need to force the hashing+commit. 
hasher.sha.Reset() hasher.sha.Write(st.val) hasher.sha.Read(h[:]) - st.db.Put(h[:], st.val) + + st.writeFn(st.owner, nil, h, st.val) return h, nil } diff --git a/coreth/trie/stacktrie_test.go b/coreth/trie/stacktrie_test.go index 1a238a01..e445e93f 100644 --- a/coreth/trie/stacktrie_test.go +++ b/coreth/trie/stacktrie_test.go @@ -31,7 +31,7 @@ import ( "math/big" "testing" - "github.com/ava-labs/coreth/ethdb/memorydb" + "github.com/ava-labs/coreth/core/rawdb" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" ) @@ -184,7 +184,7 @@ func TestStackTrieInsertAndHash(t *testing.T) { st.Reset() for j := 0; j < l; j++ { kv := &test[j] - if err := st.TryUpdate(common.FromHex(kv.K), []byte(kv.V)); err != nil { + if err := st.Update(common.FromHex(kv.K), []byte(kv.V)); err != nil { t.Fatal(err) } } @@ -198,13 +198,13 @@ func TestStackTrieInsertAndHash(t *testing.T) { func TestSizeBug(t *testing.T) { st := NewStackTrie(nil) - nt := NewEmpty(NewDatabase(memorydb.New())) + nt := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) leaf := common.FromHex("290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563") value := common.FromHex("94cf40d0d2b44f2b66e07cace1372ca42b73cf21a3") - nt.TryUpdate(leaf, value) - st.TryUpdate(leaf, value) + nt.Update(leaf, value) + st.Update(leaf, value) if nt.Hash() != st.Hash() { t.Fatalf("error %x != %x", st.Hash(), nt.Hash()) @@ -213,7 +213,7 @@ func TestSizeBug(t *testing.T) { func TestEmptyBug(t *testing.T) { st := NewStackTrie(nil) - nt := NewEmpty(NewDatabase(memorydb.New())) + nt := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) //leaf := common.FromHex("290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563") //value := common.FromHex("94cf40d0d2b44f2b66e07cace1372ca42b73cf21a3") @@ -228,8 +228,8 @@ func TestEmptyBug(t *testing.T) { } for _, kv := range kvs { - nt.TryUpdate(common.FromHex(kv.K), common.FromHex(kv.V)) - st.TryUpdate(common.FromHex(kv.K), common.FromHex(kv.V)) + 
nt.Update(common.FromHex(kv.K), common.FromHex(kv.V)) + st.Update(common.FromHex(kv.K), common.FromHex(kv.V)) } if nt.Hash() != st.Hash() { @@ -239,7 +239,7 @@ func TestEmptyBug(t *testing.T) { func TestValLength56(t *testing.T) { st := NewStackTrie(nil) - nt := NewEmpty(NewDatabase(memorydb.New())) + nt := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) //leaf := common.FromHex("290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563") //value := common.FromHex("94cf40d0d2b44f2b66e07cace1372ca42b73cf21a3") @@ -251,8 +251,8 @@ func TestValLength56(t *testing.T) { } for _, kv := range kvs { - nt.TryUpdate(common.FromHex(kv.K), common.FromHex(kv.V)) - st.TryUpdate(common.FromHex(kv.K), common.FromHex(kv.V)) + nt.Update(common.FromHex(kv.K), common.FromHex(kv.V)) + st.Update(common.FromHex(kv.K), common.FromHex(kv.V)) } if nt.Hash() != st.Hash() { @@ -264,7 +264,7 @@ func TestValLength56(t *testing.T) { // which causes a lot of node-within-node. This case was found via fuzzing. func TestUpdateSmallNodes(t *testing.T) { st := NewStackTrie(nil) - nt := NewEmpty(NewDatabase(memorydb.New())) + nt := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) kvs := []struct { K string V string @@ -273,8 +273,8 @@ func TestUpdateSmallNodes(t *testing.T) { {"65", "3000"}, // stacktrie.Update } for _, kv := range kvs { - nt.TryUpdate(common.FromHex(kv.K), common.FromHex(kv.V)) - st.TryUpdate(common.FromHex(kv.K), common.FromHex(kv.V)) + nt.Update(common.FromHex(kv.K), common.FromHex(kv.V)) + st.Update(common.FromHex(kv.K), common.FromHex(kv.V)) } if nt.Hash() != st.Hash() { t.Fatalf("error %x != %x", st.Hash(), nt.Hash()) @@ -292,7 +292,7 @@ func TestUpdateSmallNodes(t *testing.T) { func TestUpdateVariableKeys(t *testing.T) { t.SkipNow() st := NewStackTrie(nil) - nt := NewEmpty(NewDatabase(memorydb.New())) + nt := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) kvs := []struct { K string V string @@ -301,8 +301,8 @@ func TestUpdateVariableKeys(t *testing.T) { 
{"0x3330353463653239356131303167617430", "313131"}, } for _, kv := range kvs { - nt.TryUpdate(common.FromHex(kv.K), common.FromHex(kv.V)) - st.TryUpdate(common.FromHex(kv.K), common.FromHex(kv.V)) + nt.Update(common.FromHex(kv.K), common.FromHex(kv.V)) + st.Update(common.FromHex(kv.K), common.FromHex(kv.V)) } if nt.Hash() != st.Hash() { t.Fatalf("error %x != %x", st.Hash(), nt.Hash()) @@ -319,7 +319,7 @@ func TestStacktrieNotModifyValues(t *testing.T) { value := make([]byte, 1, 100) value[0] = 0x2 want := common.CopyBytes(value) - st.TryUpdate([]byte{0x01}, value) + st.Update([]byte{0x01}, value) st.Hash() if have := value; !bytes.Equal(have, want) { t.Fatalf("tiny trie: have %#x want %#x", have, want) @@ -340,7 +340,7 @@ func TestStacktrieNotModifyValues(t *testing.T) { for i := 0; i < 1000; i++ { key := common.BigToHash(keyB) value := getValue(i) - st.TryUpdate(key.Bytes(), value) + st.Update(key.Bytes(), value) vals = append(vals, value) keyB = keyB.Add(keyB, keyDelta) keyDelta.Add(keyDelta, common.Big1) @@ -361,7 +361,7 @@ func TestStacktrieNotModifyValues(t *testing.T) { func TestStacktrieSerialization(t *testing.T) { var ( st = NewStackTrie(nil) - nt = NewEmpty(NewDatabase(memorydb.New())) + nt = NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) keyB = big.NewInt(1) keyDelta = big.NewInt(1) vals [][]byte @@ -381,7 +381,7 @@ func TestStacktrieSerialization(t *testing.T) { keyDelta.Add(keyDelta, common.Big1) } for i, k := range keys { - nt.TryUpdate(k, common.CopyBytes(vals[i])) + nt.Update(k, common.CopyBytes(vals[i])) } for i, k := range keys { @@ -394,7 +394,7 @@ func TestStacktrieSerialization(t *testing.T) { t.Fatal(err) } st = newSt - st.TryUpdate(k, common.CopyBytes(vals[i])) + st.Update(k, common.CopyBytes(vals[i])) } if have, want := st.Hash(), nt.Hash(); have != want { t.Fatalf("have %#x want %#x", have, want) diff --git a/coreth/trie/sync_test.go b/coreth/trie/sync_test.go index c04a928c..b9418d8a 100644 --- a/coreth/trie/sync_test.go +++ 
b/coreth/trie/sync_test.go @@ -29,15 +29,19 @@ package trie import ( "fmt" - "github.com/ava-labs/coreth/ethdb/memorydb" + "github.com/ava-labs/coreth/core/rawdb" + "github.com/ava-labs/coreth/core/types" + "github.com/ava-labs/coreth/trie/trienode" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethdb" ) // makeTestTrie create a sample test trie to test node-wise reconstruction. -func makeTestTrie() (*Database, *StateTrie, map[string][]byte) { +func makeTestTrie(scheme string) (ethdb.Database, *Database, *StateTrie, map[string][]byte) { // Create an empty trie - triedb := NewDatabase(memorydb.New()) - trie, _ := NewStateTrie(common.Hash{}, common.Hash{}, triedb) + db := rawdb.NewMemoryDatabase() + triedb := newTestDatabase(db, scheme) + trie, _ := NewStateTrie(TrieID(types.EmptyRootHash), triedb) // Fill it with some arbitrary data content := make(map[string][]byte) @@ -45,27 +49,27 @@ func makeTestTrie() (*Database, *StateTrie, map[string][]byte) { // Map the same data under multiple keys key, val := common.LeftPadBytes([]byte{1, i}, 32), []byte{i} content[string(key)] = val - trie.Update(key, val) + trie.MustUpdate(key, val) key, val = common.LeftPadBytes([]byte{2, i}, 32), []byte{i} content[string(key)] = val - trie.Update(key, val) + trie.MustUpdate(key, val) // Add some other data to inflate the trie for j := byte(3); j < 13; j++ { key, val = common.LeftPadBytes([]byte{j, i}, 32), []byte{j, i} content[string(key)] = val - trie.Update(key, val) + trie.MustUpdate(key, val) } } - root, nodes, err := trie.Commit(false) - if err != nil { - panic(fmt.Errorf("failed to commit trie %v", err)) - } - if err := triedb.Update(NewWithNodeSet(nodes)); err != nil { + root, nodes := trie.Commit(false) + if err := triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)); err != nil { panic(fmt.Errorf("failed to commit db %v", err)) } + if err := triedb.Commit(root, false); err != nil { + panic(err) + } // Re-create the trie based on 
the new state - trie, _ = NewSecure(common.Hash{}, root, triedb) - return triedb, trie, content + trie, _ = NewStateTrie(TrieID(root), triedb) + return db, triedb, trie, content } diff --git a/coreth/trie/tracer.go b/coreth/trie/tracer.go new file mode 100644 index 00000000..51079149 --- /dev/null +++ b/coreth/trie/tracer.go @@ -0,0 +1,129 @@ +// Copyright 2022 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package trie + +import ( + "github.com/ava-labs/coreth/trie/trienode" + "github.com/ethereum/go-ethereum/common" +) + +// tracer tracks the changes of trie nodes. During the trie operations, +// some nodes can be deleted from the trie, while these deleted nodes +// won't be captured by trie.Hasher or trie.Committer. Thus, these deleted +// nodes won't be removed from the disk at all. Tracer is an auxiliary tool +// used to track all insert and delete operations of trie and capture all +// deleted nodes eventually. +// +// The changed nodes can be mainly divided into two categories: the leaf +// node and intermediate node. The former is inserted/deleted by callers +// while the latter is inserted/deleted in order to follow the rule of trie. +// This tool can track all of them no matter the node is embedded in its +// parent or not, but valueNode is never tracked. 
+// +// Besides, it's also used for recording the original value of the nodes +// when they are resolved from the disk. The pre-value of the nodes will +// be used to construct trie history in the future. +// +// Note tracer is not thread-safe, callers should be responsible for handling +// the concurrency issues by themselves. +type tracer struct { + inserts map[string]struct{} + deletes map[string]struct{} + accessList map[string][]byte +} + +// newTracer initializes the tracer for capturing trie changes. +func newTracer() *tracer { + return &tracer{ + inserts: make(map[string]struct{}), + deletes: make(map[string]struct{}), + accessList: make(map[string][]byte), + } +} + +// onRead tracks the newly loaded trie node and caches the rlp-encoded +// blob internally. Don't change the value outside of function since +// it's not deep-copied. +func (t *tracer) onRead(path []byte, val []byte) { + t.accessList[string(path)] = val +} + +// onInsert tracks the newly inserted trie node. If it's already +// in the deletion set (resurrected node), then just wipe it from +// the deletion set as it's "untouched". +func (t *tracer) onInsert(path []byte) { + if _, present := t.deletes[string(path)]; present { + delete(t.deletes, string(path)) + return + } + t.inserts[string(path)] = struct{}{} +} + +// onDelete tracks the newly deleted trie node. If it's already +// in the addition set, then just wipe it from the addition set +// as it's untouched. +func (t *tracer) onDelete(path []byte) { + if _, present := t.inserts[string(path)]; present { + delete(t.inserts, string(path)) + return + } + t.deletes[string(path)] = struct{}{} +} + +// reset clears the content tracked by tracer. +func (t *tracer) reset() { + t.inserts = make(map[string]struct{}) + t.deletes = make(map[string]struct{}) + t.accessList = make(map[string][]byte) +} + +// copy returns a deep copied tracer instance. 
+func (t *tracer) copy() *tracer { + var ( + inserts = make(map[string]struct{}) + deletes = make(map[string]struct{}) + accessList = make(map[string][]byte) + ) + for path := range t.inserts { + inserts[path] = struct{}{} + } + for path := range t.deletes { + deletes[path] = struct{}{} + } + for path, blob := range t.accessList { + accessList[path] = common.CopyBytes(blob) + } + return &tracer{ + inserts: inserts, + deletes: deletes, + accessList: accessList, + } +} + +// markDeletions puts all tracked deletions into the provided nodeset. +func (t *tracer) markDeletions(set *trienode.NodeSet) { + for path := range t.deletes { + // It's possible a few deleted nodes were embedded + // in their parent before, the deletions can be no + // effect by deleting nothing, filter them out. + prev, ok := t.accessList[path] + if !ok { + continue + } + set.AddNode([]byte(path), trienode.NewWithPrev(common.Hash{}, nil, prev)) + } +} diff --git a/coreth/trie/tracer_test.go b/coreth/trie/tracer_test.go new file mode 100644 index 00000000..e11348c2 --- /dev/null +++ b/coreth/trie/tracer_test.go @@ -0,0 +1,375 @@ +// Copyright 2022 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package trie + +import ( + "bytes" + "testing" + + "github.com/ava-labs/coreth/core/rawdb" + "github.com/ava-labs/coreth/core/types" + "github.com/ava-labs/coreth/trie/trienode" + "github.com/ethereum/go-ethereum/common" +) + +var ( + tiny = []struct{ k, v string }{ + {"k1", "v1"}, + {"k2", "v2"}, + {"k3", "v3"}, + } + nonAligned = []struct{ k, v string }{ + {"do", "verb"}, + {"ether", "wookiedoo"}, + {"horse", "stallion"}, + {"shaman", "horse"}, + {"doge", "coin"}, + {"dog", "puppy"}, + {"somethingveryoddindeedthis is", "myothernodedata"}, + } + standard = []struct{ k, v string }{ + {string(randBytes(32)), "verb"}, + {string(randBytes(32)), "wookiedoo"}, + {string(randBytes(32)), "stallion"}, + {string(randBytes(32)), "horse"}, + {string(randBytes(32)), "coin"}, + {string(randBytes(32)), "puppy"}, + {string(randBytes(32)), "myothernodedata"}, + } +) + +func TestTrieTracer(t *testing.T) { + testTrieTracer(t, tiny) + testTrieTracer(t, nonAligned) + testTrieTracer(t, standard) +} + +// Tests if the trie diffs are tracked correctly. Tracer should capture +// all non-leaf dirty nodes, no matter the node is embedded or not. 
+func testTrieTracer(t *testing.T, vals []struct{ k, v string }) { + db := NewDatabase(rawdb.NewMemoryDatabase()) + trie := NewEmpty(db) + + // Determine all new nodes are tracked + for _, val := range vals { + trie.MustUpdate([]byte(val.k), []byte(val.v)) + } + insertSet := copySet(trie.tracer.inserts) // copy before commit + deleteSet := copySet(trie.tracer.deletes) // copy before commit + root, nodes := trie.Commit(false) + db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)) + + seen := setKeys(iterNodes(db, root)) + if !compareSet(insertSet, seen) { + t.Fatal("Unexpected insertion set") + } + if !compareSet(deleteSet, nil) { + t.Fatal("Unexpected deletion set") + } + + // Determine all deletions are tracked + trie, _ = New(TrieID(root), db) + for _, val := range vals { + trie.MustDelete([]byte(val.k)) + } + insertSet, deleteSet = copySet(trie.tracer.inserts), copySet(trie.tracer.deletes) + if !compareSet(insertSet, nil) { + t.Fatal("Unexpected insertion set") + } + if !compareSet(deleteSet, seen) { + t.Fatal("Unexpected deletion set") + } +} + +// Test that after inserting a new batch of nodes and deleting them immediately, +// the trie tracer should be cleared normally as no operation happened. +func TestTrieTracerNoop(t *testing.T) { + testTrieTracerNoop(t, tiny) + testTrieTracerNoop(t, nonAligned) + testTrieTracerNoop(t, standard) +} + +func testTrieTracerNoop(t *testing.T, vals []struct{ k, v string }) { + trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) + for _, val := range vals { + trie.MustUpdate([]byte(val.k), []byte(val.v)) + } + for _, val := range vals { + trie.MustDelete([]byte(val.k)) + } + if len(trie.tracer.inserts) != 0 { + t.Fatal("Unexpected insertion set") + } + if len(trie.tracer.deletes) != 0 { + t.Fatal("Unexpected deletion set") + } +} + +// Tests if the accessList is correctly tracked. 
+func TestAccessList(t *testing.T) { + testAccessList(t, tiny) + testAccessList(t, nonAligned) + testAccessList(t, standard) +} + +func testAccessList(t *testing.T, vals []struct{ k, v string }) { + var ( + db = NewDatabase(rawdb.NewMemoryDatabase()) + trie = NewEmpty(db) + orig = trie.Copy() + ) + // Create trie from scratch + for _, val := range vals { + trie.MustUpdate([]byte(val.k), []byte(val.v)) + } + root, nodes := trie.Commit(false) + db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)) + + trie, _ = New(TrieID(root), db) + if err := verifyAccessList(orig, trie, nodes); err != nil { + t.Fatalf("Invalid accessList %v", err) + } + + // Update trie + parent := root + trie, _ = New(TrieID(root), db) + orig = trie.Copy() + for _, val := range vals { + trie.MustUpdate([]byte(val.k), randBytes(32)) + } + root, nodes = trie.Commit(false) + db.Update(root, parent, trienode.NewWithNodeSet(nodes)) + + trie, _ = New(TrieID(root), db) + if err := verifyAccessList(orig, trie, nodes); err != nil { + t.Fatalf("Invalid accessList %v", err) + } + + // Add more new nodes + parent = root + trie, _ = New(TrieID(root), db) + orig = trie.Copy() + var keys []string + for i := 0; i < 30; i++ { + key := randBytes(32) + keys = append(keys, string(key)) + trie.MustUpdate(key, randBytes(32)) + } + root, nodes = trie.Commit(false) + db.Update(root, parent, trienode.NewWithNodeSet(nodes)) + + trie, _ = New(TrieID(root), db) + if err := verifyAccessList(orig, trie, nodes); err != nil { + t.Fatalf("Invalid accessList %v", err) + } + + // Partial deletions + parent = root + trie, _ = New(TrieID(root), db) + orig = trie.Copy() + for _, key := range keys { + trie.MustUpdate([]byte(key), nil) + } + root, nodes = trie.Commit(false) + db.Update(root, parent, trienode.NewWithNodeSet(nodes)) + + trie, _ = New(TrieID(root), db) + if err := verifyAccessList(orig, trie, nodes); err != nil { + t.Fatalf("Invalid accessList %v", err) + } + + // Delete all + parent = root + trie, _ = 
New(TrieID(root), db) + orig = trie.Copy() + for _, val := range vals { + trie.MustUpdate([]byte(val.k), nil) + } + root, nodes = trie.Commit(false) + db.Update(root, parent, trienode.NewWithNodeSet(nodes)) + + trie, _ = New(TrieID(root), db) + if err := verifyAccessList(orig, trie, nodes); err != nil { + t.Fatalf("Invalid accessList %v", err) + } +} + +// Tests origin values won't be tracked in Iterator or Prover +func TestAccessListLeak(t *testing.T) { + var ( + db = NewDatabase(rawdb.NewMemoryDatabase()) + trie = NewEmpty(db) + ) + // Create trie from scratch + for _, val := range standard { + trie.MustUpdate([]byte(val.k), []byte(val.v)) + } + root, nodes := trie.Commit(false) + db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)) + + var cases = []struct { + op func(tr *Trie) + }{ + { + func(tr *Trie) { + it := tr.NodeIterator(nil) + for it.Next(true) { + } + }, + }, + { + func(tr *Trie) { + it := NewIterator(tr.NodeIterator(nil)) + for it.Next() { + } + }, + }, + { + func(tr *Trie) { + for _, val := range standard { + tr.Prove([]byte(val.k), 0, rawdb.NewMemoryDatabase()) + } + }, + }, + } + for _, c := range cases { + trie, _ = New(TrieID(root), db) + n1 := len(trie.tracer.accessList) + c.op(trie) + n2 := len(trie.tracer.accessList) + + if n1 != n2 { + t.Fatalf("AccessList is leaked, prev %d after %d", n1, n2) + } + } +} + +// Tests whether the original tree node is correctly deleted after being embedded +// in its parent due to the smaller size of the original tree node. 
+func TestTinyTree(t *testing.T) { + var ( + db = NewDatabase(rawdb.NewMemoryDatabase()) + trie = NewEmpty(db) + ) + for _, val := range tiny { + trie.MustUpdate([]byte(val.k), randBytes(32)) + } + root, set := trie.Commit(false) + db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(set)) + + parent := root + trie, _ = New(TrieID(root), db) + orig := trie.Copy() + for _, val := range tiny { + trie.MustUpdate([]byte(val.k), []byte(val.v)) + } + root, set = trie.Commit(false) + db.Update(root, parent, trienode.NewWithNodeSet(set)) + + trie, _ = New(TrieID(root), db) + if err := verifyAccessList(orig, trie, set); err != nil { + t.Fatalf("Invalid accessList %v", err) + } +} + +func compareSet(setA, setB map[string]struct{}) bool { + if len(setA) != len(setB) { + return false + } + for key := range setA { + if _, ok := setB[key]; !ok { + return false + } + } + return true +} + +func forNodes(tr *Trie) map[string][]byte { + var ( + it = tr.NodeIterator(nil) + nodes = make(map[string][]byte) + ) + for it.Next(true) { + if it.Leaf() { + continue + } + nodes[string(it.Path())] = common.CopyBytes(it.NodeBlob()) + } + return nodes +} + +func iterNodes(db *Database, root common.Hash) map[string][]byte { + tr, _ := New(TrieID(root), db) + return forNodes(tr) +} + +func forHashedNodes(tr *Trie) map[string][]byte { + var ( + it = tr.NodeIterator(nil) + nodes = make(map[string][]byte) + ) + for it.Next(true) { + if it.Hash() == (common.Hash{}) { + continue + } + nodes[string(it.Path())] = common.CopyBytes(it.NodeBlob()) + } + return nodes +} + +func diffTries(trieA, trieB *Trie) (map[string][]byte, map[string][]byte, map[string][]byte) { + var ( + nodesA = forHashedNodes(trieA) + nodesB = forHashedNodes(trieB) + inA = make(map[string][]byte) // hashed nodes in trie a but not b + inB = make(map[string][]byte) // hashed nodes in trie b but not a + both = make(map[string][]byte) // hashed nodes in both tries but different value + ) + for path, blobA := range nodesA { + if 
blobB, ok := nodesB[path]; ok { + if bytes.Equal(blobA, blobB) { + continue + } + both[path] = blobA + continue + } + inA[path] = blobA + } + for path, blobB := range nodesB { + if _, ok := nodesA[path]; ok { + continue + } + inB[path] = blobB + } + return inA, inB, both +} + +func setKeys(set map[string][]byte) map[string]struct{} { + keys := make(map[string]struct{}) + for k := range set { + keys[k] = struct{}{} + } + return keys +} + +func copySet(set map[string]struct{}) map[string]struct{} { + copied := make(map[string]struct{}) + for k := range set { + copied[k] = struct{}{} + } + return copied +} diff --git a/coreth/trie/trie.go b/coreth/trie/trie.go index 5539f755..f0798630 100644 --- a/coreth/trie/trie.go +++ b/coreth/trie/trie.go @@ -32,31 +32,12 @@ import ( "errors" "fmt" + "github.com/ava-labs/coreth/core/types" + "github.com/ava-labs/coreth/trie/trienode" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" ) -var ( - // emptyRoot is the known root hash of an empty trie. - emptyRoot = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") -) - -// LeafCallback is a callback type invoked when a trie operation reaches a leaf -// node. -// -// The keys is a path tuple identifying a particular trie node either in a single -// trie (account) or a layered trie (account -> storage). Each key in the tuple -// is in the raw format(32 bytes). -// -// The path is a composite hexary path identifying the trie node. All the key -// bytes are converted to the hexary nibbles and composited with the parent path -// if the trie node is in a layered trie. -// -// It's used by state sync and commit to allow handling external references -// between account and storage tries. And also it's used in the state healing -// for extracting the raw states(leaf nodes) with corresponding paths. 
-type LeafCallback func(keys [][]byte, path []byte, leaf []byte, parent common.Hash, parentPath []byte) error - // Trie is a Merkle Patricia Trie. Use New to create a trie that sits on // top of a database. Whenever trie performs a commit operation, the generated // nodes will be gathered and returned in a set. Once the trie is committed, @@ -73,9 +54,8 @@ type Trie struct { // actually unhashed nodes. unhashed int - // db is the handler trie can retrieve nodes from. It's - // only for reading purpose and not available for writing. - db *Database + // reader is the handler trie can retrieve nodes from. + reader *trieReader // tracer is the tool to track the trie changes. // It will be reset after each commit operation. @@ -93,26 +73,29 @@ func (t *Trie) Copy() *Trie { root: t.root, owner: t.owner, unhashed: t.unhashed, - db: t.db, + reader: t.reader, tracer: t.tracer.copy(), } } -// New creates a trie with an existing root node from db and an assigned -// owner for storage proximity. -// -// If root is the zero hash or the sha3 hash of an empty string, the -// trie is initially empty and does not require a database. Otherwise, -// New will panic if db is nil and returns a MissingNodeError if root does -// not exist in the database. Accessing the trie loads nodes from db on demand. -func New(owner common.Hash, root common.Hash, db *Database) (*Trie, error) { +// New creates the trie instance with provided trie id and the read-only +// database. The state specified by trie id must be available, otherwise +// an error will be returned. The trie root specified by trie id can be +// zero hash or the sha3 hash of an empty string, then trie is initially +// empty, otherwise, the root node must be present in database or returns +// a MissingNodeError if not. 
+func New(id *ID, db NodeReader) (*Trie, error) { + reader, err := newTrieReader(id.StateRoot, id.Owner, db) + if err != nil { + return nil, err + } trie := &Trie{ - owner: owner, - db: db, - //tracer: newTracer(), + owner: id.Owner, + reader: reader, + tracer: newTracer(), } - if root != (common.Hash{}) && root != emptyRoot { - rootnode, err := trie.resolveHash(root[:], nil) + if id.Root != (common.Hash{}) && id.Root != types.EmptyRootHash { + rootnode, err := trie.resolveAndTrack(id.Root[:], nil) if err != nil { return nil, err } @@ -123,7 +106,7 @@ func New(owner common.Hash, root common.Hash, db *Database) (*Trie, error) { // NewEmpty is a shortcut to create empty tree. It's mostly used in tests. func NewEmpty(db *Database) *Trie { - tr, _ := New(common.Hash{}, common.Hash{}, db) + tr, _ := New(TrieID(types.EmptyRootHash), db) return tr } @@ -133,28 +116,30 @@ func (t *Trie) NodeIterator(start []byte) NodeIterator { return newNodeIterator(t, start) } -// Get returns the value for key stored in the trie. -// The value bytes must not be modified by the caller. -func (t *Trie) Get(key []byte) []byte { - res, err := t.TryGet(key) +// MustGet is a wrapper of Get and will omit any encountered error but just +// print out an error message. +func (t *Trie) MustGet(key []byte) []byte { + res, err := t.Get(key) if err != nil { log.Error("Unhandled trie error in Trie.Get", "err", err) } return res } -// TryGet returns the value for key stored in the trie. +// Get returns the value for key stored in the trie. // The value bytes must not be modified by the caller. -// If a node was not found in the database, a MissingNodeError is returned. -func (t *Trie) TryGet(key []byte) ([]byte, error) { - value, newroot, didResolve, err := t.tryGet(t.root, keybytesToHex(key), 0) +// +// If the requested node is not present in trie, no error will be returned. +// If the trie is corrupted, a MissingNodeError is returned. 
+func (t *Trie) Get(key []byte) ([]byte, error) { + value, newroot, didResolve, err := t.get(t.root, keybytesToHex(key), 0) if err == nil && didResolve { t.root = newroot } return value, err } -func (t *Trie) tryGet(origNode node, key []byte, pos int) (value []byte, newnode node, didResolve bool, err error) { +func (t *Trie) get(origNode node, key []byte, pos int) (value []byte, newnode node, didResolve bool, err error) { switch n := (origNode).(type) { case nil: return nil, nil, false, nil @@ -165,35 +150,48 @@ func (t *Trie) tryGet(origNode node, key []byte, pos int) (value []byte, newnode // key not found in trie return nil, n, false, nil } - value, newnode, didResolve, err = t.tryGet(n.Val, key, pos+len(n.Key)) + value, newnode, didResolve, err = t.get(n.Val, key, pos+len(n.Key)) if err == nil && didResolve { n = n.copy() n.Val = newnode } return value, n, didResolve, err case *fullNode: - value, newnode, didResolve, err = t.tryGet(n.Children[key[pos]], key, pos+1) + value, newnode, didResolve, err = t.get(n.Children[key[pos]], key, pos+1) if err == nil && didResolve { n = n.copy() n.Children[key[pos]] = newnode } return value, n, didResolve, err case hashNode: - child, err := t.resolveHash(n, key[:pos]) + child, err := t.resolveAndTrack(n, key[:pos]) if err != nil { return nil, n, true, err } - value, newnode, _, err := t.tryGet(child, key, pos) + value, newnode, _, err := t.get(child, key, pos) return value, newnode, true, err default: panic(fmt.Sprintf("%T: invalid node: %v", origNode, origNode)) } } -// TryGetNode attempts to retrieve a trie node by compact-encoded path. It is not -// possible to use keybyte-encoding as the path might contain odd nibbles. -func (t *Trie) TryGetNode(path []byte) ([]byte, int, error) { - item, newroot, resolved, err := t.tryGetNode(t.root, compactToHex(path), 0) +// MustGetNode is a wrapper of GetNode and will omit any encountered error but +// just print out an error message. 
+func (t *Trie) MustGetNode(path []byte) ([]byte, int) { + item, resolved, err := t.GetNode(path) + if err != nil { + log.Error("Unhandled trie error in Trie.GetNode", "err", err) + } + return item, resolved +} + +// GetNode retrieves a trie node by compact-encoded path. It is not possible +// to use keybyte-encoding as the path might contain odd nibbles. +// +// If the requested node is not present in trie, no error will be returned. +// If the trie is corrupted, a MissingNodeError is returned. +func (t *Trie) GetNode(path []byte) ([]byte, int, error) { + item, newroot, resolved, err := t.getNode(t.root, compactToHex(path), 0) if err != nil { return nil, resolved, err } @@ -203,10 +201,10 @@ func (t *Trie) TryGetNode(path []byte) ([]byte, int, error) { if item == nil { return nil, resolved, nil } - return item, resolved, err + return item, resolved, nil } -func (t *Trie) tryGetNode(origNode node, path []byte, pos int) (item []byte, newnode node, resolved int, err error) { +func (t *Trie) getNode(origNode node, path []byte, pos int) (item []byte, newnode node, resolved int, err error) { // If non-existent path requested, abort if origNode == nil { return nil, nil, 0, nil @@ -225,7 +223,7 @@ func (t *Trie) tryGetNode(origNode node, path []byte, pos int) (item []byte, new if hash == nil { return nil, origNode, 0, errors.New("non-consensus node") } - blob, err := t.db.RawNode(common.BytesToHash(hash)) + blob, err := t.reader.node(path, common.BytesToHash(hash)) return blob, origNode, 1, err } // Path still needs to be traversed, descend into children @@ -239,7 +237,7 @@ func (t *Trie) tryGetNode(origNode node, path []byte, pos int) (item []byte, new // Path branches off from short node return nil, n, 0, nil } - item, newnode, resolved, err = t.tryGetNode(n.Val, path, pos+len(n.Key)) + item, newnode, resolved, err = t.getNode(n.Val, path, pos+len(n.Key)) if err == nil && resolved > 0 { n = n.copy() n.Val = newnode @@ -247,7 +245,7 @@ func (t *Trie) tryGetNode(origNode 
node, path []byte, pos int) (item []byte, new return item, n, resolved, err case *fullNode: - item, newnode, resolved, err = t.tryGetNode(n.Children[path[pos]], path, pos+1) + item, newnode, resolved, err = t.getNode(n.Children[path[pos]], path, pos+1) if err == nil && resolved > 0 { n = n.copy() n.Children[path[pos]] = newnode @@ -255,11 +253,11 @@ func (t *Trie) tryGetNode(origNode node, path []byte, pos int) (item []byte, new return item, n, resolved, err case hashNode: - child, err := t.resolveHash(n, path[:pos]) + child, err := t.resolveAndTrack(n, path[:pos]) if err != nil { return nil, n, 1, err } - item, newnode, resolved, err := t.tryGetNode(child, path, pos) + item, newnode, resolved, err := t.getNode(child, path, pos) return item, newnode, resolved + 1, err default: @@ -267,33 +265,28 @@ func (t *Trie) tryGetNode(origNode node, path []byte, pos int) (item []byte, new } } -// Update associates key with value in the trie. Subsequent calls to -// Get will return value. If value has length zero, any existing value -// is deleted from the trie and calls to Get will return nil. -// -// The value bytes must not be modified by the caller while they are -// stored in the trie. -func (t *Trie) Update(key, value []byte) { - if err := t.TryUpdate(key, value); err != nil { +// MustUpdate is a wrapper of Update and will omit any encountered error but +// just print out an error message. +func (t *Trie) MustUpdate(key, value []byte) { + if err := t.Update(key, value); err != nil { log.Error("Unhandled trie error in Trie.Update", "err", err) } } -// TryUpdate associates key with value in the trie. Subsequent calls to +// Update associates key with value in the trie. Subsequent calls to // Get will return value. If value has length zero, any existing value // is deleted from the trie and calls to Get will return nil. // // The value bytes must not be modified by the caller while they are // stored in the trie. 
// -// If a node was not found in the database, a MissingNodeError is returned. -func (t *Trie) TryUpdate(key, value []byte) error { - return t.tryUpdate(key, value) +// If the requested node is not present in trie, no error will be returned. +// If the trie is corrupted, a MissingNodeError is returned. +func (t *Trie) Update(key, value []byte) error { + return t.update(key, value) } -// tryUpdate expects an RLP-encoded value and performs the core function -// for TryUpdate and TryUpdateAccount. -func (t *Trie) tryUpdate(key, value []byte) error { +func (t *Trie) update(key, value []byte) error { t.unhashed++ k := keybytesToHex(key) if len(value) != 0 { @@ -376,7 +369,7 @@ func (t *Trie) insert(n node, prefix, key []byte, value node) (bool, node, error // We've hit a part of the trie that isn't loaded yet. Load // the node and insert into it. This leaves all child nodes on // the path to the value in the trie. - rn, err := t.resolveHash(n, prefix) + rn, err := t.resolveAndTrack(n, prefix) if err != nil { return false, nil, err } @@ -391,16 +384,19 @@ func (t *Trie) insert(n node, prefix, key []byte, value node) (bool, node, error } } -// Delete removes any existing value for key from the trie. -func (t *Trie) Delete(key []byte) { - if err := t.TryDelete(key); err != nil { +// MustDelete is a wrapper of Delete and will omit any encountered error but +// just print out an error message. +func (t *Trie) MustDelete(key []byte) { + if err := t.Delete(key); err != nil { log.Error("Unhandled trie error in Trie.Delete", "err", err) } } -// TryDelete removes any existing value for key from the trie. -// If a node was not found in the database, a MissingNodeError is returned. -func (t *Trie) TryDelete(key []byte) error { +// Delete removes any existing value for key from the trie. +// +// If the requested node is not present in trie, no error will be returned. +// If the trie is corrupted, a MissingNodeError is returned. 
+func (t *Trie) Delete(key []byte) error { t.unhashed++ k := keybytesToHex(key) _, n, err := t.delete(t.root, nil, k) @@ -530,7 +526,7 @@ func (t *Trie) delete(n node, prefix, key []byte) (bool, node, error) { // We've hit a part of the trie that isn't loaded yet. Load // the node and delete from it. This leaves all child nodes on // the path to the value in the trie. - rn, err := t.resolveHash(n, prefix) + rn, err := t.resolveAndTrack(n, prefix) if err != nil { return false, nil, err } @@ -554,36 +550,28 @@ func concat(s1 []byte, s2 ...byte) []byte { func (t *Trie) resolve(n node, prefix []byte) (node, error) { if n, ok := n.(hashNode); ok { - return t.resolveHash(n, prefix) + return t.resolveAndTrack(n, prefix) } return n, nil } -// resolveHash loads node from the underlying database with the provided -// node hash and path prefix. -func (t *Trie) resolveHash(n hashNode, prefix []byte) (node, error) { - hash := common.BytesToHash(n) - if node := t.db.EncodedNode(hash); node != nil { - return node, nil - } - return nil, &MissingNodeError{Owner: t.owner, NodeHash: hash, Path: prefix} -} - -// resolveHash loads rlp-encoded node blob from the underlying database -// with the provided node hash and path prefix. -func (t *Trie) resolveBlob(n hashNode, prefix []byte) ([]byte, error) { - hash := common.BytesToHash(n) - blob, _ := t.db.RawNode(hash) - if len(blob) != 0 { - return blob, nil +// resolveAndTrack loads node from the underlying store with the given node hash +// and path prefix and also tracks the loaded node blob in tracer treated as the +// node's original value. The rlp-encoded blob is preferred to be loaded from +// database because it's easy to decode node while complex to encode node to blob. 
+func (t *Trie) resolveAndTrack(n hashNode, prefix []byte) (node, error) { + blob, err := t.reader.node(prefix, common.BytesToHash(n)) + if err != nil { + return nil, err } - return nil, &MissingNodeError{Owner: t.owner, NodeHash: hash, Path: prefix} + t.tracer.onRead(prefix, blob) + return mustDecodeNode(n, blob), nil } // Hash returns the root hash of the trie. It does not write to the // database and can be used even if the trie doesn't have one. func (t *Trie) Hash() common.Hash { - hash, cached, _ := t.hashRoot() + hash, cached := t.hashRoot() t.root = cached return common.BytesToHash(hash.(hashNode)) } @@ -594,11 +582,17 @@ func (t *Trie) Hash() common.Hash { // The returned nodeset can be nil if the trie is clean (nothing to commit). // Once the trie is committed, it's not usable anymore. A new trie must // be created with new root and updated trie database for following usage -func (t *Trie) Commit(collectLeaf bool) (common.Hash, *NodeSet, error) { +func (t *Trie) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet) { defer t.tracer.reset() + nodes := trienode.NewNodeSet(t.owner) + t.tracer.markDeletions(nodes) + + // Trie is empty and can be classified into two types of situations: + // - The trie was empty and no update happens + // - The trie was non-empty and all nodes are dropped if t.root == nil { - return emptyRoot, nil, nil + return types.EmptyRootHash, nodes } // Derive the hash for all dirty nodes first. We hold the assumption // in the following procedure that all nodes are hashed. @@ -610,28 +604,25 @@ func (t *Trie) Commit(collectLeaf bool) (common.Hash, *NodeSet, error) { // Replace the root node with the origin hash in order to // ensure all resolved nodes are dropped after the commit. 
t.root = hashedNode - return rootHash, nil, nil - } - h := newCommitter(t.owner, collectLeaf) - newRoot, nodes, err := h.Commit(t.root) - if err != nil { - return common.Hash{}, nil, err + return rootHash, nil } - t.root = newRoot - return rootHash, nodes, nil + t.root = newCommitter(nodes, t.tracer, collectLeaf).Commit(t.root) + return rootHash, nodes } // hashRoot calculates the root hash of the given trie -func (t *Trie) hashRoot() (node, node, error) { +func (t *Trie) hashRoot() (node, node) { if t.root == nil { - return hashNode(emptyRoot.Bytes()), nil, nil + return hashNode(types.EmptyRootHash.Bytes()), nil } // If the number of changes is below 100, we let one thread handle it h := newHasher(t.unhashed >= 100) - defer returnHasherToPool(h) + defer func() { + returnHasherToPool(h) + t.unhashed = 0 + }() hashed, cached := h.hash(t.root, true) - t.unhashed = 0 - return hashed, cached, nil + return hashed, cached } // Reset drops the referenced root node and cleans all internal state. @@ -639,6 +630,5 @@ func (t *Trie) Reset() { t.root = nil t.owner = common.Hash{} t.unhashed = 0 - //t.db = nil t.tracer.reset() } diff --git a/coreth/trie/trie_id.go b/coreth/trie/trie_id.go new file mode 100644 index 00000000..b3ba417d --- /dev/null +++ b/coreth/trie/trie_id.go @@ -0,0 +1,65 @@ +// (c) 2023, Ava Labs, Inc. +// +// This file is a derived work, based on the go-ethereum library whose original +// notices appear below. +// +// It is distributed under a license compatible with the licensing terms of the +// original code from which it is derived. +// +// Much love to the original authors for their work. +// ********** +// Copyright 2022 The go-ethereum Authors +// This file is part of the go-ethereum library. 
+// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see + +package trie + +import "github.com/ethereum/go-ethereum/common" + +// ID is the identifier for uniquely identifying a trie. +type ID struct { + StateRoot common.Hash // The root of the corresponding state(block.root) + Owner common.Hash // The contract address hash which the trie belongs to + Root common.Hash // The root hash of trie +} + +// StateTrieID constructs an identifier for state trie with the provided state root. +func StateTrieID(root common.Hash) *ID { + return &ID{ + StateRoot: root, + Owner: common.Hash{}, + Root: root, + } +} + +// StorageTrieID constructs an identifier for storage trie which belongs to a certain +// state and contract specified by the stateRoot and owner. +func StorageTrieID(stateRoot common.Hash, owner common.Hash, root common.Hash) *ID { + return &ID{ + StateRoot: stateRoot, + Owner: owner, + Root: root, + } +} + +// TrieID constructs an identifier for a standard trie(not a second-layer trie) +// with provided root. It's mostly used in tests and some other tries like CHT trie. 
+func TrieID(root common.Hash) *ID { + return &ID{ + StateRoot: root, + Owner: common.Hash{}, + Root: root, + } +} diff --git a/coreth/trie/trie_reader.go b/coreth/trie/trie_reader.go new file mode 100644 index 00000000..1112f9d2 --- /dev/null +++ b/coreth/trie/trie_reader.go @@ -0,0 +1,91 @@ +// (c) 2023, Ava Labs, Inc. +// +// This file is a derived work, based on the go-ethereum library whose original +// notices appear below. +// +// It is distributed under a license compatible with the licensing terms of the +// original code from which it is derived. +// +// Much love to the original authors for their work. +// ********** +// Copyright 2022 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package trie + +import ( + "fmt" + + "github.com/ethereum/go-ethereum/common" +) + +// Reader wraps the Node method of a backing trie store. +type Reader interface { + // Node retrieves the RLP-encoded trie node blob with the provided trie + // identifier, node path and the corresponding node hash. No error will + // be returned if the node is not found. + Node(owner common.Hash, path []byte, hash common.Hash) ([]byte, error) +} + +// NodeReader wraps all the necessary functions for accessing trie node. 
+type NodeReader interface { + // Reader returns a reader for accessing all trie nodes with provided + // state root. Nil is returned in case the state is not available. + Reader(root common.Hash) Reader +} + +// trieReader is a wrapper of the underlying node reader. It's not safe +// for concurrent usage. +type trieReader struct { + owner common.Hash + reader Reader + banned map[string]struct{} // Marker to prevent node from being accessed, for tests +} + +// newTrieReader initializes the trie reader with the given node reader. +func newTrieReader(stateRoot, owner common.Hash, db NodeReader) (*trieReader, error) { + reader := db.Reader(stateRoot) + if reader == nil { + return nil, fmt.Errorf("state not found #%x", stateRoot) + } + return &trieReader{owner: owner, reader: reader}, nil +} + +// newEmptyReader initializes the pure in-memory reader. All read operations +// should be forbidden and returns the MissingNodeError. +func newEmptyReader() *trieReader { + return &trieReader{} +} + +// node retrieves the rlp-encoded trie node with the provided trie node +// information. An MissingNodeError will be returned in case the node is +// not found or any error is encountered. +func (r *trieReader) node(path []byte, hash common.Hash) ([]byte, error) { + // Perform the logics in tests for preventing trie node access. 
+ if r.banned != nil { + if _, ok := r.banned[string(path)]; ok { + return nil, &MissingNodeError{Owner: r.owner, NodeHash: hash, Path: path} + } + } + if r.reader == nil { + return nil, &MissingNodeError{Owner: r.owner, NodeHash: hash, Path: path} + } + blob, err := r.reader.Node(r.owner, path, hash) + if err != nil || len(blob) == 0 { + return nil, &MissingNodeError{Owner: r.owner, NodeHash: hash, Path: path, err: err} + } + return blob, nil +} diff --git a/coreth/trie/trie_test.go b/coreth/trie/trie_test.go index bd74a42a..60b7157c 100644 --- a/coreth/trie/trie_test.go +++ b/coreth/trie/trie_test.go @@ -39,12 +39,14 @@ import ( "testing/quick" "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/ethdb" - "github.com/ava-labs/coreth/ethdb/memorydb" + "github.com/ava-labs/coreth/core/types" + "github.com/ava-labs/coreth/trie/trienode" "github.com/davecgh/go-spew/spew" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/rlp" + "github.com/stretchr/testify/require" "golang.org/x/crypto/sha3" ) @@ -56,7 +58,7 @@ func init() { func TestEmptyTrie(t *testing.T) { trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) res := trie.Hash() - exp := emptyRoot + exp := types.EmptyRootHash if res != exp { t.Errorf("expected %x got %x", exp, res) } @@ -66,14 +68,15 @@ func TestNull(t *testing.T) { trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) key := make([]byte, 32) value := []byte("test") - trie.Update(key, value) - if !bytes.Equal(trie.Get(key), value) { + trie.MustUpdate(key, value) + if !bytes.Equal(trie.MustGet(key), value) { t.Fatal("wrong value") } } func TestMissingRoot(t *testing.T) { - trie, err := New(common.Hash{}, common.HexToHash("0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33"), NewDatabase(memorydb.New())) + root := common.HexToHash("0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33") + trie, err := New(TrieID(root), 
NewDatabase(rawdb.NewMemoryDatabase())) if trie != nil { t.Error("New returned non-nil trie for invalid root") } @@ -82,77 +85,87 @@ func TestMissingRoot(t *testing.T) { } } -func TestMissingNodeDisk(t *testing.T) { testMissingNode(t, false) } -func TestMissingNodeMemonly(t *testing.T) { testMissingNode(t, true) } +func TestMissingNode(t *testing.T) { + testMissingNode(t, false, rawdb.HashScheme) + //testMissingNode(t, false, rawdb.PathScheme) + testMissingNode(t, true, rawdb.HashScheme) + //testMissingNode(t, true, rawdb.PathScheme) +} -func testMissingNode(t *testing.T, memonly bool) { - diskdb := memorydb.New() - triedb := NewDatabase(diskdb) +func testMissingNode(t *testing.T, memonly bool, scheme string) { + diskdb := rawdb.NewMemoryDatabase() + triedb := newTestDatabase(diskdb, scheme) trie := NewEmpty(triedb) updateString(trie, "120000", "qwerqwerqwerqwerqwerqwerqwerqwer") updateString(trie, "123456", "asdfasdfasdfasdfasdfasdfasdfasdf") - root, nodes, _ := trie.Commit(false) - triedb.Update(NewWithNodeSet(nodes)) + root, nodes := trie.Commit(false) + triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)) + if !memonly { - triedb.Commit(root, true, nil) + require.NoError(t, triedb.Commit(root, false)) } - trie, _ = New(common.Hash{}, root, triedb) - _, err := trie.TryGet([]byte("120000")) + trie, _ = New(TrieID(root), triedb) + _, err := trie.Get([]byte("120000")) if err != nil { t.Errorf("Unexpected error: %v", err) } - trie, _ = New(common.Hash{}, root, triedb) - _, err = trie.TryGet([]byte("120099")) + trie, _ = New(TrieID(root), triedb) + _, err = trie.Get([]byte("120099")) if err != nil { t.Errorf("Unexpected error: %v", err) } - trie, _ = New(common.Hash{}, root, triedb) - _, err = trie.TryGet([]byte("123456")) + trie, _ = New(TrieID(root), triedb) + _, err = trie.Get([]byte("123456")) if err != nil { t.Errorf("Unexpected error: %v", err) } - trie, _ = New(common.Hash{}, root, triedb) - err = trie.TryUpdate([]byte("120099"), 
[]byte("zxcvzxcvzxcvzxcvzxcvzxcvzxcvzxcv")) + trie, _ = New(TrieID(root), triedb) + err = trie.Update([]byte("120099"), []byte("zxcvzxcvzxcvzxcvzxcvzxcvzxcvzxcv")) if err != nil { t.Errorf("Unexpected error: %v", err) } - trie, _ = New(common.Hash{}, root, triedb) - err = trie.TryDelete([]byte("123456")) + trie, _ = New(TrieID(root), triedb) + err = trie.Delete([]byte("123456")) if err != nil { t.Errorf("Unexpected error: %v", err) } - hash := common.HexToHash("0xe1d943cc8f061a0c0b98162830b970395ac9315654824bf21b73b891365262f9") + var ( + path []byte + hash = common.HexToHash("0xe1d943cc8f061a0c0b98162830b970395ac9315654824bf21b73b891365262f9") + ) + for p, n := range nodes.Nodes { + if n.Hash == hash { + path = common.CopyBytes([]byte(p)) + break + } + } + trie, _ = New(TrieID(root), triedb) if memonly { - delete(triedb.dirties, hash) + trie.reader.banned = map[string]struct{}{string(path): {}} } else { - diskdb.Delete(hash[:]) + rawdb.DeleteTrieNode(diskdb, common.Hash{}, path, hash, scheme) } - trie, _ = New(common.Hash{}, root, triedb) - _, err = trie.TryGet([]byte("120000")) + _, err = trie.Get([]byte("120000")) if _, ok := err.(*MissingNodeError); !ok { t.Errorf("Wrong error: %v", err) } - trie, _ = New(common.Hash{}, root, triedb) - _, err = trie.TryGet([]byte("120099")) + _, err = trie.Get([]byte("120099")) if _, ok := err.(*MissingNodeError); !ok { t.Errorf("Wrong error: %v", err) } - trie, _ = New(common.Hash{}, root, triedb) - _, err = trie.TryGet([]byte("123456")) + _, err = trie.Get([]byte("123456")) if err != nil { t.Errorf("Unexpected error: %v", err) } - trie, _ = New(common.Hash{}, root, triedb) - err = trie.TryUpdate([]byte("120099"), []byte("zxcv")) + err = trie.Update([]byte("120099"), []byte("zxcv")) if _, ok := err.(*MissingNodeError); !ok { t.Errorf("Wrong error: %v", err) } - trie, _ = New(common.Hash{}, root, triedb) - err = trie.TryDelete([]byte("123456")) + err = trie.Delete([]byte("123456")) if _, ok := err.(*MissingNodeError); !ok { 
t.Errorf("Wrong error: %v", err) } @@ -175,10 +188,7 @@ func TestInsert(t *testing.T) { updateString(trie, "A", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") exp = common.HexToHash("d23786fb4a010da3ce639d66d5e904a11dbc02746d1ce25029e53290cabf28ab") - root, _, err := trie.Commit(false) - if err != nil { - t.Fatalf("commit error: %v", err) - } + root, _ = trie.Commit(false) if root != exp { t.Errorf("case 2: exp %x got %x", exp, root) } @@ -203,9 +213,9 @@ func TestGet(t *testing.T) { if i == 1 { return } - root, nodes, _ := trie.Commit(false) - db.Update(NewWithNodeSet(nodes)) - trie, _ = New(common.Hash{}, root, db) + root, nodes := trie.Commit(false) + db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)) + trie, _ = New(TrieID(root), db) } } @@ -261,8 +271,8 @@ func TestEmptyValues(t *testing.T) { } func TestReplication(t *testing.T) { - triedb := NewDatabase(rawdb.NewMemoryDatabase()) - trie := NewEmpty(triedb) + db := NewDatabase(rawdb.NewMemoryDatabase()) + trie := NewEmpty(db) vals := []struct{ k, v string }{ {"do", "verb"}, {"ether", "wookiedoo"}, @@ -275,37 +285,31 @@ func TestReplication(t *testing.T) { for _, val := range vals { updateString(trie, val.k, val.v) } - exp, nodes, err := trie.Commit(false) - if err != nil { - t.Fatalf("commit error: %v", err) - } - triedb.Update(NewWithNodeSet(nodes)) + root, nodes := trie.Commit(false) + db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)) // create a new trie on top of the database and check that lookups work. 
- trie2, err := New(common.Hash{}, exp, triedb) + trie2, err := New(TrieID(root), db) if err != nil { - t.Fatalf("can't recreate trie at %x: %v", exp, err) + t.Fatalf("can't recreate trie at %x: %v", root, err) } for _, kv := range vals { if string(getString(trie2, kv.k)) != kv.v { t.Errorf("trie2 doesn't have %q => %q", kv.k, kv.v) } } - hash, nodes, err := trie2.Commit(false) - if err != nil { - t.Fatalf("commit error: %v", err) - } - if hash != exp { - t.Errorf("root failure. expected %x got %x", exp, hash) + hash, nodes := trie2.Commit(false) + if hash != root { + t.Errorf("root failure. expected %x got %x", root, hash) } // recreate the trie after commit if nodes != nil { - triedb.Update(NewWithNodeSet(nodes)) + db.Update(hash, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)) } - trie2, err = New(common.Hash{}, hash, triedb) + trie2, err = New(TrieID(hash), db) if err != nil { - t.Fatalf("can't recreate trie at %x: %v", exp, err) + t.Fatalf("can't recreate trie at %x: %v", hash, err) } // perform some insertions on the new trie. vals2 := []struct{ k, v string }{ @@ -322,15 +326,15 @@ func TestReplication(t *testing.T) { for _, val := range vals2 { updateString(trie2, val.k, val.v) } - if hash := trie2.Hash(); hash != exp { - t.Errorf("root failure. expected %x got %x", exp, hash) + if trie2.Hash() != hash { + t.Errorf("root failure. 
expected %x got %x", hash, hash) } } func TestLargeValue(t *testing.T) { trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) - trie.Update([]byte("key1"), []byte{99, 99, 99, 99}) - trie.Update([]byte("key2"), bytes.Repeat([]byte{1}, 32)) + trie.MustUpdate([]byte("key1"), []byte{99, 99, 99, 99}) + trie.MustUpdate([]byte("key2"), bytes.Repeat([]byte{1}, 32)) trie.Hash() } @@ -386,6 +390,7 @@ const ( opCommit opItercheckhash opNodeDiff + opProve opMax // boundary value, not an actual op ) @@ -411,7 +416,7 @@ func (randTest) Generate(r *rand.Rand, size int) reflect.Value { step.key = genKey() step.value = make([]byte, 8) binary.BigEndian.PutUint64(step.value, uint64(i)) - case opGet, opDelete: + case opGet, opDelete, opProve: step.key = genKey() } steps = append(steps, step) @@ -419,64 +424,122 @@ func (randTest) Generate(r *rand.Rand, size int) reflect.Value { return reflect.ValueOf(steps) } +func verifyAccessList(old *Trie, new *Trie, set *trienode.NodeSet) error { + deletes, inserts, updates := diffTries(old, new) + + // Check insertion set + for path := range inserts { + n, ok := set.Nodes[path] + if !ok || n.IsDeleted() { + return errors.New("expect new node") + } + if len(n.Prev) > 0 { + return errors.New("unexpected origin value") + } + } + // Check deletion set + for path, blob := range deletes { + n, ok := set.Nodes[path] + if !ok || !n.IsDeleted() { + return errors.New("expect deleted node") + } + if len(n.Prev) == 0 { + return errors.New("expect origin value") + } + if !bytes.Equal(n.Prev, blob) { + return errors.New("invalid origin value") + } + } + // Check update set + for path, blob := range updates { + n, ok := set.Nodes[path] + if !ok || n.IsDeleted() { + return errors.New("expect updated node") + } + if len(n.Prev) == 0 { + return errors.New("expect origin value") + } + if !bytes.Equal(n.Prev, blob) { + return errors.New("invalid origin value") + } + } + return nil +} + func runRandTest(rt randTest) bool { + var scheme = rawdb.HashScheme + //if 
rand.Intn(2) == 0 { + // scheme = rawdb.PathScheme + //} var ( - triedb = NewDatabase(memorydb.New()) + origin = types.EmptyRootHash + triedb = newTestDatabase(rawdb.NewMemoryDatabase(), scheme) tr = NewEmpty(triedb) values = make(map[string]string) // tracks content of the trie origTrie = NewEmpty(triedb) ) - tr.tracer = newTracer() - for i, step := range rt { // fmt.Printf("{op: %d, key: common.Hex2Bytes(\"%x\"), value: common.Hex2Bytes(\"%x\")}, // step %d\n", // step.op, step.key, step.value, i) switch step.op { case opUpdate: - tr.Update(step.key, step.value) + tr.MustUpdate(step.key, step.value) values[string(step.key)] = string(step.value) case opDelete: - tr.Delete(step.key) + tr.MustDelete(step.key) delete(values, string(step.key)) case opGet: - v := tr.Get(step.key) + v := tr.MustGet(step.key) want := values[string(step.key)] if string(v) != want { rt[i].err = fmt.Errorf("mismatch for key %#x, got %#x want %#x", step.key, v, want) } + case opProve: + hash := tr.Hash() + if hash == types.EmptyRootHash { + continue + } + proofDb := rawdb.NewMemoryDatabase() + err := tr.Prove(step.key, 0, proofDb) + if err != nil { + rt[i].err = fmt.Errorf("failed for proving key %#x, %v", step.key, err) + } + _, err = VerifyProof(hash, step.key, proofDb) + if err != nil { + rt[i].err = fmt.Errorf("failed for verifying key %#x, %v", step.key, err) + } case opHash: tr.Hash() case opCommit: - hash, nodes, err := tr.Commit(false) - if err != nil { - rt[i].err = err - return false - } + root, nodes := tr.Commit(true) if nodes != nil { - triedb.Update(NewWithNodeSet(nodes)) + triedb.Update(root, origin, trienode.NewWithNodeSet(nodes)) } - newtr, err := New(common.Hash{}, hash, triedb) + newtr, err := New(TrieID(root), triedb) if err != nil { rt[i].err = err return false } + if nodes != nil { + if err := verifyAccessList(origTrie, newtr, nodes); err != nil { + rt[i].err = err + return false + } + } tr = newtr - tr.tracer = newTracer() - origTrie = tr.Copy() + origin = root case 
opItercheckhash: checktr := NewEmpty(triedb) it := NewIterator(tr.NodeIterator(nil)) for it.Next() { - checktr.Update(it.Key, it.Value) + checktr.MustUpdate(it.Key, it.Value) } if tr.Hash() != checktr.Hash() { rt[i].err = fmt.Errorf("hash mismatch in opItercheckhash") } case opNodeDiff: var ( - inserted = tr.tracer.insertList() - deleted = tr.tracer.deleteList() origIter = origTrie.NodeIterator(nil) curIter = tr.NodeIterator(nil) origSeen = make(map[string]struct{}) @@ -510,19 +573,19 @@ func runRandTest(rt randTest) bool { deleteExp[path] = struct{}{} } } - if len(insertExp) != len(inserted) { + if len(insertExp) != len(tr.tracer.inserts) { rt[i].err = fmt.Errorf("insert set mismatch") } - if len(deleteExp) != len(deleted) { + if len(deleteExp) != len(tr.tracer.deletes) { rt[i].err = fmt.Errorf("delete set mismatch") } - for _, insert := range inserted { - if _, present := insertExp[string(insert)]; !present { + for insert := range tr.tracer.inserts { + if _, present := insertExp[insert]; !present { rt[i].err = fmt.Errorf("missing inserted node") } } - for _, del := range deleted { - if _, present := deleteExp[string(del)]; !present { + for del := range tr.tracer.deletes { + if _, present := deleteExp[del]; !present { rt[i].err = fmt.Errorf("missing deleted node") } } @@ -556,13 +619,13 @@ func benchGet(b *testing.B) { k := make([]byte, 32) for i := 0; i < benchElemCount; i++ { binary.LittleEndian.PutUint64(k, uint64(i)) - trie.Update(k, k) + trie.MustUpdate(k, k) } binary.LittleEndian.PutUint64(k, benchElemCount/2) b.ResetTimer() for i := 0; i < b.N; i++ { - trie.Get(k) + trie.MustGet(k) } b.StopTimer() } @@ -573,13 +636,13 @@ func benchUpdate(b *testing.B, e binary.ByteOrder) *Trie { b.ReportAllocs() for i := 0; i < b.N; i++ { e.PutUint64(k, uint64(i)) - trie.Update(k, k) + trie.MustUpdate(k, k) } return trie } // Benchmarks the trie hashing. 
Since the trie caches the result of any operation, -// we cannot use b.N as the number of hashing rouns, since all rounds apart from +// we cannot use b.N as the number of hashing rounds, since all rounds apart from // the first one will be NOOP. As such, we'll use b.N as the number of account to // insert into the trie before measuring the hashing. // BenchmarkHash-6 288680 4561 ns/op 682 B/op 9 allocs/op @@ -601,11 +664,11 @@ func BenchmarkHash(b *testing.B) { trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) i := 0 for ; i < len(addresses)/2; i++ { - trie.Update(crypto.Keccak256(addresses[i][:]), accounts[i]) + trie.MustUpdate(crypto.Keccak256(addresses[i][:]), accounts[i]) } trie.Hash() for ; i < len(addresses); i++ { - trie.Update(crypto.Keccak256(addresses[i][:]), accounts[i]) + trie.MustUpdate(crypto.Keccak256(addresses[i][:]), accounts[i]) } b.ResetTimer() b.ReportAllocs() @@ -621,7 +684,7 @@ type account struct { } // Benchmarks the trie Commit following a Hash. Since the trie caches the result of any operation, -// we cannot use b.N as the number of hashing rouns, since all rounds apart from +// we cannot use b.N as the number of hashing rounds, since all rounds apart from // the first one will be NOOP. As such, we'll use b.N as the number of account to // insert into the trie before measuring the hashing. 
func BenchmarkCommitAfterHash(b *testing.B) { @@ -638,7 +701,7 @@ func benchmarkCommitAfterHash(b *testing.B, collectLeaf bool) { addresses, accounts := makeAccounts(b.N) trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) for i := 0; i < len(addresses); i++ { - trie.Update(crypto.Keccak256(addresses[i][:]), accounts[i]) + trie.MustUpdate(crypto.Keccak256(addresses[i][:]), accounts[i]) } // Insert the accounts into the trie and hash it trie.Hash() @@ -651,22 +714,22 @@ func TestTinyTrie(t *testing.T) { // Create a realistic account trie to hash _, accounts := makeAccounts(5) trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) - trie.Update(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000001337"), accounts[3]) + trie.MustUpdate(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000001337"), accounts[3]) if exp, root := common.HexToHash("8c6a85a4d9fda98feff88450299e574e5378e32391f75a055d470ac0653f1005"), trie.Hash(); exp != root { t.Errorf("1: got %x, exp %x", root, exp) } - trie.Update(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000001338"), accounts[4]) + trie.MustUpdate(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000001338"), accounts[4]) if exp, root := common.HexToHash("ec63b967e98a5720e7f720482151963982890d82c9093c0d486b7eb8883a66b1"), trie.Hash(); exp != root { t.Errorf("2: got %x, exp %x", root, exp) } - trie.Update(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000001339"), accounts[4]) + trie.MustUpdate(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000001339"), accounts[4]) if exp, root := common.HexToHash("0608c1d1dc3905fa22204c7a0e43644831c3b6d3def0f274be623a948197e64a"), trie.Hash(); exp != root { t.Errorf("3: got %x, exp %x", root, exp) } checktr := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) it := NewIterator(trie.NodeIterator(nil)) for it.Next() { - checktr.Update(it.Key, 
it.Value) + checktr.MustUpdate(it.Key, it.Value) } if troot, itroot := trie.Hash(), checktr.Hash(); troot != itroot { t.Fatalf("hash mismatch in opItercheckhash, trie: %x, check: %x", troot, itroot) @@ -678,7 +741,7 @@ func TestCommitAfterHash(t *testing.T) { addresses, accounts := makeAccounts(1000) trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) for i := 0; i < len(addresses); i++ { - trie.Update(crypto.Keccak256(addresses[i][:]), accounts[i]) + trie.MustUpdate(crypto.Keccak256(addresses[i][:]), accounts[i]) } // Insert the accounts into the trie and hash it trie.Hash() @@ -688,7 +751,7 @@ func TestCommitAfterHash(t *testing.T) { if exp != root { t.Errorf("got %x, exp %x", root, exp) } - root, _, _ = trie.Commit(false) + root, _ = trie.Commit(false) if exp != root { t.Errorf("got %x, exp %x", root, exp) } @@ -708,7 +771,7 @@ func makeAccounts(size int) (addresses [][20]byte, accounts [][]byte) { for i := 0; i < len(accounts); i++ { var ( nonce = uint64(random.Int63()) - root = emptyRoot + root = types.EmptyRootHash code = crypto.Keccak256(nil) ) // The big.Rand function is not deterministic with regards to 64 vs 32 bit systems, @@ -737,6 +800,7 @@ func (s *spongeDb) Get(key []byte) ([]byte, error) { return nil, error func (s *spongeDb) Delete(key []byte) error { panic("implement me") } func (s *spongeDb) NewBatch() ethdb.Batch { return &spongeBatch{s} } func (s *spongeDb) NewBatchWithSize(size int) ethdb.Batch { return &spongeBatch{s} } +func (s *spongeDb) NewSnapshot() (ethdb.Snapshot, error) { panic("implement me") } func (s *spongeDb) Stat(property string) (string, error) { panic("implement me") } func (s *spongeDb) Compact(start []byte, limit []byte) error { panic("implement me") } func (s *spongeDb) Close() error { return nil } @@ -768,47 +832,35 @@ func (b *spongeBatch) Reset() {} func (b *spongeBatch) Replay(w ethdb.KeyValueWriter) error { return nil } // TestCommitSequence tests that the trie.Commit operation writes the elements of the trie -// 
in the expected order, and calls the callbacks in the expected order. +// in the expected order. // The test data was based on the 'master' code, and is basically random. It can be used // to check whether changes to the trie modifies the write order or data in any way. func TestCommitSequence(t *testing.T) { for i, tc := range []struct { - count int - expWriteSeqHash []byte - expCallbackSeqHash []byte + count int + expWriteSeqHash []byte }{ - {20, common.FromHex("873c78df73d60e59d4a2bcf3716e8bfe14554549fea2fc147cb54129382a8066"), - common.FromHex("ff00f91ac05df53b82d7f178d77ada54fd0dca64526f537034a5dbe41b17df2a")}, - {200, common.FromHex("ba03d891bb15408c940eea5ee3d54d419595102648d02774a0268d892add9c8e"), - common.FromHex("f3cd509064c8d319bbdd1c68f511850a902ad275e6ed5bea11547e23d492a926")}, - {2000, common.FromHex("f7a184f20df01c94f09537401d11e68d97ad0c00115233107f51b9c287ce60c7"), - common.FromHex("ff795ea898ba1e4cfed4a33b4cf5535a347a02cf931f88d88719faf810f9a1c9")}, + {20, common.FromHex("873c78df73d60e59d4a2bcf3716e8bfe14554549fea2fc147cb54129382a8066")}, + {200, common.FromHex("ba03d891bb15408c940eea5ee3d54d419595102648d02774a0268d892add9c8e")}, + {2000, common.FromHex("f7a184f20df01c94f09537401d11e68d97ad0c00115233107f51b9c287ce60c7")}, } { addresses, accounts := makeAccounts(tc.count) // This spongeDb is used to check the sequence of disk-db-writes s := &spongeDb{sponge: sha3.NewLegacyKeccak256()} - db := NewDatabase(s) + db := NewDatabase(rawdb.NewDatabase(s)) trie := NewEmpty(db) - // Another sponge is used to check the callback-sequence - callbackSponge := sha3.NewLegacyKeccak256() // Fill the trie with elements for i := 0; i < tc.count; i++ { - trie.Update(crypto.Keccak256(addresses[i][:]), accounts[i]) + trie.MustUpdate(crypto.Keccak256(addresses[i][:]), accounts[i]) } // Flush trie -> database - root, nodes, _ := trie.Commit(false) - db.Update(NewWithNodeSet(nodes)) + root, nodes := trie.Commit(false) + db.Update(root, types.EmptyRootHash, 
trienode.NewWithNodeSet(nodes)) // Flush memdb -> disk (sponge) - db.Commit(root, false, func(c common.Hash) { - // And spongify the callback-order - callbackSponge.Write(c[:]) - }) + db.Commit(root, false) if got, exp := s.sponge.Sum(nil), tc.expWriteSeqHash; !bytes.Equal(got, exp) { t.Errorf("test %d, disk write sequence wrong:\ngot %x exp %x\n", i, got, exp) } - if got, exp := callbackSponge.Sum(nil), tc.expCallbackSeqHash; !bytes.Equal(got, exp) { - t.Errorf("test %d, call back sequence wrong:\ngot: %x exp %x\n", i, got, exp) - } } } @@ -816,24 +868,18 @@ func TestCommitSequence(t *testing.T) { // but uses random blobs instead of 'accounts' func TestCommitSequenceRandomBlobs(t *testing.T) { for i, tc := range []struct { - count int - expWriteSeqHash []byte - expCallbackSeqHash []byte + count int + expWriteSeqHash []byte }{ - {20, common.FromHex("8e4a01548551d139fa9e833ebc4e66fc1ba40a4b9b7259d80db32cff7b64ebbc"), - common.FromHex("450238d73bc36dc6cc6f926987e5428535e64be403877c4560e238a52749ba24")}, - {200, common.FromHex("6869b4e7b95f3097a19ddb30ff735f922b915314047e041614df06958fc50554"), - common.FromHex("0ace0b03d6cb8c0b82f6289ef5b1a1838306b455a62dafc63cada8e2924f2550")}, - {2000, common.FromHex("444200e6f4e2df49f77752f629a96ccf7445d4698c164f962bbd85a0526ef424"), - common.FromHex("117d30dafaa62a1eed498c3dfd70982b377ba2b46dd3e725ed6120c80829e518")}, + {20, common.FromHex("8e4a01548551d139fa9e833ebc4e66fc1ba40a4b9b7259d80db32cff7b64ebbc")}, + {200, common.FromHex("6869b4e7b95f3097a19ddb30ff735f922b915314047e041614df06958fc50554")}, + {2000, common.FromHex("444200e6f4e2df49f77752f629a96ccf7445d4698c164f962bbd85a0526ef424")}, } { prng := rand.New(rand.NewSource(int64(i))) // This spongeDb is used to check the sequence of disk-db-writes s := &spongeDb{sponge: sha3.NewLegacyKeccak256()} - db := NewDatabase(s) + db := NewDatabase(rawdb.NewDatabase(s)) trie := NewEmpty(db) - // Another sponge is used to check the callback-sequence - callbackSponge := 
sha3.NewLegacyKeccak256() // Fill the trie with elements for i := 0; i < tc.count; i++ { key := make([]byte, 32) @@ -846,22 +892,16 @@ func TestCommitSequenceRandomBlobs(t *testing.T) { } prng.Read(key) prng.Read(val) - trie.Update(key, val) + trie.MustUpdate(key, val) } // Flush trie -> database - root, nodes, _ := trie.Commit(false) - db.Update(NewWithNodeSet(nodes)) + root, nodes := trie.Commit(false) + db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)) // Flush memdb -> disk (sponge) - db.Commit(root, false, func(c common.Hash) { - // And spongify the callback-order - callbackSponge.Write(c[:]) - }) + db.Commit(root, false) if got, exp := s.sponge.Sum(nil), tc.expWriteSeqHash; !bytes.Equal(got, exp) { t.Fatalf("test %d, disk write sequence wrong:\ngot %x exp %x\n", i, got, exp) } - if got, exp := callbackSponge.Sum(nil), tc.expCallbackSeqHash; !bytes.Equal(got, exp) { - t.Fatalf("test %d, call back sequence wrong:\ngot: %x exp %x\n", i, got, exp) - } } } @@ -870,11 +910,13 @@ func TestCommitSequenceStackTrie(t *testing.T) { prng := rand.New(rand.NewSource(int64(count))) // This spongeDb is used to check the sequence of disk-db-writes s := &spongeDb{sponge: sha3.NewLegacyKeccak256(), id: "a"} - db := NewDatabase(s) + db := NewDatabase(rawdb.NewDatabase(s)) trie := NewEmpty(db) // Another sponge is used for the stacktrie commits stackTrieSponge := &spongeDb{sponge: sha3.NewLegacyKeccak256(), id: "b"} - stTrie := NewStackTrie(stackTrieSponge) + stTrie := NewStackTrie(func(owner common.Hash, path []byte, hash common.Hash, blob []byte) { + rawdb.WriteTrieNode(stackTrieSponge, owner, path, hash, blob, db.Scheme()) + }) // Fill the trie with elements for i := 0; i < count; i++ { // For the stack trie, we need to do inserts in proper order @@ -888,14 +930,14 @@ func TestCommitSequenceStackTrie(t *testing.T) { val = make([]byte, 1+prng.Intn(1024)) } prng.Read(val) - trie.TryUpdate(key, val) - stTrie.TryUpdate(key, val) + trie.Update(key, val) + 
stTrie.Update(key, val) } // Flush trie -> database - root, nodes, _ := trie.Commit(false) + root, nodes := trie.Commit(false) // Flush memdb -> disk (sponge) - db.Update(NewWithNodeSet(nodes)) - db.Commit(root, false, nil) + db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)) + db.Commit(root, false) // And flush stacktrie -> disk stRoot, err := stTrie.Commit() if err != nil { @@ -927,21 +969,23 @@ func TestCommitSequenceStackTrie(t *testing.T) { // not fit into 32 bytes, rlp-encoded. However, it's still the correct thing to do. func TestCommitSequenceSmallRoot(t *testing.T) { s := &spongeDb{sponge: sha3.NewLegacyKeccak256(), id: "a"} - db := NewDatabase(s) + db := NewDatabase(rawdb.NewDatabase(s)) trie := NewEmpty(db) // Another sponge is used for the stacktrie commits stackTrieSponge := &spongeDb{sponge: sha3.NewLegacyKeccak256(), id: "b"} - stTrie := NewStackTrie(stackTrieSponge) + stTrie := NewStackTrie(func(owner common.Hash, path []byte, hash common.Hash, blob []byte) { + rawdb.WriteTrieNode(stackTrieSponge, owner, path, hash, blob, db.Scheme()) + }) // Add a single small-element to the trie(s) key := make([]byte, 5) key[0] = 1 - trie.TryUpdate(key, []byte{0x1}) - stTrie.TryUpdate(key, []byte{0x1}) + trie.Update(key, []byte{0x1}) + stTrie.Update(key, []byte{0x1}) // Flush trie -> database - root, nodes, _ := trie.Commit(false) + root, nodes := trie.Commit(false) // Flush memdb -> disk (sponge) - db.Update(NewWithNodeSet(nodes)) - db.Commit(root, false, nil) + db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)) + db.Commit(root, false) // And flush stacktrie -> disk stRoot, err := stTrie.Commit() if err != nil { @@ -1004,7 +1048,7 @@ func benchmarkHashFixedSize(b *testing.B, addresses [][20]byte, accounts [][]byt b.ReportAllocs() trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) for i := 0; i < len(addresses); i++ { - trie.Update(crypto.Keccak256(addresses[i][:]), accounts[i]) + 
trie.MustUpdate(crypto.Keccak256(addresses[i][:]), accounts[i]) } // Insert the accounts into the trie and hash it b.StartTimer() @@ -1055,7 +1099,7 @@ func benchmarkCommitAfterHashFixedSize(b *testing.B, addresses [][20]byte, accou b.ReportAllocs() trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) for i := 0; i < len(addresses); i++ { - trie.Update(crypto.Keccak256(addresses[i][:]), accounts[i]) + trie.MustUpdate(crypto.Keccak256(addresses[i][:]), accounts[i]) } // Insert the accounts into the trie and hash it trie.Hash() @@ -1108,37 +1152,38 @@ func benchmarkDerefRootFixedSize(b *testing.B, addresses [][20]byte, accounts [] triedb := NewDatabase(rawdb.NewMemoryDatabase()) trie := NewEmpty(triedb) for i := 0; i < len(addresses); i++ { - trie.Update(crypto.Keccak256(addresses[i][:]), accounts[i]) + trie.MustUpdate(crypto.Keccak256(addresses[i][:]), accounts[i]) } h := trie.Hash() - _, nodes, _ := trie.Commit(false) - triedb.Update(NewWithNodeSet(nodes)) + root, nodes := trie.Commit(false) + triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)) b.StartTimer() triedb.Dereference(h) b.StopTimer() } func getString(trie *Trie, k string) []byte { - return trie.Get([]byte(k)) + return trie.MustGet([]byte(k)) } func updateString(trie *Trie, k, v string) { - trie.Update([]byte(k), []byte(v)) + trie.MustUpdate([]byte(k), []byte(v)) } func deleteString(trie *Trie, k string) { - trie.Delete([]byte(k)) + trie.MustDelete([]byte(k)) } func TestDecodeNode(t *testing.T) { t.Parallel() + var ( hash = make([]byte, 20) elems = make([]byte, 20) ) for i := 0; i < 5000000; i++ { - rand.Read(hash) - rand.Read(elems) + prng.Read(hash) + prng.Read(elems) decodeNode(hash, elems) } } diff --git a/coreth/trie/database.go b/coreth/trie/triedb/hashdb/database.go similarity index 57% rename from coreth/trie/database.go rename to coreth/trie/triedb/hashdb/database.go index 8d2e4cbf..eba0e856 100644 --- a/coreth/trie/database.go +++ b/coreth/trie/triedb/hashdb/database.go 
@@ -24,40 +24,35 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . -package trie +package hashdb import ( "errors" - "fmt" - "io" "reflect" "sync" "time" "github.com/ava-labs/coreth/core/rawdb" "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/ethdb" "github.com/ava-labs/coreth/metrics" - "github.com/ava-labs/coreth/utils" + "github.com/ava-labs/coreth/trie/trienode" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rlp" ) -const ( - cacheStatsUpdateFrequency = 1000 // update trie cache stats once per 1000 ops -) - var ( memcacheCleanHitMeter = metrics.NewRegisteredMeter("trie/memcache/clean/hit", nil) memcacheCleanMissMeter = metrics.NewRegisteredMeter("trie/memcache/clean/miss", nil) memcacheCleanReadMeter = metrics.NewRegisteredMeter("trie/memcache/clean/read", nil) memcacheCleanWriteMeter = metrics.NewRegisteredMeter("trie/memcache/clean/write", nil) - memcacheDirtyHitMeter = metrics.NewRegisteredMeter("trie/memcache/dirty/hit", nil) - memcacheDirtyMissMeter = metrics.NewRegisteredMeter("trie/memcache/dirty/miss", nil) - memcacheDirtyReadMeter = metrics.NewRegisteredMeter("trie/memcache/dirty/read", nil) - memcacheDirtyWriteMeter = metrics.NewRegisteredMeter("trie/memcache/dirty/write", nil) + memcacheDirtyHitMeter = metrics.NewRegisteredMeter("trie/memcache/dirty/hit", nil) + memcacheDirtyMissMeter = metrics.NewRegisteredMeter("trie/memcache/dirty/miss", nil) + memcacheDirtyReadMeter = metrics.NewRegisteredMeter("trie/memcache/dirty/read", nil) + memcacheDirtyWriteMeter = metrics.NewRegisteredMeter("trie/memcache/dirty/write", nil) + memcacheDirtySizeGauge = metrics.NewRegisteredGaugeFloat64("trie/memcache/dirty/size", nil) memcacheDirtyChildSizeGauge = metrics.NewRegisteredGaugeFloat64("trie/memcache/dirty/childsize", nil) memcacheDirtyNodesGauge = 
metrics.NewRegisteredGauge("trie/memcache/dirty/nodes", nil) @@ -79,6 +74,18 @@ var ( memcacheCommitSizeMeter = metrics.NewRegisteredMeter("trie/memcache/commit/size", nil) ) +// ChildResolver defines the required method to decode the provided +// trie node and iterate the children on top. +type ChildResolver interface { + ForEach(node []byte, onChild func(common.Hash)) +} + +type cache interface { + HasGet([]byte, []byte) ([]byte, bool) + Del([]byte) + Set([]byte, []byte) +} + // Database is an intermediate write layer between the trie data structures and // the disk database. The aim is to accumulate trie writes in-memory and only // periodically flush a couple tries to disk, garbage collecting the remainder. @@ -86,9 +93,10 @@ var ( // The trie Database is thread-safe in its mutations and is thread-safe in providing individual, // independent node access. type Database struct { - diskdb ethdb.KeyValueStore // Persistent storage for matured trie nodes + diskdb ethdb.Database // Persistent storage for matured trie nodes + resolver ChildResolver // The handler to resolve children of nodes - cleans *utils.MeteredCache // GC friendly memory cache of clean node RLPs + cleans cache // GC friendly memory cache of clean node RLPs dirties map[common.Hash]*cachedNode // Data and references relationships of dirty trie nodes oldest common.Hash // Oldest tracked node, flush-list head newest common.Hash // Newest tracked node, flush-list tail @@ -103,60 +111,18 @@ type Database struct { dirtiesSize common.StorageSize // Storage size of the dirty node cache (exc. metadata) childrenSize common.StorageSize // Storage size of the external children tracking - preimages *preimageStore // The store for caching preimages lock sync.RWMutex } -// rawNode is a simple binary blob used to differentiate between collapsed trie -// nodes and already encoded RLP binary blobs (while at the same time store them -// in the same cache fields). 
-type rawNode []byte - -func (n rawNode) cache() (hashNode, bool) { panic("this should never end up in a live trie") } -func (n rawNode) fstring(ind string) string { panic("this should never end up in a live trie") } - -func (n rawNode) EncodeRLP(w io.Writer) error { - _, err := w.Write(n) - return err -} - -// rawFullNode represents only the useful data content of a full node, with the -// caches and flags stripped out to minimize its data storage. This type honors -// the same RLP encoding as the original parent. -type rawFullNode [17]node - -func (n rawFullNode) cache() (hashNode, bool) { panic("this should never end up in a live trie") } -func (n rawFullNode) fstring(ind string) string { panic("this should never end up in a live trie") } - -func (n rawFullNode) EncodeRLP(w io.Writer) error { - eb := rlp.NewEncoderBuffer(w) - n.encode(eb) - return eb.Flush() -} - -// rawShortNode represents only the useful data content of a short node, with the -// caches and flags stripped out to minimize its data storage. This type honors -// the same RLP encoding as the original parent. -type rawShortNode struct { - Key []byte - Val node -} - -func (n rawShortNode) cache() (hashNode, bool) { panic("this should never end up in a live trie") } -func (n rawShortNode) fstring(ind string) string { panic("this should never end up in a live trie") } - // cachedNode is all the information we know about a single cached trie node // in the memory database write layer. 
type cachedNode struct { - node node // Cached collapsed trie node, or raw rlp data - size uint16 // Byte size of the useful cached data - - parents uint32 // Number of live nodes referencing this one - children map[common.Hash]uint16 // External children referenced by this node - - flushPrev common.Hash // Previous node in the flush-list - flushNext common.Hash // Next node in the flush-list + node []byte // Encoded node blob + parents uint32 // Number of live nodes referencing this one + external map[common.Hash]struct{} // The set of external children + flushPrev common.Hash // Previous node in the flush-list + flushNext common.Hash // Next node in the flush-list } // cachedNodeSize is the raw size of a cachedNode data structure without any @@ -164,178 +130,42 @@ type cachedNode struct { // than not counting them. var cachedNodeSize = int(reflect.TypeOf(cachedNode{}).Size()) -// cachedNodeChildrenSize is the raw size of an initialized but empty external -// reference map. -const cachedNodeChildrenSize = 48 - -// rlp returns the raw rlp encoded blob of the cached trie node, either directly -// from the cache, or by regenerating it from the collapsed node. -func (n *cachedNode) rlp() []byte { - if node, ok := n.node.(rawNode); ok { - return node - } - return nodeToBytes(n.node) -} - -// obj returns the decoded and expanded trie node, either directly from the cache, -// or by regenerating it from the rlp encoded blob. -func (n *cachedNode) obj(hash common.Hash) node { - if node, ok := n.node.(rawNode); ok { - // The raw-blob format nodes are loaded either from the - // clean cache or the database, they are all in their own - // copy and safe to use unsafe decoder. 
- return mustDecodeNodeUnsafe(hash[:], node) - } - return expandNode(hash[:], n.node) -} - -// forChilds invokes the callback for all the tracked children of this node, +// forChildren invokes the callback for all the tracked children of this node, // both the implicit ones from inside the node as well as the explicit ones // from outside the node. -func (n *cachedNode) forChilds(onChild func(hash common.Hash)) { - for child := range n.children { +func (n *cachedNode) forChildren(resolver ChildResolver, onChild func(hash common.Hash)) { + for child := range n.external { onChild(child) } - if _, ok := n.node.(rawNode); !ok { - forGatherChildren(n.node, onChild) - } + resolver.ForEach(n.node, onChild) } -// forGatherChildren traverses the node hierarchy of a collapsed storage node and -// invokes the callback for all the hashnode children. -func forGatherChildren(n node, onChild func(hash common.Hash)) { - switch n := n.(type) { - case *rawShortNode: - forGatherChildren(n.Val, onChild) - case rawFullNode: - for i := 0; i < 16; i++ { - forGatherChildren(n[i], onChild) - } - case hashNode: - onChild(common.BytesToHash(n)) - case valueNode, nil, rawNode: - default: - panic(fmt.Sprintf("unknown node type: %T", n)) +// New initializes the hash-based node database. +func New(diskdb ethdb.Database, cleans cache, resolver ChildResolver) *Database { + return &Database{ + diskdb: diskdb, + resolver: resolver, + cleans: cleans, + dirties: make(map[common.Hash]*cachedNode), } } -// simplifyNode traverses the hierarchy of an expanded memory node and discards -// all the internal caches, returning a node that only contains the raw data. 
-func simplifyNode(n node) node { - switch n := n.(type) { - case *shortNode: - // Short nodes discard the flags and cascade - return &rawShortNode{Key: n.Key, Val: simplifyNode(n.Val)} - - case *fullNode: - // Full nodes discard the flags and cascade - node := rawFullNode(n.Children) - for i := 0; i < len(node); i++ { - if node[i] != nil { - node[i] = simplifyNode(node[i]) - } - } - return node - - case valueNode, hashNode, rawNode: - return n - - default: - panic(fmt.Sprintf("unknown node type: %T", n)) - } -} - -// expandNode traverses the node hierarchy of a collapsed storage node and converts -// all fields and keys into expanded memory form. -func expandNode(hash hashNode, n node) node { - switch n := n.(type) { - case *rawShortNode: - // Short nodes need key and child expansion - return &shortNode{ - Key: compactToHex(n.Key), - Val: expandNode(nil, n.Val), - flags: nodeFlag{ - hash: hash, - }, - } - - case rawFullNode: - // Full nodes need child expansion - node := &fullNode{ - flags: nodeFlag{ - hash: hash, - }, - } - for i := 0; i < len(node.Children); i++ { - if n[i] != nil { - node.Children[i] = expandNode(nil, n[i]) - } - } - return node - - case valueNode, hashNode: - return n - - default: - panic(fmt.Sprintf("unknown node type: %T", n)) - } -} - -// Config defines all necessary options for database. -type Config struct { - Cache int // Memory allowance (MB) to use for caching trie nodes in memory - Preimages bool // Flag whether the preimage of trie key is recorded - Journal string // File location to load trie clean cache from - StatsPrefix string // Prefix for cache stats (disabled if empty) -} - -// NewDatabase creates a new trie database to store ephemeral trie content before -// its written out to disk or garbage collected. No read cache is created, so all -// data retrievals will hit the underlying disk database. 
-func NewDatabase(diskdb ethdb.KeyValueStore) *Database { - return NewDatabaseWithConfig(diskdb, nil) -} - -// NewDatabaseWithConfig creates a new trie database to store ephemeral trie content -// before its written out to disk or garbage collected. It also acts as a read cache -// for nodes loaded from disk. -func NewDatabaseWithConfig(diskdb ethdb.KeyValueStore, config *Config) *Database { - var cleans *utils.MeteredCache - if config != nil && config.Cache > 0 { - cleans = utils.NewMeteredCache(config.Cache*1024*1024, config.Journal, config.StatsPrefix, cacheStatsUpdateFrequency) - } - var preimage *preimageStore - if config != nil && config.Preimages { - preimage = newPreimageStore(diskdb) - } - db := &Database{ - diskdb: diskdb, - cleans: cleans, - dirties: map[common.Hash]*cachedNode{{}: { - children: make(map[common.Hash]uint16), - }}, - preimages: preimage, - } - return db -} - // insert inserts a simplified trie node into the memory database. // All nodes inserted by this function will be reference tracked // and in theory should only used for **trie nodes** insertion. 
-func (db *Database) insert(hash common.Hash, size int, node node) { +func (db *Database) insert(hash common.Hash, node []byte) { // If the node's already cached, skip if _, ok := db.dirties[hash]; ok { return } - memcacheDirtyWriteMeter.Mark(int64(size)) + memcacheDirtyWriteMeter.Mark(int64(len(node))) // Create the cached entry for this node entry := &cachedNode{ node: node, - size: uint16(size), flushPrev: db.newest, } - entry.forChilds(func(child common.Hash) { + entry.forChildren(db.resolver, func(child common.Hash) { if c := db.dirties[child]; c != nil { c.parents++ } @@ -348,45 +178,16 @@ func (db *Database) insert(hash common.Hash, size int, node node) { } else { db.dirties[db.newest].flushNext, db.newest = hash, hash } - db.dirtiesSize += common.StorageSize(common.HashLength + entry.size) + db.dirtiesSize += common.StorageSize(common.HashLength + len(node)) } -// RawNode retrieves an encoded cached trie node from memory. If it cannot be found -// cached, the method queries the persistent database for the content. This function -// will not return the metaroot. -func (db *Database) RawNode(h common.Hash) ([]byte, error) { - if h == (common.Hash{}) { +// Node retrieves an encoded cached trie node from memory. If it cannot be found +// cached, the method queries the persistent database for the content. +func (db *Database) Node(hash common.Hash) ([]byte, error) { + // It doesn't make sense to retrieve the metaroot + if hash == (common.Hash{}) { return nil, errors.New("not found") } - enc, cn, err := db.node(h) - if err != nil { - return nil, err - } - if len(enc) > 0 { - return enc, nil - } - return cn.rlp(), nil -} - -// EncodedNode returns a formatted [node] when given a node hash. If no node -// exists, nil is returned. This function will return the metaroot. 
-func (db *Database) EncodedNode(h common.Hash) node { - enc, cn, err := db.node(h) - if err != nil { - return nil - } - if len(enc) > 0 { - return mustDecodeNode(h[:], enc) - } - return cn.obj(h) -} - -// node retrieves an encoded cached trie node from memory. If it cannot be found -// cached, the method queries the persistent database for the content. -// -// We do not return a single node representation to avoid useless -// encoding/decoding depending on the caller. -func (db *Database) node(hash common.Hash) ([]byte, *cachedNode, error) { // Retrieve the node from the clean cache if available if db.cleans != nil { k := hash[:] @@ -395,7 +196,7 @@ func (db *Database) node(hash common.Hash) ([]byte, *cachedNode, error) { if len(enc) > 0 { memcacheCleanHitMeter.Mark(1) memcacheCleanReadMeter.Mark(int64(len(enc))) - return enc, nil, nil + return enc, nil } else { // Delete anything from cache that may have been added incorrectly // @@ -413,22 +214,22 @@ func (db *Database) node(hash common.Hash) ([]byte, *cachedNode, error) { if dirty != nil { memcacheDirtyHitMeter.Mark(1) - memcacheDirtyReadMeter.Mark(int64(dirty.size)) - return nil, dirty, nil + memcacheDirtyReadMeter.Mark(int64(len(dirty.node))) + return dirty.node, nil } memcacheDirtyMissMeter.Mark(1) // Content unavailable in memory, attempt to retrieve from disk - enc := rawdb.ReadTrieNode(db.diskdb, hash) - if len(enc) > 0 { + enc := rawdb.ReadLegacyTrieNode(db.diskdb, hash) + if len(enc) != 0 { if db.cleans != nil { db.cleans.Set(hash[:], enc) memcacheCleanMissMeter.Mark(1) memcacheCleanWriteMeter.Mark(int64(len(enc))) } - return enc, nil, nil + return enc, nil } - return nil, nil, errors.New("not found") + return nil, errors.New("not found") } // Nodes retrieves the hashes of all the nodes cached within the memory database. 
@@ -440,9 +241,7 @@ func (db *Database) Nodes() []common.Hash { var hashes = make([]common.Hash, 0, len(db.dirties)) for hash := range db.dirties { - if hash != (common.Hash{}) { // Special case for "root" references/nodes - hashes = append(hashes, hash) - } + hashes = append(hashes, hash) } return hashes } @@ -464,18 +263,22 @@ func (db *Database) reference(child common.Hash, parent common.Hash) { if !ok { return } - // If the reference already exists, only duplicate for roots - if db.dirties[parent].children == nil { - db.dirties[parent].children = make(map[common.Hash]uint16) - db.childrenSize += cachedNodeChildrenSize - } else if _, ok = db.dirties[parent].children[child]; ok && parent != (common.Hash{}) { + // The reference is for state root, increase the reference counter. + if parent == (common.Hash{}) { + node.parents += 1 return } - node.parents++ - db.dirties[parent].children[child]++ - if db.dirties[parent].children[child] == 1 { - db.childrenSize += common.HashLength + 2 // uint16 counter + // The reference is for external storage trie, don't duplicate if + // the reference is already existent. + if db.dirties[parent].external == nil { + db.dirties[parent].external = make(map[common.Hash]struct{}) + } + if _, ok := db.dirties[parent].external[child]; ok { + return } + node.parents++ + db.dirties[parent].external[child] = struct{}{} + db.childrenSize += common.HashLength } // Dereference removes an existing reference from a root node. 
@@ -485,11 +288,11 @@ func (db *Database) Dereference(root common.Hash) { log.Error("Attempted to dereference the trie cache meta root") return } - db.lock.Lock() defer db.lock.Unlock() + nodes, storage, start := len(db.dirties), db.dirtiesSize, time.Now() - db.dereference(root, common.Hash{}) + db.dereference(root) db.gcnodes += uint64(nodes - len(db.dirties)) db.gcsize += storage - db.dirtiesSize @@ -508,23 +311,13 @@ func (db *Database) Dereference(root common.Hash) { } // dereference is the private locked version of Dereference. -func (db *Database) dereference(child common.Hash, parent common.Hash) { - // Dereference the parent-child - node := db.dirties[parent] - - if node.children != nil && node.children[child] > 0 { - node.children[child]-- - if node.children[child] == 0 { - delete(node.children, child) - db.childrenSize -= (common.HashLength + 2) // uint16 counter - } - } - // If the child does not exist, it's a previously committed node. - node, ok := db.dirties[child] +func (db *Database) dereference(hash common.Hash) { + // If the node does not exist, it's a previously committed node. + node, ok := db.dirties[hash] if !ok { return } - // If there are no more references to the child, delete it and cascade + // If there are no more references to the node, delete it and cascade if node.parents > 0 { // This is a special cornercase where a node loaded from disk (i.e. 
not in the // memcache any more) gets reinjected as a new node (short node split into full, @@ -534,25 +327,29 @@ func (db *Database) dereference(child common.Hash, parent common.Hash) { } if node.parents == 0 { // Remove the node from the flush-list - switch child { + switch hash { case db.oldest: db.oldest = node.flushNext - db.dirties[node.flushNext].flushPrev = common.Hash{} + if node.flushNext != (common.Hash{}) { + db.dirties[node.flushNext].flushPrev = common.Hash{} + } case db.newest: db.newest = node.flushPrev - db.dirties[node.flushPrev].flushNext = common.Hash{} + if node.flushPrev != (common.Hash{}) { + db.dirties[node.flushPrev].flushNext = common.Hash{} + } default: db.dirties[node.flushPrev].flushNext = node.flushNext db.dirties[node.flushNext].flushPrev = node.flushPrev } // Dereference all children and delete the node - node.forChilds(func(hash common.Hash) { - db.dereference(hash, child) + node.forChildren(db.resolver, func(child common.Hash) { + db.dereference(child) }) - delete(db.dirties, child) - db.dirtiesSize -= common.StorageSize(common.HashLength + int(node.size)) - if node.children != nil { - db.childrenSize -= cachedNodeChildrenSize + delete(db.dirties, hash) + db.dirtiesSize -= common.StorageSize(common.HashLength + len(node.node)) + if node.external != nil { + db.childrenSize -= common.StorageSize(len(node.external) * common.HashLength) } } } @@ -570,9 +367,9 @@ type flushItem struct { func (db *Database) writeFlushItems(toFlush []*flushItem) error { batch := db.diskdb.NewBatch() for _, item := range toFlush { - rlp := item.node.rlp() + rlp := item.node.node item.rlp = rlp - rawdb.WriteTrieNode(batch, item.hash, rlp) + rawdb.WriteLegacyTrieNode(batch, item.hash, rlp) // If we exceeded the ideal batch size, commit and reset if batch.ValueSize() >= ethdb.IdealBatchSize { @@ -597,13 +394,6 @@ func (db *Database) writeFlushItems(toFlush []*flushItem) error { // memory usage goes below the given threshold. 
func (db *Database) Cap(limit common.StorageSize) error { start := time.Now() - // If the preimage cache got large enough, push to disk. If it's still small - // leave for later to deduplicate writes. - if db.preimages != nil { - if err := db.preimages.commit(false); err != nil { - return err - } - } // It is important that outside code doesn't see an inconsistent state // (referenced data removed from memory cache during commit but not yet @@ -616,8 +406,8 @@ func (db *Database) Cap(limit common.StorageSize) error { // db.dirtiesSize only contains the useful data in the cache, but when reporting // the total memory consumption, the maintenance metadata is also needed to be // counted. - pendingSize := db.dirtiesSize + common.StorageSize((len(db.dirties)-1)*cachedNodeSize) - pendingSize += db.childrenSize - common.StorageSize(len(db.dirties[common.Hash{}].children)*(common.HashLength+2)) + pendingSize := db.dirtiesSize + common.StorageSize(len(db.dirties)*cachedNodeSize) + pendingSize += db.childrenSize if pendingSize <= limit { db.lock.RUnlock() return nil @@ -634,9 +424,9 @@ func (db *Database) Cap(limit common.StorageSize) error { // Iterate to the next flush item, or abort if the size cap was achieved. Size // is the total size, including the useful cached data (hash -> blob), the // cache item metadata, as well as external children mappings. 
- pendingSize -= common.StorageSize(common.HashLength + int(node.size) + cachedNodeSize) - if node.children != nil { - pendingSize -= common.StorageSize(cachedNodeChildrenSize + len(node.children)*(common.HashLength+2)) + pendingSize -= common.StorageSize(common.HashLength + len(node.node) + cachedNodeSize) + if node.external != nil { + pendingSize -= common.StorageSize(len(node.external) * common.HashLength) } oldest = node.flushNext } @@ -675,19 +465,15 @@ func (db *Database) Cap(limit common.StorageSize) error { log.Debug("Persisted nodes from memory database", "nodes", nodes-len(db.dirties), "size", storage-db.dirtiesSize, "time", time.Since(start), "flushnodes", db.flushnodes, "flushsize", db.flushsize, "flushtime", db.flushtime, "livenodes", len(db.dirties), "livesize", db.dirtiesSize) + return nil } // Commit iterates over all the children of a particular node, writes them out // to disk, forcefully tearing down all references in both directions. As a side // effect, all pre-images accumulated up to this point are also written. -func (db *Database) Commit(node common.Hash, report bool, callback func(common.Hash)) error { +func (db *Database) Commit(node common.Hash, report bool) error { start := time.Now() - if db.preimages != nil { - if err := db.preimages.commit(true); err != nil { - return err - } - } // It is important that outside code doesn't see an inconsistent state (referenced // data removed from memory cache during commit but not yet in persistent storage). 
@@ -695,7 +481,7 @@ func (db *Database) Commit(node common.Hash, report bool, callback func(common.H db.lock.RLock() lockStart := time.Now() nodes, storage := len(db.dirties), db.dirtiesSize - toFlush, err := db.commit(node, make([]*flushItem, 0, 128), callback) + toFlush, err := db.commit(node, make([]*flushItem, 0, 128)) if err != nil { db.lock.RUnlock() log.Error("Failed to commit trie from trie database", "err", err) @@ -738,6 +524,7 @@ func (db *Database) Commit(node common.Hash, report bool, callback func(common.H // Reset the garbage collection statistics db.gcnodes, db.gcsize, db.gctime = 0, 0, 0 db.flushnodes, db.flushsize, db.flushtime = 0, 0, 0 + return nil } @@ -746,16 +533,16 @@ func (db *Database) Commit(node common.Hash, report bool, callback func(common.H // // [callback] will be invoked as soon as it is determined a trie node will be // flushed to disk (before it is actually written). -func (db *Database) commit(hash common.Hash, toFlush []*flushItem, callback func(common.Hash)) ([]*flushItem, error) { +func (db *Database) commit(hash common.Hash, toFlush []*flushItem) ([]*flushItem, error) { // If the node does not exist, it's a previously committed node node, ok := db.dirties[hash] if !ok { return toFlush, nil } var err error - node.forChilds(func(child common.Hash) { + node.forChildren(db.resolver, func(child common.Hash) { if err == nil { - toFlush, err = db.commit(child, toFlush, callback) + toFlush, err = db.commit(child, toFlush) } }) if err != nil { @@ -765,9 +552,6 @@ func (db *Database) commit(hash common.Hash, toFlush []*flushItem, callback func // that children are committed before their parents (an invariant of this // package). 
toFlush = append(toFlush, &flushItem{hash, node, nil}) - if callback != nil { - callback(hash) - } return toFlush, nil } @@ -792,19 +576,23 @@ func (db *Database) removeFromDirties(hash common.Hash, rlp []byte) { switch hash { case db.oldest: db.oldest = node.flushNext - db.dirties[node.flushNext].flushPrev = common.Hash{} + if node.flushNext != (common.Hash{}) { + db.dirties[node.flushNext].flushPrev = common.Hash{} + } case db.newest: db.newest = node.flushPrev - db.dirties[node.flushPrev].flushNext = common.Hash{} + if node.flushPrev != (common.Hash{}) { + db.dirties[node.flushPrev].flushNext = common.Hash{} + } default: db.dirties[node.flushPrev].flushNext = node.flushNext db.dirties[node.flushNext].flushPrev = node.flushPrev } // Remove the node from the dirty cache delete(db.dirties, hash) - db.dirtiesSize -= common.StorageSize(common.HashLength + int(node.size)) - if node.children != nil { - db.childrenSize -= common.StorageSize(cachedNodeChildrenSize + len(node.children)*(common.HashLength+2)) + db.dirtiesSize -= common.StorageSize(common.HashLength + len(node.node)) + if node.external != nil { + db.childrenSize -= common.StorageSize(len(node.external) * common.HashLength) } // Move the flushed node into the clean cache to prevent insta-reloads if db.cleans != nil { @@ -813,30 +601,48 @@ func (db *Database) removeFromDirties(hash common.Hash, rlp []byte) { } } -// Update inserts the dirty nodes in provided nodeset into database and -// links the account trie with multiple storage tries if necessary. -func (db *Database) Update(nodes *MergedNodeSet) error { +// Initialized returns an indicator if state data is already initialized +// in hash-based scheme by checking the presence of genesis state. 
+func (db *Database) Initialized(genesisRoot common.Hash) bool { + return rawdb.HasLegacyTrieNode(db.diskdb, genesisRoot) +} + +// Update inserts the dirty nodes in provided nodeset into database and link the +// account trie with multiple storage tries if necessary. +func (db *Database) Update(root common.Hash, parent common.Hash, nodes *trienode.MergedNodeSet) error { + // Ensure the parent state is present and signal a warning if not. + if parent != types.EmptyRootHash { + if blob, _ := db.Node(parent); len(blob) == 0 { + log.Error("parent state is not present") + } + } db.lock.Lock() defer db.lock.Unlock() - return db.update(nodes) + return db.update(root, parent, nodes) } // UpdateAndReferenceRoot inserts the dirty nodes in provided nodeset into // database and links the account trie with multiple storage tries if necessary, // then adds a reference [from] root to the metaroot while holding the db's lock. -func (db *Database) UpdateAndReferenceRoot(nodes *MergedNodeSet, root common.Hash) error { +func (db *Database) UpdateAndReferenceRoot(root common.Hash, parent common.Hash, nodes *trienode.MergedNodeSet) error { + // Ensure the parent state is present and signal a warning if not. + if parent != types.EmptyRootHash { + if blob, _ := db.Node(parent); len(blob) == 0 { + log.Error("parent state is not present") + } + } db.lock.Lock() defer db.lock.Unlock() - if err := db.update(nodes); err != nil { + if err := db.update(root, parent, nodes); err != nil { return err } db.reference(root, common.Hash{}) return nil } -func (db *Database) update(nodes *MergedNodeSet) error { +func (db *Database) update(root common.Hash, parent common.Hash, nodes *trienode.MergedNodeSet) error { // Insert dirty nodes into the database. In the same tree, it must be // ensured that children are inserted first, then parent so that children // can be linked with their parent correctly. 
@@ -844,35 +650,34 @@ func (db *Database) update(nodes *MergedNodeSet) error { // Note, the storage tries must be flushed before the account trie to // retain the invariant that children go into the dirty cache first. var order []common.Hash - for owner := range nodes.sets { + for owner := range nodes.Sets { if owner == (common.Hash{}) { continue } order = append(order, owner) } - if _, ok := nodes.sets[common.Hash{}]; ok { + if _, ok := nodes.Sets[common.Hash{}]; ok { order = append(order, common.Hash{}) } for _, owner := range order { - subset := nodes.sets[owner] - for _, path := range subset.paths { - n, ok := subset.nodes[path] - if !ok { - return fmt.Errorf("missing node %x %v", owner, path) + subset := nodes.Sets[owner] + subset.ForEachWithOrder(func(path string, n *trienode.Node) { + if n.IsDeleted() { + return // ignore deletion } - db.insert(n.hash, int(n.size), n.node) - } + db.insert(n.Hash, n.Blob) + }) } // Link up the account trie and storage trie if the node points // to an account trie leaf. - if set, present := nodes.sets[common.Hash{}]; present { - for _, n := range set.leaves { + if set, present := nodes.Sets[common.Hash{}]; present { + for _, n := range set.Leaves { var account types.StateAccount - if err := rlp.DecodeBytes(n.blob, &account); err != nil { + if err := rlp.DecodeBytes(n.Blob, &account); err != nil { return err } - if account.Root != emptyRoot { - db.reference(account.Root, n.parent) + if account.Root != types.EmptyRootHash { + db.reference(account.Root, n.Parent) } } } @@ -881,64 +686,38 @@ func (db *Database) update(nodes *MergedNodeSet) error { // Size returns the current storage size of the memory cache in front of the // persistent database layer. 
-func (db *Database) Size() (common.StorageSize, common.StorageSize) { +func (db *Database) Size() common.StorageSize { + db.lock.RLock() + defer db.lock.RUnlock() + // db.dirtiesSize only contains the useful data in the cache, but when reporting // the total memory consumption, the maintenance metadata is also needed to be // counted. - db.lock.RLock() - defer db.lock.RUnlock() - var metadataSize = common.StorageSize((len(db.dirties) - 1) * cachedNodeSize) - var metarootRefs = common.StorageSize(len(db.dirties[common.Hash{}].children) * (common.HashLength + 2)) - var preimageSize common.StorageSize - if db.preimages != nil { - preimageSize = db.preimages.size() - } - return db.dirtiesSize + db.childrenSize + metadataSize - metarootRefs, preimageSize + var metadataSize = common.StorageSize(len(db.dirties) * cachedNodeSize) + return db.dirtiesSize + db.childrenSize + metadataSize } -// CommitPreimages flushes the dangling preimages to disk. It is meant to be -// called when closing the blockchain object, so that preimages are persisted -// to the database. -func (db *Database) CommitPreimages() error { - db.lock.Lock() - defer db.lock.Unlock() +// Close closes the trie database and releases all held resources. +func (db *Database) Close() error { return nil } - if db.preimages == nil { - return nil - } - return db.preimages.commit(true) +// Scheme returns the node scheme used in the database. +func (db *Database) Scheme() string { + return rawdb.HashScheme } -// saveCache saves clean state cache to given directory path -// using specified CPU cores. -func (db *Database) saveCache(dir string, threads int) error { - if db.cleans == nil { - return nil - } - log.Info("Writing clean trie cache to disk", "path", dir, "threads", threads) +// Reader retrieves a node reader belonging to the given state root. 
+func (db *Database) Reader(root common.Hash) *reader { + return &reader{db: db} +} - start := time.Now() - err := db.cleans.SaveToFileConcurrent(dir, threads) - if err != nil { - log.Error("Failed to persist clean trie cache", "error", err) - return err - } - log.Info("Persisted the clean trie cache", "path", dir, "elapsed", common.PrettyDuration(time.Since(start))) - return nil +// reader is a state reader of Database which implements the Reader interface. +type reader struct { + db *Database } -// SaveCachePeriodically atomically saves fast cache data to the given dir with -// the specified interval. All dump operation will only use a single CPU core. -func (db *Database) SaveCachePeriodically(dir string, interval time.Duration, stopCh <-chan struct{}) { - ticker := time.NewTicker(interval) - defer ticker.Stop() - - for { - select { - case <-ticker.C: - db.saveCache(dir, 1) - case <-stopCh: - return - } - } +// Node retrieves the trie node with the given node hash. +// No error will be returned if the node is not found. +func (reader *reader) Node(owner common.Hash, path []byte, hash common.Hash) ([]byte, error) { + blob, _ := reader.db.Node(hash) + return blob, nil } diff --git a/coreth/trie/trienode/node.go b/coreth/trie/trienode/node.go new file mode 100644 index 00000000..8152eab6 --- /dev/null +++ b/coreth/trie/trienode/node.go @@ -0,0 +1,197 @@ +// Copyright 2023 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see + +package trienode + +import ( + "fmt" + "sort" + "strings" + + "github.com/ethereum/go-ethereum/common" +) + +// Node is a wrapper which contains the encoded blob of the trie node and its +// unique hash identifier. It is general enough that can be used to represent +// trie nodes corresponding to different trie implementations. +type Node struct { + Hash common.Hash // Node hash, empty for deleted node + Blob []byte // Encoded node blob, nil for the deleted node +} + +// Size returns the total memory size used by this node. +func (n *Node) Size() int { + return len(n.Blob) + common.HashLength +} + +// IsDeleted returns the indicator if the node is marked as deleted. +func (n *Node) IsDeleted() bool { + return n.Hash == (common.Hash{}) +} + +// WithPrev wraps the Node with the previous node value attached. +type WithPrev struct { + *Node + Prev []byte // Encoded original value, nil means it's non-existent +} + +// Unwrap returns the internal Node object. +func (n *WithPrev) Unwrap() *Node { + return n.Node +} + +// Size returns the total memory size used by this node. It overloads +// the function in Node by counting the size of previous value as well. +func (n *WithPrev) Size() int { + return n.Node.Size() + len(n.Prev) +} + +// New constructs a node with provided node information. +func New(hash common.Hash, blob []byte) *Node { + return &Node{Hash: hash, Blob: blob} +} + +// NewWithPrev constructs a node with provided node information. 
+func NewWithPrev(hash common.Hash, blob []byte, prev []byte) *WithPrev { + return &WithPrev{ + Node: New(hash, blob), + Prev: prev, + } +} + +// leaf represents a trie leaf node +type leaf struct { + Blob []byte // raw blob of leaf + Parent common.Hash // the hash of parent node +} + +// NodeSet contains a set of nodes collected during the commit operation. +// Each node is keyed by path. It's not thread-safe to use. +type NodeSet struct { + Owner common.Hash + Leaves []*leaf + Nodes map[string]*WithPrev + updates int // the count of updated and inserted nodes + deletes int // the count of deleted nodes +} + +// NewNodeSet initializes a node set. The owner is zero for the account trie and +// the owning account address hash for storage tries. +func NewNodeSet(owner common.Hash) *NodeSet { + return &NodeSet{ + Owner: owner, + Nodes: make(map[string]*WithPrev), + } +} + +// ForEachWithOrder iterates the nodes with the order from bottom to top, +// right to left, nodes with the longest path will be iterated first. +func (set *NodeSet) ForEachWithOrder(callback func(path string, n *Node)) { + var paths sort.StringSlice + for path := range set.Nodes { + paths = append(paths, path) + } + // Bottom-up, longest path first + sort.Sort(sort.Reverse(paths)) + for _, path := range paths { + callback(path, set.Nodes[path].Unwrap()) + } +} + +// AddNode adds the provided node into set. +func (set *NodeSet) AddNode(path []byte, n *WithPrev) { + if n.IsDeleted() { + set.deletes += 1 + } else { + set.updates += 1 + } + set.Nodes[string(path)] = n +} + +// AddLeaf adds the provided leaf node into set. TODO(rjl493456442) how can +// we get rid of it? +func (set *NodeSet) AddLeaf(parent common.Hash, blob []byte) { + set.Leaves = append(set.Leaves, &leaf{Blob: blob, Parent: parent}) +} + +// Size returns the number of dirty nodes in set. +func (set *NodeSet) Size() (int, int) { + return set.updates, set.deletes +} + +// Hashes returns the hashes of all updated nodes. 
TODO(rjl493456442) how can +// we get rid of it? +func (set *NodeSet) Hashes() []common.Hash { + var ret []common.Hash + for _, node := range set.Nodes { + ret = append(ret, node.Hash) + } + return ret +} + +// Summary returns a string-representation of the NodeSet. +func (set *NodeSet) Summary() string { + var out = new(strings.Builder) + fmt.Fprintf(out, "nodeset owner: %v\n", set.Owner) + if set.Nodes != nil { + for path, n := range set.Nodes { + // Deletion + if n.IsDeleted() { + fmt.Fprintf(out, " [-]: %x prev: %x\n", path, n.Prev) + continue + } + // Insertion + if len(n.Prev) == 0 { + fmt.Fprintf(out, " [+]: %x -> %v\n", path, n.Hash) + continue + } + // Update + fmt.Fprintf(out, " [*]: %x -> %v prev: %x\n", path, n.Hash, n.Prev) + } + } + for _, n := range set.Leaves { + fmt.Fprintf(out, "[leaf]: %v\n", n) + } + return out.String() +} + +// MergedNodeSet represents a merged node set for a group of tries. +type MergedNodeSet struct { + Sets map[common.Hash]*NodeSet +} + +// NewMergedNodeSet initializes an empty merged set. +func NewMergedNodeSet() *MergedNodeSet { + return &MergedNodeSet{Sets: make(map[common.Hash]*NodeSet)} +} + +// NewWithNodeSet constructs a merged nodeset with the provided single set. +func NewWithNodeSet(set *NodeSet) *MergedNodeSet { + merged := NewMergedNodeSet() + merged.Merge(set) + return merged +} + +// Merge merges the provided dirty nodes of a trie into the set. The assumption +// is held that no duplicated set belonging to the same trie will be merged twice. +func (set *MergedNodeSet) Merge(other *NodeSet) error { + _, present := set.Sets[other.Owner] + if present { + return fmt.Errorf("duplicate trie for owner %#x", other.Owner) + } + set.Sets[other.Owner] = other + return nil +} diff --git a/coreth/trie/util_test.go b/coreth/trie/util_test.go deleted file mode 100644 index 95103747..00000000 --- a/coreth/trie/util_test.go +++ /dev/null @@ -1,136 +0,0 @@ -// (c) 2022, Ava Labs, Inc. 
-// -// This file is a derived work, based on the go-ethereum library whose original -// notices appear below. -// -// It is distributed under a license compatible with the licensing terms of the -// original code from which it is derived. -// -// Much love to the original authors for their work. -// ********** -// Copyright 2022 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package trie - -import ( - "testing" - - "github.com/ava-labs/coreth/core/rawdb" - "github.com/ethereum/go-ethereum/common" -) - -// Tests if the trie diffs are tracked correctly. 
-func TestTrieTracer(t *testing.T) { - db := NewDatabase(rawdb.NewMemoryDatabase()) - trie := NewEmpty(db) - trie.tracer = newTracer() - - // Insert a batch of entries, all the nodes should be marked as inserted - vals := []struct{ k, v string }{ - {"do", "verb"}, - {"ether", "wookiedoo"}, - {"horse", "stallion"}, - {"shaman", "horse"}, - {"doge", "coin"}, - {"dog", "puppy"}, - {"somethingveryoddindeedthis is", "myothernodedata"}, - } - for _, val := range vals { - trie.Update([]byte(val.k), []byte(val.v)) - } - trie.Hash() - - seen := make(map[string]struct{}) - it := trie.NodeIterator(nil) - for it.Next(true) { - if it.Leaf() { - continue - } - seen[string(it.Path())] = struct{}{} - } - inserted := trie.tracer.insertList() - if len(inserted) != len(seen) { - t.Fatalf("Unexpected inserted node tracked want %d got %d", len(seen), len(inserted)) - } - for _, k := range inserted { - _, ok := seen[string(k)] - if !ok { - t.Fatalf("Unexpected inserted node") - } - } - deleted := trie.tracer.deleteList() - if len(deleted) != 0 { - t.Fatalf("Unexpected deleted node tracked %d", len(deleted)) - } - - // Commit the changes and re-create with new root - root, nodes, _ := trie.Commit(false) - if err := db.Update(NewWithNodeSet(nodes)); err != nil { - t.Fatal(err) - } - trie, _ = New(common.Hash{}, root, db) - trie.tracer = newTracer() - - // Delete all the elements, check deletion set - for _, val := range vals { - trie.Delete([]byte(val.k)) - } - trie.Hash() - - inserted = trie.tracer.insertList() - if len(inserted) != 0 { - t.Fatalf("Unexpected inserted node tracked %d", len(inserted)) - } - deleted = trie.tracer.deleteList() - if len(deleted) != len(seen) { - t.Fatalf("Unexpected deleted node tracked want %d got %d", len(seen), len(deleted)) - } - for _, k := range deleted { - _, ok := seen[string(k)] - if !ok { - t.Fatalf("Unexpected inserted node") - } - } -} - -func TestTrieTracerNoop(t *testing.T) { - trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) - 
trie.tracer = newTracer() - - // Insert a batch of entries, all the nodes should be marked as inserted - vals := []struct{ k, v string }{ - {"do", "verb"}, - {"ether", "wookiedoo"}, - {"horse", "stallion"}, - {"shaman", "horse"}, - {"doge", "coin"}, - {"dog", "puppy"}, - {"somethingveryoddindeedthis is", "myothernodedata"}, - } - for _, val := range vals { - trie.Update([]byte(val.k), []byte(val.v)) - } - for _, val := range vals { - trie.Delete([]byte(val.k)) - } - if len(trie.tracer.insertList()) != 0 { - t.Fatalf("Unexpected inserted node tracked %d", len(trie.tracer.insertList())) - } - if len(trie.tracer.deleteList()) != 0 { - t.Fatalf("Unexpected deleted node tracked %d", len(trie.tracer.deleteList())) - } -} diff --git a/coreth/trie/utils.go b/coreth/trie/utils.go deleted file mode 100644 index 8d1593eb..00000000 --- a/coreth/trie/utils.go +++ /dev/null @@ -1,177 +0,0 @@ -// (c) 2022, Ava Labs, Inc. -// -// This file is a derived work, based on the go-ethereum library whose original -// notices appear below. -// -// It is distributed under a license compatible with the licensing terms of the -// original code from which it is derived. -// -// Much love to the original authors for their work. -// ********** -// Copyright 2022 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. 
If not, see . - -package trie - -// tracer tracks the changes of trie nodes. During the trie operations, -// some nodes can be deleted from the trie, while these deleted nodes -// won't be captured by trie.Hasher or trie.Committer. Thus, these deleted -// nodes won't be removed from the disk at all. Tracer is an auxiliary tool -// used to track all insert and delete operations of trie and capture all -// deleted nodes eventually. -// -// The changed nodes can be mainly divided into two categories: the leaf -// node and intermediate node. The former is inserted/deleted by callers -// while the latter is inserted/deleted in order to follow the rule of trie. -// This tool can track all of them no matter the node is embedded in its -// parent or not, but valueNode is never tracked. -// -// Besides, it's also used for recording the original value of the nodes -// when they are resolved from the disk. The pre-value of the nodes will -// be used to construct reverse-diffs in the future. -// -// Note tracer is not thread-safe, callers should be responsible for handling -// the concurrency issues by themselves. -type tracer struct { - insert map[string]struct{} - delete map[string]struct{} - origin map[string][]byte -} - -// newTracer initializes the tracer for capturing trie changes. -func newTracer() *tracer { - return &tracer{ - insert: make(map[string]struct{}), - delete: make(map[string]struct{}), - origin: make(map[string][]byte), - } -} - -/* -// onRead tracks the newly loaded trie node and caches the rlp-encoded blob internally. -// Don't change the value outside of function since it's not deep-copied. -func (t *tracer) onRead(key []byte, val []byte) { - // Tracer isn't used right now, remove this check later. - if t == nil { - return - } - t.origin[string(key)] = val -} -*/ - -// onInsert tracks the newly inserted trie node. If it's already in the deletion set -// (resurrected node), then just wipe it from the deletion set as the "untouched". 
-func (t *tracer) onInsert(key []byte) { - // Tracer isn't used right now, remove this check later. - if t == nil { - return - } - if _, present := t.delete[string(key)]; present { - delete(t.delete, string(key)) - return - } - t.insert[string(key)] = struct{}{} -} - -// onDelete tracks the newly deleted trie node. If it's already -// in the addition set, then just wipe it from the addition set -// as it's untouched. -func (t *tracer) onDelete(key []byte) { - // Tracer isn't used right now, remove this check later. - if t == nil { - return - } - if _, present := t.insert[string(key)]; present { - delete(t.insert, string(key)) - return - } - t.delete[string(key)] = struct{}{} -} - -// insertList returns the tracked inserted trie nodes in list format. -func (t *tracer) insertList() [][]byte { - // Tracer isn't used right now, remove this check later. - if t == nil { - return nil - } - var ret [][]byte - for key := range t.insert { - ret = append(ret, []byte(key)) - } - return ret -} - -// deleteList returns the tracked deleted trie nodes in list format. -func (t *tracer) deleteList() [][]byte { - // Tracer isn't used right now, remove this check later. - if t == nil { - return nil - } - var ret [][]byte - for key := range t.delete { - ret = append(ret, []byte(key)) - } - return ret -} - -/* -// getPrev returns the cached original value of the specified node. -func (t *tracer) getPrev(key []byte) []byte { - // Don't panic on uninitialized tracer, it's possible in testing. - if t == nil { - return nil - } - return t.origin[string(key)] -} -*/ - -// reset clears the content tracked by tracer. -func (t *tracer) reset() { - // Tracer isn't used right now, remove this check later. - if t == nil { - return - } - t.insert = make(map[string]struct{}) - t.delete = make(map[string]struct{}) - t.origin = make(map[string][]byte) -} - -// copy returns a deep copied tracer instance. -func (t *tracer) copy() *tracer { - // Tracer isn't used right now, remove this check later. 
- if t == nil { - return nil - } - var ( - insert = make(map[string]struct{}) - delete = make(map[string]struct{}) - origin = make(map[string][]byte) - ) - for key := range t.insert { - insert[key] = struct{}{} - } - for key := range t.delete { - delete[key] = struct{}{} - } - for key, val := range t.origin { - origin[key] = val - } - return &tracer{ - insert: insert, - delete: delete, - origin: origin, - } -} diff --git a/coreth/utils/address_range.go b/coreth/utils/address_range.go new file mode 100644 index 00000000..940c39e8 --- /dev/null +++ b/coreth/utils/address_range.go @@ -0,0 +1,23 @@ +// (c) 2021-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package utils + +import ( + "bytes" + + "github.com/ethereum/go-ethereum/common" +) + +// AddressRange represents a continuous range of addresses +type AddressRange struct { + Start common.Address + End common.Address +} + +// Contains returns true iff [addr] is contained within the (inclusive) +// range of addresses defined by [a]. +func (a *AddressRange) Contains(addr common.Address) bool { + addrBytes := addr.Bytes() + return bytes.Compare(addrBytes, a.Start[:]) >= 0 && bytes.Compare(addrBytes, a.End[:]) <= 0 +} diff --git a/coreth/utils/bounded_workers.go b/coreth/utils/bounded_workers.go new file mode 100644 index 00000000..806f923f --- /dev/null +++ b/coreth/utils/bounded_workers.go @@ -0,0 +1,81 @@ +// (c) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package utils + +import ( + "sync" + "sync/atomic" +) + +type BoundedWorkers struct { + workerCount atomic.Int32 + workerSpawner chan struct{} + outstandingWorkers sync.WaitGroup + + work chan func() + workClose sync.Once +} + +// NewBoundedWorkers returns an instance of [BoundedWorkers] that +// will spawn up to [max] goroutines. 
+func NewBoundedWorkers(max int) *BoundedWorkers { + return &BoundedWorkers{ + workerSpawner: make(chan struct{}, max), + work: make(chan func()), + } +} + +// startWorker creates a new goroutine to execute [f] immediately and then keeps the goroutine +// alive to continue executing new work. +func (b *BoundedWorkers) startWorker(f func()) { + b.workerCount.Add(1) + b.outstandingWorkers.Add(1) + + go func() { + defer b.outstandingWorkers.Done() + + if f != nil { + f() + } + for f := range b.work { + f() + } + }() +} + +// Execute the given function on an existing goroutine waiting for more work, a new goroutine, +// or return if the context is canceled. +// +// Execute must not be called after Wait, otherwise it might panic. +func (b *BoundedWorkers) Execute(f func()) { + // Ensure we feed idle workers first + select { + case b.work <- f: + return + default: + } + + // Fallback to waiting for an idle worker or allocating + // a new worker (if we aren't yet at max concurrency) + select { + case b.work <- f: + case b.workerSpawner <- struct{}{}: + b.startWorker(f) + } +} + +// Wait returns after all enqueued work finishes and all goroutines to exit. +// Wait returns the number of workers that were spawned during the run. +// +// Wait can only be called after ALL calls to [Execute] have returned. +// +// It is safe to call Wait multiple times but not safe to call [Execute] +// after [Wait] has been called. 
+func (b *BoundedWorkers) Wait() int { + b.workClose.Do(func() { + close(b.work) + }) + b.outstandingWorkers.Wait() + return int(b.workerCount.Load()) +} diff --git a/coreth/utils/bytes.go b/coreth/utils/bytes.go index 186e3c41..54258b20 100644 --- a/coreth/utils/bytes.go +++ b/coreth/utils/bytes.go @@ -3,6 +3,8 @@ package utils +import "github.com/ethereum/go-ethereum/common" + // IncrOne increments bytes value by one func IncrOne(bytes []byte) { index := len(bytes) - 1 @@ -16,3 +18,27 @@ func IncrOne(bytes []byte) { } } } + +// HashSliceToBytes serializes a []common.Hash into a tightly packed byte array. +func HashSliceToBytes(hashes []common.Hash) []byte { + bytes := make([]byte, common.HashLength*len(hashes)) + for i, hash := range hashes { + copy(bytes[i*common.HashLength:], hash[:]) + } + return bytes +} + +// BytesToHashSlice packs [b] into a slice of hash values with zero padding +// to the right if the length of b is not a multiple of 32. +func BytesToHashSlice(b []byte) []common.Hash { + var ( + numHashes = (len(b) + 31) / 32 + hashes = make([]common.Hash, numHashes) + ) + + for i := range hashes { + start := i * common.HashLength + copy(hashes[i][:], b[start:]) + } + return hashes +} diff --git a/coreth/utils/bytes_test.go b/coreth/utils/bytes_test.go new file mode 100644 index 00000000..b1bbc8fa --- /dev/null +++ b/coreth/utils/bytes_test.go @@ -0,0 +1,66 @@ +// (c) 2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package utils + +import ( + "bytes" + "testing" + + "github.com/ava-labs/avalanchego/utils" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestIncrOne(t *testing.T) { + type test struct { + input []byte + expected []byte + } + for name, test := range map[string]test{ + "increment no overflow no carry": { + input: []byte{0, 0}, + expected: []byte{0, 1}, + }, + "increment overflow": { + input: []byte{255, 255}, + expected: []byte{0, 0}, + }, + "increment carry": { + input: []byte{0, 255}, + expected: []byte{1, 0}, + }, + } { + t.Run(name, func(t *testing.T) { + output := common.CopyBytes(test.input) + IncrOne(output) + assert.Equal(t, output, test.expected) + }) + } +} + +func testBytesToHashSlice(t testing.TB, b []byte) { + hashSlice := BytesToHashSlice(b) + + copiedBytes := HashSliceToBytes(hashSlice) + + if len(b)%32 == 0 { + require.Equal(t, b, copiedBytes) + } else { + require.Equal(t, b, copiedBytes[:len(b)]) + // Require that any additional padding is all zeroes + padding := copiedBytes[len(b):] + require.Equal(t, bytes.Repeat([]byte{0x00}, len(padding)), padding) + } +} + +func FuzzHashSliceToBytes(f *testing.F) { + for i := 0; i < 100; i++ { + f.Add(utils.RandomBytes(i)) + } + + f.Fuzz(func(t *testing.T, b []byte) { + testBytesToHashSlice(t, b) + }) +} diff --git a/coreth/utils/fork.go b/coreth/utils/fork.go index a6db844e..381e2561 100644 --- a/coreth/utils/fork.go +++ b/coreth/utils/fork.go @@ -3,22 +3,61 @@ package utils -import "math/big" +import ( + "math/big" + "time" +) -// IsForked returns whether a fork scheduled at block s is active at the given head block. 
+func NewUint64(val uint64) *uint64 { return &val } + +func TimeToNewUint64(time time.Time) *uint64 { + unix := uint64(time.Unix()) + return NewUint64(unix) +} + +func Uint64ToTime(val *uint64) time.Time { + timestamp := int64(*val) + return time.Unix(timestamp, 0) +} + +// IsBlockForked returns whether a fork scheduled at block s is active at the given head block. // Note: [s] and [head] can be either a block number or a block timestamp. -func IsForked(s, head *big.Int) bool { +func IsBlockForked(s, head *big.Int) bool { if s == nil || head == nil { return false } return s.Cmp(head) <= 0 } -// IsForkTransition returns true if [fork] activates during the transition from [parent] -// to [current]. +// IsTimestampForked returns whether a fork scheduled at timestamp s is active +// at the given head timestamp. Whilst this method is the same as isBlockForked, +// they are explicitly separate for clearer reading. +func IsTimestampForked(s *uint64, head uint64) bool { + if s == nil { + return false + } + return *s <= head +} + +// IsForkTransition returns true if [fork] activates during the transition from +// [parent] to [current]. +// Taking [parent] as a pointer allows for us to pass nil when checking forks +// that activate during genesis. // Note: this works for both block number and timestamp activated forks. -func IsForkTransition(fork *big.Int, parent *big.Int, current *big.Int) bool { - parentForked := IsForked(fork, parent) - currentForked := IsForked(fork, current) +func IsForkTransition(fork *uint64, parent *uint64, current uint64) bool { + var parentForked bool + if parent != nil { + parentForked = IsTimestampForked(fork, *parent) + } + currentForked := IsTimestampForked(fork, current) return !parentForked && currentForked } + +// Uint64PtrEqual returns true if x and y pointers are equivalent ie. both nil or both +// contain the same value. 
+func Uint64PtrEqual(x, y *uint64) bool { + if x == nil || y == nil { + return x == y + } + return *x == *y +} diff --git a/coreth/utils/fork_test.go b/coreth/utils/fork_test.go index fae184d1..ec7870e8 100644 --- a/coreth/utils/fork_test.go +++ b/coreth/utils/fork_test.go @@ -4,52 +4,52 @@ package utils import ( - "math/big" "testing" "github.com/stretchr/testify/assert" ) -func TestIsForked(t *testing.T) { +func TestIsTimestampForked(t *testing.T) { type test struct { - fork, block *big.Int - isForked bool + fork *uint64 + block uint64 + isForked bool } for name, test := range map[string]test{ "nil fork at 0": { fork: nil, - block: big.NewInt(0), + block: 0, isForked: false, }, "nil fork at non-zero": { fork: nil, - block: big.NewInt(100), + block: 100, isForked: false, }, "zero fork at genesis": { - fork: big.NewInt(0), - block: big.NewInt(0), + fork: NewUint64(0), + block: 0, isForked: true, }, "pre fork timestamp": { - fork: big.NewInt(100), - block: big.NewInt(50), + fork: NewUint64(100), + block: 50, isForked: false, }, "at fork timestamp": { - fork: big.NewInt(100), - block: big.NewInt(100), + fork: NewUint64(100), + block: 100, isForked: true, }, "post fork timestamp": { - fork: big.NewInt(100), - block: big.NewInt(150), + fork: NewUint64(100), + block: 150, isForked: true, }, } { t.Run(name, func(t *testing.T) { - res := IsForked(test.fork, test.block) + res := IsTimestampForked(test.fork, test.block) assert.Equal(t, test.isForked, res) }) } @@ -57,63 +57,64 @@ func TestIsForked(t *testing.T) { func TestIsForkTransition(t *testing.T) { type test struct { - fork, parent, current *big.Int - transitioned bool + fork, parent *uint64 + current uint64 + transitioned bool } for name, test := range map[string]test{ "not active at genesis": { fork: nil, parent: nil, - current: big.NewInt(0), + current: 0, transitioned: false, }, "activate at genesis": { - fork: big.NewInt(0), + fork: NewUint64(0), parent: nil, - current: big.NewInt(0), + current: 0, transitioned: 
true, }, "nil fork arbitrary transition": { fork: nil, - parent: big.NewInt(100), - current: big.NewInt(101), + parent: NewUint64(100), + current: 101, transitioned: false, }, "nil fork transition same timestamp": { fork: nil, - parent: big.NewInt(100), - current: big.NewInt(100), + parent: NewUint64(100), + current: 100, transitioned: false, }, "exact match on current timestamp": { - fork: big.NewInt(100), - parent: big.NewInt(99), - current: big.NewInt(100), + fork: NewUint64(100), + parent: NewUint64(99), + current: 100, transitioned: true, }, "current same as parent does not transition twice": { - fork: big.NewInt(100), - parent: big.NewInt(101), - current: big.NewInt(101), + fork: NewUint64(100), + parent: NewUint64(101), + current: 101, transitioned: false, }, "current, parent, and fork same should not transition twice": { - fork: big.NewInt(100), - parent: big.NewInt(100), - current: big.NewInt(100), + fork: NewUint64(100), + parent: NewUint64(100), + current: 100, transitioned: false, }, "current transitions after fork": { - fork: big.NewInt(100), - parent: big.NewInt(99), - current: big.NewInt(101), + fork: NewUint64(100), + parent: NewUint64(99), + current: 101, transitioned: true, }, "current and parent come after fork": { - fork: big.NewInt(100), - parent: big.NewInt(101), - current: big.NewInt(102), + fork: NewUint64(100), + parent: NewUint64(101), + current: 102, transitioned: false, }, } { diff --git a/coreth/utils/snow.go b/coreth/utils/snow.go new file mode 100644 index 00000000..76007441 --- /dev/null +++ b/coreth/utils/snow.go @@ -0,0 +1,31 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package utils + +import ( + "github.com/ava-labs/avalanchego/api/metrics" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/utils/crypto/bls" + "github.com/ava-labs/avalanchego/utils/logging" +) + +func TestSnowContext() *snow.Context { + sk, err := bls.NewSecretKey() + if err != nil { + panic(err) + } + pk := bls.PublicFromSecretKey(sk) + return &snow.Context{ + NetworkID: 0, + SubnetID: ids.Empty, + ChainID: ids.Empty, + NodeID: ids.EmptyNodeID, + PublicKey: pk, + Log: logging.NoLog{}, + BCLookup: ids.NewAliaser(), + Metrics: metrics.NewOptionalGatherer(), + ChainDataDir: "", + } +} diff --git a/coreth/warp/aggregator/aggregator.go b/coreth/warp/aggregator/aggregator.go new file mode 100644 index 00000000..9c6c3aa3 --- /dev/null +++ b/coreth/warp/aggregator/aggregator.go @@ -0,0 +1,171 @@ +// (c) 2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package aggregator + +import ( + "context" + "fmt" + + "github.com/ethereum/go-ethereum/log" + + "github.com/ava-labs/avalanchego/utils/crypto/bls" + "github.com/ava-labs/avalanchego/utils/set" + avalancheWarp "github.com/ava-labs/avalanchego/vms/platformvm/warp" + "github.com/ava-labs/coreth/precompile/contracts/warp" +) + +type AggregateSignatureResult struct { + // Weight of validators included in the aggregate signature. + SignatureWeight uint64 + // Total weight of all validators in the subnet. + TotalWeight uint64 + // The message with the aggregate signature. + Message *avalancheWarp.Message +} + +type signatureFetchResult struct { + sig *bls.Signature + index int + weight uint64 +} + +// Aggregator requests signatures from validators and +// aggregates them into a single signature. +type Aggregator struct { + validators []*avalancheWarp.Validator + totalWeight uint64 + client SignatureGetter +} + +// New returns a signature aggregator that will attempt to aggregate signatures from [validators]. 
+func New(client SignatureGetter, validators []*avalancheWarp.Validator, totalWeight uint64) *Aggregator { + return &Aggregator{ + client: client, + validators: validators, + totalWeight: totalWeight, + } +} + +// Returns an aggregate signature over [unsignedMessage]. +// The returned signature's weight exceeds the threshold given by [quorumNum]. +func (a *Aggregator) AggregateSignatures(ctx context.Context, unsignedMessage *avalancheWarp.UnsignedMessage, quorumNum uint64) (*AggregateSignatureResult, error) { + // Create a child context to cancel signature fetching if we reach signature threshold. + signatureFetchCtx, signatureFetchCancel := context.WithCancel(ctx) + defer signatureFetchCancel() + + // Fetch signatures from validators concurrently. + signatureFetchResultChan := make(chan *signatureFetchResult) + for i, validator := range a.validators { + var ( + i = i + validator = validator + // TODO: update from a single nodeID to the original slice and use extra nodeIDs as backup. + nodeID = validator.NodeIDs[0] + ) + go func() { + log.Debug("Fetching warp signature", + "nodeID", nodeID, + "index", i, + "msgID", unsignedMessage.ID(), + ) + + signature, err := a.client.GetSignature(signatureFetchCtx, nodeID, unsignedMessage) + if err != nil { + log.Debug("Failed to fetch warp signature", + "nodeID", nodeID, + "index", i, + "err", err, + "msgID", unsignedMessage.ID(), + ) + signatureFetchResultChan <- nil + return + } + + log.Debug("Retrieved warp signature", + "nodeID", nodeID, + "msgID", unsignedMessage.ID(), + "index", i, + ) + + if !bls.Verify(validator.PublicKey, signature, unsignedMessage.Bytes()) { + log.Debug("Failed to verify warp signature", + "nodeID", nodeID, + "index", i, + "msgID", unsignedMessage.ID(), + ) + signatureFetchResultChan <- nil + return + } + + signatureFetchResultChan <- &signatureFetchResult{ + sig: signature, + index: i, + weight: validator.Weight, + } + }() + } + + var ( + signatures = make([]*bls.Signature, 0, len(a.validators)) + 
signersBitset = set.NewBits() + signaturesWeight = uint64(0) + signaturesPassedThreshold = false + ) + + for i := 0; i < len(a.validators); i++ { + signatureFetchResult := <-signatureFetchResultChan + if signatureFetchResult == nil { + continue + } + + signatures = append(signatures, signatureFetchResult.sig) + signersBitset.Add(signatureFetchResult.index) + signaturesWeight += signatureFetchResult.weight + log.Debug("Updated weight", + "totalWeight", signaturesWeight, + "addedWeight", signatureFetchResult.weight, + "msgID", unsignedMessage.ID(), + ) + + // If the signature weight meets the requested threshold, cancel signature fetching + if err := avalancheWarp.VerifyWeight(signaturesWeight, a.totalWeight, quorumNum, warp.WarpQuorumDenominator); err == nil { + log.Debug("Verify weight passed, exiting aggregation early", + "quorumNum", quorumNum, + "totalWeight", a.totalWeight, + "signatureWeight", signaturesWeight, + "msgID", unsignedMessage.ID(), + ) + signatureFetchCancel() + signaturesPassedThreshold = true + break + } + } + + // If I failed to fetch sufficient signature stake, return an error + if !signaturesPassedThreshold { + return nil, avalancheWarp.ErrInsufficientWeight + } + + // Otherwise, return the aggregate signature + aggregateSignature, err := bls.AggregateSignatures(signatures) + if err != nil { + return nil, fmt.Errorf("failed to aggregate BLS signatures: %w", err) + } + + warpSignature := &avalancheWarp.BitSetSignature{ + Signers: signersBitset.Bytes(), + } + copy(warpSignature.Signature[:], bls.SignatureToBytes(aggregateSignature)) + + msg, err := avalancheWarp.NewMessage(unsignedMessage, warpSignature) + if err != nil { + return nil, fmt.Errorf("failed to construct warp message: %w", err) + } + + return &AggregateSignatureResult{ + Message: msg, + SignatureWeight: signaturesWeight, + TotalWeight: a.totalWeight, + }, nil +} diff --git a/coreth/warp/aggregator/aggregator_test.go b/coreth/warp/aggregator/aggregator_test.go new file mode 100644 
index 00000000..07d20207 --- /dev/null +++ b/coreth/warp/aggregator/aggregator_test.go @@ -0,0 +1,384 @@ +// (c) 2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package aggregator + +import ( + "context" + "errors" + "testing" + + "github.com/stretchr/testify/require" + + "go.uber.org/mock/gomock" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/crypto/bls" + avalancheWarp "github.com/ava-labs/avalanchego/vms/platformvm/warp" +) + +func newValidator(t testing.TB, weight uint64) (*bls.SecretKey, *avalancheWarp.Validator) { + sk, err := bls.NewSecretKey() + require.NoError(t, err) + pk := bls.PublicFromSecretKey(sk) + return sk, &avalancheWarp.Validator{ + PublicKey: pk, + PublicKeyBytes: bls.PublicKeyToBytes(pk), + Weight: weight, + NodeIDs: []ids.NodeID{ids.GenerateTestNodeID()}, + } +} + +func TestAggregateSignatures(t *testing.T) { + errTest := errors.New("test error") + unsignedMsg := &avalancheWarp.UnsignedMessage{ + NetworkID: 1338, + SourceChainID: ids.ID{'y', 'e', 'e', 't'}, + Payload: []byte("hello world"), + } + require.NoError(t, unsignedMsg.Initialize()) + + nodeID1, nodeID2, nodeID3 := ids.GenerateTestNodeID(), ids.GenerateTestNodeID(), ids.GenerateTestNodeID() + vdrWeight := uint64(10001) + vdr1sk, vdr1 := newValidator(t, vdrWeight) + vdr2sk, vdr2 := newValidator(t, vdrWeight+1) + vdr3sk, vdr3 := newValidator(t, vdrWeight-1) + sig1 := bls.Sign(vdr1sk, unsignedMsg.Bytes()) + sig2 := bls.Sign(vdr2sk, unsignedMsg.Bytes()) + sig3 := bls.Sign(vdr3sk, unsignedMsg.Bytes()) + vdrToSig := map[*avalancheWarp.Validator]*bls.Signature{ + vdr1: sig1, + vdr2: sig2, + vdr3: sig3, + } + nonVdrSk, err := bls.NewSecretKey() + require.NoError(t, err) + nonVdrSig := bls.Sign(nonVdrSk, unsignedMsg.Bytes()) + vdrs := []*avalancheWarp.Validator{ + { + PublicKey: vdr1.PublicKey, + NodeIDs: []ids.NodeID{nodeID1}, + Weight: vdr1.Weight, + }, + { + PublicKey: vdr2.PublicKey, + NodeIDs: 
[]ids.NodeID{nodeID2}, + Weight: vdr2.Weight, + }, + { + PublicKey: vdr3.PublicKey, + NodeIDs: []ids.NodeID{nodeID3}, + Weight: vdr3.Weight, + }, + } + + type test struct { + name string + contextWithCancelFunc func() (context.Context, context.CancelFunc) + aggregatorFunc func(*gomock.Controller, context.CancelFunc) *Aggregator + unsignedMsg *avalancheWarp.UnsignedMessage + quorumNum uint64 + expectedSigners []*avalancheWarp.Validator + expectedErr error + } + + tests := []test{ + { + name: "0/3 validators reply with signature", + contextWithCancelFunc: func() (context.Context, context.CancelFunc) { + return context.Background(), nil + }, + aggregatorFunc: func(ctrl *gomock.Controller, _ context.CancelFunc) *Aggregator { + client := NewMockSignatureGetter(ctrl) + client.EXPECT().GetSignature(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, errTest).Times(len(vdrs)) + return New(client, vdrs, vdrWeight*uint64(len(vdrs))) + }, + unsignedMsg: unsignedMsg, + quorumNum: 1, + expectedErr: avalancheWarp.ErrInsufficientWeight, + }, + { + name: "1/3 validators reply with signature; insufficient weight", + contextWithCancelFunc: func() (context.Context, context.CancelFunc) { + return context.Background(), nil + }, + aggregatorFunc: func(ctrl *gomock.Controller, _ context.CancelFunc) *Aggregator { + client := NewMockSignatureGetter(ctrl) + client.EXPECT().GetSignature(gomock.Any(), nodeID1, gomock.Any()).Return(sig1, nil).Times(1) + client.EXPECT().GetSignature(gomock.Any(), nodeID2, gomock.Any()).Return(nil, errTest).Times(1) + client.EXPECT().GetSignature(gomock.Any(), nodeID3, gomock.Any()).Return(nil, errTest).Times(1) + return New(client, vdrs, vdrWeight*uint64(len(vdrs))) + }, + unsignedMsg: unsignedMsg, + quorumNum: 35, // Require >1/3 of weight + expectedErr: avalancheWarp.ErrInsufficientWeight, + }, + { + name: "2/3 validators reply with signature; insufficient weight", + contextWithCancelFunc: func() (context.Context, context.CancelFunc) { + return 
context.Background(), nil + }, + aggregatorFunc: func(ctrl *gomock.Controller, _ context.CancelFunc) *Aggregator { + client := NewMockSignatureGetter(ctrl) + client.EXPECT().GetSignature(gomock.Any(), nodeID1, gomock.Any()).Return(sig1, nil).Times(1) + client.EXPECT().GetSignature(gomock.Any(), nodeID2, gomock.Any()).Return(sig2, nil).Times(1) + client.EXPECT().GetSignature(gomock.Any(), nodeID3, gomock.Any()).Return(nil, errTest).Times(1) + return New(client, vdrs, vdrWeight*uint64(len(vdrs))) + }, + unsignedMsg: unsignedMsg, + quorumNum: 69, // Require >2/3 of weight + expectedErr: avalancheWarp.ErrInsufficientWeight, + }, + { + name: "2/3 validators reply with signature; sufficient weight", + contextWithCancelFunc: func() (context.Context, context.CancelFunc) { + return context.Background(), nil + }, + aggregatorFunc: func(ctrl *gomock.Controller, _ context.CancelFunc) *Aggregator { + client := NewMockSignatureGetter(ctrl) + client.EXPECT().GetSignature(gomock.Any(), nodeID1, gomock.Any()).Return(sig1, nil).Times(1) + client.EXPECT().GetSignature(gomock.Any(), nodeID2, gomock.Any()).Return(sig2, nil).Times(1) + client.EXPECT().GetSignature(gomock.Any(), nodeID3, gomock.Any()).Return(nil, errTest).MaxTimes(1) + return New(client, vdrs, vdrWeight*uint64(len(vdrs))) + }, + unsignedMsg: unsignedMsg, + quorumNum: 65, // Require <2/3 of weight + expectedSigners: []*avalancheWarp.Validator{vdr1, vdr2}, + expectedErr: nil, + }, + { + name: "3/3 validators reply with signature; sufficient weight", + contextWithCancelFunc: func() (context.Context, context.CancelFunc) { + return context.Background(), nil + }, + aggregatorFunc: func(ctrl *gomock.Controller, _ context.CancelFunc) *Aggregator { + client := NewMockSignatureGetter(ctrl) + client.EXPECT().GetSignature(gomock.Any(), nodeID1, gomock.Any()).Return(sig1, nil).MaxTimes(1) + client.EXPECT().GetSignature(gomock.Any(), nodeID2, gomock.Any()).Return(sig2, nil).MaxTimes(1) + client.EXPECT().GetSignature(gomock.Any(), 
nodeID3, gomock.Any()).Return(sig3, nil).MaxTimes(1) + return New(client, vdrs, vdrWeight*uint64(len(vdrs))) + }, + unsignedMsg: unsignedMsg, + quorumNum: 100, // Require all weight + expectedSigners: []*avalancheWarp.Validator{vdr1, vdr2, vdr3}, + expectedErr: nil, + }, + { + name: "3/3 validators reply with signature; 1 invalid signature; sufficient weight", + contextWithCancelFunc: func() (context.Context, context.CancelFunc) { + return context.Background(), nil + }, + aggregatorFunc: func(ctrl *gomock.Controller, _ context.CancelFunc) *Aggregator { + client := NewMockSignatureGetter(ctrl) + client.EXPECT().GetSignature(gomock.Any(), nodeID1, gomock.Any()).Return(nonVdrSig, nil).MaxTimes(1) + client.EXPECT().GetSignature(gomock.Any(), nodeID2, gomock.Any()).Return(sig2, nil).Times(1) + client.EXPECT().GetSignature(gomock.Any(), nodeID3, gomock.Any()).Return(sig3, nil).Times(1) + return New(client, vdrs, vdrWeight*uint64(len(vdrs))) + }, + unsignedMsg: unsignedMsg, + quorumNum: 64, + expectedSigners: []*avalancheWarp.Validator{vdr2, vdr3}, + expectedErr: nil, + }, + { + name: "3/3 validators reply with signature; 3 invalid signatures; insufficient weight", + contextWithCancelFunc: func() (context.Context, context.CancelFunc) { + return context.Background(), nil + }, + aggregatorFunc: func(ctrl *gomock.Controller, _ context.CancelFunc) *Aggregator { + client := NewMockSignatureGetter(ctrl) + client.EXPECT().GetSignature(gomock.Any(), nodeID1, gomock.Any()).Return(nonVdrSig, nil).Times(1) + client.EXPECT().GetSignature(gomock.Any(), nodeID2, gomock.Any()).Return(nonVdrSig, nil).Times(1) + client.EXPECT().GetSignature(gomock.Any(), nodeID3, gomock.Any()).Return(nonVdrSig, nil).Times(1) + return New(client, vdrs, vdrWeight*uint64(len(vdrs))) + }, + unsignedMsg: unsignedMsg, + quorumNum: 1, + expectedErr: avalancheWarp.ErrInsufficientWeight, + }, + { + name: "3/3 validators reply with signature; 2 invalid signatures; insufficient weight", + contextWithCancelFunc: 
func() (context.Context, context.CancelFunc) { + return context.Background(), nil + }, + aggregatorFunc: func(ctrl *gomock.Controller, _ context.CancelFunc) *Aggregator { + client := NewMockSignatureGetter(ctrl) + client.EXPECT().GetSignature(gomock.Any(), nodeID1, gomock.Any()).Return(nonVdrSig, nil).Times(1) + client.EXPECT().GetSignature(gomock.Any(), nodeID2, gomock.Any()).Return(nonVdrSig, nil).Times(1) + client.EXPECT().GetSignature(gomock.Any(), nodeID3, gomock.Any()).Return(sig3, nil).Times(1) + return New(client, vdrs, vdrWeight*uint64(len(vdrs))) + }, + unsignedMsg: unsignedMsg, + quorumNum: 40, + expectedErr: avalancheWarp.ErrInsufficientWeight, + }, + { + name: "1/3 validators reply with signature; 1 invalid signature; 1 error; sufficient weight", + contextWithCancelFunc: func() (context.Context, context.CancelFunc) { + return context.Background(), nil + }, + aggregatorFunc: func(ctrl *gomock.Controller, _ context.CancelFunc) *Aggregator { + client := NewMockSignatureGetter(ctrl) + client.EXPECT().GetSignature(gomock.Any(), nodeID1, gomock.Any()).Return(nonVdrSig, nil).MaxTimes(1) + client.EXPECT().GetSignature(gomock.Any(), nodeID2, gomock.Any()).Return(nil, errTest).MaxTimes(1) + client.EXPECT().GetSignature(gomock.Any(), nodeID3, gomock.Any()).Return(sig3, nil).Times(1) + return New(client, vdrs, vdrWeight*uint64(len(vdrs))) + }, + unsignedMsg: unsignedMsg, + quorumNum: 30, + expectedSigners: []*avalancheWarp.Validator{vdr3}, + expectedErr: nil, + }, + { + name: "early termination of signature fetching on parent context cancelation", + contextWithCancelFunc: func() (context.Context, context.CancelFunc) { + ctx, cancel := context.WithCancel(context.Background()) + cancel() + return ctx, cancel + }, + aggregatorFunc: func(ctrl *gomock.Controller, _ context.CancelFunc) *Aggregator { + // Assert that the context passed into each goroutine is canceled + // because the parent context is canceled. 
+ client := NewMockSignatureGetter(ctrl) + client.EXPECT().GetSignature(gomock.Any(), nodeID1, gomock.Any()).DoAndReturn( + func(ctx context.Context, _ ids.NodeID, _ *avalancheWarp.UnsignedMessage) (*bls.Signature, error) { + <-ctx.Done() + err := ctx.Err() + require.ErrorIs(t, err, context.Canceled) + return nil, err + }, + ).Times(1) + client.EXPECT().GetSignature(gomock.Any(), nodeID2, gomock.Any()).DoAndReturn( + func(ctx context.Context, _ ids.NodeID, _ *avalancheWarp.UnsignedMessage) (*bls.Signature, error) { + <-ctx.Done() + err := ctx.Err() + require.ErrorIs(t, err, context.Canceled) + return nil, err + }, + ).MaxTimes(1) + client.EXPECT().GetSignature(gomock.Any(), nodeID3, gomock.Any()).DoAndReturn( + func(ctx context.Context, _ ids.NodeID, _ *avalancheWarp.UnsignedMessage) (*bls.Signature, error) { + <-ctx.Done() + err := ctx.Err() + require.ErrorIs(t, err, context.Canceled) + return nil, err + }, + ).MaxTimes(1) + return New(client, vdrs, vdrWeight*uint64(len(vdrs))) + }, + unsignedMsg: unsignedMsg, + quorumNum: 60, // Require 2/3 validators + expectedSigners: []*avalancheWarp.Validator{vdr1, vdr2}, + expectedErr: avalancheWarp.ErrInsufficientWeight, + }, + { + name: "context cancels halfway through signature fetching", + contextWithCancelFunc: func() (context.Context, context.CancelFunc) { + ctx, cancel := context.WithCancel(context.Background()) + return ctx, cancel + }, + aggregatorFunc: func(ctrl *gomock.Controller, cancel context.CancelFunc) *Aggregator { + client := NewMockSignatureGetter(ctrl) + client.EXPECT().GetSignature(gomock.Any(), nodeID1, gomock.Any()).DoAndReturn( + func(ctx context.Context, _ ids.NodeID, _ *avalancheWarp.UnsignedMessage) (*bls.Signature, error) { + // cancel the context and return the signature + cancel() + return sig1, nil + }, + ).Times(1) + client.EXPECT().GetSignature(gomock.Any(), nodeID2, gomock.Any()).DoAndReturn( + func(ctx context.Context, _ ids.NodeID, _ *avalancheWarp.UnsignedMessage) (*bls.Signature, error) 
{ + // Should not be able to grab another signature since context was cancelled in another goroutine + <-ctx.Done() + err := ctx.Err() + require.ErrorIs(t, err, context.Canceled) + return nil, err + }, + ).MaxTimes(1) + client.EXPECT().GetSignature(gomock.Any(), nodeID3, gomock.Any()).DoAndReturn( + func(ctx context.Context, _ ids.NodeID, _ *avalancheWarp.UnsignedMessage) (*bls.Signature, error) { + // Should not be able to grab another signature since context was cancelled in another goroutine + <-ctx.Done() + err := ctx.Err() + require.ErrorIs(t, err, context.Canceled) + return nil, err + }, + ).MaxTimes(1) + return New(client, vdrs, vdrWeight*uint64(len(vdrs))) + }, + unsignedMsg: unsignedMsg, + quorumNum: 33, // 1/3 Should have gotten one signature before cancellation + expectedSigners: []*avalancheWarp.Validator{vdr1}, + expectedErr: nil, + }, + { + name: "early termination of signature fetching on passing threshold", + contextWithCancelFunc: func() (context.Context, context.CancelFunc) { + return context.Background(), nil + }, + aggregatorFunc: func(ctrl *gomock.Controller, _ context.CancelFunc) *Aggregator { + client := NewMockSignatureGetter(ctrl) + client.EXPECT().GetSignature(gomock.Any(), nodeID1, gomock.Any()).Return(sig1, nil).Times(1) + client.EXPECT().GetSignature(gomock.Any(), nodeID2, gomock.Any()).Return(sig2, nil).Times(1) + client.EXPECT().GetSignature(gomock.Any(), nodeID3, gomock.Any()).DoAndReturn( + // The aggregator will receive sig1 and sig2 which is sufficient weight, + // so the remaining outstanding goroutine should be cancelled. 
+ func(ctx context.Context, _ ids.NodeID, _ *avalancheWarp.UnsignedMessage) (*bls.Signature, error) { + <-ctx.Done() + err := ctx.Err() + require.ErrorIs(t, err, context.Canceled) + return nil, err + }, + ).MaxTimes(1) + return New(client, vdrs, vdrWeight*uint64(len(vdrs))) + }, + unsignedMsg: unsignedMsg, + quorumNum: 60, // Require 2/3 validators + expectedSigners: []*avalancheWarp.Validator{vdr1, vdr2}, + expectedErr: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctrl := gomock.NewController(t) + require := require.New(t) + + ctx, cancel := tt.contextWithCancelFunc() + // Guarantees that cancel is called preventing goroutine leak + if cancel != nil { + defer cancel() + } + a := tt.aggregatorFunc(ctrl, cancel) + + res, err := a.AggregateSignatures(ctx, tt.unsignedMsg, tt.quorumNum) + require.ErrorIs(err, tt.expectedErr) + if err != nil { + return + } + + require.Equal(unsignedMsg, &res.Message.UnsignedMessage) + + expectedSigWeight := uint64(0) + for _, vdr := range tt.expectedSigners { + expectedSigWeight += vdr.Weight + } + require.Equal(expectedSigWeight, res.SignatureWeight) + require.Equal(vdr1.Weight+vdr2.Weight+vdr3.Weight, res.TotalWeight) + + expectedSigs := []*bls.Signature{} + for _, vdr := range tt.expectedSigners { + expectedSigs = append(expectedSigs, vdrToSig[vdr]) + } + expectedSig, err := bls.AggregateSignatures(expectedSigs) + require.NoError(err) + gotBLSSig, ok := res.Message.Signature.(*avalancheWarp.BitSetSignature) + require.True(ok) + require.Equal(bls.SignatureToBytes(expectedSig), gotBLSSig.Signature[:]) + + numSigners, err := res.Message.Signature.NumSigners() + require.NoError(err) + require.Len(tt.expectedSigners, numSigners) + }) + } +} diff --git a/coreth/warp/aggregator/mock_signature_getter.go b/coreth/warp/aggregator/mock_signature_getter.go new file mode 100644 index 00000000..b3255eb8 --- /dev/null +++ b/coreth/warp/aggregator/mock_signature_getter.go @@ -0,0 +1,53 @@ +// Code generated by 
MockGen. DO NOT EDIT. +// Source: github.com/ava-labs/coreth/warp/aggregator (interfaces: SignatureGetter) + +// Package aggregator is a generated GoMock package. +package aggregator + +import ( + context "context" + reflect "reflect" + + bls "github.com/ava-labs/avalanchego/utils/crypto/bls" + ids "github.com/ava-labs/avalanchego/ids" + warp "github.com/ava-labs/avalanchego/vms/platformvm/warp" + gomock "go.uber.org/mock/gomock" +) + +// MockSignatureGetter is a mock of SignatureGetter interface. +type MockSignatureGetter struct { + ctrl *gomock.Controller + recorder *MockSignatureGetterMockRecorder +} + +// MockSignatureGetterMockRecorder is the mock recorder for MockSignatureGetter. +type MockSignatureGetterMockRecorder struct { + mock *MockSignatureGetter +} + +// NewMockSignatureGetter creates a new mock instance. +func NewMockSignatureGetter(ctrl *gomock.Controller) *MockSignatureGetter { + mock := &MockSignatureGetter{ctrl: ctrl} + mock.recorder = &MockSignatureGetterMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockSignatureGetter) EXPECT() *MockSignatureGetterMockRecorder { + return m.recorder +} + +// GetSignature mocks base method. +func (m *MockSignatureGetter) GetSignature(arg0 context.Context, arg1 ids.NodeID, arg2 *warp.UnsignedMessage) (*bls.Signature, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSignature", arg0, arg1, arg2) + ret0, _ := ret[0].(*bls.Signature) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetSignature indicates an expected call of GetSignature. 
+func (mr *MockSignatureGetterMockRecorder) GetSignature(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSignature", reflect.TypeOf((*MockSignatureGetter)(nil).GetSignature), arg0, arg1, arg2) +} diff --git a/coreth/warp/aggregator/signature_getter.go b/coreth/warp/aggregator/signature_getter.go new file mode 100644 index 00000000..8bdb60fe --- /dev/null +++ b/coreth/warp/aggregator/signature_getter.go @@ -0,0 +1,117 @@ +// (c) 2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package aggregator + +import ( + "context" + "fmt" + "time" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/crypto/bls" + avalancheWarp "github.com/ava-labs/avalanchego/vms/platformvm/warp" + "github.com/ava-labs/avalanchego/vms/platformvm/warp/payload" + "github.com/ava-labs/coreth/plugin/evm/message" +) + +const ( + initialRetryFetchSignatureDelay = 100 * time.Millisecond + maxRetryFetchSignatureDelay = 5 * time.Second + retryBackoffFactor = 2 +) + +var _ SignatureGetter = (*NetworkSignatureGetter)(nil) + +// SignatureGetter defines the minimum network interface to perform signature aggregation +type SignatureGetter interface { + // GetSignature attempts to fetch a BLS Signature from [nodeID] for [unsignedWarpMessage] + GetSignature(ctx context.Context, nodeID ids.NodeID, unsignedWarpMessage *avalancheWarp.UnsignedMessage) (*bls.Signature, error) +} + +type NetworkClient interface { + SendAppRequest(ctx context.Context, nodeID ids.NodeID, message []byte) ([]byte, error) +} + +// NetworkSignatureGetter fetches warp signatures on behalf of the +// aggregator using VM App-Specific Messaging +type NetworkSignatureGetter struct { + Client NetworkClient +} + +func NewSignatureGetter(client NetworkClient) *NetworkSignatureGetter { + return &NetworkSignatureGetter{ + Client: client, + } +} + +// GetSignature attempts to fetch a BLS Signature 
of [unsignedWarpMessage] from [nodeID] until it succeeds or receives an invalid response +// +// Note: this function will continue attempting to fetch the signature from [nodeID] until it receives an invalid value or [ctx] is cancelled. +// The caller is responsible to cancel [ctx] if it no longer needs to fetch this signature. +func (s *NetworkSignatureGetter) GetSignature(ctx context.Context, nodeID ids.NodeID, unsignedWarpMessage *avalancheWarp.UnsignedMessage) (*bls.Signature, error) { + var signatureReqBytes []byte + parsedPayload, err := payload.Parse(unsignedWarpMessage.Payload) + if err != nil { + return nil, fmt.Errorf("failed to parse unsigned message payload: %w", err) + } + switch p := parsedPayload.(type) { + case *payload.AddressedCall: + signatureReq := message.MessageSignatureRequest{ + MessageID: unsignedWarpMessage.ID(), + } + signatureReqBytes, err = message.RequestToBytes(message.Codec, signatureReq) + if err != nil { + return nil, fmt.Errorf("failed to marshal signature request: %w", err) + } + case *payload.Hash: + signatureReq := message.BlockSignatureRequest{ + BlockID: p.Hash, + } + signatureReqBytes, err = message.RequestToBytes(message.Codec, signatureReq) + if err != nil { + return nil, fmt.Errorf("failed to marshal signature request: %w", err) + } + } + + delay := initialRetryFetchSignatureDelay + timer := time.NewTimer(delay) + defer timer.Stop() + for { + signatureRes, err := s.Client.SendAppRequest(ctx, nodeID, signatureReqBytes) + // If the client fails to retrieve a response perform an exponential backoff. + // Note: it is up to the caller to ensure that [ctx] is eventually cancelled + if err != nil { + // Wait until the retry delay has elapsed before retrying. + if !timer.Stop() { + <-timer.C + } + timer.Reset(delay) + + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-timer.C: + } + + // Exponential backoff. 
+ delay *= retryBackoffFactor + if delay > maxRetryFetchSignatureDelay { + delay = maxRetryFetchSignatureDelay + } + continue + } + var response message.SignatureResponse + if _, err := message.Codec.Unmarshal(signatureRes, &response); err != nil { + return nil, fmt.Errorf("failed to unmarshal signature res: %w", err) + } + if response.Signature == [bls.SignatureLen]byte{} { + return nil, fmt.Errorf("received empty signature response") + } + blsSignature, err := bls.SignatureFromBytes(response.Signature[:]) + if err != nil { + return nil, fmt.Errorf("failed to parse signature from res: %w", err) + } + return blsSignature, nil + } +} diff --git a/coreth/warp/backend.go b/coreth/warp/backend.go new file mode 100644 index 00000000..8bd0a8b5 --- /dev/null +++ b/coreth/warp/backend.go @@ -0,0 +1,219 @@ +// (c) 2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package warp + +import ( + "context" + "errors" + "fmt" + + "github.com/ava-labs/avalanchego/cache" + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/choices" + "github.com/ava-labs/avalanchego/snow/consensus/snowman" + "github.com/ava-labs/avalanchego/utils/crypto/bls" + avalancheWarp "github.com/ava-labs/avalanchego/vms/platformvm/warp" + "github.com/ava-labs/avalanchego/vms/platformvm/warp/payload" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/log" +) + +var ( + _ Backend = &backend{} + errParsingOffChainMessage = errors.New("failed to parse off-chain message") +) + +const batchSize = ethdb.IdealBatchSize + +type BlockClient interface { + GetBlock(ctx context.Context, blockID ids.ID) (snowman.Block, error) +} + +// Backend tracks signature-eligible warp messages and provides an interface to fetch them. +// The backend is also used to query for warp message signatures by the signature request handler. 
+type Backend interface { + // AddMessage signs [unsignedMessage] and adds it to the warp backend database + AddMessage(unsignedMessage *avalancheWarp.UnsignedMessage) error + + // GetMessageSignature returns the signature of the requested message hash. + GetMessageSignature(messageID ids.ID) ([bls.SignatureLen]byte, error) + + // GetBlockSignature returns the signature of the requested block hash. + GetBlockSignature(blockID ids.ID) ([bls.SignatureLen]byte, error) + + // GetMessage retrieves the [unsignedMessage] from the warp backend database if available + GetMessage(messageHash ids.ID) (*avalancheWarp.UnsignedMessage, error) + + // Clear clears the entire db + Clear() error +} + +// backend implements Backend, keeps track of warp messages, and generates message signatures. +type backend struct { + networkID uint32 + sourceChainID ids.ID + db database.Database + warpSigner avalancheWarp.Signer + blockClient BlockClient + messageSignatureCache *cache.LRU[ids.ID, [bls.SignatureLen]byte] + blockSignatureCache *cache.LRU[ids.ID, [bls.SignatureLen]byte] + messageCache *cache.LRU[ids.ID, *avalancheWarp.UnsignedMessage] + offchainAddressedCallMsgs map[ids.ID]*avalancheWarp.UnsignedMessage +} + +// NewBackend creates a new Backend, and initializes the signature cache and message tracking database. 
+func NewBackend( + networkID uint32, + sourceChainID ids.ID, + warpSigner avalancheWarp.Signer, + blockClient BlockClient, + db database.Database, + cacheSize int, + offchainMessages [][]byte, +) (Backend, error) { + b := &backend{ + networkID: networkID, + sourceChainID: sourceChainID, + db: db, + warpSigner: warpSigner, + blockClient: blockClient, + messageSignatureCache: &cache.LRU[ids.ID, [bls.SignatureLen]byte]{Size: cacheSize}, + blockSignatureCache: &cache.LRU[ids.ID, [bls.SignatureLen]byte]{Size: cacheSize}, + messageCache: &cache.LRU[ids.ID, *avalancheWarp.UnsignedMessage]{Size: cacheSize}, + offchainAddressedCallMsgs: make(map[ids.ID]*avalancheWarp.UnsignedMessage), + } + return b, b.initOffChainMessages(offchainMessages) +} + +func (b *backend) initOffChainMessages(offchainMessages [][]byte) error { + for i, offchainMsg := range offchainMessages { + unsignedMsg, err := avalancheWarp.ParseUnsignedMessage(offchainMsg) + if err != nil { + return fmt.Errorf("%w at index %d: %w", errParsingOffChainMessage, i, err) + } + + if unsignedMsg.NetworkID != b.networkID { + return fmt.Errorf("%w at index %d", avalancheWarp.ErrWrongNetworkID, i) + } + + if unsignedMsg.SourceChainID != b.sourceChainID { + return fmt.Errorf("%w at index %d", avalancheWarp.ErrWrongSourceChainID, i) + } + + _, err = payload.ParseAddressedCall(unsignedMsg.Payload) + if err != nil { + return fmt.Errorf("%w at index %d as AddressedCall: %w", errParsingOffChainMessage, i, err) + } + b.offchainAddressedCallMsgs[unsignedMsg.ID()] = unsignedMsg + } + + return nil +} + +func (b *backend) Clear() error { + b.messageSignatureCache.Flush() + b.blockSignatureCache.Flush() + b.messageCache.Flush() + return database.Clear(b.db, batchSize) +} + +func (b *backend) AddMessage(unsignedMessage *avalancheWarp.UnsignedMessage) error { + messageID := unsignedMessage.ID() + + // In the case when a node restarts, and possibly changes its bls key, the cache gets emptied but the database does not. 
+ // So to avoid having incorrect signatures saved in the database after a bls key change, we save the full message in the database. + // Whereas for the cache, after the node restart, the cache would be emptied so we can directly save the signatures. + if err := b.db.Put(messageID[:], unsignedMessage.Bytes()); err != nil { + return fmt.Errorf("failed to put warp signature in db: %w", err) + } + + var signature [bls.SignatureLen]byte + sig, err := b.warpSigner.Sign(unsignedMessage) + if err != nil { + return fmt.Errorf("failed to sign warp message: %w", err) + } + + copy(signature[:], sig) + b.messageSignatureCache.Put(messageID, signature) + log.Debug("Adding warp message to backend", "messageID", messageID) + return nil +} + +func (b *backend) GetMessageSignature(messageID ids.ID) ([bls.SignatureLen]byte, error) { + log.Debug("Getting warp message from backend", "messageID", messageID) + if sig, ok := b.messageSignatureCache.Get(messageID); ok { + return sig, nil + } + + unsignedMessage, err := b.GetMessage(messageID) + if err != nil { + return [bls.SignatureLen]byte{}, fmt.Errorf("failed to get warp message %s from db: %w", messageID.String(), err) + } + + var signature [bls.SignatureLen]byte + sig, err := b.warpSigner.Sign(unsignedMessage) + if err != nil { + return [bls.SignatureLen]byte{}, fmt.Errorf("failed to sign warp message: %w", err) + } + + copy(signature[:], sig) + b.messageSignatureCache.Put(messageID, signature) + return signature, nil +} + +func (b *backend) GetBlockSignature(blockID ids.ID) ([bls.SignatureLen]byte, error) { + log.Debug("Getting block from backend", "blockID", blockID) + if sig, ok := b.blockSignatureCache.Get(blockID); ok { + return sig, nil + } + + block, err := b.blockClient.GetBlock(context.TODO(), blockID) + if err != nil { + return [bls.SignatureLen]byte{}, fmt.Errorf("failed to get block %s: %w", blockID, err) + } + if block.Status() != choices.Accepted { + return [bls.SignatureLen]byte{}, fmt.Errorf("block %s was not 
accepted", blockID) + } + + var signature [bls.SignatureLen]byte + blockHashPayload, err := payload.NewHash(blockID) + if err != nil { + return [bls.SignatureLen]byte{}, fmt.Errorf("failed to create new block hash payload: %w", err) + } + unsignedMessage, err := avalancheWarp.NewUnsignedMessage(b.networkID, b.sourceChainID, blockHashPayload.Bytes()) + if err != nil { + return [bls.SignatureLen]byte{}, fmt.Errorf("failed to create new unsigned warp message: %w", err) + } + sig, err := b.warpSigner.Sign(unsignedMessage) + if err != nil { + return [bls.SignatureLen]byte{}, fmt.Errorf("failed to sign warp message: %w", err) + } + + copy(signature[:], sig) + b.blockSignatureCache.Put(blockID, signature) + return signature, nil +} + +func (b *backend) GetMessage(messageID ids.ID) (*avalancheWarp.UnsignedMessage, error) { + if message, ok := b.messageCache.Get(messageID); ok { + return message, nil + } + if message, ok := b.offchainAddressedCallMsgs[messageID]; ok { + return message, nil + } + + unsignedMessageBytes, err := b.db.Get(messageID[:]) + if err != nil { + return nil, fmt.Errorf("failed to get warp message %s from db: %w", messageID.String(), err) + } + + unsignedMessage, err := avalancheWarp.ParseUnsignedMessage(unsignedMessageBytes) + if err != nil { + return nil, fmt.Errorf("failed to parse unsigned message %s: %w", messageID.String(), err) + } + b.messageCache.Put(messageID, unsignedMessage) + + return unsignedMessage, nil +} diff --git a/coreth/warp/backend_test.go b/coreth/warp/backend_test.go new file mode 100644 index 00000000..8bba70f4 --- /dev/null +++ b/coreth/warp/backend_test.go @@ -0,0 +1,236 @@ +// (c) 2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package warp + +import ( + "context" + "errors" + "testing" + + "github.com/ava-labs/avalanchego/database/memdb" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/choices" + "github.com/ava-labs/avalanchego/snow/consensus/snowman" + "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/snow/engine/snowman/block" + "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/crypto/bls" + "github.com/ava-labs/avalanchego/utils/hashing" + avalancheWarp "github.com/ava-labs/avalanchego/vms/platformvm/warp" + "github.com/ava-labs/avalanchego/vms/platformvm/warp/payload" + "github.com/stretchr/testify/require" +) + +var ( + networkID uint32 = 54321 + sourceChainID = ids.GenerateTestID() + testSourceAddress = utils.RandomBytes(20) + testPayload = []byte("test") + testUnsignedMessage *avalancheWarp.UnsignedMessage +) + +func init() { + testAddressedCallPayload, err := payload.NewAddressedCall(testSourceAddress, testPayload) + if err != nil { + panic(err) + } + testUnsignedMessage, err = avalancheWarp.NewUnsignedMessage(networkID, sourceChainID, testAddressedCallPayload.Bytes()) + if err != nil { + panic(err) + } +} + +func TestClearDB(t *testing.T) { + db := memdb.New() + + sk, err := bls.NewSecretKey() + require.NoError(t, err) + warpSigner := avalancheWarp.NewSigner(sk, networkID, sourceChainID) + backendIntf, err := NewBackend(networkID, sourceChainID, warpSigner, nil, db, 500, nil) + require.NoError(t, err) + backend, ok := backendIntf.(*backend) + require.True(t, ok) + + // use multiple messages to test that all messages get cleared + payloads := [][]byte{[]byte("test1"), []byte("test2"), []byte("test3"), []byte("test4"), []byte("test5")} + messageIDs := []ids.ID{} + + // add all messages + for _, payload := range payloads { + unsignedMsg, err := avalancheWarp.NewUnsignedMessage(networkID, sourceChainID, payload) + require.NoError(t, err) + messageID := 
hashing.ComputeHash256Array(unsignedMsg.Bytes()) + messageIDs = append(messageIDs, messageID) + err = backend.AddMessage(unsignedMsg) + require.NoError(t, err) + // ensure that the message was added + _, err = backend.GetMessageSignature(messageID) + require.NoError(t, err) + } + + err = backend.Clear() + require.NoError(t, err) + require.Zero(t, backend.messageCache.Len()) + require.Zero(t, backend.messageSignatureCache.Len()) + require.Zero(t, backend.blockSignatureCache.Len()) + it := db.NewIterator() + defer it.Release() + require.False(t, it.Next()) + + // ensure all messages have been deleted + for _, messageID := range messageIDs { + _, err := backend.GetMessageSignature(messageID) + require.ErrorContains(t, err, "failed to get warp message") + } +} + +func TestAddAndGetValidMessage(t *testing.T) { + db := memdb.New() + + sk, err := bls.NewSecretKey() + require.NoError(t, err) + warpSigner := avalancheWarp.NewSigner(sk, networkID, sourceChainID) + backend, err := NewBackend(networkID, sourceChainID, warpSigner, nil, db, 500, nil) + require.NoError(t, err) + + // Add testUnsignedMessage to the warp backend + err = backend.AddMessage(testUnsignedMessage) + require.NoError(t, err) + + // Verify that a signature is returned successfully, and compare to expected signature. + messageID := testUnsignedMessage.ID() + signature, err := backend.GetMessageSignature(messageID) + require.NoError(t, err) + + expectedSig, err := warpSigner.Sign(testUnsignedMessage) + require.NoError(t, err) + require.Equal(t, expectedSig, signature[:]) +} + +func TestAddAndGetUnknownMessage(t *testing.T) { + db := memdb.New() + + sk, err := bls.NewSecretKey() + require.NoError(t, err) + warpSigner := avalancheWarp.NewSigner(sk, networkID, sourceChainID) + backend, err := NewBackend(networkID, sourceChainID, warpSigner, nil, db, 500, nil) + require.NoError(t, err) + + // Try getting a signature for a message that was not added. 
+ messageID := testUnsignedMessage.ID() + _, err = backend.GetMessageSignature(messageID) + require.Error(t, err) +} + +func TestGetBlockSignature(t *testing.T) { + require := require.New(t) + + blkID := ids.GenerateTestID() + testVM := &block.TestVM{ + TestVM: common.TestVM{T: t}, + GetBlockF: func(ctx context.Context, i ids.ID) (snowman.Block, error) { + if i == blkID { + return &snowman.TestBlock{ + TestDecidable: choices.TestDecidable{ + IDV: blkID, + StatusV: choices.Accepted, + }, + }, nil + } + return nil, errors.New("invalid blockID") + }, + } + db := memdb.New() + + sk, err := bls.NewSecretKey() + require.NoError(err) + warpSigner := avalancheWarp.NewSigner(sk, networkID, sourceChainID) + backend, err := NewBackend(networkID, sourceChainID, warpSigner, testVM, db, 500, nil) + require.NoError(err) + + blockHashPayload, err := payload.NewHash(blkID) + require.NoError(err) + unsignedMessage, err := avalancheWarp.NewUnsignedMessage(networkID, sourceChainID, blockHashPayload.Bytes()) + require.NoError(err) + expectedSig, err := warpSigner.Sign(unsignedMessage) + require.NoError(err) + + signature, err := backend.GetBlockSignature(blkID) + require.NoError(err) + require.Equal(expectedSig, signature[:]) + + _, err = backend.GetBlockSignature(ids.GenerateTestID()) + require.Error(err) +} + +func TestZeroSizedCache(t *testing.T) { + db := memdb.New() + + sk, err := bls.NewSecretKey() + require.NoError(t, err) + warpSigner := avalancheWarp.NewSigner(sk, networkID, sourceChainID) + + // Verify zero sized cache works normally, because the lru cache will be initialized to size 1 for any size parameter <= 0. + backend, err := NewBackend(networkID, sourceChainID, warpSigner, nil, db, 0, nil) + require.NoError(t, err) + + // Add testUnsignedMessage to the warp backend + err = backend.AddMessage(testUnsignedMessage) + require.NoError(t, err) + + // Verify that a signature is returned successfully, and compare to expected signature. 
+ messageID := testUnsignedMessage.ID() + signature, err := backend.GetMessageSignature(messageID) + require.NoError(t, err) + + expectedSig, err := warpSigner.Sign(testUnsignedMessage) + require.NoError(t, err) + require.Equal(t, expectedSig, signature[:]) +} + +func TestOffChainMessages(t *testing.T) { + type test struct { + offchainMessages [][]byte + check func(require *require.Assertions, b Backend) + err error + } + sk, err := bls.NewSecretKey() + require.NoError(t, err) + warpSigner := avalancheWarp.NewSigner(sk, networkID, sourceChainID) + + for name, test := range map[string]test{ + "no offchain messages": {}, + "single off-chain message": { + offchainMessages: [][]byte{ + testUnsignedMessage.Bytes(), + }, + check: func(require *require.Assertions, b Backend) { + msg, err := b.GetMessage(testUnsignedMessage.ID()) + require.NoError(err) + require.Equal(testUnsignedMessage.Bytes(), msg.Bytes()) + + signature, err := b.GetMessageSignature(testUnsignedMessage.ID()) + require.NoError(err) + expectedSignatureBytes, err := warpSigner.Sign(msg) + require.NoError(err) + require.Equal(expectedSignatureBytes, signature[:]) + }, + }, + "invalid message": { + offchainMessages: [][]byte{{1, 2, 3}}, + err: errParsingOffChainMessage, + }, + } { + t.Run(name, func(t *testing.T) { + require := require.New(t) + db := memdb.New() + + backend, err := NewBackend(networkID, sourceChainID, warpSigner, nil, db, 0, test.offchainMessages) + require.ErrorIs(err, test.err) + if test.check != nil { + test.check(require, backend) + } + }) + } +} diff --git a/coreth/warp/client.go b/coreth/warp/client.go new file mode 100644 index 00000000..90633d6b --- /dev/null +++ b/coreth/warp/client.go @@ -0,0 +1,79 @@ +// (c) 2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package warp + +import ( + "context" + "fmt" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/coreth/rpc" + "github.com/ethereum/go-ethereum/common/hexutil" +) + +var _ Client = (*client)(nil) + +type Client interface { + GetMessage(ctx context.Context, messageID ids.ID) ([]byte, error) + GetMessageSignature(ctx context.Context, messageID ids.ID) ([]byte, error) + GetMessageAggregateSignature(ctx context.Context, messageID ids.ID, quorumNum uint64, subnetIDStr string) ([]byte, error) + GetBlockSignature(ctx context.Context, blockID ids.ID) ([]byte, error) + GetBlockAggregateSignature(ctx context.Context, blockID ids.ID, quorumNum uint64, subnetIDStr string) ([]byte, error) +} + +// client implementation for interacting with EVM [chain] +type client struct { + client *rpc.Client +} + +// NewClient returns a Client for interacting with EVM [chain] +func NewClient(uri, chain string) (Client, error) { + innerClient, err := rpc.Dial(fmt.Sprintf("%s/ext/bc/%s/rpc", uri, chain)) + if err != nil { + return nil, fmt.Errorf("failed to dial client. err: %w", err) + } + return &client{ + client: innerClient, + }, nil +} + +func (c *client) GetMessage(ctx context.Context, messageID ids.ID) ([]byte, error) { + var res hexutil.Bytes + if err := c.client.CallContext(ctx, &res, "warp_getMessage", messageID); err != nil { + return nil, fmt.Errorf("call to warp_getMessage failed. err: %w", err) + } + return res, nil +} + +func (c *client) GetMessageSignature(ctx context.Context, messageID ids.ID) ([]byte, error) { + var res hexutil.Bytes + if err := c.client.CallContext(ctx, &res, "warp_getMessageSignature", messageID); err != nil { + return nil, fmt.Errorf("call to warp_getMessageSignature failed. 
err: %w", err) + } + return res, nil +} + +func (c *client) GetMessageAggregateSignature(ctx context.Context, messageID ids.ID, quorumNum uint64, subnetIDStr string) ([]byte, error) { + var res hexutil.Bytes + if err := c.client.CallContext(ctx, &res, "warp_getMessageAggregateSignature", messageID, quorumNum, subnetIDStr); err != nil { + return nil, fmt.Errorf("call to warp_getMessageAggregateSignature failed. err: %w", err) + } + return res, nil +} + +func (c *client) GetBlockSignature(ctx context.Context, blockID ids.ID) ([]byte, error) { + var res hexutil.Bytes + if err := c.client.CallContext(ctx, &res, "warp_getBlockSignature", blockID); err != nil { + return nil, fmt.Errorf("call to warp_getBlockSignature failed. err: %w", err) + } + return res, nil +} + +func (c *client) GetBlockAggregateSignature(ctx context.Context, blockID ids.ID, quorumNum uint64, subnetIDStr string) ([]byte, error) { + var res hexutil.Bytes + if err := c.client.CallContext(ctx, &res, "warp_getBlockAggregateSignature", blockID, quorumNum, subnetIDStr); err != nil { + return nil, fmt.Errorf("call to warp_getBlockAggregateSignature failed. err: %w", err) + } + return res, nil +} diff --git a/coreth/warp/fetcher.go b/coreth/warp/fetcher.go new file mode 100644 index 00000000..99c70e54 --- /dev/null +++ b/coreth/warp/fetcher.go @@ -0,0 +1,54 @@ +// (c) 2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package warp + +import ( + "context" + "fmt" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/crypto/bls" + avalancheWarp "github.com/ava-labs/avalanchego/vms/platformvm/warp" + "github.com/ava-labs/avalanchego/vms/platformvm/warp/payload" + "github.com/ava-labs/coreth/warp/aggregator" +) + +var _ aggregator.SignatureGetter = (*apiFetcher)(nil) + +type apiFetcher struct { + clients map[ids.NodeID]Client +} + +func NewAPIFetcher(clients map[ids.NodeID]Client) *apiFetcher { + return &apiFetcher{ + clients: clients, + } +} + +func (f *apiFetcher) GetSignature(ctx context.Context, nodeID ids.NodeID, unsignedWarpMessage *avalancheWarp.UnsignedMessage) (*bls.Signature, error) { + client, ok := f.clients[nodeID] + if !ok { + return nil, fmt.Errorf("no warp client for nodeID: %s", nodeID) + } + var signatureBytes []byte + parsedPayload, err := payload.Parse(unsignedWarpMessage.Payload) + if err != nil { + return nil, fmt.Errorf("failed to parse unsigned message payload: %w", err) + } + switch p := parsedPayload.(type) { + case *payload.AddressedCall: + signatureBytes, err = client.GetMessageSignature(ctx, unsignedWarpMessage.ID()) + case *payload.Hash: + signatureBytes, err = client.GetBlockSignature(ctx, p.Hash) + } + if err != nil { + return nil, err + } + + signature, err := bls.SignatureFromBytes(signatureBytes) + if err != nil { + return nil, fmt.Errorf("failed to parse signature from client %s: %w", nodeID, err) + } + return signature, nil +} diff --git a/coreth/warp/handlers/signature_request.go b/coreth/warp/handlers/signature_request.go new file mode 100644 index 00000000..7b03fc52 --- /dev/null +++ b/coreth/warp/handlers/signature_request.go @@ -0,0 +1,102 @@ +// (c) 2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
package handlers

import (
	"context"
	"time"

	"github.com/ava-labs/avalanchego/codec"
	"github.com/ava-labs/avalanchego/ids"
	"github.com/ava-labs/avalanchego/utils/crypto/bls"
	"github.com/ava-labs/coreth/plugin/evm/message"
	"github.com/ava-labs/coreth/warp"
	"github.com/ethereum/go-ethereum/log"
)

// SignatureRequestHandler serves warp signature requests. It is a peer.RequestHandler for message.MessageSignatureRequest.
type SignatureRequestHandler struct {
	backend warp.Backend  // source of message/block signatures
	codec   codec.Manager // marshals SignatureResponse payloads
	stats   *handlerStats // request/hit/miss metrics
}

// NewSignatureRequestHandler returns a handler serving signatures from [backend],
// with freshly registered metrics.
func NewSignatureRequestHandler(backend warp.Backend, codec codec.Manager) *SignatureRequestHandler {
	return &SignatureRequestHandler{
		backend: backend,
		codec:   codec,
		stats:   newStats(),
	}
}

// OnMessageSignatureRequest handles message.MessageSignatureRequest, and retrieves a warp signature for the requested message ID.
// Never returns an error
// Expects returned errors to be treated as FATAL
// Returns empty response if signature is not found
// Assumes ctx is active
func (s *SignatureRequestHandler) OnMessageSignatureRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, signatureRequest message.MessageSignatureRequest) ([]byte, error) {
	startTime := time.Now()
	s.stats.IncMessageSignatureRequest()

	// Always report signature request time
	defer func() {
		s.stats.UpdateMessageSignatureRequestTime(time.Since(startTime))
	}()

	// A lookup miss is not fatal: respond with an all-zero signature so the
	// requester can distinguish "unknown" without this node erroring.
	signature, err := s.backend.GetMessageSignature(signatureRequest.MessageID)
	if err != nil {
		log.Debug("Unknown warp signature requested", "messageID", signatureRequest.MessageID)
		s.stats.IncMessageSignatureMiss()
		signature = [bls.SignatureLen]byte{}
	} else {
		s.stats.IncMessageSignatureHit()
	}

	response := message.SignatureResponse{Signature: signature}
	responseBytes, err := s.codec.Marshal(message.Version, &response)
	if err != nil {
		// Marshal failures drop the request (nil, nil) rather than returning
		// an error, since returned errors are treated as FATAL.
		log.Error("could not marshal SignatureResponse, dropping request", "nodeID", nodeID, "requestID", requestID, "err", err)
		return nil, nil
	}

	return responseBytes, nil
}

// OnBlockSignatureRequest handles message.BlockSignatureRequest analogously to
// OnMessageSignatureRequest, keyed by block ID instead of message ID.
func (s *SignatureRequestHandler) OnBlockSignatureRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, request message.BlockSignatureRequest) ([]byte, error) {
	startTime := time.Now()
	s.stats.IncBlockSignatureRequest()

	// Always report signature request time
	defer func() {
		s.stats.UpdateBlockSignatureRequestTime(time.Since(startTime))
	}()

	// As above: a miss yields an all-zero signature, not an error.
	signature, err := s.backend.GetBlockSignature(request.BlockID)
	if err != nil {
		log.Debug("Unknown warp signature requested", "blockID", request.BlockID)
		s.stats.IncBlockSignatureMiss()
		signature = [bls.SignatureLen]byte{}
	} else {
		s.stats.IncBlockSignatureHit()
	}

	response := message.SignatureResponse{Signature: signature}
	responseBytes, err := s.codec.Marshal(message.Version, &response)
	if err != nil {
		log.Error("could not marshal SignatureResponse, dropping request", "nodeID", nodeID, "requestID", requestID, "err", err)
		return nil, nil
	}

	return responseBytes, nil
}

// NoopSignatureRequestHandler ignores all signature requests.
type NoopSignatureRequestHandler struct{}

func (s *NoopSignatureRequestHandler) OnMessageSignatureRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, signatureRequest message.MessageSignatureRequest) ([]byte, error) {
	return nil, nil
}

func (s *NoopSignatureRequestHandler) OnBlockSignatureRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, signatureRequest message.BlockSignatureRequest) ([]byte, error) {
	return nil, nil
}
diff --git a/coreth/warp/handlers/signature_request_test.go b/coreth/warp/handlers/signature_request_test.go
new file mode 100644
index 00000000..7dc6347b
--- /dev/null
+++ b/coreth/warp/handlers/signature_request_test.go
@@ -0,0 +1,228 @@
// (c) 2023, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
package handlers

import (
	"context"
	"errors"
	"testing"

	"github.com/ava-labs/avalanchego/database/memdb"
	"github.com/ava-labs/avalanchego/ids"
	"github.com/ava-labs/avalanchego/snow/choices"
	"github.com/ava-labs/avalanchego/snow/consensus/snowman"
	"github.com/ava-labs/avalanchego/snow/engine/common"
	"github.com/ava-labs/avalanchego/snow/engine/snowman/block"
	"github.com/ava-labs/avalanchego/utils/crypto/bls"
	avalancheWarp "github.com/ava-labs/avalanchego/vms/platformvm/warp"
	"github.com/ava-labs/avalanchego/vms/platformvm/warp/payload"
	"github.com/ava-labs/coreth/plugin/evm/message"
	"github.com/ava-labs/coreth/utils"
	"github.com/ava-labs/coreth/warp"
	"github.com/stretchr/testify/require"
)

// TestMessageSignatureHandler exercises OnMessageSignatureRequest for known,
// off-chain, and unknown message IDs, checking both the returned signature and
// the handler's hit/miss stats.
func TestMessageSignatureHandler(t *testing.T) {
	database := memdb.New()
	snowCtx := utils.TestSnowContext()
	blsSecretKey, err := bls.NewSecretKey()
	require.NoError(t, err)
	warpSigner := avalancheWarp.NewSigner(blsSecretKey, snowCtx.NetworkID, snowCtx.ChainID)

	// An off-chain message registered at backend construction time.
	addressedPayload, err := payload.NewAddressedCall([]byte{1, 2, 3}, []byte{1, 2, 3})
	require.NoError(t, err)
	offchainMessage, err := avalancheWarp.NewUnsignedMessage(snowCtx.NetworkID, snowCtx.ChainID, addressedPayload.Bytes())
	require.NoError(t, err)

	backend, err := warp.NewBackend(snowCtx.NetworkID, snowCtx.ChainID, warpSigner, &block.TestVM{TestVM: common.TestVM{T: t}}, database, 100, [][]byte{offchainMessage.Bytes()})
	require.NoError(t, err)

	// A message added to the backend after construction.
	msg, err := avalancheWarp.NewUnsignedMessage(snowCtx.NetworkID, snowCtx.ChainID, []byte("test"))
	require.NoError(t, err)
	messageID := msg.ID()
	require.NoError(t, backend.AddMessage(msg))
	signature, err := backend.GetMessageSignature(messageID)
	require.NoError(t, err)
	offchainSignature, err := backend.GetMessageSignature(offchainMessage.ID())
	require.NoError(t, err)

	unknownMessageID := ids.GenerateTestID()

	// Misses are expected to come back as an all-zero signature.
	emptySignature := [bls.SignatureLen]byte{}

	tests := map[string]struct {
		setup       func() (request message.MessageSignatureRequest, expectedResponse []byte)
		verifyStats func(t *testing.T, stats *handlerStats)
	}{
		"known message": {
			setup: func() (request message.MessageSignatureRequest, expectedResponse []byte) {
				return message.MessageSignatureRequest{
					MessageID: messageID,
				}, signature[:]
			},
			verifyStats: func(t *testing.T, stats *handlerStats) {
				require.EqualValues(t, 1, stats.messageSignatureRequest.Count())
				require.EqualValues(t, 1, stats.messageSignatureHit.Count())
				require.EqualValues(t, 0, stats.messageSignatureMiss.Count())
				require.EqualValues(t, 0, stats.blockSignatureRequest.Count())
				require.EqualValues(t, 0, stats.blockSignatureHit.Count())
				require.EqualValues(t, 0, stats.blockSignatureMiss.Count())
			},
		},
		"offchain message": {
			setup: func() (request message.MessageSignatureRequest, expectedResponse []byte) {
				return message.MessageSignatureRequest{
					MessageID: offchainMessage.ID(),
				}, offchainSignature[:]
			},
			verifyStats: func(t *testing.T, stats *handlerStats) {
				require.EqualValues(t, 1, stats.messageSignatureRequest.Count())
				require.EqualValues(t, 1, stats.messageSignatureHit.Count())
				require.EqualValues(t, 0, stats.messageSignatureMiss.Count())
				require.EqualValues(t, 0, stats.blockSignatureRequest.Count())
				require.EqualValues(t, 0, stats.blockSignatureHit.Count())
				require.EqualValues(t, 0, stats.blockSignatureMiss.Count())
			},
		},
		"unknown message": {
			setup: func() (request message.MessageSignatureRequest, expectedResponse []byte) {
				return message.MessageSignatureRequest{
					MessageID: unknownMessageID,
				}, emptySignature[:]
			},
			verifyStats: func(t *testing.T, stats *handlerStats) {
				require.EqualValues(t, 1, stats.messageSignatureRequest.Count())
				require.EqualValues(t, 0, stats.messageSignatureHit.Count())
				require.EqualValues(t, 1, stats.messageSignatureMiss.Count())
				require.EqualValues(t, 0, stats.blockSignatureRequest.Count())
				require.EqualValues(t, 0, stats.blockSignatureHit.Count())
				require.EqualValues(t, 0, stats.blockSignatureMiss.Count())
			},
		},
	}

	for name, test := range tests {
		t.Run(name, func(t *testing.T) {
			handler := NewSignatureRequestHandler(backend, message.Codec)
			// Stats are registered globally; reset before each case.
			handler.stats.Clear()

			request, expectedResponse := test.setup()
			responseBytes, err := handler.OnMessageSignatureRequest(context.Background(), ids.GenerateTestNodeID(), 1, request)
			require.NoError(t, err)

			test.verifyStats(t, handler.stats)

			// If the expected response is empty, assert that the handler returns an empty response and return early.
			if len(expectedResponse) == 0 {
				require.Len(t, responseBytes, 0, "expected response to be empty")
				return
			}
			var response message.SignatureResponse
			_, err = message.Codec.Unmarshal(responseBytes, &response)
			require.NoError(t, err, "error unmarshalling SignatureResponse")

			require.Equal(t, expectedResponse, response.Signature[:])
		})
	}
}

// TestBlockSignatureHandler exercises OnBlockSignatureRequest for a known
// accepted block and an unknown block ID, checking signatures and stats.
func TestBlockSignatureHandler(t *testing.T) {
	database := memdb.New()
	snowCtx := utils.TestSnowContext()
	blsSecretKey, err := bls.NewSecretKey()
	require.NoError(t, err)

	warpSigner := avalancheWarp.NewSigner(blsSecretKey, snowCtx.NetworkID, snowCtx.ChainID)
	blkID := ids.GenerateTestID()
	// VM stub: only [blkID] resolves, and it resolves to an accepted block.
	testVM := &block.TestVM{
		TestVM: common.TestVM{T: t},
		GetBlockF: func(ctx context.Context, i ids.ID) (snowman.Block, error) {
			if i == blkID {
				return &snowman.TestBlock{
					TestDecidable: choices.TestDecidable{
						IDV:     blkID,
						StatusV: choices.Accepted,
					},
				}, nil
			}
			return nil, errors.New("invalid blockID")
		},
	}
	backend, err := warp.NewBackend(
		snowCtx.NetworkID,
		snowCtx.ChainID,
		warpSigner,
		testVM,
		database,
		100,
		nil,
	)
	require.NoError(t, err)

	signature, err := backend.GetBlockSignature(blkID)
	require.NoError(t, err)
	unknownMessageID := ids.GenerateTestID()

	// Misses are expected to come back as an all-zero signature.
	emptySignature := [bls.SignatureLen]byte{}

	tests := map[string]struct {
		setup       func() (request message.BlockSignatureRequest, expectedResponse []byte)
		verifyStats func(t *testing.T, stats *handlerStats)
	}{
		"known block": {
			setup: func() (request message.BlockSignatureRequest, expectedResponse []byte) {
				return message.BlockSignatureRequest{
					BlockID: blkID,
				}, signature[:]
			},
			verifyStats: func(t *testing.T, stats *handlerStats) {
				require.EqualValues(t, 0, stats.messageSignatureRequest.Count())
				require.EqualValues(t, 0, stats.messageSignatureHit.Count())
				require.EqualValues(t, 0, stats.messageSignatureMiss.Count())
				require.EqualValues(t, 1, stats.blockSignatureRequest.Count())
				require.EqualValues(t, 1, stats.blockSignatureHit.Count())
				require.EqualValues(t, 0, stats.blockSignatureMiss.Count())
			},
		},
		"unknown block": {
			setup: func() (request message.BlockSignatureRequest, expectedResponse []byte) {
				return message.BlockSignatureRequest{
					BlockID: unknownMessageID,
				}, emptySignature[:]
			},
			verifyStats: func(t *testing.T, stats *handlerStats) {
				require.EqualValues(t, 0, stats.messageSignatureRequest.Count())
				require.EqualValues(t, 0, stats.messageSignatureHit.Count())
				require.EqualValues(t, 0, stats.messageSignatureMiss.Count())
				require.EqualValues(t, 1, stats.blockSignatureRequest.Count())
				require.EqualValues(t, 0, stats.blockSignatureHit.Count())
				require.EqualValues(t, 1, stats.blockSignatureMiss.Count())
			},
		},
	}

	for name, test := range tests {
		t.Run(name, func(t *testing.T) {
			handler := NewSignatureRequestHandler(backend, message.Codec)
			// Stats are registered globally; reset before each case.
			handler.stats.Clear()

			request, expectedResponse := test.setup()
			responseBytes, err := handler.OnBlockSignatureRequest(context.Background(), ids.GenerateTestNodeID(), 1, request)
			require.NoError(t, err)

			test.verifyStats(t, handler.stats)

			// If the expected response is empty, assert that the handler returns an empty response and return early.
			if len(expectedResponse) == 0 {
				require.Len(t, responseBytes, 0, "expected response to be empty")
				return
			}
			var response message.SignatureResponse
			_, err = message.Codec.Unmarshal(responseBytes, &response)
			require.NoError(t, err, "error unmarshalling SignatureResponse")

			require.Equal(t, expectedResponse, response.Signature[:])
		})
	}
}
diff --git a/coreth/warp/handlers/stats.go b/coreth/warp/handlers/stats.go
new file mode 100644
index 00000000..9e2ea373
--- /dev/null
+++ b/coreth/warp/handlers/stats.go
@@ -0,0 +1,59 @@
// (c) 2023, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.

package handlers

import (
	"time"

	"github.com/ava-labs/coreth/metrics"
)

// handlerStats tracks request counts, hit/miss counts, and cumulative request
// durations for the warp signature request handlers.
type handlerStats struct {
	// MessageSignatureRequestHandler metrics
	messageSignatureRequest         metrics.Counter
	messageSignatureHit             metrics.Counter
	messageSignatureMiss            metrics.Counter
	messageSignatureRequestDuration metrics.Gauge
	// BlockSignatureRequestHandler metrics
	blockSignatureRequest         metrics.Counter
	blockSignatureHit             metrics.Counter
	blockSignatureMiss            metrics.Counter
	blockSignatureRequestDuration metrics.Gauge
}

// newStats registers (or reuses) the handler metrics in the default registry.
func newStats() *handlerStats {
	return &handlerStats{
		messageSignatureRequest:         metrics.GetOrRegisterCounter("message_signature_request_count", nil),
		messageSignatureHit:             metrics.GetOrRegisterCounter("message_signature_request_hit", nil),
		messageSignatureMiss:            metrics.GetOrRegisterCounter("message_signature_request_miss", nil),
		messageSignatureRequestDuration: metrics.GetOrRegisterGauge("message_signature_request_duration", nil),
		blockSignatureRequest:           metrics.GetOrRegisterCounter("block_signature_request_count", nil),
		blockSignatureHit:               metrics.GetOrRegisterCounter("block_signature_request_hit", nil),
		blockSignatureMiss:              metrics.GetOrRegisterCounter("block_signature_request_miss", nil),
		blockSignatureRequestDuration:   metrics.GetOrRegisterGauge("block_signature_request_duration", nil),
	}
}

func (h *handlerStats) IncMessageSignatureRequest() { h.messageSignatureRequest.Inc(1) }
func (h *handlerStats) IncMessageSignatureHit()     { h.messageSignatureHit.Inc(1) }
func (h *handlerStats) IncMessageSignatureMiss()    { h.messageSignatureMiss.Inc(1) }

// UpdateMessageSignatureRequestTime accumulates [duration] (in nanoseconds)
// onto the duration gauge. NOTE(review): Inc on a Gauge makes this a running
// total rather than a last-observed value — confirm consumers expect that.
func (h *handlerStats) UpdateMessageSignatureRequestTime(duration time.Duration) {
	h.messageSignatureRequestDuration.Inc(int64(duration))
}
func (h *handlerStats) IncBlockSignatureRequest() { h.blockSignatureRequest.Inc(1) }
func (h *handlerStats) IncBlockSignatureHit()     { h.blockSignatureHit.Inc(1) }
func (h *handlerStats) IncBlockSignatureMiss()    { h.blockSignatureMiss.Inc(1) }

// UpdateBlockSignatureRequestTime accumulates [duration] (in nanoseconds)
// onto the duration gauge; see the note on the message variant above.
func (h *handlerStats) UpdateBlockSignatureRequestTime(duration time.Duration) {
	h.blockSignatureRequestDuration.Inc(int64(duration))
}

// Clear resets all counters and zeroes the duration gauges (used by tests,
// since the metrics are registered globally).
func (h *handlerStats) Clear() {
	h.messageSignatureRequest.Clear()
	h.messageSignatureHit.Clear()
	h.messageSignatureMiss.Clear()
	h.messageSignatureRequestDuration.Update(0)
	h.blockSignatureRequest.Clear()
	h.blockSignatureHit.Clear()
	h.blockSignatureMiss.Clear()
	h.blockSignatureRequestDuration.Update(0)
}
diff --git a/coreth/warp/service.go b/coreth/warp/service.go
new file mode 100644
index 00000000..6eec775e
--- /dev/null
+++ b/coreth/warp/service.go
@@ -0,0 +1,131 @@
// (c) 2023, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
package warp

import (
	"context"
	"errors"
	"fmt"

	"github.com/ava-labs/avalanchego/ids"
	"github.com/ava-labs/avalanchego/vms/platformvm/warp"
	"github.com/ava-labs/avalanchego/vms/platformvm/warp/payload"
	"github.com/ava-labs/coreth/peer"
	"github.com/ava-labs/coreth/warp/aggregator"
	"github.com/ava-labs/coreth/warp/validators"
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/log"
)

var errNoValidators = errors.New("cannot aggregate signatures from subnet with no validators")

// API introduces snowman specific functionality to the evm
type API struct {
	networkID                     uint32             // Avalanche network ID used when building unsigned messages
	sourceSubnetID, sourceChainID ids.ID             // identity of the chain serving this API
	backend                       Backend            // local message/signature store
	state                         *validators.State  // validator state (Primary Network special-cased)
	client                        peer.NetworkClient // used to request signatures from peers
}

// NewAPI constructs the warp RPC API backed by [backend] for local lookups and
// [state]/[client] for signature aggregation.
func NewAPI(networkID uint32, sourceSubnetID ids.ID, sourceChainID ids.ID, state *validators.State, backend Backend, client peer.NetworkClient) *API {
	return &API{
		networkID:      networkID,
		sourceSubnetID: sourceSubnetID,
		sourceChainID:  sourceChainID,
		backend:        backend,
		state:          state,
		client:         client,
	}
}

// GetMessage returns the Warp message associated with a messageID.
func (a *API) GetMessage(ctx context.Context, messageID ids.ID) (hexutil.Bytes, error) {
	message, err := a.backend.GetMessage(messageID)
	if err != nil {
		return nil, fmt.Errorf("failed to get message %s with error %w", messageID, err)
	}
	return hexutil.Bytes(message.Bytes()), nil
}

// GetMessageSignature returns the BLS signature associated with a messageID.
func (a *API) GetMessageSignature(ctx context.Context, messageID ids.ID) (hexutil.Bytes, error) {
	signature, err := a.backend.GetMessageSignature(messageID)
	if err != nil {
		return nil, fmt.Errorf("failed to get signature for message %s with error %w", messageID, err)
	}
	return signature[:], nil
}

// GetBlockSignature returns the BLS signature associated with a blockID.
func (a *API) GetBlockSignature(ctx context.Context, blockID ids.ID) (hexutil.Bytes, error) {
	signature, err := a.backend.GetBlockSignature(blockID)
	if err != nil {
		return nil, fmt.Errorf("failed to get signature for block %s with error %w", blockID, err)
	}
	return signature[:], nil
}

// GetMessageAggregateSignature fetches the aggregate signature for the requested [messageID]
func (a *API) GetMessageAggregateSignature(ctx context.Context, messageID ids.ID, quorumNum uint64, subnetIDStr string) (signedMessageBytes hexutil.Bytes, err error) {
	unsignedMessage, err := a.backend.GetMessage(messageID)
	if err != nil {
		return nil, err
	}
	return a.aggregateSignatures(ctx, unsignedMessage, quorumNum, subnetIDStr)
}

// GetBlockAggregateSignature fetches the aggregate signature for the requested [blockID]
func (a *API) GetBlockAggregateSignature(ctx context.Context, blockID ids.ID, quorumNum uint64, subnetIDStr string) (signedMessageBytes hexutil.Bytes, err error) {
	// Wrap the block hash in an unsigned warp message before aggregating.
	blockHashPayload, err := payload.NewHash(blockID)
	if err != nil {
		return nil, err
	}
	unsignedMessage, err := warp.NewUnsignedMessage(a.networkID, a.sourceChainID, blockHashPayload.Bytes())
	if err != nil {
		return nil, err
	}

	return a.aggregateSignatures(ctx, unsignedMessage, quorumNum, subnetIDStr)
}

// aggregateSignatures collects signatures over [unsignedMessage] from the
// canonical validator set of [subnetIDStr] (the source subnet when empty) at
// the current P-chain height, until [quorumNum] weight is reached, and returns
// the signed message bytes.
func (a *API) aggregateSignatures(ctx context.Context, unsignedMessage *warp.UnsignedMessage, quorumNum uint64, subnetIDStr string) (hexutil.Bytes, error) {
	subnetID := a.sourceSubnetID
	if len(subnetIDStr) > 0 {
		sid, err := ids.FromString(subnetIDStr)
		if err != nil {
			// NOTE(review): the underlying parse error is dropped here —
			// consider wrapping it with %w for easier debugging.
			return nil, fmt.Errorf("failed to parse subnetID: %q", subnetIDStr)
		}
		subnetID = sid
	}
	pChainHeight, err := a.state.GetCurrentHeight(ctx)
	if err != nil {
		return nil, err
	}

	validators, totalWeight, err := warp.GetCanonicalValidatorSet(ctx, a.state, pChainHeight, subnetID)
	if err != nil {
		return nil, fmt.Errorf("failed to get validator set: %w", err)
	}
	if len(validators) == 0 {
		return nil, fmt.Errorf("%w (SubnetID: %s, Height: %d)", errNoValidators, subnetID, pChainHeight)
	}

	log.Debug("Fetching signature",
		"sourceSubnetID", subnetID,
		"height", pChainHeight,
		"numValidators", len(validators),
		"totalWeight", totalWeight,
	)

	agg := aggregator.New(aggregator.NewSignatureGetter(a.client), validators, totalWeight)
	signatureResult, err := agg.AggregateSignatures(ctx, unsignedMessage, quorumNum)
	if err != nil {
		return nil, err
	}
	// TODO: return the signature and total weight as well to the caller for more complete details
	// Need to decide on the best UI for this and write up documentation with the potential
	// gotchas that could impact signed messages becoming invalid.
	return hexutil.Bytes(signatureResult.Message.Bytes()), nil
}
diff --git a/coreth/warp/validators/state.go b/coreth/warp/validators/state.go
new file mode 100644
index 00000000..c2929a58
--- /dev/null
+++ b/coreth/warp/validators/state.go
@@ -0,0 +1,51 @@
// (c) 2023, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.

package validators

import (
	"context"

	"github.com/ava-labs/avalanchego/ids"
	"github.com/ava-labs/avalanchego/snow"
	"github.com/ava-labs/avalanchego/snow/validators"
	"github.com/ava-labs/avalanchego/utils/constants"
)

var _ validators.State = (*State)(nil)

// State provides a special case used to handle Avalanche Warp Message verification for messages sent
// from the Primary Network. Subnets have strictly fewer validators than the Primary Network, so we require
// signatures from a threshold of the RECEIVING subnet validator set rather than the full Primary Network
// since the receiving subnet already relies on a majority of its validators being correct.
type State struct {
	chainContext *snow.Context
	validators.State
}

// NewState returns a wrapper of [validators.State] which special cases the handling of the Primary Network.
//
// The wrapped state will return the chainContext's Subnet validator set instead of the Primary Network when
// the Primary Network SubnetID is passed in.
func NewState(chainContext *snow.Context) *State {
	return &State{
		chainContext: chainContext,
		State:        chainContext.ValidatorState,
	}
}

// GetValidatorSet returns the validator set at [height] for [subnetID],
// substituting this chain's own subnet when the Primary Network is requested.
func (s *State) GetValidatorSet(
	ctx context.Context,
	height uint64,
	subnetID ids.ID,
) (map[ids.NodeID]*validators.GetValidatorOutput, error) {
	// If the subnetID is anything other than the Primary Network, this is a direct
	// passthrough
	if subnetID != constants.PrimaryNetworkID {
		return s.State.GetValidatorSet(ctx, height, subnetID)
	}

	// If the requested subnet is the primary network, then we return the validator
	// set for the Subnet that is receiving the message instead.
	return s.State.GetValidatorSet(ctx, height, s.chainContext.SubnetID)
}
diff --git a/coreth/warp/validators/state_test.go b/coreth/warp/validators/state_test.go
new file mode 100644
index 00000000..e06d1374
--- /dev/null
+++ b/coreth/warp/validators/state_test.go
@@ -0,0 +1,47 @@
// (c) 2023, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
package validators

import (
	"context"
	"testing"

	"github.com/ava-labs/avalanchego/ids"
	"github.com/ava-labs/avalanchego/snow/validators"
	"github.com/ava-labs/avalanchego/utils/constants"
	"github.com/ava-labs/coreth/utils"
	"github.com/stretchr/testify/require"
	"go.uber.org/mock/gomock"
)

// TestGetValidatorSetPrimaryNetwork verifies that requests for the Primary
// Network validator set are redirected to this chain's own subnet, while
// requests for any other subnet pass through unchanged.
func TestGetValidatorSetPrimaryNetwork(t *testing.T) {
	require := require.New(t)
	ctrl := gomock.NewController(t)

	mySubnetID := ids.GenerateTestID()
	otherSubnetID := ids.GenerateTestID()

	mockState := validators.NewMockState(ctrl)
	snowCtx := utils.TestSnowContext()
	snowCtx.SubnetID = mySubnetID
	snowCtx.ValidatorState = mockState
	state := NewState(snowCtx)
	// Expect that requesting my validator set returns my validator set
	mockState.EXPECT().GetValidatorSet(gomock.Any(), gomock.Any(), mySubnetID).Return(make(map[ids.NodeID]*validators.GetValidatorOutput), nil)
	output, err := state.GetValidatorSet(context.Background(), 10, mySubnetID)
	require.NoError(err)
	require.Len(output, 0)

	// Expect that requesting the Primary Network validator set overrides and returns my validator set
	mockState.EXPECT().GetValidatorSet(gomock.Any(), gomock.Any(), mySubnetID).Return(make(map[ids.NodeID]*validators.GetValidatorOutput), nil)
	output, err = state.GetValidatorSet(context.Background(), 10, constants.PrimaryNetworkID)
	require.NoError(err)
	require.Len(output, 0)

	// Expect that requesting other validator set returns that validator set
	mockState.EXPECT().GetValidatorSet(gomock.Any(), gomock.Any(), otherSubnetID).Return(make(map[ids.NodeID]*validators.GetValidatorOutput), nil)
	output, err = state.GetValidatorSet(context.Background(), 10, otherSubnetID)
	require.NoError(err)
	require.Len(output, 0)
}
diff --git a/entrypoint.sh b/entrypoint.sh
old mode 100755
new mode 100644
index 4dfc5707..323edbe0
--- a/entrypoint.sh
+++ b/entrypoint.sh
@@ -50,17 +50,18 @@
 then
 fi

 exec /app/build/avalanchego \
-    --http-host=$HTTP_HOST \
--http-port=$HTTP_PORT \
-    --staking-port=$STAKING_PORT \
-    --public-ip=$PUBLIC_IP \
-    --db-dir=$DB_DIR \
-    --db-type=$DB_TYPE \
-    --bootstrap-ips=$BOOTSTRAP_IPS \
-    --bootstrap-ids=$BOOTSTRAP_IDS \
-    --bootstrap-beacon-connection-timeout=$BOOTSTRAP_BEACON_CONNECTION_TIMEOUT \
-    --chain-config-dir=$CHAIN_CONFIG_DIR \
-    --log-dir=$LOG_DIR \
-    --log-level=$LOG_LEVEL \
-    --network-id=$NETWORK_ID \
-    $EXTRA_ARGUMENTS
+    --http-host="$HTTP_HOST" \
+    --http-port="$HTTP_PORT" \
+    --staking-port="$STAKING_PORT" \
+    --public-ip="$PUBLIC_IP" \
+    --db-dir="$DB_DIR" \
+    --db-type="$DB_TYPE" \
+    --bootstrap-ips="$BOOTSTRAP_IPS" \
+    --bootstrap-ids="$BOOTSTRAP_IDS" \
+    --bootstrap-beacon-connection-timeout="$BOOTSTRAP_BEACON_CONNECTION_TIMEOUT" \
+    --chain-config-dir="$CHAIN_CONFIG_DIR" \
+    --log-dir="$LOG_DIR" \
+    --log-level="$LOG_LEVEL" \
+    --network-id="$NETWORK_ID" \
+    --http-allowed-hosts="$HTTP_ALLOWED_HOSTS" \
+    $EXTRA_ARGUMENTS
\ No newline at end of file
diff --git a/entrypoint/main.go b/entrypoint/main.go
new file mode 100644
index 00000000..225e2941
--- /dev/null
+++ b/entrypoint/main.go
@@ -0,0 +1,225 @@
package main

import (
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"net/http"
	"os"
	"strings"
	"syscall"
	"time"
)

// rpcRequest is a minimal JSON-RPC 2.0 request envelope.
type rpcRequest struct {
	JSONRPC string `json:"jsonrpc"`
	ID      int    `json:"id"`
	Method  string `json:"method"`
}

// rpcResponse captures only the result member of a JSON-RPC response.
// NOTE(review): the "error" member is not captured, so RPC-level errors are
// indistinguishable from an empty result.
type rpcResponse struct {
	Result json.RawMessage `json:"result"`
}

// fetchPublicIP queries the Flare CDN trace endpoint and extracts the caller's
// public IP from the "ip=" line of the plain-text response.
func fetchPublicIP() (string, error) {
	resp, err := http.Get("https://flare.network/cdn-cgi/trace")
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", err
	}

	for _, line := range strings.Split(string(body), "\n") {
		if strings.HasPrefix(line, "ip=") {
			return strings.TrimPrefix(line, "ip="), nil
		}
	}

	return "", fmt.Errorf("no ip= line in trace")
}

// parseIPResult extracts node IP(s) from an info.getNodeIP result. The result
// may be an object with an "ip" field (string or string array), a bare string,
// or a bare array; arrays are joined with commas.
func parseIPResult(raw json.RawMessage) (string, error) {
	var obj struct {
		IP json.RawMessage `json:"ip"`
	}
	if err := json.Unmarshal(raw, &obj); err == nil {
		var single string
		if err := json.Unmarshal(obj.IP, &single); err == nil {
			return single, nil
		}
		var arr []string
		if err := json.Unmarshal(obj.IP, &arr); err == nil {
			return strings.Join(arr, ","), nil
		}
		return "", fmt.Errorf("unexpected ip field format: %s", obj.IP)
	}
	var single string
	if err := json.Unmarshal(raw, &single); err == nil {
		return single, nil
	}
	var arr []string
	if err := json.Unmarshal(raw, &arr); err == nil {
		return strings.Join(arr, ","), nil
	}
	return "", fmt.Errorf("unexpected result format: %s", string(raw))
}

// parseNodeIDResult extracts the node ID from an info.getNodeID result, which
// may be an object with a "nodeID" field or a bare string.
func parseNodeIDResult(raw json.RawMessage) (string, error) {
	var obj struct {
		NodeID string `json:"nodeID"`
	}
	if err := json.Unmarshal(raw, &obj); err == nil {
		return obj.NodeID, nil
	}
	var s string
	if err := json.Unmarshal(raw, &s); err == nil {
		return s, nil
	}
	return "", fmt.Errorf("unexpected nodeID format: %s", string(raw))
}

// rpcCall POSTs a JSON-RPC request for [method] to [url] and returns the raw
// result member. NOTE(review): neither the HTTP status code nor the JSON-RPC
// "error" member is checked — a failed call can yield an empty result.
func rpcCall(client *http.Client, url, method string) (json.RawMessage, error) {
	body, _ := json.Marshal(rpcRequest{"2.0", 1, method})
	resp, err := client.Post(url, "application/json", bytes.NewReader(body))
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	var wrap rpcResponse
	if err := json.NewDecoder(resp.Body).Decode(&wrap); err != nil {
		return nil, err
	}
	return wrap.Result, nil
}

// main optionally autoconfigures PUBLIC_IP and the bootstrap IPs/IDs from the
// environment, then assembles the avalanchego argument list mirroring
// entrypoint.sh. (The function body continues beyond this chunk.)
func main() {
	if os.Getenv("AUTOCONFIGURE_PUBLIC_IP") == "1" {
		if os.Getenv("PUBLIC_IP") == "" {
			fmt.Fprintln(os.Stderr, "Autoconfiguring public IP")
			ip, err := fetchPublicIP()
			if err != nil {
				fmt.Fprintln(os.Stderr, "failed to get ip")
				os.Exit(1)
			}
			fmt.Fprintf(os.Stderr, "Got public address %s \n", ip)
			os.Setenv("PUBLIC_IP", ip)
		} else {
			// A pre-set PUBLIC_IP wins over autoconfiguration.
			msg := fmt.Sprintf(
				`/!\ AUTOCONFIGURE_PUBLIC_IP is enabled, but PUBLIC_IP is already `+
					`set to '%s'! Skipping autoconfigure and using current PUBLIC_IP value!`+"\n",
				os.Getenv("PUBLIC_IP"),
			)
			fmt.Fprint(os.Stderr, msg)
		}
	}

	if os.Getenv("AUTOCONFIGURE_BOOTSTRAP") == "1" {
		// Primary endpoint first, then comma-separated fallbacks.
		endpoints := []string{os.Getenv("AUTOCONFIGURE_BOOTSTRAP_ENDPOINT")}
		if fb := os.Getenv("AUTOCONFIGURE_FALLBACK_ENDPOINTS"); fb != "" {
			for _, e := range strings.Split(fb, ",") {
				e = strings.TrimSpace(e)
				if e != "" {
					endpoints = append(endpoints, e)
				}
			}
		}

		var endpoint string
		fmt.Fprintln(os.Stderr, "Trying provided bootstrap endpoints")
		client := http.Client{Timeout: 5 * time.Second}
		probe := []byte(`{"jsonrpc":"2.0","id":1,"method":"info.getNodeIP"}`)

		for _, ep := range endpoints {
			fmt.Fprintf(os.Stderr, " Trying endpoint %s\n", ep)

			resp, err := client.Post(ep, "application/json", bytes.NewReader(probe))
			if err != nil {
				fmt.Fprintf(os.Stderr, " error: %v\n", err)
				continue
			}
			// NOTE(review): defer inside a loop keeps every response body open
			// until main returns; consider closing explicitly per iteration.
			defer resp.Body.Close()

			if resp.StatusCode == http.StatusOK {
				endpoint = ep
				break
			}
			fmt.Fprintln(os.Stderr, " Failed! The endpoint is unreachable.")
		}

		if endpoint == "" {
			fmt.Fprintln(os.Stderr, " None of provided bootstrap endpoints worked!")
			os.Exit(1)
		}
		fmt.Fprintln(os.Stderr, "found endpoint : ", endpoint)

		fmt.Fprintln(os.Stderr, "Autoconfiguring bootstrap IPs and IDs")

		rawIPs, err := rpcCall(&client, endpoint, "info.getNodeIP")
		if err != nil {
			fmt.Fprintln(os.Stderr, " getNodeIP RPC failed:", err)
			os.Exit(1)
		}
		bootstrap_IPs, err := parseIPResult(rawIPs)
		if err != nil {
			fmt.Fprintln(os.Stderr, " parsing IPs failed:", err)
			os.Exit(1)
		}

		rawIDs, err := rpcCall(&client, endpoint, "info.getNodeID")
		if err != nil {
			fmt.Fprintln(os.Stderr, " getNodeID RPC failed:", err)
			os.Exit(1)
		}

		bootstrap_IDs, err := parseNodeIDResult(rawIDs)
		if err != nil {
			fmt.Fprintln(os.Stderr, " parsing IDs failed:", err)
			os.Exit(1)
		}

		fmt.Fprintf(os.Stderr, " Got bootstrap ips: '%s'\n", bootstrap_IPs)
		fmt.Fprintf(os.Stderr, " Got bootstrap ids: '%s'\n", bootstrap_IDs)

		os.Setenv("BOOTSTRAP_IPS", bootstrap_IPs)
		os.Setenv("BOOTSTRAP_IDS", bootstrap_IDs)
	}

	// Flag list mirrors the avalanchego invocation in entrypoint.sh.
	args := []string{
		"--http-host", os.Getenv("HTTP_HOST"),
		"--http-port", os.Getenv("HTTP_PORT"),
		"--staking-port", os.Getenv("STAKING_PORT"),
		"--public-ip", os.Getenv("PUBLIC_IP"),
		"--db-dir", os.Getenv("DB_DIR"),
		"--db-type", os.Getenv("DB_TYPE"),
		"--bootstrap-ips", os.Getenv("BOOTSTRAP_IPS"),
		"--bootstrap-ids", os.Getenv("BOOTSTRAP_IDS"),
		"--bootstrap-beacon-connection-timeout", os.Getenv("BOOTSTRAP_BEACON_CONNECTION_TIMEOUT"),
		"--chain-config-dir", os.Getenv("CHAIN_CONFIG_DIR"),
		"--log-dir", os.Getenv("LOG_DIR"),
		"--log-level", os.Getenv("LOG_LEVEL"),
		"--network-id", os.Getenv("NETWORK_ID"),
		"--http-allowed-hosts", os.Getenv("HTTP_ALLOWED_HOSTS"),
	}
	if extra := os.Getenv("EXTRA_ARGUMENTS"); extra != "" {
		args = append(args, strings.Fields(extra)...)
+ } + fmt.Fprintln(os.Stderr, args) + path := "/app/build/avalanchego" + + if _, err := os.Stat(path); errors.Is(err, os.ErrNotExist) { + fmt.Fprintln(os.Stderr, "file does not exist") + os.Exit(1) + } else { + env := os.Environ() + err := syscall.Exec(path, args, env) + if err != nil { + fmt.Fprintf(os.Stderr, "failed to exec avalanchego: %v\n", err) + os.Exit(1) + } + } +}